// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tar implements access to tar archives.
//
// Tape archives (tar) are a file format for storing a sequence of files that
// can be read and written in a streaming manner.
// This package aims to cover most variations of the format,
// including those produced by GNU and BSD tar tools.
package tar
import (
"errors"
"fmt"
"internal/godebug"
"io/fs"
"maps"
"math"
"path"
"reflect"
"strconv"
"strings"
"time"
)
// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit
// architectures. If a large value is encountered when decoding, the result
// stored in Header will be the truncated version.
// tarinsecurepath holds the "tarinsecurepath" GODEBUG setting.
// NOTE(review): presumably consulted elsewhere in the package when deciding
// whether to report ErrInsecurePath — confirm against the Reader implementation.
var tarinsecurepath = godebug.New("tarinsecurepath")
var (
	// ErrHeader indicates an invalid or unparsable tar header.
	ErrHeader = errors.New("archive/tar: invalid tar header")
	// ErrWriteTooLong is returned for writes that exceed what the header allows.
	ErrWriteTooLong = errors.New("archive/tar: write too long")
	// ErrFieldTooLong indicates a header field too long for any supported format.
	ErrFieldTooLong = errors.New("archive/tar: header field too long")
	// ErrWriteAfterClose indicates a write after the archive was closed.
	ErrWriteAfterClose = errors.New("archive/tar: write after close")
	// ErrInsecurePath reports an insecure file path
	// (see the tarinsecurepath GODEBUG variable above).
	ErrInsecurePath = errors.New("archive/tar: insecure file path")

	// Internal sparse-file errors.
	errMissData      = errors.New("archive/tar: sparse file references non-existent data")
	errUnrefData     = errors.New("archive/tar: sparse file contains unreferenced data")
	errWriteHole     = errors.New("archive/tar: write non-NUL byte in sparse hole")
	errSparseTooLong = errors.New("archive/tar: sparse map too long")
)
// headerError collects the reasons a Header cannot be encoded.
// The zero value (or one containing only empty strings) renders as just the
// fixed prefix message.
type headerError []string

// Error joins all non-empty reasons after a fixed prefix,
// separating them with "; and ".
func (he headerError) Error() string {
	const prefix = "archive/tar: cannot encode header"
	reasons := make([]string, 0, len(he))
	for _, reason := range he {
		if reason == "" {
			continue // Blank entries carry no information
		}
		reasons = append(reasons, reason)
	}
	if len(reasons) == 0 {
		return prefix
	}
	return fmt.Sprintf("%s: %v", prefix, strings.Join(reasons, "; and "))
}
// Type flags for Header.Typeflag.
const (
	// Type '0' indicates a regular file.
	TypeReg = '0'

	// Deprecated: Use TypeReg instead.
	TypeRegA = '\x00'

	// Type '1' to '6' are header-only flags and may not have a data body.
	TypeLink    = '1' // Hard link
	TypeSymlink = '2' // Symbolic link
	TypeChar    = '3' // Character device node
	TypeBlock   = '4' // Block device node
	TypeDir     = '5' // Directory
	TypeFifo    = '6' // FIFO node

	// Type '7' is reserved.
	TypeCont = '7'

	// Type 'x' is used by the PAX format to store key-value records that
	// are only relevant to the next file.
	// This package transparently handles these types.
	TypeXHeader = 'x'

	// Type 'g' is used by the PAX format to store key-value records that
	// are relevant to all subsequent files.
	// This package only supports parsing and composing such headers,
	// but does not currently support persisting the global state across files.
	TypeXGlobalHeader = 'g'

	// Type 'S' indicates a sparse file in the GNU format.
	TypeGNUSparse = 'S'

	// Types 'L' and 'K' are used by the GNU format for a meta file
	// used to store the path or link name for the next file.
	// This package transparently handles these types.
	TypeGNULongName = 'L'
	TypeGNULongLink = 'K'
)

// Keywords for PAX extended header records.
const (
	paxNone = "" // Indicates that no PAX key is suitable

	paxPath     = "path"
	paxLinkpath = "linkpath"
	paxSize     = "size"
	paxUid      = "uid"
	paxGid      = "gid"
	paxUname    = "uname"
	paxGname    = "gname"
	paxMtime    = "mtime"
	paxAtime    = "atime"
	paxCtime    = "ctime"   // Removed from later revision of PAX spec, but was valid
	paxCharset  = "charset" // Currently unused
	paxComment  = "comment" // Currently unused

	// paxSchilyXattr is the key prefix under which Header.Xattrs entries
	// are encoded as PAX records.
	paxSchilyXattr = "SCHILY.xattr."

	// Keywords for GNU sparse files in a PAX extended header.
	paxGNUSparse          = "GNU.sparse."
	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
	paxGNUSparseOffset    = "GNU.sparse.offset"
	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
	paxGNUSparseMap       = "GNU.sparse.map"
	paxGNUSparseName      = "GNU.sparse.name"
	paxGNUSparseMajor     = "GNU.sparse.major"
	paxGNUSparseMinor     = "GNU.sparse.minor"
	paxGNUSparseSize      = "GNU.sparse.size"
	paxGNUSparseRealSize  = "GNU.sparse.realsize"
)

// basicKeys is a set of the PAX keys for which we have built-in support.
// This does not contain "charset" or "comment", which are both PAX-specific,
// so adding them as first-class features of Header is unlikely.
// Users can use the PAXRecords field to set it themselves.
var basicKeys = map[string]bool{
	paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true,
	paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true,
}
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
//
// For forward compatibility, users that retrieve a Header from Reader.Next,
// mutate it in some ways, and then pass it back to Writer.WriteHeader
// should do so by creating a new Header and copying the fields
// that they are interested in preserving.
type Header struct {
	// Typeflag is the type of header entry.
	// The zero value is automatically promoted to either TypeReg or TypeDir
	// depending on the presence of a trailing slash in Name.
	Typeflag byte

	Name     string // Name of file entry
	Linkname string // Target name of link (valid for TypeLink or TypeSymlink)

	Size  int64  // Logical file size in bytes
	Mode  int64  // Permission and mode bits
	Uid   int    // User ID of owner (may be truncated on 32-bit platforms; see the package BUG note)
	Gid   int    // Group ID of owner (may be truncated on 32-bit platforms; see the package BUG note)
	Uname string // User name of owner
	Gname string // Group name of owner

	// If the Format is unspecified, then Writer.WriteHeader rounds ModTime
	// to the nearest second and ignores the AccessTime and ChangeTime fields.
	//
	// To use AccessTime or ChangeTime, specify the Format as PAX or GNU.
	// To use sub-second resolution, specify the Format as PAX.
	ModTime    time.Time // Modification time
	AccessTime time.Time // Access time (requires either PAX or GNU support)
	ChangeTime time.Time // Change time (requires either PAX or GNU support)

	Devmajor int64 // Major device number (valid for TypeChar or TypeBlock)
	Devminor int64 // Minor device number (valid for TypeChar or TypeBlock)

	// Xattrs stores extended attributes as PAX records under the
	// "SCHILY.xattr." namespace.
	//
	// The following are semantically equivalent:
	//	h.Xattrs[key] = value
	//	h.PAXRecords["SCHILY.xattr."+key] = value
	//
	// When Writer.WriteHeader is called, the contents of Xattrs will take
	// precedence over those in PAXRecords.
	//
	// Deprecated: Use PAXRecords instead.
	Xattrs map[string]string

	// PAXRecords is a map of PAX extended header records.
	//
	// User-defined records should have keys of the following form:
	//	VENDOR.keyword
	// Where VENDOR is some namespace in all uppercase, and keyword may
	// not contain the '=' character (e.g., "GOLANG.pkg.version").
	// The key and value should be non-empty UTF-8 strings.
	//
	// When Writer.WriteHeader is called, PAX records derived from the
	// other fields in Header take precedence over PAXRecords.
	PAXRecords map[string]string

	// Format specifies the format of the tar header.
	//
	// This is set by Reader.Next as a best-effort guess at the format.
	// Since the Reader liberally reads some non-compliant files,
	// it is possible for this to be FormatUnknown.
	//
	// If the format is unspecified when Writer.WriteHeader is called,
	// then it uses the first format (in the order of USTAR, PAX, GNU)
	// capable of encoding this Header (see Format).
	Format Format
}
// sparseEntry represents a Length-sized fragment at Offset in the file.
type sparseEntry struct{ Offset, Length int64 }

// endOffset returns the offset of the first byte past the fragment.
func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
// A sparse file can be represented as either a sparseDatas or a sparseHoles.
// As long as the total size is known, they are equivalent and one can be
// converted to the other form and back. The various tar formats with sparse
// file support represent sparse files in the sparseDatas form. That is, they
// specify the fragments in the file that has data, and treat everything else as
// having zero bytes. As such, the encoding and decoding logic in this package
// deals with sparseDatas.
//
// However, the external API uses sparseHoles instead of sparseDatas because the
// zero value of sparseHoles logically represents a normal file (i.e., there are
// no holes in it). On the other hand, the zero value of sparseDatas implies
// that the file has no data in it, which is rather odd.
//
// As an example, if the underlying raw file contains the 10-byte data:
//
// var compactFile = "abcdefgh"
//
// And the sparse map has the following entries:
//
// var spd sparseDatas = []sparseEntry{
// {Offset: 2, Length: 5}, // Data fragment for 2..6
// {Offset: 18, Length: 3}, // Data fragment for 18..20
// }
// var sph sparseHoles = []sparseEntry{
// {Offset: 0, Length: 2}, // Hole fragment for 0..1
// {Offset: 7, Length: 11}, // Hole fragment for 7..17
// {Offset: 21, Length: 4}, // Hole fragment for 21..24
// }
//
// Then the content of the resulting sparse file with a Header.Size of 25 is:
//
// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
type (
	sparseDatas []sparseEntry // ordered fragments of the file that contain data
	sparseHoles []sparseEntry // ordered fragments of the file that are holes (all NULs)
)
// validateSparseEntries reports whether sp is a valid sparse map.
// It does not matter whether sp represents data fragments or hole fragments.
func validateSparseEntries(sp []sparseEntry, size int64) bool {
	// These are the same checks as performed by the BSD tar utility.
	if size < 0 {
		return false
	}
	var prev sparseEntry
	for _, ent := range sp {
		if ent.Offset < 0 || ent.Length < 0 {
			return false // Negative values are never okay
		}
		if ent.Offset > math.MaxInt64-ent.Length {
			return false // Integer overflow with large length
		}
		if ent.endOffset() > size {
			return false // Region extends beyond the actual size
		}
		if prev.endOffset() > ent.Offset {
			return false // Regions cannot overlap and must be in order
		}
		prev = ent
	}
	return true
}
// alignSparseEntries mutates src and returns dst where each fragment's
// starting offset is aligned up to the nearest block edge, and each
// ending offset is aligned down to the nearest block edge.
//
// Even though the Go tar Reader and the BSD tar utility can handle entries
// with arbitrary offsets and lengths, the GNU tar utility can only handle
// offsets and lengths that are multiples of blockSize.
func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
	dst := src[:0] // Reuses src's backing array; writes never pass reads
	for _, ent := range src {
		start, end := ent.Offset, ent.endOffset()
		start += blockPadding(start) // Round-up to nearest blockSize
		if end != size {
			end -= blockPadding(-end) // Round-down to nearest blockSize
		}
		if start >= end {
			continue // Fragment shrank to nothing after alignment
		}
		dst = append(dst, sparseEntry{Offset: start, Length: end - start})
	}
	return dst
}
// invertSparseEntries converts a sparse map from one form to the other.
// If the input is sparseHoles, then it will output sparseDatas and vice-versa.
// The input must have been already validated.
//
// This function mutates src and returns a normalized map where:
//   - adjacent fragments are coalesced together
//   - only the last fragment may be empty
//   - the endOffset of the last fragment is the total size
func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
	// dst shares src's backing array; the write index never overtakes the
	// read index, so the in-place rewrite is safe.
	dst := src[:0]
	var gap sparseEntry // Inverted fragment currently being built
	for _, frag := range src {
		if frag.Length == 0 {
			continue // Skip empty fragments
		}
		gap.Length = frag.Offset - gap.Offset
		if gap.Length > 0 {
			dst = append(dst, gap) // Only add non-empty fragments
		}
		gap.Offset = frag.endOffset()
	}
	gap.Length = size - gap.Offset // Possibly the only empty fragment
	return append(dst, gap)
}
// fileState tracks the number of logical (includes sparse holes) and physical
// (actual in tar archive) bytes remaining for the current file.
//
// Invariant: logicalRemaining >= physicalRemaining
type fileState interface {
	// logicalRemaining returns the number of logical bytes left,
	// counting bytes inside sparse holes.
	logicalRemaining() int64
	// physicalRemaining returns the number of bytes left that are
	// actually stored in the archive.
	physicalRemaining() int64
}
// allowedFormats determines which formats can be used.
// The value returned is the logical OR of multiple possible formats.
// If the value is FormatUnknown, then the input Header cannot be encoded
// and an error is returned explaining why.
//
// As a by-product of checking the fields, this function returns paxHdrs, which
// contain all fields that could not be directly encoded.
// A value receiver ensures that this method does not mutate the source Header.
func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) {
	format = FormatUSTAR | FormatPAX | FormatGNU
	paxHdrs = make(map[string]string)

	// Each why* string records the most recent reason the corresponding
	// format was ruled out; used to build a descriptive error at the end.
	var whyNoUSTAR, whyNoPAX, whyNoGNU string
	var preferPAX bool // Prefer PAX over USTAR

	// verifyString checks that s fits in a string field of the given size,
	// narrowing the candidate formats and/or emitting a PAX record.
	verifyString := func(s string, size int, name, paxKey string) {
		// NUL-terminator is optional for path and linkpath.
		// Technically, it is required for uname and gname,
		// but neither GNU nor BSD tar checks for it.
		tooLong := len(s) > size
		allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath
		if hasNUL(s) || (tooLong && !allowLongGNU) {
			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s)
			format.mustNotBe(FormatGNU)
		}
		if !isASCII(s) || tooLong {
			canSplitUSTAR := paxKey == paxPath
			if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok {
				whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s)
				format.mustNotBe(FormatUSTAR)
			}
			if paxKey == paxNone {
				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s)
				format.mustNotBe(FormatPAX)
			} else {
				paxHdrs[paxKey] = s
			}
		}
		// Keep a user-supplied PAX record if it matches the field exactly.
		if v, ok := h.PAXRecords[paxKey]; ok && v == s {
			paxHdrs[paxKey] = v
		}
	}
	// verifyNumeric checks that n fits in a numeric field of the given size.
	verifyNumeric := func(n int64, size int, name, paxKey string) {
		if !fitsInBase256(size, n) {
			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n)
			format.mustNotBe(FormatGNU)
		}
		if !fitsInOctal(size, n) {
			whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n)
			format.mustNotBe(FormatUSTAR)
			if paxKey == paxNone {
				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n)
				format.mustNotBe(FormatPAX)
			} else {
				paxHdrs[paxKey] = strconv.FormatInt(n, 10)
			}
		}
		// Keep a user-supplied PAX record if it matches the field exactly.
		if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) {
			paxHdrs[paxKey] = v
		}
	}
	// verifyTime checks that ts can be represented in each format's
	// timestamp field; the zero time is always representable.
	verifyTime := func(ts time.Time, size int, name, paxKey string) {
		if ts.IsZero() {
			return // Always okay
		}
		if !fitsInBase256(size, ts.Unix()) {
			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts)
			format.mustNotBe(FormatGNU)
		}
		isMtime := paxKey == paxMtime
		fitsOctal := fitsInOctal(size, ts.Unix())
		// USTAR can only store mtime, and only when it fits in octal.
		if (isMtime && !fitsOctal) || !isMtime {
			whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts)
			format.mustNotBe(FormatUSTAR)
		}
		needsNano := ts.Nanosecond() != 0
		if !isMtime || !fitsOctal || needsNano {
			preferPAX = true // USTAR may truncate sub-second measurements
			if paxKey == paxNone {
				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts)
				format.mustNotBe(FormatPAX)
			} else {
				paxHdrs[paxKey] = formatPAXTime(ts)
			}
		}
		// Keep a user-supplied PAX record if it matches the field exactly.
		if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) {
			paxHdrs[paxKey] = v
		}
	}

	// Check basic fields. Field sizes come from the on-disk header layouts.
	var blk block
	v7 := blk.toV7()
	ustar := blk.toUSTAR()
	gnu := blk.toGNU()
	verifyString(h.Name, len(v7.name()), "Name", paxPath)
	verifyString(h.Linkname, len(v7.linkName()), "Linkname", paxLinkpath)
	verifyString(h.Uname, len(ustar.userName()), "Uname", paxUname)
	verifyString(h.Gname, len(ustar.groupName()), "Gname", paxGname)
	verifyNumeric(h.Mode, len(v7.mode()), "Mode", paxNone)
	verifyNumeric(int64(h.Uid), len(v7.uid()), "Uid", paxUid)
	verifyNumeric(int64(h.Gid), len(v7.gid()), "Gid", paxGid)
	verifyNumeric(h.Size, len(v7.size()), "Size", paxSize)
	verifyNumeric(h.Devmajor, len(ustar.devMajor()), "Devmajor", paxNone)
	verifyNumeric(h.Devminor, len(ustar.devMinor()), "Devminor", paxNone)
	verifyTime(h.ModTime, len(v7.modTime()), "ModTime", paxMtime)
	verifyTime(h.AccessTime, len(gnu.accessTime()), "AccessTime", paxAtime)
	verifyTime(h.ChangeTime, len(gnu.changeTime()), "ChangeTime", paxCtime)

	// Check for header-only types.
	var whyOnlyPAX, whyOnlyGNU string
	switch h.Typeflag {
	case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse:
		// Exclude TypeLink and TypeSymlink, since they may reference directories.
		if strings.HasSuffix(h.Name, "/") {
			return FormatUnknown, nil, headerError{"filename may not have trailing slash"}
		}
	case TypeXHeader, TypeGNULongName, TypeGNULongLink:
		return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"}
	case TypeXGlobalHeader:
		// Global headers may only carry Name/Typeflag/Xattrs/PAXRecords/Format;
		// any other populated field is rejected.
		h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}
		if !reflect.DeepEqual(h, h2) {
			return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"}
		}
		whyOnlyPAX = "only PAX supports TypeXGlobalHeader"
		format.mayOnlyBe(FormatPAX)
	}
	// NOTE(review): the error text mentions header-only types, but this branch
	// fires for NON-header-only types carrying a negative size.
	if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 {
		return FormatUnknown, nil, headerError{"negative size on header-only type"}
	}

	// Check PAX records.
	if len(h.Xattrs) > 0 {
		for k, v := range h.Xattrs {
			paxHdrs[paxSchilyXattr+k] = v
		}
		whyOnlyPAX = "only PAX supports Xattrs"
		format.mayOnlyBe(FormatPAX)
	}
	if len(h.PAXRecords) > 0 {
		for k, v := range h.PAXRecords {
			switch _, exists := paxHdrs[k]; {
			case exists:
				continue // Do not overwrite existing records
			case h.Typeflag == TypeXGlobalHeader:
				paxHdrs[k] = v // Copy all records
			case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse):
				paxHdrs[k] = v // Ignore local records that may conflict
			}
		}
		whyOnlyPAX = "only PAX supports PAXRecords"
		format.mayOnlyBe(FormatPAX)
	}
	for k, v := range paxHdrs {
		if !validPAXRecord(k, v) {
			return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)}
		}
	}

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		// Check sparse files.
		if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse {
			if isHeaderOnlyType(h.Typeflag) {
				return FormatUnknown, nil, headerError{"header-only type cannot be sparse"}
			}
			if !validateSparseEntries(h.SparseHoles, h.Size) {
				return FormatUnknown, nil, headerError{"invalid sparse holes"}
			}
			if h.Typeflag == TypeGNUSparse {
				whyOnlyGNU = "only GNU supports TypeGNUSparse"
				format.mayOnlyBe(FormatGNU)
			} else {
				whyNoGNU = "GNU supports sparse files only with TypeGNUSparse"
				format.mustNotBe(FormatGNU)
			}
			whyNoUSTAR = "USTAR does not support sparse files"
			format.mustNotBe(FormatUSTAR)
		}
	*/

	// Check desired format.
	if wantFormat := h.Format; wantFormat != FormatUnknown {
		if wantFormat.has(FormatPAX) && !preferPAX {
			wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too
		}
		format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted
	}
	if format == FormatUnknown {
		switch h.Format {
		case FormatUSTAR:
			err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU}
		case FormatPAX:
			err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU}
		case FormatGNU:
			err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX}
		default:
			err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU}
		}
	}
	return format, paxHdrs, err
}
// FileInfo returns an fs.FileInfo for the Header.
func (h *Header) FileInfo() fs.FileInfo {
	return headerFileInfo{h}
}

// headerFileInfo implements fs.FileInfo by delegating to an underlying Header.
type headerFileInfo struct {
	h *Header
}

// Size reports the logical file size from the underlying Header.
func (fi headerFileInfo) Size() int64 { return fi.h.Size }

// IsDir reports whether the entry describes a directory, derived from Mode.
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }

// ModTime reports the modification time from the underlying Header.
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }

// Sys returns the underlying *Header.
func (fi headerFileInfo) Sys() any { return fi.h }
// Name returns the base name of the file.
//
// Directory entries carry a trailing slash in Header.Name, so the path is
// cleaned first to strip it before taking the base component.
func (fi headerFileInfo) Name() string {
	name := fi.h.Name
	if fi.IsDir() {
		name = path.Clean(name)
	}
	return path.Base(name)
}
// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode fs.FileMode) {
	// Set file permission bits.
	mode = fs.FileMode(fi.h.Mode).Perm()

	// Set setuid, setgid and sticky bits.
	if fi.h.Mode&c_ISUID != 0 {
		mode |= fs.ModeSetuid
	}
	if fi.h.Mode&c_ISGID != 0 {
		mode |= fs.ModeSetgid
	}
	if fi.h.Mode&c_ISVTX != 0 {
		mode |= fs.ModeSticky
	}

	// Set file mode bits; clear perm, setuid, setgid, and sticky bits.
	switch m := fs.FileMode(fi.h.Mode) &^ 07777; m {
	case c_ISDIR:
		mode |= fs.ModeDir
	case c_ISFIFO:
		mode |= fs.ModeNamedPipe
	case c_ISLNK:
		mode |= fs.ModeSymlink
	case c_ISBLK:
		mode |= fs.ModeDevice
	case c_ISCHR:
		mode |= fs.ModeDevice
		mode |= fs.ModeCharDevice
	case c_ISSOCK:
		mode |= fs.ModeSocket
	}

	// Also derive type bits from the Typeflag, covering headers whose Mode
	// field lacks the c_IS* type portion.
	switch fi.h.Typeflag {
	case TypeSymlink:
		mode |= fs.ModeSymlink
	case TypeChar:
		mode |= fs.ModeDevice
		mode |= fs.ModeCharDevice
	case TypeBlock:
		mode |= fs.ModeDevice
	case TypeDir:
		mode |= fs.ModeDir
	case TypeFifo:
		mode |= fs.ModeNamedPipe
	}
	return mode
}
// String returns a human-readable description of the file info,
// formatted by fs.FormatFileInfo.
func (fi headerFileInfo) String() string {
	return fs.FormatFileInfo(fi)
}

// sysStat, if non-nil, populates h from system-dependent fields of fi.
// NOTE(review): expected to be assigned by platform-specific code elsewhere
// in the package; nil when no such support exists — confirm.
var sysStat func(fi fs.FileInfo, h *Header, doNameLookups bool) error

const (
	// Mode constants from the USTAR spec:
	// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
	c_ISUID = 04000 // Set uid
	c_ISGID = 02000 // Set gid
	c_ISVTX = 01000 // Save text (sticky bit)

	// Common Unix mode constants; these are not defined in any common tar standard.
	// Header.FileInfo understands these, but FileInfoHeader will never produce these.
	c_ISDIR  = 040000  // Directory
	c_ISFIFO = 010000  // FIFO
	c_ISREG  = 0100000 // Regular file
	c_ISLNK  = 0120000 // Symbolic link
	c_ISBLK  = 060000  // Block special file
	c_ISCHR  = 020000  // Character special file
	c_ISSOCK = 0140000 // Socket
)
// FileInfoHeader creates a partially-populated [Header] from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
//
// Since fs.FileInfo's Name method only returns the base name of
// the file it describes, it may be necessary to modify Header.Name
// to provide the full path name of the file.
//
// If fi implements [FileInfoNames]
// Header.Gname and Header.Uname
// are provided by the methods of the interface.
func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
	if fi == nil {
		return nil, errors.New("archive/tar: FileInfo is nil")
	}
	fm := fi.Mode()
	h := &Header{
		Name:    fi.Name(),
		ModTime: fi.ModTime(),
		Mode:    int64(fm.Perm()), // or'd with c_IS* constants later
	}

	// Derive the type flag (and type-specific fields) from the file mode.
	switch {
	case fm.IsRegular():
		h.Typeflag = TypeReg
		h.Size = fi.Size()
	case fi.IsDir():
		h.Typeflag = TypeDir
		h.Name += "/"
	case fm&fs.ModeSymlink != 0:
		h.Typeflag = TypeSymlink
		h.Linkname = link
	case fm&fs.ModeDevice != 0:
		if fm&fs.ModeCharDevice != 0 {
			h.Typeflag = TypeChar
		} else {
			h.Typeflag = TypeBlock
		}
	case fm&fs.ModeNamedPipe != 0:
		h.Typeflag = TypeFifo
	case fm&fs.ModeSocket != 0:
		// errors.New rather than fmt.Errorf: no formatting verbs are used.
		return nil, errors.New("archive/tar: sockets not supported")
	default:
		return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
	}

	// Carry over the setuid, setgid, and sticky bits.
	if fm&fs.ModeSetuid != 0 {
		h.Mode |= c_ISUID
	}
	if fm&fs.ModeSetgid != 0 {
		h.Mode |= c_ISGID
	}
	if fm&fs.ModeSticky != 0 {
		h.Mode |= c_ISVTX
	}

	// If possible, populate additional fields from OS-specific
	// FileInfo fields.
	if sys, ok := fi.Sys().(*Header); ok {
		// This FileInfo came from a Header (not the OS). Use the
		// original Header to populate all remaining fields.
		h.Uid = sys.Uid
		h.Gid = sys.Gid
		h.Uname = sys.Uname
		h.Gname = sys.Gname
		h.AccessTime = sys.AccessTime
		h.ChangeTime = sys.ChangeTime
		// Clone the maps so mutations via h cannot affect the source Header.
		h.Xattrs = maps.Clone(sys.Xattrs)
		if sys.Typeflag == TypeLink {
			// hard link
			h.Typeflag = TypeLink
			h.Size = 0
			h.Linkname = sys.Linkname
		}
		h.PAXRecords = maps.Clone(sys.PAXRecords)
	}

	// Prefer names supplied via the FileInfoNames interface over
	// system-dependent lookups performed by sysStat.
	doNameLookups := true
	if iface, ok := fi.(FileInfoNames); ok {
		doNameLookups = false
		var err error
		h.Gname, err = iface.Gname()
		if err != nil {
			return nil, err
		}
		h.Uname, err = iface.Uname()
		if err != nil {
			return nil, err
		}
	}
	if sysStat != nil {
		return h, sysStat(fi, h, doNameLookups)
	}
	return h, nil
}
// FileInfoNames extends [fs.FileInfo].
// Passing an instance of this to [FileInfoHeader] permits the caller
// to avoid a system-dependent name lookup by specifying the Uname and Gname directly.
// If either method returns an error, FileInfoHeader fails with that error.
type FileInfoNames interface {
	fs.FileInfo
	// Uname should give a user name.
	Uname() (string, error)
	// Gname should give a group name.
	Gname() (string, error)
}
// isHeaderOnlyType checks if the given type flag is of the type that has no
// data section even if a size is specified.
func isHeaderOnlyType(flag byte) bool {
	switch flag {
	case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
		return true
	}
	return false
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import "strings"
// Format represents the tar archive format.
//
// The original tar format was introduced in Unix V7.
// Since then, there have been multiple competing formats attempting to
// standardize or extend the V7 format to overcome its limitations.
// The most common formats are the USTAR, PAX, and GNU formats,
// each with their own advantages and limitations.
//
// The following table captures the capabilities of each format:
//
// | USTAR | PAX | GNU
// ------------------+--------+-----------+----------
// Name | 256B | unlimited | unlimited
// Linkname | 100B | unlimited | unlimited
// Size | uint33 | unlimited | uint89
// Mode | uint21 | uint21 | uint57
// Uid/Gid | uint21 | unlimited | uint57
// Uname/Gname | 32B | unlimited | 32B
// ModTime | uint33 | unlimited | int89
// AccessTime | n/a | unlimited | int89
// ChangeTime | n/a | unlimited | int89
// Devmajor/Devminor | uint21 | uint21 | uint57
// ------------------+--------+-----------+----------
// string encoding | ASCII | UTF-8 | binary
// sub-second times | no | yes | no
// sparse files | no | yes | yes
//
// The table's upper portion shows the [Header] fields, where each format reports
// the maximum number of bytes allowed for each string field and
// the integer type used to store each numeric field
// (where timestamps are stored as the number of seconds since the Unix epoch).
//
// The table's lower portion shows specialized features of each format,
// such as supported string encodings, support for sub-second timestamps,
// or support for sparse files.
//
// The Writer currently provides no support for sparse files.
type Format int // bit set; individual formats are combined with bitwise OR (see has/mayBe below)
// Constants to identify various tar formats.
const (
	// Deliberately hide the meaning of constants from public API.
	_ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc...

	// FormatUnknown indicates that the format is unknown.
	FormatUnknown

	// The format of the original Unix V7 tar tool prior to standardization.
	formatV7

	// FormatUSTAR represents the USTAR header format defined in POSIX.1-1988.
	//
	// While this format is compatible with most tar readers,
	// the format has several limitations making it unsuitable for some usages.
	// Most notably, it cannot support sparse files, files larger than 8GiB,
	// filenames larger than 256 characters, and non-ASCII filenames.
	//
	// Reference:
	//	http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
	FormatUSTAR

	// FormatPAX represents the PAX header format defined in POSIX.1-2001.
	//
	// PAX extends USTAR by writing a special file with Typeflag TypeXHeader
	// preceding the original header. This file contains a set of key-value
	// records, which are used to overcome USTAR's shortcomings, in addition to
	// providing the ability to have sub-second resolution for timestamps.
	//
	// Some newer formats add their own extensions to PAX by defining their
	// own keys and assigning certain semantic meaning to the associated values.
	// For example, sparse file support in PAX is implemented using keys
	// defined by the GNU manual (e.g., "GNU.sparse.map").
	//
	// Reference:
	//	http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
	FormatPAX

	// FormatGNU represents the GNU header format.
	//
	// The GNU header format is older than the USTAR and PAX standards and
	// is not compatible with them. The GNU format supports
	// arbitrary file sizes, filenames of arbitrary encoding and length,
	// sparse files, and other features.
	//
	// It is recommended that PAX be chosen over GNU unless the target
	// application can only parse GNU formatted archives.
	//
	// Reference:
	//	https://www.gnu.org/software/tar/manual/html_node/Standard.html
	FormatGNU

	// Schily's tar format, which is incompatible with USTAR.
	// This does not cover STAR extensions to the PAX format; these fall under
	// the PAX format.
	formatSTAR

	// formatMax is the bit one past the highest format; used as the
	// iteration bound in Format.String.
	formatMax
)

// has reports whether f and f2 share at least one format bit.
func (f Format) has(f2 Format) bool { return f&f2 != 0 }

// mayBe adds the formats in f2 to the set f.
func (f *Format) mayBe(f2 Format) { *f |= f2 }

// mayOnlyBe intersects f with f2, removing any format not in f2.
func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 }

// mustNotBe removes the formats in f2 from the set f.
func (f *Format) mustNotBe(f2 Format) { *f &^= f2 }

// formatNames maps each individual format bit to a human-readable name,
// used by Format.String.
var formatNames = map[Format]string{
	formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR",
}
// String returns the name of each format bit set in f, joined with " | "
// and parenthesized when more than one bit is set. An empty set renders
// as "<unknown>".
func (f Format) String() string {
	var parts []string
	for bit := Format(1); bit < formatMax; bit <<= 1 {
		if f.has(bit) {
			parts = append(parts, formatNames[bit])
		}
	}
	if len(parts) == 0 {
		return "<unknown>"
	}
	if len(parts) == 1 {
		return parts[0]
	}
	return "(" + strings.Join(parts, " | ") + ")"
}
// Magics used to identify various formats.
const (
	magicGNU, versionGNU     = "ustar ", " \x00"
	magicUSTAR, versionUSTAR = "ustar\x00", "00"
	trailerSTAR              = "tar\x00"
)

// Size constants from various tar specifications.
const (
	blockSize  = 512 // Size of each block in a tar stream
	nameSize   = 100 // Max length of the name field in USTAR format
	prefixSize = 155 // Max length of the prefix field in USTAR format

	// Max length of a special file (PAX header, GNU long name or link).
	// This matches the limit used by libarchive.
	maxSpecialFileSize = 1 << 20
)
// blockPadding computes the number of bytes needed to pad offset up to the
// nearest block edge where 0 <= n < blockSize.
//
// Since blockSize is a power of two, -offset & (blockSize-1) is the
// distance to the next multiple of blockSize in two's-complement
// arithmetic, and it works for negative offsets too (callers pass -end
// to round an offset down instead of up).
func blockPadding(offset int64) (n int64) {
	return -offset & (blockSize - 1)
}
// zeroBlock is a block consisting entirely of zero bytes.
var zeroBlock block

// block is a single 512-byte (blockSize) unit of a tar stream.
type block [blockSize]byte

// Convert block to any number of formats.
// The first four are pointer casts (the header types are equally-sized
// arrays), so no data is copied; toSparse wraps the underlying byte slice.
func (b *block) toV7() *headerV7       { return (*headerV7)(b) }
func (b *block) toGNU() *headerGNU     { return (*headerGNU)(b) }
func (b *block) toSTAR() *headerSTAR   { return (*headerSTAR)(b) }
func (b *block) toUSTAR() *headerUSTAR { return (*headerUSTAR)(b) }
func (b *block) toSparse() sparseArray { return sparseArray(b[:]) }
// getFormat checks that the block is a valid tar header based on the checksum.
// It then attempts to guess the specific format based on magic values.
// If the checksum fails, then FormatUnknown is returned.
func (b *block) getFormat() Format {
	// Verify checksum; accept either the unsigned (POSIX) or
	// signed (Sun tar) variant.
	var p parser
	got := p.parseOctal(b.toV7().chksum())
	unsigned, signed := b.computeChecksum()
	if p.err != nil || (got != unsigned && got != signed) {
		return FormatUnknown
	}

	// Guess the format from the magic values.
	magic := string(b.toUSTAR().magic())
	version := string(b.toUSTAR().version())
	trailer := string(b.toSTAR().trailer())
	if magic == magicUSTAR {
		if trailer == trailerSTAR {
			return formatSTAR
		}
		return FormatUSTAR | FormatPAX
	}
	if magic == magicGNU && version == versionGNU {
		return FormatGNU
	}
	return formatV7
}
// setFormat writes the magic values necessary for specified format
// and then updates the checksum accordingly. The block is modified in place.
func (b *block) setFormat(format Format) {
	// Set the magic values.
	switch {
	case format.has(formatV7):
		// Do nothing.
	case format.has(FormatGNU):
		copy(b.toGNU().magic(), magicGNU)
		copy(b.toGNU().version(), versionGNU)
	case format.has(formatSTAR):
		copy(b.toSTAR().magic(), magicUSTAR)
		copy(b.toSTAR().version(), versionUSTAR)
		copy(b.toSTAR().trailer(), trailerSTAR)
	case format.has(FormatUSTAR | FormatPAX):
		copy(b.toUSTAR().magic(), magicUSTAR)
		copy(b.toUSTAR().version(), versionUSTAR)
	default:
		panic("invalid format")
	}

	// Update checksum.
	// This field is special in that it is terminated by a NULL then space.
	var f formatter
	field := b.toV7().chksum()
	chksum, _ := b.computeChecksum() // Possible values are 256..128776
	f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
	field[7] = ' '
}
// computeChecksum computes the checksum for the header block.
// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
// signed byte values.
// We compute and return both.
func (b *block) computeChecksum() (unsigned, signed int64) {
	for i := 0; i < len(b); i++ {
		c := b[i]
		// Bytes 148..155 hold the checksum field itself; sum them as if
		// they were all spaces.
		if i >= 148 && i < 156 {
			c = ' '
		}
		unsigned += int64(c)
		signed += int64(int8(c))
	}
	return unsigned, signed
}
// reset clears the block with all zeros so it can be reused as a
// fresh header buffer.
func (b *block) reset() {
	*b = block{}
}
// headerV7 provides byte-slice accessors into the original Unix V7 tar
// header layout. Each accessor returns the raw field at a fixed offset
// within the 512-byte block.
type headerV7 [blockSize]byte

func (h *headerV7) name() []byte     { return h[000:][:100] } // bytes 0-99: file name
func (h *headerV7) mode() []byte     { return h[100:][:8] }   // bytes 100-107: permission mode
func (h *headerV7) uid() []byte      { return h[108:][:8] }   // bytes 108-115: user id
func (h *headerV7) gid() []byte      { return h[116:][:8] }   // bytes 116-123: group id
func (h *headerV7) size() []byte     { return h[124:][:12] }  // bytes 124-135: file size
func (h *headerV7) modTime() []byte  { return h[136:][:12] }  // bytes 136-147: modification time
func (h *headerV7) chksum() []byte   { return h[148:][:8] }   // bytes 148-155: header checksum
func (h *headerV7) typeFlag() []byte { return h[156:][:1] }   // byte 156: entry type
func (h *headerV7) linkName() []byte { return h[157:][:100] } // bytes 157-256: link target
// headerGNU provides byte-slice accessors into the GNU tar header layout.
// The first 257 bytes are shared with the V7 layout (see v7).
type headerGNU [blockSize]byte

// v7 reinterprets the block as the common V7 layout.
func (h *headerGNU) v7() *headerV7      { return (*headerV7)(h) }
func (h *headerGNU) magic() []byte      { return h[257:][:6] }
func (h *headerGNU) version() []byte    { return h[263:][:2] }
func (h *headerGNU) userName() []byte   { return h[265:][:32] }
func (h *headerGNU) groupName() []byte  { return h[297:][:32] }
func (h *headerGNU) devMajor() []byte   { return h[329:][:8] }
func (h *headerGNU) devMinor() []byte   { return h[337:][:8] }
func (h *headerGNU) accessTime() []byte { return h[345:][:12] }
func (h *headerGNU) changeTime() []byte { return h[357:][:12] }
// sparse is the inline sparse map: up to 4 entries of 24 bytes each,
// followed by a one-byte isExtended flag.
func (h *headerGNU) sparse() sparseArray { return sparseArray(h[386:][:24*4+1]) }
func (h *headerGNU) realSize() []byte    { return h[483:][:12] }
// headerSTAR provides byte-slice accessors into the STAR header layout.
// The first 257 bytes are shared with the V7 layout (see v7).
type headerSTAR [blockSize]byte

// v7 reinterprets the block as the common V7 layout.
func (h *headerSTAR) v7() *headerV7      { return (*headerV7)(h) }
func (h *headerSTAR) magic() []byte      { return h[257:][:6] }
func (h *headerSTAR) version() []byte    { return h[263:][:2] }
func (h *headerSTAR) userName() []byte   { return h[265:][:32] }
func (h *headerSTAR) groupName() []byte  { return h[297:][:32] }
func (h *headerSTAR) devMajor() []byte   { return h[329:][:8] }
func (h *headerSTAR) devMinor() []byte   { return h[337:][:8] }
func (h *headerSTAR) prefix() []byte     { return h[345:][:131] } // path prefix (shorter than USTAR's)
func (h *headerSTAR) accessTime() []byte { return h[476:][:12] }
func (h *headerSTAR) changeTime() []byte { return h[488:][:12] }
func (h *headerSTAR) trailer() []byte    { return h[508:][:4] } // STAR-identifying trailer bytes
// headerUSTAR provides byte-slice accessors into the POSIX USTAR header
// layout (also used by PAX). The first 257 bytes are shared with V7.
type headerUSTAR [blockSize]byte

// v7 reinterprets the block as the common V7 layout.
func (h *headerUSTAR) v7() *headerV7     { return (*headerV7)(h) }
func (h *headerUSTAR) magic() []byte     { return h[257:][:6] }
func (h *headerUSTAR) version() []byte   { return h[263:][:2] }
func (h *headerUSTAR) userName() []byte  { return h[265:][:32] }
func (h *headerUSTAR) groupName() []byte { return h[297:][:32] }
func (h *headerUSTAR) devMajor() []byte  { return h[329:][:8] }
func (h *headerUSTAR) devMinor() []byte  { return h[337:][:8] }
func (h *headerUSTAR) prefix() []byte    { return h[345:][:155] } // path prefix, joined before name
// sparseArray is a view over a sparse-map region: a sequence of 24-byte
// sparse entries followed by a single isExtended byte.
type sparseArray []byte

func (s sparseArray) entry(i int) sparseElem { return sparseElem(s[i*24:]) }
func (s sparseArray) isExtended() []byte     { return s[24*s.maxEntries():][:1] } // non-zero means more entries follow in an extension block
func (s sparseArray) maxEntries() int        { return len(s) / 24 }
// sparseElem is a single 24-byte sparse map entry: a 12-byte offset
// followed by a 12-byte length.
type sparseElem []byte

func (s sparseElem) offset() []byte { return s[00:][:12] }
func (s sparseElem) length() []byte { return s[12:][:12] }
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"io"
"path/filepath"
"strconv"
"strings"
"time"
)
// Reader provides sequential access to the contents of a tar archive.
// Reader.Next advances to the next file in the archive (including the first),
// and then Reader can be treated as an io.Reader to access the file's data.
type Reader struct {
	r    io.Reader
	pad  int64      // Amount of padding (ignored) after current file entry
	curr fileReader // Reader for current file entry
	blk  block      // Buffer to use as temporary local storage

	// err is a persistent error.
	// It is only the responsibility of every exported method of Reader to
	// ensure that this error is sticky.
	err error
}
// fileReader abstracts reading the data section of a single file entry.
// It is implemented by regFileReader (dense files) and sparseFileReader
// (sparse files), and reports remaining byte counts via fileState.
type fileReader interface {
	io.Reader
	fileState
	WriteTo(io.Writer) (int64, error)
}
// NewReader creates a new [Reader] reading from r.
func NewReader(r io.Reader) *Reader {
return &Reader{r: r, curr: ®FileReader{r, 0}}
}
// Next advances to the next entry in the tar archive.
// The Header.Size determines how many bytes can be read for the next file.
// Any remaining data in the current file is automatically discarded.
// At the end of the archive, Next returns the error io.EOF.
//
// If Next encounters a non-local name (as defined by [filepath.IsLocal])
// and the GODEBUG environment variable contains `tarinsecurepath=0`,
// Next returns the header with an [ErrInsecurePath] error.
// A future version of Go may introduce this behavior by default.
// Programs that want to accept non-local names can ignore
// the [ErrInsecurePath] error and use the returned header.
func (tr *Reader) Next() (*Header, error) {
	if tr.err != nil {
		return nil, tr.err
	}
	hdr, err := tr.next()
	tr.err = err
	if err != nil || filepath.IsLocal(hdr.Name) {
		return hdr, err
	}
	// Non-local name: reject it only when the GODEBUG opt-in is set.
	if tarinsecurepath.Value() == "0" {
		tarinsecurepath.IncNonDefault()
		err = ErrInsecurePath
	}
	return hdr, err
}
// next reads the next logical file entry, transparently consuming the
// meta "files" (PAX extended headers, GNU long name/link records) that
// describe the real entry that follows them.
func (tr *Reader) next() (*Header, error) {
	var paxHdrs map[string]string
	var gnuLongName, gnuLongLink string

	// Externally, Next iterates through the tar archive as if it is a series of
	// files. Internally, the tar format often uses fake "files" to add meta
	// data that describes the next file. These meta data "files" should not
	// normally be visible to the outside. As such, this loop iterates through
	// one or more "header files" until it finds a "normal file".
	format := FormatUSTAR | FormatPAX | FormatGNU
	for {
		// Discard the remainder of the file and any padding.
		if err := discard(tr.r, tr.curr.physicalRemaining()); err != nil {
			return nil, err
		}
		if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
			return nil, err
		}
		tr.pad = 0

		hdr, rawHdr, err := tr.readHeader()
		if err != nil {
			return nil, err
		}
		if err := tr.handleRegularFile(hdr); err != nil {
			return nil, err
		}
		// Narrow the format guess by what this header claims to be.
		format.mayOnlyBe(hdr.Format)

		// Check for PAX/GNU special headers and files.
		switch hdr.Typeflag {
		case TypeXHeader, TypeXGlobalHeader:
			format.mayOnlyBe(FormatPAX)
			paxHdrs, err = parsePAX(tr)
			if err != nil {
				return nil, err
			}
			if hdr.Typeflag == TypeXGlobalHeader {
				// Global headers are surfaced to the caller as their own
				// entry rather than merged into subsequent entries.
				mergePAX(hdr, paxHdrs)
				return &Header{
					Name:       hdr.Name,
					Typeflag:   hdr.Typeflag,
					Xattrs:     hdr.Xattrs,
					PAXRecords: hdr.PAXRecords,
					Format:     format,
				}, nil
			}
			continue // This is a meta header affecting the next header
		case TypeGNULongName, TypeGNULongLink:
			format.mayOnlyBe(FormatGNU)
			realname, err := readSpecialFile(tr)
			if err != nil {
				return nil, err
			}
			var p parser
			switch hdr.Typeflag {
			case TypeGNULongName:
				gnuLongName = p.parseString(realname)
			case TypeGNULongLink:
				gnuLongLink = p.parseString(realname)
			}
			continue // This is a meta header affecting the next header
		default:
			// The old GNU sparse format is handled here since it is technically
			// just a regular file with additional attributes.

			if err := mergePAX(hdr, paxHdrs); err != nil {
				return nil, err
			}
			if gnuLongName != "" {
				hdr.Name = gnuLongName
			}
			if gnuLongLink != "" {
				hdr.Linkname = gnuLongLink
			}
			if hdr.Typeflag == TypeRegA {
				if strings.HasSuffix(hdr.Name, "/") {
					hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
				} else {
					hdr.Typeflag = TypeReg
				}
			}

			// The extended headers may have updated the size.
			// Thus, setup the regFileReader again after merging PAX headers.
			if err := tr.handleRegularFile(hdr); err != nil {
				return nil, err
			}

			// Sparse formats rely on being able to read from the logical data
			// section; there must be a preceding call to handleRegularFile.
			if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
				return nil, err
			}

			// Set the final guess at the format.
			if format.has(FormatUSTAR) && format.has(FormatPAX) {
				format.mayOnlyBe(FormatUSTAR)
			}
			hdr.Format = format
			return hdr, nil // This is a file, so stop
		}
	}
}
// handleRegularFile sets up the current file reader and padding such that it
// can only read the following logical data section. It will properly handle
// special headers that contain no data section.
func (tr *Reader) handleRegularFile(hdr *Header) error {
nb := hdr.Size
if isHeaderOnlyType(hdr.Typeflag) {
nb = 0
}
if nb < 0 {
return ErrHeader
}
tr.pad = blockPadding(nb)
tr.curr = ®FileReader{r: tr.r, nb: nb}
return nil
}
// handleSparseFile checks if the current file is a sparse format of any type
// and sets the curr reader appropriately.
func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
	var spd sparseDatas
	var err error
	if hdr.Typeflag == TypeGNUSparse {
		spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
	} else {
		spd, err = tr.readGNUSparsePAXHeaders(hdr)
	}
	if err != nil || spd == nil {
		// A nil map means this is not a sparse file.
		return err
	}

	// A non-nil (possibly empty) map means this is a sparse file.
	if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
		return ErrHeader
	}
	sph := invertSparseEntries(spd, hdr.Size)
	tr.curr = &sparseFileReader{tr.curr, sph, 0}
	return nil
}
// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
// If they are found, then this function reads the sparse map and returns it.
// This assumes that 0.0 headers have already been converted to 0.1 headers
// by the PAX header parsing logic.
//
// It returns (nil, nil) when the entry is not a GNU sparse PAX file at all.
func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
	// Identify the version of GNU headers.
	var is1x0 bool
	major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
	switch {
	case major == "0" && (minor == "0" || minor == "1"):
		is1x0 = false
	case major == "1" && minor == "0":
		is1x0 = true
	case major != "" || minor != "":
		return nil, nil // Unknown GNU sparse PAX version
	case hdr.PAXRecords[paxGNUSparseMap] != "":
		is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
	default:
		return nil, nil // Not a PAX format GNU sparse file.
	}
	hdr.Format.mayOnlyBe(FormatPAX)

	// Update hdr from GNU sparse PAX headers.
	if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
		hdr.Name = name
	}
	// Prefer the explicit size record; fall back to the "realsize" record.
	size := hdr.PAXRecords[paxGNUSparseSize]
	if size == "" {
		size = hdr.PAXRecords[paxGNUSparseRealSize]
	}
	if size != "" {
		n, err := strconv.ParseInt(size, 10, 64)
		if err != nil {
			return nil, ErrHeader
		}
		hdr.Size = n
	}

	// Read the sparse map according to the appropriate format.
	if is1x0 {
		// 1.0 stores the map in the data section itself.
		return readGNUSparseMap1x0(tr.curr)
	}
	// 0.x stores the map in PAX records.
	return readGNUSparseMap0x1(hdr.PAXRecords)
}
// mergePAX merges paxHdrs into hdr for all relevant fields of Header.
// A record that fails to parse causes the whole merge to fail with
// ErrHeader. On success, the full record map is stored in hdr.PAXRecords.
func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
	for k, v := range paxHdrs {
		if v == "" {
			continue // Keep the original USTAR value
		}
		var id64 int64
		switch k {
		case paxPath:
			hdr.Name = v
		case paxLinkpath:
			hdr.Linkname = v
		case paxUname:
			hdr.Uname = v
		case paxGname:
			hdr.Gname = v
		case paxUid:
			id64, err = strconv.ParseInt(v, 10, 64)
			hdr.Uid = int(id64) // Integer overflow possible
		case paxGid:
			id64, err = strconv.ParseInt(v, 10, 64)
			hdr.Gid = int(id64) // Integer overflow possible
		case paxAtime:
			hdr.AccessTime, err = parsePAXTime(v)
		case paxMtime:
			hdr.ModTime, err = parsePAXTime(v)
		case paxCtime:
			hdr.ChangeTime, err = parsePAXTime(v)
		case paxSize:
			hdr.Size, err = strconv.ParseInt(v, 10, 64)
		default:
			// Records with the SCHILY.xattr. prefix populate Xattrs,
			// keyed by the remainder of the record name.
			if strings.HasPrefix(k, paxSchilyXattr) {
				if hdr.Xattrs == nil {
					hdr.Xattrs = make(map[string]string)
				}
				hdr.Xattrs[k[len(paxSchilyXattr):]] = v
			}
		}
		if err != nil {
			return ErrHeader
		}
	}
	hdr.PAXRecords = paxHdrs
	return nil
}
// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned.
func parsePAX(r io.Reader) (map[string]string, error) {
	buf, err := readSpecialFile(r)
	if err != nil {
		return nil, err
	}
	sbuf := string(buf)

	// For GNU PAX sparse format 0.0 support.
	// This function transforms the sparse format 0.0 headers into format 0.1
	// headers since 0.0 headers were not PAX compliant.
	var sparseMap []string

	paxHdrs := make(map[string]string)
	for len(sbuf) > 0 {
		key, value, residual, err := parsePAXRecord(sbuf)
		if err != nil {
			return nil, ErrHeader
		}
		sbuf = residual

		switch key {
		case paxGNUSparseOffset, paxGNUSparseNumBytes:
			// Validate sparse header order and value.
			// Offsets and lengths must strictly alternate, starting with
			// an offset, and values must not contain the "," separator
			// used to join them below.
			if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
				(len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
				strings.Contains(value, ",") {
				return nil, ErrHeader
			}
			sparseMap = append(sparseMap, value)
		default:
			paxHdrs[key] = value
		}
	}
	if len(sparseMap) > 0 {
		// Synthesize a 0.1-style map record from the collected 0.0 pairs.
		paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
	}
	return paxHdrs, nil
}
// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary. It returns the raw block of the
// header in case further processing is required.
//
// The err will be set to io.EOF only when one of the following occurs:
//   - Exactly 0 bytes are read and EOF is hit.
//   - Exactly 1 block of zeros is read and EOF is hit.
//   - At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() (*Header, *block, error) {
	// Two blocks of zero bytes marks the end of the archive.
	if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
		return nil, nil, err // EOF is okay here; exactly 0 bytes read
	}
	if bytes.Equal(tr.blk[:], zeroBlock[:]) {
		if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
			return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
		}
		if bytes.Equal(tr.blk[:], zeroBlock[:]) {
			return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read
		}
		return nil, nil, ErrHeader // Zero block and then non-zero block
	}

	// Verify the header matches a known format.
	format := tr.blk.getFormat()
	if format == FormatUnknown {
		return nil, nil, ErrHeader
	}

	var p parser
	hdr := new(Header)

	// Unpack the V7 header; these fields are common to all formats.
	v7 := tr.blk.toV7()
	hdr.Typeflag = v7.typeFlag()[0]
	hdr.Name = p.parseString(v7.name())
	hdr.Linkname = p.parseString(v7.linkName())
	hdr.Size = p.parseNumeric(v7.size())
	hdr.Mode = p.parseNumeric(v7.mode())
	hdr.Uid = int(p.parseNumeric(v7.uid()))
	hdr.Gid = int(p.parseNumeric(v7.gid()))
	hdr.ModTime = time.Unix(p.parseNumeric(v7.modTime()), 0)

	// Unpack format specific fields.
	if format > formatV7 {
		ustar := tr.blk.toUSTAR()
		hdr.Uname = p.parseString(ustar.userName())
		hdr.Gname = p.parseString(ustar.groupName())
		hdr.Devmajor = p.parseNumeric(ustar.devMajor())
		hdr.Devminor = p.parseNumeric(ustar.devMinor())

		var prefix string
		switch {
		case format.has(FormatUSTAR | FormatPAX):
			hdr.Format = format
			ustar := tr.blk.toUSTAR()
			prefix = p.parseString(ustar.prefix())

			// For Format detection, check if block is properly formatted since
			// the parser is more liberal than what USTAR actually permits.
			notASCII := func(r rune) bool { return r >= 0x80 }
			if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
				hdr.Format = FormatUnknown // Non-ASCII characters in block.
			}
			nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
			if !(nul(v7.size()) && nul(v7.mode()) && nul(v7.uid()) && nul(v7.gid()) &&
				nul(v7.modTime()) && nul(ustar.devMajor()) && nul(ustar.devMinor())) {
				hdr.Format = FormatUnknown // Numeric fields must end in NUL
			}
		case format.has(formatSTAR):
			star := tr.blk.toSTAR()
			prefix = p.parseString(star.prefix())
			hdr.AccessTime = time.Unix(p.parseNumeric(star.accessTime()), 0)
			hdr.ChangeTime = time.Unix(p.parseNumeric(star.changeTime()), 0)
		case format.has(FormatGNU):
			hdr.Format = format
			// Use a separate parser so a failure here can be detected
			// independently of earlier field errors (see fallback below).
			var p2 parser
			gnu := tr.blk.toGNU()
			if b := gnu.accessTime(); b[0] != 0 {
				hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
			}
			if b := gnu.changeTime(); b[0] != 0 {
				hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
			}

			// Prior to Go1.8, the Writer had a bug where it would output
			// an invalid tar file in certain rare situations because the logic
			// incorrectly believed that the old GNU format had a prefix field.
			// This is wrong and leads to an output file that mangles the
			// atime and ctime fields, which are often left unused.
			//
			// In order to continue reading tar files created by former, buggy
			// versions of Go, we skeptically parse the atime and ctime fields.
			// If we are unable to parse them and the prefix field looks like
			// an ASCII string, then we fallback on the pre-Go1.8 behavior
			// of treating these fields as the USTAR prefix field.
			//
			// Note that this will not use the fallback logic for all possible
			// files generated by a pre-Go1.8 toolchain. If the generated file
			// happened to have a prefix field that parses as valid
			// atime and ctime fields (e.g., when they are valid octal strings),
			// then it is impossible to distinguish between a valid GNU file
			// and an invalid pre-Go1.8 file.
			//
			// See https://golang.org/issues/12594
			// See https://golang.org/issues/21005
			if p2.err != nil {
				hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
				ustar := tr.blk.toUSTAR()
				if s := p.parseString(ustar.prefix()); isASCII(s) {
					prefix = s
				}
				hdr.Format = FormatUnknown // Buggy file is not GNU
			}
		}
		if len(prefix) > 0 {
			hdr.Name = prefix + "/" + hdr.Name
		}
	}
	return hdr, &tr.blk, p.err
}
// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough.
// If it's larger than four entries, then one or more extension headers are used
// to store the rest of the sparse map.
//
// The Header.Size does not reflect the size of any extended headers used.
// Thus, this function will read from the raw io.Reader to fetch extra headers.
// This method mutates blk in the process.
func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
	// Make sure that the input format is GNU.
	// Unfortunately, the STAR format also has a sparse header format that uses
	// the same type flag but has a completely different layout.
	if blk.getFormat() != FormatGNU {
		return nil, ErrHeader
	}
	hdr.Format.mayOnlyBe(FormatGNU)

	var p parser
	// The logical (expanded) file size lives in the GNU realSize field.
	hdr.Size = p.parseNumeric(blk.toGNU().realSize())
	if p.err != nil {
		return nil, p.err
	}
	s := blk.toGNU().sparse()
	spd := make(sparseDatas, 0, s.maxEntries())
	for {
		for i := 0; i < s.maxEntries(); i++ {
			// This termination condition is identical to GNU and BSD tar.
			if s.entry(i).offset()[0] == 0x00 {
				break // Don't return, need to process extended headers (even if empty)
			}
			offset := p.parseNumeric(s.entry(i).offset())
			length := p.parseNumeric(s.entry(i).length())
			if p.err != nil {
				return nil, p.err
			}
			spd = append(spd, sparseEntry{Offset: offset, Length: length})
		}

		if s.isExtended()[0] > 0 {
			// There are more entries. Read an extension header and parse its entries.
			if _, err := mustReadFull(tr.r, blk[:]); err != nil {
				return nil, err
			}
			s = blk.toSparse()
			continue
		}
		return spd, nil // Done
	}
}
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
// version 1.0. The format of the sparse map consists of a series of
// newline-terminated numeric fields. The first field is the number of entries
// and is always present. Following this are the entries, consisting of two
// fields (offset, length). This function must stop reading at the end
// boundary of the block containing the last newline.
//
// Note that the GNU manual says that numeric values should be encoded in octal
// format. However, the GNU tar utility itself outputs these values in decimal.
// As such, this library treats values as being encoded in decimal.
func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
	var (
		cntNewline int64        // Newlines buffered but not yet consumed
		buf        bytes.Buffer // Buffered map data, consumed token by token
		blk        block        // Scratch block for whole-block reads
		totalSize  int          // Total bytes read so far, to bound the map size
	)

	// feedTokens copies data in blocks from r into buf until there are
	// at least cnt newlines in buf. It will not read more blocks than needed.
	feedTokens := func(n int64) error {
		for cntNewline < n {
			// Bound total consumption so a hostile map cannot make us
			// read without limit.
			totalSize += len(blk)
			if totalSize > maxSpecialFileSize {
				return errSparseTooLong
			}
			if _, err := mustReadFull(r, blk[:]); err != nil {
				return err
			}
			buf.Write(blk[:])
			for _, c := range blk {
				if c == '\n' {
					cntNewline++
				}
			}
		}
		return nil
	}

	// nextToken gets the next token delimited by a newline. This assumes that
	// at least one newline exists in the buffer.
	nextToken := func() string {
		cntNewline--
		tok, _ := buf.ReadString('\n')
		return strings.TrimRight(tok, "\n")
	}

	// Parse for the number of entries.
	// Use integer overflow resistant math to check this.
	if err := feedTokens(1); err != nil {
		return nil, err
	}
	numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
		return nil, ErrHeader
	}

	// Parse for all member entries.
	// numEntries is trusted after this since feedTokens limits the number of
	// tokens based on maxSpecialFileSize.
	if err := feedTokens(2 * numEntries); err != nil {
		return nil, err
	}
	spd := make(sparseDatas, 0, numEntries)
	for i := int64(0); i < numEntries; i++ {
		offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
		length, err2 := strconv.ParseInt(nextToken(), 10, 64)
		if err1 != nil || err2 != nil {
			return nil, ErrHeader
		}
		spd = append(spd, sparseEntry{Offset: offset, Length: length})
	}
	return spd, nil
}
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
// version 0.1. The sparse map is stored in the PAX headers.
func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
	// Parse the number of entries, using integer overflow resistant math.
	numEntries, err := strconv.ParseInt(paxHdrs[paxGNUSparseNumBlocks], 10, 0) // Intentionally parse as native int
	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
		return nil, ErrHeader
	}

	// The map record must hold exactly one (offset, length) pair per entry.
	fields := strings.Split(paxHdrs[paxGNUSparseMap], ",")
	if len(fields) == 1 && fields[0] == "" {
		fields = fields[:0] // Split of "" yields one empty field; treat as none
	}
	if int64(len(fields)) != 2*numEntries {
		return nil, ErrHeader
	}

	// Decode each (offset, length) pair. numEntries is trusted now.
	spd := make(sparseDatas, 0, numEntries)
	for i := 0; i+1 < len(fields); i += 2 {
		offset, err1 := strconv.ParseInt(fields[i], 10, 64)
		length, err2 := strconv.ParseInt(fields[i+1], 10, 64)
		if err1 != nil || err2 != nil {
			return nil, ErrHeader
		}
		spd = append(spd, sparseEntry{Offset: offset, Length: length})
	}
	return spd, nil
}
// Read reads from the current file in the tar archive.
// It returns (0, io.EOF) when it reaches the end of that file,
// until [Next] is called to advance to the next file.
//
// If the current file is sparse, then the regions marked as a hole
// are read back as NUL-bytes.
//
// Calling Read on special types like [TypeLink], [TypeSymlink], [TypeChar],
// [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [io.EOF]) regardless of what
// the [Header.Size] claims.
func (tr *Reader) Read(b []byte) (int, error) {
	if tr.err != nil {
		return 0, tr.err
	}
	n, err := tr.curr.Read(b)
	switch err {
	case nil, io.EOF:
		// io.EOF marks the normal end of the current entry; do not make
		// it sticky, since Next can still advance the archive.
	default:
		tr.err = err
	}
	return n, err
}
// writeTo writes the content of the current file to w.
// The bytes written matches the number of remaining bytes in the current file.
//
// If the current file is sparse and w is an io.WriteSeeker,
// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
// assuming that skipped regions are filled with NULs.
// This always writes the last byte to ensure w is the right size.
//
// TODO(dsnet): Re-export this when adding sparse file support.
// See https://golang.org/issue/22735
func (tr *Reader) writeTo(w io.Writer) (int64, error) {
	if tr.err != nil {
		return 0, tr.err
	}
	n, err := tr.curr.WriteTo(w)
	if err != nil {
		// Unlike Read, every error here (including io.EOF surfaced by the
		// underlying WriteTo) is made sticky.
		tr.err = err
	}
	return n, err
}
// regFileReader is a fileReader for reading data from a regular file entry.
type regFileReader struct {
	r  io.Reader // Underlying Reader
	nb int64     // Number of remaining bytes to read
}
// Read reads up to len(b) bytes of the entry's data, never reading past
// the end of the entry. It reports io.ErrUnexpectedEOF if the underlying
// stream ends before the entry does, and io.EOF once the entry is consumed.
func (fr *regFileReader) Read(b []byte) (n int, err error) {
	// Clamp the request to the bytes remaining in this entry.
	if lim := fr.nb; int64(len(b)) > lim {
		b = b[:lim]
	}
	if len(b) > 0 {
		n, err = fr.r.Read(b)
		fr.nb -= int64(n)
	}
	if err == io.EOF && fr.nb > 0 {
		return n, io.ErrUnexpectedEOF // Underlying stream ended early
	}
	if err == nil && fr.nb == 0 {
		return n, io.EOF // Entry fully consumed
	}
	return n, err
}
// WriteTo copies the remainder of the entry to w.
// Wrapping fr in an anonymous struct hides this WriteTo method from
// io.Copy, preventing it from recursing back into this function.
func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
	return io.Copy(w, struct{ io.Reader }{fr})
}
// logicalRemaining implements fileState.logicalRemaining.
// For a dense file the logical size equals the physical size.
func (fr regFileReader) logicalRemaining() int64 {
	return fr.nb
}
// physicalRemaining implements fileState.physicalRemaining.
// It reports the bytes of entry data still unread from the archive.
func (fr regFileReader) physicalRemaining() int64 {
	return fr.nb
}
// sparseFileReader is a fileReader for reading data from a sparse file entry.
// It layers hole handling on top of the dense reader fr: positions inside a
// hole yield NUL bytes, positions outside read from fr.
type sparseFileReader struct {
	fr  fileReader  // Underlying fileReader
	sp  sparseHoles // Normalized list of sparse holes
	pos int64       // Current position in sparse file
}
// Read fills b by alternating between real data from the underlying dense
// reader and synthesized NUL bytes for hole regions, advancing sr.pos.
func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
	finished := int64(len(b)) >= sr.logicalRemaining()
	if finished {
		b = b[:sr.logicalRemaining()]
	}

	b0 := b
	endPos := sr.pos + int64(len(b))
	for endPos > sr.pos && err == nil {
		var nf int // Bytes read in fragment
		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
		if sr.pos < holeStart { // In a data fragment
			bf := b[:min(int64(len(b)), holeStart-sr.pos)]
			nf, err = tryReadFull(sr.fr, bf)
		} else { // In a hole fragment
			bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
			nf, err = tryReadFull(zeroReader{}, bf)
		}
		b = b[nf:]
		sr.pos += int64(nf)
		if sr.pos >= holeEnd && len(sr.sp) > 1 {
			sr.sp = sr.sp[1:] // Ensure last fragment always remains
		}
	}

	n = len(b0) - len(b)
	switch {
	case err == io.EOF:
		return n, errMissData // Less data in dense file than sparse file
	case err != nil:
		return n, err
	case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
		return n, errUnrefData // More data in dense file than sparse file
	case finished:
		return n, io.EOF
	default:
		return n, nil
	}
}
// WriteTo copies the remainder of the sparse file to w. If w is a working
// io.WriteSeeker, holes are skipped with Seek instead of writing NULs;
// otherwise the data is streamed through Read via io.Copy.
func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
	ws, ok := w.(io.WriteSeeker)
	if ok {
		// Probe with a no-op seek: some io.Seeker implementations always
		// fail, in which case we fall back to plain copying.
		if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
			ok = false // Not all io.Seeker can really seek
		}
	}
	if !ok {
		// The anonymous struct hides WriteTo so io.Copy uses Read.
		return io.Copy(w, struct{ io.Reader }{sr})
	}

	var writeLastByte bool
	pos0 := sr.pos
	for sr.logicalRemaining() > 0 && !writeLastByte && err == nil {
		var nf int64 // Size of fragment
		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
		if sr.pos < holeStart { // In a data fragment
			nf = holeStart - sr.pos
			nf, err = io.CopyN(ws, sr.fr, nf)
		} else { // In a hole fragment
			nf = holeEnd - sr.pos
			if sr.physicalRemaining() == 0 {
				writeLastByte = true
				nf--
			}
			_, err = ws.Seek(nf, io.SeekCurrent)
		}
		sr.pos += nf
		if sr.pos >= holeEnd && len(sr.sp) > 1 {
			sr.sp = sr.sp[1:] // Ensure last fragment always remains
		}
	}

	// If the last fragment is a hole, then seek to 1-byte before EOF, and
	// write a single byte to ensure the file is the right size.
	if writeLastByte && err == nil {
		_, err = ws.Write([]byte{0})
		sr.pos++
	}

	n = sr.pos - pos0
	switch {
	case err == io.EOF:
		return n, errMissData // Less data in dense file than sparse file
	case err != nil:
		return n, err
	case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
		return n, errUnrefData // More data in dense file than sparse file
	default:
		return n, nil
	}
}
// logicalRemaining reports the bytes left in the sparse file's logical view:
// the distance from the current position to the end of the final fragment.
func (sr sparseFileReader) logicalRemaining() int64 {
	return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
}
// physicalRemaining reports how many bytes of real (non-hole) data remain
// in the underlying dense reader.
func (sr sparseFileReader) physicalRemaining() int64 {
	return sr.fr.physicalRemaining()
}
// zeroReader is an endless stream of NUL bytes.
type zeroReader struct{}

// Read fills b entirely with zeros and never fails.
func (zeroReader) Read(b []byte) (int, error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}
// mustReadFull is like io.ReadFull except it returns
// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
func mustReadFull(r io.Reader, b []byte) (int, error) {
n, err := tryReadFull(r, b)
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return n, err
}
// tryReadFull is like io.ReadFull except it returns
// io.EOF when it is hit before len(b) bytes are read.
func tryReadFull(r io.Reader, b []byte) (n int, err error) {
for len(b) > n && err == nil {
var nn int
nn, err = r.Read(b[n:])
n += nn
}
if len(b) == n && err == io.EOF {
err = nil
}
return n, err
}
// readSpecialFile is like io.ReadAll except it returns
// ErrFieldTooLong if more than maxSpecialFileSize is read.
// The limit bounds memory use when reading meta "files" such as PAX
// extended headers and GNU long-name records.
func readSpecialFile(r io.Reader) ([]byte, error) {
	// Read one byte past the limit so an over-long file is detectable.
	buf, err := io.ReadAll(io.LimitReader(r, maxSpecialFileSize+1))
	if len(buf) > maxSpecialFileSize {
		return nil, ErrFieldTooLong
	}
	return buf, err
}
// discard skips n bytes in r, reporting an error if unable to do so.
func discard(r io.Reader, n int64) error {
// If possible, Seek to the last byte before the end of the data section.
// Do this because Seek is often lazy about reporting errors; this will mask
// the fact that the stream may be truncated. We can rely on the
// io.CopyN done shortly afterwards to trigger any IO errors.
var seekSkipped int64 // Number of bytes skipped via Seek
if sr, ok := r.(io.Seeker); ok && n > 1 {
// Not all io.Seeker can actually Seek. For example, os.Stdin implements
// io.Seeker, but calling Seek always returns an error and performs
// no action. Thus, we try an innocent seek to the current position
// to see if Seek is really supported.
pos1, err := sr.Seek(0, io.SeekCurrent)
if pos1 >= 0 && err == nil {
// Seek seems supported, so perform the real Seek.
pos2, err := sr.Seek(n-1, io.SeekCurrent)
if pos2 < 0 || err != nil {
return err
}
seekSkipped = pos2 - pos1
}
}
copySkipped, err := io.CopyN(io.Discard, r, n-seekSkipped)
if err == io.EOF && seekSkipped+copySkipped < n {
err = io.ErrUnexpectedEOF
}
return err
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || linux || dragonfly || openbsd || solaris
package tar
import (
"syscall"
"time"
)
// statAtime returns the access time recorded in st.
// On the platforms selected by this file's build constraint the
// timestamp field is named Atim.
func statAtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Atim.Unix())
}
// statCtime returns the status-change time recorded in st.
// On the platforms selected by this file's build constraint the
// timestamp field is named Ctim.
func statCtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Ctim.Unix())
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package tar
import (
"io/fs"
"os/user"
"runtime"
"strconv"
"sync"
"syscall"
)
// init installs statUnix as the package's platform-specific stat hook
// (sysStat is declared elsewhere in this package).
func init() {
	sysStat = statUnix
}
// userMap and groupMap cache UID and GID lookups for performance reasons.
// The downside is that renaming uname or gname by the OS never takes effect.
// Entries are only ever added, never invalidated.
var userMap, groupMap sync.Map // map[int]string
// statUnix populates h with Unix-specific metadata (UID/GID, user and group
// names, access/change times, and device numbers) taken from fi's underlying
// syscall.Stat_t. It is a no-op when fi does not wrap a *syscall.Stat_t.
// Name lookups are performed only when doNameLookups is true, and are cached
// in userMap/groupMap.
func statUnix(fi fs.FileInfo, h *Header, doNameLookups bool) error {
	sys, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return nil
	}
	h.Uid = int(sys.Uid)
	h.Gid = int(sys.Gid)
	if doNameLookups {
		// Best effort at populating Uname and Gname.
		// The os/user functions may fail for any number of reasons
		// (not implemented on that platform, cgo not enabled, etc).
		if u, ok := userMap.Load(h.Uid); ok {
			h.Uname = u.(string)
		} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
			h.Uname = u.Username
			userMap.Store(h.Uid, h.Uname)
		}
		if g, ok := groupMap.Load(h.Gid); ok {
			h.Gname = g.(string)
		} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
			h.Gname = g.Name
			groupMap.Store(h.Gid, h.Gname)
		}
	}
	h.AccessTime = statAtime(sys)
	h.ChangeTime = statCtime(sys)

	// Best effort at populating Devmajor and Devminor.
	// The bit layout of Rdev differs per OS, so decode it per GOOS.
	if h.Typeflag == TypeChar || h.Typeflag == TypeBlock {
		dev := uint64(sys.Rdev) // May be int32 or uint32
		switch runtime.GOOS {
		case "aix":
			var major, minor uint32
			major = uint32((dev & 0x3fffffff00000000) >> 32)
			minor = uint32((dev & 0x00000000ffffffff) >> 0)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "linux":
			// Copied from golang.org/x/sys/unix/dev_linux.go.
			major := uint32((dev & 0x00000000000fff00) >> 8)
			major |= uint32((dev & 0xfffff00000000000) >> 32)
			minor := uint32((dev & 0x00000000000000ff) >> 0)
			minor |= uint32((dev & 0x00000ffffff00000) >> 12)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "darwin", "ios":
			// Copied from golang.org/x/sys/unix/dev_darwin.go.
			major := uint32((dev >> 24) & 0xff)
			minor := uint32(dev & 0xffffff)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "dragonfly":
			// Copied from golang.org/x/sys/unix/dev_dragonfly.go.
			major := uint32((dev >> 8) & 0xff)
			minor := uint32(dev & 0xffff00ff)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "freebsd":
			// Copied from golang.org/x/sys/unix/dev_freebsd.go.
			major := uint32((dev >> 8) & 0xff)
			minor := uint32(dev & 0xffff00ff)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "netbsd":
			// Copied from golang.org/x/sys/unix/dev_netbsd.go.
			major := uint32((dev & 0x000fff00) >> 8)
			minor := uint32((dev & 0x000000ff) >> 0)
			minor |= uint32((dev & 0xfff00000) >> 12)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "openbsd":
			// Copied from golang.org/x/sys/unix/dev_openbsd.go.
			major := uint32((dev & 0x0000ff00) >> 8)
			minor := uint32((dev & 0x000000ff) >> 0)
			minor |= uint32((dev & 0xffff0000) >> 8)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		default:
			// TODO: Implement solaris (see https://golang.org/issue/8106)
		}
	}
	return nil
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes"
"fmt"
"strconv"
"strings"
"time"
)
// hasNUL reports whether s contains a NUL ('\x00') byte.
func hasNUL(s string) bool {
	return strings.IndexByte(s, 0) >= 0
}
// isASCII reports whether s is a valid ASCII C-style string,
// i.e. contains neither NUL bytes nor bytes >= 0x80.
func isASCII(s string) bool {
	for i := 0; i < len(s); i++ {
		if b := s[i]; b == 0 || b >= 0x80 {
			return false
		}
	}
	return true
}
// toASCII converts the input to an ASCII C-style string.
// This is a best effort conversion, so invalid characters are dropped.
func toASCII(s string) string {
	// Fast path: already clean, return the string unchanged.
	clean := true
	for _, r := range s {
		if r == 0 || r >= 0x80 {
			clean = false
			break
		}
	}
	if clean {
		return s
	}

	// Slow path: rebuild, dropping NULs and non-ASCII runes.
	var sb strings.Builder
	sb.Grow(len(s))
	for _, r := range s {
		if r > 0 && r < 0x80 {
			sb.WriteByte(byte(r))
		}
	}
	return sb.String()
}
// parser holds the sticky error state shared by the parse* helpers.
// Callers invoke several parse methods and then check err once.
type parser struct {
	err error // Last error seen
}
// formatter holds the sticky error state shared by the format* helpers.
// Callers invoke several format methods and then check err once.
type formatter struct {
	err error // Last error seen
}
// parseString parses bytes as a NUL-terminated C-style string.
// If a NUL byte is not found then the whole slice is returned as a string.
func (*parser) parseString(b []byte) string {
	end := bytes.IndexByte(b, 0)
	if end < 0 {
		end = len(b)
	}
	return string(b[:end])
}
// formatString copies s into b, NUL-terminating when room remains.
// If s does not fit, it is truncated and f.err is set to ErrFieldTooLong.
func (f *formatter) formatString(b []byte, s string) {
	if len(s) > len(b) {
		f.err = ErrFieldTooLong
	}
	n := copy(b, s)
	if n < len(b) {
		b[n] = 0
	}

	// Some buggy readers treat a regular file whose truncated V7 path ends
	// in a slash as a directory, even though the full path recorded
	// elsewhere (e.g., via PAX record) has no trailing slash.
	// NUL-terminate just before the trailing slashes to avoid that.
	if len(s) > len(b) && b[len(b)-1] == '/' {
		end := len(strings.TrimRight(s[:len(b)-1], "/"))
		b[end] = 0 // Replace trailing slash with NUL terminator
	}
}
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
	if n >= 9 {
		return true // 8 payload bytes hold any int64
	}
	bits := uint(n-1) * 8
	return x >= -1<<bits && x < 1<<bits
}
// parseNumeric parses the input as being encoded in either base-256 or octal.
// This function may return negative numbers.
// If parsing fails or an integer overflow occurs, p.err is set.
func (p *parser) parseNumeric(b []byte) int64 {
	// Check for base-256 (binary) format first.
	// If the first bit is set, then all following bits constitute a two's
	// complement encoded number in big-endian byte order.
	if len(b) > 0 && b[0]&0x80 != 0 {
		// Handling negative numbers relies on the following identity:
		//	-a-1 == ^a
		//
		// If the number is negative, we use an inversion mask to invert the
		// data bytes and treat the value as an unsigned number.
		var inv byte // 0x00 if positive or zero, 0xff if negative
		if b[0]&0x40 != 0 {
			inv = 0xff
		}

		var x uint64
		for i, c := range b {
			c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
			if i == 0 {
				c &= 0x7f // Ignore signal bit in first byte
			}
			// If the top byte is already occupied, the next shift would
			// drop significant bits.
			if (x >> 56) > 0 {
				p.err = ErrHeader // Integer overflow
				return 0
			}
			x = x<<8 | uint64(c)
		}
		// The result must still be representable as a signed int64.
		if (x >> 63) > 0 {
			p.err = ErrHeader // Integer overflow
			return 0
		}
		if inv == 0xff {
			return ^int64(x)
		}
		return int64(x)
	}

	// Normal case is base-8 (octal) format.
	return p.parseOctal(b)
}
// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
// Otherwise it will attempt to use base-256 (binary) encoding.
// If neither encoding fits, zero is written and f.err is set.
func (f *formatter) formatNumeric(b []byte, x int64) {
	switch {
	case fitsInOctal(len(b), x):
		f.formatOctal(b, x)
	case fitsInBase256(len(b), x):
		// Emit big-endian two's complement, then flag binary mode.
		v := x
		for i := len(b) - 1; i >= 0; i-- {
			b[i] = byte(v)
			v >>= 8
		}
		b[0] |= 0x80 // Highest bit indicates binary format
	default:
		f.formatOctal(b, 0) // Last resort, just write zero
		f.err = ErrFieldTooLong
	}
}
// parseOctal parses b as an octal number, tolerating the padding styles
// used by various tar writers. On malformed input, p.err is set to
// ErrHeader and zero is returned.
func (p *parser) parseOctal(b []byte) int64 {
	// Unused fields are filled with NULs and may also be padded with
	// spaces, so strip both from either end before parsing.
	trimmed := bytes.Trim(b, " \x00")
	if len(trimmed) == 0 {
		return 0
	}
	v, perr := strconv.ParseUint(p.parseString(trimmed), 8, 64)
	if perr != nil {
		p.err = ErrHeader
	}
	return int64(v)
}
// formatOctal writes x into b as a zero-padded, NUL-terminated octal number.
// Values that do not fit are replaced by zero and f.err is set.
func (f *formatter) formatOctal(b []byte, x int64) {
	if !fitsInOctal(len(b), x) {
		x = 0 // Last resort, just write zero
		f.err = ErrFieldTooLong
	}
	digits := strconv.FormatInt(x, 8)
	// Left-pad with '0's, reserving one byte for the NUL terminator.
	if pad := len(b) - len(digits) - 1; pad > 0 {
		digits = strings.Repeat("0", pad) + digits
	}
	f.formatString(b, digits)
}
// fitsInOctal reports whether the integer x fits in a field n-bytes long
// using octal encoding with the appropriate NUL terminator.
func fitsInOctal(n int, x int64) bool {
	if x < 0 {
		return false // Octal cannot represent negative values
	}
	if n >= 22 {
		return true // 21 octal digits hold any non-negative int64
	}
	return x < 1<<(uint(n-1)*3)
}
// parsePAXTime takes a string of the form %d.%d as described in the PAX
// specification. Note that this implementation allows for negative timestamps,
// which is allowed for by the PAX specification, but not always portable.
func parsePAXTime(s string) (time.Time, error) {
	const maxNanoSecondDigits = 9

	// Split into whole seconds and the optional sub-second part.
	secStr, subStr, _ := strings.Cut(s, ".")
	secs, err := strconv.ParseInt(secStr, 10, 64)
	if err != nil {
		return time.Time{}, ErrHeader
	}
	if subStr == "" {
		return time.Unix(secs, 0), nil // No sub-second values
	}

	// Validate the sub-second digits while right-padding (or truncating)
	// them to exactly nanosecond precision.
	nanoBuf := []byte("000000000")
	for i := 0; i < len(subStr); i++ {
		c := subStr[i]
		if c < '0' || c > '9' {
			return time.Time{}, ErrHeader
		}
		if i < maxNanoSecondDigits {
			nanoBuf[i] = c
		}
	}
	nsecs, _ := strconv.ParseInt(string(nanoBuf), 10, 64) // Must succeed after validation
	if strings.HasPrefix(secStr, "-") {
		nsecs = -nsecs // Negative correction
	}
	return time.Unix(secs, nsecs), nil
}
// formatPAXTime converts ts into a time of the form %d.%d as described in the
// PAX specification. This function is capable of negative timestamps.
func formatPAXTime(ts time.Time) (s string) {
secs, nsecs := ts.Unix(), ts.Nanosecond()
if nsecs == 0 {
return strconv.FormatInt(secs, 10)
}
// If seconds is negative, then perform correction.
sign := ""
if secs < 0 {
sign = "-" // Remember sign
secs = -(secs + 1) // Add a second to secs
nsecs = -(nsecs - 1e9) // Take that second away from nsecs
}
return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
}
// parsePAXRecord parses the input PAX record string into a key-value pair.
// If parsing is successful, it will slice off the currently read record and
// return the remainder as r.
func parsePAXRecord(s string) (k, v, r string, err error) {
	// The decimal length field is terminated by the first space.
	lenStr, rest, ok := strings.Cut(s, " ")
	if !ok {
		return "", "", s, ErrHeader
	}

	// The length counts the whole record, including itself and the newline.
	n, perr := strconv.ParseInt(lenStr, 10, 0) // Intentionally parse as native int
	if perr != nil || n < 5 || n > int64(len(s)) {
		return "", "", s, ErrHeader
	}
	n -= int64(len(lenStr) + 1) // Re-base from an index in s to an index in rest
	if n <= 0 {
		return "", "", s, ErrHeader
	}

	// The record body sits between the space and a mandatory final newline.
	body, nl, remainder := rest[:n-1], rest[n-1:n], rest[n:]
	if nl != "\n" {
		return "", "", s, ErrHeader
	}

	// The first equals separates the key from the value.
	key, val, ok := strings.Cut(body, "=")
	if !ok || !validPAXRecord(key, val) {
		return "", "", s, ErrHeader
	}
	return key, val, remainder, nil
}
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) (string, error) {
	if !validPAXRecord(k, v) {
		return "", ErrHeader
	}

	const overhead = 3 // Extra bytes for ' ', '=', and '\n'
	n := len(k) + len(v) + overhead
	n += len(strconv.Itoa(n))
	rec := strconv.Itoa(n) + " " + k + "=" + v + "\n"

	// Writing the length field may itself have grown the record by a digit;
	// recompute once more if so.
	if len(rec) != n {
		n = len(rec)
		rec = strconv.Itoa(n) + " " + k + "=" + v + "\n"
	}
	return rec, nil
}
// validPAXRecord reports whether the key-value pair is valid where each
// record is formatted as:
//
//	"%d %s=%s\n" % (size, key, value)
//
// Keys and values should be UTF-8, but the number of bad writers out there
// forces us to be more liberal.
// Thus, we only reject all keys with NUL, and only reject NULs in values
// for the PAX version of the USTAR string fields.
// The key must not contain an '=' character.
func validPAXRecord(k, v string) bool {
	if k == "" || strings.Contains(k, "=") {
		return false
	}
	switch k {
	case paxPath, paxLinkpath, paxUname, paxGname:
		// These values land in NUL-terminated USTAR fields, so a NUL in
		// the value would corrupt the header.
		return !hasNUL(v)
	}
	return !hasNUL(k)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"errors"
"fmt"
"io"
"io/fs"
"maps"
"path"
"slices"
"strings"
"time"
)
// Writer provides sequential writing of a tar archive.
// [Writer.WriteHeader] begins a new file with the provided [Header],
// and then Writer can be treated as an io.Writer to supply that file's data.
type Writer struct {
	w    io.Writer  // Underlying output stream
	pad  int64      // Amount of padding to write after current file entry
	curr fileWriter // Writer for current file entry
	hdr  Header     // Shallow copy of Header that is safe for mutations
	blk  block      // Buffer to use as temporary local storage

	// err is a persistent error.
	// It is only the responsibility of every exported method of Writer to
	// ensure that this error is sticky.
	err error
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer {
return &Writer{w: w, curr: ®FileWriter{w, 0}}
}
// fileWriter is the interface satisfied by the per-entry writers
// (regFileWriter and sparseFileWriter): an io.Writer with the fileState
// accounting methods plus an io.ReaderFrom-shaped ReadFrom.
type fileWriter interface {
	io.Writer
	fileState
	ReadFrom(io.Reader) (int64, error)
}
// Flush finishes writing the current file's block padding.
// The current file must be fully written before Flush can be called.
//
// This is unnecessary as the next call to [Writer.WriteHeader] or [Writer.Close]
// will implicitly flush out the file's padding.
func (tw *Writer) Flush() error {
	if tw.err != nil {
		return tw.err
	}
	if nb := tw.curr.logicalRemaining(); nb > 0 {
		return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
	}
	if _, err := tw.w.Write(zeroBlock[:tw.pad]); err != nil {
		tw.err = err // Write failures are sticky
		return err
	}
	tw.pad = 0
	return nil
}
// WriteHeader writes hdr and prepares to accept the file's contents.
// The Header.Size determines how many bytes can be written for the next file.
// If the current file is not fully written, then this returns an error.
// This implicitly flushes any padding necessary before writing the header.
func (tw *Writer) WriteHeader(hdr *Header) error {
	if err := tw.Flush(); err != nil {
		return err
	}
	tw.hdr = *hdr // Shallow copy of Header

	// The legacy TypeRegA flag is automatically promoted to TypeDir for
	// directory-looking names, and TypeReg otherwise.
	if tw.hdr.Typeflag == TypeRegA {
		tw.hdr.Typeflag = TypeReg
		if strings.HasSuffix(tw.hdr.Name, "/") {
			tw.hdr.Typeflag = TypeDir
		}
	}

	// Unless a format was explicitly requested, round ModTime to the second
	// and drop AccessTime/ChangeTime. This keeps nominal usage of
	// WriteHeader from always selecting the PAX format, which would add
	// 1KiB to every header.
	if tw.hdr.Format == FormatUnknown {
		tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
		tw.hdr.AccessTime = time.Time{}
		tw.hdr.ChangeTime = time.Time{}
	}

	allowed, paxHdrs, err := tw.hdr.allowedFormats()
	switch {
	case allowed.has(FormatUSTAR):
		tw.err = tw.writeUSTARHeader(&tw.hdr)
		return tw.err
	case allowed.has(FormatPAX):
		tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
		return tw.err
	case allowed.has(FormatGNU):
		tw.err = tw.writeGNUHeader(&tw.hdr)
		return tw.err
	}
	return err // Non-fatal error
}
// writeUSTARHeader packs hdr into a USTAR-format header block and writes it,
// splitting the name across the prefix/name fields when possible.
func (tw *Writer) writeUSTARHeader(hdr *Header) error {
	// Check if we can use USTAR prefix/suffix splitting.
	var prefix string
	if p, s, ok := splitUSTARPath(hdr.Name); ok {
		prefix, hdr.Name = p, s
	}

	// Pack the main header.
	var f formatter
	blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
	f.formatString(blk.toUSTAR().prefix(), prefix)
	blk.setFormat(FormatUSTAR)
	if f.err != nil {
		return f.err // Should never happen since header is validated
	}
	return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
}
// writePAXHeader writes a PAX-format entry: first (if needed) an extended
// header file carrying the records in paxHdrs, then the main USTAR-shaped
// header block. For TypeXGlobalHeader only the global extended header is
// written and the function returns early.
func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
	realName, realSize := hdr.Name, hdr.Size

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		// Handle sparse files.
		var spd sparseDatas
		var spb []byte
		if len(hdr.SparseHoles) > 0 {
			sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
			sph = alignSparseEntries(sph, hdr.Size)
			spd = invertSparseEntries(sph, hdr.Size)

			// Format the sparse map.
			hdr.Size = 0 // Replace with encoded size
			spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n')
			for _, s := range spd {
				hdr.Size += s.Length
				spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n')
				spb = append(strconv.AppendInt(spb, s.Length, 10), '\n')
			}
			pad := blockPadding(int64(len(spb)))
			spb = append(spb, zeroBlock[:pad]...)
			hdr.Size += int64(len(spb)) // Accounts for encoded sparse map

			// Add and modify appropriate PAX records.
			dir, file := path.Split(realName)
			hdr.Name = path.Join(dir, "GNUSparseFile.0", file)
			paxHdrs[paxGNUSparseMajor] = "1"
			paxHdrs[paxGNUSparseMinor] = "0"
			paxHdrs[paxGNUSparseName] = realName
			paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10)
			paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
			delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
		}
	*/
	_ = realSize // Only referenced by the disabled sparse logic above

	// Write PAX records to the output.
	isGlobal := hdr.Typeflag == TypeXGlobalHeader
	if len(paxHdrs) > 0 || isGlobal {
		// Write each record to a buffer.
		var buf strings.Builder
		// Sort keys for deterministic ordering.
		for _, k := range slices.Sorted(maps.Keys(paxHdrs)) {
			rec, err := formatPAXRecord(k, paxHdrs[k])
			if err != nil {
				return err
			}
			buf.WriteString(rec)
		}

		// Write the extended header file.
		var name string
		var flag byte
		if isGlobal {
			name = realName
			if name == "" {
				name = "GlobalHead.0.0" // Default name for a global header
			}
			flag = TypeXGlobalHeader
		} else {
			// Per convention, records live in a "PaxHeaders.0" sibling file.
			dir, file := path.Split(realName)
			name = path.Join(dir, "PaxHeaders.0", file)
			flag = TypeXHeader
		}
		data := buf.String()
		if len(data) > maxSpecialFileSize {
			return ErrFieldTooLong
		}
		if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
			return err // Global headers return here
		}
	}

	// Pack the main header.
	var f formatter // Ignore errors since they are expected
	fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
	blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
	blk.setFormat(FormatPAX)
	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
		return err
	}

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		// Write the sparse map and setup the sparse writer if necessary.
		if len(spd) > 0 {
			// Use tw.curr since the sparse map is accounted for in hdr.Size.
			if _, err := tw.curr.Write(spb); err != nil {
				return err
			}
			tw.curr = &sparseFileWriter{tw.curr, spd, 0}
		}
	*/
	return nil
}
// writeGNUHeader writes a GNU-format header for hdr, emitting GNU long-name
// and long-link pseudo-entries first when Name or Linkname does not fit in
// the fixed-size fields.
func (tw *Writer) writeGNUHeader(hdr *Header) error {
	// Use long-link files if Name or Linkname exceeds the field size.
	const longName = "././@LongLink"
	if len(hdr.Name) > nameSize {
		data := hdr.Name + "\x00"
		if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil {
			return err
		}
	}
	if len(hdr.Linkname) > nameSize {
		data := hdr.Linkname + "\x00"
		if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil {
			return err
		}
	}

	// Pack the main header.
	var f formatter // Ignore errors since they are expected
	var spd sparseDatas
	var spb []byte
	blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
	// GNU (unlike USTAR/PAX) has dedicated atime/ctime header fields.
	if !hdr.AccessTime.IsZero() {
		f.formatNumeric(blk.toGNU().accessTime(), hdr.AccessTime.Unix())
	}
	if !hdr.ChangeTime.IsZero() {
		f.formatNumeric(blk.toGNU().changeTime(), hdr.ChangeTime.Unix())
	}

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		if hdr.Typeflag == TypeGNUSparse {
			sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
			sph = alignSparseEntries(sph, hdr.Size)
			spd = invertSparseEntries(sph, hdr.Size)

			// Format the sparse map.
			formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas {
				for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ {
					f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset)
					f.formatNumeric(sa.Entry(i).Length(), sp[0].Length)
					sp = sp[1:]
				}
				if len(sp) > 0 {
					sa.IsExtended()[0] = 1
				}
				return sp
			}
			sp2 := formatSPD(spd, blk.GNU().Sparse())
			for len(sp2) > 0 {
				var spHdr block
				sp2 = formatSPD(sp2, spHdr.Sparse())
				spb = append(spb, spHdr[:]...)
			}

			// Update size fields in the header block.
			realSize := hdr.Size
			hdr.Size = 0 // Encoded size; does not account for encoded sparse map
			for _, s := range spd {
				hdr.Size += s.Length
			}
			copy(blk.V7().Size(), zeroBlock[:]) // Reset field
			f.formatNumeric(blk.V7().Size(), hdr.Size)
			f.formatNumeric(blk.GNU().RealSize(), realSize)
		}
	*/
	blk.setFormat(FormatGNU)
	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
		return err
	}

	// Write the extended sparse map and setup the sparse writer if necessary.
	// (spd/spb stay empty while the sparse logic above is disabled.)
	if len(spd) > 0 {
		// Use tw.w since the sparse map is not accounted for in hdr.Size.
		if _, err := tw.w.Write(spb); err != nil {
			return err
		}
		tw.curr = &sparseFileWriter{tw.curr, spd, 0}
	}
	return nil
}
type (
	// stringFormatter matches the signature of formatter.formatString.
	stringFormatter func([]byte, string)
	// numberFormatter matches the signature of formatter.formatNumeric
	// and formatter.formatOctal.
	numberFormatter func([]byte, int64)
)
// templateV7Plus fills out the V7 fields of a block using values from hdr.
// It also fills out fields (uname, gname, devmajor, devminor) that are
// shared in the USTAR, PAX, and GNU formats using the provided formatters.
//
// The block returned is only valid until the next call to
// templateV7Plus or writeRawFile.
func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
	tw.blk.reset()

	mod := hdr.ModTime
	if mod.IsZero() {
		mod = time.Unix(0, 0) // Substitute the Unix epoch for a zero time
	}

	v7 := tw.blk.toV7()
	v7.typeFlag()[0] = hdr.Typeflag
	fmtStr(v7.name(), hdr.Name)
	fmtStr(v7.linkName(), hdr.Linkname)
	fmtNum(v7.mode(), hdr.Mode)
	fmtNum(v7.uid(), int64(hdr.Uid))
	fmtNum(v7.gid(), int64(hdr.Gid))
	fmtNum(v7.size(), hdr.Size)
	fmtNum(v7.modTime(), mod.Unix())

	us := tw.blk.toUSTAR()
	fmtStr(us.userName(), hdr.Uname)
	fmtStr(us.groupName(), hdr.Gname)
	fmtNum(us.devMajor(), hdr.Devmajor)
	fmtNum(us.devMinor(), hdr.Devminor)
	return &tw.blk
}
// writeRawFile writes a minimal file with the given name and flag type.
// It uses format to encode the header format and will write data as the body.
// It uses default values for all of the other fields (as BSD and GNU tar does).
func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
	tw.blk.reset()

	// Best effort for the filename: force ASCII, clip to the field size,
	// and strip any trailing slashes.
	name = toASCII(name)
	if len(name) > nameSize {
		name = name[:nameSize]
	}
	name = strings.TrimRight(name, "/")

	var f formatter
	v7 := tw.blk.toV7()
	v7.typeFlag()[0] = flag
	f.formatString(v7.name(), name)
	f.formatOctal(v7.mode(), 0)
	f.formatOctal(v7.uid(), 0)
	f.formatOctal(v7.gid(), 0)
	f.formatOctal(v7.size(), int64(len(data))) // Must be < 8GiB
	f.formatOctal(v7.modTime(), 0)
	tw.blk.setFormat(format)
	if f.err != nil {
		return f.err // Only occurs if size condition is violated
	}

	// Write the header, then the body.
	if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
		return err
	}
	_, err := io.WriteString(tw, data)
	return err
}
// writeRawHeader writes the value of blk, regardless of its value.
// It sets up the Writer such that it can accept a file of the given size.
// If the flag is a special header-only flag, then the size is treated as zero.
func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
if err := tw.Flush(); err != nil {
return err
}
if _, err := tw.w.Write(blk[:]); err != nil {
return err
}
if isHeaderOnlyType(flag) {
size = 0
}
tw.curr = ®FileWriter{tw.w, size}
tw.pad = blockPadding(size)
return nil
}
// AddFS adds the files from fs.FS to the archive.
// It walks the directory tree starting at the root of the filesystem
// adding each file to the tar archive while maintaining the directory structure.
func (tw *Writer) AddFS(fsys fs.FS) error {
	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if name == "." {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}

		// Only regular files, directories, and symlinks are supported.
		var linkTarget string
		switch typ := d.Type(); {
		case typ == fs.ModeSymlink:
			linkTarget, err = fs.ReadLink(fsys, name)
			if err != nil {
				return err
			}
		case !typ.IsRegular() && typ != fs.ModeDir:
			return errors.New("tar: cannot add non-regular file")
		}

		h, err := FileInfoHeader(info, linkTarget)
		if err != nil {
			return err
		}
		h.Name = name
		if d.IsDir() {
			h.Name += "/" // Directory entries carry a trailing slash
		}
		if err := tw.WriteHeader(h); err != nil {
			return err
		}
		if !d.Type().IsRegular() {
			return nil // Only regular files have contents to copy
		}

		f, err := fsys.Open(name)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
	limit := len(name)
	switch {
	case limit <= nameSize || !isASCII(name):
		return "", "", false // Short enough already, or not encodable
	case limit > prefixSize+1:
		limit = prefixSize + 1
	case name[limit-1] == '/':
		limit--
	}

	// Split at the last slash within the permitted prefix range.
	i := strings.LastIndex(name[:limit], "/")
	nlen := len(name) - i - 1 // Length of the suffix
	plen := i                 // Length of the prefix
	if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
		return "", "", false
	}
	return name[:i], name[i+1:], true
}
// Write writes to the current file in the tar archive.
// Write returns the error [ErrWriteTooLong] if more than
// Header.Size bytes are written after [Writer.WriteHeader].
//
// Calling Write on special types like [TypeLink], [TypeSymlink], [TypeChar],
// [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [ErrWriteTooLong]) regardless
// of what the [Header.Size] claims.
func (tw *Writer) Write(b []byte) (int, error) {
	if tw.err != nil {
		return 0, tw.err
	}
	n, err := tw.curr.Write(b)
	if err != nil && err != ErrWriteTooLong {
		tw.err = err // ErrWriteTooLong is recoverable; other errors are sticky
	}
	return n, err
}
// readFrom populates the content of the current file by reading from r.
// The bytes read must match the number of remaining bytes in the current file.
//
// If the current file is sparse and r is an io.ReadSeeker,
// then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
// assuming that skipped regions are all NULs.
// This always reads the last byte to ensure r is the right size.
//
// TODO(dsnet): Re-export this when adding sparse file support.
// See https://golang.org/issue/22735
func (tw *Writer) readFrom(r io.Reader) (int64, error) {
	if tw.err != nil {
		return 0, tw.err
	}
	n, err := tw.curr.ReadFrom(r)
	if err != nil && err != ErrWriteTooLong {
		tw.err = err // ErrWriteTooLong is recoverable; other errors are sticky
	}
	return n, err
}
// Close closes the tar archive by flushing the padding, and writing the footer.
// If the current file (from a prior call to [Writer.WriteHeader]) is not fully written,
// then this returns an error.
func (tw *Writer) Close() error {
	if tw.err == ErrWriteAfterClose {
		return nil // Close is idempotent
	}
	if tw.err != nil {
		return tw.err
	}

	// Flush current padding, then write the trailer of two zero blocks.
	err := tw.Flush()
	for range 2 {
		if err != nil {
			break
		}
		_, err = tw.w.Write(zeroBlock[:])
	}

	// Ensure all future actions are invalid.
	tw.err = ErrWriteAfterClose
	return err // Report IO errors
}
// regFileWriter is a fileWriter for writing data to a regular file entry.
// Its Write method enforces the declared size via nb, returning
// ErrWriteTooLong once the budget is exceeded.
type regFileWriter struct {
	w  io.Writer // Underlying Writer
	nb int64     // Number of remaining bytes to write
}
// Write writes b to the underlying writer, truncating to the remaining
// byte budget. Writing beyond the budget reports ErrWriteTooLong.
func (fw *regFileWriter) Write(b []byte) (n int, err error) {
	tooLong := int64(len(b)) > fw.nb
	if tooLong {
		b = b[:fw.nb] // Only the in-budget portion is written
	}
	if len(b) > 0 {
		n, err = fw.w.Write(b)
		fw.nb -= int64(n)
	}
	if err != nil {
		return n, err
	}
	if tooLong {
		return n, ErrWriteTooLong
	}
	return n, nil
}
// ReadFrom copies r into the file entry. Wrapping fw in an anonymous struct
// hides this ReadFrom method so io.Copy routes through Write, preserving
// the size-limit accounting above.
func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
	w := struct{ io.Writer }{fw}
	return io.Copy(w, r)
}
// logicalRemaining implements fileState.logicalRemaining.
// For a regular file the logical and physical sizes coincide.
func (fw regFileWriter) logicalRemaining() int64 {
	return fw.nb
}
// physicalRemaining implements fileState.physicalRemaining.
// For a regular file the logical and physical sizes coincide.
func (fw regFileWriter) physicalRemaining() int64 {
	return fw.nb
}
// sparseFileWriter is a fileWriter for writing data to a sparse file entry.
// It wraps another fileWriter and routes writes either to it (data
// fragments) or to a NUL-checking sink (hole fragments) based on pos.
type sparseFileWriter struct {
	fw  fileWriter  // Underlying fileWriter
	sp  sparseDatas // Normalized list of data fragments
	pos int64       // Current position in sparse file
}
// Write writes b into the sparse file at the current position, dispatching
// bytes that land in hole fragments to zeroWriter (which rejects non-NUL
// bytes with errWriteHole) and bytes in data fragments to the underlying
// fileWriter. Writing past the logical size reports ErrWriteTooLong.
func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
	overwrite := int64(len(b)) > sw.logicalRemaining()
	if overwrite {
		// Truncate to the logical size; reported as ErrWriteTooLong below.
		b = b[:sw.logicalRemaining()]
	}

	b0 := b
	endPos := sw.pos + int64(len(b))
	for endPos > sw.pos && err == nil {
		var nf int // Bytes written in fragment
		dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
		if sw.pos < dataStart { // In a hole fragment
			bf := b[:min(int64(len(b)), dataStart-sw.pos)]
			nf, err = zeroWriter{}.Write(bf)
		} else { // In a data fragment
			bf := b[:min(int64(len(b)), dataEnd-sw.pos)]
			nf, err = sw.fw.Write(bf)
		}
		b = b[nf:]
		sw.pos += int64(nf)
		if sw.pos >= dataEnd && len(sw.sp) > 1 {
			sw.sp = sw.sp[1:] // Ensure last fragment always remains
		}
	}

	n = len(b0) - len(b)
	switch {
	case err == ErrWriteTooLong:
		return n, errMissData // Not possible; implies bug in validation logic
	case err != nil:
		return n, err
	case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
		return n, errUnrefData // Not possible; implies bug in validation logic
	case overwrite:
		return n, ErrWriteTooLong
	default:
		return n, nil
	}
}
// ReadFrom populates the sparse file from r. When r can genuinely seek, hole
// fragments are skipped with Seek (assumed to be all NULs) instead of being
// read; otherwise it falls back to a plain copy through Write.
func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
	rs, ok := r.(io.ReadSeeker)
	if ok {
		if _, err := rs.Seek(0, io.SeekCurrent); err != nil {
			ok = false // Not all io.Seeker can really seek
		}
	}
	if !ok {
		return io.Copy(struct{ io.Writer }{sw}, r)
	}

	var readLastByte bool
	pos0 := sw.pos
	for sw.logicalRemaining() > 0 && !readLastByte && err == nil {
		var nf int64 // Size of fragment
		dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
		if sw.pos < dataStart { // In a hole fragment
			nf = dataStart - sw.pos
			if sw.physicalRemaining() == 0 {
				// Trailing hole: stop one byte short so the final byte can
				// be read below to verify r's size.
				readLastByte = true
				nf--
			}
			_, err = rs.Seek(nf, io.SeekCurrent)
		} else { // In a data fragment
			nf = dataEnd - sw.pos
			nf, err = io.CopyN(sw.fw, rs, nf)
		}
		sw.pos += nf
		if sw.pos >= dataEnd && len(sw.sp) > 1 {
			sw.sp = sw.sp[1:] // Ensure last fragment always remains
		}
	}

	// If the last fragment is a hole, then seek to 1-byte before EOF, and
	// read a single byte to ensure the file is the right size.
	if readLastByte && err == nil {
		_, err = mustReadFull(rs, []byte{0})
		sw.pos++
	}

	n = sw.pos - pos0
	switch {
	case err == io.EOF:
		return n, io.ErrUnexpectedEOF
	case err == ErrWriteTooLong:
		return n, errMissData // Not possible; implies bug in validation logic
	case err != nil:
		return n, err
	case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
		return n, errUnrefData // Not possible; implies bug in validation logic
	default:
		return n, ensureEOF(rs)
	}
}
// logicalRemaining reports the bytes left until the end of the last sparse
// fragment, i.e. the remaining logical (hole-inclusive) size of the file.
func (sw sparseFileWriter) logicalRemaining() int64 {
	return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
}
// physicalRemaining delegates to the underlying fileWriter, which tracks
// the actual (non-hole) data bytes still to be written.
func (sw sparseFileWriter) physicalRemaining() int64 {
	return sw.fw.physicalRemaining()
}
// zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
type zeroWriter struct{}

// Write accepts b only if every byte is NUL; otherwise it reports how many
// leading NULs were consumed along with errWriteHole.
func (zeroWriter) Write(b []byte) (int, error) {
	for i := range b {
		if b[i] != 0 {
			return i, errWriteHole
		}
	}
	return len(b), nil
}
// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so.
func ensureEOF(r io.Reader) error {
	n, err := tryReadFull(r, []byte{0})
	if n > 0 {
		return ErrWriteTooLong // r still has data; entry was undersized
	}
	if err == io.EOF {
		return nil
	}
	return err
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package zip
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"hash"
"hash/crc32"
"internal/godebug"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"slices"
"strings"
"sync"
"time"
)
var zipinsecurepath = godebug.New("zipinsecurepath")
var (
ErrFormat = errors.New("zip: not a valid zip file")
ErrAlgorithm = errors.New("zip: unsupported compression algorithm")
ErrChecksum = errors.New("zip: checksum error")
ErrInsecurePath = errors.New("zip: insecure file path")
)
// A Reader serves content from a ZIP archive.
type Reader struct {
	r             io.ReaderAt
	File          []*File
	Comment       string
	decompressors map[uint16]Decompressor

	// Some JAR files are zip files with a prefix that is a bash script.
	// The baseOffset field is the start of the zip file proper.
	baseOffset int64

	// fileList is a list of files sorted by ename,
	// for use by the Open method. It is built lazily,
	// guarded by fileListOnce.
	fileListOnce sync.Once
	fileList     []fileListEntry
}
// A ReadCloser is a [Reader] that must be closed when no longer needed.
type ReadCloser struct {
	f *os.File // Backing file opened by OpenReader
	Reader
}
// A File is a single file in a ZIP archive.
// The file information is in the embedded [FileHeader].
// The file content can be accessed by calling [File.Open].
type File struct {
	FileHeader
	zip          *Reader
	zipr         io.ReaderAt
	headerOffset int64 // includes overall ZIP archive baseOffset
	zip64        bool  // zip64 extended information extra field presence
}
// OpenReader will open the Zip file specified by name and return a ReadCloser.
//
// If any file inside the archive uses a non-local name
// (as defined by [filepath.IsLocal]) or a name containing backslashes
// and the GODEBUG environment variable contains `zipinsecurepath=0`,
// OpenReader returns the reader with an ErrInsecurePath error.
// A future version of Go may introduce this behavior by default.
// Programs that want to accept non-local names can ignore
// the ErrInsecurePath error and use the returned reader.
func OpenReader(name string) (*ReadCloser, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	fi, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}

	rc := new(ReadCloser)
	err = rc.init(f, fi.Size())
	if err != nil && err != ErrInsecurePath {
		f.Close()
		return nil, err
	}
	rc.f = f
	// err is either nil or ErrInsecurePath; in the latter case the
	// reader is still usable by callers that choose to ignore it.
	return rc, err
}
// NewReader returns a new [Reader] reading from r, which is assumed to
// have the given size in bytes.
//
// If any file inside the archive uses a non-local name
// (as defined by [filepath.IsLocal]) or a name containing backslashes
// and the GODEBUG environment variable contains `zipinsecurepath=0`,
// NewReader returns the reader with an [ErrInsecurePath] error.
// A future version of Go may introduce this behavior by default.
// Programs that want to accept non-local names can ignore
// the [ErrInsecurePath] error and use the returned reader.
func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
	if size < 0 {
		return nil, errors.New("zip: size cannot be negative")
	}
	zr := new(Reader)
	err := zr.init(r, size)
	if err != nil && err != ErrInsecurePath {
		return nil, err
	}
	// err is either nil or ErrInsecurePath; the reader is usable either way.
	return zr, err
}
// init locates the end-of-central-directory record in rdr, then reads every
// central directory header into r.File. It also performs the optional
// zipinsecurepath=0 name check, returning ErrInsecurePath for non-local or
// backslash-containing names.
func (r *Reader) init(rdr io.ReaderAt, size int64) error {
	end, baseOffset, err := readDirectoryEnd(rdr, size)
	if err != nil {
		return err
	}
	r.r = rdr
	r.baseOffset = baseOffset
	// Since the number of directory records is not validated, it is not
	// safe to preallocate r.File without first checking that the specified
	// number of files is reasonable, since a malformed archive may
	// indicate it contains up to 1 << 128 - 1 files. Since each file has a
	// header which will be _at least_ 30 bytes we can safely preallocate
	// if (data size / 30) >= end.directoryRecords.
	if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
		r.File = make([]*File, 0, end.directoryRecords)
	}
	r.Comment = end.comment
	rs := io.NewSectionReader(rdr, 0, size)
	if _, err = rs.Seek(r.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil {
		return err
	}
	buf := bufio.NewReader(rs)

	// The count of files inside a zip is truncated to fit in a uint16.
	// Gloss over this by reading headers until we encounter
	// a bad one, and then only report an ErrFormat or UnexpectedEOF if
	// the file count modulo 65536 is incorrect.
	for {
		f := &File{zip: r, zipr: rdr}
		err = readDirectoryHeader(f, buf)
		if err == ErrFormat || err == io.ErrUnexpectedEOF {
			break
		}
		if err != nil {
			return err
		}
		// Header offsets are relative to the zip proper; rebase them onto
		// the whole input (which may have a non-zip prefix).
		f.headerOffset += r.baseOffset
		r.File = append(r.File, f)
	}
	if uint16(len(r.File)) != uint16(end.directoryRecords) { // only compare 16 bits here
		// Return the readDirectoryHeader error if we read
		// the wrong number of directory entries.
		return err
	}
	if zipinsecurepath.Value() == "0" {
		for _, f := range r.File {
			if f.Name == "" {
				// Zip permits an empty file name field.
				continue
			}
			// The zip specification states that names must use forward slashes,
			// so consider any backslashes in the name insecure.
			if !filepath.IsLocal(f.Name) || strings.Contains(f.Name, `\`) {
				zipinsecurepath.IncNonDefault()
				return ErrInsecurePath
			}
		}
	}
	return nil
}
// RegisterDecompressor registers or overrides a custom decompressor for a
// specific method ID. If a decompressor for a given method is not found,
// [Reader] will default to looking up the decompressor at the package level.
func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
	if r.decompressors == nil {
		// Lazily created: most readers never register a custom method.
		r.decompressors = map[uint16]Decompressor{}
	}
	r.decompressors[method] = dcomp
}
// decompressor resolves a method ID, preferring this Reader's private
// registrations and falling back to the package-level registry.
func (r *Reader) decompressor(method uint16) Decompressor {
	if d := r.decompressors[method]; d != nil {
		return d
	}
	return decompressor(method)
}
// Close closes the Zip file, rendering it unusable for I/O.
func (rc *ReadCloser) Close() error {
	// Closing the underlying *os.File invalidates all Files returned by
	// this reader, since they read through it via ReadAt.
	return rc.f.Close()
}
// DataOffset returns the offset of the file's possibly-compressed
// data, relative to the beginning of the zip file.
//
// Most callers should instead use [File.Open], which transparently
// decompresses data and verifies checksums.
func (f *File) DataOffset() (offset int64, err error) {
	body, err := f.findBodyOffset()
	if err != nil {
		return 0, err
	}
	return f.headerOffset + body, nil
}
// Open returns a [ReadCloser] that provides access to the [File]'s contents.
// Multiple files may be read concurrently.
func (f *File) Open() (io.ReadCloser, error) {
	bodyOffset, err := f.findBodyOffset()
	if err != nil {
		return nil, err
	}
	if strings.HasSuffix(f.Name, "/") {
		// The ZIP specification (APPNOTE.TXT) specifies that directories, which
		// are technically zero-byte files, must not have any associated file
		// data. We previously tried failing here if f.CompressedSize64 != 0,
		// but it turns out that a number of implementations (namely, the Java
		// jar tool) don't properly set the storage method on directories
		// resulting in a file with compressed size > 0 but uncompressed size ==
		// 0. We still want to fail when a directory has associated uncompressed
		// data, but we are tolerant of cases where the uncompressed size is
		// zero but compressed size is not.
		if f.UncompressedSize64 != 0 {
			return &dirReader{ErrFormat}, nil
		} else {
			return &dirReader{io.EOF}, nil
		}
	}
	size := int64(f.CompressedSize64)
	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
	dcomp := f.zip.decompressor(f.Method)
	if dcomp == nil {
		// No decompressor registered for this method, either on the
		// Reader or at the package level.
		return nil, ErrAlgorithm
	}
	var rc io.ReadCloser = dcomp(r)
	var desr io.Reader
	if f.hasDataDescriptor() {
		// The data descriptor immediately follows the compressed data.
		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
	}
	// Wrap the decompressor so the CRC-32 and size are verified as the
	// caller reads.
	rc = &checksumReader{
		rc:   rc,
		hash: crc32.NewIEEE(),
		f:    f,
		desr: desr,
	}
	return rc, nil
}
// OpenRaw returns a [Reader] that provides access to the [File]'s contents without
// decompression.
func (f *File) OpenRaw() (io.Reader, error) {
	body, err := f.findBodyOffset()
	if err != nil {
		return nil, err
	}
	// Expose exactly the stored (possibly compressed) bytes.
	sec := io.NewSectionReader(f.zipr, f.headerOffset+body, int64(f.CompressedSize64))
	return sec, nil
}
type dirReader struct {
err error
}
func (r *dirReader) Read([]byte) (int, error) {
return 0, r.err
}
func (r *dirReader) Close() error {
return nil
}
// checksumReader wraps a decompressing reader, hashing the decompressed
// bytes as they are read and validating the byte count and CRC-32 against
// the file header (or trailing data descriptor) at EOF.
type checksumReader struct {
	rc    io.ReadCloser
	hash  hash.Hash32
	nread uint64 // number of bytes read so far
	f     *File
	desr  io.Reader // if non-nil, where to read the data descriptor
	err   error     // sticky error
}
// Stat implements fs.File, describing the entry being read.
func (r *checksumReader) Stat() (fs.FileInfo, error) {
	return headerFileInfo{&r.f.FileHeader}, nil
}
// Read decompresses into b, hashing every byte it returns. When the
// underlying reader reports EOF, it validates the total size and CRC-32;
// any failure is recorded in r.err and returned on all later calls.
func (r *checksumReader) Read(b []byte) (n int, err error) {
	if r.err != nil {
		return 0, r.err
	}
	n, err = r.rc.Read(b)
	r.hash.Write(b[:n])
	r.nread += uint64(n)
	// More output than the central directory promised means corruption.
	if r.nread > r.f.UncompressedSize64 {
		return 0, ErrFormat
	}
	if err == nil {
		return
	}
	if err == io.EOF {
		if r.nread != r.f.UncompressedSize64 {
			return 0, io.ErrUnexpectedEOF
		}
		if r.desr != nil {
			// The CRC lives in the trailing data descriptor;
			// readDataDescriptor checks it against r.f.CRC32.
			if err1 := readDataDescriptor(r.desr, r.f); err1 != nil {
				if err1 == io.EOF {
					err = io.ErrUnexpectedEOF
				} else {
					err = err1
				}
			} else if r.hash.Sum32() != r.f.CRC32 {
				err = ErrChecksum
			}
		} else {
			// If there's not a data descriptor, we still compare
			// the CRC32 of what we've read against the file header
			// or TOC's CRC32, if it seems like it was set.
			if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 {
				err = ErrChecksum
			}
		}
	}
	r.err = err
	return
}
func (r *checksumReader) Close() error { return r.rc.Close() }
// findBodyOffset does the minimum work to verify the file has a header
// and returns the file body offset.
func (f *File) findBodyOffset() (int64, error) {
	var hdr [fileHeaderLen]byte
	if _, err := f.zipr.ReadAt(hdr[:], f.headerOffset); err != nil {
		return 0, err
	}
	buf := readBuf(hdr[:])
	if buf.uint32() != fileHeaderSignature {
		return 0, ErrFormat
	}
	// Only the two trailing length fields matter here; skip the 22 bytes
	// of fixed header between the signature and those fields.
	buf = buf[22:]
	nameLen := int(buf.uint16())
	extraLen := int(buf.uint16())
	return int64(fileHeaderLen + nameLen + extraLen), nil
}
// readDirectoryHeader attempts to read a directory header from r.
// It returns io.ErrUnexpectedEOF if it cannot read a complete header,
// and ErrFormat if it doesn't find a valid header signature.
func readDirectoryHeader(f *File, r io.Reader) error {
	var buf [directoryHeaderLen]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return err
	}
	b := readBuf(buf[:])
	if sig := b.uint32(); sig != directoryHeaderSignature {
		return ErrFormat
	}
	f.CreatorVersion = b.uint16()
	f.ReaderVersion = b.uint16()
	f.Flags = b.uint16()
	f.Method = b.uint16()
	f.ModifiedTime = b.uint16()
	f.ModifiedDate = b.uint16()
	f.CRC32 = b.uint32()
	f.CompressedSize = b.uint32()
	f.UncompressedSize = b.uint32()
	f.CompressedSize64 = uint64(f.CompressedSize)
	f.UncompressedSize64 = uint64(f.UncompressedSize)
	filenameLen := int(b.uint16())
	extraLen := int(b.uint16())
	commentLen := int(b.uint16())
	b = b[4:] // skipped start disk number and internal attributes (2x uint16)
	f.ExternalAttrs = b.uint32()
	f.headerOffset = int64(b.uint32())
	// The name, extra field, and comment are stored back to back right
	// after the fixed-size portion of the header.
	d := make([]byte, filenameLen+extraLen+commentLen)
	if _, err := io.ReadFull(r, d); err != nil {
		return err
	}
	f.Name = string(d[:filenameLen])
	f.Extra = d[filenameLen : filenameLen+extraLen]
	f.Comment = string(d[filenameLen+extraLen:])

	// Determine the character encoding.
	utf8Valid1, utf8Require1 := detectUTF8(f.Name)
	utf8Valid2, utf8Require2 := detectUTF8(f.Comment)
	switch {
	case !utf8Valid1 || !utf8Valid2:
		// Name and Comment definitely not UTF-8.
		f.NonUTF8 = true
	case !utf8Require1 && !utf8Require2:
		// Name and Comment use only single-byte runes that overlap with UTF-8.
		f.NonUTF8 = false
	default:
		// Might be UTF-8, might be some other encoding; preserve existing flag.
		// Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag.
		// Since it is impossible to always distinguish valid UTF-8 from some
		// other encoding (e.g., GBK or Shift-JIS), we trust the flag.
		f.NonUTF8 = f.Flags&0x800 == 0
	}

	// All-ones 32-bit fields signal that the true value lives in the
	// zip64 extra field below.
	needUSize := f.UncompressedSize == ^uint32(0)
	needCSize := f.CompressedSize == ^uint32(0)
	needHeaderOffset := f.headerOffset == int64(^uint32(0))

	// Best effort to find what we need.
	// Other zip authors might not even follow the basic format,
	// and we'll just ignore the Extra content in that case.
	var modified time.Time
parseExtras:
	for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size
		fieldTag := extra.uint16()
		fieldSize := int(extra.uint16())
		if len(extra) < fieldSize {
			break
		}
		fieldBuf := extra.sub(fieldSize)

		switch fieldTag {
		case zip64ExtraID:
			f.zip64 = true

			// update directory values from the zip64 extra block.
			// They should only be consulted if the sizes read earlier
			// are maxed out.
			// See golang.org/issue/13367.
			if needUSize {
				needUSize = false
				if len(fieldBuf) < 8 {
					return ErrFormat
				}
				f.UncompressedSize64 = fieldBuf.uint64()
			}
			if needCSize {
				needCSize = false
				if len(fieldBuf) < 8 {
					return ErrFormat
				}
				f.CompressedSize64 = fieldBuf.uint64()
			}
			if needHeaderOffset {
				needHeaderOffset = false
				if len(fieldBuf) < 8 {
					return ErrFormat
				}
				f.headerOffset = int64(fieldBuf.uint64())
			}
		case ntfsExtraID:
			if len(fieldBuf) < 4 {
				continue parseExtras
			}
			fieldBuf.uint32()        // reserved (ignored)
			for len(fieldBuf) >= 4 { // need at least tag and size
				attrTag := fieldBuf.uint16()
				attrSize := int(fieldBuf.uint16())
				if len(fieldBuf) < attrSize {
					continue parseExtras
				}
				attrBuf := fieldBuf.sub(attrSize)
				if attrTag != 1 || attrSize != 24 {
					continue // Ignore irrelevant attributes
				}

				const ticksPerSecond = 1e7    // Windows timestamp resolution
				ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
				secs := ts / ticksPerSecond
				nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)
				epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
				modified = time.Unix(epoch.Unix()+secs, nsecs)
			}
		case unixExtraID, infoZipUnixExtraID:
			if len(fieldBuf) < 8 {
				continue parseExtras
			}
			fieldBuf.uint32()              // AcTime (ignored)
			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
			modified = time.Unix(ts, 0)
		case extTimeExtraID:
			if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {
				continue parseExtras
			}
			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
			modified = time.Unix(ts, 0)
		}
	}

	msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime)
	f.Modified = msdosModified
	if !modified.IsZero() {
		f.Modified = modified.UTC()

		// If legacy MS-DOS timestamps are set, we can use the delta between
		// the legacy and extended versions to estimate timezone offset.
		//
		// A non-UTC timezone is always used (even if offset is zero).
		// Thus, FileHeader.Modified.Location() == time.UTC is useful for
		// determining whether extended timestamps are present.
		// This is necessary for users that need to do additional time
		// calculations when dealing with legacy ZIP formats.
		if f.ModifiedTime != 0 || f.ModifiedDate != 0 {
			f.Modified = modified.In(timeZone(msdosModified.Sub(modified)))
		}
	}

	// Assume that uncompressed size 2³²-1 could plausibly happen in
	// an old zip32 file that was sharding inputs into the largest chunks
	// possible (or is just malicious; search the web for 42.zip).
	// If needUSize is true still, it means we didn't see a zip64 extension.
	// As long as the compressed size is not also 2³²-1 (implausible)
	// and the header is not also 2³²-1 (equally implausible),
	// accept the uncompressed size 2³²-1 as valid.
	// If nothing else, this keeps archive/zip working with 42.zip.
	_ = needUSize

	if needCSize || needHeaderOffset {
		return ErrFormat
	}

	return nil
}
// readDataDescriptor reads the data descriptor that trails f's data and
// validates its CRC-32 against f.CRC32, returning ErrChecksum on mismatch.
// The optional leading signature word is accepted in either form.
func readDataDescriptor(r io.Reader, f *File) error {
	var buf [dataDescriptorLen]byte
	// The spec says: "Although not originally assigned a
	// signature, the value 0x08074b50 has commonly been adopted
	// as a signature value for the data descriptor record.
	// Implementers should be aware that ZIP files may be
	// encountered with or without this signature marking data
	// descriptors and should account for either case when reading
	// ZIP files to ensure compatibility."
	//
	// dataDescriptorLen includes the size of the signature but
	// first read just those 4 bytes to see if it exists.
	if _, err := io.ReadFull(r, buf[:4]); err != nil {
		return err
	}
	off := 0
	maybeSig := readBuf(buf[:4])
	if maybeSig.uint32() != dataDescriptorSignature {
		// No data descriptor signature. Keep these four
		// bytes.
		off += 4
	}
	if _, err := io.ReadFull(r, buf[off:12]); err != nil {
		return err
	}
	b := readBuf(buf[:12])
	if b.uint32() != f.CRC32 {
		return ErrChecksum
	}

	// The two sizes that follow here can be either 32 bits or 64 bits
	// but the spec is not very clear on this and different
	// interpretations has been made causing incompatibilities. We
	// already have the sizes from the central directory so we can
	// just ignore these.

	return nil
}
// readDirectoryEnd locates and parses the end-of-central-directory record
// near the end of the archive (upgrading to the zip64 version when the
// 32-bit fields are maxed out) and computes baseOffset, the amount by
// which all stored offsets are shifted when the ZIP data does not begin
// at offset 0 of the underlying file.
func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, baseOffset int64, err error) {
	// look for directoryEndSignature in the last 1k, then in the last 65k
	var buf []byte
	var directoryEndOffset int64
	for i, bLen := range []int64{1024, 65 * 1024} {
		if bLen > size {
			bLen = size
		}
		buf = make([]byte, int(bLen))
		if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
			return nil, 0, err
		}
		if p := findSignatureInBlock(buf); p >= 0 {
			buf = buf[p:]
			directoryEndOffset = size - bLen + int64(p)
			break
		}
		if i == 1 || bLen == size {
			return nil, 0, ErrFormat
		}
	}

	// read header into struct
	b := readBuf(buf[4:]) // skip signature
	d := &directoryEnd{
		diskNbr:            uint32(b.uint16()),
		dirDiskNbr:         uint32(b.uint16()),
		dirRecordsThisDisk: uint64(b.uint16()),
		directoryRecords:   uint64(b.uint16()),
		directorySize:      uint64(b.uint32()),
		directoryOffset:    uint64(b.uint32()),
		commentLen:         b.uint16(),
	}
	l := int(d.commentLen)
	if l > len(b) {
		return nil, 0, errors.New("zip: invalid comment length")
	}
	d.comment = string(b[:l])

	// These values mean that the file can be a zip64 file
	if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
		p, err := findDirectory64End(r, directoryEndOffset)
		if err == nil && p >= 0 {
			directoryEndOffset = p
			err = readDirectory64End(r, p, d)
		}
		if err != nil {
			return nil, 0, err
		}
	}

	// Reject directory sizes/offsets that would overflow int64 below.
	maxInt64 := uint64(1<<63 - 1)
	if d.directorySize > maxInt64 || d.directoryOffset > maxInt64 {
		return nil, 0, ErrFormat
	}

	baseOffset = directoryEndOffset - int64(d.directorySize) - int64(d.directoryOffset)

	// Make sure directoryOffset points to somewhere in our file.
	if o := baseOffset + int64(d.directoryOffset); o < 0 || o >= size {
		return nil, 0, ErrFormat
	}

	// If the directory end data tells us to use a non-zero baseOffset,
	// but we would find a valid directory entry if we assume that the
	// baseOffset is 0, then just use a baseOffset of 0.
	// We've seen files in which the directory end data gives us
	// an incorrect baseOffset.
	if baseOffset > 0 {
		off := int64(d.directoryOffset)
		rs := io.NewSectionReader(r, off, size-off)
		if readDirectoryHeader(&File{}, rs) == nil {
			baseOffset = 0
		}
	}

	return d, baseOffset, nil
}
// findDirectory64End tries to read the zip64 locator just before the
// directory end and returns the offset of the zip64 directory end if
// found.
func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
	locOffset := directoryEndOffset - directory64LocLen
	if locOffset < 0 {
		return -1, nil // no need to look for a header outside the file
	}
	buf := make([]byte, directory64LocLen)
	if _, err := r.ReadAt(buf, locOffset); err != nil {
		return -1, err
	}
	b := readBuf(buf)
	switch {
	case b.uint32() != directory64LocSignature:
		// Not a zip64 locator at all.
		return -1, nil
	case b.uint32() != 0:
		// The zip64 directory end must live on disk 0; anything else is
		// not a valid zip64 file.
		return -1, nil
	}
	p := b.uint64()      // relative offset of the zip64 end of central directory record
	if b.uint32() != 1 { // the archive must span exactly one disk
		return -1, nil
	}
	return int64(p), nil
}
// readDirectory64End reads the zip64 directory end and updates the
// directory end with the zip64 directory end values.
func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
	buf := make([]byte, directory64EndLen)
	if _, err := r.ReadAt(buf, offset); err != nil {
		return err
	}

	b := readBuf(buf)
	if sig := b.uint32(); sig != directory64EndSignature {
		return ErrFormat
	}

	b = b[12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)
	d.diskNbr = b.uint32()            // number of this disk
	d.dirDiskNbr = b.uint32()         // number of the disk with the start of the central directory
	d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
	d.directoryRecords = b.uint64()   // total number of entries in the central directory
	d.directorySize = b.uint64()      // size of the central directory
	d.directoryOffset = b.uint64()    // offset of start of central directory with respect to the starting disk number

	return nil
}
// findSignatureInBlock scans b backward for the end-of-central-directory
// signature ("PK\x05\x06") and returns its index, or -1 when no record
// with a fully contained comment is found.
func findSignatureInBlock(b []byte) int {
	for i := len(b) - directoryEndLen; i >= 0; i-- {
		// defined from directoryEndSignature in struct.go
		if b[i] != 'P' || b[i+1] != 'K' || b[i+2] != 0x05 || b[i+3] != 0x06 {
			continue
		}
		// The record's final two bytes hold the comment length.
		commentLen := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
		if commentLen+directoryEndLen+i > len(b) {
			// Truncated comment.
			// Some parsers (such as Info-ZIP) ignore the truncated comment
			// rather than treating it as a hard error.
			return -1
		}
		return i
	}
	return -1
}
// readBuf is a little-endian cursor over a byte slice: each accessor
// decodes a value from the front of the slice and advances past it.
type readBuf []byte

func (b *readBuf) uint8() uint8 {
	cur := *b
	*b = cur[1:]
	return cur[0]
}

func (b *readBuf) uint16() uint16 {
	cur := *b
	*b = cur[2:]
	return binary.LittleEndian.Uint16(cur)
}

func (b *readBuf) uint32() uint32 {
	cur := *b
	*b = cur[4:]
	return binary.LittleEndian.Uint32(cur)
}

func (b *readBuf) uint64() uint64 {
	cur := *b
	*b = cur[8:]
	return binary.LittleEndian.Uint64(cur)
}

// sub carves off the next n bytes as an independent cursor and advances
// this one past them.
func (b *readBuf) sub(n int) readBuf {
	cur := *b
	*b = cur[n:]
	return cur[:n]
}
// A fileListEntry is a File and its ename.
// If file == nil, the fileListEntry describes a directory without metadata.
type fileListEntry struct {
	name  string // cleaned, slash-separated name (see toValidName)
	file  *File  // nil for directories synthesized from path prefixes
	isDir bool
	isDup bool // another entry mapped to the same cleaned name; stat fails
}
// fileInfoDirEntry is satisfied by values usable both as an fs.FileInfo
// (from Stat) and as an fs.DirEntry (from ReadDir).
type fileInfoDirEntry interface {
	fs.FileInfo
	fs.DirEntry
}
// stat returns the metadata view of the entry, failing for names that
// appeared more than once in the archive.
func (f *fileListEntry) stat() (fileInfoDirEntry, error) {
	switch {
	case f.isDup:
		return nil, errors.New(f.name + ": duplicate entries in zip file")
	case f.isDir:
		// Directories describe themselves; they may have no *File.
		return f, nil
	default:
		return headerFileInfo{&f.file.FileHeader}, nil
	}
}
// Only used for directories.
func (f *fileListEntry) Name() string      { _, elem, _ := split(f.name); return elem }
func (f *fileListEntry) Size() int64       { return 0 }
func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 }
func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir }
func (f *fileListEntry) IsDir() bool       { return true }
func (f *fileListEntry) Sys() any          { return nil }

// ModTime returns the directory's recorded modification time, or the
// zero time for directories synthesized from path prefixes (file == nil).
func (f *fileListEntry) ModTime() time.Time {
	if f.file == nil {
		return time.Time{}
	}
	return f.file.FileHeader.Modified.UTC()
}

func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil }

func (f *fileListEntry) String() string {
	return fs.FormatDirEntry(f)
}
// toValidName coerces name to be a valid name for fs.FS.Open.
func toValidName(name string) string {
	// Normalize separators, collapse . and .. elements, then strip any
	// leading "/" and "../" so the result cannot escape the root.
	cleaned := path.Clean(strings.ReplaceAll(name, `\`, `/`))
	cleaned = strings.TrimPrefix(cleaned, "/")
	for {
		rest, found := strings.CutPrefix(cleaned, "../")
		if !found {
			break
		}
		cleaned = rest
	}
	return cleaned
}
// initFileList builds, exactly once, the sorted index consulted by Open
// and ReadDir: one entry per usable archive member plus synthesized
// entries for parent directories that have no explicit record.
func (r *Reader) initFileList() {
	r.fileListOnce.Do(func() {
		// Preallocate the minimum size of the index.
		// We may also synthesize additional directory entries.
		r.fileList = make([]fileListEntry, 0, len(r.File))
		// files and knownDirs map from a file/directory name
		// to an index into the r.fileList entry that we are
		// building. They are used to mark duplicate entries.
		files := make(map[string]int)
		knownDirs := make(map[string]int)
		// dirs[name] is true if name is known to be a directory,
		// because it appears as a prefix in a path.
		dirs := make(map[string]bool)
		for _, file := range r.File {
			isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/'
			name := toValidName(file.Name)
			if name == "" {
				continue
			}
			// A collision with an earlier file or directory marks the
			// earlier entry as a duplicate; its stat() will then fail.
			if idx, ok := files[name]; ok {
				r.fileList[idx].isDup = true
				continue
			}
			if idx, ok := knownDirs[name]; ok {
				r.fileList[idx].isDup = true
				continue
			}
			// Record every ancestor directory of name in dirs.
			dir := name
			for {
				if idx := strings.LastIndex(dir, "/"); idx < 0 {
					break
				} else {
					dir = dir[:idx]
				}
				if dirs[dir] {
					break
				}
				dirs[dir] = true
			}
			idx := len(r.fileList)
			entry := fileListEntry{
				name:  name,
				file:  file,
				isDir: isDir,
			}
			r.fileList = append(r.fileList, entry)
			if isDir {
				knownDirs[name] = idx
			} else {
				files[name] = idx
			}
		}
		// Synthesize entries for directories seen only as path prefixes.
		for dir := range dirs {
			if _, ok := knownDirs[dir]; !ok {
				if idx, ok := files[dir]; ok {
					r.fileList[idx].isDup = true
				} else {
					entry := fileListEntry{
						name:  dir,
						file:  nil,
						isDir: true,
					}
					r.fileList = append(r.fileList, entry)
				}
			}
		}

		slices.SortFunc(r.fileList, func(a, b fileListEntry) int {
			return fileEntryCompare(a.name, b.name)
		})
	})
}
// fileEntryCompare orders names first by parent directory, then by final
// element, so all entries of a directory sort contiguously.
func fileEntryCompare(x, y string) int {
	xdir, xelem, _ := split(x)
	ydir, yelem, _ := split(y)
	if c := strings.Compare(xdir, ydir); c != 0 {
		return c
	}
	return strings.Compare(xelem, yelem)
}
// Open opens the named file in the ZIP archive,
// using the semantics of fs.FS.Open:
// paths are always slash separated, with no
// leading / or ../ elements.
func (r *Reader) Open(name string) (fs.File, error) {
	r.initFileList()

	if !fs.ValidPath(name) {
		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
	}
	entry := r.openLookup(name)
	switch {
	case entry == nil:
		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
	case entry.isDir:
		return &openDir{entry, r.openReadDir(name), 0}, nil
	}
	rc, err := entry.file.Open()
	if err != nil {
		return nil, err
	}
	// File.Open returns a *checksumReader here, which implements fs.File.
	return rc.(fs.File), nil
}
// split breaks name into its parent directory and final element, and
// reports whether name carried a trailing slash (i.e. named a directory).
// Names with no slash get the parent ".".
func split(name string) (dir, elem string, isDir bool) {
	name, isDir = strings.CutSuffix(name, "/")
	if i := strings.LastIndexByte(name, '/'); i >= 0 {
		return name[:i], name[i+1:], isDir
	}
	return ".", name, isDir
}
var dotFile = &fileListEntry{name: "./", isDir: true}
// openLookup returns the fileListEntry for name, or nil if none exists.
// It binary-searches r.fileList, which initFileList sorted with
// fileEntryCompare (by parent directory, then element).
func (r *Reader) openLookup(name string) *fileListEntry {
	if name == "." {
		return dotFile
	}

	dir, elem, _ := split(name)
	files := r.fileList
	i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) (ret int) {
		idir, ielem, _ := split(a.name)
		if dir != idir {
			return strings.Compare(idir, dir)
		}
		return strings.Compare(ielem, elem)
	})
	if i < len(files) {
		fname := files[i].name
		// Accept either an exact match or name plus a trailing slash,
		// the form under which directory entries are stored.
		if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name {
			return &files[i]
		}
	}
	return nil
}
// openReadDir returns the contiguous sub-slice of r.fileList whose
// entries live directly inside dir, located with two binary searches
// that bracket the first and last entry sharing that parent.
func (r *Reader) openReadDir(dir string) []fileListEntry {
	files := r.fileList
	i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
		idir, _, _ := split(a.name)
		if dir != idir {
			return strings.Compare(idir, dir)
		}
		// find the first entry with dir
		return +1
	})
	j, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
		jdir, _, _ := split(a.name)
		if dir != jdir {
			return strings.Compare(jdir, dir)
		}
		// find the last entry with dir
		return -1
	})
	return files[i:j]
}
// openDir is the fs.File returned when Open names a directory; it
// supports ReadDir but not Read.
type openDir struct {
	e      *fileListEntry  // the directory's own entry
	files  []fileListEntry // its children, from openReadDir
	offset int             // how many children ReadDir has already returned
}
func (d *openDir) Close() error               { return nil }
func (d *openDir) Stat() (fs.FileInfo, error) { return d.e.stat() }

// Read always fails: directories have no byte content.
func (d *openDir) Read([]byte) (int, error) {
	return 0, &fs.PathError{Op: "read", Path: d.e.name, Err: errors.New("is a directory")}
}
// ReadDir implements fs.ReadDirFile: it returns up to count entries
// (all remaining entries when count <= 0), resuming where the previous
// call stopped.
func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
	n := len(d.files) - d.offset
	if count > 0 && n > count {
		n = count
	}
	if n == 0 {
		if count <= 0 {
			return nil, nil
		}
		return nil, io.EOF
	}
	list := make([]fs.DirEntry, n)
	for i := range list {
		s, err := d.files[d.offset+i].stat()
		if err != nil {
			return nil, err
		} else if s.Name() == "." || !fs.ValidPath(s.Name()) {
			// Refuse names that would confuse fs.FS consumers.
			return nil, &fs.PathError{
				Op:   "readdir",
				Path: d.e.name,
				Err:  fmt.Errorf("invalid file name: %v", d.files[d.offset+i].name),
			}
		}
		list[i] = s
	}
	d.offset += n
	return list, nil
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package zip
import (
"compress/flate"
"errors"
"io"
"sync"
)
// A Compressor returns a new compressing writer, writing to w.
// The WriteCloser's Close method must be used to flush pending data to w.
// The Compressor itself must be safe to invoke from multiple goroutines
// simultaneously, but each returned writer will be used only by
// one goroutine at a time.
//
// Custom Compressors can be registered with [RegisterCompressor].
type Compressor func(w io.Writer) (io.WriteCloser, error)

// A Decompressor returns a new decompressing reader, reading from r.
// The [io.ReadCloser]'s Close method must be used to release associated resources.
// The Decompressor itself must be safe to invoke from multiple goroutines
// simultaneously, but each returned reader will be used only by
// one goroutine at a time.
//
// Custom Decompressors can be registered with [RegisterDecompressor].
type Decompressor func(r io.Reader) io.ReadCloser
var flateWriterPool sync.Pool
func newFlateWriter(w io.Writer) io.WriteCloser {
fw, ok := flateWriterPool.Get().(*flate.Writer)
if ok {
fw.Reset(w)
} else {
fw, _ = flate.NewWriter(w, 5)
}
return &pooledFlateWriter{fw: fw}
}
type pooledFlateWriter struct {
mu sync.Mutex // guards Close and Write
fw *flate.Writer
}
func (w *pooledFlateWriter) Write(p []byte) (n int, err error) {
w.mu.Lock()
defer w.mu.Unlock()
if w.fw == nil {
return 0, errors.New("Write after Close")
}
return w.fw.Write(p)
}
func (w *pooledFlateWriter) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
var err error
if w.fw != nil {
err = w.fw.Close()
flateWriterPool.Put(w.fw)
w.fw = nil
}
return err
}
var flateReaderPool sync.Pool
func newFlateReader(r io.Reader) io.ReadCloser {
fr, ok := flateReaderPool.Get().(io.ReadCloser)
if ok {
fr.(flate.Resetter).Reset(r, nil)
} else {
fr = flate.NewReader(r)
}
return &pooledFlateReader{fr: fr}
}
type pooledFlateReader struct {
mu sync.Mutex // guards Close and Read
fr io.ReadCloser
}
func (r *pooledFlateReader) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.fr == nil {
return 0, errors.New("Read after Close")
}
return r.fr.Read(p)
}
func (r *pooledFlateReader) Close() error {
r.mu.Lock()
defer r.mu.Unlock()
var err error
if r.fr != nil {
err = r.fr.Close()
flateReaderPool.Put(r.fr)
r.fr = nil
}
return err
}
// Package-level registries, keyed by compression method ID. Kept as
// sync.Maps because registrations are rare and lookups are concurrent.
var (
	compressors   sync.Map // map[uint16]Compressor
	decompressors sync.Map // map[uint16]Decompressor
)

// init seeds the registries with the two methods every ZIP implementation
// must support: Store (no compression) and Deflate.
func init() {
	compressors.Store(Store, Compressor(func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil }))
	compressors.Store(Deflate, Compressor(func(w io.Writer) (io.WriteCloser, error) { return newFlateWriter(w), nil }))

	decompressors.Store(Store, Decompressor(io.NopCloser))
	decompressors.Store(Deflate, Decompressor(newFlateReader))
}
// RegisterDecompressor allows custom decompressors for a specified method ID.
// The common methods [Store] and [Deflate] are built in.
// Registering the same method twice panics.
func RegisterDecompressor(method uint16, dcomp Decompressor) {
	_, loaded := decompressors.LoadOrStore(method, dcomp)
	if loaded {
		panic("decompressor already registered")
	}
}
// RegisterCompressor registers custom compressors for a specified method ID.
// The common methods [Store] and [Deflate] are built in.
// Registering the same method twice panics.
func RegisterCompressor(method uint16, comp Compressor) {
	_, loaded := compressors.LoadOrStore(method, comp)
	if loaded {
		panic("compressor already registered")
	}
}
// compressor returns the registered Compressor for method, or nil if none.
func compressor(method uint16) Compressor {
	if ci, ok := compressors.Load(method); ok {
		return ci.(Compressor)
	}
	return nil
}
// decompressor returns the registered Decompressor for method, or nil if none.
func decompressor(method uint16) Decompressor {
	if di, ok := decompressors.Load(method); ok {
		return di.(Decompressor)
	}
	return nil
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package zip provides support for reading and writing ZIP archives.
See the [ZIP specification] for details.
This package does not support disk spanning.
A note about ZIP64:
To be backwards compatible the FileHeader has both 32 and 64 bit Size
fields. The 64 bit fields will always contain the correct value and
for normal archives both fields will be the same. For files requiring
the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit
fields must be used instead.
[ZIP specification]: https://support.pkware.com/pkzip/appnote
*/
package zip
import (
"io/fs"
"path"
"time"
)
// Compression methods.
const (
	Store   uint16 = 0 // no compression
	Deflate uint16 = 8 // DEFLATE compressed
)

// ZIP format record signatures and fixed record lengths; the variable-
// length portions noted in the comments follow each fixed header.
const (
	fileHeaderSignature      = 0x04034b50
	directoryHeaderSignature = 0x02014b50
	directoryEndSignature    = 0x06054b50
	directory64LocSignature  = 0x07064b50
	directory64EndSignature  = 0x06064b50
	dataDescriptorSignature  = 0x08074b50 // de-facto standard; required by OS X Finder
	fileHeaderLen            = 30         // + filename + extra
	directoryHeaderLen       = 46         // + filename + extra + comment
	directoryEndLen          = 22         // + comment
	dataDescriptorLen        = 16         // four uint32: descriptor signature, crc32, compressed size, size
	dataDescriptor64Len      = 24         // two uint32: signature, crc32 | two uint64: compressed size, size
	directory64LocLen        = 20         // fixed size; no variable-length fields
	directory64EndLen        = 56         // + extra

	// Constants for the first byte in CreatorVersion.
	creatorFAT    = 0
	creatorUnix   = 3
	creatorNTFS   = 11
	creatorVFAT   = 14
	creatorMacOSX = 19

	// Version numbers.
	zipVersion20 = 20 // 2.0
	zipVersion45 = 45 // 4.5 (reads and writes zip64 archives)

	// Limits for non zip64 files.
	uint16max = (1 << 16) - 1
	uint32max = (1 << 32) - 1

	// Extra header IDs.
	//
	// IDs 0..31 are reserved for official use by PKWARE.
	// IDs above that range are defined by third-party vendors.
	// Since ZIP lacked high precision timestamps (nor an official specification
	// of the timezone used for the date fields), many competing extra fields
	// have been invented. Pervasive use effectively makes them "official".
	//
	// See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField
	zip64ExtraID       = 0x0001 // Zip64 extended information
	ntfsExtraID        = 0x000a // NTFS
	unixExtraID        = 0x000d // UNIX
	extTimeExtraID     = 0x5455 // Extended timestamp
	infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension
)
// FileHeader describes a file within a ZIP file.
// See the [ZIP specification] for details.
//
// [ZIP specification]: https://support.pkware.com/pkzip/appnote
type FileHeader struct {
	// Name is the name of the file.
	//
	// It must be a relative path, not start with a drive letter (such as "C:"),
	// and must use forward slashes instead of back slashes. A trailing slash
	// indicates that this file is a directory and should have no data.
	Name string

	// Comment is any arbitrary user-defined string shorter than 64KiB.
	Comment string

	// NonUTF8 indicates that Name and Comment are not encoded in UTF-8.
	//
	// By specification, the only other encoding permitted should be CP-437,
	// but historically many ZIP readers interpret Name and Comment as whatever
	// the system's local character encoding happens to be.
	//
	// This flag should only be set if the user intends to encode a non-portable
	// ZIP file for a specific localized region. Otherwise, the Writer
	// automatically sets the ZIP format's UTF-8 flag for valid UTF-8 strings.
	NonUTF8 bool

	// CreatorVersion records, per the ZIP spec, the system that created
	// the file in its first byte (see the creator* constants) and the
	// format version in the second.
	CreatorVersion uint16
	// ReaderVersion is, per the ZIP spec, the minimum format version
	// needed to extract the file (e.g. 4.5 for zip64 archives).
	ReaderVersion uint16
	// Flags holds the ZIP general purpose bit flags; bit 0x800 marks
	// Name and Comment as UTF-8 encoded.
	Flags uint16

	// Method is the compression method. If zero, Store is used.
	Method uint16

	// Modified is the modified time of the file.
	//
	// When reading, an extended timestamp is preferred over the legacy MS-DOS
	// date field, and the offset between the times is used as the timezone.
	// If only the MS-DOS date is present, the timezone is assumed to be UTC.
	//
	// When writing, an extended timestamp (which is timezone-agnostic) is
	// always emitted. The legacy MS-DOS date field is encoded according to the
	// location of the Modified time.
	Modified time.Time

	// ModifiedTime is an MS-DOS-encoded time.
	//
	// Deprecated: Use Modified instead.
	ModifiedTime uint16

	// ModifiedDate is an MS-DOS-encoded date.
	//
	// Deprecated: Use Modified instead.
	ModifiedDate uint16

	// CRC32 is the CRC32 checksum of the file content.
	CRC32 uint32

	// CompressedSize is the compressed size of the file in bytes.
	// If either the uncompressed or compressed size of the file
	// does not fit in 32 bits, CompressedSize is set to ^uint32(0).
	//
	// Deprecated: Use CompressedSize64 instead.
	CompressedSize uint32

	// UncompressedSize is the uncompressed size of the file in bytes.
	// If either the uncompressed or compressed size of the file
	// does not fit in 32 bits, UncompressedSize is set to ^uint32(0).
	//
	// Deprecated: Use UncompressedSize64 instead.
	UncompressedSize uint32

	// CompressedSize64 is the compressed size of the file in bytes.
	CompressedSize64 uint64

	// UncompressedSize64 is the uncompressed size of the file in bytes.
	UncompressedSize64 uint64

	// Extra holds the raw bytes of the extra field; when reading,
	// readDirectoryHeader parses known IDs (zip64, timestamps) out of it.
	Extra         []byte
	ExternalAttrs uint32 // Meaning depends on CreatorVersion
}
// FileInfo returns an fs.FileInfo for the [FileHeader].
func (h *FileHeader) FileInfo() fs.FileInfo {
	return headerFileInfo{h}
}

// headerFileInfo implements [fs.FileInfo].
// It is a thin adapter that derives all of its answers from the
// wrapped *FileHeader.
type headerFileInfo struct {
	fh *FileHeader
}
// Name returns the base name of the file (fs.FileInfo semantics).
func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) }

// Size returns the uncompressed size, preferring the 64-bit field and
// falling back to the legacy 32-bit field when the former is zero.
func (fi headerFileInfo) Size() int64 {
	if fi.fh.UncompressedSize64 > 0 {
		return int64(fi.fh.UncompressedSize64)
	}
	return int64(fi.fh.UncompressedSize)
}

func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }

// ModTime prefers the extended Modified field; it falls back to the
// legacy MS-DOS fields only when Modified is unset.
func (fi headerFileInfo) ModTime() time.Time {
	if fi.fh.Modified.IsZero() {
		return fi.fh.ModTime()
	}
	return fi.fh.Modified.UTC()
}

func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() }
func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode().Type() }

// Sys exposes the underlying *FileHeader.
func (fi headerFileInfo) Sys() any { return fi.fh }

// Info also satisfies fs.DirEntry.
func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil }

func (fi headerFileInfo) String() string {
	return fs.FormatFileInfo(fi)
}
// FileInfoHeader creates a partially-populated [FileHeader] from an
// fs.FileInfo.
// Because fs.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
// If compression is desired, callers should set the FileHeader.Method
// field; it is unset by default.
func FileInfoHeader(fi fs.FileInfo) (*FileHeader, error) {
	fh := &FileHeader{
		Name:               fi.Name(),
		UncompressedSize64: uint64(fi.Size()),
	}
	fh.SetModTime(fi.ModTime())
	fh.SetMode(fi.Mode())
	// The legacy 32-bit size field saturates at its maximum when the
	// real size does not fit.
	fh.UncompressedSize = uint32(min(fh.UncompressedSize64, uint32max))
	return fh, nil
}
// directoryEnd holds the decoded end-of-central-directory record,
// with values promoted from the zip64 record when present.
type directoryEnd struct {
	diskNbr            uint32 // unused
	dirDiskNbr         uint32 // unused
	dirRecordsThisDisk uint64 // unused
	directoryRecords   uint64
	directorySize      uint64
	directoryOffset    uint64 // relative to file
	commentLen         uint16
	comment            string
}
// timeZone returns a *time.Location based on the provided offset.
// If the offset is non-sensible, then this uses an offset of zero.
func timeZone(offset time.Duration) *time.Location {
const (
minOffset = -12 * time.Hour // E.g., Baker island at -12:00
maxOffset = +14 * time.Hour // E.g., Line island at +14:00
offsetAlias = 15 * time.Minute // E.g., Nepal at +5:45
)
offset = offset.Round(offsetAlias)
if offset < minOffset || maxOffset < offset {
offset = 0
}
return time.FixedZone("", int(offset/time.Second))
}
// msDosTimeToTime converts an MS-DOS date and time into a time.Time.
// The resolution is 2s.
// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime
func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
return time.Date(
// date bits 0-4: day of month; 5-8: month; 9-15: years since 1980
int(dosDate>>9+1980),
time.Month(dosDate>>5&0xf),
int(dosDate&0x1f),
// time bits 0-4: second/2; 5-10: minute; 11-15: hour
int(dosTime>>11),
int(dosTime>>5&0x3f),
int(dosTime&0x1f*2),
0, // nanoseconds
time.UTC,
)
}
// timeToMsDosTime converts a time.Time to an MS-DOS date and time.
// The resolution is 2s.
// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
return
}
// ModTime returns the modification time in UTC using the legacy
// [ModifiedDate] and [ModifiedTime] fields.
//
// Deprecated: Use [Modified] instead.
func (h *FileHeader) ModTime() time.Time {
	return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
}

// SetModTime sets the [Modified], [ModifiedTime], and [ModifiedDate] fields
// to the given time in UTC.
//
// Deprecated: Use [Modified] instead.
func (h *FileHeader) SetModTime(t time.Time) {
	t = t.UTC() // Convert to UTC for compatibility
	h.Modified = t
	// The MS-DOS fields have only 2-second resolution; see timeToMsDosTime.
	h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t)
}
const (
	// Unix constants. The specification doesn't mention them,
	// but these seem to be the values agreed on by tools.
	s_IFMT   = 0xf000 // mask for the file-type bits
	s_IFSOCK = 0xc000 // socket
	s_IFLNK  = 0xa000 // symbolic link
	s_IFREG  = 0x8000 // regular file
	s_IFBLK  = 0x6000 // block device
	s_IFDIR  = 0x4000 // directory
	s_IFCHR  = 0x2000 // character device
	s_IFIFO  = 0x1000 // FIFO
	s_ISUID  = 0x800  // set-user-ID
	s_ISGID  = 0x400  // set-group-ID
	s_ISVTX  = 0x200  // sticky bit

	// MS-DOS attribute bits stored in the low byte of ExternalAttrs.
	msdosDir      = 0x10
	msdosReadOnly = 0x01
)
// Mode returns the permission and mode bits for the [FileHeader].
func (h *FileHeader) Mode() (mode fs.FileMode) {
	// ExternalAttrs is interpreted according to the system that created
	// the archive, recorded in the high byte of CreatorVersion.
	switch h.CreatorVersion >> 8 {
	case creatorUnix, creatorMacOSX:
		mode = unixModeToFileMode(h.ExternalAttrs >> 16)
	case creatorNTFS, creatorVFAT, creatorFAT:
		mode = msdosModeToFileMode(h.ExternalAttrs)
	}
	// A trailing slash marks a directory entry regardless of attributes.
	if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' {
		mode |= fs.ModeDir
	}
	return mode
}
// SetMode changes the permission and mode bits for the [FileHeader].
func (h *FileHeader) SetMode(mode fs.FileMode) {
	// Mark the entry as Unix-created and store the Unix mode bits in
	// the high 16 bits of ExternalAttrs.
	h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
	h.ExternalAttrs = fileModeToUnixMode(mode) << 16

	// set MSDOS attributes too, as the original zip does.
	var msdos uint32
	if mode&fs.ModeDir != 0 {
		msdos |= msdosDir
	}
	if mode&0200 == 0 {
		msdos |= msdosReadOnly
	}
	h.ExternalAttrs |= msdos
}
// isZip64 reports whether the file size exceeds the 32 bit limit.
func (h *FileHeader) isZip64() bool {
	return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max
}

// hasDataDescriptor reports whether general-purpose flag bit 3 is set,
// meaning the CRC-32 and sizes follow the file data in a trailing
// data descriptor.
func (h *FileHeader) hasDataDescriptor() bool {
	return h.Flags&0x8 != 0
}
// msdosModeToFileMode translates MS-DOS attribute bits into an
// fs.FileMode, using conventional default permissions.
func msdosModeToFileMode(m uint32) fs.FileMode {
	mode := fs.FileMode(0666)
	if m&msdosDir != 0 {
		mode = fs.ModeDir | 0777
	}
	// Read-only clears all write permission bits.
	if m&msdosReadOnly != 0 {
		mode &^= 0222
	}
	return mode
}
// fileModeToUnixMode converts an fs.FileMode into the Unix st_mode
// representation stored in a zip entry's external attributes.
func fileModeToUnixMode(mode fs.FileMode) uint32 {
	// Regular file unless the type bits say otherwise.
	m := uint32(s_IFREG)
	switch mode & fs.ModeType {
	case fs.ModeDir:
		m = s_IFDIR
	case fs.ModeSymlink:
		m = s_IFLNK
	case fs.ModeNamedPipe:
		m = s_IFIFO
	case fs.ModeSocket:
		m = s_IFSOCK
	case fs.ModeDevice:
		m = s_IFBLK
	case fs.ModeDevice | fs.ModeCharDevice:
		m = s_IFCHR
	}
	if mode&fs.ModeSetuid != 0 {
		m |= s_ISUID
	}
	if mode&fs.ModeSetgid != 0 {
		m |= s_ISGID
	}
	if mode&fs.ModeSticky != 0 {
		m |= s_ISVTX
	}
	return m | uint32(mode&0777)
}
// unixModeToFileMode converts a Unix st_mode value into an fs.FileMode.
func unixModeToFileMode(m uint32) fs.FileMode {
	// Permission bits carry over directly; add type and special bits.
	mode := fs.FileMode(m & 0777)
	switch m & s_IFMT {
	case s_IFDIR:
		mode |= fs.ModeDir
	case s_IFLNK:
		mode |= fs.ModeSymlink
	case s_IFCHR:
		mode |= fs.ModeDevice | fs.ModeCharDevice
	case s_IFBLK:
		mode |= fs.ModeDevice
	case s_IFIFO:
		mode |= fs.ModeNamedPipe
	case s_IFSOCK:
		mode |= fs.ModeSocket
	case s_IFREG:
		// regular file: no type bit to add
	}
	if m&s_ISUID != 0 {
		mode |= fs.ModeSetuid
	}
	if m&s_ISGID != 0 {
		mode |= fs.ModeSetgid
	}
	if m&s_ISVTX != 0 {
		mode |= fs.ModeSticky
	}
	return mode
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package zip
import (
"bufio"
"encoding/binary"
"errors"
"hash"
"hash/crc32"
"io"
"io/fs"
"strings"
"unicode/utf8"
)
// Errors returned by writeHeader when a FileHeader's variable-length
// fields exceed the 16-bit length limits of the local file header.
var (
	errLongName  = errors.New("zip: FileHeader.Name too long")
	errLongExtra = errors.New("zip: FileHeader.Extra too long")
)
// Writer implements a zip file writer.
type Writer struct {
	cw          *countWriter          // counts bytes written; wraps a *bufio.Writer
	dir         []*header             // central-directory records, one per entry added
	last        *fileWriter           // most recently added entry, if not yet closed
	closed      bool                  // set once Close has completed
	compressors map[uint16]Compressor // per-Writer compressor overrides, keyed by method ID
	comment     string                // end-of-central-directory comment

	// testHookCloseSizeOffset if non-nil is called with the size
	// and offset of the central directory at Close.
	testHookCloseSizeOffset func(size, offset uint64)
}
// header pairs a FileHeader with the byte offset of its local header,
// which the central directory must record at Close.
type header struct {
	*FileHeader
	offset uint64 // offset of the local file header from the start of the archive
	raw    bool   // entry added via CreateRaw: data is written pre-compressed
}
// NewWriter returns a new [Writer] writing a zip file to w.
func NewWriter(w io.Writer) *Writer {
	// Output is buffered; Close (or Flush) flushes the bufio.Writer.
	return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
}
// SetOffset sets the offset of the beginning of the zip data within the
// underlying writer. It should be used when the zip data is appended to an
// existing file, such as a binary executable.
// It must be called before any data is written.
func (w *Writer) SetOffset(n int64) {
	if w.cw.count != 0 {
		panic("zip: SetOffset called after data was written")
	}
	// The running byte count doubles as the base offset for all
	// headers recorded in the central directory.
	w.cw.count = n
}
// Flush flushes any buffered data to the underlying writer.
// Calling Flush is not normally necessary; calling Close is sufficient.
func (w *Writer) Flush() error {
	// cw.w is always the *bufio.Writer installed by NewWriter.
	return w.cw.w.(*bufio.Writer).Flush()
}
// SetComment sets the end-of-central-directory comment field.
// It can only be called before [Writer.Close].
func (w *Writer) SetComment(comment string) error {
	// The EOCD record stores the comment length in a uint16.
	if len(comment) > uint16max {
		return errors.New("zip: Writer.Comment too long")
	}
	w.comment = comment
	return nil
}
// Close finishes writing the zip file by writing the central directory.
// It does not close the underlying writer.
func (w *Writer) Close() error {
	// Finish the in-flight entry, if any, so its sizes and CRC are final.
	if w.last != nil && !w.last.closed {
		if err := w.last.close(); err != nil {
			return err
		}
		w.last = nil
	}
	if w.closed {
		return errors.New("zip: writer closed twice")
	}
	w.closed = true

	// write central directory
	start := w.cw.count
	for _, h := range w.dir {
		var buf [directoryHeaderLen]byte
		b := writeBuf(buf[:])
		b.uint32(uint32(directoryHeaderSignature))
		b.uint16(h.CreatorVersion)
		b.uint16(h.ReaderVersion)
		b.uint16(h.Flags)
		b.uint16(h.Method)
		b.uint16(h.ModifiedTime)
		b.uint16(h.ModifiedDate)
		b.uint32(h.CRC32)
		if h.isZip64() || h.offset >= uint32max {
			// the file needs a zip64 header. store maxint in both
			// 32 bit size fields (and offset later) to signal that the
			// zip64 extra header should be used.
			b.uint32(uint32max) // compressed size
			b.uint32(uint32max) // uncompressed size

			// append a zip64 extra block to Extra
			var buf [28]byte // 2x uint16 + 3x uint64
			eb := writeBuf(buf[:])
			eb.uint16(zip64ExtraID)
			eb.uint16(24) // size = 3x uint64
			eb.uint64(h.UncompressedSize64)
			eb.uint64(h.CompressedSize64)
			eb.uint64(h.offset)
			h.Extra = append(h.Extra, buf[:]...)
		} else {
			b.uint32(h.CompressedSize)
			b.uint32(h.UncompressedSize)
		}

		b.uint16(uint16(len(h.Name)))
		b.uint16(uint16(len(h.Extra)))
		b.uint16(uint16(len(h.Comment)))
		b = b[4:] // skip disk number start and internal file attr (2x uint16)
		b.uint32(h.ExternalAttrs)
		if h.offset > uint32max {
			b.uint32(uint32max)
		} else {
			b.uint32(uint32(h.offset))
		}
		if _, err := w.cw.Write(buf[:]); err != nil {
			return err
		}
		// The variable-length fields follow the fixed-size record.
		if _, err := io.WriteString(w.cw, h.Name); err != nil {
			return err
		}
		if _, err := w.cw.Write(h.Extra); err != nil {
			return err
		}
		if _, err := io.WriteString(w.cw, h.Comment); err != nil {
			return err
		}
	}
	end := w.cw.count

	records := uint64(len(w.dir))
	size := uint64(end - start)
	offset := uint64(start)

	if f := w.testHookCloseSizeOffset; f != nil {
		f(size, offset)
	}

	// If any of the counters overflow their legacy fields, emit the
	// zip64 end-of-central-directory record and locator first.
	if records >= uint16max || size >= uint32max || offset >= uint32max {
		var buf [directory64EndLen + directory64LocLen]byte
		b := writeBuf(buf[:])

		// zip64 end of central directory record
		b.uint32(directory64EndSignature)
		b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64)
		b.uint16(zipVersion45)           // version made by
		b.uint16(zipVersion45)           // version needed to extract
		b.uint32(0)                      // number of this disk
		b.uint32(0)                      // number of the disk with the start of the central directory
		b.uint64(records)                // total number of entries in the central directory on this disk
		b.uint64(records)                // total number of entries in the central directory
		b.uint64(size)                   // size of the central directory
		b.uint64(offset)                 // offset of start of central directory with respect to the starting disk number

		// zip64 end of central directory locator
		b.uint32(directory64LocSignature)
		b.uint32(0)           // number of the disk with the start of the zip64 end of central directory
		b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record
		b.uint32(1)           // total number of disks

		if _, err := w.cw.Write(buf[:]); err != nil {
			return err
		}

		// store max values in the regular end record to signal
		// that the zip64 values should be used instead
		records = uint16max
		size = uint32max
		offset = uint32max
	}

	// write end record
	var buf [directoryEndLen]byte
	b := writeBuf(buf[:])
	b.uint32(uint32(directoryEndSignature))
	b = b[4:]                        // skip over disk number and first disk number (2x uint16)
	b.uint16(uint16(records))        // number of entries this disk
	b.uint16(uint16(records))        // number of entries total
	b.uint32(uint32(size))           // size of directory
	b.uint32(uint32(offset))         // start of directory
	b.uint16(uint16(len(w.comment))) // byte size of EOCD comment
	if _, err := w.cw.Write(buf[:]); err != nil {
		return err
	}
	if _, err := io.WriteString(w.cw, w.comment); err != nil {
		return err
	}

	return w.cw.w.(*bufio.Writer).Flush()
}
// Create adds a file to the zip file using the provided name.
// It returns a [Writer] to which the file contents should be written.
// The file contents will be compressed using the [Deflate] method.
// The name must be a relative path: it must not start with a drive
// letter (e.g. C:) or leading slash, and only forward slashes are
// allowed. To create a directory instead of a file, add a trailing
// slash to the name. Duplicate names will not overwrite previous entries
// and are appended to the zip file.
// The file's contents must be written to the [io.Writer] before the next
// call to [Writer.Create], [Writer.CreateHeader], or [Writer.Close].
func (w *Writer) Create(name string) (io.Writer, error) {
	// Delegate to CreateHeader with a minimal header; Deflate is the
	// default method for entries added through Create.
	header := &FileHeader{
		Name:   name,
		Method: Deflate,
	}
	return w.CreateHeader(header)
}
// detectUTF8 reports whether s is a valid UTF-8 string, and whether the string
// must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII,
// or any other common encoding).
func detectUTF8(s string) (valid, require bool) {
	for i := 0; i < len(s); {
		r, size := utf8.DecodeRuneInString(s[i:])
		i += size
		// Officially, ZIP uses CP-437, but many readers use the system's
		// local character encoding. Most encodings are compatible with a
		// large subset of CP-437, which itself is ASCII-like.
		//
		// 0x5c and 0x7e are excluded from the safe set since EUC-KR and
		// Shift-JIS replace those characters with localized currency and
		// overline characters.
		if r >= 0x20 && r <= 0x7d && r != 0x5c {
			continue
		}
		// Reject malformed input: a RuneError of size 1 is an invalid byte.
		if !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) {
			return false, false
		}
		require = true
	}
	return true, require
}
// prepare performs the bookkeeping operations required at the start of
// CreateHeader and CreateRaw.
func (w *Writer) prepare(fh *FileHeader) error {
	// Finish the previous entry, if it is still open.
	if last := w.last; last != nil && !last.closed {
		if err := last.close(); err != nil {
			return err
		}
	}
	// Reject re-adding the exact same *FileHeader twice in a row.
	// See https://golang.org/issue/11144 confusion.
	if n := len(w.dir); n > 0 && w.dir[n-1].FileHeader == fh {
		return errors.New("archive/zip: invalid duplicate FileHeader")
	}
	return nil
}
// CreateHeader adds a file to the zip archive using the provided [FileHeader]
// for the file metadata. [Writer] takes ownership of fh and may mutate
// its fields. The caller must not modify fh after calling [Writer.CreateHeader].
//
// This returns a [Writer] to which the file contents should be written.
// The file's contents must be written to the io.Writer before the next
// call to [Writer.Create], [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
	if err := w.prepare(fh); err != nil {
		return nil, err
	}

	// The ZIP format has a sad state of affairs regarding character encoding.
	// Officially, the name and comment fields are supposed to be encoded
	// in CP-437 (which is mostly compatible with ASCII), unless the UTF-8
	// flag bit is set. However, there are several problems:
	//
	//	* Many ZIP readers still do not support UTF-8.
	//	* If the UTF-8 flag is cleared, several readers simply interpret the
	//	name and comment fields as whatever the local system encoding is.
	//
	// In order to avoid breaking readers without UTF-8 support,
	// we avoid setting the UTF-8 flag if the strings are CP-437 compatible.
	// However, if the strings require multibyte UTF-8 encoding and is a
	// valid UTF-8 string, then we set the UTF-8 bit.
	//
	// For the case, where the user explicitly wants to specify the encoding
	// as UTF-8, they will need to set the flag bit themselves.
	utf8Valid1, utf8Require1 := detectUTF8(fh.Name)
	utf8Valid2, utf8Require2 := detectUTF8(fh.Comment)
	switch {
	case fh.NonUTF8:
		fh.Flags &^= 0x800
	case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2):
		fh.Flags |= 0x800
	}

	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
	fh.ReaderVersion = zipVersion20

	// If Modified is set, this takes precedence over MS-DOS timestamp fields.
	if !fh.Modified.IsZero() {
		// Contrary to the FileHeader.SetModTime method, we intentionally
		// do not convert to UTC, because we assume the user intends to encode
		// the date using the specified timezone. A user may want this control
		// because many legacy ZIP readers interpret the timestamp according
		// to the local timezone.
		//
		// The timezone is only non-UTC if a user directly sets the Modified
		// field directly themselves. All other approaches sets UTC.
		fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified)

		// Use "extended timestamp" format since this is what Info-ZIP uses.
		// Nearly every major ZIP implementation uses a different format,
		// but at least most seem to be able to understand the other formats.
		//
		// This format happens to be identical for both local and central header
		// if modification time is the only timestamp being encoded.
		var mbuf [9]byte // 2*SizeOf(uint16) + SizeOf(uint8) + SizeOf(uint32)
		mt := uint32(fh.Modified.Unix())
		eb := writeBuf(mbuf[:])
		eb.uint16(extTimeExtraID)
		eb.uint16(5)  // Size: SizeOf(uint8) + SizeOf(uint32)
		eb.uint8(1)   // Flags: ModTime
		eb.uint32(mt) // ModTime
		fh.Extra = append(fh.Extra, mbuf[:]...)
	}

	var (
		ow io.Writer
		fw *fileWriter
	)
	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
	}

	if strings.HasSuffix(fh.Name, "/") {
		// Set the compression method to Store to ensure data length is truly zero,
		// which the writeHeader method always encodes for the size fields.
		// This is necessary as most compression formats have non-zero lengths
		// even when compressing an empty string.
		fh.Method = Store
		fh.Flags &^= 0x8 // we will not write a data descriptor

		// Explicitly clear sizes as they have no meaning for directories.
		fh.CompressedSize = 0
		fh.CompressedSize64 = 0
		fh.UncompressedSize = 0
		fh.UncompressedSize64 = 0

		ow = dirWriter{}
	} else {
		fh.Flags |= 0x8 // we will write a data descriptor

		// compCount counts compressed bytes; rawCount (below) counts
		// uncompressed bytes fed into the compressor.
		fw = &fileWriter{
			zipw:      w.cw,
			compCount: &countWriter{w: w.cw},
			crc32:     crc32.NewIEEE(),
		}
		comp := w.compressor(fh.Method)
		if comp == nil {
			return nil, ErrAlgorithm
		}
		var err error
		fw.comp, err = comp(fw.compCount)
		if err != nil {
			return nil, err
		}
		fw.rawCount = &countWriter{w: fw.comp}
		fw.header = h
		ow = fw
	}
	w.dir = append(w.dir, h)
	if err := writeHeader(w.cw, h); err != nil {
		return nil, err
	}
	// If we're creating a directory, fw is nil.
	w.last = fw
	return ow, nil
}
// writeHeader emits the local file header for h to w: the fixed-size
// record followed by the name and extra field. The CRC and sizes are
// written inline only for raw entries without a data descriptor;
// otherwise zeros are written and the real values follow the data.
func writeHeader(w io.Writer, h *header) error {
	const maxUint16 = 1<<16 - 1
	if len(h.Name) > maxUint16 {
		return errLongName
	}
	if len(h.Extra) > maxUint16 {
		return errLongExtra
	}

	var buf [fileHeaderLen]byte
	b := writeBuf(buf[:])
	b.uint32(uint32(fileHeaderSignature))
	b.uint16(h.ReaderVersion)
	b.uint16(h.Flags)
	b.uint16(h.Method)
	b.uint16(h.ModifiedTime)
	b.uint16(h.ModifiedDate)
	// In raw mode (caller does the compression), the values are either
	// written here or in the trailing data descriptor based on the header
	// flags.
	if h.raw && !h.hasDataDescriptor() {
		b.uint32(h.CRC32)
		b.uint32(uint32(min(h.CompressedSize64, uint32max)))
		b.uint32(uint32(min(h.UncompressedSize64, uint32max)))
	} else {
		// When this package handle the compression, these values are
		// always written to the trailing data descriptor.
		b.uint32(0) // crc32
		b.uint32(0) // compressed size
		b.uint32(0) // uncompressed size
	}
	b.uint16(uint16(len(h.Name)))
	b.uint16(uint16(len(h.Extra)))
	if _, err := w.Write(buf[:]); err != nil {
		return err
	}
	if _, err := io.WriteString(w, h.Name); err != nil {
		return err
	}
	_, err := w.Write(h.Extra)
	return err
}
// CreateRaw adds a file to the zip archive using the provided [FileHeader] and
// returns a [Writer] to which the file contents should be written. The file's
// contents must be written to the io.Writer before the next call to [Writer.Create],
// [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
//
// In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed.
//
// CreateRaw's argument is stored in w. If the argument is a pointer to the embedded
// [FileHeader] in a [File] obtained from a [Reader] created from in-memory data,
// then w will refer to all of that memory.
func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
	if err := w.prepare(fh); err != nil {
		return nil, err
	}

	// The caller supplies the 64-bit sizes; derive the legacy 32-bit
	// fields, saturating at their maximum.
	fh.CompressedSize = uint32(min(fh.CompressedSize64, uint32max))
	fh.UncompressedSize = uint32(min(fh.UncompressedSize64, uint32max))

	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
		raw:        true,
	}
	w.dir = append(w.dir, h)
	if err := writeHeader(w.cw, h); err != nil {
		return nil, err
	}

	// Directory entries carry no data; return a writer that rejects writes.
	if strings.HasSuffix(fh.Name, "/") {
		w.last = nil
		return dirWriter{}, nil
	}

	fw := &fileWriter{
		header: h,
		zipw:   w.cw,
	}
	w.last = fw
	return fw, nil
}
// Copy copies the file f (obtained from a [Reader]) into w. It copies the raw
// form directly bypassing decompression, compression, and validation.
func (w *Writer) Copy(f *File) error {
	src, err := f.OpenRaw()
	if err != nil {
		return err
	}
	// Copy the FileHeader so w doesn't store a pointer to the data
	// of f's entire archive. See #65499.
	hdr := f.FileHeader
	dst, err := w.CreateRaw(&hdr)
	if err != nil {
		return err
	}
	_, err = io.Copy(dst, src)
	return err
}
// RegisterCompressor registers or overrides a custom compressor for a specific
// method ID. If a compressor for a given method is not found, [Writer] will
// default to looking up the compressor at the package level.
func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
	// Allocate the override map lazily on first registration.
	if w.compressors == nil {
		w.compressors = map[uint16]Compressor{}
	}
	w.compressors[method] = comp
}
// AddFS adds the files from fs.FS to the archive.
// It walks the directory tree starting at the root of the filesystem
// adding each file to the zip using deflate while maintaining the directory structure.
func (w *Writer) AddFS(fsys fs.FS) error {
	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// The root itself has no corresponding archive entry.
		if name == "." {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		// Only directories and regular files can be represented in a zip.
		if !d.IsDir() && !info.Mode().IsRegular() {
			return errors.New("zip: cannot add non-regular file")
		}
		h, err := FileInfoHeader(info)
		if err != nil {
			return err
		}
		// FileInfoHeader records only the base name; restore the full path.
		h.Name = name
		if d.IsDir() {
			h.Name += "/" // trailing slash marks a directory entry
		}
		h.Method = Deflate
		fw, err := w.CreateHeader(h)
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil // directories carry no data
		}
		f, err := fsys.Open(name)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(fw, f)
		return err
	})
}
// compressor returns the Compressor registered on this Writer for
// method, falling back to the package-level registry.
func (w *Writer) compressor(method uint16) Compressor {
	if c := w.compressors[method]; c != nil {
		return c
	}
	return compressor(method)
}
// dirWriter is the io.Writer handed out for directory entries;
// directories carry no data, so any non-empty write is an error.
type dirWriter struct{}

// Write accepts only zero-length writes.
func (dirWriter) Write(b []byte) (int, error) {
	if len(b) > 0 {
		return 0, errors.New("zip: write to directory")
	}
	return 0, nil
}
// fileWriter handles writing a single entry's contents, tracking the
// CRC-32 and the raw/compressed byte counts needed for the data
// descriptor and the central directory.
type fileWriter struct {
	*header
	zipw      io.Writer      // the archive stream (used directly in raw mode)
	rawCount  *countWriter   // counts uncompressed bytes; wraps comp
	comp      io.WriteCloser // the compressor; writes into compCount
	compCount *countWriter   // counts compressed bytes; wraps zipw
	crc32     hash.Hash32    // running CRC-32 of the uncompressed data
	closed    bool           // set once close has run
}
// Write appends p to the entry. In raw mode the bytes go straight to
// the archive; otherwise they are folded into the CRC and routed
// through rawCount into the compressor.
func (w *fileWriter) Write(p []byte) (int, error) {
	if w.closed {
		return 0, errors.New("zip: write to closed file")
	}
	if w.raw {
		return w.zipw.Write(p)
	}
	w.crc32.Write(p)
	return w.rawCount.Write(p)
}
// close finalizes the entry: it flushes the compressor, records the
// CRC-32 and sizes in the FileHeader, and emits the trailing data
// descriptor when one was promised in the header flags.
func (w *fileWriter) close() error {
	if w.closed {
		return errors.New("zip: file closed twice")
	}
	w.closed = true
	// Raw entries were written pre-compressed; the header fields were
	// supplied by the caller and need no updating here.
	if w.raw {
		return w.writeDataDescriptor()
	}
	if err := w.comp.Close(); err != nil {
		return err
	}

	// update FileHeader
	fh := w.header.FileHeader
	fh.CRC32 = w.crc32.Sum32()
	fh.CompressedSize64 = uint64(w.compCount.count)
	fh.UncompressedSize64 = uint64(w.rawCount.count)

	if fh.isZip64() {
		// Saturate the legacy 32-bit fields to signal zip64.
		fh.CompressedSize = uint32max
		fh.UncompressedSize = uint32max
		fh.ReaderVersion = zipVersion45 // requires 4.5 - File uses ZIP64 format extensions
	} else {
		fh.CompressedSize = uint32(fh.CompressedSize64)
		fh.UncompressedSize = uint32(fh.UncompressedSize64)
	}

	return w.writeDataDescriptor()
}
// writeDataDescriptor emits the trailing data descriptor (CRC-32 and
// sizes) for the entry, using 64-bit size fields when the entry
// requires zip64. It is a no-op when flag bit 3 is not set.
func (w *fileWriter) writeDataDescriptor() error {
	if !w.hasDataDescriptor() {
		return nil
	}
	// Write data descriptor. This is more complicated than one would
	// think, see e.g. comments in zipfile.c:putextended() and
	// https://bugs.openjdk.org/browse/JDK-7073588.
	// The approach here is to write 8 byte sizes if needed without
	// adding a zip64 extra in the local header (too late anyway).
	var buf []byte
	if w.isZip64() {
		buf = make([]byte, dataDescriptor64Len)
	} else {
		buf = make([]byte, dataDescriptorLen)
	}
	b := writeBuf(buf)
	b.uint32(dataDescriptorSignature) // de-facto standard, required by OS X
	b.uint32(w.CRC32)
	if w.isZip64() {
		b.uint64(w.CompressedSize64)
		b.uint64(w.UncompressedSize64)
	} else {
		b.uint32(w.CompressedSize)
		b.uint32(w.UncompressedSize)
	}
	_, err := w.zipw.Write(buf)
	return err
}
type countWriter struct {
w io.Writer
count int64
}
func (w *countWriter) Write(p []byte) (int, error) {
n, err := w.w.Write(p)
w.count += int64(n)
return n, err
}
// nopCloser adapts an io.Writer into an io.WriteCloser whose Close is
// a no-op.
type nopCloser struct {
	io.Writer
}

// Close implements io.Closer; it does nothing and always succeeds.
func (w nopCloser) Close() error { return nil }
// writeBuf is a byte slice that is consumed from the front as fixed-
// width little-endian values are written into it.
type writeBuf []byte

// uint8 writes one byte and advances the buffer.
func (b *writeBuf) uint8(v uint8) {
	s := *b
	s[0] = v
	*b = s[1:]
}

// uint16 writes v little-endian and advances the buffer by 2.
func (b *writeBuf) uint16(v uint16) {
	s := *b
	binary.LittleEndian.PutUint16(s, v)
	*b = s[2:]
}

// uint32 writes v little-endian and advances the buffer by 4.
func (b *writeBuf) uint32(v uint32) {
	s := *b
	binary.LittleEndian.PutUint32(s, v)
	*b = s[4:]
}

// uint64 writes v little-endian and advances the buffer by 8.
func (b *writeBuf) uint64(v uint64) {
	s := *b
	binary.LittleEndian.PutUint64(s, v)
	*b = s[8:]
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer
// object, creating another object (Reader or Writer) that also implements
// the interface but provides buffering and some help for textual I/O.
package bufio
import (
"bytes"
"errors"
"io"
"strings"
"unicode/utf8"
)
const (
	// defaultBufSize is the buffer size used by NewReader and NewWriter.
	defaultBufSize = 4096
)

// Errors reported by Reader and Writer methods.
var (
	ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
	ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
	ErrBufferFull        = errors.New("bufio: buffer full")
	ErrNegativeCount     = errors.New("bufio: negative count")
)
// Buffered input.

// Reader implements buffering for an io.Reader object.
// A new Reader is created by calling [NewReader] or [NewReaderSize];
// alternatively the zero value of a Reader may be used after calling [Reader.Reset]
// on it.
type Reader struct {
	buf          []byte
	rd           io.Reader // reader provided by the client
	r, w         int       // buf read and write positions
	err          error     // sticky error from rd, delivered once via readErr
	lastByte     int       // last byte read for UnreadByte; -1 means invalid
	lastRuneSize int       // size of last rune read for UnreadRune; -1 means invalid
}

// minReadBufferSize is the smallest buffer NewReaderSize will allocate.
const minReadBufferSize = 16

// maxConsecutiveEmptyReads bounds how many zero-byte, nil-error reads
// fill tolerates before giving up with io.ErrNoProgress.
const maxConsecutiveEmptyReads = 100
// NewReaderSize returns a new [Reader] whose buffer has at least the specified
// size. If the argument io.Reader is already a [Reader] with large enough
// size, it returns the underlying [Reader].
func NewReaderSize(rd io.Reader, size int) *Reader {
	// Reuse rd directly if it is already a big-enough *Reader.
	if b, ok := rd.(*Reader); ok && len(b.buf) >= size {
		return b
	}
	if size < minReadBufferSize {
		size = minReadBufferSize
	}
	r := new(Reader)
	r.reset(make([]byte, size), rd)
	return r
}
// NewReader returns a new [Reader] whose buffer has the default size.
func NewReader(rd io.Reader) *Reader {
	return NewReaderSize(rd, defaultBufSize)
}

// Size returns the size of the underlying buffer in bytes.
func (b *Reader) Size() int { return len(b.buf) }
// Reset discards any buffered data, resets all state, and switches
// the buffered reader to read from r.
// Calling Reset on the zero value of [Reader] initializes the internal buffer
// to the default size.
// Calling b.Reset(b) (that is, resetting a [Reader] to itself) does nothing.
func (b *Reader) Reset(r io.Reader) {
	// If a Reader r is passed to NewReader, NewReader will return r.
	// Different layers of code may do that, and then later pass r
	// to Reset. Avoid infinite recursion in that case.
	if b == r {
		return
	}
	// Lazily allocate the buffer for a zero-value Reader.
	if b.buf == nil {
		b.buf = make([]byte, defaultBufSize)
	}
	b.reset(b.buf, r)
}
// reset re-initializes b to read from r using buf as its buffer,
// discarding all buffered data and unread state.
func (b *Reader) reset(buf []byte, r io.Reader) {
	*b = Reader{
		buf:          buf,
		rd:           r,
		lastByte:     -1,
		lastRuneSize: -1,
	}
}
// errNegativeRead is the panic value for misbehaving readers that
// return a negative byte count.
var errNegativeRead = errors.New("bufio: reader returned negative count from Read")

// fill reads a new chunk into the buffer.
// On failure it records the error in b.err rather than returning it.
func (b *Reader) fill() {
	// Slide existing data to beginning.
	if b.r > 0 {
		copy(b.buf, b.buf[b.r:b.w])
		b.w -= b.r
		b.r = 0
	}

	if b.w >= len(b.buf) {
		panic("bufio: tried to fill full buffer")
	}

	// Read new data: try a limited number of times.
	for i := maxConsecutiveEmptyReads; i > 0; i-- {
		n, err := b.rd.Read(b.buf[b.w:])
		if n < 0 {
			panic(errNegativeRead)
		}
		b.w += n
		if err != nil {
			b.err = err
			return
		}
		if n > 0 {
			return
		}
	}
	// The reader kept returning (0, nil); give up.
	b.err = io.ErrNoProgress
}
// readErr returns the pending read error, if any, and clears it so
// the error is delivered to the caller exactly once.
func (b *Reader) readErr() error {
	err := b.err
	b.err = nil
	return err
}
// Peek returns the next n bytes without advancing the reader. The bytes stop
// being valid at the next read call. If necessary, Peek will read more bytes
// into the buffer in order to make n bytes available. If Peek returns fewer
// than n bytes, it also returns an error explaining why the read is short.
// The error is [ErrBufferFull] if n is larger than b's buffer size.
//
// Calling Peek prevents a [Reader.UnreadByte] or [Reader.UnreadRune] call from succeeding
// until the next read operation.
func (b *Reader) Peek(n int) ([]byte, error) {
	if n < 0 {
		return nil, ErrNegativeCount
	}

	// Peek invalidates any pending unread.
	b.lastByte = -1
	b.lastRuneSize = -1

	for b.w-b.r < n && b.w-b.r < len(b.buf) && b.err == nil {
		b.fill() // b.w-b.r < len(b.buf) => buffer is not full
	}

	if n > len(b.buf) {
		return b.buf[b.r:b.w], ErrBufferFull
	}

	// 0 <= n <= len(b.buf)
	var err error
	if avail := b.w - b.r; avail < n {
		// not enough data in buffer
		n = avail
		err = b.readErr()
		if err == nil {
			err = ErrBufferFull
		}
	}
	return b.buf[b.r : b.r+n], err
}
// Discard skips the next n bytes, returning the number of bytes discarded.
//
// If Discard skips fewer than n bytes, it also returns an error.
// If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without
// reading from the underlying io.Reader.
func (b *Reader) Discard(n int) (discarded int, err error) {
	if n < 0 {
		return 0, ErrNegativeCount
	}
	if n == 0 {
		return
	}

	// Discard invalidates any pending unread.
	b.lastByte = -1
	b.lastRuneSize = -1

	remain := n
	for {
		skip := b.Buffered()
		if skip == 0 {
			b.fill()
			skip = b.Buffered()
		}
		if skip > remain {
			skip = remain
		}
		b.r += skip
		remain -= skip
		if remain == 0 {
			return n, nil
		}
		// fill recorded an error and made no progress; report how far we got.
		if b.err != nil {
			return n - remain, b.readErr()
		}
	}
}
// Read reads data into p.
// It returns the number of bytes read into p.
// The bytes are taken from at most one Read on the underlying [Reader],
// hence n may be less than len(p).
// To read exactly len(p) bytes, use io.ReadFull(b, p).
// If the underlying [Reader] can return a non-zero count with io.EOF,
// then this Read method can do so as well; see the [io.Reader] docs.
func (b *Reader) Read(p []byte) (n int, err error) {
	n = len(p)
	if n == 0 {
		if b.Buffered() > 0 {
			return 0, nil
		}
		return 0, b.readErr()
	}
	if b.r == b.w {
		if b.err != nil {
			return 0, b.readErr()
		}
		if len(p) >= len(b.buf) {
			// Large read, empty buffer.
			// Read directly into p to avoid copy.
			n, b.err = b.rd.Read(p)
			if n < 0 {
				panic(errNegativeRead)
			}
			if n > 0 {
				b.lastByte = int(p[n-1])
				b.lastRuneSize = -1
			}
			return n, b.readErr()
		}
		// One read.
		// Do not use b.fill, which will loop.
		b.r = 0
		b.w = 0
		n, b.err = b.rd.Read(b.buf)
		if n < 0 {
			panic(errNegativeRead)
		}
		if n == 0 {
			return 0, b.readErr()
		}
		b.w += n
	}

	// copy as much as we can
	// Note: if the slice panics here, it is probably because
	// the underlying reader returned a bad count. See issue 49795.
	n = copy(p, b.buf[b.r:b.w])
	b.r += n
	b.lastByte = int(b.buf[b.r-1])
	b.lastRuneSize = -1
	return n, nil
}
// ReadByte reads and returns a single byte.
// If no byte is available, returns an error.
func (b *Reader) ReadByte() (byte, error) {
	b.lastRuneSize = -1
	// Loop because a single fill may read zero bytes without error.
	for b.r == b.w {
		if b.err != nil {
			return 0, b.readErr()
		}
		b.fill() // buffer is empty
	}
	c := b.buf[b.r]
	b.r++
	// Remember the byte so UnreadByte can restore it.
	b.lastByte = int(c)
	return c, nil
}
// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
//
// UnreadByte returns an error if the most recent method called on the
// [Reader] was not a read operation. Notably, [Reader.Peek], [Reader.Discard], and [Reader.WriteTo] are not
// considered read operations.
func (b *Reader) UnreadByte() error {
	// b.r == 0 && b.w > 0 means the byte was consumed by a large direct
	// read or a buffer reset, so there is no slot to put it back into.
	if b.lastByte < 0 || b.r == 0 && b.w > 0 {
		return ErrInvalidUnreadByte
	}
	// b.r > 0 || b.w == 0
	if b.r > 0 {
		b.r--
	} else {
		// b.r == 0 && b.w == 0
		b.w = 1
	}
	// Restore the byte; a second consecutive UnreadByte must fail.
	b.buf[b.r] = byte(b.lastByte)
	b.lastByte = -1
	b.lastRuneSize = -1
	return nil
}
// ReadRune reads a single UTF-8 encoded Unicode character and returns the
// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
func (b *Reader) ReadRune() (r rune, size int, err error) {
	// Fill until the buffer holds a complete rune (or up to utf8.UTFMax
	// bytes), stopping on error or a full buffer.
	for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) {
		b.fill() // b.w-b.r < len(buf) => buffer is not full
	}
	b.lastRuneSize = -1
	if b.r == b.w {
		return 0, 0, b.readErr()
	}
	r, size = utf8.DecodeRune(b.buf[b.r:b.w])
	b.r += size
	// Record state so both UnreadByte and UnreadRune can succeed.
	b.lastByte = int(b.buf[b.r-1])
	b.lastRuneSize = size
	return r, size, nil
}
// UnreadRune unreads the last rune. If the most recent method called on
// the [Reader] was not a [Reader.ReadRune], [Reader.UnreadRune] returns an error. (In this
// regard it is stricter than [Reader.UnreadByte], which will unread the last byte
// from any read operation.)
func (b *Reader) UnreadRune() error {
	size := b.lastRuneSize
	// A negative size means the last operation was not ReadRune; a read
	// position smaller than size means the rune is no longer in the buffer.
	if size < 0 || b.r < size {
		return ErrInvalidUnreadRune
	}
	b.r -= size
	// Unreading twice in a row is not allowed.
	b.lastByte = -1
	b.lastRuneSize = -1
	return nil
}
// Buffered returns the number of bytes that can be read from the current buffer.
// It never blocks; the count reflects only data already held in b.buf.
func (b *Reader) Buffered() int { return b.w - b.r }
// ReadSlice reads until the first occurrence of delim in the input,
// returning a slice pointing at the bytes in the buffer.
// The bytes stop being valid at the next read.
// If ReadSlice encounters an error before finding a delimiter,
// it returns all the data in the buffer and the error itself (often io.EOF).
// ReadSlice fails with error [ErrBufferFull] if the buffer fills without a delim.
// Because the data returned from ReadSlice will be overwritten
// by the next I/O operation, most clients should use
// [Reader.ReadBytes] or ReadString instead.
// ReadSlice returns err != nil if and only if line does not end in delim.
func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
	s := 0 // search start index
	for {
		// Search buffer.
		if i := bytes.IndexByte(b.buf[b.r+s:b.w], delim); i >= 0 {
			i += s
			// Include the delimiter in the returned slice.
			line = b.buf[b.r : b.r+i+1]
			b.r += i + 1
			break
		}
		// Pending error?
		if b.err != nil {
			line = b.buf[b.r:b.w]
			b.r = b.w
			err = b.readErr()
			break
		}
		// Buffer full?
		if b.Buffered() >= len(b.buf) {
			b.r = b.w
			line = b.buf
			err = ErrBufferFull
			break
		}
		s = b.w - b.r // do not rescan area we scanned before
		b.fill() // buffer is not full
	}
	// Handle last byte, if any. Keeping lastByte current lets UnreadByte
	// step back over the delimiter.
	if i := len(line) - 1; i >= 0 {
		b.lastByte = int(line[i])
		b.lastRuneSize = -1
	}
	return
}
// ReadLine is a low-level line-reading primitive. Most callers should use
// [Reader.ReadBytes]('\n') or [Reader.ReadString]('\n') instead or use a [Scanner].
//
// ReadLine tries to return a single line, not including the end-of-line bytes.
// If the line was too long for the buffer then isPrefix is set and the
// beginning of the line is returned. The rest of the line will be returned
// from future calls. isPrefix will be false when returning the last fragment
// of the line. The returned buffer is only valid until the next call to
// ReadLine. ReadLine either returns a non-nil line or it returns an error,
// never both.
//
// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
// No indication or error is given if the input ends without a final line end.
// Calling [Reader.UnreadByte] after ReadLine will always unread the last byte read
// (possibly a character belonging to the line end) even if that byte is not
// part of the line returned by ReadLine.
func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
	line, err = b.ReadSlice('\n')
	if err == ErrBufferFull {
		// Handle the case where "\r\n" straddles the buffer.
		if len(line) > 0 && line[len(line)-1] == '\r' {
			// Put the '\r' back on buf and drop it from line.
			// Let the next call to ReadLine check for "\r\n".
			if b.r == 0 {
				// should be unreachable
				panic("bufio: tried to rewind past start of buffer")
			}
			b.r--
			line = line[:len(line)-1]
		}
		return line, true, nil
	}
	if len(line) == 0 {
		// Never return both a non-nil (empty) line and an error.
		if err != nil {
			line = nil
		}
		return
	}
	err = nil
	// Strip the line terminator ("\n" or "\r\n") from a complete line.
	if line[len(line)-1] == '\n' {
		drop := 1
		if len(line) > 1 && line[len(line)-2] == '\r' {
			drop = 2
		}
		line = line[:len(line)-drop]
	}
	return
}
// collectFragments reads until the first occurrence of delim in the input. It
// returns (slice of full buffers, remaining bytes before delim, total number
// of bytes in the combined first two elements, error).
// The complete result is equal to
// `bytes.Join(append(fullBuffers, finalFragment), nil)`, which has a
// length of `totalLen`. The result is structured in this way to allow callers
// to minimize allocations and copies.
func (b *Reader) collectFragments(delim byte) (fullBuffers [][]byte, finalFragment []byte, totalLen int, err error) {
	var frag []byte
	// Use ReadSlice to look for delim, accumulating full buffers.
	for {
		var e error
		frag, e = b.ReadSlice(delim)
		if e == nil { // got final fragment
			break
		}
		if e != ErrBufferFull { // unexpected error
			err = e
			break
		}
		// Make a copy of the buffer. ReadSlice's result aliases b.buf and
		// would be overwritten by the next read.
		buf := bytes.Clone(frag)
		fullBuffers = append(fullBuffers, buf)
		totalLen += len(buf)
	}
	totalLen += len(frag)
	return fullBuffers, frag, totalLen, err
}
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
// For simple uses, a Scanner may be more convenient.
func (b *Reader) ReadBytes(delim byte) ([]byte, error) {
	full, frag, total, err := b.collectFragments(delim)
	// Assemble all pieces into one freshly allocated slice of exactly
	// the right size.
	result := make([]byte, 0, total)
	for _, piece := range full {
		result = append(result, piece...)
	}
	result = append(result, frag...)
	return result, err
}
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end in
// delim.
// For simple uses, a Scanner may be more convenient.
func (b *Reader) ReadString(delim byte) (string, error) {
	full, frag, total, err := b.collectFragments(delim)
	// Build the result in a single pre-sized allocation.
	var sb strings.Builder
	sb.Grow(total)
	for i := range full {
		sb.Write(full[i])
	}
	sb.Write(frag)
	return sb.String(), err
}
// WriteTo implements io.WriterTo.
// This may make multiple calls to the [Reader.Read] method of the underlying [Reader].
// If the underlying reader supports the [Reader.WriteTo] method,
// this calls the underlying [Reader.WriteTo] without buffering.
func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
	// WriteTo is not a read operation; invalidate unread state.
	b.lastByte = -1
	b.lastRuneSize = -1
	// Drain any already-buffered data first.
	if b.r < b.w {
		n, err = b.writeBuf(w)
		if err != nil {
			return
		}
	}
	// Fast path: delegate to the source's own WriteTo.
	if r, ok := b.rd.(io.WriterTo); ok {
		m, err := r.WriteTo(w)
		n += m
		return n, err
	}
	// Fast path: delegate to the destination's ReadFrom.
	if w, ok := w.(io.ReaderFrom); ok {
		m, err := w.ReadFrom(b.rd)
		n += m
		return n, err
	}
	if b.w-b.r < len(b.buf) {
		b.fill() // buffer not full
	}
	// Generic path: alternate filling the buffer and flushing it to w.
	for b.r < b.w {
		// b.r < b.w => buffer is not empty
		m, err := b.writeBuf(w)
		n += m
		if err != nil {
			return n, err
		}
		b.fill() // buffer is empty
	}
	// EOF is the expected terminal condition, not an error for WriteTo.
	if b.err == io.EOF {
		b.err = nil
	}
	return n, b.readErr()
}
// errNegativeWrite is the panic value used when a destination writer
// misbehaves by reporting a negative byte count.
var errNegativeWrite = errors.New("bufio: writer returned negative count from Write")

// writeBuf writes the [Reader]'s buffer to the writer.
func (b *Reader) writeBuf(w io.Writer) (int64, error) {
	n, err := w.Write(b.buf[b.r:b.w])
	if n < 0 {
		panic(errNegativeWrite)
	}
	// Consume whatever was successfully written, even on error.
	b.r += n
	return int64(n), err
}
// buffered output

// Writer implements buffering for an [io.Writer] object.
// If an error occurs writing to a [Writer], no more data will be
// accepted and all subsequent writes, and [Writer.Flush], will return the error.
// After all data has been written, the client should call the
// [Writer.Flush] method to guarantee all data has been forwarded to
// the underlying [io.Writer].
type Writer struct {
	err error     // sticky: first error encountered while writing
	buf []byte    // buffered data; buf[0:n] is pending output
	n   int       // number of bytes buffered in buf
	wr  io.Writer // underlying writer
}
// NewWriterSize returns a new [Writer] whose buffer has at least the specified
// size. If the argument io.Writer is already a [Writer] with large enough
// size, it returns the underlying [Writer].
func NewWriterSize(w io.Writer, size int) *Writer {
	// Avoid double buffering: reuse w when it is already a *Writer
	// with a sufficiently large buffer.
	if existing, ok := w.(*Writer); ok && len(existing.buf) >= size {
		return existing
	}
	if size <= 0 {
		size = defaultBufSize
	}
	return &Writer{
		wr:  w,
		buf: make([]byte, size),
	}
}
// NewWriter returns a new [Writer] whose buffer has the default size.
// If the argument io.Writer is already a [Writer] with large enough buffer size,
// it returns the underlying [Writer].
func NewWriter(w io.Writer) *Writer {
	return NewWriterSize(w, defaultBufSize)
}
// Size returns the size of the underlying buffer in bytes,
// i.e. the capacity chosen at construction time.
func (b *Writer) Size() int { return len(b.buf) }
// Reset discards any unflushed buffered data, clears any error, and
// resets b to write its output to w.
// Calling Reset on the zero value of [Writer] initializes the internal buffer
// to the default size.
// Calling w.Reset(w) (that is, resetting a [Writer] to itself) does nothing.
func (b *Writer) Reset(w io.Writer) {
	// If a Writer w is passed to NewWriter, NewWriter will return w.
	// Different layers of code may do that, and then later pass w
	// to Reset. Avoid infinite recursion in that case.
	if b == w {
		return
	}
	// Lazily allocate the buffer for a zero-value Writer.
	if b.buf == nil {
		b.buf = make([]byte, defaultBufSize)
	}
	b.err = nil
	b.n = 0
	b.wr = w
}
// Flush writes any buffered data to the underlying [io.Writer].
func (b *Writer) Flush() error {
	if b.err != nil {
		return b.err
	}
	if b.n == 0 {
		return nil
	}
	n, err := b.wr.Write(b.buf[0:b.n])
	if n < b.n && err == nil {
		err = io.ErrShortWrite
	}
	if err != nil {
		// Partial write: shift the unwritten remainder to the front of
		// the buffer so the data is not lost.
		if n > 0 && n < b.n {
			copy(b.buf[0:b.n-n], b.buf[n:b.n])
		}
		b.n -= n
		// Record the error; all future writes and flushes will fail.
		b.err = err
		return err
	}
	b.n = 0
	return nil
}
// Available returns how many bytes are unused in the buffer,
// i.e. how much can still be written before a flush is needed.
func (b *Writer) Available() int { return len(b.buf) - b.n }
// AvailableBuffer returns an empty buffer with b.Available() capacity.
// This buffer is intended to be appended to and
// passed to an immediately succeeding [Writer.Write] call.
// The buffer is only valid until the next write operation on b.
func (b *Writer) AvailableBuffer() []byte {
	// Aliasing the tail of b.buf lets a following Write detect the
	// append-in-place pattern and avoid a copy.
	return b.buf[b.n:][:0]
}
// Buffered returns the number of bytes that have been written into the current buffer
// but not yet flushed to the underlying writer.
func (b *Writer) Buffered() int { return b.n }
// Write writes the contents of p into the buffer.
// It returns the number of bytes written.
// If nn < len(p), it also returns an error explaining
// why the write is short.
func (b *Writer) Write(p []byte) (nn int, err error) {
	// While p cannot fit in the remaining buffer space, either bypass
	// the buffer entirely or fill-and-flush.
	for len(p) > b.Available() && b.err == nil {
		var n int
		if b.Buffered() == 0 {
			// Large write, empty buffer.
			// Write directly from p to avoid copy.
			n, b.err = b.wr.Write(p)
		} else {
			n = copy(b.buf[b.n:], p)
			b.n += n
			// Flush records any error in b.err, terminating the loop.
			b.Flush()
		}
		nn += n
		p = p[n:]
	}
	if b.err != nil {
		return nn, b.err
	}
	// The remainder now fits in the buffer.
	n := copy(b.buf[b.n:], p)
	b.n += n
	nn += n
	return nn, nil
}
// WriteByte writes a single byte.
// It returns the sticky error if one has occurred.
func (b *Writer) WriteByte(c byte) error {
	if b.err != nil {
		return b.err
	}
	// Make room if the buffer is full; Flush sets b.err on failure.
	if b.Available() <= 0 && b.Flush() != nil {
		return b.err
	}
	b.buf[b.n] = c
	b.n++
	return nil
}
// WriteRune writes a single Unicode code point, returning
// the number of bytes written and any error.
func (b *Writer) WriteRune(r rune) (size int, err error) {
	// Compare as uint32 to correctly handle negative runes.
	if uint32(r) < utf8.RuneSelf {
		// ASCII fast path: a single byte.
		err = b.WriteByte(byte(r))
		if err != nil {
			return 0, err
		}
		return 1, nil
	}
	if b.err != nil {
		return 0, b.err
	}
	n := b.Available()
	if n < utf8.UTFMax {
		// Not enough room to encode the worst case; flush first.
		if b.Flush(); b.err != nil {
			return 0, b.err
		}
		n = b.Available()
		if n < utf8.UTFMax {
			// Can only happen if buffer is silly small.
			return b.WriteString(string(r))
		}
	}
	// Encode directly into the buffer's free space.
	size = utf8.EncodeRune(b.buf[b.n:], r)
	b.n += size
	return size, nil
}
// WriteString writes a string.
// It returns the number of bytes written.
// If the count is less than len(s), it also returns an error explaining
// why the write is short.
func (b *Writer) WriteString(s string) (int, error) {
	var sw io.StringWriter
	tryStringWriter := true
	nn := 0
	for len(s) > b.Available() && b.err == nil {
		var n int
		if b.Buffered() == 0 && sw == nil && tryStringWriter {
			// Check at most once whether b.wr is a StringWriter.
			sw, tryStringWriter = b.wr.(io.StringWriter)
		}
		if b.Buffered() == 0 && tryStringWriter {
			// Large write, empty buffer, and the underlying writer supports
			// WriteString: forward the write to the underlying StringWriter.
			// This avoids an extra copy.
			n, b.err = sw.WriteString(s)
		} else {
			// Fill the buffer and flush; Flush records errors in b.err.
			n = copy(b.buf[b.n:], s)
			b.n += n
			b.Flush()
		}
		nn += n
		s = s[n:]
	}
	if b.err != nil {
		return nn, b.err
	}
	// The remainder fits in the buffer.
	n := copy(b.buf[b.n:], s)
	b.n += n
	nn += n
	return nn, nil
}
// ReadFrom implements [io.ReaderFrom]. If the underlying writer
// supports the ReadFrom method, this calls the underlying ReadFrom.
// If there is buffered data and an underlying ReadFrom, this fills
// the buffer and writes it before calling ReadFrom.
func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
	if b.err != nil {
		return 0, b.err
	}
	readerFrom, readerFromOK := b.wr.(io.ReaderFrom)
	var m int
	for {
		if b.Available() == 0 {
			if err1 := b.Flush(); err1 != nil {
				return n, err1
			}
		}
		// Once the buffer is empty, hand the rest of the copy to the
		// underlying writer's own ReadFrom if it has one.
		if readerFromOK && b.Buffered() == 0 {
			nn, err := readerFrom.ReadFrom(r)
			b.err = err
			n += nn
			return n, err
		}
		// Guard against a reader that keeps returning (0, nil).
		nr := 0
		for nr < maxConsecutiveEmptyReads {
			m, err = r.Read(b.buf[b.n:])
			if m != 0 || err != nil {
				break
			}
			nr++
		}
		if nr == maxConsecutiveEmptyReads {
			return n, io.ErrNoProgress
		}
		b.n += m
		n += int64(m)
		if err != nil {
			break
		}
	}
	if err == io.EOF {
		// If we filled the buffer exactly, flush preemptively.
		if b.Available() == 0 {
			err = b.Flush()
		} else {
			// EOF is the normal termination for ReadFrom, not an error.
			err = nil
		}
	}
	return n, err
}
// buffered input and output

// ReadWriter stores pointers to a [Reader] and a [Writer].
// It implements [io.ReadWriter] by embedding, so all Reader and Writer
// methods are promoted onto ReadWriter.
type ReadWriter struct {
	*Reader
	*Writer
}
// NewReadWriter allocates a new [ReadWriter] that dispatches to r and w.
func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
	return &ReadWriter{r, w}
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bufio
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
// Scanner provides a convenient interface for reading data such as
// a file of newline-delimited lines of text. Successive calls to
// the [Scanner.Scan] method will step through the 'tokens' of a file, skipping
// the bytes between the tokens. The specification of a token is
// defined by a split function of type [SplitFunc]; the default split
// function breaks the input into lines with line termination stripped. [Scanner.Split]
// functions are defined in this package for scanning a file into
// lines, bytes, UTF-8-encoded runes, and space-delimited words. The
// client may instead provide a custom split function.
//
// Scanning stops unrecoverably at EOF, the first I/O error, or a token too
// large to fit in the [Scanner.Buffer]. When a scan stops, the reader may have
// advanced arbitrarily far past the last token. Programs that need more
// control over error handling or large tokens, or must run sequential scans
// on a reader, should use [bufio.Reader] instead.
type Scanner struct {
	r            io.Reader // The reader provided by the client.
	split        SplitFunc // The function to split the tokens.
	maxTokenSize int       // Maximum size of a token; modified by tests.
	token        []byte    // Last token returned by split.
	buf          []byte    // Buffer used as argument to split.
	start        int       // First non-processed byte in buf.
	end          int       // End of data in buf.
	err          error     // Sticky error.
	empties      int       // Count of successive empty tokens; guards against non-advancing split functions.
	scanCalled   bool      // Scan has been called; buffer is in use.
	done         bool      // Scan has finished.
}
// SplitFunc is the signature of the split function used to tokenize the
// input. The arguments are an initial substring of the remaining unprocessed
// data and a flag, atEOF, that reports whether the [Reader] has no more data
// to give. The return values are the number of bytes to advance the input
// and the next token to return to the user, if any, plus an error, if any.
// The advance count is validated by the [Scanner]: it must be non-negative
// and no larger than len(data).
//
// Scanning stops if the function returns an error, in which case some of
// the input may be discarded. If that error is [ErrFinalToken], scanning
// stops with no error. A non-nil token delivered with [ErrFinalToken]
// will be the last token, and a nil token with [ErrFinalToken]
// immediately stops the scanning.
//
// Otherwise, the [Scanner] advances the input. If the token is not nil,
// the [Scanner] returns it to the user. If the token is nil, the
// Scanner reads more data and continues scanning; if there is no more
// data--if atEOF was true--the [Scanner] returns. If the data does not
// yet hold a complete token, for instance if it has no newline while
// scanning lines, a [SplitFunc] can return (0, nil, nil) to signal the
// [Scanner] to read more data into the slice and try again with a
// longer slice starting at the same point in the input.
//
// The function is never called with an empty data slice unless atEOF
// is true. If atEOF is true, however, data may be non-empty and,
// as always, holds unprocessed text.
type SplitFunc func(data []byte, atEOF bool) (advance int, token []byte, err error)
// Errors returned by Scanner.
var (
	ErrTooLong         = errors.New("bufio.Scanner: token too long")                               // token exceeds the buffer limit
	ErrNegativeAdvance = errors.New("bufio.Scanner: SplitFunc returns negative advance count")     // split function misbehaved
	ErrAdvanceTooFar   = errors.New("bufio.Scanner: SplitFunc returns advance count beyond input") // split function misbehaved
	ErrBadReadCount    = errors.New("bufio.Scanner: Read returned impossible count")               // underlying reader misbehaved
)
const (
	// MaxScanTokenSize is the maximum size used to buffer a token
	// unless the user provides an explicit buffer with [Scanner.Buffer].
	// The actual maximum token size may be smaller as the buffer
	// may need to include, for instance, a newline.
	MaxScanTokenSize = 64 * 1024

	startBufSize = 4096 // Size of initial allocation for buffer.
)
// NewScanner returns a new [Scanner] to read from r.
// The split function defaults to [ScanLines].
func NewScanner(r io.Reader) *Scanner {
	return &Scanner{
		r:            r,
		split:        ScanLines,
		maxTokenSize: MaxScanTokenSize,
	}
}
// Err returns the first non-EOF error that was encountered by the [Scanner].
// io.EOF is considered normal termination and is reported as nil.
func (s *Scanner) Err() error {
	if s.err == io.EOF {
		return nil
	}
	return s.err
}
// Bytes returns the most recent token generated by a call to [Scanner.Scan].
// The underlying array may point to data that will be overwritten
// by a subsequent call to Scan. It does no allocation.
func (s *Scanner) Bytes() []byte {
	return s.token
}
// Text returns the most recent token generated by a call to [Scanner.Scan]
// as a newly allocated string holding its bytes. Unlike [Scanner.Bytes],
// the result remains valid across subsequent Scan calls.
func (s *Scanner) Text() string {
	return string(s.token)
}
// ErrFinalToken is a special sentinel error value. It is intended to be
// returned by a Split function to indicate that the scanning should stop
// with no error. If the token being delivered with this error is not nil,
// the token is the last token.
//
// The value is useful to stop processing early or when it is necessary to
// deliver a final empty token (which is different from a nil token).
// One could achieve the same behavior with a custom error value but
// providing one here is tidier.
// See the emptyFinalToken example for a use of this value.
var ErrFinalToken = errors.New("final token")
// Scan advances the [Scanner] to the next token, which will then be
// available through the [Scanner.Bytes] or [Scanner.Text] method. It returns false when
// there are no more tokens, either by reaching the end of the input or an error.
// After Scan returns false, the [Scanner.Err] method will return any error that
// occurred during scanning, except that if it was [io.EOF], [Scanner.Err]
// will return nil.
// Scan panics if the split function returns too many empty
// tokens without advancing the input. This is a common error mode for
// scanners.
func (s *Scanner) Scan() bool {
	if s.done {
		return false
	}
	s.scanCalled = true
	// Loop until we have a token.
	for {
		// See if we can get a token with what we already have.
		// If we've run out of data but have an error, give the split function
		// a chance to recover any remaining, possibly empty token.
		if s.end > s.start || s.err != nil {
			advance, token, err := s.split(s.buf[s.start:s.end], s.err != nil)
			if err != nil {
				if err == ErrFinalToken {
					s.token = token
					s.done = true
					// When token is not nil, it means the scanning stops
					// with a trailing token, and thus the return value
					// should be true to indicate the existence of the token.
					return token != nil
				}
				s.setErr(err)
				return false
			}
			// Validate and consume the advance count.
			if !s.advance(advance) {
				return false
			}
			s.token = token
			if token != nil {
				if s.err == nil || advance > 0 {
					s.empties = 0
				} else {
					// Returning tokens not advancing input at EOF.
					s.empties++
					if s.empties > maxConsecutiveEmptyReads {
						panic("bufio.Scan: too many empty tokens without progressing")
					}
				}
				return true
			}
		}
		// We cannot generate a token with what we are holding.
		// If we've already hit EOF or an I/O error, we are done.
		if s.err != nil {
			// Shut it down.
			s.start = 0
			s.end = 0
			return false
		}
		// Must read more data.
		// First, shift data to beginning of buffer if there's lots of empty space
		// or space is needed.
		if s.start > 0 && (s.end == len(s.buf) || s.start > len(s.buf)/2) {
			copy(s.buf, s.buf[s.start:s.end])
			s.end -= s.start
			s.start = 0
		}
		// Is the buffer full? If so, resize.
		if s.end == len(s.buf) {
			// Guarantee no overflow in the multiplication below.
			const maxInt = int(^uint(0) >> 1)
			if len(s.buf) >= s.maxTokenSize || len(s.buf) > maxInt/2 {
				s.setErr(ErrTooLong)
				return false
			}
			// Grow geometrically, capped at maxTokenSize.
			newSize := len(s.buf) * 2
			if newSize == 0 {
				newSize = startBufSize
			}
			newSize = min(newSize, s.maxTokenSize)
			newBuf := make([]byte, newSize)
			copy(newBuf, s.buf[s.start:s.end])
			s.buf = newBuf
			s.end -= s.start
			s.start = 0
		}
		// Finally we can read some input. Make sure we don't get stuck with
		// a misbehaving Reader. Officially we don't need to do this, but let's
		// be extra careful: Scanner is for safe, simple jobs.
		for loop := 0; ; {
			n, err := s.r.Read(s.buf[s.end:len(s.buf)])
			if n < 0 || len(s.buf)-s.end < n {
				s.setErr(ErrBadReadCount)
				break
			}
			s.end += n
			if err != nil {
				s.setErr(err)
				break
			}
			if n > 0 {
				s.empties = 0
				break
			}
			loop++
			if loop > maxConsecutiveEmptyReads {
				s.setErr(io.ErrNoProgress)
				break
			}
		}
	}
}
// advance consumes n bytes of the buffer. It reports whether the advance was legal.
func (s *Scanner) advance(n int) bool {
	switch {
	case n < 0:
		// Split functions may not move backwards.
		s.setErr(ErrNegativeAdvance)
		return false
	case n > s.end-s.start:
		// Split functions may not consume more than they were given.
		s.setErr(ErrAdvanceTooFar)
		return false
	}
	s.start += n
	return true
}
// setErr records the first error encountered.
// A later error may still replace a recorded io.EOF, since EOF is not a
// true failure.
func (s *Scanner) setErr(err error) {
	if s.err == nil || s.err == io.EOF {
		s.err = err
	}
}
// Buffer controls memory allocation by the Scanner.
// It sets the initial buffer to use when scanning
// and the maximum size of buffer that may be allocated during scanning.
// The contents of the buffer are ignored.
//
// The maximum token size must be less than the larger of max and cap(buf).
// If max <= cap(buf), [Scanner.Scan] will use this buffer only and do no allocation.
//
// By default, [Scanner.Scan] uses an internal buffer and sets the
// maximum token size to [MaxScanTokenSize].
//
// Buffer panics if it is called after scanning has started.
func (s *Scanner) Buffer(buf []byte, max int) {
	if s.scanCalled {
		panic("Buffer called after Scan")
	}
	// Use the full capacity of the provided slice, not just its length.
	s.buf = buf[0:cap(buf)]
	s.maxTokenSize = max
}
// Split sets the split function for the [Scanner].
// The default split function is [ScanLines].
//
// Split panics if it is called after scanning has started.
func (s *Scanner) Split(split SplitFunc) {
	if s.scanCalled {
		panic("Split called after Scan")
	}
	s.split = split
}
// Split functions
// ScanBytes is a split function for a [Scanner] that returns each byte as a token.
func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if len(data) == 0 && atEOF {
		// Nothing left: signal end of scanning.
		return 0, nil, nil
	}
	// Emit exactly one byte per token.
	return 1, data[:1], nil
}
// errorRune is the UTF-8 encoding of U+FFFD, returned for invalid input.
var errorRune = []byte(string(utf8.RuneError))

// ScanRunes is a split function for a [Scanner] that returns each
// UTF-8-encoded rune as a token. The sequence of runes returned is
// equivalent to that from a range loop over the input as a string, which
// means that erroneous UTF-8 encodings translate to U+FFFD = "\xef\xbf\xbd".
// Because of the Scan interface, this makes it impossible for the client to
// distinguish correctly encoded replacement runes from encoding errors.
func ScanRunes(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	// ASCII bytes decode to themselves: one-byte token.
	if data[0] < utf8.RuneSelf {
		return 1, data[:1], nil
	}
	// Attempt a full multi-byte decode. A width greater than one means
	// the sequence is a valid non-ASCII rune.
	if _, width := utf8.DecodeRune(data); width > 1 {
		return width, data[:width], nil
	}
	// width == 1 here, so the decode failed (implicitly yielding
	// utf8.RuneError). If the sequence might merely be truncated at the
	// end of the current data, ask the Scanner for more input.
	if !atEOF && !utf8.FullRune(data) {
		return 0, nil, nil
	}
	// Genuine encoding error: emit a properly encoded replacement rune
	// but consume only a single byte, matching a range loop over an
	// incorrectly encoded string.
	return 1, errorRune, nil
}
// dropCR drops a terminal \r from the data.
func dropCR(data []byte) []byte {
	if n := len(data); n > 0 && data[n-1] == '\r' {
		return data[:n-1]
	}
	return data
}
// ScanLines is a split function for a [Scanner] that returns each line of
// text, stripped of any trailing end-of-line marker. The returned line may
// be empty. The end-of-line marker is one optional carriage return followed
// by one mandatory newline. In regular expression notation, it is `\r?\n`.
// The last non-empty line of input will be returned even if it has no
// newline.
func ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	// trimCR strips a single trailing '\r', if present.
	trimCR := func(b []byte) []byte {
		if n := len(b); n > 0 && b[n-1] == '\r' {
			return b[:n-1]
		}
		return b
	}
	if i := bytes.IndexByte(data, '\n'); i >= 0 {
		// Full newline-terminated line: consume through the '\n' but
		// return the line without its terminator.
		return i + 1, trimCR(data[:i]), nil
	}
	if atEOF {
		// Input ended without a final newline; return the remainder.
		return len(data), trimCR(data), nil
	}
	// No newline yet: request more data.
	return 0, nil, nil
}
// isSpace reports whether the character is a Unicode white space character.
// It deliberately avoids a dependency on the unicode package; the
// implementation is checked for validity in the tests.
func isSpace(r rune) bool {
	switch r {
	// ASCII whitespace plus the two Latin-1 oddballs (NEL, NBSP).
	case ' ', '\t', '\n', '\v', '\f', '\r', '\u0085', '\u00A0':
		return true
	// Scattered high-valued space characters.
	case '\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000':
		return true
	}
	// The contiguous run of fixed-width spaces U+2000..U+200A.
	return '\u2000' <= r && r <= '\u200a'
}
// ScanWords is a split function for a [Scanner] that returns each
// space-separated word of text, with surrounding spaces deleted. It will
// never return an empty string. The definition of space is set by
// unicode.IsSpace.
func ScanWords(data []byte, atEOF bool) (advance int, token []byte, err error) {
	// Skip over leading whitespace to locate the start of the word.
	start := 0
	for start < len(data) {
		r, width := utf8.DecodeRune(data[start:])
		if !isSpace(r) {
			break
		}
		start += width
	}
	// Walk forward to the first space after the word; that space marks
	// a complete token.
	for i := start; i < len(data); {
		r, width := utf8.DecodeRune(data[i:])
		if isSpace(r) {
			return i + width, data[start:i], nil
		}
		i += width
	}
	// At EOF a final non-empty, non-terminated word is still a token.
	if atEOF && len(data) > start {
		return len(data), data[start:], nil
	}
	// Request more data, consuming the whitespace already skipped.
	return start, nil, nil
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bzip2
import (
"bufio"
"io"
)
// bitReader wraps an io.Reader and provides the ability to read values,
// bit-by-bit, from it. Its Read* methods don't return the usual error
// because the error handling was verbose. Instead, any error is kept and can
// be checked afterwards.
type bitReader struct {
	r    io.ByteReader
	n    uint64 // bit accumulator; the low `bits` bits are valid
	bits uint   // number of valid bits currently held in n
	err  error  // sticky: first read error encountered
}
// newBitReader returns a new bitReader reading from r. If r is not
// already an io.ByteReader, it will be converted via a bufio.Reader.
func newBitReader(r io.Reader) bitReader {
	// Prefer the reader's own ReadByte; wrap only when necessary.
	byter, ok := r.(io.ByteReader)
	if !ok {
		byter = bufio.NewReader(r)
	}
	return bitReader{r: byter}
}
// ReadBits64 reads the given number of bits and returns them in the
// least-significant part of a uint64. In the event of an error, it returns 0
// and the error can be obtained by calling bitReader.Err().
func (br *bitReader) ReadBits64(bits uint) (n uint64) {
	// Pull whole bytes into the accumulator until enough bits are available.
	for bits > br.bits {
		b, err := br.r.ReadByte()
		if err == io.EOF {
			// Running out of input mid-value is a truncated stream.
			err = io.ErrUnexpectedEOF
		}
		if err != nil {
			br.err = err
			return 0
		}
		br.n <<= 8
		br.n |= uint64(b)
		br.bits += 8
	}

	// br.n looks like this (assuming that br.bits = 14 and bits = 6):
	// Bit: 111111
	// 5432109876543210
	//
	// (6 bits, the desired output)
	// |-----|
	// V V
	// 0101101101001110
	// ^ ^
	// |------------|
	// br.bits (num valid bits)
	//
	// The next line right shifts the desired bits into the
	// least-significant places and masks off anything above.
	n = (br.n >> (br.bits - bits)) & ((1 << bits) - 1)
	br.bits -= bits
	return
}
// ReadBits reads the given number of bits and returns them as an int,
// truncating via the int conversion if they do not fit. Errors are kept
// in br.err, as with ReadBits64.
func (br *bitReader) ReadBits(bits uint) (n int) {
	n64 := br.ReadBits64(bits)
	return int(n64)
}
// ReadBit reads a single bit and reports whether it is set.
// Errors are kept in br.err, as with ReadBits64.
func (br *bitReader) ReadBit() bool {
	n := br.ReadBits(1)
	return n != 0
}
// Err returns the first error encountered by the Read* methods, if any.
func (br *bitReader) Err() error {
	return br.err
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bzip2 implements bzip2 decompression.
package bzip2
import "io"
// There's no RFC for bzip2. I used the Wikipedia page for reference and a lot
// of guessing: https://en.wikipedia.org/wiki/Bzip2
// The source code to pyflate was useful for debugging:
// http://www.paul.sladen.org/projects/pyflate
// A StructuralError is returned when the bzip2 data is found to be
// syntactically invalid.
type StructuralError string

// Error implements the error interface, prefixing the detail text with a
// fixed description of the failure class.
func (s StructuralError) Error() string {
	return "bzip2 data invalid: " + string(s)
}
// A reader decompresses bzip2 compressed data.
type reader struct {
	br           bitReader // bit-level view of the compressed input
	fileCRC      uint32    // combined CRC of the blocks seen so far
	blockCRC     uint32    // running CRC of the bytes emitted from the current block
	wantBlockCRC uint32    // CRC declared in the current block's header
	setupDone    bool      // true if we have parsed the bzip2 header.
	eof          bool      // true once the end-of-stream magic has been consumed
	blockSize    int       // blockSize in bytes, i.e. 900 * 1000.
	c            [256]uint // the `C' array for the inverse BWT.
	tt           []uint32  // mirrors the `tt' array in the bzip2 source and contains the P array in the upper 24 bits.
	tPos         uint32    // Index of the next output byte in tt.

	// RLE decoding state, maintained across Read calls (see readFromBlock).
	preRLE      []uint32 // contains the RLE data still to be processed.
	preRLEUsed  int      // number of entries of preRLE used.
	lastByte    int      // the last byte value seen; -1 when no run is in progress.
	byteRepeats uint     // the number of repeats of lastByte seen.
	repeats     uint     // the number of copies of lastByte to output.
}
// NewReader returns an [io.Reader] which decompresses bzip2 data from r.
// If r does not also implement [io.ByteReader],
// the decompressor may read more data than necessary from r.
func NewReader(r io.Reader) io.Reader {
	bz2 := &reader{br: newBitReader(r)}
	return bz2
}
// Magic values marking the start of a file, of each compressed block, and of
// the end-of-stream trailer. The block and final magics are 48 bits long and
// are consumed via ReadBits64.
const bzip2FileMagic = 0x425a // "BZ"
const bzip2BlockMagic = 0x314159265359
const bzip2FinalMagic = 0x177245385090
// setup parses the bzip2 stream header, optionally checking the leading file
// magic first, and sizes the block buffer from the declared compression
// level. Bit-reader failures surface through bz2.br.Err.
func (bz2 *reader) setup(needMagic bool) error {
	br := &bz2.br

	if needMagic {
		if magic := br.ReadBits(16); magic != bzip2FileMagic {
			return StructuralError("bad magic value")
		}
	}

	if t := br.ReadBits(8); t != 'h' {
		return StructuralError("non-Huffman entropy encoding")
	}

	level := br.ReadBits(8)
	if level < '1' || level > '9' {
		return StructuralError("invalid compression level")
	}

	bz2.fileCRC = 0
	// The ASCII digit encodes the block size in units of 100 kB.
	bz2.blockSize = 100 * 1000 * (level - '0')
	if bz2.blockSize > len(bz2.tt) {
		bz2.tt = make([]uint32, bz2.blockSize)
	}
	return nil
}
// Read implements io.Reader, lazily parsing the stream header on first use.
// Errors reported by the underlying bit reader take precedence over logical
// errors so the caller sees the root I/O cause.
func (bz2 *reader) Read(buf []byte) (n int, err error) {
	if bz2.eof {
		return 0, io.EOF
	}

	if !bz2.setupDone {
		err = bz2.setup(true)
		if readerErr := bz2.br.Err(); readerErr != nil {
			err = readerErr
		}
		if err != nil {
			return 0, err
		}
		bz2.setupDone = true
	}

	n, err = bz2.read(buf)
	if readerErr := bz2.br.Err(); readerErr != nil {
		err = readerErr
	}
	return
}
// readFromBlock copies decompressed bytes from the current block into buf,
// expanding the run-length encoding on demand, and returns the number of
// bytes written. It returns 0 once the current block is exhausted.
func (bz2 *reader) readFromBlock(buf []byte) int {
	// bzip2 is a block based compressor, except that it has a run-length
	// preprocessing step. The block based nature means that we can
	// preallocate fixed-size buffers and reuse them. However, the RLE
	// preprocessing would require allocating huge buffers to store the
	// maximum expansion. Thus we process blocks all at once, except for
	// the RLE which we decompress as required.
	n := 0
	for (bz2.repeats > 0 || bz2.preRLEUsed < len(bz2.preRLE)) && n < len(buf) {
		// We have RLE data pending.

		// The run-length encoding works like this:
		// Any sequence of four equal bytes is followed by a length
		// byte which contains the number of repeats of that byte to
		// include. (The number of repeats can be zero.) Because we are
		// decompressing on-demand our state is kept in the reader
		// object.

		if bz2.repeats > 0 {
			// Drain a pending repeat run one byte at a time.
			buf[n] = byte(bz2.lastByte)
			n++
			bz2.repeats--
			if bz2.repeats == 0 {
				bz2.lastByte = -1
			}
			continue
		}

		// Follow the linked list that inverseBWT threaded through the
		// top 24 bits of each entry; the low byte is the next symbol.
		bz2.tPos = bz2.preRLE[bz2.tPos]
		b := byte(bz2.tPos)
		bz2.tPos >>= 8
		bz2.preRLEUsed++

		if bz2.byteRepeats == 3 {
			// Fourth identical byte in a row: this symbol is an
			// extra repeat count, not a literal.
			bz2.repeats = uint(b)
			bz2.byteRepeats = 0
			continue
		}

		if bz2.lastByte == int(b) {
			bz2.byteRepeats++
		} else {
			bz2.byteRepeats = 0
		}
		bz2.lastByte = int(b)

		buf[n] = b
		n++
	}

	return n
}
// read drives the block decoder: it drains the current block into buf,
// verifies the block CRC at each block boundary, and then consumes either the
// next block magic or the end-of-stream magic. Concatenated bzip2 streams
// (a second "BZ" header after the trailer) are handled by re-running setup.
func (bz2 *reader) read(buf []byte) (int, error) {
	for {
		n := bz2.readFromBlock(buf)
		if n > 0 || len(buf) == 0 {
			bz2.blockCRC = updateCRC(bz2.blockCRC, buf[:n])
			return n, nil
		}

		// End of block. Check CRC.
		if bz2.blockCRC != bz2.wantBlockCRC {
			bz2.br.err = StructuralError("block checksum mismatch")
			return 0, bz2.br.err
		}

		// Find next block.
		br := &bz2.br
		switch br.ReadBits64(48) {
		default:
			return 0, StructuralError("bad magic value found")

		case bzip2BlockMagic:
			// Start of block.
			err := bz2.readBlock()
			if err != nil {
				return 0, err
			}

		case bzip2FinalMagic:
			// Check end-of-file CRC.
			wantFileCRC := uint32(br.ReadBits64(32))
			if br.err != nil {
				return 0, br.err
			}
			if bz2.fileCRC != wantFileCRC {
				br.err = StructuralError("file checksum mismatch")
				return 0, br.err
			}

			// Skip ahead to byte boundary.
			// Is there a file concatenated to this one?
			// It would start with BZ.
			if br.bits%8 != 0 {
				br.ReadBits(br.bits % 8)
			}
			b, err := br.r.ReadByte()
			if err == io.EOF {
				// Clean EOF here: the stream ended exactly at
				// the trailer.
				br.err = io.EOF
				bz2.eof = true
				return 0, io.EOF
			}
			if err != nil {
				br.err = err
				return 0, err
			}
			z, err := br.r.ReadByte()
			if err != nil {
				if err == io.EOF {
					err = io.ErrUnexpectedEOF
				}
				br.err = err
				return 0, err
			}
			if b != 'B' || z != 'Z' {
				return 0, StructuralError("bad magic value in continuation file")
			}
			// A second stream follows; its "BZ" magic has just been
			// consumed, so parse the rest of the header and loop.
			if err := bz2.setup(false); err != nil {
				return 0, err
			}
		}
	}
}
// readBlock reads a bzip2 block. The magic number should already have been consumed.
// On success, the entropy-decoded block has been run through the inverse BWT
// and the RLE state reset, so readFromBlock can begin producing output bytes.
func (bz2 *reader) readBlock() (err error) {
	br := &bz2.br
	// The stored CRC is verified against blockCRC by read once the block
	// has been fully decompressed.
	bz2.wantBlockCRC = uint32(br.ReadBits64(32))
	bz2.blockCRC = 0
	// Fold this block's CRC into the whole-file CRC (rotate-left by one,
	// then XOR).
	bz2.fileCRC = (bz2.fileCRC<<1 | bz2.fileCRC>>31) ^ bz2.wantBlockCRC
	randomized := br.ReadBits(1)
	if randomized != 0 {
		return StructuralError("deprecated randomized files")
	}
	origPtr := uint(br.ReadBits(24))

	// If not every byte value is used in the block (i.e., it's text) then
	// the symbol set is reduced. The symbols used are stored as a
	// two-level, 16x16 bitmap.
	symbolRangeUsedBitmap := br.ReadBits(16)
	symbolPresent := make([]bool, 256)
	numSymbols := 0
	for symRange := uint(0); symRange < 16; symRange++ {
		if symbolRangeUsedBitmap&(1<<(15-symRange)) != 0 {
			bits := br.ReadBits(16)
			for symbol := uint(0); symbol < 16; symbol++ {
				if bits&(1<<(15-symbol)) != 0 {
					symbolPresent[16*symRange+symbol] = true
					numSymbols++
				}
			}
		}
	}

	if numSymbols == 0 {
		// There must be an EOF symbol.
		return StructuralError("no symbols in input")
	}

	// A block uses between two and six different Huffman trees.
	numHuffmanTrees := br.ReadBits(3)
	if numHuffmanTrees < 2 || numHuffmanTrees > 6 {
		return StructuralError("invalid number of Huffman trees")
	}

	// The Huffman tree can switch every 50 symbols so there's a list of
	// tree indexes telling us which tree to use for each 50 symbol block.
	numSelectors := br.ReadBits(15)
	treeIndexes := make([]uint8, numSelectors)

	// The tree indexes are move-to-front transformed and stored as unary
	// numbers.
	mtfTreeDecoder := newMTFDecoderWithRange(numHuffmanTrees)
	for i := range treeIndexes {
		c := 0
		for {
			inc := br.ReadBits(1)
			if inc == 0 {
				break
			}
			c++
		}
		if c >= numHuffmanTrees {
			return StructuralError("tree index too large")
		}
		treeIndexes[i] = mtfTreeDecoder.Decode(c)
	}

	// The list of symbols for the move-to-front transform is taken from
	// the previously decoded symbol bitmap.
	symbols := make([]byte, numSymbols)
	nextSymbol := 0
	for i := 0; i < 256; i++ {
		if symbolPresent[i] {
			symbols[nextSymbol] = byte(i)
			nextSymbol++
		}
	}
	mtf := newMTFDecoder(symbols)

	numSymbols += 2 // to account for RUNA and RUNB symbols
	huffmanTrees := make([]huffmanTree, numHuffmanTrees)

	// Now we decode the arrays of code-lengths for each tree.
	lengths := make([]uint8, numSymbols)
	for i := range huffmanTrees {
		// The code lengths are delta encoded from a 5-bit base value.
		length := br.ReadBits(5)
		for j := range lengths {
			for {
				if length < 1 || length > 20 {
					return StructuralError("Huffman length out of range")
				}
				if !br.ReadBit() {
					break
				}
				// A set bit adjusts the running length; the
				// following bit picks the direction (1 means
				// decrement, 0 means increment).
				if br.ReadBit() {
					length--
				} else {
					length++
				}
			}
			lengths[j] = uint8(length)
		}
		huffmanTrees[i], err = newHuffmanTree(lengths)
		if err != nil {
			return err
		}
	}

	selectorIndex := 1 // the next tree index to use
	if len(treeIndexes) == 0 {
		return StructuralError("no tree selectors given")
	}
	if int(treeIndexes[0]) >= len(huffmanTrees) {
		return StructuralError("tree selector out of range")
	}
	currentHuffmanTree := huffmanTrees[treeIndexes[0]]
	bufIndex := 0 // indexes bz2.tt, the output buffer.

	// The output of the move-to-front transform is run-length encoded and
	// we merge the decoding into the Huffman parsing loop. These two
	// variables accumulate the repeat count. See the Wikipedia page for
	// details.
	repeat := 0
	repeatPower := 0

	// The `C' array (used by the inverse BWT) needs to be zero initialized.
	clear(bz2.c[:])

	decoded := 0 // counts the number of symbols decoded by the current tree.
	for {
		if decoded == 50 {
			// Switch to the next selector's tree every 50 symbols.
			if selectorIndex >= numSelectors {
				return StructuralError("insufficient selector indices for number of symbols")
			}
			if int(treeIndexes[selectorIndex]) >= len(huffmanTrees) {
				return StructuralError("tree selector out of range")
			}
			currentHuffmanTree = huffmanTrees[treeIndexes[selectorIndex]]
			selectorIndex++
			decoded = 0
		}

		v := currentHuffmanTree.Decode(br)
		decoded++

		if v < 2 {
			// This is either the RUNA or RUNB symbol.
			if repeat == 0 {
				repeatPower = 1
			}
			repeat += repeatPower << v
			repeatPower <<= 1

			// This limit of 2 million comes from the bzip2 source
			// code. It prevents repeat from overflowing.
			if repeat > 2*1024*1024 {
				return StructuralError("repeat count too large")
			}
			continue
		}

		if repeat > 0 {
			// We have decoded a complete run-length so we need to
			// replicate the last output symbol.
			if repeat > bz2.blockSize-bufIndex {
				return StructuralError("repeats past end of block")
			}
			for i := 0; i < repeat; i++ {
				b := mtf.First()
				bz2.tt[bufIndex] = uint32(b)
				bz2.c[b]++
				bufIndex++
			}
			repeat = 0
		}

		if int(v) == numSymbols-1 {
			// This is the EOF symbol. Because it's always at the
			// end of the move-to-front list, and never gets moved
			// to the front, it has this unique value.
			break
		}

		// Since two metasymbols (RUNA and RUNB) have values 0 and 1,
		// one would expect |v-2| to be passed to the MTF decoder.
		// However, the front of the MTF list is never referenced as 0,
		// it's always referenced with a run-length of 1. Thus 0
		// doesn't need to be encoded and we have |v-1| in the next
		// line.
		b := mtf.Decode(int(v - 1))
		if bufIndex >= bz2.blockSize {
			return StructuralError("data exceeds block size")
		}
		bz2.tt[bufIndex] = uint32(b)
		bz2.c[b]++
		bufIndex++
	}

	if origPtr >= uint(bufIndex) {
		return StructuralError("origPtr out of bounds")
	}

	// We have completed the entropy decoding. Now we can perform the
	// inverse BWT and setup the RLE buffer.
	bz2.preRLE = bz2.tt[:bufIndex]
	bz2.preRLEUsed = 0
	bz2.tPos = inverseBWT(bz2.preRLE, origPtr, bz2.c[:])
	bz2.lastByte = -1
	bz2.byteRepeats = 0
	bz2.repeats = 0

	return nil
}
// inverseBWT implements the inverse Burrows-Wheeler transform as described in
// http://www.hpl.hp.com/techreports/Compaq-DEC/SRC-RR-124.pdf, section 4.2.
// In that document, origPtr is called “I” and c is the “C” array after the
// first pass over the data. It's an argument here because we merge the first
// pass with the Huffman decoding.
//
// This also implements the “single array” method from the bzip2 source code
// which leaves the output, still shuffled, in the bottom 8 bits of tt with the
// index of the next byte in the top 24-bits. The index of the first byte is
// returned.
func inverseBWT(tt []uint32, origPtr uint, c []uint) uint32 {
	// Turn the per-byte histogram in c into an exclusive prefix sum:
	// c[b] becomes the number of symbols strictly smaller than b.
	sum := uint(0)
	for i, count := range c[:256] {
		c[i] = sum
		sum += count
	}

	// Thread the successor links through the top 24 bits of tt. Only bits
	// 8 and above are OR-ed in, so reading the low byte via the range
	// value stays correct even when an earlier iteration has already
	// written link bits into a later entry.
	for i, v := range tt {
		b := v & 0xff
		tt[c[b]] |= uint32(i) << 8
		c[b]++
	}

	return tt[origPtr] >> 8
}
// crctab drives a CRC-32 that is the mirror image of hash/crc32: all shifts
// run in the opposite direction, so input bits are processed in the reverse
// of the usual order. This is the checksum bzip2 uses.
var crctab [256]uint32

// init fills crctab for the MSB-first polynomial.
func init() {
	const poly = 0x04C11DB7
	for i := range crctab {
		crc := uint32(i) << 24
		for bit := 0; bit < 8; bit++ {
			if crc&(1<<31) != 0 {
				crc = crc<<1 ^ poly
			} else {
				crc <<= 1
			}
		}
		crctab[i] = crc
	}
}

// updateCRC folds the bytes of b into the running checksum val and returns
// the new checksum. The initial value is 0.
func updateCRC(val uint32, b []byte) uint32 {
	crc := ^val
	for _, v := range b {
		crc = crc<<8 ^ crctab[byte(crc>>24)^v]
	}
	return ^crc
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bzip2
import (
"cmp"
"slices"
)
// A huffmanTree is a binary tree which is navigated, bit-by-bit to reach a
// symbol.
type huffmanTree struct {
	// nodes contains all the non-leaf nodes in the tree. nodes[0] is the
	// root of the tree and nextNode contains the index of the next element
	// of nodes to use when the tree is being constructed.
	// nodes is sized up front (one slot per code) by newHuffmanTree, so
	// pointers into it remain valid while the tree is built.
	nodes    []huffmanNode
	nextNode int
}
// A huffmanNode is a node in the tree. left and right contain indexes into the
// nodes slice of the tree. If left or right is invalidNodeValue then the child
// is a leaf node and its value is in leftValue/rightValue.
//
// The symbols are uint16s because bzip2 encodes not only MTF indexes in the
// tree, but also two magic values for run-length encoding and an EOF symbol.
// Thus there are more than 256 possible symbols.
type huffmanNode struct {
	left, right           uint16
	leftValue, rightValue uint16
}

// invalidNodeValue is an invalid index which marks a leaf node in the tree.
const invalidNodeValue = 0xffff
// Decode reads bits from the given bitReader and navigates the tree until a
// symbol is found.
func (t *huffmanTree) Decode(br *bitReader) (v uint16) {
	nodeIndex := uint16(0) // node 0 is the root of the tree.

	for {
		node := &t.nodes[nodeIndex]

		var bit uint16
		if br.bits > 0 {
			// Get next bit - fast path. Pull the bit straight out
			// of the reader's buffer without a call.
			br.bits--
			// The &63 mask bounds the shift so the compiler need
			// not emit an oversized-shift guard.
			bit = uint16(br.n>>(br.bits&63)) & 1
		} else {
			// Get next bit - slow path.
			// Use ReadBits to retrieve a single bit
			// from the underlying io.ByteReader.
			bit = uint16(br.ReadBits(1))
		}

		// Trick a compiler into generating conditional move instead of branch,
		// by making both loads unconditional.
		l, r := node.left, node.right

		if bit == 1 {
			nodeIndex = l
		} else {
			nodeIndex = r
		}

		if nodeIndex == invalidNodeValue {
			// We found a leaf. Use the value of bit to decide
			// whether it is the left or the right value.
			l, r := node.leftValue, node.rightValue
			if bit == 1 {
				v = l
			} else {
				v = r
			}
			return
		}
	}
}
// newHuffmanTree builds a Huffman tree from a slice containing the code
// lengths of each symbol. The maximum code length is 32 bits.
// A StructuralError is returned (via buildHuffmanNode) when the lengths
// cannot form a valid tree.
func newHuffmanTree(lengths []uint8) (huffmanTree, error) {
	// There are many possible trees that assign the same code length to
	// each symbol (consider reflecting a tree down the middle, for
	// example). Since the code length assignments determine the
	// efficiency of the tree, each of these trees is equally good. In
	// order to minimize the amount of information needed to build a tree
	// bzip2 uses a canonical tree so that it can be reconstructed given
	// only the code length assignments.

	if len(lengths) < 2 {
		panic("newHuffmanTree: too few symbols")
	}

	var t huffmanTree

	// First we sort the code length assignments by ascending code length,
	// using the symbol value to break ties.
	pairs := make([]huffmanSymbolLengthPair, len(lengths))
	for i, length := range lengths {
		pairs[i].value = uint16(i)
		pairs[i].length = length
	}

	slices.SortFunc(pairs, func(a, b huffmanSymbolLengthPair) int {
		if c := cmp.Compare(a.length, b.length); c != 0 {
			return c
		}
		return cmp.Compare(a.value, b.value)
	})

	// Now we assign codes to the symbols, starting with the longest code.
	// We keep the codes packed into a uint32, at the most-significant end.
	// So branches are taken from the MSB downwards. This makes it easy to
	// sort them later.
	code := uint32(0)
	length := uint8(32)

	codes := make([]huffmanCode, len(lengths))
	for i := len(pairs) - 1; i >= 0; i-- {
		if length > pairs[i].length {
			length = pairs[i].length
		}
		codes[i].code = code
		codes[i].codeLen = length
		codes[i].value = pairs[i].value
		// We need to 'increment' the code, which means treating |code|
		// like a |length| bit number.
		code += 1 << (32 - length)
	}

	// Now we can sort by the code so that the left half of each branch are
	// grouped together, recursively.
	slices.SortFunc(codes, func(a, b huffmanCode) int {
		return cmp.Compare(a.code, b.code)
	})

	t.nodes = make([]huffmanNode, len(codes))
	_, err := buildHuffmanNode(&t, codes, 0)
	return t, err
}
// huffmanSymbolLengthPair contains a symbol and its code length.
type huffmanSymbolLengthPair struct {
	value  uint16 // the symbol being coded
	length uint8  // its assigned code length, in bits
}
// huffmanCode contains a symbol, its code and code length.
type huffmanCode struct {
	code    uint32 // the code bits, packed at the most-significant end
	codeLen uint8  // number of significant bits in code
	value   uint16 // the symbol this code represents
}
// buildHuffmanNode takes a slice of sorted huffmanCodes and builds a node in
// the Huffman tree at the given level. It returns the index of the newly
// constructed node.
func buildHuffmanNode(t *huffmanTree, codes []huffmanCode, level uint32) (nodeIndex uint16, err error) {
	// The bit examined at this level: bit (31-level) of the packed code.
	test := uint32(1) << (31 - level)

	// We have to search the list of codes to find the divide between the left and right sides.
	firstRightIndex := len(codes)
	for i, code := range codes {
		if code.code&test != 0 {
			firstRightIndex = i
			break
		}
	}

	left := codes[:firstRightIndex]
	right := codes[firstRightIndex:]

	if len(left) == 0 || len(right) == 0 {
		// There is a superfluous level in the Huffman tree indicating
		// a bug in the encoder. However, this bug has been observed in
		// the wild so we handle it.

		// If this function was called recursively then we know that
		// len(codes) >= 2 because, otherwise, we would have hit the
		// "leaf node" case, below, and not recurred.
		//
		// However, for the initial call it's possible that len(codes)
		// is zero or one. Both cases are invalid because a zero length
		// tree cannot encode anything and a length-1 tree can only
		// encode EOF and so is superfluous. We reject both.
		if len(codes) < 2 {
			return 0, StructuralError("empty Huffman tree")
		}

		// In this case the recursion doesn't always reduce the length
		// of codes so we need to ensure termination via another
		// mechanism.
		if level == 31 {
			// Since len(codes) >= 2 the only way that the values
			// can match at all 32 bits is if they are equal, which
			// is invalid. This ensures that we never enter
			// infinite recursion.
			return 0, StructuralError("equal symbols in Huffman tree")
		}

		if len(left) == 0 {
			return buildHuffmanNode(t, right, level+1)
		}
		return buildHuffmanNode(t, left, level+1)
	}

	// t.nodes was fully sized by newHuffmanTree, so this pointer stays
	// valid across the recursive calls below.
	nodeIndex = uint16(t.nextNode)
	node := &t.nodes[t.nextNode]
	t.nextNode++

	if len(left) == 1 {
		// leaf node
		node.left = invalidNodeValue
		node.leftValue = left[0].value
	} else {
		node.left, err = buildHuffmanNode(t, left, level+1)
	}

	if err != nil {
		return
	}

	if len(right) == 1 {
		// leaf node
		node.right = invalidNodeValue
		node.rightValue = right[0].value
	} else {
		node.right, err = buildHuffmanNode(t, right, level+1)
	}

	return
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bzip2
// moveToFrontDecoder implements a move-to-front list. Such a list is an
// efficient way to transform a string with repeating elements into one with
// many small valued numbers, which is suitable for entropy encoding. It works
// by starting with an initial list of symbols and references symbols by their
// index into that list. When a symbol is referenced, it's moved to the front
// of the list. Thus, a repeated symbol ends up being encoded with many zeros,
// as the symbol will be at the front of the list after the first access.
type moveToFrontDecoder []byte

// newMTFDecoder creates a decoder whose initial list is exactly symbols.
func newMTFDecoder(symbols []byte) moveToFrontDecoder {
	if len(symbols) > 256 {
		panic("too many symbols")
	}
	return moveToFrontDecoder(symbols)
}

// newMTFDecoderWithRange creates a decoder whose initial list is the symbols
// 0, 1, ..., n-1.
func newMTFDecoderWithRange(n int) moveToFrontDecoder {
	if n > 256 {
		panic("newMTFDecoderWithRange: cannot have > 256 symbols")
	}

	list := make([]byte, n)
	for i := range list {
		list[i] = byte(i)
	}
	return moveToFrontDecoder(list)
}

// Decode returns the symbol at position n and moves it to the front of the
// list.
func (m moveToFrontDecoder) Decode(n int) byte {
	// A plain copy beats more sophisticated structures in benchmarks,
	// probably because almost every access has small n, so the shifted
	// region enjoys high locality within a single cache line.
	sym := m[n]
	copy(m[1:n+1], m[:n])
	m[0] = sym
	return sym
}

// First returns the symbol currently at the front of the list.
func (m moveToFrontDecoder) First() byte {
	return m[0]
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"errors"
"fmt"
"io"
"math"
)
// Valid compression levels. Levels 2 through 8 (not named here) trade
// progressively more CPU time for denser output; see the levels table.
const (
	NoCompression      = 0
	BestSpeed          = 1
	BestCompression    = 9
	DefaultCompression = -1

	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
	// entropy encoding. This mode is useful in compressing data that has
	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
	// that lacks an entropy encoder. Compression gains are achieved when
	// certain bytes in the input stream occur more frequently than others.
	//
	// Note that HuffmanOnly produces a compressed output that is
	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
	// continue to be able to decompress this output.
	HuffmanOnly = -2
)
const (
	logWindowSize = 15
	windowSize    = 1 << logWindowSize
	windowMask    = windowSize - 1

	// The LZ77 step produces a sequence of literal tokens and <length, offset>
	// pair tokens. The offset is also known as distance. The underlying wire
	// format limits the range of lengths and offsets. For example, there are
	// 256 legitimate lengths: those in the range [3, 258]. This package's
	// compressor uses a higher minimum match length, enabling optimizations
	// such as finding matches via 32-bit loads and compares.
	baseMatchLength = 3       // The smallest match length per the RFC section 3.2.5
	minMatchLength  = 4       // The smallest match length that the compressor actually emits
	maxMatchLength  = 258     // The largest match length
	baseMatchOffset = 1       // The smallest match offset
	maxMatchOffset  = 1 << 15 // The largest match offset

	// The maximum number of tokens we put into a single flate block, just to
	// stop things from getting too large.
	maxFlateBlockTokens = 1 << 14
	maxStoreBlockSize   = 65535 // largest payload of a stored block (16-bit LEN field)
	hashBits            = 17    // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	maxHashOffset       = 1 << 24

	// skipNever is the fastSkipHashing sentinel meaning "never skip
	// per-byte hash insertion" (i.e. use lazy matching instead).
	skipNever = math.MaxInt32
)
// compressionLevel holds the tuning parameters for one compression level:
// good is the match length at which findMatch reduces its search effort
// (quartering the chain tries); lazy bounds how long a previous match may be
// before lazy matching is abandoned; nice is a match length "good enough" to
// stop searching; chain is the maximum number of hash-chain links followed;
// fastSkipHashing is the match length above which per-byte hash insertion is
// skipped (skipNever disables that mode).
type compressionLevel struct {
	level, good, lazy, nice, chain, fastSkipHashing int
}
// levels maps a compression level (the slice index) to its tuning parameters;
// the fields are level, good, lazy, nice, chain, fastSkipHashing.
var levels = []compressionLevel{
	{0, 0, 0, 0, 0, 0}, // NoCompression.
	{1, 0, 0, 0, 0, 0}, // BestSpeed uses a custom algorithm; see deflatefast.go.
	// For levels 2-3 we don't bother trying with lazy matches.
	{2, 4, 0, 16, 8, 5},
	{3, 4, 0, 32, 32, 6},
	// Levels 4-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{4, 4, 4, 16, 16, skipNever},
	{5, 8, 16, 32, 32, skipNever},
	{6, 8, 16, 128, 128, skipNever},
	{7, 8, 32, 128, 256, skipNever},
	{8, 32, 128, 258, 1024, skipNever},
	{9, 32, 258, 258, 4096, skipNever},
}
// compressor holds the state of one deflate stream. The fill and step
// function pointers select the strategy (e.g. store, best-speed, or
// chained-hash deflate) chosen when the compressor is initialized.
type compressor struct {
	compressionLevel

	w          *huffmanBitWriter
	bulkHasher func([]byte, []uint32)

	// compression algorithm
	fill      func(*compressor, []byte) int // copy data to window
	step      func(*compressor)             // process window
	bestSpeed *deflateFast                  // Encoder for BestSpeed

	// input window: unprocessed data is window[index:windowEnd]
	index         int
	window        []byte
	windowEnd     int
	blockStart    int  // window index where current tokens start
	byteAvailable bool // if true, still need to process window[index-1].
	sync          bool // requesting flush

	// queued output tokens
	tokens []token

	// deflate state
	length         int // length of the best match found so far
	offset         int // offset of the best match found so far
	maxInsertIndex int // last window index at which a 4-byte hash still fits
	err            error

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	// Positions are stored offset by hashOffset so that zero means "unset".
	// These are large and do not contain pointers, so put them
	// near the end of the struct so the GC has to scan less.
	chainHead  int
	hashHead   [hashSize]uint32
	hashPrev   [windowSize]uint32
	hashOffset int

	// hashMatch must be able to contain hashes for the maximum match length.
	hashMatch [maxMatchLength - 1]uint32
}
// fillDeflate copies new input into the sliding window, first sliding the
// window down by windowSize when its upper half is nearly full. It returns
// the number of bytes consumed from b.
func (d *compressor) fillDeflate(b []byte) int {
	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		copy(d.window, d.window[windowSize:2*windowSize])
		d.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			// The block start has slid out of the window; poison it
			// so writeBlock will not emit window bytes for it.
			d.blockStart = math.MaxInt32
		}
		d.hashOffset += windowSize
		if d.hashOffset > maxHashOffset {
			// Rebase hashOffset (back to 1) so that the stored
			// positions (index + hashOffset) stay within uint32.
			delta := d.hashOffset - 1
			d.hashOffset -= delta
			d.chainHead -= delta

			// Iterate over slices instead of arrays to avoid copying
			// the entire table onto the stack (Issue #18625).
			for i, v := range d.hashPrev[:] {
				if int(v) > delta {
					d.hashPrev[i] = uint32(int(v) - delta)
				} else {
					d.hashPrev[i] = 0
				}
			}
			for i, v := range d.hashHead[:] {
				if int(v) > delta {
					d.hashHead[i] = uint32(int(v) - delta)
				} else {
					d.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}
// writeBlock emits the accumulated tokens as one compressed block ending at
// window position index and records index as the start of the next block.
// It returns any error from the underlying bit writer; index <= 0 is a no-op.
func (d *compressor) writeBlock(tokens []token, index int) error {
	if index <= 0 {
		return nil
	}
	var window []byte
	if d.blockStart <= index {
		window = d.window[d.blockStart:index]
	}
	d.blockStart = index
	d.w.writeBlock(tokens, false, window)
	return d.w.err
}
// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only mode.
	if d.compressionLevel.level < 2 {
		return
	}
	if d.index != 0 || d.windowEnd != 0 {
		panic("internal error: fillWindow called with stale data")
	}

	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window, b)

	// Calculate 256 hashes at the time (more L1 cache hits)
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		index := j * 256
		// Each chunk overlaps the next by minMatchLength-1 bytes so
		// that no 4-byte window straddling a boundary is missed.
		end := index + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		toCheck := d.window[index:end]
		dstSize := len(toCheck) - minMatchLength + 1

		if dstSize <= 0 {
			continue
		}

		dst := d.hashMatch[:dstSize]
		d.bulkHasher(toCheck, dst)
		for i, val := range dst {
			di := i + index
			hh := &d.hashHead[val&hashMask]
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			d.hashPrev[di&windowMask] = *hh
			// Set the head of the hash chain to us.
			*hh = uint32(di + d.hashOffset)
		}
	}
	// Update window information.
	d.windowEnd = n
	d.index = n
}
// findMatch tries to find a match starting at pos whose length is greater
// than prevLength, following at most d.chain (fewer when a "good" match is
// already in hand) links of the hash chain that starts at prevHead.
// lookahead bounds how far past pos the window may be read. ok reports
// whether a better match was found.
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}

	win := d.window[0 : pos+minMatchLook]

	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}

	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = prevLength
	if length >= d.good {
		tries >>= 2
	}

	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize

	for i := prevHead; tries > 0; tries-- {
		// Cheap filter: a candidate can only beat the current best
		// if the byte just past the best length also matches.
		if wEnd == win[i+length] {
			n := matchLen(win[i:], wPos, minMatchLook)

			if n > length && (n > minMatchLength || pos-i <= 4096) {
				length = n
				offset = pos - i
				ok = true
				if n >= nice {
					// The match is good enough that we don't try to find a better one.
					break
				}
				wEnd = win[pos+n]
			}
		}
		if i == minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
		if i < minIndex || i < 0 {
			break
		}
	}
	return
}
// writeStoredBlock emits buf as a single stored (uncompressed) block and
// returns any error from the underlying bit writer.
func (d *compressor) writeStoredBlock(buf []byte) error {
	d.w.writeStoredHeader(len(buf), false)
	if d.w.err != nil {
		return d.w.err
	}
	d.w.writeBytes(buf)
	return d.w.err
}
// hashmul is the multiplicative constant used by hash4/bulkHash4.
const hashmul = 0x1e35a7bd

// hash4 returns a hash representation of the first 4 bytes
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
	h := uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
	return (h * hashmul) >> (32 - hashBits)
}
// bulkHash4 fills dst with the hash of every 4-byte window of b, computed
// exactly as hash4 would, rolling the previous 32-bit value forward one byte
// at a time.
func bulkHash4(b []byte, dst []uint32) {
	if len(b) < minMatchLength {
		return
	}
	hb := uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
	dst[0] = (hb * hashmul) >> (32 - hashBits)
	end := len(b) - minMatchLength + 1
	for i := 1; i < end; i++ {
		hb = hb<<8 | uint32(b[i+3])
		dst[i] = (hb * hashmul) >> (32 - hashBits)
	}
}
// matchLen returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
func matchLen(a, b []byte, max int) int {
	// Reslicing to max lets the compiler elide bounds checks inside the
	// loop.
	b = b[:max]
	a = a[:len(b)]
	for i, bv := range b {
		if a[i] != bv {
			return i
		}
	}
	return max
}
// encSpeed will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) encSpeed() {
	// We only compress if we have maxStoreBlockSize.
	if d.windowEnd < maxStoreBlockSize {
		if !d.sync {
			return
		}

		// Handle small sizes.
		if d.windowEnd < 128 {
			switch {
			case d.windowEnd == 0:
				return
			case d.windowEnd <= 16:
				// Tiny payloads: a stored block is cheapest.
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
			default:
				// Huffman-only block, no match searching.
				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
				d.err = d.w.err
			}
			d.windowEnd = 0
			d.bestSpeed.reset()
			return
		}
	}
	// Encode the block.
	d.tokens = d.bestSpeed.encode(d.tokens[:0], d.window[:d.windowEnd])

	// If we removed less than 1/16th, Huffman compress the block.
	if len(d.tokens) > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
	} else {
		d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd])
	}
	d.err = d.w.err
	d.windowEnd = 0
}
// initDeflate prepares the compressor for the chained-hash deflate strategy:
// a double-width sliding window, an empty token queue, and reset match and
// hash-chain state.
func (d *compressor) initDeflate() {
	d.window = make([]byte, 2*windowSize)
	d.hashOffset = 1
	d.tokens = make([]token, 0, maxFlateBlockTokens+1)
	d.length = minMatchLength - 1 // i.e. no match in hand yet
	d.offset = 0
	d.byteAvailable = false
	d.index = 0
	d.chainHead = -1
	d.bulkHasher = bulkHash4
}
// deflate is the step function for the standard (levels 2-9) strategies: it
// scans the window from d.index, maintaining the hash chains, performing
// greedy or lazy match selection depending on fastSkipHashing, and appending
// literal/match tokens, flushing a block whenever maxFlateBlockTokens is
// reached. Errors are left in d.err.
func (d *compressor) deflate() {
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}

	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)

Loop:
	for {
		if d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				break Loop
			}
			if d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens = append(d.tokens, literalToken(uint32(d.window[d.index-1])))
					d.byteAvailable = false
				}
				if len(d.tokens) > 0 {
					if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
						return
					}
					d.tokens = d.tokens[:0]
				}
				break Loop
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			hash := hash4(d.window[d.index : d.index+minMatchLength])
			hh := &d.hashHead[hash&hashMask]
			d.chainHead = int(*hh)
			d.hashPrev[d.index&windowMask] = uint32(d.chainHead)
			*hh = uint32(d.index + d.hashOffset)
		}
		prevLength := d.length
		prevOffset := d.offset
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		// Search for a match only when the chain head is still inside
		// the window and either (fast mode) there is enough lookahead,
		// or (lazy mode) the previous match is still improvable.
		if d.chainHead-d.hashOffset >= minIndex &&
			(d.fastSkipHashing != skipNever && lookahead > minMatchLength-1 ||
				d.fastSkipHashing == skipNever && lookahead > prevLength && prevLength < d.lazy) {
			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if d.fastSkipHashing != skipNever && d.length >= minMatchLength ||
			d.fastSkipHashing == skipNever && prevLength >= minMatchLength && d.length <= prevLength {
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			if d.fastSkipHashing != skipNever {
				d.tokens = append(d.tokens, matchToken(uint32(d.length-baseMatchLength), uint32(d.offset-baseMatchOffset)))
			} else {
				d.tokens = append(d.tokens, matchToken(uint32(prevLength-baseMatchLength), uint32(prevOffset-baseMatchOffset)))
			}
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			if d.length <= d.fastSkipHashing {
				var newIndex int
				if d.fastSkipHashing != skipNever {
					newIndex = d.index + d.length
				} else {
					newIndex = d.index + prevLength - 1
				}
				index := d.index
				for index++; index < newIndex; index++ {
					if index < d.maxInsertIndex {
						hash := hash4(d.window[index : index+minMatchLength])
						// Get previous value with the same hash.
						// Our chain should point to the previous value.
						hh := &d.hashHead[hash&hashMask]
						d.hashPrev[index&windowMask] = *hh
						// Set the head of the hash chain to us.
						*hh = uint32(index + d.hashOffset)
					}
				}
				d.index = index

				if d.fastSkipHashing == skipNever {
					d.byteAvailable = false
					d.length = minMatchLength - 1
				}
			} else {
				// For matches this long, we don't bother inserting each individual
				// item into the table.
				d.index += d.length
			}
			if len(d.tokens) == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
					return
				}
				d.tokens = d.tokens[:0]
			}
		} else {
			// No (better) match: emit a literal. In lazy mode the
			// literal for window[index-1] was deferred until now.
			if d.fastSkipHashing != skipNever || d.byteAvailable {
				i := d.index - 1
				if d.fastSkipHashing != skipNever {
					i = d.index
				}
				d.tokens = append(d.tokens, literalToken(uint32(d.window[i])))
				if len(d.tokens) == maxFlateBlockTokens {
					if d.err = d.writeBlock(d.tokens, i+1); d.err != nil {
						return
					}
					d.tokens = d.tokens[:0]
				}
			}
			d.index++
			if d.fastSkipHashing == skipNever {
				d.byteAvailable = true
			}
		}
	}
}
// fillStore copies as much of b as fits into the window buffer and
// reports how many bytes were consumed.
func (d *compressor) fillStore(b []byte) int {
	copied := copy(d.window[d.windowEnd:], b)
	d.windowEnd += copied
	return copied
}
// store emits the buffered window as a stored (uncompressed) block, but only
// once the buffer is full or a flush/close has been requested.
func (d *compressor) store() {
	if d.windowEnd == 0 {
		return
	}
	if d.windowEnd != maxStoreBlockSize && !d.sync {
		return
	}
	d.err = d.writeStoredBlock(d.window[:d.windowEnd])
	d.windowEnd = 0
}
// storeHuff compresses and stores the currently added data
// when the d.window is full or we are at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeHuff() {
	if d.windowEnd == 0 {
		return
	}
	if d.windowEnd < len(d.window) && !d.sync {
		// Not enough buffered data and no flush requested.
		return
	}
	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
	d.err = d.w.err
	d.windowEnd = 0
}
// write feeds b into the compressor, running compression steps until the
// whole slice has been consumed. It reports len(b) on success; any sticky
// error aborts immediately.
func (d *compressor) write(b []byte) (n int, err error) {
	if d.err != nil {
		return 0, d.err
	}
	total := len(b)
	for len(b) > 0 {
		d.step(d)
		consumed := d.fill(d, b)
		b = b[consumed:]
		if d.err != nil {
			return 0, d.err
		}
	}
	return total, nil
}
// syncFlush forces all buffered input through the compressor and emits an
// empty stored block as a sync marker (Z_SYNC_FLUSH semantics).
func (d *compressor) syncFlush() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err != nil {
		d.sync = false
		return d.err
	}
	// Zero-length stored block acts as the byte-aligned sync marker.
	d.w.writeStoredHeader(0, false)
	d.w.flush()
	d.err = d.w.err
	d.sync = false
	return d.err
}
// init configures the compressor for the given level, selecting the fill and
// step strategies. It returns an error for levels outside [-2, 9].
func (d *compressor) init(w io.Writer, level int) (err error) {
	d.w = newHuffmanBitWriter(w)
	// Normalize the default level up front so the ranged case below handles
	// it together with the other lazy-matching levels.
	if level == DefaultCompression {
		level = 6
	}
	switch {
	case level == NoCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).store
	case level == HuffmanOnly:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).storeHuff
	case level == BestSpeed:
		d.compressionLevel = levels[level]
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).encSpeed
		d.bestSpeed = newDeflateFast()
		d.tokens = make([]token, maxStoreBlockSize)
	case 2 <= level && level <= 9:
		d.compressionLevel = levels[level]
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		d.step = (*compressor).deflate
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
	return nil
}
// reset restores the compressor to its initial state, retargeted at w, so a
// Writer can be reused without reallocating its large buffers.
func (d *compressor) reset(w io.Writer) {
	d.w.reset(w)
	d.sync = false
	d.err = nil
	switch d.compressionLevel.level {
	case NoCompression:
		d.windowEnd = 0
	case BestSpeed:
		d.windowEnd = 0
		d.tokens = d.tokens[:0]
		d.bestSpeed.reset()
	default:
		// Lazy-matching levels: wipe hash chains and all window bookkeeping.
		d.chainHead = -1
		clear(d.hashHead[:])
		clear(d.hashPrev[:])
		d.hashOffset = 1
		d.index = 0
		d.windowEnd = 0
		d.blockStart = 0
		d.byteAvailable = false
		d.tokens = d.tokens[:0]
		d.length = minMatchLength - 1
		d.offset = 0
		d.maxInsertIndex = 0
	}
}
// close flushes remaining data, writes the final (empty, EOF-marked) stored
// block, and marks the compressor closed. Closing twice is a no-op.
func (d *compressor) close() error {
	if d.err == errWriterClosed {
		// Already closed successfully.
		return nil
	}
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err != nil {
		return d.err
	}
	d.w.writeStoredHeader(0, true)
	if d.w.err != nil {
		return d.w.err
	}
	d.w.flush()
	if d.w.err != nil {
		return d.w.err
	}
	d.err = errWriterClosed
	return nil
}
// NewWriter returns a new [Writer] compressing data at the given level.
// Following zlib, levels range from 1 ([BestSpeed]) to 9 ([BestCompression]);
// higher levels typically run slower but compress more. Level 0
// ([NoCompression]) does not attempt any compression; it only adds the
// necessary DEFLATE framing.
// Level -1 ([DefaultCompression]) uses the default compression level.
// Level -2 ([HuffmanOnly]) will use Huffman compression only, giving
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
func NewWriter(w io.Writer, level int) (*Writer, error) {
	zw := new(Writer)
	if err := zw.d.init(w, level); err != nil {
		return nil, err
	}
	return zw, nil
}
// NewWriterDict is like [NewWriter] but initializes the new
// [Writer] with a preset dictionary. The returned [Writer] behaves
// as if the dictionary had been written to it without producing
// any compressed output. The compressed data written to w
// can only be decompressed by a reader initialized with the
// same dictionary (see [NewReaderDict]).
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	zw, err := NewWriter(&dictWriter{w}, level)
	if err != nil {
		return nil, err
	}
	// Prime the compression window with the dictionary contents.
	zw.d.fillWindow(dict)
	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
	return zw, nil
}
// dictWriter wraps the destination writer of a Writer created by
// NewWriterDict. Its presence is how Reset detects that a Writer
// was dictionary-initialized.
type dictWriter struct {
	w io.Writer
}

// Write forwards b unchanged to the wrapped writer.
func (w *dictWriter) Write(b []byte) (n int, err error) {
	return w.w.Write(b)
}
var errWriterClosed = errors.New("flate: closed writer")
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see [NewWriter]).
type Writer struct {
	d    compressor
	dict []byte // preset dictionary copy, replayed by Reset; nil for NewWriter
}
// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
func (w *Writer) Write(data []byte) (n int, err error) {
	return w.d.write(data)
}
// Flush flushes any pending data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// Calling Flush when there is no pending data still causes the [Writer]
// to emit a sync marker of at least 4 bytes.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// For more about flushing:
	// https://www.bolet.org/~pornin/deflate-flush.html
	return w.d.syncFlush()
}
// Close flushes and closes the writer. It writes the final DEFLATE block
// (marked EOF) but does not close the underlying writer.
func (w *Writer) Close() error {
	return w.d.close()
}
// Reset discards the writer's state and makes it equivalent to
// the result of [NewWriter] or [NewWriterDict] called with dst
// and w's level and dictionary.
func (w *Writer) Reset(dst io.Writer) {
	dw, ok := w.d.w.writer.(*dictWriter)
	if !ok {
		// w was created with NewWriter.
		w.d.reset(dst)
		return
	}
	// w was created with NewWriterDict: retarget the wrapper and
	// replay the saved dictionary into the fresh window.
	dw.w = dst
	w.d.reset(dw)
	w.d.fillWindow(w.dict)
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import "math"
// This encoding algorithm, which prioritizes speed over output size, is
// based on Snappy's LZ77-style encoder: github.com/golang/snappy
const (
	tableBits  = 14             // Bits used in the table.
	tableSize  = 1 << tableBits // Size of the table.
	tableMask  = tableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
	tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
	// Reset the buffer offset when reaching this.
	// Offsets are stored between blocks as int32 values.
	// Since the offset we are checking against is at the beginning
	// of the buffer, we need to subtract the current and input
	// buffer to not risk overflowing the int32.
	bufferReset = math.MaxInt32 - maxStoreBlockSize*2
)
// load32 returns the 4 bytes at b[i:i+4] decoded as a little-endian uint32.
func load32(b []byte, i int32) uint32 {
	s := b[i : i+4 : len(b)] // one bounds check; the loads below are provably in range
	return uint32(s[3])<<24 | uint32(s[2])<<16 | uint32(s[1])<<8 | uint32(s[0])
}
// load64 returns the 8 bytes at b[i:i+8] decoded as a little-endian uint64.
func load64(b []byte, i int32) uint64 {
	s := b[i : i+8 : len(b)] // one bounds check; the loads below are provably in range
	lo := uint64(s[0]) | uint64(s[1])<<8 | uint64(s[2])<<16 | uint64(s[3])<<24
	hi := uint64(s[4]) | uint64(s[5])<<8 | uint64(s[6])<<16 | uint64(s[7])<<24
	return lo | hi<<32
}
// hash maps a 4-byte value to a tableBits-wide table index by multiplying by
// a constant (from Snappy) and keeping the top bits.
func hash(u uint32) uint32 {
	const multiplier = 0x1e35a7bd
	return (u * multiplier) >> tableShift
}
// These constants are defined by the Snappy implementation so that its
// assembly implementation can fast-path some 16-bytes-at-a-time copies. They
// aren't necessary in the pure Go implementation, as we don't use those same
// optimizations, but using the same thresholds doesn't really hurt.
const (
	inputMargin            = 16 - 1           // bytes kept unexamined at the end of the input
	minNonLiteralBlockSize = 1 + 1 + inputMargin // below this, encode emits only literals
)
// tableEntry is one slot of the deflateFast match table: the 4-byte value
// seen and the (offset-space) position it was seen at.
type tableEntry struct {
	val    uint32 // Value at destination
	offset int32
}

// deflateFast maintains the table for matches,
// and the previous byte block for cross block matching.
type deflateFast struct {
	table [tableSize]tableEntry
	prev  []byte // Previous block, zero length if unknown.
	cur   int32  // Current match offset.
}
// newDeflateFast returns an encoder with a preallocated previous-block buffer
// and cur started past zero so stale zero offsets never match.
func newDeflateFast() *deflateFast {
	e := &deflateFast{
		cur:  maxStoreBlockSize,
		prev: make([]byte, 0, maxStoreBlockSize),
	}
	return e
}
// encode encodes a block given in src and appends tokens
// to dst and returns the result.
func (e *deflateFast) encode(dst []token, src []byte) []token {
	// Ensure that e.cur doesn't wrap.
	if e.cur >= bufferReset {
		e.shiftOffsets()
	}
	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// Too short to find matches: emit everything as literals.
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return emitLiteral(dst, src)
	}
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)
	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load32(src, s)
	nextHash := hash(cv)
	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := int32(32)
		nextS := s
		var candidate tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = e.table[nextHash&tableMask]
			now := load32(src, nextS)
			e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
			nextHash = hash(now)
			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || cv != candidate.val {
				// Out of range or not matched.
				cv = now
				continue
			}
			break
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		dst = emitLiteral(dst, src[nextEmit:s])
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			// Extend the 4-byte match as long as possible.
			//
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchLen(s, t, src)
			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst = append(dst, matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)))
			s += l
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}
			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
			x >>= 8
			currHash := hash(uint32(x))
			candidate = e.table[currHash&tableMask]
			e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}
			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x) != candidate.val {
				cv = uint32(x >> 8)
				nextHash = hash(cv)
				s++
				break
			}
		}
	}
emitRemainder:
	if int(nextEmit) < len(src) {
		dst = emitLiteral(dst, src[nextEmit:])
	}
	// Advance the offset space and remember this block for cross-block matches.
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
	return dst
}
// emitLiteral appends one literal token per byte of lit to dst and returns
// the extended slice.
func emitLiteral(dst []token, lit []byte) []token {
	for i := range lit {
		dst = append(dst, literalToken(uint32(lit[i])))
	}
	return dst
}
// matchLen returns the match length between src[s:] and src[t:].
// t can be negative to indicate the match is starting in e.prev.
// We assume that src[s-4:s] and src[t-4:t] already match.
func (e *deflateFast) matchLen(s, t int32, src []byte) int32 {
	// Cap the comparison so the total match never exceeds maxMatchLength
	// (the 4 already-matched bytes are counted by the caller).
	s1 := int(s) + maxMatchLength - 4
	if s1 > len(src) {
		s1 = len(src)
	}
	// If we are inside the current block
	if t >= 0 {
		b := src[t:]
		a := src[s:s1]
		b = b[:len(a)]
		// Extend the match to be as long as possible.
		for i := range a {
			if a[i] != b[i] {
				return int32(i)
			}
		}
		return int32(len(a))
	}
	// We found a match in the previous block.
	tp := int32(len(e.prev)) + t
	if tp < 0 {
		// Match would start before the retained history; no match.
		return 0
	}
	// Extend the match to be as long as possible.
	a := src[s:s1]
	b := e.prev[tp:]
	if len(b) > len(a) {
		b = b[:len(a)]
	}
	a = a[:len(b)]
	for i := range b {
		if a[i] != b[i] {
			return int32(i)
		}
	}
	// If we reached our limit, we matched everything we are
	// allowed to in the previous block and we return.
	n := int32(len(b))
	if int(s+n) == s1 {
		return n
	}
	// Continue looking for more matches in the current block.
	a = src[s+n : s1]
	b = src[:len(a)]
	for i := range a {
		if a[i] != b[i] {
			return int32(i) + n
		}
	}
	return int32(len(a)) + n
}
// Reset resets the encoding history.
// This ensures that no matches are made to the previous block.
func (e *deflateFast) reset() {
	e.prev = e.prev[:0]
	// Bump the offset, so all matches will fail distance check.
	// Nothing should be >= e.cur in the table.
	e.cur += maxMatchOffset
	// Protect against e.cur wraparound.
	if e.cur >= bufferReset {
		e.shiftOffsets()
	}
}
// shiftOffsets will shift down all match offset.
// This is only called in rare situations to prevent integer overflow.
//
// See https://golang.org/issue/18636 and https://github.com/golang/go/issues/34121.
func (e *deflateFast) shiftOffsets() {
	if len(e.prev) == 0 {
		// We have no history; just clear the table.
		clear(e.table[:])
		e.cur = maxMatchOffset + 1
		return
	}
	// We want to reset e.cur to maxMatchOffset+1, so every table entry must
	// move down by this delta. Entries that would go negative are already
	// out of match range and can safely be clamped to 0.
	delta := e.cur - (maxMatchOffset + 1)
	for i := range e.table {
		shifted := e.table[i].offset - delta
		if shifted < 0 {
			shifted = 0
		}
		e.table[i].offset = shifted
	}
	e.cur = maxMatchOffset + 1
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
//   - Literal insertions: Runs of one or more symbols are inserted into the data
//     stream as is. This is accomplished through the writeByte method for a
//     single symbol, or combinations of writeSlice/writeMark for multiple symbols.
//     Any valid stream must start with a literal insertion if no preset dictionary
//     is used.
//
//   - Backward copies: Runs of one or more symbols are copied from previously
//     emitted data. Backward copies come as the tuple (dist, length) where dist
//     determines how far back in the stream to copy from and length determines how
//     many bytes to copy. Note that it is valid for the length to be greater than
//     the distance. Since LZ77 uses forward copies, that situation is used to
//     perform a form of run-length encoding on repeated runs of symbols.
//     The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
type dictDecoder struct {
	hist []byte // Sliding window history
	// Invariant: 0 <= rdPos <= wrPos <= len(hist)
	wrPos int  // Current output position in buffer
	rdPos int  // Have emitted hist[:rdPos] already
	full  bool // Has a full window length been written yet?
}
// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
	// Reset all state but keep the allocated window for reuse.
	*dd = dictDecoder{hist: dd.hist}
	if cap(dd.hist) < size {
		dd.hist = make([]byte, size)
	}
	dd.hist = dd.hist[:size]
	// An oversized preset dictionary contributes only its tail.
	if excess := len(dict) - len(dd.hist); excess > 0 {
		dict = dict[excess:]
	}
	dd.wrPos = copy(dd.hist, dict)
	if dd.wrPos == len(dd.hist) {
		// Dictionary filled the whole window: wrap and mark it full.
		dd.wrPos = 0
		dd.full = true
	}
	dd.rdPos = dd.wrPos
}
// histSize reports the total amount of historical data in the dictionary.
func (dd *dictDecoder) histSize() int {
	if !dd.full {
		// Window has not wrapped yet; only hist[:wrPos] is history.
		return dd.wrPos
	}
	return len(dd.hist)
}
// availRead reports the number of bytes that can be flushed by readFlush.
func (dd *dictDecoder) availRead() int {
	return dd.wrPos - dd.rdPos
}

// availWrite reports the available amount of output buffer space.
func (dd *dictDecoder) availWrite() int {
	return len(dd.hist) - dd.wrPos
}

// writeSlice returns a slice of the available buffer to write data to.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
	return dd.hist[dd.wrPos:]
}

// writeMark advances the writer pointer by cnt, after the caller has filled
// that many bytes of the slice returned by writeSlice.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
	dd.wrPos += cnt
}

// writeByte writes a single byte to the dictionary.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
	dd.hist[dd.wrPos] = c
	dd.wrPos++
}
// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
	dstBase := dd.wrPos
	dstPos := dstBase
	srcPos := dstPos - dist
	endPos := dstPos + length
	if endPos > len(dd.hist) {
		// Clamp to the buffer end; the caller retries after a readFlush.
		endPos = len(dd.hist)
	}
	// Copy non-overlapping section after destination position.
	//
	// This section is non-overlapping in that the copy length for this section
	// is always less than or equal to the backwards distance. This can occur
	// if a distance refers to data that wraps-around in the buffer.
	// Thus, a backwards copy is performed here; that is, the exact bytes in
	// the source prior to the copy is placed in the destination.
	if srcPos < 0 {
		srcPos += len(dd.hist)
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
		srcPos = 0
	}
	// Copy possibly overlapping section before destination position.
	//
	// This section can overlap if the copy length for this section is larger
	// than the backwards distance. This is allowed by LZ77 so that repeated
	// strings can be succinctly represented using (dist, length) pairs.
	// Thus, a forwards copy is performed here; that is, the bytes copied is
	// possibly dependent on the resulting bytes in the destination as the copy
	// progresses along. This is functionally equivalent to the following:
	//
	//	for i := 0; i < endPos-dstPos; i++ {
	//		dd.hist[dstPos+i] = dd.hist[srcPos+i]
	//	}
	//	dstPos = endPos
	//
	for dstPos < endPos {
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	}
	dd.wrPos = dstPos
	return dstPos - dstBase
}
// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
	dstBase := dd.wrPos
	end := dstBase + length
	// Bail out to the general writeCopy when the source would wrap around
	// or the copy would run past the buffer end.
	if dstBase < dist || end > len(dd.hist) {
		return 0
	}
	src := dstBase - dist
	pos := dstBase
	// Forward copy; each pass at least doubles the copied run, which handles
	// the overlapping (run-length) case correctly.
	for pos < end {
		pos += copy(dd.hist[pos:end], dd.hist[src:pos])
	}
	dd.wrPos = pos
	return pos - dstBase
}
// readFlush returns a slice of the historical buffer that is ready to be
// emitted to the user. The data returned by readFlush must be fully consumed
// before calling any other dictDecoder methods.
func (dd *dictDecoder) readFlush() []byte {
	pending := dd.hist[dd.rdPos:dd.wrPos]
	dd.rdPos = dd.wrPos
	// Writer reached the buffer end: wrap both positions and note that a
	// full window of history now exists.
	if dd.wrPos == len(dd.hist) {
		dd.full = true
		dd.wrPos, dd.rdPos = 0, 0
	}
	return pending
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"io"
)
const (
	// The largest offset code.
	offsetCodeCount = 30
	// The special code used to mark the end of a block.
	endBlockMarker = 256
	// The first length code.
	lengthCodesStart = 257
	// The number of codegen codes.
	codegenCodeCount = 19
	badCode          = 255 // sentinel marking the end of the codegen array
	// bufferFlushSize indicates the buffer size
	// after which bytes are flushed to the writer.
	// Should preferably be a multiple of 6, since
	// we accumulate 6 bytes between writes to the buffer.
	bufferFlushSize = 240
	// bufferSize is the actual output byte buffer size.
	// It must have additional headroom for a flush
	// which can contain up to 8 bytes.
	bufferSize = bufferFlushSize + 8
)
// The number of extra bits needed by length code X - LENGTH_CODES_START.
var lengthExtraBits = []int8{
	/* 257 */ 0, 0, 0,
	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
	/* 280 */ 4, 5, 5, 5, 5, 0,
}

// The length indicated by length code X - LENGTH_CODES_START.
var lengthBase = []uint32{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
	64, 80, 96, 112, 128, 160, 192, 224, 255,
}

// offset code word extra bits.
var offsetExtraBits = []int8{
	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
}

// offsetBase is the smallest offset encoded by each offset code (RFC 1951 3.2.5).
var offsetBase = []uint32{
	0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
	0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
	0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
	0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
	0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
	0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
}

// The odd order in which the codegen code sizes are written (RFC 1951 3.2.7).
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
// huffmanBitWriter accumulates bits and bytes of DEFLATE output and writes
// them to an underlying writer. Write errors are sticky in err.
type huffmanBitWriter struct {
	// writer is the underlying writer.
	// Do not use it directly; use the write method, which ensures
	// that Write errors are sticky.
	writer io.Writer
	// Data waiting to be written is bytes[0:nbytes]
	// and then the low nbits of bits. Data is always written
	// sequentially into the bytes array.
	bits            uint64
	nbits           uint
	bytes           [bufferSize]byte
	codegenFreq     [codegenCodeCount]int32
	nbytes          int
	literalFreq     []int32 // frequency counts for literal/length codes
	offsetFreq      []int32 // frequency counts for offset codes
	codegen         []uint8 // RLE-encoded code lengths, terminated by badCode
	literalEncoding *huffmanEncoder
	offsetEncoding  *huffmanEncoder
	codegenEncoding *huffmanEncoder
	err             error // first write error; once set, all output is dropped
}
// newHuffmanBitWriter returns a bit writer targeting w with all frequency
// tables and encoders preallocated at their maximum sizes.
func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
	hw := &huffmanBitWriter{writer: w}
	hw.literalFreq = make([]int32, maxNumLit)
	hw.offsetFreq = make([]int32, offsetCodeCount)
	hw.codegen = make([]uint8, maxNumLit+offsetCodeCount+1)
	hw.literalEncoding = newHuffmanEncoder(maxNumLit)
	hw.codegenEncoding = newHuffmanEncoder(codegenCodeCount)
	hw.offsetEncoding = newHuffmanEncoder(offsetCodeCount)
	return hw
}
// reset retargets the bit writer at a new destination and discards any
// buffered bits, bytes, and sticky error.
func (w *huffmanBitWriter) reset(writer io.Writer) {
	w.writer = writer
	w.bits = 0
	w.nbits = 0
	w.nbytes = 0
	w.err = nil
}
// flush drains the bit accumulator into the byte buffer (padding the final
// partial byte with zero bits) and writes everything buffered so far.
func (w *huffmanBitWriter) flush() {
	if w.err != nil {
		w.nbits = 0
		return
	}
	n := w.nbytes
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		// Subtract at most 8, clamping at zero for the final partial byte
		// so the unsigned counter cannot underflow.
		w.nbits -= min(w.nbits, 8)
		n++
	}
	w.bits = 0
	w.write(w.bytes[:n])
	w.nbytes = 0
}
// write sends b to the underlying writer, making any error sticky in w.err
// and dropping all output after the first failure.
func (w *huffmanBitWriter) write(b []byte) {
	if w.err != nil {
		return
	}
	_, w.err = w.writer.Write(b)
}
// writeBits appends the low nb bits of b to the bit accumulator. Once at
// least 48 bits are pending, six whole bytes are spilled to the byte buffer,
// which is flushed to the writer when it reaches bufferFlushSize.
func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
	if w.err != nil {
		return
	}
	w.bits |= uint64(b) << w.nbits
	w.nbits += nb
	if w.nbits >= 48 {
		// Spill the low 48 bits as six little-endian bytes.
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		n := w.nbytes
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n >= bufferFlushSize {
			w.write(w.bytes[:n])
			n = 0
		}
		w.nbytes = n
	}
}
// writeBytes flushes any whole buffered bytes and then writes bytes directly
// to the underlying writer. The bit accumulator must be byte-aligned.
func (w *huffmanBitWriter) writeBytes(bytes []byte) {
	if w.err != nil {
		return
	}
	if w.nbits&7 != 0 {
		w.err = InternalError("writeBytes with unfinished bits")
		return
	}
	// Drain the (byte-aligned) accumulator into the byte buffer.
	n := w.nbytes
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		w.nbits -= 8
		n++
	}
	if n != 0 {
		w.write(w.bytes[:n])
	}
	w.nbytes = 0
	w.write(bytes)
}
// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array). This method generates that run-length encoding.
//
// The result is written into the codegen array, and the frequencies
// of each code is written into the codegenFreq array.
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker
//
//	numLiterals The number of literals in literalEncoding
//	numOffsets  The number of offsets in offsetEncoding
//	litenc, offenc The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
	clear(w.codegenFreq[:])
	// Note that we are using codegen both as a temporary variable for holding
	// a copy of the frequencies, and as the place where we put the result.
	// This is fine because the output is always shorter than the input used
	// so far.
	codegen := w.codegen // cache
	// Copy the concatenated code sizes to codegen. Put a marker at the end.
	cgnl := codegen[:numLiterals]
	for i := range cgnl {
		cgnl[i] = uint8(litEnc.codes[i].len)
	}
	cgnl = codegen[numLiterals : numLiterals+numOffsets]
	for i := range cgnl {
		cgnl[i] = uint8(offEnc.codes[i].len)
	}
	codegen[numLiterals+numOffsets] = badCode
	size := codegen[0]
	count := 1
	outIndex := 0
	for inIndex := 1; size != badCode; inIndex++ {
		// INVARIANT: We have seen "count" copies of size that have not yet
		// had output generated for them.
		nextSize := codegen[inIndex]
		if nextSize == size {
			count++
			continue
		}
		// We need to generate codegen indicating "count" of size.
		if size != 0 {
			// Emit one literal copy, then code 16 ("repeat previous")
			// for runs of 3-6 at a time.
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
			count--
			for count >= 3 {
				n := 6
				if n > count {
					n = count
				}
				codegen[outIndex] = 16
				outIndex++
				codegen[outIndex] = uint8(n - 3)
				outIndex++
				w.codegenFreq[16]++
				count -= n
			}
		} else {
			// Runs of zeros: code 18 covers 11-138, code 17 covers 3-10.
			for count >= 11 {
				n := 138
				if n > count {
					n = count
				}
				codegen[outIndex] = 18
				outIndex++
				codegen[outIndex] = uint8(n - 11)
				outIndex++
				w.codegenFreq[18]++
				count -= n
			}
			if count >= 3 {
				// count >= 3 && count <= 10
				codegen[outIndex] = 17
				outIndex++
				codegen[outIndex] = uint8(count - 3)
				outIndex++
				w.codegenFreq[17]++
				count = 0
			}
		}
		count--
		// Leftover repeats too short for a repeat code: emit them literally.
		for ; count >= 0; count-- {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
		}
		// Set up invariant for next time through the loop.
		size = nextSize
		count = 1
	}
	// Marker indicating the end of the codegen.
	codegen[outIndex] = badCode
}
// dynamicSize returns the size of dynamically encoded data in bits, along
// with the number of codegen codes that must be transmitted in the header.
func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
	// Trailing zero-frequency codegen codes (in the RFC's odd transmission
	// order) need not be sent; at least 4 always are.
	numCodegens = len(w.codegenFreq)
	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
		numCodegens--
	}
	// Header: 3 block bits + HLIT(5) + HDIST(5) + HCLEN(4) + 3 bits per
	// codegen code length, plus the RLE-coded lengths and their extra bits.
	header := 3 + 5 + 5 + 4 + 3*numCodegens +
		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
		2*int(w.codegenFreq[16]) +
		3*int(w.codegenFreq[17]) +
		7*int(w.codegenFreq[18])
	size = header +
		litEnc.bitLength(w.literalFreq) +
		offEnc.bitLength(w.offsetFreq) +
		extraBits
	return size, numCodegens
}
// fixedSize returns the size of data encoded with the fixed Huffman codes,
// in bits. (The previous comment said "dynamically encoded" — copy-paste
// from dynamicSize; this function prices the fixed encoding.)
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
	return 3 +
		fixedLiteralEncoding.bitLength(w.literalFreq) +
		fixedOffsetEncoding.bitLength(w.offsetFreq) +
		extraBits
}
// storedSize calculates the stored size, including header.
// The function returns the size in bits and whether the block
// fits inside a single block.
func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
	// nil input means "no original bytes available" — storing is impossible.
	if in == nil || len(in) > maxStoreBlockSize {
		return 0, false
	}
	// 5 header bytes (block bits + LEN + NLEN) plus the payload, in bits.
	return (len(in) + 5) * 8, true
}
// writeCode appends the Huffman code c to the bit accumulator. This is the
// same hot-path spill logic as writeBits, duplicated so it can inline.
func (w *huffmanBitWriter) writeCode(c hcode) {
	if w.err != nil {
		return
	}
	w.bits |= uint64(c.code) << w.nbits
	w.nbits += uint(c.len)
	if w.nbits >= 48 {
		// Spill the low 48 bits as six little-endian bytes.
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		n := w.nbytes
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n >= bufferFlushSize {
			w.write(w.bytes[:n])
			n = 0
		}
		w.nbytes = n
	}
}
// Write the header of a dynamic Huffman block to the output stream.
//
//	numLiterals  The number of literals specified in codegen
//	numOffsets   The number of offsets specified in codegen
//	numCodegens  The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
	if w.err != nil {
		return
	}
	// Block header bits: BTYPE=10 (dynamic) with BFINAL in the low bit.
	var firstBits int32 = 4
	if isEof {
		firstBits = 5
	}
	w.writeBits(firstBits, 3)
	// HLIT, HDIST, HCLEN fields per RFC 1951 3.2.7.
	w.writeBits(int32(numLiterals-257), 5)
	w.writeBits(int32(numOffsets-1), 5)
	w.writeBits(int32(numCodegens-4), 4)
	// Codegen code lengths, 3 bits each, in the RFC's special order.
	for i := 0; i < numCodegens; i++ {
		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
		w.writeBits(int32(value), 3)
	}
	// The RLE-encoded literal/offset code lengths; codes 16-18 carry
	// repeat counts in 2, 3, and 7 extra bits respectively.
	i := 0
	for {
		var codeWord int = int(w.codegen[i])
		i++
		if codeWord == badCode {
			break
		}
		w.writeCode(w.codegenEncoding.codes[uint32(codeWord)])
		switch codeWord {
		case 16:
			w.writeBits(int32(w.codegen[i]), 2)
			i++
		case 17:
			w.writeBits(int32(w.codegen[i]), 3)
			i++
		case 18:
			w.writeBits(int32(w.codegen[i]), 7)
			i++
		}
	}
}
// writeStoredHeader emits the header of a stored (uncompressed) block:
// the 3-bit block header, byte alignment, then LEN and its complement NLEN.
func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
	if w.err != nil {
		return
	}
	// BTYPE=00 (stored); the low bit is BFINAL.
	var header int32
	if isEof {
		header = 1
	}
	w.writeBits(header, 3)
	// Stored blocks are byte-aligned.
	w.flush()
	w.writeBits(int32(length), 16)
	w.writeBits(int32(^uint16(length)), 16)
}
// writeFixedHeader emits the 3-bit header of a fixed Huffman block.
func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
	if w.err != nil {
		return
	}
	// BTYPE=01 (fixed Huffman); the low bit is BFINAL.
	header := int32(2)
	if isEof {
		header |= 1
	}
	w.writeBits(header, 3)
}
// writeBlock will write a block of tokens with the smallest encoding.
// The original input can be supplied, and if the huffman encoded data
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is nil, the tokens will always be Huffman encoded.
func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens = append(tokens, endBlockMarker)
	numLiterals, numOffsets := w.indexTokens(tokens)

	var extraBits int
	storedSize, storable := w.storedSize(input)
	if storable {
		// We only bother calculating the costs of the extra bits required by
		// the length of offset fields (which will be the same for both fixed
		// and dynamic encoding), if we need to compare those two encodings
		// against stored encoding.
		for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
			// First eight length codes have extra size = 0.
			extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
		}
		for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
			// First four offset codes have extra size = 0.
			extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
		}
	}

	// Figure out smallest code.
	// Fixed Huffman baseline.
	var literalEncoding = fixedLiteralEncoding
	var offsetEncoding = fixedOffsetEncoding
	var size = w.fixedSize(extraBits)

	// Dynamic Huffman?
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	// Note: numCodegens is re-used (not shadowed) by this short declaration.
	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
	if dynamicSize < size {
		size = dynamicSize
		literalEncoding = w.literalEncoding
		offsetEncoding = w.offsetEncoding
	}

	// Stored bytes?
	if storable && storedSize < size {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	if literalEncoding == fixedLiteralEncoding {
		w.writeFixedHeader(eof)
	} else {
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	}

	// Write the tokens.
	w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
}
// writeBlockDynamic encodes a block using a dynamic Huffman table.
// This should be used if the symbols used have a disproportionate
// histogram distribution.
// If input is supplied and the compression savings are below 1/16th of the
// input size the block is stored.
func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens = append(tokens, endBlockMarker)
	numLiterals, numOffsets := w.indexTokens(tokens)

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)

	// Store bytes, if we don't get a reasonable improvement.
	// (ssize < size + size/16, i.e. compression saves less than 1/16th.)
	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Write Huffman table.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)

	// Write the tokens.
	w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
}
// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
	clear(w.literalFreq)
	clear(w.offsetFreq)

	// Accumulate symbol frequencies for literals and matches.
	for _, tok := range tokens {
		if tok < matchType {
			w.literalFreq[tok.literal()]++
		} else {
			w.literalFreq[lengthCodesStart+lengthCode(tok.length())]++
			w.offsetFreq[offsetCode(tok.offset())]++
		}
	}

	// Trim trailing zero frequencies to find the used table sizes.
	numLiterals = len(w.literalFreq)
	for w.literalFreq[numLiterals-1] == 0 {
		numLiterals--
	}
	numOffsets = len(w.offsetFreq)
	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
		numOffsets--
	}
	if numOffsets == 0 {
		// We haven't found a single match. If we want to go with the dynamic encoding,
		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
		w.offsetFreq[0] = 1
		numOffsets = 1
	}

	w.literalEncoding.generate(w.literalFreq, 15)
	w.offsetEncoding.generate(w.offsetFreq, 15)
	return numLiterals, numOffsets
}
// writeTokens writes a slice of tokens to the output.
// codes for literal and offset encoding must be supplied.
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
	if w.err != nil {
		return
	}
	for _, t := range tokens {
		if t < matchType {
			w.writeCode(leCodes[t.literal()])
			continue
		}
		// Write the length
		length := t.length()
		// NOTE: this local shadows the package-level lengthCode function
		// for the rest of the loop body.
		lengthCode := lengthCode(length)
		w.writeCode(leCodes[lengthCode+lengthCodesStart])
		extraLengthBits := uint(lengthExtraBits[lengthCode])
		if extraLengthBits > 0 {
			extraLength := int32(length - lengthBase[lengthCode])
			w.writeBits(extraLength, extraLengthBits)
		}
		// Write the offset
		offset := t.offset()
		// NOTE: this local shadows the package-level offsetCode function
		// for the rest of the loop body.
		offsetCode := offsetCode(offset)
		w.writeCode(oeCodes[offsetCode])
		extraOffsetBits := uint(offsetExtraBits[offsetCode])
		if extraOffsetBits > 0 {
			extraOffset := int32(offset - offsetBase[offsetCode])
			w.writeBits(extraOffset, extraOffsetBits)
		}
	}
}
// huffOffset is a static offset encoder used for huffman only encoding.
// It can be reused since we will not be encoding offset values.
var huffOffset *huffmanEncoder

// init builds huffOffset once: a degenerate offset encoder in which only
// offset code 0 has a non-zero frequency.
func init() {
	offsetFreq := make([]int32, offsetCodeCount)
	offsetFreq[0] = 1
	huffOffset = newHuffmanEncoder(offsetCodeCount)
	huffOffset.generate(offsetFreq, 15)
}
// writeBlockHuff encodes a block of bytes as either
// Huffman encoded literals or uncompressed bytes if the
// results only gains very little from compression.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
	if w.err != nil {
		return
	}

	// Clear histogram
	clear(w.literalFreq)

	// Add everything as literals
	histogram(input, w.literalFreq)

	w.literalFreq[endBlockMarker] = 1

	const numLiterals = endBlockMarker + 1
	w.offsetFreq[0] = 1
	const numOffsets = 1

	w.literalEncoding.generate(w.literalFreq, 15)

	// Figure out smallest code.
	// Always use dynamic Huffman or Store
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)

	// Store bytes, if we don't get a reasonable improvement.
	// (ssize < size + size/16, i.e. compression saves less than 1/16th.)
	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	encoding := w.literalEncoding.codes[:257]
	n := w.nbytes
	for _, t := range input {
		// Bitwriting inlined, ~30% speedup
		c := encoding[t]
		w.bits |= uint64(c.code) << w.nbits
		w.nbits += uint(c.len)
		if w.nbits < 48 {
			continue
		}
		// Store 6 bytes
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n < bufferFlushSize {
			continue
		}
		w.write(w.bytes[:n])
		if w.err != nil {
			return // Return early in the event of write failures
		}
		n = 0
	}
	w.nbytes = n
	w.writeCode(encoding[endBlockMarker])
}
// histogram accumulates a histogram of b in h.
//
// len(h) must be >= 256, and h's elements must be all zeroes.
func histogram(b []byte, h []int32) {
	counts := h[:256]
	for _, c := range b {
		counts[c]++
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"math"
"math/bits"
"sort"
)
// hcode is a huffman code with a bit code and bit length.
// The code is stored bit-reversed (see reverseBits), ready to be
// emitted least-significant-bit first by the bit writer.
type hcode struct {
	code, len uint16
}
// huffmanEncoder holds the generated codes for a symbol alphabet plus
// scratch storage reused across calls to generate.
type huffmanEncoder struct {
	codes     []hcode       // codes indexed by symbol value
	freqcache []literalNode // reusable list of symbols with non-zero frequency
	bitCount  [17]int32     // scratch: number of codes per bit length
	lns       byLiteral     // stored to avoid repeated allocation in generate
	lfs       byFreq        // stored to avoid repeated allocation in generate
}
// literalNode pairs a symbol with its frequency.
type literalNode struct {
	literal uint16 // symbol value
	freq    int32  // number of occurrences
}
// A levelInfo describes the state of the constructed tree for a given depth.
type levelInfo struct {
	// Our level, for better printing.
	level int32

	// The frequency of the last node at this level
	lastFreq int32

	// The frequency of the next character to add to this level
	nextCharFreq int32

	// The frequency of the next pair (from level below) to add to this level.
	// Only valid if the "needed" value of the next lower level is 0.
	nextPairFreq int32

	// The number of chains remaining to generate for this level before moving
	// up to the next level
	needed int32
}
// set sets the code and length of an hcode.
func (h *hcode) set(code uint16, length uint16) {
	h.code = code
	h.len = length
}
func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
// newHuffmanEncoder returns an encoder with room for size symbol codes.
func newHuffmanEncoder(size int) *huffmanEncoder {
	h := new(huffmanEncoder)
	h.codes = make([]hcode, size)
	return h
}
// generateFixedLiteralEncoding generates the Huffman code corresponding to
// the fixed literal table of RFC 1951 section 3.2.6.
func generateFixedLiteralEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(maxNumLit)
	for sym := uint16(0); sym < maxNumLit; sym++ {
		var code, nbits uint16
		switch {
		case sym < 144:
			// size 8, 00110000 .. 10111111
			code = sym + 48
			nbits = 8
		case sym < 256:
			// size 9, 110010000 .. 111111111
			code = sym - 144 + 400
			nbits = 9
		case sym < 280:
			// size 7, 0000000 .. 0010111
			code = sym - 256
			nbits = 7
		default:
			// size 8, 11000000 .. 11000111
			code = sym - 280 + 192
			nbits = 8
		}
		h.codes[sym] = hcode{code: reverseBits(code, byte(nbits)), len: nbits}
	}
	return h
}
// generateFixedOffsetEncoding generates the fixed 5-bit offset code table.
func generateFixedOffsetEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(30)
	for code := 0; code < len(h.codes); code++ {
		h.codes[code] = hcode{code: reverseBits(uint16(code), 5), len: 5}
	}
	return h
}
var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()
// bitLength returns the total number of bits needed to encode data with
// the given symbol frequencies using this encoder's codes.
func (h *huffmanEncoder) bitLength(freq []int32) int {
	total := 0
	for sym, f := range freq {
		if f == 0 {
			continue
		}
		total += int(f) * int(h.codes[sym].len)
	}
	return total
}
const maxBitsLimit = 16
// bitCounts computes the number of literals assigned to each bit size in the Huffman encoding.
// It is only called when list.length >= 3.
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list is an array of the literals with non-zero frequencies
// and their associated frequencies. The array is in order of increasing
// frequency and has as its last element a special element with frequency
// MaxInt32.
//
// maxBits is the maximum number of bits that should be used to encode any literal.
// It must be less than 16.
//
// bitCounts returns an integer slice in which slice[i] indicates the number of literals
// that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
	if maxBits >= maxBitsLimit {
		panic("flate: maxBits too large")
	}
	n := int32(len(list))
	// Append the sentinel node (frequency MaxInt32).
	list = list[0 : n+1]
	list[n] = maxNode()

	// The tree can't have greater depth than n - 1, no matter what. This
	// saves a little bit of work in some small cases
	if maxBits > n-1 {
		maxBits = n - 1
	}

	// Create information about each of the levels.
	// A bogus "Level 0" whose sole purpose is so that
	// level1.prev.needed==0. This makes level1.nextPairFreq
	// be a legitimate value that never gets chosen.
	var levels [maxBitsLimit]levelInfo
	// leafCounts[i] counts the number of literals at the left
	// of ancestors of the rightmost node at level i.
	// leafCounts[i][j] is the number of literals at the left
	// of the level j ancestor.
	var leafCounts [maxBitsLimit][maxBitsLimit]int32

	for level := int32(1); level <= maxBits; level++ {
		// For every level, the first two items are the first two characters.
		// We initialize the levels as if we had already figured this out.
		levels[level] = levelInfo{
			level:        level,
			lastFreq:     list[1].freq,
			nextCharFreq: list[2].freq,
			nextPairFreq: list[0].freq + list[1].freq,
		}
		leafCounts[level][level] = 2
		if level == 1 {
			levels[level].nextPairFreq = math.MaxInt32
		}
	}

	// We need a total of 2*n - 2 items at top level and have already generated 2.
	levels[maxBits].needed = 2*n - 4

	level := maxBits
	for {
		l := &levels[level]
		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
			// We've run out of both leaves and pairs.
			// End all calculations for this level.
			// To make sure we never come back to this level or any lower level,
			// set nextPairFreq impossibly large.
			l.needed = 0
			levels[level+1].nextPairFreq = math.MaxInt32
			level++
			continue
		}

		prevFreq := l.lastFreq
		if l.nextCharFreq < l.nextPairFreq {
			// The next item on this row is a leaf node.
			n := leafCounts[level][level] + 1
			l.lastFreq = l.nextCharFreq
			// Lower leafCounts are the same of the previous node.
			leafCounts[level][level] = n
			l.nextCharFreq = list[n].freq
		} else {
			// The next item on this row is a pair from the previous row.
			// nextPairFreq isn't valid until we generate two
			// more values in the level below
			l.lastFreq = l.nextPairFreq
			// Take leaf counts from the lower level, except counts[level] remains the same.
			copy(leafCounts[level][:level], leafCounts[level-1][:level])
			levels[l.level-1].needed = 2
		}

		if l.needed--; l.needed == 0 {
			// We've done everything we need to do for this level.
			// Continue calculating one level up. Fill in nextPairFreq
			// of that level with the sum of the two nodes we've just calculated on
			// this level.
			if l.level == maxBits {
				// All done!
				break
			}
			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
			level++
		} else {
			// If we stole from below, move down temporarily to replenish it.
			for levels[level-1].needed > 0 {
				level--
			}
		}
	}

	// Something is wrong if at the end, the top level is null or hasn't used
	// all of the leaves.
	if leafCounts[maxBits][maxBits] != n {
		panic("leafCounts[maxBits][maxBits] != n")
	}

	bitCount := h.bitCount[:maxBits+1]
	bits := 1
	counts := &leafCounts[maxBits]
	for level := maxBits; level > 0; level-- {
		// chain.leafCount gives the number of literals requiring at least "bits"
		// bits to encode.
		bitCount[bits] = counts[level] - counts[level-1]
		bits++
	}
	return bitCount
}
// Look at the leaves and assign them a bit count and an encoding as specified
// in RFC 1951 3.2.2
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
	code := uint16(0)
	for n, bits := range bitCount {
		code <<= 1
		if n == 0 || bits == 0 {
			continue
		}
		// The literals list[len(list)-bits] .. list[len(list)-1]
		// are encoded using "bits" bits, and get the values
		// code, code + 1, .... The code values are
		// assigned in literal order (not frequency order).
		chunk := list[len(list)-int(bits):]

		h.lns.sort(chunk)
		for _, node := range chunk {
			h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
			code++
		}
		list = list[0 : len(list)-int(bits)]
	}
}
// Update this Huffman Code object to be the minimum code for the specified frequency count.
//
// freq is an array of frequencies, in which freq[i] gives the frequency of literal i.
// maxBits is the maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
	if h.freqcache == nil {
		// Allocate a reusable buffer with the longest possible frequency table.
		// Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
		// The largest of these is maxNumLit, so we allocate for that case.
		h.freqcache = make([]literalNode, maxNumLit+1)
	}
	list := h.freqcache[:len(freq)+1]
	// Number of non-zero literals
	count := 0
	// Set list to be the set of all non-zero literals and their frequencies
	for i, f := range freq {
		if f != 0 {
			list[count] = literalNode{uint16(i), f}
			count++
		} else {
			// Unused symbols get a zero code length.
			h.codes[i].len = 0
		}
	}
	list = list[:count]
	if count <= 2 {
		// Handle the small cases here, because they are awkward for the general case code. With
		// two or fewer literals, everything has bit length 1.
		for i, node := range list {
			// "list" is in order of increasing literal value.
			h.codes[node.literal].set(uint16(i), 1)
		}
		return
	}
	h.lfs.sort(list)

	// Get the number of literals for each bit count
	bitCount := h.bitCounts(list, maxBits)
	// And do the assignment
	h.assignEncodingAndSize(bitCount, list)
}
// byLiteral sorts literalNodes by literal value.
type byLiteral []literalNode

// sort sorts a in place, reusing this sorter to avoid allocation.
func (s *byLiteral) sort(a []literalNode) {
	*s = byLiteral(a)
	sort.Sort(s)
}

func (s byLiteral) Len() int { return len(s) }

func (s byLiteral) Less(i, j int) bool { return s[i].literal < s[j].literal }

func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// byFreq sorts literalNodes by frequency, breaking ties by literal value.
type byFreq []literalNode

// sort sorts a in place, reusing this sorter to avoid allocation.
func (s *byFreq) sort(a []literalNode) {
	*s = byFreq(a)
	sort.Sort(s)
}

func (s byFreq) Len() int { return len(s) }

func (s byFreq) Less(i, j int) bool {
	if s[i].freq != s[j].freq {
		return s[i].freq < s[j].freq
	}
	return s[i].literal < s[j].literal
}

func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// reverseBits returns the low bitLength bits of number in reversed order.
func reverseBits(number uint16, bitLength byte) uint16 {
	shifted := number << (16 - bitLength)
	return bits.Reverse16(shifted)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package flate implements the DEFLATE compressed data format, described in
// RFC 1951. The [compress/gzip] and [compress/zlib] packages implement access
// to DEFLATE-based file formats.
package flate
import (
"bufio"
"io"
"math/bits"
"strconv"
"sync"
)
const (
	maxCodeLen = 16 // max length of Huffman code
	// The next three numbers come from the RFC section 3.2.7, with the
	// additional proviso in section 3.2.5 which implies that distance codes
	// 30 and 31 should never occur in compressed data.
	maxNumLit  = 286 // maximum number of literal/length codes
	maxNumDist = 30  // maximum number of distance codes
	numCodes   = 19  // number of codes in Huffman meta-code
)
// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder // decoder used for fixed Huffman blocks (BTYPE=01)
// A CorruptInputError reports the presence of corrupt input at a given offset.
type CorruptInputError int64

// Error implements the error interface.
func (e CorruptInputError) Error() string {
	offset := strconv.FormatInt(int64(e), 10)
	return "flate: corrupt input before offset " + offset
}
// An InternalError reports an error in the flate code itself.
type InternalError string

// Error implements the error interface.
func (e InternalError) Error() string {
	return "flate: internal error: " + string(e)
}
// A ReadError reports an error encountered while reading input.
//
// Deprecated: No longer returned.
type ReadError struct {
Offset int64 // byte offset where error occurred
Err error // error returned by underlying Read
}
func (e *ReadError) Error() string {
return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
}
// A WriteError reports an error encountered while writing output.
//
// Deprecated: No longer returned.
type WriteError struct {
Offset int64 // byte offset where error occurred
Err error // error returned by underlying Write
}
func (e *WriteError) Error() string {
return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
}
// Resetter resets a ReadCloser returned by [NewReader] or [NewReaderDict]
// to switch to a new underlying [Reader]. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
	// Reset discards any buffered data and resets the Resetter as if it was
	// newly initialized with the given reader and optional preset
	// dictionary dict (nil means no dictionary).
	Reset(r io.Reader, dict []byte) error
}
// The data structure for decoding Huffman tables is based on that of
// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
// For codes smaller than the table width, there are multiple entries
// (each combination of trailing bits has the same value). For codes
// larger than the table width, the table contains a link to an overflow
// table. The width of each entry in the link table is the maximum code
// size minus the chunk width.
//
// Note that you can do a lookup in the table even without all bits
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
// have the property that shorter codes come before longer ones, the
// bit length estimate in the result is a lower bound on the actual
// number of bits.
//
// See the following:
// https://github.com/madler/zlib/raw/master/doc/algorithm.txt
// chunk & 15 is number of bits
// chunk >> 4 is value, including table link
const (
	huffmanChunkBits  = 9                     // width in bits of the primary lookup table index
	huffmanNumChunks  = 1 << huffmanChunkBits // number of entries in the primary table
	huffmanCountMask  = 15                    // mask for the low 4 "number of bits" chunk bits
	huffmanValueShift = 4                     // shift to reach the value/link part of a chunk
)
// huffmanDecoder holds the chunked lookup tables described above.
type huffmanDecoder struct {
	min      int                      // the minimum code length
	chunks   [huffmanNumChunks]uint32 // chunks as described above
	links    [][]uint32               // overflow links
	linkMask uint32                   // mask the width of the link table
}
// Initialize Huffman decoding tables from array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
// It reports whether the code lengths form a valid Huffman code.
func (h *huffmanDecoder) init(lengths []int) bool {
	// Sanity enables additional runtime tests during Huffman
	// table construction. It's intended to be used during
	// development to supplement the currently ad-hoc unit tests.
	const sanity = false

	if h.min != 0 {
		*h = huffmanDecoder{}
	}

	// Count number of codes of each length,
	// compute min and max length.
	var count [maxCodeLen]int
	var min, max int
	for _, n := range lengths {
		if n == 0 {
			continue
		}
		if min == 0 || n < min {
			min = n
		}
		if n > max {
			max = n
		}
		count[n]++
	}

	// Empty tree. The decompressor.huffSym function will fail later if the tree
	// is used. Technically, an empty tree is only valid for the HDIST tree and
	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
	// is guaranteed to fail since it will attempt to use the tree to decode the
	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
	// guaranteed to fail later since the compressed data section must be
	// composed of at least one symbol (the end-of-block marker).
	if max == 0 {
		return true
	}

	// Compute the canonical first code of each length (RFC 1951 3.2.2).
	code := 0
	var nextcode [maxCodeLen]int
	for i := min; i <= max; i++ {
		code <<= 1
		nextcode[i] = code
		code += count[i]
	}

	// Check that the coding is complete (i.e., that we've
	// assigned all 2-to-the-max possible bit sequences).
	// Exception: To be compatible with zlib, we also need to
	// accept degenerate single-code codings. See also
	// TestDegenerateHuffmanCoding.
	if code != 1<<uint(max) && !(code == 1 && max == 1) {
		return false
	}

	h.min = min
	if max > huffmanChunkBits {
		numLinks := 1 << (uint(max) - huffmanChunkBits)
		h.linkMask = uint32(numLinks - 1)

		// create link tables
		link := nextcode[huffmanChunkBits+1] >> 1
		h.links = make([][]uint32, huffmanNumChunks-link)
		for j := uint(link); j < huffmanNumChunks; j++ {
			reverse := int(bits.Reverse16(uint16(j)))
			reverse >>= uint(16 - huffmanChunkBits)
			off := j - uint(link)
			if sanity && h.chunks[reverse] != 0 {
				panic("impossible: overwriting existing chunk")
			}
			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
			h.links[off] = make([]uint32, numLinks)
		}
	}

	for i, n := range lengths {
		if n == 0 {
			continue
		}
		code := nextcode[n]
		nextcode[n]++
		chunk := uint32(i<<huffmanValueShift | n)
		// Codes are stored bit-reversed because the stream is read LSB first.
		reverse := int(bits.Reverse16(uint16(code)))
		reverse >>= uint(16 - n)
		if n <= huffmanChunkBits {
			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
				// We should never need to overwrite
				// an existing chunk. Also, 0 is
				// never a valid chunk, because the
				// lower 4 "count" bits should be
				// between 1 and 15.
				if sanity && h.chunks[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				h.chunks[off] = chunk
			}
		} else {
			j := reverse & (huffmanNumChunks - 1)
			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
				// Longer codes should have been
				// associated with a link table above.
				panic("impossible: not an indirect chunk")
			}
			value := h.chunks[j] >> huffmanValueShift
			linktab := h.links[value]
			reverse >>= huffmanChunkBits
			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
				if sanity && linktab[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				linktab[off] = chunk
			}
		}
	}

	if sanity {
		// Above we've sanity checked that we never overwrote
		// an existing entry. Here we additionally check that
		// we filled the tables completely.
		for i, chunk := range h.chunks {
			if chunk == 0 {
				// As an exception, in the degenerate
				// single-code case, we allow odd
				// chunks to be missing.
				if code == 1 && i%2 == 1 {
					continue
				}
				panic("impossible: missing chunk")
			}
		}
		for _, linktab := range h.links {
			for _, chunk := range linktab {
				if chunk == 0 {
					panic("impossible: missing chunk")
				}
			}
		}
	}

	return true
}
// Reader is the actual read interface needed by [NewReader].
// If the passed in [io.Reader] does not also have ReadByte,
// the [NewReader] will introduce its own buffering.
type Reader interface {
	io.Reader
	io.ByteReader
}
// Decompress state.
type decompressor struct {
	// Input source.
	r       Reader
	rBuf    *bufio.Reader // created if provided io.Reader does not implement io.ByteReader
	roffset int64         // number of input bytes consumed so far

	// Input bits, in top of b.
	b  uint32
	nb uint

	// Huffman decoders for literal/length, distance.
	h1, h2 huffmanDecoder

	// Length arrays used to define Huffman codes.
	bits     *[maxNumLit + maxNumDist]int
	codebits *[numCodes]int

	// Output history, buffer.
	dict dictDecoder

	// Temporary buffer (avoids repeated allocation).
	buf [4]byte

	// Next step in the decompression,
	// and decompression state.
	step      func(*decompressor)
	stepState int             // sub-state used by huffmanBlock to resume after a flush
	final     bool            // current block had the BFINAL flag set
	err       error           // sticky error
	toRead    []byte          // decompressed bytes waiting to be returned by Read
	hl, hd    *huffmanDecoder // current literal/length and distance decoders
	copyLen   int             // remaining length of an in-progress copy
	copyDist  int             // backward distance of an in-progress copy
}
// nextBlock reads the 3-bit block header (BFINAL and BTYPE, RFC 1951
// section 3.2.3) and dispatches to the matching block decoder.
func (f *decompressor) nextBlock() {
	for f.nb < 1+2 {
		if f.err = f.moreBits(); f.err != nil {
			return
		}
	}
	f.final = f.b&1 == 1
	f.b >>= 1
	typ := f.b & 3
	f.b >>= 2
	f.nb -= 1 + 2
	switch typ {
	case 0:
		// stored (uncompressed) block
		f.dataBlock()
	case 1:
		// compressed, fixed Huffman tables
		f.hl = &fixedHuffmanDecoder
		f.hd = nil
		f.huffmanBlock()
	case 2:
		// compressed, dynamic Huffman tables
		if f.err = f.readHuffman(); f.err != nil {
			break
		}
		f.hl = &f.h1
		f.hd = &f.h2
		f.huffmanBlock()
	default:
		// 3 is reserved.
		f.err = CorruptInputError(f.roffset)
	}
}
// Read implements io.Reader. It returns buffered decompressed bytes if any,
// otherwise it advances the decompression state machine until output (or an
// error) is available. A sticky error is only reported once toRead is drained.
func (f *decompressor) Read(b []byte) (int, error) {
	for {
		if len(f.toRead) > 0 {
			n := copy(b, f.toRead)
			f.toRead = f.toRead[n:]
			if len(f.toRead) == 0 {
				// All buffered output delivered; surface any pending error now.
				return n, f.err
			}
			return n, nil
		}
		if f.err != nil {
			return 0, f.err
		}
		f.step(f)
		if f.err != nil && len(f.toRead) == 0 {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
		}
	}
}
// Close implements io.Closer. A clean end of stream (io.EOF) is reported
// as success; any other sticky error is returned.
func (f *decompressor) Close() error {
	err := f.err
	if err == io.EOF {
		return nil
	}
	return err
}
// RFC 1951 section 3.2.7.
// Compression with dynamic Huffman codes
//
// codeOrder is the order in which the code-length code lengths appear
// in the stream.
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
// readHuffman reads the dynamic Huffman code description that precedes a
// BTYPE=10 block (RFC 1951 section 3.2.7) and initializes f.h1 (lit/length)
// and f.h2 (distance).
func (f *decompressor) readHuffman() error {
	// HLIT[5], HDIST[5], HCLEN[4].
	for f.nb < 5+5+4 {
		if err := f.moreBits(); err != nil {
			return err
		}
	}
	nlit := int(f.b&0x1F) + 257
	if nlit > maxNumLit {
		return CorruptInputError(f.roffset)
	}
	f.b >>= 5
	ndist := int(f.b&0x1F) + 1
	if ndist > maxNumDist {
		return CorruptInputError(f.roffset)
	}
	f.b >>= 5
	nclen := int(f.b&0xF) + 4
	// numCodes is 19, so nclen is always valid.
	f.b >>= 4
	f.nb -= 5 + 5 + 4

	// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
	for i := 0; i < nclen; i++ {
		for f.nb < 3 {
			if err := f.moreBits(); err != nil {
				return err
			}
		}
		f.codebits[codeOrder[i]] = int(f.b & 0x7)
		f.b >>= 3
		f.nb -= 3
	}
	// Remaining code-length codes are implicitly zero.
	for i := nclen; i < len(codeOrder); i++ {
		f.codebits[codeOrder[i]] = 0
	}
	if !f.h1.init(f.codebits[0:]) {
		return CorruptInputError(f.roffset)
	}

	// HLIT + 257 code lengths, HDIST + 1 code lengths,
	// using the code length Huffman code.
	for i, n := 0, nlit+ndist; i < n; {
		x, err := f.huffSym(&f.h1)
		if err != nil {
			return err
		}
		if x < 16 {
			// Actual length.
			f.bits[i] = x
			i++
			continue
		}
		// Repeat previous length or zero.
		var rep int
		var nb uint
		var b int
		switch x {
		default:
			return InternalError("unexpected length code")
		case 16:
			rep = 3
			nb = 2
			if i == 0 {
				// Code 16 repeats the previous length; there is none yet.
				return CorruptInputError(f.roffset)
			}
			b = f.bits[i-1]
		case 17:
			rep = 3
			nb = 3
			b = 0
		case 18:
			rep = 11
			nb = 7
			b = 0
		}
		for f.nb < nb {
			if err := f.moreBits(); err != nil {
				return err
			}
		}
		rep += int(f.b & uint32(1<<nb-1))
		f.b >>= nb
		f.nb -= nb
		if i+rep > n {
			return CorruptInputError(f.roffset)
		}
		for j := 0; j < rep; j++ {
			f.bits[i] = b
			i++
		}
	}

	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
		return CorruptInputError(f.roffset)
	}

	// As an optimization, we can initialize the min bits to read at a time
	// for the HLIT tree to the length of the EOB marker since we know that
	// every block must terminate with one. This preserves the property that
	// we never read any extra bytes after the end of the DEFLATE stream.
	if f.h1.min < f.bits[endBlockMarker] {
		f.h1.min = f.bits[endBlockMarker]
	}

	return nil
}
// Decode a single Huffman block from f.
// hl and hd are the Huffman states for the lit/length values
// and the distance values, respectively. If hd == nil, using the
// fixed distance encoding associated with fixed Huffman blocks.
//
// This is a resumable state machine: when the output window fills it
// flushes, records its position in f.stepState, and returns; the next
// call picks up where it left off.
func (f *decompressor) huffmanBlock() {
	const (
		stateInit = iota // Zero value must be stateInit
		stateDict
	)

	switch f.stepState {
	case stateInit:
		goto readLiteral
	case stateDict:
		goto copyHistory
	}

readLiteral:
	// Read literal and/or (length, distance) according to RFC section 3.2.3.
	{
		v, err := f.huffSym(f.hl)
		if err != nil {
			f.err = err
			return
		}
		var n uint // number of bits extra
		var length int
		switch {
		case v < 256:
			// Plain literal byte.
			f.dict.writeByte(byte(v))
			if f.dict.availWrite() == 0 {
				// Window full: flush and resume at readLiteral.
				f.toRead = f.dict.readFlush()
				f.step = (*decompressor).huffmanBlock
				f.stepState = stateInit
				return
			}
			goto readLiteral
		case v == 256:
			// End-of-block marker.
			f.finishBlock()
			return
		// otherwise, reference to older data
		case v < 265:
			length = v - (257 - 3)
			n = 0
		case v < 269:
			length = v*2 - (265*2 - 11)
			n = 1
		case v < 273:
			length = v*4 - (269*4 - 19)
			n = 2
		case v < 277:
			length = v*8 - (273*8 - 35)
			n = 3
		case v < 281:
			length = v*16 - (277*16 - 67)
			n = 4
		case v < 285:
			length = v*32 - (281*32 - 131)
			n = 5
		case v < maxNumLit:
			length = 258
			n = 0
		default:
			f.err = CorruptInputError(f.roffset)
			return
		}
		if n > 0 {
			for f.nb < n {
				if err = f.moreBits(); err != nil {
					f.err = err
					return
				}
			}
			length += int(f.b & uint32(1<<n-1))
			f.b >>= n
			f.nb -= n
		}

		var dist int
		if f.hd == nil {
			// Fixed blocks: 5-bit distance codes, stored most-significant
			// bit first, hence the bit reversal.
			for f.nb < 5 {
				if err = f.moreBits(); err != nil {
					f.err = err
					return
				}
			}
			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
			f.b >>= 5
			f.nb -= 5
		} else {
			if dist, err = f.huffSym(f.hd); err != nil {
				f.err = err
				return
			}
		}

		switch {
		case dist < 4:
			dist++
		case dist < maxNumDist:
			nb := uint(dist-2) >> 1
			// have 1 bit in bottom of dist, need nb more.
			extra := (dist & 1) << nb
			for f.nb < nb {
				if err = f.moreBits(); err != nil {
					f.err = err
					return
				}
			}
			extra |= int(f.b & uint32(1<<nb-1))
			f.b >>= nb
			f.nb -= nb
			dist = 1<<(nb+1) + 1 + extra
		default:
			f.err = CorruptInputError(f.roffset)
			return
		}

		// No check on length; encoding can be prescient.
		if dist > f.dict.histSize() {
			f.err = CorruptInputError(f.roffset)
			return
		}

		f.copyLen, f.copyDist = length, dist
		goto copyHistory
	}

copyHistory:
	// Perform a backwards copy according to RFC section 3.2.3.
	{
		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
		if cnt == 0 {
			cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
		}
		f.copyLen -= cnt

		if f.dict.availWrite() == 0 || f.copyLen > 0 {
			f.toRead = f.dict.readFlush()
			f.step = (*decompressor).huffmanBlock // We need to continue this work
			f.stepState = stateDict
			return
		}
		goto readLiteral
	}
}
// dataBlock copies a single stored (uncompressed) data block from input to
// output (RFC 1951, section 3.2.4). The 4-byte header holds LEN and its
// ones' complement NLEN; a mismatch marks the stream as corrupt.
func (f *decompressor) dataBlock() {
	// A stored block starts on a byte boundary, so drop buffered bits.
	f.nb = 0
	f.b = 0

	// Read LEN and NLEN, each a little-endian uint16.
	count, err := io.ReadFull(f.r, f.buf[0:4])
	f.roffset += int64(count)
	if err != nil {
		f.err = noEOF(err)
		return
	}
	length := int(f.buf[0]) | int(f.buf[1])<<8
	check := int(f.buf[2]) | int(f.buf[3])<<8
	if uint16(check) != uint16(^length) {
		f.err = CorruptInputError(f.roffset)
		return
	}

	if length == 0 {
		// Empty stored block: flush pending output and finish.
		f.toRead = f.dict.readFlush()
		f.finishBlock()
		return
	}
	f.copyLen = length
	f.copyData()
}
// copyData copies f.copyLen bytes from the underlying reader into the
// window, pausing (after a flush) whenever the window fills, and resuming
// via f.step on the next call.
func (f *decompressor) copyData() {
	space := f.dict.writeSlice()
	if f.copyLen < len(space) {
		space = space[:f.copyLen]
	}
	n, err := io.ReadFull(f.r, space)
	f.roffset += int64(n)
	f.copyLen -= n
	f.dict.writeMark(n)
	if err != nil {
		f.err = noEOF(err)
		return
	}
	if f.dict.availWrite() == 0 || f.copyLen > 0 {
		// More to do: flush and arrange to re-enter copyData.
		f.toRead = f.dict.readFlush()
		f.step = (*decompressor).copyData
		return
	}
	f.finishBlock()
}
// finishBlock wraps up the current block and hands control back to
// nextBlock. On the final block it also surfaces any remaining window
// contents and records io.EOF as the stream's terminal state.
func (f *decompressor) finishBlock() {
	f.step = (*decompressor).nextBlock
	if !f.final {
		return
	}
	if f.dict.availRead() > 0 {
		f.toRead = f.dict.readFlush()
	}
	f.err = io.EOF
}
// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
func noEOF(e error) error {
if e == io.EOF {
return io.ErrUnexpectedEOF
}
return e
}
// moreBits pulls one more byte from the input and appends its bits to the
// bit accumulator f.b (low bits first). A short read is reported as
// io.ErrUnexpectedEOF via noEOF.
func (f *decompressor) moreBits() error {
	b, err := f.r.ReadByte()
	if err != nil {
		return noEOF(err)
	}
	f.roffset++
	f.b, f.nb = f.b|uint32(b)<<f.nb, f.nb+8
	return nil
}
// Read the next Huffman-encoded symbol from f according to h.
// It consumes exactly as many input bits as the symbol's code length
// and leaves any remaining bits buffered in f.b/f.nb.
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
	// Since a huffmanDecoder can be empty or be composed of a degenerate tree
	// with single element, huffSym must error on these two edge cases. In both
	// cases, the chunks slice will be 0 for the invalid sequence, leading it
	// to satisfy the n == 0 check below.
	n := uint(h.min)
	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
	// but is smart enough to keep local variables in registers, so use nb and b,
	// inline call to moreBits and reassign b,nb back to f on return.
	nb, b := f.nb, f.b
	for {
		for nb < n {
			// Inlined moreBits: fetch one byte or fail with unexpected EOF,
			// writing the bit state back to f before returning.
			c, err := f.r.ReadByte()
			if err != nil {
				f.b = b
				f.nb = nb
				return 0, noEOF(err)
			}
			f.roffset++
			b |= uint32(c) << (nb & 31)
			nb += 8
		}
		// Primary table lookup keyed by the low input bits.
		chunk := h.chunks[b&(huffmanNumChunks-1)]
		n = uint(chunk & huffmanCountMask)
		if n > huffmanChunkBits {
			// Long code: follow the secondary (link) table using the
			// next input bits beyond the primary chunk.
			chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
			n = uint(chunk & huffmanCountMask)
		}
		if n <= nb {
			if n == 0 {
				// Invalid code (empty or degenerate decoder): corrupt input.
				f.b = b
				f.nb = nb
				f.err = CorruptInputError(f.roffset)
				return 0, f.err
			}
			// Consume the n code bits and return the decoded value.
			f.b = b >> (n & 31)
			f.nb = nb - n
			return int(chunk >> huffmanValueShift), nil
		}
	}
}
// makeReader installs r as the decompressor's input source. If r already
// satisfies the flate Reader interface it is used directly; otherwise it
// is wrapped in a bufio.Reader, which is reused across Resets.
func (f *decompressor) makeReader(r io.Reader) {
	if byteReader, ok := r.(Reader); ok {
		f.rBuf = nil
		f.r = byteReader
		return
	}
	// Reuse rBuf if possible. Invariant: rBuf is always created (and owned) by decompressor.
	if f.rBuf == nil {
		// bufio.NewReader will not return r, as r does not implement flate.Reader, so it is not bufio.Reader.
		f.rBuf = bufio.NewReader(r)
	} else {
		f.rBuf.Reset(r)
	}
	f.r = f.rBuf
}
// fixedHuffmanDecoderInit lazily builds the decoder for the fixed Huffman
// code. The code lengths come from RFC 1951, section 3.2.6: 8 bits for
// symbols 0-143 and 280-287, 9 bits for 144-255, and 7 bits for 256-279.
func fixedHuffmanDecoderInit() {
	fixedOnce.Do(func() {
		var lengths [288]int
		for i := range lengths {
			switch {
			case i < 144:
				lengths[i] = 8
			case i < 256:
				lengths[i] = 9
			case i < 280:
				lengths[i] = 7
			default:
				lengths[i] = 8
			}
		}
		fixedHuffmanDecoder.init(lengths[:])
	})
}
// Reset discards the decompressor's state while keeping its large reusable
// buffers (read buffer, bit tables, window), then prepares it to read a new
// DEFLATE stream from r primed with the given dictionary. It always
// returns nil.
func (f *decompressor) Reset(r io.Reader, dict []byte) error {
	*f = decompressor{
		step:     (*decompressor).nextBlock,
		rBuf:     f.rBuf,
		dict:     f.dict,
		bits:     f.bits,
		codebits: f.codebits,
	}
	f.makeReader(r)
	f.dict.init(maxMatchOffset, dict)
	return nil
}
// NewReader returns a new ReadCloser that can be used
// to read the uncompressed version of r.
// If r does not also implement [io.ByteReader],
// the decompressor may read more data than necessary from r.
// The reader returns [io.EOF] after the final block in the DEFLATE stream has
// been encountered. Any trailing data after the final block is ignored.
//
// The [io.ReadCloser] returned by NewReader also implements [Resetter].
func NewReader(r io.Reader) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
f.makeReader(r)
f.bits = new([maxNumLit + maxNumDist]int)
f.codebits = new([numCodes]int)
f.step = (*decompressor).nextBlock
f.dict.init(maxMatchOffset, nil)
return &f
}
// NewReaderDict is like [NewReader] but initializes the reader
// with a preset dictionary. The returned reader behaves as if
// the uncompressed data stream started with the given dictionary,
// which has already been read. NewReaderDict is typically used
// to read data compressed by [NewWriterDict].
//
// The ReadCloser returned by NewReaderDict also implements [Resetter].
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
	fixedHuffmanDecoderInit()

	f := &decompressor{
		bits:     new([maxNumLit + maxNumDist]int),
		codebits: new([numCodes]int),
		step:     (*decompressor).nextBlock,
	}
	f.makeReader(r)
	f.dict.init(maxMatchOffset, dict)
	return f
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// Token layout constants. A token is a uint32 packed as described below.
const (
	// 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
	// 8 bits: xlength = length - MIN_MATCH_LENGTH
	// 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
	lengthShift = 22                 // bit position of the extra-length field
	offsetMask  = 1<<lengthShift - 1 // mask for the 22-bit offset/literal field
	typeMask    = 3 << 30            // mask for the 2-bit token type
	literalType = 0 << 30            // token carries a literal byte
	matchType   = 1 << 30            // token carries a (length, offset) match
)
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is lengthCodes[length - MIN_MATCH_LENGTH].
// The table mirrors the DEFLATE length-code assignment of RFC 1951,
// section 3.2.5 (codes 257-285, stored here biased down by 257).
var lengthCodes = [...]uint32{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 28,
}
// offsetCodes maps a (zero-based) match offset below 256 to its DEFLATE
// distance code (RFC 1951, section 3.2.5). Larger offsets are handled by
// offsetCode, which reindexes into this table with a shift and a bias.
var offsetCodes = [...]uint32{
	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
}
type token uint32
// literalToken converts a literal byte value into a literal-type token.
func literalToken(literal uint32) token {
	return token(literalType + literal)
}
// matchToken packs an extra-length xlength and extra-offset xoffset pair
// into a match-type token.
func matchToken(xlength uint32, xoffset uint32) token {
	packed := matchType + xlength<<lengthShift
	return token(packed + xoffset)
}
// literal returns the literal byte value carried by a literal token.
func (t token) literal() uint32 {
	return uint32(t - literalType)
}
// offset returns the extra-offset field of a match token.
func (t token) offset() uint32 {
	return uint32(t) & offsetMask
}
func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }
func lengthCode(len uint32) uint32 { return lengthCodes[len] }
// offsetCode returns the offset code corresponding to a specific offset.
// Small offsets index the table directly; larger ones reuse the table on
// off>>7 and off>>14 with biases of 14 and 28 respectively.
func offsetCode(off uint32) uint32 {
	switch {
	case off < uint32(len(offsetCodes)):
		return offsetCodes[off]
	case off>>7 < uint32(len(offsetCodes)):
		return offsetCodes[off>>7] + 14
	default:
		return offsetCodes[off>>14] + 28
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gzip implements reading and writing of gzip format compressed files,
// as specified in RFC 1952.
package gzip
import (
"bufio"
"compress/flate"
"encoding/binary"
"errors"
"hash/crc32"
"io"
"time"
)
// Header magic numbers and FLG bit flags from RFC 1952, section 2.3.
const (
	gzipID1     = 0x1f // first magic byte of a gzip member
	gzipID2     = 0x8b // second magic byte
	gzipDeflate = 8    // CM field: DEFLATE is the only supported method

	flagText    = 1 << 0 // FTEXT: file is probably ASCII text (unused here)
	flagHdrCrc  = 1 << 1 // FHCRC: header is followed by a CRC16
	flagExtra   = 1 << 2 // FEXTRA: extra field is present
	flagName    = 1 << 3 // FNAME: original file name is present
	flagComment = 1 << 4 // FCOMMENT: file comment is present
)

var (
	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
	ErrChecksum = errors.New("gzip: invalid checksum")
	// ErrHeader is returned when reading GZIP data that has an invalid header.
	ErrHeader = errors.New("gzip: invalid header")
)

// le is shorthand for the little-endian byte order gzip uses throughout.
var le = binary.LittleEndian
// noEOF converts io.EOF to io.ErrUnexpectedEOF.
func noEOF(err error) error {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
// The gzip file stores a header giving metadata about the compressed file.
// That header is exposed as the fields of the [Writer] and [Reader] structs.
//
// Strings must be UTF-8 encoded and may only contain Unicode code points
// U+0001 through U+00FF, due to limitations of the GZIP file format.
type Header struct {
	Comment string    // comment
	Extra   []byte    // "extra data"
	ModTime time.Time // modification time; the zero value means unset
	Name    string    // file name
	OS      byte      // operating system type
}
// A Reader is an [io.Reader] that can be read to retrieve
// uncompressed data from a gzip-format compressed file.
//
// In general, a gzip file can be a concatenation of gzip files,
// each with its own header. Reads from the Reader
// return the concatenation of the uncompressed data of each.
// Only the first header is recorded in the Reader fields.
//
// Gzip files store a length and checksum of the uncompressed data.
// The Reader will return an [ErrChecksum] when [Reader.Read]
// reaches the end of the uncompressed data if it does not
// have the expected length or checksum. Clients should treat data
// returned by [Reader.Read] as tentative until they receive the [io.EOF]
// marking the end of the data.
type Reader struct {
	Header       // valid after NewReader or Reader.Reset
	r            flate.Reader
	decompressor io.ReadCloser
	digest       uint32    // CRC-32, IEEE polynomial (section 8)
	size         uint32    // Uncompressed size (section 2.3.1)
	buf          [512]byte // scratch space for header fields and the trailer
	err          error     // sticky error; once set, all Reads fail
	multistream  bool      // see Multistream
}
// NewReader creates a new [Reader] reading the given reader.
// If r does not also implement [io.ByteReader],
// the decompressor may read more data than necessary from r.
//
// It is the caller's responsibility to call [Reader.Close] when done.
//
// The Reader.[Header] fields will be valid in the [Reader] returned.
func NewReader(r io.Reader) (*Reader, error) {
	var z Reader
	if err := z.Reset(r); err != nil {
		return nil, err
	}
	return &z, nil
}
// Reset discards the [Reader] z's state and makes it equivalent to the
// result of its original state from [NewReader], but reading from r instead.
// This permits reusing a [Reader] rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) error {
	*z = Reader{
		decompressor: z.decompressor,
		multistream:  true,
	}
	// Wrap r so that byte-at-a-time header reads are cheap, unless it
	// already supports them.
	byteReader, ok := r.(flate.Reader)
	if !ok {
		byteReader = bufio.NewReader(r)
	}
	z.r = byteReader
	z.Header, z.err = z.readHeader()
	return z.err
}
// Multistream controls whether the reader supports multistream files.
//
// If enabled (the default), the [Reader] expects the input to be a sequence
// of individually gzipped data streams, each with its own header and
// trailer, ending at EOF. The effect is that the concatenation of a sequence
// of gzipped files is treated as equivalent to the gzip of the concatenation
// of the sequence. This is standard behavior for gzip readers.
//
// Calling Multistream(false) disables this behavior; disabling the behavior
// can be useful when reading file formats that distinguish individual gzip
// data streams or mix gzip data streams with other data streams.
// In this mode, when the [Reader] reaches the end of the data stream,
// [Reader.Read] returns [io.EOF]. The underlying reader must implement [io.ByteReader]
// in order to be left positioned just after the gzip stream.
// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
// If there is no next stream, z.Reset(r) will return [io.EOF].
func (z *Reader) Multistream(ok bool) {
	// The flag is consulted by Read when a stream's trailer is reached.
	z.multistream = ok
}
// readString reads a NUL-terminated string from z.r.
// The bytes are interpreted as ISO 8859-1 (Latin-1) and the result is
// re-encoded as UTF-8. The CRC digest is updated with everything read,
// including the terminating NUL. Strings longer than the scratch buffer
// are rejected with ErrHeader.
func (z *Reader) readString() (string, error) {
	nonASCII := false
	for n := 0; ; n++ {
		if n >= len(z.buf) {
			return "", ErrHeader
		}
		c, err := z.r.ReadByte()
		if err != nil {
			return "", err
		}
		z.buf[n] = c
		if c > 0x7f {
			nonASCII = true
		}
		if c != 0 {
			continue
		}
		// Digest covers the NUL terminator.
		z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:n+1])
		if !nonASCII {
			return string(z.buf[:n]), nil
		}
		// Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1):
		// widen each byte to a rune so the result is valid UTF-8.
		runes := make([]rune, 0, n)
		for _, c := range z.buf[:n] {
			runes = append(runes, rune(c))
		}
		return string(runes), nil
	}
}
// readHeader reads the GZIP header according to section 2.3.1.
// It validates the magic bytes and compression method, consumes the
// optional extra/name/comment/header-CRC fields as indicated by FLG,
// and (re)initializes the flate decompressor for the member's body.
// This method does not set z.err.
func (z *Reader) readHeader() (hdr Header, err error) {
	// Fixed 10-byte prefix: ID1 ID2 CM FLG MTIME(4) XFL OS.
	if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
		// RFC 1952, section 2.2, says the following:
		//	A gzip file consists of a series of "members" (compressed data sets).
		//
		// Other than this, the specification does not clarify whether a
		// "series" is defined as "one or more" or "zero or more". To err on the
		// side of caution, Go interprets this to mean "zero or more".
		// Thus, it is okay to return io.EOF here.
		return hdr, err
	}
	if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
		return hdr, ErrHeader
	}
	flg := z.buf[3]
	if t := int64(le.Uint32(z.buf[4:8])); t > 0 {
		// Section 2.3.1, the zero value for MTIME means that the
		// modified time is not set.
		hdr.ModTime = time.Unix(t, 0)
	}
	// z.buf[8] is XFL and is currently ignored.
	hdr.OS = z.buf[9]
	// The header CRC (FHCRC) covers everything read so far.
	z.digest = crc32.ChecksumIEEE(z.buf[:10])

	if flg&flagExtra != 0 {
		// Extra field: uint16 length followed by that many bytes.
		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
			return hdr, noEOF(err)
		}
		z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
		data := make([]byte, le.Uint16(z.buf[:2]))
		if _, err = io.ReadFull(z.r, data); err != nil {
			return hdr, noEOF(err)
		}
		z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
		hdr.Extra = data
	}

	var s string
	if flg&flagName != 0 {
		if s, err = z.readString(); err != nil {
			return hdr, noEOF(err)
		}
		hdr.Name = s
	}

	if flg&flagComment != 0 {
		if s, err = z.readString(); err != nil {
			return hdr, noEOF(err)
		}
		hdr.Comment = s
	}

	if flg&flagHdrCrc != 0 {
		// FHCRC is the low 16 bits of the CRC-32 of the header so far.
		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
			return hdr, noEOF(err)
		}
		digest := le.Uint16(z.buf[:2])
		if digest != uint16(z.digest) {
			return hdr, ErrHeader
		}
	}

	// Reset the digest for the member body and (re)arm the decompressor.
	z.digest = 0
	if z.decompressor == nil {
		z.decompressor = flate.NewReader(z.r)
	} else {
		z.decompressor.(flate.Resetter).Reset(z.r, nil)
	}
	return hdr, nil
}
// Read implements [io.Reader], reading uncompressed bytes from its underlying reader.
// At the end of each member it verifies the 8-byte trailer (CRC-32 and
// uncompressed size); in multistream mode it then transparently starts the
// next member, otherwise it returns io.EOF.
func (z *Reader) Read(p []byte) (n int, err error) {
	if z.err != nil {
		return 0, z.err
	}

	for n == 0 {
		n, z.err = z.decompressor.Read(p)
		// Track the running checksum and size of the uncompressed data.
		z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
		z.size += uint32(n)
		if z.err != io.EOF {
			// In the normal case we return here.
			return n, z.err
		}

		// Finished file; check checksum and size.
		if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
			z.err = noEOF(err)
			return n, z.err
		}
		digest := le.Uint32(z.buf[:4])
		size := le.Uint32(z.buf[4:8])
		if digest != z.digest || size != z.size {
			z.err = ErrChecksum
			return n, z.err
		}
		z.digest, z.size = 0, 0

		// File is ok; check if there is another.
		if !z.multistream {
			return n, io.EOF
		}
		z.err = nil // Remove io.EOF

		// A header read error here (including io.EOF at a clean end of
		// input) becomes the sticky error for subsequent Reads.
		if _, z.err = z.readHeader(); z.err != nil {
			return n, z.err
		}
	}

	return n, nil
}
// Close closes the [Reader]. It does not close the underlying reader.
// In order for the GZIP checksum to be verified, the reader must be
// fully consumed until the [io.EOF].
func (z *Reader) Close() error {
	// Only the flate decompressor holds closeable state.
	return z.decompressor.Close()
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gzip
import (
"compress/flate"
"errors"
"fmt"
"hash/crc32"
"io"
"time"
)
// These constants are copied from the [flate] package, so that code that imports
// [compress/gzip] does not also have to import [compress/flate].
const (
	NoCompression      = flate.NoCompression
	BestSpeed          = flate.BestSpeed
	BestCompression    = flate.BestCompression
	DefaultCompression = flate.DefaultCompression
	HuffmanOnly        = flate.HuffmanOnly
)
// A Writer is an [io.WriteCloser].
// Writes to a Writer are compressed and written to w.
type Writer struct {
	Header              // written at first call to Write, Flush, or Close
	w           io.Writer
	level       int  // compression level passed through to flate
	wroteHeader bool // whether the lazy header has been emitted yet
	closed      bool
	buf         [10]byte // scratch space for the header and trailer
	compressor  *flate.Writer
	digest      uint32 // CRC-32, IEEE polynomial (section 8)
	size        uint32 // Uncompressed size (section 2.3.1)
	err         error  // sticky error; once set, all Writes fail
}
// NewWriter returns a new [Writer].
// Writes to the returned writer are compressed and written to w.
//
// It is the caller's responsibility to call Close on the [Writer] when done.
// Writes may be buffered and not flushed until Close.
//
// Callers that wish to set the fields in Writer.[Header] must do so before
// the first call to Write, Flush, or Close.
func NewWriter(w io.Writer) *Writer {
	// DefaultCompression is always a valid level, so the error is ignored.
	z, _ := NewWriterLevel(w, DefaultCompression)
	return z
}
// NewWriterLevel is like [NewWriter] but specifies the compression level instead
// of assuming [DefaultCompression].
//
// The compression level can be [DefaultCompression], [NoCompression], [HuffmanOnly]
// or any integer value between [BestSpeed] and [BestCompression] inclusive.
// The error returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
	if HuffmanOnly <= level && level <= BestCompression {
		z := new(Writer)
		z.init(w, level)
		return z, nil
	}
	return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
}
// init resets z to a clean state writing to w at the given level,
// retargeting (and keeping) any existing flate compressor so that Reset
// does not reallocate it.
func (z *Writer) init(w io.Writer, level int) {
	c := z.compressor
	if c != nil {
		c.Reset(w)
	}
	*z = Writer{
		Header:     Header{OS: 255}, // 255 means "unknown OS" (RFC 1952)
		w:          w,
		level:      level,
		compressor: c,
	}
}
// Reset discards the [Writer] z's state and makes it equivalent to the
// result of its original state from [NewWriter] or [NewWriterLevel], but
// writing to w instead. This permits reusing a [Writer] rather than
// allocating a new one.
func (z *Writer) Reset(w io.Writer) {
	// The compression level survives the reset; only the output changes.
	z.init(w, z.level)
}
// writeBytes writes a length-prefixed byte slice to z.w: a little-endian
// uint16 length followed by the bytes. Slices longer than 65535 bytes
// cannot be represented in the gzip extra field and are rejected.
func (z *Writer) writeBytes(b []byte) error {
	if len(b) > 0xffff {
		return errors.New("gzip.Write: Extra data is too large")
	}
	le.PutUint16(z.buf[:2], uint16(len(b)))
	if _, err := z.w.Write(z.buf[:2]); err != nil {
		return err
	}
	_, err := z.w.Write(b)
	return err
}
// writeString writes a UTF-8 string s in GZIP's format to z.w.
// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
func (z *Writer) writeString(s string) (err error) {
	// Reject anything outside Latin-1, and note whether any rune is
	// non-ASCII (in which case a byte-for-rune re-encoding is needed).
	mustConvert := false
	for _, r := range s {
		if r == 0 || r > 0xff {
			return errors.New("gzip.Write: non-Latin-1 header string")
		}
		if r > 0x7f {
			mustConvert = true
		}
	}
	if mustConvert {
		// Each rune maps to exactly one Latin-1 byte.
		latin1 := make([]byte, 0, len(s))
		for _, r := range s {
			latin1 = append(latin1, byte(r))
		}
		_, err = z.w.Write(latin1)
	} else {
		_, err = io.WriteString(z.w, s)
	}
	if err != nil {
		return err
	}
	// GZIP strings are NUL-terminated.
	z.buf[0] = 0
	_, err = z.w.Write(z.buf[:1])
	return err
}
// Write writes a compressed form of p to the underlying [io.Writer]. The
// compressed bytes are not necessarily flushed until the [Writer] is closed.
// The first call also emits the gzip header built from the Header fields.
func (z *Writer) Write(p []byte) (int, error) {
	if z.err != nil {
		return 0, z.err
	}
	var n int
	// Write the GZIP header lazily.
	if !z.wroteHeader {
		z.wroteHeader = true
		z.buf = [10]byte{0: gzipID1, 1: gzipID2, 2: gzipDeflate}
		// FLG bits: 0x04 = FEXTRA, 0x08 = FNAME, 0x10 = FCOMMENT.
		if z.Extra != nil {
			z.buf[3] |= 0x04
		}
		if z.Name != "" {
			z.buf[3] |= 0x08
		}
		if z.Comment != "" {
			z.buf[3] |= 0x10
		}
		if z.ModTime.After(time.Unix(0, 0)) {
			// Section 2.3.1, the zero value for MTIME means that the
			// modified time is not set.
			le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
		}
		// XFL hints at the compression effort (section 2.3.1).
		if z.level == BestCompression {
			z.buf[8] = 2
		} else if z.level == BestSpeed {
			z.buf[8] = 4
		}
		z.buf[9] = z.OS
		_, z.err = z.w.Write(z.buf[:10])
		if z.err != nil {
			return 0, z.err
		}
		// Optional fields follow in the order FEXTRA, FNAME, FCOMMENT.
		if z.Extra != nil {
			z.err = z.writeBytes(z.Extra)
			if z.err != nil {
				return 0, z.err
			}
		}
		if z.Name != "" {
			z.err = z.writeString(z.Name)
			if z.err != nil {
				return 0, z.err
			}
		}
		if z.Comment != "" {
			z.err = z.writeString(z.Comment)
			if z.err != nil {
				return 0, z.err
			}
		}
		// The level was validated by NewWriterLevel, so the error is nil.
		if z.compressor == nil {
			z.compressor, _ = flate.NewWriter(z.w, z.level)
		}
	}
	// Account for the trailer's CRC-32 and ISIZE before compressing.
	z.size += uint32(len(p))
	z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
	n, z.err = z.compressor.Write(p)
	return n, z.err
}
// Flush flushes any pending compressed data to the underlying writer.
//
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet. Flush does
// not return until the data has been written. If the underlying
// writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (z *Writer) Flush() error {
	switch {
	case z.err != nil:
		return z.err
	case z.closed:
		return nil
	}
	if !z.wroteHeader {
		// A Flush before any data still needs the header on the wire.
		z.Write(nil)
		if z.err != nil {
			return z.err
		}
	}
	z.err = z.compressor.Flush()
	return z.err
}
// Close closes the [Writer] by flushing any unwritten data to the underlying
// [io.Writer] and writing the GZIP footer.
// It does not close the underlying [io.Writer].
func (z *Writer) Close() error {
	if z.err != nil {
		return z.err
	}
	if z.closed {
		return nil
	}
	z.closed = true
	if !z.wroteHeader {
		// An empty stream still carries a header.
		z.Write(nil)
		if z.err != nil {
			return z.err
		}
	}
	if z.err = z.compressor.Close(); z.err != nil {
		return z.err
	}
	// Trailer: CRC-32 then ISIZE, both little-endian (RFC 1952, 2.3.1).
	le.PutUint32(z.buf[:4], z.digest)
	le.PutUint32(z.buf[4:8], z.size)
	_, z.err = z.w.Write(z.buf[:8])
	return z.err
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lzw implements the Lempel-Ziv-Welch compressed data format,
// described in T. A. Welch, “A Technique for High-Performance Data
// Compression”, Computer, 17(6) (June 1984), pp 8-19.
//
// In particular, it implements LZW as used by the GIF and PDF file
// formats, which means variable-width codes up to 12 bits and the first
// two non-literal codes are a clear code and an EOF code.
//
// The TIFF file format uses a similar but incompatible version of the LZW
// algorithm. See the [golang.org/x/image/tiff/lzw] package for an
// implementation.
package lzw
// TODO(nigeltao): check that PDF uses LZW in the same way as GIF,
// modulo LSB/MSB packing order.
import (
"bufio"
"errors"
"fmt"
"io"
)
// Order specifies the bit ordering in an LZW data stream.
type Order int

const (
	// LSB means Least Significant Bits first, as used in the GIF file format.
	LSB Order = iota
	// MSB means Most Significant Bits first, as used in the TIFF and PDF
	// file formats.
	MSB
)

const (
	maxWidth           = 12     // codes never grow beyond 12 bits
	decoderInvalidCode = 0xffff // sentinel: no previously-seen code
	flushBuffer        = 1 << maxWidth // output high-water mark before a flush
)
// Reader is an [io.Reader] which can be used to read compressed data in the
// LZW format.
type Reader struct {
	r        io.ByteReader
	bits     uint32 // bit accumulator for partially-read codes
	nBits    uint   // number of valid bits in bits
	width    uint   // current code width in bits
	read     func(*Reader) (uint16, error) // readLSB or readMSB
	litWidth int                           // width in bits of literal codes
	err      error                         // sticky error; io.EOF after the EOF code

	// The first 1<<litWidth codes are literal codes.
	// The next two codes mean clear and EOF.
	// Other valid codes are in the range [lo, hi] where lo := clear + 2,
	// with the upper bound incrementing on each code seen.
	//
	// overflow is the code at which hi overflows the code width. It always
	// equals 1 << width.
	//
	// last is the most recently seen code, or decoderInvalidCode.
	//
	// An invariant is that hi < overflow.
	clear, eof, hi, overflow, last uint16

	// Each code c in [lo, hi] expands to two or more bytes. For c != hi:
	//   suffix[c] is the last of these bytes.
	//   prefix[c] is the code for all but the last byte.
	//   This code can either be a literal code or another code in [lo, c).
	// The c == hi case is a special case.
	suffix [1 << maxWidth]uint8
	prefix [1 << maxWidth]uint16

	// output is the temporary output buffer.
	// Literal codes are accumulated from the start of the buffer.
	// Non-literal codes decode to a sequence of suffixes that are first
	// written right-to-left from the end of the buffer before being copied
	// to the start of the buffer.
	// It is flushed when it contains >= 1<<maxWidth bytes,
	// so that there is always room to decode an entire code.
	output [2 * 1 << maxWidth]byte
	o      int    // write index into output
	toRead []byte // bytes to return from Read
}
// readLSB returns the next code for "Least Significant Bits first" data.
// Codes are assembled from the bottom of the bit accumulator upward.
func (r *Reader) readLSB() (uint16, error) {
	// Top up the accumulator until a whole code is available.
	for r.nBits < r.width {
		b, err := r.r.ReadByte()
		if err != nil {
			return 0, err
		}
		r.bits |= uint32(b) << r.nBits
		r.nBits += 8
	}
	// The code occupies the low width bits.
	mask := uint32(1)<<r.width - 1
	code := uint16(r.bits & mask)
	r.bits >>= r.width
	r.nBits -= r.width
	return code, nil
}
// readMSB returns the next code for "Most Significant Bits first" data.
// Codes live at the top of the 32-bit accumulator; bytes refill it from
// the top down.
func (r *Reader) readMSB() (uint16, error) {
	for r.nBits < r.width {
		b, err := r.r.ReadByte()
		if err != nil {
			return 0, err
		}
		r.bits |= uint32(b) << (24 - r.nBits)
		r.nBits += 8
	}
	// The code occupies the high width bits.
	code := uint16(r.bits >> (32 - r.width))
	r.bits <<= r.width
	r.nBits -= r.width
	return code, nil
}
// Read implements io.Reader, reading uncompressed bytes from its underlying reader.
// Buffered decoded bytes are always served before any sticky error,
// so data produced just before io.EOF is not lost.
func (r *Reader) Read(b []byte) (int, error) {
	for {
		if len(r.toRead) > 0 {
			copied := copy(b, r.toRead)
			r.toRead = r.toRead[copied:]
			return copied, nil
		}
		if r.err != nil {
			return 0, r.err
		}
		// Nothing buffered and no sticky error yet: decode more.
		r.decode()
	}
}
// decode decompresses bytes from r and leaves them in r.toRead.
// r.read specifies how to decode bytes into codes (LSB or MSB first);
// r.litWidth is the width in bits of literal codes. Decoding stops when
// the output buffer reaches its flush threshold, the EOF code is seen,
// or an error occurs (recorded in r.err).
func (r *Reader) decode() {
	// Loop over the code stream, converting codes into decompressed bytes.
loop:
	for {
		code, err := r.read(r)
		if err != nil {
			if err == io.EOF {
				// Input ended before the EOF code: a truncated stream.
				err = io.ErrUnexpectedEOF
			}
			r.err = err
			break
		}
		switch {
		case code < r.clear:
			// We have a literal code.
			r.output[r.o] = uint8(code)
			r.o++
			if r.last != decoderInvalidCode {
				// Save what the hi code expands to.
				r.suffix[r.hi] = uint8(code)
				r.prefix[r.hi] = r.last
			}
		case code == r.clear:
			// Clear code: reset the dictionary and code width.
			r.width = 1 + uint(r.litWidth)
			r.hi = r.eof
			r.overflow = 1 << r.width
			r.last = decoderInvalidCode
			continue
		case code == r.eof:
			r.err = io.EOF
			break loop
		case code <= r.hi:
			c, i := code, len(r.output)-1
			if code == r.hi && r.last != decoderInvalidCode {
				// code == hi is a special case which expands to the last expansion
				// followed by the head of the last expansion. To find the head, we walk
				// the prefix chain until we find a literal code.
				c = r.last
				for c >= r.clear {
					c = r.prefix[c]
				}
				r.output[i] = uint8(c)
				i--
				c = r.last
			}
			// Copy the suffix chain into output and then write that to w.
			// The chain is produced last-byte-first, so it is written
			// right-to-left from the end of the buffer.
			for c >= r.clear {
				r.output[i] = r.suffix[c]
				i--
				c = r.prefix[c]
			}
			r.output[i] = uint8(c)
			r.o += copy(r.output[r.o:], r.output[i:])
			if r.last != decoderInvalidCode {
				// Save what the hi code expands to.
				r.suffix[r.hi] = uint8(c)
				r.prefix[r.hi] = r.last
			}
		default:
			r.err = errors.New("lzw: invalid code")
			break loop
		}
		r.last, r.hi = code, r.hi+1
		if r.hi >= r.overflow {
			if r.hi > r.overflow {
				panic("unreachable")
			}
			if r.width == maxWidth {
				r.last = decoderInvalidCode
				// Undo the d.hi++ a few lines above, so that (1) we maintain
				// the invariant that d.hi < d.overflow, and (2) d.hi does not
				// eventually overflow a uint16.
				r.hi--
			} else {
				// Widen the code size and raise the overflow threshold.
				r.width++
				r.overflow = 1 << r.width
			}
		}
		if r.o >= flushBuffer {
			break
		}
	}
	// Flush pending output.
	r.toRead = r.output[:r.o]
	r.o = 0
}
var errClosed = errors.New("lzw: reader/writer is closed")
// Close closes the [Reader] and returns an error for any future read operation.
// It does not close the underlying [io.Reader].
func (r *Reader) Close() error {
	r.err = errClosed // in case any Reads come along
	return nil
}
// Reset clears the [Reader]'s state and allows it to be reused again
// as a new [Reader].
func (r *Reader) Reset(src io.Reader, order Order, litWidth int) {
	// Zero everything (including the large tables) before re-initializing.
	*r = Reader{}
	r.init(src, order, litWidth)
}
// NewReader creates a new [io.ReadCloser].
// Reads from the returned [io.ReadCloser] read and decompress data from r.
// If r does not also implement [io.ByteReader],
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser when
// finished reading.
// The number of bits to use for literal codes, litWidth, must be in the
// range [2,8] and is typically 8. It must equal the litWidth
// used during compression.
//
// It is guaranteed that the underlying type of the returned [io.ReadCloser]
// is a *[Reader].
func NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser {
	// newReader returns the concrete *Reader promised above.
	return newReader(r, order, litWidth)
}
// newReader allocates and initializes a *Reader for the given source,
// bit order, and literal code width. Invalid arguments are recorded in
// the Reader's sticky error rather than reported here.
func newReader(src io.Reader, order Order, litWidth int) *Reader {
	r := &Reader{}
	r.init(src, order, litWidth)
	return r
}
// init configures r for the given source, bit order, and literal width.
// Validation failures are stored in r.err (surfaced by the first Read)
// instead of being returned.
func (r *Reader) init(src io.Reader, order Order, litWidth int) {
	switch order {
	case LSB:
		r.read = (*Reader).readLSB
	case MSB:
		r.read = (*Reader).readMSB
	default:
		r.err = errors.New("lzw: unknown order")
		return
	}
	if litWidth < 2 || litWidth > 8 {
		r.err = fmt.Errorf("lzw: litWidth %d out of range", litWidth)
		return
	}

	byteReader, ok := src.(io.ByteReader)
	if !ok && src != nil {
		byteReader = bufio.NewReader(src)
	}
	r.r = byteReader
	r.litWidth = litWidth
	r.width = 1 + uint(litWidth)
	// Literals occupy [0, clear); clear and eof follow; new codes start
	// at eof+1, growing until the width-dependent overflow point.
	r.clear = uint16(1) << uint(litWidth)
	r.eof, r.hi = r.clear+1, r.clear+1
	r.overflow = uint16(1) << r.width
	r.last = decoderInvalidCode
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lzw
import (
"bufio"
"errors"
"fmt"
"io"
)
// A writer is a buffered, flushable writer.
type writer interface {
	io.ByteWriter
	Flush() error
}

const (
	// A code is a 12 bit value, stored as a uint32 when encoding to avoid
	// type conversions when shifting bits.
	maxCode     = 1<<12 - 1
	invalidCode = 1<<32 - 1
	// There are 1<<12 possible codes, which is an upper bound on the number of
	// valid hash table entries at any given point in time. tableSize is 4x that.
	tableSize = 4 * 1 << 12
	tableMask = tableSize - 1
	// A hash table entry is a uint32. Zero is an invalid entry since the
	// lower 12 bits of a valid entry must be a non-literal code.
	invalidEntry = 0
)
// Writer is an LZW compressor. It writes the compressed form of the data
// to an underlying writer (see [NewWriter]).
type Writer struct {
	// w is the writer that compressed bytes are written to.
	w writer
	// litWidth is the width in bits of literal codes.
	litWidth uint
	// order, write, bits, nBits and width are the state for
	// converting a code stream into a byte stream.
	order Order
	write func(*Writer, uint32) error // writeLSB or writeMSB
	nBits uint                        // number of valid bits buffered in bits
	width uint                        // current code width in bits
	bits  uint32                      // bit accumulator for partially-written codes
	// hi is the code implied by the next code emission.
	// overflow is the code at which hi overflows the code width.
	hi, overflow uint32
	// savedCode is the accumulated code at the end of the most recent Write
	// call. It is equal to invalidCode if there was no such call.
	savedCode uint32
	// err is the first error encountered during writing. Closing the writer
	// will make any future Write calls return errClosed
	err error
	// table is the hash table from 20-bit keys to 12-bit values. Each table
	// entry contains key<<12|val and collisions resolve by linear probing.
	// The keys consist of a 12-bit code prefix and an 8-bit byte suffix.
	// The values are a 12-bit code.
	table [tableSize]uint32
}
// writeLSB writes the code c for "Least Significant Bits first" data.
func (w *Writer) writeLSB(c uint32) error {
	// Append the new code's bits above any bits already pending.
	w.bits |= c << w.nBits
	w.nBits += w.width
	// Drain full bytes, least significant byte first.
	for w.nBits >= 8 {
		b := uint8(w.bits)
		if err := w.w.WriteByte(b); err != nil {
			return err
		}
		w.bits >>= 8
		w.nBits -= 8
	}
	return nil
}
// writeMSB writes the code c for "Most Significant Bits first" data.
func (w *Writer) writeMSB(c uint32) error {
	// Pack the new code just below any bits already pending at the top
	// of the 32-bit accumulator.
	w.bits |= c << (32 - w.width - w.nBits)
	w.nBits += w.width
	// Drain full bytes from the most significant end.
	for w.nBits >= 8 {
		b := uint8(w.bits >> 24)
		if err := w.w.WriteByte(b); err != nil {
			return err
		}
		w.bits <<= 8
		w.nBits -= 8
	}
	return nil
}
// errOutOfCodes is an internal error that means that the writer has run out
// of unused codes and a clear code needs to be sent next.
// It never escapes to callers of the public API.
var errOutOfCodes = errors.New("lzw: out of codes")
// incHi increments w.hi and checks for both overflow and running out of
// unused codes. In the latter case, incHi sends a clear code, resets the
// writer state and returns errOutOfCodes.
func (w *Writer) incHi() error {
	w.hi++
	if w.hi == w.overflow {
		// The next code no longer fits in the current width; widen it.
		w.width++
		w.overflow <<= 1
	}
	if w.hi != maxCode {
		return nil
	}
	// All 12-bit codes are used up: emit a clear code and start over.
	clearCode := uint32(1) << w.litWidth
	if err := w.write(w, clearCode); err != nil {
		return err
	}
	w.width = w.litWidth + 1
	w.hi = clearCode + 1
	w.overflow = clearCode << 1
	for i := range w.table {
		w.table[i] = invalidEntry
	}
	return errOutOfCodes
}
// Write writes a compressed representation of p to w's underlying writer.
// It returns len(p) on success; the compressed bytes may remain buffered
// until Close.
func (w *Writer) Write(p []byte) (n int, err error) {
	if w.err != nil {
		return 0, w.err
	}
	if len(p) == 0 {
		return 0, nil
	}
	// Reject bytes that cannot be represented in litWidth bits.
	if maxLit := uint8(1<<w.litWidth - 1); maxLit != 0xff {
		for _, x := range p {
			if x > maxLit {
				w.err = errors.New("lzw: input byte too large for the litWidth")
				return 0, w.err
			}
		}
	}
	n = len(p)
	code := w.savedCode
	if code == invalidCode {
		// This is the first write; send a clear code.
		// https://www.w3.org/Graphics/GIF/spec-gif89a.txt Appendix F
		// "Variable-Length-Code LZW Compression" says that "Encoders should
		// output a Clear code as the first code of each image data stream".
		//
		// LZW compression isn't only used by GIF, but it's cheap to follow
		// that directive unconditionally.
		clear := uint32(1) << w.litWidth
		if err := w.write(w, clear); err != nil {
			return 0, err
		}
		// After the starting clear code, the next code sent (for non-empty
		// input) is always a literal code.
		code, p = uint32(p[0]), p[1:]
	}
loop:
	for _, x := range p {
		literal := uint32(x)
		key := code<<8 | literal
		// If there is a hash table hit for this key then we continue the loop
		// and do not emit a code yet.
		hash := (key>>12 ^ key) & tableMask
		for h, t := hash, w.table[hash]; t != invalidEntry; {
			if key == t>>12 {
				code = t & maxCode
				continue loop
			}
			h = (h + 1) & tableMask
			t = w.table[h]
		}
		// Otherwise, write the current code, and literal becomes the start of
		// the next emitted code.
		if w.err = w.write(w, code); w.err != nil {
			return 0, w.err
		}
		code = literal
		// Increment w.hi, the next implied code. If we run out of codes, reset
		// the writer state (including clearing the hash table) and continue.
		if err1 := w.incHi(); err1 != nil {
			if err1 == errOutOfCodes {
				continue
			}
			w.err = err1
			return 0, w.err
		}
		// Otherwise, insert key -> w.hi into the map that w.table represents.
		for {
			if w.table[hash] == invalidEntry {
				w.table[hash] = (key << 12) | w.hi
				break
			}
			hash = (hash + 1) & tableMask
		}
	}
	// Remember the accumulated code so the next Write (or Close) can
	// continue the current match.
	w.savedCode = code
	return n, nil
}
// Close closes the [Writer], flushing any pending output. It does not close
// w's underlying writer.
func (w *Writer) Close() error {
	if w.err != nil {
		if w.err == errClosed {
			// Close is idempotent.
			return nil
		}
		return w.err
	}
	// Make any future calls to Write return errClosed.
	w.err = errClosed
	// Write the savedCode if valid.
	if w.savedCode != invalidCode {
		if err := w.write(w, w.savedCode); err != nil {
			return err
		}
		if err := w.incHi(); err != nil && err != errOutOfCodes {
			return err
		}
	} else {
		// Write the starting clear code, as w.Write did not.
		clear := uint32(1) << w.litWidth
		if err := w.write(w, clear); err != nil {
			return err
		}
	}
	// Write the eof code.
	eof := uint32(1)<<w.litWidth + 1
	if err := w.write(w, eof); err != nil {
		return err
	}
	// Write the final bits.
	if w.nBits > 0 {
		if w.order == MSB {
			// Pending MSB-first bits live in the top byte of w.bits.
			w.bits >>= 24
		}
		if err := w.w.WriteByte(uint8(w.bits)); err != nil {
			return err
		}
	}
	return w.w.Flush()
}
// Reset clears the [Writer]'s state and allows it to be reused again
// as a new [Writer].
func (w *Writer) Reset(dst io.Writer, order Order, litWidth int) {
	// Zero every field, then run the usual initialization.
	*w = Writer{}
	w.init(dst, order, litWidth)
}
// NewWriter creates a new [io.WriteCloser].
// Writes to the returned [io.WriteCloser] are compressed and written to w.
// It is the caller's responsibility to call Close on the WriteCloser when
// finished writing.
// The number of bits to use for literal codes, litWidth, must be in the
// range [2,8] and is typically 8. Input bytes must be less than 1<<litWidth.
//
// It is guaranteed that the underlying type of the returned [io.WriteCloser]
// is a *[Writer].
func NewWriter(w io.Writer, order Order, litWidth int) io.WriteCloser {
	wc := newWriter(w, order, litWidth)
	return wc
}
// newWriter allocates a Writer and initializes it for dst, order and litWidth.
func newWriter(dst io.Writer, order Order, litWidth int) *Writer {
	var w Writer
	w.init(dst, order, litWidth)
	return &w
}
// init configures w to compress to dst with the given bit order and literal
// width. On an invalid order or litWidth it records the error in w.err and
// leaves the Writer unusable.
func (w *Writer) init(dst io.Writer, order Order, litWidth int) {
	// Select the bit-packing routine for the requested bit order.
	switch order {
	case LSB:
		w.write = (*Writer).writeLSB
	case MSB:
		w.write = (*Writer).writeMSB
	default:
		w.err = errors.New("lzw: unknown order")
		return
	}
	if litWidth < 2 || litWidth > 8 {
		w.err = fmt.Errorf("lzw: litWidth %d out of range", litWidth)
		return
	}

	// Ensure the destination is buffered and flushable.
	bufDst, ok := dst.(writer)
	if !ok && dst != nil {
		bufDst = bufio.NewWriter(dst)
	}
	w.w = bufDst

	width := uint(litWidth)
	w.order = order
	w.width = width + 1
	w.litWidth = width
	// hi starts just past the EOF code (clear code is 1<<litWidth,
	// EOF is one above it); overflow is where the code width must grow.
	w.hi = 1<<width + 1
	w.overflow = 1 << (width + 1)
	w.savedCode = invalidCode
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package zlib implements reading and writing of zlib format compressed data,
as specified in RFC 1950.
The implementation provides filters that uncompress during reading
and compress during writing. For example, to write compressed data
to a buffer:
var b bytes.Buffer
w := zlib.NewWriter(&b)
w.Write([]byte("hello, world\n"))
w.Close()
and to read that data back:
r, err := zlib.NewReader(&b)
io.Copy(os.Stdout, r)
r.Close()
*/
package zlib
import (
"bufio"
"compress/flate"
"encoding/binary"
"errors"
"hash"
"hash/adler32"
"io"
)
const (
	// zlibDeflate is the compression method (CM) value for deflate,
	// the only method this package accepts (RFC 1950 section 2.2).
	zlibDeflate = 8
	// zlibMaxWindow is the largest valid compression-info (CINFO)
	// header value accepted when parsing the header.
	zlibMaxWindow = 7
)
// Errors reported while reading a malformed or corrupt zlib stream.
var (
	// ErrChecksum is returned when reading ZLIB data that has an invalid checksum.
	ErrChecksum = errors.New("zlib: invalid checksum")
	// ErrDictionary is returned when reading ZLIB data that has an invalid dictionary.
	ErrDictionary = errors.New("zlib: invalid dictionary")
	// ErrHeader is returned when reading ZLIB data that has an invalid header.
	ErrHeader = errors.New("zlib: invalid header")
)
// reader wraps a flate decompressor and verifies the zlib (RFC 1950)
// framing: a 2-byte header, the deflate stream, and an Adler-32 trailer.
type reader struct {
	r            flate.Reader  // buffered source of compressed bytes
	decompressor io.ReadCloser // underlying flate reader, reused across Resets
	digest       hash.Hash32   // running Adler-32 of the decompressed data
	err          error         // sticky error; io.EOF after a verified stream
	scratch      [4]byte       // scratch space for header and checksum reads
}
// Resetter resets a ReadCloser returned by [NewReader] or [NewReaderDict]
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
// The ReadClosers returned by this package implement it.
type Resetter interface {
	// Reset discards any buffered data and resets the Resetter as if it was
	// newly initialized with the given reader.
	Reset(r io.Reader, dict []byte) error
}
// NewReader creates a new ReadCloser.
// Reads from the returned ReadCloser read and decompress data from r.
// If r does not implement [io.ByteReader], the decompressor may read more
// data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser when done.
//
// The [io.ReadCloser] returned by NewReader also implements [Resetter].
func NewReader(r io.Reader) (io.ReadCloser, error) {
	// Equivalent to reading with no preset dictionary.
	return NewReaderDict(r, nil)
}
// NewReaderDict is like [NewReader] but uses a preset dictionary.
// NewReaderDict ignores the dictionary if the compressed data does not refer to it.
// If the compressed data refers to a different dictionary, NewReaderDict returns [ErrDictionary].
//
// The ReadCloser returned by NewReaderDict also implements [Resetter].
func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) {
	z := new(reader)
	if err := z.Reset(r, dict); err != nil {
		return nil, err
	}
	return z, nil
}
// Read decompresses into p, folding every byte produced into the running
// Adler-32 digest. When the deflate stream ends it reads the 4-byte trailer
// and verifies the checksum before surfacing io.EOF.
func (z *reader) Read(p []byte) (int, error) {
	if z.err != nil {
		return 0, z.err
	}
	var n int
	n, z.err = z.decompressor.Read(p)
	// Digest whatever was produced, even alongside an error.
	z.digest.Write(p[0:n])
	if z.err != io.EOF {
		// In the normal case we return here.
		return n, z.err
	}
	// Finished file; check checksum.
	if _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		z.err = err
		return n, z.err
	}
	// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
	checksum := binary.BigEndian.Uint32(z.scratch[:4])
	if checksum != z.digest.Sum32() {
		z.err = ErrChecksum
		return n, z.err
	}
	// Checksum verified; io.EOF is left in z.err for subsequent calls.
	return n, io.EOF
}
// Calling Close does not close the wrapped [io.Reader] originally passed to [NewReader].
// In order for the ZLIB checksum to be verified, the reader must be
// fully consumed until the [io.EOF].
func (z *reader) Close() error {
	if z.err == nil || z.err == io.EOF {
		// io.EOF is the normal terminal state; close the inflater and
		// remember its result for any future calls.
		z.err = z.decompressor.Close()
		return z.err
	}
	return z.err
}
// Reset discards the reader's state and re-initializes it to read a fresh
// zlib stream from r with optional preset dictionary dict, reusing the
// already-allocated flate decompressor if there is one.
func (z *reader) Reset(r io.Reader, dict []byte) error {
	// Keep only the decompressor across the reset.
	*z = reader{decompressor: z.decompressor}
	if fr, ok := r.(flate.Reader); ok {
		z.r = fr
	} else {
		// flate needs byte-at-a-time access; buffer r.
		z.r = bufio.NewReader(r)
	}
	// Read the header (RFC 1950 section 2.2.).
	_, z.err = io.ReadFull(z.r, z.scratch[0:2])
	if z.err != nil {
		if z.err == io.EOF {
			z.err = io.ErrUnexpectedEOF
		}
		return z.err
	}
	h := binary.BigEndian.Uint16(z.scratch[:2])
	// The method must be deflate, the window size must be in range, and
	// the 16-bit header must be a multiple of 31 (the FCHECK property).
	if (z.scratch[0]&0x0f != zlibDeflate) || (z.scratch[0]>>4 > zlibMaxWindow) || (h%31 != 0) {
		z.err = ErrHeader
		return z.err
	}
	// The FDICT flag bit signals that a 4-byte dictionary checksum follows.
	haveDict := z.scratch[1]&0x20 != 0
	if haveDict {
		_, z.err = io.ReadFull(z.r, z.scratch[0:4])
		if z.err != nil {
			if z.err == io.EOF {
				z.err = io.ErrUnexpectedEOF
			}
			return z.err
		}
		checksum := binary.BigEndian.Uint32(z.scratch[:4])
		if checksum != adler32.Checksum(dict) {
			z.err = ErrDictionary
			return z.err
		}
	}
	if z.decompressor == nil {
		if haveDict {
			z.decompressor = flate.NewReaderDict(z.r, dict)
		} else {
			z.decompressor = flate.NewReader(z.r)
		}
	} else {
		// Reuse the existing flate reader rather than allocating a new one.
		z.decompressor.(flate.Resetter).Reset(z.r, dict)
	}
	z.digest = adler32.New()
	return nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package zlib
import (
"compress/flate"
"encoding/binary"
"fmt"
"hash"
"hash/adler32"
"io"
)
// These constants are copied from the [flate] package, so that code that imports
// [compress/zlib] does not also have to import [compress/flate].
// They are passed straight through to the underlying flate writer.
const (
	NoCompression      = flate.NoCompression
	BestSpeed          = flate.BestSpeed
	BestCompression    = flate.BestCompression
	DefaultCompression = flate.DefaultCompression
	HuffmanOnly        = flate.HuffmanOnly
)
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see [NewWriter]).
type Writer struct {
	w           io.Writer     // destination for the zlib stream
	level       int           // flate compression level
	dict        []byte        // optional preset dictionary
	compressor  *flate.Writer // created lazily by writeHeader
	digest      hash.Hash32   // running Adler-32 of the uncompressed input
	err         error         // sticky error
	scratch     [4]byte       // scratch space for header/trailer bytes
	wroteHeader bool          // whether the zlib header has been emitted
}
// NewWriter creates a new [Writer].
// Writes to the returned Writer are compressed and written to w.
//
// It is the caller's responsibility to call Close on the Writer when done.
// Writes may be buffered and not flushed until Close.
func NewWriter(w io.Writer) *Writer {
	// DefaultCompression with no dictionary is always valid, so the
	// error is deliberately discarded.
	z, _ := NewWriterLevelDict(w, DefaultCompression, nil)
	return z
}
// NewWriterLevel is like [NewWriter] but specifies the compression level instead
// of assuming [DefaultCompression].
//
// The compression level can be [DefaultCompression], [NoCompression], [HuffmanOnly]
// or any integer value between [BestSpeed] and [BestCompression] inclusive.
// The error returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
	z, err := NewWriterLevelDict(w, level, nil)
	return z, err
}
// NewWriterLevelDict is like [NewWriterLevel] but specifies a dictionary to
// compress with.
//
// The dictionary may be nil. If not, its contents should not be modified until
// the Writer is closed.
func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	// Validate the level up front; the flate compressor itself is created
	// lazily when the header is written.
	if level < HuffmanOnly || level > BestCompression {
		return nil, fmt.Errorf("zlib: invalid compression level: %d", level)
	}
	z := &Writer{
		w:     w,
		level: level,
		dict:  dict,
	}
	return z, nil
}
// Reset clears the state of the [Writer] z such that it is equivalent to its
// initial state from [NewWriterLevel] or [NewWriterLevelDict], but instead writing
// to w.
func (z *Writer) Reset(w io.Writer) {
	z.w = w
	// z.level and z.dict are deliberately preserved.
	if c := z.compressor; c != nil {
		c.Reset(w)
	}
	if d := z.digest; d != nil {
		d.Reset()
	}
	z.err = nil
	z.scratch = [4]byte{}
	z.wroteHeader = false
}
// writeHeader writes the ZLIB header and lazily creates the flate
// compressor and Adler-32 digest on first use.
func (z *Writer) writeHeader() (err error) {
	z.wroteHeader = true
	// ZLIB has a two-byte header (as documented in RFC 1950).
	// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
	// The next four bits is the CM (compression method), which is 8 for deflate.
	z.scratch[0] = 0x78
	// The next two bits is the FLEVEL (compression level). The four values are:
	// 0=fastest, 1=fast, 2=default, 3=best.
	// The next bit, FDICT, is set if a dictionary is given.
	// The final five FCHECK bits form a mod-31 checksum.
	switch z.level {
	case -2, 0, 1:
		z.scratch[1] = 0 << 6
	case 2, 3, 4, 5:
		z.scratch[1] = 1 << 6
	case 6, -1:
		z.scratch[1] = 2 << 6
	case 7, 8, 9:
		z.scratch[1] = 3 << 6
	default:
		// NewWriterLevelDict validated the level, so this is unreachable.
		panic("unreachable")
	}
	if z.dict != nil {
		z.scratch[1] |= 1 << 5
	}
	// Pad the header to a multiple of 31 via the FCHECK bits.
	z.scratch[1] += uint8(31 - binary.BigEndian.Uint16(z.scratch[:2])%31)
	if _, err = z.w.Write(z.scratch[0:2]); err != nil {
		return err
	}
	if z.dict != nil {
		// The next four bytes are the Adler-32 checksum of the dictionary.
		binary.BigEndian.PutUint32(z.scratch[:], adler32.Checksum(z.dict))
		if _, err = z.w.Write(z.scratch[0:4]); err != nil {
			return err
		}
	}
	if z.compressor == nil {
		// Initialize deflater unless the Writer is being reused
		// after a Reset call.
		z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
		if err != nil {
			return err
		}
		z.digest = adler32.New()
	}
	return nil
}
// Write writes a compressed form of p to the underlying [io.Writer]. The
// compressed bytes are not necessarily flushed until the [Writer] is closed or
// explicitly flushed.
func (z *Writer) Write(p []byte) (n int, err error) {
	if !z.wroteHeader {
		z.err = z.writeHeader()
	}
	if z.err != nil {
		return 0, z.err
	}
	if len(p) == 0 {
		return 0, nil
	}
	// Compress first; fold p into the checksum only once the write succeeds.
	if n, err = z.compressor.Write(p); err != nil {
		z.err = err
		return n, err
	}
	z.digest.Write(p)
	return n, nil
}
// Flush flushes the Writer to its underlying [io.Writer].
func (z *Writer) Flush() error {
	if !z.wroteHeader {
		z.err = z.writeHeader()
	}
	if z.err == nil {
		z.err = z.compressor.Flush()
	}
	return z.err
}
// Close closes the Writer, flushing any unwritten data to the underlying
// [io.Writer], but does not close the underlying io.Writer.
func (z *Writer) Close() error {
	if !z.wroteHeader {
		z.err = z.writeHeader()
	}
	if z.err != nil {
		return z.err
	}
	if z.err = z.compressor.Close(); z.err != nil {
		return z.err
	}
	// Trailer: the Adler-32 of the uncompressed data.
	// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
	binary.BigEndian.PutUint32(z.scratch[:], z.digest.Sum32())
	_, z.err = z.w.Write(z.scratch[0:4])
	return z.err
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package list implements a doubly linked list.
//
// To iterate over a list (where l is a *List):
//
// for e := l.Front(); e != nil; e = e.Next() {
// // do something with e.Value
// }
package list
// Element is an element of a linked list.
type Element struct {
	// Next and previous pointers in the doubly-linked list of elements.
	// To simplify the implementation, internally a list l is implemented
	// as a ring, such that &l.root is both the next element of the last
	// list element (l.Back()) and the previous element of the first list
	// element (l.Front()).
	next, prev *Element

	// The list to which this element belongs.
	list *List

	// The value stored with this element.
	Value any
}
// Next returns the next list element or nil.
func (e *Element) Next() *Element {
	if e.list == nil {
		return nil
	}
	// The sentinel root marks the end of the ring.
	if n := e.next; n != &e.list.root {
		return n
	}
	return nil
}
// Prev returns the previous list element or nil.
func (e *Element) Prev() *Element {
	if e.list == nil {
		return nil
	}
	// The sentinel root marks the start of the ring.
	if p := e.prev; p != &e.list.root {
		return p
	}
	return nil
}
// List represents a doubly linked list.
// The zero value for List is an empty list ready to use.
type List struct {
	root Element // sentinel list element, only &root, root.prev, and root.next are used
	len  int     // current list length excluding (this) sentinel element
}
// Init initializes or clears list l.
func (l *List) Init() *List {
	// An empty list is a ring containing only the sentinel.
	l.root.next, l.root.prev = &l.root, &l.root
	l.len = 0
	return l
}
// New returns an initialized list.
func New() *List {
	l := new(List)
	return l.Init()
}
// Len returns the number of elements of list l.
// The complexity is O(1).
func (l *List) Len() int {
	return l.len
}
// Front returns the first element of list l or nil if the list is empty.
func (l *List) Front() *Element {
	if l.len > 0 {
		return l.root.next
	}
	return nil
}
// Back returns the last element of list l or nil if the list is empty.
func (l *List) Back() *Element {
	if l.len > 0 {
		return l.root.prev
	}
	return nil
}
// lazyInit lazily initializes a zero List value.
func (l *List) lazyInit() {
	// A zero List has a nil root.next; Init turns it into an empty ring.
	if l.root.next == nil {
		l.Init()
	}
}
// insert inserts e after at, increments l.len, and returns e.
func (l *List) insert(e, at *Element) *Element {
	n := at.next
	at.next = e
	e.prev = at
	e.next = n
	n.prev = e
	e.list = l
	l.len++
	return e
}
// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
func (l *List) insertValue(v any, at *Element) *Element {
	e := &Element{Value: v}
	return l.insert(e, at)
}
// remove removes e from its list, decrements l.len
func (l *List) remove(e *Element) {
	e.prev.next = e.next
	e.next.prev = e.prev
	// Drop references so the removed element doesn't pin its neighbors.
	e.next, e.prev = nil, nil
	e.list = nil
	l.len--
}
// move moves e to next to at.
func (l *List) move(e, at *Element) {
	if e == at {
		return
	}
	// Unlink e from its current position.
	e.prev.next = e.next
	e.next.prev = e.prev
	// Splice e in right after at.
	n := at.next
	at.next = e
	e.prev = at
	e.next = n
	n.prev = e
}
// Remove removes e from l if e is an element of list l.
// It returns the element value e.Value.
// The element must not be nil.
func (l *List) Remove(e *Element) any {
	if e.list != l {
		return e.Value
	}
	// if e.list == l, l must have been initialized when e was inserted
	// in l or l == nil (e is a zero Element) and l.remove will crash
	l.remove(e)
	return e.Value
}
// PushFront inserts a new element e with value v at the front of list l and returns e.
func (l *List) PushFront(v any) *Element {
	l.lazyInit()
	// Inserting after the sentinel places e at the front.
	return l.insertValue(v, &l.root)
}
// PushBack inserts a new element e with value v at the back of list l and returns e.
func (l *List) PushBack(v any) *Element {
	l.lazyInit()
	// Inserting after the last element places e at the back.
	return l.insertValue(v, l.root.prev)
}
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *List) InsertBefore(v any, mark *Element) *Element {
	if mark.list == l {
		// see comment in List.Remove about initialization of l
		return l.insertValue(v, mark.prev)
	}
	return nil
}
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *List) InsertAfter(v any, mark *Element) *Element {
	if mark.list == l {
		// see comment in List.Remove about initialization of l
		return l.insertValue(v, mark)
	}
	return nil
}
// MoveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *List) MoveToFront(e *Element) {
	if e.list != l {
		return
	}
	if l.root.next == e {
		// Already at the front.
		return
	}
	// see comment in List.Remove about initialization of l
	l.move(e, &l.root)
}
// MoveToBack moves element e to the back of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *List) MoveToBack(e *Element) {
	if e.list != l {
		return
	}
	if l.root.prev == e {
		// Already at the back.
		return
	}
	// see comment in List.Remove about initialization of l
	l.move(e, l.root.prev)
}
// MoveBefore moves element e to its new position before mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *List) MoveBefore(e, mark *Element) {
	if e == mark || e.list != l || mark.list != l {
		return
	}
	l.move(e, mark.prev)
}
// MoveAfter moves element e to its new position after mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *List) MoveAfter(e, mark *Element) {
	if e == mark || e.list != l || mark.list != l {
		return
	}
	l.move(e, mark)
}
// PushBackList inserts a copy of another list at the back of list l.
// The lists l and other may be the same. They must not be nil.
func (l *List) PushBackList(other *List) {
	l.lazyInit()
	// Capture the length once so appending to l (== other) terminates.
	e := other.Front()
	for i := other.Len(); i > 0; i-- {
		l.insertValue(e.Value, l.root.prev)
		e = e.Next()
	}
}
// PushFrontList inserts a copy of another list at the front of list l.
// The lists l and other may be the same. They must not be nil.
func (l *List) PushFrontList(other *List) {
	l.lazyInit()
	// Walk other back-to-front, capturing the length once so prepending
	// to l (== other) terminates.
	e := other.Back()
	for i := other.Len(); i > 0; i-- {
		l.insertValue(e.Value, &l.root)
		e = e.Prev()
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ring implements operations on circular lists.
package ring
// A Ring is an element of a circular list, or ring.
// Rings do not have a beginning or end; a pointer to any ring element
// serves as reference to the entire ring. Empty rings are represented
// as nil Ring pointers. The zero value for a Ring is a one-element
// ring with a nil Value.
type Ring struct {
	next, prev *Ring // neighbors; nil until lazily initialized
	Value      any   // for use by client; untouched by this library
}
// init turns the zero Ring into a one-element ring and returns it.
func (r *Ring) init() *Ring {
	r.next, r.prev = r, r
	return r
}
// Next returns the next ring element. r must not be empty.
func (r *Ring) Next() *Ring {
	n := r.next
	if n == nil {
		// Lazily initialize the zero value into a one-element ring.
		return r.init()
	}
	return n
}
// Prev returns the previous ring element. r must not be empty.
func (r *Ring) Prev() *Ring {
	// A nil next pointer means the zero value; initialize lazily.
	if r.next != nil {
		return r.prev
	}
	return r.init()
}
// Move moves n % r.Len() elements backward (n < 0) or forward (n >= 0)
// in the ring and returns that ring element. r must not be empty.
func (r *Ring) Move(n int) *Ring {
	if r.next == nil {
		return r.init()
	}
	// Exactly one of these loops runs, depending on the sign of n.
	for ; n < 0; n++ {
		r = r.prev
	}
	for ; n > 0; n-- {
		r = r.next
	}
	return r
}
// New creates a ring of n elements.
func New(n int) *Ring {
	if n <= 0 {
		return nil
	}
	// Build a chain of n elements, then close it into a ring.
	r := new(Ring)
	p := r
	for i := 1; i < n; i++ {
		q := &Ring{prev: p}
		p.next = q
		p = q
	}
	p.next = r
	r.prev = p
	return r
}
// Link connects ring r with ring s such that r.Next()
// becomes s and returns the original value for r.Next().
// r must not be empty.
//
// If r and s point to the same ring, linking
// them removes the elements between r and s from the ring.
// The removed elements form a subring and the result is a
// reference to that subring (if no elements were removed,
// the result is still the original value for r.Next(),
// and not nil).
//
// If r and s point to different rings, linking
// them creates a single ring with the elements of s inserted
// after r. The result points to the element following the
// last element of s after insertion.
func (r *Ring) Link(s *Ring) *Ring {
	// Next/Prev (rather than the raw fields) lazily initialize zero Rings.
	n := r.Next()
	if s != nil {
		p := s.Prev()
		// Note: Cannot use multiple assignment because
		// evaluation order of LHS is not specified.
		r.next = s
		s.prev = r
		n.prev = p
		p.next = n
	}
	return n
}
// Unlink removes n % r.Len() elements from the ring r, starting
// at r.Next(). If n % r.Len() == 0, r remains unchanged.
// The result is the removed subring. r must not be empty.
func (r *Ring) Unlink(n int) *Ring {
	if n <= 0 {
		return nil
	}
	// Linking r to the element n+1 steps ahead detaches the n elements
	// in between; Link returns them as the removed subring.
	target := r.Move(n + 1)
	return r.Link(target)
}
// Len computes the number of elements in ring r.
// It executes in time proportional to the number of elements.
func (r *Ring) Len() int {
	if r == nil {
		return 0
	}
	n := 1
	for p := r.Next(); p != r; p = p.next {
		n++
	}
	return n
}
// Do calls function f on each element of the ring, in forward order.
// The behavior of Do is undefined if f changes *r.
func (r *Ring) Do(f func(any)) {
	if r == nil {
		return
	}
	f(r.Value)
	for p := r.Next(); p != r; p = p.next {
		f(p.Value)
	}
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines,
// cancellation signals, and other request-scoped values across API boundaries
// and between processes.
//
// Incoming requests to a server should create a [Context], and outgoing
// calls to servers should accept a Context. The chain of function
// calls between them must propagate the Context, optionally replacing
// it with a derived Context created using [WithCancel], [WithDeadline],
// [WithTimeout], or [WithValue].
//
// A Context may be canceled to indicate that work done on its behalf should stop.
// A Context with a deadline is canceled after the deadline passes.
// When a Context is canceled, all Contexts derived from it are also canceled.
//
// The [WithCancel], [WithDeadline], and [WithTimeout] functions take a
// Context (the parent) and return a derived Context (the child) and a
// [CancelFunc]. Calling the CancelFunc directly cancels the child and its
// children, removes the parent's reference to the child, and stops
// any associated timers. Failing to call the CancelFunc leaks the
// child and its children until the parent is canceled. The go vet tool
// checks that CancelFuncs are used on all control-flow paths.
//
// The [WithCancelCause] function returns a [CancelCauseFunc], which takes
// an error and records it as the cancellation cause. [WithDeadlineCause]
// and [WithTimeoutCause] take a cause to use when the deadline expires.
// Calling [Cause] on the canceled context or any of its children retrieves
// the cause. If no cause is specified, Cause(ctx) returns the same value
// as ctx.Err().
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. This is discussed further in
// https://go.dev/blog/context-and-structs. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO]
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See https://go.dev/blog/context for example code for a server that uses
// Contexts.
package context
import (
"errors"
"internal/reflectlite"
"sync"
"sync/atomic"
"time"
)
// A Context carries a deadline, a cancellation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
	// Deadline returns the time when work done on behalf of this context
	// should be canceled. Deadline returns ok==false when no deadline is
	// set. Successive calls to Deadline return the same results.
	Deadline() (deadline time.Time, ok bool)

	// Done returns a channel that's closed when work done on behalf of this
	// context should be canceled. Done may return nil if this context can
	// never be canceled. Successive calls to Done return the same value.
	// The close of the Done channel may happen asynchronously,
	// after the cancel function returns.
	//
	// WithCancel arranges for Done to be closed when cancel is called;
	// WithDeadline arranges for Done to be closed when the deadline
	// expires; WithTimeout arranges for Done to be closed when the timeout
	// elapses.
	//
	// Done is provided for use in select statements:
	//
	//  // Stream generates values with DoSomething and sends them to out
	//  // until DoSomething returns an error or ctx.Done is closed.
	//  func Stream(ctx context.Context, out chan<- Value) error {
	//  	for {
	//  		v, err := DoSomething(ctx)
	//  		if err != nil {
	//  			return err
	//  		}
	//  		select {
	//  		case <-ctx.Done():
	//  			return ctx.Err()
	//  		case out <- v:
	//  		}
	//  	}
	//  }
	//
	// See https://go.dev/blog/pipelines for more examples of how to use
	// a Done channel for cancellation.
	Done() <-chan struct{}

	// Err reports whether and why this context is done.
	// If Done is not yet closed, Err returns nil.
	// If Done is closed, Err returns a non-nil error explaining why:
	// DeadlineExceeded if the context's deadline passed,
	// or Canceled if the context was canceled for some other reason.
	// After Err returns a non-nil error, successive calls to Err return the same error.
	Err() error

	// Value returns the value associated with this context for key, or nil
	// if no value is associated with key. Successive calls to Value with
	// the same key returns the same result.
	//
	// Use context values only for request-scoped data that transits
	// processes and API boundaries, not for passing optional parameters to
	// functions.
	//
	// A key identifies a specific value in a Context. Functions that wish
	// to store values in Context typically allocate a key in a global
	// variable then use that key as the argument to context.WithValue and
	// Context.Value. A key can be any type that supports equality;
	// packages should define keys as an unexported type to avoid
	// collisions.
	//
	// Packages that define a Context key should provide type-safe accessors
	// for the values stored using that key:
	//
	// 	// Package user defines a User type that's stored in Contexts.
	// 	package user
	//
	// 	import "context"
	//
	// 	// User is the type of value stored in the Contexts.
	// 	type User struct {...}
	//
	// 	// key is an unexported type for keys defined in this package.
	// 	// This prevents collisions with keys defined in other packages.
	// 	type key int
	//
	// 	// userKey is the key for user.User values in Contexts. It is
	// 	// unexported; clients use user.NewContext and user.FromContext
	// 	// instead of using this key directly.
	// 	var userKey key
	//
	// 	// NewContext returns a new Context that carries value u.
	// 	func NewContext(ctx context.Context, u *User) context.Context {
	// 		return context.WithValue(ctx, userKey, u)
	// 	}
	//
	// 	// FromContext returns the User value stored in ctx, if any.
	// 	func FromContext(ctx context.Context) (*User, bool) {
	// 		u, ok := ctx.Value(userKey).(*User)
	// 		return u, ok
	// 	}
	Value(key any) any
}
// Canceled is the error returned by [Context.Err] when the context is canceled
// for some reason other than its deadline passing.
var Canceled = errors.New("context canceled")

// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
// due to its deadline passing.
var DeadlineExceeded error = deadlineExceededError{}

// deadlineExceededError is the concrete type behind DeadlineExceeded.
type deadlineExceededError struct{}

func (deadlineExceededError) Error() string { return "context deadline exceeded" }

// Timeout and Temporary report true so that callers which type-assert for
// these methods (timeout-style error checks) classify a deadline error as a
// timeout and as temporary.
func (deadlineExceededError) Timeout() bool { return true }
func (deadlineExceededError) Temporary() bool { return true }

// An emptyCtx is never canceled, has no values, and has no deadline.
// It is the common base of backgroundCtx and todoCtx.
type emptyCtx struct{}

// Deadline returns the zero time and false: an emptyCtx has no deadline.
func (emptyCtx) Deadline() (deadline time.Time, ok bool) {
	return
}

// Done returns nil; receiving from a nil channel blocks forever, so an
// emptyCtx is never observed as canceled.
func (emptyCtx) Done() <-chan struct{} {
	return nil
}

// Err returns nil: an emptyCtx is never canceled.
func (emptyCtx) Err() error {
	return nil
}

// Value returns nil: an emptyCtx carries no key/value pairs.
func (emptyCtx) Value(key any) any {
	return nil
}

// backgroundCtx is the value returned by Background.
type backgroundCtx struct{ emptyCtx }

func (backgroundCtx) String() string {
	return "context.Background"
}

// todoCtx is the value returned by TODO.
type todoCtx struct{ emptyCtx }

func (todoCtx) String() string {
	return "context.TODO"
}
// Background returns a non-nil, empty [Context]. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
	// The zero value of backgroundCtx is the shared empty context.
	var bg backgroundCtx
	return bg
}
// TODO returns a non-nil, empty [Context]. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter).
func TODO() Context {
	// The zero value of todoCtx is the shared empty context.
	var todo todoCtx
	return todo
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// A CancelFunc may be called by multiple goroutines simultaneously.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

// WithCancel returns a derived context that points to the parent context
// but has a new Done channel. The returned context's Done channel is closed
// when the returned cancel function is called or when the parent context's
// Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
	c := withCancel(parent)
	// removeFromParent=true detaches c from its parent's children set;
	// a nil cause defaults to the error (Canceled) inside cancel.
	return c, func() { c.cancel(true, Canceled, nil) }
}

// A CancelCauseFunc behaves like a [CancelFunc] but additionally sets the cancellation cause.
// This cause can be retrieved by calling [Cause] on the canceled Context or on
// any of its derived Contexts.
//
// If the context has already been canceled, CancelCauseFunc does not set the cause.
// For example, if childContext is derived from parentContext:
//   - if parentContext is canceled with cause1 before childContext is canceled with cause2,
//     then Cause(parentContext) == Cause(childContext) == cause1
//   - if childContext is canceled with cause2 before parentContext is canceled with cause1,
//     then Cause(parentContext) == cause1 and Cause(childContext) == cause2
type CancelCauseFunc func(cause error)

// WithCancelCause behaves like [WithCancel] but returns a [CancelCauseFunc] instead of a [CancelFunc].
// Calling cancel with a non-nil error (the "cause") records that error in ctx;
// it can then be retrieved using Cause(ctx).
// Calling cancel with nil sets the cause to Canceled.
//
// Example use:
//
//	ctx, cancel := context.WithCancelCause(parent)
//	cancel(myError)
//	ctx.Err() // returns context.Canceled
//	context.Cause(ctx) // returns myError
func WithCancelCause(parent Context) (ctx Context, cancel CancelCauseFunc) {
	c := withCancel(parent)
	// Err() is always Canceled here; the caller-supplied cause is recorded
	// separately and surfaced via Cause.
	return c, func(cause error) { c.cancel(true, Canceled, cause) }
}
// withCancel allocates a new cancelCtx derived from parent and links it into
// the parent's cancellation tree. It panics if parent is nil.
func withCancel(parent Context) *cancelCtx {
	if parent == nil {
		panic("cannot create context from nil parent")
	}
	ctx := &cancelCtx{}
	ctx.propagateCancel(parent, ctx)
	return ctx
}
// Cause returns a non-nil error explaining why c was canceled.
// The first cancellation of c or one of its parents sets the cause.
// If that cancellation happened via a call to CancelCauseFunc(err),
// then [Cause] returns err.
// Otherwise Cause(c) returns the same value as c.Err().
// Cause returns nil if c has not been canceled yet.
func Cause(c Context) error {
	err := c.Err()
	if err == nil {
		return nil
	}
	// Look up the innermost enclosing *cancelCtx, which records the cause.
	if cc, ok := c.Value(&cancelCtxKey).(*cancelCtx); ok {
		// cause is guarded by the mutex (unlike err, which is atomic).
		cc.mu.Lock()
		cause := cc.cause
		cc.mu.Unlock()
		if cause != nil {
			return cause
		}
		// The parent cancelCtx doesn't have a cause,
		// so c must have been canceled in some custom context implementation.
	}
	// We don't have a cause to return from a parent cancelCtx,
	// so return the context's error.
	return err
}
// AfterFunc arranges to call f in its own goroutine after ctx is canceled.
// If ctx is already canceled, AfterFunc calls f immediately in its own goroutine.
//
// Multiple calls to AfterFunc on a context operate independently;
// one does not replace another.
//
// Calling the returned stop function stops the association of ctx with f.
// It returns true if the call stopped f from being run.
// If stop returns false,
// either the context is canceled and f has been started in its own goroutine;
// or f was already stopped.
// The stop function does not wait for f to complete before returning.
// If the caller needs to know whether f is completed,
// it must coordinate with f explicitly.
//
// If ctx has a "AfterFunc(func()) func() bool" method,
// AfterFunc will use it to schedule the call.
func AfterFunc(ctx Context, f func()) (stop func() bool) {
	a := &afterFuncCtx{
		f: f,
	}
	a.cancelCtx.propagateCancel(ctx, a)
	return func() bool {
		stopped := false
		// The once is shared with (*afterFuncCtx).cancel: whichever wins
		// decides whether f runs. If stop wins, f will never be started.
		a.once.Do(func() {
			stopped = true
		})
		if stopped {
			// f was prevented from running; detach a from its parent so
			// the association is fully torn down.
			a.cancel(true, Canceled, nil)
		}
		return stopped
	}
}

// afterFuncer is the optional interface a custom Context can implement to
// let AfterFunc delegate scheduling to it.
type afterFuncer interface {
	AfterFunc(func()) func() bool
}

// afterFuncCtx is the canceler created by AfterFunc: a cancelCtx plus the
// callback to run (at most once) when the context is canceled.
type afterFuncCtx struct {
	cancelCtx
	once sync.Once // either starts running f or stops f from running
	f    func()
}

// cancel cancels the underlying cancelCtx and, if stop has not already won
// the race on a.once, starts f in its own goroutine.
func (a *afterFuncCtx) cancel(removeFromParent bool, err, cause error) {
	a.cancelCtx.cancel(false, err, cause)
	if removeFromParent {
		removeChild(a.Context, a)
	}
	a.once.Do(func() {
		go a.f()
	})
}
// A stopCtx is used as the parent context of a cancelCtx when
// an AfterFunc has been registered with the parent.
// It holds the stop function used to unregister the AfterFunc.
type stopCtx struct {
	Context
	stop func() bool
}

// goroutines counts the number of goroutines ever created; for testing.
var goroutines atomic.Int32

// &cancelCtxKey is the key that a cancelCtx returns itself for.
// Only the address matters; the int value is never used.
var cancelCtxKey int
// parentCancelCtx returns the underlying *cancelCtx for parent.
// It does this by looking up parent.Value(&cancelCtxKey) to find
// the innermost enclosing *cancelCtx and then checking whether
// parent.Done() matches that *cancelCtx. (If not, the *cancelCtx
// has been wrapped in a custom implementation providing a
// different done channel, in which case we should not bypass it.)
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
	done := parent.Done()
	// closedchan means parent is already canceled; nil means it can never be.
	// Either way there is no live cancelCtx to attach to.
	if done == closedchan || done == nil {
		return nil, false
	}
	p, ok := parent.Value(&cancelCtxKey).(*cancelCtx)
	if !ok {
		return nil, false
	}
	// Confirm the found cancelCtx actually owns parent's done channel;
	// otherwise a wrapper has interposed its own cancellation.
	pdone, _ := p.done.Load().(chan struct{})
	if pdone != done {
		return nil, false
	}
	return p, true
}

// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
	// A stopCtx parent means the link is an AfterFunc registration;
	// unregister it instead of touching a children map.
	if s, ok := parent.(stopCtx); ok {
		s.stop()
		return
	}
	p, ok := parentCancelCtx(parent)
	if !ok {
		return
	}
	p.mu.Lock()
	if p.children != nil {
		delete(p.children, child)
	}
	p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
	cancel(removeFromParent bool, err, cause error)
	Done() <-chan struct{}
}

// closedchan is a reusable closed channel.
var closedchan = make(chan struct{})

func init() {
	close(closedchan)
}

// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
	Context

	mu       sync.Mutex            // protects following fields
	done     atomic.Value          // of chan struct{}, created lazily, closed by first cancel call
	children map[canceler]struct{} // set to nil by the first cancel call
	err      atomic.Value          // set to non-nil by the first cancel call
	cause    error                 // set to non-nil by the first cancel call
}

// Value returns c itself for the internal cancelCtxKey (used by
// parentCancelCtx and Cause) and otherwise defers to the shared lookup.
func (c *cancelCtx) Value(key any) any {
	if key == &cancelCtxKey {
		return c
	}
	return value(c.Context, key)
}

// Done lazily creates the done channel: a fast atomic load first, then a
// mutex-guarded re-check before allocating, so concurrent callers get the
// same channel.
func (c *cancelCtx) Done() <-chan struct{} {
	d := c.done.Load()
	if d != nil {
		return d.(chan struct{})
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	d = c.done.Load()
	if d == nil {
		d = make(chan struct{})
		c.done.Store(d)
	}
	return d.(chan struct{})
}

func (c *cancelCtx) Err() error {
	// An atomic load is ~5x faster than a mutex, which can matter in tight loops.
	if err := c.err.Load(); err != nil {
		// Ensure the done channel has been closed before returning a non-nil error.
		<-c.Done()
		return err.(error)
	}
	return nil
}
// propagateCancel arranges for child to be canceled when parent is.
// It sets the parent context of cancelCtx.
//
// There are four cases, tried in order:
//  1. parent can never be canceled (nil Done): nothing to do.
//  2. parent is already canceled: cancel child immediately.
//  3. parent is (or wraps, without hiding) a *cancelCtx: register child in
//     the parent's children map so cancellation propagates without a goroutine.
//  4. parent implements AfterFunc, or is fully opaque: schedule the
//     cancellation via AfterFunc, or fall back to a watcher goroutine.
func (c *cancelCtx) propagateCancel(parent Context, child canceler) {
	c.Context = parent

	done := parent.Done()
	if done == nil {
		return // parent is never canceled
	}

	select {
	case <-done:
		// parent is already canceled
		child.cancel(false, parent.Err(), Cause(parent))
		return
	default:
	}

	if p, ok := parentCancelCtx(parent); ok {
		// parent is a *cancelCtx, or derives from one.
		p.mu.Lock()
		if err := p.err.Load(); err != nil {
			// parent has already been canceled
			child.cancel(false, err.(error), p.cause)
		} else {
			if p.children == nil {
				p.children = make(map[canceler]struct{})
			}
			p.children[child] = struct{}{}
		}
		p.mu.Unlock()
		return
	}

	if a, ok := parent.(afterFuncer); ok {
		// parent implements an AfterFunc method.
		c.mu.Lock()
		stop := a.AfterFunc(func() {
			child.cancel(false, parent.Err(), Cause(parent))
		})
		// Replace the parent with a stopCtx so removeChild can later
		// unregister the AfterFunc via stop.
		c.Context = stopCtx{
			Context: parent,
			stop:    stop,
		}
		c.mu.Unlock()
		return
	}

	// Last resort: watch both channels; the goroutine exits when either
	// parent or child is canceled.
	goroutines.Add(1)
	go func() {
		select {
		case <-parent.Done():
			child.cancel(false, parent.Err(), Cause(parent))
		case <-child.Done():
		}
	}()
}
// stringer is a local copy of fmt.Stringer, so this package need not
// depend on fmt.
type stringer interface {
	String() string
}

// contextName returns c's own String() if it has one, else its type name.
func contextName(c Context) string {
	if s, ok := c.(stringer); ok {
		return s.String()
	}
	return reflectlite.TypeOf(c).String()
}

func (c *cancelCtx) String() string {
	return contextName(c.Context) + ".WithCancel"
}

// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
// cancel sets c.cause to cause if this is the first time c is canceled.
func (c *cancelCtx) cancel(removeFromParent bool, err, cause error) {
	if err == nil {
		panic("context: internal error: missing cancel error")
	}
	if cause == nil {
		cause = err
	}
	c.mu.Lock()
	if c.err.Load() != nil {
		c.mu.Unlock()
		return // already canceled
	}
	c.err.Store(err)
	c.cause = cause
	// Close the done channel, or store the shared closed channel if it was
	// never created (so later Done calls observe cancellation immediately).
	d, _ := c.done.Load().(chan struct{})
	if d == nil {
		c.done.Store(closedchan)
	} else {
		close(d)
	}
	for child := range c.children {
		// NOTE: acquiring the child's lock while holding parent's lock.
		child.cancel(false, err, cause)
	}
	c.children = nil
	c.mu.Unlock()

	// Detach from the parent outside c.mu to respect lock ordering
	// (removeChild takes the parent's lock).
	if removeFromParent {
		removeChild(c.Context, c)
	}
}
// WithoutCancel returns a derived context that points to the parent context
// and is not canceled when parent is canceled.
// The returned context returns no Deadline or Err, and its Done channel is nil.
// Calling [Cause] on the returned context returns nil.
func WithoutCancel(parent Context) Context {
	if parent == nil {
		panic("cannot create context from nil parent")
	}
	return withoutCancelCtx{parent}
}

// withoutCancelCtx forwards only Value lookups to its wrapped context; it
// deliberately reports no deadline, no done channel, and no error.
type withoutCancelCtx struct {
	c Context
}

func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) {
	return
}

func (withoutCancelCtx) Done() <-chan struct{} {
	return nil
}

func (withoutCancelCtx) Err() error {
	return nil
}

// Value starts the lookup at c itself so the value() loop can special-case
// withoutCancelCtx (making Cause return nil).
func (c withoutCancelCtx) Value(key any) any {
	return value(c, key)
}

func (c withoutCancelCtx) String() string {
	return contextName(c.c) + ".WithoutCancel"
}
// WithDeadline returns a derived context that points to the parent context
// but has the deadline adjusted to be no later than d. If the parent's
// deadline is already earlier than d, WithDeadline(parent, d) is semantically
// equivalent to parent. The returned [Context.Done] channel is closed when
// the deadline expires, when the returned cancel function is called,
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete.
func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
	return WithDeadlineCause(parent, d, nil)
}

// WithDeadlineCause behaves like [WithDeadline] but also sets the cause of the
// returned Context when the deadline is exceeded. The returned [CancelFunc] does
// not set the cause.
func WithDeadlineCause(parent Context, d time.Time, cause error) (Context, CancelFunc) {
	if parent == nil {
		panic("cannot create context from nil parent")
	}
	if cur, ok := parent.Deadline(); ok && cur.Before(d) {
		// The current deadline is already sooner than the new one.
		return WithCancel(parent)
	}
	c := &timerCtx{
		deadline: d,
	}
	c.cancelCtx.propagateCancel(parent, c)
	dur := time.Until(d)
	if dur <= 0 {
		c.cancel(true, DeadlineExceeded, cause) // deadline has already passed
		// removeFromParent=false: the cancel above already detached c.
		return c, func() { c.cancel(false, Canceled, nil) }
	}
	// Arm the timer only if c was not canceled while propagating; the
	// mutex serializes this against a concurrent cancel.
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err.Load() == nil {
		c.timer = time.AfterFunc(dur, func() {
			c.cancel(true, DeadlineExceeded, cause)
		})
	}
	return c, func() { c.cancel(true, Canceled, nil) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
	cancelCtx
	timer *time.Timer // Under cancelCtx.mu.

	deadline time.Time
}

func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
	return c.deadline, true
}

func (c *timerCtx) String() string {
	return contextName(c.cancelCtx.Context) + ".WithDeadline(" +
		c.deadline.String() + " [" +
		time.Until(c.deadline).String() + "])"
}

// cancel cancels the embedded cancelCtx, optionally detaches from the parent,
// then stops and clears the timer so it cannot fire later.
func (c *timerCtx) cancel(removeFromParent bool, err, cause error) {
	c.cancelCtx.cancel(false, err, cause)
	if removeFromParent {
		// Remove this timerCtx from its parent cancelCtx's children.
		removeChild(c.cancelCtx.Context, c)
	}
	c.mu.Lock()
	if c.timer != nil {
		c.timer.Stop()
		c.timer = nil
	}
	c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete:
//
//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
//		defer cancel() // releases resources if slowOperation completes before timeout elapses
//		return slowOperation(ctx)
//	}
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
	return WithDeadline(parent, time.Now().Add(timeout))
}

// WithTimeoutCause behaves like [WithTimeout] but also sets the cause of the
// returned Context when the timeout expires. The returned [CancelFunc] does
// not set the cause.
func WithTimeoutCause(parent Context, timeout time.Duration, cause error) (Context, CancelFunc) {
	return WithDeadlineCause(parent, time.Now().Add(timeout), cause)
}
// WithValue returns a derived context that points to the parent Context.
// In the derived context, the value associated with key is val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The provided key must be comparable and should not be of type
// string or any other built-in type to avoid collisions between
// packages using context. Users of WithValue should define their own
// types for keys. To avoid allocating when assigning to an
// interface{}, context keys often have concrete type
// struct{}. Alternatively, exported context key variables' static
// type should be a pointer or interface.
func WithValue(parent Context, key, val any) Context {
	if parent == nil {
		panic("cannot create context from nil parent")
	}
	if key == nil {
		panic("nil key")
	}
	// Non-comparable keys would panic later, inside Value's ==; fail fast here.
	if !reflectlite.TypeOf(key).Comparable() {
		panic("key is not comparable")
	}
	return &valueCtx{parent, key, val}
}

// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
	Context
	key, val any
}
// stringify tries a bit to stringify v, without using fmt, since we don't
// want context depending on the unicode tables. This is only used by
// *valueCtx.String().
func stringify(v any) string {
	switch s := v.(type) {
	case stringer:
		return s.String()
	case string:
		return s
	case nil:
		return "<nil>"
	}
	// Fall back to the type name for values with no usable string form.
	return reflectlite.TypeOf(v).String()
}

func (c *valueCtx) String() string {
	return contextName(c.Context) + ".WithValue(" +
		stringify(c.key) + ", " +
		stringify(c.val) + ")"
}

// Value returns c's own value on a key match and otherwise walks up the
// parent chain via the iterative value() helper.
func (c *valueCtx) Value(key any) any {
	if c.key == key {
		return c.val
	}
	return value(c.Context, key)
}
// value walks the context parent chain iteratively (avoiding deep recursion
// through stacked contexts), unwrapping each known concrete type in place
// and delegating to Value only for unknown implementations.
func value(c Context, key any) any {
	for {
		switch ctx := c.(type) {
		case *valueCtx:
			if key == ctx.key {
				return ctx.val
			}
			c = ctx.Context
		case *cancelCtx:
			// A cancelCtx answers for the internal cancelCtxKey itself.
			if key == &cancelCtxKey {
				return c
			}
			c = ctx.Context
		case withoutCancelCtx:
			if key == &cancelCtxKey {
				// This implements Cause(ctx) == nil
				// when ctx is created using WithoutCancel.
				return nil
			}
			c = ctx.c
		case *timerCtx:
			if key == &cancelCtxKey {
				return &ctx.cancelCtx
			}
			c = ctx.Context
		case backgroundCtx, todoCtx:
			// Root of the chain: no values anywhere.
			return nil
		default:
			// Unknown implementation: let it resolve the key itself.
			return c.Value(key)
		}
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package aes implements AES encryption (formerly Rijndael), as defined in
// U.S. Federal Information Processing Standards Publication 197.
//
// The AES operations in this package are not implemented using constant-time algorithms.
// An exception is when running on systems with enabled hardware support for AES
// that makes these operations constant-time. Examples include amd64 systems using AES-NI
// extensions and s390x systems using Message-Security-Assist extensions.
// On such systems, when the result of NewCipher is passed to cipher.NewGCM,
// the GHASH operation used by GCM is also constant-time.
package aes
import (
"crypto/cipher"
"crypto/internal/boring"
"crypto/internal/fips140/aes"
"strconv"
)
// The AES block size in bytes.
const BlockSize = 16

// KeySizeError reports an unsupported AES key length (in bytes).
type KeySizeError int

func (k KeySizeError) Error() string {
	return "crypto/aes: invalid key size " + strconv.Itoa(int(k))
}
// NewCipher creates and returns a new [cipher.Block].
// The key argument must be the AES key,
// either 16, 24, or 32 bytes to select
// AES-128, AES-192, or AES-256.
func NewCipher(key []byte) (cipher.Block, error) {
	switch len(key) {
	case 16, 24, 32:
		// Valid AES key length; continue below.
	default:
		return nil, KeySizeError(len(key))
	}
	if boring.Enabled {
		// Delegate to the BoringCrypto implementation when it is in use.
		return boring.NewAESCipher(key)
	}
	return aes.New(key)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package des
import (
"internal/byteorder"
"sync"
)
// cryptBlock encrypts (or, with decrypt=true, decrypts) one 8-byte DES block
// from src into dst using the 16 round subkeys: initial permutation, 16
// Feistel rounds (run in reverse subkey order when decrypting), half swap,
// then final permutation.
func cryptBlock(subkeys []uint64, dst, src []byte, decrypt bool) {
	b := byteorder.BEUint64(src)
	b = permuteInitialBlock(b)
	left, right := uint32(b>>32), uint32(b)

	// Pre-rotate each half left by 1; the matching rotation was factored
	// out of the Feistel rounds and folded into feistelBox (see initFeistelBox).
	left = (left << 1) | (left >> 31)
	right = (right << 1) | (right >> 31)

	if decrypt {
		// Each feistel call performs two rounds, so 8 iterations = 16 rounds,
		// consuming the subkeys in reverse order.
		for i := 0; i < 8; i++ {
			left, right = feistel(left, right, subkeys[15-2*i], subkeys[15-(2*i+1)])
		}
	} else {
		for i := 0; i < 8; i++ {
			left, right = feistel(left, right, subkeys[2*i], subkeys[2*i+1])
		}
	}

	// Undo the pre-rotation (rotate right by 1).
	left = (left << 31) | (left >> 1)
	right = (right << 31) | (right >> 1)

	// switch left & right and perform final permutation
	preOutput := (uint64(right) << 32) | uint64(left)
	byteorder.BEPutUint64(dst, permuteFinalBlock(preOutput))
}
// DES Feistel function. feistelBox must be initialized via
// feistelBoxOnce.Do(initFeistelBox) first.
//
// Each call performs two DES rounds: the first mixes r into l using subkey
// k0, the second mixes the updated l into r using k1. The 6-bit S-box inputs
// are extracted from t a byte at a time, which works because the key halves
// were expanded by unpack so each 6-bit block sits in its own byte.
func feistel(l, r uint32, k0, k1 uint64) (lout, rout uint32) {
	var t uint32

	t = r ^ uint32(k0>>32)
	l ^= feistelBox[7][t&0x3f] ^
		feistelBox[5][(t>>8)&0x3f] ^
		feistelBox[3][(t>>16)&0x3f] ^
		feistelBox[1][(t>>24)&0x3f]

	// The rotate-by-28 (equivalently right by 4) realigns the other four
	// 6-bit groups with the low subkey word.
	t = ((r << 28) | (r >> 4)) ^ uint32(k0)
	l ^= feistelBox[6][(t)&0x3f] ^
		feistelBox[4][(t>>8)&0x3f] ^
		feistelBox[2][(t>>16)&0x3f] ^
		feistelBox[0][(t>>24)&0x3f]

	t = l ^ uint32(k1>>32)
	r ^= feistelBox[7][t&0x3f] ^
		feistelBox[5][(t>>8)&0x3f] ^
		feistelBox[3][(t>>16)&0x3f] ^
		feistelBox[1][(t>>24)&0x3f]

	t = ((l << 28) | (l >> 4)) ^ uint32(k1)
	r ^= feistelBox[6][(t)&0x3f] ^
		feistelBox[4][(t>>8)&0x3f] ^
		feistelBox[2][(t>>16)&0x3f] ^
		feistelBox[0][(t>>24)&0x3f]

	return l, r
}

// feistelBox[s][16*i+j] contains the output of permutationFunction
// for sBoxes[s][i][j] << 4*(7-s)
var feistelBox [8][64]uint32

// feistelBoxOnce guards the one-time initialization of feistelBox.
var feistelBoxOnce sync.Once
// permuteBlock performs a general DES bit permutation: output bit
// (len(permutation)-1-i) is copied from bit permutation[i] of src.
func permuteBlock(src uint64, permutation []uint8) (block uint64) {
	top := len(permutation) - 1
	for i, bitIdx := range permutation {
		bit := (src >> bitIdx) & 1
		block |= bit << uint(top-i)
	}
	return
}
// initFeistelBox fills feistelBox with the S-box outputs pre-permuted by
// permutationFunction and pre-rotated left by 1, so the Feistel rounds can
// index directly with 6-bit groups.
func initFeistelBox() {
	for s := range sBoxes {
		for i := 0; i < 4; i++ {
			for j := 0; j < 16; j++ {
				// Place this S-box's 4-bit output in its position within
				// the 32-bit word before applying the P permutation.
				f := uint64(sBoxes[s][i][j]) << (4 * (7 - uint(s)))
				f = permuteBlock(f, permutationFunction[:])

				// Row is determined by the 1st and 6th bit.
				// Column is the middle four bits.
				row := uint8(((i & 2) << 4) | i&1)
				col := uint8(j << 1)
				t := row | col

				// The rotation was performed in the feistel rounds, being factored out and now mixed into the feistelBox.
				f = (f << 1) | (f >> 31)

				feistelBox[s][t] = uint32(f)
			}
		}
	}
}
// permuteInitialBlock is equivalent to the permutation defined
// by initialPermutation.
//
// Rather than moving one bit at a time, it performs the permutation as a
// sequence of masked swap steps, each exchanging two groups of bits at a
// fixed distance via the XOR-swap identity block ^= b1 ^ b2 ^ shifts.
func permuteInitialBlock(block uint64) uint64 {
	// block = b7 b6 b5 b4 b3 b2 b1 b0 (8 bytes)
	b1 := block >> 48
	b2 := block << 48
	block ^= b1 ^ b2 ^ b1<<48 ^ b2>>48

	// block = b1 b0 b5 b4 b3 b2 b7 b6
	b1 = block >> 32 & 0xff00ff
	b2 = (block & 0xff00ff00)
	block ^= b1<<32 ^ b2 ^ b1<<8 ^ b2<<24 // exchange b0 b4 with b3 b7

	// block is now b1 b3 b5 b7 b0 b2 b4 b6, the permutation:
	//                  ...  8
	//                  ... 24
	//                  ... 40
	//                  ... 56
	//  7  6  5  4  3  2  1  0
	// 23 22 21 20 19 18 17 16
	//                  ... 32
	//                  ... 48
	// exchange 4,5,6,7 with 32,33,34,35 etc.
	b1 = block & 0x0f0f00000f0f0000
	b2 = block & 0x0000f0f00000f0f0
	block ^= b1 ^ b2 ^ b1>>12 ^ b2<<12

	// block is the permutation:
	//
	// [+8]         [+40]
	//
	//  7  6  5  4
	// 23 22 21 20
	//  3  2  1  0
	// 19 18 17 16    [+32]
	// exchange 0,1,4,5 with 18,19,22,23
	b1 = block & 0x3300330033003300
	b2 = block & 0x00cc00cc00cc00cc
	block ^= b1 ^ b2 ^ b1>>6 ^ b2<<6

	// block is the permutation:
	// 15 14
	// 13 12
	// 11 10
	//  9  8
	//  7  6
	//  5  4
	//  3  2
	//  1  0 [+16] [+32] [+64]
	// exchange 0,2,4,6 with 9,11,13,15:
	b1 = block & 0xaaaaaaaa55555555
	block ^= b1 ^ b1>>33 ^ b1<<33

	// block is the permutation:
	// 6 14 22 30 38 46 54 62
	// 4 12 20 28 36 44 52 60
	// 2 10 18 26 34 42 50 58
	// 0  8 16 24 32 40 48 56
	// 7 15 23 31 39 47 55 63
	// 5 13 21 29 37 45 53 61
	// 3 11 19 27 35 43 51 59
	// 1  9 17 25 33 41 49 57
	return block
}
// permuteFinalBlock is equivalent to the permutation defined
// by finalPermutation. It is the inverse of permuteInitialBlock: the same
// masked XOR-swap steps, applied in reverse order.
func permuteFinalBlock(b uint64) uint64 {
	// Undo the 33-bit-distance exchange.
	x := b & 0xaaaaaaaa55555555
	b ^= x ^ x>>33 ^ x<<33

	// Undo the 6-bit-distance exchange.
	x = b & 0x3300330033003300
	y := b & 0x00cc00cc00cc00cc
	b ^= x ^ y ^ x>>6 ^ y<<6

	// Undo the 12-bit-distance exchange.
	x = b & 0x0f0f00000f0f0000
	y = b & 0x0000f0f00000f0f0
	b ^= x ^ y ^ x>>12 ^ y<<12

	// Undo the byte exchanges.
	x = b >> 32 & 0xff00ff
	y = b & 0xff00ff00
	b ^= x<<32 ^ y ^ x<<8 ^ y<<24

	x = b >> 48
	y = b << 48
	b ^= x ^ y ^ x<<48 ^ y>>48
	return b
}
// ksRotate produces the 16 intermediate 28-bit key halves of the DES key
// schedule: each entry is the previous one circularly left-shifted (within
// 28 bits) by the amount given in ksRotations.
func ksRotate(in uint32) []uint32 {
	out := make([]uint32, 16)
	v := in
	for i := 0; i < 16; i++ {
		rot := ksRotations[i]
		// 28-bit circular left shift: the <<4 / >>4 pairs discard the
		// unused top nibble of the 32-bit word.
		hi := (v << (4 + rot)) >> 4
		lo := (v << 4) >> (32 - rot)
		v = hi | lo
		out[i] = v
	}
	return out
}
// generateSubkeys creates 16 56-bit subkeys from the original key.
// It also ensures feistelBox is initialized, since the subkeys are only
// ever consumed by the Feistel rounds.
func (c *desCipher) generateSubkeys(keyBytes []byte) {
	feistelBoxOnce.Do(initFeistelBox)

	// apply PC1 permutation to key
	key := byteorder.BEUint64(keyBytes)
	permutedKey := permuteBlock(key, permutedChoice1[:])

	// rotate halves of permuted key according to the rotation schedule
	leftRotations := ksRotate(uint32(permutedKey >> 28))
	rightRotations := ksRotate(uint32(permutedKey<<4) >> 4)

	// generate subkeys
	for i := 0; i < 16; i++ {
		// combine halves to form 56-bit input to PC2
		pc2Input := uint64(leftRotations[i])<<28 | uint64(rightRotations[i])
		// apply PC2 permutation to 7 byte input, then unpack so each
		// 6-bit group lands in its own byte for the Feistel rounds.
		c.subkeys[i] = unpack(permuteBlock(pc2Input, permutedChoice2[:]))
	}
}
// Expand 48-bit input to 64-bit, with each 6-bit block padded by extra two bits at the top.
// By doing so, we can have the input blocks (four bits each), and the key blocks (six bits each) well-aligned without
// extra shifts/rotations for alignments.
func unpack(x uint64) uint64 {
return ((x>>(6*1))&0xff)<<(8*0) |
((x>>(6*3))&0xff)<<(8*1) |
((x>>(6*5))&0xff)<<(8*2) |
((x>>(6*7))&0xff)<<(8*3) |
((x>>(6*0))&0xff)<<(8*4) |
((x>>(6*2))&0xff)<<(8*5) |
((x>>(6*4))&0xff)<<(8*6) |
((x>>(6*6))&0xff)<<(8*7)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package des
import (
"crypto/cipher"
"crypto/internal/fips140/alias"
"crypto/internal/fips140only"
"errors"
"internal/byteorder"
"strconv"
)
// The DES block size in bytes.
const BlockSize = 8

// KeySizeError reports an unsupported DES or TripleDES key length (in bytes).
type KeySizeError int

func (k KeySizeError) Error() string {
	return "crypto/des: invalid key size " + strconv.Itoa(int(k))
}

// desCipher is an instance of DES encryption.
// It holds only the 16 expanded round subkeys; the raw key is not retained.
type desCipher struct {
	subkeys [16]uint64
}
// NewCipher creates and returns a new [cipher.Block].
// The key must be exactly 8 bytes; DES is rejected entirely when the
// process is restricted to FIPS 140-approved algorithms.
func NewCipher(key []byte) (cipher.Block, error) {
	if fips140only.Enforced() {
		return nil, errors.New("crypto/des: use of DES is not allowed in FIPS 140-only mode")
	}
	if len(key) != 8 {
		return nil, KeySizeError(len(key))
	}
	d := &desCipher{}
	d.generateSubkeys(key)
	return d, nil
}
// BlockSize returns the DES block size, 8 bytes.
func (c *desCipher) BlockSize() int { return BlockSize }

// Encrypt encrypts the first 8-byte block of src into dst.
// It panics on short buffers or on inexactly overlapping dst/src.
func (c *desCipher) Encrypt(dst, src []byte) {
	if len(src) < BlockSize {
		panic("crypto/des: input not full block")
	}
	if len(dst) < BlockSize {
		panic("crypto/des: output not full block")
	}
	if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
		panic("crypto/des: invalid buffer overlap")
	}
	cryptBlock(c.subkeys[:], dst, src, false)
}

// Decrypt decrypts the first 8-byte block of src into dst.
// It panics on short buffers or on inexactly overlapping dst/src.
func (c *desCipher) Decrypt(dst, src []byte) {
	if len(src) < BlockSize {
		panic("crypto/des: input not full block")
	}
	if len(dst) < BlockSize {
		panic("crypto/des: output not full block")
	}
	if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
		panic("crypto/des: invalid buffer overlap")
	}
	cryptBlock(c.subkeys[:], dst, src, true)
}
// A tripleDESCipher is an instance of TripleDES encryption.
// It holds three independent single-DES key schedules, applied in
// encrypt-decrypt-encrypt (EDE) order.
type tripleDESCipher struct {
	cipher1, cipher2, cipher3 desCipher
}

// NewTripleDESCipher creates and returns a new [cipher.Block].
// The key must be exactly 24 bytes: three 8-byte DES keys, one per stage.
func NewTripleDESCipher(key []byte) (cipher.Block, error) {
	if fips140only.Enforced() {
		return nil, errors.New("crypto/des: use of TripleDES is not allowed in FIPS 140-only mode")
	}
	if len(key) != 24 {
		return nil, KeySizeError(len(key))
	}
	c := new(tripleDESCipher)
	c.cipher1.generateSubkeys(key[:8])
	c.cipher2.generateSubkeys(key[8:16])
	c.cipher3.generateSubkeys(key[16:])
	return c, nil
}
// BlockSize returns the TripleDES block size, 8 bytes.
func (c *tripleDESCipher) BlockSize() int { return BlockSize }

// Encrypt performs EDE TripleDES encryption of one 8-byte block: DES-encrypt
// with key 1, DES-decrypt with key 2, DES-encrypt with key 3. The three
// passes are run back-to-back on the rotated halves, so the initial/final
// permutations and the rotate-by-1 adjustment are done only once rather than
// per pass (compare cryptBlock).
func (c *tripleDESCipher) Encrypt(dst, src []byte) {
	if len(src) < BlockSize {
		panic("crypto/des: input not full block")
	}
	if len(dst) < BlockSize {
		panic("crypto/des: output not full block")
	}
	if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
		panic("crypto/des: invalid buffer overlap")
	}

	b := byteorder.BEUint64(src)
	b = permuteInitialBlock(b)
	left, right := uint32(b>>32), uint32(b)

	left = (left << 1) | (left >> 31)
	right = (right << 1) | (right >> 31)

	// Pass 1: encrypt with key 1 (subkeys in forward order).
	for i := 0; i < 8; i++ {
		left, right = feistel(left, right, c.cipher1.subkeys[2*i], c.cipher1.subkeys[2*i+1])
	}
	// Pass 2: decrypt with key 2 (subkeys reversed; halves swapped in the
	// call to account for the omitted inter-pass swap).
	for i := 0; i < 8; i++ {
		right, left = feistel(right, left, c.cipher2.subkeys[15-2*i], c.cipher2.subkeys[15-(2*i+1)])
	}
	// Pass 3: encrypt with key 3.
	for i := 0; i < 8; i++ {
		left, right = feistel(left, right, c.cipher3.subkeys[2*i], c.cipher3.subkeys[2*i+1])
	}

	left = (left << 31) | (left >> 1)
	right = (right << 31) | (right >> 1)

	preOutput := (uint64(right) << 32) | uint64(left)
	byteorder.BEPutUint64(dst, permuteFinalBlock(preOutput))
}
// Decrypt inverts Encrypt: DES-decrypt with key 3, DES-encrypt with key 2,
// DES-decrypt with key 1, applied to one 8-byte block with a single shared
// initial/final permutation (see Encrypt for the structure).
func (c *tripleDESCipher) Decrypt(dst, src []byte) {
	if len(src) < BlockSize {
		panic("crypto/des: input not full block")
	}
	if len(dst) < BlockSize {
		panic("crypto/des: output not full block")
	}
	if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
		panic("crypto/des: invalid buffer overlap")
	}

	b := byteorder.BEUint64(src)
	b = permuteInitialBlock(b)
	left, right := uint32(b>>32), uint32(b)

	left = (left << 1) | (left >> 31)
	right = (right << 1) | (right >> 31)

	// Pass 1: decrypt with key 3 (subkeys in reverse order).
	for i := 0; i < 8; i++ {
		left, right = feistel(left, right, c.cipher3.subkeys[15-2*i], c.cipher3.subkeys[15-(2*i+1)])
	}
	// Pass 2: encrypt with key 2 (halves swapped in the call to account
	// for the omitted inter-pass swap).
	for i := 0; i < 8; i++ {
		right, left = feistel(right, left, c.cipher2.subkeys[2*i], c.cipher2.subkeys[2*i+1])
	}
	// Pass 3: decrypt with key 1.
	for i := 0; i < 8; i++ {
		left, right = feistel(left, right, c.cipher1.subkeys[15-2*i], c.cipher1.subkeys[15-(2*i+1)])
	}

	left = (left << 31) | (left >> 1)
	right = (right << 31) | (right >> 1)

	preOutput := (uint64(right) << 32) | uint64(left)
	byteorder.BEPutUint64(dst, permuteFinalBlock(preOutput))
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ecdsa implements the Elliptic Curve Digital Signature Algorithm, as
// defined in [FIPS 186-5].
//
// Signatures generated by this package are not deterministic, but entropy is
// mixed with the private key and the message, achieving the same level of
// security in case of randomness source failure.
//
// Operations involving private keys are implemented using constant-time
// algorithms, as long as an [elliptic.Curve] returned by [elliptic.P224],
// [elliptic.P256], [elliptic.P384], or [elliptic.P521] is used.
//
// [FIPS 186-5]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
package ecdsa
import (
"crypto"
"crypto/ecdh"
"crypto/elliptic"
"crypto/internal/boring"
"crypto/internal/boring/bbig"
"crypto/internal/fips140/ecdsa"
"crypto/internal/fips140/nistec"
"crypto/internal/fips140cache"
"crypto/internal/fips140hash"
"crypto/internal/fips140only"
"crypto/internal/rand"
"crypto/sha512"
"crypto/subtle"
"errors"
"io"
"math/big"
"golang.org/x/crypto/cryptobyte"
"golang.org/x/crypto/cryptobyte/asn1"
)
// PublicKey represents an ECDSA public key.
type PublicKey struct {
	// Curve identifies the elliptic curve the key is on; its methods are
	// promoted onto the key.
	elliptic.Curve

	// X, Y are the coordinates of the public key point.
	//
	// Deprecated: modifying the raw coordinates can produce invalid keys, and may
	// invalidate internal optimizations; moreover, [big.Int] methods are not
	// suitable for operating on cryptographic values. To encode and decode
	// PublicKey values, use [PublicKey.Bytes] and [ParseUncompressedPublicKey]
	// or [crypto/x509.MarshalPKIXPublicKey] and [crypto/x509.ParsePKIXPublicKey].
	// For ECDH, use [crypto/ecdh]. For lower-level elliptic curve operations,
	// use a third-party module like filippo.io/nistec.
	X, Y *big.Int
}

// Any methods implemented on PublicKey might need to also be implemented on
// PrivateKey, as the latter embeds the former and will expose its methods.

// ECDH returns k as a [ecdh.PublicKey]. It returns an error if the key is
// invalid according to the definition of [ecdh.Curve.NewPublicKey], or if the
// Curve is not supported by crypto/ecdh.
func (pub *PublicKey) ECDH() (*ecdh.PublicKey, error) {
	c := curveToECDH(pub.Curve)
	if c == nil {
		return nil, errors.New("ecdsa: unsupported curve by crypto/ecdh")
	}
	// Re-encode through the uncompressed-point form, which ecdh validates.
	k, err := pub.Bytes()
	if err != nil {
		return nil, err
	}
	return c.NewPublicKey(k)
}
// Equal reports whether pub and x have the same value.
//
// Two keys are only considered to have the same value if they have the same Curve value.
// Note that for example [elliptic.P256] and elliptic.P256().Params() are different
// values, as the latter is a generic, non-constant-time implementation.
func (pub *PublicKey) Equal(x crypto.PublicKey) bool {
	other, ok := x.(*PublicKey)
	if !ok {
		return false
	}
	if !bigIntEqual(pub.X, other.X) || !bigIntEqual(pub.Y, other.Y) {
		return false
	}
	// Standard library Curve implementations are singletons, so this check
	// will work for those. Other Curves might be equivalent even if not
	// singletons, but there is no definitive way to check for that, and
	// better to err on the side of safety.
	return pub.Curve == other.Curve
}
// ParseUncompressedPublicKey parses a public key encoded as an uncompressed
// point according to SEC 1, Version 2.0, Section 2.3.3 (also known as the X9.62
// uncompressed format). It returns an error if the point is not in uncompressed
// form, is not on the curve, or is the point at infinity.
//
// curve must be one of [elliptic.P224], [elliptic.P256], [elliptic.P384], or
// [elliptic.P521], or ParseUncompressedPublicKey returns an error.
//
// ParseUncompressedPublicKey accepts the same format as
// [ecdh.Curve.NewPublicKey] does for NIST curves, but returns a [PublicKey]
// instead of an [ecdh.PublicKey].
//
// Note that public keys are more commonly encoded in DER (or PEM) format, which
// can be parsed with [crypto/x509.ParsePKIXPublicKey] (and [encoding/pem]).
func ParseUncompressedPublicKey(curve elliptic.Curve, data []byte) (*PublicKey, error) {
	// The uncompressed form starts with the 0x04 prefix octet.
	if len(data) == 0 || data[0] != 4 {
		return nil, errors.New("ecdsa: invalid uncompressed public key")
	}
	switch {
	case curve == elliptic.P224():
		return parseUncompressedPublicKey(ecdsa.P224(), curve, data)
	case curve == elliptic.P256():
		return parseUncompressedPublicKey(ecdsa.P256(), curve, data)
	case curve == elliptic.P384():
		return parseUncompressedPublicKey(ecdsa.P384(), curve, data)
	case curve == elliptic.P521():
		return parseUncompressedPublicKey(ecdsa.P521(), curve, data)
	}
	return nil, errors.New("ecdsa: curve not supported by ParseUncompressedPublicKey")
}
// parseUncompressedPublicKey decodes an uncompressed point with the FIPS 140
// module implementation for curve c, then wraps it as a *PublicKey.
func parseUncompressedPublicKey[P ecdsa.Point[P]](c *ecdsa.Curve[P], curve elliptic.Curve, data []byte) (*PublicKey, error) {
	fipsKey, err := ecdsa.NewPublicKey(c, data)
	if err != nil {
		return nil, err
	}
	return publicKeyFromFIPS(curve, fipsKey)
}
// Bytes encodes the public key as an uncompressed point according to SEC 1,
// Version 2.0, Section 2.3.3 (also known as the X9.62 uncompressed format).
// It returns an error if the public key is invalid.
//
// PublicKey.Curve must be one of [elliptic.P224], [elliptic.P256],
// [elliptic.P384], or [elliptic.P521], or Bytes returns an error.
//
// Bytes returns the same format as [ecdh.PublicKey.Bytes] does for NIST curves.
//
// Note that public keys are more commonly encoded in DER (or PEM) format, which
// can be generated with [crypto/x509.MarshalPKIXPublicKey] (and [encoding/pem]).
func (pub *PublicKey) Bytes() ([]byte, error) {
	switch {
	case pub.Curve == elliptic.P224():
		return publicKeyBytes(ecdsa.P224(), pub)
	case pub.Curve == elliptic.P256():
		return publicKeyBytes(ecdsa.P256(), pub)
	case pub.Curve == elliptic.P384():
		return publicKeyBytes(ecdsa.P384(), pub)
	case pub.Curve == elliptic.P521():
		return publicKeyBytes(ecdsa.P521(), pub)
	}
	return nil, errors.New("ecdsa: curve not supported by PublicKey.Bytes")
}
// publicKeyBytes converts pub to its FIPS 140 module representation and
// returns the module's uncompressed-point encoding.
func publicKeyBytes[P ecdsa.Point[P]](c *ecdsa.Curve[P], pub *PublicKey) ([]byte, error) {
	fipsKey, err := publicKeyToFIPS(c, pub)
	if err != nil {
		return nil, err
	}
	return fipsKey.Bytes(), nil
}
// PrivateKey represents an ECDSA private key.
//
// The embedded PublicKey holds the corresponding public key; PrivateKey
// therefore also exposes the PublicKey methods.
type PrivateKey struct {
	PublicKey
	// D is the private scalar value.
	//
	// Deprecated: modifying the raw value can produce invalid keys, and may
	// invalidate internal optimizations; moreover, [big.Int] methods are not
	// suitable for operating on cryptographic values. To encode and decode
	// PrivateKey values, use [PrivateKey.Bytes] and [ParseRawPrivateKey] or
	// [crypto/x509.MarshalPKCS8PrivateKey] and [crypto/x509.ParsePKCS8PrivateKey].
	// For ECDH, use [crypto/ecdh].
	D *big.Int
}
// ECDH returns k as a [ecdh.PrivateKey]. It returns an error if the key is
// invalid according to the definition of [ecdh.Curve.NewPrivateKey], or if the
// Curve is not supported by [crypto/ecdh].
func (priv *PrivateKey) ECDH() (*ecdh.PrivateKey, error) {
	ecdhCurve := curveToECDH(priv.Curve)
	if ecdhCurve == nil {
		return nil, errors.New("ecdsa: unsupported curve by crypto/ecdh")
	}
	encoded, err := priv.Bytes()
	if err != nil {
		return nil, err
	}
	return ecdhCurve.NewPrivateKey(encoded)
}
func curveToECDH(c elliptic.Curve) ecdh.Curve {
switch c {
case elliptic.P256():
return ecdh.P256()
case elliptic.P384():
return ecdh.P384()
case elliptic.P521():
return ecdh.P521()
default:
return nil
}
}
// Public returns the public key corresponding to priv.
// It implements the [crypto.Signer] interface.
func (priv *PrivateKey) Public() crypto.PublicKey {
	pub := &priv.PublicKey
	return pub
}
// Equal reports whether priv and x have the same value.
//
// See [PublicKey.Equal] for details on how Curve is compared.
func (priv *PrivateKey) Equal(x crypto.PrivateKey) bool {
	other, ok := x.(*PrivateKey)
	if !ok {
		return false
	}
	if !priv.PublicKey.Equal(&other.PublicKey) {
		return false
	}
	return bigIntEqual(priv.D, other.D)
}
// bigIntEqual reports whether a and b are equal leaking only their bit length
// through timing side-channels.
func bigIntEqual(a, b *big.Int) bool {
return subtle.ConstantTimeCompare(a.Bytes(), b.Bytes()) == 1
}
// ParseRawPrivateKey parses a private key encoded as a fixed-length big-endian
// integer, according to SEC 1, Version 2.0, Section 2.3.6 (sometimes referred
// to as the raw format). It returns an error if the value is not reduced modulo
// the curve's order, or if it's zero.
//
// curve must be one of [elliptic.P224], [elliptic.P256], [elliptic.P384], or
// [elliptic.P521], or ParseRawPrivateKey returns an error.
//
// ParseRawPrivateKey accepts the same format as [ecdh.Curve.NewPrivateKey] does
// for NIST curves, but returns a [PrivateKey] instead of an [ecdh.PrivateKey].
//
// Note that private keys are more commonly encoded in ASN.1 or PKCS#8 format,
// which can be parsed with [crypto/x509.ParseECPrivateKey] or
// [crypto/x509.ParsePKCS8PrivateKey] (and [encoding/pem]).
func ParseRawPrivateKey(curve elliptic.Curve, data []byte) (*PrivateKey, error) {
	switch {
	case curve == elliptic.P224():
		return parseRawPrivateKey(ecdsa.P224(), nistec.NewP224Point, curve, data)
	case curve == elliptic.P256():
		return parseRawPrivateKey(ecdsa.P256(), nistec.NewP256Point, curve, data)
	case curve == elliptic.P384():
		return parseRawPrivateKey(ecdsa.P384(), nistec.NewP384Point, curve, data)
	case curve == elliptic.P521():
		return parseRawPrivateKey(ecdsa.P521(), nistec.NewP521Point, curve, data)
	}
	return nil, errors.New("ecdsa: curve not supported by ParseRawPrivateKey")
}
// parseRawPrivateKey validates the raw scalar by computing the matching public
// point, then builds the FIPS 140 module key and converts it to a *PrivateKey.
func parseRawPrivateKey[P ecdsa.Point[P]](c *ecdsa.Curve[P], newPoint func() P, curve elliptic.Curve, data []byte) (*PrivateKey, error) {
	pubPoint, err := newPoint().ScalarBaseMult(data)
	if err != nil {
		return nil, err
	}
	fipsKey, err := ecdsa.NewPrivateKey(c, data, pubPoint.Bytes())
	if err != nil {
		return nil, err
	}
	return privateKeyFromFIPS(curve, fipsKey)
}
// Bytes encodes the private key as a fixed-length big-endian integer according
// to SEC 1, Version 2.0, Section 2.3.6 (sometimes referred to as the raw
// format). It returns an error if the private key is invalid.
//
// PrivateKey.Curve must be one of [elliptic.P224], [elliptic.P256],
// [elliptic.P384], or [elliptic.P521], or Bytes returns an error.
//
// Bytes returns the same format as [ecdh.PrivateKey.Bytes] does for NIST curves.
//
// Note that private keys are more commonly encoded in ASN.1 or PKCS#8 format,
// which can be generated with [crypto/x509.MarshalECPrivateKey] or
// [crypto/x509.MarshalPKCS8PrivateKey] (and [encoding/pem]).
func (priv *PrivateKey) Bytes() ([]byte, error) {
	switch {
	case priv.Curve == elliptic.P224():
		return privateKeyBytes(ecdsa.P224(), priv)
	case priv.Curve == elliptic.P256():
		return privateKeyBytes(ecdsa.P256(), priv)
	case priv.Curve == elliptic.P384():
		return privateKeyBytes(ecdsa.P384(), priv)
	case priv.Curve == elliptic.P521():
		return privateKeyBytes(ecdsa.P521(), priv)
	}
	return nil, errors.New("ecdsa: curve not supported by PrivateKey.Bytes")
}
// privateKeyBytes converts priv to its FIPS 140 module representation and
// returns the module's fixed-length scalar encoding.
func privateKeyBytes[P ecdsa.Point[P]](c *ecdsa.Curve[P], priv *PrivateKey) ([]byte, error) {
	fipsKey, err := privateKeyToFIPS(c, priv)
	if err != nil {
		return nil, err
	}
	return fipsKey.Bytes(), nil
}
// Sign signs a hash (which should be the result of hashing a larger message
// with opts.HashFunc()) using the private key, priv. If the hash is longer than
// the bit-length of the private key's curve order, the hash will be truncated
// to that length. It returns the ASN.1 encoded signature, like [SignASN1].
//
// If random is not nil, the signature is randomized. Most applications should use
// [crypto/rand.Reader] as random, but unless GODEBUG=cryptocustomrand=1 is set, a
// secure source of random bytes is always used, and the actual Reader is ignored.
// The GODEBUG setting will be removed in a future Go release. Instead, use
// [testing/cryptotest.SetGlobalRandom].
//
// If random is nil, Sign will produce a deterministic signature according to RFC
// 6979. When producing a deterministic signature, opts.HashFunc() must be the
// function used to produce digest and priv.Curve must be one of
// [elliptic.P224], [elliptic.P256], [elliptic.P384], or [elliptic.P521].
func (priv *PrivateKey) Sign(random io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
	// A nil reader selects the deterministic (RFC 6979) path.
	if random == nil {
		return signRFC6979(priv, digest, opts)
	}
	return SignASN1(rand.CustomReader(random), priv, digest)
}
// GenerateKey generates a new ECDSA private key for the specified curve.
//
// Since Go 1.26, a secure source of random bytes is always used, and the Reader is
// ignored unless GODEBUG=cryptocustomrand=1 is set. This setting will be removed
// in a future Go release. Instead, use [testing/cryptotest.SetGlobalRandom].
func GenerateKey(c elliptic.Curve, r io.Reader) (*PrivateKey, error) {
	if boring.Enabled && rand.IsDefaultReader(r) {
		x, y, d, err := boring.GenerateKeyECDSA(c.Params().Name)
		if err != nil {
			return nil, err
		}
		pub := PublicKey{Curve: c, X: bbig.Dec(x), Y: bbig.Dec(y)}
		return &PrivateKey{PublicKey: pub, D: bbig.Dec(d)}, nil
	}
	boring.UnreachableExceptTests()
	r = rand.CustomReader(r)
	// Dispatch on the curve parameters so that wrappers of the standard
	// curves still hit the FIPS 140 module implementations.
	params := c.Params()
	switch {
	case params == elliptic.P224().Params():
		return generateFIPS(c, ecdsa.P224(), r)
	case params == elliptic.P256().Params():
		return generateFIPS(c, ecdsa.P256(), r)
	case params == elliptic.P384().Params():
		return generateFIPS(c, ecdsa.P384(), r)
	case params == elliptic.P521().Params():
		return generateFIPS(c, ecdsa.P521(), r)
	}
	return generateLegacy(c, r)
}
// generateFIPS generates a key with the FIPS 140 module implementation for c
// and converts it to the crypto/ecdsa representation.
func generateFIPS[P ecdsa.Point[P]](curve elliptic.Curve, c *ecdsa.Curve[P], rand io.Reader) (*PrivateKey, error) {
	if fips140only.Enforced() && !fips140only.ApprovedRandomReader(rand) {
		return nil, errors.New("crypto/ecdsa: only crypto/rand.Reader is allowed in FIPS 140-only mode")
	}
	k, err := ecdsa.GenerateKey(c, rand)
	if err != nil {
		return nil, err
	}
	return privateKeyFromFIPS(curve, k)
}
// SignASN1 signs a hash (which should be the result of hashing a larger message)
// using the private key, priv. If the hash is longer than the bit-length of the
// private key's curve order, the hash will be truncated to that length. It
// returns the ASN.1 encoded signature.
//
// The signature is randomized. Since Go 1.26, a secure source of random bytes
// is always used, and the Reader is ignored unless GODEBUG=cryptocustomrand=1
// is set. This setting will be removed in a future Go release. Instead, use
// [testing/cryptotest.SetGlobalRandom].
func SignASN1(r io.Reader, priv *PrivateKey, hash []byte) ([]byte, error) {
	if boring.Enabled && rand.IsDefaultReader(r) {
		b, err := boringPrivateKey(priv)
		if err != nil {
			return nil, err
		}
		return boring.SignMarshalECDSA(b, hash)
	}
	boring.UnreachableExceptTests()
	r = rand.CustomReader(r)
	// Dispatch on the curve parameters so that wrappers of the standard
	// curves still hit the FIPS 140 module implementations.
	params := priv.Curve.Params()
	switch {
	case params == elliptic.P224().Params():
		return signFIPS(ecdsa.P224(), priv, r, hash)
	case params == elliptic.P256().Params():
		return signFIPS(ecdsa.P256(), priv, r, hash)
	case params == elliptic.P384().Params():
		return signFIPS(ecdsa.P384(), priv, r, hash)
	case params == elliptic.P521().Params():
		return signFIPS(ecdsa.P521(), priv, r, hash)
	}
	return signLegacy(priv, r, hash)
}
// signFIPS produces a hedged signature with the FIPS 140 module
// implementation and returns it ASN.1-encoded.
func signFIPS[P ecdsa.Point[P]](c *ecdsa.Curve[P], priv *PrivateKey, rand io.Reader, hash []byte) ([]byte, error) {
	if fips140only.Enforced() && !fips140only.ApprovedRandomReader(rand) {
		return nil, errors.New("crypto/ecdsa: only crypto/rand.Reader is allowed in FIPS 140-only mode")
	}
	fipsKey, err := privateKeyToFIPS(c, priv)
	if err != nil {
		return nil, err
	}
	// Always using SHA-512 instead of the hash that computed hash is
	// technically a violation of draft-irtf-cfrg-det-sigs-with-noise-04 but in
	// our API we don't get to know what it was, and this has no security impact.
	sig, err := ecdsa.Sign(c, sha512.New, fipsKey, rand, hash)
	if err != nil {
		return nil, err
	}
	return encodeSignature(sig.R, sig.S)
}
// signRFC6979 produces a deterministic (RFC 6979) signature for hash.
// opts.HashFunc() must be the function that produced hash, and the curve must
// be one of the NIST curves backed by the FIPS 140 module.
func signRFC6979(priv *PrivateKey, hash []byte, opts crypto.SignerOpts) ([]byte, error) {
	if opts == nil {
		return nil, errors.New("ecdsa: Sign called with nil opts")
	}
	h := opts.HashFunc()
	if len(hash) != h.Size() {
		return nil, errors.New("ecdsa: hash length does not match hash function")
	}
	params := priv.Curve.Params()
	switch {
	case params == elliptic.P224().Params():
		return signFIPSDeterministic(ecdsa.P224(), h, priv, hash)
	case params == elliptic.P256().Params():
		return signFIPSDeterministic(ecdsa.P256(), h, priv, hash)
	case params == elliptic.P384().Params():
		return signFIPSDeterministic(ecdsa.P384(), h, priv, hash)
	case params == elliptic.P521().Params():
		return signFIPSDeterministic(ecdsa.P521(), h, priv, hash)
	}
	return nil, errors.New("ecdsa: curve not supported by deterministic signatures")
}
// signFIPSDeterministic produces an RFC 6979 deterministic signature with the
// FIPS 140 module implementation and returns it ASN.1-encoded.
func signFIPSDeterministic[P ecdsa.Point[P]](c *ecdsa.Curve[P], hashFunc crypto.Hash, priv *PrivateKey, hash []byte) ([]byte, error) {
	fipsKey, err := privateKeyToFIPS(c, priv)
	if err != nil {
		return nil, err
	}
	newHash := fips140hash.UnwrapNew(hashFunc.New)
	if fips140only.Enforced() && !fips140only.ApprovedHash(newHash()) {
		return nil, errors.New("crypto/ecdsa: use of hash functions other than SHA-2 or SHA-3 is not allowed in FIPS 140-only mode")
	}
	sig, err := ecdsa.SignDeterministic(c, newHash, fipsKey, hash)
	if err != nil {
		return nil, err
	}
	return encodeSignature(sig.R, sig.S)
}
// encodeSignature DER-encodes the r and s big-endian byte values as an ASN.1
// SEQUENCE of two INTEGERs.
func encodeSignature(r, s []byte) ([]byte, error) {
	var builder cryptobyte.Builder
	builder.AddASN1(asn1.SEQUENCE, func(seq *cryptobyte.Builder) {
		addASN1IntBytes(seq, r)
		addASN1IntBytes(seq, s)
	})
	return builder.Bytes()
}
// addASN1IntBytes encodes in ASN.1 a positive integer represented as
// a big-endian byte slice with zero or more leading zeroes.
func addASN1IntBytes(b *cryptobyte.Builder, bytes []byte) {
	// DER requires a minimal encoding: drop leading zero octets.
	for len(bytes) > 0 && bytes[0] == 0 {
		bytes = bytes[1:]
	}
	if len(bytes) == 0 {
		// The value was zero, which is not a valid r or s.
		b.SetError(errors.New("invalid integer"))
		return
	}
	b.AddASN1(asn1.INTEGER, func(c *cryptobyte.Builder) {
		// A set high bit would be read back as a negative number;
		// prepend a zero octet to keep the INTEGER positive.
		if bytes[0]&0x80 != 0 {
			c.AddUint8(0)
		}
		c.AddBytes(bytes)
	})
}
// VerifyASN1 verifies the ASN.1 encoded signature, sig, of hash using the
// public key, pub. Its return value records whether the signature is valid.
//
// The inputs are not considered confidential, and may leak through timing side
// channels, or if an attacker has control of part of the inputs.
func VerifyASN1(pub *PublicKey, hash, sig []byte) bool {
	if boring.Enabled {
		key, err := boringPublicKey(pub)
		if err != nil {
			return false
		}
		return boring.VerifyECDSA(key, hash, sig)
	}
	boring.UnreachableExceptTests()
	// Dispatch on the curve parameters so that wrappers of the standard
	// curves still hit the FIPS 140 module implementations.
	params := pub.Curve.Params()
	switch {
	case params == elliptic.P224().Params():
		return verifyFIPS(ecdsa.P224(), pub, hash, sig)
	case params == elliptic.P256().Params():
		return verifyFIPS(ecdsa.P256(), pub, hash, sig)
	case params == elliptic.P384().Params():
		return verifyFIPS(ecdsa.P384(), pub, hash, sig)
	case params == elliptic.P521().Params():
		return verifyFIPS(ecdsa.P521(), pub, hash, sig)
	}
	return verifyLegacy(pub, hash, sig)
}
// verifyFIPS checks an ASN.1 signature with the FIPS 140 module
// implementation, returning false on any parse or verification failure.
func verifyFIPS[P ecdsa.Point[P]](c *ecdsa.Curve[P], pub *PublicKey, hash, sig []byte) bool {
	r, s, err := parseSignature(sig)
	if err != nil {
		return false
	}
	fipsPub, err := publicKeyToFIPS(c, pub)
	if err != nil {
		return false
	}
	return ecdsa.Verify(c, fipsPub, hash, &ecdsa.Signature{R: r, S: s}) == nil
}
// parseSignature splits a DER-encoded ECDSA signature (a SEQUENCE of two
// INTEGERs) into its big-endian r and s byte values.
func parseSignature(sig []byte) (r, s []byte, err error) {
	input := cryptobyte.String(sig)
	var inner cryptobyte.String
	ok := input.ReadASN1(&inner, asn1.SEQUENCE) &&
		input.Empty() &&
		inner.ReadASN1Integer(&r) &&
		inner.ReadASN1Integer(&s) &&
		inner.Empty()
	if !ok {
		return nil, nil, errors.New("invalid ASN.1")
	}
	return r, s, nil
}
// publicKeyFromFIPS converts a FIPS 140 module public key to a *PublicKey,
// decoding the module's uncompressed-point encoding into affine coordinates.
func publicKeyFromFIPS(curve elliptic.Curve, pub *ecdsa.PublicKey) (*PublicKey, error) {
	x, y, err := pointToAffine(curve, pub.Bytes())
	if err != nil {
		return nil, err
	}
	key := &PublicKey{Curve: curve, X: x, Y: y}
	return key, nil
}
// privateKeyFromFIPS converts a FIPS 140 module private key to a *PrivateKey.
func privateKeyFromFIPS(curve elliptic.Curve, priv *ecdsa.PrivateKey) (*PrivateKey, error) {
	publicKey, err := publicKeyFromFIPS(curve, priv.PublicKey())
	if err != nil {
		return nil, err
	}
	d := new(big.Int).SetBytes(priv.Bytes())
	return &PrivateKey{PublicKey: *publicKey, D: d}, nil
}
// publicKeyToFIPS encodes pub as an uncompressed point and builds the FIPS 140
// module public key, which validates that the point is on the curve.
func publicKeyToFIPS[P ecdsa.Point[P]](c *ecdsa.Curve[P], pub *PublicKey) (*ecdsa.PublicKey, error) {
	encoded, err := pointFromAffine(pub.Curve, pub.X, pub.Y)
	if err != nil {
		return nil, err
	}
	return ecdsa.NewPublicKey(c, encoded)
}
// privateKeyCache memoizes the FIPS 140 module representation of a
// *PrivateKey, keyed by the PrivateKey pointer, so the key is not re-validated
// on every operation.
var privateKeyCache fips140cache.Cache[PrivateKey, ecdsa.PrivateKey]

// privateKeyToFIPS converts priv to the FIPS 140 module representation,
// validating the public point and the scalar range. Cached entries are
// revalidated by comparing both encodings in constant time, in case the
// deprecated D/X/Y fields were mutated since the entry was created.
func privateKeyToFIPS[P ecdsa.Point[P]](c *ecdsa.Curve[P], priv *PrivateKey) (*ecdsa.PrivateKey, error) {
	Q, err := pointFromAffine(priv.Curve, priv.X, priv.Y)
	if err != nil {
		return nil, err
	}
	// Reject values that would not get correctly encoded.
	if priv.D.BitLen() > priv.Curve.Params().N.BitLen() {
		return nil, errors.New("ecdsa: private key scalar too large")
	}
	if priv.D.Sign() <= 0 {
		return nil, errors.New("ecdsa: private key scalar is zero or negative")
	}
	// Fixed-length big-endian encoding of D, padded to the order's byte size.
	size := (priv.Curve.Params().N.BitLen() + 7) / 8
	const maxScalarSize = 66 // enough for a P-521 private key
	if size > maxScalarSize {
		return nil, errors.New("ecdsa: internal error: curve size too large")
	}
	D := priv.D.FillBytes(make([]byte, size, maxScalarSize))
	return privateKeyCache.Get(priv, func() (*ecdsa.PrivateKey, error) {
		return ecdsa.NewPrivateKey(c, D, Q)
	}, func(k *ecdsa.PrivateKey) bool {
		return subtle.ConstantTimeCompare(k.PublicKey().Bytes(), Q) == 1 &&
			subtle.ConstantTimeCompare(k.Bytes(), D) == 1
	})
}
// pointFromAffine is used to convert the PublicKey to a nistec SetBytes input.
func pointFromAffine(curve elliptic.Curve, x, y *big.Int) ([]byte, error) {
bitSize := curve.Params().BitSize
// Reject values that would not get correctly encoded.
if x.Sign() < 0 || y.Sign() < 0 {
return nil, errors.New("negative coordinate")
}
if x.BitLen() > bitSize || y.BitLen() > bitSize {
return nil, errors.New("overflowing coordinate")
}
// Encode the coordinates and let [ecdsa.NewPublicKey] reject invalid points.
byteLen := (bitSize + 7) / 8
buf := make([]byte, 1+2*byteLen)
buf[0] = 4 // uncompressed point
x.FillBytes(buf[1 : 1+byteLen])
y.FillBytes(buf[1+byteLen : 1+2*byteLen])
return buf, nil
}
// pointToAffine is used to convert a nistec Bytes encoding to a PublicKey.
func pointToAffine(curve elliptic.Curve, p []byte) (x, y *big.Int, err error) {
if len(p) == 1 && p[0] == 0 {
// This is the encoding of the point at infinity.
return nil, nil, errors.New("ecdsa: public key point is the infinity")
}
byteLen := (curve.Params().BitSize + 7) / 8
x = new(big.Int).SetBytes(p[1 : 1+byteLen])
y = new(big.Int).SetBytes(p[1+byteLen:])
return x, y, nil
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ecdsa
import (
"crypto/elliptic"
"crypto/internal/fips140only"
"errors"
"io"
"math/big"
"math/rand/v2"
"golang.org/x/crypto/cryptobyte"
"golang.org/x/crypto/cryptobyte/asn1"
)
// This file contains a math/big implementation of ECDSA that is only used for
// deprecated custom curves.

// generateLegacy generates a key on a custom (non-FIPS) curve with the
// math/big implementation.
func generateLegacy(c elliptic.Curve, rand io.Reader) (*PrivateKey, error) {
	if fips140only.Enforced() {
		return nil, errors.New("crypto/ecdsa: use of custom curves is not allowed in FIPS 140-only mode")
	}
	d, err := randFieldElement(c, rand)
	if err != nil {
		return nil, err
	}
	priv := &PrivateKey{D: d}
	priv.PublicKey.Curve = c
	priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(d.Bytes())
	return priv, nil
}
// hashToInt converts a hash value to an integer. Per FIPS 186-4, Section 6.4,
// we use the left-most bits of the hash to match the bit-length of the order of
// the curve. This also performs Step 5 of SEC 1, Version 2.0, Section 4.1.3.
func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
orderBits := c.Params().N.BitLen()
orderBytes := (orderBits + 7) / 8
if len(hash) > orderBytes {
hash = hash[:orderBytes]
}
ret := new(big.Int).SetBytes(hash)
excess := len(hash)*8 - orderBits
if excess > 0 {
ret.Rsh(ret, uint(excess))
}
return ret
}
var errZeroParam = errors.New("zero parameter")

// Sign signs a hash (which should be the result of hashing a larger message)
// using the private key, priv. If the hash is longer than the bit-length of the
// private key's curve order, the hash will be truncated to that length. It
// returns the signature as a pair of integers. Most applications should use
// [SignASN1] instead of dealing directly with r, s.
//
// The signature is randomized. Since Go 1.26, a secure source of random bytes
// is always used, and the Reader is ignored unless GODEBUG=cryptocustomrand=1
// is set. This setting will be removed in a future Go release. Instead, use
// [testing/cryptotest.SetGlobalRandom].
func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
	sig, err := SignASN1(rand, priv, hash)
	if err != nil {
		return nil, nil, err
	}
	// Decode the ASN.1 SEQUENCE produced by SignASN1 back into (r, s).
	r, s = new(big.Int), new(big.Int)
	input := cryptobyte.String(sig)
	var inner cryptobyte.String
	ok := input.ReadASN1(&inner, asn1.SEQUENCE) &&
		input.Empty() &&
		inner.ReadASN1Integer(r) &&
		inner.ReadASN1Integer(s) &&
		inner.Empty()
	if !ok {
		return nil, nil, errors.New("invalid ASN.1 from SignASN1")
	}
	return r, s, nil
}
// signLegacy produces an ECDSA signature with the math/big implementation,
// used only for deprecated custom curves. It returns the signature
// ASN.1-encoded, like SignASN1.
func signLegacy(priv *PrivateKey, csprng io.Reader, hash []byte) (sig []byte, err error) {
	if fips140only.Enforced() {
		return nil, errors.New("crypto/ecdsa: use of custom curves is not allowed in FIPS 140-only mode")
	}
	c := priv.Curve
	// A cheap version of hedged signatures, for the deprecated path.
	// The entropy read from csprng is XOR-mixed with the private key and the
	// message, and the result keys a ChaCha8 stream that drives nonce
	// generation, so a weak entropy source alone does not expose the key.
	var seed [32]byte
	if _, err := io.ReadFull(csprng, seed[:]); err != nil {
		return nil, err
	}
	for i, b := range priv.D.Bytes() {
		seed[i%32] ^= b
	}
	for i, b := range hash {
		seed[i%32] ^= b
	}
	// rand here is math/rand/v2; ChaCha8 gives a deterministic stream per seed.
	csprng = rand.NewChaCha8(seed)
	// SEC 1, Version 2.0, Section 4.1.3
	N := c.Params().N
	if N.Sign() == 0 {
		return nil, errZeroParam
	}
	var k, kInv, r, s *big.Int
	for {
		for {
			// Pick a nonce k and compute r = x(k*G) mod N, retrying if r is zero.
			k, err = randFieldElement(c, csprng)
			if err != nil {
				return nil, err
			}
			kInv = new(big.Int).ModInverse(k, N)
			r, _ = c.ScalarBaseMult(k.Bytes())
			r.Mod(r, N)
			if r.Sign() != 0 {
				break
			}
		}
		// s = k^-1 * (e + r*D) mod N, retrying with a new nonce if s is zero.
		e := hashToInt(hash, c)
		s = new(big.Int).Mul(priv.D, r)
		s.Add(s, e)
		s.Mul(s, kInv)
		s.Mod(s, N) // N != 0
		if s.Sign() != 0 {
			break
		}
	}
	return encodeSignature(r.Bytes(), s.Bytes())
}
// Verify verifies the signature in r, s of hash using the public key, pub. Its
// return value records whether the signature is valid. Most applications should
// use VerifyASN1 instead of dealing directly with r, s.
//
// The inputs are not considered confidential, and may leak through timing side
// channels, or if an attacker has control of part of the inputs.
func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
	// r and s must both be positive.
	if r.Sign() <= 0 || s.Sign() <= 0 {
		return false
	}
	encoded, err := encodeSignature(r.Bytes(), s.Bytes())
	if err != nil {
		return false
	}
	return VerifyASN1(pub, hash, encoded)
}
// verifyLegacy checks an ASN.1 signature with the math/big implementation,
// used only for deprecated custom curves.
func verifyLegacy(pub *PublicKey, hash []byte, sig []byte) bool {
	if fips140only.Enforced() {
		panic("crypto/ecdsa: use of custom curves is not allowed in FIPS 140-only mode")
	}
	rBytes, sBytes, err := parseSignature(sig)
	if err != nil {
		return false
	}
	r, s := new(big.Int).SetBytes(rBytes), new(big.Int).SetBytes(sBytes)
	c := pub.Curve
	N := c.Params().N
	// r and s must be in [1, N-1].
	if r.Sign() <= 0 || s.Sign() <= 0 {
		return false
	}
	if r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {
		return false
	}
	// SEC 1, Version 2.0, Section 4.1.4
	e := hashToInt(hash, c)
	w := new(big.Int).ModInverse(s, N)
	// Note these Mul calls reuse e and w as their receivers: u1 aliases e and
	// u2 aliases w, which is safe because neither is read again afterwards.
	u1 := e.Mul(e, w)
	u1.Mod(u1, N)
	u2 := w.Mul(r, w)
	u2.Mod(u2, N)
	// The signature is valid iff x(u1*G + u2*Q) mod N == r.
	x1, y1 := c.ScalarBaseMult(u1.Bytes())
	x2, y2 := c.ScalarMult(pub.X, pub.Y, u2.Bytes())
	x, y := c.Add(x1, y1, x2, y2)
	// (0, 0) is how the math/big implementation encodes the point at infinity.
	if x.Sign() == 0 && y.Sign() == 0 {
		return false
	}
	x.Mod(x, N)
	return x.Cmp(r) == 0
}
var one = new(big.Int).SetInt64(1)
// randFieldElement returns a random element of the order of the given
// curve using the procedure given in FIPS 186-4, Appendix B.5.2.
func randFieldElement(c elliptic.Curve, rand io.Reader) (k *big.Int, err error) {
for {
N := c.Params().N
b := make([]byte, (N.BitLen()+7)/8)
if _, err = io.ReadFull(rand, b); err != nil {
return
}
if excess := len(b)*8 - N.BitLen(); excess > 0 {
b[0] >>= excess
}
k = new(big.Int).SetBytes(b)
if k.Sign() != 0 && k.Cmp(N) < 0 {
return
}
}
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !boringcrypto
package ecdsa
import "crypto/internal/boring"
// boringPublicKey is the stub used when the boringcrypto build tag is not set.
// Callers gate on boring.Enabled, so this must never be reached.
func boringPublicKey(*PublicKey) (*boring.PublicKeyECDSA, error) {
	panic("boringcrypto: not available")
}
// boringPrivateKey is the stub used when the boringcrypto build tag is not set.
// Callers gate on boring.Enabled, so this must never be reached.
func boringPrivateKey(*PrivateKey) (*boring.PrivateKeyECDSA, error) {
	panic("boringcrypto: not available")
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// Operations involving private keys are implemented using constant-time
// algorithms.
package ed25519
import (
"crypto"
"crypto/internal/fips140/ed25519"
"crypto/internal/fips140cache"
"crypto/internal/fips140only"
"crypto/internal/rand"
cryptorand "crypto/rand"
"crypto/subtle"
"errors"
"internal/godebug"
"io"
"strconv"
)
const (
	// PublicKeySize is the size, in bytes, of public keys as used in this package.
	PublicKeySize = 32
	// PrivateKeySize is the size, in bytes, of private keys as used in this package.
	// A private key is the 32-byte seed followed by the 32-byte public key.
	PrivateKeySize = 64
	// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
	SignatureSize = 64
	// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
	SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
type PublicKey []byte

// Any methods implemented on PublicKey might need to also be implemented on
// PrivateKey, as the latter embeds the former and will expose its methods.

// Equal reports whether pub and x have the same value.
func (pub PublicKey) Equal(x crypto.PublicKey) bool {
	other, ok := x.(PublicKey)
	if !ok {
		return false
	}
	return subtle.ConstantTimeCompare(pub, other) == 1
}
// PrivateKey is the type of Ed25519 private keys. It implements [crypto.Signer].
type PrivateKey []byte

// Public returns the [PublicKey] corresponding to priv.
func (priv PrivateKey) Public() crypto.PublicKey {
	// The public key is stored in the second half of the private key.
	pub := make([]byte, PublicKeySize)
	copy(pub, priv[32:])
	return PublicKey(pub)
}

// Equal reports whether priv and x have the same value.
func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
	other, ok := x.(PrivateKey)
	if !ok {
		return false
	}
	return subtle.ConstantTimeCompare(priv, other) == 1
}

// Seed returns the private key seed corresponding to priv. It is provided for
// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
// in this package.
func (priv PrivateKey) Seed() []byte {
	seed := make([]byte, SeedSize)
	copy(seed, priv[:SeedSize])
	return seed
}
// privateKeyCache uses a pointer to the first byte of underlying storage as a
// key, because [PrivateKey] is a slice header passed around by value.
var privateKeyCache fips140cache.Cache[byte, ed25519.PrivateKey]

// Sign signs the given message with priv. rand is ignored and can be nil.
//
// If opts.HashFunc() is [crypto.SHA512], the pre-hashed variant Ed25519ph is used
// and message is expected to be a SHA-512 hash, otherwise opts.HashFunc() must
// be [crypto.Hash](0) and the message must not be hashed, as Ed25519 performs two
// passes over messages to be signed.
//
// A value of type [Options] can be used as opts, or crypto.Hash(0) or
// crypto.SHA512 directly to select plain Ed25519 or Ed25519ph, respectively.
func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
	k, err := privateKeyCache.Get(&priv[0], func() (*ed25519.PrivateKey, error) {
		return ed25519.NewPrivateKey(priv)
	}, func(k *ed25519.PrivateKey) bool {
		return subtle.ConstantTimeCompare(priv, k.Bytes()) == 1
	})
	if err != nil {
		return nil, err
	}
	var context string
	if o, ok := opts.(*Options); ok {
		context = o.Context
	}
	hash := opts.HashFunc()
	if hash == crypto.SHA512 { // Ed25519ph
		return ed25519.SignPH(k, message, context)
	}
	if hash == crypto.Hash(0) {
		if context != "" { // Ed25519ctx
			if fips140only.Enforced() {
				return nil, errors.New("crypto/ed25519: use of Ed25519ctx is not allowed in FIPS 140-only mode")
			}
			return ed25519.SignCtx(k, message, context)
		}
		// Plain Ed25519.
		return ed25519.Sign(k, message), nil
	}
	return nil, errors.New("ed25519: expected opts.HashFunc() zero (unhashed message, for standard Ed25519) or SHA-512 (for Ed25519ph)")
}
// Options can be used with [PrivateKey.Sign] or [VerifyWithOptions]
// to select Ed25519 variants.
type Options struct {
	// Hash can be zero for regular Ed25519, or crypto.SHA512 for Ed25519ph.
	Hash crypto.Hash
	// Context, if not empty, selects Ed25519ctx or provides the context string
	// for Ed25519ph. It can be at most 255 bytes in length.
	Context string
}

// HashFunc returns o.Hash. It makes *Options satisfy [crypto.SignerOpts], so
// an Options value can be passed directly to [PrivateKey.Sign].
func (o *Options) HashFunc() crypto.Hash { return o.Hash }
var cryptocustomrand = godebug.New("cryptocustomrand")

// GenerateKey generates a public/private key pair using entropy from random.
//
// If random is nil, a secure random source is used. (Before Go 1.26, a custom
// [crypto/rand.Reader] was used if set by the application. That behavior can be
// restored with GODEBUG=cryptocustomrand=1. This setting will be removed in a
// future Go release. Instead, use [testing/cryptotest.SetGlobalRandom].)
//
// The output of this function is deterministic, and equivalent to reading
// [SeedSize] bytes from random, and passing them to [NewKeyFromSeed].
func GenerateKey(random io.Reader) (PublicKey, PrivateKey, error) {
	if random == nil {
		random = rand.Reader
		if cryptocustomrand.Value() == "1" {
			// Legacy behavior: honor a custom crypto/rand.Reader.
			random = cryptorand.Reader
			if !rand.IsDefaultReader(random) {
				cryptocustomrand.IncNonDefault()
			}
		}
	}
	seed := make([]byte, SeedSize)
	if _, err := io.ReadFull(random, seed); err != nil {
		return nil, nil, err
	}
	privateKey := NewKeyFromSeed(seed)
	return privateKey.Public().(PublicKey), privateKey, nil
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not [SeedSize]. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
	// Outline the function body so that the returned key can be stack-allocated.
	key := make([]byte, PrivateKeySize)
	newKeyFromSeed(key, seed)
	return key
}
// newKeyFromSeed derives the expanded private key from seed into privateKey.
func newKeyFromSeed(privateKey, seed []byte) {
	key, err := ed25519.NewPrivateKeyFromSeed(seed)
	if err != nil {
		// The only possible failure is an incorrect seed length.
		panic("ed25519: bad seed length: " + strconv.Itoa(len(seed)))
	}
	copy(privateKey, key.Bytes())
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not [PrivateKeySize].
func Sign(privateKey PrivateKey, message []byte) []byte {
	// Outlined so that the returned signature buffer can be
	// stack-allocated by callers.
	sig := make([]byte, SignatureSize)
	sign(sig, privateKey, message)
	return sig
}
// sign computes the Ed25519 signature of message into signature, caching the
// parsed private key keyed by the address of its first byte.
func sign(signature []byte, privateKey PrivateKey, message []byte) {
	parse := func() (*ed25519.PrivateKey, error) {
		return ed25519.NewPrivateKey(privateKey)
	}
	// Guard against a stale cache entry for a reused backing array.
	matches := func(k *ed25519.PrivateKey) bool {
		return subtle.ConstantTimeCompare(privateKey, k.Bytes()) == 1
	}
	key, err := privateKeyCache.Get(&privateKey[0], parse, matches)
	if err != nil {
		panic("ed25519: bad private key: " + err.Error())
	}
	copy(signature, ed25519.Sign(key, message))
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not [PublicKeySize].
//
// The inputs are not considered confidential, and may leak through timing side
// channels, or if an attacker has control of part of the inputs.
func Verify(publicKey PublicKey, message, sig []byte) bool {
	opts := &Options{Hash: crypto.Hash(0)}
	return VerifyWithOptions(publicKey, message, sig, opts) == nil
}
// VerifyWithOptions reports whether sig is a valid signature of message by
// publicKey. A valid signature is indicated by returning a nil error. It will
// panic if len(publicKey) is not [PublicKeySize].
//
// If opts.Hash is [crypto.SHA512], the pre-hashed variant Ed25519ph is used and
// message is expected to be a SHA-512 hash, otherwise opts.Hash must be
// [crypto.Hash](0) and the message must not be hashed, as Ed25519 performs two
// passes over messages to be signed.
//
// The inputs are not considered confidential, and may leak through timing side
// channels, or if an attacker has control of part of the inputs.
func VerifyWithOptions(publicKey PublicKey, message, sig []byte, opts *Options) error {
	if l := len(publicKey); l != PublicKeySize {
		panic("ed25519: bad public key length: " + strconv.Itoa(l))
	}
	k, err := ed25519.NewPublicKey(publicKey)
	if err != nil {
		return err
	}
	if opts.Hash == crypto.SHA512 {
		// Ed25519ph: message is a precomputed SHA-512 digest.
		return ed25519.VerifyPH(k, message, sig, opts.Context)
	}
	if opts.Hash != crypto.Hash(0) {
		return errors.New("ed25519: expected opts.Hash zero (unhashed message, for standard Ed25519) or SHA-512 (for Ed25519ph)")
	}
	if opts.Context != "" {
		// Ed25519ctx: domain-separated by the context string.
		if fips140only.Enforced() {
			return errors.New("crypto/ed25519: use of Ed25519ctx is not allowed in FIPS 140-only mode")
		}
		return ed25519.VerifyCtx(k, message, sig, opts.Context)
	}
	// Plain Ed25519.
	return ed25519.Verify(k, message, sig)
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package elliptic implements the standard NIST P-224, P-256, P-384, and P-521
// elliptic curves over prime fields.
//
// Direct use of this package is deprecated, beyond the [P224], [P256], [P384],
// and [P521] values necessary to use [crypto/ecdsa]. Most other uses
// should migrate to the more efficient and safer [crypto/ecdh], or to
// third-party modules for lower-level functionality.
package elliptic
import (
"io"
"math/big"
"sync"
)
// A Curve represents a short-form Weierstrass curve with a=-3.
//
// The behavior of Add, Double, and ScalarMult when the input is not a point on
// the curve is undefined.
//
// Note that the conventional point at infinity (0, 0) is not considered on the
// curve, although it can be returned by Add, Double, ScalarMult, or
// ScalarBaseMult (but not the [Unmarshal] or [UnmarshalCompressed] functions).
//
// Using Curve implementations besides those returned by [P224], [P256], [P384],
// and [P521] is deprecated.
type Curve interface {
	// Params returns the parameters for the curve.
	Params() *CurveParams

	// IsOnCurve reports whether the given (x,y) lies on the curve.
	//
	// Deprecated: this is a low-level unsafe API. For ECDH, use the crypto/ecdh
	// package. The NewPublicKey methods of NIST curves in crypto/ecdh accept
	// the same encoding as the Unmarshal function, and perform on-curve checks.
	IsOnCurve(x, y *big.Int) bool

	// Add returns the sum of (x1,y1) and (x2,y2).
	//
	// Deprecated: this is a low-level unsafe API.
	Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int)

	// Double returns 2*(x,y).
	//
	// Deprecated: this is a low-level unsafe API.
	Double(x1, y1 *big.Int) (x, y *big.Int)

	// ScalarMult returns k*(x,y) where k is an integer in big-endian form.
	//
	// Deprecated: this is a low-level unsafe API. For ECDH, use the crypto/ecdh
	// package. Most uses of ScalarMult can be replaced by a call to the ECDH
	// methods of NIST curves in crypto/ecdh.
	ScalarMult(x1, y1 *big.Int, k []byte) (x, y *big.Int)

	// ScalarBaseMult returns k*G, where G is the base point of the group
	// and k is an integer in big-endian form.
	//
	// Deprecated: this is a low-level unsafe API. For ECDH, use the crypto/ecdh
	// package. Most uses of ScalarBaseMult can be replaced by a call to the
	// PrivateKey.PublicKey method in crypto/ecdh.
	ScalarBaseMult(k []byte) (x, y *big.Int)
}
var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
// GenerateKey returns a public/private key pair. The private key is
// generated using the given reader, which must return random data.
//
// Deprecated: for ECDH, use the GenerateKey methods of the [crypto/ecdh] package;
// for ECDSA, use the GenerateKey function of the crypto/ecdsa package.
func GenerateKey(curve Curve, rand io.Reader) (priv []byte, x, y *big.Int, err error) {
	N := curve.Params().N
	bitSize := N.BitLen()
	priv = make([]byte, (bitSize+7)/8)
	for x == nil {
		if _, err = io.ReadFull(rand, priv); err != nil {
			return
		}
		// Mask off excess bits when the field size is not a whole
		// number of bytes.
		priv[0] &= mask[bitSize%8]
		// In tests rand may return all zeros; flipping a bit avoids
		// generating the point at infinity and looping forever.
		priv[1] ^= 0x42
		// Resample whenever the candidate scalar is >= N.
		if new(big.Int).SetBytes(priv).Cmp(N) >= 0 {
			continue
		}
		x, y = curve.ScalarBaseMult(priv)
	}
	return
}
// Marshal converts a point on the curve into the uncompressed form specified in
// SEC 1, Version 2.0, Section 2.3.3. If the point is not on the curve (or is
// the conventional point at infinity), the behavior is undefined.
//
// Deprecated: for ECDH, use the crypto/ecdh package. This function returns an
// encoding equivalent to that of PublicKey.Bytes in crypto/ecdh.
func Marshal(curve Curve, x, y *big.Int) []byte {
	panicIfNotOnCurve(curve, x, y)
	byteLen := (curve.Params().BitSize + 7) / 8
	out := make([]byte, 1+2*byteLen)
	out[0] = 4 // uncompressed form tag
	x.FillBytes(out[1 : 1+byteLen])
	y.FillBytes(out[1+byteLen:])
	return out
}
// MarshalCompressed converts a point on the curve into the compressed form
// specified in SEC 1, Version 2.0, Section 2.3.3. If the point is not on the
// curve (or is the conventional point at infinity), the behavior is undefined.
func MarshalCompressed(curve Curve, x, y *big.Int) []byte {
	panicIfNotOnCurve(curve, x, y)
	byteLen := (curve.Params().BitSize + 7) / 8
	out := make([]byte, 1+byteLen)
	// Type byte 0x02 for even y, 0x03 for odd y.
	out[0] = 2 | byte(y.Bit(0))
	x.FillBytes(out[1:])
	return out
}
// unmarshaler is implemented by curves with their own constant-time Unmarshal.
//
// There isn't an equivalent interface for Marshal/MarshalCompressed because
// that doesn't involve any mathematical operations, only FillBytes and Bit.
type unmarshaler interface {
	Unmarshal([]byte) (x, y *big.Int)
	UnmarshalCompressed([]byte) (x, y *big.Int)
}

// Compile-time assertion that the known curves implement unmarshaler.
var _ = []unmarshaler{p224, p256, p384, p521}
// Unmarshal converts a point, serialized by [Marshal], into an x, y pair. It is
// an error if the point is not in uncompressed form, is not on the curve, or is
// the point at infinity. On error, x = nil.
//
// Deprecated: for ECDH, use the crypto/ecdh package. This function accepts an
// encoding equivalent to that of the NewPublicKey methods in crypto/ecdh.
func Unmarshal(curve Curve, data []byte) (x, y *big.Int) {
	// Prefer a curve-specific constant-time implementation when available.
	if c, ok := curve.(unmarshaler); ok {
		return c.Unmarshal(data)
	}
	byteLen := (curve.Params().BitSize + 7) / 8
	if len(data) != 1+2*byteLen || data[0] != 4 {
		return nil, nil // not an uncompressed-form encoding
	}
	p := curve.Params().P
	px := new(big.Int).SetBytes(data[1 : 1+byteLen])
	py := new(big.Int).SetBytes(data[1+byteLen:])
	// Coordinates must be canonical field elements on the curve.
	if px.Cmp(p) >= 0 || py.Cmp(p) >= 0 {
		return nil, nil
	}
	if !curve.IsOnCurve(px, py) {
		return nil, nil
	}
	return px, py
}
// UnmarshalCompressed converts a point, serialized by [MarshalCompressed], into
// an x, y pair. It is an error if the point is not in compressed form, is not
// on the curve, or is the point at infinity. On error, x = nil.
func UnmarshalCompressed(curve Curve, data []byte) (x, y *big.Int) {
	// Prefer a curve-specific constant-time implementation when available.
	if c, ok := curve.(unmarshaler); ok {
		return c.UnmarshalCompressed(data)
	}
	byteLen := (curve.Params().BitSize + 7) / 8
	if len(data) != 1+byteLen {
		return nil, nil
	}
	if data[0] != 2 && data[0] != 3 { // not compressed form
		return nil, nil
	}
	p := curve.Params().P
	x = new(big.Int).SetBytes(data[1:])
	if x.Cmp(p) >= 0 {
		return nil, nil
	}
	// Recover y from y² = x³ - 3x + b. ModSqrt returns nil when the
	// polynomial value is a non-residue, i.e. x is not on the curve.
	y = curve.Params().polynomial(x)
	y = y.ModSqrt(y, p)
	if y == nil {
		return nil, nil
	}
	// Choose the square root whose parity matches the type byte.
	if byte(y.Bit(0)) != data[0]&1 {
		y.Neg(y).Mod(y, p)
	}
	if !curve.IsOnCurve(x, y) {
		return nil, nil
	}
	return x, y
}
// panicIfNotOnCurve panics when (x, y) is neither on the curve nor the
// conventional point at infinity (0, 0), which is allowed to pass even
// though IsOnCurve reports false for it. See Issue 37294.
func panicIfNotOnCurve(curve Curve, x, y *big.Int) {
	atInfinity := x.Sign() == 0 && y.Sign() == 0
	if !atInfinity && !curve.IsOnCurve(x, y) {
		panic("crypto/elliptic: attempted operation on invalid point")
	}
}
// initonce guards the one-time initialization of all curve parameters.
var initonce sync.Once

// initAll populates the parameters of the four NIST curves. It is invoked
// lazily, exactly once, via initonce by P224, P256, P384, and P521.
func initAll() {
	initP224()
	initP256()
	initP384()
	initP521()
}
// P224 returns a [Curve] which implements NIST P-224 (FIPS 186-3, section D.2.2),
// also known as secp224r1. The CurveParams.Name of this [Curve] is "P-224".
//
// Multiple invocations of this function will return the same value, so it can
// be used for equality checks and switch statements.
//
// The cryptographic operations are implemented using constant-time algorithms.
func P224() Curve {
	initonce.Do(initAll) // lazily populate all curve params exactly once
	return p224
}
// P256 returns a [Curve] which implements NIST P-256 (FIPS 186-3, section D.2.3),
// also known as secp256r1 or prime256v1. The CurveParams.Name of this [Curve] is
// "P-256".
//
// Multiple invocations of this function will return the same value, so it can
// be used for equality checks and switch statements.
//
// The cryptographic operations are implemented using constant-time algorithms.
func P256() Curve {
	initonce.Do(initAll) // lazily populate all curve params exactly once
	return p256
}
// P384 returns a [Curve] which implements NIST P-384 (FIPS 186-3, section D.2.4),
// also known as secp384r1. The CurveParams.Name of this [Curve] is "P-384".
//
// Multiple invocations of this function will return the same value, so it can
// be used for equality checks and switch statements.
//
// The cryptographic operations are implemented using constant-time algorithms.
func P384() Curve {
	initonce.Do(initAll) // lazily populate all curve params exactly once
	return p384
}
// P521 returns a [Curve] which implements NIST P-521 (FIPS 186-3, section D.2.5),
// also known as secp521r1. The CurveParams.Name of this [Curve] is "P-521".
//
// Multiple invocations of this function will return the same value, so it can
// be used for equality checks and switch statements.
//
// The cryptographic operations are implemented using constant-time algorithms.
func P521() Curve {
	initonce.Do(initAll) // lazily populate all curve params exactly once
	return p521
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package elliptic
import (
"crypto/internal/fips140/nistec"
"errors"
"math/big"
)
// p224 is the canonical P-224 Curve; its params are filled in by initP224.
var p224 = &nistCurve[*nistec.P224Point]{
	newPoint: nistec.NewP224Point,
}

// initP224 sets the published P-224 domain parameters.
func initP224() {
	p224.params = &CurveParams{
		Name:    "P-224",
		BitSize: 224,
		// SP 800-186, Section 3.2.1.2
		P:  bigFromDecimal("26959946667150639794667015087019630673557916260026308143510066298881"),
		N:  bigFromDecimal("26959946667150639794667015087019625940457807714424391721682722368061"),
		B:  bigFromHex("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4"),
		Gx: bigFromHex("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21"),
		Gy: bigFromHex("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34"),
	}
}
// p256 is the canonical P-256 Curve; its params are filled in by initP256.
var p256 = &nistCurve[*nistec.P256Point]{
	newPoint: nistec.NewP256Point,
}

// initP256 sets the published P-256 domain parameters.
func initP256() {
	p256.params = &CurveParams{
		Name:    "P-256",
		BitSize: 256,
		// SP 800-186, Section 3.2.1.3
		P:  bigFromDecimal("115792089210356248762697446949407573530086143415290314195533631308867097853951"),
		N:  bigFromDecimal("115792089210356248762697446949407573529996955224135760342422259061068512044369"),
		B:  bigFromHex("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b"),
		Gx: bigFromHex("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296"),
		Gy: bigFromHex("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5"),
	}
}
// p384 is the canonical P-384 Curve; its params are filled in by initP384.
var p384 = &nistCurve[*nistec.P384Point]{
	newPoint: nistec.NewP384Point,
}

// initP384 sets the published P-384 domain parameters.
func initP384() {
	p384.params = &CurveParams{
		Name:    "P-384",
		BitSize: 384,
		// SP 800-186, Section 3.2.1.4
		P: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
			"46667948293404245721771496870329047266088258938001861606973112319"),
		N: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
			"46667946905279627659399113263569398956308152294913554433653942643"),
		B: bigFromHex("b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088" +
			"f5013875ac656398d8a2ed19d2a85c8edd3ec2aef"),
		Gx: bigFromHex("aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741" +
			"e082542a385502f25dbf55296c3a545e3872760ab7"),
		Gy: bigFromHex("3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da31" +
			"13b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f"),
	}
}
// p521 is the canonical P-521 Curve; its params are filled in by initP521.
var p521 = &nistCurve[*nistec.P521Point]{
	newPoint: nistec.NewP521Point,
}

// initP521 sets the published P-521 domain parameters.
func initP521() {
	p521.params = &CurveParams{
		Name:    "P-521",
		BitSize: 521,
		// SP 800-186, Section 3.2.1.5
		P: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
			"0540939446345918554318339765605212255964066145455497729631139148" +
			"0858037121987999716643812574028291115057151"),
		N: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
			"0540939446345918554318339765539424505774633321719753296399637136" +
			"3321113864768612440380340372808892707005449"),
		B: bigFromHex("0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8" +
			"b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef" +
			"451fd46b503f00"),
		Gx: bigFromHex("00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f8" +
			"28af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf9" +
			"7e7e31c2e5bd66"),
		Gy: bigFromHex("011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817" +
			"afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088" +
			"be94769fd16650"),
	}
}
// nistCurve is a Curve implementation based on a nistec Point.
//
// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
// legacy idiosyncrasies it requires, such as invalid and infinity point
// handling.
//
// To interact with the nistec package, points are encoded into and decoded from
// properly formatted byte slices. All big.Int use is limited to this package.
// Encoding and decoding is 1/1000th of the runtime of a scalar multiplication,
// so the overhead is acceptable.
type nistCurve[Point nistPoint[Point]] struct {
	newPoint func() Point // constructor for the zero (infinity) point
	params   *CurveParams // set by the corresponding initP* function
}
// nistPoint is a generic constraint for the nistec Point types, covering the
// operations this package needs: byte (de)serialization, group addition and
// doubling, and scalar multiplication by an arbitrary point or the base point.
type nistPoint[T any] interface {
	Bytes() []byte
	SetBytes([]byte) (T, error)
	Add(T, T) T
	Double(T) T
	ScalarMult(T, []byte) (T, error)
	ScalarBaseMult([]byte) (T, error)
}
// Params returns the parameters for the curve.
func (curve *nistCurve[Point]) Params() *CurveParams { return curve.params }
// IsOnCurve reports whether (x, y) is a valid point on the curve. The
// conventional point at infinity (0, 0) is documented to be rejected here,
// even though pointFromAffine accepts it.
func (curve *nistCurve[Point]) IsOnCurve(x, y *big.Int) bool {
	if x.Sign() == 0 && y.Sign() == 0 {
		return false
	}
	if _, err := curve.pointFromAffine(x, y); err != nil {
		return false
	}
	return true
}
// pointFromAffine converts (x, y) to a nistec point, returning an error for
// coordinates that do not describe a valid point.
func (curve *nistCurve[Point]) pointFromAffine(x, y *big.Int) (p Point, err error) {
	// By convention (0, 0) is the point at infinity, which has no affine
	// representation. See Issue 37294.
	if x.Sign() == 0 && y.Sign() == 0 {
		return curve.newPoint(), nil
	}
	// Reject values that FillBytes below could not encode faithfully.
	if x.Sign() < 0 || y.Sign() < 0 {
		return p, errors.New("negative coordinate")
	}
	if x.BitLen() > curve.params.BitSize || y.BitLen() > curve.params.BitSize {
		return p, errors.New("overflowing coordinate")
	}
	// Serialize as an uncompressed point and let SetBytes perform the
	// actual on-curve validation.
	byteLen := (curve.params.BitSize + 7) / 8
	buf := make([]byte, 1+2*byteLen)
	buf[0] = 4 // uncompressed form
	x.FillBytes(buf[1 : 1+byteLen])
	y.FillBytes(buf[1+byteLen:])
	return curve.newPoint().SetBytes(buf)
}
// pointToAffine converts a nistec point back to big.Int affine coordinates.
func (curve *nistCurve[Point]) pointToAffine(p Point) (x, y *big.Int) {
	out := p.Bytes()
	if len(out) == 1 && out[0] == 0 {
		// The single zero byte encodes the point at infinity, which the
		// affine API represents as (0, 0) by convention.
		return new(big.Int), new(big.Int)
	}
	byteLen := (curve.params.BitSize + 7) / 8
	x = new(big.Int).SetBytes(out[1 : 1+byteLen])
	y = new(big.Int).SetBytes(out[1+byteLen:])
	return
}
// Add returns the sum of (x1, y1) and (x2, y2); both inputs must be valid
// points or the conventional point at infinity, otherwise it panics.
func (curve *nistCurve[Point]) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
	p1, err := curve.pointFromAffine(x1, y1)
	if err != nil {
		panic("crypto/elliptic: Add was called on an invalid point")
	}
	p2, err := curve.pointFromAffine(x2, y2)
	if err != nil {
		panic("crypto/elliptic: Add was called on an invalid point")
	}
	p1.Add(p1, p2)
	return curve.pointToAffine(p1)
}
// Double returns 2*(x1, y1); the input must be a valid point or the
// conventional point at infinity, otherwise it panics.
func (curve *nistCurve[Point]) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
	pt, err := curve.pointFromAffine(x1, y1)
	if err != nil {
		panic("crypto/elliptic: Double was called on an invalid point")
	}
	pt.Double(pt)
	return curve.pointToAffine(pt)
}
// normalizeScalar brings the scalar within the byte size of the order of the
// curve, as expected by the nistec scalar multiplication functions.
func (curve *nistCurve[Point]) normalizeScalar(scalar []byte) []byte {
	byteSize := (curve.params.N.BitLen() + 7) / 8
	switch {
	case len(scalar) == byteSize:
		// Already the right width.
		return scalar
	case len(scalar) > byteSize:
		// Too long: reduce modulo the group order, then re-encode.
		s := new(big.Int).SetBytes(scalar)
		s.Mod(s, curve.params.N)
		return s.FillBytes(make([]byte, byteSize))
	default:
		// Too short: left-pad with zero bytes.
		s := new(big.Int).SetBytes(scalar)
		return s.FillBytes(make([]byte, byteSize))
	}
}
// ScalarMult returns scalar*(Bx, By); the base point must be valid or the
// conventional point at infinity, otherwise it panics.
func (curve *nistCurve[Point]) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
	p, err := curve.pointFromAffine(Bx, By)
	if err != nil {
		panic("crypto/elliptic: ScalarMult was called on an invalid point")
	}
	k := curve.normalizeScalar(scalar)
	if p, err = p.ScalarMult(p, k); err != nil {
		panic("crypto/elliptic: nistec rejected normalized scalar")
	}
	return curve.pointToAffine(p)
}
// ScalarBaseMult returns scalar*G, where G is the curve's base point.
func (curve *nistCurve[Point]) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
	k := curve.normalizeScalar(scalar)
	p, err := curve.newPoint().ScalarBaseMult(k)
	if err != nil {
		panic("crypto/elliptic: nistec rejected normalized scalar")
	}
	return curve.pointToAffine(p)
}
// Unmarshal decodes an uncompressed point, rejecting invalid encodings with
// a (nil, nil) result.
func (curve *nistCurve[Point]) Unmarshal(data []byte) (x, y *big.Int) {
	if len(data) == 0 || data[0] != 4 {
		return nil, nil
	}
	// SetBytes validates that data encodes a point on the curve.
	if _, err := curve.newPoint().SetBytes(data); err != nil {
		return nil, nil
	}
	// The affine coordinates are already present in the encoding, so skip
	// pointToAffine and the expensive Jacobian-to-affine field inversion
	// it performs.
	byteLen := (curve.params.BitSize + 7) / 8
	x = new(big.Int).SetBytes(data[1 : 1+byteLen])
	y = new(big.Int).SetBytes(data[1+byteLen:])
	return
}
// UnmarshalCompressed decodes a compressed point, rejecting invalid
// encodings with a (nil, nil) result.
func (curve *nistCurve[Point]) UnmarshalCompressed(data []byte) (x, y *big.Int) {
	if len(data) == 0 {
		return nil, nil
	}
	if data[0] != 2 && data[0] != 3 { // compressed-form type bytes
		return nil, nil
	}
	p, err := curve.newPoint().SetBytes(data)
	if err != nil {
		return nil, nil
	}
	return curve.pointToAffine(p)
}
// bigFromDecimal parses a base-10 integer literal, panicking on malformed
// input (which would indicate a corrupted hard-coded curve constant).
func bigFromDecimal(s string) *big.Int {
	v, ok := new(big.Int).SetString(s, 10)
	if !ok {
		panic("crypto/elliptic: internal error: invalid encoding")
	}
	return v
}
// bigFromHex parses a base-16 integer literal, panicking on malformed input
// (which would indicate a corrupted hard-coded curve constant).
func bigFromHex(s string) *big.Int {
	v, ok := new(big.Int).SetString(s, 16)
	if !ok {
		panic("crypto/elliptic: internal error: invalid encoding")
	}
	return v
}
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package elliptic
import "math/big"
// CurveParams contains the parameters of an elliptic curve and also provides
// a generic, non-constant time implementation of [Curve].
//
// The generic Curve implementation is deprecated, and using custom curves
// (those not returned by [P224], [P256], [P384], and [P521]) is not guaranteed
// to provide any security property.
type CurveParams struct {
	P       *big.Int // the order of the underlying field
	N       *big.Int // the order of the base point
	B       *big.Int // the constant of the curve equation (y² = x³ - 3x + B)
	Gx, Gy  *big.Int // (x,y) of the base point
	BitSize int      // the size of the underlying field
	Name    string   // the canonical name of the curve
}
// Params returns the receiver itself, satisfying the [Curve] interface.
func (curve *CurveParams) Params() *CurveParams { return curve }
// CurveParams operates, internally, on Jacobian coordinates. For a given
// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
// calculation can be performed within the transform (as in ScalarMult and
// ScalarBaseMult). But even for Add and Double, it's faster to apply and
// reverse the transform than to operate in affine coordinates.
// polynomial returns x³ - 3x + b, reduced modulo P.
func (curve *CurveParams) polynomial(x *big.Int) *big.Int {
	// x³
	res := new(big.Int).Mul(x, x)
	res.Mul(res, x)
	// 3x, computed as 2x + x
	threeX := new(big.Int).Lsh(x, 1)
	threeX.Add(threeX, x)
	// x³ - 3x + b (mod P)
	res.Sub(res, threeX)
	res.Add(res, curve.B)
	res.Mod(res, curve.P)
	return res
}
// IsOnCurve implements [Curve.IsOnCurve].
//
// Deprecated: the [CurveParams] methods are deprecated and are not guaranteed to
// provide any security property. For ECDH, use the [crypto/ecdh] package.
// For ECDSA, use the [crypto/ecdsa] package with a [Curve] value returned directly
// from [P224], [P256], [P384], or [P521].
func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
	// Prefer the dedicated constant-time implementation when these params
	// belong to one of the standard curves.
	if specific, ok := matchesSpecificCurve(curve); ok {
		return specific.IsOnCurve(x, y)
	}
	// Coordinates must be canonical field elements in [0, P).
	if x.Sign() < 0 || y.Sign() < 0 || x.Cmp(curve.P) >= 0 || y.Cmp(curve.P) >= 0 {
		return false
	}
	// Check y² ≡ x³ - 3x + b (mod P).
	y2 := new(big.Int).Mul(y, y)
	y2.Mod(y2, curve.P)
	return curve.polynomial(x).Cmp(y2) == 0
}
// zForAffine returns a Jacobian Z value for the affine point (x, y). If x and
// y are zero, it assumes that they represent the point at infinity because (0,
// 0) is not on the any of the curves handled here.
func zForAffine(x, y *big.Int) *big.Int {
z := new(big.Int)
if x.Sign() != 0 || y.Sign() != 0 {
z.SetInt64(1)
}
return z
}
// affineFromJacobian reverses the Jacobian transform (x = X/Z², y = Y/Z³).
// If the point is ∞ (Z = 0), it returns (0, 0).
func (curve *CurveParams) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
	if z.Sign() == 0 {
		return new(big.Int), new(big.Int)
	}
	zinv := new(big.Int).ModInverse(z, curve.P)
	zinv2 := new(big.Int).Mul(zinv, zinv)
	xOut = new(big.Int).Mul(x, zinv2)
	xOut.Mod(xOut, curve.P)
	zinv3 := zinv2.Mul(zinv2, zinv)
	yOut = new(big.Int).Mul(y, zinv3)
	yOut.Mod(yOut, curve.P)
	return xOut, yOut
}
// Add implements [Curve.Add].
//
// Deprecated: the [CurveParams] methods are deprecated and are not guaranteed to
// provide any security property. For ECDH, use the [crypto/ecdh] package.
// For ECDSA, use the [crypto/ecdsa] package with a [Curve] value returned directly
// from [P224], [P256], [P384], or [P521].
func (curve *CurveParams) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
	// Prefer the dedicated constant-time implementation when these params
	// belong to one of the standard curves.
	if specific, ok := matchesSpecificCurve(curve); ok {
		return specific.Add(x1, y1, x2, y2)
	}
	panicIfNotOnCurve(curve, x1, y1)
	panicIfNotOnCurve(curve, x2, y2)
	x3, y3, z3 := curve.addJacobian(x1, y1, zForAffine(x1, y1), x2, y2, zForAffine(x2, y2))
	return curve.affineFromJacobian(x3, y3, z3)
}
// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
// (x2, y2, z2) and returns their sum, also in Jacobian form.
func (curve *CurveParams) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
	// See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
	x3, y3, z3 := new(big.Int), new(big.Int), new(big.Int)
	// Z = 0 encodes the point at infinity: ∞ + P = P.
	if z1.Sign() == 0 {
		x3.Set(x2)
		y3.Set(y2)
		z3.Set(z2)
		return x3, y3, z3
	}
	if z2.Sign() == 0 {
		x3.Set(x1)
		y3.Set(y1)
		z3.Set(z1)
		return x3, y3, z3
	}
	// z1z1 = Z1², z2z2 = Z2²
	z1z1 := new(big.Int).Mul(z1, z1)
	z1z1.Mod(z1z1, curve.P)
	z2z2 := new(big.Int).Mul(z2, z2)
	z2z2.Mod(z2z2, curve.P)
	// u1 = X1·Z2², u2 = X2·Z1² (both x coordinates on a common scale)
	u1 := new(big.Int).Mul(x1, z2z2)
	u1.Mod(u1, curve.P)
	u2 := new(big.Int).Mul(x2, z1z1)
	u2.Mod(u2, curve.P)
	// h = u2 - u1; h == 0 means the points share an x coordinate.
	h := new(big.Int).Sub(u2, u1)
	xEqual := h.Sign() == 0
	if h.Sign() == -1 {
		h.Add(h, curve.P)
	}
	// i = (2h)², j = h·i
	i := new(big.Int).Lsh(h, 1)
	i.Mul(i, i)
	j := new(big.Int).Mul(h, i)
	// s1 = Y1·Z2³, s2 = Y2·Z1³ (both y coordinates on a common scale)
	s1 := new(big.Int).Mul(y1, z2)
	s1.Mul(s1, z2z2)
	s1.Mod(s1, curve.P)
	s2 := new(big.Int).Mul(y2, z1)
	s2.Mul(s2, z1z1)
	s2.Mod(s2, curve.P)
	// r = s2 - s1 (doubled below, after the equality check)
	r := new(big.Int).Sub(s2, s1)
	if r.Sign() == -1 {
		r.Add(r, curve.P)
	}
	yEqual := r.Sign() == 0
	// Equal x and y means the inputs are the same point; the addition
	// formula degenerates there, so fall back to doubling.
	if xEqual && yEqual {
		return curve.doubleJacobian(x1, y1, z1)
	}
	r.Lsh(r, 1)
	// v = u1·i
	v := new(big.Int).Mul(u1, i)
	// X3 = r² - j - 2v
	x3.Set(r)
	x3.Mul(x3, x3)
	x3.Sub(x3, j)
	x3.Sub(x3, v)
	x3.Sub(x3, v)
	x3.Mod(x3, curve.P)
	// Y3 = r·(v - X3) - 2·s1·j
	y3.Set(r)
	v.Sub(v, x3)
	y3.Mul(y3, v)
	s1.Mul(s1, j)
	s1.Lsh(s1, 1)
	y3.Sub(y3, s1)
	y3.Mod(y3, curve.P)
	// Z3 = ((Z1 + Z2)² - Z1² - Z2²)·h = 2·Z1·Z2·h
	z3.Add(z1, z2)
	z3.Mul(z3, z3)
	z3.Sub(z3, z1z1)
	z3.Sub(z3, z2z2)
	z3.Mul(z3, h)
	z3.Mod(z3, curve.P)
	return x3, y3, z3
}
// Double implements [Curve.Double].
//
// Deprecated: the [CurveParams] methods are deprecated and are not guaranteed to
// provide any security property. For ECDH, use the [crypto/ecdh] package.
// For ECDSA, use the [crypto/ecdsa] package with a [Curve] value returned directly
// from [P224], [P256], [P384], or [P521].
func (curve *CurveParams) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
	// Prefer the dedicated constant-time implementation when these params
	// belong to one of the standard curves.
	if specific, ok := matchesSpecificCurve(curve); ok {
		return specific.Double(x1, y1)
	}
	panicIfNotOnCurve(curve, x1, y1)
	return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, zForAffine(x1, y1)))
}
// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
// returns its double, also in Jacobian form.
func (curve *CurveParams) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
	// See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
	// delta = Z²
	delta := new(big.Int).Mul(z, z)
	delta.Mod(delta, curve.P)
	// gamma = Y²
	gamma := new(big.Int).Mul(y, y)
	gamma.Mod(gamma, curve.P)
	// alpha = 3·(X - delta)·(X + delta)
	alpha := new(big.Int).Sub(x, delta)
	if alpha.Sign() == -1 {
		alpha.Add(alpha, curve.P)
	}
	alpha2 := new(big.Int).Add(x, delta)
	alpha.Mul(alpha, alpha2)
	alpha2.Set(alpha)
	alpha.Lsh(alpha, 1)
	alpha.Add(alpha, alpha2)
	// beta = X·gamma (alpha2 is deliberately reused as scratch space)
	beta := alpha2.Mul(x, gamma)
	// X3 = alpha² - 8·beta
	x3 := new(big.Int).Mul(alpha, alpha)
	beta8 := new(big.Int).Lsh(beta, 3)
	beta8.Mod(beta8, curve.P)
	x3.Sub(x3, beta8)
	if x3.Sign() == -1 {
		x3.Add(x3, curve.P)
	}
	x3.Mod(x3, curve.P)
	// Z3 = (Y + Z)² - gamma - delta
	z3 := new(big.Int).Add(y, z)
	z3.Mul(z3, z3)
	z3.Sub(z3, gamma)
	if z3.Sign() == -1 {
		z3.Add(z3, curve.P)
	}
	z3.Sub(z3, delta)
	if z3.Sign() == -1 {
		z3.Add(z3, curve.P)
	}
	z3.Mod(z3, curve.P)
	// Y3 = alpha·(4·beta - X3) - 8·gamma²
	beta.Lsh(beta, 2)
	beta.Sub(beta, x3)
	if beta.Sign() == -1 {
		beta.Add(beta, curve.P)
	}
	y3 := alpha.Mul(alpha, beta)
	gamma.Mul(gamma, gamma)
	gamma.Lsh(gamma, 3)
	gamma.Mod(gamma, curve.P)
	y3.Sub(y3, gamma)
	if y3.Sign() == -1 {
		y3.Add(y3, curve.P)
	}
	y3.Mod(y3, curve.P)
	return x3, y3, z3
}
// ScalarMult implements [Curve.ScalarMult].
//
// Deprecated: the [CurveParams] methods are deprecated and are not guaranteed to
// provide any security property. For ECDH, use the [crypto/ecdh] package.
// For ECDSA, use the [crypto/ecdsa] package with a [Curve] value returned directly
// from [P224], [P256], [P384], or [P521].
func (curve *CurveParams) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
	// If there is a dedicated constant-time implementation for this curve operation,
	// use that instead of the generic one.
	if specific, ok := matchesSpecificCurve(curve); ok {
		return specific.ScalarMult(Bx, By, k)
	}
	panicIfNotOnCurve(curve, Bx, By)
	Bz := new(big.Int).SetInt64(1)
	// Left-to-right double-and-add over the bits of k, starting from the
	// point at infinity (z = 0). Note this is not constant time.
	x, y, z := new(big.Int), new(big.Int), new(big.Int)
	for _, b := range k {
		for range 8 {
			x, y, z = curve.doubleJacobian(x, y, z)
			// Add B when the most significant remaining bit of this
			// byte is set; b is shifted left to walk the bits.
			if b&0x80 == 0x80 {
				x, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)
			}
			b <<= 1
		}
	}
	return curve.affineFromJacobian(x, y, z)
}
// ScalarBaseMult implements [Curve.ScalarBaseMult].
//
// Deprecated: the [CurveParams] methods are deprecated and are not guaranteed to
// provide any security property. For ECDH, use the [crypto/ecdh] package.
// For ECDSA, use the [crypto/ecdsa] package with a [Curve] value returned directly
// from [P224], [P256], [P384], or [P521].
func (curve *CurveParams) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
	// Prefer the dedicated constant-time implementation when these params
	// belong to one of the standard curves.
	if specific, ok := matchesSpecificCurve(curve); ok {
		return specific.ScalarBaseMult(k)
	}
	// Otherwise, a base-point multiplication is just a generic
	// multiplication by the generator (Gx, Gy).
	return curve.ScalarMult(curve.Gx, curve.Gy, k)
}
// matchesSpecificCurve reports whether params is the parameter set of one of
// the standard curves, returning that curve's constant-time implementation.
func matchesSpecificCurve(params *CurveParams) (Curve, bool) {
	known := []Curve{p224, p256, p384, p521}
	for _, c := range known {
		if params == c.Params() {
			return c, true
		}
	}
	return nil, false
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package hmac implements the Keyed-Hash Message Authentication Code (HMAC) as
defined in U.S. Federal Information Processing Standards Publication 198.
An HMAC is a cryptographic hash that uses a key to sign a message.
The receiver verifies the hash by recomputing it using the same key.
Receivers should be careful to use Equal to compare MACs in order to avoid
timing side-channels:
// ValidMAC reports whether messageMAC is a valid HMAC tag for message.
func ValidMAC(message, messageMAC, key []byte) bool {
mac := hmac.New(sha256.New, key)
mac.Write(message)
expectedMAC := mac.Sum(nil)
return hmac.Equal(messageMAC, expectedMAC)
}
*/
package hmac
import (
"crypto/internal/boring"
"crypto/internal/fips140/hmac"
"crypto/internal/fips140hash"
"crypto/internal/fips140only"
"crypto/subtle"
"hash"
)
// New returns a new HMAC hash using the given [hash.Hash] type and key.
// New functions like [crypto/sha256.New] can be used as h.
// h must return a new Hash every time it is called.
// Note that unlike other hash implementations in the standard library,
// the returned Hash does not implement [encoding.BinaryMarshaler]
// or [encoding.BinaryUnmarshaler].
func New(h func() hash.Hash, key []byte) hash.Hash {
	// Prefer the BoringCrypto implementation when it supports h.
	if boring.Enabled {
		hm := boring.NewHMAC(h, key)
		if hm != nil {
			return hm
		}
		// BoringCrypto did not recognize h, so fall through to standard Go code.
	}
	h = fips140hash.UnwrapNew(h)
	// In FIPS 140-only mode, enforce the minimum key length and the
	// approved-hash restriction before constructing the MAC.
	if fips140only.Enforced() {
		if len(key) < 112/8 {
			panic("crypto/hmac: use of keys shorter than 112 bits is not allowed in FIPS 140-only mode")
		}
		if !fips140only.ApprovedHash(h()) {
			panic("crypto/hmac: use of hash functions other than SHA-2 or SHA-3 is not allowed in FIPS 140-only mode")
		}
	}
	return hmac.New(h, key)
}
// Equal compares two MACs for equality without leaking timing information.
func Equal(mac1, mac2 []byte) bool {
	// subtle.ConstantTimeCompare returns 1 only when both slices have the
	// same length and identical contents. A length mismatch is allowed to
	// exit early: differing lengths already suggest that a completely
	// different hash function was used, so no secret is leaked.
	equal := subtle.ConstantTimeCompare(mac1, mac2)
	return equal == 1
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go -output md5block.go
// Package md5 implements the MD5 hash algorithm as defined in RFC 1321.
//
// MD5 is cryptographically broken and should not be used for secure
// applications.
package md5
import (
"crypto"
"crypto/internal/fips140only"
"errors"
"hash"
"internal/byteorder"
)
// init registers the MD5 constructor with package crypto so that
// crypto.MD5 can be resolved to this implementation.
func init() {
	crypto.RegisterHash(crypto.MD5, New)
}
// The size of an MD5 checksum in bytes.
const Size = 16

// The blocksize of MD5 in bytes.
const BlockSize = 64

// The maximum number of bytes that can be passed to block(). The limit exists
// because implementations that rely on assembly routines are not preemptible.
const maxAsmIters = 1024
const maxAsmSize = BlockSize * maxAsmIters // 64KiB

// Initial state words of the MD5 digest (RFC 1321, section 3.3).
const (
	init0 = 0x67452301
	init1 = 0xEFCDAB89
	init2 = 0x98BADCFE
	init3 = 0x10325476
)
// digest represents the partial evaluation of a checksum.
type digest struct {
	s   [4]uint32       // current hash state words
	x   [BlockSize]byte // buffer holding a not-yet-hashed partial block
	nx  int             // number of valid bytes in x
	len uint64          // total number of bytes written so far
}
// Reset returns the digest to its initial state, as if nothing had
// been written yet.
func (d *digest) Reset() {
	d.s = [4]uint32{init0, init1, init2, init3}
	d.nx = 0
	d.len = 0
}
const (
	// magic identifies marshaled MD5 state; marshaledSize is the exact
	// encoded length: magic + 4 state words + block buffer + byte count.
	magic         = "md5\x01"
	marshaledSize = len(magic) + 4*4 + BlockSize + 8
)
// MarshalBinary encodes the hash state, implementing
// [encoding.BinaryMarshaler].
func (d *digest) MarshalBinary() ([]byte, error) {
	buf := make([]byte, 0, marshaledSize)
	return d.AppendBinary(buf)
}
// AppendBinary appends the encoded hash state to b, implementing
// [encoding.BinaryAppender]. The layout is: magic, the four big-endian
// state words, the full block buffer (zero-padded past d.nx), and the
// big-endian byte count.
func (d *digest) AppendBinary(b []byte) ([]byte, error) {
	b = append(b, magic...)
	for _, word := range d.s {
		b = byteorder.BEAppendUint32(b, word)
	}
	b = append(b, d.x[:d.nx]...)
	b = append(b, make([]byte, len(d.x)-d.nx)...)
	b = byteorder.BEAppendUint64(b, d.len)
	return b, nil
}
// UnmarshalBinary restores the hash state from an encoding produced by
// MarshalBinary/AppendBinary, implementing [encoding.BinaryUnmarshaler].
func (d *digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("crypto/md5: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("crypto/md5: invalid hash state size")
	}
	b = b[len(magic):]
	b, d.s[0] = consumeUint32(b)
	b, d.s[1] = consumeUint32(b)
	b, d.s[2] = consumeUint32(b)
	b, d.s[3] = consumeUint32(b)
	// The whole BlockSize buffer was marshaled; the valid prefix length
	// is reconstructed from d.len below.
	b = b[copy(d.x[:], b):]
	b, d.len = consumeUint64(b)
	d.nx = int(d.len % BlockSize)
	return nil
}
// consumeUint64 decodes a big-endian uint64 from the front of b and
// returns the remaining bytes along with the value.
func consumeUint64(b []byte) ([]byte, uint64) {
	v := byteorder.BEUint64(b[:8])
	return b[8:], v
}
// consumeUint32 decodes a big-endian uint32 from the front of b and
// returns the remaining bytes along with the value.
func consumeUint32(b []byte) ([]byte, uint32) {
	v := byteorder.BEUint32(b[:4])
	return b[4:], v
}
// Clone returns an independent copy of the hash state, implementing
// [hash.Cloner].
func (d *digest) Clone() (hash.Cloner, error) {
	clone := new(digest)
	*clone = *d
	return clone, nil
}
// New returns a new [hash.Hash] computing the MD5 checksum. The Hash
// also implements [encoding.BinaryMarshaler], [encoding.BinaryAppender] and
// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
// state of the hash.
func New() hash.Hash {
	d := &digest{}
	d.Reset()
	return d
}
// Size returns the length of an MD5 checksum in bytes, implementing [hash.Hash].
func (d *digest) Size() int { return Size }

// BlockSize returns the MD5 block size in bytes, implementing [hash.Hash].
func (d *digest) BlockSize() int { return BlockSize }
// Write absorbs p into the running checksum, implementing [io.Writer].
// Partial blocks are buffered in d.x; full 64-byte blocks are hashed as
// they become available. It returns len(p) and a nil error except in
// FIPS 140-only mode, where MD5 is rejected outright.
func (d *digest) Write(p []byte) (nn int, err error) {
	if fips140only.Enforced() {
		return 0, errors.New("crypto/md5: use of MD5 is not allowed in FIPS 140-only mode")
	}
	// Note that we currently call block or blockGeneric
	// directly (guarded using haveAsm) because this allows
	// escape analysis to see that p and d don't escape.
	nn = len(p)
	d.len += uint64(nn)
	if d.nx > 0 {
		// Top up the partial block left over from a previous Write.
		n := copy(d.x[d.nx:], p)
		d.nx += n
		if d.nx == BlockSize {
			if haveAsm {
				block(d, d.x[:])
			} else {
				blockGeneric(d, d.x[:])
			}
			d.nx = 0
		}
		p = p[n:]
	}
	if len(p) >= BlockSize {
		// Hash as many whole blocks as possible directly from p.
		n := len(p) &^ (BlockSize - 1)
		if haveAsm {
			// Feed the assembly routine at most maxAsmSize bytes at a
			// time, since it is not preemptible (see maxAsmIters).
			for n > maxAsmSize {
				block(d, p[:maxAsmSize])
				p = p[maxAsmSize:]
				n -= maxAsmSize
			}
			block(d, p[:n])
		} else {
			blockGeneric(d, p[:n])
		}
		p = p[n:]
	}
	if len(p) > 0 {
		// Stash the trailing partial block for a later Write or Sum.
		d.nx = copy(d.x[:], p)
	}
	return
}
// Sum appends the current checksum to in and returns the result,
// implementing [hash.Hash].
func (d *digest) Sum(in []byte) []byte {
	// Finalize a copy so the caller can keep writing and summing.
	dup := *d
	sum := dup.checkSum()
	return append(in, sum[:]...)
}
// checkSum finalizes the hash: it appends the RFC 1321 padding and length
// trailer via Write, then serializes the state words. It consumes d;
// callers that need to keep writing must operate on a copy (see Sum).
func (d *digest) checkSum() [Size]byte {
	if fips140only.Enforced() {
		panic("crypto/md5: use of MD5 is not allowed in FIPS 140-only mode")
	}
	// Append 0x80 to the end of the message and then append zeros
	// until the length is a multiple of 56 bytes. Finally append
	// 8 bytes representing the message length in bits.
	//
	// 1 byte end marker :: 0-63 padding bytes :: 8 byte length
	tmp := [1 + 63 + 8]byte{0x80}
	// Unsigned arithmetic: (55 - d.len) wraps modulo 2⁶⁴, and the % 64
	// then yields the padding count in 0..63.
	pad := (55 - d.len) % 64                     // calculate number of padding bytes
	byteorder.LEPutUint64(tmp[1+pad:], d.len<<3) // append length in bits
	d.Write(tmp[:1+pad+8])
	// The previous write ensures that a whole number of
	// blocks (i.e. a multiple of 64 bytes) have been hashed.
	if d.nx != 0 {
		panic("d.nx != 0")
	}
	var digest [Size]byte
	byteorder.LEPutUint32(digest[0:], d.s[0])
	byteorder.LEPutUint32(digest[4:], d.s[1])
	byteorder.LEPutUint32(digest[8:], d.s[2])
	byteorder.LEPutUint32(digest[12:], d.s[3])
	return digest
}
// Sum returns the MD5 checksum of the data.
func Sum(data []byte) [Size]byte {
	d := new(digest)
	d.Reset()
	d.Write(data)
	return d.checkSum()
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by go run gen.go -output md5block.go; DO NOT EDIT.
package md5
import (
"internal/byteorder"
"math/bits"
)
// blockGeneric is the pure-Go MD5 compression function. It folds every whole
// 64-byte block of p into the state in dig.s; any trailing partial block is
// ignored by the loop condition. Each of the four rounds applies sixteen
// steps of the form b += rotl(f(b,c,d) + a + x[k] + K, s) with a different
// nonlinear function f per round (RFC 1321, section 3.4).
// NOTE: this body is generated by gen.go; only comments are added here.
func blockGeneric(dig *digest, p []byte) {
	// load state
	a, b, c, d := dig.s[0], dig.s[1], dig.s[2], dig.s[3]

	for i := 0; i <= len(p)-BlockSize; i += BlockSize {
		// eliminate bounds checks on p
		q := p[i:]
		q = q[:BlockSize:BlockSize]

		// save current state
		aa, bb, cc, dd := a, b, c, d

		// load input block
		x0 := byteorder.LEUint32(q[4*0x0:])
		x1 := byteorder.LEUint32(q[4*0x1:])
		x2 := byteorder.LEUint32(q[4*0x2:])
		x3 := byteorder.LEUint32(q[4*0x3:])
		x4 := byteorder.LEUint32(q[4*0x4:])
		x5 := byteorder.LEUint32(q[4*0x5:])
		x6 := byteorder.LEUint32(q[4*0x6:])
		x7 := byteorder.LEUint32(q[4*0x7:])
		x8 := byteorder.LEUint32(q[4*0x8:])
		x9 := byteorder.LEUint32(q[4*0x9:])
		xa := byteorder.LEUint32(q[4*0xa:])
		xb := byteorder.LEUint32(q[4*0xb:])
		xc := byteorder.LEUint32(q[4*0xc:])
		xd := byteorder.LEUint32(q[4*0xd:])
		xe := byteorder.LEUint32(q[4*0xe:])
		xf := byteorder.LEUint32(q[4*0xf:])

		// round 1
		a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x0+0xd76aa478, 7)
		d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x1+0xe8c7b756, 12)
		c = d + bits.RotateLeft32((((a^b)&d)^b)+c+x2+0x242070db, 17)
		b = c + bits.RotateLeft32((((d^a)&c)^a)+b+x3+0xc1bdceee, 22)
		a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x4+0xf57c0faf, 7)
		d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x5+0x4787c62a, 12)
		c = d + bits.RotateLeft32((((a^b)&d)^b)+c+x6+0xa8304613, 17)
		b = c + bits.RotateLeft32((((d^a)&c)^a)+b+x7+0xfd469501, 22)
		a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x8+0x698098d8, 7)
		d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x9+0x8b44f7af, 12)
		c = d + bits.RotateLeft32((((a^b)&d)^b)+c+xa+0xffff5bb1, 17)
		b = c + bits.RotateLeft32((((d^a)&c)^a)+b+xb+0x895cd7be, 22)
		a = b + bits.RotateLeft32((((c^d)&b)^d)+a+xc+0x6b901122, 7)
		d = a + bits.RotateLeft32((((b^c)&a)^c)+d+xd+0xfd987193, 12)
		c = d + bits.RotateLeft32((((a^b)&d)^b)+c+xe+0xa679438e, 17)
		b = c + bits.RotateLeft32((((d^a)&c)^a)+b+xf+0x49b40821, 22)

		// round 2
		a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x1+0xf61e2562, 5)
		d = a + bits.RotateLeft32((((a^b)&c)^b)+d+x6+0xc040b340, 9)
		c = d + bits.RotateLeft32((((d^a)&b)^a)+c+xb+0x265e5a51, 14)
		b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x0+0xe9b6c7aa, 20)
		a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x5+0xd62f105d, 5)
		d = a + bits.RotateLeft32((((a^b)&c)^b)+d+xa+0x02441453, 9)
		c = d + bits.RotateLeft32((((d^a)&b)^a)+c+xf+0xd8a1e681, 14)
		b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x4+0xe7d3fbc8, 20)
		a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x9+0x21e1cde6, 5)
		d = a + bits.RotateLeft32((((a^b)&c)^b)+d+xe+0xc33707d6, 9)
		c = d + bits.RotateLeft32((((d^a)&b)^a)+c+x3+0xf4d50d87, 14)
		b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x8+0x455a14ed, 20)
		a = b + bits.RotateLeft32((((b^c)&d)^c)+a+xd+0xa9e3e905, 5)
		d = a + bits.RotateLeft32((((a^b)&c)^b)+d+x2+0xfcefa3f8, 9)
		c = d + bits.RotateLeft32((((d^a)&b)^a)+c+x7+0x676f02d9, 14)
		b = c + bits.RotateLeft32((((c^d)&a)^d)+b+xc+0x8d2a4c8a, 20)

		// round 3
		a = b + bits.RotateLeft32((b^c^d)+a+x5+0xfffa3942, 4)
		d = a + bits.RotateLeft32((a^b^c)+d+x8+0x8771f681, 11)
		c = d + bits.RotateLeft32((d^a^b)+c+xb+0x6d9d6122, 16)
		b = c + bits.RotateLeft32((c^d^a)+b+xe+0xfde5380c, 23)
		a = b + bits.RotateLeft32((b^c^d)+a+x1+0xa4beea44, 4)
		d = a + bits.RotateLeft32((a^b^c)+d+x4+0x4bdecfa9, 11)
		c = d + bits.RotateLeft32((d^a^b)+c+x7+0xf6bb4b60, 16)
		b = c + bits.RotateLeft32((c^d^a)+b+xa+0xbebfbc70, 23)
		a = b + bits.RotateLeft32((b^c^d)+a+xd+0x289b7ec6, 4)
		d = a + bits.RotateLeft32((a^b^c)+d+x0+0xeaa127fa, 11)
		c = d + bits.RotateLeft32((d^a^b)+c+x3+0xd4ef3085, 16)
		b = c + bits.RotateLeft32((c^d^a)+b+x6+0x04881d05, 23)
		a = b + bits.RotateLeft32((b^c^d)+a+x9+0xd9d4d039, 4)
		d = a + bits.RotateLeft32((a^b^c)+d+xc+0xe6db99e5, 11)
		c = d + bits.RotateLeft32((d^a^b)+c+xf+0x1fa27cf8, 16)
		b = c + bits.RotateLeft32((c^d^a)+b+x2+0xc4ac5665, 23)

		// round 4
		a = b + bits.RotateLeft32((c^(b|^d))+a+x0+0xf4292244, 6)
		d = a + bits.RotateLeft32((b^(a|^c))+d+x7+0x432aff97, 10)
		c = d + bits.RotateLeft32((a^(d|^b))+c+xe+0xab9423a7, 15)
		b = c + bits.RotateLeft32((d^(c|^a))+b+x5+0xfc93a039, 21)
		a = b + bits.RotateLeft32((c^(b|^d))+a+xc+0x655b59c3, 6)
		d = a + bits.RotateLeft32((b^(a|^c))+d+x3+0x8f0ccc92, 10)
		c = d + bits.RotateLeft32((a^(d|^b))+c+xa+0xffeff47d, 15)
		b = c + bits.RotateLeft32((d^(c|^a))+b+x1+0x85845dd1, 21)
		a = b + bits.RotateLeft32((c^(b|^d))+a+x8+0x6fa87e4f, 6)
		d = a + bits.RotateLeft32((b^(a|^c))+d+xf+0xfe2ce6e0, 10)
		c = d + bits.RotateLeft32((a^(d|^b))+c+x6+0xa3014314, 15)
		b = c + bits.RotateLeft32((d^(c|^a))+b+xd+0x4e0811a1, 21)
		a = b + bits.RotateLeft32((c^(b|^d))+a+x4+0xf7537e82, 6)
		d = a + bits.RotateLeft32((b^(a|^c))+d+xb+0xbd3af235, 10)
		c = d + bits.RotateLeft32((a^(d|^b))+c+x2+0x2ad7d2bb, 15)
		b = c + bits.RotateLeft32((d^(c|^a))+b+x9+0xeb86d391, 21)

		// add saved state
		a += aa
		b += bb
		c += cc
		d += dd
	}

	// save state
	dig.s[0], dig.s[1], dig.s[2], dig.s[3] = a, b, c, d
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package rand implements a cryptographically secure
// random number generator.
package rand
import (
"crypto/internal/boring"
"crypto/internal/fips140/drbg"
"crypto/internal/rand"
"io"
_ "unsafe"
// Ensure the go:linkname from testing/cryptotest to
// crypto/internal/rand.SetTestingReader works.
_ "crypto/internal/rand"
)
// Reader is a global, shared instance of a cryptographically
// secure random number generator. It is safe for concurrent use.
//
// - On Linux, FreeBSD, Dragonfly, and Solaris, Reader uses getrandom(2).
// - On legacy Linux (< 3.17), Reader opens /dev/urandom on first use.
// - On macOS, iOS, and OpenBSD, Reader uses arc4random_buf(3).
// - On NetBSD, Reader uses the kern.arandom sysctl.
// - On Windows, Reader uses the ProcessPrng API.
// - On js/wasm, Reader uses the Web Crypto API.
// - On wasip1/wasm, Reader uses random_get.
//
// In FIPS 140-3 mode, the output passes through an SP 800-90A Rev. 1
// Deterministic Random Bit Generator (DRBG).
var Reader io.Reader = rand.Reader
// fatal is [runtime.fatal], pushed via linkname. It aborts the program
// with the given message; see Read for its only use here.
//
//go:linkname fatal
func fatal(string)
// Read fills b with cryptographically secure random bytes. It never returns an
// error, and always fills b entirely.
//
// Read calls [io.ReadFull] on [Reader] and crashes the program irrecoverably if
// an error is returned. The default Reader uses operating system APIs that are
// documented to never return an error on all but legacy Linux systems.
func Read(b []byte) (n int, err error) {
	// We don't want b to escape to the heap, but escape analysis can't see
	// through a potentially overridden Reader, so we special-case the default
	// case which we can keep non-escaping, and in the general case we read into
	// a heap buffer and copy from it.
	if rand.IsDefaultReader(Reader) {
		if boring.Enabled {
			_, err = io.ReadFull(boring.RandReader, b)
		} else {
			// drbg.Read returns no error value, so err stays nil here.
			drbg.Read(b)
		}
	} else {
		bb := make([]byte, len(b))
		_, err = io.ReadFull(Reader, bb)
		copy(b, bb)
	}
	if err != nil {
		// Never returns: fatal aborts the process.
		fatal("crypto/rand: failed to read random data (see https://go.dev/issue/66821): " + err.Error())
		panic("unreachable") // To be sure.
	}
	return len(b), nil
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rand
// base32alphabet is the standard RFC 4648 base32 alphabet (exactly 32 characters).
const base32alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
// Text returns a cryptographically random string using the standard RFC 4648 base32 alphabet
// for use when a secret string, token, password, or other text is needed.
// The result contains at least 128 bits of randomness, enough to prevent brute force
// guessing attacks and to make the likelihood of collisions vanishingly small.
// A future version may return longer texts as needed to maintain those properties.
func Text() string {
	const length = 26 // ⌈log₃₂ 2¹²⁸⌉ = 26 chars
	buf := make([]byte, length)
	Read(buf)
	// Map each random byte onto the 32-character alphabet.
	for i, v := range buf {
		buf[i] = base32alphabet[v%32]
	}
	return string(buf)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rand
import (
"crypto/internal/fips140only"
"crypto/internal/rand"
"errors"
"io"
"math/big"
)
// Prime returns a number of the given bit length that is prime with high probability.
// Prime will return error for any error returned by rand.Read or if bits < 2.
//
// Since Go 1.26, a secure source of random bytes is always used, and the Reader is
// ignored unless GODEBUG=cryptocustomrand=1 is set. This setting will be removed
// in a future Go release. Instead, use [testing/cryptotest.SetGlobalRandom].
func Prime(r io.Reader, bits int) (*big.Int, error) {
	if fips140only.Enforced() {
		return nil, errors.New("crypto/rand: use of Prime is not allowed in FIPS 140-only mode")
	}
	if bits < 2 {
		return nil, errors.New("crypto/rand: prime size must be at least 2-bit")
	}
	r = rand.CustomReader(r)

	// b is the number of significant bits in the candidate's first byte.
	b := uint(bits % 8)
	if b == 0 {
		b = 8
	}

	bytes := make([]byte, (bits+7)/8)
	p := new(big.Int)

	// Loop: draw a random candidate, force it into range and odd, and
	// return the first one that passes the primality test.
	for {
		if _, err := io.ReadFull(r, bytes); err != nil {
			return nil, err
		}

		// Clear bits in the first byte to make sure the candidate has a size <= bits.
		bytes[0] &= uint8(int(1<<b) - 1)
		// Don't let the value be too small, i.e, set the most significant two bits.
		// Setting the top two bits, rather than just the top bit,
		// means that when two of these values are multiplied together,
		// the result isn't ever one bit short.
		if b >= 2 {
			bytes[0] |= 3 << (b - 2)
		} else {
			// Here b==1, because b cannot be zero.
			bytes[0] |= 1
			if len(bytes) > 1 {
				bytes[1] |= 0x80
			}
		}
		// Make the value odd since an even number this large certainly isn't prime.
		bytes[len(bytes)-1] |= 1

		p.SetBytes(bytes)
		if p.ProbablyPrime(20) {
			return p, nil
		}
	}
}
// Int returns a uniform random value in [0, max). It panics if max <= 0, and
// returns an error if rand.Read returns one.
func Int(rand io.Reader, max *big.Int) (n *big.Int, err error) {
	if max.Sign() <= 0 {
		panic("crypto/rand: argument to Int is <= 0")
	}
	n = new(big.Int)
	n.Sub(max, n.SetUint64(1)) // n = max - 1, used below only for its bit length
	// bitLen is the maximum bit length needed to encode a value < max.
	bitLen := n.BitLen()
	if bitLen == 0 {
		// the only valid result is 0
		return
	}
	// k is the maximum byte length needed to encode a value < max.
	k := (bitLen + 7) / 8
	// b is the number of bits in the most significant byte of max-1.
	b := uint(bitLen % 8)
	if b == 0 {
		b = 8
	}

	bytes := make([]byte, k)

	// Rejection sampling: draw masked candidates until one is < max,
	// which keeps the result uniform.
	for {
		_, err = io.ReadFull(rand, bytes)
		if err != nil {
			return nil, err
		}

		// Clear bits in the first byte to increase the probability
		// that the candidate is < max.
		bytes[0] &= uint8(int(1<<b) - 1)

		n.SetBytes(bytes)
		if n.Cmp(max) < 0 {
			return
		}
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package rc4 implements RC4 encryption, as defined in Bruce Schneier's
// Applied Cryptography.
//
// RC4 is cryptographically broken and should not be used for secure
// applications.
package rc4
import (
"crypto/internal/fips140/alias"
"crypto/internal/fips140only"
"errors"
"strconv"
)
// A Cipher is an instance of RC4 using a particular key.
type Cipher struct {
	s    [256]uint32 // key-scheduled permutation (state table)
	i, j uint8       // PRGA indices, carried across XORKeyStream calls
}
// KeySizeError records an invalid RC4 key length, in bytes.
type KeySizeError int

// Error implements the error interface.
func (k KeySizeError) Error() string {
	size := int(k)
	return "crypto/rc4: invalid key size " + strconv.Itoa(size)
}
// NewCipher creates and returns a new [Cipher]. The key argument should be the
// RC4 key, at least 1 byte and at most 256 bytes.
func NewCipher(key []byte) (*Cipher, error) {
	if fips140only.Enforced() {
		return nil, errors.New("crypto/rc4: use of RC4 is not allowed in FIPS 140-only mode")
	}
	n := len(key)
	if n < 1 || n > 256 {
		return nil, KeySizeError(n)
	}
	c := new(Cipher)
	// Start from the identity permutation.
	for i := range c.s {
		c.s[i] = uint32(i)
	}
	// RC4 key schedule: mix the key into the permutation.
	var j uint8
	for i := 0; i < 256; i++ {
		j += uint8(c.s[i]) + key[i%n]
		c.s[i], c.s[j] = c.s[j], c.s[i]
	}
	return c, nil
}
// Reset zeros the key data and makes the [Cipher] unusable.
//
// Deprecated: Reset can't guarantee that the key will be entirely removed from
// the process's memory.
func (c *Cipher) Reset() {
	c.s = [256]uint32{}
	c.i, c.j = 0, 0
}
// XORKeyStream sets dst to the result of XORing src with the key stream.
// Dst and src must overlap entirely or not at all.
func (c *Cipher) XORKeyStream(dst, src []byte) {
	if len(src) == 0 {
		return
	}
	// Partial overlap would silently corrupt the output, so reject it.
	if alias.InexactOverlap(dst[:len(src)], src) {
		panic("crypto/rc4: invalid buffer overlap")
	}
	// Work on local copies of the indices; write them back once at the end.
	i, j := c.i, c.j
	_ = dst[len(src)-1]  // hoist the bounds check for dst out of the loop
	dst = dst[:len(src)] // eliminate bounds check from loop
	for k, v := range src {
		// RC4 PRGA step: advance i, accumulate j, swap s[i] and s[j],
		// and emit s[s[i]+s[j]] as the keystream byte.
		i += 1
		x := c.s[i]
		j += uint8(x)
		y := c.s[j]
		c.s[i], c.s[j] = y, x
		dst[k] = v ^ uint8(c.s[uint8(x+y)])
	}
	c.i, c.j = i, j
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rsa
import (
"crypto"
"crypto/internal/boring"
"crypto/internal/fips140/rsa"
"crypto/internal/fips140hash"
"crypto/internal/fips140only"
"crypto/internal/rand"
"errors"
"hash"
"io"
)
// Special values accepted by PSSOptions.SaltLength.
const (
	// PSSSaltLengthAuto causes the salt in a PSS signature to be as large
	// as possible when signing, and to be auto-detected when verifying.
	//
	// When signing in FIPS 140-3 mode, the salt length is capped at the length
	// of the hash function used in the signature.
	PSSSaltLengthAuto = 0
	// PSSSaltLengthEqualsHash causes the salt length to equal the length
	// of the hash used in the signature.
	PSSSaltLengthEqualsHash = -1
)
// PSSOptions contains options for creating and verifying PSS signatures.
// The zero value selects PSSSaltLengthAuto and no hash override.
type PSSOptions struct {
	// SaltLength controls the length of the salt used in the PSS signature. It
	// can either be a positive number of bytes, or one of the special
	// PSSSaltLength constants.
	SaltLength int

	// Hash is the hash function used to generate the message digest. If not
	// zero, it overrides the hash function passed to SignPSS. It's required
	// when using PrivateKey.Sign.
	Hash crypto.Hash
}
// HashFunc returns opts.Hash so that [PSSOptions] implements [crypto.SignerOpts].
func (opts *PSSOptions) HashFunc() crypto.Hash { return opts.Hash }
// saltLength returns the configured salt length, treating a nil receiver
// as PSSSaltLengthAuto so callers may pass nil options.
func (opts *PSSOptions) saltLength() int {
	if opts != nil {
		return opts.SaltLength
	}
	return PSSSaltLengthAuto
}
// SignPSS calculates the signature of digest using PSS.
//
// digest must be the result of hashing the input message using the given hash
// function. The opts argument may be nil, in which case sensible defaults are
// used. If opts.Hash is set, it overrides hash.
//
// The signature is randomized depending on the message, key, and salt size,
// using bytes from random. Most applications should use [crypto/rand.Reader] as
// random.
func SignPSS(random io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error) {
	if err := checkPublicKeySize(&priv.PublicKey); err != nil {
		return nil, err
	}

	if opts != nil && opts.Hash != 0 {
		hash = opts.Hash
	}

	// BoringCrypto path, taken only with the default random source.
	if boring.Enabled && rand.IsDefaultReader(random) {
		bkey, err := boringPrivateKey(priv)
		if err != nil {
			return nil, err
		}
		return boring.SignRSAPSS(bkey, hash, digest, opts.saltLength())
	}
	boring.UnreachableExceptTests()

	h := fips140hash.Unwrap(hash.New())

	if err := checkFIPS140OnlyPrivateKey(priv); err != nil {
		return nil, err
	}
	if fips140only.Enforced() && !fips140only.ApprovedHash(h) {
		return nil, errors.New("crypto/rsa: use of hash functions other than SHA-2 or SHA-3 is not allowed in FIPS 140-only mode")
	}
	if fips140only.Enforced() && !fips140only.ApprovedRandomReader(random) {
		return nil, errors.New("crypto/rsa: only crypto/rand.Reader is allowed in FIPS 140-only mode")
	}

	k, err := fipsPrivateKey(priv)
	if err != nil {
		return nil, err
	}

	// Resolve the special salt-length constants to a concrete byte count.
	saltLength := opts.saltLength()
	if fips140only.Enforced() && saltLength > h.Size() {
		return nil, errors.New("crypto/rsa: use of PSS salt longer than the hash is not allowed in FIPS 140-only mode")
	}
	switch saltLength {
	case PSSSaltLengthAuto:
		saltLength, err = rsa.PSSMaxSaltLength(k.PublicKey(), h)
		if err != nil {
			return nil, fipsError(err)
		}
	case PSSSaltLengthEqualsHash:
		saltLength = h.Size()
	default:
		// If we get here saltLength is either > 0 or < -1, in the
		// latter case we fail out.
		if saltLength <= 0 {
			return nil, errors.New("crypto/rsa: invalid PSS salt length")
		}
	}

	return fipsError2(rsa.SignPSS(random, k, h, digest, saltLength))
}
// VerifyPSS verifies a PSS signature.
//
// A valid signature is indicated by returning a nil error. digest must be the
// result of hashing the input message using the given hash function. The opts
// argument may be nil, in which case sensible defaults are used. opts.Hash is
// ignored.
//
// The inputs are not considered confidential, and may leak through timing side
// channels, or if an attacker has control of part of the inputs.
func VerifyPSS(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error {
	if err := checkPublicKeySize(pub); err != nil {
		return err
	}

	if boring.Enabled {
		bkey, err := boringPublicKey(pub)
		if err != nil {
			return err
		}
		// Collapse all BoringCrypto failures into ErrVerification so no
		// extra detail is exposed to callers.
		if err := boring.VerifyRSAPSS(bkey, hash, digest, sig, opts.saltLength()); err != nil {
			return ErrVerification
		}
		return nil
	}

	h := fips140hash.Unwrap(hash.New())

	if err := checkFIPS140OnlyPublicKey(pub); err != nil {
		return err
	}
	if fips140only.Enforced() && !fips140only.ApprovedHash(h) {
		return errors.New("crypto/rsa: use of hash functions other than SHA-2 or SHA-3 is not allowed in FIPS 140-only mode")
	}

	k, err := fipsPublicKey(pub)
	if err != nil {
		return err
	}

	saltLength := opts.saltLength()
	if fips140only.Enforced() && saltLength > h.Size() {
		return errors.New("crypto/rsa: use of PSS salt longer than the hash is not allowed in FIPS 140-only mode")
	}
	// Auto-detect the salt length, or pin it to the requested value.
	switch saltLength {
	case PSSSaltLengthAuto:
		return fipsError(rsa.VerifyPSS(k, h, digest, sig))
	case PSSSaltLengthEqualsHash:
		return fipsError(rsa.VerifyPSSWithSaltLength(k, h, digest, sig, h.Size()))
	default:
		return fipsError(rsa.VerifyPSSWithSaltLength(k, h, digest, sig, saltLength))
	}
}
// EncryptOAEP encrypts the given message with RSA-OAEP.
//
// OAEP is parameterised by a hash function that is used as a random oracle.
// Encryption and decryption of a given message must use the same hash function
// and sha256.New() is a reasonable choice.
//
// The random parameter is used as a source of entropy to ensure that
// encrypting the same message twice doesn't result in the same ciphertext.
// Most applications should use [crypto/rand.Reader] as random.
//
// The label parameter may contain arbitrary data that will not be encrypted,
// but which gives important context to the message. For example, if a given
// public key is used to encrypt two types of messages then distinct label
// values could be used to ensure that a ciphertext for one purpose cannot be
// used for another by an attacker. If not required it can be empty.
//
// The message must be no longer than the length of the public modulus minus
// twice the hash length, minus a further 2.
func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error) {
	// hash doubles as both the OAEP random oracle and the MGF1 hash; use
	// EncryptOAEPWithOptions to set them separately.
	return encryptOAEP(hash, hash, random, pub, msg, label)
}
// EncryptOAEPWithOptions encrypts the given message with RSA-OAEP using the
// provided options.
//
// This function should only be used over [EncryptOAEP] when there is a need to
// specify the OAEP and MGF1 hashes separately.
//
// See [EncryptOAEP] for additional details.
func EncryptOAEPWithOptions(random io.Reader, pub *PublicKey, msg []byte, opts *OAEPOptions) ([]byte, error) {
	// A zero MGFHash means "use the OAEP hash for MGF1 as well".
	mgf := opts.Hash
	if opts.MGFHash != 0 {
		mgf = opts.MGFHash
	}
	return encryptOAEP(opts.Hash.New(), mgf.New(), random, pub, msg, opts.Label)
}
// encryptOAEP is the shared implementation behind EncryptOAEP and
// EncryptOAEPWithOptions, taking the OAEP hash and the MGF1 hash separately.
// Both hashes are Reset before returning so they can be reused by the caller.
func encryptOAEP(hash hash.Hash, mgfHash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error) {
	if err := checkPublicKeySize(pub); err != nil {
		return nil, err
	}

	defer hash.Reset()
	defer mgfHash.Reset()

	// BoringCrypto path, taken only with the default random source.
	if boring.Enabled && rand.IsDefaultReader(random) {
		k := pub.Size()
		// Maximum OAEP message length: modulus size minus twice the hash
		// size, minus 2.
		if len(msg) > k-2*hash.Size()-2 {
			return nil, ErrMessageTooLong
		}
		bkey, err := boringPublicKey(pub)
		if err != nil {
			return nil, err
		}
		return boring.EncryptRSAOAEP(hash, mgfHash, bkey, msg, label)
	}
	boring.UnreachableExceptTests()

	hash = fips140hash.Unwrap(hash)

	if err := checkFIPS140OnlyPublicKey(pub); err != nil {
		return nil, err
	}
	if fips140only.Enforced() && !fips140only.ApprovedHash(hash) {
		return nil, errors.New("crypto/rsa: use of hash functions other than SHA-2 or SHA-3 is not allowed in FIPS 140-only mode")
	}
	if fips140only.Enforced() && !fips140only.ApprovedRandomReader(random) {
		return nil, errors.New("crypto/rsa: only crypto/rand.Reader is allowed in FIPS 140-only mode")
	}

	k, err := fipsPublicKey(pub)
	if err != nil {
		return nil, err
	}
	return fipsError2(rsa.EncryptOAEP(hash, mgfHash, random, k, msg, label))
}
// DecryptOAEP decrypts ciphertext using RSA-OAEP.
//
// OAEP is parameterised by a hash function that is used as a random oracle.
// Encryption and decryption of a given message must use the same hash function
// and sha256.New() is a reasonable choice.
//
// The random parameter is legacy and ignored, and it can be nil.
//
// The label parameter must match the value given when encrypting. See
// [EncryptOAEP] for details.
func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error) {
	defer hash.Reset()
	// hash serves as both the OAEP hash and the MGF1 hash, mirroring
	// EncryptOAEP.
	return decryptOAEP(hash, hash, priv, ciphertext, label)
}
// decryptOAEP is the shared implementation behind DecryptOAEP and the
// crypto.Decrypter support, taking the OAEP hash and the MGF1 hash separately.
func decryptOAEP(hash, mgfHash hash.Hash, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error) {
	if err := checkPublicKeySize(&priv.PublicKey); err != nil {
		return nil, err
	}

	if boring.Enabled {
		k := priv.Size()
		// Reject ciphertexts that cannot possibly be valid for this key
		// and hash before calling into BoringCrypto.
		if len(ciphertext) > k ||
			k < hash.Size()*2+2 {
			return nil, ErrDecryption
		}
		bkey, err := boringPrivateKey(priv)
		if err != nil {
			return nil, err
		}
		// Collapse all decryption failures into ErrDecryption so no
		// extra detail is exposed to callers.
		out, err := boring.DecryptRSAOAEP(hash, mgfHash, bkey, ciphertext, label)
		if err != nil {
			return nil, ErrDecryption
		}
		return out, nil
	}

	hash = fips140hash.Unwrap(hash)
	mgfHash = fips140hash.Unwrap(mgfHash)

	if err := checkFIPS140OnlyPrivateKey(priv); err != nil {
		return nil, err
	}
	if fips140only.Enforced() {
		if !fips140only.ApprovedHash(hash) || !fips140only.ApprovedHash(mgfHash) {
			return nil, errors.New("crypto/rsa: use of hash functions other than SHA-2 or SHA-3 is not allowed in FIPS 140-only mode")
		}
	}

	k, err := fipsPrivateKey(priv)
	if err != nil {
		return nil, err
	}

	return fipsError2(rsa.DecryptOAEP(hash, mgfHash, k, ciphertext, label))
}
// SignPKCS1v15 calculates the signature of hashed using
// RSASSA-PKCS1-V1_5-SIGN from RSA PKCS #1 v1.5. Note that hashed must
// be the result of hashing the input message using the given hash
// function. If hash is zero, hashed is signed directly. This isn't
// advisable except for interoperability.
//
// The random parameter is legacy and ignored, and it can be nil.
//
// This function is deterministic. Thus, if the set of possible
// messages is small, an attacker may be able to build a map from
// messages to signatures and identify the signed messages. As ever,
// signatures provide authenticity, not confidentiality.
func SignPKCS1v15(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) {
	// hashName stays empty for hash == 0, signaling "sign hashed directly"
	// to the FIPS implementation below.
	var hashName string
	if hash != crypto.Hash(0) {
		if len(hashed) != hash.Size() {
			return nil, errors.New("crypto/rsa: input must be hashed message")
		}
		hashName = hash.String()
	}

	if err := checkPublicKeySize(&priv.PublicKey); err != nil {
		return nil, err
	}

	if boring.Enabled {
		bkey, err := boringPrivateKey(priv)
		if err != nil {
			return nil, err
		}
		return boring.SignRSAPKCS1v15(bkey, hash, hashed)
	}

	if err := checkFIPS140OnlyPrivateKey(priv); err != nil {
		return nil, err
	}
	if fips140only.Enforced() && !fips140only.ApprovedHash(fips140hash.Unwrap(hash.New())) {
		return nil, errors.New("crypto/rsa: use of hash functions other than SHA-2 or SHA-3 is not allowed in FIPS 140-only mode")
	}

	k, err := fipsPrivateKey(priv)
	if err != nil {
		return nil, err
	}

	return fipsError2(rsa.SignPKCS1v15(k, hashName, hashed))
}
// VerifyPKCS1v15 verifies an RSA PKCS #1 v1.5 signature.
// hashed is the result of hashing the input message using the given hash
// function and sig is the signature. A valid signature is indicated by
// returning a nil error. If hash is zero then hashed is used directly. This
// isn't advisable except for interoperability.
//
// The inputs are not considered confidential, and may leak through timing side
// channels, or if an attacker has control of part of the inputs.
func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error {
	// hashName stays empty for hash == 0, signaling "hashed was signed
	// directly" to the FIPS implementation below.
	var hashName string
	if hash != crypto.Hash(0) {
		if len(hashed) != hash.Size() {
			return errors.New("crypto/rsa: input must be hashed message")
		}
		hashName = hash.String()
	}

	if err := checkPublicKeySize(pub); err != nil {
		return err
	}

	if boring.Enabled {
		bkey, err := boringPublicKey(pub)
		if err != nil {
			return err
		}
		// Collapse all BoringCrypto failures into ErrVerification so no
		// extra detail is exposed to callers.
		if err := boring.VerifyRSAPKCS1v15(bkey, hash, hashed, sig); err != nil {
			return ErrVerification
		}
		return nil
	}

	if err := checkFIPS140OnlyPublicKey(pub); err != nil {
		return err
	}
	if fips140only.Enforced() && !fips140only.ApprovedHash(fips140hash.Unwrap(hash.New())) {
		return errors.New("crypto/rsa: use of hash functions other than SHA-2 or SHA-3 is not allowed in FIPS 140-only mode")
	}

	k, err := fipsPublicKey(pub)
	if err != nil {
		return err
	}

	return fipsError(rsa.VerifyPKCS1v15(k, hashName, hashed, sig))
}
// fipsError translates sentinel errors from the internal FIPS rsa package
// into this package's exported equivalents, passing anything else through
// unchanged.
func fipsError(err error) error {
	switch err {
	case rsa.ErrDecryption:
		err = ErrDecryption
	case rsa.ErrVerification:
		err = ErrVerification
	case rsa.ErrMessageTooLong:
		err = ErrMessageTooLong
	}
	return err
}
// fipsError2 applies fipsError to the error of a (value, error) pair,
// letting callers wrap two-result calls in a single expression.
func fipsError2[T any](x T, err error) (T, error) {
	err = fipsError(err)
	return x, err
}
// checkFIPS140OnlyPublicKey enforces the key requirements of FIPS 140-only
// mode on pub: a present modulus of at least 2048 even bits, and an odd
// public exponent greater than 2¹⁶. Outside that mode it accepts any key.
func checkFIPS140OnlyPublicKey(pub *PublicKey) error {
	if !fips140only.Enforced() {
		return nil
	}
	switch {
	case pub.N == nil:
		return errors.New("crypto/rsa: public key missing N")
	case pub.N.BitLen() < 2048:
		return errors.New("crypto/rsa: use of keys smaller than 2048 bits is not allowed in FIPS 140-only mode")
	case pub.N.BitLen()%2 == 1:
		return errors.New("crypto/rsa: use of keys with odd size is not allowed in FIPS 140-only mode")
	case pub.E <= 1<<16:
		return errors.New("crypto/rsa: use of public exponent <= 2¹⁶ is not allowed in FIPS 140-only mode")
	case pub.E&1 == 0:
		return errors.New("crypto/rsa: use of even public exponent is not allowed in FIPS 140-only mode")
	}
	return nil
}
// checkFIPS140OnlyPrivateKey enforces the private-key requirements of FIPS
// 140-only mode on priv: the public-key checks plus exactly two primes of
// equal bit length. Outside that mode it accepts any key.
func checkFIPS140OnlyPrivateKey(priv *PrivateKey) error {
	if !fips140only.Enforced() {
		return nil
	}
	if err := checkFIPS140OnlyPublicKey(&priv.PublicKey); err != nil {
		return err
	}
	// Multi-prime keys are rejected before the primes are inspected, so
	// the indexing below is safe.
	if len(priv.Primes) != 2 {
		return errors.New("crypto/rsa: use of multi-prime keys is not allowed in FIPS 140-only mode")
	}
	switch {
	case priv.Primes[0] == nil, priv.Primes[1] == nil,
		priv.Primes[0].BitLen() != priv.Primes[1].BitLen():
		return errors.New("crypto/rsa: use of primes of different sizes is not allowed in FIPS 140-only mode")
	}
	return nil
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !boringcrypto
package rsa
import "crypto/internal/boring"
// boringPublicKey is the stub used in builds without the boringcrypto
// tag (see the !boringcrypto build constraint above). It must never be
// reached, because boring.Enabled is false in these builds.
func boringPublicKey(*PublicKey) (*boring.PublicKeyRSA, error) {
	panic("boringcrypto: not available")
}
// boringPrivateKey is the stub used in builds without the boringcrypto
// tag. It must never be reached, because boring.Enabled is false in
// these builds.
func boringPrivateKey(*PrivateKey) (*boring.PrivateKeyRSA, error) {
	panic("boringcrypto: not available")
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rsa
import (
"crypto/internal/boring"
"crypto/internal/fips140/rsa"
"crypto/internal/fips140only"
"crypto/internal/rand"
"crypto/subtle"
"errors"
"io"
)
// This file implements encryption and decryption using PKCS #1 v1.5 padding.
// PKCS1v15DecryptOptions is for passing options to PKCS #1 v1.5 decryption using
// the [crypto.Decrypter] interface.
//
// Deprecated: PKCS #1 v1.5 encryption is dangerous and should not be used.
// See [draft-irtf-cfrg-rsa-guidance-05] for more information. Use
// [EncryptOAEP] and [DecryptOAEP] instead.
//
// [draft-irtf-cfrg-rsa-guidance-05]: https://www.ietf.org/archive/id/draft-irtf-cfrg-rsa-guidance-05.html#name-rationale
type PKCS1v15DecryptOptions struct {
	// SessionKeyLen is the length of the session key that is being
	// decrypted. If not zero, then a padding error during decryption will
	// cause a random plaintext of this length to be returned rather than
	// an error. These alternatives happen in constant time.
	//
	// A non-zero value routes [PrivateKey.Decrypt] through
	// DecryptPKCS1v15SessionKey, enabling its Bleichenbacher mitigation.
	SessionKeyLen int
}
// EncryptPKCS1v15 encrypts the given message with RSA and the padding
// scheme from PKCS #1 v1.5. The message must be no longer than the
// length of the public modulus minus 11 bytes.
//
// The random parameter is used as a source of entropy to ensure that encrypting
// the same message twice doesn't result in the same ciphertext. Since Go 1.26,
// a secure source of random bytes is always used, and the Reader is ignored
// unless GODEBUG=cryptocustomrand=1 is set. This setting will be removed in a
// future Go release. Instead, use [testing/cryptotest.SetGlobalRandom].
//
// Deprecated: PKCS #1 v1.5 encryption is dangerous and should not be used.
// See [draft-irtf-cfrg-rsa-guidance-05] for more information. Use
// [EncryptOAEP] and [DecryptOAEP] instead.
//
// [draft-irtf-cfrg-rsa-guidance-05]: https://www.ietf.org/archive/id/draft-irtf-cfrg-rsa-guidance-05.html#name-rationale
func EncryptPKCS1v15(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error) {
	if fips140only.Enforced() {
		return nil, errors.New("crypto/rsa: use of PKCS#1 v1.5 encryption is not allowed in FIPS 140-only mode")
	}
	if err := checkPublicKeySize(pub); err != nil {
		return nil, err
	}
	k := pub.Size()
	// The encoded message needs 3 fixed bytes plus at least 8 bytes of
	// padding, hence the k-11 limit.
	if len(msg) > k-11 {
		return nil, ErrMessageTooLong
	}
	// With the default random source, BoringCrypto performs the whole
	// padding and encryption operation itself.
	if boring.Enabled && rand.IsDefaultReader(random) {
		bkey, err := boringPublicKey(pub)
		if err != nil {
			return nil, err
		}
		return boring.EncryptRSAPKCS1(bkey, msg)
	}
	boring.UnreachableExceptTests()
	random = rand.CustomReader(random)
	// EM = 0x00 || 0x02 || PS || 0x00 || M
	em := make([]byte, k)
	em[1] = 2
	// ps is the non-zero random padding; mm is where the message goes.
	ps, mm := em[2:len(em)-len(msg)-1], em[len(em)-len(msg):]
	err := nonZeroRandomBytes(ps, random)
	if err != nil {
		return nil, err
	}
	// The 0x00 separator between PS and M. Already zero from make; set
	// explicitly to mirror the EM layout above.
	em[len(em)-len(msg)-1] = 0
	copy(mm, msg)
	// With a custom random source, BoringCrypto performs only the raw RSA
	// operation on the padded block built above.
	if boring.Enabled {
		var bkey *boring.PublicKeyRSA
		bkey, err = boringPublicKey(pub)
		if err != nil {
			return nil, err
		}
		return boring.EncryptRSANoPadding(bkey, em)
	}
	fk, err := fipsPublicKey(pub)
	if err != nil {
		return nil, err
	}
	return rsa.Encrypt(fk, em)
}
// DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from
// PKCS #1 v1.5. The random parameter is legacy and ignored, and it can be nil.
//
// Deprecated: PKCS #1 v1.5 encryption is dangerous and should not be used.
// Whether this function returns an error or not discloses secret information.
// If an attacker can cause this function to run repeatedly and learn whether
// each instance returned an error then they can decrypt and forge signatures as
// if they had the private key. See [draft-irtf-cfrg-rsa-guidance-05] for more
// information. Use [EncryptOAEP] and [DecryptOAEP] instead.
//
// [draft-irtf-cfrg-rsa-guidance-05]: https://www.ietf.org/archive/id/draft-irtf-cfrg-rsa-guidance-05.html#name-rationale
func DecryptPKCS1v15(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error) {
	if err := checkPublicKeySize(&priv.PublicKey); err != nil {
		return nil, err
	}
	// BoringCrypto handles both the RSA operation and padding removal.
	if boring.Enabled {
		bkey, err := boringPrivateKey(priv)
		if err != nil {
			return nil, err
		}
		plaintext, err := boring.DecryptRSAPKCS1(bkey, ciphertext)
		if err != nil {
			// Collapse all failures into the vague sentinel error.
			return nil, ErrDecryption
		}
		return plaintext, nil
	}
	valid, em, index, err := decryptPKCS1v15(priv, ciphertext)
	if err != nil {
		return nil, err
	}
	if valid == 0 {
		return nil, ErrDecryption
	}
	// index marks the start of the message after the padding.
	return em[index:], nil
}
// DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding
// scheme from PKCS #1 v1.5. The random parameter is legacy and ignored, and it
// can be nil.
//
// DecryptPKCS1v15SessionKey returns an error if the ciphertext is the wrong
// length or if the ciphertext is greater than the public modulus. Otherwise, no
// error is returned. If the padding is valid, the resulting plaintext message
// is copied into key. Otherwise, key is unchanged. These alternatives occur in
// constant time. It is intended that the user of this function generate a
// random session key beforehand and continue the protocol with the resulting
// value.
//
// Note that if the session key is too small then it may be possible for an
// attacker to brute-force it. If they can do that then they can learn whether a
// random value was used (because it'll be different for the same ciphertext)
// and thus whether the padding was correct. This also defeats the point of this
// function. Using at least a 16-byte key will protect against this attack.
//
// This method implements protections against Bleichenbacher chosen ciphertext
// attacks [0] described in RFC 3218 Section 2.3.2 [1]. While these protections
// make a Bleichenbacher attack significantly more difficult, the protections
// are only effective if the rest of the protocol which uses
// DecryptPKCS1v15SessionKey is designed with these considerations in mind. In
// particular, if any subsequent operations which use the decrypted session key
// leak any information about the key (e.g. whether it is a static or random
// key) then the mitigations are defeated. This method must be used extremely
// carefully, and typically should only be used when absolutely necessary for
// compatibility with an existing protocol (such as TLS) that is designed with
// these properties in mind.
//
// - [0] “Chosen Ciphertext Attacks Against Protocols Based on the RSA Encryption
// Standard PKCS #1”, Daniel Bleichenbacher, Advances in Cryptology (Crypto '98)
// - [1] RFC 3218, Preventing the Million Message Attack on CMS,
// https://www.rfc-editor.org/rfc/rfc3218.html
//
// Deprecated: PKCS #1 v1.5 encryption is dangerous and should not be used. The
// protections implemented by this function are limited and fragile, as
// explained above. See [draft-irtf-cfrg-rsa-guidance-05] for more information.
// Use [EncryptOAEP] and [DecryptOAEP] instead.
//
// [draft-irtf-cfrg-rsa-guidance-05]: https://www.ietf.org/archive/id/draft-irtf-cfrg-rsa-guidance-05.html#name-rationale
func DecryptPKCS1v15SessionKey(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error {
	if err := checkPublicKeySize(&priv.PublicKey); err != nil {
		return err
	}
	k := priv.Size()
	// The modulus must have room for the key, the three fixed padding
	// bytes, and at least 8 bytes of PS.
	if k-(len(key)+3+8) < 0 {
		return ErrDecryption
	}
	valid, em, index, err := decryptPKCS1v15(priv, ciphertext)
	if err != nil {
		return err
	}
	if len(em) != k {
		// This should be impossible because decryptPKCS1v15 always
		// returns the full slice.
		return ErrDecryption
	}
	// The padding only counts as valid if the recovered message is
	// exactly len(key) bytes long; fold that in without branching.
	valid &= subtle.ConstantTimeEq(int32(len(em)-index), int32(len(key)))
	// Copy into key only when valid; otherwise key is left unchanged.
	// Either way the memory access pattern is the same.
	subtle.ConstantTimeCopy(valid, key, em[len(em)-len(key):])
	return nil
}
// decryptPKCS1v15 decrypts ciphertext using priv. It returns one or zero in
// valid that indicates whether the plaintext was correctly structured.
// In either case, the plaintext is returned in em so that it may be read
// independently of whether it was valid in order to maintain constant memory
// access patterns. If the plaintext was valid then index contains the index of
// the original message in em, to allow constant time padding removal.
func decryptPKCS1v15(priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
	if fips140only.Enforced() {
		return 0, nil, 0, errors.New("crypto/rsa: use of PKCS#1 v1.5 encryption is not allowed in FIPS 140-only mode")
	}
	k := priv.Size()
	// A valid encoding needs at least 11 bytes: 0x00 0x02, 8 bytes of PS,
	// and the 0x00 separator.
	if k < 11 {
		err = ErrDecryption
		return 0, nil, 0, err
	}
	// Perform only the raw RSA operation here; padding is checked below in
	// constant time regardless of which backend produced em.
	if boring.Enabled {
		var bkey *boring.PrivateKeyRSA
		bkey, err = boringPrivateKey(priv)
		if err != nil {
			return 0, nil, 0, err
		}
		em, err = boring.DecryptRSANoPadding(bkey, ciphertext)
		if err != nil {
			return 0, nil, 0, ErrDecryption
		}
	} else {
		fk, err := fipsPrivateKey(priv)
		if err != nil {
			return 0, nil, 0, err
		}
		em, err = rsa.DecryptWithoutCheck(fk, ciphertext)
		if err != nil {
			return 0, nil, 0, ErrDecryption
		}
	}
	// Check the EM = 0x00 || 0x02 || PS || 0x00 || M structure without
	// data-dependent branches.
	firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
	secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
	// The remainder of the plaintext must be a string of non-zero random
	// octets, followed by a 0, followed by the message.
	// lookingForIndex: 1 iff we are still looking for the zero.
	// index: the offset of the first zero byte.
	lookingForIndex := 1
	for i := 2; i < len(em); i++ {
		equals0 := subtle.ConstantTimeByteEq(em[i], 0)
		index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
		lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
	}
	// The PS padding must be at least 8 bytes long, and it starts two
	// bytes into em.
	validPS := subtle.ConstantTimeLessOrEq(2+8, index)
	// All four conditions must hold; combine with AND so timing does not
	// reveal which one failed.
	valid = firstByteIsZero & secondByteIsTwo & (^lookingForIndex & 1) & validPS
	// Skip past the 0x00 separator when valid; otherwise report index 0.
	index = subtle.ConstantTimeSelect(valid, index+1, 0)
	return valid, em, index, nil
}
// nonZeroRandomBytes fills the given slice with non-zero random octets.
func nonZeroRandomBytes(s []byte, random io.Reader) (err error) {
_, err = io.ReadFull(random, s)
if err != nil {
return
}
for i := 0; i < len(s); i++ {
for s[i] == 0 {
_, err = io.ReadFull(random, s[i:i+1])
if err != nil {
return
}
// In tests, the PRNG may return all zeros so we do
// this to break the loop.
s[i] ^= 0x42
}
}
return
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package rsa implements RSA encryption as specified in PKCS #1 and RFC 8017.
//
// RSA is a single, fundamental operation that is used in this package to
// implement either public-key encryption or public-key signatures.
//
// The original specification for encryption and signatures with RSA is PKCS #1
// and the terms "RSA encryption" and "RSA signatures" by default refer to
// PKCS #1 version 1.5. However, that specification has flaws and new designs
// should use version 2, usually called by just OAEP and PSS, where
// possible.
//
// Two sets of interfaces are included in this package. When a more abstract
// interface isn't necessary, there are functions for encrypting/decrypting
// with v1.5/OAEP and signing/verifying with v1.5/PSS. If one needs to abstract
// over the public key primitive, the PrivateKey type implements the
// Decrypter and Signer interfaces from the crypto package.
//
// Operations involving private keys are implemented using constant-time
// algorithms, except for [GenerateKey] and for some operations involving
// deprecated multi-prime keys.
//
// # Minimum key size
//
// [GenerateKey] returns an error if a key of less than 1024 bits is requested,
// and all Sign, Verify, Encrypt, and Decrypt methods return an error if used
// with a key smaller than 1024 bits. Such keys are insecure and should not be
// used.
//
// The rsa1024min=0 GODEBUG setting suppresses this error, but we recommend
// doing so only in tests, if necessary. Tests can set this option using
// [testing.T.Setenv] or by including "//go:debug rsa1024min=0" in a *_test.go
// source file.
//
// Alternatively, see the [GenerateKey (TestKey)] example for a pregenerated
// test-only 2048-bit key.
//
// [GenerateKey (TestKey)]: https://pkg.go.dev/crypto/rsa#example-GenerateKey-TestKey
package rsa
import (
"crypto"
"crypto/internal/boring"
"crypto/internal/boring/bbig"
"crypto/internal/fips140/bigmod"
"crypto/internal/fips140/rsa"
"crypto/internal/fips140only"
"crypto/internal/rand"
cryptorand "crypto/rand"
"crypto/subtle"
"errors"
"fmt"
"internal/godebug"
"io"
"math"
"math/big"
)
// bigOne is a shared big.Int with value 1, used to avoid repeated
// allocations in the arithmetic below. It must never be mutated.
var bigOne = big.NewInt(1)
// A PublicKey represents the public part of an RSA key.
//
// The values of N and E are not considered confidential, and may leak through
// side channels, or could be mathematically derived from other public values.
type PublicKey struct {
	N *big.Int // modulus
	E int      // public exponent
}
// Any methods implemented on PublicKey might need to also be implemented on
// PrivateKey, as the latter embeds the former and will expose its methods.
// Size returns the modulus size in bytes. Raw signatures and ciphertexts
// for or by this public key will have the same size.
func (pub *PublicKey) Size() int {
	// Round the bit length up to a whole number of octets.
	bits := pub.N.BitLen()
	return (bits + 7) / 8
}
// Equal reports whether pub and x represent the same public key.
func (pub *PublicKey) Equal(x crypto.PublicKey) bool {
	other, ok := x.(*PublicKey)
	if !ok {
		return false
	}
	if pub.E != other.E {
		return false
	}
	return bigIntEqual(pub.N, other.N)
}
// OAEPOptions allows passing options to OAEP encryption and decryption
// through the [PrivateKey.Decrypt] and [EncryptOAEPWithOptions] functions.
type OAEPOptions struct {
	// Hash is the hash function that will be used when generating the mask.
	Hash crypto.Hash
	// MGFHash is the hash function used for MGF1.
	// If zero, Hash is used instead.
	MGFHash crypto.Hash
	// Label is an arbitrary byte string that must be equal to the value
	// used when encrypting.
	Label []byte
}
// A PrivateKey represents an RSA key.
//
// Its fields must not be modified after calling [PrivateKey.Precompute], and
// should not be used directly as big.Int values for cryptographic purposes.
type PrivateKey struct {
	PublicKey            // public part.
	D         *big.Int   // private exponent
	Primes    []*big.Int // prime factors of N, has >= 2 elements.
	// Precomputed contains precomputed values that speed up RSA operations,
	// if available. It must be generated by calling PrivateKey.Precompute and
	// must not be modified afterwards.
	Precomputed PrecomputedValues
}
// Public returns the public key corresponding to priv. It returns a
// pointer to the embedded PublicKey, so the result aliases priv.
func (priv *PrivateKey) Public() crypto.PublicKey {
	return &priv.PublicKey
}
// Equal reports whether priv and x have equivalent values. It ignores
// Precomputed values.
func (priv *PrivateKey) Equal(x crypto.PrivateKey) bool {
	other, ok := x.(*PrivateKey)
	if !ok {
		return false
	}
	switch {
	case !priv.PublicKey.Equal(&other.PublicKey):
		return false
	case !bigIntEqual(priv.D, other.D):
		return false
	case len(priv.Primes) != len(other.Primes):
		return false
	}
	for i, p := range priv.Primes {
		if !bigIntEqual(p, other.Primes[i]) {
			return false
		}
	}
	return true
}
// bigIntEqual reports whether a and b are equal leaking only their bit length
// through timing side-channels.
func bigIntEqual(a, b *big.Int) bool {
return subtle.ConstantTimeCompare(a.Bytes(), b.Bytes()) == 1
}
// Sign signs digest with priv, reading randomness from rand. If opts is a
// *[PSSOptions] then the PSS algorithm will be used, otherwise PKCS #1 v1.5 will
// be used. digest must be the result of hashing the input message using
// opts.HashFunc().
//
// This method implements [crypto.Signer], which is an interface to support keys
// where the private part is kept in, for example, a hardware module. Common
// uses should use the Sign* functions in this package directly.
func (priv *PrivateKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
	switch o := opts.(type) {
	case *PSSOptions:
		return SignPSS(rand, priv, o.Hash, digest, o)
	default:
		return SignPKCS1v15(rand, priv, opts.HashFunc(), digest)
	}
}
// Decrypt decrypts ciphertext with priv. If opts is nil or of type
// *[PKCS1v15DecryptOptions] then PKCS #1 v1.5 decryption is performed. Otherwise
// opts must have type *[OAEPOptions] and OAEP decryption is done.
func (priv *PrivateKey) Decrypt(rand io.Reader, ciphertext []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) {
	if opts == nil {
		return DecryptPKCS1v15(rand, priv, ciphertext)
	}
	switch o := opts.(type) {
	case *OAEPOptions:
		// MGFHash of zero means "use Hash for MGF1 as well".
		mgf := o.MGFHash
		if mgf == 0 {
			mgf = o.Hash
		}
		return decryptOAEP(o.Hash.New(), mgf.New(), priv, ciphertext, o.Label)
	case *PKCS1v15DecryptOptions:
		l := o.SessionKeyLen
		if l <= 0 {
			return DecryptPKCS1v15(rand, priv, ciphertext)
		}
		// Pre-fill the result with random bytes; on invalid padding
		// DecryptPKCS1v15SessionKey leaves them in place, so the caller
		// cannot distinguish the two outcomes.
		plaintext = make([]byte, l)
		if _, err := io.ReadFull(rand, plaintext); err != nil {
			return nil, err
		}
		if err := DecryptPKCS1v15SessionKey(rand, priv, ciphertext, plaintext); err != nil {
			return nil, err
		}
		return plaintext, nil
	default:
		return nil, errors.New("crypto/rsa: invalid options for Decrypt")
	}
}
// PrecomputedValues holds derived values that speed up private key
// operations. See [PrivateKey.Precompute].
type PrecomputedValues struct {
	Dp, Dq *big.Int // D mod (P-1) (or mod Q-1)
	Qinv   *big.Int // Q^-1 mod P
	// CRTValues is used for the 3rd and subsequent primes. Due to a
	// historical accident, the CRT for the first two primes is handled
	// differently in PKCS #1 and interoperability is sufficiently
	// important that we mirror this.
	//
	// Deprecated: These values are still filled in by Precompute for
	// backwards compatibility but are not used. Multi-prime RSA is very rare,
	// and is implemented by this package without CRT optimizations to limit
	// complexity.
	CRTValues []CRTValue
	// fips, if non-nil, holds the key in the internal FIPS representation,
	// which is what fipsPrivateKey hands to the underlying implementation.
	fips *rsa.PrivateKey
}
// CRTValue contains the precomputed Chinese remainder theorem values.
type CRTValue struct {
	Exp   *big.Int // D mod (prime-1).
	Coeff *big.Int // R·Coeff ≡ 1 mod Prime.
	R     *big.Int // product of primes prior to this (inc p and q).
}
// Validate performs basic sanity checks on the key.
// It returns nil if the key is valid, or else an error describing a problem.
//
// It runs faster on valid keys if run after [PrivateKey.Precompute].
func (priv *PrivateKey) Validate() error {
	// Keys based on d alone work here, but can't be encoded with
	// [crypto/x509.MarshalPKCS1PrivateKey] (which has no error return),
	// so they must be rejected up front.
	if len(priv.Primes) < 2 {
		return errors.New("crypto/rsa: missing primes")
	}
	switch {
	case priv.precomputedIsConsistent():
		// The key was already validated by [rsa.NewPrivateKey] or
		// [rsa.NewPrivateKeyWithoutCRT].
		return nil
	case priv.Precomputed.fips != nil:
		return errors.New("crypto/rsa: precomputed values are inconsistent with the key")
	}
	_, err := priv.precompute()
	return err
}
// precomputedIsConsistent reports whether Precomputed.fips is set and
// agrees with the key material stored in priv itself.
func (priv *PrivateKey) precomputedIsConsistent() bool {
	fipsKey := priv.Precomputed.fips
	if fipsKey == nil {
		return false
	}
	N, e, d, P, Q, dP, dQ, qInv := fipsKey.Export()
	if !bigIntEqualToBytes(priv.N, N) {
		return false
	}
	if priv.E != e {
		return false
	}
	if !bigIntEqualToBytes(priv.D, d) {
		return false
	}
	if len(priv.Primes) != 2 {
		// A non-two-prime key must carry no CRT material at all.
		return P == nil && Q == nil && dP == nil && dQ == nil && qInv == nil
	}
	return bigIntEqualToBytes(priv.Primes[0], P) &&
		bigIntEqualToBytes(priv.Primes[1], Q) &&
		bigIntEqualToBytes(priv.Precomputed.Dp, dP) &&
		bigIntEqualToBytes(priv.Precomputed.Dq, dQ) &&
		bigIntEqualToBytes(priv.Precomputed.Qinv, qInv)
}
// bigIntEqual reports whether a and b are equal, ignoring leading zero bytes in
// b, and leaking only their bit length through timing side-channels.
func bigIntEqualToBytes(a *big.Int, b []byte) bool {
if a == nil || a.BitLen() > len(b)*8 {
return false
}
buf := a.FillBytes(make([]byte, len(b)))
return subtle.ConstantTimeCompare(buf, b) == 1
}
// rsa1024min is a GODEBUG that re-enables weak RSA keys if set to "0".
// See https://go.dev/issue/68762.
//
// It is consulted by checkKeySize below.
var rsa1024min = godebug.New("rsa1024min")
// checkKeySize returns an error for modulus sizes below 1024 bits,
// unless the rsa1024min=0 GODEBUG setting is in effect.
func checkKeySize(size int) error {
	switch {
	case size >= 1024:
		return nil
	case rsa1024min.Value() == "0":
		// Weak key explicitly allowed; record the non-default use.
		rsa1024min.IncNonDefault()
		return nil
	}
	return fmt.Errorf("crypto/rsa: %d-bit keys are insecure (see https://go.dev/pkg/crypto/rsa#hdr-Minimum_key_size)", size)
}
// checkPublicKeySize rejects keys with a missing modulus or a modulus
// below the package-wide minimum enforced by checkKeySize.
func checkPublicKeySize(k *PublicKey) error {
	if k.N != nil {
		return checkKeySize(k.N.BitLen())
	}
	return errors.New("crypto/rsa: missing public modulus")
}
// GenerateKey generates a random RSA private key of the given bit size.
//
// If bits is less than 1024, [GenerateKey] returns an error. See the "[Minimum
// key size]" section for further details.
//
// Since Go 1.26, a secure source of random bytes is always used, and the Reader is
// ignored unless GODEBUG=cryptocustomrand=1 is set. This setting will be removed
// in a future Go release. Instead, use [testing/cryptotest.SetGlobalRandom].
//
// [Minimum key size]: https://pkg.go.dev/crypto/rsa#hdr-Minimum_key_size
func GenerateKey(random io.Reader, bits int) (*PrivateKey, error) {
	if err := checkKeySize(bits); err != nil {
		return nil, err
	}
	// BoringCrypto generates the key itself, but only with the default
	// random source and only for the sizes it supports.
	if boring.Enabled && rand.IsDefaultReader(random) &&
		(bits == 2048 || bits == 3072 || bits == 4096) {
		bN, bE, bD, bP, bQ, bDp, bDq, bQinv, err := boring.GenerateKeyRSA(bits)
		if err != nil {
			return nil, err
		}
		N := bbig.Dec(bN)
		E := bbig.Dec(bE)
		D := bbig.Dec(bD)
		P := bbig.Dec(bP)
		Q := bbig.Dec(bQ)
		Dp := bbig.Dec(bDp)
		Dq := bbig.Dec(bDq)
		Qinv := bbig.Dec(bQinv)
		// E is stored as an int; reject exponents that don't round-trip
		// through the platform's int size.
		e64 := E.Int64()
		if !E.IsInt64() || int64(int(e64)) != e64 {
			return nil, errors.New("crypto/rsa: generated key exponent too large")
		}
		key := &PrivateKey{
			PublicKey: PublicKey{
				N: N,
				E: int(e64),
			},
			D:      D,
			Primes: []*big.Int{P, Q},
			Precomputed: PrecomputedValues{
				Dp:        Dp,
				Dq:        Dq,
				Qinv:      Qinv,
				CRTValues: make([]CRTValue, 0), // non-nil, to match Precompute
			},
		}
		return key, nil
	}
	random = rand.CustomReader(random)
	if fips140only.Enforced() && bits < 2048 {
		return nil, errors.New("crypto/rsa: use of keys smaller than 2048 bits is not allowed in FIPS 140-only mode")
	}
	if fips140only.Enforced() && bits%2 == 1 {
		return nil, errors.New("crypto/rsa: use of keys with odd size is not allowed in FIPS 140-only mode")
	}
	if fips140only.Enforced() && !fips140only.ApprovedRandomReader(random) {
		return nil, errors.New("crypto/rsa: only crypto/rand.Reader is allowed in FIPS 140-only mode")
	}
	k, err := rsa.GenerateKey(random, bits)
	if bits < 256 && err != nil {
		// Toy-sized keys have a non-negligible chance of hitting two hard
		// failure cases: p == q and d <= 2^(nlen / 2).
		//
		// Since these are impossible to hit for real keys, we don't want to
		// make the production code path more complex and harder to think about
		// to handle them.
		//
		// Instead, just rerun the whole process a total of 8 times, which
		// brings the chance of failure for 32-bit keys down to the same as for
		// 256-bit keys.
		for i := 1; i < 8 && err != nil; i++ {
			k, err = rsa.GenerateKey(random, bits)
		}
	}
	if err != nil {
		return nil, err
	}
	// Convert the FIPS representation back into the exported big.Int form,
	// keeping k itself in Precomputed.fips for later private operations.
	N, e, d, p, q, dP, dQ, qInv := k.Export()
	key := &PrivateKey{
		PublicKey: PublicKey{
			N: new(big.Int).SetBytes(N),
			E: e,
		},
		D: new(big.Int).SetBytes(d),
		Primes: []*big.Int{
			new(big.Int).SetBytes(p),
			new(big.Int).SetBytes(q),
		},
		Precomputed: PrecomputedValues{
			fips:      k,
			Dp:        new(big.Int).SetBytes(dP),
			Dq:        new(big.Int).SetBytes(dQ),
			Qinv:      new(big.Int).SetBytes(qInv),
			CRTValues: make([]CRTValue, 0), // non-nil, to match Precompute
		},
	}
	return key, nil
}
// GenerateMultiPrimeKey generates a multi-prime RSA keypair of the given bit
// size and the given random source.
//
// Table 1 in "[On the Security of Multi-prime RSA]" suggests maximum numbers of
// primes for a given bit size.
//
// Although the public keys are compatible (actually, indistinguishable) from
// the 2-prime case, the private keys are not. Thus it may not be possible to
// export multi-prime private keys in certain formats or to subsequently import
// them into other code.
//
// This package does not implement CRT optimizations for multi-prime RSA, so the
// keys with more than two primes will have worse performance.
//
// Since Go 1.26, a secure source of random bytes is always used, and the Reader is
// ignored unless GODEBUG=cryptocustomrand=1 is set. This setting will be removed
// in a future Go release. Instead, use [testing/cryptotest.SetGlobalRandom].
//
// Deprecated: The use of this function with a number of primes different from
// two is not recommended for the above security, compatibility, and performance
// reasons. Use [GenerateKey] instead.
//
// [On the Security of Multi-prime RSA]: http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (*PrivateKey, error) {
	// The two-prime case is the ordinary, fully supported path.
	if nprimes == 2 {
		return GenerateKey(random, bits)
	}
	if fips140only.Enforced() {
		return nil, errors.New("crypto/rsa: multi-prime RSA is not allowed in FIPS 140-only mode")
	}
	random = rand.CustomReader(random)
	priv := new(PrivateKey)
	priv.E = 65537
	if nprimes < 2 {
		return nil, errors.New("crypto/rsa: GenerateMultiPrimeKey: nprimes must be >= 2")
	}
	// For very small keys, verify up front that enough distinct primes of
	// the needed size even exist, so the loop below can terminate.
	if bits < 64 {
		primeLimit := float64(uint64(1) << uint(bits/nprimes))
		// pi approximates the number of primes less than primeLimit
		pi := primeLimit / (math.Log(primeLimit) - 1)
		// Generated primes start with 11 (in binary) so we can only
		// use a quarter of them.
		pi /= 4
		// Use a factor of two to ensure that key generation terminates
		// in a reasonable amount of time.
		pi /= 2
		if pi <= float64(nprimes) {
			return nil, errors.New("crypto/rsa: too few primes of given length to generate an RSA key")
		}
	}
	primes := make([]*big.Int, nprimes)
NextSetOfPrimes:
	for {
		todo := bits
		// crypto/rand should set the top two bits in each prime.
		// Thus each prime has the form
		//   p_i = 2^bitlen(p_i) × 0.11... (in base 2).
		// And the product is:
		//   P = 2^todo × α
		// where α is the product of nprimes numbers of the form 0.11...
		//
		// If α < 1/2 (which can happen for nprimes > 2), we need to
		// shift todo to compensate for lost bits: the mean value of 0.11...
		// is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2
		// will give good results.
		if nprimes >= 7 {
			todo += (nprimes - 2) / 5
		}
		for i := 0; i < nprimes; i++ {
			var err error
			primes[i], err = cryptorand.Prime(random, todo/(nprimes-i))
			if err != nil {
				return nil, err
			}
			todo -= primes[i].BitLen()
		}
		// Make sure that primes is pairwise unequal.
		for i, prime := range primes {
			for j := 0; j < i; j++ {
				if prime.Cmp(primes[j]) == 0 {
					continue NextSetOfPrimes
				}
			}
		}
		// n is the modulus; totient is φ(n) = Π (p_i - 1).
		n := new(big.Int).Set(bigOne)
		totient := new(big.Int).Set(bigOne)
		pminus1 := new(big.Int)
		for _, prime := range primes {
			n.Mul(n, prime)
			pminus1.Sub(prime, bigOne)
			totient.Mul(totient, pminus1)
		}
		if n.BitLen() != bits {
			// This should never happen for nprimes == 2 because
			// crypto/rand should set the top two bits in each prime.
			// For nprimes > 2 we hope it does not happen often.
			continue NextSetOfPrimes
		}
		// D is the inverse of E mod φ(n); if E and φ(n) are not coprime
		// (ok == nil), retry with a new set of primes.
		priv.D = new(big.Int)
		e := big.NewInt(int64(priv.E))
		ok := priv.D.ModInverse(e, totient)
		if ok != nil {
			priv.Primes = primes
			priv.N = n
			break
		}
	}
	priv.Precompute()
	if err := priv.Validate(); err != nil {
		return nil, err
	}
	return priv, nil
}
// ErrMessageTooLong is returned when attempting to encrypt or sign a message
// which is too large for the size of the key. When using [SignPSS], this can also
// be returned if the size of the salt is too large.
var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA key size")

// ErrDecryption represents a failure to decrypt a message.
// It is deliberately vague to avoid adaptive attacks.
var ErrDecryption = errors.New("crypto/rsa: decryption error")

// ErrVerification represents a failure to verify a signature.
// It is deliberately vague to avoid adaptive attacks.
var ErrVerification = errors.New("crypto/rsa: verification error")
// Precompute performs some calculations that speed up private key operations in
// the future. It is safe to run on non-validated private keys, and it can speed
// up future calls to [PrivateKey.Validate] for valid keys.
//
// Precompute writes to the Precomputed field, so it must not be called
// concurrently with any other method.
//
// Precompute does not return an error. Applications should call
// [PrivateKey.Validate] after Precompute to check for any problems with the
// key, including any that would cause Precompute to fail.
//
// Calling Precompute on a key that has already been precomputed is a no-op.
func (priv *PrivateKey) Precompute() {
	if priv.precomputedIsConsistent() {
		// Already precomputed; nothing to do.
		return
	}
	p, err := priv.precompute()
	if err == nil {
		priv.Precomputed = p
		return
	}
	// There is no way to report the error here, so just clear
	// Precomputed.fips; Validate will re-run precompute and surface it.
	priv.Precomputed.fips = nil
}
// precompute calculates the PrecomputedValues for priv and returns them.
//
// It does NOT modify priv and is safe for concurrent use.
func (priv *PrivateKey) precompute() (PrecomputedValues, error) {
	var precomputed PrecomputedValues
	if priv.N == nil {
		return precomputed, errors.New("crypto/rsa: missing public modulus")
	}
	if priv.D == nil {
		return precomputed, errors.New("crypto/rsa: missing private exponent")
	}
	// Keys without exactly two primes take the slower non-CRT path.
	if len(priv.Primes) != 2 {
		return priv.precomputeLegacy()
	}
	if priv.Primes[0] == nil {
		return precomputed, errors.New("crypto/rsa: prime P is nil")
	}
	if priv.Primes[1] == nil {
		return precomputed, errors.New("crypto/rsa: prime Q is nil")
	}
	// If the CRT values are already set, use them.
	if priv.Precomputed.Dp != nil && priv.Precomputed.Dq != nil && priv.Precomputed.Qinv != nil {
		k, err := rsa.NewPrivateKeyWithPrecomputation(priv.N.Bytes(), priv.E, priv.D.Bytes(),
			priv.Primes[0].Bytes(), priv.Primes[1].Bytes(),
			priv.Precomputed.Dp.Bytes(), priv.Precomputed.Dq.Bytes(), priv.Precomputed.Qinv.Bytes())
		if err != nil {
			return precomputed, err
		}
		// Keep the caller-supplied CRT values; only attach the FIPS key
		// and normalize CRTValues to non-nil.
		precomputed = priv.Precomputed
		precomputed.fips = k
		precomputed.CRTValues = make([]CRTValue, 0)
		return precomputed, nil
	}
	// Otherwise derive the CRT values from N, E, D, P, and Q.
	k, err := rsa.NewPrivateKey(priv.N.Bytes(), priv.E, priv.D.Bytes(),
		priv.Primes[0].Bytes(), priv.Primes[1].Bytes())
	if err != nil {
		return precomputed, err
	}
	precomputed.fips = k
	_, _, _, _, _, dP, dQ, qInv := k.Export()
	precomputed.Dp = new(big.Int).SetBytes(dP)
	precomputed.Dq = new(big.Int).SetBytes(dQ)
	precomputed.Qinv = new(big.Int).SetBytes(qInv)
	precomputed.CRTValues = make([]CRTValue, 0)
	return precomputed, nil
}
// precomputeLegacy computes PrecomputedValues for keys that don't have
// exactly two primes (multi-prime or d-only keys). The FIPS key is built
// without CRT; the Dp/Dq/Qinv/CRTValues fields are filled in only for
// backwards compatibility with code that reads them.
func (priv *PrivateKey) precomputeLegacy() (PrecomputedValues, error) {
	var precomputed PrecomputedValues
	k, err := rsa.NewPrivateKeyWithoutCRT(priv.N.Bytes(), priv.E, priv.D.Bytes())
	if err != nil {
		return precomputed, err
	}
	precomputed.fips = k
	// A d-only key (fewer than two primes) has no CRT material at all.
	if len(priv.Primes) < 2 {
		return precomputed, nil
	}
	// Ensure the Mod and ModInverse calls below don't panic.
	for _, prime := range priv.Primes {
		if prime == nil {
			return precomputed, errors.New("crypto/rsa: prime factor is nil")
		}
		if prime.Cmp(bigOne) <= 0 {
			return precomputed, errors.New("crypto/rsa: prime factor is <= 1")
		}
	}
	// Classic two-prime CRT values: Dp = D mod (P-1), Dq = D mod (Q-1),
	// Qinv = Q^-1 mod P.
	precomputed.Dp = new(big.Int).Sub(priv.Primes[0], bigOne)
	precomputed.Dp.Mod(priv.D, precomputed.Dp)
	precomputed.Dq = new(big.Int).Sub(priv.Primes[1], bigOne)
	precomputed.Dq.Mod(priv.D, precomputed.Dq)
	precomputed.Qinv = new(big.Int).ModInverse(priv.Primes[1], priv.Primes[0])
	if precomputed.Qinv == nil {
		return precomputed, errors.New("crypto/rsa: prime factors are not relatively prime")
	}
	// r tracks the running product of the primes handled so far; each
	// additional prime gets Exp = D mod (prime-1) and Coeff = r^-1 mod prime.
	r := new(big.Int).Mul(priv.Primes[0], priv.Primes[1])
	precomputed.CRTValues = make([]CRTValue, len(priv.Primes)-2)
	for i := 2; i < len(priv.Primes); i++ {
		prime := priv.Primes[i]
		values := &precomputed.CRTValues[i-2]
		values.Exp = new(big.Int).Sub(prime, bigOne)
		values.Exp.Mod(priv.D, values.Exp)
		values.R = new(big.Int).Set(r)
		values.Coeff = new(big.Int).ModInverse(r, prime)
		if values.Coeff == nil {
			return precomputed, errors.New("crypto/rsa: prime factors are not relatively prime")
		}
		r.Mul(r, prime)
	}
	return precomputed, nil
}
// fipsPublicKey converts pub into the FIPS module's *rsa.PublicKey
// representation, validating the modulus bytes in the process.
func fipsPublicKey(pub *PublicKey) (*rsa.PublicKey, error) {
	mod, err := bigmod.NewModulus(pub.N.Bytes())
	if err != nil {
		return nil, err
	}
	key := &rsa.PublicKey{N: mod, E: pub.E}
	return key, nil
}
// fipsPrivateKey returns the *rsa.PrivateKey corresponding to priv, using the
// precomputed values if available, and calculating them if not.
//
// It does NOT modify priv and is safe for concurrent use.
func fipsPrivateKey(priv *PrivateKey) (*rsa.PrivateKey, error) {
	if k := priv.Precomputed.fips; k != nil {
		return k, nil
	}
	pc, err := priv.precompute()
	if err != nil {
		return nil, err
	}
	return pc.fips, nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha1 implements the SHA-1 hash algorithm as defined in RFC 3174.
//
// SHA-1 is cryptographically broken and should not be used for secure
// applications.
package sha1
import (
"crypto"
"crypto/internal/boring"
"crypto/internal/fips140only"
"errors"
"hash"
"internal/byteorder"
)
// init registers this package's constructor with the crypto package so that
// crypto.SHA1.New returns a SHA-1 hash.
func init() {
	crypto.RegisterHash(crypto.SHA1, New)
}
// The size of a SHA-1 checksum in bytes.
const Size = 20

// The blocksize of SHA-1 in bytes.
const BlockSize = 64

const (
	chunk = 64         // bytes consumed per compression-function call
	init0 = 0x67452301 // initial hash state H0..H4 (RFC 3174)
	init1 = 0xEFCDAB89
	init2 = 0x98BADCFE
	init3 = 0x10325476
	init4 = 0xC3D2E1F0
)

// digest represents the partial evaluation of a checksum.
type digest struct {
	h   [5]uint32   // current hash state
	x   [chunk]byte // buffered input not yet forming a full block
	nx  int         // number of valid bytes in x
	len uint64      // total number of bytes written so far
}

const (
	magic         = "sha\x01"
	marshaledSize = len(magic) + 5*4 + chunk + 8 // magic + state + buffer + length
)
// MarshalBinary implements [encoding.BinaryMarshaler] by delegating to
// AppendBinary with a buffer sized exactly for the marshaled state.
func (d *digest) MarshalBinary() ([]byte, error) {
	buf := make([]byte, 0, marshaledSize)
	return d.AppendBinary(buf)
}
// AppendBinary implements [encoding.BinaryAppender]: it appends the magic
// tag, the five state words, the (zero-padded) input buffer, and the total
// length, and always returns a nil error.
func (d *digest) AppendBinary(b []byte) ([]byte, error) {
	b = append(b, magic...)
	for _, word := range d.h {
		b = byteorder.BEAppendUint32(b, word)
	}
	b = append(b, d.x[:d.nx]...)
	b = append(b, make([]byte, len(d.x)-d.nx)...)
	b = byteorder.BEAppendUint64(b, d.len)
	return b, nil
}
// UnmarshalBinary implements [encoding.BinaryUnmarshaler], restoring the
// digest state produced by MarshalBinary/AppendBinary.
func (d *digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("crypto/sha1: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("crypto/sha1: invalid hash state size")
	}
	b = b[len(magic):]
	for i := range d.h {
		b, d.h[i] = consumeUint32(b)
	}
	b = b[copy(d.x[:], b):]
	b, d.len = consumeUint64(b)
	// The buffer holds len % chunk bytes of pending input.
	d.nx = int(d.len % chunk)
	return nil
}
// consumeUint64 decodes a big-endian uint64 from the front of b and returns
// the remaining bytes.
func consumeUint64(b []byte) ([]byte, uint64) {
	x := byteorder.BEUint64(b)
	return b[8:], x
}
// consumeUint32 decodes a big-endian uint32 from the front of b and returns
// the remaining bytes.
func consumeUint32(b []byte) ([]byte, uint32) {
	x := byteorder.BEUint32(b)
	return b[4:], x
}
// Clone implements [hash.Cloner] by returning an independent copy of the
// digest state. It never returns an error.
func (d *digest) Clone() (hash.Cloner, error) {
	r := *d
	return &r, nil
}
// Reset restores the digest to its initial, empty state.
func (d *digest) Reset() {
	d.h = [5]uint32{init0, init1, init2, init3, init4}
	d.nx = 0
	d.len = 0
}
// New returns a new [hash.Hash] computing the SHA1 checksum. The Hash
// also implements [encoding.BinaryMarshaler], [encoding.BinaryAppender] and
// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
// state of the hash.
func New() hash.Hash {
	if boring.Enabled {
		return boring.NewSHA1()
	}
	d := &digest{}
	d.Reset()
	return d
}
// Size returns the number of bytes Sum will produce (20 for SHA-1).
func (d *digest) Size() int { return Size }
// BlockSize returns the hash's underlying block size (64 bytes).
func (d *digest) BlockSize() int { return BlockSize }
// Write absorbs p into the hash state, implementing [io.Writer]. It buffers
// partial blocks in d.x and compresses full 64-byte blocks as they complete.
// It returns len(p) and a nil error, except in FIPS 140-only mode.
func (d *digest) Write(p []byte) (nn int, err error) {
	if fips140only.Enforced() {
		return 0, errors.New("crypto/sha1: use of SHA-1 is not allowed in FIPS 140-only mode")
	}
	boring.Unreachable()
	nn = len(p)
	d.len += uint64(nn)
	// Top up a previously buffered partial block first.
	if d.nx > 0 {
		n := copy(d.x[d.nx:], p)
		d.nx += n
		if d.nx == chunk {
			block(d, d.x[:])
			d.nx = 0
		}
		p = p[n:]
	}
	// Compress as many whole blocks as possible directly from p.
	if len(p) >= chunk {
		n := len(p) &^ (chunk - 1) // round down to a multiple of chunk
		block(d, p[:n])
		p = p[n:]
	}
	// Stash any trailing partial block for the next Write or Sum.
	if len(p) > 0 {
		d.nx = copy(d.x[:], p)
	}
	return
}
// Sum appends the SHA-1 checksum of all data written so far to in and
// returns the result, leaving the digest usable for further writes.
func (d *digest) Sum(in []byte) []byte {
	boring.Unreachable()
	// Finalize a copy so the caller can keep writing and summing.
	dd := *d
	sum := dd.checkSum()
	return append(in, sum[:]...)
}
// checkSum finalizes the hash: it appends SHA-1 padding (a 0x80 byte, zeros,
// then the 64-bit message length in bits) and returns the big-endian digest.
// It mutates d, so callers finalize a copy.
func (d *digest) checkSum() [Size]byte {
	if fips140only.Enforced() {
		panic("crypto/sha1: use of SHA-1 is not allowed in FIPS 140-only mode")
	}
	len := d.len
	// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
	var tmp [64 + 8]byte // padding + length buffer
	tmp[0] = 0x80
	var t uint64
	if len%64 < 56 {
		t = 56 - len%64
	} else {
		// No room for the 8-byte length in this block; pad through the next.
		t = 64 + 56 - len%64
	}
	// Length in bits.
	len <<= 3
	padlen := tmp[:t+8]
	byteorder.BEPutUint64(padlen[t:], len)
	d.Write(padlen)
	// The padding must land exactly on a block boundary.
	if d.nx != 0 {
		panic("d.nx != 0")
	}
	var digest [Size]byte
	byteorder.BEPutUint32(digest[0:], d.h[0])
	byteorder.BEPutUint32(digest[4:], d.h[1])
	byteorder.BEPutUint32(digest[8:], d.h[2])
	byteorder.BEPutUint32(digest[12:], d.h[3])
	byteorder.BEPutUint32(digest[16:], d.h[4])
	return digest
}
// ConstantTimeSum computes the same result of [Sum] but in constant time
func (d *digest) ConstantTimeSum(in []byte) []byte {
	// Finalize a copy so the caller can keep writing and summing.
	dd := *d
	sum := dd.constSum()
	return append(in, sum[:]...)
}
// constSum finalizes the hash without data-dependent branches or memory
// access patterns: it always compresses two blocks and uses byte masks to
// keep whichever digest is the real one, so timing does not reveal how much
// padding the message needed.
func (d *digest) constSum() [Size]byte {
	if fips140only.Enforced() {
		panic("crypto/sha1: use of SHA-1 is not allowed in FIPS 140-only mode")
	}
	// Big-endian encoding of the message length in bits.
	var length [8]byte
	l := d.len << 3
	for i := uint(0); i < 8; i++ {
		length[i] = byte(l >> (56 - 8*i))
	}
	nx := byte(d.nx)
	t := nx - 56                 // if nx < 56 then the MSB of t is one
	mask1b := byte(int8(t) >> 7) // mask1b is 0xFF iff one block is enough
	separator := byte(0x80)      // gets reset to 0x00 once used
	for i := byte(0); i < chunk; i++ {
		mask := byte(int8(i-nx) >> 7) // 0x00 after the end of data
		// if we reached the end of the data, replace with 0x80 or 0x00
		d.x[i] = (^mask & separator) | (mask & d.x[i])
		// zero the separator once used
		separator &= mask
		if i >= 56 {
			// we might have to write the length here if all fit in one block
			d.x[i] |= mask1b & length[i-56]
		}
	}
	// compress, and only keep the digest if all fit in one block
	block(d, d.x[:])
	var digest [Size]byte
	for i, s := range d.h {
		digest[i*4] = mask1b & byte(s>>24)
		digest[i*4+1] = mask1b & byte(s>>16)
		digest[i*4+2] = mask1b & byte(s>>8)
		digest[i*4+3] = mask1b & byte(s)
	}
	for i := byte(0); i < chunk; i++ {
		// second block, it's always past the end of data, might start with 0x80
		if i < 56 {
			d.x[i] = separator
			separator = 0
		} else {
			d.x[i] = length[i-56]
		}
	}
	// compress, and only keep the digest if we actually needed the second block
	block(d, d.x[:])
	for i, s := range d.h {
		digest[i*4] |= ^mask1b & byte(s>>24)
		digest[i*4+1] |= ^mask1b & byte(s>>16)
		digest[i*4+2] |= ^mask1b & byte(s>>8)
		digest[i*4+3] |= ^mask1b & byte(s)
	}
	return digest
}
// Sum returns the SHA-1 checksum of the data.
func Sum(data []byte) [Size]byte {
	if boring.Enabled {
		return boring.SHA1(data)
	}
	if fips140only.Enforced() {
		panic("crypto/sha1: use of SHA-1 is not allowed in FIPS 140-only mode")
	}
	d := digest{}
	d.Reset()
	d.Write(data)
	return d.checkSum()
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha1
import (
"math/bits"
)
// SHA-1 round constants, one per 20-iteration round group (RFC 3174).
const (
	_K0 = 0x5A827999 // rounds 0-19
	_K1 = 0x6ED9EBA1 // rounds 20-39
	_K2 = 0x8F1BBCDC // rounds 40-59
	_K3 = 0xCA62C1D6 // rounds 60-79
)
// blockGeneric is a portable, pure Go version of the SHA-1 block step.
// It's used by sha1block_generic.go and tests.
//
// p must be a multiple of chunk (64) bytes; the state in dig.h is updated
// in place for each block. The message schedule is kept in a rolling
// 16-word window (w[i&0xf]) instead of the full 80 words.
func blockGeneric(dig *digest, p []byte) {
	var w [16]uint32
	h0, h1, h2, h3, h4 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4]
	for len(p) >= chunk {
		// Can interlace the computation of w with the
		// rounds below if needed for speed.
		for i := 0; i < 16; i++ {
			j := i * 4
			w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
		}
		a, b, c, d, e := h0, h1, h2, h3, h4
		// Each of the four 20-iteration rounds
		// differs only in the computation of f and
		// the choice of K (_K0, _K1, etc).
		i := 0
		// Rounds 0-15 use the message words directly (no schedule expansion).
		for ; i < 16; i++ {
			f := b&c | (^b)&d
			t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + _K0
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
		}
		// Rounds 16-19: same f/K, but with schedule expansion.
		for ; i < 20; i++ {
			tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
			w[i&0xf] = bits.RotateLeft32(tmp, 1)
			f := b&c | (^b)&d
			t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + _K0
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
		}
		for ; i < 40; i++ {
			tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
			w[i&0xf] = bits.RotateLeft32(tmp, 1)
			f := b ^ c ^ d
			t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + _K1
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
		}
		for ; i < 60; i++ {
			tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
			w[i&0xf] = bits.RotateLeft32(tmp, 1)
			f := ((b | c) & d) | (b & c)
			t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + _K2
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
		}
		for ; i < 80; i++ {
			tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
			w[i&0xf] = bits.RotateLeft32(tmp, 1)
			f := b ^ c ^ d
			t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + _K3
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
		}
		h0 += a
		h1 += b
		h2 += c
		h3 += d
		h4 += e
		p = p[chunk:]
	}
	dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] = h0, h1, h2, h3, h4
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego
package sha1
import (
"crypto/internal/impl"
"internal/cpu"
)
// blockAVX2 and blockSHANI are implemented in assembly.
//
//go:noescape
func blockAVX2(dig *digest, p []byte)

//go:noescape
func blockSHANI(dig *digest, p []byte)

// CPU feature gates for selecting an assembly implementation in block.
var useAVX2 = cpu.X86.HasAVX && cpu.X86.HasAVX2 && cpu.X86.HasBMI1 && cpu.X86.HasBMI2
var useSHANI = cpu.X86.HasAVX && cpu.X86.HasSHA && cpu.X86.HasSSE41 && cpu.X86.HasSSSE3
// init registers the available assembly implementations so tooling can
// report or toggle them.
func init() {
	impl.Register("sha1", "AVX2", &useAVX2)
	impl.Register("sha1", "SHA-NI", &useSHANI)
}
// block dispatches whole 64-byte blocks to the best available
// implementation: SHA-NI, AVX2 (for large inputs), or the generic Go code.
func block(dig *digest, p []byte) {
	if useSHANI {
		blockSHANI(dig, p)
	} else if useAVX2 && len(p) >= 256 {
		// blockAVX2 calculates sha1 for 2 block per iteration and also
		// interleaves precalculation for next block. So it may read up-to 192
		// bytes past end of p. We could add checks inside blockAVX2, but this
		// would just turn it into a copy of the old pre-AVX2 amd64 SHA1
		// assembly implementation, so just call blockGeneric instead.
		safeLen := len(p) - 128
		if safeLen%128 != 0 {
			safeLen -= 64
		}
		blockAVX2(dig, p[:safeLen])
		blockGeneric(dig, p[safeLen:])
	} else {
		blockGeneric(dig, p)
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha256 implements the SHA224 and SHA256 hash algorithms as defined
// in FIPS 180-4.
package sha256
import (
"crypto"
"crypto/internal/boring"
"crypto/internal/fips140/sha256"
"hash"
)
// init registers the SHA-224 and SHA-256 constructors with the crypto
// package's hash registry.
func init() {
	crypto.RegisterHash(crypto.SHA224, New224)
	crypto.RegisterHash(crypto.SHA256, New)
}
// The size of a SHA256 checksum in bytes.
const Size = 32
// The size of a SHA224 checksum in bytes.
const Size224 = 28
// The blocksize of SHA256 and SHA224 in bytes.
const BlockSize = 64
// New returns a new [hash.Hash] computing the SHA256 checksum. The Hash
// also implements [encoding.BinaryMarshaler], [encoding.BinaryAppender] and
// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
// state of the hash.
func New() hash.Hash {
	if !boring.Enabled {
		return sha256.New()
	}
	return boring.NewSHA256()
}
// New224 returns a new [hash.Hash] computing the SHA224 checksum. The Hash
// also implements [encoding.BinaryMarshaler], [encoding.BinaryAppender] and
// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
// state of the hash.
func New224() hash.Hash {
	if !boring.Enabled {
		return sha256.New224()
	}
	return boring.NewSHA224()
}
// Sum256 returns the SHA256 checksum of the data.
func Sum256(data []byte) [Size]byte {
	if boring.Enabled {
		return boring.SHA256(data)
	}
	var sum [Size]byte
	h := New()
	h.Write(data)
	h.Sum(sum[:0])
	return sum
}
// Sum224 returns the SHA224 checksum of the data.
func Sum224(data []byte) [Size224]byte {
	if boring.Enabled {
		return boring.SHA224(data)
	}
	var sum [Size224]byte
	h := New224()
	h.Write(data)
	h.Sum(sum[:0])
	return sum
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha3 implements the SHA-3 hash algorithms and the SHAKE extendable
// output functions defined in FIPS 202.
package sha3
import (
"crypto"
"crypto/internal/fips140/sha3"
"hash"
_ "unsafe"
)
// init registers the four SHA-3 constructors with the crypto package's hash
// registry; closures adapt the concrete *SHA3 return type to hash.Hash.
func init() {
	crypto.RegisterHash(crypto.SHA3_224, func() hash.Hash { return New224() })
	crypto.RegisterHash(crypto.SHA3_256, func() hash.Hash { return New256() })
	crypto.RegisterHash(crypto.SHA3_384, func() hash.Hash { return New384() })
	crypto.RegisterHash(crypto.SHA3_512, func() hash.Hash { return New512() })
}
// Sum224 returns the SHA3-224 hash of data.
func Sum224(data []byte) [28]byte {
	d := sha3.New224()
	d.Write(data)
	var out [28]byte
	d.Sum(out[:0])
	return out
}
// Sum256 returns the SHA3-256 hash of data.
func Sum256(data []byte) [32]byte {
	d := sha3.New256()
	d.Write(data)
	var out [32]byte
	d.Sum(out[:0])
	return out
}
// Sum384 returns the SHA3-384 hash of data.
func Sum384(data []byte) [48]byte {
	d := sha3.New384()
	d.Write(data)
	var out [48]byte
	d.Sum(out[:0])
	return out
}
// Sum512 returns the SHA3-512 hash of data.
func Sum512(data []byte) [64]byte {
	d := sha3.New512()
	d.Write(data)
	var out [64]byte
	d.Sum(out[:0])
	return out
}
// SumSHAKE128 applies the SHAKE128 extendable output function to data and
// returns an output of the given length in bytes.
func SumSHAKE128(data []byte, length int) []byte {
	// Outline the allocation for up to 256 bits of output to the caller's stack.
	return sumSHAKE128(make([]byte, 32), data, length)
}
// sumSHAKE128 squeezes length bytes of SHAKE128(data) into out, growing it
// only when the caller-supplied buffer is too small.
func sumSHAKE128(out, data []byte, length int) []byte {
	if length > len(out) {
		out = make([]byte, length)
	} else {
		out = out[:length]
	}
	x := sha3.NewShake128()
	x.Write(data)
	x.Read(out)
	return out
}
// SumSHAKE256 applies the SHAKE256 extendable output function to data and
// returns an output of the given length in bytes.
func SumSHAKE256(data []byte, length int) []byte {
	// Outline the allocation for up to 512 bits of output to the caller's stack.
	return sumSHAKE256(make([]byte, 64), data, length)
}
// sumSHAKE256 squeezes length bytes of SHAKE256(data) into out, growing it
// only when the caller-supplied buffer is too small.
func sumSHAKE256(out, data []byte, length int) []byte {
	if length > len(out) {
		out = make([]byte, length)
	} else {
		out = out[:length]
	}
	x := sha3.NewShake256()
	x.Write(data)
	x.Read(out)
	return out
}
// SHA3 is an instance of a SHA-3 hash. It implements [hash.Hash].
// The zero value is a usable SHA3-256 hash.
type SHA3 struct {
	s sha3.Digest // underlying FIPS 140 module digest; zero value detected via Size() == 0
}
// fips140hash_sha3Unwrap exposes the wrapped FIPS digest to
// crypto/internal/fips140hash via linkname. The directive must remain
// attached to this declaration.
//
//go:linkname fips140hash_sha3Unwrap crypto/internal/fips140hash.sha3Unwrap
func fips140hash_sha3Unwrap(sha3 *SHA3) *sha3.Digest {
	return &sha3.s
}
// New224 creates a new SHA3-224 hash.
func New224() *SHA3 {
	h := sha3.New224()
	return &SHA3{s: *h}
}
// New256 creates a new SHA3-256 hash.
func New256() *SHA3 {
	h := sha3.New256()
	return &SHA3{s: *h}
}
// New384 creates a new SHA3-384 hash.
func New384() *SHA3 {
	h := sha3.New384()
	return &SHA3{s: *h}
}
// New512 creates a new SHA3-512 hash.
func New512() *SHA3 {
	h := sha3.New512()
	return &SHA3{s: *h}
}
// init lazily turns a zero-value SHA3 into a SHA3-256 instance, making the
// zero value usable. A constructed SHA3 reports a nonzero Size and is left
// untouched.
func (s *SHA3) init() {
	if s.s.Size() == 0 {
		*s = *New256()
	}
}
// Write absorbs more data into the hash's state.
func (s *SHA3) Write(p []byte) (n int, err error) {
	s.init() // support the zero value (SHA3-256)
	return s.s.Write(p)
}
// Sum appends the current hash to b and returns the resulting slice.
func (s *SHA3) Sum(b []byte) []byte {
	s.init() // support the zero value (SHA3-256)
	return s.s.Sum(b)
}
// Reset resets the hash to its initial state.
func (s *SHA3) Reset() {
	s.init() // support the zero value (SHA3-256)
	s.s.Reset()
}
// Size returns the number of bytes Sum will produce.
func (s *SHA3) Size() int {
	s.init() // support the zero value (SHA3-256)
	return s.s.Size()
}
// BlockSize returns the hash's rate.
func (s *SHA3) BlockSize() int {
	s.init() // support the zero value (SHA3-256)
	return s.s.BlockSize()
}
// MarshalBinary implements [encoding.BinaryMarshaler].
func (s *SHA3) MarshalBinary() ([]byte, error) {
	s.init() // support the zero value (SHA3-256)
	return s.s.MarshalBinary()
}
// AppendBinary implements [encoding.BinaryAppender].
func (s *SHA3) AppendBinary(p []byte) ([]byte, error) {
	s.init() // support the zero value (SHA3-256)
	return s.s.AppendBinary(p)
}
// UnmarshalBinary implements [encoding.BinaryUnmarshaler].
func (s *SHA3) UnmarshalBinary(data []byte) error {
	s.init() // support the zero value (SHA3-256)
	return s.s.UnmarshalBinary(data)
}
// Clone implements [hash.Cloner] by returning an independent copy of the
// hash state. It never returns an error.
func (d *SHA3) Clone() (hash.Cloner, error) {
	r := *d
	return &r, nil
}
// SHAKE is an instance of a SHAKE extendable output function.
// The zero value is a usable SHAKE256 hash.
type SHAKE struct {
	s sha3.SHAKE // underlying FIPS 140 module XOF; zero value detected via Size() == 0
}
// init lazily turns a zero-value SHAKE into a SHAKE256 instance, making the
// zero value usable. A constructed SHAKE reports a nonzero Size and is left
// untouched.
func (s *SHAKE) init() {
	if s.s.Size() == 0 {
		*s = *NewSHAKE256()
	}
}
// NewSHAKE128 creates a new SHAKE128 XOF.
func NewSHAKE128() *SHAKE {
	x := sha3.NewShake128()
	return &SHAKE{s: *x}
}
// NewSHAKE256 creates a new SHAKE256 XOF.
func NewSHAKE256() *SHAKE {
	x := sha3.NewShake256()
	return &SHAKE{s: *x}
}
// NewCSHAKE128 creates a new cSHAKE128 XOF.
//
// N is used to define functions based on cSHAKE, it can be empty when plain
// cSHAKE is desired. S is a customization byte string used for domain
// separation. When N and S are both empty, this is equivalent to NewSHAKE128.
func NewCSHAKE128(N, S []byte) *SHAKE {
	x := sha3.NewCShake128(N, S)
	return &SHAKE{s: *x}
}
// NewCSHAKE256 creates a new cSHAKE256 XOF.
//
// N is used to define functions based on cSHAKE, it can be empty when plain
// cSHAKE is desired. S is a customization byte string used for domain
// separation. When N and S are both empty, this is equivalent to NewSHAKE256.
func NewCSHAKE256(N, S []byte) *SHAKE {
	x := sha3.NewCShake256(N, S)
	return &SHAKE{s: *x}
}
// Write absorbs more data into the XOF's state.
//
// It panics if any output has already been read.
func (s *SHAKE) Write(p []byte) (n int, err error) {
	s.init() // support the zero value (SHAKE256)
	return s.s.Write(p)
}
// Read squeezes more output from the XOF.
//
// Any call to Write after a call to Read will panic.
func (s *SHAKE) Read(p []byte) (n int, err error) {
	s.init() // support the zero value (SHAKE256)
	return s.s.Read(p)
}
// Reset resets the XOF to its initial state.
func (s *SHAKE) Reset() {
	s.init() // support the zero value (SHAKE256)
	s.s.Reset()
}
// BlockSize returns the rate of the XOF.
func (s *SHAKE) BlockSize() int {
	s.init() // support the zero value (SHAKE256)
	return s.s.BlockSize()
}
// MarshalBinary implements [encoding.BinaryMarshaler].
func (s *SHAKE) MarshalBinary() ([]byte, error) {
	s.init() // support the zero value (SHAKE256)
	return s.s.MarshalBinary()
}
// AppendBinary implements [encoding.BinaryAppender].
func (s *SHAKE) AppendBinary(p []byte) ([]byte, error) {
	s.init() // support the zero value (SHAKE256)
	return s.s.AppendBinary(p)
}
// UnmarshalBinary implements [encoding.BinaryUnmarshaler].
func (s *SHAKE) UnmarshalBinary(data []byte) error {
	s.init() // support the zero value (SHAKE256)
	return s.s.UnmarshalBinary(data)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha512 implements the SHA-384, SHA-512, SHA-512/224, and SHA-512/256
// hash algorithms as defined in FIPS 180-4.
//
// All the hash.Hash implementations returned by this package also
// implement encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
// marshal and unmarshal the internal state of the hash.
package sha512
import (
"crypto"
"crypto/internal/boring"
"crypto/internal/fips140/sha512"
"hash"
)
// init registers the SHA-384, SHA-512, SHA-512/224, and SHA-512/256
// constructors with the crypto package's hash registry.
func init() {
	crypto.RegisterHash(crypto.SHA384, New384)
	crypto.RegisterHash(crypto.SHA512, New)
	crypto.RegisterHash(crypto.SHA512_224, New512_224)
	crypto.RegisterHash(crypto.SHA512_256, New512_256)
}
const (
// Size is the size, in bytes, of a SHA-512 checksum.
Size = 64
// Size224 is the size, in bytes, of a SHA-512/224 checksum.
Size224 = 28
// Size256 is the size, in bytes, of a SHA-512/256 checksum.
Size256 = 32
// Size384 is the size, in bytes, of a SHA-384 checksum.
Size384 = 48
// BlockSize is the block size, in bytes, of the SHA-512/224,
// SHA-512/256, SHA-384 and SHA-512 hash functions.
BlockSize = 128
)
// New returns a new [hash.Hash] computing the SHA-512 checksum. The Hash
// also implements [encoding.BinaryMarshaler], [encoding.BinaryAppender] and
// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
// state of the hash.
func New() hash.Hash {
	if !boring.Enabled {
		return sha512.New()
	}
	return boring.NewSHA512()
}
// New512_224 returns a new [hash.Hash] computing the SHA-512/224 checksum. The Hash
// also implements [encoding.BinaryMarshaler], [encoding.BinaryAppender] and
// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
// state of the hash.
func New512_224() hash.Hash {
	// Note: no boring fast path here, unlike New and New384.
	return sha512.New512_224()
}
// New512_256 returns a new [hash.Hash] computing the SHA-512/256 checksum. The Hash
// also implements [encoding.BinaryMarshaler], [encoding.BinaryAppender] and
// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
// state of the hash.
func New512_256() hash.Hash {
	// Note: no boring fast path here, unlike New and New384.
	return sha512.New512_256()
}
// New384 returns a new [hash.Hash] computing the SHA-384 checksum. The Hash
// also implements [encoding.BinaryMarshaler], [encoding.BinaryAppender] and
// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
// state of the hash.
func New384() hash.Hash {
	if !boring.Enabled {
		return sha512.New384()
	}
	return boring.NewSHA384()
}
// Sum512 returns the SHA512 checksum of the data.
func Sum512(data []byte) [Size]byte {
	if boring.Enabled {
		return boring.SHA512(data)
	}
	var sum [Size]byte
	h := New()
	h.Write(data)
	h.Sum(sum[:0])
	return sum
}
// Sum384 returns the SHA384 checksum of the data.
func Sum384(data []byte) [Size384]byte {
	if boring.Enabled {
		return boring.SHA384(data)
	}
	var sum [Size384]byte
	h := New384()
	h.Write(data)
	h.Sum(sum[:0])
	return sum
}
// Sum512_224 returns the Sum512/224 checksum of the data.
func Sum512_224(data []byte) [Size224]byte {
	var sum [Size224]byte
	h := New512_224()
	h.Write(data)
	h.Sum(sum[:0])
	return sum
}
// Sum512_256 returns the Sum512/256 checksum of the data.
func Sum512_256(data []byte) [Size256]byte {
	var sum [Size256]byte
	h := New512_256()
	h.Write(data)
	h.Sum(sum[:0])
	return sum
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package subtle implements functions that are often useful in cryptographic
// code but require careful thought to use correctly.
package subtle
import (
"crypto/internal/constanttime"
"crypto/internal/fips140/subtle"
)
// These functions are forwarded to crypto/internal/constanttime for intrinsified
// operations, and to crypto/internal/fips140/subtle for byte slice operations.
// ConstantTimeCompare returns 1 if the two slices, x and y, have equal contents
// and 0 otherwise. The time taken is a function of the length of the slices and
// is independent of the contents. If the lengths of x and y do not match it
// returns 0 immediately.
func ConstantTimeCompare(x, y []byte) int {
	return subtle.ConstantTimeCompare(x, y) // forwarded to the FIPS 140 module
}
// ConstantTimeSelect returns x if v == 1 and y if v == 0.
// Its behavior is undefined if v takes any other value.
func ConstantTimeSelect(v, x, y int) int {
	return constanttime.Select(v, x, y) // forwarded to the intrinsified implementation
}
// ConstantTimeByteEq returns 1 if x == y and 0 otherwise.
func ConstantTimeByteEq(x, y uint8) int {
	return constanttime.ByteEq(x, y) // forwarded to the intrinsified implementation
}
// ConstantTimeEq returns 1 if x == y and 0 otherwise.
func ConstantTimeEq(x, y int32) int {
	return constanttime.Eq(x, y) // forwarded to the intrinsified implementation
}
// ConstantTimeCopy copies the contents of y into x (a slice of equal length)
// if v == 1. If v == 0, x is left unchanged. Its behavior is undefined if v
// takes any other value.
func ConstantTimeCopy(v int, x, y []byte) {
	subtle.ConstantTimeCopy(v, x, y) // forwarded to the FIPS 140 module
}
// ConstantTimeLessOrEq returns 1 if x <= y and 0 otherwise.
// Its behavior is undefined if x or y are negative or > 2**31 - 1.
func ConstantTimeLessOrEq(x, y int) int {
	return constanttime.LessOrEq(x, y) // forwarded to the intrinsified implementation
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package subtle
import (
"internal/runtime/sys"
_ "unsafe"
)
// WithDataIndependentTiming enables architecture specific features which ensure
// that the timing of specific instructions is independent of their inputs
// before executing f. On f returning it disables these features.
//
// Any goroutine spawned by f will also have data independent timing enabled for
// its lifetime, as well as any of their descendant goroutines.
//
// Any C code called via cgo from within f, or from a goroutine spawned by f, will
// also have data independent timing enabled for the duration of the call. If the
// C code disables data independent timing, it will be re-enabled on return to Go.
//
// If C code called via cgo, from f or elsewhere, enables or disables data
// independent timing then calling into Go will preserve that state for the
// duration of the call.
//
// WithDataIndependentTiming should only be used when f is written to make use
// of constant-time operations. WithDataIndependentTiming does not make
// variable-time code constant-time.
//
// Calls to WithDataIndependentTiming may be nested.
//
// On Arm64 processors with FEAT_DIT, WithDataIndependentTiming enables
// PSTATE.DIT. See https://developer.arm.com/documentation/ka005181/1-0/?lang=en.
//
// Currently, on all other architectures WithDataIndependentTiming executes f immediately
// with no other side-effects.
//
//go:noinline
func WithDataIndependentTiming(f func()) {
	if !sys.DITSupported {
		f()
		return
	}
	alreadyEnabled := setDITEnabled()
	// setDITDisabled is called in a deferred function so that if f panics we
	// will still disable DIT, in case the panic is recovered further up the
	// stack. DIT stays enabled if it was already on when we were called, so
	// nested calls unwind correctly.
	defer func() {
		if !alreadyEnabled {
			setDITDisabled()
		}
	}()
	f()
}
// setDITEnabled enables data independent timing and reports whether it was
// already enabled. Its body is provided elsewhere via linkname; the
// directives must remain attached to these declarations.
//
//go:linkname setDITEnabled
func setDITEnabled() bool

// setDITDisabled disables data independent timing. Body provided elsewhere
// via linkname.
//
//go:linkname setDITDisabled
func setDITDisabled()
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package subtle
import "crypto/internal/fips140/subtle"
// XORBytes sets dst[i] = x[i] ^ y[i] for all i < n = min(len(x), len(y)),
// returning n, the number of bytes written to dst.
//
// If dst does not have length at least n,
// XORBytes panics without writing anything to dst.
//
// dst and x or y may overlap exactly or not at all,
// otherwise XORBytes may panic.
func XORBytes(dst, x, y []byte) int {
	return subtle.XORBytes(dst, x, y) // forwarded to the FIPS 140 module
}
// Copyright 2026 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sql
import (
"sync"
"sync/atomic"
)
// A closingMutex is an RWMutex for synchronizing close.
// Unlike a sync.RWMutex, RLock takes priority over Lock.
// Reads can starve out close, but reads are safely reentrant.
type closingMutex struct {
	// state is 2*readers+writerWaiting.
	// 0 is unlocked
	// 1 is unlocked and a writer needs to wake
	// >0 is read-locked
	// <0 is write-locked
	state atomic.Int64
	mu    sync.Mutex // guards read/write and their lazy creation
	read  *sync.Cond // readers wait here for the writer to finish; nil until init
	write *sync.Cond // writers wait here for readers to drain; nil until init
}
// RLock acquires a read lock, blocking only while a writer holds the lock.
// The fast path is a single CAS in TryRLock; the slow path sleeps on the
// read condition until the writer releases.
func (m *closingMutex) RLock() {
	if m.TryRLock() {
		return
	}
	// Wait for writer.
	m.mu.Lock()
	defer m.mu.Unlock()
	for {
		// Re-check under mu so we cannot miss the writer's broadcast.
		if m.TryRLock() {
			return
		}
		m.init()
		m.read.Wait()
	}
}
// RUnlock releases a read lock. If this was the last reader and a writer is
// waiting (state transitions to 1), it wakes the writer.
func (m *closingMutex) RUnlock() {
	for {
		x := m.state.Load()
		// state < 2 means no reader currently holds the lock.
		if x < 2 {
			panic("runlock of un-rlocked mutex")
		}
		if m.state.CompareAndSwap(x, x-2) {
			if x-2 == 1 {
				// We were the last reader, and a writer is waiting.
				// The lock makes sure the writer sees the broadcast.
				m.mu.Lock()
				defer m.mu.Unlock()
				m.write.Broadcast()
			}
			return
		}
	}
}
// Lock acquires the write lock, waiting for all readers to drain. mu is held
// for the duration of the wait loop so broadcasts cannot be missed.
func (m *closingMutex) Lock() {
	m.mu.Lock()
	defer m.mu.Unlock()
	for {
		x := m.state.Load()
		// Unlocked (with or without the wake bit): take the write lock.
		if (x == 0 || x == 1) && m.state.CompareAndSwap(x, -1) {
			return
		}
		// Set writer waiting bit and sleep.
		// If the bit was clear but the CAS failed, state changed underneath
		// us — retry before sleeping. If the bit is already set, fall
		// through and wait.
		if x&1 == 0 && !m.state.CompareAndSwap(x, x|1) {
			continue
		}
		m.init()
		m.write.Wait()
	}
}
// Unlock releases the write lock and wakes any waiting readers and writers.
func (m *closingMutex) Unlock() {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Only -1 (write-locked) may transition back to 0.
	if !m.state.CompareAndSwap(-1, 0) {
		panic("unlock of unlocked mutex")
	}
	// The conds are created together in init, so checking read alone
	// suffices; nil means nobody ever contended.
	if m.read != nil {
		m.read.Broadcast()
		m.write.Broadcast()
	}
}
// TryRLock attempts to take a read lock without blocking. It fails only when
// a writer holds the lock (state < 0); a pending writer-wake bit does not
// stop new readers, which is what gives readers priority.
func (m *closingMutex) TryRLock() bool {
	for {
		x := m.state.Load()
		if x < 0 {
			return false
		}
		// Add one reader (readers are counted in units of 2).
		if m.state.CompareAndSwap(x, x+2) {
			return true
		}
	}
}
// init lazily creates the read/write Conds. The caller must hold m.mu.
// In the common, uncontended case, we'll never need them.
func (m *closingMutex) init() {
	if m.read == nil {
		m.read = sync.NewCond(&m.mu)
		m.write = sync.NewCond(&m.mu)
	}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Type conversions for Scan.
package sql
import (
"bytes"
"database/sql/driver"
"errors"
"fmt"
"reflect"
"strconv"
"time"
"unicode"
"unicode/utf8"
_ "unsafe" // for linkname
)
var errNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error
func describeNamedValue(nv *driver.NamedValue) string {
if len(nv.Name) == 0 {
return fmt.Sprintf("$%d", nv.Ordinal)
}
return fmt.Sprintf("with name %q", nv.Name)
}
// validateNamedValueName reports an error unless name is empty or begins
// with a Unicode letter.
func validateNamedValueName(name string) error {
	if name == "" {
		return nil
	}
	if r, _ := utf8.DecodeRuneInString(name); unicode.IsLetter(r) {
		return nil
	}
	return fmt.Errorf("name %q does not begin with a letter", name)
}
// ccChecker wraps the driver.ColumnConverter and allows it to be used
// as if it were a NamedValueChecker. If the driver ColumnConverter
// is not present then the NamedValueChecker will return driver.ErrSkip.
type ccChecker struct {
	cci  driver.ColumnConverter // nil when the driver has no ColumnConverter
	want int                    // expected number of placeholders; -1 when unknown
}
// CheckNamedValue converts nv.Value in place using the wrapped
// ColumnConverter, after first letting the value convert itself via
// driver.Valuer. It returns driver.ErrSkip when no converter is present.
func (c ccChecker) CheckNamedValue(nv *driver.NamedValue) error {
	if c.cci == nil {
		return driver.ErrSkip
	}
	// The column converter shouldn't be called on any index
	// it isn't expecting. The final error will be thrown
	// in the argument converter loop.
	index := nv.Ordinal - 1
	if c.want >= 0 && c.want <= index {
		return nil
	}
	// First, see if the value itself knows how to convert
	// itself to a driver type. For example, a NullString
	// struct changing into a string or nil.
	if vr, ok := nv.Value.(driver.Valuer); ok {
		sv, err := callValuerValue(vr)
		if err != nil {
			return err
		}
		if !driver.IsValue(sv) {
			return fmt.Errorf("non-subset type %T returned from Value", sv)
		}
		nv.Value = sv
	}
	// Second, ask the column to sanity check itself. For
	// example, drivers might use this to make sure that
	// an int64 values being inserted into a 16-bit
	// integer field is in range (before getting
	// truncated), or that a nil can't go into a NOT NULL
	// column before going across the network to get the
	// same error.
	var err error
	arg := nv.Value
	nv.Value, err = c.cci.ColumnConverter(index).ConvertValue(arg)
	if err != nil {
		return err
	}
	if !driver.IsValue(nv.Value) {
		return fmt.Errorf("driver ColumnConverter error converted %T to unsupported type %T", arg, nv.Value)
	}
	return nil
}
// defaultCheckNamedValue wraps the default ColumnConverter to have the same
// function signature as the CheckNamedValue in the driver.NamedValueChecker
// interface. It converts nv.Value in place.
func defaultCheckNamedValue(nv *driver.NamedValue) (err error) {
	nv.Value, err = driver.DefaultParameterConverter.ConvertValue(nv.Value)
	return err
}
// driverArgsConnLocked converts arguments from callers of Stmt.Exec and
// Stmt.Query into driver Values.
//
// The statement ds may be nil, if no statement is available.
//
// ci must be locked.
func driverArgsConnLocked(ci driver.Conn, ds *driverStmt, args []any) ([]driver.NamedValue, error) {
	nvargs := make([]driver.NamedValue, len(args))
	// -1 means the driver doesn't know how to count the number of
	// placeholders, so we won't sanity check input here and instead let the
	// driver deal with errors.
	want := -1
	var si driver.Stmt
	var cc ccChecker
	if ds != nil {
		si = ds.si
		want = ds.si.NumInput()
		cc.want = want
	}
	// Check all types of interfaces from the start.
	// Drivers may opt to use the NamedValueChecker for special
	// argument types, then return driver.ErrSkip to pass it along
	// to the column converter.
	nvc, ok := si.(driver.NamedValueChecker)
	if !ok {
		// Fall back to a connection-level checker if the statement has none.
		nvc, _ = ci.(driver.NamedValueChecker)
	}
	cci, ok := si.(driver.ColumnConverter)
	if ok {
		cc.cci = cci
	}
	// Loop through all the arguments, checking each one.
	// If no error is returned simply increment the index
	// and continue. However, if driver.ErrRemoveArgument
	// is returned the argument is not included in the query
	// argument list.
	var err error
	var n int
	for _, arg := range args {
		// n lags behind the loop index when arguments are removed.
		nv := &nvargs[n]
		if np, ok := arg.(NamedArg); ok {
			if err = validateNamedValueName(np.Name); err != nil {
				return nil, err
			}
			arg = np.Value
			nv.Name = np.Name
		}
		nv.Ordinal = n + 1
		nv.Value = arg
		// Checking sequence has four routes:
		// A: 1. Default
		// B: 1. NamedValueChecker 2. Column Converter 3. Default
		// C: 1. NamedValueChecker 3. Default
		// D: 1. Column Converter 2. Default
		//
		// The only time a Column Converter is called is first
		// or after NamedValueConverter. If first it is handled before
		// the nextCheck label. Thus for repeats tries only when the
		// NamedValueConverter is selected should the Column Converter
		// be used in the retry.
		checker := defaultCheckNamedValue
		nextCC := false
		switch {
		case nvc != nil:
			nextCC = cci != nil
			checker = nvc.CheckNamedValue
		case cci != nil:
			checker = cc.CheckNamedValue
		}
	nextCheck:
		err = checker(nv)
		switch err {
		case nil:
			n++
			continue
		case driver.ErrRemoveArgument:
			// Drop the tail slot; nv will be reused for the next argument.
			nvargs = nvargs[:len(nvargs)-1]
			continue
		case driver.ErrSkip:
			// Fall through the checker chain described above.
			if nextCC {
				nextCC = false
				checker = cc.CheckNamedValue
			} else {
				checker = defaultCheckNamedValue
			}
			goto nextCheck
		default:
			return nil, fmt.Errorf("sql: converting argument %s type: %w", describeNamedValue(nv), err)
		}
	}
	// Check the length of arguments after conversion to allow for omitted
	// arguments.
	if want != -1 && len(nvargs) != want {
		return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(nvargs))
	}
	return nvargs, nil
}
// convertAssign is the same as convertAssignRows, but without the optional
// rows argument.
//
// Because rows is nil here, destinations that need a parent *Rows (such as
// cursor values returned as driver.Rows) are rejected inside
// convertAssignRows; see that function for the details.
//
// convertAssign should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - ariga.io/entcache
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname convertAssign
func convertAssign(dest, src any) error {
	return convertAssignRows(dest, src, nil)
}
// convertAssignRows copies to dest the value in src, converting it if possible.
// An error is returned if the copy would result in loss of information.
// dest should be a pointer type. If rows is passed in, the rows will
// be used as the parent for any cursor values converted from a
// driver.Rows to a *Rows.
//
// The conversion is attempted in stages:
//  1. exact (src type, dest type) pairs handled without reflection;
//  2. reflection-based fast paths for common dest pointer types;
//  3. the dest's own Scanner implementation, if any;
//  4. generic reflection: direct assign, kind conversion, and finally
//     string-mediated numeric conversion.
func convertAssignRows(dest, src any, rows *Rows) error {
	// Common cases, without reflect.
	switch s := src.(type) {
	case string:
		switch d := dest.(type) {
		case *string:
			if d == nil {
				return errNilPtr
			}
			*d = s
			return nil
		case *[]byte:
			if d == nil {
				return errNilPtr
			}
			*d = []byte(s)
			return nil
		case *RawBytes:
			if d == nil {
				return errNilPtr
			}
			// Store the bytes in rows' reusable scratch buffer
			// (rawbuf/setrawbuf) rather than allocating per value.
			*d = rows.setrawbuf(append(rows.rawbuf(), s...))
			return nil
		}
	case []byte:
		switch d := dest.(type) {
		case *string:
			if d == nil {
				return errNilPtr
			}
			*d = string(s)
			return nil
		case *any:
			if d == nil {
				return errNilPtr
			}
			// Clone so dest does not alias the driver-owned slice.
			*d = bytes.Clone(s)
			return nil
		case *[]byte:
			if d == nil {
				return errNilPtr
			}
			*d = bytes.Clone(s)
			return nil
		case *RawBytes:
			if d == nil {
				return errNilPtr
			}
			// RawBytes explicitly opts in to aliasing; no copy.
			*d = s
			return nil
		}
	case time.Time:
		switch d := dest.(type) {
		case *time.Time:
			*d = s
			return nil
		case *string:
			*d = s.Format(time.RFC3339Nano)
			return nil
		case *[]byte:
			if d == nil {
				return errNilPtr
			}
			*d = s.AppendFormat(make([]byte, 0, len(time.RFC3339Nano)), time.RFC3339Nano)
			return nil
		case *RawBytes:
			if d == nil {
				return errNilPtr
			}
			*d = rows.setrawbuf(s.AppendFormat(rows.rawbuf(), time.RFC3339Nano))
			return nil
		}
	case decimalDecompose:
		switch d := dest.(type) {
		case decimalCompose:
			// Transfer the decimal in parts; Compose reports any
			// value the destination cannot represent.
			return d.Compose(s.Decompose(nil))
		}
	case nil:
		switch d := dest.(type) {
		case *any:
			if d == nil {
				return errNilPtr
			}
			*d = nil
			return nil
		case *[]byte:
			if d == nil {
				return errNilPtr
			}
			*d = nil
			return nil
		case *RawBytes:
			if d == nil {
				return errNilPtr
			}
			*d = nil
			return nil
		}
	// The driver is returning a cursor the client may iterate over.
	case driver.Rows:
		switch d := dest.(type) {
		case *Rows:
			if d == nil {
				return errNilPtr
			}
			if rows == nil {
				return errors.New("invalid context to convert cursor rows, missing parent *Rows")
			}
			// The child Rows shares the parent's connection; the no-op
			// releaseConn leaves the parent in charge of releasing it.
			*d = Rows{
				dc:          rows.dc,
				releaseConn: func(error) {},
				rowsi:       s,
			}
			// Chain the cancel function.
			parentCancel := rows.cancel
			rows.cancel = func() {
				// When Rows.cancel is called, the closemu will be locked as well.
				// So we can access rs.lasterr.
				d.close(rows.lasterr)
				if parentCancel != nil {
					parentCancel()
				}
			}
			return nil
		}
	}

	var sv reflect.Value

	switch d := dest.(type) {
	case *string:
		sv = reflect.ValueOf(src)
		switch sv.Kind() {
		case reflect.Bool,
			reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
			reflect.Float32, reflect.Float64:
			*d = asString(src)
			return nil
		}
	case *[]byte:
		sv = reflect.ValueOf(src)
		if b, ok := asBytes(nil, sv); ok {
			*d = b
			return nil
		}
	case *RawBytes:
		sv = reflect.ValueOf(src)
		if b, ok := asBytes(rows.rawbuf(), sv); ok {
			*d = rows.setrawbuf(b)
			return nil
		}
	case *bool:
		bv, err := driver.Bool.ConvertValue(src)
		if err == nil {
			*d = bv.(bool)
		}
		return err
	case *any:
		*d = src
		return nil
	}

	// Give the destination's own Scanner implementation a chance before
	// the generic reflection paths below.
	if scanner, ok := dest.(Scanner); ok {
		return scanner.Scan(src)
	}

	dpv := reflect.ValueOf(dest)
	if dpv.Kind() != reflect.Pointer {
		return errors.New("destination not a pointer")
	}
	if dpv.IsNil() {
		return errNilPtr
	}

	// sv may already have been populated by the dest type switch above;
	// only compute it here if not.
	if !sv.IsValid() {
		sv = reflect.ValueOf(src)
	}

	dv := reflect.Indirect(dpv)
	if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) {
		switch b := src.(type) {
		case []byte:
			// Copy to avoid aliasing a driver-owned buffer.
			dv.Set(reflect.ValueOf(bytes.Clone(b)))
		default:
			dv.Set(sv)
		}
		return nil
	}

	if dv.Kind() == sv.Kind() && sv.Type().ConvertibleTo(dv.Type()) {
		dv.Set(sv.Convert(dv.Type()))
		return nil
	}

	// The following conversions use a string value as an intermediate representation
	// to convert between various numeric types.
	//
	// This also allows scanning into user defined types such as "type Int int64".
	// For symmetry, also check for string destination types.
	switch dv.Kind() {
	case reflect.Pointer:
		if src == nil {
			dv.SetZero()
			return nil
		}
		// Allocate the pointee and recurse to fill it in.
		dv.Set(reflect.New(dv.Type().Elem()))
		return convertAssignRows(dv.Interface(), src, rows)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		if src == nil {
			return fmt.Errorf("converting NULL to %s is unsupported", dv.Kind())
		}
		s := asString(src)
		i64, err := strconv.ParseInt(s, 10, dv.Type().Bits())
		if err != nil {
			err = strconvErr(err)
			return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
		}
		dv.SetInt(i64)
		return nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		if src == nil {
			return fmt.Errorf("converting NULL to %s is unsupported", dv.Kind())
		}
		s := asString(src)
		u64, err := strconv.ParseUint(s, 10, dv.Type().Bits())
		if err != nil {
			err = strconvErr(err)
			return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
		}
		dv.SetUint(u64)
		return nil
	case reflect.Float32, reflect.Float64:
		if src == nil {
			return fmt.Errorf("converting NULL to %s is unsupported", dv.Kind())
		}
		s := asString(src)
		f64, err := strconv.ParseFloat(s, dv.Type().Bits())
		if err != nil {
			err = strconvErr(err)
			return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
		}
		dv.SetFloat(f64)
		return nil
	case reflect.String:
		if src == nil {
			return fmt.Errorf("converting NULL to %s is unsupported", dv.Kind())
		}
		switch v := src.(type) {
		case string:
			dv.SetString(v)
			return nil
		case []byte:
			dv.SetString(string(v))
			return nil
		}
	}

	return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest)
}
func strconvErr(err error) error {
if ne, ok := err.(*strconv.NumError); ok {
return ne.Err
}
return err
}
// asString renders src as a string. Strings are returned unchanged and
// []byte is converted directly; numeric and bool kinds are formatted with
// strconv; anything else falls back to fmt.Sprintf("%v").
func asString(src any) string {
	if s, ok := src.(string); ok {
		return s
	}
	if b, ok := src.([]byte); ok {
		return string(b)
	}
	v := reflect.ValueOf(src)
	switch v.Kind() {
	case reflect.Bool:
		return strconv.FormatBool(v.Bool())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(v.Int(), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return strconv.FormatUint(v.Uint(), 10)
	case reflect.Float32:
		return strconv.FormatFloat(v.Float(), 'g', -1, 32)
	case reflect.Float64:
		return strconv.FormatFloat(v.Float(), 'g', -1, 64)
	}
	return fmt.Sprintf("%v", src)
}
func asBytes(buf []byte, rv reflect.Value) (b []byte, ok bool) {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.AppendInt(buf, rv.Int(), 10), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return strconv.AppendUint(buf, rv.Uint(), 10), true
case reflect.Float32:
return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 32), true
case reflect.Float64:
return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 64), true
case reflect.Bool:
return strconv.AppendBool(buf, rv.Bool()), true
case reflect.String:
s := rv.String()
return append(buf, s...), true
}
return
}
var valuerReflectType = reflect.TypeFor[driver.Valuer]()
// callValuerValue returns vr.Value(), with one exception:
// If vr.Value is an auto-generated method on a pointer type and the
// pointer is nil, it would panic at runtime in the panicwrap
// method. Treat it like nil instead.
// Issue 8415.
//
// This is so people can implement driver.Value on value types and
// still use nil pointers to those types to mean nil/NULL, just like
// string/*string.
//
// This function is mirrored in the database/sql/driver package.
func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Pointer &&
rv.IsNil() &&
rv.Type().Elem().Implements(valuerReflectType) {
return nil, nil
}
return vr.Value()
}
// decimal composes or decomposes a decimal value to and from individual parts.
// There are four parts: a boolean negative flag, a form byte with three possible states
// (finite=0, infinite=1, NaN=2), a base-2 big-endian integer
// coefficient (also known as a significand) as a []byte, and an int32 exponent.
// These are composed into a final value as "decimal = (neg) (form=finite) coefficient * 10 ^ exponent".
// A zero length coefficient is a zero value.
// The big-endian integer coefficient stores the most significant byte first (at coefficient[0]).
// If the form is not finite the coefficient and exponent should be ignored.
// The negative parameter may be set to true for any form, although implementations are not required
// to respect the negative parameter in the non-finite form.
//
// Implementations may choose to set the negative parameter to true on a zero or NaN value,
// but implementations that do not differentiate between negative and positive
// zero or NaN values should ignore the negative parameter without error.
// If an implementation does not support Infinity it may be converted into a NaN without error.
// If a value is set that is larger than what is supported by an implementation,
// an error must be returned.
// Implementations must return an error if a NaN or Infinity is attempted to be set while neither
// are supported.
//
// NOTE(kardianos): This is an experimental interface. See https://golang.org/issue/30870
type decimal interface {
	decimalDecompose
	decimalCompose
}

// decimalDecompose is the read half of the decimal interface: it exposes a
// value's parts in the form described on the decimal interface above.
type decimalDecompose interface {
	// Decompose returns the internal decimal state in parts.
	// If the provided buf has sufficient capacity, buf may be returned as the coefficient with
	// the value set and length set as appropriate.
	Decompose(buf []byte) (form byte, negative bool, coefficient []byte, exponent int32)
}

// decimalCompose is the write half of the decimal interface; Compose's
// parameters match Decompose's results, so values can be transferred with
// d.Compose(s.Decompose(nil)).
type decimalCompose interface {
	// Compose sets the internal decimal value from parts. If the value cannot be
	// represented then an error should be returned.
	Compose(form byte, negative bool, coefficient []byte, exponent int32) error
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sql
import (
"context"
"database/sql/driver"
"errors"
)
// ctxDriverPrepare prepares query on ci, preferring the context-aware
// driver.ConnPrepareContext when the driver provides it. For legacy
// drivers the Prepare call itself cannot be interrupted, so the context
// is checked only afterwards: if it was canceled in the meantime, the
// freshly prepared statement is closed and the context's error returned.
func ctxDriverPrepare(ctx context.Context, ci driver.Conn, query string) (driver.Stmt, error) {
	if pc, ok := ci.(driver.ConnPrepareContext); ok {
		return pc.PrepareContext(ctx, query)
	}
	si, err := ci.Prepare(query)
	if err != nil {
		return si, err
	}
	select {
	case <-ctx.Done():
		si.Close()
		return nil, ctx.Err()
	default:
	}
	return si, nil
}
// ctxDriverExec runs an exec-style query, preferring the context-aware
// ExecerContext when available. Otherwise the named arguments are
// flattened to positional values (failing if any carries a name) and the
// context is checked once before the blocking legacy Exec call.
func ctxDriverExec(ctx context.Context, execerCtx driver.ExecerContext, execer driver.Execer, query string, nvdargs []driver.NamedValue) (driver.Result, error) {
	if execerCtx != nil {
		return execerCtx.ExecContext(ctx, query, nvdargs)
	}
	args, err := namedValueToValue(nvdargs)
	if err != nil {
		return nil, err
	}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	return execer.Exec(query, args)
}
// ctxDriverQuery runs a rows-returning query, preferring the
// context-aware QueryerContext when available. Otherwise the named
// arguments are flattened to positional values and the context is
// checked once before the blocking legacy Query call.
func ctxDriverQuery(ctx context.Context, queryerCtx driver.QueryerContext, queryer driver.Queryer, query string, nvdargs []driver.NamedValue) (driver.Rows, error) {
	if queryerCtx != nil {
		return queryerCtx.QueryContext(ctx, query, nvdargs)
	}
	args, err := namedValueToValue(nvdargs)
	if err != nil {
		return nil, err
	}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	return queryer.Query(query, args)
}
// ctxDriverStmtExec executes a prepared statement, using the
// context-aware StmtExecContext interface when the driver implements it.
// For legacy statements the named arguments are flattened to positional
// values and the context is checked once before the blocking Exec call.
func ctxDriverStmtExec(ctx context.Context, si driver.Stmt, nvdargs []driver.NamedValue) (driver.Result, error) {
	if sec, ok := si.(driver.StmtExecContext); ok {
		return sec.ExecContext(ctx, nvdargs)
	}
	args, err := namedValueToValue(nvdargs)
	if err != nil {
		return nil, err
	}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	return si.Exec(args)
}
// ctxDriverStmtQuery queries a prepared statement, using the
// context-aware StmtQueryContext interface when the driver implements
// it. For legacy statements the named arguments are flattened to
// positional values and the context is checked once before the blocking
// Query call.
func ctxDriverStmtQuery(ctx context.Context, si driver.Stmt, nvdargs []driver.NamedValue) (driver.Rows, error) {
	if sqc, ok := si.(driver.StmtQueryContext); ok {
		return sqc.QueryContext(ctx, nvdargs)
	}
	args, err := namedValueToValue(nvdargs)
	if err != nil {
		return nil, err
	}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	return si.Query(args)
}
// ctxDriverBegin starts a transaction on ci. Drivers implementing
// driver.ConnBeginTx receive the context and the (possibly zero)
// translated options. For legacy drivers any non-default option must be
// rejected up front, since Begin has no way to convey it; the context is
// then checked after Begin, rolling back if it was canceled meanwhile.
func ctxDriverBegin(ctx context.Context, opts *TxOptions, ci driver.Conn) (driver.Tx, error) {
	if bt, ok := ci.(driver.ConnBeginTx); ok {
		var dopts driver.TxOptions
		if opts != nil {
			dopts = driver.TxOptions{
				Isolation: driver.IsolationLevel(opts.Isolation),
				ReadOnly:  opts.ReadOnly,
			}
		}
		return bt.BeginTx(ctx, dopts)
	}

	if opts != nil {
		// Check the transaction level. If the transaction level is non-default
		// then return an error here as the BeginTx driver value is not supported.
		if opts.Isolation != LevelDefault {
			return nil, errors.New("sql: driver does not support non-default isolation level")
		}
		// If a read-only transaction is requested return an error as the
		// BeginTx driver value is not supported.
		if opts.ReadOnly {
			return nil, errors.New("sql: driver does not support read-only transactions")
		}
	}

	if ctx.Done() == nil {
		// This context can never be canceled; skip the post-Begin check.
		return ci.Begin()
	}

	txi, err := ci.Begin()
	if err != nil {
		return txi, err
	}
	select {
	case <-ctx.Done():
		txi.Rollback()
		return nil, ctx.Err()
	default:
	}
	return txi, nil
}
func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
dargs := make([]driver.Value, len(named))
for n, param := range named {
if len(param.Name) > 0 {
return nil, errors.New("sql: driver does not support the use of Named Parameters")
}
dargs[n] = param.Value
}
return dargs, nil
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package driver defines interfaces to be implemented by database
// drivers as used by package sql.
//
// Most code should use the [database/sql] package.
//
// The driver interface has evolved over time. Drivers should implement
// [Connector] and [DriverContext] interfaces.
// The Connector.Connect and Driver.Open methods should never return [ErrBadConn].
// [ErrBadConn] should only be returned from [Validator], [SessionResetter], or
// a query method if the connection is already in an invalid (e.g. closed) state.
//
// All [Conn] implementations should implement the following interfaces:
// [Pinger], [SessionResetter], and [Validator].
//
// If named parameters or context are supported, the driver's [Conn] should implement:
// [ExecerContext], [QueryerContext], [ConnPrepareContext], and [ConnBeginTx].
//
// To support custom data types, implement [NamedValueChecker]. [NamedValueChecker]
// also allows queries to accept per-query options as a parameter by returning
// [ErrRemoveArgument] from CheckNamedValue.
//
// If multiple result sets are supported, [Rows] should implement [RowsNextResultSet].
// If the driver knows how to describe the types present in the returned result
// it should implement the following interfaces: [RowsColumnTypeScanType],
// [RowsColumnTypeDatabaseTypeName], [RowsColumnTypeLength], [RowsColumnTypeNullable],
// and [RowsColumnTypePrecisionScale]. A given row value may also return a [Rows]
// type, which may represent a database cursor value.
//
// If a [Conn] implements [Validator], then the IsValid method is called
// before returning the connection to the connection pool. If an entry in the
// connection pool implements [SessionResetter], then ResetSession
// is called before reusing the connection for another query. If a connection is
// never returned to the connection pool but is immediately reused, then
// ResetSession is called prior to reuse but IsValid is not called.
package driver
import (
"context"
"errors"
"reflect"
)
// Value is a value that drivers must be able to handle.
// It is either nil, a type handled by a database driver's [NamedValueChecker]
// interface, or an instance of one of these types:
//
//	int64
//	float64
//	bool
//	[]byte
//	string
//	time.Time
//
// If the driver supports cursors, a returned Value may also implement the [Rows] interface
// in this package. This is used, for example, when a user selects a cursor
// such as "select cursor(select * from my_table) from dual". If the [Rows]
// from the select is closed, the cursor [Rows] will also be closed.
type Value any

// NamedValue holds both the value name and value.
// It is the argument type used by the context-aware driver interfaces
// such as [ExecerContext], [QueryerContext], and [NamedValueChecker].
type NamedValue struct {
	// If the Name is not empty it should be used for the parameter identifier and
	// not the ordinal position.
	//
	// Name will not have a symbol prefix.
	Name string

	// Ordinal position of the parameter starting from one and is always set.
	Ordinal int

	// Value is the parameter value.
	Value Value
}
// Driver is the interface that must be implemented by a database
// driver.
//
// Database drivers may implement [DriverContext] for access
// to contexts and to parse the name only once for a pool of connections,
// instead of once per connection.
type Driver interface {
	// Open returns a new connection to the database.
	// The name is a string in a driver-specific format.
	//
	// Open may return a cached connection (one previously
	// closed), but doing so is unnecessary; the sql package
	// maintains a pool of idle connections for efficient re-use.
	//
	// The returned connection is only used by one goroutine at a
	// time.
	Open(name string) (Conn, error)
}

// DriverContext is an optional interface that may be implemented by a [Driver].
//
// If a [Driver] implements DriverContext, then [database/sql.DB] will call
// OpenConnector to obtain a [Connector] and then invoke
// that [Connector]'s Connect method to obtain each needed connection,
// instead of invoking the [Driver]'s Open method for each connection.
// The two-step sequence allows drivers to parse the name just once
// and also provides access to per-[Conn] contexts.
type DriverContext interface {
	// OpenConnector must parse the name in the same format that Driver.Open
	// parses the name parameter.
	OpenConnector(name string) (Connector, error)
}
// A Connector represents a driver in a fixed configuration
// and can create any number of equivalent Conns for use
// by multiple goroutines.
//
// A Connector can be passed to [database/sql.OpenDB], to allow drivers
// to implement their own [database/sql.DB] constructors, or returned by
// [DriverContext]'s OpenConnector method, to allow drivers
// access to context and to avoid repeated parsing of driver
// configuration.
//
// If a Connector implements [io.Closer], the [database/sql.DB.Close]
// method will call the Close method and return error (if any).
type Connector interface {
	// Connect returns a connection to the database.
	// Connect may return a cached connection (one previously
	// closed), but doing so is unnecessary; the sql package
	// maintains a pool of idle connections for efficient re-use.
	//
	// The provided context.Context is for dialing purposes only
	// (see net.DialContext) and should not be stored or used for
	// other purposes. A default timeout should still be used
	// when dialing as a connection pool may call Connect
	// asynchronously to any query.
	//
	// The returned connection is only used by one goroutine at a
	// time.
	Connect(context.Context) (Conn, error)

	// Driver returns the underlying Driver of the Connector,
	// mainly to maintain compatibility with the Driver method
	// on sql.DB.
	Driver() Driver
}

// ErrSkip may be returned by some optional interfaces' methods to
// indicate at runtime that the fast path is unavailable and the sql
// package should continue as if the optional interface was not
// implemented. ErrSkip is only supported where explicitly
// documented.
var ErrSkip = errors.New("driver: skip fast-path; continue as if unimplemented")
// ErrBadConn should be returned by a driver to signal to the [database/sql]
// package that a driver.[Conn] is in a bad state (such as the server
// having earlier closed the connection) and the [database/sql] package should
// retry on a new connection.
//
// To prevent duplicate operations, ErrBadConn should NOT be returned
// if there's a possibility that the database server might have
// performed the operation. Even if the server sends back an error,
// you shouldn't return ErrBadConn.
//
// Errors will be checked using [errors.Is]. An error may
// wrap ErrBadConn or implement the Is(error) bool method.
var ErrBadConn = errors.New("driver: bad connection")

// Pinger is an optional interface that may be implemented by a [Conn].
//
// If a [Conn] does not implement Pinger, the [database/sql.DB.Ping] and
// [database/sql.DB.PingContext] will check if there is at least one [Conn] available.
//
// If Conn.Ping returns [ErrBadConn], [database/sql.DB.Ping] and [database/sql.DB.PingContext] will remove
// the [Conn] from pool.
type Pinger interface {
	// Ping verifies the connection to the database is still alive.
	Ping(ctx context.Context) error
}
// Execer is an optional interface that may be implemented by a [Conn].
//
// If a [Conn] implements neither [ExecerContext] nor [Execer],
// the [database/sql.DB.Exec] will first prepare a query, execute the statement,
// and then close the statement.
//
// Exec may return [ErrSkip].
//
// Deprecated: Drivers should implement [ExecerContext] instead.
type Execer interface {
	Exec(query string, args []Value) (Result, error)
}

// ExecerContext is an optional interface that may be implemented by a [Conn].
//
// If a [Conn] does not implement [ExecerContext], the [database/sql.DB.Exec]
// will fall back to [Execer]; if the Conn does not implement Execer either,
// [database/sql.DB.Exec] will first prepare a query, execute the statement, and then
// close the statement.
//
// ExecContext may return [ErrSkip].
//
// ExecContext must honor the context timeout and return when the context is canceled.
type ExecerContext interface {
	ExecContext(ctx context.Context, query string, args []NamedValue) (Result, error)
}

// Queryer is an optional interface that may be implemented by a [Conn].
//
// If a [Conn] implements neither [QueryerContext] nor [Queryer],
// the [database/sql.DB.Query] will first prepare a query, execute the statement,
// and then close the statement.
//
// Query may return [ErrSkip].
//
// Deprecated: Drivers should implement [QueryerContext] instead.
type Queryer interface {
	Query(query string, args []Value) (Rows, error)
}

// QueryerContext is an optional interface that may be implemented by a [Conn].
//
// If a [Conn] does not implement QueryerContext, the [database/sql.DB.Query]
// will fall back to [Queryer]; if the [Conn] does not implement [Queryer] either,
// [database/sql.DB.Query] will first prepare a query, execute the statement, and then
// close the statement.
//
// QueryContext may return [ErrSkip].
//
// QueryContext must honor the context timeout and return when the context is canceled.
type QueryerContext interface {
	QueryContext(ctx context.Context, query string, args []NamedValue) (Rows, error)
}
// Conn is a connection to a database. It is not used concurrently
// by multiple goroutines.
//
// Conn is assumed to be stateful.
type Conn interface {
	// Prepare returns a prepared statement, bound to this connection.
	Prepare(query string) (Stmt, error)

	// Close invalidates and potentially stops any current
	// prepared statements and transactions, marking this
	// connection as no longer in use.
	//
	// Because the sql package maintains a free pool of
	// connections and only calls Close when there's a surplus of
	// idle connections, it shouldn't be necessary for drivers to
	// do their own connection caching.
	//
	// Drivers must ensure all network calls made by Close
	// do not block indefinitely (e.g. apply a timeout).
	Close() error

	// Begin starts and returns a new transaction.
	//
	// Deprecated: Drivers should implement ConnBeginTx instead (or additionally).
	Begin() (Tx, error)
}

// ConnPrepareContext enhances the [Conn] interface with context.
type ConnPrepareContext interface {
	// PrepareContext returns a prepared statement, bound to this connection.
	// context is for the preparation of the statement,
	// it must not store the context within the statement itself.
	PrepareContext(ctx context.Context, query string) (Stmt, error)
}
// IsolationLevel is the transaction isolation level stored in [TxOptions].
//
// This type should be considered identical to [database/sql.IsolationLevel] along
// with any values defined on it.
type IsolationLevel int

// TxOptions holds the transaction options.
//
// This type should be considered identical to [database/sql.TxOptions].
type TxOptions struct {
	Isolation IsolationLevel
	ReadOnly  bool
}

// ConnBeginTx enhances the [Conn] interface with context and [TxOptions].
type ConnBeginTx interface {
	// BeginTx starts and returns a new transaction.
	// If the context is canceled by the user the sql package will
	// call Tx.Rollback before discarding and closing the connection.
	//
	// This must check opts.Isolation to determine if there is a set
	// isolation level. If the driver does not support a non-default
	// level and one is set or if there is a non-default isolation level
	// that is not supported, an error must be returned.
	//
	// This must also check opts.ReadOnly to determine if the read-only
	// value is true to either set the read-only transaction property if supported
	// or return an error if it is not supported.
	BeginTx(ctx context.Context, opts TxOptions) (Tx, error)
}
// SessionResetter may be implemented by [Conn] to allow drivers to reset the
// session state associated with the connection and to signal a bad connection.
type SessionResetter interface {
	// ResetSession is called prior to executing a query on the connection
	// if the connection has been used before. If the driver returns ErrBadConn
	// the connection is discarded.
	ResetSession(ctx context.Context) error
}

// Validator may be implemented by [Conn] to allow drivers to
// signal if a connection is valid or if it should be discarded.
//
// If implemented, drivers may return the underlying error from queries,
// even if the connection should be discarded by the connection pool.
type Validator interface {
	// IsValid is called prior to placing the connection into the
	// connection pool. The connection will be discarded if false is returned.
	IsValid() bool
}

// Result is the result of a query execution.
type Result interface {
	// LastInsertId returns the database's auto-generated ID
	// after, for example, an INSERT into a table with primary
	// key.
	LastInsertId() (int64, error)

	// RowsAffected returns the number of rows affected by the
	// query.
	RowsAffected() (int64, error)
}
// Stmt is a prepared statement. It is bound to a [Conn] and not
// used by multiple goroutines concurrently.
type Stmt interface {
	// Close closes the statement.
	//
	// As of Go 1.1, a Stmt will not be closed if it's in use
	// by any queries.
	//
	// Drivers must ensure all network calls made by Close
	// do not block indefinitely (e.g. apply a timeout).
	Close() error

	// NumInput returns the number of placeholder parameters.
	//
	// If NumInput returns >= 0, the sql package will sanity check
	// argument counts from callers and return errors to the caller
	// before the statement's Exec or Query methods are called.
	//
	// NumInput may also return -1, if the driver doesn't know
	// its number of placeholders. In that case, the sql package
	// will not sanity check Exec or Query argument counts.
	NumInput() int

	// Exec executes a query that doesn't return rows, such
	// as an INSERT or UPDATE.
	//
	// Deprecated: Drivers should implement StmtExecContext instead (or additionally).
	Exec(args []Value) (Result, error)

	// Query executes a query that may return rows, such as a
	// SELECT.
	//
	// Deprecated: Drivers should implement StmtQueryContext instead (or additionally).
	Query(args []Value) (Rows, error)
}

// StmtExecContext enhances the [Stmt] interface by providing Exec with context.
type StmtExecContext interface {
	// ExecContext executes a query that doesn't return rows, such
	// as an INSERT or UPDATE.
	//
	// ExecContext must honor the context timeout and return when it is canceled.
	ExecContext(ctx context.Context, args []NamedValue) (Result, error)
}

// StmtQueryContext enhances the [Stmt] interface by providing Query with context.
type StmtQueryContext interface {
	// QueryContext executes a query that may return rows, such as a
	// SELECT.
	//
	// QueryContext must honor the context timeout and return when it is canceled.
	QueryContext(ctx context.Context, args []NamedValue) (Rows, error)
}
// ErrRemoveArgument may be returned from [NamedValueChecker] to instruct the
// [database/sql] package to not pass the argument to the driver query interface.
// Return when accepting query specific options or structures that aren't
// SQL query arguments.
var ErrRemoveArgument = errors.New("driver: remove argument from query")

// NamedValueChecker may be optionally implemented by [Conn] or [Stmt]. It provides
// the driver more control to handle Go and database types beyond the default
// [Value] types allowed.
//
// The [database/sql] package checks for value checkers in the following order,
// stopping at the first found match: Stmt.NamedValueChecker, Conn.NamedValueChecker,
// Stmt.ColumnConverter, [DefaultParameterConverter].
//
// If CheckNamedValue returns [ErrRemoveArgument], the [NamedValue] will not be included in
// the final query arguments. This may be used to pass special options to
// the query itself.
//
// If [ErrSkip] is returned the column converter error checking
// path is used for the argument. Drivers may wish to return [ErrSkip] after
// they have exhausted their own special cases.
type NamedValueChecker interface {
	// CheckNamedValue is called before passing arguments to the driver
	// and is called in place of any ColumnConverter. CheckNamedValue must do type
	// validation and conversion as appropriate for the driver.
	CheckNamedValue(*NamedValue) error
}

// ColumnConverter may be optionally implemented by [Stmt] if the
// statement is aware of its own columns' types and can convert from
// any type to a driver [Value].
//
// Deprecated: Drivers should implement [NamedValueChecker].
type ColumnConverter interface {
	// ColumnConverter returns a ValueConverter for the provided
	// column index. If the type of a specific column isn't known
	// or shouldn't be handled specially, [DefaultParameterConverter]
	// can be returned.
	ColumnConverter(idx int) ValueConverter
}
// Rows is an iterator over an executed query's results.
//
// Optional capabilities are exposed through the extension interfaces that
// embed Rows (for example [RowsNextResultSet] and the RowsColumnType*
// interfaces declared below).
type Rows interface {
	// Columns returns the names of the columns. The number of
	// columns of the result is inferred from the length of the
	// slice. If a particular column name isn't known, an empty
	// string should be returned for that entry.
	Columns() []string
	// Close closes the rows iterator.
	Close() error
	// Next is called to populate the next row of data into
	// the provided slice. The provided slice will be the same
	// size as the Columns() are wide.
	//
	// Next should return io.EOF when there are no more rows.
	//
	// The dest should not be written to outside of Next. Care
	// should be taken when closing Rows not to modify
	// a buffer held in dest.
	Next(dest []Value) error
}
// RowsNextResultSet extends the [Rows] interface by providing a way to signal
// the driver to advance to the next result set.
type RowsNextResultSet interface {
	Rows // the base row iterator
	// HasNextResultSet is called at the end of the current result set and
	// reports whether there is another result set after the current one.
	HasNextResultSet() bool
	// NextResultSet advances the driver to the next result set even
	// if there are remaining rows in the current result set.
	//
	// NextResultSet should return io.EOF when there are no more result sets.
	NextResultSet() error
}
// RowsColumnTypeScanType may be implemented by [Rows]. It should return
// the value type that can be used to scan types into. For example, for the
// database column type "bigint" this should return "[reflect.TypeOf](int64(0))".
type RowsColumnTypeScanType interface {
	Rows
	// ColumnTypeScanType returns the Go scan type of the column at index.
	ColumnTypeScanType(index int) reflect.Type
}
// RowsColumnTypeDatabaseTypeName may be implemented by [Rows]. It should return the
// database system type name without the length. Type names should be uppercase.
// Examples of returned types: "VARCHAR", "NVARCHAR", "VARCHAR2", "CHAR", "TEXT",
// "DECIMAL", "SMALLINT", "INT", "BIGINT", "BOOL", "[]BIGINT", "JSONB", "XML",
// "TIMESTAMP".
type RowsColumnTypeDatabaseTypeName interface {
	Rows
	// ColumnTypeDatabaseTypeName returns the database type name of the column at index.
	ColumnTypeDatabaseTypeName(index int) string
}
// RowsColumnTypeLength may be implemented by [Rows]. It should return the length
// of the column type if the column is a variable length type. If the column is
// not a variable length type ok should return false.
// If length is not limited other than system limits, it should return [math.MaxInt64].
// The following are examples of returned values for various types:
//
//	TEXT          (math.MaxInt64, true)
//	varchar(10)   (10, true)
//	nvarchar(10)  (10, true)
//	decimal       (0, false)
//	int           (0, false)
//	bytea(30)     (30, true)
type RowsColumnTypeLength interface {
	Rows
	// ColumnTypeLength reports the declared capacity of a variable-length column.
	ColumnTypeLength(index int) (length int64, ok bool)
}
// RowsColumnTypeNullable may be implemented by [Rows]. The nullable value should
// be true if it is known the column may be null, or false if the column is known
// to be not nullable.
// If the column nullability is unknown, ok should be false.
type RowsColumnTypeNullable interface {
	Rows
	// ColumnTypeNullable reports whether the column at index may be NULL.
	ColumnTypeNullable(index int) (nullable, ok bool)
}
// RowsColumnTypePrecisionScale may be implemented by [Rows]. It should return
// the precision and scale for decimal types. If not applicable, ok should be false.
// The following are examples of returned values for various types:
//
//	decimal(38, 4)    (38, 4, true)
//	int               (0, 0, false)
//	decimal           (math.MaxInt64, math.MaxInt64, true)
type RowsColumnTypePrecisionScale interface {
	Rows
	// ColumnTypePrecisionScale returns the precision and scale of a decimal column.
	ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool)
}
// Tx is a transaction.
type Tx interface {
	// Commit makes the transaction's changes permanent.
	Commit() error
	// Rollback discards the transaction's changes.
	Rollback() error
}
// RowsAffected implements [Result] for an INSERT or UPDATE operation
// which mutates a number of rows.
type RowsAffected int64

var _ Result = RowsAffected(0)

// LastInsertId always fails: a bare row count carries no insert id.
func (RowsAffected) LastInsertId() (int64, error) {
	return 0, errors.New("LastInsertId is not supported by this driver")
}

// RowsAffected returns the count itself with no error.
func (n RowsAffected) RowsAffected() (int64, error) {
	return int64(n), nil
}
// noRows is the [Result] implementation behind [ResultNoRows]; both of its
// methods always fail.
type noRows struct{}

var _ Result = noRows{}

// ResultNoRows is a pre-defined [Result] for drivers to return when a DDL
// command (such as a CREATE TABLE) succeeds. It returns an error for both
// LastInsertId and [RowsAffected].
var ResultNoRows noRows

func (noRows) LastInsertId() (int64, error) {
	return 0, errors.New("no LastInsertId available after DDL statement")
}

func (noRows) RowsAffected() (int64, error) {
	return 0, errors.New("no RowsAffected available after DDL statement")
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package driver
import (
"fmt"
"reflect"
"strconv"
"time"
)
// ValueConverter is the interface providing the ConvertValue method.
//
// Various implementations of ValueConverter are provided by the
// driver package to provide consistent implementations of conversions
// between drivers. The ValueConverters have several uses:
//
//   - converting from the [Value] types as provided by the sql package
//     into a database table's specific column type and making sure it
//     fits, such as making sure a particular int64 fits in a
//     table's uint16 column.
//
//   - converting a value as given from the database into one of the
//     driver [Value] types.
//
//   - by the [database/sql] package, for converting from a driver's [Value] type
//     to a user's type in a scan.
type ValueConverter interface {
	// ConvertValue converts a value to a driver Value.
	// It returns an error if v cannot be represented as a Value.
	ConvertValue(v any) (Value, error)
}
// Valuer is the interface providing the Value method.
//
// Errors returned by the [Value] method are wrapped by the database/sql package.
// This allows callers to use [errors.Is] for precise error handling after operations
// like [database/sql.Query], [database/sql.Exec], or [database/sql.QueryRow].
//
// Types implementing Valuer interface are able to convert
// themselves to a driver [Value].
type Valuer interface {
	// Value returns a driver Value.
	// Value must not panic.
	Value() (Value, error)
}
// Bool is a [ValueConverter] that converts input values to bool.
//
// The conversion rules are:
//   - booleans are returned unchanged
//   - for integer types,
//     1 is true
//     0 is false,
//     other integers are an error
//   - for strings and []byte, same rules as [strconv.ParseBool]
//   - all other types are an error
var Bool boolType

// boolType is the stateless implementation behind [Bool].
type boolType struct{}

var _ ValueConverter = boolType{}

// String implements [fmt.Stringer] for diagnostics.
func (boolType) String() string { return "Bool" }
// ConvertValue converts src to a bool [Value].
//
// Booleans pass through unchanged; strings and []byte are parsed with
// [strconv.ParseBool]; signed and unsigned integer kinds accept exactly
// 0 (false) and 1 (true). Anything else is an error.
func (boolType) ConvertValue(src any) (Value, error) {
	switch s := src.(type) {
	case bool:
		return s, nil
	case string:
		if b, err := strconv.ParseBool(s); err == nil {
			return b, nil
		}
		return nil, fmt.Errorf("sql/driver: couldn't convert %q into type bool", s)
	case []byte:
		if b, err := strconv.ParseBool(string(s)); err == nil {
			return b, nil
		}
		return nil, fmt.Errorf("sql/driver: couldn't convert %q into type bool", s)
	}

	// Fall back to reflection so named integer types are handled too.
	rv := reflect.ValueOf(src)
	switch rv.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch iv := rv.Int(); iv {
		case 0:
			return false, nil
		case 1:
			return true, nil
		default:
			return nil, fmt.Errorf("sql/driver: couldn't convert %d into type bool", iv)
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		switch uv := rv.Uint(); uv {
		case 0:
			return false, nil
		case 1:
			return true, nil
		default:
			return nil, fmt.Errorf("sql/driver: couldn't convert %d into type bool", uv)
		}
	}
	return nil, fmt.Errorf("sql/driver: couldn't convert %v (%T) into type bool", src, src)
}
// Int32 is a [ValueConverter] that converts input values to int64,
// respecting the limits of an int32 value.
var Int32 int32Type

// int32Type is the stateless implementation behind [Int32].
type int32Type struct{}

var _ ValueConverter = int32Type{}

// ConvertValue converts v to an int64 [Value] constrained to the int32
// range. Signed and unsigned integer kinds are range-checked explicitly;
// strings are parsed as base-10 32-bit integers. All other kinds are an
// error.
func (int32Type) ConvertValue(v any) (Value, error) {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		i64 := rv.Int()
		if i64 > (1<<31)-1 || i64 < -(1<<31) {
			return nil, fmt.Errorf("sql/driver: value %d overflows int32", v)
		}
		return i64, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		u64 := rv.Uint()
		if u64 > (1<<31)-1 {
			return nil, fmt.Errorf("sql/driver: value %d overflows int32", v)
		}
		return int64(u64), nil
	case reflect.String:
		// Parse with an explicit 32-bit size so out-of-range strings fail
		// here instead of silently passing through: strconv.Atoi would
		// accept any int-sized value on 64-bit platforms, defeating the
		// documented int32 limit.
		i, err := strconv.ParseInt(rv.String(), 10, 32)
		if err != nil {
			return nil, fmt.Errorf("sql/driver: value %q can't be converted to int32", v)
		}
		return i, nil
	}
	return nil, fmt.Errorf("sql/driver: unsupported value %v (type %T) converting to int32", v, v)
}
// String is a [ValueConverter] that converts its input to a string.
// If the value is already a string or []byte, it's unchanged.
// If the value is of another type, conversion to string is done
// with fmt.Sprintf("%v", v).
var String stringType

// stringType is the stateless implementation behind [String].
type stringType struct{}

// ConvertValue passes string and []byte values through untouched and
// renders every other type with the %v verb.
func (stringType) ConvertValue(v any) (Value, error) {
	if s, ok := v.(string); ok {
		return s, nil
	}
	if b, ok := v.([]byte); ok {
		return b, nil
	}
	return fmt.Sprintf("%v", v), nil
}
// Null is a type that implements [ValueConverter] by allowing nil
// values but otherwise delegating to another [ValueConverter].
type Null struct {
	Converter ValueConverter
}

// ConvertValue maps nil straight to the NULL value and hands every other
// input to the wrapped Converter.
func (n Null) ConvertValue(v any) (Value, error) {
	if v != nil {
		return n.Converter.ConvertValue(v)
	}
	return nil, nil
}
// NotNull is a type that implements [ValueConverter] by disallowing nil
// values but otherwise delegating to another [ValueConverter].
type NotNull struct {
	Converter ValueConverter
}

// ConvertValue rejects nil and hands every other input to the wrapped
// Converter.
func (n NotNull) ConvertValue(v any) (Value, error) {
	if v != nil {
		return n.Converter.ConvertValue(v)
	}
	return nil, fmt.Errorf("nil value not allowed")
}
// IsValue reports whether v is a valid [Value] parameter type:
// nil, []byte, bool, float64, int64, string, time.Time, or a type
// implementing the decimal decompose interface.
func IsValue(v any) bool {
	switch v.(type) {
	case nil:
		// An untyped nil interface is the NULL parameter.
		return true
	case []byte, bool, float64, int64, string, time.Time:
		return true
	case decimalDecompose:
		return true
	}
	return false
}
// IsScanValue is equivalent to [IsValue].
// It exists for compatibility: both report whether v is one of the
// driver [Value] types.
func IsScanValue(v any) bool {
	return IsValue(v)
}
// DefaultParameterConverter is the default implementation of
// [ValueConverter] that's used when a [Stmt] doesn't implement
// [ColumnConverter].
//
// DefaultParameterConverter returns its argument directly if
// IsValue(arg). Otherwise, if the argument implements [Valuer], its
// Value method is used to return a [Value]. As a fallback, the provided
// argument's underlying type is used to convert it to a [Value]:
// underlying integer types are converted to int64, floats to float64,
// bool, string, and []byte to themselves. If the argument is a nil
// pointer, defaultConverter.ConvertValue returns a nil [Value].
// If the argument is a non-nil pointer, it is dereferenced and
// defaultConverter.ConvertValue is called recursively. Other types
// are an error.
var DefaultParameterConverter defaultConverter

// defaultConverter is the stateless implementation behind
// [DefaultParameterConverter].
type defaultConverter struct{}

var _ ValueConverter = defaultConverter{}

// valuerReflectType caches the [Valuer] interface type for the
// nil-pointer check in callValuerValue.
var valuerReflectType = reflect.TypeFor[Valuer]()
// callValuerValue returns vr.Value(), with one exception:
// If vr.Value is an auto-generated method on a pointer type and the
// pointer is nil, it would panic at runtime in the panicwrap
// method. Treat it like nil instead.
// Issue 8415.
//
// This is so people can implement driver.Value on value types and
// still use nil pointers to those types to mean nil/NULL, just like
// string/*string.
//
// This function is mirrored in the database/sql package.
func callValuerValue(vr Valuer) (v Value, err error) {
	rv := reflect.ValueOf(vr)
	if rv.Kind() == reflect.Pointer && rv.IsNil() &&
		rv.Type().Elem().Implements(valuerReflectType) {
		// Nil pointer whose element type supplies Value: treat as NULL.
		return nil, nil
	}
	return vr.Value()
}
// ConvertValue implements [ValueConverter]; see the documentation on
// [DefaultParameterConverter] for the full conversion rules. Valid Values
// pass through untouched, a [Valuer] (or decimal decompose) implementation
// is consulted next, and finally the argument's underlying kind decides.
func (defaultConverter) ConvertValue(v any) (Value, error) {
	if IsValue(v) {
		return v, nil
	}

	switch x := v.(type) {
	case Valuer:
		sv, err := callValuerValue(x)
		if err != nil {
			return nil, err
		}
		if !IsValue(sv) {
			return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
		}
		return sv, nil
	// For now, continue to prefer the Valuer interface over the decimal decompose interface.
	case decimalDecompose:
		return x, nil
	}

	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Pointer:
		if rv.IsNil() {
			// A nil pointer becomes the NULL value.
			return nil, nil
		}
		// Dereference and convert whatever the pointer points at.
		return defaultConverter{}.ConvertValue(rv.Elem().Interface())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return rv.Int(), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return int64(rv.Uint()), nil
	case reflect.Uint64:
		// uint64 fits only while the high bit is clear.
		if u64 := rv.Uint(); u64 < 1<<63 {
			return int64(u64), nil
		}
		return nil, fmt.Errorf("uint64 values with high bit set are not supported")
	case reflect.Float32, reflect.Float64:
		return rv.Float(), nil
	case reflect.Bool:
		return rv.Bool(), nil
	case reflect.Slice:
		if ek := rv.Type().Elem().Kind(); ek != reflect.Uint8 {
			return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
		}
		return rv.Bytes(), nil
	case reflect.String:
		return rv.String(), nil
	}
	return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
}
// decimalDecompose is the subset of the decimal composer/decomposer
// contract that this package needs to pass decimal values through
// untouched; see [IsValue] and defaultConverter.ConvertValue.
type decimalDecompose interface {
	// Decompose returns the internal decimal state into parts.
	// If the provided buf has sufficient capacity, buf may be returned as the coefficient with
	// the value set and length set as appropriate.
	Decompose(buf []byte) (form byte, negative bool, coefficient []byte, exponent int32)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sql provides a generic interface around SQL (or SQL-like)
// databases.
//
// The sql package must be used in conjunction with a database driver.
// See https://golang.org/s/sqldrivers for a list of drivers.
//
// Drivers that do not support context cancellation will not return until
// after the query is completed.
//
// For usage examples, see the wiki page at
// https://golang.org/s/sqlwiki.
package sql
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"io"
"maps"
"math/rand/v2"
"reflect"
"runtime"
"slices"
"strconv"
"sync"
"sync/atomic"
"time"
_ "unsafe"
)
var driversMu sync.RWMutex
// drivers should be an internal detail,
// but widely used packages access it using linkname.
// (It is extra wrong that they linkname drivers but not driversMu.)
// Notable members of the hall of shame include:
// - github.com/instana/go-sensor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname drivers
var drivers = make(map[string]driver.Driver)
// Register makes a database driver available by the provided name.
// If Register is called twice with the same name or if driver is nil,
// it panics.
func Register(name string, driver driver.Driver) {
driversMu.Lock()
defer driversMu.Unlock()
if driver == nil {
panic("sql: Register driver is nil")
}
if _, dup := drivers[name]; dup {
panic("sql: Register called twice for driver " + name)
}
drivers[name] = driver
}
func unregisterAllDrivers() {
driversMu.Lock()
defer driversMu.Unlock()
// For tests.
drivers = make(map[string]driver.Driver)
}
// Drivers returns a sorted list of the names of the registered drivers.
func Drivers() []string {
driversMu.RLock()
defer driversMu.RUnlock()
return slices.Sorted(maps.Keys(drivers))
}
// A NamedArg is a named argument. NamedArg values may be used as
// arguments to [DB.Query] or [DB.Exec] and bind to the corresponding named
// parameter in the SQL statement.
//
// For a more concise way to create NamedArg values, see
// the [Named] function.
type NamedArg struct {
	_NamedFieldsRequired struct{}

	// Name is the name of the parameter placeholder.
	//
	// If empty, the ordinal position in the argument list will be
	// used.
	//
	// Name must omit any symbol prefix.
	Name string

	// Value is the value of the parameter.
	// It may be assigned the same value types as the query
	// arguments.
	Value any
}

// Named provides a more concise way to create [NamedArg] values.
//
// Example usage:
//
//	db.ExecContext(ctx, `
//	    delete from Invoice
//	    where
//	        TimeCreated < @end
//	        and TimeCreated >= @start;`,
//	    sql.Named("start", startTime),
//	    sql.Named("end", endTime),
//	)
func Named(name string, value any) NamedArg {
	// This constructor exists because the go1compat promise
	// doesn't guarantee that structs don't grow more fields,
	// so unkeyed struct literals (sql.NamedArg{name, value})
	// are a vet error; hence the keyed assignments below.
	var arg NamedArg
	arg.Name = name
	arg.Value = value
	return arg
}
// IsolationLevel is the transaction isolation level used in [TxOptions].
type IsolationLevel int

// Various isolation levels that drivers may support in [DB.BeginTx].
// If a driver does not support a given isolation level an error may be returned.
//
// See https://en.wikipedia.org/wiki/Isolation_(database_systems)#Isolation_levels.
const (
	LevelDefault IsolationLevel = iota
	LevelReadUncommitted
	LevelReadCommitted
	LevelWriteCommitted
	LevelRepeatableRead
	LevelSnapshot
	LevelSerializable
	LevelLinearizable
)

// isolationLevelNames holds the display name of each level above,
// indexed by the level's own value.
var isolationLevelNames = [...]string{
	LevelDefault:         "Default",
	LevelReadUncommitted: "Read Uncommitted",
	LevelReadCommitted:   "Read Committed",
	LevelWriteCommitted:  "Write Committed",
	LevelRepeatableRead:  "Repeatable Read",
	LevelSnapshot:        "Snapshot",
	LevelSerializable:    "Serializable",
	LevelLinearizable:    "Linearizable",
}

// String returns the name of the transaction isolation level.
// Unknown levels render as "IsolationLevel(n)".
func (i IsolationLevel) String() string {
	if i >= 0 && int(i) < len(isolationLevelNames) {
		return isolationLevelNames[i]
	}
	return "IsolationLevel(" + strconv.Itoa(int(i)) + ")"
}

var _ fmt.Stringer = LevelDefault
// TxOptions holds the transaction options to be used in [DB.BeginTx].
type TxOptions struct {
	// Isolation is the transaction isolation level.
	// If zero, the driver or database's default level is used.
	Isolation IsolationLevel
	// ReadOnly, if true, requests a read-only transaction.
	ReadOnly bool
}
// RawBytes is a byte slice that holds a reference to memory owned by
// the database itself. After a [Rows.Scan] into a RawBytes, the slice is only
// valid until the next call to [Rows.Next], [Rows.Scan], or [Rows.Close].
// Copy the bytes out if they must outlive the row.
type RawBytes []byte
// NullString represents a string that may be null.
// NullString implements the [Scanner] interface so
// it can be used as a scan destination:
//
//	var s NullString
//	err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
//	...
//	if s.Valid {
//	   // use s.String
//	} else {
//	   // NULL value
//	}
type NullString struct {
	String string
	Valid  bool // Valid is true if String is not NULL
}

// Scan implements the [Scanner] interface.
func (ns *NullString) Scan(value any) error {
	if value == nil {
		ns.String = ""
		ns.Valid = false
		return nil
	}
	if err := convertAssign(&ns.String, value); err != nil {
		ns.Valid = false
		return err
	}
	ns.Valid = true
	return nil
}

// Value implements the [driver.Valuer] interface.
func (ns NullString) Value() (driver.Value, error) {
	if ns.Valid {
		return ns.String, nil
	}
	return nil, nil
}
// NullInt64 represents an int64 that may be null.
// NullInt64 implements the [Scanner] interface so
// it can be used as a scan destination, similar to [NullString].
type NullInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL
}

// Scan implements the [Scanner] interface.
func (n *NullInt64) Scan(value any) error {
	if value == nil {
		n.Int64 = 0
		n.Valid = false
		return nil
	}
	if err := convertAssign(&n.Int64, value); err != nil {
		n.Valid = false
		return err
	}
	n.Valid = true
	return nil
}

// Value implements the [driver.Valuer] interface.
func (n NullInt64) Value() (driver.Value, error) {
	if n.Valid {
		return n.Int64, nil
	}
	return nil, nil
}
// NullInt32 represents an int32 that may be null.
// NullInt32 implements the [Scanner] interface so
// it can be used as a scan destination, similar to [NullString].
type NullInt32 struct {
	Int32 int32
	Valid bool // Valid is true if Int32 is not NULL
}

// Scan implements the [Scanner] interface.
func (n *NullInt32) Scan(value any) error {
	if value == nil {
		n.Int32 = 0
		n.Valid = false
		return nil
	}
	if err := convertAssign(&n.Int32, value); err != nil {
		n.Valid = false
		return err
	}
	n.Valid = true
	return nil
}

// Value implements the [driver.Valuer] interface.
// The int32 is widened to int64, the driver integer type.
func (n NullInt32) Value() (driver.Value, error) {
	if n.Valid {
		return int64(n.Int32), nil
	}
	return nil, nil
}
// NullInt16 represents an int16 that may be null.
// NullInt16 implements the [Scanner] interface so
// it can be used as a scan destination, similar to [NullString].
type NullInt16 struct {
	Int16 int16
	Valid bool // Valid is true if Int16 is not NULL
}

// Scan implements the [Scanner] interface.
func (n *NullInt16) Scan(value any) error {
	if value == nil {
		n.Int16 = 0
		n.Valid = false
		return nil
	}
	if err := convertAssign(&n.Int16, value); err != nil {
		n.Valid = false
		return err
	}
	n.Valid = true
	return nil
}

// Value implements the [driver.Valuer] interface.
// The int16 is widened to int64, the driver integer type.
func (n NullInt16) Value() (driver.Value, error) {
	if n.Valid {
		return int64(n.Int16), nil
	}
	return nil, nil
}
// NullByte represents a byte that may be null.
// NullByte implements the [Scanner] interface so
// it can be used as a scan destination, similar to [NullString].
type NullByte struct {
	Byte  byte
	Valid bool // Valid is true if Byte is not NULL
}

// Scan implements the [Scanner] interface.
func (n *NullByte) Scan(value any) error {
	if value == nil {
		n.Byte = 0
		n.Valid = false
		return nil
	}
	if err := convertAssign(&n.Byte, value); err != nil {
		n.Valid = false
		return err
	}
	n.Valid = true
	return nil
}

// Value implements the [driver.Valuer] interface.
// The byte is widened to int64, the driver integer type.
func (n NullByte) Value() (driver.Value, error) {
	if n.Valid {
		return int64(n.Byte), nil
	}
	return nil, nil
}
// NullFloat64 represents a float64 that may be null.
// NullFloat64 implements the [Scanner] interface so
// it can be used as a scan destination, similar to [NullString].
type NullFloat64 struct {
	Float64 float64
	Valid   bool // Valid is true if Float64 is not NULL
}

// Scan implements the [Scanner] interface.
func (n *NullFloat64) Scan(value any) error {
	if value == nil {
		n.Float64 = 0
		n.Valid = false
		return nil
	}
	if err := convertAssign(&n.Float64, value); err != nil {
		n.Valid = false
		return err
	}
	n.Valid = true
	return nil
}

// Value implements the [driver.Valuer] interface.
func (n NullFloat64) Value() (driver.Value, error) {
	if n.Valid {
		return n.Float64, nil
	}
	return nil, nil
}
// NullBool represents a bool that may be null.
// NullBool implements the [Scanner] interface so
// it can be used as a scan destination, similar to [NullString].
type NullBool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL
}

// Scan implements the [Scanner] interface.
func (n *NullBool) Scan(value any) error {
	if value == nil {
		n.Bool = false
		n.Valid = false
		return nil
	}
	if err := convertAssign(&n.Bool, value); err != nil {
		n.Valid = false
		return err
	}
	n.Valid = true
	return nil
}

// Value implements the [driver.Valuer] interface.
func (n NullBool) Value() (driver.Value, error) {
	if n.Valid {
		return n.Bool, nil
	}
	return nil, nil
}
// NullTime represents a [time.Time] that may be null.
// NullTime implements the [Scanner] interface so
// it can be used as a scan destination, similar to [NullString].
type NullTime struct {
	Time  time.Time
	Valid bool // Valid is true if Time is not NULL
}

// Scan implements the [Scanner] interface.
func (n *NullTime) Scan(value any) error {
	if value == nil {
		n.Time = time.Time{}
		n.Valid = false
		return nil
	}
	if err := convertAssign(&n.Time, value); err != nil {
		n.Valid = false
		return err
	}
	n.Valid = true
	return nil
}

// Value implements the [driver.Valuer] interface.
func (n NullTime) Value() (driver.Value, error) {
	if n.Valid {
		return n.Time, nil
	}
	return nil, nil
}
// Null represents a value that may be null.
// Null implements the [Scanner] interface so
// it can be used as a scan destination:
//
//	var s Null[string]
//	err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
//	...
//	if s.Valid {
//	   // use s.V
//	} else {
//	   // NULL value
//	}
//
// T should be one of the types accepted by [driver.Value].
type Null[T any] struct {
	V     T
	Valid bool
}

// Scan implements the [Scanner] interface.
func (n *Null[T]) Scan(value any) error {
	if value == nil {
		var zero T
		n.V, n.Valid = zero, false
		return nil
	}
	if err := convertAssign(&n.V, value); err != nil {
		n.Valid = false
		return err
	}
	n.Valid = true
	return nil
}

// Value implements the [driver.Valuer] interface.
func (n Null[T]) Value() (driver.Value, error) {
	if !n.Valid {
		return nil, nil
	}
	v := any(n.V)
	// Honor a Valuer implemented by the wrapped value first.
	// See issue 69728.
	if valuer, ok := v.(driver.Valuer); ok {
		val, err := callValuerValue(valuer)
		if err != nil {
			return val, err
		}
		v = val
	}
	// Normalize whatever we have to a driver Value.
	// See issue 69837.
	return driver.DefaultParameterConverter.ConvertValue(v)
}
// Scanner is an interface used by [Rows.Scan].
type Scanner interface {
	// Scan assigns a value from a database driver.
	//
	// The src value will be of one of the following types:
	//
	//	int64
	//	float64
	//	bool
	//	[]byte
	//	string
	//	time.Time
	//	nil - for NULL values
	//
	// An error should be returned if the value cannot be stored
	// without loss of information.
	//
	// Reference types such as []byte are only valid until the next call to Scan
	// and should not be retained. Their underlying memory is owned by the driver.
	// If retention is necessary, copy their values before the next call to Scan.
	Scan(src any) error
}
// Out may be used to retrieve OUTPUT value parameters from stored procedures.
//
// Not all drivers and databases support OUTPUT value parameters.
//
// Example usage:
//
//	var outArg string
//	_, err := db.ExecContext(ctx, "ProcName", sql.Named("Arg1", sql.Out{Dest: &outArg}))
type Out struct {
	_NamedFieldsRequired struct{}

	// Dest is a pointer to the value that will be set to the result of the
	// stored procedure's OUTPUT parameter.
	Dest any

	// In is whether the parameter is an INOUT parameter. If so, the input value to the stored
	// procedure is the dereferenced value of Dest's pointer, which is then replaced with
	// the output value.
	In bool
}
// ErrNoRows is returned by [Row.Scan] when [DB.QueryRow] doesn't return a
// row. In such a case, QueryRow returns a placeholder [*Row] value that
// defers this error until a Scan. Compare with [errors.Is].
var ErrNoRows = errors.New("sql: no rows in result set")
// DB is a database handle representing a pool of zero or more
// underlying connections. It's safe for concurrent use by multiple
// goroutines.
//
// The sql package creates and frees connections automatically; it
// also maintains a free pool of idle connections. If the database has
// a concept of per-connection state, such state can be reliably observed
// within a transaction ([Tx]) or connection ([Conn]). Once [DB.Begin] is called, the
// returned [Tx] is bound to a single connection. Once [Tx.Commit] or
// [Tx.Rollback] is called on the transaction, that transaction's
// connection is returned to [DB]'s idle connection pool. The pool size
// can be controlled with [DB.SetMaxIdleConns].
type DB struct {
	// Total time waited for new connections.
	waitDuration atomic.Int64

	// connector is the source of new driver connections.
	connector driver.Connector

	// numClosed is an atomic counter which represents a total number of
	// closed connections. Stmt.openStmt checks it before cleaning closed
	// connections in Stmt.css.
	numClosed atomic.Uint64

	mu           sync.Mutex    // protects following fields
	freeConn     []*driverConn // free connections ordered by returnedAt oldest to newest
	connRequests connRequestSet
	numOpen      int // number of opened and pending open connections
	// Used to signal the need for new connections
	// a goroutine running connectionOpener() reads on this chan and
	// maybeOpenNewConnections sends on the chan (one send per needed connection)
	// It is closed during db.Close(). The close tells the connectionOpener
	// goroutine to exit.
	openerCh          chan struct{}
	closed            bool
	dep               map[finalCloser]depSet
	lastPut           map[*driverConn]string // stacktrace of last conn's put; debug only
	maxIdleCount      int                    // zero means defaultMaxIdleConns; negative means 0
	maxOpen           int                    // <= 0 means unlimited
	maxLifetime       time.Duration          // maximum amount of time a connection may be reused
	maxIdleTime       time.Duration          // maximum amount of time a connection may be idle before being closed
	cleanerCh         chan struct{}
	waitCount         int64 // Total number of connections waited for.
	maxIdleClosed     int64 // Total number of connections closed due to idle count.
	maxIdleTimeClosed int64 // Total number of connections closed due to idle time.
	maxLifetimeClosed int64 // Total number of connections closed due to max connection lifetime limit.

	stop func() // stop cancels the connection opener.
}
// connReuseStrategy determines how (*DB).conn returns database connections.
type connReuseStrategy uint8

const (
	// alwaysNewConn forces a new connection to the database.
	alwaysNewConn connReuseStrategy = iota
	// cachedOrNewConn returns a cached connection, if available, else waits
	// for one to become available (if MaxOpenConns has been reached) or
	// creates a new database connection.
	cachedOrNewConn
)
// driverConn wraps a driver.Conn with a mutex, to
// be held during all calls into the Conn. (including any calls onto
// interfaces returned via that Conn, such as calls on Tx, Stmt,
// Result, Rows)
type driverConn struct {
	db        *DB
	createdAt time.Time

	sync.Mutex  // guards following
	ci          driver.Conn
	needReset   bool // The connection session should be reset before use if true.
	closed      bool
	finalClosed bool // ci.Close has been called
	openStmt    map[*driverStmt]bool

	// guarded by db.mu
	inUse      bool
	dbmuClosed bool      // same as closed, but guarded by db.mu, for removeClosedStmtLocked
	returnedAt time.Time // Time the connection was created or returned.
	onPut      []func()  // code (with db.mu held) run when conn is next returned
}
// releaseConn hands dc back to the DB via putConn (defined elsewhere in
// this file); err is the result of the operation that used the connection.
func (dc *driverConn) releaseConn(err error) {
	dc.db.putConn(dc, err, true)
}
// removeOpenStmt forgets ds from the set of statements tracked on dc.
func (dc *driverConn) removeOpenStmt(ds *driverStmt) {
	dc.Lock()
	defer dc.Unlock()
	delete(dc.openStmt, ds)
}
// expired reports whether the connection has outlived timeout.
// A non-positive timeout means connections never expire.
func (dc *driverConn) expired(timeout time.Duration) bool {
	if timeout <= 0 {
		return false
	}
	return time.Now().After(dc.createdAt.Add(timeout))
}
// resetSession checks if the driver connection needs the
// session to be reset and if required, resets it.
func (dc *driverConn) resetSession(ctx context.Context) error {
	dc.Lock()
	defer dc.Unlock()

	if !dc.needReset {
		return nil
	}
	cr, ok := dc.ci.(driver.SessionResetter)
	if !ok {
		// The driver offers no reset hook; nothing to do.
		return nil
	}
	return cr.ResetSession(ctx)
}
// validateConnection checks if the connection is valid and can
// still be used. It also marks the session for reset if required.
func (dc *driverConn) validateConnection(needsReset bool) bool {
	dc.Lock()
	defer dc.Unlock()

	if needsReset {
		dc.needReset = true
	}
	cv, ok := dc.ci.(driver.Validator)
	if !ok {
		// Drivers without a Validator are assumed healthy.
		return true
	}
	return cv.IsValid()
}
// prepareLocked prepares the query on dc. When cg == nil the dc must keep track of
// the prepared statements in a pool.
func (dc *driverConn) prepareLocked(ctx context.Context, cg stmtConnGrabber, query string) (*driverStmt, error) {
	si, err := ctxDriverPrepare(ctx, dc.ci, query)
	if err != nil {
		return nil, err
	}
	ds := &driverStmt{Locker: dc, si: si}

	// With a single connection grabber there is no need to manage open
	// statements; otherwise track them so they can be closed before the
	// conn itself. Wrapping each driver.Stmt in a *driverStmt ensures it
	// is only closed once.
	if cg == nil {
		if dc.openStmt == nil {
			dc.openStmt = make(map[*driverStmt]bool)
		}
		dc.openStmt[ds] = true
	}
	return ds, nil
}
// closeDBLocked marks dc closed and returns the deferred finalizer from
// removeDepLocked. The dc.db's Mutex is held by the caller.
func (dc *driverConn) closeDBLocked() func() error {
	dc.Lock()
	defer dc.Unlock()
	if !dc.closed {
		dc.closed = true
		return dc.db.removeDepLocked(dc, dc)
	}
	return func() error { return errors.New("sql: duplicate driverConn close") }
}
// Close marks dc closed under both locks and runs the dependency
// finalizer. The lock ordering here is deliberate: dc's own mutex is
// released before db.mu is taken, and fn() runs with no locks held,
// because removeDep's finalClose path re-acquires locks itself.
func (dc *driverConn) Close() error {
	dc.Lock()
	if dc.closed {
		dc.Unlock()
		return errors.New("sql: duplicate driverConn close")
	}
	dc.closed = true
	dc.Unlock() // not defer; removeDep finalClose calls may need to lock

	// And now updates that require holding dc.mu.Lock.
	dc.db.mu.Lock()
	dc.dbmuClosed = true
	fn := dc.db.removeDepLocked(dc, dc)
	dc.db.mu.Unlock()
	return fn()
}
// finalClose tears the connection down once its reference count hits zero:
// it closes every tracked statement, closes the underlying driver.Conn,
// and updates the pool's counters. Locks are taken and released in short
// scoped sections (withLock) because ds.Close locks dc itself.
func (dc *driverConn) finalClose() error {
	var err error

	// Each *driverStmt has a lock to the dc. Copy the list out of the dc
	// before calling close on each stmt.
	var openStmt []*driverStmt
	withLock(dc, func() {
		openStmt = make([]*driverStmt, 0, len(dc.openStmt))
		for ds := range dc.openStmt {
			openStmt = append(openStmt, ds)
		}
		dc.openStmt = nil
	})
	for _, ds := range openStmt {
		ds.Close()
	}
	withLock(dc, func() {
		dc.finalClosed = true
		err = dc.ci.Close()
		dc.ci = nil
	})

	// Update pool bookkeeping and possibly trigger replacement connections.
	dc.db.mu.Lock()
	dc.db.numOpen--
	dc.db.maybeOpenNewConnections()
	dc.db.mu.Unlock()

	dc.db.numClosed.Add(1)
	return err
}
// driverStmt associates a driver.Stmt with the
// *driverConn from which it came, so the driverConn's lock can be
// held during calls.
type driverStmt struct {
	sync.Locker // the *driverConn
	si          driver.Stmt
	closed      bool
	closeErr    error // return value of previous Close call
}
// Close ensures driver.Stmt is only closed once and always returns the same
// result.
func (ds *driverStmt) Close() error {
	ds.Lock()
	defer ds.Unlock()
	if !ds.closed {
		ds.closed = true
		ds.closeErr = ds.si.Close()
	}
	return ds.closeErr
}
// depSet is a finalCloser's outstanding dependencies.
// Keys are the dependent objects; values are always true.
type depSet map[any]bool // set of true bools

// The finalCloser interface is used by (*DB).addDep and related
// dependency reference counting.
type finalCloser interface {
	// finalClose is called when the reference count of an object
	// goes to zero. (*DB).mu is not held while calling it.
	finalClose() error
}
// addDep notes that x now depends on dep, and x's finalClose won't be
// called until all of x's dependencies are removed with removeDep.
// It acquires db.mu; callers already holding it use addDepLocked.
func (db *DB) addDep(x finalCloser, dep any) {
	db.mu.Lock()
	defer db.mu.Unlock()
	db.addDepLocked(x, dep)
}
// addDepLocked records that x depends on dep. It lazily allocates both
// the outer dependency map and x's per-object set. db.mu must be held.
func (db *DB) addDepLocked(x finalCloser, dep any) {
	if db.dep == nil {
		db.dep = make(map[finalCloser]depSet)
	}
	set, ok := db.dep[x]
	if !ok {
		set = make(depSet)
		db.dep[x] = set
	}
	set[dep] = true
}
// removeDep notes that x no longer depends on dep.
// If x still has dependencies, nil is returned.
// If x no longer has any dependencies, its finalClose method will be
// called and its error value will be returned.
func (db *DB) removeDep(x finalCloser, dep any) error {
	db.mu.Lock()
	fn := db.removeDepLocked(x, dep)
	// Deliberately not defer: fn may be x.finalClose, which must run
	// without db.mu held.
	db.mu.Unlock()
	return fn()
}
// removeDepLocked removes dep from x's dependency set and returns the
// follow-up work for the caller to run after releasing db.mu: either
// x.finalClose (when the last dependency was removed) or a no-op.
// It panics on unpaired add/remove calls. db.mu must be held.
func (db *DB) removeDepLocked(x finalCloser, dep any) func() error {
	deps, ok := db.dep[x]
	if !ok {
		panic(fmt.Sprintf("unpaired removeDep: no deps for %T", x))
	}
	before := len(deps)
	delete(deps, dep)
	if len(deps) == before {
		// Nothing was removed; dep was never registered. Shouldn't happen.
		panic(fmt.Sprintf("unpaired removeDep: no %T dep on %T", dep, x))
	}
	if len(deps) > 0 {
		// Other dependencies remain; nothing to close yet.
		return func() error { return nil }
	}
	// That was the last dependency; x may now be finally closed.
	delete(db.dep, x)
	return x.finalClose
}
// This is the size of the connectionOpener request chan (DB.openerCh).
// This value should be larger than the maximum typical value
// used for DB.maxOpen. If maxOpen is significantly larger than
// connectionRequestQueueSize then it is possible for ALL calls into the *DB
// to block until the connectionOpener can satisfy the backlog of requests.
// It is a var (not const) so tests can shrink it.
var connectionRequestQueueSize = 1000000
// dsnConnector adapts a legacy driver.Driver plus a DSN string to the
// driver.Connector interface, for drivers that do not implement
// driver.DriverContext.
type dsnConnector struct {
	dsn    string
	driver driver.Driver
}

// Connect opens a new connection. The context is ignored because the
// legacy driver.Open API does not accept one.
func (t dsnConnector) Connect(_ context.Context) (driver.Conn, error) {
	return t.driver.Open(t.dsn)
}

// Driver returns the wrapped driver.
func (t dsnConnector) Driver() driver.Driver {
	return t.driver
}
// OpenDB opens a database using a [driver.Connector], allowing drivers to
// bypass a string based data source name.
//
// Most users will open a database via a driver-specific connection
// helper function that returns a [*DB]. No database drivers are included
// in the Go standard library. See https://golang.org/s/sqldrivers for
// a list of third-party drivers.
//
// OpenDB may just validate its arguments without creating a connection
// to the database. To verify that the data source name is valid, call
// [DB.Ping].
//
// The returned [DB] is safe for concurrent use by multiple goroutines
// and maintains its own pool of idle connections. Thus, the OpenDB
// function should be called just once. It is rarely necessary to
// close a [DB].
func OpenDB(c driver.Connector) *DB {
	// ctx lives for the lifetime of the DB; db.stop cancels it,
	// terminating the connectionOpener goroutine started below.
	ctx, cancel := context.WithCancel(context.Background())
	db := &DB{
		connector: c,
		openerCh:  make(chan struct{}, connectionRequestQueueSize),
		lastPut:   make(map[*driverConn]string),
		stop:      cancel,
	}

	go db.connectionOpener(ctx)

	return db
}
// Open opens a database specified by its database driver name and a
// driver-specific data source name, usually consisting of at least a
// database name and connection information.
//
// Most users will open a database via a driver-specific connection
// helper function that returns a [*DB]. No database drivers are included
// in the Go standard library. See https://golang.org/s/sqldrivers for
// a list of third-party drivers.
//
// Open may just validate its arguments without creating a connection
// to the database. To verify that the data source name is valid, call
// [DB.Ping].
//
// The returned [DB] is safe for concurrent use by multiple goroutines
// and maintains its own pool of idle connections. Thus, the Open
// function should be called just once. It is rarely necessary to
// close a [DB].
func Open(driverName, dataSourceName string) (*DB, error) {
	driversMu.RLock()
	d, ok := drivers[driverName]
	driversMu.RUnlock()
	if !ok {
		return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
	}

	driverCtx, ok := d.(driver.DriverContext)
	if !ok {
		// Legacy driver: wrap the DSN so the pool can open connections
		// through the driver.Connector interface.
		return OpenDB(dsnConnector{dsn: dataSourceName, driver: d}), nil
	}

	connector, err := driverCtx.OpenConnector(dataSourceName)
	if err != nil {
		return nil, err
	}
	return OpenDB(connector), nil
}
// pingDC pings the driver connection dc (if the driver implements
// driver.Pinger; otherwise it is a no-op success) and then releases the
// connection back to the pool via release, passing along any ping error.
func (db *DB) pingDC(ctx context.Context, dc *driverConn, release func(error)) error {
	var err error
	if pinger, ok := dc.ci.(driver.Pinger); ok {
		// Hold dc's lock for the driver call, per the driverConn contract.
		withLock(dc, func() {
			err = pinger.Ping(ctx)
		})
	}
	release(err)
	return err
}
// PingContext verifies a connection to the database is still alive,
// establishing a connection if necessary.
func (db *DB) PingContext(ctx context.Context) error {
	var dc *driverConn
	var err error

	// Retry on driver.ErrBadConn, per the standard retry strategy.
	err = db.retry(func(strategy connReuseStrategy) error {
		dc, err = db.conn(ctx, strategy)
		return err
	})

	if err != nil {
		return err
	}

	return db.pingDC(ctx, dc, dc.releaseConn)
}

// Ping verifies a connection to the database is still alive,
// establishing a connection if necessary.
//
// Ping uses [context.Background] internally; to specify the context, use
// [DB.PingContext].
func (db *DB) Ping() error {
	return db.PingContext(context.Background())
}
// Close closes the database and prevents new queries from starting.
// Close then waits for all queries that have started processing on the server
// to finish.
//
// It is rare to Close a [DB], as the [DB] handle is meant to be
// long-lived and shared between many goroutines.
func (db *DB) Close() error {
	db.mu.Lock()
	if db.closed { // Make DB.Close idempotent
		db.mu.Unlock()
		return nil
	}
	if db.cleanerCh != nil {
		// Tell the connectionCleaner goroutine to exit.
		close(db.cleanerCh)
	}
	var err error
	// Collect the deferred close work while holding db.mu, but run it
	// only after releasing the lock (finalClose re-acquires db.mu).
	fns := make([]func() error, 0, len(db.freeConn))
	for _, dc := range db.freeConn {
		fns = append(fns, dc.closeDBLocked())
	}
	db.freeConn = nil
	db.closed = true
	// Wake all waiters in conn(); they will observe errDBClosed.
	db.connRequests.CloseAndRemoveAll()
	db.mu.Unlock()
	for _, fn := range fns {
		err1 := fn()
		if err1 != nil {
			// Keep the last error; any one is enough to report failure.
			err = err1
		}
	}
	// Stop the connectionOpener goroutine.
	db.stop()
	if c, ok := db.connector.(io.Closer); ok {
		err1 := c.Close()
		if err1 != nil {
			err = err1
		}
	}
	return err
}
const defaultMaxIdleConns = 2

// maxIdleConnsLocked reports the effective idle-connection limit,
// decoding the maxIdleCount encoding (0 = unset, negative = none).
// db.mu must be held.
func (db *DB) maxIdleConnsLocked() int {
	if db.maxIdleCount < 0 {
		// SetMaxIdleConns(n <= 0) stores -1: retain no idle connections.
		return 0
	}
	if db.maxIdleCount == 0 {
		// Unset; use the package default.
		// TODO(bradfitz): ask driver, if supported, for its default preference
		return defaultMaxIdleConns
	}
	return db.maxIdleCount
}
// shortestIdleTimeLocked returns the shorter of the idle and lifetime
// limits, treating a non-positive limit as "disabled". db.mu must be held.
func (db *DB) shortestIdleTimeLocked() time.Duration {
	switch {
	case db.maxIdleTime <= 0:
		return db.maxLifetime
	case db.maxLifetime <= 0:
		return db.maxIdleTime
	default:
		return min(db.maxIdleTime, db.maxLifetime)
	}
}
// SetMaxIdleConns sets the maximum number of connections in the idle
// connection pool.
//
// If MaxOpenConns is greater than 0 but less than the new MaxIdleConns,
// then the new MaxIdleConns will be reduced to match the MaxOpenConns limit.
//
// If n <= 0, no idle connections are retained.
//
// The default max idle connections is currently 2. This may change in
// a future release.
func (db *DB) SetMaxIdleConns(n int) {
	db.mu.Lock()
	if n > 0 {
		db.maxIdleCount = n
	} else {
		// No idle connections. (-1 distinguishes "explicitly none"
		// from the zero value "unset"; see maxIdleConnsLocked.)
		db.maxIdleCount = -1
	}
	// Make sure maxIdle doesn't exceed maxOpen
	if db.maxOpen > 0 && db.maxIdleConnsLocked() > db.maxOpen {
		db.maxIdleCount = db.maxOpen
	}
	var closing []*driverConn
	idleCount := len(db.freeConn)
	maxIdle := db.maxIdleConnsLocked()
	if idleCount > maxIdle {
		// closing aliases the tail of the old freeConn backing array;
		// that's safe because freeConn is re-sliced to exclude it.
		closing = db.freeConn[maxIdle:]
		db.freeConn = db.freeConn[:maxIdle]
	}
	db.maxIdleClosed += int64(len(closing))
	db.mu.Unlock()
	// Close surplus connections without holding db.mu.
	for _, c := range closing {
		c.Close()
	}
}
// SetMaxOpenConns sets the maximum number of open connections to the database.
//
// If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than
// MaxIdleConns, then MaxIdleConns will be reduced to match the new
// MaxOpenConns limit.
//
// If n <= 0, then there is no limit on the number of open connections.
// The default is 0 (unlimited).
func (db *DB) SetMaxOpenConns(n int) {
	db.mu.Lock()
	// Negative values mean "unlimited", stored as 0.
	db.maxOpen = max(n, 0)
	shrinkIdle := db.maxOpen > 0 && db.maxIdleConnsLocked() > db.maxOpen
	db.mu.Unlock()
	if shrinkIdle {
		// Re-enter SetMaxIdleConns after dropping db.mu; it takes the
		// lock itself and trims the idle pool to the new open limit.
		db.SetMaxIdleConns(n)
	}
}
// SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
//
// Expired connections may be closed lazily before reuse.
//
// If d <= 0, connections are not closed due to a connection's age.
func (db *DB) SetConnMaxLifetime(d time.Duration) {
	if d < 0 {
		d = 0
	}
	db.mu.Lock()
	// Wake cleaner up when lifetime is shortened.
	if d > 0 && d < db.shortestIdleTimeLocked() && db.cleanerCh != nil {
		// Non-blocking send: the cleaner's channel has capacity 1 and a
		// pending wake-up is already sufficient.
		select {
		case db.cleanerCh <- struct{}{}:
		default:
		}
	}
	db.maxLifetime = d
	db.startCleanerLocked()
	db.mu.Unlock()
}
// SetConnMaxIdleTime sets the maximum amount of time a connection may be idle.
//
// Expired connections may be closed lazily before reuse.
//
// If d <= 0, connections are not closed due to a connection's idle time.
func (db *DB) SetConnMaxIdleTime(d time.Duration) {
	if d < 0 {
		d = 0
	}
	db.mu.Lock()
	defer db.mu.Unlock()

	// Wake cleaner up when idle time is shortened.
	if d > 0 && d < db.shortestIdleTimeLocked() && db.cleanerCh != nil {
		// Non-blocking send: a pending wake-up is already sufficient.
		select {
		case db.cleanerCh <- struct{}{}:
		default:
		}
	}
	db.maxIdleTime = d
	db.startCleanerLocked()
}
// startCleanerLocked starts connectionCleaner if needed: only when some
// expiry limit is set, connections exist to expire, and no cleaner is
// already running. db.mu must be held.
func (db *DB) startCleanerLocked() {
	if (db.maxLifetime > 0 || db.maxIdleTime > 0) && db.numOpen > 0 && db.cleanerCh == nil {
		// Capacity 1 so limit-shortening setters can nudge the cleaner
		// without blocking.
		db.cleanerCh = make(chan struct{}, 1)
		go db.connectionCleaner(db.shortestIdleTimeLocked())
	}
}
// connectionCleaner periodically closes expired free connections. It runs
// in its own goroutine, waking on a timer or on a signal from cleanerCh,
// and exits (clearing cleanerCh) when the DB closes, the pool drains, or
// all expiry limits are disabled. d is the initial wait interval.
func (db *DB) connectionCleaner(d time.Duration) {
	const minInterval = time.Second

	if d < minInterval {
		d = minInterval
	}
	t := time.NewTimer(d)

	for {
		select {
		case <-t.C:
		case <-db.cleanerCh: // maxLifetime was changed or db was closed.
		}

		db.mu.Lock()

		d = db.shortestIdleTimeLocked()
		if db.closed || db.numOpen == 0 || d <= 0 {
			db.cleanerCh = nil
			db.mu.Unlock()
			return
		}

		d, closing := db.connectionCleanerRunLocked(d)
		db.mu.Unlock()
		// Close outside the lock; driverConn.Close takes db.mu itself.
		for _, c := range closing {
			c.Close()
		}

		if d < minInterval {
			d = minInterval
		}

		// Drain the timer before Reset, as required when reusing a timer
		// whose expiry state is unknown.
		if !t.Stop() {
			select {
			case <-t.C:
			default:
			}
		}
		t.Reset(d)
	}
}
// connectionCleanerRunLocked removes connections that should be closed from
// freeConn and returns them alongside an updated duration to the next check
// if a quicker check is required to ensure connections are checked appropriately.
// db.mu must be held; the caller closes the returned connections after
// releasing it.
func (db *DB) connectionCleanerRunLocked(d time.Duration) (time.Duration, []*driverConn) {
	var idleClosing int64
	var closing []*driverConn
	if db.maxIdleTime > 0 {
		// As freeConn is ordered by returnedAt, process
		// in reverse order to minimise the work needed.
		idleSince := time.Now().Add(-db.maxIdleTime)
		last := len(db.freeConn) - 1
		for i := last; i >= 0; i-- {
			c := db.freeConn[i]
			if c.returnedAt.Before(idleSince) {
				// Everything at index <= i is at least as old, so the
				// whole prefix [0:i+1] expires in one cut. The full-slice
				// expression caps closing's capacity so later appends
				// cannot stomp the retained tail.
				i++
				closing = db.freeConn[:i:i]
				db.freeConn = db.freeConn[i:]
				idleClosing = int64(len(closing))
				db.maxIdleTimeClosed += idleClosing
				break
			}
		}

		if len(db.freeConn) > 0 {
			c := db.freeConn[0]
			if d2 := c.returnedAt.Sub(idleSince); d2 < d {
				// Ensure idle connections are cleaned up as soon as
				// possible.
				d = d2
			}
		}
	}

	if db.maxLifetime > 0 {
		expiredSince := time.Now().Add(-db.maxLifetime)
		for i := 0; i < len(db.freeConn); i++ {
			c := db.freeConn[i]
			if c.createdAt.Before(expiredSince) {
				closing = append(closing, c)

				last := len(db.freeConn) - 1
				// Use slow delete as order is required to ensure
				// connections are reused least idle time first.
				copy(db.freeConn[i:], db.freeConn[i+1:])
				db.freeConn[last] = nil
				db.freeConn = db.freeConn[:last]
				i--
			} else if d2 := c.createdAt.Sub(expiredSince); d2 < d {
				// Prevent connections sitting in freeConn when they
				// have expired by updating our next deadline d.
				d = d2
			}
		}
		// idleClosing entries were counted above; only the remainder
		// were closed for lifetime reasons.
		db.maxLifetimeClosed += int64(len(closing)) - idleClosing
	}

	return d, closing
}
// DBStats contains database statistics.
// A snapshot is obtained via [DB.Stats].
type DBStats struct {
	MaxOpenConnections int // Maximum number of open connections to the database.

	// Pool Status
	OpenConnections int // The number of established connections both in use and idle.
	InUse           int // The number of connections currently in use.
	Idle            int // The number of idle connections.

	// Counters
	WaitCount         int64         // The total number of connections waited for.
	WaitDuration      time.Duration // The total time blocked waiting for a new connection.
	MaxIdleClosed     int64         // The total number of connections closed due to SetMaxIdleConns.
	MaxIdleTimeClosed int64         // The total number of connections closed due to SetConnMaxIdleTime.
	MaxLifetimeClosed int64         // The total number of connections closed due to SetConnMaxLifetime.
}
// Stats returns database statistics: a consistent snapshot of the pool
// counters taken under db.mu (the wait duration is read atomically).
func (db *DB) Stats() DBStats {
	wait := db.waitDuration.Load()

	db.mu.Lock()
	defer db.mu.Unlock()

	idle := len(db.freeConn)
	return DBStats{
		MaxOpenConnections: db.maxOpen,

		OpenConnections: db.numOpen,
		InUse:           db.numOpen - idle,
		Idle:            idle,

		WaitCount:         db.waitCount,
		WaitDuration:      time.Duration(wait),
		MaxIdleClosed:     db.maxIdleClosed,
		MaxIdleTimeClosed: db.maxIdleTimeClosed,
		MaxLifetimeClosed: db.maxLifetimeClosed,
	}
}
// Assumes db.mu is locked.
// If there are connRequests and the connection limit hasn't been reached,
// then tell the connectionOpener to open new connections.
func (db *DB) maybeOpenNewConnections() {
	numRequests := db.connRequests.Len()
	if db.maxOpen > 0 {
		numCanOpen := db.maxOpen - db.numOpen
		if numRequests > numCanOpen {
			numRequests = numCanOpen
		}
	}
	for numRequests > 0 {
		db.numOpen++ // optimistically
		numRequests--
		if db.closed {
			// The DB closed while we were signalling; stop. openerCh is
			// no longer being drained once the opener's ctx is canceled.
			return
		}
		// openerCh is deeply buffered (connectionRequestQueueSize), so
		// this send normally does not block.
		db.openerCh <- struct{}{}
	}
}
// Runs in a separate goroutine, opens new connections when requested.
// It is started once by OpenDB and exits when ctx (canceled by db.stop
// in DB.Close) is done.
func (db *DB) connectionOpener(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-db.openerCh:
			db.openNewConnection(ctx)
		}
	}
}
// Open one new connection
func (db *DB) openNewConnection(ctx context.Context) {
	// maybeOpenNewConnections has already executed db.numOpen++ before it sent
	// on db.openerCh. This function must execute db.numOpen-- if the
	// connection fails or is closed before returning.
	ci, err := db.connector.Connect(ctx)
	db.mu.Lock()
	defer db.mu.Unlock()
	if db.closed {
		if err == nil {
			// Discard the connection opened after Close began.
			ci.Close()
		}
		db.numOpen--
		return
	}
	if err != nil {
		db.numOpen--
		// Deliver the error to one waiting request, then try to satisfy
		// any remaining requests with new open attempts.
		db.putConnDBLocked(nil, err)
		db.maybeOpenNewConnections()
		return
	}
	dc := &driverConn{
		db:         db,
		createdAt:  time.Now(),
		returnedAt: time.Now(),
		ci:         ci,
	}
	if db.putConnDBLocked(dc, err) {
		db.addDepLocked(dc, dc)
	} else {
		// Nobody wanted it and the idle pool is full; drop it.
		db.numOpen--
		ci.Close()
	}
}
// connRequest represents one request for a new connection
// When there are no idle connections available, DB.conn will create
// a new connRequest and put it on the db.connRequests list.
// Exactly one of conn or err is non-nil when a request is fulfilled.
type connRequest struct {
	conn *driverConn
	err  error
}

// errDBClosed is returned by operations attempted after DB.Close.
var errDBClosed = errors.New("sql: database is closed")
// conn returns a newly-opened or cached *driverConn.
//
// strategy selects whether a pooled idle connection may be reused
// (cachedOrNewConn) or a brand-new driver connection is required
// (alwaysNewConn). The caller owns the returned connection and must
// return it to the pool (putConn / releaseConn) when done.
func (db *DB) conn(ctx context.Context, strategy connReuseStrategy) (*driverConn, error) {
	db.mu.Lock()
	if db.closed {
		db.mu.Unlock()
		return nil, errDBClosed
	}
	// Check if the context is expired.
	select {
	default:
	case <-ctx.Done():
		db.mu.Unlock()
		return nil, ctx.Err()
	}
	lifetime := db.maxLifetime

	// Prefer a free connection, if possible.
	last := len(db.freeConn) - 1
	if strategy == cachedOrNewConn && last >= 0 {
		// Reuse the lowest idle time connection so we can close
		// connections which remain idle as soon as possible.
		conn := db.freeConn[last]
		db.freeConn = db.freeConn[:last]
		conn.inUse = true
		if conn.expired(lifetime) {
			db.maxLifetimeClosed++
			db.mu.Unlock()
			conn.Close()
			return nil, driver.ErrBadConn
		}
		db.mu.Unlock()

		// Reset the session if required.
		if err := conn.resetSession(ctx); errors.Is(err, driver.ErrBadConn) {
			conn.Close()
			return nil, err
		}

		return conn, nil
	}

	// Out of free connections or we were asked not to use one. If we're not
	// allowed to open any more connections, make a request and wait.
	if db.maxOpen > 0 && db.numOpen >= db.maxOpen {
		// Make the connRequest channel. It's buffered so that the
		// connectionOpener doesn't block while waiting for the req to be read.
		req := make(chan connRequest, 1)
		delHandle := db.connRequests.Add(req)
		db.waitCount++
		db.mu.Unlock()

		waitStart := time.Now()

		// Timeout the connection request with the context.
		select {
		case <-ctx.Done():
			// Remove the connection request and ensure no value has been sent
			// on it after removing.
			db.mu.Lock()
			deleted := db.connRequests.Delete(delHandle)
			db.mu.Unlock()

			db.waitDuration.Add(int64(time.Since(waitStart)))

			// If we failed to delete it, that means either the DB was closed or
			// something else grabbed it and is about to send on it.
			if !deleted {
				// TODO(bradfitz): rather than this best effort select, we
				// should probably start a goroutine to read from req. This best
				// effort select existed before the change to check 'deleted'.
				// But if we know for sure it wasn't deleted and a sender is
				// outstanding, we should probably block on req (in a new
				// goroutine) to get the connection back.
				select {
				default:
				case ret, ok := <-req:
					// A racing sender handed us a conn after cancellation;
					// return it to the pool rather than leaking it.
					if ok && ret.conn != nil {
						db.putConn(ret.conn, ret.err, false)
					}
				}
			}
			return nil, ctx.Err()
		case ret, ok := <-req:
			db.waitDuration.Add(int64(time.Since(waitStart)))

			// The channel is closed (ok == false) when the DB closes; see
			// connRequests.CloseAndRemoveAll in DB.Close.
			if !ok {
				return nil, errDBClosed
			}
			// Only check if the connection is expired if the strategy is cachedOrNewConns.
			// If we require a new connection, just re-use the connection without looking
			// at the expiry time. If it is expired, it will be checked when it is placed
			// back into the connection pool.
			// This prioritizes giving a valid connection to a client over the exact connection
			// lifetime, which could expire exactly after this point anyway.
			if strategy == cachedOrNewConn && ret.err == nil && ret.conn.expired(lifetime) {
				db.mu.Lock()
				db.maxLifetimeClosed++
				db.mu.Unlock()
				ret.conn.Close()
				return nil, driver.ErrBadConn
			}
			if ret.conn == nil {
				return nil, ret.err
			}

			// Reset the session if required.
			if err := ret.conn.resetSession(ctx); errors.Is(err, driver.ErrBadConn) {
				ret.conn.Close()
				return nil, err
			}
			return ret.conn, ret.err
		}
	}

	db.numOpen++ // optimistically
	db.mu.Unlock()
	ci, err := db.connector.Connect(ctx)
	if err != nil {
		db.mu.Lock()
		db.numOpen-- // correct for earlier optimism
		db.maybeOpenNewConnections()
		db.mu.Unlock()
		return nil, err
	}
	db.mu.Lock()
	dc := &driverConn{
		db:         db,
		createdAt:  time.Now(),
		returnedAt: time.Now(),
		ci:         ci,
		inUse:      true,
	}
	db.addDepLocked(dc, dc)
	db.mu.Unlock()
	return dc, nil
}
// putConnHook is a hook for testing.
// When non-nil it is invoked (with db.mu held) just before a connection
// is returned to the free pool.
var putConnHook func(*DB, *driverConn)
// noteUnusedDriverStatement notes that ds is no longer used and should
// be closed whenever possible (when c is next not in use), unless c is
// already closed.
func (db *DB) noteUnusedDriverStatement(c *driverConn, ds *driverStmt) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if c.inUse {
		// Defer the close until the connection is returned to the pool;
		// putConn runs all onPut funcs.
		c.onPut = append(c.onPut, func() {
			ds.Close()
		})
	} else {
		c.Lock()
		fc := c.finalClosed
		c.Unlock()
		// Skip the close if finalClose already ran; it closed all
		// statements itself.
		if !fc {
			ds.Close()
		}
	}
}
// debugGetPut determines whether getConn & putConn calls' stack traces
// are returned for more verbose crashes. Compile-time flag; normally false.
const debugGetPut = false
// putConn adds a connection to the db's free pool.
// err is optionally the last error that occurred on this connection.
// resetSession indicates whether the session should be validated as if
// it will be reset before reuse. Bad or expired connections are closed
// instead of pooled.
func (db *DB) putConn(dc *driverConn, err error, resetSession bool) {
	if !errors.Is(err, driver.ErrBadConn) {
		if !dc.validateConnection(resetSession) {
			// The driver's Validator rejected the connection; discard it.
			err = driver.ErrBadConn
		}
	}
	db.mu.Lock()
	if !dc.inUse {
		db.mu.Unlock()
		if debugGetPut {
			fmt.Printf("putConn(%v) DUPLICATE was: %s\n\nPREVIOUS was: %s", dc, stack(), db.lastPut[dc])
		}
		panic("sql: connection returned that was never out")
	}

	if !errors.Is(err, driver.ErrBadConn) && dc.expired(db.maxLifetime) {
		db.maxLifetimeClosed++
		err = driver.ErrBadConn
	}
	if debugGetPut {
		db.lastPut[dc] = stack()
	}
	dc.inUse = false
	dc.returnedAt = time.Now()

	// Run deferred work (e.g. statement closes queued by
	// noteUnusedDriverStatement) now that the conn is no longer in use.
	for _, fn := range dc.onPut {
		fn()
	}
	dc.onPut = nil

	if errors.Is(err, driver.ErrBadConn) {
		// Don't reuse bad connections.
		// Since the conn is considered bad and is being discarded, treat it
		// as closed. Don't decrement the open count here, finalClose will
		// take care of that.
		db.maybeOpenNewConnections()
		db.mu.Unlock()
		dc.Close()
		return
	}
	if putConnHook != nil {
		putConnHook(db, dc)
	}
	added := db.putConnDBLocked(dc, nil)
	db.mu.Unlock()

	if !added {
		// No waiter took it and the idle pool is full; close it.
		dc.Close()
		return
	}
}
// Satisfy a connRequest or put the driverConn in the idle pool and return true
// or return false.
// putConnDBLocked will satisfy a connRequest if there is one, or it will
// return the *driverConn to the freeConn list if err == nil and the idle
// connection limit will not be exceeded.
// If err != nil, the value of dc is ignored.
// If err == nil, then dc must not equal nil.
// If a connRequest was fulfilled or the *driverConn was placed in the
// freeConn list, then true is returned, otherwise false is returned.
// db.mu must be held.
func (db *DB) putConnDBLocked(dc *driverConn, err error) bool {
	if db.closed {
		return false
	}
	if db.maxOpen > 0 && db.numOpen > db.maxOpen {
		// Over the limit (e.g. after SetMaxOpenConns shrank it); drop.
		return false
	}
	if req, ok := db.connRequests.TakeRandom(); ok {
		if err == nil {
			// Ownership transfers directly to the waiter.
			dc.inUse = true
		}
		// req is buffered (capacity 1), so this send cannot block.
		req <- connRequest{
			conn: dc,
			err:  err,
		}
		return true
	} else if err == nil && !db.closed {
		if db.maxIdleConnsLocked() > len(db.freeConn) {
			db.freeConn = append(db.freeConn, dc)
			db.startCleanerLocked()
			return true
		}
		// Idle pool full; count the rejection for Stats.
		db.maxIdleClosed++
	}
	return false
}
// maxBadConnRetries is the number of maximum retries if the driver returns
// driver.ErrBadConn to signal a broken connection before forcing a new
// connection to be opened.
const maxBadConnRetries = 2

// retry runs fn with the cachedOrNewConn strategy, retrying up to
// maxBadConnRetries times while fn reports driver.ErrBadConn. If cached
// connections keep failing, it makes one final attempt with a forced
// brand-new connection and returns that result.
func (db *DB) retry(fn func(strategy connReuseStrategy) error) error {
	for i := 0; i < maxBadConnRetries; i++ {
		err := fn(cachedOrNewConn)
		// Anything other than a bad-connection report is final.
		if err == nil || !errors.Is(err, driver.ErrBadConn) {
			return err
		}
	}

	return fn(alwaysNewConn)
}
// PrepareContext creates a prepared statement for later queries or executions.
// Multiple queries or executions may be run concurrently from the
// returned statement.
// The caller must call the statement's [*Stmt.Close] method
// when the statement is no longer needed.
//
// The provided context is used for the preparation of the statement, not for the
// execution of the statement.
func (db *DB) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
	var stmt *Stmt
	var err error

	// Retry on driver.ErrBadConn, per the standard retry strategy.
	err = db.retry(func(strategy connReuseStrategy) error {
		stmt, err = db.prepare(ctx, query, strategy)
		return err
	})

	return stmt, err
}

// Prepare creates a prepared statement for later queries or executions.
// Multiple queries or executions may be run concurrently from the
// returned statement.
// The caller must call the statement's [*Stmt.Close] method
// when the statement is no longer needed.
//
// Prepare uses [context.Background] internally; to specify the context, use
// [DB.PrepareContext].
func (db *DB) Prepare(query string) (*Stmt, error) {
	return db.PrepareContext(context.Background(), query)
}

// prepare obtains a connection per strategy and prepares query on it.
func (db *DB) prepare(ctx context.Context, query string, strategy connReuseStrategy) (*Stmt, error) {
	// TODO: check if db.driver supports an optional
	// driver.Preparer interface and call that instead, if so,
	// otherwise we make a prepared statement that's bound
	// to a connection, and to execute this prepared statement
	// we either need to use this connection (if it's free), else
	// get a new connection + re-prepare + execute on that one.
	dc, err := db.conn(ctx, strategy)
	if err != nil {
		return nil, err
	}
	return db.prepareDC(ctx, dc, dc.releaseConn, nil, query)
}
// prepareDC prepares a query on the driverConn and calls release before
// returning. When cg == nil it implies that a connection pool is used, and
// when cg != nil only a single driver connection is used.
func (db *DB) prepareDC(ctx context.Context, dc *driverConn, release func(error), cg stmtConnGrabber, query string) (*Stmt, error) {
	var ds *driverStmt
	var err error
	// release sees the final err value (deferred funcs read variables,
	// not their values at defer time).
	defer func() {
		release(err)
	}()
	withLock(dc, func() {
		ds, err = dc.prepareLocked(ctx, cg, query)
	})
	if err != nil {
		return nil, err
	}
	stmt := &Stmt{
		db:    db,
		query: query,
		cg:    cg,
		cgds:  ds,
	}

	// When cg == nil this statement will need to keep track of various
	// connections they are prepared on and record the stmt dependency on
	// the DB.
	if cg == nil {
		stmt.css = []connStmt{{dc, ds}}
		stmt.lastNumClosed = db.numClosed.Load()
		db.addDep(stmt, stmt)
	}
	return stmt, nil
}
// ExecContext executes a query without returning any rows.
// The args are for any placeholder parameters in the query.
func (db *DB) ExecContext(ctx context.Context, query string, args ...any) (Result, error) {
	var res Result
	var err error

	// Retry on driver.ErrBadConn, per the standard retry strategy.
	err = db.retry(func(strategy connReuseStrategy) error {
		res, err = db.exec(ctx, query, args, strategy)
		return err
	})

	return res, err
}

// Exec executes a query without returning any rows.
// The args are for any placeholder parameters in the query.
//
// Exec uses [context.Background] internally; to specify the context, use
// [DB.ExecContext].
func (db *DB) Exec(query string, args ...any) (Result, error) {
	return db.ExecContext(context.Background(), query, args...)
}

// exec obtains a connection per strategy and executes query on it.
func (db *DB) exec(ctx context.Context, query string, args []any, strategy connReuseStrategy) (Result, error) {
	dc, err := db.conn(ctx, strategy)
	if err != nil {
		return nil, err
	}
	return db.execDC(ctx, dc, dc.releaseConn, query, args)
}
// execDC executes query on dc, releasing the connection (with the final
// error) before returning. It tries the driver's Execer/ExecerContext
// fast path first; if the driver returns driver.ErrSkip, it falls back
// to prepare + execute.
func (db *DB) execDC(ctx context.Context, dc *driverConn, release func(error), query string, args []any) (res Result, err error) {
	defer func() {
		release(err)
	}()
	execerCtx, ok := dc.ci.(driver.ExecerContext)
	var execer driver.Execer
	if !ok {
		execer, ok = dc.ci.(driver.Execer)
	}
	if ok {
		var nvdargs []driver.NamedValue
		var resi driver.Result
		withLock(dc, func() {
			nvdargs, err = driverArgsConnLocked(dc.ci, nil, args)
			if err != nil {
				return
			}
			resi, err = ctxDriverExec(ctx, execerCtx, execer, query, nvdargs)
		})
		// ErrSkip means "use the generic prepare path"; any other result
		// (success or error) is final.
		if err != driver.ErrSkip {
			if err != nil {
				return nil, err
			}
			return driverResult{dc, resi}, nil
		}
	}

	// Fallback: prepare a one-shot statement and execute it.
	var si driver.Stmt
	withLock(dc, func() {
		si, err = ctxDriverPrepare(ctx, dc.ci, query)
	})
	if err != nil {
		return nil, err
	}
	ds := &driverStmt{Locker: dc, si: si}
	defer ds.Close()
	return resultFromStatement(ctx, dc.ci, ds, args...)
}
// QueryContext executes a query that returns rows, typically a SELECT.
// The args are for any placeholder parameters in the query.
func (db *DB) QueryContext(ctx context.Context, query string, args ...any) (*Rows, error) {
	var rows *Rows
	var err error

	// Retry on driver.ErrBadConn, per the standard retry strategy.
	err = db.retry(func(strategy connReuseStrategy) error {
		rows, err = db.query(ctx, query, args, strategy)
		return err
	})

	return rows, err
}

// Query executes a query that returns rows, typically a SELECT.
// The args are for any placeholder parameters in the query.
//
// Query uses [context.Background] internally; to specify the context, use
// [DB.QueryContext].
func (db *DB) Query(query string, args ...any) (*Rows, error) {
	return db.QueryContext(context.Background(), query, args...)
}

// query obtains a connection per strategy and runs query on it.
func (db *DB) query(ctx context.Context, query string, args []any, strategy connReuseStrategy) (*Rows, error) {
	dc, err := db.conn(ctx, strategy)
	if err != nil {
		return nil, err
	}

	return db.queryDC(ctx, nil, dc, dc.releaseConn, query, args)
}
// queryDC executes a query on the given connection.
// The connection gets released by the releaseConn function.
// The ctx context is from a query method and the txctx context is from an
// optional transaction context.
// It tries the driver's Queryer/QueryerContext fast path first; on
// driver.ErrSkip it falls back to prepare + query. On success, ownership
// of dc transfers to the returned *Rows, which releases it on close.
func (db *DB) queryDC(ctx, txctx context.Context, dc *driverConn, releaseConn func(error), query string, args []any) (*Rows, error) {
	queryerCtx, ok := dc.ci.(driver.QueryerContext)
	var queryer driver.Queryer
	if !ok {
		queryer, ok = dc.ci.(driver.Queryer)
	}
	if ok {
		var nvdargs []driver.NamedValue
		var rowsi driver.Rows
		var err error
		withLock(dc, func() {
			nvdargs, err = driverArgsConnLocked(dc.ci, nil, args)
			if err != nil {
				return
			}
			rowsi, err = ctxDriverQuery(ctx, queryerCtx, queryer, query, nvdargs)
		})
		// ErrSkip means "use the generic prepare path"; any other result
		// (success or error) is final.
		if err != driver.ErrSkip {
			if err != nil {
				releaseConn(err)
				return nil, err
			}
			// Note: ownership of dc passes to the *Rows, to be freed
			// with releaseConn.
			rows := &Rows{
				dc:          dc,
				releaseConn: releaseConn,
				rowsi:       rowsi,
			}
			rows.initContextClose(ctx, txctx)
			return rows, nil
		}
	}

	// Fallback: prepare a one-shot statement and query it.
	var si driver.Stmt
	var err error
	withLock(dc, func() {
		si, err = ctxDriverPrepare(ctx, dc.ci, query)
	})
	if err != nil {
		releaseConn(err)
		return nil, err
	}

	ds := &driverStmt{Locker: dc, si: si}
	rowsi, err := rowsiFromStatement(ctx, dc.ci, ds, args...)
	if err != nil {
		ds.Close()
		releaseConn(err)
		return nil, err
	}

	// Note: ownership of ci passes to the *Rows, to be freed
	// with releaseConn. closeStmt makes Rows.Close close the one-shot
	// statement as well.
	rows := &Rows{
		dc:          dc,
		releaseConn: releaseConn,
		rowsi:       rowsi,
		closeStmt:   ds,
	}
	rows.initContextClose(ctx, txctx)
	return rows, nil
}
// QueryRowContext executes a query that is expected to return at most one row.
// QueryRowContext always returns a non-nil value. Errors are deferred until
// [Row]'s Scan method is called.
// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
// Otherwise, [*Row.Scan] scans the first selected row and discards
// the rest.
func (db *DB) QueryRowContext(ctx context.Context, query string, args ...any) *Row {
	// Any query error is carried inside the Row and surfaced by Scan.
	rows, err := db.QueryContext(ctx, query, args...)
	return &Row{rows: rows, err: err}
}

// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until
// [Row]'s Scan method is called.
// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
// Otherwise, [*Row.Scan] scans the first selected row and discards
// the rest.
//
// QueryRow uses [context.Background] internally; to specify the context, use
// [DB.QueryRowContext].
func (db *DB) QueryRow(query string, args ...any) *Row {
	return db.QueryRowContext(context.Background(), query, args...)
}
// BeginTx starts a transaction.
//
// The provided context is used until the transaction is committed or rolled back.
// If the context is canceled, the sql package will roll back
// the transaction. [Tx.Commit] will return an error if the context provided to
// BeginTx is canceled.
//
// The provided [TxOptions] is optional and may be nil if defaults should be used.
// If a non-default isolation level is used that the driver doesn't support,
// an error will be returned.
func (db *DB) BeginTx(ctx context.Context, opts *TxOptions) (*Tx, error) {
	var tx *Tx
	var err error

	// Retry on driver.ErrBadConn, per the standard retry strategy.
	err = db.retry(func(strategy connReuseStrategy) error {
		tx, err = db.begin(ctx, opts, strategy)
		return err
	})

	return tx, err
}

// Begin starts a transaction. The default isolation level is dependent on
// the driver.
//
// Begin uses [context.Background] internally; to specify the context, use
// [DB.BeginTx].
func (db *DB) Begin() (*Tx, error) {
	return db.BeginTx(context.Background(), nil)
}

// begin obtains a connection per strategy and starts a transaction on it.
func (db *DB) begin(ctx context.Context, opts *TxOptions, strategy connReuseStrategy) (tx *Tx, err error) {
	dc, err := db.conn(ctx, strategy)
	if err != nil {
		return nil, err
	}
	return db.beginDC(ctx, dc, dc.releaseConn, opts)
}
// beginDC starts a transaction. The provided dc must be valid and ready to use.
// On success the connection is owned by the returned *Tx until commit or
// rollback; on failure it is released immediately via release.
func (db *DB) beginDC(ctx context.Context, dc *driverConn, release func(error), opts *TxOptions) (tx *Tx, err error) {
	var txi driver.Tx
	keepConnOnRollback := false
	withLock(dc, func() {
		// Only keep the connection after a rollback if the driver can
		// both reset and validate the session; otherwise discard it.
		_, hasSessionResetter := dc.ci.(driver.SessionResetter)
		_, hasConnectionValidator := dc.ci.(driver.Validator)
		keepConnOnRollback = hasSessionResetter && hasConnectionValidator
		txi, err = ctxDriverBegin(ctx, opts, dc.ci)
	})
	if err != nil {
		release(err)
		return nil, err
	}

	// Schedule the transaction to rollback when the context is canceled.
	// The cancel function in Tx will be called after done is set to true.
	ctx, cancel := context.WithCancel(ctx)
	tx = &Tx{
		db:                 db,
		dc:                 dc,
		releaseConn:        release,
		txi:                txi,
		cancel:             cancel,
		keepConnOnRollback: keepConnOnRollback,
		ctx:                ctx,
	}
	go tx.awaitDone()
	return tx, nil
}
// Driver returns the database's underlying driver.
func (db *DB) Driver() driver.Driver {
	return db.connector.Driver()
}

// ErrConnDone is returned by any operation that is performed on a connection
// that has already been returned to the connection pool.
var ErrConnDone = errors.New("sql: connection is already closed")
// Conn returns a single connection by either opening a new connection
// or returning an existing connection from the connection pool. Conn will
// block until either a connection is returned or ctx is canceled.
// Queries run on the same Conn will be run in the same database session.
//
// Every Conn must be returned to the database pool after use by
// calling [Conn.Close].
func (db *DB) Conn(ctx context.Context) (*Conn, error) {
	var dc *driverConn
	var err error
	// db.retry re-runs the grab with a different connReuseStrategy on
	// retryable errors; dc and err capture the last attempt's result.
	err = db.retry(func(strategy connReuseStrategy) error {
		dc, err = db.conn(ctx, strategy)
		return err
	})
	if err != nil {
		return nil, err
	}

	conn := &Conn{
		db: db,
		dc: dc,
	}
	return conn, nil
}
// releaseConn is the signature of the callback used to return a driver
// connection when an operation completes; the error argument reports how
// the operation ended.
type releaseConn func(error)

// Conn represents a single database connection rather than a pool of database
// connections. Prefer running queries from [DB] unless there is a specific
// need for a continuous single database connection.
//
// A Conn must call [Conn.Close] to return the connection to the database pool
// and may do so concurrently with a running query.
//
// After a call to [Conn.Close], all operations on the
// connection fail with [ErrConnDone].
type Conn struct {
	db *DB

	// closemu prevents the connection from closing while there
	// is an active query. It is held for read during queries
	// and exclusively during close.
	closemu closingMutex

	// dc is owned until close, at which point
	// it's returned to the connection pool.
	dc *driverConn

	// done transitions from false to true exactly once, on close.
	// Once done, all operations fail with ErrConnDone.
	done atomic.Bool

	// releaseConnOnce guards the lazy initialization of releaseConnCache.
	releaseConnOnce sync.Once
	// releaseConnCache is a cache of c.closemuRUnlockCondReleaseConn
	// to save allocations in a call to grabConn.
	releaseConnCache releaseConn
}
// grabConn takes a context to implement stmtConnGrabber
// but the context is not used.
func (c *Conn) grabConn(context.Context) (*driverConn, releaseConn, error) {
	if c.done.Load() {
		return nil, nil, ErrConnDone
	}
	// Create the method value once and cache it, saving an allocation on
	// every subsequent grab.
	c.releaseConnOnce.Do(func() {
		c.releaseConnCache = c.closemuRUnlockCondReleaseConn
	})
	// Hold closemu for read until the returned release func runs, so the
	// Conn cannot close while the operation is in flight.
	c.closemu.RLock()
	return c.dc, c.releaseConnCache, nil
}
// PingContext verifies the connection to the database is still alive.
func (c *Conn) PingContext(ctx context.Context) error {
	conn, releaseFn, grabErr := c.grabConn(ctx)
	if grabErr != nil {
		return grabErr
	}
	return c.db.pingDC(ctx, conn, releaseFn)
}
// ExecContext executes a query without returning any rows.
// The args are for any placeholder parameters in the query.
func (c *Conn) ExecContext(ctx context.Context, query string, args ...any) (Result, error) {
	conn, releaseFn, grabErr := c.grabConn(ctx)
	if grabErr != nil {
		return nil, grabErr
	}
	return c.db.execDC(ctx, conn, releaseFn, query, args)
}
// QueryContext executes a query that returns rows, typically a SELECT.
// The args are for any placeholder parameters in the query.
func (c *Conn) QueryContext(ctx context.Context, query string, args ...any) (*Rows, error) {
	conn, releaseFn, grabErr := c.grabConn(ctx)
	if grabErr != nil {
		return nil, grabErr
	}
	// A Conn-level query has no transaction context, hence the nil txctx.
	return c.db.queryDC(ctx, nil, conn, releaseFn, query, args)
}
// QueryRowContext executes a query that is expected to return at most one row.
// QueryRowContext always returns a non-nil value. Errors are deferred until
// the [*Row.Scan] method is called.
// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
// Otherwise, the [*Row.Scan] scans the first selected row and discards
// the rest.
func (c *Conn) QueryRowContext(ctx context.Context, query string, args ...any) *Row {
	r, qerr := c.QueryContext(ctx, query, args...)
	// Any error is carried inside the Row and surfaced by Scan.
	return &Row{rows: r, err: qerr}
}
// PrepareContext creates a prepared statement for later queries or executions.
// Multiple queries or executions may be run concurrently from the
// returned statement.
// The caller must call the statement's [*Stmt.Close] method
// when the statement is no longer needed.
//
// The provided context is used for the preparation of the statement, not for the
// execution of the statement.
func (c *Conn) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
	conn, releaseFn, grabErr := c.grabConn(ctx)
	if grabErr != nil {
		return nil, grabErr
	}
	// Passing c binds the resulting Stmt to this single connection.
	return c.db.prepareDC(ctx, conn, releaseFn, c, query)
}
// Raw executes f exposing the underlying driver connection for the
// duration of f. The driverConn must not be used outside of f.
//
// Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable
// until [Conn.Close] is called.
func (c *Conn) Raw(f func(driverConn any) error) (err error) {
	var dc *driverConn
	var release releaseConn

	// grabConn takes a context to implement stmtConnGrabber, but the context is not used.
	dc, release, err = c.grabConn(nil)
	if err != nil {
		return
	}
	fPanic := true
	dc.Mutex.Lock()
	defer func() {
		dc.Mutex.Unlock()

		// If f panics fPanic will remain true.
		// Ensure an error is passed to release so the connection
		// may be discarded.
		if fPanic {
			err = driver.ErrBadConn
		}
		release(err)
	}()
	err = f(dc.ci)
	// f returned normally; clear the flag so the deferred release
	// reports f's actual error instead of ErrBadConn.
	fPanic = false
	return
}
// BeginTx starts a transaction.
//
// The provided context is used until the transaction is committed or rolled back.
// If the context is canceled, the sql package will roll back
// the transaction. [Tx.Commit] will return an error if the context provided to
// BeginTx is canceled.
//
// The provided [TxOptions] is optional and may be nil if defaults should be used.
// If a non-default isolation level is used that the driver doesn't support,
// an error will be returned.
func (c *Conn) BeginTx(ctx context.Context, opts *TxOptions) (*Tx, error) {
	conn, releaseFn, grabErr := c.grabConn(ctx)
	if grabErr != nil {
		return nil, grabErr
	}
	// beginDC takes ownership of releaseFn and invokes it when the Tx ends.
	return c.db.beginDC(ctx, conn, releaseFn, opts)
}
// closemuRUnlockCondReleaseConn read unlocks closemu
// as the sql operation is done with the dc.
// If the operation ended with driver.ErrBadConn, the Conn is closed so
// the bad connection is not handed out again.
func (c *Conn) closemuRUnlockCondReleaseConn(err error) {
	c.closemu.RUnlock()
	if errors.Is(err, driver.ErrBadConn) {
		c.close(err)
	}
}

// txCtx implements stmtConnGrabber; a Conn has no transaction context.
func (c *Conn) txCtx() context.Context {
	return nil
}
// close marks the Conn done, returns the driver connection to the pool
// with err, and reports ErrConnDone if the Conn was already closed.
func (c *Conn) close(err error) error {
	// CompareAndSwap guarantees the body below runs at most once.
	if !c.done.CompareAndSwap(false, true) {
		return ErrConnDone
	}

	// Lock around releasing the driver connection
	// to ensure all queries have been stopped before doing so.
	c.closemu.Lock()
	defer c.closemu.Unlock()

	c.dc.releaseConn(err)
	c.dc = nil
	c.db = nil
	return err
}

// Close returns the connection to the connection pool.
// All operations after a Close will return with [ErrConnDone].
// Close is safe to call concurrently with other operations and will
// block until all other operations finish. It may be useful to first
// cancel any used context and then call close directly after.
func (c *Conn) Close() error {
	return c.close(nil)
}
// Tx is an in-progress database transaction.
//
// A transaction must end with a call to [Tx.Commit] or [Tx.Rollback].
//
// After a call to [Tx.Commit] or [Tx.Rollback], all operations on the
// transaction fail with [ErrTxDone].
//
// The statements prepared for a transaction by calling
// the transaction's [Tx.Prepare] or [Tx.Stmt] methods are closed
// by the call to [Tx.Commit] or [Tx.Rollback].
type Tx struct {
	db *DB

	// closemu prevents the transaction from closing while there
	// is an active query. It is held for read during queries
	// and exclusively during close.
	closemu closingMutex

	// dc is owned exclusively until Commit or Rollback, at which point
	// it's returned with putConn.
	dc  *driverConn
	txi driver.Tx

	// releaseConn is called once the Tx is closed to release
	// any held driverConn back to the pool.
	releaseConn func(error)

	// done transitions from false to true exactly once, on Commit
	// or Rollback. Once done, all operations fail with
	// ErrTxDone.
	done atomic.Bool

	// keepConnOnRollback is true if the driver knows
	// how to reset the connection's session and if need be discard
	// the connection.
	keepConnOnRollback bool

	// stmts holds all Stmts prepared for this transaction. These will
	// be closed after the transaction has been committed or rolled back.
	stmts struct {
		sync.Mutex
		v []*Stmt
	}

	// cancel is called after done transitions from 0 to 1.
	cancel func()

	// ctx lives for the life of the transaction.
	ctx context.Context
}
// awaitDone blocks until the context in Tx is canceled and rolls back
// the transaction if it's not already done.
func (tx *Tx) awaitDone() {
	// Wait for either the transaction to be committed or rolled
	// back, or for the associated context to be closed.
	<-tx.ctx.Done()

	// Discard and close the connection used to ensure the
	// transaction is closed and the resources are released. This
	// rollback does nothing if the transaction has already been
	// committed or rolled back.
	// Do not discard the connection if the connection knows
	// how to reset the session.
	discardConnection := !tx.keepConnOnRollback
	tx.rollback(discardConnection)
}

// isDone reports whether the transaction has been committed or rolled back.
func (tx *Tx) isDone() bool {
	return tx.done.Load()
}

// ErrTxDone is returned by any operation that is performed on a transaction
// that has already been committed or rolled back.
var ErrTxDone = errors.New("sql: transaction has already been committed or rolled back")
// close returns the connection to the pool and
// must only be called by Tx.rollback or Tx.Commit while
// tx is already canceled and won't be executed concurrently.
func (tx *Tx) close(err error) {
	tx.releaseConn(err)
	// Drop references so the connection and driver transaction can be
	// collected and any further use fails fast.
	tx.dc = nil
	tx.txi = nil
}

// hookTxGrabConn specifies an optional hook to be called on
// a successful call to (*Tx).grabConn. For tests.
var hookTxGrabConn func()
// grabConn returns the transaction's driver connection and its release
// func, or an error if ctx is canceled or the transaction is done.
func (tx *Tx) grabConn(ctx context.Context) (*driverConn, releaseConn, error) {
	// Fail fast if the context is already canceled; the empty default
	// case makes this a non-blocking check.
	select {
	default:
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	}

	// closemu.RLock must come before the check for isDone to prevent the Tx from
	// closing while a query is executing.
	tx.closemu.RLock()
	if tx.isDone() {
		tx.closemu.RUnlock()
		return nil, nil, ErrTxDone
	}
	if hookTxGrabConn != nil { // test hook
		hookTxGrabConn()
	}
	return tx.dc, tx.closemuRUnlockRelease, nil
}

// txCtx implements stmtConnGrabber by exposing the transaction context.
func (tx *Tx) txCtx() context.Context {
	return tx.ctx
}
// closemuRUnlockRelease is used as a func(error) method value in
// [DB.ExecContext] and [DB.QueryContext]. Unlocking in the releaseConn keeps
// the driver conn from being returned to the connection pool until
// the Rows has been closed.
func (tx *Tx) closemuRUnlockRelease(error) {
	tx.closemu.RUnlock()
}

// closePrepared closes all Stmts prepared for this transaction.
func (tx *Tx) closePrepared() {
	tx.stmts.Lock()
	defer tx.stmts.Unlock()
	for _, stmt := range tx.stmts.v {
		stmt.Close()
	}
}
// Commit commits the transaction.
func (tx *Tx) Commit() error {
	// Check context first to avoid transaction leak.
	// If this check were placed after the tx.done CompareAndSwap, we
	// could not ensure consistency between tx.done and the actual
	// COMMIT operation.
	select {
	default:
	case <-tx.ctx.Done():
		if tx.done.Load() {
			return ErrTxDone
		}
		return tx.ctx.Err()
	}
	if !tx.done.CompareAndSwap(false, true) {
		return ErrTxDone
	}

	// Cancel the Tx to release any active R-closemu locks.
	// This is safe to do because tx.done has already transitioned
	// from 0 to 1. Hold the W-closemu lock prior to rollback
	// to ensure no other connection has an active query.
	tx.cancel()
	tx.closemu.Lock()
	tx.closemu.Unlock()

	var err error
	withLock(tx.dc, func() {
		err = tx.txi.Commit()
	})
	// Skip closing prepared statements on a bad connection.
	if !errors.Is(err, driver.ErrBadConn) {
		tx.closePrepared()
	}
	tx.close(err)
	return err
}
// rollbackHook is an optional hook invoked at the start of rollback. For tests.
var rollbackHook func()

// rollback aborts the transaction and optionally forces the pool to discard
// the connection.
func (tx *Tx) rollback(discardConn bool) error {
	if !tx.done.CompareAndSwap(false, true) {
		return ErrTxDone
	}

	if rollbackHook != nil {
		rollbackHook()
	}

	// Cancel the Tx to release any active R-closemu locks.
	// This is safe to do because tx.done has already transitioned
	// from 0 to 1. Hold the W-closemu lock prior to rollback
	// to ensure no other connection has an active query.
	tx.cancel()
	tx.closemu.Lock()
	tx.closemu.Unlock()

	var err error
	withLock(tx.dc, func() {
		err = tx.txi.Rollback()
	})
	// Skip closing prepared statements on a bad connection.
	if !errors.Is(err, driver.ErrBadConn) {
		tx.closePrepared()
	}
	// Report ErrBadConn to the pool so the connection is discarded.
	if discardConn {
		err = driver.ErrBadConn
	}
	tx.close(err)
	return err
}
// Rollback aborts the transaction.
func (tx *Tx) Rollback() error {
	// false: do not force the pool to discard the connection.
	return tx.rollback(false)
}
// PrepareContext creates a prepared statement for use within a transaction.
//
// The returned statement operates within the transaction and will be closed
// when the transaction has been committed or rolled back.
//
// To use an existing prepared statement on this transaction, see [Tx.Stmt].
//
// The provided context will be used for the preparation of the context, not
// for the execution of the returned statement. The returned statement
// will run in the transaction context.
func (tx *Tx) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
	conn, releaseFn, err := tx.grabConn(ctx)
	if err != nil {
		return nil, err
	}

	prepared, err := tx.db.prepareDC(ctx, conn, releaseFn, tx, query)
	if err != nil {
		return nil, err
	}

	// Track the statement so the Tx can close it on Commit/Rollback.
	tx.stmts.Lock()
	tx.stmts.v = append(tx.stmts.v, prepared)
	tx.stmts.Unlock()

	return prepared, nil
}
// Prepare creates a prepared statement for use within a transaction.
//
// The returned statement operates within the transaction and will be closed
// when the transaction has been committed or rolled back.
//
// To use an existing prepared statement on this transaction, see [Tx.Stmt].
//
// Prepare uses [context.Background] internally; to specify the context, use
// [Tx.PrepareContext].
func (tx *Tx) Prepare(query string) (*Stmt, error) {
	// Delegate to PrepareContext with a background context.
	return tx.PrepareContext(context.Background(), query)
}
// StmtContext returns a transaction-specific prepared statement from
// an existing statement.
//
// Example:
//
//	updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
//	...
//	tx, err := db.Begin()
//	...
//	res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203)
//
// The provided context is used for the preparation of the statement, not for the
// execution of the statement.
//
// The returned statement operates within the transaction and will be closed
// when the transaction has been committed or rolled back.
func (tx *Tx) StmtContext(ctx context.Context, stmt *Stmt) *Stmt {
	dc, release, err := tx.grabConn(ctx)
	if err != nil {
		// Errors are carried on the returned Stmt and surfaced on use.
		return &Stmt{stickyErr: err}
	}
	defer release(nil)

	if tx.db != stmt.db {
		return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")}
	}
	var si driver.Stmt
	var parentStmt *Stmt
	stmt.mu.Lock()
	if stmt.closed || stmt.cg != nil {
		// If the statement has been closed or already belongs to a
		// transaction, we can't reuse it in this connection.
		// Since tx.StmtContext should never need to be called with a
		// Stmt already belonging to tx, we ignore this edge case and
		// re-prepare the statement in this case. No need to add
		// code-complexity for this.
		stmt.mu.Unlock()
		withLock(dc, func() {
			si, err = ctxDriverPrepare(ctx, dc.ci, stmt.query)
		})
		if err != nil {
			return &Stmt{stickyErr: err}
		}
	} else {
		stmt.removeClosedStmtLocked()
		// See if the statement has already been prepared on this connection,
		// and reuse it if possible.
		for _, v := range stmt.css {
			if v.dc == dc {
				si = v.ds.si
				break
			}
		}

		stmt.mu.Unlock()

		if si == nil {
			// Not yet prepared on this connection; do so now.
			var ds *driverStmt
			withLock(dc, func() {
				ds, err = stmt.prepareOnConnLocked(ctx, dc)
			})
			if err != nil {
				return &Stmt{stickyErr: err}
			}
			si = ds.si
		}
		parentStmt = stmt
	}

	txs := &Stmt{
		db: tx.db,
		cg: tx,
		cgds: &driverStmt{
			Locker: dc,
			si:     si,
		},
		parentStmt: parentStmt,
		query:      stmt.query,
	}
	if parentStmt != nil {
		// Record the dependency so the parent Stmt is not finally closed
		// while this transaction statement still uses its driver statement.
		tx.db.addDep(parentStmt, txs)
	}
	// Track the statement so the Tx can close it on Commit/Rollback.
	tx.stmts.Lock()
	tx.stmts.v = append(tx.stmts.v, txs)
	tx.stmts.Unlock()
	return txs
}
// Stmt returns a transaction-specific prepared statement from
// an existing statement.
//
// Example:
//
//	updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
//	...
//	tx, err := db.Begin()
//	...
//	res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
//
// The returned statement operates within the transaction and will be closed
// when the transaction has been committed or rolled back.
//
// Stmt uses [context.Background] internally; to specify the context, use
// [Tx.StmtContext].
func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
	// Delegate to StmtContext with a background context.
	return tx.StmtContext(context.Background(), stmt)
}
// ExecContext executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
func (tx *Tx) ExecContext(ctx context.Context, query string, args ...any) (Result, error) {
	conn, releaseFn, grabErr := tx.grabConn(ctx)
	if grabErr != nil {
		return nil, grabErr
	}
	return tx.db.execDC(ctx, conn, releaseFn, query, args)
}
// Exec executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
//
// Exec uses [context.Background] internally; to specify the context, use
// [Tx.ExecContext].
func (tx *Tx) Exec(query string, args ...any) (Result, error) {
	return tx.ExecContext(context.Background(), query, args...)
}

// QueryContext executes a query that returns rows, typically a SELECT.
func (tx *Tx) QueryContext(ctx context.Context, query string, args ...any) (*Rows, error) {
	dc, release, err := tx.grabConn(ctx)
	if err != nil {
		return nil, err
	}

	// Pass tx.ctx as the txctx so the Rows is also closed when the
	// transaction ends.
	return tx.db.queryDC(ctx, tx.ctx, dc, release, query, args)
}

// Query executes a query that returns rows, typically a SELECT.
//
// Query uses [context.Background] internally; to specify the context, use
// [Tx.QueryContext].
func (tx *Tx) Query(query string, args ...any) (*Rows, error) {
	return tx.QueryContext(context.Background(), query, args...)
}

// QueryRowContext executes a query that is expected to return at most one row.
// QueryRowContext always returns a non-nil value. Errors are deferred until
// [Row]'s Scan method is called.
// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
// Otherwise, the [*Row.Scan] scans the first selected row and discards
// the rest.
func (tx *Tx) QueryRowContext(ctx context.Context, query string, args ...any) *Row {
	rows, err := tx.QueryContext(ctx, query, args...)
	// Any error is carried inside the Row and surfaced by Scan.
	return &Row{rows: rows, err: err}
}

// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until
// [Row]'s Scan method is called.
// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
// Otherwise, the [*Row.Scan] scans the first selected row and discards
// the rest.
//
// QueryRow uses [context.Background] internally; to specify the context, use
// [Tx.QueryRowContext].
func (tx *Tx) QueryRow(query string, args ...any) *Row {
	return tx.QueryRowContext(context.Background(), query, args...)
}
// connStmt is a prepared statement on a particular connection.
type connStmt struct {
	dc *driverConn
	ds *driverStmt
}

// stmtConnGrabber represents a Tx or Conn that will return the underlying
// driverConn and release function.
type stmtConnGrabber interface {
	// grabConn returns the driverConn and the associated release function
	// that must be called when the operation completes.
	grabConn(context.Context) (*driverConn, releaseConn, error)

	// txCtx returns the transaction context if available.
	// The returned context should be selected on along with
	// any query context when awaiting a cancel.
	txCtx() context.Context
}

// Compile-time checks that Tx and Conn satisfy stmtConnGrabber.
var (
	_ stmtConnGrabber = &Tx{}
	_ stmtConnGrabber = &Conn{}
)
// Stmt is a prepared statement.
// A Stmt is safe for concurrent use by multiple goroutines.
//
// If a Stmt is prepared on a [Tx] or [Conn], it will be bound to a single
// underlying connection forever. If the [Tx] or [Conn] closes, the Stmt will
// become unusable and all operations will return an error.
// If a Stmt is prepared on a [DB], it will remain usable for the lifetime of the
// [DB]. When the Stmt needs to execute on a new underlying connection, it will
// prepare itself on the new connection automatically.
type Stmt struct {
	// Immutable:
	db        *DB    // where we came from
	query     string // that created the Stmt
	stickyErr error  // if non-nil, this error is returned for all operations

	// closemu is held exclusively during close, for read otherwise.
	closemu closingMutex

	// If Stmt is prepared on a Tx or Conn then cg is present and will
	// only ever grab a connection from cg.
	// If cg is nil then the Stmt must grab an arbitrary connection
	// from db and determine if it must prepare the stmt again by
	// inspecting css.
	cg   stmtConnGrabber
	cgds *driverStmt

	// parentStmt is set when a transaction-specific statement
	// is requested from an identical statement prepared on the same
	// conn. parentStmt is used to track the dependency of this statement
	// on its originating ("parent") statement so that parentStmt may
	// be closed by the user without them having to know whether or not
	// any transactions are still using it.
	parentStmt *Stmt

	mu     sync.Mutex // protects the rest of the fields
	closed bool

	// css is a list of underlying driver statement interfaces
	// that are valid on particular connections. This is only
	// used if cg == nil and one is found that has idle
	// connections. If cg != nil, cgds is always used.
	css []connStmt

	// lastNumClosed is copied from db.numClosed when Stmt is created
	// without tx and closed connections in css are removed.
	lastNumClosed uint64
}
// ExecContext executes a prepared statement with the given arguments and
// returns a [Result] summarizing the effect of the statement.
func (s *Stmt) ExecContext(ctx context.Context, args ...any) (Result, error) {
	// Hold closemu for read so Close cannot proceed mid-execution.
	s.closemu.RLock()
	defer s.closemu.RUnlock()

	var res Result
	err := s.db.retry(func(strategy connReuseStrategy) error {
		dc, releaseConn, ds, err := s.connStmt(ctx, strategy)
		if err != nil {
			return err
		}

		res, err = resultFromStatement(ctx, dc.ci, ds, args...)
		releaseConn(err)
		return err
	})

	return res, err
}
// Exec executes a prepared statement with the given arguments and
// returns a [Result] summarizing the effect of the statement.
//
// Exec uses [context.Background] internally; to specify the context, use
// [Stmt.ExecContext].
func (s *Stmt) Exec(args ...any) (Result, error) {
	// Delegate to ExecContext with a background context.
	return s.ExecContext(context.Background(), args...)
}
// resultFromStatement executes the prepared driver statement ds with args
// on connection ci and wraps the driver result. The driver statement is
// locked for the duration of the call.
func resultFromStatement(ctx context.Context, ci driver.Conn, ds *driverStmt, args ...any) (Result, error) {
	ds.Lock()
	defer ds.Unlock()

	dargs, convErr := driverArgsConnLocked(ci, ds, args)
	if convErr != nil {
		return nil, convErr
	}

	resi, execErr := ctxDriverStmtExec(ctx, ds.si, dargs)
	if execErr != nil {
		return nil, execErr
	}
	return driverResult{ds.Locker, resi}, nil
}
// removeClosedStmtLocked removes closed conns in s.css.
//
// To avoid lock contention on DB.mu, we do it only when
// s.db.numClosed - s.lastNumClosed is large enough.
func (s *Stmt) removeClosedStmtLocked() {
	// t is the cleanup threshold: half the tracked statements plus one,
	// capped at 10.
	t := len(s.css)/2 + 1
	if t > 10 {
		t = 10
	}
	dbClosed := s.db.numClosed.Load()
	if dbClosed-s.lastNumClosed < uint64(t) {
		return
	}

	s.db.mu.Lock()
	for i := 0; i < len(s.css); i++ {
		if s.css[i].dc.dbmuClosed {
			// Swap-delete: move the last element into slot i, then shrink.
			s.css[i] = s.css[len(s.css)-1]
			// Zero out the last element (for GC) before shrinking the slice.
			s.css[len(s.css)-1] = connStmt{}
			s.css = s.css[:len(s.css)-1]
			// Re-examine the element that was swapped into position i.
			i--
		}
	}
	s.db.mu.Unlock()
	s.lastNumClosed = dbClosed
}
// connStmt returns a free driver connection on which to execute the
// statement, a function to call to release the connection, and a
// statement bound to that connection.
func (s *Stmt) connStmt(ctx context.Context, strategy connReuseStrategy) (dc *driverConn, releaseConn func(error), ds *driverStmt, err error) {
	if err = s.stickyErr; err != nil {
		return
	}
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		err = errors.New("sql: statement is closed")
		return
	}

	// In a transaction or connection, we always use the connection that the
	// stmt was created on.
	if s.cg != nil {
		s.mu.Unlock()
		dc, releaseConn, err = s.cg.grabConn(ctx) // blocks, waiting for the connection.
		if err != nil {
			return
		}
		return dc, releaseConn, s.cgds, nil
	}

	// Opportunistically prune statements on connections the pool has closed.
	s.removeClosedStmtLocked()
	s.mu.Unlock()

	dc, err = s.db.conn(ctx, strategy)
	if err != nil {
		return nil, nil, nil, err
	}

	// Reuse an existing prepared statement on this connection, if any.
	s.mu.Lock()
	for _, v := range s.css {
		if v.dc == dc {
			s.mu.Unlock()
			return dc, dc.releaseConn, v.ds, nil
		}
	}
	s.mu.Unlock()

	// No luck; we need to prepare the statement on this connection
	withLock(dc, func() {
		ds, err = s.prepareOnConnLocked(ctx, dc)
	})
	if err != nil {
		dc.releaseConn(err)
		return nil, nil, nil, err
	}

	return dc, dc.releaseConn, ds, nil
}
// prepareOnConnLocked prepares the query in Stmt s on dc and adds it to the list of
// open connStmt on the statement. It assumes the caller is holding the lock on dc.
func (s *Stmt) prepareOnConnLocked(ctx context.Context, dc *driverConn) (*driverStmt, error) {
	si, err := dc.prepareLocked(ctx, s.cg, s.query)
	if err != nil {
		return nil, err
	}
	cs := connStmt{dc, si}
	// Record the per-connection statement so future calls on the same
	// connection can reuse it.
	s.mu.Lock()
	s.css = append(s.css, cs)
	s.mu.Unlock()
	return cs.ds, nil
}
// QueryContext executes a prepared query statement with the given arguments
// and returns the query results as a [*Rows].
func (s *Stmt) QueryContext(ctx context.Context, args ...any) (*Rows, error) {
	// Hold closemu for read so Close cannot proceed mid-execution.
	s.closemu.RLock()
	defer s.closemu.RUnlock()

	var rowsi driver.Rows
	var rows *Rows

	err := s.db.retry(func(strategy connReuseStrategy) error {
		dc, releaseConn, ds, err := s.connStmt(ctx, strategy)
		if err != nil {
			return err
		}

		rowsi, err = rowsiFromStatement(ctx, dc.ci, ds, args...)
		if err == nil {
			// Note: ownership of ci passes to the *Rows, to be freed
			// with releaseConn.
			rows = &Rows{
				dc:    dc,
				rowsi: rowsi,
				// releaseConn set below
			}
			// addDep must be added before initContextClose or it could attempt
			// to removeDep before it has been added.
			s.db.addDep(s, rows)

			// releaseConn must be set before initContextClose or it could
			// release the connection before it is set.
			rows.releaseConn = func(err error) {
				releaseConn(err)
				s.db.removeDep(s, rows)
			}
			var txctx context.Context
			if s.cg != nil {
				txctx = s.cg.txCtx()
			}
			rows.initContextClose(ctx, txctx)
			return nil
		}

		releaseConn(err)
		return err
	})

	return rows, err
}
// Query executes a prepared query statement with the given arguments
// and returns the query results as a *Rows.
//
// Query uses [context.Background] internally; to specify the context, use
// [Stmt.QueryContext].
func (s *Stmt) Query(args ...any) (*Rows, error) {
	// Delegate to QueryContext with a background context.
	return s.QueryContext(context.Background(), args...)
}
// rowsiFromStatement runs the prepared driver statement ds with args on
// connection ci and returns the driver's row iterator. The driver
// statement is locked for the duration of the call.
func rowsiFromStatement(ctx context.Context, ci driver.Conn, ds *driverStmt, args ...any) (driver.Rows, error) {
	ds.Lock()
	defer ds.Unlock()

	dargs, convErr := driverArgsConnLocked(ci, ds, args)
	if convErr != nil {
		return nil, convErr
	}
	return ctxDriverStmtQuery(ctx, ds.si, dargs)
}
// QueryRowContext executes a prepared query statement with the given arguments.
// If an error occurs during the execution of the statement, that error will
// be returned by a call to Scan on the returned [*Row], which is always non-nil.
// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
// Otherwise, the [*Row.Scan] scans the first selected row and discards
// the rest.
func (s *Stmt) QueryRowContext(ctx context.Context, args ...any) *Row {
	rows, err := s.QueryContext(ctx, args...)
	if err != nil {
		// Defer the error to Row.Scan.
		return &Row{err: err}
	}
	return &Row{rows: rows}
}

// QueryRow executes a prepared query statement with the given arguments.
// If an error occurs during the execution of the statement, that error will
// be returned by a call to Scan on the returned [*Row], which is always non-nil.
// If the query selects no rows, the [*Row.Scan] will return [ErrNoRows].
// Otherwise, the [*Row.Scan] scans the first selected row and discards
// the rest.
//
// Example usage:
//
//	var name string
//	err := nameByUseridStmt.QueryRow(id).Scan(&name)
//
// QueryRow uses [context.Background] internally; to specify the context, use
// [Stmt.QueryRowContext].
func (s *Stmt) QueryRow(args ...any) *Row {
	return s.QueryRowContext(context.Background(), args...)
}
// Close closes the statement.
func (s *Stmt) Close() error {
	// Take the write lock so Close waits for in-flight operations.
	s.closemu.Lock()
	defer s.closemu.Unlock()

	if s.stickyErr != nil {
		return s.stickyErr
	}
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		return nil
	}
	s.closed = true
	txds := s.cgds
	s.cgds = nil
	s.mu.Unlock()

	if s.cg == nil {
		// A DB-level statement: removing the self-dependency triggers
		// finalClose once no other dependents remain.
		return s.db.removeDep(s, s)
	}

	if s.parentStmt != nil {
		// If parentStmt is set, we must not close txds (the captured
		// s.cgds) since it's stored in the css array of the parentStmt.
		return s.db.removeDep(s.parentStmt, s)
	}
	return txds.Close()
}
// finalClose releases every per-connection driver statement tracked in
// s.css and clears the list. It always reports success.
func (s *Stmt) finalClose() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	for _, cs := range s.css {
		// Mark the driver statement unused and detach it from its connection.
		s.db.noteUnusedDriverStatement(cs.dc, cs.ds)
		cs.dc.removeOpenStmt(cs.ds)
	}
	s.css = nil
	return nil
}
// Rows is the result of a query. Its cursor starts before the first row
// of the result set. Use [Rows.Next] to advance from row to row.
type Rows struct {
	dc          *driverConn // owned; must call releaseConn when closed to release
	releaseConn func(error)
	rowsi       driver.Rows
	cancel      func()      // called when Rows is closed, may be nil.
	closeStmt   *driverStmt // if non-nil, statement to Close on close

	// contextDone holds the error that awaitDone saw; set before close attempt.
	contextDone atomic.Pointer[error]

	// closemu prevents Rows from closing while there
	// is an active streaming result. It is held for read during non-close operations
	// and exclusively during close.
	//
	// closemu guards lasterr and closed.
	closemu closingMutex
	lasterr error // non-nil only if closed is true
	closed  bool

	// closemuScanHold is whether the previous call to Scan kept closemu RLock'ed
	// without unlocking it. It does that when the user passes a *RawBytes scan
	// target. In that case, we need to prevent awaitDone from closing the Rows
	// while the user's still using the memory. See go.dev/issue/60304.
	//
	// It is only used by Scan, Next, and NextResultSet which are expected
	// not to be called concurrently.
	closemuScanHold bool

	// hitEOF is whether Next hit the end of the rows without
	// encountering an error. It's set in Next before
	// returning. It's only used by Next and Err which are
	// expected not to be called concurrently.
	hitEOF bool

	// lastcols is only used in Scan, Next, and NextResultSet which are expected
	// not to be called concurrently.
	lastcols []driver.Value

	// raw is a buffer for RawBytes that persists between Scan calls.
	// This is used when the driver returns a mismatched type that requires
	// a cloning allocation. For example, if the driver returns a *string and
	// the user is scanning into a *RawBytes, we need to copy the string.
	// The raw buffer here lets us reuse the memory for that copy across Scan calls.
	raw []byte
}
// lasterrOrErrLocked returns either lasterr or the provided err.
// rs.closemu must be read-locked.
func (rs *Rows) lasterrOrErrLocked(err error) error {
	switch rs.lasterr {
	case nil, io.EOF:
		// No terminal error recorded; report the caller's error.
		return err
	}
	return rs.lasterr
}
// bypassRowsAwaitDone is only used for testing.
// If true, it will not close the Rows automatically from the context.
var bypassRowsAwaitDone = false

// initContextClose starts the awaitDone goroutine that closes the Rows
// when ctx or txctx is canceled. It is a no-op when neither context can
// be canceled.
func (rs *Rows) initContextClose(ctx, txctx context.Context) {
	// A nil Done channel means the context can never be canceled.
	if ctx.Done() == nil && (txctx == nil || txctx.Done() == nil) {
		return
	}
	if bypassRowsAwaitDone {
		return
	}
	// closectx lets an explicit Close stop the awaitDone goroutine.
	closectx, cancel := context.WithCancel(ctx)
	rs.cancel = cancel
	go rs.awaitDone(ctx, txctx, closectx)
}
// awaitDone blocks until ctx, txctx, or closectx is canceled.
// The ctx is provided from the query context.
// If the query was issued in a transaction, the transaction's context
// is also provided in txctx, to ensure Rows is closed if the Tx is closed.
// The closectx is closed by an explicit call to rs.Close.
func (rs *Rows) awaitDone(ctx, txctx, closectx context.Context) {
	// A nil channel blocks forever, so a missing txctx simply never fires.
	var txctxDone <-chan struct{}
	if txctx != nil {
		txctxDone = txctx.Done()
	}
	select {
	case <-ctx.Done():
		err := ctx.Err()
		rs.contextDone.Store(&err)
	case <-txctxDone:
		err := txctx.Err()
		rs.contextDone.Store(&err)
	case <-closectx.Done():
		// rs.cancel was called via Close(); don't store this into contextDone
		// to ensure Err() is unaffected.
	}
	rs.close(ctx.Err())
}
// Next prepares the next result row for reading with the [Rows.Scan] method. It
// returns true on success, or false if there is no next result row or an error
// happened while preparing it. [Rows.Err] should be consulted to distinguish between
// the two cases.
//
// Every call to [Rows.Scan], even the first one, must be preceded by a call to [Rows.Next].
func (rs *Rows) Next() bool {
	// If the user's calling Next, they're done with their previous row's Scan
	// results (any RawBytes memory), so we can release the read lock that would
	// be preventing awaitDone from calling close.
	rs.closemuRUnlockIfHeldByScan()

	// A canceled context means no further rows will be produced.
	if rs.contextDone.Load() != nil {
		return false
	}

	var doClose, ok bool
	// Run nextLocked under the read lock, but call Close (which takes the
	// write lock) only after the read lock has been released.
	func() {
		rs.closemu.RLock()
		defer rs.closemu.RUnlock()
		doClose, ok = rs.nextLocked()
	}()
	if doClose {
		rs.Close()
	}
	if doClose && !ok {
		rs.hitEOF = true
	}
	return ok
}
// nextLocked advances to the next row. It reports whether the Rows should
// be closed (doClose) and whether a row is ready for scanning (ok).
// rs.closemu must be read-locked by the caller.
func (rs *Rows) nextLocked() (doClose, ok bool) {
	if rs.closed {
		return false, false
	}

	// Hold the driver connection lock so a Tx cannot roll back the
	// connection while we call into the driver's rowsi.
	rs.dc.Lock()
	defer rs.dc.Unlock()

	if rs.lastcols == nil {
		rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns()))
	}

	rs.lasterr = rs.rowsi.Next(rs.lastcols)
	if rs.lasterr == nil {
		return false, true
	}
	// Close the connection on any driver error other than EOF.
	if rs.lasterr != io.EOF {
		return true, false
	}
	nrs, isMulti := rs.rowsi.(driver.RowsNextResultSet)
	if !isMulti {
		return true, false
	}
	// The driver is at the end of the current result set. Only close
	// Rows if there is no further result set to read.
	return !nrs.HasNextResultSet(), false
}
// NextResultSet prepares the next result set for reading. It reports whether
// there is further result sets, or false if there is no further result set
// or if there is an error advancing to it. The [Rows.Err] method should be consulted
// to distinguish between the two cases.
//
// After calling NextResultSet, the [Rows.Next] method should always be called before
// scanning. If there are further result sets they may not have rows in the result
// set.
func (rs *Rows) NextResultSet() bool {
	// Calling NextResultSet means the caller is finished with the previous
	// row's Scan results (any RawBytes memory), so drop the read lock that
	// was keeping awaitDone from closing rs.
	rs.closemuRUnlockIfHeldByScan()

	var needClose bool
	defer func() {
		if needClose {
			rs.Close()
		}
	}()

	rs.closemu.RLock()
	defer rs.closemu.RUnlock()

	if rs.closed {
		return false
	}

	rs.lastcols = nil
	nrs, isMulti := rs.rowsi.(driver.RowsNextResultSet)
	if !isMulti {
		needClose = true
		return false
	}

	// Hold the driver connection lock so a Tx cannot roll back the
	// connection while we call into the driver's rowsi.
	rs.dc.Lock()
	defer rs.dc.Unlock()

	rs.lasterr = nrs.NextResultSet()
	if rs.lasterr != nil {
		needClose = true
		return false
	}
	return true
}
// Err returns the error, if any, that was encountered during iteration.
// Err may be called after an explicit or implicit [Rows.Close].
func (rs *Rows) Err() error {
	// Report a context error observed during iteration, but only before the
	// final Next() = false has been returned: once the rows are done, the
	// user may have canceled their own context before calling Rows.Err.
	errp := rs.contextDone.Load()
	if errp != nil && !rs.hitEOF {
		return *errp
	}

	rs.closemu.RLock()
	defer rs.closemu.RUnlock()
	return rs.lasterrOrErrLocked(nil)
}
// rawbuf returns the buffer to append RawBytes values to.
// This buffer is reused across calls to Rows.Scan.
//
// Usage:
//
//	rawBytes = rows.setrawbuf(append(rows.rawbuf(), value...))
func (rs *Rows) rawbuf() []byte {
	if rs == nil {
		// convertAssignRows can take a nil *Rows; for simplicity handle it here
		return nil
	}
	return rs.raw
}

// setrawbuf updates the RawBytes buffer with the result of appending a new value to it.
// It returns the new value.
func (rs *Rows) setrawbuf(b []byte) RawBytes {
	if rs == nil {
		// convertAssignRows can take a nil *Rows; for simplicity handle it here
		return RawBytes(b)
	}
	prevLen := len(rs.raw)
	rs.raw = b
	// Only the newly appended tail belongs to this value.
	return RawBytes(rs.raw[prevLen:])
}
// errRowsClosed is returned by operations attempted on a closed Rows.
var errRowsClosed = errors.New("sql: Rows are closed")

// errNoRows is returned when the Rows has no underlying driver rows.
var errNoRows = errors.New("sql: no Rows available")
// Columns returns the column names.
// Columns returns an error if the rows are closed.
func (rs *Rows) Columns() ([]string, error) {
	rs.closemu.RLock()
	defer rs.closemu.RUnlock()
	switch {
	case rs.closed:
		return nil, rs.lasterrOrErrLocked(errRowsClosed)
	case rs.rowsi == nil:
		return nil, rs.lasterrOrErrLocked(errNoRows)
	}
	// Serialize access to the driver rows via the connection lock.
	rs.dc.Lock()
	defer rs.dc.Unlock()
	return rs.rowsi.Columns(), nil
}

// ColumnTypes returns column information such as column type, length,
// and nullable. Some information may not be available from some drivers.
func (rs *Rows) ColumnTypes() ([]*ColumnType, error) {
	rs.closemu.RLock()
	defer rs.closemu.RUnlock()
	switch {
	case rs.closed:
		return nil, rs.lasterrOrErrLocked(errRowsClosed)
	case rs.rowsi == nil:
		return nil, rs.lasterrOrErrLocked(errNoRows)
	}
	// Serialize access to the driver rows via the connection lock.
	rs.dc.Lock()
	defer rs.dc.Unlock()
	return rowsColumnInfoSetupConnLocked(rs.rowsi), nil
}
// ColumnType contains the name and type of a column.
type ColumnType struct {
	// name is the column name or alias reported by the driver.
	name string
	// hasNullable, hasLength, and hasPrecisionScale record whether the
	// driver supplied the corresponding optional property; the matching
	// value fields below are only meaningful when the flag is true.
	hasNullable       bool
	hasLength         bool
	hasPrecisionScale bool
	nullable          bool
	length            int64
	databaseType      string
	precision         int64
	scale             int64
	scanType          reflect.Type
}

// Name returns the name or alias of the column.
func (ci *ColumnType) Name() string {
	return ci.name
}

// Length returns the column type length for variable length column types such
// as text and binary field types. If the type length is unbounded the value will
// be [math.MaxInt64] (any database limits will still apply).
// If the column type is not variable length, such as an int, or if not supported
// by the driver ok is false.
func (ci *ColumnType) Length() (length int64, ok bool) {
	return ci.length, ci.hasLength
}

// DecimalSize returns the scale and precision of a decimal type.
// If not applicable or if not supported ok is false.
func (ci *ColumnType) DecimalSize() (precision, scale int64, ok bool) {
	return ci.precision, ci.scale, ci.hasPrecisionScale
}

// ScanType returns a Go type suitable for scanning into using [Rows.Scan].
// If a driver does not support this property ScanType will return
// the type of an empty interface.
func (ci *ColumnType) ScanType() reflect.Type {
	return ci.scanType
}

// Nullable reports whether the column may be null.
// If a driver does not support this property ok will be false.
func (ci *ColumnType) Nullable() (nullable, ok bool) {
	return ci.nullable, ci.hasNullable
}

// DatabaseTypeName returns the database system name of the column type. If an empty
// string is returned, then the driver type name is not supported.
// Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers
// are not included.
// Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL",
// "INT", and "BIGINT".
func (ci *ColumnType) DatabaseTypeName() string {
	return ci.databaseType
}
// rowsColumnInfoSetupConnLocked builds a *ColumnType for every column of
// rowsi, filling in whichever optional driver properties are implemented.
// The driver connection must be locked by the caller.
func rowsColumnInfoSetupConnLocked(rowsi driver.Rows) []*ColumnType {
	names := rowsi.Columns()
	out := make([]*ColumnType, len(names))
	for i, name := range names {
		ct := &ColumnType{name: name}
		out[i] = ct

		// Every property below is optional for a driver to implement;
		// probe each extension interface individually.
		ct.scanType = reflect.TypeFor[any]()
		if prop, ok := rowsi.(driver.RowsColumnTypeScanType); ok {
			ct.scanType = prop.ColumnTypeScanType(i)
		}
		if prop, ok := rowsi.(driver.RowsColumnTypeDatabaseTypeName); ok {
			ct.databaseType = prop.ColumnTypeDatabaseTypeName(i)
		}
		if prop, ok := rowsi.(driver.RowsColumnTypeLength); ok {
			ct.length, ct.hasLength = prop.ColumnTypeLength(i)
		}
		if prop, ok := rowsi.(driver.RowsColumnTypeNullable); ok {
			ct.nullable, ct.hasNullable = prop.ColumnTypeNullable(i)
		}
		if prop, ok := rowsi.(driver.RowsColumnTypePrecisionScale); ok {
			ct.precision, ct.scale, ct.hasPrecisionScale = prop.ColumnTypePrecisionScale(i)
		}
	}
	return out
}
// Scan copies the columns in the current row into the values pointed
// at by dest. The number of values in dest must be the same as the
// number of columns in [Rows].
//
// Scan converts columns read from the database into the following
// common Go types and special types provided by the sql package:
//
//	*string
//	*[]byte
//	*int, *int8, *int16, *int32, *int64
//	*uint, *uint8, *uint16, *uint32, *uint64
//	*bool
//	*float32, *float64
//	*interface{}
//	*RawBytes
//	*Rows (cursor value)
//	any type implementing Scanner (see Scanner docs)
//
// In the most simple case, if the type of the value from the source
// column is an integer, bool or string type T and dest is of type *T,
// Scan simply assigns the value through the pointer.
//
// Scan also converts between string and numeric types, as long as no
// information would be lost. While Scan stringifies all numbers
// scanned from numeric database columns into *string, scans into
// numeric types are checked for overflow. For example, a float64 with
// value 300 or a string with value "300" can scan into a uint16, but
// not into a uint8, though float64(255) or "255" can scan into a
// uint8. One exception is that scans of some float64 numbers to
// strings may lose information when stringifying. In general, scan
// floating point columns into *float64.
//
// If a dest argument has type *[]byte, Scan saves in that argument a
// copy of the corresponding data. The copy is owned by the caller and
// can be modified and held indefinitely. The copy can be avoided by
// using an argument of type [*RawBytes] instead; see the documentation
// for [RawBytes] for restrictions on its use.
//
// If an argument has type *interface{}, Scan copies the value
// provided by the underlying driver without conversion. When scanning
// from a source value of type []byte to *interface{}, a copy of the
// slice is made and the caller owns the result.
//
// Source values of type [time.Time] may be scanned into values of type
// *time.Time, *interface{}, *string, or *[]byte. When converting to
// the latter two, [time.RFC3339Nano] is used.
//
// Source values of type bool may be scanned into types *bool,
// *interface{}, *string, *[]byte, or [*RawBytes].
//
// For scanning into *bool, the source may be true, false, 1, 0, or
// string inputs parseable by [strconv.ParseBool].
//
// Scan can also convert a cursor returned from a query, such as
// "select cursor(select * from my_table) from dual", into a
// [*Rows] value that can itself be scanned from. The parent
// select query will close any cursor [*Rows] if the parent [*Rows] is closed.
//
// If any of the first arguments implementing [Scanner] returns an error,
// that error will be wrapped in the returned error.
func (rs *Rows) Scan(dest ...any) error {
	if rs.closemuScanHold {
		// Only possible if the user calls Scan twice in a row without
		// calling Next in between.
		return fmt.Errorf("sql: Scan called without calling Next (closemuScanHold)")
	}
	rs.closemu.RLock()

	// Reset the shared RawBytes buffer; values appended during this Scan
	// are handed out as subslices of it.
	rs.raw = rs.raw[:0]

	err := rs.scanLocked(dest...)
	if err == nil && scanArgsContainRawBytes(dest) {
		// Keep the read lock held until the next Next, NextResultSet, or
		// Close call so the RawBytes memory remains valid.
		rs.closemuScanHold = true
		return nil
	}
	rs.closemu.RUnlock()
	return err
}
// scanLocked converts and copies the current row's columns into dest.
// rs.closemu must be read-locked by the caller.
func (rs *Rows) scanLocked(dest ...any) error {
	if rs.lasterr != nil && rs.lasterr != io.EOF {
		return rs.lasterr
	}
	if rs.closed {
		return rs.lasterrOrErrLocked(errRowsClosed)
	}
	if rs.lastcols == nil {
		return errors.New("sql: Scan called without calling Next")
	}
	if want, got := len(rs.lastcols), len(dest); want != got {
		return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", want, got)
	}
	for i, src := range rs.lastcols {
		if err := convertAssignRows(dest[i], src, rs); err != nil {
			return fmt.Errorf(`sql: Scan error on column index %d, name %q: %w`, i, rs.rowsi.Columns()[i], err)
		}
	}
	return nil
}
// closemuRUnlockIfHeldByScan releases any closemu.RLock held open by a previous
// call to Scan with *RawBytes.
func (rs *Rows) closemuRUnlockIfHeldByScan() {
	if !rs.closemuScanHold {
		return
	}
	rs.closemuScanHold = false
	rs.closemu.RUnlock()
}

// scanArgsContainRawBytes reports whether any Scan destination is a *RawBytes.
func scanArgsContainRawBytes(args []any) bool {
	for _, arg := range args {
		switch arg.(type) {
		case *RawBytes:
			return true
		}
	}
	return false
}
// rowsCloseHook returns a function so tests may install the
// hook through a test only mutex.
var rowsCloseHook = func() func(*Rows, *error) { return nil }

// Close closes the [Rows], preventing further enumeration. If [Rows.Next] is called
// and returns false and there are no further result sets,
// the [Rows] are closed automatically and it will suffice to check the
// result of [Rows.Err]. Close is idempotent and does not affect the result of [Rows.Err].
func (rs *Rows) Close() error {
	// Calling Close means the caller is finished with the previous row's
	// Scan results (any RawBytes memory), so release the read lock that
	// would otherwise block the unexported close from running.
	rs.closemuRUnlockIfHeldByScan()
	return rs.close(nil)
}
// close marks rs closed, records err as the iteration result (if none is
// recorded yet), closes the driver rows, runs any test hook, cancels the
// context watcher, closes the owning statement (if any), and releases the
// connection. It is idempotent: subsequent calls return nil.
func (rs *Rows) close(err error) error {
	rs.closemu.Lock()
	defer rs.closemu.Unlock()
	if rs.closed {
		return nil
	}
	rs.closed = true
	if rs.lasterr == nil {
		rs.lasterr = err
	}
	// Note: from here on err carries the driver Close result (possibly
	// further modified by the test hook), not the caller's argument.
	withLock(rs.dc, func() {
		err = rs.rowsi.Close()
	})
	if fn := rowsCloseHook(); fn != nil {
		fn(rs, &err)
	}
	if rs.cancel != nil {
		// Stop the awaitDone goroutine watching the contexts.
		rs.cancel()
	}
	if rs.closeStmt != nil {
		rs.closeStmt.Close()
	}
	rs.releaseConn(err)
	rs.lasterr = rs.lasterrOrErrLocked(err)
	return err
}
// Row is the result of calling [DB.QueryRow] to select a single row.
type Row struct {
	// One of these two will be non-nil:
	err  error // deferred error for easy chaining
	rows *Rows // result rows; consumed and closed by Scan
}
// Scan copies the columns from the matched row into the values
// pointed at by dest. See the documentation on [Rows.Scan] for details.
// If more than one row matches the query,
// Scan uses the first row and discards the rest. If no row matches
// the query, Scan returns [ErrNoRows].
func (r *Row) Scan(dest ...any) error {
	if r.err != nil {
		return r.err
	}

	// TODO(bradfitz): for now we need to defensively clone all
	// []byte that the driver returned (not permitting
	// *RawBytes in Rows.Scan), since we're about to close
	// the Rows in our defer, when we return from this function.
	// the contract with the driver.Next(...) interface is that it
	// can return slices into read-only temporary memory that's
	// only valid until the next Scan/Close. But the TODO is that
	// for a lot of drivers, this copy will be unnecessary. We
	// should provide an optional interface for drivers to
	// implement to say, "don't worry, the []bytes that I return
	// from Next will not be modified again." (for instance, if
	// they were obtained from the network anyway) But for now we
	// don't care.
	defer r.rows.Close()

	if scanArgsContainRawBytes(dest) {
		return errors.New("sql: RawBytes isn't allowed on Row.Scan")
	}
	if !r.rows.Next() {
		if err := r.rows.Err(); err != nil {
			return err
		}
		return ErrNoRows
	}
	if err := r.rows.Scan(dest...); err != nil {
		return err
	}
	// Make sure the query can be processed to completion with no errors.
	return r.rows.Close()
}
// Err provides a way for wrapping packages to check for
// query errors without calling [Row.Scan].
// Err returns the error, if any, that was encountered while running the query.
// If this error is not nil, this error will also be returned from [Row.Scan].
func (r *Row) Err() error {
	// Only the deferred query error is reported here; the underlying
	// rows, if any, are left untouched for Scan to consume.
	return r.err
}
// A Result summarizes an executed SQL command.
type Result interface {
	// LastInsertId returns the integer generated by the database
	// in response to a command. Typically this will be from an
	// "auto increment" column when inserting a new row. Not all
	// databases support this feature, and the syntax of such
	// statements varies.
	LastInsertId() (int64, error)
	// RowsAffected returns the number of rows affected by an
	// update, insert, or delete. Not every database or database
	// driver may support this.
	RowsAffected() (int64, error)
}

// driverResult implements Result by delegating to a driver.Result,
// serializing each call through the driver connection's lock.
type driverResult struct {
	sync.Locker // the *driverConn
	resi        driver.Result
}

// LastInsertId calls the driver's LastInsertId while holding the
// connection lock.
func (dr driverResult) LastInsertId() (int64, error) {
	dr.Lock()
	defer dr.Unlock()
	return dr.resi.LastInsertId()
}

// RowsAffected calls the driver's RowsAffected while holding the
// connection lock.
func (dr driverResult) RowsAffected() (int64, error) {
	dr.Lock()
	defer dr.Unlock()
	return dr.resi.RowsAffected()
}
// stack returns a formatted stack trace of the calling goroutine,
// truncated to 2 KiB.
func stack() string {
	var buf [2 << 10]byte
	n := runtime.Stack(buf[:], false)
	return string(buf[:n])
}

// withLock runs fn while holding lk.
func withLock(lk sync.Locker, fn func()) {
	lk.Lock()
	defer lk.Unlock() // in case fn panics
	fn()
}
// connRequestSet is a set of chan connRequest that's
// optimized for:
//
// - adding an element
// - removing an element (only by the caller who added it)
// - taking (get + delete) a random element
//
// We previously used a map for this but the take of a random element
// was expensive, making mapiters. This type avoids a map entirely
// and just uses a slice.
type connRequestSet struct {
	// s are the elements in the set.
	s []connRequestAndIndex
}

type connRequestAndIndex struct {
	// req is the element in the set.
	req chan connRequest
	// curIdx points to the current location of this element in
	// connRequestSet.s. It gets set to -1 upon removal.
	// It is shared with the connRequestDelHandle returned by Add.
	curIdx *int
}
// CloseAndRemoveAll closes all channels in the set
// and clears the set.
func (s *connRequestSet) CloseAndRemoveAll() {
	for _, e := range s.s {
		// Invalidate the element's delete handle, then wake its waiter.
		*e.curIdx = -1
		close(e.req)
	}
	s.s = nil
}

// Len returns the length of the set.
func (s *connRequestSet) Len() int { return len(s.s) }
// connRequestDelHandle is an opaque handle to delete an
// item from calling Add.
// The handle remains valid even as its element moves within the
// slice, because the set updates *idx in place on every move.
type connRequestDelHandle struct {
	idx *int // pointer to index; or -1 if not in slice
}
// Add adds v to the set of waiting requests.
// The returned connRequestDelHandle can be used to remove the item from
// the set.
func (s *connRequestSet) Add(v chan connRequest) connRequestDelHandle {
	// TODO(bradfitz): this always heap-allocates an int to track the
	// element's position. For small sets a nil *int meaning "scan for it"
	// would also work — elements only ever move to lower indexes, so an
	// element that starts in a low (scannable) position stays there even
	// if the set later grows past the threshold — but that optimization
	// is left for a follow-up change.
	pos := new(int)
	*pos = len(s.s)
	s.s = append(s.s, connRequestAndIndex{req: v, curIdx: pos})
	return connRequestDelHandle{idx: pos}
}
// Delete removes an element from the set.
//
// It reports whether the element was deleted. (It can return false if a caller
// of TakeRandom took it meanwhile, or upon the second call to Delete)
func (s *connRequestSet) Delete(h connRequestDelHandle) bool {
	pos := *h.idx
	if pos < 0 {
		// Already removed, by TakeRandom or an earlier Delete.
		return false
	}
	s.deleteIndex(pos)
	return true
}

// deleteIndex removes the element at idx in O(1) by swapping the final
// element into its place and shrinking the slice.
func (s *connRequestSet) deleteIndex(idx int) {
	// Invalidate the departing element's handle.
	*(s.s[idx].curIdx) = -1

	lastIdx := len(s.s) - 1
	if idx != lastIdx {
		moved := s.s[lastIdx]
		*moved.curIdx = idx // tell its handle about its new home
		s.s[idx] = moved
	}
	// Zero the vacated tail slot (for GC) before shrinking the slice.
	s.s[lastIdx] = connRequestAndIndex{}
	s.s = s.s[:lastIdx]
}
// TakeRandom returns and removes a random element from s
// and reports whether there was one to take. (It returns ok=false
// if the set is empty.)
func (s *connRequestSet) TakeRandom() (v chan connRequest, ok bool) {
	n := len(s.s)
	if n == 0 {
		return nil, false
	}
	i := rand.IntN(n)
	picked := s.s[i]
	s.deleteIndex(i)
	return picked.req, true
}
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package buildinfo provides access to information embedded in a Go binary
// about how it was built. This includes the Go toolchain version, and the
// set of modules used (for binaries built in module mode).
//
// Build information is available for the currently running binary in
// runtime/debug.ReadBuildInfo.
package buildinfo
import (
"bytes"
"debug/elf"
"debug/macho"
"debug/pe"
"debug/plan9obj"
"encoding/binary"
"errors"
"fmt"
"internal/saferio"
"internal/xcoff"
"io"
"io/fs"
"os"
"runtime/debug"
_ "unsafe" // for linkname
)
// Type alias for build info. We cannot move the types here, since
// runtime/debug would need to import this package, which would make it
// a much larger dependency.
type BuildInfo = debug.BuildInfo

// errUnrecognizedFormat is returned when a given executable file doesn't
// appear to be in a known format, or it breaks the rules of that format,
// or when there are I/O errors reading the file.
var errUnrecognizedFormat = errors.New("unrecognized file format")

// errNotGoExe is returned when a given executable file is valid but does
// not contain Go build information.
//
// errNotGoExe should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/quay/claircore
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname errNotGoExe
var errNotGoExe = errors.New("not a Go executable")

// The build info blob left by the linker is identified by a 32-byte header,
// consisting of buildInfoMagic (14 bytes), followed by version-dependent
// fields.
var buildInfoMagic = []byte("\xff Go buildinf:")

const (
	buildInfoAlign      = 16 // the blob is 16-byte aligned within the data segment/section
	buildInfoHeaderSize = 32 // magic (14) + ptrSize (1) + flags (1) + two pointer fields
)
// ReadFile returns build information embedded in a Go binary
// file at the given path. Most information is only available for binaries built
// with module support.
func ReadFile(name string) (info *BuildInfo, err error) {
	defer func() {
		if err == nil {
			return
		}
		// A *fs.PathError already names the file, so avoid repeating it.
		if _, isPathErr := errors.AsType[*fs.PathError](err); isPathErr {
			err = fmt.Errorf("could not read Go build info: %w", err)
		} else {
			err = fmt.Errorf("could not read Go build info from %s: %w", name, err)
		}
	}()

	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return Read(f)
}
// Read returns build information embedded in a Go binary file
// accessed through the given ReaderAt. Most information is only available for
// binaries built with module support.
func Read(r io.ReaderAt) (*BuildInfo, error) {
	goVers, modinfo, err := readRawBuildInfo(r)
	if err != nil {
		return nil, err
	}
	info, err := debug.ParseBuildInfo(modinfo)
	if err != nil {
		return nil, err
	}
	info.GoVersion = goVers
	return info, nil
}
// exe abstracts an executable format (ELF, PE, Mach-O, XCOFF, Plan 9 a.out)
// just enough to locate and read the build info blob.
type exe interface {
	// DataStart returns the virtual address and size of the segment or section that
	// should contain build information. This is either a specially named section
	// or the first writable non-zero data segment.
	DataStart() (uint64, uint64)

	// DataReader returns an io.ReaderAt that reads from addr until the end
	// of segment or section that contains addr.
	DataReader(addr uint64) (io.ReaderAt, error)
}
// readRawBuildInfo extracts the Go toolchain version and module information
// strings from a Go binary. On success, vers should be non-empty. mod
// is empty if the binary was not built with modules enabled.
func readRawBuildInfo(r io.ReaderAt) (vers, mod string, err error) {
	// Read the first bytes of the file to identify the format, then delegate to
	// a format-specific function to load segment and section headers.
	ident := make([]byte, 16)
	if n, err := r.ReadAt(ident, 0); n < len(ident) || err != nil {
		return "", "", errUnrecognizedFormat
	}

	// Select the format-specific exe wrapper by magic number.
	var x exe
	switch {
	case bytes.HasPrefix(ident, []byte("\x7FELF")):
		f, err := elf.NewFile(r)
		if err != nil {
			return "", "", errUnrecognizedFormat
		}
		x = &elfExe{f}
	case bytes.HasPrefix(ident, []byte("MZ")):
		f, err := pe.NewFile(r)
		if err != nil {
			return "", "", errUnrecognizedFormat
		}
		x = &peExe{f}
	case bytes.HasPrefix(ident, []byte("\xFE\xED\xFA")) || bytes.HasPrefix(ident[1:], []byte("\xFA\xED\xFE")):
		// Mach-O, big- or little-endian.
		f, err := macho.NewFile(r)
		if err != nil {
			return "", "", errUnrecognizedFormat
		}
		x = &machoExe{f}
	case bytes.HasPrefix(ident, []byte("\xCA\xFE\xBA\xBE")) || bytes.HasPrefix(ident, []byte("\xCA\xFE\xBA\xBF")):
		// Mach-O universal ("fat") binary; use the first architecture.
		f, err := macho.NewFatFile(r)
		if err != nil || len(f.Arches) == 0 {
			return "", "", errUnrecognizedFormat
		}
		x = &machoExe{f.Arches[0].File}
	case bytes.HasPrefix(ident, []byte{0x01, 0xDF}) || bytes.HasPrefix(ident, []byte{0x01, 0xF7}):
		// XCOFF32 or XCOFF64.
		f, err := xcoff.NewFile(r)
		if err != nil {
			return "", "", errUnrecognizedFormat
		}
		x = &xcoffExe{f}
	case hasPlan9Magic(ident):
		f, err := plan9obj.NewFile(r)
		if err != nil {
			return "", "", errUnrecognizedFormat
		}
		x = &plan9objExe{f}
	default:
		return "", "", errUnrecognizedFormat
	}

	// Read segment or section to find the build info blob.
	// On some platforms, the blob will be in its own section, and DataStart
	// returns the address of that section. On others, it's somewhere in the
	// data segment; the linker puts it near the beginning.
	// See cmd/link/internal/ld.Link.buildinfo.
	dataAddr, dataSize := x.DataStart()
	if dataSize == 0 {
		return "", "", errNotGoExe
	}

	addr, err := searchMagic(x, dataAddr, dataSize)
	if err != nil {
		return "", "", err
	}

	// Read in the full header first.
	header, err := readData(x, addr, buildInfoHeaderSize)
	if err == io.EOF {
		return "", "", errNotGoExe
	} else if err != nil {
		return "", "", err
	}
	if len(header) < buildInfoHeaderSize {
		return "", "", errNotGoExe
	}

	// Byte offsets of the header fields, and the flag bits within flags.
	const (
		ptrSizeOffset = 14
		flagsOffset   = 15
		versPtrOffset = 16

		flagsEndianMask   = 0x1
		flagsEndianLittle = 0x0
		flagsEndianBig    = 0x1

		flagsVersionMask = 0x2
		flagsVersionPtr  = 0x0
		flagsVersionInl  = 0x2
	)

	// Decode the blob. The blob is a 32-byte header, optionally followed
	// by 2 varint-prefixed string contents.
	//
	// type buildInfoHeader struct {
	// 	magic       [14]byte
	// 	ptrSize     uint8 // used if flagsVersionPtr
	// 	flags       uint8
	// 	versPtr     targetUintptr // used if flagsVersionPtr
	// 	modPtr      targetUintptr // used if flagsVersionPtr
	// }
	//
	// The version bit of the flags field determines the details of the format.
	//
	// Prior to 1.18, the flags version bit is flagsVersionPtr. In this
	// case, the header includes pointers to the version and modinfo Go
	// strings in the header. The ptrSize field indicates the size of the
	// pointers and the endian bit of the flag indicates the pointer
	// endianness.
	//
	// Since 1.18, the flags version bit is flagsVersionInl. In this case,
	// the header is followed by the string contents inline as
	// length-prefixed (as varint) string contents. First is the version
	// string, followed immediately by the modinfo string.
	flags := header[flagsOffset]
	if flags&flagsVersionMask == flagsVersionInl {
		vers, addr, err = decodeString(x, addr+buildInfoHeaderSize)
		if err != nil {
			return "", "", err
		}
		mod, _, err = decodeString(x, addr)
		if err != nil {
			return "", "", err
		}
	} else {
		// flagsVersionPtr (<1.18)
		ptrSize := int(header[ptrSizeOffset])
		bigEndian := flags&flagsEndianMask == flagsEndianBig
		var bo binary.ByteOrder
		if bigEndian {
			bo = binary.BigEndian
		} else {
			bo = binary.LittleEndian
		}
		var readPtr func([]byte) uint64
		if ptrSize == 4 {
			readPtr = func(b []byte) uint64 { return uint64(bo.Uint32(b)) }
		} else if ptrSize == 8 {
			readPtr = bo.Uint64
		} else {
			return "", "", errNotGoExe
		}
		vers = readString(x, ptrSize, readPtr, readPtr(header[versPtrOffset:]))
		mod = readString(x, ptrSize, readPtr, readPtr(header[versPtrOffset+ptrSize:]))
	}
	if vers == "" {
		return "", "", errNotGoExe
	}
	// The module info is framed as: 16-byte start sentinel, contents
	// ending in '\n', 16-byte end sentinel (so at least 33 bytes total).
	if len(mod) >= 33 && mod[len(mod)-17] == '\n' {
		// Strip module framing: sentinel strings delimiting the module info.
		// These are cmd/go/internal/modload.infoStart and infoEnd.
		mod = mod[16 : len(mod)-16]
	} else {
		mod = ""
	}

	return vers, mod, nil
}
// hasPlan9Magic reports whether magic begins with a known Plan 9
// executable magic number (386, amd64, or arm), read big-endian.
func hasPlan9Magic(magic []byte) bool {
	if len(magic) < 4 {
		return false
	}
	switch binary.BigEndian.Uint32(magic) {
	case plan9obj.Magic386, plan9obj.MagicAMD64, plan9obj.MagicARM:
		return true
	default:
		return false
	}
}
// decodeString reads a varint length-prefixed string at addr, returning the
// string and the address of the first byte after it.
func decodeString(x exe, addr uint64) (string, uint64, error) {
	// The varint prefix is at most MaxVarintLen64 bytes. readData reads
	// _up to_ size bytes from the section containing addr, so the read
	// cannot overrun the section.
	b, err := readData(x, addr, binary.MaxVarintLen64)
	if err != nil {
		if err == io.EOF {
			return "", 0, errNotGoExe
		}
		return "", 0, err
	}
	length, n := binary.Uvarint(b)
	if n <= 0 {
		return "", 0, errNotGoExe
	}
	addr += uint64(n)

	b, err = readData(x, addr, length)
	if err != nil {
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			// EOF: ran off the section. ErrUnexpectedEOF: length too
			// large to allocate. Either way the blob is bogus.
			return "", 0, errNotGoExe
		}
		return "", 0, err
	}
	if uint64(len(b)) < length {
		// Section ended before we could read the full string.
		return "", 0, errNotGoExe
	}
	return string(b), addr + length, nil
}
// readString returns the string at address addr in the executable x.
// The string is stored as a (data pointer, length) pair of ptrSize words,
// decoded with readPtr. On any failure the empty string is returned.
func readString(x exe, ptrSize int, readPtr func([]byte) uint64, addr uint64) string {
	hdrLen := uint64(2 * ptrSize)
	hdr, err := readData(x, addr, hdrLen)
	if err != nil || uint64(len(hdr)) < hdrLen {
		return ""
	}
	dataAddr := readPtr(hdr)
	dataLen := readPtr(hdr[ptrSize:])
	data, err := readData(x, dataAddr, dataLen)
	if err != nil || uint64(len(data)) < dataLen {
		return ""
	}
	return string(data)
}
// searchChunkSize bounds each read while scanning for the magic.
const searchChunkSize = 1 << 20 // 1 MB

// searchMagic returns the aligned first instance of buildInfoMagic in the data
// range [addr, addr+size). Returns false if not found.
func searchMagic(x exe, start, size uint64) (uint64, error) {
	end := start + size
	if end < start {
		// Overflow.
		return 0, errUnrecognizedFormat
	}

	// Round up start; magic can't occur in the initial unaligned portion.
	start = (start + buildInfoAlign - 1) &^ (buildInfoAlign - 1)
	if start >= end {
		return 0, errNotGoExe
	}

	var buf []byte
	for start < end {
		// Read in chunks to avoid consuming too much memory if data is large.
		//
		// Normally it would be somewhat painful to handle the magic crossing a
		// chunk boundary, but since it must be 16-byte aligned we know it will
		// fall within a single chunk.
		remaining := end - start
		chunkSize := uint64(searchChunkSize)
		if chunkSize > remaining {
			chunkSize = remaining
		}

		if buf == nil {
			buf = make([]byte, chunkSize)
		} else {
			// N.B. chunkSize can only decrease, and only on the
			// last chunk.
			buf = buf[:chunkSize]
			clear(buf)
		}

		n, err := readDataInto(x, start, buf)
		if err == io.EOF {
			// EOF before finding the magic; must not be a Go executable.
			return 0, errNotGoExe
		} else if err != nil {
			return 0, err
		}

		// Scan this chunk for aligned occurrences of the magic.
		data := buf[:n]
		for len(data) > 0 {
			i := bytes.Index(data, buildInfoMagic)
			if i < 0 {
				break
			}
			if remaining-uint64(i) < buildInfoHeaderSize {
				// Found magic, but not enough space left for the full header.
				return 0, errNotGoExe
			}
			if i%buildInfoAlign != 0 {
				// Found magic, but misaligned. Keep searching.
				// Skip forward to the next aligned boundary.
				next := (i + buildInfoAlign - 1) &^ (buildInfoAlign - 1)
				if next > len(data) {
					// Corrupt object file: the remaining
					// count says there is more data,
					// but we didn't read it.
					return 0, errNotGoExe
				}
				data = data[next:]
				continue
			}
			// Good match!
			return start + uint64(i), nil
		}

		start += chunkSize
	}

	return 0, errNotGoExe
}
// readData reads size bytes at addr from the segment or section of x that
// contains addr. Fewer bytes may be returned if the section ends early;
// a non-empty short read is not reported as an error.
func readData(x exe, addr, size uint64) ([]byte, error) {
	r, err := x.DataReader(addr)
	if err != nil {
		return nil, err
	}

	b, err := saferio.ReadDataAt(r, size, 0)
	if err == io.EOF && len(b) > 0 {
		// Callers check the returned length themselves.
		err = nil
	}
	return b, err
}

// readDataInto fills b with data starting at addr, returning the number of
// bytes read. A non-empty partial read at the end of a section is not an
// error.
func readDataInto(x exe, addr uint64, b []byte) (int, error) {
	r, err := x.DataReader(addr)
	if err != nil {
		return 0, err
	}

	n, err := r.ReadAt(b, 0)
	if err == io.EOF && n > 0 {
		err = nil
	}
	return n, err
}
// elfExe is the ELF implementation of the exe interface.
type elfExe struct {
	f *elf.File
}
// DataReader returns a reader over the ELF program segment containing addr,
// starting at addr and extending to the end of the segment's file data.
func (x *elfExe) DataReader(addr uint64) (io.ReaderAt, error) {
	for _, prog := range x.f.Progs {
		// Skip segments with no file data (e.g. PT_GNU_STACK, which has
		// p_vaddr == 0 and p_filesz == 0). Without this guard the
		// Filesz-1 upper bound below wraps around to MaxUint64 and the
		// range test would match every address.
		if prog.Filesz == 0 {
			continue
		}
		if prog.Vaddr <= addr && addr <= prog.Vaddr+prog.Filesz-1 {
			remaining := prog.Vaddr + prog.Filesz - addr
			return io.NewSectionReader(prog, int64(addr-prog.Vaddr), int64(remaining)), nil
		}
	}
	return nil, errUnrecognizedFormat
}
// DataStart returns the address and size of the .go.buildinfo section,
// or zeros if the section is absent.
func (x *elfExe) DataStart() (uint64, uint64) {
	for _, sec := range x.f.Sections {
		if sec.Name == ".go.buildinfo" {
			return sec.Addr, sec.Size
		}
	}
	return 0, 0
}
// peExe is the PE (Windows Portable Executable) implementation of the exe interface.
type peExe struct {
	f *pe.File
}

// imageBase returns the image's preferred load address from the optional
// header, or 0 if the header is missing or of an unknown type.
func (x *peExe) imageBase() uint64 {
	switch oh := x.f.OptionalHeader.(type) {
	case *pe.OptionalHeader32:
		return uint64(oh.ImageBase)
	case *pe.OptionalHeader64:
		return oh.ImageBase
	default:
		return 0
	}
}

// DataReader returns a reader over the PE section containing the given
// virtual address, extending to the end of that section.
func (x *peExe) DataReader(addr uint64) (io.ReaderAt, error) {
	// Section addresses are relative to the image base.
	addr -= x.imageBase()
	for _, sect := range x.f.Sections {
		// N.B. the bound and remaining arithmetic is deliberately done
		// in uint32 (matching the section header fields) before widening.
		if uint64(sect.VirtualAddress) <= addr && addr <= uint64(sect.VirtualAddress+sect.Size-1) {
			remaining := uint64(sect.VirtualAddress+sect.Size) - addr
			return io.NewSectionReader(sect, int64(addr-uint64(sect.VirtualAddress)), int64(remaining)), nil
		}
	}
	return nil, errUnrecognizedFormat
}
// DataStart reports the address (image base + RVA) and virtual size of
// what is assumed to be the data segment: the first section that is
// initialized data, readable, and writable (ignoring the 32-byte
// alignment flag).
func (x *peExe) DataStart() (uint64, uint64) {
	// Assume data is first writable section.
	const (
		IMAGE_SCN_CNT_CODE               = 0x00000020
		IMAGE_SCN_CNT_INITIALIZED_DATA   = 0x00000040
		IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080
		IMAGE_SCN_MEM_EXECUTE            = 0x20000000
		IMAGE_SCN_MEM_READ               = 0x40000000
		IMAGE_SCN_MEM_WRITE              = 0x80000000
		IMAGE_SCN_MEM_DISCARDABLE        = 0x2000000
		IMAGE_SCN_LNK_NRELOC_OVFL        = 0x1000000
		IMAGE_SCN_ALIGN_32BYTES          = 0x600000
	)
	const want = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE
	for _, s := range x.f.Sections {
		if s.VirtualAddress == 0 || s.Size == 0 {
			continue
		}
		if s.Characteristics&^IMAGE_SCN_ALIGN_32BYTES == want {
			return uint64(s.VirtualAddress) + x.imageBase(), uint64(s.VirtualSize)
		}
	}
	return 0, 0
}
// machoExe is the Mach-O (Apple macOS/iOS) implementation of the exe interface.
type machoExe struct {
	f *macho.File // underlying Mach-O file; provides load commands and sections
}
// DataReader returns a reader over the Mach-O segment containing the
// virtual address addr, spanning to the end of the segment's file data.
func (x *machoExe) DataReader(addr uint64) (io.ReaderAt, error) {
	for _, load := range x.f.Loads {
		seg, ok := load.(*macho.Segment)
		if !ok {
			continue
		}
		// __PAGEZERO is never served, even if addr falls inside it.
		if seg.Name == "__PAGEZERO" {
			continue
		}
		if seg.Addr <= addr && addr <= seg.Addr+seg.Filesz-1 {
			n := seg.Addr + seg.Filesz - addr
			return io.NewSectionReader(seg, int64(addr-seg.Addr), int64(n)), nil
		}
	}
	return nil, errUnrecognizedFormat
}
// DataStart reports the address and size of the "__go_buildinfo"
// section, or 0, 0 when no such section exists.
func (x *machoExe) DataStart() (uint64, uint64) {
	// Look for section named "__go_buildinfo".
	for _, sec := range x.f.Sections {
		if sec.Name != "__go_buildinfo" {
			continue
		}
		return sec.Addr, sec.Size
	}
	return 0, 0
}
// xcoffExe is the XCOFF (AIX eXtended COFF) implementation of the exe interface.
type xcoffExe struct {
	f *xcoff.File // underlying XCOFF file; provides sections
}
// DataReader returns a reader over the XCOFF section containing the
// virtual address addr, spanning to the end of the section.
func (x *xcoffExe) DataReader(addr uint64) (io.ReaderAt, error) {
	for _, s := range x.f.Sections {
		if s.VirtualAddress <= addr && addr <= s.VirtualAddress+s.Size-1 {
			n := s.VirtualAddress + s.Size - addr
			return io.NewSectionReader(s, int64(addr-s.VirtualAddress), int64(n)), nil
		}
	}
	return nil, errors.New("address not mapped")
}
// DataStart reports the address and size of the XCOFF data section,
// or 0, 0 when it is absent.
func (x *xcoffExe) DataStart() (uint64, uint64) {
	s := x.f.SectionByType(xcoff.STYP_DATA)
	if s == nil {
		return 0, 0
	}
	return s.VirtualAddress, s.Size
}
// plan9objExe is the Plan 9 a.out implementation of the exe interface.
type plan9objExe struct {
	f *plan9obj.File // underlying Plan 9 a.out file; provides sections
}
// DataStart reports the offset and size of the Plan 9 "data" section,
// or 0, 0 when it is absent.
func (x *plan9objExe) DataStart() (uint64, uint64) {
	s := x.f.Section("data")
	if s == nil {
		return 0, 0
	}
	return uint64(s.Offset), uint64(s.Size)
}
// DataReader returns a reader over the Plan 9 section containing the
// address addr (interpreted against section file offsets), spanning to
// the section end. The bound arithmetic intentionally matches the
// original's 32-bit section field widths.
func (x *plan9objExe) DataReader(addr uint64) (io.ReaderAt, error) {
	for _, s := range x.f.Sections {
		if uint64(s.Offset) <= addr && addr <= uint64(s.Offset+s.Size-1) {
			n := uint64(s.Offset+s.Size) - addr
			return io.NewSectionReader(s, int64(addr-uint64(s.Offset)), int64(n)), nil
		}
	}
	return nil, errors.New("address not mapped")
}
// Code generated by "stringer -type Attr -trimprefix=Attr"; DO NOT EDIT.
package dwarf
import "strconv"
// Compile-time guard emitted by stringer: if any Attr constant's value
// changes, one of the constant array indexes below becomes negative or
// out of range and the build fails, signaling that the generated
// String data must be regenerated. Generated code — do not edit.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[AttrSibling-1]
	_ = x[AttrLocation-2]
	_ = x[AttrName-3]
	_ = x[AttrOrdering-9]
	_ = x[AttrByteSize-11]
	_ = x[AttrBitOffset-12]
	_ = x[AttrBitSize-13]
	_ = x[AttrStmtList-16]
	_ = x[AttrLowpc-17]
	_ = x[AttrHighpc-18]
	_ = x[AttrLanguage-19]
	_ = x[AttrDiscr-21]
	_ = x[AttrDiscrValue-22]
	_ = x[AttrVisibility-23]
	_ = x[AttrImport-24]
	_ = x[AttrStringLength-25]
	_ = x[AttrCommonRef-26]
	_ = x[AttrCompDir-27]
	_ = x[AttrConstValue-28]
	_ = x[AttrContainingType-29]
	_ = x[AttrDefaultValue-30]
	_ = x[AttrInline-32]
	_ = x[AttrIsOptional-33]
	_ = x[AttrLowerBound-34]
	_ = x[AttrProducer-37]
	_ = x[AttrPrototyped-39]
	_ = x[AttrReturnAddr-42]
	_ = x[AttrStartScope-44]
	_ = x[AttrStrideSize-46]
	_ = x[AttrUpperBound-47]
	_ = x[AttrAbstractOrigin-49]
	_ = x[AttrAccessibility-50]
	_ = x[AttrAddrClass-51]
	_ = x[AttrArtificial-52]
	_ = x[AttrBaseTypes-53]
	_ = x[AttrCalling-54]
	_ = x[AttrCount-55]
	_ = x[AttrDataMemberLoc-56]
	_ = x[AttrDeclColumn-57]
	_ = x[AttrDeclFile-58]
	_ = x[AttrDeclLine-59]
	_ = x[AttrDeclaration-60]
	_ = x[AttrDiscrList-61]
	_ = x[AttrEncoding-62]
	_ = x[AttrExternal-63]
	_ = x[AttrFrameBase-64]
	_ = x[AttrFriend-65]
	_ = x[AttrIdentifierCase-66]
	_ = x[AttrMacroInfo-67]
	_ = x[AttrNamelistItem-68]
	_ = x[AttrPriority-69]
	_ = x[AttrSegment-70]
	_ = x[AttrSpecification-71]
	_ = x[AttrStaticLink-72]
	_ = x[AttrType-73]
	_ = x[AttrUseLocation-74]
	_ = x[AttrVarParam-75]
	_ = x[AttrVirtuality-76]
	_ = x[AttrVtableElemLoc-77]
	_ = x[AttrAllocated-78]
	_ = x[AttrAssociated-79]
	_ = x[AttrDataLocation-80]
	_ = x[AttrStride-81]
	_ = x[AttrEntrypc-82]
	_ = x[AttrUseUTF8-83]
	_ = x[AttrExtension-84]
	_ = x[AttrRanges-85]
	_ = x[AttrTrampoline-86]
	_ = x[AttrCallColumn-87]
	_ = x[AttrCallFile-88]
	_ = x[AttrCallLine-89]
	_ = x[AttrDescription-90]
	_ = x[AttrBinaryScale-91]
	_ = x[AttrDecimalScale-92]
	_ = x[AttrSmall-93]
	_ = x[AttrDecimalSign-94]
	_ = x[AttrDigitCount-95]
	_ = x[AttrPictureString-96]
	_ = x[AttrMutable-97]
	_ = x[AttrThreadsScaled-98]
	_ = x[AttrExplicit-99]
	_ = x[AttrObjectPointer-100]
	_ = x[AttrEndianity-101]
	_ = x[AttrElemental-102]
	_ = x[AttrPure-103]
	_ = x[AttrRecursive-104]
	_ = x[AttrSignature-105]
	_ = x[AttrMainSubprogram-106]
	_ = x[AttrDataBitOffset-107]
	_ = x[AttrConstExpr-108]
	_ = x[AttrEnumClass-109]
	_ = x[AttrLinkageName-110]
	_ = x[AttrStringLengthBitSize-111]
	_ = x[AttrStringLengthByteSize-112]
	_ = x[AttrRank-113]
	_ = x[AttrStrOffsetsBase-114]
	_ = x[AttrAddrBase-115]
	_ = x[AttrRnglistsBase-116]
	_ = x[AttrDwoName-118]
	_ = x[AttrReference-119]
	_ = x[AttrRvalueReference-120]
	_ = x[AttrMacros-121]
	_ = x[AttrCallAllCalls-122]
	_ = x[AttrCallAllSourceCalls-123]
	_ = x[AttrCallAllTailCalls-124]
	_ = x[AttrCallReturnPC-125]
	_ = x[AttrCallValue-126]
	_ = x[AttrCallOrigin-127]
	_ = x[AttrCallParameter-128]
	_ = x[AttrCallPC-129]
	_ = x[AttrCallTailCall-130]
	_ = x[AttrCallTarget-131]
	_ = x[AttrCallTargetClobbered-132]
	_ = x[AttrCallDataLocation-133]
	_ = x[AttrCallDataValue-134]
	_ = x[AttrNoreturn-135]
	_ = x[AttrAlignment-136]
	_ = x[AttrExportSymbols-137]
	_ = x[AttrDeleted-138]
	_ = x[AttrDefaulted-139]
	_ = x[AttrLoclistsBase-140]
}
// _Attr_name concatenates every attribute name; _Attr_map slices it by
// attribute value. Both are produced by stringer — do not edit by hand.
const _Attr_name = "SiblingLocationNameOrderingByteSizeBitOffsetBitSizeStmtListLowpcHighpcLanguageDiscrDiscrValueVisibilityImportStringLengthCommonRefCompDirConstValueContainingTypeDefaultValueInlineIsOptionalLowerBoundProducerPrototypedReturnAddrStartScopeStrideSizeUpperBoundAbstractOriginAccessibilityAddrClassArtificialBaseTypesCallingCountDataMemberLocDeclColumnDeclFileDeclLineDeclarationDiscrListEncodingExternalFrameBaseFriendIdentifierCaseMacroInfoNamelistItemPrioritySegmentSpecificationStaticLinkTypeUseLocationVarParamVirtualityVtableElemLocAllocatedAssociatedDataLocationStrideEntrypcUseUTF8ExtensionRangesTrampolineCallColumnCallFileCallLineDescriptionBinaryScaleDecimalScaleSmallDecimalSignDigitCountPictureStringMutableThreadsScaledExplicitObjectPointerEndianityElementalPureRecursiveSignatureMainSubprogramDataBitOffsetConstExprEnumClassLinkageNameStringLengthBitSizeStringLengthByteSizeRankStrOffsetsBaseAddrBaseRnglistsBaseDwoNameReferenceRvalueReferenceMacrosCallAllCallsCallAllSourceCallsCallAllTailCallsCallReturnPCCallValueCallOriginCallParameterCallPCCallTailCallCallTargetCallTargetClobberedCallDataLocationCallDataValueNoreturnAlignmentExportSymbolsDeletedDefaultedLoclistsBase"

// A map is used (rather than an index array) because Attr values are
// sparse. Generated by stringer — do not edit by hand.
var _Attr_map = map[Attr]string{
	1:   _Attr_name[0:7],
	2:   _Attr_name[7:15],
	3:   _Attr_name[15:19],
	9:   _Attr_name[19:27],
	11:  _Attr_name[27:35],
	12:  _Attr_name[35:44],
	13:  _Attr_name[44:51],
	16:  _Attr_name[51:59],
	17:  _Attr_name[59:64],
	18:  _Attr_name[64:70],
	19:  _Attr_name[70:78],
	21:  _Attr_name[78:83],
	22:  _Attr_name[83:93],
	23:  _Attr_name[93:103],
	24:  _Attr_name[103:109],
	25:  _Attr_name[109:121],
	26:  _Attr_name[121:130],
	27:  _Attr_name[130:137],
	28:  _Attr_name[137:147],
	29:  _Attr_name[147:161],
	30:  _Attr_name[161:173],
	32:  _Attr_name[173:179],
	33:  _Attr_name[179:189],
	34:  _Attr_name[189:199],
	37:  _Attr_name[199:207],
	39:  _Attr_name[207:217],
	42:  _Attr_name[217:227],
	44:  _Attr_name[227:237],
	46:  _Attr_name[237:247],
	47:  _Attr_name[247:257],
	49:  _Attr_name[257:271],
	50:  _Attr_name[271:284],
	51:  _Attr_name[284:293],
	52:  _Attr_name[293:303],
	53:  _Attr_name[303:312],
	54:  _Attr_name[312:319],
	55:  _Attr_name[319:324],
	56:  _Attr_name[324:337],
	57:  _Attr_name[337:347],
	58:  _Attr_name[347:355],
	59:  _Attr_name[355:363],
	60:  _Attr_name[363:374],
	61:  _Attr_name[374:383],
	62:  _Attr_name[383:391],
	63:  _Attr_name[391:399],
	64:  _Attr_name[399:408],
	65:  _Attr_name[408:414],
	66:  _Attr_name[414:428],
	67:  _Attr_name[428:437],
	68:  _Attr_name[437:449],
	69:  _Attr_name[449:457],
	70:  _Attr_name[457:464],
	71:  _Attr_name[464:477],
	72:  _Attr_name[477:487],
	73:  _Attr_name[487:491],
	74:  _Attr_name[491:502],
	75:  _Attr_name[502:510],
	76:  _Attr_name[510:520],
	77:  _Attr_name[520:533],
	78:  _Attr_name[533:542],
	79:  _Attr_name[542:552],
	80:  _Attr_name[552:564],
	81:  _Attr_name[564:570],
	82:  _Attr_name[570:577],
	83:  _Attr_name[577:584],
	84:  _Attr_name[584:593],
	85:  _Attr_name[593:599],
	86:  _Attr_name[599:609],
	87:  _Attr_name[609:619],
	88:  _Attr_name[619:627],
	89:  _Attr_name[627:635],
	90:  _Attr_name[635:646],
	91:  _Attr_name[646:657],
	92:  _Attr_name[657:669],
	93:  _Attr_name[669:674],
	94:  _Attr_name[674:685],
	95:  _Attr_name[685:695],
	96:  _Attr_name[695:708],
	97:  _Attr_name[708:715],
	98:  _Attr_name[715:728],
	99:  _Attr_name[728:736],
	100: _Attr_name[736:749],
	101: _Attr_name[749:758],
	102: _Attr_name[758:767],
	103: _Attr_name[767:771],
	104: _Attr_name[771:780],
	105: _Attr_name[780:789],
	106: _Attr_name[789:803],
	107: _Attr_name[803:816],
	108: _Attr_name[816:825],
	109: _Attr_name[825:834],
	110: _Attr_name[834:845],
	111: _Attr_name[845:864],
	112: _Attr_name[864:884],
	113: _Attr_name[884:888],
	114: _Attr_name[888:902],
	115: _Attr_name[902:910],
	116: _Attr_name[910:922],
	118: _Attr_name[922:929],
	119: _Attr_name[929:938],
	120: _Attr_name[938:953],
	121: _Attr_name[953:959],
	122: _Attr_name[959:971],
	123: _Attr_name[971:989],
	124: _Attr_name[989:1005],
	125: _Attr_name[1005:1017],
	126: _Attr_name[1017:1026],
	127: _Attr_name[1026:1036],
	128: _Attr_name[1036:1049],
	129: _Attr_name[1049:1055],
	130: _Attr_name[1055:1067],
	131: _Attr_name[1067:1077],
	132: _Attr_name[1077:1096],
	133: _Attr_name[1096:1112],
	134: _Attr_name[1112:1125],
	135: _Attr_name[1125:1133],
	136: _Attr_name[1133:1142],
	137: _Attr_name[1142:1155],
	138: _Attr_name[1155:1162],
	139: _Attr_name[1162:1171],
	140: _Attr_name[1171:1183],
}
// String returns the attribute's name, or "Attr(n)" for values not in
// the generated lookup map. Generated by stringer — do not edit by hand.
func (i Attr) String() string {
	if str, ok := _Attr_map[i]; ok {
		return str
	}
	return "Attr(" + strconv.FormatInt(int64(i), 10) + ")"
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Buffered reading and decoding of DWARF data streams.
package dwarf
import (
"bytes"
"encoding/binary"
"strconv"
)
// Data buffer being decoded.
type buf struct {
	dwarf  *Data            // parent DWARF data
	order  binary.ByteOrder // byte order for fixed-width reads
	format dataFormat       // version / address size / 64-bit-ness context
	name   string           // section name, used in error messages
	off    Offset           // section offset of data[0]
	data   []byte           // remaining undecoded bytes
	err    error            // first decoding error, set by error()
}
// Data format, other than byte order. This affects the handling of
// certain field formats. unknownFormat supplies the all-unknown
// implementation for contexts with no format information.
type dataFormat interface {
	// DWARF version number. Zero means unknown.
	version() int
	// 64-bit DWARF format? isKnown reports whether the answer is known at all.
	dwarf64() (dwarf64 bool, isKnown bool)
	// Size of an address, in bytes. Zero means unknown.
	addrsize() int
}
// Some parts of DWARF have no data format, e.g., abbrevs.
type unknownFormat struct{}

// version reports an unknown DWARF version.
func (unknownFormat) version() int { return 0 }

// dwarf64 reports that the 32- vs 64-bit question is unanswered.
func (unknownFormat) dwarf64() (bool, bool) { return false, false }

// addrsize reports an unknown address size.
func (unknownFormat) addrsize() int { return 0 }
// makeBuf returns a decoding buffer over data, which begins at section
// offset off within the named section. The error field starts nil.
func makeBuf(d *Data, format dataFormat, name string, off Offset, data []byte) buf {
	return buf{
		dwarf:  d,
		order:  d.order,
		format: format,
		name:   name,
		off:    off,
		data:   data,
	}
}
// uint8 consumes one byte, or records an underflow error and returns 0
// when the buffer is empty.
func (b *buf) uint8() uint8 {
	if len(b.data) == 0 {
		b.error("underflow")
		return 0
	}
	v := b.data[0]
	b.data = b.data[1:]
	b.off++
	return v
}
// bytes consumes and returns the next n bytes, or records an underflow
// error and returns nil if n is negative or exceeds what remains.
func (b *buf) bytes(n int) []byte {
	if n < 0 || n > len(b.data) {
		b.error("underflow")
		return nil
	}
	out := b.data[:n]
	b.data = b.data[n:]
	b.off += Offset(n)
	return out
}
// skip discards the next n bytes, recording underflow via bytes.
func (b *buf) skip(n int) {
	b.bytes(n)
}
// string consumes a NUL-terminated string, including its terminator;
// a missing terminator is recorded as underflow.
func (b *buf) string() string {
	end := bytes.IndexByte(b.data, 0)
	if end < 0 {
		b.error("underflow")
		return ""
	}
	s := string(b.data[:end])
	b.data = b.data[end+1:]
	b.off += Offset(end + 1)
	return s
}
// uint16 consumes a 2-byte unsigned integer in the buffer's byte order.
func (b *buf) uint16() uint16 {
	if p := b.bytes(2); p != nil {
		return b.order.Uint16(p)
	}
	return 0
}
// uint24 consumes a 3-byte unsigned integer. The byte order comes from
// the parent Data's bigEndian flag rather than b.order.
func (b *buf) uint24() uint32 {
	p := b.bytes(3)
	if p == nil {
		return 0
	}
	if b.dwarf.bigEndian {
		return uint32(p[0])<<16 | uint32(p[1])<<8 | uint32(p[2])
	}
	return uint32(p[2])<<16 | uint32(p[1])<<8 | uint32(p[0])
}
// uint32 consumes a 4-byte unsigned integer in the buffer's byte order.
func (b *buf) uint32() uint32 {
	if p := b.bytes(4); p != nil {
		return b.order.Uint32(p)
	}
	return 0
}
// uint64 consumes an 8-byte unsigned integer in the buffer's byte order.
func (b *buf) uint64() uint64 {
	if p := b.bytes(8); p != nil {
		return b.order.Uint64(p)
	}
	return 0
}
// varint reads an unsigned LEB128 value: 7 bits per byte, little
// endian, with the 0x80 bit meaning another byte follows. bits is the
// number of payload bits consumed. If the data runs out before a
// terminating byte, nothing is consumed and 0, 0 is returned.
func (b *buf) varint() (c uint64, bits uint) {
	for i, ub := range b.data {
		c |= uint64(ub&0x7F) << bits
		bits += 7
		if ub&0x80 == 0 {
			b.off += Offset(i + 1)
			b.data = b.data[i+1:]
			return c, bits
		}
	}
	return 0, 0
}
// uint reads an unsigned LEB128 value, discarding the bit count.
func (b *buf) uint() uint64 {
	v, _ := b.varint()
	return v
}
// int reads a signed LEB128 value: an unsigned varint sign-extended
// from the highest payload bit that was read.
func (b *buf) int() int64 {
	raw, bits := b.varint()
	v := int64(raw)
	if v&(1<<(bits-1)) != 0 {
		v |= -1 << bits
	}
	return v
}
// addr reads an address-sized unsigned integer, using the address size
// reported by the buffer's data format; an unsupported size is
// recorded as an error.
func (b *buf) addr() uint64 {
	switch b.format.addrsize() {
	case 8:
		return b.uint64()
	case 4:
		return uint64(b.uint32())
	case 2:
		return uint64(b.uint16())
	case 1:
		return uint64(b.uint8())
	}
	b.error("unknown address size")
	return 0
}
// unitLength reads a unit header's initial length field and reports
// whether the 64-bit DWARF format is in use (signaled by the
// 0xffffffff escape value; other values >= 0xfffffff0 are reserved).
func (b *buf) unitLength() (length Offset, dwarf64 bool) {
	length = Offset(b.uint32())
	switch {
	case length == 0xffffffff:
		dwarf64 = true
		length = Offset(b.uint64())
	case length >= 0xfffffff0:
		b.error("unit length has reserved value")
	}
	return length, dwarf64
}
// error records the first decoding error and clears the remaining data
// so that later getters return an endless stream of zero values.
func (b *buf) error(s string) {
	if b.err != nil {
		return
	}
	b.data = nil
	b.err = DecodeError{b.name, b.off, s}
}
// A DecodeError describes a failure while decoding a DWARF section.
type DecodeError struct {
	Name   string // section name, e.g. "abbrev"
	Offset Offset // section offset at which decoding failed
	Err    string // description of the problem
}
// Error describes the failure, naming the section and the hexadecimal
// offset at which decoding stopped.
func (e DecodeError) Error() string {
	msg := "decoding dwarf section " + e.Name
	msg += " at offset 0x" + strconv.FormatInt(int64(e.Offset), 16)
	return msg + ": " + e.Err
}
// Code generated by "stringer -type=Class"; DO NOT EDIT.
package dwarf
import "strconv"
// Compile-time guard emitted by stringer: if any Class constant's value
// changes, an index below goes out of range and the build fails,
// signaling that the generated data must be regenerated. Do not edit.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ClassUnknown-0]
	_ = x[ClassAddress-1]
	_ = x[ClassBlock-2]
	_ = x[ClassConstant-3]
	_ = x[ClassExprLoc-4]
	_ = x[ClassFlag-5]
	_ = x[ClassLinePtr-6]
	_ = x[ClassLocListPtr-7]
	_ = x[ClassMacPtr-8]
	_ = x[ClassRangeListPtr-9]
	_ = x[ClassReference-10]
	_ = x[ClassReferenceSig-11]
	_ = x[ClassString-12]
	_ = x[ClassReferenceAlt-13]
	_ = x[ClassStringAlt-14]
	_ = x[ClassAddrPtr-15]
	_ = x[ClassLocList-16]
	_ = x[ClassRngList-17]
	_ = x[ClassRngListsPtr-18]
	_ = x[ClassStrOffsetsPtr-19]
}
// _Class_name concatenates every Class name; _Class_index[i] is where
// the i'th name begins. Generated by stringer — do not edit by hand.
const _Class_name = "ClassUnknownClassAddressClassBlockClassConstantClassExprLocClassFlagClassLinePtrClassLocListPtrClassMacPtrClassRangeListPtrClassReferenceClassReferenceSigClassStringClassReferenceAltClassStringAltClassAddrPtrClassLocListClassRngListClassRngListsPtrClassStrOffsetsPtr"

var _Class_index = [...]uint16{0, 12, 24, 34, 47, 59, 68, 80, 95, 106, 123, 137, 154, 165, 182, 196, 208, 220, 232, 248, 266}
// String returns the class's name, or "Class(n)" for out-of-range
// values. Generated by stringer — do not edit by hand.
func (i Class) String() string {
	if i < 0 || i >= Class(len(_Class_index)-1) {
		return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Class_name[_Class_index[i]:_Class_index[i+1]]
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Constants
package dwarf
//go:generate stringer -type Attr -trimprefix=Attr

// An Attr identifies the attribute type in a DWARF [Entry.Field].
// The values match the DW_AT_* encodings of the DWARF specification.
type Attr uint32

const (
	AttrSibling        Attr = 0x01
	AttrLocation       Attr = 0x02
	AttrName           Attr = 0x03
	AttrOrdering       Attr = 0x09
	AttrByteSize       Attr = 0x0B
	AttrBitOffset      Attr = 0x0C
	AttrBitSize        Attr = 0x0D
	AttrStmtList       Attr = 0x10
	AttrLowpc          Attr = 0x11
	AttrHighpc         Attr = 0x12
	AttrLanguage       Attr = 0x13
	AttrDiscr          Attr = 0x15
	AttrDiscrValue     Attr = 0x16
	AttrVisibility     Attr = 0x17
	AttrImport         Attr = 0x18
	AttrStringLength   Attr = 0x19
	AttrCommonRef      Attr = 0x1A
	AttrCompDir        Attr = 0x1B
	AttrConstValue     Attr = 0x1C
	AttrContainingType Attr = 0x1D
	AttrDefaultValue   Attr = 0x1E
	AttrInline         Attr = 0x20
	AttrIsOptional     Attr = 0x21
	AttrLowerBound     Attr = 0x22
	AttrProducer       Attr = 0x25
	AttrPrototyped     Attr = 0x27
	AttrReturnAddr     Attr = 0x2A
	AttrStartScope     Attr = 0x2C
	AttrStrideSize     Attr = 0x2E
	AttrUpperBound     Attr = 0x2F
	AttrAbstractOrigin Attr = 0x31
	AttrAccessibility  Attr = 0x32
	AttrAddrClass      Attr = 0x33
	AttrArtificial     Attr = 0x34
	AttrBaseTypes      Attr = 0x35
	AttrCalling        Attr = 0x36
	AttrCount          Attr = 0x37
	AttrDataMemberLoc  Attr = 0x38
	AttrDeclColumn     Attr = 0x39
	AttrDeclFile       Attr = 0x3A
	AttrDeclLine       Attr = 0x3B
	AttrDeclaration    Attr = 0x3C
	AttrDiscrList      Attr = 0x3D
	AttrEncoding       Attr = 0x3E
	AttrExternal       Attr = 0x3F
	AttrFrameBase      Attr = 0x40
	AttrFriend         Attr = 0x41
	AttrIdentifierCase Attr = 0x42
	AttrMacroInfo      Attr = 0x43
	AttrNamelistItem   Attr = 0x44
	AttrPriority       Attr = 0x45
	AttrSegment        Attr = 0x46
	AttrSpecification  Attr = 0x47
	AttrStaticLink     Attr = 0x48
	AttrType           Attr = 0x49
	AttrUseLocation    Attr = 0x4A
	AttrVarParam       Attr = 0x4B
	AttrVirtuality     Attr = 0x4C
	AttrVtableElemLoc  Attr = 0x4D
	// The following are new in DWARF 3.
	AttrAllocated    Attr = 0x4E
	AttrAssociated   Attr = 0x4F
	AttrDataLocation Attr = 0x50
	AttrStride       Attr = 0x51
	AttrEntrypc      Attr = 0x52
	AttrUseUTF8      Attr = 0x53
	AttrExtension    Attr = 0x54
	AttrRanges       Attr = 0x55
	AttrTrampoline   Attr = 0x56
	AttrCallColumn   Attr = 0x57
	AttrCallFile     Attr = 0x58
	AttrCallLine     Attr = 0x59
	AttrDescription  Attr = 0x5A
	AttrBinaryScale  Attr = 0x5B
	AttrDecimalScale Attr = 0x5C
	AttrSmall        Attr = 0x5D
	AttrDecimalSign  Attr = 0x5E
	AttrDigitCount   Attr = 0x5F
	AttrPictureString Attr = 0x60
	AttrMutable       Attr = 0x61
	AttrThreadsScaled Attr = 0x62
	AttrExplicit      Attr = 0x63
	AttrObjectPointer Attr = 0x64
	AttrEndianity     Attr = 0x65
	AttrElemental     Attr = 0x66
	AttrPure          Attr = 0x67
	AttrRecursive     Attr = 0x68
	// The following are new in DWARF 4.
	AttrSignature      Attr = 0x69
	AttrMainSubprogram Attr = 0x6A
	AttrDataBitOffset  Attr = 0x6B
	AttrConstExpr      Attr = 0x6C
	AttrEnumClass      Attr = 0x6D
	AttrLinkageName    Attr = 0x6E
	// The following are new in DWARF 5.
	AttrStringLengthBitSize  Attr = 0x6F
	AttrStringLengthByteSize Attr = 0x70
	AttrRank                 Attr = 0x71
	AttrStrOffsetsBase       Attr = 0x72
	AttrAddrBase             Attr = 0x73
	AttrRnglistsBase         Attr = 0x74
	AttrDwoName              Attr = 0x76
	AttrReference            Attr = 0x77
	AttrRvalueReference      Attr = 0x78
	AttrMacros               Attr = 0x79
	AttrCallAllCalls         Attr = 0x7A
	AttrCallAllSourceCalls   Attr = 0x7B
	AttrCallAllTailCalls     Attr = 0x7C
	AttrCallReturnPC         Attr = 0x7D
	AttrCallValue            Attr = 0x7E
	AttrCallOrigin           Attr = 0x7F
	AttrCallParameter        Attr = 0x80
	AttrCallPC               Attr = 0x81
	AttrCallTailCall         Attr = 0x82
	AttrCallTarget           Attr = 0x83
	AttrCallTargetClobbered  Attr = 0x84
	AttrCallDataLocation     Attr = 0x85
	AttrCallDataValue        Attr = 0x86
	AttrNoreturn             Attr = 0x87
	AttrAlignment            Attr = 0x88
	AttrExportSymbols        Attr = 0x89
	AttrDeleted              Attr = 0x8A
	AttrDefaulted            Attr = 0x8B
	AttrLoclistsBase         Attr = 0x8C
)
// GoString returns a Go-syntax representation of the attribute, e.g.
// "dwarf.AttrName" for known values and "dwarf.Attr(n)" otherwise.
func (a Attr) GoString() string {
	str, ok := _Attr_map[a]
	if !ok {
		return "dwarf." + a.String()
	}
	return "dwarf.Attr" + str
}
// A format is a DWARF data encoding format. The values match the
// DW_FORM_* encodings of the DWARF specification.
type format uint32

const (
	// value formats
	formAddr        format = 0x01
	formDwarfBlock2 format = 0x03
	formDwarfBlock4 format = 0x04
	formData2       format = 0x05
	formData4       format = 0x06
	formData8       format = 0x07
	formString      format = 0x08
	formDwarfBlock  format = 0x09
	formDwarfBlock1 format = 0x0A
	formData1       format = 0x0B
	formFlag        format = 0x0C
	formSdata       format = 0x0D
	formStrp        format = 0x0E
	formUdata       format = 0x0F
	formRefAddr     format = 0x10
	formRef1        format = 0x11
	formRef2        format = 0x12
	formRef4        format = 0x13
	formRef8        format = 0x14
	formRefUdata    format = 0x15
	formIndirect    format = 0x16
	// The following are new in DWARF 4.
	formSecOffset   format = 0x17
	formExprloc     format = 0x18
	formFlagPresent format = 0x19
	formRefSig8     format = 0x20
	// The following are new in DWARF 5.
	formStrx          format = 0x1A
	formAddrx         format = 0x1B
	formRefSup4       format = 0x1C
	formStrpSup       format = 0x1D
	formData16        format = 0x1E
	formLineStrp      format = 0x1F
	formImplicitConst format = 0x21
	formLoclistx      format = 0x22
	formRnglistx      format = 0x23
	formRefSup8       format = 0x24
	formStrx1         format = 0x25
	formStrx2         format = 0x26
	formStrx3         format = 0x27
	formStrx4         format = 0x28
	formAddrx1        format = 0x29
	formAddrx2        format = 0x2A
	formAddrx3        format = 0x2B
	formAddrx4        format = 0x2C
	// Extensions for multi-file compression (.dwz)
	// http://www.dwarfstd.org/ShowIssue.php?issue=120604.1
	formGnuRefAlt  format = 0x1f20
	formGnuStrpAlt format = 0x1f21
)

//go:generate stringer -type Tag -trimprefix=Tag

// A Tag is the classification (the type) of an [Entry].
// The values match the DW_TAG_* encodings of the DWARF specification.
type Tag uint32

const (
	TagArrayType             Tag = 0x01
	TagClassType             Tag = 0x02
	TagEntryPoint            Tag = 0x03
	TagEnumerationType       Tag = 0x04
	TagFormalParameter       Tag = 0x05
	TagImportedDeclaration   Tag = 0x08
	TagLabel                 Tag = 0x0A
	TagLexDwarfBlock         Tag = 0x0B
	TagMember                Tag = 0x0D
	TagPointerType           Tag = 0x0F
	TagReferenceType         Tag = 0x10
	TagCompileUnit           Tag = 0x11
	TagStringType            Tag = 0x12
	TagStructType            Tag = 0x13
	TagSubroutineType        Tag = 0x15
	TagTypedef               Tag = 0x16
	TagUnionType             Tag = 0x17
	TagUnspecifiedParameters Tag = 0x18
	TagVariant               Tag = 0x19
	TagCommonDwarfBlock      Tag = 0x1A
	TagCommonInclusion       Tag = 0x1B
	TagInheritance           Tag = 0x1C
	TagInlinedSubroutine     Tag = 0x1D
	TagModule                Tag = 0x1E
	TagPtrToMemberType       Tag = 0x1F
	TagSetType               Tag = 0x20
	TagSubrangeType          Tag = 0x21
	TagWithStmt              Tag = 0x22
	TagAccessDeclaration     Tag = 0x23
	TagBaseType              Tag = 0x24
	TagCatchDwarfBlock       Tag = 0x25
	TagConstType             Tag = 0x26
	TagConstant              Tag = 0x27
	TagEnumerator            Tag = 0x28
	TagFileType              Tag = 0x29
	TagFriend                Tag = 0x2A
	TagNamelist              Tag = 0x2B
	TagNamelistItem          Tag = 0x2C
	TagPackedType            Tag = 0x2D
	TagSubprogram            Tag = 0x2E
	TagTemplateTypeParameter Tag = 0x2F
	TagTemplateValueParameter Tag = 0x30
	TagThrownType             Tag = 0x31
	TagTryDwarfBlock          Tag = 0x32
	TagVariantPart            Tag = 0x33
	TagVariable               Tag = 0x34
	TagVolatileType           Tag = 0x35
	// The following are new in DWARF 3.
	TagDwarfProcedure  Tag = 0x36
	TagRestrictType    Tag = 0x37
	TagInterfaceType   Tag = 0x38
	TagNamespace       Tag = 0x39
	TagImportedModule  Tag = 0x3A
	TagUnspecifiedType Tag = 0x3B
	TagPartialUnit     Tag = 0x3C
	TagImportedUnit    Tag = 0x3D
	TagMutableType     Tag = 0x3E // Later removed from DWARF.
	TagCondition       Tag = 0x3F
	TagSharedType      Tag = 0x40
	// The following are new in DWARF 4.
	TagTypeUnit             Tag = 0x41
	TagRvalueReferenceType  Tag = 0x42
	TagTemplateAlias        Tag = 0x43
	// The following are new in DWARF 5.
	TagCoarrayType       Tag = 0x44
	TagGenericSubrange   Tag = 0x45
	TagDynamicType       Tag = 0x46
	TagAtomicType        Tag = 0x47
	TagCallSite          Tag = 0x48
	TagCallSiteParameter Tag = 0x49
	TagSkeletonUnit      Tag = 0x4A
	TagImmutableType     Tag = 0x4B
)
// GoString returns a Go-syntax representation of the tag.
// NOTE(review): tags above TagTemplateAlias are rendered without the
// "Tag" prefix (e.g. "dwarf." + name); confirm this asymmetry is
// intended before relying on the exact output.
func (t Tag) GoString() string {
	if t > TagTemplateAlias {
		return "dwarf." + t.String()
	}
	return "dwarf.Tag" + t.String()
}
// Location expression operators.
// The debug info encodes value locations like 8(R3)
// as a sequence of these op codes.
// This package does not implement full expressions;
// the opPlusUconst operator is expected by the type parser.
// The values match the DW_OP_* encodings of the DWARF specification.
const (
	opAddr    = 0x03 /* 1 op, const addr */
	opDeref   = 0x06
	opConst1u = 0x08 /* 1 op, 1 byte const */
	opConst1s = 0x09 /*	" signed */
	opConst2u = 0x0A /* 1 op, 2 byte const  */
	opConst2s = 0x0B /*	" signed */
	opConst4u = 0x0C /* 1 op, 4 byte const */
	opConst4s = 0x0D /*	" signed */
	opConst8u = 0x0E /* 1 op, 8 byte const */
	opConst8s = 0x0F /*	" signed */
	opConstu  = 0x10 /* 1 op, LEB128 const */
	opConsts  = 0x11 /*	" signed */
	opDup     = 0x12
	opDrop    = 0x13
	opOver    = 0x14
	opPick    = 0x15 /* 1 op, 1 byte stack index */
	opSwap    = 0x16
	opRot     = 0x17
	opXderef  = 0x18
	opAbs     = 0x19
	opAnd     = 0x1A
	opDiv     = 0x1B
	opMinus   = 0x1C
	opMod     = 0x1D
	opMul     = 0x1E
	opNeg     = 0x1F
	opNot     = 0x20
	opOr      = 0x21
	opPlus    = 0x22
	opPlusUconst = 0x23 /* 1 op, ULEB128 addend */
	opShl        = 0x24
	opShr        = 0x25
	opShra       = 0x26
	opXor        = 0x27
	opSkip       = 0x2F /* 1 op, signed 2-byte constant */
	opBra        = 0x28 /* 1 op, signed 2-byte constant */
	opEq         = 0x29
	opGe         = 0x2A
	opGt         = 0x2B
	opLe         = 0x2C
	opLt         = 0x2D
	opNe         = 0x2E
	opLit0       = 0x30
	/* OpLitN = OpLit0 + N for N = 0..31 */
	opReg0 = 0x50
	/* OpRegN = OpReg0 + N for N = 0..31 */
	opBreg0 = 0x70 /* 1 op, signed LEB128 constant */
	/* OpBregN = OpBreg0 + N for N = 0..31 */
	opRegx       = 0x90 /* 1 op, ULEB128 register */
	opFbreg      = 0x91 /* 1 op, SLEB128 offset */
	opBregx      = 0x92 /* 2 op, ULEB128 reg; SLEB128 off */
	opPiece      = 0x93 /* 1 op, ULEB128 size of piece */
	opDerefSize  = 0x94 /* 1-byte size of data retrieved */
	opXderefSize = 0x95 /* 1-byte size of data retrieved */
	opNop        = 0x96
	// The following are new in DWARF 3.
	opPushObjAddr    = 0x97
	opCall2          = 0x98 /* 2-byte offset of DIE */
	opCall4          = 0x99 /* 4-byte offset of DIE */
	opCallRef        = 0x9A /* 4- or 8- byte offset of DIE */
	opFormTLSAddress = 0x9B
	opCallFrameCFA   = 0x9C
	opBitPiece       = 0x9D
	// The following are new in DWARF 4.
	opImplicitValue = 0x9E
	opStackValue    = 0x9F
	// The following a new in DWARF 5.
	opImplicitPointer = 0xA0
	opAddrx           = 0xA1
	opConstx          = 0xA2
	opEntryValue      = 0xA3
	opConstType       = 0xA4
	opRegvalType      = 0xA5
	opDerefType       = 0xA6
	opXderefType      = 0xA7
	opConvert         = 0xA8
	opReinterpret     = 0xA9
	/* 0xE0-0xFF reserved for user-specific */
)

// Basic type encodings -- the value for AttrEncoding in a TagBaseType Entry.
// The values match the DW_ATE_* encodings of the DWARF specification.
const (
	encAddress      = 0x01
	encBoolean      = 0x02
	encComplexFloat = 0x03
	encFloat        = 0x04
	encSigned       = 0x05
	encSignedChar   = 0x06
	encUnsigned     = 0x07
	encUnsignedChar = 0x08
	// The following are new in DWARF 3.
	encImaginaryFloat = 0x09
	encPackedDecimal  = 0x0A
	encNumericString  = 0x0B
	encEdited         = 0x0C
	encSignedFixed    = 0x0D
	encUnsignedFixed  = 0x0E
	encDecimalFloat   = 0x0F
	// The following are new in DWARF 4.
	encUTF = 0x10
	// The following are new in DWARF 5.
	encUCS   = 0x11
	encASCII = 0x12
)

// Statement program standard opcode encodings (DW_LNS_*).
const (
	lnsCopy           = 1
	lnsAdvancePC      = 2
	lnsAdvanceLine    = 3
	lnsSetFile        = 4
	lnsSetColumn      = 5
	lnsNegateStmt     = 6
	lnsSetBasicBlock  = 7
	lnsConstAddPC     = 8
	lnsFixedAdvancePC = 9
	// DWARF 3
	lnsSetPrologueEnd   = 10
	lnsSetEpilogueBegin = 11
	lnsSetISA           = 12
)

// Statement program extended opcode encodings (DW_LNE_*).
const (
	lneEndSequence = 1
	lneSetAddress  = 2
	lneDefineFile  = 3
	// DWARF 4
	lneSetDiscriminator = 4
)

// Line table directory and file name entry formats (DW_LNCT_*).
// These are new in DWARF 5.
const (
	lnctPath           = 0x01
	lnctDirectoryIndex = 0x02
	lnctTimestamp      = 0x03
	lnctSize           = 0x04
	lnctMD5            = 0x05
)

// Location list entry codes (DW_LLE_*).
// These are new in DWARF 5.
const (
	lleEndOfList       = 0x00
	lleBaseAddressx    = 0x01
	lleStartxEndx      = 0x02
	lleStartxLength    = 0x03
	lleOffsetPair      = 0x04
	lleDefaultLocation = 0x05
	lleBaseAddress     = 0x06
	lleStartEnd        = 0x07
	lleStartLength     = 0x08
)

// Unit header unit type encodings (DW_UT_*).
// These are new in DWARF 5.
const (
	utCompile      = 0x01
	utType         = 0x02
	utPartial      = 0x03
	utSkeleton     = 0x04
	utSplitCompile = 0x05
	utSplitType    = 0x06
)

// Opcodes for DWARFv5 debug_rnglists section (DW_RLE_*).
const (
	rleEndOfList    = 0x0
	rleBaseAddressx = 0x1
	rleStartxEndx   = 0x2
	rleStartxLength = 0x3
	rleOffsetPair   = 0x4
	rleBaseAddress  = 0x5
	rleStartEnd     = 0x6
	rleStartLength  = 0x7
)
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// DWARF debug information entry parser.
// An entry is a sequence of data items of a given format.
// The first word in the entry is an index into what DWARF
// calls the ``abbreviation table.'' An abbreviation is really
// just a type descriptor: it's an array of attribute tag/value format pairs.
package dwarf
import (
"encoding/binary"
"errors"
"fmt"
"strconv"
)
// a single entry's description: a sequence of attributes
type abbrev struct {
	tag      Tag      // kind of entry this abbreviation describes
	children bool     // whether entries using this abbreviation have children
	field    []afield // attribute descriptors, in encoding order
}

// afield describes one attribute within an abbreviation.
type afield struct {
	attr  Attr   // which attribute
	fmt   format // how its value is encoded
	class Class  // value class derived from fmt (and attr, for older DWARF)
	val   int64  // for formImplicitConst: the constant stored in the abbrev itself
}

// a map from entry format ids to their descriptions
type abbrevTable map[uint32]abbrev
// parseAbbrev returns the abbreviation table that starts at byte off
// in the .debug_abbrev section, parsing it on first use and caching
// the result in d.abbrevCache.
func (d *Data) parseAbbrev(off uint64, vers int) (abbrevTable, error) {
	if m, ok := d.abbrevCache[off]; ok {
		return m, nil
	}
	data := d.abbrev
	if off > uint64(len(data)) {
		// Out-of-range offset: decode against an empty buffer so the
		// first read reports underflow.
		data = nil
	} else {
		data = data[off:]
	}
	b := makeBuf(d, unknownFormat{}, "abbrev", 0, data)
	// Error handling is simplified by the buf getters
	// returning an endless stream of 0s after an error.
	m := make(abbrevTable)
	for {
		// Table ends with id == 0.
		id := uint32(b.uint())
		if id == 0 {
			break
		}
		// First pass: walk over attributes, counting them, using a
		// copy of b so the real buffer is not consumed.
		n := 0
		b1 := b // Read from copy of b.
		b1.uint()  // tag (skipped on this pass)
		b1.uint8() // children flag (skipped on this pass)
		for {
			tag := b1.uint()
			fmt := b1.uint()
			if tag == 0 && fmt == 0 {
				// (0, 0) pair terminates the attribute list.
				break
			}
			if format(fmt) == formImplicitConst {
				// formImplicitConst carries its value inline; skip it.
				b1.int()
			}
			n++
		}
		if b1.err != nil {
			return nil, b1.err
		}
		// Second pass: walk over the same attributes again, this time
		// writing them down into a pre-sized field slice.
		var a abbrev
		a.tag = Tag(b.uint())
		a.children = b.uint8() != 0
		a.field = make([]afield, n)
		for i := range a.field {
			a.field[i].attr = Attr(b.uint())
			a.field[i].fmt = format(b.uint())
			a.field[i].class = formToClass(a.field[i].fmt, a.field[i].attr, vers, &b)
			if a.field[i].fmt == formImplicitConst {
				a.field[i].val = b.int()
			}
		}
		// Consume the terminating (0, 0) pair counted by the first pass.
		b.uint()
		b.uint()
		m[id] = a
	}
	if b.err != nil {
		return nil, b.err
	}
	d.abbrevCache[off] = m
	return m, nil
}
// attrIsExprloc indicates attributes that allow exprloc values that
// are encoded as block values in DWARF 2 and 3. See DWARF 4, Figure
// 20. Used by formToClass to promote ClassBlock to ClassExprLoc.
var attrIsExprloc = map[Attr]bool{
	AttrLocation:      true,
	AttrByteSize:      true,
	AttrBitOffset:     true,
	AttrBitSize:       true,
	AttrStringLength:  true,
	AttrLowerBound:    true,
	AttrReturnAddr:    true,
	AttrStrideSize:    true,
	AttrUpperBound:    true,
	AttrCount:         true,
	AttrDataMemberLoc: true,
	AttrFrameBase:     true,
	AttrSegment:       true,
	AttrStaticLink:    true,
	AttrUseLocation:   true,
	AttrVtableElemLoc: true,
	AttrAllocated:     true,
	AttrAssociated:    true,
	AttrDataLocation:  true,
	AttrStride:        true,
}

// attrPtrClass indicates the *ptr class of attributes that have
// encoding formSecOffset in DWARF 4 or formData* in DWARF 2 and 3.
// Used by formToClass to disambiguate section-offset classes.
var attrPtrClass = map[Attr]Class{
	AttrLocation:      ClassLocListPtr,
	AttrStmtList:      ClassLinePtr,
	AttrStringLength:  ClassLocListPtr,
	AttrReturnAddr:    ClassLocListPtr,
	AttrStartScope:    ClassRangeListPtr,
	AttrDataMemberLoc: ClassLocListPtr,
	AttrFrameBase:     ClassLocListPtr,
	AttrMacroInfo:     ClassMacPtr,
	AttrSegment:       ClassLocListPtr,
	AttrStaticLink:    ClassLocListPtr,
	AttrUseLocation:   ClassLocListPtr,
	AttrVtableElemLoc: ClassLocListPtr,
	AttrRanges:        ClassRangeListPtr,
	// The following are new in DWARF 5.
	AttrStrOffsetsBase: ClassStrOffsetsPtr,
	AttrAddrBase:       ClassAddrPtr,
	AttrRnglistsBase:   ClassRngListsPtr,
	AttrLoclistsBase:   ClassLocListPtr,
}
// formToClass returns the DWARF 4 Class for the given form. If the
// DWARF version is less than 4, it will disambiguate some forms
// depending on the attribute. An unknown form is recorded as an error
// on b and classified as 0.
func formToClass(form format, attr Attr, vers int, b *buf) Class {
	switch form {
	default:
		b.error("cannot determine class of unknown attribute form")
		return 0
	case formIndirect:
		return ClassUnknown
	case formAddr, formAddrx, formAddrx1, formAddrx2, formAddrx3, formAddrx4:
		return ClassAddress
	case formDwarfBlock1, formDwarfBlock2, formDwarfBlock4, formDwarfBlock:
		// In DWARF 2 and 3, ClassExprLoc was encoded as a
		// block. DWARF 4 distinguishes ClassBlock and
		// ClassExprLoc, but there are no attributes that can
		// be both, so we also promote ClassBlock values in
		// DWARF 4 that should be ClassExprLoc in case
		// producers get this wrong.
		if attrIsExprloc[attr] {
			return ClassExprLoc
		}
		return ClassBlock
	case formData1, formData2, formData4, formData8, formSdata, formUdata, formData16, formImplicitConst:
		// In DWARF 2 and 3, ClassPtr was encoded as a
		// constant. Unlike ClassExprLoc/ClassBlock, some
		// DWARF 4 attributes need to distinguish Class*Ptr
		// from ClassConstant, so we only do this promotion
		// for versions 2 and 3.
		if class, ok := attrPtrClass[attr]; vers < 4 && ok {
			return class
		}
		return ClassConstant
	case formFlag, formFlagPresent:
		return ClassFlag
	case formRefAddr, formRef1, formRef2, formRef4, formRef8, formRefUdata, formRefSup4, formRefSup8:
		return ClassReference
	case formRefSig8:
		return ClassReferenceSig
	case formString, formStrp, formStrx, formStrpSup, formLineStrp, formStrx1, formStrx2, formStrx3, formStrx4:
		return ClassString
	case formSecOffset:
		// DWARF 4 defines four *ptr classes, but doesn't
		// distinguish them in the encoding. Disambiguate
		// these classes using the attribute.
		if class, ok := attrPtrClass[attr]; ok {
			return class
		}
		return ClassUnknown
	case formExprloc:
		return ClassExprLoc
	case formGnuRefAlt:
		return ClassReferenceAlt
	case formGnuStrpAlt:
		return ClassStringAlt
	case formLoclistx:
		return ClassLocList
	case formRnglistx:
		return ClassRngList
	}
}
// An Entry is a sequence of attribute/value pairs.
type Entry struct {
	Offset   Offset // offset of Entry in DWARF info
	Tag      Tag    // tag (kind of Entry)
	Children bool   // whether Entry is followed by children
	Field    []Field // attribute/value pairs, in the order they were encoded
}
// A Field is a single attribute/value pair in an [Entry].
//
// A value can be one of several "attribute classes" defined by DWARF.
// The Go types corresponding to each class are:
//
//	DWARF class       Go type        Class
//	-----------       -------        -----
//	address           uint64         ClassAddress
//	block             []byte         ClassBlock
//	constant          int64          ClassConstant
//	flag              bool           ClassFlag
//	reference
//	  to info         dwarf.Offset   ClassReference
//	  to type unit    uint64         ClassReferenceSig
//	string            string         ClassString
//	exprloc           []byte         ClassExprLoc
//	lineptr           int64          ClassLinePtr
//	loclistptr        int64          ClassLocListPtr
//	macptr            int64          ClassMacPtr
//	rangelistptr      int64          ClassRangeListPtr
//
// For unrecognized or vendor-defined attributes, [Class] may be
// [ClassUnknown].
type Field struct {
	Attr  Attr  // the attribute being described
	Val   any   // the value; its Go type depends on Class (see table above)
	Class Class // the DWARF 4 class of Val
}
// A Class is the DWARF 4 class of an attribute value.
//
// In general, a given attribute's value may take on one of several
// possible classes defined by DWARF, each of which leads to a
// slightly different interpretation of the attribute.
//
// DWARF version 4 distinguishes attribute value classes more finely
// than previous versions of DWARF. The reader will disambiguate
// coarser classes from earlier versions of DWARF into the appropriate
// DWARF 4 class. For example, DWARF 2 uses "constant" for constants
// as well as all types of section offsets, but the reader will
// canonicalize attributes in DWARF 2 files that refer to section
// offsets to one of the Class*Ptr classes, even though these classes
// were only defined in DWARF 3.
//
// The zero value is ClassUnknown.
type Class int
// The defined attribute value classes. ClassUnknown is the zero value.
const (
	// ClassUnknown represents values of unknown DWARF class.
	ClassUnknown Class = iota

	// ClassAddress represents values of type uint64 that are
	// addresses on the target machine.
	ClassAddress

	// ClassBlock represents values of type []byte whose
	// interpretation depends on the attribute.
	ClassBlock

	// ClassConstant represents values of type int64 that are
	// constants. The interpretation of this constant depends on
	// the attribute.
	ClassConstant

	// ClassExprLoc represents values of type []byte that contain
	// an encoded DWARF expression or location description.
	ClassExprLoc

	// ClassFlag represents values of type bool.
	ClassFlag

	// ClassLinePtr represents values that are an int64 offset
	// into the "line" section.
	ClassLinePtr

	// ClassLocListPtr represents values that are an int64 offset
	// into the "loclist" section.
	ClassLocListPtr

	// ClassMacPtr represents values that are an int64 offset into
	// the "mac" section.
	ClassMacPtr

	// ClassRangeListPtr represents values that are an int64 offset into
	// the "rangelist" section.
	ClassRangeListPtr

	// ClassReference represents values that are an Offset offset
	// of an Entry in the info section (for use with Reader.Seek).
	// The DWARF specification combines ClassReference and
	// ClassReferenceSig into class "reference".
	ClassReference

	// ClassReferenceSig represents values that are a uint64 type
	// signature referencing a type Entry.
	ClassReferenceSig

	// ClassString represents values that are strings. If the
	// compilation unit specifies the AttrUseUTF8 flag (strongly
	// recommended), the string value will be encoded in UTF-8.
	// Otherwise, the encoding is unspecified.
	ClassString

	// ClassReferenceAlt represents values of type int64 that are
	// an offset into the DWARF "info" section of an alternate
	// object file.
	ClassReferenceAlt

	// ClassStringAlt represents values of type int64 that are an
	// offset into the DWARF string section of an alternate object
	// file.
	ClassStringAlt

	// ClassAddrPtr represents values that are an int64 offset
	// into the "addr" section.
	ClassAddrPtr

	// ClassLocList represents values that are an int64 offset
	// into the "loclists" section.
	ClassLocList

	// ClassRngList represents values that are a uint64 offset
	// from the base of the "rnglists" section.
	ClassRngList

	// ClassRngListsPtr represents values that are an int64 offset
	// into the "rnglists" section. These are used as the base for
	// ClassRngList values.
	ClassRngListsPtr

	// ClassStrOffsetsPtr represents values that are an int64
	// offset into the "str_offsets" section.
	ClassStrOffsetsPtr
)
//go:generate stringer -type=Class

// GoString returns a Go-syntax representation of the class,
// e.g. "dwarf.ClassAddress".
func (i Class) GoString() string {
	s := i.String()
	return "dwarf." + s
}
// Val returns the value associated with attribute [Attr] in [Entry],
// or nil if there is no such attribute.
//
// A common idiom is to merge the check for nil return with
// the check that the value has the expected dynamic type, as in:
//
//	v, ok := e.Val(AttrSibling).(int64)
func (e *Entry) Val(a Attr) any {
	f := e.AttrField(a)
	if f == nil {
		return nil
	}
	return f.Val
}
// AttrField returns the [Field] associated with attribute [Attr] in
// [Entry], or nil if there is no such attribute.
func (e *Entry) AttrField(a Attr) *Field {
	for i := range e.Field {
		f := &e.Field[i]
		if f.Attr == a {
			return f
		}
	}
	return nil
}
// An Offset represents the location of an [Entry] within the DWARF info.
// It is a byte offset from the start of the "info" section.
// (See [Reader.Seek].)
type Offset uint32
// Entry reads a single entry from buf, decoding
// according to the given abbreviation table.
//
// cu is the entry for the current compilation unit (may be nil) and u
// describes the unit being decoded. On a decoding error it records the
// error in b.err and returns nil.
func (b *buf) entry(cu *Entry, u *unit) *Entry {
	atab, ubase, vers := u.atable, u.base, u.vers
	off := b.off
	id := uint32(b.uint())
	if id == 0 {
		// Abbreviation code 0 is a null entry (end-of-children
		// marker); it decodes to an empty Entry with Tag 0.
		return &Entry{}
	}
	a, ok := atab[id]
	if !ok {
		b.error("unknown abbreviation table index")
		return nil
	}
	e := &Entry{
		Offset:   off,
		Tag:      a.tag,
		Children: a.children,
		Field:    make([]Field, len(a.field)),
	}
	// resolveStrx resolves a DW_FORM_strx* string: off is the (already
	// byte-scaled) position within the str_offsets table at strBase,
	// which in turn yields an offset into the str section.
	resolveStrx := func(strBase, off uint64) string {
		off += strBase
		if uint64(int(off)) != off {
			b.error("DW_FORM_strx offset out of range")
		}

		b1 := makeBuf(b.dwarf, b.format, "str_offsets", 0, b.dwarf.strOffsets)
		b1.skip(int(off))
		is64, _ := b.format.dwarf64()
		if is64 {
			off = b1.uint64()
		} else {
			off = uint64(b1.uint32())
		}
		if b1.err != nil {
			b.err = b1.err
			return ""
		}
		if uint64(int(off)) != off {
			b.error("DW_FORM_strx indirect offset out of range")
		}
		b1 = makeBuf(b.dwarf, b.format, "str", 0, b.dwarf.str)
		b1.skip(int(off))
		val := b1.string()
		if b1.err != nil {
			b.err = b1.err
		}
		return val
	}
	// resolveRnglistx resolves a DW_FORM_rnglistx index: the index
	// selects a 4- or 8-byte slot in the offset table at rnglistsBase,
	// and the stored offset is relative to that base.
	resolveRnglistx := func(rnglistsBase, off uint64) uint64 {
		is64, _ := b.format.dwarf64()
		if is64 {
			off *= 8
		} else {
			off *= 4
		}
		off += rnglistsBase
		if uint64(int(off)) != off {
			b.error("DW_FORM_rnglistx offset out of range")
		}

		b1 := makeBuf(b.dwarf, b.format, "rnglists", 0, b.dwarf.rngLists)
		b1.skip(int(off))
		if is64 {
			off = b1.uint64()
		} else {
			off = uint64(b1.uint32())
		}
		if b1.err != nil {
			b.err = b1.err
			return 0
		}
		if uint64(int(off)) != off {
			b.error("DW_FORM_rnglistx indirect offset out of range")
		}
		return rnglistsBase + off
	}
	for i := range e.Field {
		e.Field[i].Attr = a.field[i].attr
		e.Field[i].Class = a.field[i].class
		fmt := a.field[i].fmt
		if fmt == formIndirect {
			// The form itself is stored in the entry data;
			// reread it and recompute the class.
			fmt = format(b.uint())
			e.Field[i].Class = formToClass(fmt, a.field[i].attr, vers, b)
		}
		var val any
		switch fmt {
		default:
			b.error("unknown entry attr format 0x" + strconv.FormatInt(int64(fmt), 16))

		// address
		case formAddr:
			val = b.addr()
		case formAddrx, formAddrx1, formAddrx2, formAddrx3, formAddrx4:
			// An index into the .debug_addr table.
			var off uint64
			switch fmt {
			case formAddrx:
				off = b.uint()
			case formAddrx1:
				off = uint64(b.uint8())
			case formAddrx2:
				off = uint64(b.uint16())
			case formAddrx3:
				off = uint64(b.uint24())
			case formAddrx4:
				off = uint64(b.uint32())
			}
			if b.dwarf.addr == nil {
				b.error("DW_FORM_addrx with no .debug_addr section")
			}
			if b.err != nil {
				return nil
			}
			addrBase := int64(u.addrBase())
			var err error
			val, err = b.dwarf.debugAddr(b.format, uint64(addrBase), off)
			if err != nil {
				if b.err == nil {
					b.err = err
				}
				return nil
			}

		// block
		case formDwarfBlock1:
			val = b.bytes(int(b.uint8()))
		case formDwarfBlock2:
			val = b.bytes(int(b.uint16()))
		case formDwarfBlock4:
			val = b.bytes(int(b.uint32()))
		case formDwarfBlock:
			val = b.bytes(int(b.uint()))

		// constant
		case formData1:
			val = int64(b.uint8())
		case formData2:
			val = int64(b.uint16())
		case formData4:
			val = int64(b.uint32())
		case formData8:
			val = int64(b.uint64())
		case formData16:
			val = b.bytes(16)
		case formSdata:
			val = b.int()
		case formUdata:
			val = int64(b.uint())
		case formImplicitConst:
			// The value lives in the abbreviation table, not in the
			// entry data.
			val = a.field[i].val

		// flag
		case formFlag:
			val = b.uint8() == 1
		// New in DWARF 4.
		case formFlagPresent:
			// The attribute is implicitly indicated as present, and no value is
			// encoded in the debugging information entry itself.
			val = true

		// reference to other entry
		case formRefAddr:
			vers := b.format.version()
			if vers == 0 {
				b.error("unknown version for DW_FORM_ref_addr")
			} else if vers == 2 {
				// In DWARF 2 this form was the size of an address.
				val = Offset(b.addr())
			} else {
				is64, known := b.format.dwarf64()
				if !known {
					b.error("unknown size for DW_FORM_ref_addr")
				} else if is64 {
					val = Offset(b.uint64())
				} else {
					val = Offset(b.uint32())
				}
			}
		case formRef1:
			val = Offset(b.uint8()) + ubase
		case formRef2:
			val = Offset(b.uint16()) + ubase
		case formRef4:
			val = Offset(b.uint32()) + ubase
		case formRef8:
			val = Offset(b.uint64()) + ubase
		case formRefUdata:
			val = Offset(b.uint()) + ubase

		// string
		case formString:
			val = b.string()
		case formStrp, formLineStrp:
			var off uint64 // offset into .debug_str
			is64, known := b.format.dwarf64()
			if !known {
				b.error("unknown size for DW_FORM_strp/line_strp")
			} else if is64 {
				off = b.uint64()
			} else {
				off = uint64(b.uint32())
			}
			if uint64(int(off)) != off {
				b.error("DW_FORM_strp/line_strp offset out of range")
			}
			if b.err != nil {
				return nil
			}
			var b1 buf
			if fmt == formStrp {
				b1 = makeBuf(b.dwarf, b.format, "str", 0, b.dwarf.str)
			} else {
				if len(b.dwarf.lineStr) == 0 {
					b.error("DW_FORM_line_strp with no .debug_line_str section")
					return nil
				}
				b1 = makeBuf(b.dwarf, b.format, "line_str", 0, b.dwarf.lineStr)
			}
			b1.skip(int(off))
			val = b1.string()
			if b1.err != nil {
				b.err = b1.err
				return nil
			}
		case formStrx, formStrx1, formStrx2, formStrx3, formStrx4:
			// An index into the .debug_str_offsets table.
			var off uint64
			switch fmt {
			case formStrx:
				off = b.uint()
			case formStrx1:
				off = uint64(b.uint8())
			case formStrx2:
				off = uint64(b.uint16())
			case formStrx3:
				off = uint64(b.uint24())
			case formStrx4:
				off = uint64(b.uint32())
			}
			if len(b.dwarf.strOffsets) == 0 {
				b.error("DW_FORM_strx with no .debug_str_offsets section")
			}
			is64, known := b.format.dwarf64()
			if !known {
				b.error("unknown offset size for DW_FORM_strx")
			}
			if b.err != nil {
				return nil
			}
			// Convert the index into a byte offset into the table.
			if is64 {
				off *= 8
			} else {
				off *= 4
			}
			strBase := int64(u.strOffsetsBase())
			val = resolveStrx(uint64(strBase), off)
		case formStrpSup:
			is64, known := b.format.dwarf64()
			if !known {
				b.error("unknown size for DW_FORM_strp_sup")
			} else if is64 {
				val = b.uint64()
			} else {
				val = b.uint32()
			}

		// lineptr, loclistptr, macptr, rangelistptr
		// New in DWARF 4, but clang can generate them with -gdwarf-2.
		// Section reference, replacing use of formData4 and formData8.
		case formSecOffset, formGnuRefAlt, formGnuStrpAlt:
			is64, known := b.format.dwarf64()
			if !known {
				b.error("unknown size for form 0x" + strconv.FormatInt(int64(fmt), 16))
			} else if is64 {
				val = int64(b.uint64())
			} else {
				val = int64(b.uint32())
			}

		// exprloc
		// New in DWARF 4.
		case formExprloc:
			val = b.bytes(int(b.uint()))

		// reference
		// New in DWARF 4.
		case formRefSig8:
			// 64-bit type signature.
			val = b.uint64()
		case formRefSup4:
			val = b.uint32()
		case formRefSup8:
			val = b.uint64()

		// loclist
		case formLoclistx:
			val = b.uint()

		// rnglist
		case formRnglistx:
			off := b.uint()
			rnglistsBase := int64(u.rngListsBase())
			val = resolveRnglistx(uint64(rnglistsBase), off)
		}
		e.Field[i].Val = val
	}
	if b.err != nil {
		return nil
	}
	return e
}
// A Reader allows reading [Entry] structures from a DWARF “info” section.
// The [Entry] structures are arranged in a tree. The [Reader.Next] function
// returns successive entries from a pre-order traversal of the tree.
// If an entry has children, its Children field will be true, and the children
// follow, terminated by an [Entry] with [Tag] 0.
type Reader struct {
	b            buf
	d            *Data
	err          error
	unit         int    // index of the current unit in d.unit
	lastUnit     bool   // set if last entry returned by Next is TagCompileUnit/TagPartialUnit
	lastChildren bool   // .Children of last entry returned by Next
	lastSibling  Offset // .Val(AttrSibling) of last entry returned by Next
	cu           *Entry // current compilation unit
}
// Reader returns a new Reader for [Data].
// The reader is positioned at byte offset 0 in the DWARF “info” section.
func (d *Data) Reader() *Reader {
	rd := new(Reader)
	rd.d = d
	rd.Seek(0)
	return rd
}
// AddressSize returns the size in bytes of addresses in the current
// compilation unit.
func (r *Reader) AddressSize() int {
	u := &r.d.unit[r.unit]
	return u.asize
}
// ByteOrder returns the byte order in the current compilation unit.
func (r *Reader) ByteOrder() binary.ByteOrder {
	order := r.b.order
	return order
}
// Seek positions the [Reader] at offset off in the encoded entry stream.
// Offset 0 can be used to denote the first entry.
func (r *Reader) Seek(off Offset) {
	r.err = nil
	r.lastChildren = false
	d := r.d
	if off == 0 {
		// Rewind to the first unit, if there is one.
		if len(d.unit) == 0 {
			return
		}
		first := &d.unit[0]
		r.unit = 0
		r.b = makeBuf(d, first, "info", first.off, first.data)
		r.collectDwarf5BaseOffsets(first)
		r.cu = nil
		return
	}
	idx := d.offsetToUnit(off)
	if idx == -1 {
		r.err = errors.New("offset out of range")
		return
	}
	if idx != r.unit {
		// Moving into a different unit invalidates the cached
		// compilation unit entry.
		r.cu = nil
	}
	u := &d.unit[idx]
	r.unit = idx
	r.b = makeBuf(d, u, "info", off, u.data[off-u.off:])
	r.collectDwarf5BaseOffsets(u)
}
// maybeNextUnit advances to the next unit if this one is finished.
func (r *Reader) maybeNextUnit() {
	for len(r.b.data) == 0 {
		if r.unit+1 >= len(r.d.unit) {
			return
		}
		r.nextUnit()
	}
}
// nextUnit advances to the next unit.
func (r *Reader) nextUnit() {
	r.unit++
	next := &r.d.unit[r.unit]
	r.cu = nil
	r.b = makeBuf(r.d, next, "info", next.off, next.data)
	r.collectDwarf5BaseOffsets(next)
}
// collectDwarf5BaseOffsets lazily gathers the DWARF 5 base offsets for
// unit u, recording any failure in r.err. It is a no-op for pre-DWARF 5
// units and for units already processed.
func (r *Reader) collectDwarf5BaseOffsets(u *unit) {
	if u.vers < 5 {
		return
	}
	if u.unit5 != nil {
		return
	}
	u.unit5 = new(unit5)
	err := r.d.collectDwarf5BaseOffsets(u)
	if err != nil {
		r.err = err
	}
}
// Next reads the next entry from the encoded entry stream.
// It returns nil, nil when it reaches the end of the section.
// It returns an error if the current offset is invalid or the data at the
// offset cannot be decoded as a valid [Entry].
func (r *Reader) Next() (*Entry, error) {
	if r.err != nil {
		return nil, r.err
	}
	r.maybeNextUnit()
	if len(r.b.data) == 0 {
		// No data left in any unit: end of section.
		return nil, nil
	}
	u := &r.d.unit[r.unit]
	e := r.b.entry(r.cu, u)
	if err := r.b.err; err != nil {
		r.err = err
		return nil, err
	}
	r.lastUnit = false
	if e == nil {
		r.lastChildren = false
		return e, nil
	}
	// Remember whether this entry has children and, if so, where its
	// next sibling lives, so SkipChildren can take a shortcut.
	r.lastChildren = e.Children
	if r.lastChildren {
		r.lastSibling, _ = e.Val(AttrSibling).(Offset)
	}
	switch e.Tag {
	case TagCompileUnit, TagPartialUnit:
		r.lastUnit = true
		r.cu = e
	}
	return e, nil
}
// SkipChildren skips over the child entries associated with
// the last [Entry] returned by [Reader.Next]. If that [Entry] did not have
// children or [Reader.Next] has not been called, SkipChildren is a no-op.
func (r *Reader) SkipChildren() {
	if r.err != nil || !r.lastChildren {
		return
	}
	// A sibling attribute on the last entry gives the offset of the
	// next sibling directly, so the child subtrees need not be decoded.
	if r.lastSibling >= r.b.off {
		r.Seek(r.lastSibling)
		return
	}
	// A unit entry's children fill the rest of the unit, so just jump
	// to the next unit.
	if r.lastUnit && r.unit+1 < len(r.d.unit) {
		r.nextUnit()
		return
	}
	// Otherwise decode and discard entries until this level's
	// terminating null entry.
	for {
		e, err := r.Next()
		switch {
		case err != nil, e == nil, e.Tag == 0:
			return
		case e.Children:
			r.SkipChildren()
		}
	}
}
// clone returns a copy of the reader. This is used by the typeReader
// interface.
func (r *Reader) clone() typeReader {
	d := r.d
	return d.Reader()
}
// offset returns the current buffer offset. This is used by the
// typeReader interface.
func (r *Reader) offset() Offset {
	off := r.b.off
	return off
}
// SeekPC returns the [Entry] for the compilation unit that includes pc,
// and positions the reader to read the children of that unit. If pc
// is not covered by any unit, SeekPC returns [ErrUnknownPC] and the
// position of the reader is undefined.
//
// Because compilation units can describe multiple regions of the
// executable, in the worst case SeekPC must search through all the
// ranges in all the compilation units. Each call to SeekPC starts the
// search at the compilation unit of the last call, so in general
// looking up a series of PCs will be faster if they are sorted. If
// the caller wishes to do repeated fast PC lookups, it should build
// an appropriate index using the Ranges method.
func (r *Reader) SeekPC(pc uint64) (*Entry, error) {
	// Round-robin over every unit, starting from the one found by the
	// previous call.
	uidx := r.unit
	for n := len(r.d.unit); n > 0; n-- {
		if uidx >= len(r.d.unit) {
			uidx = 0
		}
		r.err = nil
		r.lastChildren = false
		r.unit = uidx
		r.cu = nil
		u := &r.d.unit[uidx]
		r.b = makeBuf(r.d, u, "info", u.off, u.data)
		r.collectDwarf5BaseOffsets(u)
		e, err := r.Next()
		if err != nil {
			return nil, err
		}
		if e == nil || e.Tag == 0 {
			return nil, ErrUnknownPC
		}
		ranges, err := r.d.Ranges(e)
		if err != nil {
			return nil, err
		}
		for _, rng := range ranges {
			if rng[0] <= pc && pc < rng[1] {
				return e, nil
			}
		}
		uidx++
	}
	return nil, ErrUnknownPC
}
// Ranges returns the PC ranges covered by e, a slice of [low,high) pairs.
// Only some entry types, such as [TagCompileUnit] or [TagSubprogram], have PC
// ranges; for others, this will return nil with no error.
func (d *Data) Ranges(e *Entry) ([][2]uint64, error) {
	var ret [][2]uint64
	low, lowOK := e.Val(AttrLowpc).(uint64)
	var high uint64
	var highOK bool
	highField := e.AttrField(AttrHighpc)
	if highField != nil {
		switch highField.Class {
		case ClassAddress:
			// Highpc is an absolute address.
			high, highOK = highField.Val.(uint64)
		case ClassConstant:
			// Highpc is an offset from lowpc.
			off, ok := highField.Val.(int64)
			if ok {
				high = low + uint64(off)
				highOK = true
			}
		}
	}
	if lowOK && highOK {
		ret = append(ret, [2]uint64{low, high})
	}
	// Locate the unit containing e to decide which range-list encoding
	// (DWARF 2-4 .debug_ranges vs DWARF 5 .debug_rnglists) applies.
	var u *unit
	if uidx := d.offsetToUnit(e.Offset); uidx >= 0 && uidx < len(d.unit) {
		u = &d.unit[uidx]
	}
	if u != nil && u.vers >= 5 && d.rngLists != nil {
		// DWARF version 5 and later
		field := e.AttrField(AttrRanges)
		if field == nil {
			return ret, nil
		}
		switch field.Class {
		case ClassRangeListPtr:
			// A direct offset into the rnglists section.
			ranges, rangesOK := field.Val.(int64)
			if !rangesOK {
				return ret, nil
			}
			cu, base, err := d.baseAddressForEntry(e)
			if err != nil {
				return nil, err
			}
			return d.dwarf5Ranges(u, cu, base, ranges, ret)
		case ClassRngList:
			// An offset already resolved through the rnglists
			// offset table (DW_FORM_rnglistx).
			rnglist, ok := field.Val.(uint64)
			if !ok {
				return ret, nil
			}
			cu, base, err := d.baseAddressForEntry(e)
			if err != nil {
				return nil, err
			}
			return d.dwarf5Ranges(u, cu, base, int64(rnglist), ret)
		default:
			return ret, nil
		}
	}
	// DWARF version 2 through 4
	ranges, rangesOK := e.Val(AttrRanges).(int64)
	if rangesOK && d.ranges != nil {
		_, base, err := d.baseAddressForEntry(e)
		if err != nil {
			return nil, err
		}
		return d.dwarf2Ranges(u, base, ranges, ret)
	}
	return ret, nil
}
// baseAddressForEntry returns the compilation unit entry for e and the
// initial base address to be used when looking up e's range list.
//
// DWARF specifies that this should be the lowpc attribute of the
// enclosing compilation unit, however comments in gdb/dwarf2read.c say
// that some versions of GCC use the entrypc attribute, so we check
// that too.
func (d *Data) baseAddressForEntry(e *Entry) (*Entry, uint64, error) {
	cu := e
	if e.Tag != TagCompileUnit {
		// Decode the first entry of e's unit to obtain its
		// compilation unit entry.
		i := d.offsetToUnit(e.Offset)
		if i == -1 {
			return nil, 0, errors.New("no unit for entry")
		}
		u := &d.unit[i]
		b := makeBuf(d, u, "info", u.off, u.data)
		cu = b.entry(nil, u)
		if b.err != nil {
			return nil, 0, b.err
		}
	}
	if pc, ok := cu.Val(AttrEntrypc).(uint64); ok {
		return cu, pc, nil
	}
	if pc, ok := cu.Val(AttrLowpc).(uint64); ok {
		return cu, pc, nil
	}
	return cu, 0, nil
}
// dwarf2Ranges decodes a DWARF 2-4 .debug_ranges list starting at
// offset ranges, appending the decoded [low,high) pairs to ret.
func (d *Data) dwarf2Ranges(u *unit, base uint64, ranges int64, ret [][2]uint64) ([][2]uint64, error) {
	if ranges < 0 || ranges > int64(len(d.ranges)) {
		return nil, fmt.Errorf("invalid range offset %d (max %d)", ranges, len(d.ranges))
	}
	rb := makeBuf(d, u, "ranges", Offset(ranges), d.ranges[ranges:])
	for len(rb.data) > 0 {
		low := rb.addr()
		high := rb.addr()
		if low == 0 && high == 0 {
			// End-of-list marker.
			break
		}
		if low == ^uint64(0)>>uint((8-u.addrsize())*8) {
			// Base-address selection entry: "high" is the new base
			// for subsequent pairs.
			base = high
		} else {
			ret = append(ret, [2]uint64{base + low, base + high})
		}
	}
	return ret, nil
}
// dwarf5Ranges interprets a debug_rnglists sequence, see DWARFv5 section
// 2.17.3 (page 53). It decodes the sequence starting at offset ranges,
// appending the resulting [low,high) pairs to ret. base is the initial
// base address for offset-pair entries; cu (may be nil) supplies the
// unit's AttrAddrBase for indexed-address entries.
func (d *Data) dwarf5Ranges(u *unit, cu *Entry, base uint64, ranges int64, ret [][2]uint64) ([][2]uint64, error) {
	if ranges < 0 || ranges > int64(len(d.rngLists)) {
		// Report the size of the section actually being indexed
		// (.debug_rnglists); the original message wrongly printed
		// len(d.ranges).
		return nil, fmt.Errorf("invalid rnglist offset %d (max %d)", ranges, len(d.rngLists))
	}
	var addrBase int64
	if cu != nil {
		addrBase, _ = cu.Val(AttrAddrBase).(int64)
	}
	buf := makeBuf(d, u, "rnglists", 0, d.rngLists)
	buf.skip(int(ranges))
	for {
		opcode := buf.uint8()
		switch opcode {
		case rleEndOfList:
			if buf.err != nil {
				return nil, buf.err
			}
			return ret, nil
		case rleBaseAddressx:
			// New base address, given as an index into debug_addr.
			baseIdx := buf.uint()
			var err error
			base, err = d.debugAddr(u, uint64(addrBase), baseIdx)
			if err != nil {
				return nil, err
			}
		case rleStartxEndx:
			startIdx := buf.uint()
			endIdx := buf.uint()
			start, err := d.debugAddr(u, uint64(addrBase), startIdx)
			if err != nil {
				return nil, err
			}
			end, err := d.debugAddr(u, uint64(addrBase), endIdx)
			if err != nil {
				return nil, err
			}
			ret = append(ret, [2]uint64{start, end})
		case rleStartxLength:
			startIdx := buf.uint()
			// "length" rather than "len" to avoid shadowing the builtin.
			length := buf.uint()
			start, err := d.debugAddr(u, uint64(addrBase), startIdx)
			if err != nil {
				return nil, err
			}
			ret = append(ret, [2]uint64{start, start + length})
		case rleOffsetPair:
			// Both endpoints are offsets from the current base address.
			off1 := buf.uint()
			off2 := buf.uint()
			ret = append(ret, [2]uint64{base + off1, base + off2})
		case rleBaseAddress:
			base = buf.addr()
		case rleStartEnd:
			start := buf.addr()
			end := buf.addr()
			ret = append(ret, [2]uint64{start, end})
		case rleStartLength:
			start := buf.addr()
			length := buf.uint()
			ret = append(ret, [2]uint64{start, start + length})
		}
	}
}
// debugAddr returns the address at index idx of the debug_addr table
// that begins at byte offset addrBase.
func (d *Data) debugAddr(format dataFormat, addrBase, idx uint64) (uint64, error) {
	off := addrBase + idx*uint64(format.addrsize())
	if uint64(int(off)) != off {
		return 0, errors.New("offset out of range")
	}
	ab := makeBuf(d, format, "addr", 0, d.addr)
	ab.skip(int(off))
	val := ab.addr()
	if ab.err != nil {
		return 0, ab.err
	}
	return val, nil
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dwarf
import (
"errors"
"fmt"
"io"
"path"
"strings"
)
// A LineReader reads a sequence of [LineEntry] structures from a DWARF
// "line" section for a single compilation unit. LineEntries occur in
// order of increasing PC and each [LineEntry] gives metadata for the
// instructions from that [LineEntry]'s PC to just before the next
// [LineEntry]'s PC. The last entry will have the [LineEntry.EndSequence] field set.
type LineReader struct {
	buf buf
	// Original .debug_line section data. Used by Seek.
	section []byte
	str     []byte // .debug_str
	lineStr []byte // .debug_line_str
	// Header information, as parsed by readHeader.
	version              uint16 // line table format version (2-5)
	addrsize             int    // size in bytes of target addresses
	segmentSelectorSize  int    // DWARF 5 only; 0 for earlier versions
	minInstructionLength int
	maxOpsPerInstruction int
	defaultIsStmt        bool
	lineBase             int
	lineRange            int
	opcodeBase           int
	opcodeLengths        []int
	directories          []string
	fileEntries          []*LineFile
	programOffset        Offset // section offset of line number program
	endOffset            Offset // section offset of byte following program
	initialFileEntries   int    // initial length of fileEntries
	// Current line number program state machine registers
	state     LineEntry // public state
	fileIndex int       // private state
}
// A LineEntry is a row in a DWARF line table.
type LineEntry struct {
	// Address is the program-counter value of a machine
	// instruction generated by the compiler. This LineEntry
	// applies to each instruction from Address to just before the
	// Address of the next LineEntry.
	Address uint64

	// OpIndex is the index of an operation within a VLIW
	// instruction. The index of the first operation is 0. For
	// non-VLIW architectures, it will always be 0. Address and
	// OpIndex together form an operation pointer that can
	// reference any individual operation within the instruction
	// stream.
	OpIndex int

	// File is the source file corresponding to these
	// instructions.
	File *LineFile

	// Line is the source code line number corresponding to these
	// instructions. Lines are numbered beginning at 1. It may be
	// 0 if these instructions cannot be attributed to any source
	// line.
	Line int

	// Column is the column number within the source line of these
	// instructions. Columns are numbered beginning at 1. It may
	// be 0 to indicate the "left edge" of the line.
	Column int

	// IsStmt indicates that Address is a recommended breakpoint
	// location, such as the beginning of a line, statement, or a
	// distinct subpart of a statement.
	IsStmt bool

	// BasicBlock indicates that Address is the beginning of a
	// basic block.
	BasicBlock bool

	// PrologueEnd indicates that Address is one (of possibly
	// many) PCs where execution should be suspended for a
	// breakpoint on entry to the containing function.
	//
	// Added in DWARF 3.
	PrologueEnd bool

	// EpilogueBegin indicates that Address is one (of possibly
	// many) PCs where execution should be suspended for a
	// breakpoint on exit from this function.
	//
	// Added in DWARF 3.
	EpilogueBegin bool

	// ISA is the instruction set architecture for these
	// instructions. Possible ISA values should be defined by the
	// applicable ABI specification.
	//
	// Added in DWARF 3.
	ISA int

	// Discriminator is an arbitrary integer indicating the block
	// to which these instructions belong. It serves to
	// distinguish among multiple blocks that may all be associated
	// with the same source file, line, and column. Where only one
	// block exists for a given source position, it should be 0.
	//
	// Added in DWARF 4.
	Discriminator int

	// EndSequence indicates that Address is the first byte after
	// the end of a sequence of target machine instructions. If it
	// is set, only this and the Address field are meaningful. A
	// line number table may contain information for multiple
	// potentially disjoint instruction sequences. The last entry
	// in a line table should always have EndSequence set.
	EndSequence bool
}
// A LineFile is a source file referenced by a DWARF line table entry.
type LineFile struct {
	Name   string // file path, joined with its directory when known
	Mtime  uint64 // Implementation defined modification time, or 0 if unknown
	Length int    // File length, or 0 if unknown
}
// LineReader returns a new reader for the line table of compilation
// unit cu, which must be an [Entry] with tag [TagCompileUnit].
//
// If this compilation unit has no line table, it returns nil, nil.
func (d *Data) LineReader(cu *Entry) (*LineReader, error) {
	if d.line == nil {
		// No line tables available.
		return nil, nil
	}
	// The unit's AttrStmtList attribute gives the offset of its line
	// table within the "line" section.
	off, ok := cu.Val(AttrStmtList).(int64)
	if !ok {
		// cu has no line table.
		return nil, nil
	}
	if off < 0 || off > int64(len(d.line)) {
		return nil, errors.New("AttrStmtList value out of range")
	}
	// AttrCompDir is optional if all file names are absolute. Use
	// the empty string if it's not present.
	compDir, _ := cu.Val(AttrCompDir).(string)
	u := &d.unit[d.offsetToUnit(cu.Offset)]
	lr := LineReader{
		buf:     makeBuf(d, u, "line", Offset(off), d.line[off:]),
		section: d.line,
		str:     d.str,
		lineStr: d.lineStr,
	}
	// Parse the header; the compilation directory implicitly becomes
	// directories[0].
	if err := lr.readHeader(compDir); err != nil {
		return nil, err
	}
	// Position the state machine at the start of the program.
	lr.Reset()
	return &lr, nil
}
// readHeader reads the line number program header from r.buf and sets
// all of the header fields in r.
//
// compDir is the compilation directory from the unit's AttrCompDir
// attribute ("" if absent); for pre-DWARF 5 tables it becomes the
// implicit directories[0] and the base for relative paths.
func (r *LineReader) readHeader(compDir string) error {
	buf := &r.buf

	// Read basic header fields [DWARF2 6.2.4].
	hdrOffset := buf.off
	unitLength, dwarf64 := buf.unitLength()
	r.endOffset = buf.off + unitLength
	if r.endOffset > buf.off+Offset(len(buf.data)) {
		return DecodeError{"line", hdrOffset, fmt.Sprintf("line table end %d exceeds section size %d", r.endOffset, buf.off+Offset(len(buf.data)))}
	}
	r.version = buf.uint16()
	if buf.err == nil && (r.version < 2 || r.version > 5) {
		// DWARF goes to all this effort to make new opcodes
		// backward-compatible, and then adds fields right in
		// the middle of the header in new versions, so we're
		// picky about only supporting known line table
		// versions.
		return DecodeError{"line", hdrOffset, fmt.Sprintf("unknown line table version %d", r.version)}
	}
	if r.version >= 5 {
		// DWARF 5 carries the address and segment selector sizes
		// in the header itself.
		r.addrsize = int(buf.uint8())
		r.segmentSelectorSize = int(buf.uint8())
	} else {
		r.addrsize = buf.format.addrsize()
		r.segmentSelectorSize = 0
	}
	var headerLength Offset
	if dwarf64 {
		headerLength = Offset(buf.uint64())
	} else {
		headerLength = Offset(buf.uint32())
	}
	programOffset := buf.off + headerLength
	if programOffset > r.endOffset {
		return DecodeError{"line", hdrOffset, fmt.Sprintf("malformed line table: program offset %d exceeds end offset %d", programOffset, r.endOffset)}
	}
	r.programOffset = programOffset
	r.minInstructionLength = int(buf.uint8())
	if r.version >= 4 {
		// [DWARF4 6.2.4]
		r.maxOpsPerInstruction = int(buf.uint8())
	} else {
		r.maxOpsPerInstruction = 1
	}
	r.defaultIsStmt = buf.uint8() != 0
	r.lineBase = int(int8(buf.uint8()))
	r.lineRange = int(buf.uint8())

	// Validate header.
	if buf.err != nil {
		return buf.err
	}
	if r.maxOpsPerInstruction == 0 {
		return DecodeError{"line", hdrOffset, "invalid maximum operations per instruction: 0"}
	}
	if r.lineRange == 0 {
		return DecodeError{"line", hdrOffset, "invalid line range: 0"}
	}

	// Read standard opcode length table. This table starts with opcode 1.
	r.opcodeBase = int(buf.uint8())
	r.opcodeLengths = make([]int, r.opcodeBase)
	for i := 1; i < r.opcodeBase; i++ {
		r.opcodeLengths[i] = int(buf.uint8())
	}

	// Validate opcode lengths.
	if buf.err != nil {
		return buf.err
	}
	for i, length := range r.opcodeLengths {
		if known, ok := knownOpcodeLengths[i]; ok && known != length {
			return DecodeError{"line", hdrOffset, fmt.Sprintf("opcode %d expected to have length %d, but has length %d", i, known, length)}
		}
	}

	if r.version < 5 {
		// Read include directories table.
		r.directories = []string{compDir}
		for {
			directory := buf.string()
			if buf.err != nil {
				return buf.err
			}
			if len(directory) == 0 {
				break
			}
			if !pathIsAbs(directory) {
				// Relative paths are implicitly relative to
				// the compilation directory.
				directory = pathJoin(compDir, directory)
			}
			r.directories = append(r.directories, directory)
		}

		// Read file name list. File numbering starts with 1,
		// so leave the first entry nil.
		r.fileEntries = make([]*LineFile, 1)
		for {
			if done, err := r.readFileEntry(); err != nil {
				return err
			} else if done {
				break
			}
		}
	} else {
		// DWARF 5 describes directory and file entries with a
		// per-table format description (LNCT codes and forms).
		dirFormat := r.readLNCTFormat()
		c := buf.uint()
		r.directories = make([]string, c)
		for i := range r.directories {
			dir, _, _, err := r.readLNCT(dirFormat, dwarf64)
			if err != nil {
				return err
			}
			r.directories[i] = dir
		}
		fileFormat := r.readLNCTFormat()
		c = buf.uint()
		r.fileEntries = make([]*LineFile, c)
		for i := range r.fileEntries {
			name, mtime, size, err := r.readLNCT(fileFormat, dwarf64)
			if err != nil {
				return err
			}
			r.fileEntries[i] = &LineFile{name, mtime, int(size)}
		}
	}
	r.initialFileEntries = len(r.fileEntries)

	return buf.err
}
// lnctForm is a pair of an LNCT code and a form. This represents an
// entry in the directory name or file name description in the DWARF 5
// line number program header.
type lnctForm struct {
	lnct int    // DW_LNCT_* content type code
	form format // DWARF form used to encode the value
}
// readLNCTFormat reads an LNCT format description: a count byte
// followed by that many (content type, form) pairs.
func (r *LineReader) readLNCTFormat() []lnctForm {
	n := int(r.buf.uint8())
	forms := make([]lnctForm, n)
	for i := 0; i < n; i++ {
		forms[i].lnct = int(r.buf.uint())
		forms[i].form = format(r.buf.uint())
	}
	return forms
}
// readLNCT reads a sequence of LNCT entries and returns path information.
//
// s describes the content type and form of each field; dwarf64 selects
// 8-byte section offsets for formStrp/formLineStrp. When both a path
// and a directory index are present, the returned path is the two
// joined together.
func (r *LineReader) readLNCT(s []lnctForm, dwarf64 bool) (path string, mtime uint64, size uint64, err error) {
	var dir string
	for _, lf := range s {
		// First decode the raw value according to its form.
		var str string
		var val uint64
		switch lf.form {
		case formString:
			str = r.buf.string()
		case formStrp, formLineStrp:
			var off uint64
			if dwarf64 {
				off = r.buf.uint64()
			} else {
				off = uint64(r.buf.uint32())
			}
			if uint64(int(off)) != off {
				return "", 0, 0, DecodeError{"line", r.buf.off, "strp/line_strp offset out of range"}
			}
			var b1 buf
			if lf.form == formStrp {
				b1 = makeBuf(r.buf.dwarf, r.buf.format, "str", 0, r.str)
			} else {
				b1 = makeBuf(r.buf.dwarf, r.buf.format, "line_str", 0, r.lineStr)
			}
			b1.skip(int(off))
			str = b1.string()
			if b1.err != nil {
				return "", 0, 0, DecodeError{"line", r.buf.off, b1.err.Error()}
			}
		case formStrpSup:
			// Supplemental sections not yet supported.
			if dwarf64 {
				r.buf.uint64()
			} else {
				r.buf.uint32()
			}
		case formStrx:
			// .debug_line.dwo sections not yet supported.
			r.buf.uint()
		case formStrx1:
			r.buf.uint8()
		case formStrx2:
			r.buf.uint16()
		case formStrx3:
			r.buf.uint24()
		case formStrx4:
			r.buf.uint32()
		case formData1:
			val = uint64(r.buf.uint8())
		case formData2:
			val = uint64(r.buf.uint16())
		case formData4:
			val = uint64(r.buf.uint32())
		case formData8:
			val = r.buf.uint64()
		case formData16:
			r.buf.bytes(16)
		case formDwarfBlock:
			r.buf.bytes(int(r.buf.uint()))
		case formUdata:
			val = r.buf.uint()
		}
		// Then assign the decoded value to the result named by the
		// content type code.
		switch lf.lnct {
		case lnctPath:
			path = str
		case lnctDirectoryIndex:
			if val >= uint64(len(r.directories)) {
				return "", 0, 0, DecodeError{"line", r.buf.off, "directory index out of range"}
			}
			dir = r.directories[val]
		case lnctTimestamp:
			mtime = val
		case lnctSize:
			size = val
		case lnctMD5:
			// Ignored.
		}
	}
	if dir != "" && path != "" {
		path = pathJoin(dir, path)
	}
	return path, mtime, size, nil
}
// readFileEntry reads a file entry from either the header or a
// DW_LNE_define_file extended opcode and adds it to r.fileEntries. A
// true return value indicates that there are no more entries to read.
func (r *LineReader) readFileEntry() (bool, error) {
	name := r.buf.string()
	if r.buf.err != nil {
		return false, r.buf.err
	}
	// An empty name terminates the file table.
	if len(name) == 0 {
		return true, nil
	}
	off := r.buf.off
	dirIndex := int(r.buf.uint())
	if !pathIsAbs(name) {
		// Relative names are resolved against the include directory
		// selected by dirIndex.
		if dirIndex >= len(r.directories) {
			return false, DecodeError{"line", off, "directory index too large"}
		}
		name = pathJoin(r.directories[dirIndex], name)
	}
	// Modification time and file length must be consumed even though
	// callers may not use them.
	mtime := r.buf.uint()
	length := int(r.buf.uint())

	// If this is a dynamically added path and the cursor was
	// backed up, we may have already added this entry. Avoid
	// updating existing line table entries in this case. This
	// avoids an allocation and potential racy access to the slice
	// backing store if the user called Files.
	if len(r.fileEntries) < cap(r.fileEntries) {
		fe := r.fileEntries[:len(r.fileEntries)+1]
		if fe[len(fe)-1] != nil {
			// We already processed this addition.
			r.fileEntries = fe
			return false, nil
		}
	}
	r.fileEntries = append(r.fileEntries, &LineFile{name, mtime, length})
	return false, nil
}
// updateFile updates r.state.File after r.fileIndex has
// changed or r.fileEntries has changed.
func (r *LineReader) updateFile() {
	if r.fileIndex >= len(r.fileEntries) {
		// Index out of range: no current file.
		r.state.File = nil
		return
	}
	r.state.File = r.fileEntries[r.fileIndex]
}
// Next sets *entry to the next row in this line table and moves to
// the next row. If there are no more entries and the line table is
// properly terminated, it returns [io.EOF].
//
// Rows are always in order of increasing entry.Address, but
// entry.Line may go forward or backward.
func (r *LineReader) Next(entry *LineEntry) error {
	if err := r.buf.err; err != nil {
		return err
	}
	// Keep executing opcodes until one emits a line table row or the
	// program data is exhausted.
	for len(r.buf.data) > 0 {
		emitted := r.step(entry)
		switch {
		case r.buf.err != nil:
			return r.buf.err
		case emitted:
			return nil
		}
	}
	return io.EOF
}
// knownOpcodeLengths gives the opcode lengths (in varint arguments)
// of known standard opcodes. The header's declared opcode lengths are
// validated against this table when the line program header is read.
var knownOpcodeLengths = map[int]int{
	lnsCopy:             0,
	lnsAdvancePC:        1,
	lnsAdvanceLine:      1,
	lnsSetFile:          1,
	lnsNegateStmt:       0,
	lnsSetBasicBlock:    0,
	lnsConstAddPC:       0,
	lnsSetPrologueEnd:   0,
	lnsSetEpilogueBegin: 0,
	lnsSetISA:           1,
	// lnsFixedAdvancePC takes a uint8 rather than a varint; it's
	// unclear what length the header is supposed to claim, so
	// ignore it.
}
// step processes the next opcode and updates r.state. If the opcode
// emits a row in the line table, this updates *entry and returns
// true.
func (r *LineReader) step(entry *LineEntry) bool {
	opcode := int(r.buf.uint8())

	if opcode >= r.opcodeBase {
		// Special opcode [DWARF2 6.2.5.1, DWARF4 6.2.5.1].
		// A special opcode advances both the address and the line
		// register, then emits a row.
		adjustedOpcode := opcode - r.opcodeBase
		r.advancePC(adjustedOpcode / r.lineRange)
		lineDelta := r.lineBase + adjustedOpcode%r.lineRange
		r.state.Line += lineDelta
		goto emit
	}

	switch opcode {
	case 0:
		// Extended opcode [DWARF2 6.2.5.3]
		length := Offset(r.buf.uint())
		startOff := r.buf.off
		opcode := r.buf.uint8()

		switch opcode {
		case lneEndSequence:
			r.state.EndSequence = true
			*entry = r.state
			r.resetState()

		case lneSetAddress:
			switch r.addrsize {
			case 1:
				r.state.Address = uint64(r.buf.uint8())
			case 2:
				r.state.Address = uint64(r.buf.uint16())
			case 4:
				r.state.Address = uint64(r.buf.uint32())
			case 8:
				r.state.Address = r.buf.uint64()
			default:
				r.buf.error("unknown address size")
			}

		case lneDefineFile:
			if done, err := r.readFileEntry(); err != nil {
				r.buf.err = err
				return false
			} else if done {
				// An empty file name is only valid as a header
				// terminator, not inside a define_file opcode.
				r.buf.err = DecodeError{"line", startOff, "malformed DW_LNE_define_file operation"}
				return false
			}
			r.updateFile()

		case lneSetDiscriminator:
			// [DWARF4 6.2.5.3]
			r.state.Discriminator = int(r.buf.uint())
		}

		// Skip any unconsumed bytes so decoding stays aligned with
		// the opcode's declared length (this also skips over
		// unrecognized extended opcodes).
		r.buf.skip(int(startOff + length - r.buf.off))

		if opcode == lneEndSequence {
			return true
		}

	// Standard opcodes [DWARF2 6.2.5.2]
	case lnsCopy:
		goto emit

	case lnsAdvancePC:
		r.advancePC(int(r.buf.uint()))

	case lnsAdvanceLine:
		r.state.Line += int(r.buf.int())

	case lnsSetFile:
		r.fileIndex = int(r.buf.uint())
		r.updateFile()

	case lnsSetColumn:
		r.state.Column = int(r.buf.uint())

	case lnsNegateStmt:
		r.state.IsStmt = !r.state.IsStmt

	case lnsSetBasicBlock:
		r.state.BasicBlock = true

	case lnsConstAddPC:
		// Advance the address by the amount special opcode 255 would.
		r.advancePC((255 - r.opcodeBase) / r.lineRange)

	case lnsFixedAdvancePC:
		r.state.Address += uint64(r.buf.uint16())

	// DWARF3 standard opcodes [DWARF3 6.2.5.2]
	case lnsSetPrologueEnd:
		r.state.PrologueEnd = true

	case lnsSetEpilogueBegin:
		r.state.EpilogueBegin = true

	case lnsSetISA:
		r.state.ISA = int(r.buf.uint())

	default:
		// Unhandled standard opcode. Skip the number of
		// arguments that the prologue says this opcode has.
		for i := 0; i < r.opcodeLengths[opcode]; i++ {
			r.buf.uint()
		}
	}
	return false

emit:
	*entry = r.state
	// These registers are reset after every emitted row.
	r.state.BasicBlock = false
	r.state.PrologueEnd = false
	r.state.EpilogueBegin = false
	r.state.Discriminator = 0
	return true
}
// advancePC advances "operation pointer" (the combination of Address
// and OpIndex) in r.state by opAdvance steps.
func (r *LineReader) advancePC(opAdvance int) {
	total := r.state.OpIndex + opAdvance
	instructions := total / r.maxOpsPerInstruction
	r.state.Address += uint64(r.minInstructionLength * instructions)
	r.state.OpIndex = total % r.maxOpsPerInstruction
}
// A LineReaderPos represents a position in a line table.
//
// Callers should treat it as opaque and only pass it back to
// [LineReader.Seek] on the same line table.
type LineReaderPos struct {
	// off is the current offset in the DWARF line section.
	off Offset
	// numFileEntries is the length of fileEntries.
	numFileEntries int
	// state and fileIndex are the statement machine state at
	// offset off.
	state     LineEntry
	fileIndex int
}
// Tell returns the current position in the line table.
func (r *LineReader) Tell() LineReaderPos {
	return LineReaderPos{
		off:            r.buf.off,
		numFileEntries: len(r.fileEntries),
		state:          r.state,
		fileIndex:      r.fileIndex,
	}
}
// Seek restores the line table reader to a position returned by [LineReader.Tell].
//
// The argument pos must have been returned by a call to [LineReader.Tell] on this
// line table.
func (r *LineReader) Seek(pos LineReaderPos) {
	// Restore the statement machine registers, drop any file entries
	// added after pos, and rewind the buffer.
	r.state = pos.state
	r.fileIndex = pos.fileIndex
	r.fileEntries = r.fileEntries[:pos.numFileEntries]
	r.buf.off = pos.off
	r.buf.data = r.section[pos.off:r.endOffset]
}
// Reset repositions the line table reader at the beginning of the
// line table.
func (r *LineReader) Reset() {
	// Rewind the buffer to the start of the line number program.
	r.buf.off = r.programOffset
	r.buf.data = r.section[r.programOffset:r.endOffset]
	// Drop file entries added dynamically by the program.
	r.fileEntries = r.fileEntries[:r.initialFileEntries]
	// Restore the default statement machine registers.
	r.resetState()
}
// resetState resets r.state to its default values
func (r *LineReader) resetState() {
	// Initial statement machine register values from [DWARF4 6.2.2].
	// Fields omitted from the literal take their Go zero values,
	// which coincide with the spec's defaults.
	r.state = LineEntry{
		Line:   1,
		IsStmt: r.defaultIsStmt,
	}
	r.fileIndex = 1
	r.updateFile()
}
// Files returns the file name table of this compilation unit as of
// the current position in the line table. The file name table may be
// referenced from attributes in this compilation unit such as
// [AttrDeclFile].
//
// Entry 0 is always nil, since file index 0 represents "no file".
//
// The file name table of a compilation unit is not fixed. Files
// returns the file table as of the current position in the line
// table. This may contain more entries than the file table at an
// earlier position in the line table, though existing entries never
// change.
func (r *LineReader) Files() []*LineFile {
	// Returns the internal slice without copying; see the aliasing
	// note in readFileEntry.
	return r.fileEntries
}
// ErrUnknownPC is the error returned by LineReader.SeekPC when the
// seek PC is not covered by any entry in the line table.
var ErrUnknownPC = errors.New("ErrUnknownPC")
// SeekPC sets *entry to the [LineEntry] that includes pc and positions
// the reader on the next entry in the line table. If necessary, this
// will seek backwards to find pc.
//
// If pc is not covered by any entry in this line table, SeekPC
// returns [ErrUnknownPC]. In this case, *entry and the final seek
// position are unspecified.
//
// Note that DWARF line tables only permit sequential, forward scans.
// Hence, in the worst case, this takes time linear in the size of the
// line table. If the caller wishes to do repeated fast PC lookups, it
// should build an appropriate index of the line table.
func (r *LineReader) SeekPC(pc uint64, entry *LineEntry) error {
	if err := r.Next(entry); err != nil {
		return err
	}
	if entry.Address > pc {
		// We're too far. Start at the beginning of the table.
		r.Reset()
		if err := r.Next(entry); err != nil {
			return err
		}
		if entry.Address > pc {
			// The whole table starts after pc.
			r.Reset()
			return ErrUnknownPC
		}
	}
	// Scan until we pass pc, then back up one.
	for {
		var next LineEntry
		pos := r.Tell()
		if err := r.Next(&next); err != nil {
			if err == io.EOF {
				// Ran off the end of the table without passing pc.
				return ErrUnknownPC
			}
			return err
		}
		if next.Address > pc {
			if entry.EndSequence {
				// pc is in a hole in the table.
				return ErrUnknownPC
			}
			// entry is the desired entry. Back up the
			// cursor to "next" and return success.
			r.Seek(pos)
			return nil
		}
		*entry = next
	}
}
// pathIsAbs reports whether path is an absolute path (or "full path
// name" in DWARF parlance). This is in "whatever form makes sense for
// the host system", so this accepts both UNIX-style and DOS-style
// absolute paths. We avoid the filepath package because we want this
// to behave the same regardless of our host system and because we
// don't know what system the paths came from.
func pathIsAbs(path string) bool {
	// Strip any drive letter or UNC share, then test whether what
	// remains begins with a separator of either flavor.
	_, rest := splitDrive(path)
	if rest == "" {
		return false
	}
	return rest[0] == '/' || rest[0] == '\\'
}
// pathJoin joins dirname and filename. filename must be relative.
// DWARF paths can be UNIX-style or DOS-style, so this handles both.
func pathJoin(dirname, filename string) string {
	if dirname == "" {
		return filename
	}
	// dirname should be absolute, which means we can determine
	// whether it's a DOS path reasonably reliably by looking for
	// a drive letter or UNC path.
	drive, dir := splitDrive(dirname)
	if drive == "" {
		// UNIX-style path.
		return path.Join(dir, filename)
	}
	// DOS-style path.
	if fdrive, fname := splitDrive(filename); fdrive != "" {
		if !strings.EqualFold(drive, fdrive) {
			// Different drives. There's not much we can
			// do here, so just ignore the directory.
			return fdrive + fname
		}
		// Drives are the same; drop the drive from filename.
		filename = fname
	}
	// Insert a separator matching the directory's style if needed.
	if dir != "" && !strings.HasSuffix(dir, "/") && !strings.HasSuffix(dir, `\`) {
		if strings.HasPrefix(dir, "/") {
			dir += "/"
		} else {
			dir += `\`
		}
	}
	return drive + dir + filename
}
// splitDrive splits the DOS drive letter or UNC share point from
// path, if any. path == drive + rest
func splitDrive(path string) (drive, rest string) {
	// Drive letter: "C:..." (either case).
	if len(path) >= 2 && path[1] == ':' {
		if c := path[0]; ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') {
			return path[:2], path[2:]
		}
	}
	// UNC share: \\host\mount, accepting either slash direction.
	isSlash := func(b byte) bool { return b == '\\' || b == '/' }
	if len(path) > 3 && isSlash(path[0]) && isSlash(path[1]) {
		// Normalize the path so we can search for just \ below.
		npath := strings.ReplaceAll(path, "/", `\`)
		// The host part must be non-empty.
		if host := strings.IndexByte(npath[2:], '\\') + 2; host > 2 {
			// The mount-point part must be non-empty too.
			if mount := strings.IndexByte(npath[host+1:], '\\') + host + 1; mount > host {
				return path[:mount], path[mount:]
			}
		}
	}
	return "", path
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package dwarf provides access to DWARF debugging information loaded from
executable files, as defined in the DWARF 2.0 Standard at
http://dwarfstd.org/doc/dwarf-2.0.0.pdf.
# Security
This package is not designed to be hardened against adversarial inputs, and is
outside the scope of https://go.dev/security/policy. In particular, only basic
validation is done when parsing object files. As such, care should be taken when
parsing untrusted inputs, as parsing malformed files may consume significant
resources, or cause panics.
*/
package dwarf
import (
"encoding/binary"
"errors"
)
// Data represents the DWARF debugging information
// loaded from an executable file (for example, an ELF or Mach-O executable).
type Data struct {
	// raw data: contents of the corresponding .debug_* sections,
	// exactly as passed to New.
	abbrev   []byte
	aranges  []byte
	frame    []byte
	info     []byte
	line     []byte
	pubnames []byte
	ranges   []byte
	str      []byte

	// New sections added in DWARF 5, installed via AddSection.
	addr       []byte
	lineStr    []byte
	strOffsets []byte
	rngLists   []byte

	// parsed data
	abbrevCache map[uint64]abbrevTable // memoized abbreviation tables
	bigEndian   bool
	order       binary.ByteOrder // byte order sniffed from .debug_info by New
	typeCache   map[Offset]Type  // memoizes Type lookups by offset
	typeSigs    map[uint64]*typeUnit
	unit        []unit
}
var errSegmentSelector = errors.New("non-zero segment_selector size not supported")
// New returns a new [Data] object initialized from the given parameters.
// Rather than calling this function directly, clients should typically use
// the DWARF method of the File type of the appropriate package [debug/elf],
// [debug/macho], or [debug/pe].
//
// The []byte arguments are the data from the corresponding debug section
// in the object file; for example, for an ELF object, abbrev is the contents of
// the ".debug_abbrev" section.
func New(abbrev, aranges, frame, info, line, pubnames, ranges, str []byte) (*Data, error) {
	d := &Data{
		abbrev:      abbrev,
		aranges:     aranges,
		frame:       frame,
		info:        info,
		line:        line,
		pubnames:    pubnames,
		ranges:      ranges,
		str:         str,
		abbrevCache: make(map[uint64]abbrevTable),
		typeCache:   make(map[Offset]Type),
		typeSigs:    make(map[uint64]*typeUnit),
	}

	// Sniff .debug_info to figure out byte order.
	// 32-bit DWARF: 4 byte length, 2 byte version.
	// 64-bit DWARF: 4 bytes of 0xff, 8 byte length, 2 byte version.
	if len(d.info) < 6 {
		return nil, DecodeError{"info", Offset(len(d.info)), "too short"}
	}
	offset := 4
	if d.info[0] == 0xff && d.info[1] == 0xff && d.info[2] == 0xff && d.info[3] == 0xff {
		if len(d.info) < 14 {
			return nil, DecodeError{"info", Offset(len(d.info)), "too short"}
		}
		offset = 12
	}
	// Fetch the version, a tiny 16-bit number (1, 2, 3, 4, 5).
	// Since the version is small and nonzero, whichever of its two
	// bytes is zero reveals the byte order.
	x, y := d.info[offset], d.info[offset+1]
	switch {
	case x == 0 && y == 0:
		// NOTE(review): the offset 4 reported in these errors assumes
		// the 32-bit header layout; for 64-bit DWARF the version is
		// actually at offset 12.
		return nil, DecodeError{"info", 4, "unsupported version 0"}
	case x == 0:
		d.bigEndian = true
		d.order = binary.BigEndian
	case y == 0:
		d.bigEndian = false
		d.order = binary.LittleEndian
	default:
		return nil, DecodeError{"info", 4, "cannot determine byte order"}
	}

	u, err := d.parseUnits()
	if err != nil {
		return nil, err
	}
	d.unit = u
	return d, nil
}
// AddTypes will add one .debug_types section to the DWARF data. A
// typical object with DWARF version 4 debug info will have multiple
// .debug_types sections. The name is used for error reporting only,
// and serves to distinguish one .debug_types section from another.
func (d *Data) AddTypes(name string, types []byte) error {
	// The section is parsed eagerly; any parse error is returned
	// unchanged.
	return d.parseTypes(name, types)
}
// AddSection adds another DWARF section by name. The name should be a
// DWARF section name such as ".debug_addr", ".debug_str_offsets", and
// so forth. This approach is used for new DWARF sections added in
// DWARF 5 and later.
func (d *Data) AddSection(name string, contents []byte) error {
	switch name {
	case ".debug_addr":
		d.addr = contents
	case ".debug_line_str":
		d.lineStr = contents
	case ".debug_str_offsets":
		d.strOffsets = contents
	case ".debug_rnglists":
		d.rngLists = contents
	default:
		// Just ignore names that we don't yet support.
	}
	return nil
}
// Code generated by "stringer -type Tag -trimprefix=Tag"; DO NOT EDIT.
package dwarf
import "strconv"
// NOTE(review): the definitions below are generated by stringer;
// regenerate with the stringer command rather than hand-editing.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[TagArrayType-1]
	_ = x[TagClassType-2]
	_ = x[TagEntryPoint-3]
	_ = x[TagEnumerationType-4]
	_ = x[TagFormalParameter-5]
	_ = x[TagImportedDeclaration-8]
	_ = x[TagLabel-10]
	_ = x[TagLexDwarfBlock-11]
	_ = x[TagMember-13]
	_ = x[TagPointerType-15]
	_ = x[TagReferenceType-16]
	_ = x[TagCompileUnit-17]
	_ = x[TagStringType-18]
	_ = x[TagStructType-19]
	_ = x[TagSubroutineType-21]
	_ = x[TagTypedef-22]
	_ = x[TagUnionType-23]
	_ = x[TagUnspecifiedParameters-24]
	_ = x[TagVariant-25]
	_ = x[TagCommonDwarfBlock-26]
	_ = x[TagCommonInclusion-27]
	_ = x[TagInheritance-28]
	_ = x[TagInlinedSubroutine-29]
	_ = x[TagModule-30]
	_ = x[TagPtrToMemberType-31]
	_ = x[TagSetType-32]
	_ = x[TagSubrangeType-33]
	_ = x[TagWithStmt-34]
	_ = x[TagAccessDeclaration-35]
	_ = x[TagBaseType-36]
	_ = x[TagCatchDwarfBlock-37]
	_ = x[TagConstType-38]
	_ = x[TagConstant-39]
	_ = x[TagEnumerator-40]
	_ = x[TagFileType-41]
	_ = x[TagFriend-42]
	_ = x[TagNamelist-43]
	_ = x[TagNamelistItem-44]
	_ = x[TagPackedType-45]
	_ = x[TagSubprogram-46]
	_ = x[TagTemplateTypeParameter-47]
	_ = x[TagTemplateValueParameter-48]
	_ = x[TagThrownType-49]
	_ = x[TagTryDwarfBlock-50]
	_ = x[TagVariantPart-51]
	_ = x[TagVariable-52]
	_ = x[TagVolatileType-53]
	_ = x[TagDwarfProcedure-54]
	_ = x[TagRestrictType-55]
	_ = x[TagInterfaceType-56]
	_ = x[TagNamespace-57]
	_ = x[TagImportedModule-58]
	_ = x[TagUnspecifiedType-59]
	_ = x[TagPartialUnit-60]
	_ = x[TagImportedUnit-61]
	_ = x[TagMutableType-62]
	_ = x[TagCondition-63]
	_ = x[TagSharedType-64]
	_ = x[TagTypeUnit-65]
	_ = x[TagRvalueReferenceType-66]
	_ = x[TagTemplateAlias-67]
	_ = x[TagCoarrayType-68]
	_ = x[TagGenericSubrange-69]
	_ = x[TagDynamicType-70]
	_ = x[TagAtomicType-71]
	_ = x[TagCallSite-72]
	_ = x[TagCallSiteParameter-73]
	_ = x[TagSkeletonUnit-74]
	_ = x[TagImmutableType-75]
}

// Concatenated tag names for each contiguous run of Tag values;
// the matching _Tag_index_* arrays below slice names out of them.
const (
	_Tag_name_0 = "ArrayTypeClassTypeEntryPointEnumerationTypeFormalParameter"
	_Tag_name_1 = "ImportedDeclaration"
	_Tag_name_2 = "LabelLexDwarfBlock"
	_Tag_name_3 = "Member"
	_Tag_name_4 = "PointerTypeReferenceTypeCompileUnitStringTypeStructType"
	_Tag_name_5 = "SubroutineTypeTypedefUnionTypeUnspecifiedParametersVariantCommonDwarfBlockCommonInclusionInheritanceInlinedSubroutineModulePtrToMemberTypeSetTypeSubrangeTypeWithStmtAccessDeclarationBaseTypeCatchDwarfBlockConstTypeConstantEnumeratorFileTypeFriendNamelistNamelistItemPackedTypeSubprogramTemplateTypeParameterTemplateValueParameterThrownTypeTryDwarfBlockVariantPartVariableVolatileTypeDwarfProcedureRestrictTypeInterfaceTypeNamespaceImportedModuleUnspecifiedTypePartialUnitImportedUnitMutableTypeConditionSharedTypeTypeUnitRvalueReferenceTypeTemplateAliasCoarrayTypeGenericSubrangeDynamicTypeAtomicTypeCallSiteCallSiteParameterSkeletonUnitImmutableType"
)

var (
	_Tag_index_0 = [...]uint8{0, 9, 18, 28, 43, 58}
	_Tag_index_2 = [...]uint8{0, 5, 18}
	_Tag_index_4 = [...]uint8{0, 11, 24, 35, 45, 55}
	_Tag_index_5 = [...]uint16{0, 14, 21, 30, 51, 58, 74, 89, 100, 117, 123, 138, 145, 157, 165, 182, 190, 205, 214, 222, 232, 240, 246, 254, 266, 276, 286, 307, 329, 339, 352, 363, 371, 383, 397, 409, 422, 431, 445, 460, 471, 483, 494, 503, 513, 521, 540, 553, 564, 579, 590, 600, 608, 625, 637, 650}
)

// String returns the name of the Tag ("CompileUnit", "Subprogram",
// ...) by slicing the generated name tables, or "Tag(n)" for a value
// outside the known ranges.
func (i Tag) String() string {
	switch {
	case 1 <= i && i <= 5:
		i -= 1
		return _Tag_name_0[_Tag_index_0[i]:_Tag_index_0[i+1]]
	case i == 8:
		return _Tag_name_1
	case 10 <= i && i <= 11:
		i -= 10
		return _Tag_name_2[_Tag_index_2[i]:_Tag_index_2[i+1]]
	case i == 13:
		return _Tag_name_3
	case 15 <= i && i <= 19:
		i -= 15
		return _Tag_name_4[_Tag_index_4[i]:_Tag_index_4[i+1]]
	case 21 <= i && i <= 75:
		i -= 21
		return _Tag_name_5[_Tag_index_5[i]:_Tag_index_5[i+1]]
	default:
		return "Tag(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// DWARF type information structures.
// The format is heavily biased toward C, but for simplicity
// the String methods use a pseudo-Go syntax.
package dwarf
import "strconv"
// A Type conventionally represents a pointer to any of the
// specific Type structures ([CharType], [StructType], etc.).
type Type interface {
	// Common returns the fields shared by all types.
	Common() *CommonType
	// String returns a pseudo-Go description of the type.
	String() string
	// Size returns the size of a value of this type, in bytes.
	Size() int64
}
// A CommonType holds fields common to multiple types.
// If a field is not known or not applicable for a given type,
// the zero value is used.
type CommonType struct {
	ByteSize int64  // size of value of this type, in bytes
	Name     string // name that can be used to refer to type
}

// Common returns the receiver, satisfying the [Type] interface.
func (c *CommonType) Common() *CommonType { return c }

// Size returns the recorded byte size of the type.
func (c *CommonType) Size() int64 { return c.ByteSize }
// Basic types

// A BasicType holds fields common to all basic types.
//
// See the documentation for [StructField] for more info on the interpretation of
// the BitSize/BitOffset/DataBitOffset fields.
type BasicType struct {
	CommonType
	BitSize       int64 // see [StructField]
	BitOffset     int64 // see [StructField]
	DataBitOffset int64 // see [StructField]
}

// Basic returns the receiver, exposing the embedded BasicType from
// any of the wrapper types below.
func (b *BasicType) Basic() *BasicType { return b }

// String returns the type's name, or "?" if it has none.
func (t *BasicType) String() string {
	if t.Name != "" {
		return t.Name
	}
	return "?"
}
// The following types embed [BasicType] and carry no extra fields;
// the Go type itself distinguishes the kind of basic type.

// A CharType represents a signed character type.
type CharType struct {
	BasicType
}

// A UcharType represents an unsigned character type.
type UcharType struct {
	BasicType
}

// An IntType represents a signed integer type.
type IntType struct {
	BasicType
}

// A UintType represents an unsigned integer type.
type UintType struct {
	BasicType
}

// A FloatType represents a floating point type.
type FloatType struct {
	BasicType
}

// A ComplexType represents a complex floating point type.
type ComplexType struct {
	BasicType
}

// A BoolType represents a boolean type.
type BoolType struct {
	BasicType
}

// An AddrType represents a machine address type.
type AddrType struct {
	BasicType
}

// An UnspecifiedType represents an implicit, unknown, ambiguous or nonexistent type.
type UnspecifiedType struct {
	BasicType
}
// qualifiers

// A QualType represents a type that has the C/C++ "const", "restrict", or "volatile" qualifier.
type QualType struct {
	CommonType
	Qual string // the qualifier keyword, e.g. "const"
	Type Type   // the type being qualified
}

// String prints the qualifier before the underlying type.
func (t *QualType) String() string { return t.Qual + " " + t.Type.String() }

// Size reports the size of the underlying type; qualifiers do not
// change it.
func (t *QualType) Size() int64 { return t.Type.Size() }
// An ArrayType represents a fixed size array type.
type ArrayType struct {
	CommonType
	Type          Type  // element type
	StrideBitSize int64 // if > 0, number of bits to hold each element
	Count         int64 // if == -1, an incomplete array, like char x[].
}

// String formats the array in pseudo-Go syntax, e.g. "[10]int".
// An incomplete array (Count == -1) prints as "[-1]...".
func (t *ArrayType) String() string {
	return "[" + strconv.FormatInt(t.Count, 10) + "]" + t.Type.String()
}

// Size returns the total size of the array in bytes, or 0 for an
// incomplete array whose length is unknown.
func (t *ArrayType) Size() int64 {
	if t.Count == -1 {
		return 0
	}
	return t.Count * t.Type.Size()
}
// A VoidType represents the C void type.
type VoidType struct {
	CommonType
}

// String always returns "void".
func (t *VoidType) String() string { return "void" }
// A PtrType represents a pointer type.
type PtrType struct {
	CommonType
	Type Type // pointed-to type
}

// String formats the pointer in pseudo-Go syntax, e.g. "*int".
func (t *PtrType) String() string { return "*" + t.Type.String() }
// A StructType represents a struct, union, or C++ class type.
type StructType struct {
	CommonType
	StructName string // declared name, if any
	Kind       string // "struct", "union", or "class".
	Field      []*StructField
	Incomplete bool // if true, struct, union, class is declared but not defined
}
// A StructField represents a field in a struct, union, or C++ class type.
//
// # Bit Fields
//
// The BitSize, BitOffset, and DataBitOffset fields describe the bit
// size and offset of data members declared as bit fields in C/C++
// struct/union/class types.
//
// BitSize is the number of bits in the bit field.
//
// DataBitOffset, if non-zero, is the number of bits from the start of
// the enclosing entity (e.g. containing struct/class/union) to the
// start of the bit field. This corresponds to the DW_AT_data_bit_offset
// DWARF attribute that was introduced in DWARF 4.
//
// BitOffset, if non-zero, is the number of bits between the most
// significant bit of the storage unit holding the bit field to the
// most significant bit of the bit field. Here "storage unit" is the
// type name before the bit field (for a field "unsigned x:17", the
// storage unit is "unsigned"). BitOffset values can vary depending on
// the endianness of the system. BitOffset corresponds to the
// DW_AT_bit_offset DWARF attribute that was deprecated in DWARF 4 and
// removed in DWARF 5.
//
// At most one of DataBitOffset and BitOffset will be non-zero;
// DataBitOffset/BitOffset will only be non-zero if BitSize is
// non-zero. Whether a C compiler uses one or the other
// will depend on compiler vintage and command line options.
//
// Here is an example of C/C++ bit field use, along with what to
// expect in terms of DWARF bit offset info. Consider this code:
//
// struct S {
// int q;
// int j:5;
// int k:6;
// int m:5;
// int n:8;
// } s;
//
// For the code above, one would expect to see the following for
// DW_AT_bit_offset values (using GCC 8):
//
// Little | Big
// Endian | Endian
// |
// "j": 27 | 0
// "k": 21 | 5
// "m": 16 | 11
// "n": 8 | 16
//
// Note that in the above the offsets are purely with respect to the
// containing storage unit for j/k/m/n -- these values won't vary based
// on the size of prior data members in the containing struct.
//
// If the compiler emits DW_AT_data_bit_offset, the expected values
// would be:
//
// "j": 32
// "k": 37
// "m": 43
// "n": 48
//
// Here the value 32 for "j" reflects the fact that the bit field is
// preceded by other data members (recall that DW_AT_data_bit_offset
// values are relative to the start of the containing struct). Hence
// DW_AT_data_bit_offset values can be quite large for structs with
// many fields.
//
// DWARF also allow for the possibility of base types that have
// non-zero bit size and bit offset, so this information is also
// captured for base types, but it is worth noting that it is not
// possible to trigger this behavior using mainstream languages.
type StructField struct {
	Name          string
	Type          Type
	ByteOffset    int64 // byte offset of the field within the enclosing entity
	ByteSize      int64 // usually zero; use Type.Size() for normal fields
	BitOffset     int64 // see the bit-field discussion above
	DataBitOffset int64 // see the bit-field discussion above
	BitSize       int64 // zero if not a bit field
}
// String returns "kind name" for named types, or the full
// definition (via Defn) for anonymous ones.
func (t *StructType) String() string {
	if t.StructName == "" {
		return t.Defn()
	}
	return t.Kind + " " + t.StructName
}
// bitOffset returns whichever of the two bit-offset encodings is set;
// at most one of them is non-zero.
func (f *StructField) bitOffset() int64 {
	if f.BitOffset == 0 {
		return f.DataBitOffset
	}
	return f.BitOffset
}
// Defn returns the full pseudo-Go definition of the struct, listing
// every field with its byte offset and, for bit fields, its bit
// size and offset.
func (t *StructType) Defn() string {
	out := t.Kind
	if t.StructName != "" {
		out += " " + t.StructName
	}
	if t.Incomplete {
		// Declared but not defined: no field list to print.
		return out + " /*incomplete*/"
	}
	out += " {"
	for i, f := range t.Field {
		if i != 0 {
			out += "; "
		}
		out += f.Name + " " + f.Type.String() + "@" + strconv.FormatInt(f.ByteOffset, 10)
		if f.BitSize > 0 {
			out += " : " + strconv.FormatInt(f.BitSize, 10) + "@" + strconv.FormatInt(f.bitOffset(), 10)
		}
	}
	return out + "}"
}
// An EnumType represents an enumerated type.
// The only indication of its native integer type is its ByteSize
// (inside [CommonType]).
type EnumType struct {
	CommonType
	EnumName string // declared name, if any
	Val      []*EnumValue
}

// An EnumValue represents a single enumeration value.
type EnumValue struct {
	Name string // enumerator name
	Val  int64  // enumerator value
}
// String formats the enum and its values in pseudo-Go syntax,
// e.g. "enum color {red=0; green=1}".
func (t *EnumType) String() string {
	out := "enum"
	if t.EnumName != "" {
		out += " " + t.EnumName
	}
	out += " {"
	for i, v := range t.Val {
		if i != 0 {
			out += "; "
		}
		out += v.Name + "=" + strconv.FormatInt(v.Val, 10)
	}
	return out + "}"
}
// A FuncType represents a function type.
type FuncType struct {
	CommonType
	ReturnType Type   // nil when the function has no return type
	ParamType  []Type // parameter types, in order
}
// String formats the function type in pseudo-Go syntax,
// e.g. "func(int, *char) int".
func (t *FuncType) String() string {
	out := "func("
	for i, p := range t.ParamType {
		if i != 0 {
			out += ", "
		}
		out += p.String()
	}
	out += ")"
	if t.ReturnType != nil {
		out += " " + t.ReturnType.String()
	}
	return out
}
// A DotDotDotType represents the variadic ... function parameter.
type DotDotDotType struct {
	CommonType
}

// String always returns "...".
func (t *DotDotDotType) String() string { return "..." }
// A TypedefType represents a named type.
type TypedefType struct {
	CommonType
	Type Type // the type the name refers to
}

// String returns the typedef's name without expanding the underlying type.
func (t *TypedefType) String() string { return t.Name }

// Size reports the size of the underlying type.
func (t *TypedefType) Size() int64 { return t.Type.Size() }
// An UnsupportedType is a placeholder returned in situations where we
// encounter a type that isn't supported.
type UnsupportedType struct {
	CommonType
	Tag Tag // the DWARF tag this package could not interpret
}

// String returns the type's name if it has one; otherwise it
// synthesizes a placeholder naming the unsupported DWARF tag.
func (t *UnsupportedType) String() string {
	if t.Name != "" {
		return t.Name
	}
	// t.Name is necessarily empty on this path, so the original
	// `t.Name + ...` concatenation was a no-op; build the
	// placeholder directly.
	return "(unsupported type " + t.Tag.String() + ")"
}
// typeReader is used to read from either the info section or the
// types section.
type typeReader interface {
	// Seek positions the reader at the given offset.
	Seek(Offset)
	// Next returns the next entry; it may return nil (see readType's
	// handling of a nil entry as "unexpected end").
	Next() (*Entry, error)
	// clone returns an independent reader over the same section.
	clone() typeReader
	// offset reports the reader's current offset.
	offset() Offset
	// AddressSize returns the size in bytes of addresses in the current
	// compilation unit.
	AddressSize() int
}
// Type reads the type at off in the DWARF “info” section.
func (d *Data) Type(off Offset) (Type, error) {
	// Uses the shared typeCache, so repeated lookups of the same
	// offset return the identical Type value.
	return d.readType("info", d.Reader(), off, d.typeCache, nil)
}
// typeFixer collects types that need post-processing once the whole
// type graph has been constructed (applied by the root readType call).
type typeFixer struct {
	typedefs   []*TypedefType // typedefs whose ByteSize is resolved from their target in apply
	arraytypes []*Type        // array types handed to zeroArray in apply
}
// recordArrayType remembers t for later fixup if it currently holds
// an *ArrayType; nil and non-array entries are ignored.
func (tf *typeFixer) recordArrayType(t *Type) {
	if t == nil {
		return
	}
	if _, isArray := (*t).(*ArrayType); isArray {
		tf.arraytypes = append(tf.arraytypes, t)
	}
}
// apply performs the deferred fixups: resolve typedef byte sizes from
// their targets, then adjust recorded array types via zeroArray.
func (tf *typeFixer) apply() {
	for _, td := range tf.typedefs {
		td.Common().ByteSize = td.Type.Size()
	}
	for _, at := range tf.arraytypes {
		zeroArray(at)
	}
}
// readType reads a type from r at off of name. It adds types to the
// type cache, appends new typedef types to the fixup list, and computes
// the sizes of types. Callers should pass nil for fixups; a non-nil
// value is used only for internal recursion.
func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Offset]Type, fixups *typeFixer) (Type, error) {
	if t, ok := typeCache[off]; ok {
		return t, nil
	}
	r.Seek(off)
	e, err := r.Next()
	if err != nil {
		return nil, err
	}
	addressSize := r.AddressSize()
	if e == nil || e.Offset != off {
		return nil, DecodeError{name, off, "no type at offset"}
	}

	// If this is the root of the recursion, prepare to resolve
	// typedef sizes and perform other fixups once the recursion is
	// done. This must be done after the type graph is constructed
	// because it may need to resolve cycles in a different order than
	// readType encounters them.
	if fixups == nil {
		var fixer typeFixer
		defer func() {
			fixer.apply()
		}()
		fixups = &fixer
	}

	// Parse type from Entry.
	// Must always set typeCache[off] before calling
	// d.readType recursively, to handle circular types correctly.
	var typ Type

	// nextDepth counts how deeply nested we are inside composite
	// children that next skips over; see next below.
	nextDepth := 0

	// Get next child; set err if error happens.
	next := func() *Entry {
		if !e.Children {
			return nil
		}
		// Only return direct children.
		// Skip over composite entries that happen to be nested
		// inside this one. Most DWARF generators wouldn't generate
		// such a thing, but clang does.
		// See golang.org/issue/6472.
		for {
			kid, err1 := r.Next()
			if err1 != nil {
				err = err1
				return nil
			}
			if kid == nil {
				err = DecodeError{name, r.offset(), "unexpected end of DWARF entries"}
				return nil
			}
			if kid.Tag == 0 {
				// End-of-children marker for the current nesting level.
				if nextDepth > 0 {
					nextDepth--
					continue
				}
				return nil
			}
			if kid.Children {
				nextDepth++
			}
			if nextDepth > 0 {
				continue
			}
			return kid
		}
	}

	// Get Type referred to by Entry's AttrType field.
	// Set err if error happens. Not having a type is an error.
	typeOf := func(e *Entry) Type {
		tval := e.Val(AttrType)
		var t Type
		switch toff := tval.(type) {
		case Offset:
			if t, err = d.readType(name, r.clone(), toff, typeCache, fixups); err != nil {
				return nil
			}
		case uint64:
			// A uint64 value is a DWARF4 type signature; resolve it
			// through the .debug_types section (see sigToType).
			if t, err = d.sigToType(toff); err != nil {
				return nil
			}
		default:
			// It appears that no Type means "void".
			return new(VoidType)
		}
		return t
	}

	switch e.Tag {
	case TagArrayType:
		// Multi-dimensional array. (DWARF v2 §5.4)
		// Attributes:
		//	AttrType:subtype [required]
		//	AttrStrideSize: size in bits of each element of the array
		//	AttrByteSize: size of entire array
		// Children:
		//	TagSubrangeType or TagEnumerationType giving one dimension.
		//	dimensions are in left to right order.
		t := new(ArrayType)
		typ = t
		typeCache[off] = t
		if t.Type = typeOf(e); err != nil {
			goto Error
		}
		t.StrideBitSize, _ = e.Val(AttrStrideSize).(int64)

		// Accumulate dimensions.
		var dims []int64
		for kid := next(); kid != nil; kid = next() {
			// TODO(rsc): Can also be TagEnumerationType
			// but haven't seen that in the wild yet.
			switch kid.Tag {
			case TagSubrangeType:
				count, ok := kid.Val(AttrCount).(int64)
				if !ok {
					// Old binaries may have an upper bound instead.
					count, ok = kid.Val(AttrUpperBound).(int64)
					if ok {
						count++ // Length is one more than upper bound.
					} else if len(dims) == 0 {
						count = -1 // As in x[].
					}
				}
				dims = append(dims, count)
			case TagEnumerationType:
				err = DecodeError{name, kid.Offset, "cannot handle enumeration type as array bound"}
				goto Error
			}
		}
		if len(dims) == 0 {
			// LLVM generates this for x[].
			dims = []int64{-1}
		}

		// Fold the outer dimensions into nested ArrayTypes, innermost last.
		t.Count = dims[0]
		for i := len(dims) - 1; i >= 1; i-- {
			t.Type = &ArrayType{Type: t.Type, Count: dims[i]}
		}

	case TagBaseType:
		// Basic type. (DWARF v2 §5.1)
		// Attributes:
		//	AttrName: name of base type in programming language of the compilation unit [required]
		//	AttrEncoding: encoding value for type (encFloat etc) [required]
		//	AttrByteSize: size of type in bytes [required]
		//	AttrBitOffset: bit offset of value within containing storage unit
		//	AttrDataBitOffset: bit offset of value within containing storage unit
		//	AttrBitSize: size in bits
		//
		// For most languages BitOffset/DataBitOffset/BitSize will not be present
		// for base types.
		name, _ := e.Val(AttrName).(string)
		enc, ok := e.Val(AttrEncoding).(int64)
		if !ok {
			err = DecodeError{name, e.Offset, "missing encoding attribute for " + name}
			goto Error
		}
		switch enc {
		default:
			err = DecodeError{name, e.Offset, "unrecognized encoding attribute value"}
			goto Error

		case encAddress:
			typ = new(AddrType)
		case encBoolean:
			typ = new(BoolType)
		case encComplexFloat:
			typ = new(ComplexType)
			if name == "complex" {
				// clang writes out 'complex' instead of 'complex float' or 'complex double'.
				// clang also writes out a byte size that we can use to distinguish.
				// See issue 8694.
				switch byteSize, _ := e.Val(AttrByteSize).(int64); byteSize {
				case 8:
					name = "complex float"
				case 16:
					name = "complex double"
				}
			}
		case encFloat:
			typ = new(FloatType)
		case encSigned:
			typ = new(IntType)
		case encUnsigned:
			typ = new(UintType)
		case encSignedChar:
			typ = new(CharType)
		case encUnsignedChar:
			typ = new(UcharType)
		}
		typeCache[off] = typ
		t := typ.(interface {
			Basic() *BasicType
		}).Basic()
		t.Name = name
		t.BitSize, _ = e.Val(AttrBitSize).(int64)
		haveBitOffset := false
		haveDataBitOffset := false
		t.BitOffset, haveBitOffset = e.Val(AttrBitOffset).(int64)
		t.DataBitOffset, haveDataBitOffset = e.Val(AttrDataBitOffset).(int64)
		// DWARF allows at most one of the two bit-offset encodings.
		if haveBitOffset && haveDataBitOffset {
			err = DecodeError{name, e.Offset, "duplicate bit offset attributes"}
			goto Error
		}

	case TagClassType, TagStructType, TagUnionType:
		// Structure, union, or class type. (DWARF v2 §5.5)
		// Attributes:
		//	AttrName: name of struct, union, or class
		//	AttrByteSize: byte size [required]
		//	AttrDeclaration: if true, struct/union/class is incomplete
		// Children:
		//	TagMember to describe one member.
		//		AttrName: name of member [required]
		//		AttrType: type of member [required]
		//		AttrByteSize: size in bytes
		//		AttrBitOffset: bit offset within bytes for bit fields
		//		AttrDataBitOffset: field bit offset relative to struct start
		//		AttrBitSize: bit size for bit fields
		//		AttrDataMemberLoc: location within struct [required for struct, class]
		// There is much more to handle C++, all ignored for now.
		t := new(StructType)
		typ = t
		typeCache[off] = t
		switch e.Tag {
		case TagClassType:
			t.Kind = "class"
		case TagStructType:
			t.Kind = "struct"
		case TagUnionType:
			t.Kind = "union"
		}
		t.StructName, _ = e.Val(AttrName).(string)
		t.Incomplete = e.Val(AttrDeclaration) != nil
		t.Field = make([]*StructField, 0, 8)
		// Track the previous field so a zero-width field (a 0-length
		// array written out as length 1) can be scheduled for fixup.
		var lastFieldType *Type
		var lastFieldBitSize int64
		var lastFieldByteOffset int64
		for kid := next(); kid != nil; kid = next() {
			if kid.Tag != TagMember {
				continue
			}
			f := new(StructField)
			if f.Type = typeOf(kid); err != nil {
				goto Error
			}
			switch loc := kid.Val(AttrDataMemberLoc).(type) {
			case []byte:
				// TODO: Should have original compilation
				// unit here, not unknownFormat.
				b := makeBuf(d, unknownFormat{}, "location", 0, loc)
				if b.uint8() != opPlusUconst {
					err = DecodeError{name, kid.Offset, "unexpected opcode"}
					goto Error
				}
				f.ByteOffset = int64(b.uint())
				if b.err != nil {
					err = b.err
					goto Error
				}
			case int64:
				f.ByteOffset = loc
			}

			f.Name, _ = kid.Val(AttrName).(string)
			f.ByteSize, _ = kid.Val(AttrByteSize).(int64)
			haveBitOffset := false
			haveDataBitOffset := false
			f.BitOffset, haveBitOffset = kid.Val(AttrBitOffset).(int64)
			f.DataBitOffset, haveDataBitOffset = kid.Val(AttrDataBitOffset).(int64)
			if haveBitOffset && haveDataBitOffset {
				err = DecodeError{name, e.Offset, "duplicate bit offset attributes"}
				goto Error
			}
			f.BitSize, _ = kid.Val(AttrBitSize).(int64)
			t.Field = append(t.Field, f)

			if lastFieldBitSize == 0 && lastFieldByteOffset == f.ByteOffset && t.Kind != "union" {
				// Last field was zero width. Fix array length.
				// (DWARF writes out 0-length arrays as if they were 1-length arrays.)
				fixups.recordArrayType(lastFieldType)
			}
			lastFieldType = &f.Type
			lastFieldByteOffset = f.ByteOffset
			lastFieldBitSize = f.BitSize
		}
		if t.Kind != "union" {
			b, ok := e.Val(AttrByteSize).(int64)
			if ok && b == lastFieldByteOffset {
				// Final field must be zero width. Fix array length.
				fixups.recordArrayType(lastFieldType)
			}
		}

	case TagConstType, TagVolatileType, TagRestrictType:
		// Type modifier (DWARF v2 §5.2)
		// Attributes:
		//	AttrType: subtype
		t := new(QualType)
		typ = t
		typeCache[off] = t
		if t.Type = typeOf(e); err != nil {
			goto Error
		}
		switch e.Tag {
		case TagConstType:
			t.Qual = "const"
		case TagRestrictType:
			t.Qual = "restrict"
		case TagVolatileType:
			t.Qual = "volatile"
		}

	case TagEnumerationType:
		// Enumeration type (DWARF v2 §5.6)
		// Attributes:
		//	AttrName: enum name if any
		//	AttrByteSize: bytes required to represent largest value
		// Children:
		//	TagEnumerator:
		//		AttrName: name of constant
		//		AttrConstValue: value of constant
		t := new(EnumType)
		typ = t
		typeCache[off] = t
		t.EnumName, _ = e.Val(AttrName).(string)
		t.Val = make([]*EnumValue, 0, 8)
		for kid := next(); kid != nil; kid = next() {
			if kid.Tag == TagEnumerator {
				f := new(EnumValue)
				f.Name, _ = kid.Val(AttrName).(string)
				f.Val, _ = kid.Val(AttrConstValue).(int64)
				// Grow t.Val manually (doubling) before storing.
				n := len(t.Val)
				if n >= cap(t.Val) {
					val := make([]*EnumValue, n, n*2)
					copy(val, t.Val)
					t.Val = val
				}
				t.Val = t.Val[0 : n+1]
				t.Val[n] = f
			}
		}

	case TagPointerType:
		// Type modifier (DWARF v2 §5.2)
		// Attributes:
		//	AttrType: subtype [not required!  void* has no AttrType]
		//	AttrAddrClass: address class [ignored]
		t := new(PtrType)
		typ = t
		typeCache[off] = t
		if e.Val(AttrType) == nil {
			t.Type = &VoidType{}
			break
		}
		t.Type = typeOf(e)

	case TagSubroutineType:
		// Subroutine type. (DWARF v2 §5.7)
		// Attributes:
		//	AttrType: type of return value if any
		//	AttrName: possible name of type [ignored]
		//	AttrPrototyped: whether used ANSI C prototype [ignored]
		// Children:
		//	TagFormalParameter: typed parameter
		//		AttrType: type of parameter
		//	TagUnspecifiedParameter: final ...
		t := new(FuncType)
		typ = t
		typeCache[off] = t
		if t.ReturnType = typeOf(e); err != nil {
			goto Error
		}
		t.ParamType = make([]Type, 0, 8)
		for kid := next(); kid != nil; kid = next() {
			var tkid Type
			switch kid.Tag {
			default:
				continue
			case TagFormalParameter:
				if tkid = typeOf(kid); err != nil {
					goto Error
				}
			case TagUnspecifiedParameters:
				tkid = &DotDotDotType{}
			}
			t.ParamType = append(t.ParamType, tkid)
		}

	case TagTypedef:
		// Typedef (DWARF v2 §5.3)
		// Attributes:
		//	AttrName: name [required]
		//	AttrType: type definition [required]
		t := new(TypedefType)
		typ = t
		typeCache[off] = t
		t.Name, _ = e.Val(AttrName).(string)
		t.Type = typeOf(e)

	case TagUnspecifiedType:
		// Unspecified type (DWARF v3 §5.2)
		// Attributes:
		//	AttrName: name
		t := new(UnspecifiedType)
		typ = t
		typeCache[off] = t
		t.Name, _ = e.Val(AttrName).(string)

	default:
		// This is some other type DIE that we're currently not
		// equipped to handle. Return an abstract "unsupported type"
		// object in such cases.
		t := new(UnsupportedType)
		typ = t
		typeCache[off] = t
		t.Tag = e.Tag
		t.Name, _ = e.Val(AttrName).(string)
	}

	if err != nil {
		goto Error
	}

	{
		// Determine the type's byte size. Typedefs without an explicit
		// size are resolved later via the fixup list; pointers default
		// to the unit's address size.
		b, ok := e.Val(AttrByteSize).(int64)
		if !ok {
			b = -1
			switch t := typ.(type) {
			case *TypedefType:
				// Record that we need to resolve this
				// type's size once the type graph is
				// constructed.
				fixups.typedefs = append(fixups.typedefs, t)
			case *PtrType:
				b = int64(addressSize)
			}
		}
		typ.Common().ByteSize = b
	}
	return typ, nil

Error:
	// If the parse fails, take the type out of the cache
	// so that the next call with this offset doesn't hit
	// the cache and return success.
	delete(typeCache, off)
	return nil, err
}
// zeroArray replaces *t, which must hold an *ArrayType, with a copy
// whose Count is zero. Arrays of zero-sized elements are left as-is.
func zeroArray(t *Type) {
	arr := (*t).(*ArrayType)
	if arr.Type.Size() == 0 {
		// Element size is zero: nothing to fix.
		return
	}
	// Mutating arr directly would corrupt the entry in typeCache,
	// so build a modified copy and swing the pointer to it.
	clone := *arr
	clone.Count = 0
	*t = &clone
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dwarf
import (
"fmt"
"strconv"
)
// Parse the type units stored in a DWARF4 .debug_types section. Each
// type unit defines a single primary type and an 8-byte signature.
// Other sections may then use formRefSig8 to refer to the type.

// The typeUnit format is a single type with a signature. It holds
// the same data as a compilation unit.
type typeUnit struct {
	unit
	toff  Offset // Offset to signature type within data.
	name  string // Name of .debug_type section.
	cache Type   // Cached result of sigToType; nil until first resolved.
}
// Parse a .debug_types section. Each type unit found is recorded in
// d.typeSigs, keyed by its 8-byte signature, for later lookup by
// sigToType. Parsing stops at the first malformed unit.
func (d *Data) parseTypes(name string, types []byte) error {
	b := makeBuf(d, unknownFormat{}, name, 0, types)
	for len(b.data) > 0 {
		base := b.off
		// Type unit header: unit length, version, abbrev offset,
		// address size, then the DWARF4 extension fields:
		// type signature and type offset.
		n, dwarf64 := b.unitLength()
		if n != Offset(uint32(n)) {
			b.error("type unit length overflow")
			return b.err
		}
		hdroff := b.off
		vers := int(b.uint16())
		if vers != 4 {
			b.error("unsupported DWARF version " + strconv.Itoa(vers))
			return b.err
		}
		// Abbrev offset is 4 bytes in 32-bit DWARF, 8 in 64-bit.
		var ao uint64
		if !dwarf64 {
			ao = uint64(b.uint32())
		} else {
			ao = b.uint64()
		}
		atable, err := d.parseAbbrev(ao, vers)
		if err != nil {
			return err
		}
		asize := b.uint8()
		sig := b.uint64()

		var toff uint32
		if !dwarf64 {
			toff = b.uint32()
		} else {
			// 64-bit type offsets must still fit in 32 bits here.
			to64 := b.uint64()
			if to64 != uint64(uint32(to64)) {
				b.error("type unit type offset overflow")
				return b.err
			}
			toff = uint32(to64)
		}
		boff := b.off
		// The unit's data is what remains of the declared length
		// after the header fields consumed since hdroff.
		d.typeSigs[sig] = &typeUnit{
			unit: unit{
				base:   base,
				off:    boff,
				data:   b.bytes(int(n - (b.off - hdroff))),
				atable: atable,
				asize:  int(asize),
				vers:   vers,
				is64:   dwarf64,
			},
			toff: Offset(toff),
			name: name,
		}
		if b.err != nil {
			return b.err
		}
	}
	return nil
}
// Return the type for a type signature, resolving it from the
// matching type unit and memoizing the result on the unit.
func (d *Data) sigToType(sig uint64) (Type, error) {
	tu, ok := d.typeSigs[sig]
	if !ok || tu == nil {
		return nil, fmt.Errorf("no type unit with signature %v", sig)
	}
	if cached := tu.cache; cached != nil {
		return cached, nil
	}
	// Build a reader positioned at the start of the unit's data and
	// decode the primary type at the recorded type offset.
	reader := &typeUnitReader{
		d:  d,
		tu: tu,
		b:  makeBuf(d, tu, tu.name, tu.off, tu.data),
	}
	typ, err := d.readType(tu.name, reader, tu.toff, make(map[Offset]Type), nil)
	if err != nil {
		return nil, err
	}
	tu.cache = typ
	return typ, nil
}
// typeUnitReader is a typeReader for a tagTypeUnit.
type typeUnitReader struct {
	d   *Data     // parent DWARF data
	tu  *typeUnit // type unit being read
	b   buf       // current decode position within tu.data
	err error     // sticky error; set by Seek, reported by Next
}
// Seek to a new position in the type unit. An out-of-range offset is
// recorded in tur.err and surfaced by the next call to Next.
func (tur *typeUnitReader) Seek(off Offset) {
	tur.err = nil
	doff := off - tur.tu.off
	// NOTE(review): Offset appears to be an unsigned type, which would
	// make the doff < 0 test vacuous; an off below tu.off underflows
	// and is then caught by the >= len check instead. Confirm against
	// the Offset definition before simplifying.
	if doff < 0 || doff >= Offset(len(tur.tu.data)) {
		tur.err = fmt.Errorf("%s: offset %d out of range; max %d", tur.tu.name, doff, len(tur.tu.data))
		return
	}
	tur.b = makeBuf(tur.d, tur.tu, tur.tu.name, off, tur.tu.data[doff:])
}
// AddressSize returns the size in bytes of addresses in the current type unit.
func (tur *typeUnitReader) AddressSize() int {
	// asize is a promoted field of the embedded unit.
	return tur.tu.asize
}
// Next reads the next [Entry] from the type unit. It returns nil, nil
// when the unit has no data, and remembers decode errors so subsequent
// calls keep failing.
func (tur *typeUnitReader) Next() (*Entry, error) {
	switch {
	case tur.err != nil:
		return nil, tur.err
	case len(tur.tu.data) == 0:
		return nil, nil
	}
	entry := tur.b.entry(nil, &tur.tu.unit)
	if decodeErr := tur.b.err; decodeErr != nil {
		tur.err = decodeErr
		return nil, decodeErr
	}
	return entry, nil
}
// clone returns a fresh reader positioned at the start of the type unit.
func (tur *typeUnitReader) clone() typeReader {
	fresh := &typeUnitReader{d: tur.d, tu: tur.tu}
	fresh.b = makeBuf(fresh.d, fresh.tu, fresh.tu.name, fresh.tu.off, fresh.tu.data)
	return fresh
}
// offset returns the current offset, i.e. the position the next call
// to Next will decode from.
func (tur *typeUnitReader) offset() Offset {
	return tur.b.off
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dwarf
import (
"sort"
"strconv"
)
// DWARF debug info is split into a sequence of compilation units.
// Each unit has its own abbreviation table and address size.
type unit struct {
	base   Offset // byte offset of header within the aggregate info
	off    Offset // byte offset of data within the aggregate info
	data   []byte // entry data following the unit header
	atable abbrevTable
	*unit5        // info specific to DWARF 5 units
	asize  int    // address size in bytes
	vers   int    // DWARF version, 2 through 5
	is64   bool   // True for 64-bit DWARF format
	utype  uint8  // DWARF 5 unit type
}
// unit5 holds the DWARF 5 base offsets read from a unit's root DIE
// (see collectDwarf5BaseOffsets); they locate the unit's contributions
// in the addr, str_offsets, rnglists, and loclists sections.
type unit5 struct {
	addrBase       uint64
	strOffsetsBase uint64
	rngListsBase   uint64
	locListsBase   uint64
}
// Implement the dataFormat interface.

// version returns the unit's DWARF version.
func (u *unit) version() int {
	return u.vers
}

// dwarf64 reports whether the unit uses the 64-bit DWARF format;
// the second result reports that the answer is known.
func (u *unit) dwarf64() (bool, bool) {
	return u.is64, true
}

// addrsize returns the unit's address size in bytes.
func (u *unit) addrsize() int {
	return u.asize
}
// addrBase returns the unit's DWARF 5 address-table base, or 0 when
// no DWARF 5 info is attached.
func (u *unit) addrBase() uint64 {
	if u.unit5 == nil {
		return 0
	}
	return u.unit5.addrBase
}

// strOffsetsBase returns the unit's DWARF 5 string-offsets base, or 0.
func (u *unit) strOffsetsBase() uint64 {
	if u.unit5 == nil {
		return 0
	}
	return u.unit5.strOffsetsBase
}

// rngListsBase returns the unit's DWARF 5 range-lists base, or 0.
func (u *unit) rngListsBase() uint64 {
	if u.unit5 == nil {
		return 0
	}
	return u.unit5.rngListsBase
}

// locListsBase returns the unit's DWARF 5 location-lists base, or 0.
func (u *unit) locListsBase() uint64 {
	if u.unit5 == nil {
		return 0
	}
	return u.unit5.locListsBase
}
// parseUnits scans the .debug_info data and returns one unit per
// compilation unit header found. It makes two passes: the first only
// counts units, the second decodes each header.
func (d *Data) parseUnits() ([]unit, error) {
	// Count units.
	nunit := 0
	b := makeBuf(d, unknownFormat{}, "info", 0, d.info)
	for len(b.data) > 0 {
		len, _ := b.unitLength()
		if len != Offset(uint32(len)) {
			b.error("unit length overflow")
			break
		}
		b.skip(int(len))
		if len > 0 {
			nunit++
		}
	}
	if b.err != nil {
		return nil, b.err
	}

	// Again, this time writing them down.
	b = makeBuf(d, unknownFormat{}, "info", 0, d.info)
	units := make([]unit, nunit)
	for i := range units {
		u := &units[i]
		u.base = b.off
		var n Offset
		if b.err != nil {
			return nil, b.err
		}
		// Skip zero-length padding units, matching the counting pass.
		for n == 0 {
			n, u.is64 = b.unitLength()
		}
		dataOff := b.off
		vers := b.uint16()
		if vers < 2 || vers > 5 {
			b.error("unsupported DWARF version " + strconv.Itoa(int(vers)))
			break
		}
		u.vers = int(vers)
		// DWARF 5 moved the unit type and address size before the
		// abbrev offset; earlier versions put address size after it.
		if vers >= 5 {
			u.utype = b.uint8()
			u.asize = int(b.uint8())
		}
		var abbrevOff uint64
		if u.is64 {
			abbrevOff = b.uint64()
		} else {
			abbrevOff = uint64(b.uint32())
		}
		atable, err := d.parseAbbrev(abbrevOff, u.vers)
		if err != nil {
			if b.err == nil {
				b.err = err
			}
			break
		}
		u.atable = atable
		if vers < 5 {
			u.asize = int(b.uint8())
		}

		// DWARF 5 unit types carry extra header fields; skip them.
		switch u.utype {
		case utSkeleton, utSplitCompile:
			b.uint64() // unit ID
		case utType, utSplitType:
			b.uint64() // type signature
			if u.is64 { // type offset
				b.uint64()
			} else {
				b.uint32()
			}
		}

		u.off = b.off
		// Unit data is the declared length minus the header bytes
		// consumed since dataOff.
		u.data = b.bytes(int(n - (b.off - dataOff)))
	}
	if b.err != nil {
		return nil, b.err
	}
	return units, nil
}
// offsetToUnit returns the index of the unit containing offset off.
// It returns -1 if no unit contains this offset.
func (d *Data) offsetToUnit(off Offset) int {
	// Binary-search for the first unit whose data starts after off...
	after := sort.Search(len(d.unit), func(i int) bool {
		return d.unit[i].off > off
	})
	if after == 0 {
		// off precedes every unit.
		return -1
	}
	// ...then check that the preceding unit actually covers off.
	idx := after - 1
	u := &d.unit[idx]
	if u.off <= off && off < u.off+Offset(len(u.data)) {
		return idx
	}
	return -1
}
// collectDwarf5BaseOffsets reads the DWARF 5 base-offset attributes
// (addr_base, str_offsets_base, rnglists_base, loclists_base) from the
// unit's root DIE into u.unit5. Attributes that are absent leave the
// corresponding base at zero. u.unit5 must already be allocated.
func (d *Data) collectDwarf5BaseOffsets(u *unit) error {
	if u.unit5 == nil {
		panic("expected unit5 to be set up already")
	}
	b := makeBuf(d, u, "info", u.off, u.data)
	cu := b.entry(nil, u)
	if cu == nil {
		// Unknown abbreviation table entry or some other fatal
		// problem; bail early on the assumption that this will be
		// detected at some later point.
		return b.err
	}
	if iAddrBase, ok := cu.Val(AttrAddrBase).(int64); ok {
		u.unit5.addrBase = uint64(iAddrBase)
	}
	if iStrOffsetsBase, ok := cu.Val(AttrStrOffsetsBase).(int64); ok {
		u.unit5.strOffsetsBase = uint64(iStrOffsetsBase)
	}
	if iRngListsBase, ok := cu.Val(AttrRnglistsBase).(int64); ok {
		u.unit5.rngListsBase = uint64(iRngListsBase)
	}
	if iLocListsBase, ok := cu.Val(AttrLoclistsBase).(int64); ok {
		u.unit5.locListsBase = uint64(iLocListsBase)
	}
	return nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
* Line tables
*/
package gosym
import (
"bytes"
"encoding/binary"
"internal/abi"
"sort"
"sync"
)
// version of the pclntab
type version int

const (
	verUnknown version = iota // not yet parsed, or unrecognized table
	ver11                     // Go 1.1 and earlier (also the fallback on parse failure)
	ver12                     // Go 1.2
	ver116                    // Go 1.16
	ver118                    // Go 1.18
	ver120                    // Go 1.20
)
// A LineTable is a data structure mapping program counters to line numbers.
//
// In Go 1.1 and earlier, each function (represented by a [Func]) had its own LineTable,
// and the line number corresponded to a numbering of all source lines in the
// program, across all files. That absolute line number would then have to be
// converted separately to a file name and line number within the file.
//
// In Go 1.2, the format of the data changed so that there is a single LineTable
// for the entire program, shared by all Funcs, and there are no absolute line
// numbers, just line numbers within specific files.
//
// For the most part, LineTable's methods should be treated as an internal
// detail of the package; callers should use the methods on [Table] instead.
type LineTable struct {
	Data []byte // encoded line table data
	PC   uint64 // starting pc for the pre-Go 1.2 parser (see parse)
	Line int    // starting line for the pre-Go 1.2 parser

	// This mutex is used to keep parsing of pclntab synchronous.
	mu sync.Mutex

	// Contains the version of the pclntab section.
	version version

	// Go 1.2/1.16/1.18 state
	binary      binary.ByteOrder
	quantum     uint32 // pc quantum, from the table header
	ptrsize     uint32 // pointer size in bytes, from the table header
	textStart   uint64 // address of runtime.text symbol (1.18+)
	funcnametab []byte // function name strings (see funcName)
	cutab       []byte // compilation-unit file index table (1.16+)
	funcdata    []byte
	functab     []byte // (pc, funcOff) pairs plus a final sentinel pc
	nfunctab    uint32
	filetab     []byte
	pctab       []byte // points to the pctables.
	nfiletab    uint32
	funcNames   map[uint32]string // cache the function names
	strings     map[uint32]string // interned substrings of Data, keyed by offset
	// fileMap varies depending on the version of the object file.
	// For ver12, it maps the name to the index in the file table.
	// For ver116, it maps the name to the offset in filetab.
	fileMap map[string]uint32
}
// oldQuantum is the instruction-size quantum assumed by the
// pre-Go 1.2 line table encoding.
//
// NOTE(rsc): This is wrong for GOARCH=arm, which uses a quantum of 4,
// but we have no idea whether we're using arm or not. This only
// matters in the old (pre-Go 1.2) symbol table format, so it's not worth
// fixing.
const oldQuantum = 1
// parse walks the pre-Go 1.2 line table starting from (t.PC, t.Line)
// until pc passes targetPC or line equals targetLine, returning the
// remaining data and the final (pc, line) pair.
func (t *LineTable) parse(targetPC uint64, targetLine int) (b []byte, pc uint64, line int) {
	// The PC/line table can be thought of as a sequence of
	//  <pc update>* <line update>
	// batches. Each update batch results in a (pc, line) pair,
	// where line applies to every PC from pc up to but not
	// including the pc of the next pair.
	//
	// Here we process each update individually, which simplifies
	// the code, but makes the corner cases more confusing.
	b, pc, line = t.Data, t.PC, t.Line
	for pc <= targetPC && line != targetLine && len(b) > 0 {
		code := b[0]
		b = b[1:]
		switch {
		case code == 0:
			// Opcode 0: large line delta, as a 4-byte big-endian value.
			if len(b) < 4 {
				b = b[0:0]
				break
			}
			val := binary.BigEndian.Uint32(b)
			b = b[4:]
			line += int(val)
		case code <= 64:
			// 1..64: small positive line delta.
			line += int(code)
		case code <= 128:
			// 65..128: small negative line delta.
			line -= int(code - 64)
		default:
			// 129..255: pc advance with no line update; the continue
			// skips the trailing quantum bump below.
			pc += oldQuantum * uint64(code-128)
			continue
		}
		pc += oldQuantum
	}
	return b, pc, line
}
// slice returns a new LineTable positioned just past pc: parse consumes
// entries up to pc (the targetLine of -1 is a sentinel not expected to
// match), and the result carries the remaining data and state.
func (t *LineTable) slice(pc uint64) *LineTable {
	data, pc, line := t.parse(pc, -1)
	return &LineTable{Data: data, PC: pc, Line: line}
}
// PCToLine returns the line number for the given program counter.
//
// Deprecated: Use Table's PCToLine method instead.
func (t *LineTable) PCToLine(pc uint64) int {
	if !t.isGo12() {
		// Pre-Go 1.2: walk the per-function delta table directly.
		_, _, line := t.parse(pc, -1)
		return line
	}
	return t.go12PCToLine(pc)
}
// LineToPC returns the program counter for the given line number,
// considering only program counters before maxpc.
//
// Deprecated: Use Table's LineToPC method instead.
func (t *LineTable) LineToPC(line int, maxpc uint64) uint64 {
	if t.isGo12() {
		// Go 1.2+ tables are not searchable through this method.
		return 0
	}
	_, pc, got := t.parse(maxpc, line)
	if got != line {
		// No pc before maxpc carries the requested line.
		return 0
	}
	// Subtract quantum from PC to account for post-line increment
	return pc - oldQuantum
}
// NewLineTable returns a new PC/line table
// corresponding to the encoded data.
// Text must be the start address of the
// corresponding text segment, with the exact
// value stored in the 'runtime.text' symbol.
// This value may differ from the start
// address of the text segment if
// binary was built with cgo enabled.
// The function-name and string caches are initialized here.
func NewLineTable(data []byte, text uint64) *LineTable {
	return &LineTable{Data: data, PC: text, Line: 0, funcNames: make(map[uint32]string), strings: make(map[uint32]string)}
}
// Go 1.2 symbol table format.
// See golang.org/s/go12symtab.
//
// A general note about the methods here: rather than try to avoid
// index out of bounds errors, we trust Go to detect them, and then
// we recover from the panics and treat them as indicative of a malformed
// or incomplete table.
//
// The methods called by symtab.go, which begin with "go12" prefixes,
// are expected to have that recovery logic.

// isGo12 reports whether this is a Go 1.2 (or later) symbol table.
func (t *LineTable) isGo12() bool {
	// parsePclnTab is idempotent; it determines and caches t.version.
	t.parsePclnTab()
	return t.version >= ver12
}
// uintptr returns the pointer-sized value encoded at b.
// The pointer size is dictated by the table being read.
func (t *LineTable) uintptr(b []byte) uint64 {
	if t.ptrsize != 4 {
		return t.binary.Uint64(b)
	}
	return uint64(t.binary.Uint32(b))
}
// parsePclnTab parses the pclntab, setting the version and, for Go 1.2+
// tables, the sub-slices (funcnametab, functab, filetab, ...) used by
// the go12* lookup methods. It is safe for concurrent use and runs at
// most once per table.
func (t *LineTable) parsePclnTab() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.version != verUnknown {
		return
	}

	// Note that during this function, setting the version is the last thing we do.
	// If we set the version too early, and parsing failed (likely as a panic on
	// slice lookups), we'd have a mistaken version.
	//
	// Error paths through this code will default the version to 1.1.
	t.version = ver11

	if !disableRecover {
		defer func() {
			// If we panic parsing, assume it's a Go 1.1 pclntab.
			recover()
		}()
	}

	// Check header: 4-byte magic, two zeros, pc quantum, pointer size.
	if len(t.Data) < 16 || t.Data[4] != 0 || t.Data[5] != 0 ||
		(t.Data[6] != 1 && t.Data[6] != 2 && t.Data[6] != 4) || // pc quantum
		(t.Data[7] != 4 && t.Data[7] != 8) { // pointer size
		return
	}

	var possibleVersion version

	// The magic numbers are chosen such that reading the value with
	// a different endianness does not result in the same value.
	// That lets us use the magic number to determine the endianness.
	leMagic := abi.PCLnTabMagic(binary.LittleEndian.Uint32(t.Data))
	beMagic := abi.PCLnTabMagic(binary.BigEndian.Uint32(t.Data))
	switch {
	case leMagic == abi.Go12PCLnTabMagic:
		t.binary, possibleVersion = binary.LittleEndian, ver12
	case beMagic == abi.Go12PCLnTabMagic:
		t.binary, possibleVersion = binary.BigEndian, ver12
	case leMagic == abi.Go116PCLnTabMagic:
		t.binary, possibleVersion = binary.LittleEndian, ver116
	case beMagic == abi.Go116PCLnTabMagic:
		t.binary, possibleVersion = binary.BigEndian, ver116
	case leMagic == abi.Go118PCLnTabMagic:
		t.binary, possibleVersion = binary.LittleEndian, ver118
	case beMagic == abi.Go118PCLnTabMagic:
		t.binary, possibleVersion = binary.BigEndian, ver118
	case leMagic == abi.Go120PCLnTabMagic:
		t.binary, possibleVersion = binary.LittleEndian, ver120
	case beMagic == abi.Go120PCLnTabMagic:
		t.binary, possibleVersion = binary.BigEndian, ver120
	default:
		return
	}
	t.version = possibleVersion

	// quantum and ptrSize are the same between 1.2, 1.16, and 1.18
	t.quantum = uint32(t.Data[6])
	t.ptrsize = uint32(t.Data[7])

	// offset and data index the pointer-sized header words that
	// follow the 8-byte fixed header.
	offset := func(word uint32) uint64 {
		return t.uintptr(t.Data[8+word*t.ptrsize:])
	}
	data := func(word uint32) []byte {
		return t.Data[offset(word):]
	}

	switch possibleVersion {
	case ver118, ver120:
		t.nfunctab = uint32(offset(0))
		t.nfiletab = uint32(offset(1))
		t.textStart = t.PC // use the start PC instead of reading from the table, which may be unrelocated
		t.funcnametab = data(3)
		t.cutab = data(4)
		t.filetab = data(5)
		t.pctab = data(6)
		t.funcdata = data(7)
		t.functab = data(7)
		functabsize := (int(t.nfunctab)*2 + 1) * t.functabFieldSize()
		t.functab = t.functab[:functabsize]
	case ver116:
		t.nfunctab = uint32(offset(0))
		t.nfiletab = uint32(offset(1))
		t.funcnametab = data(2)
		t.cutab = data(3)
		t.filetab = data(4)
		t.pctab = data(5)
		t.funcdata = data(6)
		t.functab = data(6)
		functabsize := (int(t.nfunctab)*2 + 1) * t.functabFieldSize()
		t.functab = t.functab[:functabsize]
	case ver12:
		// Go 1.2 keeps everything in one blob; the file table offset
		// is stored just past the functab.
		t.nfunctab = uint32(t.uintptr(t.Data[8:]))
		t.funcdata = t.Data
		t.funcnametab = t.Data
		t.functab = t.Data[8+t.ptrsize:]
		t.pctab = t.Data
		functabsize := (int(t.nfunctab)*2 + 1) * t.functabFieldSize()
		fileoff := t.binary.Uint32(t.functab[functabsize:])
		t.functab = t.functab[:functabsize]
		t.filetab = t.Data[fileoff:]
		t.nfiletab = t.binary.Uint32(t.filetab)
		t.filetab = t.filetab[:t.nfiletab*4]
	default:
		panic("unreachable")
	}
}
// go12Funcs returns a slice of Funcs derived from the Go 1.2+ pcln table.
func (t *LineTable) go12Funcs() []Func {
	// Assume it is malformed and return nil on error.
	if !disableRecover {
		defer func() {
			recover()
		}()
	}

	ft := t.funcTab()
	funcs := make([]Func, ft.Count())
	syms := make([]Sym, len(funcs))
	for i := range funcs {
		f := &funcs[i]
		f.Entry = ft.pc(i)
		f.End = ft.pc(i + 1)
		info := t.funcData(uint32(i))
		f.LineTable = t
		// NOTE(review): FrameSize is populated from the deferreturn
		// field here; this looks surprising but is preserved as-is.
		f.FrameSize = int(info.deferreturn())
		syms[i] = Sym{
			Value:     f.Entry,
			Type:      'T',
			Name:      t.funcName(info.nameOff()),
			GoType:    0,
			Func:      f,
			goVersion: t.version,
		}
		f.Sym = &syms[i]
	}
	return funcs
}
// findFunc returns the funcData whose [entry, end) range contains pc,
// or the zero funcData when pc lies outside the table.
func (t *LineTable) findFunc(pc uint64) funcData {
	tab := t.funcTab()
	// Reject pcs before the first entry or at/after the final sentinel.
	if pc < tab.pc(0) || pc >= tab.pc(tab.Count()) {
		return funcData{}
	}
	// Find the first entry starting beyond pc; its predecessor holds pc.
	after := sort.Search(int(t.nfunctab), func(i int) bool {
		return tab.pc(i) > pc
	})
	return t.funcData(uint32(after - 1))
}
// readvarint decodes a little-endian base-128 varint from the front of
// *pp, consuming the bytes it reads, and returns the decoded value.
func (t *LineTable) readvarint(pp *[]byte) uint32 {
	buf := *pp
	var val uint32
	for shift := uint32(0); ; shift += 7 {
		c := buf[0]
		buf = buf[1:]
		val |= uint32(c&0x7F) << shift
		if c < 0x80 {
			// High bit clear: this was the final byte.
			break
		}
	}
	*pp = buf
	return val
}
// funcName returns the NUL-terminated function name found at off in
// the function-name table, caching the result in t.funcNames.
func (t *LineTable) funcName(off uint32) string {
	if cached, ok := t.funcNames[off]; ok {
		return cached
	}
	tail := t.funcnametab[off:]
	name := string(tail[:bytes.IndexByte(tail, 0)])
	t.funcNames[off] = name
	return name
}
// stringFrom returns the NUL-terminated string at off within arr,
// interning the result in t.strings.
// NOTE(review): the cache is keyed by the offset alone, so the same
// offset must not be used across arrays with different contents.
func (t *LineTable) stringFrom(arr []byte, off uint32) string {
	if cached, ok := t.strings[off]; ok {
		return cached
	}
	tail := arr[off:]
	s := string(tail[:bytes.IndexByte(tail, 0)])
	t.strings[off] = s
	return s
}
// string returns a Go string found at off.
func (t *LineTable) string(off uint32) string {
	// Strings referenced by the Go 1.2 table live in funcdata.
	return t.stringFrom(t.funcdata, off)
}
// functabFieldSize returns the size in bytes of a single functab field.
// Go 1.18 shrank functab entries to 4-byte offsets; before that they
// were pointer-sized.
func (t *LineTable) functabFieldSize() int {
	if t.version < ver118 {
		return int(t.ptrsize)
	}
	return 4
}
// funcTab returns t's funcTab.
func (t *LineTable) funcTab() funcTab {
	// Cache the field size so pc/funcOff need not recompute it.
	return funcTab{LineTable: t, sz: t.functabFieldSize()}
}

// funcTab is memory corresponding to a slice of functab structs, followed by an invalid PC.
// A functab struct is a PC and a func offset.
type funcTab struct {
	*LineTable
	sz int // cached result of t.functabFieldSize
}

// Count returns the number of func entries in f.
func (f funcTab) Count() int {
	return int(f.nfunctab)
}
// pc returns the PC of the i'th func in f. For Go 1.18+ tables the
// stored value is an offset from the text start, so textStart is
// added back in.
func (f funcTab) pc(i int) uint64 {
	addr := f.uint(f.functab[2*i*f.sz:])
	if f.version < ver118 {
		return addr
	}
	return addr + f.textStart
}
// funcOff returns the funcdata offset of the i'th func in f.
func (f funcTab) funcOff(i int) uint64 {
	// Entries are (pc, funcOff) pairs; the offset is the second field.
	return f.uint(f.functab[(2*i+1)*f.sz:])
}

// uint returns the uint stored at b.
// The field width matches functabFieldSize: 4 bytes for 1.18+,
// pointer-sized before.
func (f funcTab) uint(b []byte) uint64 {
	if f.sz == 4 {
		return uint64(f.binary.Uint32(b))
	}
	return f.binary.Uint64(b)
}
// funcData is memory corresponding to an _func struct.
type funcData struct {
	t    *LineTable // LineTable this data is a part of
	data []byte     // raw memory for the function
}

// funcData returns the ith funcData in t.functab.
func (t *LineTable) funcData(i uint32) funcData {
	// The slice starts at the function's record and extends to the
	// end of funcdata.
	data := t.funcdata[t.funcTab().funcOff(int(i)):]
	return funcData{t: t, data: data}
}

// IsZero reports whether f is the zero value.
func (f funcData) IsZero() bool {
	return f.t == nil && f.data == nil
}
// entryPC returns the func's entry PC, relocating 1.18+ offsets
// against textStart.
func (f *funcData) entryPC() uint64 {
	// In Go 1.18, the first field of _func changed
	// from a uintptr entry PC to a uint32 entry offset.
	if f.t.version >= ver118 {
		// TODO: support multiple text sections.
		// See runtime/symtab.go:(*moduledata).textAddr.
		return uint64(f.t.binary.Uint32(f.data)) + f.t.textStart
	}
	return f.t.uintptr(f.data)
}
// Named accessors for the _func fields used by this package;
// see field for the underlying layout.
func (f funcData) nameOff() uint32     { return f.field(1) }
func (f funcData) deferreturn() uint32 { return f.field(3) }
func (f funcData) pcfile() uint32      { return f.field(5) }
func (f funcData) pcln() uint32        { return f.field(6) }
func (f funcData) cuOffset() uint32    { return f.field(8) }
// field returns the nth field of the _func struct.
// It panics if n == 0 or n > 9; for n == 0, call f.entryPC.
// Most callers should use a named field accessor (just above).
// The go12* entry points recover from any panic raised here and
// treat it as a malformed table.
func (f funcData) field(n uint32) uint32 {
	if n == 0 || n > 9 {
		panic("bad funcdata field")
	}
	// In Go 1.18, the first field of _func changed
	// from a uintptr entry PC to a uint32 entry offset.
	sz0 := f.t.ptrsize
	if f.t.version >= ver118 {
		sz0 = 4
	}
	off := sz0 + (n-1)*4 // subsequent fields are 4 bytes each
	data := f.data[off:]
	return f.t.binary.Uint32(data)
}
// step advances to the next pc, value pair in the encoded table.
// It reports false at the end-of-table marker: a zero value delta
// when not on the first pair.
func (t *LineTable) step(p *[]byte, pc *uint64, val *int32, first bool) bool {
	uvdelta := t.readvarint(p)
	if uvdelta == 0 && !first {
		return false
	}
	// Zig-zag decode the signed value delta: odd encodes negative.
	if uvdelta&1 != 0 {
		uvdelta = ^(uvdelta >> 1)
	} else {
		uvdelta >>= 1
	}
	vdelta := int32(uvdelta)
	// PC deltas are stored in units of the pc quantum.
	pcdelta := t.readvarint(p) * t.quantum
	*pc += uint64(pcdelta)
	*val += vdelta
	return true
}
// pcvalue reports the value associated with the target pc.
// off is the offset to the beginning of the pc-value table,
// and entry is the start PC for the corresponding function.
// It returns -1 when targetpc is past the end of the table.
func (t *LineTable) pcvalue(off uint32, entry, targetpc uint64) int32 {
	tab := t.pctab[off:]
	val, pc := int32(-1), entry
	for t.step(&tab, &pc, &val, pc == entry) {
		if targetpc < pc {
			// val holds for [previous pc, pc), which covers targetpc.
			return val
		}
	}
	return -1
}
// findFileLine scans one function in the binary looking for a
// program counter in the given file on the given line.
// It does so by running the pc-value tables mapping program counter
// to file number. Since most functions come from a single file, these
// are usually short and quick to scan. If a file match is found, then the
// code goes to the expense of looking for a simultaneous line number match.
// It returns 0 when no matching pc exists in this function.
func (t *LineTable) findFileLine(entry uint64, filetab, linetab uint32, filenum, line int32, cutab []byte) uint64 {
	if filetab == 0 || linetab == 0 {
		return 0
	}

	fp := t.pctab[filetab:]
	fl := t.pctab[linetab:]
	fileVal := int32(-1)
	filePC := entry
	lineVal := int32(-1)
	linePC := entry
	fileStartPC := filePC
	for t.step(&fp, &filePC, &fileVal, filePC == entry) {
		fileIndex := fileVal
		// 1.16+ tables store per-CU indices; translate via cutab.
		if t.version == ver116 || t.version == ver118 || t.version == ver120 {
			fileIndex = int32(t.binary.Uint32(cutab[fileVal*4:]))
		}
		if fileIndex == filenum && fileStartPC < filePC {
			// fileIndex is in effect starting at fileStartPC up to
			// but not including filePC, and it's the file we want.
			// Run the PC table looking for a matching line number
			// or until we reach filePC.
			lineStartPC := linePC
			for linePC < filePC && t.step(&fl, &linePC, &lineVal, linePC == entry) {
				// lineVal is in effect until linePC, and lineStartPC < filePC.
				if lineVal == line {
					if fileStartPC <= lineStartPC {
						return lineStartPC
					}
					if fileStartPC < linePC {
						return fileStartPC
					}
				}
				lineStartPC = linePC
			}
		}
		fileStartPC = filePC
	}
	return 0
}
// go12PCToLine maps program counter to line number for the Go 1.2+ pcln table.
// It returns -1 if pc is not covered or the table is malformed.
func (t *LineTable) go12PCToLine(pc uint64) (line int) {
	defer func() {
		// Malformed tables can panic during decoding; report -1 instead.
		if !disableRecover && recover() != nil {
			line = -1
		}
	}()

	f := t.findFunc(pc)
	if f.IsZero() {
		return -1
	}
	return int(t.pcvalue(f.pcln(), f.entryPC(), pc))
}
// go12PCToFile maps program counter to file name for the Go 1.2+ pcln table.
// It returns "" if pc is not covered or the table is malformed.
func (t *LineTable) go12PCToFile(pc uint64) (file string) {
	defer func() {
		// Malformed tables can panic during decoding; report "" instead.
		if !disableRecover && recover() != nil {
			file = ""
		}
	}()

	f := t.findFunc(pc)
	if f.IsZero() {
		return ""
	}
	entry := f.entryPC()
	filetab := f.pcfile()
	fno := t.pcvalue(filetab, entry, pc)
	if t.version == ver12 {
		// Go 1.2: fno indexes the global file table directly; 0 is invalid.
		if fno <= 0 {
			return ""
		}
		return t.string(t.binary.Uint32(t.filetab[4*fno:]))
	}
	// Go ≥ 1.16
	if fno < 0 { // 0 is valid for ≥ 1.16
		return ""
	}
	// fno indexes this function's compilation unit in cutab, which in
	// turn holds the file name's byte offset in filetab (^uint32(0)
	// marks a missing file).
	cuoff := f.cuOffset()
	if fnoff := t.binary.Uint32(t.cutab[(cuoff+uint32(fno))*4:]); fnoff != ^uint32(0) {
		return t.stringFrom(t.filetab, fnoff)
	}
	return ""
}
// go12LineToPC maps a (file, line) pair to a program counter for the Go 1.2+ pcln table.
// It returns 0 if the file is unknown, no PC matches, or the table is malformed.
func (t *LineTable) go12LineToPC(file string, line int) (pc uint64) {
	defer func() {
		// Malformed tables can panic during decoding; report 0 instead.
		if !disableRecover && recover() != nil {
			pc = 0
		}
	}()

	t.initFileMap()
	filenum, ok := t.fileMap[file]
	if !ok {
		return 0
	}

	// Scan all functions.
	// If this turns out to be a bottleneck, we could build a map[int32][]int32
	// mapping file number to a list of functions with code from that file.
	var cutab []byte
	for i := uint32(0); i < t.nfunctab; i++ {
		f := t.funcData(i)
		entry := f.entryPC()
		filetab := f.pcfile()
		linetab := f.pcln()
		if t.version == ver116 || t.version == ver118 || t.version == ver120 {
			if f.cuOffset() == ^uint32(0) {
				// skip functions without compilation unit (not real function, or linker generated)
				continue
			}
			// Narrow cutab to this function's compilation unit.
			cutab = t.cutab[f.cuOffset()*4:]
		}
		pc := t.findFileLine(entry, filetab, linetab, int32(filenum), int32(line), cutab)
		if pc != 0 {
			return pc
		}
	}
	return 0
}
// initFileMap lazily builds the map from file name to file number,
// guarded by t.mu so concurrent lookups share one construction.
func (t *LineTable) initFileMap() {
	t.mu.Lock()
	defer t.mu.Unlock()

	if t.fileMap != nil {
		return // already built
	}
	fm := make(map[string]uint32)

	if t.version != ver12 {
		// Go ≥1.16: filetab holds NUL-terminated names back to back;
		// the map value is the name's byte offset.
		var off uint32
		for i := uint32(0); i < t.nfiletab; i++ {
			name := t.stringFrom(t.filetab, off)
			fm[name] = off
			off += uint32(len(name) + 1)
		}
	} else {
		// Go 1.2: filetab is a table of 4-byte string offsets; the map
		// value is the file number (index), starting at 1.
		for i := uint32(1); i < t.nfiletab; i++ {
			fm[t.string(t.binary.Uint32(t.filetab[4*i:]))] = i
		}
	}
	t.fileMap = fm
}
// go12MapFiles adds to m a key for every file in the Go 1.2 LineTable.
// Every key maps to obj. That's not a very interesting map, but it provides
// a way for callers to obtain the list of files in the program.
func (t *LineTable) go12MapFiles(m map[string]*Obj, obj *Obj) {
	if !disableRecover {
		// Swallow panics from malformed tables; m is simply left partial.
		defer func() {
			recover()
		}()
	}

	t.initFileMap()
	for name := range t.fileMap {
		m[name] = obj
	}
}
// disableRecover causes this package not to swallow panics.
// This is useful when making changes.
// It is consulted by the defer/recover guards in the PC/line lookups above.
const disableRecover = false
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gosym implements access to the Go symbol
// and line number tables embedded in Go binaries generated
// by the gc compilers.
package gosym
import (
"bytes"
"encoding/binary"
"fmt"
"strconv"
"strings"
)
/*
* Symbols
*/
// A Sym represents a single symbol table entry.
type Sym struct {
	Value  uint64 // symbol address
	Type   byte   // symbol type letter; lowercase means static (file-local)
	Name   string // symbol name
	GoType uint64 // address of the Go type descriptor, or 0

	// If this symbol is a function symbol, the corresponding Func
	Func *Func

	// goVersion records the pcln table version, used by PackageName to
	// pick the matching compiler-generated name prefixes.
	goVersion version
}
// Static reports whether this symbol is static (not visible outside its file).
// Static symbols are encoded with lowercase type letters.
func (s *Sym) Static() bool { return s.Type >= 'a' }
// nameWithoutInst returns s.Name if s.Name has no brackets (does not reference an
// instantiated type, function, or method). If s.Name contains brackets, then it
// returns s.Name with all the contents between (and including) the outermost left
// and right bracket removed. This is useful to ignore any extra slashes or dots
// inside the brackets from the string searches below, where needed.
func (s *Sym) nameWithoutInst() string {
	lbrack := strings.Index(s.Name, "[")
	if lbrack < 0 {
		// No instantiation brackets at all.
		return s.Name
	}
	rbrack := strings.LastIndex(s.Name, "]")
	if rbrack < 0 {
		// Malformed name, should contain closing bracket too.
		return s.Name
	}
	return s.Name[:lbrack] + s.Name[rbrack+1:]
}
// PackageName returns the package part of the symbol name,
// or the empty string if there is none.
func (s *Sym) PackageName() string {
	name := s.nameWithoutInst()

	// Compiler-generated symbols carry a reserved prefix and belong to
	// no package: "go:"/"type:" since go1.20, "go."/"type." in go1.18
	// and earlier.
	//
	// See cmd/compile/internal/base/link.go:ReservedImports variable.
	switch {
	case s.goVersion >= ver120 && (strings.HasPrefix(name, "go:") || strings.HasPrefix(name, "type:")):
		return ""
	case s.goVersion <= ver118 && (strings.HasPrefix(name, "go.") || strings.HasPrefix(name, "type.")):
		return ""
	}

	// The package path ends at the first dot after the final slash.
	start := strings.LastIndex(name, "/")
	if start < 0 {
		start = 0
	}
	if dot := strings.Index(name[start:], "."); dot >= 0 {
		return name[:start+dot]
	}
	return ""
}
// ReceiverName returns the receiver type name of this symbol,
// or the empty string if there is none. A receiver name is only detected in
// the case that s.Name is fully-specified with a package name.
// The receiver is the text between the package-name dot and the final
// method dot, e.g. "(*T)" in "pkg.(*T).Method".
func (s *Sym) ReceiverName() string {
	name := s.nameWithoutInst()
	// If we find a slash in name, it should precede any bracketed expression
	// that was removed, so pathend will apply correctly to name and s.Name.
	pathend := strings.LastIndex(name, "/")
	if pathend < 0 {
		pathend = 0
	}
	// Find the first dot after pathend (or from the beginning, if there was
	// no slash in name).
	l := strings.Index(name[pathend:], ".")
	// Find the last dot after pathend (or the beginning).
	r := strings.LastIndex(name[pathend:], ".")
	if l == -1 || r == -1 || l == r {
		// There is no receiver if we didn't find two distinct dots after pathend.
		return ""
	}
	// Given there is a trailing '.' that is in name, find it now in s.Name.
	// pathend+l should apply to s.Name, because it should be the dot in the
	// package name.
	r = strings.LastIndex(s.Name[pathend:], ".")
	return s.Name[pathend+l+1 : pathend+r]
}
// BaseName returns the symbol name without the package or receiver name.
func (s *Sym) BaseName() string {
	name := s.nameWithoutInst()
	dot := strings.LastIndex(name, ".")
	if dot < 0 {
		// No dot at all: nothing to strip.
		return s.Name
	}
	if s.Name != name {
		// Brackets were stripped from name. If the final dot falls
		// after the opening bracket, its index does not transfer to
		// s.Name; recompute the last dot there. Otherwise dot is
		// before the brackets and applies to s.Name as-is.
		if open := strings.Index(s.Name, "["); dot > open {
			dot = strings.LastIndex(s.Name, ".")
		}
	}
	return s.Name[dot+1:]
}
// A Func collects information about a single function.
type Func struct {
	Entry uint64 // entry (start) program counter
	*Sym         // the function's symbol table entry

	End       uint64     // address of the next text symbol (first PC past the function)
	Params    []*Sym     // nil for Go 1.3 and later binaries
	Locals    []*Sym     // nil for Go 1.3 and later binaries
	FrameSize int        // stack frame size, from 'm' symbols
	LineTable *LineTable // table used for PC/line lookups
	Obj       *Obj       // enclosing object
}
// An Obj represents a collection of functions in a symbol table.
//
// The exact method of division of a binary into separate Objs is an internal detail
// of the symbol table format.
//
// In early versions of Go each source file became a different Obj.
//
// In Go 1 and Go 1.1, each package produced one Obj for all Go sources
// and one Obj per C source file.
//
// In Go 1.2, there is a single Obj for the entire program.
type Obj struct {
	// Funcs is a list of functions in the Obj.
	Funcs []Func

	// In Go 1.1 and earlier, Paths is a list of symbols corresponding
	// to the source file names that produced the Obj.
	// In Go 1.2, Paths is nil.
	// Use the keys of Table.Files to obtain a list of source files.
	Paths []Sym // meta
}
/*
* Symbol tables
*/
// Table represents a Go symbol table. It stores all of the
// symbols decoded from the program and provides methods to translate
// between symbols, names, and addresses.
type Table struct {
	Syms  []Sym // nil for Go 1.3 and later binaries
	Funcs []Func
	Files map[string]*Obj // for Go 1.2 and later all files map to one Obj
	Objs  []Obj           // for Go 1.2 and later only one Obj in slice

	go12line *LineTable // Go 1.2 line number table
}
// sym is the raw form of a symbol table entry as decoded by walksymtab,
// before name rewriting and path resolution in NewTable.
type sym struct {
	value  uint64 // symbol value (address)
	gotype uint64 // address of the Go type descriptor, or 0
	typ    byte   // symbol type letter
	name   []byte // raw name bytes (2-byte filename codes for 'z'/'Z' symbols)
}
var (
	// Magic prefixes distinguishing the symbol table layouts
	// understood by walksymtab.
	littleEndianSymtab    = []byte{0xFD, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00}
	bigEndianSymtab       = []byte{0xFF, 0xFF, 0xFF, 0xFD, 0x00, 0x00, 0x00}
	oldLittleEndianSymtab = []byte{0xFE, 0xFF, 0xFF, 0xFF, 0x00, 0x00}
)
// walksymtab walks the raw symbol table in data, calling fn for each
// decoded symbol. It recognizes three layouts by their magic prefix:
// the old big-endian table (no prefix), an interim little-endian
// variant, and the "new" table (big or little endian) whose header
// also carries the pointer size.
//
// An empty table is not an error. Decoding problems are reported as
// *DecodingError; an error returned by fn aborts the walk and is
// returned to the caller.
func walksymtab(data []byte, fn func(sym) error) error {
	if len(data) == 0 { // missing symtab is okay
		return nil
	}
	var order binary.ByteOrder = binary.BigEndian
	newTable := false
	switch {
	case bytes.HasPrefix(data, oldLittleEndianSymtab):
		// Same as Go 1.0, but little endian.
		// Format was used during interim development between Go 1.0 and Go 1.1.
		// Should not be widespread, but easy to support.
		data = data[6:]
		order = binary.LittleEndian
	case bytes.HasPrefix(data, bigEndianSymtab):
		newTable = true
	case bytes.HasPrefix(data, littleEndianSymtab):
		newTable = true
		order = binary.LittleEndian
	}
	var ptrsz int
	if newTable {
		if len(data) < 8 {
			return &DecodingError{len(data), "unexpected EOF", nil}
		}
		ptrsz = int(data[7])
		if ptrsz != 4 && ptrsz != 8 {
			return &DecodingError{7, "invalid pointer size", ptrsz}
		}
		data = data[8:]
	}
	var s sym
	p := data
	for len(p) >= 4 {
		var typ byte
		if newTable {
			// Symbol type, value, Go type.
			// The leading byte packs the type letter (low 6 bits)
			// plus flags for a fixed-width value and a Go type.
			typ = p[0] & 0x3F
			wideValue := p[0]&0x40 != 0
			goType := p[0]&0x80 != 0
			if typ < 26 {
				typ += 'A'
			} else {
				typ += 'a' - 26
			}
			s.typ = typ
			p = p[1:]
			if wideValue {
				if len(p) < ptrsz {
					return &DecodingError{len(data), "unexpected EOF", nil}
				}
				// fixed-width value
				if ptrsz == 8 {
					s.value = order.Uint64(p[0:8])
					p = p[8:]
				} else {
					s.value = uint64(order.Uint32(p[0:4]))
					p = p[4:]
				}
			} else {
				// varint value
				s.value = 0
				shift := uint(0)
				for len(p) > 0 && p[0]&0x80 != 0 {
					s.value |= uint64(p[0]&0x7F) << shift
					shift += 7
					p = p[1:]
				}
				if len(p) == 0 {
					return &DecodingError{len(data), "unexpected EOF", nil}
				}
				s.value |= uint64(p[0]) << shift
				p = p[1:]
			}
			if goType {
				if len(p) < ptrsz {
					return &DecodingError{len(data), "unexpected EOF", nil}
				}
				// fixed-width go type
				if ptrsz == 8 {
					s.gotype = order.Uint64(p[0:8])
					p = p[8:]
				} else {
					s.gotype = uint64(order.Uint32(p[0:4]))
					p = p[4:]
				}
			}
		} else {
			// Value, symbol type.
			s.value = uint64(order.Uint32(p[0:4]))
			if len(p) < 5 {
				return &DecodingError{len(data), "unexpected EOF", nil}
			}
			typ = p[4]
			if typ&0x80 == 0 {
				return &DecodingError{len(data) - len(p) + 4, "bad symbol type", typ}
			}
			typ &^= 0x80
			s.typ = typ
			p = p[5:]
		}

		// Name.
		var i int
		var nnul int
		for i = 0; i < len(p); i++ {
			if p[i] == 0 {
				nnul = 1
				break
			}
		}
		switch typ {
		case 'z', 'Z':
			// Path symbols: after the (empty) NUL-terminated name,
			// the payload is 2-byte filename codes terminated by a
			// 2-byte zero.
			p = p[i+nnul:]
			for i = 0; i+2 <= len(p); i += 2 {
				if p[i] == 0 && p[i+1] == 0 {
					nnul = 2
					break
				}
			}
		}
		if len(p) < i+nnul {
			return &DecodingError{len(data), "unexpected EOF", nil}
		}
		s.name = p[0:i]
		i += nnul
		p = p[i:]

		if !newTable {
			if len(p) < 4 {
				return &DecodingError{len(data), "unexpected EOF", nil}
			}
			// Go type.
			s.gotype = uint64(order.Uint32(p[:4]))
			p = p[4:]
		}

		// Report the symbol. The callback's error was previously
		// discarded, which silently dropped decoding failures such as
		// NewTable's "bad filename code"; propagate it instead.
		if err := fn(s); err != nil {
			return err
		}
	}
	return nil
}
// NewTable decodes the Go symbol table (the ".gosymtab" section in ELF),
// returning an in-memory representation.
// Starting with Go 1.3, the Go symbol table no longer includes symbol data;
// callers should pass nil for the symtab parameter.
func NewTable(symtab []byte, pcln *LineTable) (*Table, error) {
	var n int
	// First pass: count symbols so t.Syms can be sized exactly.
	err := walksymtab(symtab, func(s sym) error {
		n++
		return nil
	})
	if err != nil {
		return nil, err
	}

	var t Table
	if pcln.isGo12() {
		t.go12line = pcln
	}
	fname := make(map[uint16]string) // filename code -> name, filled from 'f' symbols
	t.Syms = make([]Sym, 0, n)
	nf := 0             // number of text (function) symbols
	nz := 0             // number of 'z'/'Z' path-symbol runs (object boundaries)
	lasttyp := uint8(0) // type of the previous symbol, to detect run starts
	// Second pass: decode each symbol into t.Syms.
	err = walksymtab(symtab, func(s sym) error {
		n := len(t.Syms)
		t.Syms = t.Syms[0 : n+1]
		ts := &t.Syms[n]
		ts.Type = s.typ
		ts.Value = s.value
		ts.GoType = s.gotype
		ts.goVersion = pcln.version
		switch s.typ {
		default:
			// rewrite name to use . instead of · (c2 b7)
			w := 0
			b := s.name
			for i := 0; i < len(b); i++ {
				if b[i] == 0xc2 && i+1 < len(b) && b[i+1] == 0xb7 {
					i++
					b[i] = '.'
				}
				b[w] = b[i]
				w++
			}
			ts.Name = string(s.name[0:w])
		case 'z', 'Z':
			// Path symbol: name is a sequence of 2-byte filename
			// codes, each resolved through fname and joined with '/'.
			if lasttyp != 'z' && lasttyp != 'Z' {
				nz++
			}
			for i := 0; i < len(s.name); i += 2 {
				eltIdx := binary.BigEndian.Uint16(s.name[i : i+2])
				elt, ok := fname[eltIdx]
				if !ok {
					return &DecodingError{-1, "bad filename code", eltIdx}
				}
				if n := len(ts.Name); n > 0 && ts.Name[n-1] != '/' {
					ts.Name += "/"
				}
				ts.Name += elt
			}
		}
		switch s.typ {
		case 'T', 't', 'L', 'l':
			nf++
		case 'f':
			fname[uint16(s.value)] = ts.Name
		}
		lasttyp = s.typ
		return nil
	})
	if err != nil {
		return nil, err
	}

	t.Funcs = make([]Func, 0, nf)
	t.Files = make(map[string]*Obj)

	var obj *Obj
	if t.go12line != nil {
		// Put all functions into one Obj.
		t.Objs = make([]Obj, 1)
		obj = &t.Objs[0]
		t.go12line.go12MapFiles(t.Files, obj)
	} else {
		t.Objs = make([]Obj, 0, nz)
	}

	// Count text symbols and attach frame sizes, parameters, and
	// locals to them. Also, find object file boundaries.
	lastf := 0 // index into t.Funcs of the current object's first function
	for i := 0; i < len(t.Syms); i++ {
		sym := &t.Syms[i]
		switch sym.Type {
		case 'Z', 'z': // path symbol
			if t.go12line != nil {
				// Go 1.2 binaries have the file information elsewhere. Ignore.
				break
			}
			// Finish the current object
			if obj != nil {
				obj.Funcs = t.Funcs[lastf:]
			}
			lastf = len(t.Funcs)

			// Start new object
			n := len(t.Objs)
			t.Objs = t.Objs[0 : n+1]
			obj = &t.Objs[n]

			// Count & copy path symbols
			var end int
			for end = i + 1; end < len(t.Syms); end++ {
				if c := t.Syms[end].Type; c != 'Z' && c != 'z' {
					break
				}
			}
			obj.Paths = t.Syms[i:end]
			i = end - 1 // loop will i++

			// Record file names: only names pushed at include depth 0
			// are source files of this object.
			depth := 0
			for j := range obj.Paths {
				s := &obj.Paths[j]
				if s.Name == "" {
					depth--
				} else {
					if depth == 0 {
						t.Files[s.Name] = obj
					}
					depth++
				}
			}

		case 'T', 't', 'L', 'l': // text symbol
			// The previous function ends where this one starts.
			if n := len(t.Funcs); n > 0 {
				t.Funcs[n-1].End = sym.Value
			}
			if sym.Name == "runtime.etext" || sym.Name == "etext" {
				continue
			}

			// Count parameter and local (auto) syms
			var np, na int
			var end int
		countloop:
			for end = i + 1; end < len(t.Syms); end++ {
				switch t.Syms[end].Type {
				case 'T', 't', 'L', 'l', 'Z', 'z':
					break countloop
				case 'p':
					np++
				case 'a':
					na++
				}
			}

			// Fill in the function symbol
			n := len(t.Funcs)
			t.Funcs = t.Funcs[0 : n+1]
			fn := &t.Funcs[n]
			sym.Func = fn
			fn.Params = make([]*Sym, 0, np)
			fn.Locals = make([]*Sym, 0, na)
			fn.Sym = sym
			fn.Entry = sym.Value
			fn.Obj = obj
			if t.go12line != nil {
				// All functions share the same line table.
				// It knows how to narrow down to a specific
				// function quickly.
				fn.LineTable = t.go12line
			} else if pcln != nil {
				fn.LineTable = pcln.slice(fn.Entry)
				pcln = fn.LineTable
			}
			for j := i; j < end; j++ {
				s := &t.Syms[j]
				switch s.Type {
				case 'm':
					fn.FrameSize = int(s.Value)
				case 'p':
					n := len(fn.Params)
					fn.Params = fn.Params[0 : n+1]
					fn.Params[n] = s
				case 'a':
					n := len(fn.Locals)
					fn.Locals = fn.Locals[0 : n+1]
					fn.Locals[n] = s
				}
			}
			i = end - 1 // loop will i++
		}
	}

	if t.go12line != nil && nf == 0 {
		// No text symbols in the symtab: take the function list
		// from the Go 1.2 line table instead.
		t.Funcs = t.go12line.go12Funcs()
	}
	if obj != nil {
		obj.Funcs = t.Funcs[lastf:]
	}
	return &t, nil
}
// PCToFunc returns the function containing the program counter pc,
// or nil if there is no such function.
// It binary-searches t.Funcs, which is ordered by entry address.
func (t *Table) PCToFunc(pc uint64) *Func {
	fs := t.Funcs
	for len(fs) > 0 {
		mid := len(fs) / 2
		f := &fs[mid]
		if pc < f.Entry {
			fs = fs[:mid]
		} else if pc < f.End {
			// f.Entry <= pc < f.End: pc is inside this function.
			return f
		} else {
			fs = fs[mid+1:]
		}
	}
	return nil
}
// PCToLine looks up line number information for a program counter.
// If there is no information, it returns fn == nil.
func (t *Table) PCToLine(pc uint64) (file string, line int, fn *Func) {
	fn = t.PCToFunc(pc)
	if fn == nil {
		return
	}
	if t.go12line == nil {
		// Pre-1.2: translate via the per-object absolute line number.
		file, line = fn.Obj.lineFromAline(fn.LineTable.PCToLine(pc))
		return
	}
	file = t.go12line.go12PCToFile(pc)
	line = t.go12line.go12PCToLine(pc)
	return
}
// LineToPC looks up the first program counter on the given line in
// the named file. It returns [UnknownFileError] or [UnknownLineError] if
// there is an error looking up this line.
func (t *Table) LineToPC(file string, line int) (pc uint64, fn *Func, err error) {
	obj, ok := t.Files[file]
	if !ok {
		return 0, nil, UnknownFileError(file)
	}

	if t.go12line != nil {
		// Go 1.2+: the pcln table answers directly.
		pc := t.go12line.go12LineToPC(file, line)
		if pc == 0 {
			return 0, nil, &UnknownLineError{file, line}
		}
		return pc, t.PCToFunc(pc), nil
	}

	// Pre-1.2: convert (file, line) to an absolute line number, then
	// probe each of the object's functions for a PC on that line.
	abs, err := obj.alineFromLine(file, line)
	if err != nil {
		return
	}
	for i := range obj.Funcs {
		f := &obj.Funcs[i]
		pc := f.LineTable.LineToPC(abs, f.End)
		if pc != 0 {
			return pc, f, nil
		}
	}
	return 0, nil, &UnknownLineError{file, line}
}
// LookupSym returns the text, data, or bss symbol with the given name,
// or nil if no such symbol is found.
func (t *Table) LookupSym(name string) *Sym {
	// TODO(austin) Maybe make a map
	for i := range t.Syms {
		s := &t.Syms[i]
		if s.Name != name {
			continue
		}
		// Only text ('T'/'t'/'L'/'l'), data ('D'/'d'), and
		// bss ('B'/'b') symbols count.
		switch s.Type {
		case 'T', 't', 'L', 'l', 'D', 'd', 'B', 'b':
			return s
		}
	}
	return nil
}
// LookupFunc returns the function with the given name,
// or nil if no such function is found.
//
// (The previous comment, copied from LookupSym, incorrectly described
// this as returning a "text, data, or bss symbol"; it searches t.Funcs
// only.)
func (t *Table) LookupFunc(name string) *Func {
	for i := range t.Funcs {
		f := &t.Funcs[i]
		if f.Sym.Name == name {
			return f
		}
	}
	return nil
}
// SymByAddr returns the text, data, or bss symbol starting at the given address.
func (t *Table) SymByAddr(addr uint64) *Sym {
	for i := range t.Syms {
		s := &t.Syms[i]
		if s.Value != addr {
			continue
		}
		// Only text, data, and bss symbols count.
		switch s.Type {
		case 'T', 't', 'L', 'l', 'D', 'd', 'B', 'b':
			return s
		}
	}
	return nil
}
/*
* Object files
*/
// This is legacy code for Go 1.1 and earlier, which used the
// Plan 9 format for pc-line tables. This code was never quite
// correct. It's probably very close, and it's usually correct, but
// we never quite found all the corner cases.
//
// Go 1.2 and later use a simpler format, documented at golang.org/s/go12symtab.
// lineFromAline converts an absolute line number from this object's
// old-style line table back into a (file, line-within-file) pair by
// replaying the include stack recorded in o.Paths.
func (o *Obj) lineFromAline(aline int) (string, int) {
	type stackEnt struct {
		path   string    // file name at this include level
		start  int       // absolute line at which this file was pushed
		offset int       // absolute lines consumed by includes popped so far
		prev   *stackEnt // enclosing include level
	}

	noPath := &stackEnt{"", 0, 0, nil}
	tos := noPath

pathloop:
	for _, s := range o.Paths {
		val := int(s.Value)
		switch {
		case val > aline:
			// Path events are ordered by absolute line; nothing
			// past this point can affect aline.
			break pathloop
		case val == 1:
			// Start a new stack
			tos = &stackEnt{s.Name, val, 0, noPath}
		case s.Name == "":
			// Pop
			if tos == noPath {
				return "<malformed symbol table>", 0
			}
			tos.prev.offset += val - tos.start
			tos = tos.prev
		default:
			// Push
			tos = &stackEnt{s.Name, val, 0, tos}
		}
	}

	if tos == noPath {
		return "", 0
	}
	return tos.path, aline - tos.start - tos.offset + 1
}
// alineFromLine is the inverse of lineFromAline: it converts a
// (file, line) pair into the absolute line number used by this
// object's old-style line table, again by replaying the include
// stack in o.Paths.
func (o *Obj) alineFromLine(path string, line int) (int, error) {
	if line < 1 {
		return 0, &UnknownLineError{path, line}
	}

	for i, s := range o.Paths {
		// Find this path
		if s.Name != path {
			continue
		}

		// Find this line at this stack level
		depth := 0
		var incstart int // absolute line of the most recent nested push
		line += int(s.Value)
	pathloop:
		for _, s := range o.Paths[i:] {
			val := int(s.Value)
			switch {
			case depth == 1 && val >= line:
				// Target line falls before the next event at
				// this level: found the absolute line.
				return line - 1, nil

			case s.Name == "":
				depth--
				if depth == 0 {
					break pathloop
				} else if depth == 1 {
					// A nested include ended; skip the lines it consumed.
					line += val - incstart
				}

			default:
				if depth == 1 {
					incstart = val
				}
				depth++
			}
		}
		return 0, &UnknownLineError{path, line}
	}
	return 0, UnknownFileError(path)
}
/*
* Errors
*/
// UnknownFileError represents a failure to find the specific file in
// the symbol table.
type UnknownFileError string

// Error implements the error interface.
func (e UnknownFileError) Error() string {
	return fmt.Sprintf("unknown file: %s", string(e))
}
// UnknownLineError represents a failure to map a line to a program
// counter, either because the line is beyond the bounds of the file
// or because there is no code on the given line.
type UnknownLineError struct {
	File string
	Line int
}

// Error implements the error interface.
func (e *UnknownLineError) Error() string {
	return fmt.Sprintf("no code at %s:%d", e.File, e.Line)
}
// DecodingError represents an error during the decoding of
// the symbol table.
type DecodingError struct {
	off int // byte offset of the problem, or -1 if unknown
	msg string
	val any // offending value, or nil
}

// Error implements the error interface.
func (e *DecodingError) Error() string {
	if e.val == nil {
		return fmt.Sprintf("%s at byte %#x", e.msg, e.off)
	}
	return fmt.Sprintf("%s '%v' at byte %#x", e.msg, e.val, e.off)
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package macho
import (
"encoding/binary"
"fmt"
"internal/saferio"
"io"
"os"
)
// A FatFile is a Mach-O universal binary that contains at least one architecture.
type FatFile struct {
	Magic  uint32    // magic number (MagicFat)
	Arches []FatArch // one parsed image per architecture
	closer io.Closer // underlying file to close, when opened via OpenFat
}
// A FatArchHeader represents a fat header for a specific image architecture.
// All fields are stored big-endian in the file.
type FatArchHeader struct {
	Cpu    Cpu    // CPU type of the image
	SubCpu uint32 // CPU subtype of the image
	Offset uint32 // file offset of the image
	Size   uint32 // size of the image in bytes
	Align  uint32 // alignment value as encoded in the fat header
}

// fatArchHeaderSize is the encoded size of FatArchHeader: five uint32 fields.
const fatArchHeaderSize = 5 * 4
// A FatArch is a Mach-O File inside a FatFile.
type FatArch struct {
	FatArchHeader // position and architecture of the image in the fat file
	*File         // the parsed Mach-O image
}

// ErrNotFat is returned from [NewFatFile] or [OpenFat] when the file is not a
// universal binary but may be a thin binary, based on its magic number.
var ErrNotFat = &FormatError{0, "not a fat Mach-O file", nil}
// NewFatFile creates a new [FatFile] for accessing all the Mach-O images in a
// universal binary. The Mach-O binary is expected to start at position 0 in
// the ReaderAt.
//
// It returns [ErrNotFat] when r holds a thin Mach-O image, and a
// *FormatError for malformed input.
func NewFatFile(r io.ReaderAt) (*FatFile, error) {
	var ff FatFile
	sr := io.NewSectionReader(r, 0, 1<<63-1)

	// Read the fat_header struct, which is always in big endian.
	// Start with the magic number.
	err := binary.Read(sr, binary.BigEndian, &ff.Magic)
	if err != nil {
		return nil, &FormatError{0, "error reading magic number", nil}
	} else if ff.Magic != MagicFat {
		// See if this is a Mach-O file via its magic number. The magic
		// must be converted to little endian first though.
		var buf [4]byte
		binary.BigEndian.PutUint32(buf[:], ff.Magic)
		leMagic := binary.LittleEndian.Uint32(buf[:])
		if leMagic == Magic32 || leMagic == Magic64 {
			return nil, ErrNotFat
		} else {
			return nil, &FormatError{0, "invalid magic number", nil}
		}
	}
	offset := int64(4)

	// Read the number of FatArchHeaders that come after the fat_header.
	var narch uint32
	err = binary.Read(sr, binary.BigEndian, &narch)
	if err != nil {
		return nil, &FormatError{offset, "invalid fat_header", nil}
	}
	offset += 4

	if narch < 1 {
		return nil, &FormatError{offset, "file contains no images", nil}
	}

	// Combine the Cpu and SubCpu (both uint32) into a uint64 to make sure
	// there are not duplicate architectures.
	seenArches := make(map[uint64]bool)
	// Make sure that all images are for the same MH_ type.
	var machoType Type

	// Following the fat_header comes narch fat_arch structs that index
	// Mach-O images further in the file.
	c := saferio.SliceCap[FatArch](uint64(narch))
	if c < 0 {
		return nil, &FormatError{offset, "too many images", nil}
	}
	ff.Arches = make([]FatArch, 0, c)
	for i := uint32(0); i < narch; i++ {
		var fa FatArch
		err = binary.Read(sr, binary.BigEndian, &fa.FatArchHeader)
		if err != nil {
			return nil, &FormatError{offset, "invalid fat_arch header", nil}
		}
		offset += fatArchHeaderSize

		fr := io.NewSectionReader(r, int64(fa.Offset), int64(fa.Size))
		fa.File, err = NewFile(fr)
		if err != nil {
			return nil, err
		}

		// Make sure the architecture for this image is not duplicate.
		// (Entries are only ever stored as true, so the plain single-value
		// lookup replaces the earlier confusing `o, k := ...; o || k` form.)
		seenArch := (uint64(fa.Cpu) << 32) | uint64(fa.SubCpu)
		if seenArches[seenArch] {
			return nil, &FormatError{offset, fmt.Sprintf("duplicate architecture cpu=%v, subcpu=%#x", fa.Cpu, fa.SubCpu), nil}
		}
		seenArches[seenArch] = true

		// Make sure the Mach-O type matches that of the first image.
		if i == 0 {
			machoType = fa.Type
		} else {
			if fa.Type != machoType {
				return nil, &FormatError{offset, fmt.Sprintf("Mach-O type for architecture #%d (type=%#x) does not match first (type=%#x)", i, fa.Type, machoType), nil}
			}
		}

		ff.Arches = append(ff.Arches, fa)
	}

	return &ff, nil
}
// OpenFat opens the named file using [os.Open] and prepares it for use as a Mach-O
// universal binary.
func OpenFat(name string) (*FatFile, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	ff, err := NewFatFile(f)
	if err != nil {
		// Parsing failed: don't leak the descriptor.
		f.Close()
		return nil, err
	}
	// Remember the file so (*FatFile).Close releases it.
	ff.closer = f
	return ff, nil
}
// Close closes the underlying file, if the FatFile owns one
// (i.e. it was created via OpenFat). Subsequent calls are no-ops.
func (ff *FatFile) Close() error {
	if ff.closer == nil {
		return nil
	}
	err := ff.closer.Close()
	ff.closer = nil
	return err
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package macho implements access to Mach-O object files.
# Security
This package is not designed to be hardened against adversarial inputs, and is
outside the scope of https://go.dev/security/policy. In particular, only basic
validation is done when parsing object files. As such, care should be taken when
parsing untrusted inputs, as parsing malformed files may consume significant
resources, or cause panics.
*/
package macho
// High level access to low level data structures.
import (
"bytes"
"compress/zlib"
"debug/dwarf"
"encoding/binary"
"fmt"
"internal/saferio"
"io"
"os"
"strings"
)
// A File represents an open Mach-O file.
type File struct {
	FileHeader
	ByteOrder binary.ByteOrder // byte order inferred from the magic number
	Loads     []Load           // all load commands, in file order
	Sections  []*Section
	Symtab    *Symtab   // symbol table command, if present
	Dysymtab  *Dysymtab // dynamic symbol table command, if present

	closer io.Closer // underlying file to close, when opened via Open
}

// A Load represents any Mach-O load command.
type Load interface {
	// Raw returns the raw, uninterpreted bytes of the load command.
	Raw() []byte
}

// A LoadBytes is the uninterpreted bytes of a Mach-O load command.
type LoadBytes []byte

// Raw returns the load command bytes themselves.
func (b LoadBytes) Raw() []byte { return b }
// A SegmentHeader is the header for a Mach-O 32-bit or 64-bit load segment command.
type SegmentHeader struct {
	Cmd     LoadCmd // load command type
	Len     uint32  // total length of the command in bytes
	Name    string  // segment name
	Addr    uint64
	Memsz   uint64
	Offset  uint64 // file offset of the segment data
	Filesz  uint64 // size of the segment data in the file
	Maxprot uint32
	Prot    uint32
	Nsect   uint32 // number of sections in the segment
	Flag    uint32
}

// A Segment represents a Mach-O 32-bit or 64-bit load segment command.
type Segment struct {
	LoadBytes
	SegmentHeader

	// Embed ReaderAt for ReadAt method.
	// Do not embed SectionReader directly
	// to avoid having Read and Seek.
	// If a client wants Read and Seek it must use
	// Open() to avoid fighting over the seek offset
	// with other clients.
	io.ReaderAt
	sr *io.SectionReader // reader over the segment's file region
}
// Data reads and returns the contents of the segment:
// Filesz bytes from the start of the segment's file region.
func (s *Segment) Data() ([]byte, error) {
	return saferio.ReadDataAt(s.sr, s.Filesz, 0)
}

// Open returns a new ReadSeeker reading the segment.
// Each call returns an independent reader with its own seek offset.
func (s *Segment) Open() io.ReadSeeker { return io.NewSectionReader(s.sr, 0, 1<<63-1) }
// A SectionHeader holds the header metadata of a Mach-O section.
type SectionHeader struct {
	Name   string // section name
	Seg    string // name of the segment containing the section
	Addr   uint64
	Size   uint64 // section size in bytes
	Offset uint32 // file offset of the section data
	Align  uint32
	Reloff uint32 // file offset of the relocation entries
	Nreloc uint32 // number of relocation entries
	Flags  uint32
}

// A Reloc represents a Mach-O relocation.
type Reloc struct {
	Addr  uint32
	Value uint32
	// when Scattered == false && Extern == true, Value is the symbol number.
	// when Scattered == false && Extern == false, Value is the section number.
	// when Scattered == true, Value is the value that this reloc refers to.
	Type      uint8
	Len       uint8 // 0=byte, 1=word, 2=long, 3=quad
	Pcrel     bool
	Extern    bool // valid if Scattered == false
	Scattered bool
}

// A Section represents a single section in a Mach-O file.
type Section struct {
	SectionHeader
	Relocs []Reloc // decoded relocation entries

	// Embed ReaderAt for ReadAt method.
	// Do not embed SectionReader directly
	// to avoid having Read and Seek.
	// If a client wants Read and Seek it must use
	// Open() to avoid fighting over the seek offset
	// with other clients.
	io.ReaderAt
	sr *io.SectionReader // reader over the section's file region
}
// Data reads and returns the contents of the Mach-O section:
// Size bytes from the start of the section's file region.
func (s *Section) Data() ([]byte, error) {
	return saferio.ReadDataAt(s.sr, s.Size, 0)
}

// Open returns a new ReadSeeker reading the Mach-O section.
// Each call returns an independent reader with its own seek offset.
func (s *Section) Open() io.ReadSeeker { return io.NewSectionReader(s.sr, 0, 1<<63-1) }
// A Dylib represents a Mach-O load dynamic library command.
type Dylib struct {
	LoadBytes
	Name           string // dynamic library path name
	Time           uint32
	CurrentVersion uint32
	CompatVersion  uint32
}

// A Symtab represents a Mach-O symbol table command.
type Symtab struct {
	LoadBytes
	SymtabCmd
	Syms []Symbol // decoded symbol table entries
}

// A Dysymtab represents a Mach-O dynamic symbol table command.
type Dysymtab struct {
	LoadBytes
	DysymtabCmd
	IndirectSyms []uint32 // indices into Symtab.Syms
}

// A Rpath represents a Mach-O rpath command.
type Rpath struct {
	LoadBytes
	Path string // run path recorded in the command
}

// A Symbol is a Mach-O 32-bit or 64-bit symbol table entry.
type Symbol struct {
	Name  string // symbol name
	Type  uint8
	Sect  uint8
	Desc  uint16
	Value uint64 // symbol value (address)
}
/*
* Mach-O reader
*/
// FormatError is returned by some operations if the data does
// not have the correct format for an object file.
type FormatError struct {
	off int64 // byte offset of the problem
	msg string
	val any // offending value, or nil
}

// Error implements the error interface.
func (e *FormatError) Error() string {
	if e.val == nil {
		return fmt.Sprintf("%s in record at byte %#x", e.msg, e.off)
	}
	return fmt.Sprintf("%s '%v' in record at byte %#x", e.msg, e.val, e.off)
}
// Open opens the named file using [os.Open] and prepares it for use as a Mach-O binary.
func Open(name string) (*File, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	mf, err := NewFile(f)
	if err != nil {
		// Parsing failed: don't leak the descriptor.
		f.Close()
		return nil, err
	}
	// Remember the file so (*File).Close releases it.
	mf.closer = f
	return mf, nil
}
// Close closes the [File].
// If the [File] was created using [NewFile] directly instead of [Open],
// Close has no effect.
func (f *File) Close() error {
	if f.closer == nil {
		return nil
	}
	err := f.closer.Close()
	f.closer = nil
	return err
}
// NewFile creates a new [File] for accessing a Mach-O binary in an underlying reader.
// The Mach-O binary is expected to start at position 0 in the ReaderAt.
func NewFile(r io.ReaderAt) (*File, error) {
f := new(File)
sr := io.NewSectionReader(r, 0, 1<<63-1)
// Read and decode Mach magic to determine byte order, size.
// Magic32 and Magic64 differ only in the bottom bit.
var ident [4]byte
if _, err := r.ReadAt(ident[0:], 0); err != nil {
return nil, err
}
be := binary.BigEndian.Uint32(ident[0:])
le := binary.LittleEndian.Uint32(ident[0:])
switch Magic32 &^ 1 {
case be &^ 1:
f.ByteOrder = binary.BigEndian
f.Magic = be
case le &^ 1:
f.ByteOrder = binary.LittleEndian
f.Magic = le
default:
return nil, &FormatError{0, "invalid magic number", nil}
}
// Read entire file header.
if err := binary.Read(sr, f.ByteOrder, &f.FileHeader); err != nil {
return nil, err
}
// Then load commands.
offset := int64(fileHeaderSize32)
if f.Magic == Magic64 {
offset = fileHeaderSize64
}
dat, err := saferio.ReadDataAt(r, uint64(f.Cmdsz), offset)
if err != nil {
return nil, err
}
c := saferio.SliceCap[Load](uint64(f.Ncmd))
if c < 0 {
return nil, &FormatError{offset, "too many load commands", nil}
}
f.Loads = make([]Load, 0, c)
bo := f.ByteOrder
for i := uint32(0); i < f.Ncmd; i++ {
// Each load command begins with uint32 command and length.
if len(dat) < 8 {
return nil, &FormatError{offset, "command block too small", nil}
}
cmd, siz := LoadCmd(bo.Uint32(dat[0:4])), bo.Uint32(dat[4:8])
if siz < 8 || siz > uint32(len(dat)) {
return nil, &FormatError{offset, "invalid command block size", nil}
}
var cmddat []byte
cmddat, dat = dat[0:siz], dat[siz:]
offset += int64(siz)
var s *Segment
switch cmd {
default:
f.Loads = append(f.Loads, LoadBytes(cmddat))
case LoadCmdRpath:
var hdr RpathCmd
b := bytes.NewReader(cmddat)
if err := binary.Read(b, bo, &hdr); err != nil {
return nil, err
}
l := new(Rpath)
if hdr.Path >= uint32(len(cmddat)) {
return nil, &FormatError{offset, "invalid path in rpath command", hdr.Path}
}
l.Path = cstring(cmddat[hdr.Path:])
l.LoadBytes = LoadBytes(cmddat)
f.Loads = append(f.Loads, l)
case LoadCmdDylib:
var hdr DylibCmd
b := bytes.NewReader(cmddat)
if err := binary.Read(b, bo, &hdr); err != nil {
return nil, err
}
l := new(Dylib)
if hdr.Name >= uint32(len(cmddat)) {
return nil, &FormatError{offset, "invalid name in dynamic library command", hdr.Name}
}
l.Name = cstring(cmddat[hdr.Name:])
l.Time = hdr.Time
l.CurrentVersion = hdr.CurrentVersion
l.CompatVersion = hdr.CompatVersion
l.LoadBytes = LoadBytes(cmddat)
f.Loads = append(f.Loads, l)
case LoadCmdSymtab:
var hdr SymtabCmd
b := bytes.NewReader(cmddat)
if err := binary.Read(b, bo, &hdr); err != nil {
return nil, err
}
strtab, err := saferio.ReadDataAt(r, uint64(hdr.Strsize), int64(hdr.Stroff))
if err != nil {
return nil, err
}
var symsz int
if f.Magic == Magic64 {
symsz = 16
} else {
symsz = 12
}
symdat, err := saferio.ReadDataAt(r, uint64(hdr.Nsyms)*uint64(symsz), int64(hdr.Symoff))
if err != nil {
return nil, err
}
st, err := f.parseSymtab(symdat, strtab, cmddat, &hdr, offset)
if err != nil {
return nil, err
}
f.Loads = append(f.Loads, st)
f.Symtab = st
case LoadCmdDysymtab:
var hdr DysymtabCmd
b := bytes.NewReader(cmddat)
if err := binary.Read(b, bo, &hdr); err != nil {
return nil, err
}
if f.Symtab == nil {
return nil, &FormatError{offset, "dynamic symbol table seen before any ordinary symbol table", nil}
} else if hdr.Iundefsym > uint32(len(f.Symtab.Syms)) {
return nil, &FormatError{offset, fmt.Sprintf(
"undefined symbols index in dynamic symbol table command is greater than symbol table length (%d > %d)",
hdr.Iundefsym, len(f.Symtab.Syms)), nil}
} else if hdr.Iundefsym+hdr.Nundefsym > uint32(len(f.Symtab.Syms)) {
return nil, &FormatError{offset, fmt.Sprintf(
"number of undefined symbols after index in dynamic symbol table command is greater than symbol table length (%d > %d)",
hdr.Iundefsym+hdr.Nundefsym, len(f.Symtab.Syms)), nil}
}
dat, err := saferio.ReadDataAt(r, uint64(hdr.Nindirectsyms)*4, int64(hdr.Indirectsymoff))
if err != nil {
return nil, err
}
x := make([]uint32, hdr.Nindirectsyms)
if err := binary.Read(bytes.NewReader(dat), bo, x); err != nil {
return nil, err
}
st := new(Dysymtab)
st.LoadBytes = LoadBytes(cmddat)
st.DysymtabCmd = hdr
st.IndirectSyms = x
f.Loads = append(f.Loads, st)
f.Dysymtab = st
case LoadCmdSegment:
var seg32 Segment32
b := bytes.NewReader(cmddat)
if err := binary.Read(b, bo, &seg32); err != nil {
return nil, err
}
s = new(Segment)
s.LoadBytes = cmddat
s.Cmd = cmd
s.Len = siz
s.Name = cstring(seg32.Name[0:])
s.Addr = uint64(seg32.Addr)
s.Memsz = uint64(seg32.Memsz)
s.Offset = uint64(seg32.Offset)
s.Filesz = uint64(seg32.Filesz)
s.Maxprot = seg32.Maxprot
s.Prot = seg32.Prot
s.Nsect = seg32.Nsect
s.Flag = seg32.Flag
f.Loads = append(f.Loads, s)
for i := 0; i < int(s.Nsect); i++ {
var sh32 Section32
if err := binary.Read(b, bo, &sh32); err != nil {
return nil, err
}
sh := new(Section)
sh.Name = cstring(sh32.Name[0:])
sh.Seg = cstring(sh32.Seg[0:])
sh.Addr = uint64(sh32.Addr)
sh.Size = uint64(sh32.Size)
sh.Offset = sh32.Offset
sh.Align = sh32.Align
sh.Reloff = sh32.Reloff
sh.Nreloc = sh32.Nreloc
sh.Flags = sh32.Flags
if err := f.pushSection(sh, r); err != nil {
return nil, err
}
}
case LoadCmdSegment64:
var seg64 Segment64
b := bytes.NewReader(cmddat)
if err := binary.Read(b, bo, &seg64); err != nil {
return nil, err
}
s = new(Segment)
s.LoadBytes = cmddat
s.Cmd = cmd
s.Len = siz
s.Name = cstring(seg64.Name[0:])
s.Addr = seg64.Addr
s.Memsz = seg64.Memsz
s.Offset = seg64.Offset
s.Filesz = seg64.Filesz
s.Maxprot = seg64.Maxprot
s.Prot = seg64.Prot
s.Nsect = seg64.Nsect
s.Flag = seg64.Flag
f.Loads = append(f.Loads, s)
for i := 0; i < int(s.Nsect); i++ {
var sh64 Section64
if err := binary.Read(b, bo, &sh64); err != nil {
return nil, err
}
sh := new(Section)
sh.Name = cstring(sh64.Name[0:])
sh.Seg = cstring(sh64.Seg[0:])
sh.Addr = sh64.Addr
sh.Size = sh64.Size
sh.Offset = sh64.Offset
sh.Align = sh64.Align
sh.Reloff = sh64.Reloff
sh.Nreloc = sh64.Nreloc
sh.Flags = sh64.Flags
if err := f.pushSection(sh, r); err != nil {
return nil, err
}
}
}
if s != nil {
if int64(s.Offset) < 0 {
return nil, &FormatError{offset, "invalid section offset", s.Offset}
}
if int64(s.Filesz) < 0 {
return nil, &FormatError{offset, "invalid section file size", s.Filesz}
}
s.sr = io.NewSectionReader(r, int64(s.Offset), int64(s.Filesz))
s.ReaderAt = s.sr
}
}
return f, nil
}
// parseSymtab decodes the raw symbol-table entries in symdat, resolving
// names through the string table strtab, and returns them as a *Symtab.
// cmddat is the raw LC_SYMTAB load command and is retained as the
// Symtab's LoadBytes; offset is the file offset of the command and is
// used only for error reporting.
func (f *File) parseSymtab(symdat, strtab, cmddat []byte, hdr *SymtabCmd, offset int64) (*Symtab, error) {
	bo := f.ByteOrder
	// Guard against a corrupt file declaring a huge Nsyms: SliceCap
	// returns a safe initial capacity, or a negative value if the
	// requested count is unreasonable.
	c := saferio.SliceCap[Symbol](uint64(hdr.Nsyms))
	if c < 0 {
		return nil, &FormatError{offset, "too many symbols", nil}
	}
	symtab := make([]Symbol, 0, c)
	b := bytes.NewReader(symdat)
	for i := 0; i < int(hdr.Nsyms); i++ {
		var n Nlist64
		if f.Magic == Magic64 {
			if err := binary.Read(b, bo, &n); err != nil {
				return nil, err
			}
		} else {
			// 32-bit files store Nlist32 entries; widen each one into
			// the common Nlist64 representation.
			var n32 Nlist32
			if err := binary.Read(b, bo, &n32); err != nil {
				return nil, err
			}
			n.Name = n32.Name
			n.Type = n32.Type
			n.Sect = n32.Sect
			n.Desc = n32.Desc
			n.Value = uint64(n32.Value)
		}
		// n.Name is an offset into the string table; reject out-of-range
		// offsets before slicing.
		if n.Name >= uint32(len(strtab)) {
			return nil, &FormatError{offset, "invalid name in symbol table", n.Name}
		}
		// We add "_" to Go symbols. Strip it here. See issue 33808.
		name := cstring(strtab[n.Name:])
		if strings.Contains(name, ".") && name[0] == '_' {
			name = name[1:]
		}
		symtab = append(symtab, Symbol{
			Name:  name,
			Type:  n.Type,
			Sect:  n.Sect,
			Desc:  n.Desc,
			Value: n.Value,
		})
	}
	st := new(Symtab)
	st.LoadBytes = LoadBytes(cmddat)
	st.Syms = symtab
	return st, nil
}
// relocInfo is the on-disk layout of a Mach-O relocation entry: a 32-bit
// address word followed by a 32-bit packed symbol/flags word. How both
// words are interpreted depends on the "scattered" bit (bit 31 of Addr);
// see pushSection for the decoding.
type relocInfo struct {
	Addr   uint32
	Symnum uint32
}
// pushSection appends sh to f.Sections, gives it a reader over its file
// data, and, if the section header declares relocations, reads and
// decodes them into sh.Relocs.
func (f *File) pushSection(sh *Section, r io.ReaderAt) error {
	f.Sections = append(f.Sections, sh)
	sh.sr = io.NewSectionReader(r, int64(sh.Offset), int64(sh.Size))
	sh.ReaderAt = sh.sr

	if sh.Nreloc > 0 {
		// Each relocation entry is 8 bytes on disk (two uint32 words;
		// see relocInfo). saferio bounds the allocation for corrupt
		// Nreloc values.
		reldat, err := saferio.ReadDataAt(r, uint64(sh.Nreloc)*8, int64(sh.Reloff))
		if err != nil {
			return err
		}
		b := bytes.NewReader(reldat)

		bo := f.ByteOrder

		sh.Relocs = make([]Reloc, sh.Nreloc)
		for i := range sh.Relocs {
			rel := &sh.Relocs[i]

			var ri relocInfo
			if err := binary.Read(b, bo, &ri); err != nil {
				return err
			}

			if ri.Addr&(1<<31) != 0 { // scattered
				// Scattered relocation: address, type, length, and
				// pcrel are all packed into the Addr word, and Symnum
				// holds the target value.
				rel.Addr = ri.Addr & (1<<24 - 1)
				rel.Type = uint8((ri.Addr >> 24) & (1<<4 - 1))
				rel.Len = uint8((ri.Addr >> 28) & (1<<2 - 1))
				rel.Pcrel = ri.Addr&(1<<30) != 0
				rel.Value = ri.Symnum
				rel.Scattered = true
			} else {
				// Non-scattered: Symnum is a bit-field of value, pcrel,
				// len, extern, and type. The field positions within the
				// word depend on the file's byte order.
				switch bo {
				case binary.LittleEndian:
					rel.Addr = ri.Addr
					rel.Value = ri.Symnum & (1<<24 - 1)
					rel.Pcrel = ri.Symnum&(1<<24) != 0
					rel.Len = uint8((ri.Symnum >> 25) & (1<<2 - 1))
					rel.Extern = ri.Symnum&(1<<27) != 0
					rel.Type = uint8((ri.Symnum >> 28) & (1<<4 - 1))
				case binary.BigEndian:
					rel.Addr = ri.Addr
					rel.Value = ri.Symnum >> 8
					rel.Pcrel = ri.Symnum&(1<<7) != 0
					rel.Len = uint8((ri.Symnum >> 5) & (1<<2 - 1))
					rel.Extern = ri.Symnum&(1<<4) != 0
					rel.Type = uint8(ri.Symnum & (1<<4 - 1))
				default:
					// f.ByteOrder is only ever set to little- or
					// big-endian by the header parser.
					panic("unreachable")
				}
			}
		}
	}

	return nil
}
// cstring interprets b as a NUL-terminated C string and returns the
// bytes before the first NUL as a Go string. If b contains no NUL,
// the entire slice is returned.
func cstring(b []byte) string {
	if i := bytes.IndexByte(b, 0); i >= 0 {
		return string(b[:i])
	}
	return string(b)
}
// Segment returns the first Segment with the given name, or nil if no such segment exists.
func (f *File) Segment(name string) *Segment {
	for _, load := range f.Loads {
		seg, ok := load.(*Segment)
		if !ok {
			continue
		}
		if seg.Name == name {
			return seg
		}
	}
	return nil
}
// Section returns the first section with the given name, or nil if no such
// section exists.
func (f *File) Section(name string) *Section {
	for i := range f.Sections {
		if sect := f.Sections[i]; sect.Name == name {
			return sect
		}
	}
	return nil
}
// DWARF returns the DWARF debug information for the Mach-O file.
func (f *File) DWARF() (*dwarf.Data, error) {
	// dwarfSuffix maps a Mach-O debug section name ("__debug_info",
	// "__zdebug_line", ...) to the generic DWARF suffix ("info",
	// "line"), or "" if the section is not a DWARF section.
	dwarfSuffix := func(s *Section) string {
		sectname := s.Name
		var pfx int
		switch {
		case strings.HasPrefix(sectname, "__debug_"):
			pfx = 8
		case strings.HasPrefix(sectname, "__zdebug_"):
			pfx = 9
		default:
			return ""
		}
		// Mach-O executables truncate section names to 16 characters, mangling some DWARF sections.
		// As of DWARFv5 these are the only problematic section names (see DWARFv5 Appendix G).
		for _, longname := range []string{
			"__debug_str_offsets",
			"__zdebug_line_str",
			"__zdebug_loclists",
			"__zdebug_pubnames",
			"__zdebug_pubtypes",
			"__zdebug_rnglists",
			"__zdebug_str_offsets",
		} {
			if sectname == longname[:16] {
				sectname = longname
				break
			}
		}
		return sectname[pfx:]
	}
	// sectionData reads a section's contents, transparently inflating
	// the "ZLIB" + 8-byte big-endian length framing used by compressed
	// __zdebug_* sections.
	sectionData := func(s *Section) ([]byte, error) {
		b, err := s.Data()
		// Tolerate a read error as long as we still received the full
		// declared section size.
		if err != nil && uint64(len(b)) < s.Size {
			return nil, err
		}
		if len(b) >= 12 && string(b[:4]) == "ZLIB" {
			dlen := binary.BigEndian.Uint64(b[4:12])
			// NOTE(review): dlen comes from the file and sizes this
			// allocation unchecked — a corrupt file can force a very
			// large allocation. Consider a saferio-style guard here.
			dbuf := make([]byte, dlen)
			r, err := zlib.NewReader(bytes.NewBuffer(b[12:]))
			if err != nil {
				return nil, err
			}
			if _, err := io.ReadFull(r, dbuf); err != nil {
				return nil, err
			}
			if err := r.Close(); err != nil {
				return nil, err
			}
			b = dbuf
		}
		return b, nil
	}

	// There are many other DWARF sections, but these
	// are the ones the debug/dwarf package uses.
	// Don't bother loading others.
	var dat = map[string][]byte{"abbrev": nil, "info": nil, "str": nil, "line": nil, "ranges": nil}
	for _, s := range f.Sections {
		suffix := dwarfSuffix(s)
		if suffix == "" {
			continue
		}
		if _, ok := dat[suffix]; !ok {
			continue
		}
		b, err := sectionData(s)
		if err != nil {
			return nil, err
		}
		dat[suffix] = b
	}

	d, err := dwarf.New(dat["abbrev"], nil, nil, dat["info"], dat["line"], nil, dat["ranges"], dat["str"])
	if err != nil {
		return nil, err
	}

	// Look for DWARF4 .debug_types sections and DWARF5 sections.
	for i, s := range f.Sections {
		suffix := dwarfSuffix(s)
		if suffix == "" {
			continue
		}
		if _, ok := dat[suffix]; ok {
			// Already handled.
			continue
		}
		b, err := sectionData(s)
		if err != nil {
			return nil, err
		}
		if suffix == "types" {
			// Each .debug_types section gets a distinct synthetic name.
			err = d.AddTypes(fmt.Sprintf("types-%d", i), b)
		} else {
			err = d.AddSection(".debug_"+suffix, b)
		}
		if err != nil {
			return nil, err
		}
	}

	return d, nil
}
// ImportedSymbols returns the names of all symbols
// referred to by the binary f that are expected to be
// satisfied by other libraries at dynamic load time.
func (f *File) ImportedSymbols() ([]string, error) {
	if f.Symtab == nil {
		return nil, &FormatError{0, "missing symbol table", nil}
	}

	var all []string
	if dt := f.Dysymtab; dt != nil {
		// The dynamic symbol table records the contiguous range of
		// undefined symbols directly.
		undef := f.Symtab.Syms[dt.Iundefsym : dt.Iundefsym+dt.Nundefsym]
		for i := range undef {
			all = append(all, undef[i].Name)
		}
		return all, nil
	}

	// No dynamic symbol table: scan for undefined external symbols.
	// From Darwin's include/mach-o/nlist.h
	const (
		N_TYPE = 0x0e
		N_UNDF = 0x0
		N_EXT  = 0x01
	)
	for _, sym := range f.Symtab.Syms {
		if sym.Type&N_TYPE == N_UNDF && sym.Type&N_EXT != 0 {
			all = append(all, sym.Name)
		}
	}
	return all, nil
}
// ImportedLibraries returns the paths of all libraries
// referred to by the binary f that are expected to be
// linked with the binary at dynamic link time.
func (f *File) ImportedLibraries() ([]string, error) {
	var libs []string
	for _, load := range f.Loads {
		dylib, ok := load.(*Dylib)
		if !ok {
			continue
		}
		libs = append(libs, dylib.Name)
	}
	return libs, nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Mach-O header data structures
// Originally at:
// http://developer.apple.com/mac/library/documentation/DeveloperTools/Conceptual/MachORuntime/Reference/reference.html (since deleted by Apple)
// Archived copy at:
// https://web.archive.org/web/20090819232456/http://developer.apple.com/documentation/DeveloperTools/Conceptual/MachORuntime/index.html
// For cloned PDF see:
// https://github.com/aidansteele/osx-abi-macho-file-format-reference
package macho
import "strconv"
// A FileHeader represents a Mach-O file header.
type FileHeader struct {
	Magic  uint32 // magic number selecting the 32- or 64-bit layout (Magic32/Magic64)
	Cpu    Cpu    // target CPU type
	SubCpu uint32 // machine-specific CPU subtype
	Type   Type   // file type (object, executable, dylib, bundle)
	Ncmd   uint32 // number of load commands that follow the header
	Cmdsz  uint32 // total size of the load commands, in bytes
	Flags  uint32 // header flag bits (see the Flag* constants)
}
// On-disk sizes of the Mach-O file header.
const (
	fileHeaderSize32 = 7 * 4 // seven 32-bit words
	fileHeaderSize64 = 8 * 4 // the 64-bit header carries one extra reserved word
)
// Magic numbers identifying the flavor of a Mach-O file.
const (
	Magic32  uint32 = 0xfeedface // 32-bit Mach-O
	Magic64  uint32 = 0xfeedfacf // 64-bit Mach-O
	MagicFat uint32 = 0xcafebabe // universal ("fat") binary wrapping multiple Mach-O files
)
// A Type is the Mach-O file type, e.g. an object file, executable, or dynamic library.
type Type uint32

const (
	TypeObj    Type = 1 // relocatable object file
	TypeExec   Type = 2 // executable
	TypeDylib  Type = 6 // dynamic library
	TypeBundle Type = 8 // loadable bundle
)

// typeStrings maps Type values to names for String/GoString.
var typeStrings = []intName{
	{uint32(TypeObj), "Obj"},
	{uint32(TypeExec), "Exec"},
	{uint32(TypeDylib), "Dylib"},
	{uint32(TypeBundle), "Bundle"},
}

// String returns the name of the file type, or its decimal value if unknown.
func (t Type) String() string { return stringName(uint32(t), typeStrings, false) }

// GoString returns the file type name in Go syntax (with a "macho." prefix).
func (t Type) GoString() string { return stringName(uint32(t), typeStrings, true) }
// A Cpu is a Mach-O cpu type.
type Cpu uint32

// cpuArch64 is the flag bit that marks the 64-bit variant of a CPU type.
const cpuArch64 = 0x01000000

const (
	Cpu386   Cpu = 7
	CpuAmd64 Cpu = Cpu386 | cpuArch64
	CpuArm   Cpu = 12
	CpuArm64 Cpu = CpuArm | cpuArch64
	CpuPpc   Cpu = 18
	CpuPpc64 Cpu = CpuPpc | cpuArch64
)

// cpuStrings maps Cpu values to names for String/GoString.
var cpuStrings = []intName{
	{uint32(Cpu386), "Cpu386"},
	{uint32(CpuAmd64), "CpuAmd64"},
	{uint32(CpuArm), "CpuArm"},
	{uint32(CpuArm64), "CpuArm64"},
	{uint32(CpuPpc), "CpuPpc"},
	{uint32(CpuPpc64), "CpuPpc64"},
}

// String returns the name of the cpu type, or its decimal value if unknown.
func (i Cpu) String() string { return stringName(uint32(i), cpuStrings, false) }

// GoString returns the cpu type name in Go syntax (with a "macho." prefix).
func (i Cpu) GoString() string { return stringName(uint32(i), cpuStrings, true) }
// A LoadCmd is a Mach-O load command.
type LoadCmd uint32

const (
	LoadCmdSegment    LoadCmd = 0x1
	LoadCmdSymtab     LoadCmd = 0x2
	LoadCmdThread     LoadCmd = 0x4
	LoadCmdUnixThread LoadCmd = 0x5 // thread+stack
	LoadCmdDysymtab   LoadCmd = 0xb
	LoadCmdDylib      LoadCmd = 0xc // load dylib command
	LoadCmdDylinker   LoadCmd = 0xf // id dylinker command (not load dylinker command)
	LoadCmdSegment64  LoadCmd = 0x19
	LoadCmdRpath      LoadCmd = 0x8000001c
)

// cmdStrings maps LoadCmd values to names for String/GoString.
// It covers every LoadCmd constant declared above so that none of
// them falls back to a bare numeric representation.
var cmdStrings = []intName{
	{uint32(LoadCmdSegment), "LoadCmdSegment"},
	{uint32(LoadCmdSymtab), "LoadCmdSymtab"},
	{uint32(LoadCmdThread), "LoadCmdThread"},
	{uint32(LoadCmdUnixThread), "LoadCmdUnixThread"},
	{uint32(LoadCmdDysymtab), "LoadCmdDysymtab"},
	{uint32(LoadCmdDylib), "LoadCmdDylib"},
	{uint32(LoadCmdDylinker), "LoadCmdDylinker"},
	{uint32(LoadCmdSegment64), "LoadCmdSegment64"},
	{uint32(LoadCmdRpath), "LoadCmdRpath"},
}

// String returns the name of the load command, or its decimal value if unknown.
func (i LoadCmd) String() string { return stringName(uint32(i), cmdStrings, false) }

// GoString returns the load command name in Go syntax (with a "macho." prefix).
func (i LoadCmd) GoString() string { return stringName(uint32(i), cmdStrings, true) }
type (
	// A Segment32 is a 32-bit Mach-O segment load command.
	Segment32 struct {
		Cmd     LoadCmd
		Len     uint32
		Name    [16]byte
		Addr    uint32
		Memsz   uint32
		Offset  uint32
		Filesz  uint32
		Maxprot uint32
		Prot    uint32
		Nsect   uint32
		Flag    uint32
	}

	// A Segment64 is a 64-bit Mach-O segment load command.
	Segment64 struct {
		Cmd     LoadCmd
		Len     uint32
		Name    [16]byte
		Addr    uint64
		Memsz   uint64
		Offset  uint64
		Filesz  uint64
		Maxprot uint32
		Prot    uint32
		Nsect   uint32
		Flag    uint32
	}

	// A SymtabCmd is a Mach-O symbol table command.
	SymtabCmd struct {
		Cmd     LoadCmd
		Len     uint32
		Symoff  uint32 // file offset of the symbol table
		Nsyms   uint32 // number of symbol table entries
		Stroff  uint32 // file offset of the string table
		Strsize uint32 // size of the string table, in bytes
	}

	// A DysymtabCmd is a Mach-O dynamic symbol table command.
	DysymtabCmd struct {
		Cmd            LoadCmd
		Len            uint32
		Ilocalsym      uint32
		Nlocalsym      uint32
		Iextdefsym     uint32
		Nextdefsym     uint32
		Iundefsym      uint32 // index of the first undefined symbol in the symbol table
		Nundefsym      uint32 // number of undefined symbols
		Tocoffset      uint32
		Ntoc           uint32
		Modtaboff      uint32
		Nmodtab        uint32
		Extrefsymoff   uint32
		Nextrefsyms    uint32
		Indirectsymoff uint32 // file offset of the indirect symbol table
		Nindirectsyms  uint32 // number of indirect symbol table entries
		Extreloff      uint32
		Nextrel        uint32
		Locreloff      uint32
		Nlocrel        uint32
	}

	// A DylibCmd is a Mach-O load dynamic library command.
	DylibCmd struct {
		Cmd            LoadCmd
		Len            uint32
		Name           uint32 // offset of the library path string within the command data
		Time           uint32
		CurrentVersion uint32
		CompatVersion  uint32
	}

	// A RpathCmd is a Mach-O rpath command.
	RpathCmd struct {
		Cmd  LoadCmd
		Len  uint32
		Path uint32 // offset of the rpath string within the command data
	}

	// A Thread is a Mach-O thread state command.
	Thread struct {
		Cmd  LoadCmd
		Len  uint32
		Type uint32
		Data []uint32
	}
)
// Flag bits for a Mach-O file header (FileHeader.Flags).
const (
	FlagNoUndefs              uint32 = 0x1
	FlagIncrLink              uint32 = 0x2
	FlagDyldLink              uint32 = 0x4
	FlagBindAtLoad            uint32 = 0x8
	FlagPrebound              uint32 = 0x10
	FlagSplitSegs             uint32 = 0x20
	FlagLazyInit              uint32 = 0x40
	FlagTwoLevel              uint32 = 0x80
	FlagForceFlat             uint32 = 0x100
	FlagNoMultiDefs           uint32 = 0x200
	FlagNoFixPrebinding       uint32 = 0x400
	FlagPrebindable           uint32 = 0x800
	FlagAllModsBound          uint32 = 0x1000
	FlagSubsectionsViaSymbols uint32 = 0x2000
	FlagCanonical             uint32 = 0x4000
	FlagWeakDefines           uint32 = 0x8000
	FlagBindsToWeak           uint32 = 0x10000
	FlagAllowStackExecution   uint32 = 0x20000
	FlagRootSafe              uint32 = 0x40000
	FlagSetuidSafe            uint32 = 0x80000
	FlagNoReexportedDylibs    uint32 = 0x100000
	FlagPIE                   uint32 = 0x200000
	FlagDeadStrippableDylib   uint32 = 0x400000
	FlagHasTLVDescriptors     uint32 = 0x800000
	FlagNoHeapExecution       uint32 = 0x1000000
	FlagAppExtensionSafe      uint32 = 0x2000000
)
// A Section32 is a 32-bit Mach-O section header.
type Section32 struct {
	Name     [16]byte // section name, NUL-padded
	Seg      [16]byte // name of the segment containing this section, NUL-padded
	Addr     uint32
	Size     uint32
	Offset   uint32
	Align    uint32
	Reloff   uint32 // file offset of the relocation entries
	Nreloc   uint32 // number of relocation entries
	Flags    uint32
	Reserve1 uint32
	Reserve2 uint32
}

// A Section64 is a 64-bit Mach-O section header.
type Section64 struct {
	Name     [16]byte // section name, NUL-padded
	Seg      [16]byte // name of the segment containing this section, NUL-padded
	Addr     uint64
	Size     uint64
	Offset   uint32
	Align    uint32
	Reloff   uint32 // file offset of the relocation entries
	Nreloc   uint32 // number of relocation entries
	Flags    uint32
	Reserve1 uint32
	Reserve2 uint32
	Reserve3 uint32
}
// An Nlist32 is a Mach-O 32-bit symbol table entry.
type Nlist32 struct {
	Name  uint32 // offset of the symbol name in the string table
	Type  uint8
	Sect  uint8
	Desc  uint16
	Value uint32
}

// An Nlist64 is a Mach-O 64-bit symbol table entry.
type Nlist64 struct {
	Name  uint32 // offset of the symbol name in the string table
	Type  uint8
	Sect  uint8
	Desc  uint16
	Value uint64
}
// Regs386 is the Mach-O 386 register structure.
// NOTE(review): presumably matches the 32-bit thread state carried in a
// Thread command's Data — confirm against the Mach-O thread-state layout.
type Regs386 struct {
	AX    uint32
	BX    uint32
	CX    uint32
	DX    uint32
	DI    uint32
	SI    uint32
	BP    uint32
	SP    uint32
	SS    uint32
	FLAGS uint32
	IP    uint32
	CS    uint32
	DS    uint32
	ES    uint32
	FS    uint32
	GS    uint32
}

// RegsAMD64 is the Mach-O AMD64 register structure.
// NOTE(review): presumably matches the 64-bit thread state carried in a
// Thread command's Data — confirm against the Mach-O thread-state layout.
type RegsAMD64 struct {
	AX    uint64
	BX    uint64
	CX    uint64
	DX    uint64
	DI    uint64
	SI    uint64
	BP    uint64
	SP    uint64
	R8    uint64
	R9    uint64
	R10   uint64
	R11   uint64
	R12   uint64
	R13   uint64
	R14   uint64
	R15   uint64
	IP    uint64
	FLAGS uint64
	CS    uint64
	FS    uint64
	GS    uint64
}
// intName pairs a numeric constant with its symbolic name, for use by
// the String/GoString methods in this package.
type intName struct {
	i uint32
	s string
}

// stringName looks up i in names. If found, it returns the symbolic
// name, qualified with the "macho." package prefix when goSyntax is
// set; otherwise it returns the decimal representation of i.
func stringName(i uint32, names []intName, goSyntax bool) string {
	for _, entry := range names {
		if entry.i != i {
			continue
		}
		if goSyntax {
			return "macho." + entry.s
		}
		return entry.s
	}
	return strconv.FormatUint(uint64(i), 10)
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package macho
//go:generate stringer -type=RelocTypeGeneric,RelocTypeX86_64,RelocTypeARM,RelocTypeARM64 -output reloctype_string.go
// RelocTypeGeneric is a generic Mach-O relocation type (GENERIC_RELOC_*).
type RelocTypeGeneric int

const (
	GENERIC_RELOC_VANILLA        RelocTypeGeneric = 0
	GENERIC_RELOC_PAIR           RelocTypeGeneric = 1
	GENERIC_RELOC_SECTDIFF       RelocTypeGeneric = 2
	GENERIC_RELOC_PB_LA_PTR      RelocTypeGeneric = 3
	GENERIC_RELOC_LOCAL_SECTDIFF RelocTypeGeneric = 4
	GENERIC_RELOC_TLV            RelocTypeGeneric = 5
)

// GoString returns the relocation type name in Go syntax (with a "macho." prefix).
func (r RelocTypeGeneric) GoString() string { return "macho." + r.String() }
// RelocTypeX86_64 is an x86-64 Mach-O relocation type (X86_64_RELOC_*).
type RelocTypeX86_64 int

const (
	X86_64_RELOC_UNSIGNED   RelocTypeX86_64 = 0
	X86_64_RELOC_SIGNED     RelocTypeX86_64 = 1
	X86_64_RELOC_BRANCH     RelocTypeX86_64 = 2
	X86_64_RELOC_GOT_LOAD   RelocTypeX86_64 = 3
	X86_64_RELOC_GOT        RelocTypeX86_64 = 4
	X86_64_RELOC_SUBTRACTOR RelocTypeX86_64 = 5
	X86_64_RELOC_SIGNED_1   RelocTypeX86_64 = 6
	X86_64_RELOC_SIGNED_2   RelocTypeX86_64 = 7
	X86_64_RELOC_SIGNED_4   RelocTypeX86_64 = 8
	X86_64_RELOC_TLV        RelocTypeX86_64 = 9
)

// GoString returns the relocation type name in Go syntax (with a "macho." prefix).
func (r RelocTypeX86_64) GoString() string { return "macho." + r.String() }
// RelocTypeARM is a 32-bit ARM Mach-O relocation type (ARM_RELOC_*).
type RelocTypeARM int

const (
	ARM_RELOC_VANILLA        RelocTypeARM = 0
	ARM_RELOC_PAIR           RelocTypeARM = 1
	ARM_RELOC_SECTDIFF       RelocTypeARM = 2
	ARM_RELOC_LOCAL_SECTDIFF RelocTypeARM = 3
	ARM_RELOC_PB_LA_PTR      RelocTypeARM = 4
	ARM_RELOC_BR24           RelocTypeARM = 5
	ARM_THUMB_RELOC_BR22     RelocTypeARM = 6
	ARM_THUMB_32BIT_BRANCH   RelocTypeARM = 7
	ARM_RELOC_HALF           RelocTypeARM = 8
	ARM_RELOC_HALF_SECTDIFF  RelocTypeARM = 9
)

// GoString returns the relocation type name in Go syntax (with a "macho." prefix).
func (r RelocTypeARM) GoString() string { return "macho." + r.String() }
// RelocTypeARM64 is a 64-bit ARM Mach-O relocation type (ARM64_RELOC_*).
type RelocTypeARM64 int

const (
	ARM64_RELOC_UNSIGNED            RelocTypeARM64 = 0
	ARM64_RELOC_SUBTRACTOR          RelocTypeARM64 = 1
	ARM64_RELOC_BRANCH26            RelocTypeARM64 = 2
	ARM64_RELOC_PAGE21              RelocTypeARM64 = 3
	ARM64_RELOC_PAGEOFF12           RelocTypeARM64 = 4
	ARM64_RELOC_GOT_LOAD_PAGE21     RelocTypeARM64 = 5
	ARM64_RELOC_GOT_LOAD_PAGEOFF12  RelocTypeARM64 = 6
	ARM64_RELOC_POINTER_TO_GOT      RelocTypeARM64 = 7
	ARM64_RELOC_TLVP_LOAD_PAGE21    RelocTypeARM64 = 8
	ARM64_RELOC_TLVP_LOAD_PAGEOFF12 RelocTypeARM64 = 9
	ARM64_RELOC_ADDEND              RelocTypeARM64 = 10
)

// GoString returns the relocation type name in Go syntax (with a "macho." prefix).
func (r RelocTypeARM64) GoString() string { return "macho." + r.String() }
// Code generated by "stringer -type=RelocTypeGeneric,RelocTypeX86_64,RelocTypeARM,RelocTypeARM64 -output reloctype_string.go"; DO NOT EDIT.
package macho
import "strconv"
// The four groups below follow the standard stringer layout: a
// compile-time guard that breaks if the constant values drift, a packed
// name string, an index table, and a String method that slices the
// packed string. NOTE(review): this file is generated ("DO NOT EDIT");
// prefer re-running stringer over hand edits.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[GENERIC_RELOC_VANILLA-0]
	_ = x[GENERIC_RELOC_PAIR-1]
	_ = x[GENERIC_RELOC_SECTDIFF-2]
	_ = x[GENERIC_RELOC_PB_LA_PTR-3]
	_ = x[GENERIC_RELOC_LOCAL_SECTDIFF-4]
	_ = x[GENERIC_RELOC_TLV-5]
}

const _RelocTypeGeneric_name = "GENERIC_RELOC_VANILLAGENERIC_RELOC_PAIRGENERIC_RELOC_SECTDIFFGENERIC_RELOC_PB_LA_PTRGENERIC_RELOC_LOCAL_SECTDIFFGENERIC_RELOC_TLV"

var _RelocTypeGeneric_index = [...]uint8{0, 21, 39, 61, 84, 112, 129}

// String returns the constant's name, or "RelocTypeGeneric(n)" for out-of-range values.
func (i RelocTypeGeneric) String() string {
	if i < 0 || i >= RelocTypeGeneric(len(_RelocTypeGeneric_index)-1) {
		return "RelocTypeGeneric(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _RelocTypeGeneric_name[_RelocTypeGeneric_index[i]:_RelocTypeGeneric_index[i+1]]
}

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[X86_64_RELOC_UNSIGNED-0]
	_ = x[X86_64_RELOC_SIGNED-1]
	_ = x[X86_64_RELOC_BRANCH-2]
	_ = x[X86_64_RELOC_GOT_LOAD-3]
	_ = x[X86_64_RELOC_GOT-4]
	_ = x[X86_64_RELOC_SUBTRACTOR-5]
	_ = x[X86_64_RELOC_SIGNED_1-6]
	_ = x[X86_64_RELOC_SIGNED_2-7]
	_ = x[X86_64_RELOC_SIGNED_4-8]
	_ = x[X86_64_RELOC_TLV-9]
}

const _RelocTypeX86_64_name = "X86_64_RELOC_UNSIGNEDX86_64_RELOC_SIGNEDX86_64_RELOC_BRANCHX86_64_RELOC_GOT_LOADX86_64_RELOC_GOTX86_64_RELOC_SUBTRACTORX86_64_RELOC_SIGNED_1X86_64_RELOC_SIGNED_2X86_64_RELOC_SIGNED_4X86_64_RELOC_TLV"

var _RelocTypeX86_64_index = [...]uint8{0, 21, 40, 59, 80, 96, 119, 140, 161, 182, 198}

// String returns the constant's name, or "RelocTypeX86_64(n)" for out-of-range values.
func (i RelocTypeX86_64) String() string {
	if i < 0 || i >= RelocTypeX86_64(len(_RelocTypeX86_64_index)-1) {
		return "RelocTypeX86_64(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _RelocTypeX86_64_name[_RelocTypeX86_64_index[i]:_RelocTypeX86_64_index[i+1]]
}

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ARM_RELOC_VANILLA-0]
	_ = x[ARM_RELOC_PAIR-1]
	_ = x[ARM_RELOC_SECTDIFF-2]
	_ = x[ARM_RELOC_LOCAL_SECTDIFF-3]
	_ = x[ARM_RELOC_PB_LA_PTR-4]
	_ = x[ARM_RELOC_BR24-5]
	_ = x[ARM_THUMB_RELOC_BR22-6]
	_ = x[ARM_THUMB_32BIT_BRANCH-7]
	_ = x[ARM_RELOC_HALF-8]
	_ = x[ARM_RELOC_HALF_SECTDIFF-9]
}

const _RelocTypeARM_name = "ARM_RELOC_VANILLAARM_RELOC_PAIRARM_RELOC_SECTDIFFARM_RELOC_LOCAL_SECTDIFFARM_RELOC_PB_LA_PTRARM_RELOC_BR24ARM_THUMB_RELOC_BR22ARM_THUMB_32BIT_BRANCHARM_RELOC_HALFARM_RELOC_HALF_SECTDIFF"

var _RelocTypeARM_index = [...]uint8{0, 17, 31, 49, 73, 92, 106, 126, 148, 162, 185}

// String returns the constant's name, or "RelocTypeARM(n)" for out-of-range values.
func (i RelocTypeARM) String() string {
	if i < 0 || i >= RelocTypeARM(len(_RelocTypeARM_index)-1) {
		return "RelocTypeARM(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _RelocTypeARM_name[_RelocTypeARM_index[i]:_RelocTypeARM_index[i+1]]
}

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ARM64_RELOC_UNSIGNED-0]
	_ = x[ARM64_RELOC_SUBTRACTOR-1]
	_ = x[ARM64_RELOC_BRANCH26-2]
	_ = x[ARM64_RELOC_PAGE21-3]
	_ = x[ARM64_RELOC_PAGEOFF12-4]
	_ = x[ARM64_RELOC_GOT_LOAD_PAGE21-5]
	_ = x[ARM64_RELOC_GOT_LOAD_PAGEOFF12-6]
	_ = x[ARM64_RELOC_POINTER_TO_GOT-7]
	_ = x[ARM64_RELOC_TLVP_LOAD_PAGE21-8]
	_ = x[ARM64_RELOC_TLVP_LOAD_PAGEOFF12-9]
	_ = x[ARM64_RELOC_ADDEND-10]
}

const _RelocTypeARM64_name = "ARM64_RELOC_UNSIGNEDARM64_RELOC_SUBTRACTORARM64_RELOC_BRANCH26ARM64_RELOC_PAGE21ARM64_RELOC_PAGEOFF12ARM64_RELOC_GOT_LOAD_PAGE21ARM64_RELOC_GOT_LOAD_PAGEOFF12ARM64_RELOC_POINTER_TO_GOTARM64_RELOC_TLVP_LOAD_PAGE21ARM64_RELOC_TLVP_LOAD_PAGEOFF12ARM64_RELOC_ADDEND"

var _RelocTypeARM64_index = [...]uint16{0, 20, 42, 62, 80, 101, 128, 158, 184, 212, 243, 261}

// String returns the constant's name, or "RelocTypeARM64(n)" for out-of-range values.
func (i RelocTypeARM64) String() string {
	if i < 0 || i >= RelocTypeARM64(len(_RelocTypeARM64_index)-1) {
		return "RelocTypeARM64(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _RelocTypeARM64_name[_RelocTypeARM64_index[i]:_RelocTypeARM64_index[i+1]]
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package pe implements access to PE (Microsoft Windows Portable Executable) files.
# Security
This package is not designed to be hardened against adversarial inputs, and is
outside the scope of https://go.dev/security/policy. In particular, only basic
validation is done when parsing object files. As such, care should be taken when
parsing untrusted inputs, as parsing malformed files may consume significant
resources, or cause panics.
*/
package pe
import (
"bytes"
"compress/zlib"
"debug/dwarf"
"encoding/binary"
"errors"
"fmt"
"io"
"os"
"strings"
)
// A File represents an open PE file.
type File struct {
	FileHeader
	OptionalHeader any        // of type *OptionalHeader32 or *OptionalHeader64
	Sections       []*Section
	Symbols        []*Symbol    // COFF symbols with auxiliary symbol records removed
	COFFSymbols    []COFFSymbol // all COFF symbols (including auxiliary symbol records)
	StringTable    StringTable
	closer         io.Closer // set by Open so that Close can release the underlying file
}
// Open opens the named file using [os.Open] and prepares it for use as a PE binary.
func Open(name string) (*File, error) {
	osf, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	pf, err := NewFile(osf)
	if err != nil {
		// Parsing failed; don't leak the open file handle.
		osf.Close()
		return nil, err
	}
	pf.closer = osf
	return pf, nil
}
// Close closes the [File].
// If the [File] was created using [NewFile] directly instead of [Open],
// Close has no effect.
func (f *File) Close() error {
	if f.closer == nil {
		return nil
	}
	err := f.closer.Close()
	f.closer = nil
	return err
}
// TODO(brainman): add Load function, as a replacement for NewFile, that does not call removeAuxSymbols (for performance)
// NewFile creates a new [File] for accessing a PE binary in an underlying reader.
func NewFile(r io.ReaderAt) (*File, error) {
	f := new(File)
	sr := io.NewSectionReader(r, 0, 1<<63-1)

	// A PE image usually begins with an MS-DOS stub ("MZ"); the offset
	// of the real PE signature is stored at 0x3c within that stub. A
	// bare COFF object has no stub and starts with the file header.
	var dosheader [96]byte
	if _, err := r.ReadAt(dosheader[0:], 0); err != nil {
		return nil, err
	}
	var base int64
	if dosheader[0] == 'M' && dosheader[1] == 'Z' {
		signoff := int64(binary.LittleEndian.Uint32(dosheader[0x3c:]))
		var sign [4]byte
		// Check the read error: previously it was ignored, so a
		// truncated file was misreported as having an all-zero
		// signature instead of surfacing the underlying I/O error.
		if _, err := r.ReadAt(sign[:], signoff); err != nil {
			return nil, fmt.Errorf("cannot read PE file signature: %w", err)
		}
		if !(sign[0] == 'P' && sign[1] == 'E' && sign[2] == 0 && sign[3] == 0) {
			return nil, fmt.Errorf("invalid PE file signature: % x", sign)
		}
		base = signoff + 4
	} else {
		base = int64(0)
	}
	sr.Seek(base, io.SeekStart)
	if err := binary.Read(sr, binary.LittleEndian, &f.FileHeader); err != nil {
		return nil, err
	}
	switch f.FileHeader.Machine {
	case IMAGE_FILE_MACHINE_AMD64,
		IMAGE_FILE_MACHINE_ARM64,
		IMAGE_FILE_MACHINE_ARMNT,
		IMAGE_FILE_MACHINE_I386,
		IMAGE_FILE_MACHINE_RISCV32,
		IMAGE_FILE_MACHINE_RISCV64,
		IMAGE_FILE_MACHINE_RISCV128,
		IMAGE_FILE_MACHINE_UNKNOWN:
		// ok
	default:
		return nil, fmt.Errorf("unrecognized PE machine: %#x", f.FileHeader.Machine)
	}

	var err error

	// Read string table.
	f.StringTable, err = readStringTable(&f.FileHeader, sr)
	if err != nil {
		return nil, err
	}

	// Read symbol table.
	f.COFFSymbols, err = readCOFFSymbols(&f.FileHeader, sr)
	if err != nil {
		return nil, err
	}
	f.Symbols, err = removeAuxSymbols(f.COFFSymbols, f.StringTable)
	if err != nil {
		return nil, err
	}

	// Seek past file header.
	_, err = sr.Seek(base+int64(binary.Size(f.FileHeader)), io.SeekStart)
	if err != nil {
		return nil, err
	}

	// Read optional header.
	f.OptionalHeader, err = readOptionalHeader(sr, f.FileHeader.SizeOfOptionalHeader)
	if err != nil {
		return nil, err
	}

	// Process sections.
	f.Sections = make([]*Section, f.FileHeader.NumberOfSections)
	for i := 0; i < int(f.FileHeader.NumberOfSections); i++ {
		sh := new(SectionHeader32)
		if err := binary.Read(sr, binary.LittleEndian, sh); err != nil {
			return nil, err
		}
		// Long section names are stored in the string table; fullName
		// resolves them.
		name, err := sh.fullName(f.StringTable)
		if err != nil {
			return nil, err
		}
		s := new(Section)
		s.SectionHeader = SectionHeader{
			Name:                 name,
			VirtualSize:          sh.VirtualSize,
			VirtualAddress:       sh.VirtualAddress,
			Size:                 sh.SizeOfRawData,
			Offset:               sh.PointerToRawData,
			PointerToRelocations: sh.PointerToRelocations,
			PointerToLineNumbers: sh.PointerToLineNumbers,
			NumberOfRelocations:  sh.NumberOfRelocations,
			NumberOfLineNumbers:  sh.NumberOfLineNumbers,
			Characteristics:      sh.Characteristics,
		}
		r2 := r
		if sh.PointerToRawData == 0 { // .bss must have all 0s
			r2 = &nobitsSectionReader{}
		}
		s.sr = io.NewSectionReader(r2, int64(s.SectionHeader.Offset), int64(s.SectionHeader.Size))
		s.ReaderAt = s.sr
		f.Sections[i] = s
	}
	// Read relocations in a second pass, once every section header has
	// been consumed from the stream.
	for i := range f.Sections {
		var err error
		f.Sections[i].Relocs, err = readRelocs(&f.Sections[i].SectionHeader, sr)
		if err != nil {
			return nil, err
		}
	}

	return f, nil
}
// nobitsSectionReader backs sections that occupy no space in the file
// (PointerToRawData == 0, e.g. .bss); reading from them is always an error.
type nobitsSectionReader struct{}

// ReadAt always fails: a section with no raw data has nothing to read.
func (*nobitsSectionReader) ReadAt(p []byte, off int64) (n int, err error) {
	err = errors.New("unexpected read from section with uninitialized data")
	return 0, err
}
// getString extracts a string from symbol string table.
// It returns the NUL-terminated string beginning at section[start] and
// true, or "" and false when start is out of range or no terminating
// NUL byte follows.
func getString(section []byte, start int) (string, bool) {
	if start < 0 || start >= len(section) {
		return "", false
	}
	if n := bytes.IndexByte(section[start:], 0); n >= 0 {
		return string(section[start : start+n]), true
	}
	return "", false
}
// Section returns the first section with the given name, or nil if no such
// section exists.
func (f *File) Section(name string) *Section {
	for i := range f.Sections {
		if sect := f.Sections[i]; sect.Name == name {
			return sect
		}
	}
	return nil
}
// DWARF returns the DWARF debug information for the PE file.
func (f *File) DWARF() (*dwarf.Data, error) {
	// dwarfSuffix maps a PE debug section name (".debug_info",
	// ".zdebug_line", ...) to the generic DWARF suffix ("info",
	// "line"), or "" if the section is not a DWARF section.
	dwarfSuffix := func(s *Section) string {
		switch {
		case strings.HasPrefix(s.Name, ".debug_"):
			return s.Name[7:]
		case strings.HasPrefix(s.Name, ".zdebug_"):
			return s.Name[8:]
		default:
			return ""
		}
	}

	// sectionData gets the data for s and checks its size.
	sectionData := func(s *Section) ([]byte, error) {
		b, err := s.Data()
		// Tolerate a read error as long as we still received the full
		// declared section size.
		if err != nil && uint32(len(b)) < s.Size {
			return nil, err
		}

		// The file-aligned raw size may exceed the in-memory size;
		// trim the padding using VirtualSize when it is smaller.
		if 0 < s.VirtualSize && s.VirtualSize < s.Size {
			b = b[:s.VirtualSize]
		}

		// Inflate the "ZLIB" + 8-byte big-endian length framing used by
		// compressed .zdebug_* sections.
		if len(b) >= 12 && string(b[:4]) == "ZLIB" {
			dlen := binary.BigEndian.Uint64(b[4:12])
			// NOTE(review): dlen comes from the file and sizes this
			// allocation unchecked — a corrupt file can force a very
			// large allocation.
			dbuf := make([]byte, dlen)
			r, err := zlib.NewReader(bytes.NewBuffer(b[12:]))
			if err != nil {
				return nil, err
			}
			if _, err := io.ReadFull(r, dbuf); err != nil {
				return nil, err
			}
			if err := r.Close(); err != nil {
				return nil, err
			}
			b = dbuf
		}
		return b, nil
	}

	// There are many other DWARF sections, but these
	// are the ones the debug/dwarf package uses.
	// Don't bother loading others.
	var dat = map[string][]byte{"abbrev": nil, "info": nil, "str": nil, "line": nil, "ranges": nil}
	for _, s := range f.Sections {
		suffix := dwarfSuffix(s)
		if suffix == "" {
			continue
		}
		if _, ok := dat[suffix]; !ok {
			continue
		}

		b, err := sectionData(s)
		if err != nil {
			return nil, err
		}
		dat[suffix] = b
	}

	d, err := dwarf.New(dat["abbrev"], nil, nil, dat["info"], dat["line"], nil, dat["ranges"], dat["str"])
	if err != nil {
		return nil, err
	}

	// Look for DWARF4 .debug_types sections and DWARF5 sections.
	for i, s := range f.Sections {
		suffix := dwarfSuffix(s)
		if suffix == "" {
			continue
		}
		if _, ok := dat[suffix]; ok {
			// Already handled.
			continue
		}

		b, err := sectionData(s)
		if err != nil {
			return nil, err
		}

		if suffix == "types" {
			// Each .debug_types section gets a distinct synthetic name.
			err = d.AddTypes(fmt.Sprintf("types-%d", i), b)
		} else {
			err = d.AddSection(".debug_"+suffix, b)
		}
		if err != nil {
			return nil, err
		}
	}

	return d, nil
}
// TODO(brainman): document ImportDirectory once we decide what to do with it.

// ImportDirectory is one entry of the PE import directory table; each
// entry describes the imports from a single DLL.
type ImportDirectory struct {
	OriginalFirstThunk uint32
	TimeDateStamp      uint32
	ForwarderChain     uint32
	Name               uint32 // RVA of the NUL-terminated DLL name
	FirstThunk         uint32

	dll string // resolved DLL name, filled in by ImportedSymbols
}
// ImportedSymbols returns the names of all symbols
// referred to by the binary f that are expected to be
// satisfied by other libraries at dynamic load time.
// It does not return weak symbols.
//
// Each entry has the form "symbol:dll". Ordinal-only imports are
// currently skipped (see TODOs below).
func (f *File) ImportedSymbols() ([]string, error) {
	if f.OptionalHeader == nil {
		return nil, nil
	}

	_, pe64 := f.OptionalHeader.(*OptionalHeader64)

	// grab the number of data directory entries
	var dd_length uint32
	if pe64 {
		dd_length = f.OptionalHeader.(*OptionalHeader64).NumberOfRvaAndSizes
	} else {
		dd_length = f.OptionalHeader.(*OptionalHeader32).NumberOfRvaAndSizes
	}

	// check that the length of data directory entries is large
	// enough to include the imports directory.
	if dd_length < IMAGE_DIRECTORY_ENTRY_IMPORT+1 {
		return nil, nil
	}

	// grab the import data directory entry
	var idd DataDirectory
	if pe64 {
		idd = f.OptionalHeader.(*OptionalHeader64).DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT]
	} else {
		idd = f.OptionalHeader.(*OptionalHeader32).DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT]
	}

	// figure out which section contains the import directory table
	var ds *Section
	ds = nil
	for _, s := range f.Sections {
		if s.Offset == 0 {
			continue
		}
		// We are using distance between s.VirtualAddress and idd.VirtualAddress
		// to avoid potential overflow of uint32 caused by addition of s.VirtualSize
		// to s.VirtualAddress.
		if s.VirtualAddress <= idd.VirtualAddress && idd.VirtualAddress-s.VirtualAddress < s.VirtualSize {
			ds = s
			break
		}
	}

	// didn't find a section, so no import libraries were found
	if ds == nil {
		return nil, nil
	}

	d, err := ds.Data()
	if err != nil {
		return nil, err
	}

	// seek to the virtual address specified in the import data directory
	seek := idd.VirtualAddress - ds.VirtualAddress
	if seek >= uint32(len(d)) {
		return nil, errors.New("optional header data directory virtual size doesn't fit within data seek")
	}
	d = d[seek:]

	// start decoding the import directory
	var ida []ImportDirectory
	for len(d) >= 20 {
		var dt ImportDirectory
		dt.OriginalFirstThunk = binary.LittleEndian.Uint32(d[0:4])
		dt.TimeDateStamp = binary.LittleEndian.Uint32(d[4:8])
		dt.ForwarderChain = binary.LittleEndian.Uint32(d[8:12])
		dt.Name = binary.LittleEndian.Uint32(d[12:16])
		dt.FirstThunk = binary.LittleEndian.Uint32(d[16:20])
		d = d[20:]
		if dt.OriginalFirstThunk == 0 {
			// A descriptor with a zero thunk RVA terminates the table.
			break
		}
		ida = append(ida, dt)
	}
	// TODO(brainman): this needs to be rewritten
	//  ds.Data() returns contents of section containing import table. Why store in variable called "names"?
	//  Why we are retrieving it second time? We already have it in "d", and it is not modified anywhere.
	//  getString does not extracts a string from symbol string table (as getString doco says).
	//  Why ds.Data() called again and again in the loop?
	//  Needs test before rewrite.
	names, _ := ds.Data()
	var all []string
	for _, dt := range ida {
		dt.dll, _ = getString(names, int(dt.Name-ds.VirtualAddress))
		d, _ = ds.Data()
		// seek to OriginalFirstThunk
		seek := dt.OriginalFirstThunk - ds.VirtualAddress
		if seek >= uint32(len(d)) {
			return nil, errors.New("import directory original first thunk doesn't fit within data seek")
		}
		d = d[seek:]
		for len(d) > 0 {
			if pe64 { // 64bit
				// Each 64-bit thunk entry is 8 bytes.
				if len(d) < 8 {
					return nil, errors.New("thunk parsing needs at least 8-bytes")
				}
				va := binary.LittleEndian.Uint64(d[0:8])
				d = d[8:]
				if va == 0 {
					break
				}
				if va&0x8000000000000000 > 0 { // is Ordinal
					// TODO add dynimport ordinal support.
				} else {
					// +2 skips the 2-byte hint preceding the name
					// in the hint/name table entry.
					fn, _ := getString(names, int(uint32(va)-ds.VirtualAddress+2))
					all = append(all, fn+":"+dt.dll)
				}
			} else { // 32bit
				// Each 32-bit thunk entry is 4 bytes. The check must be
				// len(d) < 4 (not <= 4): a thunk list whose terminating
				// zero entry occupies exactly the last 4 bytes of the
				// section data is valid and must still be parsed.
				if len(d) < 4 {
					return nil, errors.New("thunk parsing needs at least 4-bytes")
				}
				va := binary.LittleEndian.Uint32(d[0:4])
				d = d[4:]
				if va == 0 {
					break
				}
				if va&0x80000000 > 0 { // is Ordinal
					// TODO add dynimport ordinal support.
					//ord := va&0x0000FFFF
				} else {
					fn, _ := getString(names, int(va-ds.VirtualAddress+2))
					all = append(all, fn+":"+dt.dll)
				}
			}
		}
	}

	return all, nil
}
// ImportedLibraries returns the names of all libraries
// referred to by the binary f that are expected to be
// linked with the binary at dynamic link time.
//
// It is currently unimplemented and always reports no libraries.
func (f *File) ImportedLibraries() ([]string, error) {
	// TODO
	// cgo -dynimport don't use this for windows PE, so just return.
	return nil, nil
}
// FormatError is unused.
// The type is retained for compatibility.
type FormatError struct {
}

// Error implements the error interface with a fixed message.
func (e *FormatError) Error() string {
	const msg = "unknown error"
	return msg
}
// readOptionalHeader accepts an io.ReadSeeker pointing to optional header in the PE file
// and its size as seen in the file header.
// It parses the given size of bytes and returns optional header. It infers whether the
// bytes being parsed refer to 32 bit or 64 bit version of optional header.
//
// The returned value is *OptionalHeader32 or *OptionalHeader64 (or nil when
// sz is 0). Fields are read one at a time rather than with a single
// binary.Read of the whole struct because the trailing DataDirectory array
// has a variable on-disk length (see readDataDirectories).
func readOptionalHeader(r io.ReadSeeker, sz uint16) (any, error) {
	// If optional header size is 0, return empty optional header.
	if sz == 0 {
		return nil, nil
	}

	var (
		// First couple of bytes in option header state its type.
		// We need to read them first to determine the type and
		// validity of optional header.
		ohMagic   uint16
		ohMagicSz = binary.Size(ohMagic)
	)

	// If optional header size is greater than 0 but less than its magic size, return error.
	if sz < uint16(ohMagicSz) {
		return nil, fmt.Errorf("optional header size is less than optional header magic size")
	}

	// read reads from io.ReadSeeke, r, into data. It stores the first
	// failure in err (captured by the closure) and reports success.
	var err error
	read := func(data any) bool {
		err = binary.Read(r, binary.LittleEndian, data)
		return err == nil
	}

	if !read(&ohMagic) {
		return nil, fmt.Errorf("failure to read optional header magic: %v", err)
	}

	switch ohMagic {
	case 0x10b: // PE32
		var (
			oh32 OptionalHeader32
			// There can be 0 or more data directories. So the minimum size of optional
			// header is calculated by subtracting oh32.DataDirectory size from oh32 size.
			oh32MinSz = binary.Size(oh32) - binary.Size(oh32.DataDirectory)
		)
		if sz < uint16(oh32MinSz) {
			return nil, fmt.Errorf("optional header size(%d) is less minimum size (%d) of PE32 optional header", sz, oh32MinSz)
		}

		// Init oh32 fields
		oh32.Magic = ohMagic
		if !read(&oh32.MajorLinkerVersion) ||
			!read(&oh32.MinorLinkerVersion) ||
			!read(&oh32.SizeOfCode) ||
			!read(&oh32.SizeOfInitializedData) ||
			!read(&oh32.SizeOfUninitializedData) ||
			!read(&oh32.AddressOfEntryPoint) ||
			!read(&oh32.BaseOfCode) ||
			!read(&oh32.BaseOfData) ||
			!read(&oh32.ImageBase) ||
			!read(&oh32.SectionAlignment) ||
			!read(&oh32.FileAlignment) ||
			!read(&oh32.MajorOperatingSystemVersion) ||
			!read(&oh32.MinorOperatingSystemVersion) ||
			!read(&oh32.MajorImageVersion) ||
			!read(&oh32.MinorImageVersion) ||
			!read(&oh32.MajorSubsystemVersion) ||
			!read(&oh32.MinorSubsystemVersion) ||
			!read(&oh32.Win32VersionValue) ||
			!read(&oh32.SizeOfImage) ||
			!read(&oh32.SizeOfHeaders) ||
			!read(&oh32.CheckSum) ||
			!read(&oh32.Subsystem) ||
			!read(&oh32.DllCharacteristics) ||
			!read(&oh32.SizeOfStackReserve) ||
			!read(&oh32.SizeOfStackCommit) ||
			!read(&oh32.SizeOfHeapReserve) ||
			!read(&oh32.SizeOfHeapCommit) ||
			!read(&oh32.LoaderFlags) ||
			!read(&oh32.NumberOfRvaAndSizes) {
			return nil, fmt.Errorf("failure to read PE32 optional header: %v", err)
		}

		// The remaining sz-oh32MinSz bytes hold the data directories.
		dd, err := readDataDirectories(r, sz-uint16(oh32MinSz), oh32.NumberOfRvaAndSizes)
		if err != nil {
			return nil, err
		}

		copy(oh32.DataDirectory[:], dd)

		return &oh32, nil
	case 0x20b: // PE32+
		var (
			oh64 OptionalHeader64
			// There can be 0 or more data directories. So the minimum size of optional
			// header is calculated by subtracting oh64.DataDirectory size from oh64 size.
			oh64MinSz = binary.Size(oh64) - binary.Size(oh64.DataDirectory)
		)
		if sz < uint16(oh64MinSz) {
			return nil, fmt.Errorf("optional header size(%d) is less minimum size (%d) for PE32+ optional header", sz, oh64MinSz)
		}

		// Init oh64 fields (note: PE32+ has no BaseOfData field).
		oh64.Magic = ohMagic
		if !read(&oh64.MajorLinkerVersion) ||
			!read(&oh64.MinorLinkerVersion) ||
			!read(&oh64.SizeOfCode) ||
			!read(&oh64.SizeOfInitializedData) ||
			!read(&oh64.SizeOfUninitializedData) ||
			!read(&oh64.AddressOfEntryPoint) ||
			!read(&oh64.BaseOfCode) ||
			!read(&oh64.ImageBase) ||
			!read(&oh64.SectionAlignment) ||
			!read(&oh64.FileAlignment) ||
			!read(&oh64.MajorOperatingSystemVersion) ||
			!read(&oh64.MinorOperatingSystemVersion) ||
			!read(&oh64.MajorImageVersion) ||
			!read(&oh64.MinorImageVersion) ||
			!read(&oh64.MajorSubsystemVersion) ||
			!read(&oh64.MinorSubsystemVersion) ||
			!read(&oh64.Win32VersionValue) ||
			!read(&oh64.SizeOfImage) ||
			!read(&oh64.SizeOfHeaders) ||
			!read(&oh64.CheckSum) ||
			!read(&oh64.Subsystem) ||
			!read(&oh64.DllCharacteristics) ||
			!read(&oh64.SizeOfStackReserve) ||
			!read(&oh64.SizeOfStackCommit) ||
			!read(&oh64.SizeOfHeapReserve) ||
			!read(&oh64.SizeOfHeapCommit) ||
			!read(&oh64.LoaderFlags) ||
			!read(&oh64.NumberOfRvaAndSizes) {
			return nil, fmt.Errorf("failure to read PE32+ optional header: %v", err)
		}

		// The remaining sz-oh64MinSz bytes hold the data directories.
		dd, err := readDataDirectories(r, sz-uint16(oh64MinSz), oh64.NumberOfRvaAndSizes)
		if err != nil {
			return nil, err
		}

		copy(oh64.DataDirectory[:], dd)

		return &oh64, nil
	default:
		return nil, fmt.Errorf("optional header has unexpected Magic of 0x%x", ohMagic)
	}
}
// readDataDirectories accepts an io.ReadSeeker pointing to data directories in the PE file,
// its size and number of data directories as seen in optional header.
// It parses the given size of bytes and returns given number of data directories.
func readDataDirectories(r io.ReadSeeker, sz uint16, n uint32) ([]DataDirectory, error) {
	entrySize := uint64(binary.Size(DataDirectory{}))
	// The declared byte size must match the entry count exactly.
	if want := uint64(n) * entrySize; uint64(sz) != want {
		return nil, fmt.Errorf("size of data directories(%d) is inconsistent with number of data directories(%d)", sz, n)
	}

	dirs := make([]DataDirectory, n)
	if err := binary.Read(r, binary.LittleEndian, dirs); err != nil {
		return nil, fmt.Errorf("failure to read data directories: %v", err)
	}
	return dirs, nil
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pe
import (
"encoding/binary"
"fmt"
"internal/saferio"
"io"
"strconv"
)
// SectionHeader32 represents real PE COFF section header.
type SectionHeader32 struct {
	Name                 [8]uint8 // inline name; "/N" means offset N in the COFF string table (see fullName)
	VirtualSize          uint32   // size of the section when loaded into memory
	VirtualAddress       uint32   // RVA of the section
	SizeOfRawData        uint32   // size of the section's data on disk
	PointerToRawData     uint32   // file offset of the section's data
	PointerToRelocations uint32   // file offset of the relocation entries
	PointerToLineNumbers uint32   // file offset of COFF line-number entries
	NumberOfRelocations  uint16   // number of relocation entries
	NumberOfLineNumbers  uint16   // number of line-number entries
	Characteristics      uint32   // IMAGE_SCN_* flags
}
// fullName finds real name of section sh. Normally name is stored
// in sh.Name, but if it is longer then 8 characters, it is stored
// in COFF string table st instead.
func (sh *SectionHeader32) fullName(st StringTable) (string, error) {
	// Short names are stored inline; long names are encoded as "/N",
	// where N is a decimal offset into the string table.
	if sh.Name[0] != '/' {
		return cstring(sh.Name[:]), nil
	}
	offset, err := strconv.Atoi(cstring(sh.Name[1:]))
	if err != nil {
		return "", err
	}
	return st.String(uint32(offset))
}
// TODO(brainman): copy all IMAGE_REL_* consts from ldpe.go here

// Reloc represents a PE COFF relocation.
// Each section contains its own relocation list.
type Reloc struct {
	VirtualAddress   uint32 // address of the item to which the relocation applies
	SymbolTableIndex uint32 // index into the COFF symbol table
	Type             uint16 // relocation kind (an IMAGE_REL_* value)
}
// readRelocs reads the relocation records for section sh from r,
// seeking to sh.PointerToRelocations first. It returns nil when the
// section has no relocations.
func readRelocs(sh *SectionHeader, r io.ReadSeeker) ([]Reloc, error) {
	if sh.NumberOfRelocations <= 0 {
		return nil, nil
	}
	if _, err := r.Seek(int64(sh.PointerToRelocations), io.SeekStart); err != nil {
		return nil, fmt.Errorf("fail to seek to %q section relocations: %v", sh.Name, err)
	}
	relocs := make([]Reloc, sh.NumberOfRelocations)
	if err := binary.Read(r, binary.LittleEndian, relocs); err != nil {
		return nil, fmt.Errorf("fail to read section relocations: %v", err)
	}
	return relocs, nil
}
// SectionHeader is similar to [SectionHeader32] with Name
// field replaced by Go string.
type SectionHeader struct {
	Name                 string // section name, long names resolved via the string table (see SectionHeader32.fullName)
	VirtualSize          uint32 // size of the section when loaded into memory
	VirtualAddress       uint32 // RVA of the section
	Size                 uint32 // size of the section's raw data on disk
	Offset               uint32 // file offset of the section's raw data
	PointerToRelocations uint32 // file offset of the relocation entries
	PointerToLineNumbers uint32 // file offset of COFF line-number entries
	NumberOfRelocations  uint16 // number of relocation entries
	NumberOfLineNumbers  uint16 // number of line-number entries
	Characteristics      uint32 // IMAGE_SCN_* flags
}
// Section provides access to PE COFF section.
type Section struct {
	SectionHeader
	Relocs []Reloc // relocations for this section, if any

	// Embed ReaderAt for ReadAt method.
	// Do not embed SectionReader directly
	// to avoid having Read and Seek.
	// If a client wants Read and Seek it must use
	// Open() to avoid fighting over the seek offset
	// with other clients.
	io.ReaderAt
	sr *io.SectionReader // backing reader used by Data and Open
}
// Data reads and returns the contents of the PE section s.
//
// If s.Offset is 0, the section has no contents,
// and Data will always return a non-nil error.
func (s *Section) Data() ([]byte, error) {
	// saferio caps the allocation so a corrupt Size cannot
	// trigger a huge up-front allocation.
	limit := uint64(s.Size)
	return saferio.ReadDataAt(s.sr, limit, 0)
}
// Open returns a new ReadSeeker reading the PE section s.
//
// If s.Offset is 0, the section has no contents, and all calls
// to the returned reader will return a non-nil error.
func (s *Section) Open() io.ReadSeeker {
	// Use the maximum possible length; the underlying SectionReader
	// already bounds reads to the section's extent.
	const maxInt64 = 1<<63 - 1
	return io.NewSectionReader(s.sr, 0, maxInt64)
}
// Section characteristics flags.
const (
	IMAGE_SCN_CNT_CODE               = 0x00000020 // section contains executable code
	IMAGE_SCN_CNT_INITIALIZED_DATA   = 0x00000040 // section contains initialized data
	IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080 // section contains uninitialized data
	IMAGE_SCN_LNK_COMDAT             = 0x00001000 // section contains COMDAT data
	IMAGE_SCN_MEM_DISCARDABLE        = 0x02000000 // section can be discarded as needed
	IMAGE_SCN_MEM_EXECUTE            = 0x20000000 // section can be executed as code
	IMAGE_SCN_MEM_READ               = 0x40000000 // section can be read
	IMAGE_SCN_MEM_WRITE              = 0x80000000 // section can be written to
)
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pe
import (
"bytes"
"encoding/binary"
"fmt"
"internal/saferio"
"io"
)
// cstring converts ASCII byte sequence b to string.
// It stops once it finds 0 or reaches end of b.
func cstring(b []byte) string {
	if i := bytes.IndexByte(b, 0); i >= 0 {
		return string(b[:i])
	}
	// No terminator: the whole slice is the string.
	return string(b)
}
// StringTable is a COFF string table.
// On disk the table is prefixed by a 4-byte length field, which
// readStringTable strips; offsets passed to String still include
// those 4 bytes.
type StringTable []byte
// readStringTable reads the COFF string table from r. It returns a
// nil table when the file has no symbol table or the string table
// is empty.
func readStringTable(fh *FileHeader, r io.ReadSeeker) (StringTable, error) {
	// COFF string table is located right after COFF symbol table.
	if fh.PointerToSymbolTable <= 0 {
		return nil, nil
	}
	offset := fh.PointerToSymbolTable + COFFSymbolSize*fh.NumberOfSymbols
	if _, err := r.Seek(int64(offset), io.SeekStart); err != nil {
		return nil, fmt.Errorf("fail to seek to string table: %v", err)
	}
	var length uint32
	if err := binary.Read(r, binary.LittleEndian, &length); err != nil {
		return nil, fmt.Errorf("fail to read string table length: %v", err)
	}
	// The recorded length includes the 4-byte length field itself.
	if length <= 4 {
		return nil, nil
	}
	length -= 4
	buf, err := saferio.ReadData(r, uint64(length))
	if err != nil {
		return nil, fmt.Errorf("fail to read string table: %v", err)
	}
	return StringTable(buf), nil
}
// TODO(brainman): decide if start parameter should be int instead of uint32

// String extracts string from COFF string table st at offset start.
func (st StringTable) String(start uint32) (string, error) {
	// On-disk offsets include the 4-byte length prefix that
	// readStringTable strips, so anything below 4 is out of range.
	if start < 4 {
		return "", fmt.Errorf("offset %d is before the start of string table", start)
	}
	rel := start - 4
	if int(rel) > len(st) {
		return "", fmt.Errorf("offset %d is beyond the end of string table", rel)
	}
	return cstring(st[rel:]), nil
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pe
import (
"encoding/binary"
"errors"
"fmt"
"internal/saferio"
"io"
"unsafe"
)
// COFFSymbolSize is the on-disk size in bytes of one COFF symbol
// table record (primary or auxiliary).
const COFFSymbolSize = 18

// COFFSymbol represents single COFF symbol table record.
type COFFSymbol struct {
	Name               [8]uint8 // inline name, or a string-table offset (see isSymNameOffset)
	Value              uint32
	SectionNumber      int16
	Type               uint16
	StorageClass       uint8
	NumberOfAuxSymbols uint8 // number of auxiliary records that follow this one
}
// readCOFFSymbols reads in the symbol table for a PE file, returning
// a slice of COFFSymbol objects. The PE format includes both primary
// symbols (whose fields are described by COFFSymbol above) and
// auxiliary symbols; all symbols are 18 bytes in size. The auxiliary
// symbols for a given primary symbol are placed following it in the
// array, e.g.
//
//	...
//	k+0:  regular sym k
//	k+1:    1st aux symbol for k
//	k+2:    2nd aux symbol for k
//	k+3:  regular sym k+3
//	k+4:    1st aux symbol for k+3
//	k+5:  regular sym k+5
//	k+6:  regular sym k+6
//
// The PE format allows for several possible aux symbol formats. For
// more info see:
//
//	https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#auxiliary-symbol-records
//
// At the moment this package only provides APIs for looking at
// aux symbols of format 5 (associated with section definition symbols).
func readCOFFSymbols(fh *FileHeader, r io.ReadSeeker) ([]COFFSymbol, error) {
	if fh.PointerToSymbolTable == 0 {
		return nil, nil
	}
	if fh.NumberOfSymbols <= 0 {
		return nil, nil
	}
	_, err := r.Seek(int64(fh.PointerToSymbolTable), io.SeekStart)
	if err != nil {
		return nil, fmt.Errorf("fail to seek to symbol table: %v", err)
	}
	// Cap the initial allocation so a corrupt symbol count cannot
	// cause a huge up-front allocation.
	c := saferio.SliceCap[COFFSymbol](uint64(fh.NumberOfSymbols))
	if c < 0 {
		return nil, errors.New("too many symbols; file may be corrupt")
	}
	syms := make([]COFFSymbol, 0, c)
	naux := 0 // aux records still expected after the last primary symbol
	for k := uint32(0); k < fh.NumberOfSymbols; k++ {
		var sym COFFSymbol
		if naux == 0 {
			// Read a primary symbol.
			err = binary.Read(r, binary.LittleEndian, &sym)
			if err != nil {
				return nil, fmt.Errorf("fail to read symbol table: %v", err)
			}
			// Record how many auxiliary symbols it has.
			naux = int(sym.NumberOfAuxSymbols)
		} else {
			// Read an aux symbol. At the moment we assume all
			// aux symbols are format 5 (obviously this doesn't always
			// hold; more cases will be needed below if more aux formats
			// are supported in the future).
			naux--
			// Decode the 18 bytes with the format-5 layout by aliasing
			// the COFFSymbol storage (both structs are 18 bytes).
			aux := (*COFFSymbolAuxFormat5)(unsafe.Pointer(&sym))
			err = binary.Read(r, binary.LittleEndian, aux)
			if err != nil {
				return nil, fmt.Errorf("fail to read symbol table: %v", err)
			}
		}
		syms = append(syms, sym)
	}
	if naux != 0 {
		// The table ended while aux records were still owed.
		return nil, fmt.Errorf("fail to read symbol table: %d aux symbols unread", naux)
	}
	return syms, nil
}
// isSymNameOffset checks symbol name if it is encoded as offset into string table.
// When the first 4 bytes are zero, the last 4 hold a little-endian
// string-table offset; a zero offset means the symbol has no name.
func isSymNameOffset(name [8]byte) (bool, uint32) {
	if name[0] != 0 || name[1] != 0 || name[2] != 0 || name[3] != 0 {
		// Name is stored inline, not as an offset.
		return false, 0
	}
	offset := binary.LittleEndian.Uint32(name[4:])
	if offset == 0 {
		// symbol has no name
		return false, 0
	}
	return true, offset
}
// FullName finds real name of symbol sym. Normally name is stored
// in sym.Name, but if it is longer then 8 characters, it is stored
// in COFF string table st instead.
func (sym *COFFSymbol) FullName(st StringTable) (string, error) {
	ok, offset := isSymNameOffset(sym.Name)
	if !ok {
		// Short names live directly in the 8-byte field.
		return cstring(sym.Name[:]), nil
	}
	return st.String(offset)
}
// removeAuxSymbols strips auxiliary records from allsyms, converting
// each remaining primary COFF symbol into a *Symbol with its name
// resolved against the string table st.
func removeAuxSymbols(allsyms []COFFSymbol, st StringTable) ([]*Symbol, error) {
	if len(allsyms) == 0 {
		return nil, nil
	}
	syms := make([]*Symbol, 0)
	skip := uint8(0) // aux records left to discard after the last primary symbol
	for _, sym := range allsyms {
		if skip > 0 {
			skip--
			continue
		}
		name, err := sym.FullName(st)
		if err != nil {
			return nil, err
		}
		skip = sym.NumberOfAuxSymbols
		syms = append(syms, &Symbol{
			Name:          name,
			Value:         sym.Value,
			SectionNumber: sym.SectionNumber,
			Type:          sym.Type,
			StorageClass:  sym.StorageClass,
		})
	}
	return syms, nil
}
// Symbol is similar to [COFFSymbol] with Name field replaced
// by Go string. Symbol also does not have NumberOfAuxSymbols.
type Symbol struct {
	Name          string // symbol name, long names resolved via the string table
	Value         uint32
	SectionNumber int16
	Type          uint16
	StorageClass  uint8
}
// COFFSymbolAuxFormat5 describes the expected form of an aux symbol
// attached to a section definition symbol. The PE format defines a
// number of different aux symbol formats: format 1 for function
// definitions, format 2 for .be and .ef symbols, and so on. Format 5
// holds extra info associated with a section definition, including
// number of relocations + line numbers, as well as COMDAT info. See
// https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#auxiliary-format-5-section-definitions
// for more on what's going on here.
type COFFSymbolAuxFormat5 struct {
	Size           uint32 // section size
	NumRelocs      uint16 // number of relocation entries
	NumLineNumbers uint16 // number of line-number entries
	Checksum       uint32 // COMDAT checksum
	SecNum         uint16 // section number of the associated section (COMDAT)
	Selection      uint8  // COMDAT selection kind (IMAGE_COMDAT_SELECT_*)
	_              [3]uint8 // padding
}
// These constants make up the possible values for the 'Selection'
// field in an AuxFormat5.
const (
	IMAGE_COMDAT_SELECT_NODUPLICATES = 1 // only one definition allowed
	IMAGE_COMDAT_SELECT_ANY          = 2 // any one of the definitions may be chosen
	IMAGE_COMDAT_SELECT_SAME_SIZE    = 3 // definitions must have the same size
	IMAGE_COMDAT_SELECT_EXACT_MATCH  = 4 // definitions must match exactly
	IMAGE_COMDAT_SELECT_ASSOCIATIVE  = 5 // linked to another COMDAT section (SecNum)
	IMAGE_COMDAT_SELECT_LARGEST      = 6 // largest definition is chosen
)
// COFFSymbolReadSectionDefAux returns a blob of auxiliary information
// (including COMDAT info) for a section definition symbol. Here 'idx'
// is the index of a section symbol in the main [COFFSymbol] array for
// the File. Return value is a pointer to the appropriate aux symbol
// struct. For more info, see:
//
//	auxiliary symbols: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#auxiliary-symbol-records
//	COMDAT sections: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#comdat-sections-object-only
//	auxiliary info for section definitions: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#auxiliary-format-5-section-definitions
func (f *File) COFFSymbolReadSectionDefAux(idx int) (*COFFSymbolAuxFormat5, error) {
	if idx < 0 || idx >= len(f.COFFSymbols) {
		return nil, fmt.Errorf("invalid symbol index")
	}
	pesym := &f.COFFSymbols[idx]
	const IMAGE_SYM_CLASS_STATIC = 3
	if pesym.StorageClass != uint8(IMAGE_SYM_CLASS_STATIC) {
		return nil, fmt.Errorf("incorrect symbol storage class")
	}
	if pesym.NumberOfAuxSymbols == 0 || idx+1 >= len(f.COFFSymbols) {
		return nil, fmt.Errorf("aux symbol unavailable")
	}
	// Reinterpret the 18-byte record following the primary symbol
	// as format-5 aux data (both structs share the same size).
	pesymn := &f.COFFSymbols[idx+1]
	return (*COFFSymbolAuxFormat5)(unsafe.Pointer(pesymn)), nil
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package plan9obj implements access to Plan 9 a.out object files.
# Security
This package is not designed to be hardened against adversarial inputs, and is
outside the scope of https://go.dev/security/policy. In particular, only basic
validation is done when parsing object files. As such, care should be taken when
parsing untrusted inputs, as parsing malformed files may consume significant
resources, or cause panics.
*/
package plan9obj
import (
"encoding/binary"
"errors"
"fmt"
"internal/saferio"
"io"
"os"
)
// A FileHeader represents a Plan 9 a.out file header.
type FileHeader struct {
	Magic       uint32 // magic number identifying the target architecture
	Bss         uint32 // size of the uninitialized data segment
	Entry       uint64 // program entry point
	PtrSize     int    // pointer size in bytes: 4, or 8 for Magic64 images
	LoadAddress uint64 // base load address (set by NewFile)
	HdrSize     uint64 // size of the on-disk header in bytes
}
// A File represents an open Plan 9 a.out file.
type File struct {
	FileHeader
	Sections []*Section
	closer   io.Closer // set by Open; released by Close
}
// A SectionHeader represents a single Plan 9 a.out section header.
// This structure doesn't exist on-disk, but eases navigation
// through the object file.
type SectionHeader struct {
	Name   string // section name: "text", "data", "syms", "spsz" or "pcsz"
	Size   uint32 // section size in bytes
	Offset uint32 // file offset of the section's data
}
// A Section represents a single section in a Plan 9 a.out file.
type Section struct {
	SectionHeader

	// Embed ReaderAt for ReadAt method.
	// Do not embed SectionReader directly
	// to avoid having Read and Seek.
	// If a client wants Read and Seek it must use
	// Open() to avoid fighting over the seek offset
	// with other clients.
	io.ReaderAt
	sr *io.SectionReader // backing reader used by Data and Open
}
// Data reads and returns the contents of the Plan 9 a.out section.
func (s *Section) Data() ([]byte, error) {
	// saferio caps the allocation so a corrupt Size cannot
	// trigger a huge up-front allocation.
	limit := uint64(s.Size)
	return saferio.ReadDataAt(s.sr, limit, 0)
}
// Open returns a new ReadSeeker reading the Plan 9 a.out section.
func (s *Section) Open() io.ReadSeeker {
	// The SectionReader already bounds reads to the section extent.
	const maxInt64 = 1<<63 - 1
	return io.NewSectionReader(s.sr, 0, maxInt64)
}
// A Symbol represents an entry in a Plan 9 a.out symbol table section.
type Sym struct {
	Value uint64 // symbol value
	Type  rune   // symbol type character
	Name  string // symbol name ('z'/'Z' path symbols expanded by newTable)
}
/*
 * Plan 9 a.out reader
 */

// formatError is returned by some operations if the data does
// not have the correct format for an object file.
type formatError struct {
	off int    // byte offset of the offending record
	msg string // human-readable description
	val any    // optional offending value, included when non-nil
}

func (e *formatError) Error() string {
	if e.val == nil {
		return fmt.Sprintf("%s in record at byte %#x", e.msg, e.off)
	}
	return fmt.Sprintf("%s '%v' in record at byte %#x", e.msg, e.val, e.off)
}
// Open opens the named file using [os.Open] and prepares it for use as a Plan 9 a.out binary.
func Open(name string) (*File, error) {
	file, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	parsed, err := NewFile(file)
	if err != nil {
		// Parsing failed; don't leak the file descriptor.
		file.Close()
		return nil, err
	}
	parsed.closer = file
	return parsed, nil
}
// Close closes the [File].
// If the [File] was created using [NewFile] directly instead of [Open],
// Close has no effect.
func (f *File) Close() error {
	if f.closer == nil {
		return nil
	}
	err := f.closer.Close()
	// Clear the closer so a second Close is a no-op.
	f.closer = nil
	return err
}
// parseMagic decodes and validates the 4-byte big-endian magic
// number at the start of a Plan 9 a.out file.
func parseMagic(magic []byte) (uint32, error) {
	m := binary.BigEndian.Uint32(magic)
	if m == Magic386 || m == MagicAMD64 || m == MagicARM {
		return m, nil
	}
	return 0, &formatError{0, "bad magic number", magic}
}
// NewFile creates a new [File] for accessing a Plan 9 binary in an underlying reader.
// The Plan 9 binary is expected to start at position 0 in the ReaderAt.
func NewFile(r io.ReaderAt) (*File, error) {
	sr := io.NewSectionReader(r, 0, 1<<63-1)
	// Read and decode Plan 9 magic
	var magic [4]byte
	if _, err := r.ReadAt(magic[:], 0); err != nil {
		return nil, err
	}
	_, err := parseMagic(magic[:])
	if err != nil {
		return nil, err
	}

	ph := new(prog)
	if err := binary.Read(sr, binary.BigEndian, ph); err != nil {
		return nil, err
	}

	// Defaults assume a 32-bit image; adjusted below when Magic64 is set.
	f := &File{FileHeader: FileHeader{
		Magic:       ph.Magic,
		Bss:         ph.Bss,
		Entry:       uint64(ph.Entry),
		PtrSize:     4,
		LoadAddress: 0x1000,
		HdrSize:     4 * 8,
	}}

	if ph.Magic&Magic64 != 0 {
		// 64-bit images carry an extra 8-byte entry point
		// immediately after the 32-bit header fields.
		if err := binary.Read(sr, binary.BigEndian, &f.Entry); err != nil {
			return nil, err
		}
		f.PtrSize = 8
		f.LoadAddress = 0x200000
		f.HdrSize += 8
	}

	// Sections are laid out back to back, starting right after the
	// header, in this fixed order with the sizes from the header.
	var sects = []struct {
		name string
		size uint32
	}{
		{"text", ph.Text},
		{"data", ph.Data},
		{"syms", ph.Syms},
		{"spsz", ph.Spsz},
		{"pcsz", ph.Pcsz},
	}

	f.Sections = make([]*Section, 5)

	off := uint32(f.HdrSize)

	for i, sect := range sects {
		s := new(Section)
		s.SectionHeader = SectionHeader{
			Name:   sect.name,
			Size:   sect.size,
			Offset: off,
		}
		off += sect.size
		s.sr = io.NewSectionReader(r, int64(s.Offset), int64(s.Size))
		s.ReaderAt = s.sr
		f.Sections[i] = s
	}

	return f, nil
}
// walksymtab iterates over the raw Plan 9 symbol table in data,
// decoding one symbol per iteration and passing it to fn. ptrsz is
// the byte width of the fixed value field (4 or 8). An error returned
// by fn aborts the walk and is returned to the caller.
func walksymtab(data []byte, ptrsz int, fn func(sym) error) error {
	var order binary.ByteOrder = binary.BigEndian
	var s sym
	p := data
	for len(p) >= 4 {
		// Symbol type, value.
		if len(p) < ptrsz {
			return &formatError{len(data), "unexpected EOF", nil}
		}
		// fixed-width value
		if ptrsz == 8 {
			s.value = order.Uint64(p[0:8])
			p = p[8:]
		} else {
			s.value = uint64(order.Uint32(p[0:4]))
			p = p[4:]
		}

		if len(p) < 1 {
			return &formatError{len(data), "unexpected EOF", nil}
		}
		// The top bit of the type byte is a flag; mask it off.
		typ := p[0] & 0x7F
		s.typ = typ
		p = p[1:]

		// Name: normally a NUL-terminated byte string.
		var i int
		var nnul int
		for i = 0; i < len(p); i++ {
			if p[i] == 0 {
				nnul = 1
				break
			}
		}
		switch typ {
		case 'z', 'Z':
			// Path symbols: after the leading NUL, the name is a
			// sequence of 16-bit values terminated by a 16-bit zero.
			p = p[i+nnul:]
			for i = 0; i+2 <= len(p); i += 2 {
				if p[i] == 0 && p[i+1] == 0 {
					nnul = 2
					break
				}
			}
		}
		if len(p) < i+nnul {
			return &formatError{len(data), "unexpected EOF", nil}
		}
		s.name = p[0:i]
		i += nnul
		p = p[i:]

		// Fix: propagate the callback's error instead of silently
		// discarding it, so decode failures (e.g. "bad filename code"
		// from newTable) reach the caller.
		if err := fn(s); err != nil {
			return err
		}
	}
	return nil
}
// newTable decodes the Go symbol table in data,
// returning an in-memory representation.
// It walks the raw table twice: first to count entries so the result
// slice can be allocated with exact capacity, then to decode them.
func newTable(symtab []byte, ptrsz int) ([]Sym, error) {
	var n int
	err := walksymtab(symtab, ptrsz, func(s sym) error {
		n++
		return nil
	})
	if err != nil {
		return nil, err
	}

	// fname maps numeric codes of 'f' symbols to file-name components,
	// used below to expand 'z'/'Z' path symbols.
	fname := make(map[uint16]string)
	syms := make([]Sym, 0, n)
	err = walksymtab(symtab, ptrsz, func(s sym) error {
		n := len(syms)
		// Extend within the capacity reserved by the counting pass.
		syms = syms[0 : n+1]
		ts := &syms[n]
		ts.Type = rune(s.typ)
		ts.Value = s.value
		switch s.typ {
		default:
			ts.Name = string(s.name)
		case 'z', 'Z':
			// Path symbols: the name is a sequence of 16-bit codes,
			// each an index into fname, joined with '/'.
			for i := 0; i < len(s.name); i += 2 {
				eltIdx := binary.BigEndian.Uint16(s.name[i : i+2])
				elt, ok := fname[eltIdx]
				if !ok {
					return &formatError{-1, "bad filename code", eltIdx}
				}
				if n := len(ts.Name); n > 0 && ts.Name[n-1] != '/' {
					ts.Name += "/"
				}
				ts.Name += elt
			}
		}
		switch s.typ {
		case 'f':
			// Record a file-name component for later 'z'/'Z' expansion.
			fname[uint16(s.value)] = ts.Name
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	return syms, nil
}
// ErrNoSymbols is returned by [File.Symbols] if there is no such section
// in the File. Callers can test for it with errors.Is.
var ErrNoSymbols = errors.New("no symbol section")
// Symbols returns the symbol table for f.
// It returns [ErrNoSymbols] when the file has no "syms" section.
func (f *File) Symbols() ([]Sym, error) {
	symtabSection := f.Section("syms")
	if symtabSection == nil {
		return nil, ErrNoSymbols
	}

	symtab, err := symtabSection.Data()
	if err != nil {
		// Wrap the underlying error instead of discarding it, so the
		// caller can see why the section failed to load.
		return nil, fmt.Errorf("cannot load symbol section: %w", err)
	}

	return newTable(symtab, f.PtrSize)
}
// Section returns a section with the given name, or nil if no such
// section exists.
func (f *File) Section(name string) *Section {
	for _, sec := range f.Sections {
		if sec.Name == name {
			return sec
		}
	}
	return nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ascii85 implements the ascii85 data encoding
// as used in the btoa tool and Adobe's PostScript and PDF document formats.
package ascii85
import (
"io"
"strconv"
)
/*
 * Encoder
 */

// Encode encodes src into at most [MaxEncodedLen](len(src))
// bytes of dst, returning the actual number of bytes written.
//
// The encoding handles 4-byte chunks, using a special encoding
// for the last fragment, so Encode is not appropriate for use on
// individual blocks of a large data stream. Use [NewEncoder] instead.
//
// Often, ascii85-encoded data is wrapped in <~ and ~> symbols.
// Encode does not add these.
func Encode(dst, src []byte) int {
	if len(src) == 0 {
		return 0
	}

	written := 0
	for len(src) > 0 {
		// Clear the 5 output digits up front.
		for i := 0; i < 5; i++ {
			dst[i] = 0
		}

		// Pack up to 4 input bytes, big-endian, into a 32-bit word.
		var word uint32
		for i := 0; i < 4 && i < len(src); i++ {
			word |= uint32(src[i]) << uint(24-8*i)
		}

		// Special case: a full group of zeros shortens to 'z'.
		if word == 0 && len(src) >= 4 {
			dst[0] = 'z'
			dst = dst[1:]
			src = src[4:]
			written++
			continue
		}

		// Otherwise emit 5 base-85 digits starting at '!',
		// most significant first.
		for i := 4; i >= 0; i-- {
			dst[i] = '!' + byte(word%85)
			word /= 85
		}

		// A short final group keeps only len(src)+1 digits.
		outLen := 5
		if len(src) < 4 {
			outLen -= 4 - len(src)
			src = nil
		} else {
			src = src[4:]
		}
		dst = dst[outLen:]
		written += outLen
	}
	return written
}
// MaxEncodedLen returns the maximum length of an encoding of n source bytes.
func MaxEncodedLen(n int) int {
	groups := (n + 3) / 4 // number of 4-byte input groups, rounding up
	return groups * 5     // each group encodes to at most 5 bytes
}
// NewEncoder returns a new ascii85 stream encoder. Data written to
// the returned writer will be encoded and then written to w.
// Ascii85 encodings operate in 32-bit blocks; when finished
// writing, the caller must Close the returned encoder to flush any
// trailing partial block.
func NewEncoder(w io.Writer) io.WriteCloser {
	e := &encoder{w: w}
	return e
}
// encoder is the io.WriteCloser returned by NewEncoder. It buffers
// partial 4-byte input blocks between Writes; Close flushes the tail.
type encoder struct {
	err  error      // sticky error; once set, all Writes fail with it
	w    io.Writer  // destination for encoded output
	buf  [4]byte    // buffered data waiting to be encoded
	nbuf int        // number of bytes in buf
	out  [1024]byte // output buffer
}
// Write encodes p, buffering any trailing partial 4-byte block until
// a future Write or Close. It returns the number of bytes of p
// consumed and the first error encountered while writing.
func (e *encoder) Write(p []byte) (n int, err error) {
	if e.err != nil {
		return 0, e.err
	}

	// Leading fringe: top up a previously buffered partial block.
	if e.nbuf > 0 {
		var i int
		for i = 0; i < len(p) && e.nbuf < 4; i++ {
			e.buf[e.nbuf] = p[i]
			e.nbuf++
		}
		n += i
		p = p[i:]
		if e.nbuf < 4 {
			// Still not a full block; keep buffering.
			return
		}
		nout := Encode(e.out[0:], e.buf[0:])
		if _, e.err = e.w.Write(e.out[0:nout]); e.err != nil {
			return n, e.err
		}
		e.nbuf = 0
	}

	// Large interior chunks: encode as many whole 4-byte blocks
	// as fit in the output buffer per iteration.
	for len(p) >= 4 {
		nn := len(e.out) / 5 * 4
		if nn > len(p) {
			nn = len(p)
		}
		nn -= nn % 4
		if nn > 0 {
			nout := Encode(e.out[0:], p[0:nn])
			if _, e.err = e.w.Write(e.out[0:nout]); e.err != nil {
				return n, e.err
			}
		}
		n += nn
		p = p[nn:]
	}

	// Trailing fringe: buffer the remaining 0-3 bytes.
	copy(e.buf[:], p)
	e.nbuf = len(p)
	n += len(p)
	return
}
// Close flushes any pending output from the encoder.
// It is an error to call Write after calling Close.
func (e *encoder) Close() error {
	if e.err != nil || e.nbuf == 0 {
		// Nothing buffered, or a previous error already stands.
		return e.err
	}
	// Encode and write the final partial block.
	nout := Encode(e.out[0:], e.buf[0:e.nbuf])
	e.nbuf = 0
	_, e.err = e.w.Write(e.out[0:nout])
	return e.err
}
/*
 * Decoder
 */

// CorruptInputError reports the byte offset of the first illegal
// character encountered while decoding.
type CorruptInputError int64

func (e CorruptInputError) Error() string {
	pos := strconv.FormatInt(int64(e), 10)
	return "illegal ascii85 data at input byte " + pos
}
// Decode decodes src into dst, returning both the number
// of bytes written to dst and the number consumed from src.
// If src contains invalid ascii85 data, Decode will return the
// number of bytes successfully written and a [CorruptInputError].
// Decode ignores space and control characters in src.
// Often, ascii85-encoded data is wrapped in <~ and ~> symbols.
// Decode expects these to have been stripped by the caller.
//
// If flush is true, Decode assumes that src represents the
// end of the input stream and processes it completely rather
// than wait for the completion of another 32-bit block.
//
// [NewDecoder] wraps an [io.Reader] interface around Decode.
func Decode(dst, src []byte, flush bool) (ndst, nsrc int, err error) {
	var (
		accum  uint32 // partial 32-bit group being assembled
		digits int    // base-85 digits folded into accum so far
	)
	for i, b := range src {
		if len(dst)-ndst < 4 {
			// No room for another decoded group; report progress.
			return
		}
		if b <= ' ' {
			// Whitespace and control characters are skipped.
			continue
		}
		if b == 'z' && digits == 0 {
			// 'z' is shorthand for a full group of four zero bytes.
			digits = 5
			accum = 0
		} else if '!' <= b && b <= 'u' {
			accum = accum*85 + uint32(b-'!')
			digits++
		} else {
			return 0, 0, CorruptInputError(i)
		}
		if digits == 5 {
			nsrc = i + 1
			dst[ndst] = byte(accum >> 24)
			dst[ndst+1] = byte(accum >> 16)
			dst[ndst+2] = byte(accum >> 8)
			dst[ndst+3] = byte(accum)
			ndst += 4
			digits = 0
			accum = 0
		}
	}
	if flush {
		nsrc = len(src)
		if digits > 0 {
			// The number of output bytes in the last fragment
			// is the number of leftover input bytes - 1:
			// the extra byte provides enough bits to cover
			// the inefficiency of the encoding for the block.
			if digits == 1 {
				return 0, 0, CorruptInputError(len(src))
			}
			for i := digits; i < 5; i++ {
				// The short encoding truncated the output value.
				// We have to assume the worst case values (digit 84)
				// in order to ensure that the top bits are correct.
				accum = accum*85 + 84
			}
			for i := 0; i < digits-1; i++ {
				dst[ndst] = byte(accum >> 24)
				accum <<= 8
				ndst++
			}
		}
	}
	return
}
// NewDecoder constructs a new ascii85 stream decoder.
func NewDecoder(r io.Reader) io.Reader {
	d := &decoder{r: r}
	return d
}
// decoder is the io.Reader returned by NewDecoder. It buffers raw
// input and decoded output between Reads.
type decoder struct {
	err     error // sticky error returned to all future Reads
	readErr error // error from the underlying reader, surfaced once buffered input is drained
	r       io.Reader
	buf     [1024]byte // leftover input
	nbuf    int        // number of valid bytes in buf
	out     []byte     // leftover decoded output
	outbuf  [1024]byte // backing storage for out
}
// Read implements io.Reader. It returns previously decoded bytes if any are
// buffered, otherwise decodes more of the buffered input, reading from the
// underlying reader as needed. Errors from the underlying reader are
// deferred until all buffered input has been consumed.
func (d *decoder) Read(p []byte) (n int, err error) {
	if len(p) == 0 {
		return 0, nil
	}
	if d.err != nil {
		return 0, d.err
	}
	for {
		// Copy leftover output from last decode.
		if len(d.out) > 0 {
			n = copy(p, d.out)
			d.out = d.out[n:]
			return
		}
		// Decode leftover input from last read.
		var nn, nsrc, ndst int
		if d.nbuf > 0 {
			// flush only once the underlying reader has failed (e.g. EOF),
			// i.e. no further input will ever arrive.
			ndst, nsrc, d.err = Decode(d.outbuf[0:], d.buf[0:d.nbuf], d.readErr != nil)
			if ndst > 0 {
				d.out = d.outbuf[0:ndst]
				// Shift the unconsumed input to the front of buf.
				d.nbuf = copy(d.buf[0:], d.buf[nsrc:d.nbuf])
				continue // copy out and return
			}
			if ndst == 0 && d.err == nil {
				// Special case: input buffer is mostly filled with non-data bytes.
				// Filter out such bytes to make room for more input.
				off := 0
				for i := 0; i < d.nbuf; i++ {
					if d.buf[i] > ' ' {
						d.buf[off] = d.buf[i]
						off++
					}
				}
				d.nbuf = off
			}
		}
		// Out of input, out of decoded output. Check errors.
		if d.err != nil {
			return 0, d.err
		}
		if d.readErr != nil {
			// All buffered input was consumed; now report the read error.
			d.err = d.readErr
			return 0, d.err
		}
		// Read more data.
		nn, d.readErr = d.r.Read(d.buf[d.nbuf:])
		d.nbuf += nn
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
// as defined in ITU-T Rec X.690.
//
// See also “A Layman's Guide to a Subset of ASN.1, BER, and DER,”
// http://luca.ntop.org/Teaching/Appunti/asn1.html.
package asn1
// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
// are different encoding formats for those objects. Here, we'll be dealing
// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
// it's fast to parse and, unlike BER, has a unique encoding for every object.
// When calculating hashes over objects, it's important that the resulting
// bytes be the same at both ends and DER removes this margin of error.
//
// ASN.1 is very complex and this package doesn't attempt to implement
// everything by any means.
import (
"errors"
"fmt"
"internal/saferio"
"math"
"math/big"
"reflect"
"slices"
"strconv"
"strings"
"time"
"unicode/utf16"
"unicode/utf8"
)
// A StructuralError suggests that the ASN.1 data is valid, but the Go type
// which is receiving it doesn't match.
type StructuralError struct {
	Msg string
}

// Error implements the error interface.
func (e StructuralError) Error() string {
	return "asn1: structure error: " + e.Msg
}
// A SyntaxError suggests that the ASN.1 data is invalid.
type SyntaxError struct {
	Msg string
}

// Error implements the error interface.
func (e SyntaxError) Error() string {
	return "asn1: syntax error: " + e.Msg
}
// We start by dealing with each of the primitive types in turn.

// BOOLEAN

// parseBool parses a DER-encoded ASN.1 BOOLEAN from the given content bytes.
func parseBool(bytes []byte) (ret bool, err error) {
	// A BOOLEAN has exactly one content octet.
	if len(bytes) != 1 {
		return false, SyntaxError{"invalid boolean"}
	}
	// DER demands that "If the encoding represents the boolean value TRUE,
	// its single contents octet shall have all eight bits set to one."
	// Thus only 0 and 255 are valid encoded values.
	if bytes[0] == 0 {
		return false, nil
	}
	if bytes[0] == 0xff {
		return true, nil
	}
	return false, SyntaxError{"invalid boolean"}
}
// INTEGER

// checkInteger returns nil if the given bytes are a valid DER-encoded
// INTEGER and an error otherwise.
func checkInteger(bytes []byte) error {
	switch {
	case len(bytes) == 0:
		return StructuralError{"empty integer"}
	case len(bytes) == 1:
		return nil
	case bytes[0] == 0 && bytes[1]&0x80 == 0,
		bytes[0] == 0xff && bytes[1]&0x80 == 0x80:
		// A leading 0x00 (or 0xff) octet is only permitted when it is
		// needed to fix the sign of the following octet.
		return StructuralError{"integer not minimally-encoded"}
	}
	return nil
}
// parseInt64 treats the given bytes as a big-endian, signed integer and
// returns the result.
func parseInt64(bytes []byte) (ret int64, err error) {
	if err = checkInteger(bytes); err != nil {
		return
	}
	if len(bytes) > 8 {
		// We'll overflow an int64 in this case.
		return 0, StructuralError{"integer too large"}
	}
	for _, octet := range bytes {
		ret = ret<<8 | int64(octet)
	}
	// Shift up and down in order to sign extend the result.
	shift := 64 - uint8(len(bytes))*8
	ret <<= shift
	ret >>= shift
	return ret, nil
}
// parseInt32 treats the given bytes as a big-endian, signed integer and returns
// the result.
func parseInt32(bytes []byte) (int32, error) {
	err := checkInteger(bytes)
	if err != nil {
		return 0, err
	}
	ret64, err := parseInt64(bytes)
	if err != nil {
		return 0, err
	}
	// Reject values that do not round-trip through int32.
	ret32 := int32(ret64)
	if int64(ret32) != ret64 {
		return 0, StructuralError{"integer too large"}
	}
	return ret32, nil
}
var bigOne = big.NewInt(1)

// parseBigInt treats the given bytes as a big-endian, signed integer and returns
// the result.
func parseBigInt(bytes []byte) (*big.Int, error) {
	if err := checkInteger(bytes); err != nil {
		return nil, err
	}
	ret := new(big.Int)
	if len(bytes) == 0 || bytes[0]&0x80 == 0 {
		// Non-negative: the content octets are the magnitude directly.
		ret.SetBytes(bytes)
		return ret, nil
	}
	// This is a negative number in two's complement: invert every octet,
	// treat the result as a magnitude, add one and negate.
	notBytes := make([]byte, len(bytes))
	for i, b := range bytes {
		notBytes[i] = ^b
	}
	ret.SetBytes(notBytes)
	ret.Add(ret, bigOne)
	ret.Neg(ret)
	return ret, nil
}
// BIT STRING

// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
// bit string is padded up to the nearest byte in memory and the number of
// valid bits is recorded. Padding bits will be zero.
type BitString struct {
	Bytes     []byte // bits packed into bytes.
	BitLength int    // length in bits.
}

// At returns the bit at the given index. If the index is out of range it
// returns 0.
func (b BitString) At(i int) int {
	if i < 0 || i >= b.BitLength {
		return 0
	}
	byteIndex := i / 8
	// Bit 0 of the string is the most significant bit of the first byte.
	bitShift := 7 - uint(i%8)
	return int(b.Bytes[byteIndex]>>bitShift) & 1
}

// RightAlign returns a slice where the padding bits are at the beginning. The
// slice may share memory with the BitString.
func (b BitString) RightAlign() []byte {
	shift := uint(8 - (b.BitLength % 8))
	if shift == 8 || len(b.Bytes) == 0 {
		// Already byte-aligned (or empty): no shifting required.
		return b.Bytes
	}

	aligned := make([]byte, len(b.Bytes))
	aligned[0] = b.Bytes[0] >> shift
	for i := 1; i < len(b.Bytes); i++ {
		aligned[i] = b.Bytes[i-1]<<(8-shift) | b.Bytes[i]>>shift
	}
	return aligned
}
// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
func parseBitString(bytes []byte) (ret BitString, err error) {
	if len(bytes) == 0 {
		return BitString{}, SyntaxError{"zero length BIT STRING"}
	}
	paddingBits := int(bytes[0])
	// At most 7 padding bits are allowed, a lone padding octet may not
	// declare padding, and every padding bit must be zero.
	badPadding := paddingBits > 7 ||
		len(bytes) == 1 && paddingBits > 0 ||
		bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0
	if badPadding {
		return BitString{}, SyntaxError{"invalid padding bits in BIT STRING"}
	}
	ret.BitLength = (len(bytes)-1)*8 - paddingBits
	ret.Bytes = bytes[1:]
	return ret, nil
}
// NULL

// NullRawValue is a [RawValue] with its Tag set to the ASN.1 NULL type tag (5).
var NullRawValue = RawValue{Tag: TagNull}

// NullBytes contains bytes representing the DER-encoded ASN.1 NULL type:
// the NULL tag followed by a zero length octet.
var NullBytes = []byte{TagNull, 0}
// OBJECT IDENTIFIER

// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
type ObjectIdentifier []int

// Equal reports whether oi and other represent the same identifier.
func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
	return slices.Equal(oi, other)
}

// String returns the dotted-decimal representation of oi, e.g. "1.2.840".
func (oi ObjectIdentifier) String() string {
	var sb strings.Builder
	sb.Grow(32)
	for i, component := range oi {
		if i != 0 {
			sb.WriteByte('.')
		}
		sb.WriteString(strconv.Itoa(component))
	}
	return sb.String()
}
// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
// returns it. An object identifier is a sequence of variable length integers
// that are assigned in a hierarchy.
func parseObjectIdentifier(bytes []byte) (s ObjectIdentifier, err error) {
	if len(bytes) == 0 {
		return nil, SyntaxError{"zero length OBJECT IDENTIFIER"}
	}

	// In the worst case, we get two elements from the first byte (which is
	// encoded differently) and then every varint is a single byte long.
	s = make([]int, len(bytes)+1)

	// The first varint is 40*value1 + value2:
	// According to this packing, value1 can take the values 0, 1 and 2 only.
	// When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
	// then there are no restrictions on value2.
	v, offset, err := parseBase128Int(bytes, 0)
	if err != nil {
		return
	}
	if v >= 80 {
		s[0] = 2
		s[1] = v - 80
	} else {
		s[0] = v / 40
		s[1] = v % 40
	}

	n := 2
	for offset < len(bytes) {
		v, offset, err = parseBase128Int(bytes, offset)
		if err != nil {
			return
		}
		s[n] = v
		n++
	}
	return s[:n], nil
}
// ENUMERATED

// An Enumerated is represented as a plain int.
type Enumerated int

// FLAG

// A Flag accepts any data and is set to true if present.
type Flag bool
// parseBase128Int parses a base-128 encoded int from the given offset in the
// given byte slice. It returns the value and the new offset. The high bit of
// each octet is a continuation flag; the low seven bits carry the value,
// most significant group first.
func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
	offset = initOffset
	var ret64 int64
	for shifted := 0; offset < len(bytes); shifted++ {
		// 5 * 7 bits per byte == 35 bits of data
		// Thus the representation is either non-minimal or too large for an int32
		if shifted == 5 {
			err = StructuralError{"base 128 integer too large"}
			return
		}
		ret64 <<= 7
		b := bytes[offset]
		// integers should be minimally encoded, so the leading octet should
		// never be 0x80
		if shifted == 0 && b == 0x80 {
			err = SyntaxError{"integer is not minimally encoded"}
			return
		}
		ret64 |= int64(b & 0x7f)
		offset++
		// A clear high bit marks the final octet of the varint.
		if b&0x80 == 0 {
			ret = int(ret64)
			// Ensure that the returned value fits in an int on all platforms
			if ret64 > math.MaxInt32 {
				err = StructuralError{"base 128 integer too large"}
			}
			return
		}
	}
	err = SyntaxError{"truncated base 128 integer"}
	return
}
// UTCTime

// parseUTCTime parses an ASN.1 UTCTime from the given bytes. Encodings with
// and without seconds are accepted, and two-digit years >= 50 are mapped
// into 1950-1999 per RFC 5280.
func parseUTCTime(bytes []byte) (ret time.Time, err error) {
	s := string(bytes)

	// Try the format without seconds first, then the one with seconds.
	var formatStr string
	for _, formatStr = range []string{"0601021504Z0700", "060102150405Z0700"} {
		if ret, err = time.Parse(formatStr, s); err == nil {
			break
		}
	}
	if err != nil {
		return
	}

	// Reject encodings that do not round-trip exactly through the format
	// that matched; those are not canonical.
	if serialized := ret.Format(formatStr); serialized != s {
		err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
		return
	}

	if ret.Year() >= 2050 {
		// UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
		ret = ret.AddDate(-100, 0, 0)
	}

	return
}
// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
// and returns the resulting time.
func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) {
	const formatStr = "20060102150405.999999999Z0700"

	s := string(bytes)
	ret, err = time.Parse(formatStr, s)
	if err != nil {
		return
	}

	// A canonical encoding must format back to exactly the input.
	serialized := ret.Format(formatStr)
	if serialized != s {
		err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
	}
	return
}
// NumericString

// parseNumericString parses an ASN.1 NumericString from the given byte array
// and returns it.
func parseNumericString(bytes []byte) (ret string, err error) {
	for i := 0; i < len(bytes); i++ {
		if !isNumeric(bytes[i]) {
			return "", SyntaxError{"NumericString contains invalid character"}
		}
	}
	return string(bytes), nil
}
// isNumeric reports whether the given b is in the ASN.1 NumericString set:
// the digits '0' through '9' and the space character.
func isNumeric(b byte) bool {
	switch {
	case '0' <= b && b <= '9':
		return true
	case b == ' ':
		return true
	}
	return false
}
// PrintableString

// parsePrintableString parses an ASN.1 PrintableString from the given byte
// array and returns it.
func parsePrintableString(bytes []byte) (ret string, err error) {
	for i := range bytes {
		if !isPrintable(bytes[i], allowAsterisk, allowAmpersand) {
			return "", SyntaxError{"PrintableString contains invalid character"}
		}
	}
	return string(bytes), nil
}
// asteriskFlag and ampersandFlag make call sites of isPrintable
// self-documenting about which relaxations are in effect.
type asteriskFlag bool
type ampersandFlag bool

const (
	allowAsterisk  asteriskFlag = true
	rejectAsterisk asteriskFlag = false

	allowAmpersand  ampersandFlag = true
	rejectAmpersand ampersandFlag = false
)

// isPrintable reports whether the given b is in the ASN.1 PrintableString set.
// If asterisk is allowAsterisk then '*' is also allowed, reflecting existing
// practice. If ampersand is allowAmpersand then '&' is allowed as well.
func isPrintable(b byte, asterisk asteriskFlag, ampersand ampersandFlag) bool {
	switch {
	case 'a' <= b && b <= 'z',
		'A' <= b && b <= 'Z',
		'0' <= b && b <= '9',
		'\'' <= b && b <= ')',
		'+' <= b && b <= '/',
		b == ' ', b == ':', b == '=', b == '?':
		return true
	case b == '*':
		// This is technically not allowed in a PrintableString.
		// However, x509 certificates with wildcard strings don't
		// always use the correct string type so we permit it.
		return bool(asterisk)
	case b == '&':
		// This is not technically allowed either. However, not
		// only is it relatively common, but there are also a
		// handful of CA certificates that contain it. At least
		// one of which will not expire until 2027.
		return bool(ampersand)
	}
	return false
}
// IA5String

// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
// byte slice and returns it.
func parseIA5String(bytes []byte) (ret string, err error) {
	for i := 0; i < len(bytes); i++ {
		// IA5 is 7-bit ASCII; any byte with the high bit set is invalid.
		if bytes[i] >= utf8.RuneSelf {
			return "", SyntaxError{"IA5String contains invalid character"}
		}
	}
	return string(bytes), nil
}
// T61String

// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given
// byte slice and returns it.
func parseT61String(bytes []byte) (ret string, err error) {
	// T.61 is a defunct ITU 8-bit character encoding which preceded Unicode.
	// T.61 uses a code page layout that _almost_ exactly maps to the code
	// page layout of the ISO 8859-1 (Latin-1) character encoding, with the
	// exception that a number of characters in Latin-1 are not present
	// in T.61.
	//
	// Instead of mapping which characters are present in Latin-1 but not T.61,
	// we just treat these strings as being encoded using Latin-1. This matches
	// what most of the world does, including BoringSSL.
	out := make([]byte, 0, len(bytes))
	for _, octet := range bytes {
		// All the 1-byte UTF-8 runes map 1-1 with Latin-1.
		out = utf8.AppendRune(out, rune(octet))
	}
	return string(out), nil
}
// UTF8String

// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
// array and returns it.
func parseUTF8String(bytes []byte) (ret string, err error) {
	if utf8.Valid(bytes) {
		return string(bytes), nil
	}
	return "", errors.New("asn1: invalid UTF-8 string")
}
// BMPString

// parseBMPString parses an ASN.1 BMPString (Basic Multilingual Plane of
// ISO/IEC/ITU 10646-1) from the given byte slice and returns it.
func parseBMPString(bmpString []byte) (string, error) {
	// BMPString uses the defunct UCS-2 16-bit character encoding, which
	// covers the Basic Multilingual Plane (BMP). UTF-16 was an extension of
	// UCS-2, containing all of the same code points, but also including
	// multi-code point characters (by using surrogate code points). We can
	// treat a UCS-2 encoded string as a UTF-16 encoded string, as long as
	// we reject out the UTF-16 specific code points. This matches the
	// BoringSSL behavior.
	if len(bmpString)%2 != 0 {
		return "", errors.New("invalid BMPString")
	}

	// Strip terminator if present.
	if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
		bmpString = bmpString[:l-2]
	}

	units := make([]uint16, 0, len(bmpString)/2)
	for i := 0; i+1 < len(bmpString); i += 2 {
		point := uint16(bmpString[i])<<8 | uint16(bmpString[i+1])
		// Reject UTF-16 code points that are permanently reserved
		// noncharacters (0xfffe, 0xffff, and 0xfdd0-0xfdef) and surrogates
		// (0xd800-0xdfff).
		switch {
		case point == 0xfffe, point == 0xffff:
			return "", errors.New("invalid BMPString")
		case 0xfdd0 <= point && point <= 0xfdef:
			return "", errors.New("invalid BMPString")
		case 0xd800 <= point && point <= 0xdfff:
			return "", errors.New("invalid BMPString")
		}
		units = append(units, point)
	}
	return string(utf16.Decode(units)), nil
}
// A RawValue represents an undecoded ASN.1 object.
type RawValue struct {
	Class, Tag int    // ASN.1 class and tag number of the element
	IsCompound bool   // whether the constructed (compound) bit was set
	Bytes      []byte // the content octets only
	FullBytes  []byte // includes the tag and length
}

// RawContent is used to signal that the undecoded, DER data needs to be
// preserved for a struct. To use it, the first field of the struct must have
// this type. It's an error for any of the other fields to have this type.
type RawContent []byte
// Tagging

// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
// into a byte slice. It returns the parsed data and the new offset. SET and
// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
// don't distinguish between ordered and unordered objects in this code.
func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
	offset = initOffset
	// parseTagAndLength should not be called without at least a single
	// byte to read. Thus this check is for robustness:
	if offset >= len(bytes) {
		err = errors.New("asn1: internal error in parseTagAndLength")
		return
	}
	b := bytes[offset]
	offset++
	// Identifier octet: top two bits are the class, bit 6 is the
	// constructed/primitive flag, and the low five bits are the tag number.
	ret.class = int(b >> 6)
	ret.isCompound = b&0x20 == 0x20
	ret.tag = int(b & 0x1f)

	// If the bottom five bits are set, then the tag number is actually base 128
	// encoded afterwards
	if ret.tag == 0x1f {
		ret.tag, offset, err = parseBase128Int(bytes, offset)
		if err != nil {
			return
		}
		// Tags should be encoded in minimal form.
		if ret.tag < 0x1f {
			err = SyntaxError{"non-minimal tag"}
			return
		}
	}
	if offset >= len(bytes) {
		err = SyntaxError{"truncated tag or length"}
		return
	}
	b = bytes[offset]
	offset++
	if b&0x80 == 0 {
		// The length is encoded in the bottom 7 bits.
		ret.length = int(b & 0x7f)
	} else {
		// Bottom 7 bits give the number of length bytes to follow.
		numBytes := int(b & 0x7f)
		if numBytes == 0 {
			err = SyntaxError{"indefinite length found (not DER)"}
			return
		}
		ret.length = 0
		for i := 0; i < numBytes; i++ {
			if offset >= len(bytes) {
				err = SyntaxError{"truncated tag or length"}
				return
			}
			b = bytes[offset]
			offset++
			if ret.length >= 1<<23 {
				// We can't shift ret.length up without
				// overflowing.
				err = StructuralError{"length too large"}
				return
			}
			ret.length <<= 8
			ret.length |= int(b)
			if ret.length == 0 {
				// DER requires that lengths be minimal.
				err = StructuralError{"superfluous leading zeros in length"}
				return
			}
		}
		// Short lengths must be encoded in short form.
		if ret.length < 0x80 {
			err = StructuralError{"non-minimal length"}
			return
		}
	}

	return
}
// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
// a number of ASN.1 values from the given byte slice and returns them as a
// slice of Go values of the given type. It makes two passes: one to count and
// type-check the elements, one to parse them.
func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
	matchAny, expectedTag, compoundType, ok := getUniversalType(elemType)
	if !ok {
		err = StructuralError{"unknown Go type for slice"}
		return
	}

	// First we iterate over the input and count the number of elements,
	// checking that the types are correct in each case.
	numElements := 0
	for offset := 0; offset < len(bytes); {
		var t tagAndLength
		t, offset, err = parseTagAndLength(bytes, offset)
		if err != nil {
			return
		}
		switch t.tag {
		case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString:
			// We pretend that various other string types are
			// PRINTABLE STRINGs so that a sequence of them can be
			// parsed into a []string.
			t.tag = TagPrintableString
		case TagGeneralizedTime, TagUTCTime:
			// Likewise, both time types are treated the same.
			t.tag = TagUTCTime
		}

		if !matchAny && (t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag) {
			err = StructuralError{"sequence tag mismatch"}
			return
		}
		if invalidLength(offset, t.length, len(bytes)) {
			err = SyntaxError{"truncated sequence"}
			return
		}
		offset += t.length
		numElements++
	}
	// Cap the initial allocation so a hostile element count cannot make us
	// reserve an unbounded amount of memory up front.
	elemSize := uint64(elemType.Size())
	safeCap := saferio.SliceCapWithSize(elemSize, uint64(numElements))
	if safeCap < 0 {
		err = SyntaxError{fmt.Sprintf("%s slice too big: %d elements of %d bytes", elemType.Kind(), numElements, elemSize)}
		return
	}
	ret = reflect.MakeSlice(sliceType, 0, safeCap)
	params := fieldParameters{}
	offset := 0
	// Second pass: parse each element into a freshly appended zero value.
	for i := 0; i < numElements; i++ {
		ret = reflect.Append(ret, reflect.Zero(elemType))
		offset, err = parseField(ret.Index(i), bytes, offset, params)
		if err != nil {
			return
		}
	}
	return
}
// Cached reflect.Types for the special types this package recognizes when
// decoding into struct fields.
var (
	bitStringType        = reflect.TypeFor[BitString]()
	objectIdentifierType = reflect.TypeFor[ObjectIdentifier]()
	enumeratedType       = reflect.TypeFor[Enumerated]()
	flagType             = reflect.TypeFor[Flag]()
	timeType             = reflect.TypeFor[time.Time]()
	rawValueType         = reflect.TypeFor[RawValue]()
	rawContentsType      = reflect.TypeFor[RawContent]()
	bigIntType           = reflect.TypeFor[*big.Int]()
)
// invalidLength reports whether offset + length > sliceLength, or if the
// addition would overflow.
func invalidLength(offset, length, sliceLength int) bool {
	end := offset + length
	return end < offset || end > sliceLength
}
// parseField is the main parsing function. Given a byte slice and an offset
// into the array, it will try to parse a suitable ASN.1 value out and store it
// in the given Value. It handles, in order: truncated input (optional
// fields), the ANY type (empty interface), explicit tag unwrapping, tag
// matching, the package's special types (RawValue, ObjectIdentifier, ...)
// and finally plain Go kinds (bool, ints, struct, slice, string).
func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
	offset = initOffset
	fieldType := v.Type()
	// If we have run out of data, it may be that there are optional elements at the end.
	if offset == len(bytes) {
		if !setDefaultValue(v, params) {
			err = SyntaxError{"sequence truncated"}
		}
		return
	}
	// Deal with the ANY type.
	if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
		var t tagAndLength
		t, offset, err = parseTagAndLength(bytes, offset)
		if err != nil {
			return
		}
		if invalidLength(offset, t.length, len(bytes)) {
			err = SyntaxError{"data truncated"}
			return
		}
		// Only universal, primitive values are decoded into a concrete
		// Go value; anything else leaves the interface nil.
		var result any
		if !t.isCompound && t.class == ClassUniversal {
			innerBytes := bytes[offset : offset+t.length]
			switch t.tag {
			case TagBoolean:
				result, err = parseBool(innerBytes)
			case TagPrintableString:
				result, err = parsePrintableString(innerBytes)
			case TagNumericString:
				result, err = parseNumericString(innerBytes)
			case TagIA5String:
				result, err = parseIA5String(innerBytes)
			case TagT61String:
				result, err = parseT61String(innerBytes)
			case TagUTF8String:
				result, err = parseUTF8String(innerBytes)
			case TagInteger:
				result, err = parseInt64(innerBytes)
			case TagBitString:
				result, err = parseBitString(innerBytes)
			case TagOID:
				result, err = parseObjectIdentifier(innerBytes)
			case TagUTCTime:
				result, err = parseUTCTime(innerBytes)
			case TagGeneralizedTime:
				result, err = parseGeneralizedTime(innerBytes)
			case TagOctetString:
				result = innerBytes
			case TagBMPString:
				result, err = parseBMPString(innerBytes)
			default:
				// If we don't know how to handle the type, we just leave Value as nil.
			}
		}
		offset += t.length
		if err != nil {
			return
		}
		if result != nil {
			v.Set(reflect.ValueOf(result))
		}
		return
	}

	t, offset, err := parseTagAndLength(bytes, offset)
	if err != nil {
		return
	}
	if params.explicit {
		// An explicit tag wraps the real element; unwrap it here so the
		// code below only deals with the inner tag.
		expectedClass := ClassContextSpecific
		if params.application {
			expectedClass = ClassApplication
		}
		if offset == len(bytes) {
			err = StructuralError{"explicit tag has no child"}
			return
		}
		if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
			if fieldType == rawValueType {
				// The inner element should not be parsed for RawValues.
			} else if t.length > 0 {
				t, offset, err = parseTagAndLength(bytes, offset)
				if err != nil {
					return
				}
			} else {
				// Zero-length explicit tag: only valid as a present Flag.
				if fieldType != flagType {
					err = StructuralError{"zero length explicit tag was not an asn1.Flag"}
					return
				}
				v.SetBool(true)
				return
			}
		} else {
			// The tags didn't match, it might be an optional element.
			ok := setDefaultValue(v, params)
			if ok {
				offset = initOffset
			} else {
				err = StructuralError{"explicitly tagged member didn't match"}
			}
			return
		}
	}

	matchAny, universalTag, compoundType, ok1 := getUniversalType(fieldType)
	if !ok1 {
		err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
		return
	}

	// Special case for strings: all the ASN.1 string types map to the Go
	// type string. getUniversalType returns the tag for PrintableString
	// when it sees a string, so if we see a different string type on the
	// wire, we change the universal type to match.
	if universalTag == TagPrintableString {
		if t.class == ClassUniversal {
			switch t.tag {
			case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString, TagBMPString:
				universalTag = t.tag
			}
		} else if params.stringType != 0 {
			universalTag = params.stringType
		}
	}

	// Special case for time: UTCTime and GeneralizedTime both map to the
	// Go type time.Time. getUniversalType returns the tag for UTCTime when
	// it sees a time.Time, so if we see a different time type on the wire,
	// or the field is tagged with a different type, we change the universal
	// type to match.
	if universalTag == TagUTCTime {
		if t.class == ClassUniversal {
			if t.tag == TagGeneralizedTime {
				universalTag = t.tag
			}
		} else if params.timeType != 0 {
			universalTag = params.timeType
		}
	}

	if params.set {
		universalTag = TagSet
	}

	// Work out which class/tag the element on the wire must carry, taking
	// implicit tagging (tag:x without explicit) into account.
	matchAnyClassAndTag := matchAny
	expectedClass := ClassUniversal
	expectedTag := universalTag

	if !params.explicit && params.tag != nil {
		expectedClass = ClassContextSpecific
		expectedTag = *params.tag
		matchAnyClassAndTag = false
	}

	if !params.explicit && params.application && params.tag != nil {
		expectedClass = ClassApplication
		expectedTag = *params.tag
		matchAnyClassAndTag = false
	}

	if !params.explicit && params.private && params.tag != nil {
		expectedClass = ClassPrivate
		expectedTag = *params.tag
		matchAnyClassAndTag = false
	}

	// We have unwrapped any explicit tagging at this point.
	if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) ||
		(!matchAny && t.isCompound != compoundType) {
		// Tags don't match. Again, it could be an optional element.
		ok := setDefaultValue(v, params)
		if ok {
			offset = initOffset
		} else {
			err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
		}
		return
	}
	if invalidLength(offset, t.length, len(bytes)) {
		err = SyntaxError{"data truncated"}
		return
	}
	innerBytes := bytes[offset : offset+t.length]
	offset += t.length

	// We deal with the structures defined in this package first.
	switch v := v.Addr().Interface().(type) {
	case *RawValue:
		*v = RawValue{t.class, t.tag, t.isCompound, innerBytes, bytes[initOffset:offset]}
		return
	case *ObjectIdentifier:
		*v, err = parseObjectIdentifier(innerBytes)
		return
	case *BitString:
		*v, err = parseBitString(innerBytes)
		return
	case *time.Time:
		if universalTag == TagUTCTime {
			*v, err = parseUTCTime(innerBytes)
			return
		}
		*v, err = parseGeneralizedTime(innerBytes)
		return
	case *Enumerated:
		parsedInt, err1 := parseInt32(innerBytes)
		if err1 == nil {
			*v = Enumerated(parsedInt)
		}
		err = err1
		return
	case *Flag:
		// A Flag is true whenever the element is present; the content is
		// not inspected.
		*v = true
		return
	case **big.Int:
		parsedInt, err1 := parseBigInt(innerBytes)
		if err1 == nil {
			*v = parsedInt
		}
		err = err1
		return
	}
	switch val := v; val.Kind() {
	case reflect.Bool:
		parsedBool, err1 := parseBool(innerBytes)
		if err1 == nil {
			val.SetBool(parsedBool)
		}
		err = err1
		return
	case reflect.Int, reflect.Int32, reflect.Int64:
		if val.Type().Size() == 4 {
			parsedInt, err1 := parseInt32(innerBytes)
			if err1 == nil {
				val.SetInt(int64(parsedInt))
			}
			err = err1
		} else {
			parsedInt, err1 := parseInt64(innerBytes)
			if err1 == nil {
				val.SetInt(parsedInt)
			}
			err = err1
		}
		return
	// TODO(dfc) Add support for the remaining integer types
	case reflect.Struct:
		structType := fieldType

		// Unexported fields cannot be set via reflection.
		for i := 0; i < structType.NumField(); i++ {
			if !structType.Field(i).IsExported() {
				err = StructuralError{"struct contains unexported fields"}
				return
			}
		}

		// A leading RawContent field captures the element's full bytes.
		if structType.NumField() > 0 &&
			structType.Field(0).Type == rawContentsType {
			bytes := bytes[initOffset:offset]
			val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
		}

		innerOffset := 0
		for i := 0; i < structType.NumField(); i++ {
			field := structType.Field(i)
			if i == 0 && field.Type == rawContentsType {
				continue
			}
			innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
			if err != nil {
				return
			}
		}
		// We allow extra bytes at the end of the SEQUENCE because
		// adding elements to the end has been used in X.509 as the
		// version numbers have increased.
		return
	case reflect.Slice:
		sliceType := fieldType
		if sliceType.Elem().Kind() == reflect.Uint8 {
			// []byte receives the raw content octets.
			val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
			reflect.Copy(val, reflect.ValueOf(innerBytes))
			return
		}
		newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
		if err1 == nil {
			val.Set(newSlice)
		}
		err = err1
		return
	case reflect.String:
		var v string
		switch universalTag {
		case TagPrintableString:
			v, err = parsePrintableString(innerBytes)
		case TagNumericString:
			v, err = parseNumericString(innerBytes)
		case TagIA5String:
			v, err = parseIA5String(innerBytes)
		case TagT61String:
			v, err = parseT61String(innerBytes)
		case TagUTF8String:
			v, err = parseUTF8String(innerBytes)
		case TagGeneralString:
			// GeneralString is specified in ISO-2022/ECMA-35,
			// A brief review suggests that it includes structures
			// that allow the encoding to change midstring and
			// such. We give up and pass it as an 8-bit string.
			v, err = parseT61String(innerBytes)
		case TagBMPString:
			v, err = parseBMPString(innerBytes)
		default:
			err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
		}
		if err == nil {
			val.SetString(v)
		}
		return
	}
	err = StructuralError{"unsupported: " + v.Type().String()}
	return
}
// canHaveDefaultValue reports whether k is a Kind that we will set a default
// value for. (A signed integer, essentially.)
func canHaveDefaultValue(k reflect.Kind) bool {
	return k == reflect.Int || k == reflect.Int8 || k == reflect.Int16 ||
		k == reflect.Int32 || k == reflect.Int64
}
// setDefaultValue is used to install a default value, from a tag string, into
// a Value. It is successful if the field was optional, even if a default value
// wasn't provided or it failed to install it into the Value.
func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
	if !params.optional {
		return false
	}
	if params.defaultValue != nil && canHaveDefaultValue(v.Kind()) {
		v.SetInt(*params.defaultValue)
	}
	return true
}
// Unmarshal parses the DER-encoded ASN.1 data structure b
// and uses the reflect package to fill in an arbitrary value pointed at by val.
// Because Unmarshal uses the reflect package, the structs
// being written to must use upper case field names. If val
// is nil or not a pointer, Unmarshal returns an error.
//
// After parsing b, any bytes that were leftover and not used to fill
// val will be returned in rest. When parsing a SEQUENCE into a struct,
// any trailing elements of the SEQUENCE that do not have matching
// fields in val will not be included in rest, as these are considered
// valid elements of the SEQUENCE and not trailing data.
//
// - An ASN.1 INTEGER can be written to an int, int32, int64,
// or *[big.Int].
// If the encoded value does not fit in the Go type,
// Unmarshal returns a parse error.
//
// - An ASN.1 BIT STRING can be written to a [BitString].
//
// - An ASN.1 OCTET STRING can be written to a []byte.
//
// - An ASN.1 OBJECT IDENTIFIER can be written to an [ObjectIdentifier].
//
// - An ASN.1 ENUMERATED can be written to an [Enumerated].
//
// - An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a [time.Time].
//
// - An ASN.1 PrintableString, IA5String, or NumericString can be written to a string.
//
// - Any of the above ASN.1 values can be written to an interface{}.
// The value stored in the interface has the corresponding Go type.
// For integers, that type is int64.
//
// - An ASN.1 SEQUENCE OF x or SET OF x can be written
// to a slice if an x can be written to the slice's element type.
//
// - An ASN.1 SEQUENCE or SET can be written to a struct
// if each of the elements in the sequence can be
// written to the corresponding element in the struct.
//
// The following tags on struct fields have special meaning to Unmarshal:
//
// application specifies that an APPLICATION tag is used
// private specifies that a PRIVATE tag is used
// default:x sets the default value for optional integer fields (only used if optional is also present)
// explicit specifies that an additional, explicit tag wraps the implicit one
// optional marks the field as ASN.1 OPTIONAL
// set causes a SET, rather than a SEQUENCE type to be expected
// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
//
// When decoding an ASN.1 value with an IMPLICIT tag into a string field,
// Unmarshal will default to a PrintableString, which doesn't support
// characters such as '@' and '&'. To force other encodings, use the following
// tags:
//
// ia5 causes strings to be unmarshaled as ASN.1 IA5String values
// numeric causes strings to be unmarshaled as ASN.1 NumericString values
// utf8 causes strings to be unmarshaled as ASN.1 UTF8String values
//
// When decoding an ASN.1 value with an IMPLICIT tag into a time.Time field,
// Unmarshal will default to a UTCTime, which doesn't support time zones or
// fractional seconds. To force usage of GeneralizedTime, use the following
// tag:
//
// generalized causes time.Times to be unmarshaled as ASN.1 GeneralizedTime values
//
// If the type of the first field of a structure is RawContent then the raw
// ASN.1 contents of the struct will be stored in it.
//
// If the name of a slice type ends with "SET" then it's treated as if
// the "set" tag was set on it. This results in interpreting the type as a
// SET OF x rather than a SEQUENCE OF x. This can be used with nested slices
// where a struct tag cannot be given.
//
// Other ASN.1 types are not supported; if it encounters them,
// Unmarshal returns a parse error.
func Unmarshal(b []byte, val any) (rest []byte, err error) {
	// Plain Unmarshal is UnmarshalWithParams with no top-level field tags.
	rest, err = UnmarshalWithParams(b, val, "")
	return
}
// An invalidUnmarshalError describes an invalid argument passed to Unmarshal.
// (The argument to Unmarshal must be a non-nil pointer.)
type invalidUnmarshalError struct {
Type reflect.Type
}
func (e *invalidUnmarshalError) Error() string {
if e.Type == nil {
return "asn1: Unmarshal recipient value is nil"
}
if e.Type.Kind() != reflect.Pointer {
return "asn1: Unmarshal recipient value is non-pointer " + e.Type.String()
}
return "asn1: Unmarshal recipient value is nil " + e.Type.String()
}
// UnmarshalWithParams allows field parameters to be specified for the
// top-level element. The form of the params is the same as the field tags.
func UnmarshalWithParams(b []byte, val any, params string) (rest []byte, err error) {
	rv := reflect.ValueOf(val)
	if rv.Kind() != reflect.Pointer || rv.IsNil() {
		// Only a non-nil pointer can receive the decoded value.
		return nil, &invalidUnmarshalError{reflect.TypeOf(val)}
	}
	offset, err := parseField(rv.Elem(), b, 0, parseFieldParameters(params))
	if err != nil {
		return nil, err
	}
	// Everything past the decoded element is returned as trailing data.
	return b[offset:], nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package asn1
import (
"reflect"
"strconv"
"strings"
)
// ASN.1 objects have metadata preceding them:
// the tag: the type of the object
// a flag denoting if this object is compound or not
// the class type: the namespace of the tag
// the length of the object, in bytes
// Here are some standard tags and classes
// ASN.1 tags represent the type of the following object.
// The values are the UNIVERSAL-class tag numbers assigned by the ASN.1
// standard (ITU-T X.680).
const (
	TagBoolean         = 1
	TagInteger         = 2
	TagBitString       = 3
	TagOctetString     = 4
	TagNull            = 5
	TagOID             = 6
	TagEnum            = 10
	TagUTF8String      = 12
	TagSequence        = 16
	TagSet             = 17
	TagNumericString   = 18
	TagPrintableString = 19
	TagT61String       = 20
	TagIA5String       = 22
	TagUTCTime         = 23
	TagGeneralizedTime = 24
	TagGeneralString   = 27
	TagBMPString       = 30
)

// ASN.1 class types represent the namespace of the tag.
const (
	ClassUniversal       = 0
	ClassApplication     = 1
	ClassContextSpecific = 2
	ClassPrivate         = 3
)

// tagAndLength is the parsed form of an element's header: its class, tag
// number, body length in bytes, and whether the element is constructed
// (compound) rather than primitive.
type tagAndLength struct {
	class, tag, length int
	isCompound         bool
}
// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
// of" and "in addition to". When not specified, every primitive type has a
// default tag in the UNIVERSAL class.
//
// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT
// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another.
//
// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
// /additional/ tag would wrap the default tag. This explicit tag will have the
// compound flag set.
//
// (This is used in order to remove ambiguity with optional elements.)
//
// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we
// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
// tagging with tag strings on the fields of a structure.
// fieldParameters is the parsed representation of tag string from a structure field.
type fieldParameters struct {
	optional     bool   // true iff the field is OPTIONAL
	explicit     bool   // true iff an EXPLICIT tag is in use.
	application  bool   // true iff an APPLICATION tag is in use.
	private      bool   // true iff a PRIVATE tag is in use.
	defaultValue *int64 // a default value for INTEGER typed fields (may be nil).
	tag          *int   // the EXPLICIT or IMPLICIT tag (may be nil).
	stringType   int    // the string tag to use when marshaling.
	timeType     int    // the time tag to use when marshaling.
	set          bool   // true iff this should be encoded as a SET
	omitEmpty    bool   // true iff this should be omitted if empty when marshaling.

	// Invariants:
	//   if explicit is set, tag is non-nil.
}
// Given a tag string with the format specified in the package comment,
// parseFieldParameters will parse it into a fieldParameters structure,
// ignoring unknown parts of the string.
func parseFieldParameters(str string) (ret fieldParameters) {
	for len(str) > 0 {
		var token string
		token, str, _ = strings.Cut(str, ",")
		switch token {
		case "optional":
			ret.optional = true
		case "explicit":
			ret.explicit = true
			if ret.tag == nil {
				ret.tag = new(int)
			}
		case "generalized":
			ret.timeType = TagGeneralizedTime
		case "utc":
			ret.timeType = TagUTCTime
		case "ia5":
			ret.stringType = TagIA5String
		case "printable":
			ret.stringType = TagPrintableString
		case "numeric":
			ret.stringType = TagNumericString
		case "utf8":
			ret.stringType = TagUTF8String
		case "set":
			ret.set = true
		case "application":
			ret.application = true
			if ret.tag == nil {
				ret.tag = new(int)
			}
		case "private":
			ret.private = true
			if ret.tag == nil {
				ret.tag = new(int)
			}
		case "omitempty":
			ret.omitEmpty = true
		default:
			// The two parameterized forms: "default:N" and "tag:N".
			// Unparsable values are silently ignored, matching the
			// documented "ignoring unknown parts" behavior.
			if val, ok := strings.CutPrefix(token, "default:"); ok {
				if i, err := strconv.ParseInt(val, 10, 64); err == nil {
					ret.defaultValue = new(int64)
					*ret.defaultValue = i
				}
			} else if val, ok := strings.CutPrefix(token, "tag:"); ok {
				if i, err := strconv.Atoi(val); err == nil {
					ret.tag = new(int)
					*ret.tag = i
				}
			}
		}
	}
	return ret
}
// Given a reflected Go type, getUniversalType returns the default tag number
// and expected compound flag.
func getUniversalType(t reflect.Type) (matchAny bool, tagNumber int, isCompound, ok bool) {
	// Special, named types take priority over the generic kind dispatch.
	switch t {
	case rawValueType:
		return true, -1, false, true
	case objectIdentifierType:
		return false, TagOID, false, true
	case bitStringType:
		return false, TagBitString, false, true
	case timeType:
		return false, TagUTCTime, false, true
	case enumeratedType:
		return false, TagEnum, false, true
	case bigIntType:
		return false, TagInteger, false, true
	}
	switch t.Kind() {
	case reflect.Bool:
		return false, TagBoolean, false, true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return false, TagInteger, false, true
	case reflect.Struct:
		return false, TagSequence, true, true
	case reflect.Slice:
		switch {
		case t.Elem().Kind() == reflect.Uint8:
			// []byte is an OCTET STRING.
			return false, TagOctetString, false, true
		case strings.HasSuffix(t.Name(), "SET"):
			// A named slice type ending in "SET" encodes as SET OF.
			return false, TagSet, true, true
		default:
			return false, TagSequence, true, true
		}
	case reflect.String:
		return false, TagPrintableString, false, true
	}
	return false, 0, false, false
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package asn1
import (
"bytes"
"errors"
"fmt"
"math/big"
"reflect"
"slices"
"time"
"unicode/utf8"
)
// Shared encoders for the single bytes 0x00 and 0xff.
var (
	byte00Encoder encoder = byteEncoder(0x00)
	byteFFEncoder encoder = byteEncoder(0xff)
)

// encoder represents an ASN.1 element that is waiting to be marshaled.
type encoder interface {
	// Len returns the number of bytes needed to marshal this element.
	Len() int
	// Encode encodes this element by writing Len() bytes to dst.
	Encode(dst []byte)
}

// byteEncoder encodes a single, fixed byte. The two shared instances
// above are used, among other things, for the BOOLEAN bodies 0x00 and 0xff.
type byteEncoder byte

func (c byteEncoder) Len() int {
	return 1
}

func (c byteEncoder) Encode(dst []byte) {
	dst[0] = byte(c)
}
// bytesEncoder writes out a fixed byte slice verbatim.
type bytesEncoder []byte

func (b bytesEncoder) Len() int {
	return len(b)
}

func (b bytesEncoder) Encode(dst []byte) {
	if n := copy(dst, b); n != len(b) {
		panic("internal error")
	}
}
// stringEncoder writes out a fixed string verbatim.
type stringEncoder string

func (s stringEncoder) Len() int {
	return len(s)
}

func (s stringEncoder) Encode(dst []byte) {
	if n := copy(dst, s); n != len(s) {
		panic("internal error")
	}
}
// multiEncoder concatenates the encodings of its elements, in order.
type multiEncoder []encoder

func (m multiEncoder) Len() int {
	total := 0
	for _, e := range m {
		total += e.Len()
	}
	return total
}

func (m multiEncoder) Encode(dst []byte) {
	for _, e := range m {
		n := e.Len()
		e.Encode(dst)
		dst = dst[n:]
	}
}
// setEncoder encodes a SET OF, emitting its elements in the sorted order
// DER requires.
type setEncoder []encoder

func (s setEncoder) Len() int {
	total := 0
	for _, e := range s {
		total += e.Len()
	}
	return total
}

func (s setEncoder) Encode(dst []byte) {
	// Per X690 Section 11.6: The encodings of the component values of a
	// set-of value shall appear in ascending order, the encodings being
	// compared as octet strings with the shorter components being padded
	// at their trailing end with 0-octets.
	//
	// Encode every element to its complete TLV form, sort those byte
	// strings, then concatenate them into dst.
	//
	// bytes.Compare on the full TLV encodings is sufficient: if two
	// encodings differ in length, the length octet — the first byte that
	// can differ — already orders the shorter one first, so the padding
	// step suggested in X690 can be skipped.
	encoded := make([][]byte, len(s))
	for i, e := range s {
		buf := make([]byte, e.Len())
		e.Encode(buf)
		encoded[i] = buf
	}
	slices.SortFunc(encoded, bytes.Compare)

	out := dst
	for _, b := range encoded {
		copy(out, b)
		out = out[len(b):]
	}
}
// taggedEncoder pairs the tag-and-length header of an element with an
// encoder for its body.
type taggedEncoder struct {
	// scratch contains temporary space for encoding the tag and length of
	// an element in order to avoid extra allocations.
	scratch [8]byte
	tag     encoder
	body    encoder
}

func (t *taggedEncoder) Len() int {
	return t.tag.Len() + t.body.Len()
}

func (t *taggedEncoder) Encode(dst []byte) {
	n := t.tag.Len()
	t.tag.Encode(dst[:n])
	t.body.Encode(dst[n:])
}
// int64Encoder encodes an int64 as a minimal-length, big-endian,
// two's-complement INTEGER body.
type int64Encoder int64

func (i int64Encoder) Len() int {
	// One byte covers -128..127; each additional byte widens the
	// representable range by a factor of 256.
	size := 1
	for v := i; v > 127 || v < -128; v >>= 8 {
		size++
	}
	return size
}

func (i int64Encoder) Encode(dst []byte) {
	// Emit the Len() low-order bytes of i, most significant first.
	for shift := (i.Len() - 1) * 8; shift >= 0; shift -= 8 {
		dst[0] = byte(i >> uint(shift))
		dst = dst[1:]
	}
}
// base128IntLength returns the number of bytes needed to encode n in
// base 128 (7 data bits per byte), as used for OID components and
// high tag numbers.
func base128IntLength(n int64) int {
	if n == 0 {
		return 1
	}
	count := 0
	for v := n; v > 0; v >>= 7 {
		count++
	}
	return count
}

// appendBase128Int appends the base-128 encoding of n, most significant
// group first, setting the continuation bit (0x80) on every byte except
// the last.
func appendBase128Int(dst []byte, n int64) []byte {
	for i := base128IntLength(n) - 1; i >= 0; i-- {
		b := byte(n>>uint(i*7)) & 0x7f
		if i > 0 {
			b |= 0x80
		}
		dst = append(dst, b)
	}
	return dst
}
// makeBigInt returns an encoder for the minimal-length, big-endian,
// two's-complement INTEGER body of n. A nil pointer is rejected as a
// structural error.
func makeBigInt(n *big.Int) (encoder, error) {
	if n == nil {
		return nil, StructuralError{"empty integer"}
	}
	if n.Sign() < 0 {
		// A negative number has to be converted to two's-complement
		// form. So we'll invert and subtract 1. If the
		// most-significant-bit isn't set then we'll need to pad the
		// beginning with 0xff in order to keep the number negative.
		nMinus1 := new(big.Int).Neg(n)
		nMinus1.Sub(nMinus1, bigOne)
		bytes := nMinus1.Bytes()
		for i := range bytes {
			bytes[i] ^= 0xff
		}
		if len(bytes) == 0 || bytes[0]&0x80 == 0 {
			return multiEncoder([]encoder{byteFFEncoder, bytesEncoder(bytes)}), nil
		}
		return bytesEncoder(bytes), nil
	} else if n.Sign() == 0 {
		// Zero is written as a single 0 zero rather than no bytes.
		return byte00Encoder, nil
	} else {
		bytes := n.Bytes()
		if len(bytes) > 0 && bytes[0]&0x80 != 0 {
			// We'll have to pad this with 0x00 in order to stop it
			// looking like a negative number.
			return multiEncoder([]encoder{byte00Encoder, bytesEncoder(bytes)}), nil
		}
		return bytesEncoder(bytes), nil
	}
}
// appendLength appends the minimal big-endian byte representation of i.
func appendLength(dst []byte, i int) []byte {
	for n := lengthLength(i); n > 0; n-- {
		dst = append(dst, byte(i>>uint((n-1)*8)))
	}
	return dst
}

// lengthLength returns the number of bytes needed to represent i in
// big-endian form.
func lengthLength(i int) (numBytes int) {
	for numBytes = 1; i > 255; numBytes++ {
		i >>= 8
	}
	return
}
// appendTagAndLength appends the identifier and length octets for t.
// Tag numbers of 31 or more use the long, base-128 identifier form, and
// lengths of 128 or more use the long, length-of-length form.
func appendTagAndLength(dst []byte, t tagAndLength) []byte {
	identifier := uint8(t.class) << 6
	if t.isCompound {
		identifier |= 0x20
	}
	if t.tag < 31 {
		dst = append(dst, identifier|uint8(t.tag))
	} else {
		dst = append(dst, identifier|0x1f)
		dst = appendBase128Int(dst, int64(t.tag))
	}
	if t.length < 128 {
		dst = append(dst, byte(t.length))
	} else {
		dst = append(dst, 0x80|byte(lengthLength(t.length)))
		dst = appendLength(dst, t.length)
	}
	return dst
}
// bitStringEncoder encodes a BIT STRING body: a leading octet giving the
// number of unused bits in the final byte, followed by the bit data.
type bitStringEncoder BitString

func (b bitStringEncoder) Len() int {
	return 1 + len(b.Bytes)
}

func (b bitStringEncoder) Encode(dst []byte) {
	padBits := (8 - b.BitLength%8) % 8
	dst[0] = byte(padBits)
	if copy(dst[1:], b.Bytes) != len(b.Bytes) {
		panic("internal error")
	}
}
// oidEncoder encodes an OBJECT IDENTIFIER body: the first two components
// are packed into a single base-128 value (40*oid[0] + oid[1]) and every
// remaining component is base-128 encoded on its own.
type oidEncoder []int

func (oid oidEncoder) Len() int {
	total := base128IntLength(int64(oid[0]*40 + oid[1]))
	for _, component := range oid[2:] {
		total += base128IntLength(int64(component))
	}
	return total
}

func (oid oidEncoder) Encode(dst []byte) {
	out := appendBase128Int(dst[:0], int64(oid[0]*40+oid[1]))
	for _, component := range oid[2:] {
		out = appendBase128Int(out, int64(component))
	}
}

// makeObjectIdentifier validates oid and returns an encoder for it. An
// OID needs at least two components; the first must be at most 2, and
// when it is below 2 the second must be below 40.
func makeObjectIdentifier(oid []int) (e encoder, err error) {
	valid := len(oid) >= 2 && oid[0] <= 2 && (oid[0] == 2 || oid[1] < 40)
	if !valid {
		return nil, StructuralError{"invalid object identifier"}
	}
	return oidEncoder(oid), nil
}
// makePrintableString returns an encoder for s after checking that every
// byte is acceptable in a PrintableString. The asterisk is often used in
// PrintableString even though it is invalid, so it is permitted here when
// a PrintableString was specifically requested. Ampersand is allowed in
// parsing (due to a handful of CA certificates) but is rejected when
// making new certificates.
func makePrintableString(s string) (e encoder, err error) {
	for _, c := range []byte(s) {
		if !isPrintable(c, allowAsterisk, rejectAmpersand) {
			return nil, StructuralError{"PrintableString contains invalid character"}
		}
	}
	return stringEncoder(s), nil
}
// makeIA5String returns an encoder for s after checking that every byte
// is 7-bit ASCII, as IA5String requires.
func makeIA5String(s string) (e encoder, err error) {
	for _, c := range []byte(s) {
		if c > 127 {
			return nil, StructuralError{"IA5String contains invalid character"}
		}
	}
	return stringEncoder(s), nil
}
// makeNumericString returns an encoder for s after checking every byte
// with isNumeric.
func makeNumericString(s string) (e encoder, err error) {
	for _, c := range []byte(s) {
		if !isNumeric(c) {
			return nil, StructuralError{"NumericString contains invalid character"}
		}
	}
	return stringEncoder(s), nil
}
// makeUTF8String returns an encoder for s. No validation is performed
// here; the string bytes are written out as-is.
func makeUTF8String(s string) encoder {
	return stringEncoder(s)
}
// appendTwoDigits appends v modulo 100 as two ASCII decimal digits.
func appendTwoDigits(dst []byte, v int) []byte {
	tens := byte('0' + (v/10)%10)
	ones := byte('0' + v%10)
	return append(dst, tens, ones)
}

// appendFourDigits appends v modulo 10000 as four ASCII decimal digits.
func appendFourDigits(dst []byte, v int) []byte {
	dst = appendTwoDigits(dst, v/100)
	return appendTwoDigits(dst, v)
}
func outsideUTCRange(t time.Time) bool {
year := t.Year()
return year < 1950 || year >= 2050
}
// makeUTCTime returns an encoder for t formatted as an ASN.1 UTCTime body.
func makeUTCTime(t time.Time) (e encoder, err error) {
	out, err := appendUTCTime(make([]byte, 0, 18), t)
	if err != nil {
		return nil, err
	}
	return bytesEncoder(out), nil
}

// makeGeneralizedTime returns an encoder for t formatted as an ASN.1
// GeneralizedTime body.
func makeGeneralizedTime(t time.Time) (e encoder, err error) {
	out, err := appendGeneralizedTime(make([]byte, 0, 20), t)
	if err != nil {
		return nil, err
	}
	return bytesEncoder(out), nil
}
func appendUTCTime(dst []byte, t time.Time) (ret []byte, err error) {
year := t.Year()
switch {
case 1950 <= year && year < 2000:
dst = appendTwoDigits(dst, year-1900)
case 2000 <= year && year < 2050:
dst = appendTwoDigits(dst, year-2000)
default:
return nil, StructuralError{"cannot represent time as UTCTime"}
}
return appendTimeCommon(dst, t), nil
}
func appendGeneralizedTime(dst []byte, t time.Time) (ret []byte, err error) {
year := t.Year()
if year < 0 || year > 9999 {
return nil, StructuralError{"cannot represent time as GeneralizedTime"}
}
dst = appendFourDigits(dst, year)
return appendTimeCommon(dst, t), nil
}
// appendTimeCommon appends MMDDhhmmss followed by the zone: 'Z' when the
// offset is less than a whole minute from UTC, otherwise a signed hhmm
// offset.
func appendTimeCommon(dst []byte, t time.Time) []byte {
	_, month, day := t.Date()
	hour, min, sec := t.Clock()
	for _, v := range [...]int{int(month), day, hour, min, sec} {
		dst = appendTwoDigits(dst, v)
	}

	_, offset := t.Zone()
	offsetMinutes := offset / 60
	switch {
	case offsetMinutes == 0:
		return append(dst, 'Z')
	case offsetMinutes > 0:
		dst = append(dst, '+')
	default:
		dst = append(dst, '-')
		offsetMinutes = -offsetMinutes
	}
	dst = appendTwoDigits(dst, offsetMinutes/60)
	return appendTwoDigits(dst, offsetMinutes%60)
}
// stripTagAndLength returns in with its leading tag-and-length header
// removed. If the header cannot be parsed, in is returned unchanged.
func stripTagAndLength(in []byte) []byte {
	if _, offset, err := parseTagAndLength(in, 0); err == nil {
		return in[offset:]
	}
	return in
}
// makeBody returns an encoder for the content octets of value only; the
// surrounding tag and length are produced by the caller (makeField).
// params selects string/time encodings and SET vs SEQUENCE treatment of
// slices.
func makeBody(value reflect.Value, params fieldParameters) (e encoder, err error) {
	// Special named types first.
	switch value.Type() {
	case flagType:
		// A Flag carries no content octets.
		return bytesEncoder(nil), nil
	case timeType:
		t, _ := reflect.TypeAssert[time.Time](value)
		if params.timeType == TagGeneralizedTime || outsideUTCRange(t) {
			return makeGeneralizedTime(t)
		}
		return makeUTCTime(t)
	case bitStringType:
		v, _ := reflect.TypeAssert[BitString](value)
		return bitStringEncoder(v), nil
	case objectIdentifierType:
		v, _ := reflect.TypeAssert[ObjectIdentifier](value)
		return makeObjectIdentifier(v)
	case bigIntType:
		v, _ := reflect.TypeAssert[*big.Int](value)
		return makeBigInt(v)
	}
	// Otherwise dispatch on the reflect kind.
	switch v := value; v.Kind() {
	case reflect.Bool:
		if v.Bool() {
			return byteFFEncoder, nil
		}
		return byte00Encoder, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return int64Encoder(v.Int()), nil
	case reflect.Struct:
		t := v.Type()

		for i := 0; i < t.NumField(); i++ {
			if !t.Field(i).IsExported() {
				return nil, StructuralError{"struct contains unexported fields"}
			}
		}

		startingField := 0

		n := t.NumField()
		if n == 0 {
			return bytesEncoder(nil), nil
		}

		// If the first element of the structure is a non-empty
		// RawContents, then we don't bother serializing the rest.
		if t.Field(0).Type == rawContentsType {
			s := v.Field(0)
			if s.Len() > 0 {
				bytes := s.Bytes()
				/* The RawContents will contain the tag and
				 * length fields but we'll also be writing
				 * those ourselves, so we strip them out of
				 * bytes */
				return bytesEncoder(stripTagAndLength(bytes)), nil
			}
			startingField = 1
		}

		switch n1 := n - startingField; n1 {
		case 0:
			return bytesEncoder(nil), nil
		case 1:
			return makeField(v.Field(startingField), parseFieldParameters(t.Field(startingField).Tag.Get("asn1")))
		default:
			m := make([]encoder, n1)
			for i := 0; i < n1; i++ {
				m[i], err = makeField(v.Field(i+startingField), parseFieldParameters(t.Field(i+startingField).Tag.Get("asn1")))
				if err != nil {
					return nil, err
				}
			}

			return multiEncoder(m), nil
		}
	case reflect.Slice:
		sliceType := v.Type()
		if sliceType.Elem().Kind() == reflect.Uint8 {
			// A []byte is emitted directly as the OCTET STRING body.
			return bytesEncoder(v.Bytes()), nil
		}

		// Elements of a SEQUENCE OF / SET OF carry no per-field tag
		// parameters, hence the zero fieldParameters.
		var fp fieldParameters

		switch l := v.Len(); l {
		case 0:
			return bytesEncoder(nil), nil
		case 1:
			return makeField(v.Index(0), fp)
		default:
			m := make([]encoder, l)

			for i := 0; i < l; i++ {
				m[i], err = makeField(v.Index(i), fp)
				if err != nil {
					return nil, err
				}
			}

			if params.set {
				return setEncoder(m), nil
			}
			return multiEncoder(m), nil
		}
	case reflect.String:
		switch params.stringType {
		case TagIA5String:
			return makeIA5String(v.String())
		case TagPrintableString:
			return makePrintableString(v.String())
		case TagNumericString:
			return makeNumericString(v.String())
		default:
			return makeUTF8String(v.String()), nil
		}
	}

	return nil, StructuralError{"unknown Go type"}
}
// makeField returns an encoder for the complete TLV encoding of v,
// applying the OPTIONAL/DEFAULT omission rules, string/time tag
// selection, and implicit or explicit tagging described by params.
func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) {
	if !v.IsValid() {
		return nil, fmt.Errorf("asn1: cannot marshal nil value")
	}
	// If the field is an interface{} then recurse into it.
	if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
		return makeField(v.Elem(), params)
	}

	if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
		return bytesEncoder(nil), nil
	}

	// An optional field equal to its declared default is omitted entirely.
	if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) {
		defaultValue := reflect.New(v.Type()).Elem()
		defaultValue.SetInt(*params.defaultValue)

		if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) {
			return bytesEncoder(nil), nil
		}
	}

	// If no default value is given then the zero value for the type is
	// assumed to be the default value. This isn't obviously the correct
	// behavior, but it's what Go has traditionally done.
	if params.optional && params.defaultValue == nil {
		if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
			return bytesEncoder(nil), nil
		}
	}

	if v.Type() == rawValueType {
		// A RawValue is passed through: either its stored FullBytes, or a
		// freshly built header around its Bytes.
		rv, _ := reflect.TypeAssert[RawValue](v)

		if len(rv.FullBytes) != 0 {
			return bytesEncoder(rv.FullBytes), nil
		}

		t := new(taggedEncoder)

		t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound}))
		t.body = bytesEncoder(rv.Bytes)

		return t, nil
	}

	matchAny, tag, isCompound, ok := getUniversalType(v.Type())
	if !ok || matchAny {
		return nil, StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())}
	}

	if params.timeType != 0 && tag != TagUTCTime {
		return nil, StructuralError{"explicit time type given to non-time member"}
	}

	if params.stringType != 0 && tag != TagPrintableString {
		return nil, StructuralError{"explicit string type given to non-string member"}
	}

	switch tag {
	case TagPrintableString:
		if params.stringType == 0 {
			// This is a string without an explicit string type. We'll use
			// a PrintableString if the character set in the string is
			// sufficiently limited, otherwise we'll use a UTF8String.
			for _, r := range v.String() {
				if r >= utf8.RuneSelf || !isPrintable(byte(r), rejectAsterisk, rejectAmpersand) {
					if !utf8.ValidString(v.String()) {
						return nil, errors.New("asn1: string not valid UTF-8")
					}
					tag = TagUTF8String
					break
				}
			}
		} else {
			tag = params.stringType
		}
	case TagUTCTime:
		// Times outside the UTCTime range are promoted to GeneralizedTime.
		t, _ := reflect.TypeAssert[time.Time](v)
		if params.timeType == TagGeneralizedTime || outsideUTCRange(t) {
			tag = TagGeneralizedTime
		}
	}

	if params.set {
		if tag != TagSequence {
			return nil, StructuralError{"non sequence tagged as set"}
		}
		tag = TagSet
	}

	// makeField can be called for a slice that should be treated as a SET
	// but doesn't have params.set set, for instance when using a slice
	// with the SET type name suffix. In this case getUniversalType returns
	// TagSet, but makeBody doesn't know about that so will treat the slice
	// as a sequence. To work around this we set params.set.
	if tag == TagSet && !params.set {
		params.set = true
	}

	t := new(taggedEncoder)

	t.body, err = makeBody(v, params)
	if err != nil {
		return nil, err
	}

	bodyLen := t.body.Len()

	class := ClassUniversal
	if params.tag != nil {
		if params.application {
			class = ClassApplication
		} else if params.private {
			class = ClassPrivate
		} else {
			class = ClassContextSpecific
		}

		if params.explicit {
			// An explicit tag wraps the universally tagged element in an
			// outer, constructed element carrying the requested tag.
			t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{ClassUniversal, tag, bodyLen, isCompound}))

			tt := new(taggedEncoder)

			tt.body = t

			tt.tag = bytesEncoder(appendTagAndLength(tt.scratch[:0], tagAndLength{
				class:      class,
				tag:        *params.tag,
				length:     bodyLen + t.tag.Len(),
				isCompound: true,
			}))

			return tt, nil
		}

		// implicit tag.
		tag = *params.tag
	}

	t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{class, tag, bodyLen, isCompound}))

	return t, nil
}
// Marshal returns the ASN.1 encoding of val.
//
// In addition to the struct tags recognized by Unmarshal, the following can be
// used:
//
//	ia5:         causes strings to be marshaled as ASN.1, IA5String values
//	omitempty:   causes empty slices to be skipped
//	printable:   causes strings to be marshaled as ASN.1, PrintableString values
//	utf8:        causes strings to be marshaled as ASN.1, UTF8String values
//	numeric:     causes strings to be marshaled as ASN.1, NumericString values
//	utc:         causes time.Time to be marshaled as ASN.1, UTCTime values
//	generalized: causes time.Time to be marshaled as ASN.1, GeneralizedTime values
func Marshal(val any) ([]byte, error) {
	const noParams = ""
	return MarshalWithParams(val, noParams)
}
// MarshalWithParams allows field parameters to be specified for the
// top-level element. The form of the params is the same as the field tags.
func MarshalWithParams(val any, params string) ([]byte, error) {
	enc, err := makeField(reflect.ValueOf(val), parseFieldParameters(params))
	if err != nil {
		return nil, err
	}
	out := make([]byte, enc.Len())
	enc.Encode(out)
	return out, nil
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package base32 implements base32 encoding as specified by RFC 4648.
package base32
import (
"io"
"slices"
"strconv"
)
/*
* Encodings
*/
// An Encoding is a radix 32 encoding/decoding scheme, defined by a
// 32-character alphabet. The most common is the "base32" encoding
// introduced for SASL GSSAPI and standardized in RFC 4648.
// The alternate "base32hex" encoding is used in DNSSEC.
type Encoding struct {
encode [32]byte // mapping of symbol index to symbol byte value
decodeMap [256]uint8 // mapping of symbol byte value to symbol index
padChar rune
}
const (
	StdPadding rune = '=' // Standard padding character
	NoPadding  rune = -1  // No padding
)

// decodeMapInitialize is 256 bytes of 0xff ("invalid"), used to seed an
// Encoding's decodeMap before the alphabet's own positions are filled in.
const (
	decodeMapInitialize = "" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
	invalidIndex = '\xff' // decodeMap value for bytes not in the alphabet
)
// NewEncoding returns a new padded Encoding defined by the given alphabet,
// which must be a 32-byte string that contains unique byte values and
// does not contain the padding character or CR / LF ('\r', '\n').
// The alphabet is treated as a sequence of byte values
// without any special treatment for multi-byte UTF-8.
// The resulting Encoding uses the default padding character ('='),
// which may be changed or disabled via [Encoding.WithPadding].
func NewEncoding(encoder string) *Encoding {
	if len(encoder) != 32 {
		panic("encoding alphabet is not 32-bytes long")
	}

	e := &Encoding{padChar: StdPadding}
	copy(e.encode[:], encoder)
	copy(e.decodeMap[:], decodeMapInitialize)

	for i, c := range []byte(encoder) {
		// Note: While we document that the alphabet cannot contain
		// the padding character, we do not enforce it since we do not know
		// if the caller intends to switch the padding from StdPadding later.
		switch {
		case c == '\n' || c == '\r':
			panic("encoding alphabet contains newline character")
		case e.decodeMap[c] != invalidIndex:
			panic("encoding alphabet includes duplicate symbols")
		}
		e.decodeMap[c] = uint8(i)
	}
	return e
}
// StdEncoding is the standard base32 encoding, as defined in RFC 4648.
// It uses the alphabet "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" with '=' padding.
var StdEncoding = NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZ234567")

// HexEncoding is the “Extended Hex Alphabet” defined in RFC 4648.
// It is typically used in DNS.
var HexEncoding = NewEncoding("0123456789ABCDEFGHIJKLMNOPQRSTUV")
// WithPadding creates a new encoding identical to enc except
// with a specified padding character, or NoPadding to disable padding.
// The padding character must not be '\r' or '\n',
// must not be contained in the encoding's alphabet,
// must not be negative, and must be a rune equal or below '\xff'.
// Padding characters above '\x7f' are encoded as their exact byte value
// rather than using the UTF-8 representation of the codepoint.
func (enc Encoding) WithPadding(padding rune) *Encoding {
	// enc is a value receiver, so this mutation affects only the copy
	// that is returned.
	if padding < NoPadding || padding == '\r' || padding == '\n' || padding > 0xff {
		panic("invalid padding")
	}
	if padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex {
		panic("padding contained in alphabet")
	}
	enc.padChar = padding
	return &enc
}
/*
* Encoder
*/
// Encode encodes src using the encoding enc,
// writing [Encoding.EncodedLen](len(src)) bytes to dst.
//
// The encoding pads the output to a multiple of 8 bytes,
// so Encode is not appropriate for use on individual blocks
// of a large data stream. Use [NewEncoder] instead.
func (enc *Encoding) Encode(dst, src []byte) {
	if len(src) == 0 {
		return
	}
	// enc is a pointer receiver, so the use of enc.encode within the hot
	// loop below means a nil check at every operation. Lift that nil check
	// outside of the loop to speed up the encoder.
	_ = enc.encode

	for len(src) >= 5 {
		// Combining two 32 bit loads allows the same code to be used
		// for 32 and 64 bit platforms.
		hi := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
		lo := hi<<8 | uint32(src[4])

		_ = dst[7] // Eliminate bounds checks below.
		// Each output symbol takes the next 5 input bits, high bits first.
		dst[0] = enc.encode[(hi>>27)&0x1F]
		dst[1] = enc.encode[(hi>>22)&0x1F]
		dst[2] = enc.encode[(hi>>17)&0x1F]
		dst[3] = enc.encode[(hi>>12)&0x1F]
		dst[4] = enc.encode[(hi>>7)&0x1F]
		dst[5] = enc.encode[(hi>>2)&0x1F]
		dst[6] = enc.encode[(lo>>5)&0x1F]
		dst[7] = enc.encode[(lo)&0x1F]

		src = src[5:]
		dst = dst[8:]
	}

	// Add the remaining small block
	if len(src) == 0 {
		return
	}

	// Encode the remaining bytes in reverse order.
	val := uint32(0)
	switch len(src) {
	case 4:
		val |= uint32(src[3])
		dst[6] = enc.encode[val<<3&0x1F]
		dst[5] = enc.encode[val>>2&0x1F]
		fallthrough
	case 3:
		val |= uint32(src[2]) << 8
		dst[4] = enc.encode[val>>7&0x1F]
		fallthrough
	case 2:
		val |= uint32(src[1]) << 16
		dst[3] = enc.encode[val>>12&0x1F]
		dst[2] = enc.encode[val>>17&0x1F]
		fallthrough
	case 1:
		val |= uint32(src[0]) << 24
		dst[1] = enc.encode[val>>22&0x1F]
		dst[0] = enc.encode[val>>27&0x1F]
	}

	// Pad the final quantum
	if enc.padChar != NoPadding {
		// nPad is the number of data-bearing output symbols for the 1-4
		// remaining input bytes: 2, 4, 5 or 7 respectively.
		nPad := (len(src) * 8 / 5) + 1
		for i := nPad; i < 8; i++ {
			dst[i] = byte(enc.padChar)
		}
	}
}
// AppendEncode appends the base32 encoded src to dst
// and returns the extended buffer.
func (enc *Encoding) AppendEncode(dst, src []byte) []byte {
	n := enc.EncodedLen(len(src))
	total := len(dst) + n
	dst = slices.Grow(dst, n)[:total]
	enc.Encode(dst[total-n:], src)
	return dst
}
// EncodeToString returns the base32 encoding of src.
func (enc *Encoding) EncodeToString(src []byte) string {
	out := make([]byte, enc.EncodedLen(len(src)))
	enc.Encode(out, src)
	return string(out)
}
// encoder is the streaming writer returned by NewEncoder. It buffers
// input until a full 5-byte quantum is available, and latches the first
// underlying write error in err.
type encoder struct {
	err  error
	enc  *Encoding
	w    io.Writer
	buf  [5]byte    // buffered data waiting to be encoded
	nbuf int        // number of bytes in buf
	out  [1024]byte // output buffer
}
// Write encodes p, writing complete 8-byte output quanta to the
// underlying writer and buffering up to 4 trailing input bytes (a
// partial 5-byte quantum) for a later Write or Close. Once an underlying
// write fails, the error is remembered and returned by all subsequent
// calls.
func (e *encoder) Write(p []byte) (n int, err error) {
	if e.err != nil {
		return 0, e.err
	}

	// Leading fringe.
	if e.nbuf > 0 {
		var i int
		for i = 0; i < len(p) && e.nbuf < 5; i++ {
			e.buf[e.nbuf] = p[i]
			e.nbuf++
		}
		n += i
		p = p[i:]
		if e.nbuf < 5 {
			// Still short of a full quantum: keep buffering.
			return
		}
		e.enc.Encode(e.out[0:], e.buf[0:])
		if _, e.err = e.w.Write(e.out[0:8]); e.err != nil {
			return n, e.err
		}
		e.nbuf = 0
	}

	// Large interior chunks.
	for len(p) >= 5 {
		nn := len(e.out) / 8 * 5
		if nn > len(p) {
			nn = len(p)
			nn -= nn % 5 // only encode whole 5-byte quanta here
		}
		e.enc.Encode(e.out[0:], p[0:nn])
		if _, e.err = e.w.Write(e.out[0 : nn/5*8]); e.err != nil {
			return n, e.err
		}
		n += nn
		p = p[nn:]
	}

	// Trailing fringe.
	copy(e.buf[:], p)
	e.nbuf = len(p)
	n += len(p)
	return
}
// Close flushes any pending output from the encoder.
// It is an error to call Write after calling Close.
func (e *encoder) Close() error {
	// Nothing buffered, or an earlier error: nothing to flush.
	if e.err != nil || e.nbuf == 0 {
		return e.err
	}
	e.enc.Encode(e.out[0:], e.buf[0:e.nbuf])
	written := e.enc.EncodedLen(e.nbuf)
	e.nbuf = 0
	_, e.err = e.w.Write(e.out[0:written])
	return e.err
}
// NewEncoder returns a new base32 stream encoder. Data written to
// the returned writer will be encoded using enc and then written to w.
// Base32 encodings operate in 5-byte blocks; when finished
// writing, the caller must Close the returned encoder to flush any
// partially written blocks.
func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser {
	e := &encoder{w: w, enc: enc}
	return e
}
// EncodedLen returns the length in bytes of the base32 encoding
// of an input buffer of length n.
func (enc *Encoding) EncodedLen(n int) int {
	if enc.padChar != NoPadding {
		// Padded output is always whole 8-character blocks.
		return (n + 4) / 5 * 8
	}
	// Unpadded: full quanta, plus the minimum characters needed to
	// carry the leftover bits (5 bits per character, rounded up).
	return n/5*8 + (n%5*8+4)/5
}
/*
 * Decoder
 */

// CorruptInputError holds the offset of the first invalid byte in the
// base32 input that caused decoding to fail.
type CorruptInputError int64

// Error implements the error interface, reporting the byte offset.
func (e CorruptInputError) Error() string {
	return "illegal base32 data at input byte " + strconv.FormatInt(int64(e), 10)
}
// decode is like Decode but returns an additional 'end' value, which
// indicates if end-of-message padding was encountered and thus any
// additional data is an error. This method assumes that src has been
// stripped of all supported whitespace ('\r' and '\n').
func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) {
	// Lift the nil check outside of the loop.
	_ = enc.decodeMap

	dsti := 0
	olen := len(src) // original length, used to compute error offsets

	for len(src) > 0 && !end {
		// Decode quantum using the base32 alphabet
		var dbuf [8]byte
		dlen := 8 // number of valid (non-padding) chars in this quantum

		for j := 0; j < 8; {
			if len(src) == 0 {
				if enc.padChar != NoPadding {
					// We have reached the end and are missing padding
					return n, false, CorruptInputError(olen - len(src) - j)
				}
				// We have reached the end and are not expecting any padding
				dlen, end = j, true
				break
			}
			in := src[0]
			src = src[1:]
			if in == byte(enc.padChar) && j >= 2 && len(src) < 8 {
				// We've reached the end and there's padding
				if len(src)+j < 8-1 {
					// not enough padding
					return n, false, CorruptInputError(olen)
				}
				for k := 0; k < 8-1-j; k++ {
					if len(src) > k && src[k] != byte(enc.padChar) {
						// incorrect padding
						return n, false, CorruptInputError(olen - len(src) + k - 1)
					}
				}
				dlen, end = j, true
				// 7, 5 and 2 are not valid padding lengths, and so 1, 3 and 6 are not
				// valid dlen values. See RFC 4648 Section 6 "Base 32 Encoding" listing
				// the five valid padding lengths, and Section 9 "Illustrations and
				// Examples" for an illustration for how the 1st, 3rd and 6th base32
				// src bytes do not yield enough information to decode a dst byte.
				if dlen == 1 || dlen == 3 || dlen == 6 {
					return n, false, CorruptInputError(olen - len(src) - 1)
				}
				break
			}
			dbuf[j] = enc.decodeMap[in]
			if dbuf[j] == 0xFF {
				// Byte is not in the alphabet.
				return n, false, CorruptInputError(olen - len(src) - 1)
			}
			j++
		}

		// Pack 8x 5-bit source blocks into 5 byte destination
		// quantum. Each case writes one output byte; fallthrough
		// cascades from the highest valid dlen down to 2.
		switch dlen {
		case 8:
			dst[dsti+4] = dbuf[6]<<5 | dbuf[7]
			n++
			fallthrough
		case 7:
			dst[dsti+3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3
			n++
			fallthrough
		case 5:
			dst[dsti+2] = dbuf[3]<<4 | dbuf[4]>>1
			n++
			fallthrough
		case 4:
			dst[dsti+1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4
			n++
			fallthrough
		case 2:
			dst[dsti+0] = dbuf[0]<<3 | dbuf[1]>>2
			n++
		}
		dsti += 5
	}
	return n, end, nil
}
// Decode decodes src using the encoding enc. It writes at most
// [Encoding.DecodedLen](len(src)) bytes to dst and returns the number of bytes
// written. The caller must ensure that dst is large enough to hold all
// the decoded data. If src contains invalid base32 data, it will return the
// number of bytes successfully written and [CorruptInputError].
// Newline characters (\r and \n) are ignored.
func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
	// Copy src into a scratch buffer with newlines removed, since
	// decode requires whitespace-free input.
	filtered := make([]byte, len(src))
	kept := stripNewlines(filtered, src)
	n, _, err = enc.decode(dst, filtered[:kept])
	return n, err
}
// AppendDecode appends the base32 decoded src to dst
// and returns the extended buffer.
// If the input is malformed, it returns the partially decoded src and an error.
// New line characters (\r and \n) are ignored.
func (enc *Encoding) AppendDecode(dst, src []byte) ([]byte, error) {
	// Compute the output size ignoring trailing padding characters so
	// we do not over allocate.
	trimmed := len(src)
	for trimmed > 0 && rune(src[trimmed-1]) == enc.padChar {
		trimmed--
	}
	want := decodedLen(trimmed, NoPadding)
	dst = slices.Grow(dst, want)
	n, err := enc.Decode(dst[len(dst):][:want], src)
	return dst[:len(dst)+n], err
}
// DecodeString returns the bytes represented by the base32 string s.
// If the input is malformed, it returns the partially decoded data and
// [CorruptInputError]. New line characters (\r and \n) are ignored.
func (enc *Encoding) DecodeString(s string) ([]byte, error) {
	// Decode in place: the decoded output is always shorter than the
	// newline-stripped input, so one buffer serves as both.
	work := []byte(s)
	kept := stripNewlines(work, work)
	n, _, err := enc.decode(work, work[:kept])
	return work[:n], err
}
// decoder is an io.Reader that reads base32-encoded data from an
// underlying reader and yields the decoded bytes.
type decoder struct {
	err    error      // sticky error to return after buffered output drains
	enc    *Encoding  // encoding scheme in use
	r      io.Reader  // source of encoded input (newline-filtered)
	end    bool       // saw end of message
	buf    [1024]byte // leftover input
	nbuf   int        // number of bytes in buf
	out    []byte     // leftover decoded output
	outbuf [1024 / 8 * 5]byte // backing storage for out
}
func readEncodedData(r io.Reader, buf []byte, min int, expectsPadding bool) (n int, err error) {
for n < min && err == nil {
var nn int
nn, err = r.Read(buf[n:])
n += nn
}
// data was read, less than min bytes could be read
if n < min && n > 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
// no data was read, the buffer already contains some data
// when padding is disabled this is not an error, as the message can be of
// any length
if expectsPadding && min < 8 && n == 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// Read implements io.Reader, returning decoded bytes. Decoded data
// that did not fit into p is retained in d.out for subsequent calls;
// a pending error is only surfaced once all decoded data is consumed.
func (d *decoder) Read(p []byte) (n int, err error) {
	// Use leftover decoded output from last read.
	if len(d.out) > 0 {
		n = copy(p, d.out)
		d.out = d.out[n:]
		if len(d.out) == 0 {
			return n, d.err
		}
		return n, nil
	}
	if d.err != nil {
		return 0, d.err
	}

	// Read a chunk.
	// Size the read so its decoded form roughly fills p.
	nn := (len(p) + 4) / 5 * 8
	if nn < 8 {
		nn = 8
	}
	if nn > len(d.buf) {
		nn = len(d.buf)
	}

	// Minimum amount of bytes that needs to be read each cycle
	var min int
	var expectsPadding bool
	if d.enc.padChar == NoPadding {
		min = 1
		expectsPadding = false
	} else {
		// With padding, a full 8-byte quantum must be completed.
		min = 8 - d.nbuf
		expectsPadding = true
	}

	nn, d.err = readEncodedData(d.r, d.buf[d.nbuf:nn], min, expectsPadding)
	d.nbuf += nn
	if d.nbuf < min {
		return 0, d.err
	}
	if nn > 0 && d.end {
		// Data after end-of-message padding is corrupt.
		return 0, CorruptInputError(0)
	}

	// Decode chunk into p, or d.out and then p if p is too small.
	var nr int
	if d.enc.padChar == NoPadding {
		nr = d.nbuf
	} else {
		nr = d.nbuf / 8 * 8 // only whole quanta when padded
	}
	nw := d.enc.DecodedLen(d.nbuf)

	if nw > len(p) {
		nw, d.end, err = d.enc.decode(d.outbuf[0:], d.buf[0:nr])
		d.out = d.outbuf[0:nw]
		n = copy(p, d.out)
		d.out = d.out[n:]
	} else {
		n, d.end, err = d.enc.decode(p, d.buf[0:nr])
	}
	// Shift undecoded leftover bytes to the front of the buffer.
	d.nbuf -= nr
	for i := 0; i < d.nbuf; i++ {
		d.buf[i] = d.buf[i+nr]
	}

	if err != nil && (d.err == nil || d.err == io.EOF) {
		d.err = err
	}

	if len(d.out) > 0 {
		// We cannot return all the decoded bytes to the caller in this
		// invocation of Read, so we return a nil error to ensure that Read
		// will be called again. The error stored in d.err, if any, will be
		// returned with the last set of decoded bytes.
		return n, nil
	}

	return n, d.err
}
// newlineFilteringReader wraps an io.Reader and removes '\r' and '\n'
// bytes from the data it returns.
type newlineFilteringReader struct {
	wrapped io.Reader
}
// stripNewlines removes newline characters and returns the number
// of non-newline characters copied to dst.
// dst and src may be the same slice; dst must hold len(src) bytes.
func stripNewlines(dst, src []byte) int {
	n := 0
	for _, c := range src {
		switch c {
		case '\r', '\n':
			// Drop whitespace.
		default:
			dst[n] = c
			n++
		}
	}
	return n
}
// Read fills p from the wrapped reader, compacting out '\r' and '\n'
// bytes. It retries when an entire read consisted of newlines so that
// it never reports a zero-byte success before EOF.
func (r *newlineFilteringReader) Read(p []byte) (int, error) {
	n, err := r.wrapped.Read(p)
	for n > 0 {
		if kept := stripNewlines(p[:n], p[:n]); kept > 0 || err != nil {
			return kept, err
		}
		// Previous buffer entirely whitespace, read again
		n, err = r.wrapped.Read(p)
	}
	return n, err
}
// NewDecoder constructs a new base32 stream decoder.
// The reader is wrapped so that '\r' and '\n' in the input are ignored.
func NewDecoder(enc *Encoding, r io.Reader) io.Reader {
	return &decoder{enc: enc, r: &newlineFilteringReader{r}}
}
// DecodedLen returns the maximum length in bytes of the decoded data
// corresponding to n bytes of base32-encoded data.
func (enc *Encoding) DecodedLen(n int) int {
	return decodedLen(n, enc.padChar)
}
// decodedLen computes the maximum decoded size of n base32 characters
// under the given padding policy.
func decodedLen(n int, padChar rune) int {
	if padChar != NoPadding {
		// Padded input is always whole 8-character quanta.
		return n / 8 * 5
	}
	// Unpadded input may end with a partial quantum carrying
	// 5 bits per remaining character.
	return n/8*5 + n%8*5/8
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package base64 implements base64 encoding as specified by RFC 4648.
package base64
import (
"internal/byteorder"
"io"
"slices"
"strconv"
)
/*
 * Encodings
 */

// An Encoding is a radix 64 encoding/decoding scheme, defined by a
// 64-character alphabet. The most common encoding is the "base64"
// encoding defined in RFC 4648 and used in MIME (RFC 2045) and PEM
// (RFC 1421). RFC 4648 also defines an alternate encoding, which is
// the standard encoding with - and _ substituted for + and /.
type Encoding struct {
	encode    [64]byte   // mapping of symbol index to symbol byte value
	decodeMap [256]uint8 // mapping of symbol byte value to symbol index
	padChar   rune       // padding character, or NoPadding to disable
	strict    bool       // reject non-zero trailing padding bits when decoding
}
// Padding selectors for use with [Encoding.WithPadding].
const (
	StdPadding rune = '=' // Standard padding character
	NoPadding  rune = -1  // No padding
)
const (
	// decodeMapInitialize is 256 bytes of 0xFF, copied into an
	// Encoding's decodeMap so every byte value starts out invalid
	// until the alphabet marks its symbols.
	decodeMapInitialize = "" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
	// invalidIndex marks a byte value that is not in the alphabet.
	invalidIndex = '\xff'
)
// NewEncoding returns a new padded Encoding defined by the given alphabet,
// which must be a 64-byte string that contains unique byte values and
// does not contain the padding character or CR / LF ('\r', '\n').
// The alphabet is treated as a sequence of byte values
// without any special treatment for multi-byte UTF-8.
// The resulting Encoding uses the default padding character ('='),
// which may be changed or disabled via [Encoding.WithPadding].
func NewEncoding(encoder string) *Encoding {
	if len(encoder) != 64 {
		panic("encoding alphabet is not 64-bytes long")
	}

	e := &Encoding{padChar: StdPadding}
	copy(e.encode[:], encoder)
	copy(e.decodeMap[:], decodeMapInitialize)

	for i, c := range []byte(encoder) {
		// Note: While we document that the alphabet cannot contain
		// the padding character, we do not enforce it since we do not know
		// if the caller intends to switch the padding from StdPadding later.
		if c == '\n' || c == '\r' {
			panic("encoding alphabet contains newline character")
		}
		if e.decodeMap[c] != invalidIndex {
			panic("encoding alphabet includes duplicate symbols")
		}
		e.decodeMap[c] = uint8(i)
	}
	return e
}
// WithPadding creates a new encoding identical to enc except
// with a specified padding character, or [NoPadding] to disable padding.
// The padding character must not be '\r' or '\n',
// must not be contained in the encoding's alphabet,
// must not be negative, and must be a rune equal or below '\xff'.
// Padding characters above '\x7f' are encoded as their exact byte value
// rather than using the UTF-8 representation of the codepoint.
func (enc Encoding) WithPadding(padding rune) *Encoding {
	// Validate the requested padding before mutating the copy.
	if padding == '\r' || padding == '\n' || padding < NoPadding || padding > 0xff {
		panic("invalid padding")
	}
	if padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex {
		panic("padding contained in alphabet")
	}
	// enc is a value receiver, so this mutates only the copy.
	enc.padChar = padding
	return &enc
}
// Strict creates a new encoding identical to enc except with
// strict decoding enabled. In this mode, the decoder requires that
// trailing padding bits are zero, as described in RFC 4648 section 3.5.
//
// Note that the input is still malleable, as new line characters
// (CR and LF) are still ignored.
func (enc Encoding) Strict() *Encoding {
	// Value receiver: only the returned copy has strict set.
	enc.strict = true
	return &enc
}
// StdEncoding is the standard base64 encoding, as defined in RFC 4648.
var StdEncoding = NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")

// URLEncoding is the alternate base64 encoding defined in RFC 4648.
// It is typically used in URLs and file names.
var URLEncoding = NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_")

// RawStdEncoding is the standard raw, unpadded base64 encoding,
// as defined in RFC 4648 section 3.2.
// This is the same as [StdEncoding] but omits padding characters.
var RawStdEncoding = StdEncoding.WithPadding(NoPadding)

// RawURLEncoding is the unpadded alternate base64 encoding defined in RFC 4648.
// It is typically used in URLs and file names.
// This is the same as [URLEncoding] but omits padding characters.
var RawURLEncoding = URLEncoding.WithPadding(NoPadding)
/*
 * Encoder
 */

// Encode encodes src using the encoding enc,
// writing [Encoding.EncodedLen](len(src)) bytes to dst.
//
// The encoding pads the output to a multiple of 4 bytes,
// so Encode is not appropriate for use on individual blocks
// of a large data stream. Use [NewEncoder] instead.
func (enc *Encoding) Encode(dst, src []byte) {
	if len(src) == 0 {
		return
	}
	// enc is a pointer receiver, so the use of enc.encode within the hot
	// loop below means a nil check at every operation. Lift that nil check
	// outside of the loop to speed up the encoder.
	_ = enc.encode

	for len(src) >= 3 {
		// Convert 3x 8bit source bytes into 4 bytes
		val := uint(src[0])<<16 | uint(src[1])<<8 | uint(src[2])

		_ = dst[3] // Eliminate bounds checks below.
		dst[0] = enc.encode[val>>18&0x3F]
		dst[1] = enc.encode[val>>12&0x3F]
		dst[2] = enc.encode[val>>6&0x3F]
		dst[3] = enc.encode[val&0x3F]

		src = src[3:]
		dst = dst[4:]
	}

	// Add the remaining small block (if any), padding the unused
	// output positions when the encoding uses padding.
	switch len(src) {
	case 0:
		return
	case 1:
		val := uint(src[0]) << 16
		dst[0] = enc.encode[val>>18&0x3F]
		dst[1] = enc.encode[val>>12&0x3F]
		if enc.padChar != NoPadding {
			dst[2] = byte(enc.padChar)
			dst[3] = byte(enc.padChar)
		}
	case 2:
		val := uint(src[0])<<16 | uint(src[1])<<8
		dst[0] = enc.encode[val>>18&0x3F]
		dst[1] = enc.encode[val>>12&0x3F]
		dst[2] = enc.encode[val>>6&0x3F]
		if enc.padChar != NoPadding {
			dst[3] = byte(enc.padChar)
		}
	}
}
// AppendEncode appends the base64 encoded src to dst
// and returns the extended buffer.
func (enc *Encoding) AppendEncode(dst, src []byte) []byte {
	// Grow dst by exactly the encoded size, encode into the tail,
	// and re-slice to expose the appended bytes.
	encLen := enc.EncodedLen(len(src))
	total := len(dst) + encLen
	dst = slices.Grow(dst, encLen)
	enc.Encode(dst[len(dst):][:encLen], src)
	return dst[:total]
}
// EncodeToString returns the base64 encoding of src.
func (enc *Encoding) EncodeToString(src []byte) string {
	// Allocate the exact encoded length and fill it.
	out := make([]byte, enc.EncodedLen(len(src)))
	enc.Encode(out, src)
	return string(out)
}
// encoder is an io.WriteCloser that base64-encodes written data in
// 3-byte quanta and forwards the encoded output to an underlying writer.
type encoder struct {
	err  error      // sticky error; once set, all further writes fail
	enc  *Encoding  // encoding scheme to use
	w    io.Writer  // destination for encoded output
	buf  [3]byte    // buffered data waiting to be encoded
	nbuf int        // number of bytes in buf
	out  [1024]byte // output buffer
}
// Write base64-encodes p and writes the result to the underlying
// writer. Input that does not fill a complete 3-byte quantum is held
// in e.buf until the next Write or Close.
func (e *encoder) Write(p []byte) (n int, err error) {
	if e.err != nil {
		return 0, e.err
	}

	// Leading fringe: complete a previously buffered partial quantum.
	if e.nbuf > 0 {
		var i int
		for i = 0; i < len(p) && e.nbuf < 3; i++ {
			e.buf[e.nbuf] = p[i]
			e.nbuf++
		}
		n += i
		p = p[i:]
		if e.nbuf < 3 {
			// Still not a full quantum; wait for more input.
			return
		}
		e.enc.Encode(e.out[:], e.buf[:])
		if _, e.err = e.w.Write(e.out[:4]); e.err != nil {
			return n, e.err
		}
		e.nbuf = 0
	}

	// Large interior chunks.
	for len(p) >= 3 {
		// nn is the number of input bytes that fill e.out exactly.
		nn := len(e.out) / 4 * 3
		if nn > len(p) {
			nn = len(p)
			nn -= nn % 3 // only encode whole quanta here
		}
		e.enc.Encode(e.out[:], p[:nn])
		if _, e.err = e.w.Write(e.out[0 : nn/3*4]); e.err != nil {
			return n, e.err
		}
		n += nn
		p = p[nn:]
	}

	// Trailing fringe: stash the remaining partial quantum.
	copy(e.buf[:], p)
	e.nbuf = len(p)
	n += len(p)
	return
}
// Close flushes any pending output from the encoder.
// It is an error to call Write after calling Close.
func (e *encoder) Close() error {
	// Flush a buffered partial quantum, if one exists and no prior
	// write failed.
	if e.nbuf > 0 && e.err == nil {
		pending := e.nbuf
		e.nbuf = 0
		e.enc.Encode(e.out[:], e.buf[:pending])
		_, e.err = e.w.Write(e.out[:e.enc.EncodedLen(pending)])
	}
	return e.err
}
// NewEncoder returns a new base64 stream encoder. Data written to
// the returned writer will be encoded using enc and then written to w.
// Base64 encodings operate in 4-byte blocks; when finished
// writing, the caller must Close the returned encoder to flush any
// partially written blocks.
func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser {
	return &encoder{enc: enc, w: w}
}
// EncodedLen returns the length in bytes of the base64 encoding
// of an input buffer of length n.
func (enc *Encoding) EncodedLen(n int) int {
	if enc.padChar != NoPadding {
		// Padded output is always whole 4-char quanta, 3 bytes each.
		return (n + 2) / 3 * 4
	}
	// Unpadded: minimum number of characters at 6 bits per character.
	return n/3*4 + (n%3*8+5)/6
}
/*
 * Decoder
 */

// CorruptInputError holds the offset of the first invalid byte in the
// base64 input that caused decoding to fail.
type CorruptInputError int64

// Error implements the error interface, reporting the byte offset.
func (e CorruptInputError) Error() string {
	return "illegal base64 data at input byte " + strconv.FormatInt(int64(e), 10)
}
// decodeQuantum decodes up to 4 base64 bytes. The received parameters are
// the destination buffer dst, the source buffer src and an index in the
// source buffer si.
// It returns the number of bytes read from src, the number of bytes written
// to dst, and an error, if any.
func (enc *Encoding) decodeQuantum(dst, src []byte, si int) (nsi, n int, err error) {
	// Decode quantum using the base64 alphabet
	var dbuf [4]byte
	dlen := 4 // number of valid (non-padding) chars in this quantum

	// Lift the nil check outside of the loop.
	_ = enc.decodeMap

	for j := 0; j < len(dbuf); j++ {
		if len(src) == si {
			// Input exhausted mid-quantum.
			switch {
			case j == 0:
				return si, 0, nil
			case j == 1, enc.padChar != NoPadding:
				return si, 0, CorruptInputError(si - j)
			}
			dlen = j
			break
		}
		in := src[si]
		si++

		out := enc.decodeMap[in]
		if out != 0xff {
			dbuf[j] = out
			continue
		}

		if in == '\n' || in == '\r' {
			// Whitespace does not consume a quantum position.
			j--
			continue
		}

		if rune(in) != enc.padChar {
			return si, 0, CorruptInputError(si - 1)
		}

		// We've reached the end and there's padding
		switch j {
		case 0, 1:
			// incorrect padding
			return si, 0, CorruptInputError(si - 1)
		case 2:
			// "==" is expected, the first "=" is already consumed.
			// skip over newlines
			for si < len(src) && (src[si] == '\n' || src[si] == '\r') {
				si++
			}
			if si == len(src) {
				// not enough padding
				return si, 0, CorruptInputError(len(src))
			}
			if rune(src[si]) != enc.padChar {
				// incorrect padding
				return si, 0, CorruptInputError(si - 1)
			}

			si++
		}

		// skip over newlines
		for si < len(src) && (src[si] == '\n' || src[si] == '\r') {
			si++
		}
		if si < len(src) {
			// trailing garbage
			err = CorruptInputError(si)
		}
		dlen = j
		break
	}

	// Convert 4x 6bit source bytes into 3 bytes
	val := uint(dbuf[0])<<18 | uint(dbuf[1])<<12 | uint(dbuf[2])<<6 | uint(dbuf[3])
	dbuf[2], dbuf[1], dbuf[0] = byte(val>>0), byte(val>>8), byte(val>>16)
	// Write only the bytes justified by dlen; in strict mode, reject
	// non-zero bits left over in unused positions (RFC 4648 s3.5).
	switch dlen {
	case 4:
		dst[2] = dbuf[2]
		dbuf[2] = 0
		fallthrough
	case 3:
		dst[1] = dbuf[1]
		if enc.strict && dbuf[2] != 0 {
			return si, 0, CorruptInputError(si - 1)
		}
		dbuf[1] = 0
		fallthrough
	case 2:
		dst[0] = dbuf[0]
		if enc.strict && (dbuf[1] != 0 || dbuf[2] != 0) {
			return si, 0, CorruptInputError(si - 2)
		}
	}

	return si, dlen - 1, err
}
// AppendDecode appends the base64 decoded src to dst
// and returns the extended buffer.
// If the input is malformed, it returns the partially decoded src and an error.
// New line characters (\r and \n) are ignored.
func (enc *Encoding) AppendDecode(dst, src []byte) ([]byte, error) {
	// Compute the output size ignoring trailing padding characters so
	// we do not over allocate.
	trimmed := len(src)
	for trimmed > 0 && rune(src[trimmed-1]) == enc.padChar {
		trimmed--
	}
	want := decodedLen(trimmed, NoPadding)
	dst = slices.Grow(dst, want)
	n, err := enc.Decode(dst[len(dst):][:want], src)
	return dst[:len(dst)+n], err
}
// DecodeString returns the bytes represented by the base64 string s.
// If the input is malformed, it returns the partially decoded data and
// [CorruptInputError]. New line characters (\r and \n) are ignored.
func (enc *Encoding) DecodeString(s string) ([]byte, error) {
	out := make([]byte, enc.DecodedLen(len(s)))
	n, err := enc.Decode(out, []byte(s))
	return out[:n], err
}
// decoder is an io.Reader that reads base64-encoded data from an
// underlying reader and yields the decoded bytes.
type decoder struct {
	err     error      // decode error to surface after output drains
	readErr error      // error from r.Read
	enc     *Encoding  // encoding scheme in use
	r       io.Reader  // source of encoded input (newline-filtered)
	buf     [1024]byte // leftover input
	nbuf    int        // number of bytes in buf
	out     []byte     // leftover decoded output
	outbuf  [1024 / 4 * 3]byte // backing storage for out
}
// Read implements io.Reader, returning decoded bytes. Decoded output
// that did not fit into p is retained in d.out for subsequent calls.
func (d *decoder) Read(p []byte) (n int, err error) {
	// Use leftover decoded output from last read.
	if len(d.out) > 0 {
		n = copy(p, d.out)
		d.out = d.out[n:]
		return n, nil
	}

	if d.err != nil {
		return 0, d.err
	}

	// This code assumes that d.r strips supported whitespace ('\r' and '\n').

	// Refill buffer until at least one whole 4-byte quantum is available
	// or the underlying reader reports an error.
	for d.nbuf < 4 && d.readErr == nil {
		nn := len(p) / 3 * 4
		if nn < 4 {
			nn = 4
		}
		if nn > len(d.buf) {
			nn = len(d.buf)
		}
		nn, d.readErr = d.r.Read(d.buf[d.nbuf:nn])
		d.nbuf += nn
	}

	if d.nbuf < 4 {
		if d.enc.padChar == NoPadding && d.nbuf > 0 {
			// Decode final fragment, without padding.
			var nw int
			nw, d.err = d.enc.Decode(d.outbuf[:], d.buf[:d.nbuf])
			d.nbuf = 0
			d.out = d.outbuf[:nw]
			n = copy(p, d.out)
			d.out = d.out[n:]
			if n > 0 || len(p) == 0 && len(d.out) > 0 {
				return n, nil
			}
			if d.err != nil {
				return 0, d.err
			}
		}
		d.err = d.readErr
		if d.err == io.EOF && d.nbuf > 0 {
			// Padded input cannot legitimately end mid-quantum.
			d.err = io.ErrUnexpectedEOF
		}
		return 0, d.err
	}

	// Decode chunk into p, or d.out and then p if p is too small.
	nr := d.nbuf / 4 * 4 // whole quanta only
	nw := d.nbuf / 4 * 3
	if nw > len(p) {
		nw, d.err = d.enc.Decode(d.outbuf[:], d.buf[:nr])
		d.out = d.outbuf[:nw]
		n = copy(p, d.out)
		d.out = d.out[n:]
	} else {
		n, d.err = d.enc.Decode(p, d.buf[:nr])
	}
	// Shift undecoded leftover bytes to the front of the buffer.
	d.nbuf -= nr
	copy(d.buf[:d.nbuf], d.buf[nr:])
	return n, d.err
}
// Decode decodes src using the encoding enc. It writes at most
// [Encoding.DecodedLen](len(src)) bytes to dst and returns the number of bytes
// written. The caller must ensure that dst is large enough to hold all
// the decoded data. If src contains invalid base64 data, it will return the
// number of bytes successfully written and [CorruptInputError].
// New line characters (\r and \n) are ignored.
func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
	if len(src) == 0 {
		return 0, nil
	}

	// Lift the nil check outside of the loop. enc.decodeMap is directly
	// used later in this function, to let the compiler know that the
	// receiver can't be nil.
	_ = enc.decodeMap

	si := 0
	// Fast path: decode two quanta at a time on 64-bit platforms,
	// falling back to decodeQuantum on any invalid byte.
	for strconv.IntSize >= 64 && len(src)-si >= 8 && len(dst)-n >= 8 {
		src2 := src[si : si+8]
		if dn, ok := assemble64(
			enc.decodeMap[src2[0]],
			enc.decodeMap[src2[1]],
			enc.decodeMap[src2[2]],
			enc.decodeMap[src2[3]],
			enc.decodeMap[src2[4]],
			enc.decodeMap[src2[5]],
			enc.decodeMap[src2[6]],
			enc.decodeMap[src2[7]],
		); ok {
			byteorder.BEPutUint64(dst[n:], dn)
			n += 6
			si += 8
		} else {
			var ninc int
			si, ninc, err = enc.decodeQuantum(dst[n:], src, si)
			n += ninc
			if err != nil {
				return n, err
			}
		}
	}

	// Single-quantum fast path.
	for len(src)-si >= 4 && len(dst)-n >= 4 {
		src2 := src[si : si+4]
		if dn, ok := assemble32(
			enc.decodeMap[src2[0]],
			enc.decodeMap[src2[1]],
			enc.decodeMap[src2[2]],
			enc.decodeMap[src2[3]],
		); ok {
			byteorder.BEPutUint32(dst[n:], dn)
			n += 3
			si += 4
		} else {
			var ninc int
			si, ninc, err = enc.decodeQuantum(dst[n:], src, si)
			n += ninc
			if err != nil {
				return n, err
			}
		}
	}

	// Remaining tail (padding, whitespace, or a short final quantum).
	for si < len(src) {
		var ninc int
		si, ninc, err = enc.decodeQuantum(dst[n:], src, si)
		n += ninc
		if err != nil {
			return n, err
		}
	}
	return n, err
}
// assemble32 assembles 4 base64 digits into 3 bytes.
// Each digit comes from the decode map, and will be 0xff
// if it came from an invalid character.
func assemble32(n1, n2, n3, n4 byte) (dn uint32, ok bool) {
	// Check that all the digits are valid. If any of them was 0xff, their
	// bitwise OR will be 0xff.
	if n1|n2|n3|n4 == 0xff {
		return 0, false
	}
	// Fold the four 6-bit digits together, then left-justify the
	// 24 payload bits within the uint32.
	dn = uint32(n1)
	dn = dn<<6 | uint32(n2)
	dn = dn<<6 | uint32(n3)
	dn = dn<<6 | uint32(n4)
	return dn << 8, true
}
// assemble64 assembles 8 base64 digits into 6 bytes.
// Each digit comes from the decode map, and will be 0xff
// if it came from an invalid character.
func assemble64(n1, n2, n3, n4, n5, n6, n7, n8 byte) (dn uint64, ok bool) {
	// Check that all the digits are valid. If any of them was 0xff, their
	// bitwise OR will be 0xff.
	if n1|n2|n3|n4|n5|n6|n7|n8 == 0xff {
		return 0, false
	}
	// Fold the eight 6-bit digits together, then left-justify the
	// 48 payload bits within the uint64.
	for _, d := range [8]byte{n1, n2, n3, n4, n5, n6, n7, n8} {
		dn = dn<<6 | uint64(d)
	}
	return dn << 16, true
}
type newlineFilteringReader struct {
wrapped io.Reader
}
func (r *newlineFilteringReader) Read(p []byte) (int, error) {
n, err := r.wrapped.Read(p)
for n > 0 {
offset := 0
for i, b := range p[:n] {
if b != '\r' && b != '\n' {
if i != offset {
p[offset] = b
}
offset++
}
}
if offset > 0 {
return offset, err
}
// Previous buffer entirely whitespace, read again
n, err = r.wrapped.Read(p)
}
return n, err
}
// NewDecoder constructs a new base64 stream decoder.
// The reader is wrapped so that '\r' and '\n' in the input are ignored.
func NewDecoder(enc *Encoding, r io.Reader) io.Reader {
	return &decoder{enc: enc, r: &newlineFilteringReader{r}}
}
// DecodedLen returns the maximum length in bytes of the decoded data
// corresponding to n bytes of base64-encoded data.
func (enc *Encoding) DecodedLen(n int) int {
	return decodedLen(n, enc.padChar)
}
// decodedLen computes the maximum decoded size of n base64 characters
// under the given padding policy.
func decodedLen(n int, padChar rune) int {
	if padChar != NoPadding {
		// Padded base64 should always be a multiple of 4 characters in length.
		return n / 4 * 3
	}
	// Unpadded data may end with a partial block of 2-3 characters
	// carrying 6 bits each.
	return n/4*3 + n%4*6/8
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package binary implements simple translation between numbers and byte
// sequences and encoding and decoding of varints.
//
// Numbers are translated by reading and writing fixed-size values.
// A fixed-size value is either a fixed-size arithmetic
// type (bool, int8, uint8, int16, float32, complex64, ...)
// or an array or struct containing only fixed-size values.
//
// The varint functions encode and decode single integer values using
// a variable-length encoding; smaller values require fewer bytes.
// For a specification, see
// https://developers.google.com/protocol-buffers/docs/encoding.
//
// This package favors simplicity over efficiency. Clients that require
// high-performance serialization, especially for large data structures,
// should look at more advanced solutions such as the [encoding/gob]
// package or [google.golang.org/protobuf] for protocol buffers.
package binary
import (
"errors"
"io"
"math"
"reflect"
"slices"
"sync"
)
// errBufferTooSmall is returned when a caller-supplied buffer is too
// small to hold the data being encoded or decoded.
var errBufferTooSmall = errors.New("buffer too small")
// A ByteOrder specifies how to convert byte slices into
// 16-, 32-, or 64-bit unsigned integers.
//
// It is implemented by [LittleEndian], [BigEndian], and [NativeEndian].
type ByteOrder interface {
	Uint16([]byte) uint16    // decode b[0:2]
	Uint32([]byte) uint32    // decode b[0:4]
	Uint64([]byte) uint64    // decode b[0:8]
	PutUint16([]byte, uint16) // encode into b[0:2]
	PutUint32([]byte, uint32) // encode into b[0:4]
	PutUint64([]byte, uint64) // encode into b[0:8]
	String() string
}
// AppendByteOrder specifies how to append 16-, 32-, or 64-bit unsigned integers
// into a byte slice.
//
// It is implemented by [LittleEndian], [BigEndian], and [NativeEndian].
type AppendByteOrder interface {
	AppendUint16([]byte, uint16) []byte
	AppendUint32([]byte, uint32) []byte
	AppendUint64([]byte, uint64) []byte
	String() string
}
// LittleEndian is the little-endian implementation of [ByteOrder] and [AppendByteOrder].
var LittleEndian littleEndian

// BigEndian is the big-endian implementation of [ByteOrder] and [AppendByteOrder].
var BigEndian bigEndian
// littleEndian implements ByteOrder and AppendByteOrder with the
// least-significant byte first.
type littleEndian struct{}

// Uint16 returns the uint16 representation of b[0:2].
func (littleEndian) Uint16(b []byte) uint16 {
	_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
	lo, hi := uint16(b[0]), uint16(b[1])
	return hi<<8 | lo
}

// PutUint16 stores v into b[0:2].
func (littleEndian) PutUint16(b []byte, v uint16) {
	_ = b[1] // early bounds check to guarantee safety of writes below
	b[0], b[1] = byte(v), byte(v>>8)
}

// AppendUint16 appends the bytes of v to b and returns the appended slice.
func (littleEndian) AppendUint16(b []byte, v uint16) []byte {
	return append(b, byte(v), byte(v>>8))
}

// Uint32 returns the uint32 representation of b[0:4].
func (littleEndian) Uint32(b []byte) uint32 {
	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
	lo := uint32(b[0]) | uint32(b[1])<<8
	hi := uint32(b[2]) | uint32(b[3])<<8
	return hi<<16 | lo
}

// PutUint32 stores v into b[0:4].
func (littleEndian) PutUint32(b []byte, v uint32) {
	_ = b[3] // early bounds check to guarantee safety of writes below
	b[0], b[1], b[2], b[3] = byte(v), byte(v>>8), byte(v>>16), byte(v>>24)
}

// AppendUint32 appends the bytes of v to b and returns the appended slice.
func (littleEndian) AppendUint32(b []byte, v uint32) []byte {
	return append(b, byte(v), byte(v>>8), byte(v>>16), byte(v>>24))
}

// Uint64 returns the uint64 representation of b[0:8].
func (littleEndian) Uint64(b []byte) uint64 {
	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
	lo := uint64(littleEndian{}.Uint32(b))
	hi := uint64(littleEndian{}.Uint32(b[4:]))
	return hi<<32 | lo
}

// PutUint64 stores v into b[0:8].
func (littleEndian) PutUint64(b []byte, v uint64) {
	_ = b[7] // early bounds check to guarantee safety of writes below
	for i := range b[:8] {
		b[i] = byte(v >> (8 * i))
	}
}

// AppendUint64 appends the bytes of v to b and returns the appended slice.
func (littleEndian) AppendUint64(b []byte, v uint64) []byte {
	for i := 0; i < 8; i++ {
		b = append(b, byte(v))
		v >>= 8
	}
	return b
}

func (littleEndian) String() string { return "LittleEndian" }

func (littleEndian) GoString() string { return "binary.LittleEndian" }
// bigEndian implements ByteOrder and AppendByteOrder with the
// most-significant byte first.
type bigEndian struct{}

// Uint16 returns the uint16 representation of b[0:2].
func (bigEndian) Uint16(b []byte) uint16 {
	_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
	return uint16(b[0])<<8 | uint16(b[1])
}

// PutUint16 stores v into b[0:2].
func (bigEndian) PutUint16(b []byte, v uint16) {
	_ = b[1] // early bounds check to guarantee safety of writes below
	b[0], b[1] = byte(v>>8), byte(v)
}

// AppendUint16 appends the bytes of v to b and returns the appended slice.
func (bigEndian) AppendUint16(b []byte, v uint16) []byte {
	return append(b, byte(v>>8), byte(v))
}

// Uint32 returns the uint32 representation of b[0:4].
func (bigEndian) Uint32(b []byte) uint32 {
	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
	hi := uint32(b[0])<<8 | uint32(b[1])
	lo := uint32(b[2])<<8 | uint32(b[3])
	return hi<<16 | lo
}

// PutUint32 stores v into b[0:4].
func (bigEndian) PutUint32(b []byte, v uint32) {
	_ = b[3] // early bounds check to guarantee safety of writes below
	b[0], b[1], b[2], b[3] = byte(v>>24), byte(v>>16), byte(v>>8), byte(v)
}

// AppendUint32 appends the bytes of v to b and returns the appended slice.
func (bigEndian) AppendUint32(b []byte, v uint32) []byte {
	return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}

// Uint64 returns the uint64 representation of b[0:8].
func (bigEndian) Uint64(b []byte) uint64 {
	_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
	hi := uint64(bigEndian{}.Uint32(b))
	lo := uint64(bigEndian{}.Uint32(b[4:]))
	return hi<<32 | lo
}

// PutUint64 stores v into b[0:8].
func (bigEndian) PutUint64(b []byte, v uint64) {
	_ = b[7] // early bounds check to guarantee safety of writes below
	for i := range b[:8] {
		b[i] = byte(v >> (8 * (7 - i)))
	}
}

// AppendUint64 appends the bytes of v to b and returns the appended slice.
func (bigEndian) AppendUint64(b []byte, v uint64) []byte {
	for shift := 56; shift >= 0; shift -= 8 {
		b = append(b, byte(v>>shift))
	}
	return b
}

func (bigEndian) String() string { return "BigEndian" }

func (bigEndian) GoString() string { return "binary.BigEndian" }
// String implements [ByteOrder] for the platform-native order.
func (nativeEndian) String() string { return "NativeEndian" }

// GoString returns the Go syntax for the NativeEndian value.
func (nativeEndian) GoString() string { return "binary.NativeEndian" }
// Read reads structured binary data from r into data.
// Data must be a pointer to a fixed-size value or a slice
// of fixed-size values.
// Bytes read from r are decoded using the specified byte order
// and written to successive fields of the data.
// When decoding boolean values, a zero byte is decoded as false, and
// any other non-zero byte is decoded as true.
// When reading into structs, the field data for fields with
// blank (_) field names is skipped; i.e., blank field names
// may be used for padding.
// When reading into a struct, all non-blank fields must be exported
// or Read may panic.
//
// The error is [io.EOF] only if no bytes were read.
// If an [io.EOF] happens after reading some but not all the bytes,
// Read returns [io.ErrUnexpectedEOF].
func Read(r io.Reader, order ByteOrder, data any) error {
	// Fast path for basic types and slices.
	// intDataSize reports a non-zero size only for types decodeFast
	// can handle without reflection.
	if n, _ := intDataSize(data); n != 0 {
		bs := make([]byte, n)
		if _, err := io.ReadFull(r, bs); err != nil {
			return err
		}

		if decodeFast(bs, order, data) {
			return nil
		}
	}

	// Fallback to reflect-based decoding.
	v := reflect.ValueOf(data)
	size := -1
	switch v.Kind() {
	case reflect.Pointer:
		v = v.Elem()
		size = dataSize(v)
	case reflect.Slice:
		size = dataSize(v)
	}
	if size < 0 {
		return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String())
	}

	d := &decoder{order: order, buf: make([]byte, size)}
	if _, err := io.ReadFull(r, d.buf); err != nil {
		return err
	}
	d.value(v)
	return nil
}
// Decode decodes binary data from buf into data according to
// the given byte order.
// It returns an error if buf is too small, otherwise the number of
// bytes consumed from buf.
func Decode(buf []byte, order ByteOrder, data any) (int, error) {
	// Fast path: basic fixed-size types and slices of them avoid reflection.
	if n, _ := intDataSize(data); n != 0 {
		if len(buf) < n {
			return 0, errBufferTooSmall
		}
		if decodeFast(buf, order, data) {
			return n, nil
		}
	}
	// Fallback to reflect-based decoding.
	size := -1
	v := reflect.ValueOf(data)
	if k := v.Kind(); k == reflect.Pointer {
		v = v.Elem()
		size = dataSize(v)
	} else if k == reflect.Slice {
		size = dataSize(v)
	}
	switch {
	case size < 0:
		return 0, errors.New("binary.Decode: invalid type " + reflect.TypeOf(data).String())
	case len(buf) < size:
		return 0, errBufferTooSmall
	}
	dec := decoder{order: order, buf: buf[:size]}
	dec.value(v)
	return size, nil
}
// decodeFast decodes bs into data for the basic fixed-size types and
// slices of them, reporting whether data's type was handled. bs must
// contain at least as many bytes as intDataSize reports for data.
func decodeFast(bs []byte, order ByteOrder, data any) bool {
	switch data := data.(type) {
	case *bool:
		*data = bs[0] != 0
	case *int8:
		*data = int8(bs[0])
	case *uint8:
		*data = bs[0]
	case *int16:
		*data = int16(order.Uint16(bs))
	case *uint16:
		*data = order.Uint16(bs)
	case *int32:
		*data = int32(order.Uint32(bs))
	case *uint32:
		*data = order.Uint32(bs)
	case *int64:
		*data = int64(order.Uint64(bs))
	case *uint64:
		*data = order.Uint64(bs)
	case *float32:
		*data = math.Float32frombits(order.Uint32(bs))
	case *float64:
		*data = math.Float64frombits(order.Uint64(bs))
	case []bool:
		for i, x := range bs { // Easier to loop over the input for 8-bit values.
			data[i] = x != 0
		}
	case []int8:
		for i, x := range bs {
			data[i] = int8(x)
		}
	case []uint8:
		copy(data, bs)
	case []int16:
		for i := range data {
			data[i] = int16(order.Uint16(bs[2*i:]))
		}
	case []uint16:
		for i := range data {
			data[i] = order.Uint16(bs[2*i:])
		}
	case []int32:
		for i := range data {
			data[i] = int32(order.Uint32(bs[4*i:]))
		}
	case []uint32:
		for i := range data {
			data[i] = order.Uint32(bs[4*i:])
		}
	case []int64:
		for i := range data {
			data[i] = int64(order.Uint64(bs[8*i:]))
		}
	case []uint64:
		for i := range data {
			data[i] = order.Uint64(bs[8*i:])
		}
	case []float32:
		for i := range data {
			data[i] = math.Float32frombits(order.Uint32(bs[4*i:]))
		}
	case []float64:
		for i := range data {
			data[i] = math.Float64frombits(order.Uint64(bs[8*i:]))
		}
	default:
		// Not a basic type; caller falls back to reflection.
		return false
	}
	return true
}
// Write writes the binary representation of data into w.
// Data must be a fixed-size value or a slice of fixed-size
// values, or a pointer to such data.
// Boolean values encode as one byte: 1 for true, and 0 for false.
// Bytes written to w are encoded using the specified byte order
// and read from successive fields of the data.
// When writing structs, zero values are written for fields
// with blank (_) field names.
func Write(w io.Writer, order ByteOrder, data any) error {
	// Fast path: basic fixed-size types and slices of them avoid reflection.
	if n, ready := intDataSize(data); n != 0 {
		out := ready // non-nil only when data is already a []byte
		if out == nil {
			out = make([]byte, n)
			encodeFast(out, order, data)
		}
		_, err := w.Write(out)
		return err
	}
	// Fallback to reflect-based encoding through a staging buffer.
	v := reflect.Indirect(reflect.ValueOf(data))
	size := dataSize(v)
	if size < 0 {
		return errors.New("binary.Write: some values are not fixed-sized in type " + reflect.TypeOf(data).String())
	}
	staging := make([]byte, size)
	enc := encoder{order: order, buf: staging}
	enc.value(v)
	_, err := w.Write(staging)
	return err
}
// Encode encodes the binary representation of data into buf according to
// the given byte order.
// It returns an error if buf is too small, otherwise the number of
// bytes written into buf.
func Encode(buf []byte, order ByteOrder, data any) (int, error) {
	// Fast path: basic fixed-size types and slices of them avoid reflection.
	if n, _ := intDataSize(data); n != 0 {
		if len(buf) < n {
			return 0, errBufferTooSmall
		}
		encodeFast(buf, order, data)
		return n, nil
	}
	// Fallback to reflect-based encoding.
	v := reflect.Indirect(reflect.ValueOf(data))
	size := dataSize(v)
	switch {
	case size < 0:
		return 0, errors.New("binary.Encode: some values are not fixed-sized in type " + reflect.TypeOf(data).String())
	case len(buf) < size:
		return 0, errBufferTooSmall
	}
	enc := encoder{order: order, buf: buf}
	enc.value(v)
	return size, nil
}
// Append appends the binary representation of data to buf.
// buf may be nil, in which case a new buffer will be allocated.
// See [Write] on which data are acceptable.
// It returns the (possibly extended) buffer containing data or an error.
func Append(buf []byte, order ByteOrder, data any) ([]byte, error) {
	// Fast path: basic fixed-size types encode straight into the grown tail.
	if n, _ := intDataSize(data); n != 0 {
		grown, tail := ensure(buf, n)
		encodeFast(tail, order, data)
		return grown, nil
	}
	// Fallback to reflect-based encoding.
	v := reflect.Indirect(reflect.ValueOf(data))
	size := dataSize(v)
	if size < 0 {
		return nil, errors.New("binary.Append: some values are not fixed-sized in type " + reflect.TypeOf(data).String())
	}
	grown, tail := ensure(buf, size)
	enc := encoder{order: order, buf: tail}
	enc.value(v)
	return grown, nil
}
// encodeFast encodes data into bs for the basic fixed-size types and
// slices of them. It must only be called with data whose type was
// accepted by intDataSize; bs must be at least that size.
func encodeFast(bs []byte, order ByteOrder, data any) {
	switch v := data.(type) {
	case *bool:
		if *v {
			bs[0] = 1
		} else {
			bs[0] = 0
		}
	case bool:
		if v {
			bs[0] = 1
		} else {
			bs[0] = 0
		}
	case []bool:
		for i, x := range v {
			if x {
				bs[i] = 1
			} else {
				bs[i] = 0
			}
		}
	case *int8:
		bs[0] = byte(*v)
	case int8:
		bs[0] = byte(v)
	case []int8:
		for i, x := range v {
			bs[i] = byte(x)
		}
	case *uint8:
		bs[0] = *v
	case uint8:
		bs[0] = v
	case []uint8:
		copy(bs, v)
	case *int16:
		order.PutUint16(bs, uint16(*v))
	case int16:
		order.PutUint16(bs, uint16(v))
	case []int16:
		for i, x := range v {
			order.PutUint16(bs[2*i:], uint16(x))
		}
	case *uint16:
		order.PutUint16(bs, *v)
	case uint16:
		order.PutUint16(bs, v)
	case []uint16:
		for i, x := range v {
			order.PutUint16(bs[2*i:], x)
		}
	case *int32:
		order.PutUint32(bs, uint32(*v))
	case int32:
		order.PutUint32(bs, uint32(v))
	case []int32:
		for i, x := range v {
			order.PutUint32(bs[4*i:], uint32(x))
		}
	case *uint32:
		order.PutUint32(bs, *v)
	case uint32:
		order.PutUint32(bs, v)
	case []uint32:
		for i, x := range v {
			order.PutUint32(bs[4*i:], x)
		}
	case *int64:
		order.PutUint64(bs, uint64(*v))
	case int64:
		order.PutUint64(bs, uint64(v))
	case []int64:
		for i, x := range v {
			order.PutUint64(bs[8*i:], uint64(x))
		}
	case *uint64:
		order.PutUint64(bs, *v)
	case uint64:
		order.PutUint64(bs, v)
	case []uint64:
		for i, x := range v {
			order.PutUint64(bs[8*i:], x)
		}
	case *float32:
		// Floats are encoded via their IEEE 754 bit patterns.
		order.PutUint32(bs, math.Float32bits(*v))
	case float32:
		order.PutUint32(bs, math.Float32bits(v))
	case []float32:
		for i, x := range v {
			order.PutUint32(bs[4*i:], math.Float32bits(x))
		}
	case *float64:
		order.PutUint64(bs, math.Float64bits(*v))
	case float64:
		order.PutUint64(bs, math.Float64bits(v))
	case []float64:
		for i, x := range v {
			order.PutUint64(bs[8*i:], math.Float64bits(x))
		}
	}
}
// Size returns how many bytes [Write] would generate to encode the value v, which
// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data.
// If v is neither of these, Size returns -1.
func Size(v any) int {
	// Values and slices of basic types are answered without reflection.
	// Non-nil pointers to basic types report their element size; nil
	// pointers fall through to the -1 at the bottom. Everything else is
	// delegated to the reflect-based dataSize in the default clause.
	switch data := v.(type) {
	case bool, int8, uint8:
		return 1
	case int16, uint16:
		return 2
	case int32, uint32, float32:
		return 4
	case int64, uint64, float64:
		return 8
	case []bool:
		return len(data)
	case []int8:
		return len(data)
	case []uint8:
		return len(data)
	case []int16:
		return 2 * len(data)
	case []uint16:
		return 2 * len(data)
	case []int32:
		return 4 * len(data)
	case []uint32:
		return 4 * len(data)
	case []float32:
		return 4 * len(data)
	case []int64:
		return 8 * len(data)
	case []uint64:
		return 8 * len(data)
	case []float64:
		return 8 * len(data)
	case *bool:
		if data != nil {
			return 1
		}
	case *int8:
		if data != nil {
			return 1
		}
	case *uint8:
		if data != nil {
			return 1
		}
	case *int16:
		if data != nil {
			return 2
		}
	case *uint16:
		if data != nil {
			return 2
		}
	case *int32:
		if data != nil {
			return 4
		}
	case *uint32:
		if data != nil {
			return 4
		}
	case *float32:
		if data != nil {
			return 4
		}
	case *int64:
		if data != nil {
			return 8
		}
	case *uint64:
		if data != nil {
			return 8
		}
	case *float64:
		if data != nil {
			return 8
		}
	default:
		return dataSize(reflect.Indirect(reflect.ValueOf(v)))
	}
	return -1 // nil pointer to a basic type
}
// structSize caches computed struct sizes so repeated encodes/decodes of
// the same struct type skip the per-field walk.
var structSize sync.Map // map[reflect.Type]int

// dataSize returns the number of bytes the actual data represented by v occupies in memory.
// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice
// it returns the length of the slice times the element size and does not count the memory
// occupied by the header. If the type of v is not acceptable, dataSize returns -1.
func dataSize(v reflect.Value) int {
	switch v.Kind() {
	case reflect.Slice, reflect.Array:
		t := v.Type().Elem()
		if size, ok := structSize.Load(t); ok {
			return size.(int) * v.Len()
		}
		size := sizeof(t)
		if size >= 0 {
			// Only struct element sizes are worth caching; basic types
			// are cheap to recompute.
			if t.Kind() == reflect.Struct {
				structSize.Store(t, size)
			}
			return size * v.Len()
		}
	case reflect.Struct:
		t := v.Type()
		if size, ok := structSize.Load(t); ok {
			return size.(int)
		}
		size := sizeof(t)
		// Note: -1 (not fixed-size) is cached too, so invalid structs
		// are rejected quickly on subsequent calls.
		structSize.Store(t, size)
		return size
	default:
		if v.IsValid() {
			return sizeof(v.Type())
		}
	}
	return -1
}
// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable.
func sizeof(t reflect.Type) int {
switch t.Kind() {
case reflect.Array:
if s := sizeof(t.Elem()); s >= 0 {
return s * t.Len()
}
case reflect.Struct:
sum := 0
for i, n := 0, t.NumField(); i < n; i++ {
s := sizeof(t.Field(i).Type)
if s < 0 {
return -1
}
sum += s
}
return sum
case reflect.Bool,
reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
return int(t.Size())
}
return -1
}
// coder holds the state shared by the reflect-based decoder and encoder:
// the byte order in use, the working buffer, and the current offset into it.
type coder struct {
	order  ByteOrder
	buf    []byte
	offset int
}

// decoder reads successive values out of buf; encoder writes them in.
type decoder coder
type encoder coder
// bool consumes one byte; any non-zero byte decodes as true.
func (d *decoder) bool() bool {
	x := d.buf[d.offset]
	d.offset++
	return x != 0
}

// bool writes one byte: 1 for true, 0 for false.
func (e *encoder) bool(x bool) {
	if x {
		e.buf[e.offset] = 1
	} else {
		e.buf[e.offset] = 0
	}
	e.offset++
}

// uint8 consumes and returns one byte.
func (d *decoder) uint8() uint8 {
	x := d.buf[d.offset]
	d.offset++
	return x
}

// uint8 writes one byte.
func (e *encoder) uint8(x uint8) {
	e.buf[e.offset] = x
	e.offset++
}

// uint16 consumes two bytes in the configured byte order.
func (d *decoder) uint16() uint16 {
	x := d.order.Uint16(d.buf[d.offset : d.offset+2])
	d.offset += 2
	return x
}

// uint16 writes two bytes in the configured byte order.
func (e *encoder) uint16(x uint16) {
	e.order.PutUint16(e.buf[e.offset:e.offset+2], x)
	e.offset += 2
}

// uint32 consumes four bytes in the configured byte order.
func (d *decoder) uint32() uint32 {
	x := d.order.Uint32(d.buf[d.offset : d.offset+4])
	d.offset += 4
	return x
}

// uint32 writes four bytes in the configured byte order.
func (e *encoder) uint32(x uint32) {
	e.order.PutUint32(e.buf[e.offset:e.offset+4], x)
	e.offset += 4
}

// uint64 consumes eight bytes in the configured byte order.
func (d *decoder) uint64() uint64 {
	x := d.order.Uint64(d.buf[d.offset : d.offset+8])
	d.offset += 8
	return x
}

// uint64 writes eight bytes in the configured byte order.
func (e *encoder) uint64(x uint64) {
	e.order.PutUint64(e.buf[e.offset:e.offset+8], x)
	e.offset += 8
}

// Signed variants reinterpret the unsigned bit patterns.
func (d *decoder) int8() int8 { return int8(d.uint8()) }

func (e *encoder) int8(x int8) { e.uint8(uint8(x)) }

func (d *decoder) int16() int16 { return int16(d.uint16()) }

func (e *encoder) int16(x int16) { e.uint16(uint16(x)) }

func (d *decoder) int32() int32 { return int32(d.uint32()) }

func (e *encoder) int32(x int32) { e.uint32(uint32(x)) }

func (d *decoder) int64() int64 { return int64(d.uint64()) }

func (e *encoder) int64(x int64) { e.uint64(uint64(x)) }
// value decodes the bytes at the current offset into v, recursing through
// arrays, slices, and structs. Blank (_) struct fields are skipped, leaving
// their bytes unconsumed into the field but still advancing the offset.
func (d *decoder) value(v reflect.Value) {
	switch v.Kind() {
	case reflect.Array:
		l := v.Len()
		for i := 0; i < l; i++ {
			d.value(v.Index(i))
		}
	case reflect.Struct:
		t := v.Type()
		l := v.NumField()
		for i := 0; i < l; i++ {
			// Note: Calling v.CanSet() below is an optimization.
			// It would be sufficient to check the field name,
			// but creating the StructField info for each field is
			// costly (run "go test -bench=ReadStruct" and compare
			// results when making changes to this code).
			if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
				d.value(v)
			} else {
				d.skip(v)
			}
		}
	case reflect.Slice:
		l := v.Len()
		for i := 0; i < l; i++ {
			d.value(v.Index(i))
		}
	case reflect.Bool:
		v.SetBool(d.bool())
	case reflect.Int8:
		v.SetInt(int64(d.int8()))
	case reflect.Int16:
		v.SetInt(int64(d.int16()))
	case reflect.Int32:
		v.SetInt(int64(d.int32()))
	case reflect.Int64:
		v.SetInt(d.int64())
	case reflect.Uint8:
		v.SetUint(uint64(d.uint8()))
	case reflect.Uint16:
		v.SetUint(uint64(d.uint16()))
	case reflect.Uint32:
		v.SetUint(uint64(d.uint32()))
	case reflect.Uint64:
		v.SetUint(d.uint64())
	case reflect.Float32:
		v.SetFloat(float64(math.Float32frombits(d.uint32())))
	case reflect.Float64:
		v.SetFloat(math.Float64frombits(d.uint64()))
	case reflect.Complex64:
		// Complex values are stored as real part then imaginary part.
		v.SetComplex(complex(
			float64(math.Float32frombits(d.uint32())),
			float64(math.Float32frombits(d.uint32())),
		))
	case reflect.Complex128:
		v.SetComplex(complex(
			math.Float64frombits(d.uint64()),
			math.Float64frombits(d.uint64()),
		))
	}
}
// value encodes v into the buffer at the current offset, recursing through
// arrays, slices, and structs. Blank (_) struct fields are written as zeros.
func (e *encoder) value(v reflect.Value) {
	switch v.Kind() {
	case reflect.Array:
		l := v.Len()
		for i := 0; i < l; i++ {
			e.value(v.Index(i))
		}
	case reflect.Struct:
		t := v.Type()
		l := v.NumField()
		for i := 0; i < l; i++ {
			// see comment for corresponding code in decoder.value()
			if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
				e.value(v)
			} else {
				e.skip(v)
			}
		}
	case reflect.Slice:
		l := v.Len()
		for i := 0; i < l; i++ {
			e.value(v.Index(i))
		}
	case reflect.Bool:
		e.bool(v.Bool())
	case reflect.Int8:
		e.int8(int8(v.Int()))
	case reflect.Int16:
		e.int16(int16(v.Int()))
	case reflect.Int32:
		e.int32(int32(v.Int()))
	case reflect.Int64:
		e.int64(v.Int())
	case reflect.Uint8:
		e.uint8(uint8(v.Uint()))
	case reflect.Uint16:
		e.uint16(uint16(v.Uint()))
	case reflect.Uint32:
		e.uint32(uint32(v.Uint()))
	case reflect.Uint64:
		e.uint64(v.Uint())
	case reflect.Float32:
		e.uint32(math.Float32bits(float32(v.Float())))
	case reflect.Float64:
		e.uint64(math.Float64bits(v.Float()))
	case reflect.Complex64:
		// Complex values are stored as real part then imaginary part.
		x := v.Complex()
		e.uint32(math.Float32bits(float32(real(x))))
		e.uint32(math.Float32bits(float32(imag(x))))
	case reflect.Complex128:
		x := v.Complex()
		e.uint64(math.Float64bits(real(x)))
		e.uint64(math.Float64bits(imag(x)))
	}
}
// skip advances past the encoding of v without decoding it; used for
// blank (_) struct fields.
func (d *decoder) skip(v reflect.Value) {
	d.offset += dataSize(v)
}

// skip writes zero bytes in place of v's encoding; used for blank (_)
// struct fields.
func (e *encoder) skip(v reflect.Value) {
	n := dataSize(v)
	clear(e.buf[e.offset : e.offset+n])
	e.offset += n
}
// intDataSize returns the size of the data required to represent the data when encoded,
// and optionally a byte slice containing the encoded data if no conversion is necessary.
// It returns zero, nil if the type cannot be implemented by the fast path in Read or Write.
func intDataSize(data any) (int, []byte) {
	switch v := data.(type) {
	case bool, int8, uint8, *bool, *int8, *uint8:
		return 1, nil
	case int16, uint16, *int16, *uint16:
		return 2, nil
	case int32, uint32, *int32, *uint32, float32, *float32:
		return 4, nil
	case int64, uint64, *int64, *uint64, float64, *float64:
		return 8, nil
	case []bool:
		return len(v), nil
	case []int8:
		return len(v), nil
	case []uint8:
		// Already in wire format: hand the slice back so Write can
		// emit it without copying.
		return len(v), v
	case []int16:
		return 2 * len(v), nil
	case []uint16:
		return 2 * len(v), nil
	case []int32:
		return 4 * len(v), nil
	case []uint32:
		return 4 * len(v), nil
	case []float32:
		return 4 * len(v), nil
	case []int64:
		return 8 * len(v), nil
	case []uint64:
		return 8 * len(v), nil
	case []float64:
		return 8 * len(v), nil
	}
	return 0, nil
}
// ensure grows buf to length len(buf) + n and returns the grown buffer
// and a slice starting at the original length of buf (that is, buf2[len(buf):]).
// The returned pos aliases buf2, so writes into pos land in buf2's tail.
func ensure(buf []byte, n int) (buf2, pos []byte) {
	old := len(buf)
	buf2 = slices.Grow(buf, n)[:old+n]
	pos = buf2[old:]
	return buf2, pos
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binary
// This file implements "varint" encoding of 64-bit integers.
// The encoding is:
// - unsigned integers are serialized 7 bits at a time, starting with the
// least significant bits
// - the most significant bit (msb) in each output byte indicates if there
// is a continuation byte (msb = 1)
// - signed integers are mapped to unsigned integers using "zig-zag"
// encoding: Positive values x are written as 2*x + 0, negative values
// are written as 2*(^x) + 1; that is, negative numbers are complemented
// and whether to complement is encoded in bit 0.
//
// Design note:
// At most 10 bytes are needed for 64-bit values. The encoding could
// be more dense: a full 64-bit value needs an extra byte just to hold bit 63.
// Instead, the msb of the previous byte could be used to hold bit 63 since we
// know there can't be more than 64 bits. This is a trivial improvement and
// would reduce the maximum encoding length to 9 bytes. However, it breaks the
// invariant that the msb is always the "continuation bit" and thus makes the
// format incompatible with a varint encoding for larger numbers (say 128-bit).
import (
"errors"
"io"
)
// MaxVarintLenN is the maximum length of a varint-encoded N-bit integer.
const (
	MaxVarintLen16 = 3  // ceil(16 / 7)
	MaxVarintLen32 = 5  // ceil(32 / 7)
	MaxVarintLen64 = 10 // ceil(64 / 7)
)
// AppendUvarint appends the varint-encoded form of x,
// as generated by [PutUvarint], to buf and returns the extended buffer.
func AppendUvarint(buf []byte, x uint64) []byte {
	// Emit seven bits per byte, least-significant group first;
	// a set high bit marks a continuation byte.
	for ; x >= 0x80; x >>= 7 {
		buf = append(buf, byte(x&0x7f)|0x80)
	}
	return append(buf, byte(x))
}
// PutUvarint encodes a uint64 into buf and returns the number of bytes written.
// If the buffer is too small, PutUvarint will panic.
func PutUvarint(buf []byte, x uint64) int {
	// Emit seven bits per byte, least-significant group first;
	// a set high bit marks a continuation byte.
	n := 0
	for x >= 0x80 {
		buf[n] = byte(x&0x7f) | 0x80
		x >>= 7
		n++
	}
	buf[n] = byte(x)
	return n + 1
}
// Uvarint decodes a uint64 from buf and returns that value and the
// number of bytes read (> 0). If an error occurred, the value is 0
// and the number of bytes n is <= 0 meaning:
//   - n == 0: buf too small;
//   - n < 0: value larger than 64 bits (overflow) and -n is the number of
//     bytes read.
func Uvarint(buf []byte) (uint64, int) {
	var x uint64
	var s uint
	for i, b := range buf {
		if i == MaxVarintLen64 {
			// Catch byte reads past MaxVarintLen64.
			// See issue https://golang.org/issues/41185
			return 0, -(i + 1) // overflow
		}
		if b < 0x80 {
			// Final byte: no continuation bit set.
			if i == MaxVarintLen64-1 && b > 1 {
				// The tenth byte may only contribute bit 63.
				return 0, -(i + 1) // overflow
			}
			return x | uint64(b)<<s, i + 1
		}
		x |= uint64(b&0x7f) << s
		s += 7
	}
	// Ran out of input before seeing a terminating byte.
	return 0, 0
}
// AppendVarint appends the varint-encoded form of x,
// as generated by [PutVarint], to buf and returns the extended buffer.
func AppendVarint(buf []byte, x int64) []byte {
	// Zig-zag map the signed value: x >= 0 becomes 2x,
	// x < 0 becomes 2|x| - 1, then emit it as an unsigned varint.
	u := uint64(x) << 1
	if x < 0 {
		u = ^u
	}
	for ; u >= 0x80; u >>= 7 {
		buf = append(buf, byte(u&0x7f)|0x80)
	}
	return append(buf, byte(u))
}
// PutVarint encodes an int64 into buf and returns the number of bytes written.
// If the buffer is too small, PutVarint will panic.
func PutVarint(buf []byte, x int64) int {
	// Zig-zag map the signed value: x >= 0 becomes 2x,
	// x < 0 becomes 2|x| - 1, then emit it as an unsigned varint.
	u := uint64(x) << 1
	if x < 0 {
		u = ^u
	}
	i := 0
	for u >= 0x80 {
		buf[i] = byte(u&0x7f) | 0x80
		u >>= 7
		i++
	}
	buf[i] = byte(u)
	return i + 1
}
// Varint decodes an int64 from buf and returns that value and the
// number of bytes read (> 0). If an error occurred, the value is 0
// and the number of bytes n is <= 0 with the following meaning:
//   - n == 0: buf too small;
//   - n < 0: value larger than 64 bits (overflow)
//     and -n is the number of bytes read.
func Varint(buf []byte) (int64, int) {
	u, n := Uvarint(buf) // ok to continue in presence of error
	// Undo the zig-zag mapping: the low bit selects the sign,
	// the remaining bits hold the magnitude.
	v := int64(u >> 1)
	if u&1 == 1 {
		v = ^v
	}
	return v, n
}
// errOverflow is returned by ReadUvarint/ReadVarint when the input
// encodes a value that does not fit in 64 bits.
var errOverflow = errors.New("binary: varint overflows a 64-bit integer")

// ReadUvarint reads an encoded unsigned integer from r and returns it as a uint64.
// The error is [io.EOF] only if no bytes were read.
// If an [io.EOF] happens after reading some but not all the bytes,
// ReadUvarint returns [io.ErrUnexpectedEOF].
func ReadUvarint(r io.ByteReader) (uint64, error) {
	var x uint64
	var s uint
	for i := 0; i < MaxVarintLen64; i++ {
		b, err := r.ReadByte()
		if err != nil {
			// EOF mid-value means the stream was truncated.
			if i > 0 && err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			return x, err
		}
		if b < 0x80 {
			// Final byte: no continuation bit set.
			if i == MaxVarintLen64-1 && b > 1 {
				// The tenth byte may only contribute bit 63.
				return x, errOverflow
			}
			return x | uint64(b)<<s, nil
		}
		x |= uint64(b&0x7f) << s
		s += 7
	}
	// Ten continuation bytes: the value cannot fit in 64 bits.
	return x, errOverflow
}
// ReadVarint reads an encoded signed integer from r and returns it as an int64.
// The error is [io.EOF] only if no bytes were read.
// If an [io.EOF] happens after reading some but not all the bytes,
// ReadVarint returns [io.ErrUnexpectedEOF].
func ReadVarint(r io.ByteReader) (int64, error) {
	u, err := ReadUvarint(r) // ok to continue in presence of error
	// Undo the zig-zag mapping: the low bit selects the sign,
	// the remaining bits hold the magnitude.
	v := int64(u >> 1)
	if u&1 == 1 {
		v = ^v
	}
	return v, err
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package csv reads and writes comma-separated values (CSV) files.
// There are many kinds of CSV files; this package supports the format
// described in RFC 4180, except that [Writer] uses LF
// instead of CRLF as newline character by default.
//
// A csv file contains zero or more records of one or more fields per record.
// Each record is separated by the newline character. The final record may
// optionally be followed by a newline character.
//
// field1,field2,field3
//
// White space is considered part of a field.
//
// Carriage returns before newline characters are silently removed.
//
// Blank lines are ignored. A line with only whitespace characters (excluding
// the ending newline character) is not considered a blank line.
//
// Fields which start and stop with the quote character " are called
// quoted-fields. The beginning and ending quote are not part of the
// field.
//
// The source:
//
// normal string,"quoted-field"
//
// results in the fields
//
// {`normal string`, `quoted-field`}
//
// Within a quoted-field a quote character followed by a second quote
// character is considered a single quote.
//
// "the ""word"" is true","a ""quoted-field"""
//
// results in
//
// {`the "word" is true`, `a "quoted-field"`}
//
// Newlines and commas may be included in a quoted-field
//
// "Multi-line
// field","comma is ,"
//
// results in
//
// {`Multi-line
// field`, `comma is ,`}
package csv
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"unicode"
"unicode/utf8"
)
// A ParseError is returned for parsing errors.
// Line and column numbers are 1-indexed.
type ParseError struct {
	StartLine int   // Line where the record starts
	Line      int   // Line where the error occurred
	Column    int   // Column (1-based byte index) where the error occurred
	Err       error // The actual error; one of the Err* sentinels below
}
// Error formats the parse failure, including the record's starting line
// when it differs from the line where the error occurred.
func (e *ParseError) Error() string {
	if e.Err == ErrFieldCount {
		return fmt.Sprintf("record on line %d: %v", e.Line, e.Err)
	}
	if e.StartLine == e.Line {
		return fmt.Sprintf("parse error on line %d, column %d: %v", e.Line, e.Column, e.Err)
	}
	return fmt.Sprintf("record on line %d; parse error on line %d, column %d: %v", e.StartLine, e.Line, e.Column, e.Err)
}
// Unwrap returns the underlying error, enabling [errors.Is] and [errors.As].
func (e *ParseError) Unwrap() error { return e.Err }
// These are the errors that can be returned in [ParseError.Err].
var (
	ErrBareQuote  = errors.New("bare \" in non-quoted-field")
	ErrQuote      = errors.New("extraneous or missing \" in quoted-field")
	ErrFieldCount = errors.New("wrong number of fields")

	// Deprecated: ErrTrailingComma is no longer used.
	ErrTrailingComma = errors.New("extra delimiter at end of line")
)

// errInvalidDelim is returned (unwrapped) when Comma or Comment is set to
// a rune that validDelim rejects, or when they are equal to each other.
var errInvalidDelim = errors.New("csv: invalid field or comment delimiter")
// validDelim reports whether r may be used as a field or comment delimiter:
// it must be a valid rune other than NUL, quote, CR, LF, and the Unicode
// replacement character.
func validDelim(r rune) bool {
	switch r {
	case 0, '"', '\r', '\n', utf8.RuneError:
		return false
	}
	return utf8.ValidRune(r)
}
// A Reader reads records from a CSV-encoded file.
//
// As returned by [NewReader], a Reader expects input conforming to RFC 4180.
// The exported fields can be changed to customize the details before the
// first call to [Reader.Read] or [Reader.ReadAll].
//
// The Reader converts all \r\n sequences in its input to plain \n,
// including in multiline field values, so that the returned data does
// not depend on which line-ending convention an input file uses.
type Reader struct {
	// Comma is the field delimiter.
	// It is set to comma (',') by NewReader.
	// Comma must be a valid rune and must not be \r, \n,
	// or the Unicode replacement character (0xFFFD).
	Comma rune

	// Comment, if not 0, is the comment character. Lines beginning with the
	// Comment character without preceding whitespace are ignored.
	// With leading whitespace the Comment character becomes part of the
	// field, even if TrimLeadingSpace is true.
	// Comment must be a valid rune and must not be \r, \n,
	// or the Unicode replacement character (0xFFFD).
	// It must also not be equal to Comma.
	Comment rune

	// FieldsPerRecord is the number of expected fields per record.
	// If FieldsPerRecord is positive, Read requires each record to
	// have the given number of fields. If FieldsPerRecord is 0, Read sets it to
	// the number of fields in the first record, so that future records must
	// have the same field count. If FieldsPerRecord is negative, no check is
	// made and records may have a variable number of fields.
	FieldsPerRecord int

	// If LazyQuotes is true, a quote may appear in an unquoted field and a
	// non-doubled quote may appear in a quoted field.
	LazyQuotes bool

	// If TrimLeadingSpace is true, leading white space in a field is ignored.
	// This is done even if the field delimiter, Comma, is white space.
	TrimLeadingSpace bool

	// ReuseRecord controls whether calls to Read may return a slice sharing
	// the backing array of the previous call's returned slice for performance.
	// By default, each call to Read returns newly allocated memory owned by the caller.
	ReuseRecord bool

	// Deprecated: TrailingComma is no longer used.
	TrailingComma bool

	// r is the buffered underlying input stream.
	r *bufio.Reader

	// numLine is the current line being read in the CSV file.
	numLine int

	// offset is the input stream byte offset of the current reader position.
	offset int64

	// rawBuffer is a line buffer only used by the readLine method.
	rawBuffer []byte

	// recordBuffer holds the unescaped fields, one after another.
	// The fields can be accessed by using the indexes in fieldIndexes.
	// E.g., For the row `a,"b","c""d",e`, recordBuffer will contain `abc"de`
	// and fieldIndexes will contain the indexes [1, 2, 5, 6].
	recordBuffer []byte

	// fieldIndexes is an index of fields inside recordBuffer.
	// The i'th field ends at offset fieldIndexes[i] in recordBuffer.
	fieldIndexes []int

	// fieldPositions is an index of field positions for the
	// last record returned by Read.
	fieldPositions []position

	// lastRecord is a record cache and only used when ReuseRecord == true.
	lastRecord []string
}
// NewReader returns a new Reader that reads from r.
func NewReader(r io.Reader) *Reader {
	reader := &Reader{r: bufio.NewReader(r)}
	reader.Comma = ',' // RFC 4180 default delimiter
	return reader
}
// Read reads one record (a slice of fields) from r.
// If the record has an unexpected number of fields,
// Read returns the record along with the error [ErrFieldCount].
// If the record contains a field that cannot be parsed,
// Read returns a partial record along with the parse error.
// The partial record contains all fields read before the error.
// If there is no data left to be read, Read returns nil, [io.EOF].
// If [Reader.ReuseRecord] is true, the returned slice may be shared
// between multiple calls to Read.
func (r *Reader) Read() (record []string, err error) {
	if !r.ReuseRecord {
		return r.readRecord(nil)
	}
	// Hand the previous record back as scratch space and remember the
	// result so the next call can reuse its backing array.
	record, err = r.readRecord(r.lastRecord)
	r.lastRecord = record
	return record, err
}
// FieldPos returns the line and column corresponding to
// the start of the field with the given index in the slice most recently
// returned by [Reader.Read]. Numbering of lines and columns starts at 1;
// columns are counted in bytes, not runes.
//
// If this is called with an out-of-bounds index, it panics.
func (r *Reader) FieldPos(field int) (line, column int) {
	if field < 0 || field >= len(r.fieldPositions) {
		panic("out of range index passed to FieldPos")
	}
	pos := r.fieldPositions[field]
	return pos.line, pos.col
}
// InputOffset returns the input stream byte offset of the current reader
// position. The offset gives the location of the end of the most recently
// read row and the beginning of the next row.
func (r *Reader) InputOffset() int64 {
	return r.offset
}
// position holds the 1-based line and column of a field in the input.
type position struct {
	line, col int
}
// ReadAll reads all the remaining records from r.
// Each record is a slice of fields.
// A successful call returns err == nil, not err == [io.EOF]. Because ReadAll is
// defined to read until EOF, it does not treat end of file as an error to be
// reported.
func (r *Reader) ReadAll() (records [][]string, err error) {
	for {
		rec, readErr := r.readRecord(nil)
		switch {
		case readErr == io.EOF:
			return records, nil
		case readErr != nil:
			return nil, readErr
		}
		records = append(records, rec)
	}
}
// readLine reads the next line (with the trailing endline).
// If EOF is hit without a trailing endline, it will be omitted.
// If some bytes were read, then the error is never [io.EOF].
// The result is only valid until the next call to readLine.
func (r *Reader) readLine() ([]byte, error) {
	line, err := r.r.ReadSlice('\n')
	if err == bufio.ErrBufferFull {
		// The line is longer than bufio's internal buffer: accumulate
		// the pieces into rawBuffer, which is reused across calls.
		r.rawBuffer = append(r.rawBuffer[:0], line...)
		for err == bufio.ErrBufferFull {
			line, err = r.r.ReadSlice('\n')
			r.rawBuffer = append(r.rawBuffer, line...)
		}
		line = r.rawBuffer
	}
	readSize := len(line)
	if readSize > 0 && err == io.EOF {
		err = nil
		// For backwards compatibility, drop trailing \r before EOF.
		if line[readSize-1] == '\r' {
			line = line[:readSize-1]
		}
	}
	r.numLine++
	// offset counts raw input bytes, before the \r\n normalization below.
	r.offset += int64(readSize)
	// Normalize \r\n to \n on all input lines.
	if n := len(line); n >= 2 && line[n-2] == '\r' && line[n-1] == '\n' {
		line[n-2] = '\n'
		line = line[:n-1]
	}
	return line, err
}
// lengthNL reports the number of bytes for the trailing \n: 1 when b ends
// in a newline, 0 otherwise.
func lengthNL(b []byte) int {
	if n := len(b); n > 0 && b[n-1] == '\n' {
		return 1
	}
	return 0
}
// nextRune returns the first rune in b, or utf8.RuneError when b is empty
// or starts with an invalid UTF-8 encoding.
func nextRune(b []byte) rune {
	c, _ := utf8.DecodeRune(b)
	return c
}
// readRecord reads and parses a single CSV record. Fields are accumulated
// into r.recordBuffer and then sliced out of one backing string; dst is
// reused when it has sufficient capacity. It returns the record's fields
// and the first error encountered, if any. pos/fieldPositions track the
// 1-based line/column of each field for error reporting.
func (r *Reader) readRecord(dst []string) ([]string, error) {
	// Comma and Comment must be valid, distinct delimiters.
	if r.Comma == r.Comment || !validDelim(r.Comma) || (r.Comment != 0 && !validDelim(r.Comment)) {
		return nil, errInvalidDelim
	}
	// Read line (automatically skipping past empty lines and any comments).
	var line []byte
	var errRead error
	for errRead == nil {
		line, errRead = r.readLine()
		if r.Comment != 0 && nextRune(line) == r.Comment {
			line = nil
			continue // Skip comment lines
		}
		if errRead == nil && len(line) == lengthNL(line) {
			line = nil
			continue // Skip empty lines
		}
		break
	}
	if errRead == io.EOF {
		return nil, errRead
	}
	// Parse each field in the record.
	var err error
	const quoteLen = len(`"`)
	commaLen := utf8.RuneLen(r.Comma)
	recLine := r.numLine // Starting line for record
	r.recordBuffer = r.recordBuffer[:0]
	r.fieldIndexes = r.fieldIndexes[:0]
	r.fieldPositions = r.fieldPositions[:0]
	pos := position{line: r.numLine, col: 1}
parseField:
	for {
		if r.TrimLeadingSpace {
			// Skip leading whitespace; i lands on the first non-space byte.
			i := bytes.IndexFunc(line, func(r rune) bool {
				return !unicode.IsSpace(r)
			})
			if i < 0 {
				// Line is all whitespace; keep the column pointing before any \n.
				i = len(line)
				pos.col -= lengthNL(line)
			}
			line = line[i:]
			pos.col += i
		}
		if len(line) == 0 || line[0] != '"' {
			// Non-quoted string field
			i := bytes.IndexRune(line, r.Comma)
			field := line
			if i >= 0 {
				field = field[:i]
			} else {
				// Last field on the line: strip the trailing newline, if any.
				field = field[:len(field)-lengthNL(field)]
			}
			// Check to make sure a quote does not appear in field.
			if !r.LazyQuotes {
				if j := bytes.IndexByte(field, '"'); j >= 0 {
					col := pos.col + j
					err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrBareQuote}
					break parseField
				}
			}
			r.recordBuffer = append(r.recordBuffer, field...)
			r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
			r.fieldPositions = append(r.fieldPositions, pos)
			if i >= 0 {
				// Advance past the field and the delimiter.
				line = line[i+commaLen:]
				pos.col += i + commaLen
				continue parseField
			}
			break parseField
		} else {
			// Quoted string field
			fieldPos := pos
			line = line[quoteLen:]
			pos.col += quoteLen
			for {
				i := bytes.IndexByte(line, '"')
				if i >= 0 {
					// Hit next quote.
					r.recordBuffer = append(r.recordBuffer, line[:i]...)
					line = line[i+quoteLen:]
					pos.col += i + quoteLen
					// Decide what the quote means from the rune that follows it.
					switch rn := nextRune(line); {
					case rn == '"':
						// `""` sequence (append quote).
						r.recordBuffer = append(r.recordBuffer, '"')
						line = line[quoteLen:]
						pos.col += quoteLen
					case rn == r.Comma:
						// `",` sequence (end of field).
						line = line[commaLen:]
						pos.col += commaLen
						r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
						r.fieldPositions = append(r.fieldPositions, fieldPos)
						continue parseField
					case lengthNL(line) == len(line):
						// `"\n` sequence (end of line).
						r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
						r.fieldPositions = append(r.fieldPositions, fieldPos)
						break parseField
					case r.LazyQuotes:
						// `"` sequence (bare quote).
						r.recordBuffer = append(r.recordBuffer, '"')
					default:
						// `"*` sequence (invalid non-escaped quote).
						err = &ParseError{StartLine: recLine, Line: r.numLine, Column: pos.col - quoteLen, Err: ErrQuote}
						break parseField
					}
				} else if len(line) > 0 {
					// Hit end of line (copy all data so far).
					r.recordBuffer = append(r.recordBuffer, line...)
					if errRead != nil {
						break parseField
					}
					pos.col += len(line)
					// Quoted fields may span lines; keep reading.
					line, errRead = r.readLine()
					if len(line) > 0 {
						pos.line++
						pos.col = 1
					}
					if errRead == io.EOF {
						errRead = nil
					}
				} else {
					// Abrupt end of file (EOF or error).
					if !r.LazyQuotes && errRead == nil {
						err = &ParseError{StartLine: recLine, Line: pos.line, Column: pos.col, Err: ErrQuote}
						break parseField
					}
					r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
					r.fieldPositions = append(r.fieldPositions, fieldPos)
					break parseField
				}
			}
		}
	}
	if err == nil {
		err = errRead
	}
	// Create a single string and create slices out of it.
	// This pins the memory of the fields together, but allocates once.
	str := string(r.recordBuffer) // Convert to string once to batch allocations
	dst = dst[:0]
	if cap(dst) < len(r.fieldIndexes) {
		dst = make([]string, len(r.fieldIndexes))
	}
	dst = dst[:len(r.fieldIndexes)]
	var preIdx int
	for i, idx := range r.fieldIndexes {
		dst[i] = str[preIdx:idx]
		preIdx = idx
	}
	// Check or update the expected fields per record.
	if r.FieldsPerRecord > 0 {
		if len(dst) != r.FieldsPerRecord && err == nil {
			err = &ParseError{
				StartLine: recLine,
				Line:      recLine,
				Column:    1,
				Err:       ErrFieldCount,
			}
		}
	} else if r.FieldsPerRecord == 0 {
		// First record seen: lock in the field count for later records.
		r.FieldsPerRecord = len(dst)
	}
	return dst, err
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package csv
import (
"bufio"
"io"
"strings"
"unicode"
"unicode/utf8"
)
// A Writer writes records using CSV encoding.
//
// As returned by [NewWriter], a Writer writes records terminated by a
// newline and uses ',' as the field delimiter. The exported fields can be
// changed to customize the details before
// the first call to [Writer.Write] or [Writer.WriteAll].
//
// [Writer.Comma] is the field delimiter.
//
// If [Writer.UseCRLF] is true,
// the Writer ends each output line with \r\n instead of \n.
//
// The writes of individual records are buffered.
// After all data has been written, the client should call the
// [Writer.Flush] method to guarantee all data has been forwarded to
// the underlying [io.Writer]. Any errors that occurred should
// be checked by calling the [Writer.Error] method.
type Writer struct {
	Comma   rune // Field delimiter (set to ',' by NewWriter)
	UseCRLF bool // True to use \r\n as the line terminator
	w       *bufio.Writer // Buffered destination; flushed by Flush/WriteAll.
}
// NewWriter returns a new Writer that writes to w.
// The returned Writer uses ',' as its field delimiter; see the
// [Writer] documentation for the fields that may be customized.
func NewWriter(w io.Writer) *Writer {
	writer := &Writer{w: bufio.NewWriter(w)}
	writer.Comma = ','
	return writer
}
// Write writes a single CSV record to w along with any necessary quoting.
// A record is a slice of strings with each string being one field.
// Writes are buffered, so [Writer.Flush] must eventually be called to ensure
// that the record is written to the underlying [io.Writer].
func (w *Writer) Write(record []string) error {
	if !validDelim(w.Comma) {
		return errInvalidDelim
	}
	for n, field := range record {
		if n > 0 {
			// Emit the delimiter between fields (not before the first one).
			if _, err := w.w.WriteRune(w.Comma); err != nil {
				return err
			}
		}
		// If we don't have to have a quoted field then just
		// write out the field and continue to the next field.
		if !w.fieldNeedsQuotes(field) {
			if _, err := w.w.WriteString(field); err != nil {
				return err
			}
			continue
		}
		if err := w.w.WriteByte('"'); err != nil {
			return err
		}
		for len(field) > 0 {
			// Search for special characters.
			i := strings.IndexAny(field, "\"\r\n")
			if i < 0 {
				i = len(field)
			}
			// Copy verbatim everything before the special character.
			if _, err := w.w.WriteString(field[:i]); err != nil {
				return err
			}
			field = field[i:]
			// Encode the special character.
			if len(field) > 0 {
				var err error
				switch field[0] {
				case '"':
					// Escape an embedded quote by doubling it.
					_, err = w.w.WriteString(`""`)
				case '\r':
					// In CRLF mode a bare \r inside a field is dropped;
					// otherwise it is written through unchanged.
					if !w.UseCRLF {
						err = w.w.WriteByte('\r')
					}
				case '\n':
					// Embedded newlines follow the configured line terminator.
					if w.UseCRLF {
						_, err = w.w.WriteString("\r\n")
					} else {
						err = w.w.WriteByte('\n')
					}
				}
				field = field[1:]
				if err != nil {
					return err
				}
			}
		}
		if err := w.w.WriteByte('"'); err != nil {
			return err
		}
	}
	// Terminate the record with the configured line ending.
	var err error
	if w.UseCRLF {
		_, err = w.w.WriteString("\r\n")
	} else {
		err = w.w.WriteByte('\n')
	}
	return err
}
// Flush writes any buffered data to the underlying [io.Writer].
// To check if an error occurred during Flush, call [Writer.Error].
func (w *Writer) Flush() {
	// The flush error is sticky inside bufio and retrievable via Error,
	// so it is deliberately discarded here.
	_ = w.w.Flush()
}
// Error reports any error that has occurred during
// a previous [Writer.Write] or [Writer.Flush].
func (w *Writer) Error() error {
	// A zero-length write is a no-op but surfaces bufio's sticky error.
	_, sticky := w.w.Write(nil)
	return sticky
}
// WriteAll writes multiple CSV records to w using [Writer.Write] and
// then calls [Writer.Flush], returning any error from the Flush.
func (w *Writer) WriteAll(records [][]string) error {
	for _, rec := range records {
		if err := w.Write(rec); err != nil {
			return err
		}
	}
	return w.w.Flush()
}
// fieldNeedsQuotes reports whether our field must be enclosed in quotes.
// Fields with a Comma, fields with a quote or newline, and
// fields which start with a space must be enclosed in quotes.
// We used to quote empty strings, but we do not anymore (as of Go 1.4).
// The two representations should be equivalent, but Postgres distinguishes
// quoted vs non-quoted empty string during database imports, and it has
// an option to force the quoted behavior for non-quoted CSV but it has
// no option to force the non-quoted behavior for quoted CSV, making
// CSV with quoted empty strings strictly less useful.
// Not quoting the empty string also makes this package match the behavior
// of Microsoft Excel and Google Drive.
// For Postgres, quote the data terminating string `\.`.
func (w *Writer) fieldNeedsQuotes(field string) bool {
	switch {
	case field == "":
		return false
	case field == `\.`:
		return true
	}
	if w.Comma < utf8.RuneSelf {
		// Single-byte delimiter: a direct byte scan avoids UTF-8 decoding.
		comma := byte(w.Comma)
		for i := 0; i < len(field); i++ {
			switch field[i] {
			case '\n', '\r', '"', comma:
				return true
			}
		}
	} else if strings.ContainsRune(field, w.Comma) || strings.ContainsAny(field, "\"\r\n") {
		return true
	}
	// A field that begins with whitespace must be quoted to preserve it.
	first, _ := utf8.DecodeRuneInString(field)
	return unicode.IsSpace(first)
}
// Code generated by go run decgen.go -output dec_helpers.go; DO NOT EDIT.
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gob
import (
"math"
"reflect"
)
// decArrayHelper maps an element kind to its optimized array-decoding
// helper; kinds absent from the map fall back to the generic decode path.
var decArrayHelper = map[reflect.Kind]decHelper{
	reflect.Bool:       decBoolArray,
	reflect.Complex64:  decComplex64Array,
	reflect.Complex128: decComplex128Array,
	reflect.Float32:    decFloat32Array,
	reflect.Float64:    decFloat64Array,
	reflect.Int:        decIntArray,
	reflect.Int16:      decInt16Array,
	reflect.Int32:      decInt32Array,
	reflect.Int64:      decInt64Array,
	reflect.Int8:       decInt8Array,
	reflect.String:     decStringArray,
	reflect.Uint:       decUintArray,
	reflect.Uint16:     decUint16Array,
	reflect.Uint32:     decUint32Array,
	reflect.Uint64:     decUint64Array,
	reflect.Uintptr:    decUintptrArray,
}
// decSliceHelper maps an element kind to its optimized slice-decoding
// helper; kinds absent from the map fall back to the generic decode path.
var decSliceHelper = map[reflect.Kind]decHelper{
	reflect.Bool:       decBoolSlice,
	reflect.Complex64:  decComplex64Slice,
	reflect.Complex128: decComplex128Slice,
	reflect.Float32:    decFloat32Slice,
	reflect.Float64:    decFloat64Slice,
	reflect.Int:        decIntSlice,
	reflect.Int16:      decInt16Slice,
	reflect.Int32:      decInt32Slice,
	reflect.Int64:      decInt64Slice,
	reflect.Int8:       decInt8Slice,
	reflect.String:     decStringSlice,
	reflect.Uint:       decUintSlice,
	reflect.Uint16:     decUint16Slice,
	reflect.Uint32:     decUint32Slice,
	reflect.Uint64:     decUint64Slice,
	reflect.Uintptr:    decUintptrSlice,
}
// decBoolArray decodes into a bool array by delegating to decBoolSlice over
// the array's backing storage; it reports false if the fast path cannot apply.
func decBoolArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decBoolSlice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decBoolSlice decodes length bools from the wire into v, which must have
// concrete type []bool; it reports whether the fast path was taken.
func decBoolSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]bool](v)
	if !ok {
		// It is kind bool but not type bool. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding bool array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		// Any non-zero uint decodes as true.
		slice[i] = state.decodeUint() != 0
	}
	return true
}
// decComplex64Array decodes into a complex64 array by delegating to
// decComplex64Slice over the array's backing storage.
func decComplex64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decComplex64Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decComplex64Slice decodes length complex64 values (real part first) into v,
// which must have concrete type []complex64.
func decComplex64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]complex64](v)
	if !ok {
		// It is kind complex64 but not type complex64. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding complex64 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		real := float32FromBits(state.decodeUint(), ovfl)
		imag := float32FromBits(state.decodeUint(), ovfl)
		slice[i] = complex(float32(real), float32(imag))
	}
	return true
}
// decComplex128Array decodes into a complex128 array by delegating to
// decComplex128Slice over the array's backing storage.
func decComplex128Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decComplex128Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decComplex128Slice decodes length complex128 values (real part first) into
// v, which must have concrete type []complex128.
func decComplex128Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]complex128](v)
	if !ok {
		// It is kind complex128 but not type complex128. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding complex128 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		real := float64FromBits(state.decodeUint())
		imag := float64FromBits(state.decodeUint())
		slice[i] = complex(real, imag)
	}
	return true
}
// decFloat32Array decodes into a float32 array by delegating to
// decFloat32Slice over the array's backing storage.
func decFloat32Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decFloat32Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decFloat32Slice decodes length float32 values into v, which must have
// concrete type []float32; values that overflow float32 raise ovfl.
func decFloat32Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]float32](v)
	if !ok {
		// It is kind float32 but not type float32. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding float32 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		slice[i] = float32(float32FromBits(state.decodeUint(), ovfl))
	}
	return true
}
// decFloat64Array decodes into a float64 array by delegating to
// decFloat64Slice over the array's backing storage.
func decFloat64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decFloat64Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decFloat64Slice decodes length float64 values into v, which must have
// concrete type []float64.
func decFloat64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]float64](v)
	if !ok {
		// It is kind float64 but not type float64. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding float64 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		slice[i] = float64FromBits(state.decodeUint())
	}
	return true
}
// decIntArray decodes into an int array by delegating to decIntSlice over
// the array's backing storage.
func decIntArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decIntSlice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decIntSlice decodes length ints into v, which must have concrete type
// []int; values outside the platform's int range raise ovfl.
func decIntSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]int](v)
	if !ok {
		// It is kind int but not type int. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding int array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		x := state.decodeInt()
		// MinInt and MaxInt, computed for the platform's int width.
		if x < ^int64(^uint(0)>>1) || int64(^uint(0)>>1) < x {
			error_(ovfl)
		}
		slice[i] = int(x)
	}
	return true
}
// decInt16Array decodes into an int16 array by delegating to decInt16Slice
// over the array's backing storage.
func decInt16Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decInt16Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decInt16Slice decodes length int16 values into v, which must have concrete
// type []int16; out-of-range values raise ovfl.
func decInt16Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]int16](v)
	if !ok {
		// It is kind int16 but not type int16. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding int16 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		x := state.decodeInt()
		if x < math.MinInt16 || math.MaxInt16 < x {
			error_(ovfl)
		}
		slice[i] = int16(x)
	}
	return true
}
// decInt32Array decodes into an int32 array by delegating to decInt32Slice
// over the array's backing storage.
func decInt32Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decInt32Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decInt32Slice decodes length int32 values into v, which must have concrete
// type []int32; out-of-range values raise ovfl.
func decInt32Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]int32](v)
	if !ok {
		// It is kind int32 but not type int32. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding int32 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		x := state.decodeInt()
		if x < math.MinInt32 || math.MaxInt32 < x {
			error_(ovfl)
		}
		slice[i] = int32(x)
	}
	return true
}
// decInt64Array decodes into an int64 array by delegating to decInt64Slice
// over the array's backing storage.
func decInt64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decInt64Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decInt64Slice decodes length int64 values into v, which must have concrete
// type []int64. No range check is needed at full width.
func decInt64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]int64](v)
	if !ok {
		// It is kind int64 but not type int64. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding int64 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		slice[i] = state.decodeInt()
	}
	return true
}
// decInt8Array decodes into an int8 array by delegating to decInt8Slice
// over the array's backing storage.
func decInt8Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decInt8Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decInt8Slice decodes length int8 values into v, which must have concrete
// type []int8; out-of-range values raise ovfl.
func decInt8Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]int8](v)
	if !ok {
		// It is kind int8 but not type int8. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding int8 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		x := state.decodeInt()
		if x < math.MinInt8 || math.MaxInt8 < x {
			error_(ovfl)
		}
		slice[i] = int8(x)
	}
	return true
}
// decStringArray decodes into a string array by delegating to decStringSlice
// over the array's backing storage.
func decStringArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decStringSlice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decStringSlice decodes length strings into v, which must have concrete
// type []string. Each string is a uint byte count followed by raw bytes;
// every count is validated against the remaining buffer before use.
func decStringSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]string](v)
	if !ok {
		// It is kind string but not type string. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding string array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		u := state.decodeUint()
		n := int(u)
		// Reject negative, truncated, or buffer-exceeding counts.
		if n < 0 || uint64(n) != u || n > state.b.Len() {
			errorf("length of string exceeds input size (%d bytes)", u)
		}
		if n > state.b.Len() {
			errorf("string data too long for buffer: %d", n)
		}
		// Read the data.
		data := state.b.Bytes()
		if len(data) < n {
			errorf("invalid string length %d: exceeds input size %d", n, len(data))
		}
		slice[i] = string(data[:n])
		state.b.Drop(n)
	}
	return true
}
// decUintArray decodes into a uint array by delegating to decUintSlice
// over the array's backing storage.
func decUintArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decUintSlice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decUintSlice decodes length uints into v, which must have concrete type
// []uint. Note the platform-width range check remains disabled (see TODO).
func decUintSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]uint](v)
	if !ok {
		// It is kind uint but not type uint. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding uint array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		x := state.decodeUint()
		/*TODO if math.MaxUint32 < x {
			error_(ovfl)
		}*/
		slice[i] = uint(x)
	}
	return true
}
// decUint16Array decodes into a uint16 array by delegating to decUint16Slice
// over the array's backing storage.
func decUint16Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decUint16Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decUint16Slice decodes length uint16 values into v, which must have
// concrete type []uint16; out-of-range values raise ovfl.
func decUint16Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]uint16](v)
	if !ok {
		// It is kind uint16 but not type uint16. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding uint16 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		x := state.decodeUint()
		if math.MaxUint16 < x {
			error_(ovfl)
		}
		slice[i] = uint16(x)
	}
	return true
}
// decUint32Array decodes into a uint32 array by delegating to decUint32Slice
// over the array's backing storage.
func decUint32Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decUint32Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decUint32Slice decodes length uint32 values into v, which must have
// concrete type []uint32; out-of-range values raise ovfl.
func decUint32Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]uint32](v)
	if !ok {
		// It is kind uint32 but not type uint32. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding uint32 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		x := state.decodeUint()
		if math.MaxUint32 < x {
			error_(ovfl)
		}
		slice[i] = uint32(x)
	}
	return true
}
// decUint64Array decodes into a uint64 array by delegating to decUint64Slice
// over the array's backing storage.
func decUint64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decUint64Slice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decUint64Slice decodes length uint64 values into v, which must have
// concrete type []uint64. No range check is needed at full width.
func decUint64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]uint64](v)
	if !ok {
		// It is kind uint64 but not type uint64. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding uint64 array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		slice[i] = state.decodeUint()
	}
	return true
}
// decUintptrArray decodes into a uintptr array by delegating to
// decUintptrSlice over the array's backing storage.
func decUintptrArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return decUintptrSlice(state, v.Slice(0, v.Len()), length, ovfl)
}

// decUintptrSlice decodes length uintptr values into v, which must have
// concrete type []uintptr; out-of-range values raise ovfl.
func decUintptrSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
	slice, ok := reflect.TypeAssert[[]uintptr](v)
	if !ok {
		// It is kind uintptr but not type uintptr. TODO: We can handle this unsafely.
		return false
	}
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding uintptr array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= len(slice) {
			// This is a slice that we only partially allocated.
			growSlice(v, &slice, length)
		}
		x := state.decodeUint()
		// ^uintptr(0) is the maximum value of the platform's uintptr.
		if uint64(^uintptr(0)) < x {
			error_(ovfl)
		}
		slice[i] = uintptr(x)
	}
	return true
}
// growSlice is called for a slice that we only partially allocated,
// to grow it up to length.
func growSlice[E any](v reflect.Value, ps *[]E, length int) {
var zero E
s := *ps
s = append(s, zero)
cp := cap(s)
if cp > length {
cp = length
}
s = s[:cp]
v.Set(reflect.ValueOf(s))
*ps = s
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run decgen.go -output dec_helpers.go
package gob
import (
"encoding"
"errors"
"internal/saferio"
"io"
"math"
"math/bits"
"reflect"
)
var (
	// errBadUint reports a variable-length integer whose byte count exceeds 8.
	errBadUint = errors.New("gob: encoded unsigned integer out of range")
	// errBadType reports an unrecognized or corrupted wire type id.
	errBadType = errors.New("gob: unknown type id or corrupted data")
	// errRange reports field numbers outside the struct being decoded.
	errRange = errors.New("gob: bad data: field numbers out of bounds")
)

// decHelper is the signature of an optimized decode loop for a specific
// element type; it reports whether it handled the value.
type decHelper func(state *decoderState, v reflect.Value, length int, ovfl error) bool
// decoderState is the execution state of an instance of the decoder. A new state
// is created for nested objects. States are recycled through the Decoder's
// free list (see newDecoderState/freeDecoderState).
type decoderState struct {
	dec *Decoder // owning Decoder; used for the free list and type info
	// The buffer is stored with an extra indirection because it may be replaced
	// if we load a type during decode (when reading an interface value).
	b        *decBuffer
	fieldnum int           // the last field number read.
	next     *decoderState // for free list
}
// decBuffer is an extremely simple, fast implementation of a read-only byte buffer.
// It is initialized by calling Size and then copying the data into the slice returned by Bytes().
type decBuffer struct {
	data   []byte // full backing data; bytes before offset are already consumed
	offset int    // Read offset.
}
// Read copies unread bytes into p and advances the read offset.
// It reports io.EOF only when p is non-empty and no data remains.
func (d *decBuffer) Read(p []byte) (int, error) {
	n := copy(p, d.data[d.offset:])
	if n == 0 {
		if len(p) == 0 {
			return 0, nil
		}
		return 0, io.EOF
	}
	d.offset += n
	return n, nil
}
// Drop discards the next n bytes without reading them.
// It panics if fewer than n bytes remain.
func (d *decBuffer) Drop(n int) {
	if d.Len() < n {
		panic("drop")
	}
	d.offset += n
}
// ReadByte returns the next unread byte, or io.EOF when the buffer is drained.
func (d *decBuffer) ReadByte() (byte, error) {
	if len(d.data) <= d.offset {
		return 0, io.EOF
	}
	b := d.data[d.offset]
	d.offset++
	return b, nil
}
// Len reports the number of unread bytes remaining in the buffer.
func (d *decBuffer) Len() int {
	remaining := len(d.data) - d.offset
	return remaining
}
// Bytes returns the unread portion of the buffer without consuming it.
func (d *decBuffer) Bytes() []byte {
	unread := d.data[d.offset:]
	return unread
}
// SetBytes sets the buffer to the bytes, discarding any existing data.
// The slice is adopted directly (not copied); the read offset restarts at 0.
func (d *decBuffer) SetBytes(data []byte) {
	d.data = data
	d.offset = 0
}
// Reset empties the buffer while retaining its backing storage for reuse.
func (d *decBuffer) Reset() {
	d.offset = 0
	d.data = d.data[:0]
}
// newDecoderState returns a decoderState bound to buf, reusing one from the
// Decoder's free list when available.
// We pass the bytes.Buffer separately for easier testing of the infrastructure
// without requiring a full Decoder.
func (dec *Decoder) newDecoderState(buf *decBuffer) *decoderState {
	d := dec.freeList
	if d != nil {
		dec.freeList = d.next
	} else {
		d = &decoderState{dec: dec}
	}
	d.b = buf
	return d
}
// freeDecoderState returns d to the Decoder's free list for reuse.
func (dec *Decoder) freeDecoderState(d *decoderState) {
	d.next, dec.freeList = dec.freeList, d
}
// overflow returns the overflow error reported when a decoded value does
// not fit in the destination named by name.
func overflow(name string) error {
	msg := `value for "` + name + `" out of range`
	return errors.New(msg)
}
// decodeUintReader reads an encoded unsigned integer from an io.Reader.
// Used only by the Decoder to read the message length.
// width reports the total bytes consumed, including the leading length byte.
func decodeUintReader(r io.Reader, buf []byte) (x uint64, width int, err error) {
	width = 1
	n, err := io.ReadFull(r, buf[0:width])
	if n == 0 {
		return
	}
	b := buf[0]
	if b <= 0x7f {
		// Single-byte encoding for values 0..127.
		return uint64(b), width, nil
	}
	// Otherwise b is the negated count of big-endian bytes that follow.
	n = -int(int8(b))
	if n > uint64Size {
		err = errBadUint
		return
	}
	width, err = io.ReadFull(r, buf[0:n])
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return
	}
	// Could check that the high byte is zero but it's not worth it.
	for _, b := range buf[0:width] {
		x = x<<8 | uint64(b)
	}
	width++ // +1 for length byte
	return
}
// decodeUint reads an encoded unsigned integer from state.r.
// Does not check for overflow. Malformed input is reported through
// error_/errorf, which do not return.
func (state *decoderState) decodeUint() (x uint64) {
	b, err := state.b.ReadByte()
	if err != nil {
		error_(err)
	}
	if b <= 0x7f {
		// Single-byte encoding for values 0..127.
		return uint64(b)
	}
	// Otherwise b is the negated count of big-endian bytes that follow.
	n := -int(int8(b))
	if n > uint64Size {
		error_(errBadUint)
	}
	buf := state.b.Bytes()
	if len(buf) < n {
		errorf("invalid uint data length %d: exceeds input size %d", n, len(buf))
	}
	// Don't need to check error; it's safe to loop regardless.
	// Could check that the high byte is zero but it's not worth it.
	for _, b := range buf[0:n] {
		x = x<<8 | uint64(b)
	}
	state.b.Drop(n)
	return x
}
// decodeInt reads an encoded signed integer from state.r.
// Does not check for overflow. The wire format zig-zags the sign into
// the low bit: odd values decode to negative numbers.
func (state *decoderState) decodeInt() int64 {
	u := state.decodeUint()
	if u&1 == 1 {
		return ^int64(u >> 1)
	}
	return int64(u >> 1)
}
// getLength decodes the next uint and makes sure it is a possible
// size for a data item that follows, which means it must fit in a
// non-negative int and fit in the buffer.
func (state *decoderState) getLength() (int, bool) {
	n := int(state.decodeUint())
	if n < 0 || n >= tooBig || state.b.Len() < n {
		return 0, false
	}
	return n, true
}
// decOp is the signature of a decoding operator for a given type.
type decOp func(i *decInstr, state *decoderState, v reflect.Value)

// The 'instructions' of the decoding machine.
// One decInstr is executed per encoded field.
type decInstr struct {
	op    decOp // the decoding operator to run for this field
	field int   // field number of the wire type
	index []int // field access indices for destination type
	ovfl  error // error message for overflow/underflow (for arrays, of the elements)
}
// ignoreUint discards a uint value with no destination.
// The decoded value is dropped; only the buffer position advances.
func ignoreUint(i *decInstr, state *decoderState, v reflect.Value) {
	state.decodeUint()
}
// ignoreTwoUints discards a uint value with no destination. It's used to skip
// complex values, which are transmitted as two uints (real then imaginary).
func ignoreTwoUints(i *decInstr, state *decoderState, v reflect.Value) {
	state.decodeUint()
	state.decodeUint()
}
// Since the encoder writes no zeros, if we arrive at a decoder we have
// a value to extract and store. The field number has already been read
// (it's how we knew to call this decoder).
// Each decoder is responsible for handling any indirections associated
// with the data structure. If any pointer so reached is nil, allocation must
// be done.
// decAlloc takes a value and returns a settable value that can
// be assigned to. If the value is a pointer, decAlloc guarantees it points to storage.
// The callers to the individual decoders are expected to have used decAlloc.
// The individual decoders don't need it.
func decAlloc(v reflect.Value) reflect.Value {
for v.Kind() == reflect.Pointer {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
return v
}
// decBool decodes a uint and stores it as a boolean in value.
// Any non-zero encoding decodes to true.
func decBool(i *decInstr, state *decoderState, value reflect.Value) {
	isSet := state.decodeUint() != 0
	value.SetBool(isSet)
}
// decInt8 decodes an integer and stores it as an int8 in value.
// Values outside the int8 range raise the instruction's overflow error.
func decInt8(i *decInstr, state *decoderState, value reflect.Value) {
	x := state.decodeInt()
	if x > math.MaxInt8 || x < math.MinInt8 {
		error_(i.ovfl)
	}
	value.SetInt(x)
}
// decUint8 decodes an unsigned integer and stores it as a uint8 in value.
// Values above math.MaxUint8 raise the instruction's overflow error.
func decUint8(i *decInstr, state *decoderState, value reflect.Value) {
	x := state.decodeUint()
	if x > math.MaxUint8 {
		error_(i.ovfl)
	}
	value.SetUint(x)
}
// decInt16 decodes an integer and stores it as an int16 in value.
// Values outside the int16 range raise the instruction's overflow error.
func decInt16(i *decInstr, state *decoderState, value reflect.Value) {
	x := state.decodeInt()
	if x > math.MaxInt16 || x < math.MinInt16 {
		error_(i.ovfl)
	}
	value.SetInt(x)
}
// decUint16 decodes an unsigned integer and stores it as a uint16 in value.
// Values above math.MaxUint16 raise the instruction's overflow error.
func decUint16(i *decInstr, state *decoderState, value reflect.Value) {
	x := state.decodeUint()
	if x > math.MaxUint16 {
		error_(i.ovfl)
	}
	value.SetUint(x)
}
// decInt32 decodes an integer and stores it as an int32 in value.
// Values outside the int32 range raise the instruction's overflow error.
func decInt32(i *decInstr, state *decoderState, value reflect.Value) {
	x := state.decodeInt()
	if x > math.MaxInt32 || x < math.MinInt32 {
		error_(i.ovfl)
	}
	value.SetInt(x)
}
// decUint32 decodes an unsigned integer and stores it as a uint32 in value.
// Values above math.MaxUint32 raise the instruction's overflow error.
func decUint32(i *decInstr, state *decoderState, value reflect.Value) {
	x := state.decodeUint()
	if x > math.MaxUint32 {
		error_(i.ovfl)
	}
	value.SetUint(x)
}
// decInt64 decodes an integer and stores it as an int64 in value.
// No range check is needed at full width.
func decInt64(i *decInstr, state *decoderState, value reflect.Value) {
	value.SetInt(state.decodeInt())
}
// decUint64 decodes an unsigned integer and stores it as a uint64 in value.
// No range check is needed at full width.
func decUint64(i *decInstr, state *decoderState, value reflect.Value) {
	value.SetUint(state.decodeUint())
}
// Floating-point numbers are transmitted as uint64s holding the bits
// of the underlying representation. They are sent byte-reversed, with
// the exponent end coming out first, so integer floating point numbers
// (for example) transmit more compactly. This routine does the
// unswizzling.
func float64FromBits(u uint64) float64 {
v := bits.ReverseBytes64(u)
return math.Float64frombits(v)
}
// float32FromBits decodes an unsigned integer, treats it as a 32-bit floating-point
// number, and returns it. It's a helper function for float32 and complex64.
// It returns a float64 because that's what reflection needs, but its return
// value is known to be accurately representable in a float32.
func float32FromBits(u uint64, ovfl error) float64 {
	v := float64FromBits(u)
	// +Inf is OK in both 32- and 64-bit floats. Underflow is always OK.
	// (NaN compares false everywhere, so it also passes through.)
	if av := math.Abs(v); math.MaxFloat32 < av && av <= math.MaxFloat64 {
		error_(ovfl)
	}
	return v
}
// decFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point
// number, and stores it in value.
func decFloat32(i *decInstr, state *decoderState, value reflect.Value) {
	f := float32FromBits(state.decodeUint(), i.ovfl)
	value.SetFloat(f)
}
// decFloat64 decodes an unsigned integer, treats it as a 64-bit floating-point
// number, and stores it in value.
func decFloat64(i *decInstr, state *decoderState, value reflect.Value) {
	f := float64FromBits(state.decodeUint())
	value.SetFloat(f)
}
// decComplex64 decodes a pair of unsigned integers, treats them as a
// pair of floating point numbers, and stores them as a complex64 in value.
// The real part comes first.
func decComplex64(i *decInstr, state *decoderState, value reflect.Value) {
	re := float32FromBits(state.decodeUint(), i.ovfl)
	im := float32FromBits(state.decodeUint(), i.ovfl)
	value.SetComplex(complex(re, im))
}
// decComplex128 decodes a pair of unsigned integers, treats them as a
// pair of floating point numbers, and stores them as a complex128 in value.
// The real part is transmitted (and therefore decoded) first.
func decComplex128(i *decInstr, state *decoderState, value reflect.Value) {
	re := float64FromBits(state.decodeUint())
	im := float64FromBits(state.decodeUint())
	value.SetComplex(complex(re, im))
}
// decUint8Slice decodes a byte slice and stores in value a slice header
// describing the data.
// uint8 slices are encoded as an unsigned count followed by the raw bytes.
//
// The count comes from untrusted input, so the slice is allocated
// incrementally (capped by saferio.SliceCap) rather than all at once;
// a huge bogus count then fails on the first short read instead of OOMing.
func decUint8Slice(i *decInstr, state *decoderState, value reflect.Value) {
	n, ok := state.getLength()
	if !ok {
		errorf("bad %s slice length: %d", value.Type(), n)
	}
	if value.Cap() < n {
		// Start with a size-limited allocation; may be smaller than n.
		safe := saferio.SliceCap[byte](uint64(n))
		if safe < 0 {
			errorf("%s slice too big: %d elements", value.Type(), n)
		}
		value.Set(reflect.MakeSlice(value.Type(), safe, safe))
		ln := safe
		i := 0
		for i < n {
			if i >= ln {
				// We didn't allocate the entire slice,
				// due to using saferio.SliceCap.
				// Grow the slice for one more element.
				// The slice is full, so this should
				// bump up the capacity.
				value.Grow(1)
			}
			// Copy into s up to the capacity or n,
			// whichever is less.
			ln = value.Cap()
			if ln > n {
				ln = n
			}
			value.SetLen(ln)
			sub := value.Slice(i, ln)
			if _, err := state.b.Read(sub.Bytes()); err != nil {
				errorf("error decoding []byte at %d: %s", i, err)
			}
			i = ln
		}
	} else {
		// Existing capacity suffices; read straight into the caller's backing array.
		value.SetLen(n)
		if _, err := state.b.Read(value.Bytes()); err != nil {
			errorf("error decoding []byte: %s", err)
		}
	}
}
// decString decodes a byte array and stores in value a string header
// describing the data.
// Strings are encoded as an unsigned count followed by the raw bytes.
func decString(i *decInstr, state *decoderState, value reflect.Value) {
	n, ok := state.getLength()
	if !ok {
		errorf("bad %s slice length: %d", value.Type(), n)
	}
	// The string bytes are next in the buffer; validate before slicing.
	data := state.b.Bytes()
	if len(data) < n {
		errorf("invalid string length %d: exceeds input size %d", n, len(data))
	}
	// string(...) copies, so it is safe to drop the buffered bytes afterwards.
	value.SetString(string(data[:n]))
	state.b.Drop(n)
}
// ignoreUint8Array skips over the data for a byte slice value with no destination.
func ignoreUint8Array(i *decInstr, state *decoderState, value reflect.Value) {
	n, ok := state.getLength()
	if !ok {
		errorf("slice length too large")
	}
	if avail := state.b.Len(); avail < n {
		errorf("invalid slice length %d: exceeds input size %d", n, avail)
	}
	state.b.Drop(n)
}
// Execution engine
// The decoder engine is an array of instructions indexed by field number of
// the incoming data. It is executed with random access according to field
// number: each delta in the stream selects the next instruction to run.
type decEngine struct {
	instr    []decInstr
	numInstr int // the number of active instructions
}
// decodeSingle decodes a top-level value that is not a struct and stores it in value.
// Such values are preceded by a zero, making them have the memory layout of a
// struct field (although with an illegal field number).
func (dec *Decoder) decodeSingle(engine *decEngine, value reflect.Value) {
	state := dec.newDecoderState(&dec.buf)
	defer dec.freeDecoderState(state)
	state.fieldnum = singletonField
	// The leading delta must be the zero marker.
	if delta := state.decodeUint(); delta != 0 {
		errorf("decode: corrupted data: non-zero delta for singleton")
	}
	instr := &engine.instr[singletonField]
	instr.op(instr, state, value)
}
// decodeStruct decodes a top-level struct and stores it in value.
// Indir is for the value, not the type. At the time of the call it may
// differ from ut.indir, which was computed when the engine was built.
// This state cannot arise for decodeSingle, which is called directly
// from the user's value, not from the innards of an engine.
//
// The wire format is a sequence of (field-number delta, field value)
// pairs terminated by a zero delta.
func (dec *Decoder) decodeStruct(engine *decEngine, value reflect.Value) {
	state := dec.newDecoderState(&dec.buf)
	defer dec.freeDecoderState(state)
	state.fieldnum = -1
	for state.b.Len() > 0 {
		delta := int(state.decodeUint())
		if delta < 0 {
			errorf("decode: corrupted data: negative delta")
		}
		if delta == 0 { // struct terminator is zero delta fieldnum
			break
		}
		if state.fieldnum >= len(engine.instr)-delta { // subtract to compare without overflow
			error_(errRange)
		}
		fieldnum := state.fieldnum + delta
		instr := &engine.instr[fieldnum]
		var field reflect.Value
		if instr.index != nil {
			// Otherwise the field is unknown to us and instr.op is an ignore op.
			field = value.FieldByIndex(instr.index)
			if field.Kind() == reflect.Pointer {
				field = decAlloc(field)
			}
		}
		instr.op(instr, state, field)
		state.fieldnum = fieldnum
	}
}
var noValue reflect.Value
// ignoreStruct discards the data for a struct with no destination.
// It walks the same (delta, value) stream as decodeStruct, running the
// engine's ignore ops instead of storing fields.
func (dec *Decoder) ignoreStruct(engine *decEngine) {
	state := dec.newDecoderState(&dec.buf)
	defer dec.freeDecoderState(state)
	state.fieldnum = -1
	for state.b.Len() > 0 {
		delta := int(state.decodeUint())
		if delta < 0 {
			errorf("ignore decode: corrupted data: negative delta")
		}
		if delta == 0 { // struct terminator is zero delta fieldnum
			break
		}
		// Subtract to compare without overflow (mirrors decodeStruct):
		// computing state.fieldnum+delta first could wrap around for a
		// corrupt, huge delta, yielding a negative index and an uncaught
		// panic on the engine.instr access below instead of errRange.
		if state.fieldnum >= len(engine.instr)-delta {
			error_(errRange)
		}
		fieldnum := state.fieldnum + delta
		instr := &engine.instr[fieldnum]
		instr.op(instr, state, noValue)
		state.fieldnum = fieldnum
	}
}
// ignoreSingle discards the data for a top-level non-struct value with no
// destination. It's used when calling Decode with a nil value.
func (dec *Decoder) ignoreSingle(engine *decEngine) {
	state := dec.newDecoderState(&dec.buf)
	defer dec.freeDecoderState(state)
	state.fieldnum = singletonField
	// The leading delta must be the zero marker.
	if delta := int(state.decodeUint()); delta != 0 {
		errorf("decode: corrupted data: non-zero delta for singleton")
	}
	instr := &engine.instr[singletonField]
	instr.op(instr, state, noValue)
}
// decodeArrayHelper does the work for decoding arrays and slices.
// If a type-specific helper exists (fast path for common element kinds),
// it handles the whole array; otherwise each element is decoded through
// elemOp. The destination may have been only partially allocated (see
// decodeSlice), so it is grown on demand as elements arrive.
func (dec *Decoder) decodeArrayHelper(state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) {
	if helper != nil && helper(state, value, length, ovfl) {
		return
	}
	instr := &decInstr{elemOp, 0, nil, ovfl}
	isPtr := value.Type().Elem().Kind() == reflect.Pointer
	ln := value.Len()
	for i := 0; i < length; i++ {
		if state.b.Len() == 0 {
			errorf("decoding array or slice: length exceeds input size (%d elements)", length)
		}
		if i >= ln {
			// This is a slice that we only partially allocated.
			// Grow it up to length.
			value.Grow(1)
			cp := value.Cap()
			if cp > length {
				cp = length
			}
			value.SetLen(cp)
			ln = cp
		}
		v := value.Index(i)
		if isPtr {
			// Pointer elements are allocated lazily; decode into the pointee.
			v = decAlloc(v)
		}
		elemOp(instr, state, v)
	}
}
// decodeArray decodes an array and stores it in value.
// The length is an unsigned integer preceding the elements. Even though the
// length is redundant (it's part of the type), it's a useful check and is
// included in the encoding.
func (dec *Decoder) decodeArray(state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) {
	got := state.decodeUint()
	if got != uint64(length) {
		errorf("length mismatch in decodeArray")
	}
	dec.decodeArrayHelper(state, value, elemOp, length, ovfl, helper)
}
// decodeIntoValue is a helper for map decoding: it runs op against value,
// indirecting through a freshly allocated pointee first when isPtr is set,
// and returns the original value for use as a map key or element.
func decodeIntoValue(state *decoderState, op decOp, isPtr bool, value reflect.Value, instr *decInstr) reflect.Value {
	target := value
	if isPtr {
		target = decAlloc(value)
	}
	op(instr, state, target)
	return value
}
// decodeMap decodes a map and stores it in value.
// Maps are encoded as a length followed by key:value pairs.
// Because the internals of maps are not visible to us, we must
// use reflection rather than pointer magic.
func (dec *Decoder) decodeMap(mtyp reflect.Type, state *decoderState, value reflect.Value, keyOp, elemOp decOp, ovfl error) {
	n := int(state.decodeUint())
	if value.IsNil() {
		value.Set(reflect.MakeMapWithSize(mtyp, n))
	}
	keyIsPtr := mtyp.Key().Kind() == reflect.Pointer
	elemIsPtr := mtyp.Elem().Kind() == reflect.Pointer
	keyInstr := &decInstr{keyOp, 0, nil, ovfl}
	elemInstr := &decInstr{elemOp, 0, nil, ovfl}
	// Single scratch key and element are reused (and re-zeroed) across all
	// entries; SetMapIndex copies them into the map.
	keyP := reflect.New(mtyp.Key())
	elemP := reflect.New(mtyp.Elem())
	for i := 0; i < n; i++ {
		key := decodeIntoValue(state, keyOp, keyIsPtr, keyP.Elem(), keyInstr)
		elem := decodeIntoValue(state, elemOp, elemIsPtr, elemP.Elem(), elemInstr)
		value.SetMapIndex(key, elem)
		// Reset the scratch values so data from one entry can't leak into
		// the next (e.g. partially decoded structs).
		keyP.Elem().SetZero()
		elemP.Elem().SetZero()
	}
}
// ignoreArrayHelper does the work for discarding arrays and slices:
// it runs the element's ignore op length times, consuming the stream.
func (dec *Decoder) ignoreArrayHelper(state *decoderState, elemOp decOp, length int) {
	instr := &decInstr{elemOp, 0, nil, errors.New("no error")}
	for n := 0; n < length; n++ {
		if state.b.Len() == 0 {
			errorf("decoding array or slice: length exceeds input size (%d elements)", length)
		}
		elemOp(instr, state, noValue)
	}
}
// ignoreArray discards the data for an array value with no destination,
// checking the redundant encoded length against the type's length first.
func (dec *Decoder) ignoreArray(state *decoderState, elemOp decOp, length int) {
	got := state.decodeUint()
	if got != uint64(length) {
		errorf("length mismatch in ignoreArray")
	}
	dec.ignoreArrayHelper(state, elemOp, length)
}
// ignoreMap discards the data for a map value with no destination:
// a count followed by that many key/element pairs, each skipped in turn.
func (dec *Decoder) ignoreMap(state *decoderState, keyOp, elemOp decOp) {
	n := int(state.decodeUint())
	kInstr := &decInstr{keyOp, 0, nil, errors.New("no error")}
	eInstr := &decInstr{elemOp, 0, nil, errors.New("no error")}
	for i := 0; i < n; i++ {
		keyOp(kInstr, state, noValue)
		elemOp(eInstr, state, noValue)
	}
}
// decodeSlice decodes a slice and stores it in value.
// Slices are encoded as an unsigned length followed by the elements.
// The length is untrusted, so both the element count and the implied byte
// count are validated, and the initial allocation is capped by saferio.
func (dec *Decoder) decodeSlice(state *decoderState, value reflect.Value, elemOp decOp, ovfl error, helper decHelper) {
	u := state.decodeUint()
	typ := value.Type()
	size := uint64(typ.Elem().Size())
	nBytes := u * size
	n := int(u)
	// Take care with overflow in this calculation.
	if n < 0 || uint64(n) != u || nBytes > tooBig || (size > 0 && nBytes/size != u) {
		// We don't check n against buffer length here because if it's a slice
		// of interfaces, there will be buffer reloads.
		errorf("%s slice too big: %d elements of %d bytes", typ.Elem(), u, size)
	}
	if value.Cap() < n {
		// Allocate cautiously; decodeArrayHelper grows to n as data arrives.
		safe := saferio.SliceCapWithSize(size, uint64(n))
		if safe < 0 {
			errorf("%s slice too big: %d elements of %d bytes", typ.Elem(), u, size)
		}
		value.Set(reflect.MakeSlice(typ, safe, safe))
	} else {
		value.SetLen(n)
	}
	dec.decodeArrayHelper(state, value, elemOp, n, ovfl, helper)
}
// ignoreSlice skips over the data for a slice value with no destination.
func (dec *Decoder) ignoreSlice(state *decoderState, elemOp decOp) {
	n := int(state.decodeUint())
	dec.ignoreArrayHelper(state, elemOp, n)
}
// decodeInterface decodes an interface value and stores it in value.
// Interfaces are encoded as the name of a concrete type followed by a value.
// If the name is empty, the value is nil and no value is sent.
// The concrete type must have been registered (see Register) so the
// decoder can map the name back to a Go type.
func (dec *Decoder) decodeInterface(ityp reflect.Type, state *decoderState, value reflect.Value) {
	// Read the name of the concrete type.
	nr := state.decodeUint()
	if nr > 1<<31 { // zero is permissible for anonymous types
		errorf("invalid type name length %d", nr)
	}
	if nr > uint64(state.b.Len()) {
		errorf("invalid type name length %d: exceeds input size", nr)
	}
	n := int(nr)
	name := state.b.Bytes()[:n]
	state.b.Drop(n)
	// Allocate the destination interface value.
	if len(name) == 0 {
		// Copy the nil interface value to the target.
		value.SetZero()
		return
	}
	if len(name) > 1024 {
		errorf("name too long (%d bytes): %.20q...", len(name), name)
	}
	// The concrete type must be registered.
	typi, ok := nameToConcreteType.Load(string(name))
	if !ok {
		errorf("name not registered for interface: %q", name)
	}
	typ := typi.(reflect.Type)
	// Read the type id of the concrete value.
	concreteId := dec.decodeTypeSequence(true)
	if concreteId < 0 {
		error_(dec.err)
	}
	// Byte count of value is next; we don't care what it is (it's there
	// in case we want to ignore the value by skipping it completely).
	state.decodeUint()
	// Read the concrete value.
	v := allocValue(typ)
	dec.decodeValue(concreteId, v)
	if dec.err != nil {
		error_(dec.err)
	}
	// Assign the concrete value to the interface.
	// Tread carefully; it might not satisfy the interface.
	if !typ.AssignableTo(ityp) {
		errorf("%s is not assignable to type %s", typ, ityp)
	}
	// Copy the interface value to the target.
	value.Set(v)
}
// ignoreInterface discards the data for an interface value with no destination.
// It must still process the type sequence because the ignored item may
// define types the decoder needs later.
func (dec *Decoder) ignoreInterface(state *decoderState) {
	// Read the name of the concrete type.
	n, ok := state.getLength()
	if !ok {
		errorf("bad interface encoding: name too large for buffer")
	}
	bn := state.b.Len()
	if bn < n {
		errorf("invalid interface value length %d: exceeds input size %d", n, bn)
	}
	state.b.Drop(n)
	// Consume the concrete type id (and any type definitions preceding it).
	id := dec.decodeTypeSequence(true)
	if id < 0 {
		error_(dec.err)
	}
	// At this point, the decoder buffer contains a delimited value. Just toss it.
	n, ok = state.getLength()
	if !ok {
		errorf("bad interface encoding: data length too large for buffer")
	}
	state.b.Drop(n)
}
// decodeGobDecoder decodes something implementing the GobDecoder interface.
// The data is encoded as a byte slice, which is handed to the value's own
// GobDecode / UnmarshalBinary / UnmarshalText method as selected by
// ut.externalDec.
func (dec *Decoder) decodeGobDecoder(ut *userTypeInfo, state *decoderState, value reflect.Value) {
	// Read the bytes for the value.
	n, ok := state.getLength()
	if !ok {
		errorf("GobDecoder: length too large for buffer")
	}
	b := state.b.Bytes()
	if len(b) < n {
		errorf("GobDecoder: invalid data length %d: exceeds input size %d", n, len(b))
	}
	b = b[:n]
	state.b.Drop(n)
	var err error
	// We know it's one of these.
	switch ut.externalDec {
	case xGob:
		gobDecoder, _ := reflect.TypeAssert[GobDecoder](value)
		err = gobDecoder.GobDecode(b)
	case xBinary:
		binaryUnmarshaler, _ := reflect.TypeAssert[encoding.BinaryUnmarshaler](value)
		err = binaryUnmarshaler.UnmarshalBinary(b)
	case xText:
		textUnmarshaler, _ := reflect.TypeAssert[encoding.TextUnmarshaler](value)
		err = textUnmarshaler.UnmarshalText(b)
	}
	if err != nil {
		error_(err)
	}
}
// ignoreGobDecoder discards the data for a GobDecoder value with no
// destination: a byte count followed by that many bytes, all skipped.
func (dec *Decoder) ignoreGobDecoder(state *decoderState) {
	// Read the byte count for the value.
	n, ok := state.getLength()
	if !ok {
		errorf("GobDecoder: length too large for buffer")
	}
	if avail := state.b.Len(); avail < n {
		errorf("GobDecoder: invalid data length %d: exceeds input size %d", n, avail)
	}
	state.b.Drop(n)
}
// Index by Go types.
// decOpTable maps a reflect.Kind to its decode op. The platform-dependent
// entries (Int, Uint, Uintptr) are filled in by init below.
var decOpTable = [...]decOp{
	reflect.Bool:       decBool,
	reflect.Int8:       decInt8,
	reflect.Int16:      decInt16,
	reflect.Int32:      decInt32,
	reflect.Int64:      decInt64,
	reflect.Uint8:      decUint8,
	reflect.Uint16:     decUint16,
	reflect.Uint32:     decUint32,
	reflect.Uint64:     decUint64,
	reflect.Float32:    decFloat32,
	reflect.Float64:    decFloat64,
	reflect.Complex64:  decComplex64,
	reflect.Complex128: decComplex128,
	reflect.String:     decString,
}
// Indexed by gob types. tComplex will be added during type.init().
// decIgnoreOpMap maps a builtin wire type id to the op that skips a value
// of that type when there is no destination.
var decIgnoreOpMap = map[typeId]decOp{
	tBool:    ignoreUint,
	tInt:     ignoreUint,
	tUint:    ignoreUint,
	tFloat:   ignoreUint,
	tBytes:   ignoreUint8Array,
	tString:  ignoreUint8Array,
	tComplex: ignoreTwoUints,
}
// decOpFor returns the decoding op for the base type under rt and
// the indirection count to reach it.
// inProgress breaks cycles for recursive types: a pointer to the op being
// built is recorded before recursing, so a nested reference to the same
// type shares the (eventually filled-in) op.
func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProgress map[reflect.Type]*decOp) *decOp {
	ut := userType(rt)
	// If the type implements GobEncoder, we handle it without further processing.
	if ut.externalDec != 0 {
		return dec.gobDecodeOpFor(ut)
	}
	// If this type is already in progress, it's a recursive type (e.g. map[string]*T).
	// Return the pointer to the op we're already building.
	if opPtr := inProgress[rt]; opPtr != nil {
		return opPtr
	}
	typ := ut.base
	var op decOp
	k := typ.Kind()
	if int(k) < len(decOpTable) {
		op = decOpTable[k]
	}
	if op == nil {
		inProgress[rt] = &op
		// Special cases
		switch t := typ; t.Kind() {
		case reflect.Array:
			name = "element of " + name
			elemId := dec.wireType[wireId].ArrayT.Elem
			elemOp := dec.decOpFor(elemId, t.Elem(), name, inProgress)
			ovfl := overflow(name)
			helper := decArrayHelper[t.Elem().Kind()]
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				state.dec.decodeArray(state, value, *elemOp, t.Len(), ovfl, helper)
			}
		case reflect.Map:
			keyId := dec.wireType[wireId].MapT.Key
			elemId := dec.wireType[wireId].MapT.Elem
			keyOp := dec.decOpFor(keyId, t.Key(), "key of "+name, inProgress)
			elemOp := dec.decOpFor(elemId, t.Elem(), "element of "+name, inProgress)
			ovfl := overflow(name)
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				state.dec.decodeMap(t, state, value, *keyOp, *elemOp, ovfl)
			}
		case reflect.Slice:
			name = "element of " + name
			if t.Elem().Kind() == reflect.Uint8 {
				// []byte has a dedicated fast op.
				op = decUint8Slice
				break
			}
			var elemId typeId
			if tt := builtinIdToType(wireId); tt != nil {
				elemId = tt.(*sliceType).Elem
			} else {
				elemId = dec.wireType[wireId].SliceT.Elem
			}
			elemOp := dec.decOpFor(elemId, t.Elem(), name, inProgress)
			ovfl := overflow(name)
			helper := decSliceHelper[t.Elem().Kind()]
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				state.dec.decodeSlice(state, value, *elemOp, ovfl, helper)
			}
		case reflect.Struct:
			// Generate a closure that calls out to the engine for the nested type.
			ut := userType(typ)
			enginePtr, err := dec.getDecEnginePtr(wireId, ut)
			if err != nil {
				error_(err)
			}
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				// indirect through enginePtr to delay evaluation for recursive structs.
				dec.decodeStruct(*enginePtr, value)
			}
		case reflect.Interface:
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				state.dec.decodeInterface(t, state, value)
			}
		}
	}
	if op == nil {
		errorf("decode can't handle type %s", rt)
	}
	return &op
}
var maxIgnoreNestingDepth = 10000
// decIgnoreOpFor returns the decoding op for a field that has no destination.
// Like decOpFor, it uses inProgress to share ops for recursive wire types,
// and additionally enforces maxIgnoreNestingDepth since the wire type alone
// (no local Go type) drives the recursion.
func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp {
	// Track how deep we've recursed trying to skip nested ignored fields.
	dec.ignoreDepth++
	defer func() { dec.ignoreDepth-- }()
	if dec.ignoreDepth > maxIgnoreNestingDepth {
		error_(errors.New("invalid nesting depth"))
	}
	// If this type is already in progress, it's a recursive type (e.g. map[string]*T).
	// Return the pointer to the op we're already building.
	if opPtr := inProgress[wireId]; opPtr != nil {
		return opPtr
	}
	op, ok := decIgnoreOpMap[wireId]
	if !ok {
		inProgress[wireId] = &op
		if wireId == tInterface {
			// Special case because it's a method: the ignored item might
			// define types and we need to record their state in the decoder.
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				state.dec.ignoreInterface(state)
			}
			return &op
		}
		// Special cases
		wire := dec.wireType[wireId]
		switch {
		case wire == nil:
			errorf("bad data: undefined type %s", wireId.string())
		case wire.ArrayT != nil:
			elemId := wire.ArrayT.Elem
			elemOp := dec.decIgnoreOpFor(elemId, inProgress)
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len)
			}
		case wire.MapT != nil:
			keyId := dec.wireType[wireId].MapT.Key
			elemId := dec.wireType[wireId].MapT.Elem
			keyOp := dec.decIgnoreOpFor(keyId, inProgress)
			elemOp := dec.decIgnoreOpFor(elemId, inProgress)
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				state.dec.ignoreMap(state, *keyOp, *elemOp)
			}
		case wire.SliceT != nil:
			elemId := wire.SliceT.Elem
			elemOp := dec.decIgnoreOpFor(elemId, inProgress)
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				state.dec.ignoreSlice(state, *elemOp)
			}
		case wire.StructT != nil:
			// Generate a closure that calls out to the engine for the nested type.
			enginePtr, err := dec.getIgnoreEnginePtr(wireId)
			if err != nil {
				error_(err)
			}
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				// indirect through enginePtr to delay evaluation for recursive structs
				state.dec.ignoreStruct(*enginePtr)
			}
		case wire.GobEncoderT != nil, wire.BinaryMarshalerT != nil, wire.TextMarshalerT != nil:
			op = func(i *decInstr, state *decoderState, value reflect.Value) {
				state.dec.ignoreGobDecoder(state)
			}
		}
	}
	if op == nil {
		errorf("bad data: ignore can't handle type %s", wireId.string())
	}
	return &op
}
// gobDecodeOpFor returns the op for a type that is known to implement
// GobDecoder (or one of the other external decoding interfaces).
// decIndir encodes how the receiver relates to the user's type:
// -1 means the method is on the pointer to the type; a positive count
// means it sits that many indirections below.
func (dec *Decoder) gobDecodeOpFor(ut *userTypeInfo) *decOp {
	rcvrType := ut.user
	switch {
	case ut.decIndir == -1:
		rcvrType = reflect.PointerTo(rcvrType)
	case ut.decIndir > 0:
		for n := int8(0); n < ut.decIndir; n++ {
			rcvrType = rcvrType.Elem()
		}
	}
	var op decOp
	op = func(i *decInstr, state *decoderState, value reflect.Value) {
		// We now have the base type. We need its address if the receiver is a pointer.
		if value.Kind() != reflect.Pointer && rcvrType.Kind() == reflect.Pointer {
			value = value.Addr()
		}
		state.dec.decodeGobDecoder(ut, state, value)
	}
	return &op
}
// compatibleType asks: Are these two gob Types compatible?
// Answers the question for basic types, arrays, maps and slices, plus
// GobEncoder/Decoder pairs.
// Structs are considered ok; fields will be checked later.
// inProgress breaks cycles for recursive types by recording the pairing
// before recursing.
func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[reflect.Type]typeId) bool {
	if rhs, ok := inProgress[fr]; ok {
		return rhs == fw
	}
	inProgress[fr] = fw
	ut := userType(fr)
	wire, ok := dec.wireType[fw]
	// If wire was encoded with an encoding method, fr must have that method.
	// And if not, it must not.
	// At most one of the booleans in ut is set.
	// We could possibly relax this constraint in the future in order to
	// choose the decoding method using the data in the wireType.
	// The parentheses look odd but are correct.
	if (ut.externalDec == xGob) != (ok && wire.GobEncoderT != nil) ||
		(ut.externalDec == xBinary) != (ok && wire.BinaryMarshalerT != nil) ||
		(ut.externalDec == xText) != (ok && wire.TextMarshalerT != nil) {
		return false
	}
	if ut.externalDec != 0 { // This test trumps all others.
		return true
	}
	switch t := ut.base; t.Kind() {
	default:
		// chan, etc: cannot handle.
		return false
	case reflect.Bool:
		return fw == tBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return fw == tInt
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return fw == tUint
	case reflect.Float32, reflect.Float64:
		return fw == tFloat
	case reflect.Complex64, reflect.Complex128:
		return fw == tComplex
	case reflect.String:
		return fw == tString
	case reflect.Interface:
		return fw == tInterface
	case reflect.Array:
		if !ok || wire.ArrayT == nil {
			return false
		}
		array := wire.ArrayT
		return t.Len() == array.Len && dec.compatibleType(t.Elem(), array.Elem, inProgress)
	case reflect.Map:
		if !ok || wire.MapT == nil {
			return false
		}
		MapType := wire.MapT
		return dec.compatibleType(t.Key(), MapType.Key, inProgress) && dec.compatibleType(t.Elem(), MapType.Elem, inProgress)
	case reflect.Slice:
		// Is it an array of bytes?
		if t.Elem().Kind() == reflect.Uint8 {
			return fw == tBytes
		}
		// Extract and compare element types.
		var sw *sliceType
		if tt := builtinIdToType(fw); tt != nil {
			sw, _ = tt.(*sliceType)
		} else if wire != nil {
			sw = wire.SliceT
		}
		elem := userType(t.Elem()).base
		return sw != nil && dec.compatibleType(elem, sw.Elem, inProgress)
	case reflect.Struct:
		return true
	}
}
// typeString returns a human-readable description of the type identified by
// remoteId, consulting the global type table first and then this decoder's
// wire-type map.
func (dec *Decoder) typeString(remoteId typeId) string {
	typeLock.Lock()
	defer typeLock.Unlock()
	t := idToType(remoteId)
	if t == nil {
		// Not globally known; must have been described on this connection.
		return dec.wireType[remoteId].string()
	}
	return t.string()
}
// compileSingle compiles the decoder engine for a non-struct top-level value, including
// GobDecoders. The engine has exactly one instruction, at singletonField.
func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err error) {
	rt := ut.user
	engine = new(decEngine)
	engine.instr = make([]decInstr, 1) // one item
	name := rt.String()                // best we can do
	if !dec.compatibleType(rt, remoteId, make(map[reflect.Type]typeId)) {
		remoteType := dec.typeString(remoteId)
		// Common confusing case: local interface type, remote concrete type.
		if ut.base.Kind() == reflect.Interface && remoteId != tInterface {
			return nil, errors.New("gob: local interface type " + name + " can only be decoded from remote interface type; received concrete type " + remoteType)
		}
		return nil, errors.New("gob: decoding into local type " + name + ", received remote type " + remoteType)
	}
	op := dec.decOpFor(remoteId, rt, name, make(map[reflect.Type]*decOp))
	ovfl := errors.New(`value for "` + name + `" out of range`)
	engine.instr[singletonField] = decInstr{*op, singletonField, nil, ovfl}
	engine.numInstr = 1
	return
}
// compileIgnoreSingle compiles the decoder engine for a non-struct top-level
// value that will be discarded. The engine has a single ignore instruction.
func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine {
	op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp))
	ovfl := overflow(dec.typeString(remoteId))
	return &decEngine{
		instr:    []decInstr{{*op, 0, nil, ovfl}},
		numInstr: 1,
	}
}
// compileDec compiles the decoder engine for a value. If the value is not a struct,
// it calls out to compileSingle.
// For structs it builds one instruction per field of the *wire* struct:
// fields with a matching, compatible local field get a store op; unknown
// or unexported fields get an ignore op.
func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err error) {
	defer catchError(&err)
	rt := ut.base
	srt := rt
	if srt.Kind() != reflect.Struct || ut.externalDec != 0 {
		return dec.compileSingle(remoteId, ut)
	}
	var wireStruct *structType
	// Builtin types can come from global pool; the rest must be defined by the decoder.
	// Also we know we're decoding a struct now, so the client must have sent one.
	if t := builtinIdToType(remoteId); t != nil {
		wireStruct, _ = t.(*structType)
	} else {
		wire := dec.wireType[remoteId]
		if wire == nil {
			error_(errBadType)
		}
		wireStruct = wire.StructT
	}
	if wireStruct == nil {
		errorf("type mismatch in decoder: want struct type %s; got non-struct", rt)
	}
	engine = new(decEngine)
	engine.instr = make([]decInstr, len(wireStruct.Field))
	seen := make(map[reflect.Type]*decOp)
	// Loop over the fields of the wire type.
	for fieldnum := 0; fieldnum < len(wireStruct.Field); fieldnum++ {
		wireField := wireStruct.Field[fieldnum]
		if wireField.Name == "" {
			errorf("empty name for remote field of type %s", wireStruct.Name)
		}
		ovfl := overflow(wireField.Name)
		// Find the field of the local type with the same name.
		localField, present := srt.FieldByName(wireField.Name)
		// TODO(r): anonymous names
		if !present || !isExported(wireField.Name) {
			// No destination for this wire field: compile an ignore op.
			op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp))
			engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl}
			continue
		}
		if !dec.compatibleType(localField.Type, wireField.Id, make(map[reflect.Type]typeId)) {
			errorf("wrong type (%s) for received field %s.%s", localField.Type, wireStruct.Name, wireField.Name)
		}
		op := dec.decOpFor(wireField.Id, localField.Type, localField.Name, seen)
		engine.instr[fieldnum] = decInstr{*op, fieldnum, localField.Index, ovfl}
		engine.numInstr++
	}
	return
}
// getDecEnginePtr returns the engine for the specified type, compiling and
// caching it on first use. The cache is keyed by (local type, remote id)
// because the same Go type can be decoded from different wire types.
func (dec *Decoder) getDecEnginePtr(remoteId typeId, ut *userTypeInfo) (enginePtr **decEngine, err error) {
	rt := ut.user
	decoderMap, ok := dec.decoderCache[rt]
	if !ok {
		decoderMap = make(map[typeId]**decEngine)
		dec.decoderCache[rt] = decoderMap
	}
	if enginePtr, ok = decoderMap[remoteId]; !ok {
		// To handle recursive types, mark this engine as underway before compiling.
		enginePtr = new(*decEngine)
		decoderMap[remoteId] = enginePtr
		*enginePtr, err = dec.compileDec(remoteId, ut)
		if err != nil {
			// Compilation failed; remove the placeholder so a later attempt retries.
			delete(decoderMap, remoteId)
		}
	}
	return
}
// emptyStruct is the type we compile into when ignoring a struct value:
// every wire field then has no local counterpart and gets an ignore op.
type emptyStruct struct{}

var emptyStructType = reflect.TypeFor[emptyStruct]()
// getIgnoreEnginePtr returns the engine for the specified type when the value is to be discarded.
// Struct wire types compile against emptyStruct (so every field is ignored);
// everything else goes through compileIgnoreSingle.
func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, err error) {
	var ok bool
	if enginePtr, ok = dec.ignorerCache[wireId]; !ok {
		// To handle recursive types, mark this engine as underway before compiling.
		enginePtr = new(*decEngine)
		dec.ignorerCache[wireId] = enginePtr
		wire := dec.wireType[wireId]
		if wire != nil && wire.StructT != nil {
			*enginePtr, err = dec.compileDec(wireId, userType(emptyStructType))
		} else {
			*enginePtr = dec.compileIgnoreSingle(wireId)
		}
		if err != nil {
			// Compilation failed; remove the placeholder so a later attempt retries.
			delete(dec.ignorerCache, wireId)
		}
	}
	return
}
// decodeValue decodes the data stream representing a value and stores it in value.
// Errors raised by the engines are converted to dec.err by catchError.
func (dec *Decoder) decodeValue(wireId typeId, value reflect.Value) {
	defer catchError(&dec.err)
	// If the value is nil, it means we should just ignore this item.
	if !value.IsValid() {
		dec.decodeIgnoredValue(wireId)
		return
	}
	// Dereference down to the underlying type.
	ut := userType(value.Type())
	base := ut.base
	var enginePtr **decEngine
	enginePtr, dec.err = dec.getDecEnginePtr(wireId, ut)
	if dec.err != nil {
		return
	}
	value = decAlloc(value)
	engine := *enginePtr
	if st := base; st.Kind() == reflect.Struct && ut.externalDec == 0 {
		wt := dec.wireType[wireId]
		// If the local struct has fields and the remote struct has fields
		// but none of them matched, the types are surely incompatible.
		if engine.numInstr == 0 && st.NumField() > 0 &&
			wt != nil && len(wt.StructT.Field) > 0 {
			name := base.Name()
			errorf("type mismatch: no fields matched compiling decoder for %s", name)
		}
		dec.decodeStruct(engine, value)
	} else {
		dec.decodeSingle(engine, value)
	}
}
// decodeIgnoredValue decodes the data stream representing a value of the specified type and discards it.
// Struct wire types use the ignore-struct engine; all others the single-value one.
func (dec *Decoder) decodeIgnoredValue(wireId typeId) {
	var enginePtr **decEngine
	enginePtr, dec.err = dec.getIgnoreEnginePtr(wireId)
	if dec.err != nil {
		return
	}
	wire := dec.wireType[wireId]
	if wire != nil && wire.StructT != nil {
		dec.ignoreStruct(*enginePtr)
	} else {
		dec.ignoreSingle(*enginePtr)
	}
}
const (
	// intBits and uintptrBits are the sizes in bits of int and uintptr on
	// this platform: 32 << (^uint(0) >> 63) is 32 on 32-bit systems and
	// 64 on 64-bit systems.
	intBits     = 32 << (^uint(0) >> 63)
	uintptrBits = 32 << (^uintptr(0) >> 63)
)
// init fills in the platform-dependent entries of decOpTable: int, uint
// and uintptr decode with the 32- or 64-bit op depending on their size.
func init() {
	var iop, uop decOp
	switch intBits {
	case 32:
		iop = decInt32
		uop = decUint32
	case 64:
		iop = decInt64
		uop = decUint64
	default:
		panic("gob: unknown size of int/uint")
	}
	decOpTable[reflect.Int] = iop
	decOpTable[reflect.Uint] = uop
	// Finally uintptr
	switch uintptrBits {
	case 32:
		uop = decUint32
	case 64:
		uop = decUint64
	default:
		panic("gob: unknown size of uintptr")
	}
	decOpTable[reflect.Uintptr] = uop
}
// Gob depends on being able to take the address
// of zeroed Values it creates, so use this wrapper instead
// of the standard reflect.Zero.
// Each call allocates once.
func allocValue(t reflect.Type) reflect.Value {
return reflect.New(t).Elem()
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gob
import (
"bufio"
"errors"
"internal/saferio"
"io"
"reflect"
"sync"
)
// tooBig provides a sanity check for sizes; used in several places. The upper
// limit is 1GB on 32-bit systems, 8GB on 64-bit, allowing room to grow a
// little without overflow. (The shift count (^uint(0) >> 62) is 0 on 32-bit
// platforms and 3 on 64-bit ones.)
const tooBig = (1 << 30) << (^uint(0) >> 62)
// A Decoder manages the receipt of type and data information read from the
// remote side of a connection. It is safe for concurrent use by multiple
// goroutines.
//
// The Decoder does only basic sanity checking on decoded input sizes,
// and its limits are not configurable. Take caution when decoding gob data
// from untrusted sources.
type Decoder struct {
	mutex        sync.Mutex                              // each item must be received atomically
	r            io.Reader                               // source of the data
	buf          decBuffer                               // buffer for more efficient i/o from r
	wireType     map[typeId]*wireType                    // map from remote ID to local description
	decoderCache map[reflect.Type]map[typeId]**decEngine // cache of compiled engines
	ignorerCache map[typeId]**decEngine                  // ditto for ignored objects
	freeList     *decoderState                           // list of free decoderStates; avoids reallocation
	countBuf     []byte                                  // used for decoding integers while parsing messages
	err          error                                   // sticky first error; checked between operations
	// ignoreDepth tracks the depth of recursively parsed ignored fields
	// (bounded by maxIgnoreNestingDepth).
	ignoreDepth int
}
// NewDecoder returns a new decoder that reads from the [io.Reader].
// If r does not also implement [io.ByteReader], it will be wrapped in a
// [bufio.Reader].
func NewDecoder(r io.Reader) *Decoder {
	// We use the ability to read bytes as a plausible surrogate for buffering.
	if _, ok := r.(io.ByteReader); !ok {
		r = bufio.NewReader(r)
	}
	return &Decoder{
		r:            r,
		wireType:     make(map[typeId]*wireType),
		decoderCache: make(map[reflect.Type]map[typeId]**decEngine),
		ignorerCache: make(map[typeId]**decEngine),
		countBuf:     make([]byte, 9), // counts may be uint64s (unlikely!), require 9 bytes
	}
}
// recvType loads the definition of a type from the input stream and records
// it in dec.wireType under the remote id. Errors are reported via dec.err.
func (dec *Decoder) recvType(id typeId) {
	// Have we already seen this type? That's an error.
	// Ids below firstUserId are predefined and must not be redefined either.
	if id < firstUserId || dec.wireType[id] != nil {
		dec.err = errors.New("gob: duplicate type received")
		return
	}
	// Type:
	wire := new(wireType)
	dec.decodeValue(tWireType, reflect.ValueOf(wire))
	if dec.err != nil {
		return
	}
	// Remember we've seen this type.
	dec.wireType[id] = wire
}
// errBadCount is reported when a message header carries an implausibly
// large byte count (>= tooBig).
var errBadCount = errors.New("invalid message length")
// recvMessage reads the next count-delimited item from the input. It is the
// converse of Encoder.writeMessage. It returns false on EOF or other error
// reading the message.
func (dec *Decoder) recvMessage() bool {
	// Read the leading byte count.
	count, _, err := decodeUintReader(dec.r, dec.countBuf)
	switch {
	case err != nil:
		dec.err = err
		return false
	case count >= tooBig:
		// Refuse to believe (and allocate for) absurd message sizes.
		dec.err = errBadCount
		return false
	}
	dec.readMessage(int(count))
	return dec.err == nil
}
// readMessage reads the next nbytes bytes from the input into dec.buf,
// recording any failure in dec.err.
func (dec *Decoder) readMessage(nbytes int) {
	// The buffer must be empty between messages.
	if dec.buf.Len() != 0 {
		panic("non-empty decoder buffer")
	}
	// Read the data.
	var data []byte
	data, dec.err = saferio.ReadData(dec.r, uint64(nbytes))
	dec.buf.SetBytes(data)
	if dec.err == io.EOF {
		// A truncated message is not a clean end of stream.
		dec.err = io.ErrUnexpectedEOF
	}
}
// toInt turns an encoded uint64 into an int64, according to the marshaling
// rules: the low bit says whether to bit-complement the remaining bits.
func toInt(x uint64) int64 {
	v := int64(x >> 1)
	if x&1 == 1 {
		return ^v
	}
	return v
}
// nextInt reads an encoded signed integer from the decoder's buffer,
// recording any read error in dec.err.
func (dec *Decoder) nextInt() int64 {
	u, _, err := decodeUintReader(&dec.buf, dec.countBuf)
	if err != nil {
		dec.err = err
	}
	return toInt(u)
}
// nextUint reads an encoded unsigned integer from the decoder's buffer,
// recording any read error in dec.err.
func (dec *Decoder) nextUint() uint64 {
	u, _, err := decodeUintReader(&dec.buf, dec.countBuf)
	if err != nil {
		dec.err = err
	}
	return u
}
// decodeTypeSequence parses:
// TypeSequence
//
//	(TypeDefinition DelimitedTypeDefinition*)?
//
// and returns the type id of the next value. It returns -1 at
// EOF. Upon return, the remainder of dec.buf is the value to be
// decoded. If this is an interface value, it can be ignored by
// resetting that buffer.
func (dec *Decoder) decodeTypeSequence(isInterface bool) typeId {
	firstMessage := true
	for dec.err == nil {
		if dec.buf.Len() == 0 {
			if !dec.recvMessage() {
				// We can only return io.EOF if the input was empty.
				// If we read one or more type spec messages,
				// require a data item message to follow.
				// If we hit an EOF before that, then give ErrUnexpectedEOF.
				if !firstMessage && dec.err == io.EOF {
					dec.err = io.ErrUnexpectedEOF
				}
				break
			}
		}
		// Receive a type id.
		id := typeId(dec.nextInt())
		if id >= 0 {
			// Value follows.
			return id
		}
		// Type definition for (-id) follows.
		dec.recvType(-id)
		if dec.err != nil {
			break
		}
		// When decoding an interface, after a type there may be a
		// DelimitedValue still in the buffer. Skip its count.
		// (Alternatively, the buffer is empty and the byte count
		// will be absorbed by recvMessage.)
		if dec.buf.Len() > 0 {
			if !isInterface {
				dec.err = errors.New("extra data in buffer")
				break
			}
			dec.nextUint()
		}
		firstMessage = false
	}
	return -1
}
// Decode reads the next value from the input stream and stores
// it in the data represented by the empty interface value.
// If e is nil, the value will be discarded. Otherwise,
// the value underlying e must be a pointer to the
// correct type for the next data item received.
// If the input is at EOF, Decode returns [io.EOF] and
// does not modify e.
func (dec *Decoder) Decode(e any) error {
	if e == nil {
		// Discard the next value.
		return dec.DecodeValue(reflect.Value{})
	}
	rv := reflect.ValueOf(e)
	// A non-pointer argument could never carry the result back to the
	// caller, so reject it outright.
	if rv.Kind() != reflect.Pointer {
		dec.err = errors.New("gob: attempt to decode into a non-pointer")
		return dec.err
	}
	return dec.DecodeValue(rv)
}
// DecodeValue reads the next value from the input stream.
// If v is the zero reflect.Value (v.Kind() == Invalid), DecodeValue discards the value.
// Otherwise, it stores the value into v. In that case, v must represent
// a non-nil pointer to data or be an assignable reflect.Value (v.CanSet())
// If the input is at EOF, DecodeValue returns [io.EOF] and
// does not modify v.
func (dec *Decoder) DecodeValue(v reflect.Value) error {
	if v.IsValid() {
		if v.Kind() == reflect.Pointer && !v.IsNil() {
			// That's okay, we'll store through the pointer.
		} else if !v.CanSet() {
			return errors.New("gob: DecodeValue of unassignable value")
		}
	}
	// Make sure we're single-threaded through here.
	dec.mutex.Lock()
	defer dec.mutex.Unlock()
	dec.buf.Reset() // In case data lingers from previous invocation.
	dec.err = nil   // Each top-level decode starts with a clean error state.
	id := dec.decodeTypeSequence(false)
	if dec.err == nil {
		dec.decodeValue(id, v)
	}
	return dec.err
}
// If debug.go is compiled into the program, debugFunc prints a human-readable
// representation of the gob data read from r by calling that file's Debug function.
// Otherwise it is nil. (Nothing in this file assigns it.)
var debugFunc func(io.Reader)
// Code generated by go run encgen.go -output enc_helpers.go; DO NOT EDIT.
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gob
import (
"reflect"
)
// encArrayHelper maps an element kind to the generated fast-path encoder
// for arrays of that kind. (Uint8 is absent: byte arrays take the
// dedicated encUint8Array path.)
var encArrayHelper = map[reflect.Kind]encHelper{
	reflect.Bool:       encBoolArray,
	reflect.Complex64:  encComplex64Array,
	reflect.Complex128: encComplex128Array,
	reflect.Float32:    encFloat32Array,
	reflect.Float64:    encFloat64Array,
	reflect.Int:        encIntArray,
	reflect.Int16:      encInt16Array,
	reflect.Int32:      encInt32Array,
	reflect.Int64:      encInt64Array,
	reflect.Int8:       encInt8Array,
	reflect.String:     encStringArray,
	reflect.Uint:       encUintArray,
	reflect.Uint16:     encUint16Array,
	reflect.Uint32:     encUint32Array,
	reflect.Uint64:     encUint64Array,
	reflect.Uintptr:    encUintptrArray,
}
// encSliceHelper maps an element kind to the generated fast-path encoder
// for slices of that kind. (Uint8 is absent: byte slices take the
// dedicated encUint8Array path.)
var encSliceHelper = map[reflect.Kind]encHelper{
	reflect.Bool:       encBoolSlice,
	reflect.Complex64:  encComplex64Slice,
	reflect.Complex128: encComplex128Slice,
	reflect.Float32:    encFloat32Slice,
	reflect.Float64:    encFloat64Slice,
	reflect.Int:        encIntSlice,
	reflect.Int16:      encInt16Slice,
	reflect.Int32:      encInt32Slice,
	reflect.Int64:      encInt64Slice,
	reflect.Int8:       encInt8Slice,
	reflect.String:     encStringSlice,
	reflect.Uint:       encUintSlice,
	reflect.Uint16:     encUint16Slice,
	reflect.Uint32:     encUint32Slice,
	reflect.Uint64:     encUint64Slice,
	reflect.Uintptr:    encUintptrSlice,
}
// encBoolArray encodes a bool array via encBoolSlice; it reports false
// (falling back to the generic path) if the array is not addressable.
func encBoolArray(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encBoolSlice(state, v.Slice(0, v.Len()))
}
// encBoolSlice encodes a []bool element by element, eliding zero values
// unless sendZero is set; it reports false if v's concrete type is not []bool.
func encBoolSlice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]bool](v)
	if !ok {
		// It is kind bool but not type bool. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != false || state.sendZero {
			if x {
				state.encodeUint(1)
			} else {
				state.encodeUint(0)
			}
		}
	}
	return true
}
// encComplex64Array encodes a complex64 array via encComplex64Slice;
// it reports false if the array is not addressable.
func encComplex64Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encComplex64Slice(state, v.Slice(0, v.Len()))
}
// encComplex64Slice encodes a []complex64 as (real, imag) bit pairs;
// it reports false if v's concrete type is not []complex64.
func encComplex64Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]complex64](v)
	if !ok {
		// It is kind complex64 but not type complex64. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0+0i || state.sendZero {
			rpart := floatBits(float64(real(x)))
			ipart := floatBits(float64(imag(x)))
			state.encodeUint(rpart)
			state.encodeUint(ipart)
		}
	}
	return true
}
// encComplex128Array encodes a complex128 array via encComplex128Slice;
// it reports false if the array is not addressable.
func encComplex128Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encComplex128Slice(state, v.Slice(0, v.Len()))
}
// encComplex128Slice encodes a []complex128 as (real, imag) bit pairs;
// it reports false if v's concrete type is not []complex128.
func encComplex128Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]complex128](v)
	if !ok {
		// It is kind complex128 but not type complex128. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0+0i || state.sendZero {
			rpart := floatBits(real(x))
			ipart := floatBits(imag(x))
			state.encodeUint(rpart)
			state.encodeUint(ipart)
		}
	}
	return true
}
// encFloat32Array encodes a float32 array via encFloat32Slice;
// it reports false if the array is not addressable.
func encFloat32Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encFloat32Slice(state, v.Slice(0, v.Len()))
}
// encFloat32Slice encodes a []float32 using the byte-reversed bit encoding;
// it reports false if v's concrete type is not []float32.
func encFloat32Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]float32](v)
	if !ok {
		// It is kind float32 but not type float32. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			bits := floatBits(float64(x))
			state.encodeUint(bits)
		}
	}
	return true
}
// encFloat64Array encodes a float64 array via encFloat64Slice;
// it reports false if the array is not addressable.
func encFloat64Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encFloat64Slice(state, v.Slice(0, v.Len()))
}
// encFloat64Slice encodes a []float64 using the byte-reversed bit encoding;
// it reports false if v's concrete type is not []float64.
func encFloat64Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]float64](v)
	if !ok {
		// It is kind float64 but not type float64. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			bits := floatBits(x)
			state.encodeUint(bits)
		}
	}
	return true
}
// encIntArray encodes an int array via encIntSlice;
// it reports false if the array is not addressable.
func encIntArray(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encIntSlice(state, v.Slice(0, v.Len()))
}
// encIntSlice encodes a []int element by element;
// it reports false if v's concrete type is not []int.
func encIntSlice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]int](v)
	if !ok {
		// It is kind int but not type int. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			state.encodeInt(int64(x))
		}
	}
	return true
}
// encInt16Array encodes an int16 array via encInt16Slice;
// it reports false if the array is not addressable.
func encInt16Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encInt16Slice(state, v.Slice(0, v.Len()))
}
// encInt16Slice encodes a []int16 element by element;
// it reports false if v's concrete type is not []int16.
func encInt16Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]int16](v)
	if !ok {
		// It is kind int16 but not type int16. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			state.encodeInt(int64(x))
		}
	}
	return true
}
// encInt32Array encodes an int32 array via encInt32Slice;
// it reports false if the array is not addressable.
func encInt32Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encInt32Slice(state, v.Slice(0, v.Len()))
}
// encInt32Slice encodes a []int32 element by element;
// it reports false if v's concrete type is not []int32.
func encInt32Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]int32](v)
	if !ok {
		// It is kind int32 but not type int32. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			state.encodeInt(int64(x))
		}
	}
	return true
}
// encInt64Array encodes an int64 array via encInt64Slice;
// it reports false if the array is not addressable.
func encInt64Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encInt64Slice(state, v.Slice(0, v.Len()))
}
// encInt64Slice encodes a []int64 element by element;
// it reports false if v's concrete type is not []int64.
func encInt64Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]int64](v)
	if !ok {
		// It is kind int64 but not type int64. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			state.encodeInt(x)
		}
	}
	return true
}
// encInt8Array encodes an int8 array via encInt8Slice;
// it reports false if the array is not addressable.
func encInt8Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encInt8Slice(state, v.Slice(0, v.Len()))
}
// encInt8Slice encodes a []int8 element by element;
// it reports false if v's concrete type is not []int8.
func encInt8Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]int8](v)
	if !ok {
		// It is kind int8 but not type int8. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			state.encodeInt(int64(x))
		}
	}
	return true
}
// encStringArray encodes a string array via encStringSlice;
// it reports false if the array is not addressable.
func encStringArray(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encStringSlice(state, v.Slice(0, v.Len()))
}
// encStringSlice encodes a []string as length-prefixed raw bytes per element;
// it reports false if v's concrete type is not []string.
func encStringSlice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]string](v)
	if !ok {
		// It is kind string but not type string. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != "" || state.sendZero {
			state.encodeUint(uint64(len(x)))
			state.b.WriteString(x)
		}
	}
	return true
}
// encUintArray encodes a uint array via encUintSlice;
// it reports false if the array is not addressable.
func encUintArray(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encUintSlice(state, v.Slice(0, v.Len()))
}
// encUintSlice encodes a []uint element by element;
// it reports false if v's concrete type is not []uint.
func encUintSlice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]uint](v)
	if !ok {
		// It is kind uint but not type uint. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			state.encodeUint(uint64(x))
		}
	}
	return true
}
// encUint16Array encodes a uint16 array via encUint16Slice;
// it reports false if the array is not addressable.
func encUint16Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encUint16Slice(state, v.Slice(0, v.Len()))
}
// encUint16Slice encodes a []uint16 element by element;
// it reports false if v's concrete type is not []uint16.
func encUint16Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]uint16](v)
	if !ok {
		// It is kind uint16 but not type uint16. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			state.encodeUint(uint64(x))
		}
	}
	return true
}
// encUint32Array encodes a uint32 array via encUint32Slice;
// it reports false if the array is not addressable.
func encUint32Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encUint32Slice(state, v.Slice(0, v.Len()))
}
// encUint32Slice encodes a []uint32 element by element;
// it reports false if v's concrete type is not []uint32.
func encUint32Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]uint32](v)
	if !ok {
		// It is kind uint32 but not type uint32. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			state.encodeUint(uint64(x))
		}
	}
	return true
}
// encUint64Array encodes a uint64 array via encUint64Slice;
// it reports false if the array is not addressable.
func encUint64Array(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encUint64Slice(state, v.Slice(0, v.Len()))
}
// encUint64Slice encodes a []uint64 element by element;
// it reports false if v's concrete type is not []uint64.
func encUint64Slice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]uint64](v)
	if !ok {
		// It is kind uint64 but not type uint64. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			state.encodeUint(x)
		}
	}
	return true
}
// encUintptrArray encodes a uintptr array via encUintptrSlice;
// it reports false if the array is not addressable.
func encUintptrArray(state *encoderState, v reflect.Value) bool {
	// Can only slice if it is addressable.
	if !v.CanAddr() {
		return false
	}
	return encUintptrSlice(state, v.Slice(0, v.Len()))
}
// encUintptrSlice encodes a []uintptr element by element;
// it reports false if v's concrete type is not []uintptr.
func encUintptrSlice(state *encoderState, v reflect.Value) bool {
	slice, ok := reflect.TypeAssert[[]uintptr](v)
	if !ok {
		// It is kind uintptr but not type uintptr. TODO: We can handle this unsafely.
		return false
	}
	for _, x := range slice {
		if x != 0 || state.sendZero {
			state.encodeUint(uint64(x))
		}
	}
	return true
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run encgen.go -output enc_helpers.go
package gob
import (
"encoding"
"encoding/binary"
"math"
"math/bits"
"reflect"
"sync"
)
// uint64Size is the number of data bytes in an encoded uint64
// (the length byte, if any, is extra).
const uint64Size = 8

// encHelper is a type-specialized fast-path encoder for a slice or array;
// it reports whether it was able to handle the value.
type encHelper func(state *encoderState, v reflect.Value) bool
// encoderState is the global execution state of an instance of the encoder.
// Field numbers are delta encoded and always increase. The field
// number is initialized to -1 so 0 comes out as delta(1). A delta of
// 0 terminates the structure.
type encoderState struct {
	enc      *Encoder
	b        *encBuffer           // destination of the encoded bytes
	sendZero bool                 // encoding an array element or map key/value pair; send zero values
	fieldnum int                  // the last field number written.
	buf      [1 + uint64Size]byte // buffer used by the encoder; here to avoid allocation.
	next     *encoderState        // for free list
}
// encBuffer is an extremely simple, fast implementation of a write-only byte buffer.
// It never returns a non-nil error, but Write returns an error value so it matches io.Writer.
type encBuffer struct {
	data    []byte   // accumulated output; initially aliases scratch
	scratch [64]byte // inline storage for small messages, avoiding an allocation
}
// encBufferPool recycles encBuffers. Fresh buffers start out writing into
// their inline scratch array.
var encBufferPool = sync.Pool{
	New: func() any {
		e := new(encBuffer)
		e.data = e.scratch[0:0]
		return e
	},
}
// writeByte appends a single byte to the buffer.
func (e *encBuffer) writeByte(c byte) {
	e.data = append(e.data, c)
}
// Write appends p to the buffer. The returned error is always nil; the
// signature exists only to satisfy io.Writer.
func (e *encBuffer) Write(p []byte) (int, error) {
	e.data = append(e.data, p...)
	return len(p), nil
}
// WriteString appends the bytes of s to the buffer.
func (e *encBuffer) WriteString(s string) {
	e.data = append(e.data, s...)
}
// Len returns the number of bytes accumulated in the buffer.
func (e *encBuffer) Len() int {
	return len(e.data)
}
// Bytes returns the accumulated bytes. The slice aliases the buffer's
// internal storage; it is invalidated by further writes.
func (e *encBuffer) Bytes() []byte {
	return e.data
}
// Reset empties the buffer, retaining its storage for reuse. If the buffer
// has grown past tooBig, the large backing array is dropped (back to the
// inline scratch space) so the memory can be reclaimed.
func (e *encBuffer) Reset() {
	if len(e.data) >= tooBig {
		e.data = e.scratch[0:0]
		return
	}
	e.data = e.data[0:0]
}
// newEncoderState returns an encoderState that writes to b, reusing one from
// the encoder's free list when available.
func (enc *Encoder) newEncoderState(b *encBuffer) *encoderState {
	e := enc.freeList
	if e == nil {
		e = new(encoderState)
		e.enc = enc
	} else {
		enc.freeList = e.next
	}
	e.sendZero = false
	e.fieldnum = 0
	e.b = b
	if len(b.data) == 0 {
		// Point an empty buffer at its inline scratch space.
		b.data = b.scratch[0:0]
	}
	return e
}
// freeEncoderState pushes e onto the encoder's free list for later reuse.
func (enc *Encoder) freeEncoderState(e *encoderState) {
	e.next = enc.freeList
	enc.freeList = e
}
// Unsigned integers have a two-state encoding. If the number is less
// than 128 (0 through 0x7F), its value is written directly.
// Otherwise the value is written in big-endian byte order preceded
// by the byte length, negated.

// encodeUint writes an encoded unsigned integer to state.b.
func (state *encoderState) encodeUint(x uint64) {
	if x <= 0x7F {
		// Single-byte fast path.
		state.b.writeByte(uint8(x))
		return
	}
	// Lay the value down big-endian after a slot for the length byte,
	// then write from the first significant byte onward.
	binary.BigEndian.PutUint64(state.buf[1:], x)
	bc := bits.LeadingZeros64(x) >> 3      // 8 - bytelen(x)
	state.buf[bc] = uint8(bc - uint64Size) // and then we subtract 8 to get -bytelen(x)
	state.b.Write(state.buf[bc : uint64Size+1])
}
// encodeInt writes an encoded signed integer to state.b.
// The low bit of the encoding says whether to bit complement the (other
// bits of the) uint to recover the int.
func (state *encoderState) encodeInt(i int64) {
	u := uint64(i << 1)
	if i < 0 {
		u = uint64(^i<<1) | 1
	}
	state.encodeUint(u)
}
// encOp is the signature of an encoding operator for a given type.
type encOp func(i *encInstr, state *encoderState, v reflect.Value)

// encInstr is one 'instruction' of the encoding machine: how to fetch and
// encode a single field.
type encInstr struct {
	op    encOp
	field int   // field number in input
	index []int // struct index
	indir int   // how many pointer indirections to reach the value in the struct
}
// update emits a field number and updates the state to record its value for delta encoding.
// If the instruction pointer is nil, it does nothing (element encoders are
// invoked with a nil instruction).
func (state *encoderState) update(instr *encInstr) {
	if instr != nil {
		state.encodeUint(uint64(instr.field - state.fieldnum))
		state.fieldnum = instr.field
	}
}
// Each encoder for a composite is responsible for handling any
// indirections associated with the elements of the data structure.
// If any pointer so reached is nil, no bytes are written. If the
// data item is zero, no bytes are written. Single values - ints,
// strings etc. - are indirected before calling their encoders.
// Otherwise, the output (for a scalar) is the field number, as an
// encoded integer, followed by the field data in its appropriate
// format.
// encIndirect dereferences pv indir times and returns the result.
func encIndirect(pv reflect.Value, indir int) reflect.Value {
for ; indir > 0; indir-- {
if pv.IsNil() {
break
}
pv = pv.Elem()
}
return pv
}
// encBool encodes the bool referenced by v as an unsigned 0 or 1,
// eliding the zero value unless sendZero is set.
func encBool(i *encInstr, state *encoderState, v reflect.Value) {
	b := v.Bool()
	if !b && !state.sendZero {
		return
	}
	state.update(i)
	var u uint64
	if b {
		u = 1
	}
	state.encodeUint(u)
}
// encInt encodes the signed integer (int int8 int16 int32 int64) referenced
// by v, eliding the zero value unless sendZero is set.
func encInt(i *encInstr, state *encoderState, v reflect.Value) {
	n := v.Int()
	if n == 0 && !state.sendZero {
		return
	}
	state.update(i)
	state.encodeInt(n)
}
// encUint encodes the unsigned integer (uint uint8 uint16 uint32 uint64
// uintptr) referenced by v, eliding the zero value unless sendZero is set.
func encUint(i *encInstr, state *encoderState, v reflect.Value) {
	n := v.Uint()
	if n == 0 && !state.sendZero {
		return
	}
	state.update(i)
	state.encodeUint(n)
}
// floatBits returns a uint64 holding the bits of a floating-point number.
// Floating-point numbers are transmitted as uint64s holding the bits
// of the underlying representation. They are sent byte-reversed, with
// the exponent end coming out first, so integer floating point numbers
// (for example) transmit more compactly. This routine does the
// swizzling.
func floatBits(f float64) uint64 {
	return bits.ReverseBytes64(math.Float64bits(f))
}
// encFloat encodes the floating point value (float32 float64) referenced by
// v, eliding the zero value unless sendZero is set. (NaN compares unequal
// to zero and so is always sent.)
func encFloat(i *encInstr, state *encoderState, v reflect.Value) {
	f := v.Float()
	if f == 0 && !state.sendZero {
		return
	}
	b := floatBits(f)
	state.update(i)
	state.encodeUint(b)
}
// encComplex encodes the complex value (complex64 complex128) referenced by v.
// Complex numbers are just a pair of floating-point numbers, real part first.
// The zero value is elided unless sendZero is set.
func encComplex(i *encInstr, state *encoderState, v reflect.Value) {
	c := v.Complex()
	if c == 0 && !state.sendZero {
		return
	}
	rpart := floatBits(real(c))
	ipart := floatBits(imag(c))
	state.update(i)
	state.encodeUint(rpart)
	state.encodeUint(ipart)
}
// encUint8Array encodes the byte array referenced by v.
// Byte arrays are encoded as an unsigned count followed by the raw bytes.
// An empty value is elided unless sendZero is set.
func encUint8Array(i *encInstr, state *encoderState, v reflect.Value) {
	data := v.Bytes()
	if len(data) == 0 && !state.sendZero {
		return
	}
	state.update(i)
	state.encodeUint(uint64(len(data)))
	state.b.Write(data)
}
// encString encodes the string referenced by v.
// Strings are encoded as an unsigned count followed by the raw bytes.
// An empty string is elided unless sendZero is set.
func encString(i *encInstr, state *encoderState, v reflect.Value) {
	s := v.String()
	if len(s) == 0 && !state.sendZero {
		return
	}
	state.update(i)
	state.encodeUint(uint64(len(s)))
	state.b.WriteString(s)
}
// encStructTerminator encodes the end of an encoded struct
// as delta field number of 0. The value argument is ignored.
func encStructTerminator(i *encInstr, state *encoderState, v reflect.Value) {
	state.encodeUint(0)
}
// Execution engine

// encEngine is an array of instructions indexed by field number of the
// encoding data, typically a struct. It is executed top to bottom, walking
// the struct.
type encEngine struct {
	instr []encInstr
}

// singletonField is the pseudo field number used for a lone top-level
// non-struct value.
const singletonField = 0
// valid reports whether the value is valid and a non-nil pointer.
// (Slices, maps, and chans take care of themselves.)
func valid(v reflect.Value) bool {
switch v.Kind() {
case reflect.Invalid:
return false
case reflect.Pointer:
return !v.IsNil()
}
return true
}
// encodeSingle encodes a single top-level non-struct value.
func (enc *Encoder) encodeSingle(b *encBuffer, engine *encEngine, value reflect.Value) {
	state := enc.newEncoderState(b)
	defer enc.freeEncoderState(state)
	state.fieldnum = singletonField
	// There is no surrounding struct to frame the transmission, so we must
	// generate data even if the item is zero. To do this, set sendZero.
	state.sendZero = true
	instr := &engine.instr[singletonField]
	if instr.indir > 0 {
		value = encIndirect(value, instr.indir)
	}
	// A nil pointer reached by indirection encodes nothing.
	if valid(value) {
		instr.op(instr, state, value)
	}
}
// encodeStruct encodes a single struct value by running the engine's
// instructions in order; a trailing terminator instruction marks the end.
func (enc *Encoder) encodeStruct(b *encBuffer, engine *encEngine, value reflect.Value) {
	if !valid(value) {
		return
	}
	state := enc.newEncoderState(b)
	defer enc.freeEncoderState(state)
	state.fieldnum = -1
	for i := 0; i < len(engine.instr); i++ {
		instr := &engine.instr[i]
		// Stop if the value has fewer fields than the engine expects.
		if i >= value.NumField() {
			// encStructTerminator
			instr.op(instr, state, reflect.Value{})
			break
		}
		field := value.FieldByIndex(instr.index)
		if instr.indir > 0 {
			field = encIndirect(field, instr.indir)
			// TODO: Is field guaranteed valid? If so we could avoid this check.
			if !valid(field) {
				continue
			}
		}
		instr.op(instr, state, field)
	}
}
// encodeArray encodes an array or slice as an unsigned count followed by
// the encoded elements.
func (enc *Encoder) encodeArray(b *encBuffer, value reflect.Value, op encOp, elemIndir int, length int, helper encHelper) {
	state := enc.newEncoderState(b)
	defer enc.freeEncoderState(state)
	state.fieldnum = -1
	state.sendZero = true // array elements are positional, so zeros must be sent
	state.encodeUint(uint64(length))
	// Try the type-specialized fast path first.
	if helper != nil && helper(state, value) {
		return
	}
	for i := 0; i < length; i++ {
		elem := value.Index(i)
		if elemIndir > 0 {
			elem = encIndirect(elem, elemIndir)
			// TODO: Is elem guaranteed valid? If so we could avoid this check.
			if !valid(elem) {
				errorf("encodeArray: nil element")
			}
		}
		op(nil, state, elem)
	}
}
// encodeReflectValue is a helper for maps. It dereferences v indir times
// and encodes the result with op; reaching a nil pointer is an error.
func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir int) {
	// reflect.Indirect of a nil pointer yields an invalid Value, which
	// ends the loop early.
	for i := 0; i < indir && v.IsValid(); i++ {
		v = reflect.Indirect(v)
	}
	if !v.IsValid() {
		errorf("encodeReflectValue: nil element")
	}
	op(nil, state, v)
}
// encodeMap encodes a map as an unsigned count followed by key:value pairs.
// Zero-length (but non-nil) maps are sent; eliding nil maps is the caller's
// responsibility (see the map case in encOpFor).
func (enc *Encoder) encodeMap(b *encBuffer, mv reflect.Value, keyOp, elemOp encOp, keyIndir, elemIndir int) {
	state := enc.newEncoderState(b)
	// Free via defer so the state returns to the free list even when an
	// element encoder aborts through errorf/panic, matching encodeStruct
	// and encodeArray.
	defer enc.freeEncoderState(state)
	state.fieldnum = -1
	state.sendZero = true // map keys and values are always sent, even zeros
	state.encodeUint(uint64(mv.Len()))
	mi := mv.MapRange()
	for mi.Next() {
		encodeReflectValue(state, mi.Key(), keyOp, keyIndir)
		encodeReflectValue(state, mi.Value(), elemOp, elemIndir)
	}
}
// encodeInterface encodes the interface value iv.
// To send an interface, we send a string identifying the concrete type, followed
// by the type identifier (which might require defining that type right now), followed
// by the concrete value. A nil value gets sent as the empty string for the name,
// followed by no value.
func (enc *Encoder) encodeInterface(b *encBuffer, iv reflect.Value) {
	// Gobs can encode nil interface values but not typed interface
	// values holding nil pointers, since nil pointers point to no value.
	elem := iv.Elem()
	if elem.Kind() == reflect.Pointer && elem.IsNil() {
		errorf("gob: cannot encode nil pointer of type %s inside interface", iv.Elem().Type())
	}
	state := enc.newEncoderState(b)
	state.fieldnum = -1
	state.sendZero = true
	if iv.IsNil() {
		// Nil interface: empty type name, no value.
		state.encodeUint(0)
		return
	}
	ut := userType(iv.Elem().Type())
	namei, ok := concreteTypeToName.Load(ut.base)
	if !ok {
		errorf("type not registered for interface: %s", ut.base)
	}
	name := namei.(string)
	// Send the name.
	state.encodeUint(uint64(len(name)))
	state.b.WriteString(name)
	// Define the type id if necessary.
	enc.sendTypeDescriptor(enc.writer(), state, ut)
	// Send the type id.
	enc.sendTypeId(state, ut)
	// Encode the value into a new buffer. Any nested type definitions
	// should be written to b, before the encoded value.
	enc.pushWriter(b)
	data := encBufferPool.Get().(*encBuffer)
	data.Write(spaceForLength)
	enc.encode(data, elem, ut)
	if enc.err != nil {
		error_(enc.err)
	}
	enc.popWriter()
	enc.writeMessage(b, data)
	data.Reset()
	encBufferPool.Put(data)
	if enc.err != nil {
		error_(enc.err)
	}
	enc.freeEncoderState(state)
}
// encodeGobEncoder encodes a value that implements the GobEncoder interface
// (or one of the encoding marshaler interfaces).
// The data is sent as a byte array: an unsigned count followed by raw bytes.
func (enc *Encoder) encodeGobEncoder(b *encBuffer, ut *userTypeInfo, v reflect.Value) {
	// TODO: should we catch panics from the called method?
	var data []byte
	var err error
	// We know it's one of these.
	switch ut.externalEnc {
	case xGob:
		gobEncoder, _ := reflect.TypeAssert[GobEncoder](v)
		data, err = gobEncoder.GobEncode()
	case xBinary:
		binaryMarshaler, _ := reflect.TypeAssert[encoding.BinaryMarshaler](v)
		data, err = binaryMarshaler.MarshalBinary()
	case xText:
		textMarshaler, _ := reflect.TypeAssert[encoding.TextMarshaler](v)
		data, err = textMarshaler.MarshalText()
	}
	if err != nil {
		error_(err)
	}
	state := enc.newEncoderState(b)
	state.fieldnum = -1
	state.encodeUint(uint64(len(data)))
	state.b.Write(data)
	enc.freeEncoderState(state)
}
// encOpTable maps a reflect.Kind to the scalar encoding op for that kind.
// Composite kinds (slice, array, map, struct, interface) are handled
// specially in encOpFor and have no entry here.
var encOpTable = [...]encOp{
	reflect.Bool:       encBool,
	reflect.Int:        encInt,
	reflect.Int8:       encInt,
	reflect.Int16:      encInt,
	reflect.Int32:      encInt,
	reflect.Int64:      encInt,
	reflect.Uint:       encUint,
	reflect.Uint8:      encUint,
	reflect.Uint16:     encUint,
	reflect.Uint32:     encUint,
	reflect.Uint64:     encUint,
	reflect.Uintptr:    encUint,
	reflect.Float32:    encFloat,
	reflect.Float64:    encFloat,
	reflect.Complex64:  encComplex,
	reflect.Complex128: encComplex,
	reflect.String:     encString,
}
// encOpFor returns (a pointer to) the encoding op for the base type under rt and
// the indirection count to reach it. A pointer is returned (rather than the
// op itself) so that recursive types can refer to an op that is still being
// built.
func encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp, building map[*typeInfo]bool) (*encOp, int) {
	ut := userType(rt)
	// If the type implements GobEncoder, we handle it without further processing.
	if ut.externalEnc != 0 {
		return gobEncodeOpFor(ut)
	}
	// If this type is already in progress, it's a recursive type (e.g. map[string]*T).
	// Return the pointer to the op we're already building.
	if opPtr := inProgress[rt]; opPtr != nil {
		return opPtr, ut.indir
	}
	typ := ut.base
	indir := ut.indir
	k := typ.Kind()
	var op encOp
	if int(k) < len(encOpTable) {
		op = encOpTable[k]
	}
	if op == nil {
		inProgress[rt] = &op
		// Special cases
		switch t := typ; t.Kind() {
		case reflect.Slice:
			if t.Elem().Kind() == reflect.Uint8 {
				// []byte is sent as raw bytes.
				op = encUint8Array
				break
			}
			// Slices have a header; we decode it to find the underlying array.
			elemOp, elemIndir := encOpFor(t.Elem(), inProgress, building)
			helper := encSliceHelper[t.Elem().Kind()]
			op = func(i *encInstr, state *encoderState, slice reflect.Value) {
				if !state.sendZero && slice.Len() == 0 {
					return
				}
				state.update(i)
				state.enc.encodeArray(state.b, slice, *elemOp, elemIndir, slice.Len(), helper)
			}
		case reflect.Array:
			// True arrays have size in the type.
			elemOp, elemIndir := encOpFor(t.Elem(), inProgress, building)
			helper := encArrayHelper[t.Elem().Kind()]
			op = func(i *encInstr, state *encoderState, array reflect.Value) {
				state.update(i)
				state.enc.encodeArray(state.b, array, *elemOp, elemIndir, array.Len(), helper)
			}
		case reflect.Map:
			keyOp, keyIndir := encOpFor(t.Key(), inProgress, building)
			elemOp, elemIndir := encOpFor(t.Elem(), inProgress, building)
			op = func(i *encInstr, state *encoderState, mv reflect.Value) {
				// We send zero-length (but non-nil) maps because the
				// receiver might want to use the map. (Maps don't use append.)
				if !state.sendZero && mv.IsNil() {
					return
				}
				state.update(i)
				state.enc.encodeMap(state.b, mv, *keyOp, *elemOp, keyIndir, elemIndir)
			}
		case reflect.Struct:
			// Generate a closure that calls out to the engine for the nested type.
			getEncEngine(userType(typ), building)
			info := mustGetTypeInfo(typ)
			op = func(i *encInstr, state *encoderState, sv reflect.Value) {
				state.update(i)
				// indirect through info to delay evaluation for recursive structs
				enc := info.encoder.Load()
				state.enc.encodeStruct(state.b, enc, sv)
			}
		case reflect.Interface:
			op = func(i *encInstr, state *encoderState, iv reflect.Value) {
				if !state.sendZero && (!iv.IsValid() || iv.IsNil()) {
					return
				}
				state.update(i)
				state.enc.encodeInterface(state.b, iv)
			}
		}
	}
	if op == nil {
		errorf("can't happen: encode type %s", rt)
	}
	return &op, indir
}
// gobEncodeOpFor returns the op for a type that is known to implement GobEncoder.
func gobEncodeOpFor(ut *userTypeInfo) (*encOp, int) {
	rt := ut.user
	if ut.encIndir == -1 {
		// The method is on the pointer receiver; encode via the address.
		rt = reflect.PointerTo(rt)
	} else if ut.encIndir > 0 {
		for i := int8(0); i < ut.encIndir; i++ {
			rt = rt.Elem()
		}
	}
	var op encOp
	op = func(i *encInstr, state *encoderState, v reflect.Value) {
		if ut.encIndir == -1 {
			// Need to climb up one level to turn value into pointer.
			if !v.CanAddr() {
				errorf("unaddressable value of type %s", rt)
			}
			v = v.Addr()
		}
		if !state.sendZero && v.IsZero() {
			return
		}
		state.update(i)
		state.enc.encodeGobEncoder(state.b, ut, v)
	}
	return &op, int(ut.encIndir) // encIndir: op will get called with p == address of receiver.
}
// compileEnc builds and returns the encoding engine for the type: one
// instruction per transmitted struct field plus a terminator, or a single
// instruction for a non-struct (or externally-encoded) type.
func compileEnc(ut *userTypeInfo, building map[*typeInfo]bool) *encEngine {
	srt := ut.base
	engine := new(encEngine)
	seen := make(map[reflect.Type]*encOp)
	rt := ut.base
	if ut.externalEnc != 0 {
		rt = ut.user
	}
	if ut.externalEnc == 0 && srt.Kind() == reflect.Struct {
		// Fields are numbered by their position on the wire, skipping
		// any that are not sent (e.g. unexported fields).
		for fieldNum, wireFieldNum := 0, 0; fieldNum < srt.NumField(); fieldNum++ {
			f := srt.Field(fieldNum)
			if !isSent(&f) {
				continue
			}
			op, indir := encOpFor(f.Type, seen, building)
			engine.instr = append(engine.instr, encInstr{*op, wireFieldNum, f.Index, indir})
			wireFieldNum++
		}
		if srt.NumField() > 0 && len(engine.instr) == 0 {
			errorf("type %s has no exported fields", rt)
		}
		engine.instr = append(engine.instr, encInstr{encStructTerminator, 0, nil, 0})
	} else {
		engine.instr = make([]encInstr, 1)
		op, indir := encOpFor(rt, seen, building)
		engine.instr[0] = encInstr{*op, singletonField, nil, indir}
	}
	return engine
}
// getEncEngine returns the cached encoder engine for ut, building and
// caching it on first use.
func getEncEngine(ut *userTypeInfo, building map[*typeInfo]bool) *encEngine {
	info, err := getTypeInfo(ut)
	if err != nil {
		// Panics with gobError; recovered by catchError up the stack.
		error_(err)
	}
	enc := info.encoder.Load()
	if enc == nil {
		enc = buildEncEngine(info, ut, building)
	}
	return enc
}
// buildEncEngine compiles, stores, and returns the encoder engine for info.
// It returns nil when info is already being built further up the call stack
// (i.e. a recursive type).
func buildEncEngine(info *typeInfo, ut *userTypeInfo, building map[*typeInfo]bool) *encEngine {
	// Check for recursive types.
	if building != nil && building[info] {
		return nil
	}
	info.encInit.Lock()
	defer info.encInit.Unlock()
	// Re-check under the lock: another goroutine may have built it already.
	enc := info.encoder.Load()
	if enc == nil {
		if building == nil {
			building = make(map[*typeInfo]bool)
		}
		building[info] = true
		enc = compileEnc(ut, building)
		info.encoder.Store(enc)
	}
	return enc
}
// encode encodes value (described by ut) into b, converting any internal
// panic (gobError) into enc.err via catchError.
func (enc *Encoder) encode(b *encBuffer, value reflect.Value, ut *userTypeInfo) {
	defer catchError(&enc.err)
	engine := getEncEngine(ut, nil)
	indir := ut.indir
	if ut.externalEnc != 0 {
		indir = int(ut.encIndir)
	}
	// Strip indirections until we hold the value the engine expects.
	for i := 0; i < indir; i++ {
		value = reflect.Indirect(value)
	}
	if ut.externalEnc == 0 && value.Kind() == reflect.Struct {
		enc.encodeStruct(b, engine, value)
	} else {
		enc.encodeSingle(b, engine, value)
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gob
import (
"errors"
"io"
"reflect"
"sync"
)
// An Encoder manages the transmission of type and data information to the
// other side of a connection. It is safe for concurrent use by multiple
// goroutines.
type Encoder struct {
	mutex      sync.Mutex              // each item must be sent atomically
	w          []io.Writer             // stack of writers; the innermost (last) is active
	sent       map[reflect.Type]typeId // which types we've already sent
	countState *encoderState           // stage for writing counts
	freeList   *encoderState           // list of free encoderStates; avoids reallocation
	byteBuf    encBuffer               // buffer for top-level encoderState
	err        error                   // sticky: only the first error is kept
}
// Before we encode a message, we reserve space at the head of the
// buffer in which to encode its length. This means we can use the
// buffer to assemble the message without another allocation.
const maxLength = 9 // Maximum size of an encoded length.

// spaceForLength is a zeroed placeholder written at the head of each message
// buffer; the actual encoded length later overwrites its tail (see writeMessage).
var spaceForLength = make([]byte, maxLength)
// NewEncoder returns a new encoder that will transmit on the [io.Writer].
func NewEncoder(w io.Writer) *Encoder {
	enc := new(Encoder)
	enc.w = []io.Writer{w}
	enc.sent = make(map[reflect.Type]typeId)
	// countState holds its own buffer, used only for encoding message lengths.
	enc.countState = enc.newEncoderState(new(encBuffer))
	return enc
}
// writer returns the innermost writer the encoder is using.
func (enc *Encoder) writer() io.Writer {
	last := len(enc.w) - 1
	return enc.w[last]
}
// pushWriter adds a writer to the encoder, making it the innermost one.
func (enc *Encoder) pushWriter(w io.Writer) {
	stack := append(enc.w, w)
	enc.w = stack
}
// popWriter pops the innermost writer.
func (enc *Encoder) popWriter() {
	top := len(enc.w) - 1
	enc.w = enc.w[:top]
}
// setError records err on the encoder. Only the first error is remembered;
// later ones are discarded.
func (enc *Encoder) setError(err error) {
	if enc.err != nil {
		return // keep the first error
	}
	enc.err = err
}
// writeMessage sends the data item preceded by an unsigned count of its length.
func (enc *Encoder) writeMessage(w io.Writer, b *encBuffer) {
	// Space has been reserved for the length at the head of the message.
	// This is a little dirty: we grab the slice from the bytes.Buffer and massage
	// it by hand.
	message := b.Bytes()
	messageLen := len(message) - maxLength
	// Length cannot be bigger than the decoder can handle.
	if messageLen >= tooBig {
		enc.setError(errors.New("gob: encoder: message too big"))
		return
	}
	// Encode the length.
	enc.countState.b.Reset()
	enc.countState.encodeUint(uint64(messageLen))
	// Copy the length to be a prefix of the message, right-aligned against
	// the payload so the write below starts exactly at the length bytes.
	offset := maxLength - enc.countState.b.Len()
	copy(message[offset:], enc.countState.b.Bytes())
	// Write the data.
	_, err := w.Write(message[offset:])
	// Drain the buffer and restore the space at the front for the count of the next message.
	b.Reset()
	b.Write(spaceForLength)
	if err != nil {
		enc.setError(err)
	}
}
// sendActualType sends the requested type, without further investigation, unless
// it's been sent before. It reports whether a descriptor was transmitted.
func (enc *Encoder) sendActualType(w io.Writer, state *encoderState, ut *userTypeInfo, actual reflect.Type) (sent bool) {
	if _, alreadySent := enc.sent[actual]; alreadySent {
		return false
	}
	info, err := getTypeInfo(ut)
	if err != nil {
		enc.setError(err)
		return // sent == false
	}
	// Send the pair (-id, type)
	// Id:
	state.encodeInt(-int64(info.id))
	// Type:
	enc.encode(state.b, reflect.ValueOf(info.wire), wireTypeUserInfo)
	enc.writeMessage(w, state.b)
	if enc.err != nil {
		return
	}
	// Remember we've sent this type, both what the user gave us and the base type.
	enc.sent[ut.base] = info.id
	if ut.user != ut.base {
		enc.sent[ut.user] = info.id
	}
	// Now send the inner types, recursively.
	switch st := actual; st.Kind() {
	case reflect.Struct:
		for i := 0; i < st.NumField(); i++ {
			if isExported(st.Field(i).Name) {
				enc.sendType(w, state, st.Field(i).Type)
			}
		}
	case reflect.Array, reflect.Slice:
		enc.sendType(w, state, st.Elem())
	case reflect.Map:
		enc.sendType(w, state, st.Key())
		enc.sendType(w, state, st.Elem())
	}
	return true
}
// sendType sends the type info to the other side, if necessary.
// It reports whether a type descriptor was actually transmitted.
func (enc *Encoder) sendType(w io.Writer, state *encoderState, origt reflect.Type) (sent bool) {
	ut := userType(origt)
	if ut.externalEnc != 0 {
		// The rules are different: regardless of the underlying type's representation,
		// we need to tell the other side that the base type is a GobEncoder.
		return enc.sendActualType(w, state, ut, ut.base)
	}
	// It's a concrete value, so drill down to the base type.
	// Go switch cases break implicitly, so the composite cases simply
	// fall out of the switch and send the type below.
	switch rt := ut.base; rt.Kind() {
	default:
		// Basic types and interfaces do not need to be described.
		return
	case reflect.Slice:
		// If it's []uint8, don't send; it's considered basic.
		if rt.Elem().Kind() == reflect.Uint8 {
			return
		}
		// Otherwise we do send.
	case reflect.Array:
		// arrays must be sent so we know their lengths and element types.
	case reflect.Map:
		// maps must be sent so we know their lengths and key/value types.
	case reflect.Struct:
		// structs must be sent so we know their fields.
	case reflect.Chan, reflect.Func:
		// If we get here, it's a field of a struct; ignore it.
		return
	}
	return enc.sendActualType(w, state, ut, ut.base)
}
// Encode transmits the data item represented by the empty interface value,
// guaranteeing that all necessary type information has been transmitted first.
// Passing a nil pointer to Encoder will panic, as they cannot be transmitted by gob.
func (enc *Encoder) Encode(e any) error {
	// Thin wrapper: all the work happens in EncodeValue.
	return enc.EncodeValue(reflect.ValueOf(e))
}
// sendTypeDescriptor makes sure the remote side knows about this type.
// It will send a descriptor if this is the first time the type has been
// sent.
func (enc *Encoder) sendTypeDescriptor(w io.Writer, state *encoderState, ut *userTypeInfo) {
	// Make sure the type is known to the other side.
	// First, have we already sent this type?
	rt := ut.base
	if ut.externalEnc != 0 {
		rt = ut.user
	}
	if _, alreadySent := enc.sent[rt]; !alreadySent {
		// No, so send it.
		sent := enc.sendType(w, state, rt)
		if enc.err != nil {
			return
		}
		// If the type info has still not been transmitted, it means we have
		// a singleton basic type (int, []byte etc.) at top level. We don't
		// need to send the type info but we do need to update enc.sent.
		if !sent {
			info, err := getTypeInfo(ut)
			if err != nil {
				enc.setError(err)
				return
			}
			enc.sent[rt] = info.id
		}
	}
}
// sendTypeId sends the id, which must have already been defined
// (i.e. present in enc.sent, keyed by the base type).
func (enc *Encoder) sendTypeId(state *encoderState, ut *userTypeInfo) {
	// Identify the type of this top-level value.
	state.encodeInt(int64(enc.sent[ut.base]))
}
// EncodeValue transmits the data item represented by the reflection value,
// guaranteeing that all necessary type information has been transmitted first.
// Passing a nil pointer to EncodeValue will panic, as they cannot be transmitted by gob.
func (enc *Encoder) EncodeValue(value reflect.Value) error {
	if value.Kind() == reflect.Invalid {
		return errors.New("gob: cannot encode nil value")
	}
	if value.Kind() == reflect.Pointer && value.IsNil() {
		panic("gob: cannot encode nil pointer of type " + value.Type().String())
	}
	// Make sure we're single-threaded through here, so multiple
	// goroutines can share an encoder.
	enc.mutex.Lock()
	defer enc.mutex.Unlock()
	// Remove any nested writers remaining due to previous errors.
	enc.w = enc.w[0:1]
	ut, err := validUserType(value.Type())
	if err != nil {
		return err
	}
	enc.err = nil
	// Reserve the length prefix at the head of the message buffer.
	enc.byteBuf.Reset()
	enc.byteBuf.Write(spaceForLength)
	state := enc.newEncoderState(&enc.byteBuf)
	// Type descriptor(s) first, then the id of the top-level type.
	enc.sendTypeDescriptor(enc.writer(), state, ut)
	enc.sendTypeId(state, ut)
	if enc.err != nil {
		return enc.err
	}
	// Encode the object.
	enc.encode(state.b, value, ut)
	if enc.err == nil {
		enc.writeMessage(enc.writer(), state.b)
	}
	enc.freeEncoderState(state)
	return enc.err
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gob
import "fmt"
// Errors in decoding and encoding are handled using panic and recover.
// Panics caused by user error (that is, everything except run-time panics
// such as "index out of bounds" errors) do not leave the file that caused
// them, but are instead turned into plain error returns. Encoding and
// decoding functions and methods that do not return an error either use
// panic to report an error or are guaranteed error-free.

// A gobError is used to distinguish errors (panics) generated in this package.
type gobError struct {
	err error // the underlying error to surface to the caller
}
// errorf is like error_ but takes Printf-style arguments to construct an error.
// It always prefixes the message with "gob: ".
func errorf(format string, args ...any) {
	wrapped := fmt.Errorf("gob: "+format, args...)
	error_(wrapped)
}
// error_ wraps the argument error in a gobError and panics with it,
// to be recovered by catchError.
func error_(err error) {
	ge := gobError{err: err}
	panic(ge)
}
// catchError is meant to be used as a deferred function to turn a panic(gobError) into a
// plain error. It overwrites the error return of the function that deferred its call.
func catchError(err *error) {
	e := recover()
	if e == nil {
		return
	}
	ge, ok := e.(gobError)
	if !ok {
		// Not one of ours (e.g. a runtime panic): re-panic unchanged.
		panic(e)
	}
	*err = ge.err
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gob
import (
"encoding"
"errors"
"fmt"
"maps"
"os"
"reflect"
"sync"
"sync/atomic"
"unicode"
"unicode/utf8"
)
// userTypeInfo stores the information associated with a type the user has handed
// to the package. It's computed once and stored in a map keyed by reflection
// type.
type userTypeInfo struct {
	user        reflect.Type // the type the user handed us
	base        reflect.Type // the base type after all indirections
	indir       int          // number of indirections to reach the base type
	externalEnc int          // xGob, xBinary, or xText; 0 if none
	externalDec int          // xGob, xBinary, or xText; 0 if none
	encIndir    int8         // number of indirections to reach the receiver type; may be negative
	decIndir    int8         // number of indirections to reach the receiver type; may be negative
}
// externalEncoding bits, stored in userTypeInfo.externalEnc/externalDec.
const (
	xGob    = 1 + iota // GobEncoder or GobDecoder
	xBinary            // encoding.BinaryMarshaler or encoding.BinaryUnmarshaler
	xText              // encoding.TextMarshaler or encoding.TextUnmarshaler
)
// userTypeCache caches the *userTypeInfo computed by validUserType.
var userTypeCache sync.Map // map[reflect.Type]*userTypeInfo
// validUserType returns, and saves, the information associated with user-provided type rt.
// If the user type is not valid, err will be non-nil. To be used when the error handler
// is not set up.
func validUserType(rt reflect.Type) (*userTypeInfo, error) {
	if ui, ok := userTypeCache.Load(rt); ok {
		return ui.(*userTypeInfo), nil
	}
	// Construct a new userTypeInfo and atomically add it to the userTypeCache.
	// If we lose the race, we'll waste a little CPU and create a little garbage
	// but return the existing value anyway.
	ut := new(userTypeInfo)
	ut.base = rt
	ut.user = rt
	// A type that is just a cycle of pointers (such as type T *T) cannot
	// be represented in gobs, which need some concrete data. We use a
	// cycle detection algorithm from Knuth, Vol 2, Section 3.1, Ex 6,
	// pp 539-540. As we step through indirections, run another type at
	// half speed. If they meet up, there's a cycle.
	slowpoke := ut.base // walks half as fast as ut.base
	for {
		pt := ut.base
		if pt.Kind() != reflect.Pointer {
			break
		}
		ut.base = pt.Elem()
		if ut.base == slowpoke { // ut.base lapped slowpoke
			// recursive pointer type.
			return nil, errors.New("can't represent recursive pointer type " + ut.base.String())
		}
		if ut.indir%2 == 0 {
			// Advance the slow pointer every other step.
			slowpoke = slowpoke.Elem()
		}
		ut.indir++
	}
	// Record whether the user type supplies its own encoding.
	if ok, indir := implementsInterface(ut.user, gobEncoderInterfaceType); ok {
		ut.externalEnc, ut.encIndir = xGob, indir
	} else if ok, indir := implementsInterface(ut.user, binaryMarshalerInterfaceType); ok {
		ut.externalEnc, ut.encIndir = xBinary, indir
	}
	// NOTE(rsc): Would like to allow MarshalText here, but results in incompatibility
	// with older encodings for net.IP. See golang.org/issue/6760.
	// } else if ok, indir := implementsInterface(ut.user, textMarshalerInterfaceType); ok {
	// 	ut.externalEnc, ut.encIndir = xText, indir
	// }
	if ok, indir := implementsInterface(ut.user, gobDecoderInterfaceType); ok {
		ut.externalDec, ut.decIndir = xGob, indir
	} else if ok, indir := implementsInterface(ut.user, binaryUnmarshalerInterfaceType); ok {
		ut.externalDec, ut.decIndir = xBinary, indir
	}
	// See note above.
	// } else if ok, indir := implementsInterface(ut.user, textUnmarshalerInterfaceType); ok {
	// 	ut.externalDec, ut.decIndir = xText, indir
	// }
	ui, _ := userTypeCache.LoadOrStore(rt, ut)
	return ui.(*userTypeInfo), nil
}
// Reflection types for the interfaces we probe user types against,
// computed once at package init.
var (
	gobEncoderInterfaceType        = reflect.TypeFor[GobEncoder]()
	gobDecoderInterfaceType        = reflect.TypeFor[GobDecoder]()
	binaryMarshalerInterfaceType   = reflect.TypeFor[encoding.BinaryMarshaler]()
	binaryUnmarshalerInterfaceType = reflect.TypeFor[encoding.BinaryUnmarshaler]()
	textMarshalerInterfaceType     = reflect.TypeFor[encoding.TextMarshaler]()
	textUnmarshalerInterfaceType   = reflect.TypeFor[encoding.TextUnmarshaler]()
	wireTypeType                   = reflect.TypeFor[wireType]()
)
// implementsInterface reports whether the type implements the
// gobEncoder/gobDecoder interface.
// It also returns the number of indirections required to get to the
// implementation.
func implementsInterface(typ, gobEncDecType reflect.Type) (success bool, indir int8) {
if typ == nil {
return
}
rt := typ
// The type might be a pointer and we need to keep
// dereferencing to the base type until we find an implementation.
for {
if rt.Implements(gobEncDecType) {
return true, indir
}
if p := rt; p.Kind() == reflect.Pointer {
indir++
if indir > 100 { // insane number of indirections
return false, 0
}
rt = p.Elem()
continue
}
break
}
// No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
if typ.Kind() != reflect.Pointer {
// Not a pointer, but does the pointer work?
if reflect.PointerTo(typ).Implements(gobEncDecType) {
return true, -1
}
}
return false, 0
}
// userType returns, and saves, the information associated with user-provided type rt.
// If the user type is not valid, it calls error_ (panics with gobError).
func userType(rt reflect.Type) *userTypeInfo {
	ut, err := validUserType(rt)
	if err != nil {
		error_(err)
	}
	return ut
}
// A typeId represents a gob Type as an integer that can be passed on the wire.
// Internally, typeIds are used as keys to a map to recover the underlying type info.
type typeId int32

var typeLock sync.Mutex // set while building a type

const firstUserId = 64 // lowest id number granted to user
// gobType is the interface satisfied by all internal type descriptors
// (arrayType, mapType, sliceType, structType, gobEncoderType, CommonType).
type gobType interface {
	id() typeId
	setId(id typeId)
	name() string
	string() string // not public; only for debugging
	safeString(seen map[typeId]bool) string
}
// Global type registry, protected by typeLock while building.
var (
	types                = make(map[reflect.Type]gobType, 32)
	idToTypeSlice        = make([]gobType, 1, firstUserId) // index 0 is reserved (zero id)
	builtinIdToTypeSlice [firstUserId]gobType              // set in init() after builtins are established
)
// idToType returns the gobType registered under id, or nil when id is
// out of range.
func idToType(id typeId) gobType {
	if id >= 0 && int(id) < len(idToTypeSlice) {
		return idToTypeSlice[id]
	}
	return nil
}
// builtinIdToType returns the builtin gobType for id, or nil when id is
// out of the builtin range.
func builtinIdToType(id typeId) gobType {
	if id >= 0 && int(id) < len(builtinIdToTypeSlice) {
		return builtinIdToTypeSlice[id]
	}
	return nil
}
// setTypeId assigns the next free id to typ and records it in the
// id-to-type table. It is a no-op if typ already has an id.
func setTypeId(typ gobType) {
	// When building recursive types, someone may get there before us.
	if typ.id() != 0 {
		return
	}
	nextId := typeId(len(idToTypeSlice))
	typ.setId(nextId)
	idToTypeSlice = append(idToTypeSlice, typ)
}
// gobType returns the type descriptor for t; the zero id maps to nil.
func (t typeId) gobType() gobType {
	if t != 0 {
		return idToType(t)
	}
	return nil
}
// string returns the string representation of the type associated with the typeId.
func (t typeId) string() string {
	gt := t.gobType()
	if gt == nil {
		return "<nil>"
	}
	return gt.string()
}
// name returns the name of the type associated with the typeId.
func (t typeId) name() string {
	gt := t.gobType()
	if gt == nil {
		return "<nil>"
	}
	return gt.name()
}
// CommonType holds elements of all types.
// It is a historical artifact, kept for binary compatibility and exported
// only for the benefit of the package's encoding of type descriptors. It is
// not intended for direct use by clients.
type CommonType struct {
	Name string // the name of the type
	Id   typeId // the type's wire id
}

// The following methods make CommonType (and every descriptor that embeds
// it) satisfy the gobType interface.
func (t *CommonType) id() typeId { return t.Id }

func (t *CommonType) setId(id typeId) { t.Id = id }

func (t *CommonType) string() string { return t.Name }

func (t *CommonType) safeString(seen map[typeId]bool) string {
	return t.Name
}

func (t *CommonType) name() string { return t.Name }
// Create and check predefined types
// The string for tBytes is "bytes" not "[]byte" to signify its specialness.
var (
	// Primordial types, needed during initialization.
	// Always passed as pointers so the interface{} type
	// goes through without losing its interfaceness.
	tBool      = bootstrapType("bool", (*bool)(nil))
	tInt       = bootstrapType("int", (*int)(nil))
	tUint      = bootstrapType("uint", (*uint)(nil))
	tFloat     = bootstrapType("float", (*float64)(nil))
	tBytes     = bootstrapType("bytes", (*[]byte)(nil))
	tString    = bootstrapType("string", (*string)(nil))
	tComplex   = bootstrapType("complex", (*complex128)(nil))
	tInterface = bootstrapType("interface", (*any)(nil))
	// Reserve some Ids for compatible expansion
	// NOTE(review): all seven reserved slots register under the same name
	// "_reserved1"; presumably only the ids matter on the wire. Do not
	// "correct" the names — that could alter encoded type descriptors.
	tReserved7 = bootstrapType("_reserved1", (*struct{ r7 int })(nil))
	tReserved6 = bootstrapType("_reserved1", (*struct{ r6 int })(nil))
	tReserved5 = bootstrapType("_reserved1", (*struct{ r5 int })(nil))
	tReserved4 = bootstrapType("_reserved1", (*struct{ r4 int })(nil))
	tReserved3 = bootstrapType("_reserved1", (*struct{ r3 int })(nil))
	tReserved2 = bootstrapType("_reserved1", (*struct{ r2 int })(nil))
	tReserved1 = bootstrapType("_reserved1", (*struct{ r1 int })(nil))
)
// Predefined because it's needed by the Decoder
var tWireType = mustGetTypeInfo(wireTypeType).id

var wireTypeUserInfo *userTypeInfo // userTypeInfo of wireType; set in init()
// init verifies the bootstrap type ids, snapshots the builtin id table,
// and reserves id space below firstUserId for future predefined types.
func init() {
	// Some magic numbers to make sure there are no surprises.
	checkId(16, tWireType)
	checkId(17, mustGetTypeInfo(reflect.TypeFor[arrayType]()).id)
	checkId(18, mustGetTypeInfo(reflect.TypeFor[CommonType]()).id)
	checkId(19, mustGetTypeInfo(reflect.TypeFor[sliceType]()).id)
	checkId(20, mustGetTypeInfo(reflect.TypeFor[structType]()).id)
	checkId(21, mustGetTypeInfo(reflect.TypeFor[fieldType]()).id)
	checkId(23, mustGetTypeInfo(reflect.TypeFor[mapType]()).id)
	copy(builtinIdToTypeSlice[:], idToTypeSlice)
	// Move the id space upwards to allow for growth in the predefined world
	// without breaking existing files.
	if nextId := len(idToTypeSlice); nextId > firstUserId {
		panic(fmt.Sprintln("nextId too large:", nextId))
	}
	idToTypeSlice = idToTypeSlice[:firstUserId]
	registerBasics()
	wireTypeUserInfo = userType(wireTypeType)
}
// Array type
type arrayType struct {
	CommonType
	Elem typeId // element type id
	Len  int    // fixed array length
}

// newArrayType returns an arrayType with only its name set; the caller
// must call init to fill in element and length (and assign the id).
func newArrayType(name string) *arrayType {
	a := &arrayType{CommonType{Name: name}, 0, 0}
	return a
}

func (a *arrayType) init(elem gobType, len int) {
	// Set our type id before evaluating the element's, in case it's our own.
	setTypeId(a)
	a.Elem = elem.id()
	a.Len = len
}

// safeString renders the type, using seen to cut off recursion.
func (a *arrayType) safeString(seen map[typeId]bool) string {
	if seen[a.Id] {
		return a.Name
	}
	seen[a.Id] = true
	return fmt.Sprintf("[%d]%s", a.Len, a.Elem.gobType().safeString(seen))
}

func (a *arrayType) string() string { return a.safeString(make(map[typeId]bool)) }
// GobEncoder type (something that implements the GobEncoder interface)
type gobEncoderType struct {
	CommonType
}

// newGobEncoderType returns a named gobEncoderType with its id assigned
// immediately (self-encoders carry no element structure to defer for).
func newGobEncoderType(name string) *gobEncoderType {
	g := &gobEncoderType{CommonType{Name: name}}
	setTypeId(g)
	return g
}

func (g *gobEncoderType) safeString(seen map[typeId]bool) string {
	return g.Name
}

func (g *gobEncoderType) string() string { return g.Name }
// Map type
type mapType struct {
	CommonType
	Key  typeId // key type id
	Elem typeId // element type id
}

// newMapType returns a mapType with only its name set; the caller must
// call init to fill in key/element (and assign the id).
func newMapType(name string) *mapType {
	m := &mapType{CommonType{Name: name}, 0, 0}
	return m
}

func (m *mapType) init(key, elem gobType) {
	// Set our type id before evaluating the element's, in case it's our own.
	setTypeId(m)
	m.Key = key.id()
	m.Elem = elem.id()
}

// safeString renders the type, using seen to cut off recursion.
func (m *mapType) safeString(seen map[typeId]bool) string {
	if seen[m.Id] {
		return m.Name
	}
	seen[m.Id] = true
	key := m.Key.gobType().safeString(seen)
	elem := m.Elem.gobType().safeString(seen)
	return fmt.Sprintf("map[%s]%s", key, elem)
}

func (m *mapType) string() string { return m.safeString(make(map[typeId]bool)) }
// Slice type
type sliceType struct {
	CommonType
	Elem typeId // element type id
}

// newSliceType returns a sliceType with only its name set; the caller
// must call init to fill in the element (and assign the id).
func newSliceType(name string) *sliceType {
	s := &sliceType{CommonType{Name: name}, 0}
	return s
}

func (s *sliceType) init(elem gobType) {
	// Set our type id before evaluating the element's, in case it's our own.
	setTypeId(s)
	// See the comments about ids in newTypeObject. Only slices and
	// structs have mutual recursion.
	if elem.id() == 0 {
		setTypeId(elem)
	}
	s.Elem = elem.id()
}

// safeString renders the type, using seen to cut off recursion.
func (s *sliceType) safeString(seen map[typeId]bool) string {
	if seen[s.Id] {
		return s.Name
	}
	seen[s.Id] = true
	return fmt.Sprintf("[]%s", s.Elem.gobType().safeString(seen))
}

func (s *sliceType) string() string { return s.safeString(make(map[typeId]bool)) }
// Struct type
type fieldType struct {
	Name string // field name
	Id   typeId // field type id
}

type structType struct {
	CommonType
	Field []fieldType // transmitted fields, in wire order
}

// safeString renders the struct type, using seen to cut off recursion.
func (s *structType) safeString(seen map[typeId]bool) string {
	if s == nil {
		return "<nil>"
	}
	if _, ok := seen[s.Id]; ok {
		return s.Name
	}
	seen[s.Id] = true
	str := s.Name + " = struct { "
	for _, f := range s.Field {
		str += fmt.Sprintf("%s %s; ", f.Name, f.Id.gobType().safeString(seen))
	}
	str += "}"
	return str
}

func (s *structType) string() string { return s.safeString(make(map[typeId]bool)) }

func newStructType(name string) *structType {
	s := &structType{CommonType{Name: name}, nil}
	// For historical reasons we set the id here rather than init.
	// See the comment in newTypeObject for details.
	setTypeId(s)
	return s
}
// newTypeObject allocates a gobType for the reflection type rt.
// Unless ut represents a GobEncoder, rt should be the base type
// of ut.
// This is only called from the encoding side. The decoding side
// works through typeIds and userTypeInfos alone.
func newTypeObject(name string, ut *userTypeInfo, rt reflect.Type) (gobType, error) {
	// Does this type implement GobEncoder?
	if ut.externalEnc != 0 {
		return newGobEncoderType(name), nil
	}
	var err error
	var type0, type1 gobType
	// On failure, remove the partially-installed entry from the registry.
	defer func() {
		if err != nil {
			delete(types, rt)
		}
	}()
	// Install the top-level type before the subtypes (e.g. struct before
	// fields) so recursive types can be constructed safely.
	switch t := rt; t.Kind() {
	// All basic types are easy: they are predefined.
	case reflect.Bool:
		return tBool.gobType(), nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return tInt.gobType(), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return tUint.gobType(), nil
	case reflect.Float32, reflect.Float64:
		return tFloat.gobType(), nil
	case reflect.Complex64, reflect.Complex128:
		return tComplex.gobType(), nil
	case reflect.String:
		return tString.gobType(), nil
	case reflect.Interface:
		return tInterface.gobType(), nil
	case reflect.Array:
		at := newArrayType(name)
		types[rt] = at
		type0, err = getBaseType("", t.Elem())
		if err != nil {
			return nil, err
		}
		// Historical aside:
		// For arrays, maps, and slices, we set the type id after the elements
		// are constructed. This is to retain the order of type id allocation after
		// a fix made to handle recursive types, which changed the order in
		// which types are built. Delaying the setting in this way preserves
		// type ids while allowing recursive types to be described. Structs,
		// done below, were already handling recursion correctly so they
		// assign the top-level id before those of the field.
		at.init(type0, t.Len())
		return at, nil
	case reflect.Map:
		mt := newMapType(name)
		types[rt] = mt
		type0, err = getBaseType("", t.Key())
		if err != nil {
			return nil, err
		}
		type1, err = getBaseType("", t.Elem())
		if err != nil {
			return nil, err
		}
		mt.init(type0, type1)
		return mt, nil
	case reflect.Slice:
		// []byte == []uint8 is a special case
		if t.Elem().Kind() == reflect.Uint8 {
			return tBytes.gobType(), nil
		}
		st := newSliceType(name)
		types[rt] = st
		type0, err = getBaseType(t.Elem().Name(), t.Elem())
		if err != nil {
			return nil, err
		}
		st.init(type0)
		return st, nil
	case reflect.Struct:
		st := newStructType(name)
		types[rt] = st
		idToTypeSlice[st.id()] = st
		for i := 0; i < t.NumField(); i++ {
			f := t.Field(i)
			if !isSent(&f) {
				continue
			}
			typ := userType(f.Type).base
			tname := typ.Name()
			if tname == "" {
				// Unnamed type (e.g. []T, map[K]V): use its structural
				// string. (typ already holds userType(f.Type).base, so
				// there is no need to recompute it.)
				tname = typ.String()
			}
			gt, err := getBaseType(tname, f.Type)
			if err != nil {
				return nil, err
			}
			// Some mutually recursive types can cause us to be here while
			// still defining the element. Fix the element type id here.
			// We could do this more neatly by setting the id at the start of
			// building every type, but that would break binary compatibility.
			if gt.id() == 0 {
				setTypeId(gt)
			}
			st.Field = append(st.Field, fieldType{f.Name, gt.id()})
		}
		return st, nil
	default:
		return nil, errors.New("gob NewTypeObject can't handle type: " + rt.String())
	}
}
// isExported reports whether this is an exported - upper case - name.
func isExported(name string) bool {
	first, _ := utf8.DecodeRuneInString(name)
	return unicode.IsUpper(first)
}
// isSent reports whether this struct field is to be transmitted.
// It will be transmitted only if it is exported and not a chan or func field
// or pointer to chan or func.
func isSent(field *reflect.StructField) bool {
	if !isExported(field.Name) {
		return false
	}
	// If the field is a chan or func or pointer thereto, don't send it.
	// That is, treat it like an unexported field.
	typ := field.Type
	for typ.Kind() == reflect.Pointer {
		typ = typ.Elem()
	}
	switch typ.Kind() {
	case reflect.Chan, reflect.Func:
		return false
	}
	return true
}
// getBaseType returns the Gob type describing the given reflect.Type's base type.
// typeLock must be held.
func getBaseType(name string, rt reflect.Type) (gobType, error) {
	ut := userType(rt)
	return getType(name, ut, ut.base)
}
// getType returns the Gob type describing the given reflect.Type.
// Should be called only when handling GobEncoders/Decoders,
// which may be pointers. All other types are handled through the
// base type, never a pointer.
// typeLock must be held.
func getType(name string, ut *userTypeInfo, rt reflect.Type) (gobType, error) {
	typ, present := types[rt]
	if present {
		return typ, nil
	}
	typ, err := newTypeObject(name, ut, rt)
	if err == nil {
		// Cache the descriptor only on success.
		types[rt] = typ
	}
	return typ, err
}
// checkId panics if got differs from want; used by init to pin the
// wire-format ids of the bootstrap types.
func checkId(want, got typeId) {
	if want != got {
		fmt.Fprintf(os.Stderr, "checkId: %d should be %d\n", int(got), int(want))
		panic("bootstrap type wrong id: " + got.name() + " " + got.string() + " not " + want.string())
	}
}
// used for building the basic types; called only from init(). the incoming
// interface always refers to a pointer.
func bootstrapType(name string, e any) typeId {
	rt := reflect.TypeOf(e).Elem()
	_, present := types[rt]
	if present {
		panic("bootstrap type already present: " + name + ", " + rt.String())
	}
	typ := &CommonType{Name: name}
	types[rt] = typ
	setTypeId(typ)
	return typ.id()
}
// Representation of the information we send and receive about this type.
// Each value we send is preceded by its type definition: an encoded int.
// However, the very first time we send the value, we first send the pair
// (-id, wireType).
// For bootstrapping purposes, we assume that the recipient knows how
// to decode a wireType; it is exactly the wireType struct here, interpreted
// using the gob rules for sending a structure, except that we assume the
// ids for wireType and structType etc. are known. The relevant pieces
// are built in encode.go's init() function.
// To maintain binary compatibility, if you extend this type, always put
// the new fields last.
type wireType struct {
	// Exactly one of these fields is non-nil, selecting the kind.
	ArrayT           *arrayType
	SliceT           *sliceType
	StructT          *structType
	MapT             *mapType
	GobEncoderT      *gobEncoderType
	BinaryMarshalerT *gobEncoderType
	TextMarshalerT   *gobEncoderType
}
// string returns the name of whichever descriptor field is set,
// or "unknown type" when w is nil or empty.
func (w *wireType) string() string {
	const unknown = "unknown type"
	if w == nil {
		return unknown
	}
	switch {
	case w.ArrayT != nil:
		return w.ArrayT.Name
	case w.SliceT != nil:
		return w.SliceT.Name
	case w.StructT != nil:
		return w.StructT.Name
	case w.MapT != nil:
		return w.MapT.Name
	case w.GobEncoderT != nil:
		return w.GobEncoderT.Name
	case w.BinaryMarshalerT != nil:
		return w.BinaryMarshalerT.Name
	case w.TextMarshalerT != nil:
		return w.TextMarshalerT.Name
	}
	return unknown
}
// typeInfo bundles everything the encoder needs to know about one type:
// its wire id, its wire-format descriptor, and a lazily built engine.
type typeInfo struct {
	id      typeId
	encInit sync.Mutex // protects creation of encoder
	encoder atomic.Pointer[encEngine]
	wire    wireType
}
// typeInfoMap is an atomic pointer to map[reflect.Type]*typeInfo.
// It's updated copy-on-write. Readers just do an atomic load
// to get the current version of the map. Writers make a full copy of
// the map and atomically update the pointer to point to the new map.
// Under heavy read contention, this is significantly faster than a map
// protected by a mutex.
var typeInfoMap atomic.Value

// typeInfoMapInit is used instead of typeInfoMap during init time,
// as types are registered sequentially during init and we can save
// the overhead of making map copies.
// It is saved to typeInfoMap and set to nil before init finishes.
var typeInfoMapInit = make(map[reflect.Type]*typeInfo, 16)
// lookupTypeInfo returns the typeInfo for rt, or nil if none is recorded.
// During package init it consults typeInfoMapInit; afterwards, typeInfoMap.
func lookupTypeInfo(rt reflect.Type) *typeInfo {
	if m := typeInfoMapInit; m != nil {
		return m[rt]
	}
	m, _ := typeInfoMap.Load().(map[reflect.Type]*typeInfo)
	return m[rt]
}
// getTypeInfo returns the typeInfo for ut, building it on first use.
// Externally encoded types are keyed by the user type, others by the base type.
func getTypeInfo(ut *userTypeInfo) (*typeInfo, error) {
	rt := ut.base
	if ut.externalEnc != 0 {
		// We want the user type, not the base type.
		rt = ut.user
	}
	if info := lookupTypeInfo(rt); info != nil {
		return info, nil
	}
	return buildTypeInfo(ut, rt)
}
// buildTypeInfo constructs the type information for the type
// and stores it in the type info map.
func buildTypeInfo(ut *userTypeInfo, rt reflect.Type) (*typeInfo, error) {
	typeLock.Lock()
	defer typeLock.Unlock()
	// Re-check under the lock: another goroutine may have built it.
	if info := lookupTypeInfo(rt); info != nil {
		return info, nil
	}
	gt, err := getBaseType(rt.Name(), rt)
	if err != nil {
		return nil, err
	}
	info := &typeInfo{id: gt.id()}
	if ut.externalEnc != 0 {
		userType, err := getType(rt.Name(), ut, rt)
		if err != nil {
			return nil, err
		}
		gt := userType.id().gobType().(*gobEncoderType)
		// Record which external mechanism the type uses on the wire.
		switch ut.externalEnc {
		case xGob:
			info.wire.GobEncoderT = gt
		case xBinary:
			info.wire.BinaryMarshalerT = gt
		case xText:
			info.wire.TextMarshalerT = gt
		}
		rt = ut.user
	} else {
		t := info.id.gobType()
		switch typ := rt; typ.Kind() {
		case reflect.Array:
			info.wire.ArrayT = t.(*arrayType)
		case reflect.Map:
			info.wire.MapT = t.(*mapType)
		case reflect.Slice:
			// []byte == []uint8 is a special case handled separately
			if typ.Elem().Kind() != reflect.Uint8 {
				info.wire.SliceT = t.(*sliceType)
			}
		case reflect.Struct:
			info.wire.StructT = t.(*structType)
		}
	}
	// During init, insert directly; afterwards, copy-on-write the shared map.
	if m := typeInfoMapInit; m != nil {
		m[rt] = info
		return info, nil
	}
	// Create new map with old contents plus new entry.
	m, _ := typeInfoMap.Load().(map[reflect.Type]*typeInfo)
	newm := maps.Clone(m)
	newm[rt] = info
	typeInfoMap.Store(newm)
	return info, nil
}
// mustGetTypeInfo is like getTypeInfo but panics on failure.
// Called only when a panic is acceptable and unexpected.
func mustGetTypeInfo(rt reflect.Type) *typeInfo {
	info, err := getTypeInfo(userType(rt))
	if err != nil {
		panic("getTypeInfo: " + err.Error())
	}
	return info
}
// GobEncoder is the interface describing data that provides its own
// representation for encoding values for transmission to a GobDecoder.
// A type that implements GobEncoder and GobDecoder has complete
// control over the representation of its data and may therefore
// contain things such as private fields, channels, and functions,
// which are not usually transmissible in gob streams.
//
// Note: Since gobs can be stored permanently, it is good design
// to guarantee the encoding used by a GobEncoder is stable as the
// software evolves. For instance, it might make sense for GobEncode
// to include a version number in the encoding.
type GobEncoder interface {
	// GobEncode returns a byte slice representing the encoding of the
	// receiver for transmission to a GobDecoder, usually of the same
	// concrete type.
	GobEncode() ([]byte, error)
}
// GobDecoder is the interface describing data that provides its own
// routine for decoding transmitted values sent by a GobEncoder.
type GobDecoder interface {
	// GobDecode overwrites the receiver, which must be a pointer,
	// with the value represented by the byte slice, which was written
	// by GobEncode, usually for the same concrete type.
	GobDecode([]byte) error
}
// Bidirectional registry between user-visible type names and concrete
// types, maintained by Register and RegisterName.
var (
	nameToConcreteType sync.Map // map[string]reflect.Type
	concreteTypeToName sync.Map // map[reflect.Type]string
)
// RegisterName is like [Register] but uses the provided name rather than the
// type's default.
func RegisterName(name string, value any) {
	if name == "" {
		// reserved for nil
		panic("attempt to register empty name")
	}

	ut := userType(reflect.TypeOf(value))

	// Check for incompatible duplicates. The name must refer to the
	// same user type, and vice versa.

	// Store the name and type provided by the user....
	if t, dup := nameToConcreteType.LoadOrStore(name, reflect.TypeOf(value)); dup && t != ut.user {
		panic(fmt.Sprintf("gob: registering duplicate types for %q: %s != %s", name, t, ut.user))
	}

	// but the flattened type in the type table, since that's what decode needs.
	if n, dup := concreteTypeToName.LoadOrStore(ut.base, name); dup && n != name {
		// Undo the first store so the registry is left consistent
		// before panicking.
		nameToConcreteType.Delete(name)
		panic(fmt.Sprintf("gob: registering duplicate names for %s: %q != %q", ut.user, n, name))
	}
}
// Register records a type, identified by a value for that type, under its
// internal type name. That name will identify the concrete type of a value
// sent or received as an interface variable. Only types that will be
// transferred as implementations of interface values need to be registered.
// Expecting to be used only during initialization, it panics if the mapping
// between types and names is not a bijection.
func Register(value any) {
	// Default to printed representation for unnamed types
	rt := reflect.TypeOf(value)
	name := rt.String()

	// But for named types (or pointers to them), qualify with import path (but see inner comment).
	// Dereference one pointer looking for a named type.
	star := ""
	if rt.Name() == "" {
		if pt := rt; pt.Kind() == reflect.Pointer {
			star = "*"
			// NOTE: The following line should be rt = pt.Elem() to implement
			// what the comment above claims, but fixing it would break compatibility
			// with existing gobs.
			//
			// Given package p imported as "full/p" with these definitions:
			//     package p
			//     type T1 struct { ... }
			// this table shows the intended and actual strings used by gob to
			// name the types:
			//
			// Type      Correct string     Actual string
			//
			// T1        full/p.T1          full/p.T1
			// *T1       *full/p.T1         *p.T1
			//
			// The missing full path cannot be fixed without breaking existing gob decoders.
			rt = pt
		}
	}
	if rt.Name() != "" {
		if rt.PkgPath() == "" {
			name = star + rt.Name()
		} else {
			name = star + rt.PkgPath() + "." + rt.Name()
		}
	}

	RegisterName(name, value)
}
// registerBasics registers the predeclared scalar types, plus slices of
// them, so they can be transmitted inside interface values.
func registerBasics() {
	basics := []any{
		int(0), int8(0), int16(0), int32(0), int64(0),
		uint(0), uint8(0), uint16(0), uint32(0), uint64(0),
		float32(0), float64(0),
		complex64(0i), complex128(0i),
		uintptr(0),
		false,
		"",
		[]byte(nil),
		[]int(nil), []int8(nil), []int16(nil), []int32(nil), []int64(nil),
		[]uint(nil), []uint8(nil), []uint16(nil), []uint32(nil), []uint64(nil),
		[]float32(nil), []float64(nil),
		[]complex64(nil), []complex128(nil),
		[]uintptr(nil),
		[]bool(nil),
		[]string(nil),
	}
	for _, v := range basics {
		Register(v)
	}
}
// init publishes the type info accumulated during package initialization
// into the atomic typeInfoMap and retires the init-time plain map, after
// which lookups go through the copy-on-write path.
func init() {
	typeInfoMap.Store(typeInfoMapInit)
	typeInfoMapInit = nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package hex implements hexadecimal encoding and decoding.
package hex
import (
"errors"
"fmt"
"io"
"slices"
"strings"
)
const (
	// hextable maps a nibble value (0-15) to its lowercase hex digit.
	hextable = "0123456789abcdef"
	// reverseHexTable maps an ASCII byte to the nibble value it encodes,
	// or 0xff when the byte is not a hexadecimal digit. The populated
	// rows cover '0'-'9', 'A'-'F', and 'a'-'f'.
	reverseHexTable = "" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" +
		"\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
)
// EncodedLen returns the length of an encoding of n source bytes.
// Specifically, it returns n * 2, since each byte becomes two hex digits.
func EncodedLen(n int) int { return 2 * n }
// Encode encodes src into [EncodedLen](len(src)) bytes of dst, writing
// each source byte as two lowercase hex digits. As a convenience, it
// returns the number of bytes written to dst, but this value is always
// [EncodedLen](len(src)).
func Encode(dst, src []byte) int {
	for i, b := range src {
		dst[2*i] = hextable[b>>4]
		dst[2*i+1] = hextable[b&0x0f]
	}
	return len(src) * 2
}
// AppendEncode appends the hexadecimally encoded src to dst
// and returns the extended buffer.
func AppendEncode(dst, src []byte) []byte {
	need := EncodedLen(len(src))
	dst = slices.Grow(dst, need)
	base := len(dst)
	Encode(dst[base:][:need], src)
	return dst[:base+need]
}
// ErrLength reports an attempt to decode an odd-length input
// using [Decode] or [DecodeString].
// The stream-based Decoder returns [io.ErrUnexpectedEOF] instead of ErrLength.
var ErrLength = errors.New("encoding/hex: odd length hex string")
// InvalidByteError values describe errors resulting from an invalid byte in a hex string.
type InvalidByteError byte

// Error returns a message naming the offending byte in Unicode notation.
func (e InvalidByteError) Error() string {
	return "encoding/hex: invalid byte: " + fmt.Sprintf("%#U", rune(e))
}
// DecodedLen returns the length of a decoding of x source bytes.
// Specifically, it returns x / 2: two hex digits decode to one byte.
func DecodedLen(x int) int { return x / 2 }
// Decode decodes src into [DecodedLen](len(src)) bytes,
// returning the actual number of bytes written to dst.
//
// Decode expects that src contains only hexadecimal
// characters and that src has even length.
// If the input is malformed, Decode returns the number
// of bytes decoded before the error.
func Decode(dst, src []byte) (int, error) {
	var n int
	for n = 0; 2*n+1 < len(src); n++ {
		hi := reverseHexTable[src[2*n]]
		lo := reverseHexTable[src[2*n+1]]
		if hi > 0x0f {
			return n, InvalidByteError(src[2*n])
		}
		if lo > 0x0f {
			return n, InvalidByteError(src[2*n+1])
		}
		dst[n] = hi<<4 | lo
	}
	if len(src)%2 == 1 {
		// Check for invalid char before reporting bad length,
		// since the invalid char (if present) is an earlier problem.
		if reverseHexTable[src[len(src)-1]] > 0x0f {
			return n, InvalidByteError(src[len(src)-1])
		}
		return n, ErrLength
	}
	return n, nil
}
// AppendDecode appends the hexadecimally decoded src to dst
// and returns the extended buffer.
// If the input is malformed, it returns the partially decoded src and an error.
func AppendDecode(dst, src []byte) ([]byte, error) {
	want := DecodedLen(len(src))
	dst = slices.Grow(dst, want)
	base := len(dst)
	n, err := Decode(dst[base:][:want], src)
	return dst[:base+n], err
}
// EncodeToString returns the hexadecimal encoding of src.
func EncodeToString(src []byte) string {
	out := make([]byte, EncodedLen(len(src)))
	Encode(out, src)
	return string(out)
}
// DecodeString returns the bytes represented by the hexadecimal string s.
//
// DecodeString expects that s contains only hexadecimal
// characters and that s has even length.
// If the input is malformed, DecodeString returns
// the bytes decoded before the error.
func DecodeString(s string) ([]byte, error) {
	out := make([]byte, DecodedLen(len(s)))
	n, err := Decode(out, []byte(s))
	return out[:n], err
}
// Dump returns a string that contains a hex dump of the given data. The format
// of the hex dump matches the output of `hexdump -C` on the command line.
func Dump(data []byte) string {
	if len(data) == 0 {
		return ""
	}

	var sb strings.Builder
	// Each complete 16-byte chunk emits 79 bytes of output, and whatever
	// remains at least 64. Round the allocation up; only a maximum of
	// 15 bytes will be wasted.
	sb.Grow((1 + ((len(data) - 1) / 16)) * 79)

	d := Dumper(&sb)
	d.Write(data)
	d.Close()
	return sb.String()
}
// bufferSize is the number of hexadecimal characters to buffer in encoder and decoder.
const bufferSize = 1024
// encoder is the streaming hex writer returned by NewEncoder.
type encoder struct {
	w   io.Writer
	err error            // sticky error from the underlying writer
	out [bufferSize]byte // output buffer
}
// NewEncoder returns an [io.Writer] that writes lowercase hexadecimal characters to w.
func NewEncoder(w io.Writer) io.Writer {
	e := &encoder{w: w}
	return e
}
// Write hex-encodes p and writes the result to the underlying writer,
// processing at most half a buffer of input per iteration. Once a write
// error occurs, it is sticky and all later Writes fail with it.
// It reports the number of source bytes consumed.
func (e *encoder) Write(p []byte) (n int, err error) {
	for len(p) > 0 && e.err == nil {
		chunk := min(len(p), bufferSize/2)
		encLen := Encode(e.out[:], p[:chunk])
		var wrote int
		wrote, e.err = e.w.Write(e.out[:encLen])
		// Two output bytes correspond to one consumed source byte.
		n += wrote / 2
		p = p[chunk:]
	}
	return n, e.err
}
// decoder is the streaming hex reader returned by NewDecoder.
type decoder struct {
	r   io.Reader
	err error            // sticky error, including io.EOF / io.ErrUnexpectedEOF
	in  []byte           // input buffer (encoded form)
	arr [bufferSize]byte // backing array for in
}
// NewDecoder returns an [io.Reader] that decodes hexadecimal characters from r.
// NewDecoder expects that r contain only an even number of hexadecimal characters.
func NewDecoder(r io.Reader) io.Reader {
	d := &decoder{r: r}
	return d
}
// Read decodes hexadecimal characters from the underlying reader into p.
// Errors (including io.EOF) are withheld until the buffered input has been
// fully decoded and delivered.
func (d *decoder) Read(p []byte) (n int, err error) {
	// Fill internal buffer with sufficient bytes to decode
	if len(d.in) < 2 && d.err == nil {
		var numCopy, numRead int
		numCopy = copy(d.arr[:], d.in) // Copies either 0 or 1 bytes
		numRead, d.err = d.r.Read(d.arr[numCopy:])
		d.in = d.arr[:numCopy+numRead]
		if d.err == io.EOF && len(d.in)%2 != 0 {
			// Stream ended mid-pair: report the dangling byte if it
			// is not even a hex digit, otherwise an unexpected EOF.
			if a := reverseHexTable[d.in[len(d.in)-1]]; a > 0x0f {
				d.err = InvalidByteError(d.in[len(d.in)-1])
			} else {
				d.err = io.ErrUnexpectedEOF
			}
		}
	}

	// Decode internal buffer into output buffer
	if numAvail := len(d.in) / 2; len(p) > numAvail {
		p = p[:numAvail]
	}
	numDec, err := Decode(p, d.in[:len(p)*2])
	d.in = d.in[2*numDec:]
	if err != nil {
		d.in, d.err = nil, err // Decode error; discard input remainder
	}

	if len(d.in) < 2 {
		return numDec, d.err // Only expose errors when buffer fully consumed
	}
	return numDec, nil
}
// Dumper returns a [io.WriteCloser] that writes a hex dump of all written data to
// w. The format of the dump matches the output of `hexdump -C` on the command
// line.
func Dumper(w io.Writer) io.WriteCloser {
	d := &dumper{w: w}
	return d
}
// dumper implements the hexdump-style io.WriteCloser returned by Dumper.
type dumper struct {
	w          io.Writer
	rightChars [18]byte // ASCII column: 16 printable chars plus "|\n"
	buf        [14]byte // scratch for the offset prefix and per-byte hex
	used       int      // number of bytes in the current line
	n          uint     // number of bytes, total
	closed     bool     // set by Close; further Writes fail
}
// toChar returns b when it is printable ASCII and '.' otherwise, matching
// the right-hand column convention of `hexdump -C`.
func toChar(b byte) byte {
	if 32 <= b && b <= 126 {
		return b
	}
	return '.'
}
// Write formats data into the dump, emitting a complete output line each
// time 16 input bytes have accumulated. It reports the number of input
// bytes consumed, not the number of dump bytes written.
func (h *dumper) Write(data []byte) (n int, err error) {
	if h.closed {
		return 0, errors.New("encoding/hex: dumper closed")
	}

	// Output lines look like:
	// 00000010  2e 2f 30 31 32 33 34 35  36 37 38 39 3a 3b 3c 3d  |./0123456789:;<=|
	// ^ offset                          ^ extra space              ^ ASCII of line.
	for i := range data {
		if h.used == 0 {
			// At the beginning of a line we print the current
			// offset in hex.
			h.buf[0] = byte(h.n >> 24)
			h.buf[1] = byte(h.n >> 16)
			h.buf[2] = byte(h.n >> 8)
			h.buf[3] = byte(h.n)
			Encode(h.buf[4:], h.buf[:4])
			h.buf[12] = ' '
			h.buf[13] = ' '
			_, err = h.w.Write(h.buf[4:])
			if err != nil {
				return
			}
		}
		// Two hex digits for the current byte, then 1-3 separator chars.
		Encode(h.buf[:], data[i:i+1])
		h.buf[2] = ' '
		l := 3
		if h.used == 7 {
			// There's an additional space after the 8th byte.
			h.buf[3] = ' '
			l = 4
		} else if h.used == 15 {
			// At the end of the line there's an extra space and
			// the bar for the right column.
			h.buf[3] = ' '
			h.buf[4] = '|'
			l = 5
		}
		_, err = h.w.Write(h.buf[:l])
		if err != nil {
			return
		}
		n++
		h.rightChars[h.used] = toChar(data[i])
		h.used++
		h.n++
		if h.used == 16 {
			// Finish the line with the ASCII column.
			h.rightChars[16] = '|'
			h.rightChars[17] = '\n'
			_, err = h.w.Write(h.rightChars[:])
			if err != nil {
				return
			}
			h.used = 0
		}
	}
	return
}
// Close flushes any pending partial line, padding the hex column with
// spaces and truncating the ASCII column, then marks the dumper closed.
// Subsequent Writes return an error.
func (h *dumper) Close() (err error) {
	// See the comments in Write() for the details of this format.
	if h.closed {
		return
	}
	h.closed = true
	if h.used == 0 {
		// The last line was complete; nothing to pad.
		return
	}
	// Pad out the missing hex-byte positions with spaces.
	h.buf[0] = ' '
	h.buf[1] = ' '
	h.buf[2] = ' '
	h.buf[3] = ' '
	h.buf[4] = '|'
	nBytes := h.used
	for h.used < 16 {
		l := 3
		if h.used == 7 {
			l = 4
		} else if h.used == 15 {
			l = 5
		}
		_, err = h.w.Write(h.buf[:l])
		if err != nil {
			return
		}
		h.used++
	}
	h.rightChars[nBytes] = '|'
	h.rightChars[nBytes+1] = '\n'
	_, err = h.w.Write(h.rightChars[:nBytes+2])
	return
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Represents JSON data structure using native Go types: booleans, floats,
// strings, arrays, and maps.
//go:build !goexperiment.jsonv2
package json
import (
"encoding"
"encoding/base64"
"fmt"
"reflect"
"strconv"
"strings"
"unicode"
"unicode/utf16"
"unicode/utf8"
)
// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v. If v is nil or not a pointer,
// Unmarshal returns an [InvalidUnmarshalError].
//
// Unmarshal uses the inverse of the encodings that
// [Marshal] uses, allocating maps, slices, and pointers as necessary,
// with the following additional rules:
//
// To unmarshal JSON into a pointer, Unmarshal first handles the case of
// the JSON being the JSON literal null. In that case, Unmarshal sets
// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
// the value pointed at by the pointer. If the pointer is nil, Unmarshal
// allocates a new value for it to point to.
//
// To unmarshal JSON into a value implementing [Unmarshaler],
// Unmarshal calls that value's [Unmarshaler.UnmarshalJSON] method, including
// when the input is a JSON null.
// Otherwise, if the value implements [encoding.TextUnmarshaler]
// and the input is a JSON quoted string, Unmarshal calls
// [encoding.TextUnmarshaler.UnmarshalText] with the unquoted form of the string.
//
// To unmarshal JSON into a struct, Unmarshal matches incoming object keys to
// the keys used by [Marshal] (either the struct field name or its tag),
// ignoring case. If multiple struct fields match an object key, an exact case
// match is preferred over a case-insensitive one.
//
// Incoming object members are processed in the order observed. If an object
// includes duplicate keys, later duplicates will replace or be merged into
// prior values.
//
// To unmarshal JSON into an interface value,
// Unmarshal stores one of these in the interface value:
//
// - bool, for JSON booleans
// - float64, for JSON numbers
// - string, for JSON strings
// - []any, for JSON arrays
// - map[string]any, for JSON objects
// - nil for JSON null
//
// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
// to zero and then appends each element to the slice.
// As a special case, to unmarshal an empty JSON array into a slice,
// Unmarshal replaces the slice with a new empty slice.
//
// To unmarshal a JSON array into a Go array, Unmarshal decodes
// JSON array elements into corresponding Go array elements.
// If the Go array is smaller than the JSON array,
// the additional JSON array elements are discarded.
// If the JSON array is smaller than the Go array,
// the additional Go array elements are set to zero values.
//
// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
// reuses the existing map, keeping existing entries. Unmarshal then stores
// key-value pairs from the JSON object into the map. The map's key type must
// either be any string type, an integer, or implement [encoding.TextUnmarshaler].
//
// If the JSON-encoded data contain a syntax error, Unmarshal returns a [SyntaxError].
//
// If a JSON value is not appropriate for a given target type,
// or if a JSON number overflows the target type, Unmarshal
// skips that field and completes the unmarshaling as best it can.
// If no more serious errors are encountered, Unmarshal returns
// an [UnmarshalTypeError] describing the earliest such error. In any
// case, it's not guaranteed that all the remaining fields following
// the problematic one will be unmarshaled into the target object.
//
// The JSON null value unmarshals into an interface, map, pointer, or slice
// by setting that Go value to nil. Because null is often used in JSON to mean
// “not present,” unmarshaling a JSON null into any other Go type has no effect
// on the value and produces no error.
//
// When unmarshaling quoted strings, invalid UTF-8 or
// invalid UTF-16 surrogate pairs are not treated as an error.
// Instead, they are replaced by the Unicode replacement
// character U+FFFD.
func Unmarshal(data []byte, v any) error {
	// Check for well-formedness.
	// Avoids filling out half a data structure
	// before discovering a JSON syntax error.
	var d decodeState
	err := checkValid(data, &d.scan)
	if err != nil {
		return err
	}

	// The input is known syntactically valid; decode it into v.
	d.init(data)
	return d.unmarshal(v)
}
// Unmarshaler is the interface implemented by types
// that can unmarshal a JSON description of themselves.
// The input can be assumed to be a valid encoding of
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
type Unmarshaler interface {
	UnmarshalJSON([]byte) error
}
// An UnmarshalTypeError describes a JSON value that was
// not appropriate for a value of a specific Go type.
type UnmarshalTypeError struct {
Value string // description of JSON value - "bool", "array", "number -5"
Type reflect.Type // type of Go value it could not be assigned to
Offset int64 // error occurred after reading Offset bytes
Struct string // name of the struct type containing the field
Field string // the full path from root node to the field, include embedded struct
}
func (e *UnmarshalTypeError) Error() string {
if e.Struct != "" || e.Field != "" {
return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String()
}
return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
}
// An UnmarshalFieldError describes a JSON object key that
// led to an unexported (and therefore unwritable) struct field.
//
// Deprecated: No longer used; kept for compatibility.
type UnmarshalFieldError struct {
Key string
Type reflect.Type
Field reflect.StructField
}
func (e *UnmarshalFieldError) Error() string {
return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
}
// An InvalidUnmarshalError describes an invalid argument passed to [Unmarshal].
// (The argument to [Unmarshal] must be a non-nil pointer.)
type InvalidUnmarshalError struct {
Type reflect.Type
}
func (e *InvalidUnmarshalError) Error() string {
if e.Type == nil {
return "json: Unmarshal(nil)"
}
if e.Type.Kind() != reflect.Pointer {
return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
}
return "json: Unmarshal(nil " + e.Type.String() + ")"
}
// unmarshal decodes the state's JSON data into v, which must be a
// non-nil pointer. It returns the first error saved during decoding,
// if any, enriched with struct/field context.
func (d *decodeState) unmarshal(v any) error {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Pointer || rv.IsNil() {
		return &InvalidUnmarshalError{reflect.TypeOf(v)}
	}

	d.scan.reset()
	d.scanWhile(scanSkipSpace)
	// Decode rv, not rv.Elem(): the Unmarshaler interface test must be
	// applied at the top level of the value.
	if err := d.value(rv); err != nil {
		return d.addErrorContext(err)
	}
	return d.savedError
}
// A Number represents a JSON number literal.
type Number string

// String returns the literal text of the number.
func (n Number) String() string { return string(n) }

// Float64 returns the number parsed as a float64.
func (n Number) Float64() (float64, error) {
	f, err := strconv.ParseFloat(string(n), 64)
	return f, err
}

// Int64 returns the number parsed as a base-10 int64.
func (n Number) Int64() (int64, error) {
	i, err := strconv.ParseInt(string(n), 10, 64)
	return i, err
}
// An errorContext provides context for type errors during decoding.
type errorContext struct {
	Struct     reflect.Type // struct type currently being decoded into, if any
	FieldStack []string     // field names from the root down to the current field
}
// decodeState represents the state while decoding a JSON value.
type decodeState struct {
	data         []byte
	off          int // next read offset in data
	opcode       int // last read result
	scan         scanner
	errorContext *errorContext // lazily allocated; see addErrorContext
	savedError   error         // first error saved via saveError, reported at the end
	useNumber    bool          // decode numbers as Number (set elsewhere; not used in this chunk)
	disallowUnknownFields bool // error on object keys matching no struct field
}
// readIndex returns the position of the last byte read
// (d.off is the offset of the next byte to read).
func (d *decodeState) readIndex() int {
	return d.off - 1
}
// phasePanicMsg is used as a panic message when we end up with something that
// shouldn't happen. It can indicate a bug in the JSON decoder, or that
// something is editing the data slice while the decoder executes.
const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?"
// init resets the decode state to start decoding data from offset zero,
// clearing any error from a previous use while keeping allocations.
func (d *decodeState) init(data []byte) *decodeState {
	d.data, d.off, d.savedError = data, 0, nil
	if ec := d.errorContext; ec != nil {
		ec.Struct = nil
		// Reuse the allocated space for the FieldStack slice.
		ec.FieldStack = ec.FieldStack[:0]
	}
	return d
}
// saveError saves the first err it is called with,
// for reporting at the end of the unmarshal. Later errors are dropped.
func (d *decodeState) saveError(err error) {
	if d.savedError != nil {
		return
	}
	d.savedError = d.addErrorContext(err)
}
// addErrorContext enriches err with the struct and field path recorded in
// d.errorContext. Only *UnmarshalTypeError carries that context; every
// other error is returned unchanged.
func (d *decodeState) addErrorContext(err error) error {
	ctx := d.errorContext
	if ctx == nil || (ctx.Struct == nil && len(ctx.FieldStack) == 0) {
		return err
	}
	if terr, ok := err.(*UnmarshalTypeError); ok {
		terr.Struct = ctx.Struct.Name()
		stack := ctx.FieldStack
		if terr.Field != "" {
			stack = append(stack, terr.Field)
		}
		terr.Field = strings.Join(stack, ".")
	}
	return err
}
// skip scans to the end of what was started.
func (d *decodeState) skip() {
	s, data, i := &d.scan, d.data, d.off
	// The open compound value pushed one parse state; when the scanner's
	// stack shrinks below that depth, the value has been fully consumed.
	depth := len(s.parseState)
	for {
		op := s.step(s, data[i])
		i++
		if len(s.parseState) < depth {
			d.off = i
			d.opcode = op
			return
		}
	}
}
// scanNext processes the byte at d.data[d.off], or records EOF when the
// input is exhausted.
func (d *decodeState) scanNext() {
	if d.off < len(d.data) {
		d.opcode = d.scan.step(&d.scan, d.data[d.off])
		d.off++
	} else {
		d.opcode = d.scan.eof()
		d.off = len(d.data) + 1 // mark processed EOF with len+1
	}
}
// scanWhile processes bytes in d.data[d.off:] until it
// receives a scan code not equal to op.
func (d *decodeState) scanWhile(op int) {
	s, data, i := &d.scan, d.data, d.off
	for i < len(data) {
		newOp := s.step(s, data[i])
		i++
		if newOp != op {
			d.opcode = newOp
			d.off = i
			return
		}
	}

	// Ran off the end of the input.
	d.off = len(data) + 1 // mark processed EOF with len+1
	d.opcode = d.scan.eof()
}
// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the
// common case where we're decoding a literal. The decoder scans the input
// twice, once for syntax errors and to check the length of the value, and the
// second to perform the decoding.
//
// Only in the second step do we use decodeState to tokenize literals, so we
// know there aren't any syntax errors. We can take advantage of that knowledge,
// and scan a literal's bytes much more quickly.
func (d *decodeState) rescanLiteral() {
	// The literal's first byte is at data[d.off-1].
	data, i := d.data, d.off
Switch:
	switch data[i-1] {
	case '"': // string
		for ; i < len(data); i++ {
			switch data[i] {
			case '\\':
				i++ // escaped char
			case '"':
				i++ // tokenize the closing quote too
				break Switch
			}
		}
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number
		for ; i < len(data); i++ {
			switch data[i] {
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
				'.', 'e', 'E', '+', '-':
			default:
				break Switch
			}
		}
	case 't': // true
		i += len("rue")
	case 'f': // false
		i += len("alse")
	case 'n': // null
		i += len("ull")
	}
	// Feed the byte after the literal to the scanner so that opcode and
	// offset end up exactly as scanWhile would have left them.
	if i < len(data) {
		d.opcode = stateEndValue(&d.scan, data[i])
	} else {
		d.opcode = scanEnd
	}
	d.off = i + 1
}
// value consumes a JSON value from d.data[d.off-1:], decoding into v, and
// reads the following byte ahead. If v is invalid, the value is discarded.
// The first byte of the value has been read already.
func (d *decodeState) value(v reflect.Value) error {
	switch d.opcode {
	default:
		panic(phasePanicMsg)

	case scanBeginArray:
		if v.IsValid() {
			if err := d.array(v); err != nil {
				return err
			}
		} else {
			// No destination: consume and discard the array.
			d.skip()
		}
		d.scanNext()

	case scanBeginObject:
		if v.IsValid() {
			if err := d.object(v); err != nil {
				return err
			}
		} else {
			// No destination: consume and discard the object.
			d.skip()
		}
		d.scanNext()

	case scanBeginLiteral:
		// All bytes inside literal return scanContinue op code.
		start := d.readIndex()
		d.rescanLiteral()

		if v.IsValid() {
			if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil {
				return err
			}
		}
	}
	return nil
}
// unquotedValue is a sentinel returned by valueQuoted when the consumed
// value was not a quoted string literal or null.
type unquotedValue struct{}

// valueQuoted is like value but decodes a
// quoted string literal or literal null into an interface value.
// If it finds anything other than a quoted string literal or null,
// valueQuoted returns unquotedValue{}.
func (d *decodeState) valueQuoted() any {
	switch d.opcode {
	default:
		panic(phasePanicMsg)

	case scanBeginArray, scanBeginObject:
		// Arrays and objects can never be quoted strings; skip them.
		d.skip()
		d.scanNext()

	case scanBeginLiteral:
		v := d.literalInterface()
		switch v.(type) {
		case nil, string:
			return v
		}
	}
	return unquotedValue{}
}
// indirect walks down v allocating pointers as needed,
// until it gets to a non-pointer.
// If it encounters an Unmarshaler, indirect stops and returns that.
// If decodingNull is true, indirect stops at the first settable pointer so it
// can be set to nil.
func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
	// Issue #24153 indicates that it is generally not a guaranteed property
	// that you may round-trip a reflect.Value by calling Value.Addr().Elem()
	// and expect the value to still be settable for values derived from
	// unexported embedded struct fields.
	//
	// The logic below effectively does this when it first addresses the value
	// (to satisfy possible pointer methods) and continues to dereference
	// subsequent pointers as necessary.
	//
	// After the first round-trip, we set v back to the original value to
	// preserve the original RW flags contained in reflect.Value.
	v0 := v
	haveAddr := false

	// If v is a named type and is addressable,
	// start with its address, so that if the type has pointer methods,
	// we find them.
	if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() {
		haveAddr = true
		v = v.Addr()
	}
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) {
				haveAddr = false
				v = e
				continue
			}
		}

		if v.Kind() != reflect.Pointer {
			break
		}

		if decodingNull && v.CanSet() {
			// Stop here so the caller can set the pointer itself to nil.
			break
		}

		// Prevent infinite loop if v is an interface pointing to its own address:
		//     var v any
		//     v = &v
		if v.Elem().Kind() == reflect.Interface && v.Elem().Elem().Equal(v) {
			v = v.Elem()
			break
		}
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		if v.Type().NumMethod() > 0 && v.CanInterface() {
			if u, ok := reflect.TypeAssert[Unmarshaler](v); ok {
				return u, nil, reflect.Value{}
			}
			if !decodingNull {
				if u, ok := reflect.TypeAssert[encoding.TextUnmarshaler](v); ok {
					return nil, u, reflect.Value{}
				}
			}
		}

		if haveAddr {
			v = v0 // restore original value after round-trip Value.Addr().Elem()
			haveAddr = false
		} else {
			v = v.Elem()
		}
	}
	return nil, nil, v
}
// array consumes an array from d.data[d.off-1:], decoding into v.
// The first byte of the array ('[') has been read already.
func (d *decodeState) array(v reflect.Value) error {
	// Check for unmarshaler.
	u, ut, pv := indirect(v, false)
	if u != nil {
		// The target handles its own decoding: hand it the raw bytes.
		start := d.readIndex()
		d.skip()
		return u.UnmarshalJSON(d.data[start:d.off])
	}
	if ut != nil {
		// A text unmarshaler cannot accept an array.
		d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	}
	v = pv

	// Check type of target.
	switch v.Kind() {
	case reflect.Interface:
		if v.NumMethod() == 0 {
			// Decoding into nil interface? Switch to non-reflect code.
			ai := d.arrayInterface()
			v.Set(reflect.ValueOf(ai))
			return nil
		}
		// Otherwise it's invalid.
		fallthrough
	default:
		d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	case reflect.Array, reflect.Slice:
		break
	}

	i := 0
	for {
		// Look ahead for ] - can only happen on first iteration.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndArray {
			break
		}

		// Expand slice length, growing the slice if necessary.
		if v.Kind() == reflect.Slice {
			if i >= v.Cap() {
				v.Grow(1)
			}
			if i >= v.Len() {
				v.SetLen(i + 1)
			}
		}

		if i < v.Len() {
			// Decode into element.
			if err := d.value(v.Index(i)); err != nil {
				return err
			}
		} else {
			// Ran out of fixed array: skip.
			if err := d.value(reflect.Value{}); err != nil {
				return err
			}
		}
		i++

		// Next token must be , or ].
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode == scanEndArray {
			break
		}
		if d.opcode != scanArrayValue {
			panic(phasePanicMsg)
		}
	}

	if i < v.Len() {
		if v.Kind() == reflect.Array {
			for ; i < v.Len(); i++ {
				v.Index(i).SetZero() // zero remainder of array
			}
		} else {
			v.SetLen(i) // truncate the slice
		}
	}
	if i == 0 && v.Kind() == reflect.Slice {
		// Empty JSON array: replace with a fresh empty (non-nil) slice.
		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
	}
	return nil
}
// nullLiteral is the JSON null literal, kept as bytes for fast comparison.
var nullLiteral = []byte("null")

// textUnmarshalerType caches the reflect.Type of encoding.TextUnmarshaler
// for map-key implements checks.
var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()
// object consumes an object from d.data[d.off-1:], decoding into v.
// The first byte ('{') of the object has been read already.
//
// v may be a struct, a map keyed by string/integer/TextUnmarshaler types,
// or an empty interface; any other kind records an UnmarshalTypeError and
// the object is skipped.
func (d *decodeState) object(v reflect.Value) error {
	// Check for unmarshaler.
	u, ut, pv := indirect(v, false)
	if u != nil {
		// json.Unmarshaler: hand it the raw bytes of the whole object.
		start := d.readIndex()
		d.skip()
		return u.UnmarshalJSON(d.data[start:d.off])
	}
	if ut != nil {
		// encoding.TextUnmarshaler cannot accept an object; record and skip.
		d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)})
		d.skip()
		return nil
	}
	v = pv
	t := v.Type()

	// Decoding into nil interface? Switch to non-reflect code.
	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
		oi := d.objectInterface()
		v.Set(reflect.ValueOf(oi))
		return nil
	}

	var fields structFields

	// Check type of target:
	//   struct or
	//   map[T1]T2 where T1 is string, an integer type,
	//   or an encoding.TextUnmarshaler
	switch v.Kind() {
	case reflect.Map:
		// Map key must either have string kind, have an integer kind,
		// or be an encoding.TextUnmarshaler.
		switch t.Key().Kind() {
		case reflect.String,
			reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		default:
			if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) {
				d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
				d.skip()
				return nil
			}
		}
		if v.IsNil() {
			v.Set(reflect.MakeMap(t))
		}
	case reflect.Struct:
		fields = cachedTypeFields(t)
		// ok
	default:
		d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
		d.skip()
		return nil
	}

	var mapElem reflect.Value
	// Snapshot the error context so it can be restored after each field;
	// FieldStack grows as we descend into embedded fields below.
	var origErrorContext errorContext
	if d.errorContext != nil {
		origErrorContext = *d.errorContext
	}

	for {
		// Read opening " of string key or closing }.
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndObject {
			// closing } - can only happen on first iteration.
			break
		}
		if d.opcode != scanBeginLiteral {
			panic(phasePanicMsg)
		}

		// Read key.
		start := d.readIndex()
		d.rescanLiteral()
		item := d.data[start:d.readIndex()]
		key, ok := unquoteBytes(item)
		if !ok {
			panic(phasePanicMsg)
		}

		// Figure out field corresponding to key.
		var subv reflect.Value
		destring := false // whether the value is wrapped in a string to be decoded first

		if v.Kind() == reflect.Map {
			// Reuse one element value across iterations to avoid an
			// allocation per key; zero it before each decode.
			elemType := t.Elem()
			if !mapElem.IsValid() {
				mapElem = reflect.New(elemType).Elem()
			} else {
				mapElem.SetZero()
			}
			subv = mapElem
		} else {
			// Struct target: exact name match first, then the
			// case-insensitive folded fallback.
			f := fields.byExactName[string(key)]
			if f == nil {
				f = fields.byFoldedName[string(foldName(key))]
			}
			if f != nil {
				subv = v
				destring = f.quoted
				if d.errorContext == nil {
					d.errorContext = new(errorContext)
				}
				// Walk the field index path, allocating embedded
				// pointers along the way.
				for i, ind := range f.index {
					if subv.Kind() == reflect.Pointer {
						if subv.IsNil() {
							// If a struct embeds a pointer to an unexported type,
							// it is not possible to set a newly allocated value
							// since the field is unexported.
							//
							// See https://golang.org/issue/21357
							if !subv.CanSet() {
								d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem()))
								// Invalidate subv to ensure d.value(subv) skips over
								// the JSON value without assigning it to subv.
								subv = reflect.Value{}
								destring = false
								break
							}
							subv.Set(reflect.New(subv.Type().Elem()))
						}
						subv = subv.Elem()
					}
					if i < len(f.index)-1 {
						d.errorContext.FieldStack = append(
							d.errorContext.FieldStack,
							subv.Type().Field(ind).Name,
						)
					}
					subv = subv.Field(ind)
				}
				d.errorContext.Struct = t
				d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
			} else if d.disallowUnknownFields {
				d.saveError(fmt.Errorf("json: unknown field %q", key))
			}
		}

		// Read : before value.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode != scanObjectKey {
			panic(phasePanicMsg)
		}
		d.scanWhile(scanSkipSpace)

		if destring {
			// ",string" option: the value arrives wrapped in a JSON string.
			switch qv := d.valueQuoted().(type) {
			case nil:
				if err := d.literalStore(nullLiteral, subv, false); err != nil {
					return err
				}
			case string:
				if err := d.literalStore([]byte(qv), subv, true); err != nil {
					return err
				}
			default:
				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
			}
		} else {
			if err := d.value(subv); err != nil {
				return err
			}
		}

		// Write value back to map;
		// if using struct, subv points into struct already.
		if v.Kind() == reflect.Map {
			kt := t.Key()
			var kv reflect.Value
			if reflect.PointerTo(kt).Implements(textUnmarshalerType) {
				// Key type implements TextUnmarshaler: decode the raw
				// (still-quoted) key bytes through literalStore.
				kv = reflect.New(kt)
				if err := d.literalStore(item, kv, true); err != nil {
					return err
				}
				kv = kv.Elem()
			} else {
				switch kt.Kind() {
				case reflect.String:
					kv = reflect.New(kt).Elem()
					kv.SetString(string(key))
				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
					s := string(key)
					n, err := strconv.ParseInt(s, 10, 64)
					if err != nil || kt.OverflowInt(n) {
						d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
						break
					}
					kv = reflect.New(kt).Elem()
					kv.SetInt(n)
				case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
					s := string(key)
					n, err := strconv.ParseUint(s, 10, 64)
					if err != nil || kt.OverflowUint(n) {
						d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
						break
					}
					kv = reflect.New(kt).Elem()
					kv.SetUint(n)
				default:
					panic("json: Unexpected key type") // should never occur
				}
			}
			if kv.IsValid() {
				v.SetMapIndex(kv, subv)
			}
		}

		// Next token must be , or }.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.errorContext != nil {
			// Reset errorContext to its original state.
			// Keep the same underlying array for FieldStack, to reuse the
			// space and avoid unnecessary allocs.
			d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)]
			d.errorContext.Struct = origErrorContext.Struct
		}
		if d.opcode == scanEndObject {
			break
		}
		if d.opcode != scanObjectValue {
			panic(phasePanicMsg)
		}
	}
	return nil
}
// convertNumber converts the number literal s to a float64 or a Number
// depending on the setting of d.useNumber.
func (d *decodeState) convertNumber(s string) (any, error) {
	// UseNumber mode keeps the literal as-is, with no precision loss.
	if d.useNumber {
		return Number(s), nil
	}
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		return f, nil
	}
	// The literal passed the tokenizer but cannot be represented as a
	// float64 (e.g. overflow); report it as a type error at this offset.
	return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeFor[float64](), Offset: int64(d.off)}
}
// numberType is the reflect.Type of json.Number; literalStore uses it to
// recognize Number targets and validate their literals.
var numberType = reflect.TypeFor[Number]()
// literalStore decodes a literal stored in item into v.
//
// fromQuoted indicates whether this literal came from unwrapping a
// string from the ",string" struct tag option. this is used only to
// produce more helpful error messages.
//
// The dispatch is on item's first byte: 'n' for null, 't'/'f' for the
// booleans, '"' for strings, anything else for numbers. Most failures are
// recorded via d.saveError and decoding continues; only a handful of
// ",string"/Number cases return an error directly.
func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
	// Check for unmarshaler.
	if len(item) == 0 {
		// Empty string given.
		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
		return nil
	}
	isNull := item[0] == 'n' // null
	u, ut, pv := indirect(v, isNull)
	if u != nil {
		return u.UnmarshalJSON(item)
	}
	if ut != nil {
		// encoding.TextUnmarshaler accepts only string literals.
		if item[0] != '"' {
			if fromQuoted {
				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
				return nil
			}
			val := "number"
			switch item[0] {
			case 'n':
				val = "null"
			case 't', 'f':
				val = "bool"
			}
			d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())})
			return nil
		}
		s, ok := unquoteBytes(item)
		if !ok {
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			panic(phasePanicMsg)
		}
		return ut.UnmarshalText(s)
	}

	v = pv

	switch c := item[0]; c {
	case 'n': // null
		// The main parser checks that only true and false can reach here,
		// but if this was a quoted string input, it could be anything.
		if fromQuoted && string(item) != "null" {
			d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
			break
		}
		switch v.Kind() {
		case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice:
			v.SetZero()
			// otherwise, ignore null for primitives/string
		}
	case 't', 'f': // true, false
		value := item[0] == 't'
		// The main parser checks that only true and false can reach here,
		// but if this was a quoted string input, it could be anything.
		if fromQuoted && string(item) != "true" && string(item) != "false" {
			d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
			break
		}
		switch v.Kind() {
		default:
			if fromQuoted {
				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
			} else {
				d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())})
			}
		case reflect.Bool:
			v.SetBool(value)
		case reflect.Interface:
			if v.NumMethod() == 0 {
				v.Set(reflect.ValueOf(value))
			} else {
				d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())})
			}
		}

	case '"': // string
		s, ok := unquoteBytes(item)
		if !ok {
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			panic(phasePanicMsg)
		}
		switch v.Kind() {
		default:
			d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
		case reflect.Slice:
			// Only []byte slices accept strings; the contents are
			// decoded as standard base64.
			if v.Type().Elem().Kind() != reflect.Uint8 {
				d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
			n, err := base64.StdEncoding.Decode(b, s)
			if err != nil {
				d.saveError(err)
				break
			}
			v.SetBytes(b[:n])
		case reflect.String:
			t := string(s)
			// json.Number targets must still hold a valid number literal.
			if v.Type() == numberType && !isValidNumber(t) {
				return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)
			}
			v.SetString(t)
		case reflect.Interface:
			if v.NumMethod() == 0 {
				v.Set(reflect.ValueOf(string(s)))
			} else {
				d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
			}
		}

	default: // number
		if c != '-' && (c < '0' || c > '9') {
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			panic(phasePanicMsg)
		}
		switch v.Kind() {
		default:
			if v.Kind() == reflect.String && v.Type() == numberType {
				// s must be a valid number, because it's
				// already been tokenized.
				v.SetString(string(item))
				break
			}
			if fromQuoted {
				return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
			}
			d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
		case reflect.Interface:
			n, err := d.convertNumber(string(item))
			if err != nil {
				d.saveError(err)
				break
			}
			if v.NumMethod() != 0 {
				d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.Set(reflect.ValueOf(n))
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			n, err := strconv.ParseInt(string(item), 10, 64)
			if err != nil || v.OverflowInt(n) {
				d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.SetInt(n)
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			n, err := strconv.ParseUint(string(item), 10, 64)
			if err != nil || v.OverflowUint(n) {
				d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.SetUint(n)
		case reflect.Float32, reflect.Float64:
			n, err := strconv.ParseFloat(string(item), v.Type().Bits())
			if err != nil || v.OverflowFloat(n) {
				d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
				break
			}
			v.SetFloat(n)
		}
	}
	return nil
}
// The xxxInterface routines build up a value to be stored
// in an empty interface. They are not strictly necessary,
// but they avoid the weight of reflection in this common case.

// valueInterface is like value but returns any.
func (d *decodeState) valueInterface() (val any) {
	switch d.opcode {
	case scanBeginLiteral:
		// literalInterface already reads one byte ahead; no scanNext.
		return d.literalInterface()
	case scanBeginArray:
		val = d.arrayInterface()
		d.scanNext()
		return val
	case scanBeginObject:
		val = d.objectInterface()
		d.scanNext()
		return val
	default:
		panic(phasePanicMsg)
	}
}
// arrayInterface is like array but returns []any.
// The result is always non-nil, so an empty JSON array round-trips as [].
func (d *decodeState) arrayInterface() []any {
	elems := make([]any, 0)
	for {
		// Advance to the next value, or to ']' (possible only on the
		// first pass, for an empty array).
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndArray {
			break
		}

		elems = append(elems, d.valueInterface())

		// After a value the scanner must report ',' or ']'.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode == scanEndArray {
			break
		}
		if d.opcode != scanArrayValue {
			panic(phasePanicMsg)
		}
	}
	return elems
}
// objectInterface is like object but returns map[string]any.
func (d *decodeState) objectInterface() map[string]any {
	result := make(map[string]any)
	for {
		// Advance to the opening '"' of a key, or the closing '}'
		// (the latter only on the first iteration).
		d.scanWhile(scanSkipSpace)
		if d.opcode == scanEndObject {
			break
		}
		if d.opcode != scanBeginLiteral {
			panic(phasePanicMsg)
		}

		// Capture the raw key bytes and unquote them.
		keyStart := d.readIndex()
		d.rescanLiteral()
		raw := d.data[keyStart:d.readIndex()]
		key, ok := unquote(raw)
		if !ok {
			panic(phasePanicMsg)
		}

		// A ':' must separate key and value.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode != scanObjectKey {
			panic(phasePanicMsg)
		}
		d.scanWhile(scanSkipSpace)

		// Decode the value. Duplicate keys simply overwrite.
		result[key] = d.valueInterface()

		// After the value the scanner must report ',' or '}'.
		if d.opcode == scanSkipSpace {
			d.scanWhile(scanSkipSpace)
		}
		if d.opcode == scanEndObject {
			break
		}
		if d.opcode != scanObjectValue {
			panic(phasePanicMsg)
		}
	}
	return result
}
// literalInterface consumes and returns a literal from d.data[d.off-1:] and
// it reads the following byte ahead. The first byte of the literal has been
// read already (that's how the caller knows it's a literal).
func (d *decodeState) literalInterface() any {
	// All bytes inside literal return scanContinue op code.
	start := d.readIndex()
	d.rescanLiteral()
	lit := d.data[start:d.readIndex()]

	switch c := lit[0]; {
	case c == 'n': // null
		return nil

	case c == 't' || c == 'f': // true, false
		return c == 't'

	case c == '"': // string
		s, ok := unquote(lit)
		if !ok {
			panic(phasePanicMsg)
		}
		return s

	default: // number
		if c != '-' && (c < '0' || c > '9') {
			panic(phasePanicMsg)
		}
		n, err := d.convertNumber(string(lit))
		if err != nil {
			d.saveError(err)
		}
		return n
	}
}
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
// or it returns -1 if s is too short or not a well-formed escape.
func getu4(s []byte) rune {
	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
		return -1
	}
	var r rune
	// Fold the four hex digits (bytes 2..5) into r, most significant first.
	for _, c := range s[2:6] {
		var digit rune
		switch {
		case '0' <= c && c <= '9':
			digit = rune(c - '0')
		case 'a' <= c && c <= 'f':
			digit = rune(c-'a') + 10
		case 'A' <= c && c <= 'F':
			digit = rune(c-'A') + 10
		default:
			return -1
		}
		r = r<<4 | digit
	}
	return r
}
// unquote converts a quoted JSON string literal s into an actual string t.
// The rules are different than for Go, so cannot use strconv.Unquote.
// ok reports whether s was a well-formed JSON string literal.
func unquote(s []byte) (t string, ok bool) {
	var b []byte
	b, ok = unquoteBytes(s)
	return string(b), ok
}
// unquoteBytes converts a quoted JSON string literal s (including the
// surrounding double quotes) into its unescaped bytes t. ok is false if
// s is not a well-formed literal. In the common no-escape, valid-UTF-8
// case, t aliases s's backing array instead of copying.
func unquoteBytes(s []byte) (t []byte, ok bool) {
	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
		return
	}
	s = s[1 : len(s)-1]

	// Check for unusual characters. If there are none,
	// then no unquoting is needed, so return a slice of the
	// original bytes.
	r := 0
	for r < len(s) {
		c := s[r]
		if c == '\\' || c == '"' || c < ' ' {
			break
		}
		rr, size := utf8.DecodeRune(s[r:])
		if rr == utf8.RuneError && size == 1 {
			break
		}
		r += size
	}
	if r == len(s) {
		return s, true
	}

	// Slow path: copy the clean prefix scanned above, then process the
	// rest byte by byte. The 2*utf8.UTFMax headroom guarantees a rune
	// append never overruns b before the growth check below fires.
	b := make([]byte, len(s)+2*utf8.UTFMax)
	w := copy(b, s[0:r])
	for r < len(s) {
		// Out of room? Can only happen if s is full of
		// malformed UTF-8 and we're replacing each
		// byte with RuneError.
		if w >= len(b)-2*utf8.UTFMax {
			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
			copy(nb, b[0:w])
			b = nb
		}
		switch c := s[r]; {
		case c == '\\':
			r++
			if r >= len(s) {
				// Trailing backslash: malformed literal.
				return
			}
			switch s[r] {
			default:
				// Unrecognized escape: malformed literal.
				return
			case '"', '\\', '/', '\'':
				b[w] = s[r]
				r++
				w++
			case 'b':
				b[w] = '\b'
				r++
				w++
			case 'f':
				b[w] = '\f'
				r++
				w++
			case 'n':
				b[w] = '\n'
				r++
				w++
			case 'r':
				b[w] = '\r'
				r++
				w++
			case 't':
				b[w] = '\t'
				r++
				w++
			case 'u':
				// \uXXXX escape; back up so getu4 sees the backslash.
				r--
				rr := getu4(s[r:])
				if rr < 0 {
					return
				}
				r += 6
				if utf16.IsSurrogate(rr) {
					// Try to pair with a following \uXXXX low surrogate.
					rr1 := getu4(s[r:])
					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
						// A valid pair; consume.
						r += 6
						w += utf8.EncodeRune(b[w:], dec)
						break
					}
					// Invalid surrogate; fall back to replacement rune.
					rr = unicode.ReplacementChar
				}
				w += utf8.EncodeRune(b[w:], rr)
			}

		// Quote, control characters are invalid.
		case c == '"', c < ' ':
			return

		// ASCII
		case c < utf8.RuneSelf:
			b[w] = c
			r++
			w++

		// Coerce to well-formed UTF-8.
		default:
			rr, size := utf8.DecodeRune(s[r:])
			r += size
			w += utf8.EncodeRune(b[w:], rr)
		}
	}
	return b[0:w], true
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.jsonv2
// Package json implements encoding and decoding of JSON as defined in RFC 7159.
// The mapping between JSON and Go values is described in the documentation for
// the Marshal and Unmarshal functions.
//
// See "JSON and Go" for an introduction to this package:
// https://golang.org/doc/articles/json_and_go.html
//
// # Security Considerations
//
// The JSON standard (RFC 7159) is lax in its definition of a number of parser
// behaviors. As such, many JSON parsers behave differently in various
// scenarios. These differences in parsers mean that systems that use multiple
// independent JSON parser implementations may parse the same JSON object in
// differing ways.
//
// Systems that rely on a JSON object being parsed consistently for security
// purposes should be careful to understand the behaviors of this parser, as
// well as how these behaviors may cause interoperability issues with other
// parser implementations.
//
// Due to the Go Backwards Compatibility promise (https://go.dev/doc/go1compat)
// there are a number of behaviors this package exhibits that may cause
// interoperability issues, but cannot be changed. In particular the following
// parsing behaviors may cause issues:
//
// - If a JSON object contains duplicate keys, keys are processed in the order
// they are observed, meaning later values will replace or be merged into
// prior values, depending on the field type (in particular maps and structs
// will have values merged, while other types have values replaced).
// - When parsing a JSON object into a Go struct, keys are considered in a
// case-insensitive fashion.
// - When parsing a JSON object into a Go struct, unknown keys in the JSON
// object are ignored (unless a [Decoder] is used and
// [Decoder.DisallowUnknownFields] has been called).
// - Invalid UTF-8 bytes in JSON strings are replaced by the Unicode
// replacement character.
// - Large JSON number integers will lose precision when unmarshaled into
// floating-point types.
package json
import (
"bytes"
"cmp"
"encoding"
"encoding/base64"
"fmt"
"math"
"reflect"
"slices"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// Marshal returns the JSON encoding of v.
//
// Marshal traverses the value v recursively.
// If an encountered value implements [Marshaler]
// and is not a nil pointer, Marshal calls [Marshaler.MarshalJSON]
// to produce JSON. If no [Marshaler.MarshalJSON] method is present but the
// value implements [encoding.TextMarshaler] instead, Marshal calls
// [encoding.TextMarshaler.MarshalText] and encodes the result as a JSON string.
// The nil pointer exception is not strictly necessary
// but mimics a similar, necessary exception in the behavior of
// [Unmarshaler.UnmarshalJSON].
//
// Otherwise, Marshal uses the following type-dependent default encodings:
//
// Boolean values encode as JSON booleans.
//
// Floating point, integer, and [Number] values encode as JSON numbers.
// NaN and +/-Inf values will return an [UnsupportedValueError].
//
// String values encode as JSON strings coerced to valid UTF-8,
// replacing invalid bytes with the Unicode replacement rune.
// So that the JSON will be safe to embed inside HTML <script> tags,
// the string is encoded using [HTMLEscape],
// which replaces "<", ">", "&", U+2028, and U+2029 are escaped
// to "\u003c","\u003e", "\u0026", "\u2028", and "\u2029".
// This replacement can be disabled when using an [Encoder],
// by calling [Encoder.SetEscapeHTML](false).
//
// Array and slice values encode as JSON arrays, except that
// []byte encodes as a base64-encoded string, and a nil slice
// encodes as the null JSON value.
//
// Struct values encode as JSON objects.
// Each exported struct field becomes a member of the object, using the
// field name as the object key, unless the field is omitted for one of the
// reasons given below.
//
// The encoding of each struct field can be customized by the format string
// stored under the "json" key in the struct field's tag.
// The format string gives the name of the field, possibly followed by a
// comma-separated list of options. The name may be empty in order to
// specify options without overriding the default field name.
//
// The "omitempty" option specifies that the field should be omitted
// from the encoding if the field has an empty value, defined as
// false, 0, a nil pointer, a nil interface value, and any array,
// slice, map, or string of length zero.
//
// As a special case, if the field tag is "-", the field is always omitted.
// Note that a field with name "-" can still be generated using the tag "-,".
//
// Examples of struct field tags and their meanings:
//
// // Field appears in JSON as key "myName".
// Field int `json:"myName"`
//
// // Field appears in JSON as key "myName" and
// // the field is omitted from the object if its value is empty,
// // as defined above.
// Field int `json:"myName,omitempty"`
//
// // Field appears in JSON as key "Field" (the default), but
// // the field is skipped if empty.
// // Note the leading comma.
// Field int `json:",omitempty"`
//
// // Field is ignored by this package.
// Field int `json:"-"`
//
// // Field appears in JSON as key "-".
// Field int `json:"-,"`
//
// The "omitzero" option specifies that the field should be omitted
// from the encoding if the field has a zero value, according to rules:
//
// 1) If the field type has an "IsZero() bool" method, that will be used to
// determine whether the value is zero.
//
// 2) Otherwise, the value is zero if it is the zero value for its type.
//
// If both "omitempty" and "omitzero" are specified, the field will be omitted
// if the value is either empty or zero (or both).
//
// The "string" option signals that a field is stored as JSON inside a
// JSON-encoded string. It applies only to fields of string, floating point,
// integer, or boolean types. This extra level of encoding is sometimes used
// when communicating with JavaScript programs:
//
// Int64String int64 `json:",string"`
//
// The key name will be used if it's a non-empty string consisting of
// only Unicode letters, digits, and ASCII punctuation except quotation
// marks, backslash, and comma.
//
// Embedded struct fields are usually marshaled as if their inner exported fields
// were fields in the outer struct, subject to the usual Go visibility rules amended
// as described in the next paragraph.
// An anonymous struct field with a name given in its JSON tag is treated as
// having that name, rather than being anonymous.
// An anonymous struct field of interface type is treated the same as having
// that type as its name, rather than being anonymous.
//
// The Go visibility rules for struct fields are amended for JSON when
// deciding which field to marshal or unmarshal. If there are
// multiple fields at the same level, and that level is the least
// nested (and would therefore be the nesting level selected by the
// usual Go rules), the following extra rules apply:
//
// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
// even if there are multiple untagged fields that would otherwise conflict.
//
// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
//
// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
//
// Handling of anonymous struct fields is new in Go 1.1.
// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
// an anonymous struct field in both current and earlier versions, give the field
// a JSON tag of "-".
//
// Map values encode as JSON objects. The map's key type must either be a
// string, an integer type, or implement [encoding.TextMarshaler]. The map keys
// are sorted and used as JSON object keys by applying the following rules,
// subject to the UTF-8 coercion described for string values above:
// - keys of any string type are used directly
// - keys that implement [encoding.TextMarshaler] are marshaled
// - integer keys are converted to strings
//
// Pointer values encode as the value pointed to.
// A nil pointer encodes as the null JSON value.
//
// Interface values encode as the value contained in the interface.
// A nil interface value encodes as the null JSON value.
//
// Channel, complex, and function values cannot be encoded in JSON.
// Attempting to encode such a value causes Marshal to return
// an [UnsupportedTypeError].
//
// JSON cannot represent cyclic data structures and Marshal does not
// handle them. Passing cyclic structures to Marshal will result in
// an error.
func Marshal(v any) ([]byte, error) {
	// Borrow a pooled encode state and return it when done.
	e := newEncodeState()
	defer encodeStatePool.Put(e)

	if err := e.marshal(v, encOpts{escapeHTML: true}); err != nil {
		return nil, err
	}
	// Copy the result out: e's buffer is recycled via the pool.
	return append([]byte(nil), e.Bytes()...), nil
}
// MarshalIndent is like [Marshal] but applies [Indent] to format the output.
// Each JSON element in the output will begin on a new line beginning with prefix
// followed by one or more copies of indent according to the indentation nesting.
func MarshalIndent(v any, prefix, indent string) ([]byte, error) {
	// Produce the compact encoding first, then re-layout with indentation.
	compact, err := Marshal(v)
	if err != nil {
		return nil, err
	}
	dst := make([]byte, 0, indentGrowthFactor*len(compact))
	dst, err = appendIndent(dst, compact, prefix, indent)
	if err != nil {
		return nil, err
	}
	return dst, nil
}
// Marshaler is the interface implemented by types that
// can marshal themselves into valid JSON.
type Marshaler interface {
	// MarshalJSON returns the JSON encoding of the receiver.
	MarshalJSON() ([]byte, error)
}
// An UnsupportedTypeError is returned by [Marshal] when attempting
// to encode an unsupported value type.
type UnsupportedTypeError struct {
	Type reflect.Type // the Go type that could not be encoded
}

// Error implements the error interface.
func (e *UnsupportedTypeError) Error() string {
	return "json: unsupported type: " + e.Type.String()
}
// An UnsupportedValueError is returned by [Marshal] when attempting
// to encode an unsupported value.
type UnsupportedValueError struct {
	Value reflect.Value // the offending value
	Str   string        // string form of the value, used in the message
}

// Error implements the error interface.
func (e *UnsupportedValueError) Error() string {
	return "json: unsupported value: " + e.Str
}
// Before Go 1.2, an InvalidUTF8Error was returned by [Marshal] when
// attempting to encode a string value with invalid UTF-8 sequences.
// As of Go 1.2, [Marshal] instead coerces the string to valid UTF-8 by
// replacing invalid bytes with the Unicode replacement rune U+FFFD.
//
// Deprecated: No longer used; kept for compatibility.
type InvalidUTF8Error struct {
	S string // the whole string value that caused the error
}

// Error implements the error interface.
func (e *InvalidUTF8Error) Error() string {
	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
}
// A MarshalerError represents an error from calling a
// [Marshaler.MarshalJSON] or [encoding.TextMarshaler.MarshalText] method.
type MarshalerError struct {
Type reflect.Type
Err error
sourceFunc string
}
func (e *MarshalerError) Error() string {
srcFunc := e.sourceFunc
if srcFunc == "" {
srcFunc = "MarshalJSON"
}
return "json: error calling " + srcFunc +
" for type " + e.Type.String() +
": " + e.Err.Error()
}
// Unwrap returns the underlying error.
func (e *MarshalerError) Unwrap() error { return e.Err }
// hex lists the lowercase hexadecimal digits, indexed by nibble value.
const hex = "0123456789abcdef"
// An encodeState encodes JSON into a bytes.Buffer.
// Instances are pooled via encodeStatePool; see newEncodeState.
type encodeState struct {
	bytes.Buffer // accumulated output

	// Keep track of what pointers we've seen in the current recursive call
	// path, to avoid cycles that could lead to a stack overflow. Only do
	// the relatively expensive map operations if ptrLevel is larger than
	// startDetectingCyclesAfter, so that we skip the work if we're within a
	// reasonable amount of nested pointers deep.
	ptrLevel uint
	ptrSeen  map[any]struct{}
}
// startDetectingCyclesAfter is the pointer-nesting depth beyond which
// encodeState starts recording pointers in ptrSeen to detect cycles.
const startDetectingCyclesAfter = 1000

// encodeStatePool recycles encodeState values across Marshal calls.
var encodeStatePool sync.Pool
// newEncodeState returns an encodeState ready for use, reusing a pooled
// one when available.
func newEncodeState() *encodeState {
	v := encodeStatePool.Get()
	if v == nil {
		return &encodeState{ptrSeen: make(map[any]struct{})}
	}
	e := v.(*encodeState)
	e.Reset()
	// A pooled state must have had its cycle-detection map drained.
	if len(e.ptrSeen) > 0 {
		panic("ptrEncoder.encode should have emptied ptrSeen via defers")
	}
	e.ptrLevel = 0
	return e
}
// jsonError is an error wrapper type for internal use only.
// Panics with errors are wrapped in jsonError so that the top-level recover
// can distinguish intentional panics from this package.
// (Raised by encodeState.error, caught in encodeState.marshal.)
type jsonError struct{ error }
// marshal encodes v into e's buffer, converting jsonError panics raised
// by encoder funcs back into ordinary returned errors.
func (e *encodeState) marshal(v any, opts encOpts) (err error) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		je, ok := r.(jsonError)
		if !ok {
			// Not one of ours: propagate the panic.
			panic(r)
		}
		err = je.error
	}()
	e.reflectValue(reflect.ValueOf(v), opts)
	return nil
}
// error aborts the encoding by panicking with err wrapped in jsonError.
// The panic is recovered in encodeState.marshal and returned as an error.
func (e *encodeState) error(err error) {
	panic(jsonError{err})
}
// isEmptyValue reports whether v counts as "empty" for the purposes of
// the ",omitempty" struct tag option.
func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		// Containers and strings are empty when they have no elements.
		return v.Len() == 0
	case reflect.Bool,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
		reflect.Float32, reflect.Float64,
		reflect.Interface, reflect.Pointer:
		// Scalars, interfaces, and pointers are empty when zero-valued.
		return v.IsZero()
	default:
		// Structs, channels, funcs, complex numbers: never "empty".
		return false
	}
}
// reflectValue encodes v by looking up (or building) the encoder for its
// type and invoking it.
func (e *encodeState) reflectValue(v reflect.Value, opts encOpts) {
	valueEncoder(v)(e, v, opts)
}
// encOpts carries per-call encoding options threaded through every
// encoderFunc.
type encOpts struct {
	// quoted causes primitive fields to be encoded inside JSON strings.
	quoted bool
	// escapeHTML causes '<', '>', and '&' to be escaped in JSON strings.
	escapeHTML bool
}

// encoderFunc writes the JSON encoding of v to e.
type encoderFunc func(e *encodeState, v reflect.Value, opts encOpts)

// encoderCache memoizes encoder construction per type; see typeEncoder.
var encoderCache sync.Map // map[reflect.Type]encoderFunc
// valueEncoder returns the encoderFunc for v, falling back to the
// null-emitting encoder for invalid (zero) reflect.Values.
func valueEncoder(v reflect.Value) encoderFunc {
	if v.IsValid() {
		return typeEncoder(v.Type())
	}
	return invalidValueEncoder
}
// typeEncoder returns the encoderFunc for t, building and caching it on
// first use. Safe for concurrent callers.
func typeEncoder(t reflect.Type) encoderFunc {
	if fi, ok := encoderCache.Load(t); ok {
		return fi.(encoderFunc)
	}

	// To deal with recursive types, populate the map with an
	// indirect func before we build it. If the type is recursive,
	// the second lookup for the type will return the indirect func.
	//
	// This indirect func is only used for recursive types,
	// and briefly during racing calls to typeEncoder.
	indirect := sync.OnceValue(func() encoderFunc {
		return newTypeEncoder(t, true)
	})
	fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value, opts encOpts) {
		indirect()(e, v, opts)
	}))
	if loaded {
		// Another goroutine (or a recursive call) got there first.
		return fi.(encoderFunc)
	}

	// We stored the placeholder: build the real encoder and swap it in so
	// later lookups skip the indirection.
	f := indirect()
	encoderCache.Store(t, f)
	return f
}
var (
	// Cached interface types for the Implements checks in newTypeEncoder.
	marshalerType     = reflect.TypeFor[Marshaler]()
	textMarshalerType = reflect.TypeFor[encoding.TextMarshaler]()
)
// newTypeEncoder constructs an encoderFunc for a type.
// The returned encoder only checks CanAddr when allowAddr is true.
// json.Marshaler is preferred over encoding.TextMarshaler; for each, a
// pointer-receiver implementation on a non-pointer type gets the
// address-taking variant when the value turns out to be addressable.
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
	// If we have a non-pointer value whose type implements
	// Marshaler with a value receiver, then we're better off taking
	// the address of the value - otherwise we end up with an
	// allocation as we cast the value to an interface.
	if t.Kind() != reflect.Pointer && allowAddr && reflect.PointerTo(t).Implements(marshalerType) {
		return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
	}
	if t.Implements(marshalerType) {
		return marshalerEncoder
	}
	if t.Kind() != reflect.Pointer && allowAddr && reflect.PointerTo(t).Implements(textMarshalerType) {
		return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
	}
	if t.Implements(textMarshalerType) {
		return textMarshalerEncoder
	}

	// No marshaler interface applies: dispatch on the reflect kind.
	switch t.Kind() {
	case reflect.Bool:
		return boolEncoder
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return intEncoder
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return uintEncoder
	case reflect.Float32:
		return float32Encoder
	case reflect.Float64:
		return float64Encoder
	case reflect.String:
		return stringEncoder
	case reflect.Interface:
		return interfaceEncoder
	case reflect.Struct:
		return newStructEncoder(t)
	case reflect.Map:
		return newMapEncoder(t)
	case reflect.Slice:
		return newSliceEncoder(t)
	case reflect.Array:
		return newArrayEncoder(t)
	case reflect.Pointer:
		return newPtrEncoder(t)
	default:
		return unsupportedTypeEncoder
	}
}
// invalidValueEncoder encodes an invalid reflect.Value as JSON null.
func invalidValueEncoder(e *encodeState, v reflect.Value, _ encOpts) {
	e.WriteString("null")
}
// marshalerEncoder encodes a value whose type implements json.Marshaler
// by calling MarshalJSON and compacting its output into e.
func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
	if v.Kind() == reflect.Pointer && v.IsNil() {
		e.WriteString("null")
		return
	}
	m, ok := reflect.TypeAssert[Marshaler](v)
	if !ok {
		e.WriteString("null")
		return
	}
	b, err := m.MarshalJSON()
	if err != nil {
		e.error(&MarshalerError{v.Type(), err, "MarshalJSON"})
		return
	}
	// Re-compact the marshaler's output (also validates it and applies
	// HTML escaping per opts) directly into e's spare capacity.
	e.Grow(len(b))
	out := e.AvailableBuffer()
	out, err = appendCompact(out, b, opts.escapeHTML)
	e.Buffer.Write(out)
	if err != nil {
		e.error(&MarshalerError{v.Type(), err, "MarshalJSON"})
	}
}
// addrMarshalerEncoder is like marshalerEncoder but calls MarshalJSON on
// v's address, for pointer-receiver implementations on addressable values.
func addrMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
	va := v.Addr()
	if va.IsNil() {
		e.WriteString("null")
		return
	}
	m, _ := reflect.TypeAssert[Marshaler](va)
	b, err := m.MarshalJSON()
	if err != nil {
		e.error(&MarshalerError{v.Type(), err, "MarshalJSON"})
		return
	}
	// Re-compact (and validate) the marshaler's output into e.
	e.Grow(len(b))
	out := e.AvailableBuffer()
	out, err = appendCompact(out, b, opts.escapeHTML)
	e.Buffer.Write(out)
	if err != nil {
		e.error(&MarshalerError{v.Type(), err, "MarshalJSON"})
	}
}
// textMarshalerEncoder encodes a value whose type implements
// encoding.TextMarshaler, writing the marshaled text as a JSON string.
func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
	// A nil pointer, or a value that fails the interface assertion,
	// encodes as null.
	if v.Kind() == reflect.Pointer && v.IsNil() {
		e.WriteString("null")
		return
	}
	marshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](v)
	if !ok {
		e.WriteString("null")
		return
	}
	text, err := marshaler.MarshalText()
	if err != nil {
		e.error(&MarshalerError{v.Type(), err, "MarshalText"})
	}
	e.Write(appendString(e.AvailableBuffer(), text, opts.escapeHTML))
}
// addrTextMarshalerEncoder is like textMarshalerEncoder but marshals through
// the value's address, for types whose MarshalText has a pointer receiver.
func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
	va := v.Addr()
	if va.IsNil() {
		e.WriteString("null")
		return
	}
	marshaler, _ := reflect.TypeAssert[encoding.TextMarshaler](va)
	text, err := marshaler.MarshalText()
	if err != nil {
		e.error(&MarshalerError{v.Type(), err, "MarshalText"})
	}
	e.Write(appendString(e.AvailableBuffer(), text, opts.escapeHTML))
}
// boolEncoder writes v's bool, wrapped in quotes when the ",string"
// option is in effect.
func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
	out := mayAppendQuote(e.AvailableBuffer(), opts.quoted)
	out = strconv.AppendBool(out, v.Bool())
	e.Write(mayAppendQuote(out, opts.quoted))
}
// intEncoder writes v's signed integer in base 10, wrapped in quotes when
// the ",string" option is in effect.
func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
	out := mayAppendQuote(e.AvailableBuffer(), opts.quoted)
	out = strconv.AppendInt(out, v.Int(), 10)
	e.Write(mayAppendQuote(out, opts.quoted))
}
// uintEncoder writes v's unsigned integer in base 10, wrapped in quotes
// when the ",string" option is in effect.
func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) {
	out := mayAppendQuote(e.AvailableBuffer(), opts.quoted)
	out = strconv.AppendUint(out, v.Uint(), 10)
	e.Write(mayAppendQuote(out, opts.quoted))
}
// floatEncoder encodes a float value; the underlying int is the bit width
// (32 or 64) passed to strconv.
type floatEncoder int // number of bits

// encode writes v's float using the shortest round-trippable representation,
// switching to exponent form outside [1e-6, 1e21). Infinities and NaN are
// rejected, as JSON has no representation for them.
func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
	f := v.Float()
	if math.IsInf(f, 0) || math.IsNaN(f) {
		e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
	}

	// Convert as if by ES6 number to string conversion.
	// This matches most other JSON generators.
	// See golang.org/issue/6384 and golang.org/issue/14135.
	// Like fmt %g, but the exponent cutoffs are different
	// and exponents themselves are not padded to two digits.
	b := e.AvailableBuffer()
	b = mayAppendQuote(b, opts.quoted)
	abs := math.Abs(f)
	fmt := byte('f')
	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
	if abs != 0 {
		if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
			fmt = 'e'
		}
	}
	b = strconv.AppendFloat(b, f, fmt, -1, int(bits))
	if fmt == 'e' {
		// clean up e-09 to e-9
		n := len(b)
		if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
			b[n-2] = b[n-1]
			b = b[:n-1]
		}
	}
	b = mayAppendQuote(b, opts.quoted)
	e.Write(b)
}

var (
	float32Encoder = (floatEncoder(32)).encode
	float64Encoder = (floatEncoder(64)).encode
)
// stringEncoder writes v's string as a JSON string. json.Number values are
// validated and written verbatim; the ",string" option wraps the escaped
// string in a second pair of quotes.
func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
	if v.Type() == numberType {
		numStr := v.String()
		// In Go1.5 the empty string encodes to "0", while this is not a valid number literal
		// we keep compatibility so check validity after this.
		if numStr == "" {
			numStr = "0" // Number's zero-val
		}
		if !isValidNumber(numStr) {
			e.error(fmt.Errorf("json: invalid number literal %q", numStr))
		}
		out := mayAppendQuote(e.AvailableBuffer(), opts.quoted)
		out = append(out, numStr...)
		e.Write(mayAppendQuote(out, opts.quoted))
		return
	}
	if !opts.quoted {
		e.Write(appendString(e.AvailableBuffer(), v.String(), opts.escapeHTML))
		return
	}
	// ",string": escape once, then quote the escaped form without
	// escaping again, since it is already escaped.
	escaped := appendString(nil, v.String(), opts.escapeHTML)
	e.Write(appendString(e.AvailableBuffer(), escaped, false))
}
// isValidNumber reports whether s is a valid JSON number literal.
// It implements the JSON numbers grammar:
// https://tools.ietf.org/html/rfc7159#section-6
// and https://www.json.org/img/number.png
func isValidNumber(s string) bool {
	if s == "" {
		return false
	}
	i := 0

	// Optional leading minus sign.
	if s[i] == '-' {
		i++
		if i == len(s) {
			return false
		}
	}

	// Integer part: a lone zero, or a nonzero digit followed by any digits.
	switch {
	case s[i] == '0':
		i++
	case '1' <= s[i] && s[i] <= '9':
		i++
		for i < len(s) && '0' <= s[i] && s[i] <= '9' {
			i++
		}
	default:
		return false
	}

	// Optional fraction: '.' followed by one or more digits.
	if i+1 < len(s) && s[i] == '.' && '0' <= s[i+1] && s[i+1] <= '9' {
		i += 2
		for i < len(s) && '0' <= s[i] && s[i] <= '9' {
			i++
		}
	}

	// Optional exponent: e or E, an optional sign, then one or more digits.
	if i+1 < len(s) && (s[i] == 'e' || s[i] == 'E') {
		i++
		if s[i] == '+' || s[i] == '-' {
			i++
			if i == len(s) {
				return false
			}
		}
		for i < len(s) && '0' <= s[i] && s[i] <= '9' {
			i++
		}
	}

	// Valid only if the whole string was consumed.
	return i == len(s)
}
// interfaceEncoder encodes an interface value by encoding its dynamic
// value; a nil interface encodes as null.
func interfaceEncoder(e *encodeState, v reflect.Value, opts encOpts) {
	if !v.IsNil() {
		e.reflectValue(v.Elem(), opts)
		return
	}
	e.WriteString("null")
}
// unsupportedTypeEncoder reports an UnsupportedTypeError; it is installed
// for kinds that have no JSON representation (the default case of
// newTypeEncoder's kind switch).
func unsupportedTypeEncoder(e *encodeState, v reflect.Value, _ encOpts) {
	e.error(&UnsupportedTypeError{v.Type()})
}
// structEncoder encodes a struct using its precomputed field list.
type structEncoder struct {
	fields structFields
}

// structFields holds a struct type's JSON-relevant fields in encoding
// order, plus lookup tables keyed by exact and by case-folded field name.
type structFields struct {
	list         []field
	byExactName  map[string]*field
	byFoldedName map[string]*field
}
// encode writes v (a struct) as a JSON object, emitting fields in the
// precomputed order and honoring the omitempty/omitzero options and
// HTML name escaping.
func (se structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
	// next is '{' before the first emitted field, ',' afterwards; if it is
	// still '{' at the end, no field was written.
	next := byte('{')
FieldLoop:
	for i := range se.fields.list {
		f := &se.fields.list[i]

		// Find the nested struct field by following f.index.
		fv := v
		for _, i := range f.index {
			if fv.Kind() == reflect.Pointer {
				if fv.IsNil() {
					// A nil embedded struct pointer hides all its fields.
					continue FieldLoop
				}
				fv = fv.Elem()
			}
			fv = fv.Field(i)
		}

		// Skip fields suppressed by omitempty or omitzero (the latter uses
		// the field's custom isZero func when one was installed).
		if (f.omitEmpty && isEmptyValue(fv)) ||
			(f.omitZero && (f.isZero == nil && fv.IsZero() || (f.isZero != nil && f.isZero(fv)))) {
			continue
		}
		e.WriteByte(next)
		next = ','
		if opts.escapeHTML {
			e.WriteString(f.nameEscHTML)
		} else {
			e.WriteString(f.nameNonEsc)
		}
		opts.quoted = f.quoted
		f.encoder(e, fv, opts)
	}
	if next == '{' {
		e.WriteString("{}")
	} else {
		e.WriteByte('}')
	}
}
// newStructEncoder returns an encoder for struct type t, resolving its
// (cached) field list once up front.
func newStructEncoder(t reflect.Type) encoderFunc {
	return structEncoder{fields: cachedTypeFields(t)}.encode
}
// mapEncoder encodes a map as a JSON object, using elemEnc for the values.
type mapEncoder struct {
	elemEnc encoderFunc
}
// encode writes v (a map) as a JSON object with keys in sorted order.
// A nil map encodes as null. Beyond startDetectingCyclesAfter levels of
// nesting, it tracks visited map pointers to detect cycles.
func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
		// We're a large number of nested ptrEncoder.encode calls deep;
		// start checking if we've run into a pointer cycle.
		ptr := v.UnsafePointer()
		if _, ok := e.ptrSeen[ptr]; ok {
			e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
		}
		e.ptrSeen[ptr] = struct{}{}
		defer delete(e.ptrSeen, ptr)
	}
	e.WriteByte('{')

	// Extract and sort the keys.
	var (
		sv  = make([]reflectWithString, v.Len())
		mi  = v.MapRange()
		err error
	)
	for i := 0; mi.Next(); i++ {
		if sv[i].ks, err = resolveKeyName(mi.Key()); err != nil {
			e.error(fmt.Errorf("json: encoding error for type %q: %q", v.Type().String(), err.Error()))
		}
		sv[i].v = mi.Value()
	}
	// Sort by resolved key string for deterministic output.
	slices.SortFunc(sv, func(i, j reflectWithString) int {
		return strings.Compare(i.ks, j.ks)
	})

	for i, kv := range sv {
		if i > 0 {
			e.WriteByte(',')
		}
		e.Write(appendString(e.AvailableBuffer(), kv.ks, opts.escapeHTML))
		e.WriteByte(':')
		me.elemEnc(e, kv.v, opts)
	}
	e.WriteByte('}')
	e.ptrLevel--
}
// newMapEncoder returns an encoder for map type t. Keys must be strings,
// integers, or implement encoding.TextMarshaler; otherwise the map is
// unsupported.
func newMapEncoder(t reflect.Type) encoderFunc {
	switch t.Key().Kind() {
	case reflect.String,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		// Natively supported key kinds.
	default:
		if !t.Key().Implements(textMarshalerType) {
			return unsupportedTypeEncoder
		}
	}
	return mapEncoder{typeEncoder(t.Elem())}.encode
}
// encodeByteSlice writes a []byte as a base64-encoded JSON string;
// a nil slice encodes as null.
func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	out := append(e.AvailableBuffer(), '"')
	out = base64.StdEncoding.AppendEncode(out, v.Bytes())
	e.Write(append(out, '"'))
}
// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
type sliceEncoder struct {
	arrayEnc encoderFunc // encodes the slice contents as a JSON array
}
// encode writes v (a slice) as a JSON array, or null for a nil slice.
// Beyond startDetectingCyclesAfter levels of nesting it records the
// (data pointer, length) pair of each slice seen to detect cycles.
func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
		// We're a large number of nested ptrEncoder.encode calls deep;
		// start checking if we've run into a pointer cycle.
		// Here we use a struct to memorize the pointer to the first element of the slice
		// and its length.
		ptr := struct {
			ptr any // always an unsafe.Pointer, but avoids a dependency on package unsafe
			len int
		}{v.UnsafePointer(), v.Len()}
		if _, ok := e.ptrSeen[ptr]; ok {
			e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
		}
		e.ptrSeen[ptr] = struct{}{}
		defer delete(e.ptrSeen, ptr)
	}
	se.arrayEnc(e, v, opts)
	e.ptrLevel--
}
// newSliceEncoder returns an encoder for slice type t. []byte gets base64
// treatment, but only when the element type does not provide its own
// marshaling via a pointer receiver; arrays never get the byte fast path.
func newSliceEncoder(t reflect.Type) encoderFunc {
	if t.Elem().Kind() == reflect.Uint8 {
		pt := reflect.PointerTo(t.Elem())
		if !pt.Implements(marshalerType) && !pt.Implements(textMarshalerType) {
			return encodeByteSlice
		}
	}
	return sliceEncoder{newArrayEncoder(t)}.encode
}
// arrayEncoder encodes an array or slice as a JSON array, using elemEnc
// for each element.
type arrayEncoder struct {
	elemEnc encoderFunc
}
// encode writes v's elements as a comma-separated JSON array.
func (ae arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
	e.WriteByte('[')
	for i, n := 0, v.Len(); i < n; i++ {
		if i > 0 {
			e.WriteByte(',')
		}
		ae.elemEnc(e, v.Index(i), opts)
	}
	e.WriteByte(']')
}
// newArrayEncoder returns an encoder for array type t, encoding
// element-by-element with the element type's encoder.
func newArrayEncoder(t reflect.Type) encoderFunc {
	return arrayEncoder{typeEncoder(t.Elem())}.encode
}
// ptrEncoder encodes a pointer by encoding its pointee with elemEnc.
type ptrEncoder struct {
	elemEnc encoderFunc
}
// encode writes the value v points to, or null for a nil pointer.
// Beyond startDetectingCyclesAfter levels of nesting it records visited
// pointers to detect cycles.
func (pe ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
		// We're a large number of nested ptrEncoder.encode calls deep;
		// start checking if we've run into a pointer cycle.
		ptr := v.Interface()
		if _, ok := e.ptrSeen[ptr]; ok {
			e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
		}
		e.ptrSeen[ptr] = struct{}{}
		defer delete(e.ptrSeen, ptr)
	}
	pe.elemEnc(e, v.Elem(), opts)
	e.ptrLevel--
}
// newPtrEncoder returns an encoder for pointer type t, delegating to the
// element type's encoder.
func newPtrEncoder(t reflect.Type) encoderFunc {
	return ptrEncoder{typeEncoder(t.Elem())}.encode
}
// condAddrEncoder picks canAddrEnc when the value is addressable and
// elseEnc otherwise.
type condAddrEncoder struct {
	canAddrEnc, elseEnc encoderFunc
}
// encode dispatches to the addressable-value encoder when possible,
// falling back to the non-addressable one.
func (ce condAddrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
	enc := ce.elseEnc
	if v.CanAddr() {
		enc = ce.canAddrEnc
	}
	enc(e, v, opts)
}
// newCondAddrEncoder returns an encoder that checks whether its value
// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
	return condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}.encode
}
// isValidTag reports whether s can be used as a JSON field name from a
// struct tag: letters, digits, and a fixed set of punctuation characters.
// Backslash and quote chars are reserved and therefore rejected.
func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	for _, c := range s {
		if strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c) {
			// Explicitly allowed punctuation.
			continue
		}
		if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
			return false
		}
	}
	return true
}
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Pointer {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
// reflectWithString pairs a map entry's value with its resolved string
// key, so entries can be sorted by key before encoding.
type reflectWithString struct {
	v  reflect.Value // the map entry's value
	ks string        // the key resolved to a string (see resolveKeyName)
}
// resolveKeyName converts a map key to its string form: strings are used
// directly, TextMarshaler keys produce their marshaled text (a nil pointer
// yields an empty key), and integer kinds are formatted in base 10.
// It panics for any other kind.
func resolveKeyName(k reflect.Value) (string, error) {
	if k.Kind() == reflect.String {
		return k.String(), nil
	}
	if tm, ok := reflect.TypeAssert[encoding.TextMarshaler](k); ok {
		if k.Kind() == reflect.Pointer && k.IsNil() {
			return "", nil
		}
		text, err := tm.MarshalText()
		return string(text), err
	}
	switch k.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(k.Int(), 10), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return strconv.FormatUint(k.Uint(), 10), nil
	}
	panic("unexpected map key type")
}
// appendString appends the JSON-quoted form of src to dst. It escapes
// quote, backslash and control bytes, replaces invalid UTF-8 with \ufffd,
// and always escapes U+2028/U+2029; when escapeHTML is set it additionally
// escapes <, >, and &.
func appendString[Bytes []byte | string](dst []byte, src Bytes, escapeHTML bool) []byte {
	dst = append(dst, '"')
	start := 0
	for i := 0; i < len(src); {
		if b := src[i]; b < utf8.RuneSelf {
			// Fast path: runs of safe ASCII bytes are copied in one append
			// via src[start:i] when the next escape (or the end) is reached.
			if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
				i++
				continue
			}
			dst = append(dst, src[start:i]...)
			switch b {
			case '\\', '"':
				dst = append(dst, '\\', b)
			case '\b':
				dst = append(dst, '\\', 'b')
			case '\f':
				dst = append(dst, '\\', 'f')
			case '\n':
				dst = append(dst, '\\', 'n')
			case '\r':
				dst = append(dst, '\\', 'r')
			case '\t':
				dst = append(dst, '\\', 't')
			default:
				// This encodes bytes < 0x20 except for \b, \f, \n, \r and \t.
				// If escapeHTML is set, it also escapes <, >, and &
				// because they can lead to security holes when
				// user-controlled strings are rendered into JSON
				// and served to some browsers.
				dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		// TODO(https://go.dev/issue/56948): Use generic utf8 functionality.
		// For now, cast only a small portion of byte slices to a string
		// so that it can be stack allocated. This slows down []byte slightly
		// due to the extra copy, but keeps string performance roughly the same.
		n := min(len(src)-i, utf8.UTFMax)
		c, size := utf8.DecodeRuneInString(string(src[i : i+n]))
		if c == utf8.RuneError && size == 1 {
			// Invalid UTF-8 byte: emit the replacement character escape.
			dst = append(dst, src[start:i]...)
			dst = append(dst, `\ufffd`...)
			i += size
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See https://en.wikipedia.org/wiki/JSON#Safety.
		if c == '\u2028' || c == '\u2029' {
			dst = append(dst, src[start:i]...)
			dst = append(dst, '\\', 'u', '2', '0', '2', hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	dst = append(dst, src[start:]...)
	dst = append(dst, '"')
	return dst
}
// A field represents a single field found in a struct.
type field struct {
	name      string
	nameBytes []byte // []byte(name)

	nameNonEsc  string // `"` + name + `":`
	nameEscHTML string // `"` + HTMLEscape(name) + `":`

	tag       bool  // whether the name came from a json tag
	index     []int // index path from the root struct to this field
	typ       reflect.Type
	omitEmpty bool                      // ",omitempty" option present
	omitZero  bool                      // ",omitzero" option present
	isZero    func(reflect.Value) bool  // custom zero test for omitzero; nil means use reflect's IsZero
	quoted    bool                      // ",string" option present

	encoder encoderFunc
}
// isZeroer matches types that provide their own zero test; it is used by
// the omitzero option (see typeFields) to decide whether a field is omitted.
type isZeroer interface {
	IsZero() bool
}

var isZeroerType = reflect.TypeFor[isZeroer]()
// typeFields returns the fields that JSON should recognize for type t,
// breadth-first over embedded structs so that shallower fields dominate,
// applying the Go visibility-plus-JSON-tag rules to resolve name conflicts.
func typeFields(t reflect.Type) structFields {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	var count, nextCount map[reflect.Type]int

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	// Buffer to run appendHTMLEscape on field names.
	var nameEscBuf []byte

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.Anonymous {
					t := sf.Type
					if t.Kind() == reflect.Pointer {
						t = t.Elem()
					}
					if !sf.IsExported() && t.Kind() != reflect.Struct {
						// Ignore embedded fields of unexported non-struct types.
						continue
					}
					// Do not ignore embedded fields of unexported struct types
					// since they may have exported fields.
				} else if !sf.IsExported() {
					// Ignore unexported non-embedded fields.
					continue
				}
				tag := sf.Tag.Get("json")
				if tag == "-" {
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Pointer {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Only strings, floats, integers, and booleans can be quoted.
				quoted := false
				if opts.Contains("string") {
					switch ft.Kind() {
					case reflect.Bool,
						reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
						reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
						reflect.Float32, reflect.Float64,
						reflect.String:
						quoted = true
					}
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					field := field{
						name:      name,
						tag:       tagged,
						index:     index,
						typ:       ft,
						omitEmpty: opts.Contains("omitempty"),
						omitZero:  opts.Contains("omitzero"),
						quoted:    quoted,
					}
					field.nameBytes = []byte(field.name)

					// Build nameEscHTML and nameNonEsc ahead of time.
					nameEscBuf = appendHTMLEscape(nameEscBuf[:0], field.nameBytes)
					field.nameEscHTML = `"` + string(nameEscBuf) + `":`
					field.nameNonEsc = `"` + field.name + `":`

					if field.omitZero {
						t := sf.Type
						// Provide a function that uses a type's IsZero method.
						switch {
						case t.Kind() == reflect.Interface && t.Implements(isZeroerType):
							field.isZero = func(v reflect.Value) bool {
								// Avoid panics calling IsZero on a nil interface or
								// non-nil interface with nil pointer.
								return v.IsNil() ||
									(v.Elem().Kind() == reflect.Pointer && v.Elem().IsNil()) ||
									v.Interface().(isZeroer).IsZero()
							}
						case t.Kind() == reflect.Pointer && t.Implements(isZeroerType):
							field.isZero = func(v reflect.Value) bool {
								// Avoid panics calling IsZero on nil pointer.
								return v.IsNil() || v.Interface().(isZeroer).IsZero()
							}
						case t.Implements(isZeroerType):
							field.isZero = func(v reflect.Value) bool {
								return v.Interface().(isZeroer).IsZero()
							}
						case reflect.PointerTo(t).Implements(isZeroerType):
							field.isZero = func(v reflect.Value) bool {
								if !v.CanAddr() {
									// Temporarily box v so we can take the address.
									v2 := reflect.New(v.Type()).Elem()
									v2.Set(v)
									v = v2
								}
								return v.Addr().Interface().(isZeroer).IsZero()
							}
						}
					}

					fields = append(fields, field)
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 and 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, field{name: ft.Name(), index: index, typ: ft})
				}
			}
		}
	}

	slices.SortFunc(fields, func(a, b field) int {
		// sort field by name, breaking ties with depth, then
		// breaking ties with "name came from json tag", then
		// breaking ties with index sequence.
		if c := strings.Compare(a.name, b.name); c != 0 {
			return c
		}
		if c := cmp.Compare(len(a.index), len(b.index)); c != 0 {
			return c
		}
		if a.tag != b.tag {
			if a.tag {
				return -1
			}
			return +1
		}
		return slices.Compare(a.index, b.index)
	})

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with JSON tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	// Restore declaration order for the final encoding order.
	slices.SortFunc(fields, func(i, j field) int {
		return slices.Compare(i.index, j.index)
	})

	for i := range fields {
		f := &fields[i]
		f.encoder = typeEncoder(typeByIndex(t, f.index))
	}

	exactNameIndex := make(map[string]*field, len(fields))
	foldedNameIndex := make(map[string]*field, len(fields))
	for i, field := range fields {
		exactNameIndex[field.name] = &fields[i]
		// For historical reasons, first folded match takes precedence.
		if _, ok := foldedNameIndex[string(foldName(field.nameBytes))]; !ok {
			foldedNameIndex[string(foldName(field.nameBytes))] = &fields[i]
		}
	}
	return structFields{fields, exactNameIndex, foldedNameIndex}
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The incoming slice is sorted by index length, then by tag presence,
	// so the first entry is the only dominance candidate.
	if len(fields) == 1 {
		return fields[0], true
	}
	// A tie — same depth and same tagged-ness — means no field dominates.
	if len(fields[0].index) == len(fields[1].index) && fields[0].tag == fields[1].tag {
		return field{}, false
	}
	return fields[0], true
}
// fieldCache caches the result of typeFields keyed by struct type.
var fieldCache sync.Map // map[reflect.Type]structFields

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) structFields {
	if cached, ok := fieldCache.Load(t); ok {
		return cached.(structFields)
	}
	// Compute outside the cache; LoadOrStore keeps whichever entry won a race.
	computed, _ := fieldCache.LoadOrStore(t, typeFields(t))
	return computed.(structFields)
}
// mayAppendQuote appends a double-quote to b only when quoted is set
// (the ",string" struct tag option).
func mayAppendQuote(b []byte, quoted bool) []byte {
	if !quoted {
		return b
	}
	return append(b, '"')
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.jsonv2
package json
import (
"unicode"
"unicode/utf8"
)
// foldName returns a folded string such that foldName(x) == foldName(y)
// is identical to bytes.EqualFold(x, y).
func foldName(in []byte) []byte {
	// This is inlinable to take advantage of "function outlining".
	var scratch [32]byte // stack space; large enough for most JSON names
	return appendFoldedName(scratch[:0], in)
}
// appendFoldedName appends the case-folded form of in to out:
// ASCII letters are uppercased directly, other runes go through foldRune.
func appendFoldedName(out, in []byte) []byte {
	i := 0
	for i < len(in) {
		// Handle single-byte ASCII.
		if c := in[i]; c < utf8.RuneSelf {
			if 'a' <= c && c <= 'z' {
				c -= 'a' - 'A'
			}
			out = append(out, c)
			i++
			continue
		}
		// Handle multi-byte Unicode.
		r, n := utf8.DecodeRune(in[i:])
		out = utf8.AppendRune(out, foldRune(r))
		i += n
	}
	return out
}
// foldRune returns the smallest rune for all runes in the same fold set.
func foldRune(r rune) rune {
	// SimpleFold cycles through a fold set in increasing rune order,
	// wrapping around at the end; the wrap-around is the minimum element.
	for {
		next := unicode.SimpleFold(r)
		if next <= r {
			return next
		}
		r = next
	}
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.jsonv2
package json
import "bytes"
// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must be used.
func HTMLEscape(dst *bytes.Buffer, src []byte) {
	dst.Grow(len(src))
	escaped := appendHTMLEscape(dst.AvailableBuffer(), src)
	dst.Write(escaped)
}
// appendHTMLEscape appends src to dst with <, >, & escaped as \u00XX and
// U+2028/U+2029 escaped as \u2028/\u2029; all other bytes are copied as-is.
func appendHTMLEscape(dst, src []byte) []byte {
	// The characters can only appear in string literals,
	// so just scan the string one byte at a time.
	start := 0
	for i, c := range src {
		if c == '<' || c == '>' || c == '&' {
			dst = append(dst, src[start:i]...)
			dst = append(dst, '\\', 'u', '0', '0', hex[c>>4], hex[c&0xF])
			start = i + 1
		}
		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
		// The &^1 trick matches both trailing bytes A8 and A9 at once.
		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
			dst = append(dst, src[start:i]...)
			dst = append(dst, '\\', 'u', '2', '0', '2', hex[src[i+2]&0xF])
			start = i + len("\u2029")
		}
	}
	return append(dst, src[start:]...)
}
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
	dst.Grow(len(src))
	out, err := appendCompact(dst.AvailableBuffer(), src, false)
	dst.Write(out)
	return err
}
// appendCompact appends the compact form of the JSON-encoded src to dst,
// optionally (escape) escaping <, >, & and U+2028/U+2029 for HTML safety.
// On a syntax error it returns dst truncated back to its original length
// along with the scanner's error.
func appendCompact(dst, src []byte, escape bool) ([]byte, error) {
	origLen := len(dst)
	scan := newScanner()
	defer freeScanner(scan)
	// start marks the beginning of the pending run of bytes to copy verbatim.
	start := 0
	for i, c := range src {
		if escape && (c == '<' || c == '>' || c == '&') {
			if start < i {
				dst = append(dst, src[start:i]...)
			}
			dst = append(dst, '\\', 'u', '0', '0', hex[c>>4], hex[c&0xF])
			start = i + 1
		}
		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
		if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
			if start < i {
				dst = append(dst, src[start:i]...)
			}
			dst = append(dst, '\\', 'u', '2', '0', '2', hex[src[i+2]&0xF])
			start = i + 3
		}
		v := scan.step(scan, c)
		if v >= scanSkipSpace {
			if v == scanError {
				break
			}
			// Flush the pending run and drop this (whitespace) byte.
			if start < i {
				dst = append(dst, src[start:i]...)
			}
			start = i + 1
		}
	}
	if scan.eof() == scanError {
		return dst[:origLen], scan.err
	}
	if start < len(src) {
		dst = append(dst, src[start:]...)
	}
	return dst, nil
}
// appendNewline appends a newline, the line prefix, and depth copies of
// the indent string to dst.
func appendNewline(dst []byte, prefix, indent string, depth int) []byte {
	dst = append(dst, '\n')
	dst = append(dst, prefix...)
	for ; depth > 0; depth-- {
		dst = append(dst, indent...)
	}
	return dst
}
// indentGrowthFactor specifies the growth factor of indenting JSON input.
// Empirically, the growth factor was measured to be between 1.4x to 1.8x
// for some set of compacted JSON with the indent being a single tab.
// Specify a growth factor slightly larger than what is observed
// to reduce probability of allocation in appendIndent.
// A factor no higher than 2 ensures that wasted space never exceeds 50%.
const indentGrowthFactor = 2

// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
	dst.Grow(indentGrowthFactor * len(src))
	b := dst.AvailableBuffer()
	// appendIndent returns b truncated to its (empty) original length on a
	// syntax error, so nothing is appended to dst in that case.
	b, err := appendIndent(b, src, prefix, indent)
	dst.Write(b)
	return err
}
// appendIndent appends the indented form of the JSON-encoded src to dst,
// using prefix at the start of each line plus one copy of indent per
// nesting level. On a syntax error it returns dst truncated back to its
// original length along with the scanner's error.
func appendIndent(dst, src []byte, prefix, indent string) ([]byte, error) {
	origLen := len(dst)
	scan := newScanner()
	defer freeScanner(scan)
	needIndent := false
	depth := 0
	for _, c := range src {
		scan.bytes++
		v := scan.step(scan, c)
		if v == scanSkipSpace {
			// Insignificant whitespace is dropped; indentation is re-created.
			continue
		}
		if v == scanError {
			break
		}
		if needIndent && v != scanEndObject && v != scanEndArray {
			needIndent = false
			depth++
			dst = appendNewline(dst, prefix, indent, depth)
		}

		// Emit semantically uninteresting bytes
		// (in particular, punctuation in strings) unmodified.
		if v == scanContinue {
			dst = append(dst, c)
			continue
		}

		// Add spacing around real punctuation.
		switch c {
		case '{', '[':
			// delay indent so that empty object and array are formatted as {} and [].
			needIndent = true
			dst = append(dst, c)
		case ',':
			dst = append(dst, c)
			dst = appendNewline(dst, prefix, indent, depth)
		case ':':
			dst = append(dst, c, ' ')
		case '}', ']':
			if needIndent {
				// suppress indent in empty object/array
				needIndent = false
			} else {
				depth--
				dst = appendNewline(dst, prefix, indent, depth)
			}
			dst = append(dst, c)
		default:
			dst = append(dst, c)
		}
	}
	if scan.eof() == scanError {
		return dst[:origLen], scan.err
	}
	return dst, nil
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.jsonv2
package json
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import (
"strconv"
"sync"
)
// Valid reports whether data is a valid JSON encoding.
func Valid(data []byte) bool {
	s := newScanner()
	defer freeScanner(s)
	return checkValid(data, s) == nil
}
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
// checkValid returns nil or a SyntaxError.
func checkValid(data []byte, scan *scanner) error {
	scan.reset()
	for i := 0; i < len(data); i++ {
		scan.bytes++
		if scan.step(scan, data[i]) == scanError {
			return scan.err
		}
	}
	// Feed EOF so trailing values (e.g. a bare number) are finalized.
	if scan.eof() == scanError {
		return scan.err
	}
	return nil
}
// A SyntaxError is a description of a JSON syntax error.
// [Unmarshal] will return a SyntaxError if the JSON can't be parsed.
type SyntaxError struct {
	msg    string // description of error
	Offset int64  // error occurred after reading Offset bytes
}

// Error implements the error interface.
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
	// The step is a func to be called to execute the next transition.
	// Also tried using an integer constant and a single func
	// with a switch, but using the func directly was 10% faster
	// on a 64-bit Mac Mini, and it's nicer to read.
	step func(*scanner, byte) int

	// Reached end of top-level value.
	endTop bool

	// Stack of what we're in the middle of - array values, object keys, object values.
	// Entries are the parseObjectKey/parseObjectValue/parseArrayValue constants.
	parseState []int

	// Error that happened, if any.
	err error

	// total bytes consumed, updated by decoder.Decode (and deliberately
	// not set to zero by scan.reset)
	bytes int64
}
// scannerPool recycles scanner values across calls (see newScanner and
// freeScanner) to avoid an allocation per scan.
var scannerPool = sync.Pool{
	New: func() any {
		return &scanner{}
	},
}
// newScanner returns a reset scanner, reusing a pooled one when available.
func newScanner() *scanner {
	s := scannerPool.Get().(*scanner)
	// scan.reset by design doesn't set bytes to zero, so clear it here.
	s.bytes = 0
	s.reset()
	return s
}
// freeScanner returns scan to the pool for reuse.
func freeScanner(scan *scanner) {
	// Avoid hanging on to too much memory in extreme cases: drop an
	// oversized parse stack rather than pinning it in the pool.
	if len(scan.parseState) > 1024 {
		scan.parseState = nil
	}
	scannerPool.Put(scan)
}
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
	// Continue.
	scanContinue     = iota // uninteresting byte
	scanBeginLiteral        // end implied by next result != scanContinue
	scanBeginObject         // begin object
	scanObjectKey           // just finished object key (string)
	scanObjectValue         // just finished non-last object value
	scanEndObject           // end object (implies scanObjectValue if possible)
	scanBeginArray          // begin array
	scanArrayValue          // just finished array value
	scanEndArray            // end array (implies scanArrayValue if possible)
	scanSkipSpace           // space byte; can skip; known to be last "continue" result

	// Stop.
	scanEnd   // top-level value ended *before* this byte; known to be first "stop" result
	scanError // hit an error, scanner.err.
)

// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
	parseObjectKey   = iota // parsing object key (before colon)
	parseObjectValue        // parsing object value (after colon)
	parseArrayValue         // parsing array value
)

// This limits the max nesting depth to prevent stack overflow.
// This is permitted by https://tools.ietf.org/html/rfc7159#section-9
const maxNestingDepth = 10000
// reset prepares the scanner for use.
// It must be called before calling s.step.
// Note that reset deliberately leaves s.bytes untouched.
func (s *scanner) reset() {
	s.step = stateBeginValue
	s.parseState = s.parseState[:0]
	s.err = nil
	s.endTop = false
}
// eof signals the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
	switch {
	case s.err != nil:
		return scanError
	case s.endTop:
		return scanEnd
	}
	// Feed a virtual trailing space so a pending literal (for
	// example a bare number) gets a chance to terminate cleanly.
	s.step(s, ' ')
	if s.endTop {
		return scanEnd
	}
	if s.err == nil {
		s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
	}
	return scanError
}
// pushParseState pushes newParseState onto the parse stack.
// It returns successState, or an error state if the push would
// exceed maxNestingDepth.
func (s *scanner) pushParseState(c byte, newParseState int, successState int) int {
	s.parseState = append(s.parseState, newParseState)
	if len(s.parseState) > maxNestingDepth {
		return s.error(c, "exceeded max depth")
	}
	return successState
}
// popParseState pops one entry off the parse stack and points
// s.step at the appropriate continuation state.
func (s *scanner) popParseState() {
	s.parseState = s.parseState[:len(s.parseState)-1]
	if len(s.parseState) == 0 {
		// The top-level value is now complete.
		s.step = stateEndTop
		s.endTop = true
		return
	}
	s.step = stateEndValue
}
// isSpace reports whether c is a JSON whitespace byte:
// space, tab, carriage return, or newline.
func isSpace(c byte) bool {
	switch c {
	case ' ', '\t', '\r', '\n':
		return true
	}
	return false
}
// stateBeginValueOrEmpty is the state after reading `[`:
// either a closing `]` (empty array) or the first element follows.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
	switch {
	case isSpace(c):
		return scanSkipSpace
	case c == ']':
		return stateEndValue(s, c)
	default:
		return stateBeginValue(s, c)
	}
}
// stateBeginValue is the state at the beginning of the input
// or wherever a value is expected.
func stateBeginValue(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	switch {
	case c == '{':
		s.step = stateBeginStringOrEmpty
		return s.pushParseState(c, parseObjectKey, scanBeginObject)
	case c == '[':
		s.step = stateBeginValueOrEmpty
		return s.pushParseState(c, parseArrayValue, scanBeginArray)
	case c == '"':
		s.step = stateInString
		return scanBeginLiteral
	case c == '-': // leading minus of a number
		s.step = stateNeg
		return scanBeginLiteral
	case c == '0': // beginning of 0.123
		s.step = state0
		return scanBeginLiteral
	case '1' <= c && c <= '9': // beginning of 1234.5
		s.step = state1
		return scanBeginLiteral
	case c == 't': // beginning of true
		s.step = stateT
		return scanBeginLiteral
	case c == 'f': // beginning of false
		s.step = stateF
		return scanBeginLiteral
	case c == 'n': // beginning of null
		s.step = stateN
		return scanBeginLiteral
	}
	return s.error(c, "looking for beginning of value")
}
// stateBeginStringOrEmpty is the state after reading `{`:
// either a closing `}` (empty object) or the first key follows.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
	if isSpace(c) {
		return scanSkipSpace
	}
	if c != '}' {
		return stateBeginString(s, c)
	}
	// Pretend we just finished an object value so that
	// stateEndValue pops the empty object cleanly.
	s.parseState[len(s.parseState)-1] = parseObjectValue
	return stateEndValue(s, c)
}
// stateBeginString is the state when an object key is expected,
// such as after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
	switch {
	case isSpace(c):
		return scanSkipSpace
	case c == '"':
		s.step = stateInString
		return scanBeginLiteral
	}
	return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
	n := len(s.parseState)
	if n == 0 {
		// The top-level value ended before the current byte.
		s.step = stateEndTop
		s.endTop = true
		return stateEndTop(s, c)
	}
	if isSpace(c) {
		s.step = stateEndValue
		return scanSkipSpace
	}
	switch s.parseState[n-1] {
	case parseObjectKey:
		if c == ':' {
			s.parseState[n-1] = parseObjectValue
			s.step = stateBeginValue
			return scanObjectKey
		}
		return s.error(c, "after object key")
	case parseObjectValue:
		switch c {
		case ',':
			s.parseState[n-1] = parseObjectKey
			s.step = stateBeginString
			return scanObjectValue
		case '}':
			s.popParseState()
			return scanEndObject
		}
		return s.error(c, "after object key:value pair")
	case parseArrayValue:
		switch c {
		case ',':
			s.step = stateBeginValue
			return scanArrayValue
		case ']':
			s.popParseState()
			return scanEndArray
		}
		return s.error(c, "after array element")
	}
	return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters are acceptable from here on.
func stateEndTop(s *scanner, c byte) int {
	if !isSpace(c) {
		// Record the complaint; it surfaces on the next call.
		s.error(c, "after top-level value")
	}
	return scanEnd
}
// stateInString is the state after reading the opening `"` of a string.
func stateInString(s *scanner, c byte) int {
	switch {
	case c == '"': // closing quote
		s.step = stateEndValue
		return scanContinue
	case c == '\\': // start of an escape sequence
		s.step = stateInStringEsc
		return scanContinue
	case c < 0x20: // raw control characters are not allowed in strings
		return s.error(c, "in string literal")
	}
	return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
	switch c {
	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
		// Single-character escape; back to the string body.
		s.step = stateInString
		return scanContinue
	case 'u':
		// Start of a four-digit hexadecimal escape.
		s.step = stateInStringEscU
		return scanContinue
	default:
		return s.error(c, "in string escape code")
	}
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
	// Expect the first of four hexadecimal digits.
	if '0' <= c && c <= '9' || 'A' <= c && c <= 'F' || 'a' <= c && c <= 'f' {
		s.step = stateInStringEscU1
		return scanContinue
	}
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
	// Expect the second hexadecimal digit.
	if '0' <= c && c <= '9' || 'A' <= c && c <= 'F' || 'a' <= c && c <= 'f' {
		s.step = stateInStringEscU12
		return scanContinue
	}
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
	// Expect the third hexadecimal digit.
	if '0' <= c && c <= '9' || 'A' <= c && c <= 'F' || 'a' <= c && c <= 'f' {
		s.step = stateInStringEscU123
		return scanContinue
	}
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
	// Expect the fourth hexadecimal digit, then return to the string body.
	if '0' <= c && c <= '9' || 'A' <= c && c <= 'F' || 'a' <= c && c <= 'f' {
		s.step = stateInString
		return scanContinue
	}
	return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading the `-` of a number.
func stateNeg(s *scanner, c byte) int {
	switch {
	case c == '0':
		s.step = state0
		return scanContinue
	case '1' <= c && c <= '9':
		s.step = state1
		return scanContinue
	}
	return s.error(c, "in numeric literal")
}

// state1 is the state inside the integer part of a number that began
// with a non-zero digit, such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
	if c >= '0' && c <= '9' {
		s.step = state1
		return scanContinue
	}
	return state0(s, c)
}

// state0 is the state once the integer part of a number is complete,
// such as after reading `0`.
func state0(s *scanner, c byte) int {
	switch {
	case c == '.':
		s.step = stateDot
		return scanContinue
	case c == 'e' || c == 'E':
		s.step = stateE
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateDot is the state just after the decimal point of a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
	if c >= '0' && c <= '9' {
		s.step = stateDot0
		return scanContinue
	}
	return s.error(c, "after decimal point in numeric literal")
}

// stateDot0 is the state inside the fractional digits of a number,
// such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
	switch {
	case c >= '0' && c <= '9':
		return scanContinue
	case c == 'e' || c == 'E':
		s.step = stateE
		return scanContinue
	}
	return stateEndValue(s, c)
}

// stateE is the state just after the exponent marker of a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
	if c == '+' || c == '-' {
		s.step = stateESign
		return scanContinue
	}
	return stateESign(s, c)
}

// stateESign is the state after the exponent marker and optional sign,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
	if c >= '0' && c <= '9' {
		s.step = stateE0
		return scanContinue
	}
	return s.error(c, "in exponent of numeric literal")
}

// stateE0 is the state inside the exponent digits of a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
	if c >= '0' && c <= '9' {
		return scanContinue
	}
	return stateEndValue(s, c)
}
// stateT is the state after reading the `t` of a true literal.
func stateT(s *scanner, c byte) int {
	if c != 'r' {
		return s.error(c, "in literal true (expecting 'r')")
	}
	s.step = stateTr
	return scanContinue
}

// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
	if c != 'u' {
		return s.error(c, "in literal true (expecting 'u')")
	}
	s.step = stateTru
	return scanContinue
}

// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
	if c != 'e' {
		return s.error(c, "in literal true (expecting 'e')")
	}
	s.step = stateEndValue
	return scanContinue
}
// stateF is the state after reading the `f` of a false literal.
func stateF(s *scanner, c byte) int {
	if c != 'a' {
		return s.error(c, "in literal false (expecting 'a')")
	}
	s.step = stateFa
	return scanContinue
}

// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
	if c != 'l' {
		return s.error(c, "in literal false (expecting 'l')")
	}
	s.step = stateFal
	return scanContinue
}

// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
	if c != 's' {
		return s.error(c, "in literal false (expecting 's')")
	}
	s.step = stateFals
	return scanContinue
}

// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
	if c != 'e' {
		return s.error(c, "in literal false (expecting 'e')")
	}
	s.step = stateEndValue
	return scanContinue
}
// stateN is the state after reading the `n` of a null literal.
func stateN(s *scanner, c byte) int {
	if c != 'u' {
		return s.error(c, "in literal null (expecting 'u')")
	}
	s.step = stateNu
	return scanContinue
}

// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
	if c != 'l' {
		return s.error(c, "in literal null (expecting 'l')")
	}
	s.step = stateNul
	return scanContinue
}

// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
	if c != 'l' {
		return s.error(c, "in literal null (expecting 'l')")
	}
	s.step = stateEndValue
	return scanContinue
}
// stateError is the terminal state entered after a syntax error,
// such as after reading `[1}` or `5.1.2`. Every subsequent byte
// yields scanError.
func stateError(s *scanner, c byte) int {
	return scanError
}
// error records a syntax error for byte c in the given context and
// switches the scanner into the terminal error state.
func (s *scanner) error(c byte, context string) int {
	s.step = stateError
	msg := "invalid character " + quoteChar(c) + " " + context
	s.err = &SyntaxError{msg, s.bytes}
	return scanError
}
// quoteChar formats c as a single-quoted character literal for use
// in error messages.
func quoteChar(c byte) string {
	// Special cases that differ from double-quoted strings.
	switch c {
	case '\'':
		return `'\''`
	case '"':
		return `'"'`
	}
	// Otherwise reuse strconv's double-quoted escaping, swapping
	// the surrounding quotation marks.
	quoted := strconv.Quote(string(c))
	return "'" + quoted[1:len(quoted)-1] + "'"
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.jsonv2
package json
import (
"bytes"
"errors"
"io"
)
// A Decoder reads and decodes JSON values from an input stream.
type Decoder struct {
	r     io.Reader   // source of the JSON input
	buf   []byte      // input buffered from r but not yet consumed
	d     decodeState // reused to decode each value
	scanp int         // start of unread data in buf
	scanned int64 // amount of data already scanned
	scan    scanner // state machine used to find value boundaries
	err     error   // sticky error; once set, Decode keeps returning it

	// State for the streaming Token API.
	tokenState int   // what the token stream expects next
	tokenStack []int // saved states of the enclosing composites
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
	dec := new(Decoder)
	dec.r = r
	return dec
}
// UseNumber causes the Decoder to unmarshal a number into an
// interface value as a [Number] instead of as a float64.
func (dec *Decoder) UseNumber() {
	dec.d.useNumber = true
}

// DisallowUnknownFields causes the Decoder to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
func (dec *Decoder) DisallowUnknownFields() {
	dec.d.disallowUnknownFields = true
}
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for [Unmarshal] for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v any) error {
	if dec.err != nil {
		return dec.err
	}
	if err := dec.tokenPrepareForDecode(); err != nil {
		return err
	}
	if !dec.tokenValueAllowed() {
		return &SyntaxError{msg: "not at beginning of value", Offset: dec.InputOffset()}
	}

	// Buffer the entire next value before handing it to the decoder.
	n, err := dec.readValue()
	if err != nil {
		return err
	}
	start := dec.scanp
	dec.scanp += n
	dec.d.init(dec.buf[start : start+n])

	// Deliberately do not store the unmarshal error in dec.err:
	// a complete JSON value was consumed from the stream, so the
	// connection remains usable even if this value failed to decode.
	err = dec.d.unmarshal(v)

	// Keep the Token streaming state in sync.
	dec.tokenValueEnd()
	return err
}
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to [Decoder.Decode].
func (dec *Decoder) Buffered() io.Reader {
	unread := dec.buf[dec.scanp:]
	return bytes.NewReader(unread)
}
// readValue reads one complete JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
	dec.scan.reset()

	scanp := dec.scanp
	var err error
Input:
	// help the compiler see that scanp is never negative, so it can remove
	// some bounds checks below.
	for scanp >= 0 {

		// Look in the buffer for a new value.
		for ; scanp < len(dec.buf); scanp++ {
			c := dec.buf[scanp]
			dec.scan.bytes++
			switch dec.scan.step(&dec.scan, c) {
			case scanEnd:
				// scanEnd is delayed one byte so we decrement
				// the scanner bytes count by 1 to ensure that
				// this value is correct in the next call of Decode.
				dec.scan.bytes--
				break Input
			case scanEndObject, scanEndArray:
				// scanEnd is delayed one byte.
				// We might block trying to get that byte from src,
				// so instead invent a space byte.
				if stateEndValue(&dec.scan, ' ') == scanEnd {
					scanp++
					break Input
				}
			case scanError:
				dec.err = dec.scan.err
				return 0, dec.scan.err
			}
		}

		// Did the last read have an error?
		// Delayed until now to allow buffer scan.
		if err != nil {
			if err == io.EOF {
				// EOF at a clean value boundary is still a success.
				if dec.scan.step(&dec.scan, ' ') == scanEnd {
					break Input
				}
				// A non-whitespace remnant means the input ended mid-value.
				if nonSpace(dec.buf) {
					err = io.ErrUnexpectedEOF
				}
			}
			dec.err = err
			return 0, err
		}

		// refill may slide buffer contents down, changing dec.scanp;
		// preserve our progress relative to it across the call.
		n := scanp - dec.scanp
		err = dec.refill()
		scanp = dec.scanp + n
	}
	return scanp - dec.scanp, nil
}
// refill reads more data from the underlying reader into dec.buf,
// first discarding any data that has already been consumed.
func (dec *Decoder) refill() error {
	// Slide unread data to the front of the buffer, dropping the
	// already-decoded prefix and accounting for it in dec.scanned.
	if dec.scanp > 0 {
		dec.scanned += int64(dec.scanp)
		remaining := copy(dec.buf, dec.buf[dec.scanp:])
		dec.buf = dec.buf[:remaining]
		dec.scanp = 0
	}

	// Make sure there is a reasonable amount of spare capacity to read into.
	const minRead = 512
	if cap(dec.buf)-len(dec.buf) < minRead {
		grown := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
		copy(grown, dec.buf)
		dec.buf = grown
	}

	// Read into the spare capacity. The error, if any, is returned to
	// the caller, which delays acting on it until after the next scan.
	n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
	dec.buf = dec.buf[:len(dec.buf)+n]
	return err
}
// nonSpace reports whether b contains any byte that is not JSON
// whitespace (space, tab, carriage return, newline).
func nonSpace(b []byte) bool {
	for _, c := range b {
		switch c {
		case ' ', '\t', '\r', '\n':
			// whitespace; keep looking
		default:
			return true
		}
	}
	return false
}
// An Encoder writes JSON values to an output stream.
type Encoder struct {
	w   io.Writer
	err error // sticky write error; once set, Encode keeps returning it
	escapeHTML bool // whether to escape &, <, > in quoted strings

	indentBuf    []byte // scratch buffer reused for indented output
	indentPrefix string // per-line prefix configured by SetIndent
	indentValue  string // per-level indent configured by SetIndent
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
	enc := &Encoder{w: w}
	enc.escapeHTML = true // HTML-safe output is the default
	return enc
}
// Encode writes the JSON encoding of v to the stream,
// with insignificant space characters elided,
// followed by a newline character.
//
// See the documentation for [Marshal] for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v any) error {
	if enc.err != nil {
		return enc.err
	}

	state := newEncodeState()
	defer encodeStatePool.Put(state)

	if err := state.marshal(v, encOpts{escapeHTML: enc.escapeHTML}); err != nil {
		return err
	}

	// Terminate each value with a newline. This makes the output look
	// a little nicer when debugging, and some kind of space is required
	// if the encoded value was a number, so that the reader knows there
	// aren't more digits coming.
	state.WriteByte('\n')

	out := state.Bytes()
	if enc.indentPrefix != "" || enc.indentValue != "" {
		var err error
		enc.indentBuf, err = appendIndent(enc.indentBuf[:0], out, enc.indentPrefix, enc.indentValue)
		if err != nil {
			return err
		}
		out = enc.indentBuf
	}

	_, err := enc.w.Write(out)
	if err != nil {
		enc.err = err // write errors are sticky
	}
	return err
}
// SetIndent instructs the encoder to format each subsequent encoded
// value as if indented by the package-level function Indent(dst, src, prefix, indent).
// Calling SetIndent("", "") disables indentation.
func (enc *Encoder) SetIndent(prefix, indent string) {
	enc.indentPrefix, enc.indentValue = prefix, indent
}

// SetEscapeHTML specifies whether problematic HTML characters
// should be escaped inside JSON quoted strings.
// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e
// to avoid certain safety problems that can arise when embedding JSON in HTML.
//
// In non-HTML settings where the escaping interferes with the readability
// of the output, SetEscapeHTML(false) disables this behavior.
func (enc *Encoder) SetEscapeHTML(on bool) {
	enc.escapeHTML = on
}
// RawMessage is a raw encoded JSON value.
// It implements [Marshaler] and [Unmarshaler] and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte

// MarshalJSON returns m as the JSON encoding of m.
func (m RawMessage) MarshalJSON() ([]byte, error) {
	if m == nil {
		// A nil RawMessage encodes as the JSON literal null.
		return []byte("null"), nil
	}
	return m, nil
}

// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
	if m == nil {
		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
	}
	// Reuse m's existing backing array when it has capacity.
	*m = append((*m)[0:0], data...)
	return nil
}

// Compile-time checks that RawMessage satisfies both interfaces.
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
// A Token holds a value of one of these types:
//
//   - [Delim], for the four JSON delimiters [ ] { }
//   - bool, for JSON booleans
//   - float64, for JSON numbers
//   - [Number], for JSON numbers
//   - string, for JSON string literals
//   - nil, for JSON null
type Token any

// States for the Token streaming API, describing what the decoder
// expects to see next in the input.
const (
	tokenTopValue   = iota // at top level, expecting a value
	tokenArrayStart        // just read `[`; expecting first element or `]`
	tokenArrayValue        // expecting an array element
	tokenArrayComma        // expecting `,` or `]` after an element
	tokenObjectStart       // just read `{`; expecting first key or `}`
	tokenObjectKey         // expecting an object key string
	tokenObjectColon       // expecting `:` after a key
	tokenObjectValue       // expecting an object value
	tokenObjectComma       // expecting `,` or `}` after a value
)
// tokenPrepareForDecode advances the token state machine across a
// pending separator (`,` or `:`) so that a value may be decoded next.
func (dec *Decoder) tokenPrepareForDecode() error {
	// Note: peek is deliberately not called before the switch, to keep
	// it out of the standard Decode path; peek is only needed when the
	// Token API is in use.
	var want byte
	var next int
	var msg string
	switch dec.tokenState {
	case tokenArrayComma:
		want, next, msg = ',', tokenArrayValue, "expected comma after array element"
	case tokenObjectColon:
		want, next, msg = ':', tokenObjectValue, "expected colon after object key"
	default:
		return nil
	}
	c, err := dec.peek()
	if err != nil {
		return err
	}
	if c != want {
		return &SyntaxError{msg, dec.InputOffset()}
	}
	dec.scanp++
	dec.tokenState = next
	return nil
}
// tokenValueAllowed reports whether a value may legally appear at the
// current position in the token stream.
func (dec *Decoder) tokenValueAllowed() bool {
	switch dec.tokenState {
	case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
		return true
	default:
		return false
	}
}
// tokenValueEnd moves the token state past a value that has just been
// consumed, so that a separator is expected next.
func (dec *Decoder) tokenValueEnd() {
	switch dec.tokenState {
	case tokenArrayStart, tokenArrayValue:
		dec.tokenState = tokenArrayComma
	case tokenObjectValue:
		dec.tokenState = tokenObjectComma
	}
	// All other states are left unchanged.
}
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune

// String returns the delimiter as a one-character string.
func (d Delim) String() string {
	return string(d)
}
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, [io.EOF].
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type [Delim]
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
	for {
		c, err := dec.peek()
		if err != nil {
			return nil, err
		}
		switch c {
		case '[':
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			// Save the enclosing state so ']' can restore it.
			dec.scanp++
			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
			dec.tokenState = tokenArrayStart
			return Delim('['), nil
		case ']':
			if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
				return dec.tokenError(c)
			}
			// Pop the enclosing state saved at '['.
			dec.scanp++
			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
			dec.tokenValueEnd()
			return Delim(']'), nil
		case '{':
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			// Save the enclosing state so '}' can restore it.
			dec.scanp++
			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
			dec.tokenState = tokenObjectStart
			return Delim('{'), nil
		case '}':
			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
				return dec.tokenError(c)
			}
			// Pop the enclosing state saved at '{'.
			dec.scanp++
			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
			dec.tokenValueEnd()
			return Delim('}'), nil
		case ':':
			// Colons are consumed silently; no token is produced.
			if dec.tokenState != tokenObjectColon {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenState = tokenObjectValue
			continue
		case ',':
			// Commas are consumed silently; no token is produced.
			if dec.tokenState == tokenArrayComma {
				dec.scanp++
				dec.tokenState = tokenArrayValue
				continue
			}
			if dec.tokenState == tokenObjectComma {
				dec.scanp++
				dec.tokenState = tokenObjectKey
				continue
			}
			return dec.tokenError(c)
		case '"':
			if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
				// Object key: temporarily pretend we are at top level
				// so Decode accepts the bare string.
				var x string
				old := dec.tokenState
				dec.tokenState = tokenTopValue
				err := dec.Decode(&x)
				dec.tokenState = old
				if err != nil {
					return nil, err
				}
				dec.tokenState = tokenObjectColon
				return x, nil
			}
			// A string in value position is handled by the generic
			// value case below.
			fallthrough
		default:
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			var x any
			if err := dec.Decode(&x); err != nil {
				return nil, err
			}
			return x, nil
		}
	}
}
// tokenError builds the (nil, *SyntaxError) pair returned when byte c
// cannot legally appear in the current token state.
func (dec *Decoder) tokenError(c byte) (Token, error) {
	var context string
	switch dec.tokenState {
	case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
		context = " looking for beginning of value"
	case tokenArrayComma:
		context = " after array element"
	case tokenObjectKey:
		context = " looking for beginning of object key string"
	case tokenObjectColon:
		context = " after object key"
	case tokenObjectComma:
		context = " after object key:value pair"
	}
	return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.InputOffset()}
}
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
	c, err := dec.peek()
	if err != nil {
		return false
	}
	return c != ']' && c != '}'
}
// peek returns the next non-whitespace byte in the input without
// consuming it, refilling the buffer from the reader as needed.
func (dec *Decoder) peek() (byte, error) {
	var err error
	for {
		for pos := dec.scanp; pos < len(dec.buf); pos++ {
			c := dec.buf[pos]
			if isSpace(c) {
				continue
			}
			// Found a significant byte; record where it starts.
			dec.scanp = pos
			return c, nil
		}
		// The buffer held only whitespace. Report any error from the
		// previous refill before attempting another one.
		if err != nil {
			return 0, err
		}
		err = dec.refill()
	}
}
// InputOffset returns the input stream byte offset of the current decoder position.
// The offset gives the location of the end of the most recently returned token
// and the beginning of the next token.
func (dec *Decoder) InputOffset() int64 {
	return int64(dec.scanp) + dec.scanned
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.jsonv2
package json
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and its
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	name, opts, _ := strings.Cut(tag, ",")
	return name, tagOptions(opts)
}

// Contains reports whether the comma-separated option list o includes
// optionName as one of its elements.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	rest := string(o)
	for rest != "" {
		var opt string
		opt, rest, _ = strings.Cut(rest, ",")
		if opt == optionName {
			return true
		}
	}
	return false
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pem implements the PEM data encoding, which originated in Privacy
// Enhanced Mail. The most common use of PEM encoding today is in TLS keys and
// certificates. See RFC 1421.
package pem
import (
"bytes"
"encoding/base64"
"errors"
"io"
"slices"
"strings"
)
// A Block represents a PEM encoded structure.
//
// The encoded form is:
//
//	-----BEGIN Type-----
//	Headers
//	base64-encoded Bytes
//	-----END Type-----
//
// where [Block.Headers] is a possibly empty sequence of Key: Value lines.
type Block struct {
	Type    string            // The type, taken from the preamble (i.e. "RSA PRIVATE KEY").
	Headers map[string]string // Optional headers.
	Bytes   []byte            // The decoded bytes of the contents. Typically a DER encoded ASN.1 structure.
}
// getLine returns the first \r\n- or \n-delineated line from data,
// with trailing spaces, tabs, and line-ending bytes removed. rest is
// the remainder of data after the line ending (also without the
// line-ending bytes) and is always smaller than the original
// argument. consumed is the number of bytes of data the line
// occupied, including its line ending.
func getLine(data []byte) (line, rest []byte, consumed int) {
	end := bytes.IndexByte(data, '\n')
	var cut int
	if end < 0 {
		// No newline: the whole input is the final line.
		end = len(data)
		cut = end
	} else {
		cut = end + 1
		if end > 0 && data[end-1] == '\r' {
			end-- // drop the \r of a \r\n ending
		}
	}
	return bytes.TrimRight(data[:end], " \t"), data[cut:], cut
}
// removeSpacesAndTabs returns data with every space and tab removed.
// If data contains neither, it is returned unchanged without copying.
//
// Newlines are left in place because the base64 decoder already
// skips them.
func removeSpacesAndTabs(data []byte) []byte {
	if !bytes.ContainsAny(data, " \t") {
		// Fast path; most base64 data within PEM contains newlines,
		// but no spaces nor tabs. Skip the extra alloc and work.
		return data
	}
	out := make([]byte, 0, len(data))
	for _, c := range data {
		if c != ' ' && c != '\t' {
			out = append(out, c)
		}
	}
	return out
}
// pemStart and pemEnd begin with a newline so that matches are
// anchored to the start of a line; the very beginning of the input
// is special-cased in Decode.
var pemStart = []byte("\n-----BEGIN ")
var pemEnd = []byte("\n-----END ")

// pemEndOfLine is the run of dashes closing a BEGIN/END line.
var pemEndOfLine = []byte("-----")

// colon separates a header key from its value.
var colon = []byte(":")
// Decode will find the next PEM formatted block (certificate, private key
// etc) in the input. It returns that block and the remainder of the input. If
// no PEM data is found, p is nil and the whole of the input is returned in
// rest. Blocks must start at the beginning of a line and end at the end of a line.
func Decode(data []byte) (p *Block, rest []byte) {
	// pemStart begins with a newline. However, at the very beginning of
	// the byte array, we'll accept the start string without it.
	rest = data
	endTrailerIndex := 0
	for {
		// If we've already tried parsing a block, skip past the END we already
		// saw.
		if endTrailerIndex < 0 || endTrailerIndex > len(rest) {
			// Bookkeeping indexes fell out of range; no valid block remains.
			return nil, data
		}
		rest = rest[endTrailerIndex:]

		// Find the first END line, and then find the last BEGIN line before
		// the end line. This lets us skip any repeated BEGIN lines that don't
		// have a matching END.
		endIndex := bytes.Index(rest, pemEnd)
		if endIndex < 0 {
			return nil, data
		}
		endTrailerIndex = endIndex + len(pemEnd)
		beginIndex := bytes.LastIndex(rest[:endIndex], pemStart[1:])
		if beginIndex < 0 || (beginIndex > 0 && rest[beginIndex-1] != '\n') {
			// No line-anchored BEGIN before this END; try the next END.
			continue
		}

		// Rebase rest (and the indexes that point into it) to start just
		// after the "-----BEGIN " marker.
		rest = rest[beginIndex+len(pemStart)-1:]
		endIndex -= beginIndex + len(pemStart) - 1
		endTrailerIndex -= beginIndex + len(pemStart) - 1

		var typeLine []byte
		var consumed int
		typeLine, rest, consumed = getLine(rest)
		endIndex -= consumed
		endTrailerIndex -= consumed
		if !bytes.HasSuffix(typeLine, pemEndOfLine) {
			// The BEGIN line did not close with "-----"; malformed.
			continue
		}
		typeLine = typeLine[0 : len(typeLine)-len(pemEndOfLine)]

		p = &Block{
			Headers: make(map[string]string),
			Type:    string(typeLine),
		}

		for {
			// This loop terminates because getLine's second result is
			// always smaller than its argument.
			if len(rest) == 0 {
				return nil, data
			}
			line, next, consumed := getLine(rest)

			key, val, ok := bytes.Cut(line, colon)
			if !ok {
				// Not a "Key: Value" line; the headers are finished.
				break
			}

			// TODO(agl): need to cope with values that spread across lines.
			key = bytes.TrimSpace(key)
			val = bytes.TrimSpace(val)

			p.Headers[string(key)] = string(val)
			rest = next
			endIndex -= consumed
			endTrailerIndex -= consumed
		}

		// If there were headers, there must be a newline between the headers
		// and the END line, so endIndex should be >= 0.
		if len(p.Headers) > 0 && endIndex < 0 {
			continue
		}

		// After the "-----" of the ending line, there should be the same type
		// and then a final five dashes.
		endTrailer := rest[endTrailerIndex:]
		endTrailerLen := len(typeLine) + len(pemEndOfLine)
		if len(endTrailer) < endTrailerLen {
			continue
		}
		restOfEndLine := endTrailer[endTrailerLen:]
		endTrailer = endTrailer[:endTrailerLen]
		if !bytes.HasPrefix(endTrailer, typeLine) ||
			!bytes.HasSuffix(endTrailer, pemEndOfLine) {
			continue
		}

		// The line must end with only whitespace.
		if s, _, _ := getLine(restOfEndLine); len(s) != 0 {
			continue
		}

		p.Bytes = []byte{}
		if endIndex > 0 {
			base64Data := removeSpacesAndTabs(rest[:endIndex])
			p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data)))
			n, err := base64.StdEncoding.Decode(p.Bytes, base64Data)
			if err != nil {
				// Invalid base64 body; keep looking for another block.
				continue
			}
			p.Bytes = p.Bytes[:n]
		}

		// the -1 is because we might have only matched pemEnd without the
		// leading newline if the PEM block was empty.
		_, rest, _ = getLine(rest[endIndex+len(pemEnd)-1:])

		return p, rest
	}
}
const pemLineLength = 64
type lineBreaker struct {
line [pemLineLength]byte
used int
out io.Writer
}
var nl = []byte{'\n'}
func (l *lineBreaker) Write(b []byte) (n int, err error) {
if l.used+len(b) < pemLineLength {
copy(l.line[l.used:], b)
l.used += len(b)
return len(b), nil
}
n, err = l.out.Write(l.line[0:l.used])
if err != nil {
return
}
excess := pemLineLength - l.used
l.used = 0
n, err = l.out.Write(b[0:excess])
if err != nil {
return
}
n, err = l.out.Write(nl)
if err != nil {
return
}
return l.Write(b[excess:])
}
func (l *lineBreaker) Close() (err error) {
if l.used > 0 {
_, err = l.out.Write(l.line[0:l.used])
if err != nil {
return
}
_, err = l.out.Write(nl)
}
return
}
func writeHeader(out io.Writer, k, v string) error {
_, err := out.Write([]byte(k + ": " + v + "\n"))
return err
}
// Encode writes the PEM encoding of b to out.
func Encode(out io.Writer, b *Block) error {
	// Check for invalid block before writing any output.
	for k := range b.Headers {
		if strings.Contains(k, ":") {
			// A colon inside a key would be indistinguishable from
			// the "Key: Value" separator when decoding.
			return errors.New("pem: cannot encode a header key that contains a colon")
		}
	}

	// All errors below are relayed from underlying io.Writer,
	// so it is now safe to write data.

	// "-----BEGIN " (pemStart without its leading newline), the type,
	// and the closing dashes.
	if _, err := out.Write(pemStart[1:]); err != nil {
		return err
	}
	if _, err := out.Write([]byte(b.Type + "-----\n")); err != nil {
		return err
	}

	if len(b.Headers) > 0 {
		const procType = "Proc-Type"
		h := make([]string, 0, len(b.Headers))
		hasProcType := false
		for k := range b.Headers {
			if k == procType {
				hasProcType = true
				continue
			}
			h = append(h, k)
		}
		// The Proc-Type header must be written first.
		// See RFC 1421, section 4.6.1.1
		if hasProcType {
			if err := writeHeader(out, procType, b.Headers[procType]); err != nil {
				return err
			}
		}
		// For consistency of output, write other headers sorted by key.
		slices.Sort(h)
		for _, k := range h {
			if err := writeHeader(out, k, b.Headers[k]); err != nil {
				return err
			}
		}
		// A blank line separates the headers from the base64 body.
		if _, err := out.Write(nl); err != nil {
			return err
		}
	}

	// Base64-encode the payload, folded into pemLineLength-byte lines
	// by the lineBreaker.
	var breaker lineBreaker
	breaker.out = out

	b64 := base64.NewEncoder(base64.StdEncoding, &breaker)
	if _, err := b64.Write(b.Bytes); err != nil {
		return err
	}
	b64.Close()
	breaker.Close()

	if _, err := out.Write(pemEnd[1:]); err != nil {
		return err
	}
	_, err := out.Write([]byte(b.Type + "-----\n"))
	return err
}
// EncodeToMemory returns the PEM encoding of b.
//
// If b has invalid headers and cannot be encoded,
// EncodeToMemory returns nil. If it is important to
// report details about this error case, use [Encode] instead.
func EncodeToMemory(b *Block) []byte {
	buf := new(bytes.Buffer)
	if Encode(buf, b) != nil {
		return nil
	}
	return buf.Bytes()
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bufio"
"bytes"
"encoding"
"errors"
"fmt"
"io"
"reflect"
"strconv"
"strings"
)
const (
	// Header is a generic XML header suitable for use with the output of [Marshal].
	// This is not automatically added to any output of this package,
	// it is provided as a convenience. Write it to the output yourself,
	// before the marshaled data, when an XML declaration is desired.
	Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
)
// Marshal returns the XML encoding of v.
//
// Marshal handles an array or slice by marshaling each of the elements.
// Marshal handles a pointer by marshaling the value it points at or, if the
// pointer is nil, by writing nothing. Marshal handles an interface value by
// marshaling the value it contains or, if the interface value is nil, by
// writing nothing. Marshal handles all other data by writing one or more XML
// elements containing the data.
//
// The name for the XML elements is taken from, in order of preference:
// - the tag on the XMLName field, if the data is a struct
// - the value of the XMLName field of type [Name]
// - the tag of the struct field used to obtain the data
// - the name of the struct field used to obtain the data
// - the name of the marshaled type
//
// The XML element for a struct contains marshaled elements for each of the
// exported fields of the struct, with these exceptions:
// - the XMLName field, described above, is omitted.
// - a field with tag "-" is omitted.
// - a field with tag "name,attr" becomes an attribute with
// the given name in the XML element.
// - a field with tag ",attr" becomes an attribute with the
// field name in the XML element.
// - a field with tag ",chardata" is written as character data,
// not as an XML element.
// - a field with tag ",cdata" is written as character data
// wrapped in one or more <![CDATA[ ... ]]> tags, not as an XML element.
// - a field with tag ",innerxml" is written verbatim, not subject
// to the usual marshaling procedure.
// - a field with tag ",comment" is written as an XML comment, not
// subject to the usual marshaling procedure. It must not contain
// the "--" string within it.
// - a field with a tag including the "omitempty" option is omitted
// if the field value is empty. The empty values are false, 0, any
// nil pointer or interface value, and any array, slice, map, or
// string of length zero.
// - an anonymous struct field is handled as if the fields of its
// value were part of the outer struct.
// - an anonymous struct field of interface type is treated the same as having
// that type as its name, rather than being anonymous.
// - a field implementing [Marshaler] is written by calling its MarshalXML
// method.
// - a field implementing [encoding.TextMarshaler] is written by encoding the
// result of its MarshalText method as text.
//
// If a field uses a tag "a>b>c", then the element c will be nested inside
// parent elements a and b. Fields that appear next to each other that name
// the same parent will be enclosed in one XML element.
//
// If the XML name for a struct field is defined by both the field tag and the
// struct's XMLName field, the names must match.
//
// See [MarshalIndent] for an example.
//
// Marshal will return an error if asked to marshal a channel, function, or map.
func Marshal(v any) ([]byte, error) {
	var out bytes.Buffer
	enc := NewEncoder(&out)
	err := enc.Encode(v)
	if err == nil {
		// Close validates that all elements were properly terminated.
		err = enc.Close()
	}
	if err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
// Marshaler is the interface implemented by objects that can marshal
// themselves into valid XML elements.
//
// MarshalXML encodes the receiver as zero or more XML elements.
// By convention, arrays or slices are typically encoded as a sequence
// of elements, one per entry.
// Using start as the element tag is not required, but doing so
// will enable [Unmarshal] to match the XML elements to the correct
// struct field.
// One common implementation strategy is to construct a separate
// value with a layout corresponding to the desired XML and then
// to encode it using e.EncodeElement.
// Another common strategy is to use repeated calls to e.EncodeToken
// to generate the XML output one token at a time.
// The sequence of encoded tokens must make up zero or more valid
// XML elements.
type Marshaler interface {
	// MarshalXML writes the receiver's XML representation through e,
	// optionally using start as the outermost element tag.
	MarshalXML(e *Encoder, start StartElement) error
}
// MarshalerAttr is the interface implemented by objects that can marshal
// themselves into valid XML attributes.
//
// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver.
// Using name as the attribute name is not required, but doing so
// will enable [Unmarshal] to match the attribute to the correct
// struct field.
// If MarshalXMLAttr returns the zero attribute [Attr]{}, no attribute
// will be generated in the output.
// MarshalXMLAttr is used only for struct fields with the
// "attr" option in the field tag.
type MarshalerAttr interface {
	// MarshalXMLAttr returns the attribute to emit, or the zero Attr
	// to emit nothing.
	MarshalXMLAttr(name Name) (Attr, error)
}
// MarshalIndent works like [Marshal], but each XML element begins on a new
// indented line that starts with prefix and is followed by one or more
// copies of indent according to the nesting depth.
func MarshalIndent(v any, prefix, indent string) ([]byte, error) {
	var out bytes.Buffer
	enc := NewEncoder(&out)
	enc.Indent(prefix, indent)
	err := enc.Encode(v)
	if err == nil {
		err = enc.Close()
	}
	if err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
// An Encoder writes XML data to an output stream.
type Encoder struct {
	p printer // underlying printer state, including the buffered writer
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
	enc := new(Encoder)
	// The printer needs a back-pointer to the Encoder so user
	// Marshaler implementations can be handed the same Encoder.
	enc.p = printer{w: bufio.NewWriter(w), encoder: enc}
	return enc
}
// Indent sets the encoder to generate XML in which each element
// begins on a new indented line that starts with prefix and is followed by
// one or more copies of indent according to the nesting depth.
func (enc *Encoder) Indent(prefix, indent string) {
	enc.p.prefix, enc.p.indent = prefix, indent
}
// Encode writes the XML encoding of v to the stream.
//
// See the documentation for [Marshal] for details about the conversion
// of Go values to XML.
//
// Encode calls [Encoder.Flush] before returning.
func (enc *Encoder) Encode(v any) error {
	if err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil); err != nil {
		return err
	}
	return enc.p.w.Flush()
}
// EncodeElement writes the XML encoding of v to the stream,
// using start as the outermost tag in the encoding.
//
// See the documentation for [Marshal] for details about the conversion
// of Go values to XML.
//
// EncodeElement calls [Encoder.Flush] before returning.
func (enc *Encoder) EncodeElement(v any, start StartElement) error {
	if err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start); err != nil {
		return err
	}
	return enc.p.w.Flush()
}
// Byte sequences used when writing and validating comments and
// processing instructions.
var (
	begComment  = []byte("<!--")
	endComment  = []byte("-->")
	endProcInst = []byte("?>")
)
// EncodeToken writes the given XML token to the stream.
// It returns an error if [StartElement] and [EndElement] tokens are not properly matched.
//
// EncodeToken does not call [Encoder.Flush], because usually it is part of a larger operation
// such as [Encoder.Encode] or [Encoder.EncodeElement] (or a custom [Marshaler]'s MarshalXML invoked
// during those), and those will call Flush when finished.
// Callers that create an Encoder and then invoke EncodeToken directly, without
// using Encode or EncodeElement, need to call Flush when finished to ensure
// that the XML is written to the underlying writer.
//
// EncodeToken allows writing a [ProcInst] with Target set to "xml" only as the first token
// in the stream.
func (enc *Encoder) EncodeToken(t Token) error {
	p := &enc.p
	switch t := t.(type) {
	case StartElement:
		if err := p.writeStart(&t); err != nil {
			return err
		}
	case EndElement:
		if err := p.writeEnd(t.Name); err != nil {
			return err
		}
	case CharData:
		// Write errors are surfaced by cachedWriteError below.
		escapeText(p, t, false)
	case Comment:
		// Reject comments containing the terminator; the content is
		// otherwise written verbatim, unescaped.
		if bytes.Contains(t, endComment) {
			return fmt.Errorf("xml: EncodeToken of Comment containing --> marker")
		}
		p.WriteString("<!--")
		p.Write(t)
		p.WriteString("-->")
		return p.cachedWriteError()
	case ProcInst:
		// First token to be encoded which is also a ProcInst with target of xml
		// is the xml declaration. The only ProcInst where target of xml is allowed.
		if t.Target == "xml" && p.w.Buffered() != 0 {
			return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded")
		}
		if !isNameString(t.Target) {
			return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target")
		}
		if bytes.Contains(t.Inst, endProcInst) {
			return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker")
		}
		p.WriteString("<?")
		p.WriteString(t.Target)
		if len(t.Inst) > 0 {
			p.WriteByte(' ')
			p.Write(t.Inst)
		}
		p.WriteString("?>")
	case Directive:
		if !isValidDirective(t) {
			return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers")
		}
		p.WriteString("<!")
		p.Write(t)
		p.WriteString(">")
	default:
		return fmt.Errorf("xml: EncodeToken of invalid token type")
	}
	return p.cachedWriteError()
}
// isValidDirective reports whether dir is a valid directive text,
// meaning angle brackets are matched, ignoring comments and strings.
func isValidDirective(dir Directive) bool {
	var (
		depth     int   // net count of currently unmatched '<'
		inquote   uint8 // quote byte currently open ('\'' or '"'), or 0
		incomment bool  // true while inside a <!-- ... --> comment
	)
	for i, c := range dir {
		switch {
		case incomment:
			if c == '>' {
				// Leave comment state only on a full "-->" ending at i.
				if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) {
					incomment = false
				}
			}
			// Just ignore anything in comment
		case inquote != 0:
			if c == inquote {
				inquote = 0
			}
			// Just ignore anything within quotes
		case c == '\'' || c == '"':
			inquote = c
		case c == '<':
			// A '<' either opens a comment (if "<!--" starts here) or
			// counts as an unmatched bracket.
			if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) {
				incomment = true
			} else {
				depth++
			}
		case c == '>':
			if depth == 0 {
				return false
			}
			depth--
		}
	}
	// Valid only if everything opened was closed.
	return depth == 0 && inquote == 0 && !incomment
}
// Flush flushes any buffered XML to the underlying writer.
// See the [Encoder.EncodeToken] documentation for details about when it is necessary.
//
// Flush does not validate that open elements were closed; use
// [Encoder.Close] for that.
func (enc *Encoder) Flush() error {
	return enc.p.w.Flush()
}
// Close the Encoder, indicating that no more data will be written. It flushes
// any buffered XML to the underlying writer and returns an error if the
// written XML is invalid (e.g. by containing unclosed elements).
// After Close, further writes through the printer report an error.
func (enc *Encoder) Close() error {
	return enc.p.Close()
}
// printer holds the state used while writing marshaled XML: the
// buffered output writer, indentation settings, and the stacks of open
// elements and namespace prefixes.
type printer struct {
	w          *bufio.Writer
	encoder    *Encoder
	seq        int    // sequence number for generated namespace prefixes
	indent     string // per-level indent string (set by Encoder.Indent)
	prefix     string // per-line prefix string (set by Encoder.Indent)
	depth      int    // current element nesting depth, for indentation
	indentedIn bool
	putNewline bool
	attrNS     map[string]string // map prefix -> name space
	attrPrefix map[string]string // map name space -> prefix
	prefixes   []string          // stack of declared prefixes; "" marks an element boundary
	tags       []Name            // stack of open element names
	closed     bool  // set once Close has been called
	err        error // first write error; sticky once set
}
// createAttrPrefix finds the name space prefix attribute to use for the given name space,
// defining a new prefix if necessary. It returns the prefix.
//
// When a new prefix is defined, the xmlns:prefix="url" declaration is
// written to the output immediately (the caller is in the middle of
// writing an element's attribute list) and the prefix is recorded so
// popPrefix can retire it when the element closes.
func (p *printer) createAttrPrefix(url string) string {
	if prefix := p.attrPrefix[url]; prefix != "" {
		return prefix
	}

	// The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml"
	// and must be referred to that way.
	// (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns",
	// but users should not be trying to use that one directly - that's our job.)
	if url == xmlURL {
		return xmlPrefix
	}

	// Need to define a new name space.
	if p.attrPrefix == nil {
		p.attrPrefix = make(map[string]string)
		p.attrNS = make(map[string]string)
	}

	// Pick a name. We try to use the final element of the path
	// but fall back to _.
	prefix := strings.TrimRight(url, "/")
	if i := strings.LastIndex(prefix, "/"); i >= 0 {
		prefix = prefix[i+1:]
	}
	if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") {
		prefix = "_"
	}
	// xmlanything is reserved and any variant of it regardless of
	// case should be matched, so:
	//	(('X'|'x') ('M'|'m') ('L'|'l'))
	// See Section 2.3 of https://www.w3.org/TR/REC-xml/
	if len(prefix) >= 3 && strings.EqualFold(prefix[:3], "xml") {
		prefix = "_" + prefix
	}
	if p.attrNS[prefix] != "" {
		// Name is taken. Find a better one.
		for p.seq++; ; p.seq++ {
			if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" {
				prefix = id
				break
			}
		}
	}

	p.attrPrefix[url] = prefix
	p.attrNS[prefix] = url

	p.WriteString(`xmlns:`)
	p.WriteString(prefix)
	p.WriteString(`="`)
	EscapeText(p, []byte(url))
	p.WriteString(`" `)

	p.prefixes = append(p.prefixes, prefix)

	return prefix
}
// deleteAttrPrefix retires an attribute name space prefix, removing the
// mapping in both directions.
func (p *printer) deleteAttrPrefix(prefix string) {
	url := p.attrNS[prefix]
	delete(p.attrPrefix, url)
	delete(p.attrNS, prefix)
}
// markPrefix pushes a sentinel empty string onto the prefix stack;
// popPrefix retires prefixes until it finds this mark.
func (p *printer) markPrefix() {
	p.prefixes = append(p.prefixes, "")
}
// popPrefix retires every namespace prefix declared since the matching
// markPrefix call, stopping after consuming the "" sentinel.
func (p *printer) popPrefix() {
	for n := len(p.prefixes); n > 0; n = len(p.prefixes) {
		last := p.prefixes[n-1]
		p.prefixes = p.prefixes[:n-1]
		if last == "" {
			break
		}
		p.deleteAttrPrefix(last)
	}
}
// marshalValue writes one or more XML elements representing val.
// If val was obtained from a struct field, finfo must have its details.
//
// Marshaling precedence: Marshaler (value, then pointer), then
// encoding.TextMarshaler (value, then pointer), then slice/array
// iteration, then struct/simple-value encoding.
func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error {
	if startTemplate != nil && startTemplate.Name.Local == "" {
		return fmt.Errorf("xml: EncodeElement of StartElement with missing name")
	}

	// Invalid values marshal to nothing.
	if !val.IsValid() {
		return nil
	}
	// Honor ",omitempty" before dereferencing, so nil pointers are skipped.
	if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) {
		return nil
	}

	// Drill into interfaces and pointers.
	// This can turn into an infinite loop given a cyclic chain,
	// but it matches the Go 1 behavior.
	for val.Kind() == reflect.Interface || val.Kind() == reflect.Pointer {
		if val.IsNil() {
			return nil
		}
		val = val.Elem()
	}

	kind := val.Kind()
	typ := val.Type()

	// Check for marshaler, on the value and then on its address.
	if val.CanInterface() {
		if marshaler, ok := reflect.TypeAssert[Marshaler](val); ok {
			return p.marshalInterface(marshaler, defaultStart(typ, finfo, startTemplate))
		}
	}
	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() {
			if marshaler, ok := reflect.TypeAssert[Marshaler](pv); ok {
				return p.marshalInterface(marshaler, defaultStart(pv.Type(), finfo, startTemplate))
			}
		}
	}

	// Check for text marshaler, again value first, then address.
	if val.CanInterface() {
		if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](val); ok {
			return p.marshalTextInterface(textMarshaler, defaultStart(typ, finfo, startTemplate))
		}
	}
	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() {
			if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](pv); ok {
				return p.marshalTextInterface(textMarshaler, defaultStart(pv.Type(), finfo, startTemplate))
			}
		}
	}

	// Slices and arrays iterate over the elements. They do not have an enclosing tag.
	if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 {
		for i, n := 0, val.Len(); i < n; i++ {
			if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil {
				return err
			}
		}
		return nil
	}

	tinfo, err := getTypeInfo(typ)
	if err != nil {
		return err
	}

	// Create start element.
	// Precedence for the XML element name is:
	// 0. startTemplate
	// 1. XMLName field in underlying struct;
	// 2. field name/tag in the struct field; and
	// 3. type name
	var start StartElement

	if startTemplate != nil {
		start.Name = startTemplate.Name
		start.Attr = append(start.Attr, startTemplate.Attr...)
	} else if tinfo.xmlname != nil {
		xmlname := tinfo.xmlname
		if xmlname.name != "" {
			start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name
		} else {
			fv := xmlname.value(val, dontInitNilPointers)
			if v, ok := reflect.TypeAssert[Name](fv); ok && v.Local != "" {
				start.Name = v
			}
		}
	}
	if start.Name.Local == "" && finfo != nil {
		start.Name.Space, start.Name.Local = finfo.xmlns, finfo.name
	}
	if start.Name.Local == "" {
		name := typ.Name()
		if i := strings.IndexByte(name, '['); i >= 0 {
			// Truncate generic instantiation name. See issue 48318.
			name = name[:i]
		}
		if name == "" {
			return &UnsupportedTypeError{typ}
		}
		start.Name.Local = name
	}

	// Attributes
	for i := range tinfo.fields {
		finfo := &tinfo.fields[i]
		if finfo.flags&fAttr == 0 {
			continue
		}
		fv := finfo.value(val, dontInitNilPointers)

		if finfo.flags&fOmitEmpty != 0 && (!fv.IsValid() || isEmptyValue(fv)) {
			continue
		}

		if fv.Kind() == reflect.Interface && fv.IsNil() {
			continue
		}

		name := Name{Space: finfo.xmlns, Local: finfo.name}
		if err := p.marshalAttr(&start, name, fv); err != nil {
			return err
		}
	}

	// If an empty name was found, namespace is overridden with an empty space
	if tinfo.xmlname != nil && start.Name.Space == "" &&
		tinfo.xmlname.xmlns == "" && tinfo.xmlname.name == "" &&
		len(p.tags) != 0 && p.tags[len(p.tags)-1].Space != "" {
		start.Attr = append(start.Attr, Attr{Name{"", xmlnsPrefix}, ""})
	}
	if err := p.writeStart(&start); err != nil {
		return err
	}

	if val.Kind() == reflect.Struct {
		err = p.marshalStruct(tinfo, val)
	} else {
		s, b, err1 := p.marshalSimple(typ, val)
		if err1 != nil {
			err = err1
		} else if b != nil {
			EscapeText(p, b)
		} else {
			p.EscapeString(s)
		}
	}
	if err != nil {
		return err
	}

	if err := p.writeEnd(start.Name); err != nil {
		return err
	}

	return p.cachedWriteError()
}
// marshalAttr marshals an attribute with the given name and value, adding to start.Attr.
//
// Precedence: MarshalerAttr (value, then pointer), TextMarshaler
// (value, then pointer), then plain values after dereferencing
// pointers and interfaces; slices of non-bytes recurse per element.
func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) error {
	if val.CanInterface() {
		if marshaler, ok := reflect.TypeAssert[MarshalerAttr](val); ok {
			attr, err := marshaler.MarshalXMLAttr(name)
			if err != nil {
				return err
			}
			// A zero Attr means "emit no attribute".
			if attr.Name.Local != "" {
				start.Attr = append(start.Attr, attr)
			}
			return nil
		}
	}

	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() {
			if marshaler, ok := reflect.TypeAssert[MarshalerAttr](pv); ok {
				attr, err := marshaler.MarshalXMLAttr(name)
				if err != nil {
					return err
				}
				if attr.Name.Local != "" {
					start.Attr = append(start.Attr, attr)
				}
				return nil
			}
		}
	}

	if val.CanInterface() {
		if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](val); ok {
			text, err := textMarshaler.MarshalText()
			if err != nil {
				return err
			}
			start.Attr = append(start.Attr, Attr{name, string(text)})
			return nil
		}
	}

	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() {
			if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](pv); ok {
				text, err := textMarshaler.MarshalText()
				if err != nil {
					return err
				}
				start.Attr = append(start.Attr, Attr{name, string(text)})
				return nil
			}
		}
	}

	// Dereference or skip nil pointer, interface values.
	switch val.Kind() {
	case reflect.Pointer, reflect.Interface:
		if val.IsNil() {
			return nil
		}
		val = val.Elem()
	}

	// Walk slices.
	if val.Kind() == reflect.Slice && val.Type().Elem().Kind() != reflect.Uint8 {
		n := val.Len()
		for i := 0; i < n; i++ {
			if err := p.marshalAttr(start, name, val.Index(i)); err != nil {
				return err
			}
		}
		return nil
	}

	// An Attr value is appended verbatim, ignoring name.
	if val.Type() == attrType {
		attr, _ := reflect.TypeAssert[Attr](val)
		start.Attr = append(start.Attr, attr)
		return nil
	}

	s, b, err := p.marshalSimple(val.Type(), val)
	if err != nil {
		return err
	}
	if b != nil {
		s = string(b)
	}
	start.Attr = append(start.Attr, Attr{name, s})
	return nil
}
// defaultStart returns the default start element to use,
// given the reflect type, field info, and start template.
func defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement {
	var start StartElement

	// Precedence for the XML element name is as above,
	// except that we do not look inside structs for the first field.
	switch {
	case startTemplate != nil:
		start.Name = startTemplate.Name
		start.Attr = append(start.Attr, startTemplate.Attr...)
	case finfo != nil && finfo.name != "":
		start.Name = Name{Space: finfo.xmlns, Local: finfo.name}
	case typ.Name() != "":
		start.Name.Local = typ.Name()
	default:
		// Must be a pointer to a named type,
		// since it has the Marshaler methods.
		start.Name.Local = typ.Elem().Name()
	}

	return start
}
// marshalInterface marshals a Marshaler interface value.
func (p *printer) marshalInterface(val Marshaler, start StartElement) error {
	// Push a sentinel onto the tag stack so that MarshalXML
	// cannot close the XML tags that it did not open.
	p.tags = append(p.tags, Name{})
	mark := len(p.tags)

	if err := val.MarshalXML(p.encoder, start); err != nil {
		return err
	}

	// Make sure MarshalXML closed all its tags. p.tags[mark-1] is the sentinel.
	if len(p.tags) > mark {
		return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local)
	}
	p.tags = p.tags[:mark-1]
	return nil
}
// marshalTextInterface marshals a TextMarshaler interface value as the
// escaped character data of a single element.
func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error {
	if err := p.writeStart(&start); err != nil {
		return err
	}
	data, err := val.MarshalText()
	if err != nil {
		return err
	}
	EscapeText(p, data)
	return p.writeEnd(start.Name)
}
// writeStart writes the given start element, pushing its name onto the
// open-tag stack and emitting any namespace and attribute text.
func (p *printer) writeStart(start *StartElement) error {
	name := start.Name
	if name.Local == "" {
		return fmt.Errorf("xml: start tag with no name")
	}

	p.tags = append(p.tags, name)
	p.markPrefix()

	p.writeIndent(1)
	p.WriteByte('<')
	p.WriteString(name.Local)

	if name.Space != "" {
		p.WriteString(` xmlns="`)
		p.EscapeString(name.Space)
		p.WriteByte('"')
	}

	// Attributes: skip nameless entries; namespaced ones are qualified
	// with a (possibly freshly declared) prefix.
	for i := range start.Attr {
		a := &start.Attr[i]
		if a.Name.Local == "" {
			continue
		}
		p.WriteByte(' ')
		if a.Name.Space != "" {
			p.WriteString(p.createAttrPrefix(a.Name.Space))
			p.WriteByte(':')
		}
		p.WriteString(a.Name.Local)
		p.WriteString(`="`)
		p.EscapeString(a.Value)
		p.WriteByte('"')
	}
	p.WriteByte('>')
	return nil
}
// writeEnd writes the end tag for name, verifying that it matches the
// innermost open start tag before popping it from the stack.
func (p *printer) writeEnd(name Name) error {
	if name.Local == "" {
		return fmt.Errorf("xml: end tag with no name")
	}
	stack := p.tags
	if len(stack) == 0 || stack[len(stack)-1].Local == "" {
		return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
	}
	top := stack[len(stack)-1]
	if top != name {
		if top.Local != name.Local {
			return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
		}
		return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
	}
	p.tags = stack[:len(stack)-1]

	p.writeIndent(-1)
	p.WriteByte('<')
	p.WriteByte('/')
	p.WriteString(name.Local)
	p.WriteByte('>')
	p.popPrefix()
	return nil
}
// marshalSimple converts a scalar or byte-sequence value to its text
// form, returning either a string or a []byte (exactly one is set).
// Unsupported kinds yield an *UnsupportedTypeError.
func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
	switch val.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(val.Int(), 10), nil, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return strconv.FormatUint(val.Uint(), 10), nil, nil
	case reflect.Float32, reflect.Float64:
		return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
	case reflect.String:
		return val.String(), nil, nil
	case reflect.Bool:
		return strconv.FormatBool(val.Bool()), nil, nil
	case reflect.Array:
		if typ.Elem().Kind() != reflect.Uint8 {
			break
		}
		// [...]byte: use the backing storage when addressable,
		// otherwise copy the elements out.
		var raw []byte
		if val.CanAddr() {
			raw = val.Bytes()
		} else {
			raw = make([]byte, val.Len())
			reflect.Copy(reflect.ValueOf(raw), val)
		}
		return "", raw, nil
	case reflect.Slice:
		if typ.Elem().Kind() != reflect.Uint8 {
			break
		}
		// []byte
		return "", val.Bytes(), nil
	}
	return "", nil, &UnsupportedTypeError{typ}
}
// ddBytes is the "--" sequence that must not appear inside an XML comment.
var ddBytes = []byte("--")
// indirect drills into interfaces and pointers, returning the pointed-at value.
// If it encounters a nil interface or pointer, indirect returns that nil value.
// This can turn into an infinite loop given a cyclic chain,
// but it matches the Go 1 behavior.
func indirect(vf reflect.Value) reflect.Value {
for vf.Kind() == reflect.Interface || vf.Kind() == reflect.Pointer {
if vf.IsNil() {
return vf
}
vf = vf.Elem()
}
return vf
}
// marshalStruct writes the non-attribute content of a struct value:
// chardata/CDATA fields, comment fields, innerxml fields, and child
// elements (opening and closing intermediate "a>b>c" parents via the
// parentStack as needed).
func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
	s := parentStack{p: p}
	for i := range tinfo.fields {
		finfo := &tinfo.fields[i]
		if finfo.flags&fAttr != 0 {
			continue
		}
		vf := finfo.value(val, dontInitNilPointers)
		if !vf.IsValid() {
			// The field is behind an anonymous struct field that's
			// nil. Skip it.
			continue
		}

		switch finfo.flags & fMode {
		case fCDATA, fCharData:
			// emit selects between plain escaping and CDATA wrapping.
			emit := EscapeText
			if finfo.flags&fMode == fCDATA {
				emit = emitCDATA
			}
			if err := s.trim(finfo.parents); err != nil {
				return err
			}
			// TextMarshaler takes precedence: value first, then address.
			if vf.CanInterface() {
				if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](vf); ok {
					data, err := textMarshaler.MarshalText()
					if err != nil {
						return err
					}
					if err := emit(p, data); err != nil {
						return err
					}
					continue
				}
			}
			if vf.CanAddr() {
				pv := vf.Addr()
				if pv.CanInterface() {
					if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](pv); ok {
						data, err := textMarshaler.MarshalText()
						if err != nil {
							return err
						}
						if err := emit(p, data); err != nil {
							return err
						}
						continue
					}
				}
			}

			var scratch [64]byte
			vf = indirect(vf)
			switch vf.Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				if err := emit(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)); err != nil {
					return err
				}
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
				if err := emit(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)); err != nil {
					return err
				}
			case reflect.Float32, reflect.Float64:
				if err := emit(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())); err != nil {
					return err
				}
			case reflect.Bool:
				if err := emit(p, strconv.AppendBool(scratch[:0], vf.Bool())); err != nil {
					return err
				}
			case reflect.String:
				if err := emit(p, []byte(vf.String())); err != nil {
					return err
				}
			case reflect.Slice:
				if elem, ok := reflect.TypeAssert[[]byte](vf); ok {
					if err := emit(p, elem); err != nil {
						return err
					}
				}
			}
			continue

		case fComment:
			if err := s.trim(finfo.parents); err != nil {
				return err
			}
			vf = indirect(vf)
			k := vf.Kind()
			if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
				return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
			}
			if vf.Len() == 0 {
				continue
			}
			p.writeIndent(0)
			p.WriteString("<!--")
			dashDash := false
			dashLast := false
			switch k {
			case reflect.String:
				s := vf.String()
				dashDash = strings.Contains(s, "--")
				dashLast = s[len(s)-1] == '-'
				if !dashDash {
					p.WriteString(s)
				}
			case reflect.Slice:
				b := vf.Bytes()
				dashDash = bytes.Contains(b, ddBytes)
				dashLast = b[len(b)-1] == '-'
				if !dashDash {
					p.Write(b)
				}
			default:
				panic("can't happen")
			}
			if dashDash {
				return fmt.Errorf(`xml: comments must not contain "--"`)
			}
			if dashLast {
				// "--->" is invalid grammar. Make it "- -->"
				p.WriteByte(' ')
			}
			p.WriteString("-->")
			continue

		case fInnerXML:
			// Inner XML is written verbatim, with no escaping.
			vf = indirect(vf)
			iface := vf.Interface()
			switch raw := iface.(type) {
			case []byte:
				p.Write(raw)
				continue
			case string:
				p.WriteString(raw)
				continue
			}

		case fElement, fElement | fAny:
			if err := s.trim(finfo.parents); err != nil {
				return err
			}
			if len(finfo.parents) > len(s.stack) {
				// Open any missing parent elements, but not for a nil
				// pointer/interface field (which marshals to nothing).
				if vf.Kind() != reflect.Pointer && vf.Kind() != reflect.Interface || !vf.IsNil() {
					if err := s.push(finfo.parents[len(s.stack):]); err != nil {
						return err
					}
				}
			}
		}
		if err := p.marshalValue(vf, finfo, nil); err != nil {
			return err
		}
	}
	// Close any parents still open, then surface any cached write error.
	s.trim(nil)
	return p.cachedWriteError()
}
// Write implements io.Writer. Once the printer is closed or a write has
// failed, the cached error is returned and no further I/O is attempted.
func (p *printer) Write(b []byte) (n int, err error) {
	if p.err == nil && p.closed {
		p.err = errors.New("use of closed Encoder")
	}
	if p.err != nil {
		return 0, p.err
	}
	n, p.err = p.w.Write(b)
	return n, p.err
}
// WriteString implements io.StringWriter. Once the printer is closed or
// a write has failed, the cached error is returned and no further I/O
// is attempted.
func (p *printer) WriteString(s string) (n int, err error) {
	if p.err == nil && p.closed {
		p.err = errors.New("use of closed Encoder")
	}
	if p.err != nil {
		return 0, p.err
	}
	n, p.err = p.w.WriteString(s)
	return n, p.err
}
// WriteByte implements io.ByteWriter. Once the printer is closed or a
// write has failed, the cached error is returned and no further I/O is
// attempted.
func (p *printer) WriteByte(c byte) error {
	if p.err == nil && p.closed {
		p.err = errors.New("use of closed Encoder")
	}
	if p.err != nil {
		return p.err
	}
	p.err = p.w.WriteByte(c)
	return p.err
}
// Close the Encoder, indicating that no more data will be written. It flushes
// any buffered XML to the underlying writer and returns an error if the
// written XML is invalid (e.g. by containing unclosed elements).
// Close is idempotent: repeated calls return nil.
func (p *printer) Close() error {
	if p.closed {
		return nil
	}
	p.closed = true
	if err := p.w.Flush(); err != nil {
		return err
	}
	if n := len(p.tags); n > 0 {
		return fmt.Errorf("unclosed tag <%s>", p.tags[n-1].Local)
	}
	return nil
}
// cachedWriteError returns the printer's sticky write error, if any,
// by issuing a zero-length write (which performs no real I/O but
// reports p.err, including the "use of closed Encoder" state).
func (p *printer) cachedWriteError() error {
	_, err := p.Write(nil)
	return err
}
// writeIndent adjusts the nesting depth by depthDelta (+1 before a start
// tag, -1 before an end tag, 0 for other output such as comments) and
// writes the newline, prefix, and per-level indent that should precede
// the next token. It is a no-op unless Indent configured a non-empty
// prefix or indent.
func (p *printer) writeIndent(depthDelta int) {
	if len(p.prefix) == 0 && len(p.indent) == 0 {
		return
	}
	if depthDelta < 0 {
		p.depth--
		if p.indentedIn {
			// The end tag immediately follows its start tag; keep the
			// pair on one line by skipping the newline/indent.
			p.indentedIn = false
			return
		}
		p.indentedIn = false
	}
	if p.putNewline {
		p.WriteByte('\n')
	} else {
		// Suppress the newline exactly once, before the first token.
		p.putNewline = true
	}
	if len(p.prefix) > 0 {
		p.WriteString(p.prefix)
	}
	if len(p.indent) > 0 {
		for i := 0; i < p.depth; i++ {
			p.WriteString(p.indent)
		}
	}
	if depthDelta > 0 {
		p.depth++
		p.indentedIn = true
	}
}
// parentStack tracks the chain of intermediate parent elements (from
// "a>b>c" field tags) currently open around the field being marshaled.
type parentStack struct {
	p     *printer // printer used to emit the parents' start/end tags
	stack []string // local names of the currently open parent elements
}
// trim updates the XML context to match the longest common prefix of the stack
// and the given parents. A closing tag will be written for every parent
// popped. Passing a zero slice or nil will close all the elements.
func (s *parentStack) trim(parents []string) error {
	keep := 0
	for keep < len(parents) && keep < len(s.stack) && parents[keep] == s.stack[keep] {
		keep++
	}
	for i := len(s.stack) - 1; i >= keep; i-- {
		if err := s.p.writeEnd(Name{Local: s.stack[i]}); err != nil {
			return err
		}
	}
	s.stack = s.stack[:keep]
	return nil
}
// push adds parent elements to the stack and writes open tags.
func (s *parentStack) push(parents []string) error {
	for _, parent := range parents {
		if err := s.p.writeStart(&StartElement{Name: Name{Local: parent}}); err != nil {
			return err
		}
	}
	s.stack = append(s.stack, parents...)
	return nil
}
// UnsupportedTypeError is returned when [Marshal] encounters a type
// that cannot be converted into XML.
type UnsupportedTypeError struct {
	Type reflect.Type // the offending Go type
}

// Error implements the error interface.
func (e *UnsupportedTypeError) Error() string {
	return "xml: unsupported type: " + e.Type.String()
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Float32, reflect.Float64,
reflect.Interface, reflect.Pointer:
return v.IsZero()
}
return false
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bytes"
"encoding"
"errors"
"fmt"
"reflect"
"runtime"
"strconv"
"strings"
)
// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
// an XML element is an order-dependent collection of anonymous
// values, while a data structure is an order-independent collection
// of named values.
// See [encoding/json] for a textual representation more suitable
// to data structures.
// Unmarshal parses the XML-encoded data and stores the result in
// the value pointed to by v, which must be an arbitrary struct,
// slice, or string. Well-formed data that does not fit into v is
// discarded.
//
// Because Unmarshal uses the reflect package, it can only assign
// to exported (upper case) fields. Unmarshal uses a case-sensitive
// comparison to match XML element names to tag values and struct
// field names.
//
// Unmarshal maps an XML element to a struct using the following rules.
// In the rules, the tag of a field refers to the value associated with the
// key 'xml' in the struct field's tag (see the example above).
//
// - If the struct has a field of type []byte or string with tag
// ",innerxml", Unmarshal accumulates the raw XML nested inside the
// element in that field. The rest of the rules still apply.
//
// - If the struct has a field named XMLName of type Name,
// Unmarshal records the element name in that field.
//
// - If the XMLName field has an associated tag of the form
// "name" or "namespace-URL name", the XML element must have
// the given name (and, optionally, name space) or else Unmarshal
// returns an error.
//
// - If the XML element has an attribute whose name matches a
// struct field name with an associated tag containing ",attr" or
// the explicit name in a struct field tag of the form "name,attr",
// Unmarshal records the attribute value in that field.
//
// - If the XML element has an attribute not handled by the previous
// rule and the struct has a field with an associated tag containing
// ",any,attr", Unmarshal records the attribute value in the first
// such field.
//
// - If the XML element contains character data, that data is
// accumulated in the first struct field that has tag ",chardata".
// The struct field may have type []byte or string.
// If there is no such field, the character data is discarded.
//
// - If the XML element contains comments, they are accumulated in
// the first struct field that has tag ",comment". The struct
// field may have type []byte or string. If there is no such
// field, the comments are discarded.
//
// - If the XML element contains a sub-element whose name matches
// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
// will descend into the XML structure looking for elements with the
// given names, and will map the innermost elements to that struct
// field. A tag starting with ">" is equivalent to one starting
// with the field name followed by ">".
//
// - If the XML element contains a sub-element whose name matches
// a struct field's XMLName tag and the struct field has no
// explicit name tag as per the previous rule, unmarshal maps
// the sub-element to that struct field.
//
// - If the XML element contains a sub-element whose name matches a
// field without any mode flags (",attr", ",chardata", etc), Unmarshal
// maps the sub-element to that struct field.
//
// - If the XML element contains a sub-element that hasn't matched any
// of the above rules and the struct has a field with tag ",any",
// unmarshal maps the sub-element to that struct field.
//
// - An anonymous struct field is handled as if the fields of its
// value were part of the outer struct.
//
// - A struct field with tag "-" is never unmarshaled into.
//
// If Unmarshal encounters a field type that implements the Unmarshaler
// interface, Unmarshal calls its UnmarshalXML method to produce the value from
// the XML element. Otherwise, if the value implements
// [encoding.TextUnmarshaler], Unmarshal calls that value's UnmarshalText method.
//
// Unmarshal maps an XML element to a string or []byte by saving the
// concatenation of that element's character data in the string or
// []byte. The saved []byte is never nil.
//
// Unmarshal maps an attribute value to a string or []byte by saving
// the value in the string or slice.
//
// Unmarshal maps an attribute value to an [Attr] by saving the attribute,
// including its name, in the Attr.
//
// Unmarshal maps an XML element or attribute value to a slice by
// extending the length of the slice and mapping the element or attribute
// to the newly created value.
//
// Unmarshal maps an XML element or attribute value to a bool by
// setting it to the boolean value represented by the string. Whitespace
// is trimmed and ignored.
//
// Unmarshal maps an XML element or attribute value to an integer or
// floating-point field by setting the field to the result of
// interpreting the string value in decimal. There is no check for
// overflow. Whitespace is trimmed and ignored.
//
// Unmarshal maps an XML element to a Name by recording the element
// name.
//
// Unmarshal maps an XML element to a pointer by setting the pointer
// to a freshly allocated value and then mapping the element to that value.
//
// A missing element or empty attribute value will be unmarshaled as a zero value.
// If the field is a slice, a zero value will be appended to the field. Otherwise, the
// field will be set to its zero value.
func Unmarshal(data []byte, v any) error {
	// Wrap the in-memory buffer in a reader so the streaming decoder
	// handles the whole-slice convenience path too.
	r := bytes.NewReader(data)
	return NewDecoder(r).Decode(v)
}
// Decode works like [Unmarshal], except it reads the decoder
// stream to find the start element.
func (d *Decoder) Decode(v any) error {
	// A nil start element tells DecodeElement to scan forward for
	// the next start element itself.
	return d.DecodeElement(v, nil)
}
// DecodeElement works like [Unmarshal] except that it takes
// a pointer to the start XML element to decode into v.
// It is useful when a client reads some raw XML tokens itself
// but also wants to defer to [Unmarshal] for some elements.
func (d *Decoder) DecodeElement(v any, start *StartElement) error {
	rv := reflect.ValueOf(v)
	// Only a non-nil pointer gives unmarshal something addressable
	// to write into.
	switch {
	case rv.Kind() != reflect.Pointer:
		return errors.New("non-pointer passed to Unmarshal")
	case rv.IsNil():
		return errors.New("nil pointer passed to Unmarshal")
	}
	return d.unmarshal(rv.Elem(), start, 0)
}
// An UnmarshalError represents an error in the unmarshaling process.
type UnmarshalError string

// Error returns the message held in the string itself.
func (e UnmarshalError) Error() string {
	return string(e)
}
// Unmarshaler is the interface implemented by objects that can unmarshal
// an XML element description of themselves.
//
// UnmarshalXML decodes a single XML element
// beginning with the given start element.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXML must consume exactly one XML element: no fewer tokens,
// and none belonging to the following sibling.
// One common implementation strategy is to unmarshal into
// a separate value with a layout matching the expected XML
// using d.DecodeElement, and then to copy the data from
// that value into the receiver.
// Another common strategy is to use d.Token to process the
// XML object one token at a time.
// UnmarshalXML may not use d.RawToken.
type Unmarshaler interface {
	UnmarshalXML(d *Decoder, start StartElement) error
}

// UnmarshalerAttr is the interface implemented by objects that can unmarshal
// an XML attribute description of themselves.
//
// UnmarshalXMLAttr decodes a single XML attribute.
// If it returns an error, the outer call to [Unmarshal] stops and
// returns that error.
// UnmarshalXMLAttr is used only for struct fields with the
// "attr" option in the field tag; it takes precedence over
// encoding.TextUnmarshaler for such fields.
type UnmarshalerAttr interface {
	UnmarshalXMLAttr(attr Attr) error
}
// receiverType returns the receiver type to use in an expression like "%s.MethodName".
// Named types print bare (e.g. "MyType"); unnamed types such as
// pointers and slices are parenthesized so the method expression
// remains readable, e.g. "(*MyType).MethodName".
func receiverType(val any) string {
	t := reflect.TypeOf(val)
	if t.Name() == "" {
		return "(" + t.String() + ")"
	}
	return t.String()
}
// unmarshalInterface unmarshals a single XML element into val.
// start is the opening tag of the element.
func (d *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
	// Record that decoder must stop at end tag corresponding to start.
	d.pushEOF()
	d.unmarshalDepth++
	err := val.UnmarshalXML(d, *start)
	d.unmarshalDepth--
	if err != nil {
		// Pop unconditionally so the decoder is left in a usable
		// state even when the custom unmarshaler fails.
		d.popEOF()
		return err
	}
	// popEOF reports whether the synthetic EOF was reached, i.e. the
	// unmarshaler consumed exactly the element it was handed.
	if !d.popEOF() {
		return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
	}
	return nil
}
// unmarshalTextInterface unmarshals a single XML element into val.
// The chardata contained in the element (but not its children)
// is passed to the text unmarshaler.
func (d *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler) error {
	var text []byte
	// level counts open elements; we entered at level 1 and finish
	// when its matching end tag brings us back to 0.
	level := 1
	for level > 0 {
		tok, err := d.Token()
		if err != nil {
			return err
		}
		switch tok := tok.(type) {
		case StartElement:
			level++
		case EndElement:
			level--
		case CharData:
			// Only top-level character data belongs to this element.
			if level == 1 {
				text = append(text, tok...)
			}
		}
	}
	return val.UnmarshalText(text)
}
// unmarshalAttr unmarshals a single XML attribute into val.
//
// Precedence: UnmarshalerAttr (value receiver, then addressable
// pointer), encoding.TextUnmarshaler (same order), then slice growth,
// whole-Attr assignment, and finally plain copyValue.
func (d *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
	if val.Kind() == reflect.Pointer {
		if val.IsNil() {
			// Allocate through nil pointers so the attribute has
			// somewhere to land.
			val.Set(reflect.New(val.Type().Elem()))
		}
		val = val.Elem()
	}
	if val.CanInterface() {
		// This is an unmarshaler with a non-pointer receiver,
		// so it's likely to be incorrect, but we do what we're told.
		if unmarshaler, ok := reflect.TypeAssert[UnmarshalerAttr](val); ok {
			return unmarshaler.UnmarshalXMLAttr(attr)
		}
	}
	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() {
			if unmarshaler, ok := reflect.TypeAssert[UnmarshalerAttr](pv); ok {
				return unmarshaler.UnmarshalXMLAttr(attr)
			}
		}
	}
	// Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
	if val.CanInterface() {
		// This is an unmarshaler with a non-pointer receiver,
		// so it's likely to be incorrect, but we do what we're told.
		if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](val); ok {
			return textUnmarshaler.UnmarshalText([]byte(attr.Value))
		}
	}
	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() {
			if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](pv); ok {
				return textUnmarshaler.UnmarshalText([]byte(attr.Value))
			}
		}
	}
	if val.Kind() == reflect.Slice && val.Type().Elem().Kind() != reflect.Uint8 {
		// Slice of element values.
		// Grow slice.
		n := val.Len()
		val.Grow(1)
		val.SetLen(n + 1)
		// Recur to read element into slice.
		if err := d.unmarshalAttr(val.Index(n), attr); err != nil {
			// Undo the growth on failure so the slice is unchanged.
			val.SetLen(n)
			return err
		}
		return nil
	}
	if val.Type() == attrType {
		// The field wants the whole Attr, name included.
		val.Set(reflect.ValueOf(attr))
		return nil
	}
	return copyValue(val, []byte(attr.Value))
}
// attrType is cached once for the whole-Attr fast path in unmarshalAttr.
var attrType = reflect.TypeFor[Attr]()

const (
	// Limits on element nesting during unmarshal, preventing stack
	// exhaustion on maliciously deep documents.
	maxUnmarshalDepth     = 10000
	maxUnmarshalDepthWasm = 5000 // go.dev/issue/56498
)

// errUnmarshalDepth is returned when either limit above is exceeded.
var errUnmarshalDepth = errors.New("exceeded max depth")
// Unmarshal a single XML element into val.
// depth tracks element nesting so maliciously deep documents cannot
// exhaust the stack (see maxUnmarshalDepth).
func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error {
	if depth >= maxUnmarshalDepth || runtime.GOARCH == "wasm" && depth >= maxUnmarshalDepthWasm {
		return errUnmarshalDepth
	}
	// Find start element if we need it.
	if start == nil {
		for {
			tok, err := d.Token()
			if err != nil {
				return err
			}
			if t, ok := tok.(StartElement); ok {
				start = &t
				break
			}
		}
	}
	// Load value from interface, but only if the result will be
	// usefully addressable.
	if val.Kind() == reflect.Interface && !val.IsNil() {
		e := val.Elem()
		if e.Kind() == reflect.Pointer && !e.IsNil() {
			val = e
		}
	}
	if val.Kind() == reflect.Pointer {
		if val.IsNil() {
			val.Set(reflect.New(val.Type().Elem()))
		}
		val = val.Elem()
	}
	// Custom unmarshalers take precedence: Unmarshaler first (value
	// receiver, then addressable pointer), then encoding.TextUnmarshaler
	// in the same order.
	if val.CanInterface() {
		// This is an unmarshaler with a non-pointer receiver,
		// so it's likely to be incorrect, but we do what we're told.
		if unmarshaler, ok := reflect.TypeAssert[Unmarshaler](val); ok {
			return d.unmarshalInterface(unmarshaler, start)
		}
	}
	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() {
			if unmarshaler, ok := reflect.TypeAssert[Unmarshaler](pv); ok {
				return d.unmarshalInterface(unmarshaler, start)
			}
		}
	}
	if val.CanInterface() {
		if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](val); ok {
			return d.unmarshalTextInterface(textUnmarshaler)
		}
	}
	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() {
			if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](pv); ok {
				return d.unmarshalTextInterface(textUnmarshaler)
			}
		}
	}
	var (
		data         []byte        // accumulated character data
		saveData     reflect.Value // destination for chardata/cdata
		comment      []byte        // accumulated comment text
		saveComment  reflect.Value // destination for a ",comment" field
		saveXML      reflect.Value // destination for a ",innerxml" field
		saveXMLIndex int           // offset in d.saved where inner XML begins
		saveXMLData  []byte        // raw inner XML captured for saveXML
		saveAny      reflect.Value // destination for a ",any" field
		sv           reflect.Value // struct value being populated, if any
		tinfo        *typeInfo
		err          error
	)
	switch v := val; v.Kind() {
	default:
		return errors.New("unknown type " + v.Type().String())
	case reflect.Interface:
		// TODO: For now, simply ignore the field. In the near
		// future we may choose to unmarshal the start
		// element on it, if not nil.
		return d.Skip()
	case reflect.Slice:
		typ := v.Type()
		if typ.Elem().Kind() == reflect.Uint8 {
			// []byte
			saveData = v
			break
		}
		// Slice of element values.
		// Grow slice.
		n := v.Len()
		v.Grow(1)
		v.SetLen(n + 1)
		// Recur to read element into slice.
		if err := d.unmarshal(v.Index(n), start, depth+1); err != nil {
			// Undo the growth on failure so the slice is unchanged.
			v.SetLen(n)
			return err
		}
		return nil
	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
		saveData = v
	case reflect.Struct:
		typ := v.Type()
		if typ == nameType {
			v.Set(reflect.ValueOf(start.Name))
			break
		}
		sv = v
		tinfo, err = getTypeInfo(typ)
		if err != nil {
			return err
		}
		// Validate and assign element name.
		if tinfo.xmlname != nil {
			finfo := tinfo.xmlname
			if finfo.name != "" && finfo.name != start.Name.Local {
				return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
			}
			if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
				e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
				if start.Name.Space == "" {
					e += "no name space"
				} else {
					e += start.Name.Space
				}
				return UnmarshalError(e)
			}
			fv := finfo.value(sv, initNilPointers)
			if _, ok := reflect.TypeAssert[Name](fv); ok {
				fv.Set(reflect.ValueOf(start.Name))
			}
		}
		// Assign attributes.
		for _, a := range start.Attr {
			handled := false
			any := -1
			for i := range tinfo.fields {
				finfo := &tinfo.fields[i]
				switch finfo.flags & fMode {
				case fAttr:
					strv := finfo.value(sv, initNilPointers)
					if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
						if err := d.unmarshalAttr(strv, a); err != nil {
							return err
						}
						handled = true
					}
				case fAny | fAttr:
					// Remember the first ",any,attr" field as a fallback.
					if any == -1 {
						any = i
					}
				}
			}
			if !handled && any >= 0 {
				finfo := &tinfo.fields[any]
				strv := finfo.value(sv, initNilPointers)
				if err := d.unmarshalAttr(strv, a); err != nil {
					return err
				}
			}
		}
		// Determine whether we need to save character data or comments.
		for i := range tinfo.fields {
			finfo := &tinfo.fields[i]
			switch finfo.flags & fMode {
			case fCDATA, fCharData:
				if !saveData.IsValid() {
					saveData = finfo.value(sv, initNilPointers)
				}
			case fComment:
				if !saveComment.IsValid() {
					saveComment = finfo.value(sv, initNilPointers)
				}
			case fAny, fAny | fElement:
				if !saveAny.IsValid() {
					saveAny = finfo.value(sv, initNilPointers)
				}
			case fInnerXML:
				if !saveXML.IsValid() {
					saveXML = finfo.value(sv, initNilPointers)
					if d.saved == nil {
						saveXMLIndex = 0
						d.saved = new(bytes.Buffer)
					} else {
						saveXMLIndex = d.savedOffset()
					}
				}
			}
		}
	}
	// Find end element.
	// Process sub-elements along the way.
Loop:
	for {
		var savedOffset int
		if saveXML.IsValid() {
			savedOffset = d.savedOffset()
		}
		tok, err := d.Token()
		if err != nil {
			return err
		}
		switch t := tok.(type) {
		case StartElement:
			consumed := false
			if sv.IsValid() {
				// unmarshalPath can call unmarshal, so we need to pass the depth through so that
				// we can continue to enforce the maximum recursion limit.
				consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth)
				if err != nil {
					return err
				}
				if !consumed && saveAny.IsValid() {
					consumed = true
					if err := d.unmarshal(saveAny, &t, depth+1); err != nil {
						return err
					}
				}
			}
			if !consumed {
				// Unclaimed sub-element: discard it wholesale.
				if err := d.Skip(); err != nil {
					return err
				}
			}
		case EndElement:
			if saveXML.IsValid() {
				saveXMLData = d.saved.Bytes()[saveXMLIndex:savedOffset]
				if saveXMLIndex == 0 {
					d.saved = nil
				}
			}
			break Loop
		case CharData:
			if saveData.IsValid() {
				data = append(data, t...)
			}
		case Comment:
			if saveComment.IsValid() {
				comment = append(comment, t...)
			}
		}
	}
	// A TextUnmarshaler on the chardata field takes precedence over
	// copyValue; saveData is invalidated once consumed.
	if saveData.IsValid() && saveData.CanInterface() {
		if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](saveData); ok {
			if err := textUnmarshaler.UnmarshalText(data); err != nil {
				return err
			}
			saveData = reflect.Value{}
		}
	}
	if saveData.IsValid() && saveData.CanAddr() {
		pv := saveData.Addr()
		if pv.CanInterface() {
			if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](pv); ok {
				if err := textUnmarshaler.UnmarshalText(data); err != nil {
					return err
				}
				saveData = reflect.Value{}
			}
		}
	}
	// copyValue tolerates an invalid saveData (nothing to store into).
	if err := copyValue(saveData, data); err != nil {
		return err
	}
	switch t := saveComment; t.Kind() {
	case reflect.String:
		t.SetString(string(comment))
	case reflect.Slice:
		t.Set(reflect.ValueOf(comment))
	}
	switch t := saveXML; t.Kind() {
	case reflect.String:
		t.SetString(string(saveXMLData))
	case reflect.Slice:
		if t.Type().Elem().Kind() == reflect.Uint8 {
			t.Set(reflect.ValueOf(saveXMLData))
		}
	}
	return nil
}
func copyValue(dst reflect.Value, src []byte) (err error) {
dst0 := dst
if dst.Kind() == reflect.Pointer {
if dst.IsNil() {
dst.Set(reflect.New(dst.Type().Elem()))
}
dst = dst.Elem()
}
// Save accumulated data.
switch dst.Kind() {
case reflect.Invalid:
// Probably a comment.
default:
return errors.New("cannot unmarshal into " + dst0.Type().String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if len(src) == 0 {
dst.SetInt(0)
return nil
}
itmp, err := strconv.ParseInt(strings.TrimSpace(string(src)), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetInt(itmp)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if len(src) == 0 {
dst.SetUint(0)
return nil
}
utmp, err := strconv.ParseUint(strings.TrimSpace(string(src)), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetUint(utmp)
case reflect.Float32, reflect.Float64:
if len(src) == 0 {
dst.SetFloat(0)
return nil
}
ftmp, err := strconv.ParseFloat(strings.TrimSpace(string(src)), dst.Type().Bits())
if err != nil {
return err
}
dst.SetFloat(ftmp)
case reflect.Bool:
if len(src) == 0 {
dst.SetBool(false)
return nil
}
value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
if err != nil {
return err
}
dst.SetBool(value)
case reflect.String:
dst.SetString(string(src))
case reflect.Slice:
if len(src) == 0 {
// non-nil to flag presence
src = []byte{}
}
dst.SetBytes(src)
}
return nil
}
// unmarshalPath walks down an XML structure looking for wanted
// paths, and calls unmarshal on them.
// The consumed result tells whether XML elements have been consumed
// from the Decoder until start's matching end element, or if it's
// still untouched because start is uninteresting for sv's fields.
func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) {
	recurse := false
Loop:
	for i := range tinfo.fields {
		finfo := &tinfo.fields[i]
		if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
			continue
		}
		// The field's parent chain must agree with the path walked so far.
		for j := range parents {
			if parents[j] != finfo.parents[j] {
				continue Loop
			}
		}
		if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
			// It's a perfect match, unmarshal the field.
			return true, d.unmarshal(finfo.value(sv, initNilPointers), start, depth+1)
		}
		if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
			// It's a prefix for the field. Break and recurse
			// since it's not ok for one field path to be itself
			// the prefix for another field path.
			recurse = true
			// We can reuse the same slice as long as we
			// don't try to append to it.
			parents = finfo.parents[:len(parents)+1]
			break
		}
	}
	if !recurse {
		// We have no business with this element.
		return false, nil
	}
	// The element is not a perfect match for any field, but one
	// or more fields have the path to this element as a parent
	// prefix. Recurse and attempt to match these.
	for {
		var tok Token
		tok, err = d.Token()
		if err != nil {
			return true, err
		}
		switch t := tok.(type) {
		case StartElement:
			// the recursion depth of unmarshalPath is limited to the path length specified
			// by the struct field tag, so we don't increment the depth here.
			consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth)
			if err != nil {
				return true, err
			}
			if !consumed2 {
				// Unclaimed sub-element: discard it wholesale.
				if err := d.Skip(); err != nil {
					return true, err
				}
			}
		case EndElement:
			return true, nil
		}
	}
}
// Skip reads tokens until it has consumed the end element
// matching the most recent start element already consumed,
// skipping nested structures.
// It returns nil if it finds an end element matching the start
// element; otherwise it returns an error describing the problem.
func (d *Decoder) Skip() error {
	// nesting counts start elements opened (and not yet closed)
	// since Skip was called.
	var nesting int64
	for {
		tok, err := d.Token()
		if err != nil {
			return err
		}
		switch tok.(type) {
		case EndElement:
			if nesting == 0 {
				// This closes the element we were asked to skip.
				return nil
			}
			nesting--
		case StartElement:
			nesting++
		}
	}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"fmt"
"reflect"
"strings"
"sync"
)
// typeInfo holds details for the xml representation of a type.
type typeInfo struct {
	xmlname *fieldInfo  // info for the XMLName field, if any
	fields  []fieldInfo // info for every other serializable field
}

// fieldInfo holds details for the xml representation of a single field.
type fieldInfo struct {
	idx     []int      // index path into the struct (supports embedding)
	name    string     // XML element/attribute name
	xmlns   string     // XML name space constraint, if any
	flags   fieldFlags // mode and option bits parsed from the tag
	parents []string   // element names above this field, from "a>b>c" tags
}

type fieldFlags int

const (
	// Mode flags, largely mutually exclusive (fAny|fAttr is the one
	// allowed combination; see structFieldInfo's validation).
	fElement fieldFlags = 1 << iota
	fAttr
	fCDATA
	fCharData
	fInnerXML
	fComment
	fAny

	// Option flag, combinable with fElement or fAttr.
	fOmitEmpty

	// fMode masks just the mode bits out of a fieldFlags value.
	fMode = fElement | fAttr | fCDATA | fCharData | fInnerXML | fComment | fAny

	xmlName = "XMLName"
)

var tinfoMap sync.Map // map[reflect.Type]*typeInfo

var nameType = reflect.TypeFor[Name]()
// getTypeInfo returns the typeInfo structure with details necessary
// for marshaling and unmarshaling typ.
// Results are cached in tinfoMap, so the reflection work runs once
// per type.
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
	if ti, ok := tinfoMap.Load(typ); ok {
		return ti.(*typeInfo), nil
	}
	tinfo := &typeInfo{}
	if typ.Kind() == reflect.Struct && typ != nameType {
		n := typ.NumField()
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if (!f.IsExported() && !f.Anonymous) || f.Tag.Get("xml") == "-" {
				continue // Private field
			}
			// For embedded structs, embed its fields.
			if f.Anonymous {
				t := f.Type
				if t.Kind() == reflect.Pointer {
					t = t.Elem()
				}
				if t.Kind() == reflect.Struct {
					inner, err := getTypeInfo(t)
					if err != nil {
						return nil, err
					}
					if tinfo.xmlname == nil {
						tinfo.xmlname = inner.xmlname
					}
					for _, finfo := range inner.fields {
						// Prepend the embedded field's index so the
						// path is valid from the outer struct.
						finfo.idx = append([]int{i}, finfo.idx...)
						if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
							return nil, err
						}
					}
					continue
				}
			}
			finfo, err := structFieldInfo(typ, &f)
			if err != nil {
				return nil, err
			}
			if f.Name == xmlName {
				tinfo.xmlname = finfo
				continue
			}
			// Add the field if it doesn't conflict with other fields.
			if err := addFieldInfo(typ, tinfo, finfo); err != nil {
				return nil, err
			}
		}
	}
	// LoadOrStore keeps the first stored value if two goroutines
	// computed the same type concurrently.
	ti, _ := tinfoMap.LoadOrStore(typ, tinfo)
	return ti.(*typeInfo), nil
}
// structFieldInfo builds and returns a fieldInfo for f.
// It parses the "xml" struct tag: an optional leading name space,
// a (possibly empty) name or "a>b>c" parent chain, and a
// comma-separated list of option flags.
func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
	finfo := &fieldInfo{idx: f.Index}

	// Split the tag from the xml namespace if necessary.
	tag := f.Tag.Get("xml")
	if ns, t, ok := strings.Cut(tag, " "); ok {
		finfo.xmlns, tag = ns, t
	}

	// Parse flags.
	tokens := strings.Split(tag, ",")
	if len(tokens) == 1 {
		finfo.flags = fElement
	} else {
		tag = tokens[0]
		for _, flag := range tokens[1:] {
			switch flag {
			case "attr":
				finfo.flags |= fAttr
			case "cdata":
				finfo.flags |= fCDATA
			case "chardata":
				finfo.flags |= fCharData
			case "innerxml":
				finfo.flags |= fInnerXML
			case "comment":
				finfo.flags |= fComment
			case "any":
				finfo.flags |= fAny
			case "omitempty":
				finfo.flags |= fOmitEmpty
			}
		}

		// Validate the flags used.
		valid := true
		switch mode := finfo.flags & fMode; mode {
		case 0:
			// No explicit mode: default to element.
			finfo.flags |= fElement
		case fAttr, fCDATA, fCharData, fInnerXML, fComment, fAny, fAny | fAttr:
			// A name is only meaningful with attr mode; XMLName may
			// not carry any of these modes.
			if f.Name == xmlName || tag != "" && mode != fAttr {
				valid = false
			}
		default:
			// This will also catch multiple modes in a single field.
			valid = false
		}
		if finfo.flags&fMode == fAny {
			finfo.flags |= fElement
		}
		if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
			valid = false
		}
		if !valid {
			return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
				f.Name, typ, f.Tag.Get("xml"))
		}
	}

	// Use of xmlns without a name is not allowed.
	if finfo.xmlns != "" && tag == "" {
		return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
			f.Name, typ, f.Tag.Get("xml"))
	}

	if f.Name == xmlName {
		// The XMLName field records the XML element name. Don't
		// process it as usual because its name should default to
		// empty rather than to the field name.
		finfo.name = tag
		return finfo, nil
	}

	if tag == "" {
		// If the name part of the tag is completely empty, get
		// default from XMLName of underlying struct if feasible,
		// or field name otherwise.
		if xmlname := lookupXMLName(f.Type); xmlname != nil {
			finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
		} else {
			finfo.name = f.Name
		}
		return finfo, nil
	}

	// Prepare field name and parents.
	parents := strings.Split(tag, ">")
	if parents[0] == "" {
		// A tag starting with ">" means "field name, then descend".
		parents[0] = f.Name
	}
	if parents[len(parents)-1] == "" {
		return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
	}
	finfo.name = parents[len(parents)-1]
	if len(parents) > 1 {
		if (finfo.flags & fElement) == 0 {
			return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
		}
		finfo.parents = parents[:len(parents)-1]
	}

	// If the field type has an XMLName field, the names must match
	// so that the behavior of both marshaling and unmarshaling
	// is straightforward and unambiguous.
	if finfo.flags&fElement != 0 {
		ftyp := f.Type
		xmlname := lookupXMLName(ftyp)
		if xmlname != nil && xmlname.name != finfo.name {
			return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
				finfo.name, typ, f.Name, xmlname.name, ftyp)
		}
	}
	return finfo, nil
}
// lookupXMLName returns the fieldInfo for typ's XMLName field
// in case it exists and has a valid xml field tag, otherwise
// it returns nil.
func lookupXMLName(typ reflect.Type) *fieldInfo {
	// Walk through pointer types to the underlying struct.
	for typ.Kind() == reflect.Pointer {
		typ = typ.Elem()
	}
	if typ.Kind() != reflect.Struct {
		return nil
	}
	n := typ.NumField()
	for i := 0; i < n; i++ {
		f := typ.Field(i)
		if f.Name != xmlName {
			continue
		}
		if finfo, err := structFieldInfo(typ, &f); err == nil && finfo.name != "" {
			return finfo
		}
		// An invalid or empty tag is treated as a non-existent field
		// here; getTypeInfo itself reports the error.
		return nil
	}
	return nil
}
// addFieldInfo adds finfo to tinfo.fields if there are no
// conflicts, or if conflicts arise from previous fields that were
// obtained from deeper embedded structures than finfo. In the latter
// case, the conflicting entries are dropped.
// A conflict occurs when the path (parent + name) to a field is
// itself a prefix of another path, or when two paths match exactly.
// It is okay for field paths to share a common, shorter prefix.
func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
	var conflicts []int
Loop:
	// First, figure all conflicts. Most working code will have none.
	for i := range tinfo.fields {
		oldf := &tinfo.fields[i]
		// Fields in different modes (attr vs element etc.) or in
		// disjoint explicit name spaces can never conflict.
		if oldf.flags&fMode != newf.flags&fMode {
			continue
		}
		if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
			continue
		}
		minl := min(len(newf.parents), len(oldf.parents))
		for p := 0; p < minl; p++ {
			if oldf.parents[p] != newf.parents[p] {
				continue Loop
			}
		}
		if len(oldf.parents) > len(newf.parents) {
			// Conflict if newf's full path is a prefix of oldf's.
			if oldf.parents[len(newf.parents)] == newf.name {
				conflicts = append(conflicts, i)
			}
		} else if len(oldf.parents) < len(newf.parents) {
			// Conflict if oldf's full path is a prefix of newf's.
			if newf.parents[len(oldf.parents)] == oldf.name {
				conflicts = append(conflicts, i)
			}
		} else {
			// Same depth: conflict only on an exact path match.
			if newf.name == oldf.name && newf.xmlns == oldf.xmlns {
				conflicts = append(conflicts, i)
			}
		}
	}

	// Without conflicts, add the new field and return.
	if conflicts == nil {
		tinfo.fields = append(tinfo.fields, *newf)
		return nil
	}

	// If any conflict is shallower, ignore the new field.
	// This matches the Go field resolution on embedding.
	for _, i := range conflicts {
		if len(tinfo.fields[i].idx) < len(newf.idx) {
			return nil
		}
	}

	// Otherwise, if any of them is at the same depth level, it's an error.
	for _, i := range conflicts {
		oldf := &tinfo.fields[i]
		if len(oldf.idx) == len(newf.idx) {
			f1 := typ.FieldByIndex(oldf.idx)
			f2 := typ.FieldByIndex(newf.idx)
			return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
		}
	}

	// Otherwise, the new field is shallower, and thus takes precedence,
	// so drop the conflicting fields from tinfo and append the new one.
	// Iterate in reverse so earlier indices stay valid as we remove.
	for c := len(conflicts) - 1; c >= 0; c-- {
		i := conflicts[c]
		copy(tinfo.fields[i:], tinfo.fields[i+1:])
		tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
	}

	tinfo.fields = append(tinfo.fields, *newf)
	return nil
}
// A TagPathError represents an error in the unmarshaling process
// caused by the use of field tags with conflicting paths.
type TagPathError struct {
Struct reflect.Type
Field1, Tag1 string
Field2, Tag2 string
}
func (e *TagPathError) Error() string {
return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
}
const (
	// Symbolic arguments for the shouldInitNilPointers parameter of
	// fieldInfo.value, so call sites read as intent rather than bools.
	initNilPointers     = true
	dontInitNilPointers = false
)
// value returns v's field value corresponding to finfo.
// It's equivalent to v.FieldByIndex(finfo.idx), but when passed
// initNilPointers, it initializes and dereferences pointers as necessary.
// When passed dontInitNilPointers and a nil pointer is reached, the function
// returns a zero reflect.Value.
func (finfo *fieldInfo) value(v reflect.Value, shouldInitNilPointers bool) reflect.Value {
	for step, fi := range finfo.idx {
		if step > 0 {
			// Between hops of the index path, dereference embedded
			// struct pointers, allocating them when allowed.
			if t := v.Type(); t.Kind() == reflect.Pointer && t.Elem().Kind() == reflect.Struct {
				if v.IsNil() {
					if !shouldInitNilPointers {
						return reflect.Value{}
					}
					v.Set(reflect.New(t.Elem()))
				}
				v = v.Elem()
			}
		}
		v = v.Field(fi)
	}
	return v
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package xml implements a simple XML 1.0 parser that
// understands XML name spaces.
package xml
// References:
// Annotated XML spec: https://www.xml.com/axml/testaxml.htm
// XML name spaces: https://www.w3.org/TR/REC-xml-names/
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// A SyntaxError represents a syntax error in the XML input stream.
type SyntaxError struct {
	Msg  string // description of the problem
	Line int    // input line on which the error was observed
}

// Error formats the message with its line number.
func (e *SyntaxError) Error() string {
	where := "XML syntax error on line " + strconv.Itoa(e.Line)
	return where + ": " + e.Msg
}
// A Name represents an XML name (Local) annotated
// with a name space identifier (Space).
// In tokens returned by [Decoder.Token], the Space identifier
// is given as a canonical URL, not the short prefix used
// in the document being parsed.
type Name struct {
	Space, Local string
}

// An Attr represents an attribute in an XML element (Name=Value).
type Attr struct {
	Name  Name
	Value string
}

// A Token is an interface holding one of the token types:
// [StartElement], [EndElement], [CharData], [Comment], [ProcInst], or [Directive].
type Token any

// A StartElement represents an XML start element.
type StartElement struct {
	Name Name
	Attr []Attr
}

// Copy creates a new copy of StartElement.
// The result owns a fresh attribute slice, so mutating its
// attributes does not affect the original.
func (e StartElement) Copy() StartElement {
	dup := make([]Attr, len(e.Attr))
	copy(dup, e.Attr)
	e.Attr = dup
	return e
}

// End returns the corresponding XML end element.
func (e StartElement) End() EndElement {
	return EndElement{Name: e.Name}
}

// An EndElement represents an XML end element.
type EndElement struct {
	Name Name
}
// A CharData represents XML character data (raw text),
// in which XML escape sequences have been replaced by
// the characters they represent.
type CharData []byte

// Copy creates a new copy of CharData.
func (c CharData) Copy() CharData {
	return CharData(bytes.Clone(c))
}

// A Comment represents an XML comment of the form <!--comment-->.
// The bytes do not include the <!-- and --> comment markers.
type Comment []byte

// Copy creates a new copy of Comment.
func (c Comment) Copy() Comment {
	return Comment(bytes.Clone(c))
}

// A ProcInst represents an XML processing instruction of the form <?target inst?>
type ProcInst struct {
	Target string
	Inst   []byte
}

// Copy creates a new copy of ProcInst.
// Only Inst needs cloning; Target is an immutable string.
func (p ProcInst) Copy() ProcInst {
	q := p
	q.Inst = bytes.Clone(p.Inst)
	return q
}

// A Directive represents an XML directive of the form <!text>.
// The bytes do not include the <! and > markers.
type Directive []byte

// Copy creates a new copy of Directive.
func (d Directive) Copy() Directive {
	return Directive(bytes.Clone(d))
}
// CopyToken returns a copy of a Token.
func CopyToken(t Token) Token {
	switch tok := t.(type) {
	case StartElement:
		return tok.Copy()
	case CharData:
		return tok.Copy()
	case Comment:
		return tok.Copy()
	case ProcInst:
		return tok.Copy()
	case Directive:
		return tok.Copy()
	}
	// Remaining token types (e.g. EndElement) hold no mutable
	// slices, so the value itself is already an independent copy.
	return t
}
// A TokenReader is anything that can decode a stream of XML tokens, including a
// [Decoder].
//
// When Token encounters an error or end-of-file condition after successfully
// reading a token, it returns the token. It may return the (non-nil) error from
// the same call or return the error (and a nil token) from a subsequent call.
// An instance of this general case is that a TokenReader returning a non-nil
// token at the end of the token stream may return either io.EOF or a nil error.
// The next Read should return nil, [io.EOF].
//
// Implementations of Token are discouraged from returning a nil token with a
// nil error. Callers should treat a return of nil, nil as indicating that
// nothing happened; in particular it does not indicate EOF.
type TokenReader interface {
	// Token returns the next token in the stream, following the
	// contract described above.
	Token() (Token, error)
}
// A Decoder represents an XML parser reading a particular input stream.
// The parser assumes that its input is encoded in UTF-8.
type Decoder struct {
	// Strict defaults to true, enforcing the requirements
	// of the XML specification.
	// If set to false, the parser allows input containing common
	// mistakes:
	//	* If an element is missing an end tag, the parser invents
	//	  end tags as necessary to keep the return values from Token
	//	  properly balanced.
	//	* In attribute values and character data, unknown or malformed
	//	  character entities (sequences beginning with &) are left alone.
	//
	// Setting:
	//
	//	d.Strict = false
	//	d.AutoClose = xml.HTMLAutoClose
	//	d.Entity = xml.HTMLEntity
	//
	// creates a parser that can handle typical HTML.
	//
	// Strict mode does not enforce the requirements of the XML name spaces TR.
	// In particular it does not reject name space tags using undefined prefixes.
	// Such tags are recorded with the unknown prefix as the name space URL.
	Strict bool

	// When Strict == false, AutoClose indicates a set of elements to
	// consider closed immediately after they are opened, regardless
	// of whether an end element is present.
	AutoClose []string

	// Entity can be used to map non-standard entity names to string replacements.
	// The parser behaves as if these standard mappings are present in the map,
	// regardless of the actual map content:
	//
	//	"lt": "<",
	//	"gt": ">",
	//	"amp": "&",
	//	"apos": "'",
	//	"quot": `"`,
	Entity map[string]string

	// CharsetReader, if non-nil, defines a function to generate
	// charset-conversion readers, converting from the provided
	// non-UTF-8 charset into UTF-8. If CharsetReader is nil or
	// returns an error, parsing stops with an error. One of the
	// CharsetReader's result values must be non-nil.
	CharsetReader func(charset string, input io.Reader) (io.Reader, error)

	// DefaultSpace sets the default name space used for unadorned tags,
	// as if the entire XML stream were wrapped in an element containing
	// the attribute xmlns="DefaultSpace".
	DefaultSpace string

	r              io.ByteReader // byte source, wrapped in bufio by switchToReader if needed
	t              TokenReader   // token source when built by NewTokenDecoder; bypasses r
	buf            bytes.Buffer  // scratch buffer for the token currently being scanned
	saved          *bytes.Buffer // when non-nil, getc mirrors every byte read here (see savedOffset)
	stk            *stack        // parse stack: open elements, ns undo records, EOF sentinels
	free           *stack        // free list of stack entries, reused by push/pushEOF
	needClose      bool          // if set, rawToken returns EndElement{toClose} next
	toClose        Name          // element name to synthesize an end tag for
	nextToken      Token         // token pushed back by Token during autoClose handling
	nextByte       int           // single pushed-back byte (ungetc), or -1 if none
	ns             map[string]string // in-scope prefix -> name space URL bindings
	err            error         // sticky error; once set, getc and rawToken fail
	line           int           // current line number (1-based; see NewDecoder)
	linestart      int64         // byte offset of the start of the current line
	offset         int64         // byte offset of the current decoder position
	unmarshalDepth int           // nesting depth of Unmarshal; RawToken is disallowed when > 0
}
// NewDecoder creates a new XML parser reading from r.
// If r does not implement [io.ByteReader], NewDecoder will
// do its own buffering.
func NewDecoder(r io.Reader) *Decoder {
	d := &Decoder{
		Strict:   true, // enforce the XML spec by default
		ns:       make(map[string]string),
		nextByte: -1, // no pushed-back byte yet
		line:     1,  // line numbers are 1-based
	}
	d.switchToReader(r)
	return d
}
// NewTokenDecoder creates a new XML parser using an underlying token stream.
func NewTokenDecoder(t TokenReader) *Decoder {
	// A Decoder is itself a TokenReader; reuse it directly.
	if d, ok := t.(*Decoder); ok {
		return d
	}
	return &Decoder{
		Strict:   true, // enforce the XML spec by default
		t:        t,
		ns:       make(map[string]string),
		nextByte: -1, // no pushed-back byte yet
		line:     1,  // line numbers are 1-based
	}
}
// Token returns the next XML token in the input stream.
// At the end of the input stream, Token returns nil, [io.EOF].
//
// Slices of bytes in the returned token data refer to the
// parser's internal buffer and remain valid only until the next
// call to Token. To acquire a copy of the bytes, call [CopyToken]
// or the token's Copy method.
//
// Token expands self-closing elements such as <br>
// into separate start and end elements returned by successive calls.
//
// Token guarantees that the [StartElement] and [EndElement]
// tokens it returns are properly nested and matched:
// if Token encounters an unexpected end element
// or EOF before all expected end elements,
// it will return an error.
//
// If [Decoder.CharsetReader] is called and returns an error,
// the error is wrapped and returned.
//
// Token implements XML name spaces as described by
// https://www.w3.org/TR/REC-xml-names/. Each of the
// [Name] structures contained in the Token has the Space
// set to the URL identifying its name space when known.
// If Token encounters an unrecognized name space prefix,
// it uses the prefix as the Space rather than report an error.
func (d *Decoder) Token() (Token, error) {
	var t Token
	var err error
	// An stkEOF sentinel (pushed by pushEOF) forces EOF until popEOF.
	if d.stk != nil && d.stk.kind == stkEOF {
		return nil, io.EOF
	}
	if d.nextToken != nil {
		// A token was pushed back by a previous autoClose; return it first.
		t = d.nextToken
		d.nextToken = nil
	} else {
		if t, err = d.rawToken(); t == nil && err != nil {
			// EOF while elements are still open is a syntax error.
			if err == io.EOF && d.stk != nil && d.stk.kind != stkEOF {
				err = d.syntaxError("unexpected EOF")
			}
			return nil, err
		}
		// We still have a token to process, so clear any
		// errors (e.g. EOF) and proceed.
		err = nil
	}
	if !d.Strict {
		// Invent an end tag for AutoClose elements, saving t for later.
		if t1, ok := d.autoClose(t); ok {
			d.nextToken = t
			t = t1
		}
	}
	switch t1 := t.(type) {
	case StartElement:
		// In XML name spaces, the translations listed in the
		// attributes apply to the element name and
		// to the other attribute names, so process
		// the translations first.
		for _, a := range t1.Attr {
			if a.Name.Space == xmlnsPrefix {
				// xmlns:prefix="url": record the old binding for undo.
				v, ok := d.ns[a.Name.Local]
				d.pushNs(a.Name.Local, v, ok)
				d.ns[a.Name.Local] = a.Value
			}
			if a.Name.Space == "" && a.Name.Local == xmlnsPrefix {
				// Default space for untagged names
				v, ok := d.ns[""]
				d.pushNs("", v, ok)
				d.ns[""] = a.Value
			}
		}

		d.pushElement(t1.Name)
		d.translate(&t1.Name, true)
		for i := range t1.Attr {
			d.translate(&t1.Attr[i].Name, false)
		}
		t = t1

	case EndElement:
		if !d.popElement(&t1) {
			return nil, d.err
		}
		t = t1
	}
	return t, err
}
// Well-known name space names used by translate.
const (
	xmlURL      = "http://www.w3.org/XML/1998/namespace" // canonical URL for the reserved "xml" prefix
	xmlnsPrefix = "xmlns"                                // attribute prefix that declares a name space
	xmlPrefix   = "xml"                                  // reserved prefix, always bound to xmlURL
)
// translate applies the current name space bindings to n.
// The default name space (for Space == "") applies only to
// element names, not to attribute names. Names that declare
// name spaces (xmlns, xmlns:foo) are never translated, and the
// reserved "xml" prefix always maps to the fixed xmlURL.
func (d *Decoder) translate(n *Name, isElementName bool) {
	// Names exempt from translation.
	if n.Space == xmlnsPrefix ||
		(n.Space == "" && !isElementName) ||
		(n.Space == "" && n.Local == xmlnsPrefix) {
		return
	}
	// The "xml" prefix is predefined.
	if n.Space == xmlPrefix {
		n.Space = xmlURL
	}
	if v, ok := d.ns[n.Space]; ok {
		n.Space = v
	} else if n.Space == "" {
		n.Space = d.DefaultSpace
	}
}
// switchToReader installs r as the decoder's byte source.
// If r already provides ReadByte it is used directly (assumed
// efficient enough); otherwise it is wrapped in a bufio.Reader.
func (d *Decoder) switchToReader(r io.Reader) {
	rb, ok := r.(io.ByteReader)
	if !ok {
		rb = bufio.NewReader(r)
	}
	d.r = rb
}
// Parsing state - stack holds old name space translations
// and the current set of open elements. The translations to pop when
// ending a given tag are *below* it on the stack, which is
// more work but forced on us by XML.
type stack struct {
	next *stack // next entry down the stack (or next free entry)
	kind int    // stkStart, stkNs, or stkEOF
	name Name   // element name (stkStart) or saved ns binding (stkNs)
	ok   bool   // for stkNs: whether the saved binding existed at all
}
const (
	stkStart = iota // an open element (pushed by pushElement)
	stkNs           // a saved name space binding to restore (pushed by pushNs)
	stkEOF          // sentinel: Token must report io.EOF (spliced in by pushEOF)
)
// push places a new entry of the given kind on top of the parse
// stack, reusing an entry from the free list when one is available,
// and returns it for the caller to fill in.
func (d *Decoder) push(kind int) *stack {
	s := d.free
	if s == nil {
		s = new(stack)
	} else {
		d.free = s.next
	}
	s.next = d.stk
	s.kind = kind
	d.stk = s
	return s
}
// pop removes and returns the top entry of the parse stack,
// moving it onto the free list for reuse. It returns nil when
// the stack is empty.
func (d *Decoder) pop() *stack {
	s := d.stk
	if s == nil {
		return nil
	}
	d.stk = s.next
	s.next = d.free
	d.free = s
	return s
}
// Record that after the current element is finished
// (that element is already pushed on the stack)
// Token should return EOF until popEOF is called.
func (d *Decoder) pushEOF() {
	// Walk down stack to find Start.
	// It might not be the top, because there might be stkNs
	// entries above it.
	start := d.stk
	for start.kind != stkStart {
		start = start.next
	}
	// The stkNs entries below a start are associated with that
	// element too; skip over them.
	for start.next != nil && start.next.kind == stkNs {
		start = start.next
	}
	// Take an entry from the free list (or allocate one), as push does,
	// but splice it in below start rather than on top of the stack.
	s := d.free
	if s != nil {
		d.free = s.next
	} else {
		s = new(stack)
	}
	s.kind = stkEOF
	s.next = start.next
	start.next = s
}
// Undo a pushEOF.
// The element must have been finished, so the EOF should be at the top of the stack.
// Reports whether an EOF sentinel was actually removed.
func (d *Decoder) popEOF() bool {
	if s := d.stk; s != nil && s.kind == stkEOF {
		d.pop()
		return true
	}
	return false
}
// Record that we are starting an element with the given name.
func (d *Decoder) pushElement(name Name) {
	d.push(stkStart).name = name
}
// Record that we are changing the value of ns[local].
// The old value is url, ok (ok reports whether a binding existed),
// so popElement can restore or delete it later.
func (d *Decoder) pushNs(local string, url string, ok bool) {
	s := d.push(stkNs)
	s.name = Name{Space: url, Local: local}
	s.ok = ok
}
// syntaxError creates a SyntaxError annotated with the decoder's
// current line number.
func (d *Decoder) syntaxError(msg string) error {
	err := &SyntaxError{Msg: msg, Line: d.line}
	return err
}
// Record that we are ending an element with the given name.
// The name must match the record at the top of the stack,
// which must be a pushElement record.
// After popping the element, apply any undo records from
// the stack to restore the name translations that existed
// before we saw this element.
func (d *Decoder) popElement(t *EndElement) bool {
	s := d.pop()
	name := t.Name
	switch {
	case s == nil || s.kind != stkStart:
		// End element with no open element to match.
		d.err = d.syntaxError("unexpected end element </" + name.Local + ">")
		return false
	case s.name.Local != name.Local:
		if !d.Strict {
			// Non-strict mode: pretend the open element was closed here;
			// the actual end tag is replayed by rawToken via needClose.
			d.needClose = true
			d.toClose = t.Name
			t.Name = s.name
			return true
		}
		d.err = d.syntaxError("element <" + s.name.Local + "> closed by </" + name.Local + ">")
		return false
	case s.name.Space != name.Space:
		// Quote the empty space name so the message stays readable.
		ns := name.Space
		if name.Space == "" {
			ns = `""`
		}
		d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space +
			" closed by </" + name.Local + "> in space " + ns)
		return false
	}
	d.translate(&t.Name, true)

	// Pop stack until a Start or EOF is on the top, undoing the
	// translations that were associated with the element we just closed.
	for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF {
		s := d.pop()
		if s.ok {
			d.ns[s.name.Local] = s.name.Space
		} else {
			delete(d.ns, s.name.Local)
		}
	}
	return true
}
// autoClose checks whether the element on top of the stack is in the
// AutoClose list (case-insensitively); if so and t is not already the
// matching end tag, it invents that end tag. The caller is responsible
// for re-queuing t.
func (d *Decoder) autoClose(t Token) (Token, bool) {
	if d.stk == nil || d.stk.kind != stkStart {
		return nil, false
	}
	for _, name := range d.AutoClose {
		if !strings.EqualFold(name, d.stk.name.Local) {
			continue
		}
		// This one should be auto closed if t doesn't close it.
		et, isEnd := t.(EndElement)
		if isEnd && strings.EqualFold(et.Name.Local, d.stk.name.Local) {
			break
		}
		return EndElement{d.stk.name}, true
	}
	return nil, false
}
// errRawToken is returned by RawToken when the decoder is currently
// inside a user-supplied UnmarshalXML call (tracked by unmarshalDepth).
var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method")

// RawToken is like [Decoder.Token] but does not verify that
// start and end elements match and does not translate
// name space prefixes to their corresponding URLs.
func (d *Decoder) RawToken() (Token, error) {
	if d.unmarshalDepth > 0 {
		return nil, errRawToken
	}
	return d.rawToken()
}
// rawToken reads and returns the next token from the input without
// name space translation or start/end matching. It also interprets
// the <?xml ...?> declaration (validating the version and switching
// charsets via CharsetReader) and splits self-closing elements into
// a StartElement now plus an EndElement on the following call.
//
// Fix: the encoding check previously tested enc != "utf-8" and
// enc != "UTF-8" in addition to !strings.EqualFold(enc, "utf-8");
// the explicit comparisons are fully subsumed by EqualFold and
// have been removed (behavior unchanged).
func (d *Decoder) rawToken() (Token, error) {
	if d.t != nil {
		// Tokens come from an underlying TokenReader (NewTokenDecoder).
		return d.t.Token()
	}
	if d.err != nil {
		return nil, d.err
	}
	if d.needClose {
		// The last element we read was self-closing and
		// we returned just the StartElement half.
		// Return the EndElement half now.
		d.needClose = false
		return EndElement{d.toClose}, nil
	}

	b, ok := d.getc()
	if !ok {
		return nil, d.err
	}

	if b != '<' {
		// Text section.
		d.ungetc(b)
		data := d.text(-1, false)
		if data == nil {
			return nil, d.err
		}
		return CharData(data), nil
	}

	if b, ok = d.mustgetc(); !ok {
		return nil, d.err
	}
	switch b {
	case '/':
		// </: End element
		var name Name
		if name, ok = d.nsname(); !ok {
			if d.err == nil {
				d.err = d.syntaxError("expected element name after </")
			}
			return nil, d.err
		}
		d.space()
		if b, ok = d.mustgetc(); !ok {
			return nil, d.err
		}
		if b != '>' {
			d.err = d.syntaxError("invalid characters between </" + name.Local + " and >")
			return nil, d.err
		}
		return EndElement{name}, nil

	case '?':
		// <?: Processing instruction.
		var target string
		if target, ok = d.name(); !ok {
			if d.err == nil {
				d.err = d.syntaxError("expected target name after <?")
			}
			return nil, d.err
		}
		d.space()
		d.buf.Reset()
		var b0 byte
		for {
			if b, ok = d.mustgetc(); !ok {
				return nil, d.err
			}
			d.buf.WriteByte(b)
			if b0 == '?' && b == '>' {
				break
			}
			b0 = b
		}
		data := d.buf.Bytes()
		data = data[0 : len(data)-2] // chop ?>

		if target == "xml" {
			// XML declaration: check the version and honor any
			// declared encoding via CharsetReader.
			content := string(data)
			ver := procInst("version", content)
			if ver != "" && ver != "1.0" {
				d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver)
				return nil, d.err
			}
			enc := procInst("encoding", content)
			if enc != "" && !strings.EqualFold(enc, "utf-8") {
				if d.CharsetReader == nil {
					d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc)
					return nil, d.err
				}
				newr, err := d.CharsetReader(enc, d.r.(io.Reader))
				if err != nil {
					d.err = fmt.Errorf("xml: opening charset %q: %w", enc, err)
					return nil, d.err
				}
				if newr == nil {
					panic("CharsetReader returned a nil Reader for charset " + enc)
				}
				d.switchToReader(newr)
			}
		}
		return ProcInst{target, data}, nil

	case '!':
		// <!: Maybe comment, maybe CDATA.
		if b, ok = d.mustgetc(); !ok {
			return nil, d.err
		}
		switch b {
		case '-': // <!-
			// Probably <!-- for a comment.
			if b, ok = d.mustgetc(); !ok {
				return nil, d.err
			}
			if b != '-' {
				d.err = d.syntaxError("invalid sequence <!- not part of <!--")
				return nil, d.err
			}
			// Look for terminator.
			d.buf.Reset()
			var b0, b1 byte
			for {
				if b, ok = d.mustgetc(); !ok {
					return nil, d.err
				}
				d.buf.WriteByte(b)
				if b0 == '-' && b1 == '-' {
					if b != '>' {
						d.err = d.syntaxError(
							`invalid sequence "--" not allowed in comments`)
						return nil, d.err
					}
					break
				}
				b0, b1 = b1, b
			}
			data := d.buf.Bytes()
			data = data[0 : len(data)-3] // chop -->
			return Comment(data), nil

		case '[': // <![
			// Probably <![CDATA[.
			for i := 0; i < 6; i++ {
				if b, ok = d.mustgetc(); !ok {
					return nil, d.err
				}
				if b != "CDATA["[i] {
					d.err = d.syntaxError("invalid <![ sequence")
					return nil, d.err
				}
			}
			// Have <![CDATA[. Read text until ]]>.
			data := d.text(-1, true)
			if data == nil {
				return nil, d.err
			}
			return CharData(data), nil
		}

		// Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.
		// We don't care, but accumulate for caller. Quoted angle
		// brackets do not count for nesting.
		d.buf.Reset()
		d.buf.WriteByte(b)
		inquote := uint8(0)
		depth := 0
		for {
			if b, ok = d.mustgetc(); !ok {
				return nil, d.err
			}
			if inquote == 0 && b == '>' && depth == 0 {
				break
			}
		HandleB:
			d.buf.WriteByte(b)
			switch {
			case b == inquote:
				inquote = 0

			case inquote != 0:
				// in quotes, no special action

			case b == '\'' || b == '"':
				inquote = b

			case b == '>' && inquote == 0:
				depth--

			case b == '<' && inquote == 0:
				// Look for <!-- to begin comment.
				s := "!--"
				for i := 0; i < len(s); i++ {
					if b, ok = d.mustgetc(); !ok {
						return nil, d.err
					}
					if b != s[i] {
						// Not a comment: replay the bytes matched so far
						// and treat the < as ordinary nesting.
						for j := 0; j < i; j++ {
							d.buf.WriteByte(s[j])
						}
						depth++
						goto HandleB
					}
				}
				// Remove < that was written above.
				d.buf.Truncate(d.buf.Len() - 1)

				// Look for terminator.
				var b0, b1 byte
				for {
					if b, ok = d.mustgetc(); !ok {
						return nil, d.err
					}
					if b0 == '-' && b1 == '-' && b == '>' {
						break
					}
					b0, b1 = b1, b
				}

				// Replace the comment with a space in the returned Directive
				// body, so that markup parts that were separated by the comment
				// (like a "<" and a "!") don't get joined when re-encoding the
				// Directive, taking new semantic meaning.
				d.buf.WriteByte(' ')
			}
		}
		return Directive(d.buf.Bytes()), nil
	}

	// Must be an open element like <a href="foo">
	d.ungetc(b)

	var (
		name  Name
		empty bool
		attr  []Attr
	)
	if name, ok = d.nsname(); !ok {
		if d.err == nil {
			d.err = d.syntaxError("expected element name after <")
		}
		return nil, d.err
	}

	attr = []Attr{}
	for {
		d.space()
		if b, ok = d.mustgetc(); !ok {
			return nil, d.err
		}
		if b == '/' {
			// Self-closing element: emit the EndElement on the next call.
			empty = true
			if b, ok = d.mustgetc(); !ok {
				return nil, d.err
			}
			if b != '>' {
				d.err = d.syntaxError("expected /> in element")
				return nil, d.err
			}
			break
		}
		if b == '>' {
			break
		}
		d.ungetc(b)

		a := Attr{}
		if a.Name, ok = d.nsname(); !ok {
			if d.err == nil {
				d.err = d.syntaxError("expected attribute name in element")
			}
			return nil, d.err
		}
		d.space()
		if b, ok = d.mustgetc(); !ok {
			return nil, d.err
		}
		if b != '=' {
			if d.Strict {
				d.err = d.syntaxError("attribute name without = in element")
				return nil, d.err
			}
			// Non-strict: a bare attribute name means name="name".
			d.ungetc(b)
			a.Value = a.Name.Local
		} else {
			d.space()
			data := d.attrval()
			if data == nil {
				return nil, d.err
			}
			a.Value = string(data)
		}
		attr = append(attr, a)
	}
	if empty {
		d.needClose = true
		d.toClose = name
	}
	return StartElement{name, attr}, nil
}
// attrval reads one attribute value, quoted or (in non-strict mode)
// unquoted, and returns its bytes. On failure it returns nil with the
// error left in d.err.
func (d *Decoder) attrval() []byte {
	c, ok := d.mustgetc()
	if !ok {
		return nil
	}
	// Quoted value: read up to the matching quote.
	if c == '"' || c == '\'' {
		return d.text(int(c), false)
	}
	// Strict parsers reject anything unquoted.
	if d.Strict {
		d.err = d.syntaxError("unquoted or missing attribute value in element")
		return nil
	}
	// Non-strict: accept an unquoted HTML-style value.
	d.ungetc(c)
	d.buf.Reset()
	for {
		c, ok = d.mustgetc()
		if !ok {
			return nil
		}
		// https://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2
		isValueByte := 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' ||
			'0' <= c && c <= '9' || c == '_' || c == ':' || c == '-'
		if !isValueByte {
			d.ungetc(c)
			break
		}
		d.buf.WriteByte(c)
	}
	return d.buf.Bytes()
}
// space consumes any run of XML whitespace (space, CR, LF, tab),
// stopping at the first non-space byte or end of input.
func (d *Decoder) space() {
	for {
		b, ok := d.getc()
		if !ok {
			return
		}
		if b != ' ' && b != '\r' && b != '\n' && b != '\t' {
			d.ungetc(b)
			return
		}
	}
}
// Read a single byte.
// If there is no byte to read, return ok==false
// and leave the error in d.err.
// Maintain line number.
func (d *Decoder) getc() (b byte, ok bool) {
	if d.err != nil {
		// Errors are sticky: once set, no more bytes are delivered.
		return 0, false
	}
	if d.nextByte >= 0 {
		// Consume the byte pushed back by ungetc.
		b = byte(d.nextByte)
		d.nextByte = -1
	} else {
		b, d.err = d.r.ReadByte()
		if d.err != nil {
			return 0, false
		}
		if d.saved != nil {
			// Mirror freshly read bytes into the save buffer
			// (pushed-back bytes are already there; see savedOffset).
			d.saved.WriteByte(b)
		}
	}
	if b == '\n' {
		// Track line number and line start for InputPos / syntaxError.
		d.line++
		d.linestart = d.offset + 1
	}
	d.offset++
	return b, true
}
// InputOffset returns the input stream byte offset of the current decoder position.
// The offset gives the location of the end of the most recently returned token
// and the beginning of the next token.
func (d *Decoder) InputOffset() int64 {
	return d.offset // maintained by getc and ungetc
}
// InputPos returns the line of the current decoder position and the 1 based
// input position of the line. The position gives the location of the end of the
// most recently returned token.
func (d *Decoder) InputPos() (line, column int) {
	// d.linestart holds the offset of the first byte of the current
	// line, so the column is the 1-based distance from it.
	return d.line, int(d.offset-d.linestart) + 1
}
// savedOffset returns the current offset within the saved-bytes buffer.
// A byte pushed back via ungetc (nextByte >= 0) is still present in
// d.saved, so the offset is backed up by one in that case.
func (d *Decoder) savedOffset() int {
	if d.nextByte < 0 {
		return d.saved.Len()
	}
	return d.saved.Len() - 1
}
// mustgetc reads a single byte like getc, but treats end of input as
// a syntax error: on EOF it sets d.err to SyntaxError("unexpected EOF")
// and returns ok == false.
func (d *Decoder) mustgetc() (byte, bool) {
	b, ok := d.getc()
	if !ok && d.err == io.EOF {
		d.err = d.syntaxError("unexpected EOF")
	}
	return b, ok
}
// ungetc pushes back a single byte so the next getc returns it,
// undoing the offset and line accounting done when it was read.
func (d *Decoder) ungetc(b byte) {
	d.nextByte = int(b)
	d.offset--
	if b == '\n' {
		d.line--
	}
}
// entity maps the five predefined XML entity names to their replacement
// runes. text consults this map before Decoder.Entity, so these names
// are always recognized even without declarations.
var entity = map[string]rune{
	"lt":   '<',
	"gt":   '>',
	"amp":  '&',
	"apos": '\'',
	"quot": '"',
}
// Read plain text section (XML calls it character data).
// If quote >= 0, we are in a quoted string and need to find the matching quote.
// If cdata == true, we are in a <![CDATA[ section and need to find ]]>.
// On failure return nil and leave the error in d.err.
// The returned slice aliases d.buf and is valid only until the next token.
func (d *Decoder) text(quote int, cdata bool) []byte {
	var b0, b1 byte // the previous two bytes, for spotting ]]> and \r\n
	var trunc int   // bytes to trim from the end (the "]]" of a CDATA terminator)
	d.buf.Reset()
Input:
	for {
		b, ok := d.getc()
		if !ok {
			if cdata {
				if d.err == io.EOF {
					d.err = d.syntaxError("unexpected EOF in CDATA section")
				}
				return nil
			}
			break Input
		}

		// <![CDATA[ section ends with ]]>.
		// It is an error for ]]> to appear in ordinary text,
		// but it is allowed in quoted strings.
		if quote < 0 && b0 == ']' && b1 == ']' && b == '>' {
			if cdata {
				trunc = 2
				break Input
			}
			d.err = d.syntaxError("unescaped ]]> not in CDATA section")
			return nil
		}

		// Stop reading text if we see a <.
		if b == '<' && !cdata {
			if quote >= 0 {
				d.err = d.syntaxError("unescaped < inside quoted string")
				return nil
			}
			d.ungetc('<')
			break Input
		}
		if quote >= 0 && b == byte(quote) {
			break Input
		}
		if b == '&' && !cdata {
			// Read escaped character expression up to semicolon.
			// XML in all its glory allows a document to define and use
			// its own character names with <!ENTITY ...> directives.
			// Parsers are required to recognize lt, gt, amp, apos, and quot
			// even if they have not been declared.
			before := d.buf.Len()
			d.buf.WriteByte('&')
			var ok bool
			var text string
			var haveText bool
			if b, ok = d.mustgetc(); !ok {
				return nil
			}
			if b == '#' {
				// Numeric character reference: &#NNN; or &#xHHH;.
				d.buf.WriteByte(b)
				if b, ok = d.mustgetc(); !ok {
					return nil
				}
				base := 10
				if b == 'x' {
					base = 16
					d.buf.WriteByte(b)
					if b, ok = d.mustgetc(); !ok {
						return nil
					}
				}
				start := d.buf.Len()
				for '0' <= b && b <= '9' ||
					base == 16 && 'a' <= b && b <= 'f' ||
					base == 16 && 'A' <= b && b <= 'F' {
					d.buf.WriteByte(b)
					if b, ok = d.mustgetc(); !ok {
						return nil
					}
				}
				if b != ';' {
					d.ungetc(b)
				} else {
					s := string(d.buf.Bytes()[start:])
					d.buf.WriteByte(';')
					n, err := strconv.ParseUint(s, base, 64)
					if err == nil && n <= unicode.MaxRune {
						text = string(rune(n))
						haveText = true
					}
				}
			} else {
				// Named entity reference: the predefined five plus
				// anything supplied via Decoder.Entity.
				d.ungetc(b)
				if !d.readName() {
					if d.err != nil {
						return nil
					}
				}
				if b, ok = d.mustgetc(); !ok {
					return nil
				}
				if b != ';' {
					d.ungetc(b)
				} else {
					name := d.buf.Bytes()[before+1:]
					d.buf.WriteByte(';')
					if isName(name) {
						s := string(name)
						if r, ok := entity[s]; ok {
							text = string(r)
							haveText = true
						} else if d.Entity != nil {
							text, haveText = d.Entity[s]
						}
					}
				}
			}

			if haveText {
				// Replace the raw "&...;" bytes with the expansion.
				d.buf.Truncate(before)
				d.buf.WriteString(text)
				b0, b1 = 0, 0
				continue Input
			}
			if !d.Strict {
				// Non-strict mode: leave unrecognized entities as-is.
				b0, b1 = 0, 0
				continue Input
			}
			ent := string(d.buf.Bytes()[before:])
			if ent[len(ent)-1] != ';' {
				ent += " (no semicolon)"
			}
			d.err = d.syntaxError("invalid character entity " + ent)
			return nil
		}

		// We must rewrite unescaped \r and \r\n into \n.
		if b == '\r' {
			d.buf.WriteByte('\n')
		} else if b1 == '\r' && b == '\n' {
			// Skip \r\n--we already wrote \n.
		} else {
			d.buf.WriteByte(b)
		}

		b0, b1 = b1, b
	}
	data := d.buf.Bytes()
	data = data[0 : len(data)-trunc]

	// Inspect each rune for being a disallowed character.
	buf := data
	for len(buf) > 0 {
		r, size := utf8.DecodeRune(buf)
		if r == utf8.RuneError && size == 1 {
			d.err = d.syntaxError("invalid UTF-8")
			return nil
		}
		buf = buf[size:]
		if !isInCharacterRange(r) {
			d.err = d.syntaxError(fmt.Sprintf("illegal character code %U", r))
			return nil
		}
	}

	return data
}
// isInCharacterRange reports whether r is in the XML Character Range,
// per the Char production of https://www.xml.com/axml/testaxml.htm,
// Section 2.2 Characters.
func isInCharacterRange(r rune) bool {
	switch {
	case r == 0x09, r == 0x0A, r == 0x0D:
		return true // tab, newline, carriage return
	case 0x20 <= r && r <= 0xD7FF:
		return true // printable BMP range below the surrogates
	case 0xE000 <= r && r <= 0xFFFD:
		return true // BMP range above the surrogates
	case 0x10000 <= r && r <= 0x10FFFF:
		return true // supplementary planes
	}
	return false
}
// nsname reads a name space qualified name: a name with at most one
// colon in the middle. The part before the colon becomes Space and
// the part after becomes Local; with no usable colon, the whole
// string is the local name.
func (d *Decoder) nsname() (name Name, ok bool) {
	s, ok := d.name()
	if !ok {
		return
	}
	if strings.Count(s, ":") > 1 {
		// More than one colon cannot form a qualified name.
		return name, false
	}
	prefix, local, found := strings.Cut(s, ":")
	if !found || prefix == "" || local == "" {
		// No prefix (or a degenerate one like ":x" / "x:"):
		// treat the whole string as the local name.
		name.Local = s
	} else {
		name.Space = prefix
		name.Local = local
	}
	return name, true
}
// name reads a single XML name: /first(first|second)*/.
// It does not set d.err if the name is merely missing (unless an
// unexpected EOF is received), letting the caller provide better context.
func (d *Decoder) name() (string, bool) {
	d.buf.Reset()
	if !d.readName() {
		return "", false
	}

	// readName accepted the bytes; now validate the runes.
	raw := d.buf.Bytes()
	if isName(raw) {
		return string(raw), true
	}
	d.err = d.syntaxError("invalid XML name: " + string(raw))
	return "", false
}
// readName reads a name and appends its bytes to d.buf.
// The name is delimited by any single-byte character not valid in names.
// All multi-byte characters are accepted; the caller must check their validity.
func (d *Decoder) readName() (ok bool) {
	var c byte
	if c, ok = d.mustgetc(); !ok {
		return
	}
	// The first byte must start a name; otherwise push it back and fail.
	if c < utf8.RuneSelf && !isNameByte(c) {
		d.ungetc(c)
		return false
	}
	d.buf.WriteByte(c)
	for {
		if c, ok = d.mustgetc(); !ok {
			return
		}
		if c < utf8.RuneSelf && !isNameByte(c) {
			// Delimiter reached: push it back for the caller.
			d.ungetc(c)
			return true
		}
		d.buf.WriteByte(c)
	}
}
// isNameByte reports whether the ASCII byte c may appear in an XML
// name as scanned by readName (letters, digits, and _ : . -).
func isNameByte(c byte) bool {
	switch {
	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z', '0' <= c && c <= '9':
		return true
	case c == '_', c == ':', c == '.', c == '-':
		return true
	}
	return false
}
// isName reports whether the byte slice s is a valid XML name:
// a rune from the first table followed by runes from first or second.
// Invalid UTF-8 is rejected.
func isName(s []byte) bool {
	if len(s) == 0 {
		return false
	}
	c, n := utf8.DecodeRune(s)
	if c == utf8.RuneError && n == 1 {
		return false
	}
	if !unicode.Is(first, c) {
		return false
	}
	for s = s[n:]; len(s) > 0; s = s[n:] {
		c, n = utf8.DecodeRune(s)
		if c == utf8.RuneError && n == 1 {
			return false
		}
		if !unicode.Is(first, c) && !unicode.Is(second, c) {
			return false
		}
	}
	return true
}
// isNameString is the string counterpart of isName: it reports whether
// s is a valid XML name (first-table rune, then first/second-table runes).
// Invalid UTF-8 is rejected.
func isNameString(s string) bool {
	if len(s) == 0 {
		return false
	}
	c, n := utf8.DecodeRuneInString(s)
	if c == utf8.RuneError && n == 1 {
		return false
	}
	if !unicode.Is(first, c) {
		return false
	}
	for s = s[n:]; len(s) > 0; s = s[n:] {
		c, n = utf8.DecodeRuneInString(s)
		if c == utf8.RuneError && n == 1 {
			return false
		}
		if !unicode.Is(first, c) && !unicode.Is(second, c) {
			return false
		}
	}
	return true
}
// These tables were generated by cut and paste from Appendix B of
// the XML spec at https://www.xml.com/axml/testaxml.htm
// and then reformatting. First corresponds to (Letter | '_' | ':')
// and second corresponds to NameChar.
//
// Entries with Stride > 1 match every Stride-th code point in
// [Lo, Hi], per the unicode.RangeTable contract; ranges must be
// listed in sorted order.
var first = &unicode.RangeTable{
	R16: []unicode.Range16{
		{0x003A, 0x003A, 1},
		{0x0041, 0x005A, 1},
		{0x005F, 0x005F, 1},
		{0x0061, 0x007A, 1},
		{0x00C0, 0x00D6, 1},
		{0x00D8, 0x00F6, 1},
		{0x00F8, 0x00FF, 1},
		{0x0100, 0x0131, 1},
		{0x0134, 0x013E, 1},
		{0x0141, 0x0148, 1},
		{0x014A, 0x017E, 1},
		{0x0180, 0x01C3, 1},
		{0x01CD, 0x01F0, 1},
		{0x01F4, 0x01F5, 1},
		{0x01FA, 0x0217, 1},
		{0x0250, 0x02A8, 1},
		{0x02BB, 0x02C1, 1},
		{0x0386, 0x0386, 1},
		{0x0388, 0x038A, 1},
		{0x038C, 0x038C, 1},
		{0x038E, 0x03A1, 1},
		{0x03A3, 0x03CE, 1},
		{0x03D0, 0x03D6, 1},
		{0x03DA, 0x03E0, 2},
		{0x03E2, 0x03F3, 1},
		{0x0401, 0x040C, 1},
		{0x040E, 0x044F, 1},
		{0x0451, 0x045C, 1},
		{0x045E, 0x0481, 1},
		{0x0490, 0x04C4, 1},
		{0x04C7, 0x04C8, 1},
		{0x04CB, 0x04CC, 1},
		{0x04D0, 0x04EB, 1},
		{0x04EE, 0x04F5, 1},
		{0x04F8, 0x04F9, 1},
		{0x0531, 0x0556, 1},
		{0x0559, 0x0559, 1},
		{0x0561, 0x0586, 1},
		{0x05D0, 0x05EA, 1},
		{0x05F0, 0x05F2, 1},
		{0x0621, 0x063A, 1},
		{0x0641, 0x064A, 1},
		{0x0671, 0x06B7, 1},
		{0x06BA, 0x06BE, 1},
		{0x06C0, 0x06CE, 1},
		{0x06D0, 0x06D3, 1},
		{0x06D5, 0x06D5, 1},
		{0x06E5, 0x06E6, 1},
		{0x0905, 0x0939, 1},
		{0x093D, 0x093D, 1},
		{0x0958, 0x0961, 1},
		{0x0985, 0x098C, 1},
		{0x098F, 0x0990, 1},
		{0x0993, 0x09A8, 1},
		{0x09AA, 0x09B0, 1},
		{0x09B2, 0x09B2, 1},
		{0x09B6, 0x09B9, 1},
		{0x09DC, 0x09DD, 1},
		{0x09DF, 0x09E1, 1},
		{0x09F0, 0x09F1, 1},
		{0x0A05, 0x0A0A, 1},
		{0x0A0F, 0x0A10, 1},
		{0x0A13, 0x0A28, 1},
		{0x0A2A, 0x0A30, 1},
		{0x0A32, 0x0A33, 1},
		{0x0A35, 0x0A36, 1},
		{0x0A38, 0x0A39, 1},
		{0x0A59, 0x0A5C, 1},
		{0x0A5E, 0x0A5E, 1},
		{0x0A72, 0x0A74, 1},
		{0x0A85, 0x0A8B, 1},
		{0x0A8D, 0x0A8D, 1},
		{0x0A8F, 0x0A91, 1},
		{0x0A93, 0x0AA8, 1},
		{0x0AAA, 0x0AB0, 1},
		{0x0AB2, 0x0AB3, 1},
		{0x0AB5, 0x0AB9, 1},
		{0x0ABD, 0x0AE0, 0x23},
		{0x0B05, 0x0B0C, 1},
		{0x0B0F, 0x0B10, 1},
		{0x0B13, 0x0B28, 1},
		{0x0B2A, 0x0B30, 1},
		{0x0B32, 0x0B33, 1},
		{0x0B36, 0x0B39, 1},
		{0x0B3D, 0x0B3D, 1},
		{0x0B5C, 0x0B5D, 1},
		{0x0B5F, 0x0B61, 1},
		{0x0B85, 0x0B8A, 1},
		{0x0B8E, 0x0B90, 1},
		{0x0B92, 0x0B95, 1},
		{0x0B99, 0x0B9A, 1},
		{0x0B9C, 0x0B9C, 1},
		{0x0B9E, 0x0B9F, 1},
		{0x0BA3, 0x0BA4, 1},
		{0x0BA8, 0x0BAA, 1},
		{0x0BAE, 0x0BB5, 1},
		{0x0BB7, 0x0BB9, 1},
		{0x0C05, 0x0C0C, 1},
		{0x0C0E, 0x0C10, 1},
		{0x0C12, 0x0C28, 1},
		{0x0C2A, 0x0C33, 1},
		{0x0C35, 0x0C39, 1},
		{0x0C60, 0x0C61, 1},
		{0x0C85, 0x0C8C, 1},
		{0x0C8E, 0x0C90, 1},
		{0x0C92, 0x0CA8, 1},
		{0x0CAA, 0x0CB3, 1},
		{0x0CB5, 0x0CB9, 1},
		{0x0CDE, 0x0CDE, 1},
		{0x0CE0, 0x0CE1, 1},
		{0x0D05, 0x0D0C, 1},
		{0x0D0E, 0x0D10, 1},
		{0x0D12, 0x0D28, 1},
		{0x0D2A, 0x0D39, 1},
		{0x0D60, 0x0D61, 1},
		{0x0E01, 0x0E2E, 1},
		{0x0E30, 0x0E30, 1},
		{0x0E32, 0x0E33, 1},
		{0x0E40, 0x0E45, 1},
		{0x0E81, 0x0E82, 1},
		{0x0E84, 0x0E84, 1},
		{0x0E87, 0x0E88, 1},
		{0x0E8A, 0x0E8D, 3},
		{0x0E94, 0x0E97, 1},
		{0x0E99, 0x0E9F, 1},
		{0x0EA1, 0x0EA3, 1},
		{0x0EA5, 0x0EA7, 2},
		{0x0EAA, 0x0EAB, 1},
		{0x0EAD, 0x0EAE, 1},
		{0x0EB0, 0x0EB0, 1},
		{0x0EB2, 0x0EB3, 1},
		{0x0EBD, 0x0EBD, 1},
		{0x0EC0, 0x0EC4, 1},
		{0x0F40, 0x0F47, 1},
		{0x0F49, 0x0F69, 1},
		{0x10A0, 0x10C5, 1},
		{0x10D0, 0x10F6, 1},
		{0x1100, 0x1100, 1},
		{0x1102, 0x1103, 1},
		{0x1105, 0x1107, 1},
		{0x1109, 0x1109, 1},
		{0x110B, 0x110C, 1},
		{0x110E, 0x1112, 1},
		{0x113C, 0x1140, 2},
		{0x114C, 0x1150, 2},
		{0x1154, 0x1155, 1},
		{0x1159, 0x1159, 1},
		{0x115F, 0x1161, 1},
		{0x1163, 0x1169, 2},
		{0x116D, 0x116E, 1},
		{0x1172, 0x1173, 1},
		{0x1175, 0x119E, 0x119E - 0x1175},
		{0x11A8, 0x11AB, 0x11AB - 0x11A8},
		{0x11AE, 0x11AF, 1},
		{0x11B7, 0x11B8, 1},
		{0x11BA, 0x11BA, 1},
		{0x11BC, 0x11C2, 1},
		{0x11EB, 0x11F0, 0x11F0 - 0x11EB},
		{0x11F9, 0x11F9, 1},
		{0x1E00, 0x1E9B, 1},
		{0x1EA0, 0x1EF9, 1},
		{0x1F00, 0x1F15, 1},
		{0x1F18, 0x1F1D, 1},
		{0x1F20, 0x1F45, 1},
		{0x1F48, 0x1F4D, 1},
		{0x1F50, 0x1F57, 1},
		{0x1F59, 0x1F5B, 0x1F5B - 0x1F59},
		{0x1F5D, 0x1F5D, 1},
		{0x1F5F, 0x1F7D, 1},
		{0x1F80, 0x1FB4, 1},
		{0x1FB6, 0x1FBC, 1},
		{0x1FBE, 0x1FBE, 1},
		{0x1FC2, 0x1FC4, 1},
		{0x1FC6, 0x1FCC, 1},
		{0x1FD0, 0x1FD3, 1},
		{0x1FD6, 0x1FDB, 1},
		{0x1FE0, 0x1FEC, 1},
		{0x1FF2, 0x1FF4, 1},
		{0x1FF6, 0x1FFC, 1},
		{0x2126, 0x2126, 1},
		{0x212A, 0x212B, 1},
		{0x212E, 0x212E, 1},
		{0x2180, 0x2182, 1},
		{0x3007, 0x3007, 1},
		{0x3021, 0x3029, 1},
		{0x3041, 0x3094, 1},
		{0x30A1, 0x30FA, 1},
		{0x3105, 0x312C, 1},
		{0x4E00, 0x9FA5, 1},
		{0xAC00, 0xD7A3, 1},
	},
}
// second holds the additional runes allowed after the first rune of a
// name (NameChar in the XML spec); see the generation note above first.
// Entries with Stride > 1 match every Stride-th code point in [Lo, Hi].
var second = &unicode.RangeTable{
	R16: []unicode.Range16{
		{0x002D, 0x002E, 1},
		{0x0030, 0x0039, 1},
		{0x00B7, 0x00B7, 1},
		{0x02D0, 0x02D1, 1},
		{0x0300, 0x0345, 1},
		{0x0360, 0x0361, 1},
		{0x0387, 0x0387, 1},
		{0x0483, 0x0486, 1},
		{0x0591, 0x05A1, 1},
		{0x05A3, 0x05B9, 1},
		{0x05BB, 0x05BD, 1},
		{0x05BF, 0x05BF, 1},
		{0x05C1, 0x05C2, 1},
		{0x05C4, 0x0640, 0x0640 - 0x05C4},
		{0x064B, 0x0652, 1},
		{0x0660, 0x0669, 1},
		{0x0670, 0x0670, 1},
		{0x06D6, 0x06DC, 1},
		{0x06DD, 0x06DF, 1},
		{0x06E0, 0x06E4, 1},
		{0x06E7, 0x06E8, 1},
		{0x06EA, 0x06ED, 1},
		{0x06F0, 0x06F9, 1},
		{0x0901, 0x0903, 1},
		{0x093C, 0x093C, 1},
		{0x093E, 0x094C, 1},
		{0x094D, 0x094D, 1},
		{0x0951, 0x0954, 1},
		{0x0962, 0x0963, 1},
		{0x0966, 0x096F, 1},
		{0x0981, 0x0983, 1},
		{0x09BC, 0x09BC, 1},
		{0x09BE, 0x09BF, 1},
		{0x09C0, 0x09C4, 1},
		{0x09C7, 0x09C8, 1},
		{0x09CB, 0x09CD, 1},
		{0x09D7, 0x09D7, 1},
		{0x09E2, 0x09E3, 1},
		{0x09E6, 0x09EF, 1},
		{0x0A02, 0x0A3C, 0x3A},
		{0x0A3E, 0x0A3F, 1},
		{0x0A40, 0x0A42, 1},
		{0x0A47, 0x0A48, 1},
		{0x0A4B, 0x0A4D, 1},
		{0x0A66, 0x0A6F, 1},
		{0x0A70, 0x0A71, 1},
		{0x0A81, 0x0A83, 1},
		{0x0ABC, 0x0ABC, 1},
		{0x0ABE, 0x0AC5, 1},
		{0x0AC7, 0x0AC9, 1},
		{0x0ACB, 0x0ACD, 1},
		{0x0AE6, 0x0AEF, 1},
		{0x0B01, 0x0B03, 1},
		{0x0B3C, 0x0B3C, 1},
		{0x0B3E, 0x0B43, 1},
		{0x0B47, 0x0B48, 1},
		{0x0B4B, 0x0B4D, 1},
		{0x0B56, 0x0B57, 1},
		{0x0B66, 0x0B6F, 1},
		{0x0B82, 0x0B83, 1},
		{0x0BBE, 0x0BC2, 1},
		{0x0BC6, 0x0BC8, 1},
		{0x0BCA, 0x0BCD, 1},
		{0x0BD7, 0x0BD7, 1},
		{0x0BE7, 0x0BEF, 1},
		{0x0C01, 0x0C03, 1},
		{0x0C3E, 0x0C44, 1},
		{0x0C46, 0x0C48, 1},
		{0x0C4A, 0x0C4D, 1},
		{0x0C55, 0x0C56, 1},
		{0x0C66, 0x0C6F, 1},
		{0x0C82, 0x0C83, 1},
		{0x0CBE, 0x0CC4, 1},
		{0x0CC6, 0x0CC8, 1},
		{0x0CCA, 0x0CCD, 1},
		{0x0CD5, 0x0CD6, 1},
		{0x0CE6, 0x0CEF, 1},
		{0x0D02, 0x0D03, 1},
		{0x0D3E, 0x0D43, 1},
		{0x0D46, 0x0D48, 1},
		{0x0D4A, 0x0D4D, 1},
		{0x0D57, 0x0D57, 1},
		{0x0D66, 0x0D6F, 1},
		{0x0E31, 0x0E31, 1},
		{0x0E34, 0x0E3A, 1},
		{0x0E46, 0x0E46, 1},
		{0x0E47, 0x0E4E, 1},
		{0x0E50, 0x0E59, 1},
		{0x0EB1, 0x0EB1, 1},
		{0x0EB4, 0x0EB9, 1},
		{0x0EBB, 0x0EBC, 1},
		{0x0EC6, 0x0EC6, 1},
		{0x0EC8, 0x0ECD, 1},
		{0x0ED0, 0x0ED9, 1},
		{0x0F18, 0x0F19, 1},
		{0x0F20, 0x0F29, 1},
		{0x0F35, 0x0F39, 2},
		{0x0F3E, 0x0F3F, 1},
		{0x0F71, 0x0F84, 1},
		{0x0F86, 0x0F8B, 1},
		{0x0F90, 0x0F95, 1},
		{0x0F97, 0x0F97, 1},
		{0x0F99, 0x0FAD, 1},
		{0x0FB1, 0x0FB7, 1},
		{0x0FB9, 0x0FB9, 1},
		{0x20D0, 0x20DC, 1},
		{0x20E1, 0x3005, 0x3005 - 0x20E1},
		{0x302A, 0x302F, 1},
		{0x3031, 0x3035, 1},
		{0x3099, 0x309A, 1},
		{0x309D, 0x309E, 1},
		{0x30FC, 0x30FE, 1},
	},
}
// HTMLEntity is an entity map containing translations for the
// standard HTML entity characters.
//
// See the [Decoder.Strict] and [Decoder.Entity] fields' documentation.
//
// Note: this is the same map value as the package-private htmlEntity;
// callers should treat it as read-only, since any mutation would be
// observed by every Decoder configured with it.
var HTMLEntity map[string]string = htmlEntity
var htmlEntity = map[string]string{
	/*
		This table was generated from the HTML 4 entity appendix with the
		following pipeline (hget fetches a URL; ssam/sed rewrite each
		<!ENTITY ...> declaration into a Go map entry):

		hget http://www.w3.org/TR/html4/sgml/entities.html |
		ssam '
			,y /\>/ x/\<(.|\n)+/ s/\n/ /g
			,x v/^\<!ENTITY/d
			,s/\<!ENTITY ([^ ]+) .*U\+([0-9A-F][0-9A-F][0-9A-F][0-9A-F]) .+/	"\1": "\\u\2",/g
		'
	*/
"nbsp": "\u00A0",
"iexcl": "\u00A1",
"cent": "\u00A2",
"pound": "\u00A3",
"curren": "\u00A4",
"yen": "\u00A5",
"brvbar": "\u00A6",
"sect": "\u00A7",
"uml": "\u00A8",
"copy": "\u00A9",
"ordf": "\u00AA",
"laquo": "\u00AB",
"not": "\u00AC",
"shy": "\u00AD",
"reg": "\u00AE",
"macr": "\u00AF",
"deg": "\u00B0",
"plusmn": "\u00B1",
"sup2": "\u00B2",
"sup3": "\u00B3",
"acute": "\u00B4",
"micro": "\u00B5",
"para": "\u00B6",
"middot": "\u00B7",
"cedil": "\u00B8",
"sup1": "\u00B9",
"ordm": "\u00BA",
"raquo": "\u00BB",
"frac14": "\u00BC",
"frac12": "\u00BD",
"frac34": "\u00BE",
"iquest": "\u00BF",
"Agrave": "\u00C0",
"Aacute": "\u00C1",
"Acirc": "\u00C2",
"Atilde": "\u00C3",
"Auml": "\u00C4",
"Aring": "\u00C5",
"AElig": "\u00C6",
"Ccedil": "\u00C7",
"Egrave": "\u00C8",
"Eacute": "\u00C9",
"Ecirc": "\u00CA",
"Euml": "\u00CB",
"Igrave": "\u00CC",
"Iacute": "\u00CD",
"Icirc": "\u00CE",
"Iuml": "\u00CF",
"ETH": "\u00D0",
"Ntilde": "\u00D1",
"Ograve": "\u00D2",
"Oacute": "\u00D3",
"Ocirc": "\u00D4",
"Otilde": "\u00D5",
"Ouml": "\u00D6",
"times": "\u00D7",
"Oslash": "\u00D8",
"Ugrave": "\u00D9",
"Uacute": "\u00DA",
"Ucirc": "\u00DB",
"Uuml": "\u00DC",
"Yacute": "\u00DD",
"THORN": "\u00DE",
"szlig": "\u00DF",
"agrave": "\u00E0",
"aacute": "\u00E1",
"acirc": "\u00E2",
"atilde": "\u00E3",
"auml": "\u00E4",
"aring": "\u00E5",
"aelig": "\u00E6",
"ccedil": "\u00E7",
"egrave": "\u00E8",
"eacute": "\u00E9",
"ecirc": "\u00EA",
"euml": "\u00EB",
"igrave": "\u00EC",
"iacute": "\u00ED",
"icirc": "\u00EE",
"iuml": "\u00EF",
"eth": "\u00F0",
"ntilde": "\u00F1",
"ograve": "\u00F2",
"oacute": "\u00F3",
"ocirc": "\u00F4",
"otilde": "\u00F5",
"ouml": "\u00F6",
"divide": "\u00F7",
"oslash": "\u00F8",
"ugrave": "\u00F9",
"uacute": "\u00FA",
"ucirc": "\u00FB",
"uuml": "\u00FC",
"yacute": "\u00FD",
"thorn": "\u00FE",
"yuml": "\u00FF",
"fnof": "\u0192",
"Alpha": "\u0391",
"Beta": "\u0392",
"Gamma": "\u0393",
"Delta": "\u0394",
"Epsilon": "\u0395",
"Zeta": "\u0396",
"Eta": "\u0397",
"Theta": "\u0398",
"Iota": "\u0399",
"Kappa": "\u039A",
"Lambda": "\u039B",
"Mu": "\u039C",
"Nu": "\u039D",
"Xi": "\u039E",
"Omicron": "\u039F",
"Pi": "\u03A0",
"Rho": "\u03A1",
"Sigma": "\u03A3",
"Tau": "\u03A4",
"Upsilon": "\u03A5",
"Phi": "\u03A6",
"Chi": "\u03A7",
"Psi": "\u03A8",
"Omega": "\u03A9",
"alpha": "\u03B1",
"beta": "\u03B2",
"gamma": "\u03B3",
"delta": "\u03B4",
"epsilon": "\u03B5",
"zeta": "\u03B6",
"eta": "\u03B7",
"theta": "\u03B8",
"iota": "\u03B9",
"kappa": "\u03BA",
"lambda": "\u03BB",
"mu": "\u03BC",
"nu": "\u03BD",
"xi": "\u03BE",
"omicron": "\u03BF",
"pi": "\u03C0",
"rho": "\u03C1",
"sigmaf": "\u03C2",
"sigma": "\u03C3",
"tau": "\u03C4",
"upsilon": "\u03C5",
"phi": "\u03C6",
"chi": "\u03C7",
"psi": "\u03C8",
"omega": "\u03C9",
"thetasym": "\u03D1",
"upsih": "\u03D2",
"piv": "\u03D6",
"bull": "\u2022",
"hellip": "\u2026",
"prime": "\u2032",
"Prime": "\u2033",
"oline": "\u203E",
"frasl": "\u2044",
"weierp": "\u2118",
"image": "\u2111",
"real": "\u211C",
"trade": "\u2122",
"alefsym": "\u2135",
"larr": "\u2190",
"uarr": "\u2191",
"rarr": "\u2192",
"darr": "\u2193",
"harr": "\u2194",
"crarr": "\u21B5",
"lArr": "\u21D0",
"uArr": "\u21D1",
"rArr": "\u21D2",
"dArr": "\u21D3",
"hArr": "\u21D4",
"forall": "\u2200",
"part": "\u2202",
"exist": "\u2203",
"empty": "\u2205",
"nabla": "\u2207",
"isin": "\u2208",
"notin": "\u2209",
"ni": "\u220B",
"prod": "\u220F",
"sum": "\u2211",
"minus": "\u2212",
"lowast": "\u2217",
"radic": "\u221A",
"prop": "\u221D",
"infin": "\u221E",
"ang": "\u2220",
"and": "\u2227",
"or": "\u2228",
"cap": "\u2229",
"cup": "\u222A",
"int": "\u222B",
"there4": "\u2234",
"sim": "\u223C",
"cong": "\u2245",
"asymp": "\u2248",
"ne": "\u2260",
"equiv": "\u2261",
"le": "\u2264",
"ge": "\u2265",
"sub": "\u2282",
"sup": "\u2283",
"nsub": "\u2284",
"sube": "\u2286",
"supe": "\u2287",
"oplus": "\u2295",
"otimes": "\u2297",
"perp": "\u22A5",
"sdot": "\u22C5",
"lceil": "\u2308",
"rceil": "\u2309",
"lfloor": "\u230A",
"rfloor": "\u230B",
"lang": "\u2329",
"rang": "\u232A",
"loz": "\u25CA",
"spades": "\u2660",
"clubs": "\u2663",
"hearts": "\u2665",
"diams": "\u2666",
"quot": "\u0022",
"amp": "\u0026",
"lt": "\u003C",
"gt": "\u003E",
"OElig": "\u0152",
"oelig": "\u0153",
"Scaron": "\u0160",
"scaron": "\u0161",
"Yuml": "\u0178",
"circ": "\u02C6",
"tilde": "\u02DC",
"ensp": "\u2002",
"emsp": "\u2003",
"thinsp": "\u2009",
"zwnj": "\u200C",
"zwj": "\u200D",
"lrm": "\u200E",
"rlm": "\u200F",
"ndash": "\u2013",
"mdash": "\u2014",
"lsquo": "\u2018",
"rsquo": "\u2019",
"sbquo": "\u201A",
"ldquo": "\u201C",
"rdquo": "\u201D",
"bdquo": "\u201E",
"dagger": "\u2020",
"Dagger": "\u2021",
"permil": "\u2030",
"lsaquo": "\u2039",
"rsaquo": "\u203A",
"euro": "\u20AC",
}
// HTMLAutoClose is the set of HTML elements that
// should be considered to close automatically.
//
// See the [Decoder.Strict] and [Decoder.Entity] fields' documentation.
//
// Note: this aliases the package-private htmlAutoClose slice; callers
// should treat it as read-only.
var HTMLAutoClose []string = htmlAutoClose

// htmlAutoClose lists the "empty" (self-closing) HTML 4 elements,
// extracted from the loose DTD by the script below.
var htmlAutoClose = []string{
	/*
		hget http://www.w3.org/TR/html4/loose.dtd |
		9 sed -n 's/<!ELEMENT ([^ ]*) +- O EMPTY.+/	"\1",/p' | tr A-Z a-z
	*/
	"basefont",
	"br",
	"area",
	"link",
	"img",
	"param",
	"hr",
	"input",
	"col",
	"frame",
	"isindex",
	"base",
	"meta",
}
// Pre-encoded XML escape sequences used by escapeText and EscapeString.
// Numeric character references are used where they are shorter than the
// named entity (e.g. "&#34;" instead of "&quot;").
//
// NOTE(review): the previous text of this block contained the *decoded*
// characters (a raw quote, apostrophe, tab, newline, ...) inside the
// string literals — invalid Go, and it would have emitted the very
// characters that need escaping. Restored the character-reference
// literals.
var (
	escQuot = []byte("&#34;") // shorter than "&quot;"
	escApos = []byte("&#39;") // shorter than "&apos;"
	escAmp  = []byte("&amp;")
	escLT   = []byte("&lt;")
	escGT   = []byte("&gt;")
	escTab  = []byte("&#x9;")
	escNL   = []byte("&#xA;")
	escCR   = []byte("&#xD;")
	escFFFD = []byte("\uFFFD") // Unicode replacement character
)
// EscapeText writes to w the properly escaped XML equivalent
// of the plain text data s.
func EscapeText(w io.Writer, s []byte) error {
	return escapeText(w, s, true)
}

// escapeText writes to w the properly escaped XML equivalent
// of the plain text data s. If escapeNewline is true, newline
// characters will be escaped.
//
// The input is copied through in runs: last marks the start of the
// pending run of bytes needing no escape. When a rune that must be
// escaped is found, the run before it is flushed, the escape sequence
// is written, and the run restarts just past the rune.
func escapeText(w io.Writer, s []byte, escapeNewline bool) error {
	var esc []byte
	last := 0
	for i := 0; i < len(s); {
		r, width := utf8.DecodeRune(s[i:])
		i += width
		switch r {
		case '"':
			esc = escQuot
		case '\'':
			esc = escApos
		case '&':
			esc = escAmp
		case '<':
			esc = escLT
		case '>':
			esc = escGT
		case '\t':
			esc = escTab
		case '\n':
			if !escapeNewline {
				// continue applies to the enclosing for loop,
				// leaving the newline inside the unescaped run.
				continue
			}
			esc = escNL
		case '\r':
			esc = escCR
		default:
			// Replace runes outside the XML character range, and the
			// width-1 U+FFFD that DecodeRune yields for invalid UTF-8.
			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
				esc = escFFFD
				break
			}
			continue
		}
		// Flush the pending unescaped run, then the escape sequence.
		if _, err := w.Write(s[last : i-width]); err != nil {
			return err
		}
		if _, err := w.Write(esc); err != nil {
			return err
		}
		last = i
	}
	// Flush the trailing unescaped run.
	_, err := w.Write(s[last:])
	return err
}
// EscapeString writes to p the properly escaped XML equivalent
// of the plain text data s.
//
// This mirrors escapeText above, but reads from a string, always
// escapes newlines, and writes through the printer (so no per-write
// error checking is done here).
func (p *printer) EscapeString(s string) {
	var esc []byte
	last := 0 // start of the pending run of bytes needing no escape
	for i := 0; i < len(s); {
		r, width := utf8.DecodeRuneInString(s[i:])
		i += width
		switch r {
		case '"':
			esc = escQuot
		case '\'':
			esc = escApos
		case '&':
			esc = escAmp
		case '<':
			esc = escLT
		case '>':
			esc = escGT
		case '\t':
			esc = escTab
		case '\n':
			esc = escNL
		case '\r':
			esc = escCR
		default:
			// Replace runes outside the XML character range, and the
			// width-1 U+FFFD produced by decoding invalid UTF-8.
			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
				esc = escFFFD
				break
			}
			continue
		}
		// Flush the unescaped run, then the escape sequence.
		p.WriteString(s[last : i-width])
		p.Write(esc)
		last = i
	}
	p.WriteString(s[last:])
}

// Escape is like [EscapeText] but omits the error return value.
// It is provided for backwards compatibility with Go 1.0.
// Code targeting Go 1.1 or later should use [EscapeText].
func Escape(w io.Writer, s []byte) {
	// The write error is deliberately dropped for compatibility.
	EscapeText(w, s)
}
var (
cdataStart = []byte("<![CDATA[")
cdataEnd = []byte("]]>")
cdataEscape = []byte("]]]]><![CDATA[>")
)
// emitCDATA writes to w the CDATA-wrapped plain text data s.
// It escapes CDATA directives nested in s.
func emitCDATA(w io.Writer, s []byte) error {
if len(s) == 0 {
return nil
}
if _, err := w.Write(cdataStart); err != nil {
return err
}
for {
before, after, ok := bytes.Cut(s, cdataEnd)
if !ok {
break
}
// Found a nested CDATA directive end.
if _, err := w.Write(before); err != nil {
return err
}
if _, err := w.Write(cdataEscape); err != nil {
return err
}
s = after
}
if _, err := w.Write(s); err != nil {
return err
}
_, err := w.Write(cdataEnd)
return err
}
// procInst parses the `param="..."` or `param='...'`
// value out of the provided string, returning "" if not found.
//
// TODO: this parsing is somewhat lame and not exact.
// It works for all actual cases, though.
func procInst(param, s string) string {
	needle := param + "="
	var quote byte
	pos := 0
	// Scan forward until "param=" is found immediately followed by an
	// opening quote character; pos ends up just past that quote.
	for pos < len(s) {
		rest := s[pos:]
		idx := strings.Index(rest, needle)
		if idx < 0 || idx+len(needle) >= len(rest) {
			return ""
		}
		after := idx + len(needle)
		pos += after + 1
		if c := rest[after]; c == '\'' || c == '"' {
			quote = c
			break
		}
	}
	if quote == 0 {
		return ""
	}
	// The value runs to the matching closing quote.
	end := strings.IndexByte(s[pos:], quote)
	if end < 0 {
		return ""
	}
	return s[pos : pos+end]
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errors implements functions to manipulate errors.
//
// The [New] function creates errors whose only content is a text message.
//
// An error e wraps another error if e's type has one of the methods
//
// Unwrap() error
// Unwrap() []error
//
// If e.Unwrap() returns a non-nil error w or a slice containing w,
// then we say that e wraps w. A nil error returned from e.Unwrap()
// indicates that e does not wrap any error. It is invalid for an
// Unwrap method to return an []error containing a nil error value.
//
// An easy way to create wrapped errors is to call [fmt.Errorf] and apply
// the %w verb to the error argument:
//
// wrapsErr := fmt.Errorf("... %w ...", ..., err, ...)
//
// Successive unwrapping of an error creates a tree. The [Is] and [As]
// functions inspect an error's tree by examining first the error
// itself followed by the tree of each of its children in turn
// (pre-order, depth-first traversal).
//
// See https://go.dev/blog/go1.13-errors for a deeper discussion of the
// philosophy of wrapping and when to wrap.
//
// [Is] examines the tree of its first argument looking for an error that
// matches the second. It reports whether it finds a match. It should be
// used in preference to simple equality checks:
//
// if errors.Is(err, fs.ErrExist)
//
// is preferable to
//
// if err == fs.ErrExist
//
// because the former will succeed if err wraps [io/fs.ErrExist].
//
// [AsType] examines the tree of its argument looking for an error whose
// type matches its type argument. If it succeeds, it returns the
// corresponding value of that type and true. Otherwise, it returns the
// zero value of that type and false. The form
//
// if perr, ok := errors.AsType[*fs.PathError](err); ok {
// fmt.Println(perr.Path)
// }
//
// is preferable to
//
// if perr, ok := err.(*fs.PathError); ok {
// fmt.Println(perr.Path)
// }
//
// because the former will succeed if err wraps an [*io/fs.PathError].
package errors
// New returns an error that formats as the given text.
// Each call to New returns a distinct error value even if the text is identical.
func New(text string) error {
	e := errorString{s: text}
	return &e
}

// errorString is a trivial implementation of error.
// Distinctness of New's results comes from each call returning a
// pointer to a fresh allocation.
type errorString struct {
	s string
}

// Error returns the stored message.
func (e *errorString) Error() string {
	return e.s
}
// ErrUnsupported indicates that a requested operation cannot be performed,
// because it is unsupported. For example, a call to [os.Link] when using a
// file system that does not support hard links.
//
// Functions and methods should not return this error but should instead
// return an error including appropriate context that satisfies
//
//	errors.Is(err, errors.ErrUnsupported)
//
// either by directly wrapping ErrUnsupported or by implementing an [Is] method.
//
// Functions and methods should document the cases in which an error
// wrapping this will be returned.
//
// Callers should test for it with [Is] rather than ==, since it is
// normally reached through one or more layers of wrapping.
var ErrUnsupported = New("unsupported operation")
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package errors
import (
"unsafe"
)
// Join returns an error that wraps the given errors.
// Any nil error values are discarded.
// Join returns nil if every value in errs is nil.
// The error formats as the concatenation of the strings obtained
// by calling the Error method of each element of errs, with a newline
// between each string.
//
// A non-nil error returned by Join implements the Unwrap() []error method.
// The errors may be inspected with [Is] and [As].
func Join(errs ...error) error {
	// First pass: count non-nil errors so the slice can be allocated
	// exactly once with the right capacity.
	nonNil := 0
	for _, e := range errs {
		if e != nil {
			nonNil++
		}
	}
	if nonNil == 0 {
		return nil
	}
	joined := &joinError{errs: make([]error, 0, nonNil)}
	for _, e := range errs {
		if e != nil {
			joined.errs = append(joined.errs, e)
		}
	}
	return joined
}
// joinError is the error type returned by [Join]; it holds the non-nil
// wrapped errors in their original order.
type joinError struct {
	errs []error
}

func (e *joinError) Error() string {
	// Since Join returns nil if every value in errs is nil,
	// e.errs cannot be empty.
	if len(e.errs) == 1 {
		return e.errs[0].Error()
	}
	b := []byte(e.errs[0].Error())
	for _, err := range e.errs[1:] {
		b = append(b, '\n')
		b = append(b, err.Error()...)
	}
	// At this point, b has at least one byte '\n'.
	// unsafe.String reuses b's storage for the result, avoiding the
	// copy string(b) would make; b is never mutated after this point,
	// so the aliasing is safe.
	return unsafe.String(&b[0], len(b))
}

// Unwrap exposes the wrapped errors to [Is] and [As].
func (e *joinError) Unwrap() []error {
	return e.errs
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package errors
import (
"internal/reflectlite"
)
// Unwrap returns the result of calling the Unwrap method on err, if err's
// type contains an Unwrap method returning error.
// Otherwise, Unwrap returns nil.
//
// Unwrap only calls a method of the form "Unwrap() error".
// In particular Unwrap does not unwrap errors returned by [Join].
func Unwrap(err error) error {
	if u, ok := err.(interface{ Unwrap() error }); ok {
		return u.Unwrap()
	}
	return nil
}
// Is reports whether any error in err's tree matches target.
// The target must be comparable.
//
// The tree consists of err itself, followed by the errors obtained by repeatedly
// calling its Unwrap() error or Unwrap() []error method. When err wraps multiple
// errors, Is examines err followed by a depth-first traversal of its children.
//
// An error is considered to match a target if it is equal to that target or if
// it implements a method Is(error) bool such that Is(target) returns true.
//
// An error type might provide an Is method so it can be treated as equivalent
// to an existing error. For example, if MyError defines
//
//	func (m MyError) Is(target error) bool { return target == fs.ErrExist }
//
// then Is(MyError{}, fs.ErrExist) returns true. See [syscall.Errno.Is] for
// an example in the standard library. An Is method should only shallowly
// compare err and the target and not call [Unwrap] on either.
func Is(err, target error) bool {
	if err == nil || target == nil {
		return err == target
	}
	// Comparability is computed once here so the recursive walk can
	// skip the reflection check at every node.
	isComparable := reflectlite.TypeOf(target).Comparable()
	return is(err, target, isComparable)
}

// is walks err's tree (pre-order, depth-first) looking for target.
// targetComparable caches whether target's dynamic type supports ==.
func is(err, target error, targetComparable bool) bool {
	for {
		if targetComparable && err == target {
			return true
		}
		// Allow err's own Is method to declare equivalence.
		if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
			return true
		}
		switch x := err.(type) {
		case interface{ Unwrap() error }:
			// Single wrapper: iterate instead of recursing.
			err = x.Unwrap()
			if err == nil {
				return false
			}
		case interface{ Unwrap() []error }:
			// Multi-wrapper: recurse into each child in order.
			for _, err := range x.Unwrap() {
				if is(err, target, targetComparable) {
					return true
				}
			}
			return false
		default:
			return false
		}
	}
}
// As finds the first error in err's tree that matches target, and if one is found, sets
// target to that error value and returns true. Otherwise, it returns false.
//
// For most uses, prefer [AsType]. As is equivalent to [AsType] but sets its target
// argument rather than returning the matching error and doesn't require its target
// argument to implement error.
//
// The tree consists of err itself, followed by the errors obtained by repeatedly
// calling its Unwrap() error or Unwrap() []error method. When err wraps multiple
// errors, As examines err followed by a depth-first traversal of its children.
//
// An error matches target if the error's concrete value is assignable to the value
// pointed to by target, or if the error has a method As(any) bool such that
// As(target) returns true. In the latter case, the As method is responsible for
// setting target.
//
// An error type might provide an As method so it can be treated as if it were a
// different error type.
//
// As panics if target is not a non-nil pointer to either a type that implements
// error, or to any interface type.
func As(err error, target any) bool {
	if err == nil {
		return false
	}
	if target == nil {
		panic("errors: target cannot be nil")
	}
	// Validate target once up front so the recursive walk can assume a
	// well-formed destination.
	val := reflectlite.ValueOf(target)
	typ := val.Type()
	if typ.Kind() != reflectlite.Ptr || val.IsNil() {
		panic("errors: target must be a non-nil pointer")
	}
	targetType := typ.Elem()
	if targetType.Kind() != reflectlite.Interface && !targetType.Implements(errorType) {
		panic("errors: *target must be interface or implement error")
	}
	return as(err, target, val, targetType)
}

// as walks err's tree (pre-order, depth-first) and assigns the first
// match through targetVal. targetVal and targetType are the
// pre-validated reflection views of target.
func as(err error, target any, targetVal reflectlite.Value, targetType reflectlite.Type) bool {
	for {
		if reflectlite.TypeOf(err).AssignableTo(targetType) {
			targetVal.Elem().Set(reflectlite.ValueOf(err))
			return true
		}
		// Allow err to claim a match via its own As method; that method
		// is then responsible for setting target.
		if x, ok := err.(interface{ As(any) bool }); ok && x.As(target) {
			return true
		}
		switch x := err.(type) {
		case interface{ Unwrap() error }:
			// Single wrapper: iterate instead of recursing.
			err = x.Unwrap()
			if err == nil {
				return false
			}
		case interface{ Unwrap() []error }:
			for _, err := range x.Unwrap() {
				if err == nil {
					continue
				}
				if as(err, target, targetVal, targetType) {
					return true
				}
			}
			return false
		default:
			return false
		}
	}
}

// errorType is the reflection view of the error interface, used by As
// to validate *target.
var errorType = reflectlite.TypeOf((*error)(nil)).Elem()
// AsType finds the first error in err's tree that matches the type E, and
// if one is found, returns that error value and true. Otherwise, it
// returns the zero value of E and false.
//
// The tree consists of err itself, followed by the errors obtained by
// repeatedly calling its Unwrap() error or Unwrap() []error method. When
// err wraps multiple errors, AsType examines err followed by a
// depth-first traversal of its children.
//
// An error err matches the type E if the type assertion err.(E) holds,
// or if the error has a method As(any) bool such that err.As(target)
// returns true when target is a non-nil *E. In the latter case, the As
// method is responsible for setting target.
func AsType[E error](err error) (E, bool) {
	if err == nil {
		var zero E
		return zero, false
	}
	var pe *E // lazily initialized
	return asType(err, &pe)
}

// asType implements AsType's traversal. ppe points at a lazily allocated
// *E shared across the whole walk (including recursive calls), so at
// most one E is ever allocated for the As(any) bool probe.
func asType[E error](err error, ppe **E) (_ E, _ bool) {
	for {
		if e, ok := err.(E); ok {
			return e, true
		}
		if x, ok := err.(interface{ As(any) bool }); ok {
			if *ppe == nil {
				*ppe = new(E)
			}
			if x.As(*ppe) {
				return **ppe, true
			}
		}
		switch x := err.(type) {
		case interface{ Unwrap() error }:
			// Single wrapper: iterate instead of recursing.
			err = x.Unwrap()
			if err == nil {
				return // zero E, false
			}
		case interface{ Unwrap() []error }:
			for _, err := range x.Unwrap() {
				if err == nil {
					continue
				}
				if x, ok := asType(err, ppe); ok {
					return x, true
				}
			}
			return
		default:
			return
		}
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package expvar provides a standardized interface to public variables, such
// as operation counters in servers. It exposes these variables via HTTP at
// /debug/vars in JSON format. As of Go 1.22, the /debug/vars request must
// use GET.
//
// Operations to set or modify these public variables are atomic.
//
// In addition to adding the HTTP handler, this package registers the
// following variables:
//
// cmdline os.Args
// memstats runtime.Memstats
//
// The package is sometimes only imported for the side effect of
// registering its HTTP handler and the above variables. To use it
// this way, link this package into your program:
//
// import _ "expvar"
package expvar
import (
"encoding/json"
"internal/godebug"
"log"
"math"
"net/http"
"os"
"runtime"
"slices"
"strconv"
"sync"
"sync/atomic"
"unicode/utf8"
)
// Var is an abstract type for all exported variables.
type Var interface {
	// String returns a valid JSON value for the variable.
	// Types with String methods that do not return valid JSON
	// (such as time.Time) must not be used as a Var.
	String() string
}

// jsonVar is an optional interface implemented by this package's own
// Var types; it lets the encoder append directly to a byte slice
// instead of building an intermediate string via String.
type jsonVar interface {
	// appendJSON appends the JSON representation of the receiver to b.
	appendJSON(b []byte) []byte
}
// Int is a 64-bit integer variable that satisfies the [Var] interface.
// The zero value is ready to use and holds 0.
type Int struct {
	i atomic.Int64
}

// Value returns the current value.
func (v *Int) Value() int64 {
	return v.i.Load()
}

// String implements [Var]; the JSON form of an integer is its decimal
// text.
func (v *Int) String() string {
	return strconv.FormatInt(v.i.Load(), 10)
}

// appendJSON implements jsonVar.
func (v *Int) appendJSON(b []byte) []byte {
	return strconv.AppendInt(b, v.i.Load(), 10)
}

// Add atomically adds delta to v.
func (v *Int) Add(delta int64) {
	v.i.Add(delta)
}

// Set atomically stores value into v.
func (v *Int) Set(value int64) {
	v.i.Store(value)
}
// Float is a 64-bit float variable that satisfies the [Var] interface.
// The value is stored as its IEEE-754 bit pattern in an atomic.Uint64.
type Float struct {
	f atomic.Uint64
}

// Value returns the current value.
func (v *Float) Value() float64 {
	return math.Float64frombits(v.f.Load())
}

// String implements [Var], formatting the value as JSON does ('g',
// shortest representation).
func (v *Float) String() string {
	return string(v.appendJSON(nil))
}

// appendJSON implements jsonVar.
func (v *Float) appendJSON(b []byte) []byte {
	return strconv.AppendFloat(b, v.Value(), 'g', -1, 64)
}

// Add adds delta to v.
func (v *Float) Add(delta float64) {
	// Compare-and-swap loop: floats have no atomic add, so retry until
	// the bit pattern we read is still current when we write.
	for {
		old := v.f.Load()
		next := math.Float64bits(math.Float64frombits(old) + delta)
		if v.f.CompareAndSwap(old, next) {
			return
		}
	}
}

// Set sets v to value.
func (v *Float) Set(value float64) {
	v.f.Store(math.Float64bits(value))
}
// Map is a string-to-Var map variable that satisfies the [Var] interface.
// Values live in the sync.Map m; keysMu guards the separate sorted key
// slice used to give Do and the JSON output a deterministic order.
type Map struct {
	m      sync.Map // map[string]Var
	keysMu sync.RWMutex
	keys   []string // sorted
}

// KeyValue represents a single entry in a [Map].
type KeyValue struct {
	Key   string
	Value Var
}

func (v *Map) String() string {
	return string(v.appendJSON(nil))
}

func (v *Map) appendJSON(b []byte) []byte {
	return v.appendJSONMayExpand(b, false)
}

// appendJSONMayExpand appends the map as a JSON object. With expand set,
// entries are separated by newlines instead of spaces and the object is
// framed by newlines (the format served at /debug/vars).
func (v *Map) appendJSONMayExpand(b []byte, expand bool) []byte {
	afterCommaDelim := byte(' ')
	mayAppendNewline := func(b []byte) []byte { return b }
	if expand {
		afterCommaDelim = '\n'
		mayAppendNewline = func(b []byte) []byte { return append(b, '\n') }
	}
	b = append(b, '{')
	b = mayAppendNewline(b)
	first := true
	// Do iterates in sorted key order under the read lock; the closure
	// grows b, which is re-read after Do returns.
	v.Do(func(kv KeyValue) {
		if !first {
			b = append(b, ',', afterCommaDelim)
		}
		first = false
		b = appendJSONQuote(b, kv.Key)
		b = append(b, ':', ' ')
		switch v := kv.Value.(type) {
		case nil:
			b = append(b, "null"...)
		case jsonVar:
			// Fast path: append directly without building a string.
			b = v.appendJSON(b)
		default:
			b = append(b, v.String()...)
		}
	})
	b = mayAppendNewline(b)
	b = append(b, '}')
	b = mayAppendNewline(b)
	return b
}
// Init removes all keys from the map.
func (v *Map) Init() *Map {
	v.keysMu.Lock()
	defer v.keysMu.Unlock()
	v.keys = v.keys[:0] // keep the backing array for reuse
	v.m.Clear()
	return v
}

// addKey updates the sorted list of keys in v.keys.
func (v *Map) addKey(key string) {
	v.keysMu.Lock()
	defer v.keysMu.Unlock()
	// Using insertion sort to place key into the already-sorted v.keys.
	i, found := slices.BinarySearch(v.keys, key)
	if found {
		return
	}
	v.keys = slices.Insert(v.keys, i, key)
}

// Get returns the Var stored under key, or nil if the key is absent
// (or if the stored value does not implement Var).
func (v *Map) Get(key string) Var {
	i, _ := v.m.Load(key)
	av, _ := i.(Var)
	return av
}

// Set stores av under key, registering the key in the sorted key list
// the first time it is seen.
func (v *Map) Set(key string, av Var) {
	// Before we store the value, check to see whether the key is new. Try a Load
	// before LoadOrStore: LoadOrStore causes the key interface to escape even on
	// the Load path.
	if _, ok := v.m.Load(key); !ok {
		if _, dup := v.m.LoadOrStore(key, av); !dup {
			v.addKey(key)
			return
		}
	}
	v.m.Store(key, av)
}
// Add adds delta to the *[Int] value stored under the given map key.
// A missing entry is created as a fresh Int.
func (v *Map) Add(key string, delta int64) {
	i, ok := v.m.Load(key)
	if !ok {
		var dup bool
		// Create the entry on first use; if another goroutine raced us
		// to it, dup is true and the key was already registered.
		i, dup = v.m.LoadOrStore(key, new(Int))
		if !dup {
			v.addKey(key)
		}
	}

	// Add to Int; ignore otherwise.
	if iv, ok := i.(*Int); ok {
		iv.Add(delta)
	}
}

// AddFloat adds delta to the *[Float] value stored under the given map key.
// A missing entry is created as a fresh Float.
func (v *Map) AddFloat(key string, delta float64) {
	i, ok := v.m.Load(key)
	if !ok {
		var dup bool
		i, dup = v.m.LoadOrStore(key, new(Float))
		if !dup {
			v.addKey(key)
		}
	}

	// Add to Float; ignore otherwise.
	if iv, ok := i.(*Float); ok {
		iv.Add(delta)
	}
}

// Delete deletes the given key from the map.
func (v *Map) Delete(key string) {
	v.keysMu.Lock()
	defer v.keysMu.Unlock()
	i, found := slices.BinarySearch(v.keys, key)
	if found {
		v.keys = slices.Delete(v.keys, i, i+1)
		v.m.Delete(key)
	}
}

// Do calls f for each entry in the map.
// The map is locked during the iteration,
// but existing entries may be concurrently updated.
func (v *Map) Do(f func(KeyValue)) {
	v.keysMu.RLock()
	defer v.keysMu.RUnlock()
	// Iterate the sorted key slice so callers see deterministic order.
	for _, k := range v.keys {
		i, _ := v.m.Load(k)
		val, _ := i.(Var)
		f(KeyValue{k, val})
	}
}
// String is a string variable, and satisfies the [Var] interface.
type String struct {
	s atomic.Value // string
}

// Value returns the unquoted stored string; a String that was never Set
// yields "".
func (v *String) Value() string {
	p, _ := v.s.Load().(string)
	return p
}

// String implements the [Var] interface. To get the unquoted string
// use [String.Value].
func (v *String) String() string {
	return string(v.appendJSON(nil))
}

// appendJSON implements jsonVar, appending the JSON-quoted value.
func (v *String) appendJSON(b []byte) []byte {
	return appendJSONQuote(b, v.Value())
}

// Set atomically stores value into v.
func (v *String) Set(value string) {
	v.s.Store(value)
}
// Func implements [Var] by calling the function
// and formatting the returned value using JSON.
type Func func() any

// Value returns the result of calling f.
func (f Func) Value() any {
	return f()
}

// String implements [Var]. A json.Marshal failure yields "" (the error
// is deliberately dropped, matching Var's string-only contract).
func (f Func) String() string {
	b, _ := json.Marshal(f.Value())
	return string(b)
}
// All published variables.
var vars Map

// Publish declares a named exported variable. This should be called from a
// package's init function when it creates its Vars. If the name is already
// registered then this will log.Panic.
func Publish(name string, v Var) {
	if _, dup := vars.m.LoadOrStore(name, v); dup {
		log.Panicln("Reuse of exported var name:", name)
	}
	// Maintain the sorted key list directly (append + sort) rather than
	// via vars.addKey; the result is the same sorted slice.
	vars.keysMu.Lock()
	defer vars.keysMu.Unlock()
	vars.keys = append(vars.keys, name)
	slices.Sort(vars.keys)
}

// Get retrieves a named exported variable. It returns nil if the name has
// not been registered.
func Get(name string) Var {
	return vars.Get(name)
}
// Convenience functions for creating new exported variables.
// Each allocates a fresh variable, publishes it under name (panicking on
// a duplicate name, per Publish), and returns it.

// NewInt publishes and returns a new [Int] with the given name.
func NewInt(name string) *Int {
	v := new(Int)
	Publish(name, v)
	return v
}

// NewFloat publishes and returns a new [Float] with the given name.
func NewFloat(name string) *Float {
	v := new(Float)
	Publish(name, v)
	return v
}

// NewMap publishes and returns a new initialized [Map] with the given name.
func NewMap(name string) *Map {
	v := new(Map).Init()
	Publish(name, v)
	return v
}

// NewString publishes and returns a new [String] with the given name.
func NewString(name string) *String {
	v := new(String)
	Publish(name, v)
	return v
}

// Do calls f for each exported variable.
// The global variable map is locked during the iteration,
// but existing entries may be concurrently updated.
func Do(f func(KeyValue)) {
	vars.Do(f)
}

// expvarHandler serves every published variable as one JSON object in
// the expanded (newline-separated) form.
func expvarHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Write(vars.appendJSONMayExpand(nil, true))
}

// Handler returns the expvar HTTP Handler.
//
// This is only needed to install the handler in a non-standard location.
func Handler() http.Handler {
	return http.HandlerFunc(expvarHandler)
}
// cmdline reports the command-line arguments of the current process;
// published below as the "cmdline" variable.
func cmdline() any {
	return os.Args
}

// memstats reports a point-in-time copy of the runtime memory
// statistics; published below as the "memstats" variable.
func memstats() any {
	stats := new(runtime.MemStats)
	runtime.ReadMemStats(stats)
	return *stats
}

func init() {
	// With GODEBUG=httpmuxgo121=1 the pre-Go 1.22 mux pattern syntax is
	// in effect, so the handler is registered without the "GET " method
	// prefix; otherwise the method-restricted pattern is used.
	if godebug.New("httpmuxgo121").Value() == "1" {
		http.HandleFunc("/debug/vars", expvarHandler)
	} else {
		http.HandleFunc("GET /debug/vars", expvarHandler)
	}
	Publish("cmdline", Func(cmdline))
	Publish("memstats", Func(memstats))
}
// appendJSONQuote appends the JSON-quoted form of s to b and returns the
// extended slice. Control characters, backslash, quote, and the
// HTML-sensitive runes <, >, &, U+2028, and U+2029 are escaped.
//
// TODO: Use json.appendString instead.
func appendJSONQuote(b []byte, s string) []byte {
	const hex = "0123456789abcdef"
	b = append(b, '"')
	for _, r := range s {
		switch {
		case r == '\\' || r == '"':
			b = append(b, '\\', byte(r))
		case r == '\n':
			b = append(b, '\\', 'n')
		case r == '\r':
			b = append(b, '\\', 'r')
		case r == '\t':
			b = append(b, '\\', 't')
		case r < ' ' || r == '<' || r == '>' || r == '&' || r == '\u2028' || r == '\u2029':
			// Remaining controls and HTML-sensitive runes: \uXXXX form.
			b = append(b, '\\', 'u', hex[(r>>12)&0xf], hex[(r>>8)&0xf], hex[(r>>4)&0xf], hex[(r>>0)&0xf])
		case r < utf8.RuneSelf:
			b = append(b, byte(r))
		default:
			b = utf8.AppendRune(b, r)
		}
	}
	return append(b, '"')
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package build
import (
"bytes"
"errors"
"fmt"
"go/ast"
"go/build/constraint"
"go/doc"
"go/token"
"internal/buildcfg"
"internal/godebug"
"internal/goroot"
"internal/goversion"
"internal/platform"
"internal/syslist"
"io"
"io/fs"
"os"
"os/exec"
pathpkg "path"
"path/filepath"
"runtime"
"slices"
"strconv"
"strings"
"unicode"
"unicode/utf8"
_ "unsafe" // for linkname
)
// A Context specifies the supporting context for a build.
type Context struct {
	GOARCH string // target architecture
	GOOS   string // target operating system
	GOROOT string // Go root
	GOPATH string // Go paths

	// Dir is the caller's working directory, or the empty string to use
	// the current directory of the running process. In module mode, this is used
	// to locate the main module.
	//
	// If Dir is non-empty, directories passed to Import and ImportDir must
	// be absolute.
	Dir string

	CgoEnabled  bool   // whether cgo files are included
	UseAllFiles bool   // use files regardless of go:build lines, file names
	Compiler    string // compiler to assume when computing target paths

	// The build, tool, and release tags specify build constraints
	// that should be considered satisfied when processing go:build lines.
	// Clients creating a new context may customize BuildTags, which
	// defaults to empty, but it is usually an error to customize ToolTags or ReleaseTags.
	// ToolTags defaults to build tags appropriate to the current Go toolchain configuration.
	// ReleaseTags defaults to the list of Go releases the current release is compatible with.
	// BuildTags is not set for the Default build Context.
	// In addition to the BuildTags, ToolTags, and ReleaseTags, build constraints
	// consider the values of GOARCH and GOOS as satisfied tags.
	// The last element in ReleaseTags is assumed to be the current release.
	BuildTags   []string
	ToolTags    []string
	ReleaseTags []string

	// The install suffix specifies a suffix to use in the name of the installation
	// directory. By default it is empty, but custom builds that need to keep
	// their outputs separate can set InstallSuffix to do so. For example, when
	// using the race detector, the go command uses InstallSuffix = "race", so
	// that on a Linux/386 system, packages are written to a directory named
	// "linux_386_race" instead of the usual "linux_386".
	InstallSuffix string

	// By default, Import uses the operating system's file system calls
	// to read directories and files. To read from other sources,
	// callers can set the following functions. They all have default
	// behaviors that use the local file system, so clients need only set
	// the functions whose behaviors they wish to change.
	// (Each hook is consulted by the corresponding lowercase helper
	// method on Context, which falls back to the default when nil.)

	// JoinPath joins the sequence of path fragments into a single path.
	// If JoinPath is nil, Import uses filepath.Join.
	JoinPath func(elem ...string) string

	// SplitPathList splits the path list into a slice of individual paths.
	// If SplitPathList is nil, Import uses filepath.SplitList.
	SplitPathList func(list string) []string

	// IsAbsPath reports whether path is an absolute path.
	// If IsAbsPath is nil, Import uses filepath.IsAbs.
	IsAbsPath func(path string) bool

	// IsDir reports whether the path names a directory.
	// If IsDir is nil, Import calls os.Stat and uses the result's IsDir method.
	IsDir func(path string) bool

	// HasSubdir reports whether dir is lexically a subdirectory of
	// root, perhaps multiple levels below. It does not try to check
	// whether dir exists.
	// If so, HasSubdir sets rel to a slash-separated path that
	// can be joined to root to produce a path equivalent to dir.
	// If HasSubdir is nil, Import uses an implementation built on
	// filepath.EvalSymlinks.
	HasSubdir func(root, dir string) (rel string, ok bool)

	// ReadDir returns a slice of fs.FileInfo, sorted by Name,
	// describing the content of the named directory.
	// If ReadDir is nil, Import uses os.ReadDir.
	ReadDir func(dir string) ([]fs.FileInfo, error)

	// OpenFile opens a file (not a directory) for reading.
	// If OpenFile is nil, Import uses os.Open.
	OpenFile func(path string) (io.ReadCloser, error)
}
// joinPath joins path fragments using ctxt.JoinPath when the callback
// is set, and filepath.Join otherwise.
func (ctxt *Context) joinPath(elem ...string) string {
	join := ctxt.JoinPath
	if join == nil {
		join = filepath.Join
	}
	return join(elem...)
}
// splitPathList splits a path list using ctxt.SplitPathList when the
// callback is set, and filepath.SplitList otherwise.
func (ctxt *Context) splitPathList(s string) []string {
	split := ctxt.SplitPathList
	if split == nil {
		split = filepath.SplitList
	}
	return split(s)
}
// isAbsPath reports whether path is absolute, using ctxt.IsAbsPath when
// the callback is set, and filepath.IsAbs otherwise.
func (ctxt *Context) isAbsPath(path string) bool {
	isAbs := ctxt.IsAbsPath
	if isAbs == nil {
		isAbs = filepath.IsAbs
	}
	return isAbs(path)
}
// isDir reports whether path names a directory, using ctxt.IsDir when
// the callback is set, and os.Stat otherwise.
func (ctxt *Context) isDir(path string) bool {
	if custom := ctxt.IsDir; custom != nil {
		return custom(path)
	}
	info, err := os.Stat(path)
	return err == nil && info.IsDir()
}
// hasSubdir reports whether dir lies within root, answering via
// ctxt.HasSubdir when that callback is set and via the local file
// system otherwise.
func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
	if custom := ctxt.HasSubdir; custom != nil {
		return custom(root, dir)
	}

	// First try the paths exactly as given.
	if rel, ok = hasSubdir(root, dir); ok {
		return rel, ok
	}

	// Otherwise expand symlinks and retry with every combination of
	// expanded and unexpanded root/dir.
	rootSym, _ := filepath.EvalSymlinks(root)
	dirSym, _ := filepath.EvalSymlinks(dir)
	for _, pair := range [...][2]string{{rootSym, dir}, {root, dirSym}, {rootSym, dirSym}} {
		if rel, ok = hasSubdir(pair[0], pair[1]); ok {
			return rel, ok
		}
	}
	return "", false
}
// hasSubdir reports if dir is within root by performing lexical analysis only.
func hasSubdir(root, dir string) (rel string, ok bool) {
const sep = string(filepath.Separator)
root = filepath.Clean(root)
if !strings.HasSuffix(root, sep) {
root += sep
}
dir = filepath.Clean(dir)
after, found := strings.CutPrefix(dir, root)
if !found {
return "", false
}
return filepath.ToSlash(after), true
}
// readDir lists the named directory, adapting ctxt.ReadDir's
// []fs.FileInfo result to []fs.DirEntry when the callback is set, and
// using os.ReadDir otherwise.
func (ctxt *Context) readDir(path string) ([]fs.DirEntry, error) {
	// TODO: add a fs.DirEntry version of Context.ReadDir
	custom := ctxt.ReadDir
	if custom == nil {
		return os.ReadDir(path)
	}
	fis, err := custom(path)
	if err != nil {
		return nil, err
	}
	entries := make([]fs.DirEntry, 0, len(fis))
	for _, fi := range fis {
		entries = append(entries, fs.FileInfoToDirEntry(fi))
	}
	return entries, nil
}
// openFile opens the named file for reading, using ctxt.OpenFile when
// the callback is set, and os.Open otherwise.
func (ctxt *Context) openFile(path string) (io.ReadCloser, error) {
	if open := ctxt.OpenFile; open != nil {
		return open(path)
	}
	f, err := os.Open(path)
	if err != nil {
		// Return a literal nil interface, not a typed-nil *os.File.
		return nil, err
	}
	return f, nil
}
// isFile determines whether path is a file by trying to open it.
// It reuses openFile instead of adding another function to the
// list in Context.
func (ctxt *Context) isFile(path string) bool {
	rc, err := ctxt.openFile(path)
	if err != nil {
		return false
	}
	defer rc.Close()
	return true
}
// gopath returns the list of Go path directories, dropping entries
// that are empty, equal to GOROOT, or start with an unexpanded ~.
func (ctxt *Context) gopath() []string {
	var dirs []string
	for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
		switch {
		case p == "" || p == ctxt.GOROOT:
			// Empty paths are uninteresting, and people sometimes set
			// GOPATH=$GOROOT by mistake; skip both rather than getting
			// confused by that common error.
		case strings.HasPrefix(p, "~"):
			// A path segment starting with ~ on Unix is almost always a
			// user who incorrectly quoted ~ while setting GOPATH,
			// preventing it from expanding to $HOME. The situation is
			// made more confusing by the fact that bash allows a quoted
			// ~ in $PATH (most shells do not). Such a path does not
			// exist, and printing errors about it only confuses those
			// users further ("sure ~ exists!"), so skip it silently; the
			// go command diagnoses this situation and prints a useful
			// error. On Windows, ~ is used in short names, such as
			// c:\progra~1 for c:\program files.
		default:
			dirs = append(dirs, p)
		}
	}
	return dirs
}
// SrcDirs returns a list of package source root directories.
// It draws from the current Go root and Go path but omits directories
// that do not exist.
func (ctxt *Context) SrcDirs() []string {
	var dirs []string
	appendIfDir := func(dir string) {
		if ctxt.isDir(dir) {
			dirs = append(dirs, dir)
		}
	}
	if ctxt.GOROOT != "" && ctxt.Compiler != "gccgo" {
		appendIfDir(ctxt.joinPath(ctxt.GOROOT, "src"))
	}
	for _, p := range ctxt.gopath() {
		appendIfDir(ctxt.joinPath(p, "src"))
	}
	return dirs
}
// Default is the default Context for builds.
// It uses the GOARCH, GOOS, GOROOT, and GOPATH environment variables
// if set, or else the compiled code's GOARCH, GOOS, and GOROOT.
// It is computed once, at package initialization, by defaultContext.
var Default Context = defaultContext()
// Keep consistent with cmd/go/internal/cfg.defaultGOPATH.
func defaultGOPATH() string {
env := "HOME"
if runtime.GOOS == "windows" {
env = "USERPROFILE"
} else if runtime.GOOS == "plan9" {
env = "home"
}
if home := os.Getenv(env); home != "" {
def := filepath.Join(home, "go")
if filepath.Clean(def) == filepath.Clean(runtime.GOROOT()) {
// Don't set the default GOPATH to GOROOT,
// as that will trigger warnings from the go tool.
return ""
}
return def
}
return ""
}
// defaultToolTags should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/gopherjs/gopherjs
//
// It holds a private copy of the tool tags computed by defaultContext.
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname defaultToolTags
var defaultToolTags []string
// defaultReleaseTags should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/gopherjs/gopherjs
//
// It holds a private copy of the release tags computed by defaultContext.
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname defaultReleaseTags
var defaultReleaseTags []string
// defaultContext builds the Context stored in Default, filling in
// GOARCH/GOOS/GOROOT/GOPATH, the tool and release tags (also copied
// into defaultToolTags and defaultReleaseTags), and CgoEnabled.
func defaultContext() Context {
	c := Context{
		GOARCH:   buildcfg.GOARCH,
		GOOS:     buildcfg.GOOS,
		GOPATH:   envOr("GOPATH", defaultGOPATH()),
		Compiler: runtime.Compiler,
	}
	if goroot := runtime.GOROOT(); goroot != "" {
		c.GOROOT = filepath.Clean(goroot)
	}
	c.ToolTags = append(c.ToolTags, buildcfg.ToolTags...)
	defaultToolTags = append([]string{}, c.ToolTags...) // our own private copy

	// Each major Go release in the Go 1.x series adds a new
	// "go1.x" release tag. That is, the go1.x tag is present in
	// all releases >= Go 1.x. Code that requires Go 1.x or later
	// should say "go:build go1.x", and code that should only be
	// built before Go 1.x (perhaps it is the stub to use in that
	// case) should say "go:build !go1.x".
	// The last element in ReleaseTags is the current release.
	for i := 1; i <= goversion.Version; i++ {
		c.ReleaseTags = append(c.ReleaseTags, "go1."+strconv.Itoa(i))
	}
	defaultReleaseTags = append([]string{}, c.ReleaseTags...) // our own private copy

	switch envOr("CGO_ENABLED", buildcfg.DefaultCGO_ENABLED) {
	case "1":
		c.CgoEnabled = true
	case "0":
		c.CgoEnabled = false
	default:
		// cgo must be explicitly enabled for cross compilation builds
		c.CgoEnabled = runtime.GOARCH == c.GOARCH && runtime.GOOS == c.GOOS &&
			platform.CgoSupported(c.GOOS, c.GOARCH)
	}
	return c
}
func envOr(name, def string) string {
s := os.Getenv(name)
if s == "" {
return def
}
return s
}
// An ImportMode controls the behavior of the Import method.
// The mode bits below may be combined with bitwise OR.
type ImportMode uint

const (
	// If FindOnly is set, Import stops after locating the directory
	// that should contain the sources for a package. It does not
	// read any files in the directory.
	FindOnly ImportMode = 1 << iota

	// If AllowBinary is set, Import can be satisfied by a compiled
	// package object without corresponding sources.
	//
	// Deprecated:
	// The supported way to create a compiled-only package is to
	// write source code containing a //go:binary-only-package comment at
	// the top of the file. Such a package will be recognized
	// regardless of this flag setting (because it has source code)
	// and will have BinaryOnly set to true in the returned Package.
	AllowBinary

	// If ImportComment is set, parse import comments on package statements.
	// Import returns an error if it finds a comment it cannot understand
	// or finds conflicting comments in multiple source files.
	// See golang.org/s/go14customimport for more information.
	ImportComment

	// By default, Import searches vendor directories
	// that apply in the given source directory before searching
	// the GOROOT and GOPATH roots.
	// If an Import finds and returns a package using a vendor
	// directory, the resulting ImportPath is the complete path
	// to the package, including the path elements leading up
	// to and including "vendor".
	// For example, if Import("y", "x/subdir", 0) finds
	// "x/vendor/y", the returned package's ImportPath is "x/vendor/y",
	// not plain "y".
	// See golang.org/s/go15vendor for more information.
	//
	// Setting IgnoreVendor ignores vendor directories.
	//
	// In contrast to the package's ImportPath,
	// the returned package's Imports, TestImports, and XTestImports
	// are always the exact import paths from the source files:
	// Import makes no attempt to resolve or check those paths.
	IgnoreVendor
)
// A Package describes the Go package found in a directory.
type Package struct {
	Dir           string   // directory containing package sources
	Name          string   // package name
	ImportComment string   // path in import comment on package statement
	Doc           string   // documentation synopsis
	ImportPath    string   // import path of package ("" if unknown)
	Root          string   // root of Go tree where this package lives
	SrcRoot       string   // package source root directory ("" if unknown)
	PkgRoot       string   // package install root directory ("" if unknown)
	PkgTargetRoot string   // architecture dependent install root directory ("" if unknown)
	BinDir        string   // command install directory ("" if unknown)
	Goroot        bool     // package found in Go root
	PkgObj        string   // installed .a file
	AllTags       []string // tags that can influence file selection in this directory
	ConflictDir   string   // this directory shadows Dir in $GOPATH
	BinaryOnly    bool     // cannot be rebuilt from source (has //go:binary-only-package comment)

	// Source files
	GoFiles           []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
	CgoFiles          []string // .go source files that import "C"
	IgnoredGoFiles    []string // .go source files ignored for this build (including ignored _test.go files)
	InvalidGoFiles    []string // .go source files with detected problems (parse error, wrong package name, and so on)
	IgnoredOtherFiles []string // non-.go source files ignored for this build
	CFiles            []string // .c source files
	CXXFiles          []string // .cc, .cpp and .cxx source files
	MFiles            []string // .m (Objective-C) source files
	HFiles            []string // .h, .hh, .hpp and .hxx source files
	FFiles            []string // .f, .F, .for and .f90 Fortran source files
	SFiles            []string // .s source files
	SwigFiles         []string // .swig files
	SwigCXXFiles      []string // .swigcxx files
	SysoFiles         []string // .syso system object files to add to archive

	// Cgo directives
	CgoCFLAGS    []string // Cgo CFLAGS directives
	CgoCPPFLAGS  []string // Cgo CPPFLAGS directives
	CgoCXXFLAGS  []string // Cgo CXXFLAGS directives
	CgoFFLAGS    []string // Cgo FFLAGS directives
	CgoLDFLAGS   []string // Cgo LDFLAGS directives
	CgoPkgConfig []string // Cgo pkg-config directives

	// Test information
	TestGoFiles  []string // _test.go files in package
	XTestGoFiles []string // _test.go files outside package

	// Go directive comments (//go:zzz...) found in source files.
	Directives      []Directive
	TestDirectives  []Directive
	XTestDirectives []Directive

	// Dependency information
	Imports        []string                    // import paths from GoFiles, CgoFiles
	ImportPos      map[string][]token.Position // line information for Imports
	TestImports    []string                    // import paths from TestGoFiles
	TestImportPos  map[string][]token.Position // line information for TestImports
	XTestImports   []string                    // import paths from XTestGoFiles
	XTestImportPos map[string][]token.Position // line information for XTestImports

	// //go:embed patterns found in Go source files
	// For example, if a source file says
	//	//go:embed a* b.c
	// then the list will contain those two strings as separate entries.
	// (See package embed for more details about //go:embed.)
	EmbedPatterns        []string                    // patterns from GoFiles, CgoFiles
	EmbedPatternPos      map[string][]token.Position // line information for EmbedPatterns
	TestEmbedPatterns    []string                    // patterns from TestGoFiles
	TestEmbedPatternPos  map[string][]token.Position // line information for TestEmbedPatterns
	XTestEmbedPatterns   []string                    // patterns from XTestGoFiles
	XTestEmbedPatternPos map[string][]token.Position // line information for XTestEmbedPatterns
}
// A Directive is a Go directive comment (//go:zzz...) found in a source file.
// Directives are collected by Import into Package.Directives,
// Package.TestDirectives, and Package.XTestDirectives.
type Directive struct {
	Text string         // full line comment including leading slashes
	Pos  token.Position // position of comment
}
// IsCommand reports whether the package is considered a
// command to be installed (not just a library).
// Packages named "main" are treated as commands.
func (p *Package) IsCommand() bool {
	const commandPackageName = "main"
	return p.Name == commandPackageName
}
// ImportDir is like [Import] but processes the Go package found in
// the named directory.
// It is equivalent to calling Import with the local import path "."
// and dir as the source directory.
func (ctxt *Context) ImportDir(dir string, mode ImportMode) (*Package, error) {
	return ctxt.Import(".", dir, mode)
}
// NoGoError is the error used by [Import] to describe a directory
// containing no buildable Go source files. (It may still contain
// test files, files hidden by build tags, and so on.)
type NoGoError struct {
	Dir string
}

// Error implements the error interface.
func (e *NoGoError) Error() string {
	return fmt.Sprintf("no buildable Go source files in %s", e.Dir)
}
// MultiplePackageError describes a directory containing
// multiple buildable Go source files for multiple packages.
type MultiplePackageError struct {
	Dir      string   // directory containing files
	Packages []string // package names found
	Files    []string // corresponding files: Files[i] declares package Packages[i]
}

// Error implements the error interface. The message is limited to the
// first two package/file pairs for compatibility.
func (e *MultiplePackageError) Error() string {
	return fmt.Sprintf("found packages %s (%s) and %s (%s) in %s",
		e.Packages[0], e.Files[0], e.Packages[1], e.Files[1], e.Dir)
}
// nameExt returns the file extension of name, including the leading
// dot, or "" if name contains no dot.
func nameExt(name string) string {
	if i := strings.LastIndexByte(name, '.'); i >= 0 {
		return name[i:]
	}
	return ""
}
// installgoroot is the godebug setting consulted by Import when deciding
// whether packages in GOROOT get an install target (PkgObj): when its
// value is "all", GOROOT packages (other than unsafe and builtin) do.
var installgoroot = godebug.New("installgoroot")
// Import returns details about the Go package named by the import path,
// interpreting local import paths relative to the srcDir directory.
// If the path is a local import path naming a package that can be imported
// using a standard import path, the returned package will set p.ImportPath
// to that path.
//
// In the directory containing the package, .go, .c, .h, and .s files are
// considered part of the package except for:
//
// - .go files in package documentation
// - files starting with _ or . (likely editor temporary files)
// - files with build constraints not satisfied by the context
//
// If an error occurs, Import returns a non-nil error and a non-nil
// *[Package] containing partial information.
func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Package, error) {
	p := &Package{
		ImportPath: path,
	}
	if path == "" {
		return p, fmt.Errorf("import %q: invalid import path", path)
	}

	var pkgtargetroot string
	var pkga string
	var pkgerr error
	suffix := ""
	if ctxt.InstallSuffix != "" {
		suffix = "_" + ctxt.InstallSuffix
	}
	switch ctxt.Compiler {
	case "gccgo":
		pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
	case "gc":
		pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
	default:
		// Save error for end of function.
		pkgerr = fmt.Errorf("import %q: unknown compiler %q", path, ctxt.Compiler)
	}
	// setPkga recomputes the installed-archive path from p.ImportPath;
	// it must be re-run whenever p.ImportPath changes.
	setPkga := func() {
		switch ctxt.Compiler {
		case "gccgo":
			dir, elem := pathpkg.Split(p.ImportPath)
			pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a"
		case "gc":
			pkga = pkgtargetroot + "/" + p.ImportPath + ".a"
		}
	}
	setPkga()

	binaryOnly := false
	if IsLocalImport(path) {
		pkga = "" // local imports have no installed path
		if srcDir == "" {
			return p, fmt.Errorf("import %q: import relative to unknown directory", path)
		}
		if !ctxt.isAbsPath(path) {
			p.Dir = ctxt.joinPath(srcDir, path)
		}
		// p.Dir directory may or may not exist. Gather partial information first, check if it exists later.
		// Determine canonical import path, if any.
		// Exclude results where the import path would include /testdata/.
		inTestdata := func(sub string) bool {
			return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || strings.HasPrefix(sub, "testdata/") || sub == "testdata"
		}
		if ctxt.GOROOT != "" {
			root := ctxt.joinPath(ctxt.GOROOT, "src")
			if sub, ok := ctxt.hasSubdir(root, p.Dir); ok && !inTestdata(sub) {
				p.Goroot = true
				p.ImportPath = sub
				p.Root = ctxt.GOROOT
				setPkga() // p.ImportPath changed
				goto Found
			}
		}
		all := ctxt.gopath()
		for i, root := range all {
			rootsrc := ctxt.joinPath(root, "src")
			if sub, ok := ctxt.hasSubdir(rootsrc, p.Dir); ok && !inTestdata(sub) {
				// We found a potential import path for dir,
				// but check that using it wouldn't find something
				// else first.
				if ctxt.GOROOT != "" && ctxt.Compiler != "gccgo" {
					if dir := ctxt.joinPath(ctxt.GOROOT, "src", sub); ctxt.isDir(dir) {
						p.ConflictDir = dir
						goto Found
					}
				}
				for _, earlyRoot := range all[:i] {
					if dir := ctxt.joinPath(earlyRoot, "src", sub); ctxt.isDir(dir) {
						p.ConflictDir = dir
						goto Found
					}
				}

				// sub would not name some other directory instead of this one.
				// Record it.
				p.ImportPath = sub
				p.Root = root
				setPkga() // p.ImportPath changed
				goto Found
			}
		}
		// It's okay that we didn't find a root containing dir.
		// Keep going with the information we have.
	} else {
		if strings.HasPrefix(path, "/") {
			return p, fmt.Errorf("import %q: cannot import absolute path", path)
		}

		// Try module-aware lookup first; errNoModules means "fall back
		// to the classic GOPATH/GOROOT search below".
		if err := ctxt.importGo(p, path, srcDir, mode); err == nil {
			goto Found
		} else if err != errNoModules {
			return p, err
		}

		gopath := ctxt.gopath() // needed twice below; avoid computing many times

		// tried records the location of unsuccessful package lookups
		var tried struct {
			vendor []string
			goroot string
			gopath []string
		}

		// Vendor directories get first chance to satisfy import.
		if mode&IgnoreVendor == 0 && srcDir != "" {
			// searchVendor walks from srcDir up toward root, trying
			// each enclosing vendor directory in turn.
			searchVendor := func(root string, isGoroot bool) bool {
				sub, ok := ctxt.hasSubdir(root, srcDir)
				if !ok || !strings.HasPrefix(sub, "src/") || strings.Contains(sub, "/testdata/") {
					return false
				}
				for {
					vendor := ctxt.joinPath(root, sub, "vendor")
					if ctxt.isDir(vendor) {
						dir := ctxt.joinPath(vendor, path)
						if ctxt.isDir(dir) && hasGoFiles(ctxt, dir) {
							p.Dir = dir
							p.ImportPath = strings.TrimPrefix(pathpkg.Join(sub, "vendor", path), "src/")
							p.Goroot = isGoroot
							p.Root = root
							setPkga() // p.ImportPath changed
							return true
						}
						tried.vendor = append(tried.vendor, dir)
					}
					i := strings.LastIndex(sub, "/")
					if i < 0 {
						break
					}
					sub = sub[:i]
				}
				return false
			}
			if ctxt.Compiler != "gccgo" && ctxt.GOROOT != "" && searchVendor(ctxt.GOROOT, true) {
				goto Found
			}
			for _, root := range gopath {
				if searchVendor(root, false) {
					goto Found
				}
			}
		}

		// Determine directory from import path.
		if ctxt.GOROOT != "" {
			// If the package path starts with "vendor/", only search GOROOT before
			// GOPATH if the importer is also within GOROOT. That way, if the user has
			// vendored in a package that is subsequently included in the standard
			// distribution, they'll continue to pick up their own vendored copy.
			gorootFirst := srcDir == "" || !strings.HasPrefix(path, "vendor/")
			if !gorootFirst {
				_, gorootFirst = ctxt.hasSubdir(ctxt.GOROOT, srcDir)
			}
			if gorootFirst {
				dir := ctxt.joinPath(ctxt.GOROOT, "src", path)
				if ctxt.Compiler != "gccgo" {
					isDir := ctxt.isDir(dir)
					binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga))
					if isDir || binaryOnly {
						p.Dir = dir
						p.Goroot = true
						p.Root = ctxt.GOROOT
						goto Found
					}
				}
				tried.goroot = dir
			}
			if ctxt.Compiler == "gccgo" && goroot.IsStandardPackage(ctxt.GOROOT, ctxt.Compiler, path) {
				// TODO(bcmills): Setting p.Dir here is misleading, because gccgo
				// doesn't actually load its standard-library packages from this
				// directory. See if we can leave it unset.
				p.Dir = ctxt.joinPath(ctxt.GOROOT, "src", path)
				p.Goroot = true
				p.Root = ctxt.GOROOT
				goto Found
			}
		}
		for _, root := range gopath {
			dir := ctxt.joinPath(root, "src", path)
			isDir := ctxt.isDir(dir)
			binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(root, pkga))
			if isDir || binaryOnly {
				p.Dir = dir
				p.Root = root
				goto Found
			}
			tried.gopath = append(tried.gopath, dir)
		}

		// If we tried GOPATH first due to a "vendor/" prefix, fall back to GOPATH.
		// That way, the user can still get useful results from 'go list' for
		// standard-vendored paths passed on the command line.
		if ctxt.GOROOT != "" && tried.goroot == "" {
			dir := ctxt.joinPath(ctxt.GOROOT, "src", path)
			if ctxt.Compiler != "gccgo" {
				isDir := ctxt.isDir(dir)
				binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga))
				if isDir || binaryOnly {
					p.Dir = dir
					p.Goroot = true
					p.Root = ctxt.GOROOT
					goto Found
				}
			}
			tried.goroot = dir
		}

		// package was not found: build the multi-line list of every
		// location that was tried, for the error message.
		var paths []string
		format := "\t%s (vendor tree)"
		for _, dir := range tried.vendor {
			paths = append(paths, fmt.Sprintf(format, dir))
			format = "\t%s"
		}
		if tried.goroot != "" {
			paths = append(paths, fmt.Sprintf("\t%s (from $GOROOT)", tried.goroot))
		} else {
			paths = append(paths, "\t($GOROOT not set)")
		}
		format = "\t%s (from $GOPATH)"
		for _, dir := range tried.gopath {
			paths = append(paths, fmt.Sprintf(format, dir))
			format = "\t%s"
		}
		if len(tried.gopath) == 0 {
			paths = append(paths, "\t($GOPATH not set. For more details see: 'go help gopath')")
		}
		return p, fmt.Errorf("cannot find package %q in any of:\n%s", path, strings.Join(paths, "\n"))
	}

Found:
	if p.Root != "" {
		p.SrcRoot = ctxt.joinPath(p.Root, "src")
		p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
		p.BinDir = ctxt.joinPath(p.Root, "bin")
		if pkga != "" {
			// Always set PkgTargetRoot. It might be used when building in shared
			// mode.
			p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot)

			// Set the install target if applicable.
			if !p.Goroot || (installgoroot.Value() == "all" && p.ImportPath != "unsafe" && p.ImportPath != "builtin") {
				if p.Goroot {
					installgoroot.IncNonDefault()
				}
				p.PkgObj = ctxt.joinPath(p.Root, pkga)
			}
		}
	}

	// If it's a local import path, by the time we get here, we still haven't checked
	// that p.Dir directory exists. This is the right time to do that check.
	// We can't do it earlier, because we want to gather partial information for the
	// non-nil *Package returned when an error occurs.
	// We need to do this before we return early on FindOnly flag.
	if IsLocalImport(path) && !ctxt.isDir(p.Dir) {
		if ctxt.Compiler == "gccgo" && p.Goroot {
			// gccgo has no sources for GOROOT packages.
			return p, nil
		}

		// package was not found
		return p, fmt.Errorf("cannot find package %q in:\n\t%s", p.ImportPath, p.Dir)
	}

	if mode&FindOnly != 0 {
		return p, pkgerr
	}
	if binaryOnly && (mode&AllowBinary) != 0 {
		return p, pkgerr
	}

	if ctxt.Compiler == "gccgo" && p.Goroot {
		// gccgo has no sources for GOROOT packages.
		return p, nil
	}

	dirs, err := ctxt.readDir(p.Dir)
	if err != nil {
		return p, err
	}

	var badGoError error
	badGoFiles := make(map[string]bool)
	// badGoFile records name in InvalidGoFiles (once) and keeps only
	// the first error seen as the overall badGoError.
	badGoFile := func(name string, err error) {
		if badGoError == nil {
			badGoError = err
		}
		if !badGoFiles[name] {
			p.InvalidGoFiles = append(p.InvalidGoFiles, name)
			badGoFiles[name] = true
		}
	}

	var Sfiles []string // files with ".S"(capital S)/.sx(capital s equivalent for case insensitive filesystems)
	var firstFile, firstCommentFile string
	embedPos := make(map[string][]token.Position)
	testEmbedPos := make(map[string][]token.Position)
	xTestEmbedPos := make(map[string][]token.Position)
	importPos := make(map[string][]token.Position)
	testImportPos := make(map[string][]token.Position)
	xTestImportPos := make(map[string][]token.Position)
	allTags := make(map[string]bool)
	fset := token.NewFileSet()
	// Scan the directory, classifying every file into the Package lists.
	for _, d := range dirs {
		if d.IsDir() {
			continue
		}
		if d.Type() == fs.ModeSymlink {
			if ctxt.isDir(ctxt.joinPath(p.Dir, d.Name())) {
				// Symlinks to directories are not source files.
				continue
			}
		}

		name := d.Name()
		ext := nameExt(name)

		info, err := ctxt.matchFile(p.Dir, name, allTags, &p.BinaryOnly, fset)
		if err != nil && strings.HasSuffix(name, ".go") {
			badGoFile(name, err)
			continue
		}
		if info == nil {
			if strings.HasPrefix(name, "_") || strings.HasPrefix(name, ".") {
				// not due to build constraints - don't report
			} else if ext == ".go" {
				p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
			} else if fileListForExt(p, ext) != nil {
				p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, name)
			}
			continue
		}

		// Going to save the file. For non-Go files, can stop here.
		switch ext {
		case ".go":
			// keep going
		case ".S", ".sx":
			// special case for cgo, handled at end
			Sfiles = append(Sfiles, name)
			continue
		default:
			if list := fileListForExt(p, ext); list != nil {
				*list = append(*list, name)
			}
			continue
		}

		data, filename := info.header, info.name

		if info.parseErr != nil {
			badGoFile(name, info.parseErr)
			// Fall through: we might still have a partial AST in info.parsed,
			// and we want to list files with parse errors anyway.
		}

		var pkg string
		if info.parsed != nil {
			pkg = info.parsed.Name.Name
			if pkg == "documentation" {
				p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
				continue
			}
		}

		isTest := strings.HasSuffix(name, "_test.go")
		isXTest := false
		if isTest && strings.HasSuffix(pkg, "_test") && p.Name != pkg {
			isXTest = true
			pkg = pkg[:len(pkg)-len("_test")]
		}

		if p.Name == "" {
			p.Name = pkg
			firstFile = name
		} else if pkg != p.Name {
			// TODO(#45999): The choice of p.Name is arbitrary based on file iteration
			// order. Instead of resolving p.Name arbitrarily, we should clear out the
			// existing name and mark the existing files as also invalid.
			badGoFile(name, &MultiplePackageError{
				Dir:      p.Dir,
				Packages: []string{p.Name, pkg},
				Files:    []string{firstFile, name},
			})
		}
		// Grab the first package comment as docs, provided it is not from a test file.
		if info.parsed != nil && info.parsed.Doc != nil && p.Doc == "" && !isTest && !isXTest {
			p.Doc = doc.Synopsis(info.parsed.Doc.Text())
		}

		if mode&ImportComment != 0 {
			qcom, line := findImportComment(data)
			if line != 0 {
				com, err := strconv.Unquote(qcom)
				if err != nil {
					badGoFile(name, fmt.Errorf("%s:%d: cannot parse import comment", filename, line))
				} else if p.ImportComment == "" {
					p.ImportComment = com
					firstCommentFile = name
				} else if p.ImportComment != com {
					badGoFile(name, fmt.Errorf("found import comments %q (%s) and %q (%s) in %s", p.ImportComment, firstCommentFile, com, name, p.Dir))
				}
			}
		}

		// Record imports and information about cgo.
		isCgo := false
		for _, imp := range info.imports {
			if imp.path == "C" {
				if isTest {
					badGoFile(name, fmt.Errorf("use of cgo in test %s not supported", filename))
					continue
				}
				isCgo = true
				if imp.doc != nil {
					if err := ctxt.saveCgo(filename, p, imp.doc); err != nil {
						badGoFile(name, err)
					}
				}
			}
		}

		// Choose which Package lists and position maps this file
		// contributes to, based on its cgo/test/xtest classification.
		var fileList *[]string
		var importMap, embedMap map[string][]token.Position
		var directives *[]Directive
		switch {
		case isCgo:
			allTags["cgo"] = true
			if ctxt.CgoEnabled {
				fileList = &p.CgoFiles
				importMap = importPos
				embedMap = embedPos
				directives = &p.Directives
			} else {
				// Ignore imports and embeds from cgo files if cgo is disabled.
				fileList = &p.IgnoredGoFiles
			}
		case isXTest:
			fileList = &p.XTestGoFiles
			importMap = xTestImportPos
			embedMap = xTestEmbedPos
			directives = &p.XTestDirectives
		case isTest:
			fileList = &p.TestGoFiles
			importMap = testImportPos
			embedMap = testEmbedPos
			directives = &p.TestDirectives
		default:
			fileList = &p.GoFiles
			importMap = importPos
			embedMap = embedPos
			directives = &p.Directives
		}
		*fileList = append(*fileList, name)
		if importMap != nil {
			for _, imp := range info.imports {
				importMap[imp.path] = append(importMap[imp.path], fset.Position(imp.pos))
			}
		}
		if embedMap != nil {
			for _, emb := range info.embeds {
				embedMap[emb.pattern] = append(embedMap[emb.pattern], emb.pos)
			}
		}
		if directives != nil {
			*directives = append(*directives, info.directives...)
		}
	}

	for tag := range allTags {
		p.AllTags = append(p.AllTags, tag)
	}
	slices.Sort(p.AllTags)

	p.EmbedPatterns, p.EmbedPatternPos = cleanDecls(embedPos)
	p.TestEmbedPatterns, p.TestEmbedPatternPos = cleanDecls(testEmbedPos)
	p.XTestEmbedPatterns, p.XTestEmbedPatternPos = cleanDecls(xTestEmbedPos)

	p.Imports, p.ImportPos = cleanDecls(importPos)
	p.TestImports, p.TestImportPos = cleanDecls(testImportPos)
	p.XTestImports, p.XTestImportPos = cleanDecls(xTestImportPos)

	// add the .S/.sx files only if we are using cgo
	// (which means gcc will compile them).
	// The standard assemblers expect .s files.
	if len(p.CgoFiles) > 0 {
		p.SFiles = append(p.SFiles, Sfiles...)
		slices.Sort(p.SFiles)
	} else {
		p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, Sfiles...)
		slices.Sort(p.IgnoredOtherFiles)
	}

	if badGoError != nil {
		return p, badGoError
	}
	if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
		return p, &NoGoError{p.Dir}
	}
	return p, pkgerr
}
// fileListForExt returns a pointer to the Package file list that
// records sources with extension ext, or nil when ext is not a
// recognized non-Go source extension.
func fileListForExt(p *Package, ext string) *[]string {
	switch ext {
	case ".c":
		return &p.CFiles
	case ".cc", ".cpp", ".cxx":
		return &p.CXXFiles
	case ".f", ".F", ".for", ".f90":
		return &p.FFiles
	case ".h", ".hh", ".hpp", ".hxx":
		return &p.HFiles
	case ".m":
		return &p.MFiles
	case ".s", ".S", ".sx":
		return &p.SFiles
	case ".swig":
		return &p.SwigFiles
	case ".swigcxx":
		return &p.SwigCXXFiles
	case ".syso":
		return &p.SysoFiles
	default:
		return nil
	}
}
func uniq(list []string) []string {
if list == nil {
return nil
}
out := make([]string, len(list))
copy(out, list)
slices.Sort(out)
uniq := out[:0]
for _, x := range out {
if len(uniq) == 0 || uniq[len(uniq)-1] != x {
uniq = append(uniq, x)
}
}
return uniq
}
// errNoModules is the sentinel returned by importGo to signal that
// module-aware lookup does not apply; Import treats it as "fall back
// to the classic GOPATH/GOROOT search".
var errNoModules = errors.New("not using modules")
// importGo checks whether it can use the go command to find the directory for path.
// If using the go command is not appropriate, importGo returns errNoModules.
// Otherwise, importGo tries using the go command and reports whether that succeeded.
// Using the go command lets build.Import and build.Context.Import find code
// in Go modules. In the long term we want tools to use go/packages (currently golang.org/x/tools/go/packages),
// which will also use the go command.
// Invoking the go command here is not very efficient in that it computes information
// about the requested package and all dependencies and then only reports about the requested package.
// Then we reinvoke it for every dependency. But this is still better than not working at all.
// See golang.org/issue/26504.
func (ctxt *Context) importGo(p *Package, path, srcDir string, mode ImportMode) error {
	// To invoke the go command,
	// we must not be doing special things like AllowBinary or IgnoreVendor,
	// and all the file system callbacks must be nil (we're meant to use the local file system).
	if mode&AllowBinary != 0 || mode&IgnoreVendor != 0 ||
		ctxt.JoinPath != nil || ctxt.SplitPathList != nil || ctxt.IsAbsPath != nil || ctxt.IsDir != nil || ctxt.HasSubdir != nil || ctxt.ReadDir != nil || ctxt.OpenFile != nil || !slices.Equal(ctxt.ToolTags, defaultToolTags) || !slices.Equal(ctxt.ReleaseTags, defaultReleaseTags) || ctxt.UseAllFiles {
		return errNoModules
	}

	// If ctxt.GOROOT is not set, we don't know which go command to invoke,
	// and even if we did we might return packages in GOROOT that we wouldn't otherwise find
	// (because we don't know to search in 'go env GOROOT' otherwise).
	if ctxt.GOROOT == "" {
		return errNoModules
	}

	// Predict whether module aware mode is enabled by checking the value of
	// GO111MODULE and looking for a go.mod file in the source directory or
	// one of its parents. Running 'go env GOMOD' in the source directory would
	// give a canonical answer, but we'd prefer not to execute another command.
	go111Module := os.Getenv("GO111MODULE")
	switch go111Module {
	case "off":
		return errNoModules
	default: // "", "on", "auto", anything else
		// Maybe use modules.
	}

	if srcDir != "" {
		var absSrcDir string
		if filepath.IsAbs(srcDir) {
			absSrcDir = srcDir
		} else if ctxt.Dir != "" {
			return fmt.Errorf("go/build: Dir is non-empty, so relative srcDir is not allowed: %v", srcDir)
		} else {
			// Find the absolute source directory. hasSubdir does not handle
			// relative paths (and can't because the callbacks don't support this).
			var err error
			absSrcDir, err = filepath.Abs(srcDir)
			if err != nil {
				return errNoModules
			}
		}

		// If the source directory is in GOROOT, then the in-process code works fine
		// and we should keep using it. Moreover, the 'go list' approach below doesn't
		// take standard-library vendoring into account and will fail.
		if _, ok := ctxt.hasSubdir(filepath.Join(ctxt.GOROOT, "src"), absSrcDir); ok {
			return errNoModules
		}
	}

	// For efficiency, if path is a standard library package, let the usual lookup code handle it.
	if dir := ctxt.joinPath(ctxt.GOROOT, "src", path); ctxt.isDir(dir) {
		return errNoModules
	}

	// If GO111MODULE=auto, look to see if there is a go.mod.
	// Since go1.13, it doesn't matter if we're inside GOPATH.
	if go111Module == "auto" {
		var (
			parent string
			err    error
		)
		if ctxt.Dir == "" {
			parent, err = os.Getwd()
			if err != nil {
				// A nonexistent working directory can't be in a module.
				return errNoModules
			}
		} else {
			parent, err = filepath.Abs(ctxt.Dir)
			if err != nil {
				// If the caller passed a bogus Dir explicitly, that's materially
				// different from not having modules enabled.
				return err
			}
		}
		// Walk up toward the file system root looking for a readable go.mod.
		for {
			if f, err := ctxt.openFile(ctxt.joinPath(parent, "go.mod")); err == nil {
				// Read a little of the file to confirm it is a real, readable file.
				buf := make([]byte, 100)
				_, err := f.Read(buf)
				f.Close()
				if err == nil || err == io.EOF {
					// go.mod exists and is readable (is a file, not a directory).
					break
				}
			}
			d := filepath.Dir(parent)
			if len(d) >= len(parent) {
				return errNoModules // reached top of file system, no go.mod
			}
			parent = d
		}
	}

	goCmd := filepath.Join(ctxt.GOROOT, "bin", "go")
	cmd := exec.Command(goCmd, "list", "-e", "-compiler="+ctxt.Compiler, "-tags="+strings.Join(ctxt.BuildTags, ","), "-installsuffix="+ctxt.InstallSuffix, "-f={{.Dir}}\n{{.ImportPath}}\n{{.Root}}\n{{.Goroot}}\n{{if .Error}}{{.Error}}{{end}}\n", "--", path)

	if ctxt.Dir != "" {
		cmd.Dir = ctxt.Dir
	}

	var stdout, stderr strings.Builder
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	cgo := "0"
	if ctxt.CgoEnabled {
		cgo = "1"
	}
	cmd.Env = append(cmd.Environ(),
		"GOOS="+ctxt.GOOS,
		"GOARCH="+ctxt.GOARCH,
		"GOROOT="+ctxt.GOROOT,
		"GOPATH="+ctxt.GOPATH,
		"CGO_ENABLED="+cgo,
	)

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("go/build: go list %s: %v\n%s\n", path, err, stderr.String())
	}

	// Parse the five newline-separated fields produced by the -f template above.
	f := strings.SplitN(stdout.String(), "\n", 5)
	if len(f) != 5 {
		return fmt.Errorf("go/build: importGo %s: unexpected output:\n%s\n", path, stdout.String())
	}

	dir := f[0]
	errStr := strings.TrimSpace(f[4])
	if errStr != "" && dir == "" {
		// If 'go list' could not locate the package (dir is empty),
		// return the same error that 'go list' reported.
		return errors.New(errStr)
	}

	// If 'go list' did locate the package, ignore the error.
	// It was probably related to loading source files, and we'll
	// encounter it ourselves shortly if the FindOnly flag isn't set.
	p.Dir = dir
	p.ImportPath = f[1]
	p.Root = f[2]
	p.Goroot = f[3] == "true"
	return nil
}
// hasGoFiles reports whether dir contains any files with names ending in .go.
// For a vendor check we must exclude directories that contain no .go files.
// Otherwise it is not possible to vendor just a/b/c and still import the
// non-vendored a/b. See golang.org/issue/13832.
func hasGoFiles(ctxt *Context, dir string) bool {
	// readDir errors are deliberately ignored: an unreadable directory
	// simply reports no Go files.
	entries, _ := ctxt.readDir(dir)
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		if strings.HasSuffix(entry.Name(), ".go") {
			return true
		}
	}
	return false
}
// findImportComment looks at the start of a Go source file for an import
// comment attached to the package clause — a // or /* */ comment on the
// same line as the package name containing `import "path"`. It returns the
// text after the import keyword and the 1-based line number where it
// appears, or "", 0 if there is none.
func findImportComment(data []byte) (s string, line int) {
	// expect keyword package
	word, data := parseWord(data)
	if string(word) != "package" {
		return "", 0
	}

	// expect package name
	_, data = parseWord(data)

	// now ready for import comment, a // or /* */ comment
	// beginning and ending on the current line.
	for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') {
		data = data[1:]
	}

	var comment []byte
	switch {
	case bytes.HasPrefix(data, slashSlash):
		comment, _, _ = bytes.Cut(data[2:], newline)
	case bytes.HasPrefix(data, slashStar):
		var ok bool
		comment, _, ok = bytes.Cut(data[2:], starSlash)
		if !ok {
			// malformed comment
			return "", 0
		}
		if bytes.Contains(comment, newline) {
			return "", 0
		}
	}
	comment = bytes.TrimSpace(comment)

	// split comment into `import`, `"pkg"`
	word, arg := parseWord(comment)
	if string(word) != "import" {
		return "", 0
	}

	// arg aliases data's backing array, so cap(data)-cap(arg) is the byte
	// offset of arg within data; counting newlines before that offset
	// yields the line number of the comment.
	line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline)
	return strings.TrimSpace(string(arg)), line
}
// Byte-string markers used when scanning Go source text for comments.
var (
	slashSlash = []byte("//")
	slashStar  = []byte("/*")
	starSlash  = []byte("*/")
	newline    = []byte("\n")
)
// skipSpaceOrComment returns data with any leading spaces or comments removed.
func skipSpaceOrComment(data []byte) []byte {
	for len(data) > 0 {
		switch c := data[0]; {
		case c == ' ' || c == '\t' || c == '\r' || c == '\n':
			// Plain whitespace: drop one byte and keep scanning.
			data = data[1:]
			continue
		case c == '/' && bytes.HasPrefix(data, slashSlash):
			// Line comment: consume through the newline; a comment that
			// never ends leaves nothing after it.
			i := bytes.IndexByte(data, '\n')
			if i < 0 {
				return nil
			}
			data = data[i+1:]
			continue
		case c == '/' && bytes.HasPrefix(data, slashStar):
			// Block comment: consume through the closing */; an
			// unterminated comment swallows the rest of the input.
			rest := data[len(slashStar):]
			i := bytes.Index(rest, starSlash)
			if i < 0 {
				return nil
			}
			data = rest[i+len(starSlash):]
			continue
		}
		// Non-space, non-comment byte: done.
		break
	}
	return data
}
// parseWord skips any leading spaces or comments in data
// and then parses the beginning of data as an identifier or keyword,
// returning that word and what remains after the word.
func parseWord(data []byte) (word, rest []byte) {
	data = skipSpaceOrComment(data)

	// Advance rest past the leading run of word characters
	// (letters, ASCII digits, underscore).
	rest = data
	for len(rest) > 0 {
		r, size := utf8.DecodeRune(rest)
		isWordChar := unicode.IsLetter(r) || ('0' <= r && r <= '9') || r == '_'
		if !isWordChar {
			break
		}
		rest = rest[size:]
	}

	word = data[:len(data)-len(rest)]
	if len(word) == 0 {
		return nil, nil
	}
	return word, rest
}
// MatchFile reports whether the file with the given name in the given directory
// matches the context and would be included in a [Package] created by [ImportDir]
// of that directory.
//
// MatchFile considers the name of the file and may use ctxt.OpenFile to
// read some or all of the file's content.
func (ctxt *Context) MatchFile(dir, name string) (match bool, err error) {
	// matchFile returns a non-nil *fileInfo exactly when the file matches.
	info, err := ctxt.matchFile(dir, name, nil, nil, nil)
	return info != nil, err
}
// dummyPkg is a throwaway Package used by matchFile to probe whether
// fileListForExt recognizes a file extension without touching a real package.
var dummyPkg Package
// fileInfo records information learned about a file included in a build.
type fileInfo struct {
	name       string         // full name including dir
	header     []byte         // leading portion of the file read by matchFile
	fset       *token.FileSet // file set used when parsing, if any
	parsed     *ast.File      // parsed file, if any
	parseErr   error          // error encountered while parsing, if any
	imports    []fileImport   // imports found in the file
	embeds     []fileEmbed    // //go:embed patterns found in the file
	directives []Directive    // comment directives found in the file
}
// fileImport describes a single import observed in a file.
type fileImport struct {
	path string            // import path text
	pos  token.Pos         // position of the import
	doc  *ast.CommentGroup // associated doc comment, if any
}

// fileEmbed describes a single pattern found in an embed directive
// (populated by the file reader, outside this view).
type fileEmbed struct {
	pattern string         // pattern text
	pos     token.Position // position of the pattern
}
// matchFile determines whether the file with the given name in the given directory
// should be included in the package being constructed.
// If the file should be included, matchFile returns a non-nil *fileInfo (and a nil error).
// Non-nil errors are reserved for unexpected problems.
//
// If name denotes a Go program, matchFile reads until the end of the
// imports and returns that section of the file in the fileInfo's header field,
// even though it only considers text until the first non-comment
// for go:build lines.
//
// If allTags is non-nil, matchFile records any encountered build tag
// by setting allTags[tag] = true.
func (ctxt *Context) matchFile(dir, name string, allTags map[string]bool, binaryOnly *bool, fset *token.FileSet) (*fileInfo, error) {
	// Files with leading underscores or dots are always ignored.
	if strings.HasPrefix(name, "_") ||
		strings.HasPrefix(name, ".") {
		return nil, nil
	}

	// ext covers the final extension; a name with no dot gets ext "".
	i := strings.LastIndex(name, ".")
	if i < 0 {
		i = len(name)
	}
	ext := name[i:]

	if ext != ".go" && fileListForExt(&dummyPkg, ext) == nil {
		// skip: not an extension this package recognizes
		return nil, nil
	}

	if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles {
		return nil, nil
	}

	info := &fileInfo{name: ctxt.joinPath(dir, name), fset: fset}
	if ext == ".syso" {
		// binary, no reading
		return info, nil
	}

	f, err := ctxt.openFile(info.name)
	if err != nil {
		return nil, err
	}

	if strings.HasSuffix(name, ".go") {
		err = readGoInfo(f, info)
		if strings.HasSuffix(name, "_test.go") {
			binaryOnly = nil // ignore //go:binary-only-package comments in _test.go files
		}
	} else {
		binaryOnly = nil // ignore //go:binary-only-package comments in non-Go sources
		info.header, err = readComments(f)
	}
	f.Close()
	if err != nil {
		// Note: the partially filled info is returned alongside the error.
		return info, fmt.Errorf("read %s: %v", info.name, err)
	}

	// Look for go:build comments to accept or reject the file.
	ok, sawBinaryOnly, err := ctxt.shouldBuild(info.header, allTags)
	if err != nil {
		return nil, fmt.Errorf("%s: %v", name, err)
	}
	if !ok && !ctxt.UseAllFiles {
		return nil, nil
	}

	if binaryOnly != nil && sawBinaryOnly {
		*binaryOnly = true
	}

	return info, nil
}
// cleanDecls returns the keys of m in sorted order along with m itself.
func cleanDecls(m map[string][]token.Position) ([]string, map[string][]token.Position) {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	slices.Sort(keys)
	return keys, m
}
// Import is shorthand for Default.Import,
// importing using the Default build context.
func Import(path, srcDir string, mode ImportMode) (*Package, error) {
	return Default.Import(path, srcDir, mode)
}

// ImportDir is shorthand for Default.ImportDir,
// importing using the Default build context.
func ImportDir(dir string, mode ImportMode) (*Package, error) {
	return Default.ImportDir(dir, mode)
}
// Markers and errors used when scanning for build constraint comments.
var (
	plusBuild          = []byte("+build")
	goBuildComment     = []byte("//go:build")
	errMultipleGoBuild = errors.New("multiple //go:build comments")
)
// isGoBuildComment reports whether line is a //go:build comment:
// the //go:build prefix must be followed by nothing at all or by
// at least one whitespace character before any expression text.
func isGoBuildComment(line []byte) bool {
	if !bytes.HasPrefix(line, goBuildComment) {
		return false
	}
	rest := bytes.TrimSpace(line)[len(goBuildComment):]
	// Empty rest means "//go:build" alone; rest shrinking under TrimSpace
	// means the prefix was followed by whitespace rather than more
	// identifier text (e.g. "//go:buildsomethingelse").
	return len(rest) == 0 || len(bytes.TrimSpace(rest)) < len(rest)
}
// Special comment denoting a binary-only package.
// See https://golang.org/design/2775-binary-only-packages
// for more about the design of binary-only packages.
// A header line must equal this byte string exactly (after trimming)
// to be recognized; see parseFileHeader.
var binaryOnlyComment = []byte("//go:binary-only-package")
// shouldBuild reports whether it is okay to use this file.
// The rule is that in the file's leading run of // comments
// and blank lines, which must be followed by a blank line
// (to avoid including a Go package clause doc comment),
// lines beginning with '//go:build' are taken as build directives.
//
// The file is accepted only if each such line lists something
// matching the file. For example:
//
//	//go:build windows || linux
//
// marks the file as applicable only on Windows and Linux.
//
// For each build tag it consults, shouldBuild sets allTags[tag] = true.
//
// shouldBuild reports whether the file should be built
// and whether a //go:binary-only-package comment was found.
func (ctxt *Context) shouldBuild(content []byte, allTags map[string]bool) (shouldBuild, binaryOnly bool, err error) {
	// Identify leading run of // comments and blank lines,
	// which must be followed by a blank line.
	// Also identify any //go:build comments.
	content, goBuild, sawBinaryOnly, err := parseFileHeader(content)
	if err != nil {
		return false, false, err
	}

	// If //go:build line is present, it controls.
	// Otherwise fall back to +build processing.
	switch {
	case goBuild != nil:
		x, err := constraint.Parse(string(goBuild))
		if err != nil {
			return false, false, fmt.Errorf("parsing //go:build line: %v", err)
		}
		shouldBuild = ctxt.eval(x, allTags)

	default:
		shouldBuild = true
		p := content
		for len(p) > 0 {
			// Take the next line (or the remainder, if no newline is left).
			line := p
			if i := bytes.IndexByte(line, '\n'); i >= 0 {
				line, p = line[:i], p[i+1:]
			} else {
				p = p[len(p):]
			}
			line = bytes.TrimSpace(line)
			if !bytes.HasPrefix(line, slashSlash) || !bytes.Contains(line, plusBuild) {
				continue
			}
			text := string(line)
			if !constraint.IsPlusBuild(text) {
				continue
			}
			// Every valid +build line must match; parse errors are
			// ignored (a malformed line simply has no effect).
			if x, err := constraint.Parse(text); err == nil {
				if !ctxt.eval(x, allTags) {
					shouldBuild = false
				}
			}
		}
	}

	return shouldBuild, sawBinaryOnly, nil
}
// parseFileHeader should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bazelbuild/bazel-gazelle
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
// parseFileHeader scans the leading run of // comments and blank lines in
// content. It returns that header (trimmed to end at the last blank line),
// the single //go:build line if one was found, and whether a
// //go:binary-only-package comment was seen. More than one //go:build line
// is an error (errMultipleGoBuild).
//
//go:linkname parseFileHeader
func parseFileHeader(content []byte) (trimmed, goBuild []byte, sawBinaryOnly bool, err error) {
	end := 0
	p := content
	ended := false       // found non-blank, non-// line, so stopped accepting //go:build lines
	inSlashStar := false // in /* */ comment

Lines:
	for len(p) > 0 {
		// Take the next line (or the remainder, if no newline is left).
		line := p
		if i := bytes.IndexByte(line, '\n'); i >= 0 {
			line, p = line[:i], p[i+1:]
		} else {
			p = p[len(p):]
		}
		line = bytes.TrimSpace(line)
		if len(line) == 0 && !ended { // Blank line
			// Remember position of most recent blank line.
			// When we find the first non-blank, non-// line,
			// this "end" position marks the latest file position
			// where a //go:build line can appear.
			// (It must appear _before_ a blank line before the non-blank, non-// line.
			// Yes, that's confusing, which is part of why we moved to //go:build lines.)
			// Note that ended==false here means that inSlashStar==false,
			// since seeing a /* would have set ended==true.
			end = len(content) - len(p)
			continue Lines
		}
		if !bytes.HasPrefix(line, slashSlash) { // Not comment line
			ended = true
		}

		if !inSlashStar && isGoBuildComment(line) {
			if goBuild != nil {
				return nil, nil, false, errMultipleGoBuild
			}
			goBuild = line
		}
		if !inSlashStar && bytes.Equal(line, binaryOnlyComment) {
			sawBinaryOnly = true
		}

		// Consume the rest of the line, tracking /* */ comment state
		// so that text inside block comments is not misread as code.
	Comments:
		for len(line) > 0 {
			if inSlashStar {
				if i := bytes.Index(line, starSlash); i >= 0 {
					inSlashStar = false
					line = bytes.TrimSpace(line[i+len(starSlash):])
					continue Comments
				}
				continue Lines
			}
			if bytes.HasPrefix(line, slashSlash) {
				continue Lines
			}
			if bytes.HasPrefix(line, slashStar) {
				inSlashStar = true
				line = bytes.TrimSpace(line[len(slashStar):])
				continue Comments
			}
			// Found non-comment text.
			break Lines
		}
	}

	return content[:end], goBuild, sawBinaryOnly, nil
}
// saveCgo saves the information from the #cgo lines in the import "C" comment.
// These lines set CFLAGS, CPPFLAGS, CXXFLAGS and LDFLAGS and pkg-config directives
// that affect the way cgo's C code is built.
func (ctxt *Context) saveCgo(filename string, di *Package, cg *ast.CommentGroup) error {
	text := cg.Text()
	for line := range strings.SplitSeq(text, "\n") {
		orig := line // keep the unmodified line for error messages

		// Line is
		//	#cgo [GOOS/GOARCH...] LDFLAGS: stuff
		//
		line = strings.TrimSpace(line)
		if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
			continue
		}

		// #cgo (nocallback|noescape) <function name>
		// These annotations carry no flags; skip them here.
		if fields := strings.Fields(line); len(fields) == 3 && (fields[1] == "nocallback" || fields[1] == "noescape") {
			continue
		}

		// Split at colon.
		// Note: := redeclares line for the rest of the iteration.
		line, argstr, ok := strings.Cut(strings.TrimSpace(line[4:]), ":")
		if !ok {
			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
		}

		// Parse GOOS/GOARCH stuff.
		f := strings.Fields(line)
		if len(f) < 1 {
			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
		}

		cond, verb := f[:len(f)-1], f[len(f)-1]
		if len(cond) > 0 {
			// Keep the directive only if at least one condition matches
			// the build context.
			ok := false
			for _, c := range cond {
				if ctxt.matchAuto(c, nil) {
					ok = true
					break
				}
			}
			if !ok {
				continue
			}
		}

		args, err := splitQuoted(argstr)
		if err != nil {
			return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
		}
		// Expand ${SRCDIR} in each argument and validate it for safe use.
		for i, arg := range args {
			if arg, ok = expandSrcDir(arg, di.Dir); !ok {
				return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
			}
			args[i] = arg
		}

		switch verb {
		case "CFLAGS", "CPPFLAGS", "CXXFLAGS", "FFLAGS", "LDFLAGS":
			// Change relative paths to absolute.
			ctxt.makePathsAbsolute(args, di.Dir)
		}

		// Accumulate the arguments under the matching package field.
		switch verb {
		case "CFLAGS":
			di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
		case "CPPFLAGS":
			di.CgoCPPFLAGS = append(di.CgoCPPFLAGS, args...)
		case "CXXFLAGS":
			di.CgoCXXFLAGS = append(di.CgoCXXFLAGS, args...)
		case "FFLAGS":
			di.CgoFFLAGS = append(di.CgoFFLAGS, args...)
		case "LDFLAGS":
			di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
		case "pkg-config":
			di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
		default:
			return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
		}
	}
	return nil
}
// expandSrcDir expands any occurrence of ${SRCDIR}, making sure
// the result is safe for the shell.
func expandSrcDir(str string, srcdir string) (string, bool) {
	// "\" delimited paths cause safeCgoName to fail
	// so convert native paths with a different delimiter
	// to "/" before starting (eg: on windows).
	srcdir = filepath.ToSlash(srcdir)

	chunks := strings.Split(str, "${SRCDIR}")
	if len(chunks) < 2 {
		// No ${SRCDIR} present; just validate the string as-is.
		return str, safeCgoName(str)
	}

	// Every non-empty chunk and the directory itself must be shell-safe.
	ok := srcdir == "" || safeCgoName(srcdir)
	for _, chunk := range chunks {
		if chunk != "" && !safeCgoName(chunk) {
			ok = false
		}
	}

	res := strings.Join(chunks, srcdir)
	return res, ok && res != ""
}
// makePathsAbsolute looks for compiler options that take paths and
// makes them absolute. We do this because through the 1.8 release we
// ran the compiler in the package directory, so any relative -I or -L
// options would be relative to that directory. In 1.9 we changed to
// running the compiler in the build directory, to get consistent
// build results (issue #19964). To keep builds working, we change any
// relative -I or -L options to be absolute.
//
// Using filepath.IsAbs and filepath.Join here means the results will be
// different on different systems, but that's OK: -I and -L options are
// inherently system-dependent.
func (ctxt *Context) makePathsAbsolute(args []string, srcDir string) {
	wantPath := false
	for i, arg := range args {
		switch {
		case wantPath:
			// This argument is the path operand of a preceding bare -I or -L.
			if !filepath.IsAbs(arg) {
				args[i] = filepath.Join(srcDir, arg)
			}
			wantPath = false
		case strings.HasPrefix(arg, "-I") || strings.HasPrefix(arg, "-L"):
			if len(arg) == 2 {
				// Bare flag: the path arrives as the next argument.
				wantPath = true
				break
			}
			// Attached form, e.g. -I/some/path.
			if rel := arg[2:]; !filepath.IsAbs(rel) {
				args[i] = arg[:2] + filepath.Join(srcDir, rel)
			}
		}
	}
}
// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN.
// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay.
// See golang.org/issue/6038.
// The @ is for OS X. See golang.org/issue/13720.
// The % is for Jenkins. See golang.org/issue/16959.
// The ! is because module paths may use them. See golang.org/issue/26716.
// The ~ and ^ are for sr.ht. See golang.org/issue/32260.
const safeString = "+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$@%! ~^"

// safeCgoName reports whether s is safe to use in a #cgo directive:
// it must be non-empty and every ASCII byte must appear in safeString.
// Bytes of multi-byte (non-ASCII) UTF-8 sequences are accepted as-is.
func safeCgoName(s string) bool {
	if s == "" {
		return false
	}
	for _, c := range []byte(s) {
		if c < utf8.RuneSelf && strings.IndexByte(safeString, c) < 0 {
			return false
		}
	}
	return true
}
// splitQuoted splits the string s around each instance of one or more consecutive
// white space characters while taking into account quotes and escaping, and
// returns an array of substrings of s or an empty list if s contains only white space.
// Single quotes and double quotes are recognized to prevent splitting within the
// quoted region, and are removed from the resulting substrings. If a quote in s
// isn't closed err will be set and r will have the unclosed argument as the
// last element. The backslash is used for escaping.
//
// For example, the following string:
//
//	a b:"c d" 'e''f' "g\""
//
// Would be parsed as:
//
//	[]string{"a", "b:c d", "ef", `g"`}
func splitQuoted(s string) (r []string, err error) {
	var (
		args    []string
		escaped bool // previous rune was an unconsumed backslash
		quote   rune // active quote character, or 0 when unquoted
		sawArg  bool // a quoted (possibly empty) argument is pending
	)
	buf := make([]rune, len(s)) // rune count can never exceed byte count
	n := 0                      // number of runes accumulated in buf

	flush := func() {
		args = append(args, string(buf[:n]))
		n = 0
	}

	for _, c := range s {
		switch {
		case escaped:
			// Escaped rune: keep it literally.
			escaped = false
		case c == '\\':
			escaped = true
			continue
		case quote != 0:
			if c == quote {
				quote = 0
				continue
			}
			// Inside quotes: keep the rune, including whitespace.
		case c == '"' || c == '\'':
			sawArg = true
			quote = c
			continue
		case unicode.IsSpace(c):
			if sawArg || n > 0 {
				sawArg = false
				flush()
			}
			continue
		}
		buf[n] = c
		n++
	}

	if sawArg || n > 0 {
		flush()
	}

	switch {
	case quote != 0:
		err = errors.New("unclosed quote")
	case escaped:
		err = errors.New("unfinished escaping")
	}
	return args, err
}
// matchAuto interprets text as either a +build or //go:build expression (whichever works),
// reporting whether the expression matches the build context.
//
// matchAuto is only used for testing of tag evaluation
// and in #cgo lines, which accept either syntax.
func (ctxt *Context) matchAuto(text string, allTags map[string]bool) bool {
	// Boolean operators imply //go:build syntax; otherwise treat the text
	// as the older space/comma +build syntax.
	if strings.ContainsAny(text, "&|()") {
		text = "//go:build " + text
	} else {
		text = "// +build " + text
	}
	x, err := constraint.Parse(text)
	if err != nil {
		// An unparseable expression never matches.
		return false
	}
	return ctxt.eval(x, allTags)
}
// eval evaluates the constraint expression x against the build context,
// using ctxt.matchTag to resolve tags and recording consulted tags in allTags.
func (ctxt *Context) eval(x constraint.Expr, allTags map[string]bool) bool {
	return x.Eval(func(tag string) bool { return ctxt.matchTag(tag, allTags) })
}
// matchTag reports whether the name is one of:
//
//	cgo (if cgo is enabled)
//	$GOOS
//	$GOARCH
//	ctxt.Compiler
//	linux (if GOOS = android)
//	solaris (if GOOS = illumos)
//	darwin (if GOOS = ios)
//	unix (if this is a Unix GOOS)
//	boringcrypto (if GOEXPERIMENT=boringcrypto is enabled)
//	tag (if tag is listed in ctxt.BuildTags, ctxt.ToolTags, or ctxt.ReleaseTags)
//
// It records all consulted tags in allTags.
func (ctxt *Context) matchTag(name string, allTags map[string]bool) bool {
	if allTags != nil {
		allTags[name] = true
	}

	// Special tags, satisfied by the build context itself rather than by
	// an explicit tag list. Cases are evaluated in order.
	switch {
	case ctxt.CgoEnabled && name == "cgo",
		name == ctxt.GOOS,
		name == ctxt.GOARCH,
		name == ctxt.Compiler,
		ctxt.GOOS == "android" && name == "linux",
		ctxt.GOOS == "illumos" && name == "solaris",
		ctxt.GOOS == "ios" && name == "darwin",
		name == "unix" && syslist.UnixOS[ctxt.GOOS]:
		return true
	}

	if name == "boringcrypto" {
		name = "goexperiment.boringcrypto" // boringcrypto is an old name for goexperiment.boringcrypto
	}

	// Explicitly listed tags.
	return slices.Contains(ctxt.BuildTags, name) ||
		slices.Contains(ctxt.ToolTags, name) ||
		slices.Contains(ctxt.ReleaseTags, name)
}
// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
// suffix which does not match the current system.
// The recognized name formats are:
//
//	name_$(GOOS).*
//	name_$(GOARCH).*
//	name_$(GOOS)_$(GOARCH).*
//	name_$(GOOS)_test.*
//	name_$(GOARCH)_test.*
//	name_$(GOOS)_$(GOARCH)_test.*
//
// Exceptions:
// if GOOS=android, then files with GOOS=linux are also matched.
// if GOOS=illumos, then files with GOOS=solaris are also matched.
// if GOOS=ios, then files with GOOS=darwin are also matched.
func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool {
	// Only the part before the first dot participates in suffix matching.
	name, _, _ = strings.Cut(name, ".")

	// Before Go 1.4, a file called "linux.go" would be equivalent to having a
	// build tag "linux" in that file. For Go 1.4 and beyond, we require this
	// auto-tagging to apply only to files with a non-empty prefix, so
	// "foo_linux.go" is tagged but "linux.go" is not. This allows new operating
	// systems, such as android, to arrive without breaking existing code with
	// innocuous source code in "android.go". The easiest fix: cut everything
	// in the name before the initial _.
	i := strings.Index(name, "_")
	if i < 0 {
		return true
	}
	name = name[i:] // ignore everything before first _

	// A trailing _test component is not a GOOS/GOARCH tag; drop it.
	l := strings.Split(name, "_")
	if n := len(l); n > 0 && l[n-1] == "test" {
		l = l[:n-1]
	}
	n := len(l)
	if n >= 2 && syslist.KnownOS[l[n-2]] && syslist.KnownArch[l[n-1]] {
		if allTags != nil {
			// In case we short-circuit on l[n-1].
			allTags[l[n-2]] = true
		}
		return ctxt.matchTag(l[n-1], allTags) && ctxt.matchTag(l[n-2], allTags)
	}
	if n >= 1 && (syslist.KnownOS[l[n-1]] || syslist.KnownArch[l[n-1]]) {
		return ctxt.matchTag(l[n-1], allTags)
	}
	return true
}
// ToolDir is the directory containing build tools.
// It is computed once, at package initialization, by getToolDir.
var ToolDir = getToolDir()
// IsLocalImport reports whether the import path is
// a local import path, like ".", "..", "./foo", or "../foo".
func IsLocalImport(path string) bool {
	if path == "." || path == ".." {
		return true
	}
	return strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../")
}
// ArchChar returns "?" and an error.
// In earlier versions of Go, the returned string was used to derive
// the compiler and linker tool names, the default object file suffix,
// and the default linker output name. As of Go 1.5, those strings
// no longer vary by architecture; they are compile, link, .o, and a.out, respectively.
func ArchChar(goarch string) (string, error) {
	// goarch is intentionally ignored; the parameter is kept for API compatibility.
	return "?", errors.New("architecture letter no longer used")
}
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package constraint implements parsing and evaluation of build constraint lines.
// See https://golang.org/cmd/go/#hdr-Build_constraints for documentation about build constraints themselves.
//
// This package parses both the original “// +build” syntax and the “//go:build” syntax that was added in Go 1.17.
// See https://golang.org/design/draft-gobuild for details about the “//go:build” syntax.
package constraint
import (
"errors"
"strings"
"unicode"
"unicode/utf8"
)
// maxSize is a limit used to control the complexity of expressions, in order
// to prevent stack exhaustion issues due to recursion.
// It bounds the number of tokens the parser will consume (see exprParser.not).
const maxSize = 1000
// An Expr is a build tag constraint expression.
// The underlying concrete type is *[AndExpr], *[OrExpr], *[NotExpr], or *[TagExpr].
type Expr interface {
	// String returns the string form of the expression,
	// using the boolean syntax used in //go:build lines.
	String() string

	// Eval reports whether the expression evaluates to true.
	// It calls ok(tag) as needed to find out whether a given build tag
	// is satisfied by the current build configuration.
	Eval(ok func(tag string) bool) bool

	// The presence of an isExpr method explicitly marks the type as an Expr.
	// Only implementations in this package should be used as Exprs.
	isExpr()
}
// A TagExpr is an [Expr] for the single tag Tag.
type TagExpr struct {
	Tag string // for example, “linux” or “cgo”
}

func (x *TagExpr) isExpr() {}

// Eval reports whether the single tag is satisfied.
func (x *TagExpr) Eval(ok func(tag string) bool) bool {
	return ok(x.Tag)
}

func (x *TagExpr) String() string {
	return x.Tag
}

// tag returns an Expr for the single given tag.
func tag(tag string) Expr { return &TagExpr{tag} }
// A NotExpr represents the expression !X (the negation of X).
type NotExpr struct {
	X Expr
}

func (x *NotExpr) isExpr() {}

func (x *NotExpr) Eval(ok func(tag string) bool) bool {
	return !x.X.Eval(ok)
}

func (x *NotExpr) String() string {
	s := x.X.String()
	// Parenthesize binary operands so that !a && b does not print as !(a && b).
	switch x.X.(type) {
	case *AndExpr, *OrExpr:
		s = "(" + s + ")"
	}
	return "!" + s
}

// not returns the negation of x.
func not(x Expr) Expr { return &NotExpr{x} }
// An AndExpr represents the expression X && Y.
type AndExpr struct {
	X, Y Expr
}

func (x *AndExpr) isExpr() {}

func (x *AndExpr) Eval(ok func(tag string) bool) bool {
	// Note: Eval both, to make sure ok func observes all tags.
	xok := x.X.Eval(ok)
	yok := x.Y.Eval(ok)
	return xok && yok
}

func (x *AndExpr) String() string {
	return andArg(x.X) + " && " + andArg(x.Y)
}

// andArg formats x as an operand of &&, adding parentheses
// around || expressions to preserve precedence.
func andArg(x Expr) string {
	s := x.String()
	if _, ok := x.(*OrExpr); ok {
		s = "(" + s + ")"
	}
	return s
}

// and returns the conjunction of x and y.
func and(x, y Expr) Expr {
	return &AndExpr{x, y}
}
// An OrExpr represents the expression X || Y.
type OrExpr struct {
	X, Y Expr
}

func (x *OrExpr) isExpr() {}

func (x *OrExpr) Eval(ok func(tag string) bool) bool {
	// Note: Eval both, to make sure ok func observes all tags.
	xok := x.X.Eval(ok)
	yok := x.Y.Eval(ok)
	return xok || yok
}

func (x *OrExpr) String() string {
	return orArg(x.X) + " || " + orArg(x.Y)
}

// orArg formats x as an operand of ||, adding parentheses
// around && expressions so the printed form re-parses identically.
func orArg(x Expr) string {
	s := x.String()
	if _, ok := x.(*AndExpr); ok {
		s = "(" + s + ")"
	}
	return s
}

// or returns the disjunction of x and y.
func or(x, y Expr) Expr {
	return &OrExpr{x, y}
}
// A SyntaxError reports a syntax error in a parsed build expression.
type SyntaxError struct {
	Offset int    // byte offset in input where error was detected
	Err    string // description of error
}

// Error returns the error description.
func (e *SyntaxError) Error() string {
	return e.Err
}
// errNotConstraint is returned by Parse for lines that are neither
// //go:build nor // +build constraints.
var errNotConstraint = errors.New("not a build constraint")

// Parse parses a single build constraint line of the form “//go:build ...” or “// +build ...”
// and returns the corresponding boolean expression.
func Parse(line string) (Expr, error) {
	// Prefer the //go:build form; fall back to the legacy // +build form.
	if text, ok := splitGoBuild(line); ok {
		return parseExpr(text)
	}
	if text, ok := splitPlusBuild(line); ok {
		return parsePlusBuildExpr(text)
	}
	return nil, errNotConstraint
}
// IsGoBuild reports whether the line of text is a “//go:build” constraint.
// It only checks the prefix of the text, not that the expression itself parses.
func IsGoBuild(line string) bool {
	// splitGoBuild performs the prefix and single-line checks.
	_, ok := splitGoBuild(line)
	return ok
}
// splitGoBuild splits apart the leading //go:build prefix in line from the build expression itself.
// It returns "", false if the input is not a //go:build line or if the input contains multiple lines.
func splitGoBuild(line string) (expr string, ok bool) {
	// A single trailing newline is OK; otherwise multiple lines are not.
	line = strings.TrimSuffix(line, "\n")
	if strings.Contains(line, "\n") || !strings.HasPrefix(line, "//go:build") {
		return "", false
	}

	// TrimSpace cannot touch the front of line (it starts with //),
	// so this strips only trailing whitespace before removing the prefix.
	rest := strings.TrimSpace(line)[len("//go:build"):]
	expr = strings.TrimSpace(rest)

	// If trimming rest removed anything, the prefix was followed by a space,
	// making this a //go:build line (as opposed to //go:buildsomethingelse).
	// An empty rest means "//go:build" by itself, which also counts.
	if len(rest) == len(expr) && rest != "" {
		return "", false
	}
	return expr, true
}
// An exprParser holds state for parsing a build expression.
type exprParser struct {
	s string // input string
	i int    // next read location in s

	tok   string // last token read
	isTag bool   // whether tok is a build tag rather than punctuation
	pos   int    // position (start) of last token

	size int // token budget consumed so far, checked against maxSize
}
// parseExpr parses a boolean build tag expression.
// It returns a *SyntaxError as err for malformed input.
func parseExpr(text string) (x Expr, err error) {
	// The recursive-descent methods report errors by panicking with a
	// *SyntaxError; convert that back into an ordinary error return here.
	defer func() {
		if e := recover(); e != nil {
			if e, ok := e.(*SyntaxError); ok {
				err = e
				return
			}
			panic(e) // unreachable unless parser has a bug
		}
	}()

	p := &exprParser{s: text}
	x = p.or()
	if p.tok != "" {
		// Leftover input after a complete expression is an error.
		panic(&SyntaxError{Offset: p.pos, Err: "unexpected token " + p.tok})
	}
	return x, nil
}
// or parses a sequence of || expressions.
// On entry, the next input token has not yet been lexed.
// On exit, the next input token has been lexed and is in p.tok.
func (p *exprParser) or() Expr {
	x := p.and()
	for p.tok == "||" {
		// Left-associative: fold each further operand into x.
		x = or(x, p.and())
	}
	return x
}

// and parses a sequence of && expressions.
// On entry, the next input token has not yet been lexed.
// On exit, the next input token has been lexed and is in p.tok.
func (p *exprParser) and() Expr {
	x := p.not()
	for p.tok == "&&" {
		// Left-associative: fold each further operand into x.
		x = and(x, p.not())
	}
	return x
}
// not parses a ! expression.
// On entry, the next input token has not yet been lexed.
// On exit, the next input token has been lexed and is in p.tok.
func (p *exprParser) not() Expr {
	// Every call consumes token budget, bounding recursion depth (see maxSize).
	p.size++
	if p.size > maxSize {
		panic(&SyntaxError{Offset: p.pos, Err: "build expression too large"})
	}

	p.lex()
	if p.tok == "!" {
		p.lex()
		if p.tok == "!" {
			// !! is rejected outright rather than parsed as double negation.
			panic(&SyntaxError{Offset: p.pos, Err: "double negation not allowed"})
		}
		return not(p.atom())
	}
	return p.atom()
}
// atom parses a tag or a parenthesized expression.
// On entry, the next input token HAS been lexed.
// On exit, the next input token has been lexed and is in p.tok.
func (p *exprParser) atom() Expr {
	// first token already in p.tok
	if p.tok == "(" {
		pos := p.pos
		defer func() {
			// If the inner expression panicked because the input ended,
			// attribute the failure to this unclosed parenthesis instead.
			if e := recover(); e != nil {
				if e, ok := e.(*SyntaxError); ok && e.Err == "unexpected end of expression" {
					e.Err = "missing close paren"
				}
				panic(e)
			}
		}()
		x := p.or()
		if p.tok != ")" {
			panic(&SyntaxError{Offset: pos, Err: "missing close paren"})
		}
		p.lex()
		return x
	}

	if !p.isTag {
		// Anything other than a tag here (punctuation or end of input) is a syntax error.
		if p.tok == "" {
			panic(&SyntaxError{Offset: p.pos, Err: "unexpected end of expression"})
		}
		panic(&SyntaxError{Offset: p.pos, Err: "unexpected token " + p.tok})
	}
	tok := p.tok
	p.lex()
	return tag(tok)
}
// lex finds and consumes the next token in the input stream.
// On return, p.tok is set to the token text,
// p.isTag reports whether the token was a tag,
// and p.pos records the byte offset of the start of the token in the input stream.
// If lex reaches the end of the input, p.tok is set to the empty string.
// For any other syntax error, lex panics with a SyntaxError.
func (p *exprParser) lex() {
	p.isTag = false

	// Skip spaces and tabs (the only whitespace allowed in a constraint line).
	for p.i < len(p.s) && (p.s[p.i] == ' ' || p.s[p.i] == '\t') {
		p.i++
	}
	if p.i >= len(p.s) {
		p.tok = ""
		p.pos = p.i
		return
	}
	switch p.s[p.i] {
	case '(', ')', '!':
		// Single-character punctuation tokens.
		p.pos = p.i
		p.i++
		p.tok = p.s[p.pos:p.i]
		return

	case '&', '|':
		// Only the doubled forms && and || are valid.
		if p.i+1 >= len(p.s) || p.s[p.i+1] != p.s[p.i] {
			panic(&SyntaxError{Offset: p.i, Err: "invalid syntax at " + string(rune(p.s[p.i]))})
		}
		p.pos = p.i
		p.i += 2
		p.tok = p.s[p.pos:p.i]
		return
	}

	// A tag is the longest run of letters, digits, underscores, and dots.
	tag := p.s[p.i:]
	for i, c := range tag {
		if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' {
			tag = tag[:i]
			break
		}
	}
	if tag == "" {
		c, _ := utf8.DecodeRuneInString(p.s[p.i:])
		panic(&SyntaxError{Offset: p.i, Err: "invalid syntax at " + string(c)})
	}

	p.pos = p.i
	p.i += len(tag)
	p.tok = p.s[p.pos:p.i]
	p.isTag = true
}
// IsPlusBuild reports whether the line of text is a “// +build” constraint.
// It only checks the prefix of the text, not that the expression itself parses.
func IsPlusBuild(line string) bool {
	if _, ok := splitPlusBuild(line); ok {
		return true
	}
	return false
}
// splitPlusBuild splits apart the leading // +build prefix in line from the build expression itself.
// It returns "", false if the input is not a // +build line or if the input contains multiple lines.
func splitPlusBuild(line string) (expr string, ok bool) {
	// A single trailing newline is OK; any remaining newline means
	// multiple lines, which are not allowed.
	line = strings.TrimSuffix(line, "\n")
	if strings.Contains(line, "\n") {
		return "", false
	}

	rest, isComment := strings.CutPrefix(line, "//")
	if !isComment {
		return "", false
	}
	// Note the space is optional; "//+build" is recognized too.
	rest = strings.TrimSpace(rest)
	rest, hasBuild := strings.CutPrefix(rest, "+build")
	if !hasBuild {
		return "", false
	}

	// A non-empty remainder must begin with whitespace; otherwise this is a
	// "+buildsomethingelse" line, not a +build line. An empty remainder
	// ("// +build" by itself) also counts.
	trimmed := strings.TrimSpace(rest)
	if rest != "" && len(trimmed) == len(rest) {
		return "", false
	}
	return trimmed, true
}
// parsePlusBuildExpr parses a legacy build tag expression (as used with “// +build”).
func parsePlusBuildExpr(text string) (Expr, error) {
	// Only allow up to 100 AND/OR operators for "old" syntax.
	// This is much less than the limit for "new" syntax,
	// but uses of old syntax were always very simple.
	const maxOldSize = 100
	opCount := 0

	var result Expr
	for _, clause := range strings.Fields(text) {
		// Space-separated clauses are ORed; comma-separated literals
		// within a clause are ANDed.
		var clauseExpr Expr
		for _, lit := range strings.Split(clause, ",") {
			var litExpr Expr
			switch {
			case strings.HasPrefix(lit, "!!") || lit == "!":
				// Double negation and a bare "!" were never valid; they
				// historically map to the never-satisfied "ignore" tag.
				litExpr = tag("ignore")
			case strings.HasPrefix(lit, "!"):
				if rest := lit[len("!"):]; isValidTag(rest) {
					litExpr = not(tag(rest))
				} else {
					litExpr = not(tag("ignore"))
				}
			default:
				if isValidTag(lit) {
					litExpr = tag(lit)
				} else {
					litExpr = tag("ignore")
				}
			}
			if clauseExpr == nil {
				clauseExpr = litExpr
				continue
			}
			if opCount++; opCount > maxOldSize {
				return nil, errComplex
			}
			clauseExpr = and(clauseExpr, litExpr)
		}
		if result == nil {
			result = clauseExpr
			continue
		}
		if opCount++; opCount > maxOldSize {
			return nil, errComplex
		}
		result = or(result, clauseExpr)
	}
	if result == nil {
		// No clauses at all: treat as never satisfied.
		result = tag("ignore")
	}
	return result, nil
}
// isValidTag reports whether the word is a valid build tag.
// Tags must be letters, digits, underscores or dots.
// Unlike in Go identifiers, all digits are fine (e.g., "386").
func isValidTag(word string) bool {
	if word == "" {
		return false
	}
	for _, r := range word {
		ok := unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' || r == '.'
		if !ok {
			return false
		}
	}
	return true
}
var errComplex = errors.New("expression too complex for // +build lines")
// PlusBuildLines returns a sequence of “// +build” lines that evaluate to the build expression x.
// If the expression is too complex to convert directly to “// +build” lines, PlusBuildLines returns an error.
func PlusBuildLines(x Expr) ([]string, error) {
	// Push all NOTs to the expression leaves, so that //go:build !(x && y) can be treated as !x || !y.
	// This rewrite is both efficient and commonly needed, so it's worth doing.
	// Essentially all other possible rewrites are too expensive and too rarely needed.
	x = pushNot(x, false)

	// Split into AND of ORs of ANDs of literals (tag or NOT tag).
	var split [][][]Expr
	for _, orTerm := range appendSplitAnd(nil, x) {
		var ands [][]Expr
		for _, andTerm := range appendSplitOr(nil, orTerm) {
			var lits []Expr
			for _, lit := range appendSplitAnd(nil, andTerm) {
				switch lit.(type) {
				case *TagExpr, *NotExpr:
					lits = append(lits, lit)
				default:
					// Anything deeper than literals cannot be expressed.
					return nil, errComplex
				}
			}
			ands = append(ands, lits)
		}
		split = append(split, ands)
	}

	// If all the ORs have length 1 (no actual OR'ing going on),
	// push the top-level ANDs to the bottom level, so that we get
	// one // +build line instead of many.
	widest := 0
	for _, ors := range split {
		if len(ors) > widest {
			widest = len(ors)
		}
	}
	if widest == 1 {
		var lits []Expr
		for _, ors := range split {
			lits = append(lits, ors[0]...)
		}
		split = [][][]Expr{{lits}}
	}

	// Render each top-level AND term as one +build line:
	// spaces separate OR terms, commas separate AND literals.
	var lines []string
	for _, ors := range split {
		var b strings.Builder
		b.WriteString("// +build")
		for _, ands := range ors {
			b.WriteString(" ")
			for i, lit := range ands {
				if i > 0 {
					b.WriteString(",")
				}
				b.WriteString(lit.String())
			}
		}
		lines = append(lines, b.String())
	}

	return lines, nil
}
// pushNot applies DeMorgan's law to push negations down the expression,
// so that only tags are negated in the result.
// (It applies the rewrites !(X && Y) => (!X || !Y) and !(X || Y) => (!X && !Y).)
func pushNot(x Expr, not bool) Expr {
	switch x := x.(type) {
	case *NotExpr:
		if _, ok := x.X.(*TagExpr); ok && !not {
			// Already a negated tag leaf; nothing to do.
			return x
		}
		// Double negation cancels; otherwise push the NOT inward.
		return pushNot(x.X, !not)
	case *TagExpr:
		if !not {
			return x
		}
		return &NotExpr{X: x}
	case *AndExpr:
		left, right := pushNot(x.X, not), pushNot(x.Y, not)
		if not {
			// !(X && Y) => !X || !Y
			return or(left, right)
		}
		if left == x.X && right == x.Y {
			// Unchanged; reuse the existing node.
			return x
		}
		return and(left, right)
	case *OrExpr:
		left, right := pushNot(x.X, not), pushNot(x.Y, not)
		if not {
			// !(X || Y) => !X && !Y
			return and(left, right)
		}
		if left == x.X && right == x.Y {
			return x
		}
		return or(left, right)
	default:
		// unreachable
		return x
	}
}
// appendSplitAnd appends x to list while splitting apart any top-level && expressions.
// For example, appendSplitAnd({W}, X && Y && Z) = {W, X, Y, Z}.
func appendSplitAnd(list []Expr, x Expr) []Expr {
	node, ok := x.(*AndExpr)
	if !ok {
		return append(list, x)
	}
	list = appendSplitAnd(list, node.X)
	return appendSplitAnd(list, node.Y)
}
// appendSplitOr appends x to list while splitting apart any top-level || expressions.
// For example, appendSplitOr({W}, X || Y || Z) = {W, X, Y, Z}.
func appendSplitOr(list []Expr, x Expr) []Expr {
	node, ok := x.(*OrExpr)
	if !ok {
		return append(list, x)
	}
	list = appendSplitOr(list, node.X)
	return appendSplitOr(list, node.Y)
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package constraint
import (
"strconv"
"strings"
)
// GoVersion returns the minimum Go version implied by a given build expression.
// If the expression can be satisfied without any Go version tags, GoVersion returns an empty string.
//
// For example:
//
//	GoVersion(linux && go1.22) = "go1.22"
//	GoVersion((linux && go1.22) || (windows && go1.20)) = "go1.20"
//	GoVersion(linux) = ""
//	GoVersion(linux || (windows && go1.22)) = ""
//	GoVersion(!go1.22) = ""
//
// GoVersion assumes that any tag or negated tag may independently be true,
// so that its analysis can be purely structural, without SAT solving.
// “Impossible” subexpressions may therefore affect the result.
//
// For example:
//
//	GoVersion((linux && !linux && go1.20) || go1.21) = "go1.20"
func GoVersion(x Expr) string {
	switch v := minVersion(x, +1); {
	case v < 0:
		return ""
	case v == 0:
		return "go1"
	default:
		return "go1." + strconv.Itoa(v)
	}
}
// minVersion returns the minimum Go major version (9 for go1.9)
// implied by expression z, or if sign < 0, by expression !z.
func minVersion(z Expr, sign int) int {
	switch z := z.(type) {
	case *AndExpr:
		// Under negation, AND flips to OR (DeMorgan).
		if sign < 0 {
			return orVersion(minVersion(z.X, sign), minVersion(z.Y, sign))
		}
		return andVersion(minVersion(z.X, sign), minVersion(z.Y, sign))
	case *OrExpr:
		// Under negation, OR flips to AND (DeMorgan).
		if sign < 0 {
			return andVersion(minVersion(z.X, sign), minVersion(z.Y, sign))
		}
		return orVersion(minVersion(z.X, sign), minVersion(z.Y, sign))
	case *NotExpr:
		return minVersion(z.X, -sign)
	case *TagExpr:
		if sign < 0 {
			// !foo implies nothing
			return -1
		}
		if z.Tag == "go1" {
			return 0
		}
		_, rest, _ := strings.Cut(z.Tag, "go1.")
		n, err := strconv.Atoi(rest)
		if err != nil {
			// not a go1.N tag
			return -1
		}
		return n
	default:
		return -1
	}
}
// andVersion returns the minimum Go version
// implied by the AND of two minimum Go versions,
// which is the max of the versions.
func andVersion(x, y int) int {
	// Idiom: use the built-in max (Go 1.21+) instead of a hand-rolled
	// comparison; the package already relies on newer library features.
	return max(x, y)
}
// orVersion returns the minimum Go version
// implied by the OR of two minimum Go versions,
// which is the min of the versions.
func orVersion(x, y int) int {
	// Idiom: use the built-in min (Go 1.21+) instead of a hand-rolled
	// comparison; the package already relies on newer library features.
	return min(x, y)
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
package build
import (
"path/filepath"
"runtime"
)
// getToolDir returns the default value of ToolDir.
func getToolDir() string {
	// pkg/tool/<GOOS>_<GOARCH> under GOROOT.
	return filepath.Join(runtime.GOROOT(), "pkg", "tool", runtime.GOOS+"_"+runtime.GOARCH)
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package build
import (
"bufio"
"bytes"
"errors"
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"io"
"strconv"
"strings"
"unicode"
"unicode/utf8"
_ "unsafe" // for linkname
)
// An importReader keeps the state for scanning the header of a Go source
// file (package clause and import block) byte by byte, while recording
// every byte consumed so the raw header can be handed to go/parser later.
type importReader struct {
	b    *bufio.Reader
	buf  []byte         // all bytes consumed so far, in order
	peek byte           // one-byte lookahead left by peekByte; 0 if none
	err  error          // first sticky error encountered, if any
	eof  bool           // input is exhausted
	nerr int            // reads attempted after err is set (loop guard)
	pos  token.Position // position of the start of the input
}
var bom = []byte{0xef, 0xbb, 0xbf}
// newImportReader returns an importReader positioned at the start of r,
// with any leading UTF-8 byte order mark already discarded.
func newImportReader(name string, r io.Reader) *importReader {
	br := bufio.NewReader(r)

	// Remove leading UTF-8 BOM.
	// Per https://golang.org/ref/spec#Source_code_representation:
	// a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF)
	// if it is the first Unicode code point in the source text.
	if lead, err := br.Peek(len(bom)); err == nil && bytes.Equal(lead, bom) {
		br.Discard(len(bom))
	}

	pos := token.Position{Filename: name, Line: 1, Column: 1}
	return &importReader{b: br, pos: pos}
}
// isIdent reports whether c may appear in a Go identifier. Any byte at or
// above utf8.RuneSelf is accepted, since it may be part of a multi-byte
// Unicode letter.
func isIdent(c byte) bool {
	switch {
	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9', c == '_':
		return true
	default:
		return c >= utf8.RuneSelf
	}
}
var (
	// errSyntax marks malformed source in the header being scanned.
	errSyntax = errors.New("syntax error")
	// errNUL marks a NUL byte, which is never valid in Go source.
	errNUL = errors.New("unexpected NUL in input")
)
// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
func (r *importReader) syntaxError() {
	if r.err != nil {
		return // keep the first (I/O) error
	}
	r.err = errSyntax
}
// readByte reads the next byte from the input, saves it in buf, and returns it.
// If an error occurs, readByte records the error in r.err and returns 0.
func (r *importReader) readByte() byte {
	c, err := r.b.ReadByte()
	if err == nil {
		r.buf = append(r.buf, c)
		if c == 0 {
			// NUL bytes are never valid in Go source.
			err = errNUL
		}
	}
	if err == nil {
		return c
	}
	if err == io.EOF {
		r.eof = true
	} else if r.err == nil {
		r.err = err
	}
	return 0
}
// readRest reads the entire rest of the file into r.buf.
func (r *importReader) readRest() {
	for {
		if len(r.buf) == cap(r.buf) {
			// Grow the buffer: appending one byte forces a capacity
			// increase, and re-slicing back to the old length leaves the
			// new capacity available for the Read below.
			r.buf = append(r.buf, 0)[:len(r.buf)]
		}
		// Read directly into the spare capacity, then extend the length
		// by however many bytes arrived.
		n, err := r.b.Read(r.buf[len(r.buf):cap(r.buf)])
		r.buf = r.buf[:len(r.buf)+n]
		if err != nil {
			if err == io.EOF {
				r.eof = true
			} else if r.err == nil {
				// Record only the first non-EOF error.
				r.err = err
			}
			break
		}
	}
}
// peekByte returns the next byte from the input reader but does not advance beyond it.
// If skipSpace is set, peekByte skips leading spaces and comments.
func (r *importReader) peekByte(skipSpace bool) byte {
	if r.err != nil {
		// Guard against callers that keep calling after an error: after
		// many failed attempts, assume an infinite loop and panic.
		if r.nerr++; r.nerr > 10000 {
			panic("go/build: import reader looping")
		}
		return 0
	}

	// Use r.peek as first input byte.
	// Don't just return r.peek here: it might have been left by peekByte(false)
	// and this might be peekByte(true).
	c := r.peek
	if c == 0 {
		c = r.readByte()
	}
	for r.err == nil && !r.eof {
		if skipSpace {
			// For the purposes of this reader, semicolons are never necessary to
			// understand the input and are treated as spaces.
			switch c {
			case ' ', '\f', '\t', '\r', '\n', ';':
				c = r.readByte()
				continue

			case '/':
				c = r.readByte()
				if c == '/' {
					// Line comment: skip to end of line.
					for c != '\n' && r.err == nil && !r.eof {
						c = r.readByte()
					}
				} else if c == '*' {
					// Block comment: skip until the closing "*/".
					var c1 byte
					for (c != '*' || c1 != '/') && r.err == nil {
						if r.eof {
							r.syntaxError()
						}
						c, c1 = c1, r.readByte()
					}
				} else {
					// A lone '/' cannot start a token in the header.
					r.syntaxError()
				}
				c = r.readByte()
				continue
			}
		}
		break
	}
	r.peek = c
	return r.peek
}
// nextByte is like peekByte but advances beyond the returned byte.
func (r *importReader) nextByte(skipSpace bool) byte {
	b := r.peekByte(skipSpace)
	r.peek = 0 // consume the lookahead byte
	return b
}
// readKeyword reads the given keyword from the input.
// If the keyword is not present, readKeyword records a syntax error.
func (r *importReader) readKeyword(kw string) {
	r.peekByte(true) // skip leading spaces and comments
	for _, want := range []byte(kw) {
		if r.nextByte(false) != want {
			r.syntaxError()
			return
		}
	}
	// The keyword must not run directly into an identifier character.
	if isIdent(r.peekByte(false)) {
		r.syntaxError()
	}
}
// readIdent reads an identifier from the input.
// If an identifier is not present, readIdent records a syntax error.
func (r *importReader) readIdent() {
	if !isIdent(r.peekByte(true)) {
		r.syntaxError()
		return
	}
	// Consume identifier bytes until a non-identifier byte is seen.
	for isIdent(r.peekByte(false)) {
		r.peek = 0
	}
}
// readString reads a quoted string literal from the input.
// If a string is not present, readString records a syntax error.
func (r *importReader) readString() {
	quote := r.nextByte(true)
	switch quote {
	case '`':
		// Raw string: scan to the closing backquote.
		for r.err == nil {
			if r.nextByte(false) == '`' {
				break
			}
			if r.eof {
				r.syntaxError()
			}
		}
	case '"':
		// Interpreted string: scan to the closing quote, honoring
		// backslash escapes and rejecting embedded newlines.
		for r.err == nil {
			c := r.nextByte(false)
			if c == '"' {
				break
			}
			if r.eof || c == '\n' {
				r.syntaxError()
			}
			if c == '\\' {
				r.nextByte(false) // skip the escaped byte
			}
		}
	default:
		r.syntaxError()
	}
}
// readImport reads an import clause - optional identifier followed by quoted string -
// from the input.
func (r *importReader) readImport() {
	switch c := r.peekByte(true); {
	case c == '.':
		r.peek = 0 // consume the dot-import marker
	case isIdent(c):
		r.readIdent()
	}
	r.readString()
}
// readComments is like io.ReadAll, except that it only reads the leading
// block of comments in the file.
//
// readComments should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bazelbuild/bazel-gazelle
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname readComments
func readComments(f io.Reader) ([]byte, error) {
	r := newImportReader("", f)
	// peekByte(true) skips (and therefore records into r.buf) all leading
	// spaces and comments, stopping at the first other byte.
	r.peekByte(true)
	if r.err == nil && !r.eof {
		// Didn't reach EOF, so must have found a non-space byte. Remove it.
		r.buf = r.buf[:len(r.buf)-1]
	}
	return r.buf, r.err
}
// readGoInfo expects a Go file as input and reads the file up to and including the import section.
// It records what it learned in *info.
// If info.fset is non-nil, readGoInfo parses the file and sets info.parsed, info.parseErr,
// info.imports and info.embeds.
//
// It only returns an error if there are problems reading the file,
// not for syntax errors in the file itself.
func readGoInfo(f io.Reader, info *fileInfo) error {
	r := newImportReader(info.name, f)

	// Scan "package <ident>" followed by zero or more import clauses.
	r.readKeyword("package")
	r.readIdent()
	for r.peekByte(true) == 'i' {
		r.readKeyword("import")
		if r.peekByte(true) == '(' {
			// Grouped imports: read clauses until the closing paren.
			r.nextByte(false)
			for r.peekByte(true) != ')' && r.err == nil {
				r.readImport()
			}
			r.nextByte(false)
		} else {
			r.readImport()
		}
	}

	info.header = r.buf

	// If we stopped successfully before EOF, we read a byte that told us we were done.
	// Return all but that last byte, which would cause a syntax error if we let it through.
	if r.err == nil && !r.eof {
		info.header = r.buf[:len(r.buf)-1]
	}

	// If we stopped for a syntax error, consume the whole file so that
	// we are sure we don't change the errors that go/parser returns.
	if r.err == errSyntax {
		r.err = nil
		r.readRest()
		info.header = r.buf
	}
	if r.err != nil {
		return r.err
	}

	if info.fset == nil {
		// Caller only wanted the raw header bytes.
		return nil
	}

	// Parse file header & record imports.
	info.parsed, info.parseErr = parser.ParseFile(info.fset, info.name, info.header, parser.ImportsOnly|parser.ParseComments)
	if info.parseErr != nil {
		// Syntax errors in the file are reported via info.parseErr, not
		// the return value.
		return nil
	}

	hasEmbed := false
	for _, decl := range info.parsed.Decls {
		d, ok := decl.(*ast.GenDecl)
		if !ok {
			continue
		}
		for _, dspec := range d.Specs {
			spec, ok := dspec.(*ast.ImportSpec)
			if !ok {
				continue
			}
			quoted := spec.Path.Value
			path, err := strconv.Unquote(quoted)
			if err != nil {
				return fmt.Errorf("parser returned invalid quoted string: <%s>", quoted)
			}
			if !isValidImport(path) {
				// The parser used to return a parse error for invalid import paths, but
				// no longer does, so check for and create the error here instead.
				info.parseErr = scanner.Error{Pos: info.fset.Position(spec.Pos()), Msg: "invalid import path: " + path}
				info.imports = nil
				return nil
			}
			if path == "embed" {
				hasEmbed = true
			}

			// Attach the per-spec doc comment, falling back to the
			// declaration's doc for a single-spec import.
			doc := spec.Doc
			if doc == nil && len(d.Specs) == 1 {
				doc = d.Doc
			}
			info.imports = append(info.imports, fileImport{path, spec.Pos(), doc})
		}
	}

	// Extract directives: //go: comments that precede the package clause.
	for _, group := range info.parsed.Comments {
		if group.Pos() >= info.parsed.Package {
			break
		}
		for _, c := range group.List {
			if strings.HasPrefix(c.Text, "//go:") {
				info.directives = append(info.directives, Directive{c.Text, info.fset.Position(c.Slash)})
			}
		}
	}

	// If the file imports "embed",
	// we have to look for //go:embed comments
	// in the remainder of the file.
	// The compiler will enforce the mapping of comments to
	// declared variables. We just need to know the patterns.
	// If there were //go:embed comments earlier in the file
	// (near the package statement or imports), the compiler
	// will reject them. They can be (and have already been) ignored.
	if hasEmbed {
		r.readRest()
		fset := token.NewFileSet()
		file := fset.AddFile(r.pos.Filename, -1, len(r.buf))
		var sc scanner.Scanner
		sc.Init(file, r.buf, nil, scanner.ScanComments)
		for {
			pos, tok, lit := sc.Scan()
			if tok == token.EOF {
				break
			}
			if tok == token.COMMENT && strings.HasPrefix(lit, "//go:embed") {
				// Ignore badly-formed lines - the compiler will report them when it finds them,
				// and we can pretend they are not there to help go list succeed with what it knows.
				embs, err := parseGoEmbed(fset, pos, lit)
				if err == nil {
					info.embeds = append(info.embeds, embs...)
				}
			}
		}
	}

	return nil
}
// isValidImport checks if the import is a valid import using the more strict
// checks allowed by the implementation restriction in https://go.dev/ref/spec#Import_declarations.
// It was ported from the function of the same name that was removed from the
// parser in CL 424855, when the parser stopped doing these checks.
func isValidImport(s string) bool {
	if s == "" {
		return false
	}
	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	for _, r := range s {
		switch {
		case !unicode.IsGraphic(r), unicode.IsSpace(r), strings.ContainsRune(illegalChars, r):
			return false
		}
	}
	return true
}
// parseGoEmbed parses a "//go:embed" to extract the glob patterns.
// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
// This must match the behavior of cmd/compile/internal/noder.go.
func parseGoEmbed(fset *token.FileSet, pos token.Pos, comment string) ([]fileEmbed, error) {
	dir, ok := ast.ParseDirective(pos, comment)
	if !ok || dir.Tool != "go" || dir.Name != "embed" {
		// Not a //go:embed directive at all; silently ignore.
		return nil, nil
	}
	args, err := dir.ParseArgs()
	if err != nil {
		return nil, err
	}
	var list []fileEmbed
	for _, a := range args {
		list = append(list, fileEmbed{a.Arg, fset.Position(a.Pos)})
	}
	return list, nil
}
// Code generated by "stringer -type Kind"; DO NOT EDIT.
package constant
import "strconv"
// _ is a compile-time assertion (emitted by stringer): it fails to build
// if any Kind constant changes value, signaling that this file is stale.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[Unknown-0]
	_ = x[Bool-1]
	_ = x[String-2]
	_ = x[Int-3]
	_ = x[Float-4]
	_ = x[Complex-5]
}
// _Kind_name concatenates all Kind names; _Kind_index holds the start/end
// byte offsets of each name within it (generated by stringer).
const _Kind_name = "UnknownBoolStringIntFloatComplex"

var _Kind_index = [...]uint8{0, 7, 11, 17, 20, 25, 32}
// String returns the name of the Kind constant, or "Kind(n)" for values
// outside the declared range (generated by stringer).
func (i Kind) String() string {
	if i < 0 || i >= Kind(len(_Kind_index)-1) {
		return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package constant implements Values representing untyped
// Go constants and their corresponding operations.
//
// A special Unknown value may be used when a value
// is unknown due to an error. Operations on unknown
// values produce unknown values unless specified
// otherwise.
package constant
import (
"fmt"
"go/token"
"math"
"math/big"
"math/bits"
"strconv"
"strings"
"sync"
"unicode/utf8"
)
//go:generate stringer -type Kind

// Kind specifies the kind of value represented by a [Value].
// The zero value of Kind is Unknown.
type Kind int

const (
	// unknown values
	Unknown Kind = iota

	// non-numeric values
	Bool
	String

	// numeric values
	Int
	Float
	Complex
)
// A Value represents the value of a Go constant.
// Values are created with the Make* factory functions in this package.
type Value interface {
	// Kind returns the value kind.
	Kind() Kind

	// String returns a short, quoted (human-readable) form of the value.
	// For numeric values, the result may be an approximation;
	// for String values the result may be a shortened string.
	// Use ExactString for a string representing a value exactly.
	String() string

	// ExactString returns an exact, quoted (human-readable) form of the value.
	// If the Value is of Kind String, use StringVal to obtain the unquoted string.
	ExactString() string

	// Prevent external implementations.
	implementsValue()
}
// ----------------------------------------------------------------------------
// Implementations
// Maximum supported mantissa precision.
// The spec requires at least 256 bits; typical implementations use 512 bits.
const prec = 512 // in bits
// TODO(gri) Consider storing "error" information in an unknownVal so clients
// can provide better error messages. For instance, if a number is
// too large (incl. infinity), that could be recorded in unknownVal.
// See also #20583 and #42695 for use cases.
// Representation of values:
//
// Values of Int and Float Kind have two different representations each: int64Val
// and intVal, and ratVal and floatVal. When possible, the "smaller", respectively
// more precise (for Floats) representation is chosen. However, once a Float value
// is represented as a floatVal, any subsequent results remain floatVals (unless
// explicitly converted); i.e., no attempt is made to convert a floatVal back into
// a ratVal. The reasoning is that all representations but floatVal are mathematically
// exact, but once that precision is lost (by moving to floatVal), moving back to
// a different representation implies a precision that's not actually there.
type (
	unknownVal struct{}
	boolVal    bool
	stringVal  struct {
		// Lazy value: either a string (l,r==nil) or an addition (l,r!=nil).
		mu   sync.Mutex
		s    string
		l, r *stringVal
	}
	int64Val   int64                    // Int values representable as an int64
	intVal     struct{ val *big.Int }   // Int values not representable as an int64
	ratVal     struct{ val *big.Rat }   // Float values representable as a fraction
	floatVal   struct{ val *big.Float } // Float values not representable as a fraction
	complexVal struct{ re, im Value }   // Complex values; re and im hold the parts
)
// Kind reports the value kind for each concrete representation.
func (unknownVal) Kind() Kind { return Unknown }
func (boolVal) Kind() Kind    { return Bool }
func (*stringVal) Kind() Kind { return String }
func (int64Val) Kind() Kind   { return Int }
func (intVal) Kind() Kind     { return Int }
func (ratVal) Kind() Kind     { return Float }
func (floatVal) Kind() Kind   { return Float }
func (complexVal) Kind() Kind { return Complex }

func (unknownVal) String() string { return "unknown" }
func (x boolVal) String() string  { return strconv.FormatBool(bool(x)) }
// String returns a possibly shortened quoted form of the String value.
func (x *stringVal) String() string {
	const maxLen = 72 // a reasonable length
	s := strconv.Quote(x.string())
	if utf8.RuneCountInString(s) <= maxLen {
		return s
	}
	// The string without the enclosing quotes is greater than maxLen-2 runes
	// long. Remove the last 3 runes (including the closing '"') by keeping
	// only the first maxLen-3 runes; then add "...".
	cut := 0
	for n := 0; n < maxLen-3; n++ {
		_, w := utf8.DecodeRuneInString(s[cut:])
		cut += w
	}
	return s[:cut] + "..."
}
// string constructs and returns the actual string literal value.
// If x represents an addition, then it rewrites x to be a single
// string, to speed future calls. This lazy construction avoids
// building different string values for all subpieces of a large
// concatenation. See golang.org/issue/23348.
func (x *stringVal) string() string {
	x.mu.Lock()
	if x.l != nil {
		// Flatten the addition tree into a single string and drop the
		// children so later calls hit the fast path above.
		x.s = strings.Join(reverse(x.appendReverse(nil)), "")
		x.l = nil
		x.r = nil
	}
	s := x.s
	x.mu.Unlock()
	return s
}
// reverse reverses x in place and returns it.
func reverse(x []string) []string {
	for i, j := 0, len(x)-1; i < j; i, j = i+1, j-1 {
		x[i], x[j] = x[j], x[i]
	}
	return x
}
// appendReverse appends to list all of x's subpieces, but in reverse,
// and returns the result. Appending the reversal allows processing
// the right side in a recursive call and the left side in a loop.
// Because a chain like a + b + c + d + e is actually represented
// as ((((a + b) + c) + d) + e), the left-side loop avoids deep recursion.
// x must be locked.
func (x *stringVal) appendReverse(list []string) []string {
	y := x
	for y.r != nil {
		// Right side first: in the reversed list it must precede the
		// left side's pieces.
		y.r.mu.Lock()
		list = y.r.appendReverse(list)
		y.r.mu.Unlock()

		// Walk down the left spine hand-over-hand: lock the child, then
		// release the parent — except x itself, which the caller holds.
		l := y.l
		if y != x {
			y.mu.Unlock()
		}
		l.mu.Lock()
		y = l
	}
	s := y.s
	if y != x {
		y.mu.Unlock()
	}
	return append(list, s)
}
// String renders the Int and Float representations in decimal form.
func (x int64Val) String() string { return strconv.FormatInt(int64(x), 10) }
func (x intVal) String() string   { return x.val.String() }
func (x ratVal) String() string   { return rtof(x).String() } // format via float: short, possibly approximate
// String returns a decimal approximation of the Float value.
func (x floatVal) String() string {
	f := x.val

	// Don't try to convert infinities (will not terminate).
	if f.IsInf() {
		return f.String()
	}

	// Use exact fmt formatting if in float64 range (common case):
	// proceed if f doesn't underflow to 0 or overflow to inf.
	// (The comparison checks that the float64 conversion is zero
	// exactly when f itself is zero.)
	if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) {
		s := fmt.Sprintf("%.6g", x)
		if !f.IsInt() && strings.IndexByte(s, '.') < 0 {
			// f is not an integer, but its string representation
			// doesn't reflect that. Use more digits. See issue 56220.
			s = fmt.Sprintf("%g", x)
		}
		return s
	}

	// Out of float64 range. Do approximate manual to decimal
	// conversion to avoid precise but possibly slow Float
	// formatting.
	// f = mant * 2**exp
	var mant big.Float
	exp := f.MantExp(&mant) // 0.5 <= |mant| < 1.0

	// approximate float64 mantissa m and decimal exponent d
	// f ~ m * 10**d
	m, _ := mant.Float64()                     // 0.5 <= |m| < 1.0
	d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2)

	// adjust m for truncated (integer) decimal exponent e
	e := int64(d)
	m *= math.Pow(10, d-float64(e))

	// ensure 1 <= |m| < 10
	switch am := math.Abs(m); {
	case am < 1-0.5e-6:
		// The %.6g format below rounds m to 5 digits after the
		// decimal point. Make sure that m*10 < 10 even after
		// rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e-6.
		m *= 10
		e--
	case am >= 10:
		m /= 10
		e++
	}

	return fmt.Sprintf("%.6ge%+d", m, e)
}
func (x complexVal) String() string { return fmt.Sprintf("(%s + %si)", x.re, x.im) }

// ExactString returns an exact, quoted form of each value; for kinds whose
// String is already exact it simply delegates.
func (x unknownVal) ExactString() string { return x.String() }
func (x boolVal) ExactString() string    { return x.String() }
func (x *stringVal) ExactString() string { return strconv.Quote(x.string()) }
func (x int64Val) ExactString() string   { return x.String() }
func (x intVal) ExactString() string     { return x.String() }
// ExactString returns the exact fraction, or just the numerator when the
// value is integral (denominator 1).
func (x ratVal) ExactString() string {
	if r := x.val; r.IsInt() {
		return r.Num().String()
	}
	return x.val.String()
}
// ExactString for floats uses binary ('p') notation so no precision is lost;
// complex values render both parts exactly.
func (x floatVal) ExactString() string { return x.val.Text('p', 0) }
func (x complexVal) ExactString() string {
	return fmt.Sprintf("(%s + %si)", x.re.ExactString(), x.im.ExactString())
}
// implementsValue markers prevent packages outside this one from
// implementing the Value interface.
func (unknownVal) implementsValue() {}
func (boolVal) implementsValue()    {}
func (*stringVal) implementsValue() {}
func (int64Val) implementsValue()   {}
func (ratVal) implementsValue()     {}
func (intVal) implementsValue()     {}
func (floatVal) implementsValue()   {}
func (complexVal) implementsValue() {}
// Allocation helpers; newFloat sets the package-wide mantissa precision.
func newInt() *big.Int     { return new(big.Int) }
func newRat() *big.Rat     { return new(big.Rat) }
func newFloat() *big.Float { return new(big.Float).SetPrec(prec) }

// Conversions between representations
// (i64 = int64Val, i = intVal, r = ratVal, f = floatVal, c = complexVal).
func i64toi(x int64Val) intVal   { return intVal{newInt().SetInt64(int64(x))} }
func i64tor(x int64Val) ratVal   { return ratVal{newRat().SetInt64(int64(x))} }
func i64tof(x int64Val) floatVal { return floatVal{newFloat().SetInt64(int64(x))} }
func itor(x intVal) ratVal       { return ratVal{newRat().SetInt(x.val)} }
func itof(x intVal) floatVal     { return floatVal{newFloat().SetInt(x.val)} }
func rtof(x ratVal) floatVal     { return floatVal{newFloat().SetRat(x.val)} }
func vtoc(x Value) complexVal    { return complexVal{x, int64Val(0)} }
// makeInt returns the smallest Int representation that can hold x:
// int64Val if x fits in an int64, intVal otherwise.
func makeInt(x *big.Int) Value {
	if !x.IsInt64() {
		return intVal{x}
	}
	return int64Val(x.Int64())
}
// makeRat returns a Float representation for x: a ratVal while the
// numerator and denominator are reasonably sized, a floatVal otherwise.
func makeRat(x *big.Rat) Value {
	if !smallInt(x.Num()) || !smallInt(x.Denom()) {
		// components too large => switch to float
		return floatVal{newFloat().SetRat(x)}
	}
	// ok to remain a fraction
	return ratVal{x}
}
// floatVal0 is the canonical Float zero, shared by all zero results.
var floatVal0 = floatVal{newFloat()}

// makeFloat returns the Float value for x.
// Any zero (including -0) maps to the shared canonical zero;
// infinities become Unknown.
func makeFloat(x *big.Float) Value {
	switch {
	case x.Sign() == 0:
		// convert -0 to the canonical 0
		return floatVal0
	case x.IsInf():
		return unknownVal{}
	}
	// No attempt is made to "go back" to ratVal, even if possible,
	// to avoid providing the illusion of a mathematically exact
	// representation.
	return floatVal{x}
}
// makeComplex returns the Complex value re + im*i,
// or Unknown if either part is Unknown.
func makeComplex(re, im Value) Value {
	if re.Kind() != Unknown && im.Kind() != Unknown {
		return complexVal{re, im}
	}
	return unknownVal{}
}
// makeFloatFromLiteral parses lit as a floating-point literal and returns
// its Value: a ratVal while the value is exactly representable as a small
// fraction, a floatVal otherwise. It returns nil if lit is invalid.
func makeFloatFromLiteral(lit string) Value {
	f, ok := newFloat().SetString(lit)
	if !ok {
		return nil
	}
	if !smallFloat(f) {
		// fraction components would be too large; use a float
		return makeFloat(f)
	}
	// ok to use rationals
	if f.Sign() == 0 {
		// Issue 20228: If the float underflowed to zero, parse just "0".
		// Otherwise, lit might contain a value with a large negative exponent,
		// such as -6e-1886451601. As a float, that will underflow to 0,
		// but it'll take forever to parse as a Rat.
		lit = "0"
	}
	if r, ok := newRat().SetString(lit); ok {
		return ratVal{r}
	}
	// otherwise use floats
	return makeFloat(f)
}
// Permit fractions with component sizes up to maxExp
// before switching to using floating-point numbers.
const maxExp = 4 << 10 // 4096 bits

// smallInt reports whether x would lead to "reasonably"-sized fraction
// if converted to a *big.Rat.
func smallInt(x *big.Int) bool {
	return x.BitLen() < maxExp
}
// smallFloat64 reports whether x would lead to "reasonably"-sized fraction
// if converted to a *big.Rat.
func smallFloat64(x float64) bool {
	if math.IsInf(x, 0) {
		return false
	}
	// Compare the binary exponent against the fraction-size limit.
	_, e := math.Frexp(x)
	return e > -maxExp && e < maxExp
}
// smallFloat reports whether x would lead to "reasonably"-sized fraction
// if converted to a *big.Rat.
func smallFloat(x *big.Float) bool {
	if x.IsInf() {
		return false
	}
	// Compare the binary exponent against the fraction-size limit.
	e := x.MantExp(nil)
	return e > -maxExp && e < maxExp
}
// ----------------------------------------------------------------------------
// Factories

// MakeUnknown returns the [Unknown] value.
func MakeUnknown() Value { return unknownVal{} }

// MakeBool returns the [Bool] value for b.
func MakeBool(b bool) Value { return boolVal(b) }

// MakeString returns the [String] value for s.
func MakeString(s string) Value {
	if s == "" {
		return &emptyString // common case
	}
	return &stringVal{s: s}
}

// emptyString is the shared representation of the empty [String] value.
var emptyString stringVal

// MakeInt64 returns the [Int] value for x.
func MakeInt64(x int64) Value { return int64Val(x) }
// MakeUint64 returns the [Int] value for x.
func MakeUint64(x uint64) Value {
	if x >= 1<<63 {
		// Doesn't fit in an int64; fall back to a big.Int.
		return intVal{newInt().SetUint64(x)}
	}
	return int64Val(int64(x))
}
// MakeFloat64 returns the [Float] value for x.
// If x is -0.0, the result is 0.0.
// If x is not finite, the result is an [Unknown].
func MakeFloat64(x float64) Value {
	if math.IsNaN(x) || math.IsInf(x, 0) {
		return unknownVal{}
	}
	x += 0 // convert -0 to 0
	if smallFloat64(x) {
		return ratVal{newRat().SetFloat64(x)}
	}
	return floatVal{newFloat().SetFloat64(x)}
}
// MakeFromLiteral returns the corresponding integer, floating-point,
// imaginary, character, or string value for a Go literal string. The
// tok value must be one of [token.INT], [token.FLOAT], [token.IMAG],
// [token.CHAR], or [token.STRING]. The final argument must be zero.
// If the literal string syntax is invalid, the result is an [Unknown].
func MakeFromLiteral(lit string, tok token.Token, zero uint) Value {
	if zero != 0 {
		panic("MakeFromLiteral called with non-zero last argument")
	}

	switch tok {
	case token.INT:
		// Fast path: most integer literals fit in an int64.
		if v, err := strconv.ParseInt(lit, 0, 64); err == nil {
			return int64Val(v)
		}
		if v, ok := newInt().SetString(lit, 0); ok {
			return intVal{v}
		}

	case token.FLOAT:
		if v := makeFloatFromLiteral(lit); v != nil {
			return v
		}

	case token.IMAG:
		// Strip the trailing 'i' and parse the rest as a float literal.
		if n := len(lit); n > 0 && lit[n-1] == 'i' {
			if im := makeFloatFromLiteral(lit[:n-1]); im != nil {
				return makeComplex(int64Val(0), im)
			}
		}

	case token.CHAR:
		// Strip the surrounding quotes and unescape the character.
		if n := len(lit); n >= 2 {
			if code, _, _, err := strconv.UnquoteChar(lit[1:n-1], '\''); err == nil {
				return MakeInt64(int64(code))
			}
		}

	case token.STRING:
		if s, err := strconv.Unquote(lit); err == nil {
			return MakeString(s)
		}

	default:
		panic(fmt.Sprintf("%v is not a valid token", tok))
	}

	// Invalid literal syntax for the given token kind.
	return unknownVal{}
}
// ----------------------------------------------------------------------------
// Accessors
//
// For unknown arguments the result is the zero value for the respective
// accessor type, except for Sign, where the result is 1.
// BoolVal returns the Go boolean value of x, which must be a [Bool] or an [Unknown].
// If x is [Unknown], the result is false.
func BoolVal(x Value) bool {
	if b, ok := x.(boolVal); ok {
		return bool(b)
	}
	if _, ok := x.(unknownVal); ok {
		return false
	}
	panic(fmt.Sprintf("%v not a Bool", x))
}
// StringVal returns the Go string value of x, which must be a [String] or an [Unknown].
// If x is [Unknown], the result is "".
func StringVal(x Value) string {
	if s, ok := x.(*stringVal); ok {
		return s.string()
	}
	if _, ok := x.(unknownVal); ok {
		return ""
	}
	panic(fmt.Sprintf("%v not a String", x))
}
// Int64Val returns the Go int64 value of x and whether the result is exact;
// x must be an [Int] or an [Unknown]. If the result is not exact, its value is undefined.
// If x is [Unknown], the result is (0, false).
func Int64Val(x Value) (int64, bool) {
	switch v := x.(type) {
	case int64Val:
		return int64(v), true
	case intVal:
		// Values small enough for int64 are stored as int64Val,
		// so an intVal is never exactly representable.
		return v.val.Int64(), false
	case unknownVal:
		return 0, false
	}
	panic(fmt.Sprintf("%v not an Int", x))
}
// Uint64Val returns the Go uint64 value of x and whether the result is exact;
// x must be an [Int] or an [Unknown]. If the result is not exact, its value is undefined.
// If x is [Unknown], the result is (0, false).
func Uint64Val(x Value) (uint64, bool) {
	switch v := x.(type) {
	case int64Val:
		// Exact iff the value is non-negative.
		return uint64(v), v >= 0
	case intVal:
		return v.val.Uint64(), v.val.IsUint64()
	case unknownVal:
		return 0, false
	}
	panic(fmt.Sprintf("%v not an Int", x))
}
// Float32Val is like [Float64Val] but for float32 instead of float64.
func Float32Val(x Value) (float32, bool) {
	switch x := x.(type) {
	case int64Val:
		f := float32(x)
		// Exact iff converting back recovers the original integer.
		return f, int64Val(f) == x
	case intVal:
		f, acc := newFloat().SetInt(x.val).Float32()
		return f, acc == big.Exact
	case ratVal:
		return x.val.Float32()
	case floatVal:
		f, acc := x.val.Float32()
		return f, acc == big.Exact
	case unknownVal:
		return 0, false
	default:
		panic(fmt.Sprintf("%v not a Float", x))
	}
}
// Float64Val returns the nearest Go float64 value of x and whether the result is exact;
// x must be numeric or an [Unknown], but not [Complex]. For values too small (too close to 0)
// to represent as float64, [Float64Val] silently underflows to 0. The result sign always
// matches the sign of x, even for 0.
// If x is [Unknown], the result is (0, false).
func Float64Val(x Value) (float64, bool) {
	switch x := x.(type) {
	case int64Val:
		f := float64(int64(x))
		// Exact iff converting back recovers the original integer.
		return f, int64Val(f) == x
	case intVal:
		f, acc := newFloat().SetInt(x.val).Float64()
		return f, acc == big.Exact
	case ratVal:
		return x.val.Float64()
	case floatVal:
		f, acc := x.val.Float64()
		return f, acc == big.Exact
	case unknownVal:
		return 0, false
	default:
		panic(fmt.Sprintf("%v not a Float", x))
	}
}
// Val returns the underlying value for a given constant. Since it returns an
// interface, it is up to the caller to type assert the result to the expected
// type. The possible dynamic return types are:
//
//	x Kind             type of result
//	-----------------------------------------
//	Bool               bool
//	String             string
//	Int                int64 or *big.Int
//	Float              *big.Float or *big.Rat
//	everything else    nil
func Val(x Value) any {
	switch v := x.(type) {
	case boolVal:
		return bool(v)
	case *stringVal:
		return v.string()
	case int64Val:
		return int64(v)
	case intVal:
		return v.val
	case ratVal:
		return v.val
	case floatVal:
		return v.val
	}
	return nil
}
// Make returns the [Value] for x.
//
//	type of x        result Kind
//	----------------------------
//	bool             Bool
//	string           String
//	int64            Int
//	*big.Int         Int
//	*big.Float       Float
//	*big.Rat         Float
//	anything else    Unknown
func Make(x any) Value {
	switch v := x.(type) {
	case bool:
		return boolVal(v)
	case string:
		return &stringVal{s: v}
	case int64:
		return int64Val(v)
	case *big.Int:
		return makeInt(v)
	case *big.Rat:
		return makeRat(v)
	case *big.Float:
		return makeFloat(v)
	}
	return unknownVal{}
}
// BitLen returns the number of bits required to represent
// the absolute value x in binary representation; x must be an [Int] or an [Unknown].
// If x is [Unknown], the result is 0.
func BitLen(x Value) int {
	switch x := x.(type) {
	case int64Val:
		u := uint64(x)
		if x < 0 {
			// For x == math.MinInt64, -x wraps back to x, but the
			// uint64 conversion still yields 1<<63, giving the
			// correct bit length of 64.
			u = uint64(-x)
		}
		return 64 - bits.LeadingZeros64(u)
	case intVal:
		return x.val.BitLen()
	case unknownVal:
		return 0
	default:
		panic(fmt.Sprintf("%v not an Int", x))
	}
}
// Sign returns -1, 0, or 1 depending on whether x < 0, x == 0, or x > 0;
// x must be numeric or [Unknown]. For complex values x, the sign is 0 if x == 0,
// otherwise it is != 0. If x is [Unknown], the result is 1.
func Sign(x Value) int {
	switch v := x.(type) {
	case int64Val:
		if v < 0 {
			return -1
		}
		if v > 0 {
			return 1
		}
		return 0
	case intVal:
		return v.val.Sign()
	case ratVal:
		return v.val.Sign()
	case floatVal:
		return v.val.Sign()
	case complexVal:
		// Nonzero iff either component is nonzero.
		return Sign(v.re) | Sign(v.im)
	case unknownVal:
		return 1 // avoid spurious division by zero errors
	}
	panic(fmt.Sprintf("%v not numeric", x))
}
// ----------------------------------------------------------------------------
// Support for assembling/disassembling numeric values

const (
	// Compute the size of a Word in bytes.
	_m = ^big.Word(0)
	// _log is log2 of the Word size in bytes:
	// 2 for 32-bit Words, 3 for 64-bit Words.
	_log     = _m>>8&1 + _m>>16&1 + _m>>32&1
	wordSize = 1 << _log
)
// Bytes returns the bytes for the absolute value of x in little-
// endian binary representation; x must be an [Int].
func Bytes(x Value) []byte {
	var t intVal
	switch x := x.(type) {
	case int64Val:
		t = i64toi(x)
	case intVal:
		t = x
	default:
		panic(fmt.Sprintf("%v not an Int", x))
	}
	words := t.val.Bits()
	bytes := make([]byte, len(words)*wordSize)
	i := 0
	for _, w := range words {
		// Serialize each word least-significant byte first.
		for j := 0; j < wordSize; j++ {
			bytes[i] = byte(w)
			w >>= 8
			i++
		}
	}
	// remove leading 0's
	for i > 0 && bytes[i-1] == 0 {
		i--
	}
	return bytes[:i]
}
// MakeFromBytes returns the [Int] value given the bytes of its little-endian
// binary representation. An empty byte slice argument represents 0.
func MakeFromBytes(bytes []byte) Value {
	words := make([]big.Word, (len(bytes)+(wordSize-1))/wordSize)
	i := 0
	var w big.Word
	var s uint
	for _, b := range bytes {
		// Accumulate bytes into w, least-significant first.
		w |= big.Word(b) << s
		if s += 8; s == wordSize*8 {
			words[i] = w
			i++
			w = 0
			s = 0
		}
	}
	// store last word (possibly partial)
	if i < len(words) {
		words[i] = w
		i++
	}
	// remove leading 0's
	for i > 0 && words[i-1] == 0 {
		i--
	}
	return makeInt(newInt().SetBits(words[:i]))
}
// Num returns the numerator of x; x must be [Int], [Float], or [Unknown].
// If x is [Unknown], or if it is too large or small to represent as a
// fraction, the result is [Unknown]. Otherwise the result is an [Int]
// with the same sign as x.
func Num(x Value) Value {
	switch x := x.(type) {
	case int64Val, intVal:
		return x // an integer is its own numerator
	case ratVal:
		return makeInt(x.val.Num())
	case floatVal:
		// Only convert to a fraction if it stays "reasonably" sized.
		if smallFloat(x.val) {
			r, _ := x.val.Rat(nil)
			return makeInt(r.Num())
		}
	case unknownVal:
		break
	default:
		panic(fmt.Sprintf("%v not Int or Float", x))
	}
	return unknownVal{}
}
// Denom returns the denominator of x; x must be [Int], [Float], or [Unknown].
// If x is [Unknown], or if it is too large or small to represent as a
// fraction, the result is [Unknown]. Otherwise the result is an [Int] >= 1.
func Denom(x Value) Value {
	switch x := x.(type) {
	case int64Val, intVal:
		return int64Val(1) // integers have denominator 1
	case ratVal:
		return makeInt(x.val.Denom())
	case floatVal:
		// Only convert to a fraction if it stays "reasonably" sized.
		if smallFloat(x.val) {
			r, _ := x.val.Rat(nil)
			return makeInt(r.Denom())
		}
	case unknownVal:
		break
	default:
		panic(fmt.Sprintf("%v not Int or Float", x))
	}
	return unknownVal{}
}
// MakeImag returns the [Complex] value x*i;
// x must be [Int], [Float], or [Unknown].
// If x is [Unknown], the result is [Unknown].
func MakeImag(x Value) Value {
	switch x.(type) {
	case int64Val, intVal, ratVal, floatVal:
		// x becomes the imaginary component; the real part is 0.
		return makeComplex(int64Val(0), x)
	case unknownVal:
		return x
	}
	panic(fmt.Sprintf("%v not Int or Float", x))
}
// Real returns the real part of x, which must be a numeric or unknown value.
// If x is [Unknown], the result is [Unknown].
func Real(x Value) Value {
	if c, ok := x.(complexVal); ok {
		return c.re
	}
	switch x.(type) {
	case unknownVal, int64Val, intVal, ratVal, floatVal:
		// Non-complex numeric values are their own real part.
		return x
	}
	panic(fmt.Sprintf("%v not numeric", x))
}
// Imag returns the imaginary part of x, which must be a numeric or unknown value.
// If x is [Unknown], the result is [Unknown].
func Imag(x Value) Value {
	switch v := x.(type) {
	case complexVal:
		return v.im
	case int64Val, intVal, ratVal, floatVal:
		// Non-complex numeric values have imaginary part 0.
		return int64Val(0)
	case unknownVal:
		return x
	}
	panic(fmt.Sprintf("%v not numeric", x))
}
// ----------------------------------------------------------------------------
// Numeric conversions
// ToInt converts x to an [Int] value if x is representable as an [Int].
// Otherwise it returns an [Unknown].
func ToInt(x Value) Value {
	switch x := x.(type) {
	case int64Val, intVal:
		return x
	case ratVal:
		if x.val.IsInt() {
			return makeInt(x.val.Num())
		}
	case floatVal:
		// avoid creation of huge integers
		// (Existing tests require permitting exponents of at least 1024;
		// allow any value that would also be permissible as a fraction.)
		if smallFloat(x.val) {
			i := newInt()
			if _, acc := x.val.Int(i); acc == big.Exact {
				return makeInt(i)
			}
			// If we can get an integer by rounding up or down,
			// assume x is not an integer because of rounding
			// errors in prior computations.
			const delta = 4 // a small number of bits > 0
			var t big.Float
			t.SetPrec(prec - delta)
			// try rounding down a little
			t.SetMode(big.ToZero)
			t.Set(x.val)
			if _, acc := t.Int(i); acc == big.Exact {
				return makeInt(i)
			}
			// try rounding up a little
			t.SetMode(big.AwayFromZero)
			t.Set(x.val)
			if _, acc := t.Int(i); acc == big.Exact {
				return makeInt(i)
			}
		}
	case complexVal:
		// A complex value can be an integer only if its imaginary
		// part is zero; ToFloat performs that check.
		if re := ToFloat(x); re.Kind() == Float {
			return ToInt(re)
		}
	}
	return unknownVal{}
}
// ToFloat converts x to a [Float] value if x is representable as a [Float].
// Otherwise it returns an [Unknown].
func ToFloat(x Value) Value {
	switch x := x.(type) {
	case int64Val:
		return i64tor(x) // x is always a small int
	case intVal:
		if smallInt(x.val) {
			return itor(x) // exact rational representation
		}
		return itof(x) // too large for a "reasonable" fraction; use float form
	case ratVal, floatVal:
		return x
	case complexVal:
		// A complex value converts to Float only if its
		// imaginary part is zero.
		if Sign(x.im) == 0 {
			return ToFloat(x.re)
		}
	}
	return unknownVal{}
}
// ToComplex converts x to a [Complex] value if x is representable as a [Complex].
// Otherwise it returns an [Unknown].
func ToComplex(x Value) Value {
	switch v := x.(type) {
	case complexVal:
		return v // already complex
	case int64Val, intVal, ratVal, floatVal:
		// Promote a real value to a complex one.
		return vtoc(v)
	}
	return unknownVal{}
}
// ----------------------------------------------------------------------------
// Operations
// is32bit reports whether x can be represented using 32 bits.
func is32bit(x int64) bool {
	// x fits in 32 bits iff truncating to int32 and
	// sign-extending back is the identity.
	return int64(int32(x)) == x
}
// is63bit reports whether x can be represented using 63 bits.
func is63bit(x int64) bool {
	// Arithmetic shift by 62 leaves 0 for values in [0, 1<<62)
	// and -1 for values in [-1<<62, 0).
	return x>>62 == 0 || x>>62 == -1
}
// UnaryOp returns the result of the unary expression op y.
// The operation must be defined for the operand.
// If prec > 0 it specifies the ^ (xor) result size in bits.
// If y is [Unknown], the result is [Unknown].
func UnaryOp(op token.Token, y Value, prec uint) Value {
	switch op {
	case token.ADD:
		// Unary plus is the identity on any numeric or unknown value.
		switch y.(type) {
		case unknownVal, int64Val, intVal, ratVal, floatVal, complexVal:
			return y
		}
	case token.SUB:
		switch y := y.(type) {
		case unknownVal:
			return y
		case int64Val:
			if z := -y; z != y {
				return z // no overflow
			}
			// y == math.MinInt64: negation overflows int64;
			// fall back to arbitrary precision.
			return makeInt(newInt().Neg(big.NewInt(int64(y))))
		case intVal:
			return makeInt(newInt().Neg(y.val))
		case ratVal:
			return makeRat(newRat().Neg(y.val))
		case floatVal:
			return makeFloat(newFloat().Neg(y.val))
		case complexVal:
			// Negate both components.
			re := UnaryOp(token.SUB, y.re, 0)
			im := UnaryOp(token.SUB, y.im, 0)
			return makeComplex(re, im)
		}
	case token.XOR:
		z := newInt()
		switch y := y.(type) {
		case unknownVal:
			return y
		case int64Val:
			z.Not(big.NewInt(int64(y)))
		case intVal:
			z.Not(y.val)
		default:
			goto Error
		}
		// For unsigned types, the result will be negative and
		// thus "too large": We must limit the result precision
		// to the type's precision.
		if prec > 0 {
			z.AndNot(z, newInt().Lsh(big.NewInt(-1), prec)) // z &^= (-1)<<prec
		}
		return makeInt(z)
	case token.NOT:
		switch y := y.(type) {
		case unknownVal:
			return y
		case boolVal:
			return !y
		}
	}
Error:
	panic(fmt.Sprintf("invalid unary operation %s%v", op, y))
}
// ord returns a rank for x ordering Value representations by complexity;
// match uses it to decide which operand to convert.
func ord(x Value) int {
	switch x.(type) {
	case unknownVal:
		return 0
	case boolVal, *stringVal:
		return 1
	case int64Val:
		return 2
	case intVal:
		return 3
	case ratVal:
		return 4
	case floatVal:
		return 5
	case complexVal:
		return 6
	default:
		// force invalid value into "x position" in match
		// (don't panic here so that callers can provide a better error message)
		return -1
	}
}
// match returns the matching representation (same type) with the
// smallest complexity for two values x and y. If one of them is
// numeric, both of them must be numeric. If one of them is Unknown
// or invalid (say, nil) both results are that value.
func match(x, y Value) (_, _ Value) {
	ox, oy := ord(x), ord(y)
	if ox < oy {
		// Convert x up to y's representation.
		x, y = match0(x, y)
	} else if ox > oy {
		// Convert y up to x's representation.
		y, x = match0(y, x)
	}
	return x, y
}
// match0 must only be called by match.
// Invariant: ord(x) < ord(y)
func match0(x, y Value) (_, _ Value) {
	// Prefer to return the original x and y arguments when possible,
	// to avoid unnecessary heap allocations.
	// Switch on y (the more complex representation) and promote x to it.
	switch y.(type) {
	case intVal:
		switch x1 := x.(type) {
		case int64Val:
			return i64toi(x1), y
		}
	case ratVal:
		switch x1 := x.(type) {
		case int64Val:
			return i64tor(x1), y
		case intVal:
			return itor(x1), y
		}
	case floatVal:
		switch x1 := x.(type) {
		case int64Val:
			return i64tof(x1), y
		case intVal:
			return itof(x1), y
		case ratVal:
			return rtof(x1), y
		}
	case complexVal:
		switch x1 := x.(type) {
		case int64Val, intVal, ratVal, floatVal:
			return vtoc(x1), y
		}
	}
	// force unknown and invalid values into "x position" in callers of match
	// (don't panic here so that callers can provide a better error message)
	return x, x
}
// BinaryOp returns the result of the binary expression x op y.
// The operation must be defined for the operands. If one of the
// operands is [Unknown], the result is [Unknown].
// BinaryOp doesn't handle comparisons or shifts; use [Compare]
// or [Shift] instead.
//
// To force integer division of [Int] operands, use op == [token.QUO_ASSIGN]
// instead of [token.QUO]; the result is guaranteed to be [Int] in this case.
// Division by zero leads to a run-time panic.
func BinaryOp(x_ Value, op token.Token, y_ Value) Value {
	// Bring both operands to the same representation first.
	x, y := match(x_, y_)
	switch x := x.(type) {
	case unknownVal:
		return x
	case boolVal:
		y := y.(boolVal)
		switch op {
		case token.LAND:
			return x && y
		case token.LOR:
			return x || y
		}
	case int64Val:
		a := int64(x)
		b := int64(y.(int64Val))
		var c int64
		switch op {
		case token.ADD:
			// is63bit guarantees a+b cannot overflow int64;
			// otherwise use arbitrary precision.
			if !is63bit(a) || !is63bit(b) {
				return makeInt(newInt().Add(big.NewInt(a), big.NewInt(b)))
			}
			c = a + b
		case token.SUB:
			if !is63bit(a) || !is63bit(b) {
				return makeInt(newInt().Sub(big.NewInt(a), big.NewInt(b)))
			}
			c = a - b
		case token.MUL:
			// is32bit guarantees a*b cannot overflow int64.
			if !is32bit(a) || !is32bit(b) {
				return makeInt(newInt().Mul(big.NewInt(a), big.NewInt(b)))
			}
			c = a * b
		case token.QUO:
			// Integer quotient is an exact rational number.
			return makeRat(big.NewRat(a, b))
		case token.QUO_ASSIGN: // force integer division
			c = a / b
		case token.REM:
			c = a % b
		case token.AND:
			c = a & b
		case token.OR:
			c = a | b
		case token.XOR:
			c = a ^ b
		case token.AND_NOT:
			c = a &^ b
		default:
			goto Error
		}
		return int64Val(c)
	case intVal:
		a := x.val
		b := y.(intVal).val
		c := newInt()
		switch op {
		case token.ADD:
			c.Add(a, b)
		case token.SUB:
			c.Sub(a, b)
		case token.MUL:
			c.Mul(a, b)
		case token.QUO:
			return makeRat(newRat().SetFrac(a, b))
		case token.QUO_ASSIGN: // force integer division
			c.Quo(a, b)
		case token.REM:
			c.Rem(a, b)
		case token.AND:
			c.And(a, b)
		case token.OR:
			c.Or(a, b)
		case token.XOR:
			c.Xor(a, b)
		case token.AND_NOT:
			c.AndNot(a, b)
		default:
			goto Error
		}
		return makeInt(c)
	case ratVal:
		a := x.val
		b := y.(ratVal).val
		c := newRat()
		switch op {
		case token.ADD:
			c.Add(a, b)
		case token.SUB:
			c.Sub(a, b)
		case token.MUL:
			c.Mul(a, b)
		case token.QUO:
			c.Quo(a, b)
		default:
			goto Error
		}
		return makeRat(c)
	case floatVal:
		a := x.val
		b := y.(floatVal).val
		c := newFloat()
		switch op {
		case token.ADD:
			c.Add(a, b)
		case token.SUB:
			c.Sub(a, b)
		case token.MUL:
			c.Mul(a, b)
		case token.QUO:
			c.Quo(a, b)
		default:
			goto Error
		}
		return makeFloat(c)
	case complexVal:
		y := y.(complexVal)
		a, b := x.re, x.im
		c, d := y.re, y.im
		var re, im Value
		switch op {
		case token.ADD:
			// (a+c) + i(b+d)
			re = add(a, c)
			im = add(b, d)
		case token.SUB:
			// (a-c) + i(b-d)
			re = sub(a, c)
			im = sub(b, d)
		case token.MUL:
			// (ac-bd) + i(bc+ad)
			ac := mul(a, c)
			bd := mul(b, d)
			bc := mul(b, c)
			ad := mul(a, d)
			re = sub(ac, bd)
			im = add(bc, ad)
		case token.QUO:
			// (ac+bd)/s + i(bc-ad)/s, with s = cc + dd
			ac := mul(a, c)
			bd := mul(b, d)
			bc := mul(b, c)
			ad := mul(a, d)
			cc := mul(c, c)
			dd := mul(d, d)
			s := add(cc, dd)
			re = add(ac, bd)
			re = quo(re, s)
			im = sub(bc, ad)
			im = quo(im, s)
		default:
			goto Error
		}
		return makeComplex(re, im)
	case *stringVal:
		// String concatenation; other ops on strings are invalid.
		if op == token.ADD {
			return &stringVal{l: x, r: y.(*stringVal)}
		}
	}
Error:
	panic(fmt.Sprintf("invalid binary operation %v %s %v", x_, op, y_))
}
// Convenience wrappers around BinaryOp, used by the complex
// arithmetic in BinaryOp's complexVal case.
func add(x, y Value) Value { return BinaryOp(x, token.ADD, y) }
func sub(x, y Value) Value { return BinaryOp(x, token.SUB, y) }
func mul(x, y Value) Value { return BinaryOp(x, token.MUL, y) }
func quo(x, y Value) Value { return BinaryOp(x, token.QUO, y) }
// Shift returns the result of the shift expression x op s
// with op == [token.SHL] or [token.SHR] (<< or >>). x must be
// an [Int] or an [Unknown]. If x is [Unknown], the result is x.
func Shift(x Value, op token.Token, s uint) Value {
	switch x := x.(type) {
	case unknownVal:
		return x
	case int64Val:
		if s == 0 {
			return x
		}
		switch op {
		case token.SHL:
			// Left shift may overflow int64; promote to big.Int first.
			z := i64toi(x).val
			return makeInt(z.Lsh(z, s))
		case token.SHR:
			// Arithmetic right shift cannot overflow.
			return x >> s
		}
	case intVal:
		if s == 0 {
			return x
		}
		z := newInt()
		switch op {
		case token.SHL:
			return makeInt(z.Lsh(x.val, s))
		case token.SHR:
			return makeInt(z.Rsh(x.val, s))
		}
	}
	panic(fmt.Sprintf("invalid shift %v %s %d", x, op, s))
}
func cmpZero(x int, op token.Token) bool {
switch op {
case token.EQL:
return x == 0
case token.NEQ:
return x != 0
case token.LSS:
return x < 0
case token.LEQ:
return x <= 0
case token.GTR:
return x > 0
case token.GEQ:
return x >= 0
}
panic(fmt.Sprintf("invalid comparison %v %s 0", x, op))
}
// Compare returns the result of the comparison x op y.
// The comparison must be defined for the operands.
// If one of the operands is [Unknown], the result is
// false.
func Compare(x_ Value, op token.Token, y_ Value) bool {
	// Bring both operands to the same representation first.
	x, y := match(x_, y_)
	switch x := x.(type) {
	case unknownVal:
		return false
	case boolVal:
		y := y.(boolVal)
		switch op {
		case token.EQL:
			return x == y
		case token.NEQ:
			return x != y
		}
	case int64Val:
		y := y.(int64Val)
		switch op {
		case token.EQL:
			return x == y
		case token.NEQ:
			return x != y
		case token.LSS:
			return x < y
		case token.LEQ:
			return x <= y
		case token.GTR:
			return x > y
		case token.GEQ:
			return x >= y
		}
	case intVal:
		return cmpZero(x.val.Cmp(y.(intVal).val), op)
	case ratVal:
		return cmpZero(x.val.Cmp(y.(ratVal).val), op)
	case floatVal:
		return cmpZero(x.val.Cmp(y.(floatVal).val), op)
	case complexVal:
		// Complex values support only == and !=, defined
		// component-wise.
		y := y.(complexVal)
		re := Compare(x.re, token.EQL, y.re)
		im := Compare(x.im, token.EQL, y.im)
		switch op {
		case token.EQL:
			return re && im
		case token.NEQ:
			return !re || !im
		}
	case *stringVal:
		// Strings compare lexically on their flattened contents.
		xs := x.string()
		ys := y.(*stringVal).string()
		switch op {
		case token.EQL:
			return xs == ys
		case token.NEQ:
			return xs != ys
		case token.LSS:
			return xs < ys
		case token.LEQ:
			return xs <= ys
		case token.GTR:
			return xs > ys
		case token.GEQ:
			return xs >= ys
		}
	}
	panic(fmt.Sprintf("invalid comparison %v %s %v", x_, op, y_))
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package doc
import (
"go/doc/comment"
"io"
)
// ToHTML converts comment text to formatted HTML.
//
// Deprecated: ToHTML cannot identify documentation links
// in the doc comment, because they depend on knowing what
// package the text came from, which is not included in this API.
//
// Given the *[doc.Package] p where text was found,
// ToHTML(w, text, nil) can be replaced by:
//
//	w.Write(p.HTML(text))
//
// which is in turn shorthand for:
//
//	w.Write(p.Printer().HTML(p.Parser().Parse(text)))
//
// If words may be non-nil, the longer replacement is:
//
//	parser := p.Parser()
//	parser.Words = words
//	w.Write(p.Printer().HTML(parser.Parse(d)))
func ToHTML(w io.Writer, text string, words map[string]string) {
	parser := new(Package).Parser()
	parser.Words = words
	printer := new(comment.Printer)
	w.Write(printer.HTML(parser.Parse(text)))
}
// ToText converts comment text to formatted text.
//
// Deprecated: ToText cannot identify documentation links
// in the doc comment, because they depend on knowing what
// package the text came from, which is not included in this API.
//
// Given the *[doc.Package] p where text was found,
// ToText(w, text, "", "\t", 80) can be replaced by:
//
// w.Write(p.Text(text))
//
// In the general case, ToText(w, text, prefix, codePrefix, width)
// can be replaced by:
//
// d := p.Parser().Parse(text)
// pr := p.Printer()
// pr.TextPrefix = prefix
// pr.TextCodePrefix = codePrefix
// pr.TextWidth = width
// w.Write(pr.Text(d))
//
// See the documentation for [Package.Text] and [comment.Printer.Text]
// for more details.
func ToText(w io.Writer, text string, prefix, codePrefix string, width int) {
d := new(Package).Parser().Parse(text)
pr := &comment.Printer{
TextPrefix: prefix,
TextCodePrefix: codePrefix,
TextWidth: width,
}
w.Write(pr.Text(d))
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package comment
import (
"bytes"
"fmt"
"strconv"
)
// An htmlPrinter holds the state needed for printing a [Doc] as HTML.
type htmlPrinter struct {
	*Printer
	// tight suppresses the <p> tag around paragraphs; it is set
	// while printing the items of a list without blank lines
	// between them (see the *List case in block).
	tight bool
}
// HTML returns an HTML formatting of the [Doc].
// See the [Printer] documentation for ways to customize the HTML output.
func (p *Printer) HTML(d *Doc) []byte {
	printer := &htmlPrinter{Printer: p}
	var buf bytes.Buffer
	for _, blk := range d.Content {
		printer.block(&buf, blk)
	}
	return buf.Bytes()
}
// block prints the block x to out.
func (p *htmlPrinter) block(out *bytes.Buffer, x Block) {
	switch x := x.(type) {
	default:
		// Unknown block type: print its Go type as a placeholder.
		fmt.Fprintf(out, "?%T", x)
	case *Paragraph:
		// In tight lists, paragraphs are printed without <p> tags.
		if !p.tight {
			out.WriteString("<p>")
		}
		p.text(out, x.Text)
		out.WriteString("\n")
	case *Heading:
		out.WriteString("<h")
		h := strconv.Itoa(p.headingLevel())
		out.WriteString(h)
		if id := p.headingID(x); id != "" {
			out.WriteString(` id="`)
			p.escape(out, id)
			out.WriteString(`"`)
		}
		out.WriteString(">")
		p.text(out, x.Text)
		out.WriteString("</h")
		out.WriteString(h)
		out.WriteString(">\n")
	case *Code:
		out.WriteString("<pre>")
		p.escape(out, x.Text)
		out.WriteString("</pre>\n")
	case *List:
		// kind includes ">\n" so one WriteString finishes both the
		// opening and closing tag lines below.
		kind := "ol>\n"
		if x.Items[0].Number == "" {
			kind = "ul>\n"
		}
		out.WriteString("<")
		out.WriteString(kind)
		// next is the item number implied by the sequence so far;
		// emit an explicit value="..." only when an item breaks it.
		next := "1"
		for _, item := range x.Items {
			out.WriteString("<li")
			if n := item.Number; n != "" {
				if n != next {
					out.WriteString(` value="`)
					out.WriteString(n)
					out.WriteString(`"`)
					next = n
				}
				next = inc(next)
			}
			out.WriteString(">")
			// Tight lists (no blank lines between items) print their
			// paragraphs without <p> tags.
			p.tight = !x.BlankBetween()
			for _, blk := range item.Content {
				p.block(out, blk)
			}
			p.tight = false
		}
		out.WriteString("</")
		out.WriteString(kind)
	}
}
// inc increments the decimal string s.
// For example, inc("1199") == "1200".
func inc(s string) string {
	digits := []byte(s)
	// Work right to left, carrying past '9' digits.
	for i := len(digits) - 1; i >= 0; i-- {
		if digits[i] < '9' {
			digits[i]++
			return string(digits)
		}
		digits[i] = '0'
	}
	// Every digit carried: prepend a leading 1.
	return "1" + string(digits)
}
// text prints the text sequence x to out.
func (p *htmlPrinter) text(out *bytes.Buffer, x []Text) {
	for _, t := range x {
		switch t := t.(type) {
		case Plain:
			p.escape(out, string(t))
		case Italic:
			out.WriteString("<i>")
			p.escape(out, string(t))
			out.WriteString("</i>")
		case *Link:
			out.WriteString(`<a href="`)
			p.escape(out, t.URL)
			out.WriteString(`">`)
			p.text(out, t.Text)
			out.WriteString("</a>")
		case *DocLink:
			// A doc link with no resolvable URL is printed as
			// plain text, without the surrounding <a> tag.
			url := p.docLinkURL(t)
			if url != "" {
				out.WriteString(`<a href="`)
				p.escape(out, url)
				out.WriteString(`">`)
			}
			p.text(out, t.Text)
			if url != "" {
				out.WriteString("</a>")
			}
		}
	}
}
// escape prints s to out as plain text,
// escaping < & " ' and > to avoid being misinterpreted
// in larger HTML constructs.
func (p *htmlPrinter) escape(out *bytes.Buffer, s string) {
start := 0
for i := 0; i < len(s); i++ {
switch s[i] {
case '<':
out.WriteString(s[start:i])
out.WriteString("<")
start = i + 1
case '&':
out.WriteString(s[start:i])
out.WriteString("&")
start = i + 1
case '"':
out.WriteString(s[start:i])
out.WriteString(""")
start = i + 1
case '\'':
out.WriteString(s[start:i])
out.WriteString("'")
start = i + 1
case '>':
out.WriteString(s[start:i])
out.WriteString(">")
start = i + 1
}
}
out.WriteString(s[start:])
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package comment
import (
"bytes"
"fmt"
"strings"
)
// An mdPrinter holds the state needed for printing a Doc as Markdown.
type mdPrinter struct {
	*Printer
	// headingPrefix is the "#"-run prefix for headings,
	// derived from the Printer's heading level (e.g. "## ").
	headingPrefix string
	// raw is a scratch buffer reused by text to assemble a line
	// before deciding whether its first character needs escaping.
	raw bytes.Buffer
}
// Markdown returns a Markdown formatting of the Doc.
// See the [Printer] documentation for ways to customize the Markdown output.
func (p *Printer) Markdown(d *Doc) []byte {
	printer := &mdPrinter{
		Printer:       p,
		headingPrefix: strings.Repeat("#", p.headingLevel()) + " ",
	}
	var buf bytes.Buffer
	for i, blk := range d.Content {
		// Separate consecutive blocks with a blank line.
		if i > 0 {
			buf.WriteByte('\n')
		}
		printer.block(&buf, blk)
	}
	return buf.Bytes()
}
// block prints the block x to out.
func (p *mdPrinter) block(out *bytes.Buffer, x Block) {
	switch x := x.(type) {
	default:
		// Unknown block type: print its Go type as a placeholder.
		fmt.Fprintf(out, "?%T", x)
	case *Paragraph:
		p.text(out, x.Text)
		out.WriteString("\n")
	case *Heading:
		out.WriteString(p.headingPrefix)
		p.text(out, x.Text)
		if id := p.headingID(x); id != "" {
			out.WriteString(" {#")
			out.WriteString(id)
			out.WriteString("}")
		}
		out.WriteString("\n")
	case *Code:
		// Indent each non-blank line with a tab to form a
		// Markdown code block; blank lines stay empty.
		md := x.Text
		for md != "" {
			var line string
			line, md, _ = strings.Cut(md, "\n")
			if line != "" {
				out.WriteString("\t")
				out.WriteString(line)
			}
			out.WriteString("\n")
		}
	case *List:
		// Loose lists separate items with blank lines.
		loose := x.BlankBetween()
		for i, item := range x.Items {
			if i > 0 && loose {
				out.WriteString("\n")
			}
			if n := item.Number; n != "" {
				out.WriteString(" ")
				out.WriteString(n)
				out.WriteString(". ")
			} else {
				out.WriteString(" - ") // SP SP - SP
			}
			for i, blk := range item.Content {
				// Continuation paragraphs are indented four spaces
				// to stay inside the list item.
				const fourSpace = "    "
				if i > 0 {
					out.WriteString("\n" + fourSpace)
				}
				p.text(out, blk.(*Paragraph).Text)
				out.WriteString("\n")
			}
		}
	}
}
// text prints the text sequence x to out.
func (p *mdPrinter) text(out *bytes.Buffer, x []Text) {
	// Assemble the whole line first so we can inspect its first
	// character and escape anything Markdown would treat as the
	// start of a list item or heading.
	p.raw.Reset()
	p.rawText(&p.raw, x)
	line := bytes.TrimSpace(p.raw.Bytes())
	if len(line) == 0 {
		return
	}
	switch line[0] {
	case '+', '-', '*', '#':
		// Escape what would be the start of an unordered list or heading.
		out.WriteByte('\\')
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		// Scan past the leading digit run.
		i := 1
		for i < len(line) && '0' <= line[i] && line[i] <= '9' {
			i++
		}
		if i < len(line) && (line[i] == '.' || line[i] == ')') {
			// Escape what would be the start of an ordered list.
			out.Write(line[:i])
			out.WriteByte('\\')
			line = line[i:]
		}
	}
	out.Write(line)
}
// rawText prints the text sequence x to out,
// without worrying about escaping characters
// that have special meaning at the start of a Markdown line.
func (p *mdPrinter) rawText(out *bytes.Buffer, x []Text) {
	for _, t := range x {
		switch t := t.(type) {
		case Plain:
			p.escape(out, string(t))
		case Italic:
			out.WriteString("*")
			p.escape(out, string(t))
			out.WriteString("*")
		case *Link:
			out.WriteString("[")
			p.rawText(out, t.Text)
			out.WriteString("](")
			out.WriteString(t.URL)
			out.WriteString(")")
		case *DocLink:
			// A doc link with no resolvable URL is printed as
			// plain text, without the [text](url) wrapper.
			url := p.docLinkURL(t)
			if url != "" {
				out.WriteString("[")
			}
			p.rawText(out, t.Text)
			if url != "" {
				out.WriteString("](")
				// Percent-encode parentheses so they cannot
				// terminate the Markdown link destination early.
				url = strings.ReplaceAll(url, "(", "%28")
				url = strings.ReplaceAll(url, ")", "%29")
				out.WriteString(url)
				out.WriteString(")")
			}
		}
	}
}
// escape prints s to out as plain text,
// escaping special characters to avoid being misinterpreted
// as Markdown markup sequences.
func (p *mdPrinter) escape(out *bytes.Buffer, s string) {
	// start is the beginning of the pending run of
	// characters that need no escaping.
	start := 0
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '\n':
			// Turn all \n into spaces, for a few reasons:
			//   - Avoid introducing paragraph breaks accidentally.
			//   - Avoid the need to reindent after the newline.
			//   - Avoid problems with Markdown renderers treating
			//     every mid-paragraph newline as a <br>.
			out.WriteString(s[start:i])
			out.WriteByte(' ')
			start = i + 1
			continue
		case '`', '_', '*', '[', '<', '\\':
			// Not all of these need to be escaped all the time,
			// but is valid and easy to do so.
			// We assume the Markdown is being passed to a
			// Markdown renderer, not edited by a person,
			// so it's fine to have escapes that are not strictly
			// necessary in some cases.
			out.WriteString(s[start:i])
			out.WriteByte('\\')
			out.WriteByte(s[i])
			start = i + 1
		}
	}
	// Flush the trailing unescaped run.
	out.WriteString(s[start:])
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package comment
import (
"slices"
"strings"
"unicode"
"unicode/utf8"
)
// A Doc is a parsed Go doc comment.
type Doc struct {
	// Content is the sequence of content blocks in the comment.
	// Each block is a [*Code], [*Heading], [*List], or [*Paragraph].
	Content []Block
	// Links is the link definitions in the comment.
	Links []*LinkDef
}
// A LinkDef is a single link definition
// (associating link text with a URL).
type LinkDef struct {
	Text string // the link text
	URL  string // the link URL
	Used bool   // whether the comment uses the definition
}
// A Block is block-level content in a doc comment,
// one of [*Code], [*Heading], [*List], or [*Paragraph].
type Block interface {
	// block is an unexported marker method, restricting
	// implementations of Block to this package.
	block()
}
// A Heading is a doc comment heading.
type Heading struct {
	Text []Text // the heading text
}

// block marks *Heading as a [Block].
func (*Heading) block() {}
// A List is a numbered or bullet list.
// Lists are always non-empty: len(Items) > 0.
// In a numbered list, every Items[i].Number is a non-empty string.
// In a bullet list, every Items[i].Number is an empty string.
type List struct {
	// Items is the list items.
	Items []*ListItem
	// ForceBlankBefore indicates that the list must be
	// preceded by a blank line when reformatting the comment,
	// overriding the usual conditions. See the BlankBefore method.
	//
	// The comment parser sets ForceBlankBefore for any list
	// that is preceded by a blank line, to make sure
	// the blank line is preserved when printing.
	ForceBlankBefore bool
	// ForceBlankBetween indicates that list items must be
	// separated by blank lines when reformatting the comment,
	// overriding the usual conditions. See the BlankBetween method.
	//
	// The comment parser sets ForceBlankBetween for any list
	// that has a blank line between any two of its items, to make sure
	// the blank lines are preserved when printing.
	ForceBlankBetween bool
}

// block marks *List as a [Block].
func (*List) block() {}
// BlankBefore reports whether a reformatting of the comment
// should include a blank line before the list.
// The default rule is the same as for [BlankBetween]:
// if the list item content contains any blank lines
// (meaning at least one item has multiple paragraphs)
// then the list itself must be preceded by a blank line.
// A preceding blank line can be forced by setting [List].ForceBlankBefore.
func (l *List) BlankBefore() bool {
	if l.ForceBlankBefore {
		return true
	}
	return l.BlankBetween()
}
// BlankBetween reports whether a reformatting of the comment
// should include a blank line between each pair of list items.
// The default rule is that if the list item content contains any blank lines
// (meaning at least one item has multiple paragraphs)
// then list items must themselves be separated by blank lines.
// Blank line separators can be forced by setting [List].ForceBlankBetween.
func (l *List) BlankBetween() bool {
	if l.ForceBlankBetween {
		return true
	}
	for _, it := range l.Items {
		if len(it.Content) == 1 {
			continue
		}
		// An item with multiple content blocks means multiple
		// paragraphs, which must have been separated by a blank
		// line in the original comment.
		// (Unreachable for parsed comments today, since multiple
		// paragraphs are the only way to get multiple blocks.)
		return true
	}
	return false
}
// A ListItem is a single item in a numbered or bullet list.
type ListItem struct {
	// Number is a decimal string in a numbered list
	// or an empty string in a bullet list.
	Number string // "1", "2", ...; "" for bullet list
	// Content is the list content.
	// Currently, restrictions in the parser and printer
	// require every element of Content to be a *Paragraph.
	Content []Block // Content of this item.
}

// A Paragraph is a paragraph of text.
type Paragraph struct {
	Text []Text
}

// block marks Paragraph as block-level content.
func (*Paragraph) block() {}

// A Code is a preformatted code block.
type Code struct {
	// Text is the preformatted text, ending with a newline character.
	// It may be multiple lines, each of which ends with a newline character.
	// It is never empty, nor does it start or end with a blank line.
	Text string
}

// block marks Code as block-level content.
func (*Code) block() {}

// A Text is text-level content in a doc comment,
// one of [Plain], [Italic], [*Link], or [*DocLink].
type Text interface {
	// text is an unexported marker method, restricting
	// Text implementations to this package.
	text()
}
// A Plain is a string rendered as plain text (not italicized).
type Plain string

// text marks Plain as text-level content.
func (Plain) text() {}

// An Italic is a string rendered as italicized text.
type Italic string

// text marks Italic as text-level content.
func (Italic) text() {}

// A Link is a link to a specific URL.
type Link struct {
	Auto bool   // is this an automatic (implicit) link of a literal URL?
	Text []Text // text of link
	URL  string // target URL of link
}

// text marks Link as text-level content.
func (*Link) text() {}

// A DocLink is a link to documentation for a Go package or symbol.
type DocLink struct {
	Text []Text // text of link

	// ImportPath, Recv, and Name identify the Go package or symbol
	// that is the link target. The potential combinations of
	// non-empty fields are:
	//   - ImportPath: a link to another package
	//   - ImportPath, Name: a link to a const, func, type, or var in another package
	//   - ImportPath, Recv, Name: a link to a method in another package
	//   - Name: a link to a const, func, type, or var in this package
	//   - Recv, Name: a link to a method in this package
	ImportPath string // import path
	Recv       string // receiver type, without any pointer star, for methods
	Name       string // const, func, type, var, or method name
}

// text marks DocLink as text-level content.
func (*DocLink) text() {}
// A Parser is a doc comment parser.
// The fields in the struct can be filled in before calling [Parser.Parse]
// in order to customize the details of the parsing process.
type Parser struct {
	// Words is a map of Go identifier words that
	// should be italicized and potentially linked.
	// If Words[w] is the empty string, then the word w
	// is only italicized. Otherwise it is linked, using
	// Words[w] as the link target.
	// Words corresponds to the [go/doc.ToHTML] words parameter.
	Words map[string]string

	// LookupPackage resolves a package name to an import path.
	//
	// If LookupPackage(name) returns ok == true, then [name]
	// (or [name.Sym] or [name.Sym.Method])
	// is considered a documentation link to importPath's package docs.
	// It is valid to return "", true, in which case name is considered
	// to refer to the current package.
	//
	// If LookupPackage(name) returns ok == false,
	// then [name] (or [name.Sym] or [name.Sym.Method])
	// will not be considered a documentation link,
	// except in the case where name is the full (but single-element) import path
	// of a package in the standard library, such as in [math] or [io.Reader].
	// LookupPackage is still called for such names,
	// in order to permit references to imports of other packages
	// with the same package names.
	//
	// Setting LookupPackage to nil is equivalent to setting it to
	// a function that always returns "", false.
	LookupPackage func(name string) (importPath string, ok bool)

	// LookupSym reports whether a symbol name or method name
	// exists in the current package.
	//
	// If LookupSym("", "Name") returns true, then [Name]
	// is considered a documentation link for a const, func, type, or var.
	//
	// Similarly, if LookupSym("Recv", "Name") returns true,
	// then [Recv.Name] is considered a documentation link for
	// type Recv's method Name.
	//
	// Setting LookupSym to nil is equivalent to setting it to a function
	// that always returns false.
	LookupSym func(recv, name string) (ok bool)
}

// parseDoc is parsing state for a single doc comment.
type parseDoc struct {
	*Parser
	*Doc
	links     map[string]*LinkDef // link definitions by text; first definition of a text wins
	lines     []string            // unindented comment lines being parsed
	lookupSym func(recv, name string) bool // always non-nil: Parser.LookupSym or a default returning false
}
// lookupPkg resolves the pkg in [pkg], [pkg.Name], and [pkg.Name.Recv].
// If pkg contains a slash, it is assumed to be a full import path
// and is accepted as long as it is valid.
//
// Otherwise, pkg is probably a simple package name like "rand"
// (not "crypto/rand" or "math/rand"), and d.LookupPackage lets the
// caller resolve such names against the surrounding package's imports.
//
// There is one collision between these two cases: single-element
// standard library names like "math" are full import paths but contain
// no slash. d.LookupPackage gets the first chance to resolve them, in
// case a different package is imported as math, and otherwise we fall
// back to the built-in list of single-element standard library paths.
func (d *parseDoc) lookupPkg(pkg string) (importPath string, ok bool) {
	if strings.Contains(pkg, "/") {
		// Treat as a full import path; accept only if well formed.
		if !validImportPath(pkg) {
			return "", false
		}
		return pkg, true
	}
	// Give the caller-provided lookup the first chance.
	if d.LookupPackage != nil {
		if p, found := d.LookupPackage(pkg); found {
			return p, true
		}
	}
	return DefaultLookupPackage(pkg)
}
// isStdPkg reports whether path is one of the single-element
// standard library import paths in the sorted stdPkgs table.
func isStdPkg(path string) bool {
	if _, found := slices.BinarySearch(stdPkgs, path); found {
		return true
	}
	return false
}
// DefaultLookupPackage is the default package lookup
// function, used when [Parser.LookupPackage] is nil.
// It recognizes names of the packages from the standard
// library with single-element import paths, such as math,
// which would otherwise be impossible to name.
//
// Note that the go/doc package provides a more sophisticated
// lookup based on the imports used in the current package.
func DefaultLookupPackage(name string) (importPath string, ok bool) {
	if !isStdPkg(name) {
		return "", false
	}
	return name, true
}
// Parse parses the doc comment text and returns the *[Doc] form.
// Comment markers (/* // and */) in the text must have already been removed.
func (p *Parser) Parse(text string) *Doc {
	lines := unindent(strings.Split(text, "\n"))
	d := &parseDoc{
		Parser: p,
		Doc:    new(Doc),
		links:  make(map[string]*LinkDef),
		lines:  lines,
		// Default symbol lookup rejects everything; replaced below if set.
		lookupSym: func(recv, name string) bool { return false },
	}
	if p.LookupSym != nil {
		d.lookupSym = p.LookupSym
	}

	// First pass: break into block structure and collect known links.
	// The text is all recorded as Plain for now.
	var prev span
	for _, s := range parseSpans(lines) {
		var b Block
		switch s.kind {
		default:
			panic("go/doc/comment: internal error: unknown span kind")
		case spanList:
			// prev.end < s.start means a blank line preceded the list.
			b = d.list(lines[s.start:s.end], prev.end < s.start)
		case spanCode:
			b = d.code(lines[s.start:s.end])
		case spanOldHeading:
			b = d.oldHeading(lines[s.start])
		case spanHeading:
			b = d.heading(lines[s.start])
		case spanPara:
			b = d.paragraph(lines[s.start:s.end])
		}
		// paragraph returns nil for a span that is entirely link definitions.
		if b != nil {
			d.Content = append(d.Content, b)
		}
		prev = s
	}

	// Second pass: interpret all the Plain text now that we know the links.
	for _, b := range d.Content {
		switch b := b.(type) {
		case *Paragraph:
			b.Text = d.parseLinkedText(string(b.Text[0].(Plain)))
		case *List:
			for _, i := range b.Items {
				for _, c := range i.Content {
					p := c.(*Paragraph)
					p.Text = d.parseLinkedText(string(p.Text[0].(Plain)))
				}
			}
		}
	}

	return d.Doc
}
// A span represents a single span of comment lines (lines[start:end])
// of an identified kind (code, heading, paragraph, and so on).
type span struct {
	start int      // first line of the span, inclusive
	end   int      // one past the last line of the span
	kind  spanKind // classification of the span's content
}

// A spanKind describes the kind of span.
type spanKind int

const (
	// The zero spanKind is deliberately invalid,
	// so an uninitialized span is detectable.
	_ spanKind = iota
	spanCode
	spanHeading
	spanList
	spanOldHeading
	spanPara
)
// parseSpans partitions lines into a sequence of spans, classifying
// each maximal run of lines as code, list, heading, or paragraph.
// It applies heuristics to recover from common mistakes such as
// unindented code blocks and unindented lists.
func parseSpans(lines []string) []span {
	var spans []span

	// The loop may process a line twice: once as unindented
	// and again forced indented. So the maximum expected
	// number of iterations is 2*len(lines). The repeating logic
	// can be subtle, though, and to protect against introduction
	// of infinite loops in future changes, we watch to see that
	// we are not looping too much. A panic is better than a
	// quiet infinite loop.
	watchdog := 2 * len(lines)

	i := 0
	forceIndent := 0 // lines[i] for i < forceIndent are treated as indented
Spans:
	for {
		// Skip blank lines.
		for i < len(lines) && lines[i] == "" {
			i++
		}
		if i >= len(lines) {
			break
		}
		if watchdog--; watchdog < 0 {
			panic("go/doc/comment: internal error: not making progress")
		}

		var kind spanKind
		start := i
		end := i
		if i < forceIndent || indented(lines[i]) {
			// Indented (or force indented).
			// Ends before next unindented. (Blank lines are OK.)
			// If this is an unindented list that we are heuristically treating as indented,
			// then accept unindented list item lines up to the first blank lines.
			// The heuristic is disabled at blank lines to contain its effect
			// to non-gofmt'ed sections of the comment.
			unindentedListOK := isList(lines[i]) && i < forceIndent
			i++
			for i < len(lines) && (lines[i] == "" || i < forceIndent || indented(lines[i]) || (unindentedListOK && isList(lines[i]))) {
				if lines[i] == "" {
					unindentedListOK = false
				}
				i++
			}

			// Drop trailing blank lines.
			end = i
			for end > start && lines[end-1] == "" {
				end--
			}

			// If indented lines are followed (without a blank line)
			// by an unindented line ending in a brace,
			// take that one line too. This fixes the common mistake
			// of pasting in something like
			//
			//	func main() {
			//		fmt.Println("hello, world")
			//	}
			//
			// and forgetting to indent it.
			// The heuristic will never trigger on a gofmt'ed comment,
			// because any gofmt'ed code block or list would be
			// followed by a blank line or end of comment.
			if end < len(lines) && strings.HasPrefix(lines[end], "}") {
				end++
			}

			if isList(lines[start]) {
				kind = spanList
			} else {
				kind = spanCode
			}
		} else {
			// Unindented. Ends at next blank or indented line.
			i++
			for i < len(lines) && lines[i] != "" && !indented(lines[i]) {
				i++
			}
			end = i

			// If unindented lines are followed (without a blank line)
			// by an indented line that would start a code block,
			// check whether the final unindented lines
			// should be left for the indented section.
			// This can happen for the common mistakes of
			// unindented code or unindented lists.
			// The heuristic will never trigger on a gofmt'ed comment,
			// because any gofmt'ed code block would have a blank line
			// preceding it after the unindented lines.
			if i < len(lines) && lines[i] != "" && !isList(lines[i]) {
				switch {
				case isList(lines[i-1]):
					// If the final unindented line looks like a list item,
					// this may be the first indented line wrap of
					// a mistakenly unindented list.
					// Leave all the unindented list items.
					forceIndent = end
					end--
					for end > start && isList(lines[end-1]) {
						end--
					}

				case strings.HasSuffix(lines[i-1], "{") || strings.HasSuffix(lines[i-1], `\`):
					// If the final unindented line ended in { or \
					// it is probably the start of a misindented code block.
					// Give the user a single line fix.
					// Often that's enough; if not, the user can fix the others themselves.
					forceIndent = end
					end--
				}

				if start == end && forceIndent > start {
					// Everything was handed to the indented section;
					// reprocess the same lines as force-indented.
					i = start
					continue Spans
				}
			}

			// Span is either paragraph or heading.
			if end-start == 1 && isHeading(lines[start]) {
				kind = spanHeading
			} else if end-start == 1 && isOldHeading(lines[start], lines, start) {
				kind = spanOldHeading
			} else {
				kind = spanPara
			}
		}

		spans = append(spans, span{start, end, kind})
		i = end
	}

	return spans
}
// indented reports whether line is indented
// (starts with a leading space or tab).
func indented(line string) bool {
	if len(line) == 0 {
		return false
	}
	switch line[0] {
	case ' ', '\t':
		return true
	}
	return false
}
// unindent returns a copy of lines in which any common space/tab
// prefix has been removed from every line. Lines containing only
// whitespace become empty strings, and blank lines are dropped
// from both ends of the result.
func unindent(lines []string) []string {
	// Discard blank lines at either end before measuring indentation.
	for len(lines) > 0 && isBlank(lines[0]) {
		lines = lines[1:]
	}
	for len(lines) > 0 && isBlank(lines[len(lines)-1]) {
		lines = lines[:len(lines)-1]
	}
	if len(lines) == 0 {
		return nil
	}

	// The common indent is the longest space/tab prefix shared
	// by every non-blank line.
	indent := leadingSpace(lines[0])
	for _, l := range lines[1:] {
		if !isBlank(l) {
			indent = commonPrefix(indent, leadingSpace(l))
		}
	}

	trimmed := make([]string, len(lines))
	for i, l := range lines {
		l = strings.TrimPrefix(l, indent)
		if strings.TrimSpace(l) == "" {
			// Whitespace-only lines become truly blank.
			l = ""
		}
		trimmed[i] = l
	}

	// Trimming may have turned boundary lines blank; drop them too.
	for len(trimmed) > 0 && trimmed[0] == "" {
		trimmed = trimmed[1:]
	}
	for len(trimmed) > 0 && trimmed[len(trimmed)-1] == "" {
		trimmed = trimmed[:len(trimmed)-1]
	}
	return trimmed
}
// isBlank reports whether s is a blank line:
// either empty or a lone newline character.
func isBlank(s string) bool {
	switch s {
	case "", "\n":
		return true
	}
	return false
}
// commonPrefix returns the longest common prefix of a and b.
func commonPrefix(a, b string) string {
	n := min(len(a), len(b))
	for i := 0; i < n; i++ {
		if a[i] != b[i] {
			return a[:i]
		}
	}
	return a[:n]
}
// leadingSpace returns the longest prefix of s consisting of spaces and tabs.
func leadingSpace(s string) string {
	rest := strings.TrimLeft(s, " \t")
	return s[:len(s)-len(rest)]
}
// isOldHeading reports whether line is an old-style section heading.
// line is all[off].
// An old-style heading must be surrounded by blank lines
// (all[off-1] and all[off+1] blank, with unindented text at all[off+2]),
// start with an uppercase letter, end in a letter or digit,
// and contain only characters plausible in a heading.
func isOldHeading(line string, all []string, off int) bool {
	// Require blank line before and after, and an unindented line
	// two lines below (so the heading is not attached to a code block).
	if off <= 0 || all[off-1] != "" || off+2 >= len(all) || all[off+1] != "" || leadingSpace(all[off+2]) != "" {
		return false
	}

	line = strings.TrimSpace(line)

	// a heading must start with an uppercase letter
	r, _ := utf8.DecodeRuneInString(line)
	if !unicode.IsLetter(r) || !unicode.IsUpper(r) {
		return false
	}

	// it must end in a letter or digit:
	r, _ = utf8.DecodeLastRuneInString(line)
	if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
		return false
	}

	// exclude lines with illegal characters. we allow "(),"
	if strings.ContainsAny(line, ";:!?+*/=[]{}_^°&§~%#@<\">\\") {
		return false
	}

	// allow "'" for possessive "'s" only
	for b := line; ; {
		var ok bool
		if _, b, ok = strings.Cut(b, "'"); !ok {
			break
		}
		if b != "s" && !strings.HasPrefix(b, "s ") {
			return false // ' not followed by s and then end-of-word
		}
	}

	// allow "." when followed by non-space
	for b := line; ; {
		var ok bool
		if _, b, ok = strings.Cut(b, "."); !ok {
			break
		}
		if b == "" || strings.HasPrefix(b, " ") {
			return false // not followed by non-space
		}
	}

	return true
}
// oldHeading returns the *Heading for the given old-style section heading line,
// trimming surrounding whitespace from the heading text.
func (d *parseDoc) oldHeading(line string) Block {
	text := Plain(strings.TrimSpace(line))
	return &Heading{Text: []Text{text}}
}
// isHeading reports whether line is a new-style section heading:
// a '#' followed by a space or tab and then non-blank heading text.
func isHeading(line string) bool {
	if len(line) < 2 || line[0] != '#' {
		return false
	}
	if line[1] != ' ' && line[1] != '\t' {
		return false
	}
	// Reject a heading marker with no text after it.
	return strings.TrimSpace(line) != "#"
}
// heading returns the *Heading for the given new-style section heading line,
// stripping the leading '#' and any surrounding whitespace.
func (d *parseDoc) heading(line string) Block {
	text := strings.TrimSpace(line[1:])
	return &Heading{Text: []Text{Plain(text)}}
}
// code returns a code block built from the lines,
// with common indentation removed and a final newline guaranteed.
func (d *parseDoc) code(lines []string) *Code {
	stripped := unindent(lines)
	// An empty final element makes Join emit a trailing \n.
	stripped = append(stripped, "")
	return &Code{Text: strings.Join(stripped, "\n")}
}
// paragraph returns a paragraph block built from the lines.
// If every line is a link definition, paragraph records the
// definitions in d and returns nil instead of a block.
func (d *parseDoc) paragraph(lines []string) Block {
	// Try to parse the whole span as link definitions.
	defs := make([]*LinkDef, 0, len(lines))
	for _, line := range lines {
		def, ok := parseLink(line)
		if !ok {
			// Not a block of link definitions:
			// treat the span as ordinary paragraph text.
			return &Paragraph{Text: []Text{Plain(strings.Join(lines, "\n"))}}
		}
		defs = append(defs, def)
	}
	for _, def := range defs {
		d.Links = append(d.Links, def)
		// The first definition of a given link text wins.
		if d.links[def.Text] == nil {
			d.links[def.Text] = def
		}
	}
	return nil
}
// parseLink parses a single link definition line:
//
//	[text]: url
//
// It returns the link definition and whether the line was well formed.
func parseLink(line string) (*LinkDef, bool) {
	if !strings.HasPrefix(line, "[") {
		return nil, false
	}
	name, rest, found := strings.Cut(line, "]:")
	if !found || len(rest) < 2 || (rest[0] != ' ' && rest[0] != '\t') {
		return nil, false
	}
	url := strings.TrimSpace(rest[1:])
	scheme, _, haveScheme := strings.Cut(url, "://")
	if !haveScheme || !isScheme(scheme) {
		return nil, false
	}
	// Line has right form and has valid scheme://.
	// That's good enough for us - we are not as picky
	// about the characters beyond the :// as we are
	// when extracting inline URLs from text.
	return &LinkDef{Text: name[1:], URL: url}, true
}
// list returns a list built from the indented lines,
// using forceBlankBefore as the value of the List's ForceBlankBefore field.
func (d *parseDoc) list(lines []string, forceBlankBefore bool) *List {
	// The first line's marker decides whether this is a
	// numbered list or a bullet list.
	num, _, _ := listMarker(lines[0])
	out := &List{ForceBlankBefore: forceBlankBefore}
	var (
		cur  *ListItem // item currently being accumulated
		para []string  // pending paragraph text for cur
	)
	// flush converts any pending text into a paragraph on cur.
	flush := func() {
		if cur != nil {
			if p := d.paragraph(para); p != nil {
				cur.Content = append(cur.Content, p)
			}
		}
		para = nil
	}

	for _, line := range lines {
		if n, rest, ok := listMarker(line); ok && (n != "") == (num != "") {
			// A marker of the same kind (numbered vs bullet)
			// starts a new list item.
			flush()
			cur = &ListItem{Number: n}
			out.Items = append(out.Items, cur)
			line = rest
		}
		line = strings.TrimSpace(line)
		if line == "" {
			// A blank line ends the current paragraph and forces
			// blank separators between items when reprinting.
			out.ForceBlankBetween = true
			flush()
			continue
		}
		para = append(para, strings.TrimSpace(line))
	}
	flush()
	return out
}
// listMarker parses the line as beginning with a list marker.
// If it can do that, it returns the numeric marker ("" for a bullet list),
// the rest of the line, and ok == true.
// Otherwise, it returns "", "", false.
func listMarker(line string) (num, rest string, ok bool) {
	line = strings.TrimSpace(line)
	if line == "" {
		return "", "", false
	}

	r, size := utf8.DecodeRuneInString(line)
	switch {
	case r == '•' || r == '*' || r == '+' || r == '-':
		// Bullet marker: a single marker rune.
		num, rest = "", line[size:]
	case '0' <= r && r <= '9':
		// Numbered marker: a run of digits followed by '.' or ')'.
		end := 1
		for end < len(line) && '0' <= line[end] && line[end] <= '9' {
			end++
		}
		if end >= len(line) || (line[end] != '.' && line[end] != ')') {
			return "", "", false
		}
		num, rest = line[:end], line[end+1:]
	default:
		return "", "", false
	}

	// The marker must be followed by indented, non-blank text.
	if !indented(rest) || strings.TrimSpace(rest) == "" {
		return "", "", false
	}
	return num, rest, true
}
// isList reports whether the line is the first line of a list,
// meaning starts with a list marker after any indentation.
// (The caller is responsible for checking the line is indented, as appropriate.)
func isList(line string) bool {
	if _, _, ok := listMarker(line); ok {
		return true
	}
	return false
}
// parseLinkedText parses text that is allowed to contain explicit links,
// such as [math.Sin] or [Go home page], into a slice of Text items.
//
// A “pkg” is only assumed to be a full import path if it starts with
// a domain name (a path element with a dot) or is one of the packages
// from the standard library (“[os]”, “[encoding/json]”, and so on).
// To avoid problems with maps, generics, and array types, doc links
// must be both preceded and followed by punctuation, spaces, tabs,
// or the start or end of a line. An example problem would be treating
// map[ast.Expr]TypeAndValue as containing a link.
func (d *parseDoc) parseLinkedText(text string) []Text {
	var out []Text
	wrote := 0 // number of bytes of text already appended to out
	flush := func(i int) {
		if wrote < i {
			out = d.parseText(out, text[wrote:i], true)
			wrote = i
		}
	}

	start := -1    // index of the most recent unmatched '[', or -1 if none
	var buf []byte // bracketed text since start, with \n and \t mapped to ' '
	for i := 0; i < len(text); i++ {
		c := text[i]
		if c == '\n' || c == '\t' {
			c = ' '
		}
		switch c {
		case '[':
			start = i
		case ']':
			if start >= 0 {
				if def, ok := d.links[string(buf)]; ok {
					// Known link definition: emit a Link with its URL.
					def.Used = true
					flush(start)
					out = append(out, &Link{
						Text: d.parseText(nil, text[start+1:i], false),
						URL:  def.URL,
					})
					wrote = i + 1
				} else if link, ok := d.docLink(text[start+1:i], text[:start], text[i+1:]); ok {
					// Doc link to a Go package or symbol.
					flush(start)
					link.Text = d.parseText(nil, text[start+1:i], false)
					out = append(out, link)
					wrote = i + 1
				}
			}
			start = -1
			buf = buf[:0]
		}
		if start >= 0 && i != start {
			buf = append(buf, c)
		}
	}

	flush(len(text))
	return out
}
// docLink parses text, which was found inside [ ] brackets,
// as a doc link if possible, returning the DocLink and ok == true
// or else nil, false.
// The before and after strings are the text before the [ and after the ]
// on the same line. Doc links must be preceded and followed by
// punctuation, spaces, tabs, or the start or end of a line.
func (d *parseDoc) docLink(text, before, after string) (*DocLink, bool) {
	// The character immediately before the '[' must be a boundary.
	if before != "" {
		if r, _ := utf8.DecodeLastRuneInString(before); !unicode.IsPunct(r) && r != ' ' && r != '\t' && r != '\n' {
			return nil, false
		}
	}
	// As must the character immediately after the ']'.
	if after != "" {
		if r, _ := utf8.DecodeRuneInString(after); !unicode.IsPunct(r) && r != ' ' && r != '\t' && r != '\n' {
			return nil, false
		}
	}

	// Allow [*Type] to mean [Type].
	text = strings.TrimPrefix(text, "*")

	// Split off a trailing .Name, and then possibly a .Recv before it.
	pkg, name, haveName := splitDocName(text)
	var recv string
	if haveName {
		pkg, recv, _ = splitDocName(pkg)
	}

	if pkg != "" {
		// Qualified reference: the package part must resolve.
		resolved, found := d.lookupPkg(pkg)
		if !found {
			return nil, false
		}
		pkg = resolved
	} else if !d.lookupSym(recv, name) {
		// Unqualified reference: the symbol must exist in this package.
		return nil, false
	}

	return &DocLink{
		ImportPath: pkg,
		Recv:       recv,
		Name:       name,
	}, true
}
// splitDocName splits text of the form before.Name, where Name is a
// capitalized Go identifier, returning before, Name, true.
// Otherwise it returns text, "", false.
func splitDocName(text string) (before, name string, foundDot bool) {
	dot := strings.LastIndex(text, ".")
	name = text[dot+1:]
	if !isName(name) {
		return text, "", false
	}
	if dot < 0 {
		// The whole text is the name; there is nothing before it.
		return "", name, true
	}
	return text[:dot], name, true
}
// parseText parses s as text and returns the result of appending
// those parsed Text elements to out.
// parseText does not handle explicit links like [math.Sin] or [Go home page]:
// those are handled by parseLinkedText.
// If autoLink is true, then parseText recognizes URLs and words from d.Words
// and converts those to links as appropriate.
func (d *parseDoc) parseText(out []Text, s string, autoLink bool) []Text {
	var w strings.Builder
	wrote := 0 // number of bytes of s already consumed (written or emitted)
	writeUntil := func(i int) {
		w.WriteString(s[wrote:i])
		wrote = i
	}
	// flush emits any buffered plain text as a Plain element.
	flush := func(i int) {
		writeUntil(i)
		if w.Len() > 0 {
			out = append(out, Plain(w.String()))
			w.Reset()
		}
	}
	for i := 0; i < len(s); {
		t := s[i:]
		if autoLink {
			if url, ok := autoURL(t); ok {
				flush(i)
				// Note: The old comment parser would look up the URL in words
				// and replace the target with words[URL] if it was non-empty.
				// That would allow creating links that display as one URL but
				// when clicked go to a different URL. Not sure what the point
				// of that is, so we're not doing that lookup here.
				out = append(out, &Link{Auto: true, Text: []Text{Plain(url)}, URL: url})
				i += len(url)
				wrote = i
				continue
			}
			if id, ok := ident(t); ok {
				url, italics := d.Words[id]
				if !italics {
					i += len(id)
					continue
				}
				flush(i)
				if url == "" {
					out = append(out, Italic(id))
				} else {
					out = append(out, &Link{Auto: true, Text: []Text{Italic(id)}, URL: url})
				}
				i += len(id)
				wrote = i
				continue
			}
		}
		// Convert `` to “ and '' to ” (doc comment quoting convention).
		switch {
		case strings.HasPrefix(t, "``"):
			if len(t) >= 3 && t[2] == '`' {
				// Do not convert `` inside ```, in case people are mistakenly writing Markdown.
				i += 3
				for i < len(t) && t[i] == '`' {
					i++
				}
				break
			}
			writeUntil(i)
			w.WriteRune('“')
			i += 2
			wrote = i
		case strings.HasPrefix(t, "''"):
			writeUntil(i)
			w.WriteRune('”')
			i += 2
			wrote = i
		default:
			i++
		}
	}
	flush(len(s))
	return out
}
// autoURL checks whether s begins with a URL that should be hyperlinked.
// If so, it returns the URL, which is a prefix of s, and ok == true.
// Otherwise it returns "", false.
// The caller should skip over the first len(url) bytes of s
// before further processing.
func autoURL(s string) (url string, ok bool) {
	// Find the ://. Fast path to pick off non-URL,
	// since we call this at every position in the string.
	// The shortest possible URL is ftp://x, 7 bytes.
	var i int
	switch {
	case len(s) < 7:
		return "", false
	case s[3] == ':':
		i = 3
	case s[4] == ':':
		i = 4
	case s[5] == ':':
		i = 5
	case s[6] == ':':
		i = 6
	default:
		return "", false
	}
	if i+3 > len(s) || s[i:i+3] != "://" {
		return "", false
	}

	// Check valid scheme.
	if !isScheme(s[:i]) {
		return "", false
	}

	// Scan host part. Must have at least one byte,
	// and must start and end in non-punctuation.
	i += 3
	if i >= len(s) || !isHost(s[i]) || isPunct(s[i]) {
		return "", false
	}
	i++
	end := i // end tracks the last position ending in non-punctuation
	for i < len(s) && isHost(s[i]) {
		if !isPunct(s[i]) {
			end = i + 1
		}
		i++
	}
	i = end

	// At this point we are definitely returning a URL (scheme://host).
	// We just have to find the longest path we can add to it.
	// Heuristics abound.
	// We allow parens, braces, and brackets,
	// but only if they match (#5043, #22285).
	// We allow .,:;?! in the path but not at the end,
	// to avoid end-of-sentence punctuation (#18139, #16565).
	stk := []byte{} // stack of expected closing brackets
	end = i
Path:
	for ; i < len(s); i++ {
		if isPunct(s[i]) {
			// Allowed inside the path but not at the end;
			// do not advance end.
			continue
		}
		if !isPath(s[i]) {
			break
		}
		switch s[i] {
		case '(':
			stk = append(stk, ')')
		case '{':
			stk = append(stk, '}')
		case '[':
			stk = append(stk, ']')
		case ')', '}', ']':
			if len(stk) == 0 || stk[len(stk)-1] != s[i] {
				// Unmatched closer ends the URL.
				break Path
			}
			stk = stk[:len(stk)-1]
		}
		if len(stk) == 0 {
			end = i + 1
		}
	}

	return s[:end], true
}
// isScheme reports whether s is a recognized URL scheme.
// Note that if strings of new length (beyond 3-7)
// are added here, the fast path at the top of autoURL will need updating.
func isScheme(s string) bool {
	return s == "file" ||
		s == "ftp" ||
		s == "gopher" ||
		s == "http" ||
		s == "https" ||
		s == "mailto" ||
		s == "nntp"
}
// isHost reports whether c is a byte that can appear in a URL host,
// like www.example.com or user@[::1]:8080.
// Bytes outside ASCII are never host bytes.
func isHost(c byte) bool {
	switch {
	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z', '0' <= c && c <= '9':
		return true
	}
	switch c {
	case '_', '@', '-', '.', '[', ']', ':':
		return true
	}
	return false
}
// isPunct reports whether c is a punctuation byte that can appear
// inside a URL path but not at the end of it.
func isPunct(c byte) bool {
	switch c {
	case '.', ',', ':', ';', '?', '!':
		return true
	}
	return false
}
// isPath reports whether c is a (non-punctuation) path byte.
// Bytes outside ASCII are never path bytes.
func isPath(c byte) bool {
	switch {
	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z', '0' <= c && c <= '9':
		return true
	}
	switch c {
	case '$', '\'', '(', ')', '*', '+', '&', '#', '=', '@', '~', '_', '/', '-', '[', ']', '{', '}', '%':
		return true
	}
	return false
}
// isName reports whether s is a capitalized Go identifier (like Name).
func isName(s string) bool {
	// s must be exactly one identifier, with nothing left over.
	prefix, ok := ident(s)
	if !ok || prefix != s {
		return false
	}
	// And it must begin with an uppercase letter.
	first, _ := utf8.DecodeRuneInString(s)
	return unicode.IsUpper(first)
}
// ident checks whether s begins with a Go identifier
// (a letter or underscore followed by letters, underscores, and digits).
// If so, it returns the identifier, which is a prefix of s, and ok == true.
// Otherwise it returns "", false.
// The caller should skip over the first len(id) bytes of s
// before further processing.
func ident(s string) (id string, ok bool) {
	i := 0
	for i < len(s) {
		c := s[i]
		if c < utf8.RuneSelf {
			// ASCII: letters, '_', and (except in the first byte) digits.
			if !isIdentASCII(c) || (i == 0 && '0' <= c && c <= '9') {
				break
			}
			i++
			continue
		}
		// Non-ASCII: any Unicode letter is allowed anywhere.
		r, size := utf8.DecodeRuneInString(s[i:])
		if !unicode.IsLetter(r) {
			break
		}
		i += size
	}
	return s[:i], i > 0
}
// isIdentASCII reports whether c is an ASCII identifier byte:
// a letter, a digit, or an underscore.
func isIdentASCII(c byte) bool {
	switch {
	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z', '0' <= c && c <= '9', c == '_':
		return true
	}
	return false
}
// validImportPath reports whether path is a valid import path.
// It is a lightly edited copy of golang.org/x/mod/module.CheckImportPath.
func validImportPath(path string) bool {
	// Reject whole-path problems first.
	switch {
	case !utf8.ValidString(path):
		return false
	case path == "":
		return false
	case path[0] == '-':
		return false
	case strings.Contains(path, "//"):
		return false
	case path[len(path)-1] == '/':
		return false
	}
	// Then check every slash-separated element.
	start := 0
	for i, r := range path {
		if r != '/' {
			continue
		}
		if !validImportPathElem(path[start:i]) {
			return false
		}
		start = i + 1
	}
	return validImportPathElem(path[start:])
}
// validImportPathElem reports whether elem is a valid import path element:
// non-empty, not starting or ending with a dot, and built from allowed bytes.
func validImportPathElem(elem string) bool {
	if elem == "" || strings.HasPrefix(elem, ".") || strings.HasSuffix(elem, ".") {
		return false
	}
	for i := 0; i < len(elem); i++ {
		if !importPathOK(elem[i]) {
			return false
		}
	}
	return true
}
// importPathOK reports whether c is a byte allowed in an import path
// element: an ASCII letter or digit, or one of - . ~ _ +.
func importPathOK(c byte) bool {
	switch {
	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z', '0' <= c && c <= '9':
		return true
	}
	switch c {
	case '-', '.', '~', '_', '+':
		return true
	}
	return false
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package comment
import (
"bytes"
"fmt"
"strings"
)
// A Printer is a doc comment printer.
// The fields in the struct can be filled in before calling
// any of the printing methods
// in order to customize the details of the printing process.
// The zero Printer is ready to use with sensible defaults.
type Printer struct {
	// HeadingLevel is the nesting level used for
	// HTML and Markdown headings.
	// If HeadingLevel is zero, it defaults to level 3,
	// meaning to use <h3> and ###.
	HeadingLevel int

	// HeadingID is a function that computes the heading ID
	// (anchor tag) to use for the heading h when generating
	// HTML and Markdown. If HeadingID returns an empty string,
	// then the heading ID is omitted.
	// If HeadingID is nil, h.DefaultID is used.
	HeadingID func(h *Heading) string

	// DocLinkURL is a function that computes the URL for the given DocLink.
	// If DocLinkURL is nil, then link.DefaultURL(p.DocLinkBaseURL) is used.
	DocLinkURL func(link *DocLink) string

	// DocLinkBaseURL is used when DocLinkURL is nil,
	// passed to [DocLink.DefaultURL] to construct a DocLink's URL.
	// See that method's documentation for details.
	DocLinkBaseURL string

	// TextPrefix is a prefix to print at the start of every line
	// when generating text output using the Text method.
	TextPrefix string

	// TextCodePrefix is the prefix to print at the start of each
	// preformatted (code block) line when generating text output,
	// instead of (not in addition to) TextPrefix.
	// If TextCodePrefix is the empty string, it defaults to TextPrefix+"\t".
	TextCodePrefix string

	// TextWidth is the maximum width text line to generate,
	// measured in Unicode code points,
	// excluding TextPrefix and the newline character.
	// If TextWidth is zero, it defaults to 80 minus the number of code points in TextPrefix.
	// If TextWidth is negative, there is no limit.
	TextWidth int
}
// headingLevel returns the heading nesting level to use,
// substituting the default of 3 when p.HeadingLevel is unset
// (zero or negative).
func (p *Printer) headingLevel() int {
	if lvl := p.HeadingLevel; lvl > 0 {
		return lvl
	}
	return 3
}
// headingID returns the anchor ID for the heading h, using the custom
// HeadingID function when one is set and h.DefaultID otherwise.
func (p *Printer) headingID(h *Heading) string {
	if fn := p.HeadingID; fn != nil {
		return fn(h)
	}
	return h.DefaultID()
}
// docLinkURL returns the URL to use for link.
// A custom DocLinkURL function takes precedence; otherwise the
// default URL is built from p.DocLinkBaseURL.
func (p *Printer) docLinkURL(link *DocLink) string {
	fn := p.DocLinkURL
	if fn == nil {
		return link.DefaultURL(p.DocLinkBaseURL)
	}
	return fn(link)
}
// DefaultURL constructs and returns the documentation URL for l,
// using baseURL as a prefix for links to other packages.
//
// The possible forms returned by DefaultURL are:
//   - baseURL/ImportPath, for a link to another package
//   - baseURL/ImportPath#Name, for a link to a const, func, type, or var in another package
//   - baseURL/ImportPath#Recv.Name, for a link to a method in another package
//   - #Name, for a link to a const, func, type, or var in this package
//   - #Recv.Name, for a link to a method in this package
//
// If baseURL ends in a trailing slash, then DefaultURL inserts
// a slash between ImportPath and # in the anchored forms.
// For example, here are some baseURL values and URLs they can generate:
//
//	"/pkg/" → "/pkg/math/#Sqrt"
//	"/pkg"  → "/pkg/math#Sqrt"
//	"/"     → "/math/#Sqrt"
//	""      → "/math#Sqrt"
func (l *DocLink) DefaultURL(baseURL string) string {
	if l.ImportPath == "" {
		// Link within the current package: anchor only.
		if l.Recv != "" {
			return "#" + l.Recv + "." + l.Name
		}
		return "#" + l.Name
	}

	// Link to another package. Preserve a trailing slash on baseURL
	// by moving it between the import path and the anchor.
	sep := ""
	if strings.HasSuffix(baseURL, "/") {
		sep = "/"
	} else {
		baseURL += "/"
	}
	url := baseURL + l.ImportPath + sep
	switch {
	case l.Name == "":
		return url
	case l.Recv != "":
		return url + "#" + l.Recv + "." + l.Name
	}
	return url + "#" + l.Name
}
// DefaultID returns the default anchor ID for the heading h.
//
// The default anchor ID is constructed by converting every
// rune that is not alphanumeric ASCII to an underscore
// and then adding the prefix “hdr-”.
// For example, if the heading text is “Go Doc Comments”,
// the default ID is “hdr-Go_Doc_Comments”.
func (h *Heading) DefaultID() string {
	// Note: The “hdr-” prefix is important to avoid DOM clobbering attacks.
	// See https://pkg.go.dev/github.com/google/safehtml#Identifier.
	var buf strings.Builder
	var tp textPrinter
	tp.oneLongLine(&buf, h.Text)
	text := strings.TrimSpace(buf.String())
	if text == "" {
		return ""
	}

	buf.Reset()
	buf.WriteString("hdr-")
	for _, r := range text {
		b := byte('_')
		if r < 0x80 && isIdentASCII(byte(r)) {
			b = byte(r)
		}
		buf.WriteByte(b)
	}
	return buf.String()
}
// A commentPrinter holds the state needed for printing a Doc
// in standard Go doc comment form (without comment markers).
type commentPrinter struct {
	*Printer
}
// Comment returns the standard Go formatting of the [Doc],
// without any comment markers.
func (p *Printer) Comment(d *Doc) []byte {
	cp := &commentPrinter{Printer: p}
	var buf bytes.Buffer
	for i, blk := range d.Content {
		if i > 0 && blankBefore(blk) {
			buf.WriteString("\n")
		}
		cp.block(&buf, blk)
	}

	// Print one block containing all the link definitions that were used,
	// and then a second block containing all the unused ones.
	// This makes it easy to clean up the unused ones: gofmt and
	// delete the final block. And it's a nice visual signal without
	// affecting the way the comment formats for users.
	for _, used := range []bool{true, false} {
		printedAny := false
		for _, def := range d.Links {
			if def.Used != used {
				continue
			}
			if !printedAny {
				buf.WriteString("\n")
				printedAny = true
			}
			fmt.Fprintf(&buf, "[%s]: %s\n", def.Text, def.URL)
		}
	}
	return buf.Bytes()
}
// blankBefore reports whether the block x requires a blank line
// before it. Every block does, except a *List whose BlankBefore
// method returns false.
func blankBefore(x Block) bool {
	list, ok := x.(*List)
	if !ok {
		return true
	}
	return list.BlankBefore()
}
// block prints the block x to out in doc comment form.
func (p *commentPrinter) block(out *bytes.Buffer, x Block) {
	switch x := x.(type) {
	default:
		// Unknown block type: emit a visible marker instead of failing.
		fmt.Fprintf(out, "?%T", x)

	case *Paragraph:
		p.text(out, "", x.Text)
		out.WriteString("\n")

	case *Heading:
		out.WriteString("# ")
		p.text(out, "", x.Text)
		out.WriteString("\n")

	case *Code:
		// Code lines are tab-indented; blank lines get no indentation.
		md := x.Text
		for md != "" {
			var line string
			line, md, _ = strings.Cut(md, "\n")
			if line != "" {
				out.WriteString("\t")
				out.WriteString(line)
			}
			out.WriteString("\n")
		}

	case *List:
		loose := x.BlankBetween()
		for i, item := range x.Items {
			// Loose lists get a blank line between items.
			if i > 0 && loose {
				out.WriteString("\n")
			}
			out.WriteString(" ")
			if item.Number == "" {
				out.WriteString(" - ")
			} else {
				out.WriteString(item.Number)
				out.WriteString(". ")
			}
			for i, blk := range item.Content {
				// Continuation paragraphs are indented four spaces.
				const fourSpace = "    "
				if i > 0 {
					out.WriteString("\n" + fourSpace)
				}
				p.text(out, fourSpace, blk.(*Paragraph).Text)
				out.WriteString("\n")
			}
		}
	}
}
// text prints the text sequence x to out, indenting continuation
// lines with indent. Automatic links print as plain text; explicit
// links and doc links keep their surrounding brackets.
func (p *commentPrinter) text(out *bytes.Buffer, indent string, x []Text) {
	for _, t := range x {
		switch t := t.(type) {
		case Plain:
			p.indent(out, indent, string(t))
		case Italic:
			p.indent(out, indent, string(t))
		case *Link:
			if !t.Auto {
				out.WriteString("[")
			}
			p.text(out, indent, t.Text)
			if !t.Auto {
				out.WriteString("]")
			}
		case *DocLink:
			out.WriteString("[")
			p.text(out, indent, t.Text)
			out.WriteString("]")
		}
	}
}
// indent prints s to out, writing the indent string after each
// interior newline in s (a tail with no trailing newline gets no
// indent appended).
func (p *commentPrinter) indent(out *bytes.Buffer, indent, s string) {
	for len(s) > 0 {
		line, rest, found := strings.Cut(s, "\n")
		out.WriteString(line)
		if found {
			out.WriteString("\n" + indent)
		}
		s = rest
	}
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package comment
import (
"bytes"
"fmt"
"sort"
"strings"
"unicode/utf8"
)
// A textPrinter holds the state needed for printing a Doc as plain text.
type textPrinter struct {
	*Printer
	long       strings.Builder // scratch buffer used by text/oneLongLine
	prefix     string          // prefix for each output line (from TextPrefix)
	codePrefix string          // prefix for each code-block line (from TextCodePrefix)
	width      int             // maximum line width in runes (from TextWidth)
}
// Text returns a textual formatting of the [Doc].
// See the [Printer] documentation for ways to customize the text output.
func (p *Printer) Text(d *Doc) []byte {
	tp := &textPrinter{
		Printer:    p,
		prefix:     p.TextPrefix,
		codePrefix: p.TextCodePrefix,
		width:      p.TextWidth,
	}
	// Apply documented defaults for unset options.
	if tp.codePrefix == "" {
		tp.codePrefix = p.TextPrefix + "\t"
	}
	if tp.width == 0 {
		tp.width = 80 - utf8.RuneCountInString(tp.prefix)
	}

	var buf bytes.Buffer
	for i, blk := range d.Content {
		if i > 0 && blankBefore(blk) {
			buf.WriteString(tp.prefix)
			writeNL(&buf)
		}
		tp.block(&buf, blk)
	}

	// Print the definitions of any links that were referenced.
	anyUsed := false
	for _, def := range d.Links {
		anyUsed = anyUsed || def.Used
	}
	if anyUsed {
		writeNL(&buf)
		for _, def := range d.Links {
			if def.Used {
				fmt.Fprintf(&buf, "[%s]: %s\n", def.Text, def.URL)
			}
		}
	}
	return buf.Bytes()
}
// writeNL calls out.WriteByte('\n')
// but first trims trailing spaces on the previous line.
func writeNL(out *bytes.Buffer) {
// Trim trailing spaces.
data := out.Bytes()
n := 0
for n < len(data) && (data[len(data)-n-1] == ' ' || data[len(data)-n-1] == '\t') {
n++
}
if n > 0 {
out.Truncate(len(data) - n)
}
out.WriteByte('\n')
}
// block prints the block x to out as plain text,
// prefixing each line with p.prefix (or p.codePrefix for code).
func (p *textPrinter) block(out *bytes.Buffer, x Block) {
	switch x := x.(type) {
	default:
		// Unknown block type: emit a visible marker instead of failing.
		fmt.Fprintf(out, "?%T\n", x)

	case *Paragraph:
		out.WriteString(p.prefix)
		p.text(out, "", x.Text)

	case *Heading:
		out.WriteString(p.prefix)
		out.WriteString("# ")
		p.text(out, "", x.Text)

	case *Code:
		// Code lines get codePrefix; blank lines get no prefix at all.
		text := x.Text
		for text != "" {
			var line string
			line, text, _ = strings.Cut(text, "\n")
			if line != "" {
				out.WriteString(p.codePrefix)
				out.WriteString(line)
			}
			writeNL(out)
		}

	case *List:
		loose := x.BlankBetween()
		for i, item := range x.Items {
			// Loose lists get a blank (prefix-only) line between items.
			if i > 0 && loose {
				out.WriteString(p.prefix)
				writeNL(out)
			}
			out.WriteString(p.prefix)
			out.WriteString(" ")
			if item.Number == "" {
				out.WriteString(" - ")
			} else {
				out.WriteString(item.Number)
				out.WriteString(". ")
			}
			for i, blk := range item.Content {
				// Continuation paragraphs align under the item text.
				const fourSpace = "    "
				if i > 0 {
					writeNL(out)
					out.WriteString(p.prefix)
					out.WriteString(fourSpace)
				}
				p.text(out, fourSpace, blk.(*Paragraph).Text)
			}
		}
	}
}
// text prints the text sequence x to out, wrapped to the
// configured width, with continuation lines indented by indent.
func (p *textPrinter) text(out *bytes.Buffer, indent string, x []Text) {
	// Flatten x into one long line and split it into words.
	p.oneLongLine(&p.long, x)
	words := strings.Fields(p.long.String())
	p.long.Reset()

	// seq lists the index of the first word on each wrapped line.
	var seq []int
	if p.width < 0 || len(words) == 0 {
		seq = []int{0, len(words)} // one long line
	} else {
		seq = wrap(words, p.width-utf8.RuneCountInString(indent))
	}

	for i := 0; i+1 < len(seq); i++ {
		if i > 0 {
			out.WriteString(p.prefix + indent)
		}
		for j, w := range words[seq[i]:seq[i+1]] {
			if j > 0 {
				out.WriteByte(' ')
			}
			out.WriteString(w)
		}
		writeNL(out)
	}
}
// oneLongLine prints the text sequence x to out as one long line,
// without worrying about line wrapping.
// Explicit links have the [ ] dropped to improve readability.
func (p *textPrinter) oneLongLine(out *strings.Builder, x []Text) {
	for _, t := range x {
		switch t := t.(type) {
		case Plain:
			out.WriteString(string(t))
		case Italic:
			// Italics carry no markers in plain text.
			out.WriteString(string(t))
		case *Link:
			// Recurse into the link text; brackets are omitted.
			p.oneLongLine(out, t.Text)
		case *DocLink:
			p.oneLongLine(out, t.Text)
		}
	}
}
// wrap wraps words into lines of at most max runes,
// minimizing the sum of the squares of the leftover lengths
// at the end of each line (except the last, of course),
// with a preference for ending lines at punctuation (.,:;).
//
// The returned slice gives the indexes of the first words
// on each line in the wrapped text with a final entry of len(words).
// Thus the lines are words[seq[0]:seq[1]], words[seq[1]:seq[2]],
// ..., words[seq[len(seq)-2]:seq[len(seq)-1]].
//
// The implementation runs in O(n log n) time, where n = len(words),
// using the algorithm described in D. S. Hirschberg and L. L. Larmore,
// “[The least weight subsequence problem],” FOCS 1985, pp. 137-143.
//
// [The least weight subsequence problem]: https://doi.org/10.1109/SFCS.1985.60
func wrap(words []string, max int) (seq []int) {
	// The algorithm requires that our scoring function be concave,
	// meaning that for all i₀ ≤ i₁ < j₀ ≤ j₁,
	// weight(i₀, j₀) + weight(i₁, j₁) ≤ weight(i₀, j₁) + weight(i₁, j₀).
	//
	// Our weights are two-element pairs [hi, lo]
	// ordered by elementwise comparison.
	// The hi entry counts the weight for lines that are longer than max,
	// and the lo entry counts the weight for lines that are not.
	// This forces the algorithm to first minimize the number of lines
	// that are longer than max, which correspond to lines with
	// single very long words. Having done that, it can move on to
	// minimizing the lo score, which is more interesting.
	//
	// The lo score is the sum for each line of the square of the
	// number of spaces remaining at the end of the line and a
	// penalty of 64 given out for not ending the line in a
	// punctuation character (.,:;).
	// The penalty is somewhat arbitrarily chosen by trying
	// different amounts and judging how nice the wrapped text looks.
	// Roughly speaking, using 64 means that we are willing to
	// end a line with eight blank spaces in order to end at a
	// punctuation character, even if the next word would fit in
	// those spaces.
	//
	// We care about ending in punctuation characters because
	// it makes the text easier to skim if not too many sentences
	// or phrases begin with a single word on the previous line.

	// A score is the score (also called weight) for a given line.
	// add and cmp add and compare scores.
	type score struct {
		hi int64
		lo int64
	}
	add := func(s, t score) score { return score{s.hi + t.hi, s.lo + t.lo} }
	cmp := func(s, t score) int {
		switch {
		case s.hi < t.hi:
			return -1
		case s.hi > t.hi:
			return +1
		case s.lo < t.lo:
			return -1
		case s.lo > t.lo:
			return +1
		}
		return 0
	}

	// total[j] is the total number of runes
	// (including separating spaces) in words[:j].
	total := make([]int, len(words)+1)
	total[0] = 0
	for i, s := range words {
		total[1+i] = total[i] + utf8.RuneCountInString(s) + 1
	}

	// weight returns weight(i, j).
	weight := func(i, j int) score {
		// On the last line, there is zero weight for being too short.
		n := total[j] - 1 - total[i]
		if j == len(words) && n <= max {
			return score{0, 0}
		}

		// Otherwise the weight is the penalty plus the square of the number of
		// characters remaining on the line or by which the line goes over.
		// In the latter case, that value goes in the hi part of the score.
		// (See note above.)
		p := wrapPenalty(words[j-1])
		v := int64(max-n) * int64(max-n)
		if n > max {
			return score{v, p}
		}
		return score{0, v + p}
	}

	// The rest of this function is “The Basic Algorithm” from
	// Hirschberg and Larmore's conference paper,
	// using the same names as in the paper.
	// f[i] is the best total score for wrapping words[:i];
	// g(i, j) is that score extended by one more line words[i:j].
	f := []score{{0, 0}}
	g := func(i, j int) score { return add(f[i], weight(i, j)) }
	bridge := func(a, b, c int) bool {
		k := c + sort.Search(len(words)+1-c, func(k int) bool {
			k += c
			return cmp(g(a, k), g(b, k)) > 0
		})
		if k > len(words) {
			return true
		}
		return cmp(g(c, k), g(b, k)) <= 0
	}

	// d is a one-ended deque implemented as a slice.
	d := make([]int, 1, len(words))
	d[0] = 0
	// bestleft[m] records the chosen start-of-line index preceding word m,
	// from which the final sequence is reconstructed below.
	bestleft := make([]int, 1, len(words))
	bestleft[0] = -1
	for m := 1; m < len(words); m++ {
		f = append(f, g(d[0], m))
		bestleft = append(bestleft, d[0])
		for len(d) > 1 && cmp(g(d[1], m+1), g(d[0], m+1)) <= 0 {
			d = d[1:] // “Retire”
		}
		for len(d) > 1 && bridge(d[len(d)-2], d[len(d)-1], m) {
			d = d[:len(d)-1] // “Fire”
		}
		if cmp(g(m, len(words)), g(d[len(d)-1], len(words))) < 0 {
			d = append(d, m) // “Hire”
			// The next few lines are not in the paper but are necessary
			// to handle two-word inputs correctly. It appears to be
			// just a bug in the paper's pseudocode.
			if len(d) == 2 && cmp(g(d[1], m+1), g(d[0], m+1)) <= 0 {
				d = d[1:]
			}
		}
	}
	bestleft = append(bestleft, d[0])

	// Recover least weight sequence from bestleft.
	n := 1
	for m := len(words); m > 0; m = bestleft[m] {
		n++
	}
	seq = make([]int, n)
	for m := len(words); m > 0; m = bestleft[m] {
		n--
		seq[n] = m
	}
	return seq
}
// wrapPenalty is the penalty for inserting a line break after word s.
// Breaking after sentence or phrase punctuation costs nothing;
// breaking after any other word costs 64 (see the scoring notes in wrap).
func wrapPenalty(s string) int64 {
	if s == "" {
		// Defensive: callers pass words produced by strings.Fields,
		// which are never empty, but guard against an index panic
		// if that ever changes.
		return 64
	}
	switch s[len(s)-1] {
	case '.', ',', ':', ';':
		return 0
	}
	return 64
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package doc extracts source code documentation from a Go AST.
package doc
import (
"fmt"
"go/ast"
"go/doc/comment"
"go/token"
"strings"
)
// Package is the documentation for an entire package.
type Package struct {
	Doc        string              // package documentation comment text
	Name       string              // package name
	ImportPath string              // import path of the package
	Imports    []string            // sorted list of import paths in the package files
	Filenames  []string            // names of the files making up the package
	Notes      map[string][]*Note  // marked comments, indexed by marker (e.g. "BUG")

	// Deprecated: For backward compatibility Bugs is still populated,
	// but all new code should use Notes instead.
	Bugs []string

	// declarations
	Consts []*Value
	Types  []*Type
	Vars   []*Value
	Funcs  []*Func

	// Examples is a sorted list of examples associated with
	// the package. Examples are extracted from _test.go files
	// provided to NewFromFiles.
	Examples []*Example

	importByName map[string]string // maps import name to import path (empty path for ambiguous names)
	syms         map[string]bool   // set of symbol names ("Name" or "Recv.Name") declared in the package
}
// Value is the documentation for a (possibly grouped) var or const declaration.
type Value struct {
	Doc   string       // declaration doc comment text
	Names []string     // var or const names in declaration order
	Decl  *ast.GenDecl // the declaration itself

	order int // sequence number recording source order, used internally for sorting
}
// Type is the documentation for a type declaration.
type Type struct {
	Doc  string       // type doc comment text
	Name string       // type name
	Decl *ast.GenDecl // the declaration itself

	// associated declarations
	Consts  []*Value // sorted list of constants of (mostly) this type
	Vars    []*Value // sorted list of variables of (mostly) this type
	Funcs   []*Func  // sorted list of functions returning this type
	Methods []*Func  // sorted list of methods (including embedded ones) of this type

	// Examples is a sorted list of examples associated with
	// this type. Examples are extracted from _test.go files
	// provided to NewFromFiles.
	Examples []*Example
}
// Func is the documentation for a func declaration.
type Func struct {
	Doc  string        // function doc comment text
	Name string        // function or method name
	Decl *ast.FuncDecl // the declaration itself

	// methods
	// (for functions, these fields have the respective zero value)
	Recv  string // actual receiver "T" or "*T" possibly followed by type parameters [P1, ..., Pn]
	Orig  string // original receiver "T" or "*T"
	Level int    // embedding level; 0 means not embedded

	// Examples is a sorted list of examples associated with this
	// function or method. Examples are extracted from _test.go files
	// provided to NewFromFiles.
	Examples []*Example
}
// A Note represents a marked comment starting with "MARKER(uid): note body".
// Any note with a marker of 2 or more upper case [A-Z] letters and a uid of
// at least one character is recognized. The ":" following the uid is optional.
// Notes are collected in the Package.Notes map indexed by the notes marker.
type Note struct {
	Pos, End token.Pos // position range of the comment containing the marker
	UID      string    // uid found with the marker
	Body     string    // note body text
}
// Mode values control the operation of [New] and [NewFromFiles].
// Mode is a bit set; values may be combined with bitwise OR.
type Mode int

const (
	// AllDecls says to extract documentation for all package-level
	// declarations, not just exported ones.
	AllDecls Mode = 1 << iota

	// AllMethods says to show all embedded methods, not just the ones of
	// invisible (unexported) anonymous fields.
	AllMethods

	// PreserveAST says to leave the AST unmodified. Originally, pieces of
	// the AST such as function bodies were nil-ed out to save memory in
	// godoc, but not all programs want that behavior.
	PreserveAST
)
// New computes the package documentation for the given package AST.
// New takes ownership of the AST pkg and may edit or overwrite it.
// To have the [Examples] fields populated, use [NewFromFiles] and include
// the package's _test.go files.
func New(pkg *ast.Package, importPath string, mode Mode) *Package {
	var rd reader
	rd.readPackage(pkg, mode)
	rd.computeMethodSets()
	rd.cleanupTypes()

	doc := &Package{
		Doc:          rd.doc,
		Name:         pkg.Name,
		ImportPath:   importPath,
		Imports:      sortedKeys(rd.imports),
		Filenames:    rd.filenames,
		Notes:        rd.notes,
		Bugs:         noteBodies(rd.notes["BUG"]),
		Consts:       sortedValues(rd.values, token.CONST),
		Types:        sortedTypes(rd.types, mode&AllMethods != 0),
		Vars:         sortedValues(rd.values, token.VAR),
		Funcs:        sortedFuncs(rd.funcs, true),
		importByName: rd.importByName,
		syms:         make(map[string]bool),
	}

	// Record every documented symbol so lookupSym can resolve doc links.
	doc.collectValues(doc.Consts)
	doc.collectValues(doc.Vars)
	doc.collectTypes(doc.Types)
	doc.collectFuncs(doc.Funcs)
	return doc
}
// collectValues records every name declared by values in p.syms.
func (p *Package) collectValues(values []*Value) {
	for _, val := range values {
		for _, n := range val.Names {
			p.syms[n] = true
		}
	}
}
// collectTypes records each type in types and all of its associated
// declarations (consts, vars, funcs, methods, interface methods, and
// struct fields) in p.syms.
func (p *Package) collectTypes(types []*Type) {
	for _, typ := range types {
		if p.syms[typ.Name] {
			continue // shouldn't be any cycles, but stop just in case
		}
		p.syms[typ.Name] = true
		p.collectValues(typ.Consts)
		p.collectValues(typ.Vars)
		p.collectFuncs(typ.Funcs)
		p.collectFuncs(typ.Methods)
		p.collectInterfaceMethods(typ)
		p.collectStructFields(typ)
	}
}
// collectFuncs records each function in funcs in p.syms:
// plain functions under their name, methods under "Recv.Name"
// with any '*' and type parameters stripped from the receiver.
func (p *Package) collectFuncs(funcs []*Func) {
	for _, fn := range funcs {
		if fn.Recv == "" {
			p.syms[fn.Name] = true
			continue
		}
		recv := strings.TrimPrefix(fn.Recv, "*")
		if bracket := strings.IndexByte(recv, '['); bracket >= 0 {
			recv = recv[:bracket] // remove type parameters
		}
		p.syms[recv+"."+fn.Name] = true
	}
}
// collectInterfaceMethods adds methods of interface types within t to p.syms.
// Note that t.Methods will contain methods of non-interface types, but not interface types.
// Adding interface methods to t.Methods might make sense, but would cause us to
// include those methods in the documentation index. Adding interface methods to p.syms
// here allows us to linkify references like [io.Reader.Read] without making any other
// changes to the documentation formatting at this time.
//
// If we do start adding interface methods to t.Methods in the future,
// collectInterfaceMethods can be dropped as redundant with collectFuncs(t.Methods).
func (p *Package) collectInterfaceMethods(t *Type) {
	for _, s := range t.Decl.Specs {
		spec, ok := s.(*ast.TypeSpec)
		if !ok {
			continue
		}
		// Skip struct types; those are handled by collectStructFields.
		list, isStruct := fields(spec.Type)
		if isStruct {
			continue
		}
		for _, field := range list {
			for _, name := range field.Names {
				p.syms[t.Name+"."+name.Name] = true
			}
		}
	}
}
// collectStructFields adds the fields of struct types within t to p.syms,
// keyed "TypeName.FieldName", so references like [T.Field] can be linkified.
func (p *Package) collectStructFields(t *Type) {
	for _, s := range t.Decl.Specs {
		spec, ok := s.(*ast.TypeSpec)
		if !ok {
			continue
		}
		// Skip non-struct types; those are handled by collectInterfaceMethods.
		list, isStruct := fields(spec.Type)
		if !isStruct {
			continue
		}
		for _, field := range list {
			for _, name := range field.Names {
				p.syms[t.Name+"."+name.Name] = true
			}
		}
	}
}
// NewFromFiles computes documentation for a package.
//
// The package is specified by a list of *ast.Files and corresponding
// file set, which must not be nil.
//
// NewFromFiles uses all provided files when computing documentation,
// so it is the caller's responsibility to provide only the files that
// match the desired build context. "go/build".Context.MatchFile can
// be used for determining whether a file matches a build context with
// the desired GOOS and GOARCH values, and other build constraints.
// The import path of the package is specified by importPath.
//
// Examples found in _test.go files are associated with the corresponding
// type, function, method, or the package, based on their name.
// If the example has a suffix in its name, it is set in the
// [Example.Suffix] field. [Examples] with malformed names are skipped.
//
// Optionally, a single extra argument of type [Mode] can be provided to
// control low-level aspects of the documentation extraction behavior.
//
// NewFromFiles takes ownership of the AST files and may edit them,
// unless the PreserveAST Mode bit is on.
func NewFromFiles(fset *token.FileSet, files []*ast.File, importPath string, opts ...any) (*Package, error) {
	// Check for invalid API usage.
	if fset == nil {
		panic(fmt.Errorf("doc.NewFromFiles: no token.FileSet provided (fset == nil)"))
	}
	// There can only be 0 or 1 options for now.
	var mode Mode
	if len(opts) > 1 {
		panic(fmt.Errorf("doc.NewFromFiles: there must not be more than 1 option argument"))
	}
	if len(opts) == 1 {
		m, ok := opts[0].(Mode)
		if !ok {
			panic(fmt.Errorf("doc.NewFromFiles: option argument type must be doc.Mode"))
		}
		mode = m
	}

	// Collect .go and _test.go files.
	var (
		pkgName     string
		goFiles     = make(map[string]*ast.File)
		testGoFiles []*ast.File
	)
	for i, file := range files {
		f := fset.File(file.Pos())
		if f == nil {
			return nil, fmt.Errorf("file files[%d] is not found in the provided file set", i)
		}
		filename := f.Name()
		switch {
		case strings.HasSuffix(filename, "_test.go"):
			testGoFiles = append(testGoFiles, file)
		case strings.HasSuffix(filename, ".go"):
			pkgName = file.Name.Name
			goFiles[filename] = file
		default:
			return nil, fmt.Errorf("file files[%d] filename %q does not have a .go extension", i, filename)
		}
	}

	// Compute package documentation.
	//
	// Since this package doesn't need Package.{Scope,Imports}, or
	// handle errors, and ast.File's Scope field is unset in files
	// parsed with parser.SkipObjectResolution, we construct the
	// Package directly instead of calling [ast.NewPackage].
	pkg := &ast.Package{Name: pkgName, Files: goFiles}
	p := New(pkg, importPath, mode)
	classifyExamples(p, Examples(testGoFiles...))
	return p, nil
}
// lookupSym reports whether the package has a given symbol or method.
//
// If recv == "", lookupSym reports whether the package has a top-level
// const, func, type, or var named name.
//
// If recv != "", lookupSym reports whether the package has a type
// named recv with a method named name.
func (p *Package) lookupSym(recv, name string) bool {
	key := name
	if recv != "" {
		key = recv + "." + name
	}
	return p.syms[key]
}
// lookupPackage returns the import path identified by name
// in the given package. If name uniquely identifies a single import,
// then lookupPackage returns that import path with ok == true.
// If multiple packages are imported as name, lookupPackage returns "", false.
// Otherwise, if name is the name of p itself, lookupPackage returns "", true,
// to signal a reference to p.
// Otherwise, lookupPackage returns "", false.
func (p *Package) lookupPackage(name string) (importPath string, ok bool) {
	path, found := p.importByName[name]
	switch {
	case found && path != "":
		return path, true // found import
	case found:
		return "", false // multiple imports used the name
	case p.Name == name:
		return "", true // allow reference to this package
	}
	return "", false // unknown name
}
// Parser returns a doc comment parser configured
// for parsing doc comments from package p.
// Each call returns a new parser, so that the caller may
// customize it before use.
func (p *Package) Parser() *comment.Parser {
	parser := new(comment.Parser)
	parser.LookupPackage = p.lookupPackage
	parser.LookupSym = p.lookupSym
	return parser
}
// Printer returns a doc comment printer configured
// for printing doc comments from package p.
// Each call returns a new printer, so that the caller may
// customize it before use.
func (p *Package) Printer() *comment.Printer {
	// No customization today, but having p.Printer()
	// gives us flexibility in the future, and it is convenient for callers.
	return new(comment.Printer)
}
// HTML returns formatted HTML for the doc comment text.
//
// To customize details of the HTML, use [Package.Printer]
// to obtain a [comment.Printer], and configure it
// before calling its HTML method.
func (p *Package) HTML(text string) []byte {
	parsed := p.Parser().Parse(text)
	return p.Printer().HTML(parsed)
}
// Markdown returns formatted Markdown for the doc comment text.
//
// To customize details of the Markdown, use [Package.Printer]
// to obtain a [comment.Printer], and configure it
// before calling its Markdown method.
func (p *Package) Markdown(text string) []byte {
	parsed := p.Parser().Parse(text)
	return p.Printer().Markdown(parsed)
}
// Text returns formatted text for the doc comment text,
// wrapped to 80 Unicode code points and using tabs for
// code block indentation.
//
// To customize details of the formatting, use [Package.Printer]
// to obtain a [comment.Printer], and configure it
// before calling its Text method.
func (p *Package) Text(text string) []byte {
	parsed := p.Parser().Parse(text)
	return p.Printer().Text(parsed)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Extract example functions from file ASTs.
package doc
import (
"cmp"
"go/ast"
"go/token"
"internal/lazyregexp"
"slices"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// An Example represents an example function found in a test source file.
type Example struct {
	Name        string // name of the item being exemplified (including optional suffix)
	Suffix      string // example suffix, without leading '_' (only populated by NewFromFiles)
	Doc         string // example function doc string
	Code        ast.Node // the example code: the function body, or the whole file for whole-file examples
	Play        *ast.File // a whole program version of the example
	Comments    []*ast.CommentGroup // comments from the file containing the example
	Output      string // expected output
	Unordered   bool   // output lines may appear in any order ("unordered output:" comment)
	EmptyOutput bool   // expect empty output
	Order       int    // original source code order
}
// Examples returns the examples found in testFiles, sorted by Name field.
// The Order fields record the order in which the examples were encountered.
// The Suffix field is not populated when Examples is called directly, it is
// only populated by [NewFromFiles] for examples it finds in _test.go files.
//
// Playable Examples must be in a package whose name ends in "_test".
// An Example is "playable" (the Play field is non-nil) in either of these
// circumstances:
//   - The example function is self-contained: the function references only
//     identifiers from other packages (or predeclared identifiers, such as
//     "int") and the test file does not include a dot import.
//   - The entire test file is the example: the file contains exactly one
//     example function, zero test, fuzz test, or benchmark function, and at
//     least one top-level function, type, variable, or constant declaration
//     other than the example function.
func Examples(testFiles ...*ast.File) []*Example {
	var list []*Example
	for _, file := range testFiles {
		hasTests := false // file contains tests, fuzz test, or benchmarks
		numDecl := 0      // number of non-import declarations in the file
		var flist []*Example
		for _, decl := range file.Decls {
			// Non-import GenDecls count toward numDecl but cannot be examples.
			if g, ok := decl.(*ast.GenDecl); ok && g.Tok != token.IMPORT {
				numDecl++
				continue
			}
			f, ok := decl.(*ast.FuncDecl)
			if !ok || f.Recv != nil {
				continue // only top-level functions can be examples
			}
			numDecl++
			name := f.Name.Name
			if isTest(name, "Test") || isTest(name, "Benchmark") || isTest(name, "Fuzz") {
				hasTests = true
				continue
			}
			if !isTest(name, "Example") {
				continue
			}
			if params := f.Type.Params; len(params.List) != 0 {
				continue // function has params; not a valid example
			}
			if results := f.Type.Results; results != nil && len(results.List) != 0 {
				continue // function has results; not a valid example
			}
			if f.Body == nil { // ast.File.Body nil dereference (see issue 28044)
				continue
			}
			var doc string
			if f.Doc != nil {
				doc = f.Doc.Text()
			}
			output, unordered, hasOutput := exampleOutput(f.Body, file.Comments)
			flist = append(flist, &Example{
				Name:        name[len("Example"):],
				Doc:         doc,
				Code:        f.Body,
				Play:        playExample(file, f),
				Comments:    file.Comments,
				Output:      output,
				Unordered:   unordered,
				EmptyOutput: output == "" && hasOutput,
				Order:       len(flist),
			})
		}
		if !hasTests && numDecl > 1 && len(flist) == 1 {
			// If this file only has one example function, some
			// other top-level declarations, and no tests or
			// benchmarks, use the whole file as the example.
			flist[0].Code = file
			flist[0].Play = playExampleFile(file)
		}
		list = append(list, flist...)
	}
	// sort by name
	slices.SortFunc(list, func(a, b *Example) int {
		return cmp.Compare(a.Name, b.Name)
	})
	return list
}
// outputPrefix matches the "output:" or "unordered output:" marker
// (case-insensitive, optionally preceded by whitespace) that begins
// an example's expected-output comment.
var outputPrefix = lazyregexp.New(`(?i)^[[:space:]]*(unordered )?output:`)
// exampleOutput extracts the expected output from the comment at the
// end of the example body b and reports whether a valid output comment
// was found, along with whether the output is marked unordered.
func exampleOutput(b *ast.BlockStmt, comments []*ast.CommentGroup) (output string, unordered, ok bool) {
	_, last := lastComment(b, comments)
	if last == nil {
		return "", false, false // no comment at the end of the body
	}
	// The comment must begin with the output prefix.
	text := last.Text()
	loc := outputPrefix.FindStringSubmatchIndex(text)
	if loc == nil {
		return "", false, false // no suitable comment found
	}
	unordered = loc[2] != -1 // the optional "(unordered )" group matched
	text = text[loc[1]:]
	// Strip zero or more spaces followed by \n or a single space.
	text = strings.TrimLeft(text, " ")
	if len(text) > 0 && text[0] == '\n' {
		text = text[1:]
	}
	return text, unordered, true
}
// isTest reports whether name looks like a test, example, fuzz test, or
// benchmark: it begins with prefix, and the first character after the
// prefix, if any, is not a lower-case letter. (We don't want Testiness.)
func isTest(name, prefix string) bool {
	rest, found := strings.CutPrefix(name, prefix)
	if !found {
		return false
	}
	if rest == "" { // a bare "Test" (say) is ok
		return true
	}
	r, _ := utf8.DecodeRuneInString(rest)
	return !unicode.IsLower(r)
}
// playExample synthesizes a new *ast.File based on the provided
// file with the provided function body as the body of main.
//
// It returns nil when the example cannot be turned into a playable
// program: the file is not in a _test package, it uses dot or
// syscall/js imports, or the body references identifiers that cannot
// be resolved from the file's declarations and imports.
func playExample(file *ast.File, f *ast.FuncDecl) *ast.File {
	body := f.Body

	if !strings.HasSuffix(file.Name.Name, "_test") {
		// We don't support examples that are part of the
		// greater package (yet).
		return nil
	}

	// Collect top-level declarations in the file.
	topDecls := make(map[*ast.Object]ast.Decl)
	typMethods := make(map[string][]ast.Decl)

	for _, decl := range file.Decls {
		switch d := decl.(type) {
		case *ast.FuncDecl:
			if d.Recv == nil {
				topDecls[d.Name.Obj] = d
			} else {
				// Method: index it by the receiver's base type name so it
				// can be pulled in when that type is used.
				if len(d.Recv.List) == 1 {
					t := d.Recv.List[0].Type
					tname, _ := baseTypeName(t)
					typMethods[tname] = append(typMethods[tname], d)
				}
			}
		case *ast.GenDecl:
			for _, spec := range d.Specs {
				switch s := spec.(type) {
				case *ast.TypeSpec:
					topDecls[s.Name.Obj] = d
				case *ast.ValueSpec:
					for _, name := range s.Names {
						topDecls[name.Obj] = d
					}
				}
			}
		}
	}

	// Find unresolved identifiers and uses of top-level declarations.
	depDecls, unresolved := findDeclsAndUnresolved(body, topDecls, typMethods)

	// Use unresolved identifiers to determine the imports used by this
	// example. The heuristic assumes package names match base import
	// paths for imports w/o renames (should be good enough most of the time).
	var namedImports []ast.Spec
	var blankImports []ast.Spec // _ imports

	// To preserve the blank lines between groups of imports, find the
	// start position of each group, and assign that position to all
	// imports from that group.
	groupStarts := findImportGroupStarts(file.Imports)
	// groupStart maps an import spec to the start position of the group
	// it belongs to (the last group start not after the spec).
	groupStart := func(s *ast.ImportSpec) token.Pos {
		for i, start := range groupStarts {
			if s.Path.ValuePos < start {
				return groupStarts[i-1]
			}
		}
		return groupStarts[len(groupStarts)-1]
	}

	for _, s := range file.Imports {
		p, err := strconv.Unquote(s.Path.Value)
		if err != nil {
			continue
		}
		if p == "syscall/js" {
			// We don't support examples that import syscall/js,
			// because the package syscall/js is not available in the playground.
			return nil
		}
		n := assumedPackageName(p)
		if s.Name != nil {
			n = s.Name.Name
			switch n {
			case "_":
				blankImports = append(blankImports, s)
				continue
			case ".":
				// We can't resolve dot imports (yet).
				return nil
			}
		}
		if unresolved[n] {
			// Copy the spec and its path to avoid modifying the original.
			spec := *s
			path := *s.Path
			spec.Path = &path
			updateBasicLitPos(spec.Path, groupStart(&spec))
			namedImports = append(namedImports, &spec)
			delete(unresolved, n)
		}
	}

	// Remove predeclared identifiers from unresolved list.
	for n := range unresolved {
		if predeclaredTypes[n] || predeclaredConstants[n] || predeclaredFuncs[n] {
			delete(unresolved, n)
		}
	}

	// If there are other unresolved identifiers, give up because this
	// synthesized file is not going to build.
	if len(unresolved) > 0 {
		return nil
	}

	// Include documentation belonging to blank imports.
	var comments []*ast.CommentGroup
	for _, s := range blankImports {
		if c := s.(*ast.ImportSpec).Doc; c != nil {
			comments = append(comments, c)
		}
	}

	// Include comments that are inside the function body.
	for _, c := range file.Comments {
		if body.Pos() <= c.Pos() && c.End() <= body.End() {
			comments = append(comments, c)
		}
	}

	// Strip the "Output:" or "Unordered output:" comment and adjust body
	// end position.
	body, comments = stripOutputComment(body, comments)

	// Include documentation belonging to dependent declarations.
	for _, d := range depDecls {
		switch d := d.(type) {
		case *ast.GenDecl:
			if d.Doc != nil {
				comments = append(comments, d.Doc)
			}
		case *ast.FuncDecl:
			if d.Doc != nil {
				comments = append(comments, d.Doc)
			}
		}
	}

	// Synthesize import declaration.
	importDecl := &ast.GenDecl{
		Tok:    token.IMPORT,
		Lparen: 1, // Need non-zero Lparen and Rparen so that printer
		Rparen: 1, // treats this as a factored import.
	}
	importDecl.Specs = append(namedImports, blankImports...)

	// Synthesize main function, reusing the example's signature and the
	// (possibly stripped) body.
	funcDecl := &ast.FuncDecl{
		Name: ast.NewIdent("main"),
		Type: f.Type,
		Body: body,
	}

	decls := make([]ast.Decl, 0, 2+len(depDecls))
	decls = append(decls, importDecl)
	decls = append(decls, depDecls...)
	decls = append(decls, funcDecl)

	// Keep declarations and comments in source-position order so the
	// printer interleaves them correctly.
	slices.SortFunc(decls, func(a, b ast.Decl) int {
		return cmp.Compare(a.Pos(), b.Pos())
	})
	slices.SortFunc(comments, func(a, b *ast.CommentGroup) int {
		return cmp.Compare(a.Pos(), b.Pos())
	})

	// Synthesize file.
	return &ast.File{
		Name:     ast.NewIdent("main"),
		Decls:    decls,
		Comments: comments,
	}
}
// findDeclsAndUnresolved returns all the top-level declarations mentioned in
// the body, and a set of unresolved symbols (those that appear in the body but
// have no declaration in the program).
//
// topDecls maps objects to the top-level declaration declaring them (not
// necessarily obj.Decl, as obj.Decl will be a Spec for GenDecls, but
// topDecls[obj] will be the GenDecl itself).
func findDeclsAndUnresolved(body ast.Node, topDecls map[*ast.Object]ast.Decl, typMethods map[string][]ast.Decl) ([]ast.Decl, map[string]bool) {
	// This function recursively finds every top-level declaration used
	// transitively by the body, populating usedDecls and usedObjs. Then it
	// trims down the declarations to include only the symbols actually
	// referenced by the body.

	unresolved := make(map[string]bool)
	var depDecls []ast.Decl
	usedDecls := make(map[ast.Decl]bool)   // set of top-level decls reachable from the body
	usedObjs := make(map[*ast.Object]bool) // set of objects reachable from the body (each declared by a usedDecl)

	// inspectFunc records identifiers seen while walking a subtree:
	// unresolved names go into unresolved, known top-level names into
	// usedObjs/usedDecls (appending each decl to depDecls once).
	var inspectFunc func(ast.Node) bool
	inspectFunc = func(n ast.Node) bool {
		switch e := n.(type) {
		case *ast.Ident:
			if e.Obj == nil && e.Name != "_" {
				// No object: the name is not declared in this file,
				// so it must resolve via an import or be predeclared.
				unresolved[e.Name] = true
			} else if d := topDecls[e.Obj]; d != nil {
				usedObjs[e.Obj] = true
				if !usedDecls[d] {
					usedDecls[d] = true
					depDecls = append(depDecls, d)
				}
			}
			return true
		case *ast.SelectorExpr:
			// For selector expressions, only inspect the left hand side.
			// (For an expression like fmt.Println, only add "fmt" to the
			// set of unresolved names, not "Println".)
			ast.Inspect(e.X, inspectFunc)
			return false
		case *ast.KeyValueExpr:
			// For key value expressions, only inspect the value
			// as the key should be resolved by the type of the
			// composite literal.
			ast.Inspect(e.Value, inspectFunc)
			return false
		}
		return true
	}

	inspectFieldList := func(fl *ast.FieldList) {
		if fl != nil {
			for _, f := range fl.List {
				ast.Inspect(f.Type, inspectFunc)
			}
		}
	}

	// Find the decls immediately referenced by body.
	ast.Inspect(body, inspectFunc)

	// Now loop over them, adding to the list when we find a new decl that the
	// body depends on. Keep going until we don't find anything new.
	// Note: depDecls grows during iteration, making this a worklist loop.
	for i := 0; i < len(depDecls); i++ {
		switch d := depDecls[i].(type) {
		case *ast.FuncDecl:
			// Inspect type parameters.
			inspectFieldList(d.Type.TypeParams)
			// Inspect types of parameters and results. See #28492.
			inspectFieldList(d.Type.Params)
			inspectFieldList(d.Type.Results)

			// Functions might not have a body. See #42706.
			if d.Body != nil {
				ast.Inspect(d.Body, inspectFunc)
			}
		case *ast.GenDecl:
			for _, spec := range d.Specs {
				switch s := spec.(type) {
				case *ast.TypeSpec:
					inspectFieldList(s.TypeParams)
					ast.Inspect(s.Type, inspectFunc)
					// Using a type pulls in all of its methods.
					depDecls = append(depDecls, typMethods[s.Name.Name]...)
				case *ast.ValueSpec:
					if s.Type != nil {
						ast.Inspect(s.Type, inspectFunc)
					}
					for _, val := range s.Values {
						ast.Inspect(val, inspectFunc)
					}
				}
			}
		}
	}

	// Some decls include multiple specs, such as a variable declaration with
	// multiple variables on the same line, or a parenthesized declaration. Trim
	// the declarations to include only the specs that are actually mentioned.
	// However, if there is a constant group with iota, leave it all: later
	// constant declarations in the group may have no value and so cannot stand
	// on their own, and removing any constant from the group could change the
	// values of subsequent ones.
	// See testdata/examples/iota.go for a minimal example.
	var ds []ast.Decl
	for _, d := range depDecls {
		switch d := d.(type) {
		case *ast.FuncDecl:
			ds = append(ds, d)
		case *ast.GenDecl:
			containsIota := false // does any spec have iota?
			// Collect all Specs that were mentioned in the example.
			var specs []ast.Spec
			for _, s := range d.Specs {
				switch s := s.(type) {
				case *ast.TypeSpec:
					if usedObjs[s.Name.Obj] {
						specs = append(specs, s)
					}
				case *ast.ValueSpec:
					if !containsIota {
						containsIota = hasIota(s)
					}
					// A ValueSpec may have multiple names (e.g. "var a, b int").
					// Keep only the names that were mentioned in the example.
					// Exception: the multiple names have a single initializer (which
					// would be a function call with multiple return values). In that
					// case, keep everything.
					if len(s.Names) > 1 && len(s.Values) == 1 {
						specs = append(specs, s)
						continue
					}
					// Copy the spec, then re-add only the names (and their
					// paired values) that the example mentions.
					ns := *s
					ns.Names = nil
					ns.Values = nil
					for i, n := range s.Names {
						if usedObjs[n.Obj] {
							ns.Names = append(ns.Names, n)
							if s.Values != nil {
								ns.Values = append(ns.Values, s.Values[i])
							}
						}
					}
					if len(ns.Names) > 0 {
						specs = append(specs, &ns)
					}
				}
			}
			if len(specs) > 0 {
				// Constant with iota? Keep it all.
				if d.Tok == token.CONST && containsIota {
					ds = append(ds, d)
				} else {
					// Synthesize a GenDecl with just the Specs we need.
					nd := *d // copy the GenDecl
					nd.Specs = specs
					if len(specs) == 1 {
						// Remove grouping parens if there is only one spec.
						nd.Lparen = 0
					}
					ds = append(ds, &nd)
				}
			}
		}
	}
	return ds, unresolved
}
func hasIota(s ast.Spec) bool {
for n := range ast.Preorder(s) {
// Check that this is the special built-in "iota" identifier, not
// a user-defined shadow.
if id, ok := n.(*ast.Ident); ok && id.Name == "iota" && id.Obj == nil {
return true
}
}
return false
}
// findImportGroupStarts finds the start positions of each sequence of import
// specs that are not separated by a blank line.
func findImportGroupStarts(imps []*ast.ImportSpec) []token.Pos {
	groups := findImportGroupStarts1(imps)
	positions := make([]token.Pos, len(groups))
	for i, g := range groups {
		positions[i] = g.Pos()
	}
	return positions
}
// Helper for findImportGroupStarts to ease testing.
func findImportGroupStarts1(origImps []*ast.ImportSpec) []*ast.ImportSpec {
// Copy to avoid mutation.
imps := make([]*ast.ImportSpec, len(origImps))
copy(imps, origImps)
// Assume the imports are sorted by position.
slices.SortFunc(imps, func(a, b *ast.ImportSpec) int {
return cmp.Compare(a.Pos(), b.Pos())
})
// Assume gofmt has been applied, so there is a blank line between adjacent imps
// if and only if they are more than 2 positions apart (newline, tab).
var groupStarts []*ast.ImportSpec
prevEnd := token.Pos(-2)
for _, imp := range imps {
if imp.Pos()-prevEnd > 2 {
groupStarts = append(groupStarts, imp)
}
prevEnd = imp.End()
// Account for end-of-line comments.
if imp.Comment != nil {
prevEnd = imp.Comment.End()
}
}
return groupStarts
}
// playExampleFile takes a whole file example and synthesizes a new *ast.File
// such that the example is function main in package main.
func playExampleFile(file *ast.File) *ast.File {
	comments := file.Comments
	// Drop a leading copyright comment, if present.
	if len(comments) > 0 && strings.HasPrefix(comments[0].Text(), "Copyright") {
		comments = comments[1:]
	}

	// Walk the declarations, rewriting each ExampleX function to main.
	// Mutated nodes are copied first so the original AST stays intact.
	var decls []ast.Decl
	for _, d := range file.Decls {
		fn, ok := d.(*ast.FuncDecl)
		if ok && isTest(fn.Name.Name, "Example") {
			copied := *fn
			copied.Name = ast.NewIdent("main")
			copied.Body, comments = stripOutputComment(fn.Body, comments)
			d = &copied
		}
		decls = append(decls, d)
	}

	// Copy the File as well, since it may be used elsewhere.
	newFile := *file
	newFile.Name = ast.NewIdent("main")
	newFile.Decls = decls
	newFile.Comments = comments
	return &newFile
}
// stripOutputComment finds and removes the "Output:" or "Unordered output:"
// comment from body and comments, and adjusts the body block's end position.
func stripOutputComment(body *ast.BlockStmt, comments []*ast.CommentGroup) (*ast.BlockStmt, []*ast.CommentGroup) {
	i, last := lastComment(body, comments)
	if last == nil || !outputPrefix.MatchString(last.Text()) {
		// Nothing to strip: no output comment inside the body.
		return body, comments
	}

	// Rebuild the body and the comment list rather than mutating the
	// originals, which may be shared elsewhere.
	stripped := &ast.BlockStmt{
		Lbrace: body.Lbrace,
		List:   body.List,
		Rbrace: last.Pos(), // end the block where the output comment began
	}
	rest := append(append([]*ast.CommentGroup{}, comments[:i]...), comments[i+1:]...)
	return stripped, rest
}
// lastComment returns the last comment inside the provided block.
func lastComment(b *ast.BlockStmt, c []*ast.CommentGroup) (i int, last *ast.CommentGroup) {
if b == nil {
return
}
pos, end := b.Pos(), b.End()
for j, cg := range c {
if cg.Pos() < pos {
continue
}
if cg.End() > end {
break
}
i, last = j, cg
}
return
}
// classifyExamples classifies examples and assigns them to the Examples field
// of the relevant Func, Type, or Package that the example is associated with.
//
// The classification process is ambiguous in some cases:
//
//   - ExampleFoo_Bar matches a type named Foo_Bar
//     or a method named Foo.Bar.
//   - ExampleFoo_bar matches a type named Foo_bar
//     or Foo (with a "bar" suffix).
//
// Examples with malformed names are not associated with anything.
func classifyExamples(p *Package, examples []*Example) {
	if len(examples) == 0 {
		return
	}
	// Mapping of names for funcs, types, and methods to the example listing.
	ids := make(map[string]*[]*Example)
	ids[""] = &p.Examples // package-level examples have an empty name
	for _, f := range p.Funcs {
		if !token.IsExported(f.Name) {
			continue
		}
		ids[f.Name] = &f.Examples
	}
	for _, t := range p.Types {
		if !token.IsExported(t.Name) {
			continue
		}
		ids[t.Name] = &t.Examples
		// Factory functions associated with the type.
		for _, f := range t.Funcs {
			if !token.IsExported(f.Name) {
				continue
			}
			ids[f.Name] = &f.Examples
		}
		// Methods are keyed as "Recv_Method", with any leading '*' and
		// any type instantiation stripped from the receiver.
		for _, m := range t.Methods {
			if !token.IsExported(m.Name) {
				continue
			}
			ids[strings.TrimPrefix(nameWithoutInst(m.Recv), "*")+"_"+m.Name] = &m.Examples
		}
	}

	// Group each example with the associated func, type, or method.
	for _, ex := range examples {
		// Consider all possible split points for the suffix
		// by starting at the end of string (no suffix case),
		// then trying all positions that contain a '_' character.
		//
		// An association is made on the first successful match.
		// Examples with malformed names that match nothing are skipped.
		for i := len(ex.Name); i >= 0; i = strings.LastIndexByte(ex.Name[:i], '_') {
			prefix, suffix, ok := splitExampleName(ex.Name, i)
			if !ok {
				continue
			}
			exs, ok := ids[prefix]
			if !ok {
				continue
			}
			ex.Suffix = suffix
			*exs = append(*exs, ex)
			break
		}
	}

	// Sort list of example according to the user-specified suffix name.
	for _, exs := range ids {
		slices.SortFunc(*exs, func(a, b *Example) int {
			return cmp.Compare(a.Suffix, b.Suffix)
		})
	}
}
// nameWithoutInst returns name if name has no brackets. If name contains
// brackets, then it returns name with all the contents between (and including)
// the outermost left and right bracket removed.
//
// Adapted from debug/gosym/symtab.go:Sym.nameWithoutInst.
func nameWithoutInst(name string) string {
	lbrack := strings.IndexByte(name, '[')
	if lbrack < 0 {
		return name
	}
	rbrack := strings.LastIndexByte(name, ']')
	if rbrack < 0 {
		// Malformed name, should contain closing bracket too.
		return name
	}
	return name[:lbrack] + name[rbrack+1:]
}
// splitExampleName attempts to split example name s at index i,
// and reports if that produces a valid split. The suffix may be
// absent. Otherwise, it must start with a lower-case letter and
// be preceded by '_'.
//
// One of i == len(s) or s[i] == '_' must be true.
func splitExampleName(s string, i int) (prefix, suffix string, ok bool) {
	switch {
	case i == len(s):
		// No suffix at all.
		return s, "", true
	case i == len(s)-1:
		// Trailing '_' with nothing after it: invalid.
		return "", "", false
	}
	prefix, suffix = s[:i], s[i+1:]
	return prefix, suffix, isExampleSuffix(suffix)
}
// isExampleSuffix reports whether s begins with a lower-case letter,
// which makes it a valid example suffix.
func isExampleSuffix(s string) bool {
	first, width := utf8.DecodeRuneInString(s)
	if width == 0 {
		return false // empty string
	}
	return unicode.IsLower(first)
}
// updateBasicLitPos updates lit.Pos,
// ensuring that lit.End is displaced by the same amount.
// (See https://go.dev/issue/76395.)
func updateBasicLitPos(lit *ast.BasicLit, pos token.Pos) {
	// Capture the literal's extent before moving it. Named width rather
	// than len to avoid shadowing the predeclared len identifier.
	width := lit.End() - lit.Pos()
	lit.ValuePos = pos
	if lit.ValueEnd.IsValid() {
		lit.ValueEnd = pos + width
	}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements export filtering of an AST.
package doc
import (
"go/ast"
"go/token"
)
// filterIdentList removes unexported names from list in place
// and returns the resulting list.
func filterIdentList(list []*ast.Ident) []*ast.Ident {
j := 0
for _, x := range list {
if token.IsExported(x.Name) {
list[j] = x
j++
}
}
return list[0:j]
}
// underscore is the blank identifier used to replace filtered-out names.
var underscore = ast.NewIdent("_")
// filterCompositeLit filters the elements of the composite literal in
// place, marking the literal incomplete if any element was removed.
func filterCompositeLit(lit *ast.CompositeLit, filter Filter, export bool) {
	before := len(lit.Elts)
	lit.Elts = filterExprList(lit.Elts, filter, export)
	if len(lit.Elts) < before {
		lit.Incomplete = true
	}
}
// filterExprList filters list in place: key-value entries whose key is an
// identifier rejected by filter are dropped; nested composite literals are
// filtered recursively. The compacted list is returned.
func filterExprList(list []ast.Expr, filter Filter, export bool) []ast.Expr {
	kept := list[:0]
	for _, e := range list {
		switch v := e.(type) {
		case *ast.CompositeLit:
			filterCompositeLit(v, filter, export)
		case *ast.KeyValueExpr:
			if key, ok := v.Key.(*ast.Ident); ok && !filter(key.Name) {
				continue // drop this entry entirely
			}
			if val, ok := v.Value.(*ast.CompositeLit); ok {
				filterCompositeLit(val, filter, export)
			}
		}
		kept = append(kept, e)
	}
	return kept
}
// updateIdentList replaces all unexported identifiers with underscore
// and reports whether at least one exported name exists.
func updateIdentList(list []*ast.Ident) (hasExported bool) {
	for i, id := range list {
		if !token.IsExported(id.Name) {
			list[i] = underscore
			continue
		}
		hasExported = true
	}
	return hasExported
}
// hasExportedName reports whether list contains any exported names.
func hasExportedName(list []*ast.Ident) bool {
for _, x := range list {
if x.IsExported() {
return true
}
}
return false
}
// removeAnonymousField removes anonymous fields named name from an interface.
func removeAnonymousField(name string, ityp *ast.InterfaceType) {
	list := ityp.Methods.List // we know that ityp.Methods != nil
	kept := list[:0]
	for _, field := range list {
		drop := false
		if len(field.Names) == 0 {
			// Anonymous field: drop it when its base type name matches.
			if fname, _ := baseTypeName(field.Type); fname == name {
				drop = true
			}
		}
		if !drop {
			kept = append(kept, field)
		}
	}
	if len(kept) < len(list) {
		ityp.Incomplete = true
	}
	ityp.Methods.List = kept
}
// filterFieldList removes unexported fields (field names) from the field list
// in place and reports whether fields were removed. Anonymous fields are
// recorded with the parent type. filterType is called with the types of
// all remaining fields. If the list belongs to an interface type, ityp is
// that interface; otherwise ityp is nil.
func (r *reader) filterFieldList(parent *namedType, fields *ast.FieldList, ityp *ast.InterfaceType) (removedFields bool) {
	if fields == nil {
		return
	}
	list := fields.List
	j := 0
	for _, field := range list {
		keepField := false
		// Note: n is scoped to this if/else chain and is reused in the
		// else branch below as the pre-filter name count.
		if n := len(field.Names); n == 0 {
			// anonymous field or embedded type or union element
			fname := r.recordAnonymousField(parent, field.Type)
			if fname != "" {
				if token.IsExported(fname) {
					keepField = true
				} else if ityp != nil && predeclaredTypes[fname] {
					// possibly an embedded predeclared type; keep it for now but
					// remember this interface so that it can be fixed if name is also
					// defined locally
					keepField = true
					r.remember(fname, ityp)
				}
			} else {
				// If we're operating on an interface, assume that this is an embedded
				// type or union element.
				//
				// TODO(rfindley): consider traversing into approximation/unions
				// elements to see if they are entirely unexported.
				keepField = ityp != nil
			}
		} else {
			// Named field(s): drop the unexported names; the field survives
			// if any exported name remains.
			field.Names = filterIdentList(field.Names)
			if len(field.Names) < n {
				removedFields = true
			}
			if len(field.Names) > 0 {
				keepField = true
			}
		}
		if keepField {
			r.filterType(nil, field.Type)
			list[j] = field
			j++
		}
	}
	if j < len(list) {
		removedFields = true
	}
	fields.List = list[0:j]
	return
}
// filterParamList applies filterType to each parameter type in fields.
func (r *reader) filterParamList(fields *ast.FieldList) {
	if fields == nil {
		return
	}
	for _, field := range fields.List {
		r.filterType(nil, field.Type)
	}
}
// filterType strips any unexported struct fields or method types from typ
// in place. If fields (or methods) have been removed, the corresponding
// struct or interface type has the Incomplete field set to true.
func (r *reader) filterType(parent *namedType, typ ast.Expr) {
	switch x := typ.(type) {
	case *ast.Ident:
		// Nothing to filter in a bare identifier.
	case *ast.ParenExpr:
		r.filterType(nil, x.X)
	case *ast.StarExpr:
		// Possibly an embedded type literal.
		r.filterType(nil, x.X)
	case *ast.UnaryExpr:
		if x.Op == token.TILDE { // approximation element
			r.filterType(nil, x.X)
		}
	case *ast.BinaryExpr:
		if x.Op == token.OR { // union element
			r.filterType(nil, x.X)
			r.filterType(nil, x.Y)
		}
	case *ast.ArrayType:
		r.filterType(nil, x.Elt)
	case *ast.StructType:
		if r.filterFieldList(parent, x.Fields, nil) {
			x.Incomplete = true
		}
	case *ast.FuncType:
		r.filterParamList(x.TypeParams)
		r.filterParamList(x.Params)
		r.filterParamList(x.Results)
	case *ast.InterfaceType:
		if r.filterFieldList(parent, x.Methods, x) {
			x.Incomplete = true
		}
	case *ast.MapType:
		r.filterType(nil, x.Key)
		r.filterType(nil, x.Value)
	case *ast.ChanType:
		r.filterType(nil, x.Value)
	}
}
// filterSpec filters a single spec in place and reports whether it should
// be kept. Imports are always kept; value and type specs are kept when
// they still declare something visible after filtering.
func (r *reader) filterSpec(spec ast.Spec) bool {
	switch s := spec.(type) {
	case *ast.ImportSpec:
		// always keep imports so we can collect them
		return true
	case *ast.ValueSpec:
		s.Values = filterExprList(s.Values, token.IsExported, true)
		if len(s.Values) > 0 || s.Type == nil && len(s.Values) == 0 {
			// If there are values declared on RHS, just replace the unexported
			// identifiers on the LHS with underscore, so that it matches
			// the sequence of expression on the RHS.
			//
			// Similarly, if there are no type and values, then this expression
			// must be following an iota expression, where order matters.
			if updateIdentList(s.Names) {
				r.filterType(nil, s.Type)
				return true
			}
		} else {
			// Typed spec with no surviving values: simply drop the
			// unexported names.
			s.Names = filterIdentList(s.Names)
			if len(s.Names) > 0 {
				r.filterType(nil, s.Type)
				return true
			}
		}
	case *ast.TypeSpec:
		// Don't filter type parameters here, by analogy with function parameters
		// which are not filtered for top-level function declarations.
		if name := s.Name.Name; token.IsExported(name) {
			r.filterType(r.lookupType(s.Name.Name), s.Type)
			return true
		} else if IsPredeclared(name) {
			// An unexported type shadowing a predeclared name: record it
			// so interfaces embedding that name can be fixed up later.
			if r.shadowedPredecl == nil {
				r.shadowedPredecl = make(map[string]bool)
			}
			r.shadowedPredecl[name] = true
		}
	}
	return false
}
// copyConstType returns a copy of typ with position pos.
// typ must be a valid constant type.
// In practice, only (possibly qualified) identifiers are possible.
func copyConstType(typ ast.Expr, pos token.Pos) ast.Expr {
switch typ := typ.(type) {
case *ast.Ident:
return &ast.Ident{Name: typ.Name, NamePos: pos}
case *ast.SelectorExpr:
if id, ok := typ.X.(*ast.Ident); ok {
// presumably a qualified identifier
return &ast.SelectorExpr{
Sel: ast.NewIdent(typ.Sel.Name),
X: &ast.Ident{Name: id.Name, NamePos: pos},
}
}
}
return nil // shouldn't happen, but be conservative and don't panic
}
// filterSpecList filters list in place and returns the specs to keep.
// For constant groups (tok == token.CONST), type information is first
// propagated from typed specs to subsequent untyped ones, so that
// filtering out an unexported typed constant doesn't silently change the
// type of the constants that followed it.
func (r *reader) filterSpecList(list []ast.Spec, tok token.Token) []ast.Spec {
	if tok == token.CONST {
		// Propagate any type information that would get lost otherwise
		// when unexported constants are filtered.
		var prevType ast.Expr
		for _, spec := range list {
			spec := spec.(*ast.ValueSpec)
			if spec.Type == nil && len(spec.Values) == 0 && prevType != nil {
				// provide current spec with an explicit type
				spec.Type = copyConstType(prevType, spec.Pos())
			}
			if hasExportedName(spec.Names) {
				// exported names are preserved so there's no need to propagate the type
				prevType = nil
			} else {
				prevType = spec.Type
			}
		}
	}
	// Compact the list in place, keeping only specs that survive filtering.
	j := 0
	for _, s := range list {
		if r.filterSpec(s) {
			list[j] = s
			j++
		}
	}
	return list[0:j]
}
// filterDecl filters decl in place and reports whether it still declares
// anything worth keeping.
func (r *reader) filterDecl(decl ast.Decl) bool {
	if gd, ok := decl.(*ast.GenDecl); ok {
		gd.Specs = r.filterSpecList(gd.Specs, gd.Tok)
		return len(gd.Specs) > 0
	}
	if fd, ok := decl.(*ast.FuncDecl); ok {
		// ok to filter these methods early because any
		// conflicting method will be filtered here, too -
		// thus, removing these methods early will not lead
		// to the false removal of possible conflicts
		return token.IsExported(fd.Name.Name)
	}
	return false
}
// fileExports removes unexported declarations from src in place.
func (r *reader) fileExports(src *ast.File) {
	kept := src.Decls[:0]
	for _, decl := range src.Decls {
		if r.filterDecl(decl) {
			kept = append(kept, decl)
		}
	}
	src.Decls = kept
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package doc
import "go/ast"
type Filter func(string) bool
func matchFields(fields *ast.FieldList, f Filter) bool {
if fields != nil {
for _, field := range fields.List {
for _, name := range field.Names {
if f(name.Name) {
return true
}
}
}
}
return false
}
// matchDecl reports whether the general declaration d declares any name
// (value, type, struct field, or interface method) accepted by f.
func matchDecl(d *ast.GenDecl, f Filter) bool {
	for _, spec := range d.Specs {
		switch s := spec.(type) {
		case *ast.ValueSpec:
			for _, name := range s.Names {
				if f(name.Name) {
					return true
				}
			}
		case *ast.TypeSpec:
			if f(s.Name.Name) {
				return true
			}
			// We don't match ordinary parameters in filterFuncs, so by
			// analogy don't match type parameters here.
			switch underlying := s.Type.(type) {
			case *ast.StructType:
				if matchFields(underlying.Fields, f) {
					return true
				}
			case *ast.InterfaceType:
				if matchFields(underlying.Methods, f) {
					return true
				}
			}
		}
	}
	return false
}
// filterValues compacts a in place, keeping only the values whose
// declaration matches f, and returns the compacted slice.
func filterValues(a []*Value, f Filter) []*Value {
	kept := a[:0]
	for _, v := range a {
		if matchDecl(v.Decl, f) {
			kept = append(kept, v)
		}
	}
	return kept
}
// filterFuncs compacts a in place, keeping only the functions whose
// name matches f, and returns the compacted slice.
func filterFuncs(a []*Func, f Filter) []*Func {
	kept := a[:0]
	for _, fn := range a {
		if f(fn.Name) {
			kept = append(kept, fn)
		}
	}
	return kept
}
// filterTypes compacts a in place, keeping a type if its own declaration
// matches f or if, after filtering, it still owns at least one matching
// const, var, factory, or method.
func filterTypes(a []*Type, f Filter) []*Type {
	kept := a[:0]
	for _, typ := range a {
		keep := matchDecl(typ.Decl, f)
		if !keep {
			// Type name doesn't match, but we may have matching consts,
			// vars, factories or methods.
			typ.Consts = filterValues(typ.Consts, f)
			typ.Vars = filterValues(typ.Vars, f)
			typ.Funcs = filterFuncs(typ.Funcs, f)
			typ.Methods = filterFuncs(typ.Methods, f)
			keep = len(typ.Consts)+len(typ.Vars)+len(typ.Funcs)+len(typ.Methods) > 0
		}
		if keep {
			kept = append(kept, typ)
		}
	}
	return kept
}
// Filter eliminates documentation for names that don't pass through the filter f.
// It modifies p in place.
// TODO(gri): Recognize "Type.Method" as a name.
func (p *Package) Filter(f Filter) {
	p.Consts = filterValues(p.Consts, f)
	p.Vars = filterValues(p.Vars, f)
	p.Types = filterTypes(p.Types, f)
	p.Funcs = filterFuncs(p.Funcs, f)
	p.Doc = "" // don't show top-level package doc
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package doc
import (
"cmp"
"fmt"
"go/ast"
"go/token"
"internal/lazyregexp"
"path"
"slices"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// ----------------------------------------------------------------------------
// function/method sets
//
// Internally, we treat functions like methods and collect them in method sets.

// A methodSet describes a set of methods, keyed by method (or function) name.
// Entries where Decl == nil are conflict entries (more than one method with
// the same name at the same embedding level).
type methodSet map[string]*Func
// recvString returns a string representation of recv of the form "T", "*T",
// "T[A, ...]", "*T[A, ...]" or "BADRECV" (if not a proper receiver type).
func recvString(recv ast.Expr) string {
switch t := recv.(type) {
case *ast.Ident:
return t.Name
case *ast.StarExpr:
return "*" + recvString(t.X)
case *ast.IndexExpr:
// Generic type with one parameter.
return fmt.Sprintf("%s[%s]", recvString(t.X), recvParam(t.Index))
case *ast.IndexListExpr:
// Generic type with multiple parameters.
if len(t.Indices) > 0 {
var b strings.Builder
b.WriteString(recvString(t.X))
b.WriteByte('[')
b.WriteString(recvParam(t.Indices[0]))
for _, e := range t.Indices[1:] {
b.WriteString(", ")
b.WriteString(recvParam(e))
}
b.WriteByte(']')
return b.String()
}
}
return "BADRECV"
}
func recvParam(p ast.Expr) string {
if id, ok := p.(*ast.Ident); ok {
return id.Name
}
return "BADPARAM"
}
// set creates the corresponding Func for f and adds it to mset.
// If there are multiple f's with the same name, set keeps the first
// one with documentation; conflicts are ignored. The boolean
// specifies whether to leave the AST untouched.
func (mset methodSet) set(f *ast.FuncDecl, preserveAST bool) {
	name := f.Name.Name
	if existing := mset[name]; existing != nil && existing.Doc != "" {
		// A documented function with this name is already registered;
		// assume f is simply another implementation of it and ignore it.
		// This does not happen if the caller is using go/build.ScanDir
		// to determine the list of files implementing a package.
		return
	}
	// No entry yet, or the existing one lacks documentation; use f.
	recv := ""
	if f.Recv != nil {
		var typ ast.Expr
		// Be careful in case of incorrect ASTs.
		if list := f.Recv.List; len(list) == 1 {
			typ = list[0].Type
		}
		recv = recvString(typ)
	}
	mset[name] = &Func{
		Doc:  f.Doc.Text(),
		Name: name,
		Decl: f,
		Recv: recv,
		Orig: recv,
	}
	if !preserveAST {
		f.Doc = nil // doc consumed - remove from AST
	}
}
// add adds method m to the method set; m is ignored if the method set
// already contains a method with the same name at the same or a higher
// level than m.
func (mset methodSet) add(m *Func) {
	old := mset[m.Name]
	switch {
	case old == nil || m.Level < old.Level:
		// New name, or m sits at a shallower embedding level: m wins.
		mset[m.Name] = m
	case m.Level == old.Level:
		// Conflict at the same level: record it with a sentinel Func
		// whose Decl is nil.
		mset[m.Name] = &Func{
			Name:  m.Name,
			Level: m.Level,
		}
	}
}
// ----------------------------------------------------------------------------
// Named types
// baseTypeName returns the name of the base type of x (or "")
// and whether the type is imported or not.
func baseTypeName(x ast.Expr) (name string, imported bool) {
switch t := x.(type) {
case *ast.Ident:
return t.Name, false
case *ast.IndexExpr:
return baseTypeName(t.X)
case *ast.IndexListExpr:
return baseTypeName(t.X)
case *ast.SelectorExpr:
if _, ok := t.X.(*ast.Ident); ok {
// only possible for qualified type names;
// assume type is imported
return t.Sel.Name, true
}
case *ast.ParenExpr:
return baseTypeName(t.X)
case *ast.StarExpr:
return baseTypeName(t.X)
}
return "", false
}
// An embeddedSet describes a set of embedded types.
type embeddedSet map[*namedType]bool

// A namedType represents a named unqualified (package local, or possibly
// predeclared) type. The namedType for a type name is always found via
// reader.lookupType.
type namedType struct {
	doc  string       // doc comment for type
	name string       // type name
	decl *ast.GenDecl // nil if declaration hasn't been seen yet

	isEmbedded bool        // true if this type is embedded
	isStruct   bool        // true if this type is a struct
	embedded   embeddedSet // embedded types; the mapped value is true if embedded via pointer

	// associated declarations
	values  []*Value  // consts and vars
	funcs   methodSet // factory functions returning this type
	methods methodSet // methods with this type as receiver
}
// ----------------------------------------------------------------------------
// AST reader

// reader accumulates documentation for a single package.
// It modifies the AST: Comments (declaration documentation)
// that have been collected by the reader are set to nil
// in the respective AST nodes so that they are not printed
// twice (once when printing the documentation and once when
// printing the corresponding AST node).
type reader struct {
	mode Mode // mode bits (e.g. AllDecls, PreserveAST) controlling the read

	// package properties
	doc       string // package documentation, if any
	filenames []string
	notes     map[string][]*Note

	// imports
	imports      map[string]int
	hasDotImp    bool // if set, package contains a dot import
	importByName map[string]string

	// declarations
	values []*Value // consts and vars
	order  int      // sort order of const and var declarations (when we can't use a name)
	types  map[string]*namedType
	funcs  methodSet

	// support for package-local shadowing of predeclared types
	shadowedPredecl map[string]bool
	fixmap          map[string][]*ast.InterfaceType // interfaces embedding a predeclared name, keyed by that name
}
// isVisible reports whether name should be documented: either all
// declarations were requested (AllDecls mode) or name is exported.
func (r *reader) isVisible(name string) bool {
	return r.mode&AllDecls != 0 || token.IsExported(name)
}
// lookupType returns the base type with the given name.
// If the base type has not been encountered yet, a new
// type with the given name but no associated declaration
// is added to the type map.
func (r *reader) lookupType(name string) *namedType {
	if name == "" || name == "_" {
		return nil // no type docs for anonymous types
	}
	if known, ok := r.types[name]; ok {
		return known
	}
	// First sighting of this name: register a placeholder entry
	// with no declaration attached.
	placeholder := &namedType{
		name:     name,
		embedded: make(embeddedSet),
		funcs:    make(methodSet),
		methods:  make(methodSet),
	}
	r.types[name] = placeholder
	return placeholder
}
// recordAnonymousField registers fieldType as the type of an
// anonymous field in the parent type. If the field is imported
// (qualified name) or the parent is nil, the field is ignored.
// The function returns the field name.
func (r *reader) recordAnonymousField(parent *namedType, fieldType ast.Expr) (fname string) {
	var imported bool
	fname, imported = baseTypeName(fieldType)
	if parent == nil || imported {
		return
	}
	ftype := r.lookupType(fname)
	if ftype == nil {
		return
	}
	ftype.isEmbedded = true
	// Remember whether the embedding is via a pointer.
	_, isPtr := fieldType.(*ast.StarExpr)
	parent.embedded[ftype] = isPtr
	return
}
// readDoc collects comment as package documentation.
// By convention there should be only one package comment,
// but collect all of them if there are more than one.
func (r *reader) readDoc(comment *ast.CommentGroup) {
	text := comment.Text()
	if r.doc != "" {
		// Additional package comment: append, separated by a newline.
		text = r.doc + "\n" + text
	}
	r.doc = text
}
// remember records that typ embeds the predeclared type name predecl,
// so the embedding can be fixed up later if predecl turns out to be
// shadowed by a package-local declaration.
func (r *reader) remember(predecl string, typ *ast.InterfaceType) {
	if r.fixmap == nil {
		r.fixmap = map[string][]*ast.InterfaceType{}
	}
	r.fixmap[predecl] = append(r.fixmap[predecl], typ)
}
// specNames returns the declared names of all value specs in specs,
// in declaration order.
func specNames(specs []ast.Spec) []string {
	names := make([]string, 0, len(specs)) // reasonable estimate
	for _, spec := range specs {
		// spec is guaranteed to be an *ast.ValueSpec by readValue
		vs := spec.(*ast.ValueSpec)
		for _, ident := range vs.Names {
			names = append(names, ident.Name)
		}
	}
	return names
}
// readValue processes a const or var declaration.
//
// The declaration's Value is recorded either at the package level or,
// if a single named type dominates the declaration's entries, on that
// type's value list.
func (r *reader) readValue(decl *ast.GenDecl) {
	// determine if decl should be associated with a type
	// Heuristic: For each typed entry, determine the type name, if any.
	// If there is exactly one type name that is sufficiently
	// frequent, associate the decl with the respective type.
	domName := "" // dominant type name; "" if none or conflicting names were seen
	domFreq := 0  // number of entries carrying domName
	prev := ""    // type name of the previous entry (for untyped const continuation)
	n := 0        // number of legal (*ast.ValueSpec) entries seen
	for _, spec := range decl.Specs {
		s, ok := spec.(*ast.ValueSpec)
		if !ok {
			continue // should not happen, but be conservative
		}
		name := ""
		switch {
		case s.Type != nil:
			// a type is present; determine its name
			if n, imp := baseTypeName(s.Type); !imp {
				name = n
			}
		case decl.Tok == token.CONST && len(s.Values) == 0:
			// no type or value is present but we have a constant declaration;
			// use the previous type name (possibly the empty string)
			name = prev
		}
		if name != "" {
			// entry has a named type
			if domName != "" && domName != name {
				// more than one type name - do not associate
				// with any type
				domName = ""
				break
			}
			domName = name
			domFreq++
		}
		prev = name
		n++
	}

	// nothing to do w/o a legal declaration
	if n == 0 {
		return
	}

	// determine values list with which to associate the Value for this decl
	values := &r.values
	const threshold = 0.75
	if domName != "" && r.isVisible(domName) && domFreq >= int(float64(len(decl.Specs))*threshold) {
		// typed entries are sufficiently frequent
		if typ := r.lookupType(domName); typ != nil {
			values = &typ.values // associate with that type
		}
	}

	*values = append(*values, &Value{
		Doc:   decl.Doc.Text(),
		Names: specNames(decl.Specs),
		Decl:  decl,
		order: r.order,
	})
	if r.mode&PreserveAST == 0 {
		decl.Doc = nil // doc consumed - remove from AST
	}
	// Note: It's important that the order used here is global because the cleanupTypes
	// methods may move values associated with types back into the global list. If the
	// order is list-specific, sorting is not deterministic because the same order value
	// may appear multiple times (was bug, found when fixing #16153).
	r.order++
}
// fields returns a struct's fields or an interface's methods.
// For any other expression it returns nil, false.
func fields(typ ast.Expr) (list []*ast.Field, isStruct bool) {
	var fl *ast.FieldList
	switch t := typ.(type) {
	case *ast.StructType:
		fl, isStruct = t.Fields, true
	case *ast.InterfaceType:
		fl = t.Methods
	}
	if fl == nil {
		return nil, isStruct
	}
	return fl.List, isStruct
}
// readType processes a type declaration.
func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) {
	typ := r.lookupType(spec.Name.Name)
	if typ == nil {
		return // no name or blank name - ignore the type
	}

	// A type should be added at most once, so typ.decl
	// should be nil - if it is not, simply overwrite it.
	typ.decl = decl

	// Compute documentation: prefer the spec's own doc comment,
	// fall back to the declaration's doc comment.
	doc := spec.Doc
	if doc == nil {
		doc = decl.Doc
	}
	if r.mode&PreserveAST == 0 {
		// doc consumed - remove from AST
		spec.Doc = nil
		decl.Doc = nil
	}
	typ.doc = doc.Text()

	// Record anonymous fields (they may contribute methods).
	// Some fields may have been recorded already when filtering
	// exports, but that's ok.
	fieldList, isStruct := fields(spec.Type)
	typ.isStruct = isStruct
	for _, f := range fieldList {
		if len(f.Names) == 0 {
			r.recordAnonymousField(typ, f.Type)
		}
	}
}
// isPredeclared reports whether n denotes a predeclared type that has
// not been shadowed by a declaration in this package.
func (r *reader) isPredeclared(n string) bool {
	if !predeclaredTypes[n] {
		return false
	}
	return r.types[n] == nil
}
// readFunc processes a func or method declaration.
//
// Methods are attached to their receiver's named type. Functions whose
// result list names exactly one package-local, visible, non-predeclared
// type are treated as factory functions for that type. Everything else
// is recorded as an ordinary package-level function.
func (r *reader) readFunc(fun *ast.FuncDecl) {
	// strip function body if requested.
	if r.mode&PreserveAST == 0 {
		fun.Body = nil
	}

	// associate methods with the receiver type, if any
	if fun.Recv != nil {
		// method
		if len(fun.Recv.List) == 0 {
			// should not happen (incorrect AST); (See issue 17788)
			// don't show this method
			return
		}
		recvTypeName, imp := baseTypeName(fun.Recv.List[0].Type)
		if imp {
			// should not happen (incorrect AST);
			// don't show this method
			return
		}
		if typ := r.lookupType(recvTypeName); typ != nil {
			typ.methods.set(fun, r.mode&PreserveAST != 0)
		}
		// otherwise ignore the method
		// TODO(gri): There may be exported methods of non-exported types
		// that can be called because of exported values (consts, vars, or
		// function results) of that type. Could determine if that is the
		// case and then show those methods in an appropriate section.
		return
	}

	// Associate factory functions with the first visible result type, as long as
	// others are predeclared types.
	if fun.Type.Results.NumFields() >= 1 {
		var typ *namedType // type to associate the function with
		numResultTypes := 0
		for _, res := range fun.Type.Results.List {
			factoryType := res.Type
			if t, ok := factoryType.(*ast.ArrayType); ok {
				// We consider functions that return slices or arrays of type
				// T (or pointers to T) as factory functions of T.
				factoryType = t.Elt
			}
			if n, imp := baseTypeName(factoryType); !imp && r.isVisible(n) && !r.isPredeclared(n) {
				if lookupTypeParam(n, fun.Type.TypeParams) != nil {
					// Issue #49477: don't associate fun with its type parameter result.
					// A type parameter is not a defined type.
					continue
				}
				if t := r.lookupType(n); t != nil {
					typ = t
					numResultTypes++
					if numResultTypes > 1 {
						// more than one candidate - not a factory function
						break
					}
				}
			}
		}
		// If there is exactly one result type,
		// associate the function with that type.
		if numResultTypes == 1 {
			typ.funcs.set(fun, r.mode&PreserveAST != 0)
			return
		}
	}

	// just an ordinary function
	r.funcs.set(fun, r.mode&PreserveAST != 0)
}
// lookupTypeParam searches for type parameters named name within the tparams
// field list, returning the relevant identifier if found, or nil if not.
func lookupTypeParam(name string, tparams *ast.FieldList) *ast.Ident {
	if tparams == nil {
		return nil
	}
	for _, field := range tparams.List {
		for _, id := range field.Names {
			if id.Name != name {
				continue
			}
			return id
		}
	}
	return nil
}
// Regular expressions for recognizing note markers such as
// "BUG(uid):" or "TODO(uid):" in comment text.
var (
	noteMarker    = `([A-Z][A-Z]+)\(([^)]+)\):?`                // MARKER(uid), MARKER at least 2 chars, uid at least 1 char
	noteMarkerRx  = lazyregexp.New(`^[ \t]*` + noteMarker)      // MARKER(uid) at text start
	noteCommentRx = lazyregexp.New(`^/[/*][ \t]*` + noteMarker) // MARKER(uid) at comment start
)
// clean replaces each sequence of space, \r, or \t characters
// with a single space and removes any trailing and leading spaces.
func clean(s string) string {
	var out []byte
	prev := byte(' ') // pretend a space precedes s so leading blanks are dropped
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == '\r' || c == '\t' {
			c = ' '
		}
		// Collapse runs of spaces: skip a space that follows a space.
		if c == ' ' && prev == ' ' {
			continue
		}
		out = append(out, c)
		prev = c
	}
	// Remove the single trailing blank, if any.
	if n := len(out); n > 0 && prev == ' ' {
		out = out[:n-1]
	}
	return string(out)
}
// readNote collects a single note from a sequence of comments.
// list is expected to start with a comment matching the note marker;
// the resulting Note is appended to r.notes under its MARKER key.
func (r *reader) readNote(list []*ast.Comment) {
	text := (&ast.CommentGroup{List: list}).Text()
	if m := noteMarkerRx.FindStringSubmatchIndex(text); m != nil {
		// The note body starts after the marker.
		// We remove any formatting so that we don't
		// get spurious line breaks/indentation when
		// showing the TODO body.
		body := clean(text[m[1]:])
		if body != "" {
			// m[2]:m[3] is the MARKER submatch, m[4]:m[5] the uid submatch.
			marker := text[m[2]:m[3]]
			r.notes[marker] = append(r.notes[marker], &Note{
				Pos:  list[0].Pos(),
				End:  list[len(list)-1].End(),
				UID:  text[m[4]:m[5]],
				Body: body,
			})
		}
	}
}
// readNotes extracts notes from comments.
// A note must start at the beginning of a comment with "MARKER(uid):"
// and is followed by the note body (e.g., "// BUG(gri): fix this").
// The note ends at the end of the comment group or at the start of
// another note in the same comment group, whichever comes first.
func (r *reader) readNotes(comments []*ast.CommentGroup) {
	for _, group := range comments {
		list := group.List
		start := -1 // comment index of most recent note start, valid if >= 0
		for j, c := range list {
			if !noteCommentRx.MatchString(c.Text) {
				continue
			}
			if start >= 0 {
				// A new marker terminates the previous note.
				r.readNote(list[start:j])
			}
			start = j
		}
		if start >= 0 {
			// Flush the trailing note, which runs to the end of the group.
			r.readNote(list[start:])
		}
	}
}
// readFile adds the AST for a source file to the reader.
//
// It collects the package comment, records import information, and
// processes const/var and type declarations. Function declarations are
// deliberately NOT processed here; readPackage handles them in a second
// pass once all types are known.
func (r *reader) readFile(src *ast.File) {
	// add package documentation
	if src.Doc != nil {
		r.readDoc(src.Doc)
		if r.mode&PreserveAST == 0 {
			src.Doc = nil // doc consumed - remove from AST
		}
	}

	// add all declarations but for functions which are processed in a separate pass
	for _, decl := range src.Decls {
		switch d := decl.(type) {
		case *ast.GenDecl:
			switch d.Tok {
			case token.IMPORT:
				// imports are handled individually
				for _, spec := range d.Specs {
					if s, ok := spec.(*ast.ImportSpec); ok {
						if import_, err := strconv.Unquote(s.Path.Value); err == nil {
							r.imports[import_] = 1
							var name string
							if s.Name != nil {
								name = s.Name.Name
								if name == "." {
									// dot import: identifiers may come from another package
									r.hasDotImp = true
								}
							}
							if name != "." {
								if name == "" {
									name = assumedPackageName(import_)
								}
								// Record the name->path mapping; an empty path marks
								// a name that was claimed by two different paths.
								old, ok := r.importByName[name]
								if !ok {
									r.importByName[name] = import_
								} else if old != import_ && old != "" {
									r.importByName[name] = "" // ambiguous
								}
							}
						}
					}
				}
			case token.CONST, token.VAR:
				// constants and variables are always handled as a group
				r.readValue(d)
			case token.TYPE:
				// types are handled individually
				if len(d.Specs) == 1 && !d.Lparen.IsValid() {
					// common case: single declaration w/o parentheses
					// (if a single declaration is parenthesized,
					// create a new fake declaration below, so that
					// go/doc type declarations always appear w/o
					// parentheses)
					if s, ok := d.Specs[0].(*ast.TypeSpec); ok {
						r.readType(d, s)
					}
					break
				}
				for _, spec := range d.Specs {
					if s, ok := spec.(*ast.TypeSpec); ok {
						// use an individual (possibly fake) declaration
						// for each type; this also ensures that each type
						// gets to (re-)use the declaration documentation
						// if there's none associated with the spec itself
						fake := &ast.GenDecl{
							Doc: d.Doc,
							// don't use the existing TokPos because it
							// will lead to the wrong selection range for
							// the fake declaration if there are more
							// than one type in the group (this affects
							// src/cmd/godoc/godoc.go's posLink_urlFunc)
							TokPos: s.Pos(),
							Tok:    token.TYPE,
							Specs:  []ast.Spec{s},
						}
						r.readType(fake, s)
					}
				}
			}
		}
	}

	// collect MARKER(...): annotations
	r.readNotes(src.Comments)
	if r.mode&PreserveAST == 0 {
		src.Comments = nil // consumed unassociated comments - remove from AST
	}
}
// readPackage initializes the reader from pkg and processes all of its
// files: exports are filtered (unless AllDecls), declarations are read,
// and finally functions are associated with types in a second pass.
func (r *reader) readPackage(pkg *ast.Package, mode Mode) {
	// initialize reader state
	r.imports = make(map[string]int)
	r.mode = mode
	r.types = make(map[string]*namedType)
	r.funcs = make(methodSet)
	r.notes = make(map[string][]*Note)
	r.importByName = make(map[string]string)

	// Sort package files before reading them so that the
	// result does not depend on map iteration order.
	r.filenames = make([]string, 0, len(pkg.Files))
	for filename := range pkg.Files {
		r.filenames = append(r.filenames, filename)
	}
	slices.Sort(r.filenames)

	// process files in sorted order
	for _, filename := range r.filenames {
		f := pkg.Files[filename]
		if mode&AllDecls == 0 {
			r.fileExports(f)
		}
		r.readFile(f)
	}

	// Drop import names that turned out to be ambiguous.
	for name, importPath := range r.importByName {
		if importPath == "" {
			delete(r.importByName, name)
		}
	}

	// process functions now that we have better type information
	for _, f := range pkg.Files {
		for _, decl := range f.Decls {
			if fd, ok := decl.(*ast.FuncDecl); ok {
				r.readFunc(fd)
			}
		}
	}
}
// ----------------------------------------------------------------------------
// Types
// customizeRecv returns a copy of method f with the receiver type
// renamed to recvTypeName, for presenting a method promoted from an
// embedded field at embedding depth level.
//
// The displayed receiver is a pointer (*recvTypeName) only when the
// original receiver was a pointer AND the field was embedded by value:
// embedding a *T makes even T's pointer methods reachable through a
// value of the outer type, so in that case no '*' is shown.
func customizeRecv(f *Func, recvTypeName string, embeddedIsPtr bool, level int) *Func {
	if f == nil || f.Decl == nil || f.Decl.Recv == nil || len(f.Decl.Recv.List) != 1 {
		return f // shouldn't happen, but be safe
	}

	// copy existing receiver field and set new type
	newField := *f.Decl.Recv.List[0]
	origPos := newField.Type.Pos()
	_, origRecvIsPtr := newField.Type.(*ast.StarExpr)
	newIdent := &ast.Ident{NamePos: origPos, Name: recvTypeName}
	var typ ast.Expr = newIdent
	if !embeddedIsPtr && origRecvIsPtr {
		newIdent.NamePos++ // '*' is one character
		typ = &ast.StarExpr{Star: origPos, X: newIdent}
	}
	newField.Type = typ

	// copy existing receiver field list and set new receiver field
	newFieldList := *f.Decl.Recv
	newFieldList.List = []*ast.Field{&newField}

	// copy existing function declaration and set new receiver field list
	newFuncDecl := *f.Decl
	newFuncDecl.Recv = &newFieldList

	// copy existing function documentation and set new declaration
	newF := *f
	newF.Decl = &newFuncDecl
	newF.Recv = recvString(typ)
	// the Orig field never changes
	newF.Level = level

	return &newF
}
// collectEmbeddedMethods collects the embedded methods of typ in mset.
//
// recvTypeName is the name of the outermost embedding type; level is
// the current embedding depth. visited guards against cycles in the
// embedding graph.
func (r *reader) collectEmbeddedMethods(mset methodSet, typ *namedType, recvTypeName string, embeddedIsPtr bool, level int, visited embeddedSet) {
	visited[typ] = true
	for embedded, isPtr := range typ.embedded {
		// Once an embedded type is embedded as a pointer type
		// all embedded types in those types are treated like
		// pointer types for the purpose of the receiver type
		// computation; i.e., embeddedIsPtr is sticky for this
		// embedding hierarchy.
		thisEmbeddedIsPtr := embeddedIsPtr || isPtr
		for _, m := range embedded.methods {
			// only top-level methods are embedded
			if m.Level == 0 {
				mset.add(customizeRecv(m, recvTypeName, thisEmbeddedIsPtr, level))
			}
		}
		if !visited[embedded] {
			r.collectEmbeddedMethods(mset, embedded, recvTypeName, thisEmbeddedIsPtr, level+1, visited)
		}
	}
	// Allow typ to be visited again along a different embedding path.
	delete(visited, typ)
}
// computeMethodSets determines the actual method sets for each type encountered.
func (r *reader) computeMethodSets() {
	for _, t := range r.types {
		if !t.isStruct {
			// interface
			// TODO(gri) fix this
			continue
		}
		// collect embedded methods for the struct type t
		r.collectEmbeddedMethods(t.methods, t, t.name, false, 1, make(embeddedSet))
	}

	// For any predeclared names that are declared locally, don't treat them as
	// exported fields anymore.
	for predecl := range r.shadowedPredecl {
		for _, ityp := range r.fixmap[predecl] {
			removeAnonymousField(predecl, ityp)
		}
	}
}
// cleanupTypes removes the association of functions and methods with
// types that have no declaration. Instead, these functions and methods
// are shown at the package level. It also removes types with missing
// declarations or which are not visible.
func (r *reader) cleanupTypes() {
	for _, t := range r.types {
		visible := r.isVisible(t.name)
		predeclared := predeclaredTypes[t.name]

		if t.decl == nil && (predeclared || visible && (t.isEmbedded || r.hasDotImp)) {
			// t.name is a predeclared type (and was not redeclared in this package),
			// or it was embedded somewhere but its declaration is missing (because
			// the AST is incomplete), or we have a dot-import (and all bets are off):
			// move any associated values, funcs, and methods back to the top-level so
			// that they are not lost.
			// 1) move values
			r.values = append(r.values, t.values...)
			// 2) move factory functions
			for name, f := range t.funcs {
				// in a correct AST, package-level function names
				// are all different - no need to check for conflicts
				r.funcs[name] = f
			}
			// 3) move methods
			if !predeclared {
				for name, m := range t.methods {
					// don't overwrite functions with the same name - drop them
					if _, found := r.funcs[name]; !found {
						r.funcs[name] = m
					}
				}
			}
		}
		// remove types w/o declaration or which are not visible
		// (deleting a key during range over a map is safe in Go)
		if t.decl == nil || !visible {
			delete(r.types, t.name)
		}
	}
}
// ----------------------------------------------------------------------------
// Sorting
// sortedKeys returns the keys of m in ascending order.
func sortedKeys(m map[string]int) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	slices.Sort(keys)
	return keys
}
// sortingName returns the name to use when sorting d into place:
// the single declared value name, or "" for grouped/non-value decls.
func sortingName(d *ast.GenDecl) string {
	if len(d.Specs) != 1 {
		return ""
	}
	vs, ok := d.Specs[0].(*ast.ValueSpec)
	if !ok {
		return ""
	}
	return vs.Names[0].Name
}
// sortedValues returns the values in m declared with token tok
// (const or var), sorted by name and, for equal names, by global
// declaration order.
func sortedValues(m []*Value, tok token.Token) []*Value {
	list := make([]*Value, 0, len(m)) // big enough in any case
	for _, val := range m {
		if val.Decl.Tok == tok {
			list = append(list, val)
		}
	}
	slices.SortFunc(list, func(a, b *Value) int {
		if c := strings.Compare(sortingName(a.Decl), sortingName(b.Decl)); c != 0 {
			return c
		}
		return cmp.Compare(a.order, b.order)
	})
	return list
}
// sortedTypes converts the type map m into a list of *Type sorted by
// name, splitting each type's values into consts and vars and sorting
// its factory functions and methods.
func sortedTypes(m map[string]*namedType, allMethods bool) []*Type {
	list := make([]*Type, 0, len(m))
	for _, t := range m {
		list = append(list, &Type{
			Doc:     t.doc,
			Name:    t.name,
			Decl:    t.decl,
			Consts:  sortedValues(t.values, token.CONST),
			Vars:    sortedValues(t.values, token.VAR),
			Funcs:   sortedFuncs(t.funcs, true),
			Methods: sortedFuncs(t.methods, allMethods),
		})
	}
	slices.SortFunc(list, func(a, b *Type) int {
		return strings.Compare(a.Name, b.Name)
	})
	return list
}
// removeStar strips a single leading '*' from s, if present.
func removeStar(s string) string {
	return strings.TrimPrefix(s, "*")
}
// sortedFuncs returns the functions in m, filtered according to
// allMethods and sorted by name.
func sortedFuncs(m methodSet, allMethods bool) []*Func {
	list := make([]*Func, 0, len(m))
	for _, f := range m {
		// determine which methods to include
		if f.Decl == nil {
			continue // exclude conflict entry
		}
		if allMethods || f.Level == 0 || !token.IsExported(removeStar(f.Orig)) {
			// forced inclusion, method not embedded, or method
			// embedded but original receiver type not exported
			list = append(list, f)
		}
	}
	slices.SortFunc(list, func(a, b *Func) int {
		return strings.Compare(a.Name, b.Name)
	})
	return list
}
// noteBodies returns a list of note body strings given a list of notes.
// This is only used to populate the deprecated Package.Bugs field.
func noteBodies(notes []*Note) []string {
	var bodies []string
	for _, note := range notes {
		bodies = append(bodies, note.Body)
	}
	return bodies
}
// ----------------------------------------------------------------------------
// Predeclared identifiers
// IsPredeclared reports whether s is a predeclared identifier
// (a predeclared type, built-in function, or predeclared constant).
func IsPredeclared(s string) bool {
	switch {
	case predeclaredTypes[s], predeclaredFuncs[s], predeclaredConstants[s]:
		return true
	}
	return false
}
// predeclaredTypes is the set of type names predeclared in the
// universe scope, including the aliases any, byte, and rune.
var predeclaredTypes = map[string]bool{
	"any":        true,
	"bool":       true,
	"byte":       true,
	"comparable": true,
	"complex64":  true,
	"complex128": true,
	"error":      true,
	"float32":    true,
	"float64":    true,
	"int":        true,
	"int8":       true,
	"int16":      true,
	"int32":      true,
	"int64":      true,
	"rune":       true,
	"string":     true,
	"uint":       true,
	"uint8":      true,
	"uint16":     true,
	"uint32":     true,
	"uint64":     true,
	"uintptr":    true,
}
// predeclaredFuncs is the set of built-in function names predeclared
// in the universe scope.
var predeclaredFuncs = map[string]bool{
	"append":  true,
	"cap":     true,
	"clear":   true,
	"close":   true,
	"complex": true,
	"copy":    true,
	"delete":  true,
	"imag":    true,
	"len":     true,
	"make":    true,
	"max":     true,
	"min":     true,
	"new":     true,
	"panic":   true,
	"print":   true,
	"println": true,
	"real":    true,
	"recover": true,
}
// predeclaredConstants is the set of constant identifiers predeclared
// in the universe scope (nil is included here for convenience).
var predeclaredConstants = map[string]bool{
	"false": true,
	"iota":  true,
	"nil":   true,
	"true":  true,
}
// assumedPackageName returns the assumed package name
// for a given import path. This is a copy of
// golang.org/x/tools/internal/imports.ImportPathToAssumedName.
func assumedPackageName(importPath string) string {
	// isIdentRune reports whether ch may appear in a Go identifier.
	isIdentRune := func(ch rune) bool {
		return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' ||
			'0' <= ch && ch <= '9' ||
			ch == '_' ||
			ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))
	}

	base := path.Base(importPath)
	// A purely numeric version suffix such as ".../v2" names
	// the parent path element instead.
	if strings.HasPrefix(base, "v") {
		if _, err := strconv.Atoi(base[1:]); err == nil {
			if dir := path.Dir(importPath); dir != "." {
				base = path.Base(dir)
			}
		}
	}
	// Drop the conventional "go-" repository prefix (go-yaml -> yaml).
	base = strings.TrimPrefix(base, "go-")
	// Cut at the first rune that cannot appear in an identifier.
	if i := strings.IndexFunc(base, func(ch rune) bool { return !isIdentRune(ch) }); i >= 0 {
		base = base[:i]
	}
	return base
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package doc
import (
"go/doc/comment"
"strings"
"unicode"
)
// firstSentence returns the first sentence in s.
// The sentence ends after the first period followed by space and
// not preceded by exactly one uppercase letter, or immediately after
// a CJK full stop ('。' or '．').
func firstSentence(s string) string {
	var ppp, pp, p rune // the three runes preceding q
	for i, q := range s {
		// Treat line breaks and tabs like spaces for sentence detection.
		if q == '\n' || q == '\r' || q == '\t' {
			q = ' '
		}
		// A period followed by a space ends the sentence, unless the
		// period is preceded by exactly one uppercase letter (likely
		// an initial, as in "M. Smith").
		if q == ' ' && p == '.' && (!unicode.IsUpper(pp) || unicode.IsUpper(ppp)) {
			return s[:i]
		}
		// An ideographic ('。') or fullwidth ('．') full stop always ends
		// the sentence, no trailing space required.
		// BUG FIX: this previously tested p == '.' (ASCII period), which
		// made the space/initial heuristic above unreachable and cut the
		// sentence at any interior period.
		if p == '。' || p == '．' {
			return s[:i]
		}
		ppp, pp, p = pp, p, q
	}
	return s
}
// Synopsis returns a cleaned version of the first sentence in text.
//
// Deprecated: New programs should use [Package.Synopsis] instead,
// which handles links in text properly.
func Synopsis(text string) string {
	// Delegate to the method on a zero-value Package.
	return new(Package).Synopsis(text)
}
// IllegalPrefixes is a list of lower-case prefixes that identify
// a comment as not being a doc comment.
// This helps to avoid misinterpreting the common mistake
// of a copyright notice immediately before a package statement
// as being a doc comment.
//
// Entries must be lower-case: Synopsis compares them against a
// lower-cased copy of the text.
var IllegalPrefixes = []string{
	"copyright",
	"all rights",
	"author",
}
// Synopsis returns a cleaned version of the first sentence in text.
// That sentence ends after the first period followed by space and not
// preceded by exactly one uppercase letter, or at the first paragraph break.
// The result string has no \n, \r, or \t characters and uses only single
// spaces between words. If text starts with any of the [IllegalPrefixes],
// the result is the empty string.
func (p *Package) Synopsis(text string) string {
	text = firstSentence(text)
	lower := strings.ToLower(text)
	for _, prefix := range IllegalPrefixes {
		if strings.HasPrefix(lower, prefix) {
			return ""
		}
	}
	// Render the sentence as unwrapped plain text
	// (a negative TextWidth disables line wrapping).
	pr := p.Printer()
	pr.TextWidth = -1
	d := p.Parser().Parse(text)
	if len(d.Content) == 0 {
		return ""
	}
	if _, ok := d.Content[0].(*comment.Paragraph); !ok {
		// Text does not start with a paragraph (e.g. a heading or
		// code block) - there is no usable synopsis.
		return ""
	}
	d.Content = d.Content[:1] // might be blank lines, code blocks, etc in “first sentence”
	return strings.TrimSpace(string(pr.Text(d)))
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package format implements standard formatting of Go source.
//
// Note that formatting of Go source code changes over time, so tools relying on
// consistent formatting should execute a specific version of the gofmt binary
// instead of using this package. That way, the formatting will be stable, and
// the tools won't need to be recompiled each time gofmt changes.
//
// For example, pre-submit checks that use this package directly would behave
// differently depending on what Go version each developer uses, causing the
// check to be inherently fragile.
package format
import (
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"io"
)
// Keep these in sync with cmd/gofmt/gofmt.go.
const (
	tabWidth    = 8
	printerMode = printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers

	// printerNormalizeNumbers means to canonicalize number literal prefixes
	// and exponents while printing. See https://golang.org/doc/go1.13#gofmt.
	//
	// This value is defined in go/printer specifically for go/format and cmd/gofmt.
	printerNormalizeNumbers = 1 << 30
)

// config is the printer configuration used for all formatting in this package.
var config = printer.Config{Mode: printerMode, Tabwidth: tabWidth}

// parserMode keeps comments and skips the deprecated object resolution phase.
const parserMode = parser.ParseComments | parser.SkipObjectResolution
// Node formats node in canonical gofmt style and writes the result to dst.
//
// The node type must be *[ast.File], *[printer.CommentedNode], [][ast.Decl],
// [][ast.Stmt], or assignment-compatible to [ast.Expr], [ast.Decl], [ast.Spec],
// or [ast.Stmt]. Node does not modify node. Imports are not sorted for
// nodes representing partial source files (for instance, if the node is
// not an *[ast.File] or a *[printer.CommentedNode] not wrapping an *[ast.File]).
//
// The function may return early (before the entire result is written)
// and return a formatting error, for instance due to an incorrect AST.
func Node(dst io.Writer, fset *token.FileSet, node any) error {
	// Determine if we have a complete source file (file != nil).
	var file *ast.File
	var cnode *printer.CommentedNode
	switch n := node.(type) {
	case *ast.File:
		file = n
	case *printer.CommentedNode:
		if f, ok := n.Node.(*ast.File); ok {
			file = f
			cnode = n
		}
	}

	// Sort imports if necessary.
	if file != nil && hasUnsortedImports(file) {
		// Make a copy of the AST because ast.SortImports is destructive.
		// TODO(gri) Do this more efficiently.
		// The copy is made by printing the file and re-parsing it.
		var buf bytes.Buffer
		err := config.Fprint(&buf, fset, file)
		if err != nil {
			return err
		}
		file, err = parser.ParseFile(fset, "", buf.Bytes(), parserMode)
		if err != nil {
			// We should never get here. If we do, provide good diagnostic.
			return fmt.Errorf("format.Node internal error (%s)", err)
		}
		ast.SortImports(fset, file)

		// Use new file with sorted imports.
		node = file
		if cnode != nil {
			// Re-wrap to keep the caller's comments attached.
			node = &printer.CommentedNode{Node: file, Comments: cnode.Comments}
		}
	}

	return config.Fprint(dst, fset, node)
}
// Source formats src in canonical gofmt style and returns the result
// or an (I/O or syntax) error. src is expected to be a syntactically
// correct Go source file, or a list of Go declarations or statements.
//
// If src is a partial source file, the leading and trailing space of src
// is applied to the result (such that it has the same leading and trailing
// space as src), and the result is indented by the same amount as the first
// line of src containing code. Imports are not sorted for partial source files.
func Source(src []byte) ([]byte, error) {
	fset := token.NewFileSet()
	f, sourceAdj, indentAdj, err := parse(fset, "", src, true)
	if err != nil {
		return nil, err
	}

	// A nil sourceAdj means src was a complete source file;
	// only then are imports sorted.
	// TODO(gri) consider doing this always.
	if sourceAdj == nil {
		ast.SortImports(fset, f)
	}

	return format(fset, f, sourceAdj, indentAdj, src, config)
}
// hasUnsortedImports reports whether file contains an import
// declaration that may need sorting (i.e., any grouped import).
func hasUnsortedImports(file *ast.File) bool {
	for _, decl := range file.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			// Not an import declaration, so we're done.
			// Imports are always first.
			break
		}
		if gen.Lparen.IsValid() {
			// For now assume all grouped imports are unsorted.
			// TODO(gri) Should check if they are sorted already.
			return true
		}
		// Ungrouped imports are sorted by default.
	}
	return false
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TODO(gri): This file and the file src/cmd/gofmt/internal.go are
// the same (but for this comment and the package name). Do not modify
// one without the other. Determine if we can factor out functionality
// in a public API. See also #11844 for context.
package format
import (
"bytes"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"strings"
)
// parse parses src, which was read from the named file,
// as a Go source file, declaration, or statement list.
//
// On success it returns the parsed file together with an optional
// sourceAdj function (nil for a complete source file) that maps the
// formatted output of the wrapped source back to the original fragment,
// and an indentAdj correcting for the extra indentation introduced by
// wrapping a statement list in a function body.
func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) (
	file *ast.File,
	sourceAdj func(src []byte, indent int) []byte,
	indentAdj int,
	err error,
) {
	// Try as whole source file.
	file, err = parser.ParseFile(fset, filename, src, parserMode)
	// If there's no error, return. If the error is that the source file didn't begin with a
	// package line and source fragments are ok, fall through to
	// try as a source fragment. Stop and return on any other error.
	if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") {
		return
	}

	// If this is a declaration list, make it a source file
	// by inserting a package clause.
	// Insert using a ';', not a newline, so that the line numbers
	// in psrc match the ones in src.
	psrc := append([]byte("package p;"), src...)
	file, err = parser.ParseFile(fset, filename, psrc, parserMode)
	if err == nil {
		sourceAdj = func(src []byte, indent int) []byte {
			// Remove the package clause.
			// Gofmt has turned the ';' into a '\n'.
			src = src[indent+len("package p\n"):]
			return bytes.TrimSpace(src)
		}
		return
	}

	// If the error is that the source file didn't begin with a
	// declaration, fall through to try as a statement list.
	// Stop and return on any other error.
	if !strings.Contains(err.Error(), "expected declaration") {
		return
	}

	// If this is a statement list, make it a source file
	// by inserting a package clause and turning the list
	// into a function body. This handles expressions too.
	// Insert using a ';', not a newline, so that the line numbers
	// in fsrc match the ones in src. Add an extra '\n' before the '}'
	// to make sure comments are flushed before the '}'.
	fsrc := append(append([]byte("package p; func _() {"), src...), '\n', '\n', '}')
	file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
	if err == nil {
		sourceAdj = func(src []byte, indent int) []byte {
			// Cap adjusted indent to zero.
			if indent < 0 {
				indent = 0
			}
			// Remove the wrapping.
			// Gofmt has turned the "; " into a "\n\n".
			// There will be two non-blank lines with indent, hence 2*indent.
			src = src[2*indent+len("package p\n\nfunc _() {"):]
			// Remove only the "}\n" suffix: remaining whitespaces will be trimmed anyway
			src = src[:len(src)-len("}\n")]
			return bytes.TrimSpace(src)
		}
		// Gofmt has also indented the function body one level.
		// Adjust that with indentAdj.
		indentAdj = -1
	}

	// Succeeded, or out of options.
	return
}
// format formats the given package file originally obtained from src
// and adjusts the result based on the original source via sourceAdj
// and indentAdj.
//
// For a partial source file (sourceAdj != nil), the original leading
// and trailing whitespace of src is re-applied to the formatted result,
// and the result is indented like the first code line of src.
func format(
	fset *token.FileSet,
	file *ast.File,
	sourceAdj func(src []byte, indent int) []byte,
	indentAdj int,
	src []byte,
	cfg printer.Config,
) ([]byte, error) {
	if sourceAdj == nil {
		// Complete source file.
		var buf bytes.Buffer
		err := cfg.Fprint(&buf, fset, file)
		if err != nil {
			return nil, err
		}
		return buf.Bytes(), nil
	}

	// Partial source file.
	// Determine and prepend leading space.
	// i is the byte offset of the start of the last line
	// of leading whitespace; j is the offset of the first
	// non-space byte.
	i, j := 0, 0
	for j < len(src) && isSpace(src[j]) {
		if src[j] == '\n' {
			i = j + 1 // byte offset of last line in leading space
		}
		j++
	}
	var res []byte
	res = append(res, src[:i]...)

	// Determine and prepend indentation of first code line.
	// Spaces are ignored unless there are no tabs,
	// in which case spaces count as one tab.
	indent := 0
	hasSpace := false
	for _, b := range src[i:j] {
		switch b {
		case ' ':
			hasSpace = true
		case '\t':
			indent++
		}
	}
	if indent == 0 && hasSpace {
		indent = 1
	}
	for i := 0; i < indent; i++ {
		res = append(res, '\t')
	}

	// Format the source.
	// Write it without any leading and trailing space.
	cfg.Indent = indent + indentAdj
	var buf bytes.Buffer
	err := cfg.Fprint(&buf, fset, file)
	if err != nil {
		return nil, err
	}
	out := sourceAdj(buf.Bytes(), cfg.Indent)

	// If the adjusted output is empty, the source
	// was empty but (possibly) for white space.
	// The result is the incoming source.
	if len(out) == 0 {
		return src, nil
	}

	// Otherwise, append output to leading space.
	res = append(res, out...)

	// Determine and append trailing space.
	i = len(src)
	for i > 0 && isSpace(src[i-1]) {
		i--
	}
	return append(res, src[i:]...), nil
}
// isSpace reports whether the byte is a space character.
// isSpace defines a space as being among the following bytes: ' ', '\t', '\n' and '\r'.
func isSpace(b byte) bool {
	switch b {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the exported entry points for invoking the parser.
package parser
import (
"bytes"
"errors"
"go/ast"
"go/token"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
)
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
func readSource(filename string, src any) ([]byte, error) {
	if src == nil {
		return os.ReadFile(filename)
	}
	switch s := src.(type) {
	case string:
		return []byte(s), nil
	case []byte:
		return s, nil
	case *bytes.Buffer:
		// is io.Reader, but src is already available in []byte form
		if s != nil {
			return s.Bytes(), nil
		}
	case io.Reader:
		return io.ReadAll(s)
	}
	// Unsupported source type (including a nil *bytes.Buffer).
	return nil, errors.New("invalid source")
}
// A Mode value is a set of flags (or 0).
// They control the amount of source code parsed and other optional
// parser functionality.
type Mode uint

// Parser mode flags; combine with bitwise OR.
const (
	PackageClauseOnly    Mode             = 1 << iota // stop parsing after package clause
	ImportsOnly                                       // stop parsing after import declarations
	ParseComments                                     // parse comments and add them to AST
	Trace                                             // print a trace of parsed productions
	DeclarationErrors                                 // report declaration errors
	SpuriousErrors                                    // same as AllErrors, for backward-compatibility
	SkipObjectResolution                              // skip deprecated identifier resolution; see ParseFile
	AllErrors = SpuriousErrors // report all errors (not just the first 10 on different lines)
)
// ParseFile parses the source code of a single Go source file and returns
// the corresponding [ast.File] node. The source code may be provided via
// the filename of the source file, or via the src parameter.
//
// If src != nil, ParseFile parses the source from src and the filename is
// only used when recording position information. The type of the argument
// for the src parameter must be string, []byte, or [io.Reader].
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and
// other optional parser functionality. If the [SkipObjectResolution]
// mode bit is set (recommended), the object resolution phase of
// parsing will be skipped, causing File.Scope, File.Unresolved, and
// all Ident.Obj fields to be nil. Those fields are deprecated; see
// [ast.Object] for details.
//
// Position information is recorded in the file set fset, which must not be
// nil.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with [ast.Bad]* nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by source position.
func ParseFile(fset *token.FileSet, filename string, src any, mode Mode) (f *ast.File, err error) {
	if fset == nil {
		panic("parser.ParseFile: no token.FileSet provided (fset == nil)")
	}

	// get source
	text, err := readSource(filename, src)
	if err != nil {
		return nil, err
	}

	file := fset.AddFile(filename, -1, len(text))

	var p parser
	defer func() {
		// The parser aborts via a bailout panic when it gives up (e.g.
		// too many errors, or excessive nesting); recover from that here
		// and resume any other panic unchanged.
		if e := recover(); e != nil {
			// resume same panic if it's not a bailout
			bail, ok := e.(bailout)
			if !ok {
				panic(e)
			} else if bail.msg != "" {
				p.errors.Add(p.file.Position(bail.pos), bail.msg)
			}
		}

		// set result values
		if f == nil {
			// source is not a valid Go source file - satisfy
			// ParseFile API and return a valid (but empty)
			// *ast.File
			f = &ast.File{
				Name:  new(ast.Ident),
				Scope: ast.NewScope(nil),
			}
		}

		// Ensure the start/end are consistent,
		// whether parsing succeeded or not.
		f.FileStart = token.Pos(file.Base())
		f.FileEnd = file.End()

		p.errors.Sort()
		err = p.errors.Err()
	}()

	// parse source
	p.init(file, text, mode)
	f = p.parseFile()

	return
}
// ParseDir calls [ParseFile] for all files with names ending in ".go" in the
// directory specified by path and returns a map of package name -> package
// AST with all the packages found.
//
// If filter != nil, only the files with [fs.FileInfo] entries passing through
// the filter (and ending in ".go") are considered. The mode bits are passed
// to [ParseFile] unchanged. Position information is recorded in fset, which
// must not be nil.
//
// If the directory couldn't be read, a nil map and the respective error are
// returned. If a parse error occurred, a non-nil but incomplete map and the
// first error encountered are returned.
//
// Deprecated: ParseDir does not consider build tags when associating
// files with packages. For precise information about the relationship
// between packages and files, use golang.org/x/tools/go/packages,
// which can also optionally parse and type-check the files too.
func ParseDir(fset *token.FileSet, path string, filter func(fs.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error) {
	entries, err := os.ReadDir(path)
	if err != nil {
		return nil, err
	}

	pkgs = make(map[string]*ast.Package)
	for _, entry := range entries {
		// Only plain files named *.go are candidates.
		if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".go") {
			continue
		}
		if filter != nil {
			fi, err := entry.Info()
			if err != nil {
				return nil, err
			}
			if !filter(fi) {
				continue
			}
		}
		filename := filepath.Join(path, entry.Name())
		src, err := ParseFile(fset, filename, nil, mode)
		if err != nil {
			// Keep only the first parse error; skip the erroneous file.
			if first == nil {
				first = err
			}
			continue
		}
		name := src.Name.Name
		pkg := pkgs[name]
		if pkg == nil {
			pkg = &ast.Package{
				Name:  name,
				Files: make(map[string]*ast.File),
			}
			pkgs[name] = pkg
		}
		pkg.Files[filename] = src
	}

	return
}
// ParseExprFrom is a convenience function for parsing an expression.
// The arguments have the same meaning as for [ParseFile], but the source must
// be a valid Go (type or value) expression. Specifically, fset must not
// be nil.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with [ast.Bad]* nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by source position.
func ParseExprFrom(fset *token.FileSet, filename string, src any, mode Mode) (expr ast.Expr, err error) {
	if fset == nil {
		panic("parser.ParseExprFrom: no token.FileSet provided (fset == nil)")
	}

	// get source
	text, err := readSource(filename, src)
	if err != nil {
		return nil, err
	}

	var p parser
	defer func() {
		// Recover from a bailout panic (early parser termination);
		// resume any other panic unchanged.
		if e := recover(); e != nil {
			// resume same panic if it's not a bailout
			bail, ok := e.(bailout)
			if !ok {
				panic(e)
			} else if bail.msg != "" {
				p.errors.Add(p.file.Position(bail.pos), bail.msg)
			}
		}
		p.errors.Sort()
		err = p.errors.Err()
	}()

	// parse expr
	file := fset.AddFile(filename, -1, len(text))
	p.init(file, text, mode)
	expr = p.parseRhs()

	// If a semicolon was inserted, consume it;
	// report an error if there's more tokens.
	if p.tok == token.SEMICOLON && p.lit == "\n" {
		p.next()
	}
	p.expect(token.EOF)

	return
}
// ParseExpr is a convenience function for obtaining the AST of an expression x.
// The position information recorded in the AST is undefined. The filename used
// in error messages is the empty string.
//
// If syntax errors were found, the result is a partial AST (with [ast.Bad]* nodes
// representing the fragments of erroneous source code). Multiple errors are
// returned via a scanner.ErrorList which is sorted by source position.
func ParseExpr(x string) (ast.Expr, error) {
	fset := token.NewFileSet() // positions are not meaningful to the caller
	return ParseExprFrom(fset, "", []byte(x), 0)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package parser implements a parser for Go source files.
//
// The [ParseFile] function reads file input from a string, []byte, or
// io.Reader, and produces an [ast.File] representing the complete
// abstract syntax tree of the file.
//
// The [ParseExprFrom] function reads a single source-level expression and
// produces an [ast.Expr], the syntax tree of the expression.
//
// The parser accepts a larger language than is syntactically permitted by
// the Go spec, for simplicity, and for improved robustness in the presence
// of syntax errors. For instance, in method declarations, the receiver is
// treated like an ordinary parameter list and thus may contain multiple
// entries where the spec permits exactly one. Consequently, the corresponding
// field in the AST (ast.FuncDecl.Recv) is not restricted to one entry.
//
// Applications that need to parse one or more complete packages of Go
// source code may find it more convenient not to interact directly
// with the parser but instead to use the Load function in package
// [golang.org/x/tools/go/packages].
package parser
import (
"fmt"
"go/ast"
"go/build/constraint"
"go/scanner"
"go/token"
"strings"
)
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File       // source file being parsed (supplies position information)
	errors  scanner.ErrorList // parse errors accumulated so far
	scanner scanner.Scanner   // tokenizer for file

	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode&Trace != 0)
	indent int  // indentation used for tracing output

	// Comments
	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment
	top         bool              // in top of file (before package clause)
	goVersion   string            // minimum Go version found in //go:build comment

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Error recovery
	// (used to limit the number of calls to parser.advance
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of parser.advance calls without progress

	// Non-syntactic parser control
	exprLev int  // < 0: in control clause, >= 0: in expression
	inRhs   bool // if set, the parser is parsing a rhs expression

	imports []*ast.ImportSpec // list of imports

	// nestLev is used to track and limit the recursion depth
	// during parsing.
	nestLev int
}
// init prepares p for parsing src, which belongs to file, with the given
// parsing mode, and loads the first token.
func (p *parser) init(file *token.File, src []byte, mode Mode) {
	p.file = file
	p.mode = mode
	p.trace = mode&Trace != 0 // cached because it is consulted frequently
	p.top = true

	reportErr := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
	p.scanner.Init(p.file, src, reportErr, scanner.ScanComments)

	p.next()
}
// end returns the end position of the current token.
func (p *parser) end() token.Pos {
	return p.scanner.End()
}
// ----------------------------------------------------------------------------
// Parsing support
// printTrace prints one trace line, prefixed by the current source
// position and dots reflecting the current trace indentation.
func (p *parser) printTrace(a ...any) {
	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	pos := p.file.Position(p.pos)
	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
	// Emit 2*p.indent dot-pattern bytes, a full chunk at a time.
	for i := 2 * p.indent; ; i -= n {
		if i <= n {
			fmt.Print(dots[0:i])
			break
		}
		fmt.Print(dots)
	}
	fmt.Println(a...)
}
// trace prints the opening line of a trace message and increases the
// trace indentation; it returns p so it composes with un (see below).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
// un closes a trace message opened by trace and restores the indentation.
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
// maxNestLev is the deepest we're willing to recurse during parsing
// before aborting via a bailout panic (see incNestLev).
const maxNestLev int = 1e5
// incNestLev increments the nesting level and aborts parsing with a
// bailout panic once maxNestLev is exceeded, protecting against stack
// exhaustion on deeply nested input. Use with decNestLev:
// defer decNestLev(incNestLev(p)).
func incNestLev(p *parser) *parser {
	p.nestLev++
	if p.nestLev > maxNestLev {
		p.error(p.pos, "exceeded max nesting depth")
		panic(bailout{})
	}
	return p
}
// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
// It is used along with incNestLev in a similar fashion to how un and trace are used.
func decNestLev(p *parser) {
	p.nestLev--
}
// Advance to the next token.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is token.ILLEGAL), so don't print it.
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}

	for {
		p.pos, p.tok, p.lit = p.scanner.Scan()
		if p.tok == token.COMMENT {
			// While still before the package clause, remember the Go
			// version from a //go:build comment, if any.
			if p.top && strings.HasPrefix(p.lit, "//go:build") {
				if x, err := constraint.Parse(p.lit); err == nil {
					p.goVersion = constraint.GoVersion(x)
				}
			}
			// Skip comments entirely unless the caller asked for them.
			if p.mode&ParseComments == 0 {
				continue
			}
		} else {
			// Found a non-comment; top of file is over.
			p.top = false
		}
		break
	}
}
// lineFor returns the line of pos, ignoring line directive adjustments.
func (p *parser) lineFor(pos token.Pos) int {
	return p.file.PositionFor(pos, false).Line
}
// consumeComment consumes a single comment token and returns it together
// with the line on which the comment ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	endline = p.lineFor(p.pos)
	if p.lit[1] == '*' {
		// A /*-style comment may end on a different line than it starts:
		// account for every newline it contains.
		endline += strings.Count(p.lit, "\n")
	}

	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	p.next0()

	return
}
// consumeCommentGroup consumes a run of adjacent comments, records the
// group in p.comments, and returns it together with the line on which the
// last comment ends. The group is terminated by a non-comment token or by
// more than n empty lines between comments.
func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var group []*ast.Comment
	endline = p.lineFor(p.pos)
	for p.tok == token.COMMENT && p.lineFor(p.pos) <= endline+n {
		var c *ast.Comment
		c, endline = p.consumeComment()
		group = append(group, c)
	}

	// add comment group to the comments list
	comments = &ast.CommentGroup{List: group}
	p.comments = append(p.comments, comments)

	return
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	prev := p.pos
	p.next0()

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		if p.lineFor(p.pos) == p.lineFor(prev) {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.lineFor(p.pos) != endline || p.tok == token.SEMICOLON || p.tok == token.EOF {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		// (endline == -1 when no successor group was consumed)
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.lineFor(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}
// A bailout panic is raised to indicate early termination. pos and msg are
// only populated when bailing out of object resolution. It is recovered by
// the deferred handlers in ParseFile and ParseExprFrom.
type bailout struct {
	pos token.Pos
	msg string
}
// error records a parse error with the given message at pos. Unless the
// AllErrors mode bit is set, an error on the same line as the previous one
// is discarded, and parsing is aborted via a bailout panic once more than
// 10 errors have accumulated.
func (p *parser) error(pos token.Pos, msg string) {
	if p.trace {
		defer un(trace(p, "error: "+msg))
	}

	epos := p.file.Position(pos)

	if p.mode&AllErrors == 0 {
		if n := len(p.errors); n > 0 {
			if p.errors[n-1].Pos.Line == epos.Line {
				return // discard - likely a spurious error
			}
			if n > 10 {
				panic(bailout{})
			}
		}
	}

	p.errors.Add(epos, msg)
}
// errorExpected reports that msg was expected at pos. When the error is at
// the current token, the message also says what was found instead.
func (p *parser) errorExpected(pos token.Pos, msg string) {
	msg = "expected " + msg
	if pos == p.pos {
		// the error happened at the current position;
		// make the error message more specific
		if p.tok == token.SEMICOLON && p.lit == "\n" {
			msg += ", found newline"
		} else if p.tok.IsLiteral() {
			// print 123 rather than 'INT', etc.
			msg += ", found " + p.lit
		} else {
			msg += ", found '" + p.tok.String() + "'"
		}
	}
	p.error(pos, msg)
}
// expect consumes the current token, reporting an error if it is not want,
// and returns its position.
func (p *parser) expect(want token.Token) token.Pos {
	pos := p.pos
	if p.tok != want {
		p.errorExpected(pos, "'"+want.String()+"'")
	}
	p.next() // make progress
	return pos
}
// expect2 is like expect, but it returns an invalid position
// if the expected token is not found.
func (p *parser) expect2(want token.Token) (pos token.Pos) {
	if p.tok != want {
		p.errorExpected(p.pos, "'"+want.String()+"'")
	} else {
		pos = p.pos
	}
	p.next() // make progress
	return
}
// expectClosing is like expect but produces a friendlier error message for
// the common case of a missing comma before a newline.
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	if p.tok == token.SEMICOLON && p.lit == "\n" && p.tok != tok {
		p.error(p.pos, "missing ',' before newline in "+context)
		p.next()
	}
	return p.expect(tok)
}
// expectSemi consumes a semicolon (explicit or scanner-inserted) and
// returns the line comment that applies to it, if any. The semicolon is
// optional before a closing ')' or '}'; a ',' is tolerated with an error.
func (p *parser) expectSemi() (comment *ast.CommentGroup) {
	if p.tok == token.RPAREN || p.tok == token.RBRACE {
		return nil // semicolon is optional before a closing ')' or '}'
	}

	if p.tok == token.COMMA {
		// permit a ',' instead of a ';' but complain
		p.errorExpected(p.pos, "';'")
	} else if p.tok != token.SEMICOLON {
		p.errorExpected(p.pos, "';'")
		p.advance(stmtStart)
		return nil
	}

	if p.lit == ";" {
		// explicit semicolon
		p.next()
		return p.lineComment // use following comments
	}
	// artificial semicolon (or tolerated comma)
	comment = p.lineComment // use preceding comments
	p.next()
	return comment
}
// atComma reports whether the parser is positioned at a comma in the given
// context. If the current token is neither a comma nor follow, a
// missing-comma error is reported and true is returned so the caller can
// proceed as if a comma were present.
func (p *parser) atComma(context string, follow token.Token) bool {
	switch p.tok {
	case token.COMMA:
		return true
	case follow:
		return false
	}
	msg := "missing ','"
	if p.tok == token.SEMICOLON && p.lit == "\n" {
		msg += " before newline"
	}
	p.error(p.pos, msg+" in "+context)
	return true // "insert" comma and continue
}
// assert panics with an internal-error message when cond does not hold.
func assert(cond bool, msg string) {
	if cond {
		return
	}
	panic("go/parser internal error: " + msg)
}
// advance consumes tokens until the current token p.tok
// is in the 'to' set, or token.EOF. For error recovery.
func (p *parser) advance(to map[token.Token]bool) {
	for ; p.tok != token.EOF; p.next() {
		if to[p.tok] {
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 advance calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call advance and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		}
	}
}
// stmtStart is the set of tokens that may start a statement;
// used by advance for error recovery.
var stmtStart = map[token.Token]bool{
	token.BREAK:       true,
	token.CONST:       true,
	token.CONTINUE:    true,
	token.DEFER:       true,
	token.FALLTHROUGH: true,
	token.FOR:         true,
	token.GO:          true,
	token.GOTO:        true,
	token.IF:          true,
	token.RETURN:      true,
	token.SELECT:      true,
	token.SWITCH:      true,
	token.TYPE:        true,
	token.VAR:         true,
}

// declStart is the set of tokens that may start a declaration;
// used by advance for error recovery.
var declStart = map[token.Token]bool{
	token.IMPORT: true,
	token.CONST:  true,
	token.TYPE:   true,
	token.VAR:    true,
}

// exprEnd is the set of tokens that may follow an expression;
// used by advance for error recovery.
var exprEnd = map[token.Token]bool{
	token.COMMA:     true,
	token.COLON:     true,
	token.SEMICOLON: true,
	token.RPAREN:    true,
	token.RBRACK:    true,
	token.RBRACE:    true,
}
// ----------------------------------------------------------------------------
// Identifiers
// parseIdent parses an identifier; if the current token is not an
// identifier, an error is reported and the placeholder name "_" is used.
func (p *parser) parseIdent() *ast.Ident {
	ident := &ast.Ident{NamePos: p.pos, Name: "_"}
	if p.tok != token.IDENT {
		p.expect(token.IDENT) // use expect() error handling
		return ident
	}
	ident.Name = p.lit
	p.next()
	return ident
}
// parseIdentList parses a comma-separated list of one or more identifiers.
func (p *parser) parseIdentList() (list []*ast.Ident) {
	if p.trace {
		defer un(trace(p, "IdentList"))
	}

	for {
		list = append(list, p.parseIdent())
		if p.tok != token.COMMA {
			return
		}
		p.next()
	}
}
// ----------------------------------------------------------------------------
// Common productions
// parseExprList parses a comma-separated list of one or more expressions.
func (p *parser) parseExprList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ExpressionList"))
	}

	for {
		list = append(list, p.parseExpr())
		if p.tok != token.COMMA {
			return
		}
		p.next()
	}
}
// parseList parses an expression list with p.inRhs temporarily set to
// inRhs, restoring the previous value afterwards.
func (p *parser) parseList(inRhs bool) []ast.Expr {
	defer func(old bool) { p.inRhs = old }(p.inRhs)
	p.inRhs = inRhs
	return p.parseExprList()
}
// ----------------------------------------------------------------------------
// Types
// parseType parses a type; when no type is present, an error is reported
// and a BadExpr spanning the skipped tokens is returned.
func (p *parser) parseType() ast.Expr {
	if p.trace {
		defer un(trace(p, "Type"))
	}

	if typ := p.tryIdentOrType(); typ != nil {
		return typ
	}

	pos := p.pos
	p.errorExpected(pos, "type")
	p.advance(exprEnd)
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseQualifiedIdent parses a (possibly qualified) type name and, when a
// '[' follows, an instantiation of that type.
func (p *parser) parseQualifiedIdent(ident *ast.Ident) ast.Expr {
	if p.trace {
		defer un(trace(p, "QualifiedIdent"))
	}

	typ := p.parseTypeName(ident)
	if p.tok != token.LBRACK {
		return typ
	}
	return p.parseTypeInstance(typ)
}
// parseTypeName parses a type name, either a plain identifier or a
// qualified identifier pkg.Name. If ident != nil it is the already
// consumed first identifier. If the result is an identifier, it is not
// resolved.
func (p *parser) parseTypeName(ident *ast.Ident) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeName"))
	}

	if ident == nil {
		ident = p.parseIdent()
	}

	if p.tok != token.PERIOD {
		return ident
	}

	// qualified identifier: ident is the package name
	p.next()
	return &ast.SelectorExpr{X: ident, Sel: p.parseIdent()}
}
// "[" has already been consumed, and lbrack is its position.
// If size != nil it is the already consumed array length.
func (p *parser) parseArrayType(lbrack token.Pos, size ast.Expr) *ast.ArrayType {
	if p.trace {
		defer un(trace(p, "ArrayType"))
	}

	if size == nil {
		p.exprLev++
		switch {
		case p.tok == token.ELLIPSIS:
			// always permit ellipsis for more fault-tolerant parsing
			size = &ast.Ellipsis{Ellipsis: p.pos}
			p.next()
		case p.tok != token.RBRACK:
			size = p.parseRhs()
		}
		p.exprLev--
	}

	if p.tok == token.COMMA {
		// Trailing commas are accepted in type parameter
		// lists but not in array type declarations.
		// Accept for better error handling but complain.
		p.error(p.pos, "unexpected comma; expecting ]")
		p.next()
	}

	p.expect(token.RBRACK)
	elt := p.parseType()
	return &ast.ArrayType{Lbrack: lbrack, Len: size, Elt: elt}
}
// parseArrayFieldOrTypeInstance disambiguates, after an identifier x in a
// struct body followed by '[', between an array/slice field declaration
// ("x []E", "x [P]E") and an embedded type instantiation ("x[P1, P2, ...]").
// It returns the field name and type in the field case, and a nil name
// together with the index expression in the instantiation case.
func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Expr) {
	if p.trace {
		defer un(trace(p, "ArrayFieldOrTypeInstance"))
	}

	lbrack := p.expect(token.LBRACK)
	trailingComma := token.NoPos // if valid, the position of a trailing comma preceding the ']'
	var args []ast.Expr
	if p.tok != token.RBRACK {
		p.exprLev++
		args = append(args, p.parseRhs())
		for p.tok == token.COMMA {
			comma := p.pos
			p.next()
			if p.tok == token.RBRACK {
				trailingComma = comma
				break
			}
			args = append(args, p.parseRhs())
		}
		p.exprLev--
	}
	rbrack := p.expect(token.RBRACK)

	if len(args) == 0 {
		// x []E
		elt := p.parseType()
		return x, &ast.ArrayType{Lbrack: lbrack, Elt: elt}
	}

	// x [P]E or x[P]
	if len(args) == 1 {
		elt := p.tryIdentOrType()
		if elt != nil {
			// x [P]E
			if trailingComma.IsValid() {
				// Trailing commas are invalid in array type fields.
				p.error(trailingComma, "unexpected comma; expecting ]")
			}
			return x, &ast.ArrayType{Lbrack: lbrack, Len: args[0], Elt: elt}
		}
	}

	// x[P], x[P1, P2], ...
	return nil, packIndexExpr(x, lbrack, args, rbrack)
}
// parseFieldDecl parses one field declaration in a struct type: either a
// name list followed by a type, or an embedded (possibly pointer-to,
// qualified, or instantiated) type — with an optional tag string, and
// terminated by a semicolon.
func (p *parser) parseFieldDecl() *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}

	doc := p.leadComment

	var names []*ast.Ident
	var typ ast.Expr
	switch p.tok {
	case token.IDENT:
		name := p.parseIdent()
		if p.tok == token.PERIOD || p.tok == token.STRING || p.tok == token.SEMICOLON || p.tok == token.RBRACE {
			// embedded type
			typ = name
			if p.tok == token.PERIOD {
				typ = p.parseQualifiedIdent(name)
			}
		} else {
			// name1, name2, ... T
			names = []*ast.Ident{name}
			for p.tok == token.COMMA {
				p.next()
				names = append(names, p.parseIdent())
			}
			// Careful dance: We don't know if we have an embedded instantiated
			// type T[P1, P2, ...] or a field T of array type []E or [P]E.
			if len(names) == 1 && p.tok == token.LBRACK {
				name, typ = p.parseArrayFieldOrTypeInstance(name)
				if name == nil {
					names = nil
				}
			} else {
				// T P
				typ = p.parseType()
			}
		}
	case token.MUL:
		star := p.pos
		p.next()
		if p.tok == token.LPAREN {
			// *(T)
			p.error(p.pos, "cannot parenthesize embedded type")
			p.next()
			typ = p.parseQualifiedIdent(nil)
			// expect closing ')' but no need to complain if missing
			if p.tok == token.RPAREN {
				p.next()
			}
		} else {
			// *T
			typ = p.parseQualifiedIdent(nil)
		}
		typ = &ast.StarExpr{Star: star, X: typ}

	case token.LPAREN:
		p.error(p.pos, "cannot parenthesize embedded type")
		p.next()
		if p.tok == token.MUL {
			// (*T)
			star := p.pos
			p.next()
			typ = &ast.StarExpr{Star: star, X: p.parseQualifiedIdent(nil)}
		} else {
			// (T)
			typ = p.parseQualifiedIdent(nil)
		}
		// expect closing ')' but no need to complain if missing
		if p.tok == token.RPAREN {
			p.next()
		}

	default:
		pos := p.pos
		p.errorExpected(pos, "field name or embedded type")
		p.advance(exprEnd)
		typ = &ast.BadExpr{From: pos, To: p.pos}
	}

	// optional tag string
	var tag *ast.BasicLit
	if p.tok == token.STRING {
		tag = &ast.BasicLit{ValuePos: p.pos, ValueEnd: p.end(), Kind: p.tok, Value: p.lit}
		p.next()
	}

	comment := p.expectSemi()

	field := &ast.Field{Doc: doc, Names: names, Type: typ, Tag: tag, Comment: comment}
	return field
}
// parseStructType parses a struct type "struct { ... }".
func (p *parser) parseStructType() *ast.StructType {
	if p.trace {
		defer un(trace(p, "StructType"))
	}

	pos := p.expect(token.STRUCT)
	lbrace := p.expect(token.LBRACE)
	var fields []*ast.Field
	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
		// A field declaration cannot start with '(', but we accept it
		// here for more robust parsing and better error messages;
		// parseFieldDecl checks and complains if necessary.
		fields = append(fields, p.parseFieldDecl())
	}
	rbrace := p.expect(token.RBRACE)

	fieldList := &ast.FieldList{
		Opening: lbrace,
		List:    fields,
		Closing: rbrace,
	}
	return &ast.StructType{Struct: pos, Fields: fieldList}
}
// parsePointerType parses a pointer type "*T".
func (p *parser) parsePointerType() *ast.StarExpr {
	if p.trace {
		defer un(trace(p, "PointerType"))
	}

	pos := p.expect(token.MUL)
	return &ast.StarExpr{Star: pos, X: p.parseType()}
}
// parseDotsType parses a variadic parameter type "...T".
func (p *parser) parseDotsType() *ast.Ellipsis {
	if p.trace {
		defer un(trace(p, "DotsType"))
	}

	pos := p.expect(token.ELLIPSIS)
	return &ast.Ellipsis{Ellipsis: pos, Elt: p.parseType()}
}
// A field is a (name, type) pair collected while parsing a parameter
// list; it mirrors the syntax parser's field representation (see the
// note in parseParameterList).
type field struct {
	name *ast.Ident
	typ  ast.Expr
}
// parseParamDecl parses a single parameter declaration. If name != nil, it
// is the first (already consumed) identifier of the declaration.
// typeSetsOK indicates that type-set notation ("~T", "A|B") is permitted,
// i.e. the list being parsed is a type parameter list.
func (p *parser) parseParamDecl(name *ast.Ident, typeSetsOK bool) (f field) {
	// TODO(rFindley) refactor to be more similar to paramDeclOrNil in the syntax
	// package
	if p.trace {
		defer un(trace(p, "ParamDecl"))
	}

	ptok := p.tok
	if name != nil {
		// Pretend the consumed name is the current token so the switch
		// below takes the token.IDENT case; ptok is restored there.
		p.tok = token.IDENT // force token.IDENT case in switch below
	} else if typeSetsOK && p.tok == token.TILDE {
		// "~" ...
		return field{nil, p.embeddedElem(nil)}
	}

	switch p.tok {
	case token.IDENT:
		// name
		if name != nil {
			f.name = name
			p.tok = ptok
		} else {
			f.name = p.parseIdent()
		}
		switch p.tok {
		case token.IDENT, token.MUL, token.ARROW, token.FUNC, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
			// name type
			f.typ = p.parseType()

		case token.LBRACK:
			// name "[" type1, ..., typeN "]" or name "[" n "]" type
			f.name, f.typ = p.parseArrayFieldOrTypeInstance(f.name)

		case token.ELLIPSIS:
			// name "..." type
			f.typ = p.parseDotsType()
			return // don't allow ...type "|" ...

		case token.PERIOD:
			// name "." ...
			f.typ = p.parseQualifiedIdent(f.name)
			f.name = nil

		case token.TILDE:
			if typeSetsOK {
				f.typ = p.embeddedElem(nil)
				return
			}

		case token.OR:
			if typeSetsOK {
				// name "|" typeset
				f.typ = p.embeddedElem(f.name)
				f.name = nil
				return
			}
		}

	case token.MUL, token.ARROW, token.FUNC, token.LBRACK, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
		// type
		f.typ = p.parseType()

	case token.ELLIPSIS:
		// "..." type
		// (always accepted)
		f.typ = p.parseDotsType()
		return // don't allow ...type "|" ...

	default:
		// TODO(rfindley): this is incorrect in the case of type parameter lists
		// (should be "']'" in that case)
		p.errorExpected(p.pos, "')'")
		p.advance(exprEnd)
	}

	// [name] type "|"
	if typeSetsOK && p.tok == token.OR && f.typ != nil {
		f.typ = p.embeddedElem(f.typ)
	}

	return
}
// parseParameterList parses a comma-separated list of parameter or type
// parameter declarations terminated by closing (')' for value parameters,
// ']' for type parameters). name0 and typ0, if non-nil, are the first name
// and type of the first declaration, already consumed by the caller.
// dddok reports whether a final variadic "..." parameter is permitted.
func (p *parser) parseParameterList(name0 *ast.Ident, typ0 ast.Expr, closing token.Token, dddok bool) (params []*ast.Field) {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}

	// Type parameters are the only parameter list closed by ']'.
	tparams := closing == token.RBRACK

	pos0 := p.pos
	if name0 != nil {
		pos0 = name0.Pos()
	} else if typ0 != nil {
		pos0 = typ0.Pos()
	}

	// Note: The code below matches the corresponding code in the syntax
	// parser closely. Changes must be reflected in either parser.
	// For the code to match, we use the local []field list that
	// corresponds to []syntax.Field. At the end, the list must be
	// converted into an []*ast.Field.

	var list []field
	var named int // number of parameters that have an explicit name and type
	var typed int // number of parameters that have an explicit type

	for name0 != nil || p.tok != closing && p.tok != token.EOF {
		var par field
		if typ0 != nil {
			if tparams {
				typ0 = p.embeddedElem(typ0)
			}
			par = field{name0, typ0}
		} else {
			par = p.parseParamDecl(name0, tparams)
		}
		name0 = nil // 1st name was consumed if present
		typ0 = nil  // 1st typ was consumed if present
		if par.name != nil || par.typ != nil {
			list = append(list, par)
			if par.name != nil && par.typ != nil {
				named++
			}
			if par.typ != nil {
				typed++
			}
		}
		if !p.atComma("parameter list", closing) {
			break
		}
		p.next()
	}

	if len(list) == 0 {
		return // not uncommon
	}

	// distribute parameter types (len(list) > 0)
	if named == 0 {
		// all unnamed => found names are type names
		for i := range list {
			par := &list[i]
			if typ := par.name; typ != nil {
				par.typ = typ
				par.name = nil
			}
		}
		if tparams {
			// This is the same error handling as below, adjusted for type parameters only.
			// See comment below for details. (go.dev/issue/64534)
			var errPos token.Pos
			var msg string
			if named == typed /* same as typed == 0 */ {
				errPos = p.pos // position error at closing ]
				msg = "missing type constraint"
			} else {
				errPos = pos0 // position at opening [ or first name
				msg = "missing type parameter name"
				if len(list) == 1 {
					msg += " or invalid array length"
				}
			}
			p.error(errPos, msg)
		}
	} else if named != len(list) {
		// some named or we're in a type parameter list => all must be named
		var errPos token.Pos // left-most error position (or invalid)
		var typ ast.Expr     // current type (from right to left)
		for i := range list {
			if par := &list[len(list)-i-1]; par.typ != nil {
				typ = par.typ
				if par.name == nil {
					errPos = typ.Pos()
					n := ast.NewIdent("_")
					n.NamePos = errPos // correct position
					par.name = n
				}
			} else if typ != nil {
				par.typ = typ
			} else {
				// par.typ == nil && typ == nil => we only have a par.name
				errPos = par.name.Pos()
				par.typ = &ast.BadExpr{From: errPos, To: p.pos}
			}
		}
		if errPos.IsValid() {
			// Not all parameters are named because named != len(list).
			// If named == typed, there must be parameters that have no types.
			// They must be at the end of the parameter list, otherwise types
			// would have been filled in by the right-to-left sweep above and
			// there would be no error.
			// If tparams is set, the parameter list is a type parameter list.
			var msg string
			if named == typed {
				errPos = p.pos // position error at closing token ) or ]
				if tparams {
					msg = "missing type constraint"
				} else {
					msg = "missing parameter type"
				}
			} else {
				if tparams {
					msg = "missing type parameter name"
					// go.dev/issue/60812
					if len(list) == 1 {
						msg += " or invalid array length"
					}
				} else {
					msg = "missing parameter name"
				}
			}
			p.error(errPos, msg)
		}
	}

	// check use of ...
	first := true // only report first occurrence
	for i := range list {
		f := &list[i]
		if t, _ := f.typ.(*ast.Ellipsis); t != nil && (!dddok || i+1 < len(list)) {
			if first {
				first = false
				if dddok {
					p.error(t.Ellipsis, "can only use ... with final parameter")
				} else {
					p.error(t.Ellipsis, "invalid use of ...")
				}
			}
			// use T instead of invalid ...T
			// TODO(gri) would like to use `f.typ = t.Elt` but that causes problems
			// with the resolver in cases of reuse of the same identifier
			f.typ = &ast.BadExpr{From: t.Pos(), To: t.End()}
		}
	}

	// Convert list to []*ast.Field.
	// If list contains types only, each type gets its own ast.Field.
	if named == 0 {
		// parameter list consists of types only
		for _, par := range list {
			assert(par.typ != nil, "nil type in unnamed parameter list")
			params = append(params, &ast.Field{Type: par.typ})
		}
		return
	}

	// If the parameter list consists of named parameters with types,
	// collect all names with the same types into a single ast.Field.
	var names []*ast.Ident
	var typ ast.Expr
	addParams := func() {
		assert(typ != nil, "nil type in named parameter list")
		field := &ast.Field{Names: names, Type: typ}
		params = append(params, field)
		names = nil
	}
	for _, par := range list {
		if par.typ != typ {
			if len(names) > 0 {
				addParams()
			}
			typ = par.typ
		}
		names = append(names, par.name)
	}
	if len(names) > 0 {
		addParams()
	}
	return
}
// parseTypeParameters parses a bracketed type parameter list. It returns
// nil (after reporting an error) when the list is empty, to avoid
// follow-on errors.
func (p *parser) parseTypeParameters() *ast.FieldList {
	if p.trace {
		defer un(trace(p, "TypeParameters"))
	}

	lbrack := p.expect(token.LBRACK)
	var tparams []*ast.Field
	if p.tok != token.RBRACK {
		tparams = p.parseParameterList(nil, nil, token.RBRACK, false)
	}
	rbrack := p.expect(token.RBRACK)

	if len(tparams) == 0 {
		p.error(rbrack, "empty type parameter list")
		return nil // avoid follow-on errors
	}

	return &ast.FieldList{Opening: lbrack, List: tparams, Closing: rbrack}
}
// parseParameters parses a parenthesized parameter or result list. When
// result is true and no '(' follows, a single unparenthesized type is
// also accepted as the result list; nil is returned if there is none.
func (p *parser) parseParameters(result bool) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Parameters"))
	}

	if !result || p.tok == token.LPAREN {
		opening := p.expect(token.LPAREN)
		var fields []*ast.Field
		if p.tok != token.RPAREN {
			fields = p.parseParameterList(nil, nil, token.RPAREN, !result)
		}
		closing := p.expect(token.RPAREN)
		return &ast.FieldList{Opening: opening, List: fields, Closing: closing}
	}

	// result position without parentheses: try a single bare type
	typ := p.tryIdentOrType()
	if typ == nil {
		return nil
	}
	return &ast.FieldList{List: []*ast.Field{{Type: typ}}}
}
// parseFuncType parses a function type. A type parameter list is accepted
// for more tolerant parsing but rejected with an error, since function
// types cannot be generic.
func (p *parser) parseFuncType() *ast.FuncType {
	if p.trace {
		defer un(trace(p, "FuncType"))
	}

	funcPos := p.expect(token.FUNC)
	if p.tok == token.LBRACK {
		// accept type parameters for more tolerant parsing but complain
		if tparams := p.parseTypeParameters(); tparams != nil {
			p.error(tparams.Opening, "function type must have no type parameters")
		}
	}
	return &ast.FuncType{
		Func:    funcPos,
		Params:  p.parseParameters(false),
		Results: p.parseParameters(true),
	}
}
// parseMethodSpec parses one interface element that begins with a type name:
// a method signature, an embedded type name, or an embedded instantiated
// (generic) type. Names is non-nil only for a method. The trailing comment
// is attached by the caller (see parseInterfaceType).
func (p *parser) parseMethodSpec() *ast.Field {
if p.trace {
defer un(trace(p, "MethodSpec"))
}
doc := p.leadComment
var idents []*ast.Ident
var typ ast.Expr
x := p.parseTypeName(nil)
if ident, _ := x.(*ast.Ident); ident != nil {
switch {
case p.tok == token.LBRACK:
// generic method or embedded instantiated type
lbrack := p.pos
p.next()
p.exprLev++
x := p.parseExpr()
p.exprLev--
// A lone identifier followed by something other than ',' or ']'
// must be a type parameter name; otherwise x is the first type
// argument of an instantiation.
if name0, _ := x.(*ast.Ident); name0 != nil && p.tok != token.COMMA && p.tok != token.RBRACK {
// generic method m[T any]
//
// Interface methods do not have type parameters. We parse them for a
// better error message and improved error recovery.
_ = p.parseParameterList(name0, nil, token.RBRACK, false)
_ = p.expect(token.RBRACK)
p.error(lbrack, "interface method must have no type parameters")
// TODO(rfindley) refactor to share code with parseFuncType.
params := p.parseParameters(false)
results := p.parseParameters(true)
idents = []*ast.Ident{ident}
typ = &ast.FuncType{
Func: token.NoPos,
Params: params,
Results: results,
}
} else {
// embedded instantiated type
// TODO(rfindley) should resolve all identifiers in x.
list := []ast.Expr{x}
if p.atComma("type argument list", token.RBRACK) {
p.exprLev++
p.next()
// collect the remaining comma-separated type arguments
for p.tok != token.RBRACK && p.tok != token.EOF {
list = append(list, p.parseType())
if !p.atComma("type argument list", token.RBRACK) {
break
}
p.next()
}
p.exprLev--
}
rbrack := p.expectClosing(token.RBRACK, "type argument list")
typ = packIndexExpr(ident, lbrack, list, rbrack)
}
case p.tok == token.LPAREN:
// ordinary method
// TODO(rfindley) refactor to share code with parseFuncType.
params := p.parseParameters(false)
results := p.parseParameters(true)
idents = []*ast.Ident{ident}
typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
default:
// embedded type
typ = x
}
} else {
// embedded, possibly instantiated type
typ = x
if p.tok == token.LBRACK {
// embedded instantiated interface
typ = p.parseTypeInstance(typ)
}
}
// Comment is added at the callsite: the field below may joined with
// additional type specs using '|'.
// TODO(rfindley) this should be refactored.
// TODO(rfindley) add more tests for comment handling.
return &ast.Field{Doc: doc, Names: idents, Type: typ}
}
// embeddedElem parses an interface element: one or more embedded terms
// joined by '|'. If x is non-nil it is used as the already-parsed first
// term.
func (p *parser) embeddedElem(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "EmbeddedElem"))
	}

	if x == nil {
		x = p.embeddedTerm()
	}
	// fold further terms into a left-associative chain of '|' expressions
	for p.tok == token.OR {
		opPos := p.pos
		p.next()
		x = &ast.BinaryExpr{X: x, OpPos: opPos, Op: token.OR, Y: p.embeddedTerm()}
	}
	return x
}
// embeddedTerm parses a single term of an interface element: an
// underlying-type term "~T" or a plain type. A BadExpr is returned
// when neither is present.
func (p *parser) embeddedTerm() ast.Expr {
	if p.trace {
		defer un(trace(p, "EmbeddedTerm"))
	}

	if p.tok == token.TILDE {
		term := &ast.UnaryExpr{OpPos: p.pos, Op: token.TILDE}
		p.next()
		term.X = p.parseType()
		return term
	}

	if typ := p.tryIdentOrType(); typ != nil {
		return typ
	}

	// neither ~T nor a type: report and resynchronize
	pos := p.pos
	p.errorExpected(pos, "~ term or type")
	p.advance(exprEnd)
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseInterfaceType parses an interface type literal, collecting method
// specs and embedded elements (type names, ~T terms, and A|B unions) into
// a single field list.
func (p *parser) parseInterfaceType() *ast.InterfaceType {
if p.trace {
defer un(trace(p, "InterfaceType"))
}
pos := p.expect(token.INTERFACE)
lbrace := p.expect(token.LBRACE)
var list []*ast.Field
parseElements:
for {
switch {
case p.tok == token.IDENT:
// method spec or embedded type name; if no method name was found,
// the element may continue as a '|' union
f := p.parseMethodSpec()
if f.Names == nil {
f.Type = p.embeddedElem(f.Type)
}
f.Comment = p.expectSemi()
list = append(list, f)
case p.tok == token.TILDE:
// underlying-type term, possibly extended to a '|' union
typ := p.embeddedElem(nil)
comment := p.expectSemi()
list = append(list, &ast.Field{Type: typ, Comment: comment})
default:
// any other type starts an embedded element;
// otherwise the interface body is done
if t := p.tryIdentOrType(); t != nil {
typ := p.embeddedElem(t)
comment := p.expectSemi()
list = append(list, &ast.Field{Type: typ, Comment: comment})
} else {
break parseElements
}
}
}
// TODO(rfindley): the error produced here could be improved, since we could
// accept an identifier, 'type', or a '}' at this point.
rbrace := p.expect(token.RBRACE)
return &ast.InterfaceType{
Interface: pos,
Methods: &ast.FieldList{
Opening: lbrace,
List: list,
Closing: rbrace,
},
}
}
// parseMapType parses a map type "map[K]V".
func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}

	mapPos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	keyType := p.parseType()
	p.expect(token.RBRACK)
	valueType := p.parseType()

	return &ast.MapType{Map: mapPos, Key: keyType, Value: valueType}
}
// parseChanType parses a channel type: "chan T", "chan<- T", or "<-chan T".
func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}

	begin := p.pos
	direction := ast.SEND | ast.RECV // bidirectional unless an arrow follows
	var arrowPos token.Pos
	if p.tok == token.CHAN {
		p.next()
		if p.tok == token.ARROW {
			// chan<- T: send-only
			arrowPos = p.pos
			p.next()
			direction = ast.SEND
		}
	} else {
		// <-chan T: receive-only
		arrowPos = p.expect(token.ARROW)
		p.expect(token.CHAN)
		direction = ast.RECV
	}
	elem := p.parseType()

	return &ast.ChanType{Begin: begin, Arrow: arrowPos, Dir: direction, Value: elem}
}
// parseTypeInstance parses the bracketed type argument list of a generic
// type instantiation applied to typ. An empty argument list is reported as
// an error and an IndexExpr with a BadExpr index is returned so the result
// is still a valid AST.
func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "TypeInstance"))
}
opening := p.expect(token.LBRACK)
p.exprLev++
var list []ast.Expr
// comma-separated type arguments until ']' or EOF
for p.tok != token.RBRACK && p.tok != token.EOF {
list = append(list, p.parseType())
if !p.atComma("type argument list", token.RBRACK) {
break
}
p.next()
}
p.exprLev--
closing := p.expectClosing(token.RBRACK, "type argument list")
if len(list) == 0 {
p.errorExpected(closing, "type argument list")
return &ast.IndexExpr{
X: typ,
Lbrack: opening,
Index: &ast.BadExpr{From: opening + 1, To: closing},
Rbrack: closing,
}
}
return packIndexExpr(typ, opening, list, closing)
}
// tryIdentOrType parses a type if the current token can start one
// (identifier, array/slice, struct, pointer, func, interface, map,
// channel, or parenthesized type) and returns nil otherwise, without
// reporting an error.
func (p *parser) tryIdentOrType() ast.Expr {
defer decNestLev(incNestLev(p))
switch p.tok {
case token.IDENT:
// named type, possibly instantiated with type arguments
typ := p.parseTypeName(nil)
if p.tok == token.LBRACK {
typ = p.parseTypeInstance(typ)
}
return typ
case token.LBRACK:
// array or slice type
lbrack := p.expect(token.LBRACK)
return p.parseArrayType(lbrack, nil)
case token.STRUCT:
return p.parseStructType()
case token.MUL:
return p.parsePointerType()
case token.FUNC:
return p.parseFuncType()
case token.INTERFACE:
return p.parseInterfaceType()
case token.MAP:
return p.parseMapType()
case token.CHAN, token.ARROW:
return p.parseChanType()
case token.LPAREN:
// parenthesized type
lparen := p.pos
p.next()
typ := p.parseType()
rparen := p.expect(token.RPAREN)
return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
}
// no type found
return nil
}
// ----------------------------------------------------------------------------
// Blocks
// parseStmtList parses statements until a token that terminates a
// statement list: 'case', 'default', '}', or EOF.
func (p *parser) parseStmtList() (list []ast.Stmt) {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}
	for {
		switch p.tok {
		case token.CASE, token.DEFAULT, token.RBRACE, token.EOF:
			return
		}
		list = append(list, p.parseStmt())
	}
}
// parseBody parses a function body block.
func (p *parser) parseBody() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "Body"))
	}

	opening := p.expect(token.LBRACE)
	stmts := p.parseStmtList()
	closing := p.expect2(token.RBRACE)

	return &ast.BlockStmt{Lbrace: opening, List: stmts, Rbrace: closing}
}
// parseBlockStmt parses a braced statement block.
func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}

	opening := p.expect(token.LBRACE)
	body := p.parseStmtList()
	closing := p.expect2(token.RBRACE)

	return &ast.BlockStmt{Lbrace: opening, List: body, Rbrace: closing}
}
// ----------------------------------------------------------------------------
// Expressions
// parseFuncTypeOrLit parses a function type and, if a '{' follows,
// continues with the body to produce a function literal.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}

	typ := p.parseFuncType()
	if p.tok != token.LBRACE {
		return typ // function type only
	}

	// function literal: parse the body at increased expression nesting
	p.exprLev++
	body := p.parseBody()
	p.exprLev--

	return &ast.FuncLit{Type: typ, Body: body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
//
// Handles identifiers, basic literals, parenthesized expressions,
// function types/literals, and composite types; anything else is
// reported as an error and yields a BadExpr.
func (p *parser) parseOperand() ast.Expr {
if p.trace {
defer un(trace(p, "Operand"))
}
switch p.tok {
case token.IDENT:
x := p.parseIdent()
return x
case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
x := &ast.BasicLit{ValuePos: p.pos, ValueEnd: p.end(), Kind: p.tok, Value: p.lit}
p.next()
return x
case token.LPAREN:
lparen := p.pos
p.next()
p.exprLev++
x := p.parseRhs() // types may be parenthesized: (some type)
p.exprLev--
rparen := p.expect(token.RPAREN)
return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
case token.FUNC:
return p.parseFuncTypeOrLit()
}
if typ := p.tryIdentOrType(); typ != nil { // do not consume trailing type parameters
// could be type for composite literal or conversion
// (an identifier would have been handled by the IDENT case above)
_, isIdent := typ.(*ast.Ident)
assert(!isIdent, "type cannot be identifier")
return typ
}
// we have an error
pos := p.pos
p.errorExpected(pos, "operand")
p.advance(stmtStart)
return &ast.BadExpr{From: pos, To: p.pos}
}
// parseSelector parses the identifier following a '.' and returns the
// selector expression x.sel.
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "Selector"))
	}
	return &ast.SelectorExpr{X: x, Sel: p.parseIdent()}
}
// parseTypeAssertion parses "(T)" or "(type)" following expression x.
// For the type-switch form x.(type) the resulting Type field is nil.
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeAssertion"))
	}

	opening := p.expect(token.LPAREN)
	var assertedType ast.Expr
	if p.tok != token.TYPE {
		assertedType = p.parseType()
	} else {
		p.next() // type switch: leave assertedType nil
	}
	closing := p.expect(token.RPAREN)

	return &ast.TypeAssertExpr{X: x, Type: assertedType, Lparen: opening, Rparen: closing}
}
// parseIndexOrSliceOrInstance parses a '['-suffix of x and disambiguates
// between an index expression x[i], a (2- or 3-index) slice expression
// x[lo:hi:max], and a generic instantiation x[T1, T2, ...], based on the
// first colon or comma encountered.
func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "parseIndexOrSliceOrInstance"))
}
lbrack := p.expect(token.LBRACK)
if p.tok == token.RBRACK {
// empty index, slice or index expressions are not permitted;
// accept them for parsing tolerance, but complain
p.errorExpected(p.pos, "operand")
rbrack := p.pos
p.next()
return &ast.IndexExpr{
X: x,
Lbrack: lbrack,
Index: &ast.BadExpr{From: rbrack, To: rbrack},
Rbrack: rbrack,
}
}
p.exprLev++
const N = 3 // change the 3 to 2 to disable 3-index slices
var args []ast.Expr
var index [N]ast.Expr
var colons [N - 1]token.Pos
if p.tok != token.COLON {
// We can't know if we have an index expression or a type instantiation;
// so even if we see a (named) type we are not going to be in type context.
index[0] = p.parseRhs()
}
ncolons := 0
switch p.tok {
case token.COLON:
// slice expression
// collect up to N-1 colons and the operands between them
for p.tok == token.COLON && ncolons < len(colons) {
colons[ncolons] = p.pos
ncolons++
p.next()
if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
index[ncolons] = p.parseRhs()
}
}
case token.COMMA:
// instance expression
args = append(args, index[0])
for p.tok == token.COMMA {
p.next()
if p.tok != token.RBRACK && p.tok != token.EOF {
args = append(args, p.parseType())
}
}
}
p.exprLev--
rbrack := p.expect(token.RBRACK)
if ncolons > 0 {
// slice expression
slice3 := false
if ncolons == 2 {
slice3 = true
// Check presence of middle and final index here rather than during type-checking
// to prevent erroneous programs from passing through gofmt (was go.dev/issue/7305).
if index[1] == nil {
p.error(colons[0], "middle index required in 3-index slice")
index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
}
if index[2] == nil {
p.error(colons[1], "final index required in 3-index slice")
index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
}
}
return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
}
if len(args) == 0 {
// index expression
return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
}
// instance expression
return packIndexExpr(x, lbrack, args, rbrack)
}
// parseCallOrConversion parses the parenthesized argument list of a call
// (or conversion) applied to fun, including an optional trailing "...".
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
if p.trace {
defer un(trace(p, "CallOrConversion"))
}
lparen := p.expect(token.LPAREN)
p.exprLev++
var list []ast.Expr
var ellipsis token.Pos
// stop at ')' or EOF, or after the argument that carried "..."
for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
list = append(list, p.parseRhs()) // builtins may expect a type: make(some type, ...)
if p.tok == token.ELLIPSIS {
ellipsis = p.pos
p.next()
}
if !p.atComma("argument list", token.RPAREN) {
break
}
p.next()
}
p.exprLev--
rparen := p.expectClosing(token.RPAREN, "argument list")
return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
// parseValue parses a single value of a composite literal element:
// either a nested literal value "{...}" or an expression.
func (p *parser) parseValue() ast.Expr {
	if p.trace {
		// Fix: the trace label previously said "Element" (copied from
		// parseElement); use this function's own name so trace output
		// distinguishes the two, consistent with every other method.
		defer un(trace(p, "Value"))
	}

	if p.tok == token.LBRACE {
		return p.parseLiteralValue(nil)
	}

	return p.parseExpr()
}
// parseElement parses one composite literal element: a plain value or a
// key-value pair "key: value".
func (p *parser) parseElement() ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}

	elem := p.parseValue()
	if p.tok != token.COLON {
		return elem
	}
	colonPos := p.pos
	p.next()
	return &ast.KeyValueExpr{Key: elem, Colon: colonPos, Value: p.parseValue()}
}
// parseElementList parses the comma-separated elements of a composite
// literal, stopping at '}' or EOF.
func (p *parser) parseElementList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ElementList"))
	}
	for {
		if p.tok == token.RBRACE || p.tok == token.EOF {
			return
		}
		list = append(list, p.parseElement())
		if !p.atComma("composite literal", token.RBRACE) {
			return
		}
		p.next()
	}
}
// parseLiteralValue parses the braced element list of a composite
// literal; typ is the (possibly nil) literal type parsed by the caller.
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "LiteralValue"))
	}

	opening := p.expect(token.LBRACE)
	var elements []ast.Expr
	p.exprLev++
	if p.tok != token.RBRACE {
		elements = p.parseElementList()
	}
	p.exprLev--
	closing := p.expectClosing(token.RBRACE, "composite literal")

	return &ast.CompositeLit{Type: typ, Lbrace: opening, Elts: elements, Rbrace: closing}
}
// parsePrimaryExpr parses a primary expression: an operand followed by any
// number of selector, type assertion, index/slice/instantiation, call, or
// composite-literal suffixes. If x is non-nil it is used as the
// already-parsed operand.
func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "PrimaryExpr"))
}
if x == nil {
x = p.parseOperand()
}
// We track the nesting here rather than at the entry for the function,
// since it can iteratively produce a nested output, and we want to
// limit how deep a structure we generate.
var n int
defer func() { p.nestLev -= n }()
for n = 1; ; n++ {
incNestLev(p)
switch p.tok {
case token.PERIOD:
p.next()
switch p.tok {
case token.IDENT:
x = p.parseSelector(x)
case token.LPAREN:
x = p.parseTypeAssertion(x)
default:
// recover with a blank "_" selector so the result is valid
pos := p.pos
p.errorExpected(pos, "selector or type assertion")
// TODO(rFindley) The check for token.RBRACE below is a targeted fix
// to error recovery sufficient to make the x/tools tests to
// pass with the new parsing logic introduced for type
// parameters. Remove this once error recovery has been
// more generally reconsidered.
if p.tok != token.RBRACE {
p.next() // make progress
}
sel := &ast.Ident{NamePos: pos, Name: "_"}
x = &ast.SelectorExpr{X: x, Sel: sel}
}
case token.LBRACK:
x = p.parseIndexOrSliceOrInstance(x)
case token.LPAREN:
x = p.parseCallOrConversion(x)
case token.LBRACE:
// operand may have returned a parenthesized complit
// type; accept it but complain if we have a complit
t := ast.Unparen(x)
// determine if '{' belongs to a composite literal or a block statement
switch t.(type) {
case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr:
if p.exprLev < 0 {
return x
}
// x is possibly a composite literal type
case *ast.IndexExpr, *ast.IndexListExpr:
if p.exprLev < 0 {
return x
}
// x is possibly a composite literal type
case *ast.ArrayType, *ast.StructType, *ast.MapType:
// x is a composite literal type
default:
return x
}
if t != x {
p.error(t.Pos(), "cannot parenthesize type in composite literal")
// already progressed, no need to advance
}
x = p.parseLiteralValue(x)
default:
return x
}
}
}
// parseUnaryExpr parses a unary expression, including the ambiguous '<-'
// prefix which may begin either a receive operation or a receive-only
// channel type (resolved after the operand is parsed).
func (p *parser) parseUnaryExpr() ast.Expr {
defer decNestLev(incNestLev(p))
if p.trace {
defer un(trace(p, "UnaryExpr"))
}
switch p.tok {
case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.TILDE:
pos, op := p.pos, p.tok
p.next()
x := p.parseUnaryExpr()
return &ast.UnaryExpr{OpPos: pos, Op: op, X: x}
case token.ARROW:
// channel type or receive expression
arrow := p.pos
p.next()
// If the next token is token.CHAN we still don't know if it
// is a channel type or a receive operation - we only know
// once we have found the end of the unary expression. There
// are two cases:
//
// <- type => (<-type) must be channel type
// <- expr => <-(expr) is a receive from an expression
//
// In the first case, the arrow must be re-associated with
// the channel type parsed already:
//
// <- (chan type) => (<-chan type)
// <- (chan<- type) => (<-chan (<-type))
x := p.parseUnaryExpr()
// determine which case we have
if typ, ok := x.(*ast.ChanType); ok {
// (<-type)
// re-associate position info and <-
// walk down the chain of channel types, pushing the arrow inward
dir := ast.SEND
for ok && dir == ast.SEND {
if typ.Dir == ast.RECV {
// error: (<-type) is (<-(<-chan T))
p.errorExpected(typ.Arrow, "'chan'")
}
arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
dir, typ.Dir = typ.Dir, ast.RECV
typ, ok = typ.Value.(*ast.ChanType)
}
if dir == ast.SEND {
p.errorExpected(arrow, "channel type")
}
return x
}
// <-(expr)
return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: x}
case token.MUL:
// pointer type or unary "*" expression
pos := p.pos
p.next()
x := p.parseUnaryExpr()
return &ast.StarExpr{Star: pos, X: x}
}
return p.parsePrimaryExpr(nil)
}
// tokPrec returns the current token and its binary-operator precedence.
// In right-hand-side position, '=' is treated as '==' so a mistyped
// assignment still parses as a comparison for error recovery.
func (p *parser) tokPrec() (token.Token, int) {
	tok := p.tok
	if tok == token.ASSIGN && p.inRhs {
		tok = token.EQL
	}
	return tok, tok.Precedence()
}
// parseBinaryExpr parses a (possibly) binary expression.
// If x is non-nil, it is used as the left operand.
//
// prec1 is the minimum operator precedence to consume; an operator of
// lower precedence terminates the loop and is left for the caller.
//
// TODO(rfindley): parseBinaryExpr has become overloaded. Consider refactoring.
func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int) ast.Expr {
if p.trace {
defer un(trace(p, "BinaryExpr"))
}
if x == nil {
x = p.parseUnaryExpr()
}
// We track the nesting here rather than at the entry for the function,
// since it can iteratively produce a nested output, and we want to
// limit how deep a structure we generate.
var n int
defer func() { p.nestLev -= n }()
for n = 1; ; n++ {
incNestLev(p)
op, oprec := p.tokPrec()
if oprec < prec1 {
return x
}
pos := p.expect(op)
// parse the right operand one precedence level higher, so operators
// of equal precedence associate to the left
y := p.parseBinaryExpr(nil, oprec+1)
x = &ast.BinaryExpr{X: x, OpPos: pos, Op: op, Y: y}
}
}
// The result may be a type or even a raw type ([...]int).
//
// parseExpr parses an expression starting at the lowest binary operator
// precedence level.
func (p *parser) parseExpr() ast.Expr {
if p.trace {
defer un(trace(p, "Expression"))
}
return p.parseBinaryExpr(nil, token.LowestPrec+1)
}
// parseRhs parses an expression in right-hand-side position, where a
// mistyped '=' is tolerated in place of '==' (see tokPrec).
func (p *parser) parseRhs() ast.Expr {
	defer func(saved bool) { p.inRhs = saved }(p.inRhs)
	p.inRhs = true
	return p.parseExpr()
}
// ----------------------------------------------------------------------------
// Statements
// Parsing modes for parseSimpleStmt.
const (
basic = iota // ordinary simple statement
labelOk // a labeled statement is permitted
rangeOk // a range clause is permitted
)
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
//
// Depending on mode and the token after the expression list, the result is
// an assignment, labeled statement, send statement, inc/dec statement, or
// expression statement.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
if p.trace {
defer un(trace(p, "SimpleStmt"))
}
x := p.parseList(false)
switch p.tok {
case
token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
// assignment statement, possibly part of a range clause
pos, tok := p.pos, p.tok
p.next()
var y []ast.Expr
isRange := false
// a range clause is only valid after '=' or ':=' when mode permits it
if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
pos := p.pos
p.next()
y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
isRange = true
} else {
y = p.parseList(true)
}
return &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}, isRange
}
// not an assignment: only a single expression is valid from here on
if len(x) > 1 {
p.errorExpected(x[0].Pos(), "1 expression")
// continue with first expression
}
switch p.tok {
case token.COLON:
// labeled statement
colon := p.pos
p.next()
if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
// Go spec: The scope of a label is the body of the function
// in which it is declared and excludes the body of any nested
// function.
stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
return stmt, false
}
// The label declaration typically starts at x[0].Pos(), but the label
// declaration may be erroneous due to a token after that position (and
// before the ':'). If SpuriousErrors is not set, the (only) error
// reported for the line is the illegal label error instead of the token
// before the ':' that caused the problem. Thus, use the (latest) colon
// position for error reporting.
p.error(colon, "illegal label declaration")
return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
case token.ARROW:
// send statement
arrow := p.pos
p.next()
y := p.parseRhs()
return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
case token.INC, token.DEC:
// increment or decrement
s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
p.next()
return s, false
}
// expression
return &ast.ExprStmt{X: x[0]}, false
}
// parseCallExpr parses the expression after a go/defer keyword and
// requires it to be a call; callType names the keyword for error
// messages. It returns nil when the expression is not a call.
func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
	x := p.parseRhs() // could be a conversion: (some type)(x)
	if inner := ast.Unparen(x); inner != x {
		p.error(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", callType))
		x = inner
	}
	call, isCall := x.(*ast.CallExpr)
	if isCall {
		return call
	}
	if _, alreadyBad := x.(*ast.BadExpr); !alreadyBad {
		// only report error if it's a new one
		p.error(x.End(), fmt.Sprintf("expression in %s must be function call", callType))
	}
	return nil
}
// parseGoStmt parses a go statement; when the operand is not a call
// expression, a BadStmt covering the keyword is returned instead.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}

	goPos := p.expect(token.GO)
	call := p.parseCallExpr("go")
	p.expectSemi()

	if call == nil {
		return &ast.BadStmt{From: goPos, To: goPos + 2} // len("go")
	}
	return &ast.GoStmt{Go: goPos, Call: call}
}
// parseDeferStmt parses a defer statement; when the operand is not a call
// expression, a BadStmt covering the keyword is returned instead.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}

	deferPos := p.expect(token.DEFER)
	call := p.parseCallExpr("defer")
	p.expectSemi()

	if call == nil {
		return &ast.BadStmt{From: deferPos, To: deferPos + 5} // len("defer")
	}
	return &ast.DeferStmt{Defer: deferPos, Call: call}
}
// parseReturnStmt parses a return statement with an optional result list.
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}

	retPos := p.pos
	p.expect(token.RETURN)
	var results []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		results = p.parseList(true)
	}
	p.expectSemi()

	return &ast.ReturnStmt{Return: retPos, Results: results}
}
// parseBranchStmt parses a break, continue, goto, or fallthrough statement
// for the given keyword tok. A label is always parsed after goto and
// optionally after break/continue.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}

	keywordPos := p.expect(tok)
	var label *ast.Ident
	switch {
	case tok == token.GOTO:
		label = p.parseIdent()
	case (tok == token.CONTINUE || tok == token.BREAK) && p.tok == token.IDENT:
		label = p.parseIdent()
	}
	p.expectSemi()

	return &ast.BranchStmt{TokPos: keywordPos, Tok: tok, Label: label}
}
// makeExpr converts statement s into an expression, reporting an error and
// returning a BadExpr when s is not an expression statement. want describes
// the expected expression for the error message; a nil s yields nil.
func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
	if s == nil {
		return nil
	}
	if exprStmt, ok := s.(*ast.ExprStmt); ok {
		return exprStmt.X
	}

	found := "simple statement"
	if _, ok := s.(*ast.AssignStmt); ok {
		found = "assignment"
	}
	p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
	return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
// parseIfHeader is an adjusted version of parser.header
// in cmd/compile/internal/syntax/parser.go, which has
// been tuned for better error handling.
//
// It parses the optional init statement and the condition of an if
// statement, always returning a non-nil cond (a BadExpr on error).
func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
if p.tok == token.LBRACE {
p.error(p.pos, "missing condition in if statement")
cond = &ast.BadExpr{From: p.pos, To: p.pos}
return
}
// p.tok != token.LBRACE
prevLev := p.exprLev
p.exprLev = -1
if p.tok != token.SEMICOLON {
// accept potential variable declaration but complain
if p.tok == token.VAR {
p.next()
p.error(p.pos, "var declaration not allowed in if initializer")
}
init, _ = p.parseSimpleStmt(basic)
}
var condStmt ast.Stmt
// remember the separating semicolon (or implicit newline) for error
// reporting when the condition turns out to be missing
var semi struct {
pos token.Pos
lit string // ";" or "\n"; valid if pos.IsValid()
}
if p.tok != token.LBRACE {
if p.tok == token.SEMICOLON {
semi.pos = p.pos
semi.lit = p.lit
p.next()
} else {
p.expect(token.SEMICOLON)
}
if p.tok != token.LBRACE {
condStmt, _ = p.parseSimpleStmt(basic)
}
} else {
// no semicolon: what was parsed as init is actually the condition
condStmt = init
init = nil
}
if condStmt != nil {
cond = p.makeExpr(condStmt, "boolean expression")
} else if semi.pos.IsValid() {
if semi.lit == "\n" {
p.error(semi.pos, "unexpected newline, expecting { after if clause")
} else {
p.error(semi.pos, "missing condition in if statement")
}
}
// make sure we have a valid AST
if cond == nil {
cond = &ast.BadExpr{From: p.pos, To: p.pos}
}
p.exprLev = prevLev
return
}
// parseIfStmt parses an if statement, including any else-if chain.
func (p *parser) parseIfStmt() *ast.IfStmt {
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "IfStmt"))
	}

	ifPos := p.expect(token.IF)
	init, cond := p.parseIfHeader()
	body := p.parseBlockStmt()

	var elseBranch ast.Stmt
	if p.tok != token.ELSE {
		p.expectSemi()
	} else {
		p.next()
		switch p.tok {
		case token.IF:
			elseBranch = p.parseIfStmt()
		case token.LBRACE:
			elseBranch = p.parseBlockStmt()
			p.expectSemi()
		default:
			p.errorExpected(p.pos, "if statement or block")
			elseBranch = &ast.BadStmt{From: p.pos, To: p.pos}
		}
	}

	return &ast.IfStmt{If: ifPos, Init: init, Cond: cond, Body: body, Else: elseBranch}
}
// parseCaseClause parses a single "case x, y:" or "default:" clause of a
// switch statement, including its statement list.
func (p *parser) parseCaseClause() *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}

	casePos := p.pos
	var exprs []ast.Expr
	if p.tok != token.CASE {
		p.expect(token.DEFAULT)
	} else {
		p.next()
		exprs = p.parseList(true)
	}
	colonPos := p.expect(token.COLON)
	body := p.parseStmtList()

	return &ast.CaseClause{Case: casePos, List: exprs, Colon: colonPos, Body: body}
}
func isTypeSwitchAssert(x ast.Expr) bool {
a, ok := x.(*ast.TypeAssertExpr)
return ok && a.Type == nil
}
// isTypeSwitchGuard reports whether s is a type switch guard: either the
// bare form "x.(type)" or the assignment form "v := x.(type)". The
// assignment form with '=' instead of ':=' is reported as an error but
// still treated as a guard for better error recovery.
func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
switch t := s.(type) {
case *ast.ExprStmt:
// x.(type)
return isTypeSwitchAssert(t.X)
case *ast.AssignStmt:
// v := x.(type)
if len(t.Lhs) == 1 && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0]) {
switch t.Tok {
case token.ASSIGN:
// permit v = x.(type) but complain
p.error(t.TokPos, "expected ':=', found '='")
fallthrough
case token.DEFINE:
return true
}
}
}
return false
}
// parseSwitchStmt parses an expression switch or a type switch. Up to two
// simple statements may precede the body: after a ';' the first becomes the
// init statement (s1) and the second the tag or type switch guard (s2).
func (p *parser) parseSwitchStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "SwitchStmt"))
}
pos := p.expect(token.SWITCH)
var s1, s2 ast.Stmt
if p.tok != token.LBRACE {
prevLev := p.exprLev
p.exprLev = -1
if p.tok != token.SEMICOLON {
s2, _ = p.parseSimpleStmt(basic)
}
// a ';' means what we just parsed was the init statement; shift it
// into s1 and parse the tag/guard into s2
if p.tok == token.SEMICOLON {
p.next()
s1 = s2
s2 = nil
if p.tok != token.LBRACE {
// A TypeSwitchGuard may declare a variable in addition
// to the variable declared in the initial SimpleStmt.
// Introduce extra scope to avoid redeclaration errors:
//
// switch t := 0; t := x.(T) { ... }
//
// (this code is not valid Go because the first t
// cannot be accessed and thus is never used, the extra
// scope is needed for the correct error message).
//
// If we don't have a type switch, s2 must be an expression.
// Having the extra nested but empty scope won't affect it.
s2, _ = p.parseSimpleStmt(basic)
}
}
p.exprLev = prevLev
}
typeSwitch := p.isTypeSwitchGuard(s2)
lbrace := p.expect(token.LBRACE)
var list []ast.Stmt
for p.tok == token.CASE || p.tok == token.DEFAULT {
list = append(list, p.parseCaseClause())
}
rbrace := p.expect(token.RBRACE)
p.expectSemi()
body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
if typeSwitch {
return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
}
return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
}
// parseCommClause parses one "case" or "default" clause of a select
// statement. A case comm is a send statement, a receive statement with
// assignment, or a bare receive expression.
func (p *parser) parseCommClause() *ast.CommClause {
if p.trace {
defer un(trace(p, "CommClause"))
}
pos := p.pos
var comm ast.Stmt
if p.tok == token.CASE {
p.next()
lhs := p.parseList(false)
if p.tok == token.ARROW {
// SendStmt
if len(lhs) > 1 {
p.errorExpected(lhs[0].Pos(), "1 expression")
// continue with first expression
}
arrow := p.pos
p.next()
rhs := p.parseRhs()
comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
} else {
// RecvStmt
if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
// RecvStmt with assignment
// at most two variables may receive: v, ok = <-ch
if len(lhs) > 2 {
p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
// continue with first two expressions
lhs = lhs[0:2]
}
pos := p.pos
p.next()
rhs := p.parseRhs()
comm = &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
} else {
// lhs must be single receive operation
if len(lhs) > 1 {
p.errorExpected(lhs[0].Pos(), "1 expression")
// continue with first expression
}
comm = &ast.ExprStmt{X: lhs[0]}
}
}
} else {
p.expect(token.DEFAULT)
}
colon := p.expect(token.COLON)
body := p.parseStmtList()
return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
// parseSelectStmt parses a select statement and its comm clauses.
func (p *parser) parseSelectStmt() *ast.SelectStmt {
	if p.trace {
		defer un(trace(p, "SelectStmt"))
	}

	selectPos := p.expect(token.SELECT)
	opening := p.expect(token.LBRACE)
	var clauses []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		clauses = append(clauses, p.parseCommClause())
	}
	closing := p.expect(token.RBRACE)
	p.expectSemi()

	return &ast.SelectStmt{
		Select: selectPos,
		Body:   &ast.BlockStmt{Lbrace: opening, List: clauses, Rbrace: closing},
	}
}
// parseForStmt parses a for statement in any of its forms: bare, with a
// condition, with init/cond/post clauses, or with a range clause. A range
// clause is recognized via parseSimpleStmt's isRange result (the assignment
// whose RHS is a single "range x" unary expression) and converted into a
// RangeStmt.
func (p *parser) parseForStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "ForStmt"))
}
pos := p.expect(token.FOR)
var s1, s2, s3 ast.Stmt
var isRange bool
if p.tok != token.LBRACE {
prevLev := p.exprLev
p.exprLev = -1
if p.tok != token.SEMICOLON {
if p.tok == token.RANGE {
// "for range x" (nil lhs in assignment)
pos := p.pos
p.next()
y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
s2 = &ast.AssignStmt{Rhs: y}
isRange = true
} else {
s2, isRange = p.parseSimpleStmt(rangeOk)
}
}
// three-clause form: shift what we parsed into init (s1) and parse
// the condition (s2) and post statement (s3)
if !isRange && p.tok == token.SEMICOLON {
p.next()
s1 = s2
s2 = nil
if p.tok != token.SEMICOLON {
s2, _ = p.parseSimpleStmt(basic)
}
p.expectSemi()
if p.tok != token.LBRACE {
s3, _ = p.parseSimpleStmt(basic)
}
}
p.exprLev = prevLev
}
body := p.parseBlockStmt()
p.expectSemi()
if isRange {
as := s2.(*ast.AssignStmt)
// check lhs
var key, value ast.Expr
switch len(as.Lhs) {
case 0:
// nothing to do
case 1:
key = as.Lhs[0]
case 2:
key, value = as.Lhs[0], as.Lhs[1]
default:
p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
return &ast.BadStmt{From: pos, To: body.End()}
}
// parseSimpleStmt returned a right-hand side that
// is a single unary expression of the form "range x"
x := as.Rhs[0].(*ast.UnaryExpr).X
return &ast.RangeStmt{
For: pos,
Key: key,
Value: value,
TokPos: as.TokPos,
Tok: as.Tok,
Range: as.Rhs[0].Pos(),
X: x,
Body: body,
}
}
// regular for statement
return &ast.ForStmt{
For: pos,
Init: s1,
Cond: p.makeExpr(s2, "boolean or range expression"),
Post: s3,
Body: body,
}
}
// parseStmt parses a single statement, dispatching on the current token.
// On an unexpected token an error is reported, the parser advances to the
// next statement start, and a BadStmt is returned.
func (p *parser) parseStmt() (s ast.Stmt) {
defer decNestLev(incNestLev(p))
if p.trace {
defer un(trace(p, "Statement"))
}
switch p.tok {
case token.CONST, token.TYPE, token.VAR:
s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)}
case
// tokens that may start an expression
token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types
token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
s, _ = p.parseSimpleStmt(labelOk)
// because of the required look-ahead, labeled statements are
// parsed by parseSimpleStmt - don't expect a semicolon after
// them
if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
p.expectSemi()
}
case token.GO:
s = p.parseGoStmt()
case token.DEFER:
s = p.parseDeferStmt()
case token.RETURN:
s = p.parseReturnStmt()
case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
s = p.parseBranchStmt(p.tok)
case token.LBRACE:
s = p.parseBlockStmt()
p.expectSemi()
case token.IF:
s = p.parseIfStmt()
case token.SWITCH:
s = p.parseSwitchStmt()
case token.SELECT:
s = p.parseSelectStmt()
case token.FOR:
s = p.parseForStmt()
case token.SEMICOLON:
// Is it ever possible to have an implicit semicolon
// producing an empty statement in a valid program?
// (handle correctly anyway)
s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"}
p.next()
case token.RBRACE:
// a semicolon may be omitted before a closing "}"
s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true}
default:
// no statement found
pos := p.pos
p.errorExpected(pos, "statement")
p.advance(stmtStart)
s = &ast.BadStmt{From: pos, To: p.pos}
}
return
}
// ----------------------------------------------------------------------------
// Declarations
type parseSpecFunction func(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec
// parseImportSpec parses a single import spec: an optional local package
// name ("." or an identifier) followed by the import path string literal.
// The keyword and iota arguments of parseSpecFunction are unused here.
// The resulting spec is also appended to p.imports.
func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}

	var ident *ast.Ident
	switch p.tok {
	case token.IDENT:
		ident = p.parseIdent()
	case token.PERIOD:
		ident = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	}

	pos := p.pos
	end := p.pos
	var path string
	if p.tok == token.STRING {
		path = p.lit
		end = p.end()
		p.next()
	} else if p.tok.IsLiteral() {
		// Some other literal (e.g. an int) - clearly not an import path.
		p.error(pos, "import path must be a string")
		p.next()
	} else {
		p.error(pos, "missing import path")
		p.advance(exprEnd)
	}
	comment := p.expectSemi()

	// collect imports
	spec := &ast.ImportSpec{
		Doc:     doc,
		Name:    ident,
		Path:    &ast.BasicLit{ValuePos: pos, ValueEnd: end, Kind: token.STRING, Value: path},
		Comment: comment,
	}
	p.imports = append(p.imports, spec)

	return spec
}
// parseValueSpec parses a single const or var spec (keyword is token.CONST
// or token.VAR): an identifier list with an optional type and an optional
// "=" initializer list. The iota argument is unused here.
func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, keyword.String()+"Spec"))
	}

	idents := p.parseIdentList()
	var typ ast.Expr
	var values []ast.Expr
	switch keyword {
	case token.CONST:
		// always permit optional type and initialization for more tolerant parsing
		if p.tok != token.EOF && p.tok != token.SEMICOLON && p.tok != token.RPAREN {
			typ = p.tryIdentOrType()
			if p.tok == token.ASSIGN {
				p.next()
				values = p.parseList(true)
			}
		}
	case token.VAR:
		// A var spec must have a type, an initializer, or both.
		if p.tok != token.ASSIGN {
			typ = p.parseType()
		}
		if p.tok == token.ASSIGN {
			p.next()
			values = p.parseList(true)
		}
	default:
		panic("unreachable")
	}
	comment := p.expectSemi()

	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: comment,
	}
	return spec
}
// parseGenericType parses the remainder of a generic type declaration once
// the caller has consumed "name [" plus the first type parameter pieces
// (name0 and, possibly, typ0). openPos is the position of the opening "[".
// It fills in spec.TypeParams, an optional alias "=" (spec.Assign), and
// spec.Type.
func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident, typ0 ast.Expr) {
	if p.trace {
		defer un(trace(p, "parseGenericType"))
	}

	list := p.parseParameterList(name0, typ0, token.RBRACK, false)
	closePos := p.expect(token.RBRACK)
	spec.TypeParams = &ast.FieldList{Opening: openPos, List: list, Closing: closePos}
	if p.tok == token.ASSIGN {
		// type alias
		spec.Assign = p.pos
		p.next()
	}
	spec.Type = p.parseType()
}
// parseTypeSpec parses a single type spec: a name followed by either a type
// parameter list (generic declaration), an optional alias "=", and a type.
// The hard case is disambiguating "name [" between an array/slice type and
// a type parameter list; see the inline comments below. The keyword and
// iota arguments of parseSpecFunction are unused here.
func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}

	name := p.parseIdent()
	spec := &ast.TypeSpec{Doc: doc, Name: name}

	if p.tok == token.LBRACK {
		// spec.Name "[" ...
		// array/slice type or type parameter list
		lbrack := p.pos
		p.next()
		if p.tok == token.IDENT {
			// We may have an array type or a type parameter list.
			// In either case we expect an expression x (which may
			// just be a name, or a more complex expression) which
			// we can analyze further.
			//
			// A type parameter list may have a type bound starting
			// with a "[" as in: P []E. In that case, simply parsing
			// an expression would lead to an error: P[] is invalid.
			// But since index or slice expressions are never constant
			// and thus invalid array length expressions, if the name
			// is followed by "[" it must be the start of an array or
			// slice constraint. Only if we don't see a "[" do we
			// need to parse a full expression. Notably, name <- x
			// is not a concern because name <- x is a statement and
			// not an expression.
			var x ast.Expr = p.parseIdent()
			if p.tok != token.LBRACK {
				// To parse the expression starting with name, expand
				// the call sequence we would get by passing in name
				// to parser.expr, and pass in name to parsePrimaryExpr.
				p.exprLev++
				lhs := p.parsePrimaryExpr(x)
				x = p.parseBinaryExpr(lhs, token.LowestPrec+1)
				p.exprLev--
			}
			// Analyze expression x. If we can split x into a type parameter
			// name, possibly followed by a type parameter type, we consider
			// this the start of a type parameter list, with some caveats:
			// a single name followed by "]" tilts the decision towards an
			// array declaration; a type parameter type that could also be
			// an ordinary expression but which is followed by a comma tilts
			// the decision towards a type parameter list.
			if pname, ptype := extractName(x, p.tok == token.COMMA); pname != nil && (ptype != nil || p.tok != token.RBRACK) {
				// spec.Name "[" pname ...
				// spec.Name "[" pname ptype ...
				// spec.Name "[" pname ptype "," ...
				p.parseGenericType(spec, lbrack, pname, ptype) // ptype may be nil
			} else {
				// spec.Name "[" pname "]" ...
				// spec.Name "[" x ...
				spec.Type = p.parseArrayType(lbrack, x)
			}
		} else {
			// array type
			spec.Type = p.parseArrayType(lbrack, nil)
		}
	} else {
		// no type parameters
		if p.tok == token.ASSIGN {
			// type alias
			spec.Assign = p.pos
			p.next()
		}
		spec.Type = p.parseType()
	}
	spec.Comment = p.expectSemi()

	return spec
}
// extractName splits the expression x into (name, expr) if syntactically
// x can be written as name expr. The split only happens if expr is a type
// element (per the isTypeElem predicate) or if force is set.
// If x is just a name, the result is (name, nil). If the split succeeds,
// the result is (name, expr). Otherwise the result is (nil, x).
// Examples:
//
//	x           force    name    expr
//	------------------------------------
//	P*[]int     T/F      P       *[]int
//	P*E         T        P       *E
//	P*E         F        nil     P*E
//	P([]int)    T/F      P       ([]int)
//	P(E)        T        P       (E)
//	P(E)        F        nil     P(E)
//	P*E|F|~G    T/F      P       *E|F|~G
//	P*E|F|G     T        P       *E|F|G
//	P*E|F|G     F        nil     P*E|F|G
func extractName(x ast.Expr, force bool) (*ast.Ident, ast.Expr) {
	switch e := x.(type) {
	case *ast.Ident:
		// Just a name, no type-element tail.
		return e, nil
	case *ast.BinaryExpr:
		if e.Op == token.MUL {
			// name *E: split if *E looks like a type element (or forced).
			if name, ok := e.X.(*ast.Ident); ok && (force || isTypeElem(e.Y)) {
				return name, &ast.StarExpr{Star: e.OpPos, X: e.Y}
			}
		} else if e.Op == token.OR {
			// name lhs|e.Y: recursively split the left operand; a
			// type-element right operand counts as forcing the split.
			if name, lhs := extractName(e.X, force || isTypeElem(e.Y)); name != nil && lhs != nil {
				or := *e
				or.X = lhs
				return name, &or
			}
		}
	case *ast.CallExpr:
		// name (arg): a single, non-variadic argument may be a
		// parenthesized type element.
		if name, ok := e.Fun.(*ast.Ident); ok {
			if len(e.Args) == 1 && e.Ellipsis == token.NoPos && (force || isTypeElem(e.Args[0])) {
				// (Note that the cmd/compile/internal/syntax parser does not care
				// about syntax tree fidelity and does not preserve parentheses here.)
				return name, &ast.ParenExpr{
					Lparen: e.Lparen,
					X:      e.Args[0],
					Rparen: e.Rparen,
				}
			}
		}
	}
	// No split possible.
	return nil, x
}
// isTypeElem reports whether x is a (possibly parenthesized) type element expression.
// The result is false if x could be a type element OR an ordinary (value) expression.
func isTypeElem(x ast.Expr) bool {
switch x := x.(type) {
case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
return true
case *ast.BinaryExpr:
return isTypeElem(x.X) || isTypeElem(x.Y)
case *ast.UnaryExpr:
return x.Op == token.TILDE
case *ast.ParenExpr:
return isTypeElem(x.X)
}
return false
}
// parseGenDecl parses a general declaration: the keyword (import, const,
// type, or var) followed by either a single spec or a parenthesized,
// semicolon-terminated group of specs, each parsed by f. Within a group
// the spec index is passed to f as its iota argument.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, "GenDecl("+keyword.String()+")"))
	}

	doc := p.leadComment
	pos := p.expect(keyword)
	var lparen, rparen token.Pos
	var list []ast.Spec
	if p.tok == token.LPAREN {
		// Parenthesized group of specs.
		lparen = p.pos
		p.next()
		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
			list = append(list, f(p.leadComment, keyword, iota))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	} else {
		// Single spec; no leading comment of its own (doc is on the decl).
		list = append(list, f(nil, keyword, 0))
	}

	return &ast.GenDecl{
		Doc:    doc,
		TokPos: pos,
		Tok:    keyword,
		Lparen: lparen,
		Specs:  list,
		Rparen: rparen,
	}
}
// parseFuncDecl parses a function or method declaration: "func" followed by
// an optional receiver, the name, optional type parameters, parameters,
// results, and an optional body. A missing body (e.g. for assembly-backed
// declarations) leaves decl.Body nil.
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}

	doc := p.leadComment
	pos := p.expect(token.FUNC)

	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		recv = p.parseParameters(false)
	}

	ident := p.parseIdent()

	var tparams *ast.FieldList
	if p.tok == token.LBRACK {
		tparams = p.parseTypeParameters()
	}

	params := p.parseParameters(false)
	results := p.parseParameters(true)

	var body *ast.BlockStmt
	switch p.tok {
	case token.LBRACE:
		body = p.parseBody()
		p.expectSemi()
	case token.SEMICOLON:
		p.next()
		if p.tok == token.LBRACE {
			// opening { of function declaration on next line
			p.error(p.pos, "unexpected semicolon or newline before {")
			body = p.parseBody()
			p.expectSemi()
		}
	default:
		p.expectSemi()
	}

	decl := &ast.FuncDecl{
		Doc:  doc,
		Recv: recv,
		Name: ident,
		Type: &ast.FuncType{
			Func:       pos,
			TypeParams: tparams,
			Params:     params,
			Results:    results,
		},
		Body: body,
	}
	return decl
}
// parseDecl parses a single top-level declaration. On failure it reports
// the error, advances to the next token in the sync set, and returns an
// *ast.BadDecl spanning the skipped source.
func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl {
	if p.trace {
		defer un(trace(p, "Declaration"))
	}

	var f parseSpecFunction
	switch p.tok {
	case token.IMPORT:
		f = p.parseImportSpec
	case token.CONST, token.VAR:
		f = p.parseValueSpec
	case token.TYPE:
		f = p.parseTypeSpec
	case token.FUNC:
		return p.parseFuncDecl()
	default:
		pos := p.pos
		p.errorExpected(pos, "declaration")
		p.advance(sync)
		return &ast.BadDecl{From: pos, To: p.pos}
	}

	return p.parseGenDecl(p.tok, f)
}
// ----------------------------------------------------------------------------
// Source files
// parseFile parses a complete source file: the package clause, import
// declarations, and the remaining top-level declarations (subject to
// p.mode's PackageClauseOnly/ImportsOnly flags). It returns nil if scanning
// or the package clause already produced errors. Unless
// SkipObjectResolution is set, it also runs identifier resolution on the
// result.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}

	// Don't bother parsing the rest if we had errors scanning the first token.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}

	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" && p.mode&DeclarationErrors != 0 {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()

	// Don't bother parsing the rest if we had errors parsing the package clause.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}

	var decls []ast.Decl
	if p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
		}

		if p.mode&ImportsOnly == 0 {
			// rest of package body
			prev := token.IMPORT
			for p.tok != token.EOF {
				// Continue to accept import declarations for error tolerance, but complain.
				if p.tok == token.IMPORT && prev != token.IMPORT {
					p.error(p.pos, "imports must appear before other declarations")
				}
				prev = p.tok

				decls = append(decls, p.parseDecl(declStart))
			}
		}
	}

	f := &ast.File{
		Doc:     doc,
		Package: pos,
		Name:    ident,
		Decls:   decls,
		// File{Start,End} are set by the defer in the caller.
		Imports:   p.imports,
		Comments:  p.comments,
		GoVersion: p.goVersion,
	}
	var declErr func(token.Pos, string)
	if p.mode&DeclarationErrors != 0 {
		declErr = p.error
	}
	if p.mode&SkipObjectResolution == 0 {
		resolveFile(f, p.file, declErr)
	}
	return f
}
// packIndexExpr returns an IndexExpr x[expr0] or IndexListExpr x[expr0, ...].
func packIndexExpr(x ast.Expr, lbrack token.Pos, exprs []ast.Expr, rbrack token.Pos) ast.Expr {
switch len(exprs) {
case 0:
panic("internal error: packIndexExpr with empty expr slice")
case 1:
return &ast.IndexExpr{
X: x,
Lbrack: lbrack,
Index: exprs[0],
Rbrack: rbrack,
}
default:
return &ast.IndexListExpr{
X: x,
Lbrack: lbrack,
Indices: exprs,
Rbrack: rbrack,
}
}
}
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package parser
import (
"fmt"
"go/ast"
"go/token"
"strings"
)
const debugResolve = false
// resolveFile walks the given file to resolve identifiers within the file
// scope, updating ast.Ident.Obj fields with declaration information.
//
// If declErr is non-nil, it is used to report declaration errors during
// resolution. tok is used to format position in error messages.
func resolveFile(file *ast.File, handle *token.File, declErr func(token.Pos, string)) {
	pkgScope := ast.NewScope(nil)
	r := &resolver{
		handle:   handle,
		declErr:  declErr,
		topScope: pkgScope,
		pkgScope: pkgScope,
		depth:    1,
	}

	for _, decl := range file.Decls {
		ast.Walk(r, decl)
	}

	r.closeScope()
	assert(r.topScope == nil, "unbalanced scopes")
	assert(r.labelScope == nil, "unbalanced label scopes")

	// resolve global identifiers within the same file
	// (in-place filter: identifiers that remain unresolved are compacted
	// into r.unresolved[0:i])
	i := 0
	for _, ident := range r.unresolved {
		// i <= index for current ident
		assert(ident.Obj == unresolved, "object already resolved")
		ident.Obj = r.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
		if ident.Obj == nil {
			r.unresolved[i] = ident
			i++
		} else if debugResolve {
			pos := ident.Obj.Decl.(interface{ Pos() token.Pos }).Pos()
			r.trace("resolved %s@%v to package object %v", ident.Name, ident.Pos(), pos)
		}
	}
	file.Scope = r.pkgScope
	file.Unresolved = r.unresolved[0:i]
}
const maxScopeDepth int = 1e3
// A resolver implements ast.Visitor, walking declarations to declare and
// resolve identifiers into scopes. See resolveFile for the entry point.
type resolver struct {
	handle  *token.File               // used to format positions in messages
	declErr func(token.Pos, string)   // error reporter; may be nil

	// Ordinary identifier scopes
	pkgScope   *ast.Scope   // pkgScope.Outer == nil
	topScope   *ast.Scope   // top-most scope; may be pkgScope
	unresolved []*ast.Ident // unresolved identifiers
	depth      int          // scope depth

	// Label scopes
	// (maintained by open/close LabelScope)
	labelScope  *ast.Scope     // label scope for current function
	targetStack [][]*ast.Ident // stack of unresolved labels
}
// trace prints a debug message indented by the current scope depth.
func (r *resolver) trace(format string, args ...any) {
	indent := strings.Repeat(". ", r.depth)
	fmt.Println(indent + r.sprintf(format, args...))
}
// sprintf formats like fmt.Sprintf, but renders any token.Pos argument as a
// full file position for readable messages.
func (r *resolver) sprintf(format string, args ...any) string {
	for i := range args {
		if pos, ok := args[i].(token.Pos); ok {
			args[i] = r.handle.Position(pos)
		}
	}
	return fmt.Sprintf(format, args...)
}
// openScope pushes a new ordinary identifier scope. It panics (with a
// bailout value carrying pos) if the nesting exceeds maxScopeDepth.
func (r *resolver) openScope(pos token.Pos) {
	r.depth++
	if r.depth > maxScopeDepth {
		panic(bailout{pos: pos, msg: "exceeded max scope depth during object resolution"})
	}
	if debugResolve {
		r.trace("opening scope @%v", pos)
	}
	r.topScope = ast.NewScope(r.topScope)
}
// closeScope pops the current ordinary identifier scope.
func (r *resolver) closeScope() {
	r.depth--
	if debugResolve {
		r.trace("closing scope")
	}
	r.topScope = r.topScope.Outer
}
// openLabelScope pushes a new label scope and a fresh slot on targetStack
// to collect unresolved branch targets for the enclosing function body.
func (r *resolver) openLabelScope() {
	r.labelScope = ast.NewScope(r.labelScope)
	r.targetStack = append(r.targetStack, nil)
}
// closeLabelScope resolves the branch targets collected for the current
// label scope (reporting undefined labels via declErr, if set) and pops
// the scope.
func (r *resolver) closeLabelScope() {
	// resolve labels
	n := len(r.targetStack) - 1
	scope := r.labelScope
	for _, ident := range r.targetStack[n] {
		ident.Obj = scope.Lookup(ident.Name)
		if ident.Obj == nil && r.declErr != nil {
			r.declErr(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
		}
	}
	// pop label scope
	r.targetStack = r.targetStack[0:n]
	r.labelScope = r.labelScope.Outer
}
// declare declares each of idents in scope with the given kind, attaching
// decl (and optional data) to the new *ast.Object. Redeclarations of
// non-blank names are reported via r.declErr when set. It panics if an
// ident was already declared or resolved.
func (r *resolver) declare(decl, data any, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
	for _, ident := range idents {
		if ident.Obj != nil {
			panic(fmt.Sprintf("%v: identifier %s already declared or resolved", ident.Pos(), ident.Name))
		}
		obj := ast.NewObj(kind, ident.Name)
		// remember the corresponding declaration for redeclaration
		// errors and global variable resolution/typechecking phase
		obj.Decl = decl
		obj.Data = data
		// Identifiers (for receiver type parameters) are written to the scope, but
		// never set as the resolved object. See go.dev/issue/50956.
		if _, ok := decl.(*ast.Ident); !ok {
			ident.Obj = obj
		}
		if ident.Name != "_" {
			if debugResolve {
				r.trace("declaring %s@%v", ident.Name, ident.Pos())
			}
			if alt := scope.Insert(obj); alt != nil && r.declErr != nil {
				prevDecl := ""
				if pos := alt.Pos(); pos.IsValid() {
					prevDecl = r.sprintf("\n\tprevious declaration at %v", pos)
				}
				r.declErr(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
			}
		}
	}
}
// shortVarDecl declares the LHS identifiers of a ":=" assignment in the
// current scope, reusing an existing object when the name is a permitted
// redeclaration, and reports "no new variables" when nothing new was
// declared.
func (r *resolver) shortVarDecl(decl *ast.AssignStmt) {
	// Go spec: A short variable declaration may redeclare variables
	// provided they were originally declared in the same block with
	// the same type, and at least one of the non-blank variables is new.
	n := 0 // number of new variables
	for _, x := range decl.Lhs {
		if ident, isIdent := x.(*ast.Ident); isIdent {
			assert(ident.Obj == nil, "identifier already declared or resolved")
			obj := ast.NewObj(ast.Var, ident.Name)
			// remember corresponding assignment for other tools
			obj.Decl = decl
			ident.Obj = obj
			if ident.Name != "_" {
				if debugResolve {
					r.trace("declaring %s@%v", ident.Name, ident.Pos())
				}
				if alt := r.topScope.Insert(obj); alt != nil {
					ident.Obj = alt // redeclaration
				} else {
					n++ // new declaration
				}
			}
		}
	}
	if n == 0 && r.declErr != nil {
		r.declErr(decl.Lhs[0].Pos(), "no new variables on left side of :=")
	}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency (see the assert in resolveFile); it never escapes as a
// real resolution result.
var unresolved = new(ast.Object)
// If x is an identifier, resolve attempts to resolve x by looking up
// the object it denotes. If no object is found and collectUnresolved is
// set, x is marked as unresolved and collected in the list of unresolved
// identifiers.
func (r *resolver) resolve(ident *ast.Ident, collectUnresolved bool) {
	if ident.Obj != nil {
		panic(r.sprintf("%v: identifier %s already declared or resolved", ident.Pos(), ident.Name))
	}
	// '_' should never refer to existing declarations, because it has special
	// handling in the spec.
	if ident.Name == "_" {
		return
	}
	// Search enclosing scopes from innermost outward.
	for s := r.topScope; s != nil; s = s.Outer {
		if obj := s.Lookup(ident.Name); obj != nil {
			if debugResolve {
				r.trace("resolved %v:%s to %v", ident.Pos(), ident.Name, obj)
			}
			assert(obj.Name != "", "obj with no name")
			// Identifiers (for receiver type parameters) are written to the scope,
			// but never set as the resolved object. See go.dev/issue/50956.
			if _, ok := obj.Decl.(*ast.Ident); !ok {
				ident.Obj = obj
			}
			return
		}
	}
	// all local scopes are known, so any unresolved identifier
	// must be found either in the file scope, package scope
	// (perhaps in another file), or universe scope --- collect
	// them so that they can be resolved later
	if collectUnresolved {
		ident.Obj = unresolved
		r.unresolved = append(r.unresolved, ident)
	}
}
// walkExprs walks each expression in list.
func (r *resolver) walkExprs(list []ast.Expr) {
	for _, node := range list {
		ast.Walk(r, node)
	}
}
// walkLHS walks the left-hand-side expressions of an assignment, skipping
// (possibly parenthesized) plain identifiers: those are declared or resolved
// by the caller, not looked up here.
func (r *resolver) walkLHS(list []ast.Expr) {
	for _, expr := range list {
		expr := ast.Unparen(expr)
		if _, ok := expr.(*ast.Ident); !ok && expr != nil {
			ast.Walk(r, expr)
		}
	}
}
// walkStmts walks each statement in list.
func (r *resolver) walkStmts(list []ast.Stmt) {
	for _, stmt := range list {
		ast.Walk(r, stmt)
	}
}
// Visit implements ast.Visitor, declaring and resolving identifiers as it
// walks. For the node kinds handled below it walks children itself — in
// the order Go's scoping rules require — and returns nil so ast.Walk does
// not descend again; for all other kinds it returns r to get the default
// traversal.
func (r *resolver) Visit(node ast.Node) ast.Visitor {
	if debugResolve && node != nil {
		r.trace("node %T@%v", node, node.Pos())
	}

	switch n := node.(type) {

	// Expressions.
	case *ast.Ident:
		r.resolve(n, true)

	case *ast.FuncLit:
		r.openScope(n.Pos())
		defer r.closeScope()
		r.walkFuncType(n.Type)
		r.walkBody(n.Body)

	case *ast.SelectorExpr:
		ast.Walk(r, n.X)
		// Note: don't try to resolve n.Sel, as we don't support qualified
		// resolution.

	case *ast.StructType:
		r.openScope(n.Pos())
		defer r.closeScope()
		r.walkFieldList(n.Fields, ast.Var)

	case *ast.FuncType:
		r.openScope(n.Pos())
		defer r.closeScope()
		r.walkFuncType(n)

	case *ast.CompositeLit:
		if n.Type != nil {
			ast.Walk(r, n.Type)
		}
		for _, e := range n.Elts {
			if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
				// See go.dev/issue/45160: try to resolve composite lit keys, but don't
				// collect them as unresolved if resolution failed. This replicates
				// existing behavior when resolving during parsing.
				if ident, _ := kv.Key.(*ast.Ident); ident != nil {
					r.resolve(ident, false)
				} else {
					ast.Walk(r, kv.Key)
				}
				ast.Walk(r, kv.Value)
			} else {
				ast.Walk(r, e)
			}
		}

	case *ast.InterfaceType:
		r.openScope(n.Pos())
		defer r.closeScope()
		r.walkFieldList(n.Methods, ast.Fun)

	// Statements
	case *ast.LabeledStmt:
		r.declare(n, nil, r.labelScope, ast.Lbl, n.Label)
		ast.Walk(r, n.Stmt)

	case *ast.AssignStmt:
		// RHS first: the LHS names of a ":=" must not be in scope yet.
		r.walkExprs(n.Rhs)
		if n.Tok == token.DEFINE {
			r.shortVarDecl(n)
		} else {
			r.walkExprs(n.Lhs)
		}

	case *ast.BranchStmt:
		// add to list of unresolved targets
		if n.Tok != token.FALLTHROUGH && n.Label != nil {
			depth := len(r.targetStack) - 1
			r.targetStack[depth] = append(r.targetStack[depth], n.Label)
		}

	case *ast.BlockStmt:
		r.openScope(n.Pos())
		defer r.closeScope()
		r.walkStmts(n.List)

	case *ast.IfStmt:
		r.openScope(n.Pos())
		defer r.closeScope()
		if n.Init != nil {
			ast.Walk(r, n.Init)
		}
		ast.Walk(r, n.Cond)
		ast.Walk(r, n.Body)
		if n.Else != nil {
			ast.Walk(r, n.Else)
		}

	case *ast.CaseClause:
		// Case expressions resolve in the enclosing scope; only the body
		// gets its own scope.
		r.walkExprs(n.List)
		r.openScope(n.Pos())
		defer r.closeScope()
		r.walkStmts(n.Body)

	case *ast.SwitchStmt:
		r.openScope(n.Pos())
		defer r.closeScope()
		if n.Init != nil {
			ast.Walk(r, n.Init)
		}
		if n.Tag != nil {
			// The scope below reproduces some unnecessary behavior of the parser,
			// opening an extra scope in case this is a type switch. It's not needed
			// for expression switches.
			// TODO: remove this once we've matched the parser resolution exactly.
			if n.Init != nil {
				r.openScope(n.Tag.Pos())
				defer r.closeScope()
			}
			ast.Walk(r, n.Tag)
		}
		if n.Body != nil {
			r.walkStmts(n.Body.List)
		}

	case *ast.TypeSwitchStmt:
		if n.Init != nil {
			r.openScope(n.Pos())
			defer r.closeScope()
			ast.Walk(r, n.Init)
		}
		r.openScope(n.Assign.Pos())
		defer r.closeScope()
		ast.Walk(r, n.Assign)
		// s.Body consists only of case clauses, so does not get its own
		// scope.
		if n.Body != nil {
			r.walkStmts(n.Body.List)
		}

	case *ast.CommClause:
		r.openScope(n.Pos())
		defer r.closeScope()
		if n.Comm != nil {
			ast.Walk(r, n.Comm)
		}
		r.walkStmts(n.Body)

	case *ast.SelectStmt:
		// as for switch statements, select statement bodies don't get their own
		// scope.
		if n.Body != nil {
			r.walkStmts(n.Body.List)
		}

	case *ast.ForStmt:
		r.openScope(n.Pos())
		defer r.closeScope()
		if n.Init != nil {
			ast.Walk(r, n.Init)
		}
		if n.Cond != nil {
			ast.Walk(r, n.Cond)
		}
		if n.Post != nil {
			ast.Walk(r, n.Post)
		}
		ast.Walk(r, n.Body)

	case *ast.RangeStmt:
		r.openScope(n.Pos())
		defer r.closeScope()
		ast.Walk(r, n.X)
		var lhs []ast.Expr
		if n.Key != nil {
			lhs = append(lhs, n.Key)
		}
		if n.Value != nil {
			lhs = append(lhs, n.Value)
		}
		if len(lhs) > 0 {
			if n.Tok == token.DEFINE {
				// Note: we can't exactly match the behavior of object resolution
				// during the parsing pass here, as it uses the position of the RANGE
				// token for the RHS OpPos. That information is not contained within
				// the AST.
				as := &ast.AssignStmt{
					Lhs:    lhs,
					Tok:    token.DEFINE,
					TokPos: n.TokPos,
					Rhs:    []ast.Expr{&ast.UnaryExpr{Op: token.RANGE, X: n.X}},
				}
				// TODO(rFindley): this walkLHS reproduced the parser resolution, but
				// is it necessary? By comparison, for a normal AssignStmt we don't
				// walk the LHS in case there is an invalid identifier list.
				r.walkLHS(lhs)
				r.shortVarDecl(as)
			} else {
				r.walkExprs(lhs)
			}
		}
		ast.Walk(r, n.Body)

	// Declarations
	case *ast.GenDecl:
		switch n.Tok {
		case token.CONST, token.VAR:
			for i, spec := range n.Specs {
				spec := spec.(*ast.ValueSpec)
				kind := ast.Con
				if n.Tok == token.VAR {
					kind = ast.Var
				}
				// Values and type resolve before the names are declared.
				r.walkExprs(spec.Values)
				if spec.Type != nil {
					ast.Walk(r, spec.Type)
				}
				r.declare(spec, i, r.topScope, kind, spec.Names...)
			}
		case token.TYPE:
			for _, spec := range n.Specs {
				spec := spec.(*ast.TypeSpec)
				// Go spec: The scope of a type identifier declared inside a function begins
				// at the identifier in the TypeSpec and ends at the end of the innermost
				// containing block.
				r.declare(spec, nil, r.topScope, ast.Typ, spec.Name)
				if spec.TypeParams != nil {
					r.openScope(spec.Pos())
					defer r.closeScope()
					r.walkTParams(spec.TypeParams)
				}
				ast.Walk(r, spec.Type)
			}
		}

	case *ast.FuncDecl:
		// Open the function scope.
		r.openScope(n.Pos())
		defer r.closeScope()

		r.walkRecv(n.Recv)

		// Type parameters are walked normally: they can reference each other, and
		// can be referenced by normal parameters.
		if n.Type.TypeParams != nil {
			r.walkTParams(n.Type.TypeParams)
			// TODO(rFindley): need to address receiver type parameters.
		}

		// Resolve and declare parameters in a specific order to get duplicate
		// declaration errors in the correct location.
		r.resolveList(n.Type.Params)
		r.resolveList(n.Type.Results)
		r.declareList(n.Recv, ast.Var)
		r.declareList(n.Type.Params, ast.Var)
		r.declareList(n.Type.Results, ast.Var)

		r.walkBody(n.Body)
		// Methods and init functions are not declared in the package scope.
		if n.Recv == nil && n.Name.Name != "init" {
			r.declare(n, nil, r.pkgScope, ast.Fun, n.Name)
		}

	default:
		return r
	}

	return nil
}
// walkFuncType resolves parameter and result types, then declares the
// parameter and result names in the current scope.
func (r *resolver) walkFuncType(typ *ast.FuncType) {
	// typ.TypeParams must be walked separately for FuncDecls.
	r.resolveList(typ.Params)
	r.resolveList(typ.Results)
	r.declareList(typ.Params, ast.Var)
	r.declareList(typ.Results, ast.Var)
}
// resolveList walks the type of every field in list (which may be nil),
// resolving the identifiers those types contain.
func (r *resolver) resolveList(list *ast.FieldList) {
	if list == nil {
		return
	}
	for _, field := range list.List {
		if typ := field.Type; typ != nil {
			ast.Walk(r, typ)
		}
	}
}
// declareList declares the names of every field in list (which may be nil)
// in the current scope with the given object kind.
func (r *resolver) declareList(list *ast.FieldList, kind ast.ObjKind) {
	if list == nil {
		return
	}
	for _, f := range list.List {
		r.declare(f, nil, r.topScope, kind, f.Names...)
	}
}
// walkRecv resolves a method receiver list, handling receiver type
// parameters specially: they are declared (not resolved) so that the base
// type expression can be resolved without re-resolving the parameter names.
func (r *resolver) walkRecv(recv *ast.FieldList) {
	// If our receiver has receiver type parameters, we must declare them before
	// trying to resolve the rest of the receiver, and avoid re-resolving the
	// type parameter identifiers.
	if recv == nil || len(recv.List) == 0 {
		return // nothing to do
	}
	typ := recv.List[0].Type
	if ptr, ok := typ.(*ast.StarExpr); ok {
		typ = ptr.X
	}

	var declareExprs []ast.Expr // exprs to declare
	var resolveExprs []ast.Expr // exprs to resolve
	switch typ := typ.(type) {
	case *ast.IndexExpr:
		declareExprs = []ast.Expr{typ.Index}
		resolveExprs = append(resolveExprs, typ.X)
	case *ast.IndexListExpr:
		declareExprs = typ.Indices
		resolveExprs = append(resolveExprs, typ.X)
	default:
		resolveExprs = append(resolveExprs, typ)
	}
	for _, expr := range declareExprs {
		if id, _ := expr.(*ast.Ident); id != nil {
			r.declare(expr, nil, r.topScope, ast.Typ, id)
		} else {
			// The receiver type parameter expression is invalid, but try to resolve
			// it anyway for consistency.
			resolveExprs = append(resolveExprs, expr)
		}
	}
	for _, expr := range resolveExprs {
		if expr != nil {
			ast.Walk(r, expr)
		}
	}
	// The receiver is invalid, but try to resolve it anyway for consistency.
	for _, f := range recv.List[1:] {
		if f.Type != nil {
			ast.Walk(r, f.Type)
		}
	}
}
// walkFieldList resolves field types first, then declares the field names
// with the given kind (resolve-before-declare: field types must not see the
// names being introduced).
func (r *resolver) walkFieldList(list *ast.FieldList, kind ast.ObjKind) {
	if list == nil {
		return
	}
	r.resolveList(list)
	r.declareList(list, kind)
}
// walkTParams is like walkFieldList, but declares type parameters eagerly so
// that they may be resolved in the constraint expressions held in the field
// Type. (Note the declare-before-resolve order, the reverse of walkFieldList.)
func (r *resolver) walkTParams(list *ast.FieldList) {
	r.declareList(list, ast.Typ)
	r.resolveList(list)
}
// walkBody walks a function body (which may be nil) inside a fresh label
// scope, so branch targets resolve per function.
func (r *resolver) walkBody(body *ast.BlockStmt) {
	if body == nil {
		return
	}
	r.openLabelScope()
	defer r.closeLabelScope()
	r.walkStmts(body.List)
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package version provides operations on [Go versions]
// in [Go toolchain name syntax]: strings like
// "go1.20", "go1.21.0", "go1.22rc2", and "go1.23.4-custom".
//
// [Go versions]: https://go.dev/doc/toolchain#version
// [Go toolchain name syntax]: https://go.dev/doc/toolchain#name
package version // import "go/version"
import (
"internal/gover"
"strings"
)
// stripGo converts from a "go1.21-custom" version to a "1.21" version.
// If v does not start with "go", stripGo returns the empty string (a known invalid version).
func stripGo(v string) string {
	base, _, _ := strings.Cut(v, "-") // drop a toolchain suffix such as "-custom"
	if !strings.HasPrefix(base, "go") {
		return ""
	}
	return strings.TrimPrefix(base, "go")
}
// Lang returns the Go language version for version x.
// If x is not a valid version, Lang returns the empty string.
// For example:
//
//	Lang("go1.21rc2") = "go1.21"
//	Lang("go1.21.2") = "go1.21"
//	Lang("go1.21") = "go1.21"
//	Lang("go1") = "go1"
//	Lang("bad") = ""
//	Lang("1.21") = ""
func Lang(x string) string {
	v := gover.Lang(stripGo(x))
	switch {
	case v == "":
		return ""
	case strings.HasPrefix(x[2:], v):
		// v is a prefix of x after "go": slice x instead of
		// allocating a new "go"+v string.
		return x[:2+len(v)]
	default:
		return "go" + v
	}
}
// Compare returns -1, 0, or +1 depending on whether
// x < y, x == y, or x > y, interpreted as Go versions.
// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
// Invalid versions, including the empty string, compare less than
// valid versions and equal to each other.
// The language version "go1.21" compares less than the
// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
func Compare(x, y string) int {
	// Delegate to the shared internal implementation after stripping "go".
	return gover.Compare(stripGo(x), stripGo(y))
}
// IsValid reports whether the version x is valid.
// x must begin with a "go" prefix ("go1.21", not "1.21").
func IsValid(x string) bool {
	return gover.IsValid(stripGo(x))
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package adler32 implements the Adler-32 checksum.
//
// It is defined in RFC 1950:
//
// Adler-32 is composed of two sums accumulated per byte: s1 is
// the sum of all bytes, s2 is the sum of all s1 values. Both sums
// are done modulo 65521. s1 is initialized to 1, s2 to zero. The
// Adler-32 checksum is stored as s2*65536 + s1 in most-
// significant-byte first (network) order.
package adler32
import (
"errors"
"hash"
"internal/byteorder"
)
const (
	// mod is the largest prime that is less than 65536.
	mod = 65521
	// nmax is the largest n such that
	// 255 * n * (n+1) / 2 + (n+1) * (mod-1) <= 2^32-1.
	// It is mentioned in RFC 1950 (search for "5552").
	// Processing at most nmax bytes between reductions guarantees the
	// running sums cannot overflow uint32 (see update).
	nmax = 5552
)

// The size of an Adler-32 checksum in bytes.
const Size = 4

// digest represents the partial evaluation of a checksum.
// The low 16 bits are s1, the high 16 bits are s2.
type digest uint32
func (d *digest) Reset() { *d = 1 }
// New returns a new hash.Hash32 computing the Adler-32 checksum. Its
// Sum method will lay the value out in big-endian byte order. The
// returned Hash32 also implements [encoding.BinaryMarshaler] and
// [encoding.BinaryUnmarshaler] to marshal and unmarshal the internal
// state of the hash.
func New() hash.Hash32 {
	var d digest
	d.Reset()
	return &d
}
// Size returns the number of bytes Sum will append (4).
func (d *digest) Size() int { return Size }

// BlockSize returns the hash's block size.
func (d *digest) BlockSize() int { return 4 }

const (
	// magic identifies a marshaled adler32 state (see AppendBinary).
	magic         = "adl\x01"
	marshaledSize = len(magic) + 4
)
// AppendBinary implements [encoding.BinaryAppender]: it appends the magic
// identifier followed by the 32-bit state in big-endian order.
func (d *digest) AppendBinary(b []byte) ([]byte, error) {
	b = append(b, magic...)
	b = byteorder.BEAppendUint32(b, uint32(*d))
	return b, nil
}
// MarshalBinary implements [encoding.BinaryMarshaler] via AppendBinary,
// pre-sizing the buffer to the exact marshaled length.
func (d *digest) MarshalBinary() ([]byte, error) {
	return d.AppendBinary(make([]byte, 0, marshaledSize))
}
// UnmarshalBinary implements [encoding.BinaryUnmarshaler], restoring the
// state written by AppendBinary. The magic prefix is checked before the
// length so a state from a different hash is reported as such.
func (d *digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("hash/adler32: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("hash/adler32: invalid hash state size")
	}
	*d = digest(byteorder.BEUint32(b[len(magic):]))
	return nil
}
// Clone returns an independent copy of the digest (the state is a single
// uint32, so a value copy suffices).
func (d *digest) Clone() (hash.Cloner, error) {
	r := *d
	return &r, nil
}
// Add p to the running checksum d.
//
// The input is consumed in chunks of at most nmax bytes: nmax is the
// largest count for which the unreduced sums fit in uint32 (see the nmax
// comment), so the % mod reductions can be deferred to once per chunk.
// The inner loop is manually unrolled four bytes at a time.
func update(d digest, p []byte) digest {
	s1, s2 := uint32(d&0xffff), uint32(d>>16)
	for len(p) > 0 {
		var q []byte
		if len(p) > nmax {
			// Split off the remainder; process p (≤ nmax bytes) now.
			p, q = p[:nmax], p[nmax:]
		}
		for len(p) >= 4 {
			s1 += uint32(p[0])
			s2 += s1
			s1 += uint32(p[1])
			s2 += s1
			s1 += uint32(p[2])
			s2 += s1
			s1 += uint32(p[3])
			s2 += s1
			p = p[4:]
		}
		// Up to 3 trailing bytes of the chunk.
		for _, x := range p {
			s1 += uint32(x)
			s2 += s1
		}
		s1 %= mod
		s2 %= mod
		p = q // continue with the remainder, if any
	}
	return digest(s2<<16 | s1)
}
// Write implements io.Writer, folding p into the running checksum.
// It never fails and always reports len(p) bytes consumed.
func (d *digest) Write(p []byte) (nn int, err error) {
	*d = update(*d, p)
	return len(p), nil
}
func (d *digest) Sum32() uint32 { return uint32(*d) }
// Sum appends the current checksum to in in big-endian byte order and
// returns the resulting slice; it does not change the underlying state.
func (d *digest) Sum(in []byte) []byte {
	v := uint32(*d)
	var b [4]byte
	b[0] = byte(v >> 24)
	b[1] = byte(v >> 16)
	b[2] = byte(v >> 8)
	b[3] = byte(v)
	return append(in, b[:]...)
}
// Checksum returns the Adler-32 checksum of data.
// (1 is the initial digest state: s1 = 1, s2 = 0.)
func Checksum(data []byte) uint32 { return uint32(update(1, data)) }
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32,
// checksum. See https://en.wikipedia.org/wiki/Cyclic_redundancy_check for
// information.
//
// Polynomials are represented in LSB-first form also known as reversed representation.
//
// See https://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials
// for information.
package crc32
import (
"errors"
"hash"
"internal/byteorder"
"sync"
"sync/atomic"
)
// The size of a CRC-32 checksum in bytes.
const Size = 4

// Predefined polynomials, given in LSB-first (reversed) representation.
const (
	// IEEE is by far and away the most common CRC-32 polynomial.
	// Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ...
	IEEE = 0xedb88320

	// Castagnoli's polynomial, used in iSCSI.
	// Has better error detection characteristics than IEEE.
	// https://dx.doi.org/10.1109/26.231911
	Castagnoli = 0x82f63b78

	// Koopman's polynomial.
	// Also has better error detection characteristics than IEEE.
	// https://dx.doi.org/10.1109/DSN.2002.1028931
	Koopman = 0xeb31d82e
)

// Table is a 256-word table representing the polynomial for efficient processing.
type Table [256]uint32
// This file makes use of functions implemented in architecture-specific files.
// The interface that they implement is as follows:
//
// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE
// // algorithm is available.
// archAvailableIEEE() bool
//
// // archInitIEEE initializes the architecture-specific CRC32-IEEE algorithm.
// // It can only be called if archAvailableIEEE() returns true.
// archInitIEEE()
//
// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if
// // archInitIEEE() was previously called.
// archUpdateIEEE(crc uint32, p []byte) uint32
//
// // archAvailableCastagnoli reports whether an architecture-specific
// // CRC32-C algorithm is available.
// archAvailableCastagnoli() bool
//
// // archInitCastagnoli initializes the architecture-specific CRC32-C
// // algorithm. It can only be called if archAvailableCastagnoli() returns
// // true.
// archInitCastagnoli()
//
// // archUpdateCastagnoli updates the given CRC32-C. It can only be called
// // if archInitCastagnoli() was previously called.
// archUpdateCastagnoli(crc uint32, p []byte) uint32
// castagnoliTable points to a lazily initialized Table for the Castagnoli
// polynomial. MakeTable will always return this value when asked to make a
// Castagnoli table so we can compare against it to find when the caller is
// using this polynomial.
var castagnoliTable *Table
var castagnoliTable8 *slicing8Table
var updateCastagnoli func(crc uint32, p []byte) uint32
var haveCastagnoli atomic.Bool
// castagnoliInitOnce lazily builds the Castagnoli tables and selects
// the update implementation: the architecture-specific routine when
// available, otherwise a slicing-by-8 software fallback.
var castagnoliInitOnce = sync.OnceFunc(func() {
	castagnoliTable = simpleMakeTable(Castagnoli)
	if archAvailableCastagnoli() {
		archInitCastagnoli()
		updateCastagnoli = archUpdateCastagnoli
	} else {
		// Initialize the slicing-by-8 table.
		castagnoliTable8 = slicingMakeTable(Castagnoli)
		updateCastagnoli = func(crc uint32, p []byte) uint32 {
			return slicingUpdate(crc, castagnoliTable8, p)
		}
	}
	// Publish last, so code gated on haveCastagnoli never observes a
	// nil updateCastagnoli.
	haveCastagnoli.Store(true)
})
// IEEETable is the table for the [IEEE] polynomial.
var IEEETable = simpleMakeTable(IEEE)
// ieeeTable8 is the slicing8Table for IEEE
var ieeeTable8 *slicing8Table
var updateIEEE func(crc uint32, p []byte) uint32
// ieeeInitOnce lazily selects the IEEE update implementation: the
// architecture-specific routine when available, otherwise slicing-by-8.
// (IEEETable itself is built eagerly because it is exported.)
var ieeeInitOnce = sync.OnceFunc(func() {
	if archAvailableIEEE() {
		archInitIEEE()
		updateIEEE = archUpdateIEEE
	} else {
		// Initialize the slicing-by-8 table.
		ieeeTable8 = slicingMakeTable(IEEE)
		updateIEEE = func(crc uint32, p []byte) uint32 {
			return slicingUpdate(crc, ieeeTable8, p)
		}
	}
})
// MakeTable returns a [Table] constructed from the specified polynomial.
// The contents of this [Table] must not be modified.
//
// For the two well-known polynomials the shared, lazily initialized
// table is returned so that update can recognize it by pointer identity.
func MakeTable(poly uint32) *Table {
	if poly == IEEE {
		ieeeInitOnce()
		return IEEETable
	}
	if poly == Castagnoli {
		castagnoliInitOnce()
		return castagnoliTable
	}
	return simpleMakeTable(poly)
}
// digest represents the partial evaluation of a checksum.
type digest struct {
	crc uint32 // running CRC-32 value
	tab *Table // polynomial table the CRC is computed against
}
// New creates a new [hash.Hash32] computing the CRC-32 checksum using the
// polynomial represented by the [Table]. Its Sum method will lay the
// value out in big-endian byte order. The returned Hash32 also
// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to
// marshal and unmarshal the internal state of the hash.
func New(tab *Table) hash.Hash32 {
	// IEEETable is exported, so callers may pass it without ever having
	// called MakeTable; ensure the IEEE machinery is initialized.
	if tab == IEEETable {
		ieeeInitOnce()
	}
	return &digest{crc: 0, tab: tab}
}
// NewIEEE creates a new [hash.Hash32] computing the CRC-32 checksum using
// the [IEEE] polynomial. Its Sum method will lay the value out in
// big-endian byte order. The returned Hash32 also implements
// [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to marshal
// and unmarshal the internal state of the hash.
func NewIEEE() hash.Hash32 { return New(IEEETable) }

// Size returns the number of bytes Sum will append (4 for CRC-32).
func (d *digest) Size() int { return Size }

// BlockSize returns the hash's underlying block size (1 byte).
func (d *digest) BlockSize() int { return 1 }

// Reset resets the running checksum to the initial state.
func (d *digest) Reset() { d.crc = 0 }
const (
magic = "crc\x01"
marshaledSize = len(magic) + 4 + 4
)
// AppendBinary appends the serialized digest state to b: the magic
// string, then the big-endian IEEE checksum of the table (to detect a
// table mismatch on unmarshal), then the big-endian CRC state.
func (d *digest) AppendBinary(b []byte) ([]byte, error) {
	b = append(b, magic...)
	b = byteorder.BEAppendUint32(b, tableSum(d.tab))
	b = byteorder.BEAppendUint32(b, d.crc)
	return b, nil
}

// MarshalBinary implements encoding.BinaryMarshaler by appending the
// state to a buffer preallocated to the exact marshaled size.
func (d *digest) MarshalBinary() ([]byte, error) {
	return d.AppendBinary(make([]byte, 0, marshaledSize))
}
// UnmarshalBinary restores the digest from a state produced by
// AppendBinary/MarshalBinary, verifying the magic prefix, the total
// length, and that the serialized table checksum matches d's table.
func (d *digest) UnmarshalBinary(b []byte) error {
	switch {
	case len(b) < len(magic) || string(b[:len(magic)]) != magic:
		return errors.New("hash/crc32: invalid hash state identifier")
	case len(b) != marshaledSize:
		return errors.New("hash/crc32: invalid hash state size")
	case tableSum(d.tab) != byteorder.BEUint32(b[4:]):
		return errors.New("hash/crc32: tables do not match")
	}
	d.crc = byteorder.BEUint32(b[8:])
	return nil
}
// Clone returns an independent copy of d; the two digests can then be
// written to separately.
func (d *digest) Clone() (hash.Cloner, error) {
	r := *d
	return &r, nil
}

// update dispatches to the fastest available implementation for tab,
// recognized by pointer identity against the shared tables.
// checkInitIEEE guards the case where IEEETable is used without a prior
// MakeTable/New call (possible because IEEETable is exported).
func update(crc uint32, tab *Table, p []byte, checkInitIEEE bool) uint32 {
	switch {
	case haveCastagnoli.Load() && tab == castagnoliTable:
		return updateCastagnoli(crc, p)
	case tab == IEEETable:
		if checkInitIEEE {
			ieeeInitOnce()
		}
		return updateIEEE(crc, p)
	default:
		// Unrecognized table: plain byte-at-a-time algorithm.
		return simpleUpdate(crc, tab, p)
	}
}
// Update returns the result of adding the bytes in p to the crc.
func Update(crc uint32, tab *Table, p []byte) uint32 {
	// Unfortunately, because IEEETable is exported, IEEE may be used without a
	// call to MakeTable. We have to make sure it gets initialized in that case.
	return update(crc, tab, p, true)
}

// Write absorbs p into the running checksum; it never fails and always
// reports len(p) bytes consumed.
func (d *digest) Write(p []byte) (n int, err error) {
	// We only create digest objects through New() which takes care of
	// initialization in this case.
	d.crc = update(d.crc, d.tab, p, false)
	return len(p), nil
}
// Sum32 returns the current CRC-32 checksum value.
func (d *digest) Sum32() uint32 { return d.crc }

// Sum appends the big-endian encoding of the current checksum to in.
func (d *digest) Sum(in []byte) []byte {
	return byteorder.BEAppendUint32(in, d.Sum32())
}
// Checksum returns the CRC-32 checksum of data
// using the polynomial represented by the [Table].
func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) }

// ChecksumIEEE returns the CRC-32 checksum of data
// using the [IEEE] polynomial.
func ChecksumIEEE(data []byte) uint32 {
	ieeeInitOnce()
	return updateIEEE(0, data)
}

// tableSum returns the IEEE checksum of table t.
// It fingerprints the table inside marshaled digest state so that
// UnmarshalBinary can detect a mismatched table. A nil table hashes
// the empty byte sequence.
func tableSum(t *Table) uint32 {
	// 1024 = 256 entries * 4 bytes; stack array avoids an allocation.
	var a [1024]byte
	b := a[:0]
	if t != nil {
		for _, x := range t {
			b = byteorder.BEAppendUint32(b, x)
		}
	}
	return ChecksumIEEE(b)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a
// description of the interface that each architecture-specific file
// implements.
package crc32
import (
"internal/cpu"
"unsafe"
)
// Offset into internal/cpu records for use in assembly.
const (
offsetX86HasAVX512VPCLMULQDQL = unsafe.Offsetof(cpu.X86.HasAVX512VPCLMULQDQ)
)
// This file contains the code to call the SSE 4.2 version of the Castagnoli
// and IEEE CRC.
// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE 4.2 CRC32
// instruction.
//
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32
// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE 4.2 CRC32
// instruction.
//
//go:noescape
func castagnoliSSE42Triple(
crcA, crcB, crcC uint32,
a, b, c []byte,
rounds uint32,
) (retA uint32, retB uint32, retC uint32)
// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ
// instruction as well as SSE 4.1.
//
//go:noescape
func ieeeCLMUL(crc uint32, p []byte) uint32
const castagnoliK1 = 168
const castagnoliK2 = 1344
type sse42Table [4]Table
var castagnoliSSE42TableK1 *sse42Table
var castagnoliSSE42TableK2 *sse42Table
// archAvailableCastagnoli reports whether the hardware-assisted CRC32-C
// implementation can be used; it requires the SSE 4.2 CRC32 instruction.
func archAvailableCastagnoli() bool {
	return cpu.X86.HasSSE42
}
// archInitCastagnoli precomputes the two shift tables used to combine
// the three parallel CRC streams (one for the K1 chunk size, one for
// K2). It must only be called when archAvailableCastagnoli() is true.
func archInitCastagnoli() {
	if !cpu.X86.HasSSE42 {
		panic("arch-specific Castagnoli not available")
	}
	castagnoliSSE42TableK1 = new(sse42Table)
	castagnoliSSE42TableK2 = new(sse42Table)
	// See description in archUpdateCastagnoli.
	//    t[0][i] = CRC(i000, O)
	//    t[1][i] = CRC(0i00, O)
	//    t[2][i] = CRC(00i0, O)
	//    t[3][i] = CRC(000i, O)
	// where O is a sequence of K zeros.
	var tmp [castagnoliK2]byte
	for b := 0; b < 4; b++ {
		for i := 0; i < 256; i++ {
			// val places byte i at byte position b of the initial CRC.
			val := uint32(i) << uint32(b*8)
			castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1])
			castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:])
		}
	}
}
// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the
// table given) with the given initial crc value. This corresponds to
// CRC(crc, O) in the description in archUpdateCastagnoli: the four
// per-byte tables are XORed together, one per byte of crc.
func castagnoliShift(table *sse42Table, crc uint32) uint32 {
	return table[3][crc>>24] ^
		table[2][(crc>>16)&0xFF] ^
		table[1][(crc>>8)&0xFF] ^
		table[0][crc&0xFF]
}
// archUpdateCastagnoli updates the CRC32-C of p using the SSE 4.2 CRC32
// instruction, splitting large buffers into three interleaved streams
// to exploit instruction pipelining. It can only be called after
// archInitCastagnoli().
func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
	if !cpu.X86.HasSSE42 {
		panic("not available")
	}
	// This method is inspired from the algorithm in Intel's white paper:
	//    "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction"
	// The same strategy of splitting the buffer in three is used but the
	// combining calculation is different; the complete derivation is explained
	// below.
	//
	// -- The basic idea --
	//
	// The CRC32 instruction (available in SSE4.2) can process 8 bytes at a
	// time. In recent Intel architectures the instruction takes 3 cycles;
	// however the processor can pipeline up to three instructions if they
	// don't depend on each other.
	//
	// Roughly this means that we can process three buffers in about the same
	// time we can process one buffer.
	//
	// The idea is then to split the buffer in three, CRC the three pieces
	// separately and then combine the results.
	//
	// Combining the results requires precomputed tables, so we must choose a
	// fixed buffer length to optimize. The longer the length, the faster; but
	// only buffers longer than this length will use the optimization. We choose
	// two cutoffs and compute tables for both:
	//  - one around 512: 168*3=504
	//  - one around 4KB: 1344*3=4032
	//
	// -- The nitty gritty --
	//
	// Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with
	// initial non-inverted CRC I). This function has the following properties:
	//   (a) CRC(I, AB) = CRC(CRC(I, A), B)
	//   (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B)
	//
	// Say we want to compute CRC(I, ABC) where A, B, C are three sequences of
	// K bytes each, where K is a fixed constant. Let O be the sequence of K zero
	// bytes.
	//
	// CRC(I, ABC) = CRC(I, ABO xor C)
	//             = CRC(I, ABO) xor CRC(0, C)
	//             = CRC(CRC(I, AB), O) xor CRC(0, C)
	//             = CRC(CRC(I, AO xor B), O) xor CRC(0, C)
	//             = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C)
	//             = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C)
	//
	// The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B),
	// and CRC(0, C) efficiently.  We just need to find a way to quickly compute
	// CRC(uvwx, O) given a 4-byte initial value uvwx.  We can precompute these
	// values; since we can't have a 32-bit table, we break it up into four
	// 8-bit tables:
	//
	//    CRC(uvwx, O) = CRC(u000, O) xor
	//                   CRC(0v00, O) xor
	//                   CRC(00w0, O) xor
	//                   CRC(000x, O)
	//
	// We can compute tables corresponding to the four terms for all 8-bit
	// values.

	// The exported convention is an inverted CRC; work non-inverted inside.
	crc = ^crc

	// If a buffer is long enough to use the optimization, process the first few
	// bytes to align the buffer to an 8 byte boundary (if necessary).
	if len(p) >= castagnoliK1*3 {
		delta := int(uintptr(unsafe.Pointer(&p[0])) & 7)
		if delta != 0 {
			delta = 8 - delta
			crc = castagnoliSSE42(crc, p[:delta])
			p = p[delta:]
		}
	}

	// Process 3*K2 at a time.
	for len(p) >= castagnoliK2*3 {
		// Compute CRC(I, A), CRC(0, B), and CRC(0, C).
		crcA, crcB, crcC := castagnoliSSE42Triple(
			crc, 0, 0,
			p, p[castagnoliK2:], p[castagnoliK2*2:],
			castagnoliK2/24)

		// CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
		crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB
		// CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
		crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC
		p = p[castagnoliK2*3:]
	}

	// Process 3*K1 at a time.
	for len(p) >= castagnoliK1*3 {
		// Compute CRC(I, A), CRC(0, B), and CRC(0, C).
		crcA, crcB, crcC := castagnoliSSE42Triple(
			crc, 0, 0,
			p, p[castagnoliK1:], p[castagnoliK1*2:],
			castagnoliK1/24)

		// CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
		crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB
		// CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
		crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC
		p = p[castagnoliK1*3:]
	}

	// Use the simple implementation for what's left.
	crc = castagnoliSSE42(crc, p)
	return ^crc
}
// archAvailableIEEE reports whether the hardware-assisted CRC32-IEEE
// implementation can be used; it requires PCLMULQDQ and SSE 4.1.
func archAvailableIEEE() bool {
	return cpu.X86.HasPCLMULQDQ && cpu.X86.HasSSE41
}

// archIeeeTable8 is the software slicing-by-8 table used for buffers
// too small for the PCLMULQDQ path.
var archIeeeTable8 *slicing8Table

// archInitIEEE prepares the IEEE implementation. It must only be
// called when archAvailableIEEE() is true.
func archInitIEEE() {
	if !cpu.X86.HasPCLMULQDQ || !cpu.X86.HasSSE41 {
		panic("not available")
	}
	// We still use slicing-by-8 for small buffers.
	archIeeeTable8 = slicingMakeTable(IEEE)
}
// archUpdateIEEE updates the CRC32-IEEE of p. It can only be called
// after archInitIEEE().
func archUpdateIEEE(crc uint32, p []byte) uint32 {
	if !cpu.X86.HasPCLMULQDQ || !cpu.X86.HasSSE41 {
		panic("not available")
	}

	// The CLMUL kernel consumes a multiple of 16 bytes; only dispatch
	// to it for buffers of at least 64 bytes and leave the tail (and
	// short buffers) to slicing-by-8.
	if len(p) >= 64 {
		left := len(p) & 15
		do := len(p) - left
		// ieeeCLMUL works in the non-inverted convention.
		crc = ^ieeeCLMUL(^crc, p[:do])
		p = p[do:]
	}
	if len(p) == 0 {
		return crc
	}
	return slicingUpdate(crc, archIeeeTable8, p)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains CRC32 algorithms that are not specific to any architecture
// and don't use hardware acceleration.
//
// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table.
//
// The slicing-by-8 algorithm is a faster implementation that uses a bigger
// table (8*256*4 bytes).
package crc32
import "internal/byteorder"
// simpleMakeTable allocates a fresh Table for the given polynomial and
// fills it in; the result is suitable for use with simpleUpdate.
func simpleMakeTable(poly uint32) *Table {
	tab := &Table{}
	simplePopulateTable(poly, tab)
	return tab
}
// simplePopulateTable fills t with the classic bit-at-a-time CRC table
// for poly: entry i is byte i pushed through eight shift/xor steps.
// The result is suitable for use with simpleUpdate.
func simplePopulateTable(poly uint32, t *Table) {
	for i := range t {
		entry := uint32(i)
		for bit := 0; bit < 8; bit++ {
			if entry&1 != 0 {
				entry = entry>>1 ^ poly
			} else {
				entry >>= 1
			}
		}
		t[i] = entry
	}
}
// simpleUpdate folds p into crc one byte at a time using a table built
// by simpleMakeTable. The CRC is stored inverted between calls, so it
// is un-inverted on entry and re-inverted on return.
func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 {
	crc = ^crc
	for _, b := range p {
		crc = crc>>8 ^ tab[byte(crc)^b]
	}
	return ^crc
}
// Use slicing-by-8 when payload >= this value.
const slicing8Cutoff = 16
// slicing8Table is array of 8 Tables, used by the slicing-by-8 algorithm.
type slicing8Table [8]Table
// slicingMakeTable constructs a slicing8Table for the specified polynomial. The
// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate).
func slicingMakeTable(poly uint32) *slicing8Table {
	t := new(slicing8Table)
	simplePopulateTable(poly, &t[0])
	// Each subsequent table is derived from the previous one by feeding
	// its entries back through table 0, extending the lookup by one
	// byte position per step.
	for i := 0; i < 256; i++ {
		crc := t[0][i]
		for j := 1; j < 8; j++ {
			crc = t[0][crc&0xFF] ^ (crc >> 8)
			t[j][i] = crc
		}
	}
	return t
}
// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a
// table that was previously computed using slicingMakeTable.
// Buffers shorter than slicing8Cutoff skip straight to the simple loop.
func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 {
	if len(p) >= slicing8Cutoff {
		crc = ^crc
		// Strictly greater than 8 so that 1..8 trailing bytes are
		// always left for the simple pass below.
		for len(p) > 8 {
			crc ^= byteorder.LEUint32(p)
			crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^
				tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^
				tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF]
			p = p[8:]
		}
		crc = ^crc
	}
	if len(p) == 0 {
		return crc
	}
	// Remainder (or a short buffer): the simple algorithm only needs
	// the first of the eight tables.
	return simpleUpdate(crc, &tab[0], p)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package crc64 implements the 64-bit cyclic redundancy check, or CRC-64,
// checksum. See https://en.wikipedia.org/wiki/Cyclic_redundancy_check for
// information.
package crc64
import (
"errors"
"hash"
"internal/byteorder"
"sync"
)
// The size of a CRC-64 checksum in bytes.
const Size = 8
// Predefined polynomials.
const (
// The ISO polynomial, defined in ISO 3309 and used in HDLC.
ISO = 0xD800000000000000
// The ECMA polynomial, defined in ECMA 182.
ECMA = 0xC96C5795D7870F42
)
// Table is a 256-word table representing the polynomial for efficient processing.
type Table [256]uint64
var (
slicing8TableISO *[8]Table
slicing8TableECMA *[8]Table
)
// buildSlicing8TablesOnce runs buildSlicing8Tables exactly once.
var buildSlicing8TablesOnce = sync.OnceFunc(buildSlicing8Tables)

// buildSlicing8Tables constructs the extended slicing-by-8 tables for
// the two predefined polynomials.
func buildSlicing8Tables() {
	slicing8TableISO = makeSlicingBy8Table(makeTable(ISO))
	slicing8TableECMA = makeSlicingBy8Table(makeTable(ECMA))
}
// MakeTable returns a [Table] constructed from the specified polynomial.
// The contents of this [Table] must not be modified.
//
// For the two predefined polynomials the first slice of the shared
// slicing-by-8 table is returned, so update can recognize it later.
func MakeTable(poly uint64) *Table {
	buildSlicing8TablesOnce()
	if poly == ISO {
		return &slicing8TableISO[0]
	}
	if poly == ECMA {
		return &slicing8TableECMA[0]
	}
	return makeTable(poly)
}
// makeTable builds the basic 256-entry lookup table for poly using the
// classic bit-at-a-time shift/xor construction.
func makeTable(poly uint64) *Table {
	tab := new(Table)
	for i := range tab {
		entry := uint64(i)
		for bit := 0; bit < 8; bit++ {
			if entry&1 != 0 {
				entry = entry>>1 ^ poly
			} else {
				entry >>= 1
			}
		}
		tab[i] = entry
	}
	return tab
}
// makeSlicingBy8Table extends a basic table t into the eight tables
// used by the slicing-by-8 loop in update. Table j is derived from the
// previous one by feeding entries back through the base table,
// extending the lookup by one byte position per step.
func makeSlicingBy8Table(t *Table) *[8]Table {
	var helperTable [8]Table
	helperTable[0] = *t
	for i := 0; i < 256; i++ {
		crc := t[i]
		for j := 1; j < 8; j++ {
			crc = t[crc&0xff] ^ (crc >> 8)
			helperTable[j][i] = crc
		}
	}
	return &helperTable
}
// digest represents the partial evaluation of a checksum.
type digest struct {
	crc uint64 // running CRC-64 value
	tab *Table // polynomial table the CRC is computed against
}

// New creates a new hash.Hash64 computing the CRC-64 checksum using the
// polynomial represented by the [Table]. Its Sum method will lay the
// value out in big-endian byte order. The returned Hash64 also
// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to
// marshal and unmarshal the internal state of the hash.
func New(tab *Table) hash.Hash64 { return &digest{0, tab} }

// Size returns the number of bytes Sum will append (8 for CRC-64).
func (d *digest) Size() int { return Size }

// BlockSize returns the hash's underlying block size (1 byte).
func (d *digest) BlockSize() int { return 1 }

// Reset resets the running checksum to the initial state.
func (d *digest) Reset() { d.crc = 0 }
const (
magic = "crc\x02"
marshaledSize = len(magic) + 8 + 8
)
// AppendBinary appends the serialized digest state to b: the magic
// string, then the big-endian ISO checksum of the table (to detect a
// table mismatch on unmarshal), then the big-endian CRC state.
func (d *digest) AppendBinary(b []byte) ([]byte, error) {
	b = append(b, magic...)
	b = byteorder.BEAppendUint64(b, tableSum(d.tab))
	b = byteorder.BEAppendUint64(b, d.crc)
	return b, nil
}

// MarshalBinary implements encoding.BinaryMarshaler by appending the
// state to a buffer preallocated to the exact marshaled size.
func (d *digest) MarshalBinary() ([]byte, error) {
	return d.AppendBinary(make([]byte, 0, marshaledSize))
}
// UnmarshalBinary restores the digest from a state produced by
// AppendBinary/MarshalBinary. Layout: 4-byte magic, 8-byte table
// checksum, 8-byte CRC state.
func (d *digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("hash/crc64: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("hash/crc64: invalid hash state size")
	}
	// The table checksum starts right after the 4-byte magic.
	if tableSum(d.tab) != byteorder.BEUint64(b[4:]) {
		return errors.New("hash/crc64: tables do not match")
	}
	// The CRC state follows the 8-byte table checksum.
	d.crc = byteorder.BEUint64(b[12:])
	return nil
}
// Clone returns an independent copy of d; the two digests can then be
// written to separately without affecting each other.
func (d *digest) Clone() (hash.Cloner, error) {
	c := new(digest)
	*c = *d
	return c, nil
}
// update incorporates p into crc using tab. Long inputs use the
// slicing-by-8 algorithm with an extended table (shared for the
// predefined polynomials, built on the fly for very large inputs with
// a custom table); everything else uses the simple byte loop.
func update(crc uint64, tab *Table, p []byte) uint64 {
	buildSlicing8TablesOnce()
	crc = ^crc
	// Table comparison is somewhat expensive, so avoid it for small sizes
	for len(p) >= 64 {
		var helperTable *[8]Table
		if *tab == slicing8TableECMA[0] {
			helperTable = slicing8TableECMA
		} else if *tab == slicing8TableISO[0] {
			helperTable = slicing8TableISO
			// For smaller sizes creating extended table takes too much time
		} else if len(p) >= 2048 {
			// According to the tests between various x86 and arm CPUs, 2k is a reasonable
			// threshold for now. This may change in the future.
			helperTable = makeSlicingBy8Table(tab)
		} else {
			break
		}
		// Update using slicing-by-8
		for len(p) > 8 {
			crc ^= byteorder.LEUint64(p)
			crc = helperTable[7][crc&0xff] ^
				helperTable[6][(crc>>8)&0xff] ^
				helperTable[5][(crc>>16)&0xff] ^
				helperTable[4][(crc>>24)&0xff] ^
				helperTable[3][(crc>>32)&0xff] ^
				helperTable[2][(crc>>40)&0xff] ^
				helperTable[1][(crc>>48)&0xff] ^
				helperTable[0][crc>>56]
			p = p[8:]
		}
	}
	// For the remainder or small sizes, byte-at-a-time.
	for _, v := range p {
		crc = tab[byte(crc)^v] ^ (crc >> 8)
	}
	return ^crc
}
// Update returns the result of adding the bytes in p to the crc.
func Update(crc uint64, tab *Table, p []byte) uint64 {
	return update(crc, tab, p)
}

// Write absorbs p into the running checksum; it never fails and always
// reports len(p) bytes consumed.
func (d *digest) Write(p []byte) (n int, err error) {
	d.crc = update(d.crc, d.tab, p)
	return len(p), nil
}
// Sum64 returns the current CRC-64 checksum value.
func (d *digest) Sum64() uint64 { return d.crc }

// Sum appends the big-endian encoding of the current checksum to in.
func (d *digest) Sum(in []byte) []byte {
	return byteorder.BEAppendUint64(in, d.Sum64())
}
// Checksum returns the CRC-64 checksum of data
// using the polynomial represented by the [Table].
func Checksum(data []byte, tab *Table) uint64 { return update(0, tab, data) }

// tableSum returns the ISO checksum of table t.
// It fingerprints the table inside marshaled digest state so that
// UnmarshalBinary can detect a mismatched table. A nil table hashes
// the empty byte sequence.
func tableSum(t *Table) uint64 {
	// 2048 = 256 entries * 8 bytes; stack array avoids an allocation.
	var a [2048]byte
	b := a[:0]
	if t != nil {
		for _, x := range t {
			b = byteorder.BEAppendUint64(b, x)
		}
	}
	return Checksum(b, MakeTable(ISO))
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package html
import "sync"
// All entities that do not end with ';' are 6 or fewer bytes long.
const longestEntityWithoutSemicolon = 6
// entityMaps returns entity and entity2.
//
// entity is a map from HTML entity names to their values. The semicolon matters:
// https://html.spec.whatwg.org/multipage/named-characters.html
// lists both "amp" and "amp;" as two separate entries.
// Note that the HTML5 list is larger than the HTML4 list at
// http://www.w3.org/TR/html4/sgml/entities.html
//
// entity2 is a map of HTML entities to two unicode codepoints.
var entityMaps = sync.OnceValues(func() (entity map[string]rune, entity2 map[string][2]rune) {
entity = map[string]rune{
"AElig;": '\U000000C6',
"AMP;": '\U00000026',
"Aacute;": '\U000000C1',
"Abreve;": '\U00000102',
"Acirc;": '\U000000C2',
"Acy;": '\U00000410',
"Afr;": '\U0001D504',
"Agrave;": '\U000000C0',
"Alpha;": '\U00000391',
"Amacr;": '\U00000100',
"And;": '\U00002A53',
"Aogon;": '\U00000104',
"Aopf;": '\U0001D538',
"ApplyFunction;": '\U00002061',
"Aring;": '\U000000C5',
"Ascr;": '\U0001D49C',
"Assign;": '\U00002254',
"Atilde;": '\U000000C3',
"Auml;": '\U000000C4',
"Backslash;": '\U00002216',
"Barv;": '\U00002AE7',
"Barwed;": '\U00002306',
"Bcy;": '\U00000411',
"Because;": '\U00002235',
"Bernoullis;": '\U0000212C',
"Beta;": '\U00000392',
"Bfr;": '\U0001D505',
"Bopf;": '\U0001D539',
"Breve;": '\U000002D8',
"Bscr;": '\U0000212C',
"Bumpeq;": '\U0000224E',
"CHcy;": '\U00000427',
"COPY;": '\U000000A9',
"Cacute;": '\U00000106',
"Cap;": '\U000022D2',
"CapitalDifferentialD;": '\U00002145',
"Cayleys;": '\U0000212D',
"Ccaron;": '\U0000010C',
"Ccedil;": '\U000000C7',
"Ccirc;": '\U00000108',
"Cconint;": '\U00002230',
"Cdot;": '\U0000010A',
"Cedilla;": '\U000000B8',
"CenterDot;": '\U000000B7',
"Cfr;": '\U0000212D',
"Chi;": '\U000003A7',
"CircleDot;": '\U00002299',
"CircleMinus;": '\U00002296',
"CirclePlus;": '\U00002295',
"CircleTimes;": '\U00002297',
"ClockwiseContourIntegral;": '\U00002232',
"CloseCurlyDoubleQuote;": '\U0000201D',
"CloseCurlyQuote;": '\U00002019',
"Colon;": '\U00002237',
"Colone;": '\U00002A74',
"Congruent;": '\U00002261',
"Conint;": '\U0000222F',
"ContourIntegral;": '\U0000222E',
"Copf;": '\U00002102',
"Coproduct;": '\U00002210',
"CounterClockwiseContourIntegral;": '\U00002233',
"Cross;": '\U00002A2F',
"Cscr;": '\U0001D49E',
"Cup;": '\U000022D3',
"CupCap;": '\U0000224D',
"DD;": '\U00002145',
"DDotrahd;": '\U00002911',
"DJcy;": '\U00000402',
"DScy;": '\U00000405',
"DZcy;": '\U0000040F',
"Dagger;": '\U00002021',
"Darr;": '\U000021A1',
"Dashv;": '\U00002AE4',
"Dcaron;": '\U0000010E',
"Dcy;": '\U00000414',
"Del;": '\U00002207',
"Delta;": '\U00000394',
"Dfr;": '\U0001D507',
"DiacriticalAcute;": '\U000000B4',
"DiacriticalDot;": '\U000002D9',
"DiacriticalDoubleAcute;": '\U000002DD',
"DiacriticalGrave;": '\U00000060',
"DiacriticalTilde;": '\U000002DC',
"Diamond;": '\U000022C4',
"DifferentialD;": '\U00002146',
"Dopf;": '\U0001D53B',
"Dot;": '\U000000A8',
"DotDot;": '\U000020DC',
"DotEqual;": '\U00002250',
"DoubleContourIntegral;": '\U0000222F',
"DoubleDot;": '\U000000A8',
"DoubleDownArrow;": '\U000021D3',
"DoubleLeftArrow;": '\U000021D0',
"DoubleLeftRightArrow;": '\U000021D4',
"DoubleLeftTee;": '\U00002AE4',
"DoubleLongLeftArrow;": '\U000027F8',
"DoubleLongLeftRightArrow;": '\U000027FA',
"DoubleLongRightArrow;": '\U000027F9',
"DoubleRightArrow;": '\U000021D2',
"DoubleRightTee;": '\U000022A8',
"DoubleUpArrow;": '\U000021D1',
"DoubleUpDownArrow;": '\U000021D5',
"DoubleVerticalBar;": '\U00002225',
"DownArrow;": '\U00002193',
"DownArrowBar;": '\U00002913',
"DownArrowUpArrow;": '\U000021F5',
"DownBreve;": '\U00000311',
"DownLeftRightVector;": '\U00002950',
"DownLeftTeeVector;": '\U0000295E',
"DownLeftVector;": '\U000021BD',
"DownLeftVectorBar;": '\U00002956',
"DownRightTeeVector;": '\U0000295F',
"DownRightVector;": '\U000021C1',
"DownRightVectorBar;": '\U00002957',
"DownTee;": '\U000022A4',
"DownTeeArrow;": '\U000021A7',
"Downarrow;": '\U000021D3',
"Dscr;": '\U0001D49F',
"Dstrok;": '\U00000110',
"ENG;": '\U0000014A',
"ETH;": '\U000000D0',
"Eacute;": '\U000000C9',
"Ecaron;": '\U0000011A',
"Ecirc;": '\U000000CA',
"Ecy;": '\U0000042D',
"Edot;": '\U00000116',
"Efr;": '\U0001D508',
"Egrave;": '\U000000C8',
"Element;": '\U00002208',
"Emacr;": '\U00000112',
"EmptySmallSquare;": '\U000025FB',
"EmptyVerySmallSquare;": '\U000025AB',
"Eogon;": '\U00000118',
"Eopf;": '\U0001D53C',
"Epsilon;": '\U00000395',
"Equal;": '\U00002A75',
"EqualTilde;": '\U00002242',
"Equilibrium;": '\U000021CC',
"Escr;": '\U00002130',
"Esim;": '\U00002A73',
"Eta;": '\U00000397',
"Euml;": '\U000000CB',
"Exists;": '\U00002203',
"ExponentialE;": '\U00002147',
"Fcy;": '\U00000424',
"Ffr;": '\U0001D509',
"FilledSmallSquare;": '\U000025FC',
"FilledVerySmallSquare;": '\U000025AA',
"Fopf;": '\U0001D53D',
"ForAll;": '\U00002200',
"Fouriertrf;": '\U00002131',
"Fscr;": '\U00002131',
"GJcy;": '\U00000403',
"GT;": '\U0000003E',
"Gamma;": '\U00000393',
"Gammad;": '\U000003DC',
"Gbreve;": '\U0000011E',
"Gcedil;": '\U00000122',
"Gcirc;": '\U0000011C',
"Gcy;": '\U00000413',
"Gdot;": '\U00000120',
"Gfr;": '\U0001D50A',
"Gg;": '\U000022D9',
"Gopf;": '\U0001D53E',
"GreaterEqual;": '\U00002265',
"GreaterEqualLess;": '\U000022DB',
"GreaterFullEqual;": '\U00002267',
"GreaterGreater;": '\U00002AA2',
"GreaterLess;": '\U00002277',
"GreaterSlantEqual;": '\U00002A7E',
"GreaterTilde;": '\U00002273',
"Gscr;": '\U0001D4A2',
"Gt;": '\U0000226B',
"HARDcy;": '\U0000042A',
"Hacek;": '\U000002C7',
"Hat;": '\U0000005E',
"Hcirc;": '\U00000124',
"Hfr;": '\U0000210C',
"HilbertSpace;": '\U0000210B',
"Hopf;": '\U0000210D',
"HorizontalLine;": '\U00002500',
"Hscr;": '\U0000210B',
"Hstrok;": '\U00000126',
"HumpDownHump;": '\U0000224E',
"HumpEqual;": '\U0000224F',
"IEcy;": '\U00000415',
"IJlig;": '\U00000132',
"IOcy;": '\U00000401',
"Iacute;": '\U000000CD',
"Icirc;": '\U000000CE',
"Icy;": '\U00000418',
"Idot;": '\U00000130',
"Ifr;": '\U00002111',
"Igrave;": '\U000000CC',
"Im;": '\U00002111',
"Imacr;": '\U0000012A',
"ImaginaryI;": '\U00002148',
"Implies;": '\U000021D2',
"Int;": '\U0000222C',
"Integral;": '\U0000222B',
"Intersection;": '\U000022C2',
"InvisibleComma;": '\U00002063',
"InvisibleTimes;": '\U00002062',
"Iogon;": '\U0000012E',
"Iopf;": '\U0001D540',
"Iota;": '\U00000399',
"Iscr;": '\U00002110',
"Itilde;": '\U00000128',
"Iukcy;": '\U00000406',
"Iuml;": '\U000000CF',
"Jcirc;": '\U00000134',
"Jcy;": '\U00000419',
"Jfr;": '\U0001D50D',
"Jopf;": '\U0001D541',
"Jscr;": '\U0001D4A5',
"Jsercy;": '\U00000408',
"Jukcy;": '\U00000404',
"KHcy;": '\U00000425',
"KJcy;": '\U0000040C',
"Kappa;": '\U0000039A',
"Kcedil;": '\U00000136',
"Kcy;": '\U0000041A',
"Kfr;": '\U0001D50E',
"Kopf;": '\U0001D542',
"Kscr;": '\U0001D4A6',
"LJcy;": '\U00000409',
"LT;": '\U0000003C',
"Lacute;": '\U00000139',
"Lambda;": '\U0000039B',
"Lang;": '\U000027EA',
"Laplacetrf;": '\U00002112',
"Larr;": '\U0000219E',
"Lcaron;": '\U0000013D',
"Lcedil;": '\U0000013B',
"Lcy;": '\U0000041B',
"LeftAngleBracket;": '\U000027E8',
"LeftArrow;": '\U00002190',
"LeftArrowBar;": '\U000021E4',
"LeftArrowRightArrow;": '\U000021C6',
"LeftCeiling;": '\U00002308',
"LeftDoubleBracket;": '\U000027E6',
"LeftDownTeeVector;": '\U00002961',
"LeftDownVector;": '\U000021C3',
"LeftDownVectorBar;": '\U00002959',
"LeftFloor;": '\U0000230A',
"LeftRightArrow;": '\U00002194',
"LeftRightVector;": '\U0000294E',
"LeftTee;": '\U000022A3',
"LeftTeeArrow;": '\U000021A4',
"LeftTeeVector;": '\U0000295A',
"LeftTriangle;": '\U000022B2',
"LeftTriangleBar;": '\U000029CF',
"LeftTriangleEqual;": '\U000022B4',
"LeftUpDownVector;": '\U00002951',
"LeftUpTeeVector;": '\U00002960',
"LeftUpVector;": '\U000021BF',
"LeftUpVectorBar;": '\U00002958',
"LeftVector;": '\U000021BC',
"LeftVectorBar;": '\U00002952',
"Leftarrow;": '\U000021D0',
"Leftrightarrow;": '\U000021D4',
"LessEqualGreater;": '\U000022DA',
"LessFullEqual;": '\U00002266',
"LessGreater;": '\U00002276',
"LessLess;": '\U00002AA1',
"LessSlantEqual;": '\U00002A7D',
"LessTilde;": '\U00002272',
"Lfr;": '\U0001D50F',
"Ll;": '\U000022D8',
"Lleftarrow;": '\U000021DA',
"Lmidot;": '\U0000013F',
"LongLeftArrow;": '\U000027F5',
"LongLeftRightArrow;": '\U000027F7',
"LongRightArrow;": '\U000027F6',
"Longleftarrow;": '\U000027F8',
"Longleftrightarrow;": '\U000027FA',
"Longrightarrow;": '\U000027F9',
"Lopf;": '\U0001D543',
"LowerLeftArrow;": '\U00002199',
"LowerRightArrow;": '\U00002198',
"Lscr;": '\U00002112',
"Lsh;": '\U000021B0',
"Lstrok;": '\U00000141',
"Lt;": '\U0000226A',
"Map;": '\U00002905',
"Mcy;": '\U0000041C',
"MediumSpace;": '\U0000205F',
"Mellintrf;": '\U00002133',
"Mfr;": '\U0001D510',
"MinusPlus;": '\U00002213',
"Mopf;": '\U0001D544',
"Mscr;": '\U00002133',
"Mu;": '\U0000039C',
"NJcy;": '\U0000040A',
"Nacute;": '\U00000143',
"Ncaron;": '\U00000147',
"Ncedil;": '\U00000145',
"Ncy;": '\U0000041D',
"NegativeMediumSpace;": '\U0000200B',
"NegativeThickSpace;": '\U0000200B',
"NegativeThinSpace;": '\U0000200B',
"NegativeVeryThinSpace;": '\U0000200B',
"NestedGreaterGreater;": '\U0000226B',
"NestedLessLess;": '\U0000226A',
"NewLine;": '\U0000000A',
"Nfr;": '\U0001D511',
"NoBreak;": '\U00002060',
"NonBreakingSpace;": '\U000000A0',
"Nopf;": '\U00002115',
"Not;": '\U00002AEC',
"NotCongruent;": '\U00002262',
"NotCupCap;": '\U0000226D',
"NotDoubleVerticalBar;": '\U00002226',
"NotElement;": '\U00002209',
"NotEqual;": '\U00002260',
"NotExists;": '\U00002204',
"NotGreater;": '\U0000226F',
"NotGreaterEqual;": '\U00002271',
"NotGreaterLess;": '\U00002279',
"NotGreaterTilde;": '\U00002275',
"NotLeftTriangle;": '\U000022EA',
"NotLeftTriangleEqual;": '\U000022EC',
"NotLess;": '\U0000226E',
"NotLessEqual;": '\U00002270',
"NotLessGreater;": '\U00002278',
"NotLessTilde;": '\U00002274',
"NotPrecedes;": '\U00002280',
"NotPrecedesSlantEqual;": '\U000022E0',
"NotReverseElement;": '\U0000220C',
"NotRightTriangle;": '\U000022EB',
"NotRightTriangleEqual;": '\U000022ED',
"NotSquareSubsetEqual;": '\U000022E2',
"NotSquareSupersetEqual;": '\U000022E3',
"NotSubsetEqual;": '\U00002288',
"NotSucceeds;": '\U00002281',
"NotSucceedsSlantEqual;": '\U000022E1',
"NotSupersetEqual;": '\U00002289',
"NotTilde;": '\U00002241',
"NotTildeEqual;": '\U00002244',
"NotTildeFullEqual;": '\U00002247',
"NotTildeTilde;": '\U00002249',
"NotVerticalBar;": '\U00002224',
"Nscr;": '\U0001D4A9',
"Ntilde;": '\U000000D1',
"Nu;": '\U0000039D',
"OElig;": '\U00000152',
"Oacute;": '\U000000D3',
"Ocirc;": '\U000000D4',
"Ocy;": '\U0000041E',
"Odblac;": '\U00000150',
"Ofr;": '\U0001D512',
"Ograve;": '\U000000D2',
"Omacr;": '\U0000014C',
"Omega;": '\U000003A9',
"Omicron;": '\U0000039F',
"Oopf;": '\U0001D546',
"OpenCurlyDoubleQuote;": '\U0000201C',
"OpenCurlyQuote;": '\U00002018',
"Or;": '\U00002A54',
"Oscr;": '\U0001D4AA',
"Oslash;": '\U000000D8',
"Otilde;": '\U000000D5',
"Otimes;": '\U00002A37',
"Ouml;": '\U000000D6',
"OverBar;": '\U0000203E',
"OverBrace;": '\U000023DE',
"OverBracket;": '\U000023B4',
"OverParenthesis;": '\U000023DC',
"PartialD;": '\U00002202',
"Pcy;": '\U0000041F',
"Pfr;": '\U0001D513',
"Phi;": '\U000003A6',
"Pi;": '\U000003A0',
"PlusMinus;": '\U000000B1',
"Poincareplane;": '\U0000210C',
"Popf;": '\U00002119',
"Pr;": '\U00002ABB',
"Precedes;": '\U0000227A',
"PrecedesEqual;": '\U00002AAF',
"PrecedesSlantEqual;": '\U0000227C',
"PrecedesTilde;": '\U0000227E',
"Prime;": '\U00002033',
"Product;": '\U0000220F',
"Proportion;": '\U00002237',
"Proportional;": '\U0000221D',
"Pscr;": '\U0001D4AB',
"Psi;": '\U000003A8',
"QUOT;": '\U00000022',
"Qfr;": '\U0001D514',
"Qopf;": '\U0000211A',
"Qscr;": '\U0001D4AC',
"RBarr;": '\U00002910',
"REG;": '\U000000AE',
"Racute;": '\U00000154',
"Rang;": '\U000027EB',
"Rarr;": '\U000021A0',
"Rarrtl;": '\U00002916',
"Rcaron;": '\U00000158',
"Rcedil;": '\U00000156',
"Rcy;": '\U00000420',
"Re;": '\U0000211C',
"ReverseElement;": '\U0000220B',
"ReverseEquilibrium;": '\U000021CB',
"ReverseUpEquilibrium;": '\U0000296F',
"Rfr;": '\U0000211C',
"Rho;": '\U000003A1',
"RightAngleBracket;": '\U000027E9',
"RightArrow;": '\U00002192',
"RightArrowBar;": '\U000021E5',
"RightArrowLeftArrow;": '\U000021C4',
"RightCeiling;": '\U00002309',
"RightDoubleBracket;": '\U000027E7',
"RightDownTeeVector;": '\U0000295D',
"RightDownVector;": '\U000021C2',
"RightDownVectorBar;": '\U00002955',
"RightFloor;": '\U0000230B',
"RightTee;": '\U000022A2',
"RightTeeArrow;": '\U000021A6',
"RightTeeVector;": '\U0000295B',
"RightTriangle;": '\U000022B3',
"RightTriangleBar;": '\U000029D0',
"RightTriangleEqual;": '\U000022B5',
"RightUpDownVector;": '\U0000294F',
"RightUpTeeVector;": '\U0000295C',
"RightUpVector;": '\U000021BE',
"RightUpVectorBar;": '\U00002954',
"RightVector;": '\U000021C0',
"RightVectorBar;": '\U00002953',
"Rightarrow;": '\U000021D2',
"Ropf;": '\U0000211D',
"RoundImplies;": '\U00002970',
"Rrightarrow;": '\U000021DB',
"Rscr;": '\U0000211B',
"Rsh;": '\U000021B1',
"RuleDelayed;": '\U000029F4',
"SHCHcy;": '\U00000429',
"SHcy;": '\U00000428',
"SOFTcy;": '\U0000042C',
"Sacute;": '\U0000015A',
"Sc;": '\U00002ABC',
"Scaron;": '\U00000160',
"Scedil;": '\U0000015E',
"Scirc;": '\U0000015C',
"Scy;": '\U00000421',
"Sfr;": '\U0001D516',
"ShortDownArrow;": '\U00002193',
"ShortLeftArrow;": '\U00002190',
"ShortRightArrow;": '\U00002192',
"ShortUpArrow;": '\U00002191',
"Sigma;": '\U000003A3',
"SmallCircle;": '\U00002218',
"Sopf;": '\U0001D54A',
"Sqrt;": '\U0000221A',
"Square;": '\U000025A1',
"SquareIntersection;": '\U00002293',
"SquareSubset;": '\U0000228F',
"SquareSubsetEqual;": '\U00002291',
"SquareSuperset;": '\U00002290',
"SquareSupersetEqual;": '\U00002292',
"SquareUnion;": '\U00002294',
"Sscr;": '\U0001D4AE',
"Star;": '\U000022C6',
"Sub;": '\U000022D0',
"Subset;": '\U000022D0',
"SubsetEqual;": '\U00002286',
"Succeeds;": '\U0000227B',
"SucceedsEqual;": '\U00002AB0',
"SucceedsSlantEqual;": '\U0000227D',
"SucceedsTilde;": '\U0000227F',
"SuchThat;": '\U0000220B',
"Sum;": '\U00002211',
"Sup;": '\U000022D1',
"Superset;": '\U00002283',
"SupersetEqual;": '\U00002287',
"Supset;": '\U000022D1',
"THORN;": '\U000000DE',
"TRADE;": '\U00002122',
"TSHcy;": '\U0000040B',
"TScy;": '\U00000426',
"Tab;": '\U00000009',
"Tau;": '\U000003A4',
"Tcaron;": '\U00000164',
"Tcedil;": '\U00000162',
"Tcy;": '\U00000422',
"Tfr;": '\U0001D517',
"Therefore;": '\U00002234',
"Theta;": '\U00000398',
"ThinSpace;": '\U00002009',
"Tilde;": '\U0000223C',
"TildeEqual;": '\U00002243',
"TildeFullEqual;": '\U00002245',
"TildeTilde;": '\U00002248',
"Topf;": '\U0001D54B',
"TripleDot;": '\U000020DB',
"Tscr;": '\U0001D4AF',
"Tstrok;": '\U00000166',
"Uacute;": '\U000000DA',
"Uarr;": '\U0000219F',
"Uarrocir;": '\U00002949',
"Ubrcy;": '\U0000040E',
"Ubreve;": '\U0000016C',
"Ucirc;": '\U000000DB',
"Ucy;": '\U00000423',
"Udblac;": '\U00000170',
"Ufr;": '\U0001D518',
"Ugrave;": '\U000000D9',
"Umacr;": '\U0000016A',
"UnderBar;": '\U0000005F',
"UnderBrace;": '\U000023DF',
"UnderBracket;": '\U000023B5',
"UnderParenthesis;": '\U000023DD',
"Union;": '\U000022C3',
"UnionPlus;": '\U0000228E',
"Uogon;": '\U00000172',
"Uopf;": '\U0001D54C',
"UpArrow;": '\U00002191',
"UpArrowBar;": '\U00002912',
"UpArrowDownArrow;": '\U000021C5',
"UpDownArrow;": '\U00002195',
"UpEquilibrium;": '\U0000296E',
"UpTee;": '\U000022A5',
"UpTeeArrow;": '\U000021A5',
"Uparrow;": '\U000021D1',
"Updownarrow;": '\U000021D5',
"UpperLeftArrow;": '\U00002196',
"UpperRightArrow;": '\U00002197',
"Upsi;": '\U000003D2',
"Upsilon;": '\U000003A5',
"Uring;": '\U0000016E',
"Uscr;": '\U0001D4B0',
"Utilde;": '\U00000168',
"Uuml;": '\U000000DC',
"VDash;": '\U000022AB',
"Vbar;": '\U00002AEB',
"Vcy;": '\U00000412',
"Vdash;": '\U000022A9',
"Vdashl;": '\U00002AE6',
"Vee;": '\U000022C1',
"Verbar;": '\U00002016',
"Vert;": '\U00002016',
"VerticalBar;": '\U00002223',
"VerticalLine;": '\U0000007C',
"VerticalSeparator;": '\U00002758',
"VerticalTilde;": '\U00002240',
"VeryThinSpace;": '\U0000200A',
"Vfr;": '\U0001D519',
"Vopf;": '\U0001D54D',
"Vscr;": '\U0001D4B1',
"Vvdash;": '\U000022AA',
"Wcirc;": '\U00000174',
"Wedge;": '\U000022C0',
"Wfr;": '\U0001D51A',
"Wopf;": '\U0001D54E',
"Wscr;": '\U0001D4B2',
"Xfr;": '\U0001D51B',
"Xi;": '\U0000039E',
"Xopf;": '\U0001D54F',
"Xscr;": '\U0001D4B3',
"YAcy;": '\U0000042F',
"YIcy;": '\U00000407',
"YUcy;": '\U0000042E',
"Yacute;": '\U000000DD',
"Ycirc;": '\U00000176',
"Ycy;": '\U0000042B',
"Yfr;": '\U0001D51C',
"Yopf;": '\U0001D550',
"Yscr;": '\U0001D4B4',
"Yuml;": '\U00000178',
"ZHcy;": '\U00000416',
"Zacute;": '\U00000179',
"Zcaron;": '\U0000017D',
"Zcy;": '\U00000417',
"Zdot;": '\U0000017B',
"ZeroWidthSpace;": '\U0000200B',
"Zeta;": '\U00000396',
"Zfr;": '\U00002128',
"Zopf;": '\U00002124',
"Zscr;": '\U0001D4B5',
"aacute;": '\U000000E1',
"abreve;": '\U00000103',
"ac;": '\U0000223E',
"acd;": '\U0000223F',
"acirc;": '\U000000E2',
"acute;": '\U000000B4',
"acy;": '\U00000430',
"aelig;": '\U000000E6',
"af;": '\U00002061',
"afr;": '\U0001D51E',
"agrave;": '\U000000E0',
"alefsym;": '\U00002135',
"aleph;": '\U00002135',
"alpha;": '\U000003B1',
"amacr;": '\U00000101',
"amalg;": '\U00002A3F',
"amp;": '\U00000026',
"and;": '\U00002227',
"andand;": '\U00002A55',
"andd;": '\U00002A5C',
"andslope;": '\U00002A58',
"andv;": '\U00002A5A',
"ang;": '\U00002220',
"ange;": '\U000029A4',
"angle;": '\U00002220',
"angmsd;": '\U00002221',
"angmsdaa;": '\U000029A8',
"angmsdab;": '\U000029A9',
"angmsdac;": '\U000029AA',
"angmsdad;": '\U000029AB',
"angmsdae;": '\U000029AC',
"angmsdaf;": '\U000029AD',
"angmsdag;": '\U000029AE',
"angmsdah;": '\U000029AF',
"angrt;": '\U0000221F',
"angrtvb;": '\U000022BE',
"angrtvbd;": '\U0000299D',
"angsph;": '\U00002222',
"angst;": '\U000000C5',
"angzarr;": '\U0000237C',
"aogon;": '\U00000105',
"aopf;": '\U0001D552',
"ap;": '\U00002248',
"apE;": '\U00002A70',
"apacir;": '\U00002A6F',
"ape;": '\U0000224A',
"apid;": '\U0000224B',
"apos;": '\U00000027',
"approx;": '\U00002248',
"approxeq;": '\U0000224A',
"aring;": '\U000000E5',
"ascr;": '\U0001D4B6',
"ast;": '\U0000002A',
"asymp;": '\U00002248',
"asympeq;": '\U0000224D',
"atilde;": '\U000000E3',
"auml;": '\U000000E4',
"awconint;": '\U00002233',
"awint;": '\U00002A11',
"bNot;": '\U00002AED',
"backcong;": '\U0000224C',
"backepsilon;": '\U000003F6',
"backprime;": '\U00002035',
"backsim;": '\U0000223D',
"backsimeq;": '\U000022CD',
"barvee;": '\U000022BD',
"barwed;": '\U00002305',
"barwedge;": '\U00002305',
"bbrk;": '\U000023B5',
"bbrktbrk;": '\U000023B6',
"bcong;": '\U0000224C',
"bcy;": '\U00000431',
"bdquo;": '\U0000201E',
"becaus;": '\U00002235',
"because;": '\U00002235',
"bemptyv;": '\U000029B0',
"bepsi;": '\U000003F6',
"bernou;": '\U0000212C',
"beta;": '\U000003B2',
"beth;": '\U00002136',
"between;": '\U0000226C',
"bfr;": '\U0001D51F',
"bigcap;": '\U000022C2',
"bigcirc;": '\U000025EF',
"bigcup;": '\U000022C3',
"bigodot;": '\U00002A00',
"bigoplus;": '\U00002A01',
"bigotimes;": '\U00002A02',
"bigsqcup;": '\U00002A06',
"bigstar;": '\U00002605',
"bigtriangledown;": '\U000025BD',
"bigtriangleup;": '\U000025B3',
"biguplus;": '\U00002A04',
"bigvee;": '\U000022C1',
"bigwedge;": '\U000022C0',
"bkarow;": '\U0000290D',
"blacklozenge;": '\U000029EB',
"blacksquare;": '\U000025AA',
"blacktriangle;": '\U000025B4',
"blacktriangledown;": '\U000025BE',
"blacktriangleleft;": '\U000025C2',
"blacktriangleright;": '\U000025B8',
"blank;": '\U00002423',
"blk12;": '\U00002592',
"blk14;": '\U00002591',
"blk34;": '\U00002593',
"block;": '\U00002588',
"bnot;": '\U00002310',
"bopf;": '\U0001D553',
"bot;": '\U000022A5',
"bottom;": '\U000022A5',
"bowtie;": '\U000022C8',
"boxDL;": '\U00002557',
"boxDR;": '\U00002554',
"boxDl;": '\U00002556',
"boxDr;": '\U00002553',
"boxH;": '\U00002550',
"boxHD;": '\U00002566',
"boxHU;": '\U00002569',
"boxHd;": '\U00002564',
"boxHu;": '\U00002567',
"boxUL;": '\U0000255D',
"boxUR;": '\U0000255A',
"boxUl;": '\U0000255C',
"boxUr;": '\U00002559',
"boxV;": '\U00002551',
"boxVH;": '\U0000256C',
"boxVL;": '\U00002563',
"boxVR;": '\U00002560',
"boxVh;": '\U0000256B',
"boxVl;": '\U00002562',
"boxVr;": '\U0000255F',
"boxbox;": '\U000029C9',
"boxdL;": '\U00002555',
"boxdR;": '\U00002552',
"boxdl;": '\U00002510',
"boxdr;": '\U0000250C',
"boxh;": '\U00002500',
"boxhD;": '\U00002565',
"boxhU;": '\U00002568',
"boxhd;": '\U0000252C',
"boxhu;": '\U00002534',
"boxminus;": '\U0000229F',
"boxplus;": '\U0000229E',
"boxtimes;": '\U000022A0',
"boxuL;": '\U0000255B',
"boxuR;": '\U00002558',
"boxul;": '\U00002518',
"boxur;": '\U00002514',
"boxv;": '\U00002502',
"boxvH;": '\U0000256A',
"boxvL;": '\U00002561',
"boxvR;": '\U0000255E',
"boxvh;": '\U0000253C',
"boxvl;": '\U00002524',
"boxvr;": '\U0000251C',
"bprime;": '\U00002035',
"breve;": '\U000002D8',
"brvbar;": '\U000000A6',
"bscr;": '\U0001D4B7',
"bsemi;": '\U0000204F',
"bsim;": '\U0000223D',
"bsime;": '\U000022CD',
"bsol;": '\U0000005C',
"bsolb;": '\U000029C5',
"bsolhsub;": '\U000027C8',
"bull;": '\U00002022',
"bullet;": '\U00002022',
"bump;": '\U0000224E',
"bumpE;": '\U00002AAE',
"bumpe;": '\U0000224F',
"bumpeq;": '\U0000224F',
"cacute;": '\U00000107',
"cap;": '\U00002229',
"capand;": '\U00002A44',
"capbrcup;": '\U00002A49',
"capcap;": '\U00002A4B',
"capcup;": '\U00002A47',
"capdot;": '\U00002A40',
"caret;": '\U00002041',
"caron;": '\U000002C7',
"ccaps;": '\U00002A4D',
"ccaron;": '\U0000010D',
"ccedil;": '\U000000E7',
"ccirc;": '\U00000109',
"ccups;": '\U00002A4C',
"ccupssm;": '\U00002A50',
"cdot;": '\U0000010B',
"cedil;": '\U000000B8',
"cemptyv;": '\U000029B2',
"cent;": '\U000000A2',
"centerdot;": '\U000000B7',
"cfr;": '\U0001D520',
"chcy;": '\U00000447',
"check;": '\U00002713',
"checkmark;": '\U00002713',
"chi;": '\U000003C7',
"cir;": '\U000025CB',
"cirE;": '\U000029C3',
"circ;": '\U000002C6',
"circeq;": '\U00002257',
"circlearrowleft;": '\U000021BA',
"circlearrowright;": '\U000021BB',
"circledR;": '\U000000AE',
"circledS;": '\U000024C8',
"circledast;": '\U0000229B',
"circledcirc;": '\U0000229A',
"circleddash;": '\U0000229D',
"cire;": '\U00002257',
"cirfnint;": '\U00002A10',
"cirmid;": '\U00002AEF',
"cirscir;": '\U000029C2',
"clubs;": '\U00002663',
"clubsuit;": '\U00002663',
"colon;": '\U0000003A',
"colone;": '\U00002254',
"coloneq;": '\U00002254',
"comma;": '\U0000002C',
"commat;": '\U00000040',
"comp;": '\U00002201',
"compfn;": '\U00002218',
"complement;": '\U00002201',
"complexes;": '\U00002102',
"cong;": '\U00002245',
"congdot;": '\U00002A6D',
"conint;": '\U0000222E',
"copf;": '\U0001D554',
"coprod;": '\U00002210',
"copy;": '\U000000A9',
"copysr;": '\U00002117',
"crarr;": '\U000021B5',
"cross;": '\U00002717',
"cscr;": '\U0001D4B8',
"csub;": '\U00002ACF',
"csube;": '\U00002AD1',
"csup;": '\U00002AD0',
"csupe;": '\U00002AD2',
"ctdot;": '\U000022EF',
"cudarrl;": '\U00002938',
"cudarrr;": '\U00002935',
"cuepr;": '\U000022DE',
"cuesc;": '\U000022DF',
"cularr;": '\U000021B6',
"cularrp;": '\U0000293D',
"cup;": '\U0000222A',
"cupbrcap;": '\U00002A48',
"cupcap;": '\U00002A46',
"cupcup;": '\U00002A4A',
"cupdot;": '\U0000228D',
"cupor;": '\U00002A45',
"curarr;": '\U000021B7',
"curarrm;": '\U0000293C',
"curlyeqprec;": '\U000022DE',
"curlyeqsucc;": '\U000022DF',
"curlyvee;": '\U000022CE',
"curlywedge;": '\U000022CF',
"curren;": '\U000000A4',
"curvearrowleft;": '\U000021B6',
"curvearrowright;": '\U000021B7',
"cuvee;": '\U000022CE',
"cuwed;": '\U000022CF',
"cwconint;": '\U00002232',
"cwint;": '\U00002231',
"cylcty;": '\U0000232D',
"dArr;": '\U000021D3',
"dHar;": '\U00002965',
"dagger;": '\U00002020',
"daleth;": '\U00002138',
"darr;": '\U00002193',
"dash;": '\U00002010',
"dashv;": '\U000022A3',
"dbkarow;": '\U0000290F',
"dblac;": '\U000002DD',
"dcaron;": '\U0000010F',
"dcy;": '\U00000434',
"dd;": '\U00002146',
"ddagger;": '\U00002021',
"ddarr;": '\U000021CA',
"ddotseq;": '\U00002A77',
"deg;": '\U000000B0',
"delta;": '\U000003B4',
"demptyv;": '\U000029B1',
"dfisht;": '\U0000297F',
"dfr;": '\U0001D521',
"dharl;": '\U000021C3',
"dharr;": '\U000021C2',
"diam;": '\U000022C4',
"diamond;": '\U000022C4',
"diamondsuit;": '\U00002666',
"diams;": '\U00002666',
"die;": '\U000000A8',
"digamma;": '\U000003DD',
"disin;": '\U000022F2',
"div;": '\U000000F7',
"divide;": '\U000000F7',
"divideontimes;": '\U000022C7',
"divonx;": '\U000022C7',
"djcy;": '\U00000452',
"dlcorn;": '\U0000231E',
"dlcrop;": '\U0000230D',
"dollar;": '\U00000024',
"dopf;": '\U0001D555',
"dot;": '\U000002D9',
"doteq;": '\U00002250',
"doteqdot;": '\U00002251',
"dotminus;": '\U00002238',
"dotplus;": '\U00002214',
"dotsquare;": '\U000022A1',
"doublebarwedge;": '\U00002306',
"downarrow;": '\U00002193',
"downdownarrows;": '\U000021CA',
"downharpoonleft;": '\U000021C3',
"downharpoonright;": '\U000021C2',
"drbkarow;": '\U00002910',
"drcorn;": '\U0000231F',
"drcrop;": '\U0000230C',
"dscr;": '\U0001D4B9',
"dscy;": '\U00000455',
"dsol;": '\U000029F6',
"dstrok;": '\U00000111',
"dtdot;": '\U000022F1',
"dtri;": '\U000025BF',
"dtrif;": '\U000025BE',
"duarr;": '\U000021F5',
"duhar;": '\U0000296F',
"dwangle;": '\U000029A6',
"dzcy;": '\U0000045F',
"dzigrarr;": '\U000027FF',
"eDDot;": '\U00002A77',
"eDot;": '\U00002251',
"eacute;": '\U000000E9',
"easter;": '\U00002A6E',
"ecaron;": '\U0000011B',
"ecir;": '\U00002256',
"ecirc;": '\U000000EA',
"ecolon;": '\U00002255',
"ecy;": '\U0000044D',
"edot;": '\U00000117',
"ee;": '\U00002147',
"efDot;": '\U00002252',
"efr;": '\U0001D522',
"eg;": '\U00002A9A',
"egrave;": '\U000000E8',
"egs;": '\U00002A96',
"egsdot;": '\U00002A98',
"el;": '\U00002A99',
"elinters;": '\U000023E7',
"ell;": '\U00002113',
"els;": '\U00002A95',
"elsdot;": '\U00002A97',
"emacr;": '\U00000113',
"empty;": '\U00002205',
"emptyset;": '\U00002205',
"emptyv;": '\U00002205',
"emsp;": '\U00002003',
"emsp13;": '\U00002004',
"emsp14;": '\U00002005',
"eng;": '\U0000014B',
"ensp;": '\U00002002',
"eogon;": '\U00000119',
"eopf;": '\U0001D556',
"epar;": '\U000022D5',
"eparsl;": '\U000029E3',
"eplus;": '\U00002A71',
"epsi;": '\U000003B5',
"epsilon;": '\U000003B5',
"epsiv;": '\U000003F5',
"eqcirc;": '\U00002256',
"eqcolon;": '\U00002255',
"eqsim;": '\U00002242',
"eqslantgtr;": '\U00002A96',
"eqslantless;": '\U00002A95',
"equals;": '\U0000003D',
"equest;": '\U0000225F',
"equiv;": '\U00002261',
"equivDD;": '\U00002A78',
"eqvparsl;": '\U000029E5',
"erDot;": '\U00002253',
"erarr;": '\U00002971',
"escr;": '\U0000212F',
"esdot;": '\U00002250',
"esim;": '\U00002242',
"eta;": '\U000003B7',
"eth;": '\U000000F0',
"euml;": '\U000000EB',
"euro;": '\U000020AC',
"excl;": '\U00000021',
"exist;": '\U00002203',
"expectation;": '\U00002130',
"exponentiale;": '\U00002147',
"fallingdotseq;": '\U00002252',
"fcy;": '\U00000444',
"female;": '\U00002640',
"ffilig;": '\U0000FB03',
"fflig;": '\U0000FB00',
"ffllig;": '\U0000FB04',
"ffr;": '\U0001D523',
"filig;": '\U0000FB01',
"flat;": '\U0000266D',
"fllig;": '\U0000FB02',
"fltns;": '\U000025B1',
"fnof;": '\U00000192',
"fopf;": '\U0001D557',
"forall;": '\U00002200',
"fork;": '\U000022D4',
"forkv;": '\U00002AD9',
"fpartint;": '\U00002A0D',
"frac12;": '\U000000BD',
"frac13;": '\U00002153',
"frac14;": '\U000000BC',
"frac15;": '\U00002155',
"frac16;": '\U00002159',
"frac18;": '\U0000215B',
"frac23;": '\U00002154',
"frac25;": '\U00002156',
"frac34;": '\U000000BE',
"frac35;": '\U00002157',
"frac38;": '\U0000215C',
"frac45;": '\U00002158',
"frac56;": '\U0000215A',
"frac58;": '\U0000215D',
"frac78;": '\U0000215E',
"frasl;": '\U00002044',
"frown;": '\U00002322',
"fscr;": '\U0001D4BB',
"gE;": '\U00002267',
"gEl;": '\U00002A8C',
"gacute;": '\U000001F5',
"gamma;": '\U000003B3',
"gammad;": '\U000003DD',
"gap;": '\U00002A86',
"gbreve;": '\U0000011F',
"gcirc;": '\U0000011D',
"gcy;": '\U00000433',
"gdot;": '\U00000121',
"ge;": '\U00002265',
"gel;": '\U000022DB',
"geq;": '\U00002265',
"geqq;": '\U00002267',
"geqslant;": '\U00002A7E',
"ges;": '\U00002A7E',
"gescc;": '\U00002AA9',
"gesdot;": '\U00002A80',
"gesdoto;": '\U00002A82',
"gesdotol;": '\U00002A84',
"gesles;": '\U00002A94',
"gfr;": '\U0001D524',
"gg;": '\U0000226B',
"ggg;": '\U000022D9',
"gimel;": '\U00002137',
"gjcy;": '\U00000453',
"gl;": '\U00002277',
"glE;": '\U00002A92',
"gla;": '\U00002AA5',
"glj;": '\U00002AA4',
"gnE;": '\U00002269',
"gnap;": '\U00002A8A',
"gnapprox;": '\U00002A8A',
"gne;": '\U00002A88',
"gneq;": '\U00002A88',
"gneqq;": '\U00002269',
"gnsim;": '\U000022E7',
"gopf;": '\U0001D558',
"grave;": '\U00000060',
"gscr;": '\U0000210A',
"gsim;": '\U00002273',
"gsime;": '\U00002A8E',
"gsiml;": '\U00002A90',
"gt;": '\U0000003E',
"gtcc;": '\U00002AA7',
"gtcir;": '\U00002A7A',
"gtdot;": '\U000022D7',
"gtlPar;": '\U00002995',
"gtquest;": '\U00002A7C',
"gtrapprox;": '\U00002A86',
"gtrarr;": '\U00002978',
"gtrdot;": '\U000022D7',
"gtreqless;": '\U000022DB',
"gtreqqless;": '\U00002A8C',
"gtrless;": '\U00002277',
"gtrsim;": '\U00002273',
"hArr;": '\U000021D4',
"hairsp;": '\U0000200A',
"half;": '\U000000BD',
"hamilt;": '\U0000210B',
"hardcy;": '\U0000044A',
"harr;": '\U00002194',
"harrcir;": '\U00002948',
"harrw;": '\U000021AD',
"hbar;": '\U0000210F',
"hcirc;": '\U00000125',
"hearts;": '\U00002665',
"heartsuit;": '\U00002665',
"hellip;": '\U00002026',
"hercon;": '\U000022B9',
"hfr;": '\U0001D525',
"hksearow;": '\U00002925',
"hkswarow;": '\U00002926',
"hoarr;": '\U000021FF',
"homtht;": '\U0000223B',
"hookleftarrow;": '\U000021A9',
"hookrightarrow;": '\U000021AA',
"hopf;": '\U0001D559',
"horbar;": '\U00002015',
"hscr;": '\U0001D4BD',
"hslash;": '\U0000210F',
"hstrok;": '\U00000127',
"hybull;": '\U00002043',
"hyphen;": '\U00002010',
"iacute;": '\U000000ED',
"ic;": '\U00002063',
"icirc;": '\U000000EE',
"icy;": '\U00000438',
"iecy;": '\U00000435',
"iexcl;": '\U000000A1',
"iff;": '\U000021D4',
"ifr;": '\U0001D526',
"igrave;": '\U000000EC',
"ii;": '\U00002148',
"iiiint;": '\U00002A0C',
"iiint;": '\U0000222D',
"iinfin;": '\U000029DC',
"iiota;": '\U00002129',
"ijlig;": '\U00000133',
"imacr;": '\U0000012B',
"image;": '\U00002111',
"imagline;": '\U00002110',
"imagpart;": '\U00002111',
"imath;": '\U00000131',
"imof;": '\U000022B7',
"imped;": '\U000001B5',
"in;": '\U00002208',
"incare;": '\U00002105',
"infin;": '\U0000221E',
"infintie;": '\U000029DD',
"inodot;": '\U00000131',
"int;": '\U0000222B',
"intcal;": '\U000022BA',
"integers;": '\U00002124',
"intercal;": '\U000022BA',
"intlarhk;": '\U00002A17',
"intprod;": '\U00002A3C',
"iocy;": '\U00000451',
"iogon;": '\U0000012F',
"iopf;": '\U0001D55A',
"iota;": '\U000003B9',
"iprod;": '\U00002A3C',
"iquest;": '\U000000BF',
"iscr;": '\U0001D4BE',
"isin;": '\U00002208',
"isinE;": '\U000022F9',
"isindot;": '\U000022F5',
"isins;": '\U000022F4',
"isinsv;": '\U000022F3',
"isinv;": '\U00002208',
"it;": '\U00002062',
"itilde;": '\U00000129',
"iukcy;": '\U00000456',
"iuml;": '\U000000EF',
"jcirc;": '\U00000135',
"jcy;": '\U00000439',
"jfr;": '\U0001D527',
"jmath;": '\U00000237',
"jopf;": '\U0001D55B',
"jscr;": '\U0001D4BF',
"jsercy;": '\U00000458',
"jukcy;": '\U00000454',
"kappa;": '\U000003BA',
"kappav;": '\U000003F0',
"kcedil;": '\U00000137',
"kcy;": '\U0000043A',
"kfr;": '\U0001D528',
"kgreen;": '\U00000138',
"khcy;": '\U00000445',
"kjcy;": '\U0000045C',
"kopf;": '\U0001D55C',
"kscr;": '\U0001D4C0',
"lAarr;": '\U000021DA',
"lArr;": '\U000021D0',
"lAtail;": '\U0000291B',
"lBarr;": '\U0000290E',
"lE;": '\U00002266',
"lEg;": '\U00002A8B',
"lHar;": '\U00002962',
"lacute;": '\U0000013A',
"laemptyv;": '\U000029B4',
"lagran;": '\U00002112',
"lambda;": '\U000003BB',
"lang;": '\U000027E8',
"langd;": '\U00002991',
"langle;": '\U000027E8',
"lap;": '\U00002A85',
"laquo;": '\U000000AB',
"larr;": '\U00002190',
"larrb;": '\U000021E4',
"larrbfs;": '\U0000291F',
"larrfs;": '\U0000291D',
"larrhk;": '\U000021A9',
"larrlp;": '\U000021AB',
"larrpl;": '\U00002939',
"larrsim;": '\U00002973',
"larrtl;": '\U000021A2',
"lat;": '\U00002AAB',
"latail;": '\U00002919',
"late;": '\U00002AAD',
"lbarr;": '\U0000290C',
"lbbrk;": '\U00002772',
"lbrace;": '\U0000007B',
"lbrack;": '\U0000005B',
"lbrke;": '\U0000298B',
"lbrksld;": '\U0000298F',
"lbrkslu;": '\U0000298D',
"lcaron;": '\U0000013E',
"lcedil;": '\U0000013C',
"lceil;": '\U00002308',
"lcub;": '\U0000007B',
"lcy;": '\U0000043B',
"ldca;": '\U00002936',
"ldquo;": '\U0000201C',
"ldquor;": '\U0000201E',
"ldrdhar;": '\U00002967',
"ldrushar;": '\U0000294B',
"ldsh;": '\U000021B2',
"le;": '\U00002264',
"leftarrow;": '\U00002190',
"leftarrowtail;": '\U000021A2',
"leftharpoondown;": '\U000021BD',
"leftharpoonup;": '\U000021BC',
"leftleftarrows;": '\U000021C7',
"leftrightarrow;": '\U00002194',
"leftrightarrows;": '\U000021C6',
"leftrightharpoons;": '\U000021CB',
"leftrightsquigarrow;": '\U000021AD',
"leftthreetimes;": '\U000022CB',
"leg;": '\U000022DA',
"leq;": '\U00002264',
"leqq;": '\U00002266',
"leqslant;": '\U00002A7D',
"les;": '\U00002A7D',
"lescc;": '\U00002AA8',
"lesdot;": '\U00002A7F',
"lesdoto;": '\U00002A81',
"lesdotor;": '\U00002A83',
"lesges;": '\U00002A93',
"lessapprox;": '\U00002A85',
"lessdot;": '\U000022D6',
"lesseqgtr;": '\U000022DA',
"lesseqqgtr;": '\U00002A8B',
"lessgtr;": '\U00002276',
"lesssim;": '\U00002272',
"lfisht;": '\U0000297C',
"lfloor;": '\U0000230A',
"lfr;": '\U0001D529',
"lg;": '\U00002276',
"lgE;": '\U00002A91',
"lhard;": '\U000021BD',
"lharu;": '\U000021BC',
"lharul;": '\U0000296A',
"lhblk;": '\U00002584',
"ljcy;": '\U00000459',
"ll;": '\U0000226A',
"llarr;": '\U000021C7',
"llcorner;": '\U0000231E',
"llhard;": '\U0000296B',
"lltri;": '\U000025FA',
"lmidot;": '\U00000140',
"lmoust;": '\U000023B0',
"lmoustache;": '\U000023B0',
"lnE;": '\U00002268',
"lnap;": '\U00002A89',
"lnapprox;": '\U00002A89',
"lne;": '\U00002A87',
"lneq;": '\U00002A87',
"lneqq;": '\U00002268',
"lnsim;": '\U000022E6',
"loang;": '\U000027EC',
"loarr;": '\U000021FD',
"lobrk;": '\U000027E6',
"longleftarrow;": '\U000027F5',
"longleftrightarrow;": '\U000027F7',
"longmapsto;": '\U000027FC',
"longrightarrow;": '\U000027F6',
"looparrowleft;": '\U000021AB',
"looparrowright;": '\U000021AC',
"lopar;": '\U00002985',
"lopf;": '\U0001D55D',
"loplus;": '\U00002A2D',
"lotimes;": '\U00002A34',
"lowast;": '\U00002217',
"lowbar;": '\U0000005F',
"loz;": '\U000025CA',
"lozenge;": '\U000025CA',
"lozf;": '\U000029EB',
"lpar;": '\U00000028',
"lparlt;": '\U00002993',
"lrarr;": '\U000021C6',
"lrcorner;": '\U0000231F',
"lrhar;": '\U000021CB',
"lrhard;": '\U0000296D',
"lrm;": '\U0000200E',
"lrtri;": '\U000022BF',
"lsaquo;": '\U00002039',
"lscr;": '\U0001D4C1',
"lsh;": '\U000021B0',
"lsim;": '\U00002272',
"lsime;": '\U00002A8D',
"lsimg;": '\U00002A8F',
"lsqb;": '\U0000005B',
"lsquo;": '\U00002018',
"lsquor;": '\U0000201A',
"lstrok;": '\U00000142',
"lt;": '\U0000003C',
"ltcc;": '\U00002AA6',
"ltcir;": '\U00002A79',
"ltdot;": '\U000022D6',
"lthree;": '\U000022CB',
"ltimes;": '\U000022C9',
"ltlarr;": '\U00002976',
"ltquest;": '\U00002A7B',
"ltrPar;": '\U00002996',
"ltri;": '\U000025C3',
"ltrie;": '\U000022B4',
"ltrif;": '\U000025C2',
"lurdshar;": '\U0000294A',
"luruhar;": '\U00002966',
"mDDot;": '\U0000223A',
"macr;": '\U000000AF',
"male;": '\U00002642',
"malt;": '\U00002720',
"maltese;": '\U00002720',
"map;": '\U000021A6',
"mapsto;": '\U000021A6',
"mapstodown;": '\U000021A7',
"mapstoleft;": '\U000021A4',
"mapstoup;": '\U000021A5',
"marker;": '\U000025AE',
"mcomma;": '\U00002A29',
"mcy;": '\U0000043C',
"mdash;": '\U00002014',
"measuredangle;": '\U00002221',
"mfr;": '\U0001D52A',
"mho;": '\U00002127',
"micro;": '\U000000B5',
"mid;": '\U00002223',
"midast;": '\U0000002A',
"midcir;": '\U00002AF0',
"middot;": '\U000000B7',
"minus;": '\U00002212',
"minusb;": '\U0000229F',
"minusd;": '\U00002238',
"minusdu;": '\U00002A2A',
"mlcp;": '\U00002ADB',
"mldr;": '\U00002026',
"mnplus;": '\U00002213',
"models;": '\U000022A7',
"mopf;": '\U0001D55E',
"mp;": '\U00002213',
"mscr;": '\U0001D4C2',
"mstpos;": '\U0000223E',
"mu;": '\U000003BC',
"multimap;": '\U000022B8',
"mumap;": '\U000022B8',
"nLeftarrow;": '\U000021CD',
"nLeftrightarrow;": '\U000021CE',
"nRightarrow;": '\U000021CF',
"nVDash;": '\U000022AF',
"nVdash;": '\U000022AE',
"nabla;": '\U00002207',
"nacute;": '\U00000144',
"nap;": '\U00002249',
"napos;": '\U00000149',
"napprox;": '\U00002249',
"natur;": '\U0000266E',
"natural;": '\U0000266E',
"naturals;": '\U00002115',
"nbsp;": '\U000000A0',
"ncap;": '\U00002A43',
"ncaron;": '\U00000148',
"ncedil;": '\U00000146',
"ncong;": '\U00002247',
"ncup;": '\U00002A42',
"ncy;": '\U0000043D',
"ndash;": '\U00002013',
"ne;": '\U00002260',
"neArr;": '\U000021D7',
"nearhk;": '\U00002924',
"nearr;": '\U00002197',
"nearrow;": '\U00002197',
"nequiv;": '\U00002262',
"nesear;": '\U00002928',
"nexist;": '\U00002204',
"nexists;": '\U00002204',
"nfr;": '\U0001D52B',
"nge;": '\U00002271',
"ngeq;": '\U00002271',
"ngsim;": '\U00002275',
"ngt;": '\U0000226F',
"ngtr;": '\U0000226F',
"nhArr;": '\U000021CE',
"nharr;": '\U000021AE',
"nhpar;": '\U00002AF2',
"ni;": '\U0000220B',
"nis;": '\U000022FC',
"nisd;": '\U000022FA',
"niv;": '\U0000220B',
"njcy;": '\U0000045A',
"nlArr;": '\U000021CD',
"nlarr;": '\U0000219A',
"nldr;": '\U00002025',
"nle;": '\U00002270',
"nleftarrow;": '\U0000219A',
"nleftrightarrow;": '\U000021AE',
"nleq;": '\U00002270',
"nless;": '\U0000226E',
"nlsim;": '\U00002274',
"nlt;": '\U0000226E',
"nltri;": '\U000022EA',
"nltrie;": '\U000022EC',
"nmid;": '\U00002224',
"nopf;": '\U0001D55F',
"not;": '\U000000AC',
"notin;": '\U00002209',
"notinva;": '\U00002209',
"notinvb;": '\U000022F7',
"notinvc;": '\U000022F6',
"notni;": '\U0000220C',
"notniva;": '\U0000220C',
"notnivb;": '\U000022FE',
"notnivc;": '\U000022FD',
"npar;": '\U00002226',
"nparallel;": '\U00002226',
"npolint;": '\U00002A14',
"npr;": '\U00002280',
"nprcue;": '\U000022E0',
"nprec;": '\U00002280',
"nrArr;": '\U000021CF',
"nrarr;": '\U0000219B',
"nrightarrow;": '\U0000219B',
"nrtri;": '\U000022EB',
"nrtrie;": '\U000022ED',
"nsc;": '\U00002281',
"nsccue;": '\U000022E1',
"nscr;": '\U0001D4C3',
"nshortmid;": '\U00002224',
"nshortparallel;": '\U00002226',
"nsim;": '\U00002241',
"nsime;": '\U00002244',
"nsimeq;": '\U00002244',
"nsmid;": '\U00002224',
"nspar;": '\U00002226',
"nsqsube;": '\U000022E2',
"nsqsupe;": '\U000022E3',
"nsub;": '\U00002284',
"nsube;": '\U00002288',
"nsubseteq;": '\U00002288',
"nsucc;": '\U00002281',
"nsup;": '\U00002285',
"nsupe;": '\U00002289',
"nsupseteq;": '\U00002289',
"ntgl;": '\U00002279',
"ntilde;": '\U000000F1',
"ntlg;": '\U00002278',
"ntriangleleft;": '\U000022EA',
"ntrianglelefteq;": '\U000022EC',
"ntriangleright;": '\U000022EB',
"ntrianglerighteq;": '\U000022ED',
"nu;": '\U000003BD',
"num;": '\U00000023',
"numero;": '\U00002116',
"numsp;": '\U00002007',
"nvDash;": '\U000022AD',
"nvHarr;": '\U00002904',
"nvdash;": '\U000022AC',
"nvinfin;": '\U000029DE',
"nvlArr;": '\U00002902',
"nvrArr;": '\U00002903',
"nwArr;": '\U000021D6',
"nwarhk;": '\U00002923',
"nwarr;": '\U00002196',
"nwarrow;": '\U00002196',
"nwnear;": '\U00002927',
"oS;": '\U000024C8',
"oacute;": '\U000000F3',
"oast;": '\U0000229B',
"ocir;": '\U0000229A',
"ocirc;": '\U000000F4',
"ocy;": '\U0000043E',
"odash;": '\U0000229D',
"odblac;": '\U00000151',
"odiv;": '\U00002A38',
"odot;": '\U00002299',
"odsold;": '\U000029BC',
"oelig;": '\U00000153',
"ofcir;": '\U000029BF',
"ofr;": '\U0001D52C',
"ogon;": '\U000002DB',
"ograve;": '\U000000F2',
"ogt;": '\U000029C1',
"ohbar;": '\U000029B5',
"ohm;": '\U000003A9',
"oint;": '\U0000222E',
"olarr;": '\U000021BA',
"olcir;": '\U000029BE',
"olcross;": '\U000029BB',
"oline;": '\U0000203E',
"olt;": '\U000029C0',
"omacr;": '\U0000014D',
"omega;": '\U000003C9',
"omicron;": '\U000003BF',
"omid;": '\U000029B6',
"ominus;": '\U00002296',
"oopf;": '\U0001D560',
"opar;": '\U000029B7',
"operp;": '\U000029B9',
"oplus;": '\U00002295',
"or;": '\U00002228',
"orarr;": '\U000021BB',
"ord;": '\U00002A5D',
"order;": '\U00002134',
"orderof;": '\U00002134',
"ordf;": '\U000000AA',
"ordm;": '\U000000BA',
"origof;": '\U000022B6',
"oror;": '\U00002A56',
"orslope;": '\U00002A57',
"orv;": '\U00002A5B',
"oscr;": '\U00002134',
"oslash;": '\U000000F8',
"osol;": '\U00002298',
"otilde;": '\U000000F5',
"otimes;": '\U00002297',
"otimesas;": '\U00002A36',
"ouml;": '\U000000F6',
"ovbar;": '\U0000233D',
"par;": '\U00002225',
"para;": '\U000000B6',
"parallel;": '\U00002225',
"parsim;": '\U00002AF3',
"parsl;": '\U00002AFD',
"part;": '\U00002202',
"pcy;": '\U0000043F',
"percnt;": '\U00000025',
"period;": '\U0000002E',
"permil;": '\U00002030',
"perp;": '\U000022A5',
"pertenk;": '\U00002031',
"pfr;": '\U0001D52D',
"phi;": '\U000003C6',
"phiv;": '\U000003D5',
"phmmat;": '\U00002133',
"phone;": '\U0000260E',
"pi;": '\U000003C0',
"pitchfork;": '\U000022D4',
"piv;": '\U000003D6',
"planck;": '\U0000210F',
"planckh;": '\U0000210E',
"plankv;": '\U0000210F',
"plus;": '\U0000002B',
"plusacir;": '\U00002A23',
"plusb;": '\U0000229E',
"pluscir;": '\U00002A22',
"plusdo;": '\U00002214',
"plusdu;": '\U00002A25',
"pluse;": '\U00002A72',
"plusmn;": '\U000000B1',
"plussim;": '\U00002A26',
"plustwo;": '\U00002A27',
"pm;": '\U000000B1',
"pointint;": '\U00002A15',
"popf;": '\U0001D561',
"pound;": '\U000000A3',
"pr;": '\U0000227A',
"prE;": '\U00002AB3',
"prap;": '\U00002AB7',
"prcue;": '\U0000227C',
"pre;": '\U00002AAF',
"prec;": '\U0000227A',
"precapprox;": '\U00002AB7',
"preccurlyeq;": '\U0000227C',
"preceq;": '\U00002AAF',
"precnapprox;": '\U00002AB9',
"precneqq;": '\U00002AB5',
"precnsim;": '\U000022E8',
"precsim;": '\U0000227E',
"prime;": '\U00002032',
"primes;": '\U00002119',
"prnE;": '\U00002AB5',
"prnap;": '\U00002AB9',
"prnsim;": '\U000022E8',
"prod;": '\U0000220F',
"profalar;": '\U0000232E',
"profline;": '\U00002312',
"profsurf;": '\U00002313',
"prop;": '\U0000221D',
"propto;": '\U0000221D',
"prsim;": '\U0000227E',
"prurel;": '\U000022B0',
"pscr;": '\U0001D4C5',
"psi;": '\U000003C8',
"puncsp;": '\U00002008',
"qfr;": '\U0001D52E',
"qint;": '\U00002A0C',
"qopf;": '\U0001D562',
"qprime;": '\U00002057',
"qscr;": '\U0001D4C6',
"quaternions;": '\U0000210D',
"quatint;": '\U00002A16',
"quest;": '\U0000003F',
"questeq;": '\U0000225F',
"quot;": '\U00000022',
"rAarr;": '\U000021DB',
"rArr;": '\U000021D2',
"rAtail;": '\U0000291C',
"rBarr;": '\U0000290F',
"rHar;": '\U00002964',
"racute;": '\U00000155',
"radic;": '\U0000221A',
"raemptyv;": '\U000029B3',
"rang;": '\U000027E9',
"rangd;": '\U00002992',
"range;": '\U000029A5',
"rangle;": '\U000027E9',
"raquo;": '\U000000BB',
"rarr;": '\U00002192',
"rarrap;": '\U00002975',
"rarrb;": '\U000021E5',
"rarrbfs;": '\U00002920',
"rarrc;": '\U00002933',
"rarrfs;": '\U0000291E',
"rarrhk;": '\U000021AA',
"rarrlp;": '\U000021AC',
"rarrpl;": '\U00002945',
"rarrsim;": '\U00002974',
"rarrtl;": '\U000021A3',
"rarrw;": '\U0000219D',
"ratail;": '\U0000291A',
"ratio;": '\U00002236',
"rationals;": '\U0000211A',
"rbarr;": '\U0000290D',
"rbbrk;": '\U00002773',
"rbrace;": '\U0000007D',
"rbrack;": '\U0000005D',
"rbrke;": '\U0000298C',
"rbrksld;": '\U0000298E',
"rbrkslu;": '\U00002990',
"rcaron;": '\U00000159',
"rcedil;": '\U00000157',
"rceil;": '\U00002309',
"rcub;": '\U0000007D',
"rcy;": '\U00000440',
"rdca;": '\U00002937',
"rdldhar;": '\U00002969',
"rdquo;": '\U0000201D',
"rdquor;": '\U0000201D',
"rdsh;": '\U000021B3',
"real;": '\U0000211C',
"realine;": '\U0000211B',
"realpart;": '\U0000211C',
"reals;": '\U0000211D',
"rect;": '\U000025AD',
"reg;": '\U000000AE',
"rfisht;": '\U0000297D',
"rfloor;": '\U0000230B',
"rfr;": '\U0001D52F',
"rhard;": '\U000021C1',
"rharu;": '\U000021C0',
"rharul;": '\U0000296C',
"rho;": '\U000003C1',
"rhov;": '\U000003F1',
"rightarrow;": '\U00002192',
"rightarrowtail;": '\U000021A3',
"rightharpoondown;": '\U000021C1',
"rightharpoonup;": '\U000021C0',
"rightleftarrows;": '\U000021C4',
"rightleftharpoons;": '\U000021CC',
"rightrightarrows;": '\U000021C9',
"rightsquigarrow;": '\U0000219D',
"rightthreetimes;": '\U000022CC',
"ring;": '\U000002DA',
"risingdotseq;": '\U00002253',
"rlarr;": '\U000021C4',
"rlhar;": '\U000021CC',
"rlm;": '\U0000200F',
"rmoust;": '\U000023B1',
"rmoustache;": '\U000023B1',
"rnmid;": '\U00002AEE',
"roang;": '\U000027ED',
"roarr;": '\U000021FE',
"robrk;": '\U000027E7',
"ropar;": '\U00002986',
"ropf;": '\U0001D563',
"roplus;": '\U00002A2E',
"rotimes;": '\U00002A35',
"rpar;": '\U00000029',
"rpargt;": '\U00002994',
"rppolint;": '\U00002A12',
"rrarr;": '\U000021C9',
"rsaquo;": '\U0000203A',
"rscr;": '\U0001D4C7',
"rsh;": '\U000021B1',
"rsqb;": '\U0000005D',
"rsquo;": '\U00002019',
"rsquor;": '\U00002019',
"rthree;": '\U000022CC',
"rtimes;": '\U000022CA',
"rtri;": '\U000025B9',
"rtrie;": '\U000022B5',
"rtrif;": '\U000025B8',
"rtriltri;": '\U000029CE',
"ruluhar;": '\U00002968',
"rx;": '\U0000211E',
"sacute;": '\U0000015B',
"sbquo;": '\U0000201A',
"sc;": '\U0000227B',
"scE;": '\U00002AB4',
"scap;": '\U00002AB8',
"scaron;": '\U00000161',
"sccue;": '\U0000227D',
"sce;": '\U00002AB0',
"scedil;": '\U0000015F',
"scirc;": '\U0000015D',
"scnE;": '\U00002AB6',
"scnap;": '\U00002ABA',
"scnsim;": '\U000022E9',
"scpolint;": '\U00002A13',
"scsim;": '\U0000227F',
"scy;": '\U00000441',
"sdot;": '\U000022C5',
"sdotb;": '\U000022A1',
"sdote;": '\U00002A66',
"seArr;": '\U000021D8',
"searhk;": '\U00002925',
"searr;": '\U00002198',
"searrow;": '\U00002198',
"sect;": '\U000000A7',
"semi;": '\U0000003B',
"seswar;": '\U00002929',
"setminus;": '\U00002216',
"setmn;": '\U00002216',
"sext;": '\U00002736',
"sfr;": '\U0001D530',
"sfrown;": '\U00002322',
"sharp;": '\U0000266F',
"shchcy;": '\U00000449',
"shcy;": '\U00000448',
"shortmid;": '\U00002223',
"shortparallel;": '\U00002225',
"shy;": '\U000000AD',
"sigma;": '\U000003C3',
"sigmaf;": '\U000003C2',
"sigmav;": '\U000003C2',
"sim;": '\U0000223C',
"simdot;": '\U00002A6A',
"sime;": '\U00002243',
"simeq;": '\U00002243',
"simg;": '\U00002A9E',
"simgE;": '\U00002AA0',
"siml;": '\U00002A9D',
"simlE;": '\U00002A9F',
"simne;": '\U00002246',
"simplus;": '\U00002A24',
"simrarr;": '\U00002972',
"slarr;": '\U00002190',
"smallsetminus;": '\U00002216',
"smashp;": '\U00002A33',
"smeparsl;": '\U000029E4',
"smid;": '\U00002223',
"smile;": '\U00002323',
"smt;": '\U00002AAA',
"smte;": '\U00002AAC',
"softcy;": '\U0000044C',
"sol;": '\U0000002F',
"solb;": '\U000029C4',
"solbar;": '\U0000233F',
"sopf;": '\U0001D564',
"spades;": '\U00002660',
"spadesuit;": '\U00002660',
"spar;": '\U00002225',
"sqcap;": '\U00002293',
"sqcup;": '\U00002294',
"sqsub;": '\U0000228F',
"sqsube;": '\U00002291',
"sqsubset;": '\U0000228F',
"sqsubseteq;": '\U00002291',
"sqsup;": '\U00002290',
"sqsupe;": '\U00002292',
"sqsupset;": '\U00002290',
"sqsupseteq;": '\U00002292',
"squ;": '\U000025A1',
"square;": '\U000025A1',
"squarf;": '\U000025AA',
"squf;": '\U000025AA',
"srarr;": '\U00002192',
"sscr;": '\U0001D4C8',
"ssetmn;": '\U00002216',
"ssmile;": '\U00002323',
"sstarf;": '\U000022C6',
"star;": '\U00002606',
"starf;": '\U00002605',
"straightepsilon;": '\U000003F5',
"straightphi;": '\U000003D5',
"strns;": '\U000000AF',
"sub;": '\U00002282',
"subE;": '\U00002AC5',
"subdot;": '\U00002ABD',
"sube;": '\U00002286',
"subedot;": '\U00002AC3',
"submult;": '\U00002AC1',
"subnE;": '\U00002ACB',
"subne;": '\U0000228A',
"subplus;": '\U00002ABF',
"subrarr;": '\U00002979',
"subset;": '\U00002282',
"subseteq;": '\U00002286',
"subseteqq;": '\U00002AC5',
"subsetneq;": '\U0000228A',
"subsetneqq;": '\U00002ACB',
"subsim;": '\U00002AC7',
"subsub;": '\U00002AD5',
"subsup;": '\U00002AD3',
"succ;": '\U0000227B',
"succapprox;": '\U00002AB8',
"succcurlyeq;": '\U0000227D',
"succeq;": '\U00002AB0',
"succnapprox;": '\U00002ABA',
"succneqq;": '\U00002AB6',
"succnsim;": '\U000022E9',
"succsim;": '\U0000227F',
"sum;": '\U00002211',
"sung;": '\U0000266A',
"sup;": '\U00002283',
"sup1;": '\U000000B9',
"sup2;": '\U000000B2',
"sup3;": '\U000000B3',
"supE;": '\U00002AC6',
"supdot;": '\U00002ABE',
"supdsub;": '\U00002AD8',
"supe;": '\U00002287',
"supedot;": '\U00002AC4',
"suphsol;": '\U000027C9',
"suphsub;": '\U00002AD7',
"suplarr;": '\U0000297B',
"supmult;": '\U00002AC2',
"supnE;": '\U00002ACC',
"supne;": '\U0000228B',
"supplus;": '\U00002AC0',
"supset;": '\U00002283',
"supseteq;": '\U00002287',
"supseteqq;": '\U00002AC6',
"supsetneq;": '\U0000228B',
"supsetneqq;": '\U00002ACC',
"supsim;": '\U00002AC8',
"supsub;": '\U00002AD4',
"supsup;": '\U00002AD6',
"swArr;": '\U000021D9',
"swarhk;": '\U00002926',
"swarr;": '\U00002199',
"swarrow;": '\U00002199',
"swnwar;": '\U0000292A',
"szlig;": '\U000000DF',
"target;": '\U00002316',
"tau;": '\U000003C4',
"tbrk;": '\U000023B4',
"tcaron;": '\U00000165',
"tcedil;": '\U00000163',
"tcy;": '\U00000442',
"tdot;": '\U000020DB',
"telrec;": '\U00002315',
"tfr;": '\U0001D531',
"there4;": '\U00002234',
"therefore;": '\U00002234',
"theta;": '\U000003B8',
"thetasym;": '\U000003D1',
"thetav;": '\U000003D1',
"thickapprox;": '\U00002248',
"thicksim;": '\U0000223C',
"thinsp;": '\U00002009',
"thkap;": '\U00002248',
"thksim;": '\U0000223C',
"thorn;": '\U000000FE',
"tilde;": '\U000002DC',
"times;": '\U000000D7',
"timesb;": '\U000022A0',
"timesbar;": '\U00002A31',
"timesd;": '\U00002A30',
"tint;": '\U0000222D',
"toea;": '\U00002928',
"top;": '\U000022A4',
"topbot;": '\U00002336',
"topcir;": '\U00002AF1',
"topf;": '\U0001D565',
"topfork;": '\U00002ADA',
"tosa;": '\U00002929',
"tprime;": '\U00002034',
"trade;": '\U00002122',
"triangle;": '\U000025B5',
"triangledown;": '\U000025BF',
"triangleleft;": '\U000025C3',
"trianglelefteq;": '\U000022B4',
"triangleq;": '\U0000225C',
"triangleright;": '\U000025B9',
"trianglerighteq;": '\U000022B5',
"tridot;": '\U000025EC',
"trie;": '\U0000225C',
"triminus;": '\U00002A3A',
"triplus;": '\U00002A39',
"trisb;": '\U000029CD',
"tritime;": '\U00002A3B',
"trpezium;": '\U000023E2',
"tscr;": '\U0001D4C9',
"tscy;": '\U00000446',
"tshcy;": '\U0000045B',
"tstrok;": '\U00000167',
"twixt;": '\U0000226C',
"twoheadleftarrow;": '\U0000219E',
"twoheadrightarrow;": '\U000021A0',
"uArr;": '\U000021D1',
"uHar;": '\U00002963',
"uacute;": '\U000000FA',
"uarr;": '\U00002191',
"ubrcy;": '\U0000045E',
"ubreve;": '\U0000016D',
"ucirc;": '\U000000FB',
"ucy;": '\U00000443',
"udarr;": '\U000021C5',
"udblac;": '\U00000171',
"udhar;": '\U0000296E',
"ufisht;": '\U0000297E',
"ufr;": '\U0001D532',
"ugrave;": '\U000000F9',
"uharl;": '\U000021BF',
"uharr;": '\U000021BE',
"uhblk;": '\U00002580',
"ulcorn;": '\U0000231C',
"ulcorner;": '\U0000231C',
"ulcrop;": '\U0000230F',
"ultri;": '\U000025F8',
"umacr;": '\U0000016B',
"uml;": '\U000000A8',
"uogon;": '\U00000173',
"uopf;": '\U0001D566',
"uparrow;": '\U00002191',
"updownarrow;": '\U00002195',
"upharpoonleft;": '\U000021BF',
"upharpoonright;": '\U000021BE',
"uplus;": '\U0000228E',
"upsi;": '\U000003C5',
"upsih;": '\U000003D2',
"upsilon;": '\U000003C5',
"upuparrows;": '\U000021C8',
"urcorn;": '\U0000231D',
"urcorner;": '\U0000231D',
"urcrop;": '\U0000230E',
"uring;": '\U0000016F',
"urtri;": '\U000025F9',
"uscr;": '\U0001D4CA',
"utdot;": '\U000022F0',
"utilde;": '\U00000169',
"utri;": '\U000025B5',
"utrif;": '\U000025B4',
"uuarr;": '\U000021C8',
"uuml;": '\U000000FC',
"uwangle;": '\U000029A7',
"vArr;": '\U000021D5',
"vBar;": '\U00002AE8',
"vBarv;": '\U00002AE9',
"vDash;": '\U000022A8',
"vangrt;": '\U0000299C',
"varepsilon;": '\U000003F5',
"varkappa;": '\U000003F0',
"varnothing;": '\U00002205',
"varphi;": '\U000003D5',
"varpi;": '\U000003D6',
"varpropto;": '\U0000221D',
"varr;": '\U00002195',
"varrho;": '\U000003F1',
"varsigma;": '\U000003C2',
"vartheta;": '\U000003D1',
"vartriangleleft;": '\U000022B2',
"vartriangleright;": '\U000022B3',
"vcy;": '\U00000432',
"vdash;": '\U000022A2',
"vee;": '\U00002228',
"veebar;": '\U000022BB',
"veeeq;": '\U0000225A',
"vellip;": '\U000022EE',
"verbar;": '\U0000007C',
"vert;": '\U0000007C',
"vfr;": '\U0001D533',
"vltri;": '\U000022B2',
"vopf;": '\U0001D567',
"vprop;": '\U0000221D',
"vrtri;": '\U000022B3',
"vscr;": '\U0001D4CB',
"vzigzag;": '\U0000299A',
"wcirc;": '\U00000175',
"wedbar;": '\U00002A5F',
"wedge;": '\U00002227',
"wedgeq;": '\U00002259',
"weierp;": '\U00002118',
"wfr;": '\U0001D534',
"wopf;": '\U0001D568',
"wp;": '\U00002118',
"wr;": '\U00002240',
"wreath;": '\U00002240',
"wscr;": '\U0001D4CC',
"xcap;": '\U000022C2',
"xcirc;": '\U000025EF',
"xcup;": '\U000022C3',
"xdtri;": '\U000025BD',
"xfr;": '\U0001D535',
"xhArr;": '\U000027FA',
"xharr;": '\U000027F7',
"xi;": '\U000003BE',
"xlArr;": '\U000027F8',
"xlarr;": '\U000027F5',
"xmap;": '\U000027FC',
"xnis;": '\U000022FB',
"xodot;": '\U00002A00',
"xopf;": '\U0001D569',
"xoplus;": '\U00002A01',
"xotime;": '\U00002A02',
"xrArr;": '\U000027F9',
"xrarr;": '\U000027F6',
"xscr;": '\U0001D4CD',
"xsqcup;": '\U00002A06',
"xuplus;": '\U00002A04',
"xutri;": '\U000025B3',
"xvee;": '\U000022C1',
"xwedge;": '\U000022C0',
"yacute;": '\U000000FD',
"yacy;": '\U0000044F',
"ycirc;": '\U00000177',
"ycy;": '\U0000044B',
"yen;": '\U000000A5',
"yfr;": '\U0001D536',
"yicy;": '\U00000457',
"yopf;": '\U0001D56A',
"yscr;": '\U0001D4CE',
"yucy;": '\U0000044E',
"yuml;": '\U000000FF',
"zacute;": '\U0000017A',
"zcaron;": '\U0000017E',
"zcy;": '\U00000437',
"zdot;": '\U0000017C',
"zeetrf;": '\U00002128',
"zeta;": '\U000003B6',
"zfr;": '\U0001D537',
"zhcy;": '\U00000436',
"zigrarr;": '\U000021DD',
"zopf;": '\U0001D56B',
"zscr;": '\U0001D4CF',
"zwj;": '\U0000200D',
"zwnj;": '\U0000200C',
"AElig": '\U000000C6',
"AMP": '\U00000026',
"Aacute": '\U000000C1',
"Acirc": '\U000000C2',
"Agrave": '\U000000C0',
"Aring": '\U000000C5',
"Atilde": '\U000000C3',
"Auml": '\U000000C4',
"COPY": '\U000000A9',
"Ccedil": '\U000000C7',
"ETH": '\U000000D0',
"Eacute": '\U000000C9',
"Ecirc": '\U000000CA',
"Egrave": '\U000000C8',
"Euml": '\U000000CB',
"GT": '\U0000003E',
"Iacute": '\U000000CD',
"Icirc": '\U000000CE',
"Igrave": '\U000000CC',
"Iuml": '\U000000CF',
"LT": '\U0000003C',
"Ntilde": '\U000000D1',
"Oacute": '\U000000D3',
"Ocirc": '\U000000D4',
"Ograve": '\U000000D2',
"Oslash": '\U000000D8',
"Otilde": '\U000000D5',
"Ouml": '\U000000D6',
"QUOT": '\U00000022',
"REG": '\U000000AE',
"THORN": '\U000000DE',
"Uacute": '\U000000DA',
"Ucirc": '\U000000DB',
"Ugrave": '\U000000D9',
"Uuml": '\U000000DC',
"Yacute": '\U000000DD',
"aacute": '\U000000E1',
"acirc": '\U000000E2',
"acute": '\U000000B4',
"aelig": '\U000000E6',
"agrave": '\U000000E0',
"amp": '\U00000026',
"aring": '\U000000E5',
"atilde": '\U000000E3',
"auml": '\U000000E4',
"brvbar": '\U000000A6',
"ccedil": '\U000000E7',
"cedil": '\U000000B8',
"cent": '\U000000A2',
"copy": '\U000000A9',
"curren": '\U000000A4',
"deg": '\U000000B0',
"divide": '\U000000F7',
"eacute": '\U000000E9',
"ecirc": '\U000000EA',
"egrave": '\U000000E8',
"eth": '\U000000F0',
"euml": '\U000000EB',
"frac12": '\U000000BD',
"frac14": '\U000000BC',
"frac34": '\U000000BE',
"gt": '\U0000003E',
"iacute": '\U000000ED',
"icirc": '\U000000EE',
"iexcl": '\U000000A1',
"igrave": '\U000000EC',
"iquest": '\U000000BF',
"iuml": '\U000000EF',
"laquo": '\U000000AB',
"lt": '\U0000003C',
"macr": '\U000000AF',
"micro": '\U000000B5',
"middot": '\U000000B7',
"nbsp": '\U000000A0',
"not": '\U000000AC',
"ntilde": '\U000000F1',
"oacute": '\U000000F3',
"ocirc": '\U000000F4',
"ograve": '\U000000F2',
"ordf": '\U000000AA',
"ordm": '\U000000BA',
"oslash": '\U000000F8',
"otilde": '\U000000F5',
"ouml": '\U000000F6',
"para": '\U000000B6',
"plusmn": '\U000000B1',
"pound": '\U000000A3',
"quot": '\U00000022',
"raquo": '\U000000BB',
"reg": '\U000000AE',
"sect": '\U000000A7',
"shy": '\U000000AD',
"sup1": '\U000000B9',
"sup2": '\U000000B2',
"sup3": '\U000000B3',
"szlig": '\U000000DF',
"thorn": '\U000000FE',
"times": '\U000000D7',
"uacute": '\U000000FA',
"ucirc": '\U000000FB',
"ugrave": '\U000000F9',
"uml": '\U000000A8',
"uuml": '\U000000FC',
"yacute": '\U000000FD',
"yen": '\U000000A5',
"yuml": '\U000000FF',
}
entity2 = map[string][2]rune{
// TODO(nigeltao): Handle replacements that are wider than their names.
// "nLt;": {'\u226A', '\u20D2'},
// "nGt;": {'\u226B', '\u20D2'},
"NotEqualTilde;": {'\u2242', '\u0338'},
"NotGreaterFullEqual;": {'\u2267', '\u0338'},
"NotGreaterGreater;": {'\u226B', '\u0338'},
"NotGreaterSlantEqual;": {'\u2A7E', '\u0338'},
"NotHumpDownHump;": {'\u224E', '\u0338'},
"NotHumpEqual;": {'\u224F', '\u0338'},
"NotLeftTriangleBar;": {'\u29CF', '\u0338'},
"NotLessLess;": {'\u226A', '\u0338'},
"NotLessSlantEqual;": {'\u2A7D', '\u0338'},
"NotNestedGreaterGreater;": {'\u2AA2', '\u0338'},
"NotNestedLessLess;": {'\u2AA1', '\u0338'},
"NotPrecedesEqual;": {'\u2AAF', '\u0338'},
"NotRightTriangleBar;": {'\u29D0', '\u0338'},
"NotSquareSubset;": {'\u228F', '\u0338'},
"NotSquareSuperset;": {'\u2290', '\u0338'},
"NotSubset;": {'\u2282', '\u20D2'},
"NotSucceedsEqual;": {'\u2AB0', '\u0338'},
"NotSucceedsTilde;": {'\u227F', '\u0338'},
"NotSuperset;": {'\u2283', '\u20D2'},
"ThickSpace;": {'\u205F', '\u200A'},
"acE;": {'\u223E', '\u0333'},
"bne;": {'\u003D', '\u20E5'},
"bnequiv;": {'\u2261', '\u20E5'},
"caps;": {'\u2229', '\uFE00'},
"cups;": {'\u222A', '\uFE00'},
"fjlig;": {'\u0066', '\u006A'},
"gesl;": {'\u22DB', '\uFE00'},
"gvertneqq;": {'\u2269', '\uFE00'},
"gvnE;": {'\u2269', '\uFE00'},
"lates;": {'\u2AAD', '\uFE00'},
"lesg;": {'\u22DA', '\uFE00'},
"lvertneqq;": {'\u2268', '\uFE00'},
"lvnE;": {'\u2268', '\uFE00'},
"nGg;": {'\u22D9', '\u0338'},
"nGtv;": {'\u226B', '\u0338'},
"nLl;": {'\u22D8', '\u0338'},
"nLtv;": {'\u226A', '\u0338'},
"nang;": {'\u2220', '\u20D2'},
"napE;": {'\u2A70', '\u0338'},
"napid;": {'\u224B', '\u0338'},
"nbump;": {'\u224E', '\u0338'},
"nbumpe;": {'\u224F', '\u0338'},
"ncongdot;": {'\u2A6D', '\u0338'},
"nedot;": {'\u2250', '\u0338'},
"nesim;": {'\u2242', '\u0338'},
"ngE;": {'\u2267', '\u0338'},
"ngeqq;": {'\u2267', '\u0338'},
"ngeqslant;": {'\u2A7E', '\u0338'},
"nges;": {'\u2A7E', '\u0338'},
"nlE;": {'\u2266', '\u0338'},
"nleqq;": {'\u2266', '\u0338'},
"nleqslant;": {'\u2A7D', '\u0338'},
"nles;": {'\u2A7D', '\u0338'},
"notinE;": {'\u22F9', '\u0338'},
"notindot;": {'\u22F5', '\u0338'},
"nparsl;": {'\u2AFD', '\u20E5'},
"npart;": {'\u2202', '\u0338'},
"npre;": {'\u2AAF', '\u0338'},
"npreceq;": {'\u2AAF', '\u0338'},
"nrarrc;": {'\u2933', '\u0338'},
"nrarrw;": {'\u219D', '\u0338'},
"nsce;": {'\u2AB0', '\u0338'},
"nsubE;": {'\u2AC5', '\u0338'},
"nsubset;": {'\u2282', '\u20D2'},
"nsubseteqq;": {'\u2AC5', '\u0338'},
"nsucceq;": {'\u2AB0', '\u0338'},
"nsupE;": {'\u2AC6', '\u0338'},
"nsupset;": {'\u2283', '\u20D2'},
"nsupseteqq;": {'\u2AC6', '\u0338'},
"nvap;": {'\u224D', '\u20D2'},
"nvge;": {'\u2265', '\u20D2'},
"nvgt;": {'\u003E', '\u20D2'},
"nvle;": {'\u2264', '\u20D2'},
"nvlt;": {'\u003C', '\u20D2'},
"nvltrie;": {'\u22B4', '\u20D2'},
"nvrtrie;": {'\u22B5', '\u20D2'},
"nvsim;": {'\u223C', '\u20D2'},
"race;": {'\u223D', '\u0331'},
"smtes;": {'\u2AAC', '\uFE00'},
"sqcaps;": {'\u2293', '\uFE00'},
"sqcups;": {'\u2294', '\uFE00'},
"varsubsetneq;": {'\u228A', '\uFE00'},
"varsubsetneqq;": {'\u2ACB', '\uFE00'},
"varsupsetneq;": {'\u228B', '\uFE00'},
"varsupsetneqq;": {'\u2ACC', '\uFE00'},
"vnsub;": {'\u2282', '\u20D2'},
"vnsup;": {'\u2283', '\u20D2'},
"vsubnE;": {'\u2ACB', '\uFE00'},
"vsubne;": {'\u228A', '\uFE00'},
"vsupnE;": {'\u2ACC', '\uFE00'},
"vsupne;": {'\u228B', '\uFE00'},
}
return entity, entity2
})
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package html provides functions for escaping and unescaping HTML text.
package html
import (
"strings"
"unicode/utf8"
)
// These replacements permit compatibility with old numeric entities that
// assumed Windows-1252 encoding.
// https://html.spec.whatwg.org/multipage/parsing.html#numeric-character-reference-end-state
//
// Index i holds the replacement rune for code point 0x80+i, covering the
// range 0x80 through 0x9F inclusive.
var replacementTable = [...]rune{
	'\u20AC', // First entry is what 0x80 should be replaced with.
	'\u0081',
	'\u201A',
	'\u0192',
	'\u201E',
	'\u2026',
	'\u2020',
	'\u2021',
	'\u02C6',
	'\u2030',
	'\u0160',
	'\u2039',
	'\u0152',
	'\u008D',
	'\u017D',
	'\u008F',
	'\u0090',
	'\u2018',
	'\u2019',
	'\u201C',
	'\u201D',
	'\u2022',
	'\u2013',
	'\u2014',
	'\u02DC',
	'\u2122',
	'\u0161',
	'\u203A',
	'\u0153',
	'\u009D',
	'\u017E',
	'\u0178', // Last entry is 0x9F.
	// 0x00->'\uFFFD' is handled programmatically.
	// 0x0D->'\u000D' is a no-op.
}
// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
// Precondition: b[src] == '&' && dst <= src.
func unescapeEntity(b []byte, dst, src int, entity map[string]rune, entity2 map[string][2]rune) (dst1, src1 int) {
	// attribute is hardcoded false: this unescaper is only used for text,
	// never for attribute values, but the attribute-specific branches are
	// kept below so the algorithm mirrors the spec.
	const attribute = false

	// http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html#consume-a-character-reference

	// i starts at 1 because we already know that s[0] == '&'.
	i, s := 1, b[src:]

	if len(s) <= 1 {
		// A lone '&' at the end of the input: copy it through unchanged.
		b[dst] = b[src]
		return dst + 1, src + 1
	}

	if s[i] == '#' {
		// Numeric character reference, decimal ("&#225;") or hex ("&#xE1;").
		if len(s) <= 3 { // We need to have at least "&#.".
			b[dst] = b[src]
			return dst + 1, src + 1
		}
		i++
		c := s[i]
		hex := false
		if c == 'x' || c == 'X' {
			hex = true
			i++
		}

		// Accumulate digits into x; on the first non-digit, back up one
		// byte unless it is the terminating ';' (which is consumed).
		x := '\x00'
		for i < len(s) {
			c = s[i]
			i++
			if hex {
				if '0' <= c && c <= '9' {
					x = 16*x + rune(c) - '0'
					continue
				} else if 'a' <= c && c <= 'f' {
					x = 16*x + rune(c) - 'a' + 10
					continue
				} else if 'A' <= c && c <= 'F' {
					x = 16*x + rune(c) - 'A' + 10
					continue
				}
			} else if '0' <= c && c <= '9' {
				x = 10*x + rune(c) - '0'
				continue
			}
			if c != ';' {
				i--
			}
			break
		}

		if i <= 3 { // No characters matched.
			b[dst] = b[src]
			return dst + 1, src + 1
		}

		if 0x80 <= x && x <= 0x9F {
			// Replace characters from Windows-1252 with UTF-8 equivalents.
			x = replacementTable[x-0x80]
		} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
			// Replace invalid characters with the replacement character.
			x = '\uFFFD'
		}

		return dst + utf8.EncodeRune(b[dst:], x), src + i
	}

	// Consume the maximum number of characters possible, with the
	// consumed characters matching one of the named references.
	for i < len(s) {
		c := s[i]
		i++
		// Lower-cased characters are more common in entities, so we check for them first.
		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
			continue
		}
		if c != ';' {
			i--
		}
		break
	}

	entityName := s[1:i]
	if len(entityName) == 0 {
		// No-op.
	} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
		// No-op.
	} else if x := entity[string(entityName)]; x != 0 {
		// Single-rune named reference ("&lt;" and friends).
		return dst + utf8.EncodeRune(b[dst:], x), src + i
	} else if x := entity2[string(entityName)]; x[0] != 0 {
		// Two-rune named reference (e.g. combining-mark pairs).
		dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
		return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
	} else if !attribute {
		// In text, an unterminated name may still match a shorter entity
		// that is valid without a trailing semicolon; try longest first.
		maxLen := len(entityName) - 1
		if maxLen > longestEntityWithoutSemicolon {
			maxLen = longestEntityWithoutSemicolon
		}
		for j := maxLen; j > 1; j-- {
			if x := entity[string(entityName[:j])]; x != 0 {
				return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
			}
		}
	}

	// Nothing matched: copy the consumed bytes through verbatim.
	dst1, src1 = dst+i, src+i
	copy(b[dst:dst1], b[src:src1])
	return dst1, src1
}
// htmlEscaper rewrites the five HTML special characters to their entity
// references in a single pass, so earlier replacements are never re-escaped.
//
// NOTE(review): the replacement strings in this listing had been corrupted
// (each character mapped to itself, which would make escaping a no-op);
// restored to the standard entity encodings.
var htmlEscaper = strings.NewReplacer(
	`&`, "&amp;",
	`'`, "&#39;", // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
	`<`, "&lt;",
	`>`, "&gt;",
	`"`, "&#34;", // "&#34;" is shorter than "&quot;".
)
// EscapeString escapes special characters like "<" to become "&lt;". It
// escapes only five such characters: <, >, &, ' and ".
// [UnescapeString](EscapeString(s)) == s always holds, but the converse isn't
// always true.
func EscapeString(s string) string {
	return htmlEscaper.Replace(s)
}
// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
// larger range of entities than [EscapeString] escapes. For example, "&aacute;"
// unescapes to "á", as does "&#225;" and "&#xE1;".
// UnescapeString([EscapeString](s)) == s always holds, but the converse isn't
// always true.
func UnescapeString(s string) string {
	i := strings.IndexByte(s, '&')

	if i < 0 {
		// No entities at all; return s without allocating.
		return s
	}

	// Unescaping never grows the text, so decode in place into a copy of s.
	b := []byte(s)
	entity, entity2 := entityMaps()
	dst, src := unescapeEntity(b, i, i, entity, entity2)
	for len(s[src:]) > 0 {
		if s[src] == '&' {
			i = 0
		} else {
			i = strings.IndexByte(s[src:], '&')
		}
		if i < 0 {
			// No further '&': copy the remainder verbatim and finish.
			dst += copy(b[dst:], s[src:])
			break
		}

		if i > 0 {
			// Copy the literal text preceding the next '&'.
			copy(b[dst:], s[src:src+i])
		}
		dst, src = unescapeEntity(b, dst+i, src+i, entity, entity2)
	}

	return string(b[:dst])
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"strings"
)
// attrTypeMap[n] describes the value of the given attribute.
// If an attribute affects (or can mask) the encoding or interpretation of
// other content, or affects the contents, idempotency, or credentials of a
// network message, then the value in this map is contentTypeUnsafe.
// This map is derived from HTML5, specifically
// https://www.w3.org/TR/html5/Overview.html#attributes-1
// as well as "%URI"-typed attributes from
// https://www.w3.org/TR/html4/index/attributes.html
//
// Keys are lowercase attribute names; see attrType for prefix handling
// (data-*, namespaced, and on* handler attributes).
var attrTypeMap = map[string]contentType{
	"accept":          contentTypePlain,
	"accept-charset":  contentTypeUnsafe,
	"action":          contentTypeURL,
	"alt":             contentTypePlain,
	"archive":         contentTypeURL,
	"async":           contentTypeUnsafe,
	"autocomplete":    contentTypePlain,
	"autofocus":       contentTypePlain,
	"autoplay":        contentTypePlain,
	"background":      contentTypeURL,
	"border":          contentTypePlain,
	"checked":         contentTypePlain,
	"cite":            contentTypeURL,
	"challenge":       contentTypeUnsafe,
	"charset":         contentTypeUnsafe,
	"class":           contentTypePlain,
	"classid":         contentTypeURL,
	"codebase":        contentTypeURL,
	"cols":            contentTypePlain,
	"colspan":         contentTypePlain,
	"content":         contentTypeUnsafe,
	"contenteditable": contentTypePlain,
	"contextmenu":     contentTypePlain,
	"controls":        contentTypePlain,
	"coords":          contentTypePlain,
	"crossorigin":     contentTypeUnsafe,
	"data":            contentTypeURL,
	"datetime":        contentTypePlain,
	"default":         contentTypePlain,
	"defer":           contentTypeUnsafe,
	"dir":             contentTypePlain,
	"dirname":         contentTypePlain,
	"disabled":        contentTypePlain,
	"draggable":       contentTypePlain,
	"dropzone":        contentTypePlain,
	"enctype":         contentTypeUnsafe,
	"for":             contentTypePlain,
	"form":            contentTypeUnsafe,
	"formaction":      contentTypeURL,
	"formenctype":     contentTypeUnsafe,
	"formmethod":      contentTypeUnsafe,
	"formnovalidate":  contentTypeUnsafe,
	"formtarget":      contentTypePlain,
	"headers":         contentTypePlain,
	"height":          contentTypePlain,
	"hidden":          contentTypePlain,
	"high":            contentTypePlain,
	"href":            contentTypeURL,
	"hreflang":        contentTypePlain,
	"http-equiv":      contentTypeUnsafe,
	"icon":            contentTypeURL,
	"id":              contentTypePlain,
	"ismap":           contentTypePlain,
	"keytype":         contentTypeUnsafe,
	"kind":            contentTypePlain,
	"label":           contentTypePlain,
	"lang":            contentTypePlain,
	"language":        contentTypeUnsafe,
	"list":            contentTypePlain,
	"longdesc":        contentTypeURL,
	"loop":            contentTypePlain,
	"low":             contentTypePlain,
	"manifest":        contentTypeURL,
	"max":             contentTypePlain,
	"maxlength":       contentTypePlain,
	"media":           contentTypePlain,
	"mediagroup":      contentTypePlain,
	"method":          contentTypeUnsafe,
	"min":             contentTypePlain,
	"multiple":        contentTypePlain,
	"name":            contentTypePlain,
	"novalidate":      contentTypeUnsafe,
	// Skip handler names from
	// https://www.w3.org/TR/html5/webappapis.html#event-handlers-on-elements,-document-objects,-and-window-objects
	// since we have special handling in attrType.
	"open":        contentTypePlain,
	"optimum":     contentTypePlain,
	"pattern":     contentTypeUnsafe,
	"placeholder": contentTypePlain,
	"poster":      contentTypeURL,
	"profile":     contentTypeURL,
	"preload":     contentTypePlain,
	"pubdate":     contentTypePlain,
	"radiogroup":  contentTypePlain,
	"readonly":    contentTypePlain,
	"rel":         contentTypeUnsafe,
	"required":    contentTypePlain,
	"reversed":    contentTypePlain,
	"rows":        contentTypePlain,
	"rowspan":     contentTypePlain,
	"sandbox":     contentTypeUnsafe,
	"spellcheck":  contentTypePlain,
	"scope":       contentTypePlain,
	"scoped":      contentTypePlain,
	"seamless":    contentTypePlain,
	"selected":    contentTypePlain,
	"shape":       contentTypePlain,
	"size":        contentTypePlain,
	"sizes":       contentTypePlain,
	"span":        contentTypePlain,
	"src":         contentTypeURL,
	"srcdoc":      contentTypeHTML,
	"srclang":     contentTypePlain,
	"srcset":      contentTypeSrcset,
	"start":       contentTypePlain,
	"step":        contentTypePlain,
	"style":       contentTypeCSS,
	"tabindex":    contentTypePlain,
	"target":      contentTypePlain,
	"title":       contentTypePlain,
	"type":        contentTypeUnsafe,
	"usemap":      contentTypeURL,
	"value":       contentTypeUnsafe,
	"width":       contentTypePlain,
	"wrap":        contentTypePlain,
	"xmlns":       contentTypeURL,
}
// attrType returns a conservative (upper-bound on authority) guess at the
// type of the lowercase named attribute.
func attrType(name string) contentType {
	if strings.HasPrefix(name, "data-") {
		// Strip data- so the heuristics below also apply to custom data
		// attributes; e.g. data-action is treated like action (a URL).
		name = name[5:]
	} else if ns, local, hasColon := strings.Cut(name, ":"); hasColon {
		if ns == "xmlns" {
			return contentTypeURL
		}
		// Treat svg:href and xlink:href as href below.
		name = local
	}

	if t, known := attrTypeMap[name]; known {
		return t
	}

	// Partial event handler names (on...) hold script.
	if strings.HasPrefix(name, "on") {
		return contentTypeJS
	}

	// Heuristics to prevent "javascript:..." injection in custom
	// data attributes and custom attributes like g:tweetUrl.
	// https://www.w3.org/TR/html5/dom.html#embedding-custom-non-visible-data-with-the-data-*-attributes
	// "Custom data attributes are intended to store custom data
	// private to the page or application, for which there are no
	// more appropriate attributes or elements."
	// Developers seem to store URL content in data URLs that start
	// or end with "URI" or "URL".
	switch {
	case strings.Contains(name, "src"),
		strings.Contains(name, "uri"),
		strings.Contains(name, "url"):
		return contentTypeURL
	}

	return contentTypePlain
}
// Code generated by "stringer -type attr"; DO NOT EDIT.
package template
import "strconv"
// This function is never called; it exists solely so the compiler verifies
// that the attr constant values still match the generated string tables.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[attrNone-0]
	_ = x[attrScript-1]
	_ = x[attrScriptType-2]
	_ = x[attrStyle-3]
	_ = x[attrURL-4]
	_ = x[attrSrcset-5]
	_ = x[attrMetaContent-6]
}
// _attr_name concatenates all attr constant names; _attr_index[i] and
// _attr_index[i+1] delimit the name of constant value i within it.
const _attr_name = "attrNoneattrScriptattrScriptTypeattrStyleattrURLattrSrcsetattrMetaContent"

var _attr_index = [...]uint8{0, 8, 18, 32, 41, 48, 58, 73}
// String returns the generated name of the attr constant i, or a
// fallback of the form "attr(N)" when i is outside the known range.
func (i attr) String() string {
	last := attr(len(_attr_index) - 1)
	if i < last {
		lo, hi := _attr_index[i], _attr_index[i+1]
		return _attr_name[lo:hi]
	}
	return "attr(" + strconv.FormatInt(int64(i), 10) + ")"
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"fmt"
"reflect"
)
// Strings of content from a trusted source.
//
// Each of these types opts its content out of the contextual escaping that
// this package would otherwise apply; see stringify for how they are mapped
// to content types.
type (
	// CSS encapsulates known safe content that matches any of:
	// 1. The CSS3 stylesheet production, such as `p { color: purple }`.
	// 2. The CSS3 rule production, such as `a[href=~"https:"].foo#bar`.
	// 3. CSS3 declaration productions, such as `color: red; margin: 2px`.
	// 4. The CSS3 value production, such as `rgba(0, 0, 255, 127)`.
	// See https://www.w3.org/TR/css3-syntax/#parsing and
	// https://web.archive.org/web/20090211114933/http://w3.org/TR/css3-syntax#style
	//
	// Use of this type presents a security risk:
	// the encapsulated content should come from a trusted source,
	// as it will be included verbatim in the template output.
	CSS string

	// HTML encapsulates a known safe HTML document fragment.
	// It should not be used for HTML from a third-party, or HTML with
	// unclosed tags or comments. The outputs of a sound HTML sanitizer
	// and a template escaped by this package are fine for use with HTML.
	//
	// Use of this type presents a security risk:
	// the encapsulated content should come from a trusted source,
	// as it will be included verbatim in the template output.
	HTML string

	// HTMLAttr encapsulates an HTML attribute from a trusted source,
	// for example, ` dir="ltr"`.
	//
	// Use of this type presents a security risk:
	// the encapsulated content should come from a trusted source,
	// as it will be included verbatim in the template output.
	HTMLAttr string

	// JS encapsulates a known safe EcmaScript5 Expression, for example,
	// `(x + y * z())`.
	// Template authors are responsible for ensuring that typed expressions
	// do not break the intended precedence and that there is no
	// statement/expression ambiguity as when passing an expression like
	// "{ foo: bar() }\n['foo']()", which is both a valid Expression and a
	// valid Program with a very different meaning.
	//
	// Use of this type presents a security risk:
	// the encapsulated content should come from a trusted source,
	// as it will be included verbatim in the template output.
	//
	// Using JS to include valid but untrusted JSON is not safe.
	// A safe alternative is to parse the JSON with json.Unmarshal and then
	// pass the resultant object into the template, where it will be
	// converted to sanitized JSON when presented in a JavaScript context.
	JS string

	// JSStr encapsulates a sequence of characters meant to be embedded
	// between quotes in a JavaScript expression.
	// The string must match a series of StringCharacters:
	// StringCharacter :: SourceCharacter but not `\` or LineTerminator
	// | EscapeSequence
	// Note that LineContinuations are not allowed.
	// JSStr("foo\\nbar") is fine, but JSStr("foo\\\nbar") is not.
	//
	// Use of this type presents a security risk:
	// the encapsulated content should come from a trusted source,
	// as it will be included verbatim in the template output.
	JSStr string

	// URL encapsulates a known safe URL or URL substring (see RFC 3986).
	// A URL like `javascript:checkThatFormNotEditedBeforeLeavingPage()`
	// from a trusted source should go in the page, but by default dynamic
	// `javascript:` URLs are filtered out since they are a frequently
	// exploited injection vector.
	//
	// Use of this type presents a security risk:
	// the encapsulated content should come from a trusted source,
	// as it will be included verbatim in the template output.
	URL string

	// Srcset encapsulates a known safe srcset attribute
	// (see https://w3c.github.io/html/semantics-embedded-content.html#element-attrdef-img-srcset).
	//
	// Use of this type presents a security risk:
	// the encapsulated content should come from a trusted source,
	// as it will be included verbatim in the template output.
	Srcset string
)
// contentType classifies template content by the escaping context it is
// trusted for; it mirrors the trusted string types above plus plain text
// and an explicit "unsafe" bucket.
type contentType uint8

const (
	// contentTypePlain is untyped text that receives full contextual escaping.
	contentTypePlain contentType = iota
	contentTypeCSS
	contentTypeHTML
	contentTypeHTMLAttr
	contentTypeJS
	contentTypeJSStr
	contentTypeURL
	contentTypeSrcset
	// contentTypeUnsafe is used in attr.go for values that affect how
	// embedded content and network messages are formed, vetted,
	// or interpreted; or which credentials network messages carry.
	contentTypeUnsafe
)
// indirect returns the value, after dereferencing as many times
// as necessary to reach the base type (or nil).
func indirect(a any) any {
	if a == nil {
		return nil
	}
	if reflect.TypeOf(a).Kind() != reflect.Pointer {
		// Fast path: non-pointers need no reflect.Value at all.
		return a
	}
	// Walk down the pointer chain, stopping at the first nil so that a
	// typed nil pointer is returned as-is rather than dereferenced.
	rv := reflect.ValueOf(a)
	for rv.Kind() == reflect.Pointer && !rv.IsNil() {
		rv = rv.Elem()
	}
	return rv.Interface()
}
// Cached reflect.Types used by indirectToStringerOrError to stop pointer
// indirection as soon as a value formats itself (via error or fmt.Stringer).
var (
	errorType       = reflect.TypeFor[error]()
	fmtStringerType = reflect.TypeFor[fmt.Stringer]()
)
// indirectToStringerOrError returns the value, after dereferencing as many times
// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
// or error.
func indirectToStringerOrError(a any) any {
	if a == nil {
		return nil
	}
	v := reflect.ValueOf(a)
	for {
		// Stop as soon as the value can format itself; otherwise keep
		// dereferencing until we hit a non-pointer or a nil pointer.
		if t := v.Type(); t.Implements(fmtStringerType) || t.Implements(errorType) {
			break
		}
		if v.Kind() != reflect.Pointer || v.IsNil() {
			break
		}
		v = v.Elem()
	}
	return v.Interface()
}
// stringify converts its arguments to a string and the type of the content.
// All pointers are dereferenced, as in the text/template package.
func stringify(args ...any) (string, contentType) {
	// A single argument of one of the trusted string types keeps its
	// declared content type; anything else collapses to plain text below.
	if len(args) == 1 {
		switch s := indirect(args[0]).(type) {
		case string:
			return s, contentTypePlain
		case CSS:
			return string(s), contentTypeCSS
		case HTML:
			return string(s), contentTypeHTML
		case HTMLAttr:
			return string(s), contentTypeHTMLAttr
		case JS:
			return string(s), contentTypeJS
		case JSStr:
			return string(s), contentTypeJSStr
		case URL:
			return string(s), contentTypeURL
		case Srcset:
			return string(s), contentTypeSrcset
		}
	}
	// Dereference and compact the arguments in place, dropping untyped
	// nils for backward compatibility: without this they would be output
	// as <nil>, escaped. See issue 25875.
	n := 0
	for _, a := range args {
		if a == nil {
			continue
		}
		args[n] = indirectToStringerOrError(a)
		n++
	}
	return fmt.Sprint(args[:n]...), contentTypePlain
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"fmt"
"text/template/parse"
)
// context describes the state an HTML parser must be in when it reaches the
// portion of HTML produced by evaluating a particular template node.
//
// The zero value of type context is the start context for a template that
// produces an HTML fragment as defined at
// https://www.w3.org/TR/html5/syntax.html#the-end
// where the context element is null.
type context struct {
	state   state   // high-level parser state (text, tag, JS string, ...)
	delim   delim   // delimiter that will end the current HTML attribute
	urlPart urlPart // which part of a URL the parse position is in
	jsCtx   jsCtx   // whether a '/' here would start a regexp or a division
	// jsBraceDepth contains the current depth, for each JS template literal
	// string interpolation expression, of braces we've seen. This is used to
	// determine if the next } will close a JS template literal string
	// interpolation expression or not.
	jsBraceDepth []int
	attr         attr       // which kind of attribute value we are inside
	element      element    // enclosing special element (script, style, ...)
	n            parse.Node // for range break/continue
	err          *Error     // error encountered while computing this context
}
// String returns a debugging representation of the context.
func (c context) String() string {
	// Convert a typed-nil *Error into an untyped nil interface so fmt
	// prints "<nil>" rather than formatting through a nil receiver.
	var err error
	if e := c.err; e != nil {
		err = e
	}
	return fmt.Sprintf("{%v %v %v %v %v %v %v}", c.state, c.delim, c.urlPart, c.jsCtx, c.attr, c.element, err)
}
// eq reports whether two contexts are equal. jsBraceDepth is deliberately
// excluded from the comparison, matching the original field-by-field test.
func (c context) eq(d context) bool {
	switch {
	case c.state != d.state,
		c.delim != d.delim,
		c.urlPart != d.urlPart,
		c.jsCtx != d.jsCtx,
		c.attr != d.attr,
		c.element != d.element,
		c.err != d.err:
		return false
	}
	return true
}
// mangle produces an identifier that includes a suffix that distinguishes it
// from template names mangled with different contexts.
func (c context) mangle(templateName string) string {
	// The mangled name for the default context is the input templateName.
	if c.state == stateText {
		return templateName
	}
	// Build the suffix from every non-default component of the context.
	suffix := "$htmltemplate_" + c.state.String()
	if c.delim != delimNone {
		suffix += "_" + c.delim.String()
	}
	if c.urlPart != urlPartNone {
		suffix += "_" + c.urlPart.String()
	}
	if c.jsCtx != jsCtxRegexp {
		suffix += "_" + c.jsCtx.String()
	}
	if c.attr != attrNone {
		suffix += "_" + c.attr.String()
	}
	if c.element != elementNone {
		suffix += "_" + c.element.String()
	}
	return templateName + suffix
}
// state describes a high-level HTML parser state.
//
// It bounds the top of the element stack, and by extension the HTML insertion
// mode, but also contains state that does not correspond to anything in the
// HTML5 parsing algorithm because a single token production in the HTML
// grammar may contain embedded actions in a template. For instance, the quoted
// HTML attribute produced by
//
//	<div title="Hello {{.World}}">
//
// is a single token in HTML's grammar but in a template spans several nodes.
type state uint8

//go:generate stringer -type state

const (
	// stateText is parsed character data. An HTML parser is in
	// this state when its parse position is outside an HTML tag,
	// directive, comment, and special element body.
	stateText state = iota
	// stateTag occurs before an HTML attribute or the end of a tag.
	stateTag
	// stateAttrName occurs inside an attribute name.
	// It occurs between the ^'s in ` ^name^ = value`.
	stateAttrName
	// stateAfterName occurs after an attr name has ended but before any
	// equals sign. It occurs between the ^'s in ` name^ ^= value`.
	stateAfterName
	// stateBeforeValue occurs after the equals sign but before the value.
	// It occurs between the ^'s in ` name =^ ^value`.
	stateBeforeValue
	// stateHTMLCmt occurs inside an <!-- HTML comment -->.
	stateHTMLCmt
	// stateRCDATA occurs inside an RCDATA element (<textarea> or <title>)
	// as described at https://www.w3.org/TR/html5/syntax.html#elements-0
	stateRCDATA
	// stateAttr occurs inside an HTML attribute whose content is text.
	stateAttr
	// stateURL occurs inside an HTML attribute whose content is a URL.
	stateURL
	// stateSrcset occurs inside an HTML srcset attribute.
	stateSrcset
	// stateJS occurs inside an event handler or script element.
	stateJS
	// stateJSDqStr occurs inside a JavaScript double quoted string.
	stateJSDqStr
	// stateJSSqStr occurs inside a JavaScript single quoted string.
	stateJSSqStr
	// stateJSTmplLit occurs inside a JavaScript back quoted string.
	stateJSTmplLit
	// stateJSRegexp occurs inside a JavaScript regexp literal.
	stateJSRegexp
	// stateJSBlockCmt occurs inside a JavaScript /* block comment */.
	stateJSBlockCmt
	// stateJSLineCmt occurs inside a JavaScript // line comment.
	stateJSLineCmt
	// stateJSHTMLOpenCmt occurs inside a JavaScript <!-- HTML-like comment.
	stateJSHTMLOpenCmt
	// stateJSHTMLCloseCmt occurs inside a JavaScript --> HTML-like comment.
	stateJSHTMLCloseCmt
	// stateCSS occurs inside a <style> element or style attribute.
	stateCSS
	// stateCSSDqStr occurs inside a CSS double quoted string.
	stateCSSDqStr
	// stateCSSSqStr occurs inside a CSS single quoted string.
	stateCSSSqStr
	// stateCSSDqURL occurs inside a CSS double quoted url("...").
	stateCSSDqURL
	// stateCSSSqURL occurs inside a CSS single quoted url('...').
	stateCSSSqURL
	// stateCSSURL occurs inside a CSS unquoted url(...).
	stateCSSURL
	// stateCSSBlockCmt occurs inside a CSS /* block comment */.
	stateCSSBlockCmt
	// stateCSSLineCmt occurs inside a CSS // line comment.
	stateCSSLineCmt
	// stateError is an infectious error state outside any valid
	// HTML/CSS/JS construct.
	stateError
	// stateMetaContent occurs inside an HTML meta element content attribute.
	stateMetaContent
	// stateMetaContentURL occurs inside a "url=" tag in an HTML meta element content attribute.
	stateMetaContentURL
	// stateDead marks unreachable code after a {{break}} or {{continue}}.
	stateDead
)
// isComment reports whether s is a comment state: content meant for
// template authors and maintainers, not for end-users or machines.
func isComment(s state) bool {
	switch s {
	case stateHTMLCmt,
		stateJSBlockCmt,
		stateJSLineCmt,
		stateJSHTMLOpenCmt,
		stateJSHTMLCloseCmt,
		stateCSSBlockCmt,
		stateCSSLineCmt:
		return true
	default:
		return false
	}
}
// isInTag reports whether s occurs solely inside an HTML tag.
func isInTag(s state) bool {
	switch s {
	case stateTag,
		stateAttrName,
		stateAfterName,
		stateBeforeValue,
		stateAttr:
		return true
	default:
		return false
	}
}
// isInScriptLiteral reports whether s is one of the literal states within a
// <script> tag, in which occurrences of "<!--", "<script", and "</script"
// need special treatment.
//
// The comment states (stateJSBlockCmt, stateJSLineCmt, stateJSHTMLOpenCmt,
// stateJSHTMLCloseCmt) are deliberately excluded because their content is
// already omitted from the output.
func isInScriptLiteral(s state) bool {
	switch s {
	case stateJSDqStr,
		stateJSSqStr,
		stateJSTmplLit,
		stateJSRegexp:
		return true
	default:
		return false
	}
}
// delim is the delimiter that will end the current HTML attribute.
type delim uint8

//go:generate stringer -type delim

const (
	// delimNone occurs outside any attribute.
	delimNone delim = iota
	// delimDoubleQuote occurs when a double quote (") closes the attribute.
	delimDoubleQuote
	// delimSingleQuote occurs when a single quote (') closes the attribute.
	delimSingleQuote
	// delimSpaceOrTagEnd occurs when a space or right angle bracket (>)
	// closes the attribute, i.e. the attribute value is unquoted.
	delimSpaceOrTagEnd
)
// urlPart identifies a part in an RFC 3986 hierarchical URL to allow different
// encoding strategies (the query/fragment part uses a different escaping
// than the scheme/authority/path part).
type urlPart uint8

//go:generate stringer -type urlPart

const (
	// urlPartNone occurs when not in a URL, or possibly at the start:
	// ^ in "^http://auth/path?k=v#frag".
	urlPartNone urlPart = iota
	// urlPartPreQuery occurs in the scheme, authority, or path; between the
	// ^s in "h^ttp://auth/path^?k=v#frag".
	urlPartPreQuery
	// urlPartQueryOrFrag occurs in the query portion between the ^s in
	// "http://auth/path?^k=v#frag^".
	urlPartQueryOrFrag
	// urlPartUnknown occurs due to joining of contexts both before and
	// after the query separator.
	urlPartUnknown
)
// jsCtx determines whether a '/' starts a regular expression literal or a
// division operator. (JavaScript's grammar is not context-free here, so the
// escaper tracks which interpretation applies at the current position.)
type jsCtx uint8

//go:generate stringer -type jsCtx

const (
	// jsCtxRegexp occurs where a '/' would start a regexp literal.
	jsCtxRegexp jsCtx = iota
	// jsCtxDivOp occurs where a '/' would start a division operator.
	jsCtxDivOp
	// jsCtxUnknown occurs where a '/' is ambiguous due to context joining.
	jsCtxUnknown
)
// element identifies the HTML element when inside a start tag or special body.
// Certain HTML elements (for example <script> and <style>) have bodies that are
// treated differently from stateText so the element type is necessary to
// transition into the correct context at the end of a tag and to identify the
// end delimiter for the body.
type element uint8

//go:generate stringer -type element

const (
	// elementNone occurs outside a special tag or special element body.
	elementNone element = iota
	// elementScript corresponds to the raw text <script> element
	// with JS MIME type or no type attribute.
	elementScript
	// elementStyle corresponds to the raw text <style> element.
	elementStyle
	// elementTextarea corresponds to the RCDATA <textarea> element.
	elementTextarea
	// elementTitle corresponds to the RCDATA <title> element.
	elementTitle
	// elementMeta corresponds to the HTML <meta> element.
	elementMeta
)
//go:generate stringer -type attr

// attr identifies the current HTML attribute when inside the attribute,
// that is, starting from stateAttrName until stateTag/stateText (exclusive).
type attr uint8

const (
	// attrNone corresponds to a normal attribute or no attribute.
	attrNone attr = iota
	// attrScript corresponds to an event handler attribute.
	attrScript
	// attrScriptType corresponds to the type attribute in a script HTML element.
	attrScriptType
	// attrStyle corresponds to the style attribute whose value is CSS.
	attrStyle
	// attrURL corresponds to an attribute whose value is a URL.
	attrURL
	// attrSrcset corresponds to a srcset attribute.
	attrSrcset
	// attrMetaContent corresponds to the content attribute in a meta HTML element.
	attrMetaContent
)
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"bytes"
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
// endsWithCSSKeyword reports whether b ends with an ident that
// case-insensitively matches the lower-case kw.
func endsWithCSSKeyword(b []byte, kw string) bool {
	i := len(b) - len(kw)
	switch {
	case i < 0:
		// b is shorter than kw; it cannot end with it.
		return false
	case i > 0:
		// If the byte before the candidate match is an identifier
		// character, the match is only a suffix of a longer ident.
		if r, _ := utf8.DecodeLastRune(b[:i]); isCSSNmchar(r) {
			return false
		}
	}
	// Many CSS keywords, such as "!important" can have characters encoded,
	// but the URI production does not allow that according to
	// https://www.w3.org/TR/css3-syntax/#TOK-URI
	// This does not attempt to recognize encoded keywords. For example,
	// given "\75\72\6c" and "url" this returns false.
	return string(bytes.ToLower(b[i:])) == kw
}
// isCSSNmchar reports whether rune is allowed anywhere in a CSS identifier.
// Based on the CSS3 nmchar production but ignoring multi-rune escape
// sequences: https://www.w3.org/TR/css3-syntax/#SUBTOK-nmchar
func isCSSNmchar(r rune) bool {
	switch {
	case 'a' <= r && r <= 'z',
		'A' <= r && r <= 'Z',
		'0' <= r && r <= '9',
		r == '-',
		r == '_':
		return true
	}
	// Non-ASCII ranges (surrogates and a few non-characters excluded).
	return 0x80 <= r && r <= 0xd7ff ||
		0xe000 <= r && r <= 0xfffd ||
		0x10000 <= r && r <= 0x10ffff
}
// decodeCSS decodes CSS3 escapes given a sequence of stringchars.
// If there is no change, it returns the input, otherwise it returns a slice
// backed by a new array.
// https://www.w3.org/TR/css3-syntax/#SUBTOK-stringchar defines stringchar.
func decodeCSS(s []byte) []byte {
	i := bytes.IndexByte(s, '\\')
	if i == -1 {
		// No escape sequences at all; return the input unchanged.
		return s
	}
	// The UTF-8 sequence for a codepoint is never longer than 1 + the
	// number hex digits need to represent that codepoint, so len(s) is an
	// upper bound on the output length.
	b := make([]byte, 0, len(s))
	for len(s) != 0 {
		i := bytes.IndexByte(s, '\\')
		if i == -1 {
			i = len(s)
		}
		// Copy everything up to the next backslash verbatim.
		b, s = append(b, s[:i]...), s[i:]
		if len(s) < 2 {
			// A lone trailing backslash has nothing to escape; drop it.
			break
		}
		// https://www.w3.org/TR/css3-syntax/#SUBTOK-escape
		// escape ::= unicode | '\' [#x20-#x7E#x80-#xD7FF#xE000-#xFFFD#x10000-#x10FFFF]
		if isHex(s[1]) {
			// https://www.w3.org/TR/css3-syntax/#SUBTOK-unicode
			// unicode ::= '\' [0-9a-fA-F]{1,6} wc?
			j := 2
			for j < len(s) && j < 7 && isHex(s[j]) {
				j++
			}
			r := hexDecode(s[1:j])
			if r > unicode.MaxRune {
				// Drop the low hex digit so the value is a valid rune.
				r, j = r/16, j-1
			}
			// Encode directly into b's spare capacity; cap(b) == len(s)
			// is always sufficient (see the bound argument above).
			n := utf8.EncodeRune(b[len(b):cap(b)], r)
			// The optional space at the end allows a hex
			// sequence to be followed by a literal hex.
			// string(decodeCSS([]byte(`\A B`))) == "\nB"
			b, s = b[:len(b)+n], skipCSSSpace(s[j:])
		} else {
			// `\\` decodes to `\` and `\"` to `"`.
			_, n := utf8.DecodeRune(s[1:])
			b, s = append(b, s[1:1+n]...), s[1+n:]
		}
	}
	return b
}
// isHex reports whether the given character is a hexadecimal digit.
func isHex(c byte) bool {
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'a' && c <= 'f':
		return true
	case c >= 'A' && c <= 'F':
		return true
	}
	return false
}
// hexDecode decodes a short hex digit sequence: "10" -> 16.
// It panics if s contains any non-hex byte.
func hexDecode(s []byte) rune {
	var n rune
	for _, c := range s {
		var d rune
		switch {
		case c >= '0' && c <= '9':
			d = rune(c - '0')
		case c >= 'a' && c <= 'f':
			d = rune(c-'a') + 10
		case c >= 'A' && c <= 'F':
			d = rune(c-'A') + 10
		default:
			panic(fmt.Sprintf("Bad hex digit in %q", s))
		}
		n = n<<4 | d
	}
	return n
}
// skipCSSSpace returns a suffix of c, skipping over at most one space.
//
// wc ::= #x9 | #xA | #xC | #xD | #x20
func skipCSSSpace(c []byte) []byte {
	if len(c) == 0 {
		return c
	}
	switch b := c[0]; {
	case b == '\t', b == '\n', b == '\f', b == ' ':
		return c[1:]
	case b == '\r':
		// CSS3's wc production contains a probable spec error: it lists
		// every single-byte sequence in nl (newline) but not CRLF, so
		// treat "\r\n" as one space here.
		if len(c) >= 2 && c[1] == '\n' {
			return c[2:]
		}
		return c[1:]
	}
	return c
}
// isCSSSpace reports whether b is a CSS space character as defined in the
// wc production.
func isCSSSpace(b byte) bool {
	return b == '\t' || b == '\n' || b == '\f' || b == '\r' || b == ' '
}
// cssEscaper escapes HTML and CSS special characters using \<hex>+ escapes.
func cssEscaper(args ...any) string {
	s, _ := stringify(args...)
	var b strings.Builder
	r, w, written := rune(0), 0, 0
	// written tracks how much of s has been copied to b; it stays 0 until
	// the first replacement so unescaped inputs are returned as-is below.
	for i := 0; i < len(s); i += w {
		// See comment in htmlEscaper.
		r, w = utf8.DecodeRuneInString(s[i:])
		var repl string
		switch {
		case int(r) < len(cssReplacementTable) && cssReplacementTable[r] != "":
			repl = cssReplacementTable[r]
		default:
			// Rune needs no escaping; keep scanning.
			continue
		}
		if written == 0 {
			// First replacement: reserve roughly enough space up front.
			b.Grow(len(s))
		}
		b.WriteString(s[written:i])
		b.WriteString(repl)
		written = i + w
		// A hex escape absorbs a following hex digit or space character,
		// so terminate it with a space when the next character (or any
		// text appended after this string) could otherwise be swallowed.
		// `\\` is not a hex escape and needs no terminator.
		if repl != `\\` && (written == len(s) || isHex(s[written]) || isCSSSpace(s[written])) {
			b.WriteByte(' ')
		}
	}
	if written == 0 {
		// Nothing required escaping; return the input unchanged.
		return s
	}
	b.WriteString(s[written:])
	return b.String()
}
// cssReplacementTable maps an ASCII code point to its CSS escape, or "" for
// code points that need no escaping. Indexed by rune in cssEscaper.
var cssReplacementTable = []string{
	0:    `\0`,
	'\t': `\9`,
	'\n': `\a`,
	'\f': `\c`,
	'\r': `\d`,
	// Encode HTML specials as hex so the output can be embedded
	// in HTML attributes without further encoding.
	'"':  `\22`,
	'&':  `\26`,
	'\'': `\27`,
	'(':  `\28`,
	')':  `\29`,
	'+':  `\2b`,
	'/':  `\2f`,
	':':  `\3a`,
	';':  `\3b`,
	'<':  `\3c`,
	'>':  `\3e`,
	// Backslash is the only non-hex escape; cssEscaper relies on this
	// when deciding whether to append a terminating space.
	'\\': `\\`,
	'{':  `\7b`,
	'}':  `\7d`,
}
// expressionBytes and mozBindingBytes are lower-cased identifiers of CSS
// features ("expression(...)", "-moz-binding") that can execute script;
// cssValueFilter rejects values whose identifier characters contain either.
var expressionBytes = []byte("expression")
var mozBindingBytes = []byte("mozbinding")
// cssValueFilter allows innocuous CSS values in the output including CSS
// quantities (10px or 25%), ID or class literals (#foo, .bar), keyword values
// (inherit, blue), and colors (#888).
// It filters out unsafe values, such as those that affect token boundaries,
// and anything that might execute scripts.
func cssValueFilter(args ...any) string {
	s, t := stringify(args...)
	if t == contentTypeCSS {
		// Values explicitly typed as CSS are trusted verbatim.
		return s
	}
	// Decode escapes first so the checks below see the effective bytes;
	// id accumulates identifier characters for the keyword scan at the end.
	b, id := decodeCSS([]byte(s)), make([]byte, 0, 64)

	// CSS3 error handling is specified as honoring string boundaries per
	// https://www.w3.org/TR/css3-syntax/#error-handling :
	//     Malformed declarations. User agents must handle unexpected
	//     tokens encountered while parsing a declaration by reading until
	//     the end of the declaration, while observing the rules for
	//     matching pairs of (), [], {}, "", and '', and correctly handling
	//     escapes. For example, a malformed declaration may be missing a
	//     property, colon (:) or value.
	// So we need to make sure that values do not have mismatched bracket
	// or quote characters to prevent the browser from restarting parsing
	// inside a string that might embed JavaScript source.
	for i, c := range b {
		switch c {
		case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}', '<', '>':
			return filterFailsafe
		case '-':
			// Disallow <!-- or -->.
			// -- should not appear in valid identifiers.
			if i != 0 && b[i-1] == '-' {
				return filterFailsafe
			}
		default:
			if c < utf8.RuneSelf && isCSSNmchar(rune(c)) {
				id = append(id, c)
			}
		}
	}
	// Reject values whose identifier characters spell out a known
	// script-executing CSS feature, in any mix of case.
	id = bytes.ToLower(id)
	if bytes.Contains(id, expressionBytes) || bytes.Contains(id, mozBindingBytes) {
		return filterFailsafe
	}
	return string(b)
}
// Code generated by "stringer -type delim"; DO NOT EDIT.
package template
import "strconv"
// NOTE: this section is generated by "stringer -type delim"; prefer
// re-running stringer over editing it by hand.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[delimNone-0]
	_ = x[delimDoubleQuote-1]
	_ = x[delimSingleQuote-2]
	_ = x[delimSpaceOrTagEnd-3]
}

// _delim_name concatenates all delim constant names; _delim_index holds the
// starting offset of each name within it.
const _delim_name = "delimNonedelimDoubleQuotedelimSingleQuotedelimSpaceOrTagEnd"

var _delim_index = [...]uint8{0, 9, 25, 41, 59}

// String returns the name of the delim constant, or "delim(n)" for values
// outside the declared range.
func (i delim) String() string {
	if i >= delim(len(_delim_index)-1) {
		return "delim(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _delim_name[_delim_index[i]:_delim_index[i+1]]
}
// Code generated by "stringer -type element"; DO NOT EDIT.
package template
import "strconv"
// NOTE: this section is generated by "stringer -type element"; prefer
// re-running stringer over editing it by hand.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[elementNone-0]
	_ = x[elementScript-1]
	_ = x[elementStyle-2]
	_ = x[elementTextarea-3]
	_ = x[elementTitle-4]
	_ = x[elementMeta-5]
}

// _element_name concatenates all element constant names; _element_index
// holds the starting offset of each name within it.
const _element_name = "elementNoneelementScriptelementStyleelementTextareaelementTitleelementMeta"

var _element_index = [...]uint8{0, 11, 24, 36, 51, 63, 74}

// String returns the name of the element constant, or "element(n)" for
// values outside the declared range.
func (i element) String() string {
	if i >= element(len(_element_index)-1) {
		return "element(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _element_name[_element_index[i]:_element_index[i+1]]
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"fmt"
"text/template/parse"
)
// Error describes a problem encountered during template Escaping.
type Error struct {
	// ErrorCode describes the kind of error.
	ErrorCode ErrorCode
	// Node is the node that caused the problem, if known.
	// If not nil, it overrides Name and Line when formatting the message.
	Node parse.Node
	// Name is the name of the template in which the error was encountered.
	Name string
	// Line is the line number of the error in the template source or 0.
	Line int
	// Description is a human-readable description of the problem.
	Description string
}
// ErrorCode is a code for a kind of error.
type ErrorCode int

// We define codes for each error that manifests while escaping templates, but
// escaped templates may also fail at runtime.
//
// Output: "ZgotmplZ"
// Example:
//
//	<img src="{{.X}}">
//	where {{.X}} evaluates to `javascript:...`
//
// Discussion:
//
//	"ZgotmplZ" is a special value that indicates that unsafe content reached a
//	CSS or URL context at runtime. The output of the example will be
//	  <img src="#ZgotmplZ">
//	If the data comes from a trusted source, use content types to exempt it
//	from filtering: URL(`javascript:...`).
const (
	// OK indicates the lack of an error.
	OK ErrorCode = iota

	// ErrAmbigContext: "... appears in an ambiguous context within a URL"
	// Example:
	//	<a href="
	//	   {{if .C}}
	//	     /path/
	//	   {{else}}
	//	     /search?q=
	//	   {{end}}
	//	   {{.X}}
	//	">
	// Discussion:
	//	{{.X}} is in an ambiguous URL context since, depending on {{.C}},
	//	it may be either a URL suffix or a query parameter.
	//	Moving {{.X}} into the condition removes the ambiguity:
	//	<a href="{{if .C}}/path/{{.X}}{{else}}/search?q={{.X}}">
	ErrAmbigContext

	// ErrBadHTML: "expected space, attr name, or end of tag, but got ...",
	// "... in unquoted attr", "... in attribute name"
	// Example:
	//	<a href = /search?q=foo>
	//	<href=foo>
	//	<form na<e=...>
	//	<option selected<
	// Discussion:
	//	This is often due to a typo in an HTML element, but some runes
	//	are banned in tag names, attribute names, and unquoted attribute
	//	values because they can tickle parser ambiguities.
	//	Quoting all attributes is the best policy.
	ErrBadHTML

	// ErrBranchEnd: "{{if}} branches end in different contexts"
	// Examples:
	//	{{if .C}}<a href="{{end}}{{.X}}
	//	<script {{with .T}}type="{{.}}"{{end}}>
	// Discussion:
	//	Package html/template statically examines each path through an
	//	{{if}}, {{range}}, or {{with}} to escape any following pipelines.
	//	The first example is ambiguous since {{.X}} might be an HTML text node,
	//	or a URL prefix in an HTML attribute. The context of {{.X}} is
	//	used to figure out how to escape it, but that context depends on
	//	the run-time value of {{.C}} which is not statically known.
	//	The second example is ambiguous as the script type attribute
	//	can change the type of escaping needed for the script contents.
	//
	//	The problem is usually something like missing quotes or angle
	//	brackets, or can be avoided by refactoring to put the two contexts
	//	into different branches of an if, range or with. If the problem
	//	is in a {{range}} over a collection that should never be empty,
	//	adding a dummy {{else}} can help.
	ErrBranchEnd

	// ErrEndContext: "... ends in a non-text context: ..."
	// Examples:
	//	<div
	//	<div title="no close quote>
	//	<script>f()
	// Discussion:
	//	Executed templates should produce a DocumentFragment of HTML.
	//	Templates that end without closing tags will trigger this error.
	//	Templates that should not be used in an HTML context or that
	//	produce incomplete Fragments should not be executed directly.
	//
	//	{{define "main"}} <script>{{template "helper"}}</script> {{end}}
	//	{{define "helper"}} document.write(' <div title=" ') {{end}}
	//
	//	"helper" does not produce a valid document fragment, so should
	//	not be Executed directly.
	ErrEndContext

	// ErrNoSuchTemplate: "no such template ..."
	// Examples:
	//	{{define "main"}}<div {{template "attrs"}}>{{end}}
	//	{{define "attrs"}}href="{{.URL}}"{{end}}
	// Discussion:
	//	Package html/template looks through template calls to compute the
	//	context.
	//	Here the {{.URL}} in "attrs" must be treated as a URL when called
	//	from "main", but you will get this error if "attrs" is not defined
	//	when "main" is parsed.
	ErrNoSuchTemplate

	// ErrOutputContext: "cannot compute output context for template ..."
	// Examples:
	//	{{define "t"}}{{if .T}}{{template "t" .T}}{{end}}{{.H}}",{{end}}
	// Discussion:
	//	A recursive template does not end in the same context in which it
	//	starts, and a reliable output context cannot be computed.
	//	Look for typos in the named template.
	//	If the template should not be called in the named start context,
	//	look for calls to that template in unexpected contexts.
	//	Maybe refactor recursive templates to not be recursive.
	ErrOutputContext

	// ErrPartialCharset: "unfinished JS regexp charset in ..."
	// Example:
	//	<script>var pattern = /foo[{{.Chars}}]/</script>
	// Discussion:
	//	Package html/template does not support interpolation into regular
	//	expression literal character sets.
	ErrPartialCharset

	// ErrPartialEscape: "unfinished escape sequence in ..."
	// Example:
	//	<script>alert("\{{.X}}")</script>
	// Discussion:
	//	Package html/template does not support actions following a
	//	backslash.
	//	This is usually an error and there are better solutions; for
	//	example
	//	  <script>alert("{{.X}}")</script>
	//	should work, and if {{.X}} is a partial escape sequence such as
	//	"xA0", mark the whole sequence as safe content: JSStr(`\xA0`)
	ErrPartialEscape

	// ErrRangeLoopReentry: "on range loop re-entry: ..."
	// Example:
	//	<script>var x = [{{range .}}'{{.}},{{end}}]</script>
	// Discussion:
	//	If an iteration through a range would cause it to end in a
	//	different context than an earlier pass, there is no single context.
	//	In the example, there is missing a quote, so it is not clear
	//	whether {{.}} is meant to be inside a JS string or in a JS value
	//	context. The second iteration would produce something like
	//
	//	  <script>var x = ['firstValue,'secondValue]</script>
	ErrRangeLoopReentry

	// ErrSlashAmbig: '/' could start a division or regexp.
	// Example:
	//	<script>
	//	  {{if .C}}var x = 1{{end}}
	//	  /-{{.N}}/i.test(x) ? doThis : doThat();
	//	</script>
	// Discussion:
	//	The example above could produce `var x = 1/-2/i.test(s)...`
	//	in which the first '/' is a mathematical division operator or it
	//	could produce `/-2/i.test(s)` in which the first '/' starts a
	//	regexp literal.
	//	Look for missing semicolons inside branches, and maybe add
	//	parentheses to make it clear which interpretation you intend.
	ErrSlashAmbig

	// ErrPredefinedEscaper: "predefined escaper ... disallowed in template"
	// Example:
	//	<div class={{. | html}}>Hello<div>
	// Discussion:
	//	Package html/template already contextually escapes all pipelines to
	//	produce HTML output safe against code injection. Manually escaping
	//	pipeline output using the predefined escapers "html" or "urlquery" is
	//	unnecessary, and may affect the correctness or safety of the escaped
	//	pipeline output in Go 1.8 and earlier.
	//
	//	In most cases, such as the given example, this error can be resolved by
	//	simply removing the predefined escaper from the pipeline and letting the
	//	contextual autoescaper handle the escaping of the pipeline. In other
	//	instances, where the predefined escaper occurs in the middle of a
	//	pipeline where subsequent commands expect escaped input, e.g.
	//	  {{.X | html | makeALink}}
	//	where makeALink does
	//	  return `<a href="`+input+`">link</a>`
	//	consider refactoring the surrounding template to make use of the
	//	contextual autoescaper, i.e.
	//	  <a href="{{.X}}">link</a>
	//
	//	To ease migration to Go 1.9 and beyond, "html" and "urlquery" will
	//	continue to be allowed as the last command in a pipeline. However, if the
	//	pipeline occurs in an unquoted attribute value context, "html" is
	//	disallowed. Avoid using "html" and "urlquery" entirely in new templates.
	ErrPredefinedEscaper

	// ErrJSTemplate: "... appears in a JS template literal"
	// Example:
	//	<script>var tmpl = `{{.Interp}}`</script>
	// Discussion:
	//	Package html/template does not support actions inside of JS template
	//	literals.
	//
	// Deprecated: ErrJSTemplate is no longer returned when an action is present
	// in a JS template literal. Actions inside of JS template literals are now
	// escaped as expected.
	ErrJSTemplate
)
// Error formats the message with the most specific location information
// available: node position first, then line number, then template name.
func (e *Error) Error() string {
	if e.Node != nil {
		loc, _ := (*parse.Tree)(nil).ErrorContext(e.Node)
		return fmt.Sprintf("html/template:%s: %s", loc, e.Description)
	}
	if e.Line != 0 {
		return fmt.Sprintf("html/template:%s:%d: %s", e.Name, e.Line, e.Description)
	}
	if e.Name != "" {
		return fmt.Sprintf("html/template:%s: %s", e.Name, e.Description)
	}
	return "html/template: " + e.Description
}
// errorf creates an error given a format string f and args.
// The template Name still needs to be supplied by the caller.
func errorf(k ErrorCode, node parse.Node, line int, f string, args ...any) *Error {
	return &Error{
		ErrorCode:   k,
		Node:        node,
		Line:        line,
		Description: fmt.Sprintf(f, args...),
	}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"bytes"
"fmt"
"html"
"internal/godebug"
"io"
"maps"
"regexp"
"text/template"
"text/template/parse"
)
// escapeTemplate rewrites the named template, which must be
// associated with t, to guarantee that the output of any of the named
// templates is properly escaped. If no error is returned, then the named templates have
// been modified. Otherwise the named templates have been rendered
// unusable.
func escapeTemplate(tmpl *Template, node parse.Node, name string) error {
	// Compute the output context of the template, starting from the
	// default text context.
	c, _ := tmpl.esc.escapeTree(context{}, node, name, 0)
	var err error
	if c.err != nil {
		err, c.err.Name = c.err, name
	} else if c.state != stateText {
		// A well-formed HTML fragment must end back in the text state.
		err = &Error{ErrEndContext, nil, name, 0, fmt.Sprintf("ends in a non-text context: %v", c)}
	}
	if err != nil {
		// Prevent execution of unsafe templates by clearing their trees.
		if t := tmpl.set[name]; t != nil {
			t.escapeErr = err
			t.text.Tree = nil
			t.Tree = nil
		}
		return err
	}
	// Apply the accumulated node edits only now that escaping succeeded.
	tmpl.esc.commit()
	if t := tmpl.set[name]; t != nil {
		t.escapeErr = escapeOK
		t.Tree = t.text.Tree
	}
	return nil
}
// evalArgs formats the list of arguments into a string. It is equivalent to
// fmt.Sprint(args...), except that it dereferences all pointers.
func evalArgs(args ...any) string {
	// Fast path: a lone string needs no formatting at all.
	if len(args) == 1 {
		if s, ok := args[0].(string); ok {
			return s
		}
	}
	for i := range args {
		args[i] = indirectToStringerOrError(args[i])
	}
	return fmt.Sprint(args...)
}
// funcMap maps command names to functions that render their inputs safe.
// The escaper inserts calls to these (by name) into rewritten pipelines,
// so the names must stay in sync with the strings used in escape.go.
var funcMap = template.FuncMap{
	"_html_template_attrescaper":      attrEscaper,
	"_html_template_commentescaper":   commentEscaper,
	"_html_template_cssescaper":       cssEscaper,
	"_html_template_cssvaluefilter":   cssValueFilter,
	"_html_template_htmlnamefilter":   htmlNameFilter,
	"_html_template_htmlescaper":      htmlEscaper,
	"_html_template_jsregexpescaper":  jsRegexpEscaper,
	"_html_template_jsstrescaper":     jsStrEscaper,
	"_html_template_jstmpllitescaper": jsTmplLitEscaper,
	"_html_template_jsvalescaper":     jsValEscaper,
	"_html_template_nospaceescaper":   htmlNospaceEscaper,
	"_html_template_rcdataescaper":    rcdataEscaper,
	"_html_template_srcsetescaper":    srcsetFilterAndEscaper,
	"_html_template_urlescaper":       urlEscaper,
	"_html_template_urlfilter":        urlFilter,
	"_html_template_urlnormalizer":    urlNormalizer,
	"_eval_args_":                     evalArgs,
}
// escaper collects type inferences about templates and changes needed to make
// templates injection safe.
type escaper struct {
	// ns is the nameSpace that this escaper is associated with.
	ns *nameSpace
	// output[templateName] is the output context for a templateName that
	// has been mangled to include its input context.
	output map[string]context
	// derived[c.mangle(name)] maps to a template derived from the template
	// named name templateName for the start context c.
	derived map[string]*template.Template
	// called[templateName] is a set of called mangled template names.
	called map[string]bool
	// xxxNodeEdits are the accumulated edits to apply during commit.
	// Such edits are not applied immediately in case a template set
	// executes a given template in different escaping contexts.
	actionNodeEdits   map[*parse.ActionNode][]string
	templateNodeEdits map[*parse.TemplateNode]string
	textNodeEdits     map[*parse.TextNode][]byte
	// rangeContext holds context about the current (innermost) range loop.
	rangeContext *rangeContext
}
// rangeContext holds information about the current range loop; the escaper
// keeps a stack of these (linked through outer) so that {{break}} and
// {{continue}} contexts can be joined at the end of each loop.
type rangeContext struct {
	outer     *rangeContext // outer loop
	breaks    []context     // context at each break action
	continues []context     // context at each continue action
}
// makeEscaper creates a blank escaper for the given set.
func makeEscaper(n *nameSpace) escaper {
	return escaper{
		ns:                n,
		output:            map[string]context{},
		derived:           map[string]*template.Template{},
		called:            map[string]bool{},
		actionNodeEdits:   map[*parse.ActionNode][]string{},
		templateNodeEdits: map[*parse.TemplateNode]string{},
		textNodeEdits:     map[*parse.TextNode][]byte{},
		rangeContext:      nil, // no enclosing range loop yet
	}
}
// filterFailsafe is an innocuous word that is emitted in place of unsafe values
// by sanitizer functions. It is not a keyword in any programming language,
// contains no special characters, is not empty, and when it appears in output
// it is distinct enough that a developer can find the source of the problem
// via a search engine.
const filterFailsafe = "ZgotmplZ"
// escape escapes a template node by dispatching on its concrete parse type,
// returning the context after the node. Branch, list, and template nodes
// recurse; break/continue record their context for joinRange and yield a
// dead context since execution does not continue past them.
func (e *escaper) escape(c context, n parse.Node) context {
	switch n := n.(type) {
	case *parse.ActionNode:
		return e.escapeAction(c, n)
	case *parse.BreakNode:
		c.n = n
		e.rangeContext.breaks = append(e.rangeContext.breaks, c)
		return context{state: stateDead}
	case *parse.CommentNode:
		// Comments produce no output and do not change context.
		return c
	case *parse.ContinueNode:
		c.n = n
		e.rangeContext.continues = append(e.rangeContext.continues, c)
		return context{state: stateDead}
	case *parse.IfNode:
		return e.escapeBranch(c, &n.BranchNode, "if")
	case *parse.ListNode:
		return e.escapeList(c, n)
	case *parse.RangeNode:
		return e.escapeBranch(c, &n.BranchNode, "range")
	case *parse.TemplateNode:
		return e.escapeTemplate(c, n)
	case *parse.TextNode:
		return e.escapeText(c, n)
	case *parse.WithNode:
		return e.escapeBranch(c, &n.BranchNode, "with")
	}
	panic("escaping " + n.String() + " is unimplemented")
}
// debugAllowActionJSTmpl is the "jstmpllitinterp" GODEBUG setting.
// NOTE(review): not referenced in this chunk; presumably consulted
// elsewhere in the package — confirm before removing.
var debugAllowActionJSTmpl = godebug.New("jstmpllitinterp")

// htmlmetacontenturlescape is the "htmlmetacontenturlescape" GODEBUG
// setting; it gates URL filtering of URL-valued <meta content="..."> in
// escapeAction (any value other than "0" enables the filter).
var htmlmetacontenturlescape = godebug.New("htmlmetacontenturlescape")
// escapeAction escapes an action template node, i.e. a {{...}} interpolation.
// It determines, from the current context c, which sanitizer functions must
// run on the action's output, records them via editActionNode for commit to
// apply later, and returns the (possibly nudged) context after the action.
func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
	if len(n.Pipe.Decl) != 0 {
		// A local variable assignment, not an interpolation.
		return c
	}
	c = nudge(c)
	// Check for disallowed use of predefined escapers in the pipeline.
	for pos, idNode := range n.Pipe.Cmds {
		node, ok := idNode.Args[0].(*parse.IdentifierNode)
		if !ok {
			// A predefined escaper "esc" will never be found as an identifier in a
			// Chain or Field node, since:
			// - "esc.x ..." is invalid, since predefined escapers return strings, and
			//   strings do not have methods, keys or fields.
			// - "... .esc" is invalid, since predefined escapers are global functions,
			//   not methods or fields of any types.
			// Therefore, it is safe to ignore these two node types.
			continue
		}
		ident := node.Ident
		if _, ok := predefinedEscapers[ident]; ok {
			// A predefined escaper is only allowed as the final command of the
			// pipeline, and "html" is never allowed in unquoted attribute
			// values (it does not escape spaces).
			if pos < len(n.Pipe.Cmds)-1 ||
				c.state == stateAttr && c.delim == delimSpaceOrTagEnd && ident == "html" {
				return context{
					state: stateError,
					err:   errorf(ErrPredefinedEscaper, n, n.Line, "predefined escaper %q disallowed in template", ident),
				}
			}
		}
	}
	// s accumulates the names of the escaper funcs to append to the
	// pipeline, in application order.
	s := make([]string, 0, 3)
	switch c.state {
	case stateError:
		return c
	case stateURL, stateCSSDqStr, stateCSSSqStr, stateCSSDqURL, stateCSSSqURL, stateCSSURL:
		switch c.urlPart {
		case urlPartNone:
			s = append(s, "_html_template_urlfilter")
			fallthrough
		case urlPartPreQuery:
			switch c.state {
			case stateCSSDqStr, stateCSSSqStr:
				s = append(s, "_html_template_cssescaper")
			default:
				s = append(s, "_html_template_urlnormalizer")
			}
		case urlPartQueryOrFrag:
			s = append(s, "_html_template_urlescaper")
		case urlPartUnknown:
			return context{
				state: stateError,
				err:   errorf(ErrAmbigContext, n, n.Line, "%s appears in an ambiguous context within a URL", n),
			}
		default:
			panic(c.urlPart.String())
		}
	case stateMetaContent:
		// Handled below in delim check.
	case stateMetaContentURL:
		if htmlmetacontenturlescape.Value() != "0" {
			s = append(s, "_html_template_urlfilter")
		} else {
			// We don't have a great place to increment this, since it's hard to
			// know if we actually escape any urls in _html_template_urlfilter,
			// since it has no information about what context it is being
			// executed in etc. This is probably the best we can do.
			htmlmetacontenturlescape.IncNonDefault()
		}
	case stateJS:
		s = append(s, "_html_template_jsvalescaper")
		// A slash after a value starts a div operator.
		c.jsCtx = jsCtxDivOp
	case stateJSDqStr, stateJSSqStr:
		s = append(s, "_html_template_jsstrescaper")
	case stateJSTmplLit:
		s = append(s, "_html_template_jstmpllitescaper")
	case stateJSRegexp:
		s = append(s, "_html_template_jsregexpescaper")
	case stateCSS:
		s = append(s, "_html_template_cssvaluefilter")
	case stateText:
		s = append(s, "_html_template_htmlescaper")
	case stateRCDATA:
		s = append(s, "_html_template_rcdataescaper")
	case stateAttr:
		// Handled below in delim check.
	case stateAttrName, stateTag:
		c.state = stateAttrName
		s = append(s, "_html_template_htmlnamefilter")
	case stateSrcset:
		s = append(s, "_html_template_srcsetescaper")
	default:
		if isComment(c.state) {
			s = append(s, "_html_template_commentescaper")
		} else {
			panic("unexpected state " + c.state.String())
		}
	}
	// A final escaper depends on how the surrounding attribute value,
	// if any, is delimited.
	switch c.delim {
	case delimNone:
		// No extra-escaping needed for raw text content.
	case delimSpaceOrTagEnd:
		s = append(s, "_html_template_nospaceescaper")
	default:
		s = append(s, "_html_template_attrescaper")
	}
	e.editActionNode(n, s)
	return c
}
// ensurePipelineContains ensures that the pipeline ends with the commands with
// the identifiers in s in order. If the pipeline ends with a predefined escaper
// (i.e. "html" or "urlquery"), merge it with the identifiers in s.
func ensurePipelineContains(p *parse.PipeNode, s []string) {
	if len(s) == 0 {
		// Do not rewrite pipeline if we have no escapers to insert.
		return
	}
	// Precondition: p.Cmds contains at most one predefined escaper and the
	// escaper will be present at p.Cmds[len(p.Cmds)-1]. This precondition is
	// always true because of the checks in escapeAction.
	pipelineLen := len(p.Cmds)
	if pipelineLen > 0 {
		lastCmd := p.Cmds[pipelineLen-1]
		if idNode, ok := lastCmd.Args[0].(*parse.IdentifierNode); ok {
			if esc := idNode.Ident; predefinedEscapers[esc] {
				// Pipeline ends with a predefined escaper.
				if len(p.Cmds) == 1 && len(lastCmd.Args) > 1 {
					// Special case: pipeline is of the form {{ esc arg1 arg2 ... argN }},
					// where esc is the predefined escaper, and arg1...argN are its arguments.
					// Convert this into the equivalent form
					// {{ _eval_args_ arg1 arg2 ... argN | esc }}, so that esc can be easily
					// merged with the escapers in s.
					lastCmd.Args[0] = parse.NewIdentifier("_eval_args_").SetTree(nil).SetPos(lastCmd.Args[0].Position())
					p.Cmds = appendCmd(p.Cmds, newIdentCmd(esc, p.Position()))
					pipelineLen++
				}
				// If any of the commands in s that we are about to insert is equivalent
				// to the predefined escaper, use the predefined escaper instead.
				dup := false
				for i, escaper := range s {
					if escFnsEq(esc, escaper) {
						s[i] = idNode.Ident
						dup = true
					}
				}
				if dup {
					// The predefined escaper will already be inserted along with the
					// escapers in s, so do not copy it to the rewritten pipeline.
					pipelineLen--
				}
			}
		}
	}
	// Rewrite the pipeline, creating the escapers in s at the end of the pipeline.
	newCmds := make([]*parse.CommandNode, pipelineLen, pipelineLen+len(s))
	// insertedIdents tracks (normalized) escaper names already present in
	// the pipeline so we never insert the same escaper twice.
	insertedIdents := make(map[string]bool)
	for i := 0; i < pipelineLen; i++ {
		cmd := p.Cmds[i]
		newCmds[i] = cmd
		if idNode, ok := cmd.Args[0].(*parse.IdentifierNode); ok {
			insertedIdents[normalizeEscFn(idNode.Ident)] = true
		}
	}
	for _, name := range s {
		if !insertedIdents[normalizeEscFn(name)] {
			// When two templates share an underlying parse tree via the use of
			// AddParseTree and one template is executed after the other, this check
			// ensures that escapers that were already inserted into the pipeline on
			// the first escaping pass do not get inserted again.
			newCmds = appendCmd(newCmds, newIdentCmd(name, p.Position()))
		}
	}
	p.Cmds = newCmds
}
// predefinedEscapers contains template predefined escapers that are equivalent
// to some contextual escapers. Keep in sync with equivEscapers.
var predefinedEscapers = map[string]bool{
	"html":     true,
	"urlquery": true,
}
// equivEscapers matches contextual escapers to equivalent predefined
// template escapers.
var equivEscapers = map[string]string{
	// The following pairs of HTML escapers provide equivalent security
	// guarantees, since they all escape '\000', '\'', '"', '&', '<', and '>'.
	"_html_template_attrescaper":   "html",
	"_html_template_htmlescaper":   "html",
	"_html_template_rcdataescaper": "html",
	// These two URL escapers produce URLs safe for embedding in a URL query by
	// percent-encoding all the reserved characters specified in RFC 3986 Section
	// 2.2
	"_html_template_urlescaper": "urlquery",
	// These two functions are not actually equivalent; urlquery is stricter as it
	// escapes reserved characters (e.g. '#'), while _html_template_urlnormalizer
	// does not. It is therefore only safe to replace _html_template_urlnormalizer
	// with urlquery (this happens in ensurePipelineContains), but not the other
	// way around. We keep this entry around to preserve the behavior of templates
	// written before Go 1.9, which might depend on this substitution taking place.
	"_html_template_urlnormalizer": "urlquery",
}
// escFnsEq reports whether the two escaping functions are equivalent,
// i.e. whether they normalize to the same canonical escaper name.
func escFnsEq(a, b string) bool {
	na, nb := normalizeEscFn(a), normalizeEscFn(b)
	return na == nb
}
// normalizeEscFn maps an escaper function name to a canonical form:
// normalizeEscFn(a) == normalizeEscFn(b) for any pair of names of
// escaper functions a and b that are equivalent.
func normalizeEscFn(e string) string {
	norm, ok := equivEscapers[e]
	if !ok || norm == "" {
		return e
	}
	return norm
}
// redundantFuncs[a][b] implies that funcMap[b](funcMap[a](x)) == funcMap[a](x)
// for all x, i.e. applying escaper b after escaper a is a no-op. appendCmd
// consults this to avoid stacking pointless escapers onto a pipeline.
var redundantFuncs = map[string]map[string]bool{
	"_html_template_commentescaper": {
		"_html_template_attrescaper": true,
		"_html_template_htmlescaper": true,
	},
	"_html_template_cssescaper": {
		"_html_template_attrescaper": true,
	},
	"_html_template_jsregexpescaper": {
		"_html_template_attrescaper": true,
	},
	"_html_template_jsstrescaper": {
		"_html_template_attrescaper": true,
	},
	"_html_template_jstmpllitescaper": {
		"_html_template_attrescaper": true,
	},
	"_html_template_urlescaper": {
		"_html_template_urlnormalizer": true,
	},
}
// appendCmd appends cmd to the end of the command pipeline unless it is
// redundant with the last command (per redundantFuncs), in which case the
// pipeline is returned unchanged.
func appendCmd(cmds []*parse.CommandNode, cmd *parse.CommandNode) []*parse.CommandNode {
	n := len(cmds)
	if n > 0 {
		if last, okLast := cmds[n-1].Args[0].(*parse.IdentifierNode); okLast {
			if next, okNext := cmd.Args[0].(*parse.IdentifierNode); okNext && redundantFuncs[last.Ident][next.Ident] {
				return cmds
			}
		}
	}
	return append(cmds, cmd)
}
// newIdentCmd produces a command containing a single identifier node at
// the given position.
func newIdentCmd(identifier string, pos parse.Pos) *parse.CommandNode {
	ident := parse.NewIdentifier(identifier).SetTree(nil).SetPos(pos) // TODO: SetTree.
	return &parse.CommandNode{
		NodeType: parse.NodeCommand,
		Args:     []parse.Node{ident},
	}
}
// nudge returns the context that would result from following empty string
// transitions from the input context.
// For example, parsing:
//
//	`<a href=`
//
// will end in context{stateBeforeValue, attrURL}, but parsing one extra rune:
//
//	`<a href=x`
//
// will end in context{stateURL, delimSpaceOrTagEnd, ...}.
// There are two transitions that happen when the 'x' is seen:
// (1) Transition from a before-value state to a start-of-value state without
//
//	consuming any character.
//
// (2) Consume 'x' and transition past the first value character.
// In this case, nudging produces the context after (1) happens.
func nudge(c context) context {
	switch c.state {
	case stateTag:
		// In `<foo {{.}}`, the action should emit an attribute.
		c.state = stateAttrName
	case stateBeforeValue:
		// In `<foo bar={{.}}`, the action is an undelimited value.
		c.state, c.delim, c.attr = attrStartStates[c.attr], delimSpaceOrTagEnd, attrNone
	case stateAfterName:
		// In `<foo bar {{.}}`, the action is an attribute name.
		c.state, c.attr = stateAttrName, attrNone
	}
	return c
}
// join joins the two contexts of a branch template node. The result is an
// error context if either of the input contexts are error contexts, or if the
// input contexts differ.
func join(a, b context, node parse.Node, nodeName string) context {
	// Errors dominate; dead contexts (from break/continue) yield to the
	// other branch since that path never falls through to the join point.
	if a.state == stateError {
		return a
	}
	if b.state == stateError {
		return b
	}
	if a.state == stateDead {
		return b
	}
	if b.state == stateDead {
		return a
	}
	if a.eq(b) {
		return a
	}
	// If the contexts differ only in urlPart or only in jsCtx, join to the
	// corresponding "unknown" value of that one field.
	c := a
	c.urlPart = b.urlPart
	if c.eq(b) {
		// The contexts differ only by urlPart.
		c.urlPart = urlPartUnknown
		return c
	}
	c = a
	c.jsCtx = b.jsCtx
	if c.eq(b) {
		// The contexts differ only by jsCtx.
		c.jsCtx = jsCtxUnknown
		return c
	}
	// Allow a nudged context to join with an unnudged one.
	// This means that
	// <p title={{if .C}}{{.}}{{end}}
	// ends in an unquoted value state even though the else branch
	// ends in stateBeforeValue.
	if c, d := nudge(a), nudge(b); !(c.eq(a) && d.eq(b)) {
		if e := join(c, d, node, nodeName); e.state != stateError {
			return e
		}
	}
	return context{
		state: stateError,
		err:   errorf(ErrBranchEnd, node, 0, "{{%s}} branches end in different contexts: %v, %v", nodeName, a, b),
	}
}
// escapeBranch escapes a branch template node: "if", "range" and "with".
// For "range" it additionally verifies that a second iteration of the loop
// body starting from the first iteration's output context reaches the same
// context, and merges the contexts recorded at break/continue actions.
func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) context {
	if nodeName == "range" {
		// Push a fresh rangeContext to collect this loop's break/continue contexts.
		e.rangeContext = &rangeContext{outer: e.rangeContext}
	}
	c0 := e.escapeList(c, n.List)
	if nodeName == "range" {
		if c0.state != stateError {
			c0 = joinRange(c0, e.rangeContext)
		}
		e.rangeContext = e.rangeContext.outer
		if c0.state == stateError {
			return c0
		}
		// The "true" branch of a "range" node can execute multiple times.
		// We check that executing n.List once results in the same context
		// as executing n.List twice.
		e.rangeContext = &rangeContext{outer: e.rangeContext}
		c1, _ := e.escapeListConditionally(c0, n.List, nil)
		c0 = join(c0, c1, n, nodeName)
		if c0.state == stateError {
			e.rangeContext = e.rangeContext.outer
			// Make clear that this is a problem on loop re-entry
			// since developers tend to overlook that branch when
			// debugging templates.
			c0.err.Line = n.Line
			c0.err.Description = "on range loop re-entry: " + c0.err.Description
			return c0
		}
		c0 = joinRange(c0, e.rangeContext)
		e.rangeContext = e.rangeContext.outer
		if c0.state == stateError {
			return c0
		}
	}
	c1 := e.escapeList(c, n.ElseList)
	return join(c0, c1, n, nodeName)
}
// joinRange merges the contexts recorded at {{break}} and {{continue}}
// actions in rc into the loop body's end-of-iteration context c0, annotating
// any resulting error with the offending action's line.
func joinRange(c0 context, rc *rangeContext) context {
	// Merge contexts at break and continue statements into overall body context.
	// In theory we could treat breaks differently from continues, but for now it is
	// enough to treat them both as going back to the start of the loop (which may then stop).
	for _, c := range rc.breaks {
		c0 = join(c0, c, c.n, "range")
		if c0.state == stateError {
			c0.err.Line = c.n.(*parse.BreakNode).Line
			c0.err.Description = "at range loop break: " + c0.err.Description
			return c0
		}
	}
	for _, c := range rc.continues {
		c0 = join(c0, c, c.n, "range")
		if c0.state == stateError {
			c0.err.Line = c.n.(*parse.ContinueNode).Line
			c0.err.Description = "at range loop continue: " + c0.err.Description
			return c0
		}
	}
	return c0
}
// escapeList escapes a list template node by threading the context through
// each child in order, stopping early once a child yields a dead context
// (a break or continue, past which execution does not proceed).
func (e *escaper) escapeList(c context, n *parse.ListNode) context {
	if n == nil {
		return c
	}
	for _, child := range n.Nodes {
		if c = e.escape(c, child); c.state == stateDead {
			break
		}
	}
	return c
}
// escapeListConditionally escapes a list node but only preserves edits and
// inferences in e if the inferences and output context satisfy filter.
// It returns the best guess at an output context, and the result of the filter
// which is the same as whether e was updated. A nil filter always discards
// the trial escaper's state.
func (e *escaper) escapeListConditionally(c context, n *parse.ListNode, filter func(*escaper, context) bool) (context, bool) {
	// Run the escape on a scratch escaper so the trial can be thrown away.
	e1 := makeEscaper(e.ns)
	e1.rangeContext = e.rangeContext
	// Make type inferences available to f.
	maps.Copy(e1.output, e.output)
	c = e1.escapeList(c, n)
	ok := filter != nil && filter(&e1, c)
	if ok {
		// Copy inferences and edits from e1 back into e.
		maps.Copy(e.output, e1.output)
		maps.Copy(e.derived, e1.derived)
		maps.Copy(e.called, e1.called)
		for k, v := range e1.actionNodeEdits {
			e.editActionNode(k, v)
		}
		for k, v := range e1.templateNodeEdits {
			e.editTemplateNode(k, v)
		}
		for k, v := range e1.textNodeEdits {
			e.editTextNode(k, v)
		}
	}
	return c, ok
}
// escapeTemplate escapes a {{template}} call node, rewriting the call to
// target the context-mangled derived template when one is needed.
func (e *escaper) escapeTemplate(c context, n *parse.TemplateNode) context {
	out, callee := e.escapeTree(c, n, n.Name, n.Line)
	if callee != n.Name {
		e.editTemplateNode(n, callee)
	}
	return out
}
// escapeTree escapes the named template starting in the given context as
// necessary and returns its output context along with the (possibly
// context-mangled) name of the template to call.
func (e *escaper) escapeTree(c context, node parse.Node, name string, line int) (context, string) {
	// Mangle the template name with the input context to produce a reliable
	// identifier.
	dname := c.mangle(name)
	e.called[dname] = true
	if out, ok := e.output[dname]; ok {
		// Already escaped.
		return out, dname
	}
	t := e.template(name)
	if t == nil {
		// Two cases: The template exists but is empty, or has never been mentioned at
		// all. Distinguish the cases in the error messages.
		if e.ns.set[name] != nil {
			return context{
				state: stateError,
				err:   errorf(ErrNoSuchTemplate, node, line, "%q is an incomplete or empty template", name),
			}, dname
		}
		return context{
			state: stateError,
			err:   errorf(ErrNoSuchTemplate, node, line, "no such template %q", name),
		}, dname
	}
	if dname != name {
		// Use any template derived during an earlier call to escapeTemplate
		// with different top level templates, or clone if necessary.
		dt := e.template(dname)
		if dt == nil {
			dt = template.New(dname)
			dt.Tree = &parse.Tree{Name: dname, Root: t.Root.CopyList()}
			e.derived[dname] = dt
		}
		t = dt
	}
	return e.computeOutCtx(c, t), dname
}
// computeOutCtx takes a template and its start context and computes the output
// context while storing any inferences in e. For recursive templates it
// searches for a fixed point: first assuming the input context as the output,
// then retrying with the first pass's result.
func (e *escaper) computeOutCtx(c context, t *template.Template) context {
	// Propagate context over the body.
	c1, ok := e.escapeTemplateBody(c, t)
	if !ok {
		// Look for a fixed point by assuming c1 as the output context.
		if c2, ok2 := e.escapeTemplateBody(c1, t); ok2 {
			c1, ok = c2, true
		}
		// Use c1 as the error context if neither assumption worked.
	}
	if !ok && c1.state != stateError {
		return context{
			state: stateError,
			err:   errorf(ErrOutputContext, t.Tree.Root, 0, "cannot compute output context for template %s", t.Name()),
		}
	}
	return c1
}
// escapeTemplateBody escapes the given template assuming the given output
// context, and returns the best guess at the output context and whether the
// assumption was correct.
func (e *escaper) escapeTemplateBody(c context, t *template.Template) (context, bool) {
	// filter accepts the trial escape only if it produced no error and
	// either the template is non-recursive or the assumed context held.
	filter := func(e1 *escaper, c1 context) bool {
		if c1.state == stateError {
			// Do not update the input escaper, e.
			return false
		}
		if !e1.called[t.Name()] {
			// If t is not recursively called, then c1 is an
			// accurate output context.
			return true
		}
		// c1 is accurate if it matches our assumed output context.
		return c.eq(c1)
	}
	// We need to assume an output context so that recursive template calls
	// take the fast path out of escapeTree instead of infinitely recurring.
	// Naively assuming that the input context is the same as the output
	// works >90% of the time.
	e.output[t.Name()] = c
	return e.escapeListConditionally(c, t.Tree.Root, filter)
}
// delimEnds maps each delim to a string of characters that terminate it.
// Indexed by the delim enum values; delimNone has no entry.
var delimEnds = [...]string{
	delimDoubleQuote: `"`,
	delimSingleQuote: "'",
	// Determined empirically by running the below in various browsers.
	// var div = document.createElement("DIV");
	// for (var i = 0; i < 0x10000; ++i) {
	//   div.innerHTML = "<span title=x" + String.fromCharCode(i) + "-bar>";
	//   if (div.getElementsByTagName("SPAN")[0].title.indexOf("bar") < 0)
	//     document.write("<p>U+" + i.toString(16));
	// }
	delimSpaceOrTagEnd: " \t\n\f\r>",
}
var (
	// Per WHATWG HTML specification, section 4.12.1.3, there are extremely
	// complicated rules for how to handle the set of opening tags <!--,
	// <script, and </script when they appear in JS literals (i.e. strings,
	// regexs, and comments). The specification suggests a simple solution,
	// rather than implementing the arcane ABNF, which involves simply escaping
	// the opening bracket with \x3C. We use the below regex for this, since it
	// makes doing the case-insensitive find-replace much simpler.
	specialScriptTagRE = regexp.MustCompile("(?i)<(script|/script|!--)")
	// specialScriptTagReplacement rewrites the match, keeping the captured
	// tag name but replacing "<" with the JS escape sequence \x3C.
	specialScriptTagReplacement = []byte("\\x3C$1")
)
// containsSpecialScriptTag reports whether s contains any of the script-tag
// openers ("<script", "</script", "<!--", case-insensitive) matched by
// specialScriptTagRE.
func containsSpecialScriptTag(s []byte) bool {
	return specialScriptTagRE.Match(s)
}
// escapeSpecialScriptTags returns s with every script-tag opener's "<"
// rewritten to the JS escape \x3C, so the sequence cannot terminate an
// enclosing <script> element.
func escapeSpecialScriptTags(s []byte) []byte {
	return specialScriptTagRE.ReplaceAll(s, specialScriptTagReplacement)
}
var doctypeBytes = []byte("<!DOCTYPE")
// escapeText escapes a text template node. It rewrites the raw text so it
// cannot change the parsing context in unsafe ways:
//   - stray "<" in text/RCDATA is escaped as "&lt;" (except when it begins
//     "<!DOCTYPE"),
//   - JS and CSS comments are normalized to a single space (or a newline
//     when a JS block comment contains a line terminator),
//   - "<script", "</script" and "<!--" inside JS string/regexp literals are
//     escaped so they cannot terminate the enclosing <script> element.
//
// It returns the context after the text node.
func (e *escaper) escapeText(c context, n *parse.TextNode) context {
	s, written, i, b := n.Text, 0, 0, new(bytes.Buffer)
	for i != len(s) {
		c1, nread := contextAfterText(c, s[i:])
		i1 := i + nread
		if c.state == stateText || c.state == stateRCDATA {
			end := i1
			if c1.state != c.state {
				// Do not rewrite the "<" that caused the state change;
				// back end up to just before it.
				for j := end - 1; j >= i; j-- {
					if s[j] == '<' {
						end = j
						break
					}
				}
			}
			for j := i; j < end; j++ {
				if s[j] == '<' && !bytes.HasPrefix(bytes.ToUpper(s[j:]), doctypeBytes) {
					b.Write(s[written:j])
					// Fix: emit the entity "&lt;", not a literal "<".
					// Writing "<" here was a no-op that left stray "<"
					// unescaped in text/RCDATA content.
					b.WriteString("&lt;")
					written = j + 1
				}
			}
		} else if isComment(c.state) && c.delim == delimNone {
			switch c.state {
			case stateJSBlockCmt:
				// https://es5.github.io/#x7.4:
				// "Comments behave like white space and are
				// discarded except that, if a MultiLineComment
				// contains a line terminator character, then
				// the entire comment is considered to be a
				// LineTerminator for purposes of parsing by
				// the syntactic grammar."
				if bytes.ContainsAny(s[written:i1], "\n\r\u2028\u2029") {
					b.WriteByte('\n')
				} else {
					b.WriteByte(' ')
				}
			case stateCSSBlockCmt:
				b.WriteByte(' ')
			}
			written = i1
		}
		if c.state != c1.state && isComment(c1.state) && c1.delim == delimNone {
			// Preserve the portion between written and the comment start.
			cs := i1 - 2
			if c1.state == stateHTMLCmt || c1.state == stateJSHTMLOpenCmt {
				// "<!--" instead of "/*" or "//"
				cs -= 2
			} else if c1.state == stateJSHTMLCloseCmt {
				// "-->" instead of "/*" or "//"
				cs -= 1
			}
			b.Write(s[written:cs])
			written = i1
		}
		if isInScriptLiteral(c.state) && containsSpecialScriptTag(s[i:i1]) {
			b.Write(s[written:i])
			b.Write(escapeSpecialScriptTags(s[i:i1]))
			written = i1
		}
		if i == i1 && c.state == c1.state {
			panic(fmt.Sprintf("infinite loop from %v to %v on %q..%q", c, c1, s[:i], s[i:]))
		}
		c, i = c1, i1
	}
	if written != 0 && c.state != stateError {
		// Flush the unmodified tail unless it sits inside a raw comment,
		// whose content is dropped entirely.
		if !isComment(c.state) || c.delim != delimNone {
			b.Write(n.Text[written:])
		}
		e.editTextNode(n, b.Bytes())
	}
	return c
}
// contextAfterText starts in context c, consumes some tokens from the front of
// s, then returns the context after those tokens and the number of input
// bytes consumed.
func contextAfterText(c context, s []byte) (context, int) {
	if c.delim == delimNone {
		c1, i := tSpecialTagEnd(c, s)
		if i == 0 {
			// A special end tag (`</script>`) has been seen and
			// all content preceding it has been consumed.
			return c1, 0
		}
		// Consider all content up to any end tag.
		return transitionFunc[c.state](c, s[:i])
	}
	// We are at the beginning of an attribute value.
	i := bytes.IndexAny(s, delimEnds[c.delim])
	if i == -1 {
		i = len(s)
	}
	if c.delim == delimSpaceOrTagEnd {
		// https://www.w3.org/TR/html5/syntax.html#attribute-value-(unquoted)-state
		// lists the runes below as error characters.
		// Error out because HTML parsers may differ on whether
		// "<a id= onclick=f("  ends inside id's or onclick's value,
		// "<a class=`foo "     ends inside a value,
		// "<a style=font:'Arial'" needs open-quote fixup.
		// IE treats '`' as a quotation character.
		if j := bytes.IndexAny(s[:i], "\"'<=`"); j >= 0 {
			return context{
				state: stateError,
				err:   errorf(ErrBadHTML, nil, 0, "%q in unquoted attr: %q", s[j:j+1], s[:i]),
			}, len(s)
		}
	}
	if i == len(s) {
		// Remain inside the attribute.
		// Decode the value so non-HTML rules can easily handle
		//	<button onclick="alert(&quot;Hi!&quot;)">
		// without having to entity decode token boundaries.
		for u := []byte(html.UnescapeString(string(s))); len(u) != 0; {
			c1, i1 := transitionFunc[c.state](c, u)
			c, u = c1, u[i1:]
		}
		return c, len(s)
	}
	element := c.element
	// If this is a non-JS "type" attribute inside "script" tag, do not treat the contents as JS.
	if c.state == stateAttr && c.element == elementScript && c.attr == attrScriptType && !isJSType(string(s[:i])) {
		element = elementNone
	}
	if c.delim != delimSpaceOrTagEnd {
		// Consume any quote.
		i++
	}
	// On exiting an attribute, we discard all state information
	// except the state and element.
	return context{state: stateTag, element: element}, i
}
// editActionNode records a change to an action pipeline for later commit.
// Each action node may be edited at most once per commit cycle; a second
// edit indicates the node is shared between templates, which is a bug.
func (e *escaper) editActionNode(n *parse.ActionNode, cmds []string) {
	if _, dup := e.actionNodeEdits[n]; dup {
		panic(fmt.Sprintf("node %s shared between templates", n))
	}
	e.actionNodeEdits[n] = cmds
}
// editTemplateNode records a change to a {{template}} callee for later
// commit. Panics if the node was already edited, which would mean the parse
// node is shared between templates.
func (e *escaper) editTemplateNode(n *parse.TemplateNode, callee string) {
	if _, dup := e.templateNodeEdits[n]; dup {
		panic(fmt.Sprintf("node %s shared between templates", n))
	}
	e.templateNodeEdits[n] = callee
}
// editTextNode records a change to a text node for later commit. Panics if
// the node was already edited, which would mean the parse node is shared
// between templates.
func (e *escaper) editTextNode(n *parse.TextNode, text []byte) {
	if _, dup := e.textNodeEdits[n]; dup {
		panic(fmt.Sprintf("node %s shared between templates", n))
	}
	e.textNodeEdits[n] = text
}
// commit applies changes to actions and template calls needed to contextually
// autoescape content and adds any derived templates to the set.
func (e *escaper) commit() {
	// Make the sanitizer funcMap available to every escaped template.
	for name := range e.output {
		e.template(name).Funcs(funcMap)
	}
	// Any template from the name space associated with this escaper can be used
	// to add derived templates to the underlying text/template name space.
	tmpl := e.arbitraryTemplate()
	for _, t := range e.derived {
		if _, err := tmpl.text.AddParseTree(t.Name(), t.Tree); err != nil {
			panic("error adding derived template")
		}
	}
	// Apply the accumulated parse-tree edits.
	for n, s := range e.actionNodeEdits {
		ensurePipelineContains(n.Pipe, s)
	}
	for n, name := range e.templateNodeEdits {
		n.Name = name
	}
	for n, s := range e.textNodeEdits {
		n.Text = s
	}
	// Reset state that is specific to this commit so that the same changes are
	// not re-applied to the template on subsequent calls to commit.
	e.called = make(map[string]bool)
	e.actionNodeEdits = make(map[*parse.ActionNode][]string)
	e.templateNodeEdits = make(map[*parse.TemplateNode]string)
	e.textNodeEdits = make(map[*parse.TextNode][]byte)
}
// template returns the named template given a mangled template name,
// checking the underlying text/template name space first and falling back
// to this escaper's derived templates (nil if found in neither).
func (e *escaper) template(name string) *template.Template {
	// Any template from the name space associated with this escaper can be
	// used to look up templates in the underlying text/template name space.
	if t := e.arbitraryTemplate().text.Lookup(name); t != nil {
		return t
	}
	return e.derived[name]
}
// arbitraryTemplate returns an arbitrary template from the name space
// associated with e and panics if no templates are found.
// (Which template is returned is irrelevant: any member can reach the
// shared text/template name space.)
func (e *escaper) arbitraryTemplate() *Template {
	for _, t := range e.ns.set {
		return t
	}
	panic("no templates in name space")
}
// Forwarding functions so that clients need only import this package
// to reach the general escaping functions of text/template. Each simply
// delegates to the identically named text/template function.

// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
func HTMLEscape(w io.Writer, b []byte) {
	template.HTMLEscape(w, b)
}

// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
func HTMLEscapeString(s string) string {
	return template.HTMLEscapeString(s)
}

// HTMLEscaper returns the escaped HTML equivalent of the textual
// representation of its arguments.
func HTMLEscaper(args ...any) string {
	return template.HTMLEscaper(args...)
}

// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
func JSEscape(w io.Writer, b []byte) {
	template.JSEscape(w, b)
}

// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
func JSEscapeString(s string) string {
	return template.JSEscapeString(s)
}

// JSEscaper returns the escaped JavaScript equivalent of the textual
// representation of its arguments.
func JSEscaper(args ...any) string {
	return template.JSEscaper(args...)
}

// URLQueryEscaper returns the escaped value of the textual representation of
// its arguments in a form suitable for embedding in a URL query.
func URLQueryEscaper(args ...any) string {
	return template.URLQueryEscaper(args...)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"bytes"
"fmt"
"strings"
"unicode/utf8"
)
// htmlNospaceEscaper escapes for inclusion in unquoted attribute values,
// substituting filterFailsafe for an empty value (which could otherwise
// change how the surrounding markup parses).
func htmlNospaceEscaper(args ...any) string {
	s, t := stringify(args...)
	switch {
	case s == "":
		return filterFailsafe
	case t == contentTypeHTML:
		// Pre-sanitized HTML: strip tags, then only normalize entities.
		return htmlReplacer(stripTags(s), htmlNospaceNormReplacementTable, false)
	default:
		return htmlReplacer(s, htmlNospaceReplacementTable, false)
	}
}
// attrEscaper escapes for inclusion in quoted attribute values. Values
// already typed as HTML are stripped of tags and only normalized (no
// '&'-escaping) to avoid double-encoding existing entities.
func attrEscaper(args ...any) string {
	s, t := stringify(args...)
	src, table := s, htmlReplacementTable
	if t == contentTypeHTML {
		src, table = stripTags(s), htmlNormReplacementTable
	}
	return htmlReplacer(src, table, true)
}
// rcdataEscaper escapes for inclusion in an RCDATA element body. Values
// already typed as HTML are only normalized (no '&'-escaping) so existing
// entities are not double-encoded.
func rcdataEscaper(args ...any) string {
	s, t := stringify(args...)
	table := htmlReplacementTable
	if t == contentTypeHTML {
		table = htmlNormReplacementTable
	}
	return htmlReplacer(s, table, true)
}
// htmlEscaper escapes for inclusion in HTML text. Values already typed as
// HTML pass through unchanged.
func htmlEscaper(args ...any) string {
	s, t := stringify(args...)
	if t != contentTypeHTML {
		return htmlReplacer(s, htmlReplacementTable, true)
	}
	return s
}
// htmlReplacementTable contains the runes that need to be escaped
// inside a quoted attribute value or in a text node.
// Each entry maps a rune to its HTML entity/character-reference form;
// the previous content had the entities corrupted to their decoded
// literals (identity mappings), which disabled escaping entirely.
var htmlReplacementTable = []string{
	// https://www.w3.org/TR/html5/syntax.html#attribute-value-(unquoted)-state
	// U+0000 NULL Parse error. Append a U+FFFD REPLACEMENT
	// CHARACTER character to the current attribute's value.
	// "
	// and similarly
	// https://www.w3.org/TR/html5/syntax.html#before-attribute-value-state
	0:    "\uFFFD",
	'"':  "&#34;",
	'&':  "&amp;",
	'\'': "&#39;",
	'+':  "&#43;",
	'<':  "&lt;",
	'>':  "&gt;",
}
// htmlNormReplacementTable is like htmlReplacementTable but without '&' to
// avoid over-encoding existing entities.
// Entity strings restored: the previous content had them corrupted to their
// decoded literal characters, making the table a no-op.
var htmlNormReplacementTable = []string{
	0:    "\uFFFD",
	'"':  "&#34;",
	'\'': "&#39;",
	'+':  "&#43;",
	'<':  "&lt;",
	'>':  "&gt;",
}
// htmlNospaceReplacementTable contains the runes that need to be escaped
// inside an unquoted attribute value.
// The set of runes escaped is the union of the HTML specials and
// those determined by running the JS below in browsers:
// <div id=d></div>
// <script>(function () {
// var a = [], d = document.getElementById("d"), i, c, s;
// for (i = 0; i < 0x10000; ++i) {
//
//	c = String.fromCharCode(i);
//	d.innerHTML = "<span title=" + c + "lt" + c + "></span>"
//	s = d.getElementsByTagName("SPAN")[0];
//	if (!s || s.title !== c + "lt" + c) { a.push(i.toString(16)); }
//
// }
// document.write(a.join(", "));
// })()</script>
//
// Entity strings restored: the previous content had them corrupted to their
// decoded literal characters (e.g. a raw tab for "&#9;"), disabling escaping.
var htmlNospaceReplacementTable = []string{
	0:    "\uFFFD",
	'\t': "&#9;",
	'\n': "&#10;",
	'\v': "&#11;",
	'\f': "&#12;",
	'\r': "&#13;",
	' ':  "&#32;",
	'"':  "&#34;",
	'&':  "&amp;",
	'\'': "&#39;",
	'+':  "&#43;",
	'<':  "&lt;",
	'=':  "&#61;",
	'>':  "&gt;",
	// A parse error in the attribute value (unquoted) and
	// before attribute value states.
	// Treated as a quoting character by IE.
	'`': "&#96;",
}
// htmlNospaceNormReplacementTable is like htmlNospaceReplacementTable but
// without '&' to avoid over-encoding existing entities.
// Entity strings restored: the previous content had them corrupted to their
// decoded literal characters, disabling escaping.
var htmlNospaceNormReplacementTable = []string{
	0:    "\uFFFD",
	'\t': "&#9;",
	'\n': "&#10;",
	'\v': "&#11;",
	'\f': "&#12;",
	'\r': "&#13;",
	' ':  "&#32;",
	'"':  "&#34;",
	'\'': "&#39;",
	'+':  "&#43;",
	'<':  "&lt;",
	'=':  "&#61;",
	'>':  "&gt;",
	// A parse error in the attribute value (unquoted) and
	// before attribute value states.
	// Treated as a quoting character by IE.
	'`': "&#96;",
}
// htmlReplacer returns s with runes replaced according to replacementTable.
// When badRunes is false, runes in the Unicode noncharacter ranges
// U+FDD0..U+FDEF and U+FFF0..U+FFFF are numerically escaped; when true they
// pass through unescaped (IE does not allow these ranges in unquoted
// attributes anyway). Returns s itself when nothing needed replacing.
func htmlReplacer(s string, replacementTable []string, badRunes bool) string {
	var b strings.Builder
	written := 0
	// Decode runes manually rather than with 'for range' so that the width
	// of invalid UTF-8 sequences is preserved: on a decoding error the input
	// width differs from utf8.RuneLen(r), and ranging would misalign writes.
	for i, w := 0, 0; i < len(s); i += w {
		var r rune
		r, w = utf8.DecodeRuneInString(s[i:])
		switch {
		case int(r) < len(replacementTable):
			repl := replacementTable[r]
			if repl == "" {
				continue
			}
			if written == 0 {
				b.Grow(len(s)) // first replacement: reserve capacity once
			}
			b.WriteString(s[written:i])
			b.WriteString(repl)
			written = i + w
		case badRunes:
			// Allowed through unescaped.
		case 0xfdd0 <= r && r <= 0xfdef || 0xfff0 <= r && r <= 0xffff:
			if written == 0 {
				b.Grow(len(s))
			}
			fmt.Fprintf(&b, "%s&#x%x;", s[written:i], r)
			written = i + w
		}
	}
	if written == 0 {
		return s
	}
	b.WriteString(s[written:])
	return b.String()
}
// stripTags takes a snippet of HTML and returns only the text content.
// For example, `<b>¡Hi!</b> <script>...</script>` -> `¡Hi! `.
//
// It drives the same context-transition functions used by the escaper, so a
// '<' inside text or an attribute value is not mistaken for a tag start.
func stripTags(html string) string {
	var b strings.Builder
	s, c, i, allText := []byte(html), context{}, 0, true
	// Using the transition funcs helps us avoid mangling
	// `<div title="1>2">` or `I <3 Ponies!`.
	for i != len(s) {
		if c.delim == delimNone {
			st := c.state
			// Use RCDATA instead of parsing into JS or CSS styles.
			if c.element != elementNone && !isInTag(st) {
				st = stateRCDATA
			}
			d, nread := transitionFunc[st](c, s[i:])
			i1 := i + nread
			if c.state == stateText || c.state == stateRCDATA {
				// Emit text up to the start of the tag or comment.
				j := i1
				if d.state != c.state {
					// Scan backwards for the '<' that started the
					// tag/comment so it is excluded from the output.
					for j1 := j - 1; j1 >= i; j1-- {
						if s[j1] == '<' {
							j = j1
							break
						}
					}
				}
				b.Write(s[i:j])
			} else {
				// Inside a tag or non-text state: nothing is emitted,
				// so the input was not all text.
				allText = false
			}
			c, i = d, i1
			continue
		}
		// Inside an attribute value: skip ahead to its end delimiter.
		i1 := i + bytes.IndexAny(s[i:], delimEnds[c.delim])
		if i1 < i {
			// No end delimiter found (IndexAny returned -1); stop.
			break
		}
		if c.delim != delimSpaceOrTagEnd {
			// Consume any quote.
			i1++
		}
		c, i = context{state: stateTag, element: c.element}, i1
	}
	if allText {
		// Fast path: no tags were seen, return the input unchanged.
		return html
	} else if c.state == stateText || c.state == stateRCDATA {
		b.Write(s[i:])
	}
	return b.String()
}
// htmlNameFilter accepts valid parts of an HTML attribute or tag name or
// a known-safe HTML attribute.
func htmlNameFilter(args ...any) string {
	s, t := stringify(args...)
	if t == contentTypeHTMLAttr {
		// Already vetted as an attribute name.
		return s
	}
	if s == "" {
		// Avoid violation of structure preservation.
		// <input checked {{.K}}={{.V}}>.
		// Without this, if .K is empty then .V is the value of
		// checked, but otherwise .V is the value of the attribute
		// named .K.
		return filterFailsafe
	}
	s = strings.ToLower(s)
	if attrType(s) != contentTypePlain {
		// TODO: Split attr and element name part filters so we can recognize known attributes.
		return filterFailsafe
	}
	// Only ASCII digits and lowercase letters may appear in a name part.
	for _, r := range s {
		digit := '0' <= r && r <= '9'
		lower := 'a' <= r && r <= 'z'
		if !digit && !lower {
			return filterFailsafe
		}
	}
	return s
}
// commentEscaper discards its inputs and returns the empty string.
// Comment content does not correspond to any parsed structure or
// human-readable content, so the simplest and most secure policy is to drop
// content interpolated into comments. This approach is equally valid whether
// or not static comment content is removed from the template.
func commentEscaper(_ ...any) string {
	return ""
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"regexp"
"strings"
"unicode/utf8"
)
// jsWhitespace contains all of the JS whitespace characters, as defined
// by the \s character class.
// See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_expressions/Character_classes.
// It includes the ASCII whitespace characters, the Unicode space separators,
// the line terminators U+2028/U+2029, and the BOM U+FEFF.
const jsWhitespace = "\f\n\r\t\v\u0020\u00a0\u1680\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u2028\u2029\u202f\u205f\u3000\ufeff"
// nextJSCtx returns the context that determines whether a slash after the
// given run of tokens starts a regular expression instead of a division
// operator: / or /=.
//
// This assumes that the token run does not include any string tokens, comment
// tokens, regular expression literal tokens, or division operators.
//
// This fails on some valid but nonsensical JavaScript programs like
// "x = ++/foo/i" which is quite different than "x++/foo/i", but is not known to
// fail on any known useful programs. It is based on the draft
// JavaScript 2.0 lexical grammar and requires one token of lookbehind:
// https://www.mozilla.org/js/language/js20-2000-07/rationale/syntax.html
func nextJSCtx(s []byte, preceding jsCtx) jsCtx {
	// Trim all JS whitespace characters.
	s = bytes.TrimRight(s, jsWhitespace)
	if len(s) == 0 {
		// Nothing but whitespace: the preceding context still applies.
		return preceding
	}
	// All cases below are in the single-byte UTF-8 group.
	n := len(s)
	switch last := s[n-1]; last {
	case '+', '-':
		// ++ and -- are not regexp preceders, but + and - are whether
		// they are used as infix or prefix operators. Count the run of
		// adjacent identical signs ending the token stream.
		run := 1
		for run < n && s[n-1-run] == last {
			run++
		}
		if run&1 == 1 {
			// An odd-length run ends in a unary/binary sign, e.g.
			// "---" is the same as "-- -".
			return jsCtxRegexp
		}
		return jsCtxDivOp
	case '.':
		// Handle "42.": a '.' right after a digit completes a number,
		// which is a full operand.
		if n != 1 && '0' <= s[n-2] && s[n-2] <= '9' {
			return jsCtxDivOp
		}
		return jsCtxRegexp
	// Punctuators from section 7.7 of the language spec after which an
	// expression (and hence a regexp literal) may start: suffixes of
	// binary operators, prefix operators, open brackets, and
	// expression-start punctuators.
	//
	// CAVEAT: the close punctuators ']' and ')' precede div ops and are
	// handled in the default case, but '}' can precede a division op as in
	//	({ valueOf: function () { return 42 } } / 2
	// which is valid, but, in practice, developers don't divide object
	// literals, so our heuristic works well for code like
	//	function () { ... } /foo/.test(x) && sideEffect();
	// The ')' punctuator can precede a regular expression as in
	//	if (b) /foo/.test(x) && ...
	// but this is much less likely than
	//	(a + b) / c
	case ',', '<', '>', '=', '*', '%', '&', '|', '^', '?',
		'!', '~',
		'(', '[',
		':', ';', '{',
		'}':
		return jsCtxRegexp
	default:
		// Scan back over an IdentifierName and check whether it is a
		// keyword that can precede a regular expression.
		start := n
		for start > 0 && isJSIdentPart(rune(s[start-1])) {
			start--
		}
		if regexpPrecederKeywords[string(s[start:])] {
			return jsCtxRegexp
		}
	}
	// Otherwise is a punctuator not listed above, or
	// a string which precedes a div op, or an identifier
	// which precedes a div op.
	return jsCtxDivOp
}
// regexpPrecederKeywords is a set of reserved JS keywords that can precede a
// regular expression in JS source.
var regexpPrecederKeywords = func() map[string]bool {
	set := make(map[string]bool, 14)
	for _, kw := range []string{
		"break", "case", "continue", "delete", "do", "else", "finally",
		"in", "instanceof", "return", "throw", "try", "typeof", "void",
	} {
		set[kw] = true
	}
	return set
}()
var jsonMarshalType = reflect.TypeFor[json.Marshaler]()
// indirectToJSONMarshaler returns the value, after dereferencing as many times
// as necessary to reach the base type (or nil) or an implementation of json.Marshal.
func indirectToJSONMarshaler(a any) any {
// text/template now supports passing untyped nil as a func call
// argument, so we must support it. Otherwise we'd panic below, as one
// cannot call the Type or Interface methods on an invalid
// reflect.Value. See golang.org/issue/18716.
if a == nil {
return nil
}
v := reflect.ValueOf(a)
for !v.Type().Implements(jsonMarshalType) && v.Kind() == reflect.Pointer && !v.IsNil() {
v = v.Elem()
}
return v.Interface()
}
// scriptTagRe matches "<script" and "</script" case-insensitively, capturing
// the optional slash; used below to defang marshaling error text.
var scriptTagRe = regexp.MustCompile("(?i)<(/?)script")

// jsValEscaper escapes its inputs to a JS Expression (section 11.14) that has
// neither side-effects nor free variables outside (NaN, Infinity).
func jsValEscaper(args ...any) string {
	var a any
	if len(args) == 1 {
		a = indirectToJSONMarshaler(args[0])
		switch t := a.(type) {
		case JS:
			// Trusted JS passes through unchanged.
			return string(t)
		case JSStr:
			// TODO: normalize quotes.
			return `"` + string(t) + `"`
		case json.Marshaler:
			// Do not treat as a Stringer.
		case fmt.Stringer:
			a = t.String()
		}
	} else {
		// Multiple args: dereference each, then format them as a single
		// space-separated string.
		for i, arg := range args {
			args[i] = indirectToJSONMarshaler(arg)
		}
		a = fmt.Sprint(args...)
	}
	// TODO: detect cycles before calling Marshal which loops infinitely on
	// cyclic data. This may be an unacceptable DoS risk.
	b, err := json.Marshal(a)
	if err != nil {
		// While the standard JSON marshaler does not include user controlled
		// information in the error message, if a type has a MarshalJSON method,
		// the content of the error message is not guaranteed. Since we insert
		// the error into the template, as part of a comment, we attempt to
		// prevent the error from either terminating the comment, or the script
		// block itself.
		//
		// In particular we:
		//   * replace "*/" comment end tokens with "* /", which does not
		//     terminate the comment
		//   * replace "<script" and "</script" with "\x3Cscript" and "\x3C/script"
		//     (case insensitively), and "<!--" with "\x3C!--", which prevents
		//     confusing script block termination semantics
		//
		// We also put a space before the comment so that if it is flush against
		// a division operator it is not turned into a line comment:
		//     x/{{y}}
		// turning into
		//     x//* error marshaling y:
		//          second line of error message */null
		errStr := err.Error()
		errStr = string(scriptTagRe.ReplaceAll([]byte(errStr), []byte(`\x3C${1}script`)))
		errStr = strings.ReplaceAll(errStr, "*/", "* /")
		errStr = strings.ReplaceAll(errStr, "<!--", `\x3C!--`)
		return fmt.Sprintf(" /* %s */null ", errStr)
	}
	// TODO: maybe post-process output to prevent it from containing
	// "<!--", "-->", "<![CDATA[", "]]>", or "</script"
	// in case custom marshalers produce output containing those.
	// Note: Do not use \x escaping to save bytes because it is not JSON compatible and this escaper
	// supports ld+json content-type.
	if len(b) == 0 {
		// In, `x=y/{{.}}*z` a json.Marshaler that produces "" should
		// not cause the output `x=y/*z`.
		return " null "
	}
	first, _ := utf8.DecodeRune(b)
	last, _ := utf8.DecodeLastRune(b)
	var buf strings.Builder
	// Prevent IdentifierNames and NumericLiterals from running into
	// keywords: in, instanceof, typeof, void
	pad := isJSIdentPart(first) || isJSIdentPart(last)
	if pad {
		buf.WriteByte(' ')
	}
	written := 0
	// Make sure that json.Marshal escapes codepoints U+2028 & U+2029
	// so it falls within the subset of JSON which is valid JS.
	for i := 0; i < len(b); {
		rune, n := utf8.DecodeRune(b[i:])
		repl := ""
		if rune == 0x2028 {
			repl = `\u2028`
		} else if rune == 0x2029 {
			repl = `\u2029`
		}
		if repl != "" {
			// Flush the bytes before the replaced rune, then the escape.
			buf.Write(b[written:i])
			buf.WriteString(repl)
			written = i + n
		}
		i += n
	}
	if buf.Len() != 0 {
		// Something was escaped (or padded); flush the tail.
		buf.Write(b[written:])
		if pad {
			buf.WriteByte(' ')
		}
		return buf.String()
	}
	// Nothing needed escaping; return the marshaled JSON as-is.
	return string(b)
}
// jsStrEscaper produces a string that can be included between quotes in
// JavaScript source, in JavaScript embedded in an HTML5 <script> element,
// or in an HTML5 event handler attribute such as onclick.
func jsStrEscaper(args ...any) string {
	s, t := stringify(args...)
	// Content already typed as a JS string uses the norm table so existing
	// backslash escapes are not re-escaped.
	table := jsStrReplacementTable
	if t == contentTypeJSStr {
		table = jsStrNormReplacementTable
	}
	return replace(s, table)
}
// jsTmplLitEscaper escapes a value for inclusion in a JS template literal
// (backquoted string), using the table that also covers `$`, `{`, and `}`.
func jsTmplLitEscaper(args ...any) string {
	str, _ := stringify(args...)
	return replace(str, jsBqStrReplacementTable)
}
// jsRegexpEscaper behaves like jsStrEscaper but escapes regular expression
// specials so the result is treated literally when included in a regular
// expression literal. /foo{{.X}}bar/ matches the string "foo" followed by
// the literal text of {{.X}} followed by the string "bar".
func jsRegexpEscaper(args ...any) string {
	str, _ := stringify(args...)
	if escaped := replace(str, jsRegexpReplacementTable); escaped != "" {
		return escaped
	}
	// /{{.X}}/ should not produce a line comment when .X == "".
	return "(?:)"
}
// replace replaces each rune r of s with replacementTable[r], provided that
// r < len(replacementTable). If replacementTable[r] is the empty string then
// no replacement is made.
// It also replaces runes U+2028 and U+2029 with the raw strings `\u2028` and
// `\u2029`, and always escapes the C0 controls via lowUnicodeReplacementTable.
func replace(s string, replacementTable []string) string {
	var out strings.Builder
	copied := 0 // number of bytes of s already flushed to out
	for i, width := 0, 0; i < len(s); i += width {
		// Decode manually so the byte width of invalid sequences is
		// preserved; see comment in htmlEscaper.
		var r rune
		r, width = utf8.DecodeRuneInString(s[i:])
		var repl string
		switch {
		case int(r) < len(lowUnicodeReplacementTable):
			repl = lowUnicodeReplacementTable[r]
		case int(r) < len(replacementTable) && replacementTable[r] != "":
			repl = replacementTable[r]
		case r == '\u2028':
			repl = `\u2028`
		case r == '\u2029':
			repl = `\u2029`
		default:
			continue
		}
		if copied == 0 {
			out.Grow(len(s))
		}
		out.WriteString(s[copied:i])
		out.WriteString(repl)
		copied = i + width
	}
	if copied == 0 {
		// Nothing needed escaping; avoid any copy.
		return s
	}
	out.WriteString(s[copied:])
	return out.String()
}

// lowUnicodeReplacementTable maps each C0 control character to its JS
// string-literal escape; these must always be escaped regardless of the
// surrounding replacement table.
var lowUnicodeReplacementTable = []string{
	0: `\u0000`, 1: `\u0001`, 2: `\u0002`, 3: `\u0003`, 4: `\u0004`, 5: `\u0005`, 6: `\u0006`,
	'\a': `\u0007`,
	'\b': `\u0008`,
	'\t': `\t`,
	'\n': `\n`,
	'\v': `\u000b`, // "\v" == "v" on IE 6.
	'\f': `\f`,
	'\r': `\r`,
	0xe: `\u000e`, 0xf: `\u000f`, 0x10: `\u0010`, 0x11: `\u0011`, 0x12: `\u0012`, 0x13: `\u0013`,
	0x14: `\u0014`, 0x15: `\u0015`, 0x16: `\u0016`, 0x17: `\u0017`, 0x18: `\u0018`, 0x19: `\u0019`,
	0x1a: `\u001a`, 0x1b: `\u001b`, 0x1c: `\u001c`, 0x1d: `\u001d`, 0x1e: `\u001e`, 0x1f: `\u001f`,
}
// jsStrReplacementTable maps runes to their escapes for values interpolated
// inside quoted JS strings.
var jsStrReplacementTable = []string{
	0:    `\u0000`,
	'\t': `\t`,
	'\n': `\n`,
	'\v': `\u000b`, // "\v" == "v" on IE 6.
	'\f': `\f`,
	'\r': `\r`,
	// Encode HTML specials as hex so the output can be embedded
	// in HTML attributes without further encoding.
	'"':  `\u0022`,
	'`':  `\u0060`,
	'&':  `\u0026`,
	'\'': `\u0027`,
	'+':  `\u002b`,
	'/':  `\/`,
	'<':  `\u003c`,
	'>':  `\u003e`,
	'\\': `\\`,
}
// jsBqStrReplacementTable is like jsStrReplacementTable except it also contains
// the special characters for JS template literals: $, {, and }.
var jsBqStrReplacementTable = []string{
	0:    `\u0000`,
	'\t': `\t`,
	'\n': `\n`,
	'\v': `\u000b`, // "\v" == "v" on IE 6.
	'\f': `\f`,
	'\r': `\r`,
	// Encode HTML specials as hex so the output can be embedded
	// in HTML attributes without further encoding.
	'"':  `\u0022`,
	'`':  `\u0060`,
	'&':  `\u0026`,
	'\'': `\u0027`,
	'+':  `\u002b`,
	'/':  `\/`,
	'<':  `\u003c`,
	'>':  `\u003e`,
	'\\': `\\`,
	// Escaping these prevents ${...} interpolation inside a template
	// literal from being reachable via interpolated data.
	'$': `\u0024`,
	'{': `\u007b`,
	'}': `\u007d`,
}
// jsStrNormReplacementTable is like jsStrReplacementTable but does not
// overencode existing escapes since this table has no entry for `\`.
var jsStrNormReplacementTable = []string{
	0:    `\u0000`,
	'\t': `\t`,
	'\n': `\n`,
	'\v': `\u000b`, // "\v" == "v" on IE 6.
	'\f': `\f`,
	'\r': `\r`,
	// Encode HTML specials as hex so the output can be embedded
	// in HTML attributes without further encoding.
	'"':  `\u0022`,
	'&':  `\u0026`,
	'\'': `\u0027`,
	'`':  `\u0060`,
	'+':  `\u002b`,
	'/':  `\/`,
	'<':  `\u003c`,
	'>':  `\u003e`,
}
// jsRegexpReplacementTable escapes both JS string specials and regular
// expression metacharacters so interpolated data is matched literally.
var jsRegexpReplacementTable = []string{
	0:    `\u0000`,
	'\t': `\t`,
	'\n': `\n`,
	'\v': `\u000b`, // "\v" == "v" on IE 6.
	'\f': `\f`,
	'\r': `\r`,
	// Encode HTML specials as hex so the output can be embedded
	// in HTML attributes without further encoding.
	'"':  `\u0022`,
	'$':  `\$`,
	'&':  `\u0026`,
	'\'': `\u0027`,
	'(':  `\(`,
	')':  `\)`,
	'*':  `\*`,
	'+':  `\u002b`,
	'-':  `\-`,
	'.':  `\.`,
	'/':  `\/`,
	'<':  `\u003c`,
	'>':  `\u003e`,
	'?':  `\?`,
	'[':  `\[`,
	'\\': `\\`,
	']':  `\]`,
	'^':  `\^`,
	'{':  `\{`,
	'|':  `\|`,
	'}':  `\}`,
}
// isJSIdentPart reports whether the given rune is a JS identifier part.
// It does not handle all the non-Latin letters, joiners, and combining marks,
// but it does handle every codepoint that can occur in a numeric literal or
// a keyword.
func isJSIdentPart(r rune) bool {
	return r == '$' || r == '_' ||
		('0' <= r && r <= '9') ||
		('A' <= r && r <= 'Z') ||
		('a' <= r && r <= 'z')
}
// isJSType reports whether the given MIME type should be considered JavaScript.
//
// It is used to determine whether a script tag with a type attribute is a javascript container.
func isJSType(mimeType string) bool {
	// The recognized types are drawn from:
	// https://www.w3.org/TR/html5/scripting-1.html#attr-script-type
	// https://tools.ietf.org/html/rfc7231#section-3.1.1
	// https://tools.ietf.org/html/rfc4329#section-3
	// https://www.ietf.org/rfc/rfc4627.txt
	//
	// Discard any MIME parameters, then normalize case and surrounding
	// whitespace before comparing.
	base, _, _ := strings.Cut(mimeType, ";")
	base = strings.TrimSpace(strings.ToLower(base))
	for _, js := range []string{
		"application/ecmascript",
		"application/javascript",
		"application/json",
		"application/ld+json",
		"application/x-ecmascript",
		"application/x-javascript",
		"module",
		"text/ecmascript",
		"text/javascript",
		"text/javascript1.0",
		"text/javascript1.1",
		"text/javascript1.2",
		"text/javascript1.3",
		"text/javascript1.4",
		"text/javascript1.5",
		"text/jscript",
		"text/livescript",
		"text/x-ecmascript",
		"text/x-javascript",
	} {
		if base == js {
			return true
		}
	}
	return false
}
// Code generated by "stringer -type jsCtx"; DO NOT EDIT.
package template
import "strconv"
// This dummy function fails to compile if the jsCtx constant values drift
// from the generated table below.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[jsCtxRegexp-0]
	_ = x[jsCtxDivOp-1]
	_ = x[jsCtxUnknown-2]
}

// _jsCtx_name concatenates the constant names; _jsCtx_index holds the byte
// offset of each name within it.
const _jsCtx_name = "jsCtxRegexpjsCtxDivOpjsCtxUnknown"

var _jsCtx_index = [...]uint8{0, 11, 21, 33}

// String returns the name of the jsCtx constant, or "jsCtx(n)" for values
// outside the generated range.
func (i jsCtx) String() string {
	if i >= jsCtx(len(_jsCtx_index)-1) {
		return "jsCtx(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _jsCtx_name[_jsCtx_index[i]:_jsCtx_index[i+1]]
}
// Code generated by "stringer -type state"; DO NOT EDIT.
package template
import "strconv"
// This dummy function fails to compile if the state constant values drift
// from the generated table below.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[stateText-0]
	_ = x[stateTag-1]
	_ = x[stateAttrName-2]
	_ = x[stateAfterName-3]
	_ = x[stateBeforeValue-4]
	_ = x[stateHTMLCmt-5]
	_ = x[stateRCDATA-6]
	_ = x[stateAttr-7]
	_ = x[stateURL-8]
	_ = x[stateSrcset-9]
	_ = x[stateJS-10]
	_ = x[stateJSDqStr-11]
	_ = x[stateJSSqStr-12]
	_ = x[stateJSTmplLit-13]
	_ = x[stateJSRegexp-14]
	_ = x[stateJSBlockCmt-15]
	_ = x[stateJSLineCmt-16]
	_ = x[stateJSHTMLOpenCmt-17]
	_ = x[stateJSHTMLCloseCmt-18]
	_ = x[stateCSS-19]
	_ = x[stateCSSDqStr-20]
	_ = x[stateCSSSqStr-21]
	_ = x[stateCSSDqURL-22]
	_ = x[stateCSSSqURL-23]
	_ = x[stateCSSURL-24]
	_ = x[stateCSSBlockCmt-25]
	_ = x[stateCSSLineCmt-26]
	_ = x[stateError-27]
	_ = x[stateMetaContent-28]
	_ = x[stateMetaContentURL-29]
	_ = x[stateDead-30]
}

// _state_name concatenates the constant names; _state_index holds the byte
// offset of each name within it.
const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSTmplLitstateJSRegexpstateJSBlockCmtstateJSLineCmtstateJSHTMLOpenCmtstateJSHTMLCloseCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateMetaContentstateMetaContentURLstateDead"

var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 156, 169, 184, 198, 216, 235, 243, 256, 269, 282, 295, 306, 322, 337, 347, 363, 382, 391}

// String returns the name of the state constant, or "state(n)" for values
// outside the generated range.
func (i state) String() string {
	if i >= state(len(_state_index)-1) {
		return "state(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _state_name[_state_index[i]:_state_index[i+1]]
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"fmt"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"sync"
"text/template"
"text/template/parse"
)
// Template is a specialized Template from "text/template" that produces a safe
// HTML document fragment.
type Template struct {
	// Sticky error if escaping fails, or escapeOK if succeeded.
	escapeErr error
	// We could embed the text/template field, but it's safer not to because
	// we need to keep our version of the name space and the underlying
	// template's in sync.
	text *template.Template
	// The underlying template's parse tree, updated to be HTML-safe.
	Tree      *parse.Tree
	*nameSpace // common to all associated templates
}

// escapeOK is a sentinel value used to indicate valid escaping.
var escapeOK = fmt.Errorf("template escaped correctly")

// nameSpace is the data structure shared by all templates in an association.
type nameSpace struct {
	mu      sync.Mutex
	set     map[string]*Template // all associated templates, keyed by name
	escaped bool                 // set once execution has begun; parsing is then rejected
	esc     escaper
}
// Templates returns a slice of the templates associated with t, including t
// itself.
func (t *Template) Templates() []*Template {
	ns := t.nameSpace
	ns.mu.Lock()
	defer ns.mu.Unlock()
	// Copy into a fresh slice so callers never see the shared map.
	all := make([]*Template, 0, len(ns.set))
	for _, tmpl := range ns.set {
		all = append(all, tmpl)
	}
	return all
}
// Option sets options for the template. Options are described by
// strings, either a simple string or "key=value". There can be at
// most one equals sign in an option string. If the option string
// is unrecognized or otherwise invalid, Option panics.
//
// Known options:
//
// missingkey: Control the behavior during execution if a map is
// indexed with a key that is not present in the map.
//
//	"missingkey=default" or "missingkey=invalid"
//		The default behavior: Do nothing and continue execution.
//		If printed, the result of the index operation is the string
//		"<no value>".
//	"missingkey=zero"
//		The operation returns the zero value for the map type's element.
//	"missingkey=error"
//		Execution stops immediately with an error.
func (t *Template) Option(opt ...string) *Template {
	// Option parsing and validation is delegated entirely to the
	// underlying text/template.
	t.text.Option(opt...)
	return t
}
// checkCanParse checks whether it is OK to parse templates.
// If not, it returns an error.
func (t *Template) checkCanParse() error {
	if t == nil {
		// A nil receiver is used by the package-level Parse* helpers.
		return nil
	}
	ns := t.nameSpace
	ns.mu.Lock()
	defer ns.mu.Unlock()
	if ns.escaped {
		return fmt.Errorf("html/template: cannot Parse after Execute")
	}
	return nil
}
// escape escapes all associated templates, recording the sticky result in
// escapeErr. Once called, further parsing is rejected.
func (t *Template) escape() error {
	t.nameSpace.mu.Lock()
	defer t.nameSpace.mu.Unlock()
	t.nameSpace.escaped = true
	switch {
	case t.escapeErr == escapeOK:
		// Previously escaped successfully; nothing to do.
		return nil
	case t.escapeErr != nil:
		// Previous escaping failed; the error is sticky.
		return t.escapeErr
	}
	if t.Tree == nil {
		return fmt.Errorf("template: %q is an incomplete or empty template", t.Name())
	}
	if err := escapeTemplate(t, t.text.Root, t.Name()); err != nil {
		return err
	}
	return nil
}
// Execute applies a parsed template to the specified data object,
// writing the output to wr.
// If an error occurs executing the template or writing its output,
// execution stops, but partial results may already have been written to
// the output writer.
// A template may be executed safely in parallel, although if parallel
// executions share a Writer the output may be interleaved.
func (t *Template) Execute(wr io.Writer, data any) error {
	// Escaping must succeed (or have already succeeded) before any
	// output is produced.
	err := t.escape()
	if err == nil {
		err = t.text.Execute(wr, data)
	}
	return err
}
// ExecuteTemplate applies the template associated with t that has the given
// name to the specified data object and writes the output to wr.
// If an error occurs executing the template or writing its output,
// execution stops, but partial results may already have been written to
// the output writer.
// A template may be executed safely in parallel, although if parallel
// executions share a Writer the output may be interleaved.
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data any) error {
	tmpl, err := t.lookupAndEscapeTemplate(name)
	if err == nil {
		err = tmpl.text.Execute(wr, data)
	}
	return err
}
// lookupAndEscapeTemplate guarantees that the template with the given name
// is escaped, or returns an error if it cannot be. It returns the named
// template.
func (t *Template) lookupAndEscapeTemplate(name string) (tmpl *Template, err error) {
	t.nameSpace.mu.Lock()
	defer t.nameSpace.mu.Unlock()
	t.nameSpace.escaped = true
	tmpl = t.set[name]
	switch {
	case tmpl == nil:
		return nil, fmt.Errorf("html/template: %q is undefined", name)
	case tmpl.escapeErr != nil && tmpl.escapeErr != escapeOK:
		// A previous escaping attempt failed; the error is sticky.
		return nil, tmpl.escapeErr
	case tmpl.text.Tree == nil || tmpl.text.Root == nil:
		return nil, fmt.Errorf("html/template: %q is an incomplete template", name)
	}
	if t.text.Lookup(name) == nil {
		panic("html/template internal error: template escaping out of sync")
	}
	if tmpl.escapeErr == nil {
		// Not yet escaped; do it now under the namespace lock.
		err = escapeTemplate(tmpl, tmpl.text.Root, name)
	}
	return tmpl, err
}
// DefinedTemplates returns a string listing the defined templates,
// prefixed by the string "; defined templates are: ". If there are none,
// it returns the empty string. Used to generate an error message.
func (t *Template) DefinedTemplates() string {
	// Delegates to the underlying text/template, whose set is kept in
	// sync with ours.
	return t.text.DefinedTemplates()
}
// Parse parses text as a template body for t.
// Named template definitions ({{define ...}} or {{block ...}} statements) in text
// define additional templates associated with t and are removed from the
// definition of t itself.
//
// Templates can be redefined in successive calls to Parse,
// before the first use of [Template.Execute] on t or any associated template.
// A template definition with a body containing only white space and comments
// is considered empty and will not replace an existing template's body.
// This allows using Parse to add new named template definitions without
// overwriting the main template body.
func (t *Template) Parse(text string) (*Template, error) {
	if err := t.checkCanParse(); err != nil {
		return nil, err
	}
	ret, err := t.text.Parse(text)
	if err != nil {
		return nil, err
	}
	// In general, all the named templates might have changed underfoot.
	// Regardless, some new ones may have been defined.
	// The template.Template set has been updated; update ours.
	ns := t.nameSpace
	ns.mu.Lock()
	defer ns.mu.Unlock()
	for _, v := range ret.Templates() {
		name := v.Name()
		entry := ns.set[name]
		if entry == nil {
			entry = t.new(name)
		}
		entry.text = v
		entry.Tree = v.Tree
	}
	return t, nil
}
// AddParseTree creates a new template with the name and parse tree
// and associates it with t.
//
// It returns an error if t or any associated template has already been executed.
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
	if err := t.checkCanParse(); err != nil {
		return nil, err
	}
	ns := t.nameSpace
	ns.mu.Lock()
	defer ns.mu.Unlock()
	text, err := t.text.AddParseTree(name, tree)
	if err != nil {
		return nil, err
	}
	// escapeErr is left nil: the new template has not been escaped yet.
	ret := &Template{
		text:      text,
		Tree:      text.Tree,
		nameSpace: ns,
	}
	ns.set[name] = ret
	return ret, nil
}
// Clone returns a duplicate of the template, including all associated
// templates. The actual representation is not copied, but the name space of
// associated templates is, so further calls to [Template.Parse] in the copy will add
// templates to the copy but not to the original. [Template.Clone] can be used to prepare
// common templates and use them with variant definitions for other templates
// by adding the variants after the clone is made.
//
// It returns an error if t has already been executed.
func (t *Template) Clone() (*Template, error) {
	t.nameSpace.mu.Lock()
	defer t.nameSpace.mu.Unlock()
	if t.escapeErr != nil {
		return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name())
	}
	textClone, err := t.text.Clone()
	if err != nil {
		return nil, err
	}
	// The clone gets its own name space and escaper so it evolves
	// independently of the original.
	ns := &nameSpace{set: make(map[string]*Template)}
	ns.esc = makeEscaper(ns)
	ret := &Template{
		text:      textClone,
		Tree:      textClone.Tree,
		nameSpace: ns,
	}
	ret.set[ret.Name()] = ret
	for _, clone := range textClone.Templates() {
		name := clone.Name()
		src := t.set[name]
		if src == nil || src.escapeErr != nil {
			return nil, fmt.Errorf("html/template: cannot Clone %q after it has executed", t.Name())
		}
		// Deep-copy the parse tree so later escaping of one copy does
		// not rewrite the other.
		clone.Tree = clone.Tree.Copy()
		ret.set[name] = &Template{
			text:      clone,
			Tree:      clone.Tree,
			nameSpace: ret.nameSpace,
		}
	}
	// Return the template associated with the name of this template.
	return ret.set[ret.Name()], nil
}
// New allocates a new HTML template with the given name, placed in a fresh
// name space of its own.
func New(name string) *Template {
	ns := &nameSpace{set: make(map[string]*Template)}
	ns.esc = makeEscaper(ns)
	tmpl := &Template{
		text:      template.New(name),
		nameSpace: ns,
	}
	tmpl.set[name] = tmpl
	return tmpl
}
// New allocates a new HTML template associated with the given one
// and with the same delimiters. The association, which is transitive,
// allows one template to invoke another with a {{template}} action.
//
// If a template with the given name already exists, the new HTML template
// will replace it. The existing template will be reset and disassociated with
// t.
func (t *Template) New(name string) *Template {
	ns := t.nameSpace
	ns.mu.Lock()
	defer ns.mu.Unlock()
	return t.new(name)
}
// new is the implementation of New, without the lock.
func (t *Template) new(name string) *Template {
	tmpl := &Template{
		text:      t.text.New(name),
		nameSpace: t.nameSpace,
	}
	if existing, ok := tmpl.set[name]; ok {
		// Reset the displaced template so it is disassociated from t.
		*existing = *New(existing.Name())
	}
	tmpl.set[name] = tmpl
	return tmpl
}
// Name returns the name of the template.
func (t *Template) Name() string {
	// The name lives on the underlying text/template.
	return t.text.Name()
}

// FuncMap is an alias of the text/template FuncMap type: a map from names
// to functions callable from templates.
type FuncMap = template.FuncMap
// Funcs adds the elements of the argument map to the template's function map.
// It must be called before the template is parsed.
// It panics if a value in the map is not a function with appropriate return
// type. However, it is legal to overwrite elements of the map. The return
// value is the template, so calls can be chained.
func (t *Template) Funcs(funcMap FuncMap) *Template {
	// FuncMap is an alias of template.FuncMap, so this conversion is free;
	// validation happens in the underlying text/template.
	t.text.Funcs(template.FuncMap(funcMap))
	return t
}
// Delims sets the action delimiters to the specified strings, to be used in
// subsequent calls to [Template.Parse], [ParseFiles], or [ParseGlob]. Nested template
// definitions will inherit the settings. An empty delimiter stands for the
// corresponding default: {{ or }}.
// The return value is the template, so calls can be chained.
func (t *Template) Delims(left, right string) *Template {
	// Delimiter handling lives entirely in the underlying text/template.
	t.text.Delims(left, right)
	return t
}
// Lookup returns the template with the given name that is associated with t,
// or nil if there is no such template.
func (t *Template) Lookup(name string) *Template {
	ns := t.nameSpace
	ns.mu.Lock()
	defer ns.mu.Unlock()
	return ns.set[name]
}
// Must is a helper that wraps a call to a function returning ([*Template], error)
// and panics if the error is non-nil. It is intended for use in variable initializations
// such as
//
//	var t = template.Must(template.New("name").Parse("html"))
func Must(t *Template, err error) *Template {
	if err == nil {
		return t
	}
	panic(err)
}
// ParseFiles creates a new [Template] and parses the template definitions from
// the named files. The returned template's name will have the (base) name and
// (parsed) contents of the first file. There must be at least one file.
// If an error occurs, parsing stops and the returned [*Template] is nil.
//
// When parsing multiple files with the same name in different directories,
// the last one mentioned will be the one that results.
// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template
// named "foo", while "a/foo" is unavailable.
func ParseFiles(filenames ...string) (*Template, error) {
	// nil template: parseFiles creates one from the first file.
	return parseFiles(nil, readFileOS, filenames...)
}
// ParseFiles parses the named files and associates the resulting templates with
// t. If an error occurs, parsing stops and the returned template is nil;
// otherwise it is t. There must be at least one file.
//
// When parsing multiple files with the same name in different directories,
// the last one mentioned will be the one that results.
//
// ParseFiles returns an error if t or any associated template has already been executed.
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
	return parseFiles(t, readFileOS, filenames...)
}
// parseFiles is the helper for the method and function. If the argument
// template is nil, it is created from the first file. readFile maps a file
// name to its template name and contents, allowing fs.FS-backed variants.
func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) {
	if err := t.checkCanParse(); err != nil {
		return nil, err
	}
	if len(filenames) == 0 {
		// Not really a problem, but be consistent.
		return nil, fmt.Errorf("html/template: no files named in call to ParseFiles")
	}
	for _, filename := range filenames {
		name, contents, err := readFile(filename)
		if err != nil {
			return nil, err
		}
		// First template becomes return value if not already defined,
		// and we use that one for subsequent New calls to associate
		// all the templates together. Also, if this file has the same name
		// as t, this file becomes the contents of t, so
		//	t, err := New(name).Funcs(xxx).ParseFiles(name)
		// works. Otherwise we create a new template associated with t.
		if t == nil {
			t = New(name)
		}
		target := t
		if name != t.Name() {
			target = t.New(name)
		}
		if _, err := target.Parse(string(contents)); err != nil {
			return nil, err
		}
	}
	return t, nil
}
// ParseGlob creates a new [Template] and parses the template definitions from
// the files identified by the pattern. The files are matched according to the
// semantics of filepath.Match, and the pattern must match at least one file.
// The returned template will have the (base) name and (parsed) contents of the
// first file matched by the pattern. ParseGlob is equivalent to calling
// [ParseFiles] with the list of files matched by the pattern.
//
// When parsing multiple files with the same name in different directories,
// the last one mentioned will be the one that results.
func ParseGlob(pattern string) (*Template, error) {
	// Start from a nil template; parseGlob creates one from the first match.
	return parseGlob(nil, pattern)
}
// ParseGlob parses the template definitions in the files identified by the
// pattern and associates the resulting templates with t. The files are matched
// according to the semantics of filepath.Match, and the pattern must match at
// least one file. ParseGlob is equivalent to calling t.ParseFiles with the
// list of files matched by the pattern.
//
// When parsing multiple files with the same name in different directories,
// the last one mentioned will be the one that results.
//
// ParseGlob returns an error if t or any associated template has already been executed.
func (t *Template) ParseGlob(pattern string) (*Template, error) {
	// Shared implementation with the package-level ParseGlob.
	return parseGlob(t, pattern)
}
// parseGlob is the implementation of the function and method ParseGlob.
func parseGlob(t *Template, pattern string) (*Template, error) {
	if err := t.checkCanParse(); err != nil {
		return nil, err
	}
	filenames, err := filepath.Glob(pattern)
	switch {
	case err != nil:
		return nil, err
	case len(filenames) == 0:
		// A pattern matching nothing is an error, mirroring text/template.
		return nil, fmt.Errorf("html/template: pattern matches no files: %#q", pattern)
	}
	return parseFiles(t, readFileOS, filenames...)
}
// IsTrue reports whether the value is 'true', in the sense of not the zero of its type,
// and whether the value has a meaningful truth value. This is the definition of
// truth used by if and other such actions.
func IsTrue(val any) (truth, ok bool) {
	// Truthiness is defined entirely by text/template; defer to it.
	return template.IsTrue(val)
}
// ParseFS is like [ParseFiles] or [ParseGlob] but reads from the file system fs
// instead of the host operating system's file system.
// It accepts a list of glob patterns.
// (Note that most file names serve as glob patterns matching only themselves.)
func ParseFS(fs fs.FS, patterns ...string) (*Template, error) {
	// nil root: parseFS creates the template from the first matched file.
	return parseFS(nil, fs, patterns)
}
// ParseFS is like [Template.ParseFiles] or [Template.ParseGlob] but reads from the file system fs
// instead of the host operating system's file system.
// It accepts a list of glob patterns.
// (Note that most file names serve as glob patterns matching only themselves.)
func (t *Template) ParseFS(fs fs.FS, patterns ...string) (*Template, error) {
	// Shared implementation with the package-level ParseFS.
	return parseFS(t, fs, patterns)
}
// parseFS expands each glob pattern against fsys and parses every matched
// file into templates associated with t (created from the first file when
// t is nil). Each pattern must match at least one file.
func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) {
	var filenames []string
	for _, pattern := range patterns {
		matches, err := fs.Glob(fsys, pattern)
		if err != nil {
			return nil, err
		}
		if len(matches) == 0 {
			return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
		}
		filenames = append(filenames, matches...)
	}
	return parseFiles(t, readFileFS(fsys), filenames...)
}
func readFileOS(file string) (name string, b []byte, err error) {
name = filepath.Base(file)
b, err = os.ReadFile(file)
return
}
func readFileFS(fsys fs.FS) func(string) (string, []byte, error) {
return func(file string) (name string, b []byte, err error) {
name = path.Base(file)
b, err = fs.ReadFile(fsys, file)
return
}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"bytes"
"strings"
)
// transitionFunc is the array of context transition functions for text nodes.
// A transition function takes a context and template text input, and returns
// the updated context and the number of bytes consumed from the front of the
// input.
//
// The array is indexed by state, so every state value must have an entry;
// several states share a handler (e.g. all string-like JS states use
// tJSDelimited, and both JS and CSS comments use tBlockCmt/tLineCmt).
var transitionFunc = [...]func(context, []byte) (context, int){
	stateText:           tText,
	stateTag:            tTag,
	stateAttrName:       tAttrName,
	stateAfterName:      tAfterName,
	stateBeforeValue:    tBeforeValue,
	stateHTMLCmt:        tHTMLCmt,
	stateRCDATA:         tSpecialTagEnd,
	stateAttr:           tAttr,
	stateURL:            tURL,
	stateMetaContent:    tMetaContent,
	stateMetaContentURL: tMetaContentURL,
	stateSrcset:         tURL,
	stateJS:             tJS,
	stateJSDqStr:        tJSDelimited,
	stateJSSqStr:        tJSDelimited,
	stateJSRegexp:       tJSDelimited,
	stateJSTmplLit:      tJSTmpl,
	stateJSBlockCmt:     tBlockCmt,
	stateJSLineCmt:      tLineCmt,
	stateJSHTMLOpenCmt:  tLineCmt,
	stateJSHTMLCloseCmt: tLineCmt,
	stateCSS:            tCSS,
	stateCSSDqStr:       tCSSStr,
	stateCSSSqStr:       tCSSStr,
	stateCSSDqURL:       tCSSStr,
	stateCSSSqURL:       tCSSStr,
	stateCSSURL:         tCSSStr,
	stateCSSBlockCmt:    tBlockCmt,
	stateCSSLineCmt:     tLineCmt,
	stateError:          tError,
}
// Byte sequences that open and close an HTML comment.
var commentStart = []byte("<!--")
var commentEnd = []byte("-->")

// tText is the context transition function for the text state.
// It scans for the start of an HTML comment or tag; plain text is consumed
// without a state change.
func tText(c context, s []byte) (context, int) {
	k := 0
	for {
		// IndexByte yields -1 when no '<' remains, which makes i < k below.
		i := k + bytes.IndexByte(s[k:], '<')
		if i < k || i+1 == len(s) {
			// No '<' left, or '<' is the final byte: nothing can start here.
			return c, len(s)
		} else if i+4 <= len(s) && bytes.Equal(commentStart, s[i:i+4]) {
			return context{state: stateHTMLCmt}, i + 4
		}
		i++
		end := false
		if s[i] == '/' {
			if i+1 == len(s) {
				// Trailing "</" with nothing after it; wait for more input.
				return c, len(s)
			}
			end, i = true, i+1
		}
		j, e := eatTagName(s, i)
		if j != i {
			if end {
				// End tags never switch into a raw-text element.
				e = elementNone
			}
			// We've found an HTML tag.
			return context{state: stateTag, element: e}, j
		}
		// Not a tag name; continue scanning after the rejected position.
		k = j
	}
}
// elementContentType maps an element to the state its body content is
// lexed in once the opening tag is closed with '>'.
var elementContentType = [...]state{
	elementNone:     stateText,
	elementScript:   stateJS,
	elementStyle:    stateCSS,
	elementTextarea: stateRCDATA,
	elementTitle:    stateRCDATA,
	elementMeta:     stateText,
}
// tTag is the context transition function for the tag state.
// It consumes either the end of the tag ('>') or one attribute name,
// classifying the attribute so its value can be lexed appropriately.
func tTag(c context, s []byte) (context, int) {
	// Find the attribute name.
	i := eatWhiteSpace(s, 0)
	if i == len(s) {
		return c, len(s)
	}
	if s[i] == '>' {
		// Treat <meta> specially, because it doesn't have an end tag, and we
		// want to transition into the correct state/element for it.
		if c.element == elementMeta {
			return context{state: stateText, element: elementNone}, i + 1
		}
		return context{
			state:   elementContentType[c.element],
			element: c.element,
		}, i + 1
	}
	j, err := eatAttrName(s, i)
	if err != nil {
		return context{state: stateError, err: err}, len(s)
	}
	state, attr := stateTag, attrNone
	if i == j {
		// eatAttrName made no progress: the byte here cannot begin an
		// attribute name, so the template HTML is malformed.
		return context{
			state: stateError,
			err:   errorf(ErrBadHTML, nil, 0, "expected space, attr name, or end of tag, but got %q", s[i:]),
		}, len(s)
	}
	attrName := strings.ToLower(string(s[i:j]))
	// script type= and meta content= get dedicated attr kinds; everything
	// else is classified by the attribute-name table.
	if c.element == elementScript && attrName == "type" {
		attr = attrScriptType
	} else if c.element == elementMeta && attrName == "content" {
		attr = attrMetaContent
	} else {
		switch attrType(attrName) {
		case contentTypeURL:
			attr = attrURL
		case contentTypeCSS:
			attr = attrStyle
		case contentTypeJS:
			attr = attrScript
		case contentTypeSrcset:
			attr = attrSrcset
		}
	}
	// If the name ends the input we may still be inside it; otherwise the
	// name is complete and we look for '=' next.
	if j == len(s) {
		state = stateAttrName
	} else {
		state = stateAfterName
	}
	return context{state: state, element: c.element, attr: attr}, j
}
// tAttrName is the context transition function for stateAttrName.
// It resumes consuming an attribute name that was split across inputs.
func tAttrName(c context, s []byte) (context, int) {
	i, err := eatAttrName(s, 0)
	switch {
	case err != nil:
		return context{state: stateError, err: err}, len(s)
	case i != len(s):
		// The name ended within s; next we expect '=' or more attributes.
		c.state = stateAfterName
	}
	return c, i
}
// tAfterName is the context transition function for stateAfterName.
// It looks past whitespace for the '=' that introduces an attribute value.
func tAfterName(c context, s []byte) (context, int) {
	i := eatWhiteSpace(s, 0)
	if i == len(s) {
		return c, len(s)
	}
	if s[i] != '=' {
		// A valueless attribute, or the tag-ending '>': back to tag state.
		c.state = stateTag
		return c, i
	}
	// Consume the "=" and move on to the value.
	c.state = stateBeforeValue
	return c, i + 1
}
// attrStartStates maps an attribute kind to the state in which its value
// is lexed once the delimiter (if any) has been consumed.
var attrStartStates = [...]state{
	attrNone:        stateAttr,
	attrScript:      stateJS,
	attrScriptType:  stateAttr,
	attrStyle:       stateCSS,
	attrURL:         stateURL,
	attrSrcset:      stateSrcset,
	attrMetaContent: stateMetaContent,
}
// tBeforeValue is the context transition function for stateBeforeValue.
// It records which delimiter (quote or bare) encloses the attribute value
// and enters the value state appropriate to the attribute kind.
func tBeforeValue(c context, s []byte) (context, int) {
	i := eatWhiteSpace(s, 0)
	if i == len(s) {
		return c, len(s)
	}
	// Determine the attribute delimiter; unquoted values end at
	// whitespace or '>'.
	delim := delimSpaceOrTagEnd
	if s[i] == '\'' {
		delim, i = delimSingleQuote, i+1
	} else if s[i] == '"' {
		delim, i = delimDoubleQuote, i+1
	}
	c.state, c.delim = attrStartStates[c.attr], delim
	return c, i
}
// tHTMLCmt is the context transition function for stateHTMLCmt.
// Comment content is discarded until the closing "-->" is seen.
func tHTMLCmt(c context, s []byte) (context, int) {
	i := bytes.Index(s, commentEnd)
	if i == -1 {
		return c, len(s)
	}
	// Consume through "-->" and reset to the default text context.
	return context{}, i + len(commentEnd)
}
// specialTagEndMarkers maps element types to the character sequence that
// case-insensitively signals the end of the special tag body.
var specialTagEndMarkers = [...][]byte{
	elementScript:   []byte("script"),
	elementStyle:    []byte("style"),
	elementTextarea: []byte("textarea"),
	elementTitle:    []byte("title"),
	elementMeta:     []byte(""),
}

var (
	// "</" begins every end tag; tag names may be terminated by '>',
	// whitespace, or a self-closing '/'.
	specialTagEndPrefix = []byte("</")
	tagEndSeparators    = []byte("> \t\n\f/")
)
// tSpecialTagEnd is the context transition function for raw text and RCDATA
// element states. It consumes input until the element's end tag appears.
func tSpecialTagEnd(c context, s []byte) (context, int) {
	if c.element != elementNone {
		// script end tags ("</script") within script literals are ignored, so that
		// we can properly escape them.
		if c.element == elementScript && (isInScriptLiteral(c.state) || isComment(c.state)) {
			return c, len(s)
		}
		if i := indexTagEnd(s, specialTagEndMarkers[c.element]); i != -1 {
			// Stop just before "</tag"; the text state re-lexes the end tag.
			return context{}, i
		}
	}
	return c, len(s)
}
// indexTagEnd finds the index of a special tag end in a case insensitive way, or returns -1
func indexTagEnd(s []byte, tag []byte) int {
	// res tracks how many bytes of the original s have been sliced off the
	// front so indices into the shrinking s can be mapped back.
	res := 0
	plen := len(specialTagEndPrefix)
	for len(s) > 0 {
		// Try to find the tag end prefix first
		i := bytes.Index(s, specialTagEndPrefix)
		if i == -1 {
			return i
		}
		s = s[i+plen:]
		// Try to match the actual tag if there is still space for it
		if len(tag) <= len(s) && bytes.EqualFold(tag, s[:len(tag)]) {
			s = s[len(tag):]
			// Check the tag is followed by a proper separator
			if len(s) > 0 && bytes.IndexByte(tagEndSeparators, s[0]) != -1 {
				// Return the offset of "</", not of the separator.
				return res + i
			}
			// Matched the name but not a separator (e.g. "</scriptx");
			// account for the consumed name and keep scanning.
			res += len(tag)
		}
		res += i + plen
	}
	return -1
}
// tAttr is the context transition function for the attribute state.
// Plain attribute values need no sub-lexing, so all input is consumed
// with the context unchanged.
func tAttr(c context, s []byte) (context, int) {
	return c, len(s)
}
// tURL is the context transition function for the URL state.
// It tracks which part of the URL interpolations land in: once '?' or '#'
// is seen we are in the query/fragment; any non-space content before that
// marks the pre-query part.
func tURL(c context, s []byte) (context, int) {
	switch {
	case bytes.ContainsAny(s, "#?"):
		c.urlPart = urlPartQueryOrFrag
	case len(s) != eatWhiteSpace(s, 0) && c.urlPart == urlPartNone:
		// HTML5 uses "Valid URL potentially surrounded by spaces" for
		// attrs: https://www.w3.org/TR/html5/index.html#attributes-1
		c.urlPart = urlPartPreQuery
	}
	return c, len(s)
}
// tJS is the context transition function for the JS state.
// It advances to the next token that could open a string, comment, regexp,
// or template-literal brace, updating jsCtx (regexp-vs-division ambiguity)
// along the way.
func tJS(c context, s []byte) (context, int) {
	i := bytes.IndexAny(s, "\"`'/{}<-#")
	if i == -1 {
		// Entire input is non string, comment, regexp tokens.
		c.jsCtx = nextJSCtx(s, c.jsCtx)
		return c, len(s)
	}
	c.jsCtx = nextJSCtx(s[:i], c.jsCtx)
	switch s[i] {
	case '"':
		c.state, c.jsCtx = stateJSDqStr, jsCtxRegexp
	case '\'':
		c.state, c.jsCtx = stateJSSqStr, jsCtxRegexp
	case '`':
		c.state, c.jsCtx = stateJSTmplLit, jsCtxRegexp
	case '/':
		switch {
		case i+1 < len(s) && s[i+1] == '/':
			c.state, i = stateJSLineCmt, i+1
		case i+1 < len(s) && s[i+1] == '*':
			c.state, i = stateJSBlockCmt, i+1
		case c.jsCtx == jsCtxRegexp:
			c.state = stateJSRegexp
		case c.jsCtx == jsCtxDivOp:
			c.jsCtx = jsCtxRegexp
		default:
			// Cannot tell division from regexp start; refuse to guess.
			return context{
				state: stateError,
				err:   errorf(ErrSlashAmbig, nil, 0, "'/' could start a division or regexp: %.32q", s[i:]),
			}, len(s)
		}
	// ECMAScript supports HTML style comments for legacy reasons, see Appendix
	// B.1.1 "HTML-like Comments". The handling of these comments is somewhat
	// confusing. Multi-line comments are not supported, i.e. anything on lines
	// between the opening and closing tokens is not considered a comment, but
	// anything following the opening or closing token, on the same line, is
	// ignored. As such we simply treat any line prefixed with "<!--" or "-->"
	// as if it were actually prefixed with "//" and move on.
	case '<':
		if i+3 < len(s) && bytes.Equal(commentStart, s[i:i+4]) {
			c.state, i = stateJSHTMLOpenCmt, i+3
		}
	case '-':
		if i+2 < len(s) && bytes.Equal(commentEnd, s[i:i+3]) {
			c.state, i = stateJSHTMLCloseCmt, i+2
		}
	// ECMAScript also supports "hashbang" comment lines, see Section 12.5.
	case '#':
		if i+1 < len(s) && s[i+1] == '!' {
			c.state, i = stateJSLineCmt, i+1
		}
	case '{':
		// We only care about tracking brace depth if we are inside of a
		// template literal.
		if len(c.jsBraceDepth) == 0 {
			return c, i + 1
		}
		c.jsBraceDepth[len(c.jsBraceDepth)-1]++
	case '}':
		if len(c.jsBraceDepth) == 0 {
			return c, i + 1
		}
		// There are no cases where a brace can be escaped in the JS context
		// that are not syntax errors, it seems. Because of this we can just
		// count "\}" as "}" and move on, the script is already broken as
		// fully fledged parsers will just fail anyway.
		c.jsBraceDepth[len(c.jsBraceDepth)-1]--
		if c.jsBraceDepth[len(c.jsBraceDepth)-1] >= 0 {
			return c, i + 1
		}
		// Depth went negative: this '}' closes a ${...} interpolation, so
		// pop the depth counter and resume the template literal.
		c.jsBraceDepth = c.jsBraceDepth[:len(c.jsBraceDepth)-1]
		c.state = stateJSTmplLit
	default:
		panic("unreachable")
	}
	return c, i + 1
}
// tJSTmpl is the context transition function for the JS template-literal
// state. It scans for the closing backquote, a "${" interpolation, or an
// escape sequence.
func tJSTmpl(c context, s []byte) (context, int) {
	var k int
	for {
		// IndexAny yields -1 when none of the three bytes remain (i < k).
		i := k + bytes.IndexAny(s[k:], "`\\$")
		if i < k {
			break
		}
		switch s[i] {
		case '\\':
			i++
			if i == len(s) {
				// A lone trailing backslash cannot be resumed safely.
				return context{
					state: stateError,
					err:   errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in JS string: %q", s),
				}, len(s)
			}
		case '$':
			if len(s) >= i+2 && s[i+1] == '{' {
				// Enter an interpolation; track its brace depth in jsBraceDepth.
				c.jsBraceDepth = append(c.jsBraceDepth, 0)
				c.state = stateJS
				return c, i + 2
			}
		case '`':
			// end
			c.state = stateJS
			return c, i + 1
		}
		k = i + 1
	}
	return c, len(s)
}
// tJSDelimited is the context transition function for the JS string and regexp
// states. The set of significant bytes depends on which delimiter opened the
// token; regexps additionally track [...] character classes, inside which '/'
// does not terminate the literal.
func tJSDelimited(c context, s []byte) (context, int) {
	specials := `\"`
	switch c.state {
	case stateJSSqStr:
		specials = `\'`
	case stateJSRegexp:
		specials = `\/[]`
	}

	k, inCharset := 0, false
	for {
		// IndexAny yields -1 when no special byte remains (i < k).
		i := k + bytes.IndexAny(s[k:], specials)
		if i < k {
			break
		}
		switch s[i] {
		case '\\':
			i++
			if i == len(s) {
				return context{
					state: stateError,
					err:   errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in JS string: %q", s),
				}, len(s)
			}
		case '[':
			inCharset = true
		case ']':
			inCharset = false
		case '/':
			// If "</script" appears in a regex literal, the '/' should not
			// close the regex literal, and it will later be escaped to
			// "\x3C/script" in escapeText.
			if i > 0 && i+7 <= len(s) && bytes.Equal(bytes.ToLower(s[i-1:i+7]), []byte("</script")) {
				i++
			} else if !inCharset {
				c.state, c.jsCtx = stateJS, jsCtxDivOp
				return c, i + 1
			}
		default:
			// end delimiter
			if !inCharset {
				c.state, c.jsCtx = stateJS, jsCtxDivOp
				return c, i + 1
			}
		}
		k = i + 1
	}

	if inCharset {
		// This can be fixed by making context richer if interpolation
		// into charsets is desired.
		return context{
			state: stateError,
			err:   errorf(ErrPartialCharset, nil, 0, "unfinished JS regexp charset: %q", s),
		}, len(s)
	}

	return c, len(s)
}
// blockCommentEnd closes both JS and CSS block comments.
var blockCommentEnd = []byte("*/")

// tBlockCmt is the context transition function for /*comment*/ states.
// It discards input until "*/" and then restores the enclosing language
// state (JS or CSS).
func tBlockCmt(c context, s []byte) (context, int) {
	i := bytes.Index(s, blockCommentEnd)
	if i == -1 {
		return c, len(s)
	}
	switch c.state {
	case stateJSBlockCmt:
		c.state = stateJS
	case stateCSSBlockCmt:
		c.state = stateCSS
	default:
		// Only the two block-comment states are wired to this handler.
		panic(c.state.String())
	}
	return c, i + len(blockCommentEnd)
}
// tLineCmt is the context transition function for //comment states, and the JS HTML-like comment state.
func tLineCmt(c context, s []byte) (context, int) {
	var lineTerminators string
	var endState state
	switch c.state {
	case stateJSLineCmt, stateJSHTMLOpenCmt, stateJSHTMLCloseCmt:
		lineTerminators, endState = "\n\r\u2028\u2029", stateJS
	case stateCSSLineCmt:
		lineTerminators, endState = "\n\f\r", stateCSS
		// Line comments are not part of any published CSS standard but
		// are supported by the 4 major browsers.
		// This defines line comments as
		//	LINECOMMENT ::= "//" [^\n\f\d]*
		// since https://www.w3.org/TR/css3-syntax/#SUBTOK-nl defines
		// newlines:
		//	nl ::= #xA | #xD #xA | #xD | #xC
	default:
		// Only the line-comment states are wired to this handler.
		panic(c.state.String())
	}

	i := bytes.IndexAny(s, lineTerminators)
	if i == -1 {
		return c, len(s)
	}
	c.state = endState
	// Per section 7.4 of EcmaScript 5 : https://es5.github.io/#x7.4
	// "However, the LineTerminator at the end of the line is not
	// considered to be part of the single-line comment; it is
	// recognized separately by the lexical grammar and becomes part
	// of the stream of input elements for the syntactic grammar."
	return c, i
}
// tCSS is the context transition function for the CSS state.
// It scans for the start of a string, url(...) construct, or comment.
func tCSS(c context, s []byte) (context, int) {
	// CSS quoted strings are almost never used except for:
	// (1) URLs as in background: "/foo.png"
	// (2) Multiword font-names as in font-family: "Times New Roman"
	// (3) List separators in content values as in inline-lists:
	//    <style>
	//    ul.inlineList { list-style: none; padding:0 }
	//    ul.inlineList > li { display: inline }
	//    ul.inlineList > li:before { content: ", " }
	//    ul.inlineList > li:first-child:before { content: "" }
	//    </style>
	//    <ul class=inlineList><li>One<li>Two<li>Three</ul>
	// (4) Attribute value selectors as in a[href="http://example.com/"]
	//
	// We conservatively treat all strings as URLs, but make some
	// allowances to avoid confusion.
	//
	// In (1), our conservative assumption is justified.
	// In (2), valid font names do not contain ':', '?', or '#', so our
	// conservative assumption is fine since we will never transition past
	// urlPartPreQuery.
	// In (3), our protocol heuristic should not be tripped, and there
	// should not be non-space content after a '?' or '#', so as long as
	// we only %-encode RFC 3986 reserved characters we are ok.
	// In (4), we should URL escape for URL attributes, and for others we
	// have the attribute name available if our conservative assumption
	// proves problematic for real code.

	k := 0
	for {
		// IndexAny yields -1 when no significant byte remains (i < k).
		i := k + bytes.IndexAny(s[k:], `("'/`)
		if i < k {
			return c, len(s)
		}
		switch s[i] {
		case '(':
			// Look for url to the left.
			p := bytes.TrimRight(s[:i], "\t\n\f\r ")
			if endsWithCSSKeyword(p, "url") {
				// Skip whitespace after '(' to find the optional quote.
				j := len(s) - len(bytes.TrimLeft(s[i+1:], "\t\n\f\r "))
				switch {
				case j != len(s) && s[j] == '"':
					c.state, j = stateCSSDqURL, j+1
				case j != len(s) && s[j] == '\'':
					c.state, j = stateCSSSqURL, j+1
				default:
					c.state = stateCSSURL
				}
				return c, j
			}
		case '/':
			if i+1 < len(s) {
				switch s[i+1] {
				case '/':
					c.state = stateCSSLineCmt
					return c, i + 2
				case '*':
					c.state = stateCSSBlockCmt
					return c, i + 2
				}
			}
		case '"':
			c.state = stateCSSDqStr
			return c, i + 1
		case '\'':
			c.state = stateCSSSqStr
			return c, i + 1
		}
		k = i + 1
	}
}
// tCSSStr is the context transition function for the CSS string and URL states.
// Because CSS strings are treated as URLs, consumed content is also fed
// through tURL (after CSS-unescaping) to keep urlPart tracking accurate.
func tCSSStr(c context, s []byte) (context, int) {
	var endAndEsc string
	switch c.state {
	case stateCSSDqStr, stateCSSDqURL:
		endAndEsc = `\"`
	case stateCSSSqStr, stateCSSSqURL:
		endAndEsc = `\'`
	case stateCSSURL:
		// Unquoted URLs end with a newline or close parenthesis.
		// The below includes the wc (whitespace character) and nl.
		endAndEsc = "\\\t\n\f\r )"
	default:
		// Only the CSS string/URL states are wired to this handler.
		panic(c.state.String())
	}

	k := 0
	for {
		// IndexAny yields -1 when no terminator or escape remains (i < k).
		i := k + bytes.IndexAny(s[k:], endAndEsc)
		if i < k {
			// NOTE: ':=' deliberately shadows c for this early return.
			c, nread := tURL(c, decodeCSS(s[k:]))
			return c, k + nread
		}
		if s[i] == '\\' {
			i++
			if i == len(s) {
				return context{
					state: stateError,
					err:   errorf(ErrPartialEscape, nil, 0, "unfinished escape sequence in CSS string: %q", s),
				}, len(s)
			}
		} else {
			// Hit the closing delimiter: back to plain CSS.
			c.state = stateCSS
			return c, i + 1
		}
		// Feed the consumed prefix (escape included) through the URL tracker.
		c, _ = tURL(c, decodeCSS(s[:i+1]))
		k = i + 1
	}
}
// tError is the context transition function for the error state.
// The error state is terminal: remaining input is consumed without effect.
func tError(c context, s []byte) (context, int) {
	return c, len(s)
}
// tMetaContent is the context transition function for the meta content attribute state.
// It scans for a case-insensitive "url=" token; once found, the remainder of
// the value is lexed as a URL.
func tMetaContent(c context, s []byte) (context, int) {
	// Only offsets with at least four bytes left can hold "url=".
	for i := 0; i+4 <= len(s); i++ {
		if bytes.Equal(bytes.ToLower(s[i:i+4]), []byte("url=")) {
			c.state = stateMetaContentURL
			return c, i + 4
		}
	}
	return c, len(s)
}
// tMetaContentURL is the context transition function for the "url=" part of a meta content attribute state.
// A ';' ends the URL and returns to scanning the rest of the content value.
func tMetaContentURL(c context, s []byte) (context, int) {
	if i := bytes.IndexByte(s, ';'); i != -1 {
		c.state = stateMetaContent
		return c, i + 1
	}
	return c, len(s)
}
// eatAttrName returns the largest j such that s[i:j] is an attribute name.
// It returns an error if s[i:] does not look like it begins with an
// attribute name, such as encountering a quote mark without a preceding
// equals sign.
func eatAttrName(s []byte, i int) (int, *Error) {
	for j := i; j < len(s); j++ {
		switch s[j] {
		case ' ', '\t', '\n', '\f', '\r', '=', '>':
			return j, nil
		case '\'', '"', '<':
			// These result in a parse warning in HTML5 and are
			// indicative of serious problems if seen in an attr
			// name in a template.
			return -1, errorf(ErrBadHTML, nil, 0, "%q in attribute name: %.32q", s[j:j+1], s)
		}
	}
	return len(s), nil
}
// elementNameMap maps lowercase tag names that need special content
// handling to their element kind; unlisted names map to elementNone.
var elementNameMap = map[string]element{
	"script":   elementScript,
	"style":    elementStyle,
	"textarea": elementTextarea,
	"title":    elementTitle,
	"meta":     elementMeta,
}
// asciiAlpha reports whether c is an ASCII letter.
func asciiAlpha(c byte) bool {
	switch {
	case c >= 'a' && c <= 'z':
		return true
	case c >= 'A' && c <= 'Z':
		return true
	}
	return false
}
// asciiAlphaNum reports whether c is an ASCII letter or digit.
func asciiAlphaNum(c byte) bool {
	return '0' <= c && c <= '9' ||
		'A' <= c && c <= 'Z' ||
		'a' <= c && c <= 'z'
}
// eatTagName returns the largest j such that s[i:j] is a tag name and the tag type.
// Unknown tag names yield elementNone (the map's zero value).
func eatTagName(s []byte, i int) (int, element) {
	if i == len(s) || !asciiAlpha(s[i]) {
		// Tag names must start with a letter; no progress otherwise.
		return i, elementNone
	}
	j := i + 1
	for j < len(s) {
		x := s[j]
		if asciiAlphaNum(x) {
			j++
			continue
		}
		// Allow "x-y" or "x:y" but not "x-", "-y", or "x--y".
		if (x == ':' || x == '-') && j+1 < len(s) && asciiAlphaNum(s[j+1]) {
			j += 2
			continue
		}
		break
	}
	return j, elementNameMap[strings.ToLower(string(s[i:j]))]
}
// eatWhiteSpace returns the largest j such that s[i:j] is white space.
func eatWhiteSpace(s []byte, i int) int {
	for j, c := range s[i:] {
		switch c {
		case ' ', '\t', '\n', '\f', '\r':
			// Still in the whitespace run.
		default:
			return i + j
		}
	}
	return len(s)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"fmt"
"strings"
)
// urlFilter returns its input unless it contains an unsafe scheme in which
// case it defangs the entire URL.
//
// Schemes that cause unintended side effects that are irreversible without user
// interaction are considered unsafe. For example, clicking on a "javascript:"
// link can immediately trigger JavaScript code execution.
//
// This filter conservatively assumes that all schemes other than the following
// are unsafe:
// - http: Navigates to a new website, and may open a new window or tab.
// These side effects can be reversed by navigating back to the
// previous website, or closing the window or tab. No irreversible
// changes will take place without further user interaction with
// the new website.
// - https: Same as http.
// - mailto: Opens an email program and starts a new draft. This side effect
// is not irreversible until the user explicitly clicks send; it
// can be undone by closing the email program.
//
// To allow URLs containing other schemes to bypass this filter, developers must
// explicitly indicate that such a URL is expected and safe by encapsulating it
// in a template.URL value.
// urlFilter returns its input unchanged when it is already typed as a URL
// or uses a safe scheme; otherwise it defangs the whole URL with the
// "#ZgotmplZ" failsafe so no unsafe scheme can execute.
func urlFilter(args ...any) string {
	s, t := stringify(args...)
	if t == contentTypeURL || isSafeURL(s) {
		return s
	}
	return "#" + filterFailsafe
}
// isSafeURL is true if s is a relative URL or if URL has a protocol in
// (http, https, mailto).
func isSafeURL(s string) bool {
	protocol, _, ok := strings.Cut(s, ":")
	if !ok || strings.Contains(protocol, "/") {
		// No scheme at all, or the first ':' comes after a '/', so this
		// is a relative URL with nothing to reject.
		return true
	}
	for _, allowed := range []string{"http", "https", "mailto"} {
		if strings.EqualFold(protocol, allowed) {
			return true
		}
	}
	return false
}
// urlEscaper produces an output that can be embedded in a URL query.
// The output can be embedded in an HTML attribute without further escaping.
func urlEscaper(args ...any) string {
	// norm=false: fully percent-encode rather than just normalize.
	return urlProcessor(false, args...)
}
// urlNormalizer normalizes URL content so it can be embedded in a quote-delimited
// string or parenthesis delimited url(...).
// The normalizer does not encode all HTML specials. Specifically, it does not
// encode '&' so correct embedding in an HTML attribute requires escaping of
// '&' to '&amp;'.
func urlNormalizer(args ...any) string {
	// norm=true: leave RFC 3986 reserved characters and valid %XX escapes alone.
	return urlProcessor(true, args...)
}
// urlProcessor normalizes (when norm is true) or escapes its input to produce
// a valid hierarchical or opaque URL part.
func urlProcessor(norm bool, args ...any) string {
	s, t := stringify(args...)
	// Content already typed as a URL is only normalized, never re-escaped.
	norm = norm || t == contentTypeURL
	var b strings.Builder
	if !processURLOnto(s, norm, &b) {
		// Nothing changed; avoid the copy out of the builder.
		return s
	}
	return b.String()
}
// processURLOnto appends a normalized URL corresponding to its input to b
// and reports whether the appended content differs from s.
func processURLOnto(s string, norm bool, b *strings.Builder) bool {
	b.Grow(len(s) + 16)
	// written is the end of the last byte range already copied to b;
	// bytes between written and the current escape point are copied lazily.
	written := 0
	// The byte loop below assumes that all URLs use UTF-8 as the
	// content-encoding. This is similar to the URI to IRI encoding scheme
	// defined in section 3.1 of RFC 3987, and behaves the same as the
	// EcmaScript builtin encodeURIComponent.
	// It should not cause any misencoding of URLs in pages with
	// Content-type: text/html;charset=UTF-8.
	for i, n := 0, len(s); i < n; i++ {
		c := s[i]
		switch c {
		// Single quote and parens are sub-delims in RFC 3986, but we
		// escape them so the output can be embedded in single
		// quoted attributes and unquoted CSS url(...) constructs.
		// Single quotes are reserved in URLs, but are only used in
		// the obsolete "mark" rule in an appendix in RFC 3986
		// so can be safely encoded.
		case '!', '#', '$', '&', '*', '+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
			// Reserved characters survive normalization but are escaped
			// in full-escape mode.
			if norm {
				continue
			}
		// Unreserved according to RFC 3986 sec 2.3
		// "For consistency, percent-encoded octets in the ranges of
		// ALPHA (%41-%5A and %61-%7A), DIGIT (%30-%39), hyphen (%2D),
		// period (%2E), underscore (%5F), or tilde (%7E) should not be
		// created by URI producers
		case '-', '.', '_', '~':
			continue
		case '%':
			// When normalizing do not re-encode valid escapes.
			if norm && i+2 < len(s) && isHex(s[i+1]) && isHex(s[i+2]) {
				continue
			}
		default:
			// Unreserved according to RFC 3986 sec 2.3
			if 'a' <= c && c <= 'z' {
				continue
			}
			if 'A' <= c && c <= 'Z' {
				continue
			}
			if '0' <= c && c <= '9' {
				continue
			}
		}
		// Flush the unescaped run, then emit this byte as %XX.
		b.WriteString(s[written:i])
		fmt.Fprintf(b, "%%%02x", c)
		written = i + 1
	}
	b.WriteString(s[written:])
	// written != 0 iff at least one byte was percent-encoded.
	return written != 0
}
// Filters and normalizes srcset values which are comma separated
// URLs followed by metadata.
func srcsetFilterAndEscaper(args ...any) string {
	s, t := stringify(args...)
	switch t {
	case contentTypeSrcset:
		// Already trusted srcset content passes through untouched.
		return s
	case contentTypeURL:
		// Normalizing gets rid of all HTML whitespace
		// which separate the image URL from its metadata.
		var b strings.Builder
		if processURLOnto(s, true, &b) {
			s = b.String()
		}
		// Additionally, commas separate one source from another.
		return strings.ReplaceAll(s, ",", "%2c")
	}

	// Untrusted content: filter each comma-separated source independently.
	var b strings.Builder
	written := 0
	for i := 0; i < len(s); i++ {
		if s[i] == ',' {
			filterSrcsetElement(s, written, i, &b)
			b.WriteString(",")
			written = i + 1
		}
	}
	filterSrcsetElement(s, written, len(s), &b)
	return b.String()
}
// Derived from https://play.golang.org/p/Dhmj7FORT5
// A 128-bit bitmap (16 bytes, one bit per ASCII code point) with bits set
// for HTML whitespace and for ASCII letters and digits.
const htmlSpaceAndASCIIAlnumBytes = "\x00\x36\x00\x00\x01\x00\xff\x03\xfe\xff\xff\x07\xfe\xff\xff\x07"

// isHTMLSpace is true iff c is a whitespace character per
// https://infra.spec.whatwg.org/#ascii-whitespace
func isHTMLSpace(c byte) bool {
	return c <= 0x20 && htmlSpaceAndASCIIAlnumBytes[c>>3]&(1<<uint(c&0x7)) != 0
}

// isHTMLSpaceOrASCIIAlnum is true iff c is HTML whitespace or an ASCII
// letter or digit, per the bitmap above.
func isHTMLSpaceOrASCIIAlnum(c byte) bool {
	return c < 0x80 && htmlSpaceAndASCIIAlnumBytes[c>>3]&(1<<uint(c&0x7)) != 0
}
// filterSrcsetElement writes one srcset source, s[left:right], onto b:
// the URL (first whitespace-delimited token) is safety-filtered and
// normalized, and the trailing metadata (e.g. "2x", "100w") is passed
// through only when it is purely spaces and alphanumerics. Anything
// unsafe is replaced by the "#ZgotmplZ" failsafe.
func filterSrcsetElement(s string, left int, right int, b *strings.Builder) {
	// start: first non-space byte; the URL token begins here.
	start := left
	for start < right && isHTMLSpace(s[start]) {
		start++
	}
	// end: end of the URL token (first space after start, or right).
	end := right
	for i := start; i < right; i++ {
		if isHTMLSpace(s[i]) {
			end = i
			break
		}
	}
	if url := s[start:end]; isSafeURL(url) {
		// If image metadata is only spaces or alnums then
		// we don't need to URL normalize it.
		metadataOk := true
		for i := end; i < right; i++ {
			if !isHTMLSpaceOrASCIIAlnum(s[i]) {
				metadataOk = false
				break
			}
		}
		if metadataOk {
			// Preserve original leading whitespace around the cleaned URL.
			b.WriteString(s[left:start])
			processURLOnto(url, true, b)
			b.WriteString(s[end:right])
			return
		}
	}
	b.WriteString("#")
	b.WriteString(filterFailsafe)
}
// Code generated by "stringer -type urlPart"; DO NOT EDIT.
package template
import "strconv"
// Compile-time check (generated by stringer): breaks the build if the
// urlPart constant values drift from the generated string table.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[urlPartNone-0]
	_ = x[urlPartPreQuery-1]
	_ = x[urlPartQueryOrFrag-2]
	_ = x[urlPartUnknown-3]
}
// Generated by stringer: _urlPart_name concatenates all value names and
// _urlPart_index holds the offsets delimiting each name within it.
const _urlPart_name = "urlPartNoneurlPartPreQueryurlPartQueryOrFragurlPartUnknown"

var _urlPart_index = [...]uint8{0, 11, 26, 44, 58}

// String returns the name of the urlPart value, or "urlPart(N)" for
// out-of-range values.
func (i urlPart) String() string {
	if i >= urlPart(len(_urlPart_index)-1) {
		return "urlPart(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _urlPart_name[_urlPart_index[i]:_urlPart_index[i+1]]
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package image
import (
"bufio"
"errors"
"io"
"sync"
"sync/atomic"
)
// ErrFormat indicates that decoding encountered an unknown format.
var ErrFormat = errors.New("image: unknown format")

// A format holds an image format's name, magic header and how to decode it.
type format struct {
	// name is the registered format name, e.g. "jpeg" or "png".
	// magic is the identifying prefix; '?' bytes match any byte.
	name, magic string
	// decode decodes a full image; decodeConfig decodes only the
	// color model and dimensions.
	decode       func(io.Reader) (Image, error)
	decodeConfig func(io.Reader) (Config, error)
}
// Formats is the list of registered formats.
// atomicFormats holds a []format; formatsMu serializes writers
// (RegisterFormat) while readers load the slice atomically.
var (
	formatsMu     sync.Mutex
	atomicFormats atomic.Value
)
// RegisterFormat registers an image format for use by [Decode].
// Name is the name of the format, like "jpeg" or "png".
// Magic is the magic prefix that identifies the format's encoding. The magic
// string can contain "?" wildcards that each match any one byte.
// [Decode] is the function that decodes the encoded image.
// [DecodeConfig] is the function that decodes just its configuration.
func RegisterFormat(name, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error)) {
	formatsMu.Lock()
	defer formatsMu.Unlock()
	// Copy-on-write: readers load the slice atomically without the lock.
	formats, _ := atomicFormats.Load().([]format)
	atomicFormats.Store(append(formats, format{name, magic, decode, decodeConfig}))
}
// A reader is an io.Reader that can also peek ahead.
type reader interface {
io.Reader
Peek(int) ([]byte, error)
}
// asReader converts an io.Reader to a reader.
func asReader(r io.Reader) reader {
if rr, ok := r.(reader); ok {
return rr
}
return bufio.NewReader(r)
}
// match reports whether magic matches b. Magic may contain "?" wildcards.
func match(magic string, b []byte) bool {
	if len(b) != len(magic) {
		return false
	}
	for i := range b {
		if m := magic[i]; m != '?' && m != b[i] {
			return false
		}
	}
	return true
}
// sniff determines the format of r's data.
// It returns the zero format when no registered magic prefix matches.
func sniff(r reader) format {
	formats, _ := atomicFormats.Load().([]format)
	for _, f := range formats {
		b, err := r.Peek(len(f.magic))
		// A failed Peek (input shorter than this magic) just means this
		// format doesn't match; later formats may still.
		if err == nil && match(f.magic, b) {
			return f
		}
	}
	return format{}
}
// Decode decodes an image that has been encoded in a registered format.
// The string returned is the format name used during format registration.
// Format registration is typically done by an init function in the codec-
// specific package.
func Decode(r io.Reader) (Image, string, error) {
	rr := asReader(r)
	// Sniff the magic prefix; an unmatched format has a nil decode func.
	if f := sniff(rr); f.decode != nil {
		m, err := f.decode(rr)
		return m, f.name, err
	}
	return nil, "", ErrFormat
}
// DecodeConfig decodes the color model and dimensions of an image that has
// been encoded in a registered format. The string returned is the format name
// used during format registration. Format registration is typically done by
// an init function in the codec-specific package.
func DecodeConfig(r io.Reader) (Config, string, error) {
	rr := asReader(r)
	f := sniff(rr)
	if f.decodeConfig == nil {
		// No registered format matched the input's magic prefix.
		return Config{}, "", ErrFormat
	}
	cfg, err := f.decodeConfig(rr)
	return cfg, f.name, err
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package image
import (
"image/color"
"math/bits"
"strconv"
)
// A Point is an X, Y coordinate pair. The axes increase right and down.
type Point struct {
	X, Y int
}
// String returns a string representation of p like "(3,4)".
func (p Point) String() string {
	x, y := strconv.Itoa(p.X), strconv.Itoa(p.Y)
	return "(" + x + "," + y + ")"
}

// Add returns the vector p+q.
func (p Point) Add(q Point) Point {
	return Point{X: p.X + q.X, Y: p.Y + q.Y}
}

// Sub returns the vector p-q.
func (p Point) Sub(q Point) Point {
	return Point{X: p.X - q.X, Y: p.Y - q.Y}
}

// Mul returns the vector p*k.
func (p Point) Mul(k int) Point {
	return Point{X: p.X * k, Y: p.Y * k}
}

// Div returns the vector p/k.
func (p Point) Div(k int) Point {
	return Point{X: p.X / k, Y: p.Y / k}
}

// In reports whether p is in r.
func (p Point) In(r Rectangle) bool {
	return p.X >= r.Min.X && p.X < r.Max.X &&
		p.Y >= r.Min.Y && p.Y < r.Max.Y
}

// Mod returns the point q in r such that p.X-q.X is a multiple of r's width
// and p.Y-q.Y is a multiple of r's height.
func (p Point) Mod(r Rectangle) Point {
	w, h := r.Dx(), r.Dy()
	q := p.Sub(r.Min)
	q.X %= w
	q.Y %= h
	// Go's % can yield a negative remainder; shift back into [0, w) / [0, h).
	if q.X < 0 {
		q.X += w
	}
	if q.Y < 0 {
		q.Y += h
	}
	return q.Add(r.Min)
}

// Eq reports whether p and q are equal.
func (p Point) Eq(q Point) bool {
	return p.X == q.X && p.Y == q.Y
}

// ZP is the zero [Point].
//
// Deprecated: Use a literal [image.Point] instead.
var ZP Point

// Pt is shorthand for [Point]{X, Y}.
func Pt(X, Y int) Point {
	return Point{X: X, Y: Y}
}
// A Rectangle contains the points with Min.X <= X < Max.X, Min.Y <= Y < Max.Y.
// It is well-formed if Min.X <= Max.X and likewise for Y. Points are always
// well-formed. A rectangle's methods always return well-formed outputs for
// well-formed inputs.
//
// A Rectangle is also an [Image] whose bounds are the rectangle itself. At
// returns color.Opaque for points in the rectangle and color.Transparent
// otherwise.
type Rectangle struct {
	Min, Max Point
}
// String returns a string representation of r like "(3,4)-(6,5)".
func (r Rectangle) String() string {
	lo, hi := r.Min.String(), r.Max.String()
	return lo + "-" + hi
}

// Dx returns r's width.
func (r Rectangle) Dx() int {
	return r.Max.X - r.Min.X
}

// Dy returns r's height.
func (r Rectangle) Dy() int {
	return r.Max.Y - r.Min.Y
}

// Size returns r's width and height.
func (r Rectangle) Size() Point {
	return Point{X: r.Dx(), Y: r.Dy()}
}

// Add returns the rectangle r translated by p.
func (r Rectangle) Add(p Point) Rectangle {
	return Rectangle{
		Min: r.Min.Add(p),
		Max: r.Max.Add(p),
	}
}

// Sub returns the rectangle r translated by -p.
func (r Rectangle) Sub(p Point) Rectangle {
	return Rectangle{
		Min: r.Min.Sub(p),
		Max: r.Max.Sub(p),
	}
}

// Inset returns the rectangle r inset by n, which may be negative. If either
// of r's dimensions is less than 2*n then an empty rectangle near the center
// of r will be returned.
func (r Rectangle) Inset(n int) Rectangle {
	if r.Dx() < 2*n {
		// Too narrow: collapse to a vertical line at the horizontal center.
		mid := (r.Min.X + r.Max.X) / 2
		r.Min.X, r.Max.X = mid, mid
	} else {
		r.Min.X += n
		r.Max.X -= n
	}
	if r.Dy() < 2*n {
		// Too short: collapse to a horizontal line at the vertical center.
		mid := (r.Min.Y + r.Max.Y) / 2
		r.Min.Y, r.Max.Y = mid, mid
	} else {
		r.Min.Y += n
		r.Max.Y -= n
	}
	return r
}
// Intersect returns the largest rectangle contained by both r and s. If the
// two rectangles do not overlap then the zero rectangle will be returned.
func (r Rectangle) Intersect(s Rectangle) Rectangle {
	// The intersection's Min is the component-wise max of the two Mins,
	// and its Max is the component-wise min of the two Maxes.
	r.Min.X = max(r.Min.X, s.Min.X)
	r.Min.Y = max(r.Min.Y, s.Min.Y)
	r.Max.X = min(r.Max.X, s.Max.X)
	r.Max.Y = min(r.Max.Y, s.Max.Y)
	// If the inputs do not overlap, the clamped r is ill-formed (Min >= Max
	// on some axis); normalize that to the zero rectangle.
	if r.Empty() {
		return Rectangle{}
	}
	return r
}
// Union returns the smallest rectangle that contains both r and s.
func (r Rectangle) Union(s Rectangle) Rectangle {
	// An empty rectangle contributes no points, so the union is the other
	// operand unchanged.
	if r.Empty() {
		return s
	}
	if s.Empty() {
		return r
	}
	r.Min.X = min(r.Min.X, s.Min.X)
	r.Min.Y = min(r.Min.Y, s.Min.Y)
	r.Max.X = max(r.Max.X, s.Max.X)
	r.Max.Y = max(r.Max.Y, s.Max.Y)
	return r
}
// Empty reports whether the rectangle contains no points.
func (r Rectangle) Empty() bool {
	return r.Max.X <= r.Min.X || r.Max.Y <= r.Min.Y
}

// Eq reports whether r and s contain the same set of points. All empty
// rectangles are considered equal.
func (r Rectangle) Eq(s Rectangle) bool {
	if r == s {
		return true
	}
	return r.Empty() && s.Empty()
}

// Overlaps reports whether r and s have a non-empty intersection.
func (r Rectangle) Overlaps(s Rectangle) bool {
	if r.Empty() || s.Empty() {
		return false
	}
	return r.Min.X < s.Max.X && s.Min.X < r.Max.X &&
		r.Min.Y < s.Max.Y && s.Min.Y < r.Max.Y
}

// In reports whether every point in r is in s.
func (r Rectangle) In(s Rectangle) bool {
	if r.Empty() {
		return true
	}
	// Note that r.Max is an exclusive bound for r, so that r.In(s)
	// does not require that r.Max.In(s).
	return s.Min.X <= r.Min.X && r.Max.X <= s.Max.X &&
		s.Min.Y <= r.Min.Y && r.Max.Y <= s.Max.Y
}

// Canon returns the canonical version of r. The returned rectangle has minimum
// and maximum coordinates swapped if necessary so that it is well-formed.
func (r Rectangle) Canon() Rectangle {
	if r.Min.X > r.Max.X {
		r.Min.X, r.Max.X = r.Max.X, r.Min.X
	}
	if r.Min.Y > r.Max.Y {
		r.Min.Y, r.Max.Y = r.Max.Y, r.Min.Y
	}
	return r
}
// At implements the [Image] interface: points inside r are opaque, points
// outside are transparent.
func (r Rectangle) At(x, y int) color.Color {
	p := Point{X: x, Y: y}
	if p.In(r) {
		return color.Opaque
	}
	return color.Transparent
}

// RGBA64At implements the [RGBA64Image] interface.
func (r Rectangle) RGBA64At(x, y int) color.RGBA64 {
	p := Point{X: x, Y: y}
	if p.In(r) {
		// Fully opaque white, the RGBA64 equivalent of color.Opaque.
		return color.RGBA64{0xffff, 0xffff, 0xffff, 0xffff}
	}
	return color.RGBA64{}
}

// Bounds implements the [Image] interface.
func (r Rectangle) Bounds() Rectangle {
	return r
}

// ColorModel implements the [Image] interface.
func (r Rectangle) ColorModel() color.Model {
	return color.Alpha16Model
}

// ZR is the zero [Rectangle].
//
// Deprecated: Use a literal [image.Rectangle] instead.
var ZR Rectangle

// Rect is shorthand for [Rectangle]{Pt(x0, y0), [Pt](x1, y1)}. The returned
// rectangle has minimum and maximum coordinates swapped if necessary so that
// it is well-formed.
func Rect(x0, y0, x1, y1 int) Rectangle {
	if x1 < x0 {
		x0, x1 = x1, x0
	}
	if y1 < y0 {
		y0, y1 = y1, y0
	}
	return Rectangle{Point{x0, y0}, Point{x1, y1}}
}
// mul3NonNeg returns (x * y * z), unless at least one argument is negative or
// if the computation overflows the int type, in which case it returns -1.
func mul3NonNeg(x int, y int, z int) int {
	if x < 0 || y < 0 || z < 0 {
		return -1
	}
	// Multiply in uint64 so overflow is detectable via the high word.
	hi, lo := bits.Mul64(uint64(x), uint64(y))
	if hi == 0 {
		hi, lo = bits.Mul64(lo, uint64(z))
	}
	if hi != 0 {
		return -1
	}
	// The product must also fit in a (possibly 32-bit) non-negative int.
	if a := int(lo); a >= 0 && uint64(a) == lo {
		return a
	}
	return -1
}
// add2NonNeg returns (x + y), unless at least one argument is negative or if
// the computation overflows the int type, in which case it returns -1.
func add2NonNeg(x int, y int) int {
	if x < 0 || y < 0 {
		return -1
	}
	// Both operands are non-negative, so overflow wraps to a negative sum.
	if sum := x + y; sum >= 0 {
		return sum
	}
	return -1
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gif implements a GIF image decoder and encoder.
//
// The GIF specification is at https://www.w3.org/Graphics/GIF/spec-gif89a.txt.
package gif
import (
"bufio"
"compress/lzw"
"errors"
"fmt"
"image"
"image/color"
"io"
)
var (
	// errNotEnough is returned when the LZW stream ends before producing
	// a full frame's worth of pixel data.
	errNotEnough = errors.New("gif: not enough image data")
	// errTooMuch is returned when the LZW stream or sub-block stream
	// continues past the expected amount of pixel data.
	errTooMuch = errors.New("gif: too much image data")
	// errBadPixel is returned when a pixel index is outside the palette.
	errBadPixel = errors.New("gif: invalid pixel value")
)

// If the io.Reader does not also have ReadByte, then decode will introduce its own buffering.
type reader interface {
	io.Reader
	io.ByteReader
}
// Masks etc.
const (
	// Fields of the logical screen / image descriptor packed byte.
	fColorTable         = 1 << 7 // a color table follows the descriptor
	fInterlace          = 1 << 6 // frame rows are stored interlaced
	fColorTableBitsMask = 7      // low 3 bits: table size is 2^(bits+1)
	// Graphic control flags.
	gcTransparentColorSet = 1 << 0 // a transparent color index is present
	gcDisposalMethodMask  = 7 << 2 // bits 2-4: frame disposal method
)

// Disposal Methods.
const (
	DisposalNone       = 0x01
	DisposalBackground = 0x02
	DisposalPrevious   = 0x03
)

// Section indicators.
const (
	sExtension       = 0x21
	sImageDescriptor = 0x2C
	sTrailer         = 0x3B
)

// Extensions.
const (
	eText           = 0x01 // Plain Text
	eGraphicControl = 0xF9 // Graphic Control
	eComment        = 0xFE // Comment
	eApplication    = 0xFF // Application
)
func readFull(r io.Reader, b []byte) error {
_, err := io.ReadFull(r, b)
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return err
}
func readByte(r io.ByteReader) (byte, error) {
b, err := r.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return b, err
}
// decoder is the type used to decode a GIF file.
type decoder struct {
	r reader

	// From header.
	vers            string
	width           int
	height          int
	loopCount       int // -1 until a NETSCAPE2.0 loop extension sets it
	delayTime       int
	backgroundIndex byte
	disposalMethod  byte

	// From image descriptor.
	imageFields byte

	// From graphics control.
	transparentIndex    byte
	hasTransparentIndex bool

	// Computed.
	globalColorTable color.Palette

	// Used when decoding.
	delay    []int
	disposal []byte
	image    []*image.Paletted
	tmp      [1024]byte // must be at least 768 so we can read color table
}
// blockReader parses the block structure of GIF image data, which comprises
// (n, (n bytes)) blocks, with 1 <= n <= 255. It is the reader given to the
// LZW decoder, which is thus immune to the blocking. After the LZW decoder
// completes, there will be a 0-byte block remaining (0, ()), which is
// consumed when checking that the blockReader is exhausted.
//
// To avoid the allocation of a bufio.Reader for the lzw Reader, blockReader
// implements io.ByteReader and buffers blocks into the decoder's "tmp" buffer.
type blockReader struct {
	d    *decoder
	i, j uint8 // d.tmp[i:j] contains the buffered bytes
	// err is sticky: once set (including io.EOF at the block terminator),
	// all further reads return it.
	err error
}
// fill buffers the next data sub-block into d.tmp. A sub-block is a length
// byte n followed by n data bytes; a length byte of 0 is the block
// terminator, which fill records as a sticky io.EOF. If reading the data
// bytes fails, b.j is zeroed so no stale buffered bytes are exposed.
func (b *blockReader) fill() {
	if b.err != nil {
		return
	}
	b.j, b.err = readByte(b.d.r)
	if b.j == 0 && b.err == nil {
		b.err = io.EOF
	}
	if b.err != nil {
		return
	}

	b.i = 0
	b.err = readFull(b.d.r, b.d.tmp[:b.j])
	if b.err != nil {
		b.j = 0
	}
}
// ReadByte returns the next buffered byte, refilling the buffer from the
// next sub-block when it is exhausted.
func (b *blockReader) ReadByte() (byte, error) {
	if b.i == b.j {
		// Buffer is empty; fetch the next sub-block. On success fill
		// guarantees at least one byte (a zero-length block becomes io.EOF).
		if b.fill(); b.err != nil {
			return 0, b.err
		}
	}

	out := b.d.tmp[b.i]
	b.i++
	return out, nil
}
// blockReader must implement io.Reader, but its Read shouldn't ever actually
// be called in practice. The compress/lzw package will only call [blockReader.ReadByte].
func (b *blockReader) Read(p []byte) (int, error) {
	if b.err != nil || len(p) == 0 {
		return 0, b.err
	}

	if b.i == b.j {
		// Buffer is empty; fetch the next sub-block.
		b.fill()
		if b.err != nil {
			return 0, b.err
		}
	}

	// Hand out whatever is buffered; at most one sub-block per call.
	n := copy(p, b.d.tmp[b.i:b.j])
	b.i += uint8(n)
	return n, nil
}
// close primarily detects whether or not a block terminator was encountered
// after reading a sequence of data sub-blocks. It allows at most one trailing
// sub-block worth of data. I.e., if some number of bytes exist in one sub-block
// following the end of LZW data, the very next sub-block must be the block
// terminator. If the very end of LZW data happened to fill one sub-block, at
// most one more sub-block of length 1 may exist before the block-terminator.
// These accommodations allow us to support GIFs created by less strict encoders.
// See https://golang.org/issue/16146.
func (b *blockReader) close() error {
	if b.err == io.EOF {
		// A clean block-sequence terminator was encountered while reading.
		return nil
	} else if b.err != nil {
		// Some other error was encountered while reading.
		return b.err
	}

	if b.i == b.j {
		// We reached the end of a sub block reading LZW data. We'll allow at
		// most one more sub block of data with a length of 1 byte.
		b.fill()
		if b.err == io.EOF {
			return nil
		} else if b.err != nil {
			return b.err
		} else if b.j > 1 {
			return errTooMuch
		}
	}

	// Part of a sub-block remains buffered. We expect that the next attempt to
	// buffer a sub-block will reach the block terminator.
	b.fill()
	if b.err == io.EOF {
		return nil
	} else if b.err != nil {
		return b.err
	}

	return errTooMuch
}
// decode reads a GIF image from r and stores the result in d.
// If configOnly is true, only the header and logical screen descriptor are
// read. If keepAllFrames is false, decoding stops after the first frame.
func (d *decoder) decode(r io.Reader, configOnly, keepAllFrames bool) error {
	// Add buffering if r does not provide ReadByte.
	if rr, ok := r.(reader); ok {
		d.r = rr
	} else {
		d.r = bufio.NewReader(r)
	}

	// -1 distinguishes "no loop extension seen" from an explicit loop count.
	d.loopCount = -1

	err := d.readHeaderAndScreenDescriptor()
	if err != nil {
		return err
	}
	if configOnly {
		return nil
	}

	// Process blocks until the trailer (or the first frame, if requested).
	for {
		c, err := readByte(d.r)
		if err != nil {
			return fmt.Errorf("gif: reading frames: %v", err)
		}
		switch c {
		case sExtension:
			if err = d.readExtension(); err != nil {
				return err
			}
		case sImageDescriptor:
			if err = d.readImageDescriptor(keepAllFrames); err != nil {
				return err
			}
			if !keepAllFrames && len(d.image) == 1 {
				return nil
			}
		case sTrailer:
			// A GIF with no frames at all is invalid.
			if len(d.image) == 0 {
				return fmt.Errorf("gif: missing image data")
			}
			return nil
		default:
			return fmt.Errorf("gif: unknown block type: 0x%.2x", c)
		}
	}
}
// readHeaderAndScreenDescriptor reads the 13-byte GIF header and logical
// screen descriptor: 6-byte version, 2x2-byte dimensions, packed fields,
// background index, and pixel aspect ratio, followed by the optional
// global color table.
func (d *decoder) readHeaderAndScreenDescriptor() error {
	err := readFull(d.r, d.tmp[:13])
	if err != nil {
		return fmt.Errorf("gif: reading header: %v", err)
	}
	d.vers = string(d.tmp[:6])
	if d.vers != "GIF87a" && d.vers != "GIF89a" {
		return fmt.Errorf("gif: can't recognize format %q", d.vers)
	}
	// Dimensions are little-endian uint16s.
	d.width = int(d.tmp[6]) + int(d.tmp[7])<<8
	d.height = int(d.tmp[8]) + int(d.tmp[9])<<8
	if fields := d.tmp[10]; fields&fColorTable != 0 {
		d.backgroundIndex = d.tmp[11]
		// readColorTable overwrites the contents of d.tmp, but that's OK.
		if d.globalColorTable, err = d.readColorTable(fields); err != nil {
			return err
		}
	}
	// d.tmp[12] is the Pixel Aspect Ratio, which is ignored.
	return nil
}
// readColorTable reads a color table of 2^(1+bits) RGB triples (bits being
// the low three bits of fields) into a fully opaque palette.
func (d *decoder) readColorTable(fields byte) (color.Palette, error) {
	n := 1 << (1 + uint(fields&fColorTableBitsMask))
	if err := readFull(d.r, d.tmp[:3*n]); err != nil {
		return nil, fmt.Errorf("gif: reading color table: %s", err)
	}
	p := make(color.Palette, n)
	for i := range p {
		base := 3 * i
		p[i] = color.RGBA{R: d.tmp[base], G: d.tmp[base+1], B: d.tmp[base+2], A: 0xFF}
	}
	return p, nil
}
// readExtension reads one extension block (already past the 0x21
// introducer): it dispatches on the extension label, consumes any
// fixed-size payload, handles the NETSCAPE2.0 loop-count application
// extension, and then skips remaining data sub-blocks up to the terminator.
func (d *decoder) readExtension() error {
	extension, err := readByte(d.r)
	if err != nil {
		return fmt.Errorf("gif: reading extension: %v", err)
	}
	size := 0
	switch extension {
	case eText:
		size = 13
	case eGraphicControl:
		return d.readGraphicControl()
	case eComment:
		// nothing to do but read the data.
	case eApplication:
		b, err := readByte(d.r)
		if err != nil {
			return fmt.Errorf("gif: reading extension: %v", err)
		}
		// The spec requires size be 11, but Adobe sometimes uses 10.
		size = int(b)
	default:
		return fmt.Errorf("gif: unknown extension 0x%.2x", extension)
	}
	if size > 0 {
		if err := readFull(d.r, d.tmp[:size]); err != nil {
			return fmt.Errorf("gif: reading extension: %v", err)
		}
	}

	// Application Extension with "NETSCAPE2.0" as string and 1 in data means
	// this extension defines a loop count.
	if extension == eApplication && string(d.tmp[:size]) == "NETSCAPE2.0" {
		n, err := d.readBlock()
		if err != nil {
			return fmt.Errorf("gif: reading extension: %v", err)
		}
		if n == 0 {
			return nil
		}
		if n == 3 && d.tmp[0] == 1 {
			// Loop count is a little-endian uint16 in the sub-block payload.
			d.loopCount = int(d.tmp[1]) | int(d.tmp[2])<<8
		}
	}
	// Skip any remaining data sub-blocks until the block terminator.
	for {
		n, err := d.readBlock()
		if err != nil {
			return fmt.Errorf("gif: reading extension: %v", err)
		}
		if n == 0 {
			return nil
		}
	}
}
// readGraphicControl reads a Graphic Control Extension body: block size (4),
// packed flags, a little-endian delay time, the transparent color index, and
// the block terminator. The decoded values apply to the next frame only.
func (d *decoder) readGraphicControl() error {
	if err := readFull(d.r, d.tmp[:6]); err != nil {
		return fmt.Errorf("gif: can't read graphic control: %s", err)
	}
	if d.tmp[0] != 4 {
		return fmt.Errorf("gif: invalid graphic control extension block size: %d", d.tmp[0])
	}
	flags := d.tmp[1]
	d.disposalMethod = (flags & gcDisposalMethodMask) >> 2
	d.delayTime = int(d.tmp[2]) | int(d.tmp[3])<<8
	if flags&gcTransparentColorSet != 0 {
		d.transparentIndex = d.tmp[4]
		d.hasTransparentIndex = true
	}
	if d.tmp[5] != 0 {
		return fmt.Errorf("gif: invalid graphic control extension block terminator: %d", d.tmp[5])
	}
	return nil
}
// readImageDescriptor reads one frame: descriptor, (local or global) color
// table, transparency patch-up, and LZW-compressed pixel data. The decoded
// frame is appended to d.image/d.delay/d.disposal (only the first frame if
// keepAllFrames is false).
func (d *decoder) readImageDescriptor(keepAllFrames bool) error {
	m, err := d.newImageFromDescriptor()
	if err != nil {
		return err
	}
	useLocalColorTable := d.imageFields&fColorTable != 0
	if useLocalColorTable {
		m.Palette, err = d.readColorTable(d.imageFields)
		if err != nil {
			return err
		}
	} else {
		if d.globalColorTable == nil {
			return errors.New("gif: no color table")
		}
		m.Palette = d.globalColorTable
	}
	if d.hasTransparentIndex {
		if !useLocalColorTable {
			// Clone the global color table.
			m.Palette = append(color.Palette(nil), d.globalColorTable...)
		}
		if ti := int(d.transparentIndex); ti < len(m.Palette) {
			m.Palette[ti] = color.RGBA{}
		} else {
			// The transparentIndex is out of range, which is an error
			// according to the spec, but Firefox and Google Chrome
			// seem OK with this, so we enlarge the palette with
			// transparent colors. See golang.org/issue/15059.
			p := make(color.Palette, ti+1)
			copy(p, m.Palette)
			for i := len(m.Palette); i < len(p); i++ {
				p[i] = color.RGBA{}
			}
			m.Palette = p
		}
	}
	litWidth, err := readByte(d.r)
	if err != nil {
		return fmt.Errorf("gif: reading image data: %v", err)
	}
	if litWidth < 2 || litWidth > 8 {
		return fmt.Errorf("gif: pixel size in decode out of range: %d", litWidth)
	}
	// A wonderfully Go-like piece of magic.
	br := &blockReader{d: d}
	lzwr := lzw.NewReader(br, lzw.LSB, int(litWidth))
	defer lzwr.Close()
	if err = readFull(lzwr, m.Pix); err != nil {
		if err != io.ErrUnexpectedEOF {
			return fmt.Errorf("gif: reading image data: %v", err)
		}
		return errNotEnough
	}

	// In theory, both lzwr and br should be exhausted. Reading from them
	// should yield (0, io.EOF).
	//
	// The spec (Appendix F - Compression), says that "An End of
	// Information code... must be the last code output by the encoder
	// for an image". In practice, though, giflib (a widely used C
	// library) does not enforce this, so we also accept lzwr returning
	// io.ErrUnexpectedEOF (meaning that the encoded stream hit io.EOF
	// before the LZW decoder saw an explicit end code), provided that
	// the io.ReadFull call above successfully read len(m.Pix) bytes.
	// See https://golang.org/issue/9856 for an example GIF.
	if n, err := lzwr.Read(d.tmp[256:257]); n != 0 || (err != io.EOF && err != io.ErrUnexpectedEOF) {
		if err != nil {
			return fmt.Errorf("gif: reading image data: %v", err)
		}
		return errTooMuch
	}

	// In practice, some GIFs have an extra byte in the data sub-block
	// stream, which we ignore. See https://golang.org/issue/16146.
	if err := br.close(); err == errTooMuch {
		return errTooMuch
	} else if err != nil {
		return fmt.Errorf("gif: reading image data: %v", err)
	}

	// Check that the color indexes are inside the palette.
	if len(m.Palette) < 256 {
		for _, pixel := range m.Pix {
			if int(pixel) >= len(m.Palette) {
				return errBadPixel
			}
		}
	}

	// Undo the interlacing if necessary.
	if d.imageFields&fInterlace != 0 {
		uninterlace(m)
	}

	if keepAllFrames || len(d.image) == 0 {
		d.image = append(d.image, m)
		d.delay = append(d.delay, d.delayTime)
		d.disposal = append(d.disposal, d.disposalMethod)
	}
	// The GIF89a spec, Section 23 (Graphic Control Extension) says:
	// "The scope of this extension is the first graphic rendering block
	// to follow." We therefore reset the GCE fields to zero.
	d.delayTime = 0
	d.hasTransparentIndex = false
	return nil
}
// newImageFromDescriptor reads the 9-byte image descriptor (left, top,
// width, height as little-endian uint16s, plus the packed fields byte) and
// returns an empty Paletted image with the frame's bounds, after validating
// that the frame fits inside the logical screen.
func (d *decoder) newImageFromDescriptor() (*image.Paletted, error) {
	if err := readFull(d.r, d.tmp[:9]); err != nil {
		return nil, fmt.Errorf("gif: can't read image descriptor: %s", err)
	}
	left := int(d.tmp[0]) + int(d.tmp[1])<<8
	top := int(d.tmp[2]) + int(d.tmp[3])<<8
	width := int(d.tmp[4]) + int(d.tmp[5])<<8
	height := int(d.tmp[6]) + int(d.tmp[7])<<8
	d.imageFields = d.tmp[8]

	// The GIF89a spec, Section 20 (Image Descriptor) says: "Each image must
	// fit within the boundaries of the Logical Screen, as defined in the
	// Logical Screen Descriptor."
	//
	// This is conceptually similar to testing
	//	frameBounds := image.Rect(left, top, left+width, top+height)
	//	imageBounds := image.Rect(0, 0, d.width, d.height)
	//	if !frameBounds.In(imageBounds) { etc }
	// but the semantics of the Go image.Rectangle type is that r.In(s) is true
	// whenever r is an empty rectangle, even if r.Min.X > s.Max.X. Here, we
	// want something stricter.
	//
	// Note that, by construction, left >= 0 && top >= 0, so we only have to
	// explicitly compare frameBounds.Max (left+width, top+height) against
	// imageBounds.Max (d.width, d.height) and not frameBounds.Min (left, top)
	// against imageBounds.Min (0, 0).
	if left+width > d.width || top+height > d.height {
		return nil, errors.New("gif: frame bounds larger than image bounds")
	}
	return image.NewPaletted(image.Rectangle{
		Min: image.Point{left, top},
		Max: image.Point{left + width, top + height},
	}, nil), nil
}
// readBlock reads a single data sub-block (a length byte followed by that
// many bytes) into d.tmp and returns its length; a zero length is the
// block terminator.
func (d *decoder) readBlock() (int, error) {
	n, err := readByte(d.r)
	if err != nil || n == 0 {
		return 0, err
	}
	if err = readFull(d.r, d.tmp[:n]); err != nil {
		return 0, err
	}
	return int(n), nil
}
// interlaceScan defines the ordering for a pass of the interlace algorithm.
type interlaceScan struct {
skip, start int
}
// interlacing represents the set of scans in an interlaced GIF image.
var interlacing = []interlaceScan{
{8, 0}, // Group 1 : Every 8th. row, starting with row 0.
{8, 4}, // Group 2 : Every 8th. row, starting with row 4.
{4, 2}, // Group 3 : Every 4th. row, starting with row 2.
{2, 1}, // Group 4 : Every 2nd. row, starting with row 1.
}
// uninterlace rearranges the pixels in m to account for interlaced input.
func uninterlace(m *image.Paletted) {
var nPix []uint8
dx := m.Bounds().Dx()
dy := m.Bounds().Dy()
nPix = make([]uint8, dx*dy)
offset := 0 // steps through the input by sequential scan lines.
for _, pass := range interlacing {
nOffset := pass.start * dx // steps through the output as defined by pass.
for y := pass.start; y < dy; y += pass.skip {
copy(nPix[nOffset:nOffset+dx], m.Pix[offset:offset+dx])
offset += dx
nOffset += dx * pass.skip
}
}
m.Pix = nPix
}
// Decode reads a GIF image from r and returns the first embedded
// image as an [image.Image].
func Decode(r io.Reader) (image.Image, error) {
	d := new(decoder)
	// configOnly=false, keepAllFrames=false: decode only the first frame.
	if err := d.decode(r, false, false); err != nil {
		return nil, err
	}
	return d.image[0], nil
}
// GIF represents the possibly multiple images stored in a GIF file.
type GIF struct {
	Image []*image.Paletted // The successive images.
	Delay []int             // The successive delay times, one per frame, in 100ths of a second.
	// LoopCount controls the number of times an animation will be
	// restarted during display.
	// A LoopCount of 0 means to loop forever.
	// A LoopCount of -1 means to show each frame only once.
	// Otherwise, the animation is looped LoopCount+1 times.
	LoopCount int
	// Disposal is the successive disposal methods, one per frame. For
	// backwards compatibility, a nil Disposal is valid to pass to EncodeAll,
	// and implies that each frame's disposal method is 0 (no disposal
	// specified).
	Disposal []byte
	// Config is the global color table (palette), width and height. A nil or
	// empty-color.Palette Config.ColorModel means that each frame has its own
	// color table and there is no global color table. Each frame's bounds must
	// be within the rectangle defined by the two points (0, 0) and
	// (Config.Width, Config.Height).
	//
	// For backwards compatibility, a zero-valued Config is valid to pass to
	// EncodeAll, and implies that the overall GIF's width and height equals
	// the first frame's bounds' Rectangle.Max point.
	Config image.Config
	// BackgroundIndex is the background index in the global color table, for
	// use with the DisposalBackground disposal method.
	BackgroundIndex byte
}
// DecodeAll reads a GIF image from r and returns the sequential frames
// and timing information.
func DecodeAll(r io.Reader) (*GIF, error) {
	var d decoder
	// configOnly=false, keepAllFrames=true: decode every frame.
	if err := d.decode(r, false, true); err != nil {
		return nil, err
	}
	out := &GIF{
		Image:           d.image,
		LoopCount:       d.loopCount,
		Delay:           d.delay,
		Disposal:        d.disposal,
		BackgroundIndex: d.backgroundIndex,
	}
	out.Config = image.Config{
		ColorModel: d.globalColorTable,
		Width:      d.width,
		Height:     d.height,
	}
	return out, nil
}
// DecodeConfig returns the global color model and dimensions of a GIF image
// without decoding the entire image.
func DecodeConfig(r io.Reader) (image.Config, error) {
	var d decoder
	// configOnly=true: stop after the header and screen descriptor.
	err := d.decode(r, true, false)
	if err != nil {
		return image.Config{}, err
	}
	cfg := image.Config{
		ColorModel: d.globalColorTable,
		Width:      d.width,
		Height:     d.height,
	}
	return cfg, nil
}
// init registers the GIF format with the image package; the '?' wildcard
// in the magic string matches both GIF87a and GIF89a headers.
func init() {
	image.RegisterFormat("gif", "GIF8?a", Decode, DecodeConfig)
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gif
import (
"bufio"
"bytes"
"compress/lzw"
"errors"
"image"
"image/color"
"image/color/palette"
"image/draw"
"internal/byteorder"
"io"
"math/bits"
)
// Graphic control extension fields.
const (
	gcLabel     = 0xF9 // Graphic Control Label byte following the extension introducer
	gcBlockSize = 0x04 // fixed size of the graphic control block body
)
// log2 returns the smallest n >= 0 such that 2^(n+1) >= x; it is used to
// size GIF color tables, which hold 2^(n+1) entries.
func log2(x int) int {
	n := 0
	for 1<<(uint(n)+1) < x {
		n++
	}
	return n
}
// writer is a buffered writer.
type writer interface {
	Flush() error
	io.Writer
	io.ByteWriter
}

// encoder encodes an image to the GIF format.
type encoder struct {
	// w is the writer to write to. err is the first error encountered during
	// writing. All attempted writes after the first error become no-ops.
	w   writer
	err error
	// g is a reference to the data that is being encoded.
	g GIF
	// globalCT is the size in bytes of the global color table.
	globalCT int
	// buf is a scratch buffer. It must be at least 256 for the blockWriter.
	buf [256]byte
	// globalColorTable and localColorTable are scratch space for encoded
	// color tables; 3*256 bytes holds the largest possible (256-entry) table.
	globalColorTable [3 * 256]byte
	localColorTable  [3 * 256]byte
}

// blockWriter writes the block structure of GIF image data, which
// comprises (n, (n bytes)) blocks, with 1 <= n <= 255. It is the
// writer given to the LZW encoder, which is thus immune to the
// blocking.
//
// It is a value type wrapping the encoder: e.buf[0] holds the current
// sub-block's length and e.buf[1:1+n] its pending bytes.
type blockWriter struct {
	e *encoder
}
// setup resets the pending sub-block: e.buf[0] is the count of buffered
// bytes, stored where the sub-block's length byte will eventually go.
func (b blockWriter) setup() {
	b.e.buf[0] = 0
}

// Flush reports the encoder's sticky error; buffered sub-block bytes are
// only written out by WriteByte (on a full block) or close.
func (b blockWriter) Flush() error {
	return b.e.err
}

// WriteByte appends c to the pending sub-block, emitting the block
// (length byte plus 255 data bytes) when it fills.
func (b blockWriter) WriteByte(c byte) error {
	if b.e.err != nil {
		return b.e.err
	}

	// Append c to buffered sub-block.
	b.e.buf[0]++
	b.e.buf[b.e.buf[0]] = c
	if b.e.buf[0] < 255 {
		return nil
	}

	// Flush block
	b.e.write(b.e.buf[:256])
	b.e.buf[0] = 0
	return b.e.err
}
// blockWriter must be an io.Writer for lzw.NewWriter, but this is never
// actually called.
func (b blockWriter) Write(data []byte) (int, error) {
	written := 0
	for _, c := range data {
		if err := b.WriteByte(c); err != nil {
			// written equals the index of the byte that failed.
			return written, err
		}
		written++
	}
	return written, nil
}
// close writes the block terminator (0x00), either by itself, or along with a
// pending sub-block, and flushes the underlying writer.
func (b blockWriter) close() {
	if b.e.buf[0] == 0 {
		// No pending bytes: emit just the terminator.
		b.e.writeByte(0)
	} else {
		// Emit the pending sub-block (length byte at buf[0], data after)
		// immediately followed by the terminator.
		n := uint(b.e.buf[0])
		b.e.buf[n+1] = 0
		b.e.write(b.e.buf[:n+2])
	}
	b.e.flush()
}
// flush flushes the buffered writer; a no-op once an error is recorded.
func (e *encoder) flush() {
	if e.err != nil {
		return
	}
	e.err = e.w.Flush()
}

// write writes p, recording the first error; subsequent calls are no-ops.
func (e *encoder) write(p []byte) {
	if e.err != nil {
		return
	}
	_, e.err = e.w.Write(p)
}

// writeByte writes b, recording the first error; subsequent calls are no-ops.
func (e *encoder) writeByte(b byte) {
	if e.err != nil {
		return
	}
	e.err = e.w.WriteByte(b)
}
// writeHeader writes the GIF89a signature, the logical screen descriptor,
// the optional global color table, and — for animations with a
// non-negative loop count — the NETSCAPE2.0 looping application extension.
func (e *encoder) writeHeader() {
	if e.err != nil {
		return
	}
	_, e.err = io.WriteString(e.w, "GIF89a")
	if e.err != nil {
		return
	}

	// Logical screen width and height.
	byteorder.LEPutUint16(e.buf[0:2], uint16(e.g.Config.Width))
	byteorder.LEPutUint16(e.buf[2:4], uint16(e.g.Config.Height))
	e.write(e.buf[:4])

	if p, ok := e.g.Config.ColorModel.(color.Palette); ok && len(p) > 0 {
		paddedSize := log2(len(p)) // Size of Global Color Table: 2^(1+n).
		e.buf[0] = fColorTable | uint8(paddedSize)
		e.buf[1] = e.g.BackgroundIndex
		e.buf[2] = 0x00 // Pixel Aspect Ratio.
		e.write(e.buf[:3])
		var err error
		e.globalCT, err = encodeColorTable(e.globalColorTable[:], p, paddedSize)
		if err != nil && e.err == nil {
			e.err = err
			return
		}
		e.write(e.globalColorTable[:e.globalCT])
	} else {
		// All frames have a local color table, so a global color table
		// is not needed.
		e.buf[0] = 0x00
		e.buf[1] = 0x00 // Background Color Index.
		e.buf[2] = 0x00 // Pixel Aspect Ratio.
		e.write(e.buf[:3])
	}

	// Add animation info if necessary.
	if len(e.g.Image) > 1 && e.g.LoopCount >= 0 {
		e.buf[0] = 0x21 // Extension Introducer.
		e.buf[1] = 0xff // Application Label.
		e.buf[2] = 0x0b // Block Size.
		e.write(e.buf[:3])
		_, err := io.WriteString(e.w, "NETSCAPE2.0") // Application Identifier.
		if err != nil && e.err == nil {
			e.err = err
			return
		}
		e.buf[0] = 0x03 // Block Size.
		e.buf[1] = 0x01 // Sub-block Index.
		byteorder.LEPutUint16(e.buf[2:4], uint16(e.g.LoopCount))
		e.buf[4] = 0x00 // Block Terminator.
		e.write(e.buf[:5])
	}
}
func encodeColorTable(dst []byte, p color.Palette, size int) (int, error) {
if uint(size) >= 8 {
return 0, errors.New("gif: cannot encode color table with more than 256 entries")
}
for i, c := range p {
if c == nil {
return 0, errors.New("gif: cannot encode color table with nil entries")
}
var r, g, b uint8
// It is most likely that the palette is full of color.RGBAs, so they
// get a fast path.
if rgba, ok := c.(color.RGBA); ok {
r, g, b = rgba.R, rgba.G, rgba.B
} else {
rr, gg, bb, _ := c.RGBA()
r, g, b = uint8(rr>>8), uint8(gg>>8), uint8(bb>>8)
}
dst[3*i+0] = r
dst[3*i+1] = g
dst[3*i+2] = b
}
n := 1 << (size + 1)
if n > len(p) {
// Pad with black.
clear(dst[3*len(p) : 3*n])
}
return 3 * n, nil
}
// colorTablesMatch reports whether the first localLen entries of the local
// color table equal the global color table, ignoring the entry at
// transparentIndex (if non-negative), which DecodeAll may have replaced
// with a transparent color.
func (e *encoder) colorTablesMatch(localLen, transparentIndex int) bool {
	localSize := 3 * localLen
	if transparentIndex >= 0 {
		trOff := 3 * transparentIndex
		// Compare the bytes before and after the 3-byte transparent entry.
		return bytes.Equal(e.globalColorTable[:trOff], e.localColorTable[:trOff]) &&
			bytes.Equal(e.globalColorTable[trOff+3:localSize], e.localColorTable[trOff+3:localSize])
	}
	return bytes.Equal(e.globalColorTable[:localSize], e.localColorTable[:localSize])
}
// writeImageBlock writes one frame: an optional graphic control extension
// (for delay, disposal or transparency), the image descriptor, the local
// color table when it differs from the global one, and the LZW-compressed
// pixel data as a sequence of sub-blocks.
func (e *encoder) writeImageBlock(pm *image.Paletted, delay int, disposal byte) {
	if e.err != nil {
		return
	}

	if len(pm.Palette) == 0 {
		e.err = errors.New("gif: cannot encode image block with empty palette")
		return
	}

	b := pm.Bounds()
	// Frame coordinates are encoded as uint16s on the wire.
	if b.Min.X < 0 || b.Max.X >= 1<<16 || b.Min.Y < 0 || b.Max.Y >= 1<<16 {
		e.err = errors.New("gif: image block is too large to encode")
		return
	}
	if !b.In(image.Rectangle{Max: image.Point{e.g.Config.Width, e.g.Config.Height}}) {
		e.err = errors.New("gif: image block is out of bounds")
		return
	}

	// The first fully transparent palette entry, if any, becomes the
	// frame's transparent color index.
	transparentIndex := -1
	for i, c := range pm.Palette {
		if c == nil {
			e.err = errors.New("gif: cannot encode color table with nil entries")
			return
		}
		if _, _, _, a := c.RGBA(); a == 0 {
			transparentIndex = i
			break
		}
	}

	if delay > 0 || disposal != 0 || transparentIndex != -1 {
		e.buf[0] = sExtension  // Extension Introducer.
		e.buf[1] = gcLabel     // Graphic Control Label.
		e.buf[2] = gcBlockSize // Block Size.
		if transparentIndex != -1 {
			e.buf[3] = 0x01 | disposal<<2
		} else {
			e.buf[3] = 0x00 | disposal<<2
		}
		byteorder.LEPutUint16(e.buf[4:6], uint16(delay)) // Delay Time (1/100ths of a second)

		// Transparent color index.
		if transparentIndex != -1 {
			e.buf[6] = uint8(transparentIndex)
		} else {
			e.buf[6] = 0x00
		}
		e.buf[7] = 0x00 // Block Terminator.
		e.write(e.buf[:8])
	}
	e.buf[0] = sImageDescriptor
	byteorder.LEPutUint16(e.buf[1:3], uint16(b.Min.X))
	byteorder.LEPutUint16(e.buf[3:5], uint16(b.Min.Y))
	byteorder.LEPutUint16(e.buf[5:7], uint16(b.Dx()))
	byteorder.LEPutUint16(e.buf[7:9], uint16(b.Dy()))
	e.write(e.buf[:9])

	// To determine whether or not this frame's palette is the same as the
	// global palette, we can check a couple things. First, do they actually
	// point to the same []color.Color? If so, they are equal so long as the
	// frame's palette is not longer than the global palette...
	paddedSize := log2(len(pm.Palette)) // Size of Local Color Table: 2^(1+n).
	if gp, ok := e.g.Config.ColorModel.(color.Palette); ok && len(pm.Palette) <= len(gp) && &gp[0] == &pm.Palette[0] {
		e.writeByte(0) // Use the global color table.
	} else {
		ct, err := encodeColorTable(e.localColorTable[:], pm.Palette, paddedSize)
		if err != nil {
			if e.err == nil {
				e.err = err
			}
			return
		}
		// This frame's palette is not the very same slice as the global
		// palette, but it might be a copy, possibly with one value turned into
		// transparency by DecodeAll.
		if ct <= e.globalCT && e.colorTablesMatch(len(pm.Palette), transparentIndex) {
			e.writeByte(0) // Use the global color table.
		} else {
			// Use a local color table.
			e.writeByte(fColorTable | uint8(paddedSize))
			e.write(e.localColorTable[:ct])
		}
	}

	litWidth := paddedSize + 1
	if litWidth < 2 {
		litWidth = 2
	}
	e.writeByte(uint8(litWidth)) // LZW Minimum Code Size.

	bw := blockWriter{e: e}
	bw.setup()
	lzww := lzw.NewWriter(bw, lzw.LSB, litWidth)
	if dx := b.Dx(); dx == pm.Stride {
		// The frame's rows are contiguous in pm.Pix; compress in one call.
		_, e.err = lzww.Write(pm.Pix[:dx*b.Dy()])
		if e.err != nil {
			lzww.Close()
			return
		}
	} else {
		// Rows are padded to pm.Stride; compress row by row.
		for i, y := 0, b.Min.Y; y < b.Max.Y; i, y = i+pm.Stride, y+1 {
			_, e.err = lzww.Write(pm.Pix[i : i+dx])
			if e.err != nil {
				lzww.Close()
				return
			}
		}
	}
	lzww.Close() // flush to bw
	bw.close()   // flush to e.w
}
// Options are the encoding parameters.
// The zero value selects the defaults described on each field.
type Options struct {
	// NumColors is the maximum number of colors used in the image.
	// It ranges from 1 to 256.
	// Encode treats values outside that range as 256.
	NumColors int

	// Quantizer is used to produce a palette with size NumColors.
	// palette.Plan9 is used in place of a nil Quantizer.
	Quantizer draw.Quantizer

	// Drawer is used to convert the source image to the desired palette.
	// draw.FloydSteinberg is used in place of a nil Drawer.
	Drawer draw.Drawer
}
// EncodeAll writes the images in g to w in GIF format with the
// given loop count and delay between frames.
func EncodeAll(w io.Writer, g *GIF) error {
	if len(g.Image) == 0 {
		return errors.New("gif: must provide at least one image")
	}
	if len(g.Image) != len(g.Delay) {
		return errors.New("gif: mismatched image and delay lengths")
	}
	enc := encoder{g: *g}
	// The GIF.Disposal, GIF.Config and GIF.BackgroundIndex fields were added
	// in Go 1.5. Valid Go 1.4 code, such as when the Disposal field is omitted
	// in a GIF struct literal, should still produce valid GIFs.
	if enc.g.Disposal != nil && len(enc.g.Image) != len(enc.g.Disposal) {
		return errors.New("gif: mismatched image and disposal lengths")
	}
	switch {
	case enc.g.Config == (image.Config{}):
		// No explicit config: size the logical screen to the first frame.
		pt := g.Image[0].Bounds().Max
		enc.g.Config.Width = pt.X
		enc.g.Config.Height = pt.Y
	case enc.g.Config.ColorModel != nil:
		if _, ok := enc.g.Config.ColorModel.(color.Palette); !ok {
			return errors.New("gif: GIF color model must be a color.Palette")
		}
	}
	// Reuse the destination directly when it already satisfies the internal
	// writer interface; otherwise wrap it in a buffered writer.
	ww, ok := w.(writer)
	if !ok {
		ww = bufio.NewWriter(w)
	}
	enc.w = ww
	enc.writeHeader()
	for i := range g.Image {
		var disposal byte
		if g.Disposal != nil {
			disposal = g.Disposal[i]
		}
		enc.writeImageBlock(g.Image[i], g.Delay[i], disposal)
	}
	enc.writeByte(sTrailer)
	enc.flush()
	return enc.err
}
// Encode writes the Image m to w in GIF format.
func Encode(w io.Writer, m image.Image, o *Options) error {
	// GIF dimensions are unsigned 16-bit values.
	b := m.Bounds()
	if b.Dx() >= 1<<16 || b.Dy() >= 1<<16 {
		return errors.New("gif: image is too large to encode")
	}
	var opts Options
	if o != nil {
		opts = *o
	}
	if opts.NumColors < 1 || 256 < opts.NumColors {
		opts.NumColors = 256
	}
	if opts.Drawer == nil {
		opts.Drawer = draw.FloydSteinberg
	}
	paletted, _ := m.(*image.Paletted)
	if paletted == nil {
		// A source with a palette color model that isn't an *image.Paletted
		// can be copied pixel by pixel without quantization.
		if cp, ok := m.ColorModel().(color.Palette); ok {
			paletted = image.NewPaletted(b, cp)
			for y := b.Min.Y; y < b.Max.Y; y++ {
				for x := b.Min.X; x < b.Max.X; x++ {
					paletted.Set(x, y, cp.Convert(m.At(x, y)))
				}
			}
		}
	}
	if paletted == nil || len(paletted.Palette) > opts.NumColors {
		// Make a palettedized copy of m, including its bounds, which
		// might not start at (0, 0).
		//
		// TODO: Pick a better sub-sample of the Plan 9 palette.
		paletted = image.NewPaletted(b, palette.Plan9[:opts.NumColors])
		if opts.Quantizer != nil {
			paletted.Palette = opts.Quantizer.Quantize(make(color.Palette, 0, opts.NumColors), m)
		}
		opts.Drawer.Draw(paletted, b, m, b.Min)
	}
	// When calling Encode instead of EncodeAll, the single-frame image is
	// translated such that its top-left corner is (0, 0), so that the single
	// frame completely fills the overall GIF's bounds.
	if paletted.Rect.Min != (image.Point{}) {
		shifted := *paletted
		shifted.Rect = shifted.Rect.Sub(shifted.Rect.Min)
		paletted = &shifted
	}
	return EncodeAll(w, &GIF{
		Image: []*image.Paletted{paletted},
		Delay: []int{0},
		Config: image.Config{
			ColorModel: paletted.Palette,
			Width:      b.Dx(),
			Height:     b.Dy(),
		},
	})
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package image implements a basic 2-D image library.
//
// The fundamental interface is called [Image]. An [Image] contains colors, which
// are described in the image/color package.
//
// Values of the [Image] interface are created either by calling functions such
// as [NewRGBA] and [NewPaletted], or by calling [Decode] on an [io.Reader] containing
// image data in a format such as GIF, JPEG or PNG. Decoding any particular
// image format requires the prior registration of a decoder function.
// Registration is typically automatic as a side effect of initializing that
// format's package so that, to decode a PNG image, it suffices to have
//
// import _ "image/png"
//
// in a program's main package. The _ means to import a package purely for its
// initialization side effects.
//
// See "The Go image package" for more details:
// https://golang.org/doc/articles/image_package.html
//
// # Security Considerations
//
// The image package can be used to parse arbitrarily large images, which can
// cause resource exhaustion on machines which do not have enough memory to
// store them. When operating on arbitrary images, [DecodeConfig] should be called
// before [Decode], so that the program can decide whether the image, as defined
// in the returned header, can be safely decoded with the available resources. A
// call to [Decode] which produces an extremely large image, as defined in the
// header returned by [DecodeConfig], is not considered a security issue,
// regardless of whether the image is itself malformed or not. A call to
// [DecodeConfig] which returns a header which does not match the image returned
// by [Decode] may be considered a security issue, and should be reported per the
// [Go Security Policy].
//
// [Go Security Policy]: https://go.dev/security/policy
package image
import (
"image/color"
)
// Config holds an image's color model and dimensions.
type Config struct {
	// ColorModel is the image's color model.
	ColorModel color.Model
	// Width and Height are the image's dimensions in pixels.
	Width, Height int
}
// Image is a finite rectangular grid of [color.Color] values taken from a color
// model.
//
// The concrete pixel types in this package (such as [RGBA] and [Gray])
// implement this interface.
type Image interface {
	// ColorModel returns the Image's color model.
	ColorModel() color.Model
	// Bounds returns the domain for which At can return non-zero color.
	// The bounds do not necessarily contain the point (0, 0).
	Bounds() Rectangle
	// At returns the color of the pixel at (x, y).
	// At(Bounds().Min.X, Bounds().Min.Y) returns the upper-left pixel of the grid.
	// At(Bounds().Max.X-1, Bounds().Max.Y-1) returns the lower-right one.
	At(x, y int) color.Color
}
// RGBA64Image is an [Image] whose pixels can be converted directly to a
// color.RGBA64.
type RGBA64Image interface {
	// RGBA64At returns the RGBA64 color of the pixel at (x, y). It is
	// equivalent to calling At(x, y).RGBA() and converting the resulting
	// 32-bit return values to a color.RGBA64, but it can avoid allocations
	// from converting concrete color types to the color.Color interface type.
	RGBA64At(x, y int) color.RGBA64
	Image
}
// PalettedImage is an image whose colors may come from a limited palette.
// If m is a PalettedImage and m.ColorModel() returns a [color.Palette] p,
// then m.At(x, y) should be equivalent to p[m.ColorIndexAt(x, y)]. If m's
// color model is not a color.Palette, then ColorIndexAt's behavior is
// undefined.
type PalettedImage interface {
	// ColorIndexAt returns the palette index of the pixel at (x, y).
	ColorIndexAt(x, y int) uint8
	Image
}
// pixelBufferLength returns the length of the []uint8 typed Pix slice field
// for the NewXxx functions. Conceptually, this is just (bpp * width * height),
// but this function panics if at least one of those is negative or if the
// computation would overflow the int type.
//
// This panics instead of returning an error because of backwards
// compatibility. The NewXxx functions do not return an error.
func pixelBufferLength(bytesPerPixel int, r Rectangle, imageTypeName string) int {
	// mul3NonNeg reports overflow or a negative operand as a negative result.
	if n := mul3NonNeg(bytesPerPixel, r.Dx(), r.Dy()); n >= 0 {
		return n
	}
	panic("image: New" + imageTypeName + " Rectangle has huge or negative dimensions")
}
// RGBA is an in-memory image whose At method returns [color.RGBA] values.
type RGBA struct {
	// Pix holds the image's pixels, in R, G, B, A order. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

// ColorModel returns [color.RGBAModel].
func (img *RGBA) ColorModel() color.Model { return color.RGBAModel }

// Bounds returns the image's bounds.
func (img *RGBA) Bounds() Rectangle { return img.Rect }

// At returns the color of the pixel at (x, y).
func (img *RGBA) At(x, y int) color.Color {
	return img.RGBAAt(x, y)
}

// RGBA64At returns the pixel at (x, y) widened to 16 bits per channel.
// Out-of-bounds coordinates yield the zero color.
func (img *RGBA) RGBA64At(x, y int) color.RGBA64 {
	if !(Point{x, y}.In(img.Rect)) {
		return color.RGBA64{}
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
	// Multiplying an 8-bit value by 0x101 replicates it into both bytes,
	// the same as (v<<8)|v.
	return color.RGBA64{
		R: uint16(px[0]) * 0x101,
		G: uint16(px[1]) * 0x101,
		B: uint16(px[2]) * 0x101,
		A: uint16(px[3]) * 0x101,
	}
}

// RGBAAt returns the [color.RGBA] of the pixel at (x, y).
// Out-of-bounds coordinates yield the zero color.
func (img *RGBA) RGBAAt(x, y int) color.RGBA {
	if !(Point{x, y}.In(img.Rect)) {
		return color.RGBA{}
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
	return color.RGBA{R: px[0], G: px[1], B: px[2], A: px[3]}
}

// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (img *RGBA) PixOffset(x, y int) int {
	return (y-img.Rect.Min.Y)*img.Stride + (x-img.Rect.Min.X)*4
}

// Set sets the pixel at (x, y) to c, converted to the RGBA color model.
// Out-of-bounds coordinates are a no-op.
func (img *RGBA) Set(x, y int, c color.Color) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	rgba := color.RGBAModel.Convert(c).(color.RGBA)
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1], px[2], px[3] = rgba.R, rgba.G, rgba.B, rgba.A
}

// SetRGBA64 sets the pixel at (x, y) to c, keeping only the high byte of
// each 16-bit channel. Out-of-bounds coordinates are a no-op.
func (img *RGBA) SetRGBA64(x, y int, c color.RGBA64) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1], px[2], px[3] = uint8(c.R>>8), uint8(c.G>>8), uint8(c.B>>8), uint8(c.A>>8)
}

// SetRGBA sets the pixel at (x, y) to c without any conversion.
// Out-of-bounds coordinates are a no-op.
func (img *RGBA) SetRGBA(x, y int, c color.RGBA) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1], px[2], px[3] = c.R, c.G, c.B, c.A
}

// SubImage returns an image representing the portion of img visible
// through r. The returned value shares pixels with the original image.
func (img *RGBA) SubImage(r Rectangle) Image {
	r = r.Intersect(img.Rect)
	// An empty intersection is not guaranteed to lie inside either operand;
	// guard explicitly so the Pix[i:] expression below cannot panic.
	if r.Empty() {
		return &RGBA{}
	}
	return &RGBA{
		Pix:    img.Pix[img.PixOffset(r.Min.X, r.Min.Y):],
		Stride: img.Stride,
		Rect:   r,
	}
}

// Opaque scans the entire image and reports whether it is fully opaque.
func (img *RGBA) Opaque() bool {
	if img.Rect.Empty() {
		return true
	}
	// Walk only the alpha bytes: offset 3 within each 4-byte pixel.
	lo, hi := 3, img.Rect.Dx()*4
	for y := img.Rect.Min.Y; y < img.Rect.Max.Y; y++ {
		for i := lo; i < hi; i += 4 {
			if img.Pix[i] != 0xff {
				return false
			}
		}
		lo += img.Stride
		hi += img.Stride
	}
	return true
}

// NewRGBA returns a new [RGBA] image with the given bounds.
func NewRGBA(r Rectangle) *RGBA {
	return &RGBA{
		Pix:    make([]uint8, pixelBufferLength(4, r, "RGBA")),
		Stride: 4 * r.Dx(),
		Rect:   r,
	}
}
// RGBA64 is an in-memory image whose At method returns [color.RGBA64] values.
type RGBA64 struct {
	// Pix holds the image's pixels, in R, G, B, A order and big-endian format. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*8].
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

// ColorModel returns [color.RGBA64Model].
func (img *RGBA64) ColorModel() color.Model { return color.RGBA64Model }

// Bounds returns the image's bounds.
func (img *RGBA64) Bounds() Rectangle { return img.Rect }

// At returns the color of the pixel at (x, y).
func (img *RGBA64) At(x, y int) color.Color {
	return img.RGBA64At(x, y)
}

// RGBA64At returns the [color.RGBA64] of the pixel at (x, y).
// Out-of-bounds coordinates yield the zero color.
func (img *RGBA64) RGBA64At(x, y int) color.RGBA64 {
	if !(Point{x, y}.In(img.Rect)) {
		return color.RGBA64{}
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
	return color.RGBA64{
		R: uint16(px[0])<<8 | uint16(px[1]),
		G: uint16(px[2])<<8 | uint16(px[3]),
		B: uint16(px[4])<<8 | uint16(px[5]),
		A: uint16(px[6])<<8 | uint16(px[7]),
	}
}

// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (img *RGBA64) PixOffset(x, y int) int {
	return (y-img.Rect.Min.Y)*img.Stride + (x-img.Rect.Min.X)*8
}

// Set sets the pixel at (x, y) to c, converted to the RGBA64 color model.
// Out-of-bounds coordinates are a no-op.
func (img *RGBA64) Set(x, y int, c color.Color) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	c1 := color.RGBA64Model.Convert(c).(color.RGBA64)
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1] = uint8(c1.R>>8), uint8(c1.R)
	px[2], px[3] = uint8(c1.G>>8), uint8(c1.G)
	px[4], px[5] = uint8(c1.B>>8), uint8(c1.B)
	px[6], px[7] = uint8(c1.A>>8), uint8(c1.A)
}

// SetRGBA64 sets the pixel at (x, y) to c without any conversion.
// Out-of-bounds coordinates are a no-op.
func (img *RGBA64) SetRGBA64(x, y int, c color.RGBA64) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1] = uint8(c.R>>8), uint8(c.R)
	px[2], px[3] = uint8(c.G>>8), uint8(c.G)
	px[4], px[5] = uint8(c.B>>8), uint8(c.B)
	px[6], px[7] = uint8(c.A>>8), uint8(c.A)
}

// SubImage returns an image representing the portion of img visible
// through r. The returned value shares pixels with the original image.
func (img *RGBA64) SubImage(r Rectangle) Image {
	r = r.Intersect(img.Rect)
	// An empty intersection is not guaranteed to lie inside either operand;
	// guard explicitly so the Pix[i:] expression below cannot panic.
	if r.Empty() {
		return &RGBA64{}
	}
	return &RGBA64{
		Pix:    img.Pix[img.PixOffset(r.Min.X, r.Min.Y):],
		Stride: img.Stride,
		Rect:   r,
	}
}

// Opaque scans the entire image and reports whether it is fully opaque.
func (img *RGBA64) Opaque() bool {
	if img.Rect.Empty() {
		return true
	}
	// Walk only the two alpha bytes: offsets 6 and 7 within each 8-byte pixel.
	lo, hi := 6, img.Rect.Dx()*8
	for y := img.Rect.Min.Y; y < img.Rect.Max.Y; y++ {
		for i := lo; i < hi; i += 8 {
			if img.Pix[i+0] != 0xff || img.Pix[i+1] != 0xff {
				return false
			}
		}
		lo += img.Stride
		hi += img.Stride
	}
	return true
}

// NewRGBA64 returns a new [RGBA64] image with the given bounds.
func NewRGBA64(r Rectangle) *RGBA64 {
	return &RGBA64{
		Pix:    make([]uint8, pixelBufferLength(8, r, "RGBA64")),
		Stride: 8 * r.Dx(),
		Rect:   r,
	}
}
// NRGBA is an in-memory image whose At method returns [color.NRGBA] values.
type NRGBA struct {
	// Pix holds the image's pixels, in R, G, B, A order. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

// ColorModel returns [color.NRGBAModel].
func (img *NRGBA) ColorModel() color.Model { return color.NRGBAModel }

// Bounds returns the image's bounds.
func (img *NRGBA) Bounds() Rectangle { return img.Rect }

// At returns the color of the pixel at (x, y).
func (img *NRGBA) At(x, y int) color.Color {
	return img.NRGBAAt(x, y)
}

// RGBA64At returns the alpha-premultiplied color of the pixel at (x, y).
func (img *NRGBA) RGBA64At(x, y int) color.RGBA64 {
	r, g, b, a := img.NRGBAAt(x, y).RGBA()
	return color.RGBA64{R: uint16(r), G: uint16(g), B: uint16(b), A: uint16(a)}
}

// NRGBAAt returns the [color.NRGBA] of the pixel at (x, y).
// Out-of-bounds coordinates yield the zero color.
func (img *NRGBA) NRGBAAt(x, y int) color.NRGBA {
	if !(Point{x, y}.In(img.Rect)) {
		return color.NRGBA{}
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
	return color.NRGBA{R: px[0], G: px[1], B: px[2], A: px[3]}
}

// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (img *NRGBA) PixOffset(x, y int) int {
	return (y-img.Rect.Min.Y)*img.Stride + (x-img.Rect.Min.X)*4
}

// Set sets the pixel at (x, y) to c, converted to the NRGBA color model.
// Out-of-bounds coordinates are a no-op.
func (img *NRGBA) Set(x, y int, c color.Color) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	c1 := color.NRGBAModel.Convert(c).(color.NRGBA)
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1], px[2], px[3] = c1.R, c1.G, c1.B, c1.A
}

// SetRGBA64 sets the pixel at (x, y) to the alpha-premultiplied color c,
// un-premultiplying it first. Out-of-bounds coordinates are a no-op.
func (img *NRGBA) SetRGBA64(x, y int, c color.RGBA64) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	r, g, b, a := uint32(c.R), uint32(c.G), uint32(c.B), uint32(c.A)
	if (a != 0) && (a != 0xffff) {
		// Divide the premultiplied channels back out by alpha.
		r = (r * 0xffff) / a
		g = (g * 0xffff) / a
		b = (b * 0xffff) / a
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1], px[2], px[3] = uint8(r>>8), uint8(g>>8), uint8(b>>8), uint8(a>>8)
}

// SetNRGBA sets the pixel at (x, y) to c without any conversion.
// Out-of-bounds coordinates are a no-op.
func (img *NRGBA) SetNRGBA(x, y int, c color.NRGBA) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1], px[2], px[3] = c.R, c.G, c.B, c.A
}

// SubImage returns an image representing the portion of img visible
// through r. The returned value shares pixels with the original image.
func (img *NRGBA) SubImage(r Rectangle) Image {
	r = r.Intersect(img.Rect)
	// An empty intersection is not guaranteed to lie inside either operand;
	// guard explicitly so the Pix[i:] expression below cannot panic.
	if r.Empty() {
		return &NRGBA{}
	}
	return &NRGBA{
		Pix:    img.Pix[img.PixOffset(r.Min.X, r.Min.Y):],
		Stride: img.Stride,
		Rect:   r,
	}
}

// Opaque scans the entire image and reports whether it is fully opaque.
func (img *NRGBA) Opaque() bool {
	if img.Rect.Empty() {
		return true
	}
	// Walk only the alpha bytes: offset 3 within each 4-byte pixel.
	lo, hi := 3, img.Rect.Dx()*4
	for y := img.Rect.Min.Y; y < img.Rect.Max.Y; y++ {
		for i := lo; i < hi; i += 4 {
			if img.Pix[i] != 0xff {
				return false
			}
		}
		lo += img.Stride
		hi += img.Stride
	}
	return true
}

// NewNRGBA returns a new [NRGBA] image with the given bounds.
func NewNRGBA(r Rectangle) *NRGBA {
	return &NRGBA{
		Pix:    make([]uint8, pixelBufferLength(4, r, "NRGBA")),
		Stride: 4 * r.Dx(),
		Rect:   r,
	}
}
// NRGBA64 is an in-memory image whose At method returns [color.NRGBA64] values.
type NRGBA64 struct {
	// Pix holds the image's pixels, in R, G, B, A order and big-endian format. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*8].
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

// ColorModel returns [color.NRGBA64Model].
func (img *NRGBA64) ColorModel() color.Model { return color.NRGBA64Model }

// Bounds returns the image's bounds.
func (img *NRGBA64) Bounds() Rectangle { return img.Rect }

// At returns the color of the pixel at (x, y).
func (img *NRGBA64) At(x, y int) color.Color {
	return img.NRGBA64At(x, y)
}

// RGBA64At returns the alpha-premultiplied color of the pixel at (x, y).
func (img *NRGBA64) RGBA64At(x, y int) color.RGBA64 {
	r, g, b, a := img.NRGBA64At(x, y).RGBA()
	return color.RGBA64{R: uint16(r), G: uint16(g), B: uint16(b), A: uint16(a)}
}

// NRGBA64At returns the [color.NRGBA64] of the pixel at (x, y).
// Out-of-bounds coordinates yield the zero color.
func (img *NRGBA64) NRGBA64At(x, y int) color.NRGBA64 {
	if !(Point{x, y}.In(img.Rect)) {
		return color.NRGBA64{}
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
	return color.NRGBA64{
		R: uint16(px[0])<<8 | uint16(px[1]),
		G: uint16(px[2])<<8 | uint16(px[3]),
		B: uint16(px[4])<<8 | uint16(px[5]),
		A: uint16(px[6])<<8 | uint16(px[7]),
	}
}

// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (img *NRGBA64) PixOffset(x, y int) int {
	return (y-img.Rect.Min.Y)*img.Stride + (x-img.Rect.Min.X)*8
}

// Set sets the pixel at (x, y) to c, converted to the NRGBA64 color model.
// Out-of-bounds coordinates are a no-op.
func (img *NRGBA64) Set(x, y int, c color.Color) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	c1 := color.NRGBA64Model.Convert(c).(color.NRGBA64)
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1] = uint8(c1.R>>8), uint8(c1.R)
	px[2], px[3] = uint8(c1.G>>8), uint8(c1.G)
	px[4], px[5] = uint8(c1.B>>8), uint8(c1.B)
	px[6], px[7] = uint8(c1.A>>8), uint8(c1.A)
}

// SetRGBA64 sets the pixel at (x, y) to the alpha-premultiplied color c,
// un-premultiplying it first. Out-of-bounds coordinates are a no-op.
func (img *NRGBA64) SetRGBA64(x, y int, c color.RGBA64) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	r, g, b, a := uint32(c.R), uint32(c.G), uint32(c.B), uint32(c.A)
	if (a != 0) && (a != 0xffff) {
		// Divide the premultiplied channels back out by alpha.
		r = (r * 0xffff) / a
		g = (g * 0xffff) / a
		b = (b * 0xffff) / a
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1] = uint8(r>>8), uint8(r)
	px[2], px[3] = uint8(g>>8), uint8(g)
	px[4], px[5] = uint8(b>>8), uint8(b)
	px[6], px[7] = uint8(a>>8), uint8(a)
}

// SetNRGBA64 sets the pixel at (x, y) to c without any conversion.
// Out-of-bounds coordinates are a no-op.
func (img *NRGBA64) SetNRGBA64(x, y int, c color.NRGBA64) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	i := img.PixOffset(x, y)
	px := img.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
	px[0], px[1] = uint8(c.R>>8), uint8(c.R)
	px[2], px[3] = uint8(c.G>>8), uint8(c.G)
	px[4], px[5] = uint8(c.B>>8), uint8(c.B)
	px[6], px[7] = uint8(c.A>>8), uint8(c.A)
}

// SubImage returns an image representing the portion of img visible
// through r. The returned value shares pixels with the original image.
func (img *NRGBA64) SubImage(r Rectangle) Image {
	r = r.Intersect(img.Rect)
	// An empty intersection is not guaranteed to lie inside either operand;
	// guard explicitly so the Pix[i:] expression below cannot panic.
	if r.Empty() {
		return &NRGBA64{}
	}
	return &NRGBA64{
		Pix:    img.Pix[img.PixOffset(r.Min.X, r.Min.Y):],
		Stride: img.Stride,
		Rect:   r,
	}
}

// Opaque scans the entire image and reports whether it is fully opaque.
func (img *NRGBA64) Opaque() bool {
	if img.Rect.Empty() {
		return true
	}
	// Walk only the two alpha bytes: offsets 6 and 7 within each 8-byte pixel.
	lo, hi := 6, img.Rect.Dx()*8
	for y := img.Rect.Min.Y; y < img.Rect.Max.Y; y++ {
		for i := lo; i < hi; i += 8 {
			if img.Pix[i+0] != 0xff || img.Pix[i+1] != 0xff {
				return false
			}
		}
		lo += img.Stride
		hi += img.Stride
	}
	return true
}

// NewNRGBA64 returns a new [NRGBA64] image with the given bounds.
func NewNRGBA64(r Rectangle) *NRGBA64 {
	return &NRGBA64{
		Pix:    make([]uint8, pixelBufferLength(8, r, "NRGBA64")),
		Stride: 8 * r.Dx(),
		Rect:   r,
	}
}
// Alpha is an in-memory image whose At method returns [color.Alpha] values.
type Alpha struct {
	// Pix holds the image's pixels, as alpha values. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

// ColorModel returns [color.AlphaModel].
func (img *Alpha) ColorModel() color.Model { return color.AlphaModel }

// Bounds returns the image's bounds.
func (img *Alpha) Bounds() Rectangle { return img.Rect }

// At returns the color of the pixel at (x, y).
func (img *Alpha) At(x, y int) color.Color {
	return img.AlphaAt(x, y)
}

// RGBA64At returns the pixel at (x, y) with the 8-bit alpha replicated
// into all four 16-bit channels.
func (img *Alpha) RGBA64At(x, y int) color.RGBA64 {
	// Multiplying an 8-bit value by 0x101 replicates it into both bytes,
	// the same as a |= a<<8.
	a := uint16(img.AlphaAt(x, y).A) * 0x101
	return color.RGBA64{R: a, G: a, B: a, A: a}
}

// AlphaAt returns the [color.Alpha] of the pixel at (x, y).
// Out-of-bounds coordinates yield the zero color.
func (img *Alpha) AlphaAt(x, y int) color.Alpha {
	if !(Point{x, y}.In(img.Rect)) {
		return color.Alpha{}
	}
	return color.Alpha{A: img.Pix[img.PixOffset(x, y)]}
}

// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (img *Alpha) PixOffset(x, y int) int {
	return (y-img.Rect.Min.Y)*img.Stride + (x-img.Rect.Min.X)*1
}

// Set sets the pixel at (x, y) to c, converted to the Alpha color model.
// Out-of-bounds coordinates are a no-op.
func (img *Alpha) Set(x, y int, c color.Color) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	img.Pix[img.PixOffset(x, y)] = color.AlphaModel.Convert(c).(color.Alpha).A
}

// SetRGBA64 sets the pixel at (x, y) to the high byte of c's alpha channel.
// Out-of-bounds coordinates are a no-op.
func (img *Alpha) SetRGBA64(x, y int, c color.RGBA64) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	img.Pix[img.PixOffset(x, y)] = uint8(c.A >> 8)
}

// SetAlpha sets the pixel at (x, y) to c without any conversion.
// Out-of-bounds coordinates are a no-op.
func (img *Alpha) SetAlpha(x, y int, c color.Alpha) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	img.Pix[img.PixOffset(x, y)] = c.A
}

// SubImage returns an image representing the portion of img visible
// through r. The returned value shares pixels with the original image.
func (img *Alpha) SubImage(r Rectangle) Image {
	r = r.Intersect(img.Rect)
	// An empty intersection is not guaranteed to lie inside either operand;
	// guard explicitly so the Pix[i:] expression below cannot panic.
	if r.Empty() {
		return &Alpha{}
	}
	return &Alpha{
		Pix:    img.Pix[img.PixOffset(r.Min.X, r.Min.Y):],
		Stride: img.Stride,
		Rect:   r,
	}
}

// Opaque scans the entire image and reports whether it is fully opaque.
func (img *Alpha) Opaque() bool {
	if img.Rect.Empty() {
		return true
	}
	lo, hi := 0, img.Rect.Dx()
	for y := img.Rect.Min.Y; y < img.Rect.Max.Y; y++ {
		for i := lo; i < hi; i++ {
			if img.Pix[i] != 0xff {
				return false
			}
		}
		lo += img.Stride
		hi += img.Stride
	}
	return true
}

// NewAlpha returns a new [Alpha] image with the given bounds.
func NewAlpha(r Rectangle) *Alpha {
	return &Alpha{
		Pix:    make([]uint8, pixelBufferLength(1, r, "Alpha")),
		Stride: 1 * r.Dx(),
		Rect:   r,
	}
}
// Alpha16 is an in-memory image whose At method returns [color.Alpha16] values.
type Alpha16 struct {
	// Pix holds the image's pixels, as alpha values in big-endian format. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*2].
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

// ColorModel returns [color.Alpha16Model].
func (img *Alpha16) ColorModel() color.Model { return color.Alpha16Model }

// Bounds returns the image's bounds.
func (img *Alpha16) Bounds() Rectangle { return img.Rect }

// At returns the color of the pixel at (x, y).
func (img *Alpha16) At(x, y int) color.Color {
	return img.Alpha16At(x, y)
}

// RGBA64At returns the pixel at (x, y) with the 16-bit alpha replicated
// into all four channels.
func (img *Alpha16) RGBA64At(x, y int) color.RGBA64 {
	a := img.Alpha16At(x, y).A
	return color.RGBA64{R: a, G: a, B: a, A: a}
}

// Alpha16At returns the [color.Alpha16] of the pixel at (x, y).
// Out-of-bounds coordinates yield the zero color.
func (img *Alpha16) Alpha16At(x, y int) color.Alpha16 {
	if !(Point{x, y}.In(img.Rect)) {
		return color.Alpha16{}
	}
	i := img.PixOffset(x, y)
	return color.Alpha16{A: uint16(img.Pix[i+0])<<8 | uint16(img.Pix[i+1])}
}

// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (img *Alpha16) PixOffset(x, y int) int {
	return (y-img.Rect.Min.Y)*img.Stride + (x-img.Rect.Min.X)*2
}

// Set sets the pixel at (x, y) to c, converted to the Alpha16 color model.
// Out-of-bounds coordinates are a no-op.
func (img *Alpha16) Set(x, y int, c color.Color) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	c1 := color.Alpha16Model.Convert(c).(color.Alpha16)
	i := img.PixOffset(x, y)
	img.Pix[i+0] = uint8(c1.A >> 8)
	img.Pix[i+1] = uint8(c1.A)
}

// SetRGBA64 sets the pixel at (x, y) to c's alpha channel.
// Out-of-bounds coordinates are a no-op.
func (img *Alpha16) SetRGBA64(x, y int, c color.RGBA64) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	i := img.PixOffset(x, y)
	img.Pix[i+0] = uint8(c.A >> 8)
	img.Pix[i+1] = uint8(c.A)
}

// SetAlpha16 sets the pixel at (x, y) to c without any conversion.
// Out-of-bounds coordinates are a no-op.
func (img *Alpha16) SetAlpha16(x, y int, c color.Alpha16) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	i := img.PixOffset(x, y)
	img.Pix[i+0] = uint8(c.A >> 8)
	img.Pix[i+1] = uint8(c.A)
}

// SubImage returns an image representing the portion of img visible
// through r. The returned value shares pixels with the original image.
func (img *Alpha16) SubImage(r Rectangle) Image {
	r = r.Intersect(img.Rect)
	// An empty intersection is not guaranteed to lie inside either operand;
	// guard explicitly so the Pix[i:] expression below cannot panic.
	if r.Empty() {
		return &Alpha16{}
	}
	return &Alpha16{
		Pix:    img.Pix[img.PixOffset(r.Min.X, r.Min.Y):],
		Stride: img.Stride,
		Rect:   r,
	}
}

// Opaque scans the entire image and reports whether it is fully opaque.
func (img *Alpha16) Opaque() bool {
	if img.Rect.Empty() {
		return true
	}
	lo, hi := 0, img.Rect.Dx()*2
	for y := img.Rect.Min.Y; y < img.Rect.Max.Y; y++ {
		for i := lo; i < hi; i += 2 {
			if img.Pix[i+0] != 0xff || img.Pix[i+1] != 0xff {
				return false
			}
		}
		lo += img.Stride
		hi += img.Stride
	}
	return true
}

// NewAlpha16 returns a new [Alpha16] image with the given bounds.
func NewAlpha16(r Rectangle) *Alpha16 {
	return &Alpha16{
		Pix:    make([]uint8, pixelBufferLength(2, r, "Alpha16")),
		Stride: 2 * r.Dx(),
		Rect:   r,
	}
}
// Gray is an in-memory image whose At method returns [color.Gray] values.
type Gray struct {
	// Pix holds the image's pixels, as gray values. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

// ColorModel returns [color.GrayModel].
func (img *Gray) ColorModel() color.Model { return color.GrayModel }

// Bounds returns the image's bounds.
func (img *Gray) Bounds() Rectangle { return img.Rect }

// At returns the color of the pixel at (x, y).
func (img *Gray) At(x, y int) color.Color {
	return img.GrayAt(x, y)
}

// RGBA64At returns the pixel at (x, y) with the 8-bit gray value replicated
// into R, G and B, and full alpha.
func (img *Gray) RGBA64At(x, y int) color.RGBA64 {
	// Multiplying an 8-bit value by 0x101 replicates it into both bytes,
	// the same as g |= g<<8.
	g := uint16(img.GrayAt(x, y).Y) * 0x101
	return color.RGBA64{R: g, G: g, B: g, A: 0xffff}
}

// GrayAt returns the [color.Gray] of the pixel at (x, y).
// Out-of-bounds coordinates yield the zero color.
func (img *Gray) GrayAt(x, y int) color.Gray {
	if !(Point{x, y}.In(img.Rect)) {
		return color.Gray{}
	}
	return color.Gray{Y: img.Pix[img.PixOffset(x, y)]}
}

// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (img *Gray) PixOffset(x, y int) int {
	return (y-img.Rect.Min.Y)*img.Stride + (x-img.Rect.Min.X)*1
}

// Set sets the pixel at (x, y) to c, converted to the Gray color model.
// Out-of-bounds coordinates are a no-op.
func (img *Gray) Set(x, y int, c color.Color) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	img.Pix[img.PixOffset(x, y)] = color.GrayModel.Convert(c).(color.Gray).Y
}

// SetRGBA64 sets the pixel at (x, y) to the luminance of c.
// Out-of-bounds coordinates are a no-op.
func (img *Gray) SetRGBA64(x, y int, c color.RGBA64) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	// This formula is the same as in color.grayModel.
	gray := (19595*uint32(c.R) + 38470*uint32(c.G) + 7471*uint32(c.B) + 1<<15) >> 24
	img.Pix[img.PixOffset(x, y)] = uint8(gray)
}

// SetGray sets the pixel at (x, y) to c without any conversion.
// Out-of-bounds coordinates are a no-op.
func (img *Gray) SetGray(x, y int, c color.Gray) {
	if !(Point{x, y}.In(img.Rect)) {
		return
	}
	img.Pix[img.PixOffset(x, y)] = c.Y
}

// SubImage returns an image representing the portion of img visible
// through r. The returned value shares pixels with the original image.
func (img *Gray) SubImage(r Rectangle) Image {
	r = r.Intersect(img.Rect)
	// An empty intersection is not guaranteed to lie inside either operand;
	// guard explicitly so the Pix[i:] expression below cannot panic.
	if r.Empty() {
		return &Gray{}
	}
	return &Gray{
		Pix:    img.Pix[img.PixOffset(r.Min.X, r.Min.Y):],
		Stride: img.Stride,
		Rect:   r,
	}
}

// Opaque scans the entire image and reports whether it is fully opaque.
// A Gray image has no alpha channel, so it always is.
func (img *Gray) Opaque() bool {
	return true
}

// NewGray returns a new [Gray] image with the given bounds.
func NewGray(r Rectangle) *Gray {
	return &Gray{
		Pix:    make([]uint8, pixelBufferLength(1, r, "Gray")),
		Stride: 1 * r.Dx(),
		Rect:   r,
	}
}
// Gray16 is an in-memory image whose At method returns [color.Gray16] values.
type Gray16 struct {
// Pix holds the image's pixels, as gray values in big-endian format. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*2].
Pix []uint8
// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
Stride int
// Rect is the image's bounds.
Rect Rectangle
}
func (p *Gray16) ColorModel() color.Model { return color.Gray16Model }
func (p *Gray16) Bounds() Rectangle { return p.Rect }
func (p *Gray16) At(x, y int) color.Color {
return p.Gray16At(x, y)
}
func (p *Gray16) RGBA64At(x, y int) color.RGBA64 {
gray := p.Gray16At(x, y).Y
return color.RGBA64{gray, gray, gray, 0xffff}
}
func (p *Gray16) Gray16At(x, y int) color.Gray16 {
if !(Point{x, y}.In(p.Rect)) {
return color.Gray16{}
}
i := p.PixOffset(x, y)
return color.Gray16{uint16(p.Pix[i+0])<<8 | uint16(p.Pix[i+1])}
}
// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (p *Gray16) PixOffset(x, y int) int {
return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
}
func (p *Gray16) Set(x, y int, c color.Color) {
if !(Point{x, y}.In(p.Rect)) {
return
}
i := p.PixOffset(x, y)
c1 := color.Gray16Model.Convert(c).(color.Gray16)
p.Pix[i+0] = uint8(c1.Y >> 8)
p.Pix[i+1] = uint8(c1.Y)
}
func (p *Gray16) SetRGBA64(x, y int, c color.RGBA64) {
if !(Point{x, y}.In(p.Rect)) {
return
}
// This formula is the same as in color.gray16Model.
gray := (19595*uint32(c.R) + 38470*uint32(c.G) + 7471*uint32(c.B) + 1<<15) >> 16
i := p.PixOffset(x, y)
p.Pix[i+0] = uint8(gray >> 8)
p.Pix[i+1] = uint8(gray)
}
// SetGray16 sets the pixel at (x, y) to c. Points outside the bounds are ignored.
func (p *Gray16) SetGray16(x, y int, c color.Gray16) {
	if !(Point{x, y}.In(p.Rect)) {
		return
	}
	i := p.PixOffset(x, y)
	y16 := c.Y
	// Big-endian: most significant byte first.
	p.Pix[i+0] = uint8(y16 >> 8)
	p.Pix[i+1] = uint8(y16)
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *Gray16) SubImage(r Rectangle) Image {
	r = r.Intersect(p.Rect)
	// An empty intersection need not lie inside either operand, so the
	// PixOffset/Pix[off:] expressions below could be invalid; return an
	// empty image instead.
	if r.Empty() {
		return &Gray16{}
	}
	off := p.PixOffset(r.Min.X, r.Min.Y)
	sub := &Gray16{
		Pix:    p.Pix[off:],
		Stride: p.Stride,
		Rect:   r,
	}
	return sub
}
// Opaque scans the entire image and reports whether it is fully opaque.
func (p *Gray16) Opaque() bool {
	// Gray16 pixels carry no alpha channel, so the image is always opaque.
	return true
}
// NewGray16 returns a new [Gray16] image with the given bounds.
func NewGray16(r Rectangle) *Gray16 {
	// Two bytes per pixel.
	buf := make([]uint8, pixelBufferLength(2, r, "Gray16"))
	return &Gray16{Pix: buf, Stride: 2 * r.Dx(), Rect: r}
}
// CMYK is an in-memory image whose At method returns [color.CMYK] values.
type CMYK struct {
	// Pix holds the image's pixels, in C, M, Y, K order. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
	// Each pixel occupies four consecutive bytes.
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}
// ColorModel returns the image's color model ([color.CMYKModel]).
func (p *CMYK) ColorModel() color.Model { return color.CMYKModel }

// Bounds returns the domain for which At can return non-zero color.
func (p *CMYK) Bounds() Rectangle { return p.Rect }

// At returns the color of the pixel at (x, y) as a [color.Color].
func (p *CMYK) At(x, y int) color.Color {
	return p.CMYKAt(x, y)
}

// RGBA64At returns the pixel at (x, y) converted to RGBA64
// (CMYK images are fully opaque; see Opaque).
func (p *CMYK) RGBA64At(x, y int) color.RGBA64 {
	r, g, b, a := p.CMYKAt(x, y).RGBA()
	return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
}
// CMYKAt returns the [color.CMYK] value of the pixel at (x, y).
// Points outside the bounds yield the zero value.
func (p *CMYK) CMYKAt(x, y int) color.CMYK {
	if !(Point{x, y}.In(p.Rect)) {
		return color.CMYK{}
	}
	i := p.PixOffset(x, y)
	// Small cap improves performance, see https://golang.org/issue/27857
	s := p.Pix[i : i+4 : i+4]
	return color.CMYK{C: s[0], M: s[1], Y: s[2], K: s[3]}
}
// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y). The pixel occupies the four bytes Pix[i:i+4] in
// C, M, Y, K order.
func (p *CMYK) PixOffset(x, y int) int {
	return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
}
// Set sets the pixel at (x, y) to the CMYK equivalent of c.
// Points outside the bounds are ignored.
func (p *CMYK) Set(x, y int, c color.Color) {
	if !(Point{x, y}.In(p.Rect)) {
		return
	}
	cc := color.CMYKModel.Convert(c).(color.CMYK)
	i := p.PixOffset(x, y)
	// Small cap improves performance, see https://golang.org/issue/27857
	s := p.Pix[i : i+4 : i+4]
	s[0], s[1], s[2], s[3] = cc.C, cc.M, cc.Y, cc.K
}
// SetRGBA64 sets the pixel at (x, y) to the CMYK equivalent of c's R, G and B
// channels (alpha is ignored). Out-of-bounds points are ignored.
func (p *CMYK) SetRGBA64(x, y int, c color.RGBA64) {
	if !(Point{x, y}.In(p.Rect)) {
		return
	}
	// Convert the 16-bit channels down to 8 bits before the CMYK conversion.
	cc, mm, yy, kk := color.RGBToCMYK(uint8(c.R>>8), uint8(c.G>>8), uint8(c.B>>8))
	i := p.PixOffset(x, y)
	s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
	s[0] = cc
	s[1] = mm
	s[2] = yy
	s[3] = kk
}
// SetCMYK sets the pixel at (x, y) to c. Points outside the bounds are ignored.
func (p *CMYK) SetCMYK(x, y int, c color.CMYK) {
	if !(Point{x, y}.In(p.Rect)) {
		return
	}
	i := p.PixOffset(x, y)
	// Small cap improves performance, see https://golang.org/issue/27857
	s := p.Pix[i : i+4 : i+4]
	s[0], s[1], s[2], s[3] = c.C, c.M, c.Y, c.K
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *CMYK) SubImage(r Rectangle) Image {
	r = r.Intersect(p.Rect)
	// An empty intersection need not lie inside either operand, so the
	// PixOffset/Pix[off:] expressions below could be invalid; return an
	// empty image instead.
	if r.Empty() {
		return &CMYK{}
	}
	off := p.PixOffset(r.Min.X, r.Min.Y)
	sub := &CMYK{
		Pix:    p.Pix[off:],
		Stride: p.Stride,
		Rect:   r,
	}
	return sub
}
// Opaque scans the entire image and reports whether it is fully opaque.
func (p *CMYK) Opaque() bool {
	// CMYK pixels carry no alpha channel, so the image is always opaque.
	return true
}
// NewCMYK returns a new [CMYK] image with the given bounds.
func NewCMYK(r Rectangle) *CMYK {
	return &CMYK{
		Pix:    make([]uint8, pixelBufferLength(4, r, "CMYK")),
		Stride: 4 * r.Dx(),
		Rect:   r,
	}
}
// Paletted is an in-memory image of uint8 indices into a given palette.
type Paletted struct {
	// Pix holds the image's pixels, as palette indices. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
	// Indices outside Palette's range cause At to panic.
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
	// Palette is the image's palette.
	Palette color.Palette
}
// ColorModel returns the image's palette, which implements [color.Model].
func (p *Paletted) ColorModel() color.Model { return p.Palette }

// Bounds returns the domain for which At can return non-zero color.
func (p *Paletted) Bounds() Rectangle { return p.Rect }
// At returns the palette entry for the pixel at (x, y).
// An empty palette yields nil; out-of-bounds points yield Palette[0].
func (p *Paletted) At(x, y int) color.Color {
	if len(p.Palette) == 0 {
		return nil
	}
	if !(Point{x, y}.In(p.Rect)) {
		return p.Palette[0]
	}
	idx := p.Pix[p.PixOffset(x, y)]
	return p.Palette[idx]
}
// RGBA64At returns the RGBA64 value of the palette entry for the pixel
// at (x, y). An empty palette yields the zero value; out-of-bounds points
// use Palette[0].
func (p *Paletted) RGBA64At(x, y int) color.RGBA64 {
	if len(p.Palette) == 0 {
		return color.RGBA64{}
	}
	var c color.Color
	if (Point{x, y}).In(p.Rect) {
		c = p.Palette[p.Pix[p.PixOffset(x, y)]]
	} else {
		c = p.Palette[0]
	}
	r, g, b, a := c.RGBA()
	return color.RGBA64{
		R: uint16(r),
		G: uint16(g),
		B: uint16(b),
		A: uint16(a),
	}
}
// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y). Each pixel is a single palette-index byte.
func (p *Paletted) PixOffset(x, y int) int {
	return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*1
}
// Set sets the pixel at (x, y) to the palette index closest to c.
// Points outside the bounds are ignored.
func (p *Paletted) Set(x, y int, c color.Color) {
	if !(Point{x, y}.In(p.Rect)) {
		return
	}
	p.Pix[p.PixOffset(x, y)] = uint8(p.Palette.Index(c))
}
// SetRGBA64 sets the pixel at (x, y) to the palette index closest to c.
// Points outside the bounds are ignored.
func (p *Paletted) SetRGBA64(x, y int, c color.RGBA64) {
	if !(Point{x, y}.In(p.Rect)) {
		return
	}
	p.Pix[p.PixOffset(x, y)] = uint8(p.Palette.Index(c))
}
// ColorIndexAt returns the palette index of the pixel at (x, y),
// or 0 for points outside the bounds.
func (p *Paletted) ColorIndexAt(x, y int) uint8 {
	if (Point{x, y}).In(p.Rect) {
		return p.Pix[p.PixOffset(x, y)]
	}
	return 0
}
// SetColorIndex sets the palette index of the pixel at (x, y).
// Points outside the bounds are ignored.
func (p *Paletted) SetColorIndex(x, y int, index uint8) {
	if !(Point{x, y}.In(p.Rect)) {
		return
	}
	p.Pix[p.PixOffset(x, y)] = index
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *Paletted) SubImage(r Rectangle) Image {
	r = r.Intersect(p.Rect)
	// If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
	// either r1 or r2 if the intersection is empty. Without explicitly checking for
	// this, the Pix[i:] expression below can panic.
	if r.Empty() {
		return &Paletted{
			Palette: p.Palette,
		}
	}
	i := p.PixOffset(r.Min.X, r.Min.Y)
	return &Paletted{
		Pix:    p.Pix[i:],
		Stride: p.Stride,
		// r was already clipped to p.Rect above, so a second
		// p.Rect.Intersect(r) would be a redundant no-op; this also
		// matches the other SubImage implementations.
		Rect:    r,
		Palette: p.Palette,
	}
}
// Opaque scans the entire image and reports whether it is fully opaque.
func (p *Paletted) Opaque() bool {
	// First record which palette indices actually appear in the image,
	// one row of Pix at a time.
	var present [256]bool
	i0, i1 := 0, p.Rect.Dx()
	for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
		for _, c := range p.Pix[i0:i1] {
			present[c] = true
		}
		i0 += p.Stride
		i1 += p.Stride
	}
	// Only palette entries that are actually used need to be opaque.
	for i, c := range p.Palette {
		if !present[i] {
			continue
		}
		_, _, _, a := c.RGBA()
		if a != 0xffff {
			return false
		}
	}
	return true
}
// NewPaletted returns a new [Paletted] image with the given width, height and
// palette.
func NewPaletted(r Rectangle, p color.Palette) *Paletted {
	// One palette-index byte per pixel.
	pix := make([]uint8, pixelBufferLength(1, r, "Paletted"))
	return &Paletted{
		Pix:     pix,
		Stride:  1 * r.Dx(),
		Rect:    r,
		Palette: p,
	}
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jpeg
// Discrete Cosine Transformation (DCT) implementations using the algorithm from
// Christoph Loeffler, Adriaan Ligtenberg, and George S. Moschytz,
// “Practical Fast 1-D DCT Algorithms with 11 Multiplications,” ICASSP 1989.
// https://ieeexplore.ieee.org/document/266596
//
// Since the paper is paywalled, the rest of this comment gives a summary.
//
// A 1-dimensional forward DCT (1D FDCT) takes as input 8 values x0..x7
// and transforms them in place into the result values.
//
// The mathematical definition of the N-point 1D FDCT is:
//
// X[k] = α_k Σ_n x[n] * cos (2n+1)*k*π/2N
//
// where α₀ = √2 and α_k = 1 for k > 0.
//
// For our purposes, N=8, so the angles end up being multiples of π/16.
// The most direct implementation of this definition would require 64 multiplications.
//
// Loeffler's paper presents a more efficient computation that requires only
// 11 multiplications and works in terms of three basic operations:
//
// - A “butterfly” x0, x1 = x0+x1, x0-x1.
// The inverse is x0, x1 = (x0+x1)/2, (x0-x1)/2.
//
// - A scaling of x0 by k: x0 *= k. The inverse is scaling by 1/k.
//
// - A rotation of x0, x1 by θ, defined as:
// x0, x1 = x0 cos θ + x1 sin θ, -x0 sin θ + x1 cos θ.
// The inverse is rotation by -θ.
//
// The algorithm proceeds in four stages:
//
// Stage 1:
// - butterfly x0, x7; x1, x6; x2, x5; x3, x4.
//
// Stage 2:
// - butterfly x0, x3; x1, x2
// - rotate x4, x7 by 3π/16
// - rotate x5, x6 by π/16.
//
// Stage 3:
// - butterfly x0, x1; x4, x6; x7, x5
// - rotate x2, x3 by 6π/16 and scale by √2.
//
// Stage 4:
// - butterfly x7, x4
// - scale x5, x6 by √2.
//
// Finally, the values are permuted. The permutation can be read as either:
// - x0, x4, x2, x6, x7, x3, x5, x1 = x0, x1, x2, x3, x4, x5, x6, x7 (paper's form)
// - x0, x1, x2, x3, x4, x5, x6, x7 = x0, x7, x2, x5, x1, x6, x3, x4 (sorted by LHS)
// The code below uses the second form to make it easier to merge adjacent stores.
// (Note that unlike in recursive FFT implementations, the permutation here is
// not always mapping indexes to their bit reversals.)
//
// As written above, the rotation requires four multiplications, but it can be
// reduced to three by refactoring (see [dctBox] below), and the scaling in
// stage 3 can be merged into the rotation constants, so the overall cost
// of a 1D FDCT is 11 multiplies.
//
// The 1D inverse DCT (IDCT) is the 1D FDCT run backward
// with all the basic operations inverted.
// dctBox implements a 3-multiply, 3-add rotation+scaling.
// Given x0, x1, k*cos θ, and k*sin θ, dctBox returns the
// rotated and scaled coordinates.
// (It is called dctBox because the rotate+scale operation
// is drawn as a box in Figures 1 and 2 in the paper.)
func dctBox(x0, x1, kcos, ksin int32) (y0, y1 int32) {
	// Factored form of the 2x2 rotation
	//	y0 = x0*kcos + x1*ksin
	//	y1 = -x0*ksin + x1*kcos
	// sharing one product between both outputs to save a multiply.
	shared := kcos * (x0 + x1)
	y0 = shared + (ksin-kcos)*x1
	y1 = shared - (kcos+ksin)*x0
	return y0, y1
}
// A block is an 8x8 input to a 2D DCT (either the FDCT or IDCT).
// The input is actually only 8x8 uint8 values, and the outputs are 8x8 int16,
// but it is convenient to use int32s for intermediate storage,
// so we define only a single block type of [8*8]int32.
//
// A 2D DCT is implemented as 1D DCTs over the rows and columns.
//
// dct_test.go defines a String method for nice printing in tests.
type block [blockSize]int32

// blockSize is the number of coefficients in a block (8x8).
const blockSize = 8 * 8
// Note on Numerical Precision
//
// The inputs to both the FDCT and IDCT are uint8 values stored in a block,
// and the outputs are int16s in the same block, but the overall operation
// uses int32 values as fixed-point intermediate values.
// In the code comments below, the notation “QN.M” refers to a
// signed value of 1+N+M significant bits, one of which is the sign bit,
// and M of which hold fractional (sub-integer) precision.
// For example, 255 as a Q8.0 value is stored as int32(255),
// while 255 as a Q8.1 value is stored as int32(510),
// and 255.5 as a Q8.1 value is int32(511).
// The notation UQN.M refers to an unsigned value of N+M significant bits.
// See https://en.wikipedia.org/wiki/Q_(number_format) for more.
//
// In general we only need to keep about 16 significant bits, but it is more
// efficient and somewhat more precise to let unnecessary fractional bits
// accumulate and shift them away in bulk rather than after every operation.
// As such, it is important to keep track of the number of fractional bits
// in each variable at different points in the code, to avoid mistakes like
// adding numbers with different fractional precisions, as well as to keep
// track of the total number of bits, to avoid overflow. A comment like:
//
// // x[123] now Q8.2.
//
// means that x1, x2, and x3 are all Q8.2 (11-bit) values.
// Keeping extra precision bits also reduces the size of the errors introduced
// by using right shift to approximate rounded division.
// Constants needed for the implementation.
// These are all 60-bit precision fixed-point constants.
// The function c(val, b) rounds the constant to b bits.
// c is simple enough that calls to it with constant args
// are inlined and constant-propagated down to an inline constant.
// Each constant is commented with its Ivy definition (see robpike.io/ivy),
// using this scaling helper function:
//
// op fix x = floor 0.5 + x * 2**60
// 60-bit fixed-point constants (see the comment above);
// each value is round(x * 2**60) for the quantity in its comment.
const (
	cos1          = 1130768441178740757 // fix cos 1*pi/16
	sin1          = 224923827593068887  // fix sin 1*pi/16
	cos3          = 958619196450722178  // fix cos 3*pi/16
	sin3          = 640528868967736374  // fix sin 3*pi/16
	sqrt2         = 1630477228166597777 // fix sqrt 2
	sqrt2_cos6    = 623956622067911264  // fix (sqrt 2)*cos 6*pi/16
	sqrt2_sin6    = 1506364539328854985 // fix (sqrt 2)*sin 6*pi/16
	sqrt2inv      = 815238614083298888  // fix 1/sqrt 2
	sqrt2inv_cos6 = 311978311033955632  // fix (1/sqrt 2)*cos 6*pi/16
	sqrt2inv_sin6 = 753182269664427492  // fix (1/sqrt 2)*sin 6*pi/16
)
// c rounds the 60-bit fixed-point constant x down to a bits-bit
// fixed-point int32, rounding half up.
func c(x uint64, bits int) int32 {
	half := uint64(1) << (59 - bits)
	return int32((x + half) >> (60 - bits))
}
// fdct implements the forward DCT.
// Inputs are UQ8.0; outputs are Q13.0.
func fdct(b *block) {
	// A 2D FDCT is a 1D FDCT over the columns followed by the rows.
	fdctCols(b)
	fdctRows(b)
}
// fdctCols applies the 1D DCT to the columns of b.
// Inputs are UQ8.0 in [0,255] but interpreted as [-128,127].
// Outputs are Q10.18.
func fdctCols(b *block) {
	for i := range 8 {
		// Load the eight samples of column i.
		x0 := b[0*8+i]
		x1 := b[1*8+i]
		x2 := b[2*8+i]
		x3 := b[3*8+i]
		x4 := b[4*8+i]
		x5 := b[5*8+i]
		x6 := b[6*8+i]
		x7 := b[7*8+i]

		// x[01234567] are UQ8.0 in [0,255].

		// Stage 1: four butterflies.
		// In general a butterfly of QN.M inputs produces Q(N+1).M outputs.
		// A butterfly of UQN.M inputs produces a UQ(N+1).M sum and a QN.M difference.
		x0, x7 = x0+x7, x0-x7
		x1, x6 = x1+x6, x1-x6
		x2, x5 = x2+x5, x2-x5
		x3, x4 = x3+x4, x3-x4
		// x[0123] now UQ9.0 in [0, 510].
		// x[4567] now Q8.0 in [-255,255].

		// Stage 2: two boxes and two butterflies.
		// A box on QN.M inputs with B-bit constants
		// produces Q(N+1).(M+B) outputs.
		// (The +1 is from the addition.)
		x4, x7 = dctBox(x4, x7, c(cos3, 18), c(sin3, 18))
		x5, x6 = dctBox(x5, x6, c(cos1, 18), c(sin1, 18))
		// x[47] now Q9.18 in [-354, 354].
		// x[56] now Q9.18 in [-300, 300].
		x0, x3 = x0+x3, x0-x3
		x1, x2 = x1+x2, x1-x2
		// x[01] now UQ10.0 in [0, 1020].
		// x[23] now Q9.0 in [-510, 510].

		// Stage 3: one box and three butterflies.
		x2, x3 = dctBox(x2, x3, c(sqrt2_cos6, 18), c(sqrt2_sin6, 18))
		// x[23] now Q10.18 in [-943, 943].
		x0, x1 = x0+x1, x0-x1
		// x0 now UQ11.0 in [0, 2040].
		// x1 now Q10.0 in [-1020, 1020].

		// Store x0, x1, x2, x3 to their permuted targets.
		// The original +128 in every input value
		// has cancelled out except in the “DC signal” x0.
		// Subtracting 128*8 here is equivalent to subtracting 128
		// from every input before we started, but cheaper.
		// It also converts x0 from UQ11.0 to Q10.0 before the
		// shift up to Q10.18.
		b[0*8+i] = (x0 - 128*8) << 18
		b[4*8+i] = x1 << 18
		b[2*8+i] = x2
		b[6*8+i] = x3
		x4, x6 = x4+x6, x4-x6
		x7, x5 = x7+x5, x7-x5
		// x[4567] now Q10.18 in [-654, 654].

		// Stage 4: two √2 scalings and one butterfly.
		x5 = (x5 >> 12) * c(sqrt2, 12)
		x6 = (x6 >> 12) * c(sqrt2, 12)
		// x[56] still Q10.18 in [-925, 925] (= 654√2).
		x7, x4 = x7+x4, x7-x4
		// x[47] still Q10.18 in [-925, 925] (not Q11.18!).
		// This is not obvious at all! See “Note on 925” below.

		// Store x4 x5 x6 x7 to their permuted targets.
		b[1*8+i] = x7
		b[3*8+i] = x5
		b[5*8+i] = x6
		b[7*8+i] = x4
	}
}
// fdctRows applies the 1D DCT to the rows of b.
// Inputs are Q10.18; outputs are Q13.0.
func fdctRows(b *block) {
	for i := range 8 {
		// x aliases row i; the three-index slice keeps the cap tight.
		x := b[8*i : 8*i+8 : 8*i+8]
		x0 := x[0]
		x1 := x[1]
		x2 := x[2]
		x3 := x[3]
		x4 := x[4]
		x5 := x[5]
		x6 := x[6]
		x7 := x[7]

		// x[01234567] are Q10.18 [-1020, 1020].

		// Stage 1: four butterflies.
		x0, x7 = x0+x7, x0-x7
		x1, x6 = x1+x6, x1-x6
		x2, x5 = x2+x5, x2-x5
		x3, x4 = x3+x4, x3-x4
		// x[01234567] now Q11.18 in [-2040, 2040].

		// Stage 2: two boxes and two butterflies.
		// The >>14 downshifts keep the 32-bit products from overflowing.
		x4, x7 = dctBox(x4>>14, x7>>14, c(cos3, 14), c(sin3, 14))
		x5, x6 = dctBox(x5>>14, x6>>14, c(cos1, 14), c(sin1, 14))
		// x[47] now Q12.18 in [-2830, 2830].
		// x[56] now Q12.18 in [-2400, 2400].
		x0, x3 = x0+x3, x0-x3
		x1, x2 = x1+x2, x1-x2
		// x[01234567] now Q12.18 in [-4080, 4080].

		// Stage 3: one box and three butterflies.
		x2, x3 = dctBox(x2>>14, x3>>14, c(sqrt2_cos6, 14), c(sqrt2_sin6, 14))
		// x[23] now Q13.18 in [-7539, 7539].
		x0, x1 = x0+x1, x0-x1
		// x[01] now Q13.18 in [-8160, 8160].
		x4, x6 = x4+x6, x4-x6
		x7, x5 = x7+x5, x7-x5
		// x[4567] now Q13.18 in [-5230, 5230].

		// Stage 4: two √2 scalings and one butterfly.
		x5 = (x5 >> 14) * c(sqrt2, 14)
		x6 = (x6 >> 14) * c(sqrt2, 14)
		// x[56] still Q13.18 in [-7397, 7397] (= 5230√2).
		x7, x4 = x7+x4, x7-x4
		// x[47] still Q13.18 in [-7395, 7395] (= 2040*3.6246).
		// See “Note on 925” below.

		// Cut from Q13.18 to Q13.0, rounding to nearest.
		x0 = (x0 + 1<<17) >> 18
		x1 = (x1 + 1<<17) >> 18
		x2 = (x2 + 1<<17) >> 18
		x3 = (x3 + 1<<17) >> 18
		x4 = (x4 + 1<<17) >> 18
		x5 = (x5 + 1<<17) >> 18
		x6 = (x6 + 1<<17) >> 18
		x7 = (x7 + 1<<17) >> 18

		// Note: Unlike in fdctCols, we save all the stores for the end,
		// because they are adjacent memory locations and some systems
		// can use multiword stores.
		x[0] = x0
		x[1] = x7
		x[2] = x2
		x[3] = x5
		x[4] = x1
		x[5] = x6
		x[6] = x3
		x[7] = x4
	}
}
// “Note on 925”, deferred from above to avoid interrupting code.
//
// In fdctCols, heading into stage 2, the values x4, x5, x6, x7 are in [-255, 255].
// Let's call those specific values b4, b5, b6, b7, and trace how x[4567] evolve:
//
// Stage 2:
// x4 = b4*cos3 + b7*sin3
// x7 = -b4*sin3 + b7*cos3
// x5 = b5*cos1 + b6*sin1
// x6 = -b5*sin1 + b6*cos1
//
// Stage 3:
//
// x4 = x4+x6 = b4*cos3 + b7*sin3 - b5*sin1 + b6*cos1
// x6 = x4-x6 = b4*cos3 + b7*sin3 + b5*sin1 - b6*cos1
// x7 = x7+x5 = -b4*sin3 + b7*cos3 + b5*cos1 + b6*sin1
// x5 = x7-x5 = -b4*sin3 + b7*cos3 - b5*cos1 - b6*sin1
//
// Stage 4:
//
// x7 = x7+x4 = -b4*sin3 + b7*cos3 + b5*cos1 + b6*sin1 + b4*cos3 + b7*sin3 - b5*sin1 + b6*cos1
// = b4*(cos3-sin3) + b5*(cos1-sin1) + b6*(cos1+sin1) + b7*(cos3+sin3)
// < 255*(0.2759 + 0.7857 + 1.1759 + 1.3871) = 255*3.6246 < 925.
//
// x4 = x7-x4 = -b4*sin3 + b7*cos3 + b5*cos1 + b6*sin1 - b4*cos3 - b7*sin3 + b5*sin1 - b6*cos1
// = -b4*(cos3+sin3) + b5*(cos1+sin1) + b6*(sin1-cos1) + b7*(cos3-sin3)
// < same 925.
//
// The fact that x5, x6 are also at most 925 is not a coincidence: we are computing
// the same kinds of numbers for all four, just with different paths to them.
//
// In fdctRows, the same analysis applies, but the initial values are
// in [-2040, 2040] instead of [-255, 255], so the bound is 2040*3.6246 < 7395.
// idct implements the inverse DCT.
// Inputs are UQ8.0; outputs are Q10.3.
// The result is the IDCT scaled by 8 (see idctCols).
func idct(b *block) {
	// A 2D IDCT is a 1D IDCT on rows followed by columns.
	idctRows(b)
	idctCols(b)
}
// idctRows applies the 1D IDCT to the rows of b.
// Inputs are UQ8.0; outputs are Q9.20.
func idctRows(b *block) {
	for i := range 8 {
		x := b[8*i : 8*i+8 : 8*i+8]
		// Load in permuted order, undoing fdctRows's final store permutation.
		x0 := x[0]
		x7 := x[1]
		x2 := x[2]
		x5 := x[3]
		x1 := x[4]
		x6 := x[5]
		x3 := x[6]
		x4 := x[7]

		// Run FDCT backward.
		// Independent operations have been reordered somewhat
		// to make precision tracking easier.
		//
		// Note that “x0, x1 = x0+x1, x0-x1” is now a reverse butterfly
		// and carries with it an implicit divide by two: the extra bit
		// is added to the precision, not the value size.

		// x[01234567] are UQ8.0 in [0, 255].

		// Stages 4, 3, 2: x0, x1, x2, x3.
		x0 <<= 17
		x1 <<= 17
		// x0, x1 now UQ8.17.
		x0, x1 = x0+x1, x0-x1
		// x0 now UQ8.18 in [0, 255].
		// x1 now Q7.18 in [-127½, 127½].
		// Note: (1/sqrt 2)*((cos 6*pi/16)+(sin 6*pi/16)) < 0.924, so no new high bit.
		x2, x3 = dctBox(x2, x3, c(sqrt2inv_cos6, 18), -c(sqrt2inv_sin6, 18))
		// x[23] now Q8.18 in [-236, 236].
		x1, x2 = x1+x2, x1-x2
		x0, x3 = x0+x3, x0-x3
		// x[0123] now Q8.19 in [-246, 246].

		// Stages 4, 3, 2: x4, x5, x6, x7.
		x4 <<= 7
		x7 <<= 7
		// x[47] now UQ8.7
		x7, x4 = x7+x4, x7-x4
		// x7 now UQ8.8 in [0, 255].
		// x4 now Q7.8 in [-127½, 127½].
		x6 = x6 * c(sqrt2inv, 8)
		x5 = x5 * c(sqrt2inv, 8)
		// x[56] now UQ8.8 in [0, 181].
		// Note that 1/√2 has five 0s in its binary representation after
		// the 8th bit, so this multiply is actually producing 12 bits of precision.
		x7, x5 = x7+x5, x7-x5
		x4, x6 = x4+x6, x4-x6
		// x[4567] now Q8.9 in [-218, 218].
		x4, x7 = dctBox(x4>>2, x7>>2, c(cos3, 12), -c(sin3, 12))
		x5, x6 = dctBox(x5>>2, x6>>2, c(cos1, 12), -c(sin1, 12))
		// x[4567] now Q9.19 in [-303, 303].

		// Stage 1: four reverse butterflies.
		x0, x7 = x0+x7, x0-x7
		x1, x6 = x1+x6, x1-x6
		x2, x5 = x2+x5, x2-x5
		x3, x4 = x3+x4, x3-x4
		// x[01234567] now Q9.20 in [-275, 275].
		// Note: we don't need all 20 bits of “precision”,
		// but it is faster to let idctCols shift it away as part
		// of other operations rather than downshift here.
		x[0] = x0
		x[1] = x1
		x[2] = x2
		x[3] = x3
		x[4] = x4
		x[5] = x5
		x[6] = x6
		x[7] = x7
	}
}
// idctCols applies the 1D IDCT to the columns of b.
// Inputs are Q9.20.
// Outputs are Q10.3. That is, the result is the IDCT*8.
func idctCols(b *block) {
	for i := range 8 {
		// Load column i in permuted order (same permutation as idctRows).
		x0 := b[0*8+i]
		x7 := b[1*8+i]
		x2 := b[2*8+i]
		x5 := b[3*8+i]
		x1 := b[4*8+i]
		x6 := b[5*8+i]
		x3 := b[6*8+i]
		x4 := b[7*8+i]

		// x[01234567] are Q9.20.

		// Start by adding 0.5 to x0 (the incoming DC signal).
		// The butterflies will add it to all the other values,
		// and then the final shifts will round properly.
		x0 += 1 << 19

		// Stages 4, 3, 2: x0, x1, x2, x3.
		x0, x1 = (x0+x1)>>2, (x0-x1)>>2
		// x[01] now Q9.19.
		// Note: (1/sqrt 2)*((cos 6*pi/16)+(sin 6*pi/16)) < 1, so no new high bit.
		x2, x3 = dctBox(x2>>13, x3>>13, c(sqrt2inv_cos6, 12), -c(sqrt2inv_sin6, 12))
		// x[0123] now Q9.19.
		x1, x2 = x1+x2, x1-x2
		x0, x3 = x0+x3, x0-x3
		// x[0123] now Q9.20.

		// Stages 4, 3, 2: x4, x5, x6, x7.
		x7, x4 = x7+x4, x7-x4
		// x[47] now Q9.21.
		x5 = (x5 >> 13) * c(sqrt2inv, 14)
		x6 = (x6 >> 13) * c(sqrt2inv, 14)
		// x[56] now Q9.21.
		x7, x5 = x7+x5, x7-x5
		x4, x6 = x4+x6, x4-x6
		// x[4567] now Q9.22.
		x4, x7 = dctBox(x4>>14, x7>>14, c(cos3, 12), -c(sin3, 12))
		x5, x6 = dctBox(x5>>14, x6>>14, c(cos1, 12), -c(sin1, 12))
		// x[4567] now Q10.20.

		// Stage 1: four reverse butterflies.
		x0, x7 = x0+x7, x0-x7
		x1, x6 = x1+x6, x1-x6
		x2, x5 = x2+x5, x2-x5
		x3, x4 = x3+x4, x3-x4
		// x[01234567] now Q10.21.

		// Cut down to the final Q10.3 precision.
		x0 >>= 18
		x1 >>= 18
		x2 >>= 18
		x3 >>= 18
		x4 >>= 18
		x5 >>= 18
		x6 >>= 18
		x7 >>= 18
		// x[01234567] now Q10.3.

		// Store in natural (unpermuted) order.
		b[0*8+i] = x0
		b[1*8+i] = x1
		b[2*8+i] = x2
		b[3*8+i] = x3
		b[4*8+i] = x4
		b[5*8+i] = x5
		b[6*8+i] = x6
		b[7*8+i] = x7
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jpeg
import (
"io"
)
// maxCodeLength is the maximum (inclusive) number of bits in a Huffman code.
const maxCodeLength = 16

// maxNCodes is the maximum (inclusive) number of codes in a Huffman tree.
const maxNCodes = 256

// lutSize is the log-2 size of the Huffman decoder's look-up table,
// so the table has 1<<lutSize (256) entries.
const lutSize = 8
// huffman is a Huffman decoder, specified in section C.
type huffman struct {
	// nCodes is the number of codes in the tree.
	nCodes int32
	// lut is the look-up table for the next lutSize bits in the bit-stream.
	// The high 8 bits of the uint16 are the encoded value. The low 8 bits
	// are 1 plus the code length, or 0 if the value is too large to fit in
	// lutSize bits.
	lut [1 << lutSize]uint16
	// vals are the decoded values, sorted by their encoding.
	vals [maxNCodes]uint8
	// minCodes[i] is the minimum code of length i, or -1 if there are no
	// codes of that length.
	minCodes [maxCodeLength]int32
	// maxCodes[i] is the maximum code of length i, or -1 if there are no
	// codes of that length.
	maxCodes [maxCodeLength]int32
	// valsIndices[i] is the index into vals of minCodes[i].
	valsIndices [maxCodeLength]int32
}
// errShortHuffmanData means that an unexpected EOF occurred while decoding
// Huffman data. It is returned by ensureNBits when the byte-stuffed stream
// ends early.
var errShortHuffmanData = FormatError("short Huffman data")
// ensureNBits reads bytes from the byte buffer to ensure that d.bits.n is at
// least n. For best performance (avoiding function calls inside hot loops),
// the caller is the one responsible for first checking that d.bits.n < n.
func (d *decoder) ensureNBits(n int32) error {
	for {
		c, err := d.readByteStuffedByte()
		if err != nil {
			if err == io.ErrUnexpectedEOF {
				return errShortHuffmanData
			}
			return err
		}
		// Append the new byte at the low end of the accumulator.
		d.bits.a = d.bits.a<<8 | uint32(c)
		d.bits.n += 8
		// Maintain the invariant m == 1<<(n-1): starting from an empty
		// accumulator (m == 0, n was 0) the mask points at the new top
		// bit; otherwise the existing mask shifts up with the accumulator.
		if d.bits.m == 0 {
			d.bits.m = 1 << 7
		} else {
			d.bits.m <<= 8
		}
		if d.bits.n >= n {
			break
		}
	}
	return nil
}
// receiveExtend is the composition of RECEIVE and EXTEND, specified in section
// F.2.2.1.
//
// It returns the signed integer that's encoded in t bits, where t < 16. The
// possible return values are:
//
// - t == 0: 0
// - t == 1: -1, +1
// - t == 2: -3, -2, +2, +3
// - t == 3: -7, -6, -5, -4, +4, +5, +6, +7
// - ...
// - t == 15: -32767, -32766, ..., -16384, +16384, ..., +32766, +32767
func (d *decoder) receiveExtend(t uint8) (int32, error) {
	if d.bits.n < int32(t) {
		if err := d.ensureNBits(int32(t)); err != nil {
			return 0, err
		}
	}
	// Consume t bits from the top of the accumulator.
	d.bits.n -= int32(t)
	d.bits.m >>= t
	s := int32(1) << t
	x := int32(d.bits.a>>uint8(d.bits.n)) & (s - 1)
	// This adjustment, assuming two's complement, is a branchless equivalent of:
	//
	// if x < s>>1 {
	// x += ((-1) << t) + 1
	// }
	//
	// sign is either -1 or 0, depending on whether x is in the low or high
	// half of the range 0 .. 1<<t.
	//
	// For t == 0, the shift count t-1 wraps to 255 under uint8, but x is
	// necessarily 0 (the mask s-1 is 0) and the adjustment term
	// ((-1)<<0)+1 is also 0, so the result is still 0.
	sign := (x >> (t - 1)) - 1
	x += sign & (((-1) << t) + 1)
	return x, nil
}
// processDHT processes a Define Huffman Table marker, and initializes a huffman
// struct from its contents. Specified in section B.2.4.2.
// n is the remaining payload length of the marker segment; one segment may
// define several tables, so loop until it is exhausted.
func (d *decoder) processDHT(n int) error {
	for n > 0 {
		// Each table starts with a 1-byte Tc/Th pair plus 16 code-length counts.
		if n < 17 {
			return FormatError("DHT has wrong length")
		}
		if err := d.readFull(d.tmp[:17]); err != nil {
			return err
		}
		tc := d.tmp[0] >> 4
		if tc > maxTc {
			return FormatError("bad Tc value")
		}
		th := d.tmp[0] & 0x0f
		// The baseline th <= 1 restriction is specified in table B.5.
		if th > maxTh || (d.baseline && th > 1) {
			return FormatError("bad Th value")
		}
		h := &d.huff[tc][th]

		// Read nCodes and h.vals (and derive h.nCodes).
		// nCodes[i] is the number of codes with code length i.
		// h.nCodes is the total number of codes.
		h.nCodes = 0
		var nCodes [maxCodeLength]int32
		for i := range nCodes {
			nCodes[i] = int32(d.tmp[i+1])
			h.nCodes += nCodes[i]
		}
		if h.nCodes == 0 {
			return FormatError("Huffman table has zero length")
		}
		if h.nCodes > maxNCodes {
			return FormatError("Huffman table has excessive length")
		}
		n -= int(h.nCodes) + 17
		if n < 0 {
			return FormatError("DHT has wrong length")
		}
		if err := d.readFull(h.vals[:h.nCodes]); err != nil {
			return err
		}

		// Derive the look-up table.
		clear(h.lut[:])
		var x, code uint32
		for i := uint32(0); i < lutSize; i++ {
			code <<= 1
			for j := int32(0); j < nCodes[i]; j++ {
				// The codeLength is 1+i, so shift code by 8-(1+i) to
				// calculate the high bits for every 8-bit sequence
				// whose codeLength's high bits matches code.
				// The high 8 bits of lutValue are the encoded value.
				// The low 8 bits are 1 plus the codeLength.
				base := uint8(code << (7 - i))
				lutValue := uint16(h.vals[x])<<8 | uint16(2+i)
				for k := uint8(0); k < 1<<(7-i); k++ {
					h.lut[base|k] = lutValue
				}
				code++
				x++
			}
		}

		// Derive minCodes, maxCodes, and valsIndices.
		var c, index int32
		for i, n := range nCodes {
			if n == 0 {
				h.minCodes[i] = -1
				h.maxCodes[i] = -1
				h.valsIndices[i] = -1
			} else {
				h.minCodes[i] = c
				h.maxCodes[i] = c + n - 1
				h.valsIndices[i] = index
				c += n
				index += n
			}
			// In a canonical Huffman code, the first code of the next
			// (longer) length continues from twice the next code here.
			c <<= 1
		}
	}
	return nil
}
// decodeHuffman returns the next Huffman-coded value from the bit-stream,
// decoded according to h.
func (d *decoder) decodeHuffman(h *huffman) (uint8, error) {
	if h.nCodes == 0 {
		return 0, FormatError("uninitialized Huffman table")
	}

	if d.bits.n < 8 {
		if err := d.ensureNBits(8); err != nil {
			if err != errMissingFF00 && err != errShortHuffmanData {
				return 0, err
			}
			// There are no more bytes of data in this segment, but we may still
			// be able to read the next symbol out of the previously read bits.
			// First, undo the readByte that the ensureNBits call made.
			if d.bytes.nUnreadable != 0 {
				d.unreadByteStuffedByte()
			}
			goto slowPath
		}
	}
	// Fast path: look up the next 8 bits in the LUT. A zero entry means the
	// code is longer than lutSize bits (see the huffman.lut field comment).
	if v := h.lut[(d.bits.a>>uint32(d.bits.n-lutSize))&0xff]; v != 0 {
		n := (v & 0xff) - 1
		d.bits.n -= int32(n)
		d.bits.m >>= n
		return uint8(v >> 8), nil
	}

slowPath:
	// Slow path: walk the canonical code one bit at a time.
	for i, code := 0, int32(0); i < maxCodeLength; i++ {
		if d.bits.n == 0 {
			if err := d.ensureNBits(1); err != nil {
				return 0, err
			}
		}
		if d.bits.a&d.bits.m != 0 {
			code |= 1
		}
		d.bits.n--
		d.bits.m >>= 1
		// maxCodes[i] is -1 when there are no codes of length i+1,
		// so this comparison also correctly skips those lengths.
		if code <= h.maxCodes[i] {
			return h.vals[h.valsIndices[i]+code-h.minCodes[i]], nil
		}
		code <<= 1
	}
	return 0, FormatError("bad Huffman code")
}
// decodeBit reads the next single bit (MSB first) from the bit-stream.
func (d *decoder) decodeBit() (bool, error) {
	if d.bits.n == 0 {
		if err := d.ensureNBits(1); err != nil {
			return false, err
		}
	}
	// The mask m points at the next unread bit.
	bit := d.bits.a&d.bits.m != 0
	d.bits.m >>= 1
	d.bits.n--
	return bit, nil
}
// decodeBits reads the next n bits (MSB first) from the bit-stream and
// returns them in the low bits of the result.
func (d *decoder) decodeBits(n int32) (uint32, error) {
	if d.bits.n < n {
		if err := d.ensureNBits(n); err != nil {
			return 0, err
		}
	}
	mask := uint32(1)<<uint32(n) - 1
	v := (d.bits.a >> uint32(d.bits.n-n)) & mask
	d.bits.n -= n
	d.bits.m >>= uint32(n)
	return v, nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jpeg implements a JPEG image decoder and encoder.
//
// JPEG is defined in ITU-T T.81: https://www.w3.org/Graphics/JPEG/itu-t81.pdf.
package jpeg
import (
"image"
"image/color"
"image/internal/imageutil"
"io"
)
// A FormatError reports that the input is not a valid JPEG.
type FormatError string

// Error implements the error interface.
func (e FormatError) Error() string { return "invalid JPEG format: " + string(e) }

// An UnsupportedError reports that the input uses a valid but unimplemented JPEG feature.
type UnsupportedError string

// Error implements the error interface.
func (e UnsupportedError) Error() string { return "unsupported JPEG feature: " + string(e) }

// errUnsupportedSubsamplingRatio reports a luma/chroma subsampling ratio
// that this decoder does not implement.
var errUnsupportedSubsamplingRatio = UnsupportedError("luma/chroma subsampling ratio")
// Component specification, specified in section B.2.2.
type component struct {
	h  int   // Horizontal sampling factor.
	v  int   // Vertical sampling factor.
	c  uint8 // Component identifier.
	tq uint8 // Quantization table destination selector.
	// The expand factors are used in flex mode (see decoder.flex).
	expandH int // Horizontal expansion factor for non-standard subsampling.
	expandV int // Vertical expansion factor for non-standard subsampling.
}
const (
	dcTable = 0
	acTable = 1
	maxTc   = 1
	// maxTh is 3; baseline JPEG further restricts Th to 0 or 1
	// (enforced in processDHT).
	maxTh         = 3
	maxTq         = 3
	maxComponents = 4
)

const (
	sof0Marker = 0xc0 // Start Of Frame (Baseline Sequential).
	sof1Marker = 0xc1 // Start Of Frame (Extended Sequential).
	sof2Marker = 0xc2 // Start Of Frame (Progressive).
	dhtMarker  = 0xc4 // Define Huffman Table.
	rst0Marker = 0xd0 // ReSTart (0).
	rst7Marker = 0xd7 // ReSTart (7).
	soiMarker  = 0xd8 // Start Of Image.
	eoiMarker  = 0xd9 // End Of Image.
	sosMarker  = 0xda // Start Of Scan.
	dqtMarker  = 0xdb // Define Quantization Table.
	driMarker  = 0xdd // Define Restart Interval.
	comMarker  = 0xfe // COMment.
	// "APPlication specific" markers aren't part of the JPEG spec per se,
	// but in practice, their use is described at
	// https://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html
	app0Marker  = 0xe0
	app14Marker = 0xee
	app15Marker = 0xef
)

// See https://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe
const (
	adobeTransformUnknown = 0
	adobeTransformYCbCr   = 1
	adobeTransformYCbCrK  = 2
)
// unzig maps from the zig-zag ordering to the natural ordering. For example,
// unzig[3] is the column and row of the fourth element in zig-zag order. The
// value is 16, which means first column (16%8 == 0) and third row (16/8 == 2).
// unzig[0] == 0: the DC coefficient stays in place.
var unzig = [blockSize]int{
	0, 1, 8, 16, 9, 2, 3, 10,
	17, 24, 32, 25, 18, 11, 4, 5,
	12, 19, 26, 33, 40, 48, 41, 34,
	27, 20, 13, 6, 7, 14, 21, 28,
	35, 42, 49, 56, 57, 50, 43, 36,
	29, 22, 15, 23, 30, 37, 44, 51,
	58, 59, 52, 45, 38, 31, 39, 46,
	53, 60, 61, 54, 47, 55, 62, 63,
}
// Reader combines io.ByteReader and io.Reader.
//
// Deprecated: Reader is not used by the [image/jpeg] package and should
// not be used by others. It is kept for compatibility.
type Reader interface {
	io.ByteReader
	io.Reader
}
// bits holds the unprocessed bits that have been taken from the byte-stream.
// The n least significant bits of a form the unread bits, to be read in MSB to
// LSB order. Since a is a uint32, at most 32 bits are buffered at a time.
type bits struct {
	a uint32 // accumulator.
	m uint32 // mask. m==1<<(n-1) when n>0, with m==0 when n==0.
	n int32  // the number of unread bits in a.
}
// decoder holds the entire state for decoding a single JPEG image.
type decoder struct {
	r    io.Reader
	bits bits
	// bytes is a byte buffer, similar to a bufio.Reader, except that it
	// has to be able to unread more than 1 byte, due to byte stuffing.
	// Byte stuffing is specified in section F.1.2.3.
	bytes struct {
		// buf[i:j] are the buffered bytes read from the underlying
		// io.Reader that haven't yet been passed further on.
		buf  [4096]byte
		i, j int
		// nUnreadable is the number of bytes to back up i after
		// overshooting. It can be 0, 1 or 2.
		nUnreadable int
	}
	width, height int // Image dimensions in pixels, from the SOF segment.

	img1        *image.Gray  // Destination image when nComp == 1.
	img3        *image.YCbCr // Destination image when nComp is 3 or 4.
	blackPix    []byte       // Fourth-channel (black) samples when nComp == 4.
	blackStride int          // Row stride, in bytes, of blackPix.

	// For non-standard subsampling ratios (flex mode).
	flex       bool // True if using non-standard subsampling that requires manual pixel expansion.
	maxH, maxV int  // Maximum horizontal and vertical sampling factors across all components.

	ri    int // Restart Interval.
	nComp int // Number of color components, per the SOF segment.

	// As per section 4.5, there are four modes of operation (selected by the
	// SOF? markers): sequential DCT, progressive DCT, lossless and
	// hierarchical, although this implementation does not support the latter
	// two non-DCT modes. Sequential DCT is further split into baseline and
	// extended, as per section 4.11.
	baseline    bool
	progressive bool

	jfif                bool   // Whether a JFIF APP0 segment was seen.
	adobeTransformValid bool   // Whether an Adobe APP14 segment was seen.
	adobeTransform      uint8  // Adobe APP14 color transform code, if any.
	eobRun              uint16 // End-of-Band run, specified in section G.1.2.2.

	comp       [maxComponents]component
	progCoeffs [maxComponents][]block // Saved state between progressive-mode scans.
	huff       [maxTc + 1][maxTh + 1]huffman
	quant      [maxTq + 1]block    // Quantization tables, in zig-zag order.
	tmp        [2 * blockSize]byte // Scratch buffer for parsing marker segments.
}
// fill fills up the d.bytes.buf buffer from the underlying io.Reader. It
// should only be called when there are no unread bytes in d.bytes.
func (d *decoder) fill() error {
	if d.bytes.i != d.bytes.j {
		panic("jpeg: fill called when unread bytes exist")
	}

	// Preserve the final two buffered bytes at the front of the buffer, so
	// that a subsequent unreadByteStuffedByte can still back up over them.
	if j := d.bytes.j; j > 2 {
		d.bytes.buf[0], d.bytes.buf[1] = d.bytes.buf[j-2], d.bytes.buf[j-1]
		d.bytes.i, d.bytes.j = 2, 2
	}

	// Top up the remainder of the buffer from the source reader.
	n, err := d.r.Read(d.bytes.buf[d.bytes.j:])
	d.bytes.j += n
	if n > 0 {
		// Progress was made; any error will surface on a later call.
		return nil
	}
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	return err
}
// unreadByteStuffedByte undoes the most recent readByteStuffedByte call,
// giving a byte of data back from d.bits to d.bytes. The Huffman look-up table
// requires at least 8 bits for look-up, which means that Huffman decoding can
// sometimes overshoot and read one or two too many bytes. Two-byte overshoot
// can happen when expecting to read a 0xff 0x00 byte-stuffed byte.
func (d *decoder) unreadByteStuffedByte() {
	d.bytes.i -= d.bytes.nUnreadable
	d.bytes.nUnreadable = 0
	if d.bits.n < 8 {
		return
	}
	// Drop a whole byte's worth of unread bits from the accumulator.
	d.bits.a >>= 8
	d.bits.n -= 8
	d.bits.m >>= 8
}
// readByte returns the next byte, whether buffered or not buffered. It does
// not care about byte stuffing.
func (d *decoder) readByte() (byte, error) {
	// Refill until at least one buffered byte is available.
	for d.bytes.i == d.bytes.j {
		if err := d.fill(); err != nil {
			return 0, err
		}
	}
	b := d.bytes.buf[d.bytes.i]
	d.bytes.i++
	d.bytes.nUnreadable = 0
	return b, nil
}
// errMissingFF00 means that readByteStuffedByte encountered an 0xff byte (a
// marker byte) that wasn't the expected byte-stuffed sequence 0xff, 0x00.
// Within entropy-coded data, a literal 0xff must always be escaped this way.
var errMissingFF00 = FormatError("missing 0xff00 sequence")
// readByteStuffedByte is like readByte but is for byte-stuffed Huffman data.
// It treats "0xff 0x00" as a single 0xff data byte and reports errMissingFF00
// for any other byte following an 0xff. It also maintains
// d.bytes.nUnreadable (0, 1 or 2) so that unreadByteStuffedByte can back up.
func (d *decoder) readByteStuffedByte() (x byte, err error) {
	// Take the fast path if d.bytes.buf contains at least two bytes.
	if d.bytes.i+2 <= d.bytes.j {
		x = d.bytes.buf[d.bytes.i]
		d.bytes.i++
		d.bytes.nUnreadable = 1
		if x != 0xff {
			return x, err
		}
		if d.bytes.buf[d.bytes.i] != 0x00 {
			return 0, errMissingFF00
		}
		d.bytes.i++
		d.bytes.nUnreadable = 2
		return 0xff, nil
	}

	// Slow path: go through readByte, which may refill the buffer.
	d.bytes.nUnreadable = 0

	x, err = d.readByte()
	if err != nil {
		return 0, err
	}
	d.bytes.nUnreadable = 1
	if x != 0xff {
		return x, nil
	}

	x, err = d.readByte()
	if err != nil {
		return 0, err
	}
	d.bytes.nUnreadable = 2
	if x != 0x00 {
		return 0, errMissingFF00
	}
	return 0xff, nil
}
// readFull reads exactly len(p) bytes into p. It does not care about byte
// stuffing.
func (d *decoder) readFull(p []byte) error {
	// Give back any overshot bytes before bypassing the stuffing logic.
	if d.bytes.nUnreadable != 0 {
		if d.bits.n >= 8 {
			d.unreadByteStuffedByte()
		}
		d.bytes.nUnreadable = 0
	}

	for len(p) > 0 {
		// Drain as much as possible from the buffered bytes, then refill.
		n := copy(p, d.bytes.buf[d.bytes.i:d.bytes.j])
		d.bytes.i += n
		p = p[n:]
		if len(p) == 0 {
			break
		}
		if err := d.fill(); err != nil {
			return err
		}
	}
	return nil
}
// ignore ignores the next n bytes. Like readFull, it bypasses the
// byte-stuffing logic, so it first returns any overshot bytes to the buffer.
func (d *decoder) ignore(n int) error {
	// Unread the overshot bytes, if any.
	if d.bytes.nUnreadable != 0 {
		if d.bits.n >= 8 {
			d.unreadByteStuffedByte()
		}
		d.bytes.nUnreadable = 0
	}
	for {
		// Discard as much as possible from the buffered bytes; the built-in
		// min replaces the manual clamp (this file already targets Go 1.21+,
		// as it uses the built-in max elsewhere).
		m := min(n, d.bytes.j-d.bytes.i)
		d.bytes.i += m
		n -= m
		if n == 0 {
			break
		}
		if err := d.fill(); err != nil {
			return err
		}
	}
	return nil
}
// processSOF parses a Start Of Frame marker segment of n payload bytes,
// recording the image dimensions and, per component, its identifier, sampling
// factors and quantization table selector. It also derives d.maxH/d.maxV and
// each component's expansion factors for flex mode.
// Specified in section B.2.2.
func (d *decoder) processSOF(n int) error {
	if d.nComp != 0 {
		return FormatError("multiple SOF markers")
	}
	// The payload length determines the component count: 6 fixed bytes plus
	// 3 bytes per component.
	switch n {
	case 6 + 3*1: // Grayscale image.
		d.nComp = 1
	case 6 + 3*3: // YCbCr or RGB image.
		d.nComp = 3
	case 6 + 3*4: // YCbCrK or CMYK image.
		d.nComp = 4
	default:
		return UnsupportedError("number of components")
	}
	if err := d.readFull(d.tmp[:n]); err != nil {
		return err
	}
	// We only support 8-bit precision.
	if d.tmp[0] != 8 {
		return UnsupportedError("precision")
	}
	d.height = int(d.tmp[1])<<8 + int(d.tmp[2])
	d.width = int(d.tmp[3])<<8 + int(d.tmp[4])
	if int(d.tmp[5]) != d.nComp {
		return FormatError("SOF has wrong length")
	}
	for i := 0; i < d.nComp; i++ {
		d.comp[i].c = d.tmp[6+3*i]
		// Section B.2.2 states that "the value of C_i shall be different from
		// the values of C_1 through C_(i-1)".
		for j := 0; j < i; j++ {
			if d.comp[i].c == d.comp[j].c {
				return FormatError("repeated component identifier")
			}
		}
		d.comp[i].tq = d.tmp[8+3*i]
		if d.comp[i].tq > maxTq {
			return FormatError("bad Tq value")
		}
		// hv packs the horizontal (high nibble) and vertical (low nibble)
		// sampling factors.
		hv := d.tmp[7+3*i]
		h, v := int(hv>>4), int(hv&0x0f)
		if h < 1 || 4 < h || v < 1 || 4 < v {
			return FormatError("luma/chroma subsampling ratio")
		}
		if h == 3 || v == 3 {
			return errUnsupportedSubsamplingRatio
		}
		switch d.nComp {
		case 1:
			// If a JPEG image has only one component, section A.2 says "this data
			// is non-interleaved by definition" and section A.2.2 says "[in this
			// case...] the order of data units within a scan shall be left-to-right
			// and top-to-bottom... regardless of the values of H_1 and V_1". Section
			// 4.8.2 also says "[for non-interleaved data], the MCU is defined to be
			// one data unit". Similarly, section A.1.1 explains that it is the ratio
			// of H_i to max_j(H_j) that matters, and similarly for V. For grayscale
			// images, H_1 is the maximum H_j for all components j, so that ratio is
			// always 1. The component's (h, v) is effectively always (1, 1): even if
			// the nominal (h, v) is (2, 1), a 20x5 image is encoded in three 8x8
			// MCUs, not two 16x8 MCUs.
			h, v = 1, 1
		case 3:
			// For YCbCr images, we support both standard subsampling ratios
			// (4:4:4, 4:4:0, 4:2:2, 4:2:0, 4:1:1, 4:1:0) and non-standard ratios
			// where components may have different sampling factors. The only
			// restriction is that each component's sampling factors must evenly
			// divide the maximum factors (validated after the loop).
		case 4:
			// For 4-component images (either CMYK or YCbCrK), we only support two
			// hv vectors: [0x11 0x11 0x11 0x11] and [0x22 0x11 0x11 0x22].
			// Theoretically, 4-component JPEG images could mix and match hv values
			// but in practice, those two combinations are the only ones in use,
			// and it simplifies the applyBlack code below if we can assume that:
			// - for CMYK, the C and K channels have full samples, and if the M
			// and Y channels subsample, they subsample both horizontally and
			// vertically.
			// - for YCbCrK, the Y and K channels have full samples.
			switch i {
			case 0:
				if hv != 0x11 && hv != 0x22 {
					return errUnsupportedSubsamplingRatio
				}
			case 1, 2:
				if hv != 0x11 {
					return errUnsupportedSubsamplingRatio
				}
			case 3:
				if d.comp[0].h != h || d.comp[0].v != v {
					return errUnsupportedSubsamplingRatio
				}
			}
		}
		d.maxH, d.maxV = max(d.maxH, h), max(d.maxV, v)
		d.comp[i].h = h
		d.comp[i].v = v
	}
	// For 3-component images, validate that maxH and maxV are evenly divisible
	// by each component's sampling factors.
	if d.nComp == 3 {
		for i := 0; i < 3; i++ {
			if d.maxH%d.comp[i].h != 0 || d.maxV%d.comp[i].v != 0 {
				return errUnsupportedSubsamplingRatio
			}
		}
	}
	// Compute expansion factors for each component. These are 1 everywhere
	// except in flex mode, where reconstructBlock uses them to replicate
	// subsampled pixels.
	for i := 0; i < d.nComp; i++ {
		d.comp[i].expandH = d.maxH / d.comp[i].h
		d.comp[i].expandV = d.maxV / d.comp[i].v
	}
	return nil
}
// Specified in section B.2.4.1.
func (d *decoder) processDQT(n int) error {
	for n > 0 {
		n--
		x, err := d.readByte()
		if err != nil {
			return err
		}
		tq := x & 0x0f
		if tq > maxTq {
			return FormatError("bad Tq value")
		}
		pq := x >> 4
		if pq > 1 {
			return FormatError("bad Pq value")
		}
		// elemSize is 1 for 8-bit table entries (Pq == 0) and 2 for 16-bit
		// entries (Pq == 1).
		elemSize := int(pq) + 1
		if n < elemSize*blockSize {
			// Not enough payload left for a whole table; fall through to the
			// length check below, which reports the error.
			break
		}
		n -= elemSize * blockSize
		if err := d.readFull(d.tmp[:elemSize*blockSize]); err != nil {
			return err
		}
		if pq == 0 {
			for i := range d.quant[tq] {
				d.quant[tq][i] = int32(d.tmp[i])
			}
		} else {
			for i := range d.quant[tq] {
				d.quant[tq][i] = int32(d.tmp[2*i])<<8 | int32(d.tmp[2*i+1])
			}
		}
	}
	if n != 0 {
		return FormatError("DQT has wrong length")
	}
	return nil
}
// Specified in section B.2.4.4.
func (d *decoder) processDRI(n int) error {
	if n != 2 {
		return FormatError("DRI has wrong length")
	}
	if err := d.readFull(d.tmp[:2]); err != nil {
		return err
	}
	// The restart interval is a big-endian uint16 count of MCUs.
	hi, lo := int(d.tmp[0]), int(d.tmp[1])
	d.ri = hi<<8 | lo
	return nil
}
// processApp0Marker handles an APP0 segment of n payload bytes, noting
// whether it carries the JFIF identifier. Unrecognized payload is skipped.
func (d *decoder) processApp0Marker(n int) error {
	if n < 5 {
		return d.ignore(n)
	}
	if err := d.readFull(d.tmp[:5]); err != nil {
		return err
	}
	// A JFIF APP0 segment begins with the NUL-terminated tag "JFIF".
	d.jfif = string(d.tmp[:5]) == "JFIF\x00"
	if n -= 5; n > 0 {
		return d.ignore(n)
	}
	return nil
}
// processApp14Marker handles an APP14 segment of n payload bytes, recording
// the Adobe color transform code when present. Unrecognized payload is
// skipped.
func (d *decoder) processApp14Marker(n int) error {
	if n < 12 {
		return d.ignore(n)
	}
	if err := d.readFull(d.tmp[:12]); err != nil {
		return err
	}
	// An Adobe APP14 segment starts with the tag "Adobe"; byte 11 holds the
	// color transform code.
	if string(d.tmp[:5]) == "Adobe" {
		d.adobeTransformValid = true
		d.adobeTransform = d.tmp[11]
	}
	if n -= 12; n > 0 {
		return d.ignore(n)
	}
	return nil
}
// decode reads a JPEG image from r and returns it as an image.Image. If
// configOnly is true, decoding stops once the dimensions and color model are
// known (at the first SOS marker, or earlier for JFIF images) and the
// returned image is nil.
func (d *decoder) decode(r io.Reader, configOnly bool) (image.Image, error) {
	d.r = r

	// Check for the Start Of Image marker.
	if err := d.readFull(d.tmp[:2]); err != nil {
		return nil, err
	}
	if d.tmp[0] != 0xff || d.tmp[1] != soiMarker {
		return nil, FormatError("missing SOI marker")
	}

	// Process the remaining segments until the End Of Image marker.
	for {
		err := d.readFull(d.tmp[:2])
		if err != nil {
			return nil, err
		}
		for d.tmp[0] != 0xff {
			// Strictly speaking, this is a format error. However, libjpeg is
			// liberal in what it accepts. As of version 9, next_marker in
			// jdmarker.c treats this as a warning (JWRN_EXTRANEOUS_DATA) and
			// continues to decode the stream. Even before next_marker sees
			// extraneous data, jpeg_fill_bit_buffer in jdhuff.c reads as many
			// bytes as it can, possibly past the end of a scan's data. It
			// effectively puts back any markers that it overscanned (e.g. an
			// "\xff\xd9" EOI marker), but it does not put back non-marker data,
			// and thus it can silently ignore a small number of extraneous
			// non-marker bytes before next_marker has a chance to see them (and
			// print a warning).
			//
			// We are therefore also liberal in what we accept. Extraneous data
			// is silently ignored.
			//
			// This is similar to, but not exactly the same as, the restart
			// mechanism within a scan (the RST[0-7] markers).
			//
			// Note that extraneous 0xff bytes in e.g. SOS data are escaped as
			// "\xff\x00", and so are detected a little further down below.
			d.tmp[0] = d.tmp[1]
			d.tmp[1], err = d.readByte()
			if err != nil {
				return nil, err
			}
		}
		marker := d.tmp[1]
		if marker == 0 {
			// Treat "\xff\x00" as extraneous data.
			continue
		}
		for marker == 0xff {
			// Section B.1.1.2 says, "Any marker may optionally be preceded by any
			// number of fill bytes, which are bytes assigned code X'FF'".
			marker, err = d.readByte()
			if err != nil {
				return nil, err
			}
		}
		if marker == eoiMarker { // End Of Image.
			break
		}
		if rst0Marker <= marker && marker <= rst7Marker {
			// Figures B.2 and B.16 of the specification suggest that restart markers should
			// only occur between Entropy Coded Segments and not after the final ECS.
			// However, some encoders may generate incorrect JPEGs with a final restart
			// marker. That restart marker will be seen here instead of inside the processSOS
			// method, and is ignored as a harmless error. Restart markers have no extra data,
			// so we check for this before we read the 16-bit length of the segment.
			continue
		}

		// Read the 16-bit length of the segment. The value includes the 2 bytes for the
		// length itself, so we subtract 2 to get the number of remaining bytes.
		if err = d.readFull(d.tmp[:2]); err != nil {
			return nil, err
		}
		n := int(d.tmp[0])<<8 + int(d.tmp[1]) - 2
		if n < 0 {
			return nil, FormatError("short segment length")
		}

		switch marker {
		case sof0Marker, sof1Marker, sof2Marker:
			d.baseline = marker == sof0Marker
			d.progressive = marker == sof2Marker
			err = d.processSOF(n)
			if configOnly && d.jfif {
				// A JFIF image's color model is fully determined by the SOF
				// segment, so there is no need to keep scanning.
				return nil, err
			}
		case dhtMarker:
			if configOnly {
				err = d.ignore(n)
			} else {
				err = d.processDHT(n)
			}
		case dqtMarker:
			if configOnly {
				err = d.ignore(n)
			} else {
				err = d.processDQT(n)
			}
		case sosMarker:
			if configOnly {
				return nil, nil
			}
			err = d.processSOS(n)
		case driMarker:
			if configOnly {
				err = d.ignore(n)
			} else {
				err = d.processDRI(n)
			}
		case app0Marker:
			err = d.processApp0Marker(n)
		case app14Marker:
			err = d.processApp14Marker(n)
		default:
			if app0Marker <= marker && marker <= app15Marker || marker == comMarker {
				err = d.ignore(n)
			} else if marker < 0xc0 { // See Table B.1 "Marker code assignments".
				err = FormatError("unknown marker")
			} else {
				err = UnsupportedError("unknown marker")
			}
		}
		if err != nil {
			return nil, err
		}
	}

	if d.progressive {
		if err := d.reconstructProgressiveImage(); err != nil {
			return nil, err
		}
	}
	if d.img1 != nil {
		return d.img1, nil
	}
	if d.img3 != nil {
		if d.blackPix != nil {
			return d.applyBlack()
		} else if d.isRGB() {
			return d.convertToRGB()
		}
		return d.img3, nil
	}
	return nil, FormatError("missing SOS marker")
}
// applyBlack combines d.img3 and d.blackPix into a CMYK image. The formula
// used depends on whether the JPEG image is stored as CMYK or YCbCrK,
// indicated by the APP14 (Adobe) metadata.
//
// Adobe CMYK JPEG images are inverted, where 255 means no ink instead of full
// ink, so we apply "v = 255 - v" at various points. Note that a double
// inversion is a no-op, so inversions might be implicit in the code below.
func (d *decoder) applyBlack() (image.Image, error) {
	if !d.adobeTransformValid {
		return nil, UnsupportedError("unknown color model: 4-component JPEG doesn't have Adobe APP14 metadata")
	}

	// If the 4-component JPEG image isn't explicitly marked as "Unknown (RGB
	// or CMYK)" as per
	// https://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe
	// we assume that it is YCbCrK. This matches libjpeg's jdapimin.c.
	if d.adobeTransform != adobeTransformUnknown {
		// Convert the YCbCr part of the YCbCrK to RGB, invert the RGB to get
		// CMY, and patch in the original K. The RGB to CMY inversion cancels
		// out the 'Adobe inversion' described in the applyBlack doc comment
		// above, so in practice, only the fourth channel (black) is inverted.
		bounds := d.img3.Bounds()
		img := image.NewRGBA(bounds)
		imageutil.DrawYCbCr(img, bounds, d.img3, bounds.Min)
		// Overwrite the alpha channel (every fourth byte) with the inverted
		// black sample for that pixel; the RGBA buffer is then reinterpreted
		// as CMYK below.
		for iBase, y := 0, bounds.Min.Y; y < bounds.Max.Y; iBase, y = iBase+img.Stride, y+1 {
			for i, x := iBase+3, bounds.Min.X; x < bounds.Max.X; i, x = i+4, x+1 {
				img.Pix[i] = 255 - d.blackPix[(y-bounds.Min.Y)*d.blackStride+(x-bounds.Min.X)]
			}
		}
		return &image.CMYK{
			Pix:    img.Pix,
			Stride: img.Stride,
			Rect:   img.Rect,
		}, nil
	}

	// The first three channels (cyan, magenta, yellow) of the CMYK
	// were decoded into d.img3, but each channel was decoded into a separate
	// []byte slice, and some channels may be subsampled. We interleave the
	// separate channels into an image.CMYK's single []byte slice containing 4
	// contiguous bytes per pixel.
	bounds := d.img3.Bounds()
	img := image.NewCMYK(bounds)

	translations := [4]struct {
		src    []byte
		stride int
	}{
		{d.img3.Y, d.img3.YStride},
		{d.img3.Cb, d.img3.CStride},
		{d.img3.Cr, d.img3.CStride},
		{d.blackPix, d.blackStride},
	}
	for t, translation := range translations {
		// processSOF restricts 4-component images to hv vectors where a
		// subsampled channel halves both dimensions, so dividing sx and sy
		// by 2 suffices.
		subsample := d.comp[t].h != d.comp[0].h || d.comp[t].v != d.comp[0].v
		for iBase, y := 0, bounds.Min.Y; y < bounds.Max.Y; iBase, y = iBase+img.Stride, y+1 {
			sy := y - bounds.Min.Y
			if subsample {
				sy /= 2
			}
			for i, x := iBase+t, bounds.Min.X; x < bounds.Max.X; i, x = i+4, x+1 {
				sx := x - bounds.Min.X
				if subsample {
					sx /= 2
				}
				img.Pix[i] = 255 - translation.src[sy*translation.stride+sx]
			}
		}
	}
	return img, nil
}
// isRGB reports whether the 3-component image should be interpreted as RGB
// rather than YCbCr.
func (d *decoder) isRGB() bool {
	switch {
	case d.jfif:
		return false
	case d.adobeTransformValid && d.adobeTransform == adobeTransformUnknown:
		// https://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe
		// says that 0 means Unknown (and in practice RGB) and 1 means YCbCr.
		return true
	}
	// Fall back on the component identifiers literally spelling out "RGB".
	return d.comp[0].c == 'R' && d.comp[1].c == 'G' && d.comp[2].c == 'B'
}
// convertToRGB re-interprets the three decoded planes as R, G and B channels
// and interleaves them into an *image.RGBA.
func (d *decoder) convertToRGB() (image.Image, error) {
	// cScale maps a luma x offset to its (possibly subsampled) chroma offset.
	cScale := d.comp[0].h / d.comp[1].h
	bounds := d.img3.Bounds()
	img := image.NewRGBA(bounds)
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		po := img.PixOffset(bounds.Min.X, y)
		yo := d.img3.YOffset(bounds.Min.X, y)
		co := d.img3.COffset(bounds.Min.X, y)
		width := bounds.Max.X - bounds.Min.X
		for i := 0; i < width; i++ {
			p := img.Pix[po+4*i : po+4*i+4 : po+4*i+4]
			p[0] = d.img3.Y[yo+i]
			p[1] = d.img3.Cb[co+i/cScale]
			p[2] = d.img3.Cr[co+i/cScale]
			p[3] = 255
		}
	}
	return img, nil
}
// Decode reads a JPEG image from r and returns it as an [image.Image].
func Decode(r io.Reader) (image.Image, error) {
	// Each call uses a fresh decoder; the zero value is ready to use.
	return new(decoder).decode(r, false)
}
// DecodeConfig returns the color model and dimensions of a JPEG image without
// decoding the entire image.
func DecodeConfig(r io.Reader) (image.Config, error) {
	var d decoder
	if _, err := d.decode(r, true); err != nil {
		return image.Config{}, err
	}
	// Pick the color model implied by the component count recorded by the
	// SOF segment.
	var cm color.Model
	switch d.nComp {
	case 1:
		cm = color.GrayModel
	case 3:
		if d.isRGB() {
			cm = color.RGBAModel
		} else {
			cm = color.YCbCrModel
		}
	case 4:
		cm = color.CMYKModel
	default:
		// No SOF segment was seen before the scan data.
		return image.Config{}, FormatError("missing SOF marker")
	}
	return image.Config{
		ColorModel: cm,
		Width:      d.width,
		Height:     d.height,
	}, nil
}
// Register the JPEG format with the image package, keyed on the two-byte SOI
// marker (0xff 0xd8) that starts every JPEG stream.
func init() {
	image.RegisterFormat("jpeg", "\xff\xd8", Decode, DecodeConfig)
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jpeg
import (
"image"
)
// makeImg allocates and initializes the destination image. mxx and myy are
// the image's dimensions in MCUs (see processSOS).
func (d *decoder) makeImg(mxx, myy int) {
	if d.nComp == 1 {
		m := image.NewGray(image.Rect(0, 0, 8*mxx, 8*myy))
		d.img1 = m.SubImage(image.Rect(0, 0, d.width, d.height)).(*image.Gray)
		return
	}

	// Determine if we need flex mode for non-standard subsampling.
	// Flex mode is needed when:
	// - Cb and Cr have different sampling factors, or
	// - The Y component doesn't have the maximum sampling factors, or
	// - The ratio doesn't match any standard YCbCrSubsampleRatio.
	subsampleRatio := image.YCbCrSubsampleRatio444
	if d.comp[1].h != d.comp[2].h || d.comp[1].v != d.comp[2].v ||
		d.maxH != d.comp[0].h || d.maxV != d.comp[0].v {
		d.flex = true
	} else {
		// Pack the luma:chroma ratios into a nibble pair and map it onto the
		// standard subsample ratios.
		hRatio := d.maxH / d.comp[1].h
		vRatio := d.maxV / d.comp[1].v
		switch hRatio<<4 | vRatio {
		case 0x11:
			subsampleRatio = image.YCbCrSubsampleRatio444
		case 0x12:
			subsampleRatio = image.YCbCrSubsampleRatio440
		case 0x21:
			subsampleRatio = image.YCbCrSubsampleRatio422
		case 0x22:
			subsampleRatio = image.YCbCrSubsampleRatio420
		case 0x41:
			subsampleRatio = image.YCbCrSubsampleRatio411
		case 0x42:
			subsampleRatio = image.YCbCrSubsampleRatio410
		default:
			d.flex = true
		}
	}
	// In flex mode the image is allocated as 4:4:4 (the default
	// subsampleRatio) and reconstructBlock expands subsampled components.
	m := image.NewYCbCr(image.Rect(0, 0, 8*d.maxH*mxx, 8*d.maxV*myy), subsampleRatio)
	d.img3 = m.SubImage(image.Rect(0, 0, d.width, d.height)).(*image.YCbCr)

	if d.nComp == 4 {
		// The fourth (black) channel gets its own plane, sized by its own
		// sampling factors.
		h3, v3 := d.comp[3].h, d.comp[3].v
		d.blackPix = make([]byte, 8*h3*mxx*8*v3*myy)
		d.blackStride = 8 * h3 * mxx
	}
}
// processSOS parses a Start Of Scan marker segment of n payload bytes and
// then decodes the entropy-coded data that follows it, one MCU at a time,
// honoring restart intervals. For progressive images, decoded coefficients
// are accumulated in d.progCoeffs rather than rendered immediately.
// Specified in section B.2.3.
func (d *decoder) processSOS(n int) error {
	if d.nComp == 0 {
		return FormatError("missing SOF marker")
	}
	if n < 6 || 4+2*d.nComp < n || n%2 != 0 {
		return FormatError("SOS has wrong length")
	}
	if err := d.readFull(d.tmp[:n]); err != nil {
		return err
	}
	nComp := int(d.tmp[0])
	if n != 4+2*nComp {
		return FormatError("SOS length inconsistent with number of components")
	}
	// scan describes, per scan component, which frame component it refers to
	// and which Huffman tables it uses.
	var scan [maxComponents]struct {
		compIndex uint8
		td        uint8 // DC table selector.
		ta        uint8 // AC table selector.
	}
	totalHV := 0
	for i := 0; i < nComp; i++ {
		cs := d.tmp[1+2*i] // Component selector.
		compIndex := -1
		for j, comp := range d.comp[:d.nComp] {
			if cs == comp.c {
				compIndex = j
			}
		}
		if compIndex < 0 {
			return FormatError("unknown component selector")
		}
		scan[i].compIndex = uint8(compIndex)
		// Section B.2.3 states that "the value of Cs_j shall be different from
		// the values of Cs_1 through Cs_(j-1)". Since we have previously
		// verified that a frame's component identifiers (C_i values in section
		// B.2.2) are unique, it suffices to check that the implicit indexes
		// into d.comp are unique.
		for j := 0; j < i; j++ {
			if scan[i].compIndex == scan[j].compIndex {
				return FormatError("repeated component selector")
			}
		}
		totalHV += d.comp[compIndex].h * d.comp[compIndex].v

		// The baseline t <= 1 restriction is specified in table B.3.
		scan[i].td = d.tmp[2+2*i] >> 4
		if t := scan[i].td; t > maxTh || (d.baseline && t > 1) {
			return FormatError("bad Td value")
		}
		scan[i].ta = d.tmp[2+2*i] & 0x0f
		if t := scan[i].ta; t > maxTh || (d.baseline && t > 1) {
			return FormatError("bad Ta value")
		}
	}
	// Section B.2.3 states that if there is more than one component then the
	// total H*V values in a scan must be <= 10.
	if d.nComp > 1 && totalHV > 10 {
		return FormatError("total sampling factors too large")
	}

	// zigStart and zigEnd are the spectral selection bounds.
	// ah and al are the successive approximation high and low values.
	// The spec calls these values Ss, Se, Ah and Al.
	//
	// For progressive JPEGs, these are the two more-or-less independent
	// aspects of progression. Spectral selection progression is when not
	// all of a block's 64 DCT coefficients are transmitted in one pass.
	// For example, three passes could transmit coefficient 0 (the DC
	// component), coefficients 1-5, and coefficients 6-63, in zig-zag
	// order. Successive approximation is when not all of the bits of a
	// band of coefficients are transmitted in one pass. For example,
	// three passes could transmit the 6 most significant bits, followed
	// by the second-least significant bit, followed by the least
	// significant bit.
	//
	// For sequential JPEGs, these parameters are hard-coded to 0/63/0/0, as
	// per table B.3.
	zigStart, zigEnd, ah, al := int32(0), int32(blockSize-1), uint32(0), uint32(0)
	if d.progressive {
		zigStart = int32(d.tmp[1+2*nComp])
		zigEnd = int32(d.tmp[2+2*nComp])
		ah = uint32(d.tmp[3+2*nComp] >> 4)
		al = uint32(d.tmp[3+2*nComp] & 0x0f)
		if (zigStart == 0 && zigEnd != 0) || zigStart > zigEnd || blockSize <= zigEnd {
			return FormatError("bad spectral selection bounds")
		}
		if zigStart != 0 && nComp != 1 {
			return FormatError("progressive AC coefficients for more than one component")
		}
		if ah != 0 && ah != al+1 {
			return FormatError("bad successive approximation values")
		}
	}

	// mxx and myy are the number of MCUs (Minimum Coded Units) in the image.
	// The MCU dimensions are based on the maximum sampling factors.
	// For standard subsampling, maxH/maxV equals h0/v0 (Y's factors).
	// For flex mode, Y may not have the maximum factors.
	mxx := (d.width + 8*d.maxH - 1) / (8 * d.maxH)
	myy := (d.height + 8*d.maxV - 1) / (8 * d.maxV)
	if d.img1 == nil && d.img3 == nil {
		d.makeImg(mxx, myy)
	}
	if d.progressive {
		for i := 0; i < nComp; i++ {
			compIndex := scan[i].compIndex
			if d.progCoeffs[compIndex] == nil {
				d.progCoeffs[compIndex] = make([]block, mxx*myy*d.comp[compIndex].h*d.comp[compIndex].v)
			}
		}
	}

	d.bits = bits{}
	mcu, expectedRST := 0, uint8(rst0Marker)
	var (
		// b is the decoded coefficients, in natural (not zig-zag) order.
		b  block
		dc [maxComponents]int32
		// bx and by are the location of the current block, in units of 8x8
		// blocks: the third block in the first row has (bx, by) = (2, 0).
		bx, by     int
		blockCount int
	)
	for my := 0; my < myy; my++ {
		for mx := 0; mx < mxx; mx++ {
			for i := 0; i < nComp; i++ {
				compIndex := scan[i].compIndex
				hi := d.comp[compIndex].h
				vi := d.comp[compIndex].v
				for j := 0; j < hi*vi; j++ {
					// The blocks are traversed one MCU at a time. For 4:2:0 chroma
					// subsampling, there are four Y 8x8 blocks in every 16x16 MCU.
					//
					// For a sequential 32x16 pixel image, the Y blocks visiting order is:
					// 0 1 4 5
					// 2 3 6 7
					//
					// For progressive images, the interleaved scans (those with nComp > 1)
					// are traversed as above, but non-interleaved scans are traversed left
					// to right, top to bottom:
					// 0 1 2 3
					// 4 5 6 7
					// Only DC scans (zigStart == 0) can be interleaved. AC scans must have
					// only one component.
					//
					// To further complicate matters, for non-interleaved scans, there is no
					// data for any blocks that are inside the image at the MCU level but
					// outside the image at the pixel level. For example, a 24x16 pixel 4:2:0
					// progressive image consists of two 16x16 MCUs. The interleaved scans
					// will process 8 Y blocks:
					// 0 1 4 5
					// 2 3 6 7
					// The non-interleaved scans will process only 6 Y blocks:
					// 0 1 2
					// 3 4 5
					if nComp != 1 {
						bx = hi*mx + j%hi
						by = vi*my + j/hi
					} else {
						q := mxx * hi
						bx = blockCount % q
						by = blockCount / q
						blockCount++
						if bx*8 >= d.width || by*8 >= d.height {
							continue
						}
					}

					// Load the previous partially decoded coefficients, if applicable.
					if d.progressive {
						b = d.progCoeffs[compIndex][by*mxx*hi+bx]
					} else {
						b = block{}
					}

					if ah != 0 {
						if err := d.refine(&b, &d.huff[acTable][scan[i].ta], zigStart, zigEnd, 1<<al); err != nil {
							return err
						}
					} else {
						zig := zigStart
						if zig == 0 {
							zig++
							// Decode the DC coefficient, as specified in section F.2.2.1.
							value, err := d.decodeHuffman(&d.huff[dcTable][scan[i].td])
							if err != nil {
								return err
							}
							if value > 16 {
								return UnsupportedError("excessive DC component")
							}
							dcDelta, err := d.receiveExtend(value)
							if err != nil {
								return err
							}
							dc[compIndex] += dcDelta
							b[0] = dc[compIndex] << al
						}

						if zig <= zigEnd && d.eobRun > 0 {
							d.eobRun--
						} else {
							// Decode the AC coefficients, as specified in section F.2.2.2.
							huff := &d.huff[acTable][scan[i].ta]
							for ; zig <= zigEnd; zig++ {
								value, err := d.decodeHuffman(huff)
								if err != nil {
									return err
								}
								// value packs a run of zeroes (high nibble) and the
								// magnitude category of the next coefficient (low nibble).
								val0 := value >> 4
								val1 := value & 0x0f
								if val1 != 0 {
									zig += int32(val0)
									if zig > zigEnd {
										break
									}
									ac, err := d.receiveExtend(val1)
									if err != nil {
										return err
									}
									b[unzig[zig]] = ac << al
								} else {
									if val0 != 0x0f {
										d.eobRun = uint16(1 << val0)
										if val0 != 0 {
											bits, err := d.decodeBits(int32(val0))
											if err != nil {
												return err
											}
											d.eobRun |= uint16(bits)
										}
										d.eobRun--
										break
									}
									zig += 0x0f
								}
							}
						}
					}

					if d.progressive {
						// Save the coefficients.
						d.progCoeffs[compIndex][by*mxx*hi+bx] = b
						// At this point, we could call reconstructBlock to dequantize and perform the
						// inverse DCT, to save early stages of a progressive image to the *image.YCbCr
						// buffers (the whole point of progressive encoding), but in Go, the jpeg.Decode
						// function does not return until the entire image is decoded, so we "continue"
						// here to avoid wasted computation. Instead, reconstructBlock is called on each
						// accumulated block by the reconstructProgressiveImage method after all of the
						// SOS markers are processed.
						continue
					}
					if err := d.reconstructBlock(&b, bx, by, int(compIndex)); err != nil {
						return err
					}
				} // for j
			} // for i
			mcu++
			if d.ri > 0 && mcu%d.ri == 0 && mcu < mxx*myy {
				// For well-formed input, the RST[0-7] restart marker follows
				// immediately. For corrupt input, call findRST to try to
				// resynchronize.
				if err := d.readFull(d.tmp[:2]); err != nil {
					return err
				} else if d.tmp[0] != 0xff || d.tmp[1] != expectedRST {
					if err := d.findRST(expectedRST); err != nil {
						return err
					}
				}
				expectedRST++
				if expectedRST == rst7Marker+1 {
					expectedRST = rst0Marker
				}
				// Reset the Huffman decoder.
				d.bits = bits{}
				// Reset the DC components, as per section F.2.1.3.1.
				dc = [maxComponents]int32{}
				// Reset the progressive decoder state, as per section G.1.2.2.
				d.eobRun = 0
			}
		} // for mx
	} // for my

	return nil
}
// refine decodes a successive approximation refinement block, as specified in
// section G.1.2. It adds one extra bit of precision (delta == 1<<al) to the
// coefficients of b in the zig-zag band [zigStart, zigEnd].
func (d *decoder) refine(b *block, h *huffman, zigStart, zigEnd, delta int32) error {
	// Refining a DC component is trivial.
	if zigStart == 0 {
		if zigEnd != 0 {
			panic("unreachable")
		}
		bit, err := d.decodeBit()
		if err != nil {
			return err
		}
		if bit {
			b[0] |= delta
		}
		return nil
	}

	// Refining AC components is more complicated; see sections G.1.2.2 and G.1.2.3.
	zig := zigStart
	if d.eobRun == 0 {
	loop:
		for ; zig <= zigEnd; zig++ {
			// z is the new coefficient (if any) to place at position zig.
			z := int32(0)
			value, err := d.decodeHuffman(h)
			if err != nil {
				return err
			}
			val0 := value >> 4
			val1 := value & 0x0f

			switch val1 {
			case 0:
				if val0 != 0x0f {
					// An End-of-Band run; subsequent bits refine existing
					// non-zero coefficients only.
					d.eobRun = uint16(1 << val0)
					if val0 != 0 {
						bits, err := d.decodeBits(int32(val0))
						if err != nil {
							return err
						}
						d.eobRun |= uint16(bits)
					}
					break loop
				}
			case 1:
				z = delta
				bit, err := d.decodeBit()
				if err != nil {
					return err
				}
				if !bit {
					z = -z
				}
			default:
				return FormatError("unexpected Huffman code")
			}

			// Skip over val0 zero entries, refining any non-zero ones passed.
			zig, err = d.refineNonZeroes(b, zig, zigEnd, int32(val0), delta)
			if err != nil {
				return err
			}
			if zig > zigEnd {
				return FormatError("too many coefficients")
			}
			if z != 0 {
				b[unzig[zig]] = z
			}
		}
	}
	if d.eobRun > 0 {
		d.eobRun--
		if _, err := d.refineNonZeroes(b, zig, zigEnd, -1, delta); err != nil {
			return err
		}
	}
	return nil
}
// refineNonZeroes refines non-zero entries of b in zig-zag order. If nz >= 0,
// the first nz zero entries are skipped over.
func (d *decoder) refineNonZeroes(b *block, zig, zigEnd, nz, delta int32) (int32, error) {
	for ; zig <= zigEnd; zig++ {
		u := unzig[zig]
		switch {
		case b[u] == 0 && nz == 0:
			// The skip budget is spent; stop at this zero entry.
			return zig, nil
		case b[u] == 0:
			nz--
		default:
			// Each non-zero coefficient consumes one correction bit; a 1 bit
			// nudges the magnitude outward by delta.
			bit, err := d.decodeBit()
			if err != nil {
				return 0, err
			}
			if bit {
				if b[u] >= 0 {
					b[u] += delta
				} else {
					b[u] -= delta
				}
			}
		}
	}
	return zig, nil
}
// reconstructProgressiveImage renders every accumulated coefficient block of
// a progressive image once all SOS segments have been processed.
func (d *decoder) reconstructProgressiveImage() error {
	// The mxx, by and bx variables have the same meaning as in the
	// processSOS method.
	mxx := (d.width + 8*d.maxH - 1) / (8 * d.maxH)
	for c := 0; c < d.nComp; c++ {
		coeffs := d.progCoeffs[c]
		if coeffs == nil {
			continue
		}
		// Each 8x8 block covers blockW x blockH source pixels after
		// accounting for subsampling relative to the maximum factors.
		blockW := 8 * d.maxH / d.comp[c].h
		blockH := 8 * d.maxV / d.comp[c].v
		stride := mxx * d.comp[c].h
		for by := 0; by*blockH < d.height; by++ {
			for bx := 0; bx*blockW < d.width; bx++ {
				if err := d.reconstructBlock(&coeffs[by*stride+bx], bx, by, c); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// reconstructBlock dequantizes, performs the inverse DCT and stores the block
// to the image. (bx, by) is the block's position in units of 8x8 blocks, and
// compIndex selects the component (and hence destination plane).
func (d *decoder) reconstructBlock(b *block, bx, by, compIndex int) error {
	// Dequantize: the quantization table is in zig-zag order, the block in
	// natural order, so unzig bridges the two.
	qt := &d.quant[d.comp[compIndex].tq]
	for zig := 0; zig < blockSize; zig++ {
		b[unzig[zig]] *= qt[zig]
	}
	idct(b)

	var h, v int
	if d.flex {
		// Flex mode: scale bx and by according to the component's sampling factors.
		h = d.comp[compIndex].expandH
		v = d.comp[compIndex].expandV
		bx, by = bx*h, by*v
	}

	// Select the destination plane and its stride for this component.
	dst, stride := []byte(nil), 0
	if d.nComp == 1 {
		dst, stride = d.img1.Pix[8*(by*d.img1.Stride+bx):], d.img1.Stride
	} else {
		switch compIndex {
		case 0:
			dst, stride = d.img3.Y[8*(by*d.img3.YStride+bx):], d.img3.YStride
		case 1:
			dst, stride = d.img3.Cb[8*(by*d.img3.CStride+bx):], d.img3.CStride
		case 2:
			dst, stride = d.img3.Cr[8*(by*d.img3.CStride+bx):], d.img3.CStride
		case 3:
			dst, stride = d.blackPix[8*(by*d.blackStride+bx):], d.blackStride
		default:
			return UnsupportedError("too many components")
		}
	}

	if d.flex {
		// Flex mode: expand each source pixel to h×v destination pixels.
		for y := 0; y < 8; y++ {
			y8 := y * 8
			yv := y * v
			for x := 0; x < 8; x++ {
				// Level shift by +128 and clip to [0, 255], as below.
				val := uint8(max(0, min(255, b[y8+x]+128)))
				xh := x * h
				for yy := 0; yy < v; yy++ {
					for xx := 0; xx < h; xx++ {
						dst[(yv+yy)*stride+xh+xx] = val
					}
				}
			}
		}
		return nil
	}

	// Level shift by +128, clip to [0, 255], and write to dst.
	for y := 0; y < 8; y++ {
		y8 := y * 8
		yStride := y * stride
		for x := 0; x < 8; x++ {
			dst[yStride+x] = uint8(max(0, min(255, b[y8+x]+128)))
		}
	}
	return nil
}
// findRST advances past the next RST restart marker that matches expectedRST.
// Other than I/O errors, it is also an error if we encounter an {0xFF, M}
// two-byte marker sequence where M is not 0x00, 0xFF or the expectedRST.
//
// This is similar to libjpeg's jdmarker.c's next_marker function.
// https://github.com/libjpeg-turbo/libjpeg-turbo/blob/2dfe6c0fe9e18671105e94f7cbf044d4a1d157e6/jdmarker.c#L892-L935
//
// Precondition: d.tmp[:2] holds the next two bytes of JPEG-encoded input
// (input in the d.readFull sense).
func (d *decoder) findRST(expectedRST uint8) error {
	for {
		// i is the index such that, at the bottom of the loop, we read 2-i
		// bytes into d.tmp[i:2], maintaining the invariant that d.tmp[:2]
		// holds the next two bytes of JPEG-encoded input. It is either 0 or 1,
		// so that each iteration advances by 1 or 2 bytes (or returns).
		i := 0
		if d.tmp[0] == 0xff {
			if d.tmp[1] == expectedRST {
				// Found the expected restart marker.
				return nil
			} else if d.tmp[1] == 0xff {
				// Consecutive 0xff bytes: keep the second one as the
				// potential start of the next marker.
				i = 1
			} else if d.tmp[1] != 0x00 {
				// libjpeg's jdmarker.c's jpeg_resync_to_restart does something
				// fancy here, treating RST markers within two (modulo 8) of
				// expectedRST differently from RST markers that are 'more
				// distant'. Until we see evidence that recovering from such
				// cases is frequent enough to be worth the complexity, we take
				// a simpler approach for now. Any marker that's not 0x00, 0xff
				// or expectedRST is a fatal FormatError.
				return FormatError("bad RST marker")
			}
		} else if d.tmp[1] == 0xff {
			// The second byte may start a marker; slide it to the front.
			d.tmp[0] = 0xff
			i = 1
		}
		if err := d.readFull(d.tmp[i:2]); err != nil {
			return err
		}
	}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jpeg
import (
"bufio"
"errors"
"image"
"image/color"
"io"
)
// div returns a/b rounded to the nearest integer, instead of rounded to zero.
// b must be positive; exact halves are rounded away from zero.
func div(a, b int32) int32 {
	if a < 0 {
		return -((-a + (b >> 1)) / b)
	}
	return (a + (b >> 1)) / b
}
// bitCount counts the number of bits needed to hold an integer:
// bitCount[i] is one plus the position of i's highest set bit, and
// bitCount[0] is 0. It covers the 8-bit range; larger magnitudes are
// handled by the callers shifting first.
var bitCount = [256]byte{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
}
// quantIndex selects one of the encoder's quantization tables: one for
// luminance (the Y channel) and one for chrominance (the Cb and Cr channels).
type quantIndex int

const (
	quantIndexLuminance quantIndex = iota
	quantIndexChrominance
	nQuantIndex // The number of quantization tables.
)
// unscaledQuant are the unscaled quantization tables in zig-zag order. Each
// encoder copies and scales the tables according to its quality parameter.
// The values are derived from section K.1 of the spec, after converting from
// natural to zig-zag order. Entry 0 is the DC coefficient's divisor.
var unscaledQuant = [nQuantIndex][blockSize]byte{
	// Luminance.
	{
		16, 11, 12, 14, 12, 10, 16, 14,
		13, 14, 18, 17, 16, 19, 24, 40,
		26, 24, 22, 22, 24, 49, 35, 37,
		29, 40, 58, 51, 61, 60, 57, 51,
		56, 55, 64, 72, 92, 78, 64, 68,
		87, 69, 55, 56, 80, 109, 81, 87,
		95, 98, 103, 104, 103, 62, 77, 113,
		121, 112, 100, 120, 92, 101, 103, 99,
	},
	// Chrominance.
	{
		17, 18, 18, 24, 21, 24, 47, 26,
		26, 47, 99, 66, 56, 66, 99, 99,
		99, 99, 99, 99, 99, 99, 99, 99,
		99, 99, 99, 99, 99, 99, 99, 99,
		99, 99, 99, 99, 99, 99, 99, 99,
		99, 99, 99, 99, 99, 99, 99, 99,
		99, 99, 99, 99, 99, 99, 99, 99,
		99, 99, 99, 99, 99, 99, 99, 99,
	},
}
// huffIndex selects one of the encoder's four Huffman tables, split by
// channel class (luminance vs. chrominance) and coefficient class (DC vs.
// AC).
type huffIndex int

const (
	huffIndexLuminanceDC huffIndex = iota
	huffIndexLuminanceAC
	huffIndexChrominanceDC
	huffIndexChrominanceAC
	nHuffIndex // The number of Huffman tables.
)
// huffmanSpec specifies a Huffman encoding in the compact form used by the
// JPEG file format's DHT segment.
type huffmanSpec struct {
	// count[i] is the number of codes of length i+1 bits.
	count [16]byte
	// value[i] is the decoded value of the i'th codeword. Its length equals
	// the sum of the counts.
	value []byte
}
// theHuffmanSpec is the Huffman encoding specifications.
//
// This encoder uses the same Huffman encoding for all images. It is also the
// same Huffman encoding used by section K.3 of the spec.
//
// The DC tables have 12 decoded values, called categories.
//
// The AC tables have 162 decoded values: bytes that pack a 4-bit Run and a
// 4-bit Size. There are 16 valid Runs and 10 valid Sizes, plus two special R|S
// cases: 0|0 (meaning EOB) and F|0 (meaning ZRL).
//
// The array is indexed by huffIndex, so the order here must match the
// huffIndex constants.
var theHuffmanSpec = [nHuffIndex]huffmanSpec{
	// Luminance DC.
	{
		[16]byte{0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0},
		[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
	},
	// Luminance AC.
	{
		[16]byte{0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 125},
		[]byte{
			0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
			0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
			0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
			0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
			0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
			0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
			0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
			0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
			0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
			0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
			0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
			0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
			0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
			0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
			0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
			0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
			0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
			0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
			0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
			0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
			0xf9, 0xfa,
		},
	},
	// Chrominance DC.
	{
		[16]byte{0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
		[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
	},
	// Chrominance AC.
	{
		[16]byte{0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 119},
		[]byte{
			0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
			0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
			0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
			0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
			0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
			0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
			0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
			0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
			0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
			0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
			0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
			0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
			0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
			0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
			0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
			0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
			0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
			0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
			0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
			0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
			0xf9, 0xfa,
		},
	},
}
// huffmanLUT is a compiled look-up table representation of a huffmanSpec.
// Each value maps to a uint32 of which the 8 most significant bits hold the
// codeword size in bits and the 24 least significant bits hold the codeword.
// The maximum codeword size is 16 bits.
type huffmanLUT []uint32

// init compiles s into h, assigning canonical Huffman codes: codes of equal
// length are consecutive integers, and each length's first code is the
// previous length's next code shifted left by one bit.
func (h *huffmanLUT) init(s huffmanSpec) {
	// Size the table to hold the largest decoded value.
	maxValue := 0
	for _, v := range s.value {
		maxValue = max(maxValue, int(v))
	}
	*h = make([]uint32, maxValue+1)

	nextCode, valueIndex := uint32(0), 0
	for i, n := range s.count {
		sizeBits := uint32(i+1) << 24
		for j := uint8(0); j < n; j++ {
			(*h)[s.value[valueIndex]] = sizeBits | nextCode
			nextCode++
			valueIndex++
		}
		nextCode <<= 1
	}
}
// theHuffmanLUT are compiled representations of theHuffmanSpec, indexed by
// huffIndex.
var theHuffmanLUT [4]huffmanLUT

// init compiles each spec into its look-up table form once, at package
// initialization time.
func init() {
	for i, s := range theHuffmanSpec {
		theHuffmanLUT[i].init(s)
	}
}
// writer is a buffered writer, the destination for an encoder's output.
// Flush must be called after the last write to push out any buffered bytes.
type writer interface {
	Flush() error
	io.Writer
	io.ByteWriter
}
// encoder encodes an image to the JPEG format.
type encoder struct {
	// w is the writer to write to. err is the first error encountered during
	// writing. All attempted writes after the first error become no-ops.
	w   writer
	err error
	// buf is a scratch buffer.
	buf [16]byte
	// bits and nBits are accumulated bits to write to w. The bits are held
	// at the most significant end of the word, so the next byte to be
	// emitted is bits' top 8 bits.
	bits, nBits uint32
	// quant is the scaled quantization tables, in zig-zag order.
	quant [nQuantIndex][blockSize]byte
}
// flush flushes the underlying buffered writer, recording the first error.
// It is a no-op if a previous write has already failed.
func (e *encoder) flush() {
	if e.err == nil {
		e.err = e.w.Flush()
	}
}
// write writes p to the underlying writer, recording the first error.
// It is a no-op if a previous write has already failed.
func (e *encoder) write(p []byte) {
	if e.err == nil {
		_, e.err = e.w.Write(p)
	}
}
// writeByte writes the single byte b, recording the first error.
// It is a no-op if a previous write has already failed.
func (e *encoder) writeByte(b byte) {
	if e.err == nil {
		e.err = e.w.WriteByte(b)
	}
}
// emit emits the least significant nBits bits of bits to the bit-stream.
// The precondition is bits < 1<<nBits && nBits <= 16.
func (e *encoder) emit(bits, nBits uint32) {
	// Prepend the previously buffered bits, aligning everything to the
	// most significant end of the 32-bit word.
	nBits += e.nBits
	bits <<= 32 - nBits
	bits |= e.bits
	// Write out whole bytes, most significant first.
	for nBits >= 8 {
		b := uint8(bits >> 24)
		e.writeByte(b)
		if b == 0xff {
			// Byte stuffing: a 0xff in entropy-coded data must be followed
			// by a 0x00 so that it is not mistaken for a marker.
			e.writeByte(0x00)
		}
		bits <<= 8
		nBits -= 8
	}
	// Keep any remaining (fewer than 8) bits buffered for the next call.
	e.bits, e.nBits = bits, nBits
}
// emitHuff emits the given value with the given Huffman encoder.
func (e *encoder) emitHuff(h huffIndex, value int32) {
	// The LUT entry packs the code length in the top 8 bits and the
	// codeword itself in the low 24 bits.
	entry := theHuffmanLUT[h][value]
	e.emit(entry&(1<<24-1), entry>>24)
}
// emitHuffRLE emits a run of runLength copies of value encoded with the given
// Huffman encoder.
func (e *encoder) emitHuffRLE(h huffIndex, runLength, value int32) {
	// a is value's magnitude; b is the bit pattern whose low nBits bits
	// encode value in the JPEG format (for negative values, value-1, whose
	// low bits are the ones'-complement representation).
	a, b := value, value
	if a < 0 {
		a, b = -value, value-1
	}
	// nBits is the number of bits needed to hold the magnitude a.
	var nBits uint32
	if a < 0x100 {
		nBits = uint32(bitCount[a])
	} else {
		nBits = 8 + uint32(bitCount[a>>8])
	}
	// The Huffman symbol packs the run length in the high nibble and the
	// size in the low nibble, followed by the size bits themselves.
	e.emitHuff(h, runLength<<4|int32(nBits))
	if nBits > 0 {
		e.emit(uint32(b)&(1<<nBits-1), nBits)
	}
}
// writeMarkerHeader writes the header for a marker with the given length.
// markerlen counts the two length bytes themselves but not the 0xff/marker
// pair.
func (e *encoder) writeMarkerHeader(marker uint8, markerlen int) {
	hdr := e.buf[:4]
	hdr[0] = 0xff
	hdr[1] = marker
	hdr[2] = uint8(markerlen >> 8)
	hdr[3] = uint8(markerlen & 0xff)
	e.write(hdr)
}
// writeDQT writes the Define Quantization Table marker.
func (e *encoder) writeDQT() {
	// 2 length bytes, then for each table: 1 precision/identifier byte
	// plus the 64 table entries.
	const markerlen = 2 + int(nQuantIndex)*(1+blockSize)
	e.writeMarkerHeader(dqtMarker, markerlen)
	for i, q := range e.quant {
		e.writeByte(uint8(i))
		e.write(q[:])
	}
}
// writeSOF0 writes the Start Of Frame (Baseline Sequential) marker.
func (e *encoder) writeSOF0(size image.Point, nComponent int) {
	// 8 fixed header bytes plus 3 bytes per component.
	markerlen := 8 + 3*nComponent
	e.writeMarkerHeader(sof0Marker, markerlen)
	e.buf[0] = 8 // 8-bit color.
	e.buf[1] = uint8(size.Y >> 8)
	e.buf[2] = uint8(size.Y & 0xff)
	e.buf[3] = uint8(size.X >> 8)
	e.buf[4] = uint8(size.X & 0xff)
	e.buf[5] = uint8(nComponent)
	if nComponent == 1 {
		e.buf[6] = 1
		// No subsampling for grayscale image.
		e.buf[7] = 0x11
		e.buf[8] = 0x00
	} else {
		// Each component is 3 bytes: identifier (1-based), sampling
		// factors, and quantization table selector.
		for i := 0; i < nComponent; i++ {
			e.buf[3*i+6] = uint8(i + 1)
			// We use 4:2:0 chroma subsampling.
			e.buf[3*i+7] = "\x22\x11\x11"[i]
			e.buf[3*i+8] = "\x00\x01\x01"[i]
		}
	}
	e.write(e.buf[:3*(nComponent-1)+9])
}
// writeDHT writes the Define Huffman Table marker.
func (e *encoder) writeDHT(nComponent int) {
	specs := theHuffmanSpec[:]
	if nComponent == 1 {
		// Grayscale images only need the two Luminance tables.
		specs = specs[:2]
	}
	// 2 length bytes, then for each table: 1 class/identifier byte, the
	// 16 count bytes, and the decoded values themselves.
	markerlen := 2
	for _, s := range specs {
		markerlen += 1 + 16 + len(s.value)
	}
	e.writeMarkerHeader(dhtMarker, markerlen)
	for i, s := range specs {
		// Table class (DC=0, AC=1) in the high nibble, identifier in the low.
		e.writeByte("\x00\x10\x01\x11"[i])
		e.write(s.count[:])
		e.write(s.value)
	}
}
// writeBlock writes a block of pixel data using the given quantization table,
// returning the post-quantized DC value of the DCT-transformed block. b is in
// natural (not zig-zag) order.
func (e *encoder) writeBlock(b *block, q quantIndex, prevDC int32) int32 {
	fdct(b)
	// Emit the DC delta. The divisor's factor of 8 presumably compensates
	// for scaling in fdct's output — confirm against fdct's documentation.
	dc := div(b[0], 8*int32(e.quant[q][0]))
	e.emitHuffRLE(huffIndex(2*q+0), 0, dc-prevDC)
	// Emit the AC components, run-length encoding the runs of zeroes.
	h, runLength := huffIndex(2*q+1), int32(0)
	for zig := 1; zig < blockSize; zig++ {
		ac := div(b[unzig[zig]], 8*int32(e.quant[q][zig]))
		if ac == 0 {
			runLength++
		} else {
			// Runs longer than 15 are split with ZRL (0xf0) symbols, each
			// standing for 16 consecutive zeroes.
			for runLength > 15 {
				e.emitHuff(h, 0xf0)
				runLength -= 16
			}
			e.emitHuffRLE(h, runLength, ac)
			runLength = 0
		}
	}
	if runLength > 0 {
		// A trailing run of zeroes is represented by the EOB (0x00) symbol.
		e.emitHuff(h, 0x00)
	}
	return dc
}
// toYCbCr converts the 8x8 region of m whose top-left corner is p to its
// YCbCr values. Pixels past m's far edges are clamped to the nearest edge.
func toYCbCr(m image.Image, p image.Point, yBlock, cbBlock, crBlock *block) {
	bounds := m.Bounds()
	xmax, ymax := bounds.Max.X-1, bounds.Max.Y-1
	for j := 0; j < 8; j++ {
		sy := min(p.Y+j, ymax)
		for i := 0; i < 8; i++ {
			r, g, b, _ := m.At(min(p.X+i, xmax), sy).RGBA()
			yy, cb, cr := color.RGBToYCbCr(uint8(r>>8), uint8(g>>8), uint8(b>>8))
			k := 8*j + i
			yBlock[k] = int32(yy)
			cbBlock[k] = int32(cb)
			crBlock[k] = int32(cr)
		}
	}
}
// grayToY stores the 8x8 region of m whose top-left corner is p in yBlock.
// Pixels past m's far edges are clamped to the nearest edge.
func grayToY(m *image.Gray, p image.Point, yBlock *block) {
	bounds := m.Bounds()
	xmax, ymax := bounds.Max.X-1, bounds.Max.Y-1
	for j := 0; j < 8; j++ {
		sy := min(p.Y+j, ymax)
		for i := 0; i < 8; i++ {
			sx := min(p.X+i, xmax)
			yBlock[8*j+i] = int32(m.Pix[m.PixOffset(sx, sy)])
		}
	}
}
// rgbaToYCbCr is a specialized version of toYCbCr for image.RGBA images,
// reading the pixel data directly instead of going through the At method.
func rgbaToYCbCr(m *image.RGBA, p image.Point, yBlock, cbBlock, crBlock *block) {
	bounds := m.Bounds()
	xmax, ymax := bounds.Max.X-1, bounds.Max.Y-1
	for j := 0; j < 8; j++ {
		sy := min(p.Y+j, ymax)
		// rowOffset is the Pix index of the pixel at (0, sy).
		rowOffset := (sy-bounds.Min.Y)*m.Stride - bounds.Min.X*4
		for i := 0; i < 8; i++ {
			sx := min(p.X+i, xmax)
			pix := m.Pix[rowOffset+sx*4:]
			yy, cb, cr := color.RGBToYCbCr(pix[0], pix[1], pix[2])
			k := 8*j + i
			yBlock[k] = int32(yy)
			cbBlock[k] = int32(cb)
			crBlock[k] = int32(cr)
		}
	}
}
// yCbCrToYCbCr is a specialized version of toYCbCr for image.YCbCr images,
// copying the already-separated luma and chroma planes directly.
func yCbCrToYCbCr(m *image.YCbCr, p image.Point, yBlock, cbBlock, crBlock *block) {
	bounds := m.Bounds()
	xmax, ymax := bounds.Max.X-1, bounds.Max.Y-1
	for j := 0; j < 8; j++ {
		sy := min(p.Y+j, ymax)
		for i := 0; i < 8; i++ {
			sx := min(p.X+i, xmax)
			k := 8*j + i
			yBlock[k] = int32(m.Y[m.YOffset(sx, sy)])
			ci := m.COffset(sx, sy)
			cbBlock[k] = int32(m.Cb[ci])
			crBlock[k] = int32(m.Cr[ci])
		}
	}
}
// scale scales the 16x16 region represented by the 4 src blocks to the 8x8
// dst block, averaging each 2x2 group of source samples with
// round-to-nearest.
func scale(dst *block, src *[4]block) {
	for i := 0; i < 4; i++ {
		// Each src block maps to one 4x4 quadrant of dst.
		dstOff := (i&2)<<4 | (i&1)<<2
		s := &src[i]
		for y := 0; y < 4; y++ {
			for x := 0; x < 4; x++ {
				// j indexes the top-left sample of the 2x2 source group.
				j := 16*y + 2*x
				sum := s[j] + s[j+1] + s[j+8] + s[j+9]
				dst[8*y+x+dstOff] = (sum + 2) >> 2
			}
		}
	}
}
// sosHeaderY is the SOS marker "\xff\xda" followed by 8 bytes:
//   - the marker length "\x00\x08",
//   - the number of components "\x01",
//   - component 1 uses DC table 0 and AC table 0 "\x01\x00",
//   - the bytes "\x00\x3f\x00". Section B.2.3 of the spec says that for
//     sequential DCTs, those bytes (8-bit Ss, 8-bit Se, 4-bit Ah, 4-bit Al)
//     should be 0x00, 0x3f, 0x00<<4 | 0x00.
//
// It is used for grayscale (single component) images.
var sosHeaderY = []byte{
	0xff, 0xda, 0x00, 0x08, 0x01, 0x01, 0x00, 0x00, 0x3f, 0x00,
}
// sosHeaderYCbCr is the SOS marker "\xff\xda" followed by 12 bytes:
//   - the marker length "\x00\x0c",
//   - the number of components "\x03",
//   - component 1 uses DC table 0 and AC table 0 "\x01\x00",
//   - component 2 uses DC table 1 and AC table 1 "\x02\x11",
//   - component 3 uses DC table 1 and AC table 1 "\x03\x11",
//   - the bytes "\x00\x3f\x00". Section B.2.3 of the spec says that for
//     sequential DCTs, those bytes (8-bit Ss, 8-bit Se, 4-bit Ah, 4-bit Al)
//     should be 0x00, 0x3f, 0x00<<4 | 0x00.
//
// It is used for color (three component) images.
var sosHeaderYCbCr = []byte{
	0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02,
	0x11, 0x03, 0x11, 0x00, 0x3f, 0x00,
}
// writeSOS writes the StartOfScan marker and then the entropy-coded image
// data, finishing with the final partial byte padded with 1 bits.
func (e *encoder) writeSOS(m image.Image) {
	switch m.(type) {
	case *image.Gray:
		e.write(sosHeaderY)
	default:
		e.write(sosHeaderYCbCr)
	}
	var (
		// Scratch buffers to hold the YCbCr values.
		// The blocks are in natural (not zig-zag) order.
		b      block
		cb, cr [4]block
		// DC components are delta-encoded.
		prevDCY, prevDCCb, prevDCCr int32
	)
	bounds := m.Bounds()
	switch m := m.(type) {
	// TODO(wathiede): switch on m.ColorModel() instead of type.
	case *image.Gray:
		// Grayscale: one 8x8 luma block per step.
		for y := bounds.Min.Y; y < bounds.Max.Y; y += 8 {
			for x := bounds.Min.X; x < bounds.Max.X; x += 8 {
				p := image.Pt(x, y)
				grayToY(m, p, &b)
				prevDCY = e.writeBlock(&b, 0, prevDCY)
			}
		}
	default:
		// Color, 4:2:0: each 16x16 macroblock holds four luma blocks and
		// one downsampled block each for Cb and Cr.
		rgba, _ := m.(*image.RGBA)
		ycbcr, _ := m.(*image.YCbCr)
		for y := bounds.Min.Y; y < bounds.Max.Y; y += 16 {
			for x := bounds.Min.X; x < bounds.Max.X; x += 16 {
				for i := 0; i < 4; i++ {
					// xOff/yOff select the i'th 8x8 quadrant of the 16x16
					// macroblock.
					xOff := (i & 1) * 8
					yOff := (i & 2) * 4
					p := image.Pt(x+xOff, y+yOff)
					// Use the fast path for the common concrete types.
					if rgba != nil {
						rgbaToYCbCr(rgba, p, &b, &cb[i], &cr[i])
					} else if ycbcr != nil {
						yCbCrToYCbCr(ycbcr, p, &b, &cb[i], &cr[i])
					} else {
						toYCbCr(m, p, &b, &cb[i], &cr[i])
					}
					prevDCY = e.writeBlock(&b, 0, prevDCY)
				}
				// Downsample chroma 2x2 before encoding (4:2:0).
				scale(&b, &cb)
				prevDCCb = e.writeBlock(&b, 1, prevDCCb)
				scale(&b, &cr)
				prevDCCr = e.writeBlock(&b, 1, prevDCCr)
			}
		}
	}
	// Pad the last byte with 1's.
	e.emit(0x7f, 7)
}
// DefaultQuality is the default quality encoding parameter, used when
// Encode is passed a nil *Options.
const DefaultQuality = 75
// Options are the encoding parameters.
type Options struct {
	// Quality ranges from 1 to 100 inclusive, higher is better.
	Quality int
}
// Encode writes the Image m to w in JPEG 4:2:0 baseline format with the given
// options. Default parameters are used if a nil *[Options] is passed.
func Encode(w io.Writer, m image.Image, o *Options) error {
	b := m.Bounds()
	// JPEG stores the dimensions in 16-bit fields, so reject anything bigger.
	if b.Dx() >= 1<<16 || b.Dy() >= 1<<16 {
		return errors.New("jpeg: image is too large to encode")
	}
	var e encoder
	// Use the destination directly if it is already a buffered writer.
	if ww, ok := w.(writer); ok {
		e.w = ww
	} else {
		e.w = bufio.NewWriter(w)
	}
	// Clip quality to [1, 100].
	quality := DefaultQuality
	if o != nil {
		quality = o.Quality
		if quality < 1 {
			quality = 1
		} else if quality > 100 {
			quality = 100
		}
	}
	// Convert from a quality rating to a scaling factor: the percentage
	// applied to the unscaled tables (quality 50 maps to 100%).
	var scale int
	if quality < 50 {
		scale = 5000 / quality
	} else {
		scale = 200 - quality*2
	}
	// Initialize the quantization tables: scale each unscaled entry with
	// rounding, then clamp to the valid [1, 255] range.
	for i := range e.quant {
		for j := range e.quant[i] {
			x := int(unscaledQuant[i][j])
			x = (x*scale + 50) / 100
			if x < 1 {
				x = 1
			} else if x > 255 {
				x = 255
			}
			e.quant[i][j] = uint8(x)
		}
	}
	// Compute number of components based on input image type.
	nComponent := 3
	switch m.(type) {
	// TODO(wathiede): switch on m.ColorModel() instead of type.
	case *image.Gray:
		nComponent = 1
	}
	// Write the Start Of Image marker.
	e.buf[0] = 0xff
	e.buf[1] = 0xd8
	e.write(e.buf[:2])
	// Write the quantization tables.
	e.writeDQT()
	// Write the image dimensions.
	e.writeSOF0(b.Size(), nComponent)
	// Write the Huffman tables.
	e.writeDHT(nComponent)
	// Write the image data.
	e.writeSOS(m)
	// Write the End Of Image marker.
	e.buf[0] = 0xff
	e.buf[1] = 0xd9
	e.write(e.buf[:2])
	// Flush any buffered output and report the first error encountered.
	e.flush()
	return e.err
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package image
import (
"image/color"
)
var (
	// Black is an opaque black uniform image.
	Black = NewUniform(color.Black)
	// White is an opaque white uniform image.
	White = NewUniform(color.White)
	// Transparent is a fully transparent uniform image.
	Transparent = NewUniform(color.Transparent)
	// Opaque is a fully opaque uniform image.
	Opaque = NewUniform(color.Opaque)

	// These are shared *Uniform values: mutating their C field would
	// affect every user of the package.
)
// Uniform is an infinite-sized [Image] of uniform color.
// It implements the [color.Color], [color.Model], and [Image] interfaces.
type Uniform struct {
	C color.Color
}

// RGBA implements [color.Color], returning the RGBA values of c's color.
func (c *Uniform) RGBA() (r, g, b, a uint32) {
	return c.C.RGBA()
}

// ColorModel implements [Image]; a Uniform is its own color model.
func (c *Uniform) ColorModel() color.Model {
	return c
}

// Convert implements [color.Model], mapping every color to c's color.
func (c *Uniform) Convert(color.Color) color.Color {
	return c.C
}

// Bounds implements [Image], returning a rectangle large enough to be
// effectively infinite.
func (c *Uniform) Bounds() Rectangle { return Rectangle{Point{-1e9, -1e9}, Point{1e9, 1e9}} }

// At implements [Image], returning the same color at every position.
func (c *Uniform) At(x, y int) color.Color { return c.C }

// RGBA64At returns the color at (x, y) as a color.RGBA64.
func (c *Uniform) RGBA64At(x, y int) color.RGBA64 {
	r, g, b, a := c.C.RGBA()
	return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
}

// Opaque reports whether the image is fully opaque, i.e. whether c's color
// has maximum alpha.
func (c *Uniform) Opaque() bool {
	_, _, _, a := c.C.RGBA()
	return a == 0xffff
}

// NewUniform returns a new [Uniform] image of the given color.
func NewUniform(c color.Color) *Uniform {
	return &Uniform{c}
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package png
// intSize is either 32 or 64.
const intSize = 32 << (^uint(0) >> 63)

// abs returns the absolute value of x, computed without a branch.
//
// mask is -1 if x is negative and 0 otherwise, so (x ^ mask) - mask flips
// all the bits and adds one for negative x (two's complement negation) and
// is the identity for non-negative x. This is faster than branching code.
// See Hacker's Delight, section 2-4.
func abs(x int) int {
	mask := x >> (intSize - 1)
	return (x ^ mask) - mask
}
// paeth implements the Paeth filter function, as per the PNG specification:
// it predicts the current byte from a (left), b (above) and c (upper-left),
// picking whichever is closest to a + b - c, with ties broken in the order
// a, b, c.
func paeth(a, b, c uint8) uint8 {
	// Optimized relative to the spec's sample code: with p = a + b - c,
	// |p-a| = |b-c|, |p-b| = |a-c| and |p-c| = |(b-c) + (a-c)|, so the
	// predictor can be chosen from two subtractions and three absolute
	// values (computed inline to avoid a branch-heavy helper call).
	pa := int(b) - int(c)
	pb := int(a) - int(c)
	pc := pa + pb
	if pc < 0 {
		pc = -pc
	}
	if pa < 0 {
		pa = -pa
	}
	if pb < 0 {
		pb = -pb
	}
	switch {
	case pa <= pb && pa <= pc:
		return a
	case pb <= pc:
		return b
	default:
		return c
	}
}
// filterPaeth applies the Paeth filter to the cdat slice, in place.
// cdat is the current row's filtered data, pdat is the previous row's
// already-reconstructed data; both are indexed channel-by-channel with a
// stride of bytesPerPixel.
func filterPaeth(cdat, pdat []byte, bytesPerPixel int) {
	for i := 0; i < bytesPerPixel; i++ {
		// a and c are the reconstructed left and upper-left bytes for the
		// current channel; both are zero at the start of the row.
		a, c := 0, 0
		for j := i; j < len(cdat); j += bytesPerPixel {
			b := int(pdat[j])
			// Paeth predictor, with the absolute values computed inline
			// (see the paeth function for the derivation).
			pa := b - c
			pb := a - c
			pc := pa + pb
			if pc < 0 {
				pc = -pc
			}
			if pa < 0 {
				pa = -pa
			}
			if pb < 0 {
				pb = -pb
			}
			if pa <= pb && pa <= pc {
				// The predictor is a; nothing to do.
			} else if pb <= pc {
				a = b
			} else {
				a = c
			}
			// Reconstruct the byte modulo 256 and carry it left as the
			// next iteration's a.
			a = (a + int(cdat[j])) & 0xff
			cdat[j] = uint8(a)
			c = b
		}
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package png implements a PNG image decoder and encoder.
//
// The PNG specification is at https://www.w3.org/TR/PNG/.
package png
import (
"compress/zlib"
"encoding/binary"
"fmt"
"hash"
"hash/crc32"
"image"
"image/color"
"io"
)
// Color type, as per the PNG spec (the IHDR chunk's color type field).
const (
	ctGrayscale      = 0
	ctTrueColor      = 2
	ctPaletted       = 3
	ctGrayscaleAlpha = 4
	ctTrueColorAlpha = 6
)
// A cb is a combination of color type and bit depth.
const (
	cbInvalid = iota
	cbG1
	cbG2
	cbG4
	cbG8
	cbGA8
	cbTC8
	cbP1
	cbP2
	cbP4
	cbP8
	cbTCA8
	cbG16
	cbGA16
	cbTC16
	cbTCA16
)

// cbPaletted reports whether cb is a paletted combination.
func cbPaletted(cb int) bool {
	switch cb {
	case cbP1, cbP2, cbP4, cbP8:
		return true
	}
	return false
}

// cbTrueColor reports whether cb is a true-color (no alpha) combination.
func cbTrueColor(cb int) bool {
	switch cb {
	case cbTC8, cbTC16:
		return true
	}
	return false
}
// Filter type, as per the PNG spec (the per-row filter byte).
const (
	ftNone    = 0
	ftSub     = 1
	ftUp      = 2
	ftAverage = 3
	ftPaeth   = 4
	nFilter   = 5 // The number of filter types.
)
// Interlace type, as per the IHDR chunk's interlace method field.
const (
	itNone  = 0
	itAdam7 = 1
)
// interlaceScan defines the placement and size of a pass for Adam7 interlacing:
// the pass contains the pixels at (xOffset + i*xFactor, yOffset + j*yFactor).
type interlaceScan struct {
	xFactor, yFactor, xOffset, yOffset int
}
// interlacing defines Adam7 interlacing, with 7 passes of reduced images.
// The passes are listed in the order they appear in the data stream.
// See https://www.w3.org/TR/PNG/#8Interlace
var interlacing = []interlaceScan{
	{8, 8, 0, 0},
	{8, 8, 4, 0},
	{4, 8, 0, 4},
	{4, 4, 2, 0},
	{2, 4, 0, 2},
	{2, 2, 1, 0},
	{1, 2, 0, 1},
}
// Decoding stage.
// The PNG specification says that the IHDR, PLTE (if present), tRNS (if
// present), IDAT and IEND chunks must appear in that order. There may be
// multiple IDAT chunks, and IDAT chunks must be sequential (i.e. they may not
// have any other chunks between them).
// https://www.w3.org/TR/PNG/#5ChunkOrdering
//
// The decoder tracks which of these chunks it has seen so far to enforce
// that ordering.
const (
	dsStart = iota
	dsSeenIHDR
	dsSeenPLTE
	dsSeentRNS
	dsSeenIDAT
	dsSeenIEND
)
// pngHeader is the eight-byte signature that begins every PNG file.
const pngHeader = "\x89PNG\r\n\x1a\n"

// decoder holds the state for decoding a single PNG image.
type decoder struct {
	r             io.Reader
	img           image.Image
	crc           hash.Hash32
	width, height int
	depth         int
	palette       color.Palette
	cb            int
	// stage is one of the ds* chunk-ordering states.
	stage int
	// idatLength is the number of bytes remaining in the current IDAT chunk.
	idatLength uint32
	// tmp is a scratch buffer; 3*256 bytes is enough for a full PLTE chunk.
	tmp       [3 * 256]byte
	interlace int
	// useTransparent and transparent are used for grayscale and truecolor
	// transparency, as opposed to palette transparency.
	useTransparent bool
	transparent    [6]byte
}
// A FormatError reports that the input is not a valid PNG.
type FormatError string

func (e FormatError) Error() string { return "png: invalid format: " + string(e) }

// chunkOrderError is returned when chunks appear out of the order required
// by the PNG specification.
var chunkOrderError = FormatError("chunk out of order")

// An UnsupportedError reports that the input uses a valid but unimplemented PNG feature.
type UnsupportedError string

func (e UnsupportedError) Error() string { return "png: unsupported feature: " + string(e) }
// parseIHDR parses the IHDR chunk body: 4-byte width, 4-byte height, then
// one byte each of bit depth, color type, compression method, filter method
// and interlace method. It validates the combination and records the
// resulting cb, depth and dimensions on d.
func (d *decoder) parseIHDR(length uint32) error {
	if length != 13 {
		return FormatError("bad IHDR length")
	}
	if _, err := io.ReadFull(d.r, d.tmp[:13]); err != nil {
		return err
	}
	d.crc.Write(d.tmp[:13])
	// Only compression method 0 and filter method 0 are defined.
	if d.tmp[10] != 0 {
		return UnsupportedError("compression method")
	}
	if d.tmp[11] != 0 {
		return UnsupportedError("filter method")
	}
	if d.tmp[12] != itNone && d.tmp[12] != itAdam7 {
		return FormatError("invalid interlace method")
	}
	d.interlace = int(d.tmp[12])

	w := int32(binary.BigEndian.Uint32(d.tmp[0:4]))
	h := int32(binary.BigEndian.Uint32(d.tmp[4:8]))
	if w <= 0 || h <= 0 {
		return FormatError("non-positive dimension")
	}
	// Reject images whose pixel count does not fit in an int, including
	// (on 32-bit platforms) after multiplying by the worst-case 8 bytes
	// per pixel.
	nPixels64 := int64(w) * int64(h)
	nPixels := int(nPixels64)
	if nPixels64 != int64(nPixels) {
		return UnsupportedError("dimension overflow")
	}
	// There can be up to 8 bytes per pixel, for 16 bits per channel RGBA.
	if nPixels != (nPixels*8)/8 {
		return UnsupportedError("dimension overflow")
	}

	// Map the (bit depth, color type) pair to a cb value, leaving
	// cbInvalid for any combination the spec does not allow.
	d.cb = cbInvalid
	d.depth = int(d.tmp[8])
	switch d.depth {
	case 1:
		switch d.tmp[9] {
		case ctGrayscale:
			d.cb = cbG1
		case ctPaletted:
			d.cb = cbP1
		}
	case 2:
		switch d.tmp[9] {
		case ctGrayscale:
			d.cb = cbG2
		case ctPaletted:
			d.cb = cbP2
		}
	case 4:
		switch d.tmp[9] {
		case ctGrayscale:
			d.cb = cbG4
		case ctPaletted:
			d.cb = cbP4
		}
	case 8:
		switch d.tmp[9] {
		case ctGrayscale:
			d.cb = cbG8
		case ctTrueColor:
			d.cb = cbTC8
		case ctPaletted:
			d.cb = cbP8
		case ctGrayscaleAlpha:
			d.cb = cbGA8
		case ctTrueColorAlpha:
			d.cb = cbTCA8
		}
	case 16:
		switch d.tmp[9] {
		case ctGrayscale:
			d.cb = cbG16
		case ctTrueColor:
			d.cb = cbTC16
		case ctGrayscaleAlpha:
			d.cb = cbGA16
		case ctTrueColorAlpha:
			d.cb = cbTCA16
		}
	}
	if d.cb == cbInvalid {
		return UnsupportedError(fmt.Sprintf("bit depth %d, color type %d", d.tmp[8], d.tmp[9]))
	}
	d.width, d.height = int(w), int(h)
	return d.verifyChecksum()
}
// parsePLTE parses the PLTE chunk body: up to 256 RGB triples that form the
// palette for paletted color types. For true-color types the chunk is read
// but ignored.
func (d *decoder) parsePLTE(length uint32) error {
	np := int(length / 3) // The number of palette entries.
	if length%3 != 0 || np <= 0 || np > 256 || np > 1<<uint(d.depth) {
		return FormatError("bad PLTE length")
	}
	n, err := io.ReadFull(d.r, d.tmp[:3*np])
	if err != nil {
		return err
	}
	d.crc.Write(d.tmp[:n])
	switch d.cb {
	case cbP1, cbP2, cbP4, cbP8:
		// Build a full 256-entry palette, then trim it to np below; the
		// extra entries exist so that out-of-range pixel values decode to
		// something rather than panicking.
		d.palette = make(color.Palette, 256)
		for i := 0; i < np; i++ {
			d.palette[i] = color.RGBA{d.tmp[3*i+0], d.tmp[3*i+1], d.tmp[3*i+2], 0xff}
		}
		for i := np; i < 256; i++ {
			// Initialize the rest of the palette to opaque black. The spec (section
			// 11.2.3) says that "any out-of-range pixel value found in the image data
			// is an error", but some real-world PNG files have out-of-range pixel
			// values. We fall back to opaque black, the same as libpng 1.5.13;
			// ImageMagick 6.5.7 returns an error.
			d.palette[i] = color.RGBA{0x00, 0x00, 0x00, 0xff}
		}
		d.palette = d.palette[:np]
	case cbTC8, cbTCA8, cbTC16, cbTCA16:
		// As per the PNG spec, a PLTE chunk is optional (and for practical purposes,
		// ignorable) for the ctTrueColor and ctTrueColorAlpha color types (section 4.1.2).
	default:
		return FormatError("PLTE, color type mismatch")
	}
	return d.verifyChecksum()
}
// parsetRNS parses the tRNS (transparency) chunk. Its interpretation depends
// on the color type: a single 16-bit gray sample, a 16-bit-per-channel RGB
// sample, or per-palette-entry alpha values.
func (d *decoder) parsetRNS(length uint32) error {
	switch d.cb {
	case cbG1, cbG2, cbG4, cbG8, cbG16:
		if length != 2 {
			return FormatError("bad tRNS length")
		}
		n, err := io.ReadFull(d.r, d.tmp[:length])
		if err != nil {
			return err
		}
		d.crc.Write(d.tmp[:n])
		copy(d.transparent[:], d.tmp[:length])
		// For sub-8-bit depths, scale the sample up to 8 bits by bit
		// replication (0xff, 0x55, 0x11 are the standard multipliers for
		// 1-, 2- and 4-bit samples).
		switch d.cb {
		case cbG1:
			d.transparent[1] *= 0xff
		case cbG2:
			d.transparent[1] *= 0x55
		case cbG4:
			d.transparent[1] *= 0x11
		}
		d.useTransparent = true
	case cbTC8, cbTC16:
		if length != 6 {
			return FormatError("bad tRNS length")
		}
		n, err := io.ReadFull(d.r, d.tmp[:length])
		if err != nil {
			return err
		}
		d.crc.Write(d.tmp[:n])
		copy(d.transparent[:], d.tmp[:length])
		d.useTransparent = true
	case cbP1, cbP2, cbP4, cbP8:
		if length > 256 {
			return FormatError("bad tRNS length")
		}
		n, err := io.ReadFull(d.r, d.tmp[:length])
		if err != nil {
			return err
		}
		d.crc.Write(d.tmp[:n])
		// The chunk may carry more alpha entries than PLTE defined
		// palette entries; extend the palette to match.
		if len(d.palette) < n {
			d.palette = d.palette[:n]
		}
		// Replace the first n opaque entries with non-premultiplied
		// alpha versions.
		for i := 0; i < n; i++ {
			rgba := d.palette[i].(color.RGBA)
			d.palette[i] = color.NRGBA{rgba.R, rgba.G, rgba.B, d.tmp[i]}
		}
	default:
		return FormatError("tRNS, color type mismatch")
	}
	return d.verifyChecksum()
}
// Read presents one or more IDAT chunks as one continuous stream (minus the
// intermediate chunk headers and footers). If the PNG data looked like:
//
//	... len0 IDAT xxx crc0 len1 IDAT yy crc1 len2 IEND crc2
//
// then this reader presents xxxyy. For well-formed PNG data, the decoder state
// immediately before the first Read call is that d.r is positioned between the
// first IDAT and xxx, and the decoder state immediately after the last Read
// call is that d.r is positioned between yy and crc1.
func (d *decoder) Read(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	for d.idatLength == 0 {
		// We have exhausted an IDAT chunk. Verify the checksum of that chunk.
		if err := d.verifyChecksum(); err != nil {
			return 0, err
		}
		// Read the length and chunk type of the next chunk, and check that
		// it is an IDAT chunk.
		if _, err := io.ReadFull(d.r, d.tmp[:8]); err != nil {
			return 0, err
		}
		d.idatLength = binary.BigEndian.Uint32(d.tmp[:4])
		if string(d.tmp[4:8]) != "IDAT" {
			return 0, FormatError("not enough pixel data")
		}
		// The chunk type is part of the CRC-covered data.
		d.crc.Reset()
		d.crc.Write(d.tmp[4:8])
	}
	// Guard against a declared length that does not fit in an int (only
	// possible on 32-bit platforms).
	if int(d.idatLength) < 0 {
		return 0, UnsupportedError("IDAT chunk length overflow")
	}
	// Serve at most the remainder of the current chunk.
	n, err := d.r.Read(p[:min(len(p), int(d.idatLength))])
	d.crc.Write(p[:n])
	d.idatLength -= uint32(n)
	return n, err
}
// decode decodes the IDAT data into an image, handling both non-interlaced
// and Adam7-interlaced layouts, and verifies that the zlib stream ends
// exactly where the pixel data does.
func (d *decoder) decode() (image.Image, error) {
	// d itself is the io.Reader that concatenates the IDAT chunk bodies.
	r, err := zlib.NewReader(d)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	var img image.Image
	if d.interlace == itNone {
		img, err = d.readImagePass(r, 0, false)
		if err != nil {
			return nil, err
		}
	} else if d.interlace == itAdam7 {
		// Allocate a blank image of the full size.
		img, err = d.readImagePass(nil, 0, true)
		if err != nil {
			return nil, err
		}
		// Decode each of the 7 reduced images and merge it into the full
		// image. A pass can be empty (nil) for small images.
		for pass := 0; pass < 7; pass++ {
			imagePass, err := d.readImagePass(r, pass, false)
			if err != nil {
				return nil, err
			}
			if imagePass != nil {
				d.mergePassInto(img, imagePass, pass)
			}
		}
	}

	// Check for EOF, to verify the zlib checksum. Allow up to 100
	// zero-byte reads before giving up on the reader making progress.
	n := 0
	for i := 0; n == 0 && err == nil; i++ {
		if i == 100 {
			return nil, io.ErrNoProgress
		}
		n, err = r.Read(d.tmp[:1])
	}
	if err != nil && err != io.EOF {
		return nil, FormatError(err.Error())
	}
	// Any leftover compressed or uncompressed data is an error.
	if n != 0 || d.idatLength != 0 {
		return nil, FormatError("too much pixel data")
	}
	return img, nil
}
// readImagePass reads a single image pass, sized according to the pass number.
//
// r is the stream of decompressed (but still filtered) scanline bytes. If
// allocateOnly is true, only the destination image is allocated and returned
// without reading any data (used to pre-allocate the full-size image for
// Adam7 interlacing). It may return (nil, nil) for an interlaced pass whose
// dimensions round down to zero.
func (d *decoder) readImagePass(r io.Reader, pass int, allocateOnly bool) (image.Image, error) {
	bitsPerPixel := 0
	pixOffset := 0
	var (
		gray     *image.Gray
		rgba     *image.RGBA
		paletted *image.Paletted
		nrgba    *image.NRGBA
		gray16   *image.Gray16
		rgba64   *image.RGBA64
		nrgba64  *image.NRGBA64
		img      image.Image
	)
	width, height := d.width, d.height
	if d.interlace == itAdam7 && !allocateOnly {
		p := interlacing[pass]
		// Add the multiplication factor and subtract one, effectively rounding up.
		width = (width - p.xOffset + p.xFactor - 1) / p.xFactor
		height = (height - p.yOffset + p.yFactor - 1) / p.yFactor
		// A PNG image can't have zero width or height, but for an interlaced
		// image, an individual pass might have zero width or height. If so, we
		// shouldn't even read a per-row filter type byte, so return early.
		if width == 0 || height == 0 {
			return nil, nil
		}
	}
	// Allocate the destination image with the concrete type matching the
	// color-bit-depth combination. Transparent variants (tRNS chunk present)
	// decode into NRGBA/NRGBA64 so the alpha can be expressed.
	switch d.cb {
	case cbG1, cbG2, cbG4, cbG8:
		bitsPerPixel = d.depth
		if d.useTransparent {
			nrgba = image.NewNRGBA(image.Rect(0, 0, width, height))
			img = nrgba
		} else {
			gray = image.NewGray(image.Rect(0, 0, width, height))
			img = gray
		}
	case cbGA8:
		bitsPerPixel = 16
		nrgba = image.NewNRGBA(image.Rect(0, 0, width, height))
		img = nrgba
	case cbTC8:
		bitsPerPixel = 24
		if d.useTransparent {
			nrgba = image.NewNRGBA(image.Rect(0, 0, width, height))
			img = nrgba
		} else {
			rgba = image.NewRGBA(image.Rect(0, 0, width, height))
			img = rgba
		}
	case cbP1, cbP2, cbP4, cbP8:
		bitsPerPixel = d.depth
		paletted = image.NewPaletted(image.Rect(0, 0, width, height), d.palette)
		img = paletted
	case cbTCA8:
		bitsPerPixel = 32
		nrgba = image.NewNRGBA(image.Rect(0, 0, width, height))
		img = nrgba
	case cbG16:
		bitsPerPixel = 16
		if d.useTransparent {
			nrgba64 = image.NewNRGBA64(image.Rect(0, 0, width, height))
			img = nrgba64
		} else {
			gray16 = image.NewGray16(image.Rect(0, 0, width, height))
			img = gray16
		}
	case cbGA16:
		bitsPerPixel = 32
		nrgba64 = image.NewNRGBA64(image.Rect(0, 0, width, height))
		img = nrgba64
	case cbTC16:
		bitsPerPixel = 48
		if d.useTransparent {
			nrgba64 = image.NewNRGBA64(image.Rect(0, 0, width, height))
			img = nrgba64
		} else {
			rgba64 = image.NewRGBA64(image.Rect(0, 0, width, height))
			img = rgba64
		}
	case cbTCA16:
		bitsPerPixel = 64
		nrgba64 = image.NewNRGBA64(image.Rect(0, 0, width, height))
		img = nrgba64
	}
	if allocateOnly {
		return img, nil
	}
	bytesPerPixel := (bitsPerPixel + 7) / 8
	// The +1 is for the per-row filter type, which is at cr[0].
	// Computed in int64 so the overflow check below is meaningful on 32-bit.
	rowSize := 1 + (int64(bitsPerPixel)*int64(width)+7)/8
	if rowSize != int64(int(rowSize)) {
		return nil, UnsupportedError("dimension overflow")
	}
	// cr and pr are the bytes for the current and previous row.
	cr := make([]uint8, rowSize)
	pr := make([]uint8, rowSize)
	for y := 0; y < height; y++ {
		// Read the decompressed bytes.
		_, err := io.ReadFull(r, cr)
		if err != nil {
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				return nil, FormatError("not enough pixel data")
			}
			return nil, err
		}
		// Apply the filter. The filter type is the row's first byte; cdat and
		// pdat are the current and previous rows without that byte.
		cdat := cr[1:]
		pdat := pr[1:]
		switch cr[0] {
		case ftNone:
			// No-op.
		case ftSub:
			for i := bytesPerPixel; i < len(cdat); i++ {
				cdat[i] += cdat[i-bytesPerPixel]
			}
		case ftUp:
			for i, p := range pdat {
				cdat[i] += p
			}
		case ftAverage:
			// The first column has no column to the left of it, so it is a
			// special case. We know that the first column exists because we
			// check above that width != 0, and so len(cdat) != 0.
			for i := 0; i < bytesPerPixel; i++ {
				cdat[i] += pdat[i] / 2
			}
			for i := bytesPerPixel; i < len(cdat); i++ {
				cdat[i] += uint8((int(cdat[i-bytesPerPixel]) + int(pdat[i])) / 2)
			}
		case ftPaeth:
			filterPaeth(cdat, pdat, bytesPerPixel)
		default:
			return nil, FormatError("bad filter type")
		}
		// Convert from bytes to colors.
		switch d.cb {
		case cbG1:
			// 1-bit grayscale: each byte holds 8 pixels, MSB first.
			if d.useTransparent {
				ty := d.transparent[1]
				for x := 0; x < width; x += 8 {
					b := cdat[x/8]
					for x2 := 0; x2 < 8 && x+x2 < width; x2++ {
						ycol := (b >> 7) * 0xff
						acol := uint8(0xff)
						if ycol == ty {
							acol = 0x00
						}
						nrgba.SetNRGBA(x+x2, y, color.NRGBA{ycol, ycol, ycol, acol})
						b <<= 1
					}
				}
			} else {
				for x := 0; x < width; x += 8 {
					b := cdat[x/8]
					for x2 := 0; x2 < 8 && x+x2 < width; x2++ {
						gray.SetGray(x+x2, y, color.Gray{(b >> 7) * 0xff})
						b <<= 1
					}
				}
			}
		case cbG2:
			// 2-bit grayscale: 4 pixels per byte, scaled by 0x55 to 8 bits.
			if d.useTransparent {
				ty := d.transparent[1]
				for x := 0; x < width; x += 4 {
					b := cdat[x/4]
					for x2 := 0; x2 < 4 && x+x2 < width; x2++ {
						ycol := (b >> 6) * 0x55
						acol := uint8(0xff)
						if ycol == ty {
							acol = 0x00
						}
						nrgba.SetNRGBA(x+x2, y, color.NRGBA{ycol, ycol, ycol, acol})
						b <<= 2
					}
				}
			} else {
				for x := 0; x < width; x += 4 {
					b := cdat[x/4]
					for x2 := 0; x2 < 4 && x+x2 < width; x2++ {
						gray.SetGray(x+x2, y, color.Gray{(b >> 6) * 0x55})
						b <<= 2
					}
				}
			}
		case cbG4:
			// 4-bit grayscale: 2 pixels per byte, scaled by 0x11 to 8 bits.
			if d.useTransparent {
				ty := d.transparent[1]
				for x := 0; x < width; x += 2 {
					b := cdat[x/2]
					for x2 := 0; x2 < 2 && x+x2 < width; x2++ {
						ycol := (b >> 4) * 0x11
						acol := uint8(0xff)
						if ycol == ty {
							acol = 0x00
						}
						nrgba.SetNRGBA(x+x2, y, color.NRGBA{ycol, ycol, ycol, acol})
						b <<= 4
					}
				}
			} else {
				for x := 0; x < width; x += 2 {
					b := cdat[x/2]
					for x2 := 0; x2 < 2 && x+x2 < width; x2++ {
						gray.SetGray(x+x2, y, color.Gray{(b >> 4) * 0x11})
						b <<= 4
					}
				}
			}
		case cbG8:
			if d.useTransparent {
				ty := d.transparent[1]
				for x := 0; x < width; x++ {
					ycol := cdat[x]
					acol := uint8(0xff)
					if ycol == ty {
						acol = 0x00
					}
					nrgba.SetNRGBA(x, y, color.NRGBA{ycol, ycol, ycol, acol})
				}
			} else {
				// The in-memory layout matches; copy the row wholesale.
				copy(gray.Pix[pixOffset:], cdat)
				pixOffset += gray.Stride
			}
		case cbGA8:
			for x := 0; x < width; x++ {
				ycol := cdat[2*x+0]
				nrgba.SetNRGBA(x, y, color.NRGBA{ycol, ycol, ycol, cdat[2*x+1]})
			}
		case cbTC8:
			if d.useTransparent {
				pix, i, j := nrgba.Pix, pixOffset, 0
				tr, tg, tb := d.transparent[1], d.transparent[3], d.transparent[5]
				for x := 0; x < width; x++ {
					r := cdat[j+0]
					g := cdat[j+1]
					b := cdat[j+2]
					a := uint8(0xff)
					if r == tr && g == tg && b == tb {
						a = 0x00
					}
					pix[i+0] = r
					pix[i+1] = g
					pix[i+2] = b
					pix[i+3] = a
					i += 4
					j += 3
				}
				pixOffset += nrgba.Stride
			} else {
				// Expand 3-byte RGB triples to 4-byte RGBA with opaque alpha.
				pix, i, j := rgba.Pix, pixOffset, 0
				for x := 0; x < width; x++ {
					pix[i+0] = cdat[j+0]
					pix[i+1] = cdat[j+1]
					pix[i+2] = cdat[j+2]
					pix[i+3] = 0xff
					i += 4
					j += 3
				}
				pixOffset += rgba.Stride
			}
		case cbP1:
			for x := 0; x < width; x += 8 {
				b := cdat[x/8]
				for x2 := 0; x2 < 8 && x+x2 < width; x2++ {
					idx := b >> 7
					// Grow the palette view if an index exceeds the PLTE
					// chunk's explicit length; the backing array has room.
					if len(paletted.Palette) <= int(idx) {
						paletted.Palette = paletted.Palette[:int(idx)+1]
					}
					paletted.SetColorIndex(x+x2, y, idx)
					b <<= 1
				}
			}
		case cbP2:
			for x := 0; x < width; x += 4 {
				b := cdat[x/4]
				for x2 := 0; x2 < 4 && x+x2 < width; x2++ {
					idx := b >> 6
					if len(paletted.Palette) <= int(idx) {
						paletted.Palette = paletted.Palette[:int(idx)+1]
					}
					paletted.SetColorIndex(x+x2, y, idx)
					b <<= 2
				}
			}
		case cbP4:
			for x := 0; x < width; x += 2 {
				b := cdat[x/2]
				for x2 := 0; x2 < 2 && x+x2 < width; x2++ {
					idx := b >> 4
					if len(paletted.Palette) <= int(idx) {
						paletted.Palette = paletted.Palette[:int(idx)+1]
					}
					paletted.SetColorIndex(x+x2, y, idx)
					b <<= 4
				}
			}
		case cbP8:
			if len(paletted.Palette) != 256 {
				for x := 0; x < width; x++ {
					if len(paletted.Palette) <= int(cdat[x]) {
						paletted.Palette = paletted.Palette[:int(cdat[x])+1]
					}
				}
			}
			copy(paletted.Pix[pixOffset:], cdat)
			pixOffset += paletted.Stride
		case cbTCA8:
			copy(nrgba.Pix[pixOffset:], cdat)
			pixOffset += nrgba.Stride
		case cbG16:
			// 16-bit samples are big-endian per the PNG spec.
			if d.useTransparent {
				ty := uint16(d.transparent[0])<<8 | uint16(d.transparent[1])
				for x := 0; x < width; x++ {
					ycol := uint16(cdat[2*x+0])<<8 | uint16(cdat[2*x+1])
					acol := uint16(0xffff)
					if ycol == ty {
						acol = 0x0000
					}
					nrgba64.SetNRGBA64(x, y, color.NRGBA64{ycol, ycol, ycol, acol})
				}
			} else {
				for x := 0; x < width; x++ {
					ycol := uint16(cdat[2*x+0])<<8 | uint16(cdat[2*x+1])
					gray16.SetGray16(x, y, color.Gray16{ycol})
				}
			}
		case cbGA16:
			for x := 0; x < width; x++ {
				ycol := uint16(cdat[4*x+0])<<8 | uint16(cdat[4*x+1])
				acol := uint16(cdat[4*x+2])<<8 | uint16(cdat[4*x+3])
				nrgba64.SetNRGBA64(x, y, color.NRGBA64{ycol, ycol, ycol, acol})
			}
		case cbTC16:
			if d.useTransparent {
				tr := uint16(d.transparent[0])<<8 | uint16(d.transparent[1])
				tg := uint16(d.transparent[2])<<8 | uint16(d.transparent[3])
				tb := uint16(d.transparent[4])<<8 | uint16(d.transparent[5])
				for x := 0; x < width; x++ {
					rcol := uint16(cdat[6*x+0])<<8 | uint16(cdat[6*x+1])
					gcol := uint16(cdat[6*x+2])<<8 | uint16(cdat[6*x+3])
					bcol := uint16(cdat[6*x+4])<<8 | uint16(cdat[6*x+5])
					acol := uint16(0xffff)
					if rcol == tr && gcol == tg && bcol == tb {
						acol = 0x0000
					}
					nrgba64.SetNRGBA64(x, y, color.NRGBA64{rcol, gcol, bcol, acol})
				}
			} else {
				for x := 0; x < width; x++ {
					rcol := uint16(cdat[6*x+0])<<8 | uint16(cdat[6*x+1])
					gcol := uint16(cdat[6*x+2])<<8 | uint16(cdat[6*x+3])
					bcol := uint16(cdat[6*x+4])<<8 | uint16(cdat[6*x+5])
					rgba64.SetRGBA64(x, y, color.RGBA64{rcol, gcol, bcol, 0xffff})
				}
			}
		case cbTCA16:
			for x := 0; x < width; x++ {
				rcol := uint16(cdat[8*x+0])<<8 | uint16(cdat[8*x+1])
				gcol := uint16(cdat[8*x+2])<<8 | uint16(cdat[8*x+3])
				bcol := uint16(cdat[8*x+4])<<8 | uint16(cdat[8*x+5])
				acol := uint16(cdat[8*x+6])<<8 | uint16(cdat[8*x+7])
				nrgba64.SetNRGBA64(x, y, color.NRGBA64{rcol, gcol, bcol, acol})
			}
		}
		// The current row for y is the previous row for y+1.
		pr, cr = cr, pr
	}
	return img, nil
}
// mergePassInto merges a single pass into a full sized image.
//
// src is the image produced by readImagePass for the given Adam7 pass; its
// pixels are scattered into dst at positions determined by the pass's
// x/y offset and factor. dst and src have the same concrete image type.
func (d *decoder) mergePassInto(dst image.Image, src image.Image, pass int) {
	p := interlacing[pass]
	var (
		srcPix        []uint8
		dstPix        []uint8
		stride        int
		rect          image.Rectangle
		bytesPerPixel int
	)
	// Extract the raw pixel buffers and per-pixel byte width for each
	// supported concrete type, so the merge below is a plain byte copy.
	switch target := dst.(type) {
	case *image.Alpha:
		srcPix = src.(*image.Alpha).Pix
		dstPix, stride, rect = target.Pix, target.Stride, target.Rect
		bytesPerPixel = 1
	case *image.Alpha16:
		srcPix = src.(*image.Alpha16).Pix
		dstPix, stride, rect = target.Pix, target.Stride, target.Rect
		bytesPerPixel = 2
	case *image.Gray:
		srcPix = src.(*image.Gray).Pix
		dstPix, stride, rect = target.Pix, target.Stride, target.Rect
		bytesPerPixel = 1
	case *image.Gray16:
		srcPix = src.(*image.Gray16).Pix
		dstPix, stride, rect = target.Pix, target.Stride, target.Rect
		bytesPerPixel = 2
	case *image.NRGBA:
		srcPix = src.(*image.NRGBA).Pix
		dstPix, stride, rect = target.Pix, target.Stride, target.Rect
		bytesPerPixel = 4
	case *image.NRGBA64:
		srcPix = src.(*image.NRGBA64).Pix
		dstPix, stride, rect = target.Pix, target.Stride, target.Rect
		bytesPerPixel = 8
	case *image.Paletted:
		source := src.(*image.Paletted)
		srcPix = source.Pix
		dstPix, stride, rect = target.Pix, target.Stride, target.Rect
		bytesPerPixel = 1
		if len(target.Palette) < len(source.Palette) {
			// readImagePass can return a paletted image whose implicit palette
			// length (one more than the maximum Pix value) is larger than the
			// explicit palette length (what's in the PLTE chunk). Make the
			// same adjustment here.
			target.Palette = source.Palette
		}
	case *image.RGBA:
		srcPix = src.(*image.RGBA).Pix
		dstPix, stride, rect = target.Pix, target.Stride, target.Rect
		bytesPerPixel = 4
	case *image.RGBA64:
		srcPix = src.(*image.RGBA64).Pix
		dstPix, stride, rect = target.Pix, target.Stride, target.Rect
		bytesPerPixel = 8
	}
	// Walk src in row-major order, placing each pixel at its interlaced
	// destination coordinate (x*xFactor+xOffset, y*yFactor+yOffset).
	s, bounds := 0, src.Bounds()
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		dBase := (y*p.yFactor+p.yOffset-rect.Min.Y)*stride + (p.xOffset-rect.Min.X)*bytesPerPixel
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			d := dBase + x*p.xFactor*bytesPerPixel
			copy(dstPix[d:], srcPix[s:s+bytesPerPixel])
			s += bytesPerPixel
		}
	}
}
// parseIDAT decodes the image from the IDAT chunk stream (starting with a
// chunk of the given payload length) and then verifies the chunk's CRC.
func (d *decoder) parseIDAT(length uint32) error {
	d.idatLength = length
	img, err := d.decode()
	if err != nil {
		return err
	}
	d.img = img
	return d.verifyChecksum()
}
// parseIEND validates the IEND chunk, which must have an empty payload,
// and verifies its CRC.
func (d *decoder) parseIEND(length uint32) error {
	if length == 0 {
		return d.verifyChecksum()
	}
	return FormatError("bad IEND length")
}
// parseChunk reads and processes one PNG chunk, enforcing the chunk-order
// state machine (IHDR, then PLTE/tRNS as applicable, then IDAT, then IEND).
// If configOnly is true, IDAT payloads are not decoded (DecodeConfig).
// Unrecognized chunks are read and discarded, but their CRC is still checked.
func (d *decoder) parseChunk(configOnly bool) error {
	// Read the length and chunk type.
	if _, err := io.ReadFull(d.r, d.tmp[:8]); err != nil {
		return err
	}
	length := binary.BigEndian.Uint32(d.tmp[:4])
	// The CRC covers the chunk type and payload, but not the length.
	d.crc.Reset()
	d.crc.Write(d.tmp[4:8])
	// Read the chunk data.
	switch string(d.tmp[4:8]) {
	case "IHDR":
		if d.stage != dsStart {
			return chunkOrderError
		}
		d.stage = dsSeenIHDR
		return d.parseIHDR(length)
	case "PLTE":
		if d.stage != dsSeenIHDR {
			return chunkOrderError
		}
		d.stage = dsSeenPLTE
		return d.parsePLTE(length)
	case "tRNS":
		// tRNS must follow PLTE for paletted images; for truecolor it may
		// follow IHDR or PLTE; for grayscale it must follow IHDR.
		if cbPaletted(d.cb) {
			if d.stage != dsSeenPLTE {
				return chunkOrderError
			}
		} else if cbTrueColor(d.cb) {
			if d.stage != dsSeenIHDR && d.stage != dsSeenPLTE {
				return chunkOrderError
			}
		} else if d.stage != dsSeenIHDR {
			return chunkOrderError
		}
		d.stage = dsSeentRNS
		return d.parsetRNS(length)
	case "IDAT":
		if d.stage < dsSeenIHDR || d.stage > dsSeenIDAT || (d.stage == dsSeenIHDR && cbPaletted(d.cb)) {
			return chunkOrderError
		} else if d.stage == dsSeenIDAT {
			// Ignore trailing zero-length or garbage IDAT chunks.
			//
			// This does not affect valid PNG images that contain multiple IDAT
			// chunks, since the first call to parseIDAT below will consume all
			// consecutive IDAT chunks required for decoding the image.
			// The break falls through to the skip-and-checksum code below.
			break
		}
		d.stage = dsSeenIDAT
		if configOnly {
			return nil
		}
		return d.parseIDAT(length)
	case "IEND":
		if d.stage != dsSeenIDAT {
			return chunkOrderError
		}
		d.stage = dsSeenIEND
		return d.parseIEND(length)
	}
	// Per the PNG spec, chunk lengths must fit in 31 bits.
	if length > 0x7fffffff {
		return FormatError(fmt.Sprintf("Bad chunk length: %d", length))
	}
	// Ignore this chunk (of a known length).
	var ignored [4096]byte
	for length > 0 {
		n, err := io.ReadFull(d.r, ignored[:min(len(ignored), int(length))])
		if err != nil {
			return err
		}
		d.crc.Write(ignored[:n])
		length -= uint32(n)
	}
	return d.verifyChecksum()
}
// verifyChecksum reads the 4-byte CRC that trails the current chunk and
// compares it against the running CRC accumulated in d.crc.
func (d *decoder) verifyChecksum() error {
	if _, err := io.ReadFull(d.r, d.tmp[:4]); err != nil {
		return err
	}
	want := binary.BigEndian.Uint32(d.tmp[:4])
	if want != d.crc.Sum32() {
		return FormatError("invalid checksum")
	}
	return nil
}
// checkHeader consumes and validates the 8-byte PNG file signature.
func (d *decoder) checkHeader() error {
	if _, err := io.ReadFull(d.r, d.tmp[:len(pngHeader)]); err != nil {
		return err
	}
	if string(d.tmp[:len(pngHeader)]) != pngHeader {
		return FormatError("not a PNG file")
	}
	return nil
}
// Decode reads a PNG image from r and returns it as an [image.Image].
// The type of Image returned depends on the PNG contents.
func Decode(r io.Reader) (image.Image, error) {
	d := &decoder{
		r:   r,
		crc: crc32.NewIEEE(),
	}
	// Validate the signature, then process chunks until IEND is seen.
	err := d.checkHeader()
	for err == nil && d.stage != dsSeenIEND {
		err = d.parseChunk(false)
	}
	if err != nil {
		// A truncated stream is reported as unexpected EOF, since a valid
		// PNG always ends with an IEND chunk.
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, err
	}
	return d.img, nil
}
// DecodeConfig returns the color model and dimensions of a PNG image without
// decoding the entire image.
func DecodeConfig(r io.Reader) (image.Config, error) {
	d := &decoder{
		r:   r,
		crc: crc32.NewIEEE(),
	}
	if err := d.checkHeader(); err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return image.Config{}, err
	}
	for {
		if err := d.parseChunk(true); err != nil {
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			return image.Config{}, err
		}
		// For paletted images, keep reading until a possible tRNS chunk has
		// been seen, since it affects the palette; otherwise IHDR suffices.
		done := d.stage >= dsSeenIHDR
		if cbPaletted(d.cb) {
			done = d.stage >= dsSeentRNS
		}
		if done {
			break
		}
	}
	var cm color.Model
	switch d.cb {
	case cbG1, cbG2, cbG4, cbG8:
		cm = color.GrayModel
	case cbGA8, cbTCA8:
		cm = color.NRGBAModel
	case cbTC8:
		cm = color.RGBAModel
	case cbP1, cbP2, cbP4, cbP8:
		cm = d.palette
	case cbG16:
		cm = color.Gray16Model
	case cbGA16, cbTCA16:
		cm = color.NRGBA64Model
	case cbTC16:
		cm = color.RGBA64Model
	}
	return image.Config{
		ColorModel: cm,
		Width:      d.width,
		Height:     d.height,
	}, nil
}
// init registers the PNG format with the image package so that
// image.Decode and image.DecodeConfig recognize PNG streams by their
// 8-byte signature.
func init() {
	image.RegisterFormat("png", pngHeader, Decode, DecodeConfig)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package png
import (
"bufio"
"compress/zlib"
"encoding/binary"
"hash/crc32"
"image"
"image/color"
"io"
"strconv"
)
// Encoder configures encoding PNG images.
type Encoder struct {
	// CompressionLevel selects the zlib compression level used for the
	// IDAT data. The zero value maps to zlib's default compression.
	CompressionLevel CompressionLevel

	// BufferPool optionally specifies a buffer pool to get temporary
	// EncoderBuffers when encoding an image.
	BufferPool EncoderBufferPool
}
// EncoderBufferPool is an interface for getting and returning temporary
// instances of the [EncoderBuffer] struct. This can be used to reuse buffers
// when encoding multiple images.
type EncoderBufferPool interface {
	// Get returns a buffer to use for an encode; it may be nil.
	Get() *EncoderBuffer
	// Put returns a buffer to the pool after an encode completes.
	Put(*EncoderBuffer)
}
// EncoderBuffer holds the buffers used for encoding PNG images.
// It is the exported alias of the internal encoder state, so pools can
// recycle the row buffers, zlib writer and bufio writer across encodes.
type EncoderBuffer encoder
// encoder carries all state for a single Encode call. Its buffers are
// reusable across calls when recycled through an EncoderBufferPool.
type encoder struct {
	enc    *Encoder     // configuration for this encode
	w      io.Writer    // destination stream
	m      image.Image  // image being encoded
	cb     int          // color-bit-depth combination (one of the cb* constants)
	err    error        // sticky first error; once set, writes become no-ops
	header [8]byte      // scratch for a chunk's length and type
	footer [4]byte      // scratch for a chunk's CRC
	tmp    [4 * 256]byte
	cr     [nFilter][]uint8 // current row, one buffer per candidate filter
	pr     []uint8          // previous (unfiltered) row
	zw     *zlib.Writer     // reused zlib writer for IDAT data
	zwLevel int             // compression level zw was created with
	bw      *bufio.Writer   // buffered writer that chunks output into IDATs
}
// CompressionLevel indicates the compression level.
type CompressionLevel int

const (
	// DefaultCompression is the zero value; it maps to zlib.DefaultCompression.
	DefaultCompression CompressionLevel = 0
	// NoCompression maps to zlib.NoCompression (stored, uncompressed blocks).
	NoCompression CompressionLevel = -1
	// BestSpeed maps to zlib.BestSpeed.
	BestSpeed CompressionLevel = -2
	// BestCompression maps to zlib.BestCompression.
	BestCompression CompressionLevel = -3

	// Positive CompressionLevel values are reserved to mean a numeric zlib
	// compression level, although that is not implemented yet.
)
// opaquer is implemented by image types that can report full opacity
// directly (e.g. the standard image types), letting opaque skip the
// per-pixel scan.
type opaquer interface {
	Opaque() bool
}
// opaque reports whether every pixel of m is fully opaque. Images that
// implement the opaquer interface answer directly; otherwise every pixel's
// alpha is inspected.
func opaque(m image.Image) bool {
	if o, ok := m.(opaquer); ok {
		return o.Opaque()
	}
	bounds := m.Bounds()
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			if _, _, _, a := m.At(x, y).RGBA(); a != 0xffff {
				return false
			}
		}
	}
	return true
}
// abs8 returns the absolute value of d interpreted as a signed int8
// (e.g. abs8(0xff) == 1, abs8(0x80) == 128).
func abs8(d uint8) int {
	if d >= 128 {
		return 256 - int(d)
	}
	return int(d)
}
// writeChunk emits one PNG chunk: 4-byte big-endian length, 4-byte type
// (name), the payload b, and a CRC-32 over the type and payload. Errors are
// recorded in e.err, making subsequent calls no-ops.
func (e *encoder) writeChunk(b []byte, name string) {
	if e.err != nil {
		return
	}
	n := uint32(len(b))
	if int(n) != len(b) {
		// The payload would not fit in the 32-bit chunk length field.
		e.err = UnsupportedError(name + " chunk is too large: " + strconv.Itoa(len(b)))
		return
	}
	binary.BigEndian.PutUint32(e.header[:4], n)
	e.header[4] = name[0]
	e.header[5] = name[1]
	e.header[6] = name[2]
	e.header[7] = name[3]
	crc := crc32.NewIEEE()
	crc.Write(e.header[4:8])
	crc.Write(b)
	binary.BigEndian.PutUint32(e.footer[:4], crc.Sum32())
	// Header, payload, footer — stop at the first write error.
	for _, part := range [][]byte{e.header[:8], b, e.footer[:4]} {
		if _, e.err = e.w.Write(part); e.err != nil {
			return
		}
	}
}
// writeIHDR emits the IHDR chunk: width, height, bit depth, color type,
// and the fixed compression/filter/interlace method bytes.
func (e *encoder) writeIHDR() {
	b := e.m.Bounds()
	binary.BigEndian.PutUint32(e.tmp[0:4], uint32(b.Dx()))
	binary.BigEndian.PutUint32(e.tmp[4:8], uint32(b.Dy()))
	// Map the cb combination to the spec's bit depth and color type.
	var depth, colorType uint8
	switch e.cb {
	case cbG8:
		depth, colorType = 8, ctGrayscale
	case cbTC8:
		depth, colorType = 8, ctTrueColor
	case cbP8:
		depth, colorType = 8, ctPaletted
	case cbP4:
		depth, colorType = 4, ctPaletted
	case cbP2:
		depth, colorType = 2, ctPaletted
	case cbP1:
		depth, colorType = 1, ctPaletted
	case cbTCA8:
		depth, colorType = 8, ctTrueColorAlpha
	case cbG16:
		depth, colorType = 16, ctGrayscale
	case cbTC16:
		depth, colorType = 16, ctTrueColor
	case cbTCA16:
		depth, colorType = 16, ctTrueColorAlpha
	}
	e.tmp[8] = depth
	e.tmp[9] = colorType
	e.tmp[10] = 0 // default compression method
	e.tmp[11] = 0 // default filter method
	e.tmp[12] = 0 // non-interlaced
	e.writeChunk(e.tmp[:13], "IHDR")
}
// writePLTEAndTRNS emits the PLTE chunk for palette p and, if any entry is
// not fully opaque, a tRNS chunk covering entries up to the last
// non-opaque one.
func (e *encoder) writePLTEAndTRNS(p color.Palette) {
	if len(p) < 1 || len(p) > 256 {
		e.err = FormatError("bad palette length: " + strconv.Itoa(len(p)))
		return
	}
	// RGB triples go at the front of tmp; alpha bytes are staged after
	// the maximum PLTE payload (3*256 bytes).
	lastAlpha := -1
	for i, c := range p {
		c1 := color.NRGBAModel.Convert(c).(color.NRGBA)
		e.tmp[3*i+0], e.tmp[3*i+1], e.tmp[3*i+2] = c1.R, c1.G, c1.B
		if c1.A != 0xff {
			lastAlpha = i
		}
		e.tmp[3*256+i] = c1.A
	}
	e.writeChunk(e.tmp[:3*len(p)], "PLTE")
	if lastAlpha != -1 {
		e.writeChunk(e.tmp[3*256:3*256+1+lastAlpha], "tRNS")
	}
}
// An encoder is an io.Writer that satisfies writes by writing PNG IDAT chunks,
// including an 8-byte header and 4-byte CRC checksum per Write call. Such calls
// should be relatively infrequent, since writeIDATs uses a [bufio.Writer].
//
// This method should only be called from writeIDATs (via writeImage).
// No other code should treat an encoder as an io.Writer.
func (e *encoder) Write(b []byte) (int, error) {
	e.writeChunk(b, "IDAT")
	if e.err == nil {
		return len(b), nil
	}
	return 0, e.err
}
// Chooses the filter to use for encoding the current row, and applies it.
// The return value is the index of the filter and also of the row in cr that has had it applied.
//
// cr[0] holds the unfiltered row (with its leading filter-type byte);
// cr[1..4] are scratch buffers that receive the row transformed under the
// sub, up, average and Paeth filters respectively. pr is the previous
// (unfiltered) row, and bpp is the number of bytes per pixel.
func filter(cr *[nFilter][]byte, pr []byte, bpp int) int {
	// We try all five filter types, and pick the one that minimizes the sum of absolute differences.
	// This is the same heuristic that libpng uses, although the filters are attempted in order of
	// estimated most likely to be minimal (ftUp, ftPaeth, ftNone, ftSub, ftAverage), rather than
	// in their enumeration order (ftNone, ftSub, ftUp, ftAverage, ftPaeth).
	cdat0 := cr[0][1:]
	cdat1 := cr[1][1:]
	cdat2 := cr[2][1:]
	cdat3 := cr[3][1:]
	cdat4 := cr[4][1:]
	pdat := pr[1:]
	n := len(cdat0)

	// The up filter.
	sum := 0
	for i := 0; i < n; i++ {
		cdat2[i] = cdat0[i] - pdat[i]
		sum += abs8(cdat2[i])
	}
	best := sum
	filter := ftUp

	// The Paeth filter.
	// The first bpp bytes have no pixel to the left; the Paeth predictor
	// degenerates to the up filter there.
	sum = 0
	for i := 0; i < bpp; i++ {
		cdat4[i] = cdat0[i] - pdat[i]
		sum += abs8(cdat4[i])
	}
	for i := bpp; i < n; i++ {
		cdat4[i] = cdat0[i] - paeth(cdat0[i-bpp], pdat[i], pdat[i-bpp])
		sum += abs8(cdat4[i])
		// Early exit: this candidate can no longer beat the current best.
		if sum >= best {
			break
		}
	}
	if sum < best {
		best = sum
		filter = ftPaeth
	}

	// The none filter.
	sum = 0
	for i := 0; i < n; i++ {
		sum += abs8(cdat0[i])
		if sum >= best {
			break
		}
	}
	if sum < best {
		best = sum
		filter = ftNone
	}

	// The sub filter.
	sum = 0
	for i := 0; i < bpp; i++ {
		cdat1[i] = cdat0[i]
		sum += abs8(cdat1[i])
	}
	for i := bpp; i < n; i++ {
		cdat1[i] = cdat0[i] - cdat0[i-bpp]
		sum += abs8(cdat1[i])
		if sum >= best {
			break
		}
	}
	if sum < best {
		best = sum
		filter = ftSub
	}

	// The average filter.
	sum = 0
	for i := 0; i < bpp; i++ {
		cdat3[i] = cdat0[i] - pdat[i]/2
		sum += abs8(cdat3[i])
	}
	for i := bpp; i < n; i++ {
		cdat3[i] = cdat0[i] - uint8((int(cdat0[i-bpp])+int(pdat[i]))/2)
		sum += abs8(cdat3[i])
		if sum >= best {
			break
		}
	}
	if sum < best {
		// best need not be updated: this is the last candidate considered.
		filter = ftAverage
	}

	return filter
}
// writeImage serializes m's pixels as filtered PNG scanlines, compressed
// with zlib at the given level, into w. cb selects the color-bit-depth
// combination chosen by Encode. The zlib writer is cached on e and reused
// across calls when the level is unchanged.
func (e *encoder) writeImage(w io.Writer, m image.Image, cb int, level int) error {
	if e.zw == nil || e.zwLevel != level {
		zw, err := zlib.NewWriterLevel(w, level)
		if err != nil {
			return err
		}
		e.zw = zw
		e.zwLevel = level
	} else {
		e.zw.Reset(w)
	}
	defer e.zw.Close()

	bitsPerPixel := 0

	switch cb {
	case cbG8:
		bitsPerPixel = 8
	case cbTC8:
		bitsPerPixel = 24
	case cbP8:
		bitsPerPixel = 8
	case cbP4:
		bitsPerPixel = 4
	case cbP2:
		bitsPerPixel = 2
	case cbP1:
		bitsPerPixel = 1
	case cbTCA8:
		bitsPerPixel = 32
	case cbTC16:
		bitsPerPixel = 48
	case cbTCA16:
		bitsPerPixel = 64
	case cbG16:
		bitsPerPixel = 16
	}

	// cr[*] and pr are the bytes for the current and previous row.
	// cr[0] is unfiltered (or equivalently, filtered with the ftNone filter).
	// cr[ft], for non-zero filter types ft, are buffers for transforming cr[0] under the
	// other PNG filter types. These buffers are allocated once and re-used for each row.
	// The +1 is for the per-row filter type, which is at cr[*][0].
	b := m.Bounds()
	sz := 1 + (bitsPerPixel*b.Dx()+7)/8
	for i := range e.cr {
		if cap(e.cr[i]) < sz {
			e.cr[i] = make([]uint8, sz)
		} else {
			e.cr[i] = e.cr[i][:sz]
		}
		// Pre-set each buffer's filter-type byte to its own index.
		e.cr[i][0] = uint8(i)
	}
	cr := e.cr
	if cap(e.pr) < sz {
		e.pr = make([]uint8, sz)
	} else {
		e.pr = e.pr[:sz]
		clear(e.pr)
	}
	pr := e.pr

	// Fast paths below avoid the generic At/Convert calls when the image's
	// concrete type exposes its Pix buffer directly.
	gray, _ := m.(*image.Gray)
	rgba, _ := m.(*image.RGBA)
	paletted, _ := m.(*image.Paletted)
	nrgba, _ := m.(*image.NRGBA)

	for y := b.Min.Y; y < b.Max.Y; y++ {
		// Convert from colors to bytes.
		i := 1
		switch cb {
		case cbG8:
			if gray != nil {
				offset := (y - b.Min.Y) * gray.Stride
				copy(cr[0][1:], gray.Pix[offset:offset+b.Dx()])
			} else {
				for x := b.Min.X; x < b.Max.X; x++ {
					c := color.GrayModel.Convert(m.At(x, y)).(color.Gray)
					cr[0][i] = c.Y
					i++
				}
			}
		case cbTC8:
			// We have previously verified that the alpha value is fully opaque.
			cr0 := cr[0]
			stride, pix := 0, []byte(nil)
			if rgba != nil {
				stride, pix = rgba.Stride, rgba.Pix
			} else if nrgba != nil {
				stride, pix = nrgba.Stride, nrgba.Pix
			}
			if stride != 0 {
				// Drop every fourth (alpha) byte while copying RGB.
				j0 := (y - b.Min.Y) * stride
				j1 := j0 + b.Dx()*4
				for j := j0; j < j1; j += 4 {
					cr0[i+0] = pix[j+0]
					cr0[i+1] = pix[j+1]
					cr0[i+2] = pix[j+2]
					i += 3
				}
			} else {
				for x := b.Min.X; x < b.Max.X; x++ {
					r, g, b, _ := m.At(x, y).RGBA()
					cr0[i+0] = uint8(r >> 8)
					cr0[i+1] = uint8(g >> 8)
					cr0[i+2] = uint8(b >> 8)
					i += 3
				}
			}
		case cbP8:
			if paletted != nil {
				offset := (y - b.Min.Y) * paletted.Stride
				copy(cr[0][1:], paletted.Pix[offset:offset+b.Dx()])
			} else {
				pi := m.(image.PalettedImage)
				for x := b.Min.X; x < b.Max.X; x++ {
					cr[0][i] = pi.ColorIndexAt(x, y)
					i += 1
				}
			}
		case cbP4, cbP2, cbP1:
			pi := m.(image.PalettedImage)

			// Pack multiple palette indices into each byte, MSB first.
			var a uint8
			var c int
			pixelsPerByte := 8 / bitsPerPixel
			for x := b.Min.X; x < b.Max.X; x++ {
				a = a<<uint(bitsPerPixel) | pi.ColorIndexAt(x, y)
				c++
				if c == pixelsPerByte {
					cr[0][i] = a
					i += 1
					a = 0
					c = 0
				}
			}
			// Flush a partially filled final byte, left-aligned.
			if c != 0 {
				for c != pixelsPerByte {
					a = a << uint(bitsPerPixel)
					c++
				}
				cr[0][i] = a
			}
		case cbTCA8:
			if nrgba != nil {
				offset := (y - b.Min.Y) * nrgba.Stride
				copy(cr[0][1:], nrgba.Pix[offset:offset+b.Dx()*4])
			} else if rgba != nil {
				dst := cr[0][1:]
				src := rgba.Pix[rgba.PixOffset(b.Min.X, y):rgba.PixOffset(b.Max.X, y)]
				for ; len(src) >= 4; dst, src = dst[4:], src[4:] {
					d := (*[4]byte)(dst)
					s := (*[4]byte)(src)
					if s[3] == 0x00 {
						d[0] = 0
						d[1] = 0
						d[2] = 0
						d[3] = 0
					} else if s[3] == 0xff {
						copy(d[:], s[:])
					} else {
						// This code does the same as color.NRGBAModel.Convert(
						// rgba.At(x, y)).(color.NRGBA) but with no extra memory
						// allocations or interface/function call overhead.
						//
						// The multiplier m combines 0x101 (which converts
						// 8-bit color to 16-bit color) and 0xffff (which, when
						// combined with the division-by-a, converts from
						// alpha-premultiplied to non-alpha-premultiplied).
						const m = 0x101 * 0xffff
						a := uint32(s[3]) * 0x101
						d[0] = uint8((uint32(s[0]) * m / a) >> 8)
						d[1] = uint8((uint32(s[1]) * m / a) >> 8)
						d[2] = uint8((uint32(s[2]) * m / a) >> 8)
						d[3] = s[3]
					}
				}
			} else {
				// Convert from image.Image (which is alpha-premultiplied) to PNG's non-alpha-premultiplied.
				for x := b.Min.X; x < b.Max.X; x++ {
					c := color.NRGBAModel.Convert(m.At(x, y)).(color.NRGBA)
					cr[0][i+0] = c.R
					cr[0][i+1] = c.G
					cr[0][i+2] = c.B
					cr[0][i+3] = c.A
					i += 4
				}
			}
		case cbG16:
			for x := b.Min.X; x < b.Max.X; x++ {
				c := color.Gray16Model.Convert(m.At(x, y)).(color.Gray16)
				cr[0][i+0] = uint8(c.Y >> 8)
				cr[0][i+1] = uint8(c.Y)
				i += 2
			}
		case cbTC16:
			// We have previously verified that the alpha value is fully opaque.
			for x := b.Min.X; x < b.Max.X; x++ {
				r, g, b, _ := m.At(x, y).RGBA()
				cr[0][i+0] = uint8(r >> 8)
				cr[0][i+1] = uint8(r)
				cr[0][i+2] = uint8(g >> 8)
				cr[0][i+3] = uint8(g)
				cr[0][i+4] = uint8(b >> 8)
				cr[0][i+5] = uint8(b)
				i += 6
			}
		case cbTCA16:
			// Convert from image.Image (which is alpha-premultiplied) to PNG's non-alpha-premultiplied.
			for x := b.Min.X; x < b.Max.X; x++ {
				c := color.NRGBA64Model.Convert(m.At(x, y)).(color.NRGBA64)
				cr[0][i+0] = uint8(c.R >> 8)
				cr[0][i+1] = uint8(c.R)
				cr[0][i+2] = uint8(c.G >> 8)
				cr[0][i+3] = uint8(c.G)
				cr[0][i+4] = uint8(c.B >> 8)
				cr[0][i+5] = uint8(c.B)
				cr[0][i+6] = uint8(c.A >> 8)
				cr[0][i+7] = uint8(c.A)
				i += 8
			}
		}

		// Apply the filter.
		// Skip filter for NoCompression and paletted images (cbP8) as
		// "filters are rarely useful on palette images" and will result
		// in larger files (see http://www.libpng.org/pub/png/book/chapter09.html).
		f := ftNone
		if level != zlib.NoCompression && cb != cbP8 && cb != cbP4 && cb != cbP2 && cb != cbP1 {
			// Since we skip paletted images we don't have to worry about
			// bitsPerPixel not being a multiple of 8
			bpp := bitsPerPixel / 8
			f = filter(&cr, pr, bpp)
		}

		// Write the compressed bytes.
		if _, err := e.zw.Write(cr[f]); err != nil {
			return err
		}

		// The current row for y is the previous row for y+1.
		pr, cr[0] = cr[0], pr
	}
	return nil
}
// writeIDATs writes the actual image data to one or more IDAT chunks by
// streaming the compressed scanlines through a buffered writer whose
// flushes become individual IDAT chunks (via e.Write).
func (e *encoder) writeIDATs() {
	if e.err != nil {
		return
	}
	if e.bw != nil {
		e.bw.Reset(e)
	} else {
		e.bw = bufio.NewWriterSize(e, 1<<15)
	}
	if e.err = e.writeImage(e.bw, e.m, e.cb, levelToZlib(e.enc.CompressionLevel)); e.err != nil {
		return
	}
	e.err = e.bw.Flush()
}
// levelToZlib maps a CompressionLevel to the corresponding zlib level.
// This function is required because we want the zero value of
// Encoder.CompressionLevel to map to zlib.DefaultCompression; any
// unrecognized value also falls back to the default.
func levelToZlib(l CompressionLevel) int {
	if l == NoCompression {
		return zlib.NoCompression
	}
	if l == BestSpeed {
		return zlib.BestSpeed
	}
	if l == BestCompression {
		return zlib.BestCompression
	}
	// DefaultCompression and all reserved values.
	return zlib.DefaultCompression
}
// writeIEND writes the empty IEND chunk that terminates the PNG stream.
func (e *encoder) writeIEND() { e.writeChunk(nil, "IEND") }
// Encode writes the Image m to w in PNG format. Any Image may be
// encoded, but images that are not [image.NRGBA] might be encoded lossily.
func Encode(w io.Writer, m image.Image) error {
	// Delegate to a zero-configured Encoder (default compression, no pool).
	return (&Encoder{}).Encode(w, m)
}
// Encode writes the Image m to w in PNG format.
//
// The color-bit-depth combination is chosen from m's color model: paletted
// images use the smallest palette depth that fits, grayscale and 16-bit
// models map directly, and other models use truecolor with or without alpha
// depending on whether the image is fully opaque. Encoder buffers are taken
// from enc.BufferPool when one is configured.
func (enc *Encoder) Encode(w io.Writer, m image.Image) error {
	// Obviously, negative widths and heights are invalid. Furthermore, the PNG
	// spec section 11.2.2 says that zero is invalid. Excessively large images are
	// also rejected.
	mw, mh := int64(m.Bounds().Dx()), int64(m.Bounds().Dy())
	if mw <= 0 || mh <= 0 || mw >= 1<<32 || mh >= 1<<32 {
		return FormatError("invalid image size: " + strconv.FormatInt(mw, 10) + "x" + strconv.FormatInt(mh, 10))
	}

	var e *encoder
	if enc.BufferPool != nil {
		buffer := enc.BufferPool.Get()
		e = (*encoder)(buffer)
	}
	if e == nil {
		// No pool configured, or the pool returned nil.
		e = &encoder{}
	}
	if enc.BufferPool != nil {
		defer enc.BufferPool.Put((*EncoderBuffer)(e))
	}

	e.enc = enc
	e.w = w
	e.m = m

	var pal color.Palette
	// cbP8 encoding needs PalettedImage's ColorIndexAt method.
	if _, ok := m.(image.PalettedImage); ok {
		pal, _ = m.ColorModel().(color.Palette)
	}
	if pal != nil {
		if len(pal) <= 2 {
			e.cb = cbP1
		} else if len(pal) <= 4 {
			e.cb = cbP2
		} else if len(pal) <= 16 {
			e.cb = cbP4
		} else {
			e.cb = cbP8
		}
	} else {
		switch m.ColorModel() {
		case color.GrayModel:
			e.cb = cbG8
		case color.Gray16Model:
			e.cb = cbG16
		case color.RGBAModel, color.NRGBAModel, color.AlphaModel:
			if opaque(m) {
				e.cb = cbTC8
			} else {
				e.cb = cbTCA8
			}
		default:
			if opaque(m) {
				e.cb = cbTC16
			} else {
				e.cb = cbTCA16
			}
		}
	}

	// Each write helper is a no-op once e.err is set, so errors need only
	// be checked once at the end.
	_, e.err = io.WriteString(w, pngHeader)
	e.writeIHDR()
	if pal != nil {
		e.writePLTEAndTRNS(pal)
	}
	e.writeIDATs()
	e.writeIEND()
	return e.err
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package image
import (
"image/color"
)
// YCbCrSubsampleRatio is the chroma subsample ratio used in a YCbCr image.
type YCbCrSubsampleRatio int

const (
	YCbCrSubsampleRatio444 YCbCrSubsampleRatio = iota
	YCbCrSubsampleRatio422
	YCbCrSubsampleRatio420
	YCbCrSubsampleRatio440
	YCbCrSubsampleRatio411
	YCbCrSubsampleRatio410
)

// String returns the name of the subsample ratio constant, or
// "YCbCrSubsampleRatioUnknown" for out-of-range values.
func (s YCbCrSubsampleRatio) String() string {
	// Indexed by the constants' iota order above.
	names := [...]string{
		"YCbCrSubsampleRatio444",
		"YCbCrSubsampleRatio422",
		"YCbCrSubsampleRatio420",
		"YCbCrSubsampleRatio440",
		"YCbCrSubsampleRatio411",
		"YCbCrSubsampleRatio410",
	}
	if 0 <= int(s) && int(s) < len(names) {
		return names[s]
	}
	return "YCbCrSubsampleRatioUnknown"
}
// YCbCr is an in-memory image of Y'CbCr colors. There is one Y sample per
// pixel, but each Cb and Cr sample can span one or more pixels.
// YStride is the Y slice index delta between vertically adjacent pixels.
// CStride is the Cb and Cr slice index delta between vertically adjacent pixels
// that map to separate chroma samples.
// It is not an absolute requirement, but YStride and len(Y) are typically
// multiples of 8, and:
//
//	For 4:4:4, CStride == YStride/1 && len(Cb) == len(Cr) == len(Y)/1.
//	For 4:2:2, CStride == YStride/2 && len(Cb) == len(Cr) == len(Y)/2.
//	For 4:2:0, CStride == YStride/2 && len(Cb) == len(Cr) == len(Y)/4.
//	For 4:4:0, CStride == YStride/1 && len(Cb) == len(Cr) == len(Y)/2.
//	For 4:1:1, CStride == YStride/4 && len(Cb) == len(Cr) == len(Y)/4.
//	For 4:1:0, CStride == YStride/4 && len(Cb) == len(Cr) == len(Y)/8.
type YCbCr struct {
	Y, Cb, Cr      []uint8
	YStride        int
	CStride        int
	SubsampleRatio YCbCrSubsampleRatio
	Rect           Rectangle
}
// ColorModel returns the YCbCr color model.
func (p *YCbCr) ColorModel() color.Model {
	return color.YCbCrModel
}
// Bounds returns the domain for which At can return non-zero color.
func (p *YCbCr) Bounds() Rectangle {
	return p.Rect
}
// At returns the color of the pixel at (x, y) as a color.Color.
func (p *YCbCr) At(x, y int) color.Color {
	return p.YCbCrAt(x, y)
}
// RGBA64At returns the color of the pixel at (x, y) as a color.RGBA64,
// converting through the YCbCr color's RGBA method.
func (p *YCbCr) RGBA64At(x, y int) color.RGBA64 {
	r, g, b, a := p.YCbCrAt(x, y).RGBA()
	return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
}
// YCbCrAt returns the color of the pixel at (x, y) as a color.YCbCr.
// Points outside the image bounds yield the zero color.
func (p *YCbCr) YCbCrAt(x, y int) color.YCbCr {
	if !(Point{x, y}.In(p.Rect)) {
		return color.YCbCr{}
	}
	// The luma index is per-pixel; the chroma index depends on subsampling.
	yi := p.YOffset(x, y)
	ci := p.COffset(x, y)
	return color.YCbCr{
		p.Y[yi],
		p.Cb[ci],
		p.Cr[ci],
	}
}
// YOffset returns the index of the first element of Y that corresponds to
// the pixel at (x, y).
func (p *YCbCr) YOffset(x, y int) int {
	return (y-p.Rect.Min.Y)*p.YStride + (x - p.Rect.Min.X)
}
// COffset returns the index of the first element of Cb or Cr that corresponds
// to the pixel at (x, y).
//
// Note the x/2 - Min.X/2 form (rather than (x-Min.X)/2): the chroma grid is
// anchored at even absolute coordinates, so each coordinate is halved (or
// quartered) before the origin is subtracted.
func (p *YCbCr) COffset(x, y int) int {
	switch p.SubsampleRatio {
	case YCbCrSubsampleRatio422:
		return (y-p.Rect.Min.Y)*p.CStride + (x/2 - p.Rect.Min.X/2)
	case YCbCrSubsampleRatio420:
		return (y/2-p.Rect.Min.Y/2)*p.CStride + (x/2 - p.Rect.Min.X/2)
	case YCbCrSubsampleRatio440:
		return (y/2-p.Rect.Min.Y/2)*p.CStride + (x - p.Rect.Min.X)
	case YCbCrSubsampleRatio411:
		return (y-p.Rect.Min.Y)*p.CStride + (x/4 - p.Rect.Min.X/4)
	case YCbCrSubsampleRatio410:
		return (y/2-p.Rect.Min.Y/2)*p.CStride + (x/4 - p.Rect.Min.X/4)
	}
	// Default to 4:4:4 subsampling.
	return (y-p.Rect.Min.Y)*p.CStride + (x - p.Rect.Min.X)
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *YCbCr) SubImage(r Rectangle) Image {
	r = r.Intersect(p.Rect)
	// An empty intersection is not guaranteed to lie inside either operand,
	// so guard here: the plane offsets computed below could otherwise index
	// out of range and panic.
	if r.Empty() {
		return &YCbCr{SubsampleRatio: p.SubsampleRatio}
	}
	yOff := p.YOffset(r.Min.X, r.Min.Y)
	cOff := p.COffset(r.Min.X, r.Min.Y)
	sub := &YCbCr{
		Y:              p.Y[yOff:],
		Cb:             p.Cb[cOff:],
		Cr:             p.Cr[cOff:],
		SubsampleRatio: p.SubsampleRatio,
		YStride:        p.YStride,
		CStride:        p.CStride,
		Rect:           r,
	}
	return sub
}
// Opaque scans the entire image and reports whether it is fully opaque.
// A YCbCr image has no alpha channel, so it is always opaque.
func (p *YCbCr) Opaque() bool {
	return true
}
// yCbCrSize returns the luma plane width and height (w, h) and the chroma
// plane width and height (cw, ch) for an image with the given bounds and
// subsample ratio.
func yCbCrSize(r Rectangle, subsampleRatio YCbCrSubsampleRatio) (w, h, cw, ch int) {
	w, h = r.Dx(), r.Dy()
	// Chroma buckets align to absolute coordinates, so a dimension counts
	// every bucket the rectangle touches, even partially.
	switch subsampleRatio {
	case YCbCrSubsampleRatio422:
		return w, h, (r.Max.X+1)/2 - r.Min.X/2, h
	case YCbCrSubsampleRatio420:
		return w, h, (r.Max.X+1)/2 - r.Min.X/2, (r.Max.Y+1)/2 - r.Min.Y/2
	case YCbCrSubsampleRatio440:
		return w, h, w, (r.Max.Y+1)/2 - r.Min.Y/2
	case YCbCrSubsampleRatio411:
		return w, h, (r.Max.X+3)/4 - r.Min.X/4, h
	case YCbCrSubsampleRatio410:
		return w, h, (r.Max.X+3)/4 - r.Min.X/4, (r.Max.Y+1)/2 - r.Min.Y/2
	}
	// Default to 4:4:4 subsampling: full-resolution chroma.
	return w, h, w, h
}
// NewYCbCr returns a new YCbCr image with the given bounds and subsample
// ratio.
func NewYCbCr(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *YCbCr {
	w, h, cw, ch := yCbCrSize(r, subsampleRatio)
	// totalLength mirrors i2 below, but computed with overflow-checked
	// arithmetic, so a huge or inverted rectangle is caught here.
	totalLength := add2NonNeg(
		mul3NonNeg(1, w, h),
		mul3NonNeg(2, cw, ch),
	)
	if totalLength < 0 {
		panic("image: NewYCbCr Rectangle has huge or negative dimensions")
	}
	ySize := w * h
	cSize := cw * ch
	i0 := ySize
	i1 := ySize + cSize
	i2 := ySize + 2*cSize
	// All three planes share one backing array; the full-capacity slice
	// expressions keep an append on one plane from clobbering the next.
	buf := make([]byte, i2)
	return &YCbCr{
		Y:              buf[:i0:i0],
		Cb:             buf[i0:i1:i1],
		Cr:             buf[i1:i2:i2],
		SubsampleRatio: subsampleRatio,
		YStride:        w,
		CStride:        cw,
		Rect:           r,
	}
}
// NYCbCrA is an in-memory image of non-alpha-premultiplied Y'CbCr-with-alpha
// colors. A and AStride are analogous to the Y and YStride fields of the
// embedded YCbCr.
type NYCbCrA struct {
	YCbCr
	// A holds the image's alpha samples, one per pixel.
	A []uint8
	// AStride is the A slice index delta between vertically adjacent pixels.
	AStride int
}
// ColorModel returns the image's color model, color.NYCbCrAModel.
func (p *NYCbCrA) ColorModel() color.Model {
	return color.NYCbCrAModel
}
// At returns the color of the pixel at (x, y) as a color.NYCbCrA.
func (p *NYCbCrA) At(x, y int) color.Color {
	return p.NYCbCrAAt(x, y)
}
// RGBA64At returns the pixel at (x, y) converted to an alpha-premultiplied
// color.RGBA64, truncating the 32-bit channel values from RGBA() to 16 bits.
func (p *NYCbCrA) RGBA64At(x, y int) color.RGBA64 {
	r, g, b, a := p.NYCbCrAAt(x, y).RGBA()
	return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
}
// NYCbCrAAt returns the color of the pixel at (x, y).
func (p *NYCbCrA) NYCbCrAAt(x, y int) color.NYCbCrA {
	if !(Point{X: x, Y: y}.In(p.Rect)) {
		// Out-of-bounds lookups yield the zero color.
		return color.NYCbCrA{}
	}
	lumaIdx := p.YOffset(x, y)
	chromaIdx := p.COffset(x, y)
	alphaIdx := p.AOffset(x, y)
	return color.NYCbCrA{
		YCbCr: color.YCbCr{
			Y:  p.Y[lumaIdx],
			Cb: p.Cb[chromaIdx],
			Cr: p.Cr[chromaIdx],
		},
		A: p.A[alphaIdx],
	}
}
// AOffset returns the index of the first element of A that corresponds to the
// pixel at (x, y).
func (p *NYCbCrA) AOffset(x, y int) int {
	row := y - p.Rect.Min.Y
	col := x - p.Rect.Min.X
	return row*p.AStride + col
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *NYCbCrA) SubImage(r Rectangle) Image {
	r = r.Intersect(p.Rect)
	// An empty intersection is not guaranteed to lie inside either operand,
	// so guard here: the plane offsets computed below could otherwise index
	// out of range and panic.
	if r.Empty() {
		return &NYCbCrA{
			YCbCr: YCbCr{SubsampleRatio: p.SubsampleRatio},
		}
	}
	yOff := p.YOffset(r.Min.X, r.Min.Y)
	cOff := p.COffset(r.Min.X, r.Min.Y)
	aOff := p.AOffset(r.Min.X, r.Min.Y)
	sub := &NYCbCrA{
		YCbCr: YCbCr{
			Y:              p.Y[yOff:],
			Cb:             p.Cb[cOff:],
			Cr:             p.Cr[cOff:],
			SubsampleRatio: p.SubsampleRatio,
			YStride:        p.YStride,
			CStride:        p.CStride,
			Rect:           r,
		},
		A:       p.A[aOff:],
		AStride: p.AStride,
	}
	return sub
}
// Opaque scans the entire image and reports whether it is fully opaque,
// that is, whether every alpha sample is 0xff.
func (p *NYCbCrA) Opaque() bool {
	if p.Rect.Empty() {
		return true
	}
	width := p.Rect.Dx()
	rowStart := 0
	for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
		// Examine one row of alpha samples at a time; rows are
		// AStride apart in the backing slice.
		for _, alpha := range p.A[rowStart : rowStart+width] {
			if alpha != 0xff {
				return false
			}
		}
		rowStart += p.AStride
	}
	return true
}
// NewNYCbCrA returns a new [NYCbCrA] image with the given bounds and subsample
// ratio.
func NewNYCbCrA(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *NYCbCrA {
	w, h, cw, ch := yCbCrSize(r, subsampleRatio)
	// totalLength mirrors i3 below, but computed with overflow-checked
	// arithmetic, so a huge or inverted rectangle is caught here.
	totalLength := add2NonNeg(
		mul3NonNeg(2, w, h),
		mul3NonNeg(2, cw, ch),
	)
	if totalLength < 0 {
		panic("image: NewNYCbCrA Rectangle has huge or negative dimension")
	}
	ySize := w * h
	cSize := cw * ch
	i0 := ySize
	i1 := ySize + cSize
	i2 := ySize + 2*cSize
	i3 := 2*ySize + 2*cSize
	// All four planes (Y, Cb, Cr, A) share one backing array; the
	// full-capacity slice expressions keep an append on one plane from
	// clobbering the next.
	buf := make([]byte, i3)
	return &NYCbCrA{
		YCbCr: YCbCr{
			Y:              buf[:i0:i0],
			Cb:             buf[i0:i1:i1],
			Cr:             buf[i1:i2:i2],
			SubsampleRatio: subsampleRatio,
			YStride:        w,
			CStride:        cw,
			Rect:           r,
		},
		A:       buf[i2:],
		AStride: w,
	}
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Suffix array construction by induced sorting (SAIS).
// See Ge Nong, Sen Zhang, and Wai Hong Chen,
// "Two Efficient Algorithms for Linear Time Suffix Array Construction",
// especially section 3 (https://ieeexplore.ieee.org/document/5582081).
// See also http://zork.net/~st/jottings/sais.html.
//
// With optimizations inspired by Yuta Mori's sais-lite
// (https://sites.google.com/site/yuta256/sais).
//
// And with other new optimizations.
// Many of these functions are parameterized by the sizes of
// the types they operate on. The generator gen.go makes
// copies of these functions for use with other sizes.
// Specifically:
//
// - A function with a name ending in _8_32 takes []byte and []int32 arguments
// and is duplicated into _32_32, _8_64, and _64_64 forms.
// The _32_32 and _64_64 suffixes are shortened to plain _32 and _64.
// Any lines in the function body that contain the text "byte-only" or "256"
// are stripped when creating _32_32 and _64_64 forms.
// (Those lines are typically 8-bit-specific optimizations.)
//
// - A function with a name ending only in _32 operates on []int32
// and is duplicated into a _64 form. (Note that it may still take a []byte,
// but there is no need for a version of the function in which the []byte
// is widened to a full integer array.)
// The overall runtime of this code is linear in the input size:
// it runs a sequence of linear passes to reduce the problem to
// a subproblem at most half as big, invokes itself recursively,
// and then runs a sequence of linear passes to turn the answer
// for the subproblem into the answer for the original problem.
// This gives T(N) = O(N) + T(N/2) = O(N) + O(N/2) + O(N/4) + ... = O(N).
//
// The outline of the code, with the forward and backward scans
// through O(N)-sized arrays called out, is:
//
// sais_I_N
// placeLMS_I_B
// bucketMax_I_B
// freq_I_B
// <scan +text> (1)
// <scan +freq> (2)
// <scan -text, random bucket> (3)
// induceSubL_I_B
// bucketMin_I_B
// freq_I_B
// <scan +text, often optimized away> (4)
// <scan +freq> (5)
// <scan +sa, random text, random bucket> (6)
// induceSubS_I_B
// bucketMax_I_B
// freq_I_B
// <scan +text, often optimized away> (7)
// <scan +freq> (8)
// <scan -sa, random text, random bucket> (9)
// assignID_I_B
// <scan +sa, random text substrings> (10)
// map_B
// <scan -sa> (11)
// recurse_B
// (recursive call to sais_B_B for a subproblem of size at most 1/2 input, often much smaller)
// unmap_I_B
// <scan -text> (12)
// <scan +sa> (13)
// expand_I_B
// bucketMax_I_B
// freq_I_B
// <scan +text, often optimized away> (14)
// <scan +freq> (15)
// <scan -sa, random text, random bucket> (16)
// induceL_I_B
// bucketMin_I_B
// freq_I_B
// <scan +text, often optimized away> (17)
// <scan +freq> (18)
// <scan +sa, random text, random bucket> (19)
// induceS_I_B
// bucketMax_I_B
// freq_I_B
// <scan +text, often optimized away> (20)
// <scan +freq> (21)
// <scan -sa, random text, random bucket> (22)
//
// Here, _B indicates the suffix array size (_32 or _64) and _I the input size (_8 or _B).
//
// The outline shows there are in general 22 scans through
// O(N)-sized arrays for a given level of the recursion.
// In the top level, operating on 8-bit input text,
// the six freq scans are fixed size (256) instead of potentially
// input-sized. Also, the frequency is counted once and cached
// whenever there is room to do so (there is nearly always room in general,
// and always room at the top level), which eliminates all but
// the first freq_I_B text scans (that is, 5 of the 6).
// So the top level of the recursion only does 22 - 6 - 5 = 11
// input-sized scans and a typical level does 16 scans.
//
// The linear scans do not cost anywhere near as much as
// the random accesses to the text made during a few of
// the scans (specifically #6, #9, #16, #19, #22 marked above).
// In real texts, there is not much but some locality to
// the accesses, due to the repetitive structure of the text
// (the same reason Burrows-Wheeler compression is so effective).
// For random inputs, there is no locality, which makes those
// accesses even more expensive, especially once the text
// no longer fits in cache.
// For example, running on 50 MB of Go source code, induceSubL_8_32
// (which runs only once, at the top level of the recursion)
// takes 0.44s, while on 50 MB of random input, it takes 2.55s.
// Nearly all the relative slowdown is explained by the text access:
//
// c0, c1 := text[k-1], text[k]
//
// That line runs for 0.23s on the Go text and 2.02s on random text.
//go:generate go run gen.go
package suffixarray
// text_32 returns the suffix array for the input text.
// It requires that len(text) fit in an int32
// and that the caller zero sa.
func text_32(text []byte, sa []int32) {
	if int(int32(len(text))) != len(text) || len(text) != len(sa) {
		panic("suffixarray: misuse of text_32")
	}
	// 256 is the size of the byte alphabet; the 2*256 temporary holds
	// both the frequency and bucket slices used by sais_8_32.
	sais_8_32(text, 256, sa, make([]int32, 2*256))
}
// sais_8_32 computes the suffix array of text.
// The text must contain only values in [0, textMax).
// The suffix array is stored in sa, which the caller
// must ensure is already zeroed.
// The caller must also provide temporary space tmp
// with len(tmp) ≥ textMax. If len(tmp) ≥ 2*textMax
// then the algorithm runs a little faster.
// If sais_8_32 modifies tmp, it sets tmp[0] = -1 on return.
func sais_8_32(text []byte, textMax int, sa, tmp []int32) {
	if len(sa) != len(text) || len(tmp) < textMax {
		panic("suffixarray: misuse of sais_8_32")
	}
	// Trivial base cases. Sorting 0 or 1 things is easy.
	if len(text) == 0 {
		return
	}
	if len(text) == 1 {
		sa[0] = 0
		return
	}
	// Establish slices indexed by text character
	// holding character frequency and bucket-sort offsets.
	// If there's only enough tmp for one slice,
	// we make it the bucket offsets and recompute
	// the character frequency each time we need it.
	var freq, bucket []int32
	if len(tmp) >= 2*textMax {
		freq, bucket = tmp[:textMax], tmp[textMax:2*textMax]
		freq[0] = -1 // mark as uninitialized
	} else {
		freq, bucket = nil, tmp[:textMax]
	}
	// The SAIS algorithm.
	// Each of these calls makes one scan through sa.
	// See the individual functions for documentation
	// about each's role in the algorithm.
	numLMS := placeLMS_8_32(text, sa, freq, bucket)
	if numLMS <= 1 {
		// 0 or 1 items are already sorted. Do nothing.
	} else {
		induceSubL_8_32(text, sa, freq, bucket)
		induceSubS_8_32(text, sa, freq, bucket)
		length_8_32(text, sa, numLMS)
		maxID := assignID_8_32(text, sa, numLMS)
		if maxID < numLMS {
			// Some LMS-substrings repeat, so their relative suffix
			// order is not yet known; sort them by recursing on the
			// renumbered subproblem.
			map_32(sa, numLMS)
			recurse_32(sa, tmp, numLMS, maxID)
			unmap_8_32(text, sa, numLMS)
		} else {
			// If maxID == numLMS, then each LMS-substring
			// is unique, so the relative ordering of two LMS-suffixes
			// is determined by just the leading LMS-substring.
			// That is, the LMS-suffix sort order matches the
			// (simpler) LMS-substring sort order.
			// Copy the original LMS-substring order into the
			// suffix array destination.
			copy(sa, sa[len(sa)-numLMS:])
		}
		expand_8_32(text, freq, bucket, sa, numLMS)
	}
	induceL_8_32(text, sa, freq, bucket)
	induceS_8_32(text, sa, freq, bucket)
	// Mark for caller that we overwrote tmp.
	tmp[0] = -1
}
// freq_8_32 returns the character frequencies
// for text, as a slice indexed by character value.
// If freq is nil, freq_8_32 uses and returns bucket.
// If freq is non-nil, freq_8_32 assumes that freq[0] >= 0
// means the frequencies are already computed.
// If the frequency data is overwritten or uninitialized,
// the caller must set freq[0] = -1 to force recomputation
// the next time it is needed.
func freq_8_32(text []byte, freq, bucket []int32) []int32 {
	if freq != nil && freq[0] >= 0 {
		// Cached counts are still valid.
		return freq
	}
	counts := freq
	if counts == nil {
		counts = bucket
	}
	// Fixing the length at 256 lets the compiler elide the bounds
	// check on counts[c] in the loop below.
	counts = counts[:256]
	clear(counts)
	for _, c := range text {
		counts[c]++
	}
	return counts
}
// bucketMin_8_32 stores into bucket[c] the minimum index
// in the bucket for character c in a bucket-sort of text.
func bucketMin_8_32(text []byte, freq, bucket []int32) {
	freq = freq_8_32(text, freq, bucket)
	freq = freq[:256]     // establish len(freq) = 256, so 0 ≤ c < 256 below
	bucket = bucket[:256] // eliminate bounds check for bucket[c] below
	// bucket[c] becomes the running sum of the counts of all
	// characters smaller than c.
	sum := int32(0)
	for c, n := range freq {
		bucket[c] = sum
		sum += n
	}
}
// bucketMax_8_32 stores into bucket[c] the maximum index
// in the bucket for character c in a bucket-sort of text.
// The bucket indexes for c are [min, max).
// That is, max is one past the final index in that bucket.
func bucketMax_8_32(text []byte, freq, bucket []int32) {
	freq = freq_8_32(text, freq, bucket)
	freq = freq[:256]     // establish len(freq) = 256, so 0 ≤ c < 256 below
	bucket = bucket[:256] // eliminate bounds check for bucket[c] below
	// bucket[c] becomes the running sum of the counts of all
	// characters up to and including c.
	sum := int32(0)
	for c, n := range freq {
		sum += n
		bucket[c] = sum
	}
}
// The SAIS algorithm proceeds in a sequence of scans through sa.
// Each of the following functions implements one scan,
// and the functions appear here in the order they execute in the algorithm.
// placeLMS_8_32 places into sa the indexes of the
// final characters of the LMS substrings of text,
// sorted into the rightmost ends of their correct buckets
// in the suffix array.
//
// The imaginary sentinel character at the end of the text
// is the final character of the final LMS substring, but there
// is no bucket for the imaginary sentinel character,
// which has a smaller value than any real character.
// The caller must therefore pretend that sa[-1] == len(text).
//
// The text indexes of LMS-substring characters are always ≥ 1
// (the first LMS-substring must be preceded by one or more L-type
// characters that are not part of any LMS-substring),
// so using 0 as a “not present” suffix array entry is safe,
// both in this function and in most later functions
// (until induceL_8_32 below).
func placeLMS_8_32(text []byte, sa, freq, bucket []int32) int {
	bucketMax_8_32(text, freq, bucket)
	numLMS := 0
	lastB := int32(-1)
	bucket = bucket[:256] // eliminate bounds check for bucket[c1] below
	// The next stanza of code (until the blank line) loops backward
	// over text, stopping to execute a code body at each position i
	// such that text[i] is an L-character and text[i+1] is an S-character.
	// That is, i+1 is the position of the start of an LMS-substring.
	// These could be hoisted out into a function with a callback,
	// but at a significant speed cost. Instead, we just write these
	// seven lines a few times in this source file. The copies below
	// refer back to the pattern established by this original as the
	// "LMS-substring iterator".
	//
	// In every scan through the text, c0, c1 are successive characters of text.
	// In this backward scan, c0 == text[i] and c1 == text[i+1].
	// By scanning backward, we can keep track of whether the current
	// position is type-S or type-L according to the usual definition:
	//
	//	- position len(text) is type S with text[len(text)] == -1 (the sentinel)
	//	- position i is type S if text[i] < text[i+1], or if text[i] == text[i+1] && i+1 is type S.
	//	- position i is type L if text[i] > text[i+1], or if text[i] == text[i+1] && i+1 is type L.
	//
	// The backward scan lets us maintain the current type,
	// update it when we see c0 != c1, and otherwise leave it alone.
	// We want to identify all S positions with a preceding L.
	// Position len(text) is one such position by definition, but we have
	// nowhere to write it down, so we eliminate it by untruthfully
	// setting isTypeS = false at the start of the loop.
	c0, c1, isTypeS := byte(0), byte(0), false
	for i := len(text) - 1; i >= 0; i-- {
		c0, c1 = text[i], c0
		if c0 < c1 {
			isTypeS = true
		} else if c0 > c1 && isTypeS {
			isTypeS = false
			// Bucket the index i+1 for the start of an LMS-substring.
			b := bucket[c1] - 1
			bucket[c1] = b
			sa[b] = int32(i + 1)
			lastB = b
			numLMS++
		}
	}
	// We recorded the LMS-substring starts but really want the ends.
	// Luckily, with two differences, the start indexes and the end indexes are the same.
	// The first difference is that the rightmost LMS-substring's end index is len(text),
	// so the caller must pretend that sa[-1] == len(text), as noted above.
	// The second difference is that the first leftmost LMS-substring start index
	// does not end an earlier LMS-substring, so as an optimization we can omit
	// that leftmost LMS-substring start index (the last one we wrote).
	//
	// Exception: if numLMS <= 1, the caller is not going to bother with
	// the recursion at all and will treat the result as containing LMS-substring starts.
	// In that case, we don't remove the final entry.
	if numLMS > 1 {
		sa[lastB] = 0
	}
	return numLMS
}
// induceSubL_8_32 inserts the L-type text indexes of LMS-substrings
// into sa, assuming that the final characters of the LMS-substrings
// are already inserted into sa, sorted by final character, and at the
// right (not left) end of the corresponding character bucket.
// Each LMS-substring has the form (as a regexp) /S+L+S/:
// one or more S-type, one or more L-type, final S-type.
// induceSubL_8_32 leaves behind only the leftmost L-type text
// index for each LMS-substring. That is, it removes the final S-type
// indexes that are present on entry, and it inserts but then removes
// the interior L-type indexes too.
// (Only the leftmost L-type index is needed by induceSubS_8_32.)
func induceSubL_8_32(text []byte, sa, freq, bucket []int32) {
	// Initialize positions for left side of character buckets.
	bucketMin_8_32(text, freq, bucket)
	bucket = bucket[:256] // eliminate bounds check for bucket[cB] below
	// As we scan the array left-to-right, each sa[i] = j > 0 is a correctly
	// sorted suffix array entry (for text[j:]) for which we know that j-1 is type L.
	// Because j-1 is type L, inserting it into sa now will sort it correctly.
	// But we want to distinguish a j-1 with j-2 of type L from type S.
	// We can process the former but want to leave the latter for the caller.
	// We record the difference by negating j-1 if it is preceded by type S.
	// Either way, the insertion (into the text[j-1] bucket) is guaranteed to
	// happen at sa[i´] for some i´ > i, that is, in the portion of sa we have
	// yet to scan. A single pass therefore sees indexes j, j-1, j-2, j-3,
	// and so on, in sorted but not necessarily adjacent order, until it finds
	// one preceded by an index of type S, at which point it must stop.
	//
	// As we scan through the array, we clear the worked entries (sa[i] > 0) to zero,
	// and we flip sa[i] < 0 to -sa[i], so that the loop finishes with sa containing
	// only the indexes of the leftmost L-type indexes for each LMS-substring.
	//
	// The suffix array sa therefore serves simultaneously as input, output,
	// and a miraculously well-tailored work queue.
	// placeLMS_8_32 left out the implicit entry sa[-1] == len(text),
	// corresponding to the identified type-L index len(text)-1.
	// Process it before the left-to-right scan of sa proper.
	// See body in loop for commentary.
	// (text[k-1] is safe here: sais_8_32 returns early for len(text) < 2.)
	k := len(text) - 1
	c0, c1 := text[k-1], text[k]
	if c0 < c1 {
		k = -k
	}
	// Cache recently used bucket index:
	// we're processing suffixes in sorted order
	// and accessing buckets indexed by the
	// byte before the sorted order, which still
	// has very good locality.
	// Invariant: b is cached, possibly dirty copy of bucket[cB].
	cB := c1
	b := bucket[cB]
	sa[b] = int32(k)
	b++
	for i := 0; i < len(sa); i++ {
		j := int(sa[i])
		if j == 0 {
			// Skip empty entry.
			continue
		}
		if j < 0 {
			// Leave discovered type-S index for caller.
			sa[i] = int32(-j)
			continue
		}
		sa[i] = 0
		// Index j was on work queue, meaning k := j-1 is L-type,
		// so we can now place k correctly into sa.
		// If k-1 is L-type, queue k for processing later in this loop.
		// If k-1 is S-type (text[k-1] < text[k]), queue -k to save for the caller.
		// (k ≥ 1 here: LMS indexes are ≥ 1, per placeLMS_8_32, so text[k-1] is safe.)
		k := j - 1
		c0, c1 := text[k-1], text[k]
		if c0 < c1 {
			k = -k
		}
		if cB != c1 {
			// Flush the dirty cached bucket position and switch buckets.
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		sa[b] = int32(k)
		b++
	}
}
// induceSubS_8_32 inserts the S-type text indexes of LMS-substrings
// into sa, assuming that the leftmost L-type text indexes are already
// inserted into sa, sorted by LMS-substring suffix, and at the
// left end of the corresponding character bucket.
// Each LMS-substring has the form (as a regexp) /S+L+S/:
// one or more S-type, one or more L-type, final S-type.
// induceSubS_8_32 leaves behind only the leftmost S-type text
// index for each LMS-substring, in sorted order, at the right end of sa.
// That is, it removes the L-type indexes that are present on entry,
// and it inserts but then removes the interior S-type indexes too,
// leaving the LMS-substring start indexes packed into sa[len(sa)-numLMS:].
// (Only the LMS-substring start indexes are processed by the recursion.)
func induceSubS_8_32(text []byte, sa, freq, bucket []int32) {
	// Initialize positions for right side of character buckets.
	bucketMax_8_32(text, freq, bucket)
	bucket = bucket[:256] // eliminate bounds check for bucket[cB] below
	// Analogous to induceSubL_8_32 above,
	// as we scan the array right-to-left, each sa[i] = j > 0 is a correctly
	// sorted suffix array entry (for text[j:]) for which we know that j-1 is type S.
	// Because j-1 is type S, inserting it into sa now will sort it correctly.
	// But we want to distinguish a j-1 with j-2 of type S from type L.
	// We can process the former but want to leave the latter for the caller.
	// We record the difference by negating j-1 if it is preceded by type L.
	// Either way, the insertion (into the text[j-1] bucket) is guaranteed to
	// happen at sa[i´] for some i´ < i, that is, in the portion of sa we have
	// yet to scan. A single pass therefore sees indexes j, j-1, j-2, j-3,
	// and so on, in sorted but not necessarily adjacent order, until it finds
	// one preceded by an index of type L, at which point it must stop.
	// That index (preceded by one of type L) is an LMS-substring start.
	//
	// As we scan through the array, we clear the worked entries (sa[i] > 0) to zero,
	// and we flip sa[i] < 0 to -sa[i] and compact into the top of sa,
	// so that the loop finishes with the top of sa containing exactly
	// the LMS-substring start indexes, sorted by LMS-substring.
	// Cache recently used bucket index:
	// (cB starts at 0 with a matching cached b; the first differing
	// character simply flushes this initial pair.)
	cB := byte(0)
	b := bucket[cB]
	top := len(sa)
	for i := len(sa) - 1; i >= 0; i-- {
		j := int(sa[i])
		if j == 0 {
			// Skip empty entry.
			continue
		}
		sa[i] = 0
		if j < 0 {
			// Leave discovered LMS-substring start index for caller,
			// compacted into the top of sa.
			top--
			sa[top] = int32(-j)
			continue
		}
		// Index j was on work queue, meaning k := j-1 is S-type,
		// so we can now place k correctly into sa.
		// If k-1 is S-type, queue k for processing later in this loop.
		// If k-1 is L-type (text[k-1] > text[k]), queue -k to save for the caller.
		k := j - 1
		c1 := text[k]
		c0 := text[k-1]
		if c0 > c1 {
			k = -k
		}
		if cB != c1 {
			// Flush the dirty cached bucket position and switch buckets.
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		b--
		sa[b] = int32(k)
	}
}
// length_8_32 computes and records the length of each LMS-substring in text.
// The length of the LMS-substring at index j is stored at sa[j/2],
// avoiding the LMS-substring indexes already stored in the top half of sa.
// (If index j is an LMS-substring start, then index j-1 is type L and cannot be.)
// There are two exceptions, made for optimizations in name_8_32 below.
//
// First, the final LMS-substring is recorded as having length 0, which is otherwise
// impossible, instead of giving it a length that includes the implicit sentinel.
// This ensures the final LMS-substring has length unequal to all others
// and therefore can be detected as different without text comparison
// (it is unequal because it is the only one that ends in the implicit sentinel,
// and the text comparison would be problematic since the implicit sentinel
// is not actually present at text[len(text)]).
//
// Second, to avoid text comparison entirely, if an LMS-substring is very short,
// sa[j/2] records its actual text instead of its length, so that if two such
// substrings have matching “length,” the text need not be read at all.
// The definition of “very short” is that the text bytes must pack into a uint32,
// and the unsigned encoding e must be ≥ len(text), so that it can be
// distinguished from a valid length.
func length_8_32(text []byte, sa []int32, numLMS int) {
	end := 0 // index of current LMS-substring end (0 indicates final LMS-substring)
	// The encoding of N text bytes into a “length” word
	// adds 1 to each byte, packs them into the bottom
	// N*8 bits of a word, and then bitwise inverts the result.
	// That is, the text sequence A B C (hex 41 42 43)
	// encodes as ^uint32(0x42_43_44).
	// LMS-substrings can never start or end with 0xFF.
	// Adding 1 ensures the encoded byte sequence never
	// starts or ends with 0x00, so that present bytes can be
	// distinguished from zero-padding in the top bits,
	// so the length need not be separately encoded.
	// Inverting the bytes increases the chance that a
	// 4-byte encoding will still be ≥ len(text).
	// In particular, if the first byte is ASCII (<= 0x7E, so +1 <= 0x7F)
	// then the high bit of the inversion will be set,
	// making it clearly not a valid length (it would be a negative one).
	//
	// cx holds the pre-inverted encoding (the packed incremented bytes).
	cx := uint32(0) // byte-only
	// This stanza (until the blank line) is the "LMS-substring iterator",
	// described in placeLMS_8_32 above, with one line added to maintain cx.
	c0, c1, isTypeS := byte(0), byte(0), false
	for i := len(text) - 1; i >= 0; i-- {
		c0, c1 = text[i], c0
		cx = cx<<8 | uint32(c1+1) // byte-only
		if c0 < c1 {
			isTypeS = true
		} else if c0 > c1 && isTypeS {
			isTypeS = false
			// Index j = i+1 is the start of an LMS-substring.
			// Compute length or encoded text to store in sa[j/2].
			j := i + 1
			var code int32
			if end == 0 {
				// This is the rightmost (final) LMS-substring;
				// record the impossible length 0 (see above).
				code = 0
			} else {
				// end is one past the start of the LMS-substring
				// to the right, which this one extends to, inclusive.
				code = int32(end - j)
				if code <= 32/8 && ^cx >= uint32(len(text)) { // byte-only
					code = int32(^cx) // byte-only
				} // byte-only
			}
			sa[j>>1] = code
			end = j + 1
			cx = uint32(c1 + 1) // byte-only
		}
	}
}
// assignID_8_32 assigns a dense ID numbering to the
// set of LMS-substrings respecting string ordering and equality,
// returning the maximum assigned ID.
// For example given the input "ababab", the LMS-substrings
// are "aba", "aba", and "ab", renumbered as 2 2 1.
// sa[len(sa)-numLMS:] holds the LMS-substring indexes
// sorted in string order, so to assign numbers we can
// consider each in turn, removing adjacent duplicates.
// The new ID for the LMS-substring at index j is written to sa[j/2],
// overwriting the length previously stored there (by length_8_32 above).
func assignID_8_32(text []byte, sa []int32, numLMS int) int {
	id := 0
	// lastLen/lastPos describe the previous LMS-substring in the scan,
	// for the adjacent-duplicate check. -1 is an impossible length,
	// so the first iteration always takes the New path.
	lastLen := int32(-1) // impossible
	lastPos := int32(0)
	for _, j := range sa[len(sa)-numLMS:] {
		// Is the LMS-substring at index j new, or is it the same as the last one we saw?
		n := sa[j/2]
		if n != lastLen {
			goto New
		}
		if uint32(n) >= uint32(len(text)) {
			// “Length” is really encoded full text, and they match.
			goto Same
		}
		{
			// Compare actual texts.
			n := int(n)
			this := text[j:][:n]
			last := text[lastPos:][:n]
			for i := 0; i < n; i++ {
				if this[i] != last[i] {
					goto New
				}
			}
			goto Same
		}
	New:
		id++
		lastPos = j
		lastLen = n
	Same:
		sa[j/2] = int32(id)
	}
	return id
}
// map_32 maps the LMS-substrings in text to their new IDs,
// producing the subproblem for the recursion.
// The mapping itself was mostly applied by assignID_8_32:
// sa[i] is either 0, the ID for the LMS-substring at index 2*i,
// or the ID for the LMS-substring at index 2*i+1.
// To produce the subproblem we need only remove the zeros
// and change ID into ID-1 (our IDs start at 1, but text chars start at 0).
//
// map_32 packs the result, which is the input to the recursion,
// into the top of sa, so that the recursion result can be stored
// in the bottom of sa, which sets up for expand_8_32 well.
// (numLMS is not referenced; the backward scan locates the entries itself.)
func map_32(sa []int32, numLMS int) {
	// Walk the ID half of sa from right to left, compacting every
	// nonzero ID — decremented to make the alphabet zero-based —
	// into the top end of sa.
	w := len(sa)
	for i := len(sa) / 2; i >= 0; i-- {
		if id := sa[i]; id > 0 {
			w--
			sa[w] = id - 1
		}
	}
}
// recurse_32 calls sais_32 recursively to solve the subproblem we've built.
// The subproblem is at the right end of sa, the suffix array result will be
// written at the left end of sa, and the middle of sa is available for use as
// temporary frequency and bucket storage.
func recurse_32(sa, oldTmp []int32, numLMS, maxID int) {
	dst, saTmp, text := sa[:numLMS], sa[numLMS:len(sa)-numLMS], sa[len(sa)-numLMS:]
	// Set up temporary space for recursive call.
	// We must pass sais_32 a tmp buffer with at least maxID entries.
	//
	// The subproblem is guaranteed to have length at most len(sa)/2,
	// so that sa can hold both the subproblem and its suffix array.
	// Nearly all the time, however, the subproblem has length < len(sa)/3,
	// in which case there is a subproblem-sized middle of sa that
	// we can reuse for temporary space (saTmp).
	// When recurse_32 is called from sais_8_32, oldTmp is length 512
	// (from text_32), and saTmp will typically be much larger, so we'll use saTmp.
	// When deeper recursions come back to recurse_32, now oldTmp is
	// the saTmp from the top-most recursion, it is typically larger than
	// the current saTmp (because the current sa gets smaller and smaller
	// as the recursion gets deeper), and we keep reusing that top-most
	// large saTmp instead of the offered smaller ones.
	//
	// Why is the subproblem length so often just under len(sa)/3?
	// See Nong, Zhang, and Chen, section 3.6 for a plausible explanation.
	// In brief, the len(sa)/2 case would correspond to an SLSLSLSLSLSL pattern
	// in the input, perfect alternation of larger and smaller input bytes.
	// Real text doesn't do that. If each L-type index is randomly followed
	// by either an L-type or S-type index, then half the substrings will
	// be of the form SLS, but the other half will be longer. Of that half,
	// half (a quarter overall) will be SLLS; an eighth will be SLLLS, and so on.
	// Not counting the final S in each (which overlaps the first S in the next),
	// this works out to an average length 2×½ + 3×¼ + 4×⅛ + ... = 3.
	// The space we need is further reduced by the fact that many of the
	// short patterns like SLS will often be the same character sequences
	// repeated throughout the text, reducing maxID relative to numLMS.
	//
	// For short inputs, the averages may not run in our favor, but then we
	// can often fall back to using the length-512 tmp available in the
	// top-most call. (Also a short allocation would not be a big deal.)
	//
	// For pathological inputs, we fall back to allocating a new tmp of length
	// max(maxID, numLMS/2). This level of the recursion needs maxID,
	// and all deeper levels of the recursion will need no more than numLMS/2,
	// so this one allocation is guaranteed to suffice for the entire stack
	// of recursive calls.
	tmp := oldTmp
	if len(tmp) < len(saTmp) {
		tmp = saTmp
	}
	if len(tmp) < numLMS {
		// TestSAIS/forcealloc reaches this code.
		n := maxID
		if n < numLMS/2 {
			n = numLMS / 2
		}
		tmp = make([]int32, n)
	}
	// sais_32 requires that the caller arrange to clear dst,
	// because in general the caller may know dst is
	// freshly-allocated and already cleared. But this one is not.
	clear(dst)
	sais_32(text, maxID, dst, tmp)
}
// unmap_8_32 unmaps the subproblem back to the original.
// sa[:numLMS] is the LMS-substring numbers, which don't matter much anymore.
// sa[len(sa)-numLMS:] is the sorted list of those LMS-substring numbers.
// The key part is that if the list says K that means the K'th substring.
// We can replace sa[:numLMS] with the indexes of the LMS-substrings.
// Then if the list says K it really means sa[K].
// Having mapped the list back to LMS-substring indexes,
// we can place those into the right buckets.
func unmap_8_32(text []byte, sa []int32, numLMS int) {
	unmap := sa[len(sa)-numLMS:]
	j := len(unmap)
	// "LMS-substring iterator" (see placeLMS_8_32 above):
	// walk text right to left, tracking whether the current
	// position is type S, and record each LMS-substring start
	// index i+1 into the inverse map, filling it from the back.
	var c0, c1 byte
	isTypeS := false
	for i := len(text) - 1; i >= 0; i-- {
		c0, c1 = text[i], c0
		switch {
		case c0 < c1:
			isTypeS = true
		case c0 > c1 && isTypeS:
			isTypeS = false
			// Populate inverse map.
			j--
			unmap[j] = int32(i + 1)
		}
	}
	// Rewrite the subproblem suffix array in place: each entry is an
	// LMS-substring number; replace it with that substring's text index.
	for i, k := range sa[:numLMS] {
		sa[i] = unmap[k]
	}
}
// expand_8_32 distributes the compacted, sorted LMS-suffix indexes
// from sa[:numLMS] into the tops of the appropriate buckets in sa,
// preserving the sorted order and making room for the L-type indexes
// to be slotted into the sorted sequence by induceL_8_32.
func expand_8_32(text []byte, freq, bucket, sa []int32, numLMS int) {
	bucketMax_8_32(text, freq, bucket)
	bucket = bucket[:256] // eliminate bound check for bucket[c] below
	// Loop backward through sa, always tracking
	// the next index to populate from sa[:numLMS].
	// When we get to one, populate it.
	// Zero the rest of the slots; they have dead values in them.
	// Invariant: (x, saX, c, b) describe the next pending entry:
	// saX = sa[x] is the next LMS-suffix index to place, c is its
	// first character, and b is the destination slot claimed at
	// the current top of c's bucket.
	x := numLMS - 1
	saX := sa[x]
	c := text[saX]
	b := bucket[c] - 1
	bucket[c] = b
	for i := len(sa) - 1; i >= 0; i-- {
		if i != int(b) {
			// Not the destination slot: clear the dead value.
			sa[i] = 0
			continue
		}
		sa[i] = saX
		// Load next entry to put down (if any).
		if x > 0 {
			x--
			saX = sa[x] // TODO bounds check
			c = text[saX]
			b = bucket[c] - 1
			bucket[c] = b
		}
	}
}
// induceL_8_32 inserts L-type text indexes into sa,
// assuming that the leftmost S-type indexes are inserted
// into sa, in sorted order, in the right bucket halves.
// It leaves all the L-type indexes in sa, but the
// leftmost L-type indexes are negated, to mark them
// for processing by induceS_8_32.
func induceL_8_32(text []byte, sa, freq, bucket []int32) {
	// Initialize positions for left side of character buckets.
	bucketMin_8_32(text, freq, bucket)
	bucket = bucket[:256] // eliminate bounds check for bucket[cB] below
	// This scan is similar to the one in induceSubL_8_32 above.
	// That one arranges to clear all but the leftmost L-type indexes.
	// This scan leaves all the L-type indexes and the original S-type
	// indexes, but it negates the positive leftmost L-type indexes
	// (the ones that induceS_8_32 needs to process).
	// expand_8_32 left out the implicit entry sa[-1] == len(text),
	// corresponding to the identified type-L index len(text)-1.
	// Process it before the left-to-right scan of sa proper.
	// See body in loop for commentary.
	k := len(text) - 1
	c0, c1 := text[k-1], text[k]
	if c0 < c1 {
		k = -k
	}
	// Cache recently used bucket index.
	// Invariant: b is a cached, possibly dirty copy of bucket[cB];
	// it is flushed back to bucket[cB] whenever cB changes.
	cB := c1
	b := bucket[cB]
	sa[b] = int32(k)
	b++
	for i := 0; i < len(sa); i++ {
		j := int(sa[i])
		if j <= 0 {
			// Skip empty or negated entry (including negated zero).
			continue
		}
		// Index j was on work queue, meaning k := j-1 is L-type,
		// so we can now place k correctly into sa.
		// If k-1 is L-type, queue k for processing later in this loop.
		// If k-1 is S-type (text[k-1] < text[k]), queue -k to save for the caller.
		// If k is zero, k-1 doesn't exist, so we only need to leave it
		// for the caller. The caller can't tell the difference between
		// an empty slot and a non-empty zero, but there's no need
		// to distinguish them anyway: the final suffix array will end up
		// with one zero somewhere, and that will be a real zero.
		k := j - 1
		c1 := text[k]
		if k > 0 {
			if c0 := text[k-1]; c0 < c1 {
				k = -k
			}
		}
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		sa[b] = int32(k)
		b++
	}
}
// induceS_8_32 inserts S-type text indexes into sa,
// scanning right to left and processing the negated entries
// left behind by induceL_8_32, rewriting each to its positive
// form as it goes, so that sa finishes with no negated entries.
func induceS_8_32(text []byte, sa, freq, bucket []int32) {
	// Initialize positions for right side of character buckets.
	bucketMax_8_32(text, freq, bucket)
	bucket = bucket[:256] // eliminate bounds check for bucket[cB] below
	// Cache recently used bucket index (same dirty-cache
	// invariant as in induceL_8_32 above).
	cB := byte(0)
	b := bucket[cB]
	for i := len(sa) - 1; i >= 0; i-- {
		j := int(sa[i])
		if j >= 0 {
			// Skip non-flagged entry.
			// (This loop can't see an empty entry; 0 means the real zero index.)
			continue
		}
		// Negative j is a work queue entry; rewrite to positive j for final suffix array.
		j = -j
		sa[i] = int32(j)
		// Index j was on work queue (encoded as -j but now decoded),
		// meaning k := j-1 is L-type,
		// so we can now place k correctly into sa.
		// If k-1 is S-type, queue -k for processing later in this loop.
		// If k-1 is L-type (text[k-1] > text[k]), queue k to save for the caller.
		// If k is zero, k-1 doesn't exist, so we only need to leave it
		// for the caller.
		k := j - 1
		c1 := text[k]
		if k > 0 {
			if c0 := text[k-1]; c0 <= c1 {
				k = -k
			}
		}
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		b--
		sa[b] = int32(k)
	}
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by go generate; DO NOT EDIT.
package suffixarray
// text_64 builds a suffix array of text into sa using the int64
// SAIS implementation. It requires len(sa) == len(text), and the
// length must round-trip through int64 (always true on supported
// platforms, where int is at most 64 bits).
func text_64(text []byte, sa []int64) {
	if len(text) != len(sa) || int(int64(len(text))) != len(text) {
		panic("suffixarray: misuse of text_64")
	}
	sais_8_64(text, 256, sa, make([]int64, 2*256))
}
// sais_8_64 computes the suffix array of text, all of whose characters
// are below textMax, storing the result in sa and using tmp as scratch
// space. It requires len(sa) == len(text) and len(tmp) >= textMax.
// On return, tmp[0] is set to -1 to mark that tmp was overwritten.
func sais_8_64(text []byte, textMax int, sa, tmp []int64) {
	if len(sa) != len(text) || len(tmp) < textMax {
		panic("suffixarray: misuse of sais_8_64")
	}

	// Trivial base cases. Sorting 0 or 1 things is easy.
	switch len(text) {
	case 0:
		return
	case 1:
		sa[0] = 0
		return
	}

	// Establish slices indexed by text character
	// holding character frequency and bucket-sort offsets.
	// If there's only enough tmp for one slice,
	// we make it the bucket offsets and recompute
	// the character frequency each time we need it.
	var freq, bucket []int64
	if len(tmp) < 2*textMax {
		freq, bucket = nil, tmp[:textMax]
	} else {
		freq, bucket = tmp[:textMax], tmp[textMax:2*textMax]
		freq[0] = -1 // mark as uninitialized
	}

	// The SAIS algorithm. Each of these calls makes one scan
	// through sa; see the individual functions for documentation
	// about each's role in the algorithm.
	// With 0 or 1 LMS-substrings, everything is already sorted
	// and the sub-sort steps can be skipped entirely.
	if numLMS := placeLMS_8_64(text, sa, freq, bucket); numLMS > 1 {
		induceSubL_8_64(text, sa, freq, bucket)
		induceSubS_8_64(text, sa, freq, bucket)
		length_8_64(text, sa, numLMS)
		if maxID := assignID_8_64(text, sa, numLMS); maxID < numLMS {
			// Duplicate LMS-substrings exist: sort them by recursion.
			map_64(sa, numLMS)
			recurse_64(sa, tmp, numLMS, maxID)
			unmap_8_64(text, sa, numLMS)
		} else {
			// maxID == numLMS: each LMS-substring is unique, so the
			// relative ordering of two LMS-suffixes is determined by
			// just the leading LMS-substring. That is, the LMS-suffix
			// sort order matches the (simpler) LMS-substring sort
			// order; copy it into the suffix array destination.
			copy(sa, sa[len(sa)-numLMS:])
		}
		expand_8_64(text, freq, bucket, sa, numLMS)
	}
	induceL_8_64(text, sa, freq, bucket)
	induceS_8_64(text, sa, freq, bucket)

	// Mark for caller that we overwrote tmp.
	tmp[0] = -1
}
// sais_32 computes the suffix array of text, all of whose characters
// are below textMax, storing the result in sa and using tmp as scratch
// space. It requires len(sa) == len(text) and len(tmp) >= textMax.
// On return, tmp[0] is set to -1 to mark that tmp was overwritten.
func sais_32(text []int32, textMax int, sa, tmp []int32) {
	if len(sa) != len(text) || len(tmp) < textMax {
		panic("suffixarray: misuse of sais_32")
	}

	// Trivial base cases. Sorting 0 or 1 things is easy.
	switch len(text) {
	case 0:
		return
	case 1:
		sa[0] = 0
		return
	}

	// Establish slices indexed by text character
	// holding character frequency and bucket-sort offsets.
	// If there's only enough tmp for one slice,
	// we make it the bucket offsets and recompute
	// the character frequency each time we need it.
	var freq, bucket []int32
	if len(tmp) < 2*textMax {
		freq, bucket = nil, tmp[:textMax]
	} else {
		freq, bucket = tmp[:textMax], tmp[textMax:2*textMax]
		freq[0] = -1 // mark as uninitialized
	}

	// The SAIS algorithm. Each of these calls makes one scan
	// through sa; see the individual functions for documentation
	// about each's role in the algorithm.
	// With 0 or 1 LMS-substrings, everything is already sorted
	// and the sub-sort steps can be skipped entirely.
	if numLMS := placeLMS_32(text, sa, freq, bucket); numLMS > 1 {
		induceSubL_32(text, sa, freq, bucket)
		induceSubS_32(text, sa, freq, bucket)
		length_32(text, sa, numLMS)
		if maxID := assignID_32(text, sa, numLMS); maxID < numLMS {
			// Duplicate LMS-substrings exist: sort them by recursion.
			map_32(sa, numLMS)
			recurse_32(sa, tmp, numLMS, maxID)
			unmap_32(text, sa, numLMS)
		} else {
			// maxID == numLMS: each LMS-substring is unique, so the
			// relative ordering of two LMS-suffixes is determined by
			// just the leading LMS-substring. That is, the LMS-suffix
			// sort order matches the (simpler) LMS-substring sort
			// order; copy it into the suffix array destination.
			copy(sa, sa[len(sa)-numLMS:])
		}
		expand_32(text, freq, bucket, sa, numLMS)
	}
	induceL_32(text, sa, freq, bucket)
	induceS_32(text, sa, freq, bucket)

	// Mark for caller that we overwrote tmp.
	tmp[0] = -1
}
// sais_64 computes the suffix array of text, all of whose characters
// are below textMax, storing the result in sa and using tmp as scratch
// space. It requires len(sa) == len(text) and len(tmp) >= textMax.
// On return, tmp[0] is set to -1 to mark that tmp was overwritten.
func sais_64(text []int64, textMax int, sa, tmp []int64) {
	if len(sa) != len(text) || len(tmp) < textMax {
		panic("suffixarray: misuse of sais_64")
	}

	// Trivial base cases. Sorting 0 or 1 things is easy.
	switch len(text) {
	case 0:
		return
	case 1:
		sa[0] = 0
		return
	}

	// Establish slices indexed by text character
	// holding character frequency and bucket-sort offsets.
	// If there's only enough tmp for one slice,
	// we make it the bucket offsets and recompute
	// the character frequency each time we need it.
	var freq, bucket []int64
	if len(tmp) < 2*textMax {
		freq, bucket = nil, tmp[:textMax]
	} else {
		freq, bucket = tmp[:textMax], tmp[textMax:2*textMax]
		freq[0] = -1 // mark as uninitialized
	}

	// The SAIS algorithm. Each of these calls makes one scan
	// through sa; see the individual functions for documentation
	// about each's role in the algorithm.
	// With 0 or 1 LMS-substrings, everything is already sorted
	// and the sub-sort steps can be skipped entirely.
	if numLMS := placeLMS_64(text, sa, freq, bucket); numLMS > 1 {
		induceSubL_64(text, sa, freq, bucket)
		induceSubS_64(text, sa, freq, bucket)
		length_64(text, sa, numLMS)
		if maxID := assignID_64(text, sa, numLMS); maxID < numLMS {
			// Duplicate LMS-substrings exist: sort them by recursion.
			map_64(sa, numLMS)
			recurse_64(sa, tmp, numLMS, maxID)
			unmap_64(text, sa, numLMS)
		} else {
			// maxID == numLMS: each LMS-substring is unique, so the
			// relative ordering of two LMS-suffixes is determined by
			// just the leading LMS-substring. That is, the LMS-suffix
			// sort order matches the (simpler) LMS-substring sort
			// order; copy it into the suffix array destination.
			copy(sa, sa[len(sa)-numLMS:])
		}
		expand_64(text, freq, bucket, sa, numLMS)
	}
	induceL_64(text, sa, freq, bucket)
	induceS_64(text, sa, freq, bucket)

	// Mark for caller that we overwrote tmp.
	tmp[0] = -1
}
// freq_8_64 returns the character frequency table for text,
// counting into freq (or into bucket when freq is nil).
// A nonnegative freq[0] marks a table cached by an earlier call,
// which is returned as-is without recounting.
func freq_8_64(text []byte, freq, bucket []int64) []int64 {
	if freq != nil && freq[0] >= 0 {
		// Cached from an earlier call.
		return freq
	}
	dst := freq
	if dst == nil {
		// No dedicated frequency slice: count into bucket instead.
		dst = bucket
	}
	dst = dst[:256] // eliminate bounds check for dst[c] below
	clear(dst)
	for _, c := range text {
		dst[c]++
	}
	return dst
}
// freq_32 returns the character frequency table for text,
// counting into freq (or into bucket when freq is nil).
// A nonnegative freq[0] marks a table cached by an earlier call,
// which is returned as-is without recounting.
func freq_32(text []int32, freq, bucket []int32) []int32 {
	if freq != nil && freq[0] >= 0 {
		// Cached from an earlier call.
		return freq
	}
	dst := freq
	if dst == nil {
		// No dedicated frequency slice: count into bucket instead.
		dst = bucket
	}
	clear(dst)
	for _, c := range text {
		dst[c]++
	}
	return dst
}
// freq_64 returns the character frequency table for text,
// counting into freq (or into bucket when freq is nil).
// A nonnegative freq[0] marks a table cached by an earlier call,
// which is returned as-is without recounting.
func freq_64(text []int64, freq, bucket []int64) []int64 {
	if freq != nil && freq[0] >= 0 {
		// Cached from an earlier call.
		return freq
	}
	dst := freq
	if dst == nil {
		// No dedicated frequency slice: count into bucket instead.
		dst = bucket
	}
	clear(dst)
	for _, c := range text {
		dst[c]++
	}
	return dst
}
// bucketMin_8_64 stores into bucket, for each character c,
// the index of the left (start) side of c's bucket in a
// bucket-sorted suffix array of text.
func bucketMin_8_64(text []byte, freq, bucket []int64) {
	freq = freq_8_64(text, freq, bucket)
	freq = freq[:256]     // establish len(freq) = 256, so 0 ≤ i < 256 below
	bucket = bucket[:256] // eliminate bounds check for bucket[i] below
	total := int64(0)
	for i := 0; i < 256; i++ {
		n := freq[i] // read before write: freq may alias bucket
		bucket[i] = total
		total += n
	}
}
// bucketMin_32 stores into bucket, for each character c,
// the index of the left (start) side of c's bucket in a
// bucket-sorted suffix array of text.
func bucketMin_32(text []int32, freq, bucket []int32) {
	freq = freq_32(text, freq, bucket)
	total := int32(0)
	for i := 0; i < len(freq); i++ {
		n := freq[i] // read before write: freq may alias bucket
		bucket[i] = total
		total += n
	}
}
// bucketMin_64 stores into bucket, for each character c,
// the index of the left (start) side of c's bucket in a
// bucket-sorted suffix array of text.
func bucketMin_64(text []int64, freq, bucket []int64) {
	freq = freq_64(text, freq, bucket)
	total := int64(0)
	for i := 0; i < len(freq); i++ {
		n := freq[i] // read before write: freq may alias bucket
		bucket[i] = total
		total += n
	}
}
// bucketMax_8_64 stores into bucket, for each character c,
// the index just past the right (end) side of c's bucket in a
// bucket-sorted suffix array of text.
func bucketMax_8_64(text []byte, freq, bucket []int64) {
	freq = freq_8_64(text, freq, bucket)
	freq = freq[:256]     // establish len(freq) = 256, so 0 ≤ i < 256 below
	bucket = bucket[:256] // eliminate bounds check for bucket[i] below
	total := int64(0)
	for i := 0; i < 256; i++ {
		// Read freq[i] before writing bucket[i]: they may alias.
		total += freq[i]
		bucket[i] = total
	}
}
// bucketMax_32 stores into bucket, for each character c,
// the index just past the right (end) side of c's bucket in a
// bucket-sorted suffix array of text.
func bucketMax_32(text []int32, freq, bucket []int32) {
	freq = freq_32(text, freq, bucket)
	total := int32(0)
	for i := 0; i < len(freq); i++ {
		// Read freq[i] before writing bucket[i]: they may alias.
		total += freq[i]
		bucket[i] = total
	}
}
// bucketMax_64 stores into bucket, for each character c,
// the index just past the right (end) side of c's bucket in a
// bucket-sorted suffix array of text.
func bucketMax_64(text []int64, freq, bucket []int64) {
	freq = freq_64(text, freq, bucket)
	total := int64(0)
	for i := 0; i < len(freq); i++ {
		// Read freq[i] before writing bucket[i]: they may alias.
		total += freq[i]
		bucket[i] = total
	}
}
// placeLMS_8_64 finds each LMS-substring start index in text and
// writes it at the current top of its first character's bucket in sa,
// returning the number of LMS-substrings found. As an optimization,
// the last (leftmost) start written is zeroed out when numLMS > 1;
// see the trailing comment for why.
func placeLMS_8_64(text []byte, sa, freq, bucket []int64) int {
	bucketMax_8_64(text, freq, bucket)
	numLMS := 0
	lastB := int64(-1)    // slot of most recently placed start
	bucket = bucket[:256] // eliminate bounds check for bucket[c1] below
	// The next stanza of code (until the blank line) loop backward
	// over text, stopping to execute a code body at each position i
	// such that text[i] is an L-character and text[i+1] is an S-character.
	// That is, i+1 is the position of the start of an LMS-substring.
	// These could be hoisted out into a function with a callback,
	// but at a significant speed cost. Instead, we just write these
	// seven lines a few times in this source file. The copies below
	// refer back to the pattern established by this original as the
	// "LMS-substring iterator".
	//
	// In every scan through the text, c0, c1 are successive characters of text.
	// In this backward scan, c0 == text[i] and c1 == text[i+1].
	// By scanning backward, we can keep track of whether the current
	// position is type-S or type-L according to the usual definition:
	//
	// - position len(text) is type S with text[len(text)] == -1 (the sentinel)
	// - position i is type S if text[i] < text[i+1], or if text[i] == text[i+1] && i+1 is type S.
	// - position i is type L if text[i] > text[i+1], or if text[i] == text[i+1] && i+1 is type L.
	//
	// The backward scan lets us maintain the current type,
	// update it when we see c0 != c1, and otherwise leave it alone.
	// We want to identify all S positions with a preceding L.
	// Position len(text) is one such position by definition, but we have
	// nowhere to write it down, so we eliminate it by untruthfully
	// setting isTypeS = false at the start of the loop.
	c0, c1, isTypeS := byte(0), byte(0), false
	for i := len(text) - 1; i >= 0; i-- {
		c0, c1 = text[i], c0
		if c0 < c1 {
			isTypeS = true
		} else if c0 > c1 && isTypeS {
			isTypeS = false
			// Bucket the index i+1 for the start of an LMS-substring.
			b := bucket[c1] - 1
			bucket[c1] = b
			sa[b] = int64(i + 1)
			lastB = b
			numLMS++
		}
	}
	// We recorded the LMS-substring starts but really want the ends.
	// Luckily, with two differences, the start indexes and the end indexes are the same.
	// The first difference is that the rightmost LMS-substring's end index is len(text),
	// so the caller must pretend that sa[-1] == len(text), as noted above.
	// The second difference is that the first leftmost LMS-substring start index
	// does not end an earlier LMS-substring, so as an optimization we can omit
	// that leftmost LMS-substring start index (the last one we wrote).
	//
	// Exception: if numLMS <= 1, the caller is not going to bother with
	// the recursion at all and will treat the result as containing LMS-substring starts.
	// In that case, we don't remove the final entry.
	if numLMS > 1 {
		sa[lastB] = 0
	}
	return numLMS
}
// placeLMS_32 finds each LMS-substring start index in text and
// writes it at the current top of its first character's bucket in sa,
// returning the number of LMS-substrings found. As an optimization,
// the last (leftmost) start written is zeroed out when numLMS > 1;
// see the trailing comment for why.
func placeLMS_32(text []int32, sa, freq, bucket []int32) int {
	bucketMax_32(text, freq, bucket)
	numLMS := 0
	lastB := int32(-1) // slot of most recently placed start
	// The next stanza of code (until the blank line) loop backward
	// over text, stopping to execute a code body at each position i
	// such that text[i] is an L-character and text[i+1] is an S-character.
	// That is, i+1 is the position of the start of an LMS-substring.
	// These could be hoisted out into a function with a callback,
	// but at a significant speed cost. Instead, we just write these
	// seven lines a few times in this source file. The copies below
	// refer back to the pattern established by this original as the
	// "LMS-substring iterator".
	//
	// In every scan through the text, c0, c1 are successive characters of text.
	// In this backward scan, c0 == text[i] and c1 == text[i+1].
	// By scanning backward, we can keep track of whether the current
	// position is type-S or type-L according to the usual definition:
	//
	// - position len(text) is type S with text[len(text)] == -1 (the sentinel)
	// - position i is type S if text[i] < text[i+1], or if text[i] == text[i+1] && i+1 is type S.
	// - position i is type L if text[i] > text[i+1], or if text[i] == text[i+1] && i+1 is type L.
	//
	// The backward scan lets us maintain the current type,
	// update it when we see c0 != c1, and otherwise leave it alone.
	// We want to identify all S positions with a preceding L.
	// Position len(text) is one such position by definition, but we have
	// nowhere to write it down, so we eliminate it by untruthfully
	// setting isTypeS = false at the start of the loop.
	c0, c1, isTypeS := int32(0), int32(0), false
	for i := len(text) - 1; i >= 0; i-- {
		c0, c1 = text[i], c0
		if c0 < c1 {
			isTypeS = true
		} else if c0 > c1 && isTypeS {
			isTypeS = false
			// Bucket the index i+1 for the start of an LMS-substring.
			b := bucket[c1] - 1
			bucket[c1] = b
			sa[b] = int32(i + 1)
			lastB = b
			numLMS++
		}
	}
	// We recorded the LMS-substring starts but really want the ends.
	// Luckily, with two differences, the start indexes and the end indexes are the same.
	// The first difference is that the rightmost LMS-substring's end index is len(text),
	// so the caller must pretend that sa[-1] == len(text), as noted above.
	// The second difference is that the first leftmost LMS-substring start index
	// does not end an earlier LMS-substring, so as an optimization we can omit
	// that leftmost LMS-substring start index (the last one we wrote).
	//
	// Exception: if numLMS <= 1, the caller is not going to bother with
	// the recursion at all and will treat the result as containing LMS-substring starts.
	// In that case, we don't remove the final entry.
	if numLMS > 1 {
		sa[lastB] = 0
	}
	return numLMS
}
// placeLMS_64 finds each LMS-substring start index in text and
// writes it at the current top of its first character's bucket in sa,
// returning the number of LMS-substrings found. As an optimization,
// the last (leftmost) start written is zeroed out when numLMS > 1;
// see the trailing comment for why.
func placeLMS_64(text []int64, sa, freq, bucket []int64) int {
	bucketMax_64(text, freq, bucket)
	numLMS := 0
	lastB := int64(-1) // slot of most recently placed start
	// The next stanza of code (until the blank line) loop backward
	// over text, stopping to execute a code body at each position i
	// such that text[i] is an L-character and text[i+1] is an S-character.
	// That is, i+1 is the position of the start of an LMS-substring.
	// These could be hoisted out into a function with a callback,
	// but at a significant speed cost. Instead, we just write these
	// seven lines a few times in this source file. The copies below
	// refer back to the pattern established by this original as the
	// "LMS-substring iterator".
	//
	// In every scan through the text, c0, c1 are successive characters of text.
	// In this backward scan, c0 == text[i] and c1 == text[i+1].
	// By scanning backward, we can keep track of whether the current
	// position is type-S or type-L according to the usual definition:
	//
	// - position len(text) is type S with text[len(text)] == -1 (the sentinel)
	// - position i is type S if text[i] < text[i+1], or if text[i] == text[i+1] && i+1 is type S.
	// - position i is type L if text[i] > text[i+1], or if text[i] == text[i+1] && i+1 is type L.
	//
	// The backward scan lets us maintain the current type,
	// update it when we see c0 != c1, and otherwise leave it alone.
	// We want to identify all S positions with a preceding L.
	// Position len(text) is one such position by definition, but we have
	// nowhere to write it down, so we eliminate it by untruthfully
	// setting isTypeS = false at the start of the loop.
	c0, c1, isTypeS := int64(0), int64(0), false
	for i := len(text) - 1; i >= 0; i-- {
		c0, c1 = text[i], c0
		if c0 < c1 {
			isTypeS = true
		} else if c0 > c1 && isTypeS {
			isTypeS = false
			// Bucket the index i+1 for the start of an LMS-substring.
			b := bucket[c1] - 1
			bucket[c1] = b
			sa[b] = int64(i + 1)
			lastB = b
			numLMS++
		}
	}
	// We recorded the LMS-substring starts but really want the ends.
	// Luckily, with two differences, the start indexes and the end indexes are the same.
	// The first difference is that the rightmost LMS-substring's end index is len(text),
	// so the caller must pretend that sa[-1] == len(text), as noted above.
	// The second difference is that the first leftmost LMS-substring start index
	// does not end an earlier LMS-substring, so as an optimization we can omit
	// that leftmost LMS-substring start index (the last one we wrote).
	//
	// Exception: if numLMS <= 1, the caller is not going to bother with
	// the recursion at all and will treat the result as containing LMS-substring starts.
	// In that case, we don't remove the final entry.
	if numLMS > 1 {
		sa[lastB] = 0
	}
	return numLMS
}
// induceSubL_8_64 induces the sort order of the L-type indexes
// reachable from the sorted LMS-substring start indexes that
// placeLMS_8_64 left in sa. It finishes with sa containing only
// the leftmost L-type index of each LMS-substring; the detailed
// mechanism is described in the comments below.
func induceSubL_8_64(text []byte, sa, freq, bucket []int64) {
	// Initialize positions for left side of character buckets.
	bucketMin_8_64(text, freq, bucket)
	bucket = bucket[:256] // eliminate bounds check for bucket[cB] below
	// As we scan the array left-to-right, each sa[i] = j > 0 is a correctly
	// sorted suffix array entry (for text[j:]) for which we know that j-1 is type L.
	// Because j-1 is type L, inserting it into sa now will sort it correctly.
	// But we want to distinguish a j-1 with j-2 of type L from type S.
	// We can process the former but want to leave the latter for the caller.
	// We record the difference by negating j-1 if it is preceded by type S.
	// Either way, the insertion (into the text[j-1] bucket) is guaranteed to
	// happen at sa[i´] for some i´ > i, that is, in the portion of sa we have
	// yet to scan. A single pass therefore sees indexes j, j-1, j-2, j-3,
	// and so on, in sorted but not necessarily adjacent order, until it finds
	// one preceded by an index of type S, at which point it must stop.
	//
	// As we scan through the array, we clear the worked entries (sa[i] > 0) to zero,
	// and we flip sa[i] < 0 to -sa[i], so that the loop finishes with sa containing
	// only the indexes of the leftmost L-type indexes for each LMS-substring.
	//
	// The suffix array sa therefore serves simultaneously as input, output,
	// and a miraculously well-tailored work queue.
	// placeLMS_8_64 left out the implicit entry sa[-1] == len(text),
	// corresponding to the identified type-L index len(text)-1.
	// Process it before the left-to-right scan of sa proper.
	// See body in loop for commentary.
	k := len(text) - 1
	c0, c1 := text[k-1], text[k]
	if c0 < c1 {
		k = -k
	}
	// Cache recently used bucket index:
	// we're processing suffixes in sorted order
	// and accessing buckets indexed by the
	// byte before the sorted order, which still
	// has very good locality.
	// Invariant: b is cached, possibly dirty copy of bucket[cB].
	cB := c1
	b := bucket[cB]
	sa[b] = int64(k)
	b++
	for i := 0; i < len(sa); i++ {
		j := int(sa[i])
		if j == 0 {
			// Skip empty entry.
			continue
		}
		if j < 0 {
			// Leave discovered type-S index for caller.
			sa[i] = int64(-j)
			continue
		}
		sa[i] = 0
		// Index j was on work queue, meaning k := j-1 is L-type,
		// so we can now place k correctly into sa.
		// If k-1 is L-type, queue k for processing later in this loop.
		// If k-1 is S-type (text[k-1] < text[k]), queue -k to save for the caller.
		k := j - 1
		c0, c1 := text[k-1], text[k]
		if c0 < c1 {
			k = -k
		}
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		sa[b] = int64(k)
		b++
	}
}
// induceSubL_32 induces the sort order of the L-type indexes
// reachable from the sorted LMS-substring start indexes that
// placeLMS_32 left in sa. It finishes with sa containing only
// the leftmost L-type index of each LMS-substring; the detailed
// mechanism is described in the comments below.
func induceSubL_32(text []int32, sa, freq, bucket []int32) {
	// Initialize positions for left side of character buckets.
	bucketMin_32(text, freq, bucket)
	// As we scan the array left-to-right, each sa[i] = j > 0 is a correctly
	// sorted suffix array entry (for text[j:]) for which we know that j-1 is type L.
	// Because j-1 is type L, inserting it into sa now will sort it correctly.
	// But we want to distinguish a j-1 with j-2 of type L from type S.
	// We can process the former but want to leave the latter for the caller.
	// We record the difference by negating j-1 if it is preceded by type S.
	// Either way, the insertion (into the text[j-1] bucket) is guaranteed to
	// happen at sa[i´] for some i´ > i, that is, in the portion of sa we have
	// yet to scan. A single pass therefore sees indexes j, j-1, j-2, j-3,
	// and so on, in sorted but not necessarily adjacent order, until it finds
	// one preceded by an index of type S, at which point it must stop.
	//
	// As we scan through the array, we clear the worked entries (sa[i] > 0) to zero,
	// and we flip sa[i] < 0 to -sa[i], so that the loop finishes with sa containing
	// only the indexes of the leftmost L-type indexes for each LMS-substring.
	//
	// The suffix array sa therefore serves simultaneously as input, output,
	// and a miraculously well-tailored work queue.
	// placeLMS_32 left out the implicit entry sa[-1] == len(text),
	// corresponding to the identified type-L index len(text)-1.
	// Process it before the left-to-right scan of sa proper.
	// See body in loop for commentary.
	k := len(text) - 1
	c0, c1 := text[k-1], text[k]
	if c0 < c1 {
		k = -k
	}
	// Cache recently used bucket index:
	// we're processing suffixes in sorted order
	// and accessing buckets indexed by the
	// int32 before the sorted order, which still
	// has very good locality.
	// Invariant: b is cached, possibly dirty copy of bucket[cB].
	cB := c1
	b := bucket[cB]
	sa[b] = int32(k)
	b++
	for i := 0; i < len(sa); i++ {
		j := int(sa[i])
		if j == 0 {
			// Skip empty entry.
			continue
		}
		if j < 0 {
			// Leave discovered type-S index for caller.
			sa[i] = int32(-j)
			continue
		}
		sa[i] = 0
		// Index j was on work queue, meaning k := j-1 is L-type,
		// so we can now place k correctly into sa.
		// If k-1 is L-type, queue k for processing later in this loop.
		// If k-1 is S-type (text[k-1] < text[k]), queue -k to save for the caller.
		k := j - 1
		c0, c1 := text[k-1], text[k]
		if c0 < c1 {
			k = -k
		}
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		sa[b] = int32(k)
		b++
	}
}
// induceSubL_64 induces the sort order of the L-type indexes
// reachable from the sorted LMS-substring start indexes that
// placeLMS_64 left in sa. It finishes with sa containing only
// the leftmost L-type index of each LMS-substring; the detailed
// mechanism is described in the comments below.
func induceSubL_64(text []int64, sa, freq, bucket []int64) {
	// Initialize positions for left side of character buckets.
	bucketMin_64(text, freq, bucket)
	// As we scan the array left-to-right, each sa[i] = j > 0 is a correctly
	// sorted suffix array entry (for text[j:]) for which we know that j-1 is type L.
	// Because j-1 is type L, inserting it into sa now will sort it correctly.
	// But we want to distinguish a j-1 with j-2 of type L from type S.
	// We can process the former but want to leave the latter for the caller.
	// We record the difference by negating j-1 if it is preceded by type S.
	// Either way, the insertion (into the text[j-1] bucket) is guaranteed to
	// happen at sa[i´] for some i´ > i, that is, in the portion of sa we have
	// yet to scan. A single pass therefore sees indexes j, j-1, j-2, j-3,
	// and so on, in sorted but not necessarily adjacent order, until it finds
	// one preceded by an index of type S, at which point it must stop.
	//
	// As we scan through the array, we clear the worked entries (sa[i] > 0) to zero,
	// and we flip sa[i] < 0 to -sa[i], so that the loop finishes with sa containing
	// only the indexes of the leftmost L-type indexes for each LMS-substring.
	//
	// The suffix array sa therefore serves simultaneously as input, output,
	// and a miraculously well-tailored work queue.
	// placeLMS_64 left out the implicit entry sa[-1] == len(text),
	// corresponding to the identified type-L index len(text)-1.
	// Process it before the left-to-right scan of sa proper.
	// See body in loop for commentary.
	k := len(text) - 1
	c0, c1 := text[k-1], text[k]
	if c0 < c1 {
		k = -k
	}
	// Cache recently used bucket index:
	// we're processing suffixes in sorted order
	// and accessing buckets indexed by the
	// int64 before the sorted order, which still
	// has very good locality.
	// Invariant: b is cached, possibly dirty copy of bucket[cB].
	cB := c1
	b := bucket[cB]
	sa[b] = int64(k)
	b++
	for i := 0; i < len(sa); i++ {
		j := int(sa[i])
		if j == 0 {
			// Skip empty entry.
			continue
		}
		if j < 0 {
			// Leave discovered type-S index for caller.
			sa[i] = int64(-j)
			continue
		}
		sa[i] = 0
		// Index j was on work queue, meaning k := j-1 is L-type,
		// so we can now place k correctly into sa.
		// If k-1 is L-type, queue k for processing later in this loop.
		// If k-1 is S-type (text[k-1] < text[k]), queue -k to save for the caller.
		k := j - 1
		c0, c1 := text[k-1], text[k]
		if c0 < c1 {
			k = -k
		}
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		sa[b] = int64(k)
		b++
	}
}
// induceSubS_8_64 induces the sort order of the S-type indexes
// reachable from the leftmost L-type indexes that induceSubL_8_64
// left in sa, scanning right to left. It finishes with the top of sa
// holding the LMS-substring start indexes, sorted by LMS-substring;
// the detailed mechanism is described in the comments below.
func induceSubS_8_64(text []byte, sa, freq, bucket []int64) {
	// Initialize positions for right side of character buckets.
	bucketMax_8_64(text, freq, bucket)
	bucket = bucket[:256] // eliminate bounds check for bucket[cB] below
	// Analogous to induceSubL_8_64 above,
	// as we scan the array right-to-left, each sa[i] = j > 0 is a correctly
	// sorted suffix array entry (for text[j:]) for which we know that j-1 is type S.
	// Because j-1 is type S, inserting it into sa now will sort it correctly.
	// But we want to distinguish a j-1 with j-2 of type S from type L.
	// We can process the former but want to leave the latter for the caller.
	// We record the difference by negating j-1 if it is preceded by type L.
	// Either way, the insertion (into the text[j-1] bucket) is guaranteed to
	// happen at sa[i´] for some i´ < i, that is, in the portion of sa we have
	// yet to scan. A single pass therefore sees indexes j, j-1, j-2, j-3,
	// and so on, in sorted but not necessarily adjacent order, until it finds
	// one preceded by an index of type L, at which point it must stop.
	// That index (preceded by one of type L) is an LMS-substring start.
	//
	// As we scan through the array, we clear the worked entries (sa[i] > 0) to zero,
	// and we flip sa[i] < 0 to -sa[i] and compact into the top of sa,
	// so that the loop finishes with the top of sa containing exactly
	// the LMS-substring start indexes, sorted by LMS-substring.
	// Cache recently used bucket index:
	cB := byte(0)
	b := bucket[cB]
	top := len(sa)
	for i := len(sa) - 1; i >= 0; i-- {
		j := int(sa[i])
		if j == 0 {
			// Skip empty entry.
			continue
		}
		sa[i] = 0
		if j < 0 {
			// Leave discovered LMS-substring start index for caller.
			top--
			sa[top] = int64(-j)
			continue
		}
		// Index j was on work queue, meaning k := j-1 is S-type,
		// so we can now place k correctly into sa.
		// If k-1 is S-type, queue k for processing later in this loop.
		// If k-1 is L-type (text[k-1] > text[k]), queue -k to save for the caller.
		k := j - 1
		c1 := text[k]
		c0 := text[k-1]
		if c0 > c1 {
			k = -k
		}
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		b--
		sa[b] = int64(k)
	}
}
// induceSubS_32 induces the sort order of the S-type indexes
// reachable from the leftmost L-type indexes that induceSubL_32
// left in sa, scanning right to left. It finishes with the top of sa
// holding the LMS-substring start indexes, sorted by LMS-substring;
// the detailed mechanism is described in the comments below.
func induceSubS_32(text []int32, sa, freq, bucket []int32) {
	// Initialize positions for right side of character buckets.
	bucketMax_32(text, freq, bucket)
	// Analogous to induceSubL_32 above,
	// as we scan the array right-to-left, each sa[i] = j > 0 is a correctly
	// sorted suffix array entry (for text[j:]) for which we know that j-1 is type S.
	// Because j-1 is type S, inserting it into sa now will sort it correctly.
	// But we want to distinguish a j-1 with j-2 of type S from type L.
	// We can process the former but want to leave the latter for the caller.
	// We record the difference by negating j-1 if it is preceded by type L.
	// Either way, the insertion (into the text[j-1] bucket) is guaranteed to
	// happen at sa[i´] for some i´ < i, that is, in the portion of sa we have
	// yet to scan. A single pass therefore sees indexes j, j-1, j-2, j-3,
	// and so on, in sorted but not necessarily adjacent order, until it finds
	// one preceded by an index of type L, at which point it must stop.
	// That index (preceded by one of type L) is an LMS-substring start.
	//
	// As we scan through the array, we clear the worked entries (sa[i] > 0) to zero,
	// and we flip sa[i] < 0 to -sa[i] and compact into the top of sa,
	// so that the loop finishes with the top of sa containing exactly
	// the LMS-substring start indexes, sorted by LMS-substring.
	// Cache recently used bucket index:
	cB := int32(0)
	b := bucket[cB]
	top := len(sa)
	for i := len(sa) - 1; i >= 0; i-- {
		j := int(sa[i])
		if j == 0 {
			// Skip empty entry.
			continue
		}
		sa[i] = 0
		if j < 0 {
			// Leave discovered LMS-substring start index for caller.
			top--
			sa[top] = int32(-j)
			continue
		}
		// Index j was on work queue, meaning k := j-1 is S-type,
		// so we can now place k correctly into sa.
		// If k-1 is S-type, queue k for processing later in this loop.
		// If k-1 is L-type (text[k-1] > text[k]), queue -k to save for the caller.
		k := j - 1
		c1 := text[k]
		c0 := text[k-1]
		if c0 > c1 {
			k = -k
		}
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		b--
		sa[b] = int32(k)
	}
}
// induceSubS_64 induces the S-type indexes needed to sort the
// LMS-substrings, scanning sa right to left, and finishes with the
// sorted LMS-substring start indexes compacted into the top of sa.
func induceSubS_64(text []int64, sa, freq, bucket []int64) {
	// Initialize positions for right side of character buckets.
	bucketMax_64(text, freq, bucket)

	// Analogous to induceSubL_64 above,
	// as we scan the array right-to-left, each sa[i] = j > 0 is a correctly
	// sorted suffix array entry (for text[j:]) for which we know that j-1 is type S.
	// Because j-1 is type S, inserting it into sa now will sort it correctly.
	// But we want to distinguish a j-1 with j-2 of type S from type L.
	// We can process the former but want to leave the latter for the caller.
	// We record the difference by negating j-1 if it is preceded by type L.
	// Either way, the insertion (into the text[j-1] bucket) is guaranteed to
	// happen at sa[i´] for some i´ < i, that is, in the portion of sa we have
	// yet to scan. A single pass therefore sees indexes j, j-1, j-2, j-3,
	// and so on, in sorted but not necessarily adjacent order, until it finds
	// one preceded by an index of type L, at which point it must stop.
	// That index (preceded by one of type L) is an LMS-substring start.
	//
	// As we scan through the array, we clear the worked entries (sa[i] > 0) to zero,
	// and we flip sa[i] < 0 to -sa[i] and compact into the top of sa,
	// so that the loop finishes with the top of sa containing exactly
	// the LMS-substring start indexes, sorted by LMS-substring.

	// Cache recently used bucket index:
	cB := int64(0)
	b := bucket[cB]

	top := len(sa)
	for i := len(sa) - 1; i >= 0; i-- {
		j := int(sa[i])
		if j == 0 {
			// Skip empty entry.
			continue
		}
		sa[i] = 0
		if j < 0 {
			// Leave discovered LMS-substring start index for caller.
			top--
			sa[top] = int64(-j)
			continue
		}

		// Index j was on work queue, meaning k := j-1 is S-type,
		// so we can now place k correctly into sa.
		// If k-1 is S-type, queue k for processing later in this loop.
		// If k-1 is L-type (text[k-1] > text[k]), queue -k to save for the caller.
		k := j - 1
		c1 := text[k]
		c0 := text[k-1]
		if c0 > c1 {
			k = -k
		}

		// Flush the cached bucket pointer when the character changes.
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		b--
		sa[b] = int64(k)
	}
}
// length_8_64 records, for each LMS-substring starting at index j,
// either its length or (when short enough) an encoding of its text,
// into sa[j/2]. A stored 0 marks the final LMS-substring.
func length_8_64(text []byte, sa []int64, numLMS int) {
	end := 0 // index of current LMS-substring end (0 indicates final LMS-substring)

	// The encoding of N text bytes into a “length” word
	// adds 1 to each byte, packs them into the bottom
	// N*8 bits of a word, and then bitwise inverts the result.
	// That is, the text sequence A B C (hex 41 42 43)
	// encodes as ^uint64(0x42_43_44).
	// LMS-substrings can never start or end with 0xFF.
	// Adding 1 ensures the encoded byte sequence never
	// starts or ends with 0x00, so that present bytes can be
	// distinguished from zero-padding in the top bits,
	// so the length need not be separately encoded.
	// Inverting the bytes increases the chance that a
	// 4-byte encoding will still be ≥ len(text).
	// In particular, if the first byte is ASCII (<= 0x7E, so +1 <= 0x7F)
	// then the high bit of the inversion will be set,
	// making it clearly not a valid length (it would be a negative one).
	//
	// cx holds the pre-inverted encoding (the packed incremented bytes).
	cx := uint64(0) // byte-only

	// This stanza (until the blank line) is the "LMS-substring iterator",
	// described in placeLMS_8_64 above, with one line added to maintain cx.
	// c1 is the character to the right of c0 (zero at the text's end).
	c0, c1, isTypeS := byte(0), byte(0), false
	for i := len(text) - 1; i >= 0; i-- {
		c0, c1 = text[i], c0
		cx = cx<<8 | uint64(c1+1) // byte-only
		if c0 < c1 {
			isTypeS = true
		} else if c0 > c1 && isTypeS {
			isTypeS = false

			// Index j = i+1 is the start of an LMS-substring.
			// Compute length or encoded text to store in sa[j/2].
			j := i + 1
			var code int64
			if end == 0 {
				// Final LMS-substring: marked with 0.
				code = 0
			} else {
				code = int64(end - j)
				// Use the packed-text encoding only when the substring
				// fits in one word (64/8 bytes) and the encoding cannot
				// be mistaken for a plain length.
				if code <= 64/8 && ^cx >= uint64(len(text)) { // byte-only
					code = int64(^cx) // byte-only
				} // byte-only
			}
			sa[j>>1] = code
			end = j + 1
			// Restart the encoding with the first character of the next
			// (leftward) LMS-substring, which is this one's start.
			cx = uint64(c1 + 1) // byte-only
		}
	}
}
// length_32 records the length of each LMS-substring of text,
// storing the length of the LMS-substring starting at index j into
// sa[j/2]. A stored 0 marks the final LMS-substring (the one that
// runs to the end of the text).
// Unlike length_8_64, this variant never packs the substring text
// itself into the slot; it always stores the plain length.
func length_32(text []int32, sa []int32, numLMS int) {
	// last is the index just past the end of the most recently seen
	// LMS-substring; 0 means none seen yet (the current one is final).
	last := 0

	// LMS-substring iterator (see placeLMS_32 above): scan text right
	// to left, tracking whether the suffix at the previous index is S-type.
	prev := int32(0) // character just to the right of text[i]
	sType := false
	for i := len(text) - 1; i >= 0; i-- {
		cur := text[i]
		if cur < prev {
			sType = true
		} else if cur > prev && sType {
			sType = false

			// Index i+1 starts an LMS-substring; record its length
			// (or 0 for the final one) at sa[(i+1)/2].
			start := i + 1
			var n int32
			if last != 0 {
				n = int32(last - start)
			}
			sa[start>>1] = n
			last = start + 1
		}
		prev = cur
	}
}
// length_64 records the length of each LMS-substring of text,
// storing the length of the LMS-substring starting at index j into
// sa[j/2]. A stored 0 marks the final LMS-substring (the one that
// runs to the end of the text).
// Unlike length_8_64, this variant never packs the substring text
// itself into the slot; it always stores the plain length.
func length_64(text []int64, sa []int64, numLMS int) {
	// last is the index just past the end of the most recently seen
	// LMS-substring; 0 means none seen yet (the current one is final).
	last := 0

	// LMS-substring iterator (see placeLMS_64 above): scan text right
	// to left, tracking whether the suffix at the previous index is S-type.
	prev := int64(0) // character just to the right of text[i]
	sType := false
	for i := len(text) - 1; i >= 0; i-- {
		cur := text[i]
		if cur < prev {
			sType = true
		} else if cur > prev && sType {
			sType = false

			// Index i+1 starts an LMS-substring; record its length
			// (or 0 for the final one) at sa[(i+1)/2].
			start := i + 1
			var n int64
			if last != 0 {
				n = int64(last - start)
			}
			sa[start>>1] = n
			last = start + 1
		}
		prev = cur
	}
}
// assignID_8_64 assigns a dense ID (1, 2, 3, ...) to each distinct
// LMS-substring, in sorted order, writing the ID for the substring
// starting at j into sa[j/2], and returns the number of distinct IDs.
// On entry, sa[len(sa)-numLMS:] holds the LMS-substring start indexes
// in sorted order, and sa[j/2] holds the length (or packed text) of
// the substring starting at j, as computed by length_8_64.
func assignID_8_64(text []byte, sa []int64, numLMS int) int {
	id := 0
	prevLen := int64(-1) // impossible value: forces the first substring to be "new"
	prevPos := int64(0)
	for _, j := range sa[len(sa)-numLMS:] {
		// Decide whether the LMS-substring at j equals the previous one.
		n := sa[j/2]
		same := n == prevLen
		if same && uint64(n) < uint64(len(text)) {
			// Lengths match and are plain lengths (not packed text,
			// which compares fully by the word alone); compare bytes.
			this, last := text[j:][:n], text[prevPos:][:n]
			for i := range this {
				if this[i] != last[i] {
					same = false
					break
				}
			}
		}
		if !same {
			id++
			prevPos = j
			prevLen = n
		}
		sa[j/2] = int64(id)
	}
	return id
}
// assignID_32 assigns a dense ID (1, 2, 3, ...) to each distinct
// LMS-substring, in sorted order, writing the ID for the substring
// starting at j into sa[j/2], and returns the number of distinct IDs.
// On entry, sa[len(sa)-numLMS:] holds the LMS-substring start indexes
// in sorted order, and sa[j/2] holds the length of the substring
// starting at j, as computed by length_32.
func assignID_32(text []int32, sa []int32, numLMS int) int {
	id := 0
	prevLen := int32(-1) // impossible value: forces the first substring to be "new"
	prevPos := int32(0)
	for _, j := range sa[len(sa)-numLMS:] {
		// Decide whether the LMS-substring at j equals the previous one.
		n := sa[j/2]
		same := n == prevLen
		if same && uint32(n) < uint32(len(text)) {
			// Lengths match; compare the actual characters.
			this, last := text[j:][:n], text[prevPos:][:n]
			for i := range this {
				if this[i] != last[i] {
					same = false
					break
				}
			}
		}
		if !same {
			id++
			prevPos = j
			prevLen = n
		}
		sa[j/2] = int32(id)
	}
	return id
}
// assignID_64 assigns a dense ID (1, 2, 3, ...) to each distinct
// LMS-substring, in sorted order, writing the ID for the substring
// starting at j into sa[j/2], and returns the number of distinct IDs.
// On entry, sa[len(sa)-numLMS:] holds the LMS-substring start indexes
// in sorted order, and sa[j/2] holds the length of the substring
// starting at j, as computed by length_64.
func assignID_64(text []int64, sa []int64, numLMS int) int {
	id := 0
	prevLen := int64(-1) // impossible value: forces the first substring to be "new"
	prevPos := int64(0)
	for _, j := range sa[len(sa)-numLMS:] {
		// Decide whether the LMS-substring at j equals the previous one.
		n := sa[j/2]
		same := n == prevLen
		if same && uint64(n) < uint64(len(text)) {
			// Lengths match; compare the actual characters.
			this, last := text[j:][:n], text[prevPos:][:n]
			for i := range this {
				if this[i] != last[i] {
					same = false
					break
				}
			}
		}
		if !same {
			id++
			prevPos = j
			prevLen = n
		}
		sa[j/2] = int64(id)
	}
	return id
}
// map_64 compacts the positive entries in the first half of sa
// (the LMS-substring IDs stored at sa[j/2] by assignID_64) into the
// tail of sa, decrementing each by one so the IDs become zero-based.
// The compacted tail serves as the text of the recursive subproblem.
func map_64(sa []int64, numLMS int) {
	w := len(sa)
	// Scan the ID slots right to left so the compacted values land in
	// the tail preserving their original relative order.
	for r := len(sa) / 2; r >= 0; r-- {
		if v := sa[r]; v > 0 {
			w--
			sa[w] = v - 1
		}
	}
}
// recurse_64 solves the subproblem recursively: it computes the
// suffix array of the subproblem text (the top numLMS entries of sa,
// with character IDs in [0, maxID)) into dst = sa[:numLMS], arranging
// temporary space for the recursive sais_64 call.
func recurse_64(sa, oldTmp []int64, numLMS, maxID int) {
	dst, saTmp, text := sa[:numLMS], sa[numLMS:len(sa)-numLMS], sa[len(sa)-numLMS:]

	// sais_64 must be given a tmp buffer with at least maxID entries.
	//
	// The subproblem is guaranteed to have length at most len(sa)/2,
	// so that sa can hold both the subproblem and its suffix array,
	// and in practice it is usually shorter than len(sa)/3 (see Nong,
	// Zhang, and Chen, section 3.6, for a plausible explanation), so
	// the unused middle of sa (saTmp) usually suffices. When called
	// from sais_8_64, oldTmp is the small length-512 buffer from
	// text_64 and saTmp is typically much larger; in deeper
	// recursions, oldTmp is the top-most recursion's large saTmp,
	// which keeps being reused as sa (and thus the current saTmp)
	// shrinks. Pick whichever of the two is larger.
	tmp := oldTmp
	if len(saTmp) > len(tmp) {
		tmp = saTmp
	}
	if len(tmp) < numLMS {
		// Pathological case (or a very short input): allocate.
		// This level needs maxID entries and all deeper levels need at
		// most numLMS/2, so this one allocation covers the entire
		// remaining recursion stack.
		// TestSAIS/forcealloc reaches this code.
		tmp = make([]int64, max(maxID, numLMS/2))
	}

	// sais_64 requires that the caller arrange to clear dst, because
	// in general the caller may know dst is freshly-allocated and
	// already cleared. This one is not, so clear it explicitly.
	clear(dst)
	sais_64(text, maxID, dst, tmp)
}
// unmap_8_64 translates the subproblem's suffix array, stored in
// sa[:numLMS] as ranks into the sorted-by-position LMS list, back into
// text indexes: it rebuilds the LMS start positions (in text order) in
// the tail of sa, then replaces each rank with its position.
func unmap_8_64(text []byte, sa []int64, numLMS int) {
	inv := sa[len(sa)-numLMS:]
	w := numLMS

	// LMS-substring iterator (see placeLMS_8_64 above): walk text right
	// to left, filling inv back to front so it lists the LMS start
	// indexes in increasing text order.
	prev := byte(0) // character just to the right of text[i]
	sType := false
	for i := len(text) - 1; i >= 0; i-- {
		cur := text[i]
		if cur < prev {
			sType = true
		} else if cur > prev && sType {
			sType = false
			w--
			inv[w] = int64(i + 1)
		}
		prev = cur
	}

	// Replace each subproblem rank with the actual text position.
	ranks := sa[:numLMS]
	for i, r := range ranks {
		ranks[i] = inv[r]
	}
}
// unmap_32 translates the subproblem's suffix array, stored in
// sa[:numLMS] as ranks into the sorted-by-position LMS list, back into
// text indexes: it rebuilds the LMS start positions (in text order) in
// the tail of sa, then replaces each rank with its position.
func unmap_32(text []int32, sa []int32, numLMS int) {
	inv := sa[len(sa)-numLMS:]
	w := numLMS

	// LMS-substring iterator (see placeLMS_32 above): walk text right
	// to left, filling inv back to front so it lists the LMS start
	// indexes in increasing text order.
	prev := int32(0) // character just to the right of text[i]
	sType := false
	for i := len(text) - 1; i >= 0; i-- {
		cur := text[i]
		if cur < prev {
			sType = true
		} else if cur > prev && sType {
			sType = false
			w--
			inv[w] = int32(i + 1)
		}
		prev = cur
	}

	// Replace each subproblem rank with the actual text position.
	ranks := sa[:numLMS]
	for i, r := range ranks {
		ranks[i] = inv[r]
	}
}
// unmap_64 translates the subproblem's suffix array, stored in
// sa[:numLMS] as ranks into the sorted-by-position LMS list, back into
// text indexes: it rebuilds the LMS start positions (in text order) in
// the tail of sa, then replaces each rank with its position.
func unmap_64(text []int64, sa []int64, numLMS int) {
	inv := sa[len(sa)-numLMS:]
	w := numLMS

	// LMS-substring iterator (see placeLMS_64 above): walk text right
	// to left, filling inv back to front so it lists the LMS start
	// indexes in increasing text order.
	prev := int64(0) // character just to the right of text[i]
	sType := false
	for i := len(text) - 1; i >= 0; i-- {
		cur := text[i]
		if cur < prev {
			sType = true
		} else if cur > prev && sType {
			sType = false
			w--
			inv[w] = int64(i + 1)
		}
		prev = cur
	}

	// Replace each subproblem rank with the actual text position.
	ranks := sa[:numLMS]
	for i, r := range ranks {
		ranks[i] = inv[r]
	}
}
// expand_8_64 redistributes the numLMS LMS positions currently packed
// into sa[:numLMS] to the right-hand ends of their character buckets,
// clearing every other slot of sa.
func expand_8_64(text []byte, freq, bucket, sa []int64, numLMS int) {
	bucketMax_8_64(text, freq, bucket)
	bucket = bucket[:256] // eliminate bound check for bucket[c] below

	// Walk sa from the right; when we reach the slot reserved for the
	// next pending LMS position, store it and reserve a slot for the
	// one before it. All other slots hold dead values and are zeroed.
	next := numLMS - 1
	pos := sa[next]
	ch := text[pos]
	slot := bucket[ch] - 1
	bucket[ch] = slot
	for i := len(sa) - 1; i >= 0; i-- {
		if i != int(slot) {
			sa[i] = 0
			continue
		}
		sa[i] = pos
		// Load the next entry to put down (if any).
		if next > 0 {
			next--
			pos = sa[next]
			ch = text[pos]
			slot = bucket[ch] - 1
			bucket[ch] = slot
		}
	}
}
// expand_32 redistributes the numLMS LMS positions currently packed
// into sa[:numLMS] to the right-hand ends of their character buckets,
// clearing every other slot of sa.
func expand_32(text []int32, freq, bucket, sa []int32, numLMS int) {
	bucketMax_32(text, freq, bucket)

	// Walk sa from the right; when we reach the slot reserved for the
	// next pending LMS position, store it and reserve a slot for the
	// one before it. All other slots hold dead values and are zeroed.
	next := numLMS - 1
	pos := sa[next]
	ch := text[pos]
	slot := bucket[ch] - 1
	bucket[ch] = slot
	for i := len(sa) - 1; i >= 0; i-- {
		if i != int(slot) {
			sa[i] = 0
			continue
		}
		sa[i] = pos
		// Load the next entry to put down (if any).
		if next > 0 {
			next--
			pos = sa[next]
			ch = text[pos]
			slot = bucket[ch] - 1
			bucket[ch] = slot
		}
	}
}
// expand_64 redistributes the numLMS LMS positions currently packed
// into sa[:numLMS] to the right-hand ends of their character buckets,
// clearing every other slot of sa.
func expand_64(text []int64, freq, bucket, sa []int64, numLMS int) {
	bucketMax_64(text, freq, bucket)

	// Walk sa from the right; when we reach the slot reserved for the
	// next pending LMS position, store it and reserve a slot for the
	// one before it. All other slots hold dead values and are zeroed.
	next := numLMS - 1
	pos := sa[next]
	ch := text[pos]
	slot := bucket[ch] - 1
	bucket[ch] = slot
	for i := len(sa) - 1; i >= 0; i-- {
		if i != int(slot) {
			sa[i] = 0
			continue
		}
		sa[i] = pos
		// Load the next entry to put down (if any).
		if next > 0 {
			next--
			pos = sa[next]
			ch = text[pos]
			slot = bucket[ch] - 1
			bucket[ch] = slot
		}
	}
}
// induceL_8_64 places the L-type text indexes into sa, scanning left
// to right, and negates the entries that induceS_8_64 must process
// next (the leftmost L-type indexes).
func induceL_8_64(text []byte, sa, freq, bucket []int64) {
	// Initialize positions for left side of character buckets.
	bucketMin_8_64(text, freq, bucket)
	bucket = bucket[:256] // eliminate bounds check for bucket[cB] below

	// This scan is similar to the one in induceSubL_8_64 above.
	// That one arranges to clear all but the leftmost L-type indexes.
	// This scan leaves all the L-type indexes and the original S-type
	// indexes, but it negates the positive leftmost L-type indexes
	// (the ones that induceS_8_64 needs to process).
	//
	// expand_8_64 left out the implicit entry sa[-1] == len(text),
	// corresponding to the identified type-L index len(text)-1.
	// Process it before the left-to-right scan of sa proper.
	// See body in loop for commentary.
	k := len(text) - 1
	c0, c1 := text[k-1], text[k]
	if c0 < c1 {
		k = -k
	}

	// Cache recently used bucket index.
	cB := c1
	b := bucket[cB]
	sa[b] = int64(k)
	b++

	for i := 0; i < len(sa); i++ {
		j := int(sa[i])
		if j <= 0 {
			// Skip empty or negated entry (including negated zero).
			continue
		}

		// Index j was on work queue, meaning k := j-1 is L-type,
		// so we can now place k correctly into sa.
		// If k-1 is L-type, queue k for processing later in this loop.
		// If k-1 is S-type (text[k-1] < text[k]), queue -k to save for the caller.
		// If k is zero, k-1 doesn't exist, so we only need to leave it
		// for the caller. The caller can't tell the difference between
		// an empty slot and a non-empty zero, but there's no need
		// to distinguish them anyway: the final suffix array will end up
		// with one zero somewhere, and that will be a real zero.
		k := j - 1
		c1 := text[k]
		if k > 0 {
			if c0 := text[k-1]; c0 < c1 {
				k = -k
			}
		}

		// Flush the cached bucket pointer when the character changes.
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		sa[b] = int64(k)
		b++
	}
}
// induceL_32 places the L-type text indexes into sa, scanning left
// to right, and negates the entries that induceS_32 must process
// next (the leftmost L-type indexes).
func induceL_32(text []int32, sa, freq, bucket []int32) {
	// Initialize positions for left side of character buckets.
	bucketMin_32(text, freq, bucket)

	// This scan is similar to the one in induceSubL_32 above.
	// That one arranges to clear all but the leftmost L-type indexes.
	// This scan leaves all the L-type indexes and the original S-type
	// indexes, but it negates the positive leftmost L-type indexes
	// (the ones that induceS_32 needs to process).
	//
	// expand_32 left out the implicit entry sa[-1] == len(text),
	// corresponding to the identified type-L index len(text)-1.
	// Process it before the left-to-right scan of sa proper.
	// See body in loop for commentary.
	k := len(text) - 1
	c0, c1 := text[k-1], text[k]
	if c0 < c1 {
		k = -k
	}

	// Cache recently used bucket index.
	cB := c1
	b := bucket[cB]
	sa[b] = int32(k)
	b++

	for i := 0; i < len(sa); i++ {
		j := int(sa[i])
		if j <= 0 {
			// Skip empty or negated entry (including negated zero).
			continue
		}

		// Index j was on work queue, meaning k := j-1 is L-type,
		// so we can now place k correctly into sa.
		// If k-1 is L-type, queue k for processing later in this loop.
		// If k-1 is S-type (text[k-1] < text[k]), queue -k to save for the caller.
		// If k is zero, k-1 doesn't exist, so we only need to leave it
		// for the caller. The caller can't tell the difference between
		// an empty slot and a non-empty zero, but there's no need
		// to distinguish them anyway: the final suffix array will end up
		// with one zero somewhere, and that will be a real zero.
		k := j - 1
		c1 := text[k]
		if k > 0 {
			if c0 := text[k-1]; c0 < c1 {
				k = -k
			}
		}

		// Flush the cached bucket pointer when the character changes.
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		sa[b] = int32(k)
		b++
	}
}
// induceL_64 places the L-type text indexes into sa, scanning left
// to right, and negates the entries that induceS_64 must process
// next (the leftmost L-type indexes).
func induceL_64(text []int64, sa, freq, bucket []int64) {
	// Initialize positions for left side of character buckets.
	bucketMin_64(text, freq, bucket)

	// This scan is similar to the one in induceSubL_64 above.
	// That one arranges to clear all but the leftmost L-type indexes.
	// This scan leaves all the L-type indexes and the original S-type
	// indexes, but it negates the positive leftmost L-type indexes
	// (the ones that induceS_64 needs to process).
	//
	// expand_64 left out the implicit entry sa[-1] == len(text),
	// corresponding to the identified type-L index len(text)-1.
	// Process it before the left-to-right scan of sa proper.
	// See body in loop for commentary.
	k := len(text) - 1
	c0, c1 := text[k-1], text[k]
	if c0 < c1 {
		k = -k
	}

	// Cache recently used bucket index.
	cB := c1
	b := bucket[cB]
	sa[b] = int64(k)
	b++

	for i := 0; i < len(sa); i++ {
		j := int(sa[i])
		if j <= 0 {
			// Skip empty or negated entry (including negated zero).
			continue
		}

		// Index j was on work queue, meaning k := j-1 is L-type,
		// so we can now place k correctly into sa.
		// If k-1 is L-type, queue k for processing later in this loop.
		// If k-1 is S-type (text[k-1] < text[k]), queue -k to save for the caller.
		// If k is zero, k-1 doesn't exist, so we only need to leave it
		// for the caller. The caller can't tell the difference between
		// an empty slot and a non-empty zero, but there's no need
		// to distinguish them anyway: the final suffix array will end up
		// with one zero somewhere, and that will be a real zero.
		k := j - 1
		c1 := text[k]
		if k > 0 {
			if c0 := text[k-1]; c0 < c1 {
				k = -k
			}
		}

		// Flush the cached bucket pointer when the character changes.
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		sa[b] = int64(k)
		b++
	}
}
// induceS_8_64 places the S-type text indexes into sa, scanning right
// to left, processing the entries that induceL_8_64 negated (and
// rewriting them positive), which completes the suffix array.
func induceS_8_64(text []byte, sa, freq, bucket []int64) {
	// Initialize positions for right side of character buckets.
	bucketMax_8_64(text, freq, bucket)
	bucket = bucket[:256] // eliminate bounds check for bucket[cB] below

	// Cache recently used bucket index.
	cB := byte(0)
	b := bucket[cB]

	for i := len(sa) - 1; i >= 0; i-- {
		j := int(sa[i])
		if j >= 0 {
			// Skip non-flagged entry.
			// (This loop can't see an empty entry; 0 means the real zero index.)
			continue
		}

		// Negative j is a work queue entry; rewrite to positive j for final suffix array.
		j = -j
		sa[i] = int64(j)

		// Index j was on work queue (encoded as -j but now decoded),
		// meaning k := j-1 is S-type,
		// so we can now place k correctly into sa.
		// If k-1 is S-type, queue -k for processing later in this loop.
		// If k-1 is L-type (text[k-1] > text[k]), queue k to save for the caller.
		// If k is zero, k-1 doesn't exist, so we only need to leave it
		// for the caller.
		k := j - 1
		c1 := text[k]
		if k > 0 {
			if c0 := text[k-1]; c0 <= c1 {
				k = -k
			}
		}

		// Flush the cached bucket pointer when the character changes.
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		b--
		sa[b] = int64(k)
	}
}
// induceS_32 places the S-type text indexes into sa, scanning right
// to left, processing the entries that induceL_32 negated (and
// rewriting them positive), which completes the suffix array.
func induceS_32(text []int32, sa, freq, bucket []int32) {
	// Initialize positions for right side of character buckets.
	bucketMax_32(text, freq, bucket)

	// Cache recently used bucket index.
	cB := int32(0)
	b := bucket[cB]

	for i := len(sa) - 1; i >= 0; i-- {
		j := int(sa[i])
		if j >= 0 {
			// Skip non-flagged entry.
			// (This loop can't see an empty entry; 0 means the real zero index.)
			continue
		}

		// Negative j is a work queue entry; rewrite to positive j for final suffix array.
		j = -j
		sa[i] = int32(j)

		// Index j was on work queue (encoded as -j but now decoded),
		// meaning k := j-1 is S-type,
		// so we can now place k correctly into sa.
		// If k-1 is S-type, queue -k for processing later in this loop.
		// If k-1 is L-type (text[k-1] > text[k]), queue k to save for the caller.
		// If k is zero, k-1 doesn't exist, so we only need to leave it
		// for the caller.
		k := j - 1
		c1 := text[k]
		if k > 0 {
			if c0 := text[k-1]; c0 <= c1 {
				k = -k
			}
		}

		// Flush the cached bucket pointer when the character changes.
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		b--
		sa[b] = int32(k)
	}
}
// induceS_64 places the S-type text indexes into sa, scanning right
// to left, processing the entries that induceL_64 negated (and
// rewriting them positive), which completes the suffix array.
func induceS_64(text []int64, sa, freq, bucket []int64) {
	// Initialize positions for right side of character buckets.
	bucketMax_64(text, freq, bucket)

	// Cache recently used bucket index.
	cB := int64(0)
	b := bucket[cB]

	for i := len(sa) - 1; i >= 0; i-- {
		j := int(sa[i])
		if j >= 0 {
			// Skip non-flagged entry.
			// (This loop can't see an empty entry; 0 means the real zero index.)
			continue
		}

		// Negative j is a work queue entry; rewrite to positive j for final suffix array.
		j = -j
		sa[i] = int64(j)

		// Index j was on work queue (encoded as -j but now decoded),
		// meaning k := j-1 is S-type,
		// so we can now place k correctly into sa.
		// If k-1 is S-type, queue -k for processing later in this loop.
		// If k-1 is L-type (text[k-1] > text[k]), queue k to save for the caller.
		// If k is zero, k-1 doesn't exist, so we only need to leave it
		// for the caller.
		k := j - 1
		c1 := text[k]
		if k > 0 {
			if c0 := text[k-1]; c0 <= c1 {
				k = -k
			}
		}

		// Flush the cached bucket pointer when the character changes.
		if cB != c1 {
			bucket[cB] = b
			cB = c1
			b = bucket[cB]
		}
		b--
		sa[b] = int64(k)
	}
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package suffixarray implements substring search in logarithmic time using
// an in-memory suffix array.
//
// Example use:
//
// // create index for some data
// index := suffixarray.New(data)
//
// // lookup byte slice s
// offsets1 := index.Lookup(s, -1) // the list of all indices where s occurs in data
// offsets2 := index.Lookup(s, 3) // the list of at most 3 indices where s occurs in data
package suffixarray
import (
"bytes"
"encoding/binary"
"errors"
"io"
"math"
"regexp"
"slices"
"sort"
)
// maxData32 is the largest data length for which the 32-bit suffix
// array representation is used. It is a variable (normally equal to
// realMaxData32) so tests can lower it to exercise the 64-bit path
// on small inputs.
var maxData32 int = realMaxData32

// realMaxData32 is the largest data length whose indexes all fit in an int32.
const realMaxData32 = math.MaxInt32
// Index implements a suffix array for fast substring search.
type Index struct {
	data []byte // the indexed data, returned by Bytes
	sa   ints   // suffix array for data; sa.len() == len(data)
}
// An ints is either an []int32 or an []int64.
// That is, one of them is empty, and one is the real data.
// The int64 form is used when len(data) > maxData32.
// Methods on ints dispatch on which slice is in use.
type ints struct {
	int32 []int32
	int64 []int64
}
// len returns the number of stored indexes.
// One of the two slices is always empty, so the sum is the length of
// whichever representation is in use.
func (a *ints) len() int {
	return len(a.int32) + len(a.int64)
}
// get returns the i'th stored index, widened to int64.
func (a *ints) get(i int) int64 {
	if s := a.int32; s != nil {
		return int64(s[i])
	}
	return a.int64[i]
}
// set stores v as the i'th index, narrowing to int32 when that is the
// representation in use.
func (a *ints) set(i int, v int64) {
	if s := a.int32; s != nil {
		s[i] = int32(v)
		return
	}
	a.int64[i] = v
}
// slice returns the sub-range [i, j) of a, in the same representation.
func (a *ints) slice(i, j int) ints {
	if s := a.int32; s != nil {
		return ints{int32: s[i:j]}
	}
	return ints{int64: a.int64[i:j]}
}
// New creates a new [Index] for data.
// [Index] creation time is O(N) for N = len(data).
func New(data []byte) *Index {
	ix := &Index{data: data}
	n := len(data)
	// Use the 32-bit suffix array representation when every index
	// fits in an int32, and the 64-bit one otherwise.
	if n <= maxData32 {
		ix.sa.int32 = make([]int32, n)
		text_32(data, ix.sa.int32)
	} else {
		ix.sa.int64 = make([]int64, n)
		text_64(data, ix.sa.int64)
	}
	return ix
}
// writeInt writes an int x to w using buf to buffer the write.
func writeInt(w io.Writer, buf []byte, x int) error {
binary.PutVarint(buf, int64(x))
_, err := w.Write(buf[0:binary.MaxVarintLen64])
return err
}
// readInt reads an int x from r using buf to buffer the read and returns x.
func readInt(r io.Reader, buf []byte) (int64, error) {
_, err := io.ReadFull(r, buf[0:binary.MaxVarintLen64]) // ok to continue with error
x, _ := binary.Varint(buf)
return x, err
}
// writeSlice writes data[:n] to w and returns n.
// It uses buf to buffer the write: the chunk begins with a varint
// byte count in the first binary.MaxVarintLen64 bytes, followed by as
// many uvarint-encoded elements as fit in the rest of buf.
func writeSlice(w io.Writer, buf []byte, data ints) (n int, err error) {
	// Pack elements after the (not yet written) size prefix until the
	// next element might not fit.
	p := binary.MaxVarintLen64
	for total := data.len(); n < total && p+binary.MaxVarintLen64 <= len(buf); n++ {
		p += binary.PutUvarint(buf[p:], uint64(data.get(n)))
	}

	// Record the total chunk size in the prefix, then write the chunk.
	binary.PutVarint(buf, int64(p))
	_, err = w.Write(buf[:p])
	return
}
// errTooBig is returned when a serialized index declares a length or
// chunk size that this implementation never writes and cannot handle.
var errTooBig = errors.New("suffixarray: data too large")
// readSlice reads data[:n] from r and returns n.
// It uses buf to buffer the read. The chunk layout is the one
// produced by writeSlice: a varint byte count in the first
// binary.MaxVarintLen64 bytes, then uvarint-encoded elements.
func readSlice(r io.Reader, buf []byte, data ints) (n int, err error) {
	// read buffer size
	var size64 int64
	size64, err = readInt(r, buf)
	if err != nil {
		return
	}
	// We never write chunks whose size overflows an int, is smaller
	// than the size prefix itself, or exceeds the buffer, so such a
	// size can only come from corrupt input. Reject it here rather
	// than panic on the buf slicing below.
	if int64(int(size64)) != size64 || size64 < binary.MaxVarintLen64 || int(size64) > len(buf) {
		return 0, errTooBig
	}
	size := int(size64)

	// read buffer w/o the size
	if _, err = io.ReadFull(r, buf[binary.MaxVarintLen64:size]); err != nil {
		return
	}

	// decode as many elements as present in buf
	for p := binary.MaxVarintLen64; p < size; n++ {
		x, w := binary.Uvarint(buf[p:])
		data.set(n, int64(x))
		p += w
	}
	return
}
// bufSize is the size of the scratch buffer used by Read and Write
// (reasonable for BenchmarkSaveRestore).
const bufSize = 16 << 10
// Read reads the index from r into x; x must not be nil.
func (x *Index) Read(r io.Reader) error {
	// buffer for all reads
	buf := make([]byte, bufSize)

	// read length
	n64, err := readInt(r, buf)
	if err != nil {
		return err
	}
	if int64(int(n64)) != n64 || int(n64) < 0 {
		// Length does not fit in an int on this platform;
		// we never write such an index.
		return errTooBig
	}
	n := int(n64)

	// allocate space: reallocate when the new data does not fit the
	// existing buffers, is less than half their capacity, or requires
	// switching between the int32 and int64 representations.
	if 2*n < cap(x.data) || cap(x.data) < n || x.sa.int32 != nil && n > maxData32 || x.sa.int64 != nil && n <= maxData32 {
		// new data is significantly smaller or larger than
		// existing buffers - allocate new ones
		x.data = make([]byte, n)
		x.sa.int32 = nil
		x.sa.int64 = nil
		if n <= maxData32 {
			x.sa.int32 = make([]int32, n)
		} else {
			x.sa.int64 = make([]int64, n)
		}
	} else {
		// re-use existing buffers
		x.data = x.data[0:n]
		x.sa = x.sa.slice(0, n)
	}

	// read data
	if _, err := io.ReadFull(r, x.data); err != nil {
		return err
	}

	// read index, chunk by chunk; each chunk fills a prefix of sa.
	sa := x.sa
	for sa.len() > 0 {
		n, err := readSlice(r, buf, sa)
		if err != nil {
			return err
		}
		sa = sa.slice(n, sa.len())
	}
	return nil
}
// Write writes the index x to w.
func (x *Index) Write(w io.Writer) error {
	// Scratch buffer shared by all writes.
	buf := make([]byte, bufSize)

	// Length prefix, then the raw data.
	if err := writeInt(w, buf, len(x.data)); err != nil {
		return err
	}
	if _, err := w.Write(x.data); err != nil {
		return err
	}

	// Suffix array, chunk by chunk.
	for rest := x.sa; rest.len() > 0; {
		n, err := writeSlice(w, buf, rest)
		if err != nil {
			return err
		}
		rest = rest.slice(n, rest.len())
	}
	return nil
}
// Bytes returns the data over which the index was created.
// It must not be modified.
func (x *Index) Bytes() []byte {
	return x.data
}
// at returns the suffix of the data starting at the i'th entry of the
// suffix array, i.e. the i'th smallest suffix.
func (x *Index) at(i int) []byte {
	return x.data[x.sa.get(i):]
}
// lookupAll returns a slice into the matching region of the index.
// The runtime is O(log(N)*len(s)).
func (x *Index) lookupAll(s []byte) ints {
	// Binary-search for the window of suffixes having s as a prefix:
	// lo is the first suffix that sorts >= s (the first possible match),
	// and hi is the first suffix at or after lo that does not start with s.
	lo := sort.Search(x.sa.len(), func(i int) bool { return bytes.Compare(x.at(i), s) >= 0 })
	hi := lo + sort.Search(x.sa.len()-lo, func(i int) bool { return !bytes.HasPrefix(x.at(i+lo), s) })
	return x.sa.slice(lo, hi)
}
// Lookup returns an unsorted list of at most n indices where the byte string s
// occurs in the indexed data. If n < 0, all occurrences are returned.
// The result is nil if s is empty, s is not found, or n == 0.
// Lookup time is O(log(N)*len(s) + len(result)) where N is the
// size of the indexed data.
func (x *Index) Lookup(s []byte, n int) (result []int) {
	if len(s) == 0 || n == 0 {
		return nil
	}
	matches := x.lookupAll(s)
	count := matches.len()
	if n < 0 || count < n {
		n = count
	}
	// Here 0 <= n <= count.
	if n == 0 {
		return nil
	}
	// Copy the first n matches out of whichever representation is in use.
	result = make([]int, n)
	if m := matches.int32; m != nil {
		for i := range result {
			result[i] = int(m[i])
		}
	} else {
		for i := range result {
			result[i] = int(matches.int64[i])
		}
	}
	return result
}
// FindAllIndex returns a sorted list of non-overlapping matches of the
// regular expression r, where a match is a pair of indices specifying
// the matched slice of x.Bytes(). If n < 0, all matches are returned
// in successive order. Otherwise, at most n matches are returned and
// they may not be successive. The result is nil if there are no matches,
// or if n == 0.
func (x *Index) FindAllIndex(r *regexp.Regexp, n int) (result [][]int) {
	// a non-empty literal prefix is used to determine possible
	// match start indices with Lookup
	prefix, complete := r.LiteralPrefix()
	lit := []byte(prefix)

	// worst-case scenario: no literal prefix, so fall back to the
	// plain regexp scan over the whole data
	if prefix == "" {
		return r.FindAllIndex(x.data, n)
	}

	// if regexp is a literal just use Lookup and convert its
	// result into match pairs
	if complete {
		// Lookup returns indices that may belong to overlapping matches.
		// After eliminating them, we may end up with fewer than n matches.
		// If we don't have enough at the end, redo the search with an
		// increased value n1, but only if Lookup returned all the requested
		// indices in the first place (if it returned fewer than that then
		// there cannot be more).
		for n1 := n; ; n1 += 2 * (n - len(result)) /* overflow ok */ {
			indices := x.Lookup(lit, n1)
			if len(indices) == 0 {
				return
			}
			slices.Sort(indices)
			// pairs is a single backing array for all match pairs,
			// sliced two elements at a time into result.
			pairs := make([]int, 2*len(indices))
			result = make([][]int, len(indices))
			count := 0
			prev := 0
			for _, i := range indices {
				if count == n {
					break
				}
				// ignore indices leading to overlapping matches
				if prev <= i {
					j := 2 * count
					pairs[j+0] = i
					pairs[j+1] = i + len(lit)
					result[count] = pairs[j : j+2]
					count++
					prev = i + len(lit)
				}
			}
			result = result[0:count]
			if len(result) >= n || len(indices) != n1 {
				// found all matches or there's no chance to find more
				// (n and n1 can be negative)
				break
			}
		}
		if len(result) == 0 {
			result = nil
		}
		return
	}

	// regexp has a non-empty literal prefix; Lookup(lit) computes
	// the indices of possible complete matches; use these as starting
	// points for anchored searches
	// (regexp "^" matches beginning of input, not beginning of line)
	r = regexp.MustCompile("^" + r.String()) // compiles because r compiled

	// same comment about Lookup applies here as in the loop above
	for n1 := n; ; n1 += 2 * (n - len(result)) /* overflow ok */ {
		indices := x.Lookup(lit, n1)
		if len(indices) == 0 {
			return
		}
		slices.Sort(indices)
		result = result[0:0]
		prev := 0
		for _, i := range indices {
			if len(result) == n {
				break
			}
			m := r.FindIndex(x.data[i:]) // anchored search - will not run off
			// ignore indices leading to overlapping matches
			if m != nil && prev <= i {
				m[0] = i // correct m to be relative to the whole data
				m[1] += i
				result = append(result, m)
				prev = m[1]
			}
		}
		if len(result) >= n || len(indices) != n1 {
			// found all matches or there's no chance to find more
			// (n and n1 can be negative)
			break
		}
	}
	if len(result) == 0 {
		result = nil
	}
	return
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package io provides basic interfaces to I/O primitives.
// Its primary job is to wrap existing implementations of such primitives,
// such as those in package os, into shared public interfaces that
// abstract the functionality, plus some other related primitives.
//
// Because these interfaces and primitives wrap lower-level operations with
// various implementations, unless otherwise informed clients should not
// assume they are safe for parallel execution.
package io
import (
"errors"
"sync"
)
// Seek whence values.
// These constants are the valid values for the whence argument of [Seeker.Seek].
const (
	SeekStart   = 0 // seek relative to the origin of the file
	SeekCurrent = 1 // seek relative to the current offset
	SeekEnd     = 2 // seek relative to the end
)

// ErrShortWrite means that a write accepted fewer bytes than requested
// but failed to return an explicit error.
var ErrShortWrite = errors.New("short write")

// errInvalidWrite means that a write returned an impossible count
// (negative, or larger than the amount of data supplied).
var errInvalidWrite = errors.New("invalid write result")

// ErrShortBuffer means that a read required a longer buffer than was provided.
var ErrShortBuffer = errors.New("short buffer")

// EOF is the error returned by Read when no more input is available.
// (Read must return EOF itself, not an error wrapping EOF,
// because callers will test for EOF using ==.)
// Functions should return EOF only to signal a graceful end of input.
// If the EOF occurs unexpectedly in a structured data stream,
// the appropriate error is either [ErrUnexpectedEOF] or some other error
// giving more detail.
var EOF = errors.New("EOF")

// ErrUnexpectedEOF means that EOF was encountered in the
// middle of reading a fixed-size block or data structure.
var ErrUnexpectedEOF = errors.New("unexpected EOF")

// ErrNoProgress is returned by some clients of a [Reader] when
// many calls to Read have failed to return any data or error,
// usually the sign of a broken [Reader] implementation.
var ErrNoProgress = errors.New("multiple Read calls return no data or error")

// Reader is the interface that wraps the basic Read method.
//
// Read reads up to len(p) bytes into p. It returns the number of bytes
// read (0 <= n <= len(p)) and any error encountered. Even if Read
// returns n < len(p), it may use all of p as scratch space during the call.
// If some data is available but not len(p) bytes, Read conventionally
// returns what is available instead of waiting for more.
//
// When Read encounters an error or end-of-file condition after
// successfully reading n > 0 bytes, it returns the number of
// bytes read. It may return the (non-nil) error from the same call
// or return the error (and n == 0) from a subsequent call.
// An instance of this general case is that a Reader returning
// a non-zero number of bytes at the end of the input stream may
// return either err == EOF or err == nil. The next Read should
// return 0, EOF.
//
// Callers should always process the n > 0 bytes returned before
// considering the error err. Doing so correctly handles I/O errors
// that happen after reading some bytes and also both of the
// allowed EOF behaviors.
//
// If len(p) == 0, Read should always return n == 0. It may return a
// non-nil error if some error condition is known, such as EOF.
//
// Implementations of Read are discouraged from returning a
// zero byte count with a nil error, except when len(p) == 0.
// Callers should treat a return of 0 and nil as indicating that
// nothing happened; in particular it does not indicate EOF.
//
// Implementations must not retain p.
type Reader interface {
	Read(p []byte) (n int, err error)
}

// Writer is the interface that wraps the basic Write method.
//
// Write writes len(p) bytes from p to the underlying data stream.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
// Write must return a non-nil error if it returns n < len(p).
// Write must not modify the slice data, even temporarily.
//
// Implementations must not retain p.
type Writer interface {
	Write(p []byte) (n int, err error)
}

// Closer is the interface that wraps the basic Close method.
//
// The behavior of Close after the first call is undefined.
// Specific implementations may document their own behavior.
type Closer interface {
	Close() error
}

// Seeker is the interface that wraps the basic Seek method.
//
// Seek sets the offset for the next Read or Write to offset,
// interpreted according to whence:
// [SeekStart] means relative to the start of the file,
// [SeekCurrent] means relative to the current offset, and
// [SeekEnd] means relative to the end
// (for example, offset = -2 specifies the penultimate byte of the file).
// Seek returns the new offset relative to the start of the
// file or an error, if any.
//
// Seeking to an offset before the start of the file is an error.
// Seeking to any positive offset may be allowed, but if the new offset exceeds
// the size of the underlying object the behavior of subsequent I/O operations
// is implementation-dependent.
type Seeker interface {
	Seek(offset int64, whence int) (int64, error)
}

// Compound interfaces: each groups the basic single-method interfaces above.

// ReadWriter is the interface that groups the basic Read and Write methods.
type ReadWriter interface {
	Reader
	Writer
}

// ReadCloser is the interface that groups the basic Read and Close methods.
type ReadCloser interface {
	Reader
	Closer
}

// WriteCloser is the interface that groups the basic Write and Close methods.
type WriteCloser interface {
	Writer
	Closer
}

// ReadWriteCloser is the interface that groups the basic Read, Write and Close methods.
type ReadWriteCloser interface {
	Reader
	Writer
	Closer
}

// ReadSeeker is the interface that groups the basic Read and Seek methods.
type ReadSeeker interface {
	Reader
	Seeker
}

// ReadSeekCloser is the interface that groups the basic Read, Seek and Close
// methods.
type ReadSeekCloser interface {
	Reader
	Seeker
	Closer
}

// WriteSeeker is the interface that groups the basic Write and Seek methods.
type WriteSeeker interface {
	Writer
	Seeker
}

// ReadWriteSeeker is the interface that groups the basic Read, Write and Seek methods.
type ReadWriteSeeker interface {
	Reader
	Writer
	Seeker
}

// ReaderFrom is the interface that wraps the ReadFrom method.
//
// ReadFrom reads data from r until EOF or error.
// The return value n is the number of bytes read.
// Any error except EOF encountered during the read is also returned.
//
// The [Copy] function uses [ReaderFrom] if available.
type ReaderFrom interface {
	ReadFrom(r Reader) (n int64, err error)
}

// WriterTo is the interface that wraps the WriteTo method.
//
// WriteTo writes data to w until there's no more data to write or
// when an error occurs. The return value n is the number of bytes
// written. Any error encountered during the write is also returned.
//
// The [Copy] function uses [WriterTo] if available.
type WriterTo interface {
	WriteTo(w Writer) (n int64, err error)
}

// ReaderAt is the interface that wraps the basic ReadAt method.
//
// ReadAt reads len(p) bytes into p starting at offset off in the
// underlying input source. It returns the number of bytes
// read (0 <= n <= len(p)) and any error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error
// explaining why more bytes were not returned. In this respect,
// ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the
// input source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset,
// ReadAt should not affect nor be affected by the underlying
// seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the
// same input source.
//
// Implementations must not retain p.
type ReaderAt interface {
	ReadAt(p []byte, off int64) (n int, err error)
}

// WriterAt is the interface that wraps the basic WriteAt method.
//
// WriteAt writes len(p) bytes from p to the underlying data stream
// at offset off. It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
// WriteAt must return a non-nil error if it returns n < len(p).
//
// If WriteAt is writing to a destination with a seek offset,
// WriteAt should not affect nor be affected by the underlying
// seek offset.
//
// Clients of WriteAt can execute parallel WriteAt calls on the same
// destination if the ranges do not overlap.
//
// Implementations must not retain p.
type WriterAt interface {
	WriteAt(p []byte, off int64) (n int, err error)
}

// ByteReader is the interface that wraps the ReadByte method.
//
// ReadByte reads and returns the next byte from the input or
// any error encountered. If ReadByte returns an error, no input
// byte was consumed, and the returned byte value is undefined.
//
// ReadByte provides an efficient interface for byte-at-time
// processing. A [Reader] that does not implement ByteReader
// can be wrapped using bufio.NewReader to add this method.
type ByteReader interface {
	ReadByte() (byte, error)
}

// ByteScanner is the interface that adds the UnreadByte method to the
// basic ReadByte method.
//
// UnreadByte causes the next call to ReadByte to return the last byte read.
// If the last operation was not a successful call to ReadByte, UnreadByte may
// return an error, unread the last byte read (or the byte prior to the
// last-unread byte), or (in implementations that support the [Seeker] interface)
// seek to one byte before the current offset.
type ByteScanner interface {
	ByteReader
	UnreadByte() error
}

// ByteWriter is the interface that wraps the WriteByte method.
type ByteWriter interface {
	WriteByte(c byte) error
}

// RuneReader is the interface that wraps the ReadRune method.
//
// ReadRune reads a single encoded Unicode character
// and returns the rune and its size in bytes. If no character is
// available, err will be set.
type RuneReader interface {
	ReadRune() (r rune, size int, err error)
}

// RuneScanner is the interface that adds the UnreadRune method to the
// basic ReadRune method.
//
// UnreadRune causes the next call to ReadRune to return the last rune read.
// If the last operation was not a successful call to ReadRune, UnreadRune may
// return an error, unread the last rune read (or the rune prior to the
// last-unread rune), or (in implementations that support the [Seeker] interface)
// seek to the start of the rune before the current offset.
type RuneScanner interface {
	RuneReader
	UnreadRune() error
}

// StringWriter is the interface that wraps the WriteString method.
type StringWriter interface {
	WriteString(s string) (n int, err error)
}
// WriteString writes the contents of the string s to w, which accepts a slice of bytes.
// If w implements [StringWriter], [StringWriter.WriteString] is invoked directly.
// Otherwise, [Writer.Write] is called exactly once.
func WriteString(w Writer, s string) (n int, err error) {
	sw, ok := w.(StringWriter)
	if ok {
		// Fast path: skip the []byte conversion (and its copy).
		return sw.WriteString(s)
	}
	return w.Write([]byte(s))
}
// ReadAtLeast reads from r into buf until it has read at least min bytes.
// It returns the number of bytes copied and an error if fewer bytes were read.
// The error is EOF only if no bytes were read.
// If an EOF happens after reading fewer than min bytes,
// ReadAtLeast returns [ErrUnexpectedEOF].
// If min is greater than the length of buf, ReadAtLeast returns [ErrShortBuffer].
// On return, n >= min if and only if err == nil.
// If r returns an error having read at least min bytes, the error is dropped.
func ReadAtLeast(r Reader, buf []byte, min int) (n int, err error) {
	if len(buf) < min {
		return 0, ErrShortBuffer
	}
	// Keep reading into the unfilled tail of buf until we have enough
	// bytes or the reader reports an error.
	for n < min && err == nil {
		var k int
		k, err = r.Read(buf[n:])
		n += k
	}
	switch {
	case n >= min:
		// Enough data arrived; any trailing error is dropped by contract.
		err = nil
	case n > 0 && err == EOF:
		// Partial read ended in EOF: report it as unexpected.
		err = ErrUnexpectedEOF
	}
	return
}
// ReadFull reads exactly len(buf) bytes from r into buf.
// It returns the number of bytes copied and an error if fewer bytes were read.
// The error is EOF only if no bytes were read.
// If an EOF happens after reading some but not all the bytes,
// ReadFull returns [ErrUnexpectedEOF].
// On return, n == len(buf) if and only if err == nil.
// If r returns an error having read at least len(buf) bytes, the error is dropped.
func ReadFull(r Reader, buf []byte) (n int, err error) {
	// ReadAtLeast with min == len(buf) yields the exact-fill semantics.
	return ReadAtLeast(r, buf, len(buf))
}
// CopyN copies n bytes (or until an error) from src to dst.
// It returns the number of bytes copied and the earliest
// error encountered while copying.
// On return, written == n if and only if err == nil.
//
// If dst implements [ReaderFrom], the copy is implemented using it.
func CopyN(dst Writer, src Reader, n int64) (written int64, err error) {
	written, err = Copy(dst, LimitReader(src, n))
	switch {
	case written == n:
		// Full count transferred; drop any trailing error by contract.
		err = nil
	case err == nil:
		// written < n with no error: src ran dry early, i.e. EOF.
		err = EOF
	}
	return
}
// Copy copies from src to dst until either EOF is reached
// on src or an error occurs. It returns the number of bytes
// copied and the first error encountered while copying, if any.
//
// A successful Copy returns err == nil, not err == EOF.
// Because Copy is defined to read from src until EOF, it does
// not treat an EOF from Read as an error to be reported.
//
// If src implements [WriterTo],
// the copy is implemented by calling src.WriteTo(dst).
// Otherwise, if dst implements [ReaderFrom],
// the copy is implemented by calling dst.ReadFrom(src).
func Copy(dst Writer, src Reader) (written int64, err error) {
	// nil buffer: copyBuffer allocates one only if neither fast path applies.
	return copyBuffer(dst, src, nil)
}
// CopyBuffer is identical to Copy except that it stages through the
// provided buffer (if one is required) rather than allocating a
// temporary one. If buf is nil, one is allocated; otherwise if it has
// zero length, CopyBuffer panics.
//
// If either src implements [WriterTo] or dst implements [ReaderFrom],
// buf will not be used to perform the copy.
func CopyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error) {
	// A non-nil, zero-length buffer is a caller bug: it would make the
	// copy loop spin forever without progress.
	if len(buf) == 0 && buf != nil {
		panic("empty buffer in CopyBuffer")
	}
	return copyBuffer(dst, src, buf)
}
// copyBuffer is the actual implementation of Copy and CopyBuffer.
// if buf is nil, one is allocated.
func copyBuffer(dst Writer, src Reader, buf []byte) (written int64, err error) {
	// If the reader has a WriteTo method, use it to do the copy.
	// Avoids an allocation and a copy.
	if wt, ok := src.(WriterTo); ok {
		return wt.WriteTo(dst)
	}
	// Similarly, if the writer has a ReadFrom method, use it to do the copy.
	if rf, ok := dst.(ReaderFrom); ok {
		return rf.ReadFrom(src)
	}
	if buf == nil {
		size := 32 * 1024
		// For a LimitedReader with a smaller remaining count, shrink the
		// buffer to avoid over-allocating (minimum 1 byte).
		if l, ok := src.(*LimitedReader); ok && int64(size) > l.N {
			if l.N < 1 {
				size = 1
			} else {
				size = int(l.N)
			}
		}
		buf = make([]byte, size)
	}
	for {
		nr, er := src.Read(buf)
		if nr > 0 {
			nw, ew := dst.Write(buf[0:nr])
			// Sanity-check the writer: a negative count or one exceeding
			// what was passed in is an impossible result.
			if nw < 0 || nr < nw {
				nw = 0
				if ew == nil {
					ew = errInvalidWrite
				}
			}
			written += int64(nw)
			if ew != nil {
				err = ew
				break
			}
			// A short write with no error is still an error for the copy.
			if nr != nw {
				err = ErrShortWrite
				break
			}
		}
		if er != nil {
			// EOF from the source ends the copy successfully; any other
			// read error is reported.
			if er != EOF {
				err = er
			}
			break
		}
	}
	return written, err
}
// LimitReader returns a Reader that reads from r
// but stops with EOF after n bytes.
// The underlying implementation is a *LimitedReader.
func LimitReader(r Reader, n int64) Reader { return &LimitedReader{r, n} }

// A LimitedReader reads from R but limits the amount of
// data returned to just N bytes. Each call to Read
// updates N to reflect the new amount remaining.
// Read returns EOF when N <= 0 or when the underlying R returns EOF.
type LimitedReader struct {
	R Reader // underlying reader
	N int64  // max bytes remaining; decremented by each Read
}
// Read reads from the underlying reader, returning EOF once the
// remaining byte budget l.N is exhausted.
func (l *LimitedReader) Read(p []byte) (n int, err error) {
	remaining := l.N
	if remaining <= 0 {
		return 0, EOF
	}
	// Never ask the underlying reader for more than the budget allows.
	if int64(len(p)) > remaining {
		p = p[:remaining]
	}
	n, err = l.R.Read(p)
	l.N -= int64(n)
	return n, err
}
// NewSectionReader returns a [SectionReader] that reads from r
// starting at offset off and stops with EOF after n bytes.
func NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader {
	const maxint64 = 1<<63 - 1
	// limit is the absolute offset just past the section. If off+n would
	// overflow int64 there is no way to report an error here, so clamp to
	// the maximum representable offset.
	limit := int64(maxint64)
	if off <= maxint64-n {
		limit = off + n
	}
	return &SectionReader{r: r, base: off, off: off, limit: limit, n: n}
}
// SectionReader implements Read, Seek, and ReadAt on a section
// of an underlying [ReaderAt].
type SectionReader struct {
	r     ReaderAt // constant after creation
	base  int64    // constant after creation; absolute start of the section
	off   int64    // current absolute read offset within [base, limit]
	limit int64    // constant after creation; absolute end of the section
	n     int64    // constant after creation; requested length, reported by Outer
}
// Read reads from the current position within the section, advancing the
// offset and returning EOF at the section's end.
func (s *SectionReader) Read(p []byte) (n int, err error) {
	avail := s.limit - s.off
	if avail <= 0 {
		return 0, EOF
	}
	// Clip the request so it cannot run past the section boundary.
	if int64(len(p)) > avail {
		p = p[:avail]
	}
	n, err = s.r.ReadAt(p, s.off)
	s.off += int64(n)
	return n, err
}
// Errors returned by the Seek implementations in this file.
var errWhence = errors.New("Seek: invalid whence")
var errOffset = errors.New("Seek: invalid offset")
// Seek implements [Seeker], interpreting offsets relative to the section
// (not the underlying ReaderAt). The returned position is section-relative.
func (s *SectionReader) Seek(offset int64, whence int) (int64, error) {
	var abs int64
	switch whence {
	case SeekStart:
		abs = s.base + offset
	case SeekCurrent:
		abs = s.off + offset
	case SeekEnd:
		abs = s.limit + offset
	default:
		return 0, errWhence
	}
	// Seeking before the start of the section is an error; seeking past
	// the end is allowed (subsequent reads simply return EOF).
	if abs < s.base {
		return 0, errOffset
	}
	s.off = abs
	return abs - s.base, nil
}
// ReadAt implements [ReaderAt] on the section. off is section-relative.
// It does not affect, and is not affected by, the Seek/Read offset.
func (s *SectionReader) ReadAt(p []byte, off int64) (n int, err error) {
	if off < 0 || off >= s.Size() {
		return 0, EOF
	}
	// Translate the section-relative offset into an absolute one.
	off += s.base
	if max := s.limit - off; int64(len(p)) > max {
		// Request extends past the section: truncate, and per the
		// ReaderAt contract a short read must carry a non-nil error.
		p = p[0:max]
		n, err = s.r.ReadAt(p, off)
		if err == nil {
			err = EOF
		}
		return n, err
	}
	return s.r.ReadAt(p, off)
}
// Size returns the size of the section in bytes.
// This is the (possibly overflow-clamped) limit minus the base,
// not necessarily the n passed to NewSectionReader.
func (s *SectionReader) Size() int64 { return s.limit - s.base }

// Outer returns the underlying [ReaderAt] and offsets for the section.
//
// The returned values are the same that were passed to [NewSectionReader]
// when the [SectionReader] was created.
func (s *SectionReader) Outer() (r ReaderAt, off int64, n int64) {
	return s.r, s.base, s.n
}
// An OffsetWriter maps writes at offset base to offset base+off in the underlying writer.
type OffsetWriter struct {
	w    WriterAt
	base int64 // the original offset
	off  int64 // the current offset, advanced by Write and set by Seek
}

// NewOffsetWriter returns an [OffsetWriter] that writes to w
// starting at offset off.
func NewOffsetWriter(w WriterAt, off int64) *OffsetWriter {
	return &OffsetWriter{w, off, off}
}

// Write writes at the current offset and advances it by the number of
// bytes actually written.
func (o *OffsetWriter) Write(p []byte) (n int, err error) {
	n, err = o.w.WriteAt(p, o.off)
	o.off += int64(n)
	return
}
// WriteAt writes at the given offset relative to the writer's base.
// Negative offsets are rejected. It does not move the current offset.
func (o *OffsetWriter) WriteAt(p []byte, off int64) (n int, err error) {
	if off < 0 {
		return 0, errOffset
	}
	return o.w.WriteAt(p, o.base+off)
}
// Seek sets the offset for the next Write. Only [SeekStart] and
// [SeekCurrent] are supported; there is no known end to seek from.
// The returned position is relative to the writer's base.
func (o *OffsetWriter) Seek(offset int64, whence int) (int64, error) {
	var abs int64
	switch whence {
	case SeekStart:
		abs = o.base + offset
	case SeekCurrent:
		abs = o.off + offset
	default:
		return 0, errWhence
	}
	if abs < o.base {
		return 0, errOffset
	}
	o.off = abs
	return abs - o.base, nil
}
// TeeReader returns a [Reader] that writes to w what it reads from r.
// All reads from r performed through it are matched with
// corresponding writes to w. There is no internal buffering -
// the write must complete before the read completes.
// Any error encountered while writing is reported as a read error.
func TeeReader(r Reader, w Writer) Reader {
	return &teeReader{r, w}
}

// teeReader is the concrete implementation returned by TeeReader.
type teeReader struct {
	r Reader // source of reads
	w Writer // receives a copy of every byte read
}
// Read reads from the underlying reader and mirrors whatever was read
// into the tee's writer before returning. A write failure is surfaced
// as the read error.
func (t *teeReader) Read(p []byte) (n int, err error) {
	n, err = t.r.Read(p)
	if n > 0 {
		if wn, werr := t.w.Write(p[:n]); werr != nil {
			return wn, werr
		}
	}
	return n, err
}
// Discard is a [Writer] on which all Write calls succeed
// without doing anything.
var Discard Writer = discard{}

// discard is the zero-size type backing Discard.
type discard struct{}

// discard implements ReaderFrom as an optimization so Copy to
// io.Discard can avoid doing unnecessary work.
var _ ReaderFrom = discard{}

// Write reports success for the full input without writing anywhere.
func (discard) Write(p []byte) (int, error) {
	return len(p), nil
}

// WriteString reports success without converting s to a []byte.
func (discard) WriteString(s string) (int, error) {
	return len(s), nil
}

// blackHolePool supplies scratch buffers for discard.ReadFrom so that
// concurrent copies to Discard do not each allocate their own buffer.
var blackHolePool = sync.Pool{
	New: func() any {
		b := make([]byte, 8192)
		return &b
	},
}
// ReadFrom drains r to completion, counting the bytes read. It uses a
// pooled scratch buffer and never retains the data.
func (discard) ReadFrom(r Reader) (n int64, err error) {
	bufp := blackHolePool.Get().(*[]byte)
	readSize := 0
	for {
		readSize, err = r.Read(*bufp)
		n += int64(readSize)
		if err != nil {
			// Return the buffer to the pool on every exit path.
			blackHolePool.Put(bufp)
			if err == EOF {
				// Draining to EOF is success, per the ReaderFrom contract.
				return n, nil
			}
			return
		}
	}
}
// NopCloser returns a [ReadCloser] with a no-op Close method wrapping
// the provided [Reader] r.
// If r implements [WriterTo], the returned [ReadCloser] will implement [WriterTo]
// by forwarding calls to r.
func NopCloser(r Reader) ReadCloser {
	// Pick the wrapper that preserves r's WriterTo fast path, if any.
	if _, ok := r.(WriterTo); ok {
		return nopCloserWriterTo{r}
	}
	return nopCloser{r}
}

// nopCloser embeds the Reader and adds a Close that does nothing.
type nopCloser struct {
	Reader
}

func (nopCloser) Close() error { return nil }

// nopCloserWriterTo is like nopCloser but also forwards WriteTo,
// so Copy can still use the wrapped reader's fast path.
type nopCloserWriterTo struct {
	Reader
}

func (nopCloserWriterTo) Close() error { return nil }

func (c nopCloserWriterTo) WriteTo(w Writer) (n int64, err error) {
	// Safe: NopCloser only builds this type when the assertion holds.
	return c.Reader.(WriterTo).WriteTo(w)
}
// ReadAll reads from r until an error or EOF and returns the data it read.
// A successful call returns err == nil, not err == EOF. Because ReadAll is
// defined to read from src until EOF, it does not treat an EOF from Read
// as an error to be reported.
func ReadAll(r Reader) ([]byte, error) {
	// Build slices of exponentially growing size,
	// then copy into a perfectly-sized slice at the end.
	b := make([]byte, 0, 512)
	// Starting with next equal to 256 (instead of say 512 or 1024)
	// allows less memory usage for small inputs that finish in the
	// early growth stages, but we grow the read sizes quickly such that
	// it does not materially impact medium or large inputs.
	next := 256
	chunks := make([][]byte, 0, 4)
	// Invariant: finalSize = sum(len(c) for c in chunks)
	var finalSize int
	for {
		// Read into the spare capacity of the current chunk.
		n, err := r.Read(b[len(b):cap(b)])
		b = b[:len(b)+n]
		if err != nil {
			if err == EOF {
				err = nil
			}
			// Single-chunk fast path: return the chunk directly.
			if len(chunks) == 0 {
				return b, err
			}
			// Build our final right-sized slice.
			finalSize += len(b)
			final := append([]byte(nil), make([]byte, finalSize)...)[:0]
			for _, chunk := range chunks {
				final = append(final, chunk...)
			}
			final = append(final, b...)
			return final, err
		}
		// When the chunk is nearly full (less than 1/16 spare), retire it
		// and start a new, larger one rather than reading tiny tails.
		if cap(b)-len(b) < cap(b)/16 {
			// Move to the next intermediate slice.
			chunks = append(chunks, b)
			finalSize += len(b)
			b = append([]byte(nil), make([]byte, next)...)[:0]
			next += next / 2
		}
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ioutil implements some I/O utility functions.
//
// Deprecated: As of Go 1.16, the same functionality is now provided
// by package [io] or package [os], and those implementations
// should be preferred in new code.
// See the specific function documentation for details.
package ioutil
import (
"io"
"io/fs"
"os"
"slices"
"strings"
)
// ReadAll reads from r until an error or EOF and returns the data it read.
// A successful call returns err == nil, not err == EOF. Because ReadAll is
// defined to read from src until EOF, it does not treat an EOF from Read
// as an error to be reported.
//
// Deprecated: As of Go 1.16, this function simply calls [io.ReadAll].
//
//go:fix inline
func ReadAll(r io.Reader) ([]byte, error) {
	// Thin compatibility shim; the body must stay a single call for go:fix.
	return io.ReadAll(r)
}
// ReadFile reads the file named by filename and returns the contents.
// A successful call returns err == nil, not err == EOF. Because ReadFile
// reads the whole file, it does not treat an EOF from Read as an error
// to be reported.
//
// Deprecated: As of Go 1.16, this function simply calls [os.ReadFile].
//
//go:fix inline
func ReadFile(filename string) ([]byte, error) {
	// Thin compatibility shim; the body must stay a single call for go:fix.
	return os.ReadFile(filename)
}

// WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm
// (before umask); otherwise WriteFile truncates it before writing, without changing permissions.
//
// Deprecated: As of Go 1.16, this function simply calls [os.WriteFile].
//
//go:fix inline
func WriteFile(filename string, data []byte, perm fs.FileMode) error {
	// Thin compatibility shim; the body must stay a single call for go:fix.
	return os.WriteFile(filename, data, perm)
}
// ReadDir reads the directory named by dirname and returns
// a list of fs.FileInfo for the directory's contents,
// sorted by filename. If an error occurs reading the directory,
// ReadDir returns no directory entries along with the error.
//
// Deprecated: As of Go 1.16, [os.ReadDir] is a more efficient and correct choice:
// it returns a list of [fs.DirEntry] instead of [fs.FileInfo],
// and it returns partial results in the case of an error
// midway through reading a directory.
//
// If you must continue obtaining a list of [fs.FileInfo], you still can:
//
// entries, err := os.ReadDir(dirname)
// if err != nil { ... }
// infos := make([]fs.FileInfo, 0, len(entries))
// for _, entry := range entries {
// info, err := entry.Info()
// if err != nil { ... }
// infos = append(infos, info)
// }
func ReadDir(dirname string) ([]fs.FileInfo, error) {
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
list, err := f.Readdir(-1)
f.Close()
if err != nil {
return nil, err
}
slices.SortFunc(list, func(a, b os.FileInfo) int {
return strings.Compare(a.Name(), b.Name())
})
return list, nil
}
// NopCloser returns a ReadCloser with a no-op Close method wrapping
// the provided Reader r.
//
// Deprecated: As of Go 1.16, this function simply calls [io.NopCloser].
//
//go:fix inline
func NopCloser(r io.Reader) io.ReadCloser {
	// Thin compatibility shim; the body must stay a single call for go:fix.
	return io.NopCloser(r)
}

// Discard is an io.Writer on which all Write calls succeed
// without doing anything.
//
// Deprecated: As of Go 1.16, this value is simply [io.Discard].
var Discard io.Writer = io.Discard
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ioutil
import (
"os"
)
// TempFile creates a new temporary file in the directory dir,
// opens the file for reading and writing, and returns the resulting *[os.File].
// The filename is generated by taking pattern and adding a random
// string to the end. If pattern includes a "*", the random string
// replaces the last "*".
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see [os.TempDir]).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
//
// Deprecated: As of Go 1.17, this function simply calls [os.CreateTemp].
//
//go:fix inline
func TempFile(dir, pattern string) (f *os.File, err error) {
	// Thin compatibility shim; the body must stay a single call for go:fix.
	return os.CreateTemp(dir, pattern)
}

// TempDir creates a new temporary directory in the directory dir.
// The directory name is generated by taking pattern and applying a
// random string to the end. If pattern includes a "*", the random string
// replaces the last "*". TempDir returns the name of the new directory.
// If dir is the empty string, TempDir uses the
// default directory for temporary files (see [os.TempDir]).
// Multiple programs calling TempDir simultaneously
// will not choose the same directory. It is the caller's responsibility
// to remove the directory when no longer needed.
//
// Deprecated: As of Go 1.17, this function simply calls [os.MkdirTemp].
//
//go:fix inline
func TempDir(dir, pattern string) (name string, err error) {
	// Thin compatibility shim; the body must stay a single call for go:fix.
	return os.MkdirTemp(dir, pattern)
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package io
// eofReader is a Reader that is permanently at EOF. multiReader.Read
// installs it in place of exhausted readers so that the flattening
// optimization never leaves a nil Reader behind.
type eofReader struct{}

func (eofReader) Read([]byte) (int, error) {
	return 0, EOF
}

// multiReader is the concatenating Reader returned by MultiReader.
// readers holds the not-yet-exhausted sources, in order.
type multiReader struct {
	readers []Reader
}
// Read reads from the first remaining reader, dropping each reader as it
// reaches EOF and only reporting EOF once every reader is exhausted.
func (mr *multiReader) Read(p []byte) (n int, err error) {
	for len(mr.readers) > 0 {
		// Optimization to flatten nested multiReaders (Issue 13558).
		if len(mr.readers) == 1 {
			if r, ok := mr.readers[0].(*multiReader); ok {
				mr.readers = r.readers
				continue
			}
		}
		n, err = mr.readers[0].Read(p)
		if err == EOF {
			// Use eofReader instead of nil to avoid nil panic
			// after performing flatten (Issue 18232).
			mr.readers[0] = eofReader{} // permit earlier GC
			mr.readers = mr.readers[1:]
		}
		if n > 0 || err != EOF {
			if err == EOF && len(mr.readers) > 0 {
				// Don't return EOF yet. More readers remain.
				err = nil
			}
			return
		}
		// n == 0 && err == EOF: fall through and try the next reader.
	}
	return 0, EOF
}
// WriteTo implements [WriterTo], copying each remaining reader to w in
// order through a single shared buffer.
func (mr *multiReader) WriteTo(w Writer) (sum int64, err error) {
	return mr.writeToWithBuffer(w, make([]byte, 1024*32))
}

// writeToWithBuffer does the work of WriteTo, threading one buffer
// through nested multiReaders so each level does not allocate its own.
func (mr *multiReader) writeToWithBuffer(w Writer, buf []byte) (sum int64, err error) {
	for i, r := range mr.readers {
		var n int64
		if subMr, ok := r.(*multiReader); ok { // reuse buffer with nested multiReaders
			n, err = subMr.writeToWithBuffer(w, buf)
		} else {
			n, err = copyBuffer(w, r, buf)
		}
		sum += n
		if err != nil {
			mr.readers = mr.readers[i:] // permit resume / retry after error
			return sum, err
		}
		mr.readers[i] = nil // permit early GC
	}
	mr.readers = nil
	return sum, nil
}

// Compile-time check that multiReader provides the WriteTo fast path.
var _ WriterTo = (*multiReader)(nil)
// MultiReader returns a Reader that's the logical concatenation of
// the provided input readers. They're read sequentially. Once all
// inputs have returned EOF, Read will return EOF. If any of the readers
// return a non-nil, non-EOF error, Read will return that error.
func MultiReader(readers ...Reader) Reader {
	// Copy the variadic slice so the multiReader owns its backing array
	// and later mutation by the caller cannot affect it.
	owned := append([]Reader(nil), readers...)
	return &multiReader{owned}
}
// multiWriter is the fan-out Writer returned by MultiWriter; every
// write is forwarded to each element of writers in order.
type multiWriter struct {
	writers []Writer
}
// Write forwards p to every underlying writer in order, stopping at the
// first error or short write.
func (t *multiWriter) Write(p []byte) (n int, err error) {
	for _, w := range t.writers {
		if n, err = w.Write(p); err != nil {
			return
		}
		// A short write without an error still aborts the fan-out.
		if n != len(p) {
			return n, ErrShortWrite
		}
	}
	return len(p), nil
}
// Compile-time check that multiWriter offers the WriteString fast path.
var _ StringWriter = (*multiWriter)(nil)

// WriteString forwards s to every underlying writer, using each
// writer's own WriteString when available.
func (t *multiWriter) WriteString(s string) (n int, err error) {
	var p []byte // lazily initialized if/when needed
	for _, w := range t.writers {
		if sw, ok := w.(StringWriter); ok {
			n, err = sw.WriteString(s)
		} else {
			// Convert s only once, and only if some writer lacks WriteString.
			if p == nil {
				p = []byte(s)
			}
			n, err = w.Write(p)
		}
		if err != nil {
			return
		}
		if n != len(s) {
			err = ErrShortWrite
			return
		}
	}
	return len(s), nil
}
// MultiWriter creates a writer that duplicates its writes to all the
// provided writers, similar to the Unix tee(1) command.
//
// Each write is written to each listed writer, one at a time.
// If a listed writer returns an error, that overall write operation
// stops and returns the error; it does not continue down the list.
func MultiWriter(writers ...Writer) Writer {
	var flat []Writer
	for _, w := range writers {
		// Flatten nested multiWriters so writes take a single pass
		// over the full target list.
		if mw, ok := w.(*multiWriter); ok {
			flat = append(flat, mw.writers...)
		} else {
			flat = append(flat, w)
		}
	}
	return &multiWriter{flat}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Pipe adapter to connect code expecting an io.Reader
// with code expecting an io.Writer.
package io
import (
"errors"
"sync"
)
// onceError is an object that will only store an error once.
// Subsequent Store calls are no-ops, so the first recorded error wins.
type onceError struct {
	sync.Mutex // guards following
	err        error
}
// Store records err unless an error has already been stored;
// only the first call has any effect.
func (a *onceError) Store(err error) {
	a.Lock()
	defer a.Unlock()
	if a.err == nil {
		a.err = err
	}
}
// Load returns the stored error, or nil if none has been stored yet.
func (a *onceError) Load() error {
	a.Lock()
	err := a.err
	a.Unlock()
	return err
}
// ErrClosedPipe is the error used for read or write operations on a closed pipe.
// It is returned when the operation's own end was the one closed without error.
var ErrClosedPipe = errors.New("io: read/write on closed pipe")
// A pipe is the shared pipe structure underlying PipeReader and PipeWriter.
// Data crosses via a rendezvous: the writer sends its slice on wrCh and the
// reader replies on rdCh with the number of bytes it consumed.
type pipe struct {
	wrMu sync.Mutex // Serializes Write operations
	wrCh chan []byte
	rdCh chan int
	once sync.Once // Protects closing done
	done chan struct{}
	rerr onceError // first error passed to CloseWithError on the read side
	werr onceError // first error passed to CloseWithError on the write side
}
// read copies bytes from the current writer's pending slice into b and
// reports the count back to the writer via rdCh.
func (p *pipe) read(b []byte) (n int, err error) {
	// Non-blocking check first: if the pipe is already closed, report
	// the close error even if a writer is simultaneously ready.
	select {
	case <-p.done:
		return 0, p.readCloseError()
	default:
	}
	// Block until a writer hands over data or the pipe is closed.
	select {
	case bw := <-p.wrCh:
		nr := copy(b, bw)
		p.rdCh <- nr // tell the writer how much was consumed
		return nr, nil
	case <-p.done:
		return 0, p.readCloseError()
	}
}
// closeRead closes the read side, recording err (or ErrClosedPipe if nil)
// as the error future writers will observe. Always returns nil.
func (p *pipe) closeRead(err error) error {
	if err == nil {
		err = ErrClosedPipe
	}
	// NOTE: the error must be stored before done is closed so that any
	// goroutine unblocked by done sees the error.
	p.rerr.Store(err)
	p.once.Do(func() { close(p.done) })
	return nil
}
// write hands b to one or more readers until it is fully consumed or the
// pipe is closed. wrMu serializes concurrent Writes so each write's bytes
// stay contiguous in the stream.
func (p *pipe) write(b []byte) (n int, err error) {
	select {
	case <-p.done:
		return 0, p.writeCloseError()
	default:
		p.wrMu.Lock()
		defer p.wrMu.Unlock()
	}
	// The "once" guard forces at least one rendezvous even for a
	// zero-length b, so empty writes still pair with a Read.
	for once := true; once || len(b) > 0; once = false {
		select {
		case p.wrCh <- b:
			nw := <-p.rdCh // bytes the reader consumed
			b = b[nw:]
			n += nw
		case <-p.done:
			return n, p.writeCloseError()
		}
	}
	return n, nil
}
// closeWrite closes the write side, recording err (or EOF if nil) as the
// error future readers will observe. Always returns nil.
func (p *pipe) closeWrite(err error) error {
	if err == nil {
		err = EOF
	}
	// NOTE: store the error before closing done so unblocked readers see it.
	p.werr.Store(err)
	p.once.Do(func() { close(p.done) })
	return nil
}
// readCloseError is considered internal to the pipe type.
// It picks the error a blocked reader should see: the writer's close
// error if only the write side closed, otherwise ErrClosedPipe.
func (p *pipe) readCloseError() error {
	if p.rerr.Load() == nil {
		if werr := p.werr.Load(); werr != nil {
			return werr
		}
	}
	return ErrClosedPipe
}
// writeCloseError is considered internal to the pipe type.
// It picks the error a blocked writer should see: the reader's close
// error if only the read side closed, otherwise ErrClosedPipe.
func (p *pipe) writeCloseError() error {
	if p.werr.Load() == nil {
		if rerr := p.rerr.Load(); rerr != nil {
			return rerr
		}
	}
	return ErrClosedPipe
}
// A PipeReader is the read half of a pipe.
type PipeReader struct{ pipe }
// Read implements the standard Read interface:
// it reads data from the pipe, blocking until a writer
// arrives or the write end is closed.
// If the write end is closed with an error, that error is
// returned as err; otherwise err is EOF.
func (r *PipeReader) Read(data []byte) (n int, err error) {
	return r.pipe.read(data)
}
// Close closes the reader; subsequent writes to the
// write half of the pipe will return the error [ErrClosedPipe].
func (r *PipeReader) Close() error {
	// Equivalent to closing with a nil error, which maps to ErrClosedPipe.
	return r.CloseWithError(nil)
}
// CloseWithError closes the reader; subsequent writes
// to the write half of the pipe will return the error err.
//
// CloseWithError never overwrites the previous error if it exists
// and always returns nil.
func (r *PipeReader) CloseWithError(err error) error {
	return r.pipe.closeRead(err)
}
// A PipeWriter is the write half of a pipe.
// It embeds a PipeReader (rather than the pipe directly) so that
// Pipe can return both halves backed by one allocation.
type PipeWriter struct{ r PipeReader }
// Write implements the standard Write interface:
// it writes data to the pipe, blocking until one or more readers
// have consumed all the data or the read end is closed.
// If the read end is closed with an error, that err is
// returned as err; otherwise err is [ErrClosedPipe].
func (w *PipeWriter) Write(data []byte) (n int, err error) {
	return w.r.pipe.write(data)
}
// Close closes the writer; subsequent reads from the
// read half of the pipe will return no bytes and EOF.
func (w *PipeWriter) Close() error {
	// Equivalent to closing with a nil error, which maps to EOF.
	return w.CloseWithError(nil)
}
// CloseWithError closes the writer; subsequent reads from the
// read half of the pipe will return no bytes and the error err,
// or EOF if err is nil.
//
// CloseWithError never overwrites the previous error if it exists
// and always returns nil.
func (w *PipeWriter) CloseWithError(err error) error {
	return w.r.pipe.closeWrite(err)
}
// Pipe creates a synchronous in-memory pipe.
// It can be used to connect code expecting an [io.Reader]
// with code expecting an [io.Writer].
//
// Reads and Writes on the pipe are matched one to one
// except when multiple Reads are needed to consume a single Write.
// That is, each Write to the [PipeWriter] blocks until it has satisfied
// one or more Reads from the [PipeReader] that fully consume
// the written data.
// The data is copied directly from the Write to the corresponding
// Read (or Reads); there is no internal buffering.
//
// It is safe to call Read and Write in parallel with each other or with Close.
// Parallel calls to Read and parallel calls to Write are also safe:
// the individual calls will be gated sequentially.
func Pipe() (*PipeReader, *PipeWriter) {
	// Both halves share the single pipe embedded in the writer's reader,
	// so one allocation backs the whole pair.
	pw := new(PipeWriter)
	pw.r.pipe.wrCh = make(chan []byte)
	pw.r.pipe.rdCh = make(chan int)
	pw.r.pipe.done = make(chan struct{})
	return &pw.r, pw
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package log implements a simple logging package. It defines a type, [Logger],
// with methods for formatting output. It also has a predefined 'standard'
// Logger accessible through helper functions Print[f|ln], Fatal[f|ln], and
// Panic[f|ln], which are easier to use than creating a Logger manually.
// That logger writes to standard error and prints the date and time
// of each logged message.
// Every log message is output on a separate line: if the message being
// printed does not end in a newline, the logger will add one.
// The Fatal functions call [os.Exit](1) after writing the log message.
// The Panic functions call panic after writing the log message.
package log
import (
"fmt"
"io"
"log/internal"
"os"
"runtime"
"sync"
"sync/atomic"
"time"
)
// These flags define which text to prefix to each log entry generated by the [Logger].
// Bits are or'ed together to control what's printed.
// With the exception of the Lmsgprefix flag, there is no
// control over the order they appear (the order listed here)
// or the format they present (as described in the comments).
// The prefix is followed by a colon only when Llongfile or Lshortfile
// is specified.
// For example, flags Ldate | Ltime (or LstdFlags) produce,
//
// 2009/01/23 01:23:23 message
//
// while flags Ldate | Ltime | Lmicroseconds | Llongfile produce,
//
// 2009/01/23 01:23:23.123123 /a/b/c/d.go:23: message
const (
	Ldate         = 1 << iota // the date in the local time zone: 2009/01/23
	Ltime                     // the time in the local time zone: 01:23:23
	Lmicroseconds             // microsecond resolution: 01:23:23.123123. assumes Ltime.
	Llongfile                 // full file name and line number: /a/b/c/d.go:23
	Lshortfile                // final file name element and line number: d.go:23. overrides Llongfile
	LUTC                      // if Ldate or Ltime is set, use UTC rather than the local time zone
	Lmsgprefix                // move the "prefix" from the beginning of the line to before the message
	LstdFlags     = Ldate | Ltime // initial values for the standard logger
)
// A Logger represents an active logging object that generates lines of
// output to an [io.Writer]. Each logging operation makes a single call to
// the Writer's Write method. A Logger can be used simultaneously from
// multiple goroutines; it guarantees to serialize access to the Writer.
type Logger struct {
	outMu     sync.Mutex
	out       io.Writer              // destination for output; guarded by outMu
	prefix    atomic.Pointer[string] // prefix on each line to identify the logger (but see Lmsgprefix)
	flag      atomic.Int32           // properties
	isDiscard atomic.Bool            // whether out == io.Discard; allows a lock-free fast path
}
// New creates a new [Logger]. The out variable sets the
// destination to which log data will be written.
// The prefix appears at the beginning of each generated log line, or
// after the log header if the [Lmsgprefix] flag is provided.
// The flag argument defines the logging properties.
func New(out io.Writer, prefix string, flag int) *Logger {
	logger := &Logger{}
	logger.SetOutput(out)
	logger.SetPrefix(prefix)
	logger.SetFlags(flag)
	return logger
}
// SetOutput sets the output destination for the logger.
func (l *Logger) SetOutput(w io.Writer) {
	l.outMu.Lock()
	defer l.outMu.Unlock()
	l.out = w
	// Cache whether writes can be skipped entirely; checked lock-free
	// at the top of output.
	l.isDiscard.Store(w == io.Discard)
}
// std is the logger behind the package-level Print/Fatal/Panic helpers.
var std = New(os.Stderr, "", LstdFlags)
// Default returns the standard logger used by the package-level output functions.
func Default() *Logger { return std }
// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.
func itoa(buf *[]byte, i int, wid int) {
	// Build the digits back-to-front in a scratch array, then append
	// the used suffix to *buf in a single call.
	var scratch [20]byte
	pos := len(scratch) - 1
	for i >= 10 || wid > 1 {
		wid--
		next := i / 10
		scratch[pos] = byte('0' + i - next*10)
		pos--
		i = next
	}
	// i < 10: write the final (most significant) digit.
	scratch[pos] = byte('0' + i)
	*buf = append(*buf, scratch[pos:]...)
}
// formatHeader writes log header to buf in following order:
//   - l.prefix (if it's not blank and Lmsgprefix is unset),
//   - date and/or time (if corresponding flags are provided),
//   - file and line number (if corresponding flags are provided),
//   - l.prefix (if it's not blank and Lmsgprefix is set).
func formatHeader(buf *[]byte, t time.Time, prefix string, flag int, file string, line int) {
	if flag&Lmsgprefix == 0 {
		*buf = append(*buf, prefix...)
	}
	if flag&(Ldate|Ltime|Lmicroseconds) != 0 {
		if flag&LUTC != 0 {
			t = t.UTC()
		}
		if flag&Ldate != 0 {
			year, month, day := t.Date()
			itoa(buf, year, 4)
			*buf = append(*buf, '/')
			itoa(buf, int(month), 2)
			*buf = append(*buf, '/')
			itoa(buf, day, 2)
			*buf = append(*buf, ' ')
		}
		if flag&(Ltime|Lmicroseconds) != 0 {
			hour, min, sec := t.Clock()
			itoa(buf, hour, 2)
			*buf = append(*buf, ':')
			itoa(buf, min, 2)
			*buf = append(*buf, ':')
			itoa(buf, sec, 2)
			if flag&Lmicroseconds != 0 {
				*buf = append(*buf, '.')
				// Nanoseconds truncated to microseconds, zero-padded to 6 digits.
				itoa(buf, t.Nanosecond()/1e3, 6)
			}
			*buf = append(*buf, ' ')
		}
	}
	if flag&(Lshortfile|Llongfile) != 0 {
		if flag&Lshortfile != 0 {
			// Keep only the path element after the last '/'.
			short := file
			for i := len(file) - 1; i > 0; i-- {
				if file[i] == '/' {
					short = file[i+1:]
					break
				}
			}
			file = short
		}
		*buf = append(*buf, file...)
		*buf = append(*buf, ':')
		itoa(buf, line, -1)
		*buf = append(*buf, ": "...)
	}
	if flag&Lmsgprefix != 0 {
		*buf = append(*buf, prefix...)
	}
}
// bufferPool recycles formatting buffers between logging calls.
var bufferPool = sync.Pool{New: func() any { return new([]byte) }}
// getBuffer returns a pooled buffer with its length reset to zero
// (capacity is retained for reuse).
func getBuffer() *[]byte {
	b := bufferPool.Get().(*[]byte)
	*b = (*b)[:0]
	return b
}
// putBuffer returns a buffer to the pool for reuse by getBuffer.
func putBuffer(b *[]byte) {
	// Proper usage of a sync.Pool requires each entry to have approximately
	// the same memory cost. To obtain this property when the stored type
	// contains a variably-sized buffer, we add a hard limit on the maximum buffer
	// to place back in the pool.
	//
	// See https://go.dev/issue/23199
	if cap(*b) > 64<<10 {
		*b = nil
	}
	bufferPool.Put(b)
}
// Output writes the output for a logging event. The string s contains
// the text to print after the prefix specified by the flags of the
// Logger. A newline is appended if the last character of s is not
// already a newline. Calldepth is used to recover the PC and is
// provided for generality, although at the moment on all pre-defined
// paths it will be 2.
func (l *Logger) Output(calldepth int, s string) error {
	return l.output(0, calldepth+1, func(b []byte) []byte { // +1 for this frame.
		return append(b, s...)
	})
}
// output can take either a calldepth or a pc to get source line information.
// It uses the pc if it is non-zero.
func (l *Logger) output(pc uintptr, calldepth int, appendOutput func([]byte) []byte) error {
	// Fast path: skip all formatting when writing to io.Discard.
	if l.isDiscard.Load() {
		return nil
	}
	now := time.Now() // get this early.
	// Load prefix and flag once so that their value is consistent within
	// this call regardless of any concurrent changes to their value.
	prefix := l.Prefix()
	flag := l.Flags()
	var file string
	var line int
	if flag&(Lshortfile|Llongfile) != 0 {
		if pc == 0 {
			var ok bool
			_, file, line, ok = runtime.Caller(calldepth)
			if !ok {
				file = "???"
				line = 0
			}
		} else {
			// Resolve file/line from the caller-supplied PC instead.
			fs := runtime.CallersFrames([]uintptr{pc})
			f, _ := fs.Next()
			file = f.File
			if file == "" {
				file = "???"
			}
			line = f.Line
		}
	}
	buf := getBuffer()
	defer putBuffer(buf)
	formatHeader(buf, now, prefix, flag, file, line)
	*buf = appendOutput(*buf)
	if len(*buf) == 0 || (*buf)[len(*buf)-1] != '\n' {
		*buf = append(*buf, '\n')
	}
	// Serialize the single Write call, as promised by the Logger docs.
	l.outMu.Lock()
	defer l.outMu.Unlock()
	_, err := l.out.Write(*buf)
	return err
}
// init wires the standard logger into log/internal so other packages
// (e.g. log/slog) can route their default output through it.
func init() {
	internal.DefaultOutput = func(pc uintptr, data []byte) error {
		return std.output(pc, 0, func(buf []byte) []byte {
			return append(buf, data...)
		})
	}
}
// Print calls l.Output to print to the logger.
// Arguments are handled in the manner of [fmt.Print].
// Note: the calldepth of 2 assumes this exact call structure; do not
// add or remove stack frames here without adjusting it.
func (l *Logger) Print(v ...any) {
	l.output(0, 2, func(b []byte) []byte {
		return fmt.Append(b, v...)
	})
}
// Printf calls l.Output to print to the logger.
// Arguments are handled in the manner of [fmt.Printf].
func (l *Logger) Printf(format string, v ...any) {
	l.output(0, 2, func(b []byte) []byte {
		return fmt.Appendf(b, format, v...)
	})
}
// Println calls l.Output to print to the logger.
// Arguments are handled in the manner of [fmt.Println].
func (l *Logger) Println(v ...any) {
	l.output(0, 2, func(b []byte) []byte {
		return fmt.Appendln(b, v...)
	})
}
// Fatal is equivalent to l.Print() followed by a call to [os.Exit](1).
// Deferred functions do not run; see os.Exit.
func (l *Logger) Fatal(v ...any) {
	l.output(0, 2, func(b []byte) []byte {
		return fmt.Append(b, v...)
	})
	os.Exit(1)
}
// Fatalf is equivalent to l.Printf() followed by a call to [os.Exit](1).
func (l *Logger) Fatalf(format string, v ...any) {
	l.output(0, 2, func(b []byte) []byte {
		return fmt.Appendf(b, format, v...)
	})
	os.Exit(1)
}
// Fatalln is equivalent to l.Println() followed by a call to [os.Exit](1).
func (l *Logger) Fatalln(v ...any) {
	l.output(0, 2, func(b []byte) []byte {
		return fmt.Appendln(b, v...)
	})
	os.Exit(1)
}
// Panic is equivalent to l.Print() followed by a call to panic().
// The message is formatted once and used both for the log line and
// as the panic value.
func (l *Logger) Panic(v ...any) {
	s := fmt.Sprint(v...)
	l.output(0, 2, func(b []byte) []byte {
		return append(b, s...)
	})
	panic(s)
}
// Panicf is equivalent to l.Printf() followed by a call to panic().
func (l *Logger) Panicf(format string, v ...any) {
	s := fmt.Sprintf(format, v...)
	l.output(0, 2, func(b []byte) []byte {
		return append(b, s...)
	})
	panic(s)
}
// Panicln is equivalent to l.Println() followed by a call to panic().
func (l *Logger) Panicln(v ...any) {
	s := fmt.Sprintln(v...)
	l.output(0, 2, func(b []byte) []byte {
		return append(b, s...)
	})
	panic(s)
}
// Flags returns the output flags for the logger.
// The flag bits are [Ldate], [Ltime], and so on.
func (l *Logger) Flags() int {
	return int(l.flag.Load())
}
// SetFlags sets the output flags for the logger.
// The flag bits are [Ldate], [Ltime], and so on.
func (l *Logger) SetFlags(flag int) {
	l.flag.Store(int32(flag))
}
// Prefix returns the output prefix for the logger.
// Returns "" if no prefix has been set yet.
func (l *Logger) Prefix() string {
	if p := l.prefix.Load(); p != nil {
		return *p
	}
	return ""
}
// SetPrefix sets the output prefix for the logger.
func (l *Logger) SetPrefix(prefix string) {
	l.prefix.Store(&prefix)
}
// Writer returns the output destination for the logger.
func (l *Logger) Writer() io.Writer {
	l.outMu.Lock()
	defer l.outMu.Unlock()
	return l.out
}
// SetOutput sets the output destination for the standard logger.
func SetOutput(w io.Writer) {
	std.SetOutput(w)
}
// Flags returns the output flags for the standard logger.
// The flag bits are [Ldate], [Ltime], and so on.
func Flags() int {
	return std.Flags()
}
// SetFlags sets the output flags for the standard logger.
// The flag bits are [Ldate], [Ltime], and so on.
func SetFlags(flag int) {
	std.SetFlags(flag)
}
// Prefix returns the output prefix for the standard logger.
func Prefix() string {
	return std.Prefix()
}
// SetPrefix sets the output prefix for the standard logger.
func SetPrefix(prefix string) {
	std.SetPrefix(prefix)
}
// Writer returns the output destination for the standard logger.
func Writer() io.Writer {
	return std.Writer()
}
// These functions write to the standard logger.
// They call std.output directly (not the exported methods) so that the
// fixed calldepth of 2 points at the user's call site.
// Print calls Output to print to the standard logger.
// Arguments are handled in the manner of [fmt.Print].
func Print(v ...any) {
	std.output(0, 2, func(b []byte) []byte {
		return fmt.Append(b, v...)
	})
}
// Printf calls Output to print to the standard logger.
// Arguments are handled in the manner of [fmt.Printf].
func Printf(format string, v ...any) {
	std.output(0, 2, func(b []byte) []byte {
		return fmt.Appendf(b, format, v...)
	})
}
// Println calls Output to print to the standard logger.
// Arguments are handled in the manner of [fmt.Println].
func Println(v ...any) {
	std.output(0, 2, func(b []byte) []byte {
		return fmt.Appendln(b, v...)
	})
}
// Fatal is equivalent to [Print] followed by a call to [os.Exit](1).
// Deferred functions do not run; see os.Exit.
func Fatal(v ...any) {
	std.output(0, 2, func(b []byte) []byte {
		return fmt.Append(b, v...)
	})
	os.Exit(1)
}
// Fatalf is equivalent to [Printf] followed by a call to [os.Exit](1).
func Fatalf(format string, v ...any) {
	std.output(0, 2, func(b []byte) []byte {
		return fmt.Appendf(b, format, v...)
	})
	os.Exit(1)
}
// Fatalln is equivalent to [Println] followed by a call to [os.Exit](1).
func Fatalln(v ...any) {
	std.output(0, 2, func(b []byte) []byte {
		return fmt.Appendln(b, v...)
	})
	os.Exit(1)
}
// Panic is equivalent to [Print] followed by a call to panic().
// The message is formatted once and used both for the log line and
// as the panic value.
func Panic(v ...any) {
	s := fmt.Sprint(v...)
	std.output(0, 2, func(b []byte) []byte {
		return append(b, s...)
	})
	panic(s)
}
// Panicf is equivalent to [Printf] followed by a call to panic().
func Panicf(format string, v ...any) {
	s := fmt.Sprintf(format, v...)
	std.output(0, 2, func(b []byte) []byte {
		return append(b, s...)
	})
	panic(s)
}
// Panicln is equivalent to [Println] followed by a call to panic().
func Panicln(v ...any) {
	s := fmt.Sprintln(v...)
	std.output(0, 2, func(b []byte) []byte {
		return append(b, s...)
	})
	panic(s)
}
// Output writes the output for a logging event. The string s contains
// the text to print after the prefix specified by the flags of the
// Logger. A newline is appended if the last character of s is not
// already a newline. Calldepth is the count of the number of
// frames to skip when computing the file name and line number
// if [Llongfile] or [Lshortfile] is set; a value of 1 will print the details
// for the caller of Output.
func Output(calldepth int, s string) error {
	return std.output(0, calldepth+1, func(b []byte) []byte { // +1 for this frame.
		return append(b, s...)
	})
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows && !plan9
package syslog
import (
"errors"
"fmt"
"log"
"net"
"os"
"strings"
"sync"
"time"
)
// The Priority is a combination of the syslog facility and
// severity. For example, [LOG_ALERT] | [LOG_FTP] sends an alert severity
// message from the FTP facility. The default severity is [LOG_EMERG];
// the default facility is [LOG_KERN].
type Priority int
// Low 3 bits carry the severity, the remaining bits the facility.
const severityMask = 0x07
const facilityMask = 0xf8
const (
	// Severity.
	// From /usr/include/sys/syslog.h.
	// These are the same on Linux, BSD, and OS X.
	LOG_EMERG Priority = iota
	LOG_ALERT
	LOG_CRIT
	LOG_ERR
	LOG_WARNING
	LOG_NOTICE
	LOG_INFO
	LOG_DEBUG
)
const (
	// Facility.
	// From /usr/include/sys/syslog.h.
	// These are the same up to LOG_FTP on Linux, BSD, and OS X.
	LOG_KERN Priority = iota << 3
	LOG_USER
	LOG_MAIL
	LOG_DAEMON
	LOG_AUTH
	LOG_SYSLOG
	LOG_LPR
	LOG_NEWS
	LOG_UUCP
	LOG_CRON
	LOG_AUTHPRIV
	LOG_FTP
	_ // unused
	_ // unused
	_ // unused
	_ // unused
	LOG_LOCAL0
	LOG_LOCAL1
	LOG_LOCAL2
	LOG_LOCAL3
	LOG_LOCAL4
	LOG_LOCAL5
	LOG_LOCAL6
	LOG_LOCAL7
)
// A Writer is a connection to a syslog server.
type Writer struct {
	priority Priority // default priority for Write
	tag      string   // program tag included in each message
	hostname string   // hostname field for network-form messages
	network  string   // as passed to Dial; "" means the local syslog socket
	raddr    string   // remote address, as passed to Dial
	mu       sync.Mutex // guards conn
	conn     serverConn
}
// This interface and the separate syslog_unix.go file exist for
// Solaris support as implemented by gccgo. On Solaris you cannot
// simply open a TCP connection to the syslog daemon. The gccgo
// sources have a syslog_solaris.go file that implements unixSyslog to
// return a type that satisfies this interface and simply calls the C
// library syslog function.
type serverConn interface {
	writeString(p Priority, hostname, tag, s, nl string) error
	close() error
}
// netConn is the net.Conn-backed serverConn implementation;
// local records whether it talks to the local syslog socket.
type netConn struct {
	local bool
	conn  net.Conn
}
// New establishes a new connection to the system log daemon. Each
// write to the returned writer sends a log message with the given
// priority (a combination of the syslog facility and severity) and
// prefix tag. If tag is empty, the [os.Args][0] is used.
func New(priority Priority, tag string) (*Writer, error) {
	// Empty network/raddr means "connect to the local syslog server".
	return Dial("", "", priority, tag)
}
// Dial establishes a connection to a log daemon by connecting to
// address raddr on the specified network. Each write to the returned
// writer sends a log message with the facility and severity
// (from priority) and tag. If tag is empty, the [os.Args][0] is used.
// If network is empty, Dial will connect to the local syslog server.
// Otherwise, see the documentation for net.Dial for valid values
// of network and raddr.
func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) {
	if priority < 0 || priority > LOG_LOCAL7|LOG_DEBUG {
		return nil, errors.New("log/syslog: invalid priority")
	}
	if tag == "" {
		tag = os.Args[0]
	}
	// A failed hostname lookup is tolerated; connect fills in a fallback.
	hostname, _ := os.Hostname()
	w := &Writer{
		priority: priority,
		tag:      tag,
		hostname: hostname,
		network:  network,
		raddr:    raddr,
	}
	// connect requires w.mu to be held.
	w.mu.Lock()
	defer w.mu.Unlock()
	if err := w.connect(); err != nil {
		return nil, err
	}
	return w, nil
}
// connect makes a connection to the syslog server.
// It must be called with w.mu held.
func (w *Writer) connect() (err error) {
	if w.conn != nil {
		// ignore err from close, it makes sense to continue anyway
		w.conn.close()
		w.conn = nil
	}
	if w.network == "" {
		// Empty network: use the local syslog socket.
		w.conn, err = unixSyslog()
		if w.hostname == "" {
			w.hostname = "localhost"
		}
	} else {
		var c net.Conn
		c, err = net.Dial(w.network, w.raddr)
		if err == nil {
			w.conn = &netConn{
				conn: c,
				// Unix-domain networks use the local message format.
				local: w.network == "unixgram" || w.network == "unix",
			}
			if w.hostname == "" {
				w.hostname = c.LocalAddr().String()
			}
		}
	}
	return
}
// Write sends a log message to the syslog daemon.
// The message uses the severity/facility given to New or Dial.
func (w *Writer) Write(b []byte) (int, error) {
	return w.writeAndRetry(w.priority, string(b))
}
// Close closes a connection to the syslog daemon.
func (w *Writer) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	conn := w.conn
	if conn == nil {
		return nil // already closed or never connected
	}
	w.conn = nil
	return conn.close()
}
// Emerg logs a message with severity [LOG_EMERG], ignoring the severity
// passed to New.
func (w *Writer) Emerg(m string) error {
	_, err := w.writeAndRetry(LOG_EMERG, m)
	return err
}
// Alert logs a message with severity [LOG_ALERT], ignoring the severity
// passed to New.
func (w *Writer) Alert(m string) error {
	_, err := w.writeAndRetry(LOG_ALERT, m)
	return err
}
// Crit logs a message with severity [LOG_CRIT], ignoring the severity
// passed to New.
func (w *Writer) Crit(m string) error {
	_, err := w.writeAndRetry(LOG_CRIT, m)
	return err
}
// Err logs a message with severity [LOG_ERR], ignoring the severity
// passed to New.
func (w *Writer) Err(m string) error {
	_, err := w.writeAndRetry(LOG_ERR, m)
	return err
}
// Warning logs a message with severity [LOG_WARNING], ignoring the
// severity passed to New.
func (w *Writer) Warning(m string) error {
	_, err := w.writeAndRetry(LOG_WARNING, m)
	return err
}
// Notice logs a message with severity [LOG_NOTICE], ignoring the
// severity passed to New.
func (w *Writer) Notice(m string) error {
	_, err := w.writeAndRetry(LOG_NOTICE, m)
	return err
}
// Info logs a message with severity [LOG_INFO], ignoring the severity
// passed to New.
func (w *Writer) Info(m string) error {
	_, err := w.writeAndRetry(LOG_INFO, m)
	return err
}
// Debug logs a message with severity [LOG_DEBUG], ignoring the severity
// passed to New.
func (w *Writer) Debug(m string) error {
	_, err := w.writeAndRetry(LOG_DEBUG, m)
	return err
}
// writeAndRetry sends s with severity p (the facility is always taken
// from w.priority). If a write over an existing connection fails, it
// reconnects once and retries.
func (w *Writer) writeAndRetry(p Priority, s string) (int, error) {
	// Combine the Writer's facility bits with the requested severity bits.
	pr := (w.priority & facilityMask) | (p & severityMask)
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.conn != nil {
		if n, err := w.write(pr, s); err == nil {
			return n, nil
		}
	}
	// First attempt failed (or there was no connection): reconnect and retry.
	if err := w.connect(); err != nil {
		return 0, err
	}
	return w.write(pr, s)
}
// write generates and writes a syslog formatted string. The
// format is as follows: <PRI>TIMESTAMP HOSTNAME TAG[PID]: MSG
func (w *Writer) write(p Priority, msg string) (int, error) {
	// Append a newline only when the message doesn't already end in one.
	nl := "\n"
	if strings.HasSuffix(msg, "\n") {
		nl = ""
	}
	if err := w.conn.writeString(p, w.hostname, w.tag, msg, nl); err != nil {
		return 0, err
	}
	// Note: return the length of the input, not the number of
	// bytes printed by Fprintf, because this must behave like
	// an io.Writer.
	return len(msg), nil
}
// writeString formats and sends one syslog message over the connection.
// Local and network forms differ in timestamp layout and hostname field.
func (n *netConn) writeString(p Priority, hostname, tag, msg, nl string) error {
	if n.local {
		// Compared to the network form below, the changes are:
		// 1. Use time.Stamp instead of time.RFC3339.
		// 2. Drop the hostname field from the Fprintf.
		timestamp := time.Now().Format(time.Stamp)
		_, err := fmt.Fprintf(n.conn, "<%d>%s %s[%d]: %s%s",
			p, timestamp,
			tag, os.Getpid(), msg, nl)
		return err
	}
	timestamp := time.Now().Format(time.RFC3339)
	_, err := fmt.Fprintf(n.conn, "<%d>%s %s %s[%d]: %s%s",
		p, timestamp, hostname,
		tag, os.Getpid(), msg, nl)
	return err
}
// close closes the underlying network connection.
func (n *netConn) close() error {
	return n.conn.Close()
}
// NewLogger creates a [log.Logger] whose output is written to the
// system log service with the specified priority, a combination of
// the syslog facility and severity. The logFlag argument is the flag
// set passed through to [log.New] to create the Logger.
func NewLogger(p Priority, logFlag int) (*log.Logger, error) {
	// The syslog Writer itself is the Logger's io.Writer destination.
	s, err := New(p, "")
	if err != nil {
		return nil, err
	}
	return log.New(s, "", logFlag), nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows && !plan9
package syslog
import (
"errors"
"net"
)
// unixSyslog opens a connection to the syslog daemon running on the
// local machine using a Unix domain socket. It tries each known socket
// type and path in order and returns the first successful connection.
func unixSyslog() (conn serverConn, err error) {
	networks := []string{"unixgram", "unix"}
	paths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"}
	for _, network := range networks {
		for _, path := range paths {
			if c, err := net.Dial(network, path); err == nil {
				return &netConn{conn: c, local: true}, nil
			}
		}
	}
	return nil, errors.New("Unix syslog delivery error")
}
// Code generated by "stringer -type=Accuracy"; DO NOT EDIT.
package big
import "strconv"
// Generated guard: fails to compile if the Accuracy constant values drift
// from what the stringer tool generated against.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[Below - -1] // Below == -1
	_ = x[Exact-0]
	_ = x[Above-1]
}
// Generated lookup tables: names concatenated, with index giving each slice.
const _Accuracy_name = "BelowExactAbove"
var _Accuracy_index = [...]uint8{0, 5, 10, 15}
// String returns the name of the Accuracy value.
// Generated code (see "DO NOT EDIT" header above); do not modify by hand.
func (i Accuracy) String() string {
	i -= -1 // shift so Below (-1) maps to index 0
	if i < 0 || i >= Accuracy(len(_Accuracy_index)-1) {
		return "Accuracy(" + strconv.FormatInt(int64(i+-1), 10) + ")"
	}
	return _Accuracy_name[_Accuracy_index[i]:_Accuracy_index[i+1]]
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file provides Go implementations of elementary multi-precision
// arithmetic operations on word vectors. These have the suffix _g.
// These are needed for platforms without assembly implementations of these routines.
// This file also contains elementary operations that can be implemented
// sufficiently efficiently in Go.
package big
import (
"math/bits"
_ "unsafe" // for go:linkname
)
// A Word represents a single digit of a multi-precision unsigned integer.
type Word uint
const (
	_S = _W / 8        // word size in bytes
	_W = bits.UintSize // word size in bits (32 or 64, per platform)
	_B = 1 << _W       // digit base
	_M = _B - 1        // digit mask (all bits set)
)
// In these routines, it is the caller's responsibility to arrange for
// x, y, and z to all have the same length. We check this and panic.
// The assembly versions of these routines do not include that check.
//
// The check+panic also has the effect of teaching the compiler that
// “i in range for z” implies “i in range for x and y”, eliminating all
// bounds checks in loops from 0 to len(z) and vice versa.
// ----------------------------------------------------------------------------
// Elementary operations on words
//
// These operations are used by the vector operations below.
// z1<<_W + z0 = x*y
// z1<<_W + z0 = x*y
// mulWW returns the double-width product of x and y.
func mulWW(x, y Word) (z1, z0 Word) {
	hi, lo := bits.Mul(uint(x), uint(y))
	return Word(hi), Word(lo)
}
// z1<<_W + z0 = x*y + c
// mulAddWWW_g returns the double-width result of x*y plus the carry-in c.
func mulAddWWW_g(x, y, c Word) (z1, z0 Word) {
	hi, lo := bits.Mul(uint(x), uint(y))
	// Adding c to the low word can carry at most 1 into the high word,
	// and hi+carry cannot overflow because hi <= _M-1 when lo carries.
	lo, carry := bits.Add(lo, uint(c), 0)
	return Word(hi + carry), Word(lo)
}
// nlz returns the number of leading zeros in x.
// Wraps bits.LeadingZeros call for convenience.
func nlz(x Word) uint {
	return uint(bits.LeadingZeros(uint(x)))
}
// addVV_g sets z = x + y element-wise with carry propagation.
// The resulting carry c is either 0 or 1.
// Panics unless x, y, and z all have the same length (the length check
// also lets the compiler eliminate bounds checks in the loop).
func addVV_g(z, x, y []Word) (c Word) {
	if len(x) != len(z) || len(y) != len(z) {
		panic("addVV len")
	}
	carry := uint(0)
	for i := range z {
		var sum uint
		sum, carry = bits.Add(uint(x[i]), uint(y[i]), carry)
		z[i] = Word(sum)
	}
	return Word(carry)
}
// subVV_g sets z = x - y element-wise with borrow propagation.
// The resulting carry (borrow) c is either 0 or 1.
// Panics unless x, y, and z all have the same length.
func subVV_g(z, x, y []Word) (c Word) {
	if len(x) != len(z) || len(y) != len(z) {
		panic("subVV len")
	}
	borrow := uint(0)
	for i := range z {
		var diff uint
		diff, borrow = bits.Sub(uint(x[i]), uint(y[i]), borrow)
		z[i] = Word(diff)
	}
	return Word(borrow)
}
// addVW sets z = x + y, returning the final carry c.
// The behavior is undefined if len(x) != len(z).
// If len(z) == 0, c = y; otherwise, c is 0 or 1.
//
// addVW should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/remyoudompheng/bigfft
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname addVW
func addVW(z, x []Word, y Word) (c Word) {
	if len(x) != len(z) {
		panic("addVW len")
	}
	if len(z) == 0 {
		return y
	}
	// Add y into the first word only; the rest is carry propagation.
	zi, cc := bits.Add(uint(x[0]), uint(y), 0)
	z[0] = Word(zi)
	if cc == 0 {
		// No carry: the remaining words are unchanged. Copy only when
		// z and x are distinct slices (the &z[0] != &x[0] check detects
		// the common aliased z = x case and skips the copy).
		if &z[0] != &x[0] {
			copy(z[1:], x[1:])
		}
		return 0
	}
	// Propagate the carry until a word doesn't overflow.
	for i := 1; i < len(z); i++ {
		xi := x[i]
		if xi != ^Word(0) {
			z[i] = xi + 1
			if &z[0] != &x[0] {
				copy(z[i+1:], x[i+1:])
			}
			return 0
		}
		z[i] = 0 // ^Word(0)+1 wraps to 0; carry continues
	}
	return 1
}
// addVW_ref is the reference implementation for addVW, used only for testing.
// It simply threads the running addend through every word.
func addVW_ref(z, x []Word, y Word) (c Word) {
	carry := uint(y)
	for i := range z {
		var zi uint
		zi, carry = bits.Add(uint(x[i]), carry, 0)
		z[i] = Word(zi)
	}
	return Word(carry)
}
// subVW sets z = x - y, returning the final carry c.
// The behavior is undefined if len(x) != len(z).
// If len(z) == 0, c = y; otherwise, c is 0 or 1.
//
// subVW should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/remyoudompheng/bigfft
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname subVW
func subVW(z, x []Word, y Word) (c Word) {
	if len(x) != len(z) {
		panic("subVW len")
	}
	if len(z) == 0 {
		return y
	}
	// Subtract y from the first word only; the rest is borrow propagation.
	zi, cc := bits.Sub(uint(x[0]), uint(y), 0)
	z[0] = Word(zi)
	if cc == 0 {
		// No borrow: remaining words unchanged; copy only when z and x
		// are distinct slices (aliased z = x needs no copy).
		if &z[0] != &x[0] {
			copy(z[1:], x[1:])
		}
		return 0
	}
	// Propagate the borrow until a word doesn't underflow.
	for i := 1; i < len(z); i++ {
		xi := x[i]
		if xi != 0 {
			z[i] = xi - 1
			if &z[0] != &x[0] {
				copy(z[i+1:], x[i+1:])
			}
			return 0
		}
		z[i] = ^Word(0) // 0-1 wraps; borrow continues
	}
	return 1
}
// subVW_ref is the reference implementation for subVW, used only for testing.
// It simply threads the running subtrahend through every word.
func subVW_ref(z, x []Word, y Word) (c Word) {
	borrow := uint(y)
	for i := range z {
		var zi uint
		zi, borrow = bits.Sub(uint(x[i]), borrow, 0)
		z[i] = Word(zi)
	}
	return Word(borrow)
}
// lshVU_g sets z = x << s (0 < s < _W expected for the shifting path)
// and returns the bits shifted out of the top word in c.
// Panics if len(x) != len(z).
func lshVU_g(z, x []Word, s uint) (c Word) {
	if len(x) != len(z) {
		panic("lshVU len")
	}
	if s == 0 {
		copy(z, x)
		return
	}
	if len(z) == 0 {
		return
	}
	s &= _W - 1 // hint to the compiler that shifts by s don't need guard code
	ŝ := _W - s
	ŝ &= _W - 1 // ditto
	c = x[len(z)-1] >> ŝ // bits shifted out of the most significant word
	// Iterate high-to-low so the result is correct when z aliases x.
	for i := len(z) - 1; i > 0; i-- {
		z[i] = x[i]<<s | x[i-1]>>ŝ
	}
	z[0] = x[0] << s
	return
}
// rshVU_g sets z = x >> s (0 < s < _W expected for the shifting path)
// and returns the bits shifted out of the bottom word, left-aligned, in c.
// Panics if len(x) != len(z).
func rshVU_g(z, x []Word, s uint) (c Word) {
	if len(x) != len(z) {
		panic("rshVU len")
	}
	if s == 0 {
		copy(z, x)
		return
	}
	if len(z) == 0 {
		return
	}
	s &= _W - 1 // hint to the compiler that shifts by s don't need guard code
	ŝ := _W - s
	ŝ &= _W - 1 // ditto
	c = x[0] << ŝ // bits shifted out of the least significant word
	// Iterate low-to-high so the result is correct when z aliases x.
	for i := 1; i < len(z); i++ {
		z[i-1] = x[i-1]>>s | x[i]<<ŝ
	}
	z[len(z)-1] = x[len(z)-1] >> s
	return
}
// mulAddVWW_g is the generic implementation of mulAddVWW: it sets
// z = x*y + r and returns the final carry word. The behavior is
// undefined if len(x) != len(z).
func mulAddVWW_g(z, x []Word, y, r Word) (c Word) {
	if len(x) != len(z) {
		panic("mulAddVWW len")
	}
	carry := r
	for i := 0; i < len(z); i++ {
		carry, z[i] = mulAddWWW_g(x[i], y, carry)
	}
	return carry
}
// addMulVVWW_g is the generic implementation of addMulVVWW: it sets
// z = x + y*m + a and returns the final carry word.
// The behavior is undefined if len(x) != len(z) or len(y) != len(z).
func addMulVVWW_g(z, x, y []Word, m, a Word) (c Word) {
	if len(x) != len(z) || len(y) != len(z) {
		// Bug fix: this panic previously said "rshVU len" (copy-paste
		// error); it now names the function actually being called.
		panic("addMulVVWW len")
	}
	c = a
	for i := range z {
		// z[i] = x[i] + y[i]*m + carry, tracking the double-width carry.
		z1, z0 := mulAddWWW_g(y[i], m, x[i])
		lo, cc := bits.Add(uint(z0), uint(c), 0)
		c, z[i] = Word(cc), Word(lo)
		c += z1 // cannot overflow: z1 < _B-1 whenever cc is set
	}
	return
}
// divWW returns q, r such that q = ⎣(x1<<_W + x0)/y⎦ with remainder r
// (r is shifted back by the normalization amount before returning).
// m must be the approximate reciprocal of y: m = ⎣(_B^2 - 1)/d⎦ - _B
// with d = y << nlz(y) (see reciprocalWord). Requires x1 < y.
// The algorithm is from "Improved Division by Invariant Integers"
// (IEEE Transactions on Computers, 11 Jun. 2010).
func divWW(x1, x0, y, m Word) (q, r Word) {
	// Normalize the divisor so its msb is set; shift the dividend to match.
	s := nlz(y)
	if s != 0 {
		x1 = x1<<s | x0>>(_W-s)
		x0 <<= s
		y <<= s
	}
	d := uint(y)
	// We know that
	//   m = ⎣(B^2-1)/d⎦-B
	//   ⎣(B^2-1)/d⎦ = m+B
	//   (B^2-1)/d = m+B+delta1    0 <= delta1 <= (d-1)/d
	//   B^2/d = m+B+delta2        0 <= delta2 <= 1
	// The quotient we're trying to compute is
	//   quotient = ⎣(x1*B+x0)/d⎦
	//            = ⎣(x1*B*(B^2/d)+x0*(B^2/d))/B^2⎦
	//            = ⎣(x1*B*(m+B+delta2)+x0*(m+B+delta2))/B^2⎦
	//            = ⎣(x1*m+x1*B+x0)/B + x0*m/B^2 + delta2*(x1*B+x0)/B^2⎦
	// The latter two terms of this three-term sum are between 0 and 1.
	// So we can compute just the first term, and we will be low by at most 2.
	t1, t0 := bits.Mul(uint(m), uint(x1))
	_, c := bits.Add(t0, uint(x0), 0)
	t1, _ = bits.Add(t1, uint(x1), c)
	// The quotient is either t1, t1+1, or t1+2.
	// We'll try t1 and adjust if needed.
	qq := t1
	// compute remainder r=x-d*q.
	dq1, dq0 := bits.Mul(d, qq)
	r0, b := bits.Sub(uint(x0), dq0, 0)
	r1, _ := bits.Sub(uint(x1), dq1, b)
	// The remainder we just computed is bounded above by B+d:
	//   r = x1*B + x0 - d*q.
	//     = x1*B + x0 - d*⎣(x1*m+x1*B+x0)/B⎦
	//     = x1*B + x0 - d*((x1*m+x1*B+x0)/B-alpha)                          0 <= alpha < 1
	//     = x1*B + x0 - x1*d/B*m - x1*d - x0*d/B + d*alpha
	//     = x1*B + x0 - x1*d/B*⎣(B^2-1)/d-B⎦ - x1*d - x0*d/B + d*alpha
	//     = x1*B + x0 - x1*d/B*((B^2-1)/d-B-beta) - x1*d - x0*d/B + d*alpha 0 <= beta < 1
	//     = x1*B + x0 - x1*B + x1/B + x1*d + x1*d/B*beta - x1*d - x0*d/B + d*alpha
	//     = x0 + x1/B + x1*d/B*beta - x0*d/B + d*alpha
	//     = x0*(1-d/B) + x1*(1+d*beta)/B + d*alpha
	//     < B*(1-d/B) + d*B/B + d    because x0 < B (and 1-d/B > 0), x1 < d, 1+d*beta <= B, alpha < 1
	//     = B - d + d + d
	//     = B+d
	// So r1 can only be 0 or 1. If r1 is 1, then we know q was too small.
	// Add 1 to q and subtract d from r. That guarantees that r is <B, so
	// we no longer need to keep track of r1.
	if r1 != 0 {
		qq++
		r0 -= d
	}
	// If the remainder is still too large, increment q one more time.
	if r0 >= d {
		qq++
		r0 -= d
	}
	// Undo the normalization shift on the remainder.
	return Word(qq), Word(r0 >> s)
}
// reciprocalWord returns the reciprocal of the divisor:
// rec = ⎣(_B^2 - 1)/u⎦ - _B, where u = d1 << nlz(d1).
func reciprocalWord(d1 Word) Word {
	u := uint(d1 << nlz(d1))
	// (_B*(_M-u) + _M)/u computes (_B^2-1)/u - _B without overflow:
	// the double-width dividend is (^u, _M).
	hi := ^u
	lo := uint(_M)
	rec, _ := bits.Div(hi, lo, u)
	return Word(rec)
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !math_big_pure_go
//go:generate go test ./internal/asmgen -generate
package big
import _ "unsafe" // for linkname
// implemented in arith_$GOARCH.s

// addVV sets z = x + y, returning the final carry c
// (see the _g reference implementations for the vector-primitive contract).
//
// addVV should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/remyoudompheng/bigfft
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname addVV
//go:noescape
func addVV(z, x, y []Word) (c Word)
// subVV sets z = x - y, returning the final borrow c.
//
// subVV should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/remyoudompheng/bigfft
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname subVV
//go:noescape
func subVV(z, x, y []Word) (c Word)
// shlVU is the legacy spelling of lshVU; unlike lshVU it also accepts
// a zero shift count.
//
// shlVU should be an internal detail (and a stale one at that),
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/remyoudompheng/bigfft
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname shlVU
func shlVU(z, x []Word, s uint) (c Word) {
	if s != 0 {
		return lshVU(z, x, s)
	}
	// s == 0: a plain copy, no bits shifted out.
	copy(z, x)
	return 0
}
// lshVU sets z = x<<s, returning the high bits c. 1 ≤ s ≤ _W-1.
// (The shift is per-word, so s must be less than the word size _W;
// see the s &= _W-1 masking in lshVU_g.)
//
//go:noescape
func lshVU(z, x []Word, s uint) (c Word)

// rshVU sets z = x>>s, returning the low bits c. 1 ≤ s ≤ _W-1.
//
//go:noescape
func rshVU(z, x []Word, s uint) (c Word)
// mulAddVWW sets z = x*m + a, returning the final carry c
// (see mulAddVWW_g for the generic reference implementation).
//
// mulAddVWW should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/remyoudompheng/bigfft
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mulAddVWW
//go:noescape
func mulAddVWW(z, x []Word, m, a Word) (c Word)
// addMulVVW sets z = z + x*y, returning the final carry c.
//
// addMulVVW should be an internal detail (and a stale one at that),
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/remyoudompheng/bigfft
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname addMulVVW
func addMulVVW(z, x []Word, y Word) (c Word) {
	// Equivalent to addMulVVWW with z doubling as the addend vector
	// and a zero carry-in.
	return addMulVVWW(z, z, x, y, 0)
}

// addMulVVWW sets z = x + y*m + a, returning the final carry c.
//
//go:noescape
func addMulVVWW(z, x, y []Word, m, a Word) (c Word)
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements multi-precision decimal numbers.
// The implementation is for float to decimal conversion only;
// not general purpose use.
// The only operations are precise conversion from binary to
// decimal and rounding.
//
// The key observation and some code (shr) is borrowed from
// strconv/decimal.go: conversion of binary fractional values can be done
// precisely in multi-precision decimal because 2 divides 10 (required for
// >> of mantissa); but conversion of decimal floating-point values cannot
// be done precisely in binary representation.
//
// In contrast to strconv/decimal.go, only right shift is implemented in
// decimal format - left shift can be done precisely in binary format.
package big
// A decimal represents an unsigned floating-point number in decimal representation.
// The value of a non-zero decimal d is d.mant * 10**d.exp with 0.1 <= d.mant < 1,
// with the most-significant mantissa digit at index 0. For the zero decimal, the
// mantissa length and exponent are 0.
// The zero value for decimal represents a ready-to-use 0.0.
// The mantissa carries no trailing zero digits (see trim).
type decimal struct {
	mant []byte // mantissa ASCII digits, big-endian
	exp  int    // exponent (position of the decimal point relative to mant)
}
// at returns the i'th mantissa digit, starting with the most significant digit at 0.
// Positions outside the stored mantissa read as '0'.
func (d *decimal) at(i int) byte {
	if i < 0 || i >= len(d.mant) {
		return '0'
	}
	return d.mant[i]
}
// Maximum shift amount that can be done in one pass without overflow.
// A Word has _W bits and (1<<maxShift - 1)*10 + 9 must fit into Word
// (rsh accumulates up to one extra decimal digit per step).
const maxShift = _W - 4
// TODO(gri) Since we know the desired decimal precision when converting
// a floating-point number, we may be able to limit the number of decimal
// digits that need to be computed by init by providing an additional
// precision argument and keeping track of when a number was truncated early
// (equivalent of "sticky bit" in binary rounding).
// TODO(gri) Along the same lines, enforce some limit to shift magnitudes
// to avoid "infinitely" long running conversions (until we run out of space).
// init initializes x to the decimal representation of m << shift (for
// shift >= 0), or m >> -shift (for shift < 0).
func (x *decimal) init(m nat, shift int) {
	// special case 0
	if len(m) == 0 {
		x.mant = x.mant[:0]
		x.exp = 0
		return
	}

	// Optimization: If we need to shift right, first remove any trailing
	// zero bits from m to reduce shift amount that needs to be done in
	// decimal format (since that is likely slower).
	if shift < 0 {
		ntz := m.trailingZeroBits()
		s := uint(-shift)
		if s >= ntz {
			s = ntz // shift at most ntz bits
		}
		m = nat(nil).rsh(m, s)
		shift += int(s)
	}

	// Do any shift left in binary representation.
	if shift > 0 {
		m = nat(nil).lsh(m, uint(shift))
		shift = 0
	}

	// Convert mantissa into decimal representation.
	s := m.utoa(10)
	n := len(s)
	x.exp = n // all digits are initially left of the decimal point
	// Trim trailing zeros; instead the exponent is tracking
	// the decimal point independent of the number of digits.
	for n > 0 && s[n-1] == '0' {
		n--
	}
	x.mant = append(x.mant[:0], s[:n]...)

	// Do any (remaining) shift right in decimal representation,
	// at most maxShift bits per pass to avoid overflow in rsh.
	if shift < 0 {
		for shift < -maxShift {
			rsh(x, maxShift)
			shift += maxShift
		}
		rsh(x, uint(-shift))
	}
}
// rsh implements x >> s, for s <= maxShift.
// It divides the decimal mantissa by 1<<s with a single shift-and-subtract
// (long division) pass over the digits, adjusting x.exp as needed.
func rsh(x *decimal, s uint) {
	// Division by 1<<s using shift-and-subtract algorithm.
	// pick up enough leading digits to cover first shift
	r := 0 // read index
	var n Word
	for n>>s == 0 && r < len(x.mant) {
		ch := Word(x.mant[r])
		r++
		n = n*10 + ch - '0'
	}
	if n == 0 {
		// x == 0; shouldn't get here, but handle anyway
		x.mant = x.mant[:0]
		return
	}
	// Keep scaling n until the first quotient digit is nonzero; each
	// extra factor of 10 moves the decimal point, accounted for below.
	for n>>s == 0 {
		r++
		n *= 10
	}
	x.exp += 1 - r

	// read a digit, write a digit
	w := 0 // write index
	mask := Word(1)<<s - 1
	for r < len(x.mant) {
		ch := Word(x.mant[r])
		r++
		d := n >> s  // next quotient digit
		n &= mask    // n -= d << s
		x.mant[w] = byte(d + '0')
		w++
		n = n*10 + ch - '0'
	}

	// write extra digits that still fit
	for n > 0 && w < len(x.mant) {
		d := n >> s
		n &= mask
		x.mant[w] = byte(d + '0')
		w++
		n = n * 10
	}
	x.mant = x.mant[:w] // the number may be shorter (e.g. 1024 >> 10)

	// append additional digits that didn't fit
	for n > 0 {
		d := n >> s
		n &= mask
		x.mant = append(x.mant, byte(d+'0'))
		n = n * 10
	}

	trim(x)
}
// String returns x in positional decimal notation ("0.00ddd", "dd.ddd",
// or "ddd00" depending on where the exponent places the decimal point).
func (x *decimal) String() string {
	if len(x.mant) == 0 {
		return "0"
	}
	var out []byte
	switch {
	case x.exp <= 0:
		// 0.00ddd: every digit sits after the decimal point.
		out = make([]byte, 0, 2+(-x.exp)+len(x.mant))
		out = append(out, '0', '.')
		out = appendZeros(out, -x.exp)
		out = append(out, x.mant...)
	case x.exp < len(x.mant): // 0 < x.exp
		// dd.ddd: the decimal point falls inside the mantissa.
		out = make([]byte, 0, 1+len(x.mant))
		out = append(out, x.mant[:x.exp]...)
		out = append(out, '.')
		out = append(out, x.mant[x.exp:]...)
	default: // len(x.mant) <= x.exp
		// ddd00: zeros pad up to the decimal point.
		out = make([]byte, 0, x.exp)
		out = append(out, x.mant...)
		out = appendZeros(out, x.exp-len(x.mant))
	}
	return string(out)
}
// appendZeros appends n '0' digits to buf and returns the extended buffer.
// For n <= 0, buf is returned unchanged.
func appendZeros(buf []byte, n int) []byte {
	for i := 0; i < n; i++ {
		buf = append(buf, '0')
	}
	return buf
}
// shouldRoundUp reports whether x, when shortened to n digits, should be
// rounded up. n must be a valid index for x.mant.
func shouldRoundUp(x *decimal, n int) bool {
	digit := x.mant[n]
	if digit == '5' && n+1 == len(x.mant) {
		// Exactly halfway: round to even, i.e. round up only when the
		// digit that will remain last is odd.
		if n == 0 {
			return false
		}
		return (x.mant[n-1]-'0')%2 != 0
	}
	// Not halfway: the first cut digit decides (x.mant has no trailing zeros).
	return digit >= '5'
}
// round sets x to (at most) n mantissa digits by rounding it
// to the nearest even value with n (or fewer) mantissa digits.
// If n < 0, x remains unchanged.
func (x *decimal) round(n int) {
	if n < 0 || n >= len(x.mant) {
		return // nothing to do
	}
	// Delegate: shouldRoundUp decides the direction, the helpers mutate x.
	if shouldRoundUp(x, n) {
		x.roundUp(n)
	} else {
		x.roundDown(n)
	}
}
// roundUp shortens x to n mantissa digits, rounding up and propagating
// the carry through any trailing '9's. If n is out of range, x is unchanged.
func (x *decimal) roundUp(n int) {
	if n < 0 || n >= len(x.mant) {
		return // nothing to do
	}
	// 0 <= n < len(x.mant): scan left past digits that would overflow
	// when incremented.
	i := n
	for i > 0 && x.mant[i-1] >= '9' {
		i--
	}
	if i == 0 {
		// All digits are '9's: the result is a single '1' with the
		// exponent bumped by one.
		x.mant[0] = '1' // ok since len(x.mant) > 0
		x.mant = x.mant[:1]
		x.exp++
		return
	}
	// i > 0 && x.mant[i-1] < '9': increment it and cut the rest.
	x.mant[i-1]++
	x.mant = x.mant[:i]
	// x already trimmed
}
// roundDown truncates x to n mantissa digits (rounding toward zero).
// If n is out of range, x is unchanged.
func (x *decimal) roundDown(n int) {
	if 0 <= n && n < len(x.mant) {
		x.mant = x.mant[:n]
		trim(x)
	}
}
// trim cuts off any trailing zeros from x's mantissa;
// they are meaningless for the value of x.
func trim(x *decimal) {
	n := len(x.mant)
	for n > 0 && x.mant[n-1] == '0' {
		n--
	}
	x.mant = x.mant[:n]
	if n == 0 {
		// Canonical zero also has a zero exponent.
		x.exp = 0
	}
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements multi-precision floating-point numbers.
// Like in the GNU MPFR library (https://www.mpfr.org/), operands
// can be of mixed precision. Unlike MPFR, the rounding mode is
// not specified with each operation, but with each operand. The
// rounding mode of the result operand determines the rounding
// mode of an operation. This is a from-scratch implementation.
package big
import (
"fmt"
"math"
"math/bits"
)
// debugFloat guards expensive internal consistency checks (see validate).
const debugFloat = false // enable for debugging
// A nonzero finite Float represents a multi-precision floating point number
//
// sign × mantissa × 2**exponent
//
// with 0.5 <= mantissa < 1.0, and MinExp <= exponent <= MaxExp.
// A Float may also be zero (+0, -0) or infinite (+Inf, -Inf).
// All Floats are ordered, and the ordering of two Floats x and y
// is defined by x.Cmp(y).
//
// Each Float value also has a precision, rounding mode, and accuracy.
// The precision is the maximum number of mantissa bits available to
// represent the value. The rounding mode specifies how a result should
// be rounded to fit into the mantissa bits, and accuracy describes the
// rounding error with respect to the exact result.
//
// Unless specified otherwise, all operations (including setters) that
// specify a *Float variable for the result (usually via the receiver
// with the exception of [Float.MantExp]), round the numeric result according
// to the precision and rounding mode of the result variable.
//
// If the provided result precision is 0 (see below), it is set to the
// precision of the argument with the largest precision value before any
// rounding takes place, and the rounding mode remains unchanged. Thus,
// uninitialized Floats provided as result arguments will have their
// precision set to a reasonable value determined by the operands, and
// their mode is the zero value for RoundingMode (ToNearestEven).
//
// By setting the desired precision to 24 or 53 and using matching rounding
// mode (typically [ToNearestEven]), Float operations produce the same results
// as the corresponding float32 or float64 IEEE 754 arithmetic for operands
// that correspond to normal (i.e., not denormal) float32 or float64 numbers.
// Exponent underflow and overflow lead to a 0 or an Infinity for different
// values than IEEE 754 because Float exponents have a much larger range.
//
// The zero (uninitialized) value for a Float is ready to use and represents
// the number +0.0 exactly, with precision 0 and rounding mode [ToNearestEven].
//
// Operations always take pointer arguments (*Float) rather
// than Float values, and each unique Float value requires
// its own unique *Float pointer. To "copy" a Float value,
// an existing (or newly allocated) Float must be set to
// a new value using the [Float.Set] method; shallow copies
// of Floats are not supported and may lead to errors.
type Float struct {
	prec uint32       // mantissa precision in bits (see Prec); 0 until set
	mode RoundingMode // rounding mode used when storing results into this Float
	acc  Accuracy     // accuracy of the most recent result (see Acc)
	form form         // zero, finite, or inf
	neg  bool         // sign bit
	mant nat          // normalized mantissa; meaningful only when form == finite
	exp  int32        // exponent; meaningful only when form == finite
}
// An ErrNaN panic is raised by a [Float] operation that would lead to
// a NaN under IEEE 754 rules. An ErrNaN implements the error interface.
type ErrNaN struct {
	msg string // description of the offending operation
}

// Error returns the error message describing the NaN-producing operation.
func (err ErrNaN) Error() string {
	return err.msg
}
// NewFloat allocates and returns a new [Float] set to x,
// with precision 53 and rounding mode [ToNearestEven].
// NewFloat panics with [ErrNaN] if x is a NaN.
func NewFloat(x float64) *Float {
	if !math.IsNaN(x) {
		return new(Float).SetFloat64(x)
	}
	panic(ErrNaN{"NewFloat(NaN)"})
}
// Exponent and precision limits.
// These bounds mirror the Float field types: exp is an int32 and
// prec is a uint32.
const (
	MaxExp  = math.MaxInt32  // largest supported exponent
	MinExp  = math.MinInt32  // smallest supported exponent
	MaxPrec = math.MaxUint32 // largest (theoretically) supported precision; likely memory-limited
)
// Internal representation: The mantissa bits x.mant of a nonzero finite
// Float x are stored in a nat slice long enough to hold up to x.prec bits;
// the slice may (but doesn't have to) be shorter if the mantissa contains
// trailing 0 bits. x.mant is normalized if the msb of x.mant == 1 (i.e.,
// the msb is shifted all the way "to the left"). Thus, if the mantissa has
// trailing 0 bits or x.prec is not a multiple of the Word size _W,
// x.mant[0] has trailing zero bits. The msb of the mantissa corresponds
// to the value 0.5; the exponent x.exp shifts the binary point as needed.
//
// A zero or non-finite Float x ignores x.mant and x.exp.
//
// x form neg mant exp
// ----------------------------------------------------------
// ±0 zero sign - -
// 0 < |x| < +Inf finite sign mantissa exponent
// ±Inf inf sign - -
// A form value describes the internal representation.
type form byte

// The form value order is relevant - do not change!
const (
	zero   form = iota // x == ±0
	finite             // 0 < |x| < +Inf
	inf                // x == ±Inf
)
// RoundingMode determines how a [Float] value is rounded to the
// desired precision. Rounding may change the [Float] value; the
// rounding error is described by the [Float]'s [Accuracy].
type RoundingMode byte

// These constants define supported rounding modes.
// The zero value is [ToNearestEven].
const (
	ToNearestEven RoundingMode = iota // == IEEE 754-2008 roundTiesToEven
	ToNearestAway                     // == IEEE 754-2008 roundTiesToAway
	ToZero                            // == IEEE 754-2008 roundTowardZero
	AwayFromZero                      // no IEEE 754-2008 equivalent
	ToNegativeInf                     // == IEEE 754-2008 roundTowardNegative
	ToPositiveInf                     // == IEEE 754-2008 roundTowardPositive
)

//go:generate stringer -type=RoundingMode
// Accuracy describes the rounding error produced by the most recent
// operation that generated a [Float] value, relative to the exact value.
type Accuracy int8

// Constants describing the [Accuracy] of a [Float].
// They are ordered: Below < Exact < Above.
const (
	Below Accuracy = -1
	Exact Accuracy = 0
	Above Accuracy = +1
)

//go:generate stringer -type=Accuracy
// SetPrec sets z's precision to prec and returns the (possibly) rounded
// value of z. Rounding occurs according to z's rounding mode if the mantissa
// cannot be represented in prec bits without loss of precision.
// SetPrec(0) maps all finite values to ±0; infinite values remain unchanged.
// If prec > [MaxPrec], it is set to [MaxPrec].
func (z *Float) SetPrec(prec uint) *Float {
	z.acc = Exact // optimistic: assume no rounding will be needed

	// Special case: zero precision truncates any finite value to ±0.
	if prec == 0 {
		z.prec = 0
		if z.form == finite {
			z.acc = makeAcc(z.neg)
			z.form = zero
		}
		return z
	}

	// General case: clamp to MaxPrec, then round only if the precision shrank.
	if prec > MaxPrec {
		prec = MaxPrec
	}
	prev := z.prec
	z.prec = uint32(prec)
	if z.prec < prev {
		z.round(0)
	}
	return z
}
// makeAcc maps a "result lies above the exact value" flag to the
// corresponding Accuracy: Above for true, Below for false.
func makeAcc(above bool) Accuracy {
	if !above {
		return Below
	}
	return Above
}
// SetMode sets z's rounding mode to mode and returns an exact z.
// z remains unchanged otherwise.
// z.SetMode(z.Mode()) is a cheap way to set z's accuracy to [Exact].
func (z *Float) SetMode(mode RoundingMode) *Float {
	z.acc = Exact
	z.mode = mode
	return z
}
// Prec returns the mantissa precision of x in bits.
// The result may be 0 for |x| == 0 and |x| == Inf.
func (x *Float) Prec() uint {
	return uint(x.prec)
}

// MinPrec returns the minimum precision required to represent x exactly
// (i.e., the smallest prec before x.SetPrec(prec) would start rounding x).
// The result is 0 for |x| == 0 and |x| == Inf.
func (x *Float) MinPrec() uint {
	if x.form != finite {
		return 0
	}
	// Stored mantissa bits minus trailing zero bits.
	return uint(len(x.mant))*_W - x.mant.trailingZeroBits()
}

// Mode returns the rounding mode of x.
func (x *Float) Mode() RoundingMode {
	return x.mode
}

// Acc returns the accuracy of x produced by the most recent
// operation, unless explicitly documented otherwise by that
// operation.
func (x *Float) Acc() Accuracy {
	return x.acc
}
// Sign returns:
//   - -1 if x < 0;
//   - 0 if x is ±0;
//   - +1 if x > 0.
func (x *Float) Sign() int {
	if debugFloat {
		x.validate()
	}
	switch {
	case x.form == zero:
		return 0
	case x.neg:
		return -1
	}
	return 1
}
// MantExp breaks x into its mantissa and exponent components
// and returns the exponent. If a non-nil mant argument is
// provided its value is set to the mantissa of x, with the
// same precision and rounding mode as x. The components
// satisfy x == mant × 2**exp, with 0.5 <= |mant| < 1.0.
// Calling MantExp with a nil argument is an efficient way to
// get the exponent of the receiver.
//
// Special cases are:
//
//	( ±0).MantExp(mant) = 0, with mant set to ±0
//	(±Inf).MantExp(mant) = 0, with mant set to ±Inf
//
// x and mant may be the same in which case x is set to its
// mantissa value.
func (x *Float) MantExp(mant *Float) (exp int) {
	if debugFloat {
		x.validate()
	}
	if x.form == finite {
		exp = int(x.exp)
	}
	if mant == nil {
		return exp
	}
	mant.Copy(x)
	if mant.form == finite {
		mant.exp = 0
	}
	return exp
}
// setExpAndRound sets z's exponent to exp and rounds z with sticky bit sbit.
// Exponents below MinExp underflow to ±0; exponents above MaxExp overflow
// to ±Inf; z.acc is set accordingly in both cases.
func (z *Float) setExpAndRound(exp int64, sbit uint) {
	switch {
	case exp < MinExp:
		// underflow
		z.acc = makeAcc(z.neg)
		z.form = zero
	case exp > MaxExp:
		// overflow
		z.acc = makeAcc(!z.neg)
		z.form = inf
	default:
		z.form = finite
		z.exp = int32(exp)
		z.round(sbit)
	}
}
// SetMantExp sets z to mant × 2**exp and returns z.
// The result z has the same precision and rounding mode
// as mant. SetMantExp is an inverse of [Float.MantExp] but does
// not require 0.5 <= |mant| < 1.0. Specifically, for a
// given x of type *[Float], SetMantExp relates to [Float.MantExp]
// as follows:
//
//	mant := new(Float)
//	new(Float).SetMantExp(mant, x.MantExp(mant)).Cmp(x) == 0
//
// Special cases are:
//
//	z.SetMantExp(  ±0, exp) =   ±0
//	z.SetMantExp(±Inf, exp) = ±Inf
//
// z and mant may be the same in which case z's exponent
// is set to exp.
func (z *Float) SetMantExp(mant *Float, exp int) *Float {
	if debugFloat {
		z.validate()
		mant.validate()
	}
	z.Copy(mant)
	if z.form != finite {
		// ±0 and ±Inf ignore the exponent.
		return z
	}
	// 0 < |mant| < +Inf
	z.setExpAndRound(int64(z.exp)+int64(exp), 0)
	return z
}
// Signbit reports whether x is negative or negative zero.
// It reports x's internal sign flag, which is maintained for all forms
// (±0, finite values, and ±Inf alike).
func (x *Float) Signbit() bool {
	return x.neg
}

// IsInf reports whether x is +Inf or -Inf.
func (x *Float) IsInf() bool {
	return x.form == inf
}
// IsInt reports whether x is an integer.
// ±Inf values are not integers.
func (x *Float) IsInt() bool {
	if debugFloat {
		x.validate()
	}
	// special cases: ±0 is an integer, ±Inf is not
	if x.form != finite {
		return x.form == zero
	}
	// x.form == finite
	if x.exp <= 0 {
		// 0 < |x| < 1: never an integer
		return false
	}
	// x.exp > 0
	// x is an integer iff no mantissa bit falls below the binary point:
	// either the whole precision fits in exp bits, or the minimal
	// precision (significant bits) does.
	return x.prec <= uint32(x.exp) || x.MinPrec() <= uint(x.exp) // not enough bits for fractional mantissa
}
// debugging support

// validate panics with a descriptive message if x violates internal
// invariants. It must only be called when debugFloat is set.
func (x *Float) validate() {
	if !debugFloat {
		// avoid performance bugs
		panic("validate called but debugFloat is not set")
	}
	if msg := x.validate0(); msg != "" {
		panic(msg)
	}
}

// validate0 checks the invariants of a finite x and returns a description
// of the first violation found, or "" if x is valid.
func (x *Float) validate0() string {
	if x.form != finite {
		// only finite values carry mantissa/precision invariants
		return ""
	}
	m := len(x.mant)
	if m == 0 {
		return "nonzero finite number with empty mantissa"
	}
	// The mantissa must be normalized: msb of the top word set.
	const msb = 1 << (_W - 1)
	if x.mant[m-1]&msb == 0 {
		return fmt.Sprintf("msb not set in last word %#x of %s", x.mant[m-1], x.Text('p', 0))
	}
	if x.prec == 0 {
		return "zero precision finite number"
	}
	return ""
}
// round rounds z according to z.mode to z.prec bits and sets z.acc accordingly.
// sbit must be 0 or 1 and summarizes any "sticky bit" information one might
// have before calling round. z's mantissa must be normalized (with the msb set)
// or empty. On return, z.acc reports whether rounding left z above, below,
// or exactly at the unrounded value.
//
// CAUTION: The rounding modes [ToNegativeInf], [ToPositiveInf] are affected by the
// sign of z. For correct rounding, the sign of z must be set correctly before
// calling round.
func (z *Float) round(sbit uint) {
	if debugFloat {
		z.validate()
	}

	z.acc = Exact
	if z.form != finite {
		// ±0 or ±Inf => nothing left to do
		return
	}
	// z.form == finite && len(z.mant) > 0
	// m > 0 implies z.prec > 0 (checked by validate)

	m := uint32(len(z.mant)) // present mantissa length in words
	bits := m * _W           // present mantissa bits; bits > 0
	if bits <= z.prec {
		// mantissa fits => nothing to do
		return
	}
	// bits > z.prec

	// Rounding is based on two bits: the rounding bit (rbit) and the
	// sticky bit (sbit). The rbit is the bit immediately before the
	// z.prec leading mantissa bits (the "0.5"). The sbit is set if any
	// of the bits before the rbit are set (the "0.25", "0.125", etc.):
	//
	//   rbit  sbit  => "fractional part"
	//
	//   0     0        == 0
	//   0     1        >  0  , < 0.5
	//   1     0        == 0.5
	//   1     1        >  0.5, < 1.0

	// bits > z.prec: mantissa too large => round
	r := uint(bits - z.prec - 1) // rounding bit position; r >= 0
	rbit := z.mant.bit(r) & 1    // rounding bit; be safe and ensure it's a single bit
	// The sticky bit is only needed for rounding ToNearestEven
	// or when the rounding bit is zero. Avoid computation otherwise.
	if sbit == 0 && (rbit == 0 || z.mode == ToNearestEven) {
		sbit = z.mant.sticky(r)
	}
	sbit &= 1 // be safe and ensure it's a single bit

	// cut off extra words
	n := (z.prec + (_W - 1)) / _W // mantissa length in words for desired precision
	if m > n {
		copy(z.mant, z.mant[m-n:]) // move n last words to front
		z.mant = z.mant[:n]
	}

	// determine number of trailing zero bits (ntz) and compute lsb mask of mantissa's least-significant word
	ntz := n*_W - z.prec // 0 <= ntz < _W
	lsb := Word(1) << ntz

	// round if result is inexact
	if rbit|sbit != 0 {
		// Make rounding decision: The result mantissa is truncated ("rounded down")
		// by default. Decide if we need to increment, or "round up", the (unsigned)
		// mantissa.
		inc := false
		switch z.mode {
		case ToNegativeInf:
			inc = z.neg
		case ToZero:
			// nothing to do
		case ToNearestEven:
			inc = rbit != 0 && (sbit != 0 || z.mant[0]&lsb != 0)
		case ToNearestAway:
			inc = rbit != 0
		case AwayFromZero:
			inc = true
		case ToPositiveInf:
			inc = !z.neg
		default:
			panic("unreachable")
		}

		// A positive result (!z.neg) is Above the exact result if we increment,
		// and it's Below if we truncate (Exact results require no rounding).
		// For a negative result (z.neg) it is exactly the opposite.
		z.acc = makeAcc(inc != z.neg)

		if inc {
			// add 1 to mantissa
			if addVW(z.mant, z.mant, lsb) != 0 {
				// mantissa overflow => adjust exponent
				if z.exp >= MaxExp {
					// exponent overflow
					z.form = inf
					return
				}
				z.exp++
				// adjust mantissa: divide by 2 to compensate for exponent adjustment
				rshVU(z.mant, z.mant, 1)
				// set msb == carry == 1 from the mantissa overflow above
				const msb = 1 << (_W - 1)
				z.mant[n-1] |= msb
			}
		}
	}

	// zero out trailing bits in least-significant word
	z.mant[0] &^= lsb - 1

	if debugFloat {
		z.validate()
	}
}
// setBits64 sets z to the 64-bit value x with sign neg, rounding to z.prec.
// A zero precision is first promoted to 64, in which case no rounding occurs.
func (z *Float) setBits64(neg bool, x uint64) *Float {
	if z.prec == 0 {
		z.prec = 64
	}
	z.acc = Exact
	z.neg = neg
	if x == 0 {
		z.form = zero
		return z
	}
	// x != 0: normalize so the mantissa's msb is set.
	z.form = finite
	shift := bits.LeadingZeros64(x)
	z.mant = z.mant.setUint64(x << uint(shift))
	z.exp = int32(64 - shift) // always fits
	if z.prec < 64 {
		z.round(0)
	}
	return z
}
// SetUint64 sets z to the (possibly rounded) value of x and returns z.
// If z's precision is 0, it is changed to 64 (and rounding will have
// no effect).
func (z *Float) SetUint64(x uint64) *Float {
	return z.setBits64(false, x)
}

// SetInt64 sets z to the (possibly rounded) value of x and returns z.
// If z's precision is 0, it is changed to 64 (and rounding will have
// no effect).
func (z *Float) SetInt64(x int64) *Float {
	u := x
	if u < 0 {
		u = -u
	}
	// Note: for x == math.MinInt64, -u wraps back to MinInt64, but the
	// conversion uint64(u) below still yields the correct magnitude 1<<63.
	// We cannot simply call z.SetUint64(uint64(u)) and change
	// the sign afterwards because the sign affects rounding.
	return z.setBits64(x < 0, uint64(u))
}
// SetFloat64 sets z to the (possibly rounded) value of x and returns z.
// If z's precision is 0, it is changed to 53 (and rounding will have
// no effect). SetFloat64 panics with [ErrNaN] if x is a NaN.
func (z *Float) SetFloat64(x float64) *Float {
	if z.prec == 0 {
		z.prec = 53
	}
	if math.IsNaN(x) {
		panic(ErrNaN{"Float.SetFloat64(NaN)"})
	}
	z.acc = Exact
	z.neg = math.Signbit(x) // handle -0, -Inf correctly
	if x == 0 {
		z.form = zero
		return z
	}
	if math.IsInf(x, 0) {
		z.form = inf
		return z
	}
	// normalized x != 0
	z.form = finite
	fmant, exp := math.Frexp(x) // get normalized mantissa
	// Shift the 52 fraction bits of fmant to the top of the word and set
	// the msb explicitly (the mantissa's implicit leading 1).
	z.mant = z.mant.setUint64(1<<63 | math.Float64bits(fmant)<<11)
	z.exp = int32(exp) // always fits
	if z.prec < 53 {
		z.round(0)
	}
	return z
}
// fnorm normalizes mantissa m by shifting it to the left
// such that the msb of the most-significant word (msw) is 1.
// It returns the shift amount. It assumes that len(m) != 0.
func fnorm(m nat) int64 {
	if debugFloat && (len(m) == 0 || m[len(m)-1] == 0) {
		panic("msw of mantissa is 0")
	}
	shift := nlz(m[len(m)-1])
	if shift == 0 {
		// Already normalized.
		return 0
	}
	carry := lshVU(m, m, shift)
	if debugFloat && carry != 0 {
		panic("nlz or lshVU incorrect")
	}
	return int64(shift)
}
// SetInt sets z to the (possibly rounded) value of x and returns z.
// If z's precision is 0, it is changed to the larger of x.BitLen()
// or 64 (and rounding will have no effect).
func (z *Float) SetInt(x *Int) *Float {
	// TODO(gri) can be more efficient if z.prec > 0
	// but small compared to the size of x, or if there
	// are many trailing 0's.
	bits := uint32(x.BitLen())
	if z.prec == 0 {
		z.prec = max(bits, 64)
	}
	z.acc = Exact
	z.neg = x.neg
	if len(x.abs) == 0 {
		z.form = zero
		return z
	}
	// x != 0: copy and normalize the mantissa so its msb is set;
	// the msb then represents 0.5, so the value is mant * 2**bits.
	z.mant = z.mant.set(x.abs)
	fnorm(z.mant)
	z.setExpAndRound(int64(bits), 0)
	return z
}
// SetRat sets z to the (possibly rounded) value of x and returns z.
// If z's precision is 0, it is changed to the largest of a.BitLen(),
// b.BitLen(), or 64; with x = a/b.
func (z *Float) SetRat(x *Rat) *Float {
	if x.IsInt() {
		// Integral rational: no division needed.
		return z.SetInt(x.Num())
	}
	var num, den Float
	num.SetInt(x.Num())
	den.SetInt(x.Denom())
	if z.prec == 0 {
		z.prec = max(num.prec, den.prec)
	}
	return z.Quo(&num, &den)
}
// SetInf sets z to the infinite Float -Inf if signbit is
// set, or +Inf if signbit is not set, and returns z. The
// precision of z is unchanged and the result is always
// [Exact].
func (z *Float) SetInf(signbit bool) *Float {
	z.form = inf
	z.neg = signbit
	z.acc = Exact
	return z
}
// Set sets z to the (possibly rounded) value of x and returns z.
// If z's precision is 0, it is changed to the precision of x
// before setting z (and rounding will have no effect).
// Rounding is performed according to z's precision and rounding
// mode; and z's accuracy reports the result error relative to the
// exact (not rounded) result.
func (z *Float) Set(x *Float) *Float {
	if debugFloat {
		x.validate()
	}
	z.acc = Exact
	if z != x {
		z.form = x.form
		z.neg = x.neg
		if x.form == finite {
			z.exp = x.exp
			z.mant = z.mant.set(x.mant)
		}
		if z.prec == 0 {
			// Adopt x's precision: the copied mantissa then fits exactly.
			z.prec = x.prec
		} else if z.prec < x.prec {
			// z is narrower than x: round the copied mantissa to z.prec.
			z.round(0)
		}
	}
	return z
}
// Copy sets z to x, with the same precision, rounding mode, and accuracy as x.
// Copy returns z. If x and z are identical, Copy is a no-op.
func (z *Float) Copy(x *Float) *Float {
	if debugFloat {
		x.validate()
	}
	if z == x {
		return z
	}
	z.prec, z.mode, z.acc = x.prec, x.mode, x.acc
	z.form, z.neg = x.form, x.neg
	if z.form == finite {
		// Only finite values carry a meaningful mantissa/exponent.
		z.mant = z.mant.set(x.mant)
		z.exp = x.exp
	}
	return z
}
// msb32 returns the 32 most significant bits of x.
// A nonempty x must be normalized (msb of the top word set).
func msb32(x nat) uint32 {
	top := len(x) - 1
	if top < 0 {
		return 0
	}
	if debugFloat && x[top]&(1<<(_W-1)) == 0 {
		panic("x not normalized")
	}
	if _W == 32 {
		return uint32(x[top])
	}
	if _W == 64 {
		return uint32(x[top] >> 32)
	}
	panic("unreachable")
}
// msb64 returns the 64 most significant bits of x.
// A nonempty x must be normalized (msb of the top word set).
func msb64(x nat) uint64 {
	top := len(x) - 1
	if top < 0 {
		return 0
	}
	if debugFloat && x[top]&(1<<(_W-1)) == 0 {
		panic("x not normalized")
	}
	switch _W {
	case 32:
		// Combine the top word with the next one (if any) below it.
		hi := uint64(x[top]) << 32
		if top > 0 {
			hi |= uint64(x[top-1])
		}
		return hi
	case 64:
		return uint64(x[top])
	}
	panic("unreachable")
}
// Uint64 returns the unsigned integer resulting from truncating x
// towards zero. If 0 <= x <= [math.MaxUint64], the result is [Exact]
// if x is an integer and [Below] otherwise.
// The result is (0, [Above]) for x < 0, and ([math.MaxUint64], [Below])
// for x > [math.MaxUint64].
func (x *Float) Uint64() (uint64, Accuracy) {
	if debugFloat {
		x.validate()
	}

	switch x.form {
	case finite:
		if x.neg {
			// negative values truncate toward zero, i.e. upward to 0
			return 0, Above
		}
		// 0 < x < +Inf
		if x.exp <= 0 {
			// 0 < x < 1
			return 0, Below
		}
		// 1 <= x < Inf
		if x.exp <= 64 {
			// u = trunc(x) fits into a uint64: take the top exp bits
			// of the mantissa.
			u := msb64(x.mant) >> (64 - uint32(x.exp))
			if x.MinPrec() <= 64 {
				return u, Exact
			}
			return u, Below // x truncated
		}
		// x too large
		return math.MaxUint64, Below

	case zero:
		return 0, Exact

	case inf:
		if x.neg {
			return 0, Above
		}
		return math.MaxUint64, Below
	}

	panic("unreachable")
}
// Int64 returns the integer resulting from truncating x towards zero.
// If [math.MinInt64] <= x <= [math.MaxInt64], the result is [Exact] if x is
// an integer, and [Above] (x < 0) or [Below] (x > 0) otherwise.
// The result is ([math.MinInt64], [Above]) for x < [math.MinInt64],
// and ([math.MaxInt64], [Below]) for x > [math.MaxInt64].
func (x *Float) Int64() (int64, Accuracy) {
	if debugFloat {
		x.validate()
	}

	switch x.form {
	case finite:
		// 0 < |x| < +Inf
		acc := makeAcc(x.neg) // truncation moves negative x up, positive x down
		if x.exp <= 0 {
			// 0 < |x| < 1
			return 0, acc
		}
		// x.exp > 0

		// 1 <= |x| < +Inf
		if x.exp <= 63 {
			// i = trunc(x) fits into an int64 (excluding math.MinInt64):
			// take the top exp bits of the mantissa.
			i := int64(msb64(x.mant) >> (64 - uint32(x.exp)))
			if x.neg {
				i = -i
			}
			if x.MinPrec() <= uint(x.exp) {
				return i, Exact
			}
			return i, acc // x truncated
		}

		if x.neg {
			// check for special case x == math.MinInt64 (i.e., x == -(0.5 << 64))
			if x.exp == 64 && x.MinPrec() == 1 {
				acc = Exact
			}
			return math.MinInt64, acc
		}
		// x too large
		return math.MaxInt64, Below

	case zero:
		return 0, Exact

	case inf:
		if x.neg {
			return math.MinInt64, Above
		}
		return math.MaxInt64, Below
	}

	panic("unreachable")
}
// Float32 returns the float32 value nearest to x. If x is too small to be
// represented by a float32 (|x| < [math.SmallestNonzeroFloat32]), the result
// is (0, [Below]) or (-0, [Above]), respectively, depending on the sign of x.
// If x is too large to be represented by a float32 (|x| > [math.MaxFloat32]),
// the result is (+Inf, [Above]) or (-Inf, [Below]), depending on the sign of x.
func (x *Float) Float32() (float32, Accuracy) {
	if debugFloat {
		x.validate()
	}
	switch x.form {
	case finite:
		// 0 < |x| < +Inf
		const (
			fbits = 32                // float size
			mbits = 23                // mantissa size (excluding implicit msb)
			ebits = fbits - mbits - 1 // 8  exponent size
			bias  = 1<<(ebits-1) - 1  // 127  exponent bias
			dmin  = 1 - bias - mbits  // -149  smallest unbiased exponent (denormal); kept for documentation
			emin  = 1 - bias          // -126  smallest unbiased exponent (normal)
			emax  = bias              // 127  largest unbiased exponent (normal)
		)
		// Float mantissa m is 0.5 <= m < 1.0; compute exponent e for float32 mantissa.
		e := x.exp - 1 // exponent for normal mantissa m with 1.0 <= m < 2.0
		// Compute precision p for float32 mantissa.
		// If the exponent is too small, we have a denormal number before
		// rounding and fewer than p mantissa bits of precision available
		// (the exponent remains fixed but the mantissa gets shifted right).
		p := mbits + 1 // precision of normal float
		if e < emin {
			// recompute precision
			p = mbits + 1 - emin + int(e)
			// If p == 0, the mantissa of x is shifted so much to the right
			// that its msb falls immediately to the right of the float32
			// mantissa space. In other words, if the smallest denormal is
			// considered "1.0", for p == 0, the mantissa value m is >= 0.5.
			// If m > 0.5, it is rounded up to 1.0; i.e., the smallest denormal.
			// If m == 0.5, it is rounded down to even, i.e., 0.0.
			// If p < 0, the mantissa value m is <= "0.25" which is never rounded up.
			if p < 0 /* m <= 0.25 */ || p == 0 && x.mant.sticky(uint(len(x.mant))*_W-1) == 0 /* m == 0.5 */ {
				// underflow to ±0
				if x.neg {
					var z float32
					return -z, Above // -0, and -0 > x
				}
				return 0.0, Below
			}
			// otherwise, round up
			// We handle p == 0 explicitly because it's easy and because
			// Float.round doesn't support rounding to 0 bits of precision.
			if p == 0 {
				if x.neg {
					return -math.SmallestNonzeroFloat32, Below
				}
				return math.SmallestNonzeroFloat32, Above
			}
		}
		// p > 0
		// round: r has the zero-value rounding mode, i.e. to-nearest-even,
		// as required for IEEE float conversion.
		var r Float
		r.prec = uint32(p)
		r.Set(x)
		e = r.exp - 1
		// Rounding may have caused r to overflow to ±Inf
		// (rounding never causes underflows to 0).
		// If the exponent is too large, also overflow to ±Inf.
		if r.form == inf || e > emax {
			// overflow
			if x.neg {
				return float32(math.Inf(-1)), Below
			}
			return float32(math.Inf(+1)), Above
		}
		// e <= emax
		// Determine sign, biased exponent, and mantissa.
		var sign, bexp, mant uint32
		if x.neg {
			sign = 1 << (fbits - 1)
		}
		// Rounding may have caused a denormal number to
		// become normal. Check again.
		if e < emin {
			// denormal number: recompute precision
			// Since rounding may have at best increased precision
			// and we have eliminated p <= 0 early, we know p > 0.
			// bexp == 0 for denormals
			p = mbits + 1 - emin + int(e)
			mant = msb32(r.mant) >> uint(fbits-p)
		} else {
			// normal number: emin <= e <= emax
			bexp = uint32(e+bias) << mbits
			mant = msb32(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
		}
		// assemble the IEEE 754 bit pattern directly
		return math.Float32frombits(sign | bexp | mant), r.acc
	case zero:
		if x.neg {
			var z float32
			return -z, Exact
		}
		return 0.0, Exact
	case inf:
		if x.neg {
			return float32(math.Inf(-1)), Exact
		}
		return float32(math.Inf(+1)), Exact
	}
	panic("unreachable")
}
// Float64 returns the float64 value nearest to x. If x is too small to be
// represented by a float64 (|x| < [math.SmallestNonzeroFloat64]), the result
// is (0, [Below]) or (-0, [Above]), respectively, depending on the sign of x.
// If x is too large to be represented by a float64 (|x| > [math.MaxFloat64]),
// the result is (+Inf, [Above]) or (-Inf, [Below]), depending on the sign of x.
func (x *Float) Float64() (float64, Accuracy) {
	if debugFloat {
		x.validate()
	}
	switch x.form {
	case finite:
		// 0 < |x| < +Inf
		const (
			fbits = 64                // float size
			mbits = 52                // mantissa size (excluding implicit msb)
			ebits = fbits - mbits - 1 // 11  exponent size
			bias  = 1<<(ebits-1) - 1  // 1023  exponent bias
			dmin  = 1 - bias - mbits  // -1074  smallest unbiased exponent (denormal); kept for documentation
			emin  = 1 - bias          // -1022  smallest unbiased exponent (normal)
			emax  = bias              // 1023  largest unbiased exponent (normal)
		)
		// Float mantissa m is 0.5 <= m < 1.0; compute exponent e for float64 mantissa.
		e := x.exp - 1 // exponent for normal mantissa m with 1.0 <= m < 2.0
		// Compute precision p for float64 mantissa.
		// If the exponent is too small, we have a denormal number before
		// rounding and fewer than p mantissa bits of precision available
		// (the exponent remains fixed but the mantissa gets shifted right).
		p := mbits + 1 // precision of normal float
		if e < emin {
			// recompute precision
			p = mbits + 1 - emin + int(e)
			// If p == 0, the mantissa of x is shifted so much to the right
			// that its msb falls immediately to the right of the float64
			// mantissa space. In other words, if the smallest denormal is
			// considered "1.0", for p == 0, the mantissa value m is >= 0.5.
			// If m > 0.5, it is rounded up to 1.0; i.e., the smallest denormal.
			// If m == 0.5, it is rounded down to even, i.e., 0.0.
			// If p < 0, the mantissa value m is <= "0.25" which is never rounded up.
			if p < 0 /* m <= 0.25 */ || p == 0 && x.mant.sticky(uint(len(x.mant))*_W-1) == 0 /* m == 0.5 */ {
				// underflow to ±0
				if x.neg {
					var z float64
					return -z, Above // -0, and -0 > x
				}
				return 0.0, Below
			}
			// otherwise, round up
			// We handle p == 0 explicitly because it's easy and because
			// Float.round doesn't support rounding to 0 bits of precision.
			if p == 0 {
				if x.neg {
					return -math.SmallestNonzeroFloat64, Below
				}
				return math.SmallestNonzeroFloat64, Above
			}
		}
		// p > 0
		// round: r has the zero-value rounding mode, i.e. to-nearest-even,
		// as required for IEEE float conversion.
		var r Float
		r.prec = uint32(p)
		r.Set(x)
		e = r.exp - 1
		// Rounding may have caused r to overflow to ±Inf
		// (rounding never causes underflows to 0).
		// If the exponent is too large, also overflow to ±Inf.
		if r.form == inf || e > emax {
			// overflow
			if x.neg {
				return math.Inf(-1), Below
			}
			return math.Inf(+1), Above
		}
		// e <= emax
		// Determine sign, biased exponent, and mantissa.
		var sign, bexp, mant uint64
		if x.neg {
			sign = 1 << (fbits - 1)
		}
		// Rounding may have caused a denormal number to
		// become normal. Check again.
		if e < emin {
			// denormal number: recompute precision
			// Since rounding may have at best increased precision
			// and we have eliminated p <= 0 early, we know p > 0.
			// bexp == 0 for denormals
			p = mbits + 1 - emin + int(e)
			mant = msb64(r.mant) >> uint(fbits-p)
		} else {
			// normal number: emin <= e <= emax
			bexp = uint64(e+bias) << mbits
			mant = msb64(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
		}
		// assemble the IEEE 754 bit pattern directly
		return math.Float64frombits(sign | bexp | mant), r.acc
	case zero:
		if x.neg {
			var z float64
			return -z, Exact
		}
		return 0.0, Exact
	case inf:
		if x.neg {
			return math.Inf(-1), Exact
		}
		return math.Inf(+1), Exact
	}
	panic("unreachable")
}
// Int returns the result of truncating x towards zero;
// or nil if x is an infinity.
// The result is [Exact] if x.IsInt(); otherwise it is [Below]
// for x > 0, and [Above] for x < 0.
// If a non-nil *[Int] argument z is provided, [Int] stores
// the result in z instead of allocating a new [Int].
func (x *Float) Int(z *Int) (*Int, Accuracy) {
	if debugFloat {
		x.validate()
	}
	if z == nil && x.form <= finite {
		z = new(Int)
	}
	switch x.form {
	case finite:
		// 0 < |x| < +Inf
		acc := makeAcc(x.neg)
		if x.exp <= 0 {
			// 0 < |x| < 1
			return z.SetInt64(0), acc
		}
		// x.exp > 0
		// 1 <= |x| < +Inf
		// determine minimum required precision for x
		allBits := uint(len(x.mant)) * _W
		exp := uint(x.exp)
		if x.MinPrec() <= exp {
			acc = Exact // x is an integer; truncation drops nothing
		}
		// shift mantissa as needed
		if z == nil {
			z = new(Int)
		}
		z.neg = x.neg
		// The default clause belongs last: with it wedged between the
		// cases the switch still behaved correctly (Go runs default only
		// when no case matches, regardless of position) but read wrongly.
		switch {
		case exp > allBits:
			z.abs = z.abs.lsh(x.mant, exp-allBits)
		case exp < allBits:
			z.abs = z.abs.rsh(x.mant, allBits-exp)
		default:
			z.abs = z.abs.set(x.mant)
		}
		return z, acc
	case zero:
		return z.SetInt64(0), Exact
	case inf:
		return nil, makeAcc(x.neg)
	}
	panic("unreachable")
}
// Rat returns the rational number corresponding to x;
// or nil if x is an infinity.
// The result is [Exact] if x is not an Inf.
// If a non-nil *[Rat] argument z is provided, [Rat] stores
// the result in z instead of allocating a new [Rat].
func (x *Float) Rat(z *Rat) (*Rat, Accuracy) {
	if debugFloat {
		x.validate()
	}
	if z == nil && x.form <= finite {
		z = new(Rat)
	}
	switch x.form {
	case finite:
		// 0 < |x| < +Inf
		allBits := int32(len(x.mant)) * _W
		// build up numerator and denominator
		z.a.neg = x.neg
		// default clause moved to its canonical last position
		// (behavior is unchanged; Go runs default only when no case matches)
		switch {
		case x.exp > allBits:
			z.a.abs = z.a.abs.lsh(x.mant, uint(x.exp-allBits))
			z.b.abs = z.b.abs[:0] // == 1 (see Rat)
			// z already in normal form
		case x.exp < allBits:
			z.a.abs = z.a.abs.set(x.mant)
			t := z.b.abs.setUint64(1)
			z.b.abs = t.lsh(t, uint(allBits-x.exp))
			z.norm()
		default:
			z.a.abs = z.a.abs.set(x.mant)
			z.b.abs = z.b.abs[:0] // == 1 (see Rat)
			// z already in normal form
		}
		return z, Exact
	case zero:
		return z.SetInt64(0), Exact
	case inf:
		return nil, makeAcc(x.neg)
	}
	panic("unreachable")
}
// Abs sets z to the (possibly rounded) value |x| (the absolute value of x)
// and returns z.
func (z *Float) Abs(x *Float) *Float {
	// Set may round x to z's precision; clearing neg then yields |x|.
	z.Set(x).neg = false
	return z
}
// Neg sets z to the (possibly rounded) value of x with its sign negated,
// and returns z.
func (z *Float) Neg(x *Float) *Float {
	w := z.Set(x) // w == z; Set may round x to z's precision
	w.neg = !w.neg
	return w
}
// validateBinaryOperands checks that both operands of a binary operation
// carry a non-empty mantissa; it must only be called with debugFloat set.
func validateBinaryOperands(x, y *Float) {
	if !debugFloat {
		// avoid performance bugs
		panic("validateBinaryOperands called but debugFloat is not set")
	}
	switch {
	case len(x.mant) == 0:
		panic("empty mantissa for x")
	case len(y.mant) == 0:
		panic("empty mantissa for y")
	}
}
// z = x + y, ignoring signs of x and y for the addition
// but using the sign of z for rounding the result.
// x and y must have a non-empty mantissa and valid exponent.
func (z *Float) uadd(x, y *Float) {
	// Note: This implementation requires 2 shifts most of the
	// time. It is also inefficient if exponents or precisions
	// differ by wide margins. The following article describes
	// an efficient (but much more complicated) implementation
	// compatible with the internal representation used here:
	//
	// Vincent Lefèvre: "The Generic Multiple-Precision Floating-
	// Point Addition With Exact Rounding (as in the MPFR Library)"
	// http://www.vinc17.net/research/papers/rnc6.pdf
	if debugFloat {
		validateBinaryOperands(x, y)
	}
	// compute exponents ex, ey for mantissa with "binary point"
	// on the right (mantissa.0) - use int64 to avoid overflow
	ex := int64(x.exp) - int64(len(x.mant))*_W
	ey := int64(y.exp) - int64(len(y.mant))*_W
	al := alias(z.mant, x.mant) || alias(z.mant, y.mant)
	// TODO(gri) having a combined add-and-shift primitive
	// could make this code significantly faster
	//
	// default clause restored to its canonical last position
	// (behavior unchanged; Go runs default only when no case matches)
	switch {
	case ex < ey:
		if al {
			t := nat(nil).lsh(y.mant, uint(ey-ex))
			z.mant = z.mant.add(x.mant, t)
		} else {
			z.mant = z.mant.lsh(y.mant, uint(ey-ex))
			z.mant = z.mant.add(x.mant, z.mant)
		}
	case ex > ey:
		if al {
			t := nat(nil).lsh(x.mant, uint(ex-ey))
			z.mant = z.mant.add(t, y.mant)
		} else {
			z.mant = z.mant.lsh(x.mant, uint(ex-ey))
			z.mant = z.mant.add(z.mant, y.mant)
		}
		ex = ey
	default:
		// ex == ey, no shift needed
		z.mant = z.mant.add(x.mant, y.mant)
	}
	// len(z.mant) > 0
	z.setExpAndRound(ex+int64(len(z.mant))*_W-fnorm(z.mant), 0)
}
// z = x - y for |x| > |y|, ignoring signs of x and y for the subtraction
// but using the sign of z for rounding the result.
// x and y must have a non-empty mantissa and valid exponent.
func (z *Float) usub(x, y *Float) {
	// This code is symmetric to uadd.
	// We have not factored the common code out because
	// eventually uadd (and usub) should be optimized
	// by special-casing, and the code will diverge.
	if debugFloat {
		validateBinaryOperands(x, y)
	}
	ex := int64(x.exp) - int64(len(x.mant))*_W
	ey := int64(y.exp) - int64(len(y.mant))*_W
	al := alias(z.mant, x.mant) || alias(z.mant, y.mant)
	// default clause restored to its canonical last position
	// (behavior unchanged; Go runs default only when no case matches)
	switch {
	case ex < ey:
		if al {
			t := nat(nil).lsh(y.mant, uint(ey-ex))
			z.mant = t.sub(x.mant, t)
		} else {
			z.mant = z.mant.lsh(y.mant, uint(ey-ex))
			z.mant = z.mant.sub(x.mant, z.mant)
		}
	case ex > ey:
		if al {
			t := nat(nil).lsh(x.mant, uint(ex-ey))
			z.mant = t.sub(t, y.mant)
		} else {
			z.mant = z.mant.lsh(x.mant, uint(ex-ey))
			z.mant = z.mant.sub(z.mant, y.mant)
		}
		ex = ey
	default:
		// ex == ey, no shift needed
		z.mant = z.mant.sub(x.mant, y.mant)
	}
	// operands may have canceled each other out
	if len(z.mant) == 0 {
		z.acc = Exact
		z.form = zero
		z.neg = false
		return
	}
	// len(z.mant) > 0
	z.setExpAndRound(ex+int64(len(z.mant))*_W-fnorm(z.mant), 0)
}
// z = x * y, ignoring signs of x and y for the multiplication
// but using the sign of z for rounding the result.
// x and y must have a non-empty mantissa and valid exponent.
func (z *Float) umul(x, y *Float) {
	if debugFloat {
		validateBinaryOperands(x, y)
	}
	// Note: This is doing too much work if the precision
	// of z is less than the sum of the precisions of x
	// and y which is often the case (e.g., if all floats
	// have the same precision).
	// TODO(gri) Optimize this for the common case.
	exp := int64(x.exp) + int64(y.exp)
	if x != y {
		z.mant = z.mant.mul(nil, x.mant, y.mant)
	} else {
		// squaring: use the dedicated primitive
		z.mant = z.mant.sqr(nil, x.mant)
	}
	z.setExpAndRound(exp-fnorm(z.mant), 0)
}
// z = x / y, ignoring signs of x and y for the division
// but using the sign of z for rounding the result.
// x and y must have a non-empty mantissa and valid exponent.
func (z *Float) uquo(x, y *Float) {
	if debugFloat {
		validateBinaryOperands(x, y)
	}
	// mantissa length in words for desired result precision + 1
	// (at least one extra bit so we get the rounding bit after
	// the division)
	n := int(z.prec/_W) + 1
	// compute adjusted x.mant such that we get enough result precision
	xadj := x.mant
	if d := n - len(x.mant) + len(y.mant); d > 0 {
		// d extra words needed => add d "0 digits" to x
		// (prepend zero words at the low end; the scale change is
		// compensated in the exponent computation below)
		xadj = make(nat, len(x.mant)+d)
		copy(xadj[d:], x.mant)
	}
	// TODO(gri): If we have too many digits (d < 0), we should be able
	// to shorten x for faster division. But we must be extra careful
	// with rounding in that case.
	// Compute d before division since there may be aliasing of x.mant
	// (via xadj) or y.mant with z.mant.
	d := len(xadj) - len(y.mant)
	// divide
	stk := getStack()
	defer stk.free()
	var r nat
	z.mant, r = z.mant.div(stk, nil, xadj, y.mant)
	e := int64(x.exp) - int64(y.exp) - int64(d-len(z.mant))*_W
	// The result is long enough to include (at least) the rounding bit.
	// If there's a non-zero remainder, the corresponding fractional part
	// (if it were computed), would have a non-zero sticky bit (if it were
	// zero, it couldn't have a non-zero remainder).
	var sbit uint
	if len(r) > 0 {
		sbit = 1 // non-zero remainder => quotient was truncated
	}
	z.setExpAndRound(e-fnorm(z.mant), sbit)
}
// ucmp returns -1, 0, or +1, depending on whether
// |x| < |y|, |x| == |y|, or |x| > |y|.
// x and y must have a non-empty mantissa and valid exponent.
func (x *Float) ucmp(y *Float) int {
	if debugFloat {
		validateBinaryOperands(x, y)
	}
	// Mantissas are normalized (msb set), so the exponents order the
	// magnitudes whenever they differ.
	if x.exp != y.exp {
		if x.exp < y.exp {
			return -1
		}
		return +1
	}
	// x.exp == y.exp: compare mantissa words from most to least
	// significant; a missing word compares as zero.
	i, j := len(x.mant), len(y.mant)
	for i > 0 || j > 0 {
		var xm, ym Word
		if i > 0 {
			i--
			xm = x.mant[i]
		}
		if j > 0 {
			j--
			ym = y.mant[j]
		}
		if xm != ym {
			if xm < ym {
				return -1
			}
			return +1
		}
	}
	return 0
}
// Handling of sign bit as defined by IEEE 754-2008, section 6.3:
//
// When neither the inputs nor result are NaN, the sign of a product or
// quotient is the exclusive OR of the operands’ signs; the sign of a sum,
// or of a difference x−y regarded as a sum x+(−y), differs from at most
// one of the addends’ signs; and the sign of the result of conversions,
// the quantize operation, the roundToIntegral operations, and the
// roundToIntegralExact (see 5.3.1) is the sign of the first or only operand.
// These rules shall apply even when operands or results are zero or infinite.
//
// When the sum of two operands with opposite signs (or the difference of
// two operands with like signs) is exactly zero, the sign of that sum (or
// difference) shall be +0 in all rounding-direction attributes except
// roundTowardNegative; under that attribute, the sign of an exact zero
// sum (or difference) shall be −0. However, x+x = x−(−x) retains the same
// sign as x even when x is zero.
//
// See also: https://play.golang.org/p/RtH3UCt5IH
// Add sets z to the rounded sum x+y and returns z. If z's precision is 0,
// it is changed to the larger of x's or y's precision before the operation.
// Rounding is performed according to z's precision and rounding mode; and
// z's accuracy reports the result error relative to the exact (not rounded)
// result. Add panics with [ErrNaN] if x and y are infinities with opposite
// signs. The value of z is undefined in that case.
func (z *Float) Add(x, y *Float) *Float {
	if debugFloat {
		x.validate()
		y.validate()
	}
	if z.prec == 0 {
		z.prec = max(x.prec, y.prec)
	}
	if x.form == finite && y.form == finite {
		// x + y (common case)
		// Below we set z.neg = x.neg, and when z aliases y this will
		// change the y operand's sign. This is fine, because if an
		// operand aliases the receiver it'll be overwritten, but we still
		// want the original x.neg and y.neg values when we evaluate
		// x.neg != y.neg, so we need to save y.neg before setting z.neg.
		yneg := y.neg
		z.neg = x.neg
		if x.neg == yneg {
			// x + y == x + y
			// (-x) + (-y) == -(x + y)
			z.uadd(x, y)
		} else {
			// x + (-y) == x - y == -(y - x)
			// (-x) + y == y - x == -(x - y)
			// usub requires |first| > |second|, so order by magnitude.
			if x.ucmp(y) > 0 {
				z.usub(x, y)
			} else {
				z.neg = !z.neg
				z.usub(y, x)
			}
		}
		// IEEE 754-2008, section 6.3: an exact zero sum is -0 only under
		// roundTowardNegative (see the sign-bit comment block above).
		if z.form == zero && z.mode == ToNegativeInf && z.acc == Exact {
			z.neg = true
		}
		return z
	}
	if x.form == inf && y.form == inf && x.neg != y.neg {
		// +Inf + -Inf
		// -Inf + +Inf
		// value of z is undefined but make sure it's valid
		z.acc = Exact
		z.form = zero
		z.neg = false
		panic(ErrNaN{"addition of infinities with opposite signs"})
	}
	if x.form == zero && y.form == zero {
		// ±0 + ±0
		z.acc = Exact
		z.form = zero
		z.neg = x.neg && y.neg // -0 + -0 == -0
		return z
	}
	if x.form == inf || y.form == zero {
		// ±Inf + y
		// x + ±0
		return z.Set(x)
	}
	// ±0 + y
	// x + ±Inf
	return z.Set(y)
}
// Sub sets z to the rounded difference x-y and returns z.
// Precision, rounding, and accuracy reporting are as for [Float.Add].
// Sub panics with [ErrNaN] if x and y are infinities with equal
// signs. The value of z is undefined in that case.
func (z *Float) Sub(x, y *Float) *Float {
	if debugFloat {
		x.validate()
		y.validate()
	}
	if z.prec == 0 {
		z.prec = max(x.prec, y.prec)
	}
	if x.form == finite && y.form == finite {
		// x - y (common case)
		// Save y.neg before setting z.neg in case z aliases y
		// (same aliasing concern as in Add).
		yneg := y.neg
		z.neg = x.neg
		if x.neg != yneg {
			// x - (-y) == x + y
			// (-x) - y == -(x + y)
			z.uadd(x, y)
		} else {
			// x - y == x - y == -(y - x)
			// (-x) - (-y) == y - x == -(x - y)
			// usub requires |first| > |second|, so order by magnitude.
			if x.ucmp(y) > 0 {
				z.usub(x, y)
			} else {
				z.neg = !z.neg
				z.usub(y, x)
			}
		}
		// IEEE 754-2008, section 6.3: an exact zero difference is -0 only
		// under roundTowardNegative (see the sign-bit comment block above).
		if z.form == zero && z.mode == ToNegativeInf && z.acc == Exact {
			z.neg = true
		}
		return z
	}
	if x.form == inf && y.form == inf && x.neg == y.neg {
		// +Inf - +Inf
		// -Inf - -Inf
		// value of z is undefined but make sure it's valid
		z.acc = Exact
		z.form = zero
		z.neg = false
		panic(ErrNaN{"subtraction of infinities with equal signs"})
	}
	if x.form == zero && y.form == zero {
		// ±0 - ±0
		z.acc = Exact
		z.form = zero
		z.neg = x.neg && !y.neg // -0 - +0 == -0
		return z
	}
	if x.form == inf || y.form == zero {
		// ±Inf - y
		// x - ±0
		return z.Set(x)
	}
	// ±0 - y
	// x - ±Inf
	return z.Neg(y)
}
// Mul sets z to the rounded product x*y and returns z.
// Precision, rounding, and accuracy reporting are as for [Float.Add].
// Mul panics with [ErrNaN] if one operand is zero and the other
// operand an infinity. The value of z is undefined in that case.
func (z *Float) Mul(x, y *Float) *Float {
	if debugFloat {
		x.validate()
		y.validate()
	}
	if z.prec == 0 {
		z.prec = max(x.prec, y.prec)
	}
	// sign of a product is the XOR of the operands' signs
	z.neg = x.neg != y.neg
	if x.form == finite && y.form == finite {
		// x * y (common case)
		z.umul(x, y)
		return z
	}
	// at least one operand is ±0 or ±Inf; the result is exact
	z.acc = Exact
	if x.form == zero && y.form == inf || x.form == inf && y.form == zero {
		// ±0 * ±Inf
		// ±Inf * ±0
		// value of z is undefined but make sure it's valid
		z.form = zero
		z.neg = false
		panic(ErrNaN{"multiplication of zero with infinity"})
	}
	if x.form == inf || y.form == inf {
		// ±Inf * y
		// x * ±Inf
		z.form = inf
	} else {
		// ±0 * y
		// x * ±0
		z.form = zero
	}
	return z
}
// Quo sets z to the rounded quotient x/y and returns z.
// Precision, rounding, and accuracy reporting are as for [Float.Add].
// Quo panics with [ErrNaN] if both operands are zero or infinities.
// The value of z is undefined in that case.
func (z *Float) Quo(x, y *Float) *Float {
	if debugFloat {
		x.validate()
		y.validate()
	}
	if z.prec == 0 {
		z.prec = max(x.prec, y.prec)
	}
	// sign of a quotient is the XOR of the operands' signs
	z.neg = x.neg != y.neg
	if x.form == finite && y.form == finite {
		// x / y (common case)
		z.uquo(x, y)
		return z
	}
	// at least one operand is ±0 or ±Inf; the result is exact
	z.acc = Exact
	if x.form == zero && y.form == zero || x.form == inf && y.form == inf {
		// ±0 / ±0
		// ±Inf / ±Inf
		// value of z is undefined but make sure it's valid
		z.form = zero
		z.neg = false
		panic(ErrNaN{"division of zero by zero or infinity by infinity"})
	}
	if x.form == zero || y.form == inf {
		// ±0 / y
		// x / ±Inf
		z.form = zero
	} else {
		// x / ±0
		// ±Inf / y
		z.form = inf
	}
	return z
}
// Cmp compares x and y and returns:
//   - -1 if x < y;
//   - 0 if x == y (incl. -0 == 0, -Inf == -Inf, and +Inf == +Inf);
//   - +1 if x > y.
func (x *Float) Cmp(y *Float) int {
	if debugFloat {
		x.validate()
		y.validate()
	}
	mx, my := x.ord(), y.ord()
	if mx != my {
		// different ordering classes decide the comparison outright
		if mx < my {
			return -1
		}
		return +1
	}
	// mx == my: only finite nonzero values (|class| == 1) need their
	// mantissas compared; zeros and like-signed infinities are equal.
	if mx == +1 {
		return x.ucmp(y)
	}
	if mx == -1 {
		// both negative: larger magnitude means smaller value
		return y.ucmp(x)
	}
	return 0
}
// ord classifies x and returns:
//
//	-2 if -Inf == x
//	-1 if -Inf < x < 0
//	 0 if x == 0 (signed or unsigned)
//	+1 if 0 < x < +Inf
//	+2 if x == +Inf
func (x *Float) ord() int {
	m := 0 // zero form (and -0 negates to 0 anyway)
	switch x.form {
	case finite:
		m = 1
	case inf:
		m = 2
	}
	if x.neg {
		return -m
	}
	return m
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements string-to-Float conversion functions.
package big
import (
"fmt"
"io"
"strings"
)
// floatZero is a ready-made zero-valued Float (usage is elsewhere in this file).
var floatZero Float
// SetString sets z to the value of s and returns z and a boolean indicating
// success. s must be a floating-point number of the same format as accepted
// by [Float.Parse], with base argument 0. The entire string (not just a prefix) must
// be valid for success. If the operation failed, the value of z is undefined
// but the returned value is nil.
func (z *Float) SetString(s string) (*Float, bool) {
	f, _, err := z.Parse(s, 0)
	if err != nil {
		return nil, false
	}
	return f, true
}
// scan is like Parse but reads the longest possible prefix representing a valid
// floating point number from an io.ByteScanner rather than a string. It serves
// as the implementation of Parse. It does not recognize ±Inf and does not expect
// EOF at the end.
func (z *Float) scan(r io.ByteScanner, base int) (f *Float, b int, err error) {
	prec := z.prec
	if prec == 0 {
		prec = 64 // default precision when z's precision is unset
	}
	// A reasonable value in case of an error.
	z.form = zero
	// sign
	z.neg, err = scanSign(r)
	if err != nil {
		return
	}
	// mantissa
	var fcount int // fractional digit count; valid if <= 0
	z.mant, b, fcount, err = z.mant.scan(r, base, true)
	if err != nil {
		return
	}
	// exponent
	var exp int64
	var ebase int
	exp, ebase, err = scanExponent(r, true, base == 0)
	if err != nil {
		return
	}
	// special-case 0
	if len(z.mant) == 0 {
		z.prec = prec
		z.acc = Exact
		z.form = zero
		f = z
		return
	}
	// len(z.mant) > 0
	// The mantissa may have a radix point (fcount <= 0) and there
	// may be a nonzero exponent exp. The radix point amounts to a
	// division by b**(-fcount). An exponent means multiplication by
	// ebase**exp. Finally, mantissa normalization (shift left) requires
	// a correcting multiplication by 2**(-shiftcount). Multiplications
	// are commutative, so we can apply them in any order as long as there
	// is no loss of precision. We only have powers of 2 and 10, and
	// we split powers of 10 into the product of the same powers of
	// 2 and 5. This reduces the size of the multiplication factor
	// needed for base-10 exponents.
	// normalize mantissa and determine initial exponent contributions
	// (fnorm's result accounts for the normalizing left shift)
	exp2 := int64(len(z.mant))*_W - fnorm(z.mant)
	exp5 := int64(0)
	// determine binary or decimal exponent contribution of radix point
	if fcount < 0 {
		// The mantissa has a radix point ddd.dddd; and
		// -fcount is the number of digits to the right
		// of '.'. Adjust relevant exponent accordingly.
		d := int64(fcount)
		switch b {
		case 10:
			exp5 = d
			fallthrough // 10**e == 5**e * 2**e
		case 2:
			exp2 += d
		case 8:
			exp2 += d * 3 // octal digits are 3 bits each
		case 16:
			exp2 += d * 4 // hexadecimal digits are 4 bits each
		default:
			panic("unexpected mantissa base")
		}
		// fcount consumed - not needed anymore
	}
	// take actual exponent into account
	switch ebase {
	case 10:
		exp5 += exp
		fallthrough // see fallthrough above
	case 2:
		exp2 += exp
	default:
		panic("unexpected exponent base")
	}
	// exp consumed - not needed anymore
	// apply 2**exp2
	if MinExp <= exp2 && exp2 <= MaxExp {
		z.prec = prec
		z.form = finite
		z.exp = int32(exp2)
		f = z
	} else {
		err = fmt.Errorf("exponent overflow")
		return
	}
	if exp5 == 0 {
		// no decimal exponent contribution; just round to prec
		z.round(0)
		return
	}
	// exp5 != 0
	// apply 5**exp5
	p := new(Float).SetPrec(z.Prec() + 64) // use more bits for p -- TODO(gri) what is the right number?
	if exp5 < 0 {
		z.Quo(z, p.pow5(uint64(-exp5)))
	} else {
		z.Mul(z, p.pow5(uint64(exp5)))
	}
	return
}
// pow5tab holds all powers of 5 that fit into a uint64 (5**0 .. 5**27),
// generated by:
//
//	for p, q := uint64(0), uint64(1); p < q; p, q = q, q*5 {
//		fmt.Println(q)
//	}
var pow5tab = [...]uint64{
	1,
	5,
	25,
	125,
	625,
	3125,
	15625,
	78125,
	390625,
	1953125,
	9765625,
	48828125,
	244140625,
	1220703125,
	6103515625,
	30517578125,
	152587890625,
	762939453125,
	3814697265625,
	19073486328125,
	95367431640625,
	476837158203125,
	2384185791015625,
	11920928955078125,
	59604644775390625,
	298023223876953125,
	1490116119384765625,
	7450580596923828125,
}
// pow5 sets z to 5**n and returns z.
// n must not be negative.
func (z *Float) pow5(n uint64) *Float {
	const m = uint64(len(pow5tab) - 1)
	if n <= m {
		// small exponents come straight from the table
		return z.SetUint64(pow5tab[n])
	}
	// n > m: start from the largest table entry, then apply
	// binary exponentiation for the remaining exponent.
	z.SetUint64(pow5tab[m])
	n -= m
	// use more bits for p than for z
	// TODO(gri) what is the right number?
	p := new(Float).SetPrec(z.Prec() + 64).SetUint64(5)
	for ; n > 0; n >>= 1 {
		if n&1 != 0 {
			z.Mul(z, p)
		}
		p.Mul(p, p)
	}
	return z
}
// Parse parses s which must contain a text representation of a floating-
// point number with a mantissa in the given conversion base (the exponent
// is always a decimal number), or a string representing an infinite value.
//
// For base 0, an underscore character “_” may appear between a base
// prefix and an adjacent digit, and between successive digits; such
// underscores do not change the value of the number, or the returned
// digit count. Incorrect placement of underscores is reported as an
// error if there are no other errors. If base != 0, underscores are
// not recognized and thus terminate scanning like any other character
// that is not a valid radix point or digit.
//
// It sets z to the (possibly rounded) value of the corresponding floating-
// point value, and returns z, the actual base b, and an error err, if any.
// The entire string (not just a prefix) must be consumed for success.
// If z's precision is 0, it is changed to 64 before rounding takes effect.
// The number must be of the form:
//
//	number    = [ sign ] ( float | "inf" | "Inf" ) .
//	sign      = "+" | "-" .
//	float     = ( mantissa | prefix pmantissa ) [ exponent ] .
//	prefix    = "0" [ "b" | "B" | "o" | "O" | "x" | "X" ] .
//	mantissa  = digits "." [ digits ] | digits | "." digits .
//	pmantissa = [ "_" ] digits "." [ digits ] | [ "_" ] digits | "." digits .
//	exponent  = ( "e" | "E" | "p" | "P" ) [ sign ] digits .
//	digits    = digit { [ "_" ] digit } .
//	digit     = "0" ... "9" | "a" ... "z" | "A" ... "Z" .
//
// The base argument must be 0, 2, 8, 10, or 16. Providing an invalid base
// argument will lead to a run-time panic.
//
// For base 0, the number prefix determines the actual base: A prefix of
// “0b” or “0B” selects base 2, “0o” or “0O” selects base 8, and
// “0x” or “0X” selects base 16. Otherwise, the actual base is 10 and
// no prefix is accepted. The octal prefix "0" is not supported (a leading
// "0" is simply considered a "0").
//
// A "p" or "P" exponent indicates a base 2 (rather than base 10) exponent;
// for instance, "0x1.fffffffffffffp1023" (using base 0) represents the
// maximum float64 value. For hexadecimal mantissae, the exponent character
// must be one of 'p' or 'P', if present (an "e" or "E" exponent indicator
// cannot be distinguished from a mantissa digit).
//
// The returned *Float f is nil and the value of z is valid but not
// defined if an error is reported.
func (z *Float) Parse(s string, base int) (f *Float, b int, err error) {
	// scan doesn't handle ±Inf, so recognize those forms here
	if len(s) == 3 && (s == "Inf" || s == "inf") {
		return z.SetInf(false), 0, nil
	}
	if len(s) == 4 && (s[0] == '+' || s[0] == '-') && (s[1:] == "Inf" || s[1:] == "inf") {
		return z.SetInf(s[0] == '-'), 0, nil
	}
	r := strings.NewReader(s)
	f, b, err = z.scan(r, base)
	if err != nil {
		return
	}
	// entire string must have been consumed
	ch, err2 := r.ReadByte()
	switch {
	case err2 == nil:
		err = fmt.Errorf("expected end of string, found %q", ch)
	case err2 != io.EOF:
		err = err2
	}
	return
}
// ParseFloat is like f.Parse(s, base) with f set to the given precision
// and rounding mode.
func ParseFloat(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error) {
	z := new(Float).SetPrec(prec).SetMode(mode)
	return z.Parse(s, base)
}
// Compile-time check of the interface contract below.
var _ fmt.Scanner = (*Float)(nil) // *Float must implement fmt.Scanner
// Scan is a support routine for [fmt.Scanner]; it sets z to the value of
// the scanned number. It accepts formats whose verbs are supported by
// [fmt.Scan] for floating point values, which are:
// 'b' (binary), 'e', 'E', 'f', 'F', 'g' and 'G'.
// Scan doesn't handle ±Inf.
func (z *Float) Scan(s fmt.ScanState, ch rune) error {
	s.SkipSpace()
	if _, _, err := z.scan(byteReader{s}, 0); err != nil {
		return err
	}
	return nil
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements encoding/decoding of Floats.
package big
import (
"errors"
"fmt"
"internal/byteorder"
)
// Gob codec version. Permits backward-compatible changes to the encoding.
// It is written as the first byte of every encoded Float (see GobEncode).
const floatGobVersion byte = 1
// GobEncode implements the [encoding/gob.GobEncoder] interface.
// The [Float] value and all its attributes (precision,
// rounding mode, accuracy) are marshaled.
//
// Wire layout: buf[0] version, buf[1] packed mode|acc|form|neg flags,
// buf[2:6] prec (big-endian); finite values additionally carry
// buf[6:10] exp followed by the mantissa bytes.
func (x *Float) GobEncode() ([]byte, error) {
	if x == nil {
		return nil, nil
	}
	// determine max. space (bytes) required for encoding
	sz := 1 + 1 + 4 // version + mode|acc|form|neg (3+2+2+1bit) + prec
	n := 0          // number of mantissa words
	if x.form == finite {
		// add space for mantissa and exponent
		n = int((x.prec + (_W - 1)) / _W) // required mantissa length in words for given precision
		// actual mantissa slice could be shorter (trailing 0's) or longer (unused bits):
		// - if shorter, only encode the words present
		// - if longer, cut off unused words when encoding in bytes
		//   (in practice, this should never happen since rounding
		//   takes care of it, but be safe and do it always)
		if len(x.mant) < n {
			n = len(x.mant)
		}
		// len(x.mant) >= n
		sz += 4 + n*_S // exp + mant
	}
	buf := make([]byte, sz)
	buf[0] = floatGobVersion
	// pack mode (3 bits), acc biased by +1 (2 bits), form (2 bits), neg (1 bit)
	b := byte(x.mode&7)<<5 | byte((x.acc+1)&3)<<3 | byte(x.form&3)<<1
	if x.neg {
		b |= 1
	}
	buf[1] = b
	byteorder.BEPutUint32(buf[2:], x.prec)
	if x.form == finite {
		byteorder.BEPutUint32(buf[6:], uint32(x.exp))
		x.mant[len(x.mant)-n:].bytes(buf[10:]) // cut off unused trailing words
	}
	return buf, nil
}
// GobDecode implements the [encoding/gob.GobDecoder] interface.
// The result is rounded per the precision and rounding mode of
// z unless z's precision is 0, in which case z is set exactly
// to the decoded value.
func (z *Float) GobDecode(buf []byte) error {
	if len(buf) == 0 {
		// Other side sent a nil or default value.
		*z = Float{}
		return nil
	}
	// Every non-empty encoding carries at least version, flags, and
	// prec (6 bytes; see GobEncode) — reject shorter buffers before
	// indexing into them.
	if len(buf) < 6 {
		return errors.New("Float.GobDecode: buffer too small")
	}

	if buf[0] != floatGobVersion {
		return fmt.Errorf("Float.GobDecode: encoding version %d not supported", buf[0])
	}

	// Remember the receiver's precision and mode: a non-zero precision
	// means the decoded value must be re-rounded to it below.
	oldPrec := z.prec
	oldMode := z.mode

	// Unpack the flag byte written by GobEncode:
	// mode (3 bits) | acc+1 (2 bits) | form (2 bits) | neg (1 bit).
	b := buf[1]
	z.mode = RoundingMode((b >> 5) & 7)
	z.acc = Accuracy((b>>3)&3) - 1
	z.form = form((b >> 1) & 3)
	z.neg = b&1 != 0
	z.prec = byteorder.BEUint32(buf[2:])

	if z.form == finite {
		// Finite values additionally carry a 4-byte exponent
		// followed by the mantissa bytes.
		if len(buf) < 10 {
			return errors.New("Float.GobDecode: buffer too small for finite form float")
		}
		z.exp = int32(byteorder.BEUint32(buf[6:]))
		z.mant = z.mant.setBytes(buf[10:])
	}

	if oldPrec != 0 {
		// Re-round to the receiver's own precision and mode.
		z.mode = oldMode
		z.SetPrec(uint(oldPrec))
	}

	// Guard against malformed input decoding to an invalid Float.
	if msg := z.validate0(); msg != "" {
		return errors.New("Float.GobDecode: " + msg)
	}

	return nil
}
// AppendText implements the [encoding.TextAppender] interface.
// Only the [Float] value is marshaled (in full precision), other
// attributes such as precision or accuracy are ignored.
func (x *Float) AppendText(b []byte) ([]byte, error) {
	// A nil receiver marshals as the literal text "<nil>".
	if x == nil {
		return append(b, []byte("<nil>")...), nil
	}
	// 'g' with negative precision: shortest unique representation.
	return x.Append(b, 'g', -1), nil
}
// MarshalText implements the [encoding.TextMarshaler] interface.
// Only the [Float] value is marshaled (in full precision), other
// attributes such as precision or accuracy are ignored.
func (x *Float) MarshalText() (text []byte, err error) {
	// Delegate to AppendText with a nil buffer.
	text, err = x.AppendText(nil)
	return text, err
}
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
// The result is rounded per the precision and rounding mode of z.
// If z's precision is 0, it is changed to 64 before rounding takes
// effect.
func (z *Float) UnmarshalText(text []byte) error {
	// TODO(gri): get rid of the []byte/string conversion
	_, _, err := z.Parse(string(text), 0)
	if err == nil {
		return nil
	}
	// Wrap the parse error with the offending input for context.
	return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Float (%v)", text, err)
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements Float-to-string conversion functions.
// It is closely following the corresponding implementation
// in strconv/ftoa.go, but modified and simplified for Float.
package big
import (
"bytes"
"fmt"
"strconv"
)
// Text converts the floating-point number x to a string according
// to the given format and precision prec. The format is one of:
//
//	'e'	-d.dddde±dd, decimal exponent, at least two (possibly 0) exponent digits
//	'E'	-d.ddddE±dd, decimal exponent, at least two (possibly 0) exponent digits
//	'f'	-ddddd.dddd, no exponent
//	'g'	like 'e' for large exponents, like 'f' otherwise
//	'G'	like 'E' for large exponents, like 'f' otherwise
//	'x'	-0xd.dddddp±dd, hexadecimal mantissa, decimal power of two exponent
//	'p'	-0x.dddp±dd, hexadecimal mantissa, decimal power of two exponent (non-standard)
//	'b'	-ddddddp±dd, decimal mantissa, decimal power of two exponent (non-standard)
//
// For the power-of-two exponent formats, the mantissa is printed in normalized form:
//
//	'x'	hexadecimal mantissa in [1, 2), or 0
//	'p'	hexadecimal mantissa in [½, 1), or 0
//	'b'	decimal integer mantissa using x.Prec() bits, or 0
//
// Note that the 'x' form is the one used by most other languages and libraries.
//
// If format is a different character, Text returns a "%" followed by the
// unrecognized format character.
//
// The precision prec controls the number of digits (excluding the exponent)
// printed by the 'e', 'E', 'f', 'g', 'G', and 'x' formats.
// For 'e', 'E', 'f', and 'x', it is the number of digits after the decimal point.
// For 'g' and 'G' it is the total number of digits. A negative precision selects
// the smallest number of decimal digits necessary to identify the value x uniquely
// using x.Prec() mantissa bits.
// The prec value is ignored for the 'b' and 'p' formats.
func (x *Float) Text(format byte, prec int) string {
	// Pre-size the buffer; Append grows it as needed, so this is
	// only a hint. (Renamed from `cap`, which shadowed the builtin.)
	capacity := 10 // TODO(gri) determine a good/better value here
	if prec > 0 {
		capacity += prec
	}
	return string(x.Append(make([]byte, 0, capacity), format, prec))
}
// String formats x like x.Text('g', 10).
// (String must be called explicitly, [Float.Format] does not support %s verb.)
func (x *Float) String() string {
	// 10 significant digits in 'g' form is the historical default.
	const defaultDigits = 10
	return x.Text('g', defaultDigits)
}
// Append appends to buf the string form of the floating-point number x,
// as generated by x.Text, and returns the extended buffer.
//
// NOTE: the parameter named fmt shadows the fmt package within this
// function; only the format byte is used here, so that is harmless.
func (x *Float) Append(buf []byte, fmt byte, prec int) []byte {
	// sign
	if x.neg {
		buf = append(buf, '-')
	}

	// Inf
	if x.form == inf {
		if !x.neg {
			buf = append(buf, '+')
		}
		return append(buf, "Inf"...)
	}

	// pick off easy formats (these don't use the decimal conversion below)
	switch fmt {
	case 'b':
		return x.fmtB(buf)
	case 'p':
		return x.fmtP(buf)
	case 'x':
		return x.fmtX(buf, prec)
	}

	// Algorithm:
	//   1) convert Float to multiprecision decimal
	//   2) round to desired precision
	//   3) read digits out and format

	// 1) convert Float to multiprecision decimal
	var d decimal // == 0.0
	if x.form == finite {
		// x != 0
		d.init(x.mant, int(x.exp)-x.mant.bitLen())
	}

	// 2) round to desired precision
	shortest := false
	if prec < 0 {
		// Negative precision requests the shortest representation
		// that still uniquely identifies the value at x's precision.
		shortest = true
		roundShortest(&d, x)
		// Precision for shortest representation mode.
		switch fmt {
		case 'e', 'E':
			prec = len(d.mant) - 1
		case 'f':
			prec = max(len(d.mant)-d.exp, 0)
		case 'g', 'G':
			prec = len(d.mant)
		}
	} else {
		// round appropriately
		switch fmt {
		case 'e', 'E':
			// one digit before and number of digits after decimal point
			d.round(1 + prec)
		case 'f':
			// number of digits before and after decimal point
			d.round(d.exp + prec)
		case 'g', 'G':
			if prec == 0 {
				prec = 1
			}
			d.round(prec)
		}
	}

	// 3) read digits out and format
	switch fmt {
	case 'e', 'E':
		return fmtE(buf, fmt, prec, d)
	case 'f':
		return fmtF(buf, prec, d)
	case 'g', 'G':
		// trim trailing fractional zeros in %e format
		eprec := prec
		if eprec > len(d.mant) && len(d.mant) >= d.exp {
			eprec = len(d.mant)
		}
		// %e is used if the exponent from the conversion
		// is less than -4 or greater than or equal to the precision.
		// If precision was the shortest possible, use eprec = 6 for
		// this decision.
		if shortest {
			eprec = 6
		}
		exp := d.exp - 1
		if exp < -4 || exp >= eprec {
			if prec > len(d.mant) {
				prec = len(d.mant)
			}
			// 'g' -> 'e', 'G' -> 'E'
			return fmtE(buf, fmt+'e'-'g', prec-1, d)
		}
		if prec > d.exp {
			prec = len(d.mant)
		}
		return fmtF(buf, max(prec-d.exp, 0), d)
	}

	// unknown format
	if x.neg {
		buf = buf[:len(buf)-1] // sign was added prematurely - remove it again
	}
	return append(buf, '%', fmt)
}
// roundShortest rounds d (in place) to the smallest number of decimal
// digits that still rounds back to x at x's precision, using the
// lower/upper-bound technique from strconv's shortest-float printing.
func roundShortest(d *decimal, x *Float) {
	// if the mantissa is zero, the number is zero - stop now
	if len(d.mant) == 0 {
		return
	}

	// Approach: All numbers in the interval [x - 1/2ulp, x + 1/2ulp]
	// (possibly exclusive) round to x for the given precision of x.
	// Compute the lower and upper bound in decimal form and find the
	// shortest decimal number d such that lower <= d <= upper.

	// TODO(gri) strconv/ftoa.do describes a shortcut in some cases.
	// See if we can use it (in adjusted form) here as well.

	// 1) Compute normalized mantissa mant and exponent exp for x such
	// that the lsb of mant corresponds to 1/2 ulp for the precision of
	// x (i.e., for mant we want x.prec + 1 bits).
	mant := nat(nil).set(x.mant)
	exp := int(x.exp) - mant.bitLen()
	s := mant.bitLen() - int(x.prec+1)
	switch {
	case s < 0:
		mant = mant.lsh(mant, uint(-s))
	case s > 0:
		mant = mant.rsh(mant, uint(+s))
	}
	exp += s
	// x = mant * 2**exp with lsb(mant) == 1/2 ulp of x.prec

	// 2) Compute lower bound by subtracting 1/2 ulp.
	var lower decimal
	var tmp nat
	lower.init(tmp.sub(mant, natOne), exp)

	// 3) Compute upper bound by adding 1/2 ulp.
	var upper decimal
	upper.init(tmp.add(mant, natOne), exp)

	// The upper and lower bounds are possible outputs only if
	// the original mantissa is even, so that ToNearestEven rounding
	// would round to the original mantissa and not the neighbors.
	inclusive := mant[0]&2 == 0 // test bit 1 since original mantissa was shifted by 1

	// Now we can figure out the minimum number of digits required.
	// Walk along until d has distinguished itself from upper and lower.
	for i, m := range d.mant {
		l := lower.at(i)
		u := upper.at(i)

		// Okay to round down (truncate) if lower has a different digit
		// or if lower is inclusive and is exactly the result of rounding
		// down (i.e., and we have reached the final digit of lower).
		okdown := l != m || inclusive && i+1 == len(lower.mant)

		// Okay to round up if upper has a different digit and either upper
		// is inclusive or upper is bigger than the result of rounding up.
		okup := m != u && (inclusive || m+1 < u || i+1 < len(upper.mant))

		// If it's okay to do either, then round to the nearest one.
		// If it's okay to do only one, do it.
		switch {
		case okdown && okup:
			d.round(i + 1)
			return
		case okdown:
			d.roundDown(i + 1)
			return
		case okup:
			d.roundUp(i + 1)
			return
		}
	}
}
// fmtE formats d in %e form (d.ddddde±dd) with the given exponent
// character fmt ('e' or 'E') and prec fraction digits, appending to buf.
func fmtE(buf []byte, fmt byte, prec int, d decimal) []byte {
	// Leading digit; a zero mantissa prints as "0".
	if len(d.mant) == 0 {
		buf = append(buf, '0')
	} else {
		buf = append(buf, d.mant[0])
	}

	// Fraction digits, zero-padded on the right up to prec.
	if prec > 0 {
		buf = append(buf, '.')
		written := 1
		if avail := min(len(d.mant), prec+1); avail > 1 {
			buf = append(buf, d.mant[1:avail]...)
			written = avail
		}
		for ; written <= prec; written++ {
			buf = append(buf, '0')
		}
	}

	// Exponent character and sign.
	buf = append(buf, fmt)
	var exp int64
	if len(d.mant) > 0 {
		exp = int64(d.exp) - 1 // -1: first digit already printed before '.'
	}
	sign := byte('+')
	if exp < 0 {
		sign, exp = '-', -exp
	}
	buf = append(buf, sign)

	// At least two exponent digits.
	if exp < 10 {
		buf = append(buf, '0')
	}
	return strconv.AppendInt(buf, exp, 10)
}
// fmtF formats d in %f form (ddddddd.ddddd) with prec fraction digits,
// appending to buf.
func fmtF(buf []byte, prec int, d decimal) []byte {
	// Integer part, zero-padded when the decimal point lies past
	// the available digits; "0" when there is no integer part.
	if d.exp <= 0 {
		buf = append(buf, '0')
	} else {
		n := min(len(d.mant), d.exp)
		buf = append(buf, d.mant[:n]...)
		for ; n < d.exp; n++ {
			buf = append(buf, '0')
		}
	}

	// Fraction part; d.at supplies '0' outside the stored digits.
	if prec > 0 {
		buf = append(buf, '.')
		for j := 0; j < prec; j++ {
			buf = append(buf, d.at(d.exp+j))
		}
	}

	return buf
}
// fmtB appends the string of x in the format mantissa "p" exponent
// with a decimal mantissa and a binary exponent, or "0" if x is zero,
// and returns the extended buffer.
// The mantissa is normalized such that is uses x.Prec() bits in binary
// representation.
// The sign of x is ignored, and x must not be an Inf.
// (The caller handles Inf before invoking fmtB.)
func (x *Float) fmtB(buf []byte) []byte {
	if x.form == zero {
		return append(buf, '0')
	}

	if debugFloat && x.form != finite {
		panic("non-finite float")
	}
	// x != 0

	// Normalize the mantissa so that exactly x.prec bits are used.
	mant := x.mant
	w := uint32(len(x.mant)) * _W
	if w < x.prec {
		mant = nat(nil).lsh(mant, uint(x.prec-w))
	} else if w > x.prec {
		mant = nat(nil).rsh(mant, uint(w-x.prec))
	}

	buf = append(buf, mant.utoa(10)...)
	buf = append(buf, 'p')
	// The exponent is relative to a mantissa of x.prec integer bits.
	e := int64(x.exp) - int64(x.prec)
	if e >= 0 {
		buf = append(buf, '+')
	}
	return strconv.AppendInt(buf, e, 10)
}
// fmtX appends the string of x in the format "0x1." mantissa "p" exponent
// with a hexadecimal mantissa and a binary exponent, or "0x0p0" if x is zero,
// and returns the extended buffer.
// A non-zero mantissa is normalized such that 1.0 <= mantissa < 2.0.
// The sign of x is ignored, and x must not be an Inf.
// (The caller handles Inf before invoking fmtX.)
func (x *Float) fmtX(buf []byte, prec int) []byte {
	if x.form == zero {
		// zero prints as "0x0", prec zero fraction digits, then "p+00"
		buf = append(buf, "0x0"...)
		if prec > 0 {
			buf = append(buf, '.')
			for i := 0; i < prec; i++ {
				buf = append(buf, '0')
			}
		}
		buf = append(buf, "p+00"...)
		return buf
	}

	if debugFloat && x.form != finite {
		panic("non-finite float")
	}

	// round mantissa to n bits: one leading '1' bit plus 4 bits per
	// requested hex digit, so n%4 == 1 below
	var n uint
	if prec < 0 {
		n = 1 + (x.MinPrec()-1+3)/4*4 // round MinPrec up to 1 mod 4
	} else {
		n = 1 + 4*uint(prec)
	}
	// n%4 == 1
	// Work on a rounded copy so the receiver is not modified.
	x = new(Float).SetPrec(n).SetMode(x.mode).Set(x)

	// adjust mantissa to use exactly n bits
	m := x.mant
	switch w := uint(len(x.mant)) * _W; {
	case w < n:
		m = nat(nil).lsh(m, n-w)
	case w > n:
		m = nat(nil).rsh(m, w-n)
	}
	exp64 := int64(x.exp) - 1 // avoid wrap-around

	// With n%4 == 1 the leading hex digit must be '1'
	// (normalized form 1.xxxx).
	hm := m.utoa(16)
	if debugFloat && hm[0] != '1' {
		panic("incorrect mantissa: " + string(hm))
	}
	buf = append(buf, "0x1"...)
	if len(hm) > 1 {
		buf = append(buf, '.')
		buf = append(buf, hm[1:]...)
	}

	buf = append(buf, 'p')
	if exp64 >= 0 {
		buf = append(buf, '+')
	} else {
		exp64 = -exp64
		buf = append(buf, '-')
	}
	// Force at least two exponent digits, to match fmt.
	if exp64 < 10 {
		buf = append(buf, '0')
	}
	return strconv.AppendInt(buf, exp64, 10)
}
// fmtP appends the string of x in the format "0x." mantissa "p" exponent
// with a hexadecimal mantissa and a binary exponent, or "0" if x is zero,
// and returns the extended buffer.
// The mantissa is normalized such that 0.5 <= 0.mantissa < 1.0.
// The sign of x is ignored, and x must not be an Inf.
// (The caller handles Inf before invoking fmtP.)
func (x *Float) fmtP(buf []byte) []byte {
	if x.form == zero {
		return append(buf, '0')
	}

	if debugFloat && x.form != finite {
		panic("non-finite float")
	}
	// x != 0

	// Drop trailing zero words of the mantissa up front rather than
	// converting them to hex zeros and trimming afterwards.
	mant := x.mant
	for len(mant) > 0 && mant[0] == 0 {
		mant = mant[1:]
	}

	buf = append(buf, "0x."...)
	buf = append(buf, bytes.TrimRight(mant.utoa(16), "0")...)
	buf = append(buf, 'p')
	if x.exp >= 0 {
		buf = append(buf, '+')
	}
	return strconv.AppendInt(buf, int64(x.exp), 10)
}
var _ fmt.Formatter = &floatZero // *Float must implement fmt.Formatter

// Format implements [fmt.Formatter]. It accepts all the regular
// formats for floating-point numbers ('b', 'e', 'E', 'f', 'F',
// 'g', 'G', 'x') as well as 'p' and 'v'. See (*Float).Text for the
// interpretation of 'p'. The 'v' format is handled like 'g'.
// Format also supports specification of the minimum precision
// in digits, the output field width, as well as the format flags
// '+' and ' ' for sign control, '0' for space or zero padding,
// and '-' for left or right justification. See the fmt package
// for details.
func (x *Float) Format(s fmt.State, format rune) {
	prec, hasPrec := s.Precision()
	if !hasPrec {
		prec = 6 // default precision for 'e', 'f'
	}

	switch format {
	case 'e', 'E', 'f', 'b', 'p', 'x':
		// nothing to do
	case 'F':
		// (*Float).Text doesn't support 'F'; handle like 'f'
		format = 'f'
	case 'v':
		// handle like 'g'
		format = 'g'
		fallthrough
	case 'g', 'G':
		if !hasPrec {
			prec = -1 // default precision for 'g', 'G' (shortest form)
		}
	default:
		// unknown verb: emit %!<verb>(*big.Float=<value>) like fmt does
		fmt.Fprintf(s, "%%!%c(*big.Float=%s)", format, x.String())
		return
	}
	var buf []byte
	buf = x.Append(buf, byte(format), prec)
	if len(buf) == 0 {
		buf = []byte("?") // should never happen, but don't crash
	}
	// len(buf) > 0

	// Split off any sign produced by Append so that padding can be
	// inserted between the sign and the digits below.
	var sign string
	switch {
	case buf[0] == '-':
		sign = "-"
		buf = buf[1:]
	case buf[0] == '+':
		// +Inf
		sign = "+"
		if s.Flag(' ') {
			sign = " "
		}
		buf = buf[1:]
	case s.Flag('+'):
		sign = "+"
	case s.Flag(' '):
		sign = " "
	}

	var padding int
	if width, hasWidth := s.Width(); hasWidth && width > len(sign)+len(buf) {
		padding = width - len(sign) - len(buf)
	}

	switch {
	case s.Flag('0') && !x.IsInf():
		// 0-padding on left (never zero-pad an Inf)
		writeMultiple(s, sign, 1)
		writeMultiple(s, "0", padding)
		s.Write(buf)
	case s.Flag('-'):
		// padding on right
		writeMultiple(s, sign, 1)
		s.Write(buf)
		writeMultiple(s, " ", padding)
	default:
		// padding on left
		writeMultiple(s, " ", padding)
		writeMultiple(s, sign, 1)
		s.Write(buf)
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements signed multi-precision integers.
package big
import (
"fmt"
"io"
"math/rand"
"strings"
)
// An Int represents a signed multi-precision integer.
// The zero value for an Int represents the value 0.
//
// Operations always take pointer arguments (*Int) rather
// than Int values, and each unique Int value requires
// its own unique *Int pointer. To "copy" an Int value,
// an existing (or newly allocated) Int must be set to
// a new value using the [Int.Set] method; shallow copies
// of Ints are not supported and may lead to errors.
//
// Note that methods may leak the Int's value through timing side-channels.
// Because of this and because of the scope and complexity of the
// implementation, Int is not well-suited to implement cryptographic operations.
// The standard library avoids exposing non-trivial Int methods to
// attacker-controlled inputs and the determination of whether a bug in math/big
// is considered a security vulnerability might depend on the impact on the
// standard library.
type Int struct {
	neg bool // sign
	abs nat  // absolute value of the integer
}

// intOne is a shared, read-only constant 1, used to avoid
// re-allocating the value in loops and helper methods.
var intOne = &Int{false, natOne}
// Sign returns:
//   - -1 if x < 0;
//   - 0 if x == 0;
//   - +1 if x > 0.
func (x *Int) Sign() int {
	// This function is used in cryptographic operations. It must not leak
	// anything but the Int's sign and bit size through side-channels. Any
	// changes must be reviewed by a security expert.
	if len(x.abs) == 0 {
		// x == 0 (zero is stored as an empty abs with neg == false)
		return 0
	}
	if x.neg {
		return -1
	}
	return 1
}
// SetInt64 sets z to x and returns z.
func (z *Int) SetInt64(x int64) *Int {
	// Store the magnitude and sign separately. For x == MinInt64,
	// -x wraps around but the uint64 conversion still yields the
	// correct magnitude 1<<63.
	if x < 0 {
		z.abs = z.abs.setUint64(uint64(-x))
		z.neg = true
	} else {
		z.abs = z.abs.setUint64(uint64(x))
		z.neg = false
	}
	return z
}
// SetUint64 sets z to x and returns z.
func (z *Int) SetUint64(x uint64) *Int {
	// Unsigned values are never negative.
	z.neg = false
	z.abs = z.abs.setUint64(x)
	return z
}
// NewInt allocates and returns a new [Int] set to x.
func NewInt(x int64) *Int {
	// This code is arranged to be inlineable and produce
	// zero allocations when inlined. See issue 29951.
	u := uint64(x)
	if x < 0 {
		// two's-complement negation yields the magnitude;
		// correct even for MinInt64
		u = -u
	}
	var abs []Word
	if x == 0 {
		// zero is represented by a nil abs slice
	} else if _W == 32 && u>>32 != 0 {
		// on 32-bit platforms, values needing more than one word
		// are split into two little-endian words
		abs = []Word{Word(u), Word(u >> 32)}
	} else {
		abs = []Word{Word(u)}
	}
	return &Int{neg: x < 0, abs: abs}
}
// Set sets z to x and returns z.
func (z *Int) Set(x *Int) *Int {
	// Setting z to itself is a no-op; skip the copy.
	if x != z {
		z.neg = x.neg
		z.abs = z.abs.set(x.abs)
	}
	return z
}
// Bits provides raw (unchecked but fast) access to x by returning its
// absolute value as a little-endian [Word] slice. The result and x share
// the same underlying array.
// Bits is intended to support implementation of missing low-level [Int]
// functionality outside this package; it should be avoided otherwise.
func (x *Int) Bits() []Word {
	// This function is used in cryptographic operations. It must not leak
	// anything but the Int's sign and bit size through side-channels. Any
	// changes must be reviewed by a security expert.
	//
	// Note: since the slice is shared, callers mutating the result
	// mutate x as well.
	return x.abs
}
// SetBits provides raw (unchecked but fast) access to z by setting its
// value to abs, interpreted as a little-endian [Word] slice, and returning
// z. The result and abs share the same underlying array.
// SetBits is intended to support implementation of missing low-level [Int]
// functionality outside this package; it should be avoided otherwise.
func (z *Int) SetBits(abs []Word) *Int {
	// norm strips high-order zero words; the result is non-negative.
	z.neg = false
	z.abs = nat(abs).norm()
	return z
}
// Abs sets z to |x| (the absolute value of x) and returns z.
func (z *Int) Abs(x *Int) *Int {
	// Copy the magnitude (no-op when z == x), then clear the sign.
	if z != x {
		z.abs = z.abs.set(x.abs)
	}
	z.neg = false
	return z
}
// Neg sets z to -x and returns z.
func (z *Int) Neg(x *Int) *Int {
	z.Set(x)
	// Flip the sign, except that zero always stays unsigned.
	if len(z.abs) == 0 {
		z.neg = false
	} else {
		z.neg = !z.neg
	}
	return z
}
// Add sets z to the sum x+y and returns z.
func (z *Int) Add(x, y *Int) *Int {
	neg := x.neg
	if x.neg == y.neg {
		// x + y == x + y
		// (-x) + (-y) == -(x + y)
		z.abs = z.abs.add(x.abs, y.abs)
	} else {
		// x + (-y) == x - y == -(y - x)
		// (-x) + y == y - x == -(x - y)
		// Subtract the smaller magnitude from the larger so the
		// unsigned nat subtraction never underflows.
		if x.abs.cmp(y.abs) >= 0 {
			z.abs = z.abs.sub(x.abs, y.abs)
		} else {
			neg = !neg
			z.abs = z.abs.sub(y.abs, x.abs)
		}
	}
	z.neg = len(z.abs) > 0 && neg // 0 has no sign
	return z
}

// Sub sets z to the difference x-y and returns z.
func (z *Int) Sub(x, y *Int) *Int {
	neg := x.neg
	if x.neg != y.neg {
		// x - (-y) == x + y
		// (-x) - y == -(x + y)
		z.abs = z.abs.add(x.abs, y.abs)
	} else {
		// x - y == x - y == -(y - x)
		// (-x) - (-y) == y - x == -(x - y)
		// Subtract the smaller magnitude from the larger so the
		// unsigned nat subtraction never underflows.
		if x.abs.cmp(y.abs) >= 0 {
			z.abs = z.abs.sub(x.abs, y.abs)
		} else {
			neg = !neg
			z.abs = z.abs.sub(y.abs, x.abs)
		}
	}
	z.neg = len(z.abs) > 0 && neg // 0 has no sign
	return z
}
// Mul sets z to the product x*y and returns z.
func (z *Int) Mul(x, y *Int) *Int {
	z.mul(nil, x, y) // nil: no pre-allocated scratch stack
	return z
}
// mul is like Mul but takes an explicit stack to use, for internal use.
// It does not return a *Int because doing so makes the stack-allocated Ints
// used in natmul.go escape to the heap (even though the result is unused).
func (z *Int) mul(stk *stack, x, y *Int) {
	// x * y == x * y
	// x * (-y) == -(x * y)
	// (-x) * y == -(x * y)
	// (-x) * (-y) == x * y
	if x == y {
		// identical operands: use the faster squaring routine;
		// a square is never negative
		z.abs = z.abs.sqr(stk, x.abs)
		z.neg = false
		return
	}
	z.abs = z.abs.mul(stk, x.abs, y.abs)
	z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
}
// MulRange sets z to the product of all integers
// in the range [a, b] inclusively and returns z.
// If a > b (empty range), the result is 1.
func (z *Int) MulRange(a, b int64) *Int {
	switch {
	case a > b:
		return z.SetInt64(1) // empty range
	case a <= 0 && b >= 0:
		return z.SetInt64(0) // range includes 0
	}
	// a <= b && (b < 0 || a > 0)

	neg := false
	if a < 0 {
		// All b-a+1 factors are negative; the product is negative
		// iff that count is odd, i.e. iff b-a is even.
		neg = (b-a)&1 == 0
		// Mirror the range to positive values: |[a, b]| = [-b, -a].
		a, b = -b, -a
	}

	z.abs = z.abs.mulRange(nil, uint64(a), uint64(b))
	z.neg = neg
	return z
}
// Binomial sets z to the binomial coefficient C(n, k) and returns z.
func (z *Int) Binomial(n, k int64) *Int {
	if k > n {
		return z.SetInt64(0)
	}
	// C(n, k) == C(n, n-k); use the smaller k to shorten the loop.
	if k > n-k {
		k = n - k
	}
	// Multiplicative formula, computed incrementally so every
	// intermediate value is an integer and stays as small as possible:
	//
	//	z = 1
	//	for j := 1; j <= k; j++ {
	//		z = z * (n-j+1) / j
	//	}
	var top, bound, j, diff Int
	top.SetInt64(n)
	bound.SetInt64(k)
	z.Set(intOne)
	for j.Cmp(&bound) < 0 {
		z.Mul(z, diff.Sub(&top, &j))
		j.Add(&j, intOne)
		z.Quo(z, &j)
	}
	return z
}
// Quo sets z to the quotient x/y for y != 0 and returns z.
// If y == 0, a division-by-zero run-time panic occurs.
// Quo implements truncated division (like Go); see [Int.QuoRem] for more details.
func (z *Int) Quo(x, y *Int) *Int {
	z.abs, _ = z.abs.div(nil, nil, x.abs, y.abs)
	z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
	return z
}

// Rem sets z to the remainder x%y for y != 0 and returns z.
// If y == 0, a division-by-zero run-time panic occurs.
// Rem implements truncated modulus (like Go); see [Int.QuoRem] for more details.
func (z *Int) Rem(x, y *Int) *Int {
	// The remainder takes the sign of the dividend x (T-division).
	_, z.abs = nat(nil).div(nil, z.abs, x.abs, y.abs)
	z.neg = len(z.abs) > 0 && x.neg // 0 has no sign
	return z
}

// QuoRem sets z to the quotient x/y and r to the remainder x%y
// and returns the pair (z, r) for y != 0.
// If y == 0, a division-by-zero run-time panic occurs.
//
// QuoRem implements T-division and modulus (like Go):
//
//	q = x/y      with the result truncated to zero
//	r = x - y*q
//
// (See Daan Leijen, “Division and Modulus for Computer Scientists”.)
// See [Int.DivMod] for Euclidean division and modulus (unlike Go).
func (z *Int) QuoRem(x, y, r *Int) (*Int, *Int) {
	z.abs, r.abs = z.abs.div(nil, r.abs, x.abs, y.abs)
	z.neg, r.neg = len(z.abs) > 0 && x.neg != y.neg, len(r.abs) > 0 && x.neg // 0 has no sign
	return z, r
}

// Div sets z to the quotient x/y for y != 0 and returns z.
// If y == 0, a division-by-zero run-time panic occurs.
// Div implements Euclidean division (unlike Go); see [Int.DivMod] for more details.
func (z *Int) Div(x, y *Int) *Int {
	y_neg := y.neg // z may be an alias for y
	var r Int
	z.QuoRem(x, y, &r)
	// Adjust the truncated quotient so the implied remainder is
	// non-negative (Euclidean convention).
	if r.neg {
		if y_neg {
			z.Add(z, intOne)
		} else {
			z.Sub(z, intOne)
		}
	}
	return z
}

// Mod sets z to the modulus x%y for y != 0 and returns z.
// If y == 0, a division-by-zero run-time panic occurs.
// Mod implements Euclidean modulus (unlike Go); see [Int.DivMod] for more details.
func (z *Int) Mod(x, y *Int) *Int {
	y0 := y // save y
	if z == y || alias(z.abs, y.abs) {
		// z aliases y: work on a copy since z is overwritten below
		y0 = new(Int).Set(y)
	}
	var q Int
	q.QuoRem(x, y, z)
	// Shift a negative truncated remainder into [0, |y|).
	if z.neg {
		if y0.neg {
			z.Sub(z, y0)
		} else {
			z.Add(z, y0)
		}
	}
	return z
}

// DivMod sets z to the quotient x div y and m to the modulus x mod y
// and returns the pair (z, m) for y != 0.
// If y == 0, a division-by-zero run-time panic occurs.
//
// DivMod implements Euclidean division and modulus (unlike Go):
//
//	q = x div y  such that
//	m = x - y*q  with 0 <= m < |y|
//
// (See Raymond T. Boute, “The Euclidean definition of the functions
// div and mod”. ACM Transactions on Programming Languages and
// Systems (TOPLAS), 14(2):127-144, New York, NY, USA, 4/1992.
// ACM press.)
// See [Int.QuoRem] for T-division and modulus (like Go).
func (z *Int) DivMod(x, y, m *Int) (*Int, *Int) {
	y0 := y // save y
	if z == y || alias(z.abs, y.abs) {
		// z aliases y: work on a copy since z is overwritten below
		y0 = new(Int).Set(y)
	}
	z.QuoRem(x, y, m)
	// Fix up quotient and remainder together so that 0 <= m < |y|.
	if m.neg {
		if y0.neg {
			z.Add(z, intOne)
			m.Sub(m, y0)
		} else {
			z.Sub(z, intOne)
			m.Add(m, y0)
		}
	}
	return z, m
}
// Cmp compares x and y and returns:
//   - -1 if x < y;
//   - 0 if x == y;
//   - +1 if x > y.
func (x *Int) Cmp(y *Int) (r int) {
	// Identical pointers compare equal without any work.
	if x == y {
		return 0
	}
	// Different signs: the negative operand is smaller.
	if x.neg != y.neg {
		if x.neg {
			return -1
		}
		return 1
	}
	// Same sign: compare magnitudes, flipping the result when
	// both operands are negative.
	r = x.abs.cmp(y.abs)
	if x.neg {
		r = -r
	}
	return r
}
// CmpAbs compares the absolute values of x and y and returns:
//   - -1 if |x| < |y|;
//   - 0 if |x| == |y|;
//   - +1 if |x| > |y|.
func (x *Int) CmpAbs(y *Int) int {
	// Signs are ignored; compare magnitudes directly.
	return x.abs.cmp(y.abs)
}
// low32 returns the least significant 32 bits of x.
func low32(x nat) uint32 {
	if len(x) > 0 {
		// low word truncated to 32 bits
		return uint32(x[0])
	}
	return 0
}
// low64 returns the least significant 64 bits of x.
func low64(x nat) uint64 {
	switch {
	case len(x) == 0:
		return 0
	case _W == 32 && len(x) > 1:
		// on 32-bit platforms two words form the low 64 bits
		return uint64(x[1])<<32 | uint64(x[0])
	default:
		return uint64(x[0])
	}
}
// Int64 returns the int64 representation of x.
// If x cannot be represented in an int64, the result is undefined.
func (x *Int) Int64() int64 {
	v := int64(low64(x.abs))
	if !x.neg {
		return v
	}
	return -v
}
// Uint64 returns the uint64 representation of x.
// If x cannot be represented in a uint64, the result is undefined.
func (x *Int) Uint64() uint64 {
	// The sign is ignored; only the low 64 bits of |x| are returned.
	return low64(x.abs)
}

// IsInt64 reports whether x can be represented as an int64.
func (x *Int) IsInt64() bool {
	if len(x.abs) <= 64/_W {
		w := int64(low64(x.abs))
		// w >= 0: magnitude fits in the positive int64 range.
		// w == -w holds only for 0 and MinInt64, so a negative x
		// with w == -w is exactly MinInt64 and still representable.
		return w >= 0 || x.neg && w == -w
	}
	return false
}

// IsUint64 reports whether x can be represented as a uint64.
func (x *Int) IsUint64() bool {
	// non-negative and at most 64 bits of magnitude
	return !x.neg && len(x.abs) <= 64/_W
}
// Float64 returns the float64 value nearest x,
// and an indication of any rounding that occurred.
func (x *Int) Float64() (float64, Accuracy) {
	n := x.abs.bitLen() // NB: still uses slow crypto impl!
	if n == 0 {
		return 0.0, Exact
	}

	// Fast path: no more than 53 significant bits, i.e. the value
	// fits a float64 mantissa exactly (trailing zeros don't count
	// as significant).
	if n <= 53 || n < 64 && n-int(x.abs.trailingZeroBits()) <= 53 {
		f := float64(low64(x.abs))
		if x.neg {
			f = -f
		}
		return f, Exact
	}

	// Slow path: x does not fit exactly; convert via Float,
	// which reports the rounding accuracy.
	return new(Float).SetInt(x).Float64()
}
// SetString sets z to the value of s, interpreted in the given base,
// and returns z and a boolean indicating success. The entire string
// (not just a prefix) must be valid for success. If SetString fails,
// the value of z is undefined but the returned value is nil.
//
// The base argument must be 0 or a value between 2 and [MaxBase].
// For base 0, the number prefix determines the actual base: A prefix of
// “0b” or “0B” selects base 2, “0”, “0o” or “0O” selects base 8,
// and “0x” or “0X” selects base 16. Otherwise, the selected base is 10
// and no prefix is accepted.
//
// For bases <= 36, lower and upper case letters are considered the same:
// The letters 'a' to 'z' and 'A' to 'Z' represent digit values 10 to 35.
// For bases > 36, the upper case letters 'A' to 'Z' represent the digit
// values 36 to 61.
//
// For base 0, an underscore character “_” may appear between a base
// prefix and an adjacent digit, and between successive digits; such
// underscores do not change the value of the number.
// Incorrect placement of underscores is reported as an error if there
// are no other errors. If base != 0, underscores are not recognized
// and act like any other character that is not a valid digit.
func (z *Int) SetString(s string, base int) (*Int, bool) {
	// Wrap the string in a ByteScanner and share the scanner-based
	// implementation with Fscan et al.
	return z.setFromScanner(strings.NewReader(s), base)
}
// setFromScanner implements SetString given an io.ByteScanner.
// For documentation see comments of SetString.
func (z *Int) setFromScanner(r io.ByteScanner, base int) (*Int, bool) {
	if _, _, err := z.scan(r, base); err != nil {
		return nil, false
	}
	// The scan must have consumed the entire input: anything but
	// io.EOF here means trailing garbage after the number.
	if _, err := r.ReadByte(); err != io.EOF {
		return nil, false
	}
	return z, true
}
// SetBytes interprets buf as the bytes of a big-endian unsigned
// integer, sets z to that value, and returns z.
func (z *Int) SetBytes(buf []byte) *Int {
	// The encoding is unsigned, so the result is never negative.
	z.neg = false
	z.abs = z.abs.setBytes(buf)
	return z
}
// Bytes returns the absolute value of x as a big-endian byte slice.
//
// To use a fixed length slice, or a preallocated one, use [Int.FillBytes].
func (x *Int) Bytes() []byte {
	// This function is used in cryptographic operations. It must not leak
	// anything but the Int's sign and bit size through side-channels. Any
	// changes must be reviewed by a security expert.
	//
	// bytes fills buf from the right and returns the index of the
	// first written byte; slicing there drops the leading zeros.
	buf := make([]byte, len(x.abs)*_S)
	return buf[x.abs.bytes(buf):]
}

// FillBytes sets buf to the absolute value of x, storing it as a zero-extended
// big-endian byte slice, and returns buf.
//
// If the absolute value of x doesn't fit in buf, FillBytes will panic.
func (x *Int) FillBytes(buf []byte) []byte {
	// Clear whole buffer.
	clear(buf)
	// bytes fills from the right, leaving the zero padding on the left.
	x.abs.bytes(buf)
	return buf
}
// BitLen returns the length of the absolute value of x in bits.
// The bit length of 0 is 0.
func (x *Int) BitLen() int {
	// This function is used in cryptographic operations. It must not leak
	// anything but the Int's sign and bit size through side-channels. Any
	// changes must be reviewed by a security expert.
	return x.abs.bitLen()
}

// TrailingZeroBits returns the number of consecutive least significant zero
// bits of |x|.
func (x *Int) TrailingZeroBits() uint {
	// The sign is irrelevant for trailing zeros of the magnitude.
	return x.abs.trailingZeroBits()
}
// Exp sets z = x**y mod |m| (i.e. the sign of m is ignored), and returns z.
// If m == nil or m == 0, z = x**y unless y <= 0 then z = 1. If m != 0, y < 0,
// and x and m are not relatively prime, z is unchanged and nil is returned.
//
// Modular exponentiation of inputs of a particular size is not a
// cryptographically constant-time operation.
func (z *Int) Exp(x, y, m *Int) *Int {
	return z.exp(x, y, m, false)
}

// expSlow is like Exp but forces the slow (4-bit window) expNN path;
// used for testing.
func (z *Int) expSlow(x, y, m *Int) *Int {
	return z.exp(x, y, m, true)
}

// exp implements Exp; slow selects the non-optimized expNN path.
func (z *Int) exp(x, y, m *Int, slow bool) *Int {
	// See Knuth, volume 2, section 4.6.3.
	xWords := x.abs
	if y.neg {
		if m == nil || len(m.abs) == 0 {
			// no modulus: x**(negative) is defined as 1 here
			return z.SetInt64(1)
		}
		// for y < 0: x**y mod m == (x**(-1))**|y| mod m
		inverse := new(Int).ModInverse(x, m)
		if inverse == nil {
			// x has no inverse mod m; leave z unchanged
			return nil
		}
		xWords = inverse.abs
	}
	yWords := y.abs

	var mWords nat
	if m != nil {
		// z aliasing m would corrupt the modulus while computing;
		// work on a copy in that case
		if z == m || alias(z.abs, m.abs) {
			m = new(Int).Set(m)
		}
		mWords = m.abs // m.abs may be nil for m == 0
	}

	z.abs = z.abs.expNN(nil, xWords, yWords, mWords, slow)
	// The result is negative only for negative x raised to an odd power.
	z.neg = len(z.abs) > 0 && x.neg && len(yWords) > 0 && yWords[0]&1 == 1 // 0 has no sign
	if z.neg && len(mWords) > 0 {
		// make modulus result positive
		z.abs = z.abs.sub(mWords, z.abs) // z == x**y mod |m| && 0 <= z < |m|
		z.neg = false
	}

	return z
}
// GCD sets z to the greatest common divisor of a and b and returns z.
// If x or y are not nil, GCD sets their value such that z = a*x + b*y.
//
// a and b may be positive, zero or negative. (Before Go 1.14 both had
// to be > 0.) Regardless of the signs of a and b, z is always >= 0.
//
// If a == b == 0, GCD sets z = x = y = 0.
//
// If a == 0 and b != 0, GCD sets z = |b|, x = 0, y = sign(b) * 1.
//
// If a != 0 and b == 0, GCD sets z = |a|, x = sign(a) * 1, y = 0.
func (z *Int) GCD(x, y, a, b *Int) *Int {
	if len(a.abs) == 0 || len(b.abs) == 0 {
		// At least one operand is zero: handle per the rules above.
		// Capture lengths and signs first — z (and x, y) may alias
		// a or b, and the writes below would clobber them.
		lenA, lenB, negA, negB := len(a.abs), len(b.abs), a.neg, b.neg
		if lenA == 0 {
			z.Set(b)
		} else {
			z.Set(a)
		}
		z.neg = false

		if x != nil {
			if lenA == 0 {
				x.SetUint64(0)
			} else {
				x.SetUint64(1)
				x.neg = negA
			}
		}

		if y != nil {
			if lenB == 0 {
				y.SetUint64(0)
			} else {
				y.SetUint64(1)
				y.neg = negB
			}
		}

		return z
	}

	// General case: both operands non-zero.
	return z.lehmerGCD(x, y, a, b)
}
// lehmerSimulate attempts to simulate several Euclidean update steps
// using the leading digits of A and B. It returns u0, u1, v0, v1
// such that A and B can be updated as:
//
//	A = u0*A + v0*B
//	B = u1*A + v1*B
//
// Requirements: A >= B and len(B.abs) >= 2
// Since we are calculating with full words to avoid overflow,
// we use 'even' to track the sign of the cosequences.
// For even iterations: u0, v1 >= 0 && u1, v0 <= 0
// For odd iterations: u0, v1 <= 0 && u1, v0 >= 0
func lehmerSimulate(A, B *Int) (u0, u1, v0, v1 Word, even bool) {
	// initialize the digits
	var a1, a2, u2, v2 Word

	m := len(B.abs) // m >= 2
	n := len(A.abs) // n >= m >= 2

	// extract the top Word of bits from A and B
	h := nlz(A.abs[n-1])
	// Note: when h == 0, the >>(_W-h) terms shift by the full word width,
	// which in Go yields 0, so no special case is needed.
	a1 = A.abs[n-1]<<h | A.abs[n-2]>>(_W-h)
	// B may have implicit zero words in the high bits if the lengths differ
	switch {
	case n == m:
		a2 = B.abs[n-1]<<h | B.abs[n-2]>>(_W-h)
	case n == m+1:
		a2 = B.abs[n-2] >> (_W - h)
	default:
		a2 = 0
	}

	// Since we are calculating with full words to avoid overflow,
	// we use 'even' to track the sign of the cosequences.
	// For even iterations: u0, v1 >= 0 && u1, v0 <= 0
	// For odd iterations: u0, v1 <= 0 && u1, v0 >= 0
	// The first iteration starts with k=1 (odd).
	even = false
	// variables to track the cosequences
	u0, u1, u2 = 0, 1, 0
	v0, v1, v2 = 0, 0, 1

	// Calculate the quotient and cosequences using Collins' stopping condition.
	// Note that overflow of a Word is not possible when computing the remainder
	// sequence and cosequences since the cosequence size is bounded by the input size.
	// See section 4.2 of Jebelean for details.
	for a2 >= v2 && a1-a2 >= v1+v2 {
		q, r := a1/a2, a1%a2
		a1, a2 = a2, r
		u0, u1, u2 = u1, u2, u1+q*u2
		v0, v1, v2 = v1, v2, v1+q*v2
		even = !even
	}
	return
}
// lehmerUpdate updates the inputs A and B such that:
//
//	A = u0*A + v0*B
//	B = u1*A + v1*B
//
// where the signs of u0, u1, v0, v1 are given by even
// For even == true: u0, v1 >= 0 && u1, v0 <= 0
// For even == false: u0, v1 <= 0 && u1, v0 >= 0
// q, r, s, t are temporary variables to avoid allocations in the multiplication.
func lehmerUpdate(A, B, q, r *Int, u0, u1, v0, v1 Word, even bool) {
	// Compute the cross terms into the temporaries first so that the
	// in-place updates of A and B below do not destroy needed inputs.
	mulW(q, B, even, v0)  // q = (-?)v0*B
	mulW(r, A, even, u1)  // r = (-?)u1*A
	mulW(A, A, !even, u0) // A = (-?)u0*A
	mulW(B, B, !even, v1) // B = (-?)v1*B
	A.Add(A, q)
	B.Add(B, r)
}
// mulW sets z = x * (-?)w
// where the minus sign is present when neg is true.
func mulW(z, x *Int, neg bool, w Word) {
	z.abs = z.abs.mulAddWW(x.abs, w, 0) // |z| = |x| * w
	z.neg = x.neg != neg                // sign flips exactly when neg is true
}
// euclidUpdate performs a single step of the Euclidean GCD algorithm
// if extended is true, it also updates the cosequence Ua, Ub.
// q and r are used as temporaries; the initial values are ignored.
// The five return values are intended to be assigned back as
// A, B, r, Ua, Ub = euclidUpdate(A, B, Ua, Ub, q, r, extended):
// the new A is the old B, the new B is the remainder, and the old A is
// recycled as the caller's next r temporary.
func euclidUpdate(A, B, Ua, Ub, q, r *Int, extended bool) (nA, nB, nr, nUa, nUb *Int) {
	q.QuoRem(A, B, r)

	if extended {
		// Ua, Ub = Ub, Ua-q*Ub
		q.Mul(q, Ub)
		Ua, Ub = Ub, Ua
		Ub.Sub(Ub, q)
	}

	return B, r, A, Ua, Ub
}
// lehmerGCD sets z to the greatest common divisor of a and b,
// which both must be != 0, and returns z.
// If x or y are not nil, their values are set such that z = a*x + b*y.
// See Knuth, The Art of Computer Programming, Vol. 2, Section 4.5.2, Algorithm L.
// This implementation uses the improved condition by Collins requiring only one
// quotient and avoiding the possibility of single Word overflow.
// See Jebelean, "Improving the multiprecision Euclidean algorithm",
// Design and Implementation of Symbolic Computation Systems, pp 45-58.
// The cosequences are updated according to Algorithm 10.45 from
// Cohen et al. "Handbook of Elliptic and Hyperelliptic Curve Cryptography" pp 192.
func (z *Int) lehmerGCD(x, y, a, b *Int) *Int {
	var A, B, Ua, Ub *Int

	// Work on |a| and |b|; signs are reapplied at the end.
	A = new(Int).Abs(a)
	B = new(Int).Abs(b)

	extended := x != nil || y != nil

	if extended {
		// Ua (Ub) tracks how many times input a has been accumulated into A (B).
		Ua = new(Int).SetInt64(1)
		Ub = new(Int)
	}

	// temp variables for multiprecision update
	q := new(Int)
	r := new(Int)

	// ensure A >= B
	if A.abs.cmp(B.abs) < 0 {
		A, B = B, A
		Ub, Ua = Ua, Ub
	}

	// loop invariant A >= B
	for len(B.abs) > 1 {
		// Attempt to calculate in single-precision using leading words of A and B.
		u0, u1, v0, v1, even := lehmerSimulate(A, B)

		// multiprecision Step
		if v0 != 0 {
			// Simulate the effect of the single-precision steps using the cosequences.
			// A = u0*A + v0*B
			// B = u1*A + v1*B
			lehmerUpdate(A, B, q, r, u0, u1, v0, v1, even)

			if extended {
				// Ua = u0*Ua + v0*Ub
				// Ub = u1*Ua + v1*Ub
				lehmerUpdate(Ua, Ub, q, r, u0, u1, v0, v1, even)
			}

		} else {
			// Single-digit calculations failed to simulate any quotients.
			// Do a standard Euclidean step.
			A, B, r, Ua, Ub = euclidUpdate(A, B, Ua, Ub, q, r, extended)
		}
	}

	if len(B.abs) > 0 {
		// extended Euclidean algorithm base case if B is a single Word
		if len(A.abs) > 1 {
			// A is longer than a single Word, so one update is needed.
			A, B, r, Ua, Ub = euclidUpdate(A, B, Ua, Ub, q, r, extended)
		}
		if len(B.abs) > 0 {
			// A and B are both a single Word.
			aWord, bWord := A.abs[0], B.abs[0]
			if extended {
				// Run the single-word extended Euclidean loop, tracking the
				// cosequence signs with 'even' as in lehmerSimulate.
				var ua, ub, va, vb Word
				ua, ub = 1, 0
				va, vb = 0, 1
				even := true
				for bWord != 0 {
					q, r := aWord/bWord, aWord%bWord
					aWord, bWord = bWord, r
					ua, ub = ub, ua+q*ub
					va, vb = vb, va+q*vb
					even = !even
				}

				// Fold the single-word cosequence back into Ua.
				mulW(Ua, Ua, !even, ua)
				mulW(Ub, Ub, even, va)

				Ua.Add(Ua, Ub)
			} else {
				for bWord != 0 {
					aWord, bWord = bWord, aWord%bWord
				}
			}
			A.abs[0] = aWord
		}
	}

	negA := a.neg
	if y != nil {
		// avoid aliasing b needed in the division below
		if y == b {
			B.Set(b)
		} else {
			B = b
		}
		// y = (z - a*x)/b
		y.Mul(a, Ua) // y can safely alias a
		if negA {
			y.neg = !y.neg
		}
		y.Sub(A, y)
		y.Div(y, B)
	}

	if x != nil {
		x.Set(Ua)
		if negA {
			// The sign of x follows the sign of a (z = a*x + b*y with z >= 0).
			x.neg = !x.neg
		}
	}

	z.Set(A)

	return z
}
// Rand sets z to a pseudo-random number in [0, n) and returns z.
//
// As this uses the [math/rand] package, it must not be used for
// security-sensitive work. Use [crypto/rand.Int] instead.
func (z *Int) Rand(rnd *rand.Rand, n *Int) *Int {
	// Inspect n before writing any field of z: z and n may be the same Int.
	if n.neg || len(n.abs) == 0 {
		z.neg = false
		z.abs = nil
		return z
	}
	bound := n.abs
	z.neg = false
	z.abs = z.abs.random(rnd, bound, bound.bitLen())
	return z
}
// ModInverse sets z to the multiplicative inverse of g in the ring ℤ/nℤ
// and returns z. If g and n are not relatively prime, g has no multiplicative
// inverse in the ring ℤ/nℤ. In this case, z is unchanged and the return value
// is nil. If n == 0, a division-by-zero run-time panic occurs.
func (z *Int) ModInverse(g, n *Int) *Int {
	// Normalize the arguments: GCD expects a and b to be > 0.
	if n.neg {
		var m Int
		n = m.Neg(n)
	}
	if g.neg {
		var h Int
		g = h.Mod(g, n)
	}

	var gcd, inv Int
	gcd.GCD(&inv, nil, g, n)

	// g has an inverse mod n exactly when gcd(g, n) == 1.
	if gcd.Cmp(intOne) != 0 {
		return nil
	}

	// From g*inv + n*y == 1, inv is the inverse element; it may be
	// negative, so shift it into the canonical range 0 <= z < |n|.
	if inv.neg {
		return z.Add(&inv, n)
	}
	return z.Set(&inv)
}
// modInverse returns the multiplicative inverse of g modulo n as a nat,
// delegating to Int.ModInverse via temporary Int wrappers.
// NOTE: panics (nil dereference on .abs) if g and n are not relatively
// prime, since ModInverse then returns nil.
func (z nat) modInverse(g, n nat) nat {
	// TODO(rsc): ModInverse should be implemented in terms of this function.
	return (&Int{abs: z}).ModInverse(&Int{abs: g}, &Int{abs: n}).abs
}
// Jacobi returns the Jacobi symbol (x/y), either +1, -1, or 0.
// The y argument must be an odd integer.
func Jacobi(x, y *Int) int {
	if len(y.abs) == 0 || y.abs[0]&1 == 0 {
		panic(fmt.Sprintf("big: invalid 2nd argument to Int.Jacobi: need odd integer but got %s", y.String()))
	}

	// We use the formulation described in chapter 2, section 2.4,
	// "The Yacas Book of Algorithms":
	// http://yacas.sourceforge.net/Algo.book.pdf
	var a, b, c Int
	a.Set(x)
	b.Set(y)
	j := 1

	if b.neg {
		if a.neg {
			j = -1
		}
		b.neg = false
	}

	for {
		if b.Cmp(intOne) == 0 {
			return j
		}
		if len(a.abs) == 0 {
			return 0
		}
		a.Mod(&a, &b)
		if len(a.abs) == 0 {
			return 0
		}
		// a > 0

		// handle factors of 2 in 'a'
		s := a.abs.trailingZeroBits()
		if s&1 != 0 {
			// quadratic reciprocity supplement: (2/b) depends on b mod 8
			bmod8 := b.abs[0] & 7
			if bmod8 == 3 || bmod8 == 5 {
				j = -j
			}
		}
		c.Rsh(&a, s) // a = 2^s*c

		// swap numerator and denominator
		if b.abs[0]&3 == 3 && c.abs[0]&3 == 3 {
			// reciprocity: sign flips when both are 3 mod 4
			j = -j
		}
		a.Set(&b)
		b.Set(&c)
	}
}
// modSqrt3Mod4Prime sets z to a square root of the quadratic residue x
// modulo the prime p, valid when p ≡ 3 (mod 4). It relies on the identity
//
//	(x^((p+1)/4))^2 mod p
//	== x^((p+1)/2) mod p
//	== x^2 mod p (for a residue x)
//
// which makes a single modular exponentiation sufficient.
func (z *Int) modSqrt3Mod4Prime(x, p *Int) *Int {
	exp := new(Int).Add(p, intOne) // exp = p + 1
	exp.Rsh(exp, 2)                // exp = (p + 1) / 4
	return z.Exp(x, exp, p)        // z = x^exp mod p
}
// modSqrt5Mod8Prime uses Atkin's observation that 2 is not a square mod p
//
//	alpha ==  (2*a)^((p-5)/8)    mod p
//	beta  ==  2*a*alpha^2        mod p  is a square root of -1
//	b     ==  a*alpha*(beta-1)   mod p  is a square root of a
//
// to calculate the square root of any quadratic residue mod p quickly for 5
// mod 8 primes.
func (z *Int) modSqrt5Mod8Prime(x, p *Int) *Int {
	// p == 5 mod 8 implies p = e*8 + 5
	// e is the quotient and 5 the remainder on division by 8
	e := new(Int).Rsh(p, 3)  // e = (p - 5) / 8
	tx := new(Int).Lsh(x, 1) // tx = 2*x
	alpha := new(Int).Exp(tx, e, p)
	// beta is built up in place: alpha^2, then *2x, then -1, then *x, *alpha.
	beta := new(Int).Mul(alpha, alpha)
	beta.Mod(beta, p)
	beta.Mul(beta, tx) // beta = 2*x*alpha^2 (the square root of -1)
	beta.Mod(beta, p)
	beta.Sub(beta, intOne) // beta - 1
	beta.Mul(beta, x)      // x*(beta-1)
	beta.Mod(beta, p)
	beta.Mul(beta, alpha) // x*alpha*(beta-1)
	z.Mod(beta, p)
	return z
}
// modSqrtTonelliShanks uses the Tonelli-Shanks algorithm to find the square
// root of a quadratic residue modulo any prime.
func (z *Int) modSqrtTonelliShanks(x, p *Int) *Int {
	// Break p-1 into s*2^e such that s is odd.
	var s Int
	s.Sub(p, intOne)
	e := s.abs.trailingZeroBits()
	s.Rsh(&s, e)

	// find some non-square n
	var n Int
	n.SetInt64(2)
	for Jacobi(&n, p) != -1 {
		n.Add(&n, intOne)
	}

	// Core of the Tonelli-Shanks algorithm. Follows the description in
	// section 6 of "Square roots from 1; 24, 51, 10 to Dan Shanks" by Ezra
	// Brown:
	// https://www.maa.org/sites/default/files/pdf/upload_library/22/Polya/07468342.di020786.02p0470a.pdf
	var y, b, g, t Int
	y.Add(&s, intOne)
	y.Rsh(&y, 1)
	y.Exp(x, &y, p)  // y = x^((s+1)/2)
	b.Exp(x, &s, p)  // b = x^s
	g.Exp(&n, &s, p) // g = n^s
	r := e
	for {
		// find the least m such that ord_p(b) = 2^m
		var m uint
		t.Set(&b)
		for t.Cmp(intOne) != 0 {
			t.Mul(&t, &t).Mod(&t, p)
			m++
		}

		if m == 0 {
			// b == 1, so y is the square root we are after.
			return z.Set(&y)
		}

		// t = g^(2^(r-m-1)) mod p, computed via an exponent with a single set bit.
		t.SetInt64(0).SetBit(&t, int(r-m-1), 1).Exp(&g, &t, p)
		// t = g^(2^(r-m-1)) mod p
		g.Mul(&t, &t).Mod(&g, p) // g = g^(2^(r-m)) mod p
		y.Mul(&y, &t).Mod(&y, p)
		b.Mul(&b, &g).Mod(&b, p)
		r = m
	}
}
// ModSqrt sets z to a square root of x mod p if such a square root exists,
// and returns z. The modulus p must be an odd prime. If x is not a square
// mod p, ModSqrt leaves z unchanged and returns nil. This function panics
// if p is not an odd integer; its behavior is undefined if p is odd but
// not prime.
func (z *Int) ModSqrt(x, p *Int) *Int {
	// Jacobi decides whether a root exists (and panics for even/zero p).
	j := Jacobi(x, p)
	if j == -1 {
		return nil // x is not a square mod p
	}
	if j == 0 {
		return z.SetInt64(0) // sqrt(0) mod p = 0
	}

	// Reduce x into the canonical range 0 <= x < p without mutating the caller's x.
	if x.neg || x.Cmp(p) >= 0 {
		x = new(Int).Mod(x, p)
	}

	// Dispatch to the fastest applicable algorithm based on p's low bits.
	if p.abs[0]%4 == 3 {
		return z.modSqrt3Mod4Prime(x, p)
	}
	if p.abs[0]%8 == 5 {
		return z.modSqrt5Mod8Prime(x, p)
	}
	return z.modSqrtTonelliShanks(x, p)
}
// Lsh sets z = x << n and returns z.
func (z *Int) Lsh(x *Int, n uint) *Int {
	z.abs = z.abs.lsh(x.abs, n)
	z.neg = x.neg // shifting left preserves the sign (0 stays unsigned)
	return z
}
// Rsh sets z = x >> n and returns z.
// For negative x this is an arithmetic shift (rounding toward -infinity),
// implemented via the two's-complement identity below.
func (z *Int) Rsh(x *Int, n uint) *Int {
	if x.neg {
		// (-x) >> s == ^(x-1) >> s == ^((x-1) >> s) == -(((x-1) >> s) + 1)
		t := z.abs.sub(x.abs, natOne) // no underflow because |x| > 0
		t = t.rsh(t, n)
		z.abs = t.add(t, natOne)
		z.neg = true // z cannot be zero if x is negative
		return z
	}

	z.abs = z.abs.rsh(x.abs, n)
	z.neg = false
	return z
}
// Bit returns the value of the i'th bit of x. That is, it
// returns (x>>i)&1. The bit index i must be >= 0.
func (x *Int) Bit(i int) uint {
	if i == 0 {
		// optimization for common case: odd/even test of x
		// (i == 0 can never be negative, so the panic check below is safely skipped)
		if len(x.abs) > 0 {
			return uint(x.abs[0] & 1) // bit 0 is same for -x
		}
		return 0
	}
	if i < 0 {
		panic("negative bit index")
	}
	if x.neg {
		// Two's-complement view: bits of -x are the complemented bits of x-1.
		t := nat(nil).sub(x.abs, natOne)
		return t.bit(uint(i)) ^ 1
	}

	return x.abs.bit(uint(i))
}
// SetBit sets z to x, with x's i'th bit set to b (0 or 1).
// That is,
//   - if b is 1, SetBit sets z = x | (1 << i);
//   - if b is 0, SetBit sets z = x &^ (1 << i);
//   - if b is not 0 or 1, SetBit will panic.
func (z *Int) SetBit(x *Int, i int, b uint) *Int {
	if i < 0 {
		panic("negative bit index")
	}
	if x.neg {
		// Operate on the two's-complement magnitude x-1, where the target
		// bit appears inverted (b^1), then convert back.
		t := z.abs.sub(x.abs, natOne)
		t = t.setBit(t, uint(i), b^1)
		z.abs = t.add(t, natOne)
		z.neg = len(z.abs) > 0
		return z
	}
	z.abs = z.abs.setBit(x.abs, uint(i), b)
	z.neg = false
	return z
}
// And sets z = x & y and returns z.
// Negative operands are treated in two's-complement form via the
// identity -v == ^(v-1).
func (z *Int) And(x, y *Int) *Int {
	if x.neg != y.neg {
		// Mixed signs. Normalize so x >= 0, y < 0 (& is symmetric).
		if x.neg {
			x, y = y, x
		}
		// x & (-y) == x & ^(y-1) == x &^ (y-1)
		ym1 := nat(nil).sub(y.abs, natOne)
		z.abs = z.abs.andNot(x.abs, ym1)
		z.neg = false
		return z
	}

	if !x.neg {
		// Both non-negative: plain magnitude AND.
		z.abs = z.abs.and(x.abs, y.abs)
		z.neg = false
		return z
	}

	// Both negative:
	// (-x) & (-y) == ^(x-1) & ^(y-1) == ^((x-1) | (y-1)) == -(((x-1) | (y-1)) + 1)
	xm1 := nat(nil).sub(x.abs, natOne)
	ym1 := nat(nil).sub(y.abs, natOne)
	z.abs = z.abs.add(z.abs.or(xm1, ym1), natOne)
	z.neg = true // z cannot be zero if x and y are negative
	return z
}
// AndNot sets z = x &^ y and returns z.
// Negative operands are treated in two's-complement form via the
// identity -v == ^(v-1).
func (z *Int) AndNot(x, y *Int) *Int {
	if x.neg == y.neg {
		if !x.neg {
			// Both non-negative: plain magnitude AND-NOT.
			z.abs = z.abs.andNot(x.abs, y.abs)
			z.neg = false
			return z
		}
		// Both negative:
		// (-x) &^ (-y) == ^(x-1) &^ ^(y-1) == ^(x-1) & (y-1) == (y-1) &^ (x-1)
		xm1 := nat(nil).sub(x.abs, natOne)
		ym1 := nat(nil).sub(y.abs, natOne)
		z.abs = z.abs.andNot(ym1, xm1)
		z.neg = false
		return z
	}

	if x.neg {
		// (-x) &^ y == ^(x-1) &^ y == ^(x-1) & ^y == ^((x-1) | y) == -(((x-1) | y) + 1)
		xm1 := nat(nil).sub(x.abs, natOne)
		z.abs = z.abs.add(z.abs.or(xm1, y.abs), natOne)
		z.neg = true // z cannot be zero if x is negative and y is positive
		return z
	}

	// x &^ (-y) == x &^ ^(y-1) == x & (y-1)
	ym1 := nat(nil).sub(y.abs, natOne)
	z.abs = z.abs.and(x.abs, ym1)
	z.neg = false
	return z
}
// Or sets z = x | y and returns z.
// Negative operands are treated in two's-complement form via the
// identity -v == ^(v-1).
func (z *Int) Or(x, y *Int) *Int {
	if x.neg != y.neg {
		// Mixed signs. Normalize so x >= 0, y < 0 (| is symmetric).
		if x.neg {
			x, y = y, x
		}
		// x | (-y) == x | ^(y-1) == ^((y-1) &^ x) == -(^((y-1) &^ x) + 1)
		ym1 := nat(nil).sub(y.abs, natOne)
		z.abs = z.abs.add(z.abs.andNot(ym1, x.abs), natOne)
		z.neg = true // z cannot be zero if one of x or y is negative
		return z
	}

	if !x.neg {
		// Both non-negative: plain magnitude OR.
		z.abs = z.abs.or(x.abs, y.abs)
		z.neg = false
		return z
	}

	// Both negative:
	// (-x) | (-y) == ^(x-1) | ^(y-1) == ^((x-1) & (y-1)) == -(((x-1) & (y-1)) + 1)
	xm1 := nat(nil).sub(x.abs, natOne)
	ym1 := nat(nil).sub(y.abs, natOne)
	z.abs = z.abs.add(z.abs.and(xm1, ym1), natOne)
	z.neg = true // z cannot be zero if x and y are negative
	return z
}
// Xor sets z = x ^ y and returns z.
// Negative operands are treated in two's-complement form via the
// identity -v == ^(v-1).
func (z *Int) Xor(x, y *Int) *Int {
	if x.neg != y.neg {
		// Mixed signs. Normalize so x >= 0, y < 0 (^ is symmetric).
		if x.neg {
			x, y = y, x
		}
		// x ^ (-y) == x ^ ^(y-1) == ^(x ^ (y-1)) == -((x ^ (y-1)) + 1)
		ym1 := nat(nil).sub(y.abs, natOne)
		z.abs = z.abs.add(z.abs.xor(x.abs, ym1), natOne)
		z.neg = true // z cannot be zero if only one of x or y is negative
		return z
	}

	if x.neg {
		// Both negative:
		// (-x) ^ (-y) == ^(x-1) ^ ^(y-1) == (x-1) ^ (y-1)
		xm1 := nat(nil).sub(x.abs, natOne)
		ym1 := nat(nil).sub(y.abs, natOne)
		z.abs = z.abs.xor(xm1, ym1)
		z.neg = false
		return z
	}

	// Both non-negative: plain magnitude XOR.
	z.abs = z.abs.xor(x.abs, y.abs)
	z.neg = false
	return z
}
// Not sets z = ^x and returns z.
// In two's complement, ^x == -(x+1).
func (z *Int) Not(x *Int) *Int {
	if !x.neg {
		// ^x == -x-1 == -(x+1)
		z.abs = z.abs.add(x.abs, natOne)
		z.neg = true // z cannot be zero if x is positive
		return z
	}

	// ^(-x) == ^(^(x-1)) == x-1
	z.abs = z.abs.sub(x.abs, natOne)
	z.neg = false
	return z
}
// Sqrt sets z to ⌊√x⌋, the largest integer such that z² ≤ x, and returns z.
// It panics if x is negative.
func (z *Int) Sqrt(x *Int) *Int {
	if x.neg {
		panic("square root of negative number")
	}
	z.abs = z.abs.sqrt(nil, x.abs)
	z.neg = false // an integer square root is never negative
	return z
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements int-to-string conversion functions.
package big
import (
"errors"
"fmt"
"io"
)
// Text returns the string representation of x in the given base.
// Base must be between 2 and 62, inclusive. The result uses the
// lower-case letters 'a' to 'z' for digit values 10 to 35, and
// the upper-case letters 'A' to 'Z' for digit values 36 to 61.
// No prefix (such as "0x") is added to the string. If x is a nil
// pointer it returns "<nil>".
func (x *Int) Text(base int) string {
	if x == nil {
		return "<nil>"
	}
	// itoa receives the sign flag and renders the full signed representation.
	return string(x.abs.itoa(x.neg, base))
}
// Append appends the string representation of x, as generated by
// x.Text(base), to buf and returns the extended buffer.
func (x *Int) Append(buf []byte, base int) []byte {
	if x == nil {
		return append(buf, "<nil>"...)
	}
	return append(buf, x.abs.itoa(x.neg, base)...)
}
// String returns the decimal representation of x as generated by
// x.Text(10).
func (x *Int) String() string {
	return x.Text(10)
}
// writeMultiple writes count copies of text to s; it is a no-op for
// empty text or a non-positive count.
func writeMultiple(s fmt.State, text string, count int) {
	if len(text) == 0 {
		return
	}
	b := []byte(text)
	for i := 0; i < count; i++ {
		s.Write(b)
	}
}
var _ fmt.Formatter = intOne // *Int must implement fmt.Formatter
// Format implements [fmt.Formatter]. It accepts the formats
// 'b' (binary), 'o' (octal with 0 prefix), 'O' (octal with 0o prefix),
// 'd' (decimal), 'x' (lowercase hexadecimal), and
// 'X' (uppercase hexadecimal).
// Also supported are the full suite of package fmt's format
// flags for integral types, including '+' and ' ' for sign
// control, '#' for leading zero in octal and for hexadecimal,
// a leading "0x" or "0X" for "%#x" and "%#X" respectively,
// specification of minimum digits precision, output field
// width, space or zero padding, and '-' for left or right
// justification.
func (x *Int) Format(s fmt.State, ch rune) {
	// determine base
	var base int
	switch ch {
	case 'b':
		base = 2
	case 'o', 'O':
		base = 8
	case 'd', 's', 'v':
		base = 10
	case 'x', 'X':
		base = 16
	default:
		// unknown format
		fmt.Fprintf(s, "%%!%c(big.Int=%s)", ch, x.String())
		return
	}

	// The nil check comes after the verb check so an invalid verb on a
	// nil receiver still produces the %! error form (x.String handles nil).
	if x == nil {
		fmt.Fprint(s, "<nil>")
		return
	}

	// determine sign character
	sign := ""
	switch {
	case x.neg:
		sign = "-"
	case s.Flag('+'): // supersedes ' ' when both specified
		sign = "+"
	case s.Flag(' '):
		sign = " "
	}

	// determine prefix characters for indicating output base
	prefix := ""
	if s.Flag('#') {
		switch ch {
		case 'b': // binary
			prefix = "0b"
		case 'o': // octal
			prefix = "0"
		case 'x': // hexadecimal
			prefix = "0x"
		case 'X':
			prefix = "0X"
		}
	}
	// 'O' always emits a 0o prefix, with or without '#'.
	if ch == 'O' {
		prefix = "0o"
	}

	digits := x.abs.utoa(base)
	if ch == 'X' {
		// faster than bytes.ToUpper
		for i, d := range digits {
			if 'a' <= d && d <= 'z' {
				digits[i] = 'A' + (d - 'a')
			}
		}
	}

	// number of characters for the three classes of number padding
	var left int   // space characters to left of digits for right justification ("%8d")
	var zeros int  // zero characters (actually cs[0]) as left-most digits ("%.8d")
	var right int  // space characters to right of digits for left justification ("%-8d")

	// determine number padding from precision: the least number of digits to output
	precision, precisionSet := s.Precision()
	if precisionSet {
		switch {
		case len(digits) < precision:
			zeros = precision - len(digits) // count of zero padding
		case len(digits) == 1 && digits[0] == '0' && precision == 0:
			return // print nothing if zero value (x == 0) and zero precision ("." or ".0")
		}
	}

	// determine field pad from width: the least number of characters to output
	length := len(sign) + len(prefix) + zeros + len(digits)
	if width, widthSet := s.Width(); widthSet && length < width { // pad as specified
		switch d := width - length; {
		case s.Flag('-'):
			// pad on the right with spaces; supersedes '0' when both specified
			right = d
		case s.Flag('0') && !precisionSet:
			// pad with zeros unless precision also specified
			zeros = d
		default:
			// pad on the left with spaces
			left = d
		}
	}

	// print number as [left pad][sign][prefix][zero pad][digits][right pad]
	writeMultiple(s, " ", left)
	writeMultiple(s, sign, 1)
	writeMultiple(s, prefix, 1)
	writeMultiple(s, "0", zeros)
	s.Write(digits)
	writeMultiple(s, " ", right)
}
// scan sets z to the integer value corresponding to the longest possible prefix
// read from r representing a signed integer number in a given conversion base.
// It returns z, the actual conversion base used, and an error, if any. In the
// error case, the value of z is undefined but the returned value is nil. The
// syntax follows the syntax of integer literals in Go.
//
// The base argument must be 0 or a value from 2 through MaxBase. If the base
// is 0, the string prefix determines the actual conversion base. A prefix of
// “0b” or “0B” selects base 2; a “0”, “0o”, or “0O” prefix selects
// base 8, and a “0x” or “0X” prefix selects base 16. Otherwise the selected
// base is 10.
func (z *Int) scan(r io.ByteScanner, base int) (*Int, int, error) {
	// determine sign
	neg, err := scanSign(r)
	if err != nil {
		return nil, 0, err
	}

	// determine mantissa
	z.abs, base, _, err = z.abs.scan(r, base, false)
	if err != nil {
		return nil, base, err
	}
	z.neg = len(z.abs) > 0 && neg // 0 has no sign

	return z, base, nil
}
func scanSign(r io.ByteScanner) (neg bool, err error) {
var ch byte
if ch, err = r.ReadByte(); err != nil {
return false, err
}
switch ch {
case '-':
neg = true
case '+':
// nothing to do
default:
r.UnreadByte()
}
return
}
// byteReader is a local wrapper around fmt.ScanState;
// it implements the ByteReader interface.
type byteReader struct {
	fmt.ScanState
}

// ReadByte reads one byte via ReadRune, reporting an error when the
// next rune is wider than a single byte.
func (r byteReader) ReadByte() (byte, error) {
	ch, size, err := r.ReadRune()
	if size != 1 && err == nil {
		err = fmt.Errorf("invalid rune %#U", ch)
	}
	return byte(ch), err
}

// UnreadByte unreads the previously read byte by unreading its rune.
func (r byteReader) UnreadByte() error {
	return r.UnreadRune()
}
var _ fmt.Scanner = intOne // *Int must implement fmt.Scanner
// Scan is a support routine for [fmt.Scanner]; it sets z to the value of
// the scanned number. It accepts the formats 'b' (binary), 'o' (octal),
// 'd' (decimal), 'x' (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
func (z *Int) Scan(s fmt.ScanState, ch rune) error {
	s.SkipSpace() // skip leading space characters
	base := 0
	switch ch {
	case 'b':
		base = 2
	case 'o':
		base = 8
	case 'd':
		base = 10
	case 'x', 'X':
		base = 16
	case 's', 'v':
		// let scan determine the base (base stays 0)
	default:
		return errors.New("Int.Scan: invalid verb")
	}
	// byteReader adapts the ScanState to the io.ByteScanner expected by scan.
	_, _, err := z.scan(byteReader{s}, base)
	return err
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements encoding/decoding of Ints.
package big
import (
"bytes"
"fmt"
)
// Gob codec version. Permits backward-compatible changes to the encoding.
const intGobVersion byte = 1

// GobEncode implements the [encoding/gob.GobEncoder] interface.
// The payload is a single header byte (version<<1 | sign bit) followed
// by the magnitude bytes as produced by nat.bytes.
func (x *Int) GobEncode() ([]byte, error) {
	if x == nil {
		// A nil receiver encodes as an empty payload.
		return nil, nil
	}
	buf := make([]byte, 1+len(x.abs)*_S) // extra byte for version and sign bit
	i := x.abs.bytes(buf) - 1            // i >= 0
	b := intGobVersion << 1              // make space for sign bit
	if x.neg {
		b |= 1
	}
	buf[i] = b
	// Only buf[i:] is significant; leading bytes were scratch space.
	return buf[i:], nil
}
// GobDecode implements the [encoding/gob.GobDecoder] interface.
// It accepts the payload produced by GobEncode: a header byte holding
// the codec version and sign bit, followed by the magnitude bytes.
func (z *Int) GobDecode(buf []byte) error {
	if len(buf) == 0 {
		// Other side sent a nil or default value.
		*z = Int{}
		return nil
	}
	version := buf[0] >> 1
	if version != intGobVersion {
		return fmt.Errorf("Int.GobDecode: encoding version %d not supported", version)
	}
	z.neg = buf[0]&1 != 0
	z.abs = z.abs.setBytes(buf[1:])
	return nil
}
// AppendText implements the [encoding.TextAppender] interface.
// The text is the decimal representation, as produced by Append(b, 10).
func (x *Int) AppendText(b []byte) (text []byte, err error) {
	return x.Append(b, 10), nil
}
// MarshalText implements the [encoding.TextMarshaler] interface.
// It is equivalent to AppendText with a nil buffer.
func (x *Int) MarshalText() (text []byte, err error) {
	return x.AppendText(nil)
}
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
// Base 0 is used, so Go integer-literal prefixes (0b, 0o, 0x) are honored.
func (z *Int) UnmarshalText(text []byte) error {
	if _, ok := z.setFromScanner(bytes.NewReader(text), 0); !ok {
		return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Int", text)
	}
	return nil
}
// The JSON marshalers are only here for API backward compatibility
// (programs that explicitly look for these two methods). JSON works
// fine with the TextMarshaler only.

// MarshalJSON implements the [encoding/json.Marshaler] interface.
func (x *Int) MarshalJSON() ([]byte, error) {
	if x == nil {
		// A nil *Int marshals as JSON null.
		return []byte("null"), nil
	}
	// Decimal text; valid JSON number syntax without quoting.
	return x.abs.itoa(x.neg, 10), nil
}
// UnmarshalJSON implements the [encoding/json.Unmarshaler] interface.
// A JSON null leaves z untouched, matching encoding/json's convention.
func (z *Int) UnmarshalJSON(text []byte) error {
	if bytes.Equal(text, []byte("null")) {
		return nil
	}
	return z.UnmarshalText(text)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements unsigned multi-precision integers (natural
// numbers). They are the building blocks for the implementation
// of signed integers, rationals, and floating-point numbers.
//
// Caution: This implementation relies on the function "alias"
// which assumes that (nat) slice capacities are never
// changed (no 3-operand slice expressions). If that
// changes, alias needs to be updated for correctness.
package big
import (
"internal/byteorder"
"math/bits"
"math/rand"
"slices"
"sync"
)
// An unsigned integer x of the form
//
//	x = x[n-1]*_B^(n-1) + x[n-2]*_B^(n-2) + ... + x[1]*_B + x[0]
//
// with 0 <= x[i] < _B and 0 <= i < n is stored in a slice of length n,
// with the digits x[i] as the slice elements.
//
// A number is normalized if the slice contains no leading 0 digits.
// During arithmetic operations, denormalized values may occur but are
// always normalized before returning the final result. The normalized
// representation of 0 is the empty or nil slice (length = 0).
type nat []Word

// Commonly used small nat values.
var (
	natOne  = nat{1}
	natTwo  = nat{2}
	natFive = nat{5}
	natTen  = nat{10}
)
// String returns the hexadecimal representation of z with a "0x" prefix
// (debugging aid; the sign is always rendered as non-negative).
func (z nat) String() string {
	return "0x" + string(z.itoa(false, 16))
}
// norm strips leading (most significant) zero words from z and returns
// the result; zero normalizes to a length-0 slice.
func (z nat) norm() nat {
	n := len(z)
	for n > 0 && z[n-1] == 0 {
		n--
	}
	return z[:n]
}
// make returns a nat of length n, reusing z's backing storage when its
// capacity suffices and allocating a fresh slice otherwise. The contents
// of the returned words are unspecified.
func (z nat) make(n int) nat {
	if n <= cap(z) {
		return z[:n] // reuse z
	}
	if n == 1 {
		// Most nats start small and stay that way; don't over-allocate.
		return make(nat, 1)
	}
	// Choosing a good value for e has significant performance impact
	// because it increases the chance that a value can be reused.
	const e = 4 // extra capacity
	return make(nat, n, n+e)
}
// setWord sets z to the single-word value x and returns the normalized
// result (a length-0 slice for x == 0).
func (z nat) setWord(x Word) nat {
	if x == 0 {
		return z[:0]
	}
	z = z.make(1)
	z[0] = x
	return z
}
// setUint64 sets z to the value of x and returns the normalized result.
// On 64-bit platforms this is always a single word; the 2-word path is
// only reachable when Word is 32 bits wide.
func (z nat) setUint64(x uint64) nat {
	// single-word value
	if w := Word(x); uint64(w) == x {
		return z.setWord(w)
	}

	// 2-word value
	z = z.make(2)
	z[1] = Word(x >> 32)
	z[0] = Word(x)
	return z
}
// set sets z to a copy of x (same length, same digits) and returns z.
func (z nat) set(x nat) nat {
	z = z.make(len(x))
	copy(z, x)
	return z
}
// add sets z = x + y and returns the normalized result.
func (z nat) add(x, y nat) nat {
	m := len(x)
	n := len(y)

	switch {
	case m < n:
		// Ensure m >= n; addition is symmetric.
		return z.add(y, x)
	case m == 0:
		// n == 0 because m >= n; result is 0
		return z[:0]
	case n == 0:
		// result is x
		return z.set(x)
	}
	// m > 0

	z = z.make(m + 1)
	// Add the overlapping low words, then propagate the carry through
	// the remaining high words of x.
	c := addVV(z[:n], x[:n], y[:n])
	if m > n {
		c = addVW(z[n:m], x[n:], c)
	}
	z[m] = c // final carry word; norm drops it when zero

	return z.norm()
}
// sub sets z = x - y and returns the normalized result.
// It panics with "underflow" if x < y.
func (z nat) sub(x, y nat) nat {
	m := len(x)
	n := len(y)

	switch {
	case m < n:
		// x has fewer words than y, so x < y.
		panic("underflow")
	case m == 0:
		// n == 0 because m >= n; result is 0
		return z[:0]
	case n == 0:
		// result is x
		return z.set(x)
	}
	// m > 0

	z = z.make(m)
	// Subtract the overlapping low words, then propagate the borrow
	// through the remaining high words of x.
	c := subVV(z[:n], x[:n], y[:n])
	if m > n {
		c = subVW(z[n:], x[n:], c)
	}
	if c != 0 {
		// A borrow out of the top word means x < y.
		panic("underflow")
	}

	return z.norm()
}
// cmp compares the normalized operands x and y and returns
// -1 if x < y, 0 if x == y, and +1 if x > y.
func (x nat) cmp(y nat) int {
	m, n := len(x), len(y)
	if m != n || m == 0 {
		// Normalized values of different lengths compare by length;
		// equal zero lengths mean both are zero.
		switch {
		case m < n:
			return -1
		case m > n:
			return 1
		}
		return 0
	}

	// Same length: scan down from the most significant word to the
	// first difference.
	i := m - 1
	for i > 0 && x[i] == y[i] {
		i--
	}
	switch {
	case x[i] < y[i]:
		return -1
	case x[i] > y[i]:
		return 1
	}
	return 0
}
// montgomery computes z mod m = x*y*2**(-n*_W) mod m,
// assuming k = -1/m mod 2**_W.
// z is used for storing the result which is returned;
// z must not alias x, y or m.
// See Gueron, "Efficient Software Implementations of Modular Exponentiation".
// https://eprint.iacr.org/2011/239.pdf
// In the terminology of that paper, this is an "Almost Montgomery Multiplication":
// x and y are required to satisfy 0 <= z < 2**(n*_W) and then the result
// z is guaranteed to satisfy 0 <= z < 2**(n*_W), but it may not be < m.
func (z nat) montgomery(x, y, m nat, k Word, n int) nat {
	// This code assumes x, y, m are all the same length, n.
	// (required by addMulVVW and the for loop).
	// It also assumes that x, y are already reduced mod m,
	// or else the result will not be properly reduced.
	if len(x) != n || len(y) != n || len(m) != n {
		panic("math/big: mismatched montgomery number lengths")
	}
	z = z.make(n * 2)
	clear(z)

	var c Word
	for i := 0; i < n; i++ {
		d := y[i]
		// z[i:n+i] += x*d (one word of y per iteration).
		c2 := addMulVVWW(z[i:n+i], z[i:n+i], x, d, 0)
		// t is chosen (via k = -1/m mod 2**_W) so that adding t*m
		// zeroes z[i], allowing the implicit word shift.
		t := z[i] * k
		c3 := addMulVVWW(z[i:n+i], z[i:n+i], m, t, 0)
		// Sum the three carries, detecting overflow by wraparound.
		cx := c + c2
		cy := cx + c3
		z[n+i] = cy
		if cx < c2 || cy < c3 {
			c = 1
		} else {
			c = 0
		}
	}

	if c != 0 {
		// Result exceeded n words; subtract m once to bring it back
		// below 2**(n*_W) (it may still be >= m, see doc comment).
		subVV(z[:n], z[n:], m)
	} else {
		copy(z[:n], z[n:])
	}

	return z[:n]
}
// alias reports whether x and y share the same base array.
//
// Note: alias assumes that the capacity of underlying arrays
// is never changed for nat values; i.e. that there are
// no 3-operand slice expressions in this code (or worse,
// reflect-based operations to the same effect).
func alias(x, y nat) bool {
	// Two slices share a base array iff their last addressable elements
	// (at full capacity) are the same element.
	return cap(x) > 0 && cap(y) > 0 && &x[0:cap(x)][cap(x)-1] == &y[0:cap(y)][cap(y)-1]
}
// addTo implements z += x; z must be long enough.
// (we don't use nat.add because we need z to stay the same
// slice, and we don't need to normalize z after each addition)
func addTo(z, x nat) {
	if n := len(x); n > 0 {
		if c := addVV(z[:n], z[:n], x[:n]); c != 0 {
			// Propagate the carry through z's remaining words, if any.
			if n < len(z) {
				addVW(z[n:], z[n:], c)
			}
		}
	}
}
// mulRange computes the product of all the unsigned integers in the
// range [a, b] inclusively. If a > b (empty range), the result is 1.
// The caller may pass stk == nil to request that mulRange obtain and release one itself.
func (z nat) mulRange(stk *stack, a, b uint64) nat {
	switch {
	case a == 0:
		// cut long ranges short (optimization)
		return z.setUint64(0)
	case a > b:
		return z.setUint64(1)
	case a == b:
		return z.setUint64(a)
	case a+1 == b:
		return z.mul(stk, nat(nil).setUint64(a), nat(nil).setUint64(b))
	}

	if stk == nil {
		stk = getStack()
		defer stk.free()
	}

	// Divide and conquer: multiply the products of the two half-ranges.
	m := a + (b-a)/2 // avoid overflow
	return z.mul(stk, nat(nil).mulRange(stk, a, m), nat(nil).mulRange(stk, m+1, b))
}
// A stack provides temporary storage for complex calculations
// such as multiplication and division.
// The stack is a simple slice of words, extended as needed
// to hold all the temporary storage for a calculation.
// In general, if a function takes a *stack, it expects a non-nil *stack.
// However, certain functions may allow passing a nil *stack instead,
// so that they can handle trivial stack-free cases without forcing the
// caller to obtain and free a stack that will be unused. These functions
// document that they accept a nil *stack in their doc comments.
type stack struct {
	w []Word // backing storage; len(w) acts as the stack pointer
}

// stackPool recycles stacks between calculations; see getStack and stack.free.
var stackPool sync.Pool
// getStack returns a temporary stack, recycled from stackPool when possible.
// The caller must call [stack.free] to give up use of the stack when finished.
func getStack() *stack {
	if s, ok := stackPool.Get().(*stack); ok && s != nil {
		return s
	}
	return new(stack)
}
// free returns the stack for use by another calculation.
// The word slice is truncated but its capacity is kept for reuse.
func (s *stack) free() {
	s.w = s.w[:0]
	stackPool.Put(s)
}
// save returns the current stack pointer.
// A future call to restore with the same value
// frees any temporaries allocated on the stack after the call to save.
func (s *stack) save() int {
	return len(s.w) // the "stack pointer" is simply the current length of w
}
// restore restores the stack pointer to n.
// It is almost always invoked as
//
//	defer stk.restore(stk.save())
//
// which makes sure to pop any temporaries allocated in the current function
// from the stack before returning.
func (s *stack) restore(n int) {
	s.w = s.w[:n] // drop any temporaries allocated after the matching save
}
// nat returns a nat of n words, allocated on the stack.
// The returned slice has capacity exactly n (three-index slice),
// so appends cannot clobber later stack allocations.
func (s *stack) nat(n int) nat {
	nr := (n + 3) &^ 3 // round up to multiple of 4
	off := len(s.w)
	s.w = slices.Grow(s.w, nr)
	s.w = s.w[:off+nr]
	x := s.w[off : off+n : off+n]
	if n > 0 {
		// Deliberately poison the first word with a junk value —
		// presumably so callers that forget to initialize the result
		// notice quickly, since the memory may be reused. TODO confirm.
		x[0] = 0xfedcb
	}
	return x
}
// bitLen returns the length of x in bits.
// Unlike most methods, it works even if x is not normalized.
func (x nat) bitLen() int {
	// This function is used in cryptographic operations. It must not leak
	// anything but the Int's sign and bit size through side-channels. Any
	// changes must be reviewed by a security expert.
	if i := len(x) - 1; i >= 0 {
		// bits.Len uses a lookup table for the low-order bits on some
		// architectures. Neutralize any input-dependent behavior by setting all
		// bits after the first one bit.
		top := uint(x[i])
		top |= top >> 1
		top |= top >> 2
		top |= top >> 4
		top |= top >> 8
		top |= top >> 16
		top |= top >> 16 >> 16 // ">> 32" doesn't compile on 32-bit architectures
		return i*_W + bits.Len(top)
	}
	// x is empty (i.e., zero).
	return 0
}
// trailingZeroBits returns the number of consecutive least significant zero
// bits of x.
func (x nat) trailingZeroBits() uint {
	if len(x) == 0 {
		return 0
	}
	// x is normalized, so a nonzero word exists; find the lowest one.
	var k uint
	for ; x[k] == 0; k++ {
	}
	// x[k] != 0
	return k*_W + uint(bits.TrailingZeros(uint(x[k])))
}
// isPow2 returns i, true when x == 2**i and 0, false otherwise.
//
// Fix: guard the zero-length (zero-valued) nat. The scan loop below indexes
// x[i] unconditionally, so a zero-length x previously panicked with an
// index-out-of-range; the sibling trailingZeroBits guards this case, and 0
// is not a power of two, so return (0, false) instead.
func (x nat) isPow2() (uint, bool) {
	if len(x) == 0 {
		// x == 0 is not a power of two.
		return 0, false
	}
	// Skip low-order zero words; x normalized implies a nonzero word exists.
	var i uint
	for x[i] == 0 {
		i++
	}
	// x is a power of two iff the only nonzero word is the top word
	// and that word itself has exactly one bit set.
	if i == uint(len(x))-1 && x[i]&(x[i]-1) == 0 {
		return i*_W + uint(bits.TrailingZeros(uint(x[i]))), true
	}
	return 0, false
}
// same reports whether x and y are the identical slice:
// equal (nonzero) length and backed by the same first element.
func same(x, y nat) bool {
	if len(x) != len(y) || len(x) == 0 {
		return false
	}
	return &x[0] == &y[0]
}
// lsh sets z = x << s and returns z, trimmed of leading zero words.
func (z nat) lsh(x nat, s uint) nat {
	if s == 0 {
		if same(z, x) {
			return z
		}
		if !alias(z, x) {
			return z.set(x)
		}
		// z overlaps x without being identical: fall through to the
		// general path below.
	}
	m := len(x)
	if m == 0 {
		return z[:0]
	}
	// m > 0
	n := m + int(s/_W) // result length after shifting by whole words
	z = z.make(n + 1)  // one extra word for bits shifted out of the top
	if s %= _W; s == 0 {
		copy(z[n-m:n], x)
		z[n] = 0
	} else {
		z[n] = lshVU(z[n-m:n], x, s)
	}
	// Zero the low-order words vacated by the shift.
	clear(z[0 : n-m])
	return z.norm()
}
// rsh sets z = x >> s and returns z.
func (z nat) rsh(x nat, s uint) nat {
	if s == 0 {
		if same(z, x) {
			return z
		}
		if !alias(z, x) {
			return z.set(x)
		}
		// z overlaps x without being identical: fall through to the
		// general path below.
	}
	m := len(x)
	n := m - int(s/_W) // words remaining after dropping whole low words
	if n <= 0 {
		// Shift is at least as wide as x: result is 0.
		return z[:0]
	}
	// n > 0
	z = z.make(n)
	if s %= _W; s == 0 {
		copy(z, x[m-n:])
	} else {
		rshVU(z, x[m-n:], s)
	}
	return z.norm()
}
// setBit returns z = x with the i'th bit set to b (which must be 0 or 1);
// it panics for any other b.
func (z nat) setBit(x nat, i uint, b uint) nat {
	j := int(i / _W)         // word index holding bit i
	m := Word(1) << (i % _W) // mask for bit i within that word
	n := len(x)
	switch b {
	case 0:
		z = z.make(n)
		copy(z, x)
		if j >= n {
			// Bit i is already 0 beyond the top word:
			// no need to grow
			return z
		}
		z[j] &^= m
		// Clearing the top bit may expose leading zeros.
		return z.norm()
	case 1:
		if j >= n {
			// Grow to hold the new bit; zero the freshly exposed words.
			z = z.make(j + 1)
			clear(z[n:])
		} else {
			z = z.make(n)
		}
		copy(z, x)
		z[j] |= m
		// no need to normalize
		return z
	}
	panic("set bit is not 0 or 1")
}
// bit returns the value of the i'th bit, with lsb == bit 0.
// Bits beyond the top of x read as 0.
func (x nat) bit(i uint) uint {
	j := i / _W
	if j >= uint(len(x)) {
		return 0
	}
	return uint(x[j]>>(i%_W)) & 1
}
// sticky returns 1 if there's a 1 bit within the
// i least significant bits, otherwise it returns 0.
func (x nat) sticky(i uint) uint {
	j := i / _W
	if j >= uint(len(x)) {
		if len(x) == 0 {
			return 0
		}
		// x is nonzero (normalized) and entirely below bit i.
		return 1
	}
	// 0 <= j < len(x)
	// Any nonzero word wholly below bit i makes the result 1.
	for _, x := range x[:j] {
		if x != 0 {
			return 1
		}
	}
	// Test the i%_W low bits of word j. When i%_W == 0 the shift count
	// is _W, which yields 0 in Go — correct, since no partial word remains.
	if x[j]<<(_W-i%_W) != 0 {
		return 1
	}
	return 0
}
// and computes z = x & y. Only the overlapping words matter:
// the longer operand's extra words are ANDed with implicit zeros.
func (z nat) and(x, y nat) nat {
	n := min(len(x), len(y))
	z = z.make(n)
	for i := range z {
		z[i] = x[i] & y[i]
	}
	return z.norm()
}
// trunc returns z = x mod 2ⁿ.
func (z nat) trunc(x nat, n uint) nat {
	w := (n + _W - 1) / _W // words needed to hold n bits
	if uint(len(x)) < w {
		// x already fits within n bits.
		return z.set(x)
	}
	z = z.make(int(w))
	copy(z, x)
	if r := n % _W; r != 0 {
		// Mask away the unused high bits of the top word.
		z[len(z)-1] &= 1<<r - 1
	}
	return z.norm()
}
// andNot computes z = x &^ y (bit clear).
func (z nat) andNot(x, y nat) nat {
	m := len(x)
	n := min(len(y), m)
	z = z.make(m)
	for i := 0; i < n; i++ {
		z[i] = x[i] &^ y[i]
	}
	// Words of x above len(y) are unaffected by the mask.
	copy(z[n:], x[n:m])
	return z.norm()
}
// or computes z = x | y.
func (z nat) or(x, y nat) nat {
	m, n := len(x), len(y)
	longer := x
	if m < n {
		m, n = n, m
		longer = y
	}
	// m >= n; longer holds m words.
	z = z.make(m)
	for i := 0; i < n; i++ {
		z[i] = x[i] | y[i]
	}
	// High words of the longer operand pass through unchanged.
	copy(z[n:], longer[n:m])
	return z.norm()
}
// xor computes z = x ^ y.
func (z nat) xor(x, y nat) nat {
	m, n := len(x), len(y)
	longer := x
	if m < n {
		m, n = n, m
		longer = y
	}
	// m >= n; longer holds m words.
	z = z.make(m)
	for i := 0; i < n; i++ {
		z[i] = x[i] ^ y[i]
	}
	// XOR with implicit zeros leaves the longer operand's high words as-is.
	copy(z[n:], longer[n:m])
	return z.norm()
}
// random creates a random integer in [0..limit), using the space in z if
// possible. n is the bit length of limit.
func (z nat) random(rand *rand.Rand, limit nat, n int) nat {
	if alias(z, limit) {
		z = nil // z is an alias for limit - cannot reuse
	}
	z = z.make(len(limit))
	// Number of meaningful bits in the most significant word of limit.
	bitLengthOfMSW := uint(n % _W)
	if bitLengthOfMSW == 0 {
		bitLengthOfMSW = _W
	}
	mask := Word((1 << bitLengthOfMSW) - 1)
	// Rejection sampling: draw uniformly from [0, 2ⁿ) until the draw is < limit.
	for {
		switch _W {
		case 32:
			for i := range z {
				z[i] = Word(rand.Uint32())
			}
		case 64:
			// Two 32-bit draws per word (rather than one Uint64).
			for i := range z {
				z[i] = Word(rand.Uint32()) | Word(rand.Uint32())<<32
			}
		default:
			panic("unknown word size")
		}
		z[len(limit)-1] &= mask // keep the draw within n bits
		if z.cmp(limit) < 0 {
			break
		}
	}
	return z.norm()
}
// If m != 0 (i.e., len(m) != 0), expNN sets z to x**y mod m;
// otherwise it sets z to x**y. The result is the value of z.
// The caller may pass stk == nil to request that expNN obtain and release one itself.
// If slow is true, the Montgomery/windowed fast paths are skipped and the
// simple bit-by-bit square-and-multiply loop is used even for large exponents.
func (z nat) expNN(stk *stack, x, y, m nat, slow bool) nat {
	if alias(z, x) || alias(z, y) {
		// We cannot allow in-place modification of x or y.
		z = nil
	}

	// x**y mod 1 == 0
	if len(m) == 1 && m[0] == 1 {
		return z.setWord(0)
	}
	// m == 0 || m > 1

	// x**0 == 1
	if len(y) == 0 {
		return z.setWord(1)
	}
	// y > 0

	// 0**y = 0
	if len(x) == 0 {
		return z.setWord(0)
	}
	// x > 0

	// 1**y = 1
	if len(x) == 1 && x[0] == 1 {
		return z.setWord(1)
	}
	// x > 1

	// x**1 == x
	if len(y) == 1 && y[0] == 1 && len(m) == 0 {
		return z.set(x)
	}
	if stk == nil {
		stk = getStack()
		defer stk.free()
	}
	if len(y) == 1 && y[0] == 1 { // len(m) > 0
		return z.rem(stk, x, m)
	}
	// y > 1

	if len(m) != 0 {
		// We likely end up being as long as the modulus.
		z = z.make(len(m))
		// If the exponent is large, we use the Montgomery method for odd values,
		// and a 4-bit, windowed exponentiation for powers of two,
		// and a CRT-decomposed Montgomery method for the remaining values
		// (even values times non-trivial odd values, which decompose into one
		// instance of each of the first two cases).
		if len(y) > 1 && !slow {
			if m[0]&1 == 1 {
				return z.expNNMontgomery(stk, x, y, m)
			}
			if logM, ok := m.isPow2(); ok {
				return z.expNNWindowed(stk, x, y, logM)
			}
			return z.expNNMontgomeryEven(stk, x, y, m)
		}
	}

	z = z.set(x)
	v := y[len(y)-1] // v > 0 because y is normalized and y > 0
	shift := nlz(v) + 1
	v <<= shift // drop the implicit leading 1 bit; z already holds x**1
	var q nat

	const mask = 1 << (_W - 1)

	// We walk through the bits of the exponent one by one. Each time we
	// see a bit, we square, thus doubling the power. If the bit is a one,
	// we also multiply by x, thus adding one to the power.

	w := _W - int(shift)
	// zz and r are used to avoid allocating in mul and div as
	// otherwise the arguments would alias.
	var zz, r nat
	for j := 0; j < w; j++ {
		zz = zz.sqr(stk, z)
		zz, z = z, zz

		if v&mask != 0 {
			zz = zz.mul(stk, z, x)
			zz, z = z, zz
		}

		if len(m) != 0 {
			// Reduce mod m after every step; rotate the scratch slices
			// so no two arguments of div alias each other.
			zz, r = zz.div(stk, r, z, m)
			zz, r, q, z = q, z, zz, r
		}

		v <<= 1
	}

	// Process the remaining exponent words, all _W bits of each.
	for i := len(y) - 2; i >= 0; i-- {
		v = y[i]

		for j := 0; j < _W; j++ {
			zz = zz.sqr(stk, z)
			zz, z = z, zz

			if v&mask != 0 {
				zz = zz.mul(stk, z, x)
				zz, z = z, zz
			}

			if len(m) != 0 {
				zz, r = zz.div(stk, r, z, m)
				zz, r, q, z = q, z, zz, r
			}

			v <<= 1
		}
	}

	return z.norm()
}
// expNNMontgomeryEven calculates x**y mod m where m = m1 × m2 for m1 = 2ⁿ and m2 odd.
// It uses two recursive calls to expNN for x**y mod m1 and x**y mod m2
// and then uses the Chinese Remainder Theorem to combine the results.
// The recursive call using m1 will use expNNWindowed,
// while the recursive call using m2 will use expNNMontgomery.
// For more details, see Ç. K. Koç, “Montgomery Reduction with Even Modulus”,
// IEE Proceedings: Computers and Digital Techniques, 141(5) 314-316, September 1994.
// http://www.people.vcu.edu/~jwang3/CMSC691/j34monex.pdf
func (z nat) expNNMontgomeryEven(stk *stack, x, y, m nat) nat {
	// Split m = m₁ × m₂ where m₁ = 2ⁿ
	n := m.trailingZeroBits()
	m1 := nat(nil).lsh(natOne, n)
	m2 := nat(nil).rsh(m, n)

	// We want z = x**y mod m.
	// z₁ = x**y mod m1 = (x**y mod m) mod m1 = z mod m1
	// z₂ = x**y mod m2 = (x**y mod m) mod m2 = z mod m2
	// (We are using the math/big convention for names here,
	// where the computation is z = x**y mod m, so its parts are z1 and z2.
	// The paper is computing x = a**e mod n; it refers to these as x2 and z1.)
	z1 := nat(nil).expNN(stk, x, y, m1, false)
	z2 := nat(nil).expNN(stk, x, y, m2, false)

	// Reconstruct z from z₁, z₂ using CRT, using algorithm from paper,
	// which uses only a single modInverse (and an easy one at that).
	//	p = (z₁ - z₂) × m₂⁻¹ (mod m₁)
	//	z = z₂ + p × m₂
	// The final addition is in range because:
	//	z = z₂ + p × m₂
	//	  ≤ z₂ + (m₁-1) × m₂
	//	  < m₂ + (m₁-1) × m₂
	//	  = m₁ × m₂
	//	  = m.
	z = z.set(z2)

	// Compute (z₁ - z₂) mod m1 [m1 == 2**n] into z1.
	z1 = z1.subMod2N(z1, z2, n)

	// Reuse z2 for p = (z₁ - z₂) [in z1] * m2⁻¹ (mod m₁ [= 2ⁿ]).
	m2inv := nat(nil).modInverse(m2, m1)
	z2 = z2.mul(stk, z1, m2inv)
	z2 = z2.trunc(z2, n)

	// Reuse z1 for p * m2; final sum is < m per the bound above.
	z = z.add(z, z1.mul(stk, z2, m2))
	return z
}
// expNNWindowed calculates x**y mod m using a fixed, 4-bit window,
// where m = 2**logM.
func (z nat) expNNWindowed(stk *stack, x, y nat, logM uint) nat {
	if len(y) <= 1 {
		panic("big: misuse of expNNWindowed")
	}
	if x[0]&1 == 0 {
		// len(y) > 1, so y > logM.
		// x is even, so x**y is a multiple of 2**y which is a multiple of 2**logM.
		return z.setWord(0)
	}
	if logM == 1 {
		// mod 2: any odd x gives 1.
		return z.setWord(1)
	}

	// zz is used to avoid allocating in mul as otherwise
	// the arguments would alias.
	defer stk.restore(stk.save())
	w := int((logM + _W - 1) / _W) // words needed for logM bits
	zz := stk.nat(w)

	const n = 4
	// powers[i] contains x^i.
	var powers [1 << n]nat
	for i := range powers {
		powers[i] = stk.nat(w)
	}
	powers[0] = powers[0].set(natOne)
	powers[1] = powers[1].trunc(x, logM)
	// Fill even entries by squaring, odd entries by one extra multiply.
	for i := 2; i < 1<<n; i += 2 {
		p2, p, p1 := &powers[i/2], &powers[i], &powers[i+1]
		*p = p.sqr(stk, *p2)
		*p = p.trunc(*p, logM)
		*p1 = p1.mul(stk, *p, x)
		*p1 = p1.trunc(*p1, logM)
	}

	// Because phi(2**logM) = 2**(logM-1), x**(2**(logM-1)) = 1,
	// so we can compute x**(y mod 2**(logM-1)) instead of x**y.
	// That is, we can throw away all but the bottom logM-1 bits of y.
	// Instead of allocating a new y, we start reading y at the right word
	// and truncate it appropriately at the start of the loop.
	i := len(y) - 1
	mtop := int((logM - 2) / _W) // -2 because the top word of N bits is the (N-1)/W'th word.
	mmask := ^Word(0)
	if mbits := (logM - 1) & (_W - 1); mbits != 0 {
		mmask = (1 << mbits) - 1
	}
	if i > mtop {
		i = mtop
	}
	advance := false
	z = z.setWord(1)
	for ; i >= 0; i-- {
		yi := y[i]
		if i == mtop {
			yi &= mmask
		}
		for j := 0; j < _W; j += n {
			if advance {
				// Account for use of 4 bits in previous iteration.
				// Unrolled loop for significant performance
				// gain. Use go test -bench=".*" in crypto/rsa
				// to check performance before making changes.
				zz = zz.sqr(stk, z)
				zz, z = z, zz
				z = z.trunc(z, logM)

				zz = zz.sqr(stk, z)
				zz, z = z, zz
				z = z.trunc(z, logM)

				zz = zz.sqr(stk, z)
				zz, z = z, zz
				z = z.trunc(z, logM)

				zz = zz.sqr(stk, z)
				zz, z = z, zz
				z = z.trunc(z, logM)
			}

			// Multiply by the power selected by the top 4 bits of yi.
			zz = zz.mul(stk, z, powers[yi>>(_W-n)])
			zz, z = z, zz
			z = z.trunc(z, logM)

			yi <<= n
			advance = true
		}
	}

	return z.norm()
}
// expNNMontgomery calculates x**y mod m using a fixed, 4-bit window.
// Uses Montgomery representation.
func (z nat) expNNMontgomery(stk *stack, x, y, m nat) nat {
	numWords := len(m)

	// We want the lengths of x and m to be equal.
	// It is OK if x >= m as long as len(x) == len(m).
	if len(x) > numWords {
		_, x = nat(nil).div(stk, nil, x, m)
		// Note: now len(x) <= numWords, not guaranteed ==.
	}
	if len(x) < numWords {
		rr := make(nat, numWords)
		copy(rr, x)
		x = rr
	}

	// Ideally the precomputations would be performed outside, and reused
	// k0 = -m**-1 mod 2**_W. Algorithm from: Dumas, J.G. "On Newton–Raphson
	// Iteration for Multiplicative Inverses Modulo Prime Powers".
	k0 := 2 - m[0]
	t := m[0] - 1
	for i := 1; i < _W; i <<= 1 {
		t *= t
		k0 *= (t + 1)
	}
	k0 = -k0

	// RR = 2**(2*_W*len(m)) mod m
	RR := nat(nil).setWord(1)
	zz := nat(nil).lsh(RR, uint(2*numWords*_W))
	_, RR = nat(nil).div(stk, RR, zz, m)
	if len(RR) < numWords {
		// Pad RR with zero words up to numWords.
		zz = zz.make(numWords)
		copy(zz, RR)
		RR = zz
	}
	// one = 1, with equal length to that of m
	one := make(nat, numWords)
	one[0] = 1

	const n = 4
	// powers[i] contains x^i
	var powers [1 << n]nat
	powers[0] = powers[0].montgomery(one, RR, m, k0, numWords)
	powers[1] = powers[1].montgomery(x, RR, m, k0, numWords)
	for i := 2; i < 1<<n; i++ {
		powers[i] = powers[i].montgomery(powers[i-1], powers[1], m, k0, numWords)
	}

	// initialize z = 1 (Montgomery 1)
	z = z.make(numWords)
	copy(z, powers[0])

	zz = zz.make(numWords)

	// same windowed exponent, but with Montgomery multiplications
	for i := len(y) - 1; i >= 0; i-- {
		yi := y[i]
		for j := 0; j < _W; j += n {
			if i != len(y)-1 || j != 0 {
				// Four Montgomery squarings advance the window by 4 bits.
				zz = zz.montgomery(z, z, m, k0, numWords)
				z = z.montgomery(zz, zz, m, k0, numWords)
				zz = zz.montgomery(z, z, m, k0, numWords)
				z = z.montgomery(zz, zz, m, k0, numWords)
			}
			zz = zz.montgomery(z, powers[yi>>(_W-n)], m, k0, numWords)
			z, zz = zz, z
			yi <<= n
		}
	}
	// convert to regular number
	zz = zz.montgomery(z, one, m, k0, numWords)

	// One last reduction, just in case.
	// See golang.org/issue/13907.
	if zz.cmp(m) >= 0 {
		// Common case is m has high bit set; in that case,
		// since zz is the same length as m, there can be just
		// one multiple of m to remove. Just subtract.
		// We think that the subtract should be sufficient in general,
		// so do that unconditionally, but double-check,
		// in case our beliefs are wrong.
		// The div is not expected to be reached.
		zz = zz.sub(zz, m)
		if zz.cmp(m) >= 0 {
			_, zz = nat(nil).div(stk, nil, zz, m)
		}
	}

	return zz.norm()
}
// bytes writes the value of z into buf using big-endian encoding.
// The value of z is encoded in the slice buf[i:]. If the value of z
// cannot be represented in buf, bytes panics. The number i of unused
// bytes at the beginning of buf is returned as result.
func (z nat) bytes(buf []byte) (i int) {
	// This function is used in cryptographic operations. It must not leak
	// anything but the Int's sign and bit size through side-channels. Any
	// changes must be reviewed by a security expert.
	i = len(buf)
	for _, d := range z {
		for j := 0; j < _S; j++ {
			i--
			if i >= 0 {
				buf[i] = byte(d)
			} else if byte(d) != 0 {
				// A nonzero byte would fall off the front of buf.
				panic("math/big: buffer too small to fit value")
			}
			d >>= 8
		}
	}

	if i < 0 {
		i = 0
	}
	// Skip leading zero bytes so buf[i:] is the minimal encoding.
	for i < len(buf) && buf[i] == 0 {
		i++
	}

	return
}
// bigEndianWord returns the contents of buf interpreted as a big-endian encoded Word value.
func bigEndianWord(buf []byte) Word {
	// _W is a compile-time constant, so only one branch survives.
	if _W == 32 {
		return Word(byteorder.BEUint32(buf))
	}
	return Word(byteorder.BEUint64(buf))
}
// setBytes interprets buf as the bytes of a big-endian unsigned
// integer, sets z to that value, and returns z.
func (z nat) setBytes(buf []byte) nat {
	z = z.make((len(buf) + _S - 1) / _S)

	// Consume whole words from the least significant end of buf.
	i := len(buf)
	for k := 0; i >= _S; k++ {
		z[k] = bigEndianWord(buf[i-_S : i])
		i -= _S
	}
	// Assemble the remaining (partial) most significant word, if any.
	if i > 0 {
		var d Word
		for s := uint(0); i > 0; s += 8 {
			d |= Word(buf[i-1]) << s
			i--
		}
		z[len(z)-1] = d
	}

	return z.norm()
}
// sqrt sets z = ⌊√x⌋
// The caller may pass stk == nil to request that sqrt obtain and release one itself.
func (z nat) sqrt(stk *stack, x nat) nat {
	if x.cmp(natOne) <= 0 {
		// ⌊√0⌋ = 0, ⌊√1⌋ = 1.
		return z.set(x)
	}
	if alias(z, x) {
		z = nil
	}
	if stk == nil {
		stk = getStack()
		defer stk.free()
	}

	// Start with value known to be too large and repeat "z = ⌊(z + ⌊x/z⌋)/2⌋" until it stops getting smaller.
	// See Brent and Zimmermann, Modern Computer Arithmetic, Algorithm 1.13 (SqrtInt).
	// https://members.loria.fr/PZimmermann/mca/pub226.html
	// If x is one less than a perfect square, the sequence oscillates between the correct z and z+1;
	// otherwise it converges to the correct z and stays there.
	var z1, z2 nat
	z1 = z
	z1 = z1.setUint64(1)
	z1 = z1.lsh(z1, uint(x.bitLen()+1)/2) // must be ≥ √x
	for n := 0; ; n++ {
		z2, _ = z2.div(stk, nil, x, z1)
		z2 = z2.add(z2, z1)
		z2 = z2.rsh(z2, 1) // z2 = ⌊(z1 + ⌊x/z1⌋)/2⌋
		if z2.cmp(z1) >= 0 {
			// z1 is answer.
			// Figure out whether z1 or z2 is currently aliased to z by looking at loop count.
			if n&1 == 0 {
				return z1
			}
			return z.set(z1)
		}
		z1, z2 = z2, z1
	}
}
// subMod2N returns z = (x - y) mod 2ⁿ.
func (z nat) subMod2N(x, y nat, n uint) nat {
	// Reduce both operands mod 2ⁿ first, reusing z's storage when the
	// operand already aliases z.
	if uint(x.bitLen()) > n {
		if alias(z, x) {
			// ok to overwrite x in place
			x = x.trunc(x, n)
		} else {
			x = nat(nil).trunc(x, n)
		}
	}
	if uint(y.bitLen()) > n {
		if alias(z, y) {
			// ok to overwrite y in place
			y = y.trunc(y, n)
		} else {
			y = nat(nil).trunc(y, n)
		}
	}
	if x.cmp(y) >= 0 {
		return z.sub(x, y)
	}
	// x - y < 0; x - y mod 2ⁿ = x - y + 2ⁿ = 2ⁿ - (y - x) = 1 + 2ⁿ-1 - (y - x) = 1 + ^(y - x).
	z = z.sub(y, x)
	// Widen z to a full n bits so every bit position exists before complementing.
	for uint(len(z))*_W < n {
		z = append(z, 0)
	}
	for i := range z {
		z[i] = ^z[i]
	}
	z = z.trunc(z, n)
	return z.add(z, natOne)
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements nat-to-string conversion functions.
package big
import (
"errors"
"fmt"
"io"
"math"
"math/bits"
"slices"
"sync"
)
// digits maps a digit value to its ASCII representation;
// e.g. digits[10] == 'a' and digits[36] == 'A'.
const digits = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// Note: MaxBase = len(digits), but it must remain an untyped rune constant
// for API compatibility.

// MaxBase is the largest number base accepted for string conversions.
const MaxBase = 10 + ('z' - 'a' + 1) + ('Z' - 'A' + 1)

// maxBaseSmall is the largest base for which upper- and lowercase letters
// denote the same digit values (10 to 35).
const maxBaseSmall = 10 + ('z' - 'a' + 1)
// maxPow returns (p, n) where p = b**n is the largest power of b with p <= _M.
// For instance maxPow(10) == (1e19, 19) for 19 decimal digits in a 64bit Word.
// In other words, at most n digits in base b fit into a Word.
// TODO(gri) replace this with a table, generated at build time.
func maxPow(b Word) (p Word, n int) {
	p, n = b, 1 // assuming b <= _M
	limit := _M / b
	for p <= limit {
		// invariant: p == b**n && p*b <= _M
		p *= b
		n++
	}
	return p, n
}
// pow returns x**n for n > 0, and 1 otherwise.
// (Russian Peasant Method: n == sum of bi * 2**i, so
// x**n == product of x**(2**i) over all i where bi == 1.)
func pow(x Word, n int) (p Word) {
	for p = 1; n > 0; n >>= 1 {
		if n&1 != 0 {
			p *= x
		}
		x *= x
	}
	return p
}
// Errors reported by nat.scan for malformed input.
var (
	errNoDigits = errors.New("number has no digits")
	errInvalSep = errors.New("'_' must separate successive digits")
)
// scan scans the number corresponding to the longest possible prefix
// from r representing an unsigned number in a given conversion base.
// scan returns the corresponding natural number res, the actual base b,
// a digit count, and a read or syntax error err, if any.
//
// For base 0, an underscore character “_” may appear between a base
// prefix and an adjacent digit, and between successive digits; such
// underscores do not change the value of the number, or the returned
// digit count. Incorrect placement of underscores is reported as an
// error if there are no other errors. If base != 0, underscores are
// not recognized and thus terminate scanning like any other character
// that is not a valid radix point or digit.
//
// number = mantissa | prefix pmantissa .
// prefix = "0" [ "b" | "B" | "o" | "O" | "x" | "X" ] .
// mantissa = digits "." [ digits ] | digits | "." digits .
// pmantissa = [ "_" ] digits "." [ digits ] | [ "_" ] digits | "." digits .
// digits = digit { [ "_" ] digit } .
// digit = "0" ... "9" | "a" ... "z" | "A" ... "Z" .
//
// Unless fracOk is set, the base argument must be 0 or a value between
// 2 and MaxBase. If fracOk is set, the base argument must be one of
// 0, 2, 8, 10, or 16. Providing an invalid base argument leads to a run-
// time panic.
//
// For base 0, the number prefix determines the actual base: A prefix of
// “0b” or “0B” selects base 2, “0o” or “0O” selects base 8, and
// “0x” or “0X” selects base 16. If fracOk is false, a “0” prefix
// (immediately followed by digits) selects base 8 as well. Otherwise,
// the selected base is 10 and no prefix is accepted.
//
// If fracOk is set, a period followed by a fractional part is permitted.
// The result value is computed as if there were no period present; and
// the count value is used to determine the fractional part.
//
// For bases <= 36, lower and upper case letters are considered the same:
// The letters 'a' to 'z' and 'A' to 'Z' represent digit values 10 to 35.
// For bases > 36, the upper case letters 'A' to 'Z' represent the digit
// values 36 to 61.
//
// A result digit count > 0 corresponds to the number of (non-prefix) digits
// parsed. A digit count <= 0 indicates the presence of a period (if fracOk
// is set, only), and -count is the number of fractional digits found.
// In this case, the actual value of the scanned number is res * b**count.
func (z nat) scan(r io.ByteScanner, base int, fracOk bool) (res nat, b, count int, err error) {
	// Reject invalid bases.
	baseOk := base == 0 ||
		!fracOk && 2 <= base && base <= MaxBase ||
		fracOk && (base == 2 || base == 8 || base == 10 || base == 16)
	if !baseOk {
		panic(fmt.Sprintf("invalid number base %d", base))
	}

	// prev encodes the previously seen char: it is one
	// of '_', '0' (a digit), or '.' (anything else). A
	// valid separator '_' may only occur after a digit
	// and if base == 0.
	prev := '.'
	invalSep := false

	// one char look-ahead
	ch, err := r.ReadByte()

	// Determine actual base.
	b, prefix := base, 0
	if base == 0 {
		// Actual base is 10 unless there's a base prefix.
		b = 10
		if err == nil && ch == '0' {
			prev = '0'
			count = 1
			ch, err = r.ReadByte()
			if err == nil {
				// possibly one of 0b, 0B, 0o, 0O, 0x, 0X
				switch ch {
				case 'b', 'B':
					b, prefix = 2, 'b'
				case 'o', 'O':
					b, prefix = 8, 'o'
				case 'x', 'X':
					b, prefix = 16, 'x'
				default:
					if !fracOk {
						b, prefix = 8, '0'
					}
				}
				if prefix != 0 {
					count = 0 // prefix is not counted
					if prefix != '0' {
						ch, err = r.ReadByte()
					}
				}
			}
		}
	}

	// Convert string.
	// Algorithm: Collect digits in groups of at most n digits in di.
	// For bases that pack exactly into words (2, 4, 16), append di's
	// directly to the int representation and then reverse at the end (bn==0 marks this case).
	// For other bases, use mulAddWW for every such group to shift
	// z up one group and add di to the result.
	// With more cleverness we could also handle binary bases like 8 and 32
	// (corresponding to 3-bit and 5-bit chunks) that don't pack nicely into
	// words, but those are not too important.
	z = z[:0]
	b1 := Word(b)
	var bn Word // b1**n (or 0 for the special bit-packing cases b=2,4,16)
	var n int   // max digits that fit into Word
	switch b {
	case 2: // 1 bit per digit
		n = _W
	case 4: // 2 bits per digit
		n = _W / 2
	case 16: // 4 bits per digit
		n = _W / 4
	default:
		bn, n = maxPow(b1)
	}
	di := Word(0) // 0 <= di < b1**i < bn
	i := 0        // 0 <= i < n
	dp := -1      // position of decimal point
	for err == nil {
		if ch == '.' && fracOk {
			// A single radix point is allowed; remember where it was.
			fracOk = false
			if prev == '_' {
				invalSep = true
			}
			prev = '.'
			dp = count
		} else if ch == '_' && base == 0 {
			// Separator: legal only directly after a digit.
			if prev != '0' {
				invalSep = true
			}
			prev = '_'
		} else {
			// convert rune into digit value d1
			var d1 Word
			switch {
			case '0' <= ch && ch <= '9':
				d1 = Word(ch - '0')
			case 'a' <= ch && ch <= 'z':
				d1 = Word(ch - 'a' + 10)
			case 'A' <= ch && ch <= 'Z':
				// Uppercase letters are distinct digits (36..61)
				// only for bases > maxBaseSmall.
				if b <= maxBaseSmall {
					d1 = Word(ch - 'A' + 10)
				} else {
					d1 = Word(ch - 'A' + maxBaseSmall)
				}
			default:
				d1 = MaxBase + 1 // definitely >= b1: terminates scan below
			}
			if d1 >= b1 {
				r.UnreadByte() // ch does not belong to number anymore
				break
			}
			prev = '0'
			count++

			// collect d1 in di
			di = di*b1 + d1
			i++

			// if di is "full", add it to the result
			if i == n {
				if bn == 0 {
					z = append(z, di)
				} else {
					z = z.mulAddWW(z, bn, di)
				}
				di = 0
				i = 0
			}
		}

		ch, err = r.ReadByte()
	}

	if err == io.EOF {
		err = nil
	}

	// other errors take precedence over invalid separators
	if err == nil && (invalSep || prev == '_') {
		err = errInvalSep
	}

	if count == 0 {
		// no digits found
		if prefix == '0' {
			// there was only the octal prefix 0 (possibly followed by separators and digits > 7);
			// interpret as decimal 0
			return z[:0], 10, 1, err
		}
		err = errNoDigits // fall through; result will be 0
	}

	if bn == 0 {
		// Bit-packing case: words were appended most-significant first.
		if i > 0 {
			// Add remaining digit chunk to result.
			// Left-justify group's digits; will shift back down after reverse.
			z = append(z, di*pow(b1, n-i))
		}
		slices.Reverse(z)
		z = z.norm()
		if i > 0 {
			// Undo the left-justification of the final partial group.
			z = z.rsh(z, uint(n-i)*uint(_W/n))
		}
	} else {
		if i > 0 {
			// Add remaining digit chunk to result.
			z = z.mulAddWW(z, pow(b1, i), di)
		}
	}
	res = z

	// adjust count for fraction, if any
	if dp >= 0 {
		// 0 <= dp <= count
		count = dp - count
	}

	return
}
// utoa converts x to an ASCII representation in the given base;
// base must be between 2 and MaxBase, inclusive.
func (x nat) utoa(base int) []byte {
	return x.itoa(false, base) // non-negative: never prepend a sign
}
// itoa is like utoa but it prepends a '-' if neg && x != 0.
func (x nat) itoa(neg bool, base int) []byte {
	if base < 2 || base > MaxBase {
		panic("invalid base")
	}

	// x == 0
	if len(x) == 0 {
		return []byte("0")
	}
	// len(x) > 0

	// allocate buffer for conversion
	i := int(float64(x.bitLen())/math.Log2(float64(base))) + 1 // off by 1 at most
	if neg {
		i++ // room for the '-' sign
	}
	s := make([]byte, i)

	// convert power of two and non power of two bases separately
	if b := Word(base); b == b&-b {
		// shift is base b digit size in bits
		shift := uint(bits.TrailingZeros(uint(b))) // shift > 0 because b >= 2
		mask := Word(1<<shift - 1)
		w := x[0]         // current word
		nbits := uint(_W) // number of unprocessed bits in w

		// convert less-significant words (include leading zeros)
		for k := 1; k < len(x); k++ {
			// convert full digits
			for nbits >= shift {
				i--
				s[i] = digits[w&mask]
				w >>= shift
				nbits -= shift
			}

			// convert any partial leading digit and advance to next word
			if nbits == 0 {
				// no partial digit remaining, just advance
				w = x[k]
				nbits = _W
			} else {
				// partial digit in current word w (== x[k-1]) and next word x[k]
				w |= x[k] << nbits
				i--
				s[i] = digits[w&mask]

				// advance
				w = x[k] >> (shift - nbits)
				nbits = _W - (shift - nbits)
			}
		}

		// convert digits of most-significant word w (omit leading zeros)
		for w != 0 {
			i--
			s[i] = digits[w&mask]
			w >>= shift
		}
	} else {
		stk := getStack()
		defer stk.free()

		bb, ndigits := maxPow(b)

		// construct table of successive squares of bb*leafSize to use in subdivisions
		// result (table != nil) <=> (len(x) > leafSize > 0)
		table := divisors(stk, len(x), b, ndigits, bb)

		// preserve x, create local copy for use by convertWords
		q := nat(nil).set(x)

		// convert q to string s in base b
		q.convertWords(stk, s, b, ndigits, bb, table)

		// strip leading zeros
		// (x != 0; thus s must contain at least one non-zero digit
		// and the loop will terminate)
		i = 0
		for s[i] == '0' {
			i++
		}
	}

	if neg {
		i--
		s[i] = '-'
	}

	return s[i:]
}
// Convert words of q to base b digits in s. If q is large, it is recursively "split in half"
// by nat/nat division using tabulated divisors. Otherwise, it is converted iteratively using
// repeated nat/Word division.
//
// The iterative method processes n Words by n divW() calls, each of which visits every Word in the
// incrementally shortened q for a total of n + (n-1) + (n-2) ... + 2 + 1, or n(n+1)/2 divW()'s.
// Recursive conversion divides q by its approximate square root, yielding two parts, each half
// the size of q. Using the iterative method on both halves means 2 * (n/2)(n/2 + 1)/2 divW()'s
// plus the expensive long div(). Asymptotically, the ratio is favorable at 1/2 the divW()'s, and
// is made better by splitting the subblocks recursively. Best is to split blocks until one more
// split would take longer (because of the nat/nat div()) than the twice as many divW()'s of the
// iterative approach. This threshold is represented by leafSize. Benchmarking of leafSize in the
// range 2..64 shows that values of 8 and 16 work well, with a 4x speedup at medium lengths and
// ~30x for 20000 digits. Use nat_test.go's BenchmarkLeafSize tests to optimize leafSize for
// specific hardware.
func (q nat) convertWords(stk *stack, s []byte, b Word, ndigits int, bb Word, table []divisor) {
	// split larger blocks recursively
	if table != nil {
		// len(q) > leafSize > 0
		var r nat
		index := len(table) - 1
		for len(q) > leafSize {
			// find divisor close to sqrt(q) if possible, but in any case < q
			maxLength := q.bitLen()     // ~= log2 q, or at least that of the largest possible q of this bit length
			minLength := maxLength >> 1 // ~= log2 sqrt(q)
			for index > 0 && table[index-1].nbits > minLength {
				index-- // desired
			}
			if table[index].nbits >= maxLength && table[index].bbb.cmp(q) >= 0 {
				index--
				if index < 0 {
					panic("internal inconsistency")
				}
			}

			// split q into the two digit number (q'*bbb + r) to form independent subblocks
			q, r = q.div(stk, r, q, table[index].bbb)

			// convert subblocks and collect results in s[:h] and s[h:]
			h := len(s) - table[index].ndigits
			r.convertWords(stk, s[h:], b, ndigits, bb, table[0:index])
			s = s[:h] // == q.convertWords(stk, s, b, ndigits, bb, table[0:index+1])
		}
	}

	// having split any large blocks now process the remaining (small) block iteratively
	i := len(s)
	var r Word
	if b == 10 {
		// hard-coding for 10 here speeds this up by 1.25x (allows for / and % by constants)
		for len(q) > 0 {
			// extract least significant, base bb "digit"
			q, r = q.divW(q, bb)
			for j := 0; j < ndigits && i > 0; j++ {
				i--
				// avoid % computation since r%10 == r - int(r/10)*10;
				// this appears to be faster for BenchmarkString10000Base10
				// and smaller strings (but a bit slower for larger ones)
				t := r / 10
				s[i] = '0' + byte(r-t*10)
				r = t
			}
		}
	} else {
		for len(q) > 0 {
			// extract least significant, base bb "digit"
			q, r = q.divW(q, bb)
			for j := 0; j < ndigits && i > 0; j++ {
				i--
				s[i] = digits[r%b]
				r /= b
			}
		}
	}

	// prepend high-order zeros
	for i > 0 { // while need more leading zeros
		i--
		s[i] = '0'
	}
}
// Split blocks greater than leafSize Words (or set to 0 to disable recursive conversion)
// Benchmark and configure leafSize using: go test -bench="Leaf"
//
// 8 and 16 effective on 3.0 GHz Xeon "Clovertown" CPU (128 byte cache lines)
// 8 and 16 effective on 2.66 GHz Core 2 Duo "Penryn" CPU
var leafSize int = 8 // number of Word-size binary values treated as a monolithic block
// divisor is a precomputed power of the conversion base, used by
// convertWords to split a large nat into independently convertible halves.
type divisor struct {
	bbb     nat // divisor
	nbits   int // bit length of divisor (discounting leading zeros) ~= log2(bbb)
	ndigits int // digit length of divisor in terms of output base digits
}
// cacheBase10 caches divisor tables for base 10 conversions,
// guarded by the embedded mutex.
var cacheBase10 struct {
	sync.Mutex
	table [64]divisor // cached divisors for base 10
}
// expWW computes x**y
func (z nat) expWW(stk *stack, x, y Word) nat {
	// Single-word base and exponent, no modulus, normal (non-slow) path.
	return z.expNN(stk, nat(nil).setWord(x), nat(nil).setWord(y), nil, false)
}
// divisors constructs the table of powers of bb*leafSize to use in subdivisions.
func divisors(stk *stack, m int, b Word, ndigits int, bb Word) []divisor {
	// only compute table when recursive conversion is enabled and x is large
	if leafSize == 0 || m <= leafSize {
		return nil
	}

	// determine k where (bb**leafSize)**(2**k) >= sqrt(x)
	k := 1
	for words := leafSize; words < m>>1 && k < len(cacheBase10.table); words <<= 1 {
		k++
	}

	// reuse and extend existing table of divisors or create new table as appropriate
	var table []divisor // for b == 10, table overlaps with cacheBase10.table
	if b == 10 {
		// The lock is held through the table-filling loop below and
		// released at the end of the function.
		cacheBase10.Lock()
		table = cacheBase10.table[0:k] // reuse old table for this conversion
	} else {
		table = make([]divisor, k) // create new table for this conversion
	}

	// extend table
	if table[k-1].ndigits == 0 {
		// add new entries as needed
		var larger nat
		for i := 0; i < k; i++ {
			if table[i].ndigits == 0 {
				if i == 0 {
					table[0].bbb = nat(nil).expWW(stk, bb, Word(leafSize))
					table[0].ndigits = ndigits * leafSize
				} else {
					// Each entry is the square of the previous one.
					table[i].bbb = nat(nil).sqr(stk, table[i-1].bbb)
					table[i].ndigits = 2 * table[i-1].ndigits
				}

				// optimization: exploit aggregated extra bits in macro blocks
				larger = nat(nil).set(table[i].bbb)
				for mulAddVWW(larger, larger, b, 0) == 0 {
					table[i].bbb = table[i].bbb.set(larger)
					table[i].ndigits++
				}

				table[i].nbits = table[i].bbb.bitLen()
			}
		}
	}

	if b == 10 {
		cacheBase10.Unlock()
	}

	return table
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Multi-precision division. Here be dragons.
Given u and v, where u is n+m digits, and v is n digits (with no leading zeros),
the goal is to return quo, rem such that u = quo*v + rem, where 0 ≤ rem < v.
That is, quo = ⌊u/v⌋ where ⌊x⌋ denotes the floor (truncation to integer) of x,
and rem = u - quo·v.
Long Division
Division in a computer proceeds the same as long division in elementary school,
but computers are not as good as schoolchildren at following vague directions,
so we have to be much more precise about the actual steps and what can happen.
We work from most to least significant digit of the quotient, doing:
• Guess a digit q, the number of v to subtract from the current
section of u to zero out the topmost digit.
• Check the guess by multiplying q·v and comparing it against
the current section of u, adjusting the guess as needed.
• Subtract q·v from the current section of u.
• Add q to the corresponding section of the result quo.
When all digits have been processed, the final remainder is left in u
and returned as rem.
For example, here is a sketch of dividing 5 digits by 3 digits (n=3, m=2).
q₂ q₁ q₀
_________________
v₂ v₁ v₀ ) u₄ u₃ u₂ u₁ u₀
↓ ↓ ↓ | |
[u₄ u₃ u₂]| |
- [ q₂·v ]| |
----------- ↓ |
[ rem | u₁]|
- [ q₁·v ]|
----------- ↓
[ rem | u₀]
- [ q₀·v ]
------------
[ rem ]
Instead of creating new storage for the remainders and copying digits from u
as indicated by the arrows, we use u's storage directly as both the source
and destination of the subtractions, so that the remainders overwrite
successive overlapping sections of u as the division proceeds, using a slice
of u to identify the current section. This avoids all the copying as well as
shifting of remainders.
Division of u with n+m digits by v with n digits (in base B) can in general
produce at most m+1 digits, because:
• u < B^(n+m) [B^(n+m) has n+m+1 digits]
• v ≥ B^(n-1) [B^(n-1) is the smallest n-digit number]
• u/v < B^(n+m) / B^(n-1) [divide bounds for u, v]
• u/v < B^(m+1) [simplify]
The first step is special: it takes the top n digits of u and divides them by
the n digits of v, producing the first quotient digit and an n-digit remainder.
In the example, q₂ = ⌊u₄u₃u₂ / v⌋.
The first step divides n digits by n digits to ensure that it produces only a
single digit.
Each subsequent step appends the next digit from u to the remainder and divides
those n+1 digits by the n digits of v, producing another quotient digit and a
new n-digit remainder.
Subsequent steps divide n+1 digits by n digits, an operation that in general
might produce two digits. However, as used in the algorithm, that division is
guaranteed to produce only a single digit. The dividend is of the form
rem·B + d, where rem is a remainder from the previous step and d is a single
digit, so:
• rem ≤ v - 1 [rem is a remainder from dividing by v]
• rem·B ≤ v·B - B [multiply by B]
• d ≤ B - 1 [d is a single digit]
• rem·B + d ≤ v·B - 1 [add]
• rem·B + d < v·B [change ≤ to <]
• (rem·B + d)/v < B [divide by v]
Guess and Check
At each step we need to divide n+1 digits by n digits, but this is for the
implementation of division by n digits, so we can't just invoke a division
routine: we _are_ the division routine. Instead, we guess at the answer and
then check it using multiplication. If the guess is wrong, we correct it.
How can this guessing possibly be efficient? It turns out that the following
statement (let's call it the Good Guess Guarantee) is true.
If
• q = ⌊u/v⌋ where u is n+1 digits and v is n digits,
• q < B, and
• the topmost digit of v = vₙ₋₁ ≥ B/2,
then q̂ = ⌊uₙuₙ₋₁ / vₙ₋₁⌋ satisfies q ≤ q̂ ≤ q+2. (Proof below.)
That is, if we know the answer has only a single digit and we guess an answer
by ignoring the bottom n-1 digits of u and v, using a 2-by-1-digit division,
then that guess is at least as large as the correct answer. It is also not
too much larger: it is off by at most two from the correct answer.
Note that in the first step of the overall division, which is an n-by-n-digit
division, the 2-by-1 guess uses an implicit uₙ = 0.
Note that using a 2-by-1-digit division here does not mean calling ourselves
recursively. Instead, we use an efficient direct hardware implementation of
that operation.
Note that because q is u/v rounded down, q·v must not exceed u: u ≥ q·v.
If a guess q̂ is too big, it will not satisfy this test. Viewed a different way,
the remainder r̂ for a given q̂ is u - q̂·v, which must be positive. If it is
negative, then the guess q̂ is too big.
This gives us a way to compute q. First compute q̂ with 2-by-1-digit division.
Then, while u < q̂·v, decrement q̂; this loop executes at most twice, because
q̂ ≤ q+2.
Scaling Inputs
The Good Guess Guarantee requires that the top digit of v (vₙ₋₁) be at least B/2.
For example in base 10, ⌊172/19⌋ = 9, but ⌊17/1⌋ = 17: the guess is wildly off
because the first digit 1 is smaller than B/2 = 5.
We can ensure that v has a large top digit by multiplying both u and v by the
right amount. Continuing the example, if we multiply both 172 and 19 by 3, we
now have ⌊516/57⌋, the leading digit of v is now ≥ 5, and sure enough
⌊51/5⌋ = 10 is much closer to the correct answer 9. It would be easier here
to multiply by 4, because that can be done with a shift. Specifically, we can
always count the number of leading zeros i in the first digit of v and then
shift both u and v left by i bits.
Having scaled u and v, the value ⌊u/v⌋ is unchanged, but the remainder will
be scaled: 172 mod 19 is 1, but 516 mod 57 is 3. We have to divide the remainder
by the scaling factor (shifting right i bits) when we finish.
Note that these shifts happen before and after the entire division algorithm,
not at each step in the per-digit iteration.
Note the effect of scaling inputs on the size of the possible quotient.
In the scaled u/v, u can gain a digit from scaling; v never does, because we
pick the scaling factor to make v's top digit larger but without overflowing.
If u and v have n+m and n digits after scaling, then:
• u < B^(n+m) [B^(n+m) has n+m+1 digits]
• v ≥ B^n / 2 [vₙ₋₁ ≥ B/2, so vₙ₋₁·B^(n-1) ≥ B^n/2]
• u/v < B^(n+m) / (B^n / 2) [divide bounds for u, v]
• u/v < 2 B^m [simplify]
The quotient can still have m+1 significant digits, but if so the top digit
must be a 1. This provides a different way to handle the first digit of the
result: compare the top n digits of u against v and fill in either a 0 or a 1.
Refining Guesses
Before we check whether u < q̂·v, we can adjust our guess to change it from
q̂ = ⌊uₙuₙ₋₁ / vₙ₋₁⌋ into the refined guess ⌊uₙuₙ₋₁uₙ₋₂ / vₙ₋₁vₙ₋₂⌋.
Although not mentioned above, the Good Guess Guarantee also promises that this
3-by-2-digit division guess is more precise and at most one away from the real
answer q. The improvement from the 2-by-1 to the 3-by-2 guess can also be done
without n-digit math.
If we have a guess q̂ = ⌊uₙuₙ₋₁ / vₙ₋₁⌋ and we want to see if it also equal to
⌊uₙuₙ₋₁uₙ₋₂ / vₙ₋₁vₙ₋₂⌋, we can use the same check we would for the full division:
if uₙuₙ₋₁uₙ₋₂ < q̂·vₙ₋₁vₙ₋₂, then the guess is too large and should be reduced.
Checking uₙuₙ₋₁uₙ₋₂ < q̂·vₙ₋₁vₙ₋₂ is the same as uₙuₙ₋₁uₙ₋₂ - q̂·vₙ₋₁vₙ₋₂ < 0,
and
uₙuₙ₋₁uₙ₋₂ - q̂·vₙ₋₁vₙ₋₂ = (uₙuₙ₋₁·B + uₙ₋₂) - q̂·(vₙ₋₁·B + vₙ₋₂)
[splitting off the bottom digit]
= (uₙuₙ₋₁ - q̂·vₙ₋₁)·B + uₙ₋₂ - q̂·vₙ₋₂
[regrouping]
The expression (uₙuₙ₋₁ - q̂·vₙ₋₁) is the remainder of uₙuₙ₋₁ / vₙ₋₁.
If the initial guess returns both q̂ and its remainder r̂, then checking
whether uₙuₙ₋₁uₙ₋₂ < q̂·vₙ₋₁vₙ₋₂ is the same as checking r̂·B + uₙ₋₂ < q̂·vₙ₋₂.
If we find that r̂·B + uₙ₋₂ < q̂·vₙ₋₂, then we can adjust the guess by
decrementing q̂ and adding vₙ₋₁ to r̂. We repeat until r̂·B + uₙ₋₂ ≥ q̂·vₙ₋₂.
(As before, this fixup is only needed at most twice.)
Now that q̂ = ⌊uₙuₙ₋₁uₙ₋₂ / vₙ₋₁vₙ₋₂⌋, as mentioned above it is at most one
away from the correct q, and we've avoided doing any n-digit math.
(If we need the new remainder, it can be computed as r̂·B + uₙ₋₂ - q̂·vₙ₋₂.)
The final check u < q̂·v and the possible fixup must be done at full precision.
For random inputs, a fixup at this step is exceedingly rare: the 3-by-2 guess
is not often wrong at all. But still we must do the check. Note that since the
3-by-2 guess is off by at most 1, it can be convenient to perform the final
u < q̂·v as part of the computation of the remainder r = u - q̂·v. If the
subtraction underflows, decrementing q̂ and adding one v back to r is enough to
arrive at the final q, r.
That's the entirety of long division: scale the inputs, and then loop over
each output position, guessing, checking, and correcting the next output digit.
For a 2n-digit number divided by an n-digit number (the worst size-n case for
division complexity), this algorithm uses n+1 iterations, each of which must do
at least the 1-by-n-digit multiplication q̂·v. That's O(n) iterations of
O(n) time each, so O(n²) time overall.
Recursive Division
For very large inputs, it is possible to improve on the O(n²) algorithm.
Let's call a group of n/2 real digits a (very) “wide digit”. We can run the
standard long division algorithm explained above over the wide digits instead of
the actual digits. This will result in many fewer steps, but the math involved in
each step is more work.
Where basic long division uses a 2-by-1-digit division to guess the initial q̂,
the new algorithm must use a 2-by-1-wide-digit division, which is of course
really an n-by-n/2-digit division. That's OK: if we implement n-digit division
in terms of n/2-digit division, the recursion will terminate when the divisor
becomes small enough to handle with standard long division or even with the
2-by-1 hardware instruction.
For example, here is a sketch of dividing 10 digits by 4, proceeding with
wide digits corresponding to two regular digits. The first step, still special,
must leave off a (regular) digit, dividing 5 by 4 and producing a 4-digit
remainder less than v. The middle steps divide 6 digits by 4, guaranteed to
produce two output digits each (one wide digit) with 4-digit remainders.
The final step must use what it has: the 4-digit remainder plus one more,
5 digits to divide by 4.
q₆ q₅ q₄ q₃ q₂ q₁ q₀
_______________________________
v₃ v₂ v₁ v₀ ) u₉ u₈ u₇ u₆ u₅ u₄ u₃ u₂ u₁ u₀
↓ ↓ ↓ ↓ ↓ | | | | |
[u₉ u₈ u₇ u₆ u₅]| | | | |
- [ q₆q₅·v ]| | | | |
----------------- ↓ ↓ | | |
[ rem |u₄ u₃]| | |
- [ q₄q₃·v ]| | |
-------------------- ↓ ↓ |
[ rem |u₂ u₁]|
- [ q₂q₁·v ]|
-------------------- ↓
[ rem |u₀]
- [ q₀·v ]
------------------
[ rem ]
An alternative would be to look ahead to how well n/2 divides into n+m and
adjust the first step to use fewer digits as needed, making the first step
more special to make the last step not special at all. For example, using the
same input, we could choose to use only 4 digits in the first step, leaving
a full wide digit for the last step:
q₆ q₅ q₄ q₃ q₂ q₁ q₀
_______________________________
v₃ v₂ v₁ v₀ ) u₉ u₈ u₇ u₆ u₅ u₄ u₃ u₂ u₁ u₀
↓ ↓ ↓ ↓ | | | | | |
[u₉ u₈ u₇ u₆]| | | | | |
- [ q₆·v ]| | | | | |
-------------- ↓ ↓ | | | |
[ rem |u₅ u₄]| | | |
- [ q₅q₄·v ]| | | |
-------------------- ↓ ↓ | |
[ rem |u₃ u₂]| |
- [ q₃q₂·v ]| |
-------------------- ↓ ↓
[ rem |u₁ u₀]
- [ q₁q₀·v ]
---------------------
[ rem ]
Today, the code in divRecursiveStep works like the first example. Perhaps in
the future we will make it work like the alternative, to avoid a special case
in the final iteration.
Either way, each step is a 3-by-2-wide-digit division approximated first by
a 2-by-1-wide-digit division, just as we did for regular digits in long division.
Because the actual answer we want is a 3-by-2-wide-digit division, instead of
multiplying q̂·v directly during the fixup, we can use the quick refinement
from long division (an n/2-by-n/2 multiply) to correct q to its actual value
and also compute the remainder (as mentioned above), and then stop after that,
never doing a full n-by-n multiply.
Instead of using an n-by-n/2-digit division to produce n/2 digits, we can add
(not discard) one more real digit, doing an (n+1)-by-(n/2+1)-digit division that
produces n/2+1 digits. That single extra digit tightens the Good Guess Guarantee
to q ≤ q̂ ≤ q+1 and lets us drop long division's special treatment of the first
digit. These benefits are discussed more after the Good Guess Guarantee proof
below.
How Fast is Recursive Division?
For a 2n-by-n-digit division, this algorithm runs a 4-by-2 long division over
wide digits, producing two wide digits plus a possible leading regular digit 1,
which can be handled without a recursive call. That is, the algorithm uses two
full iterations, each using an n-by-n/2-digit division and an n/2-by-n/2-digit
multiplication, along with a few n-digit additions and subtractions. The standard
n-by-n-digit multiplication algorithm requires O(n²) time, making the overall
algorithm require time T(n) where
T(n) = 2T(n/2) + O(n) + O(n²)
which, by the Bentley-Haken-Saxe theorem, ends up reducing to T(n) = O(n²).
This is not an improvement over regular long division.
When the number of digits n becomes large enough, Karatsuba's algorithm for
multiplication can be used instead, which takes O(n^log₂3) = O(n^1.6) time.
(Karatsuba multiplication is implemented in func karatsuba in nat.go.)
That makes the overall recursive division algorithm take O(n^1.6) time as well,
which is an improvement, but again only for large enough numbers.
It is not critical to make sure that every recursion does only two recursive
calls. While in general the number of recursive calls can change the time
analysis, in this case doing three calls does not change the analysis:
T(n) = 3T(n/2) + O(n) + O(n^log₂3)
ends up being T(n) = O(n^log₂3). Because the Karatsuba multiplication taking
time O(n^log₂3) is itself doing 3 half-sized recursions, doing three for the
division does not hurt the asymptotic performance. Of course, it is likely
still faster in practice to do two.
Proof of the Good Guess Guarantee
Given numbers x, y, let us break them into the quotients and remainders when
divided by some scaling factor S, with the added constraints that the quotient
x/y and the high part of y are both less than some limit T, and that the high
part of y is at least half as big as T.
x₁ = ⌊x/S⌋ y₁ = ⌊y/S⌋
x₀ = x mod S y₀ = y mod S
x = x₁·S + x₀ 0 ≤ x₀ < S x/y < T
y = y₁·S + y₀ 0 ≤ y₀ < S T/2 ≤ y₁ < T
And consider the two truncated quotients:
q = ⌊x/y⌋
q̂ = ⌊x₁/y₁⌋
We will prove that q ≤ q̂ ≤ q+2.
The guarantee makes no real demands on the scaling factor S: it is simply the
magnitude of the digits cut from both x and y to produce x₁ and y₁.
The guarantee makes only limited demands on T: it must be large enough to hold
the quotient x/y, and y₁ must have roughly the same size.
To apply to the earlier discussion of 2-by-1 guesses in long division,
we would choose:
S = Bⁿ⁻¹
T = B
x = u
x₁ = uₙuₙ₋₁
x₀ = uₙ₋₂...u₀
y = v
y₁ = vₙ₋₁
y₀ = vₙ₋₂...v₀
These simpler variables avoid repeating those longer expressions in the proof.
Note also that, by definition, truncating division ⌊x/y⌋ satisfies
x/y - 1 < ⌊x/y⌋ ≤ x/y.
This fact will be used a few times in the proofs.
Proof that q ≤ q̂:
q̂·y₁ = ⌊x₁/y₁⌋·y₁ [by definition, q̂ = ⌊x₁/y₁⌋]
> (x₁/y₁ - 1)·y₁ [x₁/y₁ - 1 < ⌊x₁/y₁⌋]
= x₁ - y₁ [distribute y₁]
So q̂·y₁ > x₁ - y₁.
Since q̂·y₁ is an integer, q̂·y₁ ≥ x₁ - y₁ + 1.
q̂ - q = q̂ - ⌊x/y⌋ [by definition, q = ⌊x/y⌋]
≥ q̂ - x/y [⌊x/y⌋ < x/y]
= (1/y)·(q̂·y - x) [factor out 1/y]
≥ (1/y)·(q̂·y₁·S - x) [y = y₁·S + y₀ ≥ y₁·S]
≥ (1/y)·((x₁ - y₁ + 1)·S - x) [above: q̂·y₁ ≥ x₁ - y₁ + 1]
= (1/y)·(x₁·S - y₁·S + S - x) [distribute S]
= (1/y)·(S - x₀ - y₁·S) [-x = -x₁·S - x₀]
> -y₁·S / y [x₀ < S, so S - x₀ > 0; drop it]
≥ -1 [y₁·S ≤ y]
So q̂ - q > -1.
Since q̂ - q is an integer, q̂ - q ≥ 0, or equivalently q ≤ q̂.
Proof that q̂ ≤ q+2:
x₁/y₁ - x/y = x₁·S/y₁·S - x/y [multiply left term by S/S]
≤ x/y₁·S - x/y [x₁S ≤ x]
= (x/y)·(y/y₁·S - 1) [factor out x/y]
= (x/y)·((y - y₁·S)/y₁·S) [move -1 into y/y₁·S fraction]
= (x/y)·(y₀/y₁·S) [y - y₁·S = y₀]
= (x/y)·(1/y₁)·(y₀/S) [factor out 1/y₁]
< (x/y)·(1/y₁) [y₀ < S, so y₀/S < 1]
≤ (x/y)·(2/T) [y₁ ≥ T/2, so 1/y₁ ≤ 2/T]
< T·(2/T) [x/y < T]
= 2 [T·(2/T) = 2]
So x₁/y₁ - x/y < 2.
q̂ - q = ⌊x₁/y₁⌋ - q [by definition, q̂ = ⌊x₁/y₁⌋]
= ⌊x₁/y₁⌋ - ⌊x/y⌋ [by definition, q = ⌊x/y⌋]
≤ x₁/y₁ - ⌊x/y⌋ [⌊x₁/y₁⌋ ≤ x₁/y₁]
< x₁/y₁ - (x/y - 1) [⌊x/y⌋ > x/y - 1]
= (x₁/y₁ - x/y) + 1 [regrouping]
< 2 + 1 [above: x₁/y₁ - x/y < 2]
= 3
So q̂ - q < 3.
Since q̂ - q is an integer, q̂ - q ≤ 2.
Note that when x/y < T/2, the bounds tighten to x₁/y₁ - x/y < 1 and therefore
q̂ - q ≤ 1.
Note also that in the general case 2n-by-n division where we don't know that
x/y < T, we do know that x/y < 2T, yielding the bound q̂ - q ≤ 4. So we could
remove the special case first step of long division as long as we allow the
first fixup loop to run up to four times. (Using a simple comparison to decide
whether the first digit is 0 or 1 is still more efficient, though.)
Finally, note that when dividing three leading base-B digits by two (scaled),
we have T = B² and x/y < B = T/B, a much tighter bound than x/y < T.
This in turn yields the much tighter bound x₁/y₁ - x/y < 2/B. This means that
⌊x₁/y₁⌋ and ⌊x/y⌋ can only differ when x/y is less than 2/B greater than an
integer. For random x and y, the chance of this is 2/B, or, for large B,
approximately zero. This means that after we produce the 3-by-2 guess in the
long division algorithm, the fixup loop essentially never runs.
In the recursive algorithm, the extra digit in (2·⌊n/2⌋+1)-by-(⌊n/2⌋+1)-digit
division has exactly the same effect: the probability of needing a fixup is the
same 2/B. Even better, we can allow the general case x/y < 2T and the fixup
probability only grows to 4/B, still essentially zero.
References
There are no great references for implementing long division; thus this comment.
Here are some notes about what to expect from the obvious references.
Knuth Volume 2 (Seminumerical Algorithms) section 4.3.1 is the usual canonical
reference for long division, but that entire series is highly compressed, never
repeating a necessary fact and leaving important insights to the exercises.
For example, no rationale whatsoever is given for the calculation that extends
q̂ from a 2-by-1 to a 3-by-2 guess, nor why it reduces the error bound.
The proof that the calculation even has the desired effect is left to exercises.
The solutions to those exercises provided at the back of the book are entirely
calculations, still with no explanation as to what is going on or how you would
arrive at the idea of doing those exact calculations. Nowhere is it mentioned
that this test extends the 2-by-1 guess into a 3-by-2 guess. The proof of the
Good Guess Guarantee is only for the 2-by-1 guess and argues by contradiction,
making it difficult to understand how modifications like adding another digit
or adjusting the quotient range affect the overall bound.
All that said, Knuth remains the canonical reference. It is dense but packed
full of information and references, and the proofs are simpler than many other
presentations. The proofs above are reworkings of Knuth's to remove the
arguments by contradiction and add explanations or steps that Knuth omitted.
But beware of errors in older printings. Take the published errata with you.
Brinch Hansen's “Multiple-length Division Revisited: a Tour of the Minefield”
starts with a blunt critique of Knuth's presentation (among others) and then
presents a more detailed and easier to follow treatment of long division,
including an implementation in Pascal. But the algorithm and implementation
work entirely in terms of 3-by-2 division, which is much less useful on modern
hardware than an algorithm using 2-by-1 division. The proofs are a bit too
focused on digit counting and seem needlessly complex, especially compared to
the ones given above.
Burnikel and Ziegler's “Fast Recursive Division” introduced the key insight of
implementing division by an n-digit divisor using recursive calls to division
by an n/2-digit divisor, relying on Karatsuba multiplication to yield a
sub-quadratic run time. However, the presentation decisions are made almost
entirely for the purpose of simplifying the run-time analysis, rather than
simplifying the presentation. Instead of a single algorithm that loops over
quotient digits, the paper presents two mutually-recursive algorithms, for
2n-by-n and 3n-by-2n. The paper also does not present any general (n+m)-by-n
algorithm.
The proofs in the paper are remarkably complex, especially considering that
the algorithm is at its core just long division on wide digits, so that the
usual long division proofs apply essentially unaltered.
*/
package big
import "math/bits"
// rem returns r such that r = u%v.
// It uses z as the storage for r.
func (z nat) rem(stk *stack, u, v nat) (r nat) {
	if alias(z, u) {
		// z shares storage with u, which div overwrites; discard it.
		z = nil
	}
	defer stk.restore(stk.save())
	// The quotient needs len(u)-(len(v)-1) digits, and at least one.
	qlen := len(u) - (len(v) - 1)
	if qlen < 1 {
		qlen = 1
	}
	scratch := stk.nat(qlen)
	_, r = scratch.div(stk, z, u, v)
	return r
}
// div returns q, r such that q = ⌊u/v⌋ and r = u%v = u - q·v.
// It uses z and z2 as the storage for q and r.
// The caller may pass stk == nil to request that div obtain and release one itself.
func (z nat) div(stk *stack, z2, u, v nat) (q, r nat) {
	switch {
	case len(v) == 0:
		panic("division by zero")
	case len(v) == 1:
		// Short division: long division optimized for a one-word divisor,
		// where the 2-by-1 guess is all we need at each step.
		var rw Word
		q, rw = z.divW(u, v[0])
		r = z2.setWord(rw)
		return q, r
	case u.cmp(v) < 0:
		// u < v, so the quotient is zero and the remainder is u itself.
		q = z[:0]
		r = z2.set(u)
		return q, r
	}
	if stk == nil {
		stk = getStack()
		defer stk.free()
	}
	return z.divLarge(stk, z2, u, v)
}
// divW returns q, r such that q = ⌊x/y⌋ and r = x%y = x - q·y.
// It uses z as the storage for q.
// Note that y is a single digit (Word), not a big number.
func (z nat) divW(x nat, y Word) (q nat, r Word) {
	if y == 0 {
		panic("division by zero")
	}
	if y == 1 {
		// Dividing by one: quotient is x, remainder is zero.
		q = z.set(x)
		return q, r
	}
	if len(x) == 0 {
		// Zero dividend: quotient and remainder are zero.
		q = z[:0]
		return q, r
	}
	// len(x) > 0
	z = z.make(len(x))
	r = divWVW(z, 0, x, y)
	q = z.norm()
	return q, r
}
// modW returns x % d.
func (x nat) modW(d Word) (r Word) {
	// TODO(agl): we don't actually need to store the q value.
	// Scratch space for the quotient, which is discarded.
	scratch := nat(nil).make(len(x))
	return divWVW(scratch, 0, x, d)
}
// divWVW overwrites z with ⌊x/y⌋, returning the remainder r.
// The caller must ensure that len(z) = len(x).
func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) {
	// One-digit dividend: a single hardware 2-by-1 division suffices.
	if len(x) == 1 {
		qq, rr := bits.Div(uint(xn), uint(x[0]), uint(y))
		z[0] = Word(qq)
		return Word(rr)
	}
	// Long division one digit at a time, from most to least significant,
	// using a precomputed reciprocal of y to speed each 2-by-1 step.
	rem := xn
	rec := reciprocalWord(y)
	for i := len(z) - 1; i >= 0; i-- {
		z[i], rem = divWW(rem, x[i], y, rec)
	}
	return rem
}
// div returns q, r such that q = ⌊uIn/vIn⌋ and r = uIn%vIn = uIn - q·vIn.
// It uses z and u as the storage for q and r.
// The caller must ensure that len(vIn) ≥ 2 (use divW otherwise)
// and that len(uIn) ≥ len(vIn) (the answer is 0, uIn otherwise).
func (z nat) divLarge(stk *stack, u, uIn, vIn nat) (q, r nat) {
	n := len(vIn)
	m := len(uIn) - n
	// Scale the inputs so vIn's top bit is 1 (see “Scaling Inputs” above).
	// vIn is treated as a read-only input (it may be in use by another
	// goroutine), so we must make a copy.
	// uIn is copied to u.
	defer stk.restore(stk.save())
	shift := nlz(vIn[n-1])
	v := stk.nat(n)
	// u gets one extra digit: scaling may carry out of uIn's top digit.
	u = u.make(len(uIn) + 1)
	if shift == 0 {
		// Already normalized: plain copies, with an explicit zero top digit.
		copy(v, vIn)
		copy(u[:len(uIn)], uIn)
		u[len(uIn)] = 0
	} else {
		// Shift both operands left; the bits shifted out of uIn's top
		// become u's new top digit.
		lshVU(v, vIn, shift)
		u[len(uIn)] = lshVU(u[:len(uIn)], uIn, shift)
	}
	// The caller should not pass aliased z and u, since those are
	// the two different outputs, but correct just in case.
	if alias(z, u) {
		z = nil
	}
	q = z.make(m + 1)
	// Use basic or recursive long division depending on size.
	if n < divRecursiveThreshold {
		q.divBasic(stk, u, v)
	} else {
		q.divRecursive(stk, u, v)
	}
	q = q.norm()
	// Undo scaling of remainder.
	if shift != 0 {
		rshVU(u, u, shift)
	}
	r = u.norm()
	return q, r
}
// divBasic implements long division as described above.
// It overwrites q with ⌊u/v⌋ and overwrites u with the remainder r.
// q must be large enough to hold ⌊u/v⌋.
// v must already be scaled so that its top digit is ≥ B/2
// (divLarge arranges this).
func (q nat) divBasic(stk *stack, u, v nat) {
	n := len(v)
	m := len(u) - n
	defer stk.restore(stk.save())
	// qhatv holds the product q̂·v, one digit longer than v.
	qhatv := stk.nat(n + 1)
	// Set up for divWW below, precomputing reciprocal argument.
	vn1 := v[n-1]
	rec := reciprocalWord(vn1)
	// Invent a leading 0 for u, for the first iteration.
	// Invariant: ujn == u[j+n] in each iteration.
	ujn := Word(0)
	// Compute each digit of quotient.
	for j := m; j >= 0; j-- {
		// Compute the 2-by-1 guess q̂.
		qhat := Word(_M)
		// ujn ≤ vn1, or else q̂ would be more than one digit.
		// For ujn == vn1, we set q̂ to the max digit M above.
		// Otherwise, we compute the 2-by-1 guess.
		if ujn != vn1 {
			var rhat Word
			qhat, rhat = divWW(ujn, u[j+n-1], vn1, rec)
			// Refine q̂ to a 3-by-2 guess. See “Refining Guesses” above.
			vn2 := v[n-2]
			x1, x2 := mulWW(qhat, vn2)
			ujn2 := u[j+n-2]
			for greaterThan(x1, x2, rhat, ujn2) { // x1x2 > r̂ u[j+n-2]
				qhat--
				prevRhat := rhat
				rhat += vn1
				// If r̂ overflows, then
				// r̂ u[j+n-2]v[n-1] is now definitely > x1 x2.
				if rhat < prevRhat {
					break
				}
				// TODO(rsc): No need for a full mulWW.
				// x2 += vn2; if x2 overflows, x1++
				x1, x2 = mulWW(qhat, vn2)
			}
		}
		// Compute q̂·v.
		qhatv[n] = mulAddVWW(qhatv[0:n], v, qhat, 0)
		qhl := len(qhatv)
		if j+qhl > len(u) && qhatv[n] == 0 {
			// The product's top digit is zero and would fall past
			// the end of u; drop it from the subtraction.
			qhl--
		}
		// Subtract q̂·v from the current section of u.
		// If it underflows, q̂·v > u, which we fix up
		// by decrementing q̂ and adding v back.
		c := subVV(u[j:j+qhl], u[j:j+qhl], qhatv[:qhl])
		if c != 0 {
			c := addVV(u[j:j+n], u[j:j+n], v)
			// If n == qhl, the carry from subVV and the carry from addVV
			// cancel out and don't affect u[j+n].
			if n < qhl {
				u[j+n] += c
			}
			qhat--
		}
		ujn = u[j+n-1]
		// Save quotient digit.
		// Caller may know the top digit is zero and not leave room for it.
		if j == m && m == len(q) && qhat == 0 {
			continue
		}
		q[j] = qhat
	}
}
// greaterThan reports whether the two digit numbers x1 x2 > y1 y2.
// TODO(rsc): In contradiction to most of this file, x1 is the high
// digit and x2 is the low digit. This should be fixed.
func greaterThan(x1, x2, y1, y2 Word) bool {
	// Compare high digits first; the low digits only matter on a tie.
	if x1 != y1 {
		return x1 > y1
	}
	return x2 > y2
}
// divRecursiveThreshold is the number of divisor digits
// at which point divRecursive is faster than divBasic.
// It is a variable (not a constant) so calibrate_test.go can tune it.
var divRecursiveThreshold = 40 // see calibrate_test.go
// divRecursive implements recursive division as described above.
// It overwrites z with ⌊u/v⌋ and overwrites u with the remainder r.
// z must be large enough to hold ⌊u/v⌋.
// This function is just for allocating and freeing temporaries
// around divRecursiveStep, the real implementation.
func (z nat) divRecursive(stk *stack, u, v nat) {
	// divRecursiveStep accumulates into z, so z must start at zero.
	clear(z)
	z.divRecursiveStep(stk, u, v, 0)
}
// divRecursiveStep is the actual implementation of recursive division.
// It adds ⌊u/v⌋ to z and overwrites u with the remainder r.
// z must be large enough to hold ⌊u/v⌋.
// All temporaries are obtained from (and returned to) stk; depth tracks
// the recursion depth.
// NOTE(review): an older version of this comment mentioned temps[depth]
// and tmp, which no longer appear in the code.
func (z nat) divRecursiveStep(stk *stack, u, v nat, depth int) {
	// u is a subsection of the original and may have leading zeros.
	// TODO(rsc): The v = v.norm() is useless and should be removed.
	// We know (and require) that v's top digit is ≥ B/2.
	u = u.norm()
	v = v.norm()
	if len(u) == 0 {
		// u == 0: quotient contribution is zero and there is no remainder.
		clear(z)
		return
	}
	// Fall back to basic division if the problem is now small enough.
	n := len(v)
	if n < divRecursiveThreshold {
		z.divBasic(stk, u, v)
		return
	}
	// Nothing to do if u is shorter than v (implies u < v).
	m := len(u) - n
	if m < 0 {
		return
	}
	// We consider B digits in a row as a single wide digit.
	// (See “Recursive Division” above.)
	//
	// TODO(rsc): rename B to Wide, to avoid confusion with _B,
	// which is something entirely different.
	// TODO(rsc): Look into whether using ⌈n/2⌉ is better than ⌊n/2⌋.
	B := n / 2
	// Allocate a nat for qhat below.
	defer stk.restore(stk.save())
	qhat0 := stk.nat(B + 1)
	// Compute each wide digit of the quotient.
	//
	// TODO(rsc): Change the loop to be
	//	for j := (m+B-1)/B*B; j > 0; j -= B {
	// which will make the final step a regular step, letting us
	// delete what amounts to an extra copy of the loop body below.
	j := m
	for j > B {
		// Divide u[j-B:j+n] (3 wide digits) by v (2 wide digits).
		// First make the 2-by-1-wide-digit guess using a recursive call.
		// Then extend the guess to the full 3-by-2 (see “Refining Guesses”).
		//
		// For the 2-by-1-wide-digit guess, instead of doing 2B-by-B-digit,
		// we use a (2B+1)-by-(B+1) digit, which handles the possibility that
		// the result has an extra leading 1 digit as well as guaranteeing
		// that the computed q̂ will be off by at most 1 instead of 2.
		// s is the number of digits to drop from the 3B- and 2B-digit chunks.
		// We drop B-1 to be left with 2B+1 and B+1.
		s := (B - 1)
		// uu is the up-to-3B-digit section of u we are working on.
		uu := u[j-B:]
		// Compute the 2-by-1 guess q̂, leaving r̂ in uu[s:B+n].
		qhat := qhat0
		clear(qhat)
		qhat.divRecursiveStep(stk, uu[s:B+n], v[s:], depth+1)
		qhat = qhat.norm()
		// Extend to a 3-by-2 quotient and remainder.
		// Because divRecursiveStep overwrote the top part of uu with
		// the remainder r̂, the full uu already contains the equivalent
		// of r̂·B + uₙ₋₂ from the “Refining Guesses” discussion.
		// Subtracting q̂·vₙ₋₂ from it will compute the full-length remainder.
		// If that subtraction underflows, q̂·v > u, which we fix up
		// by decrementing q̂ and adding v back, same as in long division.
		// TODO(rsc): Instead of subtract and fix-up, this code is computing
		// q̂·vₙ₋₂ and decrementing q̂ until that product is ≤ u.
		// But we can do the subtraction directly, as in the comment above
		// and in long division, because we know that q̂ is wrong by at most one.
		mark := stk.save()
		qhatv := stk.nat(3 * n)
		clear(qhatv)
		qhatv = qhatv.mul(stk, qhat, v[:s])
		// Fixup loop: runs at most twice (q̂ is off by at most 1, see above).
		for i := 0; i < 2; i++ {
			e := qhatv.cmp(uu.norm())
			if e <= 0 {
				break
			}
			subVW(qhat, qhat, 1)
			c := subVV(qhatv[:s], qhatv[:s], v[:s])
			if len(qhatv) > s {
				subVW(qhatv[s:], qhatv[s:], c)
			}
			addTo(uu[s:], v[s:])
		}
		if qhatv.cmp(uu.norm()) > 0 {
			panic("impossible")
		}
		// Subtract q̂·v from uu, leaving the remainder in place.
		c := subVV(uu[:len(qhatv)], uu[:len(qhatv)], qhatv)
		if c > 0 {
			subVW(uu[len(qhatv):], uu[len(qhatv):], c)
		}
		addTo(z[j-B:], qhat)
		j -= B
		stk.restore(mark)
	}
	// TODO(rsc): Rewrite loop as described above and delete all this code.
	// Now u < (v<<B), compute lower bits in the same way.
	// Choose shift = B-1 again.
	s := B - 1
	qhat := qhat0
	clear(qhat)
	qhat.divRecursiveStep(stk, u[s:].norm(), v[s:], depth+1)
	qhat = qhat.norm()
	qhatv := stk.nat(3 * n)
	clear(qhatv)
	qhatv = qhatv.mul(stk, qhat, v[:s])
	// Set the correct remainder as before.
	for i := 0; i < 2; i++ {
		if e := qhatv.cmp(u.norm()); e > 0 {
			subVW(qhat, qhat, 1)
			c := subVV(qhatv[:s], qhatv[:s], v[:s])
			if len(qhatv) > s {
				subVW(qhatv[s:], qhatv[s:], c)
			}
			addTo(u[s:], v[s:])
		}
	}
	if qhatv.cmp(u.norm()) > 0 {
		panic("impossible")
	}
	c := subVV(u[:len(qhatv)], u[:len(qhatv)], qhatv)
	if c > 0 {
		c = subVW(u[len(qhatv):], u[len(qhatv):], c)
	}
	if c > 0 {
		panic("impossible")
	}
	// Done!
	addTo(z, qhat.norm())
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Multiplication.
package big
// Operands that are shorter than karatsubaThreshold are multiplied using
// "grade school" multiplication; for longer operands the Karatsuba algorithm
// is used.
// It is a variable (not a constant) so calibrate_test.go can tune it.
var karatsubaThreshold = 40 // see calibrate_test.go
// mul sets z = x*y, using stk for temporary storage.
// The caller may pass stk == nil to request that mul obtain and release one itself.
func (z nat) mul(stk *stack, x, y nat) nat {
	xn, yn := len(x), len(y)
	switch {
	case xn < yn:
		// Normalize so the longer operand comes first.
		return z.mul(stk, y, x)
	case xn == 0 || yn == 0:
		return z[:0]
	case yn == 1:
		// Single-digit multiplier: one multiply-add pass.
		return z.mulAddWW(x, y[0], 0)
	}
	// xn >= yn > 1

	// z must not share storage with either operand.
	if alias(z, x) || alias(z, y) {
		z = nil
	}
	z = z.make(xn + yn)

	// Small operands: schoolbook multiplication, no scratch needed.
	if yn < karatsubaThreshold {
		basicMul(z, x, y)
		return z.norm()
	}

	if stk == nil {
		stk = getStack()
		defer stk.free()
	}

	// Let x = x1:x0 where x0 has yn digits.
	// Compute z = x0*y, then add in x1*y one yn-digit chunk at a time.
	karatsuba(stk, z[:2*yn], x[:yn], y)
	if yn < xn {
		clear(z[2*yn:])
		defer stk.restore(stk.save())
		chunk := stk.nat(2 * yn)
		for i := yn; i < xn; i += yn {
			chunk = chunk.mul(stk, x[i:min(i+yn, len(x))], y)
			addTo(z[i:], chunk)
		}
	}
	return z.norm()
}
// Operands that are shorter than basicSqrThreshold are squared using
// "grade school" multiplication; for operands longer than karatsubaSqrThreshold
// we use the Karatsuba algorithm optimized for x == y.
var basicSqrThreshold = 12 // see calibrate_test.go
var karatsubaSqrThreshold = 80 // see calibrate_test.go
// sqr sets z = x*x, using stk for temporary storage.
// The caller may pass stk == nil to request that sqr obtain and release one itself.
func (z nat) sqr(stk *stack, x nat) nat {
	n := len(x)
	switch {
	case n == 0:
		// 0² == 0.
		return z[:0]
	case n == 1:
		// Single word: square it directly with the word primitive.
		d := x[0]
		z = z.make(2)
		z[1], z[0] = mulWW(d, d)
		return z.norm()
	}

	if alias(z, x) {
		z = nil // z is an alias for x - cannot reuse
	}
	z = z.make(2 * n)

	// Below both thresholds, plain schoolbook multiplication wins:
	// the squaring-specific routines have too much overhead here.
	if n < basicSqrThreshold && n < karatsubaSqrThreshold {
		basicMul(z, x, x)
		return z.norm()
	}

	if stk == nil {
		stk = getStack()
		defer stk.free()
	}

	if n < karatsubaSqrThreshold {
		basicSqr(stk, z, x)
		return z.norm()
	}

	karatsubaSqr(stk, z, x)
	return z.norm()
}
// basicSqr sets z = x*x and is asymptotically faster than basicMul
// by about a factor of 2, but slower for small arguments due to overhead.
// Requirements: len(x) > 0, len(z) == 2*len(x)
// The (non-normalized) result is placed in z.
//
// It exploits x² = Σ x[i]² + 2·Σ_{j<i} x[i]·x[j]: the squares go directly
// into z, the cross products accumulate in t and are doubled once at the end.
func basicSqr(stk *stack, z, x nat) {
	n := len(x)
	if n < basicSqrThreshold {
		basicMul(z, x, x)
		return
	}

	defer stk.restore(stk.save())
	t := stk.nat(2 * n)
	clear(t)
	z[1], z[0] = mulWW(x[0], x[0]) // the initial square
	for i := 1; i < n; i++ {
		d := x[i]
		// z collects the squares x[i] * x[i]
		z[2*i+1], z[2*i] = mulWW(d, d)
		// t collects the products x[i] * x[j] where j < i
		t[2*i] = addMulVVWW(t[i:2*i], t[i:2*i], x[0:i], d, 0)
	}
	t[2*n-1] = lshVU(t[1:2*n-1], t[1:2*n-1], 1) // double the j < i products
	addVV(z, z, t)                              // combine the result
}
// mulAddWW returns z = x*y + r,
// where x is a multi-word operand and y, r are single words.
func (z nat) mulAddWW(x nat, y, r Word) nat {
	n := len(x)
	if n == 0 || y == 0 {
		// The product x*y is zero, so the result is just r.
		return z.setWord(r)
	}
	// n > 0: the result needs at most n+1 words.
	z = z.make(n + 1)
	z[n] = mulAddVWW(z[:n], x, y, r)
	return z.norm()
}
// basicMul multiplies x and y and leaves the result in z.
// The (non-normalized) result is placed in z[0 : len(x) + len(y)].
//
// This is the schoolbook algorithm: for every word of y, the partial
// product x*y[i] is accumulated into z at offset i.
func basicMul(z, x, y nat) {
	m := len(x)
	clear(z[:m+len(y)]) // initialize the result area
	for i := 0; i < len(y); i++ {
		if d := y[i]; d != 0 {
			// Store the carry-out word of the accumulated partial product.
			z[m+i] = addMulVVWW(z[i:i+m], z[i:i+m], x, d, 0)
		}
	}
}
// karatsuba multiplies x and y,
// writing the (non-normalized) result to z.
// x and y must have the same length n,
// and z must have length twice that.
func karatsuba(stk *stack, z, x, y nat) {
	n := len(y)
	if len(x) != n || len(z) != 2*n {
		panic("bad karatsuba length")
	}

	// Fall back to basic algorithm if small enough.
	if n < karatsubaThreshold || n < 2 {
		basicMul(z, x, y)
		return
	}

	// Let the notation x1:x0 denote the nat (x1<<N)+x0 for some N,
	// and similarly z2:z1:z0 = (z2<<2N)+(z1<<N)+z0.
	//
	// (Note that z0, z1, z2 might be ≥ 2**N, in which case the high
	// bits of, say, z0 are being added to the low bits of z1 in this notation.)
	//
	// Karatsuba multiplication is based on the observation that
	//
	//	x1:x0 * y1:y0 = x1*y1:(x0*y1+y0*x1):x0*y0
	//	              = x1*y1:((x0-x1)*(y1-y0)+x1*y1+x0*y0):x0*y0
	//
	// The second form uses only three half-width multiplications
	// instead of the four that the straightforward first form does.
	//
	// We call the three pieces z0, z1, z2:
	//
	//	z0 = x0*y0
	//	z2 = x1*y1
	//	z1 = (x0-x1)*(y1-y0) + z0 + z2
	n2 := (n + 1) / 2
	x0, x1 := &Int{abs: x[:n2].norm()}, &Int{abs: x[n2:].norm()}
	y0, y1 := &Int{abs: y[:n2].norm()}, &Int{abs: y[n2:].norm()}
	z0 := &Int{abs: z[0 : 2*n2]}
	z2 := &Int{abs: z[2*n2:]}

	// Allocate temporary storage for z1; repurpose z0 to hold tx and ty.
	defer stk.restore(stk.save())
	z1 := &Int{abs: stk.nat(2*n2 + 1)}
	tx := &Int{abs: z[0:n2]}
	ty := &Int{abs: z[n2 : 2*n2]}
	// tx and ty (and hence z1) may be negative; using Int handles the signs.
	tx.Sub(x0, x1)
	ty.Sub(y1, y0)
	z1.mul(stk, tx, ty)

	// z1 is computed before z0/z2 because tx and ty alias z's storage,
	// which z0.mul and z2.mul below overwrite.
	clear(z)
	z0.mul(stk, x0, y0)
	z2.mul(stk, x1, y1)
	z1.Add(z1, z0)
	z1.Add(z1, z2)
	addTo(z[n2:], z1.abs)

	// Debug mode: double-check answer and print trace on failure.
	const debug = false
	if debug {
		zz := make(nat, len(z))
		basicMul(zz, x, y)
		if z.cmp(zz) != 0 {
			// All the temps were aliased to z and gone. Recompute.
			z0 = new(Int)
			z0.mul(stk, x0, y0)
			tx = new(Int).Sub(x1, x0)
			ty = new(Int).Sub(y0, y1)
			z2 = new(Int)
			z2.mul(stk, x1, y1)
			print("karatsuba wrong\n")
			trace("x ", &Int{abs: x})
			trace("y ", &Int{abs: y})
			trace("z ", &Int{abs: z})
			trace("zz", &Int{abs: zz})
			trace("x0", x0)
			trace("x1", x1)
			trace("y0", y0)
			trace("y1", y1)
			trace("tx", tx)
			trace("ty", ty)
			trace("z0", z0)
			trace("z1", z1)
			trace("z2", z2)
			panic("karatsuba")
		}
	}
}
// karatsubaSqr squares x,
// writing the (non-normalized) result to z.
// z must have length 2*len(x).
// It is analogous to [karatsuba] but can run faster
// knowing both multiplicands are the same value.
func karatsubaSqr(stk *stack, z, x nat) {
	n := len(x)
	if len(z) != 2*n {
		panic("bad karatsubaSqr length")
	}

	// Fall back to the basic squaring algorithm if small enough.
	if n < karatsubaSqrThreshold || n < 2 {
		basicSqr(stk, z, x)
		return
	}

	// Recall that for karatsuba we want to compute:
	//
	//	x1:x0 * y1:y0 = x1y1:(x0y1+y0x1):x0y0
	//	              = x1y1:((x0-x1)*(y1-y0)+x1y1+x0y0):x0y0
	//	              = z2:z1:z0
	// where:
	//
	//	z0 = x0y0
	//	z2 = x1y1
	//	z1 = (x0-x1)*(y1-y0) + z0 + z2
	//
	// When x = y, these simplify to:
	//
	//	z0 = x0²
	//	z2 = x1²
	//	z1 = z0 + z2 - (x0-x1)²
	n2 := (n + 1) / 2
	x0, x1 := &Int{abs: x[:n2].norm()}, &Int{abs: x[n2:].norm()}
	z0 := &Int{abs: z[0 : 2*n2]}
	z2 := &Int{abs: z[2*n2:]}

	// Allocate temporary storage for z1; repurpose z0 to hold tx.
	defer stk.restore(stk.save())
	z1 := &Int{abs: stk.nat(2*n2 + 1)}
	tx := &Int{abs: z[0:n2]}
	tx.Sub(x0, x1)
	z1.abs = z1.abs.sqr(stk, tx.abs)
	// z1 enters the sum with a minus sign: z1 = z0 + z2 - (x0-x1)².
	z1.neg = true

	// z1 is computed before z0/z2 because tx aliases z's storage,
	// which the squarings below overwrite.
	clear(z)
	z0.abs = z0.abs.sqr(stk, x0.abs)
	z2.abs = z2.abs.sqr(stk, x1.abs)
	z1.Add(z1, z0)
	z1.Add(z1, z2)
	addTo(z[n2:], z1.abs)

	// Debug mode: double-check answer and print trace on failure.
	const debug = false
	if debug {
		zz := make(nat, len(z))
		basicSqr(stk, zz, x)
		if z.cmp(zz) != 0 {
			// All the temps were aliased to z and gone. Recompute.
			tx = new(Int).Sub(x0, x1)
			z0 = new(Int).Mul(x0, x0)
			z2 = new(Int).Mul(x1, x1)
			z1 = new(Int).Mul(tx, tx)
			z1.Neg(z1)
			z1.Add(z1, z0)
			z1.Add(z1, z2)
			print("karatsubaSqr wrong\n")
			trace("x ", &Int{abs: x})
			trace("z ", &Int{abs: z})
			trace("zz", &Int{abs: zz})
			trace("x0", x0)
			trace("x1", x1)
			trace("z0", z0)
			trace("z1", z1)
			trace("z2", z2)
			panic("karatsubaSqr")
		}
	}
}
// ifmt returns the debug formatting of the Int x: 0xHEX.
func ifmt(x *Int) string {
	s := x.Text(16)
	if s == "" { // happens for denormalized zero
		s = "0x0"
	}
	sign := ""
	if s[0] == '-' {
		sign = "-"
		s = s[1:]
	}
	// Insert _ between each word-sized group of hex digits.
	const D = _W / 4 // digits per chunk
	tail := ""
	for len(s) > D {
		tail = s[len(s)-D:] + "_" + tail
		s = s[:len(s)-D]
	}
	return sign + s + tail
}
// trace prints a single debug value as "name=HEX" using the built-in print.
func trace(name string, x *Int) {
	print(name, "=", ifmt(x), "\n")
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package big
import "math/rand"
// ProbablyPrime reports whether x is probably prime,
// applying the Miller-Rabin test with n pseudorandomly chosen bases
// as well as a Baillie-PSW test.
//
// If x is prime, ProbablyPrime returns true.
// If x is chosen randomly and not prime, ProbablyPrime probably returns false.
// The probability of returning true for a randomly chosen non-prime is at most ¼ⁿ.
//
// ProbablyPrime is 100% accurate for inputs less than 2⁶⁴.
// See Menezes et al., Handbook of Applied Cryptography, 1997, pp. 145-149,
// and FIPS 186-4 Appendix F for further discussion of the error probabilities.
//
// ProbablyPrime is not suitable for judging primes that an adversary may
// have crafted to fool the test.
//
// As of Go 1.8, ProbablyPrime(0) is allowed and applies only a Baillie-PSW test.
// Before Go 1.8, ProbablyPrime applied only the Miller-Rabin tests, and ProbablyPrime(0) panicked.
func (x *Int) ProbablyPrime(n int) bool {
	// Note regarding the doc comment above:
	// It would be more precise to say that the Baillie-PSW test uses the
	// extra strong Lucas test as its Lucas test, but since no one knows
	// how to tell any of the Lucas tests apart inside a Baillie-PSW test
	// (they all work equally well empirically), that detail need not be
	// documented or implicitly guaranteed.
	// The comment does avoid saying "the" Baillie-PSW test
	// because of this general ambiguity.

	if n < 0 {
		panic("negative n for ProbablyPrime")
	}

	// Negative numbers and zero are never prime.
	if x.neg || len(x.abs) == 0 {
		return false
	}

	// primeBitMask records the primes < 64.
	const primeBitMask uint64 = 1<<2 | 1<<3 | 1<<5 | 1<<7 |
		1<<11 | 1<<13 | 1<<17 | 1<<19 | 1<<23 | 1<<29 | 1<<31 |
		1<<37 | 1<<41 | 1<<43 | 1<<47 | 1<<53 | 1<<59 | 1<<61

	w := x.abs[0]
	if len(x.abs) == 1 && w < 64 {
		// Small values: answer directly from the bit mask.
		return primeBitMask&(1<<w) != 0
	}

	if w&1 == 0 {
		return false // x is even
	}

	// Trial division by all primes up to 53: reduce x modulo the products
	// primesA and primesB with a single modW each, then test the small
	// residues. This cheaply rejects most composites.
	const primesA = 3 * 5 * 7 * 11 * 13 * 17 * 19 * 23 * 37
	const primesB = 29 * 31 * 41 * 43 * 47 * 53

	var rA, rB uint32
	switch _W {
	case 32:
		// primesA*primesB does not fit in a 32-bit word; reduce separately.
		rA = uint32(x.abs.modW(primesA))
		rB = uint32(x.abs.modW(primesB))
	case 64:
		r := x.abs.modW((primesA * primesB) & _M)
		rA = uint32(r % primesA)
		rB = uint32(r % primesB)
	default:
		panic("math/big: invalid word size")
	}

	if rA%3 == 0 || rA%5 == 0 || rA%7 == 0 || rA%11 == 0 || rA%13 == 0 || rA%17 == 0 || rA%19 == 0 || rA%23 == 0 || rA%37 == 0 ||
		rB%29 == 0 || rB%31 == 0 || rB%41 == 0 || rB%43 == 0 || rB%47 == 0 || rB%53 == 0 {
		return false
	}

	stk := getStack()
	defer stk.free()

	return x.abs.probablyPrimeMillerRabin(stk, n+1, true) && x.abs.probablyPrimeLucas(stk)
}
// probablyPrimeMillerRabin reports whether n passes reps rounds of the
// Miller-Rabin primality test, using pseudo-randomly chosen bases.
// If force2 is true, one of the rounds is forced to use base 2.
// See Handbook of Applied Cryptography, p. 139, Algorithm 4.24.
// The number n is known to be non-zero.
func (n nat) probablyPrimeMillerRabin(stk *stack, reps int, force2 bool) bool {
	nm1 := nat(nil).sub(n, natOne)
	// determine q, k such that nm1 = q << k
	k := nm1.trailingZeroBits()
	q := nat(nil).rsh(nm1, k)

	nm3 := nat(nil).sub(nm1, natTwo)
	// Seed deterministically from n's low word, so repeated calls on the
	// same n probe the same bases.
	rand := rand.New(rand.NewSource(int64(n[0])))

	var x, y, quotient nat
	nm3Len := nm3.bitLen()

NextRandom:
	for i := 0; i < reps; i++ {
		if i == reps-1 && force2 {
			x = x.set(natTwo)
		} else {
			// Choose a base x uniformly in [2, n-2].
			x = x.random(rand, nm3, nm3Len)
			x = x.add(x, natTwo)
		}
		y = y.expNN(stk, x, q, n, false)
		if y.cmp(natOne) == 0 || y.cmp(nm1) == 0 {
			continue
		}
		// Square y up to k-1 times, looking for y ≡ -1 (mod n).
		for j := uint(1); j < k; j++ {
			y = y.sqr(stk, y)
			quotient, y = quotient.div(stk, y, y, n)
			if y.cmp(nm1) == 0 {
				continue NextRandom
			}
			if y.cmp(natOne) == 0 {
				// A nontrivial square root of 1 was found: n is composite.
				return false
			}
		}
		return false
	}

	return true
}
// probablyPrimeLucas reports whether n passes the "almost extra strong" Lucas probable prime test,
// using Baillie-OEIS parameter selection. This corresponds to "AESLPSP" on Jacobsen's tables (link below).
// The combination of this test and a Miller-Rabin/Fermat test with base 2 gives a Baillie-PSW test.
//
// References:
//
// Baillie and Wagstaff, "Lucas Pseudoprimes", Mathematics of Computation 35(152),
// October 1980, pp. 1391-1417, especially page 1401.
// https://www.ams.org/journals/mcom/1980-35-152/S0025-5718-1980-0583518-6/S0025-5718-1980-0583518-6.pdf
//
// Grantham, "Frobenius Pseudoprimes", Mathematics of Computation 70(234),
// March 2000, pp. 873-891.
// https://www.ams.org/journals/mcom/2001-70-234/S0025-5718-00-01197-2/S0025-5718-00-01197-2.pdf
//
// Baillie, "Extra strong Lucas pseudoprimes", OEIS A217719, https://oeis.org/A217719.
//
// Jacobsen, "Pseudoprime Statistics, Tables, and Data", http://ntheory.org/pseudoprimes.html.
//
// Nicely, "The Baillie-PSW Primality Test", https://web.archive.org/web/20191121062007/http://www.trnicely.net/misc/bpsw.html.
// (Note that Nicely's definition of the "extra strong" test gives the wrong Jacobi condition,
// as pointed out by Jacobsen.)
//
// Crandall and Pomerance, Prime Numbers: A Computational Perspective, 2nd ed.
// Springer, 2005.
func (n nat) probablyPrimeLucas(stk *stack) bool {
	// Discard 0, 1.
	if len(n) == 0 || n.cmp(natOne) == 0 {
		return false
	}
	// Two is the only even prime.
	// Already checked by caller, but here to allow testing in isolation.
	if n[0]&1 == 0 {
		return n.cmp(natTwo) == 0
	}

	// Baillie-OEIS "method C" for choosing D, P, Q,
	// as in https://oeis.org/A217719/a217719.txt:
	// try increasing P ≥ 3 such that D = P² - 4 (so Q = 1)
	// until Jacobi(D, n) = -1.
	// The search is expected to succeed for non-square n after just a few trials.
	// After more than expected failures, check whether n is square
	// (which would cause Jacobi(D, n) = 1 for all D not dividing n).
	p := Word(3)
	d := nat{1}
	t1 := nat(nil) // temp
	intD := &Int{abs: d}
	intN := &Int{abs: n}
	for ; ; p++ {
		if p > 10000 {
			// This is widely believed to be impossible.
			// If we get a report, we'll want the exact number n.
			panic("math/big: internal error: cannot find (D/n) = -1 for " + intN.String())
		}
		// Note: intD aliases d, so this updates intD's value in place.
		d[0] = p*p - 4
		j := Jacobi(intD, intN)
		if j == -1 {
			break
		}
		if j == 0 {
			// d = p²-4 = (p-2)(p+2).
			// If (d/n) == 0 then d shares a prime factor with n.
			// Since the loop proceeds in increasing p and starts with p-2==1,
			// the shared prime factor must be p+2.
			// If p+2 == n, then n is prime; otherwise p+2 is a proper factor of n.
			return len(n) == 1 && n[0] == p+2
		}
		if p == 40 {
			// We'll never find (d/n) = -1 if n is a square.
			// If n is a non-square we expect to find a d in just a few attempts on average.
			// After 40 attempts, take a moment to check if n is indeed a square.
			t1 = t1.sqrt(stk, n)
			t1 = t1.sqr(stk, t1)
			if t1.cmp(n) == 0 {
				return false
			}
		}
	}

	// Grantham definition of "extra strong Lucas pseudoprime", after Thm 2.3 on p. 876
	// (D, P, Q above have become Δ, b, 1):
	//
	// Let U_n = U_n(b, 1), V_n = V_n(b, 1), and Δ = b²-4.
	// An extra strong Lucas pseudoprime to base b is a composite n = 2^r s + Jacobi(Δ, n),
	// where s is odd and gcd(n, 2*Δ) = 1, such that either (i) U_s ≡ 0 mod n and V_s ≡ ±2 mod n,
	// or (ii) V_{2^t s} ≡ 0 mod n for some 0 ≤ t < r-1.
	//
	// We know gcd(n, Δ) = 1 or else we'd have found Jacobi(d, n) == 0 above.
	// We know gcd(n, 2) = 1 because n is odd.
	//
	// Arrange s = (n - Jacobi(Δ, n)) / 2^r = (n+1) / 2^r.
	s := nat(nil).add(n, natOne)
	r := int(s.trailingZeroBits())
	s = s.rsh(s, uint(r))
	nm2 := nat(nil).sub(n, natTwo) // n-2

	// We apply the "almost extra strong" test, which checks the above conditions
	// except for U_s ≡ 0 mod n, which allows us to avoid computing any U_k values.
	// Jacobsen points out that maybe we should just do the full extra strong test:
	// "It is also possible to recover U_n using Crandall and Pomerance equation 3.13:
	// U_n = D^-1 (2V_{n+1} - PV_n) allowing us to run the full extra-strong test
	// at the cost of a single modular inversion. This computation is easy and fast in GMP,
	// so we can get the full extra-strong test at essentially the same performance as the
	// almost extra strong test."

	// Compute Lucas sequence V_s(b, 1), where:
	//
	//	V(0) = 2
	//	V(1) = P
	//	V(k) = P V(k-1) - Q V(k-2).
	//
	// (Remember that due to method C above, P = b, Q = 1.)
	//
	// In general V(k) = α^k + β^k, where α and β are roots of x² - Px + Q.
	// Crandall and Pomerance (p.147) observe that for 0 ≤ j ≤ k,
	//
	//	V(j+k) = V(j)V(k) - V(k-j).
	//
	// So in particular, to quickly double the subscript:
	//
	//	V(2k) = V(k)² - 2
	//	V(2k+1) = V(k) V(k+1) - P
	//
	// We can therefore start with k=0 and build up to k=s in log₂(s) steps.
	natP := nat(nil).setWord(p)
	vk := nat(nil).setWord(2)
	vk1 := nat(nil).setWord(p)
	t2 := nat(nil) // temp
	// Walk s's bits from the top down. (Starting one past the top bit is
	// harmless: that bit is 0, and the doubling step fixes V(0)=2.)
	for i := int(s.bitLen()); i >= 0; i-- {
		if s.bit(uint(i)) != 0 {
			// k' = 2k+1
			// V(k') = V(2k+1) = V(k) V(k+1) - P.
			t1 = t1.mul(stk, vk, vk1)
			// Adding n first keeps the value non-negative (mod n unchanged)
			// before subtracting P.
			t1 = t1.add(t1, n)
			t1 = t1.sub(t1, natP)
			t2, vk = t2.div(stk, vk, t1, n)
			// V(k'+1) = V(2k+2) = V(k+1)² - 2.
			t1 = t1.sqr(stk, vk1)
			t1 = t1.add(t1, nm2)
			t2, vk1 = t2.div(stk, vk1, t1, n)
		} else {
			// k' = 2k
			// V(k'+1) = V(2k+1) = V(k) V(k+1) - P.
			t1 = t1.mul(stk, vk, vk1)
			t1 = t1.add(t1, n)
			t1 = t1.sub(t1, natP)
			t2, vk1 = t2.div(stk, vk1, t1, n)
			// V(k') = V(2k) = V(k)² - 2
			t1 = t1.sqr(stk, vk)
			t1 = t1.add(t1, nm2)
			t2, vk = t2.div(stk, vk, t1, n)
		}
	}

	// Now k=s, so vk = V(s). Check V(s) ≡ ±2 (mod n).
	if vk.cmp(natTwo) == 0 || vk.cmp(nm2) == 0 {
		// Check U(s) ≡ 0.
		// As suggested by Jacobsen, apply Crandall and Pomerance equation 3.13:
		//
		//	U(k) = D⁻¹ (2 V(k+1) - P V(k))
		//
		// Since we are checking for U(k) == 0 it suffices to check 2 V(k+1) == P V(k) mod n,
		// or P V(k) - 2 V(k+1) == 0 mod n.
		t1 := t1.mul(stk, vk, natP)
		t2 := t2.lsh(vk1, 1)
		if t1.cmp(t2) < 0 {
			t1, t2 = t2, t1
		}
		t1 = t1.sub(t1, t2)
		t3 := vk1 // steal vk1, no longer needed below
		vk1 = nil
		_ = vk1
		t2, t3 = t2.div(stk, t3, t1, n)
		if len(t3) == 0 {
			return true
		}
	}

	// Check V(2^t s) ≡ 0 mod n for some 0 ≤ t < r-1.
	for t := 0; t < r-1; t++ {
		if len(vk) == 0 { // vk == 0
			return true
		}
		// Optimization: V(k) = 2 is a fixed point for V(k') = V(k)² - 2,
		// so if V(k) = 2, we can stop: we will never find a future V(k) == 0.
		if len(vk) == 1 && vk[0] == 2 { // vk == 2
			return false
		}
		// k' = 2k
		// V(k') = V(2k) = V(k)² - 2
		t1 = t1.sqr(stk, vk)
		t1 = t1.sub(t1, natTwo)
		t2, vk = t2.div(stk, vk, t1, n)
	}
	return false
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements multi-precision rational numbers.
package big
import (
"fmt"
"math"
)
// A Rat represents a quotient a/b of arbitrary precision.
// The zero value for a Rat represents the value 0.
//
// Operations always take pointer arguments (*Rat) rather
// than Rat values, and each unique Rat value requires
// its own unique *Rat pointer. To "copy" a Rat value,
// an existing (or newly allocated) Rat must be set to
// a new value using the [Rat.Set] method; shallow copies
// of Rats are not supported and may lead to errors.
type Rat struct {
	// To make zero values for Rat work w/o initialization,
	// a zero value of b (len(b) == 0) acts like b == 1. At
	// the earliest opportunity (when an assignment to the Rat
	// is made), such uninitialized denominators are set to 1.
	// a.neg determines the sign of the Rat, b.neg is ignored.
	a, b Int // a is the numerator, b the denominator (kept non-negative)
}
// NewRat creates a new [Rat] with numerator a and denominator b.
func NewRat(a, b int64) *Rat {
	r := new(Rat)
	return r.SetFrac64(a, b)
}
// SetFloat64 sets z to exactly f and returns z.
// If f is not finite, SetFloat returns nil.
func (z *Rat) SetFloat64(f float64) *Rat {
	const expMask = 1<<11 - 1
	bits := math.Float64bits(f)
	mant := bits & (1<<52 - 1)
	exp := int((bits >> 52) & expMask)
	// Decode the IEEE 754 exponent field.
	if exp == expMask {
		return nil // Inf or NaN
	}
	if exp == 0 {
		exp -= 1022 // denormal: no implicit leading 1
	} else {
		mant |= 1 << 52 // normal: restore implicit leading 1
		exp -= 1023
	}

	// f = mant * 2^(exp-52), so the denominator is 2^shift.
	shift := 52 - exp

	// Optimization (?): partially pre-normalise.
	for mant&1 == 0 && shift > 0 {
		mant >>= 1
		shift--
	}

	z.a.SetUint64(mant)
	z.a.neg = f < 0
	z.b.Set(intOne)
	if shift > 0 {
		z.b.Lsh(&z.b, uint(shift))
	} else {
		z.a.Lsh(&z.a, uint(-shift))
	}
	return z.norm()
}
// quotToFloat32 returns the non-negative float32 value
// nearest to the quotient a/b, using round-to-even in
// halfway cases. It does not mutate its arguments.
// Preconditions: b is non-zero; a and b have no common factors.
func quotToFloat32(stk *stack, a, b nat) (f float32, exact bool) {
	const (
		// float size in bits
		Fsize = 32

		// mantissa
		Msize = 23
		Msize1 = Msize + 1 // incl. implicit 1
		Msize2 = Msize1 + 1

		// exponent
		Esize = Fsize - Msize1
		Ebias = 1<<(Esize-1) - 1
		Emin = 1 - Ebias
		Emax = Ebias
	)

	// TODO(adonovan): specialize common degenerate cases: 1.0, integers.
	alen := a.bitLen()
	if alen == 0 {
		// a == 0, so a/b == 0 exactly.
		return 0, true
	}
	blen := b.bitLen()
	if blen == 0 {
		panic("division by zero")
	}

	// 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1)
	// (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B).
	// This is 2 or 3 more than the float32 mantissa field width of Msize:
	// - the optional extra bit is shifted away in step 3 below.
	// - the high-order 1 is omitted in "normal" representation;
	// - the low-order 1 will be used during rounding then discarded.
	exp := alen - blen
	var a2, b2 nat
	a2 = a2.set(a)
	b2 = b2.set(b)
	if shift := Msize2 - exp; shift > 0 {
		a2 = a2.lsh(a2, uint(shift))
	} else if shift < 0 {
		b2 = b2.lsh(b2, uint(-shift))
	}

	// 2. Compute quotient and remainder (q, r). NB: due to the
	// extra shift, the low-order bit of q is logically the
	// high-order bit of r.
	var q nat
	q, r := q.div(stk, a2, a2, b2) // (recycle a2)
	mantissa := low32(q)
	haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half

	// 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1
	// (in effect---we accomplish this incrementally).
	if mantissa>>Msize2 == 1 {
		if mantissa&1 == 1 {
			haveRem = true
		}
		mantissa >>= 1
		exp++
	}
	if mantissa>>Msize1 != 1 {
		panic(fmt.Sprintf("expected exactly %d bits of result", Msize2))
	}

	// 4. Rounding.
	if Emin-Msize <= exp && exp <= Emin {
		// Denormal case; lose 'shift' bits of precision.
		shift := uint(Emin - (exp - 1)) // [1..Esize1)
		lostbits := mantissa & (1<<shift - 1)
		haveRem = haveRem || lostbits != 0
		mantissa >>= shift
		exp = 2 - Ebias // == exp + shift
	}
	// Round q using round-half-to-even.
	exact = !haveRem
	if mantissa&1 != 0 {
		exact = false
		if haveRem || mantissa&2 != 0 {
			if mantissa++; mantissa >= 1<<Msize2 {
				// Complete rollover 11...1 => 100...0, so shift is safe
				mantissa >>= 1
				exp++
			}
		}
	}
	mantissa >>= 1 // discard rounding bit.  Mantissa now scaled by 1<<Msize1.

	f = float32(math.Ldexp(float64(mantissa), exp-Msize1))
	if math.IsInf(float64(f), 0) {
		// Overflow to infinity cannot be exact.
		exact = false
	}
	return
}
// quotToFloat64 returns the non-negative float64 value
// nearest to the quotient a/b, using round-to-even in
// halfway cases. It does not mutate its arguments.
// Preconditions: b is non-zero; a and b have no common factors.
func quotToFloat64(stk *stack, a, b nat) (f float64, exact bool) {
	const (
		// float size in bits
		Fsize = 64

		// mantissa
		Msize = 52
		Msize1 = Msize + 1 // incl. implicit 1
		Msize2 = Msize1 + 1

		// exponent
		Esize = Fsize - Msize1
		Ebias = 1<<(Esize-1) - 1
		Emin = 1 - Ebias
		Emax = Ebias
	)

	// TODO(adonovan): specialize common degenerate cases: 1.0, integers.
	alen := a.bitLen()
	if alen == 0 {
		// a == 0, so a/b == 0 exactly.
		return 0, true
	}
	blen := b.bitLen()
	if blen == 0 {
		panic("division by zero")
	}

	// 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1)
	// (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B).
	// This is 2 or 3 more than the float64 mantissa field width of Msize:
	// - the optional extra bit is shifted away in step 3 below.
	// - the high-order 1 is omitted in "normal" representation;
	// - the low-order 1 will be used during rounding then discarded.
	exp := alen - blen
	var a2, b2 nat
	a2 = a2.set(a)
	b2 = b2.set(b)
	if shift := Msize2 - exp; shift > 0 {
		a2 = a2.lsh(a2, uint(shift))
	} else if shift < 0 {
		b2 = b2.lsh(b2, uint(-shift))
	}

	// 2. Compute quotient and remainder (q, r). NB: due to the
	// extra shift, the low-order bit of q is logically the
	// high-order bit of r.
	var q nat
	q, r := q.div(stk, a2, a2, b2) // (recycle a2)
	mantissa := low64(q)
	haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half

	// 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1
	// (in effect---we accomplish this incrementally).
	if mantissa>>Msize2 == 1 {
		if mantissa&1 == 1 {
			haveRem = true
		}
		mantissa >>= 1
		exp++
	}
	if mantissa>>Msize1 != 1 {
		panic(fmt.Sprintf("expected exactly %d bits of result", Msize2))
	}

	// 4. Rounding.
	if Emin-Msize <= exp && exp <= Emin {
		// Denormal case; lose 'shift' bits of precision.
		shift := uint(Emin - (exp - 1)) // [1..Esize1)
		lostbits := mantissa & (1<<shift - 1)
		haveRem = haveRem || lostbits != 0
		mantissa >>= shift
		exp = 2 - Ebias // == exp + shift
	}
	// Round q using round-half-to-even.
	exact = !haveRem
	if mantissa&1 != 0 {
		exact = false
		if haveRem || mantissa&2 != 0 {
			if mantissa++; mantissa >= 1<<Msize2 {
				// Complete rollover 11...1 => 100...0, so shift is safe
				mantissa >>= 1
				exp++
			}
		}
	}
	mantissa >>= 1 // discard rounding bit.  Mantissa now scaled by 1<<Msize1.

	f = math.Ldexp(float64(mantissa), exp-Msize1)
	if math.IsInf(f, 0) {
		// Overflow to infinity cannot be exact.
		exact = false
	}
	return
}
// Float32 returns the nearest float32 value for x and a bool indicating
// whether f represents x exactly. If the magnitude of x is too large to
// be represented by a float32, f is an infinity and exact is false.
// The sign of f always matches the sign of x, even if f == 0.
func (x *Rat) Float32() (f float32, exact bool) {
	denom := x.b.abs
	if len(denom) == 0 {
		// An uninitialized denominator stands for 1.
		denom = natOne
	}
	stk := getStack()
	defer stk.free()
	f, exact = quotToFloat32(stk, x.a.abs, denom)
	if x.a.neg {
		f = -f
	}
	return
}
// Float64 returns the nearest float64 value for x and a bool indicating
// whether f represents x exactly. If the magnitude of x is too large to
// be represented by a float64, f is an infinity and exact is false.
// The sign of f always matches the sign of x, even if f == 0.
func (x *Rat) Float64() (f float64, exact bool) {
	denom := x.b.abs
	if len(denom) == 0 {
		// An uninitialized denominator stands for 1.
		denom = natOne
	}
	stk := getStack()
	defer stk.free()
	f, exact = quotToFloat64(stk, x.a.abs, denom)
	if x.a.neg {
		f = -f
	}
	return
}
// SetFrac sets z to a/b and returns z.
// If b == 0, SetFrac panics.
func (z *Rat) SetFrac(a, b *Int) *Rat {
	z.a.neg = a.neg != b.neg
	babs := b.abs
	if len(babs) == 0 {
		panic("division by zero")
	}
	if &z.a == b || alias(z.a.abs, babs) {
		// Writing z.a below would clobber b's storage; detach first.
		babs = nat(nil).set(babs) // make a copy
	}
	z.a.abs = z.a.abs.set(a.abs)
	z.b.abs = z.b.abs.set(babs)
	return z.norm()
}
// SetFrac64 sets z to a/b and returns z.
// If b == 0, SetFrac64 panics.
func (z *Rat) SetFrac64(a, b int64) *Rat {
	if b == 0 {
		panic("division by zero")
	}
	// Keep the denominator positive; fold its sign into the numerator.
	neg := b < 0
	if neg {
		b = -b // for math.MinInt64 this overflows, but uint64(b) below still yields 1<<63
	}
	z.b.abs = z.b.abs.setUint64(uint64(b))
	z.a.SetInt64(a)
	if neg {
		z.a.neg = !z.a.neg
	}
	return z.norm()
}
// SetInt sets z to x (by making a copy of x) and returns z.
func (z *Rat) SetInt(x *Int) *Rat {
	// Copy x first: x may alias part of z, so the denominator
	// must not be touched before the numerator is set.
	z.a.Set(x)
	z.b.abs = z.b.abs.setWord(1)
	return z
}
// SetInt64 sets z to x and returns z.
func (z *Rat) SetInt64(x int64) *Rat {
	// An integer has denominator 1.
	z.b.abs = z.b.abs.setWord(1)
	z.a.SetInt64(x)
	return z
}
// SetUint64 sets z to x and returns z.
func (z *Rat) SetUint64(x uint64) *Rat {
	// An integer has denominator 1.
	z.b.abs = z.b.abs.setWord(1)
	z.a.SetUint64(x)
	return z
}
// Set sets z to x (by making a copy of x) and returns z.
func (z *Rat) Set(x *Rat) *Rat {
	if z != x {
		z.a.Set(&x.a)
		z.b.Set(&x.b)
	}
	if len(z.b.abs) == 0 {
		// Initialize an uninitialized denominator to 1.
		z.b.abs = z.b.abs.setWord(1)
	}
	return z
}
// Abs sets z to |x| (the absolute value of x) and returns z.
func (z *Rat) Abs(x *Rat) *Rat {
	z.Set(x)
	z.a.neg = false // the sign lives entirely on the numerator
	return z
}
// Neg sets z to -x and returns z.
func (z *Rat) Neg(x *Rat) *Rat {
	z.Set(x)
	if len(z.a.abs) == 0 {
		z.a.neg = false // 0 has no sign
	} else {
		z.a.neg = !z.a.neg
	}
	return z
}
// Inv sets z to 1/x and returns z.
// If x == 0, Inv panics.
func (z *Rat) Inv(x *Rat) *Rat {
	if len(x.a.abs) == 0 {
		panic("division by zero")
	}
	z.Set(x)
	// Exchange numerator and denominator magnitudes;
	// the sign stays on z.a.neg (z.b.neg is ignored).
	num := z.a.abs
	z.a.abs = z.b.abs
	z.b.abs = num
	return z
}
// Sign returns:
//   - -1 if x < 0;
//   - 0 if x == 0;
//   - +1 if x > 0.
func (x *Rat) Sign() int {
	// The denominator is always positive, so the numerator carries the sign.
	return x.a.Sign()
}
// IsInt reports whether the denominator of x is 1.
// (An uninitialized, zero-length denominator also stands for 1.)
func (x *Rat) IsInt() bool {
	b := x.b.abs
	return len(b) == 0 || b.cmp(natOne) == 0
}
// Num returns the numerator of x; it may be <= 0.
// The result is a reference to x's numerator; it
// may change if a new value is assigned to x, and vice versa.
// The sign of the numerator corresponds to the sign of x.
func (x *Rat) Num() *Int {
	// No copy: the caller shares storage with x.
	return &x.a
}
// Denom returns the denominator of x; it is always > 0.
// The result is a reference to x's denominator, unless
// x is an uninitialized (zero value) [Rat], in which case
// the result is a new [Int] of value 1. (To initialize x,
// any operation that sets x will do, including x.Set(x).)
// If the result is a reference to x's denominator it
// may change if a new value is assigned to x, and vice versa.
func (x *Rat) Denom() *Int {
	// Note that x.b.neg is guaranteed false.
	if len(x.b.abs) == 0 {
		// Uninitialized denominator: return a detached 1 rather than
		// mutating x (which a read-only accessor must not do).
		// Note: If this proves problematic, we could
		// panic instead and require the Rat to
		// be explicitly initialized.
		return &Int{abs: nat{1}}
	}
	return &x.b
}
// norm normalizes z after an operation: it clears the sign of zero,
// initializes a zero-length denominator to 1, and reduces a proper
// fraction to lowest terms. It returns z.
func (z *Rat) norm() *Rat {
	switch {
	case len(z.a.abs) == 0:
		// z == 0; normalize sign and denominator
		z.a.neg = false
		fallthrough
	case len(z.b.abs) == 0:
		// z is integer; normalize denominator
		z.b.abs = z.b.abs.setWord(1)
	default:
		// z is fraction; normalize numerator and denominator
		stk := getStack()
		defer stk.free()
		// Signs are cleared before the GCD (presumably lehmerGCD expects
		// non-negative operands — confirm) and the numerator's sign is
		// restored afterwards.
		neg := z.a.neg
		z.a.neg = false
		z.b.neg = false
		if f := NewInt(0).lehmerGCD(nil, nil, &z.a, &z.b); f.Cmp(intOne) != 0 {
			// Divide out the common factor f to reach lowest terms.
			z.a.abs, _ = z.a.abs.div(stk, nil, z.a.abs, f.abs)
			z.b.abs, _ = z.b.abs.div(stk, nil, z.b.abs, f.abs)
		}
		z.a.neg = neg
	}
	return z
}
// mulDenom sets z to the denominator product x*y (by taking into
// account that 0 values for x or y must be interpreted as 1) and
// returns z.
func mulDenom(stk *stack, z, x, y nat) nat {
	if len(x) == 0 {
		if len(y) == 0 {
			return z.setWord(1) // 1 * 1
		}
		return z.set(y) // 1 * y
	}
	if len(y) == 0 {
		return z.set(x) // x * 1
	}
	return z.mul(stk, x, y)
}
// scaleDenom sets z to the product x*f.
// If f == 0 (zero value of denominator), z is set to (a copy of) x.
func (z *Int) scaleDenom(stk *stack, x *Int, f nat) {
	if len(f) > 0 {
		z.abs = z.abs.mul(stk, x.abs, f)
		z.neg = x.neg
		return
	}
	// A zero-length denominator stands for 1, so the product is just x.
	z.Set(x)
}
// Cmp compares x and y and returns:
//   - -1 if x < y;
//   - 0 if x == y;
//   - +1 if x > y.
func (x *Rat) Cmp(y *Rat) int {
	stk := getStack()
	defer stk.free()
	// Cross-multiply: compare x.a*y.b against y.a*x.b.
	var lhs, rhs Int
	lhs.scaleDenom(stk, &x.a, y.b.abs)
	rhs.scaleDenom(stk, &y.a, x.b.abs)
	return lhs.Cmp(&rhs)
}
// Add sets z to the sum x+y and returns z.
func (z *Rat) Add(x, y *Rat) *Rat {
	stk := getStack()
	defer stk.free()
	// a/b + c/d = (a*d + c*b) / (b*d); norm() reduces the result.
	var p, q Int
	p.scaleDenom(stk, &x.a, y.b.abs)
	q.scaleDenom(stk, &y.a, x.b.abs)
	z.a.Add(&p, &q)
	z.b.abs = mulDenom(stk, z.b.abs, x.b.abs, y.b.abs)
	return z.norm()
}
// Sub sets z to the difference x-y and returns z.
func (z *Rat) Sub(x, y *Rat) *Rat {
	stk := getStack()
	defer stk.free()
	// a/b - c/d = (a*d - c*b) / (b*d); norm() reduces the result.
	var p, q Int
	p.scaleDenom(stk, &x.a, y.b.abs)
	q.scaleDenom(stk, &y.a, x.b.abs)
	z.a.Sub(&p, &q)
	z.b.abs = mulDenom(stk, z.b.abs, x.b.abs, y.b.abs)
	return z.norm()
}
// Mul sets z to the product x*y and returns z.
func (z *Rat) Mul(x, y *Rat) *Rat {
	stk := getStack()
	defer stk.free()

	if x == y {
		// a squared Rat is positive and can't be reduced (no need to call norm())
		z.a.neg = false
		z.a.abs = z.a.abs.sqr(stk, x.a.abs)
		if len(x.b.abs) == 0 {
			// An uninitialized denominator stands for 1, and 1² == 1.
			z.b.abs = z.b.abs.setWord(1)
		} else {
			z.b.abs = z.b.abs.sqr(stk, x.b.abs)
		}
		return z
	}
	z.a.mul(stk, &x.a, &y.a)
	z.b.abs = mulDenom(stk, z.b.abs, x.b.abs, y.b.abs)
	return z.norm()
}
// Quo sets z to the quotient x/y and returns z.
// If y == 0, Quo panics.
func (z *Rat) Quo(x, y *Rat) *Rat {
	if len(y.a.abs) == 0 {
		panic("division by zero")
	}
	stk := getStack()
	defer stk.free()
	// (a/b) / (c/d) = (a*d) / (c*b); norm() reduces the result.
	var num, den Int
	num.scaleDenom(stk, &x.a, y.b.abs)
	den.scaleDenom(stk, &y.a, x.b.abs)
	z.a.abs = num.abs
	z.b.abs = den.abs
	z.a.neg = num.neg != den.neg
	return z.norm()
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements rat-to-string conversion functions.
package big
import (
"errors"
"fmt"
"io"
"strconv"
"strings"
)
// ratTok reports whether ch may appear in a textual rational number
// (sign, slash separator, digits, decimal point, or exponent marker).
func ratTok(ch rune) bool {
	const ratChars = "+-/0123456789.eE"
	return strings.ContainsRune(ratChars, ch)
}
// ratZero exists only to support the compile-time interface check below.
var ratZero Rat
var _ fmt.Scanner = &ratZero // *Rat must implement fmt.Scanner
// Scan is a support routine for fmt.Scanner. It accepts the formats
// 'e', 'E', 'f', 'F', 'g', 'G', and 'v'. All formats are equivalent.
func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
	// NOTE: the token is read before the verb is validated, so the
	// input characters are consumed even when the verb is rejected.
	tok, err := s.Token(true, ratTok)
	if err != nil {
		return err
	}
	if !strings.ContainsRune("efgEFGv", ch) {
		return errors.New("Rat.Scan: invalid verb")
	}
	if _, ok := z.SetString(string(tok)); !ok {
		return errors.New("Rat.Scan: invalid syntax")
	}
	return nil
}
// SetString sets z to the value of s and returns z and a boolean indicating
// success. s can be given as a (possibly signed) fraction "a/b", or as a
// floating-point number optionally followed by an exponent.
// If a fraction is provided, both the dividend and the divisor may be a
// decimal integer or independently use a prefix of “0b”, “0” or “0o”,
// or “0x” (or their upper-case variants) to denote a binary, octal, or
// hexadecimal integer, respectively. The divisor may not be signed.
// If a floating-point number is provided, it may be in decimal form or
// use any of the same prefixes as above but for “0” to denote a non-decimal
// mantissa. A leading “0” is considered a decimal leading 0; it does not
// indicate octal representation in this case.
// An optional base-10 “e” or base-2 “p” (or their upper-case variants)
// exponent may be provided as well, except for hexadecimal floats which
// only accept an (optional) “p” exponent (because an “e” or “E” cannot
// be distinguished from a mantissa digit). If the exponent's absolute value
// is too large, the operation may fail.
// The entire string, not just a prefix, must be valid for success. If the
// operation failed, the value of z is undefined but the returned value is nil.
func (z *Rat) SetString(s string) (*Rat, bool) {
	if len(s) == 0 {
		return nil, false
	}
	// len(s) > 0

	// parse fraction a/b, if any
	if sep := strings.Index(s, "/"); sep >= 0 {
		if _, ok := z.a.SetString(s[:sep], 0); !ok {
			return nil, false
		}
		r := strings.NewReader(s[sep+1:])
		var err error
		if z.b.abs, _, _, err = z.b.abs.scan(r, 0, false); err != nil {
			return nil, false
		}
		// entire string must have been consumed
		if _, err = r.ReadByte(); err != io.EOF {
			return nil, false
		}
		// a zero denominator is invalid
		if len(z.b.abs) == 0 {
			return nil, false
		}
		return z.norm(), true
	}

	// parse floating-point number
	r := strings.NewReader(s)

	// sign
	neg, err := scanSign(r)
	if err != nil {
		return nil, false
	}

	// mantissa
	var base int
	var fcount int // fractional digit count; valid if <= 0
	z.a.abs, base, fcount, err = z.a.abs.scan(r, 0, true)
	if err != nil {
		return nil, false
	}

	// exponent
	var exp int64
	var ebase int
	exp, ebase, err = scanExponent(r, true, true)
	if err != nil {
		return nil, false
	}

	// there should be no unread characters left
	if _, err = r.ReadByte(); err != io.EOF {
		return nil, false
	}

	// special-case 0 (see also issue #16176)
	if len(z.a.abs) == 0 {
		return z.norm(), true
	}
	// len(z.a.abs) > 0

	// The mantissa may have a radix point (fcount <= 0) and there
	// may be a nonzero exponent exp. The radix point amounts to a
	// division by base**(-fcount), which equals a multiplication by
	// base**fcount. An exponent means multiplication by ebase**exp.
	// Multiplications are commutative, so we can apply them in any
	// order. We only have powers of 2 and 10, and we split powers
	// of 10 into the product of the same powers of 2 and 5. This
	// may reduce the size of shift/multiplication factors or
	// divisors required to create the final fraction, depending
	// on the actual floating-point value.

	// determine binary or decimal exponent contribution of radix point
	var exp2, exp5 int64
	if fcount < 0 {
		// The mantissa has a radix point ddd.dddd; and
		// -fcount is the number of digits to the right
		// of '.'. Adjust relevant exponent accordingly.
		d := int64(fcount)
		switch base {
		case 10:
			exp5 = d
			fallthrough // 10**e == 5**e * 2**e
		case 2:
			exp2 = d
		case 8:
			exp2 = d * 3 // octal digits are 3 bits each
		case 16:
			exp2 = d * 4 // hexadecimal digits are 4 bits each
		default:
			panic("unexpected mantissa base")
		}
		// fcount consumed - not needed anymore
	}

	// take actual exponent into account
	switch ebase {
	case 10:
		exp5 += exp
		fallthrough // see fallthrough above
	case 2:
		exp2 += exp
	default:
		panic("unexpected exponent base")
	}
	// exp consumed - not needed anymore

	stk := getStack()
	defer stk.free()

	// apply exp5 contributions
	// (start with exp5 so the numbers to multiply are smaller)
	if exp5 != 0 {
		n := exp5
		if n < 0 {
			n = -n
			if n < 0 {
				// This can occur if -n overflows. -(-1 << 63) would become
				// -1 << 63, which is still negative.
				return nil, false
			}
		}
		if n > 1e6 {
			return nil, false // avoid excessively large exponents
		}
		pow5 := z.b.abs.expNN(stk, natFive, nat(nil).setWord(Word(n)), nil, false) // use underlying array of z.b.abs
		if exp5 > 0 {
			// Positive power of 5: multiply the numerator.
			z.a.abs = z.a.abs.mul(stk, z.a.abs, pow5)
			z.b.abs = z.b.abs.setWord(1)
		} else {
			// Negative power of 5: it becomes the denominator.
			z.b.abs = pow5
		}
	} else {
		z.b.abs = z.b.abs.setWord(1)
	}

	// apply exp2 contributions
	if exp2 < -1e7 || exp2 > 1e7 {
		return nil, false // avoid excessively large exponents
	}
	if exp2 > 0 {
		z.a.abs = z.a.abs.lsh(z.a.abs, uint(exp2))
	} else if exp2 < 0 {
		z.b.abs = z.b.abs.lsh(z.b.abs, uint(-exp2))
	}

	z.a.neg = neg && len(z.a.abs) > 0 // 0 has no sign

	return z.norm(), true
}

// scanExponent scans the longest possible prefix of r representing a base 10
// (“e”, “E”) or a base 2 (“p”, “P”) exponent, if any. It returns the
// exponent, the exponent base (10 or 2), or a read or syntax error, if any.
//
// If sepOk is set, an underscore character “_” may appear between successive
// exponent digits; such underscores do not change the value of the exponent.
// Incorrect placement of underscores is reported as an error if there are no
// other errors. If sepOk is not set, underscores are not recognized and thus
// terminate scanning like any other character that is not a valid digit.
//
//	exponent = ( "e" | "E" | "p" | "P" ) [ sign ] digits .
//	sign     = "+" | "-" .
//	digits   = digit { [ '_' ] digit } .
//	digit    = "0" ... "9" .
//
// A base 2 exponent is only permitted if base2ok is set.
func scanExponent(r io.ByteScanner, base2ok, sepOk bool) (exp int64, base int, err error) {
	// one char look-ahead
	ch, err := r.ReadByte()
	if err != nil {
		if err == io.EOF {
			// No exponent at all is not an error.
			err = nil
		}
		return 0, 10, err
	}

	// exponent char
	switch ch {
	case 'e', 'E':
		base = 10
	case 'p', 'P':
		if base2ok {
			base = 2
			break // ok
		}
		fallthrough // binary exponent not permitted
	default:
		r.UnreadByte() // ch does not belong to exponent anymore
		return 0, 10, nil
	}

	// sign
	// A '-' sign is kept in digits so ParseInt below sees it;
	// a '+' sign is simply dropped.
	var digits []byte
	ch, err = r.ReadByte()
	if err == nil && (ch == '+' || ch == '-') {
		if ch == '-' {
			digits = append(digits, '-')
		}
		ch, err = r.ReadByte()
	}

	// prev encodes the previously seen char: it is one
	// of '_', '0' (a digit), or '.' (anything else). A
	// valid separator '_' may only occur after a digit.
	prev := '.'
	invalSep := false

	// exponent value
	hasDigits := false
	for err == nil {
		if '0' <= ch && ch <= '9' {
			digits = append(digits, ch)
			prev = '0'
			hasDigits = true
		} else if ch == '_' && sepOk {
			if prev != '0' {
				invalSep = true
			}
			prev = '_'
		} else {
			r.UnreadByte() // ch does not belong to number anymore
			break
		}
		ch, err = r.ReadByte()
	}

	if err == io.EOF {
		err = nil
	}
	if err == nil && !hasDigits {
		err = errNoDigits
	}
	if err == nil {
		exp, err = strconv.ParseInt(string(digits), 10, 64)
	}
	// other errors take precedence over invalid separators
	// (a trailing '_' is also invalid)
	if err == nil && (invalSep || prev == '_') {
		err = errInvalSep
	}

	return
}
// String returns a string representation of x in the form "a/b" (even if b == 1).
func (x *Rat) String() string {
	b := x.marshal(nil)
	return string(b)
}
// marshal implements [Rat.String] returning a slice of bytes.
// It appends the string representation of x in the form "a/b" (even if b == 1) to buf,
// and returns the extended buffer.
func (x *Rat) marshal(buf []byte) []byte {
	buf = x.a.Append(buf, 10)
	buf = append(buf, '/')
	if len(x.b.abs) == 0 {
		// An empty denominator nat represents 1.
		return append(buf, '1')
	}
	return x.b.Append(buf, 10)
}
// RatString returns a string representation of x in the form "a/b" if b != 1,
// and in the form "a" if b == 1.
func (x *Rat) RatString() string {
	if !x.IsInt() {
		return x.String()
	}
	return x.a.String()
}
// FloatString returns a string representation of x in decimal form with prec
// digits of precision after the radix point. The last digit is rounded to
// nearest, with halves rounded away from zero.
func (x *Rat) FloatString(prec int) string {
	var buf []byte

	if x.IsInt() {
		// Integral value: print the numerator and pad with prec zeros.
		buf = x.a.Append(buf, 10)
		if prec > 0 {
			buf = append(buf, '.')
			for i := prec; i > 0; i-- {
				buf = append(buf, '0')
			}
		}
		return string(buf)
	}
	// x.b.abs != 0

	stk := getStack()
	defer stk.free()

	// q is the integer part of |x.a|/x.b, r the remainder.
	q, r := nat(nil).div(stk, nat(nil), x.a.abs, x.b.abs)

	// p = 10**prec (scale factor for the fractional digits).
	p := natOne
	if prec > 0 {
		p = nat(nil).expNN(stk, natTen, nat(nil).setUint64(uint64(prec)), nil, false)
	}

	// Scale the remainder; r becomes the fractional digits, r2 the
	// discarded tail used for rounding below.
	r = r.mul(stk, r, p)
	r, r2 := r.div(stk, nat(nil), r, x.b.abs)

	// see if we need to round up
	// (2*r2 >= x.b means the discarded tail is at least one half)
	r2 = r2.add(r2, r2)
	if x.b.abs.cmp(r2) <= 0 {
		r = r.add(r, natOne)
		if r.cmp(p) >= 0 {
			// The fractional part carried over into the integer part.
			q = nat(nil).add(q, natOne)
			r = nat(nil).sub(r, p)
		}
	}

	if x.a.neg {
		buf = append(buf, '-')
	}
	buf = append(buf, q.utoa(10)...) // itoa ignores sign if q == 0

	if prec > 0 {
		buf = append(buf, '.')
		rs := r.utoa(10)
		// Left-pad the fractional digits with zeros to prec digits.
		for i := prec - len(rs); i > 0; i-- {
			buf = append(buf, '0')
		}
		buf = append(buf, rs...)
	}

	return string(buf)
}

// Note: FloatPrec (below) is in this file rather than rat.go because
// its results are relevant for decimal representation/printing.

// FloatPrec returns the number n of non-repeating digits immediately
// following the decimal point of the decimal representation of x.
// The boolean result indicates whether a decimal representation of x
// with that many fractional digits is exact or rounded.
//
// Examples:
//
//	x      n    exact    decimal representation n fractional digits
//	0      0    true     0
//	1      0    true     1
//	1/2    1    true     0.5
//	1/3    0    false    0     (0.333... rounded)
//	1/4    2    true     0.25
//	1/6    1    false    0.2   (0.166... rounded)
func (x *Rat) FloatPrec() (n int, exact bool) {
	stk := getStack()
	defer stk.free()

	// Determine q and largest p2, p5 such that d = q·2^p2·5^p5.
	// The results n, exact are:
	//
	//	n = max(p2, p5)
	//	exact = q == 1
	//
	// For details see:
	// https://en.wikipedia.org/wiki/Repeating_decimal#Reciprocals_of_integers_not_coprime_to_10
	d := x.Denom().abs // d >= 1

	// Determine p2 by counting factors of 2.
	// p2 corresponds to the trailing zero bits in d.
	// Do this first to reduce q as much as possible.
	var q nat
	p2 := d.trailingZeroBits()
	q = q.rsh(d, p2)

	// Determine p5 by counting factors of 5.
	// Build a table starting with an initial power of 5,
	// and use repeated squaring until the factor doesn't
	// divide q anymore. Then use the table to determine
	// the power of 5 in q.
	const fp = 13        // f == 5^fp
	var tab []nat        // tab[i] == (5^fp)^(2^i) == 5^(fp·2^i)
	f := nat{1220703125} // == 5^fp (must fit into a uint32 Word)
	var t, r nat         // temporaries
	for {
		if _, r = t.div(stk, r, q, f); len(r) != 0 {
			break // f doesn't divide q evenly
		}
		tab = append(tab, f)
		f = nat(nil).sqr(stk, f) // nat(nil) to ensure a new f for each table entry
	}

	// Factor q using the table entries, if any.
	// We start with the largest factor f = tab[len(tab)-1]
	// that evenly divides q. It does so at most once because
	// otherwise f·f would also divide q. That can't be true
	// because f·f is the next higher table entry, contradicting
	// how f was chosen in the first place.
	// The same reasoning applies to the subsequent factors.
	var p5 uint
	for i := len(tab) - 1; i >= 0; i-- {
		if t, r = t.div(stk, r, q, tab[i]); len(r) == 0 {
			p5 += fp * (1 << i) // tab[i] == 5^(fp·2^i)
			q = q.set(t)
		}
	}

	// If fp != 1, we may still have multiples of 5 left.
	for {
		if t, r = t.div(stk, r, q, natFive); len(r) != 0 {
			break
		}
		p5++
		q = q.set(t)
	}

	return int(max(p2, p5)), q.cmp(natOne) == 0
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements encoding/decoding of Rats.
package big
import (
"errors"
"fmt"
"internal/byteorder"
"math"
)
// Gob codec version. Permits backward-compatible changes to the encoding.
const ratGobVersion byte = 1

// GobEncode implements the [encoding/gob.GobEncoder] interface.
//
// The encoding is: one header byte (version<<1 | sign bit), a big-endian
// 4-byte numerator byte length, the numerator bytes, then the denominator
// bytes.
func (x *Rat) GobEncode() ([]byte, error) {
	if x == nil {
		return nil, nil
	}
	buf := make([]byte, 1+4+(len(x.a.abs)+len(x.b.abs))*_S) // extra bytes for version and sign bit (1), and numerator length (4)
	// bytes fills from the right: i and j are the start offsets of
	// the denominator and numerator within buf, respectively.
	i := x.b.abs.bytes(buf)
	j := x.a.abs.bytes(buf[:i])
	n := i - j // numerator length in bytes
	if int(uint32(n)) != n {
		// this should never happen
		return nil, errors.New("Rat.GobEncode: numerator too large")
	}
	byteorder.BEPutUint32(buf[j-4:j], uint32(n))
	j -= 1 + 4
	b := ratGobVersion << 1 // make space for sign bit
	if x.a.neg {
		b |= 1
	}
	buf[j] = b
	return buf[j:], nil
}

// GobDecode implements the [encoding/gob.GobDecoder] interface.
func (z *Rat) GobDecode(buf []byte) error {
	if len(buf) == 0 {
		// Other side sent a nil or default value.
		*z = Rat{}
		return nil
	}
	if len(buf) < 5 {
		return errors.New("Rat.GobDecode: buffer too small")
	}
	b := buf[0]
	if b>>1 != ratGobVersion {
		return fmt.Errorf("Rat.GobDecode: encoding version %d not supported", b>>1)
	}
	const j = 1 + 4 // offset of the numerator (header + length field)
	ln := byteorder.BEUint32(buf[j-4 : j])
	if uint64(ln) > math.MaxInt-j {
		// Reject lengths that would overflow the index computation below.
		return errors.New("Rat.GobDecode: invalid length")
	}
	i := j + int(ln) // offset of the denominator
	if len(buf) < i {
		return errors.New("Rat.GobDecode: buffer too small")
	}
	z.a.neg = b&1 != 0
	z.a.abs = z.a.abs.setBytes(buf[j:i])
	z.b.abs = z.b.abs.setBytes(buf[i:])
	return nil
}
// AppendText implements the [encoding.TextAppender] interface.
func (x *Rat) AppendText(b []byte) ([]byte, error) {
	if !x.IsInt() {
		return x.marshal(b), nil
	}
	return x.a.AppendText(b)
}
// MarshalText implements the [encoding.TextMarshaler] interface.
func (x *Rat) MarshalText() (text []byte, err error) {
	text, err = x.AppendText(nil)
	return
}
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
func (z *Rat) UnmarshalText(text []byte) error {
	// TODO(gri): get rid of the []byte/string conversion
	if _, ok := z.SetString(string(text)); ok {
		return nil
	}
	return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Rat", text)
}
// Code generated by "stringer -type=RoundingMode"; DO NOT EDIT.
package big
import "strconv"
// Compile-time check that the RoundingMode constant values match the
// generated tables below.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ToNearestEven-0]
	_ = x[ToNearestAway-1]
	_ = x[ToZero-2]
	_ = x[AwayFromZero-3]
	_ = x[ToNegativeInf-4]
	_ = x[ToPositiveInf-5]
}

// _RoundingMode_name concatenates all mode names; _RoundingMode_index
// holds the start offset of each name within it.
const _RoundingMode_name = "ToNearestEvenToNearestAwayToZeroAwayFromZeroToNegativeInfToPositiveInf"

var _RoundingMode_index = [...]uint8{0, 13, 26, 32, 44, 57, 70}

// String returns the name of the rounding mode, or a numeric fallback
// for out-of-range values.
func (i RoundingMode) String() string {
	if i >= RoundingMode(len(_RoundingMode_index)-1) {
		return "RoundingMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _RoundingMode_name[_RoundingMode_index[i]:_RoundingMode_index[i+1]]
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package big
import (
"math"
"sync"
)
// threeOnce lazily caches the constant 3.0 used by sqrtInverse.
var threeOnce struct {
	sync.Once
	v *Float
}

// three returns the shared *Float constant 3.0, allocating it once.
func three() *Float {
	threeOnce.Do(func() {
		threeOnce.v = NewFloat(3.0)
	})
	return threeOnce.v
}

// Sqrt sets z to the rounded square root of x, and returns it.
//
// If z's precision is 0, it is changed to x's precision before the
// operation. Rounding is performed according to z's precision and
// rounding mode, but z's accuracy is not computed. Specifically, the
// result of z.Acc() is undefined.
//
// The function panics if z < 0. The value of z is undefined in that
// case.
func (z *Float) Sqrt(x *Float) *Float {
	if debugFloat {
		x.validate()
	}

	if z.prec == 0 {
		z.prec = x.prec
	}

	if x.Sign() == -1 {
		// following IEEE754-2008 (section 7.2)
		panic(ErrNaN{"square root of negative operand"})
	}

	// handle ±0 and +∞
	if x.form != finite {
		z.acc = Exact
		z.form = x.form
		z.neg = x.neg // IEEE754-2008 requires √±0 = ±0
		return z
	}

	// MantExp sets the argument's precision to the receiver's, and
	// when z.prec > x.prec this will lower z.prec. Restore it after
	// the MantExp call.
	prec := z.prec
	b := x.MantExp(z)
	z.prec = prec

	// Compute √(z·2**b) as
	//	√( z)·2**(½b)     if b is even
	//	√(2z)·2**(⌊½b⌋)   if b > 0 is odd
	//	√(½z)·2**(⌈½b⌉)   if b < 0 is odd
	switch b % 2 {
	case 0:
		// nothing to do
	case 1:
		z.exp++
	case -1:
		z.exp--
	}
	// 0.25 <= z < 2.0

	// Solving 1/x² - z = 0 avoids Quo calls and is faster, especially
	// for high precisions.
	z.sqrtInverse(z)

	// re-attach halved exponent
	return z.SetMantExp(z, b/2)
}

// Compute √x (to z.prec precision) by solving
//
//	1/t² - x = 0
//
// for t (using Newton's method), and then inverting.
func (z *Float) sqrtInverse(x *Float) {
	// let
	//	f(t) = 1/t² - x
	// then
	//	g(t) = f(t)/f'(t) = -½t(1 - xt²)
	// and the next guess is given by
	//	t2 = t - g(t) = ½t(3 - xt²)
	u := newFloat(z.prec)
	v := newFloat(z.prec)
	three := three()
	ng := func(t *Float) *Float {
		u.prec = t.prec
		v.prec = t.prec
		u.Mul(t, t)     // u = t²
		u.Mul(x, u)     //   = xt²
		v.Sub(three, u) // v = 3 - xt²
		u.Mul(t, v)     // u = t(3 - xt²)
		u.exp--         //   = ½t(3 - xt²)
		return t.Set(u)
	}

	// Seed the iteration with a float64 approximation, then double
	// the working precision each step until it exceeds z.prec plus
	// 32 guard bits.
	xf, _ := x.Float64()
	sqi := newFloat(z.prec)
	sqi.SetFloat64(1 / math.Sqrt(xf))
	for prec := z.prec + 32; sqi.prec < prec; {
		sqi.prec *= 2
		sqi = ng(sqi)
	}
	// sqi = 1/√x

	// x/√x = √x
	z.Mul(x, sqi)
}
// newFloat returns a new *Float with space for twice the given
// precision.
func newFloat(prec2 uint32) *Float {
	f := &Float{}
	// nat.make ensures the slice length is > 0
	f.mant = f.mant.make(2 * int(prec2/_W))
	return f
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run make_tables.go
// Package bits implements bit counting and manipulation
// functions for the predeclared unsigned integer types.
//
// Functions in this package may be implemented directly by
// the compiler, for better performance. For those functions
// the code in this package will not be used. Which
// functions are implemented by the compiler depends on the
// architecture and the Go release.
package bits
// uintSize is 32 on 32-bit platforms and 64 on 64-bit platforms:
// ^uint(0)>>63 evaluates to 0 or 1 depending on the width of uint.
const uintSize = 32 << (^uint(0) >> 63) // 32 or 64

// UintSize is the size of a uint in bits.
const UintSize = uintSize

// --- LeadingZeros ---

// LeadingZeros returns the number of leading zero bits in x; the result is [UintSize] for x == 0.
func LeadingZeros(x uint) int { return UintSize - Len(x) }

// LeadingZeros8 returns the number of leading zero bits in x; the result is 8 for x == 0.
func LeadingZeros8(x uint8) int { return 8 - Len8(x) }

// LeadingZeros16 returns the number of leading zero bits in x; the result is 16 for x == 0.
func LeadingZeros16(x uint16) int { return 16 - Len16(x) }

// LeadingZeros32 returns the number of leading zero bits in x; the result is 32 for x == 0.
func LeadingZeros32(x uint32) int { return 32 - Len32(x) }

// LeadingZeros64 returns the number of leading zero bits in x; the result is 64 for x == 0.
func LeadingZeros64(x uint64) int { return 64 - Len64(x) }

// --- TrailingZeros ---

// deBruijn32 and deBruijn64 are de Bruijn sequences; the tables map
// each distinct top bit pattern back to the original shift amount.
// See http://keithandkatie.com/keith/papers/debruijn.html
const deBruijn32 = 0x077CB531

var deBruijn32tab = [32]byte{
	0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
	31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
}

const deBruijn64 = 0x03f79d71b4ca8b09

var deBruijn64tab = [64]byte{
	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}
// TrailingZeros returns the number of trailing zero bits in x; the result is [UintSize] for x == 0.
func TrailingZeros(x uint) int {
	// Dispatch to the fixed-width variant matching uint.
	if UintSize == 64 {
		return TrailingZeros64(uint64(x))
	}
	return TrailingZeros32(uint32(x))
}
// TrailingZeros8 returns the number of trailing zero bits in x; the result is 8 for x == 0.
func TrailingZeros8(x uint8) int {
	// Small enough for a direct 256-entry table lookup.
	return int(ntz8tab[x])
}

// TrailingZeros16 returns the number of trailing zero bits in x; the result is 16 for x == 0.
func TrailingZeros16(x uint16) int {
	if x == 0 {
		return 16
	}
	// see comment in TrailingZeros64
	return int(deBruijn32tab[uint32(x&-x)*deBruijn32>>(32-5)])
}

// TrailingZeros32 returns the number of trailing zero bits in x; the result is 32 for x == 0.
func TrailingZeros32(x uint32) int {
	if x == 0 {
		return 32
	}
	// see comment in TrailingZeros64
	return int(deBruijn32tab[(x&-x)*deBruijn32>>(32-5)])
}

// TrailingZeros64 returns the number of trailing zero bits in x; the result is 64 for x == 0.
func TrailingZeros64(x uint64) int {
	if x == 0 {
		return 64
	}
	// If popcount is fast, replace code below with return popcount(^x & (x - 1)).
	//
	// x & -x leaves only the right-most bit set in the word. Let k be the
	// index of that bit. Since only a single bit is set, the value is two
	// to the power of k. Multiplying by a power of two is equivalent to
	// left shifting, in this case by k bits. The de Bruijn (64 bit) constant
	// is such that all six bit, consecutive substrings are distinct.
	// Therefore, if we have a left shifted version of this constant we can
	// find by how many bits it was shifted by looking at which six bit
	// substring ended up at the top of the word.
	// (Knuth, volume 4, section 7.3.1)
	return int(deBruijn64tab[(x&-x)*deBruijn64>>(64-6)])
}
// --- OnesCount ---

// Masks with alternating runs of 0s and 1s, used by the OnesCount,
// Reverse, and ReverseBytes implementations below.
const m0 = 0x5555555555555555 // 01010101 ...
const m1 = 0x3333333333333333 // 00110011 ...
const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ...
const m3 = 0x00ff00ff00ff00ff // 8 zeros, 8 ones, ...
const m4 = 0x0000ffff0000ffff // 16 zeros, 16 ones, ...
// OnesCount returns the number of one bits ("population count") in x.
func OnesCount(x uint) int {
	// Dispatch to the fixed-width variant matching uint.
	if UintSize == 64 {
		return OnesCount64(uint64(x))
	}
	return OnesCount32(uint32(x))
}
// OnesCount8 returns the number of one bits ("population count") in x.
func OnesCount8(x uint8) int {
	// Direct per-byte table lookup.
	return int(pop8tab[x])
}

// OnesCount16 returns the number of one bits ("population count") in x.
func OnesCount16(x uint16) int {
	// Sum the per-byte counts of both bytes.
	return int(pop8tab[x>>8] + pop8tab[x&0xff])
}

// OnesCount32 returns the number of one bits ("population count") in x.
func OnesCount32(x uint32) int {
	// Sum the per-byte counts of all four bytes.
	return int(pop8tab[x>>24] + pop8tab[x>>16&0xff] + pop8tab[x>>8&0xff] + pop8tab[x&0xff])
}
// OnesCount64 returns the number of one bits ("population count") in x.
func OnesCount64(x uint64) int {
	// Parallel bit summation ("Hacker's Delight", Chap. 5):
	// fold adjacent 1-bit, 2-bit, then 4-bit fields into sums.
	// After the 4-bit step every byte holds its own popcount
	// (<= 8), so the remaining shifted adds cannot carry between
	// bytes and no further masking is needed until the end; the
	// final mask extracts the total, which is at most 64 < 128.
	const (
		c0 = 0x5555555555555555 // 01010101 ...
		c1 = 0x3333333333333333 // 00110011 ...
		c2 = 0x0f0f0f0f0f0f0f0f // 00001111 ...
	)
	x = x&c0 + x>>1&c0
	x = x&c1 + x>>2&c1
	x = (x + x>>4) & c2
	x += x >> 8
	x += x >> 16
	x += x >> 32
	return int(x & 0x7f)
}
// --- RotateLeft ---

// RotateLeft returns the value of x rotated left by (k mod [UintSize]) bits.
// To rotate x right by k bits, call RotateLeft(x, -k).
//
// This function's execution time does not depend on the inputs.
func RotateLeft(x uint, k int) uint {
	// Dispatch to the fixed-width variant matching uint.
	if UintSize == 64 {
		return uint(RotateLeft64(uint64(x), k))
	}
	return uint(RotateLeft32(uint32(x), k))
}
// RotateLeft8 returns the value of x rotated left by (k mod 8) bits.
// To rotate x right by k bits, call RotateLeft8(x, -k).
//
// This function's execution time does not depend on the inputs.
func RotateLeft8(x uint8, k int) uint8 {
	// Reducing the unsigned shift count modulo 8 handles negative k
	// (rotate right) via unsigned wraparound.
	s := uint(k) % 8
	return x<<s | x>>(8-s)
}
// RotateLeft16 returns the value of x rotated left by (k mod 16) bits.
// To rotate x right by k bits, call RotateLeft16(x, -k).
//
// This function's execution time does not depend on the inputs.
func RotateLeft16(x uint16, k int) uint16 {
	// Reducing the unsigned shift count modulo 16 handles negative k
	// (rotate right) via unsigned wraparound.
	s := uint(k) % 16
	return x<<s | x>>(16-s)
}
// RotateLeft32 returns the value of x rotated left by (k mod 32) bits.
// To rotate x right by k bits, call RotateLeft32(x, -k).
//
// This function's execution time does not depend on the inputs.
func RotateLeft32(x uint32, k int) uint32 {
	// Reducing the unsigned shift count modulo 32 handles negative k
	// (rotate right) via unsigned wraparound.
	s := uint(k) % 32
	return x<<s | x>>(32-s)
}
// RotateLeft64 returns the value of x rotated left by (k mod 64) bits.
// To rotate x right by k bits, call RotateLeft64(x, -k).
//
// This function's execution time does not depend on the inputs.
func RotateLeft64(x uint64, k int) uint64 {
	// Reducing the unsigned shift count modulo 64 handles negative k
	// (rotate right) via unsigned wraparound.
	s := uint(k) % 64
	return x<<s | x>>(64-s)
}
// --- Reverse ---

// Reverse returns the value of x with its bits in reversed order.
func Reverse(x uint) uint {
	// Dispatch to the fixed-width variant matching uint.
	if UintSize == 64 {
		return uint(Reverse64(uint64(x)))
	}
	return uint(Reverse32(uint32(x)))
}
// Reverse8 returns the value of x with its bits in reversed order.
func Reverse8(x uint8) uint8 {
	// Direct per-byte table lookup.
	return rev8tab[x]
}

// Reverse16 returns the value of x with its bits in reversed order.
func Reverse16(x uint16) uint16 {
	// Reverse each byte via the table and swap the two bytes.
	return uint16(rev8tab[x>>8]) | uint16(rev8tab[x&0xff])<<8
}
// Reverse32 returns the value of x with its bits in reversed order.
func Reverse32(x uint32) uint32 {
	// Swap ever larger groups: single bits, pairs, nibbles,
	// bytes, and finally the two 16-bit halves.
	x = x>>1&0x55555555 | x&0x55555555<<1
	x = x>>2&0x33333333 | x&0x33333333<<2
	x = x>>4&0x0f0f0f0f | x&0x0f0f0f0f<<4
	x = x>>8&0x00ff00ff | x&0x00ff00ff<<8
	return x>>16 | x<<16
}
// Reverse64 returns the value of x with its bits in reversed order.
func Reverse64(x uint64) uint64 {
	// Swap ever larger groups: single bits, pairs, nibbles,
	// bytes, 16-bit fields, and finally the two 32-bit halves.
	x = x>>1&0x5555555555555555 | x&0x5555555555555555<<1
	x = x>>2&0x3333333333333333 | x&0x3333333333333333<<2
	x = x>>4&0x0f0f0f0f0f0f0f0f | x&0x0f0f0f0f0f0f0f0f<<4
	x = x>>8&0x00ff00ff00ff00ff | x&0x00ff00ff00ff00ff<<8
	x = x>>16&0x0000ffff0000ffff | x&0x0000ffff0000ffff<<16
	return x>>32 | x<<32
}
// --- ReverseBytes ---

// ReverseBytes returns the value of x with its bytes in reversed order.
//
// This function's execution time does not depend on the inputs.
func ReverseBytes(x uint) uint {
	// Dispatch to the fixed-width variant matching uint.
	if UintSize == 64 {
		return uint(ReverseBytes64(uint64(x)))
	}
	return uint(ReverseBytes32(uint32(x)))
}
// ReverseBytes16 returns the value of x with its bytes in reversed order.
//
// This function's execution time does not depend on the inputs.
func ReverseBytes16(x uint16) uint16 {
	// Swap the two bytes.
	return x<<8 | x>>8
}
// ReverseBytes32 returns the value of x with its bytes in reversed order.
//
// This function's execution time does not depend on the inputs.
func ReverseBytes32(x uint32) uint32 {
	// Swap adjacent bytes, then the two 16-bit halves.
	const m = 0x00ff00ff
	x = x>>8&m | x&m<<8
	return x>>16 | x<<16
}
// ReverseBytes64 returns the value of x with its bytes in reversed order.
//
// This function's execution time does not depend on the inputs.
func ReverseBytes64(x uint64) uint64 {
	// Swap adjacent bytes, then adjacent 16-bit fields, then the
	// two 32-bit halves.
	x = x>>8&0x00ff00ff00ff00ff | x&0x00ff00ff00ff00ff<<8
	x = x>>16&0x0000ffff0000ffff | x&0x0000ffff0000ffff<<16
	return x>>32 | x<<32
}
// --- Len ---

// Len returns the minimum number of bits required to represent x; the result is 0 for x == 0.
func Len(x uint) int {
	if UintSize == 32 {
		return Len32(uint32(x))
	}
	return Len64(uint64(x))
}

// Len8 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
func Len8(x uint8) int {
	// Direct per-byte table lookup.
	return int(len8tab[x])
}

// Len16 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
func Len16(x uint16) (n int) {
	// Narrow to the highest nonzero byte, counting discarded bits in n,
	// then finish with a table lookup.
	if x >= 1<<8 {
		x >>= 8
		n = 8
	}
	return n + int(len8tab[uint8(x)])
}

// Len32 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
func Len32(x uint32) (n int) {
	// Binary search on halves, then bytes; finish with a table lookup.
	if x >= 1<<16 {
		x >>= 16
		n = 16
	}
	if x >= 1<<8 {
		x >>= 8
		n += 8
	}
	return n + int(len8tab[uint8(x)])
}

// Len64 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
func Len64(x uint64) (n int) {
	// Binary search on halves, quarters, then bytes; finish with a
	// table lookup.
	if x >= 1<<32 {
		x >>= 32
		n = 32
	}
	if x >= 1<<16 {
		x >>= 16
		n += 16
	}
	if x >= 1<<8 {
		x >>= 8
		n += 8
	}
	return n + int(len8tab[uint8(x)])
}
// --- Add with carry ---

// Add returns the sum with carry of x, y and carry: sum = x + y + carry.
// The carry input must be 0 or 1; otherwise the behavior is undefined.
// The carryOut output is guaranteed to be 0 or 1.
//
// This function's execution time does not depend on the inputs.
func Add(x, y, carry uint) (sum, carryOut uint) {
	// Dispatch to the fixed-width variant matching uint.
	if UintSize == 64 {
		s, c := Add64(uint64(x), uint64(y), uint64(carry))
		return uint(s), uint(c)
	}
	s, c := Add32(uint32(x), uint32(y), uint32(carry))
	return uint(s), uint(c)
}
// Add32 returns the sum with carry of x, y and carry: sum = x + y + carry.
// The carry input must be 0 or 1; otherwise the behavior is undefined.
// The carryOut output is guaranteed to be 0 or 1.
//
// This function's execution time does not depend on the inputs.
func Add32(x, y, carry uint32) (sum, carryOut uint32) {
	// A 64-bit addition cannot overflow, so the carry is simply the
	// bit above the low 32 result bits.
	s := uint64(x) + uint64(y) + uint64(carry)
	return uint32(s), uint32(s >> 32)
}
// Add64 returns the sum with carry of x, y and carry: sum = x + y + carry.
// The carry input must be 0 or 1; otherwise the behavior is undefined.
// The carryOut output is guaranteed to be 0 or 1.
//
// This function's execution time does not depend on the inputs.
func Add64(x, y, carry uint64) (sum, carryOut uint64) {
	sum = x + y + carry
	// A carry out of the top bit happened iff both top bits were set,
	// or at least one was set and the carry arriving from the lower
	// bits cleared the top bit of sum (1 + 0 + 1 = 0, hence &^ sum).
	carryOut = (x&y | (x|y)&^sum) >> 63
	return
}
// --- Subtract with borrow ---

// Sub returns the difference of x, y and borrow: diff = x - y - borrow.
// The borrow input must be 0 or 1; otherwise the behavior is undefined.
// The borrowOut output is guaranteed to be 0 or 1.
//
// This function's execution time does not depend on the inputs.
func Sub(x, y, borrow uint) (diff, borrowOut uint) {
	// Dispatch to the fixed-width variant matching uint.
	if UintSize == 64 {
		d, b := Sub64(uint64(x), uint64(y), uint64(borrow))
		return uint(d), uint(b)
	}
	d, b := Sub32(uint32(x), uint32(y), uint32(borrow))
	return uint(d), uint(b)
}
// Sub32 returns the difference of x, y and borrow, diff = x - y - borrow.
// The borrow input must be 0 or 1; otherwise the behavior is undefined.
// The borrowOut output is guaranteed to be 0 or 1.
//
// This function's execution time does not depend on the inputs.
func Sub32(x, y, borrow uint32) (diff, borrowOut uint32) {
	diff = x - y - borrow
	// A borrow out of the top bit happened iff x's top bit is clear
	// while y's is set, or the top bits are equal and the lower bits
	// borrowed (visible as a set top bit in diff).
	borrowOut = (^x&y | ^(x^y)&diff) >> 31
	return
}
// Sub64 returns the difference of x, y and borrow: diff = x - y - borrow.
// The borrow input must be 0 or 1; otherwise the behavior is undefined.
// The borrowOut output is guaranteed to be 0 or 1.
//
// This function's execution time does not depend on the inputs.
func Sub64(x, y, borrow uint64) (diff, borrowOut uint64) {
	diff = x - y - borrow
	// Same bit logic as Sub32, applied to the 64-bit top bit.
	borrowOut = (^x&y | ^(x^y)&diff) >> 63
	return
}
// --- Full-width multiply ---

// Mul returns the full-width product of x and y: (hi, lo) = x * y
// with the product bits' upper half returned in hi and the lower
// half returned in lo.
//
// This function's execution time does not depend on the inputs.
func Mul(x, y uint) (hi, lo uint) {
	// Dispatch to the fixed-width variant matching uint.
	if UintSize == 64 {
		h, l := Mul64(uint64(x), uint64(y))
		return uint(h), uint(l)
	}
	h, l := Mul32(uint32(x), uint32(y))
	return uint(h), uint(l)
}
// Mul32 returns the 64-bit product of x and y: (hi, lo) = x * y
// with the product bits' upper half returned in hi and the lower
// half returned in lo.
//
// This function's execution time does not depend on the inputs.
func Mul32(x, y uint32) (hi, lo uint32) {
	// The full product fits in 64 bits; split it into halves.
	p := uint64(x) * uint64(y)
	return uint32(p >> 32), uint32(p)
}
// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y
// with the product bits' upper half returned in hi and the lower
// half returned in lo.
//
// This function's execution time does not depend on the inputs.
func Mul64(x, y uint64) (hi, lo uint64) {
	// Schoolbook multiplication on 32-bit halves:
	// x = xHi·2^32 + xLo, y = yHi·2^32 + yLo. The low half of the
	// product is just the wrapping machine product x*y; only the
	// high half needs the partial products.
	const mask32 = 1<<32 - 1
	xLo, xHi := x&mask32, x>>32
	yLo, yHi := y&mask32, y>>32
	// t accumulates the first cross term plus the carry out of the
	// low partial product; mid adds the second cross term. Neither
	// sum can overflow 64 bits.
	t := xHi*yLo + (xLo*yLo)>>32
	mid := t&mask32 + xLo*yHi
	hi = xHi*yHi + t>>32 + mid>>32
	lo = x * y
	return
}
// --- Full-width divide ---

// Div returns the quotient and remainder of (hi, lo) divided by y:
// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper
// half in parameter hi and the lower half in parameter lo.
// Div panics for y == 0 (division by zero) or y <= hi (quotient overflow).
func Div(hi, lo, y uint) (quo, rem uint) {
	// Dispatch on the platform word size; UintSize is always 32 or 64.
	if UintSize == 64 {
		q64, r64 := Div64(uint64(hi), uint64(lo), uint64(y))
		return uint(q64), uint(r64)
	}
	q32, r32 := Div32(uint32(hi), uint32(lo), uint32(y))
	return uint(q32), uint(r32)
}
// Div32 returns the quotient and remainder of (hi, lo) divided by y:
// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper
// half in parameter hi and the lower half in parameter lo.
// Div32 panics for y == 0 (division by zero) or y <= hi (quotient overflow).
func Div32(hi, lo, y uint32) (quo, rem uint32) {
	// A quotient that fits 32 bits requires hi < y. A zero divisor is
	// left to the hardware divide below, which raises the usual
	// division-by-zero run-time panic.
	if y != 0 && y <= hi {
		panic(overflowError)
	}
	dividend := uint64(hi)<<32 | uint64(lo)
	divisor := uint64(y)
	return uint32(dividend / divisor), uint32(dividend % divisor)
}
// Div64 returns the quotient and remainder of (hi, lo) divided by y:
// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper
// half in parameter hi and the lower half in parameter lo.
// Div64 panics for y == 0 (division by zero) or y <= hi (quotient overflow).
func Div64(hi, lo, y uint64) (quo, rem uint64) {
	if y == 0 {
		panic(divideError)
	}
	if y <= hi {
		// The quotient would need more than 64 bits.
		panic(overflowError)
	}
	// If high part is zero, we can directly return the results.
	if hi == 0 {
		return lo / y, lo % y
	}
	// 128-by-64 long division on 32-bit digits (Knuth-style, as in
	// Hacker's Delight's divlu): normalize the divisor, estimate each
	// 32-bit quotient digit from the top digits, then correct.
	s := uint(LeadingZeros64(y))
	y <<= s // normalize so the divisor's top bit is set
	const (
		two32  = 1 << 32
		mask32 = two32 - 1
	)
	// Split the normalized divisor and the shifted dividend into 32-bit digits.
	// hi != 0 here, so s < 64 and the shifts below are well defined.
	yn1 := y >> 32
	yn0 := y & mask32
	un32 := hi<<s | lo>>(64-s)
	un10 := lo << s
	un1 := un10 >> 32
	un0 := un10 & mask32
	// Estimate the high quotient digit and correct it downward
	// (at most twice, by Knuth's theorem) until it fits.
	q1 := un32 / yn1
	rhat := un32 - q1*yn1
	for q1 >= two32 || q1*yn0 > two32*rhat+un1 {
		q1--
		rhat += yn1
		if rhat >= two32 {
			break
		}
	}
	// Partial remainder after subtracting q1*y, then the low digit.
	un21 := un32*two32 + un1 - q1*y
	q0 := un21 / yn1
	rhat = un21 - q0*yn1
	for q0 >= two32 || q0*yn0 > two32*rhat+un0 {
		q0--
		rhat += yn1
		if rhat >= two32 {
			break
		}
	}
	// Denormalize the remainder by undoing the shift s.
	return q1*two32 + q0, (un21*two32 + un0 - q0*y) >> s
}
// Rem returns the remainder of (hi, lo) divided by y. Rem panics for
// y == 0 (division by zero) but, unlike Div, it doesn't panic on a
// quotient overflow.
func Rem(hi, lo, y uint) uint {
	// Dispatch on the platform word size; UintSize is always 32 or 64.
	if UintSize == 64 {
		return uint(Rem64(uint64(hi), uint64(lo), uint64(y)))
	}
	return uint(Rem32(uint32(hi), uint32(lo), uint32(y)))
}
// Rem32 returns the remainder of (hi, lo) divided by y. Rem32 panics
// for y == 0 (division by zero) but, unlike [Div32], it doesn't panic
// on a quotient overflow.
func Rem32(hi, lo, y uint32) uint32 {
	// The full 64-bit dividend always fits, so the remainder (< y) can
	// be computed directly and never overflows 32 bits.
	dividend := uint64(hi)<<32 | uint64(lo)
	return uint32(dividend % uint64(y))
}
// Rem64 returns the remainder of (hi, lo) divided by y. Rem64 panics
// for y == 0 (division by zero) but, unlike [Div64], it doesn't panic
// on a quotient overflow.
func Rem64(hi, lo, y uint64) uint64 {
	// Reduce the high word modulo y before dividing. Since
	//   hi<<64 + lo ≡ (hi%y)<<64 + lo  (mod y)
	// the remainder is unchanged, and hi%y < y guarantees Div64's
	// overflow check cannot fire.
	_, r := Div64(hi%y, lo, y)
	return r
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mime
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
"io"
"strings"
"unicode"
"unicode/utf8"
)
// A WordEncoder is an RFC 2047 encoded-word encoder.
// Its value is the encoding letter ('b' or 'q') written into the
// encoded-word header.
type WordEncoder byte

const (
	// BEncoding represents Base64 encoding scheme as defined by RFC 2045.
	BEncoding = WordEncoder('b')
	// QEncoding represents the Q-encoding scheme as defined by RFC 2047.
	QEncoding = WordEncoder('q')
)

var (
	// errInvalidWord reports a malformed RFC 2047 encoded-word.
	errInvalidWord = errors.New("mime: invalid RFC 2047 encoded-word")
)
// Encode returns the encoded-word form of s. If s is ASCII without special
// characters, it is returned unchanged. The provided charset is the IANA
// charset name of s. It is case insensitive.
func (e WordEncoder) Encode(charset, s string) string {
	// Only produce an encoded-word when s actually needs one.
	if needsEncoding(s) {
		return e.encodeWord(charset, s)
	}
	return s
}
// needsEncoding reports whether s contains any character outside
// printable ASCII (other than horizontal tab) and so must be wrapped
// in an encoded-word.
func needsEncoding(s string) bool {
	return strings.ContainsFunc(s, func(r rune) bool {
		return (r < ' ' || r > '~') && r != '\t'
	})
}
// encodeWord encodes a string into an encoded-word.
func (e WordEncoder) encodeWord(charset, s string) string {
	var sb strings.Builder
	// A hint like len(s)*3 under-shoots once word splitting kicks in and
	// over-shoots for simple inputs; 48 is near maxEncodedWordLen/2 and
	// aligned with an allocator size class.
	sb.Grow(48)
	e.openWord(&sb, charset)
	switch e {
	case BEncoding:
		e.bEncode(&sb, charset, s)
	default:
		e.qEncode(&sb, charset, s)
	}
	closeWord(&sb)
	return sb.String()
}
const (
	// The maximum length of an encoded-word is 75 characters.
	// See RFC 2047, section 2.
	maxEncodedWordLen = 75
	// maxContentLen is how much content can be encoded, ignoring the header and
	// 2-byte footer.
	maxContentLen = maxEncodedWordLen - len("=?UTF-8?q?") - len("?=")
)

// maxBase64Len is how many raw bytes fit in the maxContentLen characters
// available for base64-encoded content in a single encoded-word.
var maxBase64Len = base64.StdEncoding.DecodedLen(maxContentLen)
// bEncode encodes s using base64 encoding and writes it to buf.
// When the charset is UTF-8 and the content is long, it splits the
// output into multiple encoded-words so no word exceeds the RFC 2047
// length limit.
func (e WordEncoder) bEncode(buf *strings.Builder, charset, s string) {
	w := base64.NewEncoder(base64.StdEncoding, buf)
	// If the charset is not UTF-8 or if the content is short, do not bother
	// splitting the encoded-word.
	if !isUTF8(charset) || base64.StdEncoding.EncodedLen(len(s)) <= maxContentLen {
		io.WriteString(w, s)
		w.Close()
		return
	}
	var currentLen, last, runeLen int
	for i := 0; i < len(s); i += runeLen {
		// Multi-byte characters must not be split across encoded-words.
		// See RFC 2047, section 5.3.
		_, runeLen = utf8.DecodeRuneInString(s[i:])
		if currentLen+runeLen <= maxBase64Len {
			currentLen += runeLen
		} else {
			// This rune would overflow the current word: flush the chunk
			// (Close also emits base64 padding) and open a new word.
			io.WriteString(w, s[last:i])
			w.Close()
			e.splitWord(buf, charset)
			last = i
			currentLen = runeLen
		}
	}
	io.WriteString(w, s[last:])
	w.Close()
}
// qEncode encodes s using Q encoding and writes it to buf. It splits the
// encoded-words when necessary.
func (e WordEncoder) qEncode(buf *strings.Builder, charset, s string) {
	// We only split encoded-words when the charset is UTF-8.
	if !isUTF8(charset) {
		writeQString(buf, s)
		return
	}
	var currentLen, runeLen int
	for i := 0; i < len(s); i += runeLen {
		b := s[i]
		// Multi-byte characters must not be split across encoded-words.
		// See RFC 2047, section 5.3.
		var encLen int
		if b >= ' ' && b <= '~' && b != '=' && b != '?' && b != '_' {
			// Passes through verbatim: one input byte, one output byte.
			runeLen, encLen = 1, 1
		} else {
			// Escaped as =XX per byte, so a rune costs 3x its UTF-8 length.
			_, runeLen = utf8.DecodeRuneInString(s[i:])
			encLen = 3 * runeLen
		}
		if currentLen+encLen > maxContentLen {
			e.splitWord(buf, charset)
			currentLen = 0
		}
		writeQString(buf, s[i:i+runeLen])
		currentLen += encLen
	}
}
// writeQString encodes s using Q encoding and writes it to buf.
func writeQString(buf *strings.Builder, s string) {
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == ' ' {
			// Q-encoding maps space to underscore (RFC 2047, 4.2.2).
			buf.WriteByte('_')
		} else if c >= '!' && c <= '~' && c != '=' && c != '?' && c != '_' {
			// Printable ASCII outside the reserved set passes through.
			buf.WriteByte(c)
		} else {
			// Everything else becomes =XX with uppercase hex digits.
			buf.WriteByte('=')
			buf.WriteByte(upperhex[c>>4])
			buf.WriteByte(upperhex[c&0x0f])
		}
	}
}
// openWord writes the beginning of an encoded-word into buf:
// "=?charset?X?" where X is the encoding letter.
func (e WordEncoder) openWord(buf *strings.Builder, charset string) {
	buf.WriteString("=?")
	buf.WriteString(charset)
	buf.WriteString("?")
	buf.WriteByte(byte(e))
	buf.WriteString("?")
}
// closeWord writes the end of an encoded-word ("?=") into buf.
func closeWord(buf *strings.Builder) {
	buf.WriteByte('?')
	buf.WriteByte('=')
}
// splitWord closes the current encoded-word and opens a new one,
// separated by a single space.
func (e WordEncoder) splitWord(buf *strings.Builder, charset string) {
	closeWord(buf)
	buf.WriteString(" ")
	e.openWord(buf, charset)
}
// isUTF8 reports whether charset names UTF-8, compared case-insensitively.
func isUTF8(charset string) bool {
	const utf8Name = "UTF-8"
	return strings.EqualFold(charset, utf8Name)
}
// upperhex maps a nibble value (0-15) to its uppercase hex digit.
const upperhex = "0123456789ABCDEF"

// A WordDecoder decodes MIME headers containing RFC 2047 encoded-words.
type WordDecoder struct {
	// CharsetReader, if non-nil, defines a function to generate
	// charset-conversion readers, converting from the provided
	// charset into UTF-8.
	// Charsets are always lower-case. utf-8, iso-8859-1 and us-ascii charsets
	// are handled by default.
	// One of the CharsetReader's result values must be non-nil.
	CharsetReader func(charset string, input io.Reader) (io.Reader, error)
}
// Decode decodes an RFC 2047 encoded-word.
func (d *WordDecoder) Decode(word string) (string, error) {
	// An encoded-word has the shape "=?charset?enc?encoded-text?=".
	// See https://tools.ietf.org/html/rfc2047#section-2 for details.
	// Our decoder is permissive, we accept empty encoded-text.
	delimited := strings.HasPrefix(word, "=?") && strings.HasSuffix(word, "?=")
	if len(word) < 8 || !delimited || strings.Count(word, "?") != 4 {
		return "", errInvalidWord
	}
	inner := word[2 : len(word)-2]
	// split "UTF-8?q?text" into "UTF-8", 'q', and "text".
	charset, remainder, _ := strings.Cut(inner, "?")
	if charset == "" {
		return "", errInvalidWord
	}
	enc, text, _ := strings.Cut(remainder, "?")
	if len(enc) != 1 {
		return "", errInvalidWord
	}
	content, err := decode(enc[0], text)
	if err != nil {
		return "", err
	}
	var sb strings.Builder
	if err := d.convert(&sb, charset, content); err != nil {
		return "", err
	}
	return sb.String(), nil
}
// DecodeHeader decodes all encoded-words of the given string. It returns an
// error if and only if [WordDecoder.CharsetReader] of d returns an error.
func (d *WordDecoder) DecodeHeader(header string) (string, error) {
	// If there is no encoded-word, returns before creating a buffer.
	i := strings.Index(header, "=?")
	if i == -1 {
		return header, nil
	}
	var buf strings.Builder
	buf.WriteString(header[:i])
	header = header[i:]
	// betweenWords tracks whether the text preceding the next match is
	// the gap after a successfully decoded word; pure whitespace in such
	// a gap must be dropped per RFC 2047.
	betweenWords := false
	for {
		start := strings.Index(header, "=?")
		if start == -1 {
			break
		}
		// Manually scan "=?charset?E?text?=" piecewise; on any structural
		// mismatch, stop and emit the rest verbatim.
		cur := start + len("=?")
		i := strings.Index(header[cur:], "?")
		if i == -1 {
			break
		}
		charset := header[cur : cur+i]
		cur += i + len("?")
		if len(header) < cur+len("Q??=") {
			break
		}
		encoding := header[cur]
		cur++
		if header[cur] != '?' {
			break
		}
		cur++
		j := strings.Index(header[cur:], "?=")
		if j == -1 {
			break
		}
		text := header[cur : cur+j]
		end := cur + j + len("?=")
		content, err := decode(encoding, text)
		if err != nil {
			// Not decodable: keep the "=?" literally and resume the scan
			// just past it.
			betweenWords = false
			buf.WriteString(header[:start+2])
			header = header[start+2:]
			continue
		}
		// Write characters before the encoded-word. White-space and newline
		// characters separating two encoded-words must be deleted.
		if start > 0 && (!betweenWords || hasNonWhitespace(header[:start])) {
			buf.WriteString(header[:start])
		}
		if err := d.convert(&buf, charset, content); err != nil {
			return "", err
		}
		header = header[end:]
		betweenWords = true
	}
	if len(header) > 0 {
		buf.WriteString(header)
	}
	return buf.String(), nil
}
// decode decodes text according to the single-letter encoding from an
// encoded-word: B/b for base64, Q/q for Q-encoding.
func decode(encoding byte, text string) ([]byte, error) {
	if encoding == 'B' || encoding == 'b' {
		return base64.StdEncoding.DecodeString(text)
	}
	if encoding == 'Q' || encoding == 'q' {
		return qDecode(text)
	}
	return nil, errInvalidWord
}
// convert writes content to buf converted from charset to UTF-8.
// utf-8, iso-8859-1 and us-ascii are handled inline; anything else is
// delegated to d.CharsetReader, or rejected if none is set.
func (d *WordDecoder) convert(buf *strings.Builder, charset string, content []byte) error {
	switch {
	case strings.EqualFold("utf-8", charset):
		// Already UTF-8: copy bytes through unchanged.
		buf.Write(content)
	case strings.EqualFold("iso-8859-1", charset):
		// Latin-1 bytes map 1:1 onto the first 256 Unicode code points.
		for _, c := range content {
			buf.WriteRune(rune(c))
		}
	case strings.EqualFold("us-ascii", charset):
		// Bytes above 0x7F are not valid US-ASCII; substitute U+FFFD.
		for _, c := range content {
			if c >= utf8.RuneSelf {
				buf.WriteRune(unicode.ReplacementChar)
			} else {
				buf.WriteByte(c)
			}
		}
	default:
		if d.CharsetReader == nil {
			return fmt.Errorf("mime: unhandled charset %q", charset)
		}
		r, err := d.CharsetReader(strings.ToLower(charset), bytes.NewReader(content))
		if err != nil {
			return err
		}
		if _, err = io.Copy(buf, r); err != nil {
			return err
		}
	}
	return nil
}
// hasNonWhitespace reports whether s (assumed to be ASCII) contains at least
// one byte of non-whitespace.
// Encoded-words can only be separated by linear white space, which does
// not include vertical tabs (\v), so only space, tab, CR and LF count
// as whitespace here.
func hasNonWhitespace(s string) bool {
	return strings.ContainsFunc(s, func(r rune) bool {
		return r != ' ' && r != '\t' && r != '\n' && r != '\r'
	})
}
// qDecode decodes a Q encoded string.
func qDecode(s string) ([]byte, error) {
	out := make([]byte, len(s))
	n := 0
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == '_' {
			// Underscore stands for a space (RFC 2047, 4.2.2).
			out[n] = ' '
		} else if c == '=' {
			// "=XX" escape: needs two hex digits after the '='.
			if len(s) <= i+2 {
				return nil, errInvalidWord
			}
			decoded, err := readHexByte(s[i+1], s[i+2])
			if err != nil {
				return nil, err
			}
			out[n] = decoded
			i += 2
		} else if (' ' <= c && c <= '~') || c == '\n' || c == '\r' || c == '\t' {
			// Printable ASCII and permitted whitespace pass through.
			out[n] = c
		} else {
			return nil, errInvalidWord
		}
		n++
	}
	return out[:n], nil
}
// readHexByte returns the byte from its quoted-printable representation,
// combining the high nibble a and low nibble b.
func readHexByte(a, b byte) (byte, error) {
	hi, err := fromHex(a)
	if err != nil {
		return 0, err
	}
	lo, err := fromHex(b)
	if err != nil {
		return 0, err
	}
	return hi<<4 | lo, nil
}
// fromHex converts a single hex digit to its value, accepting both
// uppercase and (leniently) lowercase digits.
func fromHex(b byte) (byte, error) {
	if b >= '0' && b <= '9' {
		return b - '0', nil
	}
	if b >= 'A' && b <= 'F' {
		return b - 'A' + 10, nil
	}
	// Accept badly encoded bytes.
	if b >= 'a' && b <= 'f' {
		return b - 'a' + 10, nil
	}
	return 0, fmt.Errorf("mime: invalid hex byte %#02x", b)
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mime
// isTSpecial reports whether c is in 'tspecials' as defined by RFC
// 1521 and RFC 2045.
func isTSpecial(c byte) bool {
	// tspecials :=  "(" / ")" / "<" / ">" / "@" /
	//               "," / ";" / ":" / "\" / <">
	//               "/" / "[" / "]" / "?" / "="
	//
	// mask is a 128-bit bitmap with 1s at the tspecial byte positions.
	const mask = 0 |
		1<<'(' |
		1<<')' |
		1<<'<' |
		1<<'>' |
		1<<'@' |
		1<<',' |
		1<<';' |
		1<<':' |
		1<<'\\' |
		1<<'"' |
		1<<'/' |
		1<<'[' |
		1<<']' |
		1<<'?' |
		1<<'='
	// Bytes outside 7-bit ASCII are never tspecials.
	if c >= 128 {
		return false
	}
	// Pick the 64-bit half of the bitmap that holds bit c and test it.
	var bits uint64
	if c < 64 {
		bits = uint64(mask&(1<<64-1)) >> c
	} else {
		bits = uint64(mask>>64) >> (c - 64)
	}
	return bits&1 != 0
}
// isTokenChar reports whether c is in 'token' as defined by RFC
// 1521 and RFC 2045.
func isTokenChar(c byte) bool {
	// token := 1*<any (US-ASCII) CHAR except SPACE, CTLs,
	//             or tspecials>
	//
	// mask is a 128-bit bitmap with 1s at the allowed byte positions:
	// digits, letters, and the non-tspecial punctuation.
	const mask = 0 |
		(1<<(10)-1)<<'0' |
		(1<<(26)-1)<<'a' |
		(1<<(26)-1)<<'A' |
		1<<'!' |
		1<<'#' |
		1<<'$' |
		1<<'%' |
		1<<'&' |
		1<<'\'' |
		1<<'*' |
		1<<'+' |
		1<<'-' |
		1<<'.' |
		1<<'^' |
		1<<'_' |
		1<<'`' |
		1<<'{' |
		1<<'|' |
		1<<'}' |
		1<<'~'
	// Bytes outside 7-bit ASCII are never token characters.
	if c >= 128 {
		return false
	}
	// Pick the 64-bit half of the bitmap that holds bit c and test it.
	var bits uint64
	if c < 64 {
		bits = uint64(mask&(1<<64-1)) >> c
	} else {
		bits = uint64(mask>>64) >> (c - 64)
	}
	return bits&1 != 0
}
// isToken reports whether s is a non-empty 'token' as defined by RFC 1521
// and RFC 2045: every byte must be a token character.
func isToken(s string) bool {
	if len(s) == 0 {
		return false
	}
	for i := 0; i < len(s); i++ {
		if !isTokenChar(s[i]) {
			return false
		}
	}
	return true
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mime
import (
"errors"
"fmt"
"maps"
"slices"
"strings"
"unicode"
)
// FormatMediaType serializes mediatype t and the parameters
// param as a media type conforming to RFC 2045 and RFC 2616.
// The type and parameter names are written in lower-case.
// When any of the arguments result in a standard violation then
// FormatMediaType returns the empty string.
func FormatMediaType(t string, param map[string]string) string {
	var b strings.Builder
	// The type may be a bare token or "major/sub"; both halves must be
	// valid tokens.
	if major, sub, ok := strings.Cut(t, "/"); !ok {
		if !isToken(t) {
			return ""
		}
		b.WriteString(strings.ToLower(t))
	} else {
		if !isToken(major) || !isToken(sub) {
			return ""
		}
		b.WriteString(strings.ToLower(major))
		b.WriteByte('/')
		b.WriteString(strings.ToLower(sub))
	}
	// Sort attribute names so the output is deterministic.
	for _, attribute := range slices.Sorted(maps.Keys(param)) {
		value := param[attribute]
		b.WriteByte(';')
		b.WriteByte(' ')
		if !isToken(attribute) {
			return ""
		}
		b.WriteString(strings.ToLower(attribute))
		needEnc := needsEncoding(value)
		if needEnc {
			// RFC 2231 section 4
			b.WriteByte('*')
		}
		b.WriteByte('=')
		if needEnc {
			// Extended parameter: attr*=utf-8''percent-escaped-value.
			b.WriteString("utf-8''")
			offset := 0
			for index := 0; index < len(value); index++ {
				ch := value[index]
				// {RFC 2231 section 7}
				// attribute-char := <any (US-ASCII) CHAR except SPACE, CTLs, "*", "'", "%", or tspecials>
				if ch <= ' ' || ch >= 0x7F ||
					ch == '*' || ch == '\'' || ch == '%' ||
					isTSpecial(ch) {
					// Flush the clean run, then emit %XX for this byte.
					b.WriteString(value[offset:index])
					offset = index + 1
					b.WriteByte('%')
					b.WriteByte(upperhex[ch>>4])
					b.WriteByte(upperhex[ch&0x0F])
				}
			}
			b.WriteString(value[offset:])
			continue
		}
		if isToken(value) {
			// Plain token values need no quoting.
			b.WriteString(value)
			continue
		}
		// Otherwise emit a quoted-string, backslash-escaping '"' and '\'.
		// offset points at the next byte not yet written; the escaped byte
		// itself is carried forward into the following WriteString.
		b.WriteByte('"')
		offset := 0
		for index := 0; index < len(value); index++ {
			character := value[index]
			if character == '"' || character == '\\' {
				b.WriteString(value[offset:index])
				offset = index
				b.WriteByte('\\')
			}
		}
		b.WriteString(value[offset:])
		b.WriteByte('"')
	}
	return b.String()
}
// checkMediaTypeDisposition validates that s is either a bare token or
// "token/token" with nothing trailing, returning a specific error for
// each way it can fail.
func checkMediaTypeDisposition(s string) error {
	typ, rest := consumeToken(s)
	if typ == "" {
		return errNoMediaType
	}
	if rest == "" {
		// A bare type with no subtype is acceptable.
		return nil
	}
	rest, ok := strings.CutPrefix(rest, "/")
	if !ok {
		return errNoSlashAfterFirstToken
	}
	subtype, rest := consumeToken(rest)
	if subtype == "" {
		return errNoTokenAfterSlash
	}
	if rest != "" {
		return errUnexpectedContentAfterMediaSubtype
	}
	return nil
}
// Errors returned by checkMediaTypeDisposition describing exactly how a
// media type string is malformed.
var (
	errNoMediaType                        = errors.New("mime: no media type")
	errNoSlashAfterFirstToken             = errors.New("mime: expected slash after first token")
	errNoTokenAfterSlash                  = errors.New("mime: expected token after slash")
	errUnexpectedContentAfterMediaSubtype = errors.New("mime: unexpected content after media subtype")
)

// ErrInvalidMediaParameter is returned by [ParseMediaType] if
// the media type value was found but there was an error parsing
// the optional parameters
var ErrInvalidMediaParameter = errors.New("mime: invalid media parameter")
// ParseMediaType parses a media type value and any optional
// parameters, per RFC 1521. Media types are the values in
// Content-Type and Content-Disposition headers (RFC 2183).
// On success, ParseMediaType returns the media type converted
// to lowercase and trimmed of white space and a non-nil map.
// If there is an error parsing the optional parameter,
// the media type will be returned along with the error
// [ErrInvalidMediaParameter].
// The returned map, params, maps from the lowercase
// attribute to the attribute value with its case preserved.
func ParseMediaType(v string) (mediatype string, params map[string]string, err error) {
	// The media type itself is everything before the first ';'.
	base, _, _ := strings.Cut(v, ";")
	mediatype = strings.TrimSpace(strings.ToLower(base))
	err = checkMediaTypeDisposition(mediatype)
	if err != nil {
		return "", nil, err
	}
	params = make(map[string]string)
	// Map of base parameter name -> parameter name -> value
	// for parameters containing a '*' character.
	// Lazily initialized.
	var continuation map[string]map[string]string
	v = v[len(base):]
	for len(v) > 0 {
		v = strings.TrimLeftFunc(v, unicode.IsSpace)
		if len(v) == 0 {
			break
		}
		key, value, rest := consumeMediaParam(v)
		if key == "" {
			if strings.TrimSpace(rest) == ";" {
				// Ignore trailing semicolons.
				// Not an error.
				break
			}
			// Parse error.
			return mediatype, nil, ErrInvalidMediaParameter
		}
		// RFC 2231 parameters ("foo*", "foo*0", "foo*1*", ...) are
		// collected per base name and stitched together afterwards.
		pmap := params
		if baseName, _, ok := strings.Cut(key, "*"); ok {
			if continuation == nil {
				continuation = make(map[string]map[string]string)
			}
			if pmap, ok = continuation[baseName]; !ok {
				continuation[baseName] = make(map[string]string)
				pmap = continuation[baseName]
			}
		}
		if v, exists := pmap[key]; exists && v != value {
			// Duplicate parameter names are incorrect, but we allow them if they are equal.
			return "", nil, errDuplicateParamName
		}
		pmap[key] = value
		v = rest
	}
	// Stitch together any continuations or things with stars
	// (i.e. RFC 2231 things with stars: "foo*0" or "foo*")
	var buf strings.Builder
	for key, pieceMap := range continuation {
		// "foo*" alone is a single extended value, not a continuation.
		singlePartKey := key + "*"
		if v, ok := pieceMap[singlePartKey]; ok {
			if decv, ok := decode2231Enc(v); ok {
				params[key] = decv
			}
			continue
		}
		// Otherwise concatenate numbered pieces "foo*0", "foo*1", ...;
		// a trailing '*' on a piece marks it as percent-encoded.
		buf.Reset()
		valid := false
		for n := 0; ; n++ {
			simplePart := fmt.Sprintf("%s*%d", key, n)
			if v, ok := pieceMap[simplePart]; ok {
				valid = true
				buf.WriteString(v)
				continue
			}
			encodedPart := simplePart + "*"
			v, ok := pieceMap[encodedPart]
			if !ok {
				break
			}
			valid = true
			if n == 0 {
				// Only the first piece carries the charset'lang' prefix.
				if decv, ok := decode2231Enc(v); ok {
					buf.WriteString(decv)
				}
			} else {
				decv, _ := percentHexUnescape(v)
				buf.WriteString(decv)
			}
		}
		if valid {
			params[key] = buf.String()
		}
	}
	return
}
// errDuplicateParamName is returned by ParseMediaType when the same
// parameter name appears twice with different values.
var errDuplicateParamName = errors.New("mime: duplicate parameter name")
// decode2231Enc decodes an RFC 2231 extended value of the form
// charset'language'percent-escaped-text, reporting ok=false for a
// malformed value or an unsupported charset.
func decode2231Enc(v string) (string, bool) {
	charset, rest, ok := strings.Cut(v, "'")
	if !ok {
		return "", false
	}
	// TODO: ignoring the language part for now. If anybody needs it, we'll
	// need to decide how to expose it in the API. But I'm not sure
	// anybody uses it in practice.
	_, payload, ok := strings.Cut(rest, "'")
	if !ok {
		return "", false
	}
	switch strings.ToLower(charset) {
	case "us-ascii", "utf-8":
		return percentHexUnescape(payload)
	}
	// Empty or unsupported encoding.
	return "", false
}
// consumeToken consumes a token from the beginning of provided
// string, per RFC 2045 section 5.1 (referenced from 2183), and return
// the token consumed and the rest of the string. Returns ("", v) on
// failure to consume at least one character.
func consumeToken(v string) (token, rest string) {
for i := range len(v) {
if !isTokenChar(v[i]) {
return v[:i], v[i:]
}
}
return v, ""
}
// consumeValue consumes a "value" per RFC 2045, where a value is
// either a 'token' or a 'quoted-string'. On success, consumeValue
// returns the value consumed (and de-quoted/escaped, if a
// quoted-string) and the rest of the string. On failure, returns
// ("", v).
func consumeValue(v string) (value, rest string) {
	if v == "" {
		return
	}
	if v[0] != '"' {
		// Unquoted: fall back to plain token consumption.
		return consumeToken(v)
	}
	// parse a quoted-string
	buffer := new(strings.Builder)
	for i := 1; i < len(v); i++ {
		r := v[i]
		if r == '"' {
			// Closing quote: done.
			return buffer.String(), v[i+1:]
		}
		// When MSIE sends a full file path (in "intranet mode"), it does not
		// escape backslashes: "C:\dev\go\foo.txt", not "C:\\dev\\go\\foo.txt".
		//
		// No known MIME generators emit unnecessary backslash escapes
		// for simple token characters like numbers and letters.
		//
		// If we see an unnecessary backslash escape, assume it is from MSIE
		// and intended as a literal backslash. This makes Go servers deal better
		// with MSIE without affecting the way they handle conforming MIME
		// generators.
		if r == '\\' && i+1 < len(v) && isTSpecial(v[i+1]) {
			buffer.WriteByte(v[i+1])
			i++
			continue
		}
		if r == '\r' || r == '\n' {
			// Bare CR/LF inside a quoted-string is invalid.
			return "", v
		}
		buffer.WriteByte(v[i])
	}
	// Did not find end quote.
	return "", v
}
// consumeMediaParam consumes one ";attr=value" media parameter from v,
// returning the lowercased attribute name, its value, and the remaining
// input. On any parse failure it returns ("", "", v) unchanged.
func consumeMediaParam(v string) (param, value, rest string) {
	s := strings.TrimLeftFunc(v, unicode.IsSpace)
	s, ok := strings.CutPrefix(s, ";")
	if !ok {
		return "", "", v
	}
	s = strings.TrimLeftFunc(s, unicode.IsSpace)
	name, s := consumeToken(s)
	name = strings.ToLower(name)
	if name == "" {
		return "", "", v
	}
	s = strings.TrimLeftFunc(s, unicode.IsSpace)
	s, ok = strings.CutPrefix(s, "=")
	if !ok {
		return "", "", v
	}
	s = strings.TrimLeftFunc(s, unicode.IsSpace)
	val, tail := consumeValue(s)
	// consumeValue signals failure by returning its input unchanged with
	// an empty value.
	if val == "" && tail == s {
		return "", "", v
	}
	return name, val, tail
}
// percentHexUnescape decodes %XX escapes in s, reporting ok=false if any
// '%' is not followed by two hex digits.
func percentHexUnescape(s string) (string, bool) {
	// First pass: validate every escape and count them.
	percents := 0
	for i := 0; i < len(s); {
		if s[i] != '%' {
			i++
			continue
		}
		if len(s) < i+3 || !ishex(s[i+1]) || !ishex(s[i+2]) {
			return "", false
		}
		percents++
		i += 3
	}
	if percents == 0 {
		// Nothing to decode; return the input as-is.
		return s, true
	}
	// Second pass: build the decoded string, each escape shrinking
	// three input bytes to one output byte.
	out := make([]byte, 0, len(s)-2*percents)
	for i := 0; i < len(s); {
		if s[i] == '%' {
			out = append(out, unhex(s[i+1])<<4|unhex(s[i+2]))
			i += 3
		} else {
			out = append(out, s[i])
			i++
		}
	}
	return string(out), true
}
// ishex reports whether c is an ASCII hexadecimal digit, either case.
func ishex(c byte) bool {
	return ('0' <= c && c <= '9') ||
		('a' <= c && c <= 'f') ||
		('A' <= c && c <= 'F')
}
// unhex converts a hex digit to its value. Callers are expected to
// validate with ishex first; any other byte decodes as 0.
func unhex(c byte) byte {
	if '0' <= c && c <= '9' {
		return c - '0'
	}
	if 'a' <= c && c <= 'f' {
		return c - 'a' + 10
	}
	if 'A' <= c && c <= 'F' {
		return c - 'A' + 10
	}
	return 0
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package multipart
import (
"bytes"
"errors"
"internal/godebug"
"io"
"math"
"net/textproto"
"os"
"strconv"
)
// ErrMessageTooLarge is returned by ReadForm if the message form
// data is too large to be processed.
var ErrMessageTooLarge = errors.New("multipart: message too large")

// TODO(adg,bradfitz): find a way to unify the DoS-prevention strategy here
// with that of the http package's ParseForm.
// ReadForm parses an entire multipart message whose parts have
// a Content-Disposition of "form-data".
// It stores up to maxMemory bytes + 10MB (reserved for non-file parts)
// in memory. File parts which can't be stored in memory will be stored on
// disk in temporary files.
// It returns [ErrMessageTooLarge] if all non-file parts can't be stored in
// memory.
func (r *Reader) ReadForm(maxMemory int64) (*Form, error) {
	// All the work is done by the unexported implementation.
	return r.readForm(maxMemory)
}
// GODEBUG knobs controlling ReadForm behavior: whether spilled file
// parts share one temp file, and the maximum number of form parts.
var (
	multipartfiles    = godebug.New("#multipartfiles") // TODO: document and remove #
	multipartmaxparts = godebug.New("multipartmaxparts")
)
// readForm implements ReadForm: it drains every part from r, keeping
// non-file values and small file bodies in memory and spilling large
// file bodies to a temporary file on disk, while charging all metadata
// against a memory budget derived from maxMemory.
func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
	form := &Form{make(map[string][]string), make(map[string][]*FileHeader)}
	var (
		file    *os.File
		fileOff int64
	)
	numDiskFiles := 0
	// By default all spilled file parts share one temp file; the
	// "distinct" GODEBUG setting gives each its own file.
	combineFiles := true
	if multipartfiles.Value() == "distinct" {
		combineFiles = false
		// multipartfiles.IncNonDefault() // TODO: uncomment after documenting
	}
	maxParts := 1000
	if s := multipartmaxparts.Value(); s != "" {
		if v, err := strconv.Atoi(s); err == nil && v >= 0 {
			maxParts = v
			multipartmaxparts.IncNonDefault()
		}
	}
	maxHeaders := maxMIMEHeaders()
	// On any exit: close the shared temp file (propagating a close error
	// on an otherwise-successful return), mark headers as sharing a temp
	// file when applicable, and clean up temp files on error.
	defer func() {
		if file != nil {
			if cerr := file.Close(); err == nil {
				err = cerr
			}
		}
		if combineFiles && numDiskFiles > 1 {
			for _, fhs := range form.File {
				for _, fh := range fhs {
					fh.tmpshared = true
				}
			}
		}
		if err != nil {
			form.RemoveAll()
			if file != nil {
				os.Remove(file.Name())
			}
		}
	}()
	// maxFileMemoryBytes is the maximum bytes of file data we will store in memory.
	// Data past this limit is written to disk.
	// This limit strictly applies to content, not metadata (filenames, MIME headers, etc.),
	// since metadata is always stored in memory, not disk.
	//
	// maxMemoryBytes is the maximum bytes we will store in memory, including file content,
	// non-file part values, metadata, and map entry overhead.
	//
	// We reserve an additional 10 MB in maxMemoryBytes for non-file data.
	//
	// The relationship between these parameters, as well as the overly-large and
	// unconfigurable 10 MB added on to maxMemory, is unfortunate but difficult to change
	// within the constraints of the API as documented.
	maxFileMemoryBytes := maxMemory
	if maxFileMemoryBytes == math.MaxInt64 {
		// Leave room for the +1 in the CopyN calls below.
		maxFileMemoryBytes--
	}
	maxMemoryBytes := maxMemory + int64(10<<20)
	if maxMemoryBytes <= 0 {
		if maxMemory < 0 {
			maxMemoryBytes = 0
		} else {
			// The addition overflowed; clamp to the maximum.
			maxMemoryBytes = math.MaxInt64
		}
	}
	var copyBuf []byte
	for {
		p, err := r.nextPart(false, maxMemoryBytes, maxHeaders)
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		if maxParts <= 0 {
			return nil, ErrMessageTooLarge
		}
		maxParts--

		name := p.FormName()
		if name == "" {
			// Parts without a form-data name are skipped entirely.
			continue
		}
		filename := p.FileName()

		// Multiple values for the same key (one map entry, longer slice) are cheaper
		// than the same number of values for different keys (many map entries), but
		// using a consistent per-value cost for overhead is simpler.
		const mapEntryOverhead = 200
		maxMemoryBytes -= int64(len(name))
		maxMemoryBytes -= mapEntryOverhead
		if maxMemoryBytes < 0 {
			// We can't actually take this path, since nextPart would already have
			// rejected the MIME headers for being too large. Check anyway.
			return nil, ErrMessageTooLarge
		}

		var b bytes.Buffer
		if filename == "" {
			// value, store as string in memory
			n, err := io.CopyN(&b, p, maxMemoryBytes+1)
			if err != nil && err != io.EOF {
				return nil, err
			}
			maxMemoryBytes -= n
			if maxMemoryBytes < 0 {
				return nil, ErrMessageTooLarge
			}
			form.Value[name] = append(form.Value[name], b.String())
			continue
		}

		// file, store in memory or on disk
		const fileHeaderSize = 100
		maxMemoryBytes -= mimeHeaderSize(p.Header)
		maxMemoryBytes -= mapEntryOverhead
		maxMemoryBytes -= fileHeaderSize
		if maxMemoryBytes < 0 {
			return nil, ErrMessageTooLarge
		}
		for _, v := range p.Header {
			maxHeaders -= int64(len(v))
		}
		fh := &FileHeader{
			Filename: filename,
			Header:   p.Header,
		}
		// Read up to the in-memory file budget plus one byte; reading the
		// extra byte is how we detect that the part must spill to disk.
		n, err := io.CopyN(&b, p, maxFileMemoryBytes+1)
		if err != nil && err != io.EOF {
			return nil, err
		}
		if n > maxFileMemoryBytes {
			if file == nil {
				file, err = os.CreateTemp(r.tempDir, "multipart-")
				if err != nil {
					return nil, err
				}
			}
			numDiskFiles++
			if _, err := file.Write(b.Bytes()); err != nil {
				return nil, err
			}
			if copyBuf == nil {
				copyBuf = make([]byte, 32*1024) // same buffer size as io.Copy uses
			}
			// os.File.ReadFrom will allocate its own copy buffer if we let io.Copy use it.
			type writerOnly struct{ io.Writer }
			remainingSize, err := io.CopyBuffer(writerOnly{file}, p, copyBuf)
			if err != nil {
				return nil, err
			}
			fh.tmpfile = file.Name()
			fh.Size = int64(b.Len()) + remainingSize
			fh.tmpoff = fileOff
			fileOff += fh.Size
			if !combineFiles {
				if err := file.Close(); err != nil {
					return nil, err
				}
				file = nil
			}
		} else {
			// Fits in memory; charge it against both budgets.
			fh.content = b.Bytes()
			fh.Size = int64(len(fh.content))
			maxFileMemoryBytes -= n
			maxMemoryBytes -= n
		}
		form.File[name] = append(form.File[name], fh)
	}

	return form, nil
}
// mimeHeaderSize estimates the in-memory footprint of h for ReadForm's
// accounting: a flat base cost plus, per key, the key length and a map
// entry overhead, plus the length of every value.
func mimeHeaderSize(h textproto.MIMEHeader) (size int64) {
	const (
		baseOverhead     = 400
		mapEntryOverhead = 200
	)
	size = baseOverhead
	for key, values := range h {
		size += int64(len(key)) + mapEntryOverhead
		for _, value := range values {
			size += int64(len(value))
		}
	}
	return size
}
// Form is a parsed multipart form.
// Its File parts are stored either in memory or on disk,
// and are accessible via the [*FileHeader]'s Open method.
// Its Value parts are stored as strings.
// Both are keyed by field name.
type Form struct {
	Value map[string][]string
	File  map[string][]*FileHeader
}
// RemoveAll removes any temporary files associated with a [Form].
// Every on-disk part is attempted; the first failure (other than the
// file already being gone) is returned.
func (f *Form) RemoveAll() error {
	var firstErr error
	for _, headers := range f.File {
		for _, header := range headers {
			if header.tmpfile == "" {
				continue
			}
			if e := os.Remove(header.tmpfile); e != nil && !errors.Is(e, os.ErrNotExist) && firstErr == nil {
				firstErr = e
			}
		}
	}
	return firstErr
}
// A FileHeader describes a file part of a multipart request.
type FileHeader struct {
	Filename string
	Header   textproto.MIMEHeader
	Size     int64

	// Exactly one of content (in-memory body) or tmpfile (on-disk body)
	// is populated by ReadForm.
	content []byte
	tmpfile string
	// tmpoff is this part's byte offset within tmpfile; tmpshared marks
	// tmpfile as shared with other parts.
	tmpoff    int64
	tmpshared bool
}
// Open opens and returns the [FileHeader]'s associated File.
func (fh *FileHeader) Open() (File, error) {
	// In-memory parts are served straight from the byte slice.
	if content := fh.content; content != nil {
		sr := io.NewSectionReader(bytes.NewReader(content), 0, int64(len(content)))
		return sectionReadCloser{sr, nil}, nil
	}
	// A part with its own temp file is returned directly.
	if !fh.tmpshared {
		return os.Open(fh.tmpfile)
	}
	// A part inside a shared temp file is exposed as just its section,
	// with the file closed alongside the reader.
	f, err := os.Open(fh.tmpfile)
	if err != nil {
		return nil, err
	}
	return sectionReadCloser{io.NewSectionReader(f, fh.tmpoff, fh.Size), f}, nil
}
// File is an interface to access the file part of a multipart message.
// Its contents may be either stored in memory or on disk.
// If stored on disk, the File's underlying concrete type will be an *os.File.
type File interface {
	io.Reader
	io.ReaderAt
	io.Seeker
	io.Closer
}
// helper types to turn a []byte into a File

// sectionReadCloser adapts a SectionReader to the File interface;
// Closer may be nil for in-memory content with nothing to close.
type sectionReadCloser struct {
	*io.SectionReader
	io.Closer
}
// Close closes the underlying Closer, if any; in-memory sections have
// nothing to release.
func (rc sectionReadCloser) Close() error {
	if rc.Closer == nil {
		return nil
	}
	return rc.Closer.Close()
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
/*
Package multipart implements MIME multipart parsing, as defined in RFC
2046.
The implementation is sufficient for HTTP (RFC 2388) and the multipart
bodies generated by popular browsers.
# Limits
To protect against malicious inputs, this package sets limits on the size
of the MIME data it processes.
[Reader.NextPart] and [Reader.NextRawPart] limit the number of headers in a
part to 10000 and [Reader.ReadForm] limits the total number of headers in all
FileHeaders to 10000.
These limits may be adjusted with the GODEBUG=multipartmaxheaders=<values>
setting.
Reader.ReadForm further limits the number of parts in a form to 1000.
This limit may be adjusted with the GODEBUG=multipartmaxparts=<value>
setting.
*/
package multipart
import (
"bufio"
"bytes"
"fmt"
"internal/godebug"
"io"
"mime"
"mime/quotedprintable"
"net/textproto"
"path/filepath"
"strconv"
"strings"
)
// emptyParams is the shared fallback for Content-Disposition parameters
// when header parsing fails (see Part.parseContentDisposition); sharing it
// avoids retrying the parse on every accessor call.
var emptyParams = make(map[string]string)

// This constant needs to be at least 76 for this package to work correctly.
// This is because \r\n--separator_of_len_70- would fill the buffer and it
// wouldn't be safe to consume a single byte from it.
const peekBufferSize = 4096
// A Part represents a single part in a multipart body.
type Part struct {
	// The headers of the body, if any, with the keys canonicalized
	// in the same fashion that the Go http.Request headers are.
	// For example, "foo-bar" changes case to "Foo-Bar"
	Header textproto.MIMEHeader

	// mr is the Reader this part is being parsed from.
	mr *Reader

	// disposition and dispositionParams cache the parsed
	// Content-Disposition header; populated lazily by
	// parseContentDisposition.
	disposition       string
	dispositionParams map[string]string

	// r is either a reader directly reading from mr, or it's a
	// wrapper around such a reader, decoding the
	// Content-Transfer-Encoding
	r io.Reader

	n       int   // known data bytes waiting in mr.bufReader
	total   int64 // total data bytes read already
	err     error // error to return when n == 0
	readErr error // read error observed from mr.bufReader
}
// FormName returns the name parameter if p has a Content-Disposition
// of type "form-data". Otherwise it returns the empty string.
func (p *Part) FormName() string {
	// See https://tools.ietf.org/html/rfc2183 section 2 for EBNF
	// of Content-Disposition value format.
	if p.dispositionParams == nil {
		p.parseContentDisposition()
	}
	if p.disposition == "form-data" {
		return p.dispositionParams["name"]
	}
	return ""
}
// FileName returns the filename parameter of the [Part]'s Content-Disposition
// header. If not empty, the filename is passed through filepath.Base (which is
// platform dependent) before being returned.
func (p *Part) FileName() string {
	if p.dispositionParams == nil {
		p.parseContentDisposition()
	}
	name := p.dispositionParams["filename"]
	if name == "" {
		return ""
	}
	// RFC 7578, Section 4.2 requires that if a filename is provided, the
	// directory path information must not be used.
	return filepath.Base(name)
}
// parseContentDisposition populates p.disposition and p.dispositionParams
// from the part's Content-Disposition header. On a malformed header the
// params are set to the shared emptyParams map, marking the parse as done.
func (p *Part) parseContentDisposition() {
	cd := p.Header.Get("Content-Disposition")
	disposition, params, err := mime.ParseMediaType(cd)
	if err != nil {
		params = emptyParams
	}
	p.disposition = disposition
	p.dispositionParams = params
}
// NewReader creates a new multipart [Reader] reading from r using the
// given MIME boundary.
//
// The boundary is usually obtained from the "boundary" parameter of
// the message's "Content-Type" header. Use [mime.ParseMediaType] to
// parse such headers.
func NewReader(r io.Reader, boundary string) *Reader {
	return &Reader{
		bufReader:        bufio.NewReaderSize(&stickyErrorReader{r: r}, peekBufferSize),
		nl:               []byte("\r\n"),
		nlDashBoundary:   []byte("\r\n--" + boundary),
		dashBoundaryDash: []byte("--" + boundary + "--"),
		dashBoundary:     []byte("--" + boundary),
	}
}
// stickyErrorReader is an io.Reader which never calls Read on its
// underlying Reader once an error has been seen. (the io.Reader
// interface's contract promises nothing about the return values of
// Read calls after an error, yet this package does do multiple Reads
// after error)
type stickyErrorReader struct {
r io.Reader
err error
}
func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
if r.err != nil {
return 0, r.err
}
n, r.err = r.r.Read(p)
return n, r.err
}
// newPart reads the next part's headers from mr and returns a Part ready
// for body reads, enforcing the given header size and count limits.
// rawPart distinguishes Reader.NextRawPart (true) from Reader.NextPart
// (false); in the latter case a "quoted-printable"
// Content-Transfer-Encoding header is hidden and the body transparently
// decoded.
func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize, maxMIMEHeaders int64) (*Part, error) {
	p := &Part{
		Header: make(map[string][]string),
		mr:     mr,
	}
	if err := p.populateHeaders(maxMIMEHeaderSize, maxMIMEHeaders); err != nil {
		return nil, err
	}
	p.r = partReader{p}

	if rawPart {
		return p, nil
	}
	const cte = "Content-Transfer-Encoding"
	if strings.EqualFold(p.Header.Get(cte), "quoted-printable") {
		p.Header.Del(cte)
		p.r = quotedprintable.NewReader(p.r)
	}
	return p, nil
}
// populateHeaders parses the part's MIME headers from the underlying
// buffered reader, applying the given size and count limits.
func (p *Part) populateHeaders(maxMIMEHeaderSize, maxMIMEHeaders int64) error {
	tr := textproto.NewReader(p.mr.bufReader)
	header, err := readMIMEHeader(tr, maxMIMEHeaderSize, maxMIMEHeaders)
	switch {
	case err == nil:
		p.Header = header
	// TODO: Add a distinguishable error to net/textproto.
	case err.Error() == "message too large":
		err = ErrMessageTooLarge
	}
	return err
}
// Read reads the body of a part, after its headers and before the
// next part (if any) begins.
func (p *Part) Read(d []byte) (n int, err error) {
	// p.r already accounts for boundary detection and any
	// Content-Transfer-Encoding decoding.
	n, err = p.r.Read(d)
	return
}
// partReader implements io.Reader by reading raw bytes directly from the
// wrapped *Part, without doing any Transfer-Encoding decoding.
type partReader struct {
	p *Part
}

// Read returns body bytes of the part up to (but not including) the next
// boundary. It peeks at the Reader's buffered data, lets scanUntilBoundary
// decide how much of it is body (p.n) and which error follows (p.err), and
// then reads the identified body bytes out of the buffer.
func (pr partReader) Read(d []byte) (int, error) {
	p := pr.p
	br := p.mr.bufReader

	// Read into buffer until we identify some data to return,
	// or we find a reason to stop (boundary or read error).
	for p.n == 0 && p.err == nil {
		// Inspect only what is already buffered; no bytes are consumed here.
		peek, _ := br.Peek(br.Buffered())
		p.n, p.err = scanUntilBoundary(peek, p.mr.dashBoundary, p.mr.nlDashBoundary, p.total, p.readErr)
		if p.n == 0 && p.err == nil {
			// Force buffered I/O to read more into buffer.
			_, p.readErr = br.Peek(len(peek) + 1)
			if p.readErr == io.EOF {
				// Input ended before a closing boundary was seen.
				p.readErr = io.ErrUnexpectedEOF
			}
		}
	}

	// Read out from "data to return" part of buffer.
	if p.n == 0 {
		return 0, p.err
	}
	n := len(d)
	if n > p.n {
		n = p.n
	}
	n, _ = br.Read(d[:n])
	p.total += int64(n)
	p.n -= n
	if p.n == 0 {
		// All identified body data consumed; surface the pending error.
		return n, p.err
	}
	return n, nil
}
// scanUntilBoundary scans buf to identify how much of it can be safely
// returned as part of the Part body.
// dashBoundary is "--boundary".
// nlDashBoundary is "\r\n--boundary" or "\n--boundary", depending on what mode we are in.
// The comments below (and the name) assume "\n--boundary", but either is accepted.
// total is the number of bytes read out so far. If total == 0, then a leading "--boundary" is recognized.
// readErr is the read error, if any, that followed reading the bytes in buf.
// scanUntilBoundary returns the number of data bytes from buf that can be
// returned as part of the Part body and also the error to return (if any)
// once those data bytes are done.
func scanUntilBoundary(buf, dashBoundary, nlDashBoundary []byte, total int64, readErr error) (int, error) {
	if total == 0 {
		// At beginning of body, allow dashBoundary.
		if bytes.HasPrefix(buf, dashBoundary) {
			switch matchAfterPrefix(buf, dashBoundary, readErr) {
			case -1:
				// Not actually a boundary: the prefix bytes are body data.
				return len(dashBoundary), nil
			case 0:
				// Undecided; caller must supply more input.
				return 0, nil
			case +1:
				// Boundary at the very start: the body is empty.
				return 0, io.EOF
			}
		}
		if bytes.HasPrefix(dashBoundary, buf) {
			// buf could still grow into a leading boundary; wait for more input.
			return 0, readErr
		}
	}

	// Search for "\n--boundary".
	if i := bytes.Index(buf, nlDashBoundary); i >= 0 {
		switch matchAfterPrefix(buf[i:], nlDashBoundary, readErr) {
		case -1:
			// False alarm: the would-be boundary is body data.
			return i + len(nlDashBoundary), nil
		case 0:
			// Undecided; return only the data before the candidate boundary.
			return i, nil
		case +1:
			// Boundary found: the body ends just before it.
			return i, io.EOF
		}
	}
	if bytes.HasPrefix(nlDashBoundary, buf) {
		// All of buf could still turn out to be a boundary; wait for more input.
		return 0, readErr
	}

	// Otherwise, anything up to the final \n is not part of the boundary
	// and so must be part of the body.
	// Also if the section from the final \n onward is not a prefix of the boundary,
	// it too must be part of the body.
	i := bytes.LastIndexByte(buf, nlDashBoundary[0])
	if i >= 0 && bytes.HasPrefix(nlDashBoundary, buf[i:]) {
		return i, nil
	}
	return len(buf), readErr
}
// matchAfterPrefix checks whether buf should be considered to match the boundary.
// The prefix is "--boundary" or "\r\n--boundary" or "\n--boundary",
// and the caller has verified already that bytes.HasPrefix(buf, prefix) is true.
//
// matchAfterPrefix returns +1 if the buffer does match the boundary,
// meaning the prefix is followed by a double dash, space, tab, cr, nl,
// or end of input.
// It returns -1 if the buffer definitely does NOT match the boundary,
// meaning the prefix is followed by some other character.
// For example, "--foobar" does not match "--foo".
// It returns 0 if more input needs to be read to make the decision,
// meaning that len(buf) == len(prefix) and readErr == nil.
func matchAfterPrefix(buf, prefix []byte, readErr error) int {
	rest := buf[len(prefix):]
	if len(rest) == 0 {
		// Nothing after the prefix: end of input means a match;
		// otherwise the decision needs more data.
		if readErr != nil {
			return +1
		}
		return 0
	}
	switch rest[0] {
	case ' ', '\t', '\r', '\n':
		return +1
	case '-':
		// Might be the terminating "--"; the next byte decides.
		if len(rest) == 1 {
			if readErr != nil {
				// Prefix + "-" at end of input does not match.
				return -1
			}
			return 0
		}
		if rest[1] == '-' {
			return +1
		}
	}
	return -1
}
// Close discards the remainder of the part's body so the enclosing Reader
// is positioned for the next boundary. It always reports success.
func (p *Part) Close() error {
	// Best effort: any read error is already recorded on p by Read.
	_, _ = io.Copy(io.Discard, p)
	return nil
}
// Reader is an iterator over parts in a MIME multipart body.
// Reader's underlying parser consumes its input as needed. Seeking
// isn't supported.
type Reader struct {
	// bufReader is the buffered source of the multipart body.
	bufReader *bufio.Reader
	tempDir   string // used in tests

	// currentPart is the last part returned; it is drained before the
	// next part is parsed.
	currentPart *Part
	// partsRead counts the parts returned so far.
	partsRead int

	nl               []byte // "\r\n" or "\n" (set after seeing first boundary line)
	nlDashBoundary   []byte // nl + "--boundary"
	dashBoundaryDash []byte // "--boundary--"
	dashBoundary     []byte // "--boundary"
}
// maxMIMEHeaderSize is the maximum size of a MIME header we will parse,
// including header keys, values, and map overhead.
const maxMIMEHeaderSize = 10 << 20
// multipartmaxheaders is the maximum number of header entries NextPart will return,
// as well as the maximum combined total of header entries Reader.ReadForm will return
// in FileHeaders.
var multipartmaxheaders = godebug.New("multipartmaxheaders")
func maxMIMEHeaders() int64 {
if s := multipartmaxheaders.Value(); s != "" {
if v, err := strconv.ParseInt(s, 10, 64); err == nil && v >= 0 {
multipartmaxheaders.IncNonDefault()
return v
}
}
return 10000
}
// NextPart returns the next part in the multipart or an error.
// When there are no more parts, the error [io.EOF] is returned.
//
// As a special case, if the "Content-Transfer-Encoding" header
// has a value of "quoted-printable", that header is instead
// hidden and the body is transparently decoded during Read calls.
func (r *Reader) NextPart() (*Part, error) {
	headerLimit := maxMIMEHeaders()
	return r.nextPart(false, maxMIMEHeaderSize, headerLimit)
}
// NextRawPart returns the next part in the multipart or an error.
// When there are no more parts, the error [io.EOF] is returned.
//
// Unlike [Reader.NextPart], it does not have special handling for
// "Content-Transfer-Encoding: quoted-printable".
func (r *Reader) NextRawPart() (*Part, error) {
	headerLimit := maxMIMEHeaders()
	return r.nextPart(true, maxMIMEHeaderSize, headerLimit)
}
// nextPart implements NextPart (rawPart=false) and NextRawPart
// (rawPart=true): it drains the current part, scans forward line by line
// to the next boundary delimiter, and returns a Part for the section that
// follows it. io.EOF signals the final "--boundary--" line was reached.
func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize, maxMIMEHeaders int64) (*Part, error) {
	if r.currentPart != nil {
		// Advance past any unread remainder of the previous part.
		r.currentPart.Close()
	}
	if string(r.dashBoundary) == "--" {
		return nil, fmt.Errorf("multipart: boundary is empty")
	}
	expectNewPart := false
	for {
		line, err := r.bufReader.ReadSlice('\n')

		if err == io.EOF && r.isFinalBoundary(line) {
			// If the buffer ends in "--boundary--" without the
			// trailing "\r\n", ReadSlice will return an error
			// (since it's missing the '\n'), but this is a valid
			// multipart EOF so we need to return io.EOF instead of
			// a fmt-wrapped one.
			return nil, io.EOF
		}
		if err != nil {
			return nil, fmt.Errorf("multipart: NextPart: %w", err)
		}

		if r.isBoundaryDelimiterLine(line) {
			r.partsRead++
			bp, err := newPart(r, rawPart, maxMIMEHeaderSize, maxMIMEHeaders)
			if err != nil {
				return nil, err
			}
			r.currentPart = bp
			return bp, nil
		}

		if r.isFinalBoundary(line) {
			// Expected EOF
			return nil, io.EOF
		}

		if expectNewPart {
			return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line))
		}

		if r.partsRead == 0 {
			// skip line: preamble before the first boundary is ignored.
			continue
		}

		// Consume the "\n" or "\r\n" separator between the
		// body of the previous part and the boundary line we
		// now expect will follow. (either a new part or the
		// end boundary)
		if bytes.Equal(line, r.nl) {
			expectNewPart = true
			continue
		}

		return nil, fmt.Errorf("multipart: unexpected line in Next(): %q", line)
	}
}
// isFinalBoundary reports whether line is the final boundary line
// indicating that all parts are over.
// It matches `^--boundary--[ \t]*(\r\n)?$`
func (r *Reader) isFinalBoundary(line []byte) bool {
	rest, found := bytes.CutPrefix(line, r.dashBoundaryDash)
	if !found {
		return false
	}
	rest = skipLWSPChar(rest)
	// Accept either end-of-input or the trailing newline sequence.
	return len(rest) == 0 || bytes.Equal(rest, r.nl)
}
// isBoundaryDelimiterLine reports whether line is a boundary delimiter
// line for this Reader: "--boundary", optional linear whitespace, and a
// terminating newline. As a side effect, the first boundary line seen
// decides whether the message uses "\r\n" or bare "\n" line endings.
func (r *Reader) isBoundaryDelimiterLine(line []byte) bool {
	// https://tools.ietf.org/html/rfc2046#section-5.1
	// The boundary delimiter line is then defined as a line
	// consisting entirely of two hyphen characters ("-",
	// decimal value 45) followed by the boundary parameter
	// value from the Content-Type header field, optional linear
	// whitespace, and a terminating CRLF.
	rest, found := bytes.CutPrefix(line, r.dashBoundary)
	if !found {
		return false
	}
	rest = skipLWSPChar(rest)

	// On the first part, see whether our lines end in \n instead of \r\n
	// and switch into that mode if so. This is a violation of the spec,
	// but occurs in practice.
	if r.partsRead == 0 && len(rest) == 1 && rest[0] == '\n' {
		r.nl = r.nl[1:]
		r.nlDashBoundary = r.nlDashBoundary[1:]
	}
	return bytes.Equal(rest, r.nl)
}
// skipLWSPChar returns b with leading spaces and tabs removed.
// RFC 822 defines:
//
//	LWSP-char = SPACE / HTAB
func skipLWSPChar(b []byte) []byte {
	i := 0
	for i < len(b) && (b[i] == ' ' || b[i] == '\t') {
		i++
	}
	return b[i:]
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package multipart
import (
"bytes"
"crypto/rand"
"errors"
"fmt"
"io"
"maps"
"net/textproto"
"slices"
"strings"
)
// A Writer generates multipart messages.
type Writer struct {
	// w is the destination for the encoded message.
	w io.Writer
	// boundary is the separator between parts; see SetBoundary.
	boundary string
	// lastpart is the most recently created part, if any; it is closed
	// when the next part (or the whole message) is finished.
	lastpart *part
}
// NewWriter returns a new multipart [Writer] with a random boundary,
// writing to w.
func NewWriter(w io.Writer) *Writer {
	mw := &Writer{w: w}
	mw.boundary = randomBoundary()
	return mw
}
// Boundary returns the [Writer]'s boundary: either the random boundary
// chosen by NewWriter or the value installed via SetBoundary.
func (w *Writer) Boundary() string {
	return w.boundary
}
// SetBoundary overrides the [Writer]'s default randomly-generated
// boundary separator with an explicit value.
//
// SetBoundary must be called before any parts are created, may only
// contain certain ASCII characters, and must be non-empty and
// at most 70 bytes long.
func (w *Writer) SetBoundary(boundary string) error {
	if w.lastpart != nil {
		return errors.New("mime: SetBoundary called after write")
	}
	// rfc2046#section-5.1.1
	if len(boundary) < 1 || len(boundary) > 70 {
		return errors.New("mime: invalid boundary length")
	}
	for i := 0; i < len(boundary); i++ {
		b := boundary[i]
		switch {
		case 'A' <= b && b <= 'Z', 'a' <= b && b <= 'z', '0' <= b && b <= '9':
			// Alphanumeric is always allowed.
		case strings.ContainsRune(`'()+_,-./:=?`, rune(b)):
			// Punctuation permitted by RFC 2046.
		case b == ' ' && i != len(boundary)-1:
			// Space is allowed anywhere except as the final character.
		default:
			return errors.New("mime: invalid boundary character")
		}
	}
	w.boundary = boundary
	return nil
}
// FormDataContentType returns the Content-Type for an HTTP
// multipart/form-data with this [Writer]'s Boundary.
func (w *Writer) FormDataContentType() string {
	// We must quote the boundary if it contains any of the
	// tspecials characters defined by RFC 2045, or space.
	if strings.ContainsAny(w.boundary, `()<>@,;:\"/[]?= `) {
		return `multipart/form-data; boundary="` + w.boundary + `"`
	}
	return "multipart/form-data; boundary=" + w.boundary
}
func randomBoundary() string {
var buf [30]byte
_, err := io.ReadFull(rand.Reader, buf[:])
if err != nil {
panic(err)
}
return fmt.Sprintf("%x", buf[:])
}
// CreatePart creates a new multipart section with the provided
// header. The body of the part should be written to the returned
// [Writer]. After calling CreatePart, any previous part may no longer
// be written to.
func (w *Writer) CreatePart(header textproto.MIMEHeader) (io.Writer, error) {
	// Finish the previous part, if any, before starting a new one.
	first := w.lastpart == nil
	if !first {
		if err := w.lastpart.close(); err != nil {
			return nil, err
		}
	}

	var b bytes.Buffer
	if first {
		fmt.Fprintf(&b, "--%s\r\n", w.boundary)
	} else {
		fmt.Fprintf(&b, "\r\n--%s\r\n", w.boundary)
	}

	// Emit headers sorted by key for deterministic output.
	for _, k := range slices.Sorted(maps.Keys(header)) {
		for _, v := range header[k] {
			fmt.Fprintf(&b, "%s: %s\r\n", k, v)
		}
	}
	b.WriteString("\r\n")

	if _, err := io.Copy(w.w, &b); err != nil {
		return nil, err
	}
	p := &part{mw: w}
	w.lastpart = p
	return p, nil
}
// quoteEscaper rewrites characters that cannot appear verbatim inside a
// quoted-string parameter value.
var quoteEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\r", "%0D", "\n", "%0A")

// escapeQuotes escapes special characters in field parameter values.
//
// For historical reasons, this uses \ escaping for " and \ characters,
// and percent encoding for CR and LF.
//
// The WhatWG specification for form data encoding suggests that we should
// use percent encoding for " (%22), and should not escape \.
// https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#multipart/form-data-encoding-algorithm
//
// Empirically, as of the time this comment was written, it is necessary
// to escape \ characters or else Chrome (and possibly other browsers) will
// interpret the unescaped \ as an escape.
func escapeQuotes(s string) string {
	return quoteEscaper.Replace(s)
}
// CreateFormFile is a convenience wrapper around [Writer.CreatePart]. It creates
// a new form-data header with the provided field name and file name.
func (w *Writer) CreateFormFile(fieldname, filename string) (io.Writer, error) {
	h := textproto.MIMEHeader{}
	h.Set("Content-Disposition", FileContentDisposition(fieldname, filename))
	h.Set("Content-Type", "application/octet-stream")
	return w.CreatePart(h)
}
// CreateFormField calls [Writer.CreatePart] with a header using the
// given field name.
func (w *Writer) CreateFormField(fieldname string) (io.Writer, error) {
	h := textproto.MIMEHeader{}
	disposition := fmt.Sprintf(`form-data; name="%s"`, escapeQuotes(fieldname))
	h.Set("Content-Disposition", disposition)
	return w.CreatePart(h)
}
// FileContentDisposition returns the value of a Content-Disposition header
// with the provided field name and file name.
func FileContentDisposition(fieldname, filename string) string {
return fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
escapeQuotes(fieldname), escapeQuotes(filename))
}
// WriteField calls [Writer.CreateFormField] and then writes the given value.
func (w *Writer) WriteField(fieldname, value string) error {
	field, err := w.CreateFormField(fieldname)
	if err == nil {
		_, err = field.Write([]byte(value))
	}
	return err
}
// Close finishes the multipart message and writes the trailing
// boundary end line to the output.
func (w *Writer) Close() error {
	if last := w.lastpart; last != nil {
		if err := last.close(); err != nil {
			return err
		}
		w.lastpart = nil
	}
	_, err := fmt.Fprintf(w.w, "\r\n--%s--\r\n", w.boundary)
	return err
}
// part is the io.Writer returned by CreatePart; writes go straight
// through to the parent Writer's underlying stream.
type part struct {
	mw     *Writer
	closed bool  // set once the next part (or the message) is started
	we     error // last error that occurred writing
}

// close marks the part finished, so subsequent Writes fail, and reports
// the last write error (if any) so it is not lost.
func (p *part) close() error {
	p.closed = true
	return p.we
}
// Write appends d to the multipart message, failing if this part has
// already been finished by a later CreatePart or Close.
func (p *part) Write(d []byte) (int, error) {
	if p.closed {
		return 0, errors.New("multipart: can't write to finished part")
	}
	n, err := p.mw.w.Write(d)
	if err != nil {
		// Remember the failure so part.close can report it.
		p.we = err
	}
	return n, err
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package quotedprintable implements quoted-printable encoding as specified by
// RFC 2045.
package quotedprintable
import (
"bufio"
"bytes"
"fmt"
"io"
)
// Reader is a quoted-printable decoder.
type Reader struct {
	// br is the buffered source of encoded input.
	br   *bufio.Reader
	rerr error  // last read error
	line []byte // to be consumed before more of br
}
// NewReader returns a quoted-printable reader, decoding from r.
func NewReader(r io.Reader) *Reader {
	qr := new(Reader)
	qr.br = bufio.NewReader(r)
	return qr
}
// fromHex decodes a single hex digit. Lowercase digits are accepted even
// though RFC 2045 encoders should emit uppercase.
func fromHex(b byte) (byte, error) {
	if b >= '0' && b <= '9' {
		return b - '0', nil
	}
	if b >= 'A' && b <= 'F' {
		return b - 'A' + 10, nil
	}
	// Accept badly encoded bytes.
	if b >= 'a' && b <= 'f' {
		return b - 'a' + 10, nil
	}
	return 0, fmt.Errorf("quotedprintable: invalid hex byte 0x%02x", b)
}
// readHexByte decodes the two hex digits at the start of v into one byte.
// It returns io.ErrUnexpectedEOF when fewer than two bytes are available.
func readHexByte(v []byte) (byte, error) {
	if len(v) < 2 {
		return 0, io.ErrUnexpectedEOF
	}
	hi, err := fromHex(v[0])
	if err != nil {
		return 0, err
	}
	lo, err := fromHex(v[1])
	if err != nil {
		return 0, err
	}
	return hi<<4 | lo, nil
}
// isQPDiscardWhitespace reports whether r is whitespace that may be
// stripped from the end of an encoded line before decoding.
func isQPDiscardWhitespace(r rune) bool {
	return r == '\n' || r == '\r' || r == ' ' || r == '\t'
}
var (
	crlf       = []byte("\r\n")
	lf         = []byte("\n")
	softSuffix = []byte("=") // a trailing "=" marks a soft line break
	lwspChar   = " \t"       // linear whitespace (RFC 822 LWSP-char)
)
// Read reads and decodes quoted-printable data from the underlying reader.
func (r *Reader) Read(p []byte) (n int, err error) {
	// Deviations from RFC 2045:
	// 1. in addition to "=\r\n", "=\n" is also treated as soft line break.
	// 2. it will pass through a '\r' or '\n' not preceded by '=', consistent
	//    with other broken QP encoders & decoders.
	// 3. it accepts soft line-break (=) at end of message (issue 15486); i.e.
	//    the final byte read from the underlying reader is allowed to be '=',
	//    and it will be silently ignored.
	// 4. it takes = as literal = if not followed by two hex digits
	//    but not at end of line (issue 13219).
	for len(p) > 0 {
		// Refill r.line from the buffered reader when it is exhausted.
		if len(r.line) == 0 {
			if r.rerr != nil {
				return n, r.rerr
			}
			r.line, r.rerr = r.br.ReadSlice('\n')

			// Does the line end in CRLF instead of just LF?
			hasLF := bytes.HasSuffix(r.line, lf)
			hasCR := bytes.HasSuffix(r.line, crlf)
			wholeLine := r.line
			// Strip trailing whitespace; it is either a soft break's tail
			// or insignificant padding.
			r.line = bytes.TrimRightFunc(wholeLine, isQPDiscardWhitespace)
			if bytes.HasSuffix(r.line, softSuffix) {
				// Soft line break: drop the "=" and validate that only a
				// newline (or end of message, deviation 3) followed it.
				rightStripped := bytes.TrimLeft(wholeLine[len(r.line):], lwspChar)
				r.line = r.line[:len(r.line)-1]
				if !bytes.HasPrefix(rightStripped, lf) && !bytes.HasPrefix(rightStripped, crlf) &&
					!(len(rightStripped) == 0 && len(r.line) > 0 && r.rerr == io.EOF) {
					r.rerr = fmt.Errorf("quotedprintable: invalid bytes after =: %q", rightStripped)
				}
			} else if hasLF {
				// Hard line break: restore the newline that trimming removed.
				if hasCR {
					r.line = append(r.line, '\r', '\n')
				} else {
					r.line = append(r.line, '\n')
				}
			}
			continue
		}
		b := r.line[0]

		switch {
		case b == '=':
			b, err = readHexByte(r.line[1:])
			if err != nil {
				if len(r.line) >= 2 && r.line[1] != '\r' && r.line[1] != '\n' {
					// Take the = as a literal =.
					b = '='
					break
				}
				return n, err
			}
			r.line = r.line[2:] // 2 of the 3; other 1 is done below
		case b == '\t' || b == '\r' || b == '\n':
			break
		case b >= 0x80:
			// As an extension to RFC 2045, we accept
			// values >= 0x80 without complaint. Issue 22597.
			break
		case b < ' ' || b > '~':
			return n, fmt.Errorf("quotedprintable: invalid unescaped byte 0x%02x in body", b)
		}
		p[0] = b
		p = p[1:]
		r.line = r.line[1:]
		n++
	}
	return n, nil
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package quotedprintable
import "io"
// lineMaxLen is the maximum encoded line length before a soft line break
// is inserted.
const lineMaxLen = 76

// A Writer is a quoted-printable writer that implements [io.WriteCloser].
type Writer struct {
	// Binary mode treats the writer's input as pure binary and processes end of
	// line bytes as binary data.
	Binary bool

	// w is the destination for encoded output.
	w io.Writer
	// i is the number of bytes currently buffered in line.
	i int
	// line buffers the current output line: up to 76 characters plus
	// room for a trailing "\r\n".
	line [78]byte
	// cr records whether the previous byte handled by write was '\r',
	// so a following '\n' does not produce a second CRLF.
	cr bool
}
// NewWriter returns a new [Writer] that writes to w.
func NewWriter(w io.Writer) *Writer {
	qw := &Writer{w: w}
	return qw
}
// Write encodes p using quoted-printable encoding and writes it to the
// underlying [io.Writer]. It limits line length to 76 characters. The encoded
// bytes are not necessarily flushed until the [Writer] is closed.
func (w *Writer) Write(p []byte) (n int, err error) {
	// n tracks the start of the current run of pass-through bytes; runs
	// are handed to w.write in batches between escaped bytes.
	for i, b := range p {
		switch {
		// Simple writes are done in batch.
		case b >= '!' && b <= '~' && b != '=':
			continue
		case isWhitespace(b) || !w.Binary && (b == '\n' || b == '\r'):
			continue
		}

		// b needs escaping: first flush the pass-through run p[n:i].
		if i > n {
			if err := w.write(p[n:i]); err != nil {
				return n, err
			}
			n = i
		}

		if err := w.encode(b); err != nil {
			return n, err
		}
		n++
	}

	if n == len(p) {
		return n, nil
	}

	// Flush the trailing run of pass-through bytes.
	if err := w.write(p[n:]); err != nil {
		return n, err
	}

	return len(p), nil
}
// Close closes the [Writer], flushing any unwritten data to the underlying
// [io.Writer], but does not close the underlying io.Writer.
func (w *Writer) Close() error {
	// A trailing space or tab must be escaped before the final flush.
	err := w.checkLastByte()
	if err == nil {
		err = w.flush()
	}
	return err
}
// write limits text encoded in quoted-printable to 76 characters per line.
// It buffers pass-through bytes into w.line, inserting soft line breaks as
// needed and translating line endings (CR, LF, CRLF all become CRLF).
func (w *Writer) write(p []byte) error {
	for _, b := range p {
		if b == '\n' || b == '\r' {
			// If the previous byte was \r, the CRLF has already been inserted.
			if w.cr && b == '\n' {
				w.cr = false
				continue
			}

			if b == '\r' {
				w.cr = true
			}

			// A space or tab cannot be left hanging before a hard break.
			if err := w.checkLastByte(); err != nil {
				return err
			}
			if err := w.insertCRLF(); err != nil {
				return err
			}
			continue
		}

		// Keep one column free for a trailing "=" soft-break marker.
		if w.i == lineMaxLen-1 {
			if err := w.insertSoftLineBreak(); err != nil {
				return err
			}
		}

		w.line[w.i] = b
		w.i++
		w.cr = false
	}

	return nil
}
// encode appends the three-byte "=XX" escape for b to the current line,
// inserting a soft line break first when the escape would not fit.
func (w *Writer) encode(b byte) error {
	if lineMaxLen-1-w.i < 3 {
		if err := w.insertSoftLineBreak(); err != nil {
			return err
		}
	}

	hi, lo := upperhex[b>>4], upperhex[b&0x0f]
	w.line[w.i] = '='
	w.line[w.i+1] = hi
	w.line[w.i+2] = lo
	w.i += 3

	return nil
}

// upperhex holds the uppercase digits used for "=XX" escapes.
const upperhex = "0123456789ABCDEF"
// checkLastByte encodes the last buffered byte if it is a space or a tab,
// since trailing whitespace may not appear literally at the end of a line.
func (w *Writer) checkLastByte() error {
	if w.i == 0 {
		return nil
	}
	last := w.line[w.i-1]
	if !isWhitespace(last) {
		return nil
	}
	// Remove the literal byte and re-emit it as "=20" or "=09".
	w.i--
	return w.encode(last)
}
// insertSoftLineBreak terminates the current line with "=" (a soft line
// break) followed by CRLF, flushing it so encoding continues on a new line.
func (w *Writer) insertSoftLineBreak() error {
	w.line[w.i] = '='
	w.i++
	return w.insertCRLF()
}

// insertCRLF appends "\r\n" to the buffered line and flushes it.
func (w *Writer) insertCRLF() error {
	w.line[w.i] = '\r'
	w.line[w.i+1] = '\n'
	w.i += 2
	return w.flush()
}

// flush writes the buffered line to the underlying writer and resets the
// buffer index.
func (w *Writer) flush() error {
	if _, err := w.w.Write(w.line[:w.i]); err != nil {
		return err
	}
	w.i = 0
	return nil
}
// isWhitespace reports whether b is a space or horizontal tab.
func isWhitespace(b byte) bool {
	switch b {
	case ' ', '\t':
		return true
	}
	return false
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package mime implements parts of the MIME spec.
package mime
import (
"fmt"
"slices"
"strings"
"sync"
)
var (
	// mimeTypes preserves each registered extension's original case;
	// mimeTypesLower is the lowercased index used by TypeByExtension's
	// case-insensitive fallback.
	mimeTypes      sync.Map // map[string]string; ".Z" => "application/x-compress"
	mimeTypesLower sync.Map // map[string]string; ".z" => "application/x-compress"

	// extensions maps from MIME type to list of lowercase file
	// extensions: "image/jpeg" => [".jfif", ".jpg", ".jpeg", ".pjp", ".pjpeg"]
	extensionsMu sync.Mutex // Guards stores (but not loads) on extensions.
	extensions   sync.Map   // map[string][]string; slice values are append-only.
)
// setMimeTypes is used by initMime's non-test path, and by tests.
// It replaces the current tables with entries derived from lowerExt
// (the lowercase index) and mixExt (the case-preserving index), and
// rebuilds the reverse MIME-type-to-extensions map. It panics if a
// supplied MIME type does not parse.
func setMimeTypes(lowerExt, mixExt map[string]string) {
	mimeTypes.Clear()
	mimeTypesLower.Clear()
	extensions.Clear()

	for ext, typ := range lowerExt {
		mimeTypesLower.Store(ext, typ)
	}
	for ext, typ := range mixExt {
		mimeTypes.Store(ext, typ)
	}

	extensionsMu.Lock()
	defer extensionsMu.Unlock()
	for ext, typ := range lowerExt {
		justType, _, err := ParseMediaType(typ)
		if err != nil {
			panic(err)
		}
		var exts []string
		if prev, ok := extensions.Load(justType); ok {
			exts = prev.([]string)
		}
		extensions.Store(justType, append(exts, ext))
	}
}
// A type is listed here if both Firefox and Chrome included them in their own
// lists. In the case where they contradict they are deconflicted using IANA's
// listed media types https://www.iana.org/assignments/media-types/media-types.xhtml
//
// Chrome's MIME mappings to file extensions are defined at
// https://chromium.googlesource.com/chromium/src.git/+/refs/heads/main/net/base/mime_util.cc
//
// Firefox's MIME types can be found at
// https://github.com/mozilla-firefox/firefox/blob/main/netwerk/mime/nsMimeTypes.h
// and the mappings to file extensions at
// https://github.com/mozilla-firefox/firefox/blob/main/uriloader/exthandler/nsExternalHelperAppService.cpp
// builtinTypesLower maps lowercase file extensions to MIME types.
// Keep the entries sorted by extension.
var builtinTypesLower = map[string]string{
	".ai":    "application/postscript",
	".apk":   "application/vnd.android.package-archive",
	".apng":  "image/apng",
	".avif":  "image/avif",
	".bin":   "application/octet-stream",
	".bmp":   "image/bmp",
	".com":   "application/octet-stream",
	".css":   "text/css; charset=utf-8",
	".csv":   "text/csv; charset=utf-8",
	".doc":   "application/msword",
	".docx":  "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
	".ehtml": "text/html; charset=utf-8",
	".eml":   "message/rfc822",
	".eps":   "application/postscript",
	".exe":   "application/octet-stream",
	".flac":  "audio/flac",
	".gif":   "image/gif",
	".gz":    "application/gzip",
	".htm":   "text/html; charset=utf-8",
	".html":  "text/html; charset=utf-8",
	".ico":   "image/vnd.microsoft.icon",
	".ics":   "text/calendar; charset=utf-8",
	".jfif":  "image/jpeg",
	".jpeg":  "image/jpeg",
	".jpg":   "image/jpeg",
	".js":    "text/javascript; charset=utf-8",
	".json":  "application/json",
	".m4a":   "audio/mp4",
	".mjs":   "text/javascript; charset=utf-8",
	".mp3":   "audio/mpeg",
	".mp4":   "video/mp4",
	".oga":   "audio/ogg",
	".ogg":   "audio/ogg",
	".ogv":   "video/ogg",
	".opus":  "audio/ogg",
	".pdf":   "application/pdf",
	".pjp":   "image/jpeg",
	".pjpeg": "image/jpeg",
	".png":   "image/png",
	".ppt":   "application/vnd.ms-powerpoint",
	".pptx":  "application/vnd.openxmlformats-officedocument.presentationml.presentation",
	".ps":    "application/postscript",
	".rdf":   "application/rdf+xml",
	".rtf":   "application/rtf",
	".shtml": "text/html; charset=utf-8",
	".svg":   "image/svg+xml",
	".text":  "text/plain; charset=utf-8",
	".tif":   "image/tiff",
	".tiff":  "image/tiff",
	".txt":   "text/plain; charset=utf-8",
	".vtt":   "text/vtt; charset=utf-8",
	".wasm":  "application/wasm",
	".wav":   "audio/wav",
	".weba":  "audio/webm",
	".webm":  "video/webm",
	".webp":  "image/webp",
	".xbl":   "text/xml; charset=utf-8",
	".xbm":   "image/x-xbitmap",
	".xht":   "application/xhtml+xml",
	".xhtml": "application/xhtml+xml",
	".xls":   "application/vnd.ms-excel",
	".xlsx":  "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
	".xml":   "text/xml; charset=utf-8",
	".xsl":   "text/xml; charset=utf-8",
	".zip":   "application/zip",
}
// once guards initMime.
var once sync.Once

// testInitMime, when non-nil, replaces default initialization in tests;
// osInitMime augments the tables with platform data (set by OS-specific files).
var testInitMime, osInitMime func()

// initMime populates the MIME tables, preferring the test hook when present.
func initMime() {
	if fn := testInitMime; fn != nil {
		fn()
		return
	}
	setMimeTypes(builtinTypesLower, builtinTypesLower)
	osInitMime()
}
// TypeByExtension returns the MIME type associated with the file extension ext.
// The extension ext should begin with a leading dot, as in ".html".
// When ext has no associated type, TypeByExtension returns "".
//
// Extensions are looked up first case-sensitively, then case-insensitively.
//
// The built-in table is small but on unix it is augmented by the local
// system's MIME-info database or mime.types file(s) if available under one or
// more of these names:
//
//	/usr/local/share/mime/globs2
//	/usr/share/mime/globs2
//	/etc/mime.types
//	/etc/apache2/mime.types
//	/etc/apache/mime.types
//	/etc/httpd/conf/mime.types
//
// On Windows, MIME types are extracted from the registry.
//
// Text types have the charset parameter set to "utf-8" by default.
func TypeByExtension(ext string) string {
	once.Do(initMime)

	// Case-sensitive lookup.
	if v, ok := mimeTypes.Load(ext); ok {
		return v.(string)
	}

	// Case-insensitive lookup.
	// Optimistically assume a short ASCII extension and be
	// allocation-free in that case.
	var buf [10]byte
	lower := buf[:0]
	const utf8RuneSelf = 0x80 // from utf8 package, but not importing it.
	for i := 0; i < len(ext); i++ {
		c := ext[i]
		if c >= utf8RuneSelf {
			// Slow path: non-ASCII extension; let strings.ToLower
			// handle the full Unicode case mapping.
			si, _ := mimeTypesLower.Load(strings.ToLower(ext))
			s, _ := si.(string)
			return s
		}
		if 'A' <= c && c <= 'Z' {
			lower = append(lower, c+('a'-'A'))
		} else {
			lower = append(lower, c)
		}
	}
	si, _ := mimeTypesLower.Load(string(lower))
	s, _ := si.(string)
	return s
}
// ExtensionsByType returns the extensions known to be associated with the MIME
// type typ. The returned extensions will each begin with a leading dot, as in
// ".html". When typ has no associated extensions, ExtensionsByType returns a
// nil slice.
//
// The built-in table is small but on unix it is augmented by the local
// system's MIME-info database or mime.types file(s) if available under one or
// more of these names:
//
//	/usr/local/share/mime/globs2
//	/usr/share/mime/globs2
//	/etc/mime.types
//	/etc/apache2/mime.types
//	/etc/apache/mime.types
//	/etc/httpd/conf/mime.types
//
// On Windows, extensions are extracted from the registry.
func ExtensionsByType(typ string) ([]string, error) {
	justType, _, err := ParseMediaType(typ)
	if err != nil {
		return nil, err
	}

	once.Do(initMime)
	v, ok := extensions.Load(justType)
	if !ok {
		return nil, nil
	}
	// Copy before sorting: the stored slice is shared and append-only.
	ret := slices.Clone(v.([]string))
	slices.Sort(ret)
	return ret, nil
}
// AddExtensionType sets the MIME type associated with
// the extension ext to typ. The extension should begin with
// a leading dot, as in ".html".
func AddExtensionType(ext, typ string) error {
if !strings.HasPrefix(ext, ".") {
return fmt.Errorf("mime: extension %q missing leading dot", ext)
}
once.Do(initMime)
return setExtensionType(ext, typ)
}
// setExtensionType registers mimeType as the type for extension in both the
// case-sensitive and lowercased lookup tables, and records the extension in
// the reverse (type -> extensions) table used by ExtensionsByType.
//
// For "text/" types with no explicit charset parameter, a "charset=utf-8"
// parameter is added before the type is stored.
func setExtensionType(extension, mimeType string) error {
	justType, param, err := ParseMediaType(mimeType)
	if err != nil {
		return err
	}
	if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
		param["charset"] = "utf-8"
		// Rebuild from justType, not the raw mimeType: if mimeType already
		// carries parameters (e.g. "text/html; foo=bar"), FormatMediaType
		// rejects it as an invalid token and returns "", which would store
		// an empty MIME type for this extension.
		mimeType = FormatMediaType(justType, param)
	}
	extLower := strings.ToLower(extension)

	mimeTypes.Store(extension, mimeType)
	mimeTypesLower.Store(extLower, mimeType)

	extensionsMu.Lock()
	defer extensionsMu.Unlock()
	var exts []string
	if ei, ok := extensions.Load(justType); ok {
		exts = ei.([]string)
	}
	for _, v := range exts {
		if v == extLower {
			// Extension already recorded for this type; nothing to do.
			return nil
		}
	}
	extensions.Store(justType, append(exts, extLower))
	return nil
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix || (js && wasm) || wasip1
package mime
import (
"bufio"
"os"
"strings"
)
// init wires the unix loader into the portable lazy-initialization path
// (invoked via once.Do(initMime)).
func init() {
	osInitMime = initMimeUnix
}
// See https://specifications.freedesktop.org/shared-mime-info-spec/shared-mime-info-spec-0.21.html
// for the FreeDesktop Shared MIME-info Database specification.
//
// Checked in order; the first file that loads successfully wins.
var mimeGlobs = []string{
	"/usr/local/share/mime/globs2",
	"/usr/share/mime/globs2",
}
// Common locations for mime.types files on unix.
// Used as a fallback when no globs2 database is found; all files are loaded.
var typeFiles = []string{
	"/etc/mime.types",
	"/etc/apache2/mime.types",
	"/etc/apache/mime.types",
	"/etc/httpd/conf/mime.types",
}
// loadMimeGlobsFile parses a FreeDesktop globs2 file and registers every
// simple "*.ext" glob it contains. It returns an error only if the file
// cannot be opened (so callers can fall through to the next candidate);
// a read error mid-scan is treated as fatal corruption and panics.
func loadMimeGlobsFile(filename string) error {
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// Each line should be of format: weight:mimetype:glob[:morefields...]
		fields := strings.Split(scanner.Text(), ":")
		// Length checks first so the byte indexing below cannot go out of
		// bounds; then skip comments and globs not shaped like "*.ext".
		if len(fields) < 3 || len(fields[0]) < 1 || len(fields[2]) < 3 {
			continue
		} else if fields[0][0] == '#' || fields[2][0] != '*' || fields[2][1] != '.' {
			continue
		}

		extension := fields[2][1:]
		if strings.ContainsAny(extension, "?*[") {
			// Not a bare extension, but a glob. Ignore for now:
			// - we do not have an implementation for this glob
			//   syntax (translation to path/filepath.Match could
			//   be possible)
			// - support for globs with weight ordering would have
			//   performance impact to all lookups to support the
			//   rarely seen glob entries
			// - trying to match glob metacharacters literally is
			//   not useful
			continue
		}
		if _, ok := mimeTypes.Load(extension); ok {
			// We've already seen this extension.
			// The file is in weight order, so we keep
			// the first entry that we see.
			continue
		}

		setExtensionType(extension, fields[1])
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	return nil
}
// loadMimeFile registers every extension listed in an Apache-style
// mime.types file. Missing or unreadable files are silently skipped;
// a read error mid-scan panics.
func loadMimeFile(filename string) {
	f, err := os.Open(filename)
	if err != nil {
		return
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		// Need a type plus at least one extension; skip comment lines.
		if len(fields) < 2 || fields[0][0] == '#' {
			continue
		}
		mimeType := fields[0]
		for _, ext := range fields[1:] {
			// A '#' starts a trailing comment; ignore the rest of the line.
			if ext[0] == '#' {
				break
			}
			setExtensionType("."+ext, mimeType)
		}
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
}
// initMimeUnix populates the MIME tables from the local system.
// It prefers the FreeDesktop shared MIME-info database; the first globs2
// file that loads successfully wins. Otherwise every classic mime.types
// file that exists is loaded.
func initMimeUnix() {
	for _, filename := range mimeGlobs {
		if loadMimeGlobsFile(filename) == nil {
			// Stop checking more files if mimetype database is found.
			return
		}
	}

	// Fallback if no system-generated mimetype database exists.
	for _, filename := range typeFiles {
		loadMimeFile(filename)
	}
}
// initMimeForTests redirects the system lookup paths to test fixtures and
// returns the extension->type mappings the tests expect to find afterwards.
func initMimeForTests() map[string]string {
	// An empty globs2 path forces the fallback to typeFiles.
	mimeGlobs = []string{""}
	typeFiles = []string{"testdata/test.types"}
	return map[string]string{
		".T1":  "application/test",
		".t2":  "text/test; charset=utf-8",
		".png": "image/png",
	}
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgi
import (
"fmt"
"io"
"maps"
"net/http"
"os"
"path"
"slices"
"strings"
"time"
)
// cgiMain dispatches the CGI helper binary: requests addressed to the
// known test paths are served by testCGI, everything else by the
// childCGIProcess integration-test handler.
func cgiMain() {
	switch path.Join(os.Getenv("SCRIPT_NAME"), os.Getenv("PATH_INFO")) {
	case "/bar", "/test.cgi", "/myscript/bar", "/test.cgi/extrapath":
		testCGI()
		return
	}
	childCGIProcess()
}
// testCGI is a CGI program translated from a Perl program to complete host_test.
// test cases in host_test should be provided by testCGI.
// It writes its response (headers then body) directly to stdout, driven by
// query parameters: "loc" issues a redirect, "writestderr" emits to stderr,
// and "bigresponse" streams a large body; otherwise it dumps params and env.
func testCGI() {
	req, err := Request()
	if err != nil {
		panic(err)
	}

	err = req.ParseForm()
	if err != nil {
		panic(err)
	}

	params := req.Form
	if params.Get("loc") != "" {
		// Redirect-only response: Location header, no body.
		fmt.Printf("Location: %s\r\n\r\n", params.Get("loc"))
		return
	}

	fmt.Printf("Content-Type: text/html\r\n")
	fmt.Printf("X-CGI-Pid: %d\r\n", os.Getpid())
	fmt.Printf("X-Test-Header: X-Test-Value\r\n")
	fmt.Printf("\r\n")

	if params.Get("writestderr") != "" {
		fmt.Fprintf(os.Stderr, "Hello, stderr!\n")
	}

	if params.Get("bigresponse") != "" {
		// 17 MB, for OS X: golang.org/issue/4958
		line := strings.Repeat("A", 1024)
		for i := 0; i < 17*1024; i++ {
			fmt.Printf("%s\r\n", line)
		}
		return
	}

	fmt.Printf("test=Hello CGI\r\n")

	// Sort keys so the output is deterministic for the test harness.
	for _, key := range slices.Sorted(maps.Keys(params)) {
		fmt.Printf("param-%s=%s\r\n", key, params.Get(key))
	}

	envs := envMap(os.Environ())
	for _, key := range slices.Sorted(maps.Keys(envs)) {
		fmt.Printf("env-%s=%s\r\n", key, envs[key])
	}

	cwd, _ := os.Getwd()
	fmt.Printf("cwd=%s\r\n", cwd)
}
// neverEnding is an infinite io.Reader that yields a single repeated byte.
type neverEnding byte

// Read fills p entirely with the receiver's byte value and never fails.
func (b neverEnding) Read(p []byte) (n int, err error) {
	fill := byte(b)
	for i := range p {
		p[i] = fill
	}
	return len(p), nil
}
// childCGIProcess is used by integration_test to complete unit tests.
// It serves exactly one CGI request from the environment and then exits.
// A few REQUEST_URI values short-circuit with deliberately malformed
// responses to exercise the host-side error paths.
func childCGIProcess() {
	if os.Getenv("REQUEST_METHOD") == "" {
		// Not in a CGI environment; skipping test.
		return
	}
	switch os.Getenv("REQUEST_URI") {
	case "/immediate-disconnect":
		os.Exit(0)
	case "/no-content-type":
		fmt.Printf("Content-Length: 6\n\nHello\n")
		os.Exit(0)
	case "/empty-headers":
		fmt.Printf("\nHello")
		os.Exit(0)
	}
	Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		if req.FormValue("nil-request-body") == "1" {
			fmt.Fprintf(rw, "nil-request-body=%v\n", req.Body == nil)
			return
		}
		rw.Header().Set("X-Test-Header", "X-Test-Value")
		req.ParseForm()
		if req.FormValue("no-body") == "1" {
			return
		}
		if eb, ok := req.Form["exact-body"]; ok {
			io.WriteString(rw, eb[0])
			return
		}
		if req.FormValue("write-forever") == "1" {
			io.Copy(rw, neverEnding('a'))
			for {
				time.Sleep(5 * time.Second) // hang forever, until killed
			}
		}
		fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n")
		for k, vv := range req.Form {
			for _, v := range vv {
				fmt.Fprintf(rw, "param-%s=%s\n", k, v)
			}
		}
		for _, kv := range os.Environ() {
			fmt.Fprintf(rw, "env-%s\n", kv)
		}
	}))
	os.Exit(0)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements CGI from the perspective of a child
// process.
package cgi
import (
"bufio"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
)
// Request returns the HTTP request as represented in the current
// environment. This assumes the current program is being run
// by a web server in a CGI environment.
// The returned Request's Body is populated, if applicable.
func Request() (*http.Request, error) {
	r, err := RequestFromMap(envMap(os.Environ()))
	if err != nil {
		return nil, err
	}
	if r.ContentLength <= 0 {
		// No declared body; leave Body nil.
		return r, nil
	}
	// Bound stdin by CONTENT_LENGTH so handlers cannot read past the body.
	r.Body = io.NopCloser(io.LimitReader(os.Stdin, r.ContentLength))
	return r, nil
}
// envMap converts a list of "key=value" strings (as returned by
// os.Environ) into a map. Entries without '=' are ignored.
func envMap(env []string) map[string]string {
	m := make(map[string]string, len(env))
	for _, entry := range env {
		key, value, found := strings.Cut(entry, "=")
		if !found {
			continue
		}
		m[key] = value
	}
	return m
}
// RequestFromMap creates an [http.Request] from CGI variables.
// The returned Request's Body field is not populated.
func RequestFromMap(params map[string]string) (*http.Request, error) {
	r := new(http.Request)
	r.Method = params["REQUEST_METHOD"]
	if r.Method == "" {
		return nil, errors.New("cgi: no REQUEST_METHOD in environment")
	}

	r.Proto = params["SERVER_PROTOCOL"]
	var ok bool
	if r.Proto == "INCLUDED" {
		// SSI (Server Side Include) use case
		// CGI Specification RFC 3875 - section 4.1.16
		r.ProtoMajor, r.ProtoMinor = 1, 0
	} else if r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto); !ok {
		return nil, errors.New("cgi: invalid SERVER_PROTOCOL version")
	}

	// CGI serves one request per process; the connection always closes.
	r.Close = true
	r.Trailer = http.Header{}
	r.Header = http.Header{}

	r.Host = params["HTTP_HOST"]

	if lenstr := params["CONTENT_LENGTH"]; lenstr != "" {
		clen, err := strconv.ParseInt(lenstr, 10, 64)
		if err != nil {
			return nil, errors.New("cgi: bad CONTENT_LENGTH in environment: " + lenstr)
		}
		r.ContentLength = clen
	}

	if ct := params["CONTENT_TYPE"]; ct != "" {
		r.Header.Set("Content-Type", ct)
	}

	// Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers
	for k, v := range params {
		// HTTP_HOST was already consumed into r.Host above.
		if k == "HTTP_HOST" {
			continue
		}
		if after, found := strings.CutPrefix(k, "HTTP_"); found {
			r.Header.Add(strings.ReplaceAll(after, "_", "-"), v)
		}
	}

	uriStr := params["REQUEST_URI"]
	if uriStr == "" {
		// Fallback to SCRIPT_NAME, PATH_INFO and QUERY_STRING.
		uriStr = params["SCRIPT_NAME"] + params["PATH_INFO"]
		s := params["QUERY_STRING"]
		if s != "" {
			uriStr += "?" + s
		}
	}

	// There's apparently a de-facto standard for this.
	// https://web.archive.org/web/20170105004655/http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636
	if s := params["HTTPS"]; s == "on" || s == "ON" || s == "1" {
		r.TLS = &tls.ConnectionState{HandshakeComplete: true}
	}

	if r.Host != "" {
		// Hostname is provided, so we can reasonably construct a URL.
		rawurl := r.Host + uriStr
		if r.TLS == nil {
			rawurl = "http://" + rawurl
		} else {
			rawurl = "https://" + rawurl
		}
		url, err := url.Parse(rawurl)
		if err != nil {
			return nil, errors.New("cgi: failed to parse host and REQUEST_URI into a URL: " + rawurl)
		}
		r.URL = url
	}
	// Fallback logic if we don't have a Host header or the URL
	// failed to parse
	if r.URL == nil {
		url, err := url.Parse(uriStr)
		if err != nil {
			return nil, errors.New("cgi: failed to parse REQUEST_URI into a URL: " + uriStr)
		}
		r.URL = url
	}

	// Request.RemoteAddr has its port set by Go's standard http
	// server, so we do here too.
	remotePort, _ := strconv.Atoi(params["REMOTE_PORT"]) // zero if unset or invalid
	r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], strconv.Itoa(remotePort))

	return r, nil
}
// Serve executes the provided [Handler] on the currently active CGI
// request, if any. If there's no current CGI environment
// an error is returned. The provided handler may be nil to use
// [http.DefaultServeMux].
func Serve(handler http.Handler) error {
	req, err := Request()
	if err != nil {
		return err
	}
	// Request() leaves Body nil when there is no declared content;
	// handlers expect a non-nil Body.
	if req.Body == nil {
		req.Body = http.NoBody
	}
	if handler == nil {
		handler = http.DefaultServeMux
	}
	rw := &response{
		req:    req,
		header: make(http.Header),
		bufw:   bufio.NewWriter(os.Stdout),
	}
	handler.ServeHTTP(rw, req)
	rw.Write(nil) // make sure a response is sent
	if err = rw.bufw.Flush(); err != nil {
		return err
	}
	return nil
}
// response is the http.ResponseWriter for a child CGI process: it writes
// a CGI response (Status line, headers, blank line, body) to stdout.
type response struct {
	req            *http.Request
	header         http.Header
	code           int  // status code passed to WriteHeader
	wroteHeader    bool // whether WriteHeader has been called
	wroteCGIHeader bool // whether the CGI header block has been flushed
	bufw           *bufio.Writer
}
// Flush implements [http.Flusher], pushing buffered output to stdout.
func (r *response) Flush() {
	r.bufw.Flush()
}
// Header implements [http.ResponseWriter].
func (r *response) Header() http.Header {
	return r.header
}
// Write implements [http.ResponseWriter]. The first call finalizes the
// status (defaulting to 200) and emits the CGI header block, sniffing p
// for a Content-Type if none was set.
func (r *response) Write(p []byte) (n int, err error) {
	if !r.wroteHeader {
		r.WriteHeader(http.StatusOK)
	}
	// Must run after WriteHeader so r.code is final.
	if !r.wroteCGIHeader {
		r.writeCGIHeader(p)
	}
	return r.bufw.Write(p)
}
// WriteHeader implements [http.ResponseWriter]. It only records the code;
// the header block itself is emitted lazily by writeCGIHeader.
func (r *response) WriteHeader(code int) {
	if r.wroteHeader {
		// Note: explicitly using Stderr, as Stdout is our HTTP output.
		fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL)
		return
	}
	r.wroteHeader = true
	r.code = code
}
// writeCGIHeader finalizes the header sent to the client and writes it to the output.
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly.
func (r *response) writeCGIHeader(p []byte) {
	if r.wroteCGIHeader {
		return
	}
	r.wroteCGIHeader = true
	fmt.Fprintf(r.bufw, "Status: %d %s\r\n", r.code, http.StatusText(r.code))
	if _, hasType := r.header["Content-Type"]; !hasType {
		r.header.Set("Content-Type", http.DetectContentType(p))
	}
	r.header.Write(r.bufw)
	r.bufw.WriteString("\r\n")
	r.bufw.Flush()
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements the host side of CGI (being the webserver
// parent process).
// Package cgi implements CGI (Common Gateway Interface) as specified
// in RFC 3875.
//
// Note that using CGI means starting a new process to handle each
// request, which is typically less efficient than using a
// long-running server. This package is intended primarily for
// compatibility with existing systems.
package cgi
import (
"bufio"
"fmt"
"io"
"log"
"net"
"net/http"
"net/textproto"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"golang.org/x/net/http/httpguts"
)
// trailingPort extracts a trailing ":port" from a Host header value.
var trailingPort = regexp.MustCompile(`:([0-9]+)$`)

// osDefaultInheritEnv lists environment variables that are always inherited
// by the child process on each OS, beyond what the user requests via
// Handler.InheritEnv (mostly dynamic-linker search paths).
var osDefaultInheritEnv = func() []string {
	switch runtime.GOOS {
	case "darwin", "ios":
		return []string{"DYLD_LIBRARY_PATH"}
	case "android", "linux", "freebsd", "netbsd", "openbsd":
		return []string{"LD_LIBRARY_PATH"}
	case "hpux":
		return []string{"LD_LIBRARY_PATH", "SHLIB_PATH"}
	case "irix":
		return []string{"LD_LIBRARY_PATH", "LD_LIBRARYN32_PATH", "LD_LIBRARY64_PATH"}
	case "illumos", "solaris":
		return []string{"LD_LIBRARY_PATH", "LD_LIBRARY_PATH_32", "LD_LIBRARY_PATH_64"}
	case "windows":
		return []string{"SystemRoot", "COMSPEC", "PATHEXT", "WINDIR"}
	}
	return nil
}()
// Handler runs an executable in a subprocess with a CGI environment.
type Handler struct {
	Path string // path to the CGI executable
	Root string // root URI prefix of handler or empty for "/"

	// Dir specifies the CGI executable's working directory.
	// If Dir is empty, the base directory of Path is used.
	// If Path has no base directory, the current working
	// directory is used.
	Dir string

	Env        []string    // extra environment variables to set, if any, as "key=value"
	InheritEnv []string    // environment variables to inherit from host, as "key"
	Logger     *log.Logger // optional log for errors or nil to use log.Print
	Args       []string    // optional arguments to pass to child process
	Stderr     io.Writer   // optional stderr for the child process; nil means os.Stderr

	// PathLocationHandler specifies the root http Handler that
	// should handle internal redirects when the CGI process
	// returns a Location header value starting with a "/", as
	// specified in RFC 3875 § 6.3.2. This will likely be
	// http.DefaultServeMux.
	//
	// If nil, a CGI response with a local URI path is instead sent
	// back to the client and not redirected internally.
	PathLocationHandler http.Handler
}
// stderr returns the destination for the child process's standard error,
// defaulting to the host process's os.Stderr.
func (h *Handler) stderr() io.Writer {
	if h.Stderr == nil {
		return os.Stderr
	}
	return h.Stderr
}
// removeLeadingDuplicates remove leading duplicate in environments.
// It's possible to override environment like following.
//
//	cgi.Handler{
//	  ...
//	  Env: []string{"SCRIPT_FILENAME=foo.php"},
//	}
func removeLeadingDuplicates(env []string) (ret []string) {
	for i, entry := range env {
		eq := strings.IndexByte(entry, '=')
		if eq == -1 {
			// No key part; never considered a duplicate.
			ret = append(ret, entry)
			continue
		}
		keyEq := entry[:eq+1] // "key="
		overridden := false
		for _, later := range env[i+1:] {
			if strings.HasPrefix(later, keyEq) {
				// A later entry sets the same key; drop this one.
				overridden = true
				break
			}
		}
		if !overridden {
			ret = append(ret, entry)
		}
	}
	return ret
}
// ServeHTTP runs the CGI executable for req: it builds the RFC 3875
// environment, starts the subprocess with the request body on stdin,
// parses the child's header block from stdout, and relays status,
// headers, and body to rw. Location headers beginning with "/" may be
// redirected internally via PathLocationHandler.
func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" {
		// CGI needs a known CONTENT_LENGTH; chunked bodies have none.
		rw.WriteHeader(http.StatusBadRequest)
		rw.Write([]byte("Chunked request bodies are not supported by CGI."))
		return
	}

	root := strings.TrimRight(h.Root, "/")
	pathInfo := strings.TrimPrefix(req.URL.Path, root)

	port := "80"
	if req.TLS != nil {
		port = "443"
	}
	if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 {
		port = matches[1]
	}

	env := []string{
		"SERVER_SOFTWARE=go",
		"SERVER_PROTOCOL=HTTP/1.1",
		"HTTP_HOST=" + req.Host,
		"GATEWAY_INTERFACE=CGI/1.1",
		"REQUEST_METHOD=" + req.Method,
		"QUERY_STRING=" + req.URL.RawQuery,
		"REQUEST_URI=" + req.URL.RequestURI(),
		"PATH_INFO=" + pathInfo,
		"SCRIPT_NAME=" + root,
		"SCRIPT_FILENAME=" + h.Path,
		"SERVER_PORT=" + port,
	}

	if remoteIP, remotePort, err := net.SplitHostPort(req.RemoteAddr); err == nil {
		env = append(env, "REMOTE_ADDR="+remoteIP, "REMOTE_HOST="+remoteIP, "REMOTE_PORT="+remotePort)
	} else {
		// could not parse ip:port, let's use whole RemoteAddr and leave REMOTE_PORT undefined
		env = append(env, "REMOTE_ADDR="+req.RemoteAddr, "REMOTE_HOST="+req.RemoteAddr)
	}

	if hostDomain, _, err := net.SplitHostPort(req.Host); err == nil {
		env = append(env, "SERVER_NAME="+hostDomain)
	} else {
		env = append(env, "SERVER_NAME="+req.Host)
	}

	if req.TLS != nil {
		env = append(env, "HTTPS=on")
	}

	// Copy request headers into HTTP_* variables.
	for k, v := range req.Header {
		k = strings.Map(upperCaseAndUnderscore, k)
		if k == "PROXY" {
			// See Issue 16405
			continue
		}
		joinStr := ", "
		if k == "COOKIE" {
			// Cookies are joined with "; " per RFC 6265.
			joinStr = "; "
		}
		env = append(env, "HTTP_"+k+"="+strings.Join(v, joinStr))
	}

	if req.ContentLength > 0 {
		env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength))
	}
	if ctype := req.Header.Get("Content-Type"); ctype != "" {
		env = append(env, "CONTENT_TYPE="+ctype)
	}

	envPath := os.Getenv("PATH")
	if envPath == "" {
		envPath = "/bin:/usr/bin:/usr/ucb:/usr/bsd:/usr/local/bin"
	}
	env = append(env, "PATH="+envPath)

	for _, e := range h.InheritEnv {
		if v := os.Getenv(e); v != "" {
			env = append(env, e+"="+v)
		}
	}

	for _, e := range osDefaultInheritEnv {
		if v := os.Getenv(e); v != "" {
			env = append(env, e+"="+v)
		}
	}

	if h.Env != nil {
		env = append(env, h.Env...)
	}

	// Later entries (user-provided h.Env) win over earlier ones.
	env = removeLeadingDuplicates(env)

	var cwd, path string
	if h.Dir != "" {
		path = h.Path
		cwd = h.Dir
	} else {
		cwd, path = filepath.Split(h.Path)
	}
	if cwd == "" {
		cwd = "."
	}

	internalError := func(err error) {
		rw.WriteHeader(http.StatusInternalServerError)
		h.printf("CGI error: %v", err)
	}

	cmd := &exec.Cmd{
		Path:   path,
		Args:   append([]string{h.Path}, h.Args...),
		Dir:    cwd,
		Env:    env,
		Stderr: h.stderr(),
	}
	if req.ContentLength != 0 {
		cmd.Stdin = req.Body
	}
	stdoutRead, err := cmd.StdoutPipe()
	if err != nil {
		internalError(err)
		return
	}

	err = cmd.Start()
	if err != nil {
		internalError(err)
		return
	}
	if hook := testHookStartProcess; hook != nil {
		hook(cmd.Process)
	}
	defer cmd.Wait()
	defer stdoutRead.Close()

	linebody := bufio.NewReaderSize(stdoutRead, 1024)
	headers := make(http.Header)
	statusCode := 0
	headerLines := 0
	sawBlankLine := false
	// Parse the child's CGI header block: "Header: value" lines
	// terminated by a blank line. "Status" is handled specially.
	for {
		line, isPrefix, err := linebody.ReadLine()
		if isPrefix {
			rw.WriteHeader(http.StatusInternalServerError)
			h.printf("cgi: long header line from subprocess.")
			return
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			rw.WriteHeader(http.StatusInternalServerError)
			h.printf("cgi: error reading headers: %v", err)
			return
		}
		if len(line) == 0 {
			sawBlankLine = true
			break
		}
		headerLines++
		header, val, ok := strings.Cut(string(line), ":")
		if !ok {
			h.printf("cgi: bogus header line: %s", line)
			continue
		}
		if !httpguts.ValidHeaderFieldName(header) {
			h.printf("cgi: invalid header name: %q", header)
			continue
		}
		val = textproto.TrimString(val)
		switch {
		case header == "Status":
			if len(val) < 3 {
				h.printf("cgi: bogus status (short): %q", val)
				return
			}
			code, err := strconv.Atoi(val[0:3])
			if err != nil {
				h.printf("cgi: bogus status: %q", val)
				h.printf("cgi: line was %q", line)
				return
			}
			statusCode = code
		default:
			headers.Add(header, val)
		}
	}
	if headerLines == 0 || !sawBlankLine {
		rw.WriteHeader(http.StatusInternalServerError)
		h.printf("cgi: no headers")
		return
	}

	if loc := headers.Get("Location"); loc != "" {
		if strings.HasPrefix(loc, "/") && h.PathLocationHandler != nil {
			// Local redirect per RFC 3875 § 6.3.2.
			h.handleInternalRedirect(rw, req, loc)
			return
		}
		if statusCode == 0 {
			statusCode = http.StatusFound
		}
	}

	if statusCode == 0 && headers.Get("Content-Type") == "" {
		rw.WriteHeader(http.StatusInternalServerError)
		h.printf("cgi: missing required Content-Type in headers")
		return
	}

	if statusCode == 0 {
		statusCode = http.StatusOK
	}

	// Copy headers to rw's headers, after we've decided not to
	// go into handleInternalRedirect, which won't want its rw
	// headers to have been touched.
	for k, vv := range headers {
		for _, v := range vv {
			rw.Header().Add(k, v)
		}
	}

	rw.WriteHeader(statusCode)

	_, err = io.Copy(rw, linebody)
	if err != nil {
		h.printf("cgi: copy error: %v", err)
		// And kill the child CGI process so we don't hang on
		// the deferred cmd.Wait above if the error was just
		// the client (rw) going away. If it was a read error
		// (because the child died itself), then the extra
		// kill of an already-dead process is harmless (the PID
		// won't be reused until the Wait above).
		cmd.Process.Kill()
	}
}
// printf logs through h.Logger when one is configured, otherwise via the
// standard logger.
func (h *Handler) printf(format string, v ...any) {
	logf := log.Printf
	if h.Logger != nil {
		logf = h.Logger.Printf
	}
	logf(format, v...)
}
// handleInternalRedirect serves a local redirect (a Location header value
// starting with "/") by synthesizing a fresh GET request for the target
// path and dispatching it to h.PathLocationHandler, per RFC 3875 § 6.3.2.
func (h *Handler) handleInternalRedirect(rw http.ResponseWriter, req *http.Request, path string) {
	url, err := req.URL.Parse(path)
	if err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		h.printf("cgi: error resolving local URI path %q: %v", path, err)
		return
	}
	// TODO: RFC 3875 isn't clear if only GET is supported, but it
	// suggests so: "Note that any message-body attached to the
	// request (such as for a POST request) may not be available
	// to the resource that is the target of the redirect."  We
	// should do some tests against Apache to see how it handles
	// POST, HEAD, etc. Does the internal redirect get the same
	// method or just GET? What about incoming headers?
	// (e.g. Cookies) Which headers, if any, are copied into the
	// second request?
	newReq := &http.Request{
		Method:     "GET",
		URL:        url,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Host:       url.Host,
		RemoteAddr: req.RemoteAddr,
		TLS:        req.TLS,
	}
	h.PathLocationHandler.ServeHTTP(rw, newReq)
}
// upperCaseAndUnderscore maps a header-name rune to its CGI
// meta-variable form: lowercase ASCII letters are uppercased, and both
// '-' and '=' become '_' ('=' would otherwise corrupt the "key=value"
// environment entries Go builds).
func upperCaseAndUnderscore(r rune) rune {
	if 'a' <= r && r <= 'z' {
		return r + ('A' - 'a')
	}
	if r == '-' || r == '=' {
		return '_'
	}
	// TODO: other transformations in spec or practice?
	return r
}
// testHookStartProcess, when non-nil, is invoked with the child process
// right after it starts. nil except for some tests.
var testHookStartProcess func(*os.Process) // nil except for some tests
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package httptest provides utilities for HTTP testing.
package httptest
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"io"
"net/http"
"strings"
)
// NewRequest wraps NewRequestWithContext using context.Background.
func NewRequest(method, target string, body io.Reader) *http.Request {
	return NewRequestWithContext(context.Background(), method, target, body)
}
// NewRequestWithContext returns a new incoming server Request, suitable
// for passing to an [http.Handler] for testing.
//
// The target is the RFC 7230 "request-target": it may be either a
// path or an absolute URL. If target is an absolute URL, the host name
// from the URL is used. Otherwise, "example.com" is used.
//
// The TLS field is set to a non-nil dummy value if target has scheme
// "https".
//
// The Request.Proto is always HTTP/1.1.
//
// An empty method means "GET".
//
// The provided body may be nil. If the body is of type [bytes.Reader],
// [strings.Reader], [bytes.Buffer], or the value [http.NoBody],
// the Request.ContentLength is set.
//
// NewRequest panics on error for ease of use in testing, where a
// panic is acceptable.
//
// To generate a client HTTP request instead of a server request, see
// the NewRequest function in the net/http package.
func NewRequestWithContext(ctx context.Context, method, target string, body io.Reader) *http.Request {
	if method == "" {
		method = "GET"
	}
	// Let the production request parser do the heavy lifting.
	req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(method + " " + target + " HTTP/1.0\r\n\r\n")))
	if err != nil {
		panic("invalid NewRequest arguments; " + err.Error())
	}
	req = req.WithContext(ctx)

	// HTTP/1.0 was used above to avoid needing a Host field. Change it to 1.1 here.
	req.Proto = "HTTP/1.1"
	req.ProtoMinor = 1
	req.Close = false

	if body != nil {
		switch v := body.(type) {
		case *bytes.Buffer:
			req.ContentLength = int64(v.Len())
		case *bytes.Reader:
			req.ContentLength = int64(v.Len())
		case *strings.Reader:
			req.ContentLength = int64(v.Len())
		default:
			// Unknown reader: length cannot be determined.
			req.ContentLength = -1
		}
		if body == http.NoBody {
			req.ContentLength = 0
		}
		if rc, ok := body.(io.ReadCloser); ok {
			req.Body = rc
		} else {
			req.Body = io.NopCloser(body)
		}
	}

	// 192.0.2.0/24 is "TEST-NET" in RFC 5737 for use solely in
	// documentation and example source code and should not be
	// used publicly.
	req.RemoteAddr = "192.0.2.1:1234"

	if req.Host == "" {
		req.Host = "example.com"
	}

	if strings.HasPrefix(target, "https://") {
		req.TLS = &tls.ConnectionState{
			Version:           tls.VersionTLS12,
			HandshakeComplete: true,
			ServerName:        req.Host,
		}
	}

	return req
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httptest
import (
"bytes"
"fmt"
"io"
"net/http"
"net/textproto"
"strconv"
"strings"
"golang.org/x/net/http/httpguts"
)
// ResponseRecorder is an implementation of [http.ResponseWriter] that
// records its mutations for later inspection in tests.
type ResponseRecorder struct {
	// Code is the HTTP response code set by WriteHeader.
	//
	// Note that if a Handler never calls WriteHeader or Write,
	// this might end up being 0, rather than the implicit
	// http.StatusOK. To get the implicit value, use the Result
	// method.
	Code int

	// HeaderMap contains the headers explicitly set by the Handler.
	// It is an internal detail.
	//
	// Deprecated: HeaderMap exists for historical compatibility
	// and should not be used. To access the headers returned by a handler,
	// use the Response.Header map as returned by the Result method.
	HeaderMap http.Header

	// Body is the buffer to which the Handler's Write calls are sent.
	// If nil, the Writes are silently discarded.
	Body *bytes.Buffer

	// Flushed is whether the Handler called Flush.
	Flushed bool

	result      *http.Response // cache of Result's return value
	snapHeader  http.Header    // snapshot of HeaderMap at first Write
	wroteHeader bool           // whether WriteHeader has been called
}
// NewRecorder returns an initialized [ResponseRecorder].
func NewRecorder() *ResponseRecorder {
	rr := new(ResponseRecorder)
	rr.HeaderMap = make(http.Header)
	rr.Body = new(bytes.Buffer)
	rr.Code = 200
	return rr
}
// DefaultRemoteAddr is the default remote address to return in RemoteAddr if
// an explicit DefaultRemoteAddr isn't set on [ResponseRecorder].
const DefaultRemoteAddr = "1.2.3.4"
// Header implements [http.ResponseWriter]. It returns the response
// headers to mutate within a handler. To test the headers that were
// written after a handler completes, use the [ResponseRecorder.Result] method and see
// the returned Response value's Header.
func (rw *ResponseRecorder) Header() http.Header {
	// Lazily allocate the map so a zero-value recorder works.
	if rw.HeaderMap == nil {
		rw.HeaderMap = make(http.Header)
	}
	return rw.HeaderMap
}
// writeHeader writes the header once, sniffing a Content-Type from the
// start of the body if the handler set neither Content-Type nor
// Transfer-Encoding.
//
// b or str is the beginning of the response body; both forms are accepted
// so WriteString need not allocate a []byte. A non-nil b wins.
func (rw *ResponseRecorder) writeHeader(b []byte, str string) {
	if rw.wroteHeader {
		return
	}
	// Only the first 512 bytes are relevant to content sniffing.
	if len(str) > 512 {
		str = str[:512]
	}

	hdr := rw.Header()
	_, hasType := hdr["Content-Type"]
	if !hasType && hdr.Get("Transfer-Encoding") == "" {
		sniff := b
		if sniff == nil {
			sniff = []byte(str)
		}
		hdr.Set("Content-Type", http.DetectContentType(sniff))
	}

	rw.WriteHeader(200)
}
// Write implements http.ResponseWriter. The data in buf is written to
// rw.Body, if not nil.
func (rw *ResponseRecorder) Write(buf []byte) (int, error) {
	// Record the write, even if we're going to return an error.
	rw.writeHeader(buf, "")
	if body := rw.Body; body != nil {
		body.Write(buf)
	}
	if bodyAllowedForStatus(rw.Code) {
		return len(buf), nil
	}
	return 0, http.ErrBodyNotAllowed
}
// WriteString implements [io.StringWriter]. The data in str is written
// to rw.Body, if not nil.
func (rw *ResponseRecorder) WriteString(str string) (int, error) {
	// Record the write, even if we're going to return an error.
	rw.writeHeader(nil, str)
	if body := rw.Body; body != nil {
		body.WriteString(str)
	}
	if bodyAllowedForStatus(rw.Code) {
		return len(str), nil
	}
	return 0, http.ErrBodyNotAllowed
}
// bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC 7230, section 3.3: 1xx, 204 and 304
// responses are body-less.
func bodyAllowedForStatus(status int) bool {
	if status >= 100 && status <= 199 {
		return false
	}
	return status != 204 && status != 304
}
// checkWriteHeaderCode panics unless code is a three-digit status code.
//
// Issue 22880: require valid WriteHeader status codes.
// For now we only enforce that it's three digits.
// In the future we might block things over 599 (600 and above aren't defined
// at https://httpwg.org/specs/rfc7231.html#status.codes)
// and we might block under 200 (once we have more mature 1xx support).
// But for now any three digits.
//
// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
// no equivalent bogus thing we can realistically send in HTTP/2,
// so we'll consistently panic instead and help people find their bugs
// early. (We can't return an error from WriteHeader even if we wanted to.)
func checkWriteHeaderCode(code int) {
	if 100 <= code && code <= 999 {
		return
	}
	panic(fmt.Sprintf("invalid WriteHeader code %v", code))
}
// WriteHeader implements [http.ResponseWriter]. Only the first call has
// any effect; it validates the code and snapshots the headers as they
// were at that moment.
func (rw *ResponseRecorder) WriteHeader(code int) {
	if rw.wroteHeader {
		return
	}

	checkWriteHeaderCode(code)
	rw.wroteHeader = true
	rw.Code = code
	if rw.HeaderMap == nil {
		rw.HeaderMap = make(http.Header)
	}
	// Snapshot so later mutations by the handler don't leak into Result.
	rw.snapHeader = rw.HeaderMap.Clone()
}
// Flush implements [http.Flusher]. To test whether Flush was
// called, see rw.Flushed.
func (rw *ResponseRecorder) Flush() {
	// Flushing implies the header was sent, mirroring the real server.
	if !rw.wroteHeader {
		rw.WriteHeader(200)
	}
	rw.Flushed = true
}
// Result returns the response generated by the handler.
//
// The returned Response will have at least its StatusCode,
// Header, Body, and optionally Trailer populated.
// More fields may be populated in the future, so callers should
// not DeepEqual the result in tests.
//
// The Response.Header is a snapshot of the headers at the time of the
// first write call, or at the time of this call, if the handler never
// did a write.
//
// The Response.Body is guaranteed to be non-nil and Body.Read call is
// guaranteed to not return any error other than [io.EOF].
//
// Result must only be called after the handler has finished running.
func (rw *ResponseRecorder) Result() *http.Response {
	// Memoized: repeated calls return the same Response.
	if rw.result != nil {
		return rw.result
	}
	if rw.snapHeader == nil {
		rw.snapHeader = rw.HeaderMap.Clone()
	}
	res := &http.Response{
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		StatusCode: rw.Code,
		Header:     rw.snapHeader,
	}
	rw.result = res
	if res.StatusCode == 0 {
		// Handler never wrote; apply the implicit 200.
		res.StatusCode = 200
	}
	res.Status = fmt.Sprintf("%03d %s", res.StatusCode, http.StatusText(res.StatusCode))
	if rw.Body != nil {
		res.Body = io.NopCloser(bytes.NewReader(rw.Body.Bytes()))
	} else {
		res.Body = http.NoBody
	}
	res.ContentLength = parseContentLength(res.Header.Get("Content-Length"))

	// Trailers announced via the "Trailer" header: copy each named
	// header's values from the (post-handler) HeaderMap.
	if trailers, ok := rw.snapHeader["Trailer"]; ok {
		res.Trailer = make(http.Header, len(trailers))
		for _, k := range trailers {
			for k := range strings.SplitSeq(k, ",") {
				k = http.CanonicalHeaderKey(textproto.TrimString(k))
				if !httpguts.ValidTrailerHeader(k) {
					// Ignore since forbidden by RFC 7230, section 4.1.2.
					continue
				}
				vv, ok := rw.HeaderMap[k]
				if !ok {
					continue
				}
				vv2 := make([]string, len(vv))
				copy(vv2, vv)
				res.Trailer[k] = vv2
			}
		}
	}
	// Unannounced trailers set via the http.TrailerPrefix convention.
	for k, vv := range rw.HeaderMap {
		if !strings.HasPrefix(k, http.TrailerPrefix) {
			continue
		}
		if res.Trailer == nil {
			res.Trailer = make(http.Header)
		}
		for _, v := range vv {
			res.Trailer.Add(strings.TrimPrefix(k, http.TrailerPrefix), v)
		}
	}
	return res
}
// parseContentLength trims whitespace from cl and returns -1 if no value
// is set, or the value if it's >= 0.
//
// This is a modified version of the same function found in
// net/http/transfer.go. This one just ignores an invalid header.
func parseContentLength(cl string) int64 {
	trimmed := textproto.TrimString(cl)
	if trimmed == "" {
		return -1
	}
	// ParseUint rejects negative and malformed values in one step;
	// bitSize 63 keeps the result representable as a non-negative int64.
	v, err := strconv.ParseUint(trimmed, 10, 63)
	if err != nil {
		return -1
	}
	return int64(v)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Implementation of Server
package httptest
import (
"context"
"crypto/tls"
"crypto/x509"
"flag"
"fmt"
"log"
"net"
"net/http"
"net/http/internal/testcert"
"os"
"strings"
"sync"
"time"
)
// A Server is an HTTP server listening on a system-chosen port on the
// local loopback interface, for use in end-to-end HTTP tests.
type Server struct {
	URL      string // base URL of form http://ipaddr:port with no trailing slash
	Listener net.Listener

	// EnableHTTP2 controls whether HTTP/2 is enabled
	// on the server. It must be set between calling
	// NewUnstartedServer and calling Server.StartTLS.
	EnableHTTP2 bool

	// TLS is the optional TLS configuration, populated with a new config
	// after TLS is started. If set on an unstarted server before StartTLS
	// is called, existing fields are copied into the new config.
	TLS *tls.Config

	// Config may be changed after calling NewUnstartedServer and
	// before Start or StartTLS.
	Config *http.Server

	// certificate is a parsed version of the TLS config certificate, if present.
	certificate *x509.Certificate

	// wg counts the number of outstanding HTTP requests on this server.
	// Close blocks until all requests are finished.
	wg sync.WaitGroup

	mu     sync.Mutex // guards closed and conns
	closed bool
	// conns tracks every live connection and its last known state
	// (terminal states are removed), so Close can force-close idle ones.
	conns map[net.Conn]http.ConnState // except terminal states

	// client is configured for use with the server.
	// Its transport is automatically closed when Close is called.
	client *http.Client
}
// newLocalListener returns a TCP listener on a system-chosen loopback
// port, or on the address given by the -httptest.serve flag when that
// flag is set. It panics if no listener can be created.
func newLocalListener() net.Listener {
	if serveFlag != "" {
		flagListener, err := net.Listen("tcp", serveFlag)
		if err != nil {
			panic(fmt.Sprintf("httptest: failed to listen on %v: %v", serveFlag, err))
		}
		return flagListener
	}
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err == nil {
		return listener
	}
	// IPv4 loopback unavailable; fall back to the IPv6 loopback.
	listener, err = net.Listen("tcp6", "[::1]:0")
	if err != nil {
		panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err))
	}
	return listener
}
// When debugging a particular http server-based test,
// this flag lets you run
//
//	go test -run='^BrokenTest$' -httptest.serve=127.0.0.1:8000
//
// to start the broken server so you can interact with it manually.
// We only register this flag if it looks like the caller knows about it
// and is trying to use it as we don't want to pollute flags and this
// isn't really part of our API. Don't depend on this.
var serveFlag string

func init() {
	// Register the flag only when it already appears in os.Args, so tests
	// that never use it don't see it in their flag namespace.
	if strSliceContainsPrefix(os.Args, "-httptest.serve=") || strSliceContainsPrefix(os.Args, "--httptest.serve=") {
		flag.StringVar(&serveFlag, "httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks.")
	}
}
// strSliceContainsPrefix reports whether any element of v begins with pre.
func strSliceContainsPrefix(v []string, pre string) bool {
	for i := range v {
		if strings.HasPrefix(v[i], pre) {
			return true
		}
	}
	return false
}
// NewServer starts and returns a new [Server].
// The caller should call Close when finished, to shut it down.
func NewServer(handler http.Handler) *Server {
	// NewUnstartedServer allocates the listener; Start assigns the URL
	// and begins serving in the background.
	ts := NewUnstartedServer(handler)
	ts.Start()
	return ts
}
// NewUnstartedServer returns a new [Server] but doesn't start it.
//
// After changing its configuration, the caller should call Start or
// StartTLS.
//
// The caller should call Close when finished, to shut it down.
func NewUnstartedServer(handler http.Handler) *Server {
	// The listener is created immediately (even before Start) so the
	// chosen port is reserved while the caller configures the server.
	return &Server{
		Listener: newLocalListener(),
		Config:   &http.Server{Handler: handler},
	}
}
// Start starts a server from NewUnstartedServer.
func (s *Server) Start() {
	// A non-empty URL is the "already started" sentinel.
	if s.URL != "" {
		panic("Server already started")
	}
	if s.client == nil {
		tr := &http.Transport{}
		dialer := net.Dialer{}
		// User code may set either of Dial or DialContext, with DialContext taking precedence.
		// We set DialContext here to preserve any context values that are passed in,
		// but fall back to Dial if the user has set it.
		tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
			if tr.Dial != nil {
				return tr.Dial(network, addr)
			}
			// Redirect example.com and its subdomains to this server so the
			// returned client can exercise name-based requests.
			if addr == "example.com:80" || strings.HasSuffix(addr, ".example.com:80") {
				addr = s.Listener.Addr().String()
			}
			return dialer.DialContext(ctx, network, addr)
		}
		s.client = &http.Client{Transport: tr}
	}
	s.URL = "http://" + s.Listener.Addr().String()
	s.wrap()
	s.goServe()
	if serveFlag != "" {
		// -httptest.serve debugging mode: announce the address and block
		// forever so a human can interact with the server.
		fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL)
		select {}
	}
}
// StartTLS starts TLS on a server from NewUnstartedServer.
func (s *Server) StartTLS() {
	if s.URL != "" {
		panic("Server already started")
	}
	if s.client == nil {
		s.client = &http.Client{}
	}
	// The baked-in localhost test certificate; only used when the caller
	// hasn't supplied certificates of their own below.
	cert, err := tls.X509KeyPair(testcert.LocalhostCert, testcert.LocalhostKey)
	if err != nil {
		panic(fmt.Sprintf("httptest: NewTLSServer: %v", err))
	}
	// Clone any user-provided TLS config so we don't mutate the caller's value.
	existingConfig := s.TLS
	if existingConfig != nil {
		s.TLS = existingConfig.Clone()
	} else {
		s.TLS = new(tls.Config)
	}
	if s.TLS.NextProtos == nil {
		nextProtos := []string{"http/1.1"}
		if s.EnableHTTP2 {
			nextProtos = []string{"h2"}
		}
		s.TLS.NextProtos = nextProtos
	}
	if len(s.TLS.Certificates) == 0 {
		s.TLS.Certificates = []tls.Certificate{cert}
	}
	// Parse the leaf certificate so Certificate() can expose it and the
	// client below can trust it.
	s.certificate, err = x509.ParseCertificate(s.TLS.Certificates[0].Certificate[0])
	if err != nil {
		panic(fmt.Sprintf("httptest: NewTLSServer: %v", err))
	}
	certpool := x509.NewCertPool()
	certpool.AddCert(s.certificate)
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{
			RootCAs: certpool,
		},
		ForceAttemptHTTP2: s.EnableHTTP2,
	}
	dialer := net.Dialer{}
	// Same example.com redirection as Start, but for port 443.
	tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		if tr.Dial != nil {
			return tr.Dial(network, addr)
		}
		if addr == "example.com:443" || strings.HasSuffix(addr, ".example.com:443") {
			addr = s.Listener.Addr().String()
		}
		return dialer.DialContext(ctx, network, addr)
	}
	s.client.Transport = tr
	s.Listener = tls.NewListener(s.Listener, s.TLS)
	s.URL = "https://" + s.Listener.Addr().String()
	s.wrap()
	s.goServe()
}
// NewTLSServer starts and returns a new [Server] using TLS.
// The caller should call Close when finished, to shut it down.
func NewTLSServer(handler http.Handler) *Server {
	// Same shape as NewServer, but serving over TLS with a test certificate.
	ts := NewUnstartedServer(handler)
	ts.StartTLS()
	return ts
}
// closeIdleTransport is satisfied by transports (notably *http.Transport)
// that can drop their idle connections; Close uses it to help clients
// release connections to this server.
type closeIdleTransport interface {
	CloseIdleConnections()
}
// Close shuts down the server and blocks until all outstanding
// requests on this server have completed.
func (s *Server) Close() {
	s.mu.Lock()
	if !s.closed {
		s.closed = true
		s.Listener.Close()
		// Disable keep-alives so active connections end after their
		// current request instead of lingering.
		s.Config.SetKeepAlivesEnabled(false)
		for c, st := range s.conns {
			// Force-close any idle connections (those between
			// requests) and new connections (those which connected
			// but never sent a request). StateNew connections are
			// super rare and have only been seen (in
			// previously-flaky tests) in the case of
			// socket-late-binding races from the http Client
			// dialing this server and then getting an idle
			// connection before the dial completed. There is thus
			// a connected connection in StateNew with no
			// associated Request. We only close StateIdle and
			// StateNew because they're not doing anything. It's
			// possible StateNew is about to do something in a few
			// milliseconds, but a previous CL to check again in a
			// few milliseconds wasn't liked (early versions of
			// https://golang.org/cl/15151) so now we just
			// forcefully close StateNew. The docs for Server.Close say
			// we wait for "outstanding requests", so we don't close things
			// in StateActive.
			if st == http.StateIdle || st == http.StateNew {
				s.closeConn(c)
			}
		}
		// If this server doesn't shut down in 5 seconds, tell the user why.
		t := time.AfterFunc(5*time.Second, s.logCloseHangDebugInfo)
		defer t.Stop()
	}
	s.mu.Unlock()
	// Not part of httptest.Server's correctness, but assume most
	// users of httptest.Server will be using the standard
	// transport, so help them out and close any idle connections for them.
	if t, ok := http.DefaultTransport.(closeIdleTransport); ok {
		t.CloseIdleConnections()
	}
	// Also close the client idle connections.
	if s.client != nil {
		if t, ok := s.client.Transport.(closeIdleTransport); ok {
			t.CloseIdleConnections()
		}
	}
	// Wait for the serve goroutine and all tracked connections to finish.
	s.wg.Wait()
}
// logCloseHangDebugInfo logs the connections that are still keeping
// Close from returning, to help diagnose a hung shutdown.
func (s *Server) logCloseHangDebugInfo() {
	s.mu.Lock()
	defer s.mu.Unlock()
	var report strings.Builder
	report.WriteString("httptest.Server blocked in Close after 5 seconds, waiting for connections:\n")
	for conn, state := range s.conns {
		fmt.Fprintf(&report, " %T %p %v in state %v\n", conn, conn, conn.RemoteAddr(), state)
	}
	log.Print(report.String())
}
// CloseClientConnections closes any open HTTP connections to the test Server.
func (s *Server) CloseClientConnections() {
	s.mu.Lock()
	nconn := len(s.conns)
	// Buffered so the closing goroutines never block on send even if we
	// give up waiting below.
	ch := make(chan struct{}, nconn)
	for c := range s.conns {
		go s.closeConnChan(c, ch)
	}
	s.mu.Unlock()
	// Wait for outstanding closes to finish.
	//
	// Out of paranoia for making a late change in Go 1.6, we
	// bound how long this can wait, since golang.org/issue/14291
	// isn't fully understood yet. At least this should only be used
	// in tests.
	timer := time.NewTimer(5 * time.Second)
	defer timer.Stop()
	for i := 0; i < nconn; i++ {
		select {
		case <-ch:
		case <-timer.C:
			// Too slow. Give up.
			return
		}
	}
}
// Certificate returns the certificate used by the server, or nil if
// the server doesn't use TLS.
// It is populated by StartTLS from the first configured certificate.
func (s *Server) Certificate() *x509.Certificate {
	return s.certificate
}
// Client returns an HTTP client configured for making requests to the server.
// It is configured to trust the server's TLS test certificate and will
// close its idle connections on [Server.Close].
// Use Server.URL as the base URL to send requests to the server.
// The returned client will also redirect any requests to "example.com"
// or its subdomains to the server.
func (s *Server) Client() *http.Client {
	// The client itself is built in Start/StartTLS; this is a plain accessor.
	return s.client
}
// goServe runs the serve loop in a background goroutine, tracked by
// s.wg so that Close can wait for it to exit.
func (s *Server) goServe() {
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		// Serve returns once the listener is closed by Close.
		s.Config.Serve(s.Listener)
	}()
}
// wrap installs the connection state-tracking hook to know which
// connections are idle.
func (s *Server) wrap() {
	// Preserve any user-installed hook and chain to it at the end.
	oldHook := s.Config.ConnState
	s.Config.ConnState = func(c net.Conn, cs http.ConnState) {
		s.mu.Lock()
		defer s.mu.Unlock()

		switch cs {
		case http.StateNew:
			// A connection must not be seen twice in StateNew.
			if _, exists := s.conns[c]; exists {
				panic("invalid state transition")
			}
			if s.conns == nil {
				s.conns = make(map[net.Conn]http.ConnState)
			}
			// Add c to the set of tracked conns and increment it to the
			// waitgroup.
			s.wg.Add(1)
			s.conns[c] = cs
			if s.closed {
				// Probably just a socket-late-binding dial from
				// the default transport that lost the race (and
				// thus this connection is now idle and will
				// never be used).
				s.closeConn(c)
			}
		case http.StateActive:
			// Only New or Idle connections may become Active.
			if oldState, ok := s.conns[c]; ok {
				if oldState != http.StateNew && oldState != http.StateIdle {
					panic("invalid state transition")
				}
				s.conns[c] = cs
			}
		case http.StateIdle:
			// Only Active connections may become Idle.
			if oldState, ok := s.conns[c]; ok {
				if oldState != http.StateActive {
					panic("invalid state transition")
				}
				s.conns[c] = cs
			}
			// A connection going idle after Close began is no longer needed.
			if s.closed {
				s.closeConn(c)
			}
		case http.StateHijacked, http.StateClosed:
			// Remove c from the set of tracked conns and decrement it from the
			// waitgroup, unless it was previously removed.
			if _, ok := s.conns[c]; ok {
				delete(s.conns, c)
				// Keep Close from returning until the user's ConnState hook
				// (if any) finishes.
				defer s.wg.Done()
			}
		}
		if oldHook != nil {
			oldHook(c, cs)
		}
	}
}
// closeConn closes c.
// s.mu must be held.
// It delegates to closeConnChan with no completion channel.
func (s *Server) closeConn(c net.Conn) { s.closeConnChan(c, nil) }
// closeConnChan is like closeConn, but takes an optional channel to receive a value
// when the goroutine closing c is done.
func (s *Server) closeConnChan(c net.Conn, done chan<- struct{}) {
	c.Close()
	if done == nil {
		return
	}
	done <- struct{}{}
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pprof serves via its HTTP server runtime profiling data
// in the format expected by the pprof visualization tool.
//
// The package is typically only imported for the side effect of
// registering its HTTP handlers.
// The handled paths all begin with /debug/pprof/.
// As of Go 1.22, all the paths must be requested with GET.
//
// To use pprof, link this package into your program:
//
// import _ "net/http/pprof"
//
// If your application is not already running an http server, you
// need to start one. Add "net/http" and "log" to your imports and
// the following code to your main function:
//
// go func() {
// log.Println(http.ListenAndServe("localhost:6060", nil))
// }()
//
// By default, all the profiles listed in [runtime/pprof.Profile] are
// available (via [Handler]), in addition to the [Cmdline], [Profile], [Symbol],
// and [Trace] profiles defined in this package.
// If you are not using DefaultServeMux, you will have to register handlers
// with the mux you are using.
//
// # Parameters
//
// Parameters can be passed via GET query params:
//
// - debug=N (all profiles): response format: N = 0: binary (default), N > 0: plaintext
// - gc=N (heap profile): N > 0: run a garbage collection cycle before profiling
// - seconds=N (allocs, block, goroutine, heap, mutex, threadcreate profiles): return a delta profile
// - seconds=N (cpu (profile), trace profiles): profile for the given duration
//
// # Usage examples
//
// Use the pprof tool to look at the heap profile:
//
// go tool pprof http://localhost:6060/debug/pprof/heap
//
// Or to look at a 30-second CPU profile:
//
// go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30
//
// Or to look at the goroutine blocking profile, after calling
// [runtime.SetBlockProfileRate] in your program:
//
// go tool pprof http://localhost:6060/debug/pprof/block
//
// Or to look at the holders of contended mutexes, after calling
// [runtime.SetMutexProfileFraction] in your program:
//
// go tool pprof http://localhost:6060/debug/pprof/mutex
//
// The package also exports a handler that serves execution trace data
// for the "go tool trace" command. To collect a 5-second execution trace:
//
// curl -o trace.out http://localhost:6060/debug/pprof/trace?seconds=5
// go tool trace trace.out
//
// To view all available profiles, open http://localhost:6060/debug/pprof/
// in your browser.
//
// For a study of the facility in action, visit
// https://go.dev/blog/pprof.
package pprof
import (
"bufio"
"bytes"
"context"
"fmt"
"html"
"internal/godebug"
"internal/goexperiment"
"internal/profile"
"io"
"log"
"net/http"
"net/url"
"os"
"runtime"
"runtime/pprof"
"runtime/trace"
"slices"
"strconv"
"strings"
"time"
)
func init() {
	// Restrict the handlers to GET via a method pattern, unless the
	// httpmuxgo121 GODEBUG opts back into the pre-1.22 mux (which does
	// not understand method patterns).
	prefix := ""
	if godebug.New("httpmuxgo121").Value() != "1" {
		prefix = "GET "
	}
	http.HandleFunc(prefix+"/debug/pprof/", Index)
	http.HandleFunc(prefix+"/debug/pprof/cmdline", Cmdline)
	http.HandleFunc(prefix+"/debug/pprof/profile", Profile)
	http.HandleFunc(prefix+"/debug/pprof/symbol", Symbol)
	http.HandleFunc(prefix+"/debug/pprof/trace", Trace)
}
// Cmdline responds with the running program's
// command line, with arguments separated by NUL bytes.
// The package initialization registers it as /debug/pprof/cmdline.
func Cmdline(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Content-Type-Options", "nosniff")
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprint(w, strings.Join(os.Args, "\x00"))
}
func sleep(r *http.Request, d time.Duration) {
select {
case <-time.After(d):
case <-r.Context().Done():
}
}
// configureWriteDeadline extends the response's write deadline so that a
// profile lasting roughly `seconds` can be streamed without tripping the
// enclosing server's WriteTimeout. It is a no-op when the handler is not
// running under an *http.Server with a positive WriteTimeout.
func configureWriteDeadline(w http.ResponseWriter, r *http.Request, seconds float64) {
	srv, ok := r.Context().Value(http.ServerContextKey).(*http.Server)
	if ok && srv.WriteTimeout > 0 {
		timeout := srv.WriteTimeout + time.Duration(seconds*float64(time.Second))
		rc := http.NewResponseController(w)
		// Best effort: an error here simply leaves the original deadline in place.
		rc.SetWriteDeadline(time.Now().Add(timeout))
	}
}
func serveError(w http.ResponseWriter, status int, txt string) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Go-Pprof", "1")
w.Header().Del("Content-Disposition")
w.WriteHeader(status)
fmt.Fprintln(w, txt)
}
// Profile responds with the pprof-formatted cpu profile.
// Profiling lasts for duration specified in seconds GET parameter, or for 30 seconds if not specified.
// The package initialization registers it as /debug/pprof/profile.
func Profile(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("X-Content-Type-Options", "nosniff")
	sec, err := strconv.ParseInt(r.FormValue("seconds"), 10, 64)
	// Missing, unparsable, or non-positive values all fall back to 30s.
	if sec <= 0 || err != nil {
		sec = 30
	}

	configureWriteDeadline(w, r, float64(sec))

	// Set Content Type assuming StartCPUProfile will work,
	// because if it does it starts writing.
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Disposition", `attachment; filename="profile"`)
	if err := pprof.StartCPUProfile(w); err != nil {
		// StartCPUProfile failed, so no writes yet.
		serveError(w, http.StatusInternalServerError,
			fmt.Sprintf("Could not enable CPU profiling: %s", err))
		return
	}
	// Wait out the sampling window (or the client going away), then stop
	// and flush the profile to w.
	sleep(r, time.Duration(sec)*time.Second)
	pprof.StopCPUProfile()
}
// Trace responds with the execution trace in binary form.
// Tracing lasts for duration specified in seconds GET parameter, or for 1 second if not specified.
// The package initialization registers it as /debug/pprof/trace.
func Trace(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("X-Content-Type-Options", "nosniff")
	// Fractional seconds are allowed here (unlike Profile's integer parse).
	sec, err := strconv.ParseFloat(r.FormValue("seconds"), 64)
	if sec <= 0 || err != nil {
		sec = 1
	}

	configureWriteDeadline(w, r, sec)

	// Set Content Type assuming trace.Start will work,
	// because if it does it starts writing.
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Disposition", `attachment; filename="trace"`)
	if err := trace.Start(w); err != nil {
		// trace.Start failed, so no writes yet.
		serveError(w, http.StatusInternalServerError,
			fmt.Sprintf("Could not enable tracing: %s", err))
		return
	}
	// Collect for the requested window (or until the client disconnects).
	sleep(r, time.Duration(sec*float64(time.Second)))
	trace.Stop()
}
// Symbol looks up the program counters listed in the request,
// responding with a table mapping program counters to function names.
// The package initialization registers it as /debug/pprof/symbol.
func Symbol(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")

	// We have to read the whole POST body before
	// writing any output. Buffer the output here.
	var buf bytes.Buffer

	// We don't know how many symbols we have, but we
	// do have symbol information. Pprof only cares whether
	// this number is 0 (no symbols available) or > 0.
	fmt.Fprintf(&buf, "num_symbols: 1\n")

	// Counters arrive '+'-separated, either in the POST body or in the
	// raw query string for GET requests.
	var b *bufio.Reader
	if r.Method == "POST" {
		b = bufio.NewReader(r.Body)
	} else {
		b = bufio.NewReader(strings.NewReader(r.URL.RawQuery))
	}

	for {
		word, err := b.ReadSlice('+')
		if err == nil {
			word = word[0 : len(word)-1] // trim +
		}
		// Base 0: accepts decimal, 0x-hex, and octal counter spellings.
		pc, _ := strconv.ParseUint(string(word), 0, 64)
		if pc != 0 {
			f := runtime.FuncForPC(uintptr(pc))
			if f != nil {
				fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name())
			}
		}

		// Wait until here to check for err; the last
		// symbol will have an err because it doesn't end in +.
		if err != nil {
			if err != io.EOF {
				fmt.Fprintf(&buf, "reading request: %v\n", err)
			}
			break
		}
	}

	w.Write(buf.Bytes())
}
// Handler returns an HTTP handler that serves the named profile.
// Available profiles can be found in [runtime/pprof.Profile].
func Handler(name string) http.Handler {
	return handler(name)
}

// handler is a profile name whose ServeHTTP method serves that profile.
type handler string
// ServeHTTP serves the runtime/pprof profile named by the receiver,
// honoring the seconds, gc, and debug query parameters.
func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("X-Content-Type-Options", "nosniff")
	p := pprof.Lookup(string(name))
	if p == nil {
		serveError(w, http.StatusNotFound, "Unknown profile")
		return
	}
	// A "seconds" parameter requests a delta profile instead of a snapshot.
	if sec := r.FormValue("seconds"); sec != "" {
		name.serveDeltaProfile(w, r, p, sec)
		return
	}
	// gc=N (N > 0) on the heap profile forces a collection first so the
	// snapshot reflects only live objects.
	gc, _ := strconv.Atoi(r.FormValue("gc"))
	if name == "heap" && gc > 0 {
		runtime.GC()
	}
	// debug != 0 selects the legacy plaintext format; otherwise the binary
	// proto format is served as a download.
	debug, _ := strconv.Atoi(r.FormValue("debug"))
	if debug != 0 {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	} else {
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, name))
	}
	p.WriteTo(w, debug)
}
// serveDeltaProfile serves the difference between two snapshots of
// profile p taken secStr seconds apart, in binary proto format.
func (name handler) serveDeltaProfile(w http.ResponseWriter, r *http.Request, p *pprof.Profile, secStr string) {
	sec, err := strconv.ParseInt(secStr, 10, 64)
	if err != nil || sec <= 0 {
		serveError(w, http.StatusBadRequest, `invalid value for "seconds" - must be a positive integer`)
		return
	}
	// 'name' should be a key in profileSupportsDelta.
	if !profileSupportsDelta[name] {
		serveError(w, http.StatusBadRequest, `"seconds" parameter is not supported for this profile type`)
		return
	}

	configureWriteDeadline(w, r, float64(sec))

	// Delta profiles are only available in binary form; debug output is
	// rejected rather than silently ignored.
	debug, _ := strconv.Atoi(r.FormValue("debug"))
	if debug != 0 {
		serveError(w, http.StatusBadRequest, "seconds and debug params are incompatible")
		return
	}
	// First snapshot.
	p0, err := collectProfile(p)
	if err != nil {
		serveError(w, http.StatusInternalServerError, "failed to collect profile")
		return
	}

	// Wait the requested interval, bailing out early if the client goes away.
	t := time.NewTimer(time.Duration(sec) * time.Second)
	defer t.Stop()

	select {
	case <-r.Context().Done():
		err := r.Context().Err()
		if err == context.DeadlineExceeded {
			serveError(w, http.StatusRequestTimeout, err.Error())
		} else { // TODO: what's a good status code for canceled requests? 400?
			serveError(w, http.StatusInternalServerError, err.Error())
		}
		return
	case <-t.C:
	}

	// Second snapshot; the delta is p1 - p0, computed by merging p1 with
	// a negated p0.
	p1, err := collectProfile(p)
	if err != nil {
		serveError(w, http.StatusInternalServerError, "failed to collect profile")
		return
	}
	ts := p1.TimeNanos
	dur := p1.TimeNanos - p0.TimeNanos

	p0.Scale(-1)

	p1, err = profile.Merge([]*profile.Profile{p0, p1})
	if err != nil {
		serveError(w, http.StatusInternalServerError, "failed to compute delta")
		return
	}

	p1.TimeNanos = ts // set since we don't know what profile.Merge set for TimeNanos.
	p1.DurationNanos = dur

	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s-delta"`, name))
	p1.Write(w)
}
// collectProfile writes p in binary form and parses it back into an
// in-memory profile.Profile, stamped with the collection time so delta
// computations can report an accurate duration.
func collectProfile(p *pprof.Profile) (*profile.Profile, error) {
	var buf bytes.Buffer
	if err := p.WriteTo(&buf, 0); err != nil {
		return nil, err
	}
	ts := time.Now().UnixNano()
	p0, err := profile.Parse(&buf)
	if err != nil {
		return nil, err
	}
	p0.TimeNanos = ts
	return p0, nil
}
// profileSupportsDelta records which profiles accept the "seconds"
// parameter to produce a delta (two-snapshot difference) profile.
var profileSupportsDelta = map[handler]bool{
	"allocs":        true,
	"block":         true,
	"goroutineleak": true,
	"goroutine":     true,
	"heap":          true,
	"mutex":         true,
	"threadcreate":  true,
}
// profileDescriptions maps profile names to the human-readable
// descriptions shown on the /debug/pprof/ index page.
var profileDescriptions = map[string]string{
	"allocs":       "A sampling of all past memory allocations",
	"block":        "Stack traces that led to blocking on synchronization primitives",
	"cmdline":      "The command line invocation of the current program",
	"goroutine":    "Stack traces of all current goroutines. Use debug=2 as a query parameter to export in the same format as an unrecovered panic.",
	"heap":         "A sampling of memory allocations of live objects. You can specify the gc GET parameter to run GC before taking the heap sample.",
	"mutex":        "Stack traces of holders of contended mutexes",
	"profile":      "CPU profile. You can specify the duration in the seconds GET parameter. After you get the profile file, use the go tool pprof command to investigate the profile.",
	"symbol":       "Maps given program counters to function names. Counters can be specified in a GET raw query or POST body, multiple counters are separated by '+'.",
	"threadcreate": "Stack traces that led to the creation of new OS threads",
	"trace":        "A trace of execution of the current program. You can specify the duration in the seconds GET parameter. After you get the trace file, use the go tool trace command to investigate the trace.",
}
func init() {
	// The goroutine leak profile only exists under its GOEXPERIMENT, so
	// its description is registered conditionally.
	if goexperiment.GoroutineLeakProfile {
		profileDescriptions["goroutineleak"] = "Stack traces of all leaked goroutines. Use debug=2 as a query parameter to export in the same format as an unrecovered panic."
	}
}
// profileEntry is one row of the /debug/pprof/ index page.
type profileEntry struct {
	Name  string // profile name as shown to the user
	Href  string // relative link target
	Desc  string // human-readable description
	Count int    // current sample count; zero for the synthetic entries
}
// Index responds with the pprof-formatted profile named by the request.
// For example, "/debug/pprof/heap" serves the "heap" profile.
// Index responds to a request for "/debug/pprof/" with an HTML page
// listing the available profiles.
func Index(w http.ResponseWriter, r *http.Request) {
	// A non-empty suffix after /debug/pprof/ names a specific profile;
	// dispatch to its handler.
	if name, found := strings.CutPrefix(r.URL.Path, "/debug/pprof/"); found {
		if name != "" {
			handler(name).ServeHTTP(w, r)
			return
		}
	}

	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.Header().Set("Content-Type", "text/html; charset=utf-8")

	var profiles []profileEntry
	for _, p := range pprof.Profiles() {
		profiles = append(profiles, profileEntry{
			Name:  p.Name(),
			Href:  p.Name(),
			Desc:  profileDescriptions[p.Name()],
			Count: p.Count(),
		})
	}

	// Adding other profiles exposed from within this package
	for _, p := range []string{"cmdline", "profile", "symbol", "trace"} {
		profiles = append(profiles, profileEntry{
			Name: p,
			Href: p,
			Desc: profileDescriptions[p],
		})
	}

	// Stable, alphabetical listing regardless of registration order.
	slices.SortFunc(profiles, func(a, b profileEntry) int {
		return strings.Compare(a.Name, b.Name)
	})

	if err := indexTmplExecute(w, profiles); err != nil {
		log.Print(err)
	}
}
// indexTmplExecute renders the /debug/pprof/ index page for the given
// profiles. The page is assembled by hand (not html/template) with
// user-visible names escaped via html.EscapeString.
func indexTmplExecute(w io.Writer, profiles []profileEntry) error {
	var b bytes.Buffer
	b.WriteString(`<html>
<head>
<title>/debug/pprof/</title>
<style>
.profile-name{
display:inline-block;
width:6rem;
}
</style>
</head>
<body>
/debug/pprof/
<br>
<p>Set debug=1 as a query parameter to export in legacy text format</p>
<br>
Types of profiles available:
<table>
<thead><td>Count</td><td>Profile</td></thead>
`)

	// One table row per profile, each linking to its debug=1 form.
	for _, profile := range profiles {
		link := &url.URL{Path: profile.Href, RawQuery: "debug=1"}
		fmt.Fprintf(&b, "<tr><td>%d</td><td><a href='%s'>%s</a></td></tr>\n", profile.Count, link, html.EscapeString(profile.Name))
	}

	b.WriteString(`</table>
<a href="goroutine?debug=2">full goroutine stack dump</a>
<br>
<p>
Profile Descriptions:
<ul>
`)
	for _, profile := range profiles {
		fmt.Fprintf(&b, "<li><div class=profile-name>%s: </div> %s</li>\n", html.EscapeString(profile.Name), html.EscapeString(profile.Desc))
	}
	b.WriteString(`</ul>
</p>
</body>
</html>`)

	// Single Write so a transport error can be reported to the caller.
	_, err := w.Write(b.Bytes())
	return err
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package mail implements parsing of mail messages.
For the most part, this package follows the syntax as specified by RFC 5322 and
extended by RFC 6532.
Notable divergences:
- Obsolete address formats are not parsed, including addresses with
embedded route information.
- The full range of spacing (the CFWS syntax element) is not supported,
such as breaking addresses across lines.
- No unicode normalization is performed.
- A leading From line is permitted, as in mbox format (RFC 4155).
*/
package mail
import (
"bufio"
"errors"
"fmt"
"io"
"log"
"mime"
"net"
"net/textproto"
"strings"
"sync"
"time"
"unicode/utf8"
)
// debug gates verbose parser logging for this package; flip to
// debugT(true) while developing.
var debug = debugT(false)

type debugT bool

// Printf logs like log.Printf, but only when debugging is enabled.
func (d debugT) Printf(format string, args ...any) {
	if d {
		log.Printf(format, args...)
	}
}
// A Message represents a parsed mail message.
type Message struct {
	Header Header    // parsed header fields
	Body   io.Reader // unread remainder of the message, positioned at the body
}
// ReadMessage reads a message from r.
// The headers are parsed, and the body of the message will be available
// for reading from msg.Body.
func ReadMessage(r io.Reader) (msg *Message, err error) {
	tp := textproto.NewReader(bufio.NewReader(r))

	hdr, err := readHeader(tp)
	// Tolerate EOF when at least one header was read: a message may have
	// headers but no body.
	if err != nil && (err != io.EOF || len(hdr) == 0) {
		return nil, err
	}

	return &Message{
		Header: Header(hdr),
		Body:   tp.R, // buffered reader, now positioned at the body
	}, nil
}
// readHeader reads the message headers from r.
// This is like textproto.ReadMIMEHeader, but doesn't validate.
// The fix for issue #53188 tightened up net/textproto to enforce
// restrictions of RFC 7230.
// This package implements RFC 5322, which does not have those restrictions.
// This function copies the relevant code from net/textproto,
// simplified for RFC 5322.
func readHeader(r *textproto.Reader) (map[string][]string, error) {
m := make(map[string][]string)
// The first line cannot start with a leading space.
if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
line, err := r.ReadLine()
if err != nil {
return m, err
}
return m, errors.New("malformed initial line: " + line)
}
for {
kv, err := r.ReadContinuedLine()
if kv == "" {
return m, err
}
// Key ends at first colon.
k, v, ok := strings.Cut(kv, ":")
if !ok {
return m, errors.New("malformed header line: " + kv)
}
key := textproto.CanonicalMIMEHeaderKey(k)
// Permit empty key, because that is what we did in the past.
if key == "" {
continue
}
// Skip initial spaces in value.
value := strings.TrimLeft(v, " \t")
m[key] = append(m[key], value)
if err != nil {
return m, err
}
}
}
// Layouts suitable for passing to time.Parse.
// These are tried in order.
var dateLayouts = sync.OnceValue(func() []string {
	// Generate layouts based on RFC 5322, section 3.3.
	dows := []string{"", "Mon, "}    // day-of-week is optional
	days := []string{"2", "02"}      // day = 1*2DIGIT
	years := []string{"2006", "06"}  // year = 4*DIGIT / 2*DIGIT
	seconds := []string{":05", ""}   // seconds are optional
	// "-0700 (MST)" is not in RFC 5322, but is common.
	zones := []string{"-0700", "MST", "UT"} // zone = (("+" / "-") 4DIGIT) / "UT" / "GMT" / ...

	var layouts []string
	for _, dow := range dows {
		for _, day := range days {
			for _, year := range years {
				// The date+time prefix is shared across the inner two loops.
				prefix := dow + day + " Jan " + year + " 15:04"
				for _, second := range seconds {
					for _, zone := range zones {
						layouts = append(layouts, prefix+second+" "+zone)
					}
				}
			}
		}
	}
	return layouts
})
// ParseDate parses an RFC 5322 date string.
func ParseDate(date string) (time.Time, error) {
	// CR and LF must match and are tolerated anywhere in the date field.
	date = strings.ReplaceAll(date, "\r\n", "")
	if strings.Contains(date, "\r") {
		return time.Time{}, errors.New("mail: header has a CR without LF")
	}
	// Re-using some addrParser methods which support obsolete text, i.e. non-printable ASCII
	p := addrParser{date, nil}
	p.skipSpace()

	// RFC 5322: zone = (FWS ( "+" / "-" ) 4DIGIT) / obs-zone
	// zone length is always 5 chars unless obsolete (obs-zone)
	// Cut the string just past the numeric zone so anything after it
	// (e.g. a parenthetical comment) is excluded from time.Parse input.
	if ind := strings.IndexAny(p.s, "+-"); ind != -1 && len(p.s) >= ind+5 {
		date = p.s[:ind+5]
		p.s = p.s[ind+5:]
	} else {
		// No numeric zone: look for an alphabetic zone ending in 'T'
		// (e.g. "MST", "UT", "GMT").
		ind := strings.Index(p.s, "T")
		if ind == 0 {
			// In this case we have the following date formats:
			// * Thu, 20 Nov 1997 09:55:06 MDT
			// * Thu, 20 Nov 1997 09:55:06 MDT (MDT)
			// * Thu, 20 Nov 1997 09:55:06 MDT (This comment)
			ind = strings.Index(p.s[1:], "T")
			if ind != -1 {
				ind++
			}
		}
		if ind != -1 && len(p.s) >= ind+5 {
			// The last letter T of the obsolete time zone is checked when no standard time zone is found.
			// If T is misplaced, the date to parse is garbage.
			date = p.s[:ind+1]
			p.s = p.s[ind+1:]
		}
	}
	// Whatever remains after the zone must be valid (possibly empty) CFWS.
	if !p.skipCFWS() {
		return time.Time{}, errors.New("mail: misformatted parenthetical comment")
	}
	// Try each generated layout in order; first success wins.
	for _, layout := range dateLayouts() {
		t, err := time.Parse(layout, date)
		if err == nil {
			return t, nil
		}
	}
	return time.Time{}, errors.New("mail: header could not be parsed")
}
// A Header represents the key-value pairs in a mail message header.
type Header map[string][]string

// Get gets the first value associated with the given key.
// It is case insensitive; CanonicalMIMEHeaderKey is used
// to canonicalize the provided key.
// If there are no values associated with the key, Get returns "".
// To access multiple values of a key, or to use non-canonical keys,
// access the map directly.
func (h Header) Get(key string) string {
	// Header shares textproto.MIMEHeader's representation, so its Get
	// (with canonicalization) applies directly.
	return textproto.MIMEHeader(h).Get(key)
}

// ErrHeaderNotPresent is returned by Date and AddressList when the
// requested header field is absent or empty.
var ErrHeaderNotPresent = errors.New("mail: header not in message")
// Date parses the Date header field.
// It returns ErrHeaderNotPresent if the field is missing or empty.
func (h Header) Date() (time.Time, error) {
	if hdr := h.Get("Date"); hdr != "" {
		return ParseDate(hdr)
	}
	return time.Time{}, ErrHeaderNotPresent
}
// AddressList parses the named header field as a list of addresses.
// It returns ErrHeaderNotPresent if the field is missing or empty.
func (h Header) AddressList(key string) ([]*Address, error) {
	if hdr := h.Get(key); hdr != "" {
		return ParseAddressList(hdr)
	}
	return nil, ErrHeaderNotPresent
}
// Address represents a single mail address.
// An address such as "Barry Gibbs <bg@example.com>" is represented
// as Address{Name: "Barry Gibbs", Address: "bg@example.com"}.
type Address struct {
	Name    string // Proper name; may be empty.
	Address string // user@domain
}
// ParseAddress parses a single RFC 5322 address, e.g. "Barry Gibbs <bg@example.com>"
func ParseAddress(address string) (*Address, error) {
return (&addrParser{s: address}).parseSingleAddress()
}
// ParseAddressList parses the given string as a list of addresses.
func ParseAddressList(list string) ([]*Address, error) {
return (&addrParser{s: list}).parseAddressList()
}
// An AddressParser is an RFC 5322 address parser.
type AddressParser struct {
// WordDecoder optionally specifies a decoder for RFC 2047 encoded-words.
WordDecoder *mime.WordDecoder
}
// Parse parses a single RFC 5322 address of the
// form "Gogh Fir <gf@example.com>" or "foo@example.com".
func (p *AddressParser) Parse(address string) (*Address, error) {
return (&addrParser{s: address, dec: p.WordDecoder}).parseSingleAddress()
}
// ParseList parses the given string as a list of comma-separated addresses
// of the form "Gogh Fir <gf@example.com>" or "foo@example.com".
func (p *AddressParser) ParseList(list string) ([]*Address, error) {
return (&addrParser{s: list, dec: p.WordDecoder}).parseAddressList()
}
// String formats the address as a valid RFC 5322 address.
// If the address's name contains non-ASCII characters
// the name will be rendered according to RFC 2047.
func (a *Address) String() string {
	// Format address local@domain
	at := strings.LastIndex(a.Address, "@")
	var local, domain string
	if at < 0 {
		// This is a malformed address ("@" is required in addr-spec);
		// treat the whole address as local-part.
		local = a.Address
	} else {
		local, domain = a.Address[:at], a.Address[at+1:]
	}

	// Add quotes if needed
	quoteLocal := false
	for i, r := range local {
		if isAtext(r, false) {
			continue
		}
		if r == '.' {
			// Dots are okay if they are surrounded by atext.
			// We only need to check that the previous byte is
			// not a dot, and this isn't the end of the string.
			// ('.' is ASCII, so byte-indexing local[i-1] is safe here.)
			if i > 0 && local[i-1] != '.' && i < len(local)-1 {
				continue
			}
		}
		quoteLocal = true
		break
	}
	if quoteLocal {
		local = quoteString(local)
	}

	s := "<" + local + "@" + domain + ">"

	if a.Name == "" {
		return s
	}

	// If every character is printable ASCII, quoting is simple.
	allPrintable := true
	for _, r := range a.Name {
		// isWSP here should actually be isFWS,
		// but we don't support folding yet.
		if !isVchar(r) && !isWSP(r) || isMultibyte(r) {
			allPrintable = false
			break
		}
	}
	if allPrintable {
		return quoteString(a.Name) + " " + s
	}

	// Text in an encoded-word in a display-name must not contain certain
	// characters like quotes or parentheses (see RFC 2047 section 5.3).
	// When this is the case encode the name using base64 encoding.
	if strings.ContainsAny(a.Name, "\"#$%&'(),.:;<>@[]^`{|}~") {
		return mime.BEncoding.Encode("utf-8", a.Name) + " " + s
	}
	return mime.QEncoding.Encode("utf-8", a.Name) + " " + s
}
// addrParser is a cursor over the unparsed remainder of an address
// string; methods consume input by re-slicing s.
type addrParser struct {
	s   string
	dec *mime.WordDecoder // may be nil
}
// parseAddressList parses a comma-separated list of addresses
// (RFC 5322 address-list, including the obsolete obs-addr-list form,
// which permits empty list entries).
func (p *addrParser) parseAddressList() ([]*Address, error) {
	var list []*Address
	for {
		p.skipSpace()

		// allow skipping empty entries (RFC5322 obs-addr-list)
		if p.consume(',') {
			continue
		}

		addrs, err := p.parseAddress(true)
		if err != nil {
			return nil, err
		}
		// A group address may expand to zero or more mailboxes.
		list = append(list, addrs...)

		if !p.skipCFWS() {
			return nil, errors.New("mail: misformatted parenthetical comment")
		}
		if p.empty() {
			break
		}
		if p.peek() != ',' {
			return nil, errors.New("mail: expected comma")
		}

		// Skip empty entries for obs-addr-list.
		for p.consume(',') {
			p.skipSpace()
		}
		if p.empty() {
			break
		}
	}
	return list, nil
}
// parseSingleAddress parses exactly one address and requires that
// nothing but optional CFWS remains afterwards.
func (p *addrParser) parseSingleAddress() (*Address, error) {
	parsed, err := p.parseAddress(true)
	if err != nil {
		return nil, err
	}
	if !p.skipCFWS() {
		return nil, errors.New("mail: misformatted parenthetical comment")
	}
	if !p.empty() {
		return nil, fmt.Errorf("mail: expected single address, got %q", p.s)
	}
	switch len(parsed) {
	case 0:
		// A group such as "Undisclosed recipients:;" yields no mailboxes.
		return nil, errors.New("mail: empty group")
	case 1:
		return parsed[0], nil
	default:
		return nil, errors.New("mail: group with multiple addresses")
	}
}
// parseAddress parses a single RFC 5322 address at the start of p.
// If handleGroup is true, a group (display-name ":" mailboxes ";") is
// accepted and all of its mailboxes are returned; otherwise the result
// always has length one.
func (p *addrParser) parseAddress(handleGroup bool) ([]*Address, error) {
	debug.Printf("parseAddress: %q", p.s)
	p.skipSpace()
	if p.empty() {
		return nil, errors.New("mail: no address")
	}

	// address = mailbox / group
	// mailbox = name-addr / addr-spec
	// group = display-name ":" [group-list] ";" [CFWS]

	// addr-spec has a more restricted grammar than name-addr,
	// so try parsing it first, and fallback to name-addr.
	// TODO(dsymonds): Is this really correct?
	spec, err := p.consumeAddrSpec()
	if err == nil {
		var displayName string
		p.skipSpace()
		if !p.empty() && p.peek() == '(' {
			// Accept the "addr@spec (Display Name)" comment form.
			displayName, err = p.consumeDisplayNameComment()
			if err != nil {
				return nil, err
			}
		}

		return []*Address{{
			Name:    displayName,
			Address: spec,
		}}, err
	}
	debug.Printf("parseAddress: not an addr-spec: %v", err)
	debug.Printf("parseAddress: state is now %q", p.s)

	// display-name
	var displayName string
	if p.peek() != '<' {
		displayName, err = p.consumePhrase()
		if err != nil {
			return nil, err
		}
	}
	debug.Printf("parseAddress: displayName=%q", displayName)

	p.skipSpace()
	if handleGroup {
		if p.consume(':') {
			return p.consumeGroupList()
		}
	}
	// angle-addr = "<" addr-spec ">"
	if !p.consume('<') {
		// Distinguish two failure modes to give a more helpful error.
		atext := true
		for _, r := range displayName {
			if !isAtext(r, true) {
				atext = false
				break
			}
		}
		if atext {
			// The input is like "foo.bar"; it's possible the input
			// meant to be "foo.bar@domain", or "foo.bar <...>".
			return nil, errors.New("mail: missing '@' or angle-addr")
		}
		// The input is like "Full Name", which couldn't possibly be a
		// valid email address if followed by "@domain"; the input
		// likely meant to be "Full Name <...>".
		return nil, errors.New("mail: no angle-addr")
	}
	spec, err = p.consumeAddrSpec()
	if err != nil {
		return nil, err
	}
	if !p.consume('>') {
		return nil, errors.New("mail: unclosed angle-addr")
	}
	debug.Printf("parseAddress: spec=%q", spec)

	return []*Address{{
		Name:    displayName,
		Address: spec,
	}}, nil
}
// consumeGroupList parses the mailboxes of an RFC 5322 group, assuming
// the display-name and ":" have already been consumed. It stops after
// the terminating ";" and any trailing CFWS.
func (p *addrParser) consumeGroupList() ([]*Address, error) {
	var group []*Address
	// handle empty group.
	p.skipSpace()
	if p.consume(';') {
		if !p.skipCFWS() {
			return nil, errors.New("mail: misformatted parenthetical comment")
		}
		return group, nil
	}

	for {
		p.skipSpace()
		// embedded groups not allowed.
		addrs, err := p.parseAddress(false)
		if err != nil {
			return nil, err
		}
		group = append(group, addrs...)

		if !p.skipCFWS() {
			return nil, errors.New("mail: misformatted parenthetical comment")
		}
		if p.consume(';') {
			if !p.skipCFWS() {
				return nil, errors.New("mail: misformatted parenthetical comment")
			}
			break
		}
		if !p.consume(',') {
			return nil, errors.New("mail: expected comma")
		}
	}
	return group, nil
}
// consumeAddrSpec parses a single RFC 5322 addr-spec at the start of p.
// On failure the parser is restored to its state at entry so the
// caller can retry with a different production (e.g. name-addr).
func (p *addrParser) consumeAddrSpec() (spec string, err error) {
	debug.Printf("consumeAddrSpec: %q", p.s)

	orig := *p
	defer func() {
		if err != nil {
			// Back out all consumption performed by this call.
			*p = orig
		}
	}()

	// local-part = dot-atom / quoted-string
	var localPart string
	p.skipSpace()
	if p.empty() {
		return "", errors.New("mail: no addr-spec")
	}
	if p.peek() == '"' {
		// quoted-string
		debug.Printf("consumeAddrSpec: parsing quoted-string")
		localPart, err = p.consumeQuotedString()
		if localPart == "" {
			err = errors.New("mail: empty quoted string in addr-spec")
		}
	} else {
		// dot-atom
		debug.Printf("consumeAddrSpec: parsing dot-atom")
		localPart, err = p.consumeAtom(true, false)
	}
	if err != nil {
		debug.Printf("consumeAddrSpec: failed: %v", err)
		return "", err
	}

	if !p.consume('@') {
		return "", errors.New("mail: missing @ in addr-spec")
	}

	// domain = dot-atom / domain-literal
	var domain string
	p.skipSpace()
	if p.empty() {
		return "", errors.New("mail: no domain in addr-spec")
	}

	if p.peek() == '[' {
		// domain-literal
		domain, err = p.consumeDomainLiteral()
		if err != nil {
			return "", err
		}
	} else {
		// dot-atom
		domain, err = p.consumeAtom(true, false)
		if err != nil {
			return "", err
		}
	}

	return localPart + "@" + domain, nil
}
// consumePhrase parses the RFC 5322 phrase at the start of p.
// Words may be atoms or quoted strings. RFC 2047 encoded-words are
// decoded, and adjacent encoded-words are joined without a separating
// space (per RFC 2047).
func (p *addrParser) consumePhrase() (phrase string, err error) {
	debug.Printf("consumePhrase: [%s]", p.s)
	// phrase = 1*word
	var words []string
	var isPrevEncoded bool
	for {
		// obs-phrase allows CFWS after one word
		if len(words) > 0 {
			if !p.skipCFWS() {
				return "", errors.New("mail: misformatted parenthetical comment")
			}
		}
		// word = atom / quoted-string
		var word string
		p.skipSpace()
		if p.empty() {
			break
		}
		isEncoded := false
		if p.peek() == '"' {
			// quoted-string
			word, err = p.consumeQuotedString()
		} else {
			// atom
			// We actually parse dot-atom here to be more permissive
			// than what RFC 5322 specifies.
			word, err = p.consumeAtom(true, true)
			if err == nil {
				word, isEncoded, err = p.decodeRFC2047Word(word)
			}
		}

		if err != nil {
			break
		}
		debug.Printf("consumePhrase: consumed %q", word)
		if isPrevEncoded && isEncoded {
			// Concatenate consecutive encoded-words directly.
			words[len(words)-1] += word
		} else {
			words = append(words, word)
		}
		isPrevEncoded = isEncoded
	}
	// Ignore any error if we got at least one word.
	if err != nil && len(words) == 0 {
		debug.Printf("consumePhrase: hit err: %v", err)
		return "", fmt.Errorf("mail: missing word in phrase: %v", err)
	}
	phrase = strings.Join(words, " ")
	return phrase, nil
}
// consumeQuotedString parses the quoted string at the start of p.
// The caller must have verified that p begins with '"'. The returned
// string has the surrounding quotes and backslash escapes removed;
// the closing quote is consumed.
func (p *addrParser) consumeQuotedString() (qs string, err error) {
	// Assume first byte is '"'.
	i := 1
	qsb := make([]rune, 0, 10)

	escaped := false

Loop:
	for {
		r, size := utf8.DecodeRuneInString(p.s[i:])

		switch {
		case size == 0:
			return "", errors.New("mail: unclosed quoted-string")

		case size == 1 && r == utf8.RuneError:
			return "", fmt.Errorf("mail: invalid utf-8 in quoted-string: %q", p.s)

		case escaped:
			// quoted-pair = ("\" (VCHAR / WSP))
			if !isVchar(r) && !isWSP(r) {
				return "", fmt.Errorf("mail: bad character in quoted-string: %q", r)
			}

			qsb = append(qsb, r)
			escaped = false

		case isQtext(r) || isWSP(r):
			// qtext (printable US-ASCII excluding " and \), or
			// FWS (almost; we're ignoring CRLF)
			qsb = append(qsb, r)

		case r == '"':
			break Loop

		case r == '\\':
			escaped = true

		default:
			return "", fmt.Errorf("mail: bad character in quoted-string: %q", r)
		}

		i += size
	}
	// Advance past the content and the closing quote.
	p.s = p.s[i+1:]
	return string(qsb), nil
}
// consumeAtom parses an RFC 5322 atom at the start of p.
// If dot is true, consumeAtom parses an RFC 5322 dot-atom instead.
// If permissive is true, consumeAtom will not fail on:
//   - leading/trailing/double dots in the atom (see golang.org/issue/4938)
func (p *addrParser) consumeAtom(dot bool, permissive bool) (atom string, err error) {
	// Find the end of the run of atext (optionally including '.') runes.
	end := 0
	for {
		r, size := utf8.DecodeRuneInString(p.s[end:])
		if size == 1 && r == utf8.RuneError {
			return "", fmt.Errorf("mail: invalid utf-8 in address: %q", p.s)
		}
		if size == 0 || !isAtext(r, dot) {
			break
		}
		end += size
	}
	if end == 0 {
		return "", errors.New("mail: invalid string")
	}
	atom, p.s = p.s[:end], p.s[end:]
	if permissive {
		return atom, nil
	}
	// Enforce the strict dot-atom grammar: dots must separate atext runs.
	if strings.HasPrefix(atom, ".") {
		return "", errors.New("mail: leading dot in atom")
	}
	if strings.Contains(atom, "..") {
		return "", errors.New("mail: double dot in atom")
	}
	if strings.HasSuffix(atom, ".") {
		return "", errors.New("mail: trailing dot in atom")
	}
	return atom, nil
}
// consumeDomainLiteral parses an RFC 5322 domain-literal at the start of p.
// Only literals whose contents parse as an IP address are accepted;
// the result is returned with its surrounding brackets.
func (p *addrParser) consumeDomainLiteral() (string, error) {
	// Skip the leading [
	if !p.consume('[') {
		return "", errors.New(`mail: missing "[" in domain-literal`)
	}

	// Parse the dtext
	dtext := p.s
	dtextLen := 0
	for {
		if p.empty() {
			return "", errors.New("mail: unclosed domain-literal")
		}
		if p.peek() == ']' {
			break
		}

		r, size := utf8.DecodeRuneInString(p.s)
		if size == 1 && r == utf8.RuneError {
			return "", fmt.Errorf("mail: invalid utf-8 in domain-literal: %q", p.s)
		}
		if !isDtext(r) {
			return "", fmt.Errorf("mail: bad character in domain-literal: %q", r)
		}

		dtextLen += size
		p.s = p.s[size:]
	}
	// dtext still aliases the pre-loop input; trim it to the consumed span.
	dtext = dtext[:dtextLen]

	// Skip the trailing ]
	if !p.consume(']') {
		return "", errors.New("mail: unclosed domain-literal")
	}

	// Check if the domain literal is an IP address
	if net.ParseIP(dtext) == nil {
		return "", fmt.Errorf("mail: invalid IP address in domain-literal: %q", dtext)
	}

	return "[" + dtext + "]", nil
}
// consumeDisplayNameComment parses a parenthesized comment used as a
// display name (e.g. "addr@spec (Name)"), decoding any RFC 2047
// encoded-words found inside it.
func (p *addrParser) consumeDisplayNameComment() (string, error) {
	if !p.consume('(') {
		return "", errors.New("mail: comment does not start with (")
	}
	comment, ok := p.consumeComment()
	if !ok {
		return "", errors.New("mail: misformatted parenthetical comment")
	}

	// TODO(stapelberg): parse quoted-string within comment
	words := strings.FieldsFunc(comment, func(r rune) bool { return r == ' ' || r == '\t' })
	for idx, word := range words {
		decoded, isEncoded, err := p.decodeRFC2047Word(word)
		if err != nil {
			return "", err
		}
		if isEncoded {
			// Only replace words that were actually encoded-words.
			words[idx] = decoded
		}
	}

	return strings.Join(words, " "), nil
}
// consume advances past c if and only if c is the next input byte.
// It reports whether the byte was consumed.
func (p *addrParser) consume(c byte) bool {
	if p.empty() || p.peek() != c {
		return false
	}
	p.s = p.s[1:]
	return true
}

// skipSpace skips the leading space and tab characters.
func (p *addrParser) skipSpace() {
	p.s = strings.TrimLeft(p.s, " \t")
}

// peek returns the next input byte without consuming it.
// It panics on empty input; callers must check empty() first.
func (p *addrParser) peek() byte {
	return p.s[0]
}

// empty reports whether all input has been consumed.
func (p *addrParser) empty() bool {
	return p.len() == 0
}

// len returns the number of unconsumed input bytes.
func (p *addrParser) len() int {
	return len(p.s)
}
// skipCFWS skips CFWS as defined in RFC5322. It reports whether the
// skipped run was well formed (every "(" comment was properly closed).
func (p *addrParser) skipCFWS() bool {
	p.skipSpace()
	for p.consume('(') {
		if _, ok := p.consumeComment(); !ok {
			return false
		}
		p.skipSpace()
	}
	return true
}
// consumeComment parses the body of a parenthesized comment, assuming
// the opening '(' has already been consumed. Nested comments are
// tracked by depth; only text at the outermost level is returned,
// with backslashes dropped from quoted-pairs. The boolean result
// reports whether the comment was properly closed.
func (p *addrParser) consumeComment() (string, bool) {
	// '(' already consumed.
	depth := 1

	// Use strings.Builder instead of repeated string concatenation,
	// which is quadratic in the comment length.
	var comment strings.Builder
	for !p.empty() && depth > 0 {
		switch {
		case p.peek() == '\\' && p.len() > 1:
			// Quoted-pair: drop the backslash, keep the escaped byte.
			p.s = p.s[1:]
		case p.peek() == '(':
			depth++
		case p.peek() == ')':
			depth--
		}
		if depth > 0 {
			comment.WriteByte(p.s[0])
		}
		p.s = p.s[1:]
	}

	return comment.String(), depth == 0
}
// decodeRFC2047Word attempts to decode s as an RFC 2047 encoded-word
// using p.dec (or the package's built-in decoder when p.dec is nil).
// On success it returns the decoded text with isEncoded=true. If s is
// not a valid encoded-word, s is returned unchanged with
// isEncoded=false and a nil error. An unsupported charset returns the
// original s, isEncoded=true, and the charset error.
func (p *addrParser) decodeRFC2047Word(s string) (word string, isEncoded bool, err error) {
	dec := p.dec
	if dec == nil {
		dec = &rfc2047Decoder
	}

	// Substitute our own CharsetReader function so that we can tell
	// whether an error from the Decode method was due to the
	// CharsetReader (meaning the charset is invalid).
	// We used to look for the charsetError type in the error result,
	// but that behaves badly with CharsetReaders other than the
	// one in rfc2047Decoder.
	adec := *dec
	charsetReaderError := false
	adec.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
		if dec.CharsetReader == nil {
			charsetReaderError = true
			return nil, charsetError(charset)
		}
		r, err := dec.CharsetReader(charset, input)
		if err != nil {
			charsetReaderError = true
		}
		return r, err
	}
	word, err = adec.Decode(s)
	if err == nil {
		return word, true, nil
	}

	// If the error came from the character set reader
	// (meaning the character set itself is invalid
	// but the decoding worked fine until then),
	// return the original text and the error,
	// with isEncoded=true.
	if charsetReaderError {
		return s, true, err
	}

	// Ignore invalid RFC 2047 encoded-word errors.
	return s, false, nil
}
// rfc2047Decoder is the fallback decoder used when an addrParser has
// no custom WordDecoder; it reports every non-built-in charset as an
// error via charsetError.
var rfc2047Decoder = mime.WordDecoder{
	CharsetReader: func(charset string, input io.Reader) (io.Reader, error) {
		return nil, charsetError(charset)
	},
}

// charsetError is the error reported for an unsupported charset in an
// RFC 2047 encoded-word; its value is the charset name.
type charsetError string

func (e charsetError) Error() string {
	return fmt.Sprintf("charset not supported: %q", string(e))
}
// isAtext reports whether r is an RFC 5322 atext character.
// If dot is true, period is included.
func isAtext(r rune, dot bool) bool {
	if r == '.' {
		return dot
	}
	// RFC 5322 3.2.3. specials are excluded from atext.
	if strings.ContainsRune(`()<>[]:;@\,"`, r) {
		return false
	}
	return isVchar(r)
}
// isQtext reports whether r is an RFC 5322 qtext character:
// printable US-ASCII (or RFC 6532 multibyte) excluding backslash and quote.
func isQtext(r rune) bool {
	return r != '\\' && r != '"' && isVchar(r)
}
// quoteString renders a string as an RFC 5322 quoted-string:
// surrounded by double quotes, with any VCHAR that is not qtext
// escaped by a backslash. Non-printable runes are dropped.
func quoteString(s string) string {
	var sb strings.Builder
	sb.Grow(len(s) + 2)
	sb.WriteByte('"')
	for _, r := range s {
		switch {
		case isQtext(r) || isWSP(r):
			sb.WriteRune(r)
		case isVchar(r):
			sb.WriteByte('\\')
			sb.WriteRune(r)
		}
	}
	sb.WriteByte('"')
	return sb.String()
}
// isVchar reports whether r is an RFC 5322 VCHAR character
// (visible/printing US-ASCII) or an RFC 6532 multibyte rune.
func isVchar(r rune) bool {
	if isMultibyte(r) {
		return true
	}
	return '!' <= r && r <= '~'
}
// isMultibyte reports whether r is a multi-byte UTF-8 character
// as supported by RFC 6532.
func isMultibyte(r rune) bool {
	// Runes below utf8.RuneSelf (0x80) encode as a single byte.
	return !(r < utf8.RuneSelf)
}
// isWSP reports whether r is a WSP (white space).
// WSP is a space or horizontal tab (RFC 5234 Appendix B).
func isWSP(r rune) bool {
	switch r {
	case ' ', '\t':
		return true
	default:
		return false
	}
}
// isDtext reports whether r is an RFC 5322 dtext character:
// printable US-ASCII (or RFC 6532 multibyte) excluding "[", "]", and "\".
func isDtext(r rune) bool {
	switch r {
	case '[', ']', '\\':
		return false
	}
	return isVchar(r)
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jsonrpc implements a JSON-RPC 1.0 ClientCodec and ServerCodec
// for the rpc package.
// For JSON-RPC 2.0 support, see https://godoc.org/?q=json-rpc+2.0
package jsonrpc
import (
"encoding/json"
"fmt"
"io"
"net"
"net/rpc"
"sync"
)
// clientCodec implements rpc.ClientCodec using JSON-RPC 1.0 framing.
type clientCodec struct {
	dec *json.Decoder // for reading JSON values
	enc *json.Encoder // for writing JSON values
	c   io.Closer

	// temporary work space, reused across calls
	req  clientRequest
	resp clientResponse

	// JSON-RPC responses include the request id but not the request method.
	// Package rpc expects both.
	// We save the request method in pending when sending a request
	// and then look it up by request ID when filling out the rpc Response.
	mutex   sync.Mutex        // protects pending
	pending map[uint64]string // map request id to method name
}

// NewClientCodec returns a new [rpc.ClientCodec] using JSON-RPC on conn.
func NewClientCodec(conn io.ReadWriteCloser) rpc.ClientCodec {
	return &clientCodec{
		dec:     json.NewDecoder(conn),
		enc:     json.NewEncoder(conn),
		c:       conn,
		pending: make(map[uint64]string),
	}
}
// clientRequest is the JSON-RPC 1.0 wire format of a request.
type clientRequest struct {
	Method string `json:"method"`
	Params [1]any `json:"params"` // params are sent as a one-element array
	Id     uint64 `json:"id"`
}

// WriteRequest marshals and sends a request, recording its service
// method under the rpc sequence number so ReadResponseHeader can
// restore it when the matching response arrives.
func (c *clientCodec) WriteRequest(r *rpc.Request, param any) error {
	c.mutex.Lock()
	c.pending[r.Seq] = r.ServiceMethod
	c.mutex.Unlock()
	// c.req is reused across calls; every field is overwritten here.
	c.req.Method = r.ServiceMethod
	c.req.Params[0] = param
	c.req.Id = r.Seq
	return c.enc.Encode(&c.req)
}
// clientResponse is the JSON-RPC 1.0 wire format of a response.
type clientResponse struct {
	Id     uint64           `json:"id"`
	Result *json.RawMessage `json:"result"` // decoded later by ReadResponseBody
	Error  any              `json:"error"`
}

// reset clears the response so it can be reused for the next Decode.
func (r *clientResponse) reset() {
	r.Id = 0
	r.Result = nil
	r.Error = nil
}
// ReadResponseHeader decodes the next response and fills in r's
// ServiceMethod (looked up from the pending map by id), Seq, and Error.
func (c *clientCodec) ReadResponseHeader(r *rpc.Response) error {
	c.resp.reset()
	if err := c.dec.Decode(&c.resp); err != nil {
		return err
	}

	c.mutex.Lock()
	r.ServiceMethod = c.pending[c.resp.Id]
	delete(c.pending, c.resp.Id)
	c.mutex.Unlock()

	r.Error = ""
	r.Seq = c.resp.Id
	if c.resp.Error != nil || c.resp.Result == nil {
		// A JSON-RPC error value must be a string; anything else
		// (including a missing result with a nil error) is a
		// protocol error.
		x, ok := c.resp.Error.(string)
		if !ok {
			return fmt.Errorf("invalid error %v", c.resp.Error)
		}
		if x == "" {
			x = "unspecified error"
		}
		r.Error = x
	}
	return nil
}
// ReadResponseBody unmarshals the previously read raw result into x.
// A nil x tells the codec to discard the body.
func (c *clientCodec) ReadResponseBody(x any) error {
	if x == nil {
		return nil
	}
	return json.Unmarshal(*c.resp.Result, x)
}

// Close closes the underlying connection.
func (c *clientCodec) Close() error {
	return c.c.Close()
}
// NewClient returns a new [rpc.Client] to handle requests to the
// set of services at the other end of the connection.
func NewClient(conn io.ReadWriteCloser) *rpc.Client {
	return rpc.NewClientWithCodec(NewClientCodec(conn))
}

// Dial connects to a JSON-RPC server at the specified network address.
// The network and address parameters have the same meaning as in net.Dial.
func Dial(network, address string) (*rpc.Client, error) {
	conn, err := net.Dial(network, address)
	if err != nil {
		return nil, err
	}
	return NewClient(conn), nil
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonrpc
import (
"encoding/json"
"errors"
"io"
"net/rpc"
"sync"
)
// errMissingParams is returned by ReadRequestBody when a request
// carries no params field.
var errMissingParams = errors.New("jsonrpc: request body missing params")

// serverCodec implements rpc.ServerCodec using JSON-RPC 1.0 framing.
type serverCodec struct {
	dec *json.Decoder // for reading JSON values
	enc *json.Encoder // for writing JSON values
	c   io.Closer

	// temporary work space, reused across calls
	req serverRequest

	// JSON-RPC clients can use arbitrary json values as request IDs.
	// Package rpc expects uint64 request IDs.
	// We assign uint64 sequence numbers to incoming requests
	// but save the original request ID in the pending map.
	// When rpc responds, we use the sequence number in
	// the response to find the original request ID.
	mutex   sync.Mutex // protects seq, pending
	seq     uint64
	pending map[uint64]*json.RawMessage
}

// NewServerCodec returns a new [rpc.ServerCodec] using JSON-RPC on conn.
func NewServerCodec(conn io.ReadWriteCloser) rpc.ServerCodec {
	return &serverCodec{
		dec:     json.NewDecoder(conn),
		enc:     json.NewEncoder(conn),
		c:       conn,
		pending: make(map[uint64]*json.RawMessage),
	}
}
// serverRequest is the JSON-RPC 1.0 wire format of a request.
// Params and Id are kept raw: Params is decoded later by
// ReadRequestBody, and Id is echoed back verbatim in the response.
type serverRequest struct {
	Method string           `json:"method"`
	Params *json.RawMessage `json:"params"`
	Id     *json.RawMessage `json:"id"`
}

// reset clears the request so it can be reused for the next Decode.
func (r *serverRequest) reset() {
	r.Method = ""
	r.Params = nil
	r.Id = nil
}

// serverResponse is the JSON-RPC 1.0 wire format of a response.
type serverResponse struct {
	Id     *json.RawMessage `json:"id"`
	Result any              `json:"result"`
	Error  any              `json:"error"`
}
// ReadRequestHeader decodes the next request and maps its arbitrary
// JSON id to a fresh uint64 sequence number for package rpc.
func (c *serverCodec) ReadRequestHeader(r *rpc.Request) error {
	c.req.reset()
	if err := c.dec.Decode(&c.req); err != nil {
		return err
	}
	r.ServiceMethod = c.req.Method

	// JSON request id can be any JSON value;
	// RPC package expects uint64. Translate to
	// internal uint64 and save JSON on the side.
	c.mutex.Lock()
	c.seq++
	c.pending[c.seq] = c.req.Id
	c.req.Id = nil
	r.Seq = c.seq
	c.mutex.Unlock()

	return nil
}
// ReadRequestBody unmarshals the raw request params into x.
// A nil x tells the codec to discard the body.
func (c *serverCodec) ReadRequestBody(x any) error {
	if x == nil {
		return nil
	}
	if c.req.Params == nil {
		return errMissingParams
	}

	// JSON params is array value.
	// RPC params is struct.
	// Unmarshal into array containing struct for now.
	// Should think about making RPC more general.
	var params [1]any
	params[0] = x
	return json.Unmarshal(*c.req.Params, &params)
}
// null is the JSON null literal, used as the response id when the
// client's request id could not be recovered.
var null = json.RawMessage([]byte("null"))

// WriteResponse marshals and sends a response, restoring the client's
// original JSON request id that ReadRequestHeader saved.
func (c *serverCodec) WriteResponse(r *rpc.Response, x any) error {
	c.mutex.Lock()
	b, ok := c.pending[r.Seq]
	if !ok {
		c.mutex.Unlock()
		return errors.New("invalid sequence number in response")
	}
	delete(c.pending, r.Seq)
	c.mutex.Unlock()

	if b == nil {
		// Invalid request so no id. Use JSON null.
		b = &null
	}
	resp := serverResponse{Id: b}
	if r.Error == "" {
		resp.Result = x
	} else {
		resp.Error = r.Error
	}
	return c.enc.Encode(resp)
}
// Close closes the underlying connection.
func (c *serverCodec) Close() error {
	return c.c.Close()
}

// ServeConn runs the JSON-RPC server on a single connection.
// ServeConn blocks, serving the connection until the client hangs up.
// The caller typically invokes ServeConn in a go statement.
func ServeConn(conn io.ReadWriteCloser) {
	rpc.ServeCodec(NewServerCodec(conn))
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package smtp
import (
"crypto/hmac"
"crypto/md5"
"errors"
"fmt"
)
// Auth is implemented by an SMTP authentication mechanism.
type Auth interface {
	// Start begins an authentication with a server.
	// It returns the name of the authentication protocol
	// and optionally data to include in the initial AUTH message
	// sent to the server.
	// If it returns a non-nil error, the SMTP client aborts
	// the authentication attempt and closes the connection.
	Start(server *ServerInfo) (proto string, toServer []byte, err error)

	// Next continues the authentication. The server has just sent
	// the fromServer data. If more is true, the server expects a
	// response, which Next should return as toServer; otherwise
	// Next should return toServer == nil.
	// If Next returns a non-nil error, the SMTP client aborts
	// the authentication attempt and closes the connection.
	Next(fromServer []byte, more bool) (toServer []byte, err error)
}

// ServerInfo records information about an SMTP server.
type ServerInfo struct {
	Name string   // SMTP server name
	TLS  bool     // using TLS, with valid certificate for Name
	Auth []string // advertised authentication mechanisms (from the EHLO response)
}
// plainAuth implements the PLAIN mechanism (RFC 4616).
type plainAuth struct {
	identity, username, password string
	host                         string // the only server name these credentials may be sent to
}

// PlainAuth returns an [Auth] that implements the PLAIN authentication
// mechanism as defined in RFC 4616. The returned Auth uses the given
// username and password to authenticate to host and act as identity.
// Usually identity should be the empty string, to act as username.
//
// PlainAuth will only send the credentials if the connection is using TLS
// or is connected to localhost. Otherwise authentication will fail with an
// error, without sending the credentials.
func PlainAuth(identity, username, password, host string) Auth {
	return &plainAuth{identity, username, password, host}
}
// isLocalhost reports whether name is a loopback host name or address
// ("localhost", IPv4 127.0.0.1, or IPv6 ::1).
func isLocalhost(name string) bool {
	switch name {
	case "localhost", "127.0.0.1", "::1":
		return true
	}
	return false
}
// Start implements Auth. It refuses to proceed unless the connection
// is already encrypted or the server is localhost, and verifies that
// the server name matches the host the credentials were created for.
func (a *plainAuth) Start(server *ServerInfo) (string, []byte, error) {
	// Must have TLS, or else localhost server.
	// Note: If TLS is not true, then we can't trust ANYTHING in ServerInfo.
	// In particular, it doesn't matter if the server advertises PLAIN auth.
	// That might just be the attacker saying
	// "it's ok, you can trust me with your password."
	if !server.TLS && !isLocalhost(server.Name) {
		return "", nil, errors.New("unencrypted connection")
	}
	if server.Name != a.host {
		return "", nil, errors.New("wrong host name")
	}
	// RFC 4616 message format: authzid NUL authcid NUL passwd.
	resp := []byte(a.identity + "\x00" + a.username + "\x00" + a.password)
	return "PLAIN", resp, nil
}

// Next implements Auth. PLAIN is a single-shot mechanism, so any
// further server challenge is a protocol error.
func (a *plainAuth) Next(fromServer []byte, more bool) ([]byte, error) {
	if more {
		// We've already sent everything.
		return nil, errors.New("unexpected server challenge")
	}
	return nil, nil
}
// cramMD5Auth implements the CRAM-MD5 mechanism (RFC 2195).
type cramMD5Auth struct {
	username, secret string
}

// CRAMMD5Auth returns an [Auth] that implements the CRAM-MD5 authentication
// mechanism as defined in RFC 2195.
// The returned Auth uses the given username and secret to authenticate
// to the server using the challenge-response mechanism.
func CRAMMD5Auth(username, secret string) Auth {
	return &cramMD5Auth{username, secret}
}

// Start implements Auth. CRAM-MD5 sends no initial response; the
// server is expected to reply with a challenge.
func (a *cramMD5Auth) Start(server *ServerInfo) (string, []byte, error) {
	return "CRAM-MD5", nil, nil
}

// Next implements Auth. It answers the server challenge with
// "username hex(HMAC-MD5(secret, challenge))".
func (a *cramMD5Auth) Next(fromServer []byte, more bool) ([]byte, error) {
	if more {
		d := hmac.New(md5.New, []byte(a.secret))
		d.Write(fromServer)
		s := make([]byte, 0, d.Size())
		return fmt.Appendf(nil, "%s %x", a.username, d.Sum(s)), nil
	}
	return nil, nil
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package smtp implements the Simple Mail Transfer Protocol as defined in RFC 5321.
// It also implements the following extensions:
//
// 8BITMIME RFC 1652
// AUTH RFC 2554
// STARTTLS RFC 3207
//
// Additional extensions may be handled by clients.
//
// The smtp package is frozen and is not accepting new features.
// Some external packages provide more functionality. See:
//
// https://godoc.org/?q=smtp
package smtp
import (
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"net"
"net/textproto"
"strings"
)
// A Client represents a client connection to an SMTP server.
type Client struct {
	// Text is the textproto.Conn used by the Client. It is exported to allow for
	// clients to add extensions.
	Text *textproto.Conn
	// keep a reference to the connection so it can be used to create a TLS
	// connection later
	conn net.Conn
	// whether the Client is using TLS
	tls        bool
	serverName string
	// map of supported extensions, keyed by extension keyword (from EHLO)
	ext map[string]string
	// supported auth mechanisms (from the EHLO AUTH line)
	auth      []string
	localName string // the name to use in HELO/EHLO
	didHello  bool   // whether we've said HELO/EHLO
	helloError error // the error from the hello
}
// Dial returns a new [Client] connected to an SMTP server at addr.
// The addr must include a port, as in "mail.example.com:smtp".
func Dial(addr string) (*Client, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	host, _, _ := net.SplitHostPort(addr)
	return NewClient(conn, host)
}

// NewClient returns a new [Client] using an existing connection and host as a
// server name to be used when authenticating.
// It consumes the server's 220 greeting; on failure the connection is closed.
func NewClient(conn net.Conn, host string) (*Client, error) {
	text := textproto.NewConn(conn)
	_, _, err := text.ReadResponse(220)
	if err != nil {
		text.Close()
		return nil, err
	}
	c := &Client{Text: text, conn: conn, serverName: host, localName: "localhost"}
	// Record whether the connection is already TLS (e.g. implicit TLS).
	_, c.tls = conn.(*tls.Conn)
	return c, nil
}
// Close closes the connection.
func (c *Client) Close() error {
	return c.Text.Close()
}

// hello runs a hello exchange if needed. EHLO is tried first, with a
// fallback to HELO if it fails; the result is cached so the exchange
// happens at most once per connection.
func (c *Client) hello() error {
	if !c.didHello {
		c.didHello = true
		err := c.ehlo()
		if err != nil {
			c.helloError = c.helo()
		}
	}
	return c.helloError
}
// Hello sends a HELO or EHLO to the server as the given host name.
// Calling this method is only necessary if the client needs control
// over the host name used. The client will introduce itself as "localhost"
// automatically otherwise. If Hello is called, it must be called before
// any of the other methods.
func (c *Client) Hello(localName string) error {
	if err := validateLine(localName); err != nil {
		return err
	}
	// The greeting has already been exchanged; changing the local name
	// now would have no effect, so reject the call.
	if c.didHello {
		return errors.New("smtp: Hello called after other methods")
	}
	c.localName = localName
	return c.hello()
}
// cmd sends a single command to the server and reads its reply,
// expecting a code matching expectCode
// (per textproto.Reader.ReadResponse code matching).
func (c *Client) cmd(expectCode int, format string, args ...any) (int, string, error) {
	id, err := c.Text.Cmd(format, args...)
	if err != nil {
		return 0, "", err
	}
	c.Text.StartResponse(id)
	defer c.Text.EndResponse(id)
	return c.Text.ReadResponse(expectCode)
}
// helo sends the HELO greeting to the server. It should be used only when the
// server does not support ehlo.
func (c *Client) helo() error {
	// HELO servers advertise no extensions.
	c.ext = nil
	_, _, err := c.cmd(250, "HELO %s", c.localName)
	return err
}

// ehlo sends the EHLO (extended hello) greeting to the server. It
// should be the preferred greeting for servers that support it.
// On success it records the server's advertised extensions in c.ext
// and any AUTH mechanisms in c.auth.
func (c *Client) ehlo() error {
	_, msg, err := c.cmd(250, "EHLO %s", c.localName)
	if err != nil {
		return err
	}
	ext := make(map[string]string)
	extList := strings.Split(msg, "\n")
	if len(extList) > 1 {
		// The first line is the server greeting; the remaining lines
		// each list one extension as "KEYWORD params".
		extList = extList[1:]
		for _, line := range extList {
			k, v, _ := strings.Cut(line, " ")
			ext[k] = v
		}
	}
	if mechs, ok := ext["AUTH"]; ok {
		c.auth = strings.Split(mechs, " ")
	}
	c.ext = ext
	return err
}
// StartTLS sends the STARTTLS command and encrypts all further communication.
// Only servers that advertise the STARTTLS extension support this function.
// On success the EHLO exchange is repeated over the encrypted channel,
// since extension lists may differ after TLS is established.
func (c *Client) StartTLS(config *tls.Config) error {
	if err := c.hello(); err != nil {
		return err
	}
	_, _, err := c.cmd(220, "STARTTLS")
	if err != nil {
		return err
	}
	c.conn = tls.Client(c.conn, config)
	c.Text = textproto.NewConn(c.conn)
	c.tls = true
	return c.ehlo()
}

// TLSConnectionState returns the client's TLS connection state.
// The return values are their zero values if [Client.StartTLS] did
// not succeed.
func (c *Client) TLSConnectionState() (state tls.ConnectionState, ok bool) {
	tc, ok := c.conn.(*tls.Conn)
	if !ok {
		return
	}
	return tc.ConnectionState(), true
}
// Verify checks the validity of an email address on the server.
// If Verify returns nil, the address is valid. A non-nil return
// does not necessarily indicate an invalid address. Many servers
// will not verify addresses for security reasons.
func (c *Client) Verify(addr string) error {
	// Reject input that would corrupt the command line
	// (validateLine is defined elsewhere in this file).
	if err := validateLine(addr); err != nil {
		return err
	}
	if err := c.hello(); err != nil {
		return err
	}
	_, _, err := c.cmd(250, "VRFY %s", addr)
	return err
}
// Auth authenticates a client using the provided authentication mechanism.
// A failed authentication closes the connection.
// Only servers that advertise the AUTH extension support this function.
func (c *Client) Auth(a Auth) error {
	if err := c.hello(); err != nil {
		return err
	}
	encoding := base64.StdEncoding
	mech, resp, err := a.Start(&ServerInfo{c.serverName, c.tls, c.auth})
	if err != nil {
		c.Quit()
		return err
	}
	resp64 := make([]byte, encoding.EncodedLen(len(resp)))
	encoding.Encode(resp64, resp)
	// TrimSpace drops the trailing space when the initial response is empty.
	code, msg64, err := c.cmd(0, "%s", strings.TrimSpace(fmt.Sprintf("AUTH %s %s", mech, resp64)))
	for err == nil {
		var msg []byte
		switch code {
		case 334:
			// Server challenge, base64-encoded.
			msg, err = encoding.DecodeString(msg64)
		case 235:
			// the last message isn't base64 because it isn't a challenge
			msg = []byte(msg64)
		default:
			err = &textproto.Error{Code: code, Msg: msg64}
		}
		if err == nil {
			resp, err = a.Next(msg, code == 334)
		}
		if err != nil {
			// abort the AUTH
			c.cmd(501, "*")
			c.Quit()
			break
		}
		if resp == nil {
			// Mechanism has nothing more to send; exchange is complete.
			break
		}
		resp64 = make([]byte, encoding.EncodedLen(len(resp)))
		encoding.Encode(resp64, resp)
		code, msg64, err = c.cmd(0, "%s", resp64)
	}
	return err
}
// Mail issues a MAIL command to the server using the provided email address.
// If the server supports the 8BITMIME extension, Mail adds the BODY=8BITMIME
// parameter. If the server supports the SMTPUTF8 extension, Mail adds the
// SMTPUTF8 parameter.
// This initiates a mail transaction and is followed by one or more [Client.Rcpt] calls.
func (c *Client) Mail(from string) error {
	if err := validateLine(from); err != nil {
		return err
	}
	if err := c.hello(); err != nil {
		return err
	}
	// Build the command format, appending parameters for extensions the
	// server advertised in its EHLO response.
	var sb strings.Builder
	sb.WriteString("MAIL FROM:<%s>")
	if c.ext != nil {
		if _, ok := c.ext["8BITMIME"]; ok {
			sb.WriteString(" BODY=8BITMIME")
		}
		if _, ok := c.ext["SMTPUTF8"]; ok {
			sb.WriteString(" SMTPUTF8")
		}
	}
	_, _, err := c.cmd(250, sb.String(), from)
	return err
}
// Rcpt issues a RCPT command to the server using the provided email address.
// A call to Rcpt must be preceded by a call to [Client.Mail] and may be followed by
// a [Client.Data] call or another Rcpt call.
func (c *Client) Rcpt(to string) error {
	err := validateLine(to)
	if err == nil {
		// expectCode 25 accepts any 25x reply (250 or 251).
		_, _, err = c.cmd(25, "RCPT TO:<%s>", to)
	}
	return err
}
// dataCloser couples the dot-encoding writer returned by [Client.Data]
// with the client, so that Close can read the server's final response.
type dataCloser struct {
	c *Client
	io.WriteCloser
}

// Close terminates the DATA command: it closes the dot encoder (which
// writes the ".\r\n" terminator) and then reads the server's 250 reply.
// An error from closing the encoder is no longer silently discarded: it
// is returned when the server response itself reports no error.
func (d *dataCloser) Close() error {
	closeErr := d.WriteCloser.Close()
	_, _, err := d.c.Text.ReadResponse(250)
	if err == nil {
		// The server accepted the message, but surface any local
		// write/flush failure from the encoder.
		err = closeErr
	}
	return err
}
// Data issues a DATA command to the server and returns a writer that
// can be used to write the mail headers and body. The caller should
// close the writer before calling any more methods on c. A call to
// Data must be preceded by one or more calls to [Client.Rcpt].
func (c *Client) Data() (io.WriteCloser, error) {
	if _, _, err := c.cmd(354, "DATA"); err != nil {
		return nil, err
	}
	return &dataCloser{c: c, WriteCloser: c.Text.DotWriter()}, nil
}
// testHookStartTLS lets tests inspect/modify the TLS config before StartTLS.
var testHookStartTLS func(*tls.Config) // nil, except for tests

// SendMail connects to the server at addr, switches to TLS if
// possible, authenticates with the optional mechanism a if possible,
// and then sends an email from address from, to addresses to, with
// message msg.
// The addr must include a port, as in "mail.example.com:smtp".
//
// The addresses in the to parameter are the SMTP RCPT addresses.
//
// The msg parameter should be an RFC 822-style email with headers
// first, a blank line, and then the message body. The lines of msg
// should be CRLF terminated. The msg headers should usually include
// fields such as "From", "To", "Subject", and "Cc". Sending "Bcc"
// messages is accomplished by including an email address in the to
// parameter but not including it in the msg headers.
//
// The SendMail function and the net/smtp package are low-level
// mechanisms and provide no support for DKIM signing, MIME
// attachments (see the mime/multipart package), or other mail
// functionality. Higher-level packages exist outside of the standard
// library.
func SendMail(addr string, a Auth, from string, to []string, msg []byte) error {
	// Reject addresses containing CR/LF before any network I/O.
	if err := validateLine(from); err != nil {
		return err
	}
	for _, recp := range to {
		if err := validateLine(recp); err != nil {
			return err
		}
	}
	c, err := Dial(addr)
	if err != nil {
		return err
	}
	defer c.Close()
	if err = c.hello(); err != nil {
		return err
	}
	// Opportunistic TLS: upgrade if the server advertises STARTTLS.
	if ok, _ := c.Extension("STARTTLS"); ok {
		config := &tls.Config{ServerName: c.serverName}
		if testHookStartTLS != nil {
			testHookStartTLS(config)
		}
		if err = c.StartTLS(config); err != nil {
			return err
		}
	}
	// Authenticate only when a mechanism was supplied and the server
	// advertised AUTH.
	if a != nil && c.ext != nil {
		if _, ok := c.ext["AUTH"]; !ok {
			return errors.New("smtp: server doesn't support AUTH")
		}
		if err = c.Auth(a); err != nil {
			return err
		}
	}
	// Standard transaction: MAIL, one RCPT per recipient, DATA, QUIT.
	if err = c.Mail(from); err != nil {
		return err
	}
	for _, addr := range to {
		if err = c.Rcpt(addr); err != nil {
			return err
		}
	}
	w, err := c.Data()
	if err != nil {
		return err
	}
	_, err = w.Write(msg)
	if err != nil {
		return err
	}
	// Close flushes the dot-encoded body and reads the server's reply.
	err = w.Close()
	if err != nil {
		return err
	}
	return c.Quit()
}
// Extension reports whether an extension is support by the server.
// The extension name is case-insensitive. If the extension is supported,
// Extension also returns a string that contains any parameters the
// server specifies for the extension.
func (c *Client) Extension(ext string) (bool, string) {
	if err := c.hello(); err != nil {
		return false, ""
	}
	// Indexing a nil map safely yields the zero value, so no explicit
	// nil check on c.ext is needed.
	param, ok := c.ext[strings.ToUpper(ext)]
	return ok, param
}
// Reset sends the RSET command to the server, aborting the current mail
// transaction.
func (c *Client) Reset() error {
	err := c.hello()
	if err == nil {
		_, _, err = c.cmd(250, "RSET")
	}
	return err
}
// Noop sends the NOOP command to the server. It does nothing but check
// that the connection to the server is okay.
func (c *Client) Noop() error {
	err := c.hello()
	if err == nil {
		_, _, err = c.cmd(250, "NOOP")
	}
	return err
}
// Quit sends the QUIT command and closes the connection to the server.
func (c *Client) Quit() error {
	c.hello() // ignore error; we're quitting anyhow
	if _, _, err := c.cmd(221, "QUIT"); err != nil {
		return err
	}
	return c.Text.Close()
}
// validateLine checks to see if a line has CR or LF as per RFC 5321.
func validateLine(line string) error {
	if !strings.ContainsAny(line, "\n\r") {
		return nil
	}
	return errors.New("smtp: A line must not contain CR or LF")
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package textproto
// A MIMEHeader represents a MIME-style header mapping
// keys to sets of values.
type MIMEHeader map[string][]string

// Add adds the key, value pair to the header.
// It appends to any existing values associated with key.
func (h MIMEHeader) Add(key, value string) {
	k := CanonicalMIMEHeaderKey(key)
	h[k] = append(h[k], value)
}
// Set sets the header entries associated with key to
// the single element value. It replaces any existing
// values associated with key.
func (h MIMEHeader) Set(key, value string) {
	k := CanonicalMIMEHeaderKey(key)
	h[k] = []string{value}
}
// Get gets the first value associated with the given key.
// It is case insensitive; [CanonicalMIMEHeaderKey] is used
// to canonicalize the provided key.
// If there are no values associated with the key, Get returns "".
// To use non-canonical keys, access the map directly.
func (h MIMEHeader) Get(key string) string {
	if h == nil {
		return ""
	}
	if vs := h[CanonicalMIMEHeaderKey(key)]; len(vs) > 0 {
		return vs[0]
	}
	return ""
}
// Values returns all values associated with the given key.
// It is case insensitive; [CanonicalMIMEHeaderKey] is
// used to canonicalize the provided key. To use non-canonical
// keys, access the map directly.
// The returned slice is not a copy.
func (h MIMEHeader) Values(key string) []string {
	if h == nil {
		return nil
	}
	canon := CanonicalMIMEHeaderKey(key)
	return h[canon]
}
// Del deletes the values associated with key.
func (h MIMEHeader) Del(key string) {
	canon := CanonicalMIMEHeaderKey(key)
	delete(h, canon)
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package textproto
import (
"sync"
)
// A Pipeline manages a pipelined in-order request/response sequence.
//
// To use a Pipeline p to manage multiple clients on a connection,
// each client should run:
//
//	id := p.Next()	// take a number
//
//	p.StartRequest(id)	// wait for turn to send request
//	«send request»
//	p.EndRequest(id)	// notify Pipeline that request is sent
//
//	p.StartResponse(id)	// wait for turn to read response
//	«read response»
//	p.EndResponse(id)	// notify Pipeline that response is read
//
// A pipelined server can use the same calls to ensure that
// responses computed in parallel are written in the correct order.
type Pipeline struct {
	mu       sync.Mutex // guards id
	id       uint       // next id handed out by Next
	request  sequencer  // orders the send-request phase
	response sequencer  // orders the read-response phase
}
// Next returns the next id for a request/response pair.
func (p *Pipeline) Next() uint {
	p.mu.Lock()
	defer p.mu.Unlock()
	id := p.id
	p.id++
	return id
}
// StartRequest blocks until it is time to send (or, if this is a server, receive)
// the request with the given id.
func (p *Pipeline) StartRequest(id uint) {
	p.request.Start(id)
}

// EndRequest notifies p that the request with the given id has been sent
// (or, if this is a server, received).
func (p *Pipeline) EndRequest(id uint) {
	p.request.End(id)
}

// StartResponse blocks until it is time to receive (or, if this is a server, send)
// the response with the given id.
func (p *Pipeline) StartResponse(id uint) {
	p.response.Start(id)
}

// EndResponse notifies p that the response with the given id has been received
// (or, if this is a server, sent).
func (p *Pipeline) EndResponse(id uint) {
	p.response.End(id)
}
// A sequencer schedules a sequence of numbered events that must
// happen in order, one after the other. The event numbering must start
// at 0 and increment without skipping. The event number wraps around
// safely as long as there are not 2^32 simultaneous events pending.
type sequencer struct {
	mu   sync.Mutex             // guards id and wait
	id   uint                   // number of the next event allowed to run
	wait map[uint]chan struct{} // blocked Start calls, keyed by event id
}
// Start waits until it is time for the event numbered id to begin.
// That is, except for the first event, it waits until End(id-1) has
// been called.
func (s *sequencer) Start(id uint) {
	s.mu.Lock()
	if s.id == id {
		// It is already this event's turn; proceed without blocking.
		s.mu.Unlock()
		return
	}
	// Register a wakeup channel for this id and block until End(id-1)
	// closes it. The mutex must be released before the receive so that
	// End can acquire it and make progress.
	c := make(chan struct{})
	if s.wait == nil {
		s.wait = make(map[uint]chan struct{})
	}
	s.wait[id] = c
	s.mu.Unlock()
	<-c
}
// End notifies the sequencer that the event numbered id has completed,
// allowing it to schedule the event numbered id+1. It is a run-time error
// to call End with an id that is not the number of the active event.
func (s *sequencer) End(id uint) {
	s.mu.Lock()
	if s.id != id {
		s.mu.Unlock()
		panic("out of sync")
	}
	// Advance to the next event and wake its waiter, if one is blocked.
	id++
	s.id = id
	if s.wait == nil {
		s.wait = make(map[uint]chan struct{})
	}
	c, ok := s.wait[id]
	if ok {
		delete(s.wait, id)
	}
	s.mu.Unlock()
	// Close outside the lock so the woken goroutine can run immediately.
	if ok {
		close(c)
	}
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package textproto
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"math"
"strconv"
"strings"
"sync"
_ "unsafe" // for linkname
)
// TODO: This should be a distinguishable error (ErrMessageTooLarge)
// to allow mime/multipart to detect it.
var errMessageTooLarge = errors.New("message too large")

// A Reader implements convenience methods for reading requests
// or responses from a text protocol network connection.
type Reader struct {
	R   *bufio.Reader
	dot *dotReader // active DotReader, drained by closeDot before other reads
	buf []byte     // a re-usable buffer for readContinuedLineSlice
}
// NewReader returns a new [Reader] reading from r.
//
// To avoid denial of service attacks, the provided [bufio.Reader]
// should be reading from an [io.LimitReader] or similar Reader to bound
// the size of responses.
func NewReader(r *bufio.Reader) *Reader {
	rd := new(Reader)
	rd.R = r
	return rd
}
// ReadLine reads a single line from r,
// eliding the final \n or \r\n from the returned string.
func (r *Reader) ReadLine() (string, error) {
	b, err := r.readLineSlice(-1)
	return string(b), err
}
// ReadLineBytes is like [Reader.ReadLine] but returns a []byte instead of a string.
func (r *Reader) ReadLineBytes() ([]byte, error) {
	b, err := r.readLineSlice(-1)
	if b == nil {
		return nil, err
	}
	// Copy out of bufio's internal buffer so the caller owns the bytes.
	return bytes.Clone(b), err
}
// readLineSlice reads a single line from r,
// up to lim bytes long (or unlimited if lim is less than 0),
// eliding the final \r or \r\n from the returned string.
//
// The returned slice may alias bufio's internal buffer and is only
// valid until the next read on r.
func (r *Reader) readLineSlice(lim int64) ([]byte, error) {
	r.closeDot()
	var line []byte
	for {
		// bufio.ReadLine reports more == true when the line exceeds the
		// buffer and must be reassembled across calls.
		l, more, err := r.R.ReadLine()
		if err != nil {
			return nil, err
		}
		if lim >= 0 && int64(len(line))+int64(len(l)) > lim {
			return nil, errMessageTooLarge
		}
		// Avoid the copy if the first call produced a full line.
		if line == nil && !more {
			return l, nil
		}
		line = append(line, l...)
		if !more {
			break
		}
	}
	return line, nil
}
// ReadContinuedLine reads a possibly continued line from r,
// eliding the final trailing ASCII white space.
// Lines after the first are considered continuations if they
// begin with a space or tab character. In the returned data,
// continuation lines are separated from the previous line
// only by a single space: the newline and leading white space
// are removed.
//
// For example, consider this input:
//
//	Line 1
//	  continued...
//	Line 2
//
// The first call to ReadContinuedLine will return "Line 1 continued..."
// and the second will return "Line 2".
//
// Empty lines are never continued.
func (r *Reader) ReadContinuedLine() (string, error) {
	b, err := r.readContinuedLineSlice(-1, noValidation)
	return string(b), err
}
// trim returns s with leading and trailing spaces and tabs removed.
// It does not assume Unicode or UTF-8.
func trim(s []byte) []byte {
	start := 0
	end := len(s)
	for start < end && (s[start] == ' ' || s[start] == '\t') {
		start++
	}
	for end > start && (s[end-1] == ' ' || s[end-1] == '\t') {
		end--
	}
	return s[start:end]
}
// ReadContinuedLineBytes is like [Reader.ReadContinuedLine] but
// returns a []byte instead of a string.
func (r *Reader) ReadContinuedLineBytes() ([]byte, error) {
	b, err := r.readContinuedLineSlice(-1, noValidation)
	if b == nil {
		return nil, err
	}
	// Copy out of the reader's reusable buffer so the caller owns the bytes.
	return bytes.Clone(b), err
}
// readContinuedLineSlice reads continued lines from the reader buffer,
// returning a byte slice with all lines. The validateFirstLine function
// is run on the first read line, and if it returns an error then this
// error is returned from readContinuedLineSlice.
// It reads up to lim bytes of data (or unlimited if lim is less than 0).
func (r *Reader) readContinuedLineSlice(lim int64, validateFirstLine func([]byte) error) ([]byte, error) {
	if validateFirstLine == nil {
		return nil, fmt.Errorf("missing validateFirstLine func")
	}
	// Read the first line.
	line, err := r.readLineSlice(lim)
	if err != nil {
		return nil, err
	}
	if len(line) == 0 { // blank line - no continuation
		return line, nil
	}
	if err := validateFirstLine(line); err != nil {
		return nil, err
	}
	// Optimistically assume that we have started to buffer the next line
	// and it starts with an ASCII letter (the next header key), or a blank
	// line, so we can avoid copying that buffered data around in memory
	// and skipping over non-existent whitespace.
	if r.R.Buffered() > 1 {
		peek, _ := r.R.Peek(2)
		if len(peek) > 0 && (isASCIILetter(peek[0]) || peek[0] == '\n') ||
			len(peek) == 2 && peek[0] == '\r' && peek[1] == '\n' {
			// Fast path: no continuation follows, return the aliased slice.
			return trim(line), nil
		}
	}
	// ReadByte or the next readLineSlice will flush the read buffer;
	// copy the slice into buf.
	r.buf = append(r.buf[:0], trim(line)...)
	if lim < 0 {
		lim = math.MaxInt64
	}
	// Account for what has already been consumed against the limit.
	lim -= int64(len(r.buf))
	// Read continuation lines.
	for r.skipSpace() > 0 {
		// Continuations are joined to the previous line by a single space.
		r.buf = append(r.buf, ' ')
		if int64(len(r.buf)) >= lim {
			return nil, errMessageTooLarge
		}
		line, err := r.readLineSlice(lim - int64(len(r.buf)))
		if err != nil {
			break
		}
		r.buf = append(r.buf, trim(line)...)
	}
	return r.buf, nil
}
// skipSpace skips R over all spaces and returns the number of bytes skipped.
func (r *Reader) skipSpace() int {
	count := 0
	for {
		b, err := r.R.ReadByte()
		if err != nil {
			// Bufio will keep err until next read.
			break
		}
		if b != ' ' && b != '\t' {
			// Push the non-space byte back for the next reader.
			r.R.UnreadByte()
			break
		}
		count++
	}
	return count
}
// readCodeLine reads one line and parses it as a status-code line.
func (r *Reader) readCodeLine(expectCode int) (code int, continued bool, message string, err error) {
	var line string
	if line, err = r.ReadLine(); err != nil {
		return 0, false, "", err
	}
	return parseCodeLine(line, expectCode)
}
// parseCodeLine splits a response line of the form "code message" (or
// "code-message" for a continued, multi-line response). It returns the
// numeric code, whether the response continues on a following line, and
// the message text. Note that on the error paths the named results may
// be only partially populated; callers are expected to check err first.
func parseCodeLine(line string, expectCode int) (code int, continued bool, message string, err error) {
	// A valid line is at least "NNN " or "NNN-".
	if len(line) < 4 || line[3] != ' ' && line[3] != '-' {
		err = ProtocolError("short response: " + line)
		return
	}
	continued = line[3] == '-'
	code, err = strconv.Atoi(line[0:3])
	if err != nil || code < 100 {
		err = ProtocolError("invalid response code: " + line)
		return
	}
	message = line[4:]
	// expectCode of 1-9 matches the first digit, 10-99 the first two
	// digits, and 100-999 the exact code. Non-positive disables the check.
	if 1 <= expectCode && expectCode < 10 && code/100 != expectCode ||
		10 <= expectCode && expectCode < 100 && code/10 != expectCode ||
		100 <= expectCode && expectCode < 1000 && code != expectCode {
		err = &Error{code, message}
	}
	return
}
// ReadCodeLine reads a response code line of the form
//
//	code message
//
// where code is a three-digit status code and the message
// extends to the rest of the line. An example of such a line is:
//
//	220 plan9.bell-labs.com ESMTP
//
// If the prefix of the status does not match the digits in expectCode,
// ReadCodeLine returns with err set to &Error{code, message}.
// For example, if expectCode is 31, an error will be returned if
// the status is not in the range [310,319].
//
// If the response is multi-line, ReadCodeLine returns an error.
//
// An expectCode <= 0 disables the check of the status code.
func (r *Reader) ReadCodeLine(expectCode int) (code int, message string, err error) {
	var continued bool
	code, continued, message, err = r.readCodeLine(expectCode)
	if continued && err == nil {
		err = ProtocolError("unexpected multi-line response: " + message)
	}
	return code, message, err
}
// ReadResponse reads a multi-line response of the form:
//
//	code-message line 1
//	code-message line 2
//	...
//	code message line n
//
// where code is a three-digit status code. The first line starts with the
// code and a hyphen. The response is terminated by a line that starts
// with the same code followed by a space. Each line in message is
// separated by a newline (\n).
//
// See page 36 of RFC 959 (https://www.ietf.org/rfc/rfc959.txt) for
// details of another form of response accepted:
//
//	code-message line 1
//	message line 2
//	...
//	code message line n
//
// If the prefix of the status does not match the digits in expectCode,
// ReadResponse returns with err set to &Error{code, message}.
// For example, if expectCode is 31, an error will be returned if
// the status is not in the range [310,319].
//
// An expectCode <= 0 disables the check of the status code.
func (r *Reader) ReadResponse(expectCode int) (code int, message string, err error) {
	code, continued, first, err := r.readCodeLine(expectCode)
	multi := continued
	var messageBuilder strings.Builder
	messageBuilder.WriteString(first)
	for continued {
		// NOTE(review): this := shadows the outer err from readCodeLine;
		// parse failures on continuation lines are folded into the message
		// below, and the final status check after the loop sees only the
		// first line's error. This mirrors upstream behavior — confirm
		// before "fixing" the shadowing.
		line, err := r.ReadLine()
		if err != nil {
			return 0, "", err
		}
		var code2 int
		var moreMessage string
		code2, continued, moreMessage, err = parseCodeLine(line, 0)
		if err != nil || code2 != code {
			// RFC 959-style interior line (no code prefix, or a different
			// code): keep the raw line in the message and continue reading.
			messageBuilder.WriteByte('\n')
			messageBuilder.WriteString(strings.TrimRight(line, "\r\n"))
			continued = true
			continue
		}
		messageBuilder.WriteByte('\n')
		messageBuilder.WriteString(moreMessage)
	}
	message = messageBuilder.String()
	if err != nil && multi && message != "" {
		// replace one line error message with all lines (full message)
		err = &Error{code, message}
	}
	return
}
// DotReader returns a new [Reader] that satisfies Reads using the
// decoded text of a dot-encoded block read from r.
// The returned Reader is only valid until the next call
// to a method on r.
//
// Dot encoding is a common framing used for data blocks
// in text protocols such as SMTP. The data consists of a sequence
// of lines, each of which ends in "\r\n". The sequence itself
// ends at a line containing just a dot: ".\r\n". Lines beginning
// with a dot are escaped with an additional dot to avoid
// looking like the end of the sequence.
//
// The decoded form returned by the Reader's Read method
// rewrites the "\r\n" line endings into the simpler "\n",
// removes leading dot escapes if present, and stops with error [io.EOF]
// after consuming (and discarding) the end-of-sequence line.
func (r *Reader) DotReader() io.Reader {
	// Drain any previous dot block before starting a new one.
	r.closeDot()
	r.dot = &dotReader{r: r}
	return r.dot
}

// dotReader decodes a single dot-encoded block read from r.
type dotReader struct {
	r     *Reader
	state int // current decoder state; see the state machine in Read
}
// Read satisfies reads by decoding dot-encoded data read from d.r.
func (d *dotReader) Read(b []byte) (n int, err error) {
	// Run data through a simple state machine to
	// elide leading dots, rewrite trailing \r\n into \n,
	// and detect ending .\r\n line.
	const (
		stateBeginLine = iota // beginning of line; initial state; must be zero
		stateDot              // read . at beginning of line
		stateDotCR            // read .\r at beginning of line
		stateCR               // read \r (possibly at end of line)
		stateData             // reading data in middle of line
		stateEOF              // reached .\r\n end marker line
	)
	br := d.r.R
	for n < len(b) && d.state != stateEOF {
		var c byte
		c, err = br.ReadByte()
		if err != nil {
			if err == io.EOF {
				// Stream ended before the terminating dot line.
				err = io.ErrUnexpectedEOF
			}
			break
		}
		switch d.state {
		case stateBeginLine:
			if c == '.' {
				d.state = stateDot
				continue
			}
			if c == '\r' {
				d.state = stateCR
				continue
			}
			d.state = stateData
		case stateDot:
			if c == '\r' {
				d.state = stateDotCR
				continue
			}
			if c == '\n' {
				// Bare ".\n" is also accepted as the end marker.
				d.state = stateEOF
				continue
			}
			// ".x": the leading dot was an escape; drop it and emit c.
			d.state = stateData
		case stateDotCR:
			if c == '\n' {
				d.state = stateEOF
				continue
			}
			// Not part of .\r\n.
			// Consume leading dot and emit saved \r.
			br.UnreadByte()
			c = '\r'
			d.state = stateData
		case stateCR:
			if c == '\n' {
				// "\r\n" collapses to "\n": emit only the '\n'.
				d.state = stateBeginLine
				break
			}
			// Not part of \r\n. Emit saved \r
			br.UnreadByte()
			c = '\r'
			d.state = stateData
		case stateData:
			if c == '\r' {
				d.state = stateCR
				continue
			}
			if c == '\n' {
				d.state = stateBeginLine
			}
		}
		b[n] = c
		n++
	}
	if err == nil && d.state == stateEOF {
		err = io.EOF
	}
	if err != nil && d.r.dot == d {
		// Detach from the Reader so closeDot stops draining.
		d.r.dot = nil
	}
	return
}
// closeDot drains the current DotReader if any,
// making sure that it reads until the ending dot line.
func (r *Reader) closeDot() {
	if r.dot == nil {
		return
	}
	scratch := make([]byte, 128)
	for r.dot != nil {
		// Read sets r.dot = nil once it hits EOF or an error,
		// which terminates this loop.
		r.dot.Read(scratch)
	}
}
// ReadDotBytes reads a dot-encoding and returns the decoded data.
//
// See the documentation for the [Reader.DotReader] method for details about dot-encoding.
func (r *Reader) ReadDotBytes() ([]byte, error) {
	dr := r.DotReader()
	return io.ReadAll(dr)
}
// ReadDotLines reads a dot-encoding and returns a slice
// containing the decoded lines, with the final \r\n or \n elided from each.
//
// See the documentation for the [Reader.DotReader] method for details about dot-encoding.
func (r *Reader) ReadDotLines() ([]string, error) {
	// Reading a line at a time avoids a large contiguous allocation
	// compared with ReadDotBytes + Split, and is simpler.
	var lines []string
	var err error
	for {
		var ln string
		ln, err = r.ReadLine()
		if err != nil {
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			break
		}
		// A lone dot ends the block; otherwise strip one escape dot.
		if strings.HasPrefix(ln, ".") {
			if ln == "." {
				break
			}
			ln = ln[1:]
		}
		lines = append(lines, ln)
	}
	return lines, err
}
// colon separates a header key from its value.
var colon = []byte(":")

// ReadMIMEHeader reads a MIME-style header from r.
// The header is a sequence of possibly continued Key: Value lines
// ending in a blank line.
// The returned map m maps [CanonicalMIMEHeaderKey](key) to a
// sequence of values in the same order encountered in the input.
//
// For example, consider this input:
//
//	My-Key: Value 1
//	Long-Key: Even
//	       Longer Value
//	My-Key: Value 2
//
// Given that input, ReadMIMEHeader returns the map:
//
//	map[string][]string{
//		"My-Key": {"Value 1", "Value 2"},
//		"Long-Key": {"Even Longer Value"},
//	}
func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
	// Unbounded; callers needing limits use readMIMEHeader directly.
	return readMIMEHeader(r, math.MaxInt64, math.MaxInt64)
}
// readMIMEHeader is accessed from mime/multipart.
//go:linkname readMIMEHeader
// readMIMEHeader is a version of ReadMIMEHeader which takes a limit on the header size.
// It is called by the mime/multipart package.
func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error) {
	// Avoid lots of small slice allocations later by allocating one
	// large one ahead of time which we'll cut up into smaller
	// slices. If this isn't big enough later, we allocate small ones.
	var strs []string
	hint := r.upcomingHeaderKeys()
	if hint > 0 {
		if hint > 1000 {
			hint = 1000 // set a cap to avoid overallocation
		}
		strs = make([]string, hint)
	}
	m := make(MIMEHeader, hint)
	// Account for 400 bytes of overhead for the MIMEHeader, plus 200 bytes per entry.
	// Benchmarking map creation as of go1.20, a one-entry MIMEHeader is 416 bytes and large
	// MIMEHeaders average about 200 bytes per entry.
	maxMemory -= 400
	const mapEntryOverhead = 200
	// The first line cannot start with a leading space.
	if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
		const errorLimit = 80 // arbitrary limit on how much of the line we'll quote
		line, err := r.readLineSlice(errorLimit)
		if err != nil {
			return m, err
		}
		return m, ProtocolError("malformed MIME header initial line: " + string(line))
	}
	for {
		kv, err := r.readContinuedLineSlice(maxMemory, mustHaveFieldNameColon)
		if len(kv) == 0 {
			// Blank line (or read error): end of the header block.
			return m, err
		}
		// Key ends at first colon.
		k, v, ok := bytes.Cut(kv, colon)
		if !ok {
			return m, ProtocolError("malformed MIME header line: " + string(kv))
		}
		key, ok := canonicalMIMEHeaderKey(k)
		if !ok {
			return m, ProtocolError("malformed MIME header line: " + string(kv))
		}
		// Reject values containing bytes outside HTAB/SP/VCHAR/obs-text.
		for _, c := range v {
			if !validHeaderValueByte(c) {
				return m, ProtocolError("malformed MIME header line: " + string(kv))
			}
		}
		maxHeaders--
		if maxHeaders < 0 {
			return nil, errMessageTooLarge
		}
		// Skip initial spaces in value.
		value := string(bytes.TrimLeft(v, " \t"))
		vv := m[key]
		if vv == nil {
			// New key: charge for the key string and the map entry overhead.
			maxMemory -= int64(len(key))
			maxMemory -= mapEntryOverhead
		}
		maxMemory -= int64(len(value))
		if maxMemory < 0 {
			return m, errMessageTooLarge
		}
		if vv == nil && len(strs) > 0 {
			// More than likely this will be a single-element key.
			// Most headers aren't multi-valued.
			// Set the capacity on strs[0] to 1, so any future append
			// won't extend the slice into the other strings.
			vv, strs = strs[:1:1], strs[1:]
			vv[0] = value
			m[key] = vv
		} else {
			m[key] = append(vv, value)
		}
		if err != nil {
			return m, err
		}
	}
}
// noValidation is a validation func for readContinuedLineSlice that
// accepts every line unchanged.
func noValidation(_ []byte) error { return nil }

// mustHaveFieldNameColon ensures that, per RFC 7230, the field-name is
// on a single line: the first line of a header entry must contain a colon.
func mustHaveFieldNameColon(line []byte) error {
	if bytes.ContainsRune(line, ':') {
		return nil
	}
	return ProtocolError(fmt.Sprintf("malformed MIME header: missing colon: %q", line))
}
// nl is the line separator used when scanning buffered data for keys.
var nl = []byte("\n")

// upcomingHeaderKeys returns an approximation of the number of keys
// that will be in this header. If it gets confused, it returns 0.
func (r *Reader) upcomingHeaderKeys() (n int) {
	// Try to determine the 'hint' size.
	r.R.Peek(1) // force a buffer load if empty
	s := r.R.Buffered()
	if s == 0 {
		return
	}
	peek, _ := r.R.Peek(s)
	// Count non-continuation lines until a blank line, the end of the
	// buffered data, or the 1000-key cap.
	for len(peek) > 0 && n < 1000 {
		var line []byte
		line, peek, _ = bytes.Cut(peek, nl)
		if len(line) == 0 || (len(line) == 1 && line[0] == '\r') {
			// Blank line separating headers from the body.
			break
		}
		if line[0] == ' ' || line[0] == '\t' {
			// Folded continuation of the previous line.
			continue
		}
		n++
	}
	return n
}
// CanonicalMIMEHeaderKey returns the canonical format of the
// MIME header key s. The canonicalization converts the first
// letter and any letter following a hyphen to upper case;
// the rest are converted to lowercase. For example, the
// canonical key for "accept-encoding" is "Accept-Encoding".
// MIME header keys are assumed to be ASCII only.
// If s contains a space or invalid header field bytes as
// defined by RFC 9112, it is returned without modifications.
func CanonicalMIMEHeaderKey(s string) string {
	// Fast path: scan to see whether s is already canonical, avoiding
	// the []byte conversion and rewrite when it is.
	needUpper := true
	for i := 0; i < len(s); i++ {
		c := s[i]
		if !validHeaderFieldByte(c) {
			return s
		}
		isLower := 'a' <= c && c <= 'z'
		isUpper := 'A' <= c && c <= 'Z'
		if (needUpper && isLower) || (!needUpper && isUpper) {
			canon, _ := canonicalMIMEHeaderKey([]byte(s))
			return canon
		}
		needUpper = c == '-'
	}
	return s
}
const toLower = 'a' - 'A'

// validHeaderFieldByte reports whether c is a valid byte in a header
// field name. RFC 7230 says:
//
//	header-field   = field-name ":" OWS field-value OWS
//	field-name     = token
//	tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
//	        "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
//	token = 1*tchar
//
// Bytes >= 128 are never valid.
func validHeaderFieldByte(c byte) bool {
	switch {
	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
		return true
	}
	switch c {
	case '!', '#', '$', '%', '&', '\'', '*', '+', '-', '.',
		'^', '_', '`', '|', '~':
		return true
	}
	return false
}
// validHeaderValueByte reports whether c is a valid byte in a header
// field value. RFC 7230 says:
//
//	field-content  = field-vchar [ 1*( SP / HTAB ) field-vchar ]
//	field-vchar    = VCHAR / obs-text
//	obs-text       = %x80-FF
//
// RFC 5234 says:
//
//	HTAB           =  %x09
//	SP             =  %x20
//	VCHAR          =  %x21-7E
func validHeaderValueByte(c byte) bool {
	// The allowed set is HTAB, SP, VCHAR (%x21-7E) and obs-text (%x80-FF),
	// which is everything except the C0 controls other than HTAB, and DEL.
	// c is unsigned, so any obs-text byte satisfies c >= ' '.
	return c == '\t' || (c >= ' ' && c != 0x7F)
}
// canonicalMIMEHeaderKey is like CanonicalMIMEHeaderKey but is
// allowed to mutate the provided byte slice before returning the
// string.
//
// For invalid inputs (if a contains spaces or non-token bytes), a
// is unchanged and a string copy is returned.
//
// ok is true if the header key contains only valid characters and spaces.
// ReadMIMEHeader accepts header keys containing spaces, but does not
// canonicalize them.
func canonicalMIMEHeaderKey(a []byte) (_ string, ok bool) {
	if len(a) == 0 {
		return "", false
	}
	// See if a looks like a header key. If not, return it unchanged.
	noCanon := false
	for _, c := range a {
		if validHeaderFieldByte(c) {
			continue
		}
		// Don't canonicalize.
		if c == ' ' {
			// We accept invalid headers with a space before the
			// colon, but must not canonicalize them.
			// See https://go.dev/issue/34540.
			noCanon = true
			continue
		}
		return string(a), false
	}
	if noCanon {
		return string(a), true
	}
	upper := true
	for i, c := range a {
		// Canonicalize: first letter upper case
		// and upper case after each dash.
		// (Host, User-Agent, If-Modified-Since).
		// MIME headers are ASCII only, so no Unicode issues.
		if upper && 'a' <= c && c <= 'z' {
			c -= toLower
		} else if !upper && 'A' <= c && c <= 'Z' {
			c += toLower
		}
		a[i] = c
		upper = c == '-' // for next time
	}
	// Intern common keys so repeated headers share one string allocation.
	commonHeaderOnce.Do(initCommonHeader)
	// The compiler recognizes m[string(byteSlice)] as a special
	// case, so a copy of a's bytes into a new string does not
	// happen in this map lookup:
	if v := commonHeader[string(a)]; v != "" {
		return v, true
	}
	return string(a), true
}
// commonHeader interns common header strings.
var commonHeader map[string]string

// commonHeaderOnce guards the lazy initialization of commonHeader.
var commonHeaderOnce sync.Once

// initCommonHeader populates commonHeader with frequently seen keys,
// mapping each canonical key to itself for interning.
func initCommonHeader() {
	names := []string{
		"Accept",
		"Accept-Charset",
		"Accept-Encoding",
		"Accept-Language",
		"Accept-Ranges",
		"Cache-Control",
		"Cc",
		"Connection",
		"Content-Id",
		"Content-Language",
		"Content-Length",
		"Content-Transfer-Encoding",
		"Content-Type",
		"Cookie",
		"Date",
		"Dkim-Signature",
		"Etag",
		"Expires",
		"From",
		"Host",
		"If-Modified-Since",
		"If-None-Match",
		"In-Reply-To",
		"Last-Modified",
		"Location",
		"Message-Id",
		"Mime-Version",
		"Pragma",
		"Received",
		"Referer",
		"Return-Path",
		"Server",
		"Set-Cookie",
		"Subject",
		"To",
		"User-Agent",
		"Via",
		"X-Forwarded-For",
		"X-Imforwards",
		"X-Powered-By",
	}
	commonHeader = make(map[string]string, len(names))
	for _, name := range names {
		commonHeader[name] = name
	}
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package textproto implements generic support for text-based request/response
// protocols in the style of HTTP, NNTP, and SMTP.
//
// This package enforces the HTTP/1.1 character set defined by
// RFC 9112 for header keys and values.
//
// The package provides:
//
// [Error], which represents a numeric error response from
// a server.
//
// [Pipeline], to manage pipelined requests and responses
// in a client.
//
// [Reader], to read numeric response code lines,
// key: value headers, lines wrapped with leading spaces
// on continuation lines, and whole text blocks ending
// with a dot on a line by itself.
//
// [Writer], to write dot-encoded text blocks.
//
// [Conn], a convenient packaging of [Reader], [Writer], and [Pipeline] for use
// with a single network connection.
package textproto
import (
"bufio"
"fmt"
"io"
"net"
)
// An Error represents a numeric error response from a server.
type Error struct {
	Code int
	Msg  string
}

// Error formats the response as "code message", zero-padding the
// code to three digits.
func (e *Error) Error() string {
	return fmt.Sprintf("%03d", e.Code) + " " + e.Msg
}
// A ProtocolError describes a protocol violation such
// as an invalid response or a hung-up connection.
type ProtocolError string

// Error returns the violation description itself.
func (p ProtocolError) Error() string {
	msg := string(p)
	return msg
}
// A Conn represents a textual network protocol connection.
// It consists of a [Reader] and [Writer] to manage I/O
// and a [Pipeline] to sequence concurrent requests on the connection.
// These embedded types carry methods with them;
// see the documentation of those types for details.
type Conn struct {
	Reader
	Writer
	Pipeline

	// conn is the underlying connection, retained so Close can
	// close it directly.
	conn io.ReadWriteCloser
}

// NewConn returns a new [Conn] using conn for I/O.
// Reads and writes are buffered via bufio.
func NewConn(conn io.ReadWriteCloser) *Conn {
	return &Conn{
		Reader: Reader{R: bufio.NewReader(conn)},
		Writer: Writer{W: bufio.NewWriter(conn)},
		conn:   conn,
	}
}

// Close closes the connection.
func (c *Conn) Close() error {
	return c.conn.Close()
}

// Dial connects to the given address on the given network using [net.Dial]
// and then returns a new [Conn] for the connection.
func Dial(network, addr string) (*Conn, error) {
	c, err := net.Dial(network, addr)
	if err != nil {
		return nil, err
	}
	return NewConn(c), nil
}

// Cmd is a convenience method that sends a command after
// waiting its turn in the pipeline. The command text is the
// result of formatting format with args and appending \r\n.
// Cmd returns the id of the command, for use with StartResponse and EndResponse.
//
// For example, a client might run a HELP command that returns a dot-body
// by using:
//
//	id, err := c.Cmd("HELP")
//	if err != nil {
//		return nil, err
//	}
//
//	c.StartResponse(id)
//	defer c.EndResponse(id)
//
//	if _, _, err = c.ReadCodeLine(110); err != nil {
//		return nil, err
//	}
//	text, err := c.ReadDotBytes()
//	if err != nil {
//		return nil, err
//	}
//	return c.ReadCodeLine(250)
func (c *Conn) Cmd(format string, args ...any) (id uint, err error) {
	// Reserve our slot in the pipeline before writing so commands
	// from concurrent callers are sequenced.
	id = c.Next()
	c.StartRequest(id)
	err = c.PrintfLine(format, args...)
	c.EndRequest(id)
	if err != nil {
		return 0, err
	}
	return id, nil
}
// TrimString returns s without leading and trailing ASCII space.
func TrimString(s string) string {
	start, end := 0, len(s)
	for start < end && isASCIISpace(s[start]) {
		start++
	}
	for end > start && isASCIISpace(s[end-1]) {
		end--
	}
	return s[start:end]
}

// TrimBytes returns b without leading and trailing ASCII space.
func TrimBytes(b []byte) []byte {
	start, end := 0, len(b)
	for start < end && isASCIISpace(b[start]) {
		start++
	}
	for end > start && isASCIISpace(b[end-1]) {
		end--
	}
	return b[start:end]
}

// isASCIISpace reports whether b is an ASCII space character:
// space, tab, newline, or carriage return.
func isASCIISpace(b byte) bool {
	switch b {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}

// isASCIILetter reports whether b is an ASCII letter (A-Z or a-z).
func isASCIILetter(b byte) bool {
	lower := b | 0x20 // fold to lower case
	return 'a' <= lower && lower <= 'z'
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package textproto
import (
"bufio"
"fmt"
"io"
)
// A Writer implements convenience methods for writing
// requests or responses to a text protocol network connection.
type Writer struct {
	W *bufio.Writer

	// dot is the currently open dot-encoding writer, if any;
	// it is closed before any other write on W.
	dot *dotWriter
}

// NewWriter returns a new [Writer] writing to w.
func NewWriter(w *bufio.Writer) *Writer {
	return &Writer{W: w}
}

// crnl is the CRLF line terminator; dotcrnl is the ".\r\n" line
// that terminates a dot-encoded block.
var crnl = []byte{'\r', '\n'}
var dotcrnl = []byte{'.', '\r', '\n'}

// PrintfLine writes the formatted output followed by \r\n.
//
// Intermediate write errors are not checked individually;
// bufio.Writer retains the first error and Flush reports it.
func (w *Writer) PrintfLine(format string, args ...any) error {
	w.closeDot()
	fmt.Fprintf(w.W, format, args...)
	w.W.Write(crnl)
	return w.W.Flush()
}

// DotWriter returns a writer that can be used to write a dot-encoding to w.
// It takes care of inserting leading dots when necessary,
// translating line-ending \n into \r\n, and adding the final .\r\n line
// when the DotWriter is closed. The caller should close the
// DotWriter before the next call to a method on w.
//
// See the documentation for the [Reader.DotReader] method for details about dot-encoding.
func (w *Writer) DotWriter() io.WriteCloser {
	// Terminate any previous dot block before starting a new one.
	w.closeDot()
	w.dot = &dotWriter{w: w}
	return w.dot
}

// closeDot closes the active dot-writer, if any, which emits its
// terminating ".\r\n" line and flushes.
func (w *Writer) closeDot() {
	if w.dot != nil {
		w.dot.Close() // sets w.dot = nil
	}
}
// dotWriter implements the dot-encoding produced by [Writer.DotWriter]:
// a '.' at the start of a line is doubled, bare '\n' is expanded to
// "\r\n", and Close appends the terminating ".\r\n" line.
type dotWriter struct {
	w     *Writer
	state int // one of the wstate* values below
}

// dotWriter states, tracking the position within the current line.
const (
	wstateBegin     = iota // initial state; must be zero
	wstateBeginLine        // beginning of line
	wstateCR               // wrote \r (possibly at end of line)
	wstateData             // writing data in middle of line
)

// Write dot-encodes b into the underlying bufio.Writer.
// It reports the number of bytes of b consumed, not bytes written
// (escaping may emit more bytes than it consumes).
func (d *dotWriter) Write(b []byte) (n int, err error) {
	bw := d.w.W
	for n < len(b) {
		c := b[n]
		switch d.state {
		case wstateBegin, wstateBeginLine:
			d.state = wstateData
			if c == '.' {
				// escape leading dot
				bw.WriteByte('.')
			}
			fallthrough
		case wstateData:
			if c == '\r' {
				d.state = wstateCR
			}
			if c == '\n' {
				// Bare \n: insert the missing \r before it.
				bw.WriteByte('\r')
				d.state = wstateBeginLine
			}
		case wstateCR:
			d.state = wstateData
			if c == '\n' {
				d.state = wstateBeginLine
			}
		}
		if err = bw.WriteByte(c); err != nil {
			break
		}
		n++
	}
	return
}

// Close finishes any partial final line, writes the ".\r\n"
// terminator, flushes, and detaches itself from the owning Writer.
func (d *dotWriter) Close() error {
	if d.w.dot == d {
		d.w.dot = nil
	}
	bw := d.w.W
	switch d.state {
	default:
		// Mid-line: complete the line with \r\n first.
		bw.WriteByte('\r')
		fallthrough
	case wstateCR:
		bw.WriteByte('\n')
		fallthrough
	case wstateBeginLine:
		bw.Write(dotcrnl)
	}
	return bw.Flush()
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen_encoding_table.go
// Package url parses URLs and implements query escaping.
//
// See RFC 3986. This package generally follows RFC 3986, except where
// it deviates for compatibility reasons.
// RFC 6874 followed for IPv6 zone literals.
package url
// When sending changes, first search old issues for history on decisions.
// Unit tests should also contain references to issue numbers with details.
import (
"errors"
"fmt"
"internal/godebug"
"net/netip"
"path"
"slices"
"strconv"
"strings"
_ "unsafe" // for linkname
)
// urlstrictcolons controls whether parseHost rejects http/https hosts
// containing more than one colon; see go.dev/issue/75223.
var urlstrictcolons = godebug.New("urlstrictcolons")
// Error reports an error and the operation and URL that caused it.
type Error struct {
Op string
URL string
Err error
}
func (e *Error) Unwrap() error { return e.Err }
func (e *Error) Error() string { return fmt.Sprintf("%s %q: %s", e.Op, e.URL, e.Err) }
func (e *Error) Timeout() bool {
t, ok := e.Err.(interface {
Timeout() bool
})
return ok && t.Timeout()
}
func (e *Error) Temporary() bool {
t, ok := e.Err.(interface {
Temporary() bool
})
return ok && t.Temporary()
}
// upperhex holds the uppercase hexadecimal digits used by escape
// when emitting %XX sequences.
const upperhex = "0123456789ABCDEF"
// ishex reports whether c is an ASCII hexadecimal digit, using the
// generated character-class table (see gen_encoding_table.go).
func ishex(c byte) bool {
	return table[c]&hexChar != 0
}
// unhex returns the numeric value of the hexadecimal digit c.
// Precondition: ishex(c) is true.
func unhex(c byte) byte {
	switch {
	case '0' <= c && c <= '9':
		return c - '0'
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10
	default:
		// 'A'-'F' by the ishex precondition.
		return c - 'A' + 10
	}
}
// EscapeError reports a malformed %-escape sequence in a URL.
type EscapeError string

// Error describes the malformed escape, quoting the offending text.
func (e EscapeError) Error() string {
	quoted := strconv.Quote(string(e))
	return "invalid URL escape " + quoted
}
// InvalidHostError reports a byte that may not appear in the host
// subcomponent of a URL.
type InvalidHostError string

// Error describes the invalid character, quoting it.
func (e InvalidHostError) Error() string {
	quoted := strconv.Quote(string(e))
	return "invalid character " + quoted + " in host name"
}
// shouldEscape reports whether byte c must be %-escaped when it
// appears in the URL section identified by mode. A zero bit for
// mode in the generated table means the byte is not safe unescaped.
// See the reference implementation in gen_encoding_table.go.
func shouldEscape(c byte, mode encoding) bool {
	return table[c]&mode == 0
}
// QueryUnescape does the inverse transformation of [QueryEscape],
// converting each 3-byte encoded substring of the form "%AB" into the
// hex-decoded byte 0xAB.
// It returns an error if any % is not followed by two hexadecimal
// digits.
func QueryUnescape(s string) (string, error) {
	// In query components '+' additionally decodes to space; see unescape.
	return unescape(s, encodeQueryComponent)
}

// PathUnescape does the inverse transformation of [PathEscape],
// converting each 3-byte encoded substring of the form "%AB" into the
// hex-decoded byte 0xAB. It returns an error if any % is not followed
// by two hexadecimal digits.
//
// PathUnescape is identical to [QueryUnescape] except that it does not
// unescape '+' to ' ' (space).
func PathUnescape(s string) (string, error) {
	return unescape(s, encodePathSegment)
}
// unescape unescapes a string; the mode specifies
// which section of the URL string is being unescaped.
//
// It makes two passes: the first validates every escape and counts
// them, so the common no-escape case returns s without allocating;
// the second builds the decoded result.
func unescape(s string, mode encoding) (string, error) {
	// Count %, check that they're well-formed.
	n := 0
	hasPlus := false
	for i := 0; i < len(s); {
		switch s[i] {
		case '%':
			n++
			if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
				// Report at most the 3-byte window starting at the bad '%'.
				s = s[i:]
				if len(s) > 3 {
					s = s[:3]
				}
				return "", EscapeError(s)
			}
			// Per https://tools.ietf.org/html/rfc3986#page-21
			// in the host component %-encoding can only be used
			// for non-ASCII bytes.
			// But https://tools.ietf.org/html/rfc6874#section-2
			// introduces %25 being allowed to escape a percent sign
			// in IPv6 scoped-address literals. Yay.
			// (unhex(s[i+1]) < 8 means the decoded byte is < 0x80, i.e. ASCII.)
			if mode == encodeHost && unhex(s[i+1]) < 8 && s[i:i+3] != "%25" {
				return "", EscapeError(s[i : i+3])
			}
			if mode == encodeZone {
				// RFC 6874 says basically "anything goes" for zone identifiers
				// and that even non-ASCII can be redundantly escaped,
				// but it seems prudent to restrict %-escaped bytes here to those
				// that are valid host name bytes in their unescaped form.
				// That is, you can use escaping in the zone identifier but not
				// to introduce bytes you couldn't just write directly.
				// But Windows puts spaces here! Yay.
				v := unhex(s[i+1])<<4 | unhex(s[i+2])
				if s[i:i+3] != "%25" && v != ' ' && shouldEscape(v, encodeHost) {
					return "", EscapeError(s[i : i+3])
				}
			}
			i += 3
		case '+':
			hasPlus = mode == encodeQueryComponent
			i++
		default:
			if (mode == encodeHost || mode == encodeZone) && s[i] < 0x80 && shouldEscape(s[i], mode) {
				return "", InvalidHostError(s[i : i+1])
			}
			i++
		}
	}

	if n == 0 && !hasPlus {
		// Nothing to decode; avoid allocating.
		return s, nil
	}

	// '+' means space only in query components.
	var unescapedPlusSign byte
	switch mode {
	case encodeQueryComponent:
		unescapedPlusSign = ' '
	default:
		unescapedPlusSign = '+'
	}

	var t strings.Builder
	t.Grow(len(s) - 2*n) // each %XX shrinks to one byte
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '%':
			// In the loop above, we established that unhex's precondition is
			// fulfilled for both s[i+1] and s[i+2].
			t.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
			i += 2
		case '+':
			t.WriteByte(unescapedPlusSign)
		default:
			t.WriteByte(s[i])
		}
	}
	return t.String(), nil
}
// QueryEscape escapes the string so it can be safely placed
// inside a [URL] query.
func QueryEscape(s string) string {
	return escape(s, encodeQueryComponent)
}

// PathEscape escapes the string so it can be safely placed inside a [URL] path segment,
// replacing special characters (including /) with %XX sequences as needed.
func PathEscape(s string) string {
	return escape(s, encodePathSegment)
}
// escape %-encodes s for the URL section identified by mode,
// additionally converting spaces to '+' in query components.
// It first counts the bytes needing escaping so the result can be
// sized exactly, and returns s unchanged when nothing needs escaping.
func escape(s string, mode encoding) string {
	spaceCount, hexCount := 0, 0
	for _, c := range []byte(s) {
		if shouldEscape(c, mode) {
			if c == ' ' && mode == encodeQueryComponent {
				spaceCount++
			} else {
				hexCount++
			}
		}
	}

	if spaceCount == 0 && hexCount == 0 {
		// Nothing to escape; avoid allocating.
		return s
	}

	var buf [64]byte
	var t []byte

	// Each hex escape turns one byte into three; space->'+' is 1:1.
	required := len(s) + 2*hexCount
	if required <= len(buf) {
		// Small result: build it in the stack buffer.
		t = buf[:required]
	} else {
		t = make([]byte, required)
	}

	if hexCount == 0 {
		// Only spaces need rewriting; copy once and patch in place.
		copy(t, s)
		for i := 0; i < len(s); i++ {
			if s[i] == ' ' {
				t[i] = '+'
			}
		}
		return string(t)
	}

	j := 0
	for _, c := range []byte(s) {
		switch {
		case c == ' ' && mode == encodeQueryComponent:
			t[j] = '+'
			j++
		case shouldEscape(c, mode):
			t[j] = '%'
			t[j+1] = upperhex[c>>4]
			t[j+2] = upperhex[c&15]
			j += 3
		default:
			t[j] = c
			j++
		}
	}
	return string(t)
}
// A URL represents a parsed URL (technically, a URI reference).
//
// The general form represented is:
//
//	[scheme:][//[userinfo@]host][/]path[?query][#fragment]
//
// URLs that do not start with a slash after the scheme are interpreted as:
//
//	scheme:opaque[?query][#fragment]
//
// The Host field contains the host and port subcomponents of the URL.
// When the port is present, it is separated from the host with a colon.
// When the host is an IPv6 address, it must be enclosed in square brackets:
// "[fe80::1]:80". The [net.JoinHostPort] function combines a host and port
// into a string suitable for the Host field, adding square brackets to
// the host when necessary.
//
// Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/.
// A consequence is that it is impossible to tell which slashes in the Path were
// slashes in the raw URL and which were %2f. This distinction is rarely important,
// but when it is, the code should use the [URL.EscapedPath] method, which preserves
// the original encoding of Path. The Fragment field is also stored in decoded form,
// use [URL.EscapedFragment] to retrieve the original encoding.
//
// The [URL.String] method uses the [URL.EscapedPath] method to obtain the path.
type URL struct {
	Scheme   string     // URL scheme; Parse stores it lowercased
	Opaque   string     // encoded opaque data
	User     *Userinfo  // username and password information
	Host     string     // "host" or "host:port" (see Hostname and Port methods)
	Path     string     // path (relative paths may omit leading slash)
	Fragment string     // fragment for references (without '#'), stored decoded

	// RawQuery contains the encoded query values, without the initial '?'.
	// Use URL.Query to decode the query.
	RawQuery string

	// RawPath is an optional field containing an encoded path hint.
	// See the EscapedPath method for more details.
	//
	// In general, code should call EscapedPath instead of reading RawPath.
	RawPath string

	// RawFragment is an optional field containing an encoded fragment hint.
	// See the EscapedFragment method for more details.
	//
	// In general, code should call EscapedFragment instead of reading RawFragment.
	RawFragment string

	// ForceQuery indicates whether the original URL contained a query ('?') character.
	// When set, the String method will include a trailing '?', even when RawQuery is empty.
	ForceQuery bool

	// OmitHost indicates the URL has an empty host (authority).
	// When set, the String method will not include the host when it is empty.
	OmitHost bool
}
// User returns a [Userinfo] containing the provided username
// and no password set.
func User(username string) *Userinfo {
	return &Userinfo{username: username}
}

// UserPassword returns a [Userinfo] containing the provided username
// and password.
//
// This functionality should only be used with legacy web sites.
// RFC 2396 warns that interpreting Userinfo this way
// “is NOT RECOMMENDED, because the passing of authentication
// information in clear text (such as URI) has proven to be a
// security risk in almost every case where it has been used.”
func UserPassword(username, password string) *Userinfo {
	return &Userinfo{username: username, password: password, passwordSet: true}
}

// The Userinfo type is an immutable encapsulation of username and
// password details for a [URL]. An existing Userinfo value is guaranteed
// to have a username set (potentially empty, as allowed by RFC 2396),
// and optionally a password.
type Userinfo struct {
	username    string
	password    string
	passwordSet bool
}

// Username returns the username.
func (ui *Userinfo) Username() string {
	if ui != nil {
		return ui.username
	}
	return ""
}

// Password returns the password in case it is set, and whether it is set.
func (ui *Userinfo) Password() (string, bool) {
	if ui != nil {
		return ui.password, ui.passwordSet
	}
	return "", false
}
// String returns the encoded userinfo information in the standard form
// of "username[:password]".
// Both parts are %-escaped for the userinfo subcomponent.
func (u *Userinfo) String() string {
	if u == nil {
		return ""
	}
	s := escape(u.username, encodeUserPassword)
	if u.passwordSet {
		s += ":" + escape(u.password, encodeUserPassword)
	}
	return s
}
// Maybe rawURL is of the form scheme:path.
// (Scheme must be [a-zA-Z][a-zA-Z0-9+.-]*)
// If so, return scheme, path; else return "", rawURL.
func getScheme(rawURL string) (scheme, path string, err error) {
	for idx := 0; idx < len(rawURL); idx++ {
		switch ch := rawURL[idx]; {
		case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z':
			// Letters may appear anywhere in a scheme; keep scanning.
		case '0' <= ch && ch <= '9', ch == '+', ch == '-', ch == '.':
			if idx == 0 {
				// A scheme must begin with a letter, so this
				// cannot be a scheme; treat the input as a path.
				return "", rawURL, nil
			}
		case ch == ':':
			if idx == 0 {
				return "", "", errors.New("missing protocol scheme")
			}
			return rawURL[:idx], rawURL[idx+1:], nil
		default:
			// We have encountered an invalid character,
			// so there is no valid scheme.
			return "", rawURL, nil
		}
	}
	// No ':' found: the whole string is the path.
	return "", rawURL, nil
}
// Parse parses a raw url into a [URL] structure.
//
// The url may be relative (a path, without a host) or absolute
// (starting with a scheme). Trying to parse a hostname and path
// without a scheme is invalid but may not necessarily return an
// error, due to parsing ambiguities.
func Parse(rawURL string) (*URL, error) {
	// Cut off #frag; the fragment is parsed separately below.
	u, frag, _ := strings.Cut(rawURL, "#")
	url, err := parse(u, false)
	if err != nil {
		return nil, &Error{"parse", u, err}
	}
	if frag == "" {
		return url, nil
	}
	if err = url.setFragment(frag); err != nil {
		return nil, &Error{"parse", rawURL, err}
	}
	return url, nil
}

// ParseRequestURI parses a raw url into a [URL] structure. It assumes that
// url was received in an HTTP request, so the url is interpreted
// only as an absolute URI or an absolute path.
// The string url is assumed not to have a #fragment suffix.
// (Web browsers strip #fragment before sending the URL to a web server.)
func ParseRequestURI(rawURL string) (*URL, error) {
	url, err := parse(rawURL, true)
	if err != nil {
		return nil, &Error{"parse", rawURL, err}
	}
	return url, nil
}
// parse parses a URL from a string in one of two contexts. If
// viaRequest is true, the URL is assumed to have arrived via an HTTP request,
// in which case only absolute URLs or path-absolute relative URLs are allowed.
// If viaRequest is false, all forms of relative URLs are allowed.
func parse(rawURL string, viaRequest bool) (*URL, error) {
	var rest string
	var err error

	// Control bytes are never valid anywhere in a URL.
	if stringContainsCTLByte(rawURL) {
		return nil, errors.New("net/url: invalid control character in URL")
	}

	if rawURL == "" && viaRequest {
		return nil, errors.New("empty url")
	}
	url := new(URL)

	if rawURL == "*" {
		// "*" is the asterisk-form request target (e.g. OPTIONS *).
		url.Path = "*"
		return url, nil
	}

	// Split off possible leading "http:", "mailto:", etc.
	// Cannot contain escaped characters.
	if url.Scheme, rest, err = getScheme(rawURL); err != nil {
		return nil, err
	}
	url.Scheme = strings.ToLower(url.Scheme)

	if strings.HasSuffix(rest, "?") && strings.Count(rest, "?") == 1 {
		// A lone trailing '?' is remembered so String can reproduce it.
		url.ForceQuery = true
		rest = rest[:len(rest)-1]
	} else {
		rest, url.RawQuery, _ = strings.Cut(rest, "?")
	}

	if !strings.HasPrefix(rest, "/") {
		if url.Scheme != "" {
			// We consider rootless paths per RFC 3986 as opaque.
			url.Opaque = rest
			return url, nil
		}
		if viaRequest {
			return nil, errors.New("invalid URI for request")
		}

		// Avoid confusion with malformed schemes, like cache_object:foo/bar.
		// See golang.org/issue/16822.
		//
		// RFC 3986, §3.3:
		// In addition, a URI reference (Section 4.1) may be a relative-path reference,
		// in which case the first path segment cannot contain a colon (":") character.
		if segment, _, _ := strings.Cut(rest, "/"); strings.Contains(segment, ":") {
			// First path segment has colon. Not allowed in relative URL.
			return nil, errors.New("first path segment in URL cannot contain colon")
		}
	}

	if (url.Scheme != "" || !viaRequest && !strings.HasPrefix(rest, "///")) && strings.HasPrefix(rest, "//") {
		// "//" introduces an authority (userinfo@host:port).
		var authority string
		authority, rest = rest[2:], ""
		if i := strings.Index(authority, "/"); i >= 0 {
			authority, rest = authority[:i], authority[i:]
		}
		url.User, url.Host, err = parseAuthority(url.Scheme, authority)
		if err != nil {
			return nil, err
		}
	} else if url.Scheme != "" && strings.HasPrefix(rest, "/") {
		// OmitHost is set to true when rawURL has an empty host (authority).
		// See golang.org/issue/46059.
		url.OmitHost = true
	}

	// Set Path and, optionally, RawPath.
	// RawPath is a hint of the encoding of Path. We don't want to set it if
	// the default escaping of Path is equivalent, to help make sure that people
	// don't rely on it in general.
	if err := url.setPath(rest); err != nil {
		return nil, err
	}
	return url, nil
}
// parseAuthority splits authority into an optional userinfo part and
// a host part, validating and unescaping both. The scheme is passed
// through to parseHost for scheme-specific host validation.
func parseAuthority(scheme, authority string) (user *Userinfo, host string, err error) {
	// The host is everything after the last '@', if any.
	i := strings.LastIndex(authority, "@")
	if i < 0 {
		host, err = parseHost(scheme, authority)
	} else {
		host, err = parseHost(scheme, authority[i+1:])
	}
	if err != nil {
		return nil, "", err
	}
	if i < 0 {
		return nil, host, nil
	}
	userinfo := authority[:i]
	if !validUserinfo(userinfo) {
		return nil, "", errors.New("net/url: invalid userinfo")
	}
	if !strings.Contains(userinfo, ":") {
		// Username only, no password.
		if userinfo, err = unescape(userinfo, encodeUserPassword); err != nil {
			return nil, "", err
		}
		user = User(userinfo)
	} else {
		// Split on the first ':'; the rest is the password.
		username, password, _ := strings.Cut(userinfo, ":")
		if username, err = unescape(username, encodeUserPassword); err != nil {
			return nil, "", err
		}
		if password, err = unescape(password, encodeUserPassword); err != nil {
			return nil, "", err
		}
		user = UserPassword(username, password)
	}
	return user, host, nil
}
// parseHost parses host as an authority without user
// information. That is, as host[:port].
// IPv6 literals in brackets are validated as IPv6 addresses
// (including RFC 6874 zone identifiers); other hosts are checked
// for a valid optional port and then unescaped.
func parseHost(scheme, host string) (string, error) {
	if openBracketIdx := strings.LastIndex(host, "["); openBracketIdx > 0 {
		// '[' is only valid as the very first byte of an IP-literal.
		return "", errors.New("invalid IP-literal")
	} else if openBracketIdx == 0 {
		// Parse an IP-Literal in RFC 3986 and RFC 6874.
		// E.g., "[fe80::1]", "[fe80::1%25en0]", "[fe80::1]:80".
		closeBracketIdx := strings.LastIndex(host, "]")
		if closeBracketIdx < 0 {
			return "", errors.New("missing ']' in host")
		}

		colonPort := host[closeBracketIdx+1:]
		if !validOptionalPort(colonPort) {
			return "", fmt.Errorf("invalid port %q after host", colonPort)
		}
		unescapedColonPort, err := unescape(colonPort, encodeHost)
		if err != nil {
			return "", err
		}

		hostname := host[openBracketIdx+1 : closeBracketIdx]
		var unescapedHostname string
		// RFC 6874 defines that %25 (%-encoded percent) introduces
		// the zone identifier, and the zone identifier can use basically
		// any %-encoding it likes. That's different from the host, which
		// can only %-encode non-ASCII bytes.
		// We do impose some restrictions on the zone, to avoid stupidity
		// like newlines.
		zoneIdx := strings.Index(hostname, "%25")
		if zoneIdx >= 0 {
			// Unescape the address and the zone with their own rules.
			hostPart, err := unescape(hostname[:zoneIdx], encodeHost)
			if err != nil {
				return "", err
			}
			zonePart, err := unescape(hostname[zoneIdx:], encodeZone)
			if err != nil {
				return "", err
			}
			unescapedHostname = hostPart + zonePart
		} else {
			var err error
			unescapedHostname, err = unescape(hostname, encodeHost)
			if err != nil {
				return "", err
			}
		}

		// Per RFC 3986, only a host identified by a valid
		// IPv6 address can be enclosed by square brackets.
		// This excludes any IPv4, but notably not IPv4-mapped addresses.
		addr, err := netip.ParseAddr(unescapedHostname)
		if err != nil {
			return "", fmt.Errorf("invalid host: %w", err)
		}
		if addr.Is4() {
			return "", errors.New("invalid IP-literal")
		}
		return "[" + unescapedHostname + "]" + unescapedColonPort, nil
	} else if i := strings.Index(host, ":"); i != -1 {
		lastColon := strings.LastIndex(host, ":")
		if lastColon != i {
			// RFC 3986 does not allow colons to appear in the host subcomponent.
			//
			// However, a number of databases including PostgreSQL and MongoDB
			// permit a comma-separated list of hosts (with optional ports) in the
			// host subcomponent.
			//
			// Since we historically permitted colons to appear in the host,
			// enforce strict colons only for http and https URLs.
			//
			// See https://go.dev/issue/75223 and https://go.dev/issue/78077.
			if scheme == "http" || scheme == "https" {
				// GODEBUG=urlstrictcolons=0 restores the old lax behavior.
				if urlstrictcolons.Value() == "0" {
					urlstrictcolons.IncNonDefault()
					i = lastColon
				}
			} else {
				i = lastColon
			}
		}
		colonPort := host[i:]
		if !validOptionalPort(colonPort) {
			return "", fmt.Errorf("invalid port %q after host", colonPort)
		}
	}

	var err error
	if host, err = unescape(host, encodeHost); err != nil {
		return "", err
	}
	return host, nil
}
// setPath sets the Path and RawPath fields of the URL based on the provided
// escaped path p. It maintains the invariant that RawPath is only specified
// when it differs from the default encoding of the path.
// For example:
// - setPath("/foo/bar") will set Path="/foo/bar" and RawPath=""
// - setPath("/foo%2fbar") will set Path="/foo/bar" and RawPath="/foo%2fbar"
// setPath will return an error only if the provided path contains an invalid
// escaping.
//
// setPath should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/sagernet/sing
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname badSetPath net/url.(*URL).setPath
func (u *URL) setPath(p string) error {
	path, err := unescape(p, encodePath)
	if err != nil {
		return err
	}
	u.Path = path
	if escp := escape(path, encodePath); p == escp {
		// Default encoding is fine.
		u.RawPath = ""
	} else {
		// Keep the original encoding as a hint for EscapedPath.
		u.RawPath = p
	}
	return nil
}

// badSetPath exists only as a linkname target for the directive
// above; it has no body and is never called directly.
// for linkname because we cannot linkname methods directly
func badSetPath(*URL, string) error
// EscapedPath returns the escaped form of u.Path.
// In general there are multiple possible escaped forms of any path.
// EscapedPath returns u.RawPath when it is a valid escaping of u.Path.
// Otherwise EscapedPath ignores u.RawPath and computes an escaped
// form on its own.
// The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct
// their results.
// In general, code should call EscapedPath instead of
// reading u.RawPath directly.
func (u *URL) EscapedPath() string {
	// Prefer the original encoding when it round-trips to u.Path.
	if u.RawPath != "" && validEncoded(u.RawPath, encodePath) {
		p, err := unescape(u.RawPath, encodePath)
		if err == nil && p == u.Path {
			return u.RawPath
		}
	}
	if u.Path == "*" {
		return "*" // don't escape (Issue 11202)
	}
	return escape(u.Path, encodePath)
}

// validEncoded reports whether s is a valid encoded path or fragment,
// according to mode.
// It must not contain any bytes that require escaping during encoding.
func validEncoded(s string, mode encoding) bool {
	for i := 0; i < len(s); i++ {
		// RFC 3986, Appendix A.
		// pchar = unreserved / pct-encoded / sub-delims / ":" / "@".
		// shouldEscape is not quite compliant with the RFC,
		// so we check the sub-delims ourselves and let
		// shouldEscape handle the others.
		switch s[i] {
		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '@':
			// ok
		case '[', ']':
			// ok - not specified in RFC 3986 but left alone by modern browsers
		case '%':
			// ok - percent encoded, will decode
		default:
			if shouldEscape(s[i], mode) {
				return false
			}
		}
	}
	return true
}
// setFragment is like setPath but for Fragment/RawFragment:
// it stores the decoded fragment and keeps the original encoding
// only when it differs from the default one.
func (u *URL) setFragment(f string) error {
	frag, err := unescape(f, encodeFragment)
	if err != nil {
		return err
	}
	u.Fragment = frag
	if escf := escape(frag, encodeFragment); f == escf {
		// Default encoding is fine.
		u.RawFragment = ""
	} else {
		// Keep the original encoding as a hint for EscapedFragment.
		u.RawFragment = f
	}
	return nil
}

// EscapedFragment returns the escaped form of u.Fragment.
// In general there are multiple possible escaped forms of any fragment.
// EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment.
// Otherwise EscapedFragment ignores u.RawFragment and computes an escaped
// form on its own.
// The [URL.String] method uses EscapedFragment to construct its result.
// In general, code should call EscapedFragment instead of
// reading u.RawFragment directly.
func (u *URL) EscapedFragment() string {
	// Prefer the original encoding when it round-trips to u.Fragment.
	if u.RawFragment != "" && validEncoded(u.RawFragment, encodeFragment) {
		f, err := unescape(u.RawFragment, encodeFragment)
		if err == nil && f == u.Fragment {
			return u.RawFragment
		}
	}
	return escape(u.Fragment, encodeFragment)
}
// validOptionalPort reports whether port is either an empty string
// or matches /^:\d*$/
func validOptionalPort(port string) bool {
	switch {
	case port == "":
		return true
	case port[0] != ':':
		return false
	}
	// Everything after the ':' must be an ASCII digit.
	for i := 1; i < len(port); i++ {
		if port[i] < '0' || port[i] > '9' {
			return false
		}
	}
	return true
}
// String reassembles the [URL] into a valid URL string.
// The general form of the result is one of:
//
//	scheme:opaque?query#fragment
//	scheme://userinfo@host/path?query#fragment
//
// If u.Opaque is non-empty, String uses the first form;
// otherwise it uses the second form.
// Any non-ASCII characters in host are escaped.
// To obtain the path, String uses u.EscapedPath().
//
// In the second form, the following rules apply:
//   - if u.Scheme is empty, scheme: is omitted.
//   - if u.User is nil, userinfo@ is omitted.
//   - if u.Host is empty, host/ is omitted.
//   - if u.Scheme and u.Host are empty and u.User is nil,
//     the entire scheme://userinfo@host/ is omitted.
//   - if u.Host is non-empty and u.Path begins with a /,
//     the form host/path does not add its own /.
//   - if u.RawQuery is empty, ?query is omitted.
//   - if u.Fragment is empty, #fragment is omitted.
func (u *URL) String() string {
	var buf strings.Builder

	// Estimate the final size so the builder allocates once.
	// The estimate is an upper bound: component lengths plus the
	// maximum punctuation that could join them.
	n := len(u.Scheme)
	if u.Opaque != "" {
		n += len(u.Opaque)
	} else {
		if !u.OmitHost && (u.Scheme != "" || u.Host != "" || u.User != nil) {
			username := u.User.Username()
			password, _ := u.User.Password()
			n += len(username) + len(password) + len(u.Host)
		}
		n += len(u.Path)
	}
	n += len(u.RawQuery) + len(u.RawFragment)
	n += len(":" + "//" + "//" + ":" + "@" + "/" + "./" + "?" + "#")
	buf.Grow(n)

	if u.Scheme != "" {
		buf.WriteString(u.Scheme)
		buf.WriteByte(':')
	}
	if u.Opaque != "" {
		buf.WriteString(u.Opaque)
	} else {
		if u.Scheme != "" || u.Host != "" || u.User != nil {
			if u.OmitHost && u.Host == "" && u.User == nil {
				// omit empty host
			} else {
				if u.Host != "" || u.Path != "" || u.User != nil {
					buf.WriteString("//")
				}
				if ui := u.User; ui != nil {
					buf.WriteString(ui.String())
					buf.WriteByte('@')
				}
				if h := u.Host; h != "" {
					buf.WriteString(escape(h, encodeHost))
				}
			}
		}
		path := u.EscapedPath()
		if u.OmitHost && u.Host == "" && u.User == nil && strings.HasPrefix(path, "//") {
			// Escape the first / in a path starting with "//" and no authority
			// so that re-parsing the URL doesn't turn the path into an authority
			// (e.g., Path="//host/p" producing "http://host/p").
			buf.WriteString("%2F")
			path = path[1:]
		}
		if path != "" && path[0] != '/' && u.Host != "" {
			// Separate a rootless path from the host.
			buf.WriteByte('/')
		}
		if buf.Len() == 0 {
			// RFC 3986 §4.2
			// A path segment that contains a colon character (e.g., "this:that")
			// cannot be used as the first segment of a relative-path reference, as
			// it would be mistaken for a scheme name. Such a segment must be
			// preceded by a dot-segment (e.g., "./this:that") to make a relative-
			// path reference.
			if segment, _, _ := strings.Cut(path, "/"); strings.Contains(segment, ":") {
				buf.WriteString("./")
			}
		}
		buf.WriteString(path)
	}
	if u.ForceQuery || u.RawQuery != "" {
		buf.WriteByte('?')
		buf.WriteString(u.RawQuery)
	}
	if u.Fragment != "" {
		buf.WriteByte('#')
		buf.WriteString(u.EscapedFragment())
	}
	return buf.String()
}
// Redacted is like [URL.String] but replaces any password with "xxxxx".
// Only the password in u.User is redacted.
func (u *URL) Redacted() string {
	if u == nil {
		return ""
	}

	// Work on a shallow copy so the caller's URL is left unmodified.
	ru := *u
	if _, has := ru.User.Password(); has {
		ru.User = UserPassword(ru.User.Username(), "xxxxx")
	}
	return ru.String()
}
// Values maps a string key to a list of values.
// It is typically used for query parameters and form values.
// Unlike in the http.Header map, the keys in a Values map
// are case-sensitive.
type Values map[string][]string

// Get gets the first value associated with the given key.
// If there are no values associated with the key, Get returns
// the empty string. To access multiple values, use the map
// directly.
func (v Values) Get(key string) string {
	if vals, ok := v[key]; ok && len(vals) > 0 {
		return vals[0]
	}
	return ""
}

// Set sets the key to value. It replaces any existing
// values.
func (v Values) Set(key, value string) {
	v[key] = []string{value}
}

// Add adds the value to key. It appends to any existing
// values associated with key.
func (v Values) Add(key, value string) {
	v[key] = append(v[key], value)
}

// Del deletes the values associated with key.
func (v Values) Del(key string) {
	delete(v, key)
}

// Has checks whether a given key is set.
func (v Values) Has(key string) bool {
	_, found := v[key]
	return found
}

// Clone creates a deep copy of the subject [Values].
func (vs Values) Clone() Values {
	if vs == nil {
		return nil
	}
	// Copy each value slice so the clone is fully independent.
	out := make(Values, len(vs))
	for key, vals := range vs {
		out[key] = slices.Clone(vals)
	}
	return out
}
// ParseQuery parses the URL-encoded query string and returns
// a map listing the values specified for each key.
// ParseQuery always returns a non-nil map containing all the
// valid query parameters found; err describes the first decoding error
// encountered, if any.
//
// Query is expected to be a list of key=value settings separated by ampersands.
// A setting without an equals sign is interpreted as a key set to an empty
// value.
// Settings containing a non-URL-encoded semicolon are considered invalid.
func ParseQuery(query string) (Values, error) {
	values := make(Values)
	err := parseQuery(values, query)
	return values, err
}
// urlmaxqueryparams allows overriding the limit on the number of query
// parameters via GODEBUG=urlmaxqueryparams=N (0 disables the limit).
var urlmaxqueryparams = godebug.New("urlmaxqueryparams")

// defaultMaxParams is the default limit on the number of query
// parameters accepted when parsing a query string.
const defaultMaxParams = 10000

// urlParamsWithinMax reports whether params is within the allowed maximum
// number of query parameters. The limit is defaultMaxParams unless
// overridden by the urlmaxqueryparams GODEBUG setting, where 0 means
// unlimited.
func urlParamsWithinMax(params int) bool {
	withinDefaultMax := params <= defaultMaxParams
	if urlmaxqueryparams.Value() == "" {
		return withinDefaultMax
	}
	customMax, err := strconv.Atoi(urlmaxqueryparams.Value())
	if err != nil {
		// An unparsable setting falls back to the default limit.
		return withinDefaultMax
	}
	// Use an inclusive comparison, matching the default-limit check above:
	// a custom max of N must accept exactly N parameters.
	withinCustomMax := customMax == 0 || params <= customMax
	if withinDefaultMax != withinCustomMax {
		urlmaxqueryparams.IncNonDefault()
	}
	return withinCustomMax
}
// parseQuery decodes query into m, returning the first decoding error
// encountered (semicolon-separator errors always overwrite err, matching
// the historical behavior).
func parseQuery(m Values, query string) (err error) {
	// Reject pathologically long parameter lists up front.
	if !urlParamsWithinMax(strings.Count(query, "&") + 1) {
		return errors.New("number of URL query parameters exceeded limit")
	}
	for query != "" {
		var pair string
		pair, query, _ = strings.Cut(query, "&")
		if strings.Contains(pair, ";") {
			err = fmt.Errorf("invalid semicolon separator in query")
			continue
		}
		if pair == "" {
			continue
		}
		rawKey, rawValue, _ := strings.Cut(pair, "=")
		key, keyErr := QueryUnescape(rawKey)
		if keyErr != nil {
			if err == nil {
				err = keyErr
			}
			continue
		}
		value, valueErr := QueryUnescape(rawValue)
		if valueErr != nil {
			if err == nil {
				err = valueErr
			}
			continue
		}
		m[key] = append(m[key], value)
	}
	return err
}
// Encode encodes the values into “URL encoded” form
// ("bar=baz&foo=quux") sorted by key.
func (v Values) Encode() string {
	if len(v) == 0 {
		return ""
	}
	// Collect and sort the keys for deterministic output.
	keys := make([]string, 0, len(v))
	for k := range v {
		keys = append(keys, k)
	}
	slices.Sort(keys)
	var sb strings.Builder
	for _, k := range keys {
		escapedKey := QueryEscape(k)
		for _, val := range v[k] {
			if sb.Len() > 0 {
				sb.WriteByte('&')
			}
			sb.WriteString(escapedKey)
			sb.WriteByte('=')
			sb.WriteString(QueryEscape(val))
		}
	}
	return sb.String()
}
// resolvePath applies special path segments from refs and applies
// them to base, per RFC 3986. The result always begins with "/"
// (unless it is empty).
func resolvePath(base, ref string) string {
	// Merge ref onto base (RFC 3986 §5.3 "merge paths").
	var merged string
	switch {
	case ref == "":
		merged = base
	case ref[0] == '/':
		merged = ref
	default:
		merged = base[:strings.LastIndex(base, "/")+1] + ref
	}
	if merged == "" {
		return ""
	}

	var (
		seg string
		out strings.Builder
	)
	atStart := true
	rest := merged
	// The result should carry a leading '/', so write it now.
	out.WriteByte('/')
	for more := true; more; {
		seg, rest, more = strings.Cut(rest, "/")
		switch seg {
		case ".":
			// Dot segments are simply dropped.
			atStart = false
		case "..":
			// Pop the most recently written segment.
			// (Skip the leading '/' we already wrote.)
			written := out.String()[1:]
			cut := strings.LastIndexByte(written, '/')
			out.Reset()
			out.WriteByte('/')
			if cut == -1 {
				atStart = true
			} else {
				out.WriteString(written[:cut])
			}
		default:
			if !atStart {
				out.WriteByte('/')
			}
			out.WriteString(seg)
			atStart = false
		}
	}

	// A trailing "." or ".." keeps a trailing slash on the result.
	if seg == "." || seg == ".." {
		out.WriteByte('/')
	}

	// We wrote an initial '/', but we don't want two.
	r := out.String()
	if len(r) > 1 && r[1] == '/' {
		r = r[1:]
	}
	return r
}
// IsAbs reports whether the [URL] is absolute.
// Absolute means that it has a non-empty scheme.
func (u *URL) IsAbs() bool {
	return len(u.Scheme) > 0
}
// Parse parses a [URL] in the context of the receiver. The provided URL
// may be relative or absolute. Parse returns nil, err on parse
// failure, otherwise its return value is the same as [URL.ResolveReference].
func (u *URL) Parse(ref string) (*URL, error) {
	parsed, err := Parse(ref)
	if err != nil {
		return nil, err
	}
	return u.ResolveReference(parsed), nil
}
// ResolveReference resolves a URI reference to an absolute URI from
// an absolute base URI u, per RFC 3986 Section 5.2. The URI reference
// may be relative or absolute. ResolveReference always returns a new
// [URL] instance, even if the returned URL is identical to either the
// base or reference. If ref is an absolute URL, then ResolveReference
// ignores base and returns a copy of ref.
func (u *URL) ResolveReference(ref *URL) *URL {
	// Start from a copy of ref and selectively inherit fields from u below.
	url := *ref
	if ref.Scheme == "" {
		url.Scheme = u.Scheme
	}
	if ref.Scheme != "" || ref.Host != "" || ref.User != nil {
		// The "absoluteURI" or "net_path" cases.
		// We can ignore the error from setPath since we know we provided a
		// validly-escaped path.
		url.setPath(resolvePath(ref.EscapedPath(), ""))
		return &url
	}
	if ref.Opaque != "" {
		// An opaque reference carries its own non-hierarchical part;
		// drop any authority and path picked up from the copy of ref.
		url.User = nil
		url.Host = ""
		url.Path = ""
		return &url
	}
	if ref.Path == "" && !ref.ForceQuery && ref.RawQuery == "" {
		// Empty path and no query: inherit the base's query, and — if the
		// reference also has no fragment — the base's fragment.
		url.RawQuery = u.RawQuery
		if ref.Fragment == "" {
			url.Fragment = u.Fragment
			url.RawFragment = u.RawFragment
		}
	}
	if ref.Path == "" && u.Opaque != "" {
		// The reference has no path and the base is opaque: keep the
		// base's opaque part with no authority or path.
		url.Opaque = u.Opaque
		url.User = nil
		url.Host = ""
		url.Path = ""
		return &url
	}
	// The "abs_path" or "rel_path" cases.
	url.Host = u.Host
	url.User = u.User
	url.setPath(resolvePath(u.EscapedPath(), ref.EscapedPath()))
	return &url
}
// Query parses RawQuery and returns the corresponding values.
// It silently discards malformed value pairs.
// To check errors use [ParseQuery].
func (u *URL) Query() Values {
	values, _ := ParseQuery(u.RawQuery)
	return values
}
// RequestURI returns the encoded path?query or opaque?query
// string that would be used in an HTTP request for u.
func (u *URL) RequestURI() string {
	uri := u.Opaque
	switch {
	case uri == "":
		// Hierarchical URL: use the escaped path, defaulting to "/".
		uri = u.EscapedPath()
		if uri == "" {
			uri = "/"
		}
	case strings.HasPrefix(uri, "//"):
		// An opaque part starting with "//" would be read as an
		// authority; keep the scheme to disambiguate.
		uri = u.Scheme + ":" + uri
	}
	if u.ForceQuery || u.RawQuery != "" {
		uri += "?" + u.RawQuery
	}
	return uri
}
// Hostname returns u.Host, stripping any valid port number if present.
//
// If the result is enclosed in square brackets, as literal IPv6 addresses are,
// the square brackets are removed from the result.
func (u *URL) Hostname() string {
	h, _ := splitHostPort(u.Host)
	return h
}
// Port returns the port part of u.Host, without the leading colon.
//
// If u.Host doesn't contain a valid numeric port, Port returns an empty string.
func (u *URL) Port() string {
	_, p := splitHostPort(u.Host)
	return p
}
// splitHostPort separates host and port. If the port is not valid, it returns
// the entire input as host, and it doesn't check the validity of the host.
// Unlike net.SplitHostPort, but per RFC 3986, it requires ports to be numeric.
func splitHostPort(hostPort string) (host, port string) {
	host = hostPort
	if i := strings.LastIndexByte(host, ':'); i != -1 && validOptionalPort(host[i:]) {
		host, port = host[:i], host[i+1:]
	}
	// Strip the square brackets around an IPv6 literal.
	if len(host) >= 2 && host[0] == '[' && host[len(host)-1] == ']' {
		host = host[1 : len(host)-1]
	}
	return host, port
}
// Marshaling interface implementations.
// Would like to implement MarshalText/UnmarshalText but that will change the JSON representation of URLs.

// MarshalBinary returns u's string form as bytes by delegating to
// [URL.AppendBinary] with a nil destination slice.
func (u *URL) MarshalBinary() (text []byte, err error) {
	return u.AppendBinary(nil)
}
// AppendBinary appends u's string form to b and returns the result.
func (u *URL) AppendBinary(b []byte) ([]byte, error) {
	b = append(b, u.String()...)
	return b, nil
}
// UnmarshalBinary parses text as a URL and stores the result in u,
// returning any parse error.
func (u *URL) UnmarshalBinary(text []byte) error {
	parsed, err := Parse(string(text))
	if err != nil {
		return err
	}
	*u = *parsed
	return nil
}
// JoinPath returns a new [URL] with the provided path elements joined to
// any existing path and the resulting path cleaned of any ./ or ../ elements.
// Any sequences of multiple / characters will be reduced to a single /.
// Path elements must already be in escaped form, as produced by [PathEscape].
func (u *URL) JoinPath(elem ...string) *URL {
	joined, _ := u.joinPath(elem...)
	return joined
}
// joinPath implements JoinPath, additionally reporting any error from
// re-setting the resulting path on the copied URL.
func (u *URL) joinPath(elem ...string) (*URL, error) {
	parts := append([]string{u.EscapedPath()}, elem...)
	var joined string
	if strings.HasPrefix(parts[0], "/") {
		joined = path.Join(parts...)
	} else {
		// Return a relative path if u is relative,
		// but ensure that it contains no ../ elements.
		parts[0] = "/" + parts[0]
		joined = path.Join(parts...)[1:]
	}
	// path.Join will remove any trailing slashes.
	// Preserve at least one.
	if strings.HasSuffix(parts[len(parts)-1], "/") && !strings.HasSuffix(joined, "/") {
		joined += "/"
	}
	joinedURL := *u
	err := joinedURL.setPath(joined)
	return &joinedURL, err
}
// validUserinfo reports whether s is a valid userinfo string per RFC 3986
// Section 3.2.1:
//
//	userinfo    = *( unreserved / pct-encoded / sub-delims / ":" )
//	unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
//	sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
//	              / "*" / "+" / "," / ";" / "="
//
// It doesn't validate pct-encoded. The caller does that via func unescape.
//
// Although RFC 3986 section 3.2.1 does not allow '@' in userinfo (it is the
// delimiter between userinfo and host), URLs are diverse: in
// "http://username:p@ssword@google.com" the string "username:p@ssword"
// should be treated as valid userinfo. See https://go.dev/issue/3439 and
// https://go.dev/issue/22655.
func validUserinfo(s string) bool {
	for _, r := range s {
		isAlpha := ('A' <= r && r <= 'Z') || ('a' <= r && r <= 'z')
		isDigit := '0' <= r && r <= '9'
		if isAlpha || isDigit {
			continue
		}
		if !strings.ContainsRune("-._:~!$&'()*+,;=%@", r) {
			return false
		}
	}
	return true
}
// stringContainsCTLByte reports whether s contains any ASCII control character
// (a byte below 0x20, or DEL 0x7f).
func stringContainsCTLByte(s string) bool {
	for _, b := range []byte(s) {
		if b < 0x20 || b == 0x7f {
			return true
		}
	}
	return false
}
// JoinPath returns a [URL] string with the provided path elements joined to
// the existing path of base and the resulting path cleaned of any ./ or ../ elements.
// Path elements must already be in escaped form, as produced by [PathEscape].
func JoinPath(base string, elem ...string) (result string, err error) {
	u, err := Parse(base)
	if err != nil {
		return "", err
	}
	joined, err := u.joinPath(elem...)
	if err != nil {
		return "", err
	}
	return joined.String(), nil
}
// Clone creates a deep copy of the fields of the subject [URL].
// A nil receiver yields nil; the User field, when set, is copied
// so the clone shares no pointers with u.
func (u *URL) Clone() *URL {
	if u == nil {
		return nil
	}
	clone := *u
	if u.User != nil {
		user := *u.User
		clone.User = &user
	}
	return &clone
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package exec runs external commands. It wraps os.StartProcess to make it
// easier to remap stdin and stdout, connect I/O with pipes, and do other
// adjustments.
//
// Unlike the "system" library call from C and other languages, the
// os/exec package intentionally does not invoke the system shell and
// does not expand any glob patterns or handle other expansions,
// pipelines, or redirections typically done by shells. The package
// behaves more like C's "exec" family of functions. To expand glob
// patterns, either call the shell directly, taking care to escape any
// dangerous input, or use the [path/filepath] package's Glob function.
// To expand environment variables, use package os's ExpandEnv.
//
// Note that the examples in this package assume a Unix system.
// They may not run on Windows, and they do not run in the Go Playground
// used by go.dev and pkg.go.dev.
//
// # Executables in the current directory
//
// The functions [Command] and [LookPath] look for a program
// in the directories listed in the current path, following the
// conventions of the host operating system.
// Operating systems have for decades included the current
// directory in this search, sometimes implicitly and sometimes
// configured explicitly that way by default.
// Modern practice is that including the current directory
// is usually unexpected and often leads to security problems.
//
// To avoid those security problems, as of Go 1.19, this package will not resolve a program
// using an implicit or explicit path entry relative to the current directory.
// That is, if you run [LookPath]("go"), it will not successfully return
// ./go on Unix nor .\go.exe on Windows, no matter how the path is configured.
// Instead, if the usual path algorithms would result in that answer,
// these functions return an error err satisfying [errors.Is](err, [ErrDot]).
//
// For example, consider these two program snippets:
//
// path, err := exec.LookPath("prog")
// if err != nil {
// log.Fatal(err)
// }
// use(path)
//
// and
//
// cmd := exec.Command("prog")
// if err := cmd.Run(); err != nil {
// log.Fatal(err)
// }
//
// These will not find and run ./prog or .\prog.exe,
// no matter how the current path is configured.
//
// Code that always wants to run a program from the current directory
// can be rewritten to say "./prog" instead of "prog".
//
// Code that insists on including results from relative path entries
// can instead override the error using an errors.Is check:
//
// path, err := exec.LookPath("prog")
// if errors.Is(err, exec.ErrDot) {
// err = nil
// }
// if err != nil {
// log.Fatal(err)
// }
// use(path)
//
// and
//
// cmd := exec.Command("prog")
// if errors.Is(cmd.Err, exec.ErrDot) {
// cmd.Err = nil
// }
// if err := cmd.Run(); err != nil {
// log.Fatal(err)
// }
//
// Setting the environment variable GODEBUG=execerrdot=0
// disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19
// behavior for programs that are unable to apply more targeted fixes.
// A future version of Go may remove support for this variable.
//
// Before adding such overrides, make sure you understand the
// security implications of doing so.
// See https://go.dev/blog/path-security for more information.
package exec
import (
"bytes"
"context"
"errors"
"internal/godebug"
"internal/syscall/execenv"
"io"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync/atomic"
"syscall"
"time"
)
// Error is returned by [LookPath] when it fails to classify a file as an
// executable.
type Error struct {
// Name is the file name for which the error occurred.
Name string
// Err is the underlying error.
Err error
}
func (e *Error) Error() string {
return "exec: " + strconv.Quote(e.Name) + ": " + e.Err.Error()
}
func (e *Error) Unwrap() error { return e.Err }
// ErrWaitDelay is returned by [Cmd.Wait] if the process exits with a
// successful status code but its output pipes are not closed before the
// command's WaitDelay expires.
var ErrWaitDelay = errors.New("exec: WaitDelay expired before I/O complete")

// wrappedError wraps an error without relying on fmt.Errorf.
type wrappedError struct {
	prefix string
	err    error
}

// Error returns "<prefix>: <underlying error message>".
func (w wrappedError) Error() string {
	return w.prefix + ": " + w.err.Error()
}

// Unwrap exposes the wrapped error to errors.Is and errors.As.
func (w wrappedError) Unwrap() error {
	return w.err
}
// Cmd represents an external command being prepared or run.
//
// A Cmd cannot be reused after calling its [Cmd.Start], [Cmd.Run],
// [Cmd.Output], or [Cmd.CombinedOutput] methods.
type Cmd struct {
	// Path is the path of the command to run.
	//
	// This is the only field that must be set to a non-zero
	// value. If Path is relative, it is evaluated relative
	// to Dir.
	Path string

	// Args holds command line arguments, including the command as Args[0].
	// If the Args field is empty or nil, Run uses {Path}.
	//
	// In typical use, both Path and Args are set by calling Command.
	Args []string

	// Env specifies the environment of the process.
	// Each entry is of the form "key=value".
	// If Env is nil, the new process uses the current process's
	// environment.
	// If Env contains duplicate environment keys, only the last
	// value in the slice for each duplicate key is used.
	// As a special case on Windows, SYSTEMROOT is always added if
	// missing and not explicitly set to the empty string.
	//
	// See also the Dir field, which may set PWD in the environment.
	Env []string

	// Dir specifies the working directory of the command.
	// If Dir is the empty string, Run runs the command in the
	// calling process's current directory.
	//
	// On Unix systems, the value of Dir also determines the
	// child process's PWD environment variable if not otherwise
	// specified. A Unix process represents its working directory
	// not by name but as an implicit reference to a node in the
	// file tree. So, if the child process obtains its working
	// directory by calling a function such as C's getcwd, which
	// computes the canonical name by walking up the file tree, it
	// will not recover the original value of Dir if that value
	// was an alias involving symbolic links. However, if the
	// child process calls Go's [os.Getwd] or GNU C's
	// get_current_dir_name, and the value of PWD is an alias for
	// the current directory, those functions will return the
	// value of PWD, which matches the value of Dir.
	Dir string

	// Stdin specifies the process's standard input.
	//
	// If Stdin is nil, the process reads from the null device (os.DevNull).
	//
	// If Stdin is an *os.File, the process's standard input is connected
	// directly to that file.
	//
	// Otherwise, during the execution of the command a separate
	// goroutine reads from Stdin and delivers that data to the command
	// over a pipe. In this case, Wait does not complete until the goroutine
	// stops copying, either because it has reached the end of Stdin
	// (EOF or a read error), or because writing to the pipe returned an error,
	// or because a nonzero WaitDelay was set and expired.
	//
	// Regardless of WaitDelay, Wait can block until a Read from
	// Stdin completes. If you need to use a blocking io.Reader,
	// use the StdinPipe method to get a pipe, copy from the Reader
	// to the pipe, and arrange to close the Reader after Wait returns.
	Stdin io.Reader

	// Stdout and Stderr specify the process's standard output and error.
	//
	// If either is nil, Run connects the corresponding file descriptor
	// to the null device (os.DevNull).
	//
	// If either is an *os.File, the corresponding output from the process
	// is connected directly to that file.
	//
	// Otherwise, during the execution of the command a separate goroutine
	// reads from the process over a pipe and delivers that data to the
	// corresponding Writer. In this case, Wait does not complete until the
	// goroutine reaches EOF or encounters an error or a nonzero WaitDelay
	// expires.
	//
	// Regardless of WaitDelay, Wait can block until a Write to
	// Stdout or Stderr completes. If you need to use a blocking io.Writer,
	// use the StdoutPipe or StderrPipe method to get a pipe,
	// copy from the pipe to the Writer, and arrange to close the
	// Writer after Wait returns.
	//
	// If Stdout and Stderr are the same writer, and have a type that can
	// be compared with ==, at most one goroutine at a time will call Write.
	Stdout io.Writer
	Stderr io.Writer

	// ExtraFiles specifies additional open files to be inherited by the
	// new process. It does not include standard input, standard output, or
	// standard error. If non-nil, entry i becomes file descriptor 3+i.
	//
	// ExtraFiles is not supported on Windows.
	ExtraFiles []*os.File

	// SysProcAttr holds optional, operating system-specific attributes.
	// Run passes it to os.StartProcess as the os.ProcAttr's Sys field.
	SysProcAttr *syscall.SysProcAttr

	// Process is the underlying process, once started.
	Process *os.Process

	// ProcessState contains information about an exited process.
	// If the process was started successfully, Wait or Run will
	// populate its ProcessState when the command completes.
	ProcessState *os.ProcessState

	// ctx is the context passed to CommandContext, if any.
	ctx context.Context

	// Err holds any error from path resolution in Command (for example,
	// a LookPath failure); Start returns it instead of running the command.
	Err error // LookPath error, if any.

	// If Cancel is non-nil, the command must have been created with
	// CommandContext and Cancel will be called when the command's
	// Context is done. By default, CommandContext sets Cancel to
	// call the Kill method on the command's Process.
	//
	// Typically a custom Cancel will send a signal to the command's
	// Process, but it may instead take other actions to initiate cancellation,
	// such as closing a stdin or stdout pipe or sending a shutdown request on a
	// network socket.
	//
	// If the command exits with a success status after Cancel is
	// called, and Cancel does not return an error equivalent to
	// os.ErrProcessDone, then Wait and similar methods will return a non-nil
	// error: either an error wrapping the one returned by Cancel,
	// or the error from the Context.
	// (If the command exits with a non-success status, or Cancel
	// returns an error that wraps os.ErrProcessDone, Wait and similar methods
	// continue to return the command's usual exit status.)
	//
	// If Cancel is set to nil, nothing will happen immediately when the command's
	// Context is done, but a nonzero WaitDelay will still take effect. That may
	// be useful, for example, to work around deadlocks in commands that do not
	// support shutdown signals but are expected to always finish quickly.
	//
	// Cancel will not be called if Start returns a non-nil error.
	Cancel func() error

	// If WaitDelay is non-zero, it bounds the time spent waiting on two sources
	// of unexpected delay in Wait: a child process that fails to exit after the
	// associated Context is canceled, and a child process that exits but leaves
	// its I/O pipes unclosed.
	//
	// The WaitDelay timer starts when either the associated Context is done or a
	// call to Wait observes that the child process has exited, whichever occurs
	// first. When the delay has elapsed, the command shuts down the child process
	// and/or its I/O pipes.
	//
	// If the child process has failed to exit — perhaps because it ignored or
	// failed to receive a shutdown signal from a Cancel function, or because no
	// Cancel function was set — then it will be terminated using os.Process.Kill.
	//
	// Then, if the I/O pipes communicating with the child process are still open,
	// those pipes are closed in order to unblock any goroutines currently blocked
	// on Read or Write calls.
	//
	// If pipes are closed due to WaitDelay, no Cancel call has occurred,
	// and the command has otherwise exited with a successful status, Wait and
	// similar methods will return ErrWaitDelay instead of nil.
	//
	// If WaitDelay is zero (the default), I/O pipes will be read until EOF,
	// which might not occur until orphaned subprocesses of the command have
	// also closed their descriptors for the pipes.
	WaitDelay time.Duration

	// childIOFiles holds closers for any of the child process's
	// stdin, stdout, and/or stderr files that were opened by the Cmd itself
	// (not supplied by the caller). These should be closed as soon as they
	// are inherited by the child process.
	childIOFiles []io.Closer

	// parentIOPipes holds closers for the parent's end of any pipes
	// connected to the child's stdin, stdout, and/or stderr streams
	// that were opened by the Cmd itself (not supplied by the caller).
	// These should be closed after Wait sees the command and copying
	// goroutines exit, or after WaitDelay has expired.
	parentIOPipes []io.Closer

	// goroutine holds a set of closures to execute to copy data
	// to and/or from the command's I/O pipes.
	goroutine []func() error

	// If goroutineErr is non-nil, it receives the first error from a copying
	// goroutine once all such goroutines have completed.
	// goroutineErr is set to nil once its error has been received.
	goroutineErr <-chan error

	// If ctxResult is non-nil, it receives the result of watchCtx exactly once.
	ctxResult <-chan ctxResult

	// The stack saved when the Command was created, if GODEBUG contains
	// execwait=2. Used for debugging leaks.
	createdByStack []byte

	// For a security release long ago, we created x/sys/execabs,
	// which manipulated the unexported lookPathErr error field
	// in this struct. For Go 1.19 we exported the field as Err error,
	// above, but we have to keep lookPathErr around for use by
	// old programs building against new toolchains.
	// The String and Start methods look for an error in lookPathErr
	// in preference to Err, to preserve the errors that execabs sets.
	//
	// In general we don't guarantee misuse of reflect like this,
	// but the misuse of reflect was by us, the best of various bad
	// options to fix the security problem, and people depend on
	// those old copies of execabs continuing to work.
	// The result is that we have to leave this variable around for the
	// rest of time, a compatibility scar.
	//
	// See https://go.dev/blog/path-security
	// and https://go.dev/issue/43724 for more context.
	lookPathErr error

	// cachedLookExtensions caches the result of calling lookExtensions.
	// It is set when Command is called with an absolute path, letting it do
	// the work of resolving the extension, so Start doesn't need to do it again.
	// This is only used on Windows.
	cachedLookExtensions struct{ in, out string }

	// startCalled records that Start was attempted, regardless of outcome.
	// (Until go.dev/issue/77075 is resolved, we use atomic.SwapInt32,
	// not atomic.Bool.Swap, to avoid triggering the copylocks vet check.)
	startCalled int32
}
// A ctxResult reports the result of watching the Context associated with a
// running command (and sending corresponding signals if needed).
type ctxResult struct {
	// err is the error result, if any, of watching the Context.
	err error

	// If timer is non-nil, it expires after WaitDelay has elapsed after
	// the Context is done.
	//
	// (If timer is nil, that means that the Context was not done before the
	// command completed, or no WaitDelay was set, or the WaitDelay already
	// expired and its effect was already applied.)
	timer *time.Timer
}
// execwait controls the GODEBUG=execwait setting, which arranges for leaked
// Cmds to be detected; the value "2" additionally captures creation stacks.
var execwait = godebug.New("#execwait")

// execerrdot controls the GODEBUG=execerrdot setting described in the
// package documentation (ErrDot for current-directory resolution).
var execerrdot = godebug.New("execerrdot")
// Command returns the [Cmd] struct to execute the named program with
// the given arguments.
//
// It sets only the Path and Args in the returned structure.
//
// If name contains no path separators, Command uses [LookPath] to
// resolve name to a complete path if possible. Otherwise it uses name
// directly as Path.
//
// The returned Cmd's Args field is constructed from the command name
// followed by the elements of arg, so arg should not include the
// command name itself. For example, Command("echo", "hello").
// Args[0] is always name, not the possibly resolved Path.
//
// On Windows, processes receive the whole command line as a single string
// and do their own parsing. Command combines and quotes Args into a command
// line string with an algorithm compatible with applications using
// CommandLineToArgvW (which is the most common way). Notable exceptions are
// msiexec.exe and cmd.exe (and thus, all batch files), which have a different
// unquoting algorithm. In these or other similar cases, you can do the
// quoting yourself and provide the full command line in SysProcAttr.CmdLine,
// leaving Args empty.
func Command(name string, arg ...string) *Cmd {
	cmd := &Cmd{
		Path: name,
		Args: append([]string{name}, arg...),
	}

	if v := execwait.Value(); v != "" {
		if v == "2" {
			// Obtain the caller stack. (This is equivalent to runtime/debug.Stack,
			// copied to avoid importing the whole package.)
			stack := make([]byte, 1024)
			for {
				n := runtime.Stack(stack, false)
				if n < len(stack) {
					stack = stack[:n]
					break
				}
				stack = make([]byte, 2*len(stack))
			}
			if i := bytes.Index(stack, []byte("\nos/exec.Command(")); i >= 0 {
				stack = stack[i+1:]
			}
			cmd.createdByStack = stack
		}

		runtime.SetFinalizer(cmd, func(c *Cmd) {
			if c.Process != nil && c.ProcessState == nil {
				debugHint := ""
				if c.createdByStack == nil {
					debugHint = " (set GODEBUG=execwait=2 to capture stacks for debugging)"
				} else {
					// We already have the creation stack; print it instead of
					// suggesting the GODEBUG setting.
					os.Stderr.WriteString("GODEBUG=execwait=2 detected a leaked exec.Cmd created by:\n")
					os.Stderr.Write(c.createdByStack)
					os.Stderr.WriteString("\n")
				}
				panic("exec: Cmd started a Process but leaked without a call to Wait" + debugHint)
			}
		})
	}

	if filepath.Base(name) == name {
		lp, err := LookPath(name)
		if lp != "" {
			// Update cmd.Path even if err is non-nil.
			// If err is ErrDot (especially on Windows), lp may include a resolved
			// extension (like .exe or .bat) that should be preserved.
			cmd.Path = lp
		}
		if err != nil {
			cmd.Err = err
		}
	} else if runtime.GOOS == "windows" && filepath.IsAbs(name) {
		// We may need to add a filename extension from PATHEXT
		// or verify an extension that is already present.
		// Since the path is absolute, its extension should be unambiguous
		// and independent of cmd.Dir, and we can go ahead and cache the lookup now.
		//
		// Note that we don't cache anything here for relative paths, because
		// cmd.Dir may be set after we return from this function and that may
		// cause the command to resolve to a different extension.
		if lp, err := lookExtensions(name, ""); err == nil {
			cmd.cachedLookExtensions.in, cmd.cachedLookExtensions.out = name, lp
		} else {
			cmd.Err = err
		}
	}
	return cmd
}
// CommandContext is like [Command] but includes a context.
//
// The provided context is used to interrupt the process
// (by calling cmd.Cancel or [os.Process.Kill])
// if the context becomes done before the command completes on its own.
//
// CommandContext sets the command's Cancel function to invoke the Kill method
// on its Process, and leaves its WaitDelay unset. The caller may change the
// cancellation behavior by modifying those fields before starting the command.
func CommandContext(ctx context.Context, name string, arg ...string) *Cmd {
	if ctx == nil {
		panic("nil Context")
	}
	c := Command(name, arg...)
	c.ctx = ctx
	// Default cancellation: kill the process when the Context is done.
	c.Cancel = func() error {
		return c.Process.Kill()
	}
	return c
}
// String returns a human-readable description of c.
// It is intended only for debugging.
// In particular, it is not suitable for use as input to a shell.
// The output of String may vary across Go releases.
func (c *Cmd) String() string {
	if c.Err != nil || c.lookPathErr != nil {
		// Path resolution failed; show the originally requested
		// command line instead of a resolved path.
		return strings.Join(c.Args, " ")
	}
	// Report the exact executable path followed by the arguments.
	var sb strings.Builder
	sb.WriteString(c.Path)
	for _, arg := range c.Args[1:] {
		sb.WriteByte(' ')
		sb.WriteString(arg)
	}
	return sb.String()
}
// interfaceEqual protects against panics from doing equality tests on
// two interfaces with non-comparable underlying types: such a comparison
// panics, the panic is swallowed, and eq keeps its zero value (false).
func interfaceEqual(a, b any) (eq bool) {
	defer func() {
		recover()
	}()
	return a == b
}
// argv returns the argument vector for the child process:
// c.Args when set, otherwise just {c.Path}.
func (c *Cmd) argv() []string {
	if len(c.Args) == 0 {
		return []string{c.Path}
	}
	return c.Args
}
// childStdin returns the *os.File the child process should use as standard
// input: the null device when c.Stdin is nil, c.Stdin itself when it is an
// *os.File, or the read end of a new pipe fed by a copying goroutine.
func (c *Cmd) childStdin() (*os.File, error) {
	if c.Stdin == nil {
		// No input configured: the child reads from the null device.
		f, err := os.Open(os.DevNull)
		if err != nil {
			return nil, err
		}
		c.childIOFiles = append(c.childIOFiles, f)
		return f, nil
	}

	if f, ok := c.Stdin.(*os.File); ok {
		// A file can be handed to the child directly; no pipe needed.
		return f, nil
	}

	pr, pw, err := os.Pipe()
	if err != nil {
		return nil, err
	}

	c.childIOFiles = append(c.childIOFiles, pr)
	c.parentIOPipes = append(c.parentIOPipes, pw)
	c.goroutine = append(c.goroutine, func() error {
		// Copy from the caller's reader into the child's end of the pipe.
		_, err := io.Copy(pw, c.Stdin)
		if skipStdinCopyError(err) {
			// Certain copy errors are deliberately ignored
			// (see skipStdinCopyError for which ones).
			err = nil
		}
		// A close error is only reported if the copy itself succeeded.
		if err1 := pw.Close(); err == nil {
			err = err1
		}
		return err
	})

	return pr, nil
}
// childStdout returns the *os.File the child process should use as
// standard output, derived from c.Stdout via writerDescriptor.
func (c *Cmd) childStdout() (*os.File, error) {
	return c.writerDescriptor(c.Stdout)
}
// childStderr returns the *os.File the child process should use as standard
// error. When Stderr is the same writer as Stdout (compared with
// interfaceEqual to avoid panics on non-comparable types), the stdout
// descriptor is reused so both streams share one pipe.
func (c *Cmd) childStderr(childStdout *os.File) (*os.File, error) {
	if c.Stderr != nil && interfaceEqual(c.Stderr, c.Stdout) {
		return childStdout, nil
	}
	return c.writerDescriptor(c.Stderr)
}
// writerDescriptor returns an os.File to which the child process
// can write to send data to w.
//
// If w is nil, writerDescriptor returns a File that writes to os.DevNull.
// If w is an *os.File it is returned directly; otherwise a pipe is created
// and a copying goroutine is registered to drain it into w.
func (c *Cmd) writerDescriptor(w io.Writer) (*os.File, error) {
	if w == nil {
		// No destination: the child's output goes to the null device.
		f, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
		if err != nil {
			return nil, err
		}
		c.childIOFiles = append(c.childIOFiles, f)
		return f, nil
	}
	if f, ok := w.(*os.File); ok {
		// A file can be handed to the child directly; no pipe needed.
		return f, nil
	}
	pr, pw, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	c.childIOFiles = append(c.childIOFiles, pw)
	c.parentIOPipes = append(c.parentIOPipes, pr)
	c.goroutine = append(c.goroutine, func() error {
		_, err := io.Copy(w, pr)
		pr.Close() // in case io.Copy stopped due to write error
		return err
	})
	return pw, nil
}
func closeDescriptors(closers []io.Closer) {
for _, fd := range closers {
fd.Close()
}
}
// Run starts the specified command and waits for it to complete.
//
// The returned error is nil if the command runs, has no problems
// copying stdin, stdout, and stderr, and exits with a zero exit
// status.
//
// If the command starts but does not complete successfully, the error is of
// type [*ExitError]. Other error types may be returned for other situations.
//
// If the calling goroutine has locked the operating system thread
// with [runtime.LockOSThread] and modified any inheritable OS-level
// thread state (for example, Linux or Plan 9 name spaces), the new
// process will inherit the caller's thread state.
func (c *Cmd) Run() error {
	err := c.Start()
	if err != nil {
		return err
	}
	return c.Wait()
}
// Start starts the specified command but does not wait for it to complete.
//
// If Start returns successfully, the c.Process field will be set.
//
// After a successful call to Start the [Cmd.Wait] method must be called in
// order to release associated system resources.
func (c *Cmd) Start() error {
	// Check for doubled Start calls before we defer failure cleanup. If the prior
	// call to Start succeeded, we don't want to spuriously close its pipes.
	// It is an error to call Start twice even if the first call did not create a process.
	if atomic.SwapInt32(&c.startCalled, 1) != 0 {
		return errors.New("exec: already started")
	}
	started := false
	defer func() {
		// The child's copies of the descriptors are always closed in the
		// parent once StartProcess has (or has not) run. The parent-side
		// pipe ends are closed only on failure; on success Wait owns them.
		closeDescriptors(c.childIOFiles)
		c.childIOFiles = nil
		if !started {
			closeDescriptors(c.parentIOPipes)
			c.parentIOPipes = nil
			c.goroutine = nil // aid GC, finalization of pipe fds
		}
	}()
	if c.Path == "" && c.Err == nil && c.lookPathErr == nil {
		c.Err = errors.New("exec: no command")
	}
	if c.Err != nil || c.lookPathErr != nil {
		// A lookup error recorded when the Cmd was built takes precedence.
		if c.lookPathErr != nil {
			return c.lookPathErr
		}
		return c.Err
	}
	lp := c.Path
	if runtime.GOOS == "windows" {
		if c.Path == c.cachedLookExtensions.in {
			// If Command was called with an absolute path, we already resolved
			// its extension and shouldn't need to do so again (provided c.Path
			// wasn't set to another value between the calls to Command and Start).
			lp = c.cachedLookExtensions.out
		} else {
			// If *Cmd was made without using Command at all, or if Command was
			// called with a relative path, we had to wait until now to resolve
			// it in case c.Dir was changed.
			//
			// Unfortunately, we cannot write the result back to c.Path because programs
			// may assume that they can call Start concurrently with reading the path.
			// (It is safe and non-racy to do so on Unix platforms, and users might not
			// test with the race detector on all platforms;
			// see https://go.dev/issue/62596.)
			//
			// So we will pass the fully resolved path to os.StartProcess, but leave
			// c.Path as is: missing a bit of logging information seems less harmful
			// than triggering a surprising data race, and if the user really cares
			// about that bit of logging they can always use LookPath to resolve it.
			var err error
			lp, err = lookExtensions(c.Path, c.Dir)
			if err != nil {
				return err
			}
		}
	}
	if c.Cancel != nil && c.ctx == nil {
		return errors.New("exec: command with a non-nil Cancel was not created with CommandContext")
	}
	if c.ctx != nil {
		// Fail fast if the Context is already done; don't start a doomed process.
		select {
		case <-c.ctx.Done():
			return c.ctx.Err()
		default:
		}
	}
	// Descriptors 0, 1, 2 for the child, followed by any ExtraFiles.
	childFiles := make([]*os.File, 0, 3+len(c.ExtraFiles))
	stdin, err := c.childStdin()
	if err != nil {
		return err
	}
	childFiles = append(childFiles, stdin)
	stdout, err := c.childStdout()
	if err != nil {
		return err
	}
	childFiles = append(childFiles, stdout)
	// childStderr may reuse stdout's descriptor when Stderr == Stdout.
	stderr, err := c.childStderr(stdout)
	if err != nil {
		return err
	}
	childFiles = append(childFiles, stderr)
	childFiles = append(childFiles, c.ExtraFiles...)
	env, err := c.environ()
	if err != nil {
		return err
	}
	c.Process, err = os.StartProcess(lp, c.argv(), &os.ProcAttr{
		Dir:   c.Dir,
		Files: childFiles,
		Env:   env,
		Sys:   c.SysProcAttr,
	})
	if err != nil {
		return err
	}
	started = true
	// Don't allocate the goroutineErr channel unless there are goroutines to start.
	if len(c.goroutine) > 0 {
		goroutineErr := make(chan error, 1)
		c.goroutineErr = goroutineErr
		// statusc acts as a one-slot mutex around the shared status:
		// each goroutine receives it, updates it, and either sends it
		// back or, when it is the last to finish, reports firstErr.
		type goroutineStatus struct {
			running  int
			firstErr error
		}
		statusc := make(chan goroutineStatus, 1)
		statusc <- goroutineStatus{running: len(c.goroutine)}
		for _, fn := range c.goroutine {
			go func(fn func() error) {
				err := fn()
				status := <-statusc
				if status.firstErr == nil {
					status.firstErr = err
				}
				status.running--
				if status.running == 0 {
					goroutineErr <- status.firstErr
				} else {
					statusc <- status
				}
			}(fn)
		}
		c.goroutine = nil // Allow the goroutines' closures to be GC'd when they complete.
	}
	// If we have anything to do when the command's Context expires,
	// start a goroutine to watch for cancellation.
	//
	// (Even if the command was created by CommandContext, a helper library may
	// have explicitly set its Cancel field back to nil, indicating that it should
	// be allowed to continue running after cancellation after all.)
	if (c.Cancel != nil || c.WaitDelay != 0) && c.ctx != nil && c.ctx.Done() != nil {
		resultc := make(chan ctxResult)
		c.ctxResult = resultc
		go c.watchCtx(resultc)
	}
	return nil
}
// watchCtx watches c.ctx until it is able to send a result to resultc.
//
// If c.ctx is done before a result can be sent, watchCtx calls c.Cancel,
// and/or kills cmd.Process after c.WaitDelay has elapsed.
//
// watchCtx manipulates c.goroutineErr, so its result must be received before
// c.awaitGoroutines is called.
func (c *Cmd) watchCtx(resultc chan<- ctxResult) {
	// Either the process finishes first (Wait receives our empty result)
	// or the Context is canceled first and we fall through to clean up.
	select {
	case resultc <- ctxResult{}:
		return
	case <-c.ctx.Done():
	}
	var err error
	if c.Cancel != nil {
		if interruptErr := c.Cancel(); interruptErr == nil {
			// We appear to have successfully interrupted the command, so any
			// program behavior from this point may be due to ctx even if the
			// command exits with code 0.
			err = c.ctx.Err()
		} else if errors.Is(interruptErr, os.ErrProcessDone) {
			// The process already finished: we just didn't notice it yet.
			// (Perhaps c.Wait hadn't been called, or perhaps it happened to race with
			// c.ctx being canceled.) Don't inject a needless error.
		} else {
			err = wrappedError{
				prefix: "exec: canceling Cmd",
				err:    interruptErr,
			}
		}
	}
	if c.WaitDelay == 0 {
		resultc <- ctxResult{err: err}
		return
	}
	timer := time.NewTimer(c.WaitDelay)
	select {
	case resultc <- ctxResult{err: err, timer: timer}:
		// c.Process.Wait returned and we've handed the timer off to c.Wait.
		// It will take care of goroutine shutdown from here.
		return
	case <-timer.C:
	}
	killed := false
	if killErr := c.Process.Kill(); killErr == nil {
		// We appear to have killed the process. c.Process.Wait should return a
		// non-nil error to c.Wait unless the Kill signal races with a successful
		// exit, and if that does happen we shouldn't report a spurious error,
		// so don't set err to anything here.
		killed = true
	} else if !errors.Is(killErr, os.ErrProcessDone) {
		err = wrappedError{
			prefix: "exec: killing Cmd",
			err:    killErr,
		}
	}
	if c.goroutineErr != nil {
		select {
		case goroutineErr := <-c.goroutineErr:
			// Forward goroutineErr only if we don't have reason to believe it was
			// caused by a call to Cancel or Kill above.
			if err == nil && !killed {
				err = goroutineErr
			}
		default:
			// Close the child process's I/O pipes, in case it abandoned some
			// subprocess that inherited them and is still holding them open
			// (see https://go.dev/issue/23019).
			//
			// We close the goroutine pipes only after we have sent any signals we're
			// going to send to the process (via Signal or Kill above): if we send
			// SIGKILL to the process, we would prefer for it to die of SIGKILL, not
			// SIGPIPE. (However, this may still cause any orphaned subprocesses to
			// terminate with SIGPIPE.)
			closeDescriptors(c.parentIOPipes)
			// Wait for the copying goroutines to finish, but report ErrWaitDelay for
			// the error: any other error here could result from closing the pipes.
			_ = <-c.goroutineErr
			if err == nil {
				err = ErrWaitDelay
			}
		}
		// Since we have already received the only result from c.goroutineErr,
		// set it to nil to prevent awaitGoroutines from blocking on it.
		c.goroutineErr = nil
	}
	resultc <- ctxResult{err: err}
}
// An ExitError reports an unsuccessful exit by a command.
type ExitError struct {
	*os.ProcessState

	// Stderr holds a subset of the standard error output from the
	// Cmd.Output method if standard error was not otherwise being
	// collected.
	//
	// If the error output is long, Stderr may contain only a prefix
	// and suffix of the output, with the middle replaced with
	// text about the number of omitted bytes.
	//
	// Stderr is provided for debugging, for inclusion in error messages.
	// Users with other needs should redirect Cmd.Stderr as needed.
	Stderr []byte
}

// Error returns the embedded ProcessState's description of the exit,
// e.g. "exit status 1".
func (e *ExitError) Error() string {
	return e.ProcessState.String()
}
// Wait waits for the command to exit and waits for any copying to
// stdin or copying from stdout or stderr to complete.
//
// The command must have been started by [Cmd.Start].
//
// The returned error is nil if the command runs, has no problems
// copying stdin, stdout, and stderr, and exits with a zero exit
// status.
//
// If the command fails to run or doesn't complete successfully, the
// error is of type [*ExitError]. Other error types may be
// returned for I/O problems.
//
// If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits
// for the respective I/O loop copying to or from the process to complete.
//
// Wait must not be called concurrently from multiple goroutines.
// A custom Cmd.Cancel function should not call Wait.
//
// Wait releases any resources associated with the [Cmd].
func (c *Cmd) Wait() error {
	if c.Process == nil {
		return errors.New("exec: not started")
	}
	if c.ProcessState != nil {
		return errors.New("exec: Wait was already called")
	}
	state, err := c.Process.Wait()
	// A non-zero exit with no syscall error becomes an *ExitError.
	if err == nil && !state.Success() {
		err = &ExitError{ProcessState: state}
	}
	c.ProcessState = state

	var timer *time.Timer
	if c.ctxResult != nil {
		// Receive from watchCtx before touching goroutineErr (watchCtx may
		// have consumed it); this may also hand us the WaitDelay timer.
		watch := <-c.ctxResult
		timer = watch.timer
		// If c.Process.Wait returned an error, prefer that.
		// Otherwise, report any error from the watchCtx goroutine,
		// such as a Context cancellation or a WaitDelay overrun.
		if err == nil && watch.err != nil {
			err = watch.err
		}
	}

	if goroutineErr := c.awaitGoroutines(timer); err == nil {
		// Report an error from the copying goroutines only if the program otherwise
		// exited normally on its own. Otherwise, the copying error may be due to the
		// abnormal termination.
		err = goroutineErr
	}
	// Release the parent's pipe ends now that all copying is done.
	closeDescriptors(c.parentIOPipes)
	c.parentIOPipes = nil

	return err
}
// awaitGoroutines waits for the results of the goroutines copying data to or
// from the command's I/O pipes.
//
// If c.WaitDelay elapses before the goroutines complete, awaitGoroutines
// forcibly closes their pipes and returns ErrWaitDelay.
//
// If timer is non-nil, it must send to timer.C at the end of c.WaitDelay.
func (c *Cmd) awaitGoroutines(timer *time.Timer) error {
	defer func() {
		if timer != nil {
			timer.Stop()
		}
		// goroutineErr is single-use; clear it once consumed (or absent).
		c.goroutineErr = nil
	}()

	if c.goroutineErr == nil {
		return nil // No running goroutines to await.
	}

	if timer == nil {
		if c.WaitDelay == 0 {
			// No deadline: block until the copiers report in.
			return <-c.goroutineErr
		}

		select {
		case err := <-c.goroutineErr:
			// Avoid the overhead of starting a timer.
			return err
		default:
		}

		// No existing timer was started: either there is no Context associated with
		// the command, or c.Process.Wait completed before the Context was done.
		timer = time.NewTimer(c.WaitDelay)
	}

	select {
	case <-timer.C:
		// Deadline hit: force the copiers to stop by closing their pipes.
		closeDescriptors(c.parentIOPipes)
		// Wait for the copying goroutines to finish, but ignore any error
		// (since it was probably caused by closing the pipes).
		_ = <-c.goroutineErr
		return ErrWaitDelay

	case err := <-c.goroutineErr:
		return err
	}
}
// Output runs the command and returns its standard output.
// Any returned error will usually be of type [*ExitError].
// If c.Stderr was nil and the returned error is of type
// [*ExitError], Output populates the Stderr field of the
// returned error.
func (c *Cmd) Output() ([]byte, error) {
	if c.Stdout != nil {
		return nil, errors.New("exec: Stdout already set")
	}
	var stdout bytes.Buffer
	c.Stdout = &stdout

	// When the caller left Stderr unset, capture a bounded copy of the
	// error output so it can be attached to an *ExitError.
	captureErr := c.Stderr == nil
	if captureErr {
		c.Stderr = &prefixSuffixSaver{N: 32 << 10}
	}

	err := c.Run()
	if captureErr && err != nil {
		if exitErr, ok := err.(*ExitError); ok {
			exitErr.Stderr = c.Stderr.(*prefixSuffixSaver).Bytes()
		}
	}
	return stdout.Bytes(), err
}
// CombinedOutput runs the command and returns its combined standard
// output and standard error.
func (c *Cmd) CombinedOutput() ([]byte, error) {
	if c.Stdout != nil {
		return nil, errors.New("exec: Stdout already set")
	}
	if c.Stderr != nil {
		return nil, errors.New("exec: Stderr already set")
	}
	// Both streams share one buffer, interleaved in arrival order.
	var combined bytes.Buffer
	c.Stdout = &combined
	c.Stderr = &combined
	err := c.Run()
	return combined.Bytes(), err
}
// StdinPipe returns a pipe that will be connected to the command's
// standard input when the command starts.
// The pipe will be closed automatically after [Cmd.Wait] sees the command exit.
// A caller need only call Close to force the pipe to close sooner.
// For example, if the command being run will not exit until standard input
// is closed, the caller must close the pipe.
func (c *Cmd) StdinPipe() (io.WriteCloser, error) {
	switch {
	case c.Stdin != nil:
		return nil, errors.New("exec: Stdin already set")
	case c.Process != nil:
		return nil, errors.New("exec: StdinPipe after process started")
	}
	readEnd, writeEnd, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	// The child reads from readEnd; the caller writes into writeEnd.
	c.Stdin = readEnd
	c.childIOFiles = append(c.childIOFiles, readEnd)
	c.parentIOPipes = append(c.parentIOPipes, writeEnd)
	return writeEnd, nil
}
// StdoutPipe returns a pipe that will be connected to the command's
// standard output when the command starts.
//
// [Cmd.Wait] will close the pipe after seeing the command exit, so most callers
// need not close the pipe themselves. It is thus incorrect to call Wait
// before all reads from the pipe have completed.
// For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe.
// See the example for idiomatic usage.
func (c *Cmd) StdoutPipe() (io.ReadCloser, error) {
	switch {
	case c.Stdout != nil:
		return nil, errors.New("exec: Stdout already set")
	case c.Process != nil:
		return nil, errors.New("exec: StdoutPipe after process started")
	}
	readEnd, writeEnd, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	// The child writes into writeEnd; the caller reads from readEnd.
	c.Stdout = writeEnd
	c.childIOFiles = append(c.childIOFiles, writeEnd)
	c.parentIOPipes = append(c.parentIOPipes, readEnd)
	return readEnd, nil
}
// StderrPipe returns a pipe that will be connected to the command's
// standard error when the command starts.
//
// [Cmd.Wait] will close the pipe after seeing the command exit, so most callers
// need not close the pipe themselves. It is thus incorrect to call Wait
// before all reads from the pipe have completed.
// For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe.
// See the StdoutPipe example for idiomatic usage.
func (c *Cmd) StderrPipe() (io.ReadCloser, error) {
	switch {
	case c.Stderr != nil:
		return nil, errors.New("exec: Stderr already set")
	case c.Process != nil:
		return nil, errors.New("exec: StderrPipe after process started")
	}
	readEnd, writeEnd, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	// The child writes into writeEnd; the caller reads from readEnd.
	c.Stderr = writeEnd
	c.childIOFiles = append(c.childIOFiles, writeEnd)
	c.parentIOPipes = append(c.parentIOPipes, readEnd)
	return readEnd, nil
}
// prefixSuffixSaver is an io.Writer that retains only the first N and
// the last N bytes written to it. Bytes reassembles what was kept,
// inserting a note about how many middle bytes were dropped.
type prefixSuffixSaver struct {
	N         int    // max size of prefix or suffix
	prefix    []byte // first N bytes written
	suffix    []byte // ring buffer once len(suffix) == N
	suffixOff int    // next write position within suffix
	skipped   int64  // count of bytes dropped from the middle
	// Note: a single shared []byte (prefix + message + ring) would save
	// an allocation, but at ~64KB total it is not worth the complexity.
}

// Write implements io.Writer. It always reports len(p) bytes written;
// anything beyond the retained prefix/suffix is counted in skipped and
// discarded.
func (w *prefixSuffixSaver) Write(p []byte) (int, error) {
	total := len(p)
	p = w.fill(&w.prefix, p)

	// Drop immediately anything that cannot end up in the final suffix:
	// only the trailing w.N bytes of p can matter.
	if excess := len(p) - w.N; excess > 0 {
		w.skipped += int64(excess)
		p = p[excess:]
	}

	p = w.fill(&w.suffix, p)

	// If p is still non-empty the suffix ring buffer is full; overwrite
	// it circularly. At most two copies are needed.
	for len(p) > 0 {
		written := copy(w.suffix[w.suffixOff:], p)
		p = p[written:]
		w.skipped += int64(written)
		w.suffixOff += written
		if w.suffixOff == w.N {
			w.suffixOff = 0
		}
	}
	return total, nil
}

// fill appends bytes from p to *dst until *dst holds w.N bytes and
// returns the portion of p that did not fit.
func (w *prefixSuffixSaver) fill(dst *[]byte, p []byte) []byte {
	room := w.N - len(*dst)
	if room <= 0 {
		return p
	}
	take := min(len(p), room)
	*dst = append(*dst, p[:take]...)
	return p[take:]
}

// Bytes reconstructs the retained output: the prefix, then (if any
// bytes were dropped) a "... omitting K bytes ..." marker, then the
// suffix ring buffer unrolled into chronological order.
func (w *prefixSuffixSaver) Bytes() []byte {
	if w.suffix == nil {
		return w.prefix
	}
	if w.skipped == 0 {
		return append(w.prefix, w.suffix...)
	}
	var out bytes.Buffer
	out.Grow(len(w.prefix) + len(w.suffix) + 50)
	out.Write(w.prefix)
	out.WriteString("\n... omitting ")
	out.WriteString(strconv.FormatInt(w.skipped, 10))
	out.WriteString(" bytes ...\n")
	out.Write(w.suffix[w.suffixOff:])
	out.Write(w.suffix[:w.suffixOff])
	return out.Bytes()
}
// environ returns a best-effort copy of the environment in which the command
// would be run as it is currently configured. If an error occurs in computing
// the environment, it is returned alongside the best-effort copy.
func (c *Cmd) environ() ([]string, error) {
	var err error

	env := c.Env
	if env == nil {
		env, err = execenv.Default(c.SysProcAttr)
		if err != nil {
			env = os.Environ()
			// Note that the non-nil err is preserved despite env being overridden.
		}

		if c.Dir != "" {
			switch runtime.GOOS {
			case "windows", "plan9":
				// Windows and Plan 9 do not use the PWD variable, so we don't need to
				// keep it accurate.
			default:
				// On POSIX platforms, PWD represents “an absolute pathname of the
				// current working directory.” Since we are changing the working
				// directory for the command, we should also update PWD to reflect that.
				//
				// Unfortunately, we didn't always do that, so (as proposed in
				// https://go.dev/issue/50599) to avoid unintended collateral damage we
				// only implicitly update PWD when Env is nil. That way, we're much
				// less likely to override an intentional change to the variable.
				if pwd, absErr := filepath.Abs(c.Dir); absErr == nil {
					env = append(env, "PWD="+pwd)
				} else if err == nil {
					// Keep only the first error encountered.
					err = absErr
				}
			}
		}
	}

	// Deduplicate (last occurrence wins) and add OS-critical variables;
	// the first error encountered anywhere above still wins.
	env, dedupErr := dedupEnv(env)
	if err == nil {
		err = dedupErr
	}
	return addCriticalEnv(env), err
}
// Environ returns a copy of the environment in which the command would be run
// as it is currently configured.
func (c *Cmd) Environ() []string {
	// environ is best-effort: it returns a usable environment even on
	// error, so the error is intentionally discarded here.
	e, _ := c.environ()
	return e
}
// dedupEnv returns a copy of env with any duplicates removed, in favor of
// later values.
// Items not of the normal environment "key=value" form are preserved unchanged.
// Except on Plan 9, items containing NUL characters are removed, and
// an error is returned along with the remaining values.
func dedupEnv(env []string) ([]string, error) {
	caseInsensitive := runtime.GOOS == "windows"
	nulOK := runtime.GOOS == "plan9"
	return dedupEnvCase(caseInsensitive, nulOK, env)
}
// dedupEnvCase is dedupEnv with a case option for testing.
// If caseInsensitive is true, the case of keys is ignored.
// If nulOK is false, items containing NUL characters are removed,
// and an error is returned along with the remaining values.
// (The previous comment said such items were "allowed", which is the
// opposite of what the code does.)
func dedupEnvCase(caseInsensitive, nulOK bool, env []string) ([]string, error) {
	// Construct the output in reverse order, to preserve the
	// last occurrence of each key.
	var err error
	out := make([]string, 0, len(env))
	saw := make(map[string]bool, len(env))
	for n := len(env); n > 0; n-- {
		kv := env[n-1]

		// Reject NUL in environment variables to prevent security issues (#56284);
		// except on Plan 9, which uses NUL as os.PathListSeparator (#56544).
		if !nulOK && strings.IndexByte(kv, 0) != -1 {
			err = errors.New("exec: environment variable contains NUL")
			continue
		}

		i := strings.Index(kv, "=")
		if i == 0 {
			// We observe in practice keys with a single leading "=" on Windows.
			// TODO(#49886): Should we consume only the first leading "=" as part
			// of the key, or parse through arbitrarily many of them until a non-"="?
			i = strings.Index(kv[1:], "=") + 1
		}
		if i < 0 {
			if kv != "" {
				// The entry is not of the form "key=value" (as it is required to be).
				// Leave it as-is for now.
				// TODO(#52436): should we strip or reject these bogus entries?
				out = append(out, kv)
			}
			continue
		}
		k := kv[:i]
		if caseInsensitive {
			k = strings.ToLower(k)
		}
		if saw[k] {
			// A later entry already claimed this key; drop the earlier one.
			continue
		}

		saw[k] = true
		out = append(out, kv)
	}

	// Now reverse the slice to restore the original order.
	for i := 0; i < len(out)/2; i++ {
		j := len(out) - i - 1
		out[i], out[j] = out[j], out[i]
	}

	return out, err
}
// addCriticalEnv adds any critical environment variables that are required
// (or at least almost always required) on the operating system.
// Currently this is only used for Windows.
func addCriticalEnv(env []string) []string {
if runtime.GOOS != "windows" {
return env
}
for _, kv := range env {
k, _, ok := strings.Cut(kv, "=")
if !ok {
continue
}
if strings.EqualFold(k, "SYSTEMROOT") {
// We already have it.
return env
}
}
return append(env, "SYSTEMROOT="+os.Getenv("SYSTEMROOT"))
}
// ErrDot indicates that a path lookup resolved to an executable
// in the current directory due to ‘.’ being in the path, either
// implicitly or explicitly. See the package documentation for details.
//
// Note that functions in this package do not return ErrDot directly.
// (For example, lookPath wraps it inside an *Error.)
// Code should use errors.Is(err, ErrDot), not err == ErrDot,
// to test whether a returned error err is due to this condition.
var ErrDot = errors.New("cannot run executable found relative to current directory")
// validateLookPath rejects names that can never be valid executable
// paths: the empty string, ".", and "..".
// See issue #74466 and CVE-2025-47906.
func validateLookPath(s string) error {
	if s == "" || s == "." || s == ".." {
		return ErrNotFound
	}
	return nil
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !plan9 && !windows
package exec
import (
"io/fs"
"syscall"
)
// skipStdinCopyError optionally specifies a function which reports
// whether the provided stdin copy error should be ignored.
func skipStdinCopyError(err error) bool {
// Ignore EPIPE errors copying to stdin if the program
// completed successfully otherwise.
// See Issue 9173.
pe, ok := err.(*fs.PathError)
return ok &&
pe.Op == "write" && pe.Path == "|1" &&
pe.Err == syscall.EPIPE
}
// Copyright 2026 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package exec
// LookPath searches for an executable named file in the current path,
// following the conventions of the host operating system.
// If file contains a slash, it is tried directly and the default path is not consulted.
// Otherwise, on success the result is an absolute path.
//
// LookPath returns an error satisfying [errors.Is](err, [ErrDot])
// if the resolved path is relative to the current directory.
// See the package documentation for more details.
//
// LookPath looks for an executable named file in the
// directories named by the PATH environment variable,
// except as described below.
//
//   - On Windows, the file must have an extension named by
//     the PATHEXT environment variable.
//     When PATHEXT is unset, the file must have
//     a ".com", ".exe", ".bat", or ".cmd" extension.
//   - On Plan 9, LookPath consults the path environment variable.
//     If file begins with "/", "#", "./", or "../", it is tried
//     directly and the path is not consulted.
//   - On Wasm, LookPath always returns an error.
func LookPath(file string) (string, error) {
	// Delegate to the per-platform implementation.
	path, err := lookPath(file)
	return path, err
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package exec
import (
"errors"
"internal/syscall/unix"
"io/fs"
"os"
"path/filepath"
"strings"
"syscall"
)
// ErrNotFound is the error resulting if a path search failed to find an executable file.
// lookPath wraps it in an *Error; test with errors.Is(err, ErrNotFound).
var ErrNotFound = errors.New("executable file not found in $PATH")
// findExecutable reports whether file exists and is executable by the
// current user, returning nil on success and a descriptive error
// otherwise. Directories yield EISDIR.
func findExecutable(file string) error {
	info, err := os.Stat(file)
	if err != nil {
		return err
	}
	mode := info.Mode()
	if mode.IsDir() {
		return syscall.EISDIR
	}
	switch err := unix.Eaccess(file, unix.X_OK); err {
	case syscall.ENOSYS, syscall.EPERM:
		// ENOSYS means Eaccess is not available or not implemented.
		// EPERM can be returned by Linux containers employing seccomp.
		// In both cases, fall back to checking the permission bits.
	default:
		// Includes the success (nil) case.
		return err
	}
	if mode&0111 != 0 {
		return nil
	}
	return fs.ErrPermission
}
// lookPath is the Unix implementation of LookPath: a name containing a
// slash is tried directly; otherwise each element of $PATH is searched
// in order.
//
// NOTE(rsc): I wish we could use the Plan 9 behavior here
// (only bypass the path if file begins with / or ./ or ../)
// but that would not match all the Unix shells.
func lookPath(file string) (string, error) {
	if err := validateLookPath(file); err != nil {
		return "", &Error{file, err}
	}
	if strings.Contains(file, "/") {
		if err := findExecutable(file); err != nil {
			return "", &Error{file, err}
		}
		return file, nil
	}
	for _, dir := range filepath.SplitList(os.Getenv("PATH")) {
		if dir == "" {
			// Unix shell semantics: path element "" means "."
			dir = "."
		}
		candidate := filepath.Join(dir, file)
		if err := findExecutable(candidate); err != nil {
			continue
		}
		if !filepath.IsAbs(candidate) {
			if execerrdot.Value() != "0" {
				// Relative result: report ErrDot unless the user opted out
				// via GODEBUG=execerrdot=0.
				return candidate, &Error{file, ErrDot}
			}
			execerrdot.IncNonDefault()
		}
		return candidate, nil
	}
	return "", &Error{file, ErrNotFound}
}
// lookExtensions is a no-op on non-Windows platforms, since they do
// not restrict executables to specific filename extensions. The path
// is returned unchanged.
func lookExtensions(path, dir string) (string, error) {
	_ = dir // dir only matters on Windows
	return path, nil
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (cgo || darwin) && !osusergo && unix && !android && !aix
package user
import (
"fmt"
"strconv"
"unsafe"
)
// maxGroups bounds how many supplementary groups groupRetry is willing
// to allocate for and report for a single user.
const maxGroups = 2048

// listGroups returns the supplementary group IDs of u (as decimal
// strings) via the C getgrouplist-style helper getGroupList, retrying
// with a correctly sized buffer through groupRetry when the initial
// 256-entry guess is too small.
func listGroups(u *User) ([]string, error) {
	ug, err := strconv.Atoi(u.Gid)
	if err != nil {
		return nil, fmt.Errorf("user: list groups for %s: invalid gid %q", u.Username, u.Gid)
	}
	userGID := _C_gid_t(ug)
	// nameC is a NUL-terminated copy of the username for the C call.
	nameC := make([]byte, len(u.Username)+1)
	copy(nameC, u.Username)

	n := _C_int(256)
	gidsC := make([]_C_gid_t, n)
	rv := getGroupList((*_C_char)(unsafe.Pointer(&nameC[0])), userGID, &gidsC[0], &n)
	if rv == -1 {
		// Mac is the only Unix that does not set n properly when rv == -1, so
		// we need to use different logic for Mac vs. the other OS's.
		if err := groupRetry(u.Username, nameC, userGID, &gidsC, &n); err != nil {
			return nil, err
		}
	}
	// n now holds the actual number of groups returned.
	gidsC = gidsC[:n]
	gids := make([]string, 0, n)
	for _, g := range gidsC[:n] {
		gids = append(gids, strconv.Itoa(int(g)))
	}
	return gids, nil
}
// groupRetry retries getGroupList with much larger size for n. The result is
// stored in gids.
//
// name must be the NUL-terminated username buffer prepared by the
// caller. On entry *n is the size reported by the failed first call;
// it is capped at maxGroups.
func groupRetry(username string, name []byte, userGID _C_gid_t, gids *[]_C_gid_t, n *_C_int) error {
	// More than initial buffer, but now n contains the correct size.
	if *n > maxGroups {
		return fmt.Errorf("user: %q is a member of more than %d groups", username, maxGroups)
	}
	*gids = make([]_C_gid_t, *n)
	rv := getGroupList((*_C_char)(unsafe.Pointer(&name[0])), userGID, &(*gids)[0], n)
	if rv == -1 {
		// Even the correctly sized retry failed; give up.
		return fmt.Errorf("user: list groups for %s failed", username)
	}
	return nil
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo && !osusergo && unix && !android && !darwin
package user
import (
"syscall"
)
/*
#cgo solaris CFLAGS: -D_POSIX_PTHREAD_SEMANTICS
#cgo CFLAGS: -fno-stack-protector
#include <unistd.h>
#include <sys/types.h>
#include <pwd.h>
#include <grp.h>
#include <stdlib.h>
#include <string.h>
static struct passwd mygetpwuid_r(int uid, char *buf, size_t buflen, int *found, int *perr) {
struct passwd pwd;
struct passwd *result;
memset (&pwd, 0, sizeof(pwd));
*perr = getpwuid_r(uid, &pwd, buf, buflen, &result);
*found = result != NULL;
return pwd;
}
static struct passwd mygetpwnam_r(const char *name, char *buf, size_t buflen, int *found, int *perr) {
struct passwd pwd;
struct passwd *result;
memset(&pwd, 0, sizeof(pwd));
*perr = getpwnam_r(name, &pwd, buf, buflen, &result);
*found = result != NULL;
return pwd;
}
static struct group mygetgrgid_r(int gid, char *buf, size_t buflen, int *found, int *perr) {
struct group grp;
struct group *result;
memset(&grp, 0, sizeof(grp));
*perr = getgrgid_r(gid, &grp, buf, buflen, &result);
*found = result != NULL;
return grp;
}
static struct group mygetgrnam_r(const char *name, char *buf, size_t buflen, int *found, int *perr) {
struct group grp;
struct group *result;
memset(&grp, 0, sizeof(grp));
*perr = getgrnam_r(name, &grp, buf, buflen, &result);
*found = result != NULL;
return grp;
}
*/
import "C"
// Aliases for the C types used by the libc passwd/group lookups, so
// the rest of the package can avoid direct references to "C".
type _C_char = C.char
type _C_int = C.int
type _C_gid_t = C.gid_t
type _C_uid_t = C.uid_t
type _C_size_t = C.size_t
type _C_struct_group = C.struct_group
type _C_struct_passwd = C.struct_passwd
type _C_long = C.long

// Accessors for the C struct fields, wrapped as Go functions so the
// lookup code above can read them without touching cgo syntax.
func _C_pw_uid(p *_C_struct_passwd) _C_uid_t   { return p.pw_uid }
func _C_pw_uidp(p *_C_struct_passwd) *_C_uid_t { return &p.pw_uid }
func _C_pw_gid(p *_C_struct_passwd) _C_gid_t   { return p.pw_gid }
func _C_pw_gidp(p *_C_struct_passwd) *_C_gid_t { return &p.pw_gid }
func _C_pw_name(p *_C_struct_passwd) *_C_char  { return p.pw_name }
func _C_pw_gecos(p *_C_struct_passwd) *_C_char { return p.pw_gecos }
func _C_pw_dir(p *_C_struct_passwd) *_C_char   { return p.pw_dir }

func _C_gr_gid(g *_C_struct_group) _C_gid_t  { return g.gr_gid }
func _C_gr_name(g *_C_struct_group) *_C_char { return g.gr_name }

// _C_GoString converts a NUL-terminated C string to a Go string.
func _C_GoString(p *_C_char) string { return C.GoString(p) }
// _C_getpwnam_r wraps the C helper mygetpwnam_r: it looks up a user by
// name into the caller-supplied buffer and reports whether a matching
// entry was found along with the getpwnam_r errno.
func _C_getpwnam_r(name *_C_char, buf *_C_char, size _C_size_t) (pwd _C_struct_passwd, found bool, errno syscall.Errno) {
	var f, e _C_int
	pwd = C.mygetpwnam_r(name, buf, size, &f, &e)
	return pwd, f != 0, syscall.Errno(e)
}

// _C_getpwuid_r is the uid-keyed analogue of _C_getpwnam_r.
func _C_getpwuid_r(uid _C_uid_t, buf *_C_char, size _C_size_t) (pwd _C_struct_passwd, found bool, errno syscall.Errno) {
	var f, e _C_int
	pwd = C.mygetpwuid_r(_C_int(uid), buf, size, &f, &e)
	return pwd, f != 0, syscall.Errno(e)
}

// _C_getgrnam_r wraps mygetgrnam_r: group lookup by name.
func _C_getgrnam_r(name *_C_char, buf *_C_char, size _C_size_t) (grp _C_struct_group, found bool, errno syscall.Errno) {
	var f, e _C_int
	grp = C.mygetgrnam_r(name, buf, size, &f, &e)
	return grp, f != 0, syscall.Errno(e)
}

// _C_getgrgid_r wraps mygetgrgid_r: group lookup by gid.
func _C_getgrgid_r(gid _C_gid_t, buf *_C_char, size _C_size_t) (grp _C_struct_group, found bool, errno syscall.Errno) {
	var f, e _C_int
	grp = C.mygetgrgid_r(_C_int(gid), buf, size, &f, &e)
	return grp, f != 0, syscall.Errno(e)
}

// sysconf keys for sizing the passwd/group lookup buffers.
const (
	_C__SC_GETPW_R_SIZE_MAX = C._SC_GETPW_R_SIZE_MAX
	_C__SC_GETGR_R_SIZE_MAX = C._SC_GETGR_R_SIZE_MAX
)

// _C_sysconf exposes sysconf(3) for the buffer-sizing code.
func _C_sysconf(key _C_int) _C_long { return C.sysconf(key) }
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (cgo || darwin) && !osusergo && unix && !android
package user
import (
"fmt"
"runtime"
"strconv"
"strings"
"syscall"
"unsafe"
)
// current returns the User record for the process's real user ID.
func current() (*User, error) {
	uid := syscall.Getuid()
	return lookupUnixUid(uid)
}
// lookupUser finds a user record by name via getpwnam_r, retrying with
// progressively larger buffers through retryWithBuffer as libc requires.
func lookupUser(username string) (*User, error) {
	var pwd _C_struct_passwd
	var found bool
	// nameC is a NUL-terminated copy of the username for the C call.
	nameC := make([]byte, len(username)+1)
	copy(nameC, username)

	err := retryWithBuffer(userBuffer, func(buf []byte) syscall.Errno {
		var errno syscall.Errno
		pwd, found, errno = _C_getpwnam_r((*_C_char)(unsafe.Pointer(&nameC[0])),
			(*_C_char)(unsafe.Pointer(&buf[0])), _C_size_t(len(buf)))
		return errno
	})
	// Treat both ENOENT and a clean "not found" result as unknown user.
	if err == syscall.ENOENT || (err == nil && !found) {
		return nil, UnknownUserError(username)
	}
	if err != nil {
		return nil, fmt.Errorf("user: lookup username %s: %v", username, err)
	}
	return buildUser(&pwd), nil
}
// lookupUserId parses uid as a decimal integer and looks up the
// corresponding user record.
func lookupUserId(uid string) (*User, error) {
	n, err := strconv.Atoi(uid)
	if err != nil {
		return nil, err
	}
	return lookupUnixUid(n)
}
// lookupUnixUid finds a user record by numeric uid via getpwuid_r,
// retrying with progressively larger buffers through retryWithBuffer.
func lookupUnixUid(uid int) (*User, error) {
	var pwd _C_struct_passwd
	var found bool

	err := retryWithBuffer(userBuffer, func(buf []byte) syscall.Errno {
		var errno syscall.Errno
		pwd, found, errno = _C_getpwuid_r(_C_uid_t(uid),
			(*_C_char)(unsafe.Pointer(&buf[0])), _C_size_t(len(buf)))
		return errno
	})
	// Treat both ENOENT and a clean "not found" result as unknown uid.
	if err == syscall.ENOENT || (err == nil && !found) {
		return nil, UnknownUserIdError(uid)
	}
	if err != nil {
		return nil, fmt.Errorf("user: lookup userid %d: %v", uid, err)
	}
	return buildUser(&pwd), nil
}
// buildUser converts a C passwd record into a *User.
//
// The pw_gecos field isn't quite standardized. Some docs
// say: "It is expected to be a comma separated list of
// personal data where the first item is the full name of the
// user." — so only the portion before the first comma is kept.
func buildUser(pwd *_C_struct_passwd) *User {
	fullName, _, _ := strings.Cut(_C_GoString(_C_pw_gecos(pwd)), ",")
	return &User{
		Uid:      strconv.FormatUint(uint64(_C_pw_uid(pwd)), 10),
		Gid:      strconv.FormatUint(uint64(_C_pw_gid(pwd)), 10),
		Username: _C_GoString(_C_pw_name(pwd)),
		Name:     fullName,
		HomeDir:  _C_GoString(_C_pw_dir(pwd)),
	}
}
// lookupGroup resolves groupname through the C library's getgrnam_r,
// retrying with progressively larger buffers via retryWithBuffer.
// It returns UnknownGroupError when no group entry matches.
func lookupGroup(groupname string) (*Group, error) {
	// NUL-terminated copy of the group name for the C call.
	nameC := append([]byte(groupname), 0)
	var (
		grp   _C_struct_group
		found bool
	)
	lookupErr := retryWithBuffer(groupBuffer, func(buf []byte) syscall.Errno {
		var errno syscall.Errno
		grp, found, errno = _C_getgrnam_r((*_C_char)(unsafe.Pointer(&nameC[0])),
			(*_C_char)(unsafe.Pointer(&buf[0])), _C_size_t(len(buf)))
		return errno
	})
	switch {
	case lookupErr == syscall.ENOENT || (lookupErr == nil && !found):
		return nil, UnknownGroupError(groupname)
	case lookupErr != nil:
		return nil, fmt.Errorf("user: lookup groupname %s: %v", groupname, lookupErr)
	}
	return buildGroup(&grp), nil
}
// lookupGroupId parses gid as a decimal integer and resolves it via
// lookupUnixGid. A non-numeric gid yields the strconv parse error.
func lookupGroupId(gid string) (*Group, error) {
	n, err := strconv.Atoi(gid)
	if err != nil {
		return nil, err
	}
	return lookupUnixGid(n)
}
// lookupUnixGid resolves a numeric gid through the C library's
// getgrgid_r, retrying with progressively larger buffers.
// It returns UnknownGroupIdError when no group entry matches.
func lookupUnixGid(gid int) (*Group, error) {
	var grp _C_struct_group
	var found bool
	err := retryWithBuffer(groupBuffer, func(buf []byte) syscall.Errno {
		var errno syscall.Errno
		grp, found, errno = _C_getgrgid_r(_C_gid_t(gid),
			(*_C_char)(unsafe.Pointer(&buf[0])), _C_size_t(len(buf)))
		// errno already has type syscall.Errno; the previous
		// syscall.Errno(errno) conversion was redundant and inconsistent
		// with the sibling lookupUnixUid.
		return errno
	})
	if err == syscall.ENOENT || (err == nil && !found) {
		return nil, UnknownGroupIdError(strconv.Itoa(gid))
	}
	if err != nil {
		return nil, fmt.Errorf("user: lookup groupid %d: %v", gid, err)
	}
	return buildGroup(&grp), nil
}
// buildGroup converts a C group record into a *Group.
//
// The gid is formatted with FormatUint to match buildUser's handling of
// pw_uid/pw_gid: on 32-bit platforms int(gid) would go negative for gids
// >= 1<<31 and Itoa would then render a wrong, signed value.
func buildGroup(grp *_C_struct_group) *Group {
	return &Group{
		Gid:  strconv.FormatUint(uint64(_C_gr_gid(grp)), 10),
		Name: _C_GoString(_C_gr_name(grp)),
	}
}
// bufferKind selects which sysconf limit is used to size the scratch
// buffer passed to the getpw*_r/getgr*_r wrappers.
type bufferKind _C_int

var (
	userBuffer  = bufferKind(_C__SC_GETPW_R_SIZE_MAX) // sizing for passwd lookups
	groupBuffer = bufferKind(_C__SC_GETGR_R_SIZE_MAX) // sizing for group lookups
)
// initialSize returns the starting buffer size for the retry loop in
// retryWithBuffer, derived from sysconf when the platform reports a
// usable value.
func (k bufferKind) initialSize() _C_size_t {
	sz := _C_sysconf(_C_int(k))
	switch {
	case sz == -1:
		// DragonFly and FreeBSD do not have _SC_GETPW_R_SIZE_MAX.
		// Additionally, not all Linux systems have it, either. For
		// example, the musl libc returns -1.
		return 1024
	case !isSizeReasonable(int64(sz)):
		// Truncate. If this truly isn't enough, retryWithBuffer will
		// error on the first run.
		return maxBufferSize
	default:
		return _C_size_t(sz)
	}
}
// retryWithBuffer repeatedly calls f(), increasing the size of the
// buffer each time, until f succeeds, fails with a non-ERANGE error,
// or the buffer exceeds a reasonable limit.
func retryWithBuffer(kind bufferKind, f func([]byte) syscall.Errno) error {
	buf := make([]byte, kind.initialSize())
	for {
		errno := f(buf)
		if errno == 0 {
			return nil
		} else if runtime.GOOS == "aix" && errno+1 == 0 {
			// On AIX getpwuid_r appears to return -1,
			// not ERANGE, on buffer overflow.
			// Fall through and retry with a larger buffer.
		} else if errno != syscall.ERANGE {
			return errno
		}
		// Grow geometrically; give up once the doubled size is unreasonable.
		newSize := len(buf) * 2
		if !isSizeReasonable(int64(newSize)) {
			return fmt.Errorf("internal buffer exceeds %d bytes", maxBufferSize)
		}
		buf = make([]byte, newSize)
	}
}
// maxBufferSize caps the scratch buffers used for libc lookups at 1 MiB.
const maxBufferSize = 1 << 20

// isSizeReasonable reports whether sz is a positive buffer size no larger
// than maxBufferSize.
func isSizeReasonable(sz int64) bool {
	if sz <= 0 {
		return false
	}
	return sz <= maxBufferSize
}
// Because we can't use cgo in tests:
// structPasswdForNegativeTest builds a passwd record whose uid/gid values
// (1<<32-2, 1<<32-3) would go negative if mistakenly converted through a
// signed 32-bit int — presumably used by tests to exercise buildUser's
// unsigned formatting; confirm against the test files.
func structPasswdForNegativeTest() _C_struct_passwd {
	sp := _C_struct_passwd{}
	*_C_pw_uidp(&sp) = 1<<32 - 2
	*_C_pw_gidp(&sp) = 1<<32 - 3
	return sp
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo && !osusergo && unix && !android && !aix && !darwin
package user
/*
#include <unistd.h>
#include <sys/types.h>
#include <grp.h>
static int mygetgrouplist(const char* user, gid_t group, gid_t* groups, int* ngroups) {
return getgrouplist(user, group, groups, ngroups);
}
*/
import "C"
// getGroupList calls the C library's getgrouplist (via the mygetgrouplist
// shim above) to obtain the group ids for user name whose primary group
// is userGID, storing them in gids and the count in n.
func getGroupList(name *_C_char, userGID _C_gid_t, gids *_C_gid_t, n *_C_int) _C_int {
	return C.mygetgrouplist(name, userGID, gids, n)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package user
import "sync"
// Well-known database files parsed by the pure-Go implementation
// (see the package documentation's description of the non-cgo path).
const (
	userFile  = "/etc/passwd"
	groupFile = "/etc/group"
)

// colon is the field separator within passwd/group lines.
var colon = []byte{':'}
// Current returns the current user.
//
// The first call will cache the current user information.
// Subsequent calls will return the cached value and will not reflect
// changes to the current user.
func Current() (*User, error) {
	cache.Do(func() { cache.u, cache.err = current() })
	if cache.err != nil {
		return nil, cache.err
	}
	// Hand out a copy so callers cannot mutate the cached value.
	u := *cache.u // copy
	return &u, nil
}
// cache of the current user; populated at most once by Current via the
// embedded sync.Once.
var cache struct {
	sync.Once
	u   *User
	err error
}
// Lookup looks up a user by username. If the user cannot be found, the
// returned error is of type [UnknownUserError].
func Lookup(username string) (*User, error) {
	// Fast path: serve from the cached current user when the name matches.
	if cur, err := Current(); err == nil && cur.Username == username {
		return cur, nil
	}
	return lookupUser(username)
}
// LookupId looks up a user by userid. If the user cannot be found, the
// returned error is of type [UnknownUserIdError].
func LookupId(uid string) (*User, error) {
	// Fast path: serve from the cached current user when the uid matches.
	if cur, err := Current(); err == nil && cur.Uid == uid {
		return cur, nil
	}
	return lookupUserId(uid)
}
// LookupGroup looks up a group by name. If the group cannot be found, the
// returned error is of type [UnknownGroupError].
func LookupGroup(name string) (*Group, error) {
	// Unlike Lookup/LookupId, group results are not cached.
	return lookupGroup(name)
}
// LookupGroupId looks up a group by groupid. If the group cannot be found, the
// returned error is of type [UnknownGroupIdError].
func LookupGroupId(gid string) (*Group, error) {
	// Unlike Lookup/LookupId, group results are not cached.
	return lookupGroupId(gid)
}
// GroupIds returns the list of group IDs that the user is a member of.
func (u *User) GroupIds() ([]string, error) {
	// listGroups is supplied by a platform-specific file (not visible here).
	return listGroups(u)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package user allows user account lookups by name or id.
For most Unix systems, this package has two internal implementations of
resolving user and group ids to names, and listing supplementary group IDs.
One is written in pure Go and parses /etc/passwd and /etc/group. The other
is cgo-based and relies on the standard C library (libc) routines such as
getpwuid_r, getgrnam_r, and getgrouplist.
When cgo is available, and the required routines are implemented in libc
for a particular platform, cgo-based (libc-backed) code is used.
This can be overridden by using osusergo build tag, which enforces
the pure Go implementation.
*/
package user
import (
"strconv"
)
// These may be set to false in init() for a particular platform and/or
// build flags to let the tests know to skip tests of some features.
// All three start true; platform-specific files flip them off when the
// corresponding facility is unavailable.
var (
	userImplemented      = true
	groupImplemented     = true
	groupListImplemented = true
)
// User represents a user account.
// Instances are returned by [Current], [Lookup], and [LookupId].
type User struct {
	// Uid is the user ID.
	// On POSIX systems, this is a decimal number representing the uid.
	// On Windows, this is a security identifier (SID) in a string format.
	// On Plan 9, this is the contents of /dev/user.
	Uid string
	// Gid is the primary group ID.
	// On POSIX systems, this is a decimal number representing the gid.
	// On Windows, this is a SID in a string format.
	// On Plan 9, this is the contents of /dev/user.
	Gid string
	// Username is the login name.
	Username string
	// Name is the user's real or display name.
	// It might be blank.
	// On POSIX systems, this is the first (or only) entry in the GECOS field
	// list.
	// On Windows, this is the user's display name.
	// On Plan 9, this is the contents of /dev/user.
	Name string
	// HomeDir is the path to the user's home directory (if they have one).
	HomeDir string
}
// Group represents a grouping of users.
// Instances are returned by [LookupGroup] and [LookupGroupId].
//
// On POSIX systems Gid contains a decimal number representing the group ID.
type Group struct {
	Gid  string // group ID
	Name string // group name
}
// UnknownUserIdError is returned by [LookupId] when a user cannot be found.
type UnknownUserIdError int

// Error embeds the offending uid in a human-readable message.
func (e UnknownUserIdError) Error() string {
	return "user: unknown userid " + strconv.FormatInt(int64(e), 10)
}
// UnknownUserError is returned by [Lookup] when
// a user cannot be found.
type UnknownUserError string

// Error embeds the unresolved username in a human-readable message.
func (e UnknownUserError) Error() string {
	const prefix = "user: unknown user "
	return prefix + string(e)
}
// UnknownGroupIdError is returned by [LookupGroupId] when
// a group cannot be found.
type UnknownGroupIdError string

// Error embeds the unresolved groupid in a human-readable message.
func (e UnknownGroupIdError) Error() string {
	const prefix = "group: unknown groupid "
	return prefix + string(e)
}
// UnknownGroupError is returned by [LookupGroup] when
// a group cannot be found.
type UnknownGroupError string

// Error embeds the unresolved group name in a human-readable message.
func (e UnknownGroupError) Error() string {
	const prefix = "group: unknown group "
	return prefix + string(e)
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filepath
import (
"errors"
"internal/filepathlite"
"os"
"runtime"
"slices"
"strings"
"unicode/utf8"
)
// ErrBadPattern indicates a pattern was malformed.
// It is the only error returned by [Match] and [Glob].
var ErrBadPattern = errors.New("syntax error in pattern")
// Match reports whether name matches the shell file name pattern.
// The pattern syntax is:
//
// pattern:
// { term }
// term:
// '*' matches any sequence of non-Separator characters
// '?' matches any single non-Separator character
// '[' [ '^' ] { character-range } ']'
// character class (must be non-empty)
// c matches character c (c != '*', '?', '\\', '[')
// '\\' c matches character c (except on Windows)
//
// character-range:
// c matches character c (c != '\\', '-', ']')
// '\\' c matches character c (except on Windows)
// lo '-' hi matches character c for lo <= c <= hi
//
// Path segments in the pattern must be separated by [Separator].
//
// Match requires pattern to match all of name, not just a substring.
// The only possible returned error is [ErrBadPattern], when pattern
// is malformed.
//
// On Windows, escaping is disabled. Instead, '\\' is treated as
// path separator.
func Match(pattern, name string) (matched bool, err error) {
	// Process one chunk per iteration (a star-free segment, possibly preceded
	// by '*'); the label lets the star backtracking below restart this loop.
Pattern:
	for len(pattern) > 0 {
		var star bool
		var chunk string
		star, chunk, pattern = scanChunk(pattern)
		if star && chunk == "" {
			// Trailing * matches rest of string unless it has a /.
			return !strings.Contains(name, string(Separator)), nil
		}
		// Look for match at current position.
		t, ok, err := matchChunk(chunk, name)
		// if we're the last chunk, make sure we've exhausted the name
		// otherwise we'll give a false result even if we could still match
		// using the star
		if ok && (len(t) == 0 || len(pattern) > 0) {
			name = t
			continue
		}
		if err != nil {
			return false, err
		}
		if star {
			// Look for match skipping i+1 bytes.
			// Cannot skip /.
			for i := 0; i < len(name) && name[i] != Separator; i++ {
				t, ok, err := matchChunk(chunk, name[i+1:])
				if ok {
					// if we're the last chunk, make sure we exhausted the name
					if len(pattern) == 0 && len(t) > 0 {
						continue
					}
					name = t
					continue Pattern
				}
				if err != nil {
					return false, err
				}
			}
		}
		return false, nil
	}
	// Pattern fully consumed: success only if name was fully consumed too.
	return len(name) == 0, nil
}
// scanChunk splits pattern into its next literal segment.
// It reports whether the segment was preceded by one or more '*'
// wildcards, and returns the segment (chunk) together with the
// unconsumed remainder of pattern.
func scanChunk(pattern string) (star bool, chunk, rest string) {
	// A run of leading stars collapses into a single wildcard.
	for strings.HasPrefix(pattern, "*") {
		star = true
		pattern = pattern[1:]
	}
	inRange := false
	var i int
Scan:
	for i = 0; i < len(pattern); i++ {
		switch pattern[i] {
		case '\\':
			// On non-Windows systems a backslash escapes the next byte;
			// malformed trailing escapes are diagnosed later by matchChunk.
			if runtime.GOOS != "windows" && i+1 < len(pattern) {
				i++
			}
		case '[':
			inRange = true
		case ']':
			inRange = false
		case '*':
			// A star inside a character class is a literal member;
			// outside, it terminates the chunk.
			if !inRange {
				break Scan
			}
		}
	}
	return star, pattern[:i], pattern[i:]
}
// matchChunk checks whether chunk matches the beginning of s.
// If so, it returns the remainder of s (after the match).
// Chunk is all single-character operators: literals, char classes, and ?.
func matchChunk(chunk, s string) (rest string, ok bool, err error) {
	// failed records whether the match has failed.
	// After the match fails, the loop continues on processing chunk,
	// checking that the pattern is well-formed but no longer reading s.
	failed := false
	for len(chunk) > 0 {
		failed = failed || len(s) == 0
		switch chunk[0] {
		case '[':
			// character class
			var r rune
			if !failed {
				var n int
				r, n = utf8.DecodeRuneInString(s)
				s = s[n:]
			}
			chunk = chunk[1:]
			// possibly negated
			negated := false
			if len(chunk) > 0 && chunk[0] == '^' {
				negated = true
				chunk = chunk[1:]
			}
			// parse all ranges
			match := false
			nrange := 0
			for {
				// nrange > 0 requires at least one range before ']' may
				// close the class (classes must be non-empty).
				if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
					chunk = chunk[1:]
					break
				}
				var lo, hi rune
				if lo, chunk, err = getEsc(chunk); err != nil {
					return "", false, err
				}
				hi = lo
				// getEsc guarantees a non-empty remainder, so chunk[0] is safe.
				if chunk[0] == '-' {
					if hi, chunk, err = getEsc(chunk[1:]); err != nil {
						return "", false, err
					}
				}
				match = match || lo <= r && r <= hi
				nrange++
			}
			failed = failed || match == negated
		case '?':
			// '?' matches any single rune except the path separator.
			if !failed {
				failed = s[0] == Separator
				_, n := utf8.DecodeRuneInString(s)
				s = s[n:]
			}
			chunk = chunk[1:]
		case '\\':
			if runtime.GOOS != "windows" {
				chunk = chunk[1:]
				if len(chunk) == 0 {
					return "", false, ErrBadPattern
				}
			}
			fallthrough
		default:
			// Literal byte: must equal the next byte of s.
			if !failed {
				failed = chunk[0] != s[0]
				s = s[1:]
			}
			chunk = chunk[1:]
		}
	}
	if failed {
		return "", false, nil
	}
	return s, true, nil
}
// getEsc decodes the next (possibly backslash-escaped) character of a
// character class. It returns ErrBadPattern for an empty or prematurely
// terminated class, a dangling escape, invalid UTF-8, or a class with no
// room left for the closing ']'.
func getEsc(chunk string) (r rune, nchunk string, err error) {
	if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
		return 0, "", ErrBadPattern
	}
	if chunk[0] == '\\' && runtime.GOOS != "windows" {
		// Escapes are honored everywhere except Windows.
		chunk = chunk[1:]
		if len(chunk) == 0 {
			return 0, "", ErrBadPattern
		}
	}
	var n int
	r, n = utf8.DecodeRuneInString(chunk)
	if r == utf8.RuneError && n == 1 {
		return r, chunk[n:], ErrBadPattern
	}
	nchunk = chunk[n:]
	if len(nchunk) == 0 {
		err = ErrBadPattern
	}
	return r, nchunk, err
}
// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in [Match]. The pattern may describe hierarchical names such as
// /usr/*/bin/ed (assuming the [Separator] is '/').
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is [ErrBadPattern], when pattern
// is malformed.
func Glob(pattern string) (matches []string, err error) {
	// Depth 0 seeds the recursion guard against pathological patterns.
	return globWithLimit(pattern, 0)
}
// globWithLimit implements Glob, tracking recursion depth in depth to
// bound how many nested directory patterns are expanded.
func globWithLimit(pattern string, depth int) (matches []string, err error) {
	// This limit is used to prevent stack exhaustion issues. See CVE-2022-30632.
	const pathSeparatorsLimit = 10000
	if depth == pathSeparatorsLimit {
		return nil, ErrBadPattern
	}
	// Check pattern is well-formed.
	if _, err := Match(pattern, ""); err != nil {
		return nil, err
	}
	// Fast path: a pattern without metacharacters names at most one file.
	if !hasMeta(pattern) {
		if _, err = os.Lstat(pattern); err != nil {
			return nil, nil
		}
		return []string{pattern}, nil
	}
	dir, file := Split(pattern)
	volumeLen := 0
	if runtime.GOOS == "windows" {
		volumeLen, dir = cleanGlobPathWindows(dir)
	} else {
		dir = cleanGlobPath(dir)
	}
	if !hasMeta(dir[volumeLen:]) {
		return glob(dir, file, nil)
	}
	// Prevent infinite recursion. See issue 15879.
	if dir == pattern {
		return nil, ErrBadPattern
	}
	// The directory part itself contains metacharacters: expand it first,
	// then match file within each resulting directory.
	var m []string
	m, err = globWithLimit(dir, depth+1)
	if err != nil {
		return
	}
	for _, d := range m {
		matches, err = glob(d, file, matches)
		if err != nil {
			return
		}
	}
	return
}
// cleanGlobPath prepares the directory portion of a glob pattern for use
// with the file system: "" means the current directory, the root
// separator is kept as-is, and any other path loses its trailing
// separator.
func cleanGlobPath(path string) string {
	if path == "" {
		return "."
	}
	if path == string(Separator) {
		// Root directory: keep untouched.
		return path
	}
	// Chop off the trailing separator left by Split.
	return path[:len(path)-1]
}
// cleanGlobPathWindows is windows version of cleanGlobPath.
// It additionally returns the length of the volume prefix (e.g. "C:")
// so the caller can exclude it when scanning for metacharacters.
func cleanGlobPathWindows(path string) (prefixLen int, cleaned string) {
	vollen := filepathlite.VolumeNameLen(path)
	switch {
	case path == "":
		return 0, "."
	case vollen+1 == len(path) && os.IsPathSeparator(path[len(path)-1]): // /, \, C:\ and C:/
		// do nothing to the path
		return vollen + 1, path
	case vollen == len(path) && len(path) == 2: // C:
		return vollen, path + "." // convert C: into C:.
	default:
		if vollen >= len(path) {
			vollen = len(path) - 1
		}
		return vollen, path[0 : len(path)-1] // chop off trailing separator
	}
}
// glob searches dir for names matching pattern, appending matches (each
// joined with dir) to matches in lexicographical order. Directory access
// problems are deliberately swallowed: the matches gathered so far are
// returned with a nil error. Only a malformed pattern produces an error.
func glob(dir, pattern string, matches []string) (m []string, e error) {
	m = matches
	info, statErr := os.Stat(dir)
	if statErr != nil || !info.IsDir() {
		return // ignore I/O errors and non-directories
	}
	f, openErr := os.Open(dir)
	if openErr != nil {
		return // ignore I/O error
	}
	defer f.Close()
	names, _ := f.Readdirnames(-1)
	slices.Sort(names)
	for _, name := range names {
		matched, matchErr := Match(pattern, name)
		if matchErr != nil {
			return m, matchErr
		}
		if matched {
			m = append(m, Join(dir, name))
		}
	}
	return
}
// hasMeta reports whether path contains any character that Match treats
// specially. Backslash counts only where it acts as an escape, i.e. on
// non-Windows systems.
func hasMeta(path string) bool {
	magic := `*?[\`
	if runtime.GOOS == "windows" {
		magic = `*?[` // '\' is a separator, not an escape, on Windows
	}
	return strings.ContainsAny(path, magic)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package filepath implements utility routines for manipulating filename paths
// in a way compatible with the target operating system-defined file paths.
//
// The filepath package uses either forward slashes or backslashes,
// depending on the operating system. To process paths such as URLs
// that always use forward slashes regardless of the operating
// system, see the [path] package.
package filepath
import (
"errors"
"internal/bytealg"
"internal/filepathlite"
"io/fs"
"os"
"slices"
)
const (
	Separator     = os.PathSeparator     // OS-specific path separator
	ListSeparator = os.PathListSeparator // OS-specific path list separator
)
// Clean returns the shortest path name equivalent to path
// by purely lexical processing. It applies the following rules
// iteratively until no further processing can be done:
//
// 1. Replace multiple [Separator] elements with a single one.
// 2. Eliminate each . path name element (the current directory).
// 3. Eliminate each inner .. path name element (the parent directory)
// along with the non-.. element that precedes it.
// 4. Eliminate .. elements that begin a rooted path:
// that is, replace "/.." by "/" at the beginning of a path,
// assuming Separator is '/'.
//
// The returned path ends in a slash only if it represents a root directory,
// such as "/" on Unix or `C:\` on Windows.
//
// Finally, any occurrences of slash are replaced by Separator.
//
// If the result of this process is an empty string, Clean
// returns the string ".".
//
// On Windows, Clean does not modify the volume name other than to replace
// occurrences of "/" with `\`.
// For example, Clean("//host/share/../x") returns `\\host\share\x`.
//
// See also Rob Pike, “Lexical File Names in Plan 9 or
// Getting Dot-Dot Right,”
// https://9p.io/sys/doc/lexnames.html
func Clean(path string) string {
	// Purely lexical; delegates to the shared internal implementation.
	return filepathlite.Clean(path)
}
// IsLocal reports whether path, using lexical analysis only, has all of these properties:
//
// - is within the subtree rooted at the directory in which path is evaluated
// - is not an absolute path
// - is not empty
// - on Windows, is not a reserved name such as "NUL"
//
// If IsLocal(path) returns true, then
// Join(base, path) will always produce a path contained within base and
// Clean(path) will always produce an unrooted path with no ".." path elements.
//
// IsLocal is a purely lexical operation.
// In particular, it does not account for the effect of any symbolic links
// that may exist in the filesystem.
func IsLocal(path string) bool {
	// Purely lexical; delegates to the shared internal implementation.
	return filepathlite.IsLocal(path)
}
// Localize converts a slash-separated path into an operating system path.
// The input path must be a valid path as reported by [io/fs.ValidPath].
//
// Localize returns an error if the path cannot be represented by the operating system.
// For example, the path a\b is rejected on Windows, on which \ is a separator
// character and cannot be part of a filename.
//
// The path returned by Localize will always be local, as reported by IsLocal.
func Localize(path string) (string, error) {
	// Delegates to the shared internal implementation.
	return filepathlite.Localize(path)
}
// ToSlash returns the result of replacing each separator character
// in path with a slash ('/') character. Multiple separators are
// replaced by multiple slashes.
func ToSlash(path string) string {
	// Delegates to the shared internal implementation.
	return filepathlite.ToSlash(path)
}
// FromSlash returns the result of replacing each slash ('/') character
// in path with a separator character. Multiple slashes are replaced
// by multiple separators.
//
// See also the Localize function, which converts a slash-separated path
// as used by the io/fs package to an operating system path.
func FromSlash(path string) string {
	// Delegates to the shared internal implementation.
	return filepathlite.FromSlash(path)
}
// SplitList splits a list of paths joined by the OS-specific [ListSeparator],
// usually found in PATH or GOPATH environment variables.
// Unlike strings.Split, SplitList returns an empty slice when passed an empty
// string.
func SplitList(path string) []string {
	// splitList is supplied by the platform-specific file (unix/plan9/windows).
	return splitList(path)
}
// Split splits path immediately following the final [Separator],
// separating it into a directory and file name component.
// If there is no Separator in path, Split returns an empty dir
// and file set to path.
// The returned values have the property that path = dir+file.
func Split(path string) (dir, file string) {
	// Purely lexical; delegates to the shared internal implementation.
	return filepathlite.Split(path)
}
// Join joins any number of path elements into a single path,
// separating them with an OS specific [Separator]. Empty elements
// are ignored. The result is Cleaned. However, if the argument
// list is empty or all its elements are empty, Join returns
// an empty string.
// On Windows, the result will only be a UNC path if the first
// non-empty element is a UNC path.
func Join(elem ...string) string {
	// join is supplied by the platform-specific file (unix/plan9/windows).
	return join(elem)
}
// Ext returns the file name extension used by path.
// The extension is the suffix beginning at the final dot
// in the final element of path; it is empty if there is
// no dot.
func Ext(path string) string {
	// Purely lexical; delegates to the shared internal implementation.
	return filepathlite.Ext(path)
}
// EvalSymlinks returns the path name after the evaluation of any symbolic
// links.
// If path is relative the result will be relative to the current directory,
// unless one of the components is an absolute symbolic link.
// EvalSymlinks calls [Clean] on the result.
func EvalSymlinks(path string) (string, error) {
	// evalSymlinks is supplied by a platform-specific file.
	return evalSymlinks(path)
}
// IsAbs reports whether the path is absolute.
func IsAbs(path string) bool {
	// Purely lexical; delegates to the shared internal implementation.
	return filepathlite.IsAbs(path)
}
// Abs returns an absolute representation of path.
// If the path is not absolute it will be joined with the current
// working directory to turn it into an absolute path. The absolute
// path name for a given file is not guaranteed to be unique.
// Abs calls [Clean] on the result.
func Abs(path string) (string, error) {
	// abs is supplied by the platform-specific file; Unix uses unixAbs.
	return abs(path)
}
// unixAbs implements Abs for platforms rooted at a single separator:
// a relative path is joined onto the current working directory, while an
// absolute path is merely cleaned.
func unixAbs(path string) (string, error) {
	if !IsAbs(path) {
		wd, err := os.Getwd()
		if err != nil {
			return "", err
		}
		return Join(wd, path), nil
	}
	return Clean(path), nil
}
// Rel returns a relative path that is lexically equivalent to targPath when
// joined to basePath with an intervening separator. That is,
// [Join](basePath, Rel(basePath, targPath)) is equivalent to targPath itself.
//
// The returned path will always be relative to basePath, even if basePath and
// targPath share no elements. Rel calls [Clean] on the result.
//
// An error is returned if targPath can't be made relative to basePath
// or if knowing the current working directory would be necessary to compute it.
func Rel(basePath, targPath string) (string, error) {
	baseVol := VolumeName(basePath)
	targVol := VolumeName(targPath)
	base := Clean(basePath)
	targ := Clean(targPath)
	if sameWord(targ, base) {
		return ".", nil
	}
	// Work on the volume-stripped tails; volumes must match exactly (below).
	base = base[len(baseVol):]
	targ = targ[len(targVol):]
	if base == "." {
		base = ""
	} else if base == "" && filepathlite.VolumeNameLen(baseVol) > 2 /* isUNC */ {
		// Treat any targetpath matching `\\host\share` basePath as absolute path.
		base = string(Separator)
	}
	// Can't use IsAbs - `\a` and `a` are both relative in Windows.
	baseSlashed := len(base) > 0 && base[0] == Separator
	targSlashed := len(targ) > 0 && targ[0] == Separator
	if baseSlashed != targSlashed || !sameWord(baseVol, targVol) {
		return "", errors.New("Rel: can't make " + targPath + " relative to " + basePath)
	}
	// Position base[b0:bi] and targ[t0:ti] at the first differing elements.
	bl := len(base)
	tl := len(targ)
	var b0, bi, t0, ti int
	for {
		for bi < bl && base[bi] != Separator {
			bi++
		}
		for ti < tl && targ[ti] != Separator {
			ti++
		}
		if !sameWord(targ[t0:ti], base[b0:bi]) {
			break
		}
		// Elements matched: advance both cursors past their separators.
		if bi < bl {
			bi++
		}
		if ti < tl {
			ti++
		}
		b0 = bi
		t0 = ti
	}
	if base[b0:bi] == ".." {
		return "", errors.New("Rel: can't make " + targPath + " relative to " + basePath)
	}
	if b0 != bl {
		// Base elements left. Must go up before going down.
		seps := bytealg.CountString(base[b0:bl], Separator)
		size := 2 + seps*3
		if tl != t0 {
			size += 1 + tl - t0
		}
		// Build ".." + ("/.." per remaining base element) + "/" + targ tail.
		buf := make([]byte, size)
		n := copy(buf, "..")
		for i := 0; i < seps; i++ {
			buf[n] = Separator
			copy(buf[n+1:], "..")
			n += 3
		}
		if t0 != tl {
			buf[n] = Separator
			copy(buf[n+1:], targ[t0:])
		}
		return Clean(string(buf)), nil
	}
	return targ[t0:], nil
}
// SkipDir is used as a return value from [WalkFunc] to indicate that
// the directory named in the call is to be skipped. It is not returned
// as an error by any function.
// It aliases [fs.SkipDir], so the two values compare equal.
var SkipDir error = fs.SkipDir
// SkipAll is used as a return value from [WalkFunc] to indicate that
// all remaining files and directories are to be skipped. It is not returned
// as an error by any function.
// It aliases [fs.SkipAll], so the two values compare equal.
var SkipAll error = fs.SkipAll
// WalkFunc is the type of the function called by [Walk] to visit each
// file or directory.
//
// The path argument contains the argument to Walk as a prefix.
// That is, if Walk is called with root argument "dir" and finds a file
// named "a" in that directory, the walk function will be called with
// argument "dir/a".
//
// The directory and file are joined with Join, which may clean the
// directory name: if Walk is called with the root argument "x/../dir"
// and finds a file named "a" in that directory, the walk function will
// be called with argument "dir/a", not "x/../dir/a".
//
// The info argument is the fs.FileInfo for the named path.
//
// The error result returned by the function controls how Walk continues.
// If the function returns the special value [SkipDir], Walk skips the
// current directory (path if info.IsDir() is true, otherwise path's
// parent directory). If the function returns the special value [SkipAll],
// Walk skips all remaining files and directories. Otherwise, if the function
// returns a non-nil error, Walk stops entirely and returns that error.
//
// The err argument reports an error related to path, signaling that Walk
// will not walk into that directory. The function can decide how to
// handle that error; as described earlier, returning the error will
// cause Walk to stop walking the entire tree.
//
// Walk calls the function with a non-nil err argument in two cases.
//
// First, if an [os.Lstat] on the root directory or any directory or file
// in the tree fails, Walk calls the function with path set to that
// directory or file's path, info set to nil, and err set to the error
// from os.Lstat.
//
// Second, if a directory's Readdirnames method fails, Walk calls the
// function with path set to the directory's path, info set to an
// [fs.FileInfo] describing the directory, and err set to the error from
// Readdirnames.
type WalkFunc func(path string, info fs.FileInfo, err error) error
var lstat = os.Lstat // for testing

// walkDir recursively descends path, calling walkDirFn.
func walkDir(path string, d fs.DirEntry, walkDirFn fs.WalkDirFunc) error {
	if err := walkDirFn(path, d, nil); err != nil || !d.IsDir() {
		if err == SkipDir && d.IsDir() {
			// Successfully skipped directory.
			err = nil
		}
		return err
	}
	dirs, err := os.ReadDir(path)
	if err != nil {
		// Second call, to report ReadDir error.
		err = walkDirFn(path, d, err)
		if err != nil {
			if err == SkipDir && d.IsDir() {
				err = nil
			}
			return err
		}
	}
	for _, d1 := range dirs {
		path1 := Join(path, d1.Name())
		if err := walkDir(path1, d1, walkDirFn); err != nil {
			if err == SkipDir {
				// SkipDir bubbling up from a child means "skip the rest of
				// this directory", not "abort the whole walk".
				break
			}
			return err
		}
	}
	return nil
}
// walk recursively descends path, calling walkFn.
func walk(path string, info fs.FileInfo, walkFn WalkFunc) error {
	if !info.IsDir() {
		return walkFn(path, info, nil)
	}
	names, err := readDirNames(path)
	err1 := walkFn(path, info, err)
	// If err != nil, walk can't walk into this directory.
	// err1 != nil means walkFn want walk to skip this directory or stop walking.
	// Therefore, if one of err and err1 isn't nil, walk will return.
	if err != nil || err1 != nil {
		// The caller's behavior is controlled by the return value, which is decided
		// by walkFn. walkFn may ignore err and return nil.
		// If walkFn returns SkipDir or SkipAll, it will be handled by the caller.
		// So walk should return whatever walkFn returns.
		return err1
	}
	for _, name := range names {
		filename := Join(path, name)
		fileInfo, err := lstat(filename)
		if err != nil {
			// Report the lstat failure to walkFn; only SkipDir (or nil)
			// lets the walk continue with the remaining entries.
			if err := walkFn(filename, fileInfo, err); err != nil && err != SkipDir {
				return err
			}
		} else {
			err = walk(filename, fileInfo, walkFn)
			if err != nil {
				// SkipDir from a directory child was already consumed by the
				// recursive call; here it only suppresses non-directory skips.
				if !fileInfo.IsDir() || err != SkipDir {
					return err
				}
			}
		}
	}
	return nil
}
// WalkDir walks the file tree rooted at root, calling fn for each file or
// directory in the tree, including root.
//
// All errors that arise visiting files and directories are filtered by fn:
// see the [fs.WalkDirFunc] documentation for details.
//
// The files are walked in lexical order, which makes the output deterministic
// but requires WalkDir to read an entire directory into memory before proceeding
// to walk that directory.
//
// WalkDir does not follow symbolic links.
//
// WalkDir calls fn with paths that use the separator character appropriate
// for the operating system. This is unlike [io/fs.WalkDir], which always
// uses slash separated paths.
func WalkDir(root string, fn fs.WalkDirFunc) error {
	var walkErr error
	if info, statErr := os.Lstat(root); statErr != nil {
		// Let fn decide what a failed root lstat means.
		walkErr = fn(root, nil, statErr)
	} else {
		walkErr = walkDir(root, fs.FileInfoToDirEntry(info), fn)
	}
	// A top-level skip sentinel means a clean, early finish.
	switch walkErr {
	case SkipDir, SkipAll:
		return nil
	}
	return walkErr
}
// Walk walks the file tree rooted at root, calling fn for each file or
// directory in the tree, including root.
//
// All errors that arise visiting files and directories are filtered by fn:
// see the [WalkFunc] documentation for details.
//
// The files are walked in lexical order, which makes the output deterministic
// but requires Walk to read an entire directory into memory before proceeding
// to walk that directory.
//
// Walk does not follow symbolic links.
//
// Walk is less efficient than [WalkDir], introduced in Go 1.16,
// which avoids calling os.Lstat on every visited file or directory.
func Walk(root string, fn WalkFunc) error {
	var walkErr error
	if info, statErr := os.Lstat(root); statErr != nil {
		// Let fn decide what a failed root lstat means.
		walkErr = fn(root, nil, statErr)
	} else {
		walkErr = walk(root, info, fn)
	}
	// A top-level skip sentinel means a clean, early finish.
	switch walkErr {
	case SkipDir, SkipAll:
		return nil
	}
	return walkErr
}
// readDirNames reads the directory named by dirname and returns
// a sorted list of directory entry names.
func readDirNames(dirname string) ([]string, error) {
	f, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	names, err := f.Readdirnames(-1)
	if err != nil {
		return nil, err
	}
	slices.Sort(names)
	return names, nil
}
// Base returns the last element of path.
// Trailing path separators are removed before extracting the last element.
// If the path is empty, Base returns ".".
// If the path consists entirely of separators, Base returns a single separator.
func Base(path string) string {
	// Purely lexical; delegates to the shared implementation in
	// internal/filepathlite.
	return filepathlite.Base(path)
}
// Dir returns all but the last element of path, typically the path's directory.
// After dropping the final element, Dir calls [Clean] on the path and trailing
// slashes are removed.
// If the path is empty, Dir returns ".".
// If the path consists entirely of separators, Dir returns a single separator.
// The returned path does not end in a separator unless it is the root directory.
func Dir(path string) string {
	// Purely lexical; delegates to the shared implementation in
	// internal/filepathlite.
	return filepathlite.Dir(path)
}
// VolumeName returns leading volume name.
// Given "C:\foo\bar" it returns "C:" on Windows.
// Given "\\host\share\foo" it returns "\\host\share".
// On other platforms it returns "".
func VolumeName(path string) string {
	// Delegates to internal/filepathlite, which holds the per-OS logic.
	return filepathlite.VolumeName(path)
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix || (js && wasm) || wasip1
package filepath
import (
"strings"
)
// HasPrefix exists for historical compatibility and should not be used.
//
// Deprecated: HasPrefix does not respect path boundaries and
// does not ignore case when required.
func HasPrefix(p, prefix string) bool {
	// Plain byte-wise prefix test, equivalent to strings.HasPrefix.
	return len(p) >= len(prefix) && p[:len(prefix)] == prefix
}
// splitList splits a list-style path on ListSeparator. The result is
// always non-nil: an empty input yields an empty (but allocated) slice.
func splitList(path string) []string {
	list := []string{}
	if path != "" {
		list = strings.Split(path, string(ListSeparator))
	}
	return list
}
// abs returns an absolute representation of path. On the platforms covered
// by this build tag it is the shared unixAbs implementation.
func abs(path string) (string, error) {
	return unixAbs(path)
}
// join joins the elements of elem with Separator and Cleans the result.
// Leading empty elements are ignored; if every element is empty, join
// returns "".
func join(elem []string) string {
	// If there's a bug here, fix the logic in ./path_plan9.go too.
	for len(elem) > 0 && elem[0] == "" {
		elem = elem[1:]
	}
	if len(elem) == 0 {
		return ""
	}
	return Clean(strings.Join(elem, string(Separator)))
}
// sameWord reports whether a and b refer to the same path component.
// On the case-sensitive platforms covered by this build tag it is exact
// byte equality.
func sameWord(a, b string) bool {
	return a == b
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filepath
import (
"errors"
"internal/filepathlite"
"io/fs"
"os"
"runtime"
"syscall"
)
// walkSymlinks returns path with all symbolic links resolved, processing
// the path one component at a time. The resolved prefix accumulates in
// dest; whenever a symlink is found, its target replaces the unprocessed
// remainder and scanning restarts from it. It returns an error if a
// component cannot be stat'ed, a non-final component is not a directory,
// or more than 255 links are traversed (cycle protection).
func walkSymlinks(path string) (string, error) {
	volLen := filepathlite.VolumeNameLen(path)
	pathSeparator := string(os.PathSeparator)

	// Treat a separator right after the volume as part of the volume,
	// so dest starts rooted (e.g. "C:\" or "/").
	if volLen < len(path) && os.IsPathSeparator(path[volLen]) {
		volLen++
	}
	vol := path[:volLen]
	dest := vol
	linksWalked := 0
	for start, end := volLen, volLen; start < len(path); start = end {
		for start < len(path) && os.IsPathSeparator(path[start]) {
			start++
		}
		end = start
		for end < len(path) && !os.IsPathSeparator(path[end]) {
			end++
		}

		// On Windows, "." can be a symlink.
		// We look it up, and use the value if it is absolute.
		// If not, we just return ".".
		isWindowsDot := runtime.GOOS == "windows" && path[filepathlite.VolumeNameLen(path):] == "."

		// The next path component is in path[start:end].
		if end == start {
			// No more path components.
			break
		} else if path[start:end] == "." && !isWindowsDot {
			// Ignore path component ".".
			continue
		} else if path[start:end] == ".." {
			// Back up to previous component if possible.
			// Note that volLen includes any leading slash.

			// Set r to the index of the last slash in dest,
			// after the volume.
			var r int
			for r = len(dest) - 1; r >= volLen; r-- {
				if os.IsPathSeparator(dest[r]) {
					break
				}
			}
			if r < volLen || dest[r+1:] == ".." {
				// Either path has no slashes
				// (it's empty or just "C:")
				// or it ends in a ".." we had to keep.
				// Either way, keep this "..".
				if len(dest) > volLen {
					dest += pathSeparator
				}
				dest += ".."
			} else {
				// Discard everything since the last slash.
				dest = dest[:r]
			}
			continue
		}

		// Ordinary path component. Add it to result.

		if len(dest) > filepathlite.VolumeNameLen(dest) && !os.IsPathSeparator(dest[len(dest)-1]) {
			dest += pathSeparator
		}

		dest += path[start:end]

		// Resolve symlink.

		fi, err := os.Lstat(dest)
		if err != nil {
			return "", err
		}

		if fi.Mode()&fs.ModeSymlink == 0 {
			// Not a symlink: a non-final component must be a directory
			// for the remaining path to be meaningful.
			if !fi.Mode().IsDir() && end < len(path) {
				return "", syscall.ENOTDIR
			}
			continue
		}

		// Found symlink.

		linksWalked++
		if linksWalked > 255 {
			return "", errors.New("EvalSymlinks: too many links")
		}

		link, err := os.Readlink(dest)
		if err != nil {
			return "", err
		}

		if isWindowsDot && !IsAbs(link) {
			// On Windows, if "." is a relative symlink,
			// just return ".".
			break
		}

		// Splice the link target in front of the unprocessed remainder
		// and restart component scanning from it.
		path = link + path[end:]

		v := filepathlite.VolumeNameLen(link)
		if v > 0 {
			// Symlink to drive name is an absolute path.
			if v < len(link) && os.IsPathSeparator(link[v]) {
				v++
			}
			vol = link[:v]
			dest = vol
			end = len(vol)
		} else if len(link) > 0 && os.IsPathSeparator(link[0]) {
			// Symlink to absolute path.
			dest = link[:1]
			end = 1
			vol = link[:1]
			volLen = 1
		} else {
			// Symlink to relative path; replace last
			// path component in dest.
			var r int
			for r = len(dest) - 1; r >= volLen; r-- {
				if os.IsPathSeparator(dest[r]) {
					break
				}
			}
			if r < volLen {
				dest = vol
			} else {
				dest = dest[:r]
			}
			end = 0
		}
	}
	return Clean(dest), nil
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows && !plan9
package filepath
// evalSymlinks is the implementation of EvalSymlinks for the platforms
// covered by this build tag; it defers to the portable walkSymlinks.
func evalSymlinks(path string) (string, error) {
	return walkSymlinks(path)
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package path
import (
"errors"
"internal/bytealg"
"unicode/utf8"
)
// ErrBadPattern indicates a pattern was malformed.
// It is the only error returned by [Match].
var ErrBadPattern = errors.New("syntax error in pattern")
// Match reports whether name matches the shell pattern.
// The pattern syntax is:
//
//	pattern:
//		{ term }
//	term:
//		'*'         matches any sequence of non-/ characters
//		'?'         matches any single non-/ character
//		'[' [ '^' ] { character-range } ']'
//		            character class (must be non-empty)
//		c           matches character c (c != '*', '?', '\\', '[')
//		'\\' c      matches character c
//
//	character-range:
//		c           matches character c (c != '\\', '-', ']')
//		'\\' c      matches character c
//		lo '-' hi   matches character c for lo <= c <= hi
//
// Match requires pattern to match all of name, not just a substring.
// The only possible returned error is [ErrBadPattern], when pattern
// is malformed.
func Match(pattern, name string) (matched bool, err error) {
Pattern:
	// Process the pattern one chunk at a time; a chunk is a literal
	// segment optionally preceded by one or more '*' wildcards.
	for len(pattern) > 0 {
		var star bool
		var chunk string
		star, chunk, pattern = scanChunk(pattern)
		if star && chunk == "" {
			// Trailing * matches rest of string unless it has a /.
			return bytealg.IndexByteString(name, '/') < 0, nil
		}
		// Look for match at current position.
		t, ok, err := matchChunk(chunk, name)
		// if we're the last chunk, make sure we've exhausted the name
		// otherwise we'll give a false result even if we could still match
		// using the star
		if ok && (len(t) == 0 || len(pattern) > 0) {
			name = t
			continue
		}
		if err != nil {
			return false, err
		}
		if star {
			// Look for match skipping i+1 bytes.
			// Cannot skip /.
			for i := 0; i < len(name) && name[i] != '/'; i++ {
				t, ok, err := matchChunk(chunk, name[i+1:])
				if ok {
					// if we're the last chunk, make sure we exhausted the name
					if len(pattern) == 0 && len(t) > 0 {
						continue
					}
					name = t
					continue Pattern
				}
				if err != nil {
					return false, err
				}
			}
		}
		// Before returning false with no error,
		// check that the remainder of the pattern is syntactically valid.
		for len(pattern) > 0 {
			_, chunk, pattern = scanChunk(pattern)
			if _, _, err := matchChunk(chunk, ""); err != nil {
				return false, err
			}
		}
		return false, nil
	}
	// Pattern exhausted: it matches only if the name was fully consumed.
	return len(name) == 0, nil
}
// scanChunk splits off the next segment of pattern: an optional run of
// leading '*' wildcards (reported via star) followed by a literal chunk
// extending to the next '*' that is outside a character class, or to the
// end of the pattern.
func scanChunk(pattern string) (star bool, chunk, rest string) {
	var skip int
	for skip < len(pattern) && pattern[skip] == '*' {
		skip++
	}
	star, pattern = skip > 0, pattern[skip:]

	inClass := false
	for j := 0; j < len(pattern); j++ {
		switch pattern[j] {
		case '\\':
			// Skip the escaped byte; a malformed trailing '\\' is
			// diagnosed later by matchChunk.
			if j+1 < len(pattern) {
				j++
			}
		case '[':
			inClass = true
		case ']':
			inClass = false
		case '*':
			if !inClass {
				return star, pattern[:j], pattern[j:]
			}
		}
	}
	return star, pattern, ""
}
// matchChunk checks whether chunk matches the beginning of s.
// If so, it returns the remainder of s (after the match).
// Chunk is all single-character operators: literals, char classes, and ?.
func matchChunk(chunk, s string) (rest string, ok bool, err error) {
	// failed records whether the match has failed.
	// After the match fails, the loop continues on processing chunk,
	// checking that the pattern is well-formed but no longer reading s.
	failed := false
	for len(chunk) > 0 {
		failed = failed || len(s) == 0
		switch chunk[0] {
		case '[':
			// character class
			var r rune
			if !failed {
				var n int
				r, n = utf8.DecodeRuneInString(s)
				s = s[n:]
			}
			chunk = chunk[1:]
			// possibly negated
			negated := false
			if len(chunk) > 0 && chunk[0] == '^' {
				negated = true
				chunk = chunk[1:]
			}
			// parse all ranges
			match := false
			nrange := 0
			for {
				// ']' closes the class only after at least one range,
				// so a leading ']' is treated as a class member.
				if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
					chunk = chunk[1:]
					break
				}
				var lo, hi rune
				if lo, chunk, err = getEsc(chunk); err != nil {
					return "", false, err
				}
				hi = lo
				// getEsc guarantees chunk is non-empty here.
				if chunk[0] == '-' {
					if hi, chunk, err = getEsc(chunk[1:]); err != nil {
						return "", false, err
					}
				}
				match = match || lo <= r && r <= hi
				nrange++
			}
			failed = failed || match == negated
		case '?':
			if !failed {
				// '?' matches any single rune except the path separator.
				failed = s[0] == '/'
				_, n := utf8.DecodeRuneInString(s)
				s = s[n:]
			}
			chunk = chunk[1:]
		case '\\':
			chunk = chunk[1:]
			if len(chunk) == 0 {
				return "", false, ErrBadPattern
			}
			fallthrough
		default:
			// Literal byte: must equal the next byte of s.
			if !failed {
				failed = chunk[0] != s[0]
				s = s[1:]
			}
			chunk = chunk[1:]
		}
	}
	if failed {
		return "", false, nil
	}
	return s, true, nil
}
// getEsc decodes one possibly-backslash-escaped character from chunk for
// use in a character class, returning the rune and the unconsumed rest of
// chunk. It reports ErrBadPattern for an empty chunk, a bare '-' or ']',
// a dangling escape, invalid UTF-8, or when nothing follows the character
// (a class must still be closed by ']').
func getEsc(chunk string) (r rune, nchunk string, err error) {
	if chunk == "" || chunk[0] == '-' || chunk[0] == ']' {
		return 0, "", ErrBadPattern
	}
	if chunk[0] == '\\' {
		if chunk = chunk[1:]; chunk == "" {
			return 0, "", ErrBadPattern
		}
	}
	r, n := utf8.DecodeRuneInString(chunk)
	if r == utf8.RuneError && n == 1 {
		err = ErrBadPattern
	}
	if nchunk = chunk[n:]; nchunk == "" {
		err = ErrBadPattern
	}
	return r, nchunk, err
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package path implements utility routines for manipulating slash-separated
// paths.
//
// The path package should only be used for paths separated by forward
// slashes, such as the paths in URLs. This package does not deal with
// Windows paths with drive letters or backslashes; to manipulate
// operating system paths, use the [path/filepath] package.
package path
import "internal/bytealg"
// A lazybuf is a lazily constructed path buffer.
// It supports append, reading previously appended bytes,
// and retrieving the final string. It does not allocate a buffer
// to hold the output until that output diverges from s.
type lazybuf struct {
	s   string // original input; output mirrors s[:w] while buf is nil
	buf []byte // allocated only once the output diverges from s
	w   int    // number of bytes of logical output written so far
}
// index returns the i'th byte of the output written so far.
func (b *lazybuf) index(i int) byte {
	if b.buf != nil {
		return b.buf[i]
	}
	return b.s[i]
}
// append writes c as the next output byte. While the output still matches
// a prefix of s, it only advances w; on the first divergence it allocates
// buf (sized to hold at most len(s) bytes of output) and copies the
// already-matched prefix into it.
func (b *lazybuf) append(c byte) {
	if b.buf == nil {
		if b.w < len(b.s) && b.s[b.w] == c {
			b.w++
			return
		}
		b.buf = make([]byte, len(b.s))
		copy(b.buf, b.s[:b.w])
	}
	b.buf[b.w] = c
	b.w++
}
// string returns the output accumulated so far.
func (b *lazybuf) string() string {
	if b.buf == nil {
		// Output never diverged from the input: reuse it, no allocation.
		return b.s[:b.w]
	}
	return string(b.buf[:b.w])
}
// Clean returns the shortest path name equivalent to path
// by purely lexical processing. It applies the following rules
// iteratively until no further processing can be done:
//
//  1. Replace multiple slashes with a single slash.
//  2. Eliminate each . path name element (the current directory).
//  3. Eliminate each inner .. path name element (the parent directory)
//     along with the non-.. element that precedes it.
//  4. Eliminate .. elements that begin a rooted path:
//     that is, replace "/.." by "/" at the beginning of a path.
//
// The returned path ends in a slash only if it is the root "/".
//
// If the result of this process is an empty string, Clean
// returns the string ".".
//
// See also Rob Pike, “Lexical File Names in Plan 9 or
// Getting Dot-Dot Right,”
// https://9p.io/sys/doc/lexnames.html
func Clean(path string) string {
	if path == "" {
		return "."
	}

	rooted := path[0] == '/'
	n := len(path)

	// Invariants:
	//	reading from path; r is index of next byte to process.
	//	writing to buf; w is index of next byte to write.
	//	dotdot is index in buf where .. must stop, either because
	//		it is the leading slash or it is a leading ../../.. prefix.
	//
	// out writes lazily: while the output equals a prefix of path,
	// nothing is allocated (see lazybuf).
	out := lazybuf{s: path}
	r, dotdot := 0, 0
	if rooted {
		out.append('/')
		r, dotdot = 1, 1
	}

	for r < n {
		switch {
		case path[r] == '/':
			// empty path element
			r++
		case path[r] == '.' && (r+1 == n || path[r+1] == '/'):
			// . element
			r++
		case path[r] == '.' && path[r+1] == '.' && (r+2 == n || path[r+2] == '/'):
			// .. element: remove to last /
			r += 2
			switch {
			case out.w > dotdot:
				// can backtrack
				out.w--
				for out.w > dotdot && out.index(out.w) != '/' {
					out.w--
				}
			case !rooted:
				// cannot backtrack, but not rooted, so append .. element.
				if out.w > 0 {
					out.append('/')
				}
				out.append('.')
				out.append('.')
				dotdot = out.w
			}
			// Note: a rooted path that cannot backtrack drops the ..
			// entirely ("/.." becomes "/", rule 4).
		default:
			// real path element.
			// add slash if needed
			if rooted && out.w != 1 || !rooted && out.w != 0 {
				out.append('/')
			}
			// copy element
			for ; r < n && path[r] != '/'; r++ {
				out.append(path[r])
			}
		}
	}

	// Turn empty string into "."
	if out.w == 0 {
		return "."
	}

	return out.string()
}
// Split splits path immediately following the final slash,
// separating it into a directory and file name component.
// If there is no slash in path, Split returns an empty dir and
// file set to path.
// The returned values have the property that path = dir+file.
func Split(path string) (dir, file string) {
	// Walk backward to the last slash (i == -1 if there is none).
	i := len(path) - 1
	for i >= 0 && path[i] != '/' {
		i--
	}
	return path[:i+1], path[i+1:]
}
// Join joins any number of path elements into a single path,
// separating them with slashes. Empty elements are ignored.
// The result is Cleaned. However, if the argument list is
// empty or all its elements are empty, Join returns
// an empty string.
func Join(elem ...string) string {
	total := 0
	for _, e := range elem {
		total += len(e)
	}
	if total == 0 {
		return ""
	}

	// Reserve room for the elements plus one slash between each pair.
	joined := make([]byte, 0, total+len(elem)-1)
	for _, e := range elem {
		if e == "" && len(joined) == 0 {
			continue // skip leading empties; later ones add a collapsible slash
		}
		if len(joined) > 0 {
			joined = append(joined, '/')
		}
		joined = append(joined, e...)
	}
	return Clean(string(joined))
}
// Ext returns the file name extension used by path.
// The extension is the suffix beginning at the final dot
// in the final slash-separated element of path;
// it is empty if there is no dot.
func Ext(path string) string {
	// Scan backward; a slash ends the final element before any dot.
	for i := len(path) - 1; i >= 0; i-- {
		switch path[i] {
		case '/':
			return ""
		case '.':
			return path[i:]
		}
	}
	return ""
}
// Base returns the last element of path.
// Trailing slashes are removed before extracting the last element.
// If the path is empty, Base returns ".".
// If the path consists entirely of slashes, Base returns "/".
func Base(path string) string {
	if path == "" {
		return "."
	}
	// Drop trailing slashes.
	end := len(path)
	for end > 0 && path[end-1] == '/' {
		end--
	}
	if end == 0 {
		// The path was nothing but slashes.
		return "/"
	}
	path = path[:end]
	// Keep only what follows the final remaining slash, if any.
	for i := end - 1; i >= 0; i-- {
		if path[i] == '/' {
			return path[i+1:]
		}
	}
	return path
}
// IsAbs reports whether the path is absolute (begins with a slash).
func IsAbs(path string) bool {
	if path == "" {
		return false
	}
	return path[0] == '/'
}
// Dir returns all but the last element of path, typically the path's directory.
// After dropping the final element using [Split], the path is Cleaned and trailing
// slashes are removed.
// If the path is empty, Dir returns ".".
// If the path consists entirely of slashes followed by non-slash bytes, Dir
// returns a single slash. In any other case, the returned path does not end in a
// slash.
func Dir(path string) string {
dir, _ := Split(path)
return Clean(dir)
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package plugin implements loading and symbol resolution of Go plugins.
//
// A plugin is a Go main package with exported functions and variables that
// has been built with:
//
// go build -buildmode=plugin
//
// When a plugin is first opened, the init functions of all packages not
// already part of the program are called. The main function is not run.
// A plugin is only initialized once, and cannot be closed.
//
// # Warnings
//
// The ability to dynamically load parts of an application during
// execution, perhaps based on user-defined configuration, may be a
// useful building block in some designs. In particular, because
// applications and dynamically loaded functions can share data
// structures directly, plugins may enable very high-performance
// integration of separate parts.
//
// However, the plugin mechanism has many significant drawbacks that
// should be considered carefully during the design. For example:
//
// - Plugins are currently supported only on Linux, FreeBSD, and
// macOS, making them unsuitable for applications intended to be
// portable.
//
// - Plugins are poorly supported by the Go race detector. Even simple
// race conditions may not be automatically detected. See
// https://go.dev/issue/24245 for more information.
//
// - Applications that use plugins may require careful configuration
// to ensure that the various parts of the program be made available
// in the correct location in the file system (or container image).
// By contrast, deploying an application consisting of a single static
// executable is straightforward.
//
// - Reasoning about program initialization is more difficult when
// some packages may not be initialized until long after the
// application has started running.
//
// - Bugs in applications that load plugins could be exploited by
// an attacker to load dangerous or untrusted libraries.
//
// - Runtime crashes are likely to occur unless all parts of the
// program (the application and all its plugins) are compiled
// using exactly the same version of the toolchain, the same build
// tags, and the same values of certain flags and environment
// variables.
//
// - Similar crashing problems are likely to arise unless all common
// dependencies of the application and its plugins are built from
// exactly the same source code.
//
// - Together, these restrictions mean that, in practice, the
// application and its plugins must all be built together by a
// single person or component of a system. In that case, it may
// be simpler for that person or component to generate Go source
// files that blank-import the desired set of plugins and then
// compile a static executable in the usual way.
//
// For these reasons, many users decide that traditional interprocess
// communication (IPC) mechanisms such as sockets, pipes, remote
// procedure call (RPC), shared memory mappings, or file system
// operations may be more suitable despite the performance overheads.
package plugin
// Plugin is a loaded Go plugin.
type Plugin struct {
	pluginpath string
	err        string         // set if plugin failed to load
	loaded     chan struct{}  // closed when loaded
	syms       map[string]any // exported symbol name -> value (see open)
}
// Open opens a Go plugin.
// If a path has already been opened, then the existing *[Plugin] is returned.
// It is safe for concurrent use by multiple goroutines.
func Open(path string) (*Plugin, error) {
	// The platform-specific implementation (e.g. dlopen-based) does the work.
	return open(path)
}
// Lookup searches for a symbol named symName in plugin p.
// A symbol is any exported variable or function.
// It reports an error if the symbol is not found.
// It is safe for concurrent use by multiple goroutines.
func (p *Plugin) Lookup(symName string) (Symbol, error) {
	return lookup(p, symName)
}
// A Symbol is a pointer to a variable or function.
//
// For example, a plugin defined as
//
//	package main
//
//	import "fmt"
//
//	var V int
//
//	func F() { fmt.Printf("Hello, number %d\n", V) }
//
// may be loaded with the [Open] function and then the exported package
// symbols V and F can be accessed
//
//	p, err := plugin.Open("plugin_name.so")
//	if err != nil {
//		panic(err)
//	}
//	v, err := p.Lookup("V")
//	if err != nil {
//		panic(err)
//	}
//	f, err := p.Lookup("F")
//	if err != nil {
//		panic(err)
//	}
//	*v.(*int) = 7
//	f.(func())() // prints "Hello, number 7"
//
// As the example shows, a Symbol must be type-asserted before use: to a
// pointer type for a variable, or to the function type for a function.
type Symbol any
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (linux && cgo) || (darwin && cgo) || (freebsd && cgo)
package plugin
/*
#cgo linux LDFLAGS: -ldl
#include <dlfcn.h>
#include <limits.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
static uintptr_t pluginOpen(const char* path, char** err) {
void* h = dlopen(path, RTLD_NOW|RTLD_GLOBAL);
if (h == NULL) {
*err = (char*)dlerror();
}
return (uintptr_t)h;
}
static void* pluginLookup(uintptr_t h, const char* name, char** err) {
void* r = dlsym((void*)h, name);
if (r == NULL) {
*err = (char*)dlerror();
}
return r;
}
*/
import "C"
import (
"errors"
"sync"
"unsafe"
)
// open loads the plugin at the given file path via dlopen and resolves all
// of its exported symbols into p.syms. If the same resolved path has
// already been opened, the existing *Plugin is returned (waiting for the
// first load to complete if it is still in progress).
func open(name string) (*Plugin, error) {
	cPath := make([]byte, C.PATH_MAX+1)
	cRelName := make([]byte, len(name)+1)
	copy(cRelName, name)
	// Canonicalize the path so the plugins map is keyed consistently.
	if C.realpath(
		(*C.char)(unsafe.Pointer(&cRelName[0])),
		(*C.char)(unsafe.Pointer(&cPath[0]))) == nil {
		return nil, errors.New(`plugin.Open("` + name + `"): realpath failed`)
	}
	filepath := C.GoString((*C.char)(unsafe.Pointer(&cPath[0])))

	pluginsMu.Lock()
	if p := plugins[filepath]; p != nil {
		pluginsMu.Unlock()
		if p.err != "" {
			return nil, errors.New(`plugin.Open("` + name + `"): ` + p.err + ` (previous failure)`)
		}
		// Wait until the first Open of this path finishes loading.
		<-p.loaded
		return p, nil
	}
	var cErr *C.char
	h := C.pluginOpen((*C.char)(unsafe.Pointer(&cPath[0])), &cErr)
	if h == 0 {
		pluginsMu.Unlock()
		return nil, errors.New(`plugin.Open("` + name + `"): ` + C.GoString(cErr))
	}
	// TODO(crawshaw): look for plugin note, confirm it is a Go plugin
	// and it was built with the correct toolchain.
	if len(name) > 3 && name[len(name)-3:] == ".so" {
		// Strip the extension; name is only used in messages from here on.
		name = name[:len(name)-3]
	}
	if plugins == nil {
		plugins = make(map[string]*Plugin)
	}
	pluginpath, syms, initTasks, errstr := lastmoduleinit()
	if errstr != "" {
		// Record the failure so later Opens of this path fail fast.
		plugins[filepath] = &Plugin{
			pluginpath: pluginpath,
			err:        errstr,
		}
		pluginsMu.Unlock()
		return nil, errors.New(`plugin.Open("` + name + `"): ` + errstr)
	}
	// This function can be called from the init function of a plugin.
	// Drop a placeholder in the map so subsequent opens can wait on it.
	p := &Plugin{
		pluginpath: pluginpath,
		loaded:     make(chan struct{}),
	}
	plugins[filepath] = p
	pluginsMu.Unlock()

	doInit(initTasks)

	// Fill out the value of each plugin symbol.
	updatedSyms := map[string]any{}
	for symName, sym := range syms {
		// A leading '.' marks a function symbol.
		isFunc := symName[0] == '.'
		if isFunc {
			delete(syms, symName)
			symName = symName[1:]
		}

		fullName := pluginpath + "." + symName
		cname := make([]byte, len(fullName)+1)
		copy(cname, fullName)

		p := C.pluginLookup(h, (*C.char)(unsafe.Pointer(&cname[0])), &cErr)
		if p == nil {
			// NOTE(review): on this path the placeholder stays in plugins
			// with p.loaded never closed, so a later Open of the same path
			// would block on <-p.loaded — confirm this is intended.
			return nil, errors.New(`plugin.Open("` + name + `"): could not find symbol ` + symName + `: ` + C.GoString(cErr))
		}
		valp := (*[2]unsafe.Pointer)(unsafe.Pointer(&sym))
		if isFunc {
			(*valp)[1] = unsafe.Pointer(&p)
		} else {
			(*valp)[1] = p
		}
		// we can't add to syms during iteration as we'll end up processing
		// some symbols twice with the inability to tell if the symbol is a function
		updatedSyms[symName] = sym
	}
	p.syms = updatedSyms

	close(p.loaded)
	return p, nil
}
// lookup returns the resolved symbol named symName from p, or an error if
// the plugin exports no such symbol.
func lookup(p *Plugin, symName string) (Symbol, error) {
	if s, ok := p.syms[symName]; ok && s != nil {
		return s, nil
	}
	return nil, errors.New("plugin: symbol " + symName + " not found in plugin " + p.pluginpath)
}
var (
	// pluginsMu guards plugins.
	pluginsMu sync.Mutex
	// plugins maps the canonical (realpath) file path of every plugin
	// that open has seen — loaded, loading, or failed — to its *Plugin.
	plugins map[string]*Plugin
)
// lastmoduleinit is defined in package runtime.
// It returns the most recently loaded module's path, its symbol table,
// and its init tasks; errstr is non-empty on failure.
func lastmoduleinit() (pluginpath string, syms map[string]any, inittasks []*initTask, errstr string)
// doInit is defined in package runtime.
// It runs the given init tasks, initializing the plugin's packages.
//
//go:linkname doInit runtime.doInit
func doInit(t []*initTask)
// initTask is an opaque handle to a runtime init task.
type initTask struct {
	// fields defined in runtime.initTask. We only handle pointers to an initTask
	// in this package, so the contents are irrelevant.
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// backtrack is a regular expression search with submatch
// tracking for small regular expressions and texts. It allocates
// a bit vector with (length of input) * (length of prog) bits,
// to make sure it never explores the same (character position, instruction)
// state multiple times. This limits the search to run in time linear in
// the length of the test.
//
// backtrack is a fast replacement for the NFA code on small
// regexps when onepass cannot be used.
package regexp
import (
"regexp/syntax"
"sync"
)
// A job is an entry on the backtracker's job stack. It holds
// the instruction pc and the position in the input.
type job struct {
	pc  uint32
	arg bool // true when resuming a previously started visit of this pc
	pos int
}
const (
	visitedBits        = 32         // bits per word of the visited bit vector
	maxBacktrackProg   = 500        // len(prog.Inst) <= max
	maxBacktrackVector = 256 * 1024 // bit vector size <= max (bits)
)
// bitState holds state for the backtracker.
type bitState struct {
	end      int      // end position in the input (set by reset)
	cap      []int    // capture registers for the path currently explored
	matchcap []int    // captures of the best match recorded so far
	jobs     []job    // explicit DFS stack, replacing recursion
	visited  []uint32 // bit per (pc, pos) state already explored
	inputs   inputs
}
// bitStatePool caches bitState values for reuse across searches.
var bitStatePool sync.Pool
// newBitState returns a bitState from the pool, or a freshly allocated one
// if the pool is empty. Callers must call reset before use.
func newBitState() *bitState {
	if b, ok := bitStatePool.Get().(*bitState); ok {
		return b
	}
	return new(bitState)
}
// freeBitState clears b's cached inputs and returns it to the pool,
// so a pooled bitState does not keep the searched text reachable.
func freeBitState(b *bitState) {
	b.inputs.clear()
	bitStatePool.Put(b)
}
// maxBitStateLen returns the maximum length of a string to search with
// the backtracker using prog, or 0 if the backtracker should not be used
// for this program at all.
func maxBitStateLen(prog *syntax.Prog) int {
	if !shouldBacktrack(prog) {
		return 0
	}
	// Keep the visited bit vector within maxBacktrackVector bits total.
	return maxBacktrackVector / len(prog.Inst)
}
// shouldBacktrack reports whether the program is short enough (at most
// maxBacktrackProg instructions) for the backtracker to run.
// (The previous comment said "too long", which is the opposite of what
// the function returns.)
func shouldBacktrack(prog *syntax.Prog) bool {
	return len(prog.Inst) <= maxBacktrackProg
}
// reset resets the state of the backtracker.
// end is the end position in the input.
// ncap is the number of captures.
// Backing storage from a previous (pooled) use is reused when large enough.
func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) {
	b.end = end

	if cap(b.jobs) == 0 {
		b.jobs = make([]job, 0, 256)
	} else {
		b.jobs = b.jobs[:0]
	}

	// One bit per (instruction, position) pair, rounded up to full words.
	visitedSize := (len(prog.Inst)*(end+1) + visitedBits - 1) / visitedBits
	if cap(b.visited) < visitedSize {
		b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits)
	} else {
		b.visited = b.visited[:visitedSize]
		clear(b.visited) // set to 0
	}

	if cap(b.cap) < ncap {
		b.cap = make([]int, ncap)
	} else {
		b.cap = b.cap[:ncap]
	}
	for i := range b.cap {
		b.cap[i] = -1 // -1 marks an unset capture position
	}

	if cap(b.matchcap) < ncap {
		b.matchcap = make([]int, ncap)
	} else {
		b.matchcap = b.matchcap[:ncap]
	}
	for i := range b.matchcap {
		b.matchcap[i] = -1
	}
}
// shouldVisit reports whether the combination of (pc, pos) has not
// been visited yet, and marks it as visited.
func (b *bitState) shouldVisit(pc uint32, pos int) bool {
	// Linear index of (pc, pos) into the visited bit vector.
	n := uint(int(pc)*(b.end+1) + pos)
	if b.visited[n/visitedBits]&(1<<(n&(visitedBits-1))) != 0 {
		return false
	}
	b.visited[n/visitedBits] |= 1 << (n & (visitedBits - 1))
	return true
}
// push pushes (pc, pos, arg) onto the job stack if it should be
// visited. InstFail entries are dropped outright since they cannot match.
func (b *bitState) push(re *Regexp, pc uint32, pos int, arg bool) {
	// Only check shouldVisit when arg is false.
	// When arg is true, we are continuing a previous visit.
	if re.prog.Inst[pc].Op != syntax.InstFail && (arg || b.shouldVisit(pc, pos)) {
		b.jobs = append(b.jobs, job{pc: pc, arg: arg, pos: pos})
	}
}
// tryBacktrack runs a backtracking search starting at pos and reports
// whether a match was found; captures for the best match found are left
// in b.matchcap. The explicit job stack plus the visited bit vector bound
// the work to at most one visit per (pc, pos) pair.
func (re *Regexp) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool {
	longest := re.longest

	b.push(re, pc, pos, false)
	for len(b.jobs) > 0 {
		l := len(b.jobs) - 1
		// Pop job off the stack.
		pc := b.jobs[l].pc
		pos := b.jobs[l].pos
		arg := b.jobs[l].arg
		b.jobs = b.jobs[:l]

		// Optimization: rather than push and pop,
		// code that is going to Push and continue
		// the loop simply updates ip, p, and arg
		// and jumps to CheckAndLoop. We have to
		// do the ShouldVisit check that Push
		// would have, but we avoid the stack
		// manipulation.
		goto Skip
	CheckAndLoop:
		if !b.shouldVisit(pc, pos) {
			continue
		}
	Skip:
		inst := &re.prog.Inst[pc]
		switch inst.Op {
		default:
			panic("bad inst")
		case syntax.InstFail:
			panic("unexpected InstFail")
		case syntax.InstAlt:
			// Cannot just
			//   b.push(inst.Out, pos, false)
			//   b.push(inst.Arg, pos, false)
			// If during the processing of inst.Out, we encounter
			// inst.Arg via another path, we want to process it then.
			// Pushing it here will inhibit that. Instead, re-push
			// inst with arg==true as a reminder to push inst.Arg out
			// later.
			if arg {
				// Finished inst.Out; try inst.Arg.
				arg = false
				pc = inst.Arg
				goto CheckAndLoop
			} else {
				b.push(re, pc, pos, true)
				pc = inst.Out
				goto CheckAndLoop
			}

		case syntax.InstAltMatch:
			// One opcode consumes runes; the other leads to match.
			switch re.prog.Inst[inst.Out].Op {
			case syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
				// inst.Arg is the match.
				b.push(re, inst.Arg, pos, false)
				pc = inst.Arg
				pos = b.end
				goto CheckAndLoop
			}
			// inst.Out is the match - non-greedy
			b.push(re, inst.Out, b.end, false)
			pc = inst.Out
			goto CheckAndLoop

		case syntax.InstRune:
			r, width := i.step(pos)
			if !inst.MatchRune(r) {
				continue
			}
			pos += width
			pc = inst.Out
			goto CheckAndLoop

		case syntax.InstRune1:
			// Common special case: a single-rune alternative.
			r, width := i.step(pos)
			if r != inst.Rune[0] {
				continue
			}
			pos += width
			pc = inst.Out
			goto CheckAndLoop

		case syntax.InstRuneAnyNotNL:
			r, width := i.step(pos)
			if r == '\n' || r == endOfText {
				continue
			}
			pos += width
			pc = inst.Out
			goto CheckAndLoop

		case syntax.InstRuneAny:
			r, width := i.step(pos)
			if r == endOfText {
				continue
			}
			pos += width
			pc = inst.Out
			goto CheckAndLoop

		case syntax.InstCapture:
			if arg {
				// Finished inst.Out; restore the old value.
				b.cap[inst.Arg] = pos
				continue
			} else {
				if inst.Arg < uint32(len(b.cap)) {
					// Capture pos to register, but save old value.
					b.push(re, pc, b.cap[inst.Arg], true) // come back when we're done.
					b.cap[inst.Arg] = pos
				}
				pc = inst.Out
				goto CheckAndLoop
			}

		case syntax.InstEmptyWidth:
			// Zero-width assertion (^, $, \b, ...); consumes no input.
			flag := i.context(pos)
			if !flag.match(syntax.EmptyOp(inst.Arg)) {
				continue
			}
			pc = inst.Out
			goto CheckAndLoop

		case syntax.InstNop:
			pc = inst.Out
			goto CheckAndLoop

		case syntax.InstMatch:
			// We found a match. If the caller doesn't care
			// where the match is, no point going further.
			if len(b.cap) == 0 {
				return true
			}

			// Record best match so far.
			// Only need to check end point, because this entire
			// call is only considering one start position.
			if len(b.cap) > 1 {
				b.cap[1] = pos
			}
			if old := b.matchcap[1]; old == -1 || (longest && pos > 0 && pos > old) {
				copy(b.matchcap, b.cap)
			}

			// If going for first match, we're done.
			if !longest {
				return true
			}

			// If we used the entire text, no longer match is possible.
			if pos == b.end {
				return true
			}

			// Otherwise, continue on in hope of a longer match.
			continue
		}
	}

	return longest && len(b.matchcap) > 1 && b.matchcap[1] >= 0
}
// backtrack runs a backtracking search of prog on the input starting at pos.
// Exactly one of ib (bytes) and is (string) supplies the text. It returns
// dstCap extended with the capture positions of the match, or nil if there
// is no match.
func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []int) []int {
	startCond := re.cond
	if startCond == ^syntax.EmptyOp(0) { // impossible
		return nil
	}
	if startCond&syntax.EmptyBeginText != 0 && pos != 0 {
		// Anchored match, past beginning of text.
		return nil
	}

	b := newBitState()
	i, end := b.inputs.init(nil, ib, is)
	b.reset(re.prog, end, ncap)

	// Anchored search must start at the beginning of the input
	if startCond&syntax.EmptyBeginText != 0 {
		if len(b.cap) > 0 {
			b.cap[0] = pos
		}
		if !re.tryBacktrack(b, i, uint32(re.prog.Start), pos) {
			freeBitState(b)
			return nil
		}
	} else {
		// Unanchored search, starting from each possible text position.
		// Notice that we have to try the empty string at the end of
		// the text, so the loop condition is pos <= end, not pos < end.
		// This looks like it's quadratic in the size of the text,
		// but we are not clearing visited between calls to TrySearch,
		// so no work is duplicated and it ends up still being linear.
		width := -1
		for ; pos <= end && width != 0; pos += width {
			if len(re.prefix) > 0 {
				// Match requires literal prefix; fast search for it.
				advance := i.index(re, pos)
				if advance < 0 {
					freeBitState(b)
					return nil
				}
				pos += advance
			}

			if len(b.cap) > 0 {
				b.cap[0] = pos
			}
			if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) {
				// Match must be leftmost; done.
				goto Match
			}
			_, width = i.step(pos)
		}
		freeBitState(b)
		return nil
	}

Match:
	dstCap = append(dstCap, b.matchcap...)
	freeBitState(b)
	return dstCap
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package regexp
import (
"io"
"regexp/syntax"
"sync"
)
// A queue is a 'sparse array' holding pending threads of execution.
// See https://research.swtch.com/2008/03/using-uninitialized-memory-for-fun-and.html
type queue struct {
	sparse []uint32 // may contain stale values; validated against dense on lookup
	dense  []entry  // queued entries, in insertion (priority) order
}
// An entry is an entry on a queue.
// It holds both the instruction pc and the actual thread.
// Some queue entries are just place holders so that the machine
// knows it has considered that pc. Such entries have t == nil.
type entry struct {
	pc uint32  // program counter this entry represents
	t  *thread // running thread, or nil for a placeholder
}
// A thread is the state of a single path through the machine:
// an instruction and a corresponding capture array.
// See https://swtch.com/~rsc/regexp/regexp2.html
type thread struct {
	inst *syntax.Inst // current instruction for this path
	cap  []int        // capture positions recorded along this path
}
// A machine holds all the state during an NFA simulation for p.
type machine struct {
	re       *Regexp      // corresponding Regexp
	p        *syntax.Prog // compiled program
	q0, q1   queue        // two queues for runq, nextq
	pool     []*thread    // pool of available threads
	matched  bool         // whether a match was found
	matchcap []int        // capture information for the match

	inputs inputs // cached input adapters, to avoid per-call allocation
}
// inputs caches one reusable instance of each input adapter so that
// repeated executions do not allocate.
type inputs struct {
	// cached inputs, to avoid allocation
	bytes  inputBytes
	string inputString
	reader inputReader
}

// newBytes returns the cached inputBytes adapter wrapping b.
func (in *inputs) newBytes(b []byte) input {
	in.bytes.str = b
	return &in.bytes
}

// newString returns the cached inputString adapter wrapping s.
func (in *inputs) newString(s string) input {
	in.string.str = s
	return &in.string
}

// newReader returns the cached inputReader adapter wrapping r,
// resetting its position and end-of-text state.
func (in *inputs) newReader(r io.RuneReader) input {
	in.reader.r = r
	in.reader.atEOT = false
	in.reader.pos = 0
	return &in.reader
}

// clear drops the reference held by whichever adapter is in use.
// Only one adapter can be active at a time, so only one write
// (and its pointer write barrier) is paid.
func (in *inputs) clear() {
	switch {
	case in.bytes.str != nil:
		in.bytes.str = nil
	case in.reader.r != nil:
		in.reader.r = nil
	default:
		in.string.str = ""
	}
}

// init selects the adapter for whichever of r, b, s is provided and
// returns it with the input length (0 for a reader, whose length is
// not known in advance).
func (in *inputs) init(r io.RuneReader, b []byte, s string) (input, int) {
	switch {
	case r != nil:
		return in.newReader(r), 0
	case b != nil:
		return in.newBytes(b), len(b)
	default:
		return in.newString(s), len(s)
	}
}
// init resizes the capture slice of every pooled thread, and the
// machine's own match capture slice, to hold ncap slots.
func (m *machine) init(ncap int) {
	for idx := range m.pool {
		m.pool[idx].cap = m.pool[idx].cap[:ncap]
	}
	m.matchcap = m.matchcap[:ncap]
}
// alloc allocates a new thread with the given instruction.
// It uses the free pool if possible.
func (m *machine) alloc(i *syntax.Inst) *thread {
	var t *thread
	if last := len(m.pool) - 1; last >= 0 {
		// Pop a recycled thread off the pool.
		t = m.pool[last]
		m.pool = m.pool[:last]
	} else {
		// Pool empty: allocate a fresh thread with a capture slice
		// sized like the machine's match captures.
		t = &thread{cap: make([]int, len(m.matchcap), cap(m.matchcap))}
	}
	t.inst = i
	return t
}
// A lazyFlag is a lazily-evaluated syntax.EmptyOp,
// for checking zero-width flags like ^ $ \A \z \B \b.
// It records the pair of relevant runes and does not
// determine the implied flags until absolutely necessary
// (most of the time, that means never).
type lazyFlag uint64

// newLazyFlag packs the rune before the position (r1) into the high
// 32 bits and the rune at the position (r2) into the low 32 bits.
// A negative rune (no rune: beginning/end of text) survives the
// round trip via the uint32 conversion.
func newLazyFlag(r1, r2 rune) lazyFlag {
	before := uint64(r1) << 32
	after := uint64(uint32(r2))
	return lazyFlag(before | after)
}
// match reports whether the rune pair recorded in f satisfies every
// zero-width condition in op. Each satisfied condition is cleared from
// op, with early returns when op reaches 0 so that only the cheap
// checks run in the common case. A rune value < 0 means "no rune"
// (beginning or end of text).
func (f lazyFlag) match(op syntax.EmptyOp) bool {
	if op == 0 {
		return true
	}
	r1 := rune(f >> 32) // rune immediately before the position
	if op&syntax.EmptyBeginLine != 0 {
		if r1 != '\n' && r1 >= 0 {
			return false
		}
		op &^= syntax.EmptyBeginLine
	}
	if op&syntax.EmptyBeginText != 0 {
		if r1 >= 0 {
			return false
		}
		op &^= syntax.EmptyBeginText
	}
	if op == 0 {
		return true
	}
	r2 := rune(f) // rune at the position (low 32 bits)
	if op&syntax.EmptyEndLine != 0 {
		if r2 != '\n' && r2 >= 0 {
			return false
		}
		op &^= syntax.EmptyEndLine
	}
	if op&syntax.EmptyEndText != 0 {
		if r2 >= 0 {
			return false
		}
		op &^= syntax.EmptyEndText
	}
	if op == 0 {
		return true
	}
	// Exactly one of \b and \B holds at any position, so clear the one
	// that is satisfied; any remaining bit in op means failure.
	if syntax.IsWordChar(r1) != syntax.IsWordChar(r2) {
		op &^= syntax.EmptyWordBoundary
	} else {
		op &^= syntax.EmptyNoWordBoundary
	}
	return op == 0
}
// match runs the machine over the input starting at pos.
// It reports whether a match was found.
// If so, m.matchcap holds the submatch information.
func (m *machine) match(i input, pos int) bool {
	startCond := m.re.cond
	if startCond == ^syntax.EmptyOp(0) { // impossible
		return false
	}
	m.matched = false
	for i := range m.matchcap {
		m.matchcap[i] = -1
	}
	runq, nextq := &m.q0, &m.q1
	// r is the rune at pos and r1 the one after it; width/width1 are
	// their encoded byte widths.
	r, r1 := endOfText, endOfText
	width, width1 := 0, 0
	r, width = i.step(pos)
	if r != endOfText {
		r1, width1 = i.step(pos + width)
	}
	// flag carries the zero-width conditions at the current position.
	var flag lazyFlag
	if pos == 0 {
		flag = newLazyFlag(-1, r)
	} else {
		flag = i.context(pos)
	}
	for {
		if len(runq.dense) == 0 {
			if startCond&syntax.EmptyBeginText != 0 && pos != 0 {
				// Anchored match, past beginning of text.
				break
			}
			if m.matched {
				// Have match; finished exploring alternatives.
				break
			}
			if len(m.re.prefix) > 0 && r1 != m.re.prefixRune && i.canCheckPrefix() {
				// Match requires literal prefix; fast search for it.
				advance := i.index(m.re, pos)
				if advance < 0 {
					break
				}
				pos += advance
				r, width = i.step(pos)
				r1, width1 = i.step(pos + width)
			}
		}
		if !m.matched {
			// Seed a thread starting at the current position; once a
			// match exists, no later-starting thread can win (leftmost).
			if len(m.matchcap) > 0 {
				m.matchcap[0] = pos
			}
			m.add(runq, uint32(m.p.Start), pos, m.matchcap, &flag, nil)
		}
		flag = newLazyFlag(r, r1)
		m.step(runq, nextq, pos, pos+width, r, &flag)
		if width == 0 {
			break
		}
		if len(m.matchcap) == 0 && m.matched {
			// Found a match and not paying attention
			// to where it is, so any match will do.
			break
		}
		pos += width
		r, width = r1, width1
		if r != endOfText {
			r1, width1 = i.step(pos + width)
		}
		runq, nextq = nextq, runq
	}
	m.clear(nextq)
	return m.matched
}
// clear frees all threads on the thread queue.
func (m *machine) clear(q *queue) {
	for idx := range q.dense {
		// Placeholder entries (nil thread) have nothing to recycle.
		if t := q.dense[idx].t; t != nil {
			m.pool = append(m.pool, t)
		}
	}
	q.dense = q.dense[:0]
}
// step executes one step of the machine, running each of the threads
// on runq and appending new threads to nextq.
// The step processes the rune c (which may be endOfText),
// which starts at position pos and ends at nextPos.
// nextCond gives the setting for the empty-width flags after c.
func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond *lazyFlag) {
	longest := m.re.longest
	for j := 0; j < len(runq.dense); j++ {
		d := &runq.dense[j]
		t := d.t
		if t == nil {
			continue
		}
		if longest && m.matched && len(t.cap) > 0 && m.matchcap[0] < t.cap[0] {
			// Leftmost-longest mode: an existing match starts earlier
			// than this thread, so this thread cannot win; recycle it.
			m.pool = append(m.pool, t)
			continue
		}
		i := t.inst
		add := false
		switch i.Op {
		default:
			panic("bad inst")
		case syntax.InstMatch:
			if len(t.cap) > 0 && (!longest || !m.matched || m.matchcap[1] < pos) {
				t.cap[1] = pos
				copy(m.matchcap, t.cap)
			}
			if !longest {
				// First-match mode: cut off all lower-priority threads.
				for _, d := range runq.dense[j+1:] {
					if d.t != nil {
						m.pool = append(m.pool, d.t)
					}
				}
				runq.dense = runq.dense[:0]
			}
			m.matched = true
		case syntax.InstRune:
			add = i.MatchRune(c)
		case syntax.InstRune1:
			add = c == i.Rune[0]
		case syntax.InstRuneAny:
			add = true
		case syntax.InstRuneAnyNotNL:
			add = c != '\n'
		}
		if add {
			// c matched: advance this thread to i.Out on nextq.
			// add returns t if it did not consume it for reuse.
			t = m.add(nextq, i.Out, nextPos, t.cap, nextCond, t)
		}
		if t != nil {
			m.pool = append(m.pool, t)
		}
	}
	runq.dense = runq.dense[:0]
}
// add adds an entry to q for pc, unless the q already has such an entry.
// It also recursively adds an entry for all instructions reachable from pc by following
// empty-width conditions satisfied by cond. pos gives the current position
// in the input.
//
// The thread t, if non-nil, is available for reuse. add returns a thread
// (possibly t) that the caller should recycle, or nil if t was consumed.
func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond *lazyFlag, t *thread) *thread {
Again:
	if pc == 0 {
		return t
	}
	// Sparse-set membership test: q.sparse may hold stale values, so the
	// dense entry it points at must be cross-checked.
	if j := q.sparse[pc]; j < uint32(len(q.dense)) && q.dense[j].pc == pc {
		return t
	}

	j := len(q.dense)
	q.dense = q.dense[:j+1]
	d := &q.dense[j]
	d.t = nil
	d.pc = pc
	q.sparse[pc] = uint32(j)

	i := &m.p.Inst[pc]
	switch i.Op {
	default:
		panic("unhandled")
	case syntax.InstFail:
		// nothing
	case syntax.InstAlt, syntax.InstAltMatch:
		t = m.add(q, i.Out, pos, cap, cond, t)
		pc = i.Arg
		goto Again
	case syntax.InstEmptyWidth:
		if cond.match(syntax.EmptyOp(i.Arg)) {
			pc = i.Out
			goto Again
		}
	case syntax.InstNop:
		pc = i.Out
		goto Again
	case syntax.InstCapture:
		if int(i.Arg) < len(cap) {
			// Temporarily record pos in the capture slot while the
			// continuation is explored, then restore the old value.
			opos := cap[i.Arg]
			cap[i.Arg] = pos
			m.add(q, i.Out, pos, cap, cond, nil)
			cap[i.Arg] = opos
		} else {
			pc = i.Out
			goto Again
		}
	case syntax.InstMatch, syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
		if t == nil {
			t = m.alloc(i)
		} else {
			t.inst = i
		}
		// Copy captures only when t's slice is not already cap itself.
		if len(cap) > 0 && &t.cap[0] != &cap[0] {
			copy(t.cap, cap)
		}
		d.t = t
		t = nil
	}
	return t
}
// onePassMachine holds the per-execution state for the one-pass engine:
// cached input adapters and the capture slots for the single match.
type onePassMachine struct {
	inputs   inputs
	matchcap []int
}
// onePassPool recycles onePassMachine values across executions.
var onePassPool sync.Pool

// newOnePassMachine returns a machine for one-pass execution,
// reusing a pooled one when available.
func newOnePassMachine() *onePassMachine {
	if m, ok := onePassPool.Get().(*onePassMachine); ok {
		return m
	}
	return new(onePassMachine)
}
// freeOnePassMachine returns om to the pool, first dropping its cached
// input references so the underlying data can be garbage collected.
func freeOnePassMachine(om *onePassMachine) {
	om.inputs.clear()
	onePassPool.Put(om)
}
// doOnePass implements r.doExecute using the one-pass execution engine.
func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap int, dstCap []int) []int {
	startCond := re.cond
	if startCond == ^syntax.EmptyOp(0) { // impossible
		return nil
	}

	m := newOnePassMachine()
	// Size the pooled machine's capture slice for this call.
	if cap(m.matchcap) < ncap {
		m.matchcap = make([]int, ncap)
	} else {
		m.matchcap = m.matchcap[:ncap]
	}

	matched := false
	for i := range m.matchcap {
		m.matchcap[i] = -1
	}

	i, _ := m.inputs.init(ir, ib, is)

	// r is the current rune, r1 the next; width/width1 are byte widths.
	r, r1 := endOfText, endOfText
	width, width1 := 0, 0
	r, width = i.step(pos)
	if r != endOfText {
		r1, width1 = i.step(pos + width)
	}
	var flag lazyFlag
	if pos == 0 {
		flag = newLazyFlag(-1, r)
	} else {
		flag = i.context(pos)
	}
	pc := re.onepass.Start
	inst := &re.onepass.Inst[pc]
	// If there is a simple literal prefix, skip over it.
	if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) &&
		len(re.prefix) > 0 && i.canCheckPrefix() {
		// Match requires literal prefix; fast search for it.
		if !i.hasPrefix(re) {
			goto Return
		}
		pos += len(re.prefix)
		r, width = i.step(pos)
		r1, width1 = i.step(pos + width)
		flag = i.context(pos)
		pc = int(re.prefixEnd)
	}
	for {
		inst = &re.onepass.Inst[pc]
		pc = int(inst.Out)
		switch inst.Op {
		default:
			panic("bad inst")
		case syntax.InstMatch:
			matched = true
			if len(m.matchcap) > 0 {
				// One-pass programs are anchored (^), so the match
				// always starts at position 0.
				m.matchcap[0] = 0
				m.matchcap[1] = pos
			}
			goto Return
		case syntax.InstRune:
			if !inst.MatchRune(r) {
				goto Return
			}
		case syntax.InstRune1:
			if r != inst.Rune[0] {
				goto Return
			}
		case syntax.InstRuneAny:
			// Nothing
		case syntax.InstRuneAnyNotNL:
			if r == '\n' {
				goto Return
			}
		// peek at the input rune to see which branch of the Alt to take
		case syntax.InstAlt, syntax.InstAltMatch:
			pc = int(onePassNext(inst, r))
			continue
		case syntax.InstFail:
			goto Return
		case syntax.InstNop:
			continue
		case syntax.InstEmptyWidth:
			if !flag.match(syntax.EmptyOp(inst.Arg)) {
				goto Return
			}
			continue
		case syntax.InstCapture:
			if int(inst.Arg) < len(m.matchcap) {
				m.matchcap[inst.Arg] = pos
			}
			continue
		}
		// A rune-consuming instruction succeeded: advance the input.
		if width == 0 {
			break
		}
		flag = newLazyFlag(r, r1)
		pos += width
		r, width = r1, width1
		if r != endOfText {
			r1, width1 = i.step(pos + width)
		}
	}

Return:
	if !matched {
		freeOnePassMachine(m)
		return nil
	}

	dstCap = append(dstCap, m.matchcap...)
	freeOnePassMachine(m)
	return dstCap
}
// doMatch reports whether either r, b or s match the regexp.
func (re *Regexp) doMatch(r io.RuneReader, b []byte, s string) bool {
	// A non-nil capture slice from doExecute signals a match.
	caps := re.doExecute(r, b, s, 0, 0, nil)
	return caps != nil
}
// doExecute finds the leftmost match in the input, appends the position
// of its subexpressions to dstCap and returns dstCap.
//
// nil is returned if no matches are found and non-nil if matches are found.
//
// It dispatches to the fastest applicable engine: one-pass, then the
// backtracker for short inputs, then the general NFA simulation.
func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap int, dstCap []int) []int {
	if dstCap == nil {
		// Make sure 'return dstCap' is non-nil.
		dstCap = arrayNoInts[:0:0]
	}

	if r == nil && len(b)+len(s) < re.minInputLen {
		// Input is shorter than the shortest possible match.
		return nil
	}

	if re.onepass != nil {
		return re.doOnePass(r, b, s, pos, ncap, dstCap)
	}
	if r == nil && len(b)+len(s) < re.maxBitStateLen {
		return re.backtrack(b, s, pos, ncap, dstCap)
	}

	// General case: NFA simulation with a pooled machine; the machine is
	// returned to the pool on every path.
	m := re.get()
	i, _ := m.inputs.init(r, b, s)

	m.init(ncap)
	if !m.match(i, pos) {
		re.put(m)
		return nil
	}

	dstCap = append(dstCap, m.matchcap...)
	re.put(m)
	return dstCap
}
// arrayNoInts backs the non-nil zero-length slice that doExecute
// substitutes when the caller passes a nil dstCap with ncap=0, so a
// successful match always returns a non-nil result.
var arrayNoInts [0]int
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package regexp
import (
"regexp/syntax"
"slices"
"strings"
"unicode"
"unicode/utf8"
)
// "One-pass" regexp execution.
// Some regexps can be analyzed to determine that they never need
// backtracking: they are guaranteed to run in one pass over the string
// without bothering to save all the usual NFA state.
// Detect those and execute them more quickly.

// A onePassProg is a compiled one-pass regular expression program.
// It is the same as syntax.Prog except for the use of onePassInst.
type onePassProg struct {
	Inst   []onePassInst
	Start  int // index of start instruction
	NumCap int // number of InstCapture insts in re
}
// A onePassInst is a single instruction in a one-pass regular expression program.
// It is the same as syntax.Inst except for the new 'Next' field.
type onePassInst struct {
	syntax.Inst

	// Next holds the dispatch targets built by mergeRuneSets: when a
	// rune matches the pair at Rune index i, Next[i/2] is the next pc.
	Next []uint32
}
// onePassPrefix returns a literal string that all matches for the
// regexp must start with. Complete is true if the prefix
// is the entire match. Pc is the index of the last rune instruction
// in the string. The onePassPrefix skips over the mandatory
// EmptyBeginText.
func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) {
	i := &p.Inst[p.Start]
	// One-pass programs start with ^ (EmptyBeginText); anything else
	// yields no prefix.
	if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 {
		return "", i.Op == syntax.InstMatch, uint32(p.Start)
	}
	pc = i.Out
	i = &p.Inst[pc]
	for i.Op == syntax.InstNop {
		pc = i.Out
		i = &p.Inst[pc]
	}
	// Avoid allocation of buffer if prefix is empty.
	if iop(i) != syntax.InstRune || len(i.Rune) != 1 {
		return "", i.Op == syntax.InstMatch, uint32(p.Start)
	}

	// Have prefix; gather characters. Stop at case-folded or RuneError
	// instructions, which do not denote a single literal rune.
	var buf strings.Builder
	for iop(i) == syntax.InstRune && len(i.Rune) == 1 && syntax.Flags(i.Arg)&syntax.FoldCase == 0 && i.Rune[0] != utf8.RuneError {
		buf.WriteRune(i.Rune[0])
		pc, i = i.Out, &p.Inst[i.Out]
	}
	// The prefix is the whole match when it is followed directly by $
	// and then InstMatch.
	if i.Op == syntax.InstEmptyWidth &&
		syntax.EmptyOp(i.Arg)&syntax.EmptyEndText != 0 &&
		p.Inst[i.Out].Op == syntax.InstMatch {
		complete = true
	}
	return buf.String(), complete, pc
}
// onePassNext selects the next actionable state of the prog, based on the input character.
// It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine.
// One of the alternates may ultimately lead without input to end of line. If the instruction
// is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next.
func onePassNext(i *onePassInst, r rune) uint32 {
	if idx := i.MatchRunePos(r); idx >= 0 {
		// r falls in one of the dispatch rune ranges.
		return i.Next[idx]
	}
	if i.Op == syntax.InstAltMatch {
		// No range matched, but an empty path to InstMatch exists.
		return i.Out
	}
	return 0
}
func iop(i *syntax.Inst) syntax.InstOp {
op := i.Op
switch op {
case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
op = syntax.InstRune
}
return op
}
// Sparse Array implementation is used as a queueOnePass.
type queueOnePass struct {
	sparse          []uint32
	dense           []uint32
	size, nextIndex uint32
}

// empty reports whether every queued element has already been consumed.
func (q *queueOnePass) empty() bool {
	return q.nextIndex >= q.size
}

// next pops and returns the oldest unconsumed element.
func (q *queueOnePass) next() uint32 {
	v := q.dense[q.nextIndex]
	q.nextIndex++
	return v
}

// clear empties the queue. Stale sparse entries are harmless because
// contains cross-checks them against dense and size.
func (q *queueOnePass) clear() {
	q.size, q.nextIndex = 0, 0
}

// contains reports whether u is currently queued.
func (q *queueOnePass) contains(u uint32) bool {
	return u < uint32(len(q.sparse)) &&
		q.sparse[u] < q.size &&
		q.dense[q.sparse[u]] == u
}

// insert enqueues u unless it is already present.
func (q *queueOnePass) insert(u uint32) {
	if q.contains(u) {
		return
	}
	q.insertNew(u)
}

// insertNew enqueues u without a membership check; out-of-range values
// are silently ignored.
func (q *queueOnePass) insertNew(u uint32) {
	if u >= uint32(len(q.sparse)) {
		return
	}
	at := q.size
	q.sparse[u] = at
	q.dense[at] = u
	q.size = at + 1
}

// newQueue returns a queue able to hold values in [0, size).
func newQueue(size int) *queueOnePass {
	return &queueOnePass{
		sparse: make([]uint32, size),
		dense:  make([]uint32, size),
	}
}
// mergeRuneSets merges two non-intersecting runesets, and returns the merged result,
// and a NextIp array. The idea is that if a rune matches the OnePassRunes at index
// i, NextIp[i/2] is the target. If the input sets intersect, an empty runeset and a
// NextIp array with the single element mergeFailed is returned.
// The code assumes that both inputs contain ordered and non-intersecting rune pairs.
const mergeFailed = uint32(0xffffffff)

var (
	noRune = []rune{}
	noNext = []uint32{mergeFailed}
)

func mergeRuneSets(leftRunes, rightRunes *[]rune, leftPC, rightPC uint32) ([]rune, []uint32) {
	leftLen := len(*leftRunes)
	rightLen := len(*rightRunes)
	// Rune sets are [lo, hi] pairs, so their lengths must be even.
	if leftLen&0x1 != 0 || rightLen&0x1 != 0 {
		panic("mergeRuneSets odd length []rune")
	}
	var (
		lx, rx int
	)
	merged := make([]rune, 0)
	next := make([]uint32, 0)
	ok := true
	// NOTE(review): this defer mutates locals after the unnamed results
	// have already been evaluated, so it cannot affect the returned
	// values; it appears to be vestigial.
	defer func() {
		if !ok {
			merged = nil
			next = nil
		}
	}()

	ix := -1
	// extend appends the next [lo, hi] pair from newArray and records pc
	// as its dispatch target. It fails (returns false) when the new pair
	// starts at or below the end of the previously merged pair, i.e. the
	// two input sets intersect.
	extend := func(newLow *int, newArray *[]rune, pc uint32) bool {
		if ix > 0 && (*newArray)[*newLow] <= merged[ix] {
			return false
		}
		merged = append(merged, (*newArray)[*newLow], (*newArray)[*newLow+1])
		*newLow += 2
		ix += 2
		next = append(next, pc)
		return true
	}

	// Standard ordered merge over the two pair lists.
	for lx < leftLen || rx < rightLen {
		switch {
		case rx >= rightLen:
			ok = extend(&lx, leftRunes, leftPC)
		case lx >= leftLen:
			ok = extend(&rx, rightRunes, rightPC)
		case (*rightRunes)[rx] < (*leftRunes)[lx]:
			ok = extend(&rx, rightRunes, rightPC)
		default:
			ok = extend(&lx, leftRunes, leftPC)
		}
		if !ok {
			return noRune, noNext
		}
	}
	return merged, next
}
// cleanupOnePass drops working memory, and restores certain shortcut instructions.
func cleanupOnePass(prog *onePassProg, original *syntax.Prog) {
	for ix := range original.Inst {
		switch original.Inst[ix].Op {
		case syntax.InstAlt, syntax.InstAltMatch, syntax.InstRune:
			// Keep the dispatch tables makeOnePass built for these.
		case syntax.InstCapture, syntax.InstEmptyWidth, syntax.InstNop, syntax.InstMatch, syntax.InstFail:
			prog.Inst[ix].Next = nil
		case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL:
			// Restore the original shortcut instruction verbatim.
			prog.Inst[ix].Next = nil
			prog.Inst[ix] = onePassInst{Inst: original.Inst[ix]}
		}
	}
}
// onePassCopy creates a copy of the original Prog, as we'll be modifying it.
func onePassCopy(prog *syntax.Prog) *onePassProg {
	p := &onePassProg{
		Start:  prog.Start,
		NumCap: prog.NumCap,
		Inst:   make([]onePassInst, len(prog.Inst)),
	}
	for i, inst := range prog.Inst {
		p.Inst[i] = onePassInst{Inst: inst}
	}

	// rewrites one or more common Prog constructs that enable some otherwise
	// non-onepass Progs to be onepass. A:BD (for example) means an InstAlt at
	// ip A, that points to ips B & C.
	// A:BC + B:DA => A:BC + B:CD
	// A:BC + B:DC => A:DC + B:DC
	for pc := range p.Inst {
		switch p.Inst[pc].Op {
		default:
			continue
		case syntax.InstAlt, syntax.InstAltMatch:
			// A:Bx + B:Ay
			// Pointers into the instruction's Out/Arg fields let the
			// code patch whichever leg turns out to be the Alt.
			p_A_Other := &p.Inst[pc].Out
			p_A_Alt := &p.Inst[pc].Arg
			// make sure a target is another Alt
			instAlt := p.Inst[*p_A_Alt]
			if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) {
				p_A_Alt, p_A_Other = p_A_Other, p_A_Alt
				instAlt = p.Inst[*p_A_Alt]
				if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) {
					continue
				}
			}
			instOther := p.Inst[*p_A_Other]
			// Analyzing both legs pointing to Alts is for another day
			if instOther.Op == syntax.InstAlt || instOther.Op == syntax.InstAltMatch {
				// too complicated
				continue
			}
			// simple empty transition loop
			// A:BC + B:DA => A:BC + B:DC
			p_B_Alt := &p.Inst[*p_A_Alt].Out
			p_B_Other := &p.Inst[*p_A_Alt].Arg
			patch := false
			if instAlt.Out == uint32(pc) {
				patch = true
			} else if instAlt.Arg == uint32(pc) {
				patch = true
				p_B_Alt, p_B_Other = p_B_Other, p_B_Alt
			}
			if patch {
				*p_B_Alt = *p_A_Other
			}

			// empty transition to common target
			// A:BC + B:DC => A:DC + B:DC
			if *p_A_Other == *p_B_Alt {
				*p_A_Alt = *p_B_Other
			}
		}
	}
	return p
}
// anyRuneNotNL is the rune-pair set for '.' excluding newline:
// [0, '\n'-1] and ['\n'+1, MaxRune].
var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune}

// anyRune is the rune-pair set matching every rune: [0, MaxRune].
var anyRune = []rune{0, unicode.MaxRune}
// makeOnePass creates a onepass Prog, if possible. It is possible if at any alt,
// the match engine can always tell which branch to take. The routine may modify
// p if it is turned into a onepass Prog. If it isn't possible for this to be a
// onepass Prog, the Prog nil is returned. makeOnePass is recursive
// to the size of the Prog.
func makeOnePass(p *onePassProg) *onePassProg {
	// If the machine is very long, it's not worth the time to check if we can use one pass.
	if len(p.Inst) >= 1000 {
		return nil
	}

	var (
		instQueue    = newQueue(len(p.Inst))
		visitQueue   = newQueue(len(p.Inst))
		check        func(uint32, []bool) bool
		onePassRunes = make([][]rune, len(p.Inst))
	)

	// check that paths from Alt instructions are unambiguous, and rebuild the new
	// program as a onepass program
	//
	// m[pc] records whether pc can reach InstMatch without consuming input.
	check = func(pc uint32, m []bool) (ok bool) {
		ok = true
		inst := &p.Inst[pc]
		if visitQueue.contains(pc) {
			return
		}
		visitQueue.insert(pc)
		switch inst.Op {
		case syntax.InstAlt, syntax.InstAltMatch:
			ok = check(inst.Out, m) && check(inst.Arg, m)
			// check no-input paths to InstMatch
			matchOut := m[inst.Out]
			matchArg := m[inst.Arg]
			if matchOut && matchArg {
				// Both legs match on empty input: ambiguous.
				ok = false
				break
			}
			// Match on empty goes in inst.Out
			if matchArg {
				inst.Out, inst.Arg = inst.Arg, inst.Out
				matchOut, matchArg = matchArg, matchOut
			}
			if matchOut {
				m[pc] = true
				inst.Op = syntax.InstAltMatch
			}

			// build a dispatch operator from the two legs of the alt.
			onePassRunes[pc], inst.Next = mergeRuneSets(
				&onePassRunes[inst.Out], &onePassRunes[inst.Arg], inst.Out, inst.Arg)
			if len(inst.Next) > 0 && inst.Next[0] == mergeFailed {
				// The legs' rune sets intersect: not one-pass.
				ok = false
				break
			}
		case syntax.InstCapture, syntax.InstNop:
			ok = check(inst.Out, m)
			m[pc] = m[inst.Out]
			// pass matching runes back through these no-ops.
			onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...)
			inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
			for i := range inst.Next {
				inst.Next[i] = inst.Out
			}
		case syntax.InstEmptyWidth:
			ok = check(inst.Out, m)
			m[pc] = m[inst.Out]
			onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...)
			inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
			for i := range inst.Next {
				inst.Next[i] = inst.Out
			}
		case syntax.InstMatch, syntax.InstFail:
			m[pc] = inst.Op == syntax.InstMatch
		case syntax.InstRune:
			m[pc] = false
			if len(inst.Next) > 0 {
				// Already processed on an earlier pass.
				break
			}
			instQueue.insert(inst.Out)
			if len(inst.Rune) == 0 {
				onePassRunes[pc] = []rune{}
				inst.Next = []uint32{inst.Out}
				break
			}
			runes := make([]rune, 0)
			if len(inst.Rune) == 1 && syntax.Flags(inst.Arg)&syntax.FoldCase != 0 {
				// Expand a single case-folded rune into all its folds.
				r0 := inst.Rune[0]
				runes = append(runes, r0, r0)
				for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) {
					runes = append(runes, r1, r1)
				}
				slices.Sort(runes)
			} else {
				runes = append(runes, inst.Rune...)
			}
			onePassRunes[pc] = runes
			inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
			for i := range inst.Next {
				inst.Next[i] = inst.Out
			}
			inst.Op = syntax.InstRune
		case syntax.InstRune1:
			m[pc] = false
			if len(inst.Next) > 0 {
				break
			}
			instQueue.insert(inst.Out)
			runes := []rune{}
			// expand case-folded runes
			if syntax.Flags(inst.Arg)&syntax.FoldCase != 0 {
				r0 := inst.Rune[0]
				runes = append(runes, r0, r0)
				for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) {
					runes = append(runes, r1, r1)
				}
				slices.Sort(runes)
			} else {
				runes = append(runes, inst.Rune[0], inst.Rune[0])
			}
			onePassRunes[pc] = runes
			inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
			for i := range inst.Next {
				inst.Next[i] = inst.Out
			}
			inst.Op = syntax.InstRune
		case syntax.InstRuneAny:
			m[pc] = false
			if len(inst.Next) > 0 {
				break
			}
			instQueue.insert(inst.Out)
			onePassRunes[pc] = append([]rune{}, anyRune...)
			inst.Next = []uint32{inst.Out}
		case syntax.InstRuneAnyNotNL:
			m[pc] = false
			if len(inst.Next) > 0 {
				break
			}
			instQueue.insert(inst.Out)
			onePassRunes[pc] = append([]rune{}, anyRuneNotNL...)
			inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
			for i := range inst.Next {
				inst.Next[i] = inst.Out
			}
		}
		return
	}

	instQueue.clear()
	instQueue.insert(uint32(p.Start))
	m := make([]bool, len(p.Inst))
	for !instQueue.empty() {
		// visitQueue guards each check pass against cycles.
		visitQueue.clear()
		pc := instQueue.next()
		if !check(pc, m) {
			p = nil
			break
		}
	}
	if p != nil {
		// Install the merged dispatch rune sets into the instructions.
		for i := range p.Inst {
			p.Inst[i].Rune = onePassRunes[i]
		}
	}
	return p
}
// compileOnePass returns a new *syntax.Prog suitable for onePass execution if the original Prog
// can be recharacterized as a one-pass regexp program, or syntax.nil if the
// Prog cannot be converted. For a one pass prog, the fundamental condition that must
// be true is: at any InstAlt, there must be no ambiguity about what branch to take.
func compileOnePass(prog *syntax.Prog) (p *onePassProg) {
	if prog.Start == 0 {
		return nil
	}
	// onepass regexp is anchored
	if prog.Inst[prog.Start].Op != syntax.InstEmptyWidth ||
		syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText {
		return nil
	}
	// hasAlt records whether the program contains any alternation.
	hasAlt := false
	for _, inst := range prog.Inst {
		if inst.Op == syntax.InstAlt || inst.Op == syntax.InstAltMatch {
			hasAlt = true
			break
		}
	}
	// If we have alternates, every instruction leading to InstMatch must be EmptyEndText.
	// Also, any match on empty text must be $.
	for _, inst := range prog.Inst {
		opOut := prog.Inst[inst.Out].Op
		switch inst.Op {
		default:
			if opOut == syntax.InstMatch && hasAlt {
				return nil
			}
		case syntax.InstAlt, syntax.InstAltMatch:
			if opOut == syntax.InstMatch || prog.Inst[inst.Arg].Op == syntax.InstMatch {
				return nil
			}
		case syntax.InstEmptyWidth:
			if opOut == syntax.InstMatch {
				if syntax.EmptyOp(inst.Arg)&syntax.EmptyEndText == syntax.EmptyEndText {
					continue
				}
				return nil
			}
		}
	}
	// Creates a slightly optimized copy of the original Prog
	// that cleans up some Prog idioms that block valid onepass programs
	p = onePassCopy(prog)

	// checkAmbiguity on InstAlts, build onepass Prog if possible
	p = makeOnePass(p)

	if p != nil {
		cleanupOnePass(p, prog)
	}
	return p
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package regexp implements regular expression search.
//
// The syntax of the regular expressions accepted is the same
// general syntax used by Perl, Python, and other languages.
// More precisely, it is the syntax accepted by RE2 and described at
// https://golang.org/s/re2syntax, except for \C.
// For an overview of the syntax, see the [regexp/syntax] package.
//
// The regexp implementation provided by this package is
// guaranteed to run in time linear in the size of the input.
// (This is a property not guaranteed by most open source
// implementations of regular expressions.) For more information
// about this property, see https://swtch.com/~rsc/regexp/regexp1.html
// or any book about automata theory.
//
// All characters are UTF-8-encoded code points.
// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence
// is treated as if it encoded utf8.RuneError (U+FFFD).
//
// There are 16 methods of [Regexp] that match a regular expression and identify
// the matched text. Their names are matched by this regular expression:
//
// Find(All)?(String)?(Submatch)?(Index)?
//
// If 'All' is present, the routine matches successive non-overlapping
// matches of the entire expression. Empty matches abutting a preceding
// match are ignored. The return value is a slice containing the successive
// return values of the corresponding non-'All' routine. These routines take
// an extra integer argument, n. If n >= 0, the function returns at most n
// matches/submatches; otherwise, it returns all of them.
//
// If 'String' is present, the argument is a string; otherwise it is a slice
// of bytes; return values are adjusted as appropriate.
//
// If 'Submatch' is present, the return value is a slice identifying the
// successive submatches of the expression. Submatches are matches of
// parenthesized subexpressions (also known as capturing groups) within the
// regular expression, numbered from left to right in order of opening
// parenthesis. Submatch 0 is the match of the entire expression, submatch 1 is
// the match of the first parenthesized subexpression, and so on.
//
// If 'Index' is present, matches and submatches are identified by byte index
// pairs within the input string: result[2*n:2*n+2] identifies the indexes of
// the nth submatch. The pair for n==0 identifies the match of the entire
// expression. If 'Index' is not present, the match is identified by the text
// of the match/submatch. If an index is negative or text is nil, it means that
// subexpression did not match any string in the input. For 'String' versions
// an empty string means either no match or an empty match.
//
// There is also a subset of the methods that can be applied to text read from
// an [io.RuneReader]: [Regexp.MatchReader], [Regexp.FindReaderIndex],
// [Regexp.FindReaderSubmatchIndex].
//
// This set may grow. Note that regular expression matches may need to
// examine text beyond the text returned by a match, so the methods that
// match text from an [io.RuneReader] may read arbitrarily far into the input
// before returning.
//
// (There are a few other methods that do not match this pattern.)
package regexp
import (
"bytes"
"io"
"regexp/syntax"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// Regexp is the representation of a compiled regular expression.
// A Regexp is safe for concurrent use by multiple goroutines,
// except for configuration methods, such as [Regexp.Longest].
type Regexp struct {
	expr           string       // as passed to Compile
	prog           *syntax.Prog // compiled program
	onepass        *onePassProg // onepass program or nil
	numSubexp      int          // number of parenthesized subexpressions
	maxBitStateLen int          // max input length for the backtracker
	subexpNames    []string     // names of parenthesized subexpressions
	prefix         string       // required prefix in unanchored matches
	prefixBytes    []byte       // prefix, as a []byte
	prefixRune     rune         // first rune in prefix
	prefixEnd      uint32       // pc for last rune in prefix
	mpool          int          // pool for machines
	matchcap       int          // size of recorded match lengths
	prefixComplete bool         // prefix is the entire regexp
	cond           syntax.EmptyOp // empty-width conditions required at start of match
	minInputLen    int          // minimum length of the input in bytes

	// This field can be modified by the Longest method,
	// but it is otherwise read-only.
	longest bool // whether regexp prefers leftmost-longest match
}
// String returns the source text used to compile the regular expression.
func (re *Regexp) String() string {
	// re.expr is stored verbatim by compile.
	return re.expr
}
// Copy returns a new [Regexp] object copied from re.
// Calling [Regexp.Longest] on one copy does not affect another.
//
// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines,
// giving each goroutine its own copy helped to avoid lock contention.
// As of Go 1.12, using Copy is no longer necessary to avoid lock contention.
// Copy may still be appropriate if the reason for its use is to make
// two copies with different [Regexp.Longest] settings.
func (re *Regexp) Copy() *Regexp {
	clone := *re
	return &clone
}
// Compile parses a regular expression and returns, if successful,
// a [Regexp] object that can be used to match against text.
//
// When matching against text, the regexp returns a match that
// begins as early as possible in the input (leftmost), and among those
// it chooses the one that a backtracking search would have found first.
// This so-called leftmost-first matching is the same semantics
// that Perl, Python, and other implementations use, although this
// package implements it without the expense of backtracking.
// For POSIX leftmost-longest matching, see [CompilePOSIX].
func Compile(expr string) (*Regexp, error) {
	// Perl syntax, leftmost-first semantics.
	return compile(expr, syntax.Perl, false)
}
// CompilePOSIX is like [Compile] but restricts the regular expression
// to POSIX ERE (egrep) syntax and changes the match semantics to
// leftmost-longest.
//
// That is, when matching against text, the regexp returns a match that
// begins as early as possible in the input (leftmost), and among those
// it chooses a match that is as long as possible.
// This so-called leftmost-longest matching is the same semantics
// that early regular expression implementations used and that POSIX
// specifies.
//
// However, there can be multiple leftmost-longest matches, with different
// submatch choices, and here this package diverges from POSIX.
// Among the possible leftmost-longest matches, this package chooses
// the one that a backtracking search would have found first, while POSIX
// specifies that the match be chosen to maximize the length of the first
// subexpression, then the second, and so on from left to right.
// The POSIX rule is computationally prohibitive and not even well-defined.
// See https://swtch.com/~rsc/regexp/regexp2.html#posix for details.
func CompilePOSIX(expr string) (*Regexp, error) {
	// POSIX ERE syntax, leftmost-longest semantics.
	return compile(expr, syntax.POSIX, true)
}
// Longest makes future searches prefer the leftmost-longest match.
// That is, when matching against text, the regexp returns a match that
// begins as early as possible in the input (leftmost), and among those
// it chooses a match that is as long as possible.
// This method modifies the [Regexp] and may not be called concurrently
// with any other methods.
func (re *Regexp) Longest() {
	// Only a flag is set; the compiled program is not changed.
	re.longest = true
}
// compile is the shared implementation of Compile and CompilePOSIX:
// parse expr under the given syntax flags, compile the parse tree to a
// program, and wrap it in a Regexp with the requested match semantics.
func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) {
	re, err := syntax.Parse(expr, mode)
	if err != nil {
		return nil, err
	}
	// Record capture information before Simplify, which returns a
	// (possibly rewritten) tree.
	maxCap := re.MaxCap()
	capNames := re.CapNames()
	re = re.Simplify()
	prog, err := syntax.Compile(re)
	if err != nil {
		return nil, err
	}
	// Always reserve at least two capture slots, for the bounds of the
	// overall match ($0).
	matchcap := prog.NumCap
	if matchcap < 2 {
		matchcap = 2
	}
	regexp := &Regexp{
		expr:        expr,
		prog:        prog,
		onepass:     compileOnePass(prog),
		numSubexp:   maxCap,
		subexpNames: capNames,
		cond:        prog.StartCond(),
		longest:     longest,
		matchcap:    matchcap,
		minInputLen: minInputLen(re),
	}
	// The literal-prefix information comes from different places depending
	// on whether the one-pass engine is usable for this program.
	if regexp.onepass == nil {
		regexp.prefix, regexp.prefixComplete = prog.Prefix()
		regexp.maxBitStateLen = maxBitStateLen(prog)
	} else {
		regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog)
	}
	if regexp.prefix != "" {
		// TODO(rsc): Remove this allocation by adding
		// IndexString to package bytes.
		regexp.prefixBytes = []byte(regexp.prefix)
		regexp.prefixRune, _ = utf8.DecodeRuneInString(regexp.prefix)
	}
	// Select the machine pool whose queue size fits this program;
	// matchSize ends in the catch-all 0 entry (see matchPool below).
	n := len(prog.Inst)
	i := 0
	for matchSize[i] != 0 && matchSize[i] < n {
		i++
	}
	regexp.mpool = i
	return regexp, nil
}
// Pools of *machine for use during (*Regexp).doExecute,
// split up by the size of the execution queues.
// matchPool[i] machines have queue size matchSize[i].
// On a 64-bit system each queue entry is 16 bytes,
// so matchPool[0] has 16*2*128 = 4kB queues, etc.
// The final matchPool is a catch-all for very large queues.
var (
	// The trailing 0 is a sentinel meaning "no size limit";
	// compile picks the first entry that fits the program length.
	matchSize = [...]int{128, 512, 2048, 16384, 0}
	matchPool [len(matchSize)]sync.Pool
)
// get returns a machine to use for matching re.
// It uses the re's machine cache if possible, to avoid
// unnecessary allocation.
func (re *Regexp) get() *machine {
	m, ok := matchPool[re.mpool].Get().(*machine)
	if !ok {
		m = new(machine)
	}
	m.re = re
	m.p = re.prog
	if cap(m.matchcap) < re.matchcap {
		// The pooled machine was sized for a regexp with fewer capture
		// slots; grow its capture storage, and that of its cached
		// threads, to fit this one.
		m.matchcap = make([]int, re.matchcap)
		for _, t := range m.pool {
			t.cap = make([]int, re.matchcap)
		}
	}
	// Allocate queues if needed.
	// Or reallocate, for "large" match pool.
	n := matchSize[re.mpool]
	if n == 0 { // large pool
		n = len(re.prog.Inst)
	}
	if len(m.q0.sparse) < n {
		m.q0 = queue{make([]uint32, n), make([]entry, 0, n)}
		m.q1 = queue{make([]uint32, n), make([]entry, 0, n)}
	}
	return m
}
// put returns a machine to the correct machine pool.
func (re *Regexp) put(m *machine) {
	// Drop references so a pooled machine does not pin the Regexp,
	// its program, or the most recent inputs.
	m.re = nil
	m.p = nil
	m.inputs.clear()
	matchPool[re.mpool].Put(m)
}
// minInputLen walks the regexp to find the minimum length of any matchable input.
func minInputLen(re *syntax.Regexp) int {
switch re.Op {
default:
return 0
case syntax.OpAnyChar, syntax.OpAnyCharNotNL, syntax.OpCharClass:
return 1
case syntax.OpLiteral:
l := 0
for _, r := range re.Rune {
if r == utf8.RuneError {
l++
} else {
l += utf8.RuneLen(r)
}
}
return l
case syntax.OpCapture, syntax.OpPlus:
return minInputLen(re.Sub[0])
case syntax.OpRepeat:
return re.Min * minInputLen(re.Sub[0])
case syntax.OpConcat:
l := 0
for _, sub := range re.Sub {
l += minInputLen(sub)
}
return l
case syntax.OpAlternate:
l := minInputLen(re.Sub[0])
var lnext int
for _, sub := range re.Sub[1:] {
lnext = minInputLen(sub)
if lnext < l {
l = lnext
}
}
return l
}
}
// MustCompile is like [Compile] but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompile(str string) *Regexp {
	re, err := Compile(str)
	if err != nil {
		panic(`regexp: Compile(` + quote(str) + `): ` + err.Error())
	}
	return re
}
// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompilePOSIX(str string) *Regexp {
	re, err := CompilePOSIX(str)
	if err != nil {
		panic(`regexp: CompilePOSIX(` + quote(str) + `): ` + err.Error())
	}
	return re
}
// quote renders s for inclusion in a panic message, preferring the
// backquoted form when strconv allows it.
func quote(s string) string {
	if !strconv.CanBackquote(s) {
		return strconv.Quote(s)
	}
	return "`" + s + "`"
}
// NumSubexp returns the number of parenthesized subexpressions in this [Regexp].
func (re *Regexp) NumSubexp() int {
	return re.numSubexp // recorded from the parse tree at compile time
}
// SubexpNames returns the names of the parenthesized subexpressions
// in this [Regexp]. The name for the first sub-expression is names[1],
// so that if m is a match slice, the name for m[i] is SubexpNames()[i].
// Since the Regexp as a whole cannot be named, names[0] is always
// the empty string. The slice should not be modified.
func (re *Regexp) SubexpNames() []string {
	// The internal slice is returned directly; the doc comment obliges
	// callers not to modify it.
	return re.subexpNames
}
// SubexpIndex returns the index of the first subexpression with the given name,
// or -1 if there is no subexpression with that name.
//
// Note that multiple subexpressions can be written using the same name, as in
// (?P<bob>a+)(?P<bob>b+), which declares two subexpressions named "bob".
// In this case, SubexpIndex returns the index of the leftmost such subexpression
// in the regular expression.
func (re *Regexp) SubexpIndex(name string) int {
	if name == "" {
		// Unnamed groups cannot be looked up.
		return -1
	}
	for i, s := range re.subexpNames {
		if s == name {
			return i
		}
	}
	return -1
}
// endOfText is the sentinel rune returned by step and context when the
// requested position lies outside the input.
const endOfText rune = -1

// input abstracts different representations of the input text. It provides
// one-character lookahead.
type input interface {
	step(pos int) (r rune, width int) // advance one rune
	canCheckPrefix() bool             // can we look ahead without losing info?
	hasPrefix(re *Regexp) bool        // does the input begin with re's literal prefix?
	index(re *Regexp, pos int) int    // offset of re's literal prefix at or after pos, or -1
	context(pos int) lazyFlag         // empty-width conditions around pos
}
// inputString scans a string.
type inputString struct {
	str string
}

// step decodes the rune at pos, or reports endOfText past the end.
func (i *inputString) step(pos int) (rune, int) {
	if pos >= len(i.str) {
		return endOfText, 0
	}
	return utf8.DecodeRuneInString(i.str[pos:])
}

// canCheckPrefix reports that strings support random-access prefix checks.
func (i *inputString) canCheckPrefix() bool {
	return true
}

func (i *inputString) hasPrefix(re *Regexp) bool {
	return strings.HasPrefix(i.str, re.prefix)
}

func (i *inputString) index(re *Regexp, pos int) int {
	return strings.Index(i.str[pos:], re.prefix)
}

// context reports the runes on either side of pos for empty-width checks.
func (i *inputString) context(pos int) lazyFlag {
	before, after := endOfText, endOfText
	if uint(pos-1) < uint(len(i.str)) { // 0 < pos && pos <= len(i.str)
		before, _ = utf8.DecodeLastRuneInString(i.str[:pos])
	}
	if uint(pos) < uint(len(i.str)) { // 0 <= pos && pos < len(i.str)
		after, _ = utf8.DecodeRuneInString(i.str[pos:])
	}
	return newLazyFlag(before, after)
}
// inputBytes scans a byte slice.
type inputBytes struct {
	str []byte
}

// step decodes the rune at pos, or reports endOfText past the end.
func (i *inputBytes) step(pos int) (rune, int) {
	if pos >= len(i.str) {
		return endOfText, 0
	}
	return utf8.DecodeRune(i.str[pos:])
}

// canCheckPrefix reports that byte slices support random-access prefix checks.
func (i *inputBytes) canCheckPrefix() bool {
	return true
}

func (i *inputBytes) hasPrefix(re *Regexp) bool {
	return bytes.HasPrefix(i.str, re.prefixBytes)
}

func (i *inputBytes) index(re *Regexp, pos int) int {
	return bytes.Index(i.str[pos:], re.prefixBytes)
}

// context reports the runes on either side of pos for empty-width checks.
func (i *inputBytes) context(pos int) lazyFlag {
	before, after := endOfText, endOfText
	if uint(pos-1) < uint(len(i.str)) { // 0 < pos && pos <= len(i.str)
		before, _ = utf8.DecodeLastRune(i.str[:pos])
	}
	if uint(pos) < uint(len(i.str)) { // 0 <= pos && pos < len(i.str)
		after, _ = utf8.DecodeRune(i.str[pos:])
	}
	return newLazyFlag(before, after)
}
// inputReader scans a RuneReader.
type inputReader struct {
	r     io.RuneReader
	atEOT bool // a previous read failed; the input has ended
	pos   int  // number of bytes consumed from r so far
}

func (i *inputReader) step(pos int) (rune, int) {
	// A RuneReader can only be consumed sequentially; any request that
	// is not at the current position (and not already at EOT) fails.
	if !i.atEOT && pos != i.pos {
		return endOfText, 0
	}
	r, w, err := i.r.ReadRune()
	if err != nil {
		i.atEOT = true
		return endOfText, 0
	}
	i.pos += w
	return r, w
}

func (i *inputReader) canCheckPrefix() bool {
	// No lookahead is possible without consuming the reader.
	return false
}

func (i *inputReader) hasPrefix(re *Regexp) bool {
	return false
}

func (i *inputReader) index(re *Regexp, pos int) int {
	return -1
}

func (i *inputReader) context(pos int) lazyFlag {
	return 0 // not used
}
// LiteralPrefix returns a literal string that must begin any match
// of the regular expression re. It returns the boolean true if the
// literal string comprises the entire regular expression.
func (re *Regexp) LiteralPrefix() (prefix string, complete bool) {
	// Both values are computed once, in compile.
	return re.prefix, re.prefixComplete
}
// MatchReader reports whether the text returned by the [io.RuneReader]
// contains any match of the regular expression re.
func (re *Regexp) MatchReader(r io.RuneReader) bool {
	// doMatch takes the input in exactly one of its three forms.
	return re.doMatch(r, nil, "")
}

// MatchString reports whether the string s
// contains any match of the regular expression re.
func (re *Regexp) MatchString(s string) bool {
	return re.doMatch(nil, nil, s)
}

// Match reports whether the byte slice b
// contains any match of the regular expression re.
func (re *Regexp) Match(b []byte) bool {
	return re.doMatch(nil, b, "")
}
// MatchReader reports whether the text returned by the [io.RuneReader]
// contains any match of the regular expression pattern.
// More complicated queries need to use [Compile] and the full [Regexp] interface.
func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) {
	// Note: compiles the pattern on every call; callers matching
	// repeatedly should compile once instead.
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.MatchReader(r), nil
}

// MatchString reports whether the string s
// contains any match of the regular expression pattern.
// More complicated queries need to use [Compile] and the full [Regexp] interface.
func MatchString(pattern string, s string) (matched bool, err error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.MatchString(s), nil
}

// Match reports whether the byte slice b
// contains any match of the regular expression pattern.
// More complicated queries need to use [Compile] and the full [Regexp] interface.
func Match(pattern string, b []byte) (matched bool, err error) {
	re, err := Compile(pattern)
	if err != nil {
		return false, err
	}
	return re.Match(b), nil
}
// ReplaceAllString returns a copy of src, replacing matches of the [Regexp]
// with the replacement string repl.
// Inside repl, $ signs are interpreted as in [Regexp.Expand].
func (re *Regexp) ReplaceAllString(src, repl string) string {
	// Only request full capture information when repl can reference it.
	caps := 2
	if strings.ContainsRune(repl, '$') {
		caps = 2 * (re.numSubexp + 1)
	}
	return string(re.replaceAll(nil, src, caps, func(dst []byte, match []int) []byte {
		return re.expand(dst, repl, nil, src, match)
	}))
}
// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp]
// with the replacement string repl. The replacement repl is substituted directly,
// without using [Regexp.Expand].
func (re *Regexp) ReplaceAllLiteralString(src, repl string) string {
	out := re.replaceAll(nil, src, 2, func(dst []byte, _ []int) []byte {
		return append(dst, repl...)
	})
	return string(out)
}
// ReplaceAllStringFunc returns a copy of src in which all matches of the
// [Regexp] have been replaced by the return value of function repl applied
// to the matched substring. The replacement returned by repl is substituted
// directly, without using [Regexp.Expand].
func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string {
	return string(re.replaceAll(nil, src, 2, func(dst []byte, m []int) []byte {
		return append(dst, repl(src[m[0]:m[1]])...)
	}))
}
// replaceAll is the shared implementation of the ReplaceAll* methods.
// Exactly one of bsrc ([]byte input) and src (string input) is used.
// nmatch is the number of capture slots to request from the engine;
// repl appends the replacement for match m to dst and returns the
// extended buffer.
func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst []byte, m []int) []byte) []byte {
	lastMatchEnd := 0 // end position of the most recent match
	searchPos := 0    // position where we next look for a match
	var buf []byte
	var endPos int
	if bsrc != nil {
		endPos = len(bsrc)
	} else {
		endPos = len(src)
	}
	if nmatch > re.prog.NumCap {
		nmatch = re.prog.NumCap
	}
	var dstCap [2]int
	for searchPos <= endPos {
		a := re.doExecute(nil, bsrc, src, searchPos, nmatch, dstCap[:0])
		if len(a) == 0 {
			break // no more matches
		}
		// Copy the unmatched characters before this match.
		if bsrc != nil {
			buf = append(buf, bsrc[lastMatchEnd:a[0]]...)
		} else {
			buf = append(buf, src[lastMatchEnd:a[0]]...)
		}
		// Now insert a copy of the replacement string, but not for a
		// match of the empty string immediately after another match.
		// (Otherwise, we get double replacement for patterns that
		// match both empty and nonempty strings.)
		if a[1] > lastMatchEnd || a[0] == 0 {
			buf = repl(buf, a)
		}
		lastMatchEnd = a[1]
		// Advance past this match; always advance at least one character.
		var width int
		if bsrc != nil {
			_, width = utf8.DecodeRune(bsrc[searchPos:])
		} else {
			_, width = utf8.DecodeRuneInString(src[searchPos:])
		}
		if searchPos+width > a[1] {
			searchPos += width
		} else if searchPos+1 > a[1] {
			// This clause is only needed at the end of the input
			// string. In that case, DecodeRuneInString returns width=0.
			searchPos++
		} else {
			searchPos = a[1]
		}
	}
	// Copy the unmatched characters after the last match.
	if bsrc != nil {
		buf = append(buf, bsrc[lastMatchEnd:]...)
	} else {
		buf = append(buf, src[lastMatchEnd:]...)
	}
	return buf
}
// ReplaceAll returns a copy of src, replacing matches of the [Regexp]
// with the replacement text repl.
// Inside repl, $ signs are interpreted as in [Regexp.Expand].
func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
	n := 2
	if bytes.IndexByte(repl, '$') >= 0 {
		// repl may reference submatches; request all capture slots.
		n = 2 * (re.numSubexp + 1)
	}
	// expand needs the replacement as a string; convert lazily, only if
	// there is at least one match, and only once.
	srepl := ""
	b := re.replaceAll(src, "", n, func(dst []byte, match []int) []byte {
		if len(srepl) != len(repl) {
			srepl = string(repl)
		}
		return re.expand(dst, srepl, src, "", match)
	})
	return b
}
// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp]
// with the replacement bytes repl. The replacement repl is substituted directly,
// without using [Regexp.Expand].
func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte {
	return re.replaceAll(src, "", 2, func(dst []byte, _ []int) []byte {
		return append(dst, repl...)
	})
}
// ReplaceAllFunc returns a copy of src in which all matches of the
// [Regexp] have been replaced by the return value of function repl applied
// to the matched byte slice. The replacement returned by repl is substituted
// directly, without using [Regexp.Expand].
func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
	return re.replaceAll(src, "", 2, func(dst []byte, m []int) []byte {
		return append(dst, repl(src[m[0]:m[1]])...)
	})
}
// Bitmap used by func special to check whether a character needs to be escaped.
var specialBytes [16]byte

// special reports whether byte b needs to be escaped by QuoteMeta.
func special(b byte) bool {
	if b >= utf8.RuneSelf {
		return false
	}
	return specialBytes[b%16]&(1<<(b/16)) != 0
}

func init() {
	const meta = `\.+*?()|[]{}^$`
	for i := 0; i < len(meta); i++ {
		b := meta[i]
		specialBytes[b%16] |= 1 << (b / 16)
	}
}

// QuoteMeta returns a string that escapes all regular expression metacharacters
// inside the argument text; the returned string is a regular expression matching
// the literal text.
func QuoteMeta(s string) string {
	// A byte loop is correct because all metacharacters are ASCII.
	var start int
	for start = 0; start < len(s); start++ {
		if special(s[start]) {
			break
		}
	}
	// No meta characters found, so return original string.
	if start == len(s) {
		return s
	}
	// Worst case: every remaining byte needs a backslash.
	buf := make([]byte, 2*len(s)-start)
	copy(buf, s[:start])
	w := start
	for r := start; r < len(s); r++ {
		if special(s[r]) {
			buf[w] = '\\'
			w++
		}
		buf[w] = s[r]
		w++
	}
	return string(buf[:w])
}
// pad extends a match slice to the full (1+numSubexp)*2 length with -1s.
// The number of capture values in the program may correspond to fewer
// capturing expressions than are in the regexp: for example, "(a){0}"
// turns into an empty program, so the maximum capture in the program is
// 0 but we still need to return an entry for \1.
func (re *Regexp) pad(a []int) []int {
	if a == nil {
		return nil // no match
	}
	need := (1 + re.numSubexp) * 2
	for len(a) < need {
		a = append(a, -1)
	}
	return a
}
// allMatches calls deliver at most n times
// with the location of successive matches in the input text.
// The input text is b if non-nil, otherwise s.
func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) {
	var end int
	if b == nil {
		end = len(s)
	} else {
		end = len(b)
	}
	for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; {
		matches := re.doExecute(nil, b, s, pos, re.prog.NumCap, nil)
		if len(matches) == 0 {
			break
		}
		accept := true
		if matches[1] == pos {
			// We've found an empty match.
			if matches[0] == prevMatchEnd {
				// We don't allow an empty match right
				// after a previous match, so ignore it.
				accept = false
			}
			// Advance by one rune so the next search makes progress.
			var width int
			if b == nil {
				is := inputString{str: s}
				_, width = is.step(pos)
			} else {
				ib := inputBytes{str: b}
				_, width = ib.step(pos)
			}
			if width > 0 {
				pos += width
			} else {
				// step reports width 0 at end of input; force the
				// loop condition pos <= end to fail.
				pos = end + 1
			}
		} else {
			pos = matches[1]
		}
		prevMatchEnd = matches[1]
		if accept {
			deliver(re.pad(matches))
			i++
		}
	}
}
// Find returns a slice holding the text of the leftmost match in b of the regular expression.
// A return value of nil indicates no match.
func (re *Regexp) Find(b []byte) []byte {
	var loc [2]int
	m := re.doExecute(nil, b, "", 0, 2, loc[:0])
	if m == nil {
		return nil
	}
	// Full slice expression caps the result so appends cannot clobber b.
	return b[m[0]:m[1]:m[1]]
}
// FindIndex returns a two-element slice of integers defining the location of
// the leftmost match in b of the regular expression. The match itself is at
// b[loc[0]:loc[1]].
// A return value of nil indicates no match.
func (re *Regexp) FindIndex(b []byte) (loc []int) {
	m := re.doExecute(nil, b, "", 0, 2, nil)
	if m == nil {
		return nil
	}
	return m[:2]
}
// FindString returns a string holding the text of the leftmost match in s of the regular
// expression. If there is no match, the return value is an empty string,
// but it will also be empty if the regular expression successfully matches
// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is
// necessary to distinguish these cases.
func (re *Regexp) FindString(s string) string {
	var loc [2]int
	m := re.doExecute(nil, nil, s, 0, 2, loc[:0])
	if m == nil {
		return ""
	}
	return s[m[0]:m[1]]
}
// FindStringIndex returns a two-element slice of integers defining the
// location of the leftmost match in s of the regular expression. The match
// itself is at s[loc[0]:loc[1]].
// A return value of nil indicates no match.
func (re *Regexp) FindStringIndex(s string) (loc []int) {
	m := re.doExecute(nil, nil, s, 0, 2, nil)
	if m == nil {
		return nil
	}
	return m[:2]
}
// FindReaderIndex returns a two-element slice of integers defining the
// location of the leftmost match of the regular expression in text read from
// the [io.RuneReader]. The match text was found in the input stream at
// byte offset loc[0] through loc[1]-1.
// A return value of nil indicates no match.
func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) {
	m := re.doExecute(r, nil, "", 0, 2, nil)
	if m == nil {
		return nil
	}
	return m[:2]
}
// FindSubmatch returns a slice of slices holding the text of the leftmost
// match of the regular expression in b and the matches, if any, of its
// subexpressions, as defined by the 'Submatch' descriptions in the package
// comment.
// A return value of nil indicates no match.
func (re *Regexp) FindSubmatch(b []byte) [][]byte {
	var dstCap [4]int
	m := re.doExecute(nil, b, "", 0, re.prog.NumCap, dstCap[:0])
	if m == nil {
		return nil
	}
	out := make([][]byte, 1+re.numSubexp)
	for i := range out {
		// A group that did not participate in the match stays nil.
		if 2*i < len(m) && m[2*i] >= 0 {
			out[i] = b[m[2*i]:m[2*i+1]:m[2*i+1]]
		}
	}
	return out
}
// Expand appends template to dst and returns the result; during the
// append, Expand replaces variables in the template with corresponding
// matches drawn from src. The match slice should have been returned by
// [Regexp.FindSubmatchIndex].
//
// In the template, a variable is denoted by a substring of the form
// $name or ${name}, where name is a non-empty sequence of letters,
// digits, and underscores. A purely numeric name like $1 refers to
// the submatch with the corresponding index; other names refer to
// capturing parentheses named with the (?P<name>...) syntax. A
// reference to an out of range or unmatched index or a name that is not
// present in the regular expression is replaced with an empty slice.
//
// In the $name form, name is taken to be as long as possible: $1x is
// equivalent to ${1x}, not ${1}x, and, $10 is equivalent to ${10}, not ${1}0.
//
// To insert a literal $ in the output, use $$ in the template.
func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
	// The string conversion lets the two Expand variants share one
	// implementation.
	return re.expand(dst, string(template), src, "", match)
}

// ExpandString is like [Regexp.Expand] but the template and source are strings.
// It appends to and returns a byte slice in order to give the calling
// code control over allocation.
func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
	return re.expand(dst, template, nil, src, match)
}
// expand is the shared implementation of Expand and ExpandString.
// Exactly one of bsrc and src holds the source text. It appends template
// to dst, substituting $name / ${name} references using match (index
// pairs as returned by FindSubmatchIndex).
func (re *Regexp) expand(dst []byte, template string, bsrc []byte, src string, match []int) []byte {
	for len(template) > 0 {
		before, after, ok := strings.Cut(template, "$")
		if !ok {
			break // no more $ references
		}
		dst = append(dst, before...)
		template = after
		if template != "" && template[0] == '$' {
			// Treat $$ as $.
			dst = append(dst, '$')
			template = template[1:]
			continue
		}
		name, num, rest, ok := extract(template)
		if !ok {
			// Malformed; treat $ as raw text.
			dst = append(dst, '$')
			continue
		}
		template = rest
		if num >= 0 {
			// Numeric reference: substitute group num if it exists and
			// participated in the match.
			if 2*num+1 < len(match) && match[2*num] >= 0 {
				if bsrc != nil {
					dst = append(dst, bsrc[match[2*num]:match[2*num+1]]...)
				} else {
					dst = append(dst, src[match[2*num]:match[2*num+1]]...)
				}
			}
		} else {
			// Named reference: use the leftmost group with this name
			// that participated in the match.
			for i, namei := range re.subexpNames {
				if name == namei && 2*i+1 < len(match) && match[2*i] >= 0 {
					if bsrc != nil {
						dst = append(dst, bsrc[match[2*i]:match[2*i+1]]...)
					} else {
						dst = append(dst, src[match[2*i]:match[2*i+1]]...)
					}
					break
				}
			}
		}
	}
	// Append whatever remains after the last reference.
	dst = append(dst, template...)
	return dst
}
// extract returns the name from a leading "name" or "{name}" in str.
// (The $ has already been removed by the caller.)
// If it is a number, extract returns num set to that number; otherwise num = -1.
func extract(str string) (name string, num int, rest string, ok bool) {
	if str == "" {
		return
	}
	brace := false
	if str[0] == '{' {
		brace = true
		str = str[1:]
	}
	// Consume the longest run of letters, digits, and underscores.
	n := 0
	for n < len(str) {
		r, size := utf8.DecodeRuneInString(str[n:])
		if !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' {
			break
		}
		n += size
	}
	if n == 0 {
		return // empty name is not okay
	}
	name = str[:n]
	if brace {
		if n >= len(str) || str[n] != '}' {
			return // missing closing brace
		}
		n++
	}
	// Parse number; any non-digit or an overly large value means "not a number".
	num = 0
	for j := 0; j < len(name); j++ {
		c := name[j]
		if c < '0' || c > '9' || num >= 1e8 {
			num = -1
			break
		}
		num = num*10 + int(c-'0')
	}
	// Disallow leading zeros.
	if len(name) > 1 && name[0] == '0' {
		num = -1
	}
	rest = str[n:]
	ok = true
	return
}
// FindSubmatchIndex returns a slice holding the index pairs identifying the
// leftmost match of the regular expression in b and the matches, if any, of
// its subexpressions, as defined by the 'Submatch' and 'Index' descriptions
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindSubmatchIndex(b []byte) []int {
	// pad fills slots for groups the program omitted (see pad).
	return re.pad(re.doExecute(nil, b, "", 0, re.prog.NumCap, nil))
}
// FindStringSubmatch returns a slice of strings holding the text of the
// leftmost match of the regular expression in s and the matches, if any, of
// its subexpressions, as defined by the 'Submatch' description in the
// package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindStringSubmatch(s string) []string {
	var dstCap [4]int
	m := re.doExecute(nil, nil, s, 0, re.prog.NumCap, dstCap[:0])
	if m == nil {
		return nil
	}
	out := make([]string, 1+re.numSubexp)
	for i := range out {
		// A group that did not participate in the match stays "".
		if 2*i < len(m) && m[2*i] >= 0 {
			out[i] = s[m[2*i]:m[2*i+1]]
		}
	}
	return out
}
// FindStringSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression in s and the
// matches, if any, of its subexpressions, as defined by the 'Submatch' and
// 'Index' descriptions in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindStringSubmatchIndex(s string) []int {
	// pad fills slots for groups the program omitted (see pad).
	return re.pad(re.doExecute(nil, nil, s, 0, re.prog.NumCap, nil))
}

// FindReaderSubmatchIndex returns a slice holding the index pairs
// identifying the leftmost match of the regular expression of text read by
// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined
// by the 'Submatch' and 'Index' descriptions in the package comment. A
// return value of nil indicates no match.
func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int {
	return re.pad(re.doExecute(r, nil, "", 0, re.prog.NumCap, nil))
}
const startSize = 10 // The size at which to start a slice in the 'All' routines.

// FindAll is the 'All' version of [Regexp.Find]; it returns a slice of all successive
// matches of the expression, as defined by the 'All' description in the
// package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAll(b []byte, n int) [][]byte {
	if n < 0 {
		n = len(b) + 1 // no limit: more than any possible match count
	}
	var out [][]byte
	re.allMatches("", b, n, func(m []int) {
		// Allocate lazily so no match yields a nil result.
		if out == nil {
			out = make([][]byte, 0, startSize)
		}
		out = append(out, b[m[0]:m[1]:m[1]])
	})
	return out
}
// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllIndex(b []byte, n int) [][]int {
	if n < 0 {
		n = len(b) + 1 // no limit
	}
	var out [][]int
	re.allMatches("", b, n, func(m []int) {
		// Allocate lazily so no match yields a nil result.
		if out == nil {
			out = make([][]int, 0, startSize)
		}
		out = append(out, m[0:2])
	})
	return out
}
// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all
// successive matches of the expression, as defined by the 'All' description
// in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllString(s string, n int) []string {
	if n < 0 {
		n = len(s) + 1 // no limit
	}
	var out []string
	re.allMatches(s, nil, n, func(m []int) {
		// Allocate lazily so no match yields a nil result.
		if out == nil {
			out = make([]string, 0, startSize)
		}
		out = append(out, s[m[0]:m[1]])
	})
	return out
}
// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a
// slice of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringIndex(s string, n int) [][]int {
	if n < 0 {
		n = len(s) + 1 // no limit
	}
	var out [][]int
	re.allMatches(s, nil, n, func(m []int) {
		// Allocate lazily so no match yields a nil result.
		if out == nil {
			out = make([][]int, 0, startSize)
		}
		out = append(out, m[0:2])
	})
	return out
}
// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice
// of all successive matches of the expression, as defined by the 'All'
// description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte {
	if n < 0 {
		n = len(b) + 1 // no limit
	}
	var out [][][]byte
	re.allMatches("", b, n, func(m []int) {
		// Allocate lazily so no match yields a nil result.
		if out == nil {
			out = make([][][]byte, 0, startSize)
		}
		groups := make([][]byte, len(m)/2)
		for j := range groups {
			// A group that did not participate stays nil.
			if m[2*j] >= 0 {
				groups[j] = b[m[2*j]:m[2*j+1]:m[2*j+1]]
			}
		}
		out = append(out, groups)
	})
	return out
}
// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns
// a slice of all successive matches of the expression, as defined by the
// 'All' description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int {
	if n < 0 {
		n = len(b) + 1 // no limit
	}
	var out [][]int
	re.allMatches("", b, n, func(m []int) {
		// Allocate lazily so no match yields a nil result.
		if out == nil {
			out = make([][]int, 0, startSize)
		}
		out = append(out, m)
	})
	return out
}
// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it
// returns a slice of all successive matches of the expression, as defined by
// the 'All' description in the package comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string {
	if n < 0 {
		n = len(s) + 1 // no limit
	}
	var out [][]string
	re.allMatches(s, nil, n, func(m []int) {
		// Allocate lazily so no match yields a nil result.
		if out == nil {
			out = make([][]string, 0, startSize)
		}
		groups := make([]string, len(m)/2)
		for j := range groups {
			// A group that did not participate stays "".
			if m[2*j] >= 0 {
				groups[j] = s[m[2*j]:m[2*j+1]]
			}
		}
		out = append(out, groups)
	})
	return out
}
// FindAllStringSubmatchIndex is the 'All' version of
// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of
// the expression, as defined by the 'All' description in the package
// comment.
// A return value of nil indicates no match.
func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int {
	if n < 0 {
		n = len(s) + 1 // no limit
	}
	var out [][]int
	re.allMatches(s, nil, n, func(m []int) {
		// Allocate lazily so no match yields a nil result.
		if out == nil {
			out = make([][]int, 0, startSize)
		}
		out = append(out, m)
	})
	return out
}
// Split slices s into substrings separated by the expression and returns a slice of
// the substrings between those expression matches.
//
// The slice returned by this method consists of all the substrings of s
// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression
// that contains no metacharacters, it is equivalent to [strings.SplitN].
//
// Example:
//
//	s := regexp.MustCompile("a*").Split("abaabaccadaaae", 5)
//	// s: ["", "b", "b", "c", "cadaaae"]
//
// The count determines the number of substrings to return:
//   - n > 0: at most n substrings; the last substring will be the unsplit remainder;
//   - n == 0: the result is nil (zero substrings);
//   - n < 0: all substrings.
func (re *Regexp) Split(s string, n int) []string {
	if n == 0 {
		return nil
	}
	// A nonempty pattern splitting an empty string yields one empty piece.
	if len(re.expr) > 0 && len(s) == 0 {
		return []string{""}
	}
	matches := re.FindAllStringIndex(s, n)
	strings := make([]string, 0, len(matches))
	beg := 0
	end := 0
	for _, match := range matches {
		if n > 0 && len(strings) >= n-1 {
			break
		}
		end = match[0]
		// match[1] == 0 means an empty match at the very start of s;
		// it would contribute an empty leading piece, so skip it.
		if match[1] != 0 {
			strings = append(strings, s[beg:end])
		}
		beg = match[1]
	}
	// Append the remainder after the final match, if any.
	if end != len(s) {
		strings = append(strings, s[beg:])
	}
	return strings
}
// AppendText implements [encoding.TextAppender]. The output
// matches that of calling the [Regexp.String] method.
//
// Note that the output is lossy in some cases: This method does not indicate
// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or
// those for which the [Regexp.Longest] method has been called.
func (re *Regexp) AppendText(b []byte) ([]byte, error) {
	return append(b, re.String()...), nil // never fails
}

// MarshalText implements [encoding.TextMarshaler]. The output
// matches that of calling the [Regexp.AppendText] method.
//
// See [Regexp.AppendText] for more information.
func (re *Regexp) MarshalText() ([]byte, error) {
	return re.AppendText(nil)
}
// UnmarshalText implements [encoding.TextUnmarshaler] by calling
// [Compile] on the encoded value.
func (re *Regexp) UnmarshalText(text []byte) error {
	compiled, err := Compile(string(text))
	if err != nil {
		return err
	}
	// Overwrite the receiver wholesale with the freshly compiled value.
	*re = *compiled
	return nil
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syntax
import "unicode"
// A patchList is a list of instruction pointers that need to be filled in (patched).
// Because the pointers haven't been filled in yet, we can reuse their storage
// to hold the list. It's kind of sleazy, but works well in practice.
// See https://swtch.com/~rsc/regexp/regexp1.html for inspiration.
//
// These aren't really pointers: they're integers, so we can reinterpret them
// this way without using package unsafe. A value l.head denotes
// p.inst[l.head>>1].Out (l.head&1==0) or .Arg (l.head&1==1).
// head == 0 denotes the empty list, okay because we start every program
// with a fail instruction, so we'll never want to point at its output link.
type patchList struct {
	head, tail uint32 // encoded first and last links of the list; 0 means empty
}
// makePatchList returns a one-element patch list containing just n.
func makePatchList(n uint32) patchList {
	return patchList{head: n, tail: n}
}
// patch fills in every dangling link on the list with val.
// Each link's current contents hold the next list entry, so the
// walk reads the successor before overwriting the slot.
func (l patchList) patch(p *Prog, val uint32) {
	for next := l.head; next != 0; {
		inst := &p.Inst[next>>1]
		if next&1 == 1 {
			// Odd value: this entry lives in the Arg slot.
			next = inst.Arg
			inst.Arg = val
		} else {
			// Even value: this entry lives in the Out slot.
			next = inst.Out
			inst.Out = val
		}
	}
}
// append links l2 onto the end of l1 and returns the combined list.
// An empty list on either side yields the other unchanged.
func (l1 patchList) append(p *Prog, l2 patchList) patchList {
	switch {
	case l1.head == 0:
		return l2
	case l2.head == 0:
		return l1
	}
	// Store l2's head in l1's last dangling slot, splicing the lists.
	last := &p.Inst[l1.tail>>1]
	if l1.tail&1 == 1 {
		last.Arg = l2.head
	} else {
		last.Out = l2.head
	}
	return patchList{head: l1.head, tail: l2.tail}
}
// A frag represents a compiled program fragment.
type frag struct {
	i        uint32    // index of first instruction
	out      patchList // where to record end instruction
	nullable bool      // whether fragment can match empty string
}

// A compiler holds the state for compiling a parsed Regexp into a Prog.
type compiler struct {
	p *Prog // program being built
}
// Compile compiles the regexp into a program to be executed.
// The regexp should have been simplified already (returned from re.Simplify).
func Compile(re *Regexp) (*Prog, error) {
	var c compiler
	c.init()
	body := c.compile(re)
	// Terminate the fragment with a match instruction and
	// point every dangling link at it.
	match := c.inst(InstMatch)
	body.out.patch(c.p, match.i)
	c.p.Start = int(body.i)
	return c.p, nil
}
// init prepares a fresh program. Instruction 0 is always InstFail,
// which lets patchList use 0 as its empty-list sentinel.
func (c *compiler) init() {
	p := new(Prog)
	p.NumCap = 2 // implicit ( and ) for whole match $0
	c.p = p
	c.inst(InstFail)
}
// anyRuneNotNL is the rune-range list used for OpAnyCharNotNL:
// everything except '\n'.
var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune}

// anyRune is the rune-range list used for OpAnyChar: every rune.
var anyRune = []rune{0, unicode.MaxRune}
// compile translates the parse tree re into a program fragment,
// dispatching on the node's Op and recursing into subexpressions.
func (c *compiler) compile(re *Regexp) frag {
	switch re.Op {
	case OpNoMatch:
		return c.fail()
	case OpEmptyMatch:
		return c.nop()
	case OpLiteral:
		if re.Rune == nil || len(re.Rune) == 0 {
			return c.nop()
		}
		// One rune instruction per literal rune, concatenated in order.
		var f frag
		for j := range re.Rune {
			f1 := c.rune(re.Rune[j:j+1], re.Flags)
			if j == 0 {
				f = f1
			} else {
				f = c.cat(f, f1)
			}
		}
		return f
	case OpCharClass:
		return c.rune(re.Rune, re.Flags)
	case OpAnyCharNotNL:
		return c.rune(anyRuneNotNL, 0)
	case OpAnyChar:
		return c.rune(anyRune, 0)
	case OpBeginLine:
		return c.empty(EmptyBeginLine)
	case OpEndLine:
		return c.empty(EmptyEndLine)
	case OpBeginText:
		return c.empty(EmptyBeginText)
	case OpEndText:
		return c.empty(EmptyEndText)
	case OpWordBoundary:
		return c.empty(EmptyWordBoundary)
	case OpNoWordBoundary:
		return c.empty(EmptyNoWordBoundary)
	case OpCapture:
		// Bracket the subexpression with paired capture instructions:
		// slot 2*Cap opens the group, slot 2*Cap+1 closes it.
		bra := c.cap(uint32(re.Cap << 1))
		sub := c.compile(re.Sub[0])
		ket := c.cap(uint32(re.Cap<<1 | 1))
		return c.cat(c.cat(bra, sub), ket)
	case OpStar:
		return c.star(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0)
	case OpPlus:
		return c.plus(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0)
	case OpQuest:
		return c.quest(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0)
	case OpConcat:
		if len(re.Sub) == 0 {
			return c.nop()
		}
		var f frag
		for i, sub := range re.Sub {
			if i == 0 {
				f = c.compile(sub)
			} else {
				f = c.cat(f, c.compile(sub))
			}
		}
		return f
	case OpAlternate:
		// alt treats a zero frag as "no branch yet", so starting from
		// the zero value folds the subexpressions left to right.
		var f frag
		for _, sub := range re.Sub {
			f = c.alt(f, c.compile(sub))
		}
		return f
	}
	panic("regexp: unhandled case in compile")
}
// inst appends a new instruction with the given op and returns a
// fragment pointing at it. Fragments start out nullable; consuming
// ops (see rune) clear the flag themselves.
func (c *compiler) inst(op InstOp) frag {
	// TODO: impose length limit
	idx := uint32(len(c.p.Inst))
	c.p.Inst = append(c.p.Inst, Inst{Op: op})
	return frag{i: idx, nullable: true}
}
// nop returns a fragment matching the empty string: a single InstNop
// whose Out link is left dangling for the caller to patch.
func (c *compiler) nop() frag {
	nop := c.inst(InstNop)
	nop.out = makePatchList(nop.i << 1)
	return nop
}
// fail returns the zero fragment, whose index 0 refers to the
// InstFail laid down by init.
func (c *compiler) fail() frag {
	var f frag
	return f
}
// cap returns a fragment holding one InstCapture for slot arg,
// growing the program's capture count as needed.
func (c *compiler) cap(arg uint32) frag {
	f := c.inst(InstCapture)
	f.out = makePatchList(f.i << 1)
	c.p.Inst[f.i].Arg = arg
	if n := int(arg) + 1; n > c.p.NumCap {
		c.p.NumCap = n
	}
	return f
}
// cat concatenates two fragments: f1's dangling links are patched to
// enter f2, and f2's remain dangling in the result.
func (c *compiler) cat(f1, f2 frag) frag {
	if f1.i == 0 || f2.i == 0 {
		// Concatenation involving failure is failure.
		return frag{}
	}
	// TODO: elide nop
	f1.out.patch(c.p, f2.i)
	return frag{i: f1.i, out: f2.out, nullable: f1.nullable && f2.nullable}
}
// alt returns a fragment matching either f1 or f2 via a fresh InstAlt.
// A zero (failure) fragment on either side yields the other side.
func (c *compiler) alt(f1, f2 frag) frag {
	switch {
	case f1.i == 0:
		return f2
	case f2.i == 0:
		return f1
	}
	f := c.inst(InstAlt)
	branch := &c.p.Inst[f.i]
	branch.Out, branch.Arg = f1.i, f2.i
	f.out = f1.out.append(c.p, f2.out)
	f.nullable = f1.nullable || f2.nullable
	return f
}
// quest returns a fragment for f1?, preferring to enter f1 when greedy
// and to skip it when nongreedy.
func (c *compiler) quest(f1 frag, nongreedy bool) frag {
	f := c.inst(InstAlt)
	alt := &c.p.Inst[f.i]
	link := f.i << 1 // dangling Out link by default
	if nongreedy {
		alt.Arg = f1.i // preferred branch skips f1
	} else {
		alt.Out = f1.i // preferred branch enters f1
		link |= 1      // so the Arg slot is the one left dangling
	}
	f.out = makePatchList(link).append(c.p, f1.out)
	return f
}
// loop returns the fragment for the main loop of a plus or star.
// For plus, it can be used after changing the entry to f1.i.
// For star, it can be used directly when f1 can't match an empty string.
// (When f1 can match an empty string, f1* must be implemented as (f1+)?
// to get the priority match order correct.)
func (c *compiler) loop(f1 frag, nongreedy bool) frag {
	f := c.inst(InstAlt)
	alt := &c.p.Inst[f.i]
	link := f.i << 1 // dangling Out link by default
	if nongreedy {
		alt.Arg = f1.i // preferred branch exits the loop
	} else {
		alt.Out = f1.i // preferred branch re-enters f1
		link |= 1      // so the Arg slot is the one left dangling
	}
	f1.out.patch(c.p, f.i) // f1's exits loop back to the alt
	f.out = makePatchList(link)
	return f
}
// star returns a fragment for f1*.
func (c *compiler) star(f1 frag, nongreedy bool) frag {
	if !f1.nullable {
		return c.loop(f1, nongreedy)
	}
	// f1 can match the empty string, so implement f1* as (f1+)?
	// to get priority match order correct.
	// See golang.org/issue/46123.
	return c.quest(c.plus(f1, nongreedy), nongreedy)
}
// plus returns a fragment for f1+: the loop body, but entered at f1
// so at least one iteration always runs.
func (c *compiler) plus(f1 frag, nongreedy bool) frag {
	l := c.loop(f1, nongreedy)
	return frag{i: f1.i, out: l.out, nullable: f1.nullable}
}
// empty returns a fragment holding one zero-width assertion (InstEmptyWidth)
// with the given empty-op condition in Arg.
func (c *compiler) empty(op EmptyOp) frag {
	f := c.inst(InstEmptyWidth)
	f.out = makePatchList(f.i << 1)
	c.p.Inst[f.i].Arg = uint32(op)
	return f
}
// rune returns a fragment holding one rune-matching instruction for the
// range list r, downgrading the op to a cheaper specialized form
// (InstRune1, InstRuneAny, InstRuneAnyNotNL) when r has a recognized shape.
func (c *compiler) rune(r []rune, flags Flags) frag {
	f := c.inst(InstRune)
	f.nullable = false // matching a rune always consumes input
	i := &c.p.Inst[f.i]
	i.Rune = r
	flags &= FoldCase // only relevant flag is FoldCase
	if len(r) != 1 || unicode.SimpleFold(r[0]) == r[0] {
		// and sometimes not even that:
		// drop FoldCase when the single rune has no fold-equivalents.
		flags &^= FoldCase
	}
	i.Arg = uint32(flags)
	f.out = makePatchList(f.i << 1)
	// Special cases for exec machine.
	switch {
	case flags&FoldCase == 0 && (len(r) == 1 || len(r) == 2 && r[0] == r[1]):
		i.Op = InstRune1
	case len(r) == 2 && r[0] == 0 && r[1] == unicode.MaxRune:
		i.Op = InstRuneAny
	case len(r) == 4 && r[0] == 0 && r[1] == '\n'-1 && r[2] == '\n'+1 && r[3] == unicode.MaxRune:
		i.Op = InstRuneAnyNotNL
	}
	return f
}
// Code generated by "stringer -type Op -trimprefix Op"; DO NOT EDIT.

package syntax

import "strconv"

// This throwaway function is a compile-time check: if any Op constant
// value changes, an index below goes out of range and the build fails,
// signaling that stringer must be re-run.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[OpNoMatch-1]
	_ = x[OpEmptyMatch-2]
	_ = x[OpLiteral-3]
	_ = x[OpCharClass-4]
	_ = x[OpAnyCharNotNL-5]
	_ = x[OpAnyChar-6]
	_ = x[OpBeginLine-7]
	_ = x[OpEndLine-8]
	_ = x[OpBeginText-9]
	_ = x[OpEndText-10]
	_ = x[OpWordBoundary-11]
	_ = x[OpNoWordBoundary-12]
	_ = x[OpCapture-13]
	_ = x[OpStar-14]
	_ = x[OpPlus-15]
	_ = x[OpQuest-16]
	_ = x[OpRepeat-17]
	_ = x[OpConcat-18]
	_ = x[OpAlternate-19]
	_ = x[opPseudo-128]
}

const (
	// _Op_name_0 concatenates the names of the contiguous ops 1..19;
	// _Op_index_0 holds each name's start offset within it.
	_Op_name_0 = "NoMatchEmptyMatchLiteralCharClassAnyCharNotNLAnyCharBeginLineEndLineBeginTextEndTextWordBoundaryNoWordBoundaryCaptureStarPlusQuestRepeatConcatAlternate"
	_Op_name_1 = "opPseudo"
)

var (
	_Op_index_0 = [...]uint8{0, 7, 17, 24, 33, 45, 52, 61, 68, 77, 84, 96, 110, 117, 121, 125, 130, 136, 142, 151}
)

// String returns the name of the op, or "Op(n)" for values outside
// the known constants.
func (i Op) String() string {
	switch {
	case 1 <= i && i <= 19:
		i -= 1
		return _Op_name_0[_Op_index_0[i]:_Op_index_0[i+1]]
	case i == 128:
		return _Op_name_1
	default:
		return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syntax
import (
"sort"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// An Error describes a failure to parse a regular expression
// and gives the offending expression.
type Error struct {
	Code ErrorCode // the kind of failure
	Expr string    // the portion of the input that triggered it
}
// Error implements the error interface, quoting the offending
// expression in backticks after the code's message.
func (e *Error) Error() string {
	var b strings.Builder
	b.WriteString("error parsing regexp: ")
	b.WriteString(e.Code.String())
	b.WriteString(": `")
	b.WriteString(e.Expr)
	b.WriteString("`")
	return b.String()
}
// An ErrorCode describes a failure to parse a regular expression.
type ErrorCode string

const (
	// Unexpected error
	ErrInternalError ErrorCode = "regexp/syntax: internal error"

	// Parse errors
	ErrInvalidCharClass      ErrorCode = "invalid character class"
	ErrInvalidCharRange      ErrorCode = "invalid character class range"
	ErrInvalidEscape         ErrorCode = "invalid escape sequence"
	ErrInvalidNamedCapture   ErrorCode = "invalid named capture"
	ErrInvalidPerlOp         ErrorCode = "invalid or unsupported Perl syntax"
	ErrInvalidRepeatOp       ErrorCode = "invalid nested repetition operator"
	ErrInvalidRepeatSize     ErrorCode = "invalid repeat count"
	ErrInvalidUTF8           ErrorCode = "invalid UTF-8"
	ErrMissingBracket        ErrorCode = "missing closing ]"
	ErrMissingParen          ErrorCode = "missing closing )"
	ErrMissingRepeatArgument ErrorCode = "missing argument to repetition operator"
	ErrTrailingBackslash     ErrorCode = "trailing backslash at end of expression"
	ErrUnexpectedParen       ErrorCode = "unexpected )"
	ErrNestingDepth          ErrorCode = "expression nests too deeply"
	ErrLarge                 ErrorCode = "expression too large"
)

// String returns the error message for the code.
func (e ErrorCode) String() string {
	return string(e)
}
// Flags control the behavior of the parser and record information about regexp context.
type Flags uint16

const (
	FoldCase      Flags = 1 << iota // case-insensitive match
	Literal                         // treat pattern as literal string
	ClassNL                         // allow character classes like [^a-z] and [[:space:]] to match newline
	DotNL                           // allow . to match newline
	OneLine                         // treat ^ and $ as only matching at beginning and end of text
	NonGreedy                       // make repetition operators default to non-greedy
	PerlX                           // allow Perl extensions
	UnicodeGroups                   // allow \p{Han}, \P{Han} for Unicode group and negation
	WasDollar                       // regexp OpEndText was $, not \z
	Simple                          // regexp contains no counted repetition

	MatchNL = ClassNL | DotNL

	Perl        = ClassNL | OneLine | PerlX | UnicodeGroups // as close to Perl as possible
	POSIX Flags = 0                                         // POSIX syntax
)

// Pseudo-ops for parsing stack.
const (
	opLeftParen = opPseudo + iota // marks a '(' on the parse stack
	opVerticalBar                 // marks a '|' on the parse stack
)
// maxHeight is the maximum height of a regexp parse tree.
// It is somewhat arbitrarily chosen, but the idea is to be large enough
// that no one will actually hit in real use but at the same time small enough
// that recursion on the Regexp tree will not hit the 1GB Go stack limit.
// The maximum amount of stack for a single recursive frame is probably
// closer to 1kB, so this could potentially be raised, but it seems unlikely
// that people have regexps nested even this deeply.
// We ran a test on Google's C++ code base and turned up only
// a single use case with depth > 100; it had depth 128.
// Using depth 1000 should be plenty of margin.
// As an optimization, we don't even bother calculating heights
// until we've allocated at least maxHeight Regexp structures.
const maxHeight = 1000

// maxSize is the maximum size of a compiled regexp in Insts.
// It too is somewhat arbitrarily chosen, but the idea is to be large enough
// to allow significant regexps while at the same time small enough that
// the compiled form will not take up too much memory.
// 128 MB is enough for a 3.3 million Inst structures, which roughly
// corresponds to a 3.3 MB regexp.
const (
	maxSize  = 128 << 20 / instSize
	instSize = 5 * 8 // byte, 2 uint32, slice is 5 64-bit words
)

// maxRunes is the maximum number of runes allowed in a regexp tree
// counting the runes in all the nodes.
// Ignoring character classes p.numRunes is always less than the length of the regexp.
// Character classes can make it much larger: each \pL adds 1292 runes.
// 128 MB is enough for 32M runes, which is over 26k \pL instances.
// Note that repetitions do not make copies of the rune slices,
// so \pL{1000} is only one rune slice, not 1000.
// We could keep a cache of character classes we've seen,
// so that all the \pL we see use the same rune list,
// but that doesn't remove the problem entirely:
// consider something like [\pL01234][\pL01235][\pL01236]...[\pL^&*()].
// And because the Rune slice is exposed directly in the Regexp,
// there is not an opportunity to change the representation to allow
// partial sharing between different character classes.
// So the limit is the best we can do.
const (
	maxRunes = 128 << 20 / runeSize
	runeSize = 4 // rune is int32
)
// A parser holds the state accumulated while parsing one expression.
type parser struct {
	flags       Flags             // parse mode flags
	stack       []*Regexp         // stack of parsed expressions
	free        *Regexp           // free list of reusable nodes, linked through Sub0[0] (see reuse)
	numCap      int               // number of capturing groups seen
	wholeRegexp string            // the full input expression, as passed to parse
	tmpClass    []rune            // temporary char class work space
	numRegexp   int               // number of regexps allocated
	numRunes    int               // number of runes in char classes
	repeats     int64             // product of all repetitions seen
	height      map[*Regexp]int   // regexp height, for height limit check
	size        map[*Regexp]int64 // regexp compiled size, for size limit check
}
// newRegexp returns a zeroed Regexp with the given op, recycling a node
// from the free list when one is available.
func (p *parser) newRegexp(op Op) *Regexp {
	re := p.free
	if re == nil {
		re = new(Regexp)
		p.numRegexp++
	} else {
		// Unlink from the free list (threaded through Sub0[0]) and reset.
		p.free = re.Sub0[0]
		*re = Regexp{}
	}
	re.Op = op
	return re
}
// reuse pushes re onto the free list for newRegexp to recycle,
// dropping any cached height entry for it.
func (p *parser) reuse(re *Regexp) {
	delete(p.height, re) // no-op when the height map hasn't been created
	re.Sub0[0] = p.free
	p.free = re
}
// checkLimits enforces the parser's resource limits against re,
// panicking (with ErrLarge or ErrNestingDepth) on violation.
func (p *parser) checkLimits(re *Regexp) {
	if maxRunes < p.numRunes {
		panic(ErrLarge)
	}
	p.checkSize(re)
	p.checkHeight(re)
}
// checkSize panics with ErrLarge if the estimated compiled size of re
// exceeds maxSize. Size tracking starts lazily: until the cheap
// repeat-product heuristic says the budget might be at risk, no map
// is maintained at all.
func (p *parser) checkSize(re *Regexp) {
	if p.size == nil {
		// We haven't started tracking size yet.
		// Do a relatively cheap check to see if we need to start.
		// Maintain the product of all the repeats we've seen
		// and don't track if the total number of regexp nodes
		// we've seen times the repeat product is in budget.
		if p.repeats == 0 {
			p.repeats = 1
		}
		if re.Op == OpRepeat {
			n := re.Max
			if n == -1 {
				n = re.Min
			}
			if n <= 0 {
				n = 1
			}
			if int64(n) > maxSize/p.repeats {
				// Saturate instead of overflowing the product.
				p.repeats = maxSize
			} else {
				p.repeats *= int64(n)
			}
		}
		if int64(p.numRegexp) < maxSize/p.repeats {
			return
		}

		// We need to start tracking size.
		// Make the map and belatedly populate it
		// with info about everything we've constructed so far.
		p.size = make(map[*Regexp]int64)
		for _, re := range p.stack {
			p.checkSize(re)
		}
	}

	if p.calcSize(re, true) > maxSize {
		panic(ErrLarge)
	}
}
// calcSize returns a pessimistic estimate of the compiled size of re,
// in instructions, caching results in p.size. When force is true the
// estimate for re itself is recomputed, but cached child sizes are
// still used.
func (p *parser) calcSize(re *Regexp, force bool) int64 {
	if !force {
		if size, ok := p.size[re]; ok {
			return size
		}
	}

	var size int64
	switch re.Op {
	case OpLiteral:
		size = int64(len(re.Rune)) // one instruction per rune
	case OpCapture, OpStar:
		// star can be 1+ or 2+; assume 2 pessimistically
		size = 2 + p.calcSize(re.Sub[0], false)
	case OpPlus, OpQuest:
		size = 1 + p.calcSize(re.Sub[0], false)
	case OpConcat:
		for _, sub := range re.Sub {
			size += p.calcSize(sub, false)
		}
	case OpAlternate:
		for _, sub := range re.Sub {
			size += p.calcSize(sub, false)
		}
		if len(re.Sub) > 1 {
			// One alt instruction per branch point.
			size += int64(len(re.Sub)) - 1
		}
	case OpRepeat:
		sub := p.calcSize(re.Sub[0], false)
		if re.Max == -1 {
			if re.Min == 0 {
				size = 2 + sub // x*
			} else {
				size = 1 + int64(re.Min)*sub // xxx+
			}
			break
		}
		// x{2,5} = xx(x(x(x)?)?)?
		size = int64(re.Max)*sub + int64(re.Max-re.Min)
	}

	size = max(1, size)
	p.size[re] = size
	return size
}
// checkHeight panics with ErrNestingDepth if re's tree height exceeds
// maxHeight. Heights are not tracked at all until at least maxHeight
// nodes have been allocated, since shallow trees can't violate the limit.
func (p *parser) checkHeight(re *Regexp) {
	if p.numRegexp < maxHeight {
		return
	}
	if p.height == nil {
		// First time past the threshold: build the cache and
		// backfill it for everything already on the stack.
		p.height = make(map[*Regexp]int)
		for _, pending := range p.stack {
			p.checkHeight(pending)
		}
	}
	if p.calcHeight(re, true) > maxHeight {
		panic(ErrNestingDepth)
	}
}
// calcHeight returns the height of re's tree, caching results in
// p.height. force recomputes re itself but uses cached child heights.
func (p *parser) calcHeight(re *Regexp, force bool) int {
	if !force {
		if cached, ok := p.height[re]; ok {
			return cached
		}
	}
	h := 1
	for _, sub := range re.Sub {
		if hs := 1 + p.calcHeight(sub, false); hs > h {
			h = hs
		}
	}
	p.height[re] = h
	return h
}
// Parse stack manipulation.

// push pushes the regexp re onto the parse stack and returns the regexp.
// It returns nil if re was absorbed into the previous stack entry by
// maybeConcat instead of being pushed.
func (p *parser) push(re *Regexp) *Regexp {
	p.numRunes += len(re.Rune)
	if re.Op == OpCharClass && len(re.Rune) == 2 && re.Rune[0] == re.Rune[1] {
		// Single rune: the class [x] degenerates to the literal x.
		if p.maybeConcat(re.Rune[0], p.flags&^FoldCase) {
			return nil
		}
		re.Op = OpLiteral
		re.Rune = re.Rune[:1]
		re.Flags = p.flags &^ FoldCase
	} else if re.Op == OpCharClass && len(re.Rune) == 4 &&
		re.Rune[0] == re.Rune[1] && re.Rune[2] == re.Rune[3] &&
		unicode.SimpleFold(re.Rune[0]) == re.Rune[2] &&
		unicode.SimpleFold(re.Rune[2]) == re.Rune[0] ||
		re.Op == OpCharClass && len(re.Rune) == 2 &&
			re.Rune[0]+1 == re.Rune[1] &&
			unicode.SimpleFold(re.Rune[0]) == re.Rune[1] &&
			unicode.SimpleFold(re.Rune[1]) == re.Rune[0] {
		// Case-insensitive rune like [Aa] or [Δδ]:
		// either two single-rune ranges that fold onto each other,
		// or one two-rune range whose endpoints fold onto each other.
		if p.maybeConcat(re.Rune[0], p.flags|FoldCase) {
			return nil
		}

		// Rewrite as (case-insensitive) literal.
		re.Op = OpLiteral
		re.Rune = re.Rune[:1]
		re.Flags = p.flags | FoldCase
	} else {
		// Incremental concatenation.
		p.maybeConcat(-1, 0)
	}

	p.stack = append(p.stack, re)
	p.checkLimits(re)
	return re
}
// maybeConcat implements incremental concatenation
// of literal runes into string nodes. The parser calls this
// before each push, so only the top fragment of the stack
// might need processing. Since this is called before a push,
// the topmost literal is no longer subject to operators like *
// (Otherwise ab* would turn into (ab)*.)
// If r >= 0 and there's a node left over, maybeConcat uses it
// to push r with the given flags.
// maybeConcat reports whether r was pushed.
func (p *parser) maybeConcat(r rune, flags Flags) bool {
	n := len(p.stack)
	if n < 2 {
		return false
	}

	re1 := p.stack[n-1]
	re2 := p.stack[n-2]
	// Only adjacent literals with matching case-fold behavior can merge.
	if re1.Op != OpLiteral || re2.Op != OpLiteral || re1.Flags&FoldCase != re2.Flags&FoldCase {
		return false
	}

	// Push re1 into re2.
	re2.Rune = append(re2.Rune, re1.Rune...)

	// Reuse re1 if possible: its runes were copied into re2, so the
	// node itself can carry the new rune r in place.
	if r >= 0 {
		re1.Rune = re1.Rune0[:1]
		re1.Rune[0] = r
		re1.Flags = flags
		return true
	}

	p.stack = p.stack[:n-1]
	p.reuse(re1)
	return false // did not push r
}
// literal pushes a literal regexp for the rune r on the stack.
func (p *parser) literal(r rune) {
	if p.flags&FoldCase != 0 {
		// Canonicalize to the smallest rune in r's fold orbit.
		r = minFoldRune(r)
	}
	node := p.newRegexp(OpLiteral)
	node.Flags = p.flags
	node.Rune0[0] = r
	node.Rune = node.Rune0[:1]
	p.push(node)
}
// minFoldRune returns the minimum rune fold-equivalent to r.
func minFoldRune(r rune) rune {
	if r < minFold || r > maxFold {
		// Outside the folding range, r folds only to itself.
		return r
	}
	m := r
	// Walk r's SimpleFold orbit once around, keeping the smallest member.
	for f := unicode.SimpleFold(r); f != r; f = unicode.SimpleFold(f) {
		m = min(m, f)
	}
	return m
}
// op pushes a regexp with the given op onto the stack
// and returns that regexp.
func (p *parser) op(op Op) *Regexp {
	node := p.newRegexp(op)
	node.Flags = p.flags
	return p.push(node)
}
// repeat replaces the top stack element with itself repeated according to op, min, max.
// before is the regexp suffix starting at the repetition operator.
// after is the regexp suffix following after the repetition operator.
// repeat returns an updated 'after' and an error, if any.
func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (string, error) {
	flags := p.flags
	if p.flags&PerlX != 0 {
		if len(after) > 0 && after[0] == '?' {
			// Trailing '?' flips the greediness of this operator.
			after = after[1:]
			flags ^= NonGreedy
		}
		if lastRepeat != "" {
			// In Perl it is not allowed to stack repetition operators:
			// a** is a syntax error, not a doubled star, and a++ means
			// something else entirely, which we don't support!
			return "", &Error{ErrInvalidRepeatOp, lastRepeat[:len(lastRepeat)-len(after)]}
		}
	}
	n := len(p.stack)
	if n == 0 {
		return "", &Error{ErrMissingRepeatArgument, before[:len(before)-len(after)]}
	}
	sub := p.stack[n-1]
	if sub.Op >= opPseudo {
		// The top of stack is a '(' or '|' marker, not an operand.
		return "", &Error{ErrMissingRepeatArgument, before[:len(before)-len(after)]}
	}

	re := p.newRegexp(op)
	re.Min = min
	re.Max = max
	re.Flags = flags
	re.Sub = re.Sub0[:1]
	re.Sub[0] = sub
	p.stack[n-1] = re
	p.checkLimits(re)

	if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) {
		return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]}
	}

	return after, nil
}
// repeatIsValid reports whether the repetition re is valid.
// Valid means that the combination of the top-level repetition
// and any inner repetitions does not exceed n copies of the
// innermost thing.
// This function rewalks the regexp tree and is called for every repetition,
// so we have to worry about inducing quadratic behavior in the parser.
// We avoid this by only calling repeatIsValid when min or max >= 2.
// In that case the depth of any >= 2 nesting can only get to 9 without
// triggering a parse error, so each subtree can only be rewalked 9 times.
func repeatIsValid(re *Regexp, n int) bool {
	if re.Op == OpRepeat {
		count := re.Max
		switch {
		case count == 0:
			// Zero repetitions always fit.
			return true
		case count < 0:
			// Open-ended {min,}: charge for min copies.
			count = re.Min
		}
		if count > n {
			return false
		}
		if count > 0 {
			// Children get the budget divided among the copies.
			n /= count
		}
	}
	for _, sub := range re.Sub {
		if !repeatIsValid(sub, n) {
			return false
		}
	}
	return true
}
// concat replaces the top of the stack (above the topmost '|' or '(') with its concatenation.
func (p *parser) concat() *Regexp {
	p.maybeConcat(-1, 0)

	// Pop everything above the most recent pseudo-op ('|' or '(').
	top := len(p.stack)
	for top > 0 && p.stack[top-1].Op < opPseudo {
		top--
	}
	subs := p.stack[top:]
	p.stack = p.stack[:top]

	if len(subs) == 0 {
		// An empty concatenation matches the empty string.
		return p.push(p.newRegexp(OpEmptyMatch))
	}
	return p.push(p.collapse(subs, OpConcat))
}
// alternate replaces the top of the stack (above the topmost '(') with its alternation.
func (p *parser) alternate() *Regexp {
	// Scan down to the first pseudo-operator; no '|' can appear
	// above a '(' here, so this finds all the alternatives.
	top := len(p.stack)
	for top > 0 && p.stack[top-1].Op < opPseudo {
		top--
	}
	subs := p.stack[top:]
	p.stack = p.stack[:top]

	// Only the topmost alternative may still need cleaning;
	// the others were already cleaned by swapVerticalBar.
	if n := len(subs); n > 0 {
		cleanAlt(subs[n-1])
	}

	if len(subs) == 0 {
		// Shouldn't happen, but an empty alternation matches nothing.
		return p.push(p.newRegexp(OpNoMatch))
	}
	return p.push(p.collapse(subs, OpAlternate))
}
// cleanAlt cleans re for eventual inclusion in an alternation:
// it canonicalizes a character class and rewrites the special
// "match everything" classes into their dedicated ops.
func cleanAlt(re *Regexp) {
	switch re.Op {
	case OpCharClass:
		re.Rune = cleanClass(&re.Rune)
		if len(re.Rune) == 2 && re.Rune[0] == 0 && re.Rune[1] == unicode.MaxRune {
			// The class covers every rune.
			re.Rune = nil
			re.Op = OpAnyChar
			return
		}
		if len(re.Rune) == 4 && re.Rune[0] == 0 && re.Rune[1] == '\n'-1 && re.Rune[2] == '\n'+1 && re.Rune[3] == unicode.MaxRune {
			// The class covers every rune except '\n'.
			re.Rune = nil
			re.Op = OpAnyCharNotNL
			return
		}
		if cap(re.Rune)-len(re.Rune) > 100 {
			// re.Rune will not grow any more.
			// Make a copy or inline to reclaim storage.
			re.Rune = append(re.Rune0[:0], re.Rune...)
		}
	}
}
// collapse returns the result of applying op to sub.
// If sub contains op nodes, they all get hoisted up
// so that there is never a concat of a concat or an
// alternate of an alternate.
func (p *parser) collapse(subs []*Regexp, op Op) *Regexp {
	if len(subs) == 1 {
		// Applying op to a single operand is the operand itself.
		return subs[0]
	}
	re := p.newRegexp(op)
	re.Sub = re.Sub0[:0]
	for _, sub := range subs {
		if sub.Op == op {
			// Flatten: absorb the child's operands and recycle its node.
			re.Sub = append(re.Sub, sub.Sub...)
			p.reuse(sub)
		} else {
			re.Sub = append(re.Sub, sub)
		}
	}
	if op == OpAlternate {
		re.Sub = p.factor(re.Sub)
		if len(re.Sub) == 1 {
			// Factoring reduced the alternation to one branch.
			old := re
			re = re.Sub[0]
			p.reuse(old)
		}
	}
	return re
}
// factor factors common prefixes from the alternation list sub.
// It returns a replacement list that reuses the same storage and
// frees (passes to p.reuse) any removed *Regexps.
//
// For example,
//
//	ABC|ABD|AEF|BCX|BCY
//
// simplifies by literal prefix extraction to
//
//	A(B(C|D)|EF)|BC(X|Y)
//
// which simplifies by character class introduction to
//
//	A(B[CD]|EF)|BC[XY]
//
// It works in four rounds: literal prefixes, leading-regexp prefixes,
// merging runs of single-rune alternatives into character classes, and
// finally collapsing runs of empty matches.
func (p *parser) factor(sub []*Regexp) []*Regexp {
	if len(sub) < 2 {
		return sub
	}

	// Round 1: Factor out common literal prefixes.
	var str []rune
	var strflags Flags
	start := 0
	out := sub[:0]
	for i := 0; i <= len(sub); i++ {
		// Invariant: the Regexps that were in sub[0:start] have been
		// used or marked for reuse, and the slice space has been reused
		// for out (len(out) <= start).
		//
		// Invariant: sub[start:i] consists of regexps that all begin
		// with str as modified by strflags.
		var istr []rune
		var iflags Flags
		if i < len(sub) {
			istr, iflags = p.leadingString(sub[i])
			if iflags == strflags {
				same := 0
				for same < len(str) && same < len(istr) && str[same] == istr[same] {
					same++
				}
				if same > 0 {
					// Matches at least one rune in current range.
					// Keep going around.
					str = str[:same]
					continue
				}
			}
		}

		// Found end of a run with common leading literal string:
		// sub[start:i] all begin with str[:len(str)], but sub[i]
		// does not even begin with str[0].
		//
		// Factor out common string and append factored expression to out.
		if i == start {
			// Nothing to do - run of length 0.
		} else if i == start+1 {
			// Just one: don't bother factoring.
			out = append(out, sub[start])
		} else {
			// Construct factored form: prefix(suffix1|suffix2|...)
			prefix := p.newRegexp(OpLiteral)
			prefix.Flags = strflags
			prefix.Rune = append(prefix.Rune[:0], str...)

			for j := start; j < i; j++ {
				sub[j] = p.removeLeadingString(sub[j], len(str))
				p.checkLimits(sub[j])
			}
			suffix := p.collapse(sub[start:i], OpAlternate) // recurse

			re := p.newRegexp(OpConcat)
			re.Sub = append(re.Sub[:0], prefix, suffix)
			out = append(out, re)
		}

		// Prepare for next iteration.
		start = i
		str = istr
		strflags = iflags
	}
	sub = out

	// Round 2: Factor out common simple prefixes,
	// just the first piece of each concatenation.
	// This will be good enough a lot of the time.
	//
	// Complex subexpressions (e.g. involving quantifiers)
	// are not safe to factor because that collapses their
	// distinct paths through the automaton, which affects
	// correctness in some cases.
	start = 0
	out = sub[:0]
	var first *Regexp
	for i := 0; i <= len(sub); i++ {
		// Invariant: the Regexps that were in sub[0:start] have been
		// used or marked for reuse, and the slice space has been reused
		// for out (len(out) <= start).
		//
		// Invariant: sub[start:i] consists of regexps that all begin with ifirst.
		var ifirst *Regexp
		if i < len(sub) {
			ifirst = p.leadingRegexp(sub[i])
			if first != nil && first.Equal(ifirst) &&
				// first must be a character class OR a fixed repeat of a character class.
				(isCharClass(first) || (first.Op == OpRepeat && first.Min == first.Max && isCharClass(first.Sub[0]))) {
				continue
			}
		}

		// Found end of a run with common leading regexp:
		// sub[start:i] all begin with first but sub[i] does not.
		//
		// Factor out common regexp and append factored expression to out.
		if i == start {
			// Nothing to do - run of length 0.
		} else if i == start+1 {
			// Just one: don't bother factoring.
			out = append(out, sub[start])
		} else {
			// Construct factored form: prefix(suffix1|suffix2|...)
			prefix := first
			for j := start; j < i; j++ {
				reuse := j != start // prefix came from sub[start]
				sub[j] = p.removeLeadingRegexp(sub[j], reuse)
				p.checkLimits(sub[j])
			}
			suffix := p.collapse(sub[start:i], OpAlternate) // recurse

			re := p.newRegexp(OpConcat)
			re.Sub = append(re.Sub[:0], prefix, suffix)
			out = append(out, re)
		}

		// Prepare for next iteration.
		start = i
		first = ifirst
	}
	sub = out

	// Round 3: Collapse runs of single literals into character classes.
	start = 0
	out = sub[:0]
	for i := 0; i <= len(sub); i++ {
		// Invariant: the Regexps that were in sub[0:start] have been
		// used or marked for reuse, and the slice space has been reused
		// for out (len(out) <= start).
		//
		// Invariant: sub[start:i] consists of regexps that are either
		// literal runes or character classes.
		if i < len(sub) && isCharClass(sub[i]) {
			continue
		}

		// sub[i] is not a char or char class;
		// emit char class for sub[start:i]...
		if i == start {
			// Nothing to do - run of length 0.
		} else if i == start+1 {
			out = append(out, sub[start])
		} else {
			// Make new char class.
			// Start with most complex regexp in sub[start].
			max := start
			for j := start + 1; j < i; j++ {
				if sub[max].Op < sub[j].Op || sub[max].Op == sub[j].Op && len(sub[max].Rune) < len(sub[j].Rune) {
					max = j
				}
			}
			sub[start], sub[max] = sub[max], sub[start]

			for j := start + 1; j < i; j++ {
				mergeCharClass(sub[start], sub[j])
				p.reuse(sub[j])
			}
			cleanAlt(sub[start])
			out = append(out, sub[start])
		}

		// ... and then emit sub[i].
		if i < len(sub) {
			out = append(out, sub[i])
		}
		start = i + 1
	}
	sub = out

	// Round 4: Collapse runs of empty matches into a single empty match.
	start = 0
	out = sub[:0]
	for i := range sub {
		if i+1 < len(sub) && sub[i].Op == OpEmptyMatch && sub[i+1].Op == OpEmptyMatch {
			continue
		}
		out = append(out, sub[i])
	}
	sub = out

	return sub
}
// leadingString returns the leading literal string that re begins with.
// The string refers to storage in re or its children.
func (p *parser) leadingString(re *Regexp) ([]rune, Flags) {
	node := re
	if node.Op == OpConcat && len(node.Sub) > 0 {
		// The leading literal of a concatenation is that of its first part.
		node = node.Sub[0]
	}
	if node.Op != OpLiteral {
		return nil, 0
	}
	return node.Rune, node.Flags & FoldCase
}
// removeLeadingString removes the first n leading runes
// from the beginning of re. It returns the replacement for re.
func (p *parser) removeLeadingString(re *Regexp, n int) *Regexp {
	if re.Op == OpConcat && len(re.Sub) > 0 {
		// Removing a leading string in a concatenation
		// might simplify the concatenation.
		sub := re.Sub[0]
		sub = p.removeLeadingString(sub, n)
		re.Sub[0] = sub
		if sub.Op == OpEmptyMatch {
			// The first part vanished; drop it from the concatenation.
			p.reuse(sub)
			switch len(re.Sub) {
			case 0, 1:
				// Impossible but handle.
				re.Op = OpEmptyMatch
				re.Sub = nil
			case 2:
				// Concatenation of one thing is that thing.
				old := re
				re = re.Sub[1]
				p.reuse(old)
			default:
				// Shift the remaining parts down one slot.
				copy(re.Sub, re.Sub[1:])
				re.Sub = re.Sub[:len(re.Sub)-1]
			}
		}
		return re
	}

	if re.Op == OpLiteral {
		// Drop the first n runes in place.
		re.Rune = re.Rune[:copy(re.Rune, re.Rune[n:])]
		if len(re.Rune) == 0 {
			re.Op = OpEmptyMatch
		}
	}
	return re
}
// leadingRegexp returns the leading regexp that re begins with.
// The regexp refers to storage in re or its children.
func (p *parser) leadingRegexp(re *Regexp) *Regexp {
	if re.Op == OpEmptyMatch {
		return nil
	}
	if re.Op == OpConcat && len(re.Sub) > 0 {
		first := re.Sub[0]
		if first.Op == OpEmptyMatch {
			// A leading empty match is not a useful prefix.
			return nil
		}
		return first
	}
	return re
}
// removeLeadingRegexp removes the leading regexp in re.
// It returns the replacement for re.
// If reuse is true, it passes the removed regexp (if no longer needed) to p.reuse.
func (p *parser) removeLeadingRegexp(re *Regexp, reuse bool) *Regexp {
	if re.Op == OpConcat && len(re.Sub) > 0 {
		if reuse {
			p.reuse(re.Sub[0])
		}
		// Shift the remaining parts down one slot.
		re.Sub = re.Sub[:copy(re.Sub, re.Sub[1:])]
		switch len(re.Sub) {
		case 0:
			re.Op = OpEmptyMatch
			re.Sub = nil
		case 1:
			// Concatenation of one thing is that thing.
			old := re
			re = re.Sub[0]
			p.reuse(old)
		}
		return re
	}
	// re was its own leading regexp; what remains is the empty match.
	if reuse {
		p.reuse(re)
	}
	return p.newRegexp(OpEmptyMatch)
}
// literalRegexp returns an OpLiteral node matching s exactly,
// keeping short strings in the node's inline Rune0 array.
func literalRegexp(s string, flags Flags) *Regexp {
	re := &Regexp{Op: OpLiteral, Flags: flags}
	re.Rune = re.Rune0[:0] // use local storage for small strings
	for _, c := range s {
		if len(re.Rune) >= cap(re.Rune) {
			// Too long for Rune0; fall back to a fresh allocation.
			re.Rune = []rune(s)
			break
		}
		re.Rune = append(re.Rune, c)
	}
	return re
}
// Parsing.

// Parse parses a regular expression string s, controlled by the specified
// Flags, and returns a regular expression parse tree. The syntax is
// described in the top-level comment.
func Parse(s string, flags Flags) (*Regexp, error) {
	return parse(s, flags)
}
// parse is the implementation of Parse. It scans s rune by rune,
// maintaining a stack of parsed subexpressions, and converts the
// ErrLarge/ErrNestingDepth panics used internally for limit checks
// back into ordinary errors.
func parse(s string, flags Flags) (_ *Regexp, err error) {
	defer func() {
		switch r := recover(); r {
		default:
			// Not one of our limit panics; re-raise.
			panic(r)
		case nil:
			// ok
		case ErrLarge: // too big
			err = &Error{Code: ErrLarge, Expr: s}
		case ErrNestingDepth:
			err = &Error{Code: ErrNestingDepth, Expr: s}
		}
	}()

	if flags&Literal != 0 {
		// Trivial parser for literal string.
		if err := checkUTF8(s); err != nil {
			return nil, err
		}
		return literalRegexp(s, flags), nil
	}

	// Otherwise, must do real work.
	var (
		p          parser
		c          rune
		op         Op
		lastRepeat string // suffix at the previous repetition operator, for a** detection
	)
	p.flags = flags
	p.wholeRegexp = s
	t := s
	for t != "" {
		repeat := ""
	BigSwitch:
		switch t[0] {
		default:
			// Ordinary character: push as a literal.
			if c, t, err = nextRune(t); err != nil {
				return nil, err
			}
			p.literal(c)

		case '(':
			if p.flags&PerlX != 0 && len(t) >= 2 && t[1] == '?' {
				// Flag changes and non-capturing groups.
				if t, err = p.parsePerlFlags(t); err != nil {
					return nil, err
				}
				break
			}
			p.numCap++
			p.op(opLeftParen).Cap = p.numCap
			t = t[1:]
		case '|':
			p.parseVerticalBar()
			t = t[1:]
		case ')':
			if err = p.parseRightParen(); err != nil {
				return nil, err
			}
			t = t[1:]
		case '^':
			if p.flags&OneLine != 0 {
				p.op(OpBeginText)
			} else {
				p.op(OpBeginLine)
			}
			t = t[1:]
		case '$':
			if p.flags&OneLine != 0 {
				// Remember that this end-of-text came from '$', not '\z'.
				p.op(OpEndText).Flags |= WasDollar
			} else {
				p.op(OpEndLine)
			}
			t = t[1:]
		case '.':
			if p.flags&DotNL != 0 {
				p.op(OpAnyChar)
			} else {
				p.op(OpAnyCharNotNL)
			}
			t = t[1:]
		case '[':
			if t, err = p.parseClass(t); err != nil {
				return nil, err
			}
		case '*', '+', '?':
			before := t
			switch t[0] {
			case '*':
				op = OpStar
			case '+':
				op = OpPlus
			case '?':
				op = OpQuest
			}
			after := t[1:]
			if after, err = p.repeat(op, 0, 0, before, after, lastRepeat); err != nil {
				return nil, err
			}
			repeat = before
			t = after
		case '{':
			op = OpRepeat
			before := t
			min, max, after, ok := p.parseRepeat(t)
			if !ok {
				// If the repeat cannot be parsed, { is a literal.
				p.literal('{')
				t = t[1:]
				break
			}
			if min < 0 || min > 1000 || max > 1000 || max >= 0 && min > max {
				// Numbers were too big, or max is present and min > max.
				return nil, &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]}
			}
			if after, err = p.repeat(op, min, max, before, after, lastRepeat); err != nil {
				return nil, err
			}
			repeat = before
			t = after
		case '\\':
			if p.flags&PerlX != 0 && len(t) >= 2 {
				switch t[1] {
				case 'A':
					p.op(OpBeginText)
					t = t[2:]
					break BigSwitch
				case 'b':
					p.op(OpWordBoundary)
					t = t[2:]
					break BigSwitch
				case 'B':
					p.op(OpNoWordBoundary)
					t = t[2:]
					break BigSwitch
				case 'C':
					// any byte; not supported
					return nil, &Error{ErrInvalidEscape, t[:2]}
				case 'Q':
					// \Q ... \E: the ... is always literals
					var lit string
					lit, t, _ = strings.Cut(t[2:], `\E`)
					for lit != "" {
						c, rest, err := nextRune(lit)
						if err != nil {
							return nil, err
						}
						p.literal(c)
						lit = rest
					}
					break BigSwitch
				case 'z':
					p.op(OpEndText)
					t = t[2:]
					break BigSwitch
				}
			}

			re := p.newRegexp(OpCharClass)
			re.Flags = p.flags

			// Look for Unicode character group like \p{Han}
			if len(t) >= 2 && (t[1] == 'p' || t[1] == 'P') {
				r, rest, err := p.parseUnicodeClass(t, re.Rune0[:0])
				if err != nil {
					return nil, err
				}
				if r != nil {
					re.Rune = r
					t = rest
					p.push(re)
					break BigSwitch
				}
			}

			// Perl character class escape.
			if r, rest := p.parsePerlClassEscape(t, re.Rune0[:0]); r != nil {
				re.Rune = r
				t = rest
				p.push(re)
				break BigSwitch
			}
			// Neither class form matched; give the node back.
			p.reuse(re)

			// Ordinary single-character escape.
			if c, t, err = p.parseEscape(t); err != nil {
				return nil, err
			}
			p.literal(c)
		}
		lastRepeat = repeat
	}

	p.concat()
	if p.swapVerticalBar() {
		// pop vertical bar
		p.stack = p.stack[:len(p.stack)-1]
	}
	p.alternate()

	n := len(p.stack)
	if n != 1 {
		// Leftover stack entries mean an unclosed '('.
		return nil, &Error{ErrMissingParen, s}
	}
	return p.stack[0], nil
}
// parseRepeat parses {min} (max=min) or {min,} (max=-1) or {min,max}.
// If s is not of that form, it returns ok == false.
// If s has the right form but the values are too big, it returns min == -1, ok == true.
func (p *parser) parseRepeat(s string) (min, max int, rest string, ok bool) {
	if s == "" || s[0] != '{' {
		return
	}
	s = s[1:]
	var ok1 bool
	if min, s, ok1 = p.parseInt(s); !ok1 {
		return
	}
	if s == "" {
		return
	}
	if s[0] != ',' {
		// {min} form: max is implicitly min.
		max = min
	} else {
		s = s[1:]
		if s == "" {
			return
		}
		if s[0] == '}' {
			// {min,} form: no upper bound.
			// Leave s pointing at '}' for the final check below.
			max = -1
		} else if max, s, ok1 = p.parseInt(s); !ok1 {
			return
		} else if max < 0 {
			// parseInt found too big a number
			min = -1
		}
	}
	if s == "" || s[0] != '}' {
		return
	}
	rest = s[1:]
	ok = true
	return
}
// parsePerlFlags parses a Perl flag setting or non-capturing group or both,
// like (?i) or (?: or (?i:. It removes the prefix from s and updates the parse state.
// The caller must have ensured that s begins with "(?".
func (p *parser) parsePerlFlags(s string) (rest string, err error) {
	t := s

	// Check for named captures, first introduced in Python's regexp library.
	// As usual, there are three slightly different syntaxes:
	//
	//	(?P<name>expr)	the original, introduced by Python
	//	(?<name>expr)	the .NET alteration, adopted by Perl 5.10
	//	(?'name'expr)	another .NET alteration, adopted by Perl 5.10
	//
	// Perl 5.10 gave in and implemented the Python version too,
	// but they claim that the last two are the preferred forms.
	// PCRE and languages based on it (specifically, PHP and Ruby)
	// support all three as well. EcmaScript 4 uses only the Python form.
	//
	// In both the open source world (via Code Search) and the
	// Google source tree, (?P<expr>name) and (?<expr>name) are the
	// dominant forms of named captures and both are supported.
	startsWithP := len(t) > 4 && t[2] == 'P' && t[3] == '<'
	startsWithName := len(t) > 3 && t[2] == '<'

	if startsWithP || startsWithName {
		// position of expr start
		exprStartPos := 4
		if startsWithName {
			exprStartPos = 3
		}

		// Pull out name.
		end := strings.IndexRune(t, '>')
		if end < 0 {
			// Report invalid UTF-8 in preference to a missing '>'.
			if err = checkUTF8(t); err != nil {
				return "", err
			}
			return "", &Error{ErrInvalidNamedCapture, s}
		}

		capture := t[:end+1]        // "(?P<name>" or "(?<name>"
		name := t[exprStartPos:end] // "name"
		if err = checkUTF8(name); err != nil {
			return "", err
		}
		if !isValidCaptureName(name) {
			return "", &Error{ErrInvalidNamedCapture, capture}
		}

		// Like ordinary capture, but named.
		p.numCap++
		re := p.op(opLeftParen)
		re.Cap = p.numCap
		re.Name = name
		return t[end+1:], nil
	}

	// Non-capturing group. Might also twiddle Perl flags.
	var c rune
	t = t[2:] // skip (?
	flags := p.flags
	sign := +1
	sawFlag := false
Loop:
	for t != "" {
		if c, t, err = nextRune(t); err != nil {
			return "", err
		}
		switch c {
		default:
			break Loop

		// Flags.
		case 'i':
			flags |= FoldCase
			sawFlag = true
		case 'm':
			flags &^= OneLine
			sawFlag = true
		case 's':
			flags |= DotNL
			sawFlag = true
		case 'U':
			flags |= NonGreedy
			sawFlag = true

		// Switch to negation.
		case '-':
			if sign < 0 {
				// Only one '-' is allowed.
				break Loop
			}
			sign = -1
			// Invert flags so that | above turn into &^ and vice versa.
			// We'll invert flags again before using it below.
			flags = ^flags
			sawFlag = false

		// End of flags, starting group or not.
		case ':', ')':
			if sign < 0 {
				if !sawFlag {
					// "(?-)" and "(?-:" are invalid: '-' must be followed by flags.
					break Loop
				}
				flags = ^flags
			}
			if c == ':' {
				// Open new group
				p.op(opLeftParen)
			}
			p.flags = flags
			return t, nil
		}
	}

	return "", &Error{ErrInvalidPerlOp, s[:len(s)-len(t)]}
}
// isValidCaptureName reports whether name
// is a valid capture name: [A-Za-z0-9_]+.
// PCRE limits names to 32 bytes.
// Python rejects names starting with digits.
// We don't enforce either of those.
func isValidCaptureName(name string) bool {
	if name == "" {
		return false
	}
	for _, c := range name {
		switch {
		case c == '_',
			'0' <= c && c <= '9',
			'A' <= c && c <= 'Z',
			'a' <= c && c <= 'z':
			// word character; keep scanning
		default:
			return false
		}
	}
	return true
}
// parseInt parses a decimal integer at the start of s.
// It rejects leading zeros and reports overflow by returning n == -1
// (with ok still true, since the digits themselves were valid).
func (p *parser) parseInt(s string) (n int, rest string, ok bool) {
	if s == "" || s[0] < '0' || s[0] > '9' {
		return 0, "", false
	}
	// Disallow a leading zero followed by more digits.
	if len(s) >= 2 && s[0] == '0' && '0' <= s[1] && s[1] <= '9' {
		return 0, "", false
	}
	// Count the digit prefix.
	digits := 0
	for digits < len(s) && '0' <= s[digits] && s[digits] <= '9' {
		digits++
	}
	// Accumulate the value, capping runaway inputs at -1.
	for _, d := range []byte(s[:digits]) {
		if n >= 1e8 {
			n = -1 // too big; callers treat -1 as overflow
			break
		}
		n = n*10 + int(d-'0')
	}
	return n, s[digits:], true
}
// isCharClass reports whether re can be represented as a character
// class: a single-rune literal, an explicit class, . or .|\n.
func isCharClass(re *Regexp) bool {
	switch re.Op {
	case OpLiteral:
		return len(re.Rune) == 1
	case OpCharClass, OpAnyCharNotNL, OpAnyChar:
		return true
	}
	return false
}
// matchRune reports whether the character-class-like expression re
// matches the single rune r.
func matchRune(re *Regexp, r rune) bool {
	switch re.Op {
	case OpAnyChar:
		return true
	case OpAnyCharNotNL:
		return r != '\n'
	case OpLiteral:
		return len(re.Rune) == 1 && re.Rune[0] == r
	case OpCharClass:
		// re.Rune holds inclusive [lo, hi] pairs.
		for i := 0; i < len(re.Rune); i += 2 {
			if re.Rune[i] <= r && r <= re.Rune[i+1] {
				return true
			}
		}
		return false
	}
	return false
}
// parseVerticalBar handles a | in the input.
func (p *parser) parseVerticalBar() {
	p.concat()
	// The concatenation we just parsed is on top of the stack.
	// Try to fold it under an existing opVerticalBar (things below
	// an opVerticalBar become an alternation); if there is none,
	// start a new alternation marker.
	if p.swapVerticalBar() {
		return
	}
	p.op(opVerticalBar)
}
// mergeCharClass makes dst = dst|src.
// The caller must ensure that dst.Op >= src.Op,
// to reduce the amount of copying.
func mergeCharClass(dst, src *Regexp) {
	switch dst.Op {
	case OpAnyChar:
		// src doesn't add anything.
	case OpAnyCharNotNL:
		// src might add \n
		if matchRune(src, '\n') {
			dst.Op = OpAnyChar
		}
	case OpCharClass:
		// src is simpler, so either literal or char class
		if src.Op == OpLiteral {
			dst.Rune = appendLiteral(dst.Rune, src.Rune[0], src.Flags)
		} else {
			dst.Rune = appendClass(dst.Rune, src.Rune)
		}
	case OpLiteral:
		// both literal
		if src.Rune[0] == dst.Rune[0] && src.Flags == dst.Flags {
			// Identical literals: nothing to merge.
			break
		}
		// Distinct literals: promote dst to a two-literal char class.
		dst.Op = OpCharClass
		dst.Rune = appendLiteral(dst.Rune[:0], dst.Rune[0], dst.Flags)
		dst.Rune = appendLiteral(dst.Rune, src.Rune[0], src.Flags)
	}
}
// If the top of the stack is an element followed by an opVerticalBar
// swapVerticalBar swaps the two and returns true.
// Otherwise it returns false.
func (p *parser) swapVerticalBar() bool {
	// If above and below vertical bar are literal or char class,
	// can merge into a single char class.
	n := len(p.stack)
	if n >= 3 && p.stack[n-2].Op == opVerticalBar && isCharClass(p.stack[n-1]) && isCharClass(p.stack[n-3]) {
		re1 := p.stack[n-1]
		re3 := p.stack[n-3]
		// Make re3 the more complex of the two.
		if re1.Op > re3.Op {
			re1, re3 = re3, re1
			p.stack[n-3] = re3
		}
		mergeCharClass(re3, re1)
		p.reuse(re1)
		// Drop the top element; its runes now live in re3.
		p.stack = p.stack[:n-1]
		return true
	}

	if n >= 2 {
		re1 := p.stack[n-1]
		re2 := p.stack[n-2]
		if re2.Op == opVerticalBar {
			if n >= 3 {
				// Now out of reach.
				// Clean opportunistically.
				cleanAlt(p.stack[n-3])
			}
			// Swap so the vertical bar is back on top of the stack.
			p.stack[n-2] = re1
			p.stack[n-1] = re2
			return true
		}
	}
	return false
}
// parseRightParen handles a ) in the input.
func (p *parser) parseRightParen() error {
	p.concat()
	if p.swapVerticalBar() {
		// pop vertical bar
		p.stack = p.stack[:len(p.stack)-1]
	}
	p.alternate()

	n := len(p.stack)
	if n < 2 {
		return &Error{ErrUnexpectedParen, p.wholeRegexp}
	}
	re1 := p.stack[n-1]
	re2 := p.stack[n-2]
	p.stack = p.stack[:n-2]
	if re2.Op != opLeftParen {
		return &Error{ErrUnexpectedParen, p.wholeRegexp}
	}
	// Restore flags at time of paren.
	p.flags = re2.Flags
	if re2.Cap == 0 {
		// Just for grouping.
		p.push(re1)
	} else {
		// Reuse the opLeftParen node as the capture node.
		re2.Op = OpCapture
		re2.Sub = re2.Sub0[:1]
		re2.Sub[0] = re1
		p.push(re2)
	}
	return nil
}
// parseEscape parses an escape sequence at the beginning of s
// and returns the rune.
// The caller must have ensured that s begins with a backslash.
func (p *parser) parseEscape(s string) (r rune, rest string, err error) {
	t := s[1:]
	if t == "" {
		return 0, "", &Error{ErrTrailingBackslash, ""}
	}
	c, t, err := nextRune(t)
	if err != nil {
		return 0, "", err
	}

Switch:
	switch c {
	default:
		if c < utf8.RuneSelf && !isalnum(c) {
			// Escaped non-word characters are always themselves.
			// PCRE is not quite so rigorous: it accepts things like
			// \q, but we don't. We once rejected \_, but too many
			// programs and people insist on using it, so allow \_.
			return c, t, nil
		}

	// Octal escapes.
	case '1', '2', '3', '4', '5', '6', '7':
		// Single non-zero digit is a backreference; not supported
		if t == "" || t[0] < '0' || t[0] > '7' {
			break
		}
		fallthrough
	case '0':
		// Consume up to three octal digits; already have one.
		r = c - '0'
		for i := 1; i < 3; i++ {
			if t == "" || t[0] < '0' || t[0] > '7' {
				break
			}
			r = r*8 + rune(t[0]) - '0'
			t = t[1:]
		}
		return r, t, nil

	// Hexadecimal escapes.
	case 'x':
		if t == "" {
			break
		}
		if c, t, err = nextRune(t); err != nil {
			return 0, "", err
		}
		if c == '{' {
			// Any number of digits in braces.
			// Perl accepts any text at all; it ignores all text
			// after the first non-hex digit. We require only hex digits,
			// and at least one.
			nhex := 0
			r = 0
			for {
				if t == "" {
					break Switch
				}
				if c, t, err = nextRune(t); err != nil {
					return 0, "", err
				}
				if c == '}' {
					break
				}
				v := unhex(c)
				if v < 0 {
					break Switch
				}
				r = r*16 + v
				if r > unicode.MaxRune {
					// Value exceeds the rune space; reject.
					break Switch
				}
				nhex++
			}
			if nhex == 0 {
				break Switch
			}
			return r, t, nil
		}

		// Easy case: two hex digits.
		x := unhex(c)
		if c, t, err = nextRune(t); err != nil {
			return 0, "", err
		}
		y := unhex(c)
		if x < 0 || y < 0 {
			break
		}
		return x*16 + y, t, nil

	// C escapes. There is no case 'b', to avoid misparsing
	// the Perl word-boundary \b as the C backspace \b
	// when in POSIX mode. In Perl, /\b/ means word-boundary
	// but /[\b]/ means backspace. We don't support that.
	// If you want a backspace, embed a literal backspace
	// character or use \x08.
	case 'a':
		return '\a', t, err
	case 'f':
		return '\f', t, err
	case 'n':
		return '\n', t, err
	case 'r':
		return '\r', t, err
	case 't':
		return '\t', t, err
	case 'v':
		return '\v', t, err
	}
	// All failed escapes fall through to here.
	return 0, "", &Error{ErrInvalidEscape, s[:len(s)-len(t)]}
}
// parseClassChar parses a character class character at the beginning
// of s and returns it. wholeClass is the full class text, used only
// for error reporting.
func (p *parser) parseClassChar(s, wholeClass string) (r rune, rest string, err error) {
	if s == "" {
		return 0, "", &Error{Code: ErrMissingBracket, Expr: wholeClass}
	}

	// Escapes are delegated to the generic escape parser, even though
	// many characters need not be escaped in this context.
	if s[0] != '\\' {
		return nextRune(s)
	}
	return p.parseEscape(s)
}
// A charGroup is a predefined named character class, such as Perl's
// \d or POSIX's [:alnum:].
type charGroup struct {
	sign  int    // +1 normal, -1 negated; 0 (zero value) means "no such group"
	class []rune // inclusive [lo, hi] rune pairs
}
//go:generate perl make_perl_groups.pl perl_groups.go
// parsePerlClassEscape parses a leading Perl character class escape like \d
// from the beginning of s. If one is present, it appends the characters to r
// and returns the new slice r and the remainder of the string.
// Otherwise it returns nil, "".
func (p *parser) parsePerlClassEscape(s string, r []rune) (out []rune, rest string) {
	if p.flags&PerlX == 0 || len(s) < 2 || s[0] != '\\' {
		return nil, ""
	}
	g, ok := perlGroup[s[:2]]
	if !ok || g.sign == 0 {
		return nil, ""
	}
	return p.appendGroup(r, g), s[2:]
}
// parseNamedClass parses a leading POSIX named character class like
// [:alnum:] from the beginning of s. If one is present, it appends the
// characters to r and returns the new slice r and the remainder of the
// string. An unrecognized name is an error; a string that does not
// even look like a named class returns nil values with a nil error.
func (p *parser) parseNamedClass(s string, r []rune) (out []rune, rest string, err error) {
	if len(s) < 2 || s[0] != '[' || s[1] != ':' {
		return nil, "", nil
	}
	end := strings.Index(s[2:], ":]")
	if end < 0 {
		return nil, "", nil
	}
	// end+4 spans "[:" + name + ":]".
	name := s[:end+4]
	g := posixGroup[name]
	if g.sign == 0 {
		return nil, "", &Error{ErrInvalidCharRange, name}
	}
	return p.appendGroup(r, g), s[end+4:], nil
}
// appendGroup appends the ranges of the named group g to r, applying
// the parser's FoldCase flag and g's negation sign.
func (p *parser) appendGroup(r []rune, g charGroup) []rune {
	if p.flags&FoldCase == 0 {
		if g.sign < 0 {
			r = appendNegatedClass(r, g.class)
		} else {
			r = appendClass(r, g.class)
		}
	} else {
		// Folding can produce overlapping, out-of-order ranges, and
		// appendNegatedClass assumes a clean class, so fold into the
		// parser's reusable scratch buffer and clean it first.
		tmp := p.tmpClass[:0]
		tmp = appendFoldedClass(tmp, g.class)
		p.tmpClass = tmp
		tmp = cleanClass(&p.tmpClass)
		if g.sign < 0 {
			r = appendNegatedClass(r, tmp)
		} else {
			r = appendClass(r, tmp)
		}
	}
	return r
}
// anyTable matches every rune (\p{Any}).
var anyTable = &unicode.RangeTable{
	R16: []unicode.Range16{{Lo: 0, Hi: 1<<16 - 1, Stride: 1}},
	R32: []unicode.Range32{{Lo: 1 << 16, Hi: unicode.MaxRune, Stride: 1}},
}

// asciiTable matches the ASCII range (\p{ASCII}).
var asciiTable = &unicode.RangeTable{
	R16: []unicode.Range16{{Lo: 0, Hi: 0x7F, Stride: 1}},
}

// asciiFoldTable is asciiTable plus the non-ASCII runes that
// case-fold to ASCII letters.
var asciiFoldTable = &unicode.RangeTable{
	R16: []unicode.Range16{
		{Lo: 0, Hi: 0x7F, Stride: 1},
		{Lo: 0x017F, Hi: 0x017F, Stride: 1}, // Old English long s (ſ), folds to S/s.
		{Lo: 0x212A, Hi: 0x212A, Stride: 1}, // Kelvin K, folds to K/k.
	},
}
// categoryAliases is a lazily constructed copy of unicode.CategoryAliases
// but with the keys passed through canonicalName, to support inexact matches.
var categoryAliases struct {
	once sync.Once         // guards the one-time construction of m
	m    map[string]string // canonicalName(alias) -> canonical category name
}
// initCategoryAliases initializes categoryAliases by canonicalizing unicode.CategoryAliases.
func initCategoryAliases() {
categoryAliases.m = make(map[string]string)
for name, actual := range unicode.CategoryAliases {
categoryAliases.m[canonicalName(name)] = actual
}
}
// canonicalName returns the canonical lookup string for name.
// The canonical name has a leading uppercase letter and then lowercase
// letters, and it omits all underscores, spaces, and hyphens.
// (We could have used all lowercase, but this way most package unicode
// map keys are already canonical.)
func canonicalName(name string) string {
	out := make([]byte, 0, len(name))
	// first is true until the first non-separator byte has been emitted;
	// separators do not consume the "first letter" slot.
	first := true
	for i := 0; i < len(name); i++ {
		c := name[i]
		if c == '_' || c == '-' || c == ' ' {
			// Separators are dropped entirely.
			continue
		}
		if first {
			if 'a' <= c && c <= 'z' {
				c -= 'a' - 'A'
			}
			first = false
		} else if 'A' <= c && c <= 'Z' {
			c += 'a' - 'A'
		}
		out = append(out, c)
	}
	return string(out)
}
// unicodeTable returns the unicode.RangeTable identified by name
// and the table of additional fold-equivalent code points.
// If sign < 0, the result should be inverted.
// A nil tab (with sign == 0) means the name was not recognized.
func unicodeTable(name string) (tab, fold *unicode.RangeTable, sign int) {
	name = canonicalName(name)
	// Special cases: Any, Assigned, and ASCII.
	// Also LC is the only non-canonical Categories key, so handle it here.
	switch name {
	case "Any":
		return anyTable, anyTable, +1
	case "Assigned":
		return unicode.Cn, unicode.Cn, -1 // invert Cn (unassigned)
	case "Ascii":
		return asciiTable, asciiFoldTable, +1
	case "Lc":
		return unicode.Categories["LC"], unicode.FoldCategory["LC"], +1
	}
	if t := unicode.Categories[name]; t != nil {
		return t, unicode.FoldCategory[name], +1
	}
	if t := unicode.Scripts[name]; t != nil {
		return t, unicode.FoldScript[name], +1
	}
	// unicode.CategoryAliases makes liberal use of underscores in its names
	// (they are defined that way by Unicode), but we want to match ignoring
	// the underscores, so make our own map with canonical names.
	categoryAliases.once.Do(initCategoryAliases)
	if actual := categoryAliases.m[name]; actual != "" {
		t := unicode.Categories[actual]
		return t, unicode.FoldCategory[actual], +1
	}
	return nil, nil, 0
}
// parseUnicodeClass parses a leading Unicode character class like \p{Han}
// from the beginning of s. If one is present, it appends the characters to r
// and returns the new slice r and the remainder of the string.
// If s does not begin with \p or \P (or UnicodeGroups is disabled),
// it returns nil values with a nil error.
func (p *parser) parseUnicodeClass(s string, r []rune) (out []rune, rest string, err error) {
	if p.flags&UnicodeGroups == 0 || len(s) < 2 || s[0] != '\\' || s[1] != 'p' && s[1] != 'P' {
		return
	}

	// Committed to parse or return error.
	sign := +1
	if s[1] == 'P' {
		sign = -1
	}
	t := s[2:]
	c, t, err := nextRune(t)
	if err != nil {
		return
	}
	var seq, name string
	if c != '{' {
		// Single-letter name.
		seq = s[:len(s)-len(t)]
		name = seq[2:]
	} else {
		// Name is in braces.
		end := strings.IndexRune(s, '}')
		if end < 0 {
			// Report invalid UTF-8 in preference to a missing '}'.
			if err = checkUTF8(s); err != nil {
				return
			}
			return nil, "", &Error{ErrInvalidCharRange, s}
		}
		seq, t = s[:end+1], s[end+1:]
		name = s[3:end]
		if err = checkUTF8(name); err != nil {
			return
		}
	}

	// Group can have leading negation too. \p{^Han} == \P{Han}, \P{^Han} == \p{Han}.
	if name != "" && name[0] == '^' {
		sign = -sign
		name = name[1:]
	}

	tab, fold, tsign := unicodeTable(name)
	if tab == nil {
		return nil, "", &Error{ErrInvalidCharRange, seq}
	}
	if tsign < 0 {
		sign = -sign
	}

	if p.flags&FoldCase == 0 || fold == nil {
		if sign > 0 {
			r = appendTable(r, tab)
		} else {
			r = appendNegatedTable(r, tab)
		}
	} else {
		// Merge and clean tab and fold in a temporary buffer.
		// This is necessary for the negative case and just tidy
		// for the positive case.
		tmp := p.tmpClass[:0]
		tmp = appendTable(tmp, tab)
		tmp = appendTable(tmp, fold)
		p.tmpClass = tmp
		tmp = cleanClass(&p.tmpClass)
		if sign > 0 {
			r = appendClass(r, tmp)
		} else {
			r = appendNegatedClass(r, tmp)
		}
	}
	return r, t, nil
}
// parseClass parses a character class at the beginning of s
// and pushes it onto the parse stack.
// The caller must have ensured that s begins with '['.
func (p *parser) parseClass(s string) (rest string, err error) {
	t := s[1:] // chop [
	re := p.newRegexp(OpCharClass)
	re.Flags = p.flags
	re.Rune = re.Rune0[:0]

	sign := +1
	if t != "" && t[0] == '^' {
		sign = -1
		t = t[1:]

		// If character class does not match \n, add it here,
		// so that negation later will do the right thing.
		if p.flags&ClassNL == 0 {
			re.Rune = append(re.Rune, '\n', '\n')
		}
	}

	class := re.Rune
	first := true // ] and - are okay as first char in class
	for t == "" || t[0] != ']' || first {
		// POSIX: - is only okay unescaped as first or last in class.
		// Perl: - is okay anywhere.
		if t != "" && t[0] == '-' && p.flags&PerlX == 0 && !first && (len(t) == 1 || t[1] != ']') {
			_, size := utf8.DecodeRuneInString(t[1:])
			return "", &Error{Code: ErrInvalidCharRange, Expr: t[:1+size]}
		}
		first = false

		// Look for POSIX [:alnum:] etc.
		if len(t) > 2 && t[0] == '[' && t[1] == ':' {
			nclass, nt, err := p.parseNamedClass(t, class)
			if err != nil {
				return "", err
			}
			if nclass != nil {
				class, t = nclass, nt
				continue
			}
		}

		// Look for Unicode character group like \p{Han}.
		nclass, nt, err := p.parseUnicodeClass(t, class)
		if err != nil {
			return "", err
		}
		if nclass != nil {
			class, t = nclass, nt
			continue
		}

		// Look for Perl character class symbols (extension).
		if nclass, nt := p.parsePerlClassEscape(t, class); nclass != nil {
			class, t = nclass, nt
			continue
		}

		// Single character or simple range.
		rng := t
		var lo, hi rune
		if lo, t, err = p.parseClassChar(t, s); err != nil {
			return "", err
		}
		hi = lo
		// [a-] means (a|-) so check for final ].
		if len(t) >= 2 && t[0] == '-' && t[1] != ']' {
			t = t[1:]
			if hi, t, err = p.parseClassChar(t, s); err != nil {
				return "", err
			}
			if hi < lo {
				// Ranges must run low to high, e.g. [a-z] not [z-a].
				rng = rng[:len(rng)-len(t)]
				return "", &Error{Code: ErrInvalidCharRange, Expr: rng}
			}
		}
		if p.flags&FoldCase == 0 {
			class = appendRange(class, lo, hi)
		} else {
			class = appendFoldedRange(class, lo, hi)
		}
	}
	t = t[1:] // chop ]

	// Use &re.Rune instead of &class to avoid allocation.
	re.Rune = class
	class = cleanClass(&re.Rune)
	if sign < 0 {
		class = negateClass(class)
	}
	re.Rune = class
	p.push(re)
	return t, nil
}
// cleanClass sorts the ranges (pairs of elements of *rp),
// merges overlapping and abutting ranges, and eliminates duplicates,
// compacting the class in place.
func cleanClass(rp *[]rune) []rune {
	// Sort by lo increasing, hi decreasing to break ties.
	sort.Sort(ranges{rp})

	r := *rp
	if len(r) < 2 {
		return r
	}

	// w is the length of the cleaned prefix written so far.
	w := 2
	for i := 2; i < len(r); i += 2 {
		lo, hi := r[i], r[i+1]
		if lo > r[w-1]+1 {
			// Disjoint from the previous kept range: keep as a new range.
			r[w] = lo
			r[w+1] = hi
			w += 2
			continue
		}
		// Overlaps or abuts the previous kept range: extend it.
		if hi > r[w-1] {
			r[w-1] = hi
		}
	}

	return r[:w]
}
// inCharClass reports whether r is in the class.
// It assumes the class has been cleaned by cleanClass
// (sorted, disjoint [lo, hi] pairs).
func inCharClass(r rune, class []rune) bool {
	n := len(class) / 2
	// Find the first range whose hi is >= r; r is a member
	// exactly when that range's lo is <= r.
	i := sort.Search(n, func(i int) bool { return class[2*i+1] >= r })
	return i < n && class[2*i] <= r
}
// appendLiteral returns the result of appending the literal x to the
// class r, expanding x to its case-fold orbit when flags request it.
func appendLiteral(r []rune, x rune, flags Flags) []rune {
	if flags&FoldCase == 0 {
		return appendRange(r, x, x)
	}
	return appendFoldedRange(r, x, x)
}
// appendRange returns the result of appending the range lo-hi to the class r.
// It tries to widen one of the trailing two ranges in place before
// appending a new pair; checking two ranges helps when appending
// case-folded alphabets, where an upper-case run and a lower-case run
// grow in alternation.
func appendRange(r []rune, lo, hi rune) []rune {
	n := len(r)
	for _, back := range []int{2, 4} { // last range, then second-to-last
		if n < back {
			continue
		}
		rlo, rhi := r[n-back], r[n-back+1]
		if lo <= rhi+1 && rlo <= hi+1 {
			// Overlaps or abuts: widen the existing range.
			if lo < rlo {
				r[n-back] = lo
			}
			if hi > rhi {
				r[n-back+1] = hi
			}
			return r
		}
	}
	return append(r, lo, hi)
}
const (
	// minimum and maximum runes involved in folding.
	// checked during test.
	minFold = 0x0041 // 'A'
	maxFold = 0x1e943
)
// appendFoldedRange returns the result of appending the range lo-hi
// and its case folding-equivalent runes to the class r.
func appendFoldedRange(r []rune, lo, hi rune) []rune {
	// Optimizations.
	if lo <= minFold && hi >= maxFold {
		// Range is full: folding can't add more.
		return appendRange(r, lo, hi)
	}
	if hi < minFold || lo > maxFold {
		// Range is outside folding possibilities.
		return appendRange(r, lo, hi)
	}
	if lo < minFold {
		// [lo, minFold-1] needs no folding.
		r = appendRange(r, lo, minFold-1)
		lo = minFold
	}
	if hi > maxFold {
		// [maxFold+1, hi] needs no folding.
		r = appendRange(r, maxFold+1, hi)
		hi = maxFold
	}

	// Brute force. Depend on appendRange to coalesce ranges on the fly.
	for c := lo; c <= hi; c++ {
		r = appendRange(r, c, c)
		f := unicode.SimpleFold(c)
		for f != c {
			// Walk the SimpleFold orbit until it cycles back to c.
			r = appendRange(r, f, f)
			f = unicode.SimpleFold(f)
		}
	}
	return r
}
// appendClass returns the result of appending the class x to the class r.
// It assumes x is clean (even-length, sorted [lo, hi] pairs).
func appendClass(r []rune, x []rune) []rune {
	for len(x) >= 2 {
		r = appendRange(r, x[0], x[1])
		x = x[2:]
	}
	return r
}
// appendFoldedClass returns the result of appending the case folding
// of the class x to the class r.
func appendFoldedClass(r []rune, x []rune) []rune {
	for len(x) >= 2 {
		r = appendFoldedRange(r, x[0], x[1])
		x = x[2:]
	}
	return r
}
// appendNegatedClass returns the result of appending the negation of
// the class x to the class r. It assumes x is clean, and emits the
// gaps between (and around) x's ranges.
func appendNegatedClass(r []rune, x []rune) []rune {
	lo := rune(0)
	for len(x) >= 2 {
		if lo <= x[0]-1 {
			r = appendRange(r, lo, x[0]-1)
		}
		lo = x[1] + 1
		x = x[2:]
	}
	if lo <= unicode.MaxRune {
		// Tail beyond the last range of x.
		r = appendRange(r, lo, unicode.MaxRune)
	}
	return r
}
// appendTable returns the result of appending the runes of x to the class r.
func appendTable(r []rune, x *unicode.RangeTable) []rune {
	for _, rg := range x.R16 {
		lo, hi, stride := rune(rg.Lo), rune(rg.Hi), rune(rg.Stride)
		if stride != 1 {
			// Strided range: add each member individually.
			for c := lo; c <= hi; c += stride {
				r = appendRange(r, c, c)
			}
			continue
		}
		r = appendRange(r, lo, hi)
	}
	for _, rg := range x.R32 {
		lo, hi, stride := rune(rg.Lo), rune(rg.Hi), rune(rg.Stride)
		if stride != 1 {
			for c := lo; c <= hi; c += stride {
				r = appendRange(r, c, c)
			}
			continue
		}
		r = appendRange(r, lo, hi)
	}
	return r
}
// appendNegatedTable returns the result of appending the negation of x to the class r.
// nextLo carries across the R16 and R32 loops, relying on the ranges in
// a unicode.RangeTable being sorted and non-overlapping.
func appendNegatedTable(r []rune, x *unicode.RangeTable) []rune {
	nextLo := '\u0000' // lo end of next class to add
	for _, xr := range x.R16 {
		lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride)
		if stride == 1 {
			// Emit the gap before this range, if any.
			if nextLo <= lo-1 {
				r = appendRange(r, nextLo, lo-1)
			}
			nextLo = hi + 1
			continue
		}
		// Strided range: every skipped code point belongs to the negation.
		for c := lo; c <= hi; c += stride {
			if nextLo <= c-1 {
				r = appendRange(r, nextLo, c-1)
			}
			nextLo = c + 1
		}
	}
	for _, xr := range x.R32 {
		lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride)
		if stride == 1 {
			if nextLo <= lo-1 {
				r = appendRange(r, nextLo, lo-1)
			}
			nextLo = hi + 1
			continue
		}
		for c := lo; c <= hi; c += stride {
			if nextLo <= c-1 {
				r = appendRange(r, nextLo, c-1)
			}
			nextLo = c + 1
		}
	}
	if nextLo <= unicode.MaxRune {
		// Tail after the last range of x.
		r = appendRange(r, nextLo, unicode.MaxRune)
	}
	return r
}
// negateClass overwrites r and returns r's negation.
// It assumes the class r is already clean, and rewrites the gaps
// between r's ranges into r in place.
func negateClass(r []rune) []rune {
	next := rune(0) // lo end of the next gap to emit
	w := 0          // write index
	for i := 0; i < len(r); i += 2 {
		lo, hi := r[i], r[i+1]
		if next <= lo-1 {
			r[w], r[w+1] = next, lo-1
			w += 2
		}
		next = hi + 1
	}
	r = r[:w]
	if next <= unicode.MaxRune {
		// It's possible for the negation to have one more
		// range - this one - than the original class, so use append.
		r = append(r, next, unicode.MaxRune)
	}
	return r
}
// ranges implements sort.Interface on a []rune viewed as [lo, hi]
// pairs. The receiver holds a *[]rune rather than a []rune to avoid
// an allocation, since callers already have a pointer.
type ranges struct {
	p *[]rune
}

// Less orders pairs by lo ascending, breaking ties by hi descending.
func (ra ranges) Less(i, j int) bool {
	s := *ra.p
	i, j = 2*i, 2*j
	if s[i] != s[j] {
		return s[i] < s[j]
	}
	return s[i+1] > s[j+1]
}

// Len reports the number of [lo, hi] pairs.
func (ra ranges) Len() int {
	return len(*ra.p) / 2
}

// Swap exchanges pairs i and j.
func (ra ranges) Swap(i, j int) {
	s := *ra.p
	i, j = 2*i, 2*j
	s[i], s[j] = s[j], s[i]
	s[i+1], s[j+1] = s[j+1], s[i+1]
}
// checkUTF8 returns an ErrInvalidUTF8 error if s is not valid UTF-8,
// and nil otherwise.
func checkUTF8(s string) error {
	for len(s) > 0 {
		c, size := utf8.DecodeRuneInString(s)
		// RuneError with size 1 marks an invalid encoding; a genuine
		// U+FFFD in the input decodes with size 3.
		if c == utf8.RuneError && size == 1 {
			return &Error{Code: ErrInvalidUTF8, Expr: s}
		}
		s = s[size:]
	}
	return nil
}
// nextRune decodes the first rune of s, returning the rune and the
// remainder of the string, or an ErrInvalidUTF8 error on bad encoding.
func nextRune(s string) (c rune, t string, err error) {
	r, size := utf8.DecodeRuneInString(s)
	if r == utf8.RuneError && size == 1 {
		return 0, "", &Error{Code: ErrInvalidUTF8, Expr: s}
	}
	return r, s[size:], nil
}
// isalnum reports whether c is an ASCII letter or digit.
func isalnum(c rune) bool {
	switch {
	case '0' <= c && c <= '9',
		'A' <= c && c <= 'Z',
		'a' <= c && c <= 'z':
		return true
	}
	return false
}
// unhex returns the value of the hex digit c, or -1 if c is not a
// hex digit.
func unhex(c rune) rune {
	switch {
	case '0' <= c && c <= '9':
		return c - '0'
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10
	}
	return -1
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syntax
import (
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// Compiled program.
// May not belong in this package, but convenient for now.

// A Prog is a compiled regular expression program.
type Prog struct {
	Inst   []Inst // instructions, addressed by index (pc)
	Start  int    // index of start instruction
	NumCap int    // number of InstCapture insts in re
}
// An InstOp is an instruction opcode.
type InstOp uint8

const (
	InstAlt InstOp = iota
	InstAltMatch
	InstCapture
	InstEmptyWidth
	InstMatch
	InstFail
	InstNop
	InstRune
	InstRune1
	InstRuneAny
	InstRuneAnyNotNL
)

// instOpNames gives the printable name of each opcode, indexed by
// its value.
var instOpNames = []string{
	"InstAlt",
	"InstAltMatch",
	"InstCapture",
	"InstEmptyWidth",
	"InstMatch",
	"InstFail",
	"InstNop",
	"InstRune",
	"InstRune1",
	"InstRuneAny",
	"InstRuneAnyNotNL",
}

// String returns the name of the opcode, or "" for out-of-range values.
func (i InstOp) String() string {
	// InstOp is unsigned, so only the upper bound needs checking.
	if int(i) < len(instOpNames) {
		return instOpNames[i]
	}
	return ""
}
// An EmptyOp specifies a kind or mixture of zero-width assertions.
type EmptyOp uint8

const (
	EmptyBeginLine EmptyOp = 1 << iota // at start of text or just after \n (see MatchEmptyWidth)
	EmptyEndLine                       // at end of text or just before \n
	EmptyBeginText                     // at start of text
	EmptyEndText                       // at end of text
	EmptyWordBoundary                  // word-character status differs across the position
	EmptyNoWordBoundary                // word-character status is the same across the position
)
// EmptyOpContext returns the zero-width assertions
// satisfied at the position between the runes r1 and r2.
// Passing r1 == -1 indicates that the position is
// at the beginning of the text.
// Passing r2 == -1 indicates that the position is
// at the end of the text.
func EmptyOpContext(r1, r2 rune) EmptyOp {
	// boundary tracks IsWordChar(r1) XOR IsWordChar(r2) bit by bit.
	var op EmptyOp = EmptyNoWordBoundary
	var boundary byte
	switch {
	case IsWordChar(r1):
		boundary = 1
	case r1 == '\n':
		op |= EmptyBeginLine
	case r1 < 0:
		op |= EmptyBeginText | EmptyBeginLine
	}
	switch {
	case IsWordChar(r2):
		boundary ^= 1
	case r2 == '\n':
		op |= EmptyEndLine
	case r2 < 0:
		op |= EmptyEndText | EmptyEndLine
	}
	if boundary != 0 { // IsWordChar(r1) != IsWordChar(r2)
		// Flip the default EmptyNoWordBoundary into EmptyWordBoundary.
		op ^= (EmptyWordBoundary | EmptyNoWordBoundary)
	}
	return op
}
// IsWordChar reports whether r is considered a “word character”
// during the evaluation of the \b and \B zero-width assertions.
// These assertions are ASCII-only: the word characters are [A-Za-z0-9_].
func IsWordChar(r rune) bool {
	// Lowercase letters are tested first: they are the most frequent
	// case in common inputs.
	switch {
	case 'a' <= r && r <= 'z',
		'A' <= r && r <= 'Z',
		'0' <= r && r <= '9',
		r == '_':
		return true
	}
	return false
}
// An Inst is a single instruction in a regular expression program.
type Inst struct {
	Op   InstOp
	Out  uint32 // all but InstMatch, InstFail
	Arg  uint32 // InstAlt, InstAltMatch, InstCapture, InstEmptyWidth
	Rune []rune // InstRune*: matched runes; pairs are inclusive ranges (see MatchRunePos)
}
// String returns a multi-line listing of the program's instructions.
func (p *Prog) String() string {
	b := new(strings.Builder)
	dumpProg(b, p)
	return b.String()
}
// skipNop follows any no-op or capturing instructions starting at pc
// and returns the first instruction of another kind.
func (p *Prog) skipNop(pc uint32) *Inst {
	i := &p.Inst[pc]
	for {
		if i.Op != InstNop && i.Op != InstCapture {
			return i
		}
		i = &p.Inst[i.Out]
	}
}
// op returns i.Op but merges all the Rune special cases into InstRune.
func (i *Inst) op() InstOp {
	switch i.Op {
	case InstRune1, InstRuneAny, InstRuneAnyNotNL:
		return InstRune
	default:
		return i.Op
	}
}
// Prefix returns a literal string that all matches for the
// regexp must start with. Complete is true if the prefix
// is the entire match.
func (p *Prog) Prefix() (prefix string, complete bool) {
	i := p.skipNop(uint32(p.Start))

	// Avoid allocation of buffer if prefix is empty.
	if i.op() != InstRune || len(i.Rune) != 1 {
		return "", i.Op == InstMatch
	}

	// Have prefix; gather characters.
	// Stop at case-folded runes (they match more than one rune) and
	// at utf8.RuneError singletons.
	var buf strings.Builder
	for i.op() == InstRune && len(i.Rune) == 1 && Flags(i.Arg)&FoldCase == 0 && i.Rune[0] != utf8.RuneError {
		buf.WriteRune(i.Rune[0])
		i = p.skipNop(i.Out)
	}
	return buf.String(), i.Op == InstMatch
}
// StartCond returns the leading empty-width conditions that must
// be true in any match. It returns ^EmptyOp(0) if no matches are possible.
func (p *Prog) StartCond() EmptyOp {
	var flag EmptyOp
	pc := uint32(p.Start)
	i := &p.Inst[pc]
Loop:
	for {
		switch i.Op {
		case InstEmptyWidth:
			// Accumulate each required assertion.
			flag |= EmptyOp(i.Arg)
		case InstFail:
			return ^EmptyOp(0)
		case InstCapture, InstNop:
			// skip
		default:
			break Loop
		}
		pc = i.Out
		i = &p.Inst[pc]
	}
	return flag
}
// noMatch is the sentinel returned by MatchRunePos when the
// instruction does not match the rune.
const noMatch = -1
// MatchRune reports whether the instruction matches (and consumes) r.
// It should only be called when i.Op == [InstRune].
func (i *Inst) MatchRune(r rune) bool {
	pos := i.MatchRunePos(r)
	return pos != noMatch
}
// MatchRunePos checks whether the instruction matches (and consumes) r.
// If so, MatchRunePos returns the index of the matching rune pair
// (or, when len(i.Rune) == 1, rune singleton).
// If not, MatchRunePos returns -1.
// MatchRunePos should only be called when i.Op == [InstRune].
func (i *Inst) MatchRunePos(r rune) int {
	rs := i.Rune
	switch len(rs) {
	case 0:
		return noMatch
	case 1:
		// A singleton slice comes from a literal string, not a char
		// class; it may be case-folded.
		r0 := rs[0]
		if r == r0 {
			return 0
		}
		if Flags(i.Arg)&FoldCase != 0 {
			// Walk the fold orbit of r0 looking for r.
			for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) {
				if r == r1 {
					return 0
				}
			}
		}
		return noMatch
	case 2:
		// Single range.
		if rs[0] <= r && r <= rs[1] {
			return 0
		}
		return noMatch
	case 4, 6, 8:
		// Linear scan for a few pairs; handles ASCII classes well.
		for j := 0; j < len(rs); j += 2 {
			if r < rs[j] {
				return noMatch
			}
			if r <= rs[j+1] {
				return j / 2
			}
		}
		return noMatch
	}

	// Otherwise binary search over the sorted pairs.
	lo, hi := 0, len(rs)/2
	for lo < hi {
		m := int(uint(lo+hi) >> 1)
		if rs[2*m] <= r {
			if r <= rs[2*m+1] {
				return m
			}
			lo = m + 1
		} else {
			hi = m
		}
	}
	return noMatch
}
// MatchEmptyWidth reports whether the instruction matches
// an empty string between the runes before and after.
// It should only be called when i.Op == [InstEmptyWidth].
// The rune -1 stands for the edge of the text.
func (i *Inst) MatchEmptyWidth(before rune, after rune) bool {
	op := EmptyOp(i.Arg)
	switch op {
	case EmptyBeginText:
		return before == -1
	case EmptyEndText:
		return after == -1
	case EmptyBeginLine:
		return before == '\n' || before == -1
	case EmptyEndLine:
		return after == '\n' || after == -1
	case EmptyWordBoundary:
		return IsWordChar(before) != IsWordChar(after)
	case EmptyNoWordBoundary:
		return IsWordChar(before) == IsWordChar(after)
	}
	panic("unknown empty width arg")
}
// String returns a human-readable rendering of the instruction,
// in the same format used by dumpProg.
func (i *Inst) String() string {
	var sb strings.Builder
	dumpInst(&sb, i)
	return sb.String()
}
func bw(b *strings.Builder, args ...string) {
for _, s := range args {
b.WriteString(s)
}
}
// dumpProg writes a human-readable listing of p's instructions to b,
// one per line as "PC\tINSTRUCTION", with the start instruction's pc
// suffixed by "*". PCs are right-aligned in a three-column field.
func dumpProg(b *strings.Builder, p *Prog) {
	for j := range p.Inst {
		i := &p.Inst[j]
		pc := strconv.Itoa(j)
		if len(pc) < 3 {
			// Left-pad to three columns. The pad string must be at
			// least 3 bytes: slicing a shorter string by len(pc)==2
			// would panic for two-digit pcs.
			b.WriteString("   "[len(pc):])
		}
		if j == p.Start {
			pc += "*"
		}
		bw(b, pc, "\t")
		dumpInst(b, i)
		bw(b, "\n")
	}
}
// u32 formats i as a decimal string.
func u32(i uint32) string {
	return string(strconv.AppendUint(nil, uint64(i), 10))
}
// dumpInst writes a human-readable form of the single instruction i to b.
func dumpInst(b *strings.Builder, i *Inst) {
	switch i.Op {
	case InstAlt:
		bw(b, "alt -> ", u32(i.Out), ", ", u32(i.Arg))
	case InstAltMatch:
		bw(b, "altmatch -> ", u32(i.Out), ", ", u32(i.Arg))
	case InstCapture:
		bw(b, "cap ", u32(i.Arg), " -> ", u32(i.Out))
	case InstEmptyWidth:
		bw(b, "empty ", u32(i.Arg), " -> ", u32(i.Out))
	case InstMatch:
		bw(b, "match")
	case InstFail:
		bw(b, "fail")
	case InstNop:
		bw(b, "nop -> ", u32(i.Out))
	case InstRune:
		if i.Rune == nil {
			// Shouldn't happen. Stop here rather than falling through
			// and also emitting a bogus `rune ""` after the marker.
			bw(b, "rune <nil>")
			break
		}
		bw(b, "rune ", strconv.QuoteToASCII(string(i.Rune)))
		if Flags(i.Arg)&FoldCase != 0 {
			bw(b, "/i")
		}
		bw(b, " -> ", u32(i.Out))
	case InstRune1:
		bw(b, "rune1 ", strconv.QuoteToASCII(string(i.Rune)), " -> ", u32(i.Out))
	case InstRuneAny:
		bw(b, "any -> ", u32(i.Out))
	case InstRuneAnyNotNL:
		bw(b, "anynotnl -> ", u32(i.Out))
	}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syntax
// Note to implementers:
// In this package, re is always a *Regexp and r is always a rune.
import (
"slices"
"strconv"
"strings"
"unicode"
)
// A Regexp is a node in a regular expression syntax tree.
type Regexp struct {
	Op       Op         // operator
	Flags    Flags      // parse flags recorded on this node (e.g. FoldCase, NonGreedy, WasDollar)
	Sub      []*Regexp  // subexpressions, if any
	Sub0     [1]*Regexp // storage for short Sub, avoiding a separate allocation
	Rune     []rune     // matched runes, for OpLiteral, OpCharClass
	Rune0    [2]rune    // storage for short Rune, avoiding a separate allocation
	Min, Max int        // min, max for OpRepeat
	Cap      int        // capturing index, for OpCapture
	Name     string     // capturing name, for OpCapture
}

//go:generate stringer -type Op -trimprefix Op

// An Op is a single regular expression operator.
type Op uint8

// Operators are listed in precedence order, tightest binding to weakest.
// Character class operators are listed simplest to most complex
// (OpLiteral, OpCharClass, OpAnyCharNotNL, OpAnyChar).
const (
	OpNoMatch        Op = 1 + iota // matches no strings
	OpEmptyMatch                   // matches empty string
	OpLiteral                      // matches Runes sequence
	OpCharClass                    // matches Runes interpreted as range pair list
	OpAnyCharNotNL                 // matches any character except newline
	OpAnyChar                      // matches any character
	OpBeginLine                    // matches empty string at beginning of line
	OpEndLine                      // matches empty string at end of line
	OpBeginText                    // matches empty string at beginning of text
	OpEndText                      // matches empty string at end of text
	OpWordBoundary                 // matches word boundary `\b`
	OpNoWordBoundary               // matches word non-boundary `\B`
	OpCapture                      // capturing subexpression with index Cap, optional name Name
	OpStar                         // matches Sub[0] zero or more times
	OpPlus                         // matches Sub[0] one or more times
	OpQuest                        // matches Sub[0] zero or one times
	OpRepeat                       // matches Sub[0] at least Min times, at most Max (Max == -1 is no limit)
	OpConcat                       // matches concatenation of Subs
	OpAlternate                    // matches alternation of Subs
)

// opPseudo is the first value reserved for pseudo-ops (not real operators).
const opPseudo Op = 128 // where pseudo-ops start
// Equal reports whether x and y have identical structure.
func (x *Regexp) Equal(y *Regexp) bool {
	if x == nil || y == nil {
		// Equal only if both are nil.
		return x == y
	}
	if x.Op != y.Op {
		return false
	}
	switch x.Op {
	case OpEndText:
		// The parse flags remember whether this is \z or \Z.
		return x.Flags&WasDollar == y.Flags&WasDollar
	case OpLiteral, OpCharClass:
		return x.Flags&FoldCase == y.Flags&FoldCase && slices.Equal(x.Rune, y.Rune)
	case OpAlternate, OpConcat:
		return slices.EqualFunc(x.Sub, y.Sub, (*Regexp).Equal)
	case OpStar, OpPlus, OpQuest:
		return x.Flags&NonGreedy == y.Flags&NonGreedy && x.Sub[0].Equal(y.Sub[0])
	case OpRepeat:
		return x.Flags&NonGreedy == y.Flags&NonGreedy &&
			x.Min == y.Min && x.Max == y.Max &&
			x.Sub[0].Equal(y.Sub[0])
	case OpCapture:
		return x.Cap == y.Cap && x.Name == y.Name && x.Sub[0].Equal(y.Sub[0])
	}
	// Remaining ops carry no extra data beyond Op itself.
	return true
}
// printFlags is a bit set indicating which flags (including non-capturing parens) to print around a regexp.
type printFlags uint8

const (
	flagI    printFlags = 1 << iota // (?i:
	flagM                           // (?m:
	flagS                           // (?s:
	flagOff                         // ) — close a previously opened flag group
	flagPrec                        // (?: ) — precedence-only non-capturing group
	negShift = 5                    // flagI<<negShift is (?-i: (negated form of each flag)
)
// addSpan enables the flags f around start..last,
// by setting flags[start] = f and flags[last] = flagOff.
// The map is allocated lazily on first use.
func addSpan(start, last *Regexp, f printFlags, flags *map[*Regexp]printFlags) {
	m := *flags
	if m == nil {
		m = make(map[*Regexp]printFlags)
		*flags = m
	}
	m[start] = f
	m[last] |= flagOff // maybe start==last
}
// calcFlags calculates the flags to print around each subexpression in re,
// storing that information in (*flags)[sub] for each affected subexpression.
// The first time an entry needs to be written to *flags, calcFlags allocates the map.
// calcFlags also calculates the flags that must be active or can't be active
// around re and returns those flags.
func calcFlags(re *Regexp, flags *map[*Regexp]printFlags) (must, cant printFlags) {
	switch re.Op {
	default:
		// Ops with no flag sensitivity.
		return 0, 0
	case OpLiteral:
		// If literal is fold-sensitive, return (flagI, 0) or (0, flagI)
		// according to whether (?i) is active.
		// If literal is not fold-sensitive, return 0, 0.
		for _, r := range re.Rune {
			if minFold <= r && r <= maxFold && unicode.SimpleFold(r) != r {
				if re.Flags&FoldCase != 0 {
					return flagI, 0
				} else {
					return 0, flagI
				}
			}
		}
		return 0, 0
	case OpCharClass:
		// If literal is fold-sensitive, return 0, flagI - (?i) has been compiled out.
		// If literal is not fold-sensitive, return 0, 0.
		for i := 0; i < len(re.Rune); i += 2 {
			// Clip each range to the foldable rune space.
			lo := max(minFold, re.Rune[i])
			hi := min(maxFold, re.Rune[i+1])
			for r := lo; r <= hi; r++ {
				for f := unicode.SimpleFold(r); f != r; f = unicode.SimpleFold(f) {
					// A fold of r lies outside the class: the class is
					// fold-sensitive, so (?i) must not be re-applied.
					if !(lo <= f && f <= hi) && !inCharClass(f, re.Rune) {
						return 0, flagI
					}
				}
			}
		}
		return 0, 0
	case OpAnyCharNotNL: // (?-s).
		return 0, flagS
	case OpAnyChar: // (?s).
		return flagS, 0
	case OpBeginLine, OpEndLine: // (?m)^ (?m)$
		return flagM, 0
	case OpEndText:
		if re.Flags&WasDollar != 0 { // (?-m)$
			return 0, flagM
		}
		return 0, 0
	case OpCapture, OpStar, OpPlus, OpQuest, OpRepeat:
		// Unary ops inherit their child's requirements.
		return calcFlags(re.Sub[0], flags)
	case OpConcat, OpAlternate:
		// Gather the must and cant for each subexpression.
		// When we find a conflicting subexpression, insert the necessary
		// flags around the previously identified span and start over.
		var must, cant, allCant printFlags
		start := 0
		last := 0
		did := false
		for i, sub := range re.Sub {
			subMust, subCant := calcFlags(sub, flags)
			if must&subCant != 0 || subMust&cant != 0 {
				// Conflict: this sub forbids a flag the span requires
				// (or requires one the span forbids). Emit the span
				// collected so far and restart at this sub.
				if must != 0 {
					addSpan(re.Sub[start], re.Sub[last], must, flags)
				}
				must = 0
				cant = 0
				start = i
				did = true
			}
			must |= subMust
			cant |= subCant
			allCant |= subCant
			if subMust != 0 {
				// Track the last sub that actually requires a flag so
				// the span does not extend past it.
				last = i
			}
			if must == 0 && start == i {
				// Nothing required yet; slide the span start forward.
				start++
			}
		}
		if !did {
			// No conflicts: pass the accumulated must and cant upward.
			return must, cant
		}
		if must != 0 {
			// Conflicts found; need to finish final span.
			addSpan(re.Sub[start], re.Sub[last], must, flags)
		}
		return 0, allCant
	}
}
// writeRegexp writes the Perl syntax for the regular expression re to b.
// f carries the flags to print around re (merged with flags[re]); flags
// maps subexpressions to flags precomputed by calcFlags.
func writeRegexp(b *strings.Builder, re *Regexp, f printFlags, flags map[*Regexp]printFlags) {
	f |= flags[re]
	if f&flagPrec != 0 && f&^(flagOff|flagPrec) != 0 && f&flagOff != 0 {
		// flagPrec is redundant with other flags being added and terminated
		f &^= flagPrec
	}
	if f&^(flagOff|flagPrec) != 0 {
		// Open a "(?ims-ms:" group for the flags being toggled here.
		b.WriteString(`(?`)
		if f&flagI != 0 {
			b.WriteString(`i`)
		}
		if f&flagM != 0 {
			b.WriteString(`m`)
		}
		if f&flagS != 0 {
			b.WriteString(`s`)
		}
		if f&((flagM|flagS)<<negShift) != 0 {
			// Negated flags follow a '-'. Only m and s are ever negated
			// in this position (see calcFlags / String).
			b.WriteString(`-`)
			if f&(flagM<<negShift) != 0 {
				b.WriteString(`m`)
			}
			if f&(flagS<<negShift) != 0 {
				b.WriteString(`s`)
			}
		}
		b.WriteString(`:`)
	}
	if f&flagOff != 0 {
		// Close the flag group after the expression body is written.
		defer b.WriteString(`)`)
	}
	if f&flagPrec != 0 {
		// Precedence-only non-capturing group.
		b.WriteString(`(?:`)
		defer b.WriteString(`)`)
	}
	switch re.Op {
	default:
		b.WriteString("<invalid op" + strconv.Itoa(int(re.Op)) + ">")
	case OpNoMatch:
		// A class that can match no rune at all.
		b.WriteString(`[^\x00-\x{10FFFF}]`)
	case OpEmptyMatch:
		b.WriteString(`(?:)`)
	case OpLiteral:
		for _, r := range re.Rune {
			escape(b, r, false)
		}
	case OpCharClass:
		if len(re.Rune)%2 != 0 {
			// Rune must hold lo/hi pairs; anything else is malformed.
			b.WriteString(`[invalid char class]`)
			break
		}
		b.WriteRune('[')
		if len(re.Rune) == 0 {
			// Empty class matches nothing; print as negation of everything.
			b.WriteString(`^\x00-\x{10FFFF}`)
		} else if re.Rune[0] == 0 && re.Rune[len(re.Rune)-1] == unicode.MaxRune && len(re.Rune) > 2 {
			// Contains 0 and MaxRune. Probably a negated class.
			// Print the gaps.
			b.WriteRune('^')
			for i := 1; i < len(re.Rune)-1; i += 2 {
				// The gap between consecutive ranges, exclusive on both ends.
				lo, hi := re.Rune[i]+1, re.Rune[i+1]-1
				escape(b, lo, lo == '-')
				if lo != hi {
					// Adjacent pair prints as two chars, no dash.
					if hi != lo+1 {
						b.WriteRune('-')
					}
					escape(b, hi, hi == '-')
				}
			}
		} else {
			for i := 0; i < len(re.Rune); i += 2 {
				lo, hi := re.Rune[i], re.Rune[i+1]
				escape(b, lo, lo == '-')
				if lo != hi {
					// Adjacent pair prints as two chars, no dash.
					if hi != lo+1 {
						b.WriteRune('-')
					}
					escape(b, hi, hi == '-')
				}
			}
		}
		b.WriteRune(']')
	case OpAnyCharNotNL, OpAnyChar:
		// The surrounding (?s)/(?-s) flag group distinguishes these two.
		b.WriteString(`.`)
	case OpBeginLine:
		b.WriteString(`^`)
	case OpEndLine:
		b.WriteString(`$`)
	case OpBeginText:
		b.WriteString(`\A`)
	case OpEndText:
		if re.Flags&WasDollar != 0 {
			b.WriteString(`$`)
		} else {
			b.WriteString(`\z`)
		}
	case OpWordBoundary:
		b.WriteString(`\b`)
	case OpNoWordBoundary:
		b.WriteString(`\B`)
	case OpCapture:
		if re.Name != "" {
			b.WriteString(`(?P<`)
			b.WriteString(re.Name)
			b.WriteRune('>')
		} else {
			b.WriteRune('(')
		}
		if re.Sub[0].Op != OpEmptyMatch {
			writeRegexp(b, re.Sub[0], flags[re.Sub[0]], flags)
		}
		b.WriteRune(')')
	case OpStar, OpPlus, OpQuest, OpRepeat:
		p := printFlags(0)
		sub := re.Sub[0]
		// Parenthesize the operand when the repetition would otherwise
		// bind to only part of it.
		if sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 {
			p = flagPrec
		}
		writeRegexp(b, sub, p, flags)
		switch re.Op {
		case OpStar:
			b.WriteRune('*')
		case OpPlus:
			b.WriteRune('+')
		case OpQuest:
			b.WriteRune('?')
		case OpRepeat:
			b.WriteRune('{')
			b.WriteString(strconv.Itoa(re.Min))
			if re.Max != re.Min {
				b.WriteRune(',')
				// Max < 0 means "no upper bound": {min,}.
				if re.Max >= 0 {
					b.WriteString(strconv.Itoa(re.Max))
				}
			}
			b.WriteRune('}')
		}
		if re.Flags&NonGreedy != 0 {
			b.WriteRune('?')
		}
	case OpConcat:
		for _, sub := range re.Sub {
			p := printFlags(0)
			// Alternations inside a concatenation need grouping.
			if sub.Op == OpAlternate {
				p = flagPrec
			}
			writeRegexp(b, sub, p, flags)
		}
	case OpAlternate:
		for i, sub := range re.Sub {
			if i > 0 {
				b.WriteRune('|')
			}
			writeRegexp(b, sub, 0, flags)
		}
	}
}
// String returns the Perl syntax for the regular expression tree re.
func (re *Regexp) String() string {
	var spanFlags map[*Regexp]printFlags
	must, cant := calcFlags(re, &spanFlags)
	// Forbidden m/s flags are printed negated at the top level;
	// flagI is never negated.
	must |= (cant &^ flagI) << negShift
	if must != 0 {
		must |= flagOff
	}
	var sb strings.Builder
	writeRegexp(&sb, re, must, spanFlags)
	return sb.String()
}
const meta = `\.+*?()|[]{}^$`
func escape(b *strings.Builder, r rune, force bool) {
if unicode.IsPrint(r) {
if strings.ContainsRune(meta, r) || force {
b.WriteRune('\\')
}
b.WriteRune(r)
return
}
switch r {
case '\a':
b.WriteString(`\a`)
case '\f':
b.WriteString(`\f`)
case '\n':
b.WriteString(`\n`)
case '\r':
b.WriteString(`\r`)
case '\t':
b.WriteString(`\t`)
case '\v':
b.WriteString(`\v`)
default:
if r < 0x100 {
b.WriteString(`\x`)
s := strconv.FormatInt(int64(r), 16)
if len(s) == 1 {
b.WriteRune('0')
}
b.WriteString(s)
break
}
b.WriteString(`\x{`)
b.WriteString(strconv.FormatInt(int64(r), 16))
b.WriteString(`}`)
}
}
// MaxCap walks the regexp to find the maximum capture index.
func (re *Regexp) MaxCap() int {
	best := 0
	if re.Op == OpCapture {
		best = re.Cap
	}
	for _, child := range re.Sub {
		best = max(best, child.MaxCap())
	}
	return best
}
// CapNames walks the regexp to find the names of capturing groups.
// Index i of the result holds the name of group i ("" if unnamed);
// index 0 is always "".
func (re *Regexp) CapNames() []string {
	out := make([]string, re.MaxCap()+1)
	re.capNames(out)
	return out
}

// capNames records, for every capture node in the subtree, the group's
// name at the group's index in names.
func (re *Regexp) capNames(names []string) {
	if re.Op == OpCapture {
		names[re.Cap] = re.Name
	}
	for _, child := range re.Sub {
		child.capNames(names)
	}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syntax
// Simplify returns a regexp equivalent to re but without counted repetitions
// and with various other simplifications, such as rewriting /(?:a+)+/ to /a+/.
// The resulting regexp will execute correctly but its string representation
// will not produce the same parse tree, because capturing parentheses
// may have been duplicated or removed. For example, the simplified form
// for /(x){1,2}/ is /(x)(x)?/ but both parentheses capture as $1.
// The returned regexp may share structure with or be the original.
func (re *Regexp) Simplify() *Regexp {
	if re == nil {
		return nil
	}
	switch re.Op {
	case OpCapture, OpConcat, OpAlternate:
		// Simplify children, building new Regexp if children change.
		// Copy-on-write: nre stays == re until a child actually changes.
		nre := re
		for i, sub := range re.Sub {
			nsub := sub.Simplify()
			if nre == re && nsub != sub {
				// Start a copy.
				nre = new(Regexp)
				*nre = *re
				nre.Rune = nil
				// Seed the copy with the unchanged children seen so far.
				nre.Sub = append(nre.Sub0[:0], re.Sub[:i]...)
			}
			if nre != re {
				nre.Sub = append(nre.Sub, nsub)
			}
		}
		return nre
	case OpStar, OpPlus, OpQuest:
		sub := re.Sub[0].Simplify()
		return simplify1(re.Op, re.Flags, sub, re)
	case OpRepeat:
		// Special special case: x{0} matches the empty string
		// and doesn't even need to consider x.
		if re.Min == 0 && re.Max == 0 {
			return &Regexp{Op: OpEmptyMatch}
		}
		// The fun begins.
		sub := re.Sub[0].Simplify()
		// x{n,} means at least n matches of x.
		if re.Max == -1 {
			// Special case: x{0,} is x*.
			if re.Min == 0 {
				return simplify1(OpStar, re.Flags, sub, nil)
			}
			// Special case: x{1,} is x+.
			if re.Min == 1 {
				return simplify1(OpPlus, re.Flags, sub, nil)
			}
			// General case: x{4,} is xxxx+.
			nre := &Regexp{Op: OpConcat}
			nre.Sub = nre.Sub0[:0]
			for i := 0; i < re.Min-1; i++ {
				nre.Sub = append(nre.Sub, sub)
			}
			nre.Sub = append(nre.Sub, simplify1(OpPlus, re.Flags, sub, nil))
			return nre
		}
		// Special case x{0} handled above.
		// Special case: x{1} is just x.
		if re.Min == 1 && re.Max == 1 {
			return sub
		}
		// General case: x{n,m} means n copies of x and m copies of x?
		// The machine will do less work if we nest the final m copies,
		// so that x{2,5} = xx(x(x(x)?)?)?
		// Build leading prefix: xx.
		var prefix *Regexp
		if re.Min > 0 {
			prefix = &Regexp{Op: OpConcat}
			prefix.Sub = prefix.Sub0[:0]
			for i := 0; i < re.Min; i++ {
				prefix.Sub = append(prefix.Sub, sub)
			}
		}
		// Build and attach suffix: (x(x(x)?)?)?
		if re.Max > re.Min {
			suffix := simplify1(OpQuest, re.Flags, sub, nil)
			for i := re.Min + 1; i < re.Max; i++ {
				nre2 := &Regexp{Op: OpConcat}
				nre2.Sub = append(nre2.Sub0[:0], sub, suffix)
				suffix = simplify1(OpQuest, re.Flags, nre2, nil)
			}
			if prefix == nil {
				return suffix
			}
			prefix.Sub = append(prefix.Sub, suffix)
		}
		if prefix != nil {
			return prefix
		}
		// Some degenerate case like min > max or min < max < 0.
		// Handle as impossible match.
		return &Regexp{Op: OpNoMatch}
	}
	// Leaf ops are already simple.
	return re
}
// simplify1 implements Simplify for the unary OpStar,
// OpPlus, and OpQuest operators. It returns the simple regexp
// equivalent to
//
//	Regexp{Op: op, Flags: flags, Sub: {sub}}
//
// under the assumption that sub is already simple, and
// without first allocating that structure. If the regexp
// to be returned turns out to be equivalent to re, simplify1
// returns re instead.
//
// simplify1 is factored out of Simplify because the implementation
// for other operators generates these unary expressions.
// Letting them call simplify1 makes sure the expressions they
// generate are simple.
func simplify1(op Op, flags Flags, sub, re *Regexp) *Regexp {
	// Repeating the empty string any number of times is still
	// the empty string.
	if sub.Op == OpEmptyMatch {
		return sub
	}
	// The operators are idempotent when the greediness matches.
	if sub.Op == op && sub.Flags&NonGreedy == flags&NonGreedy {
		return sub
	}
	// Reuse re when it already has exactly the desired shape.
	if re != nil && re.Op == op && re.Flags&NonGreedy == flags&NonGreedy && re.Sub[0] == sub {
		return re
	}
	nre := &Regexp{Op: op, Flags: flags}
	nre.Sub = append(nre.Sub0[:0], sub)
	return nre
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/byteorder"
"internal/cpu"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
)
const (
	// We use 32-bit hash on Wasm, see hash32.go.
	hashSize = (1-goarch.IsWasm)*goarch.PtrSize + goarch.IsWasm*4

	// c0 and c1 are mixing constants for the hash functions below.
	// Each expression selects a 32-bit or 64-bit constant by hashSize:
	// exactly one of the two products is nonzero, the other term's
	// multiplier evaluates to zero.
	c0 = uintptr((8-hashSize)/4*2860486313 + (hashSize-4)/4*33054211828000289)
	c1 = uintptr((8-hashSize)/4*3267000013 + (hashSize-4)/4*23344194077549503)
)
// trimHash truncates a manually computed hash to 32 bits on Wasm,
// matching what memhash* return there (see hash32.go). On all other
// platforms it is the identity.
func trimHash(h uintptr) uintptr {
	if goarch.IsWasm == 0 {
		return h
	}
	// On Wasm, we use 32-bit hash, despite that uintptr is 64-bit.
	// memhash* always returns a uintptr with high 32-bit being 0
	// (see hash32.go). We trim the hash in other places where we
	// compute the hash manually, e.g. in interhash.
	return uintptr(uint32(h))
}
// memhash0 hashes a zero-size value: the seed passes through unchanged.
func memhash0(p unsafe.Pointer, h uintptr) uintptr {
	return h
}

// memhash8 hashes the 1-byte value at p with seed h.
func memhash8(p unsafe.Pointer, h uintptr) uintptr {
	return memhash(p, h, 1)
}

// memhash16 hashes the 2-byte value at p with seed h.
func memhash16(p unsafe.Pointer, h uintptr) uintptr {
	return memhash(p, h, 2)
}

// memhash128 hashes the 16-byte value at p with seed h.
func memhash128(p unsafe.Pointer, h uintptr) uintptr {
	return memhash(p, h, 16)
}
// memhash_varlen hashes a value whose size is stored in the calling
// closure, one word past the closure's header word. NOTE(review):
// presumably nosplit because sys.GetClosurePtr must run before anything
// that could move the stack — confirm against the callers.
//
//go:nosplit
func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
	ptr := sys.GetClosurePtr()
	// The size lives one uintptr past the closure pointer.
	size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
	return memhash(p, h, size)
}
// runtime variable to check if the processor we're running on
// actually supports the instructions used by the AES-based
// hash implementation. Set by initAlgAES (called from alginit).
var useAeshash bool
// The following hash functions are implemented in asm_*.s.

// memhash should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/aacfactory/fns
//   - github.com/dgraph-io/ristretto
//   - github.com/minio/simdjson-go
//   - github.com/nbd-wtf/go-nostr
//   - github.com/outcaste-io/ristretto
//   - github.com/puzpuzpuz/xsync/v2
//   - github.com/puzpuzpuz/xsync/v3
//   - github.com/authzed/spicedb
//   - github.com/pingcap/badger
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname memhash
func memhash(p unsafe.Pointer, h, s uintptr) uintptr

// memhash32 and memhash64 are fixed-size variants of memhash for
// 4- and 8-byte values (assembly implementations, see asm_*.s).
func memhash32(p unsafe.Pointer, h uintptr) uintptr
func memhash64(p unsafe.Pointer, h uintptr) uintptr
// strhash hashes the string whose header is at p (assembly
// implementation, see asm_*.s; strhashFallback below is the portable path).
//
// strhash should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/aristanetworks/goarista
//   - github.com/bytedance/sonic
//   - github.com/bytedance/go-tagexpr/v2
//   - github.com/cloudwego/dynamicgo
//   - github.com/v2fly/v2ray-core/v5
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname strhash
func strhash(p unsafe.Pointer, h uintptr) uintptr
// strhashFallback is the portable string hash: it forwards the string's
// data pointer and length to memhashFallback.
func strhashFallback(a unsafe.Pointer, h uintptr) uintptr {
	x := (*stringStruct)(a)
	return memhashFallback(x.str, h, uintptr(x.len))
}
// Because NaN != NaN, a map can contain any number of (mostly useless)
// entries keyed with NaNs. To avoid long hash chains, we assign a
// random number as the hash value for a NaN.
func f32hash(p unsafe.Pointer, h uintptr) uintptr {
	f := *(*float32)(p)
	if f == 0 {
		// +0 and -0 compare equal, so they must hash equal too.
		return trimHash(c1 * (c0 ^ h))
	}
	if f != f {
		// Any kind of NaN: randomized hash.
		return trimHash(c1 * (c0 ^ h ^ uintptr(rand())))
	}
	return memhash(p, h, 4)
}
// f64hash hashes the float64 at p; see the note above f32hash for the
// +0/-0 and NaN handling rationale.
func f64hash(p unsafe.Pointer, h uintptr) uintptr {
	switch f := *(*float64)(p); {
	case f == 0:
		return trimHash(c1 * (c0 ^ h)) // +0, -0
	case f != f:
		return trimHash(c1 * (c0 ^ h ^ uintptr(rand()))) // any kind of NaN
	default:
		return memhash(p, h, 8)
	}
}
// c64hash hashes a complex64 as its real part then its imaginary part,
// chaining through f32hash so the float zero/NaN rules apply to both.
func c64hash(p unsafe.Pointer, h uintptr) uintptr {
	x := (*[2]float32)(p)
	return f32hash(unsafe.Pointer(&x[1]), f32hash(unsafe.Pointer(&x[0]), h))
}

// c128hash is the complex128 analogue of c64hash.
func c128hash(p unsafe.Pointer, h uintptr) uintptr {
	x := (*[2]float64)(p)
	return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h))
}
// interhash hashes the non-empty-interface (iface) value at p with seed h,
// dispatching on the dynamic type.
func interhash(p unsafe.Pointer, h uintptr) uintptr {
	a := (*iface)(p)
	tab := a.tab
	if tab == nil {
		// Nil interface value: hash is just the seed.
		return h
	}
	t := tab.Type
	if t.Equal == nil {
		// Check hashability here. We could do this check inside
		// typehash, but we want to report the topmost type in
		// the error text (e.g. in a struct with a field of slice type
		// we want to report the struct, not the slice).
		panic(errorString("hash of unhashable type " + toRType(t).string()))
	}
	if t.IsDirectIface() {
		// The value is stored directly in the data word; hash that word.
		return trimHash(c1 * typehash(t, unsafe.Pointer(&a.data), h^c0))
	} else {
		// The data word points at the value; hash what it points to.
		return trimHash(c1 * typehash(t, a.data, h^c0))
	}
}
// nilinterhash hashes the empty-interface (eface) value at p with seed h;
// it is the eface analogue of interhash above.
//
// nilinterhash should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/anacrolix/stm
//   - github.com/aristanetworks/goarista
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname nilinterhash
func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
	a := (*eface)(p)
	t := a._type
	if t == nil {
		// Nil interface value: hash is just the seed.
		return h
	}
	if t.Equal == nil {
		// See comment in interhash above.
		panic(errorString("hash of unhashable type " + toRType(t).string()))
	}
	if t.IsDirectIface() {
		// Value lives directly in the data word.
		return trimHash(c1 * typehash(t, unsafe.Pointer(&a.data), h^c0))
	} else {
		return trimHash(c1 * typehash(t, a.data, h^c0))
	}
}
// typehash computes the hash of the object of type t at address p.
// h is the seed.
// This function is seldom used. Most maps use for hashing either
// fixed functions (e.g. f32hash) or compiler-generated functions
// (e.g. for a type like struct { x, y string }). This implementation
// is slower but more general and is used for hashing interface types
// (called from interhash or nilinterhash, above) or for hashing in
// maps generated by reflect.MapOf (reflect_typehash, below).
//
// typehash should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/puzpuzpuz/xsync/v2
//   - github.com/puzpuzpuz/xsync/v3
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname typehash
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
	if t.TFlag&abi.TFlagRegularMemory != 0 {
		// Plain memory: hash the raw bytes.
		// Handle ptr sizes specially, see issue 37086.
		switch t.Size_ {
		case 4:
			return memhash32(p, h)
		case 8:
			return memhash64(p, h)
		default:
			return memhash(p, h, t.Size_)
		}
	}
	switch t.Kind() {
	case abi.Float32:
		return f32hash(p, h)
	case abi.Float64:
		return f64hash(p, h)
	case abi.Complex64:
		return c64hash(p, h)
	case abi.Complex128:
		return c128hash(p, h)
	case abi.String:
		return strhash(p, h)
	case abi.Interface:
		// Empty and non-empty interfaces have different layouts.
		i := (*interfacetype)(unsafe.Pointer(t))
		if len(i.Methods) == 0 {
			return nilinterhash(p, h)
		}
		return interhash(p, h)
	case abi.Array:
		// Chain the element hashes in index order.
		a := (*arraytype)(unsafe.Pointer(t))
		for i := uintptr(0); i < a.Len; i++ {
			h = typehash(a.Elem, add(p, i*a.Elem.Size_), h)
		}
		return h
	case abi.Struct:
		// Chain the field hashes in declaration order, skipping
		// blank (_) fields.
		s := (*structtype)(unsafe.Pointer(t))
		for _, f := range s.Fields {
			if f.Name.IsBlank() {
				continue
			}
			h = typehash(f.Typ, add(p, f.Offset), h)
		}
		return h
	default:
		// Should never happen, as typehash should only be called
		// with comparable types.
		panic(errorString("hash of unhashable type " + toRType(t).string()))
	}
}
// reflect_typehash exposes typehash to package reflect (for maps built
// via reflect.MapOf).
//
//go:linkname reflect_typehash reflect.typehash
func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
	return typehash(t, p, h)
}
// The memequalN/f*equal/c*equal/strequal functions report whether the
// two values at p and q are equal, for the fixed size or type named in
// each function.

// memequal0 compares zero-size values, which are always equal.
func memequal0(p, q unsafe.Pointer) bool {
	return true
}
func memequal8(p, q unsafe.Pointer) bool {
	return *(*int8)(p) == *(*int8)(q)
}
func memequal16(p, q unsafe.Pointer) bool {
	return *(*int16)(p) == *(*int16)(q)
}
func memequal32(p, q unsafe.Pointer) bool {
	return *(*int32)(p) == *(*int32)(q)
}
func memequal64(p, q unsafe.Pointer) bool {
	return *(*int64)(p) == *(*int64)(q)
}
func memequal128(p, q unsafe.Pointer) bool {
	return *(*[2]int64)(p) == *(*[2]int64)(q)
}

// Float comparisons use ==, so NaN != NaN and +0 == -0 apply here.
func f32equal(p, q unsafe.Pointer) bool {
	return *(*float32)(p) == *(*float32)(q)
}
func f64equal(p, q unsafe.Pointer) bool {
	return *(*float64)(p) == *(*float64)(q)
}
func c64equal(p, q unsafe.Pointer) bool {
	return *(*complex64)(p) == *(*complex64)(q)
}
func c128equal(p, q unsafe.Pointer) bool {
	return *(*complex128)(p) == *(*complex128)(q)
}
func strequal(p, q unsafe.Pointer) bool {
	return *(*string)(p) == *(*string)(q)
}
// interequal reports whether the two iface values at p and q are equal:
// identical itab, then data equality per ifaceeq.
func interequal(p, q unsafe.Pointer) bool {
	x := *(*iface)(p)
	y := *(*iface)(q)
	return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data)
}

// nilinterequal reports whether the two eface values at p and q are equal:
// identical dynamic type, then data equality per efaceeq.
func nilinterequal(p, q unsafe.Pointer) bool {
	x := *(*eface)(p)
	y := *(*eface)(q)
	return x._type == y._type && efaceeq(x._type, x.data, y.data)
}
// efaceeq reports whether two empty-interface payloads with shared
// dynamic type t and data words x and y are equal. It panics if t is
// not comparable.
func efaceeq(t *_type, x, y unsafe.Pointer) bool {
	if t == nil {
		// Both interfaces are nil.
		return true
	}
	eq := t.Equal
	if eq == nil {
		panic(errorString("comparing uncomparable type " + toRType(t).string()))
	}
	if t.IsDirectIface() {
		// Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof.
		// Maps and funcs are not comparable, so they can't reach here.
		// Ptrs, chans, and single-element items can be compared directly using ==.
		return x == y
	}
	// Indirect: compare the pointed-to values with the type's equal func.
	return eq(x, y)
}
// ifaceeq is the non-empty-interface analogue of efaceeq: tab identifies
// the shared dynamic type of the data words x and y.
func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
	if tab == nil {
		// Both interfaces are nil.
		return true
	}
	t := tab.Type
	eq := t.Equal
	if eq == nil {
		panic(errorString("comparing uncomparable type " + toRType(t).string()))
	}
	if t.IsDirectIface() {
		// See comment in efaceeq.
		return x == y
	}
	return eq(x, y)
}
// Testing adapters for hash quality tests (see hash_test.go)
//
// stringHash should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/k14s/starlark-go
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname stringHash
func stringHash(s string, seed uintptr) uintptr {
	// noescape hides &s from escape analysis so the conversion does not
	// force s onto the heap.
	return strhash(noescape(unsafe.Pointer(&s)), seed)
}
// bytesHash is a testing adapter hashing the contents of b with seed.
func bytesHash(b []byte, seed uintptr) uintptr {
	s := (*slice)(unsafe.Pointer(&b))
	return memhash(s.array, seed, uintptr(s.len))
}

// int32Hash is a testing adapter for the 4-byte hash.
func int32Hash(i uint32, seed uintptr) uintptr {
	return memhash32(noescape(unsafe.Pointer(&i)), seed)
}

// int64Hash is a testing adapter for the 8-byte hash.
func int64Hash(i uint64, seed uintptr) uintptr {
	return memhash64(noescape(unsafe.Pointer(&i)), seed)
}

// efaceHash is a testing adapter for empty-interface hashing.
func efaceHash(i any, seed uintptr) uintptr {
	return nilinterhash(noescape(unsafe.Pointer(&i)), seed)
}

// ifaceHash is a testing adapter for non-empty-interface hashing.
func ifaceHash(i interface {
	F()
}, seed uintptr) uintptr {
	return interhash(noescape(unsafe.Pointer(&i)), seed)
}
// hashRandomBytes is the size of the random key material for the AES
// hash, scaled with pointer size.
const hashRandomBytes = goarch.PtrSize / 4 * 64

// used in asm_{386,amd64,arm64}.s to seed the hash function
var aeskeysched [hashRandomBytes]byte

// used in hash{32,64}.go to seed the hash function
var hashkey [4]uintptr
// alginit selects the hash implementation at startup: the AES-based
// assembly hash when the CPU supports the required instructions,
// otherwise the portable hash seeded from bootstrapRand.
func alginit() {
	// Install AES hash algorithms if the instructions needed are present.
	if (GOARCH == "386" || GOARCH == "amd64") &&
		cpu.X86.HasAES && // AESENC
		cpu.X86.HasSSSE3 && // PSHUFB
		cpu.X86.HasSSE41 { // PINSR{D,Q}
		initAlgAES()
		return
	}
	if GOARCH == "arm64" && cpu.ARM64.HasAES {
		initAlgAES()
		return
	}
	// No AES support: seed the software hash (see hash{32,64}.go).
	for i := range hashkey {
		hashkey[i] = uintptr(bootstrapRand())
	}
}
// initAlgAES enables the AES-based hash and fills its key schedule
// with random data.
func initAlgAES() {
	useAeshash = true
	// Initialize with random data so hash collisions will be hard to engineer.
	key := (*[hashRandomBytes / 8]uint64)(unsafe.Pointer(&aeskeysched))
	for i := range key {
		key[i] = bootstrapRand()
	}
}
// Note: These routines perform the read with a native endianness.
func readUnaligned32(p unsafe.Pointer) uint32 {
	buf := (*[4]byte)(p)[:]
	if !goarch.BigEndian {
		return byteorder.LEUint32(buf)
	}
	return byteorder.BEUint32(buf)
}
// readUnaligned64 reads an 8-byte native-endian value from p,
// which need not be aligned.
func readUnaligned64(p unsafe.Pointer) uint64 {
	buf := (*[8]byte)(p)[:]
	if !goarch.BigEndian {
		return byteorder.LEUint64(buf)
	}
	return byteorder.BEUint64(buf)
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Implementation of (safe) user arenas.
//
// This file contains the implementation of user arenas wherein Go values can
// be manually allocated and freed in bulk. The act of manually freeing memory,
// potentially before a GC cycle, means that a garbage collection cycle can be
// delayed, improving efficiency by reducing GC cycle frequency. There are other
// potential efficiency benefits, such as improved locality and access to a more
// efficient allocation strategy.
//
// What makes the arenas here safe is that once they are freed, accessing the
// arena's memory will cause an explicit program fault, and the arena's address
// space will not be reused until no more pointers into it are found. There's one
// exception to this: if an arena allocated memory that isn't exhausted, it's placed
// back into a pool for reuse. This means that a crash is not always guaranteed.
//
// While this may seem unsafe, it still prevents memory corruption, and is in fact
// necessary in order to make new(T) a valid implementation of arenas. Such a property
// is desirable to allow for a trivial implementation. (It also avoids complexities
// that arise from synchronization with the GC when trying to set the arena chunks to
// fault while the GC is active.)
//
// The implementation works in layers. At the bottom, arenas are managed in chunks.
// Each chunk must be a multiple of the heap arena size, or the heap arena size must
// be divisible by the arena chunks. The address space for each chunk, and each
// corresponding heapArena for that address space, are eternally reserved for use as
// arena chunks. That is, they can never be used for the general heap. Each chunk
// is also represented by a single mspan, and is modeled as a single large heap
// allocation. It must be, because each chunk contains ordinary Go values that may
// point into the heap, so it must be scanned just like any other object. Any
// pointer into a chunk will therefore always cause the whole chunk to be scanned
// while its corresponding arena is still live.
//
// Chunks may be allocated either from new memory mapped by the OS on our behalf,
// or by reusing old freed chunks. When chunks are freed, their underlying memory
// is returned to the OS, set to fault on access, and may not be reused until the
// program doesn't point into the chunk anymore (the code refers to this state as
// "quarantined"), a property checked by the GC.
//
// The sweeper handles moving chunks out of this quarantine state to be ready for
// reuse. When the chunk is placed into the quarantine state, its corresponding
// span is marked as noscan so that the GC doesn't try to scan memory that would
// cause a fault.
//
// At the next layer are the user arenas themselves. They consist of a single
// active chunk which new Go values are bump-allocated into and a list of chunks
// that were exhausted when allocating into the arena. Once the arena is freed,
// it frees all full chunks it references, and places the active one onto a reuse
// list for a future arena to use. Each arena keeps its list of referenced chunks
// explicitly live until it is freed. Each user arena also maps to an object which
// has a finalizer attached that ensures the arena's chunks are all freed even if
// the arena itself is never explicitly freed.
//
// Pointer-ful memory is bump-allocated from low addresses to high addresses in each
// chunk, while pointer-free memory is bump-allocated from high address to low
// addresses. The reason for this is to take advantage of a GC optimization wherein
// the GC will stop scanning an object when there are no more pointers in it, which
// also allows us to elide clearing the heap bitmap for pointer-free Go values
// allocated into arenas.
//
// Note that arenas are not safe to use concurrently.
//
// In summary, there are 2 resources: arenas, and arena chunks. They exist in the
// following lifecycle:
//
// (1) A new arena is created via newArena.
// (2) Chunks are allocated to hold memory allocated into the arena with new or slice.
// (a) Chunks are first allocated from the reuse list of partially-used chunks.
// (b) If there are no such chunks, then chunks on the ready list are taken.
// (c) Failing all the above, memory for a new chunk is mapped.
// (3) The arena is freed, or all references to it are dropped, triggering its finalizer.
// (a) If the GC is not active, exhausted chunks are set to fault and placed on a
// quarantine list.
// (b) If the GC is active, exhausted chunks are placed on a fault list and will
// go through step (a) at a later point in time.
// (c) Any remaining partially-used chunk is placed on a reuse list.
// (4) Once no more pointers are found into quarantined arena chunks, the sweeper
// takes these chunks out of quarantine and places them on the ready list.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
)
// Functions starting with arena_ are meant to be exported to downstream users
// of arenas. They should wrap these functions in a higher-level API.
//
// The underlying arena and its resources are managed through an opaque unsafe.Pointer.
// arena_newArena is a wrapper around newUserArena.
//
//go:linkname arena_newArena arena.runtime_arena_newArena
func arena_newArena() unsafe.Pointer {
	// The *userArena is handed to the arena package as an opaque pointer;
	// it comes back in through the other arena_* wrappers below.
	return unsafe.Pointer(newUserArena())
}
// arena_arena_New is a wrapper around (*userArena).new, except that typ
// is an any (must be a *_type, still) and typ must be a type descriptor
// for a pointer to the type to actually be allocated, i.e. pass a *T
// to allocate a T. This is necessary because this function returns a *T.
//
//go:linkname arena_arena_New arena.runtime_arena_arena_New
func arena_arena_New(arena unsafe.Pointer, typ any) any {
	t := (*_type)(efaceOf(&typ).data)
	if t.Kind() != abi.Pointer {
		throw("arena_New: non-pointer type")
	}
	// Allocate the element type (T, not *T) in the arena...
	te := (*ptrtype)(unsafe.Pointer(t)).Elem
	x := ((*userArena)(arena)).new(te)
	// ...then hand the result back as an interface value holding a *T.
	var result any
	e := efaceOf(&result)
	e._type = t
	e.data = x
	return result
}
// arena_arena_Slice is a wrapper around (*userArena).slice.
//
//go:linkname arena_arena_Slice arena.runtime_arena_arena_Slice
func arena_arena_Slice(arena unsafe.Pointer, slice any, cap int) {
	a := (*userArena)(arena)
	a.slice(slice, cap)
}
// arena_arena_Free is a wrapper around (*userArena).free.
//
//go:linkname arena_arena_Free arena.runtime_arena_arena_Free
func arena_arena_Free(arena unsafe.Pointer) {
	a := (*userArena)(arena)
	a.free()
}
// arena_heapify takes a value that lives in an arena and makes a copy
// of it on the heap. Values that don't live in an arena are returned unmodified.
//
// Accepts a string, a slice, or a pointer value; any other kind panics.
//
//go:linkname arena_heapify arena.runtime_arena_heapify
func arena_heapify(s any) any {
	var v unsafe.Pointer
	e := efaceOf(&s)
	t := e._type
	// Extract the backing data pointer of the value so that we can test
	// whether it lives inside a user arena chunk.
	switch t.Kind() {
	case abi.String:
		v = stringStructOf((*string)(e.data)).str
	case abi.Slice:
		v = (*slice)(e.data).array
	case abi.Pointer:
		v = e.data
	default:
		panic("arena: Clone only supports pointers, slices, and strings")
	}
	span := spanOf(uintptr(v))
	if span == nil || !span.isUserArenaChunk {
		// Not stored in a user arena chunk.
		return s
	}
	// Heap-allocate storage for a copy.
	var x any
	switch t.Kind() {
	case abi.String:
		s1 := s.(string)
		s2, b := rawstring(len(s1))
		copy(b, s1)
		x = s2
	case abi.Slice:
		// NOTE: this local shadows the builtin len; it is the slice's length.
		len := (*slice)(e.data).len
		et := (*slicetype)(unsafe.Pointer(t)).Elem
		sl := new(slice)
		*sl = slice{makeslicecopy(et, len, len, (*slice)(e.data).array), len, len}
		xe := efaceOf(&x)
		xe._type = t
		xe.data = unsafe.Pointer(sl)
	case abi.Pointer:
		et := (*ptrtype)(unsafe.Pointer(t)).Elem
		e2 := newobject(et)
		// typedmemmove copies with write barriers so the new heap object's
		// pointers are visible to the GC.
		typedmemmove(et, e2, e.data)
		xe := efaceOf(&x)
		xe._type = t
		xe.data = e2
	}
	return x
}
const (
	// userArenaChunkBytesMax is the maximum size of a user arena chunk.
	userArenaChunkBytesMax = 8 << 20

	// userArenaChunkBytes is the size of a user arena chunk.
	//
	// The branch-free constant expression below computes
	// min(userArenaChunkBytesMax, heapArenaBytes): the arithmetic
	// right shift by 63 yields an all-ones mask exactly when the
	// difference is negative, selecting whichever bound is smaller.
	// (The builtin min can't be used in this constant context.)
	userArenaChunkBytes = uintptr(int64(userArenaChunkBytesMax-heapArenaBytes)&(int64(userArenaChunkBytesMax-heapArenaBytes)>>63) + heapArenaBytes) // min(userArenaChunkBytesMax, heapArenaBytes)

	// userArenaChunkPages is the number of pages a user arena chunk uses.
	userArenaChunkPages = userArenaChunkBytes / pageSize

	// userArenaChunkMaxAllocBytes is the maximum size of an object that can
	// be allocated from an arena. This number is chosen to cap worst-case
	// fragmentation of user arenas to 25%. Larger allocations are redirected
	// to the heap.
	userArenaChunkMaxAllocBytes = userArenaChunkBytes / 4
)
// init validates the platform-dependent chunk-size constants above and
// initializes the lock protecting the global user arena state.
func init() {
	if userArenaChunkPages*pageSize != userArenaChunkBytes {
		throw("user arena chunk size is not a multiple of the page size")
	}
	if userArenaChunkBytes%physPageSize != 0 {
		throw("user arena chunk size is not a multiple of the physical page size")
	}
	if userArenaChunkBytes < heapArenaBytes {
		// Several chunks share one heap arena; they must tile it exactly.
		if heapArenaBytes%userArenaChunkBytes != 0 {
			throw("user arena chunk size is smaller than a heap arena, but doesn't divide it")
		}
	} else {
		// One chunk spans one or more whole heap arenas.
		if userArenaChunkBytes%heapArenaBytes != 0 {
			throw("user arena chunks size is larger than a heap arena, but not a multiple")
		}
	}
	lockInit(&userArenaState.lock, lockRankUserArenaState)
}
// userArenaChunkReserveBytes returns the number of additional bytes to
// reserve at the end of each user arena chunk for heap metadata.
func userArenaChunkReserveBytes() uintptr {
	// The end of the chunk holds a pointer/scalar bitmap with one bit per
	// pointer-word of the chunk, plus a dummy _type value that refers to
	// the bitmap. The dummy _type's PtrBytes field indicates how many of
	// those bits are valid.
	bitmapBytes := userArenaChunkBytes / goarch.PtrSize / 8
	return bitmapBytes + unsafe.Sizeof(_type{})
}
// userArena is the runtime-side representation of a single user arena.
// It bump-allocates out of one active chunk at a time and remembers every
// chunk it has ever used so their memory stays reachable until free.
type userArena struct {
	// fullList is a list of full chunks that have not enough free memory left, and
	// that we'll free once this user arena is freed.
	//
	// Can't use mSpanList here because it's not-in-heap.
	fullList *mspan

	// active is the user arena chunk we're currently allocating into.
	active *mspan

	// refs is a set of references to the arena chunks so that they're kept alive.
	//
	// The last reference in the list always refers to active, while the rest of
	// them correspond to fullList. Specifically, the head of fullList is the
	// second-to-last one, fullList.next is the third-to-last, and so on.
	//
	// In other words, every time a new chunk becomes active, its appended to this
	// list.
	refs []unsafe.Pointer

	// defunct is true if free has been called on this arena.
	//
	// This is just a best-effort way to discover a concurrent allocation
	// and free. Also used to detect a double-free.
	defunct atomic.Bool
}
// newUserArena creates a new userArena ready to be used.
func newUserArena() *userArena {
	a := new(userArena)
	SetFinalizer(a, func(a *userArena) {
		// If the arena handle is dropped without being freed, then call
		// free on the arena so its chunks are recovered through the
		// arena reuse/quarantine machinery (arena chunks are never
		// reclaimed directly by the garbage collector).
		a.free()
	})
	// Eagerly acquire a chunk so new/slice never observe a nil active chunk.
	a.refill()
	return a
}
// new allocates a new object of the provided type into the arena, and returns
// its pointer.
//
// This operation is not safe to call concurrently with other operations on the
// same arena.
func (a *userArena) new(typ *_type) unsafe.Pointer {
	// cap == -1 means "one value of typ", not a slice backing store.
	return a.alloc(typ, -1)
}
// slice allocates a new slice backing store in the arena. sl must be a
// pointer to a slice (i.e. *[]T), because slice writes the new header
// through it directly.
//
// cap determines the capacity of the slice backing store and must be
// non-negative.
//
// This operation is not safe to call concurrently with other operations on
// the same arena.
func (a *userArena) slice(sl any, cap int) {
	if cap < 0 {
		panic("userArena.slice: negative cap")
	}
	// Peel *[]T down to the element type T, validating each layer.
	eface := efaceOf(&sl)
	t := eface._type
	if t.Kind() != abi.Pointer {
		panic("slice result of non-ptr type")
	}
	t = (*ptrtype)(unsafe.Pointer(t)).Elem
	if t.Kind() != abi.Slice {
		panic("slice of non-ptr-to-slice type")
	}
	t = (*slicetype)(unsafe.Pointer(t)).Elem
	// t is now the element type of the slice we want to allocate.
	store := a.alloc(t, cap)
	*(*slice)(eface.data) = slice{store, cap, cap}
}
// free returns the userArena's chunks back to mheap and marks it as defunct.
//
// Must be called at most once for any given arena.
//
// This operation is not safe to call concurrently with other operations on the
// same arena.
func (a *userArena) free() {
	// Check for a double-free.
	if a.defunct.Load() {
		panic("arena double free")
	}

	// Mark ourselves as defunct.
	a.defunct.Store(true)
	// The finalizer would call free again; it's no longer needed.
	SetFinalizer(a, nil)

	// Free all the full arenas.
	//
	// The refs on this list are in reverse order from the second-to-last:
	// the head of fullList pairs with refs[len(refs)-2], the next with
	// refs[len(refs)-3], and so on. refs[len(refs)-1] is active's.
	s := a.fullList
	i := len(a.refs) - 2
	for s != nil {
		a.fullList = s.next
		s.next = nil
		freeUserArenaChunk(s, a.refs[i])
		s = a.fullList
		i--
	}
	if a.fullList != nil || i >= 0 {
		// There's still something left on the full list, or we
		// failed to actually iterate over the entire refs list.
		throw("full list doesn't match refs list in length")
	}

	// Put the active chunk onto the reuse list.
	//
	// Note that active's reference is always the last reference in refs.
	s = a.active
	if s != nil {
		if raceenabled || msanenabled || asanenabled {
			// Don't reuse arenas with sanitizers enabled. We want to catch
			// any use-after-free errors aggressively.
			freeUserArenaChunk(s, a.refs[len(a.refs)-1])
		} else {
			lock(&userArenaState.lock)
			userArenaState.reuse = append(userArenaState.reuse, liveUserArenaChunk{s, a.refs[len(a.refs)-1]})
			unlock(&userArenaState.lock)
		}
	}
	// nil out a.active so that a race with freeing will more likely cause a crash.
	a.active = nil
	a.refs = nil
}
// alloc reserves space in the current chunk, or calls refill and reserves
// space in a new chunk. If cap is negative, the type will be taken
// literally; otherwise it is treated as an element type for a slice
// backing store with capacity cap.
func (a *userArena) alloc(typ *_type, cap int) unsafe.Pointer {
	s := a.active
	for {
		if x := s.userArenaNextFree(typ, cap); x != nil {
			return x
		}
		// The active chunk couldn't fit the allocation; retire it and
		// retry with a fresh chunk.
		s = a.refill()
	}
}
// refill inserts the current arena chunk onto the full list and obtains a new
// one, either from the partial list or allocating a new one, both from mheap.
func (a *userArena) refill() *mspan {
	// If there's an active chunk, assume it's full.
	s := a.active
	if s != nil {
		if s.userArenaChunkFree.size() > userArenaChunkMaxAllocBytes {
			// It's difficult to tell when we're actually out of memory
			// in a chunk because the allocation that failed may still leave
			// some free space available. However, that amount of free space
			// should never exceed the maximum allocation size.
			throw("wasted too much memory in an arena chunk")
		}
		// Push onto fullList; its ref stays in a.refs (second-to-last slot).
		s.next = a.fullList
		a.fullList = s
		a.active = nil
		s = nil
	}

	var x unsafe.Pointer

	// Check the partially-used list.
	lock(&userArenaState.lock)
	if len(userArenaState.reuse) > 0 {
		// Pick off the last arena chunk from the list.
		n := len(userArenaState.reuse) - 1
		x = userArenaState.reuse[n].x
		s = userArenaState.reuse[n].mspan
		// Clear the slot before truncating so the backing array doesn't
		// keep the chunk alive.
		userArenaState.reuse[n].x = nil
		userArenaState.reuse[n].mspan = nil
		userArenaState.reuse = userArenaState.reuse[:n]
	}
	unlock(&userArenaState.lock)
	if s == nil {
		// Allocate a new one.
		x, s = newUserArenaChunk()
		if s == nil {
			throw("out of memory")
		}
	}
	// Record the keep-alive reference; the last entry always belongs to
	// the active chunk.
	a.refs = append(a.refs, x)
	a.active = s
	return s
}
// liveUserArenaChunk pairs a user arena chunk's span with a pointer to its
// base, which keeps the chunk's memory reachable while it sits on a list.
type liveUserArenaChunk struct {
	*mspan // Must represent a user arena chunk.

	// Reference to mspan.base() to keep the chunk alive.
	x unsafe.Pointer
}
// userArenaState is the global state shared by all user arenas.
var userArenaState struct {
	lock mutex

	// reuse contains a list of partially-used and already-live
	// user arena chunks that can be quickly reused for another
	// arena.
	//
	// Protected by lock.
	reuse []liveUserArenaChunk

	// fault contains full user arena chunks that need to be faulted.
	// Chunks land here when freed during a GC cycle and are faulted
	// later, once the GC is off.
	//
	// Protected by lock.
	fault []liveUserArenaChunk
}
// userArenaNextFree reserves space in the user arena for an item of the specified
// type. If cap is not -1, this is for an array of cap elements of type t.
//
// Returns nil if the chunk doesn't have enough room, in which case the caller
// is expected to refill and retry.
func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
	size := typ.Size_
	if cap > 0 {
		if size > ^uintptr(0)/uintptr(cap) {
			// Overflow.
			throw("out of memory")
		}
		size *= uintptr(cap)
	}
	if size == 0 || cap == 0 {
		// Zero-sized allocations share the canonical zero-size address.
		return unsafe.Pointer(&zerobase)
	}
	if size > userArenaChunkMaxAllocBytes {
		// Redirect allocations that don't fit into a chunk well directly
		// from the heap.
		if cap >= 0 {
			return newarray(typ, cap)
		}
		return newobject(typ)
	}

	// Prevent preemption as we set up the space for a new object.
	//
	// Act like we're allocating.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	var ptr unsafe.Pointer
	if !typ.Pointers() {
		// Allocate pointer-less objects from the tail end of the chunk.
		// Keeping scalars at the back lets the GC stop scanning early and
		// avoids writing heap bitmap bits for them.
		v, ok := s.userArenaChunkFree.takeFromBack(size, typ.Align_)
		if ok {
			ptr = unsafe.Pointer(v)
		}
	} else {
		// Pointer-ful objects grow from the front of the chunk.
		v, ok := s.userArenaChunkFree.takeFromFront(size, typ.Align_)
		if ok {
			ptr = unsafe.Pointer(v)
		}
	}
	if ptr == nil {
		// Failed to allocate.
		mp.mallocing = 0
		releasem(mp)
		return nil
	}
	if s.needzero != 0 {
		throw("arena chunk needs zeroing, but should already be zeroed")
	}
	// Set up heap bitmap and do extra accounting.
	if typ.Pointers() {
		if cap >= 0 {
			userArenaHeapBitsSetSliceType(typ, cap, ptr, s)
		} else {
			userArenaHeapBitsSetType(typ, ptr, s)
		}
		c := getMCache(mp)
		if c == nil {
			throw("mallocgc called without a P or outside bootstrapping")
		}
		if cap > 0 {
			// For a backing store, only the last element's pointer-less
			// tail is unscanned; everything before it is scanned.
			c.scanAlloc += size - (typ.Size_ - typ.PtrBytes)
		} else {
			c.scanAlloc += typ.PtrBytes
		}
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make ptr observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	mp.mallocing = 0
	releasem(mp)

	return ptr
}
// userArenaHeapBitsSetSliceType is the equivalent of heapBitsSetType but for
// Go slice backing store values allocated in a user arena chunk. It sets up the
// heap bitmap for n consecutive values with type typ allocated at address ptr.
func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, s *mspan) {
	total, overflow := math.MulUintptr(typ.Size_, uintptr(n))
	if overflow || n < 0 || total > maxAlloc {
		panic(plainError("runtime: allocation size out of range"))
	}
	// Elements are laid out back-to-back starting at ptr; write each
	// element's bitmap in turn.
	for i := uintptr(0); i < uintptr(n); i++ {
		userArenaHeapBitsSetType(typ, add(ptr, i*typ.Size_), s)
	}
}
// userArenaHeapBitsSetType is the equivalent of heapSetType but for
// non-slice-backing-store Go values allocated in a user arena chunk. It
// sets up the type metadata for the value with type typ allocated at address ptr.
// base is the base address of the arena chunk.
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
	base := s.base()
	h := s.writeUserArenaHeapBits(uintptr(ptr))

	p := getGCMask(typ) // start of 1-bit pointer mask
	nb := typ.PtrBytes / goarch.PtrSize

	// Copy the type's pointer mask into the chunk's bitmap, one
	// bitmap-word-sized group of bits at a time.
	for i := uintptr(0); i < nb; i += ptrBits {
		k := nb - i
		if k > ptrBits {
			k = ptrBits
		}
		// N.B. On big endian platforms we byte swap the data that we
		// read from GCData, which is always stored in little-endian order
		// by the compiler. writeUserArenaHeapBits handles data in
		// a platform-ordered way for efficiency, but stores back the
		// data in little endian order, since we expose the bitmap through
		// a dummy type.
		h = h.write(s, readUintptr(addb(p, i/8)), k)
	}
	// Note: we call pad here to ensure we emit explicit 0 bits
	// for the pointerless tail of the object. This ensures that
	// there's only a single noMorePtrs mark for the next object
	// to clear. We don't need to do this to clear stale noMorePtrs
	// markers from previous uses because arena chunk pointer bitmaps
	// are always fully cleared when reused.
	h = h.pad(s, typ.Size_-typ.PtrBytes)
	h.flush(s, uintptr(ptr), typ.Size_)

	// Update the PtrBytes value in the type information. After this
	// point, the GC will observe the new bitmap.
	s.largeType.PtrBytes = uintptr(ptr) - base + typ.PtrBytes

	// Double-check that the bitmap was written out correctly.
	const doubleCheck = false
	if doubleCheck {
		doubleCheckHeapPointersInterior(uintptr(ptr), uintptr(ptr), typ.Size_, typ.Size_, typ, &s.largeType, s)
	}
}
// writeUserArenaHeapBits is a small accumulator for streaming pointer/scalar
// bits into a user arena chunk's heap bitmap. It buffers up to one bitmap
// word of bits before spilling to memory.
type writeUserArenaHeapBits struct {
	offset uintptr // offset in span that the low bit of mask represents the pointer state of.
	mask   uintptr // some pointer bits starting at the address addr.
	valid  uintptr // number of bits in buf that are valid (including low)
	low    uintptr // number of low-order bits to not overwrite
}
// writeUserArenaHeapBits returns a bit accumulator positioned to write the
// heap bitmap bits for the object at addr within the chunk's span.
func (s *mspan) writeUserArenaHeapBits(addr uintptr) (h writeUserArenaHeapBits) {
	offset := addr - s.base()

	// We start writing bits maybe in the middle of a heap bitmap word.
	// Remember how many bits into the word we started, so we can be sure
	// not to overwrite the previous bits.
	h.low = offset / goarch.PtrSize % ptrBits

	// round down to heap word that starts the bitmap word.
	h.offset = offset - h.low*goarch.PtrSize

	// We don't have any bits yet.
	h.mask = 0
	h.valid = h.low

	return
}
// write appends the pointerness of the next valid pointer slots
// using the low valid bits of bits. 1=pointer, 0=scalar.
//
// The accumulator is returned by value; callers must use the result.
func (h writeUserArenaHeapBits) write(s *mspan, bits, valid uintptr) writeUserArenaHeapBits {
	if h.valid+valid <= ptrBits {
		// Fast path - just accumulate the bits.
		h.mask |= bits << h.valid
		h.valid += valid
		return h
	}
	// Too many bits to fit in this word. Write the current word
	// out and move on to the next word.

	data := h.mask | bits<<h.valid       // mask for this word
	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them

	// Flush mask to the memory bitmap.
	idx := h.offset / (ptrBits * goarch.PtrSize)
	// Preserve the h.low bits already present in the word below us.
	m := uintptr(1)<<h.low - 1
	bitmap := s.heapBits()
	bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx])&m | data)
	// Note: no synchronization required for this write because
	// the allocator has exclusive access to the page, and the bitmap
	// entries are all for a single page. Also, visibility of these
	// writes is guaranteed by the publication barrier in mallocgc.

	// Move to next word of bitmap.
	h.offset += ptrBits * goarch.PtrSize
	h.low = 0
	return h
}
// pad appends size bytes' worth of explicit zero (scalar) bits to the
// accumulator, one bitmap word at a time.
func (h writeUserArenaHeapBits) pad(s *mspan, size uintptr) writeUserArenaHeapBits {
	if size == 0 {
		return h
	}
	words := size / goarch.PtrSize
	for ; words > ptrBits; words -= ptrBits {
		h = h.write(s, 0, ptrBits)
	}
	return h.write(s, 0, words)
}
// flush writes out the bits that have been accumulated, and adds zeros as
// needed to cover the full object [addr, addr+size).
func (h writeUserArenaHeapBits) flush(s *mspan, addr, size uintptr) {
	offset := addr - s.base()

	// zeros counts the number of bits needed to represent the object minus the
	// number of bits we've already written. This is the number of 0 bits
	// that need to be added.
	zeros := (offset+size-h.offset)/goarch.PtrSize - h.valid

	// Add zero bits up to the bitmap word boundary
	if zeros > 0 {
		z := ptrBits - h.valid
		if z > zeros {
			z = zeros
		}
		h.valid += z
		zeros -= z
	}

	// Find word in bitmap that we're going to write.
	bitmap := s.heapBits()
	idx := h.offset / (ptrBits * goarch.PtrSize)

	// Write remaining bits.
	if h.valid != h.low {
		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
		bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx])&m | h.mask)
	}
	if zeros == 0 {
		return
	}

	// Advance to next bitmap word.
	h.offset += ptrBits * goarch.PtrSize

	// Continue on writing zeros for the rest of the object.
	// For standard use of the ptr bits this is not required, as
	// the bits are read from the beginning of the object. Some uses,
	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
	// start mid-object, so these writes are still required.
	for {
		// Write zero bits.
		idx := h.offset / (ptrBits * goarch.PtrSize)
		if zeros < ptrBits {
			// Partial word: clear only the low zeros bits, keep the rest.
			bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx]) &^ (uintptr(1)<<zeros - 1))
			break
		} else if zeros == ptrBits {
			bitmap[idx] = 0
			break
		} else {
			bitmap[idx] = 0
			zeros -= ptrBits
		}
		h.offset += ptrBits * goarch.PtrSize
	}
}
// bswapIfBigEndian swaps the byte order of the uintptr on goarch.BigEndian
// platforms, and leaves it alone elsewhere.
func bswapIfBigEndian(x uintptr) uintptr {
	if !goarch.BigEndian {
		return x
	}
	if goarch.PtrSize == 8 {
		return uintptr(sys.Bswap64(uint64(x)))
	}
	return uintptr(sys.Bswap32(uint32(x)))
}
// newUserArenaChunk allocates a user arena chunk, which maps to a single
// heap arena and single span. Returns a pointer to the base of the chunk
// (this is really important: we need to keep the chunk alive) and the span.
func newUserArenaChunk() (unsafe.Pointer, *mspan) {
	if gcphase == _GCmarktermination {
		throw("newUserArenaChunk called with gcphase == _GCmarktermination")
	}

	// Deduct assist credit. Because user arena chunks are modeled as one
	// giant heap object which counts toward heapLive, we're obligated to
	// assist the GC proportionally (and it's worth noting that the arena
	// does represent additional work for the GC, but we also have no idea
	// what that looks like until we actually allocate things into the
	// arena).
	if gcBlackenEnabled != 0 {
		deductAssistCredit(userArenaChunkBytes)
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	// Allocate a new user arena.
	var span *mspan
	systemstack(func() {
		span = mheap_.allocUserArenaChunk()
	})
	if span == nil {
		throw("out of memory")
	}
	x := unsafe.Pointer(span.base())

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(span, span.base())
	}

	if raceenabled {
		// TODO(mknyszek): Track individual objects.
		racemalloc(unsafe.Pointer(span.base()), span.elemsize)
	}

	if msanenabled {
		// TODO(mknyszek): Track individual objects.
		msanmalloc(unsafe.Pointer(span.base()), span.elemsize)
	}

	if asanenabled {
		// TODO(mknyszek): Track individual objects.
		// N.B. span.elemsize includes a redzone already.
		rzStart := span.base() + span.elemsize
		asanpoison(unsafe.Pointer(rzStart), span.limit-rzStart)
		asanunpoison(unsafe.Pointer(span.base()), span.elemsize)
	}

	// Sample the whole chunk for the memory profiler, as with a large
	// heap allocation.
	if rate := MemProfileRate; rate > 0 {
		c := getMCache(mp)
		if c == nil {
			throw("newUserArenaChunk called without a P or outside bootstrapping")
		}
		// Note cache c only valid while m acquired; see #47302
		if rate != 1 && int64(userArenaChunkBytes) < c.nextSample {
			c.nextSample -= int64(userArenaChunkBytes)
		} else {
			profilealloc(mp, unsafe.Pointer(span.base()), userArenaChunkBytes)
		}
	}
	mp.mallocing = 0
	releasem(mp)

	// Again, because this chunk counts toward heapLive, potentially trigger a GC.
	if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
		gcStart(t)
	}

	if debug.malloc {
		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.bytes += uint64(userArenaChunkBytes)
		}
	}

	// Double-check it's aligned to the physical page size. Based on the current
	// implementation this is trivially true, but it need not be in the future.
	// However, if it's not aligned to the physical page size then we can't properly
	// set it to fault later.
	if uintptr(x)%physPageSize != 0 {
		throw("user arena chunk is not aligned to the physical page size")
	}

	return x, span
}
// isUnusedUserArenaChunk indicates that the arena chunk has been set to fault
// and doesn't contain any scannable memory anymore. However, it might still be
// mSpanInUse as it sits on the quarantine list, since it needs to be swept.
//
// This is not safe to execute unless the caller has ownership of the mspan or
// the world is stopped (preemption is prevented while the relevant state changes).
//
// This is really only meant to be used by accounting tests in the runtime to
// distinguish when a span shouldn't be counted (since mSpanInUse might not be
// enough).
func (s *mspan) isUnusedUserArenaChunk() bool {
	// setUserArenaChunkToFault switches the span class to noscan (see below),
	// which is what distinguishes a quarantined chunk from a live one.
	return s.isUserArenaChunk && s.spanclass == makeSpanClass(0, true)
}
// setUserArenaChunkToFault sets the address space for the user arena chunk to fault
// and releases any underlying memory resources.
//
// Must be in a non-preemptible state to ensure the consistency of statistics
// exported to MemStats.
func (s *mspan) setUserArenaChunkToFault() {
	if !s.isUserArenaChunk {
		throw("invalid span in heapArena for user arena")
	}
	if s.npages*pageSize != userArenaChunkBytes {
		throw("span on userArena.faultList has invalid size")
	}

	// Update the span class to be noscan. What we want to happen is that
	// any pointer into the span keeps it from getting recycled, so we want
	// the mark bit to get set, but we're about to set the address space to fault,
	// so we have to prevent the GC from scanning this memory.
	//
	// It's OK to set it here because (1) a GC isn't in progress, so the scanning code
	// won't make a bad decision, (2) we're currently non-preemptible and in the runtime,
	// so a GC is blocked from starting. We might race with sweeping, which could
	// put it on the "wrong" sweep list, but really don't care because the chunk is
	// treated as a large object span and there's no meaningful difference between scan
	// and noscan large objects in the sweeper. The STW at the start of the GC acts as a
	// barrier for this update.
	s.spanclass = makeSpanClass(0, true)

	// Actually set the arena chunk to fault, so we'll get dangling pointer errors.
	// sysFault currently uses a method on each OS that forces it to evacuate all
	// memory backing the chunk.
	sysFault(unsafe.Pointer(s.base()), s.npages*pageSize)

	// Everything on the list is counted as in-use, however sysFault transitions to
	// Reserved, not Prepared, so we skip updating heapFree or heapReleased and just
	// remove the memory from the total altogether; it's just address space now.
	gcController.heapInUse.add(-int64(s.npages * pageSize))

	// Count this as a free of an object right now as opposed to when
	// the span gets off the quarantine list. The main reason is so that the
	// amount of bytes allocated doesn't exceed how much is counted as
	// "mapped ready," which could cause a deadlock in the pacer.
	gcController.totalFree.Add(int64(s.elemsize))

	// Update consistent stats to match.
	//
	// We're non-preemptible, so it's safe to update consistent stats (our P
	// won't change out from under us).
	stats := memstats.heapStats.acquire()
	atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
	atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
	atomic.Xadd64(&stats.largeFreeCount, 1)
	atomic.Xadd64(&stats.largeFree, int64(s.elemsize))
	memstats.heapStats.release()

	// This counts as a free, so update heapLive.
	gcController.update(-int64(s.elemsize), 0)

	// Mark it as free for the race detector.
	if raceenabled {
		racefree(unsafe.Pointer(s.base()), s.elemsize)
	}

	systemstack(func() {
		// Add the user arena to the quarantine list; the sweeper moves
		// it to the ready list once no pointers into it remain.
		lock(&mheap_.lock)
		mheap_.userArena.quarantineList.insert(s)
		unlock(&mheap_.lock)
	})
}
// inUserArenaChunk reports whether p points into a user arena chunk.
func inUserArenaChunk(p uintptr) bool {
	if s := spanOf(p); s != nil {
		return s.isUserArenaChunk
	}
	return false
}
// freeUserArenaChunk releases the user arena represented by s back to the runtime.
//
// x must be a live pointer within s.
//
// The runtime will set the user arena to fault once it's safe (the GC is no longer running)
// and then once the user arena is no longer referenced by the application, will allow it to
// be reused.
func freeUserArenaChunk(s *mspan, x unsafe.Pointer) {
	if !s.isUserArenaChunk {
		throw("span is not for a user arena")
	}
	if s.npages*pageSize != userArenaChunkBytes {
		throw("invalid user arena span size")
	}

	// Mark the region as free to various sanitizers immediately instead
	// of handling them at sweep time.
	if raceenabled {
		racefree(unsafe.Pointer(s.base()), s.elemsize)
	}
	if msanenabled {
		msanfree(unsafe.Pointer(s.base()), s.elemsize)
	}
	if asanenabled {
		asanpoison(unsafe.Pointer(s.base()), s.elemsize)
	}
	if valgrindenabled {
		valgrindFree(unsafe.Pointer(s.base()))
	}

	// Make ourselves non-preemptible as we manipulate state and statistics.
	//
	// Also required by setUserArenaChunksToFault.
	mp := acquirem()

	// We can only set user arenas to fault if we're in the _GCoff phase.
	if gcphase == _GCoff {
		// Drain the global fault list too, since it's now safe to fault
		// everything that accumulated during the last GC cycle.
		lock(&userArenaState.lock)
		faultList := userArenaState.fault
		userArenaState.fault = nil
		unlock(&userArenaState.lock)

		s.setUserArenaChunkToFault()
		for _, lc := range faultList {
			lc.mspan.setUserArenaChunkToFault()
		}

		// Until the chunks are set to fault, keep them alive via the fault list.
		KeepAlive(x)
		KeepAlive(faultList)
	} else {
		// Put the user arena on the fault list.
		lock(&userArenaState.lock)
		userArenaState.fault = append(userArenaState.fault, liveUserArenaChunk{s, x})
		unlock(&userArenaState.lock)
	}
	releasem(mp)
}
// allocUserArenaChunk attempts to reuse a free user arena chunk represented
// as a span.
//
// Must be in a non-preemptible state to ensure the consistency of statistics
// exported to MemStats.
//
// Acquires the heap lock. Must run on the system stack for that reason.
//
//go:systemstack
func (h *mheap) allocUserArenaChunk() *mspan {
var s *mspan
var base uintptr
// First check the free list.
lock(&h.lock)
if !h.userArena.readyList.isEmpty() {
s = h.userArena.readyList.first
h.userArena.readyList.remove(s)
base = s.base()
} else {
// Free list was empty, so allocate a new arena.
hintList := &h.userArena.arenaHints
if raceenabled {
// In race mode just use the regular heap hints. We might fragment
// the address space, but the race detector requires that the heap
// is mapped contiguously.
hintList = &h.arenaHints
}
v, size := h.sysAlloc(userArenaChunkBytes, hintList, &mheap_.userArenaArenas)
if size%userArenaChunkBytes != 0 {
throw("sysAlloc size is not divisible by userArenaChunkBytes")
}
if size > userArenaChunkBytes {
// We got more than we asked for. This can happen if
// heapArenaSize > userArenaChunkSize, or if sysAlloc just returns
// some extra as a result of trying to find an aligned region.
//
// Divide it up and put it on the ready list.
for i := userArenaChunkBytes; i < size; i += userArenaChunkBytes {
s := h.allocMSpanLocked()
s.init(uintptr(v)+i, userArenaChunkPages)
h.userArena.readyList.insertBack(s)
}
size = userArenaChunkBytes
}
base = uintptr(v)
if base == 0 {
// Out of memory.
unlock(&h.lock)
return nil
}
s = h.allocMSpanLocked()
}
unlock(&h.lock)
// sysAlloc returns Reserved address space, and any span we're
// reusing is set to fault (so, also Reserved), so transition
// it to Prepared and then Ready.
//
// Unlike (*mheap).grow, just map in everything that we
// asked for. We're likely going to use it all.
sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased, "user arena chunk")
sysUsed(unsafe.Pointer(base), userArenaChunkBytes, userArenaChunkBytes)
// Model the user arena as a heap span for a large object.
spc := makeSpanClass(0, false)
// A user arena chunk is always fresh from the OS. It's either newly allocated
// via sysAlloc() or reused from the readyList after a sysFault(). The memory is
// then re-mapped via sysMap(), so we can safely treat it as scavenged; the
// kernel guarantees it will be zero-filled on its next use.
h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages, userArenaChunkBytes)
s.isUserArenaChunk = true
s.elemsize -= userArenaChunkReserveBytes()
s.freeindex = 1
s.allocCount = 1
// Adjust s.limit down to the object-containing part of the span.
//
// This is just to create a slightly tighter bound on the limit.
// It's totally OK if the garbage collector, in particular
// conservative scanning, can temporarily observe an inflated
// limit. It will simply mark the whole chunk or just skip it
// since we're in the mark phase anyway.
s.limit = s.base() + s.elemsize
// Adjust size to include redzone.
if asanenabled {
s.elemsize -= redZoneSize(s.elemsize)
}
// Account for this new arena chunk memory.
gcController.heapInUse.add(int64(userArenaChunkBytes))
gcController.heapReleased.add(-int64(userArenaChunkBytes))
stats := memstats.heapStats.acquire()
atomic.Xaddint64(&stats.inHeap, int64(userArenaChunkBytes))
atomic.Xaddint64(&stats.committed, int64(userArenaChunkBytes))
// Model the arena as a single large malloc.
atomic.Xadd64(&stats.largeAlloc, int64(s.elemsize))
atomic.Xadd64(&stats.largeAllocCount, 1)
memstats.heapStats.release()
// Count the alloc in inconsistent, internal stats.
gcController.totalAlloc.Add(int64(s.elemsize))
// Update heapLive.
gcController.update(int64(s.elemsize), 0)
// This must clear the entire heap bitmap so that it's safe
// to allocate noscan data without writing anything out.
s.initHeapBits()
// Clear the span preemptively. It's an arena chunk, so let's assume
// everything is going to be used.
//
// This also seems to make a massive difference as to whether or
// not Linux decides to back this memory with transparent huge
// pages. There's latency involved in this zeroing, but the hugepage
// gains are almost always worth it. Note: it's important that we
// clear even if it's freshly mapped and we know there's no point
// to zeroing as *that* is the critical signal to use huge pages.
memclrNoHeapPointers(unsafe.Pointer(s.base()), s.elemsize)
s.needzero = 0
s.freeIndexForScan = 1
// Set up the range for allocation.
s.userArenaChunkFree = makeAddrRange(base, base+s.elemsize)
// Put the large span in the mcentral swept list so that it's
// visible to the background sweeper.
h.central[spc].mcentral.fullSwept(h.sweepgen).push(s)
// Set up an allocation header. Avoid write barriers here because this type
// is not a real type, and it exists in an invalid location.
*(*uintptr)(unsafe.Pointer(&s.largeType)) = uintptr(unsafe.Pointer(s.limit))
*(*uintptr)(unsafe.Pointer(&s.largeType.GCData)) = s.limit + unsafe.Sizeof(_type{})
s.largeType.PtrBytes = 0
s.largeType.Size_ = s.elemsize
return s
}
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !asan
// Dummy ASan support API, used when not built with -asan.
package runtime
import (
"unsafe"
)
// asanenabled reports whether the binary was built with -asan.
// This is the stub (!asan) build, so it is constant false and every
// "if asanenabled { ... }" call site compiles away.
const asanenabled = false
const asanenabledBit = 0

// Because asanenabled is false, none of these functions should be called.
// Each one throws so that any path reaching them is caught immediately.
func asanread(addr unsafe.Pointer, sz uintptr)            { throw("asan") }
func asanwrite(addr unsafe.Pointer, sz uintptr)           { throw("asan") }
func asanunpoison(addr unsafe.Pointer, sz uintptr)        { throw("asan") }
func asanpoison(addr unsafe.Pointer, sz uintptr)          { throw("asan") }
func asanregisterglobals(addr unsafe.Pointer, sz uintptr) { throw("asan") }
func lsanregisterrootregion(unsafe.Pointer, uintptr)      { throw("asan") }
func lsanunregisterrootregion(unsafe.Pointer, uintptr)    { throw("asan") }
func lsandoleakcheck()                                    { throw("asan") }
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/goexperiment"
"internal/runtime/atomic"
"unsafe"
)
// These functions cannot have go:noescape annotations,
// because while ptr does not escape, new does.
// If new is marked as not escaping, the compiler will make incorrect
// escape analysis decisions about the pointer value being stored.
// atomicwb performs a write barrier before an atomic pointer write.
// The caller should guard the call with "if writeBarrier.enabled".
//
// atomicwb should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/gopkg
// - github.com/songzhibin97/gkit
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname atomicwb
//go:nosplit
func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer) {
	// Record both halves of the barrier in the per-P write barrier
	// buffer: buf[0] is the old pointer currently stored in the slot,
	// buf[1] is the new pointer about to be written.
	slot := (*uintptr)(unsafe.Pointer(ptr))
	buf := getg().m.p.ptr().wbBuf.get2()
	buf[0] = *slot
	buf[1] = uintptr(new)
}
// atomicstorep performs *ptr = new atomically and invokes a write barrier.
//
//go:nosplit
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
	// The write barrier must observe the slot before the store below
	// overwrites it.
	if writeBarrier.enabled {
		atomicwb((*unsafe.Pointer)(ptr), new)
	}
	// Optional cgo pointer-write checking (GOEXPERIMENT=cgocheck2).
	if goexperiment.CgoCheck2 {
		cgoCheckPtrWrite((*unsafe.Pointer)(ptr), new)
	}
	atomic.StorepNoWB(noescape(ptr), new)
}
// atomic_storePointer is the implementation of internal/runtime/atomic.UnsafePointer.Store
// (like StoreNoWB but with the write barrier).
//
//go:nosplit
//go:linkname atomic_storePointer internal/runtime/atomic.storePointer
func atomic_storePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
	// Delegate to atomicstorep, which performs the write barrier and
	// optional cgocheck before the atomic store.
	atomicstorep(unsafe.Pointer(ptr), new)
}
// atomic_casPointer is the implementation of internal/runtime/atomic.UnsafePointer.CompareAndSwap
// (like CompareAndSwapNoWB but with the write barrier).
//
//go:nosplit
//go:linkname atomic_casPointer internal/runtime/atomic.casPointer
func atomic_casPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	// Note: the write barrier (and cgocheck) run unconditionally, even
	// if the CAS below ends up failing; that is conservative but safe.
	if writeBarrier.enabled {
		atomicwb(ptr, new)
	}
	if goexperiment.CgoCheck2 {
		cgoCheckPtrWrite(ptr, new)
	}
	return atomic.Casp1(ptr, old, new)
}
// Like above, but implement in terms of sync/atomic's uintptr operations.
// We cannot just call the runtime routines, because the race detector expects
// to be able to intercept the sync/atomic forms but not the runtime forms.
// sync_atomic_StoreUintptr has no Go body here; the implementation is
// provided via the linkname below so the pointer wrappers in this file
// can call it.
//go:linkname sync_atomic_StoreUintptr sync/atomic.StoreUintptr
func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)
//go:linkname sync_atomic_StorePointer sync/atomic.StorePointer
//go:nosplit
func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
	// Same shape as atomicstorep: barrier and cgocheck first, then the
	// store, expressed in terms of the sync/atomic uintptr primitive so
	// the race detector can intercept the sync/atomic form.
	if writeBarrier.enabled {
		atomicwb(ptr, new)
	}
	if goexperiment.CgoCheck2 {
		cgoCheckPtrWrite(ptr, new)
	}
	sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
}
// sync_atomic_SwapUintptr is implemented elsewhere; only the declaration
// appears here (see the linkname).
//go:linkname sync_atomic_SwapUintptr sync/atomic.SwapUintptr
func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr

// sync_atomic_SwapPointer is the runtime half of sync/atomic.SwapPointer:
// write barrier and optional cgocheck, then the swap via the uintptr form.
//go:linkname sync_atomic_SwapPointer sync/atomic.SwapPointer
//go:nosplit
func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
	if writeBarrier.enabled {
		atomicwb(ptr, new)
	}
	if goexperiment.CgoCheck2 {
		cgoCheckPtrWrite(ptr, new)
	}
	// noescape: converting ptr for the uintptr-based swap must not make
	// it escape.
	old := unsafe.Pointer(sync_atomic_SwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(new)))
	return old
}
// sync_atomic_CompareAndSwapUintptr is implemented elsewhere; only the
// declaration appears here (see the linkname).
//go:linkname sync_atomic_CompareAndSwapUintptr sync/atomic.CompareAndSwapUintptr
func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool

// sync_atomic_CompareAndSwapPointer is the runtime half of
// sync/atomic.CompareAndSwapPointer. As with atomic_casPointer, the
// barrier and cgocheck run even if the CAS fails.
//go:linkname sync_atomic_CompareAndSwapPointer sync/atomic.CompareAndSwapPointer
//go:nosplit
func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	if writeBarrier.enabled {
		atomicwb(ptr, new)
	}
	if goexperiment.CgoCheck2 {
		cgoCheckPtrWrite(ptr, new)
	}
	return sync_atomic_CompareAndSwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(old), uintptr(new))
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
//go:cgo_export_static main

// Filled in by runtime/cgo when linked into binary.
// Each variable holds the address of the corresponding gcc-compiled
// helper; they stay nil in binaries built without cgo.
//go:linkname _cgo_init _cgo_init
//go:linkname _cgo_thread_start _cgo_thread_start
//go:linkname _cgo_sys_thread_create _cgo_sys_thread_create
//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done
//go:linkname _cgo_callers _cgo_callers
//go:linkname _cgo_set_traceback_functions _cgo_set_traceback_functions
//go:linkname _cgo_call_traceback_function _cgo_call_traceback_function
//go:linkname _cgo_call_symbolizer_function _cgo_call_symbolizer_function
//go:linkname _cgo_yield _cgo_yield
//go:linkname _cgo_pthread_key_created _cgo_pthread_key_created
//go:linkname _cgo_bindm _cgo_bindm
//go:linkname _cgo_getstackbound _cgo_getstackbound

var (
	_cgo_init                     unsafe.Pointer
	_cgo_thread_start             unsafe.Pointer
	_cgo_sys_thread_create        unsafe.Pointer
	_cgo_notify_runtime_init_done unsafe.Pointer
	_cgo_callers                  unsafe.Pointer
	_cgo_set_traceback_functions  unsafe.Pointer
	_cgo_call_traceback_function  unsafe.Pointer
	_cgo_call_symbolizer_function unsafe.Pointer
	_cgo_yield                    unsafe.Pointer
	_cgo_pthread_key_created      unsafe.Pointer
	_cgo_bindm                    unsafe.Pointer
	_cgo_getstackbound            unsafe.Pointer
)
// iscgo is set to true by the runtime/cgo package
//
// iscgo should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ebitengine/purego
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname iscgo
var iscgo bool

// set_crosscall2 is set by the runtime/cgo package
// set_crosscall2 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ebitengine/purego
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname set_crosscall2
var set_crosscall2 func()

// cgoHasExtraM is set on startup when an extra M is created for cgo.
// The extra M must be created before any C/C++ code calls cgocallback.
var cgoHasExtraM bool

// cgoUse is called by cgo-generated code (using go:linkname to get at
// an unexported name). The calls serve two purposes:
// 1) they are opaque to escape analysis, so the argument is considered to
// escape to the heap.
// 2) they keep the argument alive until the call site; the call is emitted after
// the end of the (presumed) use of the argument by C.
// cgoUse should not actually be called (see cgoAlwaysFalse).
func cgoUse(any) { throw("cgoUse should not be called") }

// cgoKeepAlive is called by cgo-generated code (using go:linkname to get at
// an unexported name). This call keeps its argument alive until the call site;
// cgo emits the call after the last possible use of the argument by C code.
// cgoKeepAlive is marked in the cgo-generated code as //go:noescape, so
// unlike cgoUse it does not force the argument to escape to the heap.
// This is used to implement the #cgo noescape directive.
func cgoKeepAlive(any) { throw("cgoKeepAlive should not be called") }

// cgoAlwaysFalse is a boolean value that is always false.
// The cgo-generated code says if cgoAlwaysFalse { cgoUse(p) },
// or if cgoAlwaysFalse { cgoKeepAlive(p) }.
// The compiler cannot see that cgoAlwaysFalse is always false,
// so it emits the test and keeps the call, giving the desired
// escape/alive analysis result. The test is cheaper than the call.
var cgoAlwaysFalse bool

// cgo_yield points at the _cgo_yield function pointer above, so callers
// can check and invoke it through a single variable.
var cgo_yield = &_cgo_yield
// cgoNoCallback records, on the current goroutine, whether callbacks
// from C are disallowed. Enabling the flag twice in a row is a bug and
// panics.
func cgoNoCallback(v bool) {
	gp := getg()
	if v && gp.nocgocallback {
		panic("runtime: unexpected setting cgoNoCallback")
	}
	gp.nocgocallback = v
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgo
import "unsafe"
// These utility functions are available to be called from code
// compiled with gcc via crosscall2.
// The declaration of crosscall2 is:
// void crosscall2(void (*fn)(void *), void *, int);
//
// We need to export the symbol crosscall2 in order to support
// callbacks from shared libraries. This applies regardless of
// linking mode.
//
// Compatibility note: SWIG uses crosscall2 in exactly one situation:
// to call _cgo_panic using the pattern shown below. We need to keep
// that pattern working. In particular, crosscall2 actually takes four
// arguments, but it works to call it with three arguments when
// calling _cgo_panic.
//
//go:cgo_export_static crosscall2
//go:cgo_export_dynamic crosscall2
// Panic. The argument is converted into a Go string.
// Call like this in code compiled with gcc:
// struct { const char *p; } a;
// a.p = /* string to pass to panic */;
// crosscall2(_cgo_panic, &a, sizeof a);
// /* The function call will not return. */
// TODO: We should export a regular C function to panic, change SWIG
// to use that instead of the above pattern, and then we can drop
// backwards-compatibility from crosscall2 and stop exporting it.
//go:linkname _runtime_cgo_panic_internal runtime._cgo_panic_internal
func _runtime_cgo_panic_internal(p *byte)

// _cgo_panic is called from C (via crosscall2) with a single C-string
// argument; it hands that string to the runtime to start a Go panic,
// so the call does not return (see the usage comment above).
//go:linkname _cgo_panic _cgo_panic
//go:cgo_export_static _cgo_panic
//go:cgo_export_dynamic _cgo_panic
func _cgo_panic(a *struct{ cstr *byte }) {
	_runtime_cgo_panic_internal(a.cstr)
}
// The x_* byte symbols below are the C-side objects imported via
// cgo_import_static; the _cgo_* Go variables alias them (or hold their
// addresses) so the runtime can reach them via linkname.

//go:cgo_import_static _cgo_init
//go:linkname _cgo_init _cgo_init
var _cgo_init unsafe.Pointer

//go:cgo_import_static _cgo_thread_start
//go:linkname _cgo_thread_start _cgo_thread_start
var _cgo_thread_start unsafe.Pointer

// Creates a new system thread without updating any Go state.
//
// This method is invoked during shared library loading to create a new OS
// thread to perform the runtime initialization. This method is similar to
// x_cgo_thread_start except that it doesn't update any Go state.
//go:cgo_import_static _cgo_sys_thread_create
//go:linkname _cgo_sys_thread_create _cgo_sys_thread_create
var _cgo_sys_thread_create unsafe.Pointer

// Indicates whether a dummy thread key has been created or not.
//
// When calling go exported function from C, we register a destructor
// callback, for a dummy thread key, by using pthread_key_create.
//go:cgo_import_static x_cgo_pthread_key_created
//go:linkname x_cgo_pthread_key_created x_cgo_pthread_key_created
//go:linkname _cgo_pthread_key_created _cgo_pthread_key_created
var x_cgo_pthread_key_created byte
var _cgo_pthread_key_created = &x_cgo_pthread_key_created

// Export crosscall2 to a c function pointer variable.
// Used to dropm in pthread key destructor, while C thread is exiting.
//go:cgo_import_static x_crosscall2_ptr
//go:linkname x_crosscall2_ptr x_crosscall2_ptr
//go:linkname _crosscall2_ptr _crosscall2_ptr
var x_crosscall2_ptr byte
var _crosscall2_ptr = &x_crosscall2_ptr

// Set the x_crosscall2_ptr C function pointer variable point to crosscall2.
// It's for the runtime package to call at init time.
func set_crosscall2()

//go:linkname _set_crosscall2 runtime.set_crosscall2
var _set_crosscall2 = set_crosscall2
// Store the g into the thread-specific value.
// So that pthread_key_destructor will dropm when the thread is exiting.
//go:cgo_import_static _cgo_bindm
//go:linkname _cgo_bindm _cgo_bindm
var _cgo_bindm unsafe.Pointer

// Notifies that the runtime has been initialized.
//
// We currently block at every CGO entry point (via _cgo_wait_runtime_init_done)
// to ensure that the runtime has been initialized before the CGO call is
// executed. This is necessary for shared libraries where we kickoff runtime
// initialization in a separate thread and return without waiting for this
// thread to complete the init.
//go:cgo_import_static x_cgo_notify_runtime_init_done
//go:linkname x_cgo_notify_runtime_init_done x_cgo_notify_runtime_init_done
//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done
var x_cgo_notify_runtime_init_done byte
var _cgo_notify_runtime_init_done = &x_cgo_notify_runtime_init_done

// Sets the traceback, context, and symbolizer functions. See
// runtime.SetCgoTraceback.
//go:cgo_import_static x_cgo_set_traceback_functions
//go:linkname x_cgo_set_traceback_functions x_cgo_set_traceback_functions
//go:linkname _cgo_set_traceback_functions _cgo_set_traceback_functions
var x_cgo_set_traceback_functions byte
var _cgo_set_traceback_functions = &x_cgo_set_traceback_functions

// Call the traceback function registered with x_cgo_set_traceback_functions.
//go:cgo_import_static x_cgo_call_traceback_function
//go:linkname x_cgo_call_traceback_function x_cgo_call_traceback_function
//go:linkname _cgo_call_traceback_function _cgo_call_traceback_function
var x_cgo_call_traceback_function byte
var _cgo_call_traceback_function = &x_cgo_call_traceback_function

// Call the symbolizer function registered with x_cgo_set_symbolizer_functions.
//go:cgo_import_static x_cgo_call_symbolizer_function
//go:linkname x_cgo_call_symbolizer_function x_cgo_call_symbolizer_function
//go:linkname _cgo_call_symbolizer_function _cgo_call_symbolizer_function
var x_cgo_call_symbolizer_function byte
var _cgo_call_symbolizer_function = &x_cgo_call_symbolizer_function

// Calls a libc function to execute background work injected via libc
// interceptors, such as processing pending signals under the thread
// sanitizer.
//
// Left as a nil pointer if no libc interceptors are expected.
//go:cgo_import_static _cgo_yield
//go:linkname _cgo_yield _cgo_yield
var _cgo_yield unsafe.Pointer

//go:cgo_export_static _cgo_topofstack
//go:cgo_export_dynamic _cgo_topofstack

// x_cgo_getstackbound gets the thread's C stack size and
// set the G's stack bound based on the stack size.
//go:cgo_import_static _cgo_getstackbound
//go:linkname _cgo_getstackbound _cgo_getstackbound
var _cgo_getstackbound unsafe.Pointer
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgo
import (
"sync"
"sync/atomic"
)
// Handle provides a way to pass values that contain Go pointers
// (pointers to memory allocated by Go) between Go and C without
// breaking the cgo pointer passing rules. A Handle is an integer
// value that can represent any Go value. A Handle can be passed
// through C and back to Go, and Go code can use the Handle to
// retrieve the original Go value.
//
// The underlying type of Handle is guaranteed to fit in an integer type
// that is large enough to hold the bit pattern of any pointer. The zero
// value of a Handle is not valid, and thus is safe to use as a sentinel
// in C APIs.
//
// For instance, on the Go side:
//
// package main
//
// /*
// #include <stdint.h> // for uintptr_t
//
// extern void MyGoPrint(uintptr_t handle);
// void myprint(uintptr_t handle);
// */
// import "C"
// import "runtime/cgo"
//
// //export MyGoPrint
// func MyGoPrint(handle C.uintptr_t) {
// h := cgo.Handle(handle)
// val := h.Value().(string)
// println(val)
// h.Delete()
// }
//
// func main() {
// val := "hello Go"
// C.myprint(C.uintptr_t(cgo.NewHandle(val)))
// // Output: hello Go
// }
//
// and on the C side:
//
// #include <stdint.h> // for uintptr_t
//
// // A Go function
// extern void MyGoPrint(uintptr_t handle);
//
// // A C function
// void myprint(uintptr_t handle) {
// MyGoPrint(handle);
// }
//
// Some C functions accept a void* argument that points to an arbitrary
// data value supplied by the caller. It is not safe to coerce a Handle
// (an integer) to a Go [unsafe.Pointer], but instead we can pass the address
// of the cgo.Handle to the void* parameter, as in this variant of the
// previous example.
//
// Note that, as described in the [cmd/cgo] documentation,
// the C code must not keep a copy of the Go pointer that it receives,
// unless the memory is explicitly pinned using [runtime.Pinner].
// This example is OK because the C function myprint does not keep
// a copy of the pointer.
//
// package main
//
// /*
// extern void MyGoPrint(void *context);
// static inline void myprint(void *context) {
// MyGoPrint(context);
// }
// */
// import "C"
// import (
// "runtime/cgo"
// "unsafe"
// )
//
// //export MyGoPrint
// func MyGoPrint(context unsafe.Pointer) {
// h := *(*cgo.Handle)(context)
// val := h.Value().(string)
// println(val)
// h.Delete()
// }
//
// func main() {
// val := "hello Go"
// h := cgo.NewHandle(val)
// // In this example, unsafe.Pointer(&h) is valid because myprint
// // does not keep a copy of the pointer. If the C code keeps the
// // pointer after the call returns, use runtime.Pinner to pin it.
// C.myprint(unsafe.Pointer(&h))
// // Output: hello Go
// }
type Handle uintptr

// NewHandle returns a handle for a given value.
//
// The handle is valid until the program calls Delete on it. The handle
// uses resources, and this package assumes that C code may hold on to
// the handle, so a program must explicitly call Delete when the handle
// is no longer needed.
//
// The intended use is to pass the returned handle to C code, which
// passes it back to Go, which calls Value.
func NewHandle(v any) Handle {
	// Indices start at 1, so the zero Handle is never issued and can
	// serve as a sentinel; wrapping back to 0 means the space is spent.
	idx := handleIdx.Add(1)
	if idx == 0 {
		panic("runtime/cgo: ran out of handle space")
	}
	handles.Store(idx, v)
	return Handle(idx)
}

// Value returns the associated Go value for a valid handle.
//
// The method panics if the handle is invalid.
func (h Handle) Value() any {
	val, found := handles.Load(uintptr(h))
	if !found {
		panic("runtime/cgo: misuse of an invalid Handle")
	}
	return val
}

// Delete invalidates a handle. This method should only be called once
// the program no longer needs to pass the handle to C and the C code
// no longer has a copy of the handle value.
//
// The method panics if the handle is invalid.
func (h Handle) Delete() {
	if _, found := handles.LoadAndDelete(uintptr(h)); !found {
		panic("runtime/cgo: misuse of an invalid Handle")
	}
}

var (
	handles   sync.Map       // map[uintptr]any, keyed by handle index
	handleIdx atomic.Uintptr // last issued handle index
)
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Support for memory sanitizer. See runtime/cgo/mmap.go.
//go:build (linux && (amd64 || arm64 || loong64)) || (freebsd && amd64)
package runtime
import "unsafe"
// _cgo_mmap is filled in by runtime/cgo when it is linked into the
// program, so it is only non-nil when using cgo.
// (mmap below uses it as the "is cgo present" test.)
//
//go:linkname _cgo_mmap _cgo_mmap
var _cgo_mmap unsafe.Pointer

// _cgo_munmap is filled in by runtime/cgo when it is linked into the
// program, so it is only non-nil when using cgo.
//
//go:linkname _cgo_munmap _cgo_munmap
var _cgo_munmap unsafe.Pointer
// mmap is used to route the mmap system call through C code when using cgo, to
// support sanitizer interceptors. Don't allow stack splits, since this function
// (used by sysAlloc) is called in a lot of low-level parts of the runtime and
// callers often assume it won't acquire any locks.
//
//go:nosplit
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
	if _cgo_mmap == nil {
		// Not a cgo program: issue the system call directly.
		return sysMmap(addr, n, prot, flags, fd, off)
	}
	// Route through the C helper so sanitizer interceptors see the call.
	//
	// ret is deliberately a uintptr, not unsafe.Pointer: the result
	// doubles as an errno value, and storing a non-pointer from inside
	// the function literal avoids triggering a write barrier on a value
	// that may not be a real pointer.
	var ret uintptr
	systemstack(func() {
		ret = callCgoMmap(addr, n, prot, flags, fd, off)
	})
	if ret < 4096 {
		// Values below one page are errno results, not addresses.
		return nil, int(ret)
	}
	return unsafe.Pointer(ret), 0
}
// munmap unmaps memory, routing through C when cgo is present so that
// sanitizer interceptors observe the call.
func munmap(addr unsafe.Pointer, n uintptr) {
	if _cgo_munmap == nil {
		sysMunmap(addr, n)
		return
	}
	systemstack(func() { callCgoMunmap(addr, n) })
}
// The four declarations below are the raw-syscall and cgo-trampoline
// halves that mmap/munmap above dispatch between.

// sysMmap calls the mmap system call. It is implemented in assembly.
func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)

// callCgoMmap calls the mmap function in the runtime/cgo package
// using the GCC calling convention. It is implemented in assembly.
func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uintptr

// sysMunmap calls the munmap system call. It is implemented in assembly.
func sysMunmap(addr unsafe.Pointer, n uintptr)

// callCgoMunmap calls the munmap function in the runtime/cgo package
// using the GCC calling convention. It is implemented in assembly.
func callCgoMunmap(addr unsafe.Pointer, n uintptr)
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Support for sanitizers. See runtime/cgo/sigaction.go.
// Also used on linux/386 to clear the SA_RESTORER flag
// when using cgo; see issue #75253.
//go:build (linux && (386 || amd64 || arm64 || loong64 || ppc64le)) || (freebsd && amd64)
package runtime
import "unsafe"
// _cgo_sigaction is filled in by runtime/cgo when it is linked into the
// program, so it is only non-nil when using cgo.
// (sigaction below uses it to decide between the raw syscall and the
// C trampoline.)
//
//go:linkname _cgo_sigaction _cgo_sigaction
var _cgo_sigaction unsafe.Pointer
//go:nosplit
//go:nowritebarrierrec
func sigaction(sig uint32, new, old *sigactiont) {
	// racewalk.go avoids adding sanitizing instrumentation to package runtime,
	// but we might be calling into instrumented C functions here,
	// so we need the pointer parameters to be properly marked.
	//
	// Mark the input as having been written before the call
	// and the output as read after.
	if msanenabled && new != nil {
		msanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
	}
	if asanenabled && new != nil {
		asanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
	}
	if _cgo_sigaction == nil || inForkedChild {
		// No cgo trampoline available (or we're in a forked child):
		// use the raw system call.
		sysSigaction(sig, new, old)
	} else {
		// We need to call _cgo_sigaction, which means we need a big enough stack
		// for C. To complicate matters, we may be in libpreinit (before the
		// runtime has been initialized) or in an asynchronous signal handler (with
		// the current thread in transition between goroutines, or with the g0
		// system stack already in use).
		var ret int32
		fixSigactionForCgo(new)
		var g *g
		if mainStarted {
			g = getg()
		}
		// The address of a stack-resident argument tells us which stack
		// we are currently running on.
		sp := uintptr(unsafe.Pointer(&sig))
		switch {
		case g == nil:
			// No g: we're on a C stack or a signal stack.
			ret = callCgoSigaction(uintptr(sig), new, old)
		case sp < g.stack.lo || sp >= g.stack.hi:
			// We're no longer on g's stack, so we must be handling a signal. It's
			// possible that we interrupted the thread during a transition between g
			// and g0, so we should stay on the current stack to avoid corrupting g0.
			ret = callCgoSigaction(uintptr(sig), new, old)
		default:
			// We're running on g's stack, so either we're not in a signal handler or
			// the signal handler has set the correct g. If we're on gsignal or g0,
			// systemstack will make the call directly; otherwise, it will switch to
			// g0 to ensure we have enough room to call a libc function.
			//
			// The function literal that we pass to systemstack is not nosplit, but
			// that's ok: we'll be running on a fresh, clean system stack so the stack
			// check will always succeed anyway.
			systemstack(func() {
				ret = callCgoSigaction(uintptr(sig), new, old)
			})
		}
		const EINVAL = 22
		if ret == EINVAL {
			// libc reserves certain signals — normally 32-33 — for pthreads, and
			// returns EINVAL for sigaction calls on those signals. If we get EINVAL,
			// fall back to making the syscall directly.
			sysSigaction(sig, new, old)
		}
	}
	if msanenabled && old != nil {
		msanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
	}
	if asanenabled && old != nil {
		asanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
	}
}
// callCgoSigaction calls the sigaction function in the runtime/cgo package
// using the GCC calling convention. It is implemented in assembly.
// The int32 result is a libc-style status; sigaction above falls back to
// the raw syscall when it is EINVAL.
//
//go:noescape
func callCgoSigaction(sig uintptr, new, old *sigactiont) int32
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Cgo call and callback support.
//
// To call into the C function f from Go, the cgo-generated code calls
// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
// gcc-compiled function written by cgo.
//
// runtime.cgocall (below) calls entersyscall so as not to block
// other goroutines or the garbage collector, and then calls
// runtime.asmcgocall(_cgo_Cfunc_f, frame).
//
// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
// (assumed to be an operating system-allocated stack, so safe to run
// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
//
// _cgo_Cfunc_f invokes the actual C function f with arguments
// taken from the frame structure, records the results in the frame,
// and returns to runtime.asmcgocall.
//
// After it regains control, runtime.asmcgocall switches back to the
// original g (m->curg)'s stack and returns to runtime.cgocall.
//
// After it regains control, runtime.cgocall calls exitsyscall, which blocks
// until this m can run Go code without violating the $GOMAXPROCS limit,
// and then unlocks g from m.
//
// The above description skipped over the possibility of the gcc-compiled
// function f calling back into Go. If that happens, we continue down
// the rabbit hole during the execution of f.
//
// To make it possible for gcc-compiled C code to call a Go function p.GoF,
// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
// know about packages). The gcc-compiled C function f calls GoF.
//
// GoF initializes "frame", a structure containing all of its
// arguments and slots for p.GoF's results. It calls
// crosscall2(_cgoexp_GoF, frame, framesize, ctxt) using the gcc ABI.
//
// crosscall2 (in cgo/asm_$GOARCH.s) is a four-argument adapter from
// the gcc function call ABI to the gc function call ABI. At this
// point we're in the Go runtime, but we're still running on m.g0's
// stack and outside the $GOMAXPROCS limit. crosscall2 calls
// runtime.cgocallback(_cgoexp_GoF, frame, ctxt) using the gc ABI.
// (crosscall2's framesize argument is no longer used, but there's one
// case where SWIG calls crosscall2 directly and expects to pass this
// argument. See _cgo_panic.)
//
// runtime.cgocallback (in asm_$GOARCH.s) switches from m.g0's stack
// to the original g (m.curg)'s stack, on which it calls
// runtime.cgocallbackg(_cgoexp_GoF, frame, ctxt). As part of the
// stack switch, runtime.cgocallback saves the current SP as
// m.g0.sched.sp, so that any use of m.g0's stack during the execution
// of the callback will be done below the existing stack frames.
// Before overwriting m.g0.sched.sp, it pushes the old value on the
// m.g0 stack, so that it can be restored later.
//
// runtime.cgocallbackg (below) is now running on a real goroutine
// stack (not an m.g0 stack). First it calls runtime.exitsyscall, which will
// block until the $GOMAXPROCS limit allows running this goroutine.
// Once exitsyscall has returned, it is safe to do things like call the memory
// allocator or invoke the Go callback function. runtime.cgocallbackg
// first defers a function to unwind m.g0.sched.sp, so that if p.GoF
// panics, m.g0.sched.sp will be restored to its old value: the m.g0 stack
// and the m.curg stack will be unwound in lock step.
// Then it calls _cgoexp_GoF(frame).
//
// _cgoexp_GoF, which was generated by cmd/cgo, unpacks the arguments
// from frame, calls p.GoF, writes the results back to frame, and
// returns. Now we start unwinding this whole process.
//
// runtime.cgocallbackg pops but does not execute the deferred
// function to unwind m.g0.sched.sp, calls runtime.entersyscall, and
// returns to runtime.cgocallback.
//
// After it regains control, runtime.cgocallback switches back to
// m.g0's stack (the pointer is still in m.g0.sched.sp), restores the old
// m.g0.sched.sp value from the stack, and returns to crosscall2.
//
// crosscall2 restores the callee-save registers for gcc and returns
// to GoF, which unpacks any result values and returns to f.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/sys"
"unsafe"
)
// Addresses collected in a cgo backtrace when crashing.
// Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
type cgoCallers [32]uintptr

// argset matches runtime/cgo/linux_syscall.c:argset_t
type argset struct {
	args   unsafe.Pointer // pointer to the uintptr argument array
	retval uintptr        // result filled in by the C side of the call
}
// wrapper for syscall package to call cgocall for libc (cgo) calls.
//
//go:linkname syscall_cgocaller syscall.cgocaller
//go:nosplit
//go:uintptrescapes
func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr {
	// The uintptrescapes directive above forces pointer values passed
	// as uintptrs to escape, keeping them alive for the duration of
	// the C call.
	as := argset{args: unsafe.Pointer(&args[0])}
	cgocall(fn, unsafe.Pointer(&as))
	return as.retval
}

var ncgocall uint64 // number of cgo calls in total for dead m
// Call from Go to C.
//
// fn is the C function to run; arg is passed through to it unchanged.
// The return value is whatever asmcgocall reports (named errno here).
//
// This must be nosplit because it's used for syscalls on some
// platforms. Syscalls may have untyped arguments on the stack, so
// it's not safe to grow or scan the stack.
//
// cgocall should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ebitengine/purego
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname cgocall
//go:nosplit
func cgocall(fn, arg unsafe.Pointer) int32 {
	if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
		throw("cgocall unavailable")
	}
	if fn == nil {
		throw("cgocall nil")
	}

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	mp := getg().m
	mp.ncgocall++

	// Reset traceback.
	mp.cgoCallers[0] = 0

	// Announce we are entering a system call
	// so that the scheduler knows to create another
	// M to run goroutines while we are in the
	// foreign code.
	//
	// The call to asmcgocall is guaranteed not to
	// grow the stack and does not allocate memory,
	// so it is safe to call while "in a system call", outside
	// the $GOMAXPROCS accounting.
	//
	// fn may call back into Go code, in which case we'll exit the
	// "system call", run the Go code (which may grow the stack),
	// and then re-enter the "system call" reusing the PC and SP
	// saved by entersyscall here.
	entersyscall()

	// Tell asynchronous preemption that we're entering external
	// code. We do this after entersyscall because this may block
	// and cause an async preemption to fail, but at this point a
	// sync preemption will succeed (though this is not a matter
	// of correctness).
	osPreemptExtEnter(mp)

	mp.incgo = true

	// We use ncgo as a check during execution tracing for whether there is
	// any C on the call stack, which there will be after this point. If
	// there isn't, we can use frame pointer unwinding to collect call
	// stacks efficiently. This will be the case for the first Go-to-C call
	// on a stack, so it's preferable to update it here, after we emit a
	// trace event in entersyscall above.
	mp.ncgo++

	errno := asmcgocall(fn, arg)

	// Update accounting before exitsyscall because exitsyscall may
	// reschedule us on to a different M.
	mp.incgo = false
	mp.ncgo--

	osPreemptExtExit(mp)

	// After exitsyscall we can be rescheduled on a different M,
	// so we need to restore the original M's winsyscall.
	winsyscall := mp.winsyscall
	exitsyscall()
	getg().m.winsyscall = winsyscall

	// Note that raceacquire must be called only after exitsyscall has
	// wired this M to a P.
	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	if sys.DITSupported {
		// C code may have enabled or disabled DIT on this thread, restore
		// our state to the expected one.
		ditEnabled := sys.DITEnabled()
		gp := getg()
		if !gp.ditWanted && ditEnabled {
			sys.DisableDIT()
		} else if gp.ditWanted && !ditEnabled {
			sys.EnableDIT()
		}
	}

	// From the garbage collector's perspective, time can move
	// backwards in the sequence above. If there's a callback into
	// Go code, GC will see this function at the call to
	// asmcgocall. When the Go call later returns to C, the
	// syscall PC/SP is rolled back and the GC sees this function
	// back at the call to entersyscall. Normally, fn and arg
	// would be live at entersyscall and dead at asmcgocall, so if
	// time moved backwards, GC would see these arguments as dead
	// and then live. Prevent these undead arguments from crashing
	// GC by forcing them to stay live across this time warp.
	KeepAlive(fn)
	KeepAlive(arg)
	KeepAlive(mp)

	return errno
}
// Set or reset the system stack bounds for a callback on sp.
//
// sp is the current stack pointer observed on g0; signal reports whether
// we are being called from a signal handler (in which case the pthread
// stack bounds do not apply).
//
// Must be nosplit because it is called by needm prior to fully initializing
// the M.
//
//go:nosplit
func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
	g0 := mp.g0

	if !mp.isextra {
		// We allocated the stack for standard Ms. Don't replace the
		// stack bounds with estimated ones when we already initialized
		// with the exact ones.
		return
	}

	inBound := sp > g0.stack.lo && sp <= g0.stack.hi
	if inBound && mp.g0StackAccurate {
		// This M has called into Go before and has the stack bounds
		// initialized. We have the accurate stack bounds, and the SP
		// is in bounds. We expect it continues to run within the same
		// bounds.
		return
	}

	// We don't have an accurate stack bounds (either it never calls
	// into Go before, or we couldn't get the accurate bounds), or the
	// current SP is not within the previous bounds (the stack may have
	// changed between calls). We need to update the stack bounds.
	//
	// N.B. we need to update the stack bounds even if SP appears to
	// already be in bounds, if our bounds are estimated dummy bounds
	// (below). We may be in a different region within the same actual
	// stack bounds, but our estimates were not accurate. Or the actual
	// stack bounds could have shifted but still have partial overlap with
	// our dummy bounds. If we failed to update in that case, we could find
	// ourselves seemingly called near the bottom of the stack bounds, where
	// we quickly run out of space.

	// Set the stack bounds to match the current stack. If we don't
	// actually know how big the stack is, as we don't know how big any
	// scheduling stack is, we assume there's at least 32 kB. If we
	// can get a more accurate stack bound from pthread, use that, provided
	// it actually contains SP.

	// Estimated dummy bounds: a little headroom above SP, 32 kB below.
	g0.stack.hi = sp + 1024
	g0.stack.lo = sp - 32*1024
	mp.g0StackAccurate = false
	if !signal && _cgo_getstackbound != nil {
		// Don't adjust if called from the signal handler.
		// We are on the signal stack, not the pthread stack.
		// (We could get the stack bounds from sigaltstack, but
		// we're getting out of the signal handler very soon
		// anyway. Not worth it.)
		var bounds [2]uintptr
		asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
		// getstackbound is an unsupported no-op on Windows.
		//
		// On Unix systems, if the API to get accurate stack bounds is
		// not available, it returns zeros.
		//
		// Don't use these bounds if they don't contain SP. Perhaps we
		// were called by something not using the standard thread
		// stack.
		if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] {
			g0.stack.lo = bounds[0]
			g0.stack.hi = bounds[1]
			mp.g0StackAccurate = true
		}
	}

	g0.stackguard0 = g0.stack.lo + stackGuard
	g0.stackguard1 = g0.stackguard0
}
// Call from C back to Go. fn must point to an ABIInternal Go entry-point.
// frame is the argument frame to pass to fn; ctxt, if non-zero, is an
// unwind context pushed onto gp.cgoCtxt by cgocallbackg1.
//
//go:nosplit
func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		println("runtime: bad g in cgocallback")
		exit(2)
	}

	sp := gp.m.g0.sched.sp // system sp saved by cgocallback.
	oldStack := gp.m.g0.stack
	oldAccurate := gp.m.g0StackAccurate
	callbackUpdateSystemStack(gp.m, sp, false)

	// The call from C is on gp.m's g0 stack, so we must ensure
	// that we stay on that M. We have to do this before calling
	// exitsyscall, since it would otherwise be free to move us to
	// a different M. The call to unlockOSThread is in this function
	// after cgocallbackg1, or in the case of panicking, in unwindm.
	lockOSThread()

	checkm := gp.m

	// Save current syscall parameters, so m.winsyscall can be
	// used again if the callback decides to make a syscall.
	winsyscall := gp.m.winsyscall

	// entersyscall saves the caller's SP to allow the GC to trace the Go
	// stack. However, since we're returning to an earlier stack frame and
	// need to pair with the entersyscall() call made by cgocall, we must
	// save syscall* and let reentersyscall restore them.
	//
	// Note: savedsp and savedbp MUST be held in locals as an unsafe.Pointer.
	// When we call into Go, the stack is free to be moved. If these locals
	// aren't visible in the stack maps, they won't get updated properly,
	// and will end up being stale when restored by reentersyscall.
	savedsp := unsafe.Pointer(gp.syscallsp)
	savedpc := gp.syscallpc
	savedbp := unsafe.Pointer(gp.syscallbp)
	exitsyscall() // coming out of cgo call
	gp.m.incgo = false
	if gp.m.isextra {
		gp.m.isExtraInC = false
	}

	osPreemptExtExit(gp.m)

	if gp.nocgocallback {
		panic("runtime: function marked with #cgo nocallback called back into Go")
	}

	cgocallbackg1(fn, frame, ctxt)

	// At this point we're about to call unlockOSThread.
	// The following code must not change to a different m.
	// This is enforced by checking incgo in the schedule function.
	gp.m.incgo = true
	unlockOSThread()

	if gp.m.isextra && gp.m.ncgo == 0 {
		// There are no active cgocalls above this frame (ncgo == 0),
		// thus there can't be more Go frames above this frame.
		gp.m.isExtraInC = true
	}

	if gp.m != checkm {
		throw("m changed unexpectedly in cgocallbackg")
	}

	osPreemptExtEnter(gp.m)

	// going back to cgo call
	reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp))
	gp.m.winsyscall = winsyscall

	// Restore the old g0 stack bounds
	gp.m.g0.stack = oldStack
	gp.m.g0.stackguard0 = oldStack.lo + stackGuard
	gp.m.g0.stackguard1 = gp.m.g0.stackguard0
	gp.m.g0StackAccurate = oldAccurate
}
// cgocallbackg1 is the main body of a C-to-Go callback, running on
// gp.m.curg's stack after cgocallbackg has exited the "system call".
// It pushes ctxt (if any) for tracebacks, waits for runtime init if
// needed, adjusts profiling and DIT state, and invokes fn on frame.
func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {
	gp := getg()

	if gp.m.needextram || extraMWaiters.Load() > 0 {
		gp.m.needextram = false
		systemstack(newextram)
	}

	if ctxt != 0 {
		s := append(gp.cgoCtxt, ctxt)

		// Now we need to set gp.cgoCtxt = s, but we could get
		// a SIGPROF signal while manipulating the slice, and
		// the SIGPROF handler could pick up gp.cgoCtxt while
		// tracing up the stack. We need to ensure that the
		// handler always sees a valid slice, so set the
		// values in an order such that it always does.
		p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
		atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0]))
		p.cap = cap(s)
		p.len = len(s)

		defer func(gp *g) {
			// Decrease the length of the slice by one, safely.
			p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
			p.len--
		}(gp)
	}

	if gp.m.ncgo == 0 {
		// The C call to Go came from a thread not currently running
		// any Go. In the case of -buildmode=c-archive or c-shared,
		// this call may be coming in before package initialization
		// is complete. Don't proceed until it is.
		//
		// We check a bool first for speed, and wait on a channel
		// if it's not ready.
		//
		// In race mode, skip the optimization and always use the
		// channel, which has the race instrumentation.
		if raceenabled || !mainInitDone.Load() {
			<-mainInitDoneChan
		}
	}

	// Check whether the profiler needs to be turned on or off; this route to
	// run Go code does not use runtime.execute, so bypasses the check there.
	hz := sched.profilehz
	if gp.m.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	// Add entry to defer stack in case of panic.
	restore := true
	defer unwindm(&restore)

	var ditStateM, ditStateG bool
	if debug.dataindependenttiming == 1 && gp.m.isextra {
		// We only need to enable DIT for threads that were created by C, as it
		// should already be enabled on threads that were created by Go.
		ditStateM = sys.EnableDIT()
	} else if sys.DITSupported && debug.dataindependenttiming != 1 {
		// C code may have enabled or disabled DIT on this thread. Set the flag
		// on the M and G accordingly, saving their previous state to restore
		// on return from the callback.
		ditStateM, ditStateG = gp.m.ditEnabled, gp.ditWanted
		ditEnabled := sys.DITEnabled()
		gp.ditWanted = ditEnabled
		gp.m.ditEnabled = ditEnabled
	}

	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	// Invoke callback. This function is generated by cmd/cgo and
	// will unpack the argument frame and call the Go function.
	var cb func(frame unsafe.Pointer)
	cbFV := funcval{uintptr(fn)}
	*(*unsafe.Pointer)(unsafe.Pointer(&cb)) = noescape(unsafe.Pointer(&cbFV))
	cb(frame)

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	if debug.dataindependenttiming == 1 && !ditStateM {
		// Only unset DIT if it wasn't already enabled when cgocallback was called.
		sys.DisableDIT()
	} else if sys.DITSupported && debug.dataindependenttiming != 1 {
		// Restore DIT state on M and G.
		gp.ditWanted = ditStateG
		gp.m.ditEnabled = ditStateM
		if !ditStateM {
			sys.DisableDIT()
		}
	}

	// Do not unwind m->g0->sched.sp.
	// Our caller, cgocallback, will do that.
	restore = false
}
// unwindm is the deferred cleanup for cgocallbackg1. It only acts when
// *restore is still true, i.e. when we are unwinding because the Go
// callback panicked rather than returning normally.
func unwindm(restore *bool) {
	if *restore {
		// Restore sp saved by cgocallback during
		// unwind of g's stack (see comment at top of file).
		mp := acquirem()
		sched := &mp.g0.sched
		// The saved SP lives one (aligned) frame above the current
		// sched.sp; load it back.
		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign)))

		// Do the accounting that cgocall will not have a chance to do
		// during an unwind.
		//
		// In the case where a Go call originates from C, ncgo is 0
		// and there is no matching cgocall to end.
		if mp.ncgo > 0 {
			mp.incgo = false
			mp.ncgo--
			osPreemptExtExit(mp)
		}

		// Undo the call to lockOSThread in cgocallbackg, only on the
		// panicking path. In normal return case cgocallbackg will call
		// unlockOSThread, ensuring no preemption point after the unlock.
		// Here we don't need to worry about preemption, because we're
		// panicking out of the callback and unwinding the g0 stack,
		// instead of reentering cgo (which requires the same thread).
		unlockOSThread()

		releasem(mp)
	}
}
// badcgocallback is called from assembly when cgocallback is entered
// with a misaligned stack; it aborts the process.
func badcgocallback() {
	throw("misaligned stack in cgocallback")
}
// cgounimpl is called from (incomplete) assembly stubs where cgo
// support has not been implemented; it aborts the process.
func cgounimpl() {
	throw("cgo not implemented")
}
var racecgosync uint64 // represents possible synchronization in C code
// Pointer checking for cgo code.
//
// We want to detect all cases where a program that does not use
// unsafe makes a cgo call passing a Go pointer to memory that
// contains an unpinned Go pointer. Here a Go pointer is defined as a
// pointer to memory allocated by the Go runtime. Programs that use
// unsafe can evade this restriction easily, so we don't try to catch
// them. The cgo program will rewrite all possibly bad pointer
// arguments to call cgoCheckPointer, where we can catch cases of a Go
// pointer pointing to an unpinned Go pointer.
//
// Complicating matters, taking the address of a slice or array
// element permits the C program to access all elements of the slice
// or array. In that case we will see a pointer to a single element,
// but we need to check the entire data structure.
//
// The cgoCheckPointer call takes additional arguments indicating that
// it was called on an address expression. An additional argument of
// true means that it only needs to check a single element. An
// additional argument of a slice or array means that it needs to
// check the entire slice/array, but nothing else. Otherwise, the
// pointer could be anything, and we check the entire heap object,
// which is conservative but safe.
//
// When and if we implement a moving garbage collector,
// cgoCheckPointer will pin the pointer for the duration of the cgo
// call. (This is necessary but not sufficient; the cgo program will
// also have to change to pin Go pointers that cannot point to Go
// pointers.)

// cgoCheckPointer checks if the argument contains a Go pointer that
// points to an unpinned Go pointer, and panics if it does.
// arg is the optional extra argument described above (bool, slice,
// array, or pointer-to-array).
func cgoCheckPointer(ptr any, arg any) {
	// Checking is disabled unless requested by GOEXPERIMENT or GODEBUG.
	if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
		return
	}

	ep := efaceOf(&ptr)
	t := ep._type

	top := true
	if arg != nil && (t.Kind() == abi.Pointer || t.Kind() == abi.UnsafePointer) {
		p := ep.data
		if !t.IsDirectIface() {
			p = *(*unsafe.Pointer)(p)
		}
		if p == nil || !cgoIsGoPointer(p) {
			return
		}
		aep := efaceOf(&arg)
		switch aep._type.Kind() {
		case abi.Bool:
			if t.Kind() == abi.UnsafePointer {
				// We don't know the type of the element.
				break
			}
			// Single-element check: look only at the pointed-to value.
			pt := (*ptrtype)(unsafe.Pointer(t))
			cgoCheckArg(pt.Elem, p, true, false, cgoCheckPointerFail)
			return
		case abi.Slice:
			// Check the slice rather than the pointer.
			ep = aep
			t = ep._type
		case abi.Array:
			// Check the array rather than the pointer.
			// Pass top as false since we have a pointer
			// to the array.
			ep = aep
			t = ep._type
			top = false
		case abi.Pointer:
			// The Go code is indexing into a pointer to an array,
			// and we have been passed the pointer-to-array.
			// Check the array rather than the pointer.
			pt := (*abi.PtrType)(unsafe.Pointer(aep._type))
			t = pt.Elem
			if t.Kind() != abi.Array {
				throw("can't happen")
			}
			ep = aep
			top = false
		default:
			throw("can't happen")
		}
	}

	cgoCheckArg(t, ep.data, !t.IsDirectIface(), top, cgoCheckPointerFail)
}
// cgoErrorMsg selects which panic message cgoFormatErr produces when a
// bad pointer is found.
type cgoErrorMsg int

const (
	cgoCheckPointerFail cgoErrorMsg = iota // bad pointer in an argument to a cgo call
	cgoResultFail                          // bad pointer in the result of an exported Go function
)
// cgoCheckArg is the real work of cgoCheckPointer and cgoCheckResult.
// The argument p is either a pointer to the value (of type t), or the value
// itself, depending on indir. The top parameter is whether we are at the top
// level, where Go pointers are allowed. Go pointers to pinned objects are
// allowed as long as they don't reference other unpinned pointers.
// msg selects the panic message used when a bad pointer is found.
func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg cgoErrorMsg) {
	if !t.Pointers() || p == nil {
		// If the type has no pointers there is nothing to do.
		return
	}

	switch t.Kind() {
	default:
		throw("can't happen")
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		if !indir {
			// A non-indirect array must be a single element held
			// directly; check that element in place.
			if at.Len != 1 {
				throw("can't happen")
			}
			cgoCheckArg(at.Elem, p, !at.Elem.IsDirectIface(), top, msg)
			return
		}
		// Check every element of the array.
		for i := uintptr(0); i < at.Len; i++ {
			cgoCheckArg(at.Elem, p, true, top, msg)
			p = add(p, at.Elem.Size_)
		}
	case abi.Chan, abi.Map:
		// These types contain internal pointers that will
		// always be allocated in the Go heap. It's never OK
		// to pass them to C.
		panic(cgoFormatErr(msg, t.Kind()))
	case abi.Func:
		if indir {
			p = *(*unsafe.Pointer)(p)
		}
		if !cgoIsGoPointer(p) {
			return
		}
		panic(cgoFormatErr(msg, t.Kind()))
	case abi.Interface:
		it := *(**_type)(p)
		if it == nil {
			return
		}
		// A type known at compile time is OK since it's
		// constant. A type not known at compile time will be
		// in the heap and will not be OK.
		if inheap(uintptr(unsafe.Pointer(it))) {
			panic(cgoFormatErr(msg, t.Kind()))
		}
		// Check the interface's data word.
		p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
		if !cgoIsGoPointer(p) {
			return
		}
		if !top && !isPinned(p) {
			panic(cgoFormatErr(msg, t.Kind()))
		}
		cgoCheckArg(it, p, !it.IsDirectIface(), false, msg)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		s := (*slice)(p)
		p = s.array
		if p == nil || !cgoIsGoPointer(p) {
			return
		}
		if !top && !isPinned(p) {
			panic(cgoFormatErr(msg, t.Kind()))
		}
		if !st.Elem.Pointers() {
			return
		}
		// Check the whole backing array (up to cap), not just len.
		for i := 0; i < s.cap; i++ {
			cgoCheckArg(st.Elem, p, true, false, msg)
			p = add(p, st.Elem.Size_)
		}
	case abi.String:
		ss := (*stringStruct)(p)
		if !cgoIsGoPointer(ss.str) {
			return
		}
		if !top && !isPinned(ss.str) {
			panic(cgoFormatErr(msg, t.Kind()))
		}
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		if !indir {
			// A non-indirect struct must hold exactly one field directly.
			if len(st.Fields) != 1 {
				throw("can't happen")
			}
			cgoCheckArg(st.Fields[0].Typ, p, !st.Fields[0].Typ.IsDirectIface(), top, msg)
			return
		}
		// Check each pointer-containing field.
		for _, f := range st.Fields {
			if !f.Typ.Pointers() {
				continue
			}
			cgoCheckArg(f.Typ, add(p, f.Offset), true, top, msg)
		}
	case abi.Pointer, abi.UnsafePointer:
		if indir {
			p = *(*unsafe.Pointer)(p)
			if p == nil {
				return
			}
		}

		if !cgoIsGoPointer(p) {
			return
		}
		if !top && !isPinned(p) {
			panic(cgoFormatErr(msg, t.Kind()))
		}

		cgoCheckUnknownPointer(p, msg)
	}
}
// cgoCheckUnknownPointer is called for an arbitrary pointer into Go
// memory. It checks whether that Go memory contains any other
// pointer into unpinned Go memory. If it does, we panic.
// The return values are unused but useful to see in panic tracebacks.
func cgoCheckUnknownPointer(p unsafe.Pointer, msg cgoErrorMsg) (base, i uintptr) {
	if inheap(uintptr(p)) {
		b, span, _ := findObject(uintptr(p), 0, 0)
		base = b
		if base == 0 {
			return
		}
		// Walk every pointer slot of the containing heap object.
		tp := span.typePointersOfUnchecked(base)
		for {
			var addr uintptr
			if tp, addr = tp.next(base + span.elemsize); addr == 0 {
				break
			}
			pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
			if cgoIsGoPointer(pp) && !isPinned(pp) {
				panic(cgoFormatErr(msg, abi.Pointer))
			}
		}

		return
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			// We have no way to know the size of the object.
			// We have to assume that it might contain a pointer.
			panic(cgoFormatErr(msg, abi.Pointer))
		}
		// In the text or noptr sections, we know that the
		// pointer does not point to a Go pointer.
	}

	return
}
// cgoIsGoPointer reports whether the pointer is a Go pointer--a
// pointer to Go memory. We only care about Go memory that might
// contain pointers: the heap, goroutine stacks, and the data/bss
// sections of active modules.
//
//go:nosplit
//go:nowritebarrierrec
func cgoIsGoPointer(p unsafe.Pointer) bool {
	if p == nil {
		return false
	}

	if inHeapOrStack(uintptr(p)) {
		return true
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			return true
		}
	}

	return false
}
// cgoInRange reports whether p is between start and end.
//
//go:nosplit
//go:nowritebarrierrec
func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
return start <= uintptr(p) && uintptr(p) < end
}
// cgoCheckResult is called to check the result parameter of an
// exported Go function. It panics if the result is or contains any
// other pointer into unpinned Go memory.
func cgoCheckResult(val any) {
	// Checking is disabled unless requested by GOEXPERIMENT or GODEBUG.
	if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
		return
	}

	ep := efaceOf(&val)
	t := ep._type
	if t == nil {
		// A nil interface carries no pointer to check.
		return
	}
	// top=false: result pointers handed back to C must be pinned.
	cgoCheckArg(t, ep.data, !t.IsDirectIface(), false, cgoResultFail)
}
// cgoFormatErr is called by cgoCheckArg and cgoCheckUnknownPointer to
// format panic error messages. reason selects between the two message
// forms (bad cgo argument vs. bad exported-function result); kind is
// the abi.Kind of the offending value, used to name it in the message.
func cgoFormatErr(reason cgoErrorMsg, kind abi.Kind) errorString {
	var msg, kindname string
	// Renamed from "error" to avoid shadowing the predeclared identifier.
	cgoFunction := "unknown"
	var offset int
	var buf [20]byte

	// We expect one of these abi.Kind from cgoCheckArg.
	switch kind {
	case abi.Chan:
		kindname = "channel"
	case abi.Func:
		kindname = "function"
	case abi.Interface:
		kindname = "interface"
	case abi.Map:
		kindname = "map"
	case abi.Pointer:
		kindname = "pointer"
	case abi.Slice:
		kindname = "slice"
	case abi.String:
		kindname = "string"
	case abi.Struct:
		kindname = "struct"
	case abi.UnsafePointer:
		kindname = "unsafe pointer"
	default:
		kindname = "pointer"
	}

	// The cgo function name might need an offset to be obtained:
	// skip the "_cgoexp_" prefix, 12 hex digits, and "_" (21 bytes).
	if reason == cgoResultFail {
		offset = 21
	}

	// Relative to cgoFormatErr, this is the stack frame:
	//  0. cgoFormatErr
	//  1. cgoCheckArg or cgoCheckUnknownPointer
	//  2. cgoCheckPointer or cgoCheckResult
	//  3. cgo function
	pc, path, line, ok := Caller(3)
	if ok && reason == cgoResultFail {
		function := FuncForPC(pc)
		if function != nil {
			// Expected format of cgo function name:
			//  - caller: _cgoexp_3c910ddb72c4_foo
			if offset > len(function.Name()) {
				// Name too short to carry the prefix; use it unchanged.
				cgoFunction = function.Name()
			} else {
				cgoFunction = function.Name()[offset:]
			}
		}
	}

	switch reason {
	case cgoResultFail:
		msg = path + ":" + string(itoa(buf[:], uint64(line)))
		msg += ": result of Go function " + cgoFunction + " called from cgo"
		msg += " is unpinned Go " + kindname + " or points to unpinned Go " + kindname
	case cgoCheckPointerFail:
		msg += "argument of cgo function has Go pointer to unpinned Go " + kindname
	}
	return errorString(msg)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// These functions are called from C code via cgo/callbacks.go.

// _cgo_panic_internal starts a Go panic whose value is the string at p
// (converted without copying by gostringnocopy).
func _cgo_panic_internal(p *byte) {
	panic(gostringnocopy(p))
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code to check that pointer writes follow the cgo rules.
// These functions are invoked when GOEXPERIMENT=cgocheck2 is enabled.
package runtime
import (
"internal/goarch"
"unsafe"
)
const cgoWriteBarrierFail = "unpinned Go pointer stored into non-Go memory"
// cgoCheckPtrWrite is called whenever a pointer is stored into memory.
// It throws if the program is storing an unpinned Go pointer into non-Go
// memory.
//
// This is called from generated code when GOEXPERIMENT=cgocheck2 is enabled.
//
//go:nosplit
//go:nowritebarrier
func cgoCheckPtrWrite(dst *unsafe.Pointer, src unsafe.Pointer) {
	if !mainStarted {
		// Something early in startup hates this function.
		// Don't start doing any actual checking until the
		// runtime has set itself up.
		return
	}
	if !cgoIsGoPointer(src) {
		// Storing a non-Go pointer is always fine.
		return
	}
	if cgoIsGoPointer(unsafe.Pointer(dst)) {
		// Go-to-Go stores are fine.
		return
	}

	// If we are running on the system stack then dst might be an
	// address on the stack, which is OK.
	gp := getg()
	if gp == gp.m.g0 || gp == gp.m.gsignal {
		return
	}

	// Allocating memory can write to various mfixalloc structs
	// that look like they are non-Go memory.
	if gp.m.mallocing != 0 {
		return
	}

	// If the object is pinned, it's safe to store it in C memory. The GC
	// ensures it will not be moved or freed.
	if isPinned(src) {
		return
	}

	// It's OK if writing to memory allocated by persistentalloc.
	// Do this check last because it is more expensive and rarely true.
	// If it is false the expense doesn't matter since we are crashing.
	if inPersistentAlloc(uintptr(unsafe.Pointer(dst))) {
		return
	}

	systemstack(func() {
		println("write of unpinned Go pointer", hex(uintptr(src)), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst))))
		throw(cgoWriteBarrierFail)
	})
}
// cgoCheckMemmove is called when moving a block of memory.
// It throws if the program is copying a block that contains an unpinned Go
// pointer into non-Go memory.
//
// This is called from generated code when GOEXPERIMENT=cgocheck2 is enabled.
//
//go:nosplit
//go:nowritebarrier
func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer) {
	// Whole-value move: offset 0, full type size.
	cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
}
// cgoCheckMemmove2 is called when moving a block of memory.
// dst and src point off bytes into the value to copy.
// size is the number of bytes to copy.
// It throws if the program is copying a block that contains an unpinned Go
// pointer into non-Go memory.
//
//go:nosplit
//go:nowritebarrier
func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
	// Only a pointer-bearing copy out of Go memory into non-Go memory
	// needs to be checked; everything else is trivially safe.
	if !typ.Pointers() || !cgoIsGoPointer(src) || cgoIsGoPointer(dst) {
		return
	}
	cgoCheckTypedBlock(typ, src, off, size)
}
// cgoCheckSliceCopy is called when copying n elements of a slice.
// src and dst are pointers to the first element of the slice.
// typ is the element type of the slice.
// It throws if the program is copying slice elements that contain unpinned Go
// pointers into non-Go memory.
//
//go:nosplit
//go:nowritebarrier
func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) {
	// Only a pointer-bearing copy out of Go memory into non-Go memory
	// needs to be checked.
	if !typ.Pointers() || !cgoIsGoPointer(src) || cgoIsGoPointer(dst) {
		return
	}
	// Check each of the n elements starting at src.
	for i, elem := 0, src; i < n; i, elem = i+1, add(elem, typ.Size_) {
		cgoCheckTypedBlock(typ, elem, 0, typ.Size_)
	}
}
// cgoCheckTypedBlock checks the block of memory at src, for up to size bytes,
// and throws if it finds an unpinned Go pointer. The type of the memory is typ,
// and src is off bytes into that type.
//
//go:nosplit
//go:nowritebarrier
func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
	// Anything at or past typ.PtrBytes cannot be a pointer.
	if off >= typ.PtrBytes {
		return
	}
	// Clamp size so the scan never runs past the pointer-bearing prefix.
	size = min(size, typ.PtrBytes-off)
	cgoCheckBits(src, getGCMask(typ), off, size)
}
// cgoCheckBits checks the block of memory at src, for up to size
// bytes, and throws if it finds an unpinned Go pointer. The gcbits mark each
// pointer value. The src pointer is off bytes into the gcbits.
//
//go:nosplit
//go:nowritebarrier
func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
	// Skip whole mask bytes (each covers 8 pointer-words) that lie
	// entirely before off.
	skipMask := off / goarch.PtrSize / 8
	skipBytes := skipMask * goarch.PtrSize * 8
	ptrmask := addb(gcbits, skipMask)
	src = add(src, skipBytes)
	off -= skipBytes
	size += off
	var bits uint32
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if i&(goarch.PtrSize*8-1) == 0 {
			// Load the next mask byte at each 8-word boundary.
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits >>= 1
		}
		if off > 0 {
			// Still inside the sub-byte remainder of off; consume it
			// without checking.
			off -= goarch.PtrSize
		} else {
			if bits&1 != 0 {
				// This word is a pointer slot; verify its value.
				v := *(*unsafe.Pointer)(add(src, i))
				if cgoIsGoPointer(v) && !isPinned(v) {
					throw(cgoWriteBarrierFail)
				}
			}
		}
	}
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/cgroup"
)
// cgroup-aware GOMAXPROCS default
//
// At startup (defaultGOMAXPROCSInit), we read /proc/self/cgroup and /proc/self/mountinfo
// to find our current CPU cgroup and open its limit file(s), which remain open
// for the entire process lifetime. We periodically read the current limit by
// rereading the limit file(s) from the beginning.
//
// This makes reading updated limits simple, but has a few downsides:
//
// 1. We only read the limit from the leaf cgroup that actually contains this
// process. But a parent cgroup may have a tighter limit. That tighter limit
// would be our effective limit. That said, container runtimes tend to hide
// parent cgroups from the container anyway.
//
// 2. If the process is migrated to another cgroup while it is running it will
// not notice, as we only check which cgroup we are in once at startup.
var (
	// We can't allocate during early initialization when we need to find
	// the cgroup. Simply use a fixed global as a scratch parsing buffer.
	cgroupScratch [cgroup.ScratchSize]byte

	cgroupOK  bool       // set by defaultGOMAXPROCSInit once cgroupCPU is open and enabled
	cgroupCPU cgroup.CPU // handle to this process's CPU cgroup, kept open for the process lifetime

	// defaultGOMAXPROCSInit runs before internal/godebug init, so we can't
	// directly update the GODEBUG counter. Store the result until after
	// init runs.
	containermaxprocsNonDefault bool
	containermaxprocs           = &godebugInc{name: "containermaxprocs"}
)
// Prepare for defaultGOMAXPROCS: open the CPU cgroup (if any) and record
// whether cgroup-aware GOMAXPROCS is in effect.
//
// Must run after parsedebugvars.
func defaultGOMAXPROCSInit() {
	c, err := cgroup.OpenCPU(cgroupScratch[:])
	if err != nil {
		// Likely cgroup.ErrNoCgroup.
		return
	}

	if debug.containermaxprocs > 0 {
		// Normal operation. Keep the cgroup handle open so
		// defaultGOMAXPROCS can re-read the limit later.
		cgroupCPU = c
		cgroupOK = true
		return
	}

	// cgroup-aware GOMAXPROCS is disabled. We still check the cgroup once
	// at startup to see if enabling the GODEBUG would result in a
	// different default GOMAXPROCS. If so, we increment runtime/metrics
	// /godebug/non-default-behavior/containermaxprocs:events.
	procs := getCPUCount()
	cgroupProcs := adjustCgroupGOMAXPROCS(procs, c)
	if procs != cgroupProcs {
		containermaxprocsNonDefault = true
	}

	// Don't need the cgroup for remaining execution.
	c.Close()
}
// defaultGOMAXPROCSUpdateGODEBUG updates the internal/godebug counter for
// container GOMAXPROCS, once internal/godebug is initialized. It flushes
// the result saved by defaultGOMAXPROCSInit, which runs too early to
// increment the counter directly.
func defaultGOMAXPROCSUpdateGODEBUG() {
	if containermaxprocsNonDefault {
		containermaxprocs.IncNonDefault()
	}
}
// Return the default value for GOMAXPROCS when it has not been set explicitly.
//
// ncpu is the optional precomputed value of getCPUCount. If passed as 0,
// defaultGOMAXPROCS will call getCPUCount.
func defaultGOMAXPROCS(ncpu int32) int32 {
	// GOMAXPROCS is the minimum of:
	//
	// 1. Total number of logical CPUs available from sched_getaffinity.
	//
	// 2. The average CPU cgroup throughput limit (average throughput =
	// quota/period). A limit less than 2 is rounded up to 2, and any
	// fractional component is rounded up.
	//
	// TODO: add rationale.

	procs := ncpu
	if procs <= 0 {
		procs = getCPUCount()
	}

	if !cgroupOK {
		// No cgroup, or disabled by debug.containermaxprocs.
		return procs
	}

	return adjustCgroupGOMAXPROCS(procs, cgroupCPU)
}
// adjustCgroupGOMAXPROCS lowers procs as necessary for the current cgroup
// CPU limit: the limit is rounded up to a whole number, floored at 2, and
// used as a cap on procs. If no limit can be read, procs is returned
// unchanged.
func adjustCgroupGOMAXPROCS(procs int32, cpu cgroup.CPU) int32 {
	limit, ok, err := cgroup.ReadCPULimit(cpu)
	if err != nil || !ok {
		// No usable limit; leave procs alone.
		return procs
	}
	capped := int32(max(ceil(limit), 2))
	return min(procs, capped)
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// This file contains the implementation of Go channels.
// Invariants:
// At least one of c.sendq and c.recvq is empty,
// except for the case of an unbuffered channel with a single goroutine
// blocked on it for both sending and receiving using a select statement,
// in which case the length of c.sendq and c.recvq is limited only by the
// size of the select statement.
//
// For buffered channels, also:
// c.qcount > 0 implies that c.recvq is empty.
// c.qcount < c.dataqsiz implies that c.sendq is empty.
import (
"internal/abi"
"internal/runtime/atomic"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
)
const (
	// maxAlign is the maximum alignment required by channel elements.
	maxAlign = 8
	// hchanSize is unsafe.Sizeof(hchan{}) rounded up to a multiple of
	// maxAlign, so the element buffer placed after the header in the
	// same allocation is suitably aligned.
	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
	// debugChan enables channel debugging output when set.
	debugChan = false
)
// hchan is the runtime representation of a channel (chan T).
type hchan struct {
	qcount   uint           // total data in the queue
	dataqsiz uint           // size of the circular queue
	buf      unsafe.Pointer // points to an array of dataqsiz elements
	elemsize uint16         // element size in bytes (copy of elemtype.Size_)
	closed   uint32         // nonzero once the channel has been closed
	timer    *timer         // timer feeding this chan
	elemtype *_type         // element type
	sendx    uint           // send index
	recvx    uint           // receive index
	recvq    waitq          // list of recv waiters
	sendq    waitq          // list of send waiters
	bubble   *synctestBubble // non-nil if the channel was created inside a synctest bubble

	// lock protects all fields in hchan, as well as several
	// fields in sudogs blocked on this channel.
	//
	// Do not change another G's status while holding this lock
	// (in particular, do not ready a G), as this can deadlock
	// with stack shrinking.
	lock mutex
}
// waitq is a doubly-linked list of sudogs representing goroutines
// blocked on a channel; used for both hchan.recvq and hchan.sendq.
type waitq struct {
	first *sudog
	last  *sudog
}
// reflect_makechan is called via linkname from package reflect to
// create a channel; it simply forwards to makechan.
//
//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int) *hchan {
	return makechan(t, size)
}
// makechan64 is makechan for a 64-bit buffer size.
// It panics if the requested size does not fit in an int.
func makechan64(t *chantype, size int64) *hchan {
	n := int(size)
	if int64(n) != size {
		panic(plainError("makechan: size out of range"))
	}
	return makechan(t, n)
}
// makechan creates and initializes a channel with element type t.Elem
// and a buffer of size elements; it is the implementation of
// make(chan T, size).
func makechan(t *chantype, size int) *hchan {
	elem := t.Elem

	// compiler checks this but be safe.
	if elem.Size_ >= 1<<16 {
		throw("makechan: invalid channel element type")
	}
	if hchanSize%maxAlign != 0 || elem.Align_ > maxAlign {
		throw("makechan: bad alignment")
	}

	// Reject negative sizes and buffers whose total byte size would
	// overflow or exceed the maximum allocation.
	mem, overflow := math.MulUintptr(elem.Size_, uintptr(size))
	if overflow || mem > maxAlloc-hchanSize || size < 0 {
		panic(plainError("makechan: size out of range"))
	}

	// Hchan does not contain pointers interesting for GC when elements stored in buf do not contain pointers.
	// buf points into the same allocation, elemtype is persistent.
	// SudoG's are referenced from their owning thread so they can't be collected.
	// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
	var c *hchan
	switch {
	case mem == 0:
		// Queue or element size is zero.
		c = (*hchan)(mallocgc(hchanSize, nil, true))
		// Race detector uses this location for synchronization.
		c.buf = c.raceaddr()
	case !elem.Pointers():
		// Elements do not contain pointers.
		// Allocate hchan and buf in one call.
		c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
		c.buf = add(unsafe.Pointer(c), hchanSize)
	default:
		// Elements contain pointers.
		c = new(hchan)
		c.buf = mallocgc(mem, elem, true)
	}

	c.elemsize = uint16(elem.Size_)
	c.elemtype = elem
	c.dataqsiz = uint(size)
	// A channel created inside a synctest bubble belongs to that bubble.
	if b := getg().bubble; b != nil {
		c.bubble = b
	}
	lockInit(&c.lock, lockRankHchan)

	if debugChan {
		print("makechan: chan=", c, "; elemsize=", elem.Size_, "; dataqsiz=", size, "\n")
	}
	return c
}
// chanbuf(c, i) is pointer to the i'th slot in the buffer.
// It does no bounds checking; callers must pass i < c.dataqsiz.
//
// chanbuf should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/fjl/memsize
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname chanbuf
func chanbuf(c *hchan, i uint) unsafe.Pointer {
	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}
// full reports whether a send on c would block (that is, the channel is full).
// It uses a single word-sized read of mutable state, so although
// the answer is instantaneously true, the correct answer may have changed
// by the time the calling function receives the return value.
func full(c *hchan) bool {
	// c.dataqsiz is immutable (never written after the channel is created)
	// so it is safe to read at any time during channel operation.
	if c.dataqsiz == 0 {
		// Unbuffered channel: a send blocks unless a receiver is waiting.
		// Assumes that a pointer read is relaxed-atomic.
		return c.recvq.first == nil
	}
	// Buffered channel: a send blocks when the buffer is full.
	// Assumes that a uint read is relaxed-atomic.
	return c.qcount == c.dataqsiz
}
// entry point for c <- x from compiled code.
// A plain channel send always blocks until it can complete (block=true).
//
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
	chansend(c, elem, true, sys.GetCallerPC())
}
/*
 * generic single channel send/recv
 * If block is false,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g.param == nil
 * when a channel involved in the sleep has
 * been closed. it is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */
func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
	if c == nil {
		// Send on a nil channel: fails immediately when non-blocking,
		// otherwise parks forever.
		if !block {
			return false
		}
		gopark(nil, nil, waitReasonChanSendNilChan, traceBlockForever, 2)
		throw("unreachable")
	}

	if debugChan {
		print("chansend: chan=", c, "\n")
	}

	if raceenabled {
		racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend))
	}

	// synctest channels may only be operated on from inside their bubble.
	if c.bubble != nil && getg().bubble != c.bubble {
		fatal("send on synctest channel from outside bubble")
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not closed, we observe that the channel is
	// not ready for sending. Each of these observations is a single word-sized read
	// (first c.closed and second full()).
	// Because a closed channel cannot transition from 'ready for sending' to
	// 'not ready for sending', even if the channel is closed between the two observations,
	// they imply a moment between the two when the channel was both not yet closed
	// and not ready for sending. We behave as if we observed the channel at that moment,
	// and report that the send cannot proceed.
	//
	// It is okay if the reads are reordered here: if we observe that the channel is not
	// ready for sending and then observe that it is not closed, that implies that the
	// channel wasn't closed during the first observation. However, nothing here
	// guarantees forward progress. We rely on the side effects of lock release in
	// chanrecv() and closechan() to update this thread's view of c.closed and full().
	if !block && c.closed == 0 && full(c) {
		return false
	}

	// Record the start time if blocking profiling is enabled.
	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	// Sending on a closed channel panics.
	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("send on closed channel"))
	}

	if sg := c.recvq.dequeue(); sg != nil {
		// Found a waiting receiver. We pass the value we want to send
		// directly to the receiver, bypassing the channel buffer (if any).
		send(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true
	}

	if c.qcount < c.dataqsiz {
		// Space is available in the channel buffer. Enqueue the element to send.
		qp := chanbuf(c, c.sendx)
		if raceenabled {
			racenotify(c, c.sendx, nil)
		}
		typedmemmove(c.elemtype, qp, ep)
		c.sendx++
		if c.sendx == c.dataqsiz {
			c.sendx = 0
		}
		c.qcount++
		unlock(&c.lock)
		return true
	}

	if !block {
		unlock(&c.lock)
		return false
	}

	// Block on the channel. Some receiver will complete our operation for us.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem.set(ep)
	mysg.waitlink = nil
	mysg.g = gp
	mysg.isSelect = false
	mysg.c.set(c)
	gp.waiting = mysg
	gp.param = nil
	c.sendq.enqueue(mysg)
	// Signal to anyone trying to shrink our stack that we're about
	// to park on a channel. The window between when this G's status
	// changes and when we set gp.activeStackChans is not safe for
	// stack shrinking.
	gp.parkingOnChan.Store(true)
	reason := waitReasonChanSend
	if c.bubble != nil {
		reason = waitReasonSynctestChanSend
	}
	gopark(chanparkcommit, unsafe.Pointer(&c.lock), reason, traceBlockChanSend, 2)
	// Ensure the value being sent is kept alive until the
	// receiver copies it out. The sudog has a pointer to the
	// stack object, but sudogs aren't considered as roots of the
	// stack tracer.
	KeepAlive(ep)

	// someone woke us up.
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	gp.activeStackChans = false
	// mysg.success records whether the value was actually received
	// (true) or whether we were woken because the channel was closed
	// (false); see closechan.
	closed := !mysg.success
	gp.param = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	mysg.c.set(nil)
	releaseSudog(mysg)
	if closed {
		if c.closed == 0 {
			throw("chansend: spurious wakeup")
		}
		panic(plainError("send on closed channel"))
	}
	return true
}
// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked. send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if c.bubble != nil && getg().bubble != c.bubble {
		unlockf()
		fatal("send on synctest channel from outside bubble")
	}
	if raceenabled {
		if c.dataqsiz == 0 {
			racesync(c, sg)
		} else {
			// Pretend we go through the buffer, even though
			// we copy directly. Note that we need to increment
			// the head/tail locations only when raceenabled.
			racenotify(c, c.recvx, nil)
			racenotify(c, c.recvx, sg)
			c.recvx++
			if c.recvx == c.dataqsiz {
				c.recvx = 0
			}
			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		}
	}
	// A nil sg.elem means the receiver is discarding the value
	// (chanrecv was called with ep == nil); skip the copy entirely.
	if sg.elem.get() != nil {
		sendDirect(c.elemtype, sg, ep)
		sg.elem.set(nil)
	}
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	// success=true tells the receiver it got a real value rather than
	// being woken by a close.
	sg.success = true
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
// timerchandrain removes all elements in channel c's buffer.
// It reports whether any elements were removed.
// Because it is only intended for timers, it does not
// handle waiting senders at all (all timer channels
// use non-blocking sends to fill the buffer).
func timerchandrain(c *hchan) bool {
	// Note: Cannot use empty(c) because we are called
	// while holding c.timer.sendLock, and empty(c) will
	// call c.timer.maybeRunChan, which will deadlock.
	// We are emptying the channel, so we only care about
	// the count, not about potentially filling it up.
	if atomic.Loaduint(&c.qcount) == 0 {
		return false
	}
	lock(&c.lock)
	any := false
	// Clear each buffered slot and advance the receive index until
	// the buffer is empty.
	for c.qcount > 0 {
		any = true
		typedmemclr(c.elemtype, chanbuf(c, c.recvx))
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.qcount--
	}
	unlock(&c.lock)
	return any
}
// Sends and receives on unbuffered or empty-buffered channels are the
// only operations where one running goroutine writes to the stack of
// another running goroutine. The GC assumes that stack writes only
// happen when the goroutine is running and are only done by that
// goroutine. Using a write barrier is sufficient to make up for
// violating that assumption, but the write barrier has to work.
// typedmemmove will call bulkBarrierPreWrite, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.

// sendDirect copies the value at src (the sender's value) into the
// receiver sg's stack slot sg.elem.
func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
	// src is on our stack, dst is a slot on another stack.

	// Once we read sg.elem out of sg, it will no longer
	// be updated if the destination's stack gets copied (shrunk).
	// So make sure that no preemption points can happen between read & use.
	dst := sg.elem.get()
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
	// No need for cgo write barrier checks because dst is always
	// Go memory.
	memmove(dst, src, t.Size_)
}
// recvDirect copies the value out of the sender sg's slot into dst.
// It is the counterpart of sendDirect; see the comment above sendDirect
// for why an explicit barrier plus memmove is used instead of typedmemmove.
func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
	// dst is on our stack or the heap, src is on another stack.
	// The channel is locked, so src will not move during this
	// operation.
	src := sg.elem.get()
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
	memmove(dst, src, t.Size_)
}
// closechan implements close(c). It panics on a nil or already-closed
// channel, then wakes every blocked receiver (each observes a closed
// channel, sg.success=false) and every blocked sender (each will panic
// in chansend when it sees success=false on a closed channel).
func closechan(c *hchan) {
	if c == nil {
		panic(plainError("close of nil channel"))
	}
	if c.bubble != nil && getg().bubble != c.bubble {
		fatal("close of synctest channel from outside bubble")
	}

	lock(&c.lock)
	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("close of closed channel"))
	}

	if raceenabled {
		callerpc := sys.GetCallerPC()
		racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan))
		racerelease(c.raceaddr())
	}

	c.closed = 1

	// Collect woken goroutines here; they are made runnable only after
	// the channel lock is dropped (readying a G while holding the lock
	// can deadlock with stack shrinking; see the hchan.lock comment).
	var glist gList

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		// Receivers of a closed channel get the zero value.
		if sg.elem.get() != nil {
			typedmemclr(c.elemtype, sg.elem.get())
			sg.elem.set(nil)
		}
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = unsafe.Pointer(sg)
		sg.success = false
		if raceenabled {
			raceacquireg(gp, c.raceaddr())
		}
		glist.push(gp)
	}

	// release all writers (they will panic)
	for {
		sg := c.sendq.dequeue()
		if sg == nil {
			break
		}
		sg.elem.set(nil)
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = unsafe.Pointer(sg)
		sg.success = false
		if raceenabled {
			raceacquireg(gp, c.raceaddr())
		}
		glist.push(gp)
	}
	unlock(&c.lock)

	// Ready all Gs now that we've dropped the channel lock.
	for !glist.empty() {
		gp := glist.pop()
		gp.schedlink = 0
		goready(gp, 3)
	}
}
// empty reports whether a read from c would block (that is, the channel is
// empty). It is atomically correct and sequentially consistent at the moment
// it returns, but since the channel is unlocked, the channel may become
// non-empty immediately afterward.
func empty(c *hchan) bool {
	// c.dataqsiz is immutable.
	if c.dataqsiz == 0 {
		// Unbuffered: a receive blocks unless a sender is waiting.
		return atomic.Loadp(unsafe.Pointer(&c.sendq.first)) == nil
	}
	// c.timer is also immutable (it is set after make(chan) but before any channel operations).
	// All timer channels have dataqsiz > 0.
	if c.timer != nil {
		// Give the timer a chance to deliver a pending value first.
		c.timer.maybeRunChan(c)
	}
	return atomic.Loaduint(&c.qcount) == 0
}
// entry points for <- c from compiled code.
// chanrecv1 is used when the "ok" result is not needed.
//
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
	chanrecv(c, elem, true)
}
// chanrecv2 implements v, ok = <-c: it additionally reports whether a
// real value (as opposed to a zero value from a closed channel) was received.
//
//go:nosplit
func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
	_, received = chanrecv(c, elem, true)
	return
}
// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
	// raceenabled: don't need to check ep, as it is always on the stack
	// or is new memory allocated by reflect.

	if debugChan {
		print("chanrecv: chan=", c, "\n")
	}

	if c == nil {
		// Receive on a nil channel: fails immediately when non-blocking,
		// otherwise parks forever.
		if !block {
			return
		}
		gopark(nil, nil, waitReasonChanReceiveNilChan, traceBlockForever, 2)
		throw("unreachable")
	}

	// synctest channels may only be operated on from inside their bubble.
	if c.bubble != nil && getg().bubble != c.bubble {
		fatal("receive on synctest channel from outside bubble")
	}

	if c.timer != nil {
		c.timer.maybeRunChan(c)
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	if !block && empty(c) {
		// After observing that the channel is not ready for receiving, we observe whether the
		// channel is closed.
		//
		// Reordering of these checks could lead to incorrect behavior when racing with a close.
		// For example, if the channel was open and not empty, was closed, and then drained,
		// reordered reads could incorrectly indicate "open and empty". To prevent reordering,
		// we use atomic loads for both checks, and rely on emptying and closing to happen in
		// separate critical sections under the same lock. This assumption fails when closing
		// an unbuffered channel with a blocked send, but that is an error condition anyway.
		if atomic.Load(&c.closed) == 0 {
			// Because a channel cannot be reopened, the later observation of the channel
			// being not closed implies that it was also not closed at the moment of the
			// first observation. We behave as if we observed the channel at that moment
			// and report that the receive cannot proceed.
			return
		}
		// The channel is irreversibly closed. Re-check whether the channel has any pending data
		// to receive, which could have arrived between the empty and closed checks above.
		// Sequential consistency is also required here, when racing with such a send.
		if empty(c) {
			// The channel is irreversibly closed and empty.
			if raceenabled {
				raceacquire(c.raceaddr())
			}
			if ep != nil {
				typedmemclr(c.elemtype, ep)
			}
			return true, false
		}
	}

	// Record the start time if blocking profiling is enabled.
	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 {
		if c.qcount == 0 {
			// Closed and drained: deliver the zero value.
			if raceenabled {
				raceacquire(c.raceaddr())
			}
			unlock(&c.lock)
			if ep != nil {
				typedmemclr(c.elemtype, ep)
			}
			return true, false
		}
		// The channel has been closed, but the channel's buffer has data:
		// fall through to drain it below.
	} else {
		// The channel is not closed; look for a waiting sender.
		if sg := c.sendq.dequeue(); sg != nil {
			// Found a waiting sender. If buffer is size 0, receive value
			// directly from sender. Otherwise, receive from head of queue
			// and add sender's value to the tail of the queue (both map to
			// the same buffer slot because the queue is full).
			recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
			return true, true
		}
	}

	if c.qcount > 0 {
		// Receive directly from queue
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			racenotify(c, c.recvx, nil)
		}
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// Clear the slot so the GC does not retain the element.
		typedmemclr(c.elemtype, qp)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.qcount--
		unlock(&c.lock)
		return true, true
	}

	if !block {
		unlock(&c.lock)
		return false, false
	}

	// no sender available: block on this channel.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem.set(ep)
	mysg.waitlink = nil
	gp.waiting = mysg

	mysg.g = gp
	mysg.isSelect = false
	mysg.c.set(c)
	gp.param = nil
	c.recvq.enqueue(mysg)
	if c.timer != nil {
		blockTimerChan(c)
	}

	// Signal to anyone trying to shrink our stack that we're about
	// to park on a channel. The window between when this G's status
	// changes and when we set gp.activeStackChans is not safe for
	// stack shrinking.
	gp.parkingOnChan.Store(true)
	reason := waitReasonChanReceive
	if c.bubble != nil {
		reason = waitReasonSynctestChanReceive
	}
	gopark(chanparkcommit, unsafe.Pointer(&c.lock), reason, traceBlockChanRecv, 2)

	// someone woke us up
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	if c.timer != nil {
		unblockTimerChan(c)
	}
	gp.waiting = nil
	gp.activeStackChans = false
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	// mysg.success records whether a value was delivered (true) or the
	// channel was closed while we waited (false).
	success := mysg.success
	gp.param = nil
	mysg.c.set(nil)
	releaseSudog(mysg)
	return true, success
}
// recv processes a receive operation on a full channel c.
// There are 2 parts:
//  1. The value sent by the sender sg is put into the channel
//     and the sender is woken up to go on its merry way.
//  2. The value received by the receiver (the current G) is
//     written to ep.
//
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if c.bubble != nil && getg().bubble != c.bubble {
		unlockf()
		fatal("receive on synctest channel from outside bubble")
	}
	if c.dataqsiz == 0 {
		if raceenabled {
			racesync(c, sg)
		}
		if ep != nil {
			// copy data from sender
			recvDirect(c.elemtype, sg, ep)
		}
	} else {
		// Queue is full. Take the item at the
		// head of the queue. Make the sender enqueue
		// its item at the tail of the queue. Since the
		// queue is full, those are both the same slot.
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			racenotify(c, c.recvx, nil)
			racenotify(c, c.recvx, sg)
		}
		// copy data from queue to receiver
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// copy data from sender to queue
		typedmemmove(c.elemtype, qp, sg.elem.get())
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
	}
	sg.elem.set(nil)
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	// success=true tells the sender its value was consumed rather than
	// the channel being closed.
	sg.success = true
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
// chanparkcommit is the gopark unlock function used when blocking on a
// channel: it publishes that this G has sudogs on its stack and then
// releases the channel lock. Returning true commits the park.
func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
	// There are unlocked sudogs that point into gp's stack. Stack
	// copying must lock the channels of those sudogs.
	// Set activeStackChans here instead of before we try parking
	// because we could self-deadlock in stack growth on the
	// channel lock.
	gp.activeStackChans = true
	// Mark that it's safe for stack shrinking to occur now,
	// because any thread acquiring this G's stack for shrinking
	// is guaranteed to observe activeStackChans after this store.
	gp.parkingOnChan.Store(false)
	// Make sure we unlock after setting activeStackChans and
	// unsetting parkingOnChan. The moment we unlock chanLock
	// we risk gp getting readied by a channel operation and
	// so gp could continue running before everything before
	// the unlock is visible (even to gp itself).
	unlock((*mutex)(chanLock))
	return true
}
// compiler implements
//
//	select {
//	case c <- v:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbsend(c, v) {
//		... foo
//	} else {
//		... bar
//	}
//
// selected reports whether the send case fired (a non-blocking chansend).
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
	return chansend(c, elem, false, sys.GetCallerPC())
}
// compiler implements
//
//	select {
//	case v, ok = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selected, ok = selectnbrecv(&v, c); selected {
//		... foo
//	} else {
//		... bar
//	}
//
// selected reports whether the receive case fired; received reports
// whether a real value (rather than a closed-channel zero) was obtained.
func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected, received bool) {
	return chanrecv(c, elem, false)
}
// reflect_chansend is called via linkname from package reflect.
// Note that nb means non-blocking, so it is inverted to produce
// chansend's block parameter.
//
//go:linkname reflect_chansend reflect.chansend0
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
	return chansend(c, elem, !nb, sys.GetCallerPC())
}
// reflect_chanrecv is called via linkname from package reflect.
// As with reflect_chansend, nb (non-blocking) is inverted to produce
// chanrecv's block parameter.
//
//go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
	return chanrecv(c, elem, !nb)
}
// chanlen implements len(c) for channels. A nil channel has length 0.
func chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	if c.timer != nil {
		if debug.asynctimerchan.Load() != 0 {
			c.timer.maybeRunChan(c)
		} else {
			// timer channels have a buffered implementation
			// but present to users as unbuffered, so that we can
			// undo sends without users noticing.
			return 0
		}
	}
	return int(c.qcount)
}
// chancap implements cap(c) for channels. A nil channel has capacity 0.
func chancap(c *hchan) int {
	switch {
	case c == nil:
		return 0
	case c.timer == nil:
		return int(c.dataqsiz)
	case debug.asynctimerchan.Load() != 0:
		return int(c.dataqsiz)
	default:
		// timer channels have a buffered implementation
		// but present to users as unbuffered, so that we can
		// undo sends without users noticing.
		return 0
	}
}
// reflect_chanlen exposes chanlen to package reflect via linkname.
//
//go:linkname reflect_chanlen reflect.chanlen
func reflect_chanlen(c *hchan) int {
	return chanlen(c)
}
// reflectlite_chanlen exposes chanlen to internal/reflectlite via linkname.
//
//go:linkname reflectlite_chanlen internal/reflectlite.chanlen
func reflectlite_chanlen(c *hchan) int {
	return chanlen(c)
}
// reflect_chancap exposes chancap to package reflect via linkname.
//
//go:linkname reflect_chancap reflect.chancap
func reflect_chancap(c *hchan) int {
	return chancap(c)
}
// reflect_chanclose exposes closechan to package reflect via linkname.
//
//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
	closechan(c)
}
// enqueue appends sgp to the tail of the wait queue q.
func (q *waitq) enqueue(sgp *sudog) {
	sgp.next = nil
	tail := q.last
	if tail != nil {
		// Non-empty queue: link sgp after the current tail.
		sgp.prev = tail
		tail.next = sgp
		q.last = sgp
		return
	}
	// Empty queue: sgp becomes the only element.
	sgp.prev = nil
	q.first = sgp
	q.last = sgp
}
// dequeue removes and returns the first sudog in q that still needs to
// be woken, or nil if the queue is empty. Sudogs enqueued by a select
// whose wakeup race was already won by another case are skipped.
func (q *waitq) dequeue() *sudog {
	for {
		sgp := q.first
		if sgp == nil {
			return nil
		}
		y := sgp.next
		if y == nil {
			// sgp was the only element; the queue is now empty.
			q.first = nil
			q.last = nil
		} else {
			y.prev = nil
			q.first = y
			sgp.next = nil // mark as removed (see dequeueSudoG)
		}

		// if a goroutine was put on this queue because of a
		// select, there is a small window between the goroutine
		// being woken up by a different case and it grabbing the
		// channel locks. Once it has the lock
		// it removes itself from the queue, so we won't see it after that.
		// We use a flag in the G struct to tell us when someone
		// else has won the race to signal this goroutine but the goroutine
		// hasn't removed itself from the queue yet.
		if sgp.isSelect {
			if !sgp.g.selectDone.CompareAndSwap(0, 1) {
				// We lost the race to wake this goroutine.
				continue
			}
		}

		return sgp
	}
}
// raceaddr returns the address used to represent operations on c to the
// race detector.
func (c *hchan) raceaddr() unsafe.Pointer {
	// Treat read-like and write-like operations on the channel to
	// happen at this address. Avoid using the address of qcount
	// or dataqsiz, because the len() and cap() builtins read
	// those addresses, and we don't want them racing with
	// operations like close().
	return unsafe.Pointer(&c.buf)
}
// racesync models a direct (unbuffered) send/receive pair for the race
// detector: a release/acquire in each direction between the current
// goroutine and sg.g, using chanbuf(c, 0) as the synchronization object.
func racesync(c *hchan, sg *sudog) {
	racerelease(chanbuf(c, 0))
	raceacquireg(sg.g, chanbuf(c, 0))
	racereleaseg(sg.g, chanbuf(c, 0))
	raceacquire(chanbuf(c, 0))
}
// Notify the race detector of a send or receive involving buffer entry idx
// and a channel c or its communicating partner sg.
// This function handles the special case of c.elemsize==0.
func racenotify(c *hchan, idx uint, sg *sudog) {
	// We could have passed the unsafe.Pointer corresponding to entry idx
	// instead of idx itself. However, in a future version of this function,
	// we can use idx to better handle the case of elemsize==0.
	// A future improvement to the detector is to call TSan with c and idx:
	// this way, Go will continue to not allocate buffer entries for channels
	// of elemsize==0, yet the race detector can be made to handle multiple
	// sync objects underneath the hood (one sync object per idx)
	qp := chanbuf(c, idx)
	// When elemsize==0, we don't allocate a full buffer for the channel.
	// Instead of individual buffer entries, the race detector uses the
	// c.buf as the only buffer entry. This simplification prevents us from
	// following the memory model's happens-before rules (rules that are
	// implemented in racereleaseacquire). Instead, we accumulate happens-before
	// information in the synchronization object associated with c.buf.
	if c.elemsize == 0 {
		if sg == nil {
			raceacquire(qp)
			racerelease(qp)
		} else {
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
		}
	} else {
		if sg == nil {
			racereleaseacquire(qp)
		} else {
			racereleaseacquireg(sg.g, qp)
		}
	}
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// checkptrAlignment checks the unsafe conversion (*[n]elem)(p): p must
// be suitably aligned for elem (if elem contains pointers), and the n
// elements must not straddle multiple allocations. It throws on failure.
func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) {
	// nil pointer is always suitably aligned (#47430).
	if p == nil {
		return
	}

	// Check that (*[n]elem)(p) is appropriately aligned.
	// Note that we allow unaligned pointers if the types they point to contain
	// no pointers themselves. See issue 37298.
	// TODO(mdempsky): What about fieldAlign?
	if elem.Pointers() && uintptr(p)&(uintptr(elem.Align_)-1) != 0 {
		throw("checkptr: misaligned pointer conversion")
	}

	// Check that (*[n]elem)(p) doesn't straddle multiple heap objects.
	// TODO(mdempsky): Fix #46938 so we don't need to worry about overflow here.
	if checkptrStraddles(p, n*elem.Size_) {
		throw("checkptr: converted pointer straddles multiple allocations")
	}
}
// checkptrStraddles reports whether the first size-bytes of memory
// addressed by ptr is known to straddle more than one Go allocation.
func checkptrStraddles(ptr unsafe.Pointer, size uintptr) bool {
	if size <= 1 {
		return false
	}

	// Check that add(ptr, size-1) won't overflow. This avoids the risk
	// of producing an illegal pointer value (assuming ptr is legal).
	// -(size-1) is the unsigned negation, i.e. the largest base address
	// for which base+size-1 does not wrap around.
	if uintptr(ptr) >= -(size - 1) {
		return true
	}
	end := add(ptr, size-1)

	// TODO(mdempsky): Detect when [ptr, end] contains Go allocations,
	// but neither ptr nor end point into one themselves.
	return checkptrBase(ptr) != checkptrBase(end)
}
// checkptrArithmetic checks a pointer p computed by unsafe pointer
// arithmetic: p must not be a small non-nil value, and if it points into
// a known allocation, at least one of the original pointers must point
// into the same allocation. It throws on failure.
func checkptrArithmetic(p unsafe.Pointer, originals []unsafe.Pointer) {
	if addr := uintptr(p); 0 < addr && addr < minLegalPointer {
		throw("checkptr: pointer arithmetic computed bad pointer value")
	}

	base := checkptrBase(p)
	if base == 0 {
		// p does not point into a known allocation; nothing to check.
		return
	}
	for _, orig := range originals {
		if checkptrBase(orig) == base {
			// Some original pointer shares p's allocation: OK.
			return
		}
	}
	throw("checkptr: pointer arithmetic result points to invalid allocation")
}
// checkptrBase returns the base address for the allocation containing
// the address p.
//
// Importantly, if p1 and p2 point into the same variable, then
// checkptrBase(p1) == checkptrBase(p2). However, the converse/inverse
// is not necessarily true as allocations can have trailing padding,
// and multiple variables may be packed into a single allocation.
//
// checkptrBase should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname checkptrBase
func checkptrBase(p unsafe.Pointer) uintptr {
	// stack
	if gp := getg(); gp.stack.lo <= uintptr(p) && uintptr(p) < gp.stack.hi {
		// TODO(mdempsky): Walk the stack to identify the
		// specific stack frame or even stack object that p
		// points into.
		//
		// In the mean time, use "1" as a pseudo-address to
		// represent the stack. This is an invalid address on
		// all platforms, so it's guaranteed to be distinct
		// from any of the addresses we might return below.
		return 1
	}

	// heap (must check after stack because of #35068)
	if base, _, _ := findObject(uintptr(p), 0, 0); base != 0 {
		return base
	}

	// data or bss
	for _, datap := range activeModules() {
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			return datap.data
		}
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			return datap.bss
		}
	}

	// Not in any allocation this function knows how to identify.
	return 0
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// inf2one returns a signed 1 if f is an infinity and a signed 0 otherwise.
// The sign of the result is the sign of f.
func inf2one(f float64) float64 {
	if isInf(f) {
		return copysign(1.0, f)
	}
	return copysign(0.0, f)
}
// complex128div implements complex128 division n/m.
func complex128div(n complex128, m complex128) complex128 {
	var e, f float64 // complex(e, f) = n/m

	// Algorithm for robust complex division as described in
	// Robert L. Smith: Algorithm 116: Complex division. Commun. ACM 5(8): 435 (1962).
	// Divide through by whichever component of m has the larger
	// magnitude to avoid overflow/underflow in the intermediate terms.
	if abs(real(m)) >= abs(imag(m)) {
		ratio := imag(m) / real(m)
		denom := real(m) + ratio*imag(m)
		e = (real(n) + imag(n)*ratio) / denom
		f = (imag(n) - real(n)*ratio) / denom
	} else {
		ratio := real(m) / imag(m)
		denom := imag(m) + ratio*real(m)
		e = (real(n)*ratio + imag(n)) / denom
		f = (imag(n)*ratio - real(n)) / denom
	}

	if isNaN(e) && isNaN(f) {
		// Correct final result to infinities and zeros if applicable.
		// Matches C99: ISO/IEC 9899:1999 - G.5.1  Multiplicative operators.

		a, b := real(n), imag(n)
		c, d := real(m), imag(m)

		switch {
		case m == 0 && (!isNaN(a) || !isNaN(b)):
			// Division of a non-NaN numerator by zero: signed infinities.
			e = copysign(inf, c) * a
			f = copysign(inf, c) * b

		case (isInf(a) || isInf(b)) && isFinite(c) && isFinite(d):
			// Infinite numerator over finite denominator: infinite result.
			a = inf2one(a)
			b = inf2one(b)
			e = inf * (a*c + b*d)
			f = inf * (b*c - a*d)

		case (isInf(c) || isInf(d)) && isFinite(a) && isFinite(b):
			// Finite numerator over infinite denominator: zero result.
			c = inf2one(c)
			d = inf2one(d)
			e = 0 * (a*c + b*d)
			f = 0 * (b*c - a*d)
		}
	}

	return complex(e, f)
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/sys"
"unsafe"
)
// A coro represents extra concurrency without extra parallelism,
// as would be needed for a coroutine implementation.
// The coro does not represent a specific coroutine, only the ability
// to do coroutine-style control transfers.
// It can be thought of as like a special channel that always has
// a goroutine blocked on it. If another goroutine calls coroswitch(c),
// the caller becomes the goroutine blocked in c, and the goroutine
// formerly blocked in c starts running.
// These switches continue until a call to coroexit(c),
// which ends the use of the coro by releasing the blocked
// goroutine in c and exiting the current goroutine.
//
// Coros are heap allocated and garbage collected, so that user code
// can hold a pointer to a coro without causing potential dangling
// pointer errors.
type coro struct {
	gp guintptr // the goroutine currently blocked in this coro
	f  func(*coro) // user function run by the coro's goroutine (see corostart)

	// State for validating thread-lock interactions.
	mp        *m
	lockedExt uint32 // mp's external LockOSThread counter at coro creation time.
	lockedInt uint32 // mp's internal lockOSThread counter at coro creation time.
}
//go:linkname newcoro

// newcoro creates a new coro containing a
// goroutine blocked waiting to run f
// and returns that coro.
func newcoro(f func(*coro)) *coro {
	c := new(coro)
	c.f = f
	pc := sys.GetCallerPC()
	gp := getg()
	systemstack(func() {
		mp := gp.m
		// Obtain corostart as a *funcval so newproc1 can start a
		// goroutine running it.
		start := corostart
		startfv := *(**funcval)(unsafe.Pointer(&start))
		gp = newproc1(startfv, gp, pc, true, waitReasonCoroutine)

		// Scribble down locked thread state if needed and/or donate
		// thread-lock state to the new goroutine.
		if mp.lockedExt+mp.lockedInt != 0 {
			c.mp = mp
			c.lockedExt = mp.lockedExt
			c.lockedInt = mp.lockedInt
		}
	})
	// Hand the coro to the new goroutine and record it as the one
	// blocked in c.
	gp.coroarg = c
	c.gp.set(gp)
	return c
}
// corostart is the entry func for a new coroutine.
// It runs the coroutine user function f passed to corostart
// and then calls coroexit to remove the extra concurrency.
func corostart() {
	gp := getg()
	c := gp.coroarg
	gp.coroarg = nil

	// Deferred so the coro is closed out even if c.f panics.
	defer coroexit(c)
	c.f(c)
}
// coroexit is like coroswitch but closes the coro
// and exits the current goroutine.
func coroexit(c *coro) {
	gp := getg()
	// Stash the coro and the exit flag on the g; coroswitch_m,
	// running on the m stack, unpacks both.
	gp.coroarg = c
	gp.coroexit = true
	mcall(coroswitch_m)
}
//go:linkname coroswitch

// coroswitch switches to the goroutine blocked on c
// and then blocks the current goroutine on c.
func coroswitch(c *coro) {
	gp := getg()
	// Pass c via the g; coroswitch_m runs on the m stack and unpacks it.
	gp.coroarg = c
	mcall(coroswitch_m)
}
// coroswitch_m is the implementation of coroswitch
// that runs on the m stack.
//
// Note: Coroutine switches are expected to happen at
// an order of magnitude (or more) higher frequency
// than regular goroutine switches, so this path is heavily
// optimized to remove unnecessary work.
// The fast path here is three CAS: the one at the top on gp.atomicstatus,
// the one in the middle to choose the next g,
// and the one at the bottom on gnext.atomicstatus.
// It is important not to add more atomic operations or other
// expensive operations to the fast path.
func coroswitch_m(gp *g) {
	// Unpack the arguments stashed on the g by coroswitch/coroexit.
	c := gp.coroarg
	gp.coroarg = nil
	exit := gp.coroexit
	gp.coroexit = false
	mp := gp.m

	// Track and validate thread-lock interactions.
	//
	// The rules with thread-lock interactions are simple. When a coro goroutine is switched to,
	// the same thread must be used, and the locked state must match with the thread-lock state of
	// the goroutine which called newcoro. Thread-lock state consists of the thread and the number
	// of internal (cgo callback, etc.) and external (LockOSThread) thread locks.
	locked := gp.lockedm != 0
	if c.mp != nil || locked {
		if mp != c.mp || mp.lockedInt != c.lockedInt || mp.lockedExt != c.lockedExt {
			print("coro: got thread ", unsafe.Pointer(mp), ", want ", unsafe.Pointer(c.mp), "\n")
			print("coro: got lock internal ", mp.lockedInt, ", want ", c.lockedInt, "\n")
			print("coro: got lock external ", mp.lockedExt, ", want ", c.lockedExt, "\n")
			throw("coro: OS thread locking must match locking at coroutine creation")
		}
	}

	// Acquire tracer for writing for the duration of this call.
	//
	// There's a lot of state manipulation performed with shortcuts
	// but we need to make sure the tracer can only observe the
	// start and end states to maintain a coherent model and avoid
	// emitting an event for every single transition.
	trace := traceAcquire()

	canCAS := true
	bubble := gp.bubble
	if bubble != nil {
		// If we're in a synctest group, always use casgstatus (which tracks
		// group idleness) rather than directly CASing. Mark the group as active
		// while we're in the process of transferring control.
		canCAS = false
		bubble.incActive()
	}

	if locked {
		// Detach the goroutine from the thread; we'll attach to the goroutine we're
		// switching to before returning.
		gp.lockedm.set(nil)
	}

	if exit {
		// The M might have a non-zero OS thread lock count when we get here, gdestroy
		// will avoid destroying the M if the G isn't explicitly locked to it via lockedm,
		// which we cleared above. It's fine to gdestroy here also, even when locked to
		// the thread, because we'll be switching back to another goroutine anyway, which
		// will take back its thread-lock state before returning.
		gdestroy(gp)
		gp = nil
	} else {
		// If we can CAS ourselves directly from running to waiting, so do,
		// keeping the control transfer as lightweight as possible.
		gp.waitreason = waitReasonCoroutine
		if !canCAS || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gwaiting) {
			// The CAS failed: use casgstatus, which will take care of
			// coordinating with the garbage collector about the state change.
			casgstatus(gp, _Grunning, _Gwaiting)
		}

		// Clear gp.m.
		setMNoWB(&gp.m, nil)
	}

	// The goroutine stored in c is the one to run next.
	// Swap it with ourselves.
	var gnext *g
	for {
		// Note: this is a racy load, but it will eventually
		// get the right value, and if it gets the wrong value,
		// the c.gp.cas will fail, so no harm done other than
		// a wasted loop iteration.
		// The cas will also sync c.gp's
		// memory enough that the next iteration of the racy load
		// should see the correct value.
		// We are avoiding the atomic load to keep this path
		// as lightweight as absolutely possible.
		// (The atomic load is free on x86 but not free elsewhere.)
		next := c.gp
		if next.ptr() == nil {
			throw("coroswitch on exited coro")
		}
		var self guintptr
		self.set(gp)
		if c.gp.cas(next, self) {
			gnext = next.ptr()
			break
		}
	}

	// Check if we're switching to ourselves. This case is able to break our
	// thread-lock invariants and an unbuffered channel implementation of
	// coroswitch would deadlock. It's clear that this case should just not
	// work.
	if gnext == gp {
		throw("coroswitch of a goroutine to itself")
	}

	// Emit the trace event after getting gnext but before changing curg.
	// GoSwitch expects that the current G is running and that we haven't
	// switched yet for correct status emission.
	if trace.ok() {
		trace.GoSwitch(gnext, exit)
	}

	// Start running next, without heavy scheduling machinery.
	// Set mp.curg and gnext.m and then update scheduling state
	// directly if possible.
	setGNoWB(&mp.curg, gnext)
	setMNoWB(&gnext.m, mp)

	// Synchronize with any outstanding goroutine profile. We're about to start
	// executing, and an invariant of the profiler is that we tryRecordGoroutineProfile
	// whenever a goroutine is about to start running.
	//
	// N.B. We must do this before transitioning to _Grunning but after installing gnext
	// in curg, so that we have a valid curg for allocation (tryRecordGoroutineProfile
	// may allocate).
	if goroutineProfile.active {
		tryRecordGoroutineProfile(gnext, nil, osyield)
	}

	if !canCAS || !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) {
		// The CAS failed: use casgstatus, which will take care of
		// coordinating with the garbage collector about the state change.
		casgstatus(gnext, _Gwaiting, _Grunnable)
		casgstatus(gnext, _Grunnable, _Grunning)
	}

	// Donate locked state.
	if locked {
		mp.lockedg.set(gnext)
		gnext.lockedm.set(mp)
	}

	// Release the trace locker. We've completed all the necessary transitions.
	if trace.ok() {
		traceRelease(trace)
	}

	if bubble != nil {
		bubble.decActive()
	}

	// Switch to gnext. Does not return.
	gogo(&gnext.sched)
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package coverage contains APIs for writing coverage profile data at runtime
// from long-running and/or server programs that do not terminate via [os.Exit].
package coverage
import (
"internal/coverage/cfile"
"io"
)
// initHook is invoked from main.init in programs built with -cover.
// The call is emitted by the compiler.
func initHook(istest bool) {
	// Delegate to the implementation in internal/coverage/cfile.
	cfile.InitHook(istest)
}
// WriteMetaDir writes a coverage meta-data file for the currently
// running program to the directory specified in 'dir'. An error will
// be returned if the operation can't be completed successfully (for
// example, if the currently running program was not built with
// "-cover", or if the directory does not exist).
func WriteMetaDir(dir string) error {
	// Thin wrapper; all the work happens in internal/coverage/cfile.
	return cfile.WriteMetaDir(dir)
}
// WriteMeta writes the meta-data content (the payload that would
// normally be emitted to a meta-data file) for the currently running
// program to the writer 'w'. An error will be returned if the
// operation can't be completed successfully (for example, if the
// currently running program was not built with "-cover", or if a
// write fails).
func WriteMeta(w io.Writer) error {
	// Thin wrapper; all the work happens in internal/coverage/cfile.
	return cfile.WriteMeta(w)
}
// WriteCountersDir writes a coverage counter-data file for the
// currently running program to the directory specified in 'dir'. An
// error will be returned if the operation can't be completed
// successfully (for example, if the currently running program was not
// built with "-cover", or if the directory does not exist). The
// counter data written will be a snapshot taken at the point of the
// call.
func WriteCountersDir(dir string) error {
	// Thin wrapper; all the work happens in internal/coverage/cfile.
	return cfile.WriteCountersDir(dir)
}
// WriteCounters writes coverage counter-data content for the
// currently running program to the writer 'w'. An error will be
// returned if the operation can't be completed successfully (for
// example, if the currently running program was not built with
// "-cover", or if a write fails). The counter data written will be a
// snapshot taken at the point of the invocation.
func WriteCounters(w io.Writer) error {
	// Thin wrapper; all the work happens in internal/coverage/cfile.
	return cfile.WriteCounters(w)
}
// ClearCounters clears/resets all coverage counter variables in the
// currently running program. It returns an error if the program in
// question was not built with the "-cover" flag. Clearing of coverage
// counters is also not supported for programs not using atomic
// counter mode (see more detailed comments below for the rationale
// here).
func ClearCounters() error {
	// Thin wrapper; all the work happens in internal/coverage/cfile.
	return cfile.ClearCounters()
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/coverage/rtcov"
"unsafe"
)
//go:linkname coverage_getCovCounterList internal/coverage/cfile.getCovCounterList
func coverage_getCovCounterList() []rtcov.CovCounterBlob {
	// Walk the module list and collect one blob per module that has
	// coverage counters, describing the counter region's base and length
	// (in uint32 words).
	blobs := []rtcov.CovCounterBlob{}
	wordSize := unsafe.Sizeof(uint32(0))
	for md := &firstmoduledata; md != nil; md = md.next {
		if md.covctrs == md.ecovctrs {
			// This module carries no coverage counters.
			continue
		}
		blobs = append(blobs, rtcov.CovCounterBlob{
			Counters: (*uint32)(unsafe.Pointer(md.covctrs)),
			Len:      uint64((md.ecovctrs - md.covctrs) / wordSize),
		})
	}
	return blobs
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/coverage/rtcov"
"unsafe"
)
// The compiler emits calls to runtime.addCovMeta
// but this code has moved to rtcov.AddMeta.
func addCovMeta(p unsafe.Pointer, dlen uint32, hash [16]byte, pkgpath string, pkgid int, cmode uint8, cgran uint8) uint32 {
	// rtcov.AddMeta returns 0 to signal a package-ID collision; any
	// other value is the registered meta-data blob's id.
	metaID := rtcov.AddMeta(p, dlen, hash, pkgpath, pkgid, cmode, cgran)
	if metaID != 0 {
		return metaID
	}
	throw("runtime.addCovMeta: coverage package map collision")
	return 0 // unreachable; throw does not return
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/cpu"
)
// memmoveBits records CPU-feature-dependent choices for memmove.
// It is written once by the init function below.
var memmoveBits uint8

const (
	// avxSupported indicates that the CPU supports AVX instructions.
	avxSupported = 1 << 0

	// repmovsPreferred indicates that REP MOVSx instruction is more
	// efficient on the CPU.
	repmovsPreferred = 1 << 1
)
func init() {
// Here we assume that on modern CPUs with both FSRM and ERMS features,
// copying data blocks of 2KB or larger using the REP MOVSB instruction
// will be more efficient to avoid having to keep up with CPU generations.
// Therefore, we may retain a BlockList mechanism to ensure that microarchitectures
// that do not fit this case may appear in the future.
// We enable it on Intel CPUs first, and we may support more platforms
// in the future.
isERMSNiceCPU := isIntel
useREPMOV := isERMSNiceCPU && cpu.X86.HasERMS && cpu.X86.HasFSRM
if cpu.X86.HasAVX {
memmoveBits |= avxSupported
}
if useREPMOV {
memmoveBits |= repmovsPreferred
}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CPU profiling.
//
// The signal handler for the profiling clock tick adds a new stack trace
// to a log of recent traces. The log is read by a user goroutine that
// turns it into formatted profile data. If the reader does not keep up
// with the log, those writes will be recorded as a count of lost records.
// The actual profile buffer is in profbuf.go.
package runtime
import (
"internal/abi"
"internal/runtime/sys"
"unsafe"
)
const (
	// maxCPUProfStack is the maximum number of frames recorded per sample.
	maxCPUProfStack = 64

	// profBufWordCount is the size of the CPU profile buffer's storage for the
	// header and stack of each sample, measured in 64-bit words. Every sample
	// has a required header of two words. With a small additional header (a
	// word or two) and stacks at the profiler's maximum length of 64 frames,
	// that capacity can support 1900 samples or 19 thread-seconds at a 100 Hz
	// sample rate, at a cost of 1 MiB.
	profBufWordCount = 1 << 17

	// profBufTagCount is the size of the CPU profile buffer's storage for the
	// goroutine tags associated with each sample. A capacity of 1<<14 means
	// room for 16k samples, or 160 thread-seconds at a 100 Hz sample rate.
	profBufTagCount = 1 << 14
)
// cpuProfile holds the state of the CPU profiler: the on/off flag, the
// destination profile buffer, and a staging area for stacks captured on
// non-Go threads.
type cpuProfile struct {
	lock mutex
	on   bool     // profiling is on
	log  *profBuf // profile events written here

	// extra holds extra stacks accumulated in addNonGo
	// corresponding to profiling signals arriving on
	// non-Go-created threads. Those stacks are written
	// to log the next time a normal Go thread gets the
	// signal handler.
	// Assuming the stacks are 2 words each (we don't get
	// a full traceback from those threads), plus one word
	// size for framing, 100 Hz profiling would generate
	// 300 words per second.
	// Hopefully a normal Go thread will get the profiling
	// signal at least once every few seconds.
	extra      [1000]uintptr
	numExtra   int
	lostExtra  uint64 // count of frames lost because extra is full
	lostAtomic uint64 // count of frames lost because of being in atomic64 on mips/arm; updated racily
}

// cpuprof is the single global CPU profiler state.
var cpuprof cpuProfile
// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
// If hz <= 0, SetCPUProfileRate turns off profiling.
// If the profiler is on, the rate cannot be changed without first turning it off.
//
// Most clients should use the [runtime/pprof] package or
// the [testing] package's -test.cpuprofile flag instead of calling
// SetCPUProfileRate directly.
func SetCPUProfileRate(hz int) {
	// Clamp hz to something reasonable.
	if hz < 0 {
		hz = 0
	}
	if hz > 1000000 {
		hz = 1000000
	}

	lock(&cpuprof.lock)
	if hz > 0 {
		if cpuprof.on || cpuprof.log != nil {
			// A previous profile is still active or its log has not
			// been fully drained yet; refuse to restart.
			print("runtime: cannot set cpu profile rate until previous profile has finished.\n")
			unlock(&cpuprof.lock)
			return
		}

		cpuprof.on = true
		cpuprof.log = newProfBuf(1, profBufWordCount, profBufTagCount)
		// The first record written to the log is a header carrying hz.
		hdr := [1]uint64{uint64(hz)}
		cpuprof.log.write(nil, nanotime(), hdr[:], nil)
		setcpuprofilerate(int32(hz))
	} else if cpuprof.on {
		setcpuprofilerate(0)
		cpuprof.on = false
		// Flush any stacks queued by addNonGo, then close the log so
		// readers eventually observe EOF.
		cpuprof.addExtra()
		cpuprof.log.close()
	}
	unlock(&cpuprof.lock)
}
// add adds the stack trace to the profile.
// It is called from signal handlers and other limited environments
// and cannot allocate memory or acquire locks that might be
// held at the time of the signal, nor can it use substantial amounts
// of stack.
//
//go:nowritebarrierrec
func (p *cpuProfile) add(tagPtr *unsafe.Pointer, stk []uintptr) {
	// Simple cas-lock to coordinate with setcpuprofilerate.
	for !prof.signalLock.CompareAndSwap(0, 1) {
		// TODO: Is it safe to osyield here? https://go.dev/issue/52672
		osyield()
	}

	if prof.hz.Load() != 0 { // implies cpuprof.log != nil
		// Drain any stacks queued from non-Go threads (and lost-sample
		// counts) before writing this sample.
		if p.numExtra > 0 || p.lostExtra > 0 || p.lostAtomic > 0 {
			p.addExtra()
		}
		hdr := [1]uint64{1}
		// Note: write "knows" that the argument is &gp.labels,
		// because otherwise its write barrier behavior may not
		// be correct. See the long comment there before
		// changing the argument here.
		cpuprof.log.write(tagPtr, nanotime(), hdr[:], stk)
	}

	prof.signalLock.Store(0)
}
// addNonGo adds the non-Go stack trace to the profile.
// It is called from a non-Go thread, so we cannot use much stack at all,
// nor do anything that needs a g or an m.
// In particular, we can't call cpuprof.log.write.
// Instead, we copy the stack into cpuprof.extra,
// which will be drained the next time a Go thread
// gets the signal handling event.
//
//go:nosplit
//go:nowritebarrierrec
func (p *cpuProfile) addNonGo(stk []uintptr) {
	// Simple cas-lock to coordinate with SetCPUProfileRate.
	// (Other calls to add or addNonGo should be blocked out
	// by the fact that only one SIGPROF can be handled by the
	// process at a time. If not, this lock will serialize those too.
	// The use of timer_create(2) on Linux to request process-targeted
	// signals may have changed this.)
	for !prof.signalLock.CompareAndSwap(0, 1) {
		// TODO: Is it safe to osyield here? https://go.dev/issue/52672
		osyield()
	}

	if cpuprof.numExtra+1+len(stk) < len(cpuprof.extra) {
		// Queue the stack with a length-prefix framing:
		// extra[i] holds 1+len(stk), followed by the stack itself.
		i := cpuprof.numExtra
		cpuprof.extra[i] = uintptr(1 + len(stk))
		copy(cpuprof.extra[i+1:], stk)
		cpuprof.numExtra += 1 + len(stk)
	} else {
		// No room: count the sample as lost instead of dropping silently.
		cpuprof.lostExtra++
	}

	prof.signalLock.Store(0)
}
// addExtra adds the "extra" profiling events,
// queued by addNonGo, to the profile log.
// addExtra is called either from a signal handler on a Go thread
// or from an ordinary goroutine; either way it can use stack
// and has a g. The world may be stopped, though.
func (p *cpuProfile) addExtra() {
	// Copy accumulated non-Go profile events.
	// Each queued record is framed as extra[i] = 1+len(stack),
	// followed by the stack frames themselves (see addNonGo).
	hdr := [1]uint64{1}
	for i := 0; i < p.numExtra; {
		p.log.write(nil, 0, hdr[:], p.extra[i+1:i+int(p.extra[i])])
		i += int(p.extra[i])
	}
	p.numExtra = 0

	// Report any lost events.
	if p.lostExtra > 0 {
		// Synthesize a sample whose count is the number of lost frames,
		// attributed to a sentinel "external code" stack.
		hdr := [1]uint64{p.lostExtra}
		lostStk := [2]uintptr{
			abi.FuncPCABIInternal(_LostExternalCode) + sys.PCQuantum,
			abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
		}
		p.log.write(nil, 0, hdr[:], lostStk[:])
		p.lostExtra = 0
	}

	if p.lostAtomic > 0 {
		// Same idea for samples dropped during atomic64 sections
		// (mips/arm; see the lostAtomic field comment).
		hdr := [1]uint64{p.lostAtomic}
		lostStk := [2]uintptr{
			abi.FuncPCABIInternal(_LostSIGPROFDuringAtomic64) + sys.PCQuantum,
			abi.FuncPCABIInternal(_System) + sys.PCQuantum,
		}
		p.log.write(nil, 0, hdr[:], lostStk[:])
		p.lostAtomic = 0
	}
}
// CPUProfile panics.
// It formerly provided raw access to chunks of
// a pprof-format profile generated by the runtime.
// The details of generating that format have changed,
// so this functionality has been removed.
//
// Deprecated: Use the [runtime/pprof] package,
// or the handlers in the [net/http/pprof] package,
// or the [testing] package's -test.cpuprofile flag instead.
func CPUProfile() []byte {
	// Kept only so existing callers fail loudly rather than link-error.
	panic("CPUProfile no longer available")
}
// pprof_cyclesPerSecond returns the CPU tick rate, exposed to
// runtime/pprof as runtime_cyclesPerSecond via linkname.
//
// runtime/pprof.runtime_cyclesPerSecond should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/grafana/pyroscope-go/godeltaprof
//   - github.com/pyroscope-io/godeltaprof
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname pprof_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
func pprof_cyclesPerSecond() int64 {
	return ticksPerSecond()
}
// readProfile, provided to runtime/pprof, returns the next chunk of
// binary CPU profiling stack trace data, blocking until data is available.
// If profiling is turned off and all the profile data accumulated while it was
// on has been returned, readProfile returns eof=true.
// The caller must save the returned data and tags before calling readProfile again.
// The returned data contains a whole number of records, and tags contains
// exactly one entry per record.
//
// runtime_pprof_readProfile should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/pyroscope-io/pyroscope
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname runtime_pprof_readProfile runtime/pprof.readProfile
func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool) {
	// Snapshot the log pointer under the lock; the read itself happens
	// outside the lock so writers (signal handlers) are not blocked.
	lock(&cpuprof.lock)
	log := cpuprof.log
	unlock(&cpuprof.lock)
	readMode := profBufBlocking
	if GOOS == "darwin" || GOOS == "ios" {
		readMode = profBufNonBlocking // For #61768; on Darwin notes are not async-signal-safe. See sigNoteSetup in os_darwin.go.
	}
	data, tags, eof := log.read(readMode)
	if len(data) == 0 && eof {
		// Profile fully drained: drop the log so a new profile can start
		// (SetCPUProfileRate checks cpuprof.log != nil).
		lock(&cpuprof.lock)
		cpuprof.log = nil
		unlock(&cpuprof.lock)
	}
	return data, tags, eof
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package runtime
// canCreateFile reports whether this platform supports create
// (true for the unix build of this file).
const canCreateFile = true

// create returns an fd to a write-only file.
// The file is created if it does not exist and truncated if it does.
func create(name *byte, perm int32) int32 {
	return open(name, _O_CREAT|_O_WRONLY|_O_TRUNC, perm)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/atomic"
"unsafe"
)
// GOMAXPROCS sets the maximum number of CPUs that can be executing
// simultaneously and returns the previous setting. If n < 1, it does not change
// the current setting.
//
// # Default
//
// If the GOMAXPROCS environment variable is set to a positive whole number,
// GOMAXPROCS defaults to that value.
//
// Otherwise, the Go runtime selects an appropriate default value from a combination of
// - the number of logical CPUs on the machine,
// - the process’s CPU affinity mask,
// - and, on Linux, the process’s average CPU throughput limit based on cgroup CPU
// quota, if any.
//
// If GODEBUG=containermaxprocs=0 is set and GOMAXPROCS is not set by the
// environment variable, then GOMAXPROCS instead defaults to the value of
// [runtime.NumCPU]. Note that GODEBUG=containermaxprocs=0 is [default] for
// language version 1.24 and below.
//
// # Updates
//
// The Go runtime periodically updates the default value based on changes to
// the total logical CPU count, the CPU affinity mask, or cgroup quota. Setting
// a custom value with the GOMAXPROCS environment variable or by calling
// GOMAXPROCS disables automatic updates. The default value and automatic
// updates can be restored by calling [SetDefaultGOMAXPROCS].
//
// If GODEBUG=updatemaxprocs=0 is set, the Go runtime does not perform
// automatic GOMAXPROCS updating. Note that GODEBUG=updatemaxprocs=0 is
// [default] for language version 1.24 and below.
//
// # Compatibility
//
// Note that the default GOMAXPROCS behavior may change as the scheduler
// improves, especially the implementation detail below.
//
// # Implementation details
//
// When computing default GOMAXPROCS via cgroups, the Go runtime computes the
// "average CPU throughput limit" as the cgroup CPU quota / period. In cgroup
// v2, these values come from the cpu.max file. In cgroup v1, they come from
// cpu.cfs_quota_us and cpu.cfs_period_us, respectively. In container runtimes
// that allow configuring CPU limits, this value usually corresponds to the
// "CPU limit" option, not "CPU request".
//
// The Go runtime typically selects the default GOMAXPROCS as the minimum of
// the logical CPU count, the CPU affinity mask count, or the cgroup CPU
// throughput limit. However, it will never set GOMAXPROCS less than 2 unless
// the logical CPU count or CPU affinity mask count are below 2.
//
// If the cgroup CPU throughput limit is not a whole number, the Go runtime
// rounds up to the next whole number.
//
// GOMAXPROCS updates are performed up to once per second, or less if the
// application is idle.
//
// [default]: https://go.dev/doc/godebug#default
func GOMAXPROCS(n int) int {
	if GOARCH == "wasm" && n > 1 {
		n = 1 // WebAssembly has no threads yet, so only one CPU is possible.
	}

	lock(&sched.lock)
	ret := int(gomaxprocs)
	if n <= 0 {
		// n <= 0 is a pure query: report the current setting unchanged.
		unlock(&sched.lock)
		return ret
	}
	// Set early so we can wait for sysmon before STW. See comment on
	// computeMaxProcsLock.
	sched.customGOMAXPROCS = true
	unlock(&sched.lock)

	// Wait for sysmon to complete running defaultGOMAXPROCS.
	lock(&computeMaxProcsLock)
	unlock(&computeMaxProcsLock)

	if n == ret {
		// sched.customGOMAXPROCS set, but no need to actually STW
		// since the gomaxprocs itself isn't changing.
		return ret
	}

	stw := stopTheWorldGC(stwGOMAXPROCS)

	// newprocs will be processed by startTheWorld
	//
	// TODO(prattmic): this could use a nicer API. Perhaps add it to the
	// stw parameter?
	newprocs = int32(n)

	startTheWorldGC(stw)
	return ret
}
// SetDefaultGOMAXPROCS updates the GOMAXPROCS setting to the runtime
// default, as described by [GOMAXPROCS], ignoring the GOMAXPROCS
// environment variable.
//
// SetDefaultGOMAXPROCS can be used to enable the default automatic updating
// GOMAXPROCS behavior if it has been disabled by the GOMAXPROCS
// environment variable or a prior call to [GOMAXPROCS], or to force an immediate
// update if the caller is aware of a change to the total logical CPU count, CPU
// affinity mask or cgroup quota.
func SetDefaultGOMAXPROCS() {
	// SetDefaultGOMAXPROCS conceptually means "[re]do what the runtime
	// would do at startup if the GOMAXPROCS environment variable were
	// unset." It still respects GODEBUG.

	// Recompute the automatic default before deciding whether to STW.
	procs := defaultGOMAXPROCS(0)

	lock(&sched.lock)
	curr := gomaxprocs
	custom := sched.customGOMAXPROCS
	unlock(&sched.lock)

	if !custom && procs == curr {
		// Nothing to do if we're already using automatic GOMAXPROCS
		// and the limit is unchanged.
		return
	}

	stw := stopTheWorldGC(stwGOMAXPROCS)

	// newprocs will be processed by startTheWorld
	//
	// TODO(prattmic): this could use a nicer API. Perhaps add it to the
	// stw parameter?
	newprocs = procs

	// Re-enable automatic updates while the world is stopped.
	lock(&sched.lock)
	sched.customGOMAXPROCS = false
	unlock(&sched.lock)

	startTheWorldGC(stw)
}
// NumCPU returns the number of logical CPUs usable by the current process.
//
// The set of available CPUs is checked by querying the operating system
// at process startup. Changes to operating system CPU allocation after
// process startup are not reflected.
func NumCPU() int {
	// numCPUStartup is captured once at process start (see doc above).
	return int(numCPUStartup)
}
// NumCgoCall returns the number of cgo calls made by the current process.
func NumCgoCall() int64 {
	// Start from the global counter, then fold in each M's own count by
	// walking the allm list.
	total := int64(atomic.Load64(&ncgocall))
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		total += int64(mp.ncgocall)
	}
	return total
}
// totalMutexWaitTimeNanos returns the cumulative mutex wait time:
// the scheduler-wide totals plus each M's lock-profile wait time.
func totalMutexWaitTimeNanos() int64 {
	sum := sched.totalMutexWaitTime.Load() + sched.totalRuntimeLockWaitTime.Load()
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		sum += mp.mLockProfile.waitTime.Load()
	}
	return sum
}
// NumGoroutine returns the number of goroutines that currently exist.
func NumGoroutine() int {
	// Delegates to the scheduler's goroutine count; see gcount for the
	// meaning of the boolean argument.
	return int(gcount(false))
}
// debug_modinfo exposes the embedded module info string to
// runtime/debug via linkname.
//
//go:linkname debug_modinfo runtime/debug.modinfo
func debug_modinfo() string {
	return modinfo
}
// mayMoreStackPreempt is a maymorestack hook that forces a preemption
// at every possible cooperative preemption point.
//
// This is valuable to apply to the runtime, which can be sensitive to
// preemption points. To apply this to all preemption points in the
// runtime and runtime-like code, use the following in bash or zsh:
//
//	X=(-{gc,asm}flags={runtime/...,reflect,sync}=-d=maymorestack=runtime.mayMoreStackPreempt) GOFLAGS=${X[@]}
//
// This must be deeply nosplit because it is called from a function
// prologue before the stack is set up and because the compiler will
// call it from any splittable prologue (leading to infinite
// recursion).
//
// Ideally it should also use very little stack because the linker
// doesn't currently account for this in nosplit stack depth checking.
//
// Ensure mayMoreStackPreempt can be called for all ABIs.
//
//go:nosplit
//go:linkname mayMoreStackPreempt
func mayMoreStackPreempt() {
	// Don't do anything on the g0 or gsignal stack.
	gp := getg()
	if gp == gp.m.g0 || gp == gp.m.gsignal {
		return
	}
	// Force a preemption, unless the stack is already poisoned.
	if gp.stackguard0 < stackPoisonMin {
		gp.stackguard0 = stackPreempt
	}
}
// mayMoreStackMove is a maymorestack hook that forces stack movement
// at every possible point.
//
// See mayMoreStackPreempt for how to enable it and why it must be
// deeply nosplit.
//
//go:nosplit
//go:linkname mayMoreStackMove
func mayMoreStackMove() {
	// Don't do anything on the g0 or gsignal stack.
	gp := getg()
	if gp == gp.m.g0 || gp == gp.m.gsignal {
		return
	}
	// Force stack movement, unless the stack is already poisoned.
	if gp.stackguard0 < stackPoisonMin {
		gp.stackguard0 = stackForceMove
	}
}
// debugPinnerKeepUnpin is used to make runtime.(*Pinner).Unpin reachable.
// It presumably stays false at runtime; its only purpose is to keep the
// Unpin call in debugPinnerV1 from being dead-code-eliminated.
var debugPinnerKeepUnpin bool = false
// debugPinnerV1 returns a new Pinner that pins itself. This function can be
// used by debuggers to easily obtain a Pinner that will not be garbage
// collected (or moved in memory) even if no references to it exist in the
// target program. This pinner in turn can be used to extend this property
// to other objects, which debuggers can use to simplify the evaluation of
// expressions involving multiple call injections.
func debugPinnerV1() *Pinner {
	pinner := new(Pinner)
	// Pinning the pinner itself keeps it alive and immovable.
	pinner.Pin(unsafe.Pointer(pinner))
	if debugPinnerKeepUnpin {
		// Make Unpin reachable.
		pinner.Unpin()
	}
	return pinner
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package debug
import (
"runtime"
"slices"
"time"
)
// GCStats collect information about recent garbage collections.
type GCStats struct {
	LastGC         time.Time       // time of last collection
	NumGC          int64           // number of garbage collections
	PauseTotal     time.Duration   // total pause for all collections
	Pause          []time.Duration // pause history, most recent first
	PauseEnd       []time.Time     // pause end times history, most recent first
	PauseQuantiles []time.Duration // if non-empty, filled by ReadGCStats with pause-time quantiles
}
// ReadGCStats reads statistics about garbage collection into stats.
// The number of entries in the pause history is system-dependent;
// stats.Pause slice will be reused if large enough, reallocated otherwise.
// ReadGCStats may use the full capacity of the stats.Pause slice.
// If stats.PauseQuantiles is non-empty, ReadGCStats fills it with quantiles
// summarizing the distribution of pause time. For example, if
// len(stats.PauseQuantiles) is 5, it will be filled with the minimum,
// 25%, 50%, 75%, and maximum pause times.
func ReadGCStats(stats *GCStats) {
	// Create a buffer with space for at least two copies of the
	// pause history tracked by the runtime. One will be returned
	// to the caller and the other will be used as transfer buffer
	// for end times history and as a temporary buffer for
	// computing quantiles.
	const maxPause = len(((*runtime.MemStats)(nil)).PauseNs)
	if cap(stats.Pause) < 2*maxPause+3 {
		stats.Pause = make([]time.Duration, 2*maxPause+3)
	}

	// readGCStats fills in the pause and end times histories (up to
	// maxPause entries) and then three more: Unix ns time of last GC,
	// number of GC, and total pause time in nanoseconds. Here we
	// depend on the fact that time.Duration's native unit is
	// nanoseconds, so the pauses and the total pause time do not need
	// any conversion.
	readGCStats(&stats.Pause)

	// The three trailing values come after the pause/end-time data.
	n := len(stats.Pause) - 3
	stats.LastGC = time.Unix(0, int64(stats.Pause[n]))
	stats.NumGC = int64(stats.Pause[n+1])
	stats.PauseTotal = stats.Pause[n+2]
	n /= 2 // buffer holds pauses and end times
	stats.Pause = stats.Pause[:n]

	// End times occupy the second half of the buffer, at [n : n+n]
	// (beyond len but within cap of stats.Pause after the reslice above).
	if cap(stats.PauseEnd) < maxPause {
		stats.PauseEnd = make([]time.Time, 0, maxPause)
	}
	stats.PauseEnd = stats.PauseEnd[:0]
	for _, ns := range stats.Pause[n : n+n] {
		stats.PauseEnd = append(stats.PauseEnd, time.Unix(0, int64(ns)))
	}

	if len(stats.PauseQuantiles) > 0 {
		if n == 0 {
			clear(stats.PauseQuantiles)
		} else {
			// There's room for a second copy of the data in stats.Pause.
			// See the allocation at the top of the function.
			sorted := stats.Pause[n : n+n]
			copy(sorted, stats.Pause)
			slices.Sort(sorted)
			nq := len(stats.PauseQuantiles) - 1
			for i := 0; i < nq; i++ {
				stats.PauseQuantiles[i] = sorted[len(sorted)*i/nq]
			}
			stats.PauseQuantiles[nq] = sorted[len(sorted)-1]
		}
	}
}
// SetGCPercent sets the garbage collection target percentage:
// a collection is triggered when the ratio of freshly allocated data
// to live data remaining after the previous collection reaches this percentage.
// SetGCPercent returns the previous setting.
// The initial setting is the value of the GOGC environment variable
// at startup, or 100 if the variable is not set.
// This setting may be effectively reduced in order to maintain a memory
// limit.
// A negative percentage effectively disables garbage collection, unless
// the memory limit is reached.
// See SetMemoryLimit for more details.
func SetGCPercent(percent int) int {
	// Delegates to the runtime; conversion mirrors the runtime's int32 API.
	return int(setGCPercent(int32(percent)))
}
// FreeOSMemory forces a garbage collection followed by an
// attempt to return as much memory to the operating system
// as possible. (Even if this is not called, the runtime gradually
// returns memory to the operating system in a background task.)
func FreeOSMemory() {
	// Implemented by the runtime; this is a thin exported wrapper.
	freeOSMemory()
}
// SetMaxStack sets the maximum amount of memory that
// can be used by a single goroutine stack.
// If any goroutine exceeds this limit while growing its stack,
// the program crashes.
// SetMaxStack returns the previous setting.
// The initial setting is 1 GB on 64-bit systems, 250 MB on 32-bit systems.
// There may be a system-imposed maximum stack limit regardless
// of the value provided to SetMaxStack.
//
// SetMaxStack is useful mainly for limiting the damage done by
// goroutines that enter an infinite recursion. It only limits future
// stack growth.
func SetMaxStack(bytes int) int {
	// Delegate to the runtime and hand back the prior limit.
	prev := setMaxStack(bytes)
	return prev
}
// SetMaxThreads sets the maximum number of operating system
// threads that the Go program can use. If it attempts to use more than
// this many, the program crashes.
// SetMaxThreads returns the previous setting.
// The initial setting is 10,000 threads.
//
// The limit controls the number of operating system threads, not the number
// of goroutines. A Go program creates a new thread only when a goroutine
// is ready to run but all the existing threads are blocked in system calls, cgo calls,
// or are locked to other goroutines due to use of runtime.LockOSThread.
//
// SetMaxThreads is useful mainly for limiting the damage done by
// programs that create an unbounded number of threads. The idea is
// to take down the program before it takes down the operating system.
func SetMaxThreads(threads int) int {
	// Delegate to the runtime and hand back the prior limit.
	prev := setMaxThreads(threads)
	return prev
}
// SetPanicOnFault controls the runtime's behavior when a program faults
// at an unexpected (non-nil) address. Such faults are typically caused by
// bugs such as runtime memory corruption, so the default response is to crash
// the program. Programs working with memory-mapped files or unsafe
// manipulation of memory may cause faults at non-nil addresses in less
// dramatic situations; SetPanicOnFault allows such programs to request
// that the runtime trigger only a panic, not a crash.
// The runtime.Error that the runtime panics with may have an additional method:
//
//	Addr() uintptr
//
// If that method exists, it returns the memory address which triggered the fault.
// The results of Addr are best-effort and the veracity of the result
// may depend on the platform.
// SetPanicOnFault applies only to the current goroutine.
// It returns the previous setting.
func SetPanicOnFault(enabled bool) bool {
	// Delegate to the runtime and hand back the prior per-goroutine setting.
	prev := setPanicOnFault(enabled)
	return prev
}
// WriteHeapDump writes a description of the heap and the objects in
// it to the given file descriptor.
//
// WriteHeapDump suspends the execution of all goroutines until the heap
// dump is completely written. Thus, the file descriptor must not be
// connected to a pipe or socket whose other end is in the same Go
// process; instead, use a temporary file or network socket.
//
// The heap dump format is defined at https://golang.org/s/go15heapdump.
//
// WriteHeapDump has no Go body here; the implementation is provided by
// the runtime.
func WriteHeapDump(fd uintptr)
// SetTraceback sets the amount of detail printed by the runtime in
// the traceback it prints before exiting due to an unrecovered panic
// or an internal runtime error.
// The level argument takes the same values as the GOTRACEBACK
// environment variable. For example, SetTraceback("all") ensures
// that the program prints all goroutines when it crashes.
// See the package runtime documentation for details.
// If SetTraceback is called with a level lower than that of the
// environment variable, the call is ignored.
//
// SetTraceback has no Go body here; the implementation is provided by
// the runtime.
func SetTraceback(level string)
// SetMemoryLimit provides the runtime with a soft memory limit.
//
// The runtime undertakes several processes to try to respect this
// memory limit, including adjustments to the frequency of garbage
// collections and returning memory to the underlying system more
// aggressively. This limit will be respected even if GOGC=off (or,
// if SetGCPercent(-1) is executed).
//
// The input limit is provided as bytes, and includes all memory
// mapped, managed, and not released by the Go runtime. Notably, it
// does not account for space used by the Go binary and memory
// external to Go, such as memory managed by the underlying system
// on behalf of the process, or memory managed by non-Go code inside
// the same process. Examples of excluded memory sources include: OS
// kernel memory held on behalf of the process, memory allocated by
// C code, and memory mapped by syscall.Mmap (because it is not
// managed by the Go runtime).
//
// More specifically, the following expression accurately reflects
// the value the runtime attempts to maintain as the limit:
//
//	runtime.MemStats.Sys - runtime.MemStats.HeapReleased
//
// or in terms of the runtime/metrics package:
//
//	/memory/classes/total:bytes - /memory/classes/heap/released:bytes
//
// A zero limit or a limit that's lower than the amount of memory
// used by the Go runtime may cause the garbage collector to run
// nearly continuously. However, the application may still make
// progress.
//
// The memory limit is always respected by the Go runtime, so to
// effectively disable this behavior, set the limit very high.
// [math.MaxInt64] is the canonical value for disabling the limit,
// but values much greater than the available memory on the underlying
// system work just as well.
//
// See https://go.dev/doc/gc-guide for a detailed guide explaining
// the soft memory limit in more detail, as well as a variety of common
// use-cases and scenarios.
//
// The initial setting is math.MaxInt64 unless the GOMEMLIMIT
// environment variable is set, in which case it provides the initial
// setting. GOMEMLIMIT is a numeric value in bytes with an optional
// unit suffix. The supported suffixes include B, KiB, MiB, GiB, and
// TiB. These suffixes represent quantities of bytes as defined by
// the IEC 80000-13 standard. That is, they are based on powers of
// two: KiB means 2^10 bytes, MiB means 2^20 bytes, and so on.
//
// SetMemoryLimit returns the previously set memory limit.
// A negative input does not adjust the limit, and allows for
// retrieval of the currently set memory limit.
func SetMemoryLimit(limit int64) int64 {
	// Implemented by the runtime; this is a thin exported wrapper.
	return setMemoryLimit(limit)
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package debug
import (
"fmt"
"runtime"
"strconv"
"strings"
)
// modinfo is exported from runtime. It returns the raw build-info
// string that the toolchain embedded in the binary; ReadBuildInfo
// strips 16 framing bytes from each end before parsing it.
func modinfo() string
// ReadBuildInfo returns the build information embedded
// in the running binary. The information is available only
// in binaries built with module support.
func ReadBuildInfo() (info *BuildInfo, ok bool) {
	raw := modinfo()
	// The embedded blob carries a 16-byte marker at each end; anything
	// shorter cannot hold a payload at all.
	const frame = 16
	if len(raw) < 2*frame {
		return nil, false
	}
	payload := raw[frame : len(raw)-frame]
	parsed, err := ParseBuildInfo(payload)
	if err != nil {
		return nil, false
	}
	// The go version is stored separately from other build info, mostly for
	// historical reasons. It is not part of the modinfo() string, and
	// ParseBuildInfo does not recognize it. We inject it here to hide this
	// awkwardness from the user.
	parsed.GoVersion = runtime.Version()
	return parsed, true
}
// BuildInfo represents the build information read from a Go binary.
// The JSON field tags control how the struct is encoded by
// encoding/json; fields tagged ",omitempty" are dropped when empty.
type BuildInfo struct {
	// GoVersion is the version of the Go toolchain that built the binary
	// (for example, "go1.19.2").
	GoVersion string `json:",omitempty"`

	// Path is the package path of the main package for the binary
	// (for example, "golang.org/x/tools/cmd/stringer").
	Path string `json:",omitempty"`

	// Main describes the module that contains the main package for the binary.
	Main Module `json:""`

	// Deps describes all the dependency modules, both direct and indirect,
	// that contributed packages to the build of this binary.
	Deps []*Module `json:",omitempty"`

	// Settings describes the build settings used to build the binary.
	Settings []BuildSetting `json:",omitempty"`
}
// A Module describes a single module included in a build.
type Module struct {
	Path    string  `json:",omitempty"` // module path
	Version string  `json:",omitempty"` // module version
	Sum     string  `json:",omitempty"` // checksum
	Replace *Module `json:",omitempty"` // replaced by this module; nil when not replaced
}
// A BuildSetting is a key-value pair describing one setting that influenced a build.
//
// Defined keys include:
//
//   - -buildmode: the buildmode flag used (typically "exe")
//   - -compiler: the compiler toolchain flag used (typically "gc")
//   - CGO_ENABLED: the effective CGO_ENABLED environment variable
//   - CGO_CFLAGS: the effective CGO_CFLAGS environment variable
//   - CGO_CPPFLAGS: the effective CGO_CPPFLAGS environment variable
//   - CGO_CXXFLAGS: the effective CGO_CXXFLAGS environment variable
//   - CGO_LDFLAGS: the effective CGO_LDFLAGS environment variable
//   - DefaultGODEBUG: the effective GODEBUG settings
//   - GOARCH: the architecture target
//   - GOAMD64/GOARM/GO386/etc: the architecture feature level for GOARCH
//   - GOOS: the operating system target
//   - GOFIPS140: the frozen FIPS 140-3 module version, if any
//   - vcs: the version control system for the source tree where the build ran
//   - vcs.revision: the revision identifier for the current commit or checkout
//   - vcs.time: the modification time associated with vcs.revision, in RFC3339 format
//   - vcs.modified: true or false indicating whether the source tree had local modifications
type BuildSetting struct {
	// Key and Value describe the build setting.
	// Key must not contain an equals sign, space, tab, or newline.
	// These restrictions are what quoteKey/quoteValue enforce when
	// the settings are rendered by BuildInfo.String.
	Key string `json:",omitempty"`
	// Value must not contain newlines ('\n').
	Value string `json:",omitempty"`
}
// quoteKey reports whether key is required to be quoted: keys that
// are empty or contain a separator ('='), whitespace, or a quote
// character cannot be written verbatim on a "build" line.
func quoteKey(key string) bool {
	if key == "" {
		return true
	}
	return strings.ContainsAny(key, "= \t\r\n\"`")
}
// quoteValue reports whether value is required to be quoted: values
// containing whitespace or a quote character cannot be written
// verbatim on a "build" line. Unlike keys, an empty value is fine.
func quoteValue(value string) bool {
	return strings.IndexAny(value, " \t\r\n\"`") >= 0
}
// String returns a string representation of a [BuildInfo]: a "go"
// line, a "path" line, "mod"/"dep" module lines (with "=>" lines for
// replacements), and "build" lines for settings, each tab-separated
// and newline-terminated. ParseBuildInfo reverses this encoding.
func (bi *BuildInfo) String() string {
	var sb strings.Builder

	if bi.GoVersion != "" {
		sb.WriteString("go\t")
		sb.WriteString(bi.GoVersion)
		sb.WriteByte('\n')
	}
	if bi.Path != "" {
		sb.WriteString("path\t")
		sb.WriteString(bi.Path)
		sb.WriteByte('\n')
	}

	// writeModule emits one module line. A replaced module omits its
	// own checksum and is followed by an "=>" line for the replacement.
	var writeModule func(word string, m Module)
	writeModule = func(word string, m Module) {
		fmt.Fprintf(&sb, "%s\t%s\t%s", word, m.Path, m.Version)
		if m.Replace != nil {
			sb.WriteByte('\n')
			writeModule("=>", *m.Replace)
		} else {
			sb.WriteByte('\t')
			sb.WriteString(m.Sum)
		}
		sb.WriteByte('\n')
	}

	if bi.Main != (Module{}) {
		writeModule("mod", bi.Main)
	}
	for _, dep := range bi.Deps {
		writeModule("dep", *dep)
	}

	for _, s := range bi.Settings {
		key, value := s.Key, s.Value
		if quoteKey(key) {
			key = strconv.Quote(key)
		}
		if quoteValue(value) {
			value = strconv.Quote(value)
		}
		fmt.Fprintf(&sb, "build\t%s=%s\n", key, value)
	}

	return sb.String()
}
// ParseBuildInfo parses the string returned by [*BuildInfo.String],
// restoring the original BuildInfo,
// except that the GoVersion field is not set.
// Programs should normally not call this function,
// but instead call [ReadBuildInfo], [debug/buildinfo.ReadFile],
// or [debug/buildinfo.Read].
func ParseBuildInfo(data string) (bi *BuildInfo, err error) {
	// lineNum tracks the line being parsed so the deferred wrapper
	// below can say where parsing failed.
	lineNum := 1
	defer func() {
		if err != nil {
			err = fmt.Errorf("could not parse Go build info: line %d: %w", lineNum, err)
		}
	}()

	// Line prefixes and separators emitted by BuildInfo.String.
	const (
		pathLine  = "path\t"
		modLine   = "mod\t"
		depLine   = "dep\t"
		repLine   = "=>\t"
		buildLine = "build\t"
		newline   = "\n"
		tab       = "\t"
	)

	// readModuleLine parses the tab-separated columns that follow a
	// "mod" or "dep" prefix: path, version, and an optional checksum.
	readModuleLine := func(elem []string) (Module, error) {
		if len(elem) != 2 && len(elem) != 3 {
			return Module{}, fmt.Errorf("expected 2 or 3 columns; got %d", len(elem))
		}
		version := elem[1]
		sum := ""
		if len(elem) == 3 {
			sum = elem[2]
		}
		return Module{
			Path:    elem[0],
			Version: version,
			Sum:     sum,
		}, nil
	}

	bi = new(BuildInfo)
	var (
		last *Module // module a following "=>" replacement line applies to
		line string
		ok   bool
	)
	// Reverse of BuildInfo.String(), except for go version.
	for len(data) > 0 {
		line, data, ok = strings.Cut(data, newline)
		if !ok {
			// A trailing fragment without a newline terminator is ignored.
			break
		}
		switch {
		case strings.HasPrefix(line, pathLine):
			elem := line[len(pathLine):]
			bi.Path = elem
		case strings.HasPrefix(line, modLine):
			elem := strings.Split(line[len(modLine):], tab)
			last = &bi.Main
			*last, err = readModuleLine(elem)
			if err != nil {
				return nil, err
			}
		case strings.HasPrefix(line, depLine):
			elem := strings.Split(line[len(depLine):], tab)
			last = new(Module)
			bi.Deps = append(bi.Deps, last)
			*last, err = readModuleLine(elem)
			if err != nil {
				return nil, err
			}
		case strings.HasPrefix(line, repLine):
			elem := strings.Split(line[len(repLine):], tab)
			if len(elem) != 3 {
				return nil, fmt.Errorf("expected 3 columns for replacement; got %d", len(elem))
			}
			if last == nil {
				return nil, fmt.Errorf("replacement with no module on previous line")
			}
			last.Replace = &Module{
				Path:    elem[0],
				Version: elem[1],
				Sum:     elem[2],
			}
			// A module can have at most one replacement line.
			last = nil
		case strings.HasPrefix(line, buildLine):
			kv := line[len(buildLine):]
			if len(kv) < 1 {
				return nil, fmt.Errorf("build line missing '='")
			}
			var key, rawValue string
			switch kv[0] {
			case '=':
				return nil, fmt.Errorf("build line with missing key")
			case '`', '"':
				// Quoted key: take the quoted prefix and require an
				// '=' to follow it immediately.
				rawKey, err := strconv.QuotedPrefix(kv)
				if err != nil {
					return nil, fmt.Errorf("invalid quoted key in build line")
				}
				if len(kv) == len(rawKey) {
					return nil, fmt.Errorf("build line missing '=' after quoted key")
				}
				if c := kv[len(rawKey)]; c != '=' {
					return nil, fmt.Errorf("unexpected character after quoted key: %q", c)
				}
				// QuotedPrefix already validated rawKey, so the
				// Unquote error is deliberately ignored here.
				key, _ = strconv.Unquote(rawKey)
				rawValue = kv[len(rawKey)+1:]
			default:
				var ok bool
				key, rawValue, ok = strings.Cut(kv, "=")
				if !ok {
					return nil, fmt.Errorf("build line missing '=' after key")
				}
				if quoteKey(key) {
					return nil, fmt.Errorf("unquoted key %q must be quoted", key)
				}
			}
			var value string
			if len(rawValue) > 0 {
				switch rawValue[0] {
				case '`', '"':
					var err error
					value, err = strconv.Unquote(rawValue)
					if err != nil {
						return nil, fmt.Errorf("invalid quoted value in build line")
					}
				default:
					value = rawValue
					if quoteValue(value) {
						return nil, fmt.Errorf("unquoted value %q must be quoted", value)
					}
				}
			}
			bi.Settings = append(bi.Settings, BuildSetting{Key: key, Value: value})
		}
		// Note: unrecognized lines are skipped without error.
		lineNum++
	}
	return bi, nil
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package debug contains facilities for programs to debug themselves while
// they are running.
package debug
import (
"internal/poll"
"os"
"runtime"
_ "unsafe" // for linkname
)
// PrintStack prints to standard error the stack trace returned by runtime.Stack.
func PrintStack() {
	trace := Stack()
	// Best effort: a write error to stderr is not actionable here.
	os.Stderr.Write(trace)
}
// Stack returns a formatted stack trace of the goroutine that calls it.
// It calls [runtime.Stack] with a large enough buffer to capture the entire trace.
func Stack() []byte {
	// Start with 1 KiB and keep doubling until runtime.Stack reports
	// a length strictly smaller than the buffer, which means the
	// trace fit without truncation.
	size := 1024
	for {
		buf := make([]byte, size)
		if n := runtime.Stack(buf, false); n < size {
			return buf[:n]
		}
		size *= 2
	}
}
// CrashOptions provides options that control the formatting of the
// fatal crash message.
//
// It is currently empty; it exists so future options can be added
// without changing the SetCrashOutput signature.
type CrashOptions struct {
	/* for future expansion */
}
// SetCrashOutput configures a single additional file where unhandled
// panics and other fatal errors are printed, in addition to standard error.
// There is only one additional file: calling SetCrashOutput again overrides
// any earlier call.
// SetCrashOutput duplicates f's file descriptor, so the caller may safely
// close f as soon as SetCrashOutput returns.
// To disable this additional crash output, call SetCrashOutput(nil).
// If called concurrently with a crash, some in-progress output may be written
// to the old file even after an overriding SetCrashOutput returns.
func SetCrashOutput(f *os.File, opts CrashOptions) error {
	// ^uintptr(0) is the sentinel for "no crash file"; it is both the
	// value installed when f is nil and the value runtime_setCrashFD
	// returns when no previous file was set.
	fd := ^uintptr(0)
	if f != nil {
		// The runtime will write to this file descriptor from
		// low-level routines during a panic, possibly without
		// a G, so we must call f.Fd() eagerly. This creates a
		// danger that the file descriptor is no longer
		// valid at the time of the write, because the caller
		// (incorrectly) called f.Close() and the kernel
		// reissued the fd in a later call to open(2), leading
		// to crashes being written to the wrong file.
		//
		// So, we duplicate the fd to obtain a private one
		// that cannot be closed by the user.
		// This also alleviates us from concerns about the
		// lifetime and finalization of f.
		// (DupCloseOnExec returns an fd, not a *File, so
		// there is no finalizer, and we are responsible for
		// closing it.)
		//
		// The new fd must be close-on-exec, otherwise if the
		// crash monitor is a child process, it may inherit
		// it, so it will never see EOF from the pipe even
		// when this process crashes.
		//
		// A side effect of Fd() is that it calls SetBlocking,
		// which is important so that writes of a crash report
		// to a full pipe buffer don't get lost.
		fd2, _, err := poll.DupCloseOnExec(int(f.Fd()))
		if err != nil {
			return err
		}
		runtime.KeepAlive(f) // prevent finalization before dup
		fd = uintptr(fd2)
	}
	if prev := runtime_setCrashFD(fd); prev != ^uintptr(0) {
		// Close the previously duplicated fd, which we own.
		// We use NewFile+Close because it is portable
		// unlike syscall.Close, whose parameter type varies.
		os.NewFile(prev, "").Close() // ignore error
	}
	return nil
}
// runtime_setCrashFD installs fd as the runtime's crash-output file
// descriptor and returns the previously installed one (^uintptr(0)
// when none was set). It is linked to runtime.setCrashFD.
//
//go:linkname runtime_setCrashFD runtime.setCrashFD
func runtime_setCrashFD(uintptr) uintptr
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Though the debug call function feature is not enabled on
// ppc64, inserted ppc64 to avoid missing Go declaration error
// for debugCallPanicked while building runtime.test
//go:build amd64 || arm64 || loong64 || ppc64le || ppc64
package runtime
import (
"internal/abi"
"internal/runtime/sys"
"unsafe"
)
// Reasons returned by debugCallCheck describing why a debugger call
// injection cannot proceed at the current PC.
const (
	debugCallSystemStack = "executing on Go runtime stack"
	debugCallUnknownFunc = "call from unknown function"
	debugCallRuntime     = "call from within the Go runtime"
	debugCallUnsafePoint = "call not at safe point"
)
// debugCallV2 is the entry point for debugger-injected function
// calls; it is declared here without a Go body.
func debugCallV2()

// debugCallPanicked reports to the debugger a value recovered from a
// panic inside an injected call (see debugCallWrap2). Declared here
// without a Go body.
func debugCallPanicked(val any)
// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
// explaining why.
//
//go:nosplit
func debugCallCheck(pc uintptr) string {
	// No user calls from the system stack.
	if getg() != getg().m.curg {
		return debugCallSystemStack
	}
	if sp := sys.GetCallerSP(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
		// Fast syscalls (nanotime) and racecall switch to the
		// g0 stack without switching g. We can't safely make
		// a call in this state. (We can't even safely
		// systemstack.)
		return debugCallSystemStack
	}

	// Switch to the system stack to avoid overflowing the user
	// stack.
	var ret string
	systemstack(func() {
		f := findfunc(pc)
		if !f.valid() {
			ret = debugCallUnknownFunc
			return
		}

		name := funcname(f)

		switch name {
		case "debugCall32",
			"debugCall64",
			"debugCall128",
			"debugCall256",
			"debugCall512",
			"debugCall1024",
			"debugCall2048",
			"debugCall4096",
			"debugCall8192",
			"debugCall16384",
			"debugCall32768",
			"debugCall65536":
			// These functions are allowed so that the debugger can initiate multiple function calls.
			// See: https://golang.org/cl/161137/
			return
		}

		// Disallow calls from the runtime. We could
		// potentially make this condition tighter (e.g., not
		// when locks are held), but there are enough tightly
		// coded sequences (e.g., defer handling) that it's
		// better to play it safe.
		if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
			ret = debugCallRuntime
			return
		}

		// Check that this isn't an unsafe-point.
		// pc is a return PC, so step back one byte (unless we're
		// at the function entry) so the PCDATA lookup applies to
		// the call instruction itself.
		if pc != f.entry() {
			pc--
		}
		up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc)
		if up != abi.UnsafePointSafe {
			// Not at a safe point.
			ret = debugCallUnsafePoint
		}
	})
	return ret
}
// debugCallWrap starts a new goroutine to run a debug call and blocks
// the calling goroutine. On the goroutine, it prepares to recover
// panics from the debug call, and then calls the call dispatching
// function at PC dispatch.
//
// This must be deeply nosplit because there are untyped values on the
// stack from debugCallV2.
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
	var lockedExt uint32
	callerpc := sys.GetCallerPC()
	gp := getg()

	// Lock ourselves to the OS thread.
	//
	// Debuggers rely on us running on the same thread until we get to
	// dispatch the function they asked us to.
	//
	// We're going to transfer this to the new G we just created.
	lockOSThread()

	// Create a new goroutine to execute the call on. Run this on
	// the system stack to avoid growing our stack.
	systemstack(func() {
		// TODO(mknyszek): It would be nice to wrap these arguments in an allocated
		// closure and start the goroutine with that closure, but the compiler disallows
		// implicit closure allocation in the runtime.
		fn := debugCallWrap1
		newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc, false, waitReasonZero)
		// Pass the dispatch PC and the calling G to the new
		// goroutine through its param field.
		args := &debugCallWrapArgs{
			dispatch: dispatch,
			callingG: gp,
		}
		newg.param = unsafe.Pointer(args)

		// Transfer locked-ness to the new goroutine.
		// Save lock state to restore later.
		mp := gp.m
		if mp != gp.lockedm.ptr() {
			throw("inconsistent lockedm")
		}
		// Save the external lock count and clear it so
		// that it can't be unlocked from the debug call.
		// Note: we already locked internally to the thread,
		// so if we were locked before we're still locked now.
		lockedExt = mp.lockedExt
		mp.lockedExt = 0

		mp.lockedg.set(newg)
		newg.lockedm.set(mp)
		gp.lockedm = 0

		// Mark the calling goroutine as being at an async
		// safe-point, since it has a few conservative frames
		// at the bottom of the stack. This also prevents
		// stack shrinks.
		gp.asyncSafePoint = true

		// Stash newg away so we can execute it below (mcall's
		// closure can't capture anything).
		gp.schedlink.set(newg)
	})

	// Switch to the new goroutine.
	mcall(func(gp *g) {
		// Get newg.
		newg := gp.schedlink.ptr()
		gp.schedlink = 0

		// Park the calling goroutine.
		trace := traceAcquire()
		if trace.ok() {
			// Trace the event before the transition. It may take a
			// stack trace, but we won't own the stack after the
			// transition anymore.
			trace.GoPark(traceBlockDebugCall, 1)
		}
		casGToWaiting(gp, _Grunning, waitReasonDebugCall)
		if trace.ok() {
			traceRelease(trace)
		}
		dropg()

		// Directly execute the new goroutine. The debug
		// protocol will continue on the new goroutine, so
		// it's important we not just let the scheduler do
		// this or it may resume a different goroutine.
		execute(newg, true)
	})

	// We'll resume here when the call returns.

	// Restore locked state.
	mp := gp.m
	mp.lockedExt = lockedExt
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)

	// Undo the lockOSThread we did earlier.
	unlockOSThread()

	gp.asyncSafePoint = false
}
// debugCallWrapArgs carries the dispatch PC and the calling goroutine
// from debugCallWrap to debugCallWrap1 via the new goroutine's param
// field.
type debugCallWrapArgs struct {
	dispatch uintptr
	callingG *g
}
// debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine.
func debugCallWrap1() {
	gp := getg()
	// Unpack the arguments stashed in param by debugCallWrap.
	args := (*debugCallWrapArgs)(gp.param)
	dispatch, callingG := args.dispatch, args.callingG
	gp.param = nil

	// Dispatch call and trap panics.
	debugCallWrap2(dispatch)

	// Resume the caller goroutine.
	getg().schedlink.set(callingG)
	mcall(func(gp *g) {
		callingG := gp.schedlink.ptr()
		gp.schedlink = 0

		// Unlock this goroutine from the M if necessary. The
		// calling G will relock.
		if gp.lockedm != 0 {
			gp.lockedm = 0
			gp.m.lockedg = 0
		}

		// Switch back to the calling goroutine. At some point
		// the scheduler will schedule us again and we'll
		// finish exiting.
		trace := traceAcquire()
		if trace.ok() {
			// Trace the event before the transition. It may take a
			// stack trace, but we won't own the stack after the
			// transition anymore.
			trace.GoSched()
		}
		casgstatus(gp, _Grunning, _Grunnable)
		if trace.ok() {
			traceRelease(trace)
		}
		dropg()
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)

		// Make the calling goroutine runnable again and run it.
		trace = traceAcquire()
		casgstatus(callingG, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(callingG, 0)
			traceRelease(trace)
		}
		execute(callingG, true)
	})
}
// debugCallWrap2 calls the dispatch function at PC dispatch and traps
// any panic it raises, reporting the recovered value to the debugger
// via debugCallPanicked.
func debugCallWrap2(dispatch uintptr) {
	// Call the dispatch function and trap panics.
	// Manufacture a func() from the raw PC by overlaying a funcval,
	// avoiding a closure allocation.
	var dispatchF func()
	dispatchFV := funcval{dispatch}
	*(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))

	// ok stays false only if dispatchF panics, in which case the
	// deferred function recovers and reports the panic value.
	var ok bool
	defer func() {
		if !ok {
			err := recover()
			debugCallPanicked(err)
		}
	}()
	dispatchF()
	ok = true
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file provides an internal debug logging facility. The debug
// log is a lightweight, in-memory, per-M ring buffer. By default, the
// runtime prints the debug log on panic.
//
// To print something to the debug log, call dlog to obtain a dlogger
// and use the methods on that to add values. The values will be
// space-separated in the output (much like println).
//
// This facility can be enabled by passing -tags debuglog when
// building. Without this tag, dlog calls compile to nothing.
//
// Implementation notes
//
// There are two implementations of the dlog interface: dloggerImpl and
// dloggerFake. dloggerFake is a no-op implementation. dlogger is type-aliased
// to one or the other depending on the debuglog build tag. However, both types
// always exist and are always built. This helps ensure we compile as much of
// the implementation as possible in the default build configuration, while also
// enabling us to achieve good test coverage of the real debuglog implementation
// even when the debuglog build tag is not set.
package runtime
import (
"internal/abi"
"internal/byteorder"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// debugLogBytes is the size of each per-M ring buffer. This is
// allocated off-heap to avoid blowing up the M and hence the GC'd
// heap size.
const debugLogBytes = 16 << 10

// debugLogStringLimit is the maximum number of bytes in a string.
// Above this, the string will be truncated with "..(n more bytes).."
const debugLogStringLimit = debugLogBytes / 8
// dlog returns a debug logger. The caller can use methods on the
// returned logger to add values, which will be space-separated in the
// final output, much like println. The caller must call end() to
// finish the message.
//
// dlog can be used from highly-constrained corners of the runtime: it
// is safe to use in the signal handler, from within the write
// barrier, from within the stack implementation, and in places that
// must be recursively nosplit.
//
// This will be compiled away if built without the debuglog build tag.
// However, argument construction may not be. If any of the arguments
// are not literals or trivial expressions, consider protecting the
// call with "if dlogEnabled".
//
//go:nosplit
//go:nowritebarrierrec
func dlog() dlogger {
	// dlog1 is defined to either dlogImpl or dlogFake,
	// depending on the debuglog build tag.
	return dlog1()
}
// dlogFake returns the no-op logger; it is the dlog1 implementation
// used when the debuglog build tag is not set.
//
//go:nosplit
//go:nowritebarrierrec
func dlogFake() dloggerFake {
	return dloggerFake{}
}
// dlogImpl returns a real debug logger with a record header already
// started; it is the dlog1 implementation used when the debuglog
// build tag is set. It obtains a logger from the per-M cache, the
// global pool, or a fresh off-heap allocation, in that order.
//
//go:nosplit
//go:nowritebarrierrec
func dlogImpl() *dloggerImpl {
	// Get the time.
	tick, nano := uint64(cputicks()), uint64(nanotime())

	// Try to get a cached logger.
	l := getCachedDlogger()

	// If we couldn't get a cached logger, try to get one from the
	// global pool.
	if l == nil {
		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
		all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))
		for l1 := all; l1 != nil; l1 = l1.allLink {
			// Claim the first unowned logger via CAS on its
			// owned flag.
			if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
				l = l1
				break
			}
		}
	}

	// If that failed, allocate a new logger.
	if l == nil {
		// Use sysAllocOS instead of sysAlloc because we want to interfere
		// with the runtime as little as possible, and sysAlloc updates accounting.
		l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{}), "debug log"))
		if l == nil {
			throw("failed to allocate debug log")
		}
		l.w.r.data = &l.w.data
		l.owned.Store(1)

		// Prepend to allDloggers list (CAS loop; the list is
		// prepend-only, see the allDloggers comment).
		headp := (*uintptr)(unsafe.Pointer(&allDloggers))
		for {
			head := atomic.Loaduintptr(headp)
			l.allLink = (*dloggerImpl)(unsafe.Pointer(head))
			if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
				break
			}
		}
	}

	// If the time delta is getting too high, write a new sync
	// packet. We set the limit so we don't write more than 6
	// bytes of delta in the record header.
	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
		l.w.writeSync(tick, nano)
	}

	// Reserve space for framing header.
	l.w.ensure(debugLogHeaderSize)
	l.w.write += debugLogHeaderSize

	// Write record header: time deltas and the current P's id
	// (-1 when there is no P).
	l.w.uvarint(tick - l.w.tick)
	l.w.uvarint(nano - l.w.nano)
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.p != 0 {
		l.w.varint(int64(gp.m.p.ptr().id))
	} else {
		l.w.varint(-1)
	}

	return l
}
// A dloggerImpl writes to the debug log.
//
// To obtain a dloggerImpl, call dlog(). When done with the dloggerImpl, call
// end().
type dloggerImpl struct {
	_ sys.NotInHeap // allocated off-heap via sysAllocOS, never on the GC'd heap
	w debugLogWriter

	// allLink is the next dlogger in the allDloggers list.
	allLink *dloggerImpl

	// owned indicates that this dlogger is owned by an M. This is
	// accessed atomically.
	owned atomic.Uint32
}
// allDloggers is a list of all dloggers, linked through
// dlogger.allLink. This is accessed atomically. This is prepend only,
// so it doesn't need to protect against ABA races.
var allDloggers *dloggerImpl

// A dloggerFake is a no-op implementation of dlogger; all of its
// methods do nothing and return the receiver.
type dloggerFake struct{}
// end is a no-op on the fake logger.
//
//go:nosplit
func (l dloggerFake) end() {}

// end finishes a log record: it fills in the framing header that
// dlogImpl reserved, commits the record, and releases the logger back
// to the per-M cache or the global pool.
//
//go:nosplit
func (l *dloggerImpl) end() {
	// Fill in framing header.
	size := l.w.write - l.w.r.end
	if !l.w.writeFrameAt(l.w.r.end, size) {
		throw("record too large")
	}

	// Commit the record.
	l.w.r.end = l.w.write

	// Attempt to return this logger to the cache.
	if putCachedDlogger(l) {
		return
	}

	// Return the logger to the global pool.
	l.owned.Store(0)
}
// Value tags used in the debug log encoding. Each logged value is
// written as one of these tag bytes followed by a tag-specific
// payload (see the dloggerImpl methods below).
const (
	debugLogUnknown = 1 + iota
	debugLogBoolTrue
	debugLogBoolFalse
	debugLogInt
	debugLogUint
	debugLogHex
	debugLogPtr
	debugLogString
	debugLogConstString
	debugLogHexdump
	debugLogOverflow
	debugLogPC
	debugLogTraceback
)
// b is a no-op on the fake logger.
//
//go:nosplit
func (l dloggerFake) b(x bool) dloggerFake { return l }

// b logs a boolean, encoded as one of two distinct tag bytes with no
// payload.
//
//go:nosplit
func (l *dloggerImpl) b(x bool) *dloggerImpl {
	tag := byte(debugLogBoolFalse)
	if x {
		tag = debugLogBoolTrue
	}
	l.w.byte(tag)
	return l
}
// The signed-integer methods below all widen to int64 and funnel into
// i64; the fake variants are no-ops.

//go:nosplit
func (l dloggerFake) i(x int) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) i(x int) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i8(x int8) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) i8(x int8) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i16(x int16) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) i16(x int16) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i32(x int32) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) i32(x int32) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i64(x int64) dloggerFake { return l }

// i64 logs a signed integer as a debugLogInt tag followed by a varint.
//
//go:nosplit
func (l *dloggerImpl) i64(x int64) *dloggerImpl {
	l.w.byte(debugLogInt)
	l.w.varint(x)
	return l
}
// The unsigned-integer methods below all widen to uint64 and funnel
// into u64; the fake variants are no-ops.

//go:nosplit
func (l dloggerFake) u(x uint) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) u(x uint) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) uptr(x uintptr) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) uptr(x uintptr) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u8(x uint8) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) u8(x uint8) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u16(x uint16) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) u16(x uint16) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u32(x uint32) dloggerFake { return l }

//go:nosplit
func (l *dloggerImpl) u32(x uint32) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u64(x uint64) dloggerFake { return l }

// u64 logs an unsigned integer as a debugLogUint tag followed by a
// uvarint.
//
//go:nosplit
func (l *dloggerImpl) u64(x uint64) *dloggerImpl {
	l.w.byte(debugLogUint)
	l.w.uvarint(x)
	return l
}
//go:nosplit
func (l dloggerFake) hex(x uint64) dloggerFake { return l }
//go:nosplit
func (l *dloggerImpl) hex(x uint64) *dloggerImpl {
l.w.byte(debugLogHex)
l.w.uvarint(x)
return l
}
//go:nosplit
func (l dloggerFake) p(x any) dloggerFake { return l }

// p appends a pointer field. Only pointer-shaped kinds (chan, func,
// map, pointer, unsafe.Pointer) are accepted; anything else is a
// caller bug and throws. A nil interface is logged as address 0.
//
//go:nosplit
func (l *dloggerImpl) p(x any) *dloggerImpl {
	l.w.byte(debugLogPtr)
	if x == nil {
		l.w.uvarint(0)
	} else {
		v := efaceOf(&x)
		switch v._type.Kind() {
		case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer:
			// The eface data word is the pointer itself for these kinds.
			l.w.uvarint(uint64(uintptr(v.data)))
		default:
			throw("not a pointer type")
		}
	}
	return l
}
//go:nosplit
func (l dloggerFake) s(x string) dloggerFake { return l }

// s appends a string field. Strings whose backing data lies in the
// binary image (between etext and end) are recorded compactly as a
// length plus an offset from etext; other strings are copied into the
// log, truncated to debugLogStringLimit bytes with an overflow marker
// recording how many bytes were dropped.
//
//go:nosplit
func (l *dloggerImpl) s(x string) *dloggerImpl {
	strData := unsafe.StringData(x)
	datap := &firstmoduledata
	if len(x) > 4 && datap.etext <= uintptr(unsafe.Pointer(strData)) && uintptr(unsafe.Pointer(strData)) < datap.end {
		// String constants are in the rodata section, which
		// isn't recorded in moduledata. But it has to be
		// somewhere between etext and end.
		l.w.byte(debugLogConstString)
		l.w.uvarint(uint64(len(x)))
		l.w.uvarint(uint64(uintptr(unsafe.Pointer(strData)) - datap.etext))
	} else {
		l.w.byte(debugLogString)
		// We can't use unsafe.Slice as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		var b []byte
		bb := (*slice)(unsafe.Pointer(&b))
		bb.array = unsafe.Pointer(strData)
		bb.len, bb.cap = len(x), len(x)
		if len(b) > debugLogStringLimit {
			b = b[:debugLogStringLimit]
		}
		l.w.uvarint(uint64(len(b)))
		l.w.bytes(b)
		if len(b) != len(x) {
			// Record how many bytes were truncated.
			l.w.byte(debugLogOverflow)
			l.w.uvarint(uint64(len(x) - len(b)))
		}
	}
	return l
}
//go:nosplit
func (l dloggerFake) hexdump(p unsafe.Pointer, bytes uintptr) dloggerFake { return l }

// hexdump appends a raw memory dump of the bytes at [p, p+bytes).
// The data is copied into the log (truncated to debugLogStringLimit,
// with an overflow marker) along with the region's starting address.
//
//go:nosplit
func (l *dloggerImpl) hexdump(p unsafe.Pointer, bytes uintptr) *dloggerImpl {
	// Build the slice header by hand (as in s above), since
	// unsafe.Slice may panic, which isn't safe here.
	var b []byte
	bb := (*slice)(unsafe.Pointer(&b))
	bb.array = unsafe.Pointer(p)
	bb.len, bb.cap = int(bytes), int(bytes)
	if len(b) > debugLogStringLimit {
		b = b[:debugLogStringLimit]
	}
	l.w.byte(debugLogHexdump)
	l.w.uvarint(uint64(uintptr(p)))
	l.w.uvarint(uint64(len(b)))
	l.w.bytes(b)
	if uintptr(len(b)) != bytes {
		// Record how many bytes were truncated.
		l.w.byte(debugLogOverflow)
		l.w.uvarint(uint64(bytes) - uint64(len(b)))
	}
	return l
}
//go:nosplit
func (l dloggerFake) pc(x uintptr) dloggerFake { return l }

// pc appends a single program counter, which the log reader will
// symbolize when printing.
//
//go:nosplit
func (l *dloggerImpl) pc(x uintptr) *dloggerImpl {
	l.w.byte(debugLogPC)
	l.w.uvarint(uint64(x))
	return l
}

//go:nosplit
func (l dloggerFake) traceback(x []uintptr) dloggerFake { return l }

// traceback appends a length-prefixed slice of PCs (as produced by a
// traceback), symbolized by the reader when printing.
//
//go:nosplit
func (l *dloggerImpl) traceback(x []uintptr) *dloggerImpl {
	l.w.byte(debugLogTraceback)
	l.w.uvarint(uint64(len(x)))
	for _, pc := range x {
		l.w.uvarint(uint64(pc))
	}
	return l
}
// A debugLogWriter is a ring buffer of binary debug log records.
//
// A log record consists of a 2-byte framing header and a sequence of
// fields. The framing header gives the size of the record as a little
// endian 16-bit value. Each field starts with a byte indicating its
// type, followed by type-specific data. If the size in the framing
// header is 0, it's a sync record consisting of two little endian
// 64-bit values giving a new time base.
//
// Because this is a ring buffer, new records will eventually
// overwrite old records. Hence, it maintains a reader that consumes
// the log as it gets overwritten. That reader state is where an
// actual log reader would start.
type debugLogWriter struct {
	_ sys.NotInHeap

	// write is the virtual write cursor: it increases monotonically
	// and is reduced modulo len(data.b) when indexing the buffer.
	write uint64
	data  debugLogBuf

	// tick and nano are the time bases from the most recently
	// written sync record.
	tick, nano uint64

	// r is a reader that consumes records as they get overwritten
	// by the writer. It also acts as the initial reader state
	// when printing the log.
	r debugLogReader

	// buf is a scratch buffer for encoding. This is here to
	// reduce stack usage.
	buf [10]byte
}
// debugLogBuf is the fixed-size backing store of a debug log ring
// buffer. It must not live in the Go heap (sys.NotInHeap).
type debugLogBuf struct {
	_ sys.NotInHeap
	b [debugLogBytes]byte
}

const (
	// debugLogHeaderSize is the number of bytes in the framing
	// header of every dlog record.
	debugLogHeaderSize = 2

	// debugLogSyncSize is the number of bytes in a sync record.
	debugLogSyncSize = debugLogHeaderSize + 2*8
)
// ensure makes room for an n-byte write by advancing the shadow
// reader past old records until the write cannot overrun unconsumed
// data. Throws if the reader is stuck mid-record.
//
//go:nosplit
func (l *debugLogWriter) ensure(n uint64) {
	for l.write+n >= l.r.begin+uint64(len(l.data.b)) {
		// Consume record at begin.
		if l.r.skip() == ^uint64(0) {
			// Wrapped around within a record.
			//
			// TODO(austin): It would be better to just
			// eat the whole buffer at this point, but we
			// have to communicate that to the reader
			// somehow.
			throw("record wrapped around")
		}
	}
}
// writeFrameAt stores a 16-bit little-endian record size at pos
// (modulo the buffer length) and reports whether size actually fit
// in 16 bits.
//
//go:nosplit
func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
	l.data.b[pos%uint64(len(l.data.b))] = uint8(size)
	l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8)
	return size <= 0xFFFF
}
// writeSync writes a sync record: a zero-size frame followed by the
// raw tick and nano time bases. It also records the new bases on the
// writer and advances the shadow reader's end past the record.
//
//go:nosplit
func (l *debugLogWriter) writeSync(tick, nano uint64) {
	l.tick, l.nano = tick, nano
	l.ensure(debugLogHeaderSize)
	l.writeFrameAt(l.write, 0)
	l.write += debugLogHeaderSize
	l.writeUint64LE(tick)
	l.writeUint64LE(nano)
	l.r.end = l.write
}
// writeUint64LE appends x to the log as 8 little-endian bytes.
//
//go:nosplit
func (l *debugLogWriter) writeUint64LE(x uint64) {
	var b [8]byte
	byteorder.LEPutUint64(b[:], x)
	l.bytes(b[:])
}
// byte appends a single byte to the log, evicting old records if
// necessary to make room.
//
//go:nosplit
func (l *debugLogWriter) byte(x byte) {
	l.ensure(1)
	pos := l.write
	l.write++
	l.data.b[pos%uint64(len(l.data.b))] = x
}
// bytes appends x to the log, splitting the copy wherever it wraps
// around the end of the ring buffer.
//
//go:nosplit
func (l *debugLogWriter) bytes(x []byte) {
	l.ensure(uint64(len(x)))
	pos := l.write
	l.write += uint64(len(x))
	for len(x) > 0 {
		// copy stops at the end of the buffer; loop to write the rest
		// at the wrapped-around position.
		n := copy(l.data.b[pos%uint64(len(l.data.b)):], x)
		pos += uint64(n)
		x = x[n:]
	}
}
// varint appends x using zig-zag encoding (sign in bit 0), so values
// of small magnitude — positive or negative — stay short on the wire.
//
//go:nosplit
func (l *debugLogWriter) varint(x int64) {
	var u uint64
	if x < 0 {
		u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1
	} else {
		u = (uint64(x) << 1) // do not complement i, bit 0 is 0
	}
	l.uvarint(u)
}
// uvarint appends u in base-128 varint encoding: 7 bits per byte,
// least-significant group first, high bit set on all but the last
// byte. Uses the writer's scratch buf to avoid stack growth.
//
//go:nosplit
func (l *debugLogWriter) uvarint(u uint64) {
	i := 0
	for u >= 0x80 {
		l.buf[i] = byte(u) | 0x80
		u >>= 7
		i++
	}
	l.buf[i] = byte(u)
	i++
	l.bytes(l.buf[:i])
}
// A debugLogReader decodes records out of a debugLogBuf ring buffer.
type debugLogReader struct {
	data *debugLogBuf

	// begin and end are the positions in the log of the beginning
	// and end of the log data, modulo len(data).
	begin, end uint64

	// tick and nano are the current time base at begin.
	tick, nano uint64
}
// skip consumes the record at begin and returns the number of bytes
// consumed, or ^uint64(0) if the record is incomplete (runs past
// end). Sync records update the reader's time base as a side effect.
//
//go:nosplit
func (r *debugLogReader) skip() uint64 {
	// Read size at pos.
	if r.begin+debugLogHeaderSize > r.end {
		return ^uint64(0)
	}
	size := uint64(r.readUint16LEAt(r.begin))
	if size == 0 {
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		size = debugLogSyncSize
	}
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	r.begin += size
	return size
}
// readUint16LEAt reads a little-endian uint16 at pos, indexing each
// byte modulo the buffer length so reads can wrap around.
//
//go:nosplit
func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
	return uint16(r.data.b[pos%uint64(len(r.data.b))]) |
		uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8
}

// readUint64LEAt reads a little-endian uint64 at pos, byte by byte
// so the read may wrap around the buffer end.
//
//go:nosplit
func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
	var b [8]byte
	for i := range b {
		b[i] = r.data.b[pos%uint64(len(r.data.b))]
		pos++
	}
	return byteorder.LEUint64(b[:])
}
// peek returns the absolute tick of the next non-sync record without
// consuming it, or ^uint64(0) if there is no complete record left.
// Any sync records in the way are consumed and update the time base.
func (r *debugLogReader) peek() (tick uint64) {
	// Consume any sync records.
	size := uint64(0)
	for size == 0 {
		if r.begin+debugLogHeaderSize > r.end {
			return ^uint64(0)
		}
		size = uint64(r.readUint16LEAt(r.begin))
		if size != 0 {
			break
		}
		if r.begin+debugLogSyncSize > r.end {
			return ^uint64(0)
		}
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		r.begin += debugLogSyncSize
	}

	// Peek tick delta.
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	// Decode the record's leading uvarint (the tick delta) in place,
	// without advancing begin.
	pos := r.begin + debugLogHeaderSize
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[pos%uint64(len(r.data.b))]
		pos++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	if pos > r.begin+size {
		// The varint ran off the end of the record: corrupt.
		return ^uint64(0)
	}
	return r.tick + u
}
// header consumes the framing header and the record prefix (tick,
// nano, P number), returning the record's end position, its absolute
// timestamps, and the P it was logged on. It assumes peek has already
// validated the record.
func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
	// Read size. We've already skipped sync packets and checked
	// bounds in peek.
	size := uint64(r.readUint16LEAt(r.begin))
	end = r.begin + size
	r.begin += debugLogHeaderSize

	// Read tick, nano, and p. Tick and nano are deltas from the
	// current time base.
	tick = r.uvarint() + r.tick
	nano = r.uvarint() + r.nano
	p = int(r.varint())
	return
}
// uvarint decodes a base-128 varint at begin, advancing begin past it.
func (r *debugLogReader) uvarint() uint64 {
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[r.begin%uint64(len(r.data.b))]
		r.begin++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	return u
}

// varint decodes a zig-zag-encoded signed value at begin, advancing
// begin past it. Inverse of debugLogWriter.varint.
func (r *debugLogReader) varint() int64 {
	u := r.uvarint()
	var v int64
	if u&1 == 0 {
		v = int64(u >> 1)
	} else {
		v = ^int64(u >> 1)
	}
	return v
}
// printVal decodes and prints one field of the current record,
// dispatching on the field's leading type tag. It reports whether
// decoding can continue; false means the record is corrupt and the
// caller should abandon it.
func (r *debugLogReader) printVal() bool {
	typ := r.data.b[r.begin%uint64(len(r.data.b))]
	r.begin++

	switch typ {
	default:
		print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
		return false

	case debugLogUnknown:
		print("<unknown kind>")

	case debugLogBoolTrue:
		print(true)

	case debugLogBoolFalse:
		print(false)

	case debugLogInt:
		print(r.varint())

	case debugLogUint:
		print(r.uvarint())

	case debugLogHex, debugLogPtr:
		print(hex(r.uvarint()))

	case debugLogString:
		sl := r.uvarint()
		if r.begin+sl > r.end {
			r.begin = r.end
			print("<string length corrupted>")
			break
		}
		// Write the string in chunks, since it may wrap around the
		// end of the ring buffer.
		for sl > 0 {
			b := r.data.b[r.begin%uint64(len(r.data.b)):]
			if uint64(len(b)) > sl {
				b = b[:sl]
			}
			r.begin += uint64(len(b))
			sl -= uint64(len(b))
			gwrite(b)
		}

	case debugLogConstString:
		// The string was stored as a length and an offset from etext;
		// reconstruct a string header pointing into the binary image.
		len, ptr := int(r.uvarint()), uintptr(r.uvarint())
		ptr += firstmoduledata.etext
		// We can't use unsafe.String as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		str := stringStruct{
			str: unsafe.Pointer(ptr),
			len: len,
		}
		s := *(*string)(unsafe.Pointer(&str))
		print(s)

	case debugLogOverflow:
		print("..(", r.uvarint(), " more bytes)..")

	case debugLogHexdump:
		p := uintptr(r.uvarint())
		bl := r.uvarint()
		if r.begin+bl > r.end {
			r.begin = r.end
			print("<hexdump length corrupted>")
			break
		}
		println() // Start on a new line
		hd := hexdumper{addr: p}
		// Feed the dump data in chunks, handling ring wraparound.
		for bl > 0 {
			b := r.data.b[r.begin%uint64(len(r.data.b)):]
			if uint64(len(b)) > bl {
				b = b[:bl]
			}
			r.begin += uint64(len(b))
			bl -= uint64(len(b))
			hd.write(b)
		}
		hd.close()

	case debugLogPC:
		printDebugLogPC(uintptr(r.uvarint()), false)

	case debugLogTraceback:
		n := int(r.uvarint())
		for i := 0; i < n; i++ {
			print("\n\t")
			// gentraceback PCs are always return PCs.
			// Convert them to call PCs.
			//
			// TODO(austin): Expand inlined frames.
			printDebugLogPC(uintptr(r.uvarint()), true)
		}
	}

	return true
}
// printDebugLog prints the debug log. It is a no-op in builds where
// the debuglog build tag is off.
func printDebugLog() {
	if !dlogEnabled {
		return
	}
	printDebugLogImpl()
}
// printDebugLogImpl merges the records of every live debug logger and
// prints them in tick order.
func printDebugLogImpl() {
	// This function should not panic or throw since it is used in
	// the fatal panic path and this may deadlock.

	printlock()

	// Get the list of all debug logs.
	allp := (*uintptr)(unsafe.Pointer(&allDloggers))
	all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))

	// Count the logs.
	n := 0
	for l := all; l != nil; l = l.allLink {
		n++
	}
	if n == 0 {
		printunlock()
		return
	}

	// Prepare read state for all logs.
	type readState struct {
		debugLogReader
		first    bool   // haven't printed the ">> begin log" banner yet
		lost     uint64 // bytes overwritten before printing started
		nextTick uint64 // tick of this log's next record (from peek)
	}
	// Use sysAllocOS instead of sysAlloc because we want to interfere
	// with the runtime as little as possible, and sysAlloc updates accounting.
	state1 := sysAllocOS(unsafe.Sizeof(readState{})*uintptr(n), "debug log")
	if state1 == nil {
		println("failed to allocate read state for", n, "logs")
		printunlock()
		return
	}
	state := (*[1 << 20]readState)(state1)[:n]
	{
		l := all
		for i := range state {
			s := &state[i]
			s.debugLogReader = l.w.r
			s.first = true
			s.lost = l.w.r.begin
			s.nextTick = s.peek()
			l = l.allLink
		}
	}

	// Print records.
	for {
		// Find the next record: the log whose pending record has the
		// smallest tick.
		var best struct {
			tick uint64
			i    int
		}
		best.tick = ^uint64(0)
		for i := range state {
			if state[i].nextTick < best.tick {
				best.tick = state[i].nextTick
				best.i = i
			}
		}
		if best.tick == ^uint64(0) {
			// All logs exhausted.
			break
		}

		// Print record.
		s := &state[best.i]
		if s.first {
			print(">> begin log ", best.i)
			if s.lost != 0 {
				print("; lost first ", s.lost>>10, "KB")
			}
			print(" <<\n")
			s.first = false
		}

		end, _, nano, p := s.header()
		oldEnd := s.end
		// Temporarily clamp end to this record so printVal cannot read
		// into the following record.
		s.end = end

		print("[")
		var tmpbuf [21]byte
		pnano := int64(nano) - runtimeInitTime
		if pnano < 0 {
			// Logged before runtimeInitTime was set.
			pnano = 0
		}
		pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
		print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
		print(" P ", p, "] ")

		for i := 0; s.begin < s.end; i++ {
			if i > 0 {
				print(" ")
			}
			if !s.printVal() {
				// Abort this P log.
				print("<aborting P log>")
				end = oldEnd
				break
			}
		}
		println()

		// Move on to the next record.
		s.begin = end
		s.end = oldEnd
		s.nextTick = s.peek()
	}

	printunlock()
}
// printDebugLogPC prints a single symbolized PC. If returnPC is true,
// pc is a return PC that must first be converted to a call PC.
func printDebugLogPC(pc uintptr, returnPC bool) {
	fn := findfunc(pc)
	if returnPC && (!fn.valid() || pc > fn.entry()) {
		// Step back so the PC maps to the call, not the
		// instruction after it.
		// TODO(austin): Don't back up if the previous frame
		// was a sigpanic.
		pc--
	}

	print(hex(pc))
	if !fn.valid() {
		print(" [unknown PC]")
	} else {
		name := funcname(fn)
		file, line := funcline(fn, pc)
		print(" [", name, "+", hex(pc-fn.entry()),
			" ", file, ":", line, "]")
	}
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !debuglog
package runtime
// dlogEnabled is false in this build (the debuglog build tag is off),
// so all dlog calls compile down to no-ops.
const dlogEnabled = false

// dlogger is the no-op logger used when debuglog is disabled.
type dlogger = dloggerFake

func dlog1() dloggerFake {
	return dlogFake()
}

// dlogPerM is the per-M debug logger state; empty when debuglog is
// disabled.
type dlogPerM struct{}

// getCachedDlogger returns this M's cached logger; always nil when
// debuglog is disabled.
func getCachedDlogger() *dloggerImpl {
	return nil
}

// putCachedDlogger attempts to cache l on this M; always reports
// false when debuglog is disabled.
func putCachedDlogger(l *dloggerImpl) bool {
	return false
}
// created by cgo -cdefs and then converted to Go
// cgo -cdefs defs_linux.go defs1_linux.go
package runtime
import "unsafe"
// Kernel ABI constants for linux/amd64, originally generated by
// cgo -cdefs from the C headers (see comment above).
const (
	// errno values.
	_EINTR  = 0x4
	_EAGAIN = 0xb
	_ENOMEM = 0xc
	_ENOSYS = 0x26

	// mmap protection bits.
	_PROT_NONE  = 0x0
	_PROT_READ  = 0x1
	_PROT_WRITE = 0x2
	_PROT_EXEC  = 0x4

	// mmap flags.
	_MAP_ANON    = 0x20
	_MAP_PRIVATE = 0x2
	_MAP_FIXED   = 0x10

	// madvise advice values.
	_MADV_DONTNEED   = 0x4
	_MADV_FREE       = 0x8
	_MADV_HUGEPAGE   = 0xe
	_MADV_NOHUGEPAGE = 0xf
	_MADV_COLLAPSE   = 0x19

	// sigaction flags.
	_SA_RESTART  = 0x10000000
	_SA_ONSTACK  = 0x8000000
	_SA_RESTORER = 0x4000000
	_SA_SIGINFO  = 0x4

	// siginfo si_code values.
	_SI_KERNEL = 0x80
	_SI_TIMER  = -0x2

	// Signal numbers.
	_SIGHUP    = 0x1
	_SIGINT    = 0x2
	_SIGQUIT   = 0x3
	_SIGILL    = 0x4
	_SIGTRAP   = 0x5
	_SIGABRT   = 0x6
	_SIGBUS    = 0x7
	_SIGFPE    = 0x8
	_SIGKILL   = 0x9
	_SIGUSR1   = 0xa
	_SIGSEGV   = 0xb
	_SIGUSR2   = 0xc
	_SIGPIPE   = 0xd
	_SIGALRM   = 0xe
	// _SIGTERM (15) was missing from this generated list; the runtime's
	// signal handling refers to it, and Linux defines SIGTERM = 15.
	_SIGTERM   = 0xf
	_SIGSTKFLT = 0x10
	_SIGCHLD   = 0x11
	_SIGCONT   = 0x12
	_SIGSTOP   = 0x13
	_SIGTSTP   = 0x14
	_SIGTTIN   = 0x15
	_SIGTTOU   = 0x16
	_SIGURG    = 0x17
	_SIGXCPU   = 0x18
	_SIGXFSZ   = 0x19
	_SIGVTALRM = 0x1a
	_SIGPROF   = 0x1b
	_SIGWINCH  = 0x1c
	_SIGIO     = 0x1d
	_SIGPWR    = 0x1e
	_SIGSYS    = 0x1f
	_SIGRTMIN  = 0x20

	// SIGFPE si_code values.
	_FPE_INTDIV = 0x1
	_FPE_INTOVF = 0x2
	_FPE_FLTDIV = 0x3
	_FPE_FLTOVF = 0x4
	_FPE_FLTUND = 0x5
	_FPE_FLTRES = 0x6
	_FPE_FLTINV = 0x7
	_FPE_FLTSUB = 0x8

	// SIGBUS si_code values.
	_BUS_ADRALN = 0x1
	_BUS_ADRERR = 0x2
	_BUS_OBJERR = 0x3

	// SIGSEGV si_code values.
	_SEGV_MAPERR = 0x1
	_SEGV_ACCERR = 0x2

	// Interval timer types.
	_ITIMER_REAL    = 0x0
	_ITIMER_VIRTUAL = 0x1
	_ITIMER_PROF    = 0x2

	_CLOCK_THREAD_CPUTIME_ID = 0x3

	_SIGEV_THREAD_ID = 0x4

	// Socket constants.
	_AF_UNIX    = 0x1
	_SOCK_DGRAM = 0x2
)
// timespec mirrors the kernel's 64-bit struct timespec.
type timespec struct {
	tv_sec  int64
	tv_nsec int64
}

// setNsec sets ts to a duration of ns nanoseconds, split into whole
// seconds and the nanosecond remainder.
//
//go:nosplit
func (ts *timespec) setNsec(ns int64) {
	sec := ns / 1e9
	ts.tv_sec = sec
	ts.tv_nsec = ns - sec*1e9
}
// timeval mirrors the kernel's 64-bit struct timeval.
type timeval struct {
	tv_sec  int64
	tv_usec int64
}

// set_usec stores x in the microseconds field.
func (tv *timeval) set_usec(x int32) {
	tv.tv_usec = int64(x)
}
// sigactiont mirrors the kernel's struct sigaction.
type sigactiont struct {
	sa_handler  uintptr
	sa_flags    uint64
	sa_restorer uintptr
	sa_mask     uint64
}

// siginfoFields holds the part of siginfo the runtime reads.
type siginfoFields struct {
	si_signo int32
	si_errno int32
	si_code  int32
	// below here is a union; si_addr is the only field we use
	si_addr uint64
}

type siginfo struct {
	siginfoFields

	// Pad struct to the max size in the kernel.
	_ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
}

// itimerspec is a timer interval/value pair expressed in timespecs.
type itimerspec struct {
	it_interval timespec
	it_value    timespec
}

// itimerval is a timer interval/value pair expressed in timevals.
type itimerval struct {
	it_interval timeval
	it_value    timeval
}

// sigeventFields holds the part of sigevent the runtime uses.
type sigeventFields struct {
	value  uintptr
	signo  int32
	notify int32
	// below here is a union; sigev_notify_thread_id is the only field we use
	sigev_notify_thread_id int32
}

type sigevent struct {
	sigeventFields

	// Pad struct to the max size in the kernel.
	_ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
}
// created by cgo -cdefs and then converted to Go
// cgo -cdefs defs_linux.go defs1_linux.go

const (
	// open(2) flag bits.
	_O_RDONLY   = 0x0
	_O_WRONLY   = 0x1
	_O_CREAT    = 0x40
	_O_TRUNC    = 0x200
	_O_NONBLOCK = 0x800
	_O_CLOEXEC  = 0x80000
)

// usigset is a kernel-sized signal set.
type usigset struct {
	__val [16]uint64
}

// fpxreg is one x87 floating-point register slot.
type fpxreg struct {
	significand [4]uint16
	exponent    uint16
	padding     [3]uint16
}

// xmmreg is one 128-bit SSE register.
type xmmreg struct {
	element [4]uint32
}

// fpstate holds saved x87/SSE floating point state.
type fpstate struct {
	cwd       uint16
	swd       uint16
	ftw       uint16
	fop       uint16
	rip       uint64
	rdp       uint64
	mxcsr     uint32
	mxcr_mask uint32
	_st       [8]fpxreg
	_xmm      [16]xmmreg
	padding   [24]uint32
}

type fpxreg1 struct {
	significand [4]uint16
	exponent    uint16
	padding     [3]uint16
}

type xmmreg1 struct {
	element [4]uint32
}

// fpstate1 is the layout referenced from sigcontext.
type fpstate1 struct {
	cwd       uint16
	swd       uint16
	ftw       uint16
	fop       uint16
	rip       uint64
	rdp       uint64
	mxcsr     uint32
	mxcr_mask uint32
	_st       [8]fpxreg1
	_xmm      [16]xmmreg1
	padding   [24]uint32
}

type fpreg1 struct {
	significand [4]uint16
	exponent    uint16
}

// stackt mirrors the kernel's stack_t (alternate signal stack).
type stackt struct {
	ss_sp     *byte
	ss_flags  int32
	pad_cgo_0 [4]byte
	ss_size   uintptr
}

type mcontext struct {
	gregs       [23]uint64
	fpregs      *fpstate
	__reserved1 [8]uint64
}

// ucontext is the user context passed to signal handlers.
type ucontext struct {
	uc_flags    uint64
	uc_link     *ucontext
	uc_stack    stackt
	uc_mcontext mcontext
	uc_sigmask  usigset
	__fpregs_mem fpstate
}

// sigcontext is the amd64 machine state saved at signal delivery.
type sigcontext struct {
	r8          uint64
	r9          uint64
	r10         uint64
	r11         uint64
	r12         uint64
	r13         uint64
	r14         uint64
	r15         uint64
	rdi         uint64
	rsi         uint64
	rbp         uint64
	rbx         uint64
	rdx         uint64
	rax         uint64
	rcx         uint64
	rsp         uint64
	rip         uint64
	eflags      uint64
	cs          uint16
	gs          uint16
	fs          uint16
	__pad0      uint16
	err         uint64
	trapno      uint64
	oldmask     uint64
	cr2         uint64
	fpstate     *fpstate1
	__reserved1 [8]uint64
}

// sockaddr_un mirrors the kernel's struct sockaddr_un (AF_UNIX).
type sockaddr_un struct {
	family uint16
	path   [108]byte
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/sys"
_ "unsafe"
)
// dit_setEnabled records that this goroutine (and its current M)
// wants data-independent timing, enables it in hardware, and reports
// whether the hardware enable took effect. Linked into crypto/subtle.
//
//go:linkname dit_setEnabled crypto/subtle.setDITEnabled
func dit_setEnabled() bool {
	g := getg()
	// Mark both the goroutine and the M it is currently running on.
	g.ditWanted = true
	g.m.ditEnabled = true
	return sys.EnableDIT()
}

// dit_setDisabled clears the DIT request on this goroutine and its
// current M and disables it in hardware. Linked into crypto/subtle.
//
//go:linkname dit_setDisabled crypto/subtle.setDITDisabled
func dit_setDisabled() {
	g := getg()
	g.ditWanted = false
	g.m.ditEnabled = false
	sys.DisableDIT()
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// gogetenv looks up key in the process environment and returns its
// value, or "" if the variable is not set. It throws if called before
// the environment has been initialized.
func gogetenv(key string) string {
	env := environ()
	if env == nil {
		throw("getenv before env init")
	}
	for _, kv := range env {
		// Each entry has the form "name=value"; the name must be
		// exactly len(key) bytes followed by '='.
		if len(kv) <= len(key) || kv[len(key)] != '=' {
			continue
		}
		if envKeyEqual(kv[:len(key)], key) {
			return kv[len(key)+1:]
		}
	}
	return ""
}
// envKeyEqual reports whether a == b, with ASCII-only case insensitivity
// on Windows. The two strings must have the same length.
func envKeyEqual(a, b string) bool {
	if GOOS != "windows" {
		return a == b
	}
	// Windows environment names compare case-insensitively (ASCII only).
	for i := 0; i < len(a); i++ {
		if a[i] != b[i] && lowerASCII(a[i]) != lowerASCII(b[i]) {
			return false
		}
	}
	return true
}
// lowerASCII returns the ASCII lowercase form of c; bytes outside
// 'A'..'Z' are returned unchanged.
func lowerASCII(c byte) byte {
	if c < 'A' || c > 'Z' {
		return c
	}
	return c - 'A' + 'a'
}
// _cgo_setenv should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ebitengine/purego
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname _cgo_setenv
var _cgo_setenv unsafe.Pointer // pointer to C function; nil when cgo is not in use (see setenv_c)

// _cgo_unsetenv should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ebitengine/purego
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname _cgo_unsetenv
var _cgo_unsetenv unsafe.Pointer // pointer to C function; nil when cgo is not in use (see unsetenv_c)
// Update the C environment if cgo is loaded.
// setenv_c passes k=v to the C setenv hook via asmcgocall.
func setenv_c(k string, v string) {
	if _cgo_setenv == nil {
		// Not a cgo program; nothing to keep in sync.
		return
	}
	arg := [2]unsafe.Pointer{cstring(k), cstring(v)}
	asmcgocall(_cgo_setenv, unsafe.Pointer(&arg))
}

// Update the C environment if cgo is loaded.
// unsetenv_c passes k to the C unsetenv hook via asmcgocall.
func unsetenv_c(k string) {
	if _cgo_unsetenv == nil {
		// Not a cgo program; nothing to keep in sync.
		return
	}
	arg := [1]unsafe.Pointer{cstring(k)}
	asmcgocall(_cgo_unsetenv, unsafe.Pointer(&arg))
}
// cstring returns a pointer to a freshly allocated, NUL-terminated
// copy of s, suitable for passing to C code.
func cstring(s string) unsafe.Pointer {
	buf := append(make([]byte, 0, len(s)+1), s...)
	buf = append(buf, 0)
	return unsafe.Pointer(&buf[0])
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/bytealg"
"internal/runtime/sys"
)
// Error identifies a runtime error used in panic.
//
// The Go runtime triggers panics for a variety of cases, as described by the
// Go Language Spec, such as out-of-bounds slice/array access, close of nil
// channels, type assertion failures, etc.
//
// When these cases occur, the Go runtime panics with an error that implements
// Error. This can be useful when recovering from panics to distinguish between
// custom application panics and fundamental runtime panics.
//
// Packages outside of the Go standard library should not implement Error.
type Error interface {
	error

	// RuntimeError is a no-op function but
	// serves to distinguish types that are runtime
	// errors from ordinary errors: a type is a
	// runtime error if it has a RuntimeError method.
	RuntimeError()
}

// A TypeAssertionError explains a failed type assertion.
type TypeAssertionError struct {
	_interface    *_type // interface type involved, or nil
	concrete      *_type // dynamic type of the value, or nil if the value was nil
	asserted      *_type // type the program asserted to
	missingMethod string // one method needed by Interface, missing from Concrete
}
// RuntimeError marks TypeAssertionError as a runtime error.
func (*TypeAssertionError) RuntimeError() {}

// Error formats the failed assertion, distinguishing nil values,
// mismatched types, and missing methods. When two distinct types
// print with the same name, it notes whether they come from different
// packages or different scopes.
func (e *TypeAssertionError) Error() string {
	inter := "interface"
	if e._interface != nil {
		inter = toRType(e._interface).string()
	}
	as := toRType(e.asserted).string()
	if e.concrete == nil {
		return "interface conversion: " + inter + " is nil, not " + as
	}
	cs := toRType(e.concrete).string()
	if e.missingMethod == "" {
		msg := "interface conversion: " + inter + " is " + cs + ", not " + as
		if cs == as {
			// provide slightly clearer error message
			if toRType(e.concrete).pkgpath() != toRType(e.asserted).pkgpath() {
				msg += " (types from different packages)"
			} else {
				msg += " (types from different scopes)"
			}
		}
		return msg
	}
	return "interface conversion: " + cs + " is not " + as +
		": missing method " + e.missingMethod
}
// itoa converts val to a decimal representation. The result is
// written somewhere within buf and the location of the result is returned.
// buf must be at least 20 bytes.
//
//go:nosplit
func itoa(buf []byte, val uint64) []byte {
	// Emit digits least-significant first, filling buf from the end.
	w := len(buf)
	for {
		w--
		buf[w] = '0' + byte(val%10)
		val /= 10
		if val == 0 {
			return buf[w:]
		}
	}
}
// An errorString represents a runtime error described by a single string.
type errorString string

// RuntimeError marks errorString as a runtime error.
func (e errorString) RuntimeError() {}

func (e errorString) Error() string {
	return "runtime error: " + string(e)
}
// errorAddressString is a runtime error that also carries the memory
// address at which the fault occurred.
type errorAddressString struct {
	msg  string  // error message
	addr uintptr // memory address where the error occurred
}

// RuntimeError marks errorAddressString as a runtime error.
func (e errorAddressString) RuntimeError() {}

func (e errorAddressString) Error() string {
	return "runtime error: " + e.msg
}

// Addr returns the memory address where a fault occurred.
// The address provided is best-effort.
// The veracity of the result may depend on the platform.
// Errors providing this method will only be returned as
// a result of using [runtime/debug.SetPanicOnFault].
func (e errorAddressString) Addr() uintptr {
	return e.addr
}
// plainError represents a runtime error described a string without
// the prefix "runtime error: " after invoking errorString.Error().
// See Issue #14965.
type plainError string

// RuntimeError marks plainError as a runtime error.
func (e plainError) RuntimeError() {}

// Error returns the message verbatim, with no prefix.
func (e plainError) Error() string {
	return string(e)
}
// A boundsError represents an indexing or slicing operation gone wrong.
type boundsError struct {
	x int64 // the offending index or bound
	y int   // the length or capacity it was checked against
	// Values in an index or slice expression can be signed or unsigned.
	// That means we'd need 65 bits to encode all possible indexes, from -2^63 to 2^64-1.
	// Instead, we keep track of whether x should be interpreted as signed or unsigned.
	// y is known to be nonnegative and to fit in an int.
	signed bool
	code   abi.BoundsErrorCode // which kind of bounds failure occurred
}
// boundsErrorFmts provide error text for various out-of-bounds panics.
// %x and %y are placeholders expanded by boundsError.Error.
// Note: if you change these strings, you should adjust the size of the buffer
// in boundsError.Error below as well.
var boundsErrorFmts = [...]string{
	abi.BoundsIndex:      "index out of range [%x] with length %y",
	abi.BoundsSliceAlen:  "slice bounds out of range [:%x] with length %y",
	abi.BoundsSliceAcap:  "slice bounds out of range [:%x] with capacity %y",
	abi.BoundsSliceB:     "slice bounds out of range [%x:%y]",
	abi.BoundsSlice3Alen: "slice bounds out of range [::%x] with length %y",
	abi.BoundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y",
	abi.BoundsSlice3B:    "slice bounds out of range [:%x:%y]",
	abi.BoundsSlice3C:    "slice bounds out of range [%x:%y:]",
	abi.BoundsConvert:    "cannot convert slice with length %y to array or pointer to array with length %x",
}

// boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y.
var boundsNegErrorFmts = [...]string{
	abi.BoundsIndex:      "index out of range [%x]",
	abi.BoundsSliceAlen:  "slice bounds out of range [:%x]",
	abi.BoundsSliceAcap:  "slice bounds out of range [:%x]",
	abi.BoundsSliceB:     "slice bounds out of range [%x:]",
	abi.BoundsSlice3Alen: "slice bounds out of range [::%x]",
	abi.BoundsSlice3Acap: "slice bounds out of range [::%x]",
	abi.BoundsSlice3B:    "slice bounds out of range [:%x:]",
	abi.BoundsSlice3C:    "slice bounds out of range [%x::]",
}

// RuntimeError marks boundsError as a runtime error.
func (e boundsError) RuntimeError() {}
// appendIntStr appends the decimal form of v to b. If signed is set,
// v is treated as a signed value and negatives get a leading '-';
// otherwise v's bit pattern is printed as an unsigned number.
func appendIntStr(b []byte, v int64, signed bool) []byte {
	if signed && v < 0 {
		b = append(b, '-')
		v = -v
	}
	var digits [20]byte
	return append(b, itoa(digits[:], uint64(v))...)
}
// Error expands the format string for e.code, substituting %x with
// the offending index/bound and %y with the length or capacity.
func (e boundsError) Error() string {
	fmt := boundsErrorFmts[e.code]
	if e.signed && e.x < 0 {
		// Negative x: use the shorter formats that omit y.
		fmt = boundsNegErrorFmts[e.code]
	}
	// max message length is 99: "runtime error: slice bounds out of range [::%x] with capacity %y"
	// x can be at most 20 characters. y can be at most 19.
	b := make([]byte, 0, 100)
	b = append(b, "runtime error: "...)
	// Expand the mini format language: only %x and %y are recognized.
	for i := 0; i < len(fmt); i++ {
		c := fmt[i]
		if c != '%' {
			b = append(b, c)
			continue
		}
		i++
		switch fmt[i] {
		case 'x':
			b = appendIntStr(b, e.x, e.signed)
		case 'y':
			b = appendIntStr(b, int64(e.y), true)
		}
	}
	return string(b)
}
// stringer is the subset of fmt.Stringer the runtime needs, declared
// locally since the runtime cannot import fmt.
type stringer interface {
	String() string
}
// printpanicval prints an argument passed to panic.
// If panic is called with a value that has a String or Error method,
// it has already been converted into a string by preprintpanics.
//
// To ensure that the traceback can be unambiguously parsed even when
// the panic value contains "\ngoroutine" and other stack-like
// strings, newlines in the string representation of v are replaced by
// "\n\t".
func printpanicval(v any) {
	// Built-in types print directly; everything else goes through
	// printanycustomtype, which also prints the type name.
	switch v := v.(type) {
	case nil:
		print("nil")
	case bool:
		print(v)
	case int:
		print(v)
	case int8:
		print(v)
	case int16:
		print(v)
	case int32:
		print(v)
	case int64:
		print(v)
	case uint:
		print(v)
	case uint8:
		print(v)
	case uint16:
		print(v)
	case uint32:
		print(v)
	case uint64:
		print(v)
	case uintptr:
		print(v)
	case float32:
		print(v)
	case float64:
		print(v)
	case complex64:
		print(v)
	case complex128:
		print(v)
	case string:
		printindented(v)
	default:
		printanycustomtype(v)
	}
}
// printanycustomtype prints a panic value of a named (non-built-in)
// type as TypeName(value), dispatching on the underlying kind.
// Invariant: each newline in the string representation is followed by a tab.
func printanycustomtype(i any) {
	eface := efaceOf(&i)
	typestring := toRType(eface._type).string()

	switch eface._type.Kind() {
	case abi.String:
		print(typestring, `("`)
		printindented(*(*string)(eface.data))
		print(`")`)
	case abi.Bool:
		print(typestring, "(", *(*bool)(eface.data), ")")
	case abi.Int:
		print(typestring, "(", *(*int)(eface.data), ")")
	case abi.Int8:
		print(typestring, "(", *(*int8)(eface.data), ")")
	case abi.Int16:
		print(typestring, "(", *(*int16)(eface.data), ")")
	case abi.Int32:
		print(typestring, "(", *(*int32)(eface.data), ")")
	case abi.Int64:
		print(typestring, "(", *(*int64)(eface.data), ")")
	case abi.Uint:
		print(typestring, "(", *(*uint)(eface.data), ")")
	case abi.Uint8:
		print(typestring, "(", *(*uint8)(eface.data), ")")
	case abi.Uint16:
		print(typestring, "(", *(*uint16)(eface.data), ")")
	case abi.Uint32:
		print(typestring, "(", *(*uint32)(eface.data), ")")
	case abi.Uint64:
		print(typestring, "(", *(*uint64)(eface.data), ")")
	case abi.Uintptr:
		print(typestring, "(", *(*uintptr)(eface.data), ")")
	case abi.Float32:
		print(typestring, "(", *(*float32)(eface.data), ")")
	case abi.Float64:
		print(typestring, "(", *(*float64)(eface.data), ")")
	case abi.Complex64:
		print(typestring, *(*complex64)(eface.data))
	case abi.Complex128:
		print(typestring, *(*complex128)(eface.data))
	default:
		// Unsupported kind: fall back to the type name and the raw
		// data word.
		print("(", typestring, ") ", eface.data)
	}
}
// printindented prints s, replacing "\n" with "\n\t".
func printindented(s string) {
	for nl := bytealg.IndexByteString(s, '\n'); nl >= 0; nl = bytealg.IndexByteString(s, '\n') {
		// Print up to and including the newline, then the indent tab.
		nl++
		print(s[:nl], "\t")
		s = s[nl:]
	}
	print(s)
}
// panicwrap generates a panic for a call to a wrapped value method
// with a nil pointer receiver.
//
// It is called from the generated wrapper code.
func panicwrap() {
	pc := sys.GetCallerPC()
	name := funcNameForPrint(funcname(findfunc(pc)))
	// name is something like "main.(*T).F".
	// We want to extract pkg ("main"), typ ("T"), and meth ("F").
	// Do it by finding the parens.
	i := bytealg.IndexByteString(name, '(')
	if i < 0 {
		throw("panicwrap: no ( in " + name)
	}
	pkg := name[:i-1]
	if i+2 >= len(name) || name[i-1:i+2] != ".(*" {
		throw("panicwrap: unexpected string after package name: " + name)
	}
	// Trim to the text inside "(*...)".
	name = name[i+2:]
	i = bytealg.IndexByteString(name, ')')
	if i < 0 {
		throw("panicwrap: no ) in " + name)
	}
	if i+2 >= len(name) || name[i:i+2] != ")." {
		throw("panicwrap: unexpected string after type name: " + name)
	}
	typ := name[:i]
	meth := name[i+2:]
	panic(plainError("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer"))
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package runtime contains operations that interact with Go's runtime system,
such as functions to control goroutines. It also includes the low-level type information
used by the reflect package; see [reflect]'s documentation for the programmable
interface to the run-time type system.
# Environment Variables
The following environment variables ($name or %name%, depending on the host
operating system) control the run-time behavior of Go programs. The meanings
and use may change from release to release.
The GOGC variable sets the initial garbage collection target percentage.
A collection is triggered when the ratio of freshly allocated data to live data
remaining after the previous collection reaches this percentage. The default
is GOGC=100. Setting GOGC=off disables the garbage collector entirely.
[runtime/debug.SetGCPercent] allows changing this percentage at run time.
The GOMEMLIMIT variable sets a soft memory limit for the runtime. This memory limit
includes the Go heap and all other memory managed by the runtime, and excludes
external memory sources such as mappings of the binary itself, memory managed in
other languages, and memory held by the operating system on behalf of the Go
program. GOMEMLIMIT is a numeric value in bytes with an optional unit suffix.
The supported suffixes include B, KiB, MiB, GiB, and TiB. These suffixes
represent quantities of bytes as defined by the IEC 80000-13 standard. That is,
they are based on powers of two: KiB means 2^10 bytes, MiB means 2^20 bytes,
and so on. The default setting is [math.MaxInt64], which effectively disables the
memory limit. [runtime/debug.SetMemoryLimit] allows changing this limit at run
time.
The GODEBUG variable controls debugging variables within the runtime.
It is a comma-separated list of name=val pairs setting these named variables:
clobberfree: setting clobberfree=1 causes the garbage collector to
clobber the memory content of an object with bad content when it frees
the object.
cpu.*: cpu.all=off disables the use of all optional instruction set extensions.
cpu.extension=off disables use of instructions from the specified instruction set extension.
extension is the lower case name for the instruction set extension such as sse41 or avx
as listed in internal/cpu package. As an example cpu.avx=off disables runtime detection
and thereby use of AVX instructions.
cgocheck: setting cgocheck=0 disables all checks for packages
using cgo to incorrectly pass Go pointers to non-Go code.
Setting cgocheck=1 (the default) enables relatively cheap
checks that may miss some errors. A more complete, but slow,
cgocheck mode can be enabled using GOEXPERIMENT (which
requires a rebuild), see https://pkg.go.dev/internal/goexperiment for details.
checkfinalizers: setting checkfinalizers=1 causes the garbage collector to run
multiple partial non-parallel stop-the-world collections to identify common issues with
finalizers and cleanups, like those listed at
https://go.dev/doc/gc-guide#Finalizers_cleanups_and_weak_pointers. If a potential issue
is found, the program will terminate with a description of all potential issues, the
associated values, and a list of those values' finalizers and cleanups, including where
they were created. It also adds tracking for tiny blocks to help diagnose issues with
those as well. The analysis performed during the partial collection is conservative.
Notably, it flags any path back to the original object from the cleanup function,
cleanup arguments, or finalizer function as a potential issue, even if that path might
be severed sometime later during execution (though this is not a recommended pattern).
This mode also produces one line of output to stderr every GC cycle with information
about the finalizer and cleanup queue lengths. Lines produced by this mode start with
"checkfinalizers:".
decoratemappings: controls whether the Go runtime annotates OS
anonymous memory mappings with context about their purpose. These
annotations appear in /proc/self/maps and /proc/self/smaps as
"[anon: Go: ...]". This setting is only used on Linux. For Go 1.25, it
defaults to `decoratemappings=1`, enabling annotations. Using
`decoratemappings=0` reverts to the pre-Go 1.25 behavior.
disablethp: setting disablethp=1 on Linux disables transparent huge pages for the heap.
It has no effect on other platforms. disablethp is meant for compatibility with versions
of Go before 1.21, which stopped working around a Linux kernel default that can result
in significant memory overuse. See https://go.dev/issue/64332. This setting will be
removed in a future release, so operators should tweak their Linux configuration to suit
their needs before then. See https://go.dev/doc/gc-guide#Linux_transparent_huge_pages.
dontfreezetheworld: by default, the start of a fatal panic or throw
"freezes the world", preempting all threads to stop all running
goroutines, which makes it possible to traceback all goroutines, and
keeps their state close to the point of panic. Setting
dontfreezetheworld=1 disables this preemption, allowing goroutines to
continue executing during panic processing. Note that goroutines that
naturally enter the scheduler will still stop. This can be useful when
debugging the runtime scheduler, as freezetheworld perturbs scheduler
state and thus may hide problems.
efence: setting efence=1 causes the allocator to run in a mode
where each object is allocated on a unique page and addresses are
never recycled.
gccheckmark: setting gccheckmark=1 enables verification of the
garbage collector's concurrent mark phase by performing a
second mark pass while the world is stopped. If the second
pass finds a reachable object that was not found by concurrent
mark, the garbage collector will panic.
gcpacertrace: setting gcpacertrace=1 causes the garbage collector to
print information about the internal state of the concurrent pacer.
gcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines
onto smaller stacks. In this mode, a goroutine's stack can only grow.
gcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection,
making every garbage collection a stop-the-world event. Setting gcstoptheworld=2
also disables concurrent sweeping after the garbage collection finishes.
gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard
error at each collection, summarizing the amount of memory collected and the
length of the pause. The format of this line is subject to change. Included in
the explanation below is also the relevant runtime/metrics metric for each field.
Currently, it is:
gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # MB stacks, #MB globals, # P
where the fields are as follows:
gc # the GC number, incremented at each GC
@#s time in seconds since program start
#% percentage of time spent in GC since program start
#+...+# wall-clock/CPU times for the phases of the GC
#->#-># MB heap size at GC start, at GC end, and live heap, or /gc/scan/heap:bytes
# MB goal goal heap size, or /gc/heap/goal:bytes
# MB stacks estimated scannable stack size, or /gc/scan/stack:bytes
# MB globals scannable global size, or /gc/scan/globals:bytes
# P number of processors used, or /sched/gomaxprocs:threads
The phases are stop-the-world (STW) sweep termination, concurrent
mark and scan, and STW mark termination. The CPU times
for mark/scan are broken down into assist time (GC performed in
line with allocation), background GC time, and idle GC time.
If the line ends with "(forced)", this GC was forced by a
runtime.GC() call.
harddecommit: setting harddecommit=1 causes memory that is returned to the OS to
also have protections removed on it. This is the only mode of operation on Windows,
but is helpful in debugging scavenger-related issues on other platforms. Currently,
only supported on Linux.
inittrace: setting inittrace=1 causes the runtime to emit a single line to standard
error for each package with init work, summarizing the execution time and memory
allocation. No information is printed for inits executed as part of plugin loading
and for packages without both user defined and compiler generated init work.
The format of this line is subject to change. Currently, it is:
init # @#ms, # ms clock, # bytes, # allocs
where the fields are as follows:
init # the package name
@# ms time in milliseconds when the init started since program start
# clock wall-clock time for package initialization work
# bytes memory allocated on the heap
# allocs number of heap allocations
madvdontneed: setting madvdontneed=0 will use MADV_FREE
instead of MADV_DONTNEED on Linux when returning memory to the
kernel. This is more efficient, but means RSS numbers will
drop only when the OS is under memory pressure. On the BSDs and
Illumos/Solaris, setting madvdontneed=1 will use MADV_DONTNEED instead
of MADV_FREE. This is less efficient, but causes RSS numbers to drop
more quickly.
memprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate.
When set to 0 memory profiling is disabled. Refer to the description of
MemProfileRate for the default value.
profstackdepth: profstackdepth=128 (the default) will set the maximum stack
depth used by all pprof profilers except for the CPU profiler to 128 frames.
Stack traces that exceed this limit will be truncated to the limit starting
from the leaf frame. Setting profstackdepth to any value above 1024 will
silently default to 1024. Future versions of Go may remove this limitation
and extend profstackdepth to apply to the CPU profiler and execution tracer.
panicnil: setting panicnil=1 disables the runtime error when calling panic with nil
interface value or an untyped nil.
invalidptr: invalidptr=1 (the default) causes the garbage collector and stack
copier to crash the program if an invalid pointer value (for example, 1)
is found in a pointer-typed location. Setting invalidptr=0 disables this check.
This should only be used as a temporary workaround to diagnose buggy code.
The real fix is to not store integers in pointer-typed locations.
sbrk: setting sbrk=1 replaces the memory allocator and garbage collector
with a trivial allocator that obtains memory from the operating system and
never reclaims any memory.
scavtrace: setting scavtrace=1 causes the runtime to emit a single line to standard
error, roughly once per GC cycle, summarizing the amount of work done by the
scavenger as well as the total amount of memory returned to the operating system
and an estimate of physical memory utilization. The format of this line is subject
to change, but currently it is:
scav # KiB work (bg), # KiB work (eager), # KiB total, #% util
where the fields are as follows:
# KiB work (bg) the amount of memory returned to the OS in the background since
the last line
# KiB work (eager) the amount of memory returned to the OS eagerly since the last line
# KiB total the amount of address space currently returned to the OS
#% util the fraction of all unscavenged heap memory which is in-use
If the line ends with "(forced)", then scavenging was forced by a
debug.FreeOSMemory() call.
scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit
detailed multiline info every X milliseconds, describing state of the scheduler,
processors, threads and goroutines.
schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard
error every X milliseconds, summarizing the scheduler state.
tracebackancestors: setting tracebackancestors=N extends tracebacks with the stacks at
which goroutines were created, where N limits the number of ancestor goroutines to
report. This also extends the information returned by runtime.Stack.
Setting N to 0 will report no ancestry information.
tracefpunwindoff: setting tracefpunwindoff=1 forces the execution tracer
and block and mutex profilers to use the runtime's default stack
unwinder instead of frame pointer unwinding. This increases their
overhead, but could be helpful as a workaround or for debugging
unexpected regressions caused by frame pointer unwinding.
traceadvanceperiod: the approximate period in nanoseconds between trace generations. Only
applies if a program is built with GOEXPERIMENT=exectracer2. Used primarily for testing
and debugging the execution tracer.
tracecheckstackownership: setting tracecheckstackownership=1 enables a debug check in the
execution tracer to double-check stack ownership before taking a stack trace.
asyncpreemptoff: asyncpreemptoff=1 disables signal-based
asynchronous goroutine preemption. This makes some loops
non-preemptible for long periods, which may delay GC and
goroutine scheduling. This is useful for debugging GC issues
because it also disables the conservative stack scanning used
for asynchronously preempted goroutines.
The [net] and [net/http] packages also refer to debugging variables in GODEBUG.
See the documentation for those packages for details.
The GOMAXPROCS variable limits the number of operating system threads that
can execute user-level Go code simultaneously. There is no limit to the number of threads
that can be blocked in system calls on behalf of Go code; those do not count against
the GOMAXPROCS limit. This package's [GOMAXPROCS] function queries and changes
the limit.
The GORACE variable configures the race detector, for programs built using -race.
See the [Race Detector article] for details.
The GOTRACEBACK variable controls the amount of output generated when a Go
program fails due to an unrecovered panic or an unexpected runtime condition.
By default, a failure prints a stack trace for the current goroutine,
eliding functions internal to the run-time system, and then exits with exit code 2.
The failure prints stack traces for all goroutines if there is no current goroutine
or the failure is internal to the run-time.
GOTRACEBACK=none omits the goroutine stack traces entirely.
GOTRACEBACK=single (the default) behaves as described above.
GOTRACEBACK=all adds stack traces for all user-created goroutines.
GOTRACEBACK=system is like “all” but adds stack frames for run-time functions
and shows goroutines created internally by the run-time.
GOTRACEBACK=crash is like “system” but crashes in an operating system-specific
manner instead of exiting. For example, on Unix systems, the crash raises
SIGABRT to trigger a core dump.
GOTRACEBACK=wer is like “crash” but doesn't disable Windows Error Reporting (WER).
For historical reasons, the GOTRACEBACK settings 0, 1, and 2 are synonyms for
none, all, and system, respectively.
The [runtime/debug.SetTraceback] function allows increasing the
amount of output at run time, but it cannot reduce the amount below that
specified by the environment variable.
The GOARCH, GOOS, GOPATH, and GOROOT environment variables complete
the set of Go environment variables. They influence the building of Go programs
(see [cmd/go] and [go/build]).
GOARCH, GOOS, and GOROOT are recorded at compile time and made available by
constants or functions in this package, but they do not influence the execution
of the run-time system.
# Security
On Unix platforms, Go's runtime system behaves slightly differently when a
binary is setuid/setgid or executed with setuid/setgid-like properties, in order
to prevent dangerous behaviors. On Linux this is determined by checking for the
AT_SECURE flag in the auxiliary vector, on the BSDs and Solaris/Illumos it is
determined by checking the issetugid syscall, and on AIX it is determined by
checking if the uid/gid match the effective uid/gid.
When the runtime determines the binary is setuid/setgid-like, it does two main
things:
- The value of the GOTRACEBACK environment variable is set to 'none'.
- When a signal is received that terminates the program, or the program
encounters an unrecoverable panic that would otherwise override the value
of GOTRACEBACK, the goroutine stack, registers, and other memory related
information are omitted.
Additionally, on Unix platforms the Go runtime automatically checks whether
the standard input/output file descriptors (0, 1, 2) are open. If any of them are
closed, they are opened pointing at /dev/null.
[Race Detector article]: https://go.dev/doc/articles/race_detector
*/
package runtime
import (
"internal/goarch"
"internal/goos"
)
// Caller reports file and line number information about function invocations on
// the calling goroutine's stack. The argument skip is the number of stack frames
// to ascend, with 0 identifying the caller of Caller. (For historical reasons the
// meaning of skip differs between Caller and [Callers].) The return values report
// the program counter, the file name (using forward slashes as path separator, even
// on Windows), and the line number within the file of the corresponding call.
// The boolean ok is false if it was not possible to recover the information.
func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
	// Ask for a single frame, skipping one extra level for Caller itself.
	var pcbuf [1]uintptr
	if callers(skip+1, pcbuf[:]) < 1 {
		return
	}
	frame, _ := CallersFrames(pcbuf[:]).Next()
	return frame.PC, frame.File, frame.Line, frame.PC != 0
}
// Callers fills the slice pc with the return program counters of function invocations
// on the calling goroutine's stack. The argument skip is the number of stack frames
// to skip before recording in pc, with 0 identifying the frame for Callers itself and
// 1 identifying the caller of Callers.
// It returns the number of entries written to pc.
//
// To translate these PCs into symbolic information such as function
// names and line numbers, use [CallersFrames]. CallersFrames accounts
// for inlined functions and adjusts the return program counters into
// call program counters. Iterating over the returned slice of PCs
// directly is discouraged, as is using [FuncForPC] on any of the
// returned PCs, since these cannot account for inlining or return
// program counter adjustment.
func Callers(skip int, pc []uintptr) int {
	// runtime.callers treats a nil pc.array as a request to print a
	// stack trace, so intercept zero-length slices here rather than
	// letting a nil slice reach it.
	if len(pc) > 0 {
		return callers(skip, pc)
	}
	return 0
}
var defaultGOROOT string // set by cmd/link

// GOROOT returns the root of the Go tree. It uses the
// GOROOT environment variable, if set at process start,
// or else the root used during the Go build.
//
// Deprecated: The root used during the Go build will not be
// meaningful if the binary is copied to another machine.
// Use the system path to locate the “go” binary, and use
// “go env GOROOT” to find its GOROOT.
func GOROOT() string {
	// The environment variable, captured at process start, wins.
	if env := gogetenv("GOROOT"); env != "" {
		return env
	}
	return defaultGOROOT
}
// buildVersion is the Go tree's version string at build time.
//
// If any GOEXPERIMENTs are set to non-default values, it will include
// "X:<GOEXPERIMENT>".
//
// This is set by the linker. (It remains the zero-value empty string
// until the linker injects a value.)
//
// This is accessed by "go version <binary>".
var buildVersion string
// Version returns the Go tree's version string.
// It is either the commit hash and date at the time of the build or,
// when possible, a release tag like "go1.3".
func Version() string {
	// buildVersion is populated by the linker; report it verbatim.
	version := buildVersion
	return version
}
// GOOS is the running program's operating system target:
// one of darwin, freebsd, linux, and so on.
// To view possible combinations of GOOS and GOARCH, run "go tool dist list".
// The value is fixed at compile time.
const GOOS string = goos.GOOS

// GOARCH is the running program's architecture target:
// one of 386, amd64, arm, s390x, and so on.
// The value is fixed at compile time.
const GOARCH string = goarch.GOARCH
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// fastlog2 implements a fast approximation to the base 2 log of a
// float64. This is used to compute a geometric distribution for heap
// sampling, without introducing dependencies into package math. This
// uses a very rough approximation using the float64 exponent and the
// first 25 bits of the mantissa. The top 5 bits of the mantissa are
// used to load limits from a table of constants and the rest are used
// to scale linearly between them.
func fastlog2(x float64) float64 {
	const fastlogScaleBits = 20
	const fastlogScaleRatio = 1.0 / (1 << fastlogScaleBits)

	xBits := float64bits(x)
	// Extract the exponent from the IEEE float64, and index a constant
	// table with the top fastlogNumBits bits of the mantissa.
	xExp := int64((xBits>>52)&0x7FF) - 1023
	xManIndex := (xBits >> (52 - fastlogNumBits)) % (1 << fastlogNumBits)
	// The next fastlogScaleBits bits of the mantissa give the linear
	// interpolation weight between adjacent table entries.
	xManScale := (xBits >> (52 - fastlogNumBits - fastlogScaleBits)) % (1 << fastlogScaleBits)

	low, high := fastlog2Table[xManIndex], fastlog2Table[xManIndex+1]
	return float64(xExp) + low + (high-low)*float64(xManScale)*fastlogScaleRatio
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package runtime
// checkfds verifies that the standard file descriptors (0, 1, 2) are
// open at process start, opening /dev/null in place of any that are
// closed. This keeps later opens from landing on a std fd.
func checkfds() {
	if islibrary || isarchive {
		// If the program is actually a library, presumably being consumed by
		// another program, we don't want to mess around with the file
		// descriptors.
		return
	}

	const (
		// F_GETFD, EBADF, O_RDWR are standard across all unixes we support, so
		// we define them here rather than in each of the OS specific files.
		F_GETFD = 0x01
		EBADF   = 0x09
		O_RDWR  = 0x02
	)

	devNull := []byte("/dev/null\x00")
	for fd := int32(0); fd < 3; fd++ {
		ret, errno := fcntl(fd, F_GETFD, 0)
		if ret >= 0 {
			// Descriptor is already open; nothing to do.
			continue
		}
		// Any failure other than "bad file descriptor" is unexpected.
		if errno != EBADF {
			print("runtime: unexpected error while checking standard file descriptor ", fd, ", errno=", errno, "\n")
			throw("cannot open standard fds")
		}
		got := open(&devNull[0], O_RDWR, 0)
		if got < 0 {
			print("runtime: standard file descriptor ", fd, " closed, unable to open /dev/null, errno=", errno, "\n")
			throw("cannot open standard fds")
		}
		// open returns the lowest free descriptor, which must be the
		// closed fd we are replacing.
		if got != fd {
			print("runtime: opened unexpected file descriptor ", got, " when attempting to open ", fd, "\n")
			throw("cannot open standard fds")
		}
	}
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import _ "unsafe"
//go:linkname fips140_setBypass crypto/fips140.setBypass
func fips140_setBypass() {
	// Mark the current goroutine as bypassing FIPS-only enforcement.
	gp := getg()
	gp.fipsOnlyBypass = true
}
//go:linkname fips140_unsetBypass crypto/fips140.unsetBypass
func fips140_unsetBypass() {
	// Clear the bypass flag on the current goroutine.
	gp := getg()
	gp.fipsOnlyBypass = false
}
//go:linkname fips140_isBypassed crypto/fips140.isBypassed
func fips140_isBypassed() bool {
	// Report whether the current goroutine has the bypass flag set.
	gp := getg()
	return gp.fipsOnlyBypass
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// IEEE 754 float64 layout: 1 sign bit, 11 exponent bits starting at
// bit 52, exponent bias 1023.
const (
	float64Mask  = 0x7FF       // mask for the 11 exponent bits
	float64Shift = 64 - 11 - 1 // bit position of the exponent field (52)
	float64Bias  = 1023        // exponent bias
)

// inf is positive infinity: exponent all ones, mantissa zero.
var inf = float64frombits(0x7FF0000000000000)
// isNaN reports whether f is an IEEE 754 “not-a-number” value.
func isNaN(f float64) (is bool) {
	// IEEE 754 says that only NaNs satisfy f != f.
	is = f != f
	return
}
// isFinite reports whether f is neither NaN nor an infinity.
func isFinite(f float64) bool {
	// f - f is zero for finite f, and NaN for NaN or ±Inf.
	// Only NaN is unequal to itself.
	d := f - f
	return d == d
}
// isInf reports whether f is an infinity.
func isInf(f float64) bool {
	// An infinity equals itself (unlike NaN), but Inf-Inf is NaN,
	// whereas f-f is zero for every finite f.
	d := f - f
	return f == f && d != d
}
// abs returns the absolute value of x.
//
// Special cases are:
//
//	abs(±Inf) = +Inf
//	abs(NaN) = NaN
func abs(x float64) float64 {
	// Clear the sign bit of the IEEE 754 representation.
	const signBit = uint64(1) << 63
	u := *(*uint64)(unsafe.Pointer(&x)) &^ signBit
	return *(*float64)(unsafe.Pointer(&u))
}
// copysign returns a value with the magnitude
// of x and the sign of y.
func copysign(x, y float64) float64 {
	// Combine x's magnitude bits with y's sign bit.
	const signBit = uint64(1) << 63
	xb := *(*uint64)(unsafe.Pointer(&x))
	yb := *(*uint64)(unsafe.Pointer(&y))
	v := xb&^signBit | yb&signBit
	return *(*float64)(unsafe.Pointer(&v))
}
// float64bits returns the IEEE 754 binary representation of f.
func float64bits(f float64) uint64 {
	// Reinterpret the bytes of f as a uint64 without conversion.
	bitsp := (*uint64)(unsafe.Pointer(&f))
	return *bitsp
}
// float64frombits returns the floating point number corresponding
// the IEEE 754 binary representation b.
func float64frombits(b uint64) float64 {
	// Reinterpret the bytes of b as a float64 without conversion.
	fp := (*float64)(unsafe.Pointer(&b))
	return *fp
}
// floor returns the greatest integer value less than or equal to x.
//
// Special cases are:
//
//	floor(±0) = ±0
//	floor(±Inf) = ±Inf
//	floor(NaN) = NaN
//
// N.B. Portable floor copied from math. math also has optimized arch-specific
// implementations.
func floor(x float64) float64 {
	switch {
	case x == 0, isNaN(x), isInf(x):
		// Zeros, NaNs, and infinities floor to themselves.
		return x
	case x > 0:
		// Positive: simply drop the fractional part.
		whole, _ := modf(x)
		return whole
	}
	// Negative: round the magnitude up when a fractional part exists.
	whole, frac := modf(-x)
	if frac != 0.0 {
		whole++
	}
	return -whole
}
// ceil returns the least integer value greater than or equal to x.
//
// Special cases are:
//
//	Ceil(±0) = ±0
//	Ceil(±Inf) = ±Inf
//	Ceil(NaN) = NaN
//
// N.B. Portable ceil copied from math. math also has optimized arch-specific
// implementations.
func ceil(x float64) float64 {
	// Reflect through zero: ceil(x) = -floor(-x).
	flipped := floor(-x)
	return -flipped
}
// modf returns integer and fractional floating-point numbers
// that sum to f. Both values have the same sign as f.
//
// Special cases are:
//
//	Modf(±Inf) = ±Inf, NaN
//	Modf(NaN) = NaN, NaN
//
// N.B. Portable modf copied from math. math also has optimized arch-specific
// implementations.
func modf(f float64) (int float64, frac float64) {
	if f < 1 {
		switch {
		case f < 0:
			// Negative: recurse on the magnitude and flip both signs.
			int, frac = modf(-f)
			return -int, -frac
		case f == 0:
			return f, f // Return -0, -0 when f == -0
		}
		// 0 < f < 1: integer part is zero.
		return 0, f
	}
	// f >= 1, or f is NaN (NaN fails f < 1 and flows through here,
	// yielding NaN, NaN).
	x := float64bits(f)
	// Unbiased exponent = number of mantissa bits that belong to the
	// integer part.
	e := uint(x>>float64Shift)&float64Mask - float64Bias
	// Keep the top 12+e bits, the integer part; clear the rest.
	// (12 = 1 sign bit + 11 exponent bits.)
	if e < 64-12 {
		x &^= 1<<(64-12-e) - 1
	}
	int = float64frombits(x)
	frac = f - int
	return
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Hashing algorithm inspired by
// wyhash: https://github.com/wangyi-fudan/wyhash
//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x
package runtime
import (
"math/bits"
"unsafe"
)
const (
	// m5 is a 64-bit mixing constant used in the final combining step
	// of the fallback hash functions below.
	m5 = 0x1d8e4e27c47d124f
)
// memhashFallback hashes the s bytes at p, mixing in seed. It is the
// portable 64-bit hash (wyhash-inspired, per the file header) used when
// no architecture-specific implementation is selected; the build
// constraint restricts it to 64-bit platforms.
func memhashFallback(p unsafe.Pointer, seed, s uintptr) uintptr {
	var a, b uintptr
	seed ^= hashkey[0]
	switch {
	case s == 0:
		return seed
	case s < 4:
		// 1-3 bytes: pack the first, middle, and last byte. For very
		// short inputs the reads overlap; s is mixed in at the end, so
		// the result still depends on the length.
		a = uintptr(*(*byte)(p))
		a |= uintptr(*(*byte)(add(p, s>>1))) << 8
		a |= uintptr(*(*byte)(add(p, s-1))) << 16
	case s == 4:
		a = r4(p)
		b = a
	case s < 8:
		// 5-7 bytes: two overlapping 4-byte reads cover the buffer.
		a = r4(p)
		b = r4(add(p, s-4))
	case s == 8:
		a = r8(p)
		b = a
	case s <= 16:
		// 9-16 bytes: two (possibly overlapping) 8-byte reads.
		a = r8(p)
		b = r8(add(p, s-8))
	default:
		// More than 16 bytes: consume 48-byte stripes using three
		// independent accumulator lanes, then 16-byte chunks, then
		// finish with the last (possibly overlapping) 16 bytes.
		l := s
		if l > 48 {
			seed1 := seed
			seed2 := seed
			for ; l > 48; l -= 48 {
				seed = mix(r8(p)^hashkey[1], r8(add(p, 8))^seed)
				seed1 = mix(r8(add(p, 16))^hashkey[2], r8(add(p, 24))^seed1)
				seed2 = mix(r8(add(p, 32))^hashkey[3], r8(add(p, 40))^seed2)
				p = add(p, 48)
			}
			// Fold the three lanes back together.
			seed ^= seed1 ^ seed2
		}
		for ; l > 16; l -= 16 {
			seed = mix(r8(p)^hashkey[1], r8(add(p, 8))^seed)
			p = add(p, 16)
		}
		a = r8(add(p, l-16))
		b = r8(add(p, l-8))
	}
	return mix(m5^s, mix(a^hashkey[1], b^seed))
}
// memhash32Fallback hashes exactly 4 bytes at p, mixing in seed.
func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr {
	// A single unaligned 4-byte read feeds both halves of the mix.
	word := r4(p)
	inner := mix(word^hashkey[1], word^seed^hashkey[0])
	return mix(m5^4, inner)
}
// memhash64Fallback hashes exactly 8 bytes at p, mixing in seed.
func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr {
	// A single unaligned 8-byte read feeds both halves of the mix.
	word := r8(p)
	inner := mix(word^hashkey[1], word^seed^hashkey[0])
	return mix(m5^8, inner)
}
// mix combines a and b via a 64x64->128 bit multiply, folding the two
// halves of the product together with xor.
func mix(a, b uintptr) uintptr {
	upper, lower := bits.Mul64(uint64(a), uint64(b))
	return uintptr(lower ^ upper)
}
// r4 performs an unaligned 4-byte load at p, widened to uintptr.
func r4(p unsafe.Pointer) uintptr {
	v := readUnaligned32(p)
	return uintptr(v)
}
// r8 performs an unaligned 8-byte load at p, widened to uintptr.
func r8(p unsafe.Pointer) uintptr {
	v := readUnaligned64(p)
	return uintptr(v)
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.
// The format of the dumped file is described at
// https://golang.org/s/go15heapdump.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/gc"
"unsafe"
)
// runtime_debug_WriteHeapDump implements runtime/debug.WriteHeapDump:
// it stops the world and writes a heap dump to the open file
// descriptor fd.
//
//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
	stw := stopTheWorld(stwWriteHeapDump)

	// Keep m on this G's stack instead of the system stack.
	// Both readmemstats_m and writeheapdump_m have pretty large
	// peak stack depths and we risk blowing the system stack.
	// This is safe because the world is stopped, so we don't
	// need to worry about anyone shrinking and therefore moving
	// our stack.
	var m MemStats
	systemstack(func() {
		// Call readmemstats_m here instead of deeper in
		// writeheapdump_m because we might blow the system stack
		// otherwise.
		readmemstats_m(&m)
		writeheapdump_m(fd, &m)
	})

	startTheWorld(stw)
}
// Field kinds and record tags used in the heap dump. These values are
// part of the on-disk dump format and must not be renumbered.
const (
	// Kinds of fields emitted when describing pointer layouts.
	fieldKindEol   = 0 // terminates a field list
	fieldKindPtr   = 1 // field holding a pointer
	fieldKindIface = 2
	fieldKindEface = 3

	// Record tags; each dump record begins with one of these.
	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab            = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)
var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte  // scratch buffer; presumably reused by dump routines elsewhere — confirm at use sites.

// Buffer of pending write data: bytes accumulate in buf (nbuf valid
// bytes so far) and are flushed to dumpfd by dwrite/flush.
const (
	bufSize = 4096
)

var buf [bufSize]byte
var nbuf uintptr
// dwrite appends len bytes at data to the dump, buffering small writes
// in buf and flushing to dumpfd as needed.
func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}

	// Fast path: the bytes fit in the in-memory buffer.
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	// The buffer would overflow: flush what we have so far.
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len < bufSize {
		// The payload itself fits; start a fresh buffer with it.
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
		return
	}
	// Huge payload: write it directly, bypassing the buffer.
	write(dumpfd, data, int32(len))
	nbuf = 0
}
// dwritebyte appends a single byte to the dump.
func dwritebyte(b byte) {
	// Copy into a local so we can hand its address to dwrite.
	v := b
	dwrite(unsafe.Pointer(&v), 1)
}
// flush writes any buffered dump bytes (buf[:nbuf]) to dumpfd and
// empties the buffer.
func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}
// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256 // must stay a power of two: buckets are indexed with Hash & (typeCacheBuckets-1)
	typeCacheAssoc   = 4   // entries per bucket
)

type typeCacheBucket struct {
	t [typeCacheAssoc]*_type // recently dumped types, most recently used first
}

var typecache [typeCacheBuckets]typeCacheBucket
// dump a uint64 in a varint format parseable by encoding/binary.
func dumpint(v uint64) {
	// At most 10 bytes are needed for a 64-bit value (7 bits per byte).
	var encoded [10]byte
	n := 0
	for ; v >= 0x80; v >>= 7 {
		// Low 7 bits plus a continuation bit.
		encoded[n] = byte(v | 0x80)
		n++
	}
	// Final byte has its continuation bit clear.
	encoded[n] = byte(v)
	n++
	dwrite(unsafe.Pointer(&encoded), uintptr(n))
}
// dumpbool emits a boolean as a varint 0 or 1.
func dumpbool(b bool) {
	v := uint64(0)
	if b {
		v = 1
	}
	dumpint(v)
}
// dump varint uint64 length followed by memory contents.
// The length prefix must precede the raw bytes; the order of these two
// writes is part of the dump format.
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}
// dumpslice emits a byte slice as a varint length followed by its bytes.
func dumpslice(b []byte) {
	n := len(b)
	dumpint(uint64(n))
	if n == 0 {
		// No element to take the address of.
		return
	}
	dwrite(unsafe.Pointer(&b[0]), uintptr(n))
}
// dumpstr emits a string as a varint length followed by its bytes.
func dumpstr(s string) {
	p := unsafe.Pointer(unsafe.StringData(s))
	dumpmemrange(p, uintptr(len(s)))
}
// dump information for a type.
//
// A small set-associative cache (typecache) records recently dumped
// types so each type is emitted at most once per cache residency; on a
// miss, a tagType record is written and the type is inserted at the
// front of its bucket.
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.Hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.Size_))
	rt := toRType(t)
	if x := t.Uncommon(); x == nil || rt.nameOff(x.PkgPath).Name() == "" {
		// No package path: the type's string form is the whole name.
		dumpstr(rt.string())
	} else {
		// Emit "<pkgpath>.<name>" as a single length-prefixed string.
		pkgpath := rt.nameOff(x.PkgPath).Name()
		name := rt.name()
		dumpint(uint64(uintptr(len(pkgpath)) + 1 + uintptr(len(name))))
		dwrite(unsafe.Pointer(unsafe.StringData(pkgpath)), uintptr(len(pkgpath)))
		dwritebyte('.')
		dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
	}
	// True if values of this type are stored indirectly in interfaces
	// or contain pointers.
	dumpbool(!t.IsDirectIface() || t.Pointers())
}
// dump an object.
//
// Emits a tagObject record: the object's address, its raw contents, and
// the pointer fields described by bv.
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}
// dumpotherroot emits a tagOtherRoot record: a description string and
// the pointer value the root refers to.
func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}
// dumpfinalizer emits a tagFinalizer record for the finalizer attached
// to obj: the object, the closure and its code pointer, and the two
// type descriptors (fint, ot).
func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
// childInfo carries information from a callee frame up to its caller
// while dumpframe walks a goroutine's stack.
type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}
// dump kinds & offsets of interesting fields in bv.
func dumpbv(cbv *bitvector, offset uintptr) {
	n := uintptr(cbv.n)
	for bit := uintptr(0); bit < n; bit++ {
		if cbv.ptrbit(bit) != 1 {
			continue
		}
		// This word holds a pointer: emit its kind and offset.
		dumpint(fieldKindPtr)
		dumpint(uint64(offset + bit*goarch.PtrSize))
	}
}
// dumpframe emits a tagStackFrame record for one frame of a goroutine
// stack walk. child carries layout information from the frame's callee
// (the previously dumped frame); on return it is updated to describe
// this frame for its caller.
func dumpframe(s *stkframe, child *childInfo) {
	f := s.fn

	// Figure out what we can about our stack map
	pc := s.pc
	pcdata := int32(-1) // Use the entry map at function entry
	if pc != f.entry() {
		pc--
		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, pc)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, abi.FUNCDATA_LocalsPointerMaps))

	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
	} else {
		// bv.n == -1 signals "no locals bitmap" below.
		bv.n = -1
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry()))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section
	if stkmap == nil {
		// No locals information, dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information, dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.argBytes()
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, abi.FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return
}
// dumpgoroutine dumps a goroutine record, its stack frames, and its
// pending defer and panic records to the heap dump.
func dumpgoroutine(gp *g) {
	// Pick the resume point: for a goroutine blocked in a syscall the
	// saved syscall sp/pc are authoritative, otherwise use the scheduler
	// state.
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(gp.goid)
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp, false))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason.String())
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	var u unwinder
	for u.initAt(pc, sp, lr, gp, 0); u.valid(); u.next() {
		dumpframe(&u.frame, &child)
	}

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		fn := *(**funcval)(unsafe.Pointer(&d.fn))
		dumpint(uint64(uintptr(unsafe.Pointer(fn))))
		if d.fn == nil {
			// d.fn can be nil for open-coded defers
			dumpint(uint64(0))
		} else {
			dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
		}
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(eface.data)))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}
// dumpgs dumps every goroutine and its stack.
// The world must be stopped.
func dumpgs() {
	assertWorldStopped()

	// goroutines & stacks
	forEachG(func(gp *g) {
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		case _Grunning:
			// Dump goroutine if it's _Grunning only during a syscall. This is safe
			// because the goroutine will just park without mutating its stack, since
			// the world is stopped.
			if gp.syscallsp != 0 {
				dumpgoroutine(gp)
				// Handled: do not fall through into the bad-status throw below.
				break
			}
			// _Grunning outside a syscall should be impossible here.
			fallthrough
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead, _Gdeadextra:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	})
}
// finq_callback dumps one queued-finalizer record; it is passed to
// iterate_finq by dumproots.
func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
// dumproots dumps the GC roots: the data and bss segments, per-span
// finalizer specials, and the finalizer queue.
func dumproots() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	// TODO(mwhudson): dump datamask etc from all objects
	// data segment
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// bss segment
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// mspan.types
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := unsafe.Pointer(s.base() + spf.special.offset)
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}
// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
var freemark [gc.MaxObjsPerSpan]bool

// dumpobjs dumps every allocated (non-free) object in every in-use span.
func dumpobjs() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		p := s.base()
		size := s.elemsize
		n := (s.npages << gc.PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		// Mark the free slots so the dump loop below can skip them.
		for freeIndex := uint16(0); freeIndex < s.nelems; freeIndex++ {
			if s.isFree(uintptr(freeIndex)) {
				freemark[freeIndex] = true
			}
		}

		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				// Free slot: clear the mark for the next span's reuse
				// of the shared freemark array.
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}
// dumpparams dumps global runtime parameters: endianness, pointer size,
// heap arena bounds, GOARCH, Go version, and CPU count.
func dumpparams() {
	dumpint(tagParams)
	// Detect byte order by inspecting the in-memory layout of a uintptr.
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(goarch.PtrSize)
	// Compute the lowest and highest addresses covered by mapped arenas.
	var arenaStart, arenaEnd uintptr
	for i1 := range mheap_.arenas {
		if mheap_.arenas[i1] == nil {
			continue
		}
		for i, ha := range mheap_.arenas[i1] {
			if ha == nil {
				continue
			}
			base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
			if arenaStart == 0 || base < arenaStart {
				arenaStart = base
			}
			if base+heapArenaBytes > arenaEnd {
				arenaEnd = base + heapArenaBytes
			}
		}
	}
	dumpint(uint64(arenaStart))
	dumpint(uint64(arenaEnd))
	dumpstr(goarch.GOARCH)
	dumpstr(buildVersion)
	dumpint(uint64(numCPUStartup))
}
// itab_callback dumps one itab record (and its concrete type) to the
// heap dump; it is passed to iterate_itabs by dumpitabs.
func itab_callback(tab *itab) {
	t := tab.Type
	dumptype(t)
	dumpint(tagItab)
	dumpint(uint64(uintptr(unsafe.Pointer(tab))))
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
}

// dumpitabs dumps every itab in the runtime's itab table.
func dumpitabs() {
	iterate_itabs(itab_callback)
}
// dumpms dumps a record for every OS thread (M) known to the runtime.
func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}
// dumpmemstats dumps the MemStats snapshot m into the heap dump.
// The field order below is part of the dump format: consumers decode
// the values positionally, so do not reorder these calls.
//
//go:systemstack
func dumpmemstats(m *MemStats) {
	assertWorldStopped()

	// These ints should be identical to the exported
	// MemStats structure and should be ordered the same
	// way too.
	dumpint(tagMemStats)
	dumpint(m.Alloc)
	dumpint(m.TotalAlloc)
	dumpint(m.Sys)
	dumpint(m.Lookups)
	dumpint(m.Mallocs)
	dumpint(m.Frees)
	dumpint(m.HeapAlloc)
	dumpint(m.HeapSys)
	dumpint(m.HeapIdle)
	dumpint(m.HeapInuse)
	dumpint(m.HeapReleased)
	dumpint(m.HeapObjects)
	dumpint(m.StackInuse)
	dumpint(m.StackSys)
	dumpint(m.MSpanInuse)
	dumpint(m.MSpanSys)
	dumpint(m.MCacheInuse)
	dumpint(m.MCacheSys)
	dumpint(m.BuckHashSys)
	dumpint(m.GCSys)
	dumpint(m.OtherSys)
	dumpint(m.NextGC)
	dumpint(m.LastGC)
	dumpint(m.PauseTotalNs)
	for i := 0; i < 256; i++ {
		dumpint(m.PauseNs[i])
	}
	dumpint(uint64(m.NumGC))
}
// dumpmemprof_callback dumps one memory-profile bucket, including a
// symbolized stack trace; it is passed to iterate_memprof by dumpmemprof.
func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if !f.valid() {
			// Unknown PC: format it by hand as "(0x<hex>)" since we
			// cannot use fmt here. The buffer is filled right-to-left.
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(funcname(f))
			// For frames above the leaf, back up one instruction so the
			// reported line is that of the call, not the return address.
			if i > 0 && pc > f.entry() {
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}
// dumpmemprof dumps every memory-profile bucket, then an allocation
// sample record for each profiled object (profile specials on spans).
func dumpmemprof() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	iterate_memprof(dumpmemprof_callback)
	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := s.base() + spp.special.offset
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}
// dumphdr is the magic header that identifies the heap dump format version.
var dumphdr = []byte("go1.7 heap dump\n")

// mdump writes the complete heap dump to dumpfd. The world must be
// stopped, and all spans must be swept before any data is emitted.
func mdump(m *MemStats) {
	assertWorldStopped()

	// make sure we're done sweeping
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			s.ensureSwept()
		}
	}
	// Reset the type cache so dumptype re-emits every type record.
	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats(m)
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}
// writeheapdump_m writes the heap dump to file descriptor fd, parking
// the calling user goroutine in _Gwaiting for the duration. It also
// frees the temporary buffer used by makeheapobjbv, if any.
func writeheapdump_m(fd uintptr, m *MemStats) {
	assertWorldStopped()

	gp := getg()
	casGToWaiting(gp.m.curg, _Grunning, waitReasonDumpingHeap)

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump(m)

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(gp.m.curg, _Gwaiting, _Grunning)
}
// dumpint() the kind & offset of each field in an object.
// The field list is terminated by a fieldKindEol marker.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}
// makeheapobjbv builds a pointer bitmap for the heap object at p of the
// given size, one bit per pointer-sized word. The bitmap storage lives
// in the package-level tmpbuf, which is grown (via sysAlloc) as needed
// and reused across calls; the returned bitvector aliases it, so it is
// only valid until the next call.
func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / goarch.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		p := sysAlloc(n, &memstats.other_sys, "heapdump")
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	clear(tmpbuf[:nptr/8+1])
	s := spanOf(p)
	tp := s.typePointersOf(p, size)
	for {
		var addr uintptr
		if tp, addr = tp.next(p + size); addr == 0 {
			break
		}
		i := (addr - p) / goarch.PtrSize
		tmpbuf[i/8] |= 1 << (i % 8)
	}
	return bitvector{int32(nptr), &tmpbuf[0]}
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/goarch"
"unsafe"
)
// hexdumpWords prints a word-oriented hex dump of [p, p+len).
//
// If mark != nil, it will be passed to hexdumper.mark.
//
// In addition to the caller's mark, every word that looks like a code
// pointer is annotated with "<funcname+offset>".
func hexdumpWords(p, len uintptr, mark func(uintptr, hexdumpMarker)) {
	printlock()

	// Provide a default annotation
	symMark := func(u uintptr, hm hexdumpMarker) {
		if mark != nil {
			mark(u, hm)
		}

		// Can we symbolize this value?
		val := *(*uintptr)(unsafe.Pointer(u))
		fn := findfunc(val)
		if fn.valid() {
			hm.start()
			print("<", funcname(fn), "+", hex(val-fn.entry()), ">\n")
		}
	}

	h := hexdumper{addr: p, mark: symMark}
	h.write(unsafe.Slice((*byte)(unsafe.Pointer(p)), len))
	h.close()

	printunlock()
}
// hexdumper is a Swiss-army knife hex dumper.
//
// To use, optionally set addr and wordBytes, then call write repeatedly,
// followed by close.
//
// The zero value is usable; init (called lazily from write) fills in
// defaults for addrBytes and wordBytes.
type hexdumper struct {
	// addr is the address to print for the first byte of data.
	addr uintptr

	// addrBytes is the number of bytes of addr to print. If this is 0, it
	// defaults to goarch.PtrSize.
	addrBytes uint8

	// wordBytes is the number of bytes in a word. If wordBytes is 1, this
	// prints a byte-oriented dump. If it's > 1, this interprets the data as a
	// sequence of words of the given size. If it's 0, it's treated as
	// goarch.PtrSize.
	wordBytes uint8

	// mark is an optional function that can annotate values in the hex dump.
	//
	// If non-nil, it is called with the address of every complete, aligned word
	// in the hex dump.
	//
	// If it decides to print an annotation, it must first call m.start(), then
	// print the annotation, followed by a new line.
	mark func(addr uintptr, m hexdumpMarker)

	// Below here is state

	ready int8 // 0=need to init state; 1=need to print header; 2=ready

	// dataBuf accumulates a line at a time of data, in case it's split across
	// buffers.
	dataBuf [16]byte
	dataPos uint8
	dataSkip uint8 // Skip first n bytes of buf on first line

	// toPos maps from byte offset in data to a visual offset in the printed line.
	toPos [16]byte
}
// hexdumpMarker positions an annotation under a word in the hex dump.
// chars is the number of columns from the start of the line to the
// word's left edge.
type hexdumpMarker struct {
	chars int
}
// write feeds data to the dumper, printing every completed 16-byte line
// and buffering any partial tail for the next call.
func (h *hexdumper) write(data []byte) {
	if h.ready == 0 {
		h.init()
	}

	// A partial line may be buffered from a previous call; top it up.
	if h.dataPos != 0 {
		n := copy(h.dataBuf[h.dataPos:], data)
		h.dataPos += uint8(n)
		data = data[n:]
		if int(h.dataPos) < len(h.dataBuf) {
			// Still short of a full line; wait for more input.
			return
		}
		h.flushLine(h.dataBuf[:])
		h.dataPos = 0
	}

	// Emit as many whole lines as data contains.
	lineLen := len(h.dataBuf)
	for len(data) >= lineLen {
		h.flushLine(data[:lineLen])
		data = data[lineLen:]
	}

	// Buffer whatever remains.
	h.dataPos = uint8(copy(h.dataBuf[:], data))
}
// close flushes any buffered partial line. Call once, after the last write.
func (h *hexdumper) close() {
	if h.dataPos == 0 {
		return
	}
	h.flushLine(h.dataBuf[:h.dataPos])
}
// init finalizes the dumper's configuration (defaulting addrBytes and
// wordBytes to goarch.PtrSize), builds the byte-offset → visual-position
// table, and rounds the starting address down to a line boundary,
// recording how many leading bytes of the first line to skip.
// Called lazily, once, from write.
func (h *hexdumper) init() {
	const bytesPerLine = len(h.dataBuf)

	if h.addrBytes == 0 {
		h.addrBytes = goarch.PtrSize
	} else if h.addrBytes > goarch.PtrSize {
		// addrBytes is a uint8, so it can never be negative; only the
		// upper bound needs checking.
		throw("invalid addrBytes")
	}

	if h.wordBytes == 0 {
		h.wordBytes = goarch.PtrSize
	}
	wb := int(h.wordBytes)
	// wb is converted from a uint8 and thus non-negative; reject word
	// sizes that don't fit in a line or aren't a power of two.
	if wb >= bytesPerLine || wb&(wb-1) != 0 {
		throw("invalid wordBytes")
	}

	// Construct position mapping.
	for i := range h.toPos {
		// First, calculate the "field" within the line, applying byte swizzling.
		field := 0
		if goarch.BigEndian {
			field = i
		} else {
			// Little-endian: reverse byte order within each word.
			field = i ^ int(wb-1)
		}
		// Translate this field into a visual offset.
		// "00112233 44556677 8899AABB CCDDEEFF"
		h.toPos[i] = byte(field*2 + field/4 + field/8)
	}

	// The first line may need to skip some fields to get to alignment.
	// Round down the starting address.
	nAddr := h.addr &^ uintptr(bytesPerLine - 1)
	// Skip bytes to get to alignment.
	h.dataPos = uint8(h.addr - nAddr)
	h.dataSkip = uint8(h.addr - nAddr)
	h.addr = nAddr

	// We're ready to print the header.
	h.ready = 1
}
// flushLine prints one line of the dump: the line's base address, the hex
// bytes (swizzled into word order via toPos), an ASCII gutter, and then,
// if a mark function is set, one callback per complete aligned word so
// annotations can be printed underneath. len(data) may be short on the
// final line; dataSkip suppresses leading bytes on the first line.
func (h *hexdumper) flushLine(data []byte) {
	const bytesPerLine = len(h.dataBuf)
	const maxAddrChars = 2 * goarch.PtrSize
	const addrSep = ": "
	dataStart := int(2*h.addrBytes) + len(addrSep)
	// dataChars uses the same formula to toPos above. We calculate it with the
	// "last field", then add the size of the last field.
	const dataChars = (bytesPerLine-1)*2 + (bytesPerLine-1)/4 + (bytesPerLine-1)/8 + 2
	const asciiSep = "  "
	asciiStart := dataStart + dataChars + len(asciiSep)
	const asciiChars = bytesPerLine
	nlPos := asciiStart + asciiChars

	// lineBuf is sized for the worst case; shorter configurations use a
	// prefix of it and the rest stays spaces.
	var lineBuf [maxAddrChars + len(addrSep) + dataChars + len(asciiSep) + asciiChars + 1]byte
	clear := func() {
		for i := range lineBuf {
			lineBuf[i] = ' '
		}
	}
	clear()

	if h.ready == 1 {
		// Print column offsets header.
		for offset, pos := range h.toPos {
			h.fmtHex(lineBuf[dataStart+int(pos+1):][:1], uint64(offset))
		}
		// Print ASCII offsets.
		for offset := range asciiChars {
			h.fmtHex(lineBuf[asciiStart+offset:][:1], uint64(offset))
		}
		lineBuf[nlPos] = '\n'
		gwrite(lineBuf[:nlPos+1])
		clear()
		h.ready = 2
	}

	// Format address.
	h.fmtHex(lineBuf[:2*h.addrBytes], uint64(h.addr))
	copy(lineBuf[2*h.addrBytes:], addrSep)

	// Format data in hex and ASCII.
	for offset, b := range data {
		if offset < int(h.dataSkip) {
			continue
		}
		pos := h.toPos[offset]
		h.fmtHex(lineBuf[dataStart+int(pos):][:2], uint64(b))
		copy(lineBuf[dataStart+dataChars:], asciiSep)
		ascii := uint8('.')
		if b >= ' ' && b <= '~' {
			// Printable ASCII; anything else shows as '.'.
			ascii = b
		}
		lineBuf[asciiStart+offset] = ascii
	}

	// Trim buffer.
	end := asciiStart + len(data)
	lineBuf[end] = '\n'
	buf := lineBuf[:end+1]

	// Print.
	gwrite(buf)

	// Print marks.
	if h.mark != nil {
		clear()
		for offset := 0; offset+int(h.wordBytes) <= len(data); offset += int(h.wordBytes) {
			if offset < int(h.dataSkip) {
				continue
			}
			addr := h.addr + uintptr(offset)
			// Find the position of the left edge of this word
			caret := dataStart + int(min(h.toPos[offset], h.toPos[offset+int(h.wordBytes)-1]))
			h.mark(addr, hexdumpMarker{caret})
		}
	}

	h.addr += uintptr(bytesPerLine)
	h.dataPos = 0
	h.dataSkip = 0
}
// fmtHex writes the base-16 representation of v into buf, right-aligned
// and zero-padded so that all of buf is filled. If buf is too small to
// hold every digit of v, the output will start with '*' to flag the
// truncation.
func (h *hexdumper) fmtHex(buf []byte, v uint64) {
	const hexDigits = "0123456789abcdef"
	for i := len(buf); i > 0; {
		i--
		buf[i] = hexDigits[v&0xf]
		v >>= 4
	}
	if v != 0 {
		// The value didn't fit; mark the leading digit.
		buf[0] = '*'
	}
}
// start positions the cursor under the marked word by printing m.chars
// spaces followed by "^ ". The caller then prints its annotation and a
// newline.
func (m hexdumpMarker) start() {
	var pad [64]byte
	for i := range pad {
		pad[i] = ' '
	}
	// Emit the padding in chunks of len(pad).
	n := m.chars
	for n > len(pad) {
		gwrite(pad[:])
		n -= len(pad)
	}
	gwrite(pad[:n])
	print("^ ")
}
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
const (
	// For the time histogram type, we use an HDR histogram.
	// Values are placed in buckets based solely on the most
	// significant set bit. Thus, buckets are power-of-2 sized.
	// Values are then placed into sub-buckets based on the value of
	// the next timeHistSubBucketBits most significant bits. Thus,
	// sub-buckets are linear within a bucket.
	//
	// Therefore, the number of sub-buckets (timeHistNumSubBuckets)
	// defines the error. This error may be computed as
	// 1/timeHistNumSubBuckets*100%. For example, for 16 sub-buckets
	// per bucket the error is approximately 6%.
	//
	// NOTE(review): the 16-sub-bucket example above does not describe the
	// current configuration — timeHistSubBucketBits below is 2, i.e. 4
	// sub-buckets and ~25% error. Confirm whether 2 is intended.
	//
	// The number of buckets (timeHistNumBuckets), on the
	// other hand, defines the range. To avoid producing a large number
	// of buckets that are close together, especially for small numbers
	// (e.g. 1, 2, 3, 4, 5 ns) that aren't very useful, timeHistNumBuckets
	// is defined in terms of the least significant bit (timeHistMinBucketBits)
	// that needs to be set before we start bucketing and the most
	// significant bit (timeHistMaxBucketBits) that we bucket before we just
	// dump it into a catch-all bucket.
	//
	// As an example, consider the configuration:
	//
	//    timeHistMinBucketBits = 9
	//    timeHistMaxBucketBits = 48
	//    timeHistSubBucketBits = 2
	//
	// Then:
	//
	//    011000001
	//    ^--
	//    │  ^
	//    │  └---- Next 2 bits -> sub-bucket 3
	//    └------- Bit 9 unset -> bucket 0
	//
	//    110000001
	//    ^--
	//    │  ^
	//    │  └---- Next 2 bits -> sub-bucket 2
	//    └------- Bit 9 set -> bucket 1
	//
	//    1000000010
	//    ^--    ^
	//    │  ^   └-- Lower bits ignored
	//    │  └---- Next 2 bits -> sub-bucket 0
	//    └------- Bit 10 set -> bucket 2
	//
	// Following this pattern, bucket 38 will have the bit 46 set. We don't
	// have any buckets for higher values, so we spill the rest into an overflow
	// bucket containing values of 2^47-1 nanoseconds or approx. 1 day or more.
	// This range is more than enough to handle durations produced by the runtime.
	timeHistMinBucketBits = 9
	timeHistMaxBucketBits = 48 // Note that this is exclusive; 1 higher than the actual range.
	timeHistSubBucketBits = 2
	timeHistNumSubBuckets = 1 << timeHistSubBucketBits
	timeHistNumBuckets    = timeHistMaxBucketBits - timeHistMinBucketBits + 1
	// Two extra buckets, one for underflow, one for overflow.
	timeHistTotalBuckets = timeHistNumBuckets*timeHistNumSubBuckets + 2
)
// timeHistogram represents a distribution of durations in
// nanoseconds.
//
// The accuracy and range of the histogram is defined by the
// timeHistSubBucketBits and timeHistNumBuckets constants.
//
// It is an HDR histogram with exponentially-distributed
// buckets and linearly distributed sub-buckets.
//
// The histogram is safe for concurrent reads and writes.
type timeHistogram struct {
	// counts holds one atomic counter per (bucket, sub-bucket) pair,
	// indexed as bucket*timeHistNumSubBuckets + subBucket.
	counts [timeHistNumBuckets * timeHistNumSubBuckets]atomic.Uint64

	// underflow counts all the times we got a negative duration
	// sample. Because of how time works on some platforms, it's
	// possible to measure negative durations. We could ignore them,
	// but we record them anyway because it's better to have some
	// signal that it's happening than just missing samples.
	underflow atomic.Uint64

	// overflow counts all the times we got a duration that exceeded
	// the range counts represents.
	overflow atomic.Uint64
}
// record adds the given duration to the distribution.
//
// Disallow preemptions and stack growths because this function
// may run in sensitive locations.
//
//go:nosplit
func (h *timeHistogram) record(duration int64) {
	// If the duration is negative, capture that in underflow.
	if duration < 0 {
		h.underflow.Add(1)
		return
	}
	// bucketBit is the target bit for the bucket which is usually the
	// highest 1 bit, but if we're less than the minimum, is the highest
	// 1 bit of the minimum (which will be zero in the duration).
	//
	// bucket is the bucket index, which is the bucketBit minus the
	// highest bit of the minimum, plus one to leave room for the catch-all
	// bucket for samples lower than the minimum.
	var bucketBit, bucket uint
	if l := sys.Len64(uint64(duration)); l < timeHistMinBucketBits {
		bucketBit = timeHistMinBucketBits
		bucket = 0 // bucketBit - timeHistMinBucketBits
	} else {
		bucketBit = uint(l)
		bucket = bucketBit - timeHistMinBucketBits + 1
	}
	// If the bucket we computed is greater than the number of buckets,
	// count that in overflow.
	if bucket >= timeHistNumBuckets {
		h.overflow.Add(1)
		return
	}
	// The sub-bucket index is just next timeHistSubBucketBits after the bucketBit.
	subBucket := uint(duration>>(bucketBit-1-timeHistSubBucketBits)) % timeHistNumSubBuckets
	h.counts[bucket*timeHistNumSubBuckets+subBucket].Add(1)
}
// write dumps the histogram to the passed metricValue as a float64 histogram.
// Index 0 of the output is the underflow count and the final index the
// overflow count; the bucket counters fill the slots in between.
func (h *timeHistogram) write(out *metricValue) {
	hist := out.float64HistOrInit(timeHistBuckets)

	// The bottom-most bucket, containing negative values, is tracked
	// separately as underflow, so fill that in manually and then iterate
	// over the rest.
	hist.counts[0] = h.underflow.Load()
	for i := range h.counts {
		hist.counts[i+1] = h.counts[i].Load()
	}
	hist.counts[len(hist.counts)-1] = h.overflow.Load()
}
// IEEE 754 bit patterns for positive and negative infinity. The runtime
// cannot import package math, so they are materialized by hand.
const (
	fInf    = 0x7FF0000000000000
	fNegInf = 0xFFF0000000000000
)

// float64Inf returns +Inf.
func float64Inf() float64 {
	bits := uint64(fInf)
	return *(*float64)(unsafe.Pointer(&bits))
}

// float64NegInf returns -Inf.
func float64NegInf() float64 {
	bits := uint64(fNegInf)
	return *(*float64)(unsafe.Pointer(&bits))
}
// timeHistogramMetricsBuckets generates a slice of boundaries for
// the timeHistogram. These boundaries are represented in seconds,
// not nanoseconds like the timeHistogram represents durations.
func timeHistogramMetricsBuckets() []float64 {
	b := make([]float64, timeHistTotalBuckets+1)

	// Underflow bucket.
	b[0] = float64NegInf()

	// The first group of buckets carries no bucket bit, only sub-bucket
	// bits below the minimum bucket bit.
	for sb := 0; sb < timeHistNumSubBuckets; sb++ {
		nanos := uint64(sb) << (timeHistMinBucketBits - 1 - timeHistSubBucketBits)
		// Nanoseconds to seconds; these values are all exactly
		// representable by a float64.
		b[sb+1] = float64(nanos) / 1e9
	}

	// Remaining buckets: one bucket bit ORed with the sub-bucket bits.
	// It's easier to reason about if we cut out the 0'th bucket, handled
	// above.
	for bit := timeHistMinBucketBits; bit < timeHistMaxBucketBits; bit++ {
		for sb := 0; sb < timeHistNumSubBuckets; sb++ {
			nanos := uint64(1)<<(bit-1) | uint64(sb)<<(bit-1-timeHistSubBucketBits)
			// The index skips the first bucket group (compensating for
			// starting at timeHistMinBucketBits) and the -Inf slot at
			// the front.
			idx := (bit-timeHistMinBucketBits+1)*timeHistNumSubBuckets + sb + 1
			// Nanoseconds to seconds; exactly representable by a float64.
			b[idx] = float64(nanos) / 1e9
		}
	}

	// Overflow bucket.
	b[len(b)-2] = float64(uint64(1)<<(timeHistMaxBucketBits-1)) / 1e9
	b[len(b)-1] = float64Inf()
	return b
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// itabInitSize is the number of entries in the statically-allocated
// starter itab table.
const itabInitSize = 512

var (
	itabLock      mutex                               // lock for accessing itab table
	itabTable     = &itabTableInit                    // pointer to current table
	itabTableInit = itabTableType{size: itabInitSize} // starter table
)

// Note: change the formula in the mallocgc call in itabAdd if you change these fields.
type itabTableType struct {
	size    uintptr             // length of entries array. Always a power of 2.
	count   uintptr             // current number of filled entries.
	entries [itabInitSize]*itab // really [size] large
}
// itabHashFunc returns the hash-table slot basis for the given
// interface/concrete-type pair.
func itabHashFunc(inter *interfacetype, typ *_type) uintptr {
	// compiler has provided some good hash codes for us.
	return uintptr(inter.Type.Hash ^ typ.Hash)
}
// getitab returns the itab for the given interface/concrete-type pair,
// creating and caching one on first use. If typ does not implement inter,
// it returns nil when canfail is true and panics with a
// TypeAssertionError otherwise.
//
// getitab should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname getitab
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
	if len(inter.Methods) == 0 {
		throw("internal error - misuse of itab")
	}

	// easy case
	if typ.TFlag&abi.TFlagUncommon == 0 {
		// Types with no uncommon section have no methods and thus can
		// never satisfy a non-empty interface.
		if canfail {
			return nil
		}
		name := toRType(&inter.Type).nameOff(inter.Methods[0].Name)
		panic(&TypeAssertionError{nil, typ, &inter.Type, name.Name()})
	}

	var m *itab

	// First, look in the existing table to see if we can find the itab we need.
	// This is by far the most common case, so do it without locks.
	// Use atomic to ensure we see any previous writes done by the thread
	// that updates the itabTable field (with atomic.Storep in itabAdd).
	t := (*itabTableType)(atomic.Loadp(unsafe.Pointer(&itabTable)))
	if m = t.find(inter, typ); m != nil {
		goto finish
	}

	// Not found.  Grab the lock and try again.
	lock(&itabLock)
	if m = itabTable.find(inter, typ); m != nil {
		unlock(&itabLock)
		goto finish
	}

	// Entry doesn't exist yet. Make a new entry & add it.
	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
	m.Inter = inter
	m.Type = typ
	// The hash is used in type switches. However, compiler statically generates itab's
	// for all interface/type pairs used in switches (which are added to itabTable
	// in itabsinit). The dynamically-generated itab's never participate in type switches,
	// and thus the hash is irrelevant.
	// Note: m.Hash is _not_ the hash used for the runtime itabTable hash table.
	m.Hash = 0
	itabInit(m, true)
	itabAdd(m)
	unlock(&itabLock)
finish:
	if m.Fun[0] != 0 {
		return m
	}
	if canfail {
		return nil
	}
	// this can only happen if the conversion
	// was already done once using the , ok form
	// and we have a cached negative result.
	// The cached result doesn't record which
	// interface function was missing, so initialize
	// the itab again to get the missing function name.
	panic(&TypeAssertionError{concrete: typ, asserted: &inter.Type, missingMethod: itabInit(m, false)})
}
// find finds the given interface/type pair in t.
// Returns nil if the given interface/type pair isn't present.
// Safe to call concurrently with add (lock-free readers).
func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab {
	// Implemented using quadratic probing.
	// Probe sequence is h(i) = h0 + i*(i+1)/2 mod 2^k.
	// We're guaranteed to hit all table entries using this probe sequence.
	mask := t.size - 1
	h := itabHashFunc(inter, typ) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		// Use atomic read here so if we see m != nil, we also see
		// the initializations of the fields of m.
		// m := *p
		m := (*itab)(atomic.Loadp(unsafe.Pointer(p)))
		if m == nil {
			// Empty slot terminates the probe sequence: not present.
			return nil
		}
		if m.Inter == inter && m.Type == typ {
			return m
		}
		h += i
		h &= mask
	}
}
// itabAdd adds the given itab to the itab hash table.
// itabLock must be held.
// Grows (doubles) the table when it reaches 75% load.
func itabAdd(m *itab) {
	// Bugs can lead to calling this while mallocing is set,
	// typically because this is called while panicking.
	// Crash reliably, rather than only when we need to grow
	// the hash table.
	if getg().m.mallocing != 0 {
		throw("malloc deadlock")
	}

	t := itabTable
	if t.count >= 3*(t.size/4) { // 75% load factor
		// Grow hash table.
		// t2 = new(itabTableType) + some additional entries
		// We lie and tell malloc we want pointer-free memory because
		// all the pointed-to values are not in the heap.
		t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
		t2.size = t.size * 2

		// Copy over entries.
		// Note: while copying, other threads may look for an itab and
		// fail to find it. That's ok, they will then try to get the itab lock
		// and as a consequence wait until this copying is complete.
		iterate_itabs(t2.add)
		if t2.count != t.count {
			throw("mismatched count during itab table copy")
		}
		// Publish new hash table. Use an atomic write: see comment in getitab.
		atomicstorep(unsafe.Pointer(&itabTable), unsafe.Pointer(t2))
		// Adopt the new table as our own.
		t = itabTable
		// Note: the old table can be GC'ed here.
	}
	t.add(m)
}
// add adds the given itab to itab table t.
// itabLock must be held.
// Adding an itab that is already present is a no-op.
func (t *itabTableType) add(m *itab) {
	// See comment in find about the probe sequence.
	// Insert new itab in the first empty spot in the probe sequence.
	mask := t.size - 1
	h := itabHashFunc(m.Inter, m.Type) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		m2 := *p
		if m2 == m {
			// A given itab may be used in more than one module
			// and thanks to the way global symbol resolution works, the
			// pointed-to itab may already have been inserted into the
			// global 'hash'.
			return
		}
		if m2 == nil {
			// Use atomic write here so if a reader sees m, it also
			// sees the correctly initialized fields of m.
			// NoWB is ok because m is not in heap memory.
			// *p = m
			atomic.StorepNoWB(unsafe.Pointer(p), unsafe.Pointer(m))
			t.count++
			return
		}
		h += i
		h &= mask
	}
}
// itabInit fills in the m.Fun array with all the code pointers for
// the m.Inter/m.Type pair. If the type does not implement the interface,
// it sets m.Fun[0] to 0 and returns the name of an interface function that is missing.
// If !firstTime, itabInit will not write anything to m.Fun (see issue 65962).
// It is ok to call this multiple times on the same m, even concurrently
// (although it will only be called once with firstTime==true).
func itabInit(m *itab, firstTime bool) string {
	inter := m.Inter
	typ := m.Type
	x := typ.Uncommon()

	// both inter and typ have method sorted by name,
	// and interface names are unique,
	// so can iterate over both in lock step;
	// the loop is O(ni+nt) not O(ni*nt).
	ni := len(inter.Methods)
	nt := int(x.Mcount)
	xmhdr := unsafe.Slice((*abi.Method)(add(unsafe.Pointer(x), uintptr(x.Moff))), nt)
	j := 0
	// m.Fun is declared with length 1 but really has ni entries
	// (allocated oversized in getitab); view it as a full slice.
	methods := unsafe.Slice((*unsafe.Pointer)(unsafe.Pointer(&m.Fun[0])), ni)
	var fun0 unsafe.Pointer
imethods:
	for k := 0; k < ni; k++ {
		i := &inter.Methods[k]
		itype := toRType(&inter.Type).typeOff(i.Typ)
		name := toRType(&inter.Type).nameOff(i.Name)
		iname := name.Name()
		ipkg := pkgPath(name)
		if ipkg == "" {
			ipkg = inter.PkgPath.Name()
		}
		for ; j < nt; j++ {
			t := &xmhdr[j]
			rtyp := toRType(typ)
			tname := rtyp.nameOff(t.Name)
			if rtyp.typeOff(t.Mtyp) == itype && tname.Name() == iname {
				// Method signature and name match; check visibility.
				pkgPath := pkgPath(tname)
				if pkgPath == "" {
					pkgPath = rtyp.nameOff(x.PkgPath).Name()
				}
				if tname.IsExported() || pkgPath == ipkg {
					ifn := rtyp.textOff(t.Ifn)
					if k == 0 {
						fun0 = ifn // we'll set m.Fun[0] at the end
					} else if firstTime {
						methods[k] = ifn
					}
					continue imethods
				}
			}
		}
		// didn't find method
		// Leaves m.Fun[0] set to 0.
		return iname
	}
	// Write m.Fun[0] last: a non-zero Fun[0] is what marks the itab as
	// fully initialized (see getitab).
	if firstTime {
		m.Fun[0] = uintptr(fun0)
	}
	return ""
}
// itabsinit seeds the itab table with the compiler-generated itabs of
// every active module. Called during runtime initialization.
func itabsinit() {
	lockInit(&itabLock, lockRankItab)
	lock(&itabLock)
	for _, md := range activeModules() {
		addModuleItabs(md)
	}
	unlock(&itabLock)
}
// addModuleItabs adds the pre-compiled itabs from md to the itab hash table.
// This is an optimization to let us skip creating itabs we already have.
// itabLock must be held (see itabsinit).
func addModuleItabs(md *moduledata) {
	// The module's itabs are laid out contiguously starting at itaboffset
	// within its type data; walk them by size.
	p := md.types + md.itaboffset
	for p < md.etypes {
		itab := (*itab)(unsafe.Pointer(p))
		itabAdd(itab)
		p += uintptr(itab.Size())
	}
}
// panicdottypeE is called when doing an e.(T) conversion and the conversion fails.
// have = the dynamic type we have.
// want = the static type we're trying to convert to.
// iface = the static type we're converting from.
func panicdottypeE(have, want, iface *_type) {
panic(&TypeAssertionError{iface, have, want, ""})
}
// panicdottypeI is called when doing an i.(T) conversion and the conversion fails.
// Same args as panicdottypeE, but "have" is the dynamic itab we have.
func panicdottypeI(have *itab, want, iface *_type) {
	// Pull the concrete dynamic type out of the itab; a nil itab
	// (nil interface value) means there is no dynamic type.
	var haveType *_type
	if have != nil {
		haveType = have.Type
	}
	panicdottypeE(haveType, want, iface)
}
// panicnildottype is called when doing an i.(T) conversion and the interface i is nil.
// want = the static type we're trying to convert to.
// Both the dynamic and interface type slots of the error are left nil.
func panicnildottype(want *_type) {
	panic(&TypeAssertionError{nil, nil, want, ""})
	// TODO: Add the static type we're converting from as well.
	// It might generate a better error message.
	// Just to match other nil conversion errors, we don't for now.
}
// The specialized convTx routines need a type descriptor to use when calling mallocgc.
// We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness.
// However, when debugging, it'd be nice to have some indication in mallocgc where the types came from,
// so we use named types here.
// We then construct interface values of these types,
// and then extract the type word to use as needed.
type (
	uint16InterfacePtr uint16 // 2-byte scalar, no pointers
	uint32InterfacePtr uint32 // 4-byte scalar, no pointers
	uint64InterfacePtr uint64 // 8-byte scalar, no pointers
	stringInterfacePtr string // string header (contains a pointer)
	sliceInterfacePtr  []byte // slice header (contains a pointer)
)
var (
	// Canonical interface values of the named types above. Extracting the
	// type word from each eface gives a *_type with the right size,
	// alignment, and pointer-ness for use with mallocgc.
	uint16Eface any = uint16InterfacePtr(0)
	uint32Eface any = uint32InterfacePtr(0)
	uint64Eface any = uint64InterfacePtr(0)
	stringEface any = stringInterfacePtr("")
	sliceEface  any = sliceInterfacePtr(nil)

	uint16Type *_type = efaceOf(&uint16Eface)._type
	uint32Type *_type = efaceOf(&uint32Eface)._type
	uint64Type *_type = efaceOf(&uint64Eface)._type
	stringType *_type = efaceOf(&stringEface)._type
	sliceType  *_type = efaceOf(&sliceEface)._type
)
// The conv and assert functions below do very similar things.
// The convXXX functions are guaranteed by the compiler to succeed.
// The assertXXX functions may fail (either panicking or returning false,
// depending on whether they are 1-result or 2-result).
// The convXXX functions succeed on a nil input, whereas the assertXXX
// functions fail on a nil input.
// convT converts a value of type t, which is pointed to by v, to a pointer that can
// be used as the second word of an interface value.
//
// The value is copied into a fresh heap allocation of t's size. The
// race/msan/asan hooks report the read of *v before it is copied.
func convT(t *_type, v unsafe.Pointer) unsafe.Pointer {
	if raceenabled {
		raceReadObjectPC(t, v, sys.GetCallerPC(), abi.FuncPCABIInternal(convT))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}
	// t may contain pointers, so allocate with a type descriptor
	// and copy with write barriers.
	x := mallocgc(t.Size_, t, true)
	typedmemmove(t, x, v)
	return x
}
// convTnoptr is like convT, but for types known to contain no pointers:
// it allocates pointer-free memory and copies with a plain memmove,
// so no write barriers are needed.
func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
	// TODO: maybe take size instead of type?
	if raceenabled {
		raceReadObjectPC(t, v, sys.GetCallerPC(), abi.FuncPCABIInternal(convTnoptr))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}
	x := mallocgc(t.Size_, t, false)
	memmove(x, v, t.Size_)
	return x
}
// convT16 returns a pointer suitable for the data word of an interface
// holding a uint16-sized value. Small values avoid allocation by pointing
// into the read-only staticuint64s table.
func convT16(val uint16) (x unsafe.Pointer) {
	if val < uint16(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
		if goarch.BigEndian {
			// The low-order 2 bytes of the 8-byte entry live at
			// offset 6 on big-endian machines.
			x = add(x, 6)
		}
	} else {
		x = mallocgc(2, uint16Type, false)
		*(*uint16)(x) = val
	}
	return
}
// convT32 returns a pointer suitable for the data word of an interface
// holding a uint32-sized value. Small values avoid allocation by pointing
// into the read-only staticuint64s table.
func convT32(val uint32) (x unsafe.Pointer) {
	if val < uint32(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
		if goarch.BigEndian {
			// The low-order 4 bytes of the 8-byte entry live at
			// offset 4 on big-endian machines.
			x = add(x, 4)
		}
	} else {
		x = mallocgc(4, uint32Type, false)
		*(*uint32)(x) = val
	}
	return
}
// convT64 returns a pointer suitable for the data word of an interface
// holding a uint64-sized value. Small values (< 256) avoid allocation by
// pointing directly at the matching staticuint64s entry; no endianness
// adjustment is needed since the value fills the whole 8-byte slot.
//
// convT64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname convT64
func convT64(val uint64) (x unsafe.Pointer) {
	if val < uint64(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
	} else {
		x = mallocgc(8, uint64Type, false)
		*(*uint64)(x) = val
	}
	return
}
// convTstring returns a pointer suitable for the data word of an interface
// holding a string. The empty string shares the static zeroVal buffer;
// otherwise the string header is copied into a fresh allocation (scannable,
// since the header contains a pointer).
//
// convTstring should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname convTstring
func convTstring(val string) (x unsafe.Pointer) {
	if val == "" {
		x = unsafe.Pointer(&zeroVal[0])
	} else {
		x = mallocgc(unsafe.Sizeof(val), stringType, true)
		*(*string)(x) = val
	}
	return
}
// convTslice returns a pointer suitable for the data word of an interface
// holding a slice. A nil slice shares the static zeroVal buffer; otherwise
// the slice header is copied into a fresh allocation (scannable, since the
// header contains a pointer).
//
// convTslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname convTslice
func convTslice(val []byte) (x unsafe.Pointer) {
	// Note: this must work for any element type, not just byte.
	if (*slice)(unsafe.Pointer(&val)).array == nil {
		x = unsafe.Pointer(&zeroVal[0])
	} else {
		x = mallocgc(unsafe.Sizeof(val), sliceType, true)
		*(*[]byte)(x) = val
	}
	return
}
// assertE2I returns the itab pairing concrete type t with interface type
// inter, panicking with a TypeAssertionError if t is nil (nil interface
// value) or if getitab determines t does not implement inter.
func assertE2I(inter *interfacetype, t *_type) *itab {
	if t == nil {
		// explicit conversions require non-nil interface value.
		panic(&TypeAssertionError{nil, nil, &inter.Type, ""})
	}
	return getitab(inter, t, false)
}
// assertE2I2 is the two-result (comma-ok) variant of assertE2I: instead of
// panicking it returns nil when t is nil or does not implement inter.
func assertE2I2(inter *interfacetype, t *_type) *itab {
	if t == nil {
		return nil
	}
	return getitab(inter, t, true)
}
// typeAssert builds an itab for the concrete type t and the
// interface type s.Inter. If the conversion is not possible it
// panics if s.CanFail is false and returns nil if s.CanFail is true.
//
// On architectures that use interface switch caches, it also
// probabilistically records the (t, itab) result in s.Cache so the
// generated code can skip the runtime call next time.
func typeAssert(s *abi.TypeAssert, t *_type) *itab {
	var tab *itab
	if t == nil {
		if !s.CanFail {
			panic(&TypeAssertionError{nil, nil, &s.Inter.Type, ""})
		}
	} else {
		tab = getitab(s.Inter, t, s.CanFail)
	}

	if !abi.UseInterfaceSwitchCache(goarch.ArchFamily) {
		return tab
	}

	// Maybe update the cache, so the next time the generated code
	// doesn't need to call into the runtime.
	if cheaprand()&1023 != 0 {
		// Only bother updating the cache ~1 in 1000 times.
		return tab
	}
	// Load the current cache.
	oldC := (*abi.TypeAssertCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	if cheaprand()&uint32(oldC.Mask) != 0 {
		// As cache gets larger, choose to update it less often
		// so we can amortize the cost of building a new cache.
		return tab
	}

	// Make a new cache.
	newC := buildTypeAssertCache(oldC, t, tab)

	// Update cache. Use compare-and-swap so if multiple threads
	// are fighting to update the cache, at least one of their
	// updates will stick.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return tab
}
// buildTypeAssertCache returns a new open-addressed hash cache containing
// every live entry from oldC plus the new (typ, tab) pair. The table is
// sized to a power of two at most 50% full so linear probing always
// terminates at an empty slot.
func buildTypeAssertCache(oldC *abi.TypeAssertCache, typ *_type, tab *itab) *abi.TypeAssertCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count the number of entries we need.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Figure out how big a table we need.
	// We need at least one more slot than the number of entries
	// so that we are guaranteed an empty slot (for termination).
	newN := n * 2                         // make it at most 50% full
	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2

	// Allocate the new table.
	// The struct declares one entry; the extra newN-1 are allocated
	// past the end and reached via the Entries slice below.
	newSize := unsafe.Sizeof(abi.TypeAssertCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.TypeAssertCacheEntry{})
	newC := (*abi.TypeAssertCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// Fill the new table.
	addEntry := func(typ *_type, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1) // linear probe
		}
	}
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, tab)

	return newC
}
// Empty type assert cache. Contains one entry with a nil Typ (which
// causes a cache lookup to fail immediately.) Mask 0 means the table
// has a single slot.
var emptyTypeAssertCache = abi.TypeAssertCache{Mask: 0}
// interfaceSwitch compares t against the list of cases in s.
// If t matches case i, interfaceSwitch returns the case index i and
// an itab for the pair <t, s.Cases[i]>.
// If there is no match, return N,nil, where N is the number
// of cases.
//
// Like typeAssert, it probabilistically records the result in s.Cache
// on architectures that use interface switch caches.
func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) {
	cases := unsafe.Slice(&s.Cases[0], s.NCases)

	// Results if we don't find a match.
	case_ := len(cases)
	var tab *itab

	// Look through each case in order.
	for i, c := range cases {
		tab = getitab(c, t, true)
		if tab != nil {
			case_ = i
			break
		}
	}

	if !abi.UseInterfaceSwitchCache(goarch.ArchFamily) {
		return case_, tab
	}

	// Maybe update the cache, so the next time the generated code
	// doesn't need to call into the runtime.
	if cheaprand()&1023 != 0 {
		// Only bother updating the cache ~1 in 1000 times.
		// This ensures we don't waste memory on switches, or
		// switch arguments, that only happen a few times.
		return case_, tab
	}
	// Load the current cache.
	oldC := (*abi.InterfaceSwitchCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	if cheaprand()&uint32(oldC.Mask) != 0 {
		// As cache gets larger, choose to update it less often
		// so we can amortize the cost of building a new cache
		// (that cost is linear in oldc.Mask).
		return case_, tab
	}

	// Make a new cache.
	newC := buildInterfaceSwitchCache(oldC, t, case_, tab)

	// Update cache. Use compare-and-swap so if multiple threads
	// are fighting to update the cache, at least one of their
	// updates will stick.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return case_, tab
}
// buildInterfaceSwitchCache constructs an interface switch cache
// containing all the entries from oldC plus the new entry
// (typ,case_,tab).
//
// The table is an open-addressed hash keyed by typ.Hash, sized to a
// power of two at most 50% full so linear probing always terminates
// at an empty slot.
func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count the number of entries we need.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Figure out how big a table we need.
	// We need at least one more slot than the number of entries
	// so that we are guaranteed an empty slot (for termination).
	newN := n * 2                         // make it at most 50% full
	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2

	// Allocate the new table.
	// The struct declares one entry; the extra newN-1 are allocated
	// past the end and reached via the Entries slice below.
	newSize := unsafe.Sizeof(abi.InterfaceSwitchCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.InterfaceSwitchCacheEntry{})
	newC := (*abi.InterfaceSwitchCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// Fill the new table.
	addEntry := func(typ *_type, case_ int, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Case = case_
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1) // linear probe
		}
	}
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), e.Case, (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, case_, tab)

	return newC
}
// Empty interface switch cache. Contains one entry with a nil Typ (which
// causes a cache lookup to fail immediately.) Mask 0 means the table
// has a single slot.
var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{Mask: 0}
// reflect_ifaceE2I is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
//
// It converts empty-interface value e to an interface of type inter,
// storing the result in *dst. Panics (via assertE2I) if e is nil or
// does not implement inter.
//
//go:linkname reflect_ifaceE2I reflect.ifaceE2I
func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}
// reflectlite_ifaceE2I is the internal/reflectlite version of
// reflect_ifaceE2I; see that function for details.
//
//go:linkname reflectlite_ifaceE2I internal/reflectlite.ifaceE2I
func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}
// iterate_itabs calls fn for every non-nil itab in the global itab table.
func iterate_itabs(fn func(*itab)) {
	// Note: only runs during stop the world or with itabLock held,
	// so no other locks/atomics needed.
	t := itabTable
	for i := uintptr(0); i < t.size; i++ {
		m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
		if m != nil {
			fn(m)
		}
	}
}
// staticuint64s is used to avoid allocating in convTx for small integer values.
// staticuint64s[0] == 0, staticuint64s[1] == 1, and so forth.
// It is defined in assembler code so that it is read-only.
var staticuint64s [256]uint64
// getStaticuint64s is called by the reflect package to get a pointer
// to the read-only array.
//
//go:linkname getStaticuint64s
func getStaticuint64s() *[256]uint64 {
	return &staticuint64s
}
// The linker redirects a reference of a method that it determined
// unreachable to a reference to this function, so it will throw if
// ever called.
func unreachableMethod() {
	throw("unreachable method called. linker bug?")
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Lock-free stack.
package runtime
import (
"internal/runtime/atomic"
"unsafe"
)
// lfstack is the head of a lock-free stack.
//
// The zero value of lfstack is an empty list.
//
// This stack is intrusive. Nodes must embed lfnode as the first field.
//
// The stack does not keep GC-visible pointers to nodes, so the caller
// must ensure the nodes are allocated outside the Go heap.
//
// The uint64 holds a tagged pointer: the node address packed with a
// push count (see lfstackPack) to defeat the ABA problem.
type lfstack uint64
// push adds node to the top of the stack.
//
// The per-node push count is folded into the packed head value so that
// a node that is popped and re-pushed produces a different head word
// (ABA protection for the CAS loop below).
func (head *lfstack) push(node *lfnode) {
	node.pushcnt++
	new := lfstackPack(node, node.pushcnt)
	for {
		old := atomic.Load64((*uint64)(head))
		node.next = old
		// Retry until no other pusher/popper has moved the head
		// between our load and the CAS.
		if atomic.Cas64((*uint64)(head), old, new) {
			break
		}
	}
}
// pop removes and returns the top node of the stack, or nil if the
// stack is empty. On CAS failure it backs off with procyield to reduce
// cache-line contention.
func (head *lfstack) pop() unsafe.Pointer {
	var backoff uint32
	// TODO: tweak backoff parameters on other architectures.
	if GOARCH == "arm64" {
		backoff = 128
	}
	for {
		old := atomic.Load64((*uint64)(head))
		if old == 0 {
			return nil
		}
		node := lfstackUnpack(old)
		next := atomic.Load64(&node.next)
		if atomic.Cas64((*uint64)(head), old, next) {
			return unsafe.Pointer(node)
		}

		// Backing off reduces demand on the shared memory location,
		// which decreases memory contention and allows other threads
		// to make quicker progress.
		// Read more in this Arm blog post:
		// https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/multi-threaded-applications-arm
		procyield(backoff)

		// Increase backoff time.
		backoff += backoff / 2
	}
}
// empty reports whether the stack currently has no nodes.
func (head *lfstack) empty() bool {
	return atomic.Load64((*uint64)(head)) == 0
}
// lfnodeValidate panics if node is not a valid address for use with
// lfstack.push. This only needs to be called when node is allocated.
func lfnodeValidate(node *lfnode) {
	// Nodes must live outside the Go heap: the stack stores them as
	// raw uint64s that the GC cannot see.
	if base, _, _ := findObject(uintptr(unsafe.Pointer(node)), 0, 0); base != 0 {
		throw("lfstack node allocated from the heap")
	}
	// Verify the address survives a pack/unpack round trip. The previous
	// code discarded the result of lfstackPack, so addresses with bits
	// outside the tagged-pointer range were never actually rejected and
	// would silently corrupt the stack.
	if lfstackUnpack(lfstackPack(node, ^uintptr(0))) != node {
		throw("bad lfnode address")
	}
}
// lfstackPack combines a node pointer and its push count into a single
// uint64 tagged pointer. Only the low tagBits of cnt are kept as the tag.
func lfstackPack(node *lfnode, cnt uintptr) uint64 {
	return uint64(taggedPointerPack(unsafe.Pointer(node), cnt&(1<<tagBits-1)))
}
// lfstackUnpack extracts the node pointer from a packed head value,
// discarding the push-count tag.
func lfstackUnpack(val uint64) *lfnode {
	return (*lfnode)(taggedPointer(val).pointer())
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/runtime/maps"
"internal/runtime/sys"
"unsafe"
)
// Legacy //go:linkname compatibility shims
//
// The functions below are unused by the toolchain, and exist only for
// compatibility with existing //go:linkname use in the ecosystem.
// linknameIter is the it argument to mapiterinit and mapiternext.
//
// Callers of mapiterinit allocate their own iter structure, which has the
// layout of the pre-Go 1.24 hiter structure, shown here for posterity:
//
// type hiter struct {
// key unsafe.Pointer
// elem unsafe.Pointer
// t *maptype // old map abi.Type
// h *hmap
// buckets unsafe.Pointer
// bptr *bmap
// overflow *[]*bmap
// oldoverflow *[]*bmap
// startBucket uintptr
// offset uint8
// wrapped bool
// B uint8
// i uint8
// bucket uintptr
// checkBucket uintptr
// }
//
// Our structure must maintain compatibility with the old structure. This
// means:
//
// - Our structure must be the same size or smaller than hiter. Otherwise we
// may write outside the caller's hiter allocation.
// - Our structure must have the same pointer layout as hiter, so that the GC
// tracks pointers properly.
//
// Based on analysis of the "hall of shame" users of these linknames:
//
// - The key and elem fields must be kept up to date with the current key/elem.
// Some users directly access the key and elem fields rather than calling
// reflect.mapiterkey/reflect.mapiterelem.
// - The t field must be non-nil after mapiterinit. gonum.org/v1/gonum uses
// this to verify the iterator is initialized.
// - github.com/segmentio/encoding and github.com/RomiChan/protobuf check if h
// is non-nil, but the code has no effect. Thus the value of h does not
// matter. See internal/runtime_reflect/map.go.
type linknameIter struct {
	// Fields from hiter. key and elem must be kept up to date on every
	// step because some linkname users read them directly; typ must be
	// non-nil after mapiterinit (see the compatibility notes above).
	key  unsafe.Pointer
	elem unsafe.Pointer
	typ  *abi.MapType

	// The real iterator.
	it *maps.Iter
}
// mapiterinit is a compatibility wrapper for map iterator for users of
// //go:linkname from before Go 1.24. It is not used by Go itself. New users
// should use reflect or the maps package.
//
// It allocates the real maps.Iter, advances it to the first entry, and
// mirrors the current key/elem into the legacy hiter-shaped fields.
//
// mapiterinit should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/goccy/go-json
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//   - github.com/wI2L/jettison
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapiterinit
func mapiterinit(t *abi.MapType, m *maps.Map, it *linknameIter) {
	if raceenabled && m != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapiterinit))
	}

	it.typ = t

	it.it = new(maps.Iter)
	it.it.Init(t, m)
	it.it.Next()

	it.key = it.it.Key()
	it.elem = it.it.Elem()
}
// reflect_mapiterinit is a compatibility wrapper for map iterator for users of
// //go:linkname from before Go 1.24. It is not used by Go itself. New users
// should use reflect or the maps package. It simply forwards to mapiterinit.
//
// reflect_mapiterinit should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/modern-go/reflect2
//   - gitee.com/quant1x/gox
//   - github.com/v2pro/plz
//   - github.com/wI2L/jettison
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *abi.MapType, m *maps.Map, it *linknameIter) {
	mapiterinit(t, m, it)
}
// mapiternext is a compatibility wrapper for map iterator for users of
// //go:linkname from before Go 1.24. It is not used by Go itself. New users
// should use reflect or the maps package.
//
// It advances the underlying maps.Iter and mirrors the new key/elem
// into the legacy hiter-shaped fields.
//
// mapiternext should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//   - gonum.org/v1/gonum
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapiternext
func mapiternext(it *linknameIter) {
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(it.it.Map()), callerpc, abi.FuncPCABIInternal(mapiternext))
	}

	it.it.Next()

	it.key = it.it.Key()
	it.elem = it.it.Elem()
}
// reflect_mapiternext is a compatibility wrapper for map iterator for users of
// //go:linkname from before Go 1.24. It is not used by Go itself. New users
// should use reflect or the maps package. It simply forwards to mapiternext.
//
// reflect_mapiternext is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/goccy/go-json
//   - github.com/v2pro/plz
//   - github.com/wI2L/jettison
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *linknameIter) {
	mapiternext(it)
}
// reflect_mapiterkey is a compatibility wrapper for map iterator for users of
// //go:linkname from before Go 1.24. It is not used by Go itself. New users
// should use reflect or the maps package. It returns a pointer to the
// iterator's current key.
//
// reflect_mapiterkey should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/goccy/go-json
//   - gonum.org/v1/gonum
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *linknameIter) unsafe.Pointer {
	return it.it.Key()
}
// reflect_mapiterelem is a compatibility wrapper for map iterator for users of
// //go:linkname from before Go 1.24. It is not used by Go itself. New users
// should use reflect or the maps package. It returns a pointer to the
// iterator's current element (value).
//
// reflect_mapiterelem should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/goccy/go-json
//   - gonum.org/v1/gonum
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiterelem reflect.mapiterelem
func reflect_mapiterelem(it *linknameIter) unsafe.Pointer {
	return it.it.Elem()
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"unsafe"
)
// listHead points to the head of an intrusive doubly-linked list.
//
// Prior to use, you must call init to store the offset of listNode fields.
//
// Every object in the list should be the same type.
type listHead struct {
	obj         unsafe.Pointer // current head object, or nil if empty
	initialized bool           // set by init; getNode throws if false
	nodeOffset  uintptr        // offset of the listNode field within each object
}
// init initializes the list head. off is the offset (via unsafe.Offsetof) of
// the listNode field in the objects in the list.
func (head *listHead) init(off uintptr) {
	head.initialized = true
	head.nodeOffset = off
}
// listNode is the linked list node for objects in a listHead list.
//
// listNode must be stored as a field in objects placed in the linked list. The
// offset of the field is registered via listHead.init.
//
// For example:
//
//	type foo struct {
//		val int
//
//		node listNode
//	}
//
//	var fooHead listHead
//	fooHead.init(unsafe.Offsetof(foo{}.node))
type listNode struct {
	prev unsafe.Pointer // previous object in the list (nil at the head)
	next unsafe.Pointer // next object in the list (nil at the tail)
}
// getNode returns the listNode embedded in object p, using the offset
// registered via init. Returns nil if p is nil; throws if init was
// never called.
func (head *listHead) getNode(p unsafe.Pointer) *listNode {
	if !head.initialized {
		throw("runtime: uninitialized listHead")
	}
	if p == nil {
		return nil
	}
	return (*listNode)(unsafe.Add(p, head.nodeOffset))
}
// Returns true if the list is empty.
func (head *listHead) empty() bool {
	return head.obj == nil
}
// Returns the head of the list without removing it.
func (head *listHead) head() unsafe.Pointer {
	return head.obj
}
// Push p onto the front of the list.
func (head *listHead) push(p unsafe.Pointer) {
	node := head.getNode(p)
	old := head.obj

	// p becomes the new head: it points forward at the old head, and
	// the old head (if the list was non-empty) points back at p.
	node.next = old
	if oldNode := head.getNode(old); oldNode != nil {
		oldNode.prev = p
	}
	head.obj = p
}
// Pop removes the head of the list and returns it, or nil if the list
// is empty.
func (head *listHead) pop() unsafe.Pointer {
	p := head.obj
	if p == nil {
		return nil
	}

	// The object after p becomes the new head.
	node := head.getNode(p)
	newHead := node.next
	head.obj = newHead

	// p is no longer on the list. Clear next to remove unused references.
	// N.B. as the head, p's prev must already be nil.
	node.next = nil

	// The new head (if any) no longer has a predecessor.
	if newHeadNode := head.getNode(newHead); newHeadNode != nil {
		newHeadNode.prev = nil
	}

	return p
}
// Remove p from the middle of the list.
func (head *listHead) remove(p unsafe.Pointer) {
	if head.obj == p {
		// Use pop to ensure head is updated when removing the head.
		head.pop()
		return
	}

	node := head.getNode(p)
	prev, next := node.prev, node.next

	// Splice p out: link its neighbors to each other.
	// getNode returns nil for a nil neighbor, so boundary cases
	// (removing the tail) fall out naturally.
	if prevNode := head.getNode(prev); prevNode != nil {
		prevNode.next = next
	}
	if nextNode := head.getNode(next); nextNode != nil {
		nextNode.prev = prev
	}

	// Clear p's links so it holds no stale references.
	node.prev = nil
	node.next = nil
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"unsafe"
)
// The types in this file are exact copies of the types in list.go, but with
// unsafe.Pointer replaced with uintptr for use where write barriers must be
// avoided, such as uses of muintptr, puintptr, guintptr.
//
// Objects in these lists must be kept alive via another real reference.
// listHeadManual points to the head of an intrusive doubly-linked list of
// objects.
//
// Prior to use, you must call init to store the offset of listNodeManual fields.
//
// Every object in the list should be the same type.
type listHeadManual struct {
	obj         uintptr // current head object, or 0 if empty; not GC-visible
	initialized bool    // set by init; getNode throws if false
	nodeOffset  uintptr // offset of the listNodeManual field within each object
}
// init initializes the list head. off is the offset (via unsafe.Offsetof) of
// the listNodeManual field in the objects in the list.
func (head *listHeadManual) init(off uintptr) {
	head.initialized = true
	head.nodeOffset = off
}
// listNodeManual is the linked list node for objects in a listHeadManual list.
//
// listNodeManual must be stored as a field in objects placed in the linked list.
// The offset of the field is registered via listHeadManual.init.
//
// For example:
//
//	type foo struct {
//		val int
//
//		node listNodeManual
//	}
//
//	var fooHead listHeadManual
//	fooHead.init(unsafe.Offsetof(foo{}.node))
type listNodeManual struct {
	prev uintptr // previous object in the list (0 at the head); not GC-visible
	next uintptr // next object in the list (0 at the tail); not GC-visible
}
// getNode returns the listNodeManual embedded in object p, using the offset
// registered via init. Returns nil if p is nil; throws if init was
// never called.
func (head *listHeadManual) getNode(p unsafe.Pointer) *listNodeManual {
	if !head.initialized {
		throw("runtime: uninitialized listHead")
	}
	if p == nil {
		return nil
	}
	return (*listNodeManual)(unsafe.Add(p, head.nodeOffset))
}
// Returns true if the list is empty.
func (head *listHeadManual) empty() bool {
	return head.obj == 0
}
// Returns the head of the list without removing it.
func (head *listHeadManual) head() unsafe.Pointer {
	return unsafe.Pointer(head.obj)
}
// Push p onto the front of the list.
func (head *listHeadManual) push(p unsafe.Pointer) {
	node := head.getNode(p)
	old := head.obj

	// p becomes the new head: it points forward at the old head, and
	// the old head (if the list was non-empty) points back at p.
	// No write barriers: these stores are plain uintptrs.
	node.next = old
	if oldNode := head.getNode(unsafe.Pointer(old)); oldNode != nil {
		oldNode.prev = uintptr(p)
	}
	head.obj = uintptr(p)
}
// Pop removes the head of the list and returns it, or nil if the list
// is empty.
func (head *listHeadManual) pop() unsafe.Pointer {
	if head.obj == 0 {
		return nil
	}
	p := unsafe.Pointer(head.obj)

	// The object after p becomes the new head.
	node := head.getNode(p)
	newHead := node.next
	head.obj = newHead

	// p is no longer on the list. Clear next to remove unused references.
	// N.B. as the head, p's prev must already be 0.
	node.next = 0

	// The new head (if any) no longer has a predecessor.
	if newHeadNode := head.getNode(unsafe.Pointer(newHead)); newHeadNode != nil {
		newHeadNode.prev = 0
	}

	return p
}
// Remove p from the middle of the list.
func (head *listHeadManual) remove(p unsafe.Pointer) {
	if unsafe.Pointer(head.obj) == p {
		// Use pop to ensure head is updated when removing the head.
		head.pop()
		return
	}

	node := head.getNode(p)
	prev, next := node.prev, node.next

	// Splice p out: link its neighbors to each other.
	// getNode returns nil for a 0 neighbor, so boundary cases
	// (removing the tail) fall out naturally.
	if prevNode := head.getNode(unsafe.Pointer(prev)); prevNode != nil {
		prevNode.next = next
	}
	if nextNode := head.getNode(unsafe.Pointer(next)); nextNode != nil {
		nextNode.prev = prev
	}

	// Clear p's links so it holds no stale references.
	node.prev = 0
	node.next = 0
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build dragonfly || freebsd || linux
package runtime
import (
"internal/runtime/atomic"
"unsafe"
)
// We use the uintptr mutex.key and note.key as a uint32.
// key32 reinterprets the low 32 bits of *p for use with futex calls.
//
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}
// One-time notifications.

// noteclear resets n so it can be slept on and woken exactly once more.
func noteclear(n *note) {
	n.key = 0
}
// notewakeup wakes up (at most) one sleeper on n. Each note may be woken
// only once between noteclear calls; a second wakeup throws.
func notewakeup(n *note) {
	// Swap the key to 1; a non-zero previous value means someone
	// already woke this note.
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}
// notesleep blocks until n is woken by notewakeup. Must be called on g0.
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1) // sleep indefinitely
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	// Loop: futexsleep can wake spuriously, so re-check the key.
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}
// notetsleep_internal sleeps on n for at most ns nanoseconds (forever if
// ns < 0) and reports whether the note was woken before the deadline.
//
// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		// No deadline: identical loop to notesleep.
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	// Fast path: already woken.
	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			// Cap each sleep so libc interceptors get polled.
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now // sleep only for the remaining time
	}
	// Report whether we were woken (vs. timed out).
	return atomic.Load(key32(&n.key)) != 0
}
// notetsleep sleeps on n for at most ns nanoseconds and reports whether
// the note was woken before the deadline. Intended for g0.
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}
	return notetsleep_internal(n, ns)
}
// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	// Enter syscall state so the P can be handed off while we block.
	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}
// beforeIdle is a scheduler hook; on this platform there is never an
// alternate goroutine to run before idling.
func beforeIdle(int64, int64) (*g, bool) {
	return nil, false
}
// checkTimeouts is a no-op on this platform.
func checkTimeouts() {}
// semacreate is a no-op on futex platforms: the m's waitsema word
// itself serves as the semaphore.
//
//go:nosplit
func semacreate(mp *m) {}
// semasleep decrements this m's semaphore and, if it goes negative,
// sleeps on it. Returns 0 once the semaphore is acquired. With a
// timeout (ns >= 0) it tries a single sleep and returns -1 if the
// semaphore was not acquired.
//
//go:nosplit
func semasleep(ns int64) int32 {
	mp := getg().m

	// Decrement once up front; afterwards re-read the current value
	// on each iteration.
	for v := atomic.Xadd(&mp.waitsema, -1); ; v = atomic.Load(&mp.waitsema) {
		if int32(v) >= 0 {
			return 0
		}
		futexsleep(&mp.waitsema, v, ns)
		if ns >= 0 {
			if int32(v) >= 0 {
				return 0
			} else {
				return -1
			}
		}
	}
}
// semawakeup increments mp's semaphore, waking one sleeper via futex
// exactly when the increment brings the count up to zero (i.e. someone
// was waiting).
//
//go:nosplit
func semawakeup(mp *m) {
	v := atomic.Xadd(&mp.waitsema, 1)
	if v == 0 {
		futexwakeup(&mp.waitsema, 1)
	}
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !wasm
package runtime
import (
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/gc"
"unsafe"
)
// This implementation depends on OS-specific implementations of
//
// func semacreate(mp *m)
// Create a semaphore for mp, if it does not already have one.
//
// func semasleep(ns int64) int32
// If ns < 0, acquire m's semaphore and return 0.
// If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
// Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
// func semawakeup(mp *m)
// Wake up mp, which is or will soon be sleeping on its semaphore.
// The mutex state consists of four flags and a pointer. The flag at bit 0,
// mutexLocked, represents the lock itself. Bit 1, mutexSleeping, is a hint that
// the pointer is non-nil. The fast paths for locking and unlocking the mutex
// are based on atomic 8-bit swap operations on the low byte; bits 2 through 7
// are unused.
//
// Bit 8, mutexSpinning, is a try-lock that grants a waiting M permission to
// spin on the state word. Most other Ms must attempt to spend their time
// sleeping to reduce traffic on the cache line. This is the "spin bit" for
// which the implementation is named. (The anti-starvation mechanism also grants
// temporary permission for an M to spin.)
//
// Bit 9, mutexStackLocked, is a try-lock that grants an unlocking M permission
// to inspect the list of waiting Ms and to pop an M off of that stack.
//
// The upper bits hold a (partial) pointer to the M that most recently went to
// sleep. The sleeping Ms form a stack linked by their mWaitList.next fields.
// Because the fast paths use an 8-bit swap on the low byte of the state word,
// we'll need to reconstruct the full M pointer from the bits we have. Most Ms
// are allocated on the heap, and have a known alignment and base offset. (The
// offset is due to mallocgc's allocation headers.) The main program thread uses
// a static M value, m0. We check for m0 specifically and add a known offset
// otherwise.
// Spin parameters exported (by name) to proc.go for the sync.Mutex
// implementation. They mirror mutexActiveSpinCount/mutexActiveSpinSize below.
const (
	active_spin     = 4  // referenced in proc.go for sync.Mutex implementation
	active_spin_cnt = 30 // referenced in proc.go for sync.Mutex implementation
)
const (
	mutexLocked      = 0x001 // bit 0: the lock itself
	mutexSleeping    = 0x002 // bit 1: hint that the waiter-pointer bits may be non-zero
	mutexSpinning    = 0x100 // bit 8: try-lock granting one waiting M permission to spin
	mutexStackLocked = 0x200 // bit 9: try-lock guarding the list of sleeping Ms

	mutexMMask   = 0x3FF               // all flag bits; the rest of the word holds the waiter pointer
	mutexMOffset = gc.MallocHeaderSize // alignment of heap-allocated Ms (those other than m0)

	mutexActiveSpinCount  = 4  // lock2 attempts this many procyield spins before yielding
	mutexActiveSpinSize   = 30 // iterations passed to procyield per active spin
	mutexPassiveSpinCount = 1  // osyield attempts after active spinning is exhausted

	mutexTailWakePeriod = 16 // unlock2Wake wakes the tail M on average once per this many wakes
)
// key8 returns a pointer to the least significant byte of *p, the byte that
// lock2/unlock2 operate on with their atomic 8-bit fast paths.
//
//go:nosplit
func key8(p *uintptr) *uint8 {
	b := (*[8]uint8)(unsafe.Pointer(p))
	if goarch.BigEndian {
		// On big-endian machines the low-order byte is the last one.
		return &b[goarch.PtrSize-1]
	}
	return &b[0]
}
// mWaitList is part of the M struct, and holds the list of Ms that are waiting
// for a particular runtime.mutex.
//
// When an M is unable to immediately obtain a lock, it adds itself to the list
// of Ms waiting for the lock. It does that via this struct's next field,
// forming a singly-linked list with the mutex's key field pointing to the head
// of the list.
type mWaitList struct {
	// next is the next m waiting for the lock; the head of the list lives in
	// the upper bits of the mutex's key field.
	next muintptr
	// startTicks is when this m started waiting for the current lock holder,
	// in cputicks. A sampled unlock2 refreshes it (see unlock2Wake).
	startTicks int64
}
// lockVerifyMSize confirms that we can recreate the low bits of the M pointer.
// The allocated size of an M (size class plus malloc header) must be a
// multiple of mutexMMask+1, so the flag bits never overlap the pointer bits.
func lockVerifyMSize() {
	size := roundupsize(unsafe.Sizeof(mPadded{}), false) + gc.MallocHeaderSize
	if size&mutexMMask != 0 {
		print("M structure uses sizeclass ", size, "/", hex(size), " bytes; ",
			"incompatible with mutex flag mask ", hex(mutexMMask), "\n")
		throw("runtime.m memory alignment too small for spinbit mutex")
	}
}
// mutexWaitListHead recovers a full muintptr that was missing its low bits.
// With the exception of the static m0 value, it requires allocating runtime.m
// values in a size class with a particular minimum alignment. The 2048-byte
// size class allows recovering the full muintptr value even after overwriting
// the low 11 bits with flags. We can use those 11 bits as 3 flags and an
// atomically-swapped byte.
//
//go:nosplit
func mutexWaitListHead(v uintptr) muintptr {
	highBits := v &^ mutexMMask
	if highBits == 0 {
		// No waiting Ms recorded in the state word.
		return 0
	}
	m0bits := muintptr(unsafe.Pointer(&m0))
	if highBits == uintptr(m0bits)&^mutexMMask {
		// The static m0 is not heap-allocated; return its exact address.
		return m0bits
	}
	// Heap-allocated Ms sit a fixed offset past their allocation's start.
	return muintptr(highBits + mutexMOffset)
}
// mutexPreferLowLatency reports if this mutex prefers low latency at the risk
// of performance collapse. If so, we can allow all waiting threads to spin on
// the state word rather than go to sleep.
//
// TODO: We could have the waiting Ms each spin on their own private cache line,
// especially if we can put a bound on the on-CPU time that would consume.
//
// TODO: If there's a small set of mutex values with special requirements, they
// could make use of a more specialized lock2/unlock2 implementation. Otherwise,
// we're constrained to what we can fit within a single uintptr with no
// additional storage on the M for each lock held.
//
//go:nosplit
func mutexPreferLowLatency(l *mutex) bool {
	if l == &sched.lock {
		// We often expect sched.lock to pass quickly between Ms in a way that
		// each M has unique work to do: for instance when we stop-the-world
		// (bringing each P to idle) or add new netpoller-triggered work to the
		// global run queue.
		return true
	}
	return false
}
// mutexContended reports whether any M is recorded as waiting for l, i.e.
// whether the state word's upper (pointer) bits are non-zero.
func mutexContended(l *mutex) bool {
	k := atomic.Loaduintptr(&l.key)
	return k&^mutexMMask != 0
}
// lock acquires l, routing through the lock-rank checking wrapper.
func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}
// lock2 acquires l. It first tries a speculative 8-bit swap of the low byte;
// on contention it spins when the spin bit (or anti-starvation/low-latency
// policy) permits, and otherwise pushes this M onto the mutex's stack of
// sleeping waiters and sleeps on the M's semaphore until an unlock wakes it.
func lock2(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	k8 := key8(&l.key)

	// Speculative grab for lock.
	v8 := atomic.Xchg8(k8, mutexLocked)
	if v8&mutexLocked == 0 {
		if v8&mutexSleeping != 0 {
			// The swap cleared the sleeping hint; restore it so unlock2 still
			// takes its slow path and wakes the sleepers.
			atomic.Or8(k8, mutexSleeping)
		}
		return
	}
	semacreate(gp.m)

	var startTime int64
	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for mutexActiveSpinCount attempts.
	spin := 0
	if numCPUStartup > 1 {
		spin = mutexActiveSpinCount
	}

	var weSpin, atTail, haveTimers bool
	v := atomic.Loaduintptr(&l.key)
tryAcquire:
	for i := 0; ; i++ {
		if v&mutexLocked == 0 {
			if weSpin {
				// We hold the spin bit; release it as we take the lock.
				next := (v &^ mutexSpinning) | mutexSleeping | mutexLocked
				if next&^mutexMMask == 0 {
					// The fast-path Xchg8 may have cleared mutexSleeping. Fix
					// the hint so unlock2 knows when to use its slow path.
					next = next &^ mutexSleeping
				}
				if atomic.Casuintptr(&l.key, v, next) {
					gp.m.mLockProfile.end(startTime)
					return
				}
			} else {
				prev8 := atomic.Xchg8(k8, mutexLocked|mutexSleeping)
				if prev8&mutexLocked == 0 {
					gp.m.mLockProfile.end(startTime)
					return
				}
			}
			v = atomic.Loaduintptr(&l.key)
			continue tryAcquire
		}

		// Try to claim the spin bit so we're allowed to busy-wait.
		if !weSpin && v&mutexSpinning == 0 && atomic.Casuintptr(&l.key, v, v|mutexSpinning) {
			v |= mutexSpinning
			weSpin = true
		}

		if weSpin || atTail || mutexPreferLowLatency(l) {
			if i < spin {
				procyield(mutexActiveSpinSize)
				v = atomic.Loaduintptr(&l.key)
				continue tryAcquire
			} else if i < spin+mutexPassiveSpinCount {
				osyield() // TODO: Consider removing this step. See https://go.dev/issue/69268.
				v = atomic.Loaduintptr(&l.key)
				continue tryAcquire
			}
		}

		// Go to sleep
		if v&mutexLocked == 0 {
			throw("runtime·lock: sleeping while lock is available")
		}

		// Collect times for mutex profile (seen in unlock2 only via mWaitList),
		// and for "/sync/mutex/wait/total:seconds" metric (to match).
		if !haveTimers {
			gp.m.mWaitList.startTicks = cputicks()
			startTime = gp.m.mLockProfile.start()
			haveTimers = true
		}

		// Store the current head of the list of sleeping Ms in our gp.m.mWaitList.next field
		gp.m.mWaitList.next = mutexWaitListHead(v)

		// Pack a (partial) pointer to this M with the current lock state bits
		next := (uintptr(unsafe.Pointer(gp.m)) &^ mutexMMask) | v&mutexMMask | mutexSleeping
		if weSpin { // If we were spinning, prepare to retire
			next = next &^ mutexSpinning
		}

		if atomic.Casuintptr(&l.key, v, next) {
			weSpin = false
			// We've pushed ourselves onto the stack of waiters. Wait.
			semasleep(-1)
			atTail = gp.m.mWaitList.next == 0 // we were at risk of starving
			i = 0
		}
		gp.m.mWaitList.next = 0
		v = atomic.Loaduintptr(&l.key)
	}
}
// unlock releases l, routing through the lock-rank checking wrapper.
func unlock(l *mutex) {
	unlockWithRank(l)
}
// unlock2 releases l. When the sleeping hint is set it calls unlock2Wake to
// maintain the waiter list. When the contention profiler samples this unlock,
// it trades the release for the stack lock (via CAS) so this call site, not a
// faster competing lock2/unlock2 pair, is blamed for the waiters' delay.
//
// We might not be holding a p in this code.
//
//go:nowritebarrier
func unlock2(l *mutex) {
	gp := getg()

	var prev8 uint8
	var haveStackLock bool
	var endTicks int64
	if !mutexSampleContention() {
		// Not collecting a sample for the contention profile, do the quick release
		prev8 = atomic.Xchg8(key8(&l.key), 0)
	} else {
		// If there's contention, we'll sample it. Don't allow another
		// lock2/unlock2 pair to finish before us and take our blame. Prevent
		// that by trading for the stack lock with a CAS.
		v := atomic.Loaduintptr(&l.key)
		for {
			if v&^mutexMMask == 0 || v&mutexStackLocked != 0 {
				// No contention, or (stack lock unavailable) no way to calculate it
				prev8 = atomic.Xchg8(key8(&l.key), 0)
				endTicks = 0
				break
			}

			// There's contention, the stack lock appeared to be available, and
			// we'd like to collect a sample for the contention profile.
			if endTicks == 0 {
				// Read the time before releasing the lock. The profile will be
				// strictly smaller than what other threads would see by timing
				// their lock calls.
				endTicks = cputicks()
			}
			next := (v | mutexStackLocked) &^ (mutexLocked | mutexSleeping)
			if atomic.Casuintptr(&l.key, v, next) {
				haveStackLock = true
				prev8 = uint8(v)
				// The fast path of lock2 may have cleared mutexSleeping.
				// Restore it so we're sure to call unlock2Wake below.
				prev8 |= mutexSleeping
				break
			}
			v = atomic.Loaduintptr(&l.key)
		}
	}
	if prev8&mutexLocked == 0 {
		throw("unlock of unlocked lock")
	}

	if prev8&mutexSleeping != 0 {
		unlock2Wake(l, haveStackLock, endTicks)
	}

	gp.m.mLockProfile.store()

	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
// mutexSampleContention returns whether the current mutex operation should
// report any contention it discovers.
func mutexSampleContention() bool {
	// Sample roughly one in mutexprofilerate operations; a zero rate
	// disables sampling entirely.
	if rate := atomic.Load64(&mutexprofilerate); rate > 0 {
		return cheaprandu64()%rate == 0
	}
	return false
}
// unlock2Wake updates the list of Ms waiting on l, waking an M if necessary.
//
// haveStackLock reports whether the caller already holds mutexStackLocked,
// in which case we go straight to manipulating the list. endTicks, when
// non-zero, is the cputicks timestamp read by a sampled unlock2; it is used
// to charge this unlock for the time the listed Ms have been waiting.
//
//go:nowritebarrier
func unlock2Wake(l *mutex, haveStackLock bool, endTicks int64) {
	v := atomic.Loaduintptr(&l.key)

	// On occasion, seek out and wake the M at the bottom of the stack so it
	// doesn't starve.
	antiStarve := cheaprandn(mutexTailWakePeriod) == 0

	if haveStackLock {
		goto useStackLock
	}

	if !(antiStarve || // avoiding starvation may require a wake
		v&mutexSpinning == 0 || // no spinners means we must wake
		mutexPreferLowLatency(l)) { // prefer waiters be awake as much as possible
		return
	}

	for {
		if v&^mutexMMask == 0 || v&mutexStackLocked != 0 {
			// No waiting Ms means nothing to do.
			//
			// If the stack lock is unavailable, its owner would make the same
			// wake decisions that we would, so there's nothing for us to do.
			//
			// Although: This thread may have a different call stack, which
			// would result in a different entry in the mutex contention profile
			// (upon completion of go.dev/issue/66999). That could lead to weird
			// results if a slow critical section ends but another thread
			// quickly takes the lock, finishes its own critical section,
			// releases the lock, and then grabs the stack lock. That quick
			// thread would then take credit (blame) for the delay that this
			// slow thread caused. The alternative is to have more expensive
			// atomic operations (a CAS) on the critical path of unlock2.
			return
		}
		// Other M's are waiting for the lock.
		// Obtain the stack lock, and pop off an M.
		next := v | mutexStackLocked
		if atomic.Casuintptr(&l.key, v, next) {
			break
		}
		v = atomic.Loaduintptr(&l.key)
	}

	// We own the mutexStackLocked flag. New Ms may push themselves onto the
	// stack concurrently, but we're now the only thread that can remove or
	// modify the Ms that are sleeping in the list.
useStackLock:

	if endTicks != 0 {
		// Find the M at the bottom of the stack of waiters, which has been
		// asleep for the longest. Take the average of its wait time and the
		// head M's wait time for the mutex contention profile, matching the
		// estimate we do in semrelease1 (for sync.Mutex contention).
		//
		// We don't keep track of the tail node (we don't need it often), so do
		// an O(N) walk on the list of sleeping Ms to find it.
		head := mutexWaitListHead(v).ptr()
		for node, n := head, 0; ; {
			n++
			next := node.mWaitList.next.ptr()
			if next == nil {
				cycles := ((endTicks - head.mWaitList.startTicks) + (endTicks - node.mWaitList.startTicks)) / 2
				node.mWaitList.startTicks = endTicks
				head.mWaitList.startTicks = endTicks
				getg().m.mLockProfile.recordUnlock(cycles * int64(n))
				break
			}
			node = next
		}
	}

	var committed *m // If we choose an M within the stack, we've made a promise to wake it
	for {
		headM := v &^ mutexMMask
		flags := v & (mutexMMask &^ mutexStackLocked) // preserve low bits, but release stack lock

		mp := mutexWaitListHead(v).ptr()
		wakem := committed
		if committed == nil {
			if v&mutexSpinning == 0 || mutexPreferLowLatency(l) {
				wakem = mp
			}
			if antiStarve {
				// Wake the M at the bottom of the stack of waiters. (This is
				// O(N) with the number of waiters.)
				wakem = mp
				prev := mp
				for {
					next := wakem.mWaitList.next.ptr()
					if next == nil {
						break
					}
					prev, wakem = wakem, next
				}
				if wakem != mp {
					committed = wakem
					prev.mWaitList.next = wakem.mWaitList.next
					// An M sets its own startTicks when it first goes to sleep.
					// When an unlock operation is sampled for the mutex
					// contention profile, it takes blame for the entire list of
					// waiting Ms but only updates the startTicks value at the
					// tail. Copy any updates to the next-oldest M.
					prev.mWaitList.startTicks = wakem.mWaitList.startTicks
				}
			}
		}

		if wakem == mp {
			// We're popping the head; the new head is its successor.
			headM = uintptr(mp.mWaitList.next) &^ mutexMMask
		}

		next := headM | flags
		if atomic.Casuintptr(&l.key, v, next) {
			if wakem != nil {
				// Claimed an M. Wake it.
				semawakeup(wakem)
			}
			return
		}
		v = atomic.Loaduintptr(&l.key)
	}
}
// Code generated by mklockrank.go; DO NOT EDIT.
package runtime
// lockRank is the rank of a runtime lock, used by the lock-rank checker.
// (Generated by mklockrank.go; edit the generator, not this file.)
type lockRank int

// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

	lockRankSysmon
	lockRankScavenge
	lockRankForcegc
	lockRankComputeMaxProcs
	lockRankUpdateMaxProcsG
	lockRankDefer
	lockRankSweepWaiters
	lockRankAssistQueue
	lockRankStrongFromWeakQueue
	lockRankCleanupQueue
	lockRankSweep
	lockRankTestR
	lockRankTestW
	lockRankVgetrandom
	lockRankTimerSend
	lockRankAllocmW
	lockRankExecW
	lockRankCpuprof
	lockRankPollCache
	lockRankPollDesc
	lockRankWakeableSleep
	lockRankHchan
	// SCHED
	lockRankAllocmR
	lockRankExecR
	lockRankSched
	lockRankAllg
	lockRankAllp
	lockRankNotifyList
	lockRankSudog
	lockRankTimers
	lockRankTimer
	lockRankNetpollInit
	lockRankRoot
	lockRankItab
	lockRankReflectOffs
	lockRankTypelinks
	lockRankSynctest
	lockRankUserArenaState
	// TRACEGLOBAL
	lockRankTraceBuf
	lockRankTraceStrings
	// MALLOC
	lockRankFin
	lockRankSpanSetSpine
	lockRankMspanSpecial
	lockRankTraceTypeTab
	// MPROF
	lockRankGcBitsArenas
	lockRankProfInsert
	lockRankProfBlock
	lockRankProfMemActive
	lockRankProfMemFuture
	// STACKGROW
	lockRankGscan
	lockRankStackpool
	lockRankStackLarge
	lockRankHchanLeaf
	// WB
	lockRankWbufSpans
	lockRankXRegAlloc
	lockRankSpanSPMCs
	lockRankMheap
	lockRankMheapSpecial
	lockRankGlobalAlloc
	// TRACE
	lockRankTrace
	lockRankTraceStackTab
	lockRankPanic
	lockRankDeadlock
	lockRankRaceFini
	lockRankAllocmRInternal
	lockRankExecRInternal
	lockRankTestRInternal
)

// lockRankLeafRank is the rank of lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
// lockNames gives the names associated with each of the above ranks.
// Entries are indexed by the lockRank constants above.
var lockNames = []string{
	lockRankSysmon:              "sysmon",
	lockRankScavenge:            "scavenge",
	lockRankForcegc:             "forcegc",
	lockRankComputeMaxProcs:     "computeMaxProcs",
	lockRankUpdateMaxProcsG:     "updateMaxProcsG",
	lockRankDefer:               "defer",
	lockRankSweepWaiters:        "sweepWaiters",
	lockRankAssistQueue:         "assistQueue",
	lockRankStrongFromWeakQueue: "strongFromWeakQueue",
	lockRankCleanupQueue:        "cleanupQueue",
	lockRankSweep:               "sweep",
	lockRankTestR:               "testR",
	lockRankTestW:               "testW",
	lockRankVgetrandom:          "vgetrandom",
	lockRankTimerSend:           "timerSend",
	lockRankAllocmW:             "allocmW",
	lockRankExecW:               "execW",
	lockRankCpuprof:             "cpuprof",
	lockRankPollCache:           "pollCache",
	lockRankPollDesc:            "pollDesc",
	lockRankWakeableSleep:       "wakeableSleep",
	lockRankHchan:               "hchan",
	lockRankAllocmR:             "allocmR",
	lockRankExecR:               "execR",
	lockRankSched:               "sched",
	lockRankAllg:                "allg",
	lockRankAllp:                "allp",
	lockRankNotifyList:          "notifyList",
	lockRankSudog:               "sudog",
	lockRankTimers:              "timers",
	lockRankTimer:               "timer",
	lockRankNetpollInit:         "netpollInit",
	lockRankRoot:                "root",
	lockRankItab:                "itab",
	lockRankReflectOffs:         "reflectOffs",
	lockRankTypelinks:           "typelinks",
	lockRankSynctest:            "synctest",
	lockRankUserArenaState:      "userArenaState",
	lockRankTraceBuf:            "traceBuf",
	lockRankTraceStrings:        "traceStrings",
	lockRankFin:                 "fin",
	lockRankSpanSetSpine:        "spanSetSpine",
	lockRankMspanSpecial:        "mspanSpecial",
	lockRankTraceTypeTab:        "traceTypeTab",
	lockRankGcBitsArenas:        "gcBitsArenas",
	lockRankProfInsert:          "profInsert",
	lockRankProfBlock:           "profBlock",
	lockRankProfMemActive:       "profMemActive",
	lockRankProfMemFuture:       "profMemFuture",
	lockRankGscan:               "gscan",
	lockRankStackpool:           "stackpool",
	lockRankStackLarge:          "stackLarge",
	lockRankHchanLeaf:           "hchanLeaf",
	lockRankWbufSpans:           "wbufSpans",
	lockRankXRegAlloc:           "xRegAlloc",
	lockRankSpanSPMCs:           "spanSPMCs",
	lockRankMheap:               "mheap",
	lockRankMheapSpecial:        "mheapSpecial",
	lockRankGlobalAlloc:         "globalAlloc",
	lockRankTrace:               "trace",
	lockRankTraceStackTab:       "traceStackTab",
	lockRankPanic:               "panic",
	lockRankDeadlock:            "deadlock",
	lockRankRaceFini:            "raceFini",
	lockRankAllocmRInternal:     "allocmRInternal",
	lockRankExecRInternal:       "execRInternal",
	lockRankTestRInternal:       "testRInternal",
}
// String returns the name of the given lock rank, with placeholders for the
// zero rank ("UNKNOWN"), the leaf rank ("LEAF"), and out-of-range values
// ("BAD RANK").
func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankSysmon: {},
lockRankScavenge: {lockRankSysmon},
lockRankForcegc: {lockRankSysmon},
lockRankComputeMaxProcs: {lockRankSysmon},
lockRankUpdateMaxProcsG: {lockRankSysmon},
lockRankDefer: {},
lockRankSweepWaiters: {},
lockRankAssistQueue: {},
lockRankStrongFromWeakQueue: {},
lockRankCleanupQueue: {},
lockRankSweep: {},
lockRankTestR: {},
lockRankTestW: {},
lockRankVgetrandom: {},
lockRankTimerSend: {},
lockRankAllocmW: {},
lockRankExecW: {},
lockRankCpuprof: {},
lockRankPollCache: {},
lockRankPollDesc: {},
lockRankWakeableSleep: {},
lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan},
lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan},
lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan},
lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR},
lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
lockRankNotifyList: {},
lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList},
lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers},
lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers},
lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer},
lockRankRoot: {},
lockRankItab: {},
lockRankReflectOffs: {lockRankItab},
lockRankTypelinks: {},
lockRankSynctest: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankRoot, lockRankItab, lockRankReflectOffs},
lockRankUserArenaState: {},
lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial},
lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture},
lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankXRegAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
lockRankSpanSPMCs: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankXRegAlloc, lockRankSpanSPMCs, lockRankMheap, lockRankMheapSpecial},
lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTypelinks, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
lockRankPanic: {},
lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
lockRankRaceFini: {lockRankPanic},
lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR},
lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR},
lockRankTestRInternal: {lockRankTestR, lockRankTestW},
}
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.staticlockranking
package runtime
// staticLockRanking reports whether the static lock ranking checks are
// enabled; this file provides the no-op implementations used when the
// staticlockranking experiment is off.
const staticLockRanking = false

// lockRankStruct is embedded in mutex, but is empty when static lock ranking is
// disabled (the default).
type lockRankStruct struct {
}
// lockInit would record the rank of l for checking; it is a no-op when
// static lock ranking is disabled.
func lockInit(l *mutex, rank lockRank) {
}
// getLockRank returns the rank recorded for l. With static lock ranking
// disabled no per-lock rank exists, so it always returns 0.
func getLockRank(l *mutex) lockRank {
	return 0
}
// lockWithRank acquires l, ignoring rank since checking is disabled.
func lockWithRank(l *mutex, rank lockRank) {
	lock2(l)
}
// acquireLockRankAndM acquires the current M (disabling preemption)
// without recording any rank, since static lock ranking is disabled.
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func acquireLockRankAndM(rank lockRank) {
	acquirem()
}
// unlockWithRank releases l; there is no rank bookkeeping to undo.
func unlockWithRank(l *mutex) {
	unlock2(l)
}
// releaseLockRankAndM releases the M acquired by acquireLockRankAndM;
// no rank state is tracked.
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func releaseLockRankAndM(rank lockRank) {
	releasem(getg().m)
}
// lockWithRankMayAcquire would record that l may be acquired later;
// a no-op when static lock ranking is disabled.
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func lockWithRankMayAcquire(l *mutex, rank lockRank) {
}
// assertLockHeld would assert that l is held by the caller; a no-op
// when static lock ranking is disabled.
//
//go:nosplit
func assertLockHeld(l *mutex) {
}
// assertRankHeld would assert that some lock of rank r is held; a
// no-op when static lock ranking is disabled.
//
//go:nosplit
func assertRankHeld(r lockRank) {
}
// worldStopped would note that the world is stopped for rank checking;
// a no-op when static lock ranking is disabled.
//
//go:nosplit
func worldStopped() {
}
// worldStarted would note that the world has restarted for rank
// checking; a no-op when static lock ranking is disabled.
//
//go:nosplit
func worldStarted() {
}
// assertWorldStopped would assert that the world is stopped; a no-op
// when static lock ranking is disabled.
//
//go:nosplit
func assertWorldStopped() {
}
// assertWorldStoppedOrLockHeld would assert that either the world is
// stopped or l is held; a no-op when static lock ranking is disabled.
//
//go:nosplit
func assertWorldStoppedOrLockHeld(l *mutex) {
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
// fixalloc: a free-list allocator for fixed-size off-heap objects,
// used to manage storage used by the allocator.
// mheap: the malloc heap, managed at page (8192-byte) granularity.
// mspan: a run of in-use pages managed by the mheap.
// mcentral: collects all spans of a given size class.
// mcache: a per-P cache of mspans with free space.
// mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
// 1. Round the size up to one of the small size classes
// and look in the corresponding mspan in this P's mcache.
// Scan the mspan's free bitmap to find a free slot.
// If there is a free slot, allocate it.
// This can all be done without acquiring a lock.
//
// 2. If the mspan has no free slots, obtain a new mspan
// from the mcentral's list of mspans of the required size
// class that have free space.
// Obtaining a whole span amortizes the cost of locking
// the mcentral.
//
// 3. If the mcentral's mspan list is empty, obtain a run
// of pages from the mheap to use for the mspan.
//
// 4. If the mheap is empty or has no page runs large enough,
// allocate a new group of pages (at least 1MB) from the
// operating system. Allocating a large run of pages
// amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
// 1. If the mspan is being swept in response to allocation, it
// is returned to the mcache to satisfy the allocation.
//
// 2. Otherwise, if the mspan still has allocated objects in it,
// it is placed on the mcentral free list for the mspan's size
// class.
//
// 3. Otherwise, if all objects in the mspan are free, the mspan's
// pages are returned to the mheap and the mspan is now dead.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// If mspan.needzero is false, then free object slots in the mspan are
// already zeroed. Otherwise if needzero is true, objects are zeroed as
// they are allocated. There are various benefits to delaying zeroing
// this way:
//
// 1. Stack frame allocation can avoid zeroing altogether.
//
// 2. It exhibits better temporal locality, since the program is
// probably about to write to the memory.
//
// 3. We don't zero pages that never get reused.
// Virtual memory layout
//
// The heap consists of a set of arenas, which are 64MB on 64-bit and
// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
// aligned to the arena size.
//
// Each arena has an associated heapArena object that stores the
// metadata for that arena: the heap bitmap for all words in the arena
// and the span map for all pages in the arena. heapArena objects are
// themselves allocated off-heap.
//
// Since arenas are aligned, the address space can be viewed as a
// series of arena frames. The arena map (mheap_.arenas) maps from
// arena frame number to *heapArena, or nil for parts of the address
// space not backed by the Go heap. The arena map is structured as a
// two-level array consisting of a "L1" arena map and many "L2" arena
// maps; however, since arenas are large, on many architectures, the
// arena map consists of a single, large L2 map.
//
// The arena map covers the entire possible address space, allowing
// the Go heap to use any part of the address space. The allocator
// attempts to keep arenas contiguous so that large spans (and hence
// large objects) can cross arenas.
package runtime
import (
"internal/goarch"
"internal/goexperiment"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/gc"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
)
const (
	// Tiny-allocator and small-object limits, mirrored from the
	// internal gc package (see the aliases further below).
	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = gc.MaxSmallSize

	// pageSize is the granularity at which the heap is managed;
	// pageMask extracts the within-page offset of an address.
	pageSize = 1 << gc.PageShift
	pageMask = pageSize - 1

	// Unused. Left for viewcore.
	_PageSize = pageSize

	minSizeForMallocHeader = gc.MinSizeForMallocHeader
	mallocHeaderSize       = gc.MallocHeaderSize

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = gc.TinySize
	_TinySizeClass = int8(gc.TinySizeClass)

	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9

	// heapAddrBits is the number of bits in a heap address. On
	// amd64, addresses are sign-extended beyond heapAddrBits. On
	// other arches, they are zero-extended.
	//
	// On most 64-bit platforms, we limit this to 48 bits based on a
	// combination of hardware and OS limitations.
	//
	// amd64 hardware limits addresses to 48 bits, sign-extended
	// to 64 bits. Addresses where the top 16 bits are not either
	// all 0 or all 1 are "non-canonical" and invalid. Because of
	// these "negative" addresses, we offset addresses by 1<<47
	// (arenaBaseOffset) on amd64 before computing indexes into
	// the heap arenas index. In 2017, amd64 hardware added
	// support for 57 bit addresses; however, currently only Linux
	// supports this extension and the kernel will never choose an
	// address above 1<<47 unless mmap is called with a hint
	// address above 1<<47 (which we never do).
	//
	// arm64 hardware (as of ARMv8) limits user addresses to 48
	// bits, in the range [0, 1<<48).
	//
	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
	// in hardware. On Linux, Go leans on stricter OS limits. Based
	// on Linux's processor.h, the user address space is limited as
	// follows on 64-bit architectures:
	//
	// Architecture  Name              Maximum Value (exclusive)
	// ---------------------------------------------------------------------
	// amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
	// arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
	// ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
	// mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
	// s390x         TASK_SIZE         1<<64 (64 bit addresses)
	//
	// These limits may increase over time, but are currently at
	// most 48 bits except on s390x. On all architectures, Linux
	// starts placing mmap'd regions at addresses that are
	// significantly below 48 bits, so even if it's possible to
	// exceed Go's 48 bit limit, it's extremely unlikely in
	// practice.
	//
	// On 32-bit platforms, we accept the full 32-bit address
	// space because doing so is cheap.
	// mips32 only has access to the low 2GB of virtual memory, so
	// we further limit it to 31 bits.
	//
	// On ios/arm64, although 64-bit pointers are presumably
	// available, pointers are truncated to 33 bits in iOS <14.
	// Furthermore, only the top 4 GiB of the address space are
	// actually available to the application. In iOS >=14, more
	// of the address space is available, and the OS can now
	// provide addresses outside of those 33 bits. Pick 40 bits
	// as a reasonable balance between address space usage by the
	// page allocator, and flexibility for what mmap'd regions
	// we'll accept for the heap. We can't just move to the full
	// 48 bits because this uses too much address space for older
	// iOS versions.
	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
	// to a 48-bit address space like every other arm64 platform.
	//
	// WebAssembly currently has a limit of 4GB linear memory.
	heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64

	// maxAlloc is the maximum size of an allocation. On 64-bit,
	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
	// 32-bit, however, this is one less than 1<<32 because the
	// number of bytes in the address space doesn't actually fit
	// in a uintptr.
	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1

	// The number of bits in a heap address, the size of heap
	// arenas, and the L1 and L2 arena map sizes are related by
	//
	//   (1 << addr bits) = arena size * L1 entries * L2 entries
	//
	// Currently, we balance these as follows:
	//
	//       Platform  Addr bits  Arena size  L1 entries   L2 entries
	// --------------  ---------  ----------  ----------  -----------
	//       */64-bit         48        64MB           1    4M (32MB)
	// windows/64-bit         48         4MB          64    1M  (8MB)
	//      ios/arm64         40         4MB           1  256K  (2MB)
	//       */32-bit         32         4MB           1  1024  (4KB)
	//     */mips(le)         31         4MB           1   512  (2KB)
	//           wasm         32       512KB           1  8192 (64KB)

	// heapArenaBytes is the size of a heap arena. The heap
	// consists of mappings of size heapArenaBytes, aligned to
	// heapArenaBytes. The initial heap mapping is one arena.
	//
	// This is currently 64MB on 64-bit non-Windows, 4MB on
	// 32-bit and on Windows, and 512KB on Wasm. We use smaller
	// arenas on Windows because all committed memory is charged
	// to the process, even if it's not touched. Hence, for
	// processes with small heaps, the mapped arena space needs
	// to be commensurate. This is particularly important with
	// the race detector, since it significantly amplifies the
	// cost of committed memory. We use smaller arenas on Wasm
	// because some Wasm programs have very small heap, and
	// everything in the Wasm linear memory is charged.
	heapArenaBytes = 1 << logHeapArenaBytes

	// heapArenaWords is the number of pointer-size words per heap arena.
	heapArenaWords = heapArenaBytes / goarch.PtrSize

	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
	// prefer using heapArenaBytes where possible (we need the
	// constant to compute some other constants).
	logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (9+10)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64

	// heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
	heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)

	// pagesPerArena is the number of runtime pages in each heap arena.
	pagesPerArena = heapArenaBytes / pageSize

	// arenaL1Bits is the number of bits of the arena number
	// covered by the first level arena map.
	//
	// This number should be small, since the first level arena
	// map requires PtrSize*(1<<arenaL1Bits) of space in the
	// binary's BSS. It can be zero, in which case the first level
	// index is effectively unused. There is a performance benefit
	// to this, since the generated code can be more efficient,
	// but comes at the cost of having a large L2 mapping.
	//
	// We use the L1 map on 64-bit Windows because the arena size
	// is small, but the address space is still 48 bits, and
	// there's a high cost to having a large L2.
	arenaL1Bits = 6 * (_64bit * goos.IsWindows)

	// arenaL2Bits is the number of bits of the arena number
	// covered by the second level arena index.
	//
	// The size of each arena map allocation is proportional to
	// 1<<arenaL2Bits, so it's important that this not be too
	// large. 48 bits leads to 32MB arena index allocations, which
	// is about the practical threshold.
	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits

	// arenaL1Shift is the number of bits to shift an arena frame
	// number by to compute an index into the first level arena map.
	arenaL1Shift = arenaL2Bits

	// arenaBits is the total bits in a combined arena map index.
	// This is split between the index into the L1 arena map and
	// the L2 arena map.
	arenaBits = arenaL1Bits + arenaL2Bits

	// arenaBaseOffset is the pointer value that corresponds to
	// index 0 in the heap arena map.
	//
	// On amd64, the address space is 48 bits, sign extended to 64
	// bits. This offset lets us handle "negative" addresses (or
	// high addresses if viewed as unsigned).
	//
	// On aix/ppc64, this offset allows to keep the heapAddrBits to
	// 48. Otherwise, it would be 60 in order to handle mmap addresses
	// (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this
	// case, the memory reserved in (s *pageAlloc).init for chunks
	// is causing important slowdowns.
	//
	// On other platforms, the user address space is contiguous
	// and starts at 0, so no offset is necessary.
	arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix

	// A typed version of this constant that will make it into DWARF (for viewcore).
	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096

	// minHeapForMetadataHugePages sets a threshold on when certain kinds of
	// heap metadata, currently the arenas map L2 entries and page alloc bitmap
	// mappings, are allowed to be backed by huge pages. If the heap goal ever
	// exceeds this threshold, then huge pages are enabled.
	//
	// These numbers are chosen with the assumption that huge pages are on the
	// order of a few MiB in size.
	//
	// The kind of metadata this applies to has a very low overhead when compared
	// to address space used, but their constant overheads for small heaps would
	// be very high if they were to be backed by huge pages (e.g. a few MiB makes
	// a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB
	// heap). The benefit of huge pages is also not worth it for small heaps,
	// because only a very, very small part of the metadata is used for small heaps.
	//
	// N.B. If the heap goal exceeds the threshold then shrinks to a very small size
	// again, then huge pages will still be enabled for this mapping. The reason is that
	// there's no point unless we're also returning the physical memory for these
	// metadata mappings back to the OS. That would be quite complex to do in general
	// as the heap is likely fragmented after a reduction in heap size.
	minHeapForMetadataHugePages = 1 << 30

	// randomizeHeapBase indicates if the heap base address should be randomized.
	// See comment in mallocinit for how the randomization is performed.
	randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform && !raceenabled && !msanenabled && !asanenabled

	// randHeapBasePrefixMask is used to extract the top byte of the randomized
	// heap base address.
	randHeapBasePrefixMask = ^uintptr(0xff << (heapAddrBits - 8))
)
// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit; mallocinit verifies that it is a nonzero power of two
// within [minPhysPageSize, maxPhysPageSize].
var physPageSize uintptr
// physHugePageSize is the size in bytes of the OS's default physical huge
// page size whose allocation is opaque to the application. It is assumed
// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient). mallocinit resets
// it to zero if it exceeds maxPhysHugePageSize.
//
// Since physHugePageSize is always assumed to be a power of two,
// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
// The purpose of physHugePageShift is to avoid doing divisions in
// performance critical functions. physHugePageShift is derived from
// physHugePageSize in mallocinit.
var (
	physHugePageSize  uintptr
	physHugePageShift uint
)
var (
	// heapRandSeed is a random value that is populated in mallocinit if
	// randomizeHeapBase is set. It is used in mallocinit, and mheap.grow, to
	// randomize the base heap address. Bits are consumed from its high end
	// by nextHeapRandBits.
	heapRandSeed uintptr
	// heapRandSeedBitsRemaining counts how many bits of heapRandSeed have
	// not yet been consumed by nextHeapRandBits.
	heapRandSeedBitsRemaining int
)
// nextHeapRandBits consumes the next bits entropy bits from heapRandSeed
// and returns them in the low-order bits of the result. It throws if the
// seed does not have that many unconsumed bits left.
func nextHeapRandBits(bits int) uintptr {
	if heapRandSeedBitsRemaining < bits {
		throw("not enough heapRandSeed bits remaining")
	}
	heapRandSeedBitsRemaining -= bits
	// Take the requested bits from the top of the seed, then shift
	// them out so the next caller sees fresh entropy.
	out := heapRandSeed >> (64 - bits)
	heapRandSeed <<= bits
	return out
}
func mallocinit() {
if gc.SizeClassToSize[tinySizeClass] != maxTinySize {
throw("bad TinySizeClass")
}
if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 {
// heapBits expects modular arithmetic on bitmap
// addresses to work.
throw("heapArenaBitmapWords not a power of 2")
}
// Check physPageSize.
if physPageSize == 0 {
// The OS init code failed to fetch the physical page size.
throw("failed to get system page size")
}
if physPageSize > maxPhysPageSize {
print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
throw("bad system page size")
}
if physPageSize < minPhysPageSize {
print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
throw("bad system page size")
}
if physPageSize&(physPageSize-1) != 0 {
print("system page size (", physPageSize, ") must be a power of 2\n")
throw("bad system page size")
}
if physHugePageSize&(physHugePageSize-1) != 0 {
print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
throw("bad system huge page size")
}
if physHugePageSize > maxPhysHugePageSize {
// physHugePageSize is greater than the maximum supported huge page size.
// Don't throw here, like in the other cases, since a system configured
// in this way isn't wrong, we just don't have the code to support them.
// Instead, silently set the huge page size to zero.
physHugePageSize = 0
}
if physHugePageSize != 0 {
// Since physHugePageSize is a power of 2, it suffices to increase
// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
for 1<<physHugePageShift != physHugePageSize {
physHugePageShift++
}
}
if pagesPerArena%pagesPerSpanRoot != 0 {
print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
throw("bad pagesPerSpanRoot")
}
if pagesPerArena%pagesPerReclaimerChunk != 0 {
print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
throw("bad pagesPerReclaimerChunk")
}
// Check that the minimum size (exclusive) for a malloc header is also
// a size class boundary. This is important to making sure checks align
// across different parts of the runtime.
//
// While we're here, also check to make sure all these size classes'
// span sizes are one page. Some code relies on this.
minSizeForMallocHeaderIsSizeClass := false
sizeClassesUpToMinSizeForMallocHeaderAreOnePage := true
for i := 0; i < len(gc.SizeClassToSize); i++ {
if gc.SizeClassToNPages[i] > 1 {
sizeClassesUpToMinSizeForMallocHeaderAreOnePage = false
}
if gc.MinSizeForMallocHeader == uintptr(gc.SizeClassToSize[i]) {
minSizeForMallocHeaderIsSizeClass = true
break
}
}
if !minSizeForMallocHeaderIsSizeClass {
throw("min size of malloc header is not a size class boundary")
}
if !sizeClassesUpToMinSizeForMallocHeaderAreOnePage {
throw("expected all size classes up to min size for malloc header to fit in one-page spans")
}
// Check that the pointer bitmap for all small sizes without a malloc header
// fits in a word.
if gc.MinSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
throw("max pointer/scan bitmap size for headerless objects is too large")
}
if minTagBits > tagBits {
throw("tagBits too small")
}
// Initialize the heap.
mheap_.init()
mcache0 = allocmcache()
lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
lockInit(&profInsertLock, lockRankProfInsert)
lockInit(&profBlockLock, lockRankProfBlock)
lockInit(&profMemActiveLock, lockRankProfMemActive)
for i := range profMemFutureLock {
lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
}
lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
// Create initial arena growth hints.
if isSbrkPlatform {
// Don't generate hints on sbrk platforms. We can
// only grow the break sequentially.
} else if goarch.PtrSize == 8 {
// On a 64-bit machine, we pick the following hints
// because:
//
// 1. Starting from the middle of the address space
// makes it easier to grow out a contiguous range
// without running in to some other mapping.
//
// 2. This makes Go heap addresses more easily
// recognizable when debugging.
//
// 3. Stack scanning in gccgo is still conservative,
// so it's important that addresses be distinguishable
// from other data.
//
// Starting at 0x00c0 means that the valid memory addresses
// will begin 0x00c0, 0x00c1, ...
// In little-endian, that's c0 00, c1 00, ... None of those are valid
// UTF-8 sequences, and they are otherwise as far away from
// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
// on OS X during thread allocations. 0x00c0 causes conflicts with
// AddressSanitizer which reserves all memory up to 0x0100.
// These choices reduce the odds of a conservative garbage collector
// not collecting memory because some non-pointer block of memory
// had a bit pattern that matched a memory address.
//
// However, on arm64, we ignore all this advice above and slam the
// allocation at 0x40 << 32 because when using 4k pages with 3-level
// translation buffers, the user address space is limited to 39 bits
// On ios/arm64, the address space is even smaller.
//
// On AIX, mmaps starts at 0x0A00000000000000 for 64-bit.
// processes.
//
// Space mapped for user arenas comes immediately after the range
// originally reserved for the regular heap when race mode is not
// enabled because user arena chunks can never be used for regular heap
// allocations and we want to avoid fragmenting the address space.
//
// In race mode we have no choice but to just use the same hints because
// the race detector requires that the heap be mapped contiguously.
//
// If randomizeHeapBase is set, we attempt to randomize the base address
// as much as possible. We do this by generating a random uint64 via
// bootstrapRand and using it's bits to randomize portions of the base
// address as follows:
// * We first generate a random heapArenaBytes aligned address that we use for
// generating the hints.
// * On the first call to mheap.grow, we then generate a random PallocChunkBytes
// aligned offset into the mmap'd heap region, which we use as the base for
// the heap region.
// * We then select a page offset in that PallocChunkBytes region to start the
// heap at, and mark all the pages up to that offset as allocated.
//
// Our final randomized "heap base address" becomes the first byte of
// the first available page returned by the page allocator. This results
// in an address with at least heapAddrBits-gc.PageShift-2-(1*goarch.IsAmd64)
// bits of entropy.
var randHeapBase uintptr
var randHeapBasePrefix byte
// heapAddrBits is 48 on most platforms, but we only use 47 of those
// bits in order to provide a good amount of room for the heap to grow
// contiguously. On amd64, there are 48 bits, but the top bit is sign
// extended, so we throw away another bit, just to be safe.
randHeapAddrBits := heapAddrBits - 1 - (goarch.IsAmd64 * 1)
if randomizeHeapBase {
// Generate a random value, and take the bottom heapAddrBits-logHeapArenaBytes
// bits, using them as the top bits for randHeapBase.
heapRandSeed, heapRandSeedBitsRemaining = uintptr(bootstrapRand()), 64
topBits := (randHeapAddrBits - logHeapArenaBytes)
randHeapBase = nextHeapRandBits(topBits) << (randHeapAddrBits - topBits)
randHeapBase = alignUp(randHeapBase, heapArenaBytes)
randHeapBasePrefix = byte(randHeapBase >> (randHeapAddrBits - 8))
}
var vmaSize int
if GOARCH == "riscv64" {
// Identify which memory layout is in use based on the system
// stack address, knowing that the bottom half of virtual memory
// is user space. This should result in 39, 48 or 57. It may be
// possible to use RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS at some
// point in the future - for now use the system stack address.
vmaSize = sys.Len64(uint64(getg().m.g0.stack.hi)) + 1
if raceenabled && vmaSize != 39 && vmaSize != 48 {
println("vma size = ", vmaSize)
throw("riscv64 vma size is unknown and race mode is enabled")
}
}
for i := 0x7f; i >= 0; i-- {
var p uintptr
switch {
case raceenabled && GOARCH == "riscv64" && vmaSize == 39:
p = uintptr(i)<<28 | uintptrMask&(0x0013<<28)
if p >= uintptrMask&0x000f00000000 {
continue
}
case raceenabled:
// The TSAN runtime requires the heap
// to be in the range [0x00c000000000,
// 0x00e000000000).
p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
if p >= uintptrMask&0x00e000000000 {
continue
}
case randomizeHeapBase:
prefix := uintptr(randHeapBasePrefix+byte(i)) << (randHeapAddrBits - 8)
p = prefix | (randHeapBase & randHeapBasePrefixMask)
case GOARCH == "arm64" && GOOS == "ios":
p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
case GOARCH == "arm64":
p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
case GOARCH == "riscv64" && vmaSize == 39:
p = uintptr(i)<<32 | uintptrMask&(0x0013<<28)
case GOOS == "aix":
if i == 0 {
// We don't use addresses directly after 0x0A00000000000000
// to avoid collisions with others mmaps done by non-go programs.
continue
}
p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
default:
p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
}
// Switch to generating hints for user arenas if we've gone
// through about half the hints. In race mode, take only about
// a quarter; we don't have very much space to work with.
hintList := &mheap_.arenaHints
if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) {
hintList = &mheap_.userArena.arenaHints
}
hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
hint.addr = p
hint.next, *hintList = *hintList, hint
}
} else {
// On a 32-bit machine, we're much more concerned
// about keeping the usable heap contiguous.
// Hence:
//
// 1. We reserve space for all heapArenas up front so
// they don't get interleaved with the heap. They're
// ~258MB, so this isn't too bad. (We could reserve a
// smaller amount of space up front if this is a
// problem.)
//
// 2. We hint the heap to start right above the end of
// the binary so we have the best chance of keeping it
// contiguous.
//
// 3. We try to stake out a reasonably large initial
// heap reservation.
const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
meta := uintptr(sysReserve(nil, arenaMetaSize, "heap reservation"))
if meta != 0 {
mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
}
// We want to start the arena low, but if we're linked
// against C code, it's possible global constructors
// have called malloc and adjusted the process' brk.
// Query the brk so we can avoid trying to map the
// region over it (which will cause the kernel to put
// the region somewhere else, likely at a high
// address).
procBrk := sbrk0()
// If we ask for the end of the data segment but the
// operating system requires a little more space
// before we can start allocating, it will give out a
// slightly higher pointer. Except QEMU, which is
// buggy, as usual: it won't adjust the pointer
// upward. So adjust it upward a little bit ourselves:
// 1/4 MB to get away from the running binary image.
p := firstmoduledata.end
if p < procBrk {
p = procBrk
}
if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
p = mheap_.heapArenaAlloc.end
}
p = alignUp(p+(256<<10), heapArenaBytes)
// Because we're worried about fragmentation on
// 32-bit, we try to make a large initial reservation.
arenaSizes := []uintptr{
512 << 20,
256 << 20,
128 << 20,
}
for _, arenaSize := range arenaSizes {
a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes, "heap reservation")
if a != nil {
mheap_.arena.init(uintptr(a), size, false)
p = mheap_.arena.end // For hint below
break
}
}
hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
hint.addr = p
hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
// Place the hint for user arenas just after the large reservation.
//
// While this potentially competes with the hint above, in practice we probably
// aren't going to be getting this far anyway on 32-bit platforms.
userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
userArenaHint.addr = p
userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint
}
// Initialize the memory limit here because the allocator is going to look at it
// but we haven't called gcinit yet and we're definitely going to allocate memory before then.
gcController.memoryLimit.Store(math.MaxInt64)
}
// sysAlloc allocates heap arena space for at least n bytes. The
// returned pointer is always heapArenaBytes-aligned and backed by
// h.arenas metadata. The returned size is always a multiple of
// heapArenaBytes. sysAlloc returns nil on failure.
// There is no corresponding free function.
//
// hintList is a list of hint addresses for where to allocate new
// heap arenas. It must be non-nil.
//
// sysAlloc returns a memory region in the Reserved state. This region must
// be transitioned to Prepared and then Ready before use.
//
// arenaList is the list the arena should be added to.
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx) (v unsafe.Pointer, size uintptr) {
	assertLockHeld(&h.lock)

	// Arena space is always reserved in whole-arena units.
	n = alignUp(n, heapArenaBytes)

	if hintList == &h.arenaHints {
		// First, try the arena pre-reservation.
		// Newly-used mappings are considered released.
		//
		// Only do this if we're using the regular heap arena hints.
		// This behavior is only for the heap.
		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased, "heap")
		if v != nil {
			size = n
			goto mapped
		}
	}

	// Try to grow the heap at a hint address.
	for *hintList != nil {
		hint := *hintList
		p := hint.addr
		if hint.down {
			// Downward-growing hint: hint.addr is the top of the
			// region, so the candidate base is n bytes below it.
			p -= n
		}
		if p+n < p {
			// We can't use this, so don't ask.
			// (The region would wrap past the top of the address space.)
			v = nil
		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
			// Outside addressable heap. Can't use.
			v = nil
		} else {
			v = sysReserve(unsafe.Pointer(p), n, "heap reservation")
		}
		if p == uintptr(v) {
			// Success. Update the hint.
			if !hint.down {
				p += n
			}
			hint.addr = p
			size = n
			break
		}
		// Failed. Discard this hint and try the next.
		//
		// TODO: This would be cleaner if sysReserve could be
		// told to only return the requested address. In
		// particular, this is already how Windows behaves, so
		// it would simplify things there.
		if v != nil {
			sysFreeOS(v, n)
		}
		*hintList = hint.next
		h.arenaHintAlloc.free(unsafe.Pointer(hint))
	}

	if size == 0 {
		if raceenabled {
			// The race detector assumes the heap lives in
			// [0x00c000000000, 0x00e000000000), but we
			// just ran out of hints in this region. Give
			// a nice failure.
			throw("too many address space collisions for -race mode")
		}

		// All of the hints failed, so we'll take any
		// (sufficiently aligned) address the kernel will give
		// us.
		v, size = sysReserveAligned(nil, n, heapArenaBytes, "heap")
		if v == nil {
			return nil, 0
		}

		// Create new hints for extending this region: one growing
		// down from the new base and one growing up past its end.
		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr, hint.down = uintptr(v), true
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr = uintptr(v) + size
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}

	// Check for bad pointers or pointers we can't use.
	{
		var bad string
		p := uintptr(v)
		if p+size < p {
			bad = "region exceeds uintptr range"
		} else if arenaIndex(p) >= 1<<arenaBits {
			bad = "base outside usable address space"
		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
			bad = "end outside usable address space"
		}
		if bad != "" {
			// This should be impossible on most architectures,
			// but it would be really confusing to debug.
			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
			throw("memory reservation exceeds address space limit")
		}
	}

	if uintptr(v)&(heapArenaBytes-1) != 0 {
		throw("misrounded allocation in sysAlloc")
	}

mapped:
	if valgrindenabled {
		valgrindCreateMempool(v)
		valgrindMakeMemNoAccess(v, size)
	}

	// Create arena metadata for every arena frame in [v, v+size).
	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
		l2 := h.arenas[ri.l1()]
		if l2 == nil {
			// Allocate an L2 arena map.
			//
			// Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
			// statistic we can comfortably account for this space in. With this structure,
			// we rely on demand paging to avoid large overheads, but tracking which memory
			// is paged in is too expensive. Trying to account for the whole region means
			// that it will appear like an enormous memory overhead in statistics, even though
			// it is not.
			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2), "heap index"))
			if l2 == nil {
				throw("out of memory allocating heap arena map")
			}
			if h.arenasHugePages {
				sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
			} else {
				sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
			}
			// Publish the new L2 map with an atomic store; readers
			// (e.g. enableMetadataHugePages) load it without the heap lock.
			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
		}

		if l2[ri.l2()] != nil {
			throw("arena already initialized")
		}
		var r *heapArena
		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys, "heap metadata"))
		if r == nil {
			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
			if r == nil {
				throw("out of memory allocating heap arena metadata")
			}
		}

		// Register the arena in allArenas if requested.
		if len((*arenaList)) == cap((*arenaList)) {
			// Double the backing array (minimum one physical page).
			size := 2 * uintptr(cap((*arenaList))) * goarch.PtrSize
			if size == 0 {
				size = physPageSize
			}
			newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
			if newArray == nil {
				throw("out of memory allocating allArenas")
			}
			oldSlice := (*arenaList)
			*(*notInHeapSlice)(unsafe.Pointer(&(*arenaList))) = notInHeapSlice{newArray, len((*arenaList)), int(size / goarch.PtrSize)}
			copy((*arenaList), oldSlice)
			// Do not free the old backing array because
			// there may be concurrent readers. Since we
			// double the array each time, this can lead
			// to at most 2x waste.
		}
		(*arenaList) = (*arenaList)[:len((*arenaList))+1]
		(*arenaList)[len((*arenaList))-1] = ri

		// Store atomically just in case an object from the
		// new heap arena becomes visible before the heap lock
		// is released (which shouldn't happen, but there's
		// little downside to this).
		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
	}

	// Tell the race detector about the new heap memory.
	if raceenabled {
		racemapshadow(v, size)
	}

	return
}
// sysReserveAligned is like sysReserve, but the returned pointer is
// aligned to align bytes. It may reserve either n or n+align bytes,
// so it returns the size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
	if isSbrkPlatform {
		if v != nil {
			throw("unexpected heap arena hint on sbrk platform")
		}
		return sysReserveAlignedSbrk(size, align)
	}
	// Since the alignment is rather large in uses of this
	// function, we're not likely to get it by chance, so we ask
	// for a larger region and remove the parts we don't need.
	retries := 0
retry:
	p := uintptr(sysReserve(v, size+align, vmaName))
	switch {
	case p == 0:
		return nil, 0
	case p&(align-1) == 0:
		// Already aligned by luck. Keep the whole over-sized
		// reservation rather than trimming; hence size+align.
		return unsafe.Pointer(p), size + align
	case GOOS == "windows":
		// On Windows we can't release pieces of a
		// reservation, so we release the whole thing and
		// re-reserve the aligned sub-region. This may race,
		// so we may have to try again.
		sysFreeOS(unsafe.Pointer(p), size+align)
		p = alignUp(p, align)
		p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
		if p != uintptr(p2) {
			// Must have raced. Try again.
			sysFreeOS(p2, size)
			if retries++; retries == 100 {
				throw("failed to allocate aligned heap memory; too many retries")
			}
			goto retry
		}
		// Success.
		return p2, size
	default:
		// Trim off the unaligned parts.
		pAligned := alignUp(p, align)
		sysFreeOS(unsafe.Pointer(p), pAligned-p)
		end := pAligned + size
		endLen := (p + size + align) - end
		if endLen > 0 {
			sysFreeOS(unsafe.Pointer(end), endLen)
		}
		return unsafe.Pointer(pAligned), size
	}
}
// enableMetadataHugePages enables huge pages for various sources of heap metadata.
//
// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
// time, but may take time proportional to the size of the mapped heap beyond that.
//
// This function is idempotent.
//
// The heap lock must not be held over this operation, since it will briefly acquire
// the heap lock.
//
// Must be called on the system stack because it acquires the heap lock.
//
//go:systemstack
func (h *mheap) enableMetadataHugePages() {
	// Enable huge pages for page structure.
	h.pages.enableChunkHugePages()

	// Grab the lock and set arenasHugePages if it's not.
	//
	// Once arenasHugePages is set, all new L2 entries will be eligible for
	// huge pages. We'll set all the old entries after we release the lock.
	lock(&h.lock)
	if h.arenasHugePages {
		// Already enabled by an earlier call; nothing to do (idempotence).
		unlock(&h.lock)
		return
	}
	h.arenasHugePages = true
	unlock(&h.lock)

	// N.B. The arenas L1 map is quite small on all platforms, so it's fine to
	// just iterate over the whole thing.
	for i := range h.arenas {
		// Load atomically: sysAlloc publishes new L2 maps via
		// atomic.StorepNoWB, and we no longer hold the heap lock here.
		l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i])))
		if l2 == nil {
			continue
		}
		sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
	}
}
// zerobase is the base address for all 0-byte allocations.
// mallocgc returns &zerobase for every zero-sized request, so such
// allocations never consume heap space.
var zerobase uintptr
// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	// Consult the span's allocation cache for a set bit (a free slot).
	theBit := sys.TrailingZeros64(s.allocCache)
	if theBit >= 64 {
		// The cache has no free objects; caller must take the slow path.
		return 0
	}
	result := s.freeindex + uint16(theBit)
	if result >= s.nelems {
		// Candidate index is past the end of the span.
		return 0
	}
	freeidx := result + 1
	if freeidx%64 == 0 && freeidx != s.nelems {
		// The alloc cache would need a refill; defer to the slow path.
		return 0
	}
	// Claim the slot: consume the cache bit, advance the free index,
	// and account for the allocation.
	s.allocCache >>= uint(theBit + 1)
	s.freeindex = freeidx
	s.allocCount++
	return gclinkptr(uintptr(result)*s.elemsize + s.base())
}
// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavy
// weight allocation. If it is a heavy weight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger bool) {
	s = c.alloc[spc]
	checkGCTrigger = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if s.allocCount != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		// Swap in a span with free space and retry. The refill is the
		// "heavy weight" part that the caller must react to.
		c.refill(spc)
		checkGCTrigger = true
		s = c.alloc[spc]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(uintptr(freeIndex)*s.elemsize + s.base())
	s.allocCount++
	if s.allocCount > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}
// doubleCheckMalloc enables a bunch of extra checks to malloc to double-check
// that various invariants are upheld.
//
// We might consider turning these on by default; many of them previously were.
// They account for a few % of mallocgc's cost though, which does matter somewhat
// at scale. (When testing changes to malloc, consider enabling this, and also
// some function-local 'doubleCheck' consts such as in mbitmap.go currently.)
const doubleCheckMalloc = false

// sizeSpecializedMallocEnabled is the set of conditions where we enable the size-specialized
// mallocgc implementation: the experiment must be enabled, and none of the sanitizers should
// be enabled. The tables used to select the size-specialized malloc function do not compile
// properly on plan9, so size-specialized malloc is also disabled on plan9.
const sizeSpecializedMallocEnabled = goexperiment.SizeSpecializedMalloc && GOOS != "plan9" && !asanenabled && !raceenabled && !msanenabled && !valgrindenabled

// runtimeFreegcEnabled is the set of conditions where we enable the runtime.freegc
// implementation and the corresponding allocation-related changes: the experiment must be
// enabled, and none of the memory sanitizers should be enabled. We allow the race detector,
// in contrast to sizeSpecializedMallocEnabled.
// TODO(thepudds): it would be nice to check Valgrind integration, though there are some hints
// there might not be any canned tests in tree for Go's integration with Valgrind.
const runtimeFreegcEnabled = goexperiment.RuntimeFreegc && !asanenabled && !msanenabled && !valgrindenabled
// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
//
// mallocgc should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/gopkg
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/cockroachdb/cockroach
//   - github.com/cockroachdb/pebble
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mallocgc
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	// Short-circuit zero-sized allocation requests.
	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if sizeSpecializedMallocEnabled && heapBitsInSpan(size) {
		if typ == nil || !typ.Pointers() {
			return mallocNoScanTable[size](size, typ, needzero)
		} else {
			if !needzero {
				throw("objects with pointers must be zeroed")
			}
			return mallocScanTable[size](size, typ, needzero)
		}
	}

	// It's possible for any malloc to trigger sweeping, which may in
	// turn queue finalizers. Record this dynamic lock edge.
	// N.B. Compiled away if lockrank experiment is not enabled.
	lockRankMayQueueFinalizer()

	// Pre-malloc debug hooks.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// For ASAN, we allocate extra memory around each allocation called the "redzone."
	// These "redzones" are marked as unaddressable.
	var asanRZ uintptr
	if asanenabled {
		asanRZ = redZoneSize(size)
		size += asanRZ
	}

	// Assist the GC if needed. (On the reuse path, we currently compensate for this;
	// changes here might require changes there.)
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	// Actually do the allocation.
	var x unsafe.Pointer
	var elemsize uintptr
	if sizeSpecializedMallocEnabled {
		// we know that heapBitsInSpan is false.
		if size <= maxSmallSize-gc.MallocHeaderSize {
			if typ == nil || !typ.Pointers() {
				x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
			} else {
				if !needzero {
					throw("objects with pointers must be zeroed")
				}
				x, elemsize = mallocgcSmallScanHeader(size, typ)
			}
		} else {
			x, elemsize = mallocgcLarge(size, typ, needzero)
		}
	} else {
		if size <= maxSmallSize-gc.MallocHeaderSize {
			if typ == nil || !typ.Pointers() {
				// tiny allocations might be kept alive by other co-located values.
				// Make sure secret allocations get zeroed by avoiding the tiny allocator
				// See go.dev/issue/76356
				gp := getg()
				if size < maxTinySize && gp.secret == 0 {
					x, elemsize = mallocgcTiny(size, typ)
				} else {
					x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
				}
			} else {
				if !needzero {
					throw("objects with pointers must be zeroed")
				}
				if heapBitsInSpan(size) {
					x, elemsize = mallocgcSmallScanNoHeader(size, typ)
				} else {
					x, elemsize = mallocgcSmallScanHeader(size, typ)
				}
			}
		} else {
			x, elemsize = mallocgcLarge(size, typ, needzero)
		}
	}

	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Mark any object allocated while in secret mode as secret.
		// This ensures we zero it immediately when freeing it.
		addSecret(x, size)
	}

	// Notify sanitizers, if enabled.
	if raceenabled {
		racemalloc(x, size-asanRZ)
	}
	if msanenabled {
		msanmalloc(x, size-asanRZ)
	}
	if asanenabled {
		// Poison the space between the end of the requested size of x
		// and the end of the slot. Unpoison the requested allocation.
		frag := elemsize - size
		if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) && size <= maxSmallSize-gc.MallocHeaderSize {
			// The malloc header occupies part of the slot; it is not
			// user-addressable fragmentation.
			frag -= gc.MallocHeaderSize
		}
		// Fix: the poisoned region must cover the redzone plus the internal
		// fragmentation (frag). Previously frag was computed but never used,
		// leaving the slot tail addressable (and an unused-variable compile
		// error besides).
		asanpoison(unsafe.Add(x, size-asanRZ), asanRZ+frag)
		asanunpoison(x, size-asanRZ)
	}
	if valgrindenabled {
		valgrindMalloc(x, size-asanRZ)
	}

	// Adjust our GC assist debt to account for internal fragmentation.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	// Post-malloc debug hooks.
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// mallocgcTiny allocates a noscan object of size < maxTinySize, packing it
// into a shared tiny block when one with enough room exists. It returns the
// object and the slot size charged (0 when the object fit into an existing
// tiny block).
func mallocgcTiny(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ != nil && typ.Pointers() {
			throw("expected noscan for tiny alloc")
		}
	}
	mp.mallocing = 1

	// Tiny allocator.
	//
	// Tiny allocator combines several tiny allocation requests
	// into a single memory block. The resulting memory block
	// is freed when all subobjects are unreachable. The subobjects
	// must be noscan (don't have pointers), this ensures that
	// the amount of potentially wasted memory is bounded.
	//
	// Size of the memory block used for combining (maxTinySize) is tunable.
	// Current setting is 16 bytes, which relates to 2x worst case memory
	// wastage (when all but one subobjects are unreachable).
	// 8 bytes would result in no wastage at all, but provides less
	// opportunities for combining.
	// 32 bytes provides more opportunities for combining,
	// but can lead to 4x worst case wastage.
	// The best case winning is 8x regardless of block size.
	//
	// Objects obtained from tiny allocator must not be freed explicitly.
	// So when an object will be freed explicitly, we ensure that
	// its size >= maxTinySize.
	//
	// SetFinalizer has a special case for objects potentially coming
	// from tiny allocator, it such case it allows to set finalizers
	// for an inner byte of a memory block.
	//
	// The main targets of tiny allocator are small strings and
	// standalone escaping variables. On a json benchmark
	// the allocator reduces number of allocations by ~12% and
	// reduces heap size by ~20%.
	c := getMCache(mp)
	off := c.tinyoffset
	// Align tiny pointer for required (conservative) alignment.
	if size&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && size == 12 {
		// Conservatively align 12-byte objects to 8 bytes on 32-bit
		// systems so that objects whose first field is a 64-bit
		// value is aligned to 8 bytes and does not cause a fault on
		// atomic access. See issue 37262.
		// TODO(mknyszek): Remove this workaround if/when issue 36606
		// is resolved.
		off = alignUp(off, 8)
	} else if size&3 == 0 {
		off = alignUp(off, 4)
	} else if size&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+size <= maxTinySize && c.tiny != 0 {
		// The object fits into existing tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + size
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// Elemsize 0 signals the caller that no new slot was consumed.
		return x, 0
	}
	// Allocate a new maxTinySize block.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]
	v := nextFreeFast(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	(*[2]uint64)(x)[0] = 0 // Always zero
	(*[2]uint64)(x)[1] = 0
	// See if we need to replace the existing tiny block with the new one
	// based on amount of remaining free space.
	if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
		// Note: disabled when race detector is on, see comment near end of this function.
		c.tiny = uintptr(x)
		c.tinyoffset = size
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(span.elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, span.elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled {
		// Pad tinysize allocations so they are aligned with the end
		// of the tinyalloc region. This ensures that any arithmetic
		// that goes off the top end of the object will be detectable
		// by checkptr (issue 38872).
		// Note that we disable tinyalloc when raceenabled for this to work.
		// TODO: This padding is only performed when the race detector
		// is enabled. It would be nice to enable it if any package
		// was compiled with checkptr, but there's no easy way to
		// detect that (especially at compile time).
		// TODO: enable this padding for all allocations, not just
		// tinyalloc ones. It's tricky because of pointer maps.
		// Maybe just all noscan objects?
		x = add(x, span.elemsize-size)
	}
	return x, span.elemsize
}
// mallocgcSmallNoscan allocates a small object with no pointers from the
// per-P cache, rounding the request up to its size class. It returns the
// object and the size-class slot size actually charged.
func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ != nil && typ.Pointers() {
			throw("expected noscan type for noscan alloc")
		}
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Map the requested size to a size class; small and large size
	// classes use different lookup tables.
	var sizeclass uint8
	if size <= gc.SmallSizeMax-8 {
		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
	} else {
		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
	}
	size = uintptr(gc.SizeClassToSize[sizeclass])
	spc := makeSpanClass(sizeclass, true)
	span := c.alloc[spc]

	// First, check for a reusable object.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
		// We have a reusable object, use it.
		x := mallocgcSmallNoscanReuse(c, span, spc, size, needzero)
		mp.mallocing = 0
		releasem(mp)
		return x, size
	}

	v := nextFreeFast(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, size)
	}

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(size)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	return x, size
}
// mallocgcSmallNoscanReuse returns a previously freed noscan object after preparing it for reuse.
// It must only be called if hasReusableNoscan returned true.
//
// size is the slot size for spc (the caller has already rounded it up to the
// size class), and needzero controls whether the reused memory is cleared.
func mallocgcSmallNoscanReuse(c *mcache, span *mspan, spc spanClass, size uintptr, needzero bool) unsafe.Pointer {
	// TODO(thepudds): could nextFreeFast, nextFree and nextReusable return unsafe.Pointer?
	// Maybe doesn't matter. gclinkptr might be for historical reasons.
	v, span := c.nextReusableNoScan(span, spc)
	x := unsafe.Pointer(v)

	// Compensate for the GC assist credit deducted in mallocgc (before calling us and
	// after we return) because this is not a newly allocated object. We use the full slot
	// size (elemsize) here because that's what mallocgc deducts overall. Note we only
	// adjust this when gcBlackenEnabled is true, which follows mallocgc behavior.
	// TODO(thepudds): a follow-up CL adds a more specific test of our assist credit
	// handling, including for validating internal fragmentation handling.
	if gcBlackenEnabled != 0 {
		addAssistCredit(size)
	}

	// This is a previously used object, so only check needzero (and not span.needzero)
	// for clearing.
	if needzero {
		memclrNoHeapPointers(x, size)
	}

	// See publicationBarrier comment in mallocgcSmallNoscan.
	publicationBarrier()

	// Finish and return. Note that we do not update span.freeIndexForScan, profiling info,
	// nor do we check gcTrigger.
	// TODO(thepudds): the current approach is viable for a GOEXPERIMENT, but
	// means we do not profile reused heap objects. Ultimately, we will need a better
	// approach for profiling, or at least ensure we are not introducing bias in the
	// profiled allocations.
	// TODO(thepudds): related, we probably want to adjust how allocs and frees are counted
	// in the existing stats. Currently, reused objects are not counted as allocs nor
	// frees, but instead roughly appear as if the original heap object lived on. We
	// probably will also want some additional runtime/metrics, and generally think about
	// user-facing observability & diagnostics, though all this likely can wait for an
	// official proposal.
	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	}
	return x
}
// mallocgcSmallScanNoHeader allocates a small object that contains pointers
// and whose heap bits are stored in the span itself (no malloc header).
// It returns the object and the size-class slot size actually charged.
func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ == nil || !typ.Pointers() {
			throw("noscan allocated in scan-only path")
		}
		if !heapBitsInSpan(size) {
			throw("heap bits in not in span for non-header-only path")
		}
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	sizeclass := gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
	spc := makeSpanClass(sizeclass, false)
	span := c.alloc[spc]
	v := nextFreeFast(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	// Pointer-containing memory must always be zeroed before the heap
	// bits are published, so only span.needzero is consulted here.
	if span.needzero != 0 {
		memclrNoHeapPointers(x, size)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// initHeapBits already set the pointer bits for the 8-byte sizeclass
		// on 64-bit platforms.
		c.scanAlloc += 8
	} else {
		c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
	}
	// Round the charged size up to the slot size of the chosen class.
	size = uintptr(gc.SizeClassToSize[sizeclass])

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(size)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	return x, size
}
// mallocgcSmallScanHeader allocates a small pointer-bearing object whose
// type metadata lives in a malloc header placed immediately before the
// object, rather than as inline heap bits in the span. It returns a
// pointer just past the header and the full allocation slot size.
func mallocgcSmallScanHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
		if typ == nil || !typ.Pointers() {
			throw("noscan allocated in scan-only path")
		}
		if heapBitsInSpan(size) {
			throw("heap bits in span for header-only path")
		}
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	// Reserve room for the malloc header in front of the object.
	size += gc.MallocHeaderSize
	var sizeclass uint8
	if size <= gc.SmallSizeMax-8 {
		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
	} else {
		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
	}
	// From here on, size is the full slot size of the chosen size class.
	size = uintptr(gc.SizeClassToSize[sizeclass])
	spc := makeSpanClass(sizeclass, false)
	span := c.alloc[spc]
	// Fast path: pop a free slot from the span's allocation cache.
	v := nextFreeFast(span)
	if v == 0 {
		// Slow path: refill from the mcache; may request a GC trigger check.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, size)
	}
	// Store the type pointer in the header, then advance x past it so the
	// returned pointer is the object base as seen by the caller.
	header := (**_type)(x)
	x = add(x, gc.MallocHeaderSize)
	c.scanAlloc += heapSetTypeSmallHeader(uintptr(x), size-gc.MallocHeaderSize, typ, header, span)

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(size)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	return x, size
}
// mallocgcLarge allocates an object too large for the small size classes;
// it gets a span of its own. It returns the object and the span's element
// size. Zeroing (when needzero is set) is deferred until after the span is
// obtained so it can happen in a preemptible context, and type information
// is installed only after the memory is known to be zeroed.
func mallocgcLarge(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		if mp.mallocing != 0 {
			throw("malloc deadlock")
		}
		if mp.gsignal == getg() {
			throw("malloc during signal")
		}
	}
	mp.mallocing = 1

	c := getMCache(mp)
	// For large allocations, keep track of zeroed state so that
	// bulk zeroing can happen later in a preemptible context.
	span := c.allocLarge(size, typ == nil || !typ.Pointers())
	span.freeindex = 1
	span.allocCount = 1
	span.largeType = nil // Tell the GC not to look at this yet.
	size = span.elemsize
	x := unsafe.Pointer(span.base())

	// Ensure that the store above that sets largeType to
	// nil happens before the caller can make x observable
	// to the garbage collector.
	//
	// Otherwise, on weakly ordered machines, the garbage
	// collector could follow a pointer to x, but see a stale
	// largeType value.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}

	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(size)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, size)
	}
	mp.mallocing = 0
	releasem(mp)

	// Check to see if we need to trigger the GC.
	if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
		gcStart(t)
	}

	// Objects can be zeroed late in a context where preemption can occur.
	//
	// x will keep the memory alive.
	if needzero && span.needzero != 0 {
		// N.B. size == fullSize always in this case.
		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
	}

	// Set the type and run the publication barrier while non-preemptible. We need to make
	// sure that between heapSetTypeLarge and publicationBarrier we cannot get preempted,
	// otherwise the GC could potentially observe non-zeroed memory but largeType set on weak
	// memory architectures.
	//
	// The GC can also potentially observe non-zeroed memory if conservative scanning spuriously
	// observes a partially-allocated object, see the freeIndexForScan update above. This case is
	// handled by synchronization inside heapSetTypeLarge.
	mp = acquirem()
	if typ != nil && typ.Pointers() {
		// Finish storing the type information, now that we're certain the memory is zeroed.
		getMCache(mp).scanAlloc += heapSetTypeLarge(uintptr(x), size, typ, span)
	}
	// Publish the object again, now with zeroed memory and initialized type information.
	//
	// Even if we didn't update any type information, this is necessary to ensure that, for example,
	// x written to a global without any synchronization still results in other goroutines observing
	// zeroed memory.
	publicationBarrier()
	releasem(mp)
	return x, size
}
// preMallocgcDebug runs debug-mode hooks before a normal malloc. If
// debug.sbrk is set, it services the allocation directly from the
// persistent (never-freed) allocator and returns a non-nil pointer, which
// the caller returns instead of allocating normally. Otherwise it bumps
// init-trace counters when appropriate and returns nil.
func preMallocgcDebug(size uintptr, typ *_type) unsafe.Pointer {
	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			// TODO(austin): This should be just
			//	align = uintptr(typ.align)
			// but that's only 4 on 32-bit platforms,
			// even if there's a uint64 field in typ (see #599).
			// This causes 64-bit atomic accesses to panic.
			// Hence, we use stricter alignment that matches
			// the normal allocator better.
			// Pick the largest power-of-two alignment that divides size.
			if size&7 == 0 {
				align = 8
			} else if size&3 == 0 {
				align = 4
			} else if size&1 == 0 {
				align = 2
			} else {
				align = 1
			}
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}
	if inittrace.active && inittrace.id == getg().goid {
		// Init functions are executed sequentially in a single goroutine.
		inittrace.allocs += 1
	}
	return nil
}
// postMallocgcDebug runs debug-mode hooks after a malloc has produced
// object x with the given slot size: it updates init-trace byte counts,
// emits an execution-trace heap allocation event, and records tiny-block
// context for the checkfinalizers debug mode.
func postMallocgcDebug(x unsafe.Pointer, elemsize uintptr, typ *_type) {
	if inittrace.active && inittrace.id == getg().goid {
		// Init functions are executed sequentially in a single goroutine.
		inittrace.bytes += uint64(elemsize)
	}

	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.HeapObjectAlloc(uintptr(x), typ)
			traceRelease(trace)
		}
	}

	// N.B. elemsize == 0 indicates a tiny allocation, since no new slot was
	// allocated to fulfill this call to mallocgc. This means checkfinalizer
	// will only flag an error if there is actually any risk. If an allocation
	// has the tiny block to itself, it will not get flagged, because we won't
	// mark the block as a tiny block.
	if debug.checkfinalizers != 0 && elemsize == 0 {
		setTinyBlockContext(unsafe.Pointer(alignDown(uintptr(x), maxTinySize)))
	}
}
// deductAssistCredit reduces the current G's assist credit
// by size bytes, and assists the GC if necessary.
//
// Caller must be preemptible.
func deductAssistCredit(size uintptr) {
	// Bill the allocation to the user goroutine rather than any
	// system goroutine running on its behalf.
	g := getg()
	if curg := g.m.curg; curg != nil {
		g = curg
	}
	// Subtract the bytes from the G's credit. Internal fragmentation
	// is accounted for at the end of mallocgc.
	g.gcAssistBytes -= int64(size)
	if g.gcAssistBytes < 0 {
		// The G is in debt to the GC: pay it down by assisting before
		// allocating. This must happen before disabling preemption.
		gcAssistAlloc(g)
	}
}
// addAssistCredit is like deductAssistCredit,
// but adds credit rather than removes,
// and never calls gcAssistAlloc.
func addAssistCredit(size uintptr) {
	// Credit the user goroutine, mirroring deductAssistCredit.
	g := getg()
	if curg := g.m.curg; curg != nil { // TODO(thepudds): do we need to do this?
		g = curg
	}
	g.gcAssistBytes += int64(size)
}
// Compile-time debug switches for the reusable-object (freegc) machinery.
const (
	// doubleCheckReusable enables some additional invariant checks for the
	// runtime.freegc and reusable objects. Note that some of these checks alter timing,
	// and it is good to test changes with and without this enabled.
	doubleCheckReusable = false

	// debugReusableLog enables some printlns for runtime.freegc and reusable objects.
	debugReusableLog = false
)
// freegc records that a heap object is reusable and available for
// immediate reuse in a subsequent mallocgc allocation, without
// needing to wait for the GC cycle to progress.
//
// The information is recorded in a free list stored in the
// current P's mcache. The caller must pass in the user size
// and whether the object has pointers, which allows a faster free
// operation.
//
// freegc must be called by the effective owner of ptr who knows
// the pointer is logically dead, with no possible aliases that might
// be used past that moment. In other words, ptr must be the
// last and only pointer to its referent.
//
// The intended caller is the compiler.
//
// Note: please do not send changes that attempt to add freegc calls
// to the standard library.
//
// ptr must point to a heap object or into the current g's stack,
// in which case freegc is a no-op. In particular, ptr must not point
// to memory in the data or bss sections, which is partially enforced.
// For objects with a malloc header, ptr should point mallocHeaderSize bytes
// past the base; otherwise, ptr should point to the base of the heap object.
// In other words, ptr should be the same pointer that was returned by mallocgc.
//
// In addition, the caller must know that ptr's object has no specials, such
// as might have been created by a call to SetFinalizer or AddCleanup.
// (Internally, the runtime deals appropriately with internally-created
// specials, such as specials for memory profiling).
//
// If the size of ptr's object is less than 16 bytes or greater than
// 32KiB - gc.MallocHeaderSize bytes, freegc is currently a no-op. It must only
// be called in alloc-safe places. It currently throws if noscan is false
// (support for which is implemented in a later CL in our stack).
//
// Note that freegc accepts an unsafe.Pointer and hence keeps the pointer
// alive. It therefore could be a pessimization in some cases (such
// as a long-lived function) if the caller does not call freegc before
// or roughly when the liveness analysis of the compiler
// would otherwise have determined ptr's object is reclaimable by the GC.
//
// Returns whether the object was actually recorded as reusable.
func freegc(ptr unsafe.Pointer, size uintptr, noscan bool) bool {
	if !runtimeFreegcEnabled || !reusableSize(size) {
		return false
	}
	if sizeSpecializedMallocEnabled && !noscan {
		// TODO(thepudds): temporarily disable freegc with SizeSpecializedMalloc for pointer types
		// until we finish integrating.
		return false
	}
	if ptr == nil {
		throw("freegc nil")
	}

	// Set mp.mallocing to keep from being preempted by GC.
	// Otherwise, the GC could flush our mcache or otherwise cause problems.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("freegc deadlock")
	}
	if mp.gsignal == getg() {
		throw("freegc during signal")
	}
	mp.mallocing = 1

	if mp.curg.stack.lo <= uintptr(ptr) && uintptr(ptr) < mp.curg.stack.hi {
		// This points into our stack, so free is a no-op.
		mp.mallocing = 0
		releasem(mp)
		return false
	}

	if doubleCheckReusable {
		// TODO(thepudds): we could enforce no free on globals in bss or data. Maybe by
		// checking span via spanOf or spanOfHeap, or maybe walk from firstmoduledata
		// like isGoPointerWithoutSpan, or activeModules, or something. If so, we might
		// be able to delay checking until reuse (e.g., check span just before reusing,
		// though currently we don't always need to lookup a span on reuse). If we think
		// no usage patterns could result in globals, maybe enforcement for globals could
		// be behind -d=checkptr=1 or similar. The compiler can have knowledge of where
		// a variable is allocated, but stdlib does not, although there are certain
		// usage patterns that cannot result in a global.
		// TODO(thepudds): separately, consider a local debugReusableMcacheOnly here
		// to ignore freed objects if not in mspan in mcache, maybe when freeing and reading,
		// by checking something like s.base() <= uintptr(v) && uintptr(v) < s.limit. Or
		// maybe a GODEBUG or compiler debug flag.
		span := spanOf(uintptr(ptr))
		if span == nil {
			throw("nextReusable: nil span for pointer in free list")
		}
		if state := span.state.get(); state != mSpanInUse {
			throw("nextReusable: span is not in use")
		}
	}

	if debug.clobberfree != 0 {
		clobberfree(ptr, size)
	}

	// We first check if p is still in our per-P cache.
	// Get our per-P cache for small objects.
	c := getMCache(mp)
	if c == nil {
		throw("freegc called without a P or outside bootstrapping")
	}

	v := uintptr(ptr)
	if !noscan && !heapBitsInSpan(size) {
		// mallocgcSmallScanHeader expects to get the base address of the object back
		// from the findReusable funcs (as well as from nextFreeFast and nextFree), and
		// not mallocHeaderSize bytes into an object, so adjust that here.
		v -= mallocHeaderSize
		// The size class lookup wants size to be adjusted by mallocHeaderSize.
		size += mallocHeaderSize
	}

	// TODO(thepudds): should verify (behind doubleCheckReusable constant) that our calculated
	// sizeclass here matches what's in span found via spanOf(ptr) or findObject(ptr).
	var sizeclass uint8
	if size <= gc.SmallSizeMax-8 {
		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
	} else {
		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
	}
	spc := makeSpanClass(sizeclass, noscan)
	s := c.alloc[spc]

	if debugReusableLog {
		if s.base() <= uintptr(v) && uintptr(v) < s.limit {
			println("freegc [in mcache]:", hex(uintptr(v)), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled)
		} else {
			println("freegc [NOT in mcache]:", hex(uintptr(v)), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled)
		}
	}

	// Push the object onto the appropriate per-span-class reusable list.
	if noscan {
		c.addReusableNoscan(spc, uintptr(v))
	} else {
		// TODO(thepudds): implemented in later CL in our stack.
		throw("freegc called for object with pointers, not yet implemented")
	}

	// For stats, for now we leave allocCount alone, roughly pretending to the rest
	// of the system that this potential reuse never happened.
	mp.mallocing = 0
	releasem(mp)
	return true
}
// nextReusableNoScan returns the next reusable object for a noscan span,
// or 0 if no reusable object is found.
//
// The returned span is non-nil only when the write barrier is enabled,
// in which case it is the span containing the returned object (either s,
// or one found via spanOf). s is the mcache's current span for spc and is
// used to avoid a span lookup when the object lies within it.
func (c *mcache) nextReusableNoScan(s *mspan, spc spanClass) (gclinkptr, *mspan) {
	if !runtimeFreegcEnabled {
		return 0, s
	}

	// Pop a reusable pointer from the free list for this span class.
	v := c.reusableNoscan[spc]
	if v == 0 {
		// Free list is empty.
		return 0, s
	}
	c.reusableNoscan[spc] = v.ptr().next

	if debugReusableLog {
		println("reusing from ptr free list:", hex(v), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled)
	}
	if doubleCheckReusable {
		doubleCheckNextReusable(v) // debug only sanity check
	}

	// For noscan spans, we only need the span if the write barrier is enabled (so that our caller
	// can call gcmarknewobject to allocate black). If the write barrier is enabled, we can skip
	// looking up the span when the pointer is in a span in the mcache.
	if !writeBarrier.enabled {
		return v, nil
	}
	if s.base() <= uintptr(v) && uintptr(v) < s.limit {
		// Return the original span.
		return v, s
	}

	// We must find and return the span.
	span := spanOf(uintptr(v))
	if span == nil {
		// TODO(thepudds): construct a test that triggers this throw.
		throw("nextReusableNoScan: nil span for pointer in reusable object free list")
	}
	return v, span
}
// doubleCheckNextReusable checks some invariants for an object v popped
// from a reusable free list: v must be the base address of a live
// (non-free) object in an in-use span. Throws on any violation.
// TODO(thepudds): will probably delete some of this. Can mostly be ignored for review.
func doubleCheckNextReusable(v gclinkptr) {
	// TODO(thepudds): should probably take the spanClass as well to confirm expected
	// sizeclass match.
	_, span, objIndex := findObject(uintptr(v), 0, 0)
	if span == nil {
		throw("nextReusable: nil span for pointer in free list")
	}
	if state := span.state.get(); state != mSpanInUse {
		throw("nextReusable: span is not in use")
	}
	if uintptr(v) < span.base() || uintptr(v) >= span.limit {
		throw("nextReusable: span is not in range")
	}
	if span.objBase(uintptr(v)) != uintptr(v) {
		print("nextReusable: v=", hex(v), " base=", hex(span.objBase(uintptr(v))), "\n")
		throw("nextReusable: v is non-base-address for object found on pointer free list")
	}
	if span.isFree(objIndex) {
		throw("nextReusable: pointer on free list is free")
	}

	const debugReusableEnsureSwept = false
	if debugReusableEnsureSwept {
		// Currently disabled.
		// Note: ensureSwept here alters behavior (not just an invariant check).
		span.ensureSwept()
		if span.isFree(objIndex) {
			throw("nextReusable: pointer on free list is free after ensureSwept")
		}
	}
}
// reusableSize reports if size is a currently supported size for a reusable object.
// Supported sizes are those in [maxTinySize, maxSmallSize-mallocHeaderSize].
func reusableSize(size uintptr) bool {
	return size >= maxTinySize && size <= maxSmallSize-mallocHeaderSize
}
// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
// on chunks of the buffer to be zeroed, with opportunities for preemption
// along the way. memclrNoHeapPointers contains no safepoints and also
// cannot be preemptively scheduled, so this provides a still-efficient
// block copy that can also be preempted on a reasonable granularity.
//
// Use this with care; if the data being cleared is tagged to contain
// pointers, this allows the GC to run before it is all cleared.
func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
	// Chunk size chosen by benchmarking: 128k is too small, 512k is too large.
	const chunkBytes = 256 * 1024
	start := uintptr(x)
	end := start + size
	for off := start; off < end; off += chunkBytes {
		// Yield before each chunk if preemption was requested;
		// may hold locks, e.g., profiling.
		if getg().preempt {
			goschedguarded()
		}
		// Clear min(remaining, chunkBytes) bytes.
		n := end - off
		if n > chunkBytes {
			n = chunkBytes
		}
		memclrNoHeapPointers(unsafe.Pointer(off), n)
	}
}
// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
	// Always zeroed: new(T) must return memory set to T's zero value.
	return mallocgc(typ.Size_, typ, true)
}
// maps_newobject exposes newobject to internal/runtime/maps via linkname.
//
//go:linkname maps_newobject internal/runtime/maps.newobject
func maps_newobject(typ *_type) unsafe.Pointer {
	return newobject(typ)
}
// reflect_unsafe_New is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/goccy/json
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	// Same contract as newobject: zeroed memory of typ's size.
	return mallocgc(typ.Size_, typ, true)
}
// reflectlite_unsafe_New exposes zeroed allocation to internal/reflectlite.
//
//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}
// newarray allocates an array of n elements of type typ.
//
// newarray should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname newarray
func newarray(typ *_type, n int) unsafe.Pointer {
	// Common single-element case: no overflow check needed.
	if n == 1 {
		return mallocgc(typ.Size_, typ, true)
	}
	// Guard against Size_*n overflow, oversized requests, and negative n
	// (a negative n converts to a huge uintptr, which the mem > maxAlloc
	// check alone may not catch).
	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
	if overflow || mem > maxAlloc || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(mem, typ, true)
}
// reflect_unsafe_NewArray is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/bytedance/sonic
//   - github.com/goccy/json
//   - github.com/modern-go/reflect2
//   - github.com/segmentio/encoding
//   - github.com/segmentio/kafka-go
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}
// maps_newarray exposes newarray to internal/runtime/maps via linkname.
//
//go:linkname maps_newarray internal/runtime/maps.newarray
func maps_newarray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}
// profilealloc resets the current mcache's nextSample counter and
// records a memory profile sample.
//
// The caller must be non-preemptible and have a P.
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache(mp)
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	// Remember the rate the new sample point was computed with, so a
	// change to MemProfileRate is noticed on a later allocation.
	c.memProfRate = MemProfileRate
	c.nextSample = nextSample()
	mProf_Malloc(mp, x, size)
}
// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution (exp(MemProfileRate)), so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() int64 {
	switch MemProfileRate {
	case 0:
		// Basically never sample.
		return math.MaxInt64
	case 1:
		// Sample immediately.
		return 0
	default:
		return int64(fastexprand(MemProfileRate))
	}
}
// fastexprand returns a random number from an exponential distribution with
// the specified mean. For positive mean the result is always at least 1
// (qlog is clamped to <= 0 and minusLog2 is negative, so the product is
// non-negative before the +1).
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution exp(-mean*x).
	// The probability distribution function is mean*exp(-mean*x), so the CDF is
	// p = 1 - exp(-mean*x), so
	// q = 1 - p == exp(-mean*x)
	// log_e(q) = -mean*x
	// -log_e(q)/mean = x
	// x = -log_e(q) * mean
	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
	const randomBitCount = 26
	// +1 keeps q in (0, 2^26], avoiding log(0).
	q := cheaprandn(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}
// persistentAlloc is bump-allocator state: the current chunk and the
// offset of the next free byte within it.
type persistentAlloc struct {
	base *notInHeap // current chunk of persistentChunkSize bytes, or nil
	off  uintptr    // offset of the next free byte in base
}

// globalAlloc is the fallback persistentAlloc used when there is no P
// (e.g. during early bootstrap); it is guarded by its embedded mutex.
var globalAlloc struct {
	mutex
	persistentAlloc
}

// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10

// persistentChunks is a list of all the persistent chunks we have
// allocated. The list is maintained through the first word in the
// persistent chunk. This is updated atomically.
var persistentChunks *notInHeap
// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
// sysStat must be non-nil.
//
// Consider marking persistentalloc'd types not in heap by embedding
// internal/runtime/sys.NotInHeap.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	// Run on the system stack: persistentalloc1 may (re)invoke stack growth.
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}
// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//
// persistentalloc1 bump-allocates size bytes at the given alignment from a
// per-P chunk when possible, falling back to the mutex-guarded global
// allocator when there is no P. Requests of maxBlock bytes or more bypass
// chunking and go straight to sysAlloc.
//
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > pageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		// Large request: allocate directly, no chunking.
		return (*notInHeap)(sysAlloc(size, sysStat, "immortal metadata"))
	}

	// Prefer the current P's allocator (no lock needed); otherwise take the
	// global allocator under its lock.
	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		// Current chunk exhausted (or none yet): grab a fresh chunk.
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys, "immortal metadata"))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
		// Lock-free push: store the old head in the chunk's first word,
		// then CAS the head to the new chunk.
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		// Start allocating past the link word written above.
		persistent.off = alignUp(goarch.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	// Chunks are charged to other_sys up front; move the stat to the
	// caller's sysStat when it differs.
	if sysStat != &memstats.other_sys {
		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}
// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//
// It walks the persistentChunks list (linked through each chunk's
// first word) and checks p against each chunk's address range.
//
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		// First word of each chunk links to the next.
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}
// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
// as needed.
//
// The caller is responsible for locking.
type linearAlloc struct {
	next   uintptr // next free byte
	mapped uintptr // one byte past end of mapped space
	end    uintptr // end of reserved space

	mapMemory bool // transition memory from Reserved to Ready if true
}
// init prepares l to hand out memory from the reserved region
// [base, base+size), optionally mapping pages as they are first used.
func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
	if base+size < base {
		// The region wraps the top of the address space. Chop off the
		// last byte so bounds arithmetic cannot overflow; that byte
		// stays reserved and is simply never mapped.
		size--
	}
	l.next = base
	l.mapped = base
	l.end = base + size
	l.mapMemory = mapMemory
}
// alloc carves size bytes at the given alignment out of the reserved
// region, returning nil if the region is exhausted. When l.mapMemory is
// set, pages newly spanned by the allocation are transitioned to Ready
// before the pointer is returned.
func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
		// Out of reserved space.
		return nil
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		if l.mapMemory {
			// Transition from Reserved to Prepared to Ready.
			n := pEnd - l.mapped
			sysMap(unsafe.Pointer(l.mapped), n, sysStat, vmaName)
			sysUsed(unsafe.Pointer(l.mapped), n, n)
		}
		l.mapped = pEnd
	}
	return unsafe.Pointer(p)
}
// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types which embed
// internal/runtime/sys.NotInHeap, but this serves as a generic type
// for situations where that isn't possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
type notInHeap struct{ _ sys.NotInHeap }

// add returns the pointer bytes past p. No bounds checking is performed;
// the caller must ensure the result stays within the allocation.
func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
// redZoneSize computes the size of the redzone for a given allocation.
// Refer to the implementation of the compiler-rt.
//
// The redzone doubles from 16 bytes up to 2048 as userSize crosses each
// threshold; thresholds are (bucket size - that bucket's redzone).
func redZoneSize(userSize uintptr) uintptr {
	limits := [...]uintptr{
		64 - 16,
		128 - 32,
		512 - 64,
		4096 - 128,
		(1 << 14) - 256,
		(1 << 15) - 512,
		(1 << 16) - 1024,
	}
	rz := uintptr(16)
	for _, limit := range limits {
		if userSize <= limit {
			return rz
		}
		rz <<= 1
	}
	return rz // 16 << 7 == 2048 for anything larger.
}
// Code generated by mkmalloc.go; DO NOT EDIT.
// See overview in malloc_stubs.go.
package runtime
import (
"internal/goarch"
"internal/goexperiment"
"internal/runtime/sys"
"unsafe"
)
// mallocgcSmallScanNoHeaderSC1 allocates an object in size class 1
// (elemsize 8) that needs GC scanning, with the pointer bitmap stored in
// the span's heap bits rather than in a malloc header. It is a
// size-class-specialized copy of the generic small-object allocation path;
// see the overview in malloc_stubs.go.
//
// size is the caller-requested size (<= elemsize), typ describes the
// object's pointer layout, and needzero is part of the shared signature
// (zeroing here is driven by span.needzero instead).
func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		// Allocating during mark termination is a fatal error.
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
	// Lock-rank bookkeeping: this allocation may queue a finalizer.
	lockRankMayQueueFinalizer()
	if debug.malloc {
		// Debug hooks may satisfy the allocation entirely by themselves.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
	if gcBlackenEnabled != 0 {
		// GC is running: charge this allocation against assist credit
		// so the mutator helps pay for marking work.
		deductAssistCredit(size)
	}
	// Constants this stub is specialized for.
	const sizeclass = 1
	const elemsize = 8
	// Pin to the current M so the mcache cannot change underneath us.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1 // note that an allocation is in progress
	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for this size class with the noscan bit clear (scannable).
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]
	// Inlined nextFreeFast: try to grab a free slot from the span's
	// 64-bit allocation-bitmap cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// If freeidx crosses a 64-slot boundary (and the span is not
			// exhausted), allocCache needs a refill; fall through to the
			// slow path in that case.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*8 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the cache and/or fetch a new span; may
		// request a GC-trigger test (performed after releasem below).
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// 64-bit fast path for size class 1: an 8-byte scannable object
		// is exactly one pointer word, so only scan accounting is needed
		// and the per-type heap-bit write below is skipped.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write (writeHeapBitsSmall): record typ's
		// pointer mask in the span's heap bitmap for this object.
		dataSize := size
		x := uintptr(x)
		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(8)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		src0 := readUintptr(getGCMask(typ))
		const elemsize = 8
		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: mask is all ones for dataSize words.
			src = (1 << (dataSize / goarch.PtrSize)) - 1
			scanSize = dataSize
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask across every element stored in
			// this object (array-style allocations).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}
		// Locate the object's bit range inside the span's heap bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The mask straddles two bitmap words; split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Single bitmap word: clear the object's bit range, then
			// merge in the new mask.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}
	// Publish order: zeroing and heap-bit writes must be visible before
	// any pointer to the object escapes.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate-black while the GC is active.
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}
	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}
	if gcBlackenEnabled != 0 && elemsize != 0 {
		// Charge assist credit for the size-class rounding
		// (elemsize - size) beyond the size deducted earlier.
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// mallocgcSmallScanNoHeaderSC2 allocates an object in size class 2
// (elemsize 16) that needs GC scanning, with the pointer bitmap stored in
// the span's heap bits rather than in a malloc header. It is a
// size-class-specialized copy of the generic small-object allocation path;
// see the overview in malloc_stubs.go.
//
// size is the caller-requested size (<= elemsize), typ describes the
// object's pointer layout, and needzero is part of the shared signature
// (zeroing here is driven by span.needzero instead).
func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		// Allocating during mark termination is a fatal error.
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
	// Lock-rank bookkeeping: this allocation may queue a finalizer.
	lockRankMayQueueFinalizer()
	if debug.malloc {
		// Debug hooks may satisfy the allocation entirely by themselves.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
	if gcBlackenEnabled != 0 {
		// GC is running: charge this allocation against assist credit.
		deductAssistCredit(size)
	}
	// Constants this stub is specialized for.
	const sizeclass = 2
	const elemsize = 16
	// Pin to the current M so the mcache cannot change underneath us.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1 // note that an allocation is in progress
	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for this size class with the noscan bit clear (scannable).
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]
	// Inlined nextFreeFast: try to grab a free slot from the span's
	// 64-bit allocation-bitmap cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// A 64-slot boundary (with slots remaining) needs an
			// allocCache refill; fall through to the slow path then.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the cache and/or fetch a new span; may
		// request a GC-trigger test (performed after releasem below).
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Constant-false here (sizeclass == 2): the generator keeps the
		// shared template shape; the heap-bit write below always runs.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write (writeHeapBitsSmall): record typ's
		// pointer mask in the span's heap bitmap for this object.
		dataSize := size
		x := uintptr(x)
		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(16)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		src0 := readUintptr(getGCMask(typ))
		const elemsize = 16
		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: mask is all ones for dataSize words.
			src = (1 << (dataSize / goarch.PtrSize)) - 1
			scanSize = dataSize
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask across every element stored in
			// this object (array-style allocations).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}
		// Locate the object's bit range inside the span's heap bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The mask straddles two bitmap words; split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Single bitmap word: clear the object's bit range, then
			// merge in the new mask.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}
	// Publish order: zeroing and heap-bit writes must be visible before
	// any pointer to the object escapes.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate-black while the GC is active.
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}
	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}
	if gcBlackenEnabled != 0 && elemsize != 0 {
		// Charge assist credit for the size-class rounding
		// (elemsize - size) beyond the size deducted earlier.
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// mallocgcSmallScanNoHeaderSC3 allocates an object in size class 3
// (elemsize 24) that needs GC scanning, with the pointer bitmap stored in
// the span's heap bits rather than in a malloc header. It is a
// size-class-specialized copy of the generic small-object allocation path;
// see the overview in malloc_stubs.go.
//
// size is the caller-requested size (<= elemsize), typ describes the
// object's pointer layout, and needzero is part of the shared signature
// (zeroing here is driven by span.needzero instead).
func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		// Allocating during mark termination is a fatal error.
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
	// Lock-rank bookkeeping: this allocation may queue a finalizer.
	lockRankMayQueueFinalizer()
	if debug.malloc {
		// Debug hooks may satisfy the allocation entirely by themselves.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
	if gcBlackenEnabled != 0 {
		// GC is running: charge this allocation against assist credit.
		deductAssistCredit(size)
	}
	// Constants this stub is specialized for.
	const sizeclass = 3
	const elemsize = 24
	// Pin to the current M so the mcache cannot change underneath us.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1 // note that an allocation is in progress
	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for this size class with the noscan bit clear (scannable).
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]
	// Inlined nextFreeFast: try to grab a free slot from the span's
	// 64-bit allocation-bitmap cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// A 64-slot boundary (with slots remaining) needs an
			// allocCache refill; fall through to the slow path then.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*24 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the cache and/or fetch a new span; may
		// request a GC-trigger test (performed after releasem below).
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Constant-false here (sizeclass == 3): the generator keeps the
		// shared template shape; the heap-bit write below always runs.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write (writeHeapBitsSmall): record typ's
		// pointer mask in the span's heap bitmap for this object.
		dataSize := size
		x := uintptr(x)
		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(24)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		src0 := readUintptr(getGCMask(typ))
		const elemsize = 24
		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: mask is all ones for dataSize words.
			src = (1 << (dataSize / goarch.PtrSize)) - 1
			scanSize = dataSize
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask across every element stored in
			// this object (array-style allocations).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}
		// Locate the object's bit range inside the span's heap bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The mask straddles two bitmap words; split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Single bitmap word: clear the object's bit range, then
			// merge in the new mask.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}
	// Publish order: zeroing and heap-bit writes must be visible before
	// any pointer to the object escapes.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate-black while the GC is active.
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}
	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}
	if gcBlackenEnabled != 0 && elemsize != 0 {
		// Charge assist credit for the size-class rounding
		// (elemsize - size) beyond the size deducted earlier.
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// mallocgcSmallScanNoHeaderSC4 allocates an object in size class 4
// (elemsize 32) that needs GC scanning, with the pointer bitmap stored in
// the span's heap bits rather than in a malloc header. It is a
// size-class-specialized copy of the generic small-object allocation path;
// see the overview in malloc_stubs.go.
//
// size is the caller-requested size (<= elemsize), typ describes the
// object's pointer layout, and needzero is part of the shared signature
// (zeroing here is driven by span.needzero instead).
func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		// Allocating during mark termination is a fatal error.
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
	// Lock-rank bookkeeping: this allocation may queue a finalizer.
	lockRankMayQueueFinalizer()
	if debug.malloc {
		// Debug hooks may satisfy the allocation entirely by themselves.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
	if gcBlackenEnabled != 0 {
		// GC is running: charge this allocation against assist credit.
		deductAssistCredit(size)
	}
	// Constants this stub is specialized for.
	const sizeclass = 4
	const elemsize = 32
	// Pin to the current M so the mcache cannot change underneath us.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1 // note that an allocation is in progress
	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for this size class with the noscan bit clear (scannable).
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]
	// Inlined nextFreeFast: try to grab a free slot from the span's
	// 64-bit allocation-bitmap cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// A 64-slot boundary (with slots remaining) needs an
			// allocCache refill; fall through to the slow path then.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*32 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the cache and/or fetch a new span; may
		// request a GC-trigger test (performed after releasem below).
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Constant-false here (sizeclass == 4): the generator keeps the
		// shared template shape; the heap-bit write below always runs.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write (writeHeapBitsSmall): record typ's
		// pointer mask in the span's heap bitmap for this object.
		dataSize := size
		x := uintptr(x)
		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(32)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		src0 := readUintptr(getGCMask(typ))
		const elemsize = 32
		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: mask is all ones for dataSize words.
			src = (1 << (dataSize / goarch.PtrSize)) - 1
			scanSize = dataSize
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask across every element stored in
			// this object (array-style allocations).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}
		// Locate the object's bit range inside the span's heap bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The mask straddles two bitmap words; split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Single bitmap word: clear the object's bit range, then
			// merge in the new mask.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}
	// Publish order: zeroing and heap-bit writes must be visible before
	// any pointer to the object escapes.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate-black while the GC is active.
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}
	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}
	if gcBlackenEnabled != 0 && elemsize != 0 {
		// Charge assist credit for the size-class rounding
		// (elemsize - size) beyond the size deducted earlier.
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// mallocgcSmallScanNoHeaderSC5 allocates an object in size class 5
// (elemsize 48) that needs GC scanning, with the pointer bitmap stored in
// the span's heap bits rather than in a malloc header. It is a
// size-class-specialized copy of the generic small-object allocation path;
// see the overview in malloc_stubs.go.
//
// size is the caller-requested size (<= elemsize), typ describes the
// object's pointer layout, and needzero is part of the shared signature
// (zeroing here is driven by span.needzero instead).
func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		// Allocating during mark termination is a fatal error.
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
	// Lock-rank bookkeeping: this allocation may queue a finalizer.
	lockRankMayQueueFinalizer()
	if debug.malloc {
		// Debug hooks may satisfy the allocation entirely by themselves.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
	if gcBlackenEnabled != 0 {
		// GC is running: charge this allocation against assist credit.
		deductAssistCredit(size)
	}
	// Constants this stub is specialized for.
	const sizeclass = 5
	const elemsize = 48
	// Pin to the current M so the mcache cannot change underneath us.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1 // note that an allocation is in progress
	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for this size class with the noscan bit clear (scannable).
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]
	// Inlined nextFreeFast: try to grab a free slot from the span's
	// 64-bit allocation-bitmap cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// A 64-slot boundary (with slots remaining) needs an
			// allocCache refill; fall through to the slow path then.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*48 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the cache and/or fetch a new span; may
		// request a GC-trigger test (performed after releasem below).
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Constant-false here (sizeclass == 5): the generator keeps the
		// shared template shape; the heap-bit write below always runs.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write (writeHeapBitsSmall): record typ's
		// pointer mask in the span's heap bitmap for this object.
		dataSize := size
		x := uintptr(x)
		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(48)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		src0 := readUintptr(getGCMask(typ))
		const elemsize = 48
		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: mask is all ones for dataSize words.
			src = (1 << (dataSize / goarch.PtrSize)) - 1
			scanSize = dataSize
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask across every element stored in
			// this object (array-style allocations).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}
		// Locate the object's bit range inside the span's heap bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The mask straddles two bitmap words; split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Single bitmap word: clear the object's bit range, then
			// merge in the new mask.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}
	// Publish order: zeroing and heap-bit writes must be visible before
	// any pointer to the object escapes.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate-black while the GC is active.
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}
	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}
	if gcBlackenEnabled != 0 && elemsize != 0 {
		// Charge assist credit for the size-class rounding
		// (elemsize - size) beyond the size deducted earlier.
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// mallocgcSmallScanNoHeaderSC6 allocates an object in size class 6
// (elemsize 64) that needs GC scanning, with the pointer bitmap stored in
// the span's heap bits rather than in a malloc header. It is a
// size-class-specialized copy of the generic small-object allocation path;
// see the overview in malloc_stubs.go.
//
// size is the caller-requested size (<= elemsize), typ describes the
// object's pointer layout, and needzero is part of the shared signature
// (zeroing here is driven by span.needzero instead).
func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		// Allocating during mark termination is a fatal error.
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
	// Lock-rank bookkeeping: this allocation may queue a finalizer.
	lockRankMayQueueFinalizer()
	if debug.malloc {
		// Debug hooks may satisfy the allocation entirely by themselves.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
	if gcBlackenEnabled != 0 {
		// GC is running: charge this allocation against assist credit.
		deductAssistCredit(size)
	}
	// Constants this stub is specialized for.
	const sizeclass = 6
	const elemsize = 64
	// Pin to the current M so the mcache cannot change underneath us.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1 // note that an allocation is in progress
	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for this size class with the noscan bit clear (scannable).
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]
	// Inlined nextFreeFast: try to grab a free slot from the span's
	// 64-bit allocation-bitmap cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// A 64-slot boundary (with slots remaining) needs an
			// allocCache refill; fall through to the slow path then.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*64 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the cache and/or fetch a new span; may
		// request a GC-trigger test (performed after releasem below).
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Constant-false here (sizeclass == 6): the generator keeps the
		// shared template shape; the heap-bit write below always runs.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write (writeHeapBitsSmall): record typ's
		// pointer mask in the span's heap bitmap for this object.
		dataSize := size
		x := uintptr(x)
		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(64)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		src0 := readUintptr(getGCMask(typ))
		const elemsize = 64
		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: mask is all ones for dataSize words.
			src = (1 << (dataSize / goarch.PtrSize)) - 1
			scanSize = dataSize
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask across every element stored in
			// this object (array-style allocations).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}
		// Locate the object's bit range inside the span's heap bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The mask straddles two bitmap words; split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Single bitmap word: clear the object's bit range, then
			// merge in the new mask.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}
	// Publish order: zeroing and heap-bit writes must be visible before
	// any pointer to the object escapes.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate-black while the GC is active.
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}
	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}
	if gcBlackenEnabled != 0 && elemsize != 0 {
		// Charge assist credit for the size-class rounding
		// (elemsize - size) beyond the size deducted earlier.
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// mallocgcSmallScanNoHeaderSC7 allocates an object in size class 7
// (elemsize 80) that needs GC scanning, with the pointer bitmap stored in
// the span's heap bits rather than in a malloc header. It is a
// size-class-specialized copy of the generic small-object allocation path;
// see the overview in malloc_stubs.go.
//
// size is the caller-requested size (<= elemsize), typ describes the
// object's pointer layout, and needzero is part of the shared signature
// (zeroing here is driven by span.needzero instead).
func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		// Allocating during mark termination is a fatal error.
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
	// Lock-rank bookkeeping: this allocation may queue a finalizer.
	lockRankMayQueueFinalizer()
	if debug.malloc {
		// Debug hooks may satisfy the allocation entirely by themselves.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
	if gcBlackenEnabled != 0 {
		// GC is running: charge this allocation against assist credit.
		deductAssistCredit(size)
	}
	// Constants this stub is specialized for.
	const sizeclass = 7
	const elemsize = 80
	// Pin to the current M so the mcache cannot change underneath us.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1 // note that an allocation is in progress
	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for this size class with the noscan bit clear (scannable).
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]
	// Inlined nextFreeFast: try to grab a free slot from the span's
	// 64-bit allocation-bitmap cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// A 64-slot boundary (with slots remaining) needs an
			// allocCache refill; fall through to the slow path then.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*80 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the cache and/or fetch a new span; may
		// request a GC-trigger test (performed after releasem below).
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Constant-false here (sizeclass == 7): the generator keeps the
		// shared template shape; the heap-bit write below always runs.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write (writeHeapBitsSmall): record typ's
		// pointer mask in the span's heap bitmap for this object.
		dataSize := size
		x := uintptr(x)
		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(80)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		src0 := readUintptr(getGCMask(typ))
		const elemsize = 80
		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: mask is all ones for dataSize words.
			src = (1 << (dataSize / goarch.PtrSize)) - 1
			scanSize = dataSize
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask across every element stored in
			// this object (array-style allocations).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}
		// Locate the object's bit range inside the span's heap bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The mask straddles two bitmap words; split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Single bitmap word: clear the object's bit range, then
			// merge in the new mask.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}
	// Publish order: zeroing and heap-bit writes must be visible before
	// any pointer to the object escapes.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate-black while the GC is active.
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}
	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}
	if gcBlackenEnabled != 0 && elemsize != 0 {
		// Charge assist credit for the size-class rounding
		// (elemsize - size) beyond the size deducted earlier.
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// mallocgcSmallScanNoHeaderSC8 allocates an object in size class 8
// (elemsize 96) that needs GC scanning, with the pointer bitmap stored in
// the span's heap bits rather than in a malloc header. It is a
// size-class-specialized copy of the generic small-object allocation path;
// see the overview in malloc_stubs.go.
//
// size is the caller-requested size (<= elemsize), typ describes the
// object's pointer layout, and needzero is part of the shared signature
// (zeroing here is driven by span.needzero instead).
func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		// Allocating during mark termination is a fatal error.
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
	// Lock-rank bookkeeping: this allocation may queue a finalizer.
	lockRankMayQueueFinalizer()
	if debug.malloc {
		// Debug hooks may satisfy the allocation entirely by themselves.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
	if gcBlackenEnabled != 0 {
		// GC is running: charge this allocation against assist credit.
		deductAssistCredit(size)
	}
	// Constants this stub is specialized for.
	const sizeclass = 8
	const elemsize = 96
	// Pin to the current M so the mcache cannot change underneath us.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1 // note that an allocation is in progress
	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for this size class with the noscan bit clear (scannable).
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]
	// Inlined nextFreeFast: try to grab a free slot from the span's
	// 64-bit allocation-bitmap cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// A 64-slot boundary (with slots remaining) needs an
			// allocCache refill; fall through to the slow path then.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*96 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the cache and/or fetch a new span; may
		// request a GC-trigger test (performed after releasem below).
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Constant-false here (sizeclass == 8): the generator keeps the
		// shared template shape; the heap-bit write below always runs.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write (writeHeapBitsSmall): record typ's
		// pointer mask in the span's heap bitmap for this object.
		dataSize := size
		x := uintptr(x)
		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(96)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		src0 := readUintptr(getGCMask(typ))
		const elemsize = 96
		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: mask is all ones for dataSize words.
			src = (1 << (dataSize / goarch.PtrSize)) - 1
			scanSize = dataSize
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask across every element stored in
			// this object (array-style allocations).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}
		// Locate the object's bit range inside the span's heap bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The mask straddles two bitmap words; split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Single bitmap word: clear the object's bit range, then
			// merge in the new mask.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}
	// Publish order: zeroing and heap-bit writes must be visible before
	// any pointer to the object escapes.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate-black while the GC is active.
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}
	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}
	if gcBlackenEnabled != 0 && elemsize != 0 {
		// Charge assist credit for the size-class rounding
		// (elemsize - size) beyond the size deducted earlier.
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// mallocgcSmallScanNoHeaderSC9 allocates an object in size class 9
// (elemsize 112) that needs GC scanning, with the pointer bitmap stored in
// the span's heap bits rather than in a malloc header. It is a
// size-class-specialized copy of the generic small-object allocation path;
// see the overview in malloc_stubs.go.
//
// size is the caller-requested size (<= elemsize), typ describes the
// object's pointer layout, and needzero is part of the shared signature
// (zeroing here is driven by span.needzero instead).
func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		// Allocating during mark termination is a fatal error.
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
	// Lock-rank bookkeeping: this allocation may queue a finalizer.
	lockRankMayQueueFinalizer()
	if debug.malloc {
		// Debug hooks may satisfy the allocation entirely by themselves.
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
	if gcBlackenEnabled != 0 {
		// GC is running: charge this allocation against assist credit.
		deductAssistCredit(size)
	}
	// Constants this stub is specialized for.
	const sizeclass = 9
	const elemsize = 112
	// Pin to the current M so the mcache cannot change underneath us.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1 // note that an allocation is in progress
	checkGCTrigger := false
	c := getMCache(mp)
	// Span class for this size class with the noscan bit clear (scannable).
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]
	// Inlined nextFreeFast: try to grab a free slot from the span's
	// 64-bit allocation-bitmap cache.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// A 64-slot boundary (with slots remaining) needs an
			// allocCache refill; fall through to the slow path then.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*112 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the cache and/or fetch a new span; may
		// request a GC-trigger test (performed after releasem below).
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// Constant-false here (sizeclass == 9): the generator keeps the
		// shared template shape; the heap-bit write below always runs.
		c.scanAlloc += 8
	} else {
		// Inlined heap-bits write (writeHeapBitsSmall): record typ's
		// pointer mask in the span's heap bitmap for this object.
		dataSize := size
		x := uintptr(x)
		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(112)) {
			throw("tried to write heap bits, but no heap bits in span")
		}
		src0 := readUintptr(getGCMask(typ))
		const elemsize = 112
		var scanSize uintptr
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-sized element: mask is all ones for dataSize words.
			src = (1 << (dataSize / goarch.PtrSize)) - 1
			scanSize = dataSize
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			// Replicate the type's mask across every element stored in
			// this object (array-style allocations).
			scanSize = typ.PtrBytes
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}
		// Locate the object's bit range inside the span's heap bitmap.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The mask straddles two bitmap words; split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// Single bitmap word: clear the object's bit range, then
			// merge in the new mask.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}
		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}
	// Publish order: zeroing and heap-bit writes must be visible before
	// any pointer to the object escapes.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate-black while the GC is active.
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}
	// Heap-profiling sample countdown.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}
	if gcBlackenEnabled != 0 && elemsize != 0 {
		// Charge assist credit for the size-class rounding
		// (elemsize - size) beyond the size deducted earlier.
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// mallocgcSmallScanNoHeaderSC10 allocates a size-class-10 object (elemsize
// 128 bytes) that contains pointers and lives in a span without malloc
// headers: the object's GC pointer bitmap is written into the span's
// out-of-line heap bits. This is one of a family of generated per-size-class
// specializations of the small-object malloc fast path; the size class and
// element size are compile-time constants so the hot path is straight-line.
//
// size is the caller-requested size (at most elemsize), typ describes the
// object's pointer layout, and needzero is part of the common variant
// signature (zeroing is actually driven by span.needzero below).
// Returns a pointer to the newly allocated object.
func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
// Allocation is forbidden during GC mark termination.
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
// This allocation may eventually queue a finalizer; account for that in
// lock-rank checking before doing any real work.
lockRankMayQueueFinalizer()
if debug.malloc {
// Debug/trace hooks may service the allocation entirely.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// GC is active: charge this goroutine assist credit for size bytes.
deductAssistCredit(size)
}
const sizeclass = 10
const elemsize = 128
// Disable preemption while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
// Flag malloc-in-progress to catch re-entrant allocation.
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Scannable span class for this size class (noscan bit clear).
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: try to pop a free slot from the span's cached
// 64-slot allocation bitmap.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Decline the slot only when the cache window would need a
// refill (freeidx at a 64-slot boundary short of nelems).
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
128 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Fast path missed: refill from mcentral; this can grow the heap
// and request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
// Span memory may be dirty; zero the whole element.
memclrNoHeapPointers(x, elemsize)
}
if goarch.PtrSize == 8 && sizeclass == 1 {
// Statically dead here (sizeclass is the constant 10); kept for
// parity with the generated template.
c.scanAlloc += 8
} else {
// Inlined heap-bitmap write: record typ's pointer/scalar bitmap for
// this object in the span's heap bits.
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(128)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 128
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
// Pointer-word-sized element type: every word is a pointer.
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Replicate the type's bitmap once per element stored in the
// allocation.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
// Word index i and bit offset j of this object's bitmap within the
// span's heap bits.
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// Bitmap straddles two words: split the write.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
// Bitmap fits in a single word.
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Publish the zeroed object and its heap bits before the pointer can be
// observed (e.g. by the GC).
publicationBarrier()
if writeBarrier.enabled {
// Allocate-black while a GC cycle is running.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap profiling: sample when the byte budget is exhausted or the
// sampling rate changed.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
// Heap grew during refill; start a GC cycle if warranted.
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Experimental runtime-secret tracking of this allocation.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Charge assist credit for the size-class slack (elemsize - size)
// not covered by deductAssistCredit above.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC11 allocates a size-class-11 object (elemsize
// 144 bytes) that contains pointers and lives in a span without malloc
// headers: the object's GC pointer bitmap is written into the span's
// out-of-line heap bits. Generated per-size-class specialization of the
// small-object malloc fast path; see mallocgcSmallScanNoHeaderSC10 for the
// shared structure.
//
// size is the caller-requested size (at most elemsize), typ describes the
// object's pointer layout, and needzero is part of the common variant
// signature (zeroing is actually driven by span.needzero below).
// Returns a pointer to the newly allocated object.
func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
// Allocation is forbidden during GC mark termination.
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
// May eventually queue a finalizer; account for lock ranking up front.
lockRankMayQueueFinalizer()
if debug.malloc {
// Debug/trace hooks may service the allocation entirely.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// GC is active: charge this goroutine assist credit for size bytes.
deductAssistCredit(size)
}
const sizeclass = 11
const elemsize = 144
// Disable preemption while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
// Flag malloc-in-progress to catch re-entrant allocation.
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Scannable span class for this size class (noscan bit clear).
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: try to pop a free slot from the span's cached
// 64-slot allocation bitmap.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Decline the slot only when the cache window would need a
// refill (freeidx at a 64-slot boundary short of nelems).
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
144 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Fast path missed: refill from mcentral; this can grow the heap
// and request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
// Span memory may be dirty; zero the whole element.
memclrNoHeapPointers(x, elemsize)
}
if goarch.PtrSize == 8 && sizeclass == 1 {
// Statically dead here (sizeclass is the constant 11); kept for
// parity with the generated template.
c.scanAlloc += 8
} else {
// Inlined heap-bitmap write: record typ's pointer/scalar bitmap for
// this object in the span's heap bits.
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(144)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 144
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
// Pointer-word-sized element type: every word is a pointer.
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Replicate the type's bitmap once per element stored in the
// allocation.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
// Word index i and bit offset j of this object's bitmap within the
// span's heap bits.
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// Bitmap straddles two words: split the write.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
// Bitmap fits in a single word.
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Publish the zeroed object and its heap bits before the pointer can be
// observed (e.g. by the GC).
publicationBarrier()
if writeBarrier.enabled {
// Allocate-black while a GC cycle is running.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap profiling: sample when the byte budget is exhausted or the
// sampling rate changed.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
// Heap grew during refill; start a GC cycle if warranted.
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Experimental runtime-secret tracking of this allocation.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Charge assist credit for the size-class slack (elemsize - size)
// not covered by deductAssistCredit above.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC12 allocates a size-class-12 object (elemsize
// 160 bytes) that contains pointers and lives in a span without malloc
// headers: the object's GC pointer bitmap is written into the span's
// out-of-line heap bits. Generated per-size-class specialization of the
// small-object malloc fast path; see mallocgcSmallScanNoHeaderSC10 for the
// shared structure.
//
// size is the caller-requested size (at most elemsize), typ describes the
// object's pointer layout, and needzero is part of the common variant
// signature (zeroing is actually driven by span.needzero below).
// Returns a pointer to the newly allocated object.
func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
// Allocation is forbidden during GC mark termination.
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
// May eventually queue a finalizer; account for lock ranking up front.
lockRankMayQueueFinalizer()
if debug.malloc {
// Debug/trace hooks may service the allocation entirely.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// GC is active: charge this goroutine assist credit for size bytes.
deductAssistCredit(size)
}
const sizeclass = 12
const elemsize = 160
// Disable preemption while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
// Flag malloc-in-progress to catch re-entrant allocation.
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Scannable span class for this size class (noscan bit clear).
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: try to pop a free slot from the span's cached
// 64-slot allocation bitmap.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Decline the slot only when the cache window would need a
// refill (freeidx at a 64-slot boundary short of nelems).
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
160 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Fast path missed: refill from mcentral; this can grow the heap
// and request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
// Span memory may be dirty; zero the whole element.
memclrNoHeapPointers(x, elemsize)
}
if goarch.PtrSize == 8 && sizeclass == 1 {
// Statically dead here (sizeclass is the constant 12); kept for
// parity with the generated template.
c.scanAlloc += 8
} else {
// Inlined heap-bitmap write: record typ's pointer/scalar bitmap for
// this object in the span's heap bits.
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(160)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 160
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
// Pointer-word-sized element type: every word is a pointer.
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Replicate the type's bitmap once per element stored in the
// allocation.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
// Word index i and bit offset j of this object's bitmap within the
// span's heap bits.
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// Bitmap straddles two words: split the write.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
// Bitmap fits in a single word.
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Publish the zeroed object and its heap bits before the pointer can be
// observed (e.g. by the GC).
publicationBarrier()
if writeBarrier.enabled {
// Allocate-black while a GC cycle is running.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap profiling: sample when the byte budget is exhausted or the
// sampling rate changed.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
// Heap grew during refill; start a GC cycle if warranted.
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Experimental runtime-secret tracking of this allocation.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Charge assist credit for the size-class slack (elemsize - size)
// not covered by deductAssistCredit above.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC13 allocates a size-class-13 object (elemsize
// 176 bytes) that contains pointers and lives in a span without malloc
// headers: the object's GC pointer bitmap is written into the span's
// out-of-line heap bits. Generated per-size-class specialization of the
// small-object malloc fast path; see mallocgcSmallScanNoHeaderSC10 for the
// shared structure.
//
// size is the caller-requested size (at most elemsize), typ describes the
// object's pointer layout, and needzero is part of the common variant
// signature (zeroing is actually driven by span.needzero below).
// Returns a pointer to the newly allocated object.
func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
// Allocation is forbidden during GC mark termination.
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
// May eventually queue a finalizer; account for lock ranking up front.
lockRankMayQueueFinalizer()
if debug.malloc {
// Debug/trace hooks may service the allocation entirely.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// GC is active: charge this goroutine assist credit for size bytes.
deductAssistCredit(size)
}
const sizeclass = 13
const elemsize = 176
// Disable preemption while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
// Flag malloc-in-progress to catch re-entrant allocation.
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Scannable span class for this size class (noscan bit clear).
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: try to pop a free slot from the span's cached
// 64-slot allocation bitmap.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Decline the slot only when the cache window would need a
// refill (freeidx at a 64-slot boundary short of nelems).
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
176 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Fast path missed: refill from mcentral; this can grow the heap
// and request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
// Span memory may be dirty; zero the whole element.
memclrNoHeapPointers(x, elemsize)
}
if goarch.PtrSize == 8 && sizeclass == 1 {
// Statically dead here (sizeclass is the constant 13); kept for
// parity with the generated template.
c.scanAlloc += 8
} else {
// Inlined heap-bitmap write: record typ's pointer/scalar bitmap for
// this object in the span's heap bits.
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(176)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 176
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
// Pointer-word-sized element type: every word is a pointer.
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Replicate the type's bitmap once per element stored in the
// allocation.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
// Word index i and bit offset j of this object's bitmap within the
// span's heap bits.
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// Bitmap straddles two words: split the write.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
// Bitmap fits in a single word.
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Publish the zeroed object and its heap bits before the pointer can be
// observed (e.g. by the GC).
publicationBarrier()
if writeBarrier.enabled {
// Allocate-black while a GC cycle is running.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap profiling: sample when the byte budget is exhausted or the
// sampling rate changed.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
// Heap grew during refill; start a GC cycle if warranted.
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Experimental runtime-secret tracking of this allocation.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Charge assist credit for the size-class slack (elemsize - size)
// not covered by deductAssistCredit above.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC14 allocates a size-class-14 object (elemsize
// 192 bytes) that contains pointers and lives in a span without malloc
// headers: the object's GC pointer bitmap is written into the span's
// out-of-line heap bits. Generated per-size-class specialization of the
// small-object malloc fast path; see mallocgcSmallScanNoHeaderSC10 for the
// shared structure.
//
// size is the caller-requested size (at most elemsize), typ describes the
// object's pointer layout, and needzero is part of the common variant
// signature (zeroing is actually driven by span.needzero below).
// Returns a pointer to the newly allocated object.
func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
// Allocation is forbidden during GC mark termination.
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
// May eventually queue a finalizer; account for lock ranking up front.
lockRankMayQueueFinalizer()
if debug.malloc {
// Debug/trace hooks may service the allocation entirely.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// GC is active: charge this goroutine assist credit for size bytes.
deductAssistCredit(size)
}
const sizeclass = 14
const elemsize = 192
// Disable preemption while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
// Flag malloc-in-progress to catch re-entrant allocation.
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Scannable span class for this size class (noscan bit clear).
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: try to pop a free slot from the span's cached
// 64-slot allocation bitmap.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Decline the slot only when the cache window would need a
// refill (freeidx at a 64-slot boundary short of nelems).
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
192 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Fast path missed: refill from mcentral; this can grow the heap
// and request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
// Span memory may be dirty; zero the whole element.
memclrNoHeapPointers(x, elemsize)
}
if goarch.PtrSize == 8 && sizeclass == 1 {
// Statically dead here (sizeclass is the constant 14); kept for
// parity with the generated template.
c.scanAlloc += 8
} else {
// Inlined heap-bitmap write: record typ's pointer/scalar bitmap for
// this object in the span's heap bits.
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(192)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 192
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
// Pointer-word-sized element type: every word is a pointer.
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Replicate the type's bitmap once per element stored in the
// allocation.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
// Word index i and bit offset j of this object's bitmap within the
// span's heap bits.
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// Bitmap straddles two words: split the write.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
// Bitmap fits in a single word.
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Publish the zeroed object and its heap bits before the pointer can be
// observed (e.g. by the GC).
publicationBarrier()
if writeBarrier.enabled {
// Allocate-black while a GC cycle is running.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap profiling: sample when the byte budget is exhausted or the
// sampling rate changed.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
// Heap grew during refill; start a GC cycle if warranted.
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Experimental runtime-secret tracking of this allocation.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Charge assist credit for the size-class slack (elemsize - size)
// not covered by deductAssistCredit above.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC15 allocates a size-class-15 object (elemsize
// 208 bytes) that contains pointers and lives in a span without malloc
// headers: the object's GC pointer bitmap is written into the span's
// out-of-line heap bits. Generated per-size-class specialization of the
// small-object malloc fast path; see mallocgcSmallScanNoHeaderSC10 for the
// shared structure.
//
// size is the caller-requested size (at most elemsize), typ describes the
// object's pointer layout, and needzero is part of the common variant
// signature (zeroing is actually driven by span.needzero below).
// Returns a pointer to the newly allocated object.
func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
// Allocation is forbidden during GC mark termination.
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
// May eventually queue a finalizer; account for lock ranking up front.
lockRankMayQueueFinalizer()
if debug.malloc {
// Debug/trace hooks may service the allocation entirely.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// GC is active: charge this goroutine assist credit for size bytes.
deductAssistCredit(size)
}
const sizeclass = 15
const elemsize = 208
// Disable preemption while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
// Flag malloc-in-progress to catch re-entrant allocation.
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Scannable span class for this size class (noscan bit clear).
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: try to pop a free slot from the span's cached
// 64-slot allocation bitmap.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Decline the slot only when the cache window would need a
// refill (freeidx at a 64-slot boundary short of nelems).
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
208 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Fast path missed: refill from mcentral; this can grow the heap
// and request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
// Span memory may be dirty; zero the whole element.
memclrNoHeapPointers(x, elemsize)
}
if goarch.PtrSize == 8 && sizeclass == 1 {
// Statically dead here (sizeclass is the constant 15); kept for
// parity with the generated template.
c.scanAlloc += 8
} else {
// Inlined heap-bitmap write: record typ's pointer/scalar bitmap for
// this object in the span's heap bits.
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(208)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 208
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
// Pointer-word-sized element type: every word is a pointer.
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Replicate the type's bitmap once per element stored in the
// allocation.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
// Word index i and bit offset j of this object's bitmap within the
// span's heap bits.
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// Bitmap straddles two words: split the write.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
// Bitmap fits in a single word.
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Publish the zeroed object and its heap bits before the pointer can be
// observed (e.g. by the GC).
publicationBarrier()
if writeBarrier.enabled {
// Allocate-black while a GC cycle is running.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap profiling: sample when the byte budget is exhausted or the
// sampling rate changed.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
// Heap grew during refill; start a GC cycle if warranted.
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Experimental runtime-secret tracking of this allocation.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Charge assist credit for the size-class slack (elemsize - size)
// not covered by deductAssistCredit above.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC16 allocates a size-class-16 object (elemsize
// 224 bytes) that contains pointers and lives in a span without malloc
// headers: the object's GC pointer bitmap is written into the span's
// out-of-line heap bits. Generated per-size-class specialization of the
// small-object malloc fast path; see mallocgcSmallScanNoHeaderSC10 for the
// shared structure.
//
// size is the caller-requested size (at most elemsize), typ describes the
// object's pointer layout, and needzero is part of the common variant
// signature (zeroing is actually driven by span.needzero below).
// Returns a pointer to the newly allocated object.
func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
// Allocation is forbidden during GC mark termination.
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
// May eventually queue a finalizer; account for lock ranking up front.
lockRankMayQueueFinalizer()
if debug.malloc {
// Debug/trace hooks may service the allocation entirely.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// GC is active: charge this goroutine assist credit for size bytes.
deductAssistCredit(size)
}
const sizeclass = 16
const elemsize = 224
// Disable preemption while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
// Flag malloc-in-progress to catch re-entrant allocation.
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Scannable span class for this size class (noscan bit clear).
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: try to pop a free slot from the span's cached
// 64-slot allocation bitmap.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Decline the slot only when the cache window would need a
// refill (freeidx at a 64-slot boundary short of nelems).
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
224 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Fast path missed: refill from mcentral; this can grow the heap
// and request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
// Span memory may be dirty; zero the whole element.
memclrNoHeapPointers(x, elemsize)
}
if goarch.PtrSize == 8 && sizeclass == 1 {
// Statically dead here (sizeclass is the constant 16); kept for
// parity with the generated template.
c.scanAlloc += 8
} else {
// Inlined heap-bitmap write: record typ's pointer/scalar bitmap for
// this object in the span's heap bits.
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(224)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 224
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
// Pointer-word-sized element type: every word is a pointer.
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Replicate the type's bitmap once per element stored in the
// allocation.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
// Word index i and bit offset j of this object's bitmap within the
// span's heap bits.
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// Bitmap straddles two words: split the write.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
// Bitmap fits in a single word.
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Publish the zeroed object and its heap bits before the pointer can be
// observed (e.g. by the GC).
publicationBarrier()
if writeBarrier.enabled {
// Allocate-black while a GC cycle is running.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap profiling: sample when the byte budget is exhausted or the
// sampling rate changed.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
// Heap grew during refill; start a GC cycle if warranted.
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Experimental runtime-secret tracking of this allocation.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Charge assist credit for the size-class slack (elemsize - size)
// not covered by deductAssistCredit above.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC17 allocates a size-class-17 object (elemsize
// 240 bytes) that contains pointers and lives in a span without malloc
// headers: the object's GC pointer bitmap is written into the span's
// out-of-line heap bits. Generated per-size-class specialization of the
// small-object malloc fast path; see mallocgcSmallScanNoHeaderSC10 for the
// shared structure.
//
// size is the caller-requested size (at most elemsize), typ describes the
// object's pointer layout, and needzero is part of the common variant
// signature (zeroing is actually driven by span.needzero below).
// Returns a pointer to the newly allocated object.
func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
// Allocation is forbidden during GC mark termination.
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
// May eventually queue a finalizer; account for lock ranking up front.
lockRankMayQueueFinalizer()
if debug.malloc {
// Debug/trace hooks may service the allocation entirely.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// GC is active: charge this goroutine assist credit for size bytes.
deductAssistCredit(size)
}
const sizeclass = 17
const elemsize = 240
// Disable preemption while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
// Flag malloc-in-progress to catch re-entrant allocation.
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Scannable span class for this size class (noscan bit clear).
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: try to pop a free slot from the span's cached
// 64-slot allocation bitmap.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Decline the slot only when the cache window would need a
// refill (freeidx at a 64-slot boundary short of nelems).
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
240 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Fast path missed: refill from mcentral; this can grow the heap
// and request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
// Span memory may be dirty; zero the whole element.
memclrNoHeapPointers(x, elemsize)
}
if goarch.PtrSize == 8 && sizeclass == 1 {
// Statically dead here (sizeclass is the constant 17); kept for
// parity with the generated template.
c.scanAlloc += 8
} else {
// Inlined heap-bitmap write: record typ's pointer/scalar bitmap for
// this object in the span's heap bits.
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(240)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 240
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
// Pointer-word-sized element type: every word is a pointer.
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Replicate the type's bitmap once per element stored in the
// allocation.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
// Word index i and bit offset j of this object's bitmap within the
// span's heap bits.
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// Bitmap straddles two words: split the write.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
// Bitmap fits in a single word.
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Publish the zeroed object and its heap bits before the pointer can be
// observed (e.g. by the GC).
publicationBarrier()
if writeBarrier.enabled {
// Allocate-black while a GC cycle is running.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap profiling: sample when the byte budget is exhausted or the
// sampling rate changed.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
// Heap grew during refill; start a GC cycle if warranted.
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Experimental runtime-secret tracking of this allocation.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Charge assist credit for the size-class slack (elemsize - size)
// not covered by deductAssistCredit above.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC18 allocates a size-class-18 object (elemsize
// 256 bytes) that contains pointers and lives in a span without malloc
// headers: the object's GC pointer bitmap is written into the span's
// out-of-line heap bits. Generated per-size-class specialization of the
// small-object malloc fast path; see mallocgcSmallScanNoHeaderSC10 for the
// shared structure.
//
// size is the caller-requested size (at most elemsize), typ describes the
// object's pointer layout, and needzero is part of the common variant
// signature (zeroing is actually driven by span.needzero below).
// Returns a pointer to the newly allocated object.
func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
// Allocation is forbidden during GC mark termination.
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
// May eventually queue a finalizer; account for lock ranking up front.
lockRankMayQueueFinalizer()
if debug.malloc {
// Debug/trace hooks may service the allocation entirely.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// GC is active: charge this goroutine assist credit for size bytes.
deductAssistCredit(size)
}
const sizeclass = 18
const elemsize = 256
// Disable preemption while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
// Flag malloc-in-progress to catch re-entrant allocation.
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Scannable span class for this size class (noscan bit clear).
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: try to pop a free slot from the span's cached
// 64-slot allocation bitmap.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Decline the slot only when the cache window would need a
// refill (freeidx at a 64-slot boundary short of nelems).
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
256 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Fast path missed: refill from mcentral; this can grow the heap
// and request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
// Span memory may be dirty; zero the whole element.
memclrNoHeapPointers(x, elemsize)
}
if goarch.PtrSize == 8 && sizeclass == 1 {
// Statically dead here (sizeclass is the constant 18); kept for
// parity with the generated template.
c.scanAlloc += 8
} else {
// Inlined heap-bitmap write: record typ's pointer/scalar bitmap for
// this object in the span's heap bits.
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(256)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 256
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
// Pointer-word-sized element type: every word is a pointer.
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Replicate the type's bitmap once per element stored in the
// allocation.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
// Word index i and bit offset j of this object's bitmap within the
// span's heap bits.
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// Bitmap straddles two words: split the write.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
// Bitmap fits in a single word.
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Publish the zeroed object and its heap bits before the pointer can be
// observed (e.g. by the GC).
publicationBarrier()
if writeBarrier.enabled {
// Allocate-black while a GC cycle is running.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap profiling: sample when the byte budget is exhausted or the
// sampling rate changed.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
// Heap grew during refill; start a GC cycle if warranted.
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Experimental runtime-secret tracking of this allocation.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Charge assist credit for the size-class slack (elemsize - size)
// not covered by deductAssistCredit above.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC19 is a specialized malloc fast path for small
// pointer-containing ("scan") objects allocated in spans that carry no malloc
// header, hard-wired to size class 19 (288-byte elements). It mirrors the
// generic mallocgc small-scan path with the size class and element size
// folded in as compile-time constants.
// NOTE(review): this appears to be machine-generated (one copy per size
// class) — changes belong in the generator/template, not in this copy.
func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Debug-only invariant: allocation is forbidden during GC mark termination.
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may service the allocation entirely itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge the GC assist budget while marking is enabled.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 19
const elemsize = 288
// Hold the M (disables preemption) while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Scan objects use the even span class (noscan bit clear).
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap without calling into the mcache.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// When this 64-slot cache word is exhausted (and it isn't also the
// end of the span), leave the result 0 so the slow path refills it.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
288 +
span.base())
}
}
}
v := nextFreeFastResult
// Slow path: refill the alloc cache (possibly with a fresh span).
// checkGCTrigger reports whether heap growth may warrant starting a GC.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Inlined heap-bitmap write: replicate typ's GC pointer mask across the
// object and store it into the span's packed pointer bitmap.
// (The sizeclass==1 arm is dead code here — sizeclass is 19 — but is kept
// by the shared template.)
if goarch.PtrSize == 8 && sizeclass == 1 {
c.scanAlloc += 8
} else {
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(288)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 288
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
// Pointer-sized element type: every word of the object is a pointer.
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Array allocation: repeat the element's mask once per element.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
// Word/bit offset of this object within the span's heap bitmap.
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// The object's bits straddle two bitmap words; split the store.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
// All bits fit in a single bitmap word.
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Make the zeroing and heap-bit stores visible before the pointer to the
// new object can be published to other goroutines.
publicationBarrier()
if writeBarrier.enabled {
// Allocate black during GC so the object survives this cycle.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// Charge the size-class rounding waste (elemsize - size) to assist credit.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC20 is a specialized malloc fast path for small
// pointer-containing ("scan") objects in spans without malloc headers,
// hard-wired to size class 20 (320-byte elements). Same template as the other
// SmallScanNoHeader variants; only the constants differ.
// NOTE(review): appears machine-generated — fix the generator, not this copy.
func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge the GC assist budget while marking is enabled.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 20
const elemsize = 320
// Hold the M (disables preemption) while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocCache bitmap; a zero result falls through to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
320 +
span.base())
}
}
}
v := nextFreeFastResult
// Slow path: refill the alloc cache (possibly a fresh span); checkGCTrigger
// reports whether heap growth may warrant starting a GC.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Inlined heap-bitmap write: replicate typ's GC pointer mask across the
// object and pack it into the span's pointer bitmap.
// (The sizeclass==1 arm is dead here; it is part of the shared template.)
if goarch.PtrSize == 8 && sizeclass == 1 {
c.scanAlloc += 8
} else {
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(320)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 320
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Array allocation: repeat the element's mask once per element.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// The object's bits straddle two bitmap words; split the store.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Make zeroing and heap-bit stores visible before the pointer is published.
publicationBarrier()
if writeBarrier.enabled {
// Allocate black during GC so the object survives this cycle.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// Charge the size-class rounding waste (elemsize - size) to assist credit.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC21 is a specialized malloc fast path for small
// pointer-containing ("scan") objects in spans without malloc headers,
// hard-wired to size class 21 (352-byte elements). Same template as the other
// SmallScanNoHeader variants; only the constants differ.
// NOTE(review): appears machine-generated — fix the generator, not this copy.
func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge the GC assist budget while marking is enabled.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 21
const elemsize = 352
// Hold the M (disables preemption) while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocCache bitmap; a zero result falls through to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
352 +
span.base())
}
}
}
v := nextFreeFastResult
// Slow path: refill the alloc cache (possibly a fresh span); checkGCTrigger
// reports whether heap growth may warrant starting a GC.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Inlined heap-bitmap write: replicate typ's GC pointer mask across the
// object and pack it into the span's pointer bitmap.
// (The sizeclass==1 arm is dead here; it is part of the shared template.)
if goarch.PtrSize == 8 && sizeclass == 1 {
c.scanAlloc += 8
} else {
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(352)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 352
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Array allocation: repeat the element's mask once per element.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// The object's bits straddle two bitmap words; split the store.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Make zeroing and heap-bit stores visible before the pointer is published.
publicationBarrier()
if writeBarrier.enabled {
// Allocate black during GC so the object survives this cycle.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// Charge the size-class rounding waste (elemsize - size) to assist credit.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC22 is a specialized malloc fast path for small
// pointer-containing ("scan") objects in spans without malloc headers,
// hard-wired to size class 22 (384-byte elements). Same template as the other
// SmallScanNoHeader variants; only the constants differ.
// NOTE(review): appears machine-generated — fix the generator, not this copy.
func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge the GC assist budget while marking is enabled.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 22
const elemsize = 384
// Hold the M (disables preemption) while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocCache bitmap; a zero result falls through to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
384 +
span.base())
}
}
}
v := nextFreeFastResult
// Slow path: refill the alloc cache (possibly a fresh span); checkGCTrigger
// reports whether heap growth may warrant starting a GC.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Inlined heap-bitmap write: replicate typ's GC pointer mask across the
// object and pack it into the span's pointer bitmap.
// (The sizeclass==1 arm is dead here; it is part of the shared template.)
if goarch.PtrSize == 8 && sizeclass == 1 {
c.scanAlloc += 8
} else {
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(384)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 384
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Array allocation: repeat the element's mask once per element.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// The object's bits straddle two bitmap words; split the store.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Make zeroing and heap-bit stores visible before the pointer is published.
publicationBarrier()
if writeBarrier.enabled {
// Allocate black during GC so the object survives this cycle.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// Charge the size-class rounding waste (elemsize - size) to assist credit.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC23 is a specialized malloc fast path for small
// pointer-containing ("scan") objects in spans without malloc headers,
// hard-wired to size class 23 (416-byte elements). Same template as the other
// SmallScanNoHeader variants; only the constants differ.
// NOTE(review): appears machine-generated — fix the generator, not this copy.
func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge the GC assist budget while marking is enabled.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 23
const elemsize = 416
// Hold the M (disables preemption) while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocCache bitmap; a zero result falls through to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
416 +
span.base())
}
}
}
v := nextFreeFastResult
// Slow path: refill the alloc cache (possibly a fresh span); checkGCTrigger
// reports whether heap growth may warrant starting a GC.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Inlined heap-bitmap write: replicate typ's GC pointer mask across the
// object and pack it into the span's pointer bitmap.
// (The sizeclass==1 arm is dead here; it is part of the shared template.)
if goarch.PtrSize == 8 && sizeclass == 1 {
c.scanAlloc += 8
} else {
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(416)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 416
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Array allocation: repeat the element's mask once per element.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// The object's bits straddle two bitmap words; split the store.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Make zeroing and heap-bit stores visible before the pointer is published.
publicationBarrier()
if writeBarrier.enabled {
// Allocate black during GC so the object survives this cycle.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// Charge the size-class rounding waste (elemsize - size) to assist credit.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC24 is a specialized malloc fast path for small
// pointer-containing ("scan") objects in spans without malloc headers,
// hard-wired to size class 24 (448-byte elements). Same template as the other
// SmallScanNoHeader variants; only the constants differ.
// NOTE(review): appears machine-generated — fix the generator, not this copy.
func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge the GC assist budget while marking is enabled.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 24
const elemsize = 448
// Hold the M (disables preemption) while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocCache bitmap; a zero result falls through to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
448 +
span.base())
}
}
}
v := nextFreeFastResult
// Slow path: refill the alloc cache (possibly a fresh span); checkGCTrigger
// reports whether heap growth may warrant starting a GC.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Inlined heap-bitmap write: replicate typ's GC pointer mask across the
// object and pack it into the span's pointer bitmap.
// (The sizeclass==1 arm is dead here; it is part of the shared template.)
if goarch.PtrSize == 8 && sizeclass == 1 {
c.scanAlloc += 8
} else {
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(448)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 448
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Array allocation: repeat the element's mask once per element.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// The object's bits straddle two bitmap words; split the store.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Make zeroing and heap-bit stores visible before the pointer is published.
publicationBarrier()
if writeBarrier.enabled {
// Allocate black during GC so the object survives this cycle.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// Charge the size-class rounding waste (elemsize - size) to assist credit.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC25 is a specialized malloc fast path for small
// pointer-containing ("scan") objects in spans without malloc headers,
// hard-wired to size class 25 (480-byte elements). Same template as the other
// SmallScanNoHeader variants; only the constants differ.
// NOTE(review): appears machine-generated — fix the generator, not this copy.
func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge the GC assist budget while marking is enabled.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 25
const elemsize = 480
// Hold the M (disables preemption) while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocCache bitmap; a zero result falls through to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
480 +
span.base())
}
}
}
v := nextFreeFastResult
// Slow path: refill the alloc cache (possibly a fresh span); checkGCTrigger
// reports whether heap growth may warrant starting a GC.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Inlined heap-bitmap write: replicate typ's GC pointer mask across the
// object and pack it into the span's pointer bitmap.
// (The sizeclass==1 arm is dead here; it is part of the shared template.)
if goarch.PtrSize == 8 && sizeclass == 1 {
c.scanAlloc += 8
} else {
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(480)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 480
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Array allocation: repeat the element's mask once per element.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// The object's bits straddle two bitmap words; split the store.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Make zeroing and heap-bit stores visible before the pointer is published.
publicationBarrier()
if writeBarrier.enabled {
// Allocate black during GC so the object survives this cycle.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// Charge the size-class rounding waste (elemsize - size) to assist credit.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallScanNoHeaderSC26 is a specialized malloc fast path for small
// pointer-containing ("scan") objects in spans without malloc headers,
// hard-wired to size class 26 (512-byte elements). Same template as the other
// SmallScanNoHeader variants; only the constants differ.
// NOTE(review): appears machine-generated — fix the generator, not this copy.
func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge the GC assist budget while marking is enabled.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 26
const elemsize = 512
// Hold the M (disables preemption) while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallScanNoHeader(size, typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(0)
span := c.alloc[spc]
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocCache bitmap; a zero result falls through to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
512 +
span.base())
}
}
}
v := nextFreeFastResult
// Slow path: refill the alloc cache (possibly a fresh span); checkGCTrigger
// reports whether heap growth may warrant starting a GC.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Inlined heap-bitmap write: replicate typ's GC pointer mask across the
// object and pack it into the span's pointer bitmap.
// (The sizeclass==1 arm is dead here; it is part of the shared template.)
if goarch.PtrSize == 8 && sizeclass == 1 {
c.scanAlloc += 8
} else {
dataSize := size
x := uintptr(x)
if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(512)) {
throw("tried to write heap bits, but no heap bits in span")
}
src0 := readUintptr(getGCMask(typ))
const elemsize = 512
var scanSize uintptr
src := src0
if typ.Size_ == goarch.PtrSize {
src = (1 << (dataSize / goarch.PtrSize)) - 1
scanSize = dataSize
} else {
if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
}
// Array allocation: repeat the element's mask once per element.
scanSize = typ.PtrBytes
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
}
dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
dst := unsafe.Pointer(dstBase)
o := (x - span.base()) / goarch.PtrSize
i := o / ptrBits
j := o % ptrBits
const bits uintptr = elemsize / goarch.PtrSize
const bitsIsPowerOfTwo = bits&(bits-1) == 0
if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
// The object's bits straddle two bitmap words; split the store.
bits0 := ptrBits - j
bits1 := bits - bits0
dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
} else {
dst := (*uintptr)(add(dst, i*goarch.PtrSize))
*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
}
const doubleCheck = false
if doubleCheck {
writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
}
if doubleCheckHeapSetType {
doubleCheckHeapType(x, dataSize, typ, nil, span)
}
c.scanAlloc += scanSize
}
// Make zeroing and heap-bit stores visible before the pointer is published.
publicationBarrier()
if writeBarrier.enabled {
// Allocate black during GC so the object survives this cycle.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// Charge the size-class rounding waste (elemsize - size) to assist credit.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize1 is a specialized tiny-allocator fast path for a
// compile-time-constant request size of 1 byte. It bump-allocates out of the
// mcache's current 16-byte tiny block, falling back to carving a fresh
// 16-byte block out of the tiny span class when the request doesn't fit.
// NOTE(review): appears machine-generated (one copy per tiny size) — changes
// belong in the generator, not this copy.
func mallocgcTinySize1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Goroutines carrying secrets bypass the tiny allocator entirely (no
// co-tenancy in a shared tiny block) and take the small-noscan path.
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
// Debug-only invariant: allocation is forbidden during GC mark termination.
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may service the allocation entirely itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge the GC assist budget while marking is enabled.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 1
const elemsize = 16
// Hold the M (disables preemption) while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Align the bump offset to the natural alignment implied by the size.
// (Most branches are dead for constsize==1; the template keeps them.)
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
// NOTE(review): presumably keeps 64-bit fields of 12-byte objects
// 8-aligned on 32-bit platforms — confirm against runtime malloc docs.
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the request fits in the current tiny block — pure pointer bump.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// Shadow elemsize with 0: a sub-block tiny alloc adds no new heap
// bytes, so the assist/debug accounting below sees a zero size.
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
// Dead with elemsize == 0; kept by the shared epilogue template.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: take a fresh 16-byte slot from the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast against the constant tiny-span element count.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Fall to the slow path when this 64-slot cache word is exhausted.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte block with two word stores.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Install this block as the current tiny block when it would leave more
// usable space than the old one (never under the race detector, which
// must not share blocks between objects).
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing stores visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Allocate black during GC so the object survives this cycle.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Under the race detector, shift the object to the end of its block.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// Charge the block-rounding waste (elemsize - size) to assist credit.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize2 is a specialized tiny-allocator fast path for a
// compile-time-constant request size of 2 bytes. It bump-allocates out of the
// mcache's current 16-byte tiny block, falling back to carving a fresh
// 16-byte block out of the tiny span class when the request doesn't fit.
// NOTE(review): appears machine-generated (one copy per tiny size) — changes
// belong in the generator, not this copy.
func mallocgcTinySize2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Goroutines carrying secrets bypass the tiny allocator (no co-tenancy).
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge the GC assist budget while marking is enabled.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 2
const elemsize = 16
// Hold the M (disables preemption) while manipulating the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Align the bump offset to the natural alignment implied by the size.
// (Only the final 2-byte branch is live for constsize==2.)
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the request fits in the current tiny block — pure pointer bump.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// Shadow elemsize with 0: a sub-block tiny alloc adds no new heap
// bytes, so the assist/debug accounting below sees a zero size.
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
// Dead with elemsize == 0; kept by the shared epilogue template.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: take a fresh 16-byte slot from the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast against the constant tiny-span element count.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte block with two word stores.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Install this block as the current tiny block when it would leave more
// usable space than the old one (never under the race detector).
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing stores visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Allocate black during GC so the object survives this cycle.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Under the race detector, shift the object to the end of its block.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// Charge the block-rounding waste (elemsize - size) to assist credit.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize3 allocates a 3-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 3 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 3
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (no-op for odd sizes like this one).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize4 allocates a 4-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 4 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 4
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (4-byte alignment here).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize5 allocates a 5-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 5 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 5
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (no-op for odd sizes like this one).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize6 allocates a 6-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 6 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 6
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (2-byte alignment here).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize7 allocates a 7-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 7 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 7
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (no-op for odd sizes like this one).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize8 allocates an 8-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 8 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 8
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (8-byte alignment here).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize9 allocates a 9-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 9 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 9
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (no-op for odd sizes like this one).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize10 allocates a 10-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 10 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 10
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (2-byte alignment here).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize11 allocates an 11-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 11 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 11
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (no-op for odd sizes like this one).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize12 allocates a 12-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 12 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 12
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize. For 12-byte objects on 32-bit
// platforms, force 8-byte alignment so contained 64-bit fields align.
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize13 allocates a 13-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 13 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 13
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (no-op for odd sizes like this one).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize14 allocates a 14-byte, pointer-free ("tiny") object.
// It is a size-specialized copy of the generic mallocgc tiny path with
// constsize fixed at 14 and a 16-byte backing element; apart from
// constsize it is identical to its mallocgcTinySize* siblings.
func mallocgcTinySize14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
// Goroutines carrying secrets skip the shared tiny block and take the
// small-noscan size-class-2 path — presumably so secret data is never
// co-located with unrelated objects in one tiny block; TODO confirm.
if goexperiment.RuntimeSecret && gp.secret > 0 {
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
// Optional debug hook; it may satisfy the allocation itself.
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// While a GC cycle is marking, charge this allocation against the
// caller's assist credit.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const constsize = 14
const elemsize = 16
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
// Round the bump offset in the current tiny block up to the natural
// alignment implied by constsize (2-byte alignment here).
off := c.tinyoffset
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
// Fast path: the object fits in the tiny block already in hand; bump
// the offset and return without touching any span.
if off+constsize <= maxTinySize && c.tiny != 0 {
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// A tiny-block hit consumes no new heap element, so shadow elemsize
// with 0 for the shared epilogue below (disables the assist debit).
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Slow path: carve a fresh 16-byte element out of the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Elements per tiny span: span bytes minus the inline mark bits,
// divided into 16-byte slots.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot from the span's 64-bit
// allocation-bit cache, if one is available.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Decline the fast path when this cache word is exhausted; the
// refill below must advance the cache instead.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
// Cache miss: refill (possibly swapping in a new span), which may also
// arm a GC-trigger check.
if v == 0 {
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte element unconditionally; later tiny-block
// hits rely on the remainder being pre-zeroed.
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the fresh element as the new tiny block when it leaves more
// free room than the old one. Never combine objects under the race
// detector, which needs a distinct address per object.
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing above visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
// Mark the new object so a concurrent GC does not reclaim it.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling bookkeeping is charged at the full element size.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
// If the refill crossed the heap-growth trigger, start a GC cycle.
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
// Race detector: return a pointer into the tail of the element so each
// object occupies a unique address range.
if raceenabled {
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
// While marking, settle the assist debt for the gap between the
// requested size and the 16-byte element actually consumed.
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcTinySize15 allocates a 15-byte pointer-free object via the
// runtime's tiny allocator (16-byte tiny slots).
// NOTE(review): this looks machine-generated — a size-specialized copy of
// the runtime's mallocgc tiny path; prefer fixing the generator over
// hand-editing this function.
func mallocgcTinySize15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Secret-carrying goroutines skip tiny packing and use the plain
// size-class path instead — TODO confirm runtimesecret semantics.
return mallocgcSmallNoScanSC2(size, typ, needzero)
}
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
// Specialized request size and the tiny slot size, in bytes.
const constsize = 15
const elemsize = 16
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckTiny(constsize, typ, mp)
}
mp.mallocing = 1
c := getMCache(mp)
off := c.tinyoffset
// Align the offset inside the current tiny block for the object's size.
if constsize&7 == 0 {
off = alignUp(off, 8)
} else if goarch.PtrSize == 4 && constsize == 12 {
off = alignUp(off, 8)
} else if constsize&3 == 0 {
off = alignUp(off, 4)
} else if constsize&1 == 0 {
off = alignUp(off, 2)
}
if off+constsize <= maxTinySize && c.tiny != 0 {
// Fast path: the object fits in the current tiny block.
x := unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + constsize
c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
// Shadow elemsize with 0 for the epilogue below: a tiny-block hit
// consumes no new slot, so the assist adjustment is skipped and the
// debug hook sees size 0 — presumably the whole block was accounted
// when it was first allocated; TODO confirm against the generator.
const elemsize = 0
{
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Miss: take a fresh 16-byte slot from the tiny span class.
checkGCTrigger := false
span := c.alloc[tinySpanClass]
// Slots per 8 KiB span, net of the inline mark-bits footer.
const nbytes = 8192
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
16,
)
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(tinySpanClass)
}
x := unsafe.Pointer(v)
// Zero the whole 16-byte slot (two 8-byte words).
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
// Adopt the new block as the current tiny block when it leaves more room
// than the old one (tiny packing stays disabled under the race detector).
if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
c.tiny = uintptr(x)
c.tinyoffset = constsize
}
// Make the zeroing visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
if raceenabled {
// Under race, return the tail of the slot so overwrites of the slack
// bytes are detectable — presumably matching the detector's scheme.
x = add(x, elemsize-constsize)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC2 allocates a pointer-free ("noscan") object from
// size class 2 (16-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 2
const elemsize = 16
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
16 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC3 allocates a pointer-free ("noscan") object from
// size class 3 (24-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 3
const elemsize = 24
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
24 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC4 allocates a pointer-free ("noscan") object from
// size class 4 (32-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 4
const elemsize = 32
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
32 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC5 allocates a pointer-free ("noscan") object from
// size class 5 (48-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 5
const elemsize = 48
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
48 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC6 allocates a pointer-free ("noscan") object from
// size class 6 (64-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 6
const elemsize = 64
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
64 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC7 allocates a pointer-free ("noscan") object from
// size class 7 (80-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 7
const elemsize = 80
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
80 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC8 allocates a pointer-free ("noscan") object from
// size class 8 (96-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 8
const elemsize = 96
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
96 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC9 allocates a pointer-free ("noscan") object from
// size class 9 (112-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 9
const elemsize = 112
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
112 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC10 allocates a pointer-free ("noscan") object from
// size class 10 (128-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 10
const elemsize = 128
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
128 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC11 allocates a pointer-free ("noscan") object from
// size class 11 (144-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 11
const elemsize = 144
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
144 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC12 allocates a pointer-free ("noscan") object from
// size class 12 (160-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 12
const elemsize = 160
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
160 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC13 allocates a pointer-free ("noscan") object from
// size class 13 (176-byte slots).
// NOTE(review): this looks machine-generated — a size-class-specialized
// copy of the runtime's small-noscan mallocgc path; prefer fixing the
// generator over hand-editing this function.
func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
// The debug hook may satisfy the allocation itself.
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
if gcBlackenEnabled != 0 {
// Charge this allocation against the goroutine's GC assist budget.
deductAssistCredit(size)
}
const sizeclass = 13
const elemsize = 176
// Disable preemption while touching the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
// Span class for this size class with the noscan bit set.
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
// freegc path: reuse an explicitly freed slot of this class.
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
// Tag memory handed to secret-carrying goroutines — TODO confirm
// runtimesecret semantics.
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's cached
// allocation bitmap, if any.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
// Fast path only while the 64-bit cache window needs no refill.
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
176 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill or replace the span; may request a GC trigger check.
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the zeroed contents visible before the pointer can be published.
publicationBarrier()
if writeBarrier.enabled {
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
// Heap-profile sampling point.
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
// Settle the slack between the slot size and the requested size.
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC14 allocates a small pointer-free object from size
// class 14 (element size 192 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 14
const elemsize = 192
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
192 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC15 allocates a small pointer-free object from size
// class 15 (element size 208 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 15
const elemsize = 208
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
208 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC16 allocates a small pointer-free object from size
// class 16 (element size 224 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 16
const elemsize = 224
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
224 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC17 allocates a small pointer-free object from size
// class 17 (element size 240 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 17
const elemsize = 240
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
240 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC18 allocates a small pointer-free object from size
// class 18 (element size 256 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 18
const elemsize = 256
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
256 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC19 allocates a small pointer-free object from size
// class 19 (element size 288 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 19
const elemsize = 288
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
288 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC20 allocates a small pointer-free object from size
// class 20 (element size 320 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 20
const elemsize = 320
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
320 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC21 allocates a small pointer-free object from size
// class 21 (element size 352 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 21
const elemsize = 352
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
352 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC22 allocates a small pointer-free object from size
// class 22 (element size 384 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 22
const elemsize = 384
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
384 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC23 allocates a small pointer-free object from size
// class 23 (element size 416 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 23
const elemsize = 416
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
416 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC24 allocates a small pointer-free object from size
// class 24 (element size 448 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 24
const elemsize = 448
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
448 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC25 allocates a small pointer-free object from size
// class 25 (element size 480 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 25
const elemsize = 480
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
480 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// mallocgcSmallNoScanSC26 allocates a small pointer-free object from size
// class 26 (element size 512 bytes). Generated by runtime/_mkmalloc as a
// specialization of mallocgc; typ is used only by debug/profiling hooks,
// and needzero reports whether the caller requires zeroed memory.
func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if doubleCheckMalloc {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
}
lockRankMayQueueFinalizer()
if debug.malloc {
if x := preMallocgcDebug(size, typ); x != nil {
return x
}
}
// Charge GC assist credit for this allocation while marking is running.
if gcBlackenEnabled != 0 {
deductAssistCredit(size)
}
const sizeclass = 26
const elemsize = 512
// acquirem disables preemption while we work with the per-P mcache.
mp := acquirem()
if doubleCheckMalloc {
doubleCheckSmallNoScan(typ, mp)
}
mp.mallocing = 1
checkGCTrigger := false
c := getMCache(mp)
const spc = spanClass(sizeclass<<1) | spanClass(1)
span := c.alloc[spc]
// Reuse path: hand back an explicitly freed object of this span class.
if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
mp.mallocing = 0
releasem(mp)
x := v
{
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
// Charge the internal fragmentation (elemsize - size) as well.
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
}
// Inlined nextFreeFast: pop the next free slot out of the span's 64-bit
// allocCache bitmap, leaving the cache-refill case to the slow path.
var nextFreeFastResult gclinkptr
if span.allocCache != 0 {
theBit := sys.TrailingZeros64(span.allocCache)
result := span.freeindex + uint16(theBit)
if result < span.nelems {
freeidx := result + 1
if !(freeidx%64 == 0 && freeidx != span.nelems) {
span.allocCache >>= uint(theBit + 1)
span.freeindex = freeidx
span.allocCount++
nextFreeFastResult = gclinkptr(uintptr(result)*
512 +
span.base())
}
}
}
v := nextFreeFastResult
if v == 0 {
// Slow path: refill the allocCache or obtain a fresh span; this may
// make a heap-triggered GC due (checkGCTrigger).
v, span, checkGCTrigger = c.nextFree(spc)
}
x := unsafe.Pointer(v)
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, elemsize)
}
// Make the (zeroed) contents visible before the pointer can be observed.
publicationBarrier()
if writeBarrier.enabled {
// GC is active: mark the new object so it is treated as live.
gcmarknewobject(span, uintptr(x))
} else {
span.freeIndexForScan = span.freeindex
}
// Heap-profiling sample countdown.
c.nextSample -= int64(elemsize)
if c.nextSample < 0 || MemProfileRate != c.memProfRate {
profilealloc(mp, x, elemsize)
}
mp.mallocing = 0
releasem(mp)
if checkGCTrigger {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
gp := getg()
if goexperiment.RuntimeSecret && gp.secret > 0 {
addSecret(x, size)
}
if valgrindenabled {
valgrindMalloc(x, size)
}
if gcBlackenEnabled != 0 && elemsize != 0 {
if assistG := getg().m.curg; assistG != nil {
assistG.gcAssistBytes -= int64(elemsize - size)
}
}
if debug.malloc {
postMallocgcDebug(x, elemsize, typ)
}
return x
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains stub functions that are not meant to be called directly,
// but that will be assembled together using the inlining logic in runtime/_mkmalloc
// to produce a full mallocgc function that's specialized for a span class
// or specific size in the case of the tiny allocator.
//
// To generate the specialized mallocgc functions, do 'go run .' inside runtime/_mkmalloc.
//
// To assemble a mallocgc function, the mallocStub function is cloned, and the call to
// inlinedMalloc is replaced with the inlined body of smallScanNoHeaderStub,
// smallNoScanStub or tinyStub, depending on the parameters being specialized.
//
// The size_ (for the tiny case) and elemsize_, sizeclass_, and noscanint_ (for all three cases)
// identifiers are replaced with the value of the parameter in the specialized case.
// The nextFreeFastStub, nextFreeFastTiny, heapSetTypeNoHeaderStub, and writeHeapBitsSmallStub
// functions are also inlined by _mkmalloc.
package runtime
import (
"internal/goarch"
"internal/goexperiment"
"internal/runtime/sys"
"unsafe"
)
// These identifiers will all be replaced by the inliner. So their values don't
// really matter: they just need to be set so that the stub functions, which
// will never be used on their own, can compile. elemsize_ can't be set to
// zero because we divide by it in nextFreeFastTiny, and the compiler would
// complain about a division by zero. Its replaced value will always be greater
// than zero.
const elemsize_ = 8   // slot size in bytes of the specialized span class
const sizeclass_ = 0  // size class index being specialized
const noscanint_ = 0  // 1 when specializing a noscan span class, 0 otherwise
const size_ = 0       // exact allocation size (tiny-allocator case only)
const isTiny_ = false // true when specializing the tiny allocator
// malloc0 services zero-sized allocation requests. All such requests
// return the same canonical address, &zerobase, so no heap space is used.
func malloc0(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc && gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}
	// Every zero-sized object aliases the same word of storage.
	return unsafe.Pointer(&zerobase)
}
// mallocPanic is the filler entry used for size classes that have no
// generated specialization; reaching it indicates a dispatch bug.
func mallocPanic(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	panic("not defined for sizeclass")
}
// WARNING: mallocStub does not do any work for sanitizers so callers need
// to steer out of this codepath early if sanitizers are enabled.
//
// mallocStub is the outer template for the size-specialized mallocgc
// functions: _mkmalloc clones it, replaces the inlinedMalloc call with the
// inlined body of smallScanNoHeaderStub, smallNoScanStub, or tinyStub, and
// substitutes concrete values for isTiny_ and the other placeholder
// constants, so each generated function keeps only the relevant branches.
func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if isTiny_ {
		// secret code, need to avoid the tiny allocator since it might keep
		// co-located values alive longer and prevent timely zero-ing
		//
		// Call directly into the NoScan allocator.
		// See go.dev/issue/76356
		gp := getg()
		if goexperiment.RuntimeSecret && gp.secret > 0 {
			return mallocgcSmallNoScanSC2(size, typ, needzero)
		}
	}
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}
	// It's possible for any malloc to trigger sweeping, which may in
	// turn queue finalizers. Record this dynamic lock edge.
	// N.B. Compiled away if lockrank experiment is not enabled.
	lockRankMayQueueFinalizer()
	// Pre-malloc debug hooks.
	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}
	// Assist the GC if needed. (On the reuse path, we currently compensate for this;
	// changes here might require changes there.)
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}
	// Actually do the allocation.
	x, elemsize := inlinedMalloc(size, typ, needzero)
	if !isTiny_ {
		gp := getg()
		if goexperiment.RuntimeSecret && gp.secret > 0 {
			// Mark any object allocated while in secret mode as secret.
			// This ensures we zero it immediately when freeing it.
			addSecret(x, size)
		}
	}
	// Notify valgrind, if enabled.
	// To allow the compiler to not know about valgrind, we do valgrind instrumentation
	// unlike the other sanitizers.
	if valgrindenabled {
		valgrindMalloc(x, size)
	}
	// Adjust our GC assist debt to account for internal fragmentation.
	// Note: elemsize is 0 on the tiny-block fast path, which skips this.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}
	// Post-malloc debug hooks.
	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
// inlinedMalloc will never be called. It is defined just so that the compiler can compile
// the mallocStub function, which will also never be called, but instead used as a template
// to generate a size-specialized malloc function. The call to inlinedMalloc in mallocStub
// will be replaced with the inlined body of smallScanNoHeaderStub, smallNoScanStub, or tinyStub
// when generating the size-specialized malloc function. See the comment at the top of this
// file for more information.
func inlinedMalloc(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
	// Placeholder return values; never observed at runtime.
	return unsafe.Pointer(uintptr(0)), 0
}
// doubleCheckSmallScanNoHeader validates the preconditions of the
// small, scannable, header-free allocation path, throwing on the first
// violation it finds: no recursive malloc, not on the signal stack,
// a pointer-bearing type, and heap bits stored inside the span.
func doubleCheckSmallScanNoHeader(size uintptr, typ *_type, mp *m) {
	switch {
	case mp.mallocing != 0:
		throw("malloc deadlock")
	case mp.gsignal == getg():
		throw("malloc during signal")
	case typ == nil || !typ.Pointers():
		throw("noscan allocated in scan-only path")
	case !heapBitsInSpan(size):
		throw("heap bits in not in span for non-header-only path")
	}
}
// smallScanNoHeaderStub is the _mkmalloc template for allocating a small,
// pointer-bearing object whose type bitmap is stored in the span (no malloc
// header). It returns the allocated object and the slot size (elemsize).
// sizeclass_/elemsize_/noscanint_ are replaced with concrete values when the
// specialized function is generated.
func smallScanNoHeaderStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
	const sizeclass = sizeclass_
	const elemsize = elemsize_
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1
	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(noscanint_)
	span := c.alloc[spc]
	// Fast path: grab a free slot from the span's allocCache.
	v := nextFreeFastStub(span)
	if v == 0 {
		// Slow path: refill the cache, possibly acquiring a new span.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// initHeapBits already set the pointer bits for the 8-byte sizeclass
		// on 64-bit platforms.
		c.scanAlloc += 8
	} else {
		dataSize := size // make the inliner happy
		x := uintptr(x)
		scanSize := heapSetTypeNoHeaderStub(x, dataSize, typ, span)
		c.scanAlloc += scanSize
	}
	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}
	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	return x, elemsize
}
// doubleCheckSmallNoScan validates the preconditions of the small noscan
// allocation path, throwing on the first violation: no recursive malloc,
// not on the signal stack, and a pointer-free (noscan) type.
func doubleCheckSmallNoScan(typ *_type, mp *m) {
	switch {
	case mp.mallocing != 0:
		throw("malloc deadlock")
	case mp.gsignal == getg():
		throw("malloc during signal")
	case typ != nil && typ.Pointers():
		throw("expected noscan type for noscan alloc")
	}
}
// smallNoScanStub is the _mkmalloc template for allocating a small object of
// a pointer-free (noscan) type. It returns the allocated object and the slot
// size (elemsize). Unlike the scan path, no heap bits are written; zeroing is
// skipped entirely when the caller passes needzero == false.
func smallNoScanStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
	// TODO(matloob): Add functionality to mkmalloc to allow us to inline a non-constant
	// sizeclass_ and elemsize_ value (instead just set to the expressions to look up the size class
	// and elemsize. We'd also need to teach mkmalloc that values that are touched by these (specifically
	// spc below) should turn into vars. This would allow us to generate mallocgcSmallNoScan itself,
	// so that its code could not diverge from the generated functions.
	const sizeclass = sizeclass_
	const elemsize = elemsize_
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1
	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(noscanint_)
	span := c.alloc[spc]
	// First, check for a reusable object.
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
		// We have a reusable object, use it.
		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		// TODO(thepudds): note that the generated return path is essentially duplicated
		// by the generator. For example, see the two postMallocgcDebug calls and
		// related duplicated code on the return path currently in the generated
		// mallocgcSmallNoScanSC2 function. One set of those correspond to this
		// return here. We might be able to de-duplicate the generated return path
		// by updating the generator, perhaps by jumping to a shared return or similar.
		return v, elemsize
	}
	// Fast path: grab a free slot from the span's allocCache; otherwise
	// refill via the mcache, possibly acquiring a new span.
	v := nextFreeFastStub(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}
	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	return x, elemsize
}
// doubleCheckTiny validates the preconditions of the tiny allocation path,
// throwing on the first violation: no recursive malloc, not on the signal
// stack, and a pointer-free (noscan) type. size is currently unused but kept
// for signature symmetry with the other doubleCheck helpers.
func doubleCheckTiny(size uintptr, typ *_type, mp *m) {
	switch {
	case mp.mallocing != 0:
		throw("malloc deadlock")
	case mp.gsignal == getg():
		throw("malloc during signal")
	case typ != nil && typ.Pointers():
		throw("expected noscan for tiny alloc")
	}
}
// tinyStub is the _mkmalloc template for the tiny allocator, which packs
// several sub-16-byte noscan allocations into one block. It returns the
// allocated object and the slot size; on the block-reuse fast path the
// returned elemsize is 0, which makes the caller (mallocStub) skip its
// internal-fragmentation assist accounting for the sub-allocation.
// size_ and elemsize_ are replaced with concrete values by the generator.
func tinyStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) {
	const constsize = size_
	const elemsize = elemsize_
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckTiny(constsize, typ, mp)
	}
	mp.mallocing = 1
	// Tiny allocator.
	//
	// Tiny allocator combines several tiny allocation requests
	// into a single memory block. The resulting memory block
	// is freed when all subobjects are unreachable. The subobjects
	// must be noscan (don't have pointers), this ensures that
	// the amount of potentially wasted memory is bounded.
	//
	// Size of the memory block used for combining (maxTinySize) is tunable.
	// Current setting is 16 bytes, which relates to 2x worst case memory
	// wastage (when all but one subobjects are unreachable).
	// 8 bytes would result in no wastage at all, but provides less
	// opportunities for combining.
	// 32 bytes provides more opportunities for combining,
	// but can lead to 4x worst case wastage.
	// The best case winning is 8x regardless of block size.
	//
	// Objects obtained from tiny allocator must not be freed explicitly.
	// So when an object will be freed explicitly, we ensure that
	// its size >= maxTinySize.
	//
	// SetFinalizer has a special case for objects potentially coming
	// from tiny allocator, it such case it allows to set finalizers
	// for an inner byte of a memory block.
	//
	// The main targets of tiny allocator are small strings and
	// standalone escaping variables. On a json benchmark
	// the allocator reduces number of allocations by ~12% and
	// reduces heap size by ~20%.
	c := getMCache(mp)
	off := c.tinyoffset
	// Align tiny pointer for required (conservative) alignment.
	if constsize&7 == 0 {
		off = alignUp(off, 8)
	} else if goarch.PtrSize == 4 && constsize == 12 {
		// Conservatively align 12-byte objects to 8 bytes on 32-bit
		// systems so that objects whose first field is a 64-bit
		// value is aligned to 8 bytes and does not cause a fault on
		// atomic access. See issue 37262.
		// TODO(mknyszek): Remove this workaround if/when issue 36606
		// is resolved.
		off = alignUp(off, 8)
	} else if constsize&3 == 0 {
		off = alignUp(off, 4)
	} else if constsize&1 == 0 {
		off = alignUp(off, 2)
	}
	if off+constsize <= maxTinySize && c.tiny != 0 {
		// The object fits into existing tiny block.
		x := unsafe.Pointer(c.tiny + off)
		c.tinyoffset = off + constsize
		c.tinyAllocs++
		mp.mallocing = 0
		releasem(mp)
		// elemsize 0: see the function doc comment.
		return x, 0
	}
	// Allocate a new maxTinySize block.
	checkGCTrigger := false
	span := c.alloc[tinySpanClass]
	v := nextFreeFastTiny(span)
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
	}
	x := unsafe.Pointer(v)
	(*[2]uint64)(x)[0] = 0 // Always zero
	(*[2]uint64)(x)[1] = 0
	// See if we need to replace the existing tiny block with the new one
	// based on amount of remaining free space.
	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
		// Note: disabled when race detector is on, see comment near end of this function.
		c.tiny = uintptr(x)
		c.tinyoffset = constsize
	}
	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()
	if writeBarrier.enabled {
		// Allocate black during GC.
		// All slots hold nil so no scanning is needed.
		// This may be racing with GC so do it atomically if there can be
		// a race marking the bit.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Track the last free index before the mark phase. This field
		// is only used by the garbage collector. During the mark phase
		// this is used by the conservative scanner to filter out objects
		// that are both free and recently-allocated. It's safe to do that
		// because we allocate-black if the GC is enabled. The conservative
		// scanner produces pointers out of thin air, so without additional
		// synchronization it might otherwise observe a partially-initialized
		// object, which could crash the program.
		span.freeIndexForScan = span.freeindex
	}
	// Note cache c only valid while m acquired; see #47302
	//
	// N.B. Use the full size because that matches how the GC
	// will update the mem profile on the "free" side.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something. The code below just pretends it is
	// internal fragmentation and matches the GC's accounting by
	// using the whole allocation slot.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if raceenabled {
		// Pad tinysize allocations so they are aligned with the end
		// of the tinyalloc region. This ensures that any arithmetic
		// that goes off the top end of the object will be detectable
		// by checkptr (issue 38872).
		// Note that we disable tinyalloc when raceenabled for this to work.
		// TODO: This padding is only performed when the race detector
		// is enabled. It would be nice to enable it if any package
		// was compiled with checkptr, but there's no easy way to
		// detect that (especially at compile time).
		// TODO: enable this padding for all allocations, not just
		// tinyalloc ones. It's tricky because of pointer maps.
		// Maybe just all noscan objects?
		x = add(x, elemsize-constsize)
	}
	return x, elemsize
}
// TODO(matloob): Should we let the go compiler inline this instead of using mkmalloc?
// We won't be able to use elemsize_ but that's probably ok.

// nextFreeFastTiny returns the next free object in the tiny span if one is
// quickly available in span.allocCache, advancing the span's free index and
// allocation count. It returns 0 if the cache has no usable slot. The body is
// textually inlined by _mkmalloc (see the file header), so its shape and the
// nextFreeFastResult variable must stay as-is.
func nextFreeFastTiny(span *mspan) gclinkptr {
	// nelems is recomputed from the fixed tiny-span layout rather than read
	// from the span, so it is a compile-time constant in generated code.
	const nbytes = 8192
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / elemsize_)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache) // Is there a free object in the allocCache?
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			// Refuse the slot only when the cache word is exhausted
			// (index hits a 64-boundary) before the span's last element.
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*elemsize_ + span.base())
			}
		}
	}
	return nextFreeFastResult
}
// nextFreeFastStub returns the next free object in the span if one is quickly
// available in span.allocCache, advancing the span's free index and allocation
// count; it returns 0 when the cache yields nothing. Like nextFreeFastTiny,
// the body is textually inlined by _mkmalloc with elemsize_ replaced by the
// specialized slot size, so its shape must stay as-is.
func nextFreeFastStub(span *mspan) gclinkptr {
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache) // Is there a free object in the allocCache?
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// Refuse the slot only when the cache word is exhausted
			// (index hits a 64-boundary) before the span's last element.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*elemsize_ + span.base())
			}
		}
	}
	return nextFreeFastResult
}
// heapSetTypeNoHeaderStub records the pointer bitmap of typ for the object at
// x (whose heap bits live inside the span) and returns the number of bytes
// the GC must scan. Inlined by _mkmalloc; elemsize_ is replaced with the
// specialized slot size in generated code.
func heapSetTypeNoHeaderStub(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
	if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(elemsize_)) {
		throw("tried to write heap bits, but no heap bits in span")
	}
	scanSize := writeHeapBitsSmallStub(span, x, dataSize, typ)
	if doubleCheckHeapSetType {
		doubleCheckHeapType(x, dataSize, typ, nil, span)
	}
	return scanSize
}
// writeHeapBitsSmallStub writes the heap bits for small objects whose ptr/scalar data is
// stored as a bitmap at the end of the span.
//
// Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span.
// heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_.
//
// Returns the number of bytes the GC should scan for this object.
// Inlined by _mkmalloc; elemsize_ becomes a constant in generated code, which
// lets the compiler prune the two-write branch below for power-of-two sizes.
//
//go:nosplit
func writeHeapBitsSmallStub(span *mspan, x, dataSize uintptr, typ *_type) uintptr {
	// The objects here are always really small, so a single load is sufficient.
	src0 := readUintptr(getGCMask(typ))
	const elemsize = elemsize_
	// Create repetitions of the bitmap if we have a small slice backing store.
	var scanSize uintptr
	src := src0
	if typ.Size_ == goarch.PtrSize {
		src = (1 << (dataSize / goarch.PtrSize)) - 1
		// This object is all pointers, so scanSize is just dataSize.
		scanSize = dataSize
	} else {
		// N.B. We rely on dataSize being an exact multiple of the type size.
		// The alternative is to be defensive and mask out src to the length
		// of dataSize. The purpose is to save on one additional masking operation.
		if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
			throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
		}
		scanSize = typ.PtrBytes
		for i := typ.Size_; i < dataSize; i += typ.Size_ {
			src |= src0 << (i / goarch.PtrSize)
			scanSize += typ.Size_
		}
	}
	// Since we're never writing more than one uintptr's worth of bits, we're either going
	// to do one or two writes.
	dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
	dst := unsafe.Pointer(dstBase)
	// o is the object's word offset within the span; i/j locate its bits
	// within the bitmap words.
	o := (x - span.base()) / goarch.PtrSize
	i := o / ptrBits
	j := o % ptrBits
	const bits uintptr = elemsize / goarch.PtrSize
	// In the if statement below, we have to do two uintptr writes if the bits
	// we need to write straddle across two different memory locations. But if
	// the number of bits we're writing divides evenly into the number of bits
	// in the uintptr we're writing, this can never happen. Since bitsIsPowerOfTwo
	// is a compile-time constant in the generated code, in the case where the size is
	// a power of two less than or equal to ptrBits, the compiler can remove the
	// 'two writes' branch of the if statement and always do only one write without
	// the check.
	const bitsIsPowerOfTwo = bits&(bits-1) == 0
	if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
		// Two writes.
		bits0 := ptrBits - j
		bits1 := bits - bits0
		dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
		dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
		*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
		*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
	} else {
		// One write.
		dst := (*uintptr)(add(dst, i*goarch.PtrSize))
		*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) // We're taking the min so this compiles on 32 bit platforms. But if bits > ptrbits we always take the other branch
	}
	const doubleCheck = false
	if doubleCheck {
		writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
	}
	return scanSize
}
// writeHeapBitsDoubleCheck re-reads the heap bits just written for the object
// at x and throws with a diagnostic dump if they don't match what was written.
// Only reachable when writeHeapBitsSmallStub's doubleCheck const is flipped on.
func writeHeapBitsDoubleCheck(span *mspan, x, dataSize, src, src0, i, j, bits uintptr, typ *_type) {
	srcRead := span.heapBitsSmallForAddr(x)
	if srcRead != src {
		print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
		print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
		print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
		throw("bad pointer bits written for small object")
	}
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/runtime/maps"
"internal/runtime/sys"
"unsafe"
)
// Load-factor constants retained only for test code; the map implementation
// itself lives in internal/runtime/maps.
const (
	// TODO: remove? These are used by tests but not the actual map
	loadFactorNum = 7
	loadFactorDen = 8
)
// maps_errNilAssign is pushed into internal/runtime/maps via linkname; it is
// the error used when assigning to an entry in a nil map.
//
//go:linkname maps_errNilAssign internal/runtime/maps.errNilAssign
var maps_errNilAssign error = plainError("assignment to entry in nil map")
// makemap64 implements Go map creation for make(map[k]v, hint) when the
// hint expression has 64-bit type. A hint that does not fit in an int is
// clamped to zero, matching makemap's handling of out-of-range hints.
func makemap64(t *abi.MapType, hint int64, m *maps.Map) *maps.Map {
	h := int(hint)
	if int64(h) != hint {
		h = 0
	}
	return makemap(t, h, m)
}
// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most abi.MapGroupSlots
// at compile time and the map needs to be allocated on the heap.
//
// makemap_small should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makemap_small
func makemap_small() *maps.Map {
	// Delegates entirely to internal/runtime/maps.
	return maps.NewEmptyMap()
}
// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first group
// can be created on the stack, m and optionally m.dirPtr may be non-nil.
// If m != nil, the map can be created directly in m.
// If m.dirPtr != nil, it points to a group usable for a small map.
//
// makemap should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makemap
func makemap(t *abi.MapType, hint int, m *maps.Map) *maps.Map {
	// A negative hint is treated the same as no hint at all.
	return maps.NewMap(t, uintptr(max(hint, 0)), m, maxAlloc)
}
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
//
// mapaccess1 is pushed from internal/runtime/maps. We could just call it, but
// we want to avoid one layer of call.
//
// The missing body is supplied via linkname; this is a declaration only.
//
//go:linkname mapaccess1
func mapaccess1(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
// mapaccess2 returns a pointer to h[key] and a boolean reporting whether the
// key is present; the body is supplied from internal/runtime/maps via
// linkname (declaration only).
//
// mapaccess2 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2
func mapaccess2(t *abi.MapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool)
// mapaccess1_fat is the variant of mapaccess1 used for large zero values:
// when the key is absent it returns the caller-provided zero pointer
// instead of the shared zero object.
func mapaccess1_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer {
	if e, ok := mapaccess2(t, m, key); ok {
		return e
	}
	return zero
}
// mapaccess2_fat is the variant of mapaccess2 used for large zero values:
// when the key is absent it returns the caller-provided zero pointer and
// false instead of the shared zero object.
func mapaccess2_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	if e, ok := mapaccess2(t, m, key); ok {
		return e, true
	}
	return zero, false
}
// mapassign returns a pointer to the slot for key in m, creating it if
// needed; the body is pushed from internal/runtime/maps. We could just call
// it, but we want to avoid one layer of call (declaration only).
//
// mapassign should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign
func mapassign(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
// mapdelete removes the entry for key from m, reporting the operation to the
// race/msan/asan instrumentation first when those detectors are enabled.
//
// mapdelete should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapdelete
func mapdelete(t *abi.MapType, m *maps.Map, key unsafe.Pointer) {
	if raceenabled && m != nil {
		// Record both the map write and the read of the key object.
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapdelete)
		racewritepc(unsafe.Pointer(m), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && m != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && m != nil {
		asanread(key, t.Key.Size_)
	}
	// No nil check on m here; presumably maps.Map.Delete tolerates a nil
	// receiver (delete on a nil map is a no-op per the spec) — TODO confirm
	// in internal/runtime/maps.
	m.Delete(t, key)
}
// mapIterStart initializes the Iter struct used for ranging over maps and
// performs the first step of iteration. The Iter struct pointed to by 'it' is
// allocated on the stack by the compilers order pass or on the heap by
// reflect. Both need to have zeroed it since the struct contains pointers.
func mapIterStart(t *abi.MapType, m *maps.Map, it *maps.Iter) {
	if raceenabled && m != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart))
	}
	// Bind the iterator to the map, then advance to the first element.
	it.Init(t, m)
	it.Next()
}
// mapIterNext performs the next step of iteration. Afterwards, the next
// key/elem are in it.Key()/it.Elem().
func mapIterNext(it *maps.Iter) {
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(it.Map()), callerpc, abi.FuncPCABIInternal(mapIterNext))
	}
	it.Next()
}
// mapclear deletes all keys from a map, notifying the race detector of the
// write before the map is mutated.
func mapclear(t *abi.MapType, m *maps.Map) {
	if raceenabled && m != nil {
		racewritepc(unsafe.Pointer(m), sys.GetCallerPC(), abi.FuncPCABIInternal(mapclear))
	}
	m.Clear(t)
}
// Reflect stubs. Called from ../reflect/asm_*.s

// reflect_makemap creates a map with the given capacity hint on behalf of
// package reflect. The key type must be comparable (Equal non-nil).
//
// reflect_makemap is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/goccy/go-json
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *abi.MapType, cap int) *maps.Map {
	// Check invariants and reflects math.
	if t.Key.Equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	// TODO: other checks
	return makemap(t, cap, nil)
}
// reflect_mapaccess looks up key in m for package reflect, returning nil
// (rather than a zero-object pointer) when the key is missing.
//
// reflect_mapaccess is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
	if elem, ok := mapaccess2(t, m, key); ok {
		return elem
	}
	// reflect expects nil for a missing element.
	return nil
}
// reflect_mapaccess_faststr is the string-key fast path of reflect_mapaccess:
// it returns nil (rather than a zero-object pointer) when key is missing.
//
//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
func reflect_mapaccess_faststr(t *abi.MapType, m *maps.Map, key string) unsafe.Pointer {
	if elem, ok := mapaccess2_faststr(t, m, key); ok {
		return elem
	}
	// reflect expects nil for a missing element.
	return nil
}
// reflect_mapassign stores elem under key in m for package reflect: it
// obtains the destination slot via mapassign, then copies the element with
// a typed memmove so write barriers fire correctly.
//
// reflect_mapassign is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
//
//go:linkname reflect_mapassign reflect.mapassign0
func reflect_mapassign(t *abi.MapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) {
	typedmemmove(t.Elem, mapassign(t, m, key), elem)
}
// reflect_mapassign_faststr is the string-key fast path of reflect_mapassign.
//
//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
func reflect_mapassign_faststr(t *abi.MapType, m *maps.Map, key string, elem unsafe.Pointer) {
	typedmemmove(t.Elem, mapassign_faststr(t, m, key), elem)
}
// reflect_mapdelete removes key from m on behalf of package reflect;
// it is a thin wrapper around mapdelete.
//
//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *abi.MapType, m *maps.Map, key unsafe.Pointer) {
	mapdelete(t, m, key)
}

// reflect_mapdelete_faststr is the string-key fast path of reflect_mapdelete.
//
//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
func reflect_mapdelete_faststr(t *abi.MapType, m *maps.Map, key string) {
	mapdelete_faststr(t, m, key)
}
// reflect_maplen reports the number of elements in m for package reflect;
// a nil map has length zero.
//
// reflect_maplen is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/goccy/go-json
//   - github.com/wI2L/jettison
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(m *maps.Map) int {
	if m == nil {
		return 0
	}
	if raceenabled {
		racereadpc(unsafe.Pointer(m), sys.GetCallerPC(), abi.FuncPCABIInternal(reflect_maplen))
	}
	return int(m.Used())
}
// reflect_mapclear removes all entries from m on behalf of package reflect;
// it is a thin wrapper around mapclear.
//
//go:linkname reflect_mapclear reflect.mapclear
func reflect_mapclear(t *abi.MapType, m *maps.Map) {
	mapclear(t, m)
}
// reflectlite_maplen reports the number of elements in m for
// internal/reflectlite; a nil map has length zero.
//
//go:linkname reflectlite_maplen internal/reflectlite.maplen
func reflectlite_maplen(m *maps.Map) int {
	if m == nil {
		return 0
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		// Attribute the read to reflectlite_maplen itself. The previous
		// code passed reflect_maplen here, which mislabeled race reports
		// originating from internal/reflectlite.
		racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(reflectlite_maplen))
	}
	return int(m.Used())
}
// mapinitnoop is a no-op function known the Go linker; if a given global
// map (of the right size) is determined to be dead, the linker will
// rewrite the relocation (from the package init func) from the outlined
// map init function to this symbol. Defined in assembly so as to avoid
// complications with instrumentation (coverage, etc).
// (Declaration only; the body lives in the runtime's assembly files.)
func mapinitnoop()
// mapclone implements maps.Clone: it replaces the map inside the interface
// value m with a deep copy and returns the updated interface.
//
//go:linkname mapclone maps.clone
func mapclone(m any) any {
	e := efaceOf(&m)
	mt := (*abi.MapType)(unsafe.Pointer(e._type))
	src := (*maps.Map)(e.data)
	e.data = unsafe.Pointer(src.Clone(mt))
	return m
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector: write barriers.
//
// For the concurrent garbage collector, the Go compiler implements
// updates to pointer-valued fields that may be in heap objects by
// emitting calls to write barriers. The main write barrier for
// individual pointer writes is gcWriteBarrier and is implemented in
// assembly. This file contains write barrier entry points for bulk
// operations. See also mwbbuf.go.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/sys"
"unsafe"
)
// Go uses a hybrid barrier that combines a Yuasa-style deletion
// barrier—which shades the object whose reference is being
// overwritten—with a Dijkstra-style insertion barrier—which shades the object
// whose reference is being written. The insertion part of the barrier
// is necessary while the calling goroutine's stack is grey. In
// pseudocode, the barrier is:
//
// writePointer(slot, ptr):
// shade(*slot)
// if current stack is grey:
// shade(ptr)
// *slot = ptr
//
// slot is the destination in Go code.
// ptr is the value that goes into the slot in Go code.
//
// Shade indicates that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// The two shades and the condition work together to prevent a mutator
// from hiding an object from the garbage collector:
//
// 1. shade(*slot) prevents a mutator from hiding an object by moving
// the sole pointer to it from the heap to its stack. If it attempts
// to unlink an object from the heap, this will shade it.
//
// 2. shade(ptr) prevents a mutator from hiding an object by moving
// the sole pointer to it from its stack into a black object in the
// heap. If it attempts to install the pointer into a black object,
// this will shade it.
//
// 3. Once a goroutine's stack is black, the shade(ptr) becomes
// unnecessary. shade(ptr) prevents hiding an object by moving it from
// the stack to the heap, but this requires first having a pointer
// hidden on the stack. Immediately after a stack is scanned, it only
// points to shaded objects, so it's not hiding anything, and the
// shade(*slot) prevents it from hiding any other pointers on its
// stack.
//
// For a detailed description of this barrier and proof of
// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
//
//
//
// Dealing with memory ordering:
//
// Both the Yuasa and Dijkstra barriers can be made conditional on the
// color of the object containing the slot. We chose not to make these
// conditional because the cost of ensuring that the object holding
// the slot doesn't concurrently change color without the mutator
// noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
// Initially both [slot] and [slotmark] are 0 (nil)
// Mutator thread GC thread
// st [slot], ptr st [slotmark], 1
//
// ld r1, [slotmark] ld r2, [slot]
//
// Without an expensive memory barrier between the st and the ld, the final
// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
// example of what can happen when loads are allowed to be reordered with older
// stores (avoiding such reorderings lies at the heart of the classic
// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
// barriers, which will slow down both the mutator and the GC, we always grey
// the ptr object regardless of the slot's color.
//
//
// Stack writes:
//
// The compiler omits write barriers for writes to the current frame,
// but if a stack pointer has been passed down the call stack, the
// compiler will generate a write barrier for writes through that
// pointer (because it doesn't know it's not a heap pointer).
//
//
// Global writes:
//
// The Go garbage collector requires write barriers when heap pointers
// are stored in globals. Many garbage collectors ignore writes to
// globals and instead pick up global -> heap pointers during
// termination. This increases pause time, so we instead rely on write
// barriers for writes to globals so that we don't have to rescan
// globals during mark termination.
//
//
// Publication ordering:
//
// The write barrier is *pre-publication*, meaning that the write
// barrier happens prior to the *slot = ptr write that may make ptr
// reachable by some goroutine that currently cannot reach it.
//
//
// Signal handler pointer writes:
//
// In general, the signal handler cannot safely invoke the write
// barrier because it may run without a P or even during the write
// barrier.
//
// There is exactly one exception: profbuf.go omits a barrier during
// signal handler profile logging. That's safe only because of the
// deletion barrier. See profbuf.go for a detailed argument. If we
// remove the deletion barrier, we'll have to work out a new way to
// handle the profile logging.
// typedmemmove copies a value of type typ to dst from src.
// Must be nosplit, see #16026.
//
// TODO: Perfect for go:nosplitrec since we can't have a safe point
// anywhere in the bulk barrier or memmove.
//
// typedmemmove should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/RomiChan/protobuf
// - github.com/segmentio/encoding
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname typedmemmove
//go:nosplit
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
	if dst == src {
		// Self-copy: nothing to move, and no barrier needed.
		return
	}
	if writeBarrier.enabled && typ.Pointers() {
		// This always copies a full value of type typ so it's safe
		// to pass typ along as an optimization. See the comment on
		// bulkBarrierPreWrite.
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
	}
	// There's a race here: if some other goroutine can write to
	// src, it may change some pointer in src after we've
	// performed the write barrier but before we perform the
	// memory copy. This is safe because the write performed by that
	// other goroutine must also be accompanied by a write
	// barrier, so at worst we've unnecessarily greyed the old
	// pointer that was in src.
	memmove(dst, src, typ.Size_)
	if goexperiment.CgoCheck2 {
		cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
	}
}
// wbZero performs the write barrier operations necessary before
// zeroing a region of memory at address dst of type typ.
// Does not actually do the zeroing.
//
// The src of 0 tells bulkBarrierPreWrite this is a memclr-style
// operation, so only the old (overwritten) pointers are queued.
//
//go:nowritebarrierrec
//go:nosplit
func wbZero(typ *_type, dst unsafe.Pointer) {
	// This always copies a full value of type typ so it's safe
	// to pass typ along as an optimization. See the comment on
	// bulkBarrierPreWrite.
	bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes, typ)
}
// wbMove performs the write barrier operations necessary before
// copying a region of memory from src to dst of type typ.
// Does not actually do the copying.
//
// Only the pointer-containing prefix (typ.PtrBytes) needs barriers.
//
//go:nowritebarrierrec
//go:nosplit
func wbMove(typ *_type, dst, src unsafe.Pointer) {
	// This always copies a full value of type typ so it's safe to
	// pass a type here.
	//
	// See the comment on bulkBarrierPreWrite.
	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
}
// reflect_typedmemmove is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - gitee.com/quant1x/gox
// - github.com/goccy/json
// - github.com/modern-go/reflect2
// - github.com/ugorji/go/codec
// - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
// It performs race/msan/asan instrumentation on behalf of reflect and
// then delegates to typedmemmove.
//
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	if raceenabled {
		// Attribute the accesses to reflect's caller for race reports.
		raceWriteObjectPC(typ, dst, sys.GetCallerPC(), abi.FuncPCABIInternal(reflect_typedmemmove))
		raceReadObjectPC(typ, src, sys.GetCallerPC(), abi.FuncPCABIInternal(reflect_typedmemmove))
	}
	if msanenabled {
		msanwrite(dst, typ.Size_)
		msanread(src, typ.Size_)
	}
	if asanenabled {
		asanwrite(dst, typ.Size_)
		asanread(src, typ.Size_)
	}
	typedmemmove(typ, dst, src)
}
// reflectlite_typedmemmove is the internal/reflectlite entry point for
// typedmemmove; it reuses reflect_typedmemmove for its instrumentation.
//
//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	reflect_typedmemmove(typ, dst, src)
}
// maps_typedmemmove is the internal/runtime/maps entry point for
// typedmemmove; it forwards directly without extra instrumentation.
//
//go:linkname maps_typedmemmove internal/runtime/maps.typedmemmove
func maps_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	typedmemmove(typ, dst, src)
}
// reflectcallmove is invoked by reflectcall to copy the return values
// out of the stack and into the heap, invoking the necessary write
// barriers. dst, src, and size describe the return value area to
// copy. typ describes the entire frame (not just the return values).
// typ may be nil, which indicates write barriers are not needed.
//
// It must be nosplit and must only call nosplit functions because the
// stack map of reflectcall is wrong.
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
	if writeBarrier.enabled && typ != nil && typ.Pointers() && size >= goarch.PtrSize {
		// Pass nil for the type. dst does not point to value of type typ,
		// but rather points into one, so applying the optimization is not
		// safe. See the comment on this function.
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size, nil)
	}
	memmove(dst, src, size)
	// Move pointers returned in registers to a place where the GC can see them.
	// ReturnIsPtr marks which integer registers actually hold pointers.
	for i := range regs.Ints {
		if regs.ReturnIsPtr.Get(i) {
			regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
		}
	}
}
// typedslicecopy should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/segmentio/encoding
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
// typedslicecopy copies n = min(dstLen, srcLen) elements of type typ
// from srcPtr to dstPtr, executing write barriers first, and returns n.
//
//go:linkname typedslicecopy
//go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
	n := dstLen
	if n > srcLen {
		n = srcLen
	}
	if n == 0 {
		return 0
	}
	// The compiler emits calls to typedslicecopy before
	// instrumentation runs, so unlike the other copying and
	// assignment operations, it's not instrumented in the calling
	// code and needs its own instrumentation.
	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(slicecopy)
		racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
		racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
	}
	if msanenabled {
		msanwrite(dstPtr, uintptr(n)*typ.Size_)
		msanread(srcPtr, uintptr(n)*typ.Size_)
	}
	if asanenabled {
		asanwrite(dstPtr, uintptr(n)*typ.Size_)
		asanread(srcPtr, uintptr(n)*typ.Size_)
	}
	if goexperiment.CgoCheck2 {
		cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
	}
	if dstPtr == srcPtr {
		// Source and destination are identical; nothing to move.
		return n
	}
	// Note: No point in checking typ.PtrBytes here:
	// compiler only emits calls to typedslicecopy for types with pointers,
	// and growslice and reflect_typedslicecopy check for pointers
	// before calling typedslicecopy.
	size := uintptr(n) * typ.Size_
	if writeBarrier.enabled {
		// This always copies one or more full values of type typ so
		// it's safe to pass typ along as an optimization. See the comment on
		// bulkBarrierPreWrite.
		// Only the pointer-containing prefix of the last element needs
		// barriers, hence size - typ.Size_ + typ.PtrBytes.
		pwsize := size - typ.Size_ + typ.PtrBytes
		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize, typ)
	}
	// See typedmemmove for a discussion of the race between the
	// barrier and memmove.
	memmove(dstPtr, srcPtr, size)
	return n
}
// reflect_typedslicecopy is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - gitee.com/quant1x/gox
// - github.com/modern-go/reflect2
// - github.com/RomiChan/protobuf
// - github.com/segmentio/encoding
// - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
	if !elemType.Pointers() {
		// Pointer-free elements need no write barriers; use the
		// plain (instrumented) slicecopy.
		return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
	}
	return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
}
// typedmemclr clears the typed memory at ptr with type typ. The
// memory at ptr must already be initialized (and hence in type-safe
// state). If the memory is being initialized for the first time, see
// memclrNoHeapPointers.
//
// If the caller knows that typ has pointers, it can alternatively
// call memclrHasPointers.
//
// TODO: A "go:nosplitrec" annotation would be perfect for this.
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
	if writeBarrier.enabled && typ.Pointers() {
		// This always clears a whole value of type typ, so it's
		// safe to pass a type here and apply the optimization.
		// See the comment on bulkBarrierPreWrite.
		// src == 0 marks this as a memclr-style barrier.
		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes, typ)
	}
	memclrNoHeapPointers(ptr, typ.Size_)
}
// reflect_typedmemclr is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_typedmemclr reflect.typedmemclr
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
	typedmemclr(typ, ptr)
}
// maps_typedmemclr is the internal/runtime/maps entry point for
// typedmemclr; it forwards directly.
//
//go:linkname maps_typedmemclr internal/runtime/maps.typedmemclr
func maps_typedmemclr(typ *_type, ptr unsafe.Pointer) {
	typedmemclr(typ, ptr)
}
// reflect_typedmemclrpartial clears size bytes of the value of type typ
// at ptr, where ptr points off bytes into a value (not necessarily at a
// value boundary). Used by package reflect.
//
//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
	if writeBarrier.enabled && typ.Pointers() {
		// Pass nil for the type. ptr does not point to value of type typ,
		// but rather points into one so it's not safe to apply the optimization.
		// See the comment on this function in the reflect package and the
		// comment on bulkBarrierPreWrite.
		bulkBarrierPreWrite(uintptr(ptr), 0, size, nil)
	}
	memclrNoHeapPointers(ptr, size)
}
// reflect_typedarrayclear clears len contiguous elements of type typ
// starting at ptr, issuing write barriers first. Used by package reflect.
//
//go:linkname reflect_typedarrayclear reflect.typedarrayclear
func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
	size := typ.Size_ * uintptr(len)
	if writeBarrier.enabled && typ.Pointers() {
		// This always clears whole elements of an array, so it's
		// safe to pass a type here. See the comment on bulkBarrierPreWrite.
		bulkBarrierPreWrite(uintptr(ptr), 0, size, typ)
	}
	memclrNoHeapPointers(ptr, size)
}
// memclrHasPointers clears n bytes of typed memory starting at ptr.
// The caller must ensure that the type of the object at ptr has
// pointers, usually by checking typ.PtrBytes. However, ptr
// does not have to point to the start of the allocation.
//
// memclrHasPointers should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname memclrHasPointers
//go:nosplit
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
	// Pass nil for the type since we don't have one here anyway.
	bulkBarrierPreWrite(uintptr(ptr), 0, n, nil)
	memclrNoHeapPointers(ptr, n)
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are
// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
// means the word is a live pointer to be visited by the GC (referred to
// as "pointer"). A "0" bit means the word should be ignored by GC
// (referred to as "scalar", though it could be a dead pointer value).
//
// Heap bitmaps
//
// The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
// recording whether a pointer is stored in that word or not. This bitmap
// is stored at the end of a span for small objects and is unrolled at
// runtime from type metadata for all larger objects. Objects without
// pointers have neither a bitmap nor associated type metadata.
//
// Bits in all cases correspond to words in little-endian order.
//
// For small objects, if s is the mspan for the span starting at "start",
// then s.heapBits() returns a slice containing the bitmap for the whole span.
// That is, s.heapBits()[0] holds the goarch.PtrSize*8 bits for the first
// goarch.PtrSize*8 words from "start" through "start+63*ptrSize" in the span.
// On a related note, small objects are always small enough that their bitmap
// fits in goarch.PtrSize*8 bits, so writing out bitmap data takes two bitmap
// writes at most (because object boundaries don't generally lie on
// s.heapBits()[i] boundaries).
//
// For larger objects, if t is the type for the object starting at "start",
// within some span whose mspan is s, then the bitmap at t.GCData is "tiled"
// from "start" through "start+s.elemsize".
// Specifically, the first bit of t.GCData corresponds to the word at "start",
// the second to the word after "start", and so on up to t.PtrBytes. At t.PtrBytes,
// we skip to "start+t.Size_" and begin again from there. This process is
// repeated until we hit "start+s.elemsize".
// This tiling algorithm supports array data, since the type always refers to
// the element type of the array. Single objects are considered the same as
// single-element arrays.
// The tiling algorithm may scan data past the end of the compiler-recognized
// object, but any unused data within the allocation slot (i.e. within s.elemsize)
// is zeroed, so the GC just observes nil pointers.
// Note that this "tiled" bitmap isn't stored anywhere; it is generated on-the-fly.
//
// For objects without their own span, the type metadata is stored in the first
// word before the object at the beginning of the allocation slot. For objects
// with their own span, the type metadata is stored in the mspan.
//
// The bitmap for small unallocated objects in scannable spans is not maintained
// (can be junk).
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/atomic"
"internal/runtime/gc"
"internal/runtime/sys"
"unsafe"
)
// heapBitsInSpan returns true if the size of an object implies its ptr/scalar
// data is stored at the end of the span, and is accessible via span.heapBits.
//
// Note: this works for both rounded-up sizes (span.elemsize) and unrounded
// type sizes because gc.MinSizeForMallocHeader is guaranteed to be at a size
// class boundary.
//
//go:nosplit
func heapBitsInSpan(userSize uintptr) bool {
	// N.B. gc.MinSizeForMallocHeader is an exclusive minimum so that this function is
	// invariant under size-class rounding on its input.
	return userSize <= gc.MinSizeForMallocHeader
}
// typePointers is an iterator over the pointers in a heap object.
//
// Iteration through this type implements the tiling algorithm described at the
// top of this file.
//
// The zero typePointers value yields no pointers (mask == 0, typ == nil),
// which is how next and fastForward signal exhaustion.
type typePointers struct {
	// elem is the address of the current array element of type typ being iterated over.
	// Objects that are not arrays are treated as single-element arrays, in which case
	// this value does not change.
	elem uintptr
	// addr is the address the iterator is currently working from and describes
	// the address of the first word referenced by mask.
	addr uintptr
	// mask is a bitmask where each bit corresponds to pointer-words after addr.
	// Bit 0 is the pointer-word at addr, Bit 1 is the next word, and so on.
	// If a bit is 1, then there is a pointer at that word.
	// nextFast and next mask out bits in this mask as their pointers are processed.
	mask uintptr
	// typ is a pointer to the type information for the heap object's type.
	// This may be nil if the object is in a span where heapBitsInSpan(span.elemsize) is true.
	typ *_type
}
// typePointersOf returns an iterator over all heap pointers in the range [addr, addr+size).
//
// addr and addr+size must be in the range [span.base(), span.limit).
//
// Note: addr+size must be passed as the limit argument to the iterator's next method on
// each iteration. This slightly awkward API is to allow typePointers to be destructured
// by the compiler.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
	base := span.objBase(addr)
	tp := span.typePointersOfUnchecked(base)
	if base == addr && size == span.elemsize {
		// The request covers exactly the whole allocation slot; the
		// unchecked iterator is already correct.
		return tp
	}
	// Otherwise skip ahead from the slot base to addr and bound by addr+size.
	return tp.fastForward(addr-tp.addr, addr+size)
}
// typePointersOfUnchecked is like typePointersOf, but assumes addr is the base
// of an allocation slot in a span (the start of the object if no header, the
// header otherwise). It returns an iterator that generates all pointers
// in the range [addr, addr+span.elemsize).
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
	const doubleCheck = false
	if doubleCheck && span.objBase(addr) != addr {
		print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
		throw("typePointersOfUnchecked consisting of non-base-address for object")
	}
	spc := span.spanclass
	if spc.noscan() {
		// No pointers anywhere in a noscan span.
		return typePointers{}
	}
	if heapBitsInSpan(span.elemsize) {
		// Handle header-less objects.
		return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
	}
	// All of these objects have a header.
	var typ *_type
	if spc.sizeclass() != 0 {
		// Pull the allocation header from the first word of the object.
		typ = *(**_type)(unsafe.Pointer(addr))
		addr += gc.MallocHeaderSize
	} else {
		// Synchronize with allocator, in case this came from the conservative scanner.
		// See heapSetTypeLarge for more details.
		typ = (*_type)(atomic.Loadp(unsafe.Pointer(&span.largeType)))
		if typ == nil {
			// Allow a nil type here for delayed zeroing. See mallocgc.
			return typePointers{}
		}
	}
	gcmask := getGCMask(typ)
	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcmask), typ: typ}
}
// typePointersOfType is like typePointersOf, but assumes addr points to one or more
// contiguous instances of the provided type. The provided type must not be nil.
//
// It returns an iterator that tiles typ's gcmask starting from addr. It's the caller's
// responsibility to limit iteration.
//
// nosplit because its callers are nosplit and require all their callees to be nosplit.
//
//go:nosplit
func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
	const doubleCheck = false
	if doubleCheck && typ == nil {
		throw("bad type passed to typePointersOfType")
	}
	if span.spanclass.noscan() {
		return typePointers{}
	}
	// Since we have the type, pretend we have a header.
	gcmask := getGCMask(typ)
	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcmask), typ: typ}
}
// nextFast is the fast path of next. nextFast is written to be inlineable and,
// as the name implies, fast.
//
// It returns the address of the next pointer in the current mask, or 0 if
// the mask is exhausted (in which case the caller falls back to next).
//
// Callers that are performance-critical should iterate using the following
// pattern:
//
// for {
// var addr uintptr
// if tp, addr = tp.nextFast(); addr == 0 {
// if tp, addr = tp.next(limit); addr == 0 {
// break
// }
// }
// // Use addr.
// ...
// }
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (tp typePointers) nextFast() (typePointers, uintptr) {
	// TESTQ/JEQ
	if tp.mask == 0 {
		return tp, 0
	}
	// BSFQ
	// i is the index of the lowest set bit, i.e. the next pointer-word.
	var i int
	if goarch.PtrSize == 8 {
		i = sys.TrailingZeros64(uint64(tp.mask))
	} else {
		i = sys.TrailingZeros32(uint32(tp.mask))
	}
	// Clear the bit we just consumed, using whichever form the target
	// architecture lowers best (the assembly mnemonics are noted).
	if GOARCH == "amd64" {
		// BTCQ
		tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
	} else {
		// SUB, AND
		tp.mask &= tp.mask - 1
	}
	// LEAQ (XX)(XX*8)
	return tp, tp.addr + uintptr(i)*goarch.PtrSize
}
// next advances the pointers iterator, returning the updated iterator and
// the address of the next pointer.
//
// limit must be the same each time it is passed to next.
//
// Returns a zero iterator and address 0 when iteration is exhausted.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
	for {
		if tp.mask != 0 {
			return tp.nextFast()
		}
		// Stop if we don't actually have type information.
		if tp.typ == nil {
			return typePointers{}, 0
		}
		// Advance to the next element if necessary.
		// The next mask window would start past this element's pointer
		// prefix (typ.PtrBytes), so skip the pointer-free tail and move
		// to the next tiled element.
		if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
			tp.elem += tp.typ.Size_
			tp.addr = tp.elem
		} else {
			tp.addr += ptrBits * goarch.PtrSize
		}
		// Check if we've exceeded the limit with the last update.
		if tp.addr >= limit {
			return typePointers{}, 0
		}
		// Grab more bits and try again.
		tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
		if tp.addr+goarch.PtrSize*ptrBits > limit {
			// Clear the high bits that describe words past the limit.
			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
		}
	}
}
// fastForward moves the iterator forward by n bytes. n must be a multiple
// of goarch.PtrSize. limit must be the same limit passed to next for this
// iterator.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func (tp typePointers) fastForward(n, limit uintptr) typePointers {
	// Basic bounds check.
	target := tp.addr + n
	if target >= limit {
		// Nothing left in range; return the exhausted iterator.
		return typePointers{}
	}
	if tp.typ == nil {
		// Handle small objects.
		// Clear any bits before the target address.
		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
		// Clear any bits past the limit.
		if tp.addr+goarch.PtrSize*ptrBits > limit {
			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
		}
		return tp
	}
	// Move up elem and addr.
	// Offsets within an element are always at a ptrBits*goarch.PtrSize boundary.
	if n >= tp.typ.Size_ {
		// elem needs to be moved to the element containing
		// tp.addr + n.
		oldelem := tp.elem
		tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
		tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
	} else {
		tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
	}
	if tp.addr-tp.elem >= tp.typ.PtrBytes {
		// We're starting in the non-pointer area of an array.
		// Move up to the next element.
		tp.elem += tp.typ.Size_
		tp.addr = tp.elem
		tp.mask = readUintptr(getGCMask(tp.typ))
		// We may have exceeded the limit after this. Bail just like next does.
		if tp.addr >= limit {
			return typePointers{}
		}
	} else {
		// Grab the mask, but then clear any bits before the target address and any
		// bits over the limit.
		tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
	}
	// Clear any bits describing words past the limit.
	if tp.addr+goarch.PtrSize*ptrBits > limit {
		bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
		tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
	}
	return tp
}
// objBase returns the base pointer for the object containing addr in span.
//
// Assumes that addr points into a valid part of span (span.base() <= addr < span.limit).
//
//go:nosplit
func (span *mspan) objBase(addr uintptr) uintptr {
	// Round addr down to the start of its allocation slot.
	return span.base() + span.objIndex(addr)*span.elemsize
}
// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
//
// Pointer data is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.PtrBytes.
//
// The typ argument is the type of the space at src and dst (and the
// element type if src and dst refer to arrays) and it is optional.
// If typ is nil, the barrier will still behave as expected and typ
// is used purely as an optimization. However, it must be used with
// care.
//
// If typ is not nil, then src and dst must point to one or more values
// of type typ. The caller must ensure that the ranges [src, src+size)
// and [dst, dst+size) refer to one or more whole values of type src and
// dst (leaving off the pointerless tail of the space is OK). If this
// precondition is not followed, this function will fail to scan the
// right pointers.
//
// When in doubt, pass nil for typ. That is safe and will always work.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	s := spanOf(dst)
	if s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		// Not heap, data, or BSS: no barriers needed.
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no need for barriers.
		// This will also catch if dst is in a freed span,
		// though that should never happen.
		return
	}
	// Queue pointers into the per-P write barrier buffer.
	buf := &getg().m.p.ptr().wbBuf
	// Double-check that the bitmaps generated in the two possible paths match.
	const doubleCheck = false
	if doubleCheck {
		doubleCheckTypePointersOfType(s, typ, dst, size)
	}
	var tp typePointers
	if typ != nil {
		tp = s.typePointersOfType(typ, dst)
	} else {
		tp = s.typePointersOf(dst, size)
	}
	if src == 0 {
		// memclr case: only the old (overwritten) pointers are recorded.
		for {
			var addr uintptr
			if tp, addr = tp.next(dst + size); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			p := buf.get1()
			p[0] = *dstx
		}
	} else {
		// memmove case: record both the old destination pointer and the
		// incoming source pointer (hybrid barrier).
		for {
			var addr uintptr
			if tp, addr = tp.next(dst + size); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}
// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//
// The type of the space can be provided purely as an optimization.
// See bulkBarrierPreWrite's comment for more details -- use this
// optimization with great care.
//
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		// Name this function in the message (the original copy-pasted
		// "bulkBarrierPreWrite:") so crash reports point at the right
		// entry point.
		throw("bulkBarrierPreWriteSrcOnly: unaligned arguments")
	}
	if !writeBarrier.enabled {
		return
	}
	// Queue pointers into the per-P write barrier buffer.
	buf := &getg().m.p.ptr().wbBuf
	s := spanOf(dst)
	// Double-check that the bitmaps generated in the two possible paths match.
	const doubleCheck = false
	if doubleCheck {
		doubleCheckTypePointersOfType(s, typ, dst, size)
	}
	var tp typePointers
	if typ != nil {
		tp = s.typePointersOfType(typ, dst)
	} else {
		tp = s.typePointersOf(dst, size)
	}
	for {
		var addr uintptr
		if tp, addr = tp.next(dst + size); addr == 0 {
			break
		}
		// Only the incoming source pointer is recorded; dst is known
		// to be zeroed, so there is no old pointer to shade.
		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
		p := buf.get1()
		p[0] = *srcx
	}
}
// initHeapBits initializes the heap bitmap for a span.
func (s *mspan) initHeapBits() {
	if goarch.PtrSize == 8 && !s.spanclass.noscan() && s.spanclass.sizeclass() == 1 {
		// Scannable spans of size class 1 get an all-ones bitmap.
		// NOTE(review): this presumes sizeclass 1 objects are a single
		// pointer word on 64-bit — confirm against sizeclasses.go.
		b := s.heapBits()
		for i := range b {
			b[i] = ^uintptr(0)
		}
	} else if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
		// Start with a cleared bitmap; it is filled in as objects
		// are allocated (see writeHeapBitsSmall).
		b := s.heapBits()
		clear(b)
	}
	if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(s.elemsize) {
		// Green Tea GC keeps mark bits inline in the span as well.
		s.initInlineMarkBits()
	}
}
// heapBits returns the heap ptr/scalar bits stored at the end of the span for
// small object spans and heap arena spans.
//
// Note that the uintptr of each element means something different for small object
// spans and for heap arena spans. Small object spans are easy: they're never interpreted
// as anything but uintptr, so they're immune to differences in endianness. However, the
// heapBits for user arena spans is exposed through a dummy type descriptor, so the byte
// ordering needs to match the same byte ordering the compiler would emit. The compiler always
// emits the bitmap data in little endian byte ordering, so on big endian platforms these
// uintptrs will have their byte orders swapped from what they normally would be.
//
// heapBitsInSpan(span.elemsize) or span.isUserArenaChunk must be true.
//
//go:nosplit
func (span *mspan) heapBits() []uintptr {
	// Expensive sanity checks, compiled out by default.
	const doubleCheck = false
	if doubleCheck && !span.isUserArenaChunk {
		if span.spanclass.noscan() {
			throw("heapBits called for noscan")
		}
		if span.elemsize > gc.MinSizeForMallocHeader {
			throw("heapBits called for span class that should have a malloc header")
		}
	}
	// Find the bitmap at the end of the span.
	//
	// Nearly every span with heap bits is exactly one page in size. Arenas are the only exception.
	if span.npages == 1 {
		// This will be inlined and constant-folded down.
		return heapBitsSlice(span.base(), pageSize, span.elemsize)
	}
	return heapBitsSlice(span.base(), span.npages*pageSize, span.elemsize)
}
// Helper for constructing a slice for the span's heap bits.
//
// The slice header is built by hand through notInHeapSlice because the
// bitmap lives inside span memory, which must not be treated as an
// ordinary Go heap object by the garbage collector.
//
//go:nosplit
func heapBitsSlice(spanBase, spanSize, elemsize uintptr) []uintptr {
	base, bitmapSize := spanHeapBitsRange(spanBase, spanSize, elemsize)
	// One slice element per pointer-word of bitmap.
	elems := int(bitmapSize / goarch.PtrSize)
	var sl notInHeapSlice
	sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(base)), elems, elems}
	return *(*[]uintptr)(unsafe.Pointer(&sl))
}
// spanHeapBitsRange returns the address and length (in bytes) of the
// heap ptr/scalar bitmap stored at the end of a span.
//
// One bitmap bit covers one pointer-word of span data, so the bitmap
// occupies spanSize/PtrSize/8 bytes at the very end of the span. When
// Green Tea GC keeps inline mark bits in the span, those occupy the
// final bytes instead and the bitmap shifts down to make room.
//
//go:nosplit
func spanHeapBitsRange(spanBase, spanSize, elemsize uintptr) (base, size uintptr) {
	size = spanSize / goarch.PtrSize / 8
	base = spanBase + spanSize - size
	if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(elemsize) {
		base -= unsafe.Sizeof(spanInlineMarkBits{})
	}
	return
}
// heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits.
//
// addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize)
// must be true.
//
//go:nosplit
func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
	hbitsBase, _ := spanHeapBitsRange(span.base(), span.npages*pageSize, span.elemsize)
	hbits := (*byte)(unsafe.Pointer(hbitsBase))
	// These objects are always small enough that their bitmaps
	// fit in a single word, so just load the word or two we need.
	//
	// Mirrors mspan.writeHeapBitsSmall.
	//
	// We should be using heapBits(), but unfortunately it introduces
	// both bounds checks panics and throw which causes us to exceed
	// the nosplit limit in quite a few cases.
	i := (addr - span.base()) / goarch.PtrSize / ptrBits // word index into the bitmap
	j := (addr - span.base()) / goarch.PtrSize % ptrBits // bit offset within that word
	bits := span.elemsize / goarch.PtrSize               // number of bitmap bits for one object
	word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
	word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
	var read uintptr
	if j+bits > ptrBits {
		// Two reads: the object's bits straddle a word boundary.
		bits0 := ptrBits - j
		bits1 := bits - bits0
		read = *word0 >> j
		read |= (*word1 & ((1 << bits1) - 1)) << bits0
	} else {
		// One read: all bits live in word0.
		read = (*word0 >> j) & ((1 << bits) - 1)
	}
	return read
}
// writeHeapBitsSmall writes the heap bits for small objects whose ptr/scalar data is
// stored as a bitmap at the end of the span.
//
// Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span.
// heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_.
//
// Returns the number of bytes of the object the GC needs to scan.
//
//go:nosplit
func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
	// The objects here are always really small, so a single load is sufficient.
	src0 := readUintptr(getGCMask(typ))

	// Create repetitions of the bitmap if we have a small slice backing store.
	src := src0
	if typ.Size_ == goarch.PtrSize {
		// Single-word element type: every word is a pointer.
		src = (1 << (dataSize / goarch.PtrSize)) - 1
		// This object is all pointers, so scanSize is just dataSize.
		scanSize = dataSize
	} else {
		// N.B. We rely on dataSize being an exact multiple of the type size.
		// The alternative is to be defensive and mask out src to the length
		// of dataSize. The purpose is to save on one additional masking operation.
		if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
			throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
		}
		scanSize = typ.PtrBytes
		// Tile the one-element mask across every element of the slice.
		for i := typ.Size_; i < dataSize; i += typ.Size_ {
			src |= src0 << (i / goarch.PtrSize)
			scanSize += typ.Size_
		}
		if asanenabled {
			// Mask src down to dataSize. dataSize is going to be a strange size because of
			// the redzone required for allocations when asan is enabled.
			src &= (1 << (dataSize / goarch.PtrSize)) - 1
		}
	}

	// Since we're never writing more than one uintptr's worth of bits, we're either going
	// to do one or two writes.
	dstBase, _ := spanHeapBitsRange(span.base(), pageSize, span.elemsize)
	dst := unsafe.Pointer(dstBase)
	o := (x - span.base()) / goarch.PtrSize // word offset of the object in the span
	i := o / ptrBits                        // bitmap word index
	j := o % ptrBits                        // bit offset within that word
	bits := span.elemsize / goarch.PtrSize  // bits covering one object
	if j+bits > ptrBits {
		// Two writes: the object's bits straddle a word boundary.
		bits0 := ptrBits - j
		bits1 := bits - bits0
		dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
		dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
		*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
		*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
	} else {
		// One write.
		dst := (*uintptr)(add(dst, i*goarch.PtrSize))
		*dst = (*dst)&^(((1<<bits)-1)<<j) | (src << j)
	}

	// Read the bits back to validate the write (compiled out by default).
	const doubleCheck = false
	if doubleCheck {
		srcRead := span.heapBitsSmallForAddr(x)
		if srcRead != src {
			print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
			print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
			print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
			throw("bad pointer bits written for small object")
		}
	}
	return
}
// heapSetType* functions record that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.Size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapSetType* when there are no pointers.
//
// There can be read-write races between heapSetType* and things
// that read the heap metadata like scanObject. However, since
// heapSetType* is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// shared memory that belongs to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
// doubleCheckHeapSetType enables expensive self-verification in the
// heapSetType* paths; it follows the global malloc double-check flag.
const doubleCheckHeapSetType = doubleCheckMalloc
// heapSetTypeNoHeader records the type bitmap for a small allocation whose
// ptr/scalar bits live inline at the end of the span (no malloc header).
// It returns the number of bytes of the allocation the GC must scan.
func heapSetTypeNoHeader(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
	if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
		throw("tried to write heap bits, but no heap bits in span")
	}
	scanSize := span.writeHeapBitsSmall(x, dataSize, typ)
	if doubleCheckHeapSetType {
		doubleCheckHeapType(x, dataSize, typ, nil, span)
	}
	return scanSize
}
// heapSetTypeSmallHeader records the type for an allocation that carries a
// malloc header by storing typ through header. Returns span.elemsize as the
// number of bytes the GC must scan.
func heapSetTypeSmallHeader(x, dataSize uintptr, typ *_type, header **_type, span *mspan) uintptr {
	if header == nil {
		// This nil check and throw is almost pointless. Normally we would
		// expect header to never be nil. However, this is called on potentially
		// freshly-allocated virtual memory. As of 2025, the compiler-inserted
		// nil check is not a branch but a memory read that we expect to fault
		// if the pointer really is nil.
		//
		// However, this causes a read of the page, and operating systems may
		// take it as a hint to back the accessed memory with a read-only zero
		// page. However, we immediately write to this memory, which can then
		// force operating systems to have to update the page table and flush
		// the TLB.
		//
		// This nil check is thus an explicit branch instead of what the compiler
		// would insert circa 2025, which is a memory read instruction.
		//
		// See go.dev/issue/74375 for details of a similar issue in
		// spanInlineMarkBits.
		throw("runtime: pointer to heap type header nil?")
	}
	*header = typ
	if doubleCheckHeapSetType {
		doubleCheckHeapType(x, dataSize, typ, header, span)
	}
	return span.elemsize
}
// heapSetTypeLarge records the type for a large allocation by publishing it
// atomically to span.largeType. Returns span.elemsize as the number of bytes
// the GC must scan.
func heapSetTypeLarge(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
	gctyp := typ
	// Write out the header atomically to synchronize with the garbage collector.
	//
	// This atomic store is paired with an atomic load in typePointersOfUnchecked.
	// This store ensures that initializing x's memory cannot be reordered after
	// this store. Meanwhile the load in typePointersOfUnchecked ensures that
	// reading x's memory cannot be reordered before largeType is loaded. Together,
	// these two operations guarantee that the garbage collector can only see
	// initialized memory if largeType is non-nil.
	//
	// Gory details below...
	//
	// Ignoring conservative scanning for a moment, this store need not be atomic
	// if we have a publication barrier on our side. This is because the garbage
	// collector cannot observe x unless:
	// 1. It stops this goroutine and scans its stack, or
	// 2. We return from mallocgc and publish the pointer somewhere.
	// Either case requires a write on our side, followed by some synchronization
	// followed by a read by the garbage collector.
	//
	// In case (1), the garbage collector can only observe a nil largeType, since it
	// had to stop our goroutine when it was preemptible during zeroing. For the
	// duration of the zeroing, largeType is nil and the object has nothing interesting
	// for the garbage collector to look at, so the garbage collector will not access
	// the object at all.
	//
	// In case (2), the garbage collector can also observe a nil largeType. This
	// might happen if the object was newly allocated, and a new GC cycle didn't start
	// (that would require a global barrier, STW). In this case, the garbage collector
	// will once again ignore the object, and that's safe because objects are
	// allocate-black.
	//
	// However, the garbage collector can also observe a non-nil largeType in case (2).
	// This is still okay, since to access the object's memory, it must have first
	// loaded the object's pointer from somewhere. This makes the access of the object's
	// memory a data-dependent load, and our publication barrier in the allocator
	// guarantees that a data-dependent load must observe a version of the object's
	// data from after the publication barrier executed.
	//
	// Unfortunately conservative scanning is a problem. There's no guarantee of a
	// data dependency as in case (2) because conservative scanning can produce pointers
	// 'out of thin air' in that it need not have been written somewhere by the allocating
	// thread first. It might not even be a pointer, or it could be a pointer written to
	// some stack location long ago. This is the fundamental reason why we need
	// explicit synchronization somewhere in this whole mess. We choose to put that
	// synchronization on largeType.
	//
	// As described at the very top, the treating largeType as an atomic variable, on
	// both the reader and writer side, is sufficient to ensure that only initialized
	// memory at x will be observed if largeType is non-nil.
	atomic.StorepNoWB(unsafe.Pointer(&span.largeType), unsafe.Pointer(gctyp))
	if doubleCheckHeapSetType {
		doubleCheckHeapType(x, dataSize, typ, &span.largeType, span)
	}
	return span.elemsize
}
// doubleCheckHeapType verifies the type bitmap just recorded for the
// allocation [x, x+dataSize): first over the whole object, then again
// starting from a randomly chosen interior pointer to exercise the
// less common iteration path. Debug-only; throws on mismatch.
func doubleCheckHeapType(x, dataSize uintptr, gctyp *_type, header **_type, span *mspan) {
	doubleCheckHeapPointers(x, dataSize, gctyp, header, span)

	// To exercise the less common path more often, generate
	// a random interior pointer and make sure iterating from
	// that point works correctly too.
	maxIterBytes := span.elemsize
	if header == nil {
		maxIterBytes = dataSize
	}
	// Pick a random pointer-aligned offset and a random length within the object.
	off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
	size := dataSize - off
	if size == 0 {
		off -= goarch.PtrSize
		size += goarch.PtrSize
	}
	interior := x + off
	size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
	if size == 0 {
		size = goarch.PtrSize
	}
	// Round up the type to the size of the type.
	size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
	if interior+size > x+maxIterBytes {
		size = x + maxIterBytes - interior
	}
	doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
}
// doubleCheckHeapPointers verifies that iterating the heap bitmap for the
// object containing x yields exactly the pointer slots implied by typ's GC
// mask — no missing and no extra entries. Debug-only; dumps state and
// throws on any mismatch.
func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
	// Check that scanning the full object works.
	tp := span.typePointersOfUnchecked(span.objBase(x))
	maxIterBytes := span.elemsize
	if header == nil {
		maxIterBytes = dataSize
	}
	bad := false
	for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
		// Compute the pointer bit we want at offset i.
		want := false
		if i < span.elemsize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
			}
		}
		if want {
			var addr uintptr
			tp, addr = tp.next(x + span.elemsize)
			if addr == 0 {
				println("runtime: found bad iterator")
			}
			if addr != x+i {
				print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
				bad = true
			}
		}
	}
	if !bad {
		// The iterator must now be exhausted; any further address is spurious.
		var addr uintptr
		tp, addr = tp.next(x + span.elemsize)
		if addr == 0 {
			return
		}
		println("runtime: extra pointer:", hex(addr))
	}
	// Fix typo in the diagnostic: "OnDemaind" -> "OnDemand".
	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " TFlagGCMaskOnDemand=", typ.TFlag&abi.TFlagGCMaskOnDemand != 0, "\n")
	print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
	print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
	print("runtime: limit=", hex(x+span.elemsize), "\n")
	tp = span.typePointersOfUnchecked(x)
	dumpTypePointers(tp)
	for {
		var addr uintptr
		if tp, addr = tp.next(x + span.elemsize); addr == 0 {
			println("runtime: would've stopped here")
			dumpTypePointers(tp)
			break
		}
		print("runtime: addr=", hex(addr), "\n")
		dumpTypePointers(tp)
	}
	throw("heapSetType: pointer entry not correct")
}
// doubleCheckHeapPointersInterior is like doubleCheckHeapPointers, but starts
// iterating from the interior pointer [interior, interior+size) of the object
// based at x, verifying the bitmap against typ's GC mask. Debug-only; dumps
// state and throws on any mismatch.
func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
	bad := false
	if interior < x {
		print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
		throw("found bad interior pointer")
	}
	off := interior - x
	tp := span.typePointersOf(interior, size)
	for i := off; i < off+size; i += goarch.PtrSize {
		// Compute the pointer bit we want at offset i.
		want := false
		if i < span.elemsize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
			}
		}
		if want {
			var addr uintptr
			tp, addr = tp.next(interior + size)
			if addr == 0 {
				println("runtime: found bad iterator")
				bad = true
			}
			if addr != x+i {
				print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
				bad = true
			}
		}
	}
	if !bad {
		// The iterator must now be exhausted; any further address is spurious.
		var addr uintptr
		tp, addr = tp.next(interior + size)
		if addr == 0 {
			return
		}
		println("runtime: extra pointer:", hex(addr))
	}
	// Mismatch: dump everything we know, then crash.
	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
	print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
	print("runtime: limit=", hex(interior+size), "\n")
	tp = span.typePointersOf(interior, size)
	dumpTypePointers(tp)
	for {
		var addr uintptr
		if tp, addr = tp.next(interior + size); addr == 0 {
			println("runtime: would've stopped here")
			dumpTypePointers(tp)
			break
		}
		print("runtime: addr=", hex(addr), "\n")
		dumpTypePointers(tp)
	}

	print("runtime: want: ")
	for i := off; i < off+size; i += goarch.PtrSize {
		// Compute the pointer bit we want at offset i.
		want := false
		if i < dataSize {
			off := i % typ.Size_
			if off < typ.PtrBytes {
				j := off / goarch.PtrSize
				want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
			}
		}
		if want {
			print("1")
		} else {
			print("0")
		}
	}
	println()

	throw("heapSetType: pointer entry not correct")
}
// doubleCheckTypePointersOfType verifies that the type-based iterator
// (typePointersOfType) and the bitmap-based iterator (typePointersOf)
// produce identical pointer addresses over [addr, addr+size).
// Debug-only; dumps both iterations and throws on divergence.
//
//go:nosplit
func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
	if typ == nil {
		return
	}
	if typ.Kind() == abi.Interface {
		// Interfaces are unfortunately inconsistently handled
		// when it comes to the type pointer, so it's easy to
		// produce a lot of false positives here.
		return
	}
	tp0 := s.typePointersOfType(typ, addr)
	tp1 := s.typePointersOf(addr, size)
	failed := false
	// Advance both iterators in lockstep; they must agree at every step.
	for {
		var addr0, addr1 uintptr
		tp0, addr0 = tp0.next(addr + size)
		tp1, addr1 = tp1.next(addr + size)
		if addr0 != addr1 {
			failed = true
			break
		}
		if addr0 == 0 {
			break
		}
	}
	if failed {
		// Re-run both iterations from the start, printing each step.
		tp0 := s.typePointersOfType(typ, addr)
		tp1 := s.typePointersOf(addr, size)
		print("runtime: addr=", hex(addr), " size=", size, "\n")
		print("runtime: type=", toRType(typ).string(), "\n")
		dumpTypePointers(tp0)
		dumpTypePointers(tp1)
		for {
			var addr0, addr1 uintptr
			tp0, addr0 = tp0.next(addr + size)
			tp1, addr1 = tp1.next(addr + size)
			print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
			if addr0 == 0 && addr1 == 0 {
				break
			}
		}
		throw("mismatch between typePointersOfType and typePointersOf")
	}
}
// dumpTypePointers prints the internal state of a typePointers iterator
// (elem, typ, addr, and the mask bits, least-significant bit first) for
// the heap-bitmap debugging paths.
func dumpTypePointers(tp typePointers) {
	print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
	print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
	for bit := uintptr(0); bit < ptrBits; bit++ {
		if tp.mask>>bit&1 == 0 {
			print("0")
		} else {
			print("1")
		}
	}
	println()
}
// addb returns the byte pointer p+n.
//
// The result is produced by raw uintptr arithmetic; the caller is
// responsible for n staying within the same allocation.
//
//go:nowritebarrier
//go:nosplit
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}
// subtractb returns the byte pointer p-n.
//
// The result is produced by raw uintptr arithmetic; the caller is
// responsible for the result staying within the same allocation.
//
//go:nowritebarrier
//go:nosplit
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}
// add1 returns the byte pointer p+1.
//
//go:nowritebarrier
//go:nosplit
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}
// subtract1 returns the byte pointer p-1.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nowritebarrier
//go:nosplit
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}
// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.byte&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
type markBits struct {
	bytep *uint8  // byte in the bitmap holding the bit
	mask  uint8   // single set bit selecting the bit within *bytep
	index uintptr // object index within the span
}
// allocBitsForIndex returns a markBits view of the allocation bit for
// the allocBitIndex'th object in s.
//
//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}
// refillAllocCache takes 8 bytes s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uint16) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
	// Assemble the 8 bytes into a 64-bit word, low byte first
	// (little-endian), regardless of host byte order.
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	// Invert so a set bit in allocCache means "free slot".
	s.allocCache = ^aCache
}
// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex. If the span is full it returns s.nelems.
// As a side effect it advances s.freeindex past the returned slot
// and refills s.allocCache as needed.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uint16 {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		// Span is exhausted.
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	// A set bit in allocCache marks a free slot; ctz finds the lowest one.
	bitIndex := sys.TrailingZeros64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.TrailingZeros64(aCache)
		// nothing available in cached bits
		// grab the next 8 bytes and try again.
	}
	result := sfreeindex + uint16(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	// Consume the bit we just found so the next call skips past it.
	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}
// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
//
// Callers must ensure that the index passed here must not have been
// produced from a pointer that came from 'thin air', as might happen
// with conservative scanning.
func (s *mspan) isFree(index uintptr) bool {
	// Everything below freeindex is known allocated; the alloc bit
	// is only consulted at or above it.
	if index < uintptr(s.freeindex) {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}
// isFreeOrNewlyAllocated reports whether the index'th object in s is
// either unallocated or has been allocated since the beginning of the
// last mark phase.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
//
// Callers must ensure that the index passed here must not have been
// produced from a pointer that came from 'thin air', as might happen
// with conservative scanning, unless the GC is currently in the mark
// phase. If the GC is currently in the mark phase, this function is
// safe to call for out-of-thin-air pointers.
func (s *mspan) isFreeOrNewlyAllocated(index uintptr) bool {
	// Like isFree, but cut off at freeIndexForScan, which lags
	// freeindex during the mark phase.
	if index < uintptr(s.freeIndexForScan) {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}
// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*_PageSize),
// or may be exactly s.npages*_PageSize
// if s.elemsize is from sizeclasses.go.
//
// The division is done with a precomputed magic multiply (s.divMul)
// instead of a hardware divide.
//
// nosplit, because it is called by objIndex, which is nosplit
//
//go:nosplit
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// See explanation in mksizeclasses.go's computeDivMagic.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}
// objIndex returns the index of the object in s containing address p.
//
// nosplit, because it is called by other nosplit code like findObject
//
//go:nosplit
func (s *mspan) objIndex(p uintptr) uintptr {
	return s.divideByElemSize(p - s.base())
}
// markBitsForAddr returns the markBits for the object containing
// heap address p. p must point into a span (spanOf must not be nil).
func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}
// isMarked reports whether mark bit m is set.
// The read is non-atomic.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}
// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}
// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
// Only safe when no other goroutine can touch the same bitmap byte.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}
// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}
// markBitsForSpan returns the markBits for the span base address base.
// base must be the first object in its span, so its mark bit must be
// the lowest bit of a bitmap byte (mask == 1); anything else is a bug.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}
// isMarkedOrNotInHeap returns true if a pointer is in the heap and marked,
// or if the pointer is not in the heap. Used by goroutine leak detection
// to determine if concurrency resources are reachable in memory.
func isMarkedOrNotInHeap(p unsafe.Pointer) bool {
	obj, span, objIndex := findObject(uintptr(p), 0, 0)
	if obj != 0 {
		// p resolves to a heap object: answer from its mark bit.
		mbits := span.markBitsForIndex(objIndex)
		return mbits.isMarked()
	}

	// If we fall through to get here, the object is not in the heap.
	// In this case, it is either a pointer to a stack object or a global resource.
	// Treat it as reachable in memory by default, to be safe.
	//
	// TODO(vsaioc): we could be more precise by checking against the stacks
	// of runnable goroutines. I don't think this is necessary, based on what we've seen, but
	// let's keep the option open in case the runtime evolves.
	// This will (naively) lead to quadratic blow-up for goroutine leak detection,
	// but if it is only run on demand, maybe the extra cost is not a show-stopper.
	return true
}
// advance moves m to the mark bit of the next object in the span:
// it bumps the object index and shifts the bit mask left, stepping
// to the next bitmap byte once the high bit has been consumed.
func (m *markBits) advance() {
	m.index++
	if m.mask != 1<<7 {
		m.mask <<= 1
		return
	}
	m.mask = 1
	m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
}
// clobberdeadPtr is a special value that is used by the compiler to
// clobber dead stack slots, when -clobberdead flag is set.
// The expression repeats 0xdeaddead into the high word only on 64-bit
// targets (the shift amount is 32 on 64-bit, 0 on 32-bit).
const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
// badPointer prints diagnostics for an invalid heap pointer p (optionally
// found at *(refBase+refOff)) and throws. s may be nil if p is in no span.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug.
	//
	// TODO(austin): We could be more aggressive
	// and detect pointers to unallocated objects
	// in allocated spans.
	printlock()
	print("runtime: pointer ", hex(p))
	if s != nil {
		state := s.state.get()
		if state != mSpanInUse {
			print(" to unallocated span")
		} else {
			print(" to unused region of span")
		}
		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
	}
	print("\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}
// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p points is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
// Since p is a uintptr, it would not be adjusted if the stack were to move.
//
// findObject should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname findObject
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
			// as they are the only platform where compiler's clobberdead mode is
			// implemented. On these platforms clobberdeadPtr cannot be a valid address.
			badPointer(s, p, refBase, refOff)
		}
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields. See also spanOfHeap.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok, the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	// p is inside an in-use span: compute the object's index and base.
	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}
// reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into a unsafe.Pointer is ok.
//
//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
func reflect_verifyNotInHeapPtr(p uintptr) bool {
	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
	// Since we're already promised that p doesn't point into the heap, just disallow heap
	// pointers and the special clobbered pointer.
	return spanOf(p) == nil && p != clobberdeadPtr
}
// ptrBits is the number of bits in one pointer-sized word, i.e. how
// many bitmap bits a single uintptr of heap bitmap holds.
const ptrBits = 8 * goarch.PtrSize
// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / goarch.PtrSize
	bits = addb(bits, word/8)         // byte of the bitmap covering the first word
	mask := uint8(1) << (word % 8)    // bit within that byte

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if mask == 0 {
			// Exhausted the current bitmap byte; move to the next.
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * goarch.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				// No source: only the overwritten value needs shading.
				p := buf.get1()
				p[0] = *dstx
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				p := buf.get2()
				p[0] = *dstx
				p[1] = *srcx
			}
		}
		mask <<= 1
	}
}
// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.Size_ != size {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.enabled {
		return
	}
	ptrmask := getGCMask(typ)
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	// Walk only the pointer-bearing prefix of the type (PtrBytes).
	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		if i&(goarch.PtrSize*8-1) == 0 {
			// Crossed into a new mask byte; load it.
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}
// countAlloc returns the number of objects allocated in span s by
// scanning the mark bitmap.
func (s *mspan) countAlloc() int {
	// newMarkBits guarantees gcmarkBits is 8-byte aligned and any
	// bits beyond nelems are zero, so it is safe to read 64 bits at
	// a time and popcount them. Endianness is irrelevant here: we
	// only care how many bits are set, not where they sit.
	total := 0
	nbytes := divRoundUp(uintptr(s.nelems), 8)
	for off := uintptr(0); off < nbytes; off += 8 {
		chunk := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(off)))
		total += sys.OnesCount64(chunk)
	}
	return total
}
// Read the bytes starting at the aligned pointer p into a uintptr.
// Read is little-endian.
func readUintptr(p *byte) uintptr {
	raw := *(*uintptr)(unsafe.Pointer(p))
	if !goarch.BigEndian {
		// Host is little-endian; the raw load is already correct.
		return raw
	}
	// Big-endian hosts must byte-swap to recover the little-endian value.
	if goarch.PtrSize == 8 {
		return uintptr(sys.Bswap64(uint64(raw)))
	}
	return uintptr(sys.Bswap32(uint32(raw)))
}
// debugPtrmask holds a shared scratch buffer used when cross-checking
// pointer masks; lock guards data. (The allocation and use of data are
// elsewhere in the runtime — not visible in this chunk.)
var debugPtrmask struct {
	lock mutex
	data *byte
}
// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	// One mask bit per pointer-sized word, rounded up to whole
	// bytes, plus one extra byte used as an overflow sentinel.
	nbytes := (size/goarch.PtrSize + 7) / 8
	buf := (*[1 << 30]byte)(persistentalloc(nbytes+1, 1, &memstats.buckhash_sys))[:nbytes+1]
	buf[len(buf)-1] = 0xa1 // overflow check sentinel
	nbits := runGCProg(prog, &buf[0])
	if buf[len(buf)-1] != 0xa1 {
		// The program wrote past the buffer we sized for it.
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(nbits), &buf[0]}
}
// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
// 00000000: stop
// 0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
// 10000000 n c: repeat the previous n bits c times; n, c are varints
// 1nnnnnnn c: repeat the previous n bits c times; c is a varint
//
// Currently, gc programs are only used for describing data and bss
// sections of the binary.
// runGCProg executes the GC program prog (see the encoding described
// above), writing the unpacked 1-bit pointer mask to dst, and
// returns the number of 1-bit entries written to memory.
func runGCProg(prog, dst *byte) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over.
				break Run
			}
			// Stream whole literal bytes through the bit buffer.
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			// Buffer any trailing partial byte (< 8 bits).
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory.
		// Handling fewer than 8 bits here makes the general loop simpler.
		// The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
		// it will not overflow.
		src := dst
		const maxBits = goarch.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			src = subtract1(src)
			for npattern < n {
				pattern <<= 8
				pattern |= uintptr(*src)
				src = subtract1(src)
				npattern += 8
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Either way, we might now have too many instead of too few.
			// Discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is already all 0s,
				// but we can claim that the number of bits
				// in the word is equal to the number we need (c),
				// because right shift of bits will zero fill.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= goarch.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					// TODO(rsc): Replace with table lookup or loop on systems without divide?
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				for nbits >= 8 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
					nbits -= 8
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7

		// Leading src fragment.
		src = subtractb(src, (off+7)/8)
		if frag := off & 7; frag != 0 {
			bits |= uintptr(*src) >> (8 - frag) << nbits
			src = add1(src)
			nbits += frag
			c -= frag
		}
		// Main loop: load one byte, write another.
		// The bits are rotating through the bit buffer.
		for i := c / 8; i > 0; i-- {
			bits |= uintptr(*src) << nbits
			src = add1(src)
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
		// Final src fragment.
		if c %= 8; c > 0 {
			bits |= (uintptr(*src) & (1<<c - 1)) << nbits
			nbits += c
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
	nbits += -nbits & 7 // round up to a whole number of byte writes
	for ; nbits > 0; nbits -= 8 {
		*dst = uint8(bits)
		dst = add1(dst)
		bits >>= 8
	}
	return totalBits
}
// dumpGCProg disassembles the GC program starting at p to the
// console, one instruction per line, for debugging.
func dumpGCProg(p *byte) {
	nptr := 0 // running count of mask bits emitted so far
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			// 0x00: stop instruction.
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			// 0nnnnnnn: literal of x bits, stored in the next (x+7)/8 bytes.
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			// 1nnnnnnn: repeat instruction. A zero bit count means
			// the count is encoded as a varint in the next bytes.
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			// The repeat count always follows as a varint.
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}
// Testing.
// reflect_gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x any) []byte {
	// Thin linkname shim for the reflect package; all the work
	// happens in pointerMask.
	return pointerMask(x)
}
// Returns GC type info for the pointer stored in ep for testing.
// If ep points to the stack, only static live information will be returned
// (i.e. not for objects which are only dynamically live stack objects).
//
// ep must be a pointer to the value whose mask is being queried; the
// mask has one byte (0 or 1) per pointer-sized word of the pointee.
func pointerMask(ep any) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type

	var et *_type
	if t.Kind() != abi.Pointer {
		throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
	}
	et = (*ptrtype)(unsafe.Pointer(t)).Elem

	// data or bss: read the mask directly out of the module's
	// static pointer bitmaps.
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := et.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := et.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		if s.spanclass.noscan() {
			return nil
		}
		limit := base + s.elemsize

		// Move the base up to the iterator's start, because
		// we want to hide evidence of a malloc header from the
		// caller.
		tp := s.typePointersOfUnchecked(base)
		base = tp.addr

		// Unroll the full bitmap the GC would actually observe.
		maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
		for {
			var addr uintptr
			if tp, addr = tp.next(limit); addr == 0 {
				break
			}
			maskFromHeap[(addr-base)/goarch.PtrSize] = 1
		}

		// Double-check that every part of the ptr/scalar we're not
		// showing the caller is zeroed. This keeps us honest that
		// that information is actually irrelevant.
		//
		// NOTE(review): limit is base+s.elemsize (a heap address), so
		// the condition i < s.elemsize appears to be false on entry and
		// this loop body unreachable — confirm the intended upper bound.
		for i := limit; i < s.elemsize; i++ {
			if *(*byte)(unsafe.Pointer(i)) != 0 {
				throw("found non-zeroed tail of allocation")
			}
		}

		// Callers (and a check we're about to run) expect this mask
		// to end at the last pointer.
		for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
			maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
		}

		// Unroll again, but this time from the type information.
		maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
		tp = s.typePointersOfType(et, base)
		for {
			var addr uintptr
			if tp, addr = tp.next(limit); addr == 0 {
				break
			}
			maskFromType[(addr-base)/goarch.PtrSize] = 1
		}

		// Validate that the prefix of maskFromType is equal to
		// maskFromHeap. maskFromType may contain more pointers than
		// maskFromHeap produces because maskFromHeap may be able to
		// get exact type information for certain classes of objects.
		// With maskFromType, we're always just tiling the type bitmap
		// through to the elemsize.
		//
		// It's OK if maskFromType has pointers in elemsize that extend
		// past the actual populated space; we checked above that all
		// that space is zeroed, so the GC will just see nil pointers.
		differs := false
		for i := range maskFromHeap {
			if maskFromHeap[i] != maskFromType[i] {
				differs = true
				break
			}
		}

		if differs {
			print("runtime: heap mask=")
			for _, b := range maskFromHeap {
				print(b)
			}
			println()
			print("runtime: type mask=")
			for _, b := range maskFromType {
				print(b)
			}
			println()
			print("runtime: type=", toRType(et).string(), "\n")
			throw("found two different masks from two different methods")
		}

		// Select the heap mask to return. We may not have a type mask.
		mask = maskFromHeap

		// Make sure we keep ep alive. We may have stopped referencing
		// ep's data pointer sometime before this point and it's possible
		// for that memory to get freed.
		KeepAlive(ep)
		return
	}

	// stack: only static liveness from the frame's stack map is
	// reported (see the doc comment above).
	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
		found := false
		var u unwinder
		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
				found = true
				break
			}
		}
		if found {
			locals, _, _ := u.frame.getStackMap(false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * goarch.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
				mask[i/goarch.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// otherwise, not something the GC knows about.
	// possibly read-only data, like malloc(0).
	// must not have pointers
	return
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/atomic"
"internal/runtime/gc"
"internal/runtime/sys"
"unsafe"
)
// Per-thread (in Go, per-P) cache for small objects.
// This includes a small object cache and local allocation stats.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
type mcache struct {
	_ sys.NotInHeap

	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	nextSample  int64   // trigger heap sample after allocating this many bytes
	memProfRate int     // cached mem profile rate, used to detect changes
	scanAlloc   uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	//
	// tinyAllocs is the number of tiny allocations performed
	// by the P that owns this mcache.
	tiny       uintptr
	tinyoffset uintptr // offset of the next free byte within tiny (see malloc.go)
	tinyAllocs uintptr

	// The rest is not accessed on every malloc.

	// alloc contains spans to allocate from, indexed by spanClass.
	alloc [numSpanClasses]*mspan

	// TODO(thepudds): better to interleave alloc and reusableScan/reusableNoscan so that
	// a single malloc call can often access both in the same cache line for a given spanClass.
	// It's not interleaved right now in part to have slightly smaller diff, and might be
	// negligible effect on current microbenchmarks.

	// reusableNoscan contains linked lists of reusable noscan heap objects, indexed by spanClass.
	// The next pointers are stored in the first word of the heap objects.
	reusableNoscan [numSpanClasses]gclinkptr

	// stackcache holds this P's free stack segments, one list per
	// stack size order (see stackfreelist below).
	stackcache [_NumStackOrders]stackfreelist

	// flushGen indicates the sweepgen during which this mcache
	// was last flushed. If flushGen != mheap_.sweepgen, the spans
	// in this mcache are stale and need to be flushed so they
	// can be swept. This is done in acquirep.
	flushGen atomic.Uint32
}
// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr // next block in the list; 0 marks the tail
}
// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	// p is just the address of a gclink held as a uintptr; the
	// uintptr representation is what hides it from the GC.
	return (*gclink)(unsafe.Pointer(p))
}
// stackfreelist is a free list of stack segments of one size order,
// linked through the free stacks themselves.
type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}

// dummy mspan that contains no free objects.
// mcache.alloc slots point here when no real span is cached.
var emptymspan mspan
// allocmcache allocates and initializes a new mcache, stamping it
// with the current sweepgen. The allocation itself happens on the
// system stack under the heap lock.
func allocmcache() *mcache {
	var c *mcache
	systemstack(func() {
		lock(&mheap_.lock)
		c = (*mcache)(mheap_.cachealloc.alloc())
		c.flushGen.Store(mheap_.sweepgen)
		unlock(&mheap_.lock)
	})
	c.nextSample = nextSample()
	// Point every span class at the shared empty span so the first
	// allocation of each class triggers a refill.
	for class := range c.alloc {
		c.alloc[class] = &emptymspan
	}
	return c
}
// freemcache releases resources associated with this
// mcache and puts the object onto a free list.
//
// All cached spans are returned to the mcentrals (with their stats
// flushed) via releaseAll, and the stack cache is cleared, before the
// mcache itself is freed back to mheap_.cachealloc.
//
// NOTE(review): an earlier version of this comment mentioned donating
// statistics to a "recipient" mcache, but no such parameter exists in
// this signature; releaseAll flushes the stats instead.
func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}
// getMCache is a convenience function which tries to obtain an mcache.
//
// Returns nil if we're not bootstrapping or we don't have a P. The caller's
// P must not change, so we must be in a non-preemptible state.
func getMCache(mp *m) *mcache {
	// Grab the mcache, since that's where stats live.
	if pp := mp.p.ptr(); pp != nil {
		return pp.mcache
	}
	// No P: this only happens while bootstrapping. mcache0 is set in
	// mallocinit and cleared by procresize once bootstrapping is done.
	return mcache0
}
// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) refill(spc spanClass) {
	// Return the current cached span to the central lists.
	s := c.alloc[spc]
	if s.allocCount != s.nelems {
		throw("refill of span with free space remaining")
	}

	// TODO(thepudds): we might be able to allow mallocgcTiny to reuse 16 byte objects from spc==5,
	// but for now, just clear our reusable objects for tinySpanClass.
	if spc == tinySpanClass {
		c.reusableNoscan[spc] = 0
	}
	// Any other span class must have an empty reusable list at this
	// point; a non-empty one here indicates a bookkeeping bug.
	if c.reusableNoscan[spc] != 0 {
		throw("refill of span with reusable pointers remaining on pointer free list")
	}

	if s != &emptymspan {
		// Mark this span as no longer cached.
		if s.sweepgen != mheap_.sweepgen+3 {
			throw("bad sweepgen in refill")
		}
		mheap_.central[spc].mcentral.uncacheSpan(s)

		// Count up how many slots were used and record it.
		stats := memstats.heapStats.acquire()
		slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
		atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], slotsUsed)

		// Flush tinyAllocs.
		if spc == tinySpanClass {
			atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
			c.tinyAllocs = 0
		}
		memstats.heapStats.release()

		// Count the allocs in inconsistent, internal stats.
		bytesAllocated := slotsUsed * int64(s.elemsize)
		gcController.totalAlloc.Add(bytesAllocated)

		// Clear the second allocCount just to be safe.
		s.allocCountBeforeCache = 0
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[spc].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}
	if s.allocCount == s.nelems {
		throw("span has no free space")
	}

	// Indicate that this span is cached and prevent asynchronous
	// sweeping in the next sweep phase.
	s.sweepgen = mheap_.sweepgen + 3

	// Store the current alloc count for accounting later.
	s.allocCountBeforeCache = s.allocCount

	// Update heapLive and flush scanAlloc.
	//
	// We have not yet allocated anything new into the span, but we
	// assume that all of its slots will get used, so this makes
	// heapLive an overestimate.
	//
	// When the span gets uncached, we'll fix up this overestimate
	// if necessary (see releaseAll).
	//
	// We pick an overestimate here because an underestimate leads
	// the pacer to believe that it's in better shape than it is,
	// which appears to lead to more memory used. See #53738 for
	// more details.
	usedBytes := uintptr(s.allocCount) * s.elemsize
	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
	c.scanAlloc = 0

	c.alloc[spc] = s
}
// allocLarge allocates a span for a large object of the given byte
// size; noscan indicates the object contains no pointers.
func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
	// If size+pageSize wraps around, the page round-up below would
	// overflow; treat such a request as out of memory.
	if size+pageSize < size {
		throw("out of memory")
	}
	// Round the byte size up to a whole number of pages.
	npages := size >> gc.PageShift
	if size&pageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*pageSize, npages)

	spc := makeSpanClass(0, noscan)
	s := mheap_.alloc(npages, spc)
	if s == nil {
		throw("out of memory")
	}

	// Count the alloc in consistent, external stats.
	stats := memstats.heapStats.acquire()
	atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
	atomic.Xadd64(&stats.largeAllocCount, 1)
	memstats.heapStats.release()

	// Count the alloc in inconsistent, internal stats.
	gcController.totalAlloc.Add(int64(npages * pageSize))

	// Update heapLive.
	gcController.update(int64(s.npages*pageSize), 0)

	// Put the large span in the mcentral swept list so that it's
	// visible to the background sweeper.
	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)

	// Adjust s.limit down to the object-containing part of the span.
	//
	// This is just to create a slightly tighter bound on the limit.
	// It's totally OK if the garbage collector, in particular
	// conservative scanning, temporarily observes an inflated
	// limit. It will simply mark the whole object or just skip it
	// since we're in the mark phase anyway.
	s.limit = s.base() + size
	s.initHeapBits()
	return s
}
// releaseAll returns all of c's cached spans to their mcentrals,
// flushes the accumulated allocation statistics, and clears the tiny
// allocator state and reusable object lists.
func (c *mcache) releaseAll() {
	// Take this opportunity to flush scanAlloc.
	scanAlloc := int64(c.scanAlloc)
	c.scanAlloc = 0

	sg := mheap_.sweepgen
	dHeapLive := int64(0)
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
			s.allocCountBeforeCache = 0

			// Adjust smallAllocCount for whatever was allocated.
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed)
			memstats.heapStats.release()

			// Adjust the actual allocs in inconsistent, internal stats.
			// We assumed earlier that the full span gets allocated.
			gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize))

			if s.sweepgen != sg+1 {
				// refill conservatively counted unallocated slots in gcController.heapLive.
				// Undo this.
				//
				// If this span was cached before sweep, then gcController.heapLive was totally
				// recomputed since caching this span, so we don't do this for stale spans.
				dHeapLive -= int64(s.nelems-s.allocCount) * int64(s.elemsize)
			}

			// Release the span to the mcentral.
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}

	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0

	// Flush tinyAllocs.
	stats := memstats.heapStats.acquire()
	atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
	c.tinyAllocs = 0
	memstats.heapStats.release()

	// Clear the reusable linked lists.
	// For noscan objects, the nodes of the linked lists are the reusable heap objects themselves,
	// so we can simply clear the linked list head pointers.
	// TODO(thepudds): consider having debug logging of a non-empty reusable lists getting cleared,
	// maybe based on the existing debugReusableLog.
	clear(c.reusableNoscan[:])

	// Update heapLive and heapScan.
	gcController.update(dHeapLive, scanAlloc)
}
// prepareForSweep flushes c if the system has entered a new sweep phase
// since c was populated. This must happen between the sweep phase
// starting and the first allocation from c.
func (c *mcache) prepareForSweep() {
	// Alternatively, instead of making sure we do this on every P
	// between starting the world and allocating on that P, we
	// could leave allocate-black on, allow allocation to continue
	// as usual, use a ragged barrier at the beginning of sweep to
	// ensure all cached spans are swept, and then disable
	// allocate-black. However, with this approach it's difficult
	// to avoid spilling mark bits into the *next* GC cycle.
	sg := mheap_.sweepgen
	flushGen := c.flushGen.Load()
	if flushGen == sg {
		// Already flushed for this sweep phase.
		return
	} else if flushGen != sg-2 {
		// flushGen may only lag sweepgen by exactly one GC cycle
		// (sweepgen advances by 2 per cycle); anything else is a bug.
		println("bad flushGen", flushGen, "in prepareForSweep; sweepgen", sg)
		throw("bad flushGen")
	}
	c.releaseAll()
	stackcache_clear(c)
	c.flushGen.Store(mheap_.sweepgen) // Synchronizes with gcStart
}
// addReusableNoscan adds a noscan object pointer to the reusable pointer free list
// for a span class.
func (c *mcache) addReusableNoscan(spc spanClass, ptr uintptr) {
	if !runtimeFreegcEnabled {
		return
	}
	// Push the object onto the front of the list; the link word is
	// stored in the first word of the freed object itself.
	head := gclinkptr(ptr)
	head.ptr().next = c.reusableNoscan[spc]
	c.reusableNoscan[spc] = head
}
// hasReusableNoscan reports whether there is a reusable object available for
// a noscan spc.
func (c *mcache) hasReusableNoscan(spc spanClass) bool {
	// A zero head pointer means the list is empty.
	return runtimeFreegcEnabled && c.reusableNoscan[spc] != 0
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral is two lists of mspans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
package runtime
import (
"internal/runtime/atomic"
"internal/runtime/gc"
"internal/runtime/sys"
)
// Central list of free objects of a given size.
type mcentral struct {
	_ sys.NotInHeap

	// spanclass is the span class (size class + noscan bit) of the
	// spans this mcentral manages.
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}
// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	// Register the spine lock of both generations of each span set
	// with the lock-rank checker.
	for i := range c.partial {
		lockInit(&c.partial[i].spineLock, lockRankSpanSetSpine)
		lockInit(&c.full[i].spineLock, lockRankSpanSetSpine)
	}
}
// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	// The unswept set is the generation opposite the swept one.
	gen := sweepgen / 2 % 2
	return &c.partial[1-gen]
}
// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	gen := sweepgen / 2 % 2
	return &c.partial[gen]
}
// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	gen := sweepgen / 2 % 2
	return &c.full[1-gen]
}
// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	gen := sweepgen / 2 % 2
	return &c.full[gen]
}
// Allocate a span to use in an mcache.
//
// Returns nil only if the heap itself is exhausted (grow failed).
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()]) * pageSize
	deductSweepCredit(spanBytes, 0)

	traceDone := false
	trace := traceAcquire()
	if trace.ok() {
		trace.GCSweepStart()
		traceRelease(trace)
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	var s *mspan
	var sl sweepLocker

	// Try partial swept spans first.
	sg := mheap_.sweepgen
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	sl = sweep.active.begin()
	if sl.valid {
		// Now try partial unswept spans.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.partialUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it and use it.
				s.sweep(true)
				sweep.active.end(sl)
				goto havespan
			}
			// We failed to get ownership of the span, which means it's being or
			// has been swept by an asynchronous sweeper that just couldn't remove it
			// from the unswept list. That sweeper took ownership of the span and
			// responsibility for either freeing it to the heap or putting it on the
			// right swept list. Either way, we should just ignore it (and it's unsafe
			// for us to do anything else).
		}
		// Now try full unswept spans, sweeping them and putting them into the
		// right list if we fail to get a span.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.fullUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it.
				s.sweep(true)
				// Check if there's any free space.
				freeIndex := s.nextFreeIndex()
				if freeIndex != s.nelems {
					s.freeindex = freeIndex
					sweep.active.end(sl)
					goto havespan
				}
				// Add it to the swept list, because sweeping didn't give us any free space.
				c.fullSwept(sg).push(s.mspan)
			}
			// See comment for partial unswept spans.
		}
		sweep.active.end(sl)
	}
	trace = traceAcquire()
	if trace.ok() {
		trace.GCSweepDone()
		traceDone = true
		traceRelease(trace)
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if !traceDone {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCSweepDone()
			traceRelease(trace)
		}
	}
	// Sanity-check that the span really does have free objects.
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || s.allocCount == s.nelems {
		throw("span has no free objects")
	}
	// Locate the 64-bit-aligned chunk of the alloc bitmap that
	// contains freeindex, expressed as a byte index.
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	// sweepgen == sg+1 means s was cached before the current sweep
	// phase began and has not been swept for it.
	stale := s.sweepgen == sg+1

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}
// grow obtains a fresh, empty span from the heap, sized and
// initialized for c's size class. Returns nil if the heap
// allocation fails.
func (c *mcentral) grow() *mspan {
	pages := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()])
	span := mheap_.alloc(pages, c.spanclass)
	if span == nil {
		return nil
	}
	span.initHeapBits()
	return span
}
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// GC checkmarks
//
// In a concurrent garbage collector, one worries about failing to mark
// a live object due to mutations without write barriers or bugs in the
// collector implementation. As a sanity check, the GC has a 'checkmark'
// mode that retraverses the object graph with the world stopped, to make
// sure that everything that should be marked is marked.
package runtime
import (
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// A checkmarksMap stores the GC marks in "checkmarks" mode. It is a
// per-arena bitmap with a bit for every word in the arena. The mark
// is stored on the bit corresponding to the first word of the marked
// allocation.
type checkmarksMap struct {
	_ sys.NotInHeap
	// b has one bit per pointer-sized word in the arena.
	b [heapArenaBytes / goarch.PtrSize / 8]uint8
}

// If useCheckmark is true, marking of an object uses the checkmark
// bits instead of the standard mark bits.
var useCheckmark = false
// startCheckmarks prepares for the checkmarks phase.
//
// The world must be stopped.
func startCheckmarks() {
	assertWorldStopped()

	// Reset the checkmark bitmap for one arena, allocating it lazily
	// on first use.
	resetArena := func(ai arenaIdx) {
		a := mheap_.arenas[ai.l1()][ai.l2()]
		cm := a.checkmarks
		if cm != nil {
			// Reuse the existing bitmap; just zero it.
			clear(cm.b[:])
			return
		}
		// First use: allocate the bitmap off-heap.
		cm = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*cm), 0, &memstats.gcMiscSys))
		if cm == nil {
			throw("out of memory allocating checkmarks bitmap")
		}
		a.checkmarks = cm
	}
	for _, ai := range mheap_.heapArenas {
		resetArena(ai)
	}
	for _, ai := range mheap_.userArenaArenas {
		resetArena(ai)
	}

	// Switch mark operations over to the checkmark bits.
	useCheckmark = true
}
// endCheckmarks ends the checkmarks phase.
func endCheckmarks() {
	if gcIsMarkDone() {
		useCheckmark = false
		return
	}
	// Mark work was left unflushed; the checkmark results would be
	// unreliable, so this is fatal.
	throw("GC work not flushed")
}
// setCheckmark throws if marking object is a checkmarks violation,
// and otherwise sets obj's checkmark. It returns true if obj was
// already checkmarked.
func setCheckmark(obj, base, off uintptr, mbits markBits) bool {
	if !mbits.isMarked() {
		// The regular mark bit is clear: concurrent marking missed
		// this object. Report everything we know and crash.
		printlock()
		print("runtime: checkmarks found unexpected unmarked object obj=", hex(obj), "\n")
		print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
		gcDumpObject("base", base, off)
		gcDumpObject("obj", obj, ^uintptr(0))
		getg().m.traceback = 2
		throw("checkmark found unmarked object")
	}
	p, bit := getCheckmark(obj)
	if p == nil {
		return false
	}
	if atomic.Load8(p)&bit != 0 {
		// Already checkmarked.
		return true
	}
	atomic.Or8(p, bit)
	return false
}
// getCheckmark returns the byte and bit mask addressing obj's
// checkmark bit, or (nil, 0) if obj does not point into a heap arena.
func getCheckmark(obj uintptr) (bytep *byte, mask uint8) {
	ai := arenaIndex(obj)
	arena := mheap_.arenas[ai.l1()][ai.l2()]
	if arena == nil {
		// obj doesn't point into the heap.
		return nil, 0
	}
	// One bit per pointer-sized word, relative to the arena base.
	word := (obj - alignDown(obj, heapArenaBytes)) / goarch.PtrSize
	bytep = &arena.checkmarks.b[word/8]
	mask = uint8(1) << (word % 8)
	return bytep, mask
}
// runCheckmark runs a full non-parallel, stop-the-world mark using
// checkmark bits, to check that we didn't forget to mark anything
// during the concurrent mark process.
//
// The world must be stopped to call runCheckmark.
func runCheckmark(prepareRootSet func(*gcWork)) {
	assertWorldStopped()

	// Turn off gcwaiting because that will force
	// gcDrain to return early if this goroutine
	// happens to have its preemption flag set.
	// This is fine because the world is stopped.
	// Restore it after we're done just to be safe.
	sched.gcwaiting.Store(false)
	startCheckmarks()
	gcResetMarkState()
	// Drain everything on this P's work buffer; the mark is
	// single-threaded.
	gcw := &getg().m.p.ptr().gcw
	prepareRootSet(gcw)
	gcDrain(gcw, 0)
	wbBufFlush1(getg().m.p.ptr())
	gcw.dispose()
	endCheckmarks()
	sched.gcwaiting.Store(true)
}
// checkFinalizersAndCleanups uses checkmarks to check for potential issues
// with the program's use of cleanups and finalizers.
func checkFinalizersAndCleanups() {
	assertWorldStopped()

	// Bit flags describing the kind of issue found for one special.
	const (
		reportCycle = 1 << iota
		reportTiny
	)

	// report records a single problematic cleanup/finalizer for later printing.
	type report struct {
		issues int
		ptr    uintptr
		sp     *special
	}
	var reports [50]report
	var nreports int
	var more bool
	var lastTinyBlock uintptr

	forEachSpecial(func(p uintptr, s *mspan, sp *special) bool {
		// N.B. The tiny block specials are sorted first in the specials list.
		if sp.kind == _KindSpecialTinyBlock {
			lastTinyBlock = s.base() + sp.offset
			return true
		}

		// We only care about finalizers and cleanups.
		if sp.kind != _KindSpecialFinalizer && sp.kind != _KindSpecialCleanup {
			return true
		}

		// Run a checkmark GC using this cleanup and/or finalizer as a root.
		if debug.checkfinalizers > 1 {
			print("Scan trace for cleanup/finalizer on ", hex(p), ":\n")
		}
		runCheckmark(func(gcw *gcWork) {
			switch sp.kind {
			case _KindSpecialFinalizer:
				gcScanFinalizer((*specialfinalizer)(unsafe.Pointer(sp)), s, gcw)
			case _KindSpecialCleanup:
				gcScanCleanup((*specialCleanup)(unsafe.Pointer(sp)), gcw)
			}
		})
		if debug.checkfinalizers > 1 {
			println()
		}

		// Now check to see if the object the special is attached to was marked.
		// The roots above do not directly mark p, so if it is marked, then p
		// must be reachable from the finalizer and/or cleanup, preventing
		// reclamation.
		bytep, mask := getCheckmark(p)
		if bytep == nil {
			return true
		}
		var issues int
		if atomic.Load8(bytep)&mask != 0 {
			issues |= reportCycle
		}
		// p lies within the most recent tiny block seen; it may share
		// the block with other values.
		if p >= lastTinyBlock && p < lastTinyBlock+maxTinySize {
			issues |= reportTiny
		}
		if issues != 0 {
			if nreports >= len(reports) {
				// Out of report slots; note the overflow and stop iterating.
				more = true
				return false
			}
			reports[nreports] = report{issues, p, sp}
			nreports++
		}
		return true
	})

	if nreports > 0 {
		lastPtr := uintptr(0)
		println("WARNING: LIKELY CLEANUP/FINALIZER ISSUES")
		println()
		for _, r := range reports[:nreports] {
			var ctx *specialCheckFinalizer
			var kind string
			if r.sp.kind == _KindSpecialFinalizer {
				kind = "finalizer"
				ctx = getCleanupContext(r.ptr, 0)
			} else {
				kind = "cleanup"
				ctx = getCleanupContext(r.ptr, ((*specialCleanup)(unsafe.Pointer(r.sp))).id)
			}

			// N.B. reports is sorted 'enough' that cleanups/finalizers on the same pointer will
			// appear consecutively because the specials list is sorted.
			if lastPtr != r.ptr {
				if lastPtr != 0 {
					println()
				}
				print("Value of type ", toRType(ctx.ptrType).string(), " at ", hex(r.ptr), "\n")
				if r.issues&reportCycle != 0 {
					if r.sp.kind == _KindSpecialFinalizer {
						println(" is reachable from finalizer")
					} else {
						println(" is reachable from cleanup or cleanup argument")
					}
				}
				if r.issues&reportTiny != 0 {
					println(" is in a tiny block with other (possibly long-lived) values")
				}
				if r.issues&reportTiny != 0 && r.issues&reportCycle != 0 {
					if r.sp.kind == _KindSpecialFinalizer {
						println(" may be in the same tiny block as finalizer")
					} else {
						println(" may be in the same tiny block as cleanup or cleanup argument")
					}
				}
			}
			println()

			println("Has", kind, "at", hex(uintptr(unsafe.Pointer(r.sp))))
			funcInfo := findfunc(ctx.funcPC)
			if funcInfo.valid() {
				file, line := funcline(funcInfo, ctx.funcPC)
				print(" ", funcname(funcInfo), "()\n")
				print(" ", file, ":", line, " +", hex(ctx.funcPC-funcInfo.entry()), "\n")
			} else {
				print(" <bad pc ", hex(ctx.funcPC), ">\n")
			}
			println("created at: ")
			createInfo := findfunc(ctx.createPC)
			if createInfo.valid() {
				file, line := funcline(createInfo, ctx.createPC)
				print(" ", funcname(createInfo), "()\n")
				print(" ", file, ":", line, " +", hex(ctx.createPC-createInfo.entry()), "\n")
			} else {
				print(" <bad pc ", hex(ctx.createPC), ">\n")
			}
			lastPtr = r.ptr
		}
		println()
		if more {
			println("... too many potential issues ...")
		}
		throw("detected possible issues with cleanups and/or finalizers")
	}
}
// forEachSpecial is an iterator over all specials.
//
// Used by debug.checkfinalizers.
//
// The world must be stopped.
func forEachSpecial(yield func(p uintptr, s *mspan, sp *special) bool) {
	assertWorldStopped()

	// Find the arena and page index into that arena for this shard.
	for _, ai := range mheap_.markArenas {
		ha := mheap_.arenas[ai.l1()][ai.l2()]

		// Construct slice of bitmap which we'll iterate over.
		for i := range ha.pageSpecials[:] {
			// Find set bits, which correspond to spans with specials.
			specials := atomic.Load8(&ha.pageSpecials[i])
			if specials == 0 {
				continue
			}
			for j := uint(0); j < 8; j++ {
				if specials&(1<<j) == 0 {
					continue
				}
				// Find the span for this bit.
				//
				// This value is guaranteed to be non-nil because having
				// specials implies that the span is in-use, and since we're
				// currently marking we can be sure that we don't have to worry
				// about the span being freed and re-used.
				s := ha.spans[uint(i)*8+j]
				// NOTE(review): no lock is taken while walking s.specials;
				// the world is stopped (asserted above), so presumably the
				// list cannot change concurrently — confirm.
				for sp := s.specials; sp != nil; sp = sp.next {
					if !yield(s.base()+sp.offset, s, sp) {
						return
					}
				}
			}
		}
	}
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/cpu"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
)
// AddCleanup attaches a cleanup function to ptr. Some time after ptr is no longer
// reachable, the runtime will call cleanup(arg) in a separate goroutine.
//
// A typical use is that ptr is an object wrapping an underlying resource (e.g.,
// a File object wrapping an OS file descriptor), arg is the underlying resource
// (e.g., the OS file descriptor), and the cleanup function releases the underlying
// resource (e.g., by calling the close system call).
//
// There are few constraints on ptr. In particular, multiple cleanups may be
// attached to the same pointer, or to different pointers within the same
// allocation.
//
// If ptr is reachable from cleanup or arg, ptr will never be collected
// and the cleanup will never run. As a protection against simple cases of this,
// AddCleanup panics if arg is equal to ptr.
//
// There is no specified order in which cleanups will run.
// In particular, if several objects point to each other and all become
// unreachable at the same time, their cleanups all become eligible to run
// and can run in any order. This is true even if the objects form a cycle.
//
// Cleanups run concurrently with any user-created goroutines.
// Cleanups may also run concurrently with one another (unlike finalizers).
// If a cleanup function must run for a long time, it should create a new goroutine
// to avoid blocking the execution of other cleanups.
//
// If ptr has both a cleanup and a finalizer, the cleanup will only run once
// it has been finalized and becomes unreachable without an associated finalizer.
//
// The cleanup(arg) call is not always guaranteed to run; in particular it is not
// guaranteed to run before program exit.
//
// Cleanups are not guaranteed to run if the size of T is zero bytes, because
// it may share same address with other zero-size objects in memory. See
// https://go.dev/ref/spec#Size_and_alignment_guarantees.
//
// It is not guaranteed that a cleanup will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// Note that because cleanups may execute arbitrarily far into the future
// after an object is no longer referenced, the runtime is allowed to perform
// a space-saving optimization that batches objects together in a single
// allocation slot. The cleanup for an unreferenced object in such an
// allocation may never run if it always exists in the same batch as a
// referenced object. Typically, this batching only happens for tiny
// (on the order of 16 bytes or less) and pointer-free objects.
//
// A cleanup may run as soon as an object becomes unreachable.
// In order to use cleanups correctly, the program must ensure that
// the object is reachable until it is safe to run its cleanup.
// Objects stored in global variables, or that can be found by tracing
// pointers from a global variable, are reachable. A function argument or
// receiver may become unreachable at the last point where the function
// mentions it. To ensure a cleanup does not get called prematurely,
// pass the object to the [KeepAlive] function after the last point
// where the object must remain reachable.
//
//go:nocheckptr
func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
	// This is marked nocheckptr because checkptr doesn't understand the
	// pointer manipulation done when looking at closure pointers.
	// Similar code in mbitmap.go works because the functions are
	// go:nosplit, which implies go:nocheckptr (CL 202158).

	// Explicitly force ptr and cleanup to escape to the heap.
	ptr = abi.Escape(ptr)
	cleanup = abi.Escape(cleanup)

	// The pointer to the object must be valid.
	if ptr == nil {
		panic("runtime.AddCleanup: ptr is nil")
	}
	usptr := uintptr(unsafe.Pointer(ptr))

	// Check that arg is not equal to ptr.
	argType := abi.TypeOf(arg)
	kind := argType.Kind()
	if kind == abi.Pointer || kind == abi.UnsafePointer {
		// Reinterpret arg's storage as a pointer to compare identities.
		if unsafe.Pointer(ptr) == *((*unsafe.Pointer)(unsafe.Pointer(&arg))) {
			panic("runtime.AddCleanup: ptr is equal to arg, cleanup will never run")
		}
	}
	if inUserArenaChunk(usptr) {
		// Arena-allocated objects are not eligible for cleanup.
		panic("runtime.AddCleanup: ptr is arena-allocated")
	}
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so no cleanup will ever run
		// (and we don't have the data structures to record them).
		// Return a noop cleanup.
		return Cleanup{}
	}

	// Create new storage for the argument.
	var argv *S
	if size := unsafe.Sizeof(arg); size < maxTinySize && argType.PtrBytes == 0 {
		// Side-step the tiny allocator to avoid liveness issues, since this box
		// will be treated like a root by the GC. We model the box as an array of
		// uintptrs to guarantee maximum allocator alignment.
		//
		// TODO(mknyszek): Consider just making space in cleanupFn for this. The
		// unfortunate part of this is it would grow specialCleanup by 16 bytes, so
		// while there wouldn't be an allocation, *every* cleanup would take the
		// memory overhead hit.
		box := new([maxTinySize / goarch.PtrSize]uintptr)
		argv = (*S)(unsafe.Pointer(box))
	} else {
		argv = new(S)
	}
	*argv = arg

	// Find the containing object.
	base, span, _ := findObject(usptr, 0, 0)
	if base == 0 {
		if isGoPointerWithoutSpan(unsafe.Pointer(ptr)) {
			// Cleanup is a noop.
			return Cleanup{}
		}
		panic("runtime.AddCleanup: ptr not in allocated block")
	}

	// Check that arg is not within ptr.
	if kind == abi.Pointer || kind == abi.UnsafePointer {
		argPtr := uintptr(*(*unsafe.Pointer)(unsafe.Pointer(&arg)))
		if argPtr >= base && argPtr < base+span.elemsize {
			// It's possible that both pointers are separate
			// parts of a tiny allocation, which is OK.
			// We side-stepped the tiny allocator above for
			// the allocation for the cleanup,
			// but the argument itself can still overlap
			// with the value to which the cleanup is attached.
			if span.spanclass != tinySpanClass {
				panic("runtime.AddCleanup: ptr is within arg, cleanup will never run")
			}
		}
	}

	// Check that the cleanup function doesn't close over the pointer.
	// Walk the pointer words of the closure object and panic if any of
	// them lands inside ptr's allocation.
	cleanupFV := unsafe.Pointer(*(**funcval)(unsafe.Pointer(&cleanup)))
	cBase, cSpan, _ := findObject(uintptr(cleanupFV), 0, 0)
	if cBase != 0 {
		tp := cSpan.typePointersOfUnchecked(cBase)
		for {
			var addr uintptr
			if tp, addr = tp.next(cBase + cSpan.elemsize); addr == 0 {
				break
			}
			ptr := *(*uintptr)(unsafe.Pointer(addr))
			if ptr >= base && ptr < base+span.elemsize {
				panic("runtime.AddCleanup: cleanup function closes over ptr, cleanup will never run")
			}
		}
	}

	// Create another G if necessary.
	if gcCleanups.needG() {
		gcCleanups.createGs()
	}

	id := addCleanup(unsafe.Pointer(ptr), cleanupFn{
		// Instantiate a caller function to call the cleanup, that is cleanup(*argv).
		//
		// TODO(mknyszek): This allocates because the generic dictionary argument
		// gets closed over, but callCleanup doesn't even use the dictionary argument,
		// so theoretically that could be removed, eliminating an allocation.
		call: callCleanup[S],
		fn:   *(**funcval)(unsafe.Pointer(&cleanup)),
		arg:  unsafe.Pointer(argv),
	})
	if debug.checkfinalizers != 0 {
		cleanupFn := *(**funcval)(unsafe.Pointer(&cleanup))
		setCleanupContext(unsafe.Pointer(ptr), abi.TypeFor[T](), sys.GetCallerPC(), cleanupFn.fn, id)
	}
	return Cleanup{
		id:  id,
		ptr: usptr,
	}
}
// callCleanup is a helper for calling cleanups in a polymorphic way.
//
// In practice, all it does is call fn(*arg). arg must be a *T.
//
//go:noinline
func callCleanup[T any](fn *funcval, arg unsafe.Pointer) {
	// Reinterpret the funcval pointer as a func(T) so it can be
	// invoked directly; then dereference arg as its concrete type.
	cleanup := *(*func(T))(unsafe.Pointer(&fn))
	cleanup(*(*T)(arg))
}
// Cleanup is a handle to a cleanup call for a specific object.
type Cleanup struct {
	// id identifies the cleanup special attached to ptr.
	// A zero id marks a no-op cleanup (see AddCleanup and Stop).
	id uint64
	// ptr contains the pointer to the object the cleanup is attached to.
	ptr uintptr
}
// Stop cancels the cleanup call. Stop will have no effect if the cleanup call
// has already been queued for execution (because ptr became unreachable).
// To guarantee that Stop removes the cleanup function, the caller must ensure
// that the pointer that was passed to AddCleanup is reachable across the call to Stop.
func (c Cleanup) Stop() {
	if c.id == 0 {
		// id is set to zero when the cleanup is a noop.
		return
	}

	// The following block removes the Special record of type cleanup for the object c.ptr.
	span := spanOfHeap(c.ptr)
	if span == nil {
		return
	}
	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := c.ptr - span.base()

	var found *special
	lock(&span.speciallock)

	// Locate the first cleanup special at this offset, then scan
	// forward for the one whose id matches, splicing it out of the
	// linked list when found.
	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCleanup)
	if exists {
		for {
			s := *iter
			if s == nil {
				// Reached the end of the linked list. Stop searching at this point.
				break
			}
			if offset == s.offset && _KindSpecialCleanup == s.kind &&
				(*specialCleanup)(unsafe.Pointer(s)).id == c.id {
				// The special is a cleanup and contains a matching cleanup id.
				*iter = s.next
				found = s
				break
			}
			if offset < s.offset || (offset == s.offset && _KindSpecialCleanup < s.kind) {
				// The special is outside the region specified for that kind of
				// special. The specials are sorted by kind.
				break
			}
			// Try the next special.
			iter = &s.next
		}
	}
	if span.specials == nil {
		spanHasNoSpecials(span)
	}
	unlock(&span.speciallock)
	releasem(mp)

	if found == nil {
		return
	}
	// Return the special's memory to its fixed-size allocator.
	lock(&mheap_.speciallock)
	mheap_.specialCleanupAlloc.free(unsafe.Pointer(found))
	unlock(&mheap_.speciallock)

	if debug.checkfinalizers != 0 {
		clearCleanupContext(c.ptr, c.id)
	}
}
// cleanupBlockSize is the byte size of a cleanupBlock, including its header.
const cleanupBlockSize = 512

// cleanupBlock is a block of cleanups to be executed.
//
// cleanupBlock is allocated from non-GC'd memory, so any heap pointers
// must be specially handled. The GC and cleanup queue currently assume
// that the cleanup queue does not grow during marking (but it can shrink).
type cleanupBlock struct {
	cleanupBlockHeader
	// cleanups fills the rest of the block after the header.
	cleanups [(cleanupBlockSize - unsafe.Sizeof(cleanupBlockHeader{})) / unsafe.Sizeof(cleanupFn{})]cleanupFn
}
// cleanupFnPtrMask marks all three words of cleanupFn as pointers.
var cleanupFnPtrMask = [...]uint8{0b111}

// cleanupFn represents a cleanup function with its argument, yet to be called.
type cleanupFn struct {
	// call is an adapter function that understands how to safely call fn(*arg).
	call func(*funcval, unsafe.Pointer)
	fn   *funcval       // cleanup function passed to AddCleanup.
	arg  unsafe.Pointer // pointer to argument to pass to cleanup function.
}

// cleanupBlockPtrMask is a pointer bitmap covering a whole cleanupBlock.
var cleanupBlockPtrMask [cleanupBlockSize / goarch.PtrSize / 8]byte

// cleanupBlockHeader is the bookkeeping prefix of every cleanupBlock.
type cleanupBlockHeader struct {
	_ sys.NotInHeap
	lfnode
	// alllink chains every block ever allocated; see the queue's all list.
	alllink *cleanupBlock

	// n is sometimes accessed atomically.
	//
	// The invariant depends on what phase the garbage collector is in.
	// During the sweep phase (gcphase == _GCoff), each block has exactly
	// one owner, so it's always safe to update this without atomics.
	// But if this *could* be updated during the mark phase, it must be
	// updated atomically to synchronize with the garbage collector
	// scanning the block as a root.
	n uint32
}
// enqueue appends a single cleanup to the block and reports whether
// this call filled it. Reporting fullness here is a little odd, but
// it lets callers flush full blocks eagerly so cleanups start running
// as soon as possible.
//
// Must only be called if the GC is in the sweep phase (gcphase == _GCoff),
// because it does not synchronize with the garbage collector.
func (b *cleanupBlock) enqueue(c cleanupFn) bool {
	i := b.n
	b.cleanups[i] = c
	b.n = i + 1
	return b.full()
}
// full reports whether the block has no room for more cleanups.
func (b *cleanupBlock) full() bool {
	return uint32(len(b.cleanups)) == b.n
}

// empty reports whether the block holds no cleanups at all.
func (b *cleanupBlock) empty() bool {
	return b.n == 0
}
// take moves as many cleanups as possible from b into a.
// Afterward either a is full, b is empty, or both.
func (a *cleanupBlock) take(b *cleanupBlock) {
	dst := a.cleanups[a.n:]
	if uint32(len(dst)) >= b.n {
		// Take all.
		copy(dst, b.cleanups[:])
		a.n += b.n
		b.n = 0
	} else {
		// Partial take. Copy from the tail to avoid having
		// to move more memory around.
		copy(dst, b.cleanups[b.n-uint32(len(dst)):b.n])
		a.n = uint32(len(a.cleanups))
		b.n -= uint32(len(dst))
	}
}
// cleanupQueue is a queue of ready-to-run cleanup functions.
type cleanupQueue struct {
	// Stack of full cleanup blocks.
	full      lfstack
	workUnits atomic.Uint64 // length of full; decrement before pop from full, increment after push to full
	_         [cpu.CacheLinePadSize - unsafe.Sizeof(lfstack(0)) - unsafe.Sizeof(atomic.Uint64{})]byte

	// Stack of free cleanup blocks.
	free lfstack

	// flushed indicates whether all local cleanupBlocks have been
	// flushed, and we're in a period of time where this condition is
	// stable (after the last sweeper, before the next sweep phase
	// begins).
	flushed atomic.Bool // Next to free because frequently accessed together.
	_       [cpu.CacheLinePadSize - unsafe.Sizeof(lfstack(0)) - 1]byte

	// Linked list of all cleanup blocks.
	all atomic.UnsafePointer // *cleanupBlock
	_   [cpu.CacheLinePadSize - unsafe.Sizeof(atomic.UnsafePointer{})]byte

	// Goroutine block state.
	lock mutex

	// sleeping is the list of sleeping cleanup goroutines.
	//
	// Protected by lock.
	sleeping gList

	// asleep is the number of cleanup goroutines sleeping.
	//
	// Read without lock, written only with the lock held.
	// When the lock is held, the lock holder may only observe
	// asleep.Load() == sleeping.n.
	//
	// To make reading without the lock safe as a signal to wake up
	// a goroutine and handle new work, it must always be greater
	// than or equal to sleeping.n. In the periods of time that it
	// is strictly greater, it may cause spurious calls to wake.
	asleep atomic.Uint32

	// running indicates the number of cleanup goroutines actively
	// executing user cleanup functions at any point in time.
	//
	// Read and written to without lock.
	running atomic.Uint32

	// ng is the number of cleanup goroutines.
	//
	// Read without lock, written only with lock held.
	ng atomic.Uint32

	// needg is the number of new cleanup goroutines that
	// need to be created.
	//
	// Read without lock, written only with lock held.
	needg atomic.Uint32

	// Cleanup queue stats.

	// queued represents a monotonic count of queued cleanups. This is sharded across
	// Ps via the field cleanupsQueued in each p, so reading just this value is insufficient.
	// In practice, this value only includes the queued count of dead Ps.
	//
	// Writes are protected by STW.
	queued uint64

	// executed is a monotonic count of executed cleanups.
	//
	// Read and updated atomically.
	executed atomic.Uint64
}
// addWork indicates that n units of parallelizable work have been added to the queue.
func (q *cleanupQueue) addWork(n int) {
	q.workUnits.Add(int64(n))
}
// tryTakeWork is an attempt by a cleanup goroutine to claim one unit
// of work from the queue. It reports false when no work is available.
func (q *cleanupQueue) tryTakeWork() bool {
	for {
		n := q.workUnits.Load()
		if n == 0 {
			return false
		}
		// CAS instead of Add so workUnits can never go negative.
		if q.workUnits.CompareAndSwap(n, n-1) {
			return true
		}
		// Lost a race with another goroutine; reload and retry.
	}
}
// enqueue queues a single cleanup for execution.
//
// Called by the sweeper, and only the sweeper.
func (q *cleanupQueue) enqueue(c cleanupFn) {
	// Disable preemption so the P (and its cleanup block) can't
	// change out from under us.
	mp := acquirem()
	pp := mp.p.ptr()
	b := pp.cleanups
	if b == nil {
		// This P has no active block; a new partially-filled block
		// will exist, so clear the "all flushed" flag.
		if q.flushed.Load() {
			q.flushed.Store(false)
		}
		// Reuse a free block if possible; otherwise allocate one
		// off-heap and link it onto the all list.
		b = (*cleanupBlock)(q.free.pop())
		if b == nil {
			b = (*cleanupBlock)(persistentalloc(cleanupBlockSize, tagAlign, &memstats.gcMiscSys))
			for {
				next := (*cleanupBlock)(q.all.Load())
				b.alllink = next
				if q.all.CompareAndSwap(unsafe.Pointer(next), unsafe.Pointer(b)) {
					break
				}
			}
		}
		pp.cleanups = b
	}
	if full := b.enqueue(c); full {
		// Flush the now-full block eagerly so cleanup goroutines
		// can start on it right away.
		q.full.push(&b.lfnode)
		pp.cleanups = nil
		q.addWork(1)
	}
	pp.cleanupsQueued++
	releasem(mp)
}
// dequeue pops a block of cleanups from the queue. Blocks until one is available
// and never returns nil.
func (q *cleanupQueue) dequeue() *cleanupBlock {
	for {
		if q.tryTakeWork() {
			// Guaranteed to be non-nil.
			return (*cleanupBlock)(q.full.pop())
		}
		lock(&q.lock)
		// Increment asleep first. We may have to undo this if we abort the sleep.
		// We must update asleep first because the scheduler might not try to wake
		// us up when work comes in between the last check of workUnits and when we
		// go to sleep. (It may see asleep as 0.) By incrementing it here, we guarantee
		// after this point that if new work comes in, someone will try to grab the
		// lock and wake us. However, this also means that if we back out, we may cause
		// someone to spuriously grab the lock and try to wake us up, only to fail.
		// This should be very rare because the window here is incredibly small: the
		// window between now and when we decrement q.asleep below.
		q.asleep.Add(1)

		// Re-check workUnits under the lock and with asleep updated. If it's still zero,
		// then no new work came in, and it's safe for us to go to sleep. If new work
		// comes in after this point, then the scheduler will notice that we're sleeping
		// and wake us up.
		if q.workUnits.Load() > 0 {
			// Undo the q.asleep update and try to take work again.
			q.asleep.Add(-1)
			unlock(&q.lock)
			continue
		}
		// Park until wake pops us off q.sleeping; then loop and retry.
		q.sleeping.push(getg())
		goparkunlock(&q.lock, waitReasonCleanupWait, traceBlockSystemGoroutine, 1)
	}
}
// flush pushes all active cleanup blocks to the full list and wakes up cleanup
// goroutines to handle them.
//
// Must only be called at a point when we can guarantee that no more cleanups
// are being queued, such as after the final sweeper for the cycle is done
// but before the next mark phase.
func (q *cleanupQueue) flush() {
	mp := acquirem()
	// Counters used to verify below that every P was accounted for.
	flushed := 0
	emptied := 0
	missing := 0

	// Coalesce the partially-filled blocks to present a more accurate picture of demand.
	// We use the number of coalesced blocks to process as a signal for demand to create
	// new cleanup goroutines.
	var cb *cleanupBlock
	for _, pp := range allp {
		if pp == nil {
			// This function is reachable via mallocgc in the
			// middle of procresize, when allp has been resized,
			// but the new Ps not allocated yet.
			missing++
			continue
		}
		b := pp.cleanups
		if b == nil {
			missing++
			continue
		}
		pp.cleanups = nil
		if cb == nil {
			cb = b
			continue
		}
		// N.B. After take, either cb is full, b is empty, or both.
		cb.take(b)
		if cb.full() {
			q.full.push(&cb.lfnode)
			flushed++
			cb = b
			b = nil
		}
		if b != nil && b.empty() {
			q.free.push(&b.lfnode)
			emptied++
		}
	}
	// Push the final (possibly partial) coalesced block, if any.
	if cb != nil {
		q.full.push(&cb.lfnode)
		flushed++
	}
	if flushed != 0 {
		q.addWork(flushed)
	}
	if flushed+emptied+missing != len(allp) {
		throw("failed to correctly flush all P-owned cleanup blocks")
	}
	q.flushed.Store(true)
	releasem(mp)
}
// needsWake returns true if cleanup goroutines may need to be awoken or created to handle cleanup load.
// True when there is pending work and either some goroutine sleeps or more may be created.
func (q *cleanupQueue) needsWake() bool {
	return q.workUnits.Load() > 0 && (q.asleep.Load() > 0 || q.ng.Load() < maxCleanupGs())
}
// wake wakes up one or more goroutines to process the cleanup queue. If there aren't
// enough sleeping goroutines to handle the demand, wake will arrange for new goroutines
// to be created.
func (q *cleanupQueue) wake() {
	lock(&q.lock)

	// Figure out how many goroutines to wake, and how many extra goroutines to create.
	// Wake one goroutine for each work unit.
	var wake, extra uint32
	work := q.workUnits.Load()
	asleep := uint64(q.asleep.Load())
	if work > asleep {
		// Wake everyone who's asleep; the rest of the demand
		// becomes a request for new goroutines.
		wake = uint32(asleep)
		if work > uint64(math.MaxUint32) {
			// Protect against overflow.
			extra = math.MaxUint32
		} else {
			extra = uint32(work - asleep)
		}
	} else {
		wake = uint32(work)
		extra = 0
	}
	if extra != 0 {
		// Signal that we should create new goroutines, one for each extra work unit,
		// up to maxCleanupGs.
		newg := min(extra, maxCleanupGs()-q.ng.Load())
		if newg > 0 {
			q.needg.Add(int32(newg))
		}
	}
	if wake == 0 {
		// Nothing to do.
		unlock(&q.lock)
		return
	}
	// Take ownership of waking 'wake' goroutines.
	//
	// Nobody else will wake up these goroutines, so they're guaranteed
	// to be sitting on q.sleeping, waiting for us to wake them.
	q.asleep.Add(-int32(wake))

	// Collect them and schedule them.
	var list gList
	for range wake {
		list.push(q.sleeping.pop())
	}
	unlock(&q.lock)
	injectglist(&list)
}
// needG reports whether more cleanup goroutines should be created.
func (q *cleanupQueue) needG() bool {
	n := q.ng.Load()
	switch {
	case n >= maxCleanupGs():
		// Already at the cap.
		return false
	case n == 0:
		// Make sure we have at least one.
		return true
	}
	return q.needg.Load() > 0
}
// createGs spawns as many new cleanup goroutines as were requested via
// needg, capped at maxCleanupGs, always keeping at least one alive.
func (q *cleanupQueue) createGs() {
	lock(&q.lock)
	have := q.ng.Load()
	// Consume the pending request count, clamped to the remaining headroom.
	need := min(q.needg.Swap(0), maxCleanupGs()-have)
	if have == 0 && need == 0 {
		// Make sure we have at least one.
		need = 1
	}
	if need > 0 {
		q.ng.Add(int32(need))
	}
	unlock(&q.lock)
	// Spawn outside the lock.
	for range need {
		go runCleanups()
	}
}
// beginRunningCleanups marks the current goroutine as actively
// executing user cleanup functions.
func (q *cleanupQueue) beginRunningCleanups() {
	// Update runningCleanups and running atomically with respect
	// to goroutine profiles by disabling preemption.
	mp := acquirem()
	getg().runningCleanups.Store(true)
	q.running.Add(1)
	releasem(mp)
}
// endRunningCleanups clears the mark set by beginRunningCleanups.
func (q *cleanupQueue) endRunningCleanups() {
	// Update runningCleanups and running atomically with respect
	// to goroutine profiles by disabling preemption.
	mp := acquirem()
	getg().runningCleanups.Store(false)
	q.running.Add(-1)
	releasem(mp)
}
// readQueueStats returns estimates of the total number of cleanups
// ever queued and ever executed.
func (q *cleanupQueue) readQueueStats() (queued, executed uint64) {
	executed = q.executed.Load()
	queued = q.queued

	// N.B. This is inconsistent, but that's intentional. It's just an estimate.
	// Read this _after_ reading executed to decrease the chance that we observe
	// an inconsistency in the statistics (executed > queued).
	for _, pp := range allp {
		queued += pp.cleanupsQueued
	}
	return
}
// maxCleanupGs returns the cap on the number of cleanup goroutines:
// a quarter of GOMAXPROCS, but never fewer than one.
func maxCleanupGs() uint32 {
	// N.B. Left as a function to make changing the policy easier.
	n := gomaxprocs / 4
	if n < 1 {
		n = 1
	}
	return uint32(n)
}
// gcCleanups is the global cleanup queue.
var gcCleanups cleanupQueue
// runCleanups is the entrypoint for all cleanup-running goroutines.
// It loops forever, dequeuing full blocks and executing each cleanup.
func runCleanups() {
	for {
		b := gcCleanups.dequeue()
		if raceenabled {
			// Approximately: adds a happens-before edge between the cleanup
			// argument being mutated and the call to the cleanup below.
			racefingo()
		}

		gcCleanups.beginRunningCleanups()
		for i := 0; i < int(b.n); i++ {
			c := b.cleanups[i]
			// Drop the block's reference so the closure and argument
			// can be collected once the cleanup completes.
			b.cleanups[i] = cleanupFn{}

			var racectx uintptr
			if raceenabled {
				// Enter a new race context so the race detector can catch
				// potential races between cleanups, even if they execute on
				// the same goroutine.
				//
				// Synchronize on fn. This would fail to find races on the
				// closed-over values in fn (suppose arg is passed to multiple
				// AddCleanup calls) if arg was not unique, but it is.
				racerelease(unsafe.Pointer(c.arg))
				racectx = raceEnterNewCtx()
				raceacquire(unsafe.Pointer(c.arg))
			}

			// Execute the next cleanup.
			c.call(c.fn, c.arg)

			if raceenabled {
				// Restore the old context.
				raceRestoreCtx(racectx)
			}
		}
		gcCleanups.endRunningCleanups()
		gcCleanups.executed.Add(int64(b.n))

		atomic.Store(&b.n, 0) // Synchronize with markroot. See comment in cleanupBlockHeader.
		gcCleanups.free.push(&b.lfnode)
	}
}
// blockUntilEmpty waits until every queued cleanup has been executed, or
// until timeout (nanoseconds) elapses. It reports whether the queue was
// observed empty.
//
// This is used by the sync and unique tests.
func (q *cleanupQueue) blockUntilEmpty(timeout int64) bool {
	start := nanotime()
	for nanotime()-start < timeout {
		// The queue is drained when everything has been flushed, no
		// full blocks remain, and every cleanup goroutine is asleep.
		// A goroutine that is awake may be actively working on a block.
		lock(&q.lock)
		drained := q.flushed.Load() && q.full.empty() && uint32(q.sleeping.size) == q.ng.Load()
		unlock(&q.lock)
		if drained {
			return true
		}
		Gosched()
	}
	return false
}
// unique_runtime_blockUntilEmptyCleanupQueue exposes blockUntilEmpty on the
// global cleanup queue to package unique via linkname, for its tests.
//
//go:linkname unique_runtime_blockUntilEmptyCleanupQueue unique.runtime_blockUntilEmptyCleanupQueue
func unique_runtime_blockUntilEmptyCleanupQueue(timeout int64) bool {
	return gcCleanups.blockUntilEmpty(timeout)
}
// sync_test_runtime_blockUntilEmptyCleanupQueue exposes blockUntilEmpty on
// the global cleanup queue to the sync tests via linkname.
//
//go:linkname sync_test_runtime_blockUntilEmptyCleanupQueue sync_test.runtime_blockUntilEmptyCleanupQueue
func sync_test_runtime_blockUntilEmptyCleanupQueue(timeout int64) bool {
	return gcCleanups.blockUntilEmpty(timeout)
}
// raceEnterNewCtx allocates a fresh race context and installs it on the
// current goroutine, returning the previous racectx so the caller can
// restore it later with raceRestoreCtx.
//
// Must be running on a user goroutine. nosplit to match other race
// instrumentation.
//
//go:nosplit
func raceEnterNewCtx() uintptr {
	// The existing context serves as the spawn context, and gp.gopc as
	// the spawn PC, so race reports read a little nicer (they point at
	// AddCleanup, where the goroutines are created).
	//
	// The race detector must also be told that the goroutine stack is
	// now owned exclusively by the new context, or it will report
	// false positives on stack locations. We do that by marking the
	// stack freed under the old context and re-marking it allocated
	// under the new one. Crucially, between those two steps there must
	// be (1) no race operations and (2) no stack changes. (1) holds
	// because runtime code carries no implicit race instrumentation;
	// for (2) we defensively become non-preemptible so the GC can't
	// stop us, and rely on racefree, racemalloc, and the racectx calls
	// being nosplit.
	mp := acquirem()
	gp := getg()
	oldCtx := gp.racectx
	racefree(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	gp.racectx = racectxstart(gp.gopc, oldCtx)
	racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	releasem(mp)
	return oldCtx
}
// raceRestoreCtx tears down the goroutine's current race context and
// reinstalls ctx, which must be the value returned by the matching
// raceEnterNewCtx call.
//
// Must be running on a user goroutine. nosplit to match other race
// instrumentation.
//
//go:nosplit
func raceRestoreCtx(ctx uintptr) {
	mp := acquirem()
	gp := getg()
	// Hand the stack back to the restored context; see raceEnterNewCtx
	// for why the stack is freed and re-allocated around the switch.
	racefree(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	racectxend(gp.racectx)
	racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	gp.racectx = ctx
	releasem(mp)
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// OS memory management abstraction layer
//
// Regions of the address space managed by the runtime may be in one of four
// states at any given time:
// 1) None - Unreserved and unmapped, the default state of any region.
// 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
// Does not count against the process' memory footprint.
// 3) Prepared - Reserved, intended not to be backed by physical memory (though
// an OS may implement this lazily). Can transition efficiently to
// Ready. Accessing memory in such a region is undefined (may
// fault, may give back unexpected zeroes, etc.).
// 4) Ready - may be accessed safely.
//
// This set of states is more than strictly necessary to support all the
// currently supported platforms. One could get by with just None, Reserved, and
// Ready. However, the Prepared state gives us flexibility for performance
// purposes. For example, on POSIX-y operating systems, Reserved is usually a
// private anonymous mmap'd region with PROT_NONE set, and to transition
// to Ready would require setting PROT_READ|PROT_WRITE. However the
// underspecification of Prepared lets us use just MADV_FREE to transition from
// Ready to Prepared. Thus, with the Prepared state, we can set the permission
// bits just once early on, and efficiently tell the OS that it's free to
// take pages away from us when we don't strictly need them.
//
// This file defines a cross-OS interface for a common set of helpers
// that transition memory regions between these states. The helpers call into
// OS-specific implementations that handle errors, while the interface boundary
// implements cross-OS functionality, like updating runtime accounting.
// sysAlloc transitions an OS-chosen region of memory from None to Ready.
// More specifically, it obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte. This memory is always immediately available for use.
//
// sysStat must be non-nil.
//
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysAlloc(n uintptr, sysStat *sysMemStat, vmaName string) unsafe.Pointer {
	// Charge the n bytes to sysStat and to the GC's mapped-Ready
	// footprint. Note that the accounting happens even if sysAllocOS
	// fails and returns nil.
	sysStat.add(int64(n))
	gcController.mappedReady.Add(int64(n))
	p := sysAllocOS(n, vmaName)
	// When using ASAN leak detection, we must tell ASAN about
	// cases where we store pointers in mmapped memory.
	if asanenabled {
		lsanregisterrootregion(p, n)
	}
	return p
}
// sysUnused transitions a memory region from Ready to Prepared. It notifies the
// operating system that the physical pages backing this memory region are no
// longer needed and can be reused for other purposes. The contents of a
// sysUnused memory region are considered forfeit and the region must not be
// accessed again until sysUsed is called.
func sysUnused(v unsafe.Pointer, n uintptr) {
	// The region leaves the Ready state, so remove it from the GC's
	// mapped-Ready accounting before notifying the OS.
	gcController.mappedReady.Add(-int64(n))
	sysUnusedOS(v, n)
}
// needZeroAfterSysUnused reports whether memory returned by sysUnused must be
// zeroed for use.
func needZeroAfterSysUnused() bool {
	// Purely OS-specific; defer to the per-OS implementation.
	return needZeroAfterSysUnusedOS()
}
// sysUsed transitions a memory region from Prepared to Ready. It notifies the
// operating system that the memory region is needed and ensures that the region
// may be safely accessed. This is typically a no-op on systems that don't have
// an explicit commit step and hard over-commit limits, but is critical on
// Windows, for example.
//
// This operation is idempotent for memory already in the Prepared state, so
// it is safe to refer, with v and n, to a range of memory that includes both
// Prepared and Ready memory. However, the caller must provide the exact amount
// of Prepared memory for accounting purposes.
func sysUsed(v unsafe.Pointer, n, prepared uintptr) {
	// Only the Prepared portion of the range becomes newly Ready, so
	// only `prepared` bytes are added to the mapped-Ready accounting,
	// while the whole range is passed to the OS.
	gcController.mappedReady.Add(int64(prepared))
	sysUsedOS(v, n)
}
// sysHugePage does not transition memory regions, but instead provides a
// hint to the OS that it would be more efficient to back this memory region
// with pages of a larger size transparently.
func sysHugePage(v unsafe.Pointer, n uintptr) {
	// Pure hint: no state transition, so no accounting to update.
	sysHugePageOS(v, n)
}
// sysNoHugePage does not transition memory regions, but instead provides a
// hint to the OS that it would be less efficient to back this memory region
// with pages of a larger size transparently.
func sysNoHugePage(v unsafe.Pointer, n uintptr) {
	// Pure hint: no state transition, so no accounting to update.
	sysNoHugePageOS(v, n)
}
// sysHugePageCollapse attempts to immediately back the provided memory region
// with huge pages. It is best-effort and may fail silently.
func sysHugePageCollapse(v unsafe.Pointer, n uintptr) {
	// Best-effort hint: no state transition, so no accounting to update.
	sysHugePageCollapseOS(v, n)
}
// sysFree transitions a memory region from any state to None. Therefore, it
// returns memory unconditionally. It is used if an out-of-memory error has been
// detected midway through an allocation or to carve out an aligned section of
// the address space. It is okay if sysFree is a no-op only if sysReserve always
// returns a memory region aligned to the heap allocator's alignment
// restrictions.
//
// sysStat must be non-nil.
//
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
	// When using ASAN leak detection, the memory being freed is
	// known by the sanitizer. We need to unregister it so it's
	// not accessed by it.
	if asanenabled {
		lsanunregisterrootregion(v, n)
	}
	// Remove the range from both statistics before returning it to
	// the OS.
	sysStat.add(-int64(n))
	gcController.mappedReady.Add(-int64(n))
	sysFreeOS(v, n)
}
// sysFault transitions a memory region from Ready to Reserved. It
// marks a region such that it will always fault if accessed. Used only for
// debugging the runtime.
//
// TODO(mknyszek): Currently it's true that all uses of sysFault transition
// memory from Ready to Reserved, but this may not be true in the future
// since on every platform the operation is much more general than that.
// If a transition from Prepared is ever introduced, create a new function
// that elides the Ready state accounting.
func sysFault(v unsafe.Pointer, n uintptr) {
	// The region leaves the Ready state, so deduct it from the
	// mapped-Ready accounting before revoking access.
	gcController.mappedReady.Add(-int64(n))
	sysFaultOS(v, n)
}
// sysReserve transitions a memory region from None to Reserved. It reserves
// address space in such a way that it would cause a fatal fault upon access
// (either via permissions or not committing the memory). Such a reservation is
// thus never backed by physical memory.
//
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but sysReserve can still choose another
// location if that one is unavailable.
//
// NOTE: sysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysReserve.
func sysReserve(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
	// No accounting here: Reserved memory does not count against the
	// process' memory footprint.
	p := sysReserveOS(v, n, vmaName)
	// When using ASAN leak detection, we must tell ASAN about
	// cases where we store pointers in mmapped memory.
	if asanenabled {
		lsanregisterrootregion(p, n)
	}
	return p
}
// sysMap transitions a memory region from Reserved to Prepared. It ensures the
// memory region can be efficiently transitioned to Ready.
//
// sysStat must be non-nil.
func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat, vmaName string) {
	// Prepared memory is charged to sysStat, but it is not yet Ready,
	// so gcController.mappedReady is not updated until sysUsed.
	sysStat.add(int64(n))
	sysMapOS(v, n, vmaName)
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/atomic"
"unsafe"
)
// Linux errno values returned by mmap and friends.
const (
	_EACCES = 13 // Permission denied
	_EINVAL = 22 // Invalid argument
)
// Don't split the stack as this method may be invoked without a valid G, which
// prevents us from allocating more stack.
//
//go:nosplit
func sysAllocOS(n uintptr, vmaName string) unsafe.Pointer {
	// Fresh anonymous private mapping, readable and writable.
	p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
	if err != 0 {
		if err == _EACCES {
			print("runtime: mmap: access denied\n")
			exit(2)
		}
		if err == _EAGAIN {
			print("runtime: mmap: too much locked memory (check 'ulimit -l').\n")
			exit(2)
		}
		// Any other error (e.g. ENOMEM) is reported to the caller as nil.
		return nil
	}
	setVMAName(p, n, vmaName)
	return p
}
// adviseUnused is the madvise advice currently used by sysUnusedOS. It
// starts at _MADV_FREE and is downgraded (to _MADV_DONTNEED, then to
// madviseUnsupported) as kernel support is probed at runtime.
var adviseUnused = uint32(_MADV_FREE)

// madviseUnsupported records that madvise is not available at all.
const madviseUnsupported = 0
func sysUnusedOS(v unsafe.Pointer, n uintptr) {
	if uintptr(v)&(physPageSize-1) != 0 || n&(physPageSize-1) != 0 {
		// madvise will round this to any physical page
		// *covered* by this range, so an unaligned madvise
		// will release more memory than intended.
		throw("unaligned sysUnused")
	}
	// Pick the madvise flavor: start from the cached preference, and
	// honor GODEBUG=madvdontneed unless madvise is known-unsupported.
	advise := atomic.Load(&adviseUnused)
	if debug.madvdontneed != 0 && advise != madviseUnsupported {
		advise = _MADV_DONTNEED
	}
	// Each case falls through to the next weaker mechanism when the
	// kernel rejects the current one, caching the downgrade in
	// adviseUnused for subsequent calls.
	switch advise {
	case _MADV_FREE:
		if madvise(v, n, _MADV_FREE) == 0 {
			break
		}
		atomic.Store(&adviseUnused, _MADV_DONTNEED)
		fallthrough
	case _MADV_DONTNEED:
		// MADV_FREE was added in Linux 4.5. Fall back on MADV_DONTNEED if it's
		// not supported.
		if madvise(v, n, _MADV_DONTNEED) == 0 {
			break
		}
		atomic.Store(&adviseUnused, madviseUnsupported)
		fallthrough
	case madviseUnsupported:
		// Since Linux 3.18, support for madvise is optional.
		// Fall back on mmap if it's not supported.
		// _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE will unmap all the
		// pages in the old mapping, and remap the memory region.
		p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
		if err == 0 && p != nil {
			setVMAName(p, n, "unused")
		}
	}
	if debug.harddecommit > 0 {
		// GODEBUG=harddecommit: remap the range PROT_NONE so that any
		// stray access faults.
		p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
		if p != v || err != 0 {
			throw("runtime: cannot disable permissions in address space")
		}
		setVMAName(p, n, "unused")
	}
}
func sysUsedOS(v unsafe.Pointer, n uintptr) {
	if debug.harddecommit > 0 {
		// Under GODEBUG=harddecommit, sysUnusedOS mapped the range
		// PROT_NONE, so it must be remapped read/write before use.
		p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
		if err == _ENOMEM {
			throw("runtime: out of memory")
		}
		if p != v || err != 0 {
			throw("runtime: cannot remap pages in address space")
		}
		setVMAName(p, n, "used")
		return
	}
	// Otherwise this is a no-op; there is no explicit commit step.
}
// sysHugePageOS advises the kernel to back [v, v+n) with transparent huge
// pages, shrinking the range inward to huge-page-aligned bounds first so
// that no memory outside the requested region is advised.
func sysHugePageOS(v unsafe.Pointer, n uintptr) {
	if physHugePageSize == 0 {
		return
	}
	// Round the start up and the end down to huge page boundaries.
	start := alignUp(uintptr(v), physHugePageSize)
	end := alignDown(uintptr(v)+n, physHugePageSize)
	if start < end {
		madvise(unsafe.Pointer(start), end-start, _MADV_HUGEPAGE)
	}
}
func sysNoHugePageOS(v unsafe.Pointer, n uintptr) {
	if uintptr(v)&(physPageSize-1) != 0 {
		// The Linux implementation requires that the address
		// addr be page-aligned, and allows length to be zero.
		throw("unaligned sysNoHugePageOS")
	}
	madvise(v, n, _MADV_NOHUGEPAGE)
}
func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) {
	if uintptr(v)&(physPageSize-1) != 0 {
		// The Linux implementation requires that the address
		// addr be page-aligned, and allows length to be zero.
		throw("unaligned sysHugePageCollapseOS")
	}
	// No huge page support on this machine; nothing to collapse into.
	if physHugePageSize == 0 {
		return
	}
	// N.B. If you find yourself debugging this code, note that
	// this call can fail with EAGAIN because it's best-effort.
	// Also, when it returns an error, it's only for the last
	// huge page in the region requested.
	//
	// It can also sometimes return EINVAL if the corresponding
	// region hasn't been backed by physical memory. This is
	// difficult to guarantee in general, and it also means
	// there's no way to distinguish whether this syscall is
	// actually available. Oops.
	//
	// Anyway, that's why this call just doesn't bother checking
	// any errors.
	madvise(v, n, _MADV_COLLAPSE)
}
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr) {
	// Return the region to the OS unconditionally; any munmap error
	// is ignored.
	munmap(v, n)
}
func sysFaultOS(v unsafe.Pointer, n uintptr) {
	// Revoke all access so any touch of the region faults, and tell
	// the kernel the pages' contents are no longer needed.
	mprotect(v, n, _PROT_NONE)
	madvise(v, n, _MADV_DONTNEED)
}
func sysReserveOS(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
	// PROT_NONE reserves the address range without making it
	// accessible. v is only a hint; the kernel may place the mapping
	// elsewhere (no _MAP_FIXED here).
	p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
	if err != 0 {
		return nil
	}
	setVMAName(p, n, vmaName)
	return p
}
func sysMapOS(v unsafe.Pointer, n uintptr, vmaName string) {
	// _MAP_FIXED: the range was already reserved at v, so the mapping
	// must land exactly there.
	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
	if err == _ENOMEM {
		throw("runtime: out of memory")
	}
	if p != v || err != 0 {
		print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n")
		throw("runtime: cannot map pages in arena address space")
	}
	setVMAName(p, n, vmaName)
	// Disable huge pages if the GODEBUG for it is set.
	//
	// Note that there are a few sysHugePage calls that can override this, but
	// they're all for GC metadata.
	if debug.disablethp != 0 {
		sysNoHugePageOS(v, n)
	}
}
func needZeroAfterSysUnusedOS() bool {
	// With GODEBUG=madvdontneed set, sysUnusedOS uses MADV_DONTNEED,
	// after which the kernel supplies zeroed pages on next access.
	// On the default MADV_FREE path the old contents may still be
	// visible, so callers must re-zero.
	return debug.madvdontneed == 0
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !plan9 && !wasm
package runtime
import "unsafe"
// isSbrkPlatform is false for this build (per the build tag, anything
// but plan9/wasm): memory is not managed with sbrk here.
const isSbrkPlatform = false

// sysReserveAlignedSbrk must never be called on non-sbrk platforms.
func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
	panic("unreachable")
}
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// Metrics implementation exported to runtime/metrics.
import (
"internal/godebugs"
"internal/runtime/atomic"
"internal/runtime/gc"
"unsafe"
)
var (
	// metrics is a map of runtime/metrics keys to data used by the runtime
	// to sample each metric's value. metricsInit indicates it has been
	// initialized.
	//
	// These fields are protected by metricsSema which should be
	// locked/unlocked with metricsLock() / metricsUnlock().
	metricsSema uint32 = 1
	metricsInit bool
	metrics     map[string]metricData

	// Histogram bucket boundaries, computed once by initMetrics.
	sizeClassBuckets []float64
	timeHistBuckets  []float64
)
// metricData describes how to sample a single runtime/metrics metric.
type metricData struct {
	// deps is the set of runtime statistics that this metric
	// depends on. Before compute is called, the statAggregate
	// which will be passed must ensure() these dependencies.
	deps statDepSet

	// compute is a function that populates a metricValue
	// given a populated statAggregate structure.
	compute func(in *statAggregate, out *metricValue)
}
// metricsLock acquires metricsSema, which guards metrics, metricsInit,
// and the bucket slices.
func metricsLock() {
	// Acquire the metricsSema but with handoff. Operations are typically
	// expensive enough that queueing up goroutines and handing off between
	// them will be noticeably better-behaved.
	semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
	if raceenabled {
		raceacquire(unsafe.Pointer(&metricsSema))
	}
}
// metricsUnlock releases metricsSema, announcing the release to the race
// detector first so that metricsLock/metricsUnlock form a proper
// happens-before pair.
func metricsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&metricsSema))
	}
	semrelease(&metricsSema)
}
// initMetrics initializes the metrics map if it hasn't been yet.
//
// metricsSema must be held.
func initMetrics() {
	if metricsInit {
		return
	}

	sizeClassBuckets = make([]float64, gc.NumSizeClasses, gc.NumSizeClasses+1)
	// Skip size class 0 which is a stand-in for large objects, but large
	// objects are tracked separately (and they actually get placed in
	// the last bucket, not the first).
	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
	for i := 1; i < gc.NumSizeClasses; i++ {
		// Size classes have an inclusive upper-bound
		// and exclusive lower bound (e.g. 48-byte size class is
		// (32, 48]) whereas we want an inclusive lower-bound
		// and exclusive upper-bound (e.g. 48-byte size class is
		// [33, 49)). We can achieve this by shifting all bucket
		// boundaries up by 1.
		//
		// Also, a float64 can precisely represent integers with
		// value up to 2^53 and size classes are relatively small
		// (nowhere near 2^48 even) so this will give us exact
		// boundaries.
		sizeClassBuckets[i] = float64(gc.SizeClassToSize[i] + 1)
	}
	sizeClassBuckets = append(sizeClassBuckets, float64Inf())

	timeHistBuckets = timeHistogramMetricsBuckets()

	// The static table of supported metrics and how to sample each.
	metrics = map[string]metricData{
		"/cgo/go-to-c-calls:calls": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(NumCgoCall())
			},
		},
		"/cpu/classes/gc/mark/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCAssistTime))
			},
		},
		"/cpu/classes/gc/mark/dedicated:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCDedicatedTime))
			},
		},
		"/cpu/classes/gc/mark/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCIdleTime))
			},
		},
		"/cpu/classes/gc/pause:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCPauseTime))
			},
		},
		"/cpu/classes/gc/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCTotalTime))
			},
		},
		"/cpu/classes/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.IdleTime))
			},
		},
		"/cpu/classes/scavenge/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeAssistTime))
			},
		},
		"/cpu/classes/scavenge/background:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeBgTime))
			},
		},
		"/cpu/classes/scavenge/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeTotalTime))
			},
		},
		"/cpu/classes/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.TotalTime))
			},
		},
		"/cpu/classes/user:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.UserTime))
			},
		},
		"/gc/cleanups/executed:cleanups": {
			deps: makeStatDepSet(finalStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.finalStats.cleanupsExecuted
			},
		},
		"/gc/cleanups/queued:cleanups": {
			deps: makeStatDepSet(finalStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.finalStats.cleanupsQueued
			},
		},
		"/gc/cycles/automatic:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/forced:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/total:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesDone
			},
		},
		"/gc/finalizers/executed:finalizers": {
			deps: makeStatDepSet(finalStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.finalStats.finalizersExecuted
			},
		},
		"/gc/finalizers/queued:finalizers": {
			deps: makeStatDepSet(finalStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.finalStats.finalizersQueued
			},
		},
		"/gc/scan/globals:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.globalsScan
			},
		},
		"/gc/scan/heap:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.heapScan
			},
		},
		"/gc/scan/stack:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.stackScan
			},
		},
		"/gc/scan/total:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.totalScan
			},
		},
		"/gc/heap/allocs-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(sizeClassBuckets)
				hist.counts[len(hist.counts)-1] = in.heapStats.largeAllocCount
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for i, count := range in.heapStats.smallAllocCount[1:] {
					hist.counts[i] = count
				}
			},
		},
		"/gc/heap/allocs:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalAllocated
			},
		},
		"/gc/heap/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalAllocs
			},
		},
		"/gc/heap/frees-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(sizeClassBuckets)
				hist.counts[len(hist.counts)-1] = in.heapStats.largeFreeCount
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for i, count := range in.heapStats.smallFreeCount[1:] {
					hist.counts[i] = count
				}
			},
		},
		"/gc/heap/frees:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalFreed
			},
		},
		"/gc/heap/frees:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalFrees
			},
		},
		"/gc/heap/goal:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.heapGoal
			},
		},
		"/gc/gomemlimit:bytes": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcController.memoryLimit.Load())
			},
		},
		"/gc/gogc:percent": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcController.gcPercent.Load())
			},
		},
		"/gc/heap/live:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = gcController.heapMarked
			},
		},
		"/gc/heap/objects:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.numObjects
			},
		},
		"/gc/heap/tiny/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.tinyAllocCount
			},
		},
		"/gc/limiter/last-enabled:gc-cycle": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
			},
		},
		"/gc/pauses:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				// N.B. this is identical to /sched/pauses/total/gc:seconds.
				sched.stwTotalTimeGC.write(out)
			},
		},
		"/gc/stack/starting-size:bytes": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(startingStackSize)
			},
		},
		"/memory/classes/heap/free:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
					in.heapStats.inStacks - in.heapStats.inWorkBufs)
			},
		},
		"/memory/classes/heap/objects:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.inObjects
			},
		},
		"/memory/classes/heap/released:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.released)
			},
		},
		"/memory/classes/heap/stacks:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inStacks)
			},
		},
		"/memory/classes/heap/unused:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
			},
		},
		"/memory/classes/metadata/mcache/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mcache/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mspan/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/mspan/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/other:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inWorkBufs) + in.sysStats.gcMiscSys
			},
		},
		"/memory/classes/os-stacks:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.stacksSys
			},
		},
		"/memory/classes/other:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.otherSys
			},
		},
		"/memory/classes/profiling/buckets:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.buckHashSys
			},
		},
		"/memory/classes/total:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
					in.sysStats.stacksSys + in.sysStats.mSpanSys +
					in.sysStats.mCacheSys + in.sysStats.buckHashSys +
					in.sysStats.gcMiscSys + in.sysStats.otherSys
			},
		},
		"/sched/gomaxprocs:threads": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gomaxprocs)
			},
		},
		"/sched/goroutines:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gTotal
			},
		},
		"/sched/goroutines/not-in-go:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gNonGo
			},
		},
		"/sched/goroutines/running:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gRunning
			},
		},
		"/sched/goroutines/runnable:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gRunnable
			},
		},
		"/sched/goroutines/waiting:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gWaiting
			},
		},
		"/sched/goroutines-created:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gCreated
			},
		},
		"/sched/latencies:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.timeToRun.write(out)
			},
		},
		"/sched/pauses/stopping/gc:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwStoppingTimeGC.write(out)
			},
		},
		"/sched/pauses/stopping/other:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwStoppingTimeOther.write(out)
			},
		},
		"/sched/pauses/total/gc:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwTotalTimeGC.write(out)
			},
		},
		"/sched/pauses/total/other:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwTotalTimeOther.write(out)
			},
		},
		"/sched/threads/total:threads": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.threads
			},
		},
		"/sync/mutex/wait/total:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(totalMutexWaitTimeNanos()))
			},
		},
	}

	// Pre-register GODEBUG counter metrics with a zero placeholder;
	// godebug_registerMetric installs the real reader later.
	for _, info := range godebugs.All {
		if !info.Opaque {
			metrics["/godebug/non-default-behavior/"+info.Name+":events"] = metricData{compute: compute0}
		}
	}
	metricsInit = true
}
// compute0 is a compute function that always reports zero.
// It is the initial implementation for the
// /godebug/non-default-behavior/*:events metrics until internal/godebug
// registers a real reader via godebug_registerMetric.
func compute0(_ *statAggregate, out *metricValue) {
	out.kind = metricKindUint64
	out.scalar = 0
}
// metricReader adapts a plain uint64-returning read function into a
// metric compute function (see godebug_registerMetric).
type metricReader func() uint64

// compute reports the current value of f as a uint64 metric.
func (f metricReader) compute(_ *statAggregate, out *metricValue) {
	out.kind = metricKindUint64
	out.scalar = f()
}
// godebug_registerMetric installs read as the compute function for an
// existing /godebug/non-default-behavior/*:events metric. The name must
// already be present in the metrics map (initMetrics creates an entry,
// backed by compute0, for every non-opaque godebug setting); an unknown
// name is a fatal error.
//
//go:linkname godebug_registerMetric internal/godebug.registerMetric
func godebug_registerMetric(name string, read func() uint64) {
	metricsLock()
	initMetrics()
	d, ok := metrics[name]
	if !ok {
		throw("runtime: unexpected metric registration for " + name)
	}
	// Swap the placeholder compute function for one that reads the
	// live godebug counter.
	d.compute = metricReader(read).compute
	metrics[name] = d
	metricsUnlock()
}
// statDep is a dependency on a group of statistics
// that a metric might have.
type statDep uint
const (
heapStatsDep statDep = iota // corresponds to heapStatsAggregate
sysStatsDep // corresponds to sysStatsAggregate
cpuStatsDep // corresponds to cpuStatsAggregate
gcStatsDep // corresponds to gcStatsAggregate
finalStatsDep // corresponds to finalStatsAggregate
schedStatsDep // corresponds to schedStatsAggregate
numStatsDeps
)
// statDepSet represents a set of statDeps.
//
// Under the hood, it's a bitmap.
type statDepSet [1]uint64
// makeStatDepSet creates a new statDepSet from a list of statDeps.
func makeStatDepSet(deps ...statDep) statDepSet {
var s statDepSet
for _, d := range deps {
s[d/64] |= 1 << (d % 64)
}
return s
}
// difference returns set difference of s from b as a new set.
func (s statDepSet) difference(b statDepSet) statDepSet {
var c statDepSet
for i := range s {
c[i] = s[i] &^ b[i]
}
return c
}
// union returns the union of the two sets as a new set.
func (s statDepSet) union(b statDepSet) statDepSet {
var c statDepSet
for i := range s {
c[i] = s[i] | b[i]
}
return c
}
// empty returns true if there are no dependencies in the set.
func (s *statDepSet) empty() bool {
for _, c := range s {
if c != 0 {
return false
}
}
return true
}
// has returns true if the set contains a given statDep.
func (s *statDepSet) has(d statDep) bool {
return s[d/64]&(1<<(d%64)) != 0
}
// heapStatsAggregate represents memory stats obtained from the
// runtime. This set of stats is grouped together because they
// depend on each other in some way to make sense of the runtime's
// current heap memory use. They're also sharded across Ps, so it
// makes sense to grab them all at once.
type heapStatsAggregate struct {
	heapStatsDelta
	// Derived from values in heapStatsDelta.

	// inObjects is the bytes of memory occupied by live objects.
	inObjects uint64
	// numObjects is the number of live objects in the heap.
	numObjects uint64
	// totalAllocated is the total bytes of heap objects allocated
	// over the lifetime of the program.
	totalAllocated uint64
	// totalFreed is the total bytes of heap objects freed
	// over the lifetime of the program.
	totalFreed uint64
	// totalAllocs is the number of heap objects allocated over
	// the lifetime of the program.
	totalAllocs uint64
	// totalFrees is the number of heap objects freed over
	// the lifetime of the program.
	totalFrees uint64
}
// compute populates the heapStatsAggregate with values from the runtime.
func (a *heapStatsAggregate) compute() {
	// Snapshot the sharded heap stats into the embedded heapStatsDelta.
	memstats.heapStats.read(&a.heapStatsDelta)

	// Calculate derived stats.
	//
	// Start from the large-object counters, then fold in every small
	// size class, scaling object counts by the class's object size to
	// get byte totals.
	a.totalAllocs = a.largeAllocCount
	a.totalFrees = a.largeFreeCount
	a.totalAllocated = a.largeAlloc
	a.totalFreed = a.largeFree
	for i := range a.smallAllocCount {
		na := a.smallAllocCount[i]
		nf := a.smallFreeCount[i]
		a.totalAllocs += na
		a.totalFrees += nf
		a.totalAllocated += na * uint64(gc.SizeClassToSize[i])
		a.totalFreed += nf * uint64(gc.SizeClassToSize[i])
	}
	// Live bytes/objects are cumulative allocations minus cumulative frees.
	a.inObjects = a.totalAllocated - a.totalFreed
	a.numObjects = a.totalAllocs - a.totalFrees
}
// sysStatsAggregate represents system memory stats obtained
// from the runtime. This set of stats is grouped together because
// they're all relatively cheap to acquire and generally independent
// of one another and other runtime memory stats. The fact that they
// may be acquired at different times, especially with respect to
// heapStatsAggregate, means there could be some skew, but because of
// these stats are independent, there's no real consistency issue here.
type sysStatsAggregate struct {
	stacksSys   uint64
	mSpanSys    uint64
	mSpanInUse  uint64
	mCacheSys   uint64
	mCacheInUse uint64
	buckHashSys uint64
	gcMiscSys   uint64
	otherSys    uint64
	heapGoal    uint64
	// gcCyclesDone and gcCyclesForced are GC cycle counts
	// (from memstats.numgc/numforcedgc), not byte quantities.
	gcCyclesDone   uint64
	gcCyclesForced uint64
}
// compute populates the sysStatsAggregate with values from the runtime.
func (a *sysStatsAggregate) compute() {
	a.stacksSys = memstats.stacks_sys.load()
	a.buckHashSys = memstats.buckhash_sys.load()
	a.gcMiscSys = memstats.gcMiscSys.load()
	a.otherSys = memstats.other_sys.load()
	a.heapGoal = gcController.heapGoal()
	a.gcCyclesDone = uint64(memstats.numgc)
	a.gcCyclesForced = uint64(memstats.numforcedgc)

	// The mspan/mcache fixalloc in-use figures are protected by the
	// heap lock, which must be taken on the system stack.
	systemstack(func() {
		lock(&mheap_.lock)
		a.mSpanSys = memstats.mspan_sys.load()
		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
		a.mCacheSys = memstats.mcache_sys.load()
		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
		unlock(&mheap_.lock)
	})
}
// cpuStatsAggregate represents CPU stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type cpuStatsAggregate struct {
	cpuStats
}

// compute populates the cpuStatsAggregate with values from the runtime.
// The snapshot is whatever was captured in work.cpuStats (i.e. at the
// last stop-the-world); see the TODO below.
func (a *cpuStatsAggregate) compute() {
	a.cpuStats = work.cpuStats
	// TODO(mknyszek): Update the CPU stats again so that we're not
	// just relying on the STW snapshot. The issue here is that currently
	// this will cause non-monotonicity in the "user" CPU time metric.
	//
	// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
}
// gcStatsAggregate represents various GC stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type gcStatsAggregate struct {
	heapScan    uint64
	stackScan   uint64
	globalsScan uint64
	// totalScan is the sum of the three scan quantities above.
	totalScan uint64
}

// compute populates the gcStatsAggregate with values from the runtime.
func (a *gcStatsAggregate) compute() {
	a.heapScan = gcController.heapScan.Load()
	a.stackScan = gcController.lastStackScan.Load()
	a.globalsScan = gcController.globalsScan.Load()
	a.totalScan = a.heapScan + a.stackScan + a.globalsScan
}
// finalStatsAggregate represents various finalizer/cleanup stats obtained
// from the runtime acquired together to avoid skew and inconsistencies.
// All four counters are monotonic lifetime counts.
type finalStatsAggregate struct {
	finalizersQueued   uint64
	finalizersExecuted uint64
	cleanupsQueued     uint64
	cleanupsExecuted   uint64
}

// compute populates the finalStatsAggregate with values from the runtime.
func (a *finalStatsAggregate) compute() {
	a.finalizersQueued, a.finalizersExecuted = finReadQueueStats()
	a.cleanupsQueued, a.cleanupsExecuted = gcCleanups.readQueueStats()
}
// schedStatsAggregate contains stats about the scheduler, including
// an approximate count of goroutines in each state.
type schedStatsAggregate struct {
	// gTotal includes system goroutines (it is derived from
	// gcount(true) in compute).
	gTotal uint64
	gRunning uint64
	gRunnable uint64
	// gNonGo counts goroutines running outside of Go code, e.g. in a
	// system call.
	gNonGo uint64
	// gWaiting is derived: gTotal minus the running/runnable/non-Go
	// counts.
	gWaiting uint64
	gCreated uint64
	threads uint64
}
// compute populates the schedStatsAggregate with values from the runtime.
//
// The per-state goroutine counts are approximate: local run queues may
// change while we read them, so the individual counts may not add up
// exactly to the total.
func (a *schedStatsAggregate) compute() {
	// Lock the scheduler so the global run queue can't change and
	// the number of Ps can't change. This doesn't prevent the
	// local run queues from changing, so the results are still
	// approximate.
	lock(&sched.lock)

	// The total count of threads owned by Go is the number of Ms
	// minus extra Ms on the list or in use.
	a.threads = uint64(mcount()) - uint64(extraMInUse.Load()) - uint64(extraMLength.Load())

	// Collect running/runnable from per-P run queues.
	a.gCreated += sched.goroutinesCreated.Load()
	for _, p := range allp {
		if p == nil || p.status == _Pdead {
			break
		}
		a.gCreated += p.goroutinesCreated
		switch p.status {
		case _Prunning:
			if thread, ok := setBlockOnExitSyscall(p); ok {
				thread.resume()
				a.gNonGo++
			} else {
				a.gRunning++
			}
		case _Pgcstop:
			// The world is stopping or stopped.
			// This is fine. The results will be
			// slightly odd since nothing else
			// is running, but it will be accurate.
		}
		// Read the P's run queue length consistently: retry if the
		// head moved under us or we observed a torn (negative) length.
		for {
			h := atomic.Load(&p.runqhead)
			t := atomic.Load(&p.runqtail)
			next := atomic.Loaduintptr((*uintptr)(&p.runnext))
			runnable := int32(t - h)
			if atomic.Load(&p.runqhead) != h || runnable < 0 {
				continue
			}
			if next != 0 {
				runnable++
			}
			a.gRunnable += uint64(runnable)
			break
		}
	}

	// Global run queue.
	a.gRunnable += uint64(sched.runq.size)

	// Account for Gs that are in _Gsyscall without a P.
	nGsyscallNoP := sched.nGsyscallNoP.Load()
	// nGsyscallNoP can go negative during temporary races.
	if nGsyscallNoP >= 0 {
		a.gNonGo += uint64(nGsyscallNoP)
	}

	// Compute the number of blocked goroutines. We have to
	// include system goroutines in this count because we included
	// them above.
	//
	// Because the per-state counts above are approximate, their sum
	// can exceed gTotal. gWaiting is unsigned, so check for underflow
	// *before* subtracting: computing the difference and then testing
	// "< 0" would never be true for a uint64, leaving a wrapped-around
	// huge value.
	a.gTotal = uint64(gcount(true))
	if active := a.gRunning + a.gRunnable + a.gNonGo; active < a.gTotal {
		a.gWaiting = a.gTotal - active
	} else {
		a.gWaiting = 0
	}
	unlock(&sched.lock)
}
// nsToSec takes a duration in nanoseconds and converts it to seconds as
// a float64.
func nsToSec(ns int64) float64 {
	const nanosPerSecond = 1e9
	return float64(ns) / nanosPerSecond
}
// statAggregate is the main driver of the metrics implementation.
//
// It contains multiple aggregates of runtime statistics, as well
// as a set of these aggregates that it has populated. The aggregates
// are populated lazily by its ensure method.
type statAggregate struct {
	// ensured records which aggregates below have already been
	// computed for this read.
	ensured    statDepSet
	heapStats  heapStatsAggregate
	sysStats   sysStatsAggregate
	cpuStats   cpuStatsAggregate
	gcStats    gcStatsAggregate
	finalStats finalStatsAggregate
	schedStats schedStatsAggregate
}
// ensure populates statistics aggregates determined by deps if they
// haven't yet been populated.
func (a *statAggregate) ensure(deps *statDepSet) {
	// Only compute what hasn't been computed yet.
	todo := deps.difference(a.ensured)
	if todo.empty() {
		return
	}
	for dep := statDep(0); dep < numStatsDeps; dep++ {
		if todo.has(dep) {
			switch dep {
			case heapStatsDep:
				a.heapStats.compute()
			case sysStatsDep:
				a.sysStats.compute()
			case cpuStatsDep:
				a.cpuStats.compute()
			case gcStatsDep:
				a.gcStats.compute()
			case finalStatsDep:
				a.finalStats.compute()
			case schedStatsDep:
				a.schedStats.compute()
			}
		}
	}
	// Remember what we just filled in.
	a.ensured = a.ensured.union(todo)
}
// metricKind is a runtime copy of runtime/metrics.ValueKind and
// must be kept structurally identical to that type.
type metricKind int

const (
	// These values must be kept identical to their corresponding Kind* values
	// in the runtime/metrics package.
	metricKindBad metricKind = iota // KindBad: unknown/unsupported metric
	metricKindUint64
	metricKindFloat64
	metricKindFloat64Histogram
)
// metricSample is a runtime copy of runtime/metrics.Sample and
// must be kept structurally identical to that type.
type metricSample struct {
	name  string
	value metricValue
}
// metricValue is a runtime copy of runtime/metrics.Value and
// must be kept structurally identical to that type.
type metricValue struct {
	kind    metricKind
	scalar  uint64         // contains scalar values for scalar Kinds.
	pointer unsafe.Pointer // contains non-scalar values.
}
// float64HistOrInit tries to pull out an existing float64Histogram
// from the value, but if none exists, then it allocates one with
// the given buckets.
func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
	var hist *metricFloat64Histogram
	if v.kind == metricKindFloat64Histogram && v.pointer != nil {
		// Reuse the histogram already stored in the value.
		hist = (*metricFloat64Histogram)(v.pointer)
	} else {
		hist = new(metricFloat64Histogram)
		v.kind = metricKindFloat64Histogram
		v.pointer = unsafe.Pointer(hist)
	}
	hist.buckets = buckets
	// N buckets bound N-1 counts; (re)allocate only on size mismatch.
	if len(hist.counts) != len(buckets)-1 {
		hist.counts = make([]uint64, len(buckets)-1)
	}
	return hist
}
// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
// and must be kept structurally identical to that type.
type metricFloat64Histogram struct {
	counts  []uint64  // len(counts) == len(buckets)-1
	buckets []float64 // bucket boundaries
}
// agg is used by readMetrics, and is protected by metricsSema.
//
// Managed as a global variable because its pointer will be
// an argument to a dynamically-defined function, and we'd
// like to avoid it escaping to the heap.
// It is cleared at the start of each readMetricsLocked call.
var agg statAggregate
// metricName pairs a metric's name with the kind of value it reports.
type metricName struct {
	name string
	kind metricKind
}
// readMetricNames is the implementation of runtime/metrics.readMetricNames,
// used by the runtime/metrics test and otherwise unreferenced.
//
//go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames
func readMetricNames() []string {
	// Take the lock twice: once to size the result, once to fill it.
	// This keeps the allocation of the result slice outside the lock.
	metricsLock()
	initMetrics()
	n := len(metrics)
	metricsUnlock()
	list := make([]string, 0, n)
	metricsLock()
	for name := range metrics {
		list = append(list, name)
	}
	metricsUnlock()
	return list
}
// readMetrics is the implementation of runtime/metrics.Read.
//
// samplesp/len/cap describe the caller's sample slice, passed as raw
// parts so this package need not depend on runtime/metrics.
//
//go:linkname readMetrics runtime/metrics.runtime_readMetrics
func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
	metricsLock()
	// Ensure the map is initialized.
	initMetrics()
	// Read the metrics.
	readMetricsLocked(samplesp, len, cap)
	metricsUnlock()
}
// readMetricsLocked is the internal, locked portion of readMetrics.
//
// Broken out for more robust testing. metricsLock must be held and
// initMetrics must have been called already.
func readMetricsLocked(samplesp unsafe.Pointer, len int, cap int) {
	// Construct a slice from the args.
	// Reassemble the caller's []metricSample from its raw
	// pointer/len/cap parts.
	sl := slice{samplesp, len, cap}
	samples := *(*[]metricSample)(unsafe.Pointer(&sl))
	// Clear agg defensively.
	agg = statAggregate{}
	// Sample.
	for i := range samples {
		sample := &samples[i]
		data, ok := metrics[sample.name]
		if !ok {
			// Unknown metric name: mark just this sample as bad
			// rather than failing the whole read.
			sample.value.kind = metricKindBad
			continue
		}
		// Ensure we have all the stats we need.
		// agg is populated lazily.
		agg.ensure(&data.deps)
		// Compute the value based on the stats we have.
		data.compute(&agg, &sample.value)
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector: finalizers and block profiling.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/gc"
"internal/runtime/sys"
"unsafe"
)
// finBlockSize is the size in bytes of one finBlock allocation.
const finBlockSize = 4 * 1024

// finBlock is an block of finalizers to be executed. finBlocks
// are arranged in a linked list for the finalizer queue.
//
// finBlock is allocated from non-GC'd memory, so any heap pointers
// must be specially handled. GC currently assumes that the finalizer
// queue does not grow during marking (but it can shrink).
type finBlock struct {
	_       sys.NotInHeap
	alllink *finBlock // next block on allfin (list of all blocks ever allocated)
	next    *finBlock // next block on finq or finc
	cnt     uint32    // number of finalizers currently stored in fin
	_       int32     // padding; see fin's size computation below
	// fin fills the remainder of the finBlockSize-byte block.
	fin [(finBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}
// fingStatus holds the finalizer goroutine's status bits (see the
// constants below).
var fingStatus atomic.Uint32

// finalizer goroutine status.
const (
	fingUninitialized uint32 = iota
	fingCreated       uint32 = 1 << (iota - 1) // finalizer goroutine exists
	fingRunningFinalizer                       // currently executing a finalizer
	fingWait                                   // parked, waiting for work
	fingWake                                   // has been asked to wake up
)
var (
	finlock mutex // protects the following variables
	fing    *g    // goroutine that runs finalizers
	finq    *finBlock // list of finalizers that are to be executed
	finc    *finBlock // cache of free blocks
	// finptrmask is the GC pointer bitmap for a finBlock's fin array,
	// built lazily from finalizer1 in queuefinalizer.
	finptrmask  [finBlockSize / goarch.PtrSize / 8]byte
	finqueued   uint64 // monotonic count of queued finalizers
	finexecuted uint64 // monotonic count of executed finalizers
)

// allfin is the list of all blocks ever allocated, linked via alllink.
var allfin *finBlock // list of all blocks
// finalizer is a single queued finalizer call.
//
// NOTE: Layout known to queuefinalizer (and described by finalizer1 below).
type finalizer struct {
	fn   *funcval       // function to call (may be a heap pointer)
	arg  unsafe.Pointer // ptr to object (may be a heap pointer)
	nret uintptr        // bytes of return values from fn
	fint *_type         // type of first argument of fn
	ot   *ptrtype       // type of ptr to object (may be a heap pointer)
}
// finalizer1 is the repeating pointer bitmap for an array of finalizer
// structs; finptrmask is filled by tiling it.
var finalizer1 = [...]byte{
	// Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here)
	// Each byte describes 8 words.
	// Need 8 Finalizers described by 5 bytes before pattern repeats:
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	// aka
	//
	//	ptr ptr INT ptr ptr ptr ptr INT
	//	ptr ptr ptr ptr INT ptr ptr ptr
	//	ptr INT ptr ptr ptr ptr INT ptr
	//	ptr ptr ptr INT ptr ptr ptr ptr
	//	INT ptr ptr ptr ptr INT ptr ptr
	//
	// Assumptions about Finalizer layout checked below.
	1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7,
	1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7,
	1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7,
	1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7,
	0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
}
// lockRankMayQueueFinalizer records the lock ranking effects of a
// function that may call queuefinalizer.
func lockRankMayQueueFinalizer() {
	lockWithRankMayAcquire(&finlock, getLockRank(&finlock))
}
// queuefinalizer appends a finalizer call (fn applied to the object at p,
// with nret bytes of results, argument type fint, and object pointer type
// ot) to the finalizer queue, allocating a new finBlock if the current one
// is full, and asks the finalizer goroutine to wake.
//
// Must not be called while the GC is marking (see the throw below).
func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
	if gcphase != _GCoff {
		// Currently we assume that the finalizer queue won't
		// grow during marking so we don't have to rescan it
		// during mark termination. If we ever need to lift
		// this assumption, we can do it by adding the
		// necessary barriers to queuefinalizer (which it may
		// have automatically).
		throw("queuefinalizer during GC")
	}
	lock(&finlock)
	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
		// Head block is full (or absent): take a block from the free
		// cache, allocating a fresh one from non-GC'd memory if needed.
		if finc == nil {
			finc = (*finBlock)(persistentalloc(finBlockSize, 0, &memstats.gcMiscSys))
			finc.alllink = allfin
			allfin = finc
			if finptrmask[0] == 0 {
				// Build pointer mask for Finalizer array in block.
				// Check assumptions made in finalizer1 array above.
				if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
					unsafe.Offsetof(finalizer{}.fn) != 0 ||
					unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
					unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
					unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
					unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
					throw("finalizer out of sync")
				}
				for i := range finptrmask {
					finptrmask[i] = finalizer1[i%len(finalizer1)]
				}
			}
		}
		block := finc
		finc = block.next
		block.next = finq
		finq = block
	}
	f := &finq.fin[finq.cnt]
	atomic.Xadd(&finq.cnt, +1) // Sync with markroots
	f.fn = fn
	f.nret = nret
	f.fint = fint
	f.ot = ot
	f.arg = p
	finqueued++
	unlock(&finlock)
	// Signal the finalizer goroutine outside the lock.
	fingStatus.Or(fingWake)
}
// iterate_finq invokes callback for every finalizer entry currently
// stored in any block on allfin, passing the entry's fields.
//
//go:nowritebarrier
func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
	for fb := allfin; fb != nil; fb = fb.alllink {
		for i := uint32(0); i < fb.cnt; i++ {
			f := &fb.fin[i]
			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
		}
	}
}
// wakefing returns the finalizer goroutine if it exists, is parked
// waiting, and has been asked to wake (fingCreated|fingWait|fingWake);
// in that case the wait/wake bits are cleared atomically. Otherwise it
// returns nil.
func wakefing() *g {
	if fingStatus.CompareAndSwap(fingCreated|fingWait|fingWake, fingCreated) {
		return fing
	}
	return nil
}
// createfing starts the finalizer goroutine if it has not been started
// yet. Safe to call repeatedly; the CAS guarantees exactly one start.
func createfing() {
	// start the finalizer goroutine exactly once
	if fingStatus.Load() == fingUninitialized && fingStatus.CompareAndSwap(fingUninitialized, fingCreated) {
		go runFinalizers()
	}
}
// finalizercommit is the gopark commit function used by runFinalizers:
// it releases finlock (passed as the opaque lock argument) and marks the
// finalizer goroutine as waiting.
func finalizercommit(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	// fingStatus should be modified after fing is put into a waiting state
	// to avoid waking fing in running state, even if it is about to be parked.
	fingStatus.Or(fingWait)
	// Always commit the park.
	return true
}
// finReadQueueStats returns the monotonic counts of finalizers queued
// and executed, read consistently under finlock.
func finReadQueueStats() (queued, executed uint64) {
	lock(&finlock)
	queued, executed = finqueued, finexecuted
	unlock(&finlock)
	return queued, executed
}
// This is the goroutine that runs all of the finalizers.
func runFinalizers() {
var (
frame unsafe.Pointer
framecap uintptr
argRegs int
)
gp := getg()
lock(&finlock)
fing = gp
unlock(&finlock)
for {
lock(&finlock)
fb := finq
finq = nil
if fb == nil {
gopark(finalizercommit, unsafe.Pointer(&finlock), waitReasonFinalizerWait, traceBlockSystemGoroutine, 1)
continue
}
argRegs = intArgRegs
unlock(&finlock)
if raceenabled {
racefingo()
}
for fb != nil {
n := fb.cnt
for i := n; i > 0; i-- {
f := &fb.fin[i-1]
var regs abi.RegArgs
// The args may be passed in registers or on stack. Even for
// the register case, we still need the spill slots.
// TODO: revisit if we remove spill slots.
//
// Unfortunately because we can have an arbitrary
// amount of returns and it would be complex to try and
// figure out how many of those can get passed in registers,
// just conservatively assume none of them do.
framesz := unsafe.Sizeof((any)(nil)) + f.nret
if framecap < framesz {
// The frame does not contain pointers interesting for GC,
// all not yet finalized objects are stored in finq.
// If we do not mark it as FlagNoScan,
// the last finalized object is not collected.
frame = mallocgc(framesz, nil, true)
framecap = framesz
}
if f.fint == nil {
throw("missing type in finalizer")
}
r := frame
if argRegs > 0 {
r = unsafe.Pointer(®s.Ints)
} else {
// frame is effectively uninitialized
// memory. That means we have to clear
// it before writing to it to avoid
// confusing the write barrier.
*(*[2]uintptr)(frame) = [2]uintptr{}
}
switch f.fint.Kind() {
case abi.Pointer:
// direct use of pointer
*(*unsafe.Pointer)(r) = f.arg
case abi.Interface:
ityp := (*interfacetype)(unsafe.Pointer(f.fint))
// set up with empty interface
(*eface)(r)._type = &f.ot.Type
(*eface)(r).data = f.arg
if len(ityp.Methods) != 0 {
// convert to interface with methods
// this conversion is guaranteed to succeed - we checked in SetFinalizer
(*iface)(r).tab = assertE2I(ityp, (*eface)(r)._type)
}
default:
throw("bad type kind in finalizer")
}
fingStatus.Or(fingRunningFinalizer)
reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), ®s)
fingStatus.And(^fingRunningFinalizer)
// Drop finalizer queue heap references
// before hiding them from markroot.
// This also ensures these will be
// clear if we reuse the finalizer.
f.fn = nil
f.arg = nil
f.ot = nil
atomic.Store(&fb.cnt, i-1)
}
next := fb.next
lock(&finlock)
finexecuted += uint64(n)
fb.next = finc
finc = fb
unlock(&finlock)
fb = next
}
}
}
// isGoPointerWithoutSpan reports whether p is a pointer the runtime
// knows about even though it has no heap span: the zero-size object
// base, or an address inside any module's static data segments.
func isGoPointerWithoutSpan(p unsafe.Pointer) bool {
	// 0-length objects are okay.
	if p == unsafe.Pointer(&zerobase) {
		return true
	}
	// Global initializers might be linker-allocated.
	//	var Foo = &Object{}
	//	func main() {
	//		runtime.SetFinalizer(Foo, nil)
	//	}
	// The relevant segments are: noptrdata, data, bss, noptrbss.
	// We cannot assume they are in any order or even contiguous,
	// due to external linking.
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.noptrdata <= uintptr(p) && uintptr(p) < datap.enoptrdata ||
			datap.data <= uintptr(p) && uintptr(p) < datap.edata ||
			datap.bss <= uintptr(p) && uintptr(p) < datap.ebss ||
			datap.noptrbss <= uintptr(p) && uintptr(p) < datap.enoptrbss {
			return true
		}
	}
	return false
}
// blockUntilEmptyFinalizerQueue blocks until either the finalizer
// queue is emptied (and the finalizers have executed) or the timeout
// (in nanoseconds) is reached. Returns true if the finalizer queue was
// emptied.
// This is used by the runtime, sync, and unique tests.
func blockUntilEmptyFinalizerQueue(timeout int64) bool {
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&finlock)
		// We know the queue has been drained when both finq is nil
		// and the finalizer g has stopped executing.
		empty := finq == nil
		empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
		unlock(&finlock)
		if empty {
			return true
		}
		// Yield and poll again rather than sleeping.
		Gosched()
	}
	return false
}
// SetFinalizer sets the finalizer associated with obj to the provided
// finalizer function. When the garbage collector finds an unreachable block
// with an associated finalizer, it clears the association and runs
// finalizer(obj) in a separate goroutine. This makes obj reachable again,
// but now without an associated finalizer. Assuming that SetFinalizer
// is not called again, the next time the garbage collector sees
// that obj is unreachable, it will free obj.
//
// SetFinalizer(obj, nil) clears any finalizer associated with obj.
//
// New Go code should consider using [AddCleanup] instead, which is much
// less error-prone than SetFinalizer.
//
// The argument obj must be a pointer to an object allocated by calling
// new, by taking the address of a composite literal, or by taking the
// address of a local variable.
// The argument finalizer must be a function that takes a single argument
// to which obj's type can be assigned, and can have arbitrary ignored return
// values. If either of these is not true, SetFinalizer may abort the
// program.
//
// Finalizers are run in dependency order: if A points at B, both have
// finalizers, and they are otherwise unreachable, only the finalizer
// for A runs; once A is freed, the finalizer for B can run.
// If a cyclic structure includes a block with a finalizer, that
// cycle is not guaranteed to be garbage collected and the finalizer
// is not guaranteed to run, because there is no ordering that
// respects the dependencies.
//
// The finalizer is scheduled to run at some arbitrary time after the
// program can no longer reach the object to which obj points.
// There is no guarantee that finalizers will run before a program exits,
// so typically they are useful only for releasing non-memory resources
// associated with an object during a long-running program.
// For example, an [os.File] object could use a finalizer to close the
// associated operating system file descriptor when a program discards
// an os.File without calling Close, but it would be a mistake
// to depend on a finalizer to flush an in-memory I/O buffer such as a
// [bufio.Writer], because the buffer would not be flushed at program exit.
//
// It is not guaranteed that a finalizer will run if the size of *obj is
// zero bytes, because it may share same address with other zero-size
// objects in memory. See https://go.dev/ref/spec#Size_and_alignment_guarantees.
//
// It is not guaranteed that a finalizer will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// Note that because finalizers may execute arbitrarily far into the future
// after an object is no longer referenced, the runtime is allowed to perform
// a space-saving optimization that batches objects together in a single
// allocation slot. The finalizer for an unreferenced object in such an
// allocation may never run if it always exists in the same batch as a
// referenced object. Typically, this batching only happens for tiny
// (on the order of 16 bytes or less) and pointer-free objects.
//
// A finalizer may run as soon as an object becomes unreachable.
// In order to use finalizers correctly, the program must ensure that
// the object is reachable until it is no longer required.
// Objects stored in global variables, or that can be found by tracing
// pointers from a global variable, are reachable. A function argument or
// receiver may become unreachable at the last point where the function
// mentions it. To make an unreachable object reachable, pass the object
// to a call of the [KeepAlive] function to mark the last point in the
// function where the object must be reachable.
//
// For example, if p points to a struct, such as os.File, that contains
// a file descriptor d, and p has a finalizer that closes that file
// descriptor, and if the last use of p in a function is a call to
// syscall.Write(p.d, buf, size), then p may be unreachable as soon as
// the program enters [syscall.Write]. The finalizer may run at that moment,
// closing p.d, causing syscall.Write to fail because it is writing to
// a closed file descriptor (or, worse, to an entirely different
// file descriptor opened by a different goroutine). To avoid this problem,
// call KeepAlive(p) after the call to syscall.Write.
//
// A single goroutine runs all finalizers for a program, sequentially.
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
//
// In the terminology of the Go memory model, a call
// SetFinalizer(x, f) “synchronizes before” the finalization call f(x).
// However, there is no guarantee that KeepAlive(x) or any other use of x
// “synchronizes before” f(x), so in general a finalizer should use a mutex
// or other synchronization mechanism if it needs to access mutable state in x.
// For example, consider a finalizer that inspects a mutable field in x
// that is modified from time to time in the main program before x
// becomes unreachable and the finalizer is invoked.
// The modifications in the main program and the inspection in the finalizer
// need to use appropriate synchronization, such as mutexes or atomic updates,
// to avoid read-write races.
func SetFinalizer(obj any, finalizer any) {
	// Validate that obj is a non-nil pointer.
	e := efaceOf(&obj)
	etyp := e._type
	if etyp == nil {
		throw("runtime.SetFinalizer: first argument is nil")
	}
	if etyp.Kind() != abi.Pointer {
		throw("runtime.SetFinalizer: first argument is " + toRType(etyp).string() + ", not pointer")
	}
	ot := (*ptrtype)(unsafe.Pointer(etyp))
	if ot.Elem == nil {
		throw("nil elem type!")
	}
	if inUserArenaChunk(uintptr(e.data)) {
		// Arena-allocated objects are not eligible for finalizers.
		throw("runtime.SetFinalizer: first argument was allocated into an arena")
	}
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so no finalizers run
		// (and we don't have the data structures to record them).
		return
	}
	// find the containing object
	base, span, _ := findObject(uintptr(e.data), 0, 0)
	if base == 0 {
		if isGoPointerWithoutSpan(e.data) {
			// Linker-allocated object: silently ignore, as documented.
			return
		}
		throw("runtime.SetFinalizer: pointer not in allocated block")
	}
	// Move base forward if we've got an allocation header.
	if !span.spanclass.noscan() && !heapBitsInSpan(span.elemsize) && span.spanclass.sizeclass() != 0 {
		base += gc.MallocHeaderSize
	}
	if uintptr(e.data) != base {
		// As an implementation detail we allow to set finalizers for an inner byte
		// of an object if it could come from tiny alloc (see mallocgc for details).
		if ot.Elem == nil || ot.Elem.Pointers() || ot.Elem.Size_ >= maxTinySize {
			throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
		}
	}
	f := efaceOf(&finalizer)
	ftyp := f._type
	if ftyp == nil {
		// switch to system stack and remove finalizer
		systemstack(func() {
			removefinalizer(e.data)
			if debug.checkfinalizers != 0 {
				clearFinalizerContext(uintptr(e.data))
				KeepAlive(e.data)
			}
		})
		return
	}
	// Validate that finalizer is a function taking exactly one
	// argument to which obj is assignable.
	if ftyp.Kind() != abi.Func {
		throw("runtime.SetFinalizer: second argument is " + toRType(ftyp).string() + ", not a function")
	}
	ft := (*functype)(unsafe.Pointer(ftyp))
	if ft.IsVariadic() {
		throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string() + " because dotdotdot")
	}
	if ft.InCount != 1 {
		throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string())
	}
	// fint is the finalizer's sole parameter type.
	fint := ft.InSlice()[0]
	switch {
	case fint == etyp:
		// ok - same type
		goto okarg
	case fint.Kind() == abi.Pointer:
		if (fint.Uncommon() == nil || etyp.Uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).Elem == ot.Elem {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
			goto okarg
		}
	case fint.Kind() == abi.Interface:
		ityp := (*interfacetype)(unsafe.Pointer(fint))
		if len(ityp.Methods) == 0 {
			// ok - satisfies empty interface
			goto okarg
		}
		if itab := assertE2I2(ityp, efaceOf(&obj)._type); itab != nil {
			goto okarg
		}
	}
	throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string())
okarg:
	// compute size needed for return parameters
	nret := uintptr(0)
	for _, t := range ft.OutSlice() {
		nret = alignUp(nret, uintptr(t.Align_)) + t.Size_
	}
	nret = alignUp(nret, goarch.PtrSize)
	// make sure we have a finalizer goroutine
	createfing()
	callerpc := sys.GetCallerPC()
	systemstack(func() {
		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
			throw("runtime.SetFinalizer: finalizer already set")
		}
		if debug.checkfinalizers != 0 {
			setFinalizerContext(e.data, ot.Elem, callerpc, (*funcval)(f.data).fn)
		}
	})
}
// Mark KeepAlive as noinline so that it is easily detectable as an intrinsic.
//
//go:noinline
// KeepAlive marks its argument as currently reachable.
// This ensures that the object is not freed, and its finalizer is not run,
// before the point in the program where KeepAlive is called.
//
// A very simplified example showing where KeepAlive is required:
//
//	type File struct { d int }
//	d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
//	// ... do something if err != nil ...
//	p := &File{d}
//	runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
//	var buf [10]byte
//	n, err := syscall.Read(p.d, buf[:])
//	// Ensure p is not finalized until Read returns.
//	runtime.KeepAlive(p)
//	// No more uses of p after this point.
//
// Without the KeepAlive call, the finalizer could run at the start of
// [syscall.Read], closing the file descriptor before syscall.Read makes
// the actual system call.
//
// Note: KeepAlive should only be used to prevent finalizers from
// running prematurely. In particular, when used with [unsafe.Pointer],
// the rules for valid uses of unsafe.Pointer still apply.
func KeepAlive(x any) {
	// Introduce a use of x that the compiler can't eliminate.
	// This makes sure x is alive on entry. We need x to be alive
	// on entry for "defer runtime.KeepAlive(x)"; see issue 21402.
	// The branch is never taken at run time (cgoAlwaysFalse is
	// always false), but the compiler can't prove that.
	if cgoAlwaysFalse {
		println(x)
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Fixed-size object allocator. Returned memory is zeroed by default;
// callers may opt out per-allocator via the zero flag (see fixalloc).
//
// See malloc.go for overview.
package runtime
import (
"internal/runtime/sys"
"unsafe"
)
// fixalloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around sysAlloc to manage its
// mcache and mspan objects.
//
// Memory returned by fixalloc.alloc is zeroed by default, but the
// caller may take responsibility for zeroing allocations by setting
// the zero flag to false. This is only safe if the memory never
// contains heap pointers.
//
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
//
// Consider marking fixalloc'd types not in heap by embedding
// internal/runtime/sys.NotInHeap.
type fixalloc struct {
	size   uintptr                     // size in bytes of each object handed out
	first  func(arg, p unsafe.Pointer) // called first time p is returned
	arg    unsafe.Pointer              // opaque argument passed to first
	list   *mlink                      // free list of previously freed objects
	chunk  uintptr                     // use uintptr instead of unsafe.Pointer to avoid write barriers
	nchunk uint32                      // bytes remaining in current chunk
	nalloc uint32                      // size of new chunks in bytes
	inuse  uintptr                     // in-use bytes now
	stat   *sysMemStat                 // memory statistic to account chunk allocations against
	zero   bool                        // zero allocations
}
// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
// Since assignments to mlink.next will result in a write barrier being performed
// this cannot be used by some of the internal GC structures. For example when
// the sweeper is placing an unmarked object on the free list it does not want the
// write barrier to be called since that could result in the object being reachable.
type mlink struct {
	_    sys.NotInHeap // marks mlink as never allocated from the GC'd heap
	next *mlink        // next free block on the list
}
// init configures f to hand out fixed-size objects of the given size,
// obtaining backing memory in large chunks via persistentalloc.
//
// first, if non-nil, is called the first time a given object is
// returned, with arg as its first argument. Chunk allocations are
// accounted against stat. Zeroing of allocations is enabled by
// default (see the zero flag).
func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *sysMemStat) {
	if size > _FixAllocChunk {
		throw("runtime: fixalloc size too large")
	}
	// Free objects are chained through an mlink header, so never
	// hand out blocks smaller than one.
	if size < unsafe.Sizeof(mlink{}) {
		size = unsafe.Sizeof(mlink{})
	}

	f.size = size
	f.first = first
	f.arg = arg
	f.list = nil
	f.chunk = 0
	f.nchunk = 0
	// Round the chunk size down to an exact multiple of size so the
	// tail of each chunk isn't wasted.
	f.nalloc = uint32(_FixAllocChunk / size * size)
	f.inuse = 0
	f.stat = stat
	f.zero = true
}
// alloc returns one fixed-size object, preferring the free list and
// otherwise carving the object out of the current chunk, refilling the
// chunk from persistentalloc when it is exhausted.
func (f *fixalloc) alloc() unsafe.Pointer {
	// A zero size means init was never called.
	if f.size == 0 {
		print("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n")
		throw("runtime: internal error")
	}

	if f.list != nil {
		// Fast path: reuse a previously freed object.
		v := unsafe.Pointer(f.list)
		f.list = f.list.next
		f.inuse += f.size
		if f.zero {
			// Freed objects contain stale data (including the mlink
			// header written by free), so clear when zeroing is on.
			memclrNoHeapPointers(v, f.size)
		}
		return v
	}
	if uintptr(f.nchunk) < f.size {
		// Not enough room left in the current chunk for another
		// object; grab a fresh chunk of f.nalloc bytes.
		f.chunk = uintptr(persistentalloc(uintptr(f.nalloc), 0, f.stat))
		f.nchunk = f.nalloc
	}

	// Carve the next object off the front of the chunk.
	v := unsafe.Pointer(f.chunk)
	if f.first != nil {
		// One-time initialization hook for a newly carved object.
		f.first(f.arg, v)
	}
	f.chunk = f.chunk + f.size
	f.nchunk -= uint32(f.size)
	f.inuse += f.size
	return v
}
// free returns the object at p to f's free list. The first word of the
// object is overwritten by the free-list link.
func (f *fixalloc) free(p unsafe.Pointer) {
	f.inuse -= f.size
	node := (*mlink)(p)
	node.next = f.list
	f.list = node
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector (GC).
//
// The GC runs concurrently with mutator threads, is type accurate (aka precise), allows multiple
// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
// non-generational and non-compacting. Allocation is done using size segregated per P allocation
// areas to minimize fragmentation while eliminating locks in the common case.
//
// The algorithm decomposes into several steps.
// This is a high level description of the algorithm being used. For an overview of GC a good
// place to start is Richard Jones' gchandbook.org.
//
// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
// 966-975.
// For journal quality proofs that these steps are complete, correct, and terminate see
// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
//
// 1. GC performs sweep termination.
//
// a. Stop the world. This causes all Ps to reach a GC safe-point.
//
// b. Sweep any unswept spans. There will only be unswept spans if
// this GC cycle was forced before the expected time.
//
// 2. GC performs the mark phase.
//
// a. Prepare for the mark phase by setting gcphase to _GCmark
// (from _GCoff), enabling the write barrier, enabling mutator
// assists, and enqueueing root mark jobs. No objects may be
// scanned until all Ps have enabled the write barrier, which is
// accomplished using STW.
//
// b. Start the world. From this point, GC work is done by mark
// workers started by the scheduler and by assists performed as
// part of allocation. The write barrier shades both the
// overwritten pointer and the new pointer value for any pointer
// writes (see mbarrier.go for details). Newly allocated objects
// are immediately marked black.
//
// c. GC performs root marking jobs. This includes scanning all
// stacks, shading all globals, and shading any heap pointers in
// off-heap runtime data structures. Scanning a stack stops a
// goroutine, shades any pointers found on its stack, and then
// resumes the goroutine.
//
// d. GC drains the work queue of grey objects, scanning each grey
// object to black and shading all pointers found in the object
// (which in turn may add those pointers to the work queue).
//
// e. Because GC work is spread across local caches, GC uses a
// distributed termination algorithm to detect when there are no
// more root marking jobs or grey objects (see gcMarkDone). At this
// point, GC transitions to mark termination.
//
// 3. GC performs mark termination.
//
// a. Stop the world.
//
// b. Set gcphase to _GCmarktermination, and disable workers and
// assists.
//
// c. Perform housekeeping like flushing mcaches.
//
// 4. GC performs the sweep phase.
//
// a. Prepare for the sweep phase by setting gcphase to _GCoff,
// setting up sweep state and disabling the write barrier.
//
// b. Start the world. From this point on, newly allocated objects
// are white, and allocating sweeps spans before use if necessary.
//
// c. GC does concurrent sweeping in the background and in response
// to allocation. See description below.
//
// 5. When sufficient allocation has taken place, replay the sequence
// starting with 1 above. See discussion of GC rate below.
// Concurrent sweep.
//
// The sweep phase proceeds concurrently with normal program execution.
// The heap is swept span-by-span both lazily (when a goroutine needs another span)
// and concurrently in a background goroutine (this helps programs that are not CPU bound).
// At the end of STW mark termination all spans are marked as "needs sweeping".
//
// The background sweeper goroutine simply sweeps spans one-by-one.
//
// To avoid requesting more OS memory while there are unswept spans, when a
// goroutine needs another span, it first attempts to reclaim that much memory
// by sweeping. When a goroutine needs to allocate a new small-object span, it
// sweeps small-object spans for the same object size until it frees at least
// one object. When a goroutine needs to allocate large-object span from heap,
// it sweeps spans until it frees at least that many pages into heap. There is
// one case where this may not suffice: if a goroutine sweeps and frees two
// nonadjacent one-page spans to the heap, it will allocate a new two-page
// span, but there can still be other one-page unswept spans which could be
// combined into a two-page span.
//
// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
// mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
// The finalizer goroutine is kicked off only when all spans are swept.
// When the next GC starts, it sweeps all not-yet-swept spans (if any).
// GC rate.
// Next GC is after we've allocated an extra amount of memory proportional to
// the amount already in use. The proportion is controlled by GOGC environment variable
// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
// (this mark is computed by the gcController.heapGoal method). This keeps the GC cost in
// linear proportion to the allocation cost. Adjusting GOGC just changes the linear constant
// (and also the amount of extra memory used).
// Oblets
//
// In order to prevent long pauses while scanning large objects and to
// improve parallelism, the garbage collector breaks up scan jobs for
// objects larger than maxObletBytes into "oblets" of at most
// maxObletBytes. When scanning encounters the beginning of a large
// object, it scans only the first oblet and enqueues the remaining
// oblets as new scan jobs.
package runtime
import (
"internal/cpu"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/atomic"
"internal/runtime/gc"
"unsafe"
)
const (
	// _DebugGC enables extra GC debugging when nonzero.
	// NOTE(review): semantics of nonzero values aren't visible in this
	// chunk — confirm at use sites.
	_DebugGC = 0

	// concurrentSweep is a debug flag. Disabling this flag
	// ensures all spans are swept while the world is stopped.
	concurrentSweep = true

	// debugScanConservative enables debug logging for stack
	// frames that are scanned conservatively.
	debugScanConservative = false

	// sweepMinHeapDistance is a lower bound on the heap distance
	// (in bytes) reserved for concurrent sweeping between GC
	// cycles.
	sweepMinHeapDistance = 1024 * 1024
)
// heapObjectsCanMove always returns false in the current garbage collector.
// It exists for go4.org/unsafe/assume-no-moving-gc, which is an
// unfortunate idea that had an even more unfortunate implementation.
// Every time a new Go release happened, the package stopped building,
// and the authors had to add a new file with a new //go:build line, and
// then the entire ecosystem of packages with that as a dependency had to
// explicitly update to the new version. Many packages depend on
// assume-no-moving-gc transitively, through paths like
// inet.af/netaddr -> go4.org/intern -> assume-no-moving-gc.
// This was causing a significant amount of friction around each new
// release, so we added this bool for the package to //go:linkname
// instead. The bool is still unfortunate, but it's not as bad as
// breaking the ecosystem on every new release.
//
// If the Go garbage collector ever does move heap objects, we can set
// this to true to break all the programs using assume-no-moving-gc.
//
//go:linkname heapObjectsCanMove
func heapObjectsCanMove() bool {
	// The current GC is non-moving; see the doc comment above.
	return false
}
// gcinit initializes GC state: the sweep state, the pacer, the cleanup
// pointer mask, the phase-transition semaphores, and lock ranks.
func gcinit() {
	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
		throw("size of Workbuf is suboptimal")
	}

	// No sweep on the first cycle.
	sweep.active.state.Store(sweepDrainedMask)

	// Initialize GC pacer state.
	// Use the environment variable GOGC for the initial gcPercent value.
	// Use the environment variable GOMEMLIMIT for the initial memoryLimit value.
	gcController.init(readGOGC(), readGOMEMLIMIT())

	// Set up the cleanup block ptr mask.
	for i := range cleanupBlockPtrMask {
		cleanupBlockPtrMask[i] = 0xff
	}

	// Both transition semaphores start available (no transition in
	// progress); see the workType comments on startSema/markDoneSema.
	work.startSema = 1
	work.markDoneSema = 1
	work.spanSPMCs.list.init(unsafe.Offsetof(spanSPMC{}.allnode))
	lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
	lockInit(&work.assistQueue.lock, lockRankAssistQueue)
	lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue)
	lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
	lockInit(&work.spanSPMCs.lock, lockRankSpanSPMCs)
	lockInit(&gcCleanups.lock, lockRankCleanupQueue)
}
// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine, the background
// scavenger goroutine, and enables GC.
func gcenable() {
	// Start the background sweeper and scavenger, then wait for
	// both to signal they are up before proceeding.
	ready := make(chan int, 2)
	go bgsweep(ready)
	go bgscavenge(ready)
	for i := 0; i < 2; i++ {
		<-ready
	}
	// Now that the runtime is initialized, GC is okay.
	memstats.enablegc = true
}
// Garbage collector phase.
// Indicates to write barrier and synchronization task to perform.
var gcphase uint32

// The compiler knows about this variable.
// If you change it, you must change builtin/runtime.go, too.
// If you change the first four bytes, you must also change the write
// barrier insertion code.
//
// writeBarrier should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname writeBarrier
var writeBarrier struct {
	enabled bool    // compiler emits a check of this before calling write barrier
	pad     [3]byte // compiler uses 32-bit load for "enabled" field
	alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
}

// gcBlackenEnabled is 1 if mutator assists and background mark
// workers are allowed to blacken objects. This must only be set when
// gcphase == _GCmark.
var gcBlackenEnabled uint32

const (
	_GCoff             = iota // GC not running; sweeping in background, write barrier disabled
	_GCmark                   // GC marking roots and workbufs: allocate black, write barrier ENABLED
	_GCmarktermination        // GC mark termination: allocate black, P's help GC, write barrier ENABLED
)
// setGCPhase atomically publishes a new GC phase and recomputes
// writeBarrier.enabled: the barrier is on during _GCmark and
// _GCmarktermination, off in _GCoff.
//
//go:nosplit
func setGCPhase(x uint32) {
	atomic.Store(&gcphase, x)
	writeBarrier.enabled = gcphase == _GCmark || gcphase == _GCmarktermination
}
// gcMarkWorkerMode represents the mode that a concurrent mark worker
// should operate in.
//
// Concurrent marking happens through four different mechanisms. One
// is mutator assists, which happen in response to allocations and are
// not scheduled. The other three are variations in the per-P mark
// workers and are distinguished by gcMarkWorkerMode.
type gcMarkWorkerMode int

const (
	// gcMarkWorkerNotWorker indicates that the next scheduled G is not
	// starting work and the mode should be ignored.
	gcMarkWorkerNotWorker gcMarkWorkerMode = iota

	// gcMarkWorkerDedicatedMode indicates that the P of a mark
	// worker is dedicated to running that mark worker. The mark
	// worker should run without preemption.
	gcMarkWorkerDedicatedMode

	// gcMarkWorkerFractionalMode indicates that a P is currently
	// running the "fractional" mark worker. The fractional worker
	// is necessary when GOMAXPROCS*gcBackgroundUtilization is not
	// an integer and using only dedicated workers would result in
	// utilization too far from the target of gcBackgroundUtilization.
	// The fractional worker should run until it is preempted and
	// will be scheduled to pick up the fractional part of
	// GOMAXPROCS*gcBackgroundUtilization.
	gcMarkWorkerFractionalMode

	// gcMarkWorkerIdleMode indicates that a P is running the mark
	// worker because it has nothing else to do. The idle worker
	// should run until it is preempted and account its time
	// against gcController.idleMarkTime.
	gcMarkWorkerIdleMode
)

// gcMarkWorkerModeStrings are the strings labels of gcMarkWorkerModes
// to use in execution traces. Indexed by gcMarkWorkerMode.
var gcMarkWorkerModeStrings = [...]string{
	"Not worker",
	"GC (dedicated)",
	"GC (fractional)",
	"GC (idle)",
}
// pollFractionalWorkerExit reports whether a fractional mark worker
// should self-preempt. It assumes it is called from the fractional
// worker.
func pollFractionalWorkerExit() bool {
	// This should be kept in sync with the fractional worker
	// scheduler logic in findRunnableGCWorker.
	now := nanotime()
	elapsed := now - gcController.markStartTime
	if elapsed <= 0 {
		return true
	}
	pp := getg().m.p.ptr()
	// Time this P has spent in fractional marking, including the
	// in-progress stint.
	self := pp.gcFractionalMarkTime.Load() + (now - pp.gcMarkWorkerStartTime)
	// Add some slack to the utilization goal so that the
	// fractional worker isn't behind again the instant it exits.
	return float64(self)/float64(elapsed) > 1.2*gcController.fractionalUtilizationGoal
}
// work holds the global state of the current GC cycle.
var work workType

type workType struct {
	full  lfstack          // lock-free list of full blocks workbuf
	_     cpu.CacheLinePad // prevents false-sharing between full and empty
	empty lfstack          // lock-free list of empty blocks workbuf
	_     cpu.CacheLinePad // prevents false-sharing between empty and wbufSpans

	wbufSpans struct {
		lock mutex
		// free is a list of spans dedicated to workbufs, but
		// that don't currently contain any workbufs.
		free mSpanList
		// busy is a list of all spans containing workbufs on
		// one of the workbuf lists.
		busy mSpanList
	}

	_ cpu.CacheLinePad // prevents false-sharing between wbufSpans and spanWorkMask

	// spanqMask is a bitmap indicating which Ps have local work worth stealing.
	// Set or cleared by the owning P, cleared by stealing Ps.
	//
	// spanqMask is like a proxy for a global queue. An important invariant is that
	// forced flushing like gcw.dispose must set this bit on any P that has local
	// span work.
	spanqMask pMask

	_ cpu.CacheLinePad // prevents false-sharing between spanqMask and everything else

	// List of all spanSPMCs.
	//
	// Only used if goexperiment.GreenTeaGC.
	spanSPMCs struct {
		lock mutex
		list listHeadManual // *spanSPMC
	}

	// Restore 64-bit alignment on 32-bit.
	// _ uint32

	// bytesMarked is the number of bytes marked this cycle. This
	// includes bytes blackened in scanned objects, noscan objects
	// that go straight to black, objects allocated as black during
	// the cycle, and permagrey objects scanned by markroot during
	// the concurrent scan phase.
	//
	// This is updated atomically during the cycle. Updates may be batched
	// arbitrarily, since the value is only read at the end of the cycle.
	//
	// Because of benign races during marking, this number may not
	// be the exact number of marked bytes, but it should be very
	// close.
	//
	// Put this field here because it needs 64-bit atomic access
	// (and thus 8-byte alignment even on 32-bit architectures).
	bytesMarked uint64

	markrootNext atomic.Uint32 // next markroot job
	markrootJobs atomic.Uint32 // number of markroot jobs

	nproc  uint32
	tstart int64
	nwait  uint32

	// Number of roots of various root types. Set by gcPrepareMarkRoots.
	//
	// During normal GC cycle, nStackRoots == nMaybeRunnableStackRoots == len(stackRoots);
	// during goroutine leak detection, nMaybeRunnableStackRoots is the number of stackRoots
	// scheduled for marking.
	// In both variants, nStackRoots == len(stackRoots).
	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nMaybeRunnableStackRoots int

	// The following fields monitor the GC phase of the current cycle during
	// goroutine leak detection.
	goroutineLeak struct {
		// Once set, it indicates that the GC will perform goroutine leak detection during
		// the next GC cycle; it is set by goroutineLeakGC and unset during gcStart.
		pending atomic.Bool

		// Once set, it indicates that the GC has started a goroutine leak detection run;
		// it is set during gcStart and unset during gcMarkTermination;
		//
		// Protected by STW.
		enabled bool

		// Once set, it indicates that the GC has performed goroutine leak detection during
		// the current GC cycle; it is set during gcMarkDone, right after goroutine leak detection,
		// and unset during gcMarkTermination;
		//
		// Protected by STW.
		done bool

		// The number of leaked goroutines during the last leak detection GC cycle.
		//
		// Write-protected by STW in findGoroutineLeaks.
		count int
	}

	// Base indexes of each root type. Set by gcPrepareMarkRoots.
	baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32

	// stackRoots is a snapshot of all of the Gs that existed before the
	// beginning of concurrent marking. During goroutine leak detection, stackRoots
	// is partitioned into two sets; to the left of nMaybeRunnableStackRoots are stackRoots
	// of running / runnable goroutines and to the right of nMaybeRunnableStackRoots are
	// stackRoots of unmarked / not runnable goroutines.
	// The stackRoots array is re-partitioned after each marking phase iteration.
	stackRoots []*g

	// Each type of GC state transition is protected by a lock.
	// Since multiple threads can simultaneously detect the state
	// transition condition, any thread that detects a transition
	// condition must acquire the appropriate transition lock,
	// re-check the transition condition and return if it no
	// longer holds or perform the transition if it does.
	// Likewise, any transition must invalidate the transition
	// condition before releasing the lock. This ensures that each
	// transition is performed by exactly one thread and threads
	// that need the transition to happen block until it has
	// happened.
	//
	// startSema protects the transition from "off" to mark or
	// mark termination.
	startSema uint32
	// markDoneSema protects transitions from mark to mark termination.
	markDoneSema uint32

	// Background mark completion signaling
	bgMarkDone uint32 // cas to 1 when at a background mark completion point

	// mode is the concurrency mode of the current GC cycle.
	mode gcMode

	// userForced indicates the current GC cycle was forced by an
	// explicit user call.
	userForced bool

	// initialHeapLive is the value of gcController.heapLive at the
	// beginning of this GC cycle.
	initialHeapLive uint64

	// assistQueue is a queue of assists that are blocked because
	// there was neither enough credit to steal or enough work to
	// do.
	assistQueue struct {
		lock mutex
		q    gQueue
	}

	// sweepWaiters is a list of blocked goroutines to wake when
	// we transition from mark termination to sweep.
	sweepWaiters struct {
		lock mutex
		list gList
	}

	// strongFromWeak controls how the GC interacts with weak->strong
	// pointer conversions.
	strongFromWeak struct {
		// block is a flag set during mark termination that prevents
		// new weak->strong conversions from executing by blocking the
		// goroutine and enqueuing it onto q.
		//
		// Mutated only by one goroutine at a time in gcMarkDone,
		// with globally-synchronizing events like forEachP and
		// stopTheWorld.
		block bool

		// q is a queue of goroutines that attempted to perform a
		// weak->strong conversion during mark termination.
		//
		// Protected by lock.
		lock mutex
		q    gQueue
	}

	// cycles is the number of completed GC cycles, where a GC
	// cycle is sweep termination, mark, mark termination, and
	// sweep. This differs from memstats.numgc, which is
	// incremented at mark termination.
	cycles atomic.Uint32

	// Timing/utilization stats for this cycle.
	stwprocs, maxprocs         int32
	tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start

	// pauseNS is the total STW time this cycle, measured as the time between
	// when stopping began (just before trying to stop Ps) and just after the
	// world started again.
	pauseNS int64

	// debug.gctrace heap sizes for this cycle.
	heap0, heap1, heap2 uint64

	// Cumulative estimated CPU usage.
	cpuStats
}
// GC runs a garbage collection and blocks the caller until the
// garbage collection is complete. It may also block the entire
// program.
func GC() {
	// We consider a cycle to be: sweep termination, mark, mark
	// termination, and sweep. This function shouldn't return
	// until a full cycle has been completed, from beginning to
	// end. Hence, we always want to finish up the current cycle
	// and start a new one. That means:
	//
	// 1. In sweep termination, mark, or mark termination of cycle
	// N, wait until mark termination N completes and transitions
	// to sweep N.
	//
	// 2. In sweep N, help with sweep N.
	//
	// At this point we can begin a full cycle N+1.
	//
	// 3. Trigger cycle N+1 by starting sweep termination N+1.
	//
	// 4. Wait for mark termination N+1 to complete.
	//
	// 5. Help with sweep N+1 until it's done.
	//
	// This all has to be written to deal with the fact that the
	// GC may move ahead on its own. For example, when we block
	// until mark termination N, we may wake up in cycle N+2.

	// Wait until the current sweep termination, mark, and mark
	// termination complete.
	n := work.cycles.Load()
	gcWaitOnMark(n)

	// We're now in sweep N or later. Trigger GC cycle N+1, which
	// will first finish sweep N if necessary and then enter sweep
	// termination N+1.
	gcStart(gcTrigger{kind: gcTriggerCycle, n: n + 1})

	// Wait for mark termination N+1 to complete.
	gcWaitOnMark(n + 1)

	// Finish sweep N+1 before returning. We do this both to
	// complete the cycle and because runtime.GC() is often used
	// as part of tests and benchmarks to get the system into a
	// relatively stable and isolated state.
	for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) {
		Gosched()
	}

	// Callers may assume that the heap profile reflects the
	// just-completed cycle when this returns (historically this
	// happened because this was a STW GC), but right now the
	// profile still reflects mark termination N, not N+1.
	//
	// As soon as all of the sweep frees from cycle N+1 are done,
	// we can go ahead and publish the heap profile.
	//
	// First, wait for sweeping to finish. (We know there are no
	// more spans on the sweep queue, but we may be concurrently
	// sweeping spans, so we have to wait.)
	for work.cycles.Load() == n+1 && !isSweepDone() {
		Gosched()
	}

	// Now we're really done with sweeping, so we can publish the
	// stable heap profile. Only do this if we haven't already hit
	// another mark termination.
	//
	// acquirem pins this goroutine to its M so the cycle/phase
	// check and the publish below stay consistent.
	mp := acquirem()
	cycle := work.cycles.Load()
	if cycle == n+1 || (gcphase == _GCmark && cycle == n+2) {
		mProf_PostSweep()
	}
	releasem(mp)
}
// goroutineLeakGC runs a GC cycle that performs goroutine leak detection.
//
//go:linkname goroutineLeakGC runtime/pprof.runtime_goroutineLeakGC
func goroutineLeakGC() {
	// Set the pending flag to true, instructing the next GC cycle to
	// perform goroutine leak detection.
	work.goroutineLeak.pending.Store(true)

	// Spin GC cycles until the pending flag is unset.
	// This ensures that goroutineLeakGC waits for a GC cycle that
	// actually performs goroutine leak detection.
	//
	// This is needed in case multiple concurrent calls to GC
	// are simultaneously fired by the system, wherein some
	// of them are dropped.
	//
	// In the vast majority of cases, only one loop iteration is needed;
	// however, multiple concurrent calls to goroutineLeakGC could lead to
	// the execution of additional GC cycles.
	//
	// Examples:
	//
	//	pending? | G1                      | G2
	//	---------|-------------------------|-----------------------
	//	-        | goroutineLeakGC()       | goroutineLeakGC()
	//	-        | pending.Store(true)     | .
	//	X        | for pending.Load()      | .
	//	X        | GC()                    | .
	//	X        | > gcStart()             | .
	//	X        | pending.Store(false)    | .
	//	...
	//	-        | > gcMarkDone()          | .
	//	-        | .                       | pending.Store(true)
	//	...
	//	X        | > gcMarkTermination()   | .
	//	X        | ...
	//	X        | < GC returns            | .
	//	X        | for pending.Load        | .
	//	X        | GC()                    | .
	//	X        | .                       | for pending.Load()
	//	X        | .                       | GC()
	//	...
	//
	// The first to pick up the pending flag will start a
	// leak detection cycle.
	for work.goroutineLeak.pending.Load() {
		GC()
	}
}
// gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has
// already completed this mark phase, it returns immediately.
func gcWaitOnMark(n uint32) {
	for {
		// Disable phase transitions.
		lock(&work.sweepWaiters.lock)
		nMarks := work.cycles.Load()
		if gcphase != _GCmark {
			// We've already completed this cycle's mark.
			nMarks++
		}

		if nMarks > n {
			// We're done.
			unlock(&work.sweepWaiters.lock)
			return
		}

		// Wait until sweep termination, mark, and mark
		// termination of cycle N complete. sweepWaiters are woken
		// at the mark-termination -> sweep transition, so loop to
		// re-check which cycle we actually woke up in.
		work.sweepWaiters.list.push(getg())
		goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceBlockUntilGCEnds, 1)
	}
}
// gcMode indicates how concurrent a GC cycle should be.
type gcMode int

const (
	gcBackgroundMode gcMode = iota // concurrent GC and sweep
	gcForceMode                    // stop-the-world GC now, concurrent sweep
	gcForceBlockMode               // stop-the-world GC now and STW sweep (forced by user)
)

// A gcTrigger is a predicate for starting a GC cycle. Specifically,
// it is an exit condition for the _GCoff phase.
type gcTrigger struct {
	kind gcTriggerKind
	now  int64  // gcTriggerTime: current time
	n    uint32 // gcTriggerCycle: cycle number to start
}

// gcTriggerKind selects which predicate a gcTrigger tests.
type gcTriggerKind int

const (
	// gcTriggerHeap indicates that a cycle should be started when
	// the heap size reaches the trigger heap size computed by the
	// controller.
	gcTriggerHeap gcTriggerKind = iota

	// gcTriggerTime indicates that a cycle should be started when
	// it's been more than forcegcperiod nanoseconds since the
	// previous GC cycle.
	gcTriggerTime

	// gcTriggerCycle indicates that a cycle should be started if
	// we have not yet started cycle number gcTrigger.n (relative
	// to work.cycles).
	gcTriggerCycle
)
// test reports whether the trigger condition is satisfied, meaning
// that the exit condition for the _GCoff phase has been met. The exit
// condition should be tested when allocating.
func (t gcTrigger) test() bool {
	// Never trigger while GC is disabled, the program is panicking,
	// or a cycle is already in progress.
	if !memstats.enablegc || panicking.Load() != 0 || gcphase != _GCoff {
		return false
	}
	if t.kind == gcTriggerHeap {
		// Fire once the live heap reaches the pacer's trigger point.
		goal, _ := gcController.trigger()
		return gcController.heapLive.Load() >= goal
	}
	if t.kind == gcTriggerTime {
		if gcController.gcPercent.Load() < 0 {
			// Periodic GC is disabled when GOGC=off.
			return false
		}
		prev := int64(atomic.Load64(&memstats.last_gc_nanotime))
		return prev != 0 && t.now-prev > forcegcperiod
	}
	if t.kind == gcTriggerCycle {
		// t.n > work.cycles, but accounting for wraparound.
		return int32(t.n-work.cycles.Load()) > 0
	}
	return true
}
// gcStart starts the GC. It transitions from _GCoff to _GCmark (if
// debug.gcstoptheworld == 0) or performs all of GC (if
// debug.gcstoptheworld != 0).
//
// This may return without performing this transition in some cases,
// such as when called on a system stack or with locks held.
func gcStart(trigger gcTrigger) {
	// Since this is called from malloc and malloc is called in
	// the guts of a number of libraries that might be holding
	// locks, don't attempt to start GC in non-preemptible or
	// potentially unstable situations.
	mp := acquirem()
	if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" {
		releasem(mp)
		return
	}
	releasem(mp)
	mp = nil

	if gp := getg(); gp.bubble != nil {
		// Disassociate the G from its synctest bubble while allocating.
		// This is less elegant than incrementing the group's active count,
		// but avoids any contamination between GC and synctest.
		bubble := gp.bubble
		gp.bubble = nil
		defer func() {
			gp.bubble = bubble
		}()
	}

	// Pick up the remaining unswept/not being swept spans concurrently
	//
	// This shouldn't happen if we're being invoked in background
	// mode since proportional sweep should have just finished
	// sweeping everything, but rounding errors, etc, may leave a
	// few spans unswept. In forced mode, this is necessary since
	// GC can be forced at any point in the sweeping cycle.
	//
	// We check the transition condition continuously here in case
	// this G gets delayed in to the next GC cycle.
	for trigger.test() && sweepone() != ^uintptr(0) {
	}

	// Perform GC initialization and the sweep termination
	// transition. work.startSema serializes attempts to start a
	// GC cycle.
	semacquire(&work.startSema)
	// Re-check transition condition under transition lock: another
	// goroutine may have started (and even finished) a cycle while
	// we were sweeping above.
	if !trigger.test() {
		semrelease(&work.startSema)
		return
	}

	// In gcstoptheworld debug mode, upgrade the mode accordingly.
	// We do this after re-checking the transition condition so
	// that multiple goroutines that detect the heap trigger don't
	// start multiple STW GCs.
	mode := gcBackgroundMode
	if debug.gcstoptheworld == 1 {
		mode = gcForceMode
	} else if debug.gcstoptheworld == 2 {
		mode = gcForceBlockMode
	}

	// Ok, we're doing it! Stop everybody else.
	// Note the lock order: gcsema before worldsema.
	semacquire(&gcsema)
	semacquire(&worldsema)

	// For stats, check if this GC was forced by the user.
	// Update it under gcsema to avoid gctrace getting wrong values.
	work.userForced = trigger.kind == gcTriggerCycle

	trace := traceAcquire()
	if trace.ok() {
		trace.GCStart()
		traceRelease(trace)
	}

	// Check and setup per-P state.
	for _, p := range allp {
		// Check that all Ps have finished deferred mcache flushes.
		if fg := p.mcache.flushGen.Load(); fg != mheap_.sweepgen {
			println("runtime: p", p.id, "flushGen", fg, "!= sweepgen", mheap_.sweepgen)
			throw("p mcache not flushed")
		}
		// Initialize ptrBuf if necessary.
		if goexperiment.GreenTeaGC && p.gcw.ptrBuf == nil {
			p.gcw.ptrBuf = (*[gc.PageSize / goarch.PtrSize]uintptr)(persistentalloc(gc.PageSize, goarch.PtrSize, &memstats.gcMiscSys))
		}
	}

	// Make sure background mark workers exist before the mark
	// phase can need them (must run on a regular G stack, before
	// the world is stopped).
	gcBgMarkStartWorkers()

	systemstack(gcResetMarkState)

	work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
	if work.stwprocs > numCPUStartup {
		// This is used to compute CPU time of the STW phases, so it
		// can't be more than the CPU count, even if GOMAXPROCS is.
		work.stwprocs = numCPUStartup
	}
	work.heap0 = gcController.heapLive.Load()
	work.pauseNS = 0
	work.mode = mode

	now := nanotime()
	work.tSweepTerm = now
	var stw worldStop
	systemstack(func() {
		stw = stopTheWorldWithSema(stwGCSweepTerm)
	})

	// Accumulate fine-grained stopping time.
	work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1)

	if goexperiment.RuntimeSecret {
		// The world is stopped, which means every M is either idle, blocked
		// in a syscall or this M that we are running on now.
		// The blocked Ms had any secret spill on their signal stacks erased
		// when they entered their respective states. Now we have to handle
		// this one.
		eraseSecretsSignalStk()
	}

	// Finish sweep before we start concurrent scan.
	systemstack(func() {
		finishsweep_m()
	})

	// clearpools before we start the GC. If we wait the memory will not be
	// reclaimed until the next GC cycle.
	clearpools()

	work.cycles.Add(1)

	// Assists and workers can start the moment we start
	// the world.
	gcController.startCycle(now, int(gomaxprocs), trigger)

	// Notify the CPU limiter that assists may begin.
	gcCPULimiter.startGCTransition(true, now)

	// In STW mode, disable scheduling of user Gs. This may also
	// disable scheduling of this goroutine, so it may block as
	// soon as we start the world again.
	if mode != gcBackgroundMode {
		schedEnableUser(false)
	}

	// If goroutine leak detection is pending, enable it for this GC cycle.
	if work.goroutineLeak.pending.Load() {
		work.goroutineLeak.enabled = true
		work.goroutineLeak.pending.Store(false)
		// Set all sync objects of blocked goroutines as untraceable
		// by the GC. Only set as traceable at the end of the GC cycle.
		setSyncObjectsUntraceable()
	}

	// Enter concurrent mark phase and enable
	// write barriers.
	//
	// Because the world is stopped, all Ps will
	// observe that write barriers are enabled by
	// the time we start the world and begin
	// scanning.
	//
	// Write barriers must be enabled before assists are
	// enabled because they must be enabled before
	// any non-leaf heap objects are marked. Since
	// allocations are blocked until assists can
	// happen, we want to enable assists as early as
	// possible.
	setGCPhase(_GCmark)

	gcBgMarkPrepare() // Must happen before assists are enabled.
	gcPrepareMarkRoots()

	// Mark all active tinyalloc blocks. Since we're
	// allocating from these, they need to be black like
	// other allocations. The alternative is to blacken
	// the tiny block on every allocation from it, which
	// would slow down the tiny allocator.
	gcMarkTinyAllocs()

	// At this point all Ps have enabled the write
	// barrier, thus maintaining the no white to
	// black invariant. Enable mutator assists to
	// put back-pressure on fast allocating
	// mutators.
	atomic.Store(&gcBlackenEnabled, 1)

	// In STW mode, we could block the instant systemstack
	// returns, so make sure we're not preemptible.
	mp = acquirem()

	// Update the CPU stats pause time.
	//
	// Use maxprocs instead of stwprocs here because the total time
	// computed in the CPU stats is based on maxprocs, and we want them
	// to be comparable.
	work.cpuStats.accumulateGCPauseTime(nanotime()-stw.finishedStopping, work.maxprocs)

	// Concurrent mark.
	systemstack(func() {
		now = startTheWorldWithSema(0, stw)
		work.pauseNS += now - stw.startedStopping
		work.tMark = now

		// Release the CPU limiter.
		gcCPULimiter.finishGCTransition(now)
	})

	// Release the world sema before Gosched() in STW mode
	// because we will need to reacquire it later but before
	// this goroutine becomes runnable again, and we could
	// self-deadlock otherwise.
	semrelease(&worldsema)
	releasem(mp)

	// Make sure we block instead of returning to user code
	// in STW mode.
	if mode != gcBackgroundMode {
		Gosched()
	}

	semrelease(&work.startSema)
}
// gcMarkDoneFlushed counts the number of P's with flushed work.
//
// Ideally this would be a captured local in gcMarkDone, but forEachP
// escapes its callback closure, so it can't capture anything.
//
// This is protected by markDoneSema.
var gcMarkDoneFlushed uint32

// gcDebugMarkDone contains fields used to debug/test mark termination.
var gcDebugMarkDone struct {
	// spinAfterRaggedBarrier forces gcMarkDone to spin after it executes
	// the ragged barrier.
	spinAfterRaggedBarrier atomic.Bool

	// restartedDueTo27993 indicates that we restarted mark termination
	// due to the bug described in issue #27993.
	//
	// Protected by worldsema.
	restartedDueTo27993 bool
}
// gcMarkDone transitions the GC from mark to mark termination if all
// reachable objects have been marked (that is, there are no grey
// objects and can be no more in the future). Otherwise, it flushes
// all local work to the global queues where it can be discovered by
// other workers.
//
// All goroutines performing GC work must call gcBeginWork to signal
// that they're executing GC work. They must call gcEndWork when done.
// This should be called when all local mark work has been drained and
// there are no remaining workers. Specifically, when gcEndWork returns
// true.
//
// The calling context must be preemptible.
//
// Flushing local work is important because idle Ps may have local
// work queued. This is the only way to make that work visible and
// drive GC to completion.
//
// It is explicitly okay to have write barriers in this function. If
// it does transition to mark termination, then all reachable objects
// have been marked, so the write barrier cannot shade any more
// objects.
func gcMarkDone() {
	// Ensure only one thread is running the ragged barrier at a
	// time.
	semacquire(&work.markDoneSema)

top:
	// Re-check transition condition under transition lock.
	//
	// It's critical that this checks the global work queues are
	// empty before performing the ragged barrier. Otherwise,
	// there could be global work that a P could take after the P
	// has passed the ragged barrier.
	if !(gcphase == _GCmark && gcIsMarkDone()) {
		semrelease(&work.markDoneSema)
		return
	}

	// forEachP needs worldsema to execute, and we'll need it to
	// stop the world later, so acquire worldsema now.
	semacquire(&worldsema)

	// Prevent weak->strong conversions from generating additional
	// GC work. forEachP will guarantee that it is observed globally.
	work.strongFromWeak.block = true

	// Flush all local buffers and collect flushedWork flags.
	// This is the "ragged barrier": each P passes through the
	// callback at its own pace, with the world still running.
	gcMarkDoneFlushed = 0
	forEachP(waitReasonGCMarkTermination, func(pp *p) {
		// Flush the write barrier buffer, since this may add
		// work to the gcWork.
		wbBufFlush1(pp)

		// Flush the gcWork, since this may create global work
		// and set the flushedWork flag.
		//
		// TODO(austin): Break up these workbufs to
		// better distribute work.
		pp.gcw.dispose()

		// Collect the flushedWork flag.
		if pp.gcw.flushedWork {
			atomic.Xadd(&gcMarkDoneFlushed, 1)
			pp.gcw.flushedWork = false
		}
	})

	if gcMarkDoneFlushed != 0 {
		// More grey objects were discovered since the
		// previous termination check, so there may be more
		// work to do. Keep going. It's possible the
		// transition condition became true again during the
		// ragged barrier, so re-check it.
		semrelease(&worldsema)
		goto top
	}

	// For debugging/testing.
	for gcDebugMarkDone.spinAfterRaggedBarrier.Load() {
	}

	// There was no global work, no local work, and no Ps
	// communicated work since we took markDoneSema. Therefore
	// there are no grey objects and no more objects can be
	// shaded. Transition to mark termination.
	now := nanotime()
	work.tMarkTerm = now
	getg().m.preemptoff = "gcing"
	var stw worldStop
	systemstack(func() {
		stw = stopTheWorldWithSema(stwGCMarkTerm)
	})

	// The gcphase is _GCmark, it will transition to _GCmarktermination
	// below. The important thing is that the wb remains active until
	// all marking is complete. This includes writes made by the GC.

	// Accumulate fine-grained stopping time.
	work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1)

	// There is sometimes work left over when we enter mark termination due
	// to write barriers performed after the completion barrier above.
	// Detect this and resume concurrent mark. This is obviously
	// unfortunate.
	//
	// See issue #27993 for details.
	//
	// Switch to the system stack to call wbBufFlush1, though in this case
	// it doesn't matter because we're non-preemptible anyway.
	restart := false
	systemstack(func() {
		for _, p := range allp {
			wbBufFlush1(p)
			if !p.gcw.empty() {
				restart = true
				break
			}
		}
	})

	// Check whether we need to resume the marking phase because of issue #27993
	// or because of goroutine leak detection.
	if restart || (work.goroutineLeak.enabled && !work.goroutineLeak.done) {
		if restart {
			// Restart because of issue #27993.
			gcDebugMarkDone.restartedDueTo27993 = true
		} else {
			// Marking has reached a fixed-point. Attempt to detect goroutine leaks.
			//
			// If the returned value is true, then detection already concluded for this cycle.
			// Otherwise, more runnable goroutines were discovered, requiring additional mark work.
			work.goroutineLeak.done = findGoroutineLeaks()
		}
		getg().m.preemptoff = ""
		systemstack(func() {
			// Accumulate the time we were stopped before we had to start again.
			work.cpuStats.accumulateGCPauseTime(nanotime()-stw.finishedStopping, work.maxprocs)

			// Start the world again.
			now := startTheWorldWithSema(0, stw)
			work.pauseNS += now - stw.startedStopping
		})
		semrelease(&worldsema)
		goto top
	}

	gcComputeStartingStackSize()

	// Disable assists and background workers. We must do
	// this before waking blocked assists.
	atomic.Store(&gcBlackenEnabled, 0)

	// Notify the CPU limiter that GC assists will now cease.
	gcCPULimiter.startGCTransition(false, now)

	// Wake all blocked assists. These will run when we
	// start the world again.
	gcWakeAllAssists()

	// Wake all blocked weak->strong conversions. These will run
	// when we start the world again.
	work.strongFromWeak.block = false
	gcWakeAllStrongFromWeak()

	// Likewise, release the transition lock. Blocked
	// workers and assists will run when we start the
	// world again.
	semrelease(&work.markDoneSema)

	// In STW mode, re-enable user goroutines. These will be
	// queued to run after we start the world.
	schedEnableUser(true)

	// endCycle depends on all gcWork cache stats being flushed.
	// The termination algorithm above ensured that up to
	// allocations since the ragged barrier.
	gcController.endCycle(now, int(gomaxprocs))

	// Perform mark termination. This will restart the world.
	gcMarkTermination(stw)
}
// isMaybeRunnable reports whether a goroutine may still be semantically
// runnable. For goroutines that are semantically runnable, this eventually
// returns true as the GC marking phase progresses. It returns false for
// leaked goroutines, and for goroutines the GC has not yet computed as
// possibly runnable.
func (gp *g) isMaybeRunnable() bool {
	// Anything not parked in _Gwaiting is trivially maybe-runnable.
	if readgstatus(gp) != _Gwaiting {
		return true
	}

	switch gp.waitreason {
	case waitReasonSelectNoCases,
		waitReasonChanSendNilChan,
		waitReasonChanReceiveNilChan:
		// A select with no cases, or a send/receive on a nil channel,
		// can never proceed: unrunnable by definition.
		return false
	case waitReasonChanReceive,
		waitReasonSelect,
		waitReasonChanSend:
		// Walk every *sudog the goroutine is parked on; it is
		// maybe-runnable iff at least one of the channels involved
		// is marked (i.e. still reachable).
		for link := gp.waiting; link != nil; link = link.waitlink {
			if isMarkedOrNotInHeap(unsafe.Pointer(link.c.get())) {
				return true
			}
		}
		return false
	case waitReasonSyncCondWait,
		waitReasonSyncWaitGroupWait,
		waitReasonSyncMutexLock,
		waitReasonSyncRWMutexLock,
		waitReasonSyncRWMutexRLock:
		// Blocked on a mutex, wait group, or condition variable:
		// check whether the primitive attached to the head sudog
		// is marked.
		if head := gp.waiting; head != nil {
			return isMarkedOrNotInHeap(head.elem.get())
		}
	}
	// Unknown or unhandled wait reason: conservatively maybe-runnable.
	return true
}
// findMaybeRunnableGoroutines checks to see if more blocked but maybe-runnable goroutines exist.
// If so, it adds them into root set and increments work.markrootJobs accordingly.
// Returns true if we need to run another phase of markroots; returns false otherwise.
func findMaybeRunnableGoroutines() (moreWork bool) {
	oldRootJobs := work.markrootJobs.Load()

	// To begin with we have a set of unchecked stackRoots between
	// vIndex and ivIndex. During the loop, anything < vIndex should be
	// valid stackRoots and anything >= ivIndex should be invalid stackRoots.
	// The loop terminates when the two indices meet.
	var vIndex, ivIndex int = work.nMaybeRunnableStackRoots, work.nStackRoots
	// Reorder goroutine list: an in-place two-pointer partition that moves
	// every maybe-runnable G into the [0, vIndex) prefix.
	for vIndex < ivIndex {
		if work.stackRoots[vIndex].isMaybeRunnable() {
			// Already in the right place; grow the valid prefix.
			vIndex = vIndex + 1
			continue
		}
		// stackRoots[vIndex] is not maybe-runnable. Scan backwards from
		// the invalid suffix for a maybe-runnable G to swap into its slot.
		// Note the inner loop keeps shrinking ivIndex, so every element it
		// skips is classified as invalid and never revisited.
		for ivIndex = ivIndex - 1; ivIndex != vIndex; ivIndex = ivIndex - 1 {
			if gp := work.stackRoots[ivIndex]; gp.isMaybeRunnable() {
				work.stackRoots[ivIndex] = work.stackRoots[vIndex]
				work.stackRoots[vIndex] = gp
				vIndex = vIndex + 1
				break
			}
		}
	}

	// Publish the new number of stack-root mark jobs. Only ever move the
	// job count forward; markrootJobs must not shrink mid-cycle.
	newRootJobs := work.baseStacks + uint32(vIndex)
	if newRootJobs > oldRootJobs {
		work.nMaybeRunnableStackRoots = vIndex
		work.markrootJobs.Store(newRootJobs)
	}
	return newRootJobs > oldRootJobs
}
// setSyncObjectsUntraceable walks all goroutines and marks the elem and c
// fields of their sudogs as untraceable. This stops the GC from keeping the
// referenced synchronization objects alive merely by following these
// pointers while running goroutine leak (deadlock) detection.
func setSyncObjectsUntraceable() {
	assertWorldStopped()

	forEachGRace(func(gp *g) {
		// Only goroutines blocked on concurrency operations that could
		// leak have their sync objects hidden from the marker.
		switch {
		case gp.waitreason.isSyncWait():
			// Mutexes, wait groups, and condition variables hang off
			// the sudog's elem field.
			for s := gp.waiting; s != nil; s = s.waitlink {
				s.elem.setUntraceable()
			}
		case gp.waitreason.isChanWait():
			// Channels (and select cases) hang off the sudog's c field.
			for s := gp.waiting; s != nil; s = s.waitlink {
				s.c.setUntraceable()
			}
		}
	})
}
// gcRestoreSyncObjects undoes setSyncObjectsUntraceable: it restores the
// elem and c fields of every sudog to their traceable form. Call this once
// the goroutine leak detection phase has finished.
func gcRestoreSyncObjects() {
	assertWorldStopped()

	forEachGRace(func(gp *g) {
		// Restore both fields unconditionally; setTraceable is applied to
		// every sudog in the goroutine's wait list.
		for s := gp.waiting; s != nil; s = s.waitlink {
			s.elem.setTraceable()
			s.c.setTraceable()
		}
	})
}
// findGoroutineLeaks scans the remaining stackRoots and marks any which are
// blocked over exclusively unreachable concurrency primitives as leaked (deadlocked).
// Returns true if the goroutine leak check was performed (or unnecessary).
// Returns false if the GC cycle has not yet computed all maybe-runnable goroutines.
func findGoroutineLeaks() bool {
	assertWorldStopped()

	// Leaked goroutines are reported and marked unreachable below, and
	// marking then resumes. Their *g structs must still be marked (they
	// get reused), but their stacks are not scanned.
	if work.nMaybeRunnableStackRoots == work.nStackRoots {
		// nMaybeRunnableStackRoots == nStackRoots means that all goroutines are marked.
		return true
	}

	// Check whether any more maybe-runnable goroutines can be found by the GC.
	if findMaybeRunnableGoroutines() {
		// We found more work, so we need to resume the marking phase.
		return false
	}

	// For the remaining goroutines, mark them as unreachable and leaked.
	work.goroutineLeak.count = work.nStackRoots - work.nMaybeRunnableStackRoots
	for i := work.nMaybeRunnableStackRoots; i < work.nStackRoots; i++ {
		gp := work.stackRoots[i]
		casgstatus(gp, _Gwaiting, _Gleaked)

		// Add the primitives causing the goroutine leaks
		// to the GC work queue, to ensure they are marked.
		//
		// NOTE(vsaioc): these primitives should also be reachable
		// from the goroutine's stack, but let's play it safe.
		switch {
		case gp.waitreason.isChanWait():
			for sg := gp.waiting; sg != nil; sg = sg.waitlink {
				shade(sg.c.uintptr())
			}
		case gp.waitreason.isSyncWait():
			for sg := gp.waiting; sg != nil; sg = sg.waitlink {
				shade(sg.elem.uintptr())
			}
		}
	}

	// Put the remaining roots as ready for marking and drain them.
	work.markrootJobs.Add(int32(work.nStackRoots - work.nMaybeRunnableStackRoots))
	work.nMaybeRunnableStackRoots = work.nStackRoots
	return true
}
// gcMarkTermination performs the mark termination phase of the GC cycle:
// it finishes marking, turns off the write barrier, starts sweeping,
// updates all GC statistics, and restarts the world.
//
// World must be stopped and mark assists and background workers must be
// disabled.
func gcMarkTermination(stw worldStop) {
	// Start marktermination (write barrier remains enabled for now).
	setGCPhase(_GCmarktermination)

	work.heap1 = gcController.heapLive.Load()
	startTime := nanotime()

	mp := acquirem()
	mp.preemptoff = "gcing"
	mp.traceback = 2
	curgp := mp.curg
	// N.B. The execution tracer is not aware of this status
	// transition and handles it specially based on the
	// wait reason.
	casGToWaitingForSuspendG(curgp, _Grunning, waitReasonGarbageCollection)

	// Run gc on the g0 stack. We do this so that the g stack
	// we're currently running on will no longer change. Cuts
	// the root set down a bit (g0 stacks are not scanned, and
	// we don't need to scan gc's internal state). We also
	// need to switch to g0 so we can shrink the stack.
	systemstack(func() {
		gcMark(startTime)
		// Must return immediately.
		// The outer function's stack may have moved
		// during gcMark (it shrinks stacks, including the
		// outer function's stack), so we must not refer
		// to any of its variables. Return back to the
		// non-system stack to pick up the new addresses
		// before continuing.
	})

	var stwSwept bool
	systemstack(func() {
		work.heap2 = work.bytesMarked
		if debug.gccheckmark > 0 {
			// Run a full non-parallel, stop-the-world mark using
			// checkmark bits to verify concurrent mark found all
			// reachable objects.
			runCheckmark(func(_ *gcWork) { gcPrepareMarkRoots() })
		}
		if debug.checkfinalizers > 0 {
			checkFinalizersAndCleanups()
		}

		// marking is complete so we can turn the write barrier off
		setGCPhase(_GCoff)
		stwSwept = gcSweep(work.mode)
	})

	mp.traceback = 0
	casgstatus(curgp, _Gwaiting, _Grunning)

	trace := traceAcquire()
	if trace.ok() {
		trace.GCDone()
		traceRelease(trace)
	}

	// all done
	mp.preemptoff = ""

	if gcphase != _GCoff {
		throw("gc done but gcphase != _GCoff")
	}

	// Record heapInUse for scavenger.
	memstats.lastHeapInUse = gcController.heapInUse.load()

	// Update GC trigger and pacing, as well as downstream consumers
	// of this pacing information, for the next cycle.
	systemstack(gcControllerCommit)

	// Update timing memstats
	now := nanotime()
	sec, nsec, _ := time_now()
	unixNow := sec*1e9 + int64(nsec)
	work.pauseNS += now - stw.startedStopping
	work.tEnd = now
	atomic.Store64(&memstats.last_gc_unix, uint64(unixNow)) // must be Unix time to make sense to user
	atomic.Store64(&memstats.last_gc_nanotime, uint64(now)) // monotonic time for us
	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
	memstats.pause_total_ns += uint64(work.pauseNS)

	// Accumulate CPU stats.
	//
	// Use maxprocs instead of stwprocs for GC pause time because the total time
	// computed in the CPU stats is based on maxprocs, and we want them to be
	// comparable.
	//
	// Pass gcMarkPhase=true to accumulate so we can get all the latest GC CPU stats
	// in there too.
	work.cpuStats.accumulateGCPauseTime(now-stw.finishedStopping, work.maxprocs)
	work.cpuStats.accumulate(now, true)

	// Compute overall GC CPU utilization.
	// Omit idle marking time from the overall utilization here since it's "free".
	memstats.gc_cpu_fraction = float64(work.cpuStats.GCTotalTime-work.cpuStats.GCIdleTime) / float64(work.cpuStats.TotalTime)

	// Reset assist time and background time stats.
	//
	// Do this now, instead of at the start of the next GC cycle, because
	// these two may keep accumulating even if the GC is not active.
	scavenge.assistTime.Store(0)
	scavenge.backgroundTime.Store(0)

	// Reset idle time stat.
	sched.idleTime.Store(0)

	if work.userForced {
		memstats.numforcedgc++
	}

	// Bump GC cycle count and wake goroutines waiting on sweep.
	lock(&work.sweepWaiters.lock)
	memstats.numgc++
	injectglist(&work.sweepWaiters.list)
	unlock(&work.sweepWaiters.lock)

	// Increment the scavenge generation now.
	//
	// This moment represents peak heap in use because we're
	// about to start sweeping.
	mheap_.pages.scav.index.nextGen()

	// Release the CPU limiter.
	gcCPULimiter.finishGCTransition(now)

	// Finish the current heap profiling cycle and start a new
	// heap profiling cycle. We do this before starting the world
	// so events don't leak into the wrong cycle.
	mProf_NextCycle()

	// There may be stale spans in mcaches that need to be swept.
	// Those aren't tracked in any sweep lists, so we need to
	// count them against sweep completion until we ensure all
	// those spans have been forced out.
	//
	// If gcSweep fully swept the heap (for example if the sweep
	// is not concurrent due to a GODEBUG setting), then we expect
	// the sweepLocker to be invalid, since sweeping is done.
	//
	// N.B. Below we might duplicate some work from gcSweep; this is
	// fine as all that work is idempotent within a GC cycle, and
	// we're still holding worldsema so a new cycle can't start.
	sl := sweep.active.begin()
	if !stwSwept && !sl.valid {
		throw("failed to set sweep barrier")
	} else if stwSwept && sl.valid {
		throw("non-concurrent sweep failed to drain all sweep queues")
	}

	if work.goroutineLeak.enabled {
		// Restore the elem and c fields of all sudogs to their original values.
		gcRestoreSyncObjects()
	}

	var goroutineLeakDone bool
	systemstack(func() {
		// Pull the GC out of goroutine leak detection mode.
		work.goroutineLeak.enabled = false
		goroutineLeakDone = work.goroutineLeak.done
		work.goroutineLeak.done = false

		// The memstats updated above must be updated with the world
		// stopped to ensure consistency of some values, such as
		// sched.idleTime and sched.totaltime. memstats also include
		// the pause time (work.pauseNS), forcing computation of the
		// total pause time before the pause actually ends.
		//
		// Here we reuse the same now used above for starting the world
		// so that the time added to /sched/pauses/total/gc:seconds
		// will be consistent with the value in memstats.
		startTheWorldWithSema(now, stw)
	})

	// Flush the heap profile so we can start a new cycle next GC.
	// This is relatively expensive, so we don't do it with the
	// world stopped.
	mProf_Flush()

	// Prepare workbufs for freeing by the sweeper. We do this
	// asynchronously because it can take non-trivial time.
	prepareFreeWorkbufs()

	// Free stack spans. This must be done between GC cycles.
	systemstack(freeStackSpans)

	// Ensure all mcaches are flushed. Each P will flush its own
	// mcache before allocating, but idle Ps may not. Since this
	// is necessary to sweep all spans, we need to ensure all
	// mcaches are flushed before we start the next GC cycle.
	//
	// While we're here, flush the page cache for idle Ps to avoid
	// having pages get stuck on them. These pages are hidden from
	// the scavenger, so in small idle heaps a significant amount
	// of additional memory might be held onto.
	//
	// Also, flush the pinner cache, to avoid leaking that memory
	// indefinitely.
	if debug.gctrace > 1 {
		clear(memstats.lastScanStats[:])
	}
	forEachP(waitReasonFlushProcCaches, func(pp *p) {
		pp.mcache.prepareForSweep()
		if pp.status == _Pidle {
			systemstack(func() {
				lock(&mheap_.lock)
				pp.pcache.flush(&mheap_.pages)
				unlock(&mheap_.lock)
			})
		}
		if debug.gctrace > 1 {
			pp.gcw.flushScanStats(&memstats.lastScanStats)
		}
		pp.pinnerCache = nil
	})
	if sl.valid {
		// Now that we've swept stale spans in mcaches, they don't
		// count against unswept spans.
		//
		// Note: this sweepLocker may not be valid if sweeping had
		// already completed during the STW. See the corresponding
		// begin() call that produced sl.
		sweep.active.end(sl)
	}

	// Print gctrace before dropping worldsema. As soon as we drop
	// worldsema another cycle could start and smash the stats
	// we're trying to print.
	if debug.gctrace > 0 {
		util := int(memstats.gc_cpu_fraction * 100)

		var sbuf [24]byte
		printlock()
		print("gc ", memstats.numgc,
			" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
			util, "%")
		if goroutineLeakDone {
			print(" (checking for goroutine leaks)")
		}
		print(": ")
		prev := work.tSweepTerm
		// Wall-clock durations of the three phase transitions:
		// sweep term -> mark -> mark term -> end.
		for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
			if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
			prev = ns
		}
		print(" ms clock, ")
		// CPU times: STW sweep term, assist, dedicated+fractional,
		// idle marking, and STW mark term.
		for i, ns := range []int64{
			int64(work.stwprocs) * (work.tMark - work.tSweepTerm),
			gcController.assistTime.Load(),
			gcController.dedicatedMarkTime.Load() + gcController.fractionalMarkTime.Load(),
			gcController.idleMarkTime.Load(),
			int64(work.stwprocs) * (work.tEnd - work.tMarkTerm),
		} {
			if i == 2 || i == 3 {
				// Separate mark time components with /.
				print("/")
			} else if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
		}
		print(" ms cpu, ",
			work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
			gcController.lastHeapGoal>>20, " MB goal, ",
			gcController.lastStackScan.Load()>>20, " MB stacks, ",
			gcController.globalsScan.Load()>>20, " MB globals, ",
			work.maxprocs, " P")
		if work.userForced {
			print(" (forced)")
		}
		print("\n")
		if debug.gctrace > 1 {
			dumpScanStats()
		}
		printunlock()
	}

	// Print finalizer/cleanup queue length. Like gctrace, do this before the next GC starts.
	// The fact that the next GC might start is not that problematic here, but acts as a convenient
	// lock on printing this information (so it cannot overlap with itself from the next GC cycle).
	if debug.checkfinalizers > 0 {
		fq, fe := finReadQueueStats()
		fn := max(int64(fq)-int64(fe), 0)

		cq, ce := gcCleanups.readQueueStats()
		cn := max(int64(cq)-int64(ce), 0)

		println("checkfinalizers: queue:", fn, "finalizers +", cn, "cleanups")
	}

	// Set any arena chunks that were deferred to fault.
	lock(&userArenaState.lock)
	faultList := userArenaState.fault
	userArenaState.fault = nil
	unlock(&userArenaState.lock)
	for _, lc := range faultList {
		lc.mspan.setUserArenaChunkToFault()
	}

	// Enable huge pages on some metadata if we cross a heap threshold.
	if gcController.heapGoal() > minHeapForMetadataHugePages {
		systemstack(func() {
			mheap_.enableMetadataHugePages()
		})
	}

	semrelease(&worldsema)
	semrelease(&gcsema)
	// Careful: another GC cycle may start now.

	releasem(mp)
	mp = nil

	// now that gc is done, kick off finalizer thread if needed
	if !concurrentSweep {
		// give the queued finalizers, if any, a chance to run
		Gosched()
	}
}
// gcBgMarkStartWorkers prepares background mark worker goroutines. These
// goroutines will not run until the mark phase, but they must be started while
// the work is not stopped and from a regular G stack. The caller must hold
// worldsema.
func gcBgMarkStartWorkers() {
	// Background marking is performed by per-P G's. Ensure that each P has
	// a background GC G.
	//
	// Worker Gs don't exit if gomaxprocs is reduced. If it is raised
	// again, we can reuse the old workers; no need to create new workers.
	if gcBgMarkWorkerCount >= gomaxprocs {
		return
	}

	// Increment mp.locks when allocating. We are called within gcStart,
	// and thus must not trigger another gcStart via an allocation. gcStart
	// bails when allocating with locks held, so simulate that for these
	// allocations.
	//
	// TODO(prattmic): cleanup gcStart to use a more explicit "in gcStart"
	// check for bailing.
	mp := acquirem()
	ready := make(chan struct{}, 1)
	releasem(mp)

	for gcBgMarkWorkerCount < gomaxprocs {
		mp := acquirem() // See above, we allocate a closure here.
		go gcBgMarkWorker(ready)
		releasem(mp)

		// N.B. we intentionally wait on each goroutine individually
		// rather than starting all in a batch and then waiting once
		// afterwards. By running one goroutine at a time, we can take
		// advantage of runnext to bounce back and forth between
		// workers and this goroutine. In an overloaded application,
		// this can reduce GC start latency by prioritizing these
		// goroutines rather than waiting on the end of the run queue.
		<-ready
		// The worker is now guaranteed to be added to the pool before
		// its P's next findRunnableGCWorker.
		gcBgMarkWorkerCount++
	}
}
// gcBgMarkPrepare sets up state for background marking.
// Mutator assists must not yet be enabled.
func gcBgMarkPrepare() {
	// Background marking stops when the work queues are empty and there
	// are no more workers (since this is concurrent, that may be a
	// transient state; mark termination cleans it up). Because workers
	// and assists come and go, we don't know how many there will be, so
	// we pretend there is an arbitrarily large pool of workers, nearly
	// all of them "waiting". A working worker decrements nwait; when
	// nproc == nwait, no workers are active.
	const unbounded = ^uint32(0)
	work.nproc = unbounded
	work.nwait = unbounded
}
// gcBgMarkWorkerNode is an entry in the gcBgMarkWorkerPool. It points to a single
// gcBgMarkWorker goroutine.
type gcBgMarkWorkerNode struct {
	// Unused workers are managed in a lock-free stack. This field must be first.
	node lfnode

	// The g of this worker.
	gp guintptr

	// Release this m on park. This is used to communicate with the unlock
	// function, which cannot access the G's stack. It is unused outside of
	// gcBgMarkWorker().
	m muintptr
}

// gcBgMarkWorkerNodePadded pads gcBgMarkWorkerNode out (to tagAlign, minus
// any ASan red zone) so heap-allocated nodes satisfy the alignment that the
// lock-free stack's tagged pointers require.
type gcBgMarkWorkerNodePadded struct {
	gcBgMarkWorkerNode
	pad [tagAlign - unsafe.Sizeof(gcBgMarkWorkerNode{}) - gcBgMarkWorkerNodeRedZoneSize]byte
}

// gcBgMarkWorkerNodeRedZoneSize is the size of the red zone ASan places
// after the node allocation; zero when ASan is disabled.
const gcBgMarkWorkerNodeRedZoneSize = (16 << 2) * asanenabledBit // redZoneSize(512)
// gcBgMarkWorker is the body of a background mark worker goroutine.
// The worker registers itself in gcBgMarkWorkerPool and parks; the
// scheduler (gcController.findRunnableGCWorker) wakes it whenever mark
// work should run on a P, and the worker drains that P's work buffers
// until it is preempted or the work completes. ready is signaled once
// the worker is initialized and registered in the pool.
func gcBgMarkWorker(ready chan struct{}) {
	gp := getg()

	// We pass node to a gopark unlock function, so it can't be on
	// the stack (see gopark). Prevent deadlock from recursively
	// starting GC by disabling preemption.
	gp.m.preemptoff = "GC worker init"

	// TODO: This is technically not allowed in the heap. See comment in tagptr.go.
	//
	// It is kept alive simply by virtue of being used in the infinite loop
	// below. gcBgMarkWorkerPool keeps pointers to nodes that are not
	// GC-visible, so this must be kept alive indefinitely (even if
	// GOMAXPROCS decreases).
	node := &new(gcBgMarkWorkerNodePadded).gcBgMarkWorkerNode
	gp.m.preemptoff = ""

	node.gp.set(gp)

	// Disable preemption (via acquirem) before signaling ready, so this
	// G is guaranteed to park and enter the worker pool (see the
	// comment below).
	node.m.set(acquirem())

	ready <- struct{}{}

	// After this point, the background mark worker is generally scheduled
	// cooperatively by gcController.findRunnableGCWorker. While performing
	// work on the P, preemption is disabled because we are working on
	// P-local work buffers. When the preempt flag is set, this puts itself
	// into _Gwaiting to be woken up by gcController.findRunnableGCWorker
	// at the appropriate time.
	//
	// When preemption is enabled (e.g., while in gcMarkDone), this worker
	// may be preempted and schedule as a _Grunnable G from a runq. That is
	// fine; it will eventually gopark again for further scheduling via
	// findRunnableGCWorker.
	//
	// Since we disable preemption before notifying ready, we guarantee that
	// this G will be in the worker pool for the next findRunnableGCWorker.
	// This isn't strictly necessary, but it reduces latency between
	// _GCmark starting and the workers starting.
	for {
		// Go to sleep until woken by
		// gcController.findRunnableGCWorker.
		gopark(func(g *g, nodep unsafe.Pointer) bool {
			node := (*gcBgMarkWorkerNode)(nodep)

			if mp := node.m.ptr(); mp != nil {
				// The worker G is no longer running; release
				// the M.
				//
				// N.B. it is _safe_ to release the M as soon
				// as we are no longer performing P-local mark
				// work.
				//
				// However, since we cooperatively stop work
				// when gp.preempt is set, if we releasem in
				// the loop then the following call to gopark
				// would immediately preempt the G. This is
				// also safe, but inefficient: the G must
				// schedule again only to enter gopark and park
				// again. Thus, we defer the release until
				// after parking the G.
				releasem(mp)
			}

			// Release this G to the pool.
			gcBgMarkWorkerPool.push(&node.node)
			// Note that at this point, the G may immediately be
			// rescheduled and may be running.
			return true
		}, unsafe.Pointer(node), waitReasonGCWorkerIdle, traceBlockSystemGoroutine, 0)

		// Preemption must not occur here, or another G might see
		// p.gcMarkWorkerMode.

		// Disable preemption so we can use the gcw. If the
		// scheduler wants to preempt us, we'll stop draining,
		// dispose the gcw, and then preempt.
		node.m.set(acquirem())
		pp := gp.m.p.ptr() // P can't change with preemption disabled.

		if gcBlackenEnabled == 0 {
			println("worker mode", pp.gcMarkWorkerMode)
			throw("gcBgMarkWorker: blackening not enabled")
		}

		if pp.gcMarkWorkerMode == gcMarkWorkerNotWorker {
			throw("gcBgMarkWorker: mode not set")
		}

		startTime := nanotime()
		pp.gcMarkWorkerStartTime = startTime
		var trackLimiterEvent bool
		if pp.gcMarkWorkerMode == gcMarkWorkerIdleMode {
			// Idle mark work is reported to the GC CPU limiter
			// (see limiterEvent.stop, which feeds addIdleTime).
			trackLimiterEvent = pp.limiterEvent.start(limiterEventIdleMarkWork, startTime)
		}

		gcBeginWork()

		systemstack(func() {
			// Mark our goroutine preemptible so its stack can be scanned or observed
			// by the execution tracer. This, for example, lets two mark workers scan
			// each other (otherwise, they would deadlock).
			//
			// casGToWaitingForSuspendG marks the goroutine as ineligible for a
			// stack shrink, effectively pinning the stack in memory for the duration.
			//
			// N.B. The execution tracer is not aware of this status transition and
			// handles it specially based on the wait reason.
			casGToWaitingForSuspendG(gp, _Grunning, waitReasonGCWorkerActive)
			switch pp.gcMarkWorkerMode {
			default:
				throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
			case gcMarkWorkerDedicatedMode:
				gcDrainMarkWorkerDedicated(&pp.gcw, true)
				if gp.preempt {
					// We were preempted. This is
					// a useful signal to kick
					// everything out of the run
					// queue so it can run
					// somewhere else.
					if drainQ := runqdrain(pp); !drainQ.empty() {
						lock(&sched.lock)
						globrunqputbatch(&drainQ)
						unlock(&sched.lock)
					}
				}
				// Go back to draining, this time
				// without preemption.
				gcDrainMarkWorkerDedicated(&pp.gcw, false)
			case gcMarkWorkerFractionalMode:
				gcDrainMarkWorkerFractional(&pp.gcw)
			case gcMarkWorkerIdleMode:
				gcDrainMarkWorkerIdle(&pp.gcw)
			}
			casgstatus(gp, _Gwaiting, _Grunning)
		})

		// Account for time and mark us as stopped.
		now := nanotime()
		duration := now - startTime
		gcController.markWorkerStop(pp.gcMarkWorkerMode, duration)
		if trackLimiterEvent {
			pp.limiterEvent.stop(limiterEventIdleMarkWork, now)
		}
		if pp.gcMarkWorkerMode == gcMarkWorkerFractionalMode {
			pp.gcFractionalMarkTime.Add(duration)
		}

		// We'll releasem after this point and thus this P may run
		// something else. We must clear the worker mode to avoid
		// attributing the mode to a different (non-worker) G in
		// tracev2.GoStart.
		pp.gcMarkWorkerMode = gcMarkWorkerNotWorker

		// If this worker reached a background mark completion
		// point, signal the main GC goroutine.
		if gcEndWork() {
			// We don't need the P-local buffers here, allow
			// preemption because we may schedule like a regular
			// goroutine in gcMarkDone (block on locks, etc).
			releasem(node.m.ptr())
			node.m.set(nil)

			gcMarkDone()
		}
	}
}
// gcShouldScheduleWorker reports whether executing a mark worker
// on p is potentially useful. p may be nil, in which case only the
// global work state is consulted.
func gcShouldScheduleWorker(p *p) bool {
	// Without P-local work to drain, fall back to the global check.
	if p == nil || p.gcw.empty() {
		return gcMarkWorkAvailable()
	}
	return true
}
// gcIsMarkDone reports whether the mark phase is (probably) done.
//
// This is a racy snapshot: work.nwait, work.nproc, and the global work
// queues may all change concurrently, so a true result is only a hint
// and must be confirmed under proper synchronization by the caller.
func gcIsMarkDone() bool {
	return work.nwait == work.nproc && !gcMarkWorkAvailable()
}
// gcBeginWork signals to the garbage collector that a new worker is
// about to process GC work, by taking a slot out of work.nwait.
func gcBeginWork() {
	// Decrement the waiting-worker count. If the result equals
	// work.nproc, the counter was already above nproc — an invariant
	// violation worth crashing on.
	nwait := atomic.Xadd(&work.nwait, -1)
	if nwait == work.nproc {
		println("runtime: work.nwait=", nwait, "work.nproc=", work.nproc)
		throw("work.nwait was > work.nproc")
	}
}
// gcEndWork signals to the garbage collector that a worker has just finished
// its work. It reports whether it was the last worker and there's no more work
// to do. If it returns true, the caller must call gcMarkDone.
func gcEndWork() (last bool) {
	// Return our slot to work.nwait; the count may never exceed nproc.
	nwait := atomic.Xadd(&work.nwait, +1)
	if nwait > work.nproc {
		println("runtime: work.nwait=", nwait, "work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}
	if nwait != work.nproc {
		// Other workers are still busy.
		return false
	}
	// We were the last running worker; mark is complete only if no
	// global work remains either.
	return !gcMarkWorkAvailable()
}
// gcMark runs the mark (or, for concurrent GC, mark termination).
// All gcWork caches must be empty.
// STW is in effect at this point.
func gcMark(startTime int64) {
	if gcphase != _GCmarktermination {
		throw("in gcMark expecting to see gcphase as _GCmarktermination")
	}
	work.tstart = startTime

	// Check that there's no marking work remaining.
	if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); work.full != 0 || next < jobs {
		print("runtime: full=", hex(work.full), " next=", next, " jobs=", jobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
		panic("non-empty mark queue after concurrent mark")
	}

	if debug.gccheckmark > 0 {
		// This is expensive when there's a large number of
		// Gs, so only do it if checkmark is also enabled.
		gcMarkRootCheck()
	}

	// Drop allg snapshot. allgs may have grown, in which case
	// this is the only reference to the old backing store and
	// there's no need to keep it around.
	work.stackRoots = nil

	// Clear out buffers and double-check that all gcWork caches
	// are empty. This should be ensured by gcMarkDone before we
	// enter mark termination.
	//
	// TODO: We could clear out buffers just before mark if this
	// has a non-negligible impact on STW time.
	for _, p := range allp {
		// The write barrier may have buffered pointers since
		// the gcMarkDone barrier. However, since the barrier
		// ensured all reachable objects were marked, all of
		// these must be pointers to black objects. Hence we
		// can just discard the write barrier buffer.
		if debug.gccheckmark > 0 {
			// For debugging, flush the buffer and make
			// sure it really was all marked.
			wbBufFlush1(p)
		} else {
			p.wbBuf.reset()
		}

		gcw := &p.gcw
		if !gcw.empty() {
			// A non-empty per-P work cache here is fatal: dump its
			// state for post-mortem diagnosis before throwing.
			printlock()
			print("runtime: P ", p.id, " flushedWork ", gcw.flushedWork)
			if gcw.wbuf1 == nil {
				print(" wbuf1=<nil>")
			} else {
				print(" wbuf1.n=", gcw.wbuf1.nobj)
			}
			if gcw.wbuf2 == nil {
				print(" wbuf2=<nil>")
			} else {
				print(" wbuf2.n=", gcw.wbuf2.nobj)
			}
			print("\n")
			throw("P has cached GC work at end of mark termination")
		}
		// There may still be cached empty buffers, which we
		// need to flush since we're going to free them. Also,
		// there may be non-zero stats because we allocated
		// black after the gcMarkDone barrier.
		gcw.dispose()
	}

	// Flush scanAlloc from each mcache since we're about to modify
	// heapScan directly. If we were to flush this later, then scanAlloc
	// might have incorrect information.
	//
	// Note that it's not important to retain this information; we know
	// exactly what heapScan is at this point via scanWork.
	for _, p := range allp {
		c := p.mcache
		if c == nil {
			continue
		}
		c.scanAlloc = 0
	}

	// Reset controller state.
	gcController.resetLive(work.bytesMarked)
}
// gcSweep must be called on the system stack because it acquires the heap
// lock. See mheap for details.
//
// Returns true if the heap was fully swept by this function.
//
// The world must be stopped.
//
//go:systemstack
func gcSweep(mode gcMode) bool {
	assertWorldStopped()

	if gcphase != _GCoff {
		throw("gcSweep being done but phase is not GCoff")
	}

	// Advance the sweep generation and reset per-cycle sweep state
	// under the heap lock so sweepers see a consistent view.
	lock(&mheap_.lock)
	mheap_.sweepgen += 2
	sweep.active.reset()
	mheap_.pagesSwept.Store(0)
	mheap_.sweepArenas = mheap_.heapArenas
	mheap_.reclaimIndex.Store(0)
	mheap_.reclaimCredit.Store(0)
	unlock(&mheap_.lock)

	sweep.centralIndex.clear()

	if !concurrentSweep || mode == gcForceBlockMode {
		// Special case synchronous sweep.
		// Record that no proportional sweeping has to happen.
		lock(&mheap_.lock)
		mheap_.sweepPagesPerByte = 0
		unlock(&mheap_.lock)
		// Flush all mcaches.
		for _, pp := range allp {
			pp.mcache.prepareForSweep()
		}
		// Sweep all spans eagerly. sweepone returns ^uintptr(0) when
		// there are no more spans to sweep.
		for sweepone() != ^uintptr(0) {
		}
		// Free workbufs and span rings eagerly.
		prepareFreeWorkbufs()
		for freeSomeWbufs(false) {
		}
		freeDeadSpanSPMCs()
		// All "free" events for this mark/sweep cycle have
		// now happened, so we can make this profile cycle
		// available immediately.
		mProf_NextCycle()
		mProf_Flush()
		return true
	}

	// Background sweep: wake the sweeper goroutine if it is parked and
	// let it proceed concurrently with the mutator.
	lock(&sweep.lock)
	if sweep.parked {
		sweep.parked = false
		ready(sweep.g, 0, true)
	}
	unlock(&sweep.lock)
	return false
}
// gcResetMarkState resets global state prior to marking (concurrent
// or STW) and resets the stack scan state of all Gs.
//
// This is safe to do without the world stopped because any Gs created
// during or after this will start out in the reset state.
//
// gcResetMarkState must be called on the system stack because it acquires
// the heap lock. See mheap for details.
//
//go:systemstack
func gcResetMarkState() {
	// This may be called during a concurrent phase, so lock to make sure
	// allgs doesn't change.
	forEachG(func(gp *g) {
		gp.gcscandone = false // set to true in gcphasework
		gp.gcAssistBytes = 0
	})

	// Clear page marks. This is just 1MB per 64GB of heap, so the
	// time here is pretty trivial.
	lock(&mheap_.lock)
	arenas := mheap_.heapArenas
	unlock(&mheap_.lock)
	for _, ai := range arenas {
		ha := mheap_.arenas[ai.l1()][ai.l2()]
		clear(ha.pageMarks[:])
	}

	// Reset the per-cycle mark accounting.
	work.bytesMarked = 0
	work.initialHeapLive = gcController.heapLive.Load()
}
// Hooks for other packages

// poolcleanup, if non-nil, is the callback registered by the sync package
// (via sync_runtime_registerPoolCleanup) that clearpools calls to drop
// all sync.Pool contents.
var poolcleanup func()

// boringCaches holds cache pointers registered via boring_registerCache;
// clearpools atomically sets each one to nil.
var boringCaches []unsafe.Pointer // for crypto/internal/boring
// sync_runtime_registerPoolCleanup should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/gopkg
// - github.com/songzhibin97/gkit
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
func sync_runtime_registerPoolCleanup(f func()) {
	// f is invoked by clearpools to drop all sync.Pool contents.
	poolcleanup = f
}
// boring_registerCache records a boringcrypto cache pointer so that
// clearpools can atomically clear it.
//
//go:linkname boring_registerCache crypto/internal/boring/bcache.registerCache
func boring_registerCache(p unsafe.Pointer) {
	boringCaches = append(boringCaches, p)
}
// clearpools drops the runtime's cached pool state: registered
// sync.Pool contents, boringcrypto caches, the central sudog cache,
// and the central defer pool.
func clearpools() {
	// clear sync.Pools
	if fn := poolcleanup; fn != nil {
		fn()
	}

	// clear boringcrypto caches
	for _, c := range boringCaches {
		atomicstorep(c, nil)
	}

	// Clear central sudog cache.
	// Leave per-P caches alone, they have strictly bounded size.
	// Disconnect cached list before dropping it on the floor,
	// so that a dangling ref to one entry does not pin all of them.
	lock(&sched.sudoglock)
	for s := sched.sudogcache; s != nil; {
		next := s.next
		s.next = nil
		s = next
	}
	sched.sudogcache = nil
	unlock(&sched.sudoglock)

	// Clear central defer pool.
	// Leave per-P pools alone, they have strictly bounded size.
	// Disconnect cached list before dropping it on the floor,
	// so that a dangling ref to one entry does not pin all of them.
	lock(&sched.deferlock)
	for d := sched.deferpool; d != nil; {
		next := d.link
		d.link = nil
		d = next
	}
	sched.deferpool = nil
	unlock(&sched.deferlock)
}
// Timing
// itoaDiv formats val/(10**dec) into buf, filling digits from the
// right-hand end and returning the used suffix of buf. A '.' is placed
// so that exactly dec digits follow it; dec == 0 yields no decimal point.
func itoaDiv(buf []byte, val uint64, dec int) []byte {
	pos := len(buf) - 1
	dot := pos - dec // index where the '.' belongs, if dec > 0
	for val >= 10 || pos >= dot {
		buf[pos] = '0' + byte(val%10)
		pos--
		if pos == dot {
			buf[pos] = '.'
			pos--
		}
		val /= 10
	}
	// Final (most significant) digit.
	buf[pos] = '0' + byte(val)
	return buf[pos:]
}
// fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
func fmtNSAsMS(buf []byte, ns uint64) []byte {
	if ns >= 10e6 {
		// At or above 10ms: whole milliseconds, no fraction.
		return itoaDiv(buf, ns/1e6, 0)
	}
	// Below 10ms: show two significant digits with at most
	// three decimal places, working in microseconds.
	x := ns / 1e3
	if x == 0 {
		buf[0] = '0'
		return buf[:1]
	}
	dec := 3
	for ; x >= 100; x /= 10 {
		dec--
	}
	return itoaDiv(buf, x, dec)
}
// Helpers for testing GC.

// gcTestMoveStackOnNextCall causes the stack to be moved on a call
// immediately following the call to this. It may not work correctly
// if any other work appears after this call (such as returning).
// Typically the following call should be marked go:noinline so it
// performs a stack check.
//
// In rare cases this may not cause the stack to move, specifically if
// there's a preemption between this call and the next.
func gcTestMoveStackOnNextCall() {
	gp := getg()
	// Replace the stack guard with the stackForceMove sentinel so that
	// the next function prologue's stack check triggers a stack move.
	gp.stackguard0 = stackForceMove
}
// gcTestIsReachable performs a GC and returns a bit set where bit i
// is set if ptrs[i] is reachable.
func gcTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	// This takes the pointers as unsafe.Pointers in order to keep
	// them live long enough for us to attach specials. After
	// that, we drop our references to them.
	if len(ptrs) > 64 {
		panic("too many pointers for uint64 mask")
	}

	// Block GC while we attach specials and drop our references
	// to ptrs. Otherwise, if a GC is in progress, it could mark
	// them reachable via this function before we have a chance to
	// drop them.
	semacquire(&gcsema)

	// Create reachability specials for ptrs.
	specials := make([]*specialReachable, len(ptrs))
	for i, p := range ptrs {
		// Allocate the special from the heap's fixed allocator
		// under the special lock.
		lock(&mheap_.speciallock)
		s := (*specialReachable)(mheap_.specialReachableAlloc.alloc())
		unlock(&mheap_.speciallock)
		s.special.kind = _KindSpecialReachable
		if !addspecial(p, &s.special, false) {
			throw("already have a reachable special (duplicate pointer?)")
		}
		specials[i] = s
		// Make sure we don't retain ptrs.
		ptrs[i] = nil
	}

	semrelease(&gcsema)

	// Force a full GC and sweep.
	GC()

	// Process specials: each special's done/reachable fields are filled
	// in by the GC; fold the results into the mask and free the specials.
	for i, s := range specials {
		if !s.done {
			printlock()
			println("runtime: object", i, "was not swept")
			throw("IsReachable failed")
		}
		if s.reachable {
			mask |= 1 << i
		}
		lock(&mheap_.speciallock)
		mheap_.specialReachableAlloc.free(unsafe.Pointer(s))
		unlock(&mheap_.speciallock)
	}

	return mask
}
// gcTestPointerClass returns the category of what p points to, one of:
// "heap", "stack", "data", "bss", "other". This is useful for checking
// that a test is doing what it's intended to do.
//
// This is nosplit simply to avoid extra pointer shuffling that may
// complicate a test.
//
//go:nosplit
func gcTestPointerClass(p unsafe.Pointer) string {
	p2 := uintptr(noescape(p))
	gp := getg()
	if gp.stack.lo <= p2 && p2 < gp.stack.hi {
		return "stack"
	}
	if base, _, _ := findObject(p2, 0, 0); base != 0 {
		return "heap"
	}
	for _, datap := range activeModules() {
		if datap.data <= p2 && p2 < datap.edata || datap.noptrdata <= p2 && p2 < datap.enoptrdata {
			return "data"
		}
		// Both bss ranges use half-open intervals [start, end), matching
		// the data/noptrdata checks above. (Previously the noptrbss upper
		// bound used "<=", which misclassified a pointer exactly one past
		// the end of the noptrbss segment as "bss".)
		if datap.bss <= p2 && p2 < datap.ebss || datap.noptrbss <= p2 && p2 < datap.enoptrbss {
			return "bss"
		}
	}
	KeepAlive(p)
	return "other"
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "internal/runtime/atomic"
// gcCPULimiter is a mechanism to limit GC CPU utilization in situations
// where it might become excessive and inhibit application progress (e.g.
// a death spiral).
//
// The core of the limiter is a leaky bucket mechanism that fills with GC
// CPU time and drains with mutator time. Because the bucket fills and
// drains with time directly (i.e. without any weighting), this effectively
// sets a very conservative limit of 50%. This limit could be enforced directly,
// however, but the purpose of the bucket is to accommodate spikes in GC CPU
// utilization without hurting throughput.
//
// Note that the bucket in the leaky bucket mechanism can never go negative,
// so the GC never gets credit for a lot of CPU time spent without the GC
// running. This is intentional, as an application that stays idle for, say,
// an entire day, could build up enough credit to fail to prevent a death
// spiral the following day. The bucket's capacity is the GC's only leeway.
//
// The capacity thus also sets the window the limiter considers. For example,
// if the capacity of the bucket is 1 cpu-second, then the limiter will not
// kick in until at least 1 full cpu-second in the last 2 cpu-second window
// is spent on GC CPU time.
var gcCPULimiter gcCPULimiterState

type gcCPULimiterState struct {
	// lock is a simple spin lock guarding the non-atomic fields:
	// 0 means unlocked, 1 locked. See tryLock and unlock.
	lock atomic.Uint32

	// enabled reports whether the limiter is currently limiting
	// (i.e. the bucket is full). Read lock-free via limiting().
	enabled atomic.Bool

	// gcEnabled is an internal copy of gcBlackenEnabled that determines
	// whether the limiter tracks total assist time.
	//
	// gcBlackenEnabled isn't used directly so as to keep this structure
	// unit-testable.
	gcEnabled bool

	// transitioning is true when the GC is in a STW and transitioning between
	// the mark and sweep phases.
	transitioning bool

	// test indicates whether this instance of the struct was made for testing purposes.
	test bool

	// bucket is the leaky bucket itself. Protected by lock.
	bucket struct {
		// Invariants:
		// - fill >= 0
		// - capacity >= 0
		// - fill <= capacity
		fill, capacity uint64
	}

	// overflow is the cumulative amount of GC CPU time that we tried to fill the
	// bucket with but exceeded its capacity.
	overflow uint64

	// assistTimePool is the accumulated assist time since the last update.
	assistTimePool atomic.Int64

	// idleMarkTimePool is the accumulated idle mark time since the last update.
	//
	// NOTE(review): not referenced by the code visible here — updateLocked
	// folds idle mark time into idleTimePool instead. Confirm whether this
	// field is still used elsewhere.
	idleMarkTimePool atomic.Int64

	// idleTimePool is the accumulated time Ps spent on the idle list since the last update.
	idleTimePool atomic.Int64

	// lastUpdate is the nanotime timestamp of the last time update was called.
	//
	// Updated under lock, but may be read concurrently.
	lastUpdate atomic.Int64

	// lastEnabledCycle is the GC cycle that last had the limiter enabled.
	lastEnabledCycle atomic.Uint32

	// nprocs is an internal copy of gomaxprocs, used to determine total available
	// CPU time.
	//
	// gomaxprocs isn't used directly so as to keep this structure unit-testable.
	nprocs int32
}
// limiting returns true if the CPU limiter is currently enabled, meaning the Go GC
// should take action to limit CPU utilization.
//
// It is safe to call concurrently with other operations. This is a
// lock-free read of the enabled flag.
func (l *gcCPULimiterState) limiting() bool {
	return l.enabled.Load()
}
// startGCTransition notifies the limiter of a GC transition.
//
// This call takes ownership of the limiter and disables all other means of
// updating the limiter. Release ownership by calling finishGCTransition.
//
// It is safe to call concurrently with other operations.
func (l *gcCPULimiterState) startGCTransition(enableGC bool, now int64) {
	if !l.tryLock() {
		// This must happen during a STW, so we can't fail to acquire the lock.
		// If we did, something went wrong. Throw.
		throw("failed to acquire lock to start a GC transition")
	}
	if l.gcEnabled == enableGC {
		throw("transitioning GC to the same state as before?")
	}
	// Flush whatever was left between the last update and now.
	l.updateLocked(now)
	// Latch the new GC state; further updates are blocked because the
	// lock is held until finishGCTransition.
	l.gcEnabled = enableGC
	l.transitioning = true
	// N.B. finishGCTransition releases the lock.
	//
	// We don't release here to increase the chance that if there's a failure
	// to finish the transition, that we throw on failing to acquire the lock.
}
// finishGCTransition notifies the limiter that the GC transition is complete
// and releases ownership of it. It also accumulates STW time in the bucket.
// now must be the timestamp from the end of the STW pause.
func (l *gcCPULimiterState) finishGCTransition(now int64) {
	if !l.transitioning {
		throw("finishGCTransition called without starting one?")
	}
	// Count the full nprocs set of CPU time because the world is stopped
	// between startGCTransition and finishGCTransition. Even though the GC
	// isn't running on all CPUs, it is preventing user code from doing so,
	// so it might as well be.
	if lastUpdate := l.lastUpdate.Load(); now >= lastUpdate {
		l.accumulate(0, (now-lastUpdate)*int64(l.nprocs))
	}
	l.lastUpdate.Store(now)
	l.transitioning = false
	l.unlock()
}
// gcCPULimiterUpdatePeriod dictates the maximum amount of wall-clock time
// we can go before updating the limiter.
const gcCPULimiterUpdatePeriod = 10e6 // 10ms

// needUpdate returns true if the limiter's maximum update period has been
// exceeded, and so would benefit from an update.
//
// Safe to call concurrently: lastUpdate is read atomically.
func (l *gcCPULimiterState) needUpdate(now int64) bool {
	return now-l.lastUpdate.Load() > gcCPULimiterUpdatePeriod
}
// addAssistTime notifies the limiter of additional assist time. It will be
// included in the next update.
//
// Safe to call concurrently: the pool is an atomic accumulator.
func (l *gcCPULimiterState) addAssistTime(t int64) {
	l.assistTimePool.Add(t)
}

// addIdleTime notifies the limiter of additional time a P spent on the idle list. It will be
// subtracted from the total CPU time in the next update.
//
// Safe to call concurrently: the pool is an atomic accumulator.
func (l *gcCPULimiterState) addIdleTime(t int64) {
	l.idleTimePool.Add(t)
}
// update updates the bucket given runtime-specific information. now is the
// current monotonic time in nanoseconds.
//
// This is safe to call concurrently with other operations, except *GCTransition.
func (l *gcCPULimiterState) update(now int64) {
	if !l.tryLock() {
		// We failed to acquire the lock, which means something else is currently
		// updating. Just drop our update, the next one to update will include
		// our total assist time.
		return
	}
	// A transition holds the lock until finishGCTransition, so seeing
	// transitioning here while holding the lock is a logic error.
	if l.transitioning {
		throw("update during transition")
	}
	l.updateLocked(now)
	l.unlock()
}
// updateLocked is the implementation of update. l.lock must be held.
func (l *gcCPULimiterState) updateLocked(now int64) {
	lastUpdate := l.lastUpdate.Load()
	if now < lastUpdate {
		// Defensively avoid overflow. This isn't even the latest update anyway.
		return
	}
	// Total CPU time available in this window: wall time times nprocs.
	windowTotalTime := (now - lastUpdate) * int64(l.nprocs)
	l.lastUpdate.Store(now)

	// Drain the pool of assist time.
	assistTime := l.assistTimePool.Load()
	if assistTime != 0 {
		l.assistTimePool.Add(-assistTime)
	}

	// Drain the pool of idle time.
	idleTime := l.idleTimePool.Load()
	if idleTime != 0 {
		l.idleTimePool.Add(-idleTime)
	}

	if !l.test {
		// Consume time from in-flight events. Make sure we're not preemptible so allp can't change.
		//
		// The reason we do this instead of just waiting for those events to finish and push updates
		// is to ensure that all the time we're accounting for happened sometime between lastUpdate
		// and now. This dramatically simplifies reasoning about the limiter because we're not at
		// risk of extra time being accounted for in this window than actually happened in this window,
		// leading to all sorts of weird transient behavior.
		mp := acquirem()
		for _, pp := range allp {
			typ, duration := pp.limiterEvent.consume(now)
			switch typ {
			case limiterEventIdle:
				// We consumed this event here rather than in
				// limiterEvent.stop, so update the global
				// idle-time stat too (mirrors stop's handling).
				sched.idleTime.Add(duration)
				idleTime += duration
			case limiterEventIdleMarkWork:
				idleTime += duration
			case limiterEventMarkAssist, limiterEventScavengeAssist:
				assistTime += duration
			case limiterEventNone:
				break
			default:
				throw("invalid limiter event type found")
			}
		}
		releasem(mp)
	}

	// Compute total GC time.
	windowGCTime := assistTime
	if l.gcEnabled {
		windowGCTime += int64(float64(windowTotalTime) * gcBackgroundUtilization)
	}

	// Subtract out all idle time from the total time. Do this after computing
	// GC time, because the background utilization is dependent on the *real*
	// total time, not the total time after idle time is subtracted.
	//
	// Idle time is counted as any time that a P is on the P idle list plus idle mark
	// time. Idle mark workers soak up time that the application spends idle.
	//
	// On a heavily undersubscribed system, any additional idle time can skew GC CPU
	// utilization, because the GC might be executing continuously and thrashing,
	// yet the CPU utilization with respect to GOMAXPROCS will be quite low, so
	// the limiter fails to turn on. By subtracting idle time, we're removing time that
	// we know the application was idle giving a more accurate picture of whether
	// the GC is thrashing.
	//
	// Note that this can cause the limiter to turn on even if it's not needed. For
	// instance, on a system with 32 Ps but only 1 running goroutine, each GC will have
	// 8 dedicated GC workers. Assuming the GC cycle is half mark phase and half sweep
	// phase, then the GC CPU utilization over that cycle, with idle time removed, will
	// be 8/(8+2) = 80%. Even though the limiter turns on, though, assist should be
	// unnecessary, as the GC has way more CPU time to outpace the 1 goroutine that's
	// running.
	windowTotalTime -= idleTime

	l.accumulate(windowTotalTime-windowGCTime, windowGCTime)
}
// accumulate adds time to the bucket and signals whether the limiter is enabled.
//
// This is an internal function that deals just with the bucket. Prefer update.
// l.lock must be held.
func (l *gcCPULimiterState) accumulate(mutatorTime, gcTime int64) {
	headroom := l.bucket.capacity - l.bucket.fill
	enabled := headroom == 0

	// Let's be careful about three things here:
	// 1. The addition and subtraction, for the invariants.
	// 2. Overflow.
	// 3. Excessive mutation of l.enabled, which is accessed
	// by all assists, potentially more than once.
	change := gcTime - mutatorTime

	// Handle limiting case: the net GC time would (over)fill the bucket.
	if change > 0 && headroom <= uint64(change) {
		// Track the spill-over and pin the bucket at capacity.
		l.overflow += uint64(change) - headroom
		l.bucket.fill = l.bucket.capacity
		if !enabled {
			l.enabled.Store(true)
			l.lastEnabledCycle.Store(memstats.numgc + 1)
		}
		return
	}

	// Handle non-limiting cases.
	if change < 0 && l.bucket.fill <= uint64(-change) {
		// Bucket emptied.
		l.bucket.fill = 0
	} else {
		// All other cases. Note the two's-complement trick: for
		// change >= 0, uint64(-change) wraps, so subtracting it is
		// equivalent to adding change; the limiting case above
		// already guaranteed fill+change <= capacity.
		l.bucket.fill -= uint64(-change)
	}
	if change != 0 && enabled {
		l.enabled.Store(false)
	}
}
// tryLock attempts to lock l. Returns true on success.
//
// Contenders do not block or spin; they simply fail the CAS.
func (l *gcCPULimiterState) tryLock() bool {
	return l.lock.CompareAndSwap(0, 1)
}

// unlock releases the lock on l. Must be called if tryLock returns true.
// Throws if the lock was not actually held (double unlock).
func (l *gcCPULimiterState) unlock() {
	old := l.lock.Swap(0)
	if old != 1 {
		throw("double unlock")
	}
}
// capacityPerProc is the limiter's bucket capacity for each P in GOMAXPROCS.
const capacityPerProc = 1e9 // 1 second in nanoseconds

// resetCapacity updates the capacity based on GOMAXPROCS. Must not be called
// while the GC is enabled.
//
// It is safe to call concurrently with other operations.
func (l *gcCPULimiterState) resetCapacity(now int64, nprocs int32) {
	if !l.tryLock() {
		// This must happen during a STW, so we can't fail to acquire the lock.
		// If we did, something went wrong. Throw.
		throw("failed to acquire lock to reset capacity")
	}
	// Flush the rest of the time for this period.
	l.updateLocked(now)
	l.nprocs = nprocs

	// Rescale the bucket, clamp the fill to the new capacity, and
	// update the enabled flag to match the new fill/capacity relation.
	l.bucket.capacity = uint64(nprocs) * capacityPerProc
	if l.bucket.fill > l.bucket.capacity {
		l.bucket.fill = l.bucket.capacity
		l.enabled.Store(true)
		l.lastEnabledCycle.Store(memstats.numgc + 1)
	} else if l.bucket.fill < l.bucket.capacity {
		l.enabled.Store(false)
	}
	l.unlock()
}
// limiterEventType indicates the type of an event occurring on some P.
//
// These events represent the full set of events that the GC CPU limiter tracks
// to execute its function.
//
// This type may use no more than limiterEventBits bits of information.
type limiterEventType uint8

const (
	limiterEventNone           limiterEventType = iota // None of the following events.
	limiterEventIdleMarkWork                           // Refers to an idle mark worker (see gcMarkWorkerMode).
	limiterEventMarkAssist                             // Refers to mark assist (see gcAssistAlloc).
	limiterEventScavengeAssist                         // Refers to a scavenge assist (see allocSpan).
	limiterEventIdle                                   // Refers to time a P spent on the idle list.

	// limiterEventBits is how many bits of a packed stamp hold the
	// event type; the 5 values above fit in 3 bits.
	limiterEventBits = 3
)

// limiterEventTypeMask is a mask for the bits in p.limiterEventStart that represent
// the event type. The rest of the bits of that field represent a timestamp.
const (
	limiterEventTypeMask = uint64((1<<limiterEventBits)-1) << (64 - limiterEventBits)
	// limiterEventStampNone is the zero stamp: no event in flight.
	limiterEventStampNone = limiterEventStamp(0)
)

// limiterEventStamp is a nanotime timestamp packed with a limiterEventType.
type limiterEventStamp uint64
// makeLimiterEventStamp creates a new stamp from the event type and the current timestamp.
//
// The type occupies the top limiterEventBits bits; the timestamp's own top
// bits are discarded (duration later reconstructs them from a fresh now).
func makeLimiterEventStamp(typ limiterEventType, now int64) limiterEventStamp {
	return limiterEventStamp(uint64(typ)<<(64-limiterEventBits) | (uint64(now) &^ limiterEventTypeMask))
}
// duration computes the difference between now and the start time stored in the stamp.
//
// Returns 0 if the difference is negative, which may happen if now is stale or if the
// before and after timestamps cross a 2^(64-limiterEventBits) boundary.
func (s limiterEventStamp) duration(now int64) int64 {
	// The stamp only stores the low 64-limiterEventBits bits of the
	// start time; reconstruct a full value by borrowing the top bits
	// from now.
	hi := uint64(now) & limiterEventTypeMask
	lo := uint64(s) &^ limiterEventTypeMask
	start := int64(hi | lo)
	if d := now - start; d > 0 {
		return d
	}
	return 0
}
// typ extracts the event type from the stamp (the top limiterEventBits bits).
func (s limiterEventStamp) typ() limiterEventType {
	return limiterEventType(s >> (64 - limiterEventBits))
}
// limiterEvent represents tracking state for an event tracked by the GC CPU
// limiter. One is embedded per P (accessed as pp.limiterEvent); at most one
// event may be in flight at a time.
type limiterEvent struct {
	stamp atomic.Uint64 // Stores a limiterEventStamp.
}
// start begins tracking a new limiter event of the current type. If an event
// is already in flight, then a new event cannot begin because the current time is
// already being attributed to that event. In this case, this function returns false.
// Otherwise, it returns true.
//
// The caller must be non-preemptible until at least stop is called or this function
// returns false. Because this is trying to measure "on-CPU" time of some event, getting
// scheduled away during it can mean that whatever we're measuring isn't a reflection
// of "on-CPU" time. The OS could deschedule us at any time, but we want to maintain as
// close of an approximation as we can.
func (e *limiterEvent) start(typ limiterEventType, now int64) bool {
	// Refuse if another event is already in flight.
	if limiterEventStamp(e.stamp.Load()).typ() != limiterEventNone {
		return false
	}
	e.stamp.Store(uint64(makeLimiterEventStamp(typ, now)))
	return true
}
// consume acquires the partial event CPU time from any in-flight event.
// It achieves this by storing the current time as the new event time.
//
// Returns the type of the in-flight event, as well as how long it's currently been
// executing for. Returns limiterEventNone if no event is active.
func (e *limiterEvent) consume(now int64) (typ limiterEventType, duration int64) {
	// Read the limiter event timestamp and update it to now.
	// CAS loop: retry if the owner concurrently stops or restarts
	// the event between our load and store.
	for {
		old := limiterEventStamp(e.stamp.Load())
		typ = old.typ()
		if typ == limiterEventNone {
			// There's no in-flight event, so just push that up.
			return
		}
		duration = old.duration(now)
		if duration == 0 {
			// We might have a stale now value, or this crossed the
			// 2^(64-limiterEventBits) boundary in the clock readings.
			// Just ignore it.
			return limiterEventNone, 0
		}
		new := makeLimiterEventStamp(typ, now)
		if e.stamp.CompareAndSwap(uint64(old), uint64(new)) {
			break
		}
	}
	return
}
// stop stops the active limiter event. Throws if the type of the active
// event differs from typ.
//
// The caller must be non-preemptible across the event. See start as to why.
func (e *limiterEvent) stop(typ limiterEventType, now int64) {
	var stamp limiterEventStamp
	// CAS loop: retry if a concurrent consume updates the stamp
	// between our load and the swap to "no event".
	for {
		stamp = limiterEventStamp(e.stamp.Load())
		if stamp.typ() != typ {
			print("runtime: want=", typ, " got=", stamp.typ(), "\n")
			throw("limiterEvent.stop: found wrong event in p's limiter event slot")
		}
		if e.stamp.CompareAndSwap(uint64(stamp), uint64(limiterEventStampNone)) {
			break
		}
	}

	duration := stamp.duration(now)
	if duration == 0 {
		// It's possible that we're missing time because we crossed a
		// 2^(64-limiterEventBits) boundary between the start and end.
		// In this case, we're dropping that information. This is OK because
		// at worst it'll cause a transient hiccup that will quickly resolve
		// itself as all new timestamps begin on the other side of the boundary.
		// Such a hiccup should be incredibly rare.
		return
	}

	// Account for the event.
	switch typ {
	case limiterEventIdle:
		sched.idleTime.Add(duration)
		gcCPULimiter.addIdleTime(duration)
	case limiterEventIdleMarkWork:
		gcCPULimiter.addIdleTime(duration)
	case limiterEventMarkAssist, limiterEventScavengeAssist:
		gcCPULimiter.addAssistTime(duration)
	default:
		throw("limiterEvent.stop: invalid limiter event type found")
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector: marking and scanning
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
const (
	// Indices of the fixed markroot jobs. These always occupy the first
	// fixedRootCount root-job slots, ahead of the dynamically-sized data,
	// BSS, span, and stack roots.
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCleanups
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of work to do
	// between self-preemption checks in gcDrain. Assuming a scan
	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values
	// make the system less responsive to incoming work.
	drainCheckThreshold = 100000

	// pagesPerSpanRoot indicates how many pages to scan from a span root
	// at a time. Used by special root marking.
	//
	// Higher values improve throughput by increasing locality, but
	// increase the minimum latency of a marking operation.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerSpanRoot = min(512, pagesPerArena)
)
// internalBlocked returns true if the goroutine is blocked due to an
// internal (non-leaking) waitReason, e.g. waiting for the netpoller or garbage collector.
// Such goroutines are never leak detection candidates according to the GC.
//
//go:nosplit
func (gp *g) internalBlocked() bool {
	// The leak-candidate wait reasons form a single contiguous range of
	// values; any reason outside that range is internal.
	r := gp.waitreason
	isCandidate := waitReasonChanReceiveNilChan <= r && r <= waitReasonSyncWaitGroupWait
	return !isCandidate
}
// allGsSnapshotSortedForGC takes a snapshot of allgs and returns a sorted
// array of Gs. The array is sorted by the G's status, with running Gs
// first, followed by blocked Gs. The returned index indicates the cutoff
// between runnable and blocked Gs.
//
// The world must be stopped or allglock must be held.
func allGsSnapshotSortedForGC() ([]*g, int) {
	assertWorldStoppedOrLockHeld(&allglock)

	// Reset the status of leaked goroutines in order to improve
	// the precision of goroutine leak detection.
	for _, gp := range allgs {
		gp.atomicstatus.CompareAndSwap(_Gleaked, _Gwaiting)
	}

	allgsSorted := make([]*g, len(allgs))

	// Indices cutting off runnable and blocked Gs.
	// Maybe-runnable Gs fill in from the front; blocked Gs fill in from
	// the back (and therefore end up in reverse encounter order).
	var currIndex, blockedIndex = 0, len(allgsSorted) - 1
	for _, gp := range allgs {
		// A G is "blocked" for our purposes only if it is _Gwaiting for a
		// non-internal (leak-candidate) reason; everything else goes in
		// the maybe-runnable prefix.
		//
		// NOTE(review): the atomic load below may be unnecessary given the
		// world is stopped or allglock is held, but it is kept for safety.
		if status := readgstatus(gp); status != _Gwaiting || gp.internalBlocked() {
			allgsSorted[currIndex] = gp
			currIndex++
		} else {
			allgsSorted[blockedIndex] = gp
			blockedIndex--
		}
	}

	// allgsSorted is a freshly allocated copy, so it is safe to return
	// even after the world restarts. blockedIndex+1 is the index of the
	// first blocked G: everything before it is maybe-runnable, everything
	// from it onward is blocked.
	return allgsSorted, blockedIndex + 1
}
// gcPrepareMarkRoots queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
func gcPrepareMarkRoots() {
	assertWorldStopped()

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Scan globals.
	//
	// We take the max block count over all modules; markroot then walks
	// every module for a given block index.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}

		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}

	// Scan span roots for finalizer specials.
	//
	// We depend on addfinalizer to mark objects that get
	// finalizers after root marking.
	//
	// We're going to scan the whole heap (that was available at the time the
	// mark phase started, i.e. markArenas) for in-use spans which have specials.
	//
	// Break up the work into arenas, and further into chunks.
	//
	// Snapshot heapArenas as markArenas. This snapshot is safe because heapArenas
	// is append-only.
	mheap_.markArenas = mheap_.heapArenas[:len(mheap_.heapArenas):len(mheap_.heapArenas)]
	work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)

	// Scan stacks.
	//
	// Gs may be created after this point, but it's okay that we
	// ignore them because they begin life without any roots, so
	// there's nothing to scan, and any roots they create during
	// the concurrent phase will be caught by the write barrier.
	if work.goroutineLeak.enabled {
		// goroutine leak finder GC --- only prepare runnable
		// goroutines for marking.
		work.stackRoots, work.nMaybeRunnableStackRoots = allGsSnapshotSortedForGC()
	} else {
		// regular GC --- scan every goroutine
		work.stackRoots = allGsSnapshot()
		work.nMaybeRunnableStackRoots = len(work.stackRoots)
	}
	work.nStackRoots = len(work.stackRoots)

	// Only the maybe-runnable prefix of stackRoots is queued as jobs here;
	// blocked Gs (leak-detection mode) are not scanned as roots.
	work.markrootNext.Store(0)
	work.markrootJobs.Store(uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nMaybeRunnableStackRoots))

	// Calculate base indexes of each root type
	work.baseData = uint32(fixedRootCount)
	work.baseBSS = work.baseData + uint32(work.nDataRoots)
	work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
	work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
	work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
}
// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging. Throws if any markroot job or stack scan was
// left unfinished.
func gcMarkRootCheck() {
	if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); next < jobs {
		print(next, " of ", jobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	// Check that stacks have been scanned.
	//
	// We only check the first nStackRoots Gs that we should have scanned.
	// Since we don't care about newer Gs (see comment in
	// gcPrepareMarkRoots), no locking is required.
	i := 0
	forEachGRace(func(gp *g) {
		// Gs beyond the snapshot are ignored; the callback still runs
		// for them but does nothing.
		if i >= work.nStackRoots {
			return
		}

		if !gp.gcscandone {
			println("gp", gp, "goid", gp.goid,
				"status", readgstatus(gp),
				"gcscandone", gp.gcscandone)
			throw("scan missed a g")
		}

		i++
	})
}
// oneptrmask is the pointer bitmask for an allocation containing a single
// pointer, for use with scanblock.
var oneptrmask = [...]uint8{1}
// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// Returns the amount of GC work credit produced by the operation.
// If flushBgCredit is true, then that credit is also flushed
// to the background credit pool.
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 {
	// Note: if you add a case here, please also update heapdump.go:dumproots.
	var workDone int64
	var workCounter *atomic.Int64
	// The range cases below can safely precede the fixedRoot* equality
	// cases: the fixed indices are all < fixedRootCount == work.baseData,
	// so they never fall into the [baseData, baseSpans) ranges.
	switch {
	case work.baseData <= i && i < work.baseBSS:
		workCounter = &gcController.globalsScanWork
		// Scan the (i - baseData)'th block of every module's data segment.
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
		}

	case work.baseBSS <= i && i < work.baseSpans:
		workCounter = &gcController.globalsScanWork
		// Scan the (i - baseBSS)'th block of every module's BSS segment.
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
		}

	case i == fixedRootFinalizers:
		// Scan the finalizer queue; the fn fields are roots.
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call
		// stackfree.
		systemstack(markrootFreeGStacks)

	case i == fixedRootCleanups:
		for cb := (*cleanupBlock)(gcCleanups.all.Load()); cb != nil; cb = cb.alllink {
			// N.B. This only needs to synchronize with cleanup execution, which only resets these blocks.
			// All cleanup queueing happens during sweep.
			n := uintptr(atomic.Load(&cb.n))
			scanblock(uintptr(unsafe.Pointer(&cb.cleanups[0])), n*unsafe.Sizeof(cleanupFn{}), &cleanupBlockPtrMask[0], gcw, nil)
		}

	case work.baseSpans <= i && i < work.baseStacks:
		// mark mspan.specials
		markrootSpans(gcw, int(i-work.baseSpans))

	default:
		// the rest is scanning goroutine stacks
		workCounter = &gcController.stackScanWork
		if i < work.baseStacks || work.baseEnd <= i {
			printlock()
			print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
			throw("markroot: bad index")
		}
		gp := work.stackRoots[i-work.baseStacks]

		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scanstack must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casGToWaitingForSuspendG(userG, _Grunning, waitReasonGarbageCollectionScan)
			}

			// TODO: suspendG blocks (and spins) until gp
			// stops, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			workDone += scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
	// Credit the work to the relevant scan-work counter (globals or
	// stacks; fixed roots and span roots are not credited).
	if workCounter != nil && workDone != 0 {
		workCounter.Add(workDone)
		if flushBgCredit {
			gcFlushBgCredit(workDone)
		}
	}
	return workDone
}
// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
// Returns the amount of work done.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 {
	// Each byte of the pointer mask covers 8 pointer-sized words.
	const bytesPerMaskByte = 8 * goarch.PtrSize
	if rootBlockBytes%bytesPerMaskByte != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	// Note that if b0 is toward the end of the address space,
	// then b0 + rootBlockBytes might wrap around.
	// These tests are written to avoid any possible overflow.
	blockOff := uintptr(shard) * rootBlockBytes
	if blockOff >= n0 {
		// This shard lies entirely past the end of the block.
		return 0
	}
	base := b0 + blockOff
	mask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/bytesPerMaskByte)))
	size := uintptr(rootBlockBytes)
	if blockOff+size > n0 {
		// Final shard: trim to the tail of the block.
		size = n0 - blockOff
	}

	// Scan this shard.
	scanblock(base, size, mask, gcw, nil)
	return int64(size)
}
// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}

	// Free stacks.
	var tail *g
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		tail = gp
		stackfree(gp.stack)
		// Clear the stack bounds so the G is recognizably stackless.
		gp.stack.lo = 0
		gp.stack.hi = 0
		if valgrindenabled {
			valgrindDeregisterStack(gp.valgrindStackID)
			gp.valgrindStackID = 0
		}
	}
	// Rebuild the list as a gQueue (head, tail, size) so it can be
	// pushed onto the noStack list in one operation. tail is non-nil
	// here because the list was non-empty.
	q := gQueue{list.head, tail.guintptr(), list.size}

	// Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}
// markrootSpans marks roots for one shard of markArenas.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// Objects with weak handles have only one invariant related
	// to this function: weak handle specials (which are not in the
	// garbage collected heap) are roots. In practice, this means
	// the handle field must be scanned. Note that the value the
	// handle pointer referenced does *not* need to be scanned. See
	// the definition of specialWeakHandle for details.
	sg := mheap_.sweepgen

	// Find the arena and page index into that arena for this shard.
	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)

	// Construct slice of bitmap which we'll iterate over.
	// Each bit of pageSpecials covers one page; each byte covers 8.
	specialsbits := ha.pageSpecials[arenaPage/8:]
	specialsbits = specialsbits[:pagesPerSpanRoot/8]
	for i := range specialsbits {
		// Find set bits, which correspond to spans with specials.
		specials := atomic.Load8(&specialsbits[i])
		if specials == 0 {
			continue
		}
		for j := uint(0); j < 8; j++ {
			if specials&(1<<j) == 0 {
				continue
			}
			// Find the span for this bit.
			//
			// This value is guaranteed to be non-nil because having
			// specials implies that the span is in-use, and since we're
			// currently marking we can be sure that we don't have to worry
			// about the span being freed and re-used.
			s := ha.spans[arenaPage+uint(i)*8+j]

			// The state must be mSpanInUse if the specials bit is set, so
			// sanity check that.
			if state := s.state.get(); state != mSpanInUse {
				print("s.state = ", state, "\n")
				throw("non in-use span found with specials bit set")
			}
			// Check that this span was swept (it may be cached or uncached).
			if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
				// sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", s.sweepgen, " ", sg, "\n")
				throw("gc: unswept span")
			}

			// Lock the specials to prevent a special from being
			// removed from the list while we're traversing it.
			lock(&s.speciallock)
			for sp := s.specials; sp != nil; sp = sp.next {
				switch sp.kind {
				case _KindSpecialFinalizer:
					gcScanFinalizer((*specialfinalizer)(unsafe.Pointer(sp)), s, gcw)
				case _KindSpecialWeakHandle:
					// The special itself is a root.
					spw := (*specialWeakHandle)(unsafe.Pointer(sp))
					scanblock(uintptr(unsafe.Pointer(&spw.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
				case _KindSpecialCleanup:
					gcScanCleanup((*specialCleanup)(unsafe.Pointer(sp)), gcw)
				}
			}
			unlock(&s.speciallock)
		}
	}
}
// gcScanFinalizer scans the relevant parts of a finalizer special as a root:
// everything reachable from the finalized object, and the finalizer's fn.
func gcScanFinalizer(spf *specialfinalizer, s *mspan, gcw *gcWork) {
	// Don't mark finalized object, but scan it so we retain everything it points to.

	// A finalizer can be set for an inner byte of an object, find object beginning.
	p := s.base() + spf.special.offset/s.elemsize*s.elemsize

	// Mark everything that can be reached from
	// the object (but *not* the object itself or
	// we'll never collect it).
	if !s.spanclass.noscan() {
		scanObject(p, gcw)
	}

	// The special itself is also a root.
	scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
}
// gcScanCleanup scans the relevant parts of a cleanup special as a root,
// namely the cleanup function value itself.
func gcScanCleanup(spc *specialCleanup, gcw *gcWork) {
	// The special itself is a root.
	scanblock(uintptr(unsafe.Pointer(&spc.cleanup)), unsafe.Sizeof(cleanupFn{}), &cleanupFnPtrMask[0], gcw, nil)
}
// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	if gp := getg(); gp.bubble != nil {
		// Disassociate the G from its synctest bubble while allocating.
		// This is less elegant than incrementing the group's active count,
		// but avoids any contamination between GC assist and synctest.
		bubble := gp.bubble
		gp.bubble = nil
		defer func() {
			gp.bubble = bubble
		}()
	}

	// This extremely verbose boolean indicates whether we've
	// entered mark assist from the perspective of the tracer.
	//
	// In the tracer, this is just before we call gcAssistAlloc1
	// *regardless* of whether tracing is enabled. This is because
	// the tracer allows for tracing to begin (and advance
	// generations) in the middle of a GC mark phase, so we need to
	// record some state so that the tracer can pick it up to ensure
	// a consistent trace result.
	//
	// TODO(mknyszek): Hide the details of inMarkAssist in tracer
	// functions and simplify all the state tracking. This is a lot.
	enteredMarkAssistForTracing := false
retry:
	if gcCPULimiter.limiting() {
		// If the CPU limiter is enabled, intentionally don't
		// assist to reduce the amount of CPU time spent in the GC.
		if enteredMarkAssistForTracing {
			trace := traceAcquire()
			if trace.ok() {
				trace.GCMarkAssistDone()
				// Set this *after* we trace the end to make sure
				// that we emit an in-progress event if this is
				// the first event for the goroutine in the trace
				// or trace generation. Also, do this between
				// acquire/release because this is part of the
				// goroutine's trace state, and it must be atomic
				// with respect to the tracer.
				gp.inMarkAssist = false
				traceRelease(trace)
			} else {
				// This state is tracked even if tracing isn't enabled.
				// It's only used by the new tracer.
				// See the comment on enteredMarkAssistForTracing.
				gp.inMarkAssist = false
			}
		}
		return
	}
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	assistWorkPerByte := gcController.assistWorkPerByte.Load()
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := gcController.bgScanCredit.Load()
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			// "1+" is a cheap round-up so that even tiny steals
			// produce at least one byte of credit.
			gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		gcController.bgScanCredit.Add(-stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if enteredMarkAssistForTracing {
				trace := traceAcquire()
				if trace.ok() {
					trace.GCMarkAssistDone()
					// Set this *after* we trace the end to make sure
					// that we emit an in-progress event if this is
					// the first event for the goroutine in the trace
					// or trace generation. Also, do this between
					// acquire/release because this is part of the
					// goroutine's trace state, and it must be atomic
					// with respect to the tracer.
					gp.inMarkAssist = false
					traceRelease(trace)
				} else {
					// This state is tracked even if tracing isn't enabled.
					// It's only used by the new tracer.
					// See the comment on enteredMarkAssistForTracing.
					gp.inMarkAssist = false
				}
			}
			return
		}
	}
	if !enteredMarkAssistForTracing {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCMarkAssistStart()
			// Set this *after* we trace the start, otherwise we may
			// emit an in-progress event for an assist we're about to start.
			gp.inMarkAssist = true
			traceRelease(trace)
		} else {
			gp.inMarkAssist = true
		}
		// In the new tracer, set enter mark assist tracing if we
		// ever pass this point, because we must manage inMarkAssist
		// correctly.
		//
		// See the comment on enteredMarkAssistForTracing.
		enteredMarkAssistForTracing = true
	}

	// Perform assist work
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	// gcAssistAlloc1 signals completion of the mark phase by
	// setting gp.param to a non-nil value (it can't return a
	// value because the user stack may have moved).
	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if enteredMarkAssistForTracing {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCMarkAssistDone()
			// Set this *after* we trace the end to make sure
			// that we emit an in-progress event if this is
			// the first event for the goroutine in the trace
			// or trace generation. Also, do this between
			// acquire/release because this is part of the
			// goroutine's trace state, and it must be atomic
			// with respect to the tracer.
			gp.inMarkAssist = false
			traceRelease(trace)
		} else {
			// This state is tracked even if tracing isn't enabled.
			// It's only used by the new tracer.
			// See the comment on enteredMarkAssistForTracing.
			gp.inMarkAssist = false
		}
	}
}
// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptible system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	//
	// Limiter event tracking might be disabled if we end up here
	// while on a mark worker.
	startTime := nanotime()
	trackLimiterEvent := gp.m.p.ptr().limiterEvent.start(limiterEventMarkAssist, startTime)

	gcBeginWork()

	// gcDrainN requires the caller to be preemptible.
	casGToWaitingForSuspendG(gp, _Grunning, waitReasonGCAssistMarking)

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	if gcEndWork() {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	// Account the assist time against this P, flushing to the
	// global counter (and poking the CPU limiter) only once enough
	// has accumulated, to reduce contention.
	now := nanotime()
	duration := now - startTime
	pp := gp.m.p.ptr()
	pp.gcAssistTime += duration
	if trackLimiterEvent {
		pp.limiterEvent.stop(limiterEventMarkAssist, now)
	}
	if pp.gcAssistTime > gcAssistTimeSlack {
		gcController.assistTime.Add(pp.gcAssistTime)
		gcCPULimiter.update(now)
		pp.gcAssistTime = 0
	}
}
// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	// Drain the whole queue under the lock and make every parked
	// assist runnable again.
	blocked := work.assistQueue.q.popList()
	injectglist(&blocked)
	unlock(&work.assistQueue.lock)
}
// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldList := work.assistQueue.q
	work.assistQueue.q.pushBack(gp)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if gcController.bgScanCredit.Load() > 0 {
		// Back out: restore the queue to its state before pushBack.
		// The old tail's schedlink was modified by pushBack, so sever
		// it again to keep the restored list well-formed.
		work.assistQueue.q = oldList
		if oldList.tail != 0 {
			oldList.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceBlockGCMarkAssist, 2)
	return true
}
// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.q.empty() {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		gcController.bgScanCredit.Add(scanWork)
		return
	}

	// Convert the work credit into allocation-byte credit, which is
	// the unit assist debt is tracked in.
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	scanBytes := int64(float64(scanWork) * assistBytesPerWork)

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() && scanBytes > 0 {
		gp := work.assistQueue.q.pop()
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			// It's important that we *not* put gp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(gp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			work.assistQueue.q.pushBack(gp)
			break
		}
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanWork = int64(float64(scanBytes) * assistWorkPerByte)
		gcController.bgScanCredit.Add(scanWork)
	}
	unlock(&work.assistQueue.lock)
}
// scanstack scans gp's stack, greying all pointers found on the stack.
//
// Returns the amount of scan work performed, but doesn't update
// gcController.stackScanWork or flush any credit. Any background credit produced
// by this function should be flushed by its caller. scanstack itself can't
// safely flush because it may result in trying to wake up a goroutine that
// was just scanned, resulting in a self-deadlock.
//
// scanstack will also shrink the stack if it is safe to do so. If it
// is not, it schedules a stack shrink for the next synchronous safe
// point.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) int64 {
	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead, _Gdeadextra:
		// Dead Gs have no stack to scan.
		return 0
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting, _Gleaked:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}

	// scannedSize is the amount of work we'll be reporting.
	//
	// It is less than the allocated size (which is hi-lo).
	var sp uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp // If in a system call this is the stack pointer (gp.sched.sp can be 0 in this case on Windows).
	} else {
		sp = gp.sched.sp
	}
	scannedSize := gp.stack.hi - sp

	// Keep statistics for initial stack size calculation.
	// Note that this accumulates the scanned size, not the allocated size.
	p := getg().m.p.ptr()
	p.scannedStackSize += uint64(scannedSize)
	p.scannedStacks++

	if isShrinkStackSafe(gp) {
		// Shrink the stack if not much of it is being used.
		shrinkstack(gp)
	} else {
		// Otherwise, shrink the stack at the next sync safe point.
		gp.preemptShrink = true
	}

	var state stackScanState
	state.stack = gp.stack

	if stackTraceDebug {
		println("stack trace goroutine", gp.goid)
	}

	if debugScanConservative && gp.asyncSafePoint {
		print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the
	// register and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
	}

	// Scan conservatively the extended register state.
	if gp.asyncSafePoint {
		xRegScan(gp, gcw, &state)
	}

	// Scan the stack. Accumulate a list of stack objects.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		scanframeworker(&u.frame, &state, gcw)
	}

	// Find additional pointers that point into the stack from the heap.
	// Currently this includes defers and panics. See also function copystack.

	// Find and trace other pointers in defer records.
	for d := gp._defer; d != nil; d = d.link {
		if d.fn != nil {
			// Scan the func value, which could be a stack allocated closure.
			// See issue 30453.
			scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		if d.link != nil {
			// The link field of a stack-allocated defer record might point
			// to a heap-allocated defer record. Keep that heap record live.
			scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		// Retain defers records themselves.
		// Defer records might not be reachable from the G through regular heap
		// tracing because the defer linked list might weave between the stack and the heap.
		if d.heap {
			scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
	}
	if gp._panic != nil {
		// Panics are always stack allocated.
		state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
	}

	// Find and scan all reachable stack objects.
	//
	// The state's pointer queue prioritizes precise pointers over
	// conservative pointers so that we'll prefer scanning stack
	// objects precisely.
	state.buildIndex()
	for {
		p, conservative := state.getPtr()
		if p == 0 {
			break
		}
		obj := state.findObject(p)
		if obj == nil {
			continue
		}
		r := obj.r
		if r == nil {
			// We've already scanned this object.
			continue
		}
		obj.setRecord(nil) // Don't scan it again.
		if stackTraceDebug {
			printlock()
			print("  live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of size", obj.size)
			if conservative {
				print(" (conservative)")
			}
			println()
			printunlock()
		}
		ptrBytes, gcData := r.gcdata()
		b := state.stack.lo + uintptr(obj.off)
		if conservative {
			scanConservative(b, ptrBytes, gcData, gcw, &state)
		} else {
			scanblock(b, ptrBytes, gcData, gcw, &state)
		}
	}

	// Deallocate object buffers.
	// (Pointer buffers were all deallocated in the loop above.)
	for state.head != nil {
		x := state.head
		state.head = x.next
		if stackTraceDebug {
			for i := 0; i < x.nobj; i++ {
				obj := &x.obj[i]
				if obj.r == nil { // reachable
					continue
				}
				println("  dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size)
				// Note: not necessarily really dead - only reachable-from-ptr dead.
			}
		}
		x.nobj = 0
		putempty((*workbuf)(unsafe.Pointer(x)))
	}
	if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
		throw("remaining pointer buffers")
	}
	return int64(scannedSize)
}
// Scan a stack frame: local variables and function arguments/results.
//
//go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && frame.continpc != 0 {
		print("scanframe ", funcname(frame.fn), "\n")
	}

	// asyncPreempt and debugCallV2 frames hold a snapshot of the registers
	// of an asynchronously stopped frame, so they (and their parent frame)
	// must be scanned conservatively; see below.
	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == abi.FuncID_asyncPreempt
	isDebugCall := frame.fn.valid() && frame.fn.funcID == abi.FuncID_debugCallV2
	if state.conservative || isAsyncPreempt || isDebugCall {
		if debugScanConservative {
			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
		}

		// Conservatively scan the frame. Unlike the precise
		// case, this includes the outgoing argument space
		// since we may have stopped while this function was
		// setting up a call.
		//
		// TODO: We could narrow this down if the compiler
		// produced a single map per function of stack slots
		// and registers that ever contain a pointer.
		if frame.varp != 0 {
			size := frame.varp - frame.sp
			if size > 0 {
				scanConservative(frame.sp, size, nil, gcw, state)
			}
		}

		// Scan arguments to this frame.
		if n := frame.argBytes(); n != 0 {
			// TODO: We could pass the entry argument map
			// to narrow this down further.
			scanConservative(frame.argp, n, nil, gcw, state)
		}

		if isAsyncPreempt || isDebugCall {
			// This function's frame contained the
			// registers for the asynchronously stopped
			// parent frame. Scan the parent
			// conservatively.
			state.conservative = true
		} else {
			// We only wanted to scan those two frames
			// conservatively. Clear the flag for future
			// frames.
			state.conservative = false
		}
		return
	}

	locals, args, objs := frame.getStackMap(false)

	// Scan local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
	}

	// Scan arguments.
	if args.n > 0 {
		scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
	}

	// Add all stack objects to the stack object list.
	if frame.varp != 0 {
		// varp is 0 for defers, where there are no locals.
		// In that case, there can't be a pointer to its args, either.
		// (And all args would be scanned above anyway.)
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			ptr := base + uintptr(off)
			if ptr < frame.sp {
				// object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(ptr), "of size", obj.size)
			}
			state.addObject(ptr, obj)
		}
	}
}
// gcDrainFlags control the behavior of gcDrain. They may be combined
// with bitwise OR. See the doc comment on gcDrain for the precise
// semantics of each flag.
type gcDrainFlags int

const (
	// gcDrainUntilPreempt makes gcDrain return when the calling
	// goroutine's preempt flag is set.
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	// gcDrainFlushBgCredit makes gcDrain flush scan work credit to
	// gcController.bgScanCredit every gcCreditSlack units of scan work.
	gcDrainFlushBgCredit
	// gcDrainIdle makes gcDrain return when there is other work to do.
	gcDrainIdle
	// gcDrainFractional makes gcDrain self-preempt when
	// pollFractionalWorkerExit() returns true.
	gcDrainFractional
)
// gcDrainMarkWorkerIdle is a wrapper for gcDrain that exists to better account
// mark time in profiles.
//
// Idle workers drain until preempted or until there is other work to
// do (gcDrainIdle), flushing background scan credit as they go.
func gcDrainMarkWorkerIdle(gcw *gcWork) {
	gcDrain(gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
}
// gcDrainMarkWorkerDedicated is a wrapper for gcDrain that exists to better account
// mark time in profiles.
//
// Dedicated workers always flush background scan credit; untilPreempt
// additionally requests that the drain stop when the G is preempted.
func gcDrainMarkWorkerDedicated(gcw *gcWork, untilPreempt bool) {
	if untilPreempt {
		gcDrain(gcw, gcDrainFlushBgCredit|gcDrainUntilPreempt)
		return
	}
	gcDrain(gcw, gcDrainFlushBgCredit)
}
// gcDrainMarkWorkerFractional is a wrapper for gcDrain that exists to better account
// mark time in profiles.
//
// Fractional workers drain until preempted or until
// pollFractionalWorkerExit says to stop (gcDrainFractional), flushing
// background scan credit as they go.
func gcDrainMarkWorkerFractional(gcw *gcWork) {
	gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
}
// gcNextMarkRoot safely increments work.markrootNext and returns the
// index of the next root job. The returned boolean is true if the root job
// is valid, and false if there are no more root jobs to be claimed,
// i.e. work.markrootNext >= work.markrootJobs.
func gcNextMarkRoot() (uint32, bool) {
	if !work.goroutineLeak.enabled {
		// If not running goroutine leak detection, assume regular GC behavior.
		// A plain atomic add suffices; markrootNext may overshoot
		// markrootJobs, which the comparison below accounts for.
		job := work.markrootNext.Add(1) - 1
		return job, job < work.markrootJobs.Load()
	}
	// Otherwise, use a CAS loop to increment markrootNext. Unlike the
	// add above, this never advances markrootNext past markrootJobs.
	for next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); next < jobs; next = work.markrootNext.Load() {
		// There is still work available at the moment.
		if work.markrootNext.CompareAndSwap(next, next+1) {
			// We manage to snatch a root job. Return the root index.
			return next, true
		}
	}
	return 0, false
}
// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true. This implies
// gcDrainNoBlock.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
// gcDrain will always return if there is a pending STW or forEachP.
//
// Disabling write barriers is necessary to ensure that after we've
// confirmed that we've drained gcw, that we don't accidentally end
// up flipping that condition by immediately adding work in the form
// of a write barrier buffer flush.
//
// Don't set nowritebarrierrec because it's safe for some callees to
// have write barriers enabled.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.enabled {
		throw("gcDrain phase incorrect")
	}

	// N.B. We must be running in a non-preemptible context, so it's
	// safe to hold a reference to our P here.
	gp := getg().m.curg
	pp := gp.m.p.ptr()
	preemptible := flags&gcDrainUntilPreempt != 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.heapScanWork

	// checkWork is the scan work before performing the next
	// self-preempt check. It starts effectively at "infinity" and is
	// lowered only when a self-preempt check function is installed.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs, if any remain.
	if work.markrootNext.Load() < work.markrootJobs.Load() {
		// Stop if we're preemptible, if someone wants to STW, or if
		// someone is calling forEachP.
		for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
			job, ok := gcNextMarkRoot()
			if !ok {
				break
			}
			markroot(gcw, job, flushBgCredit)
			if check != nil && check() {
				goto done
			}
			// Spin up a new worker if requested.
			if goexperiment.GreenTeaGC && gcw.mayNeedWorker {
				gcw.mayNeedWorker = false
				if gcphase == _GCmark {
					gcController.enlistWorker()
				}
			}
		}
	}

	// Drain heap marking jobs.
	//
	// Stop if we're preemptible, if someone wants to STW, or if
	// someone is calling forEachP.
	//
	// TODO(mknyszek): Consider always checking gp.preempt instead
	// of having the preempt flag, and making an exception for certain
	// mark workers in retake. That might be simpler than trying to
	// enumerate all the reasons why we might want to preempt, even
	// if we're supposed to be mostly non-preemptible.
	for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		// See mgcwork.go for the rationale behind the order in which we check these queues.
		var b uintptr
		var s objptr
		if b = gcw.tryGetObjFast(); b == 0 {
			if s = gcw.tryGetSpanFast(); s == 0 {
				if b = gcw.tryGetObj(); b == 0 {
					if s = gcw.tryGetSpan(); s == 0 {
						// Flush the write barrier
						// buffer; this may create
						// more work.
						wbBufFlush()
						if b = gcw.tryGetObj(); b == 0 {
							if s = gcw.tryGetSpan(); s == 0 {
								// As a last resort, try taking a span
								// queued by another P.
								s = gcw.tryStealSpan()
							}
						}
					}
				}
			}
		}
		if b != 0 {
			scanObject(b, gcw)
		} else if s != 0 {
			scanSpan(s, gcw)
		} else {
			// Unable to get work.
			break
		}

		// Spin up a new worker if requested.
		if goexperiment.GreenTeaGC && gcw.mayNeedWorker {
			gcw.mayNeedWorker = false
			if gcphase == _GCmark {
				gcController.enlistWorker()
			}
		}

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.heapScanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.heapScanWork
			gcw.heapScanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

done:
	// Flush remaining scan work credit.
	if gcw.heapScanWork > 0 {
		gcController.heapScanWork.Add(gcw.heapScanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.heapScanWork - initScanWork)
		}
		gcw.heapScanWork = 0
	}
}
// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.enabled {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.heapScanWork

	// In addition to backing out because of a preemption, back out
	// if the GC CPU limiter is enabled.
	gp := getg().m.curg
	for !gp.preempt && !gcCPULimiter.limiting() && workFlushed+gcw.heapScanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		// See mgcwork.go for the rationale behind the order in which we check these queues.
		var b uintptr
		var s objptr
		if b = gcw.tryGetObjFast(); b == 0 {
			if s = gcw.tryGetSpanFast(); s == 0 {
				if b = gcw.tryGetObj(); b == 0 {
					if s = gcw.tryGetSpan(); s == 0 {
						// Flush the write barrier
						// buffer; this may create
						// more work.
						wbBufFlush()
						if b = gcw.tryGetObj(); b == 0 {
							if s = gcw.tryGetSpan(); s == 0 {
								// Try to do a root job.
								if work.markrootNext.Load() < work.markrootJobs.Load() {
									job, ok := gcNextMarkRoot()
									if ok {
										workFlushed += markroot(gcw, job, false)
										continue
									}
								}
								// As a last resort, try taking a span
								// queued by another P.
								s = gcw.tryStealSpan()
							}
						}
					}
				}
			}
		}
		if b != 0 {
			scanObject(b, gcw)
		} else if s != 0 {
			scanSpan(s, gcw)
		} else {
			// Unable to get work.
			break
		}

		// Flush background scan work credit.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			workFlushed += gcw.heapScanWork
			gcw.heapScanWork = 0
		}

		// Spin up a new worker if requested.
		if goexperiment.GreenTeaGC && gcw.mayNeedWorker {
			gcw.mayNeedWorker = false
			if gcphase == _GCmark {
				gcController.enlistWorker()
			}
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.
	return workFlushed + gcw.heapScanWork
}
// scanblock scans b as scanObject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.heapScanWork.
//
// If stk != nil, possible stack pointers are also reported to stk.putPtr.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
		// Find bits for the next word. Each byte of ptrmask covers
		// eight pointer-sized words of the block.
		bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
		if bits == 0 {
			// No pointers in any of these eight words; skip them all.
			i += goarch.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanObject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p, false)
					} else {
						if !tryDeferToSpanScan(p, gcw) {
							if obj, span, objIndex := findObject(p, b, i); obj != 0 {
								greyobject(obj, b, i, span, gcw, objIndex)
							}
						}
					}
				}
			}
			bits >>= 1
			i += goarch.PtrSize
		}
	}
}
// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
// If ptrmask != nil, only words that are marked in ptrmask are
// considered as potential pointers.
//
// If state != nil, it's assumed that [b, b+n) is a block in the stack
// and may contain pointers to stack objects.
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
	if debugScanConservative {
		// Debug mode: dump the block and annotate which words look
		// like stack or heap pointers before doing the real scan.
		printlock()
		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
		hexdumpWords(b, n, func(p uintptr, m hexdumpMarker) {
			if ptrmask != nil {
				word := (p - b) / goarch.PtrSize
				bits := *addb(ptrmask, word/8)
				if (bits>>(word%8))&1 == 0 {
					return
				}
			}
			val := *(*uintptr)(unsafe.Pointer(p))
			if state != nil && state.stack.lo <= val && val < state.stack.hi {
				m.start()
				println("ptr to stack")
				return
			}
			span := spanOfHeap(val)
			if span == nil {
				return
			}
			idx := span.objIndex(val)
			if span.isFreeOrNewlyAllocated(idx) {
				return
			}
			m.start()
			println("ptr to heap")
		})
		printunlock()
	}

	for i := uintptr(0); i < n; i += goarch.PtrSize {
		if ptrmask != nil {
			word := i / goarch.PtrSize
			bits := *addb(ptrmask, word/8)
			if bits == 0 {
				// Skip 8 words (the loop increment will do the 8th)
				//
				// This must be the first time we've
				// seen this word of ptrmask, so i
				// must be 8-word-aligned, but check
				// our reasoning just in case.
				if i%(goarch.PtrSize*8) != 0 {
					throw("misaligned mask")
				}
				i += goarch.PtrSize*8 - goarch.PtrSize
				continue
			}
			if (bits>>(word%8))&1 == 0 {
				continue
			}
		}

		val := *(*uintptr)(unsafe.Pointer(b + i))

		// Check if val points into the stack.
		if state != nil && state.stack.lo <= val && val < state.stack.hi {
			// val may point to a stack object. This
			// object may be dead from last cycle and
			// hence may contain pointers to unallocated
			// objects, but unlike heap objects we can't
			// tell if it's already dead. Hence, if all
			// pointers to this object are from
			// conservative scanning, we have to scan it
			// defensively, too.
			state.putPtr(val, true)
			continue
		}

		// Check if val points to a heap span.
		span := spanOfHeap(val)
		if span == nil {
			continue
		}

		// Check if val points to an allocated object.
		//
		// Ignore objects allocated during the mark phase, they've
		// been allocated black.
		idx := span.objIndex(val)
		if span.isFreeOrNewlyAllocated(idx) {
			continue
		}

		// val points to an allocated object. Mark it.
		obj := span.base() + idx*span.elemsize
		if !tryDeferToSpanScan(obj, gcw) {
			greyobject(obj, b, i, span, gcw, idx)
		}
	}
}
// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//
//go:nowritebarrier
func shade(b uintptr) {
	gcw := &getg().m.p.ptr().gcw
	// Prefer deferring the pointer to a whole-span scan; fall back to
	// greying the individual object when that isn't possible.
	if tryDeferToSpanScan(b, gcw) {
		return
	}
	obj, span, objIndex := findObject(b, 0, 0)
	if obj == 0 {
		return
	}
	greyobject(obj, 0, 0, span, gcw, objIndex)
}
// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(goarch.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	// Look up the mark bit once; it's used on both the checkmark and
	// regular paths below.
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if setCheckmark(obj, base, off, mbits) {
			// Already marked.
			return
		}
		if debug.checkfinalizers > 1 {
			print(" mark ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}
	}

	// If this is a noscan object, fast-track it to black
	// instead of greying it.
	if span.spanclass.noscan() {
		gcw.bytesMarked += uint64(span.elemsize)
		return
	}

	// We're adding obj to P's local workbuf, so it's likely
	// this object will be processed soon by the same P.
	// Even if the workbuf gets flushed, there will likely still be
	// some benefit on platforms with inclusive shared caches.
	sys.Prefetch(obj)

	// Queue the obj for scanning.
	if !gcw.putObjFast(obj) {
		gcw.putObj(obj)
	}
}
// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	s := spanOf(obj)
	print(label, "=", hex(obj))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
		print(mSpanStateNames[state], "\n")
	} else {
		print("unknown(", state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state.get() == mSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to an
		// including off.
		size = off + goarch.PtrSize
	}
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		//
		// N.B. off-16*goarch.PtrSize wraps around for small off
		// (uintptr arithmetic); the second clause then fails, but the
		// i < 128*goarch.PtrSize clause still covers a small off.
		if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}
// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(span *mspan, obj uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}
	if gcphase == _GCmarktermination {
		// Check this here instead of on the hot path.
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	// Mark object.
	objIndex := span.objIndex(obj)
	span.markBitsForIndex(objIndex).setMarked()
	if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(span.elemsize) {
		// No need to scan the new object: it holds no non-nil
		// pointers (see the doc comment), so set its scanned bit too.
		span.scannedBitsForIndex(objIndex).setMarked()
	}

	// Mark span.
	arena, pageIdx, pageMask := pageIndexOf(span.base())
	if arena.pageMarks[pageIdx]&pageMask == 0 {
		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
	}

	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(span.elemsize)
}
// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	assertWorldStopped()
	for _, p := range allp {
		mc := p.mcache
		if mc == nil || mc.tiny == 0 {
			// No cache, or no tiny block currently in use.
			continue
		}
		w := &p.gcw
		if tryDeferToSpanScan(mc.tiny, w) {
			continue
		}
		_, span, objIndex := findObject(mc.tiny, 0, 0)
		greyobject(mc.tiny, 0, 0, span, w, objIndex)
	}
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Green Tea mark algorithm
//
// The core idea behind Green Tea is simple: achieve better locality during
// mark/scan by delaying scanning so that we can accumulate objects to scan
// within the same span, then scan the objects that have accumulated on the
// span all together.
//
// By batching objects this way, we increase the chance that adjacent objects
// will be accessed, amortize the cost of accessing object metadata, and create
// better opportunities for prefetching. We can take this even further and
// optimize the scan loop by size class (not yet completed) all the way to the
// point of applying SIMD techniques to really tear through the heap.
//
// Naturally, this depends on being able to create opportunities to batch objects
// together. The basic idea here is to have two sets of mark bits. One set is the
// regular set of mark bits ("marks"), while the other essentially says that the
// objects have been scanned already ("scans"). When we see a pointer for the first
// time we set its mark and enqueue its span. We track these spans in work queues
// with a FIFO policy, unlike workbufs which have a LIFO policy. Empirically, a
// FIFO policy appears to work best for accumulating objects to scan on a span.
// Later, when we dequeue the span, we find both the union and intersection of the
// mark and scan bitsets. The union is then written back into the scan bits, while
// the intersection is used to decide which objects need scanning, such that the GC
// is still precise.
//
// Below is the bulk of the implementation, focusing on the worst case
// for locality, small objects. Specifically, those that are smaller than
// a few cache lines in size and whose metadata is stored the same way (at the
// end of the span).
//go:build goexperiment.greenteagc
package runtime
import (
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/gc"
"internal/runtime/gc/scan"
"internal/runtime/sys"
"unsafe"
)
// doubleCheckGreenTea enables extra consistency checks in the Green Tea
// mark algorithm (e.g. validating that spans really use inline mark
// bits). Intended for debugging only; false in normal builds.
const doubleCheckGreenTea = false
// spanInlineMarkBits are mark bits that are inlined into the span
// itself. gcUsesSpanInlineMarkBits may be used to check if objects
// of a particular size use inline mark bits.
//
// Inline mark bits are a little bit more than just mark bits. They
// consist of two parts: scans and marks. Marks are like pre-mark
// bits. They're set once a pointer to an object is discovered for
// the first time. The marks allow us to scan many objects in bulk
// if we queue the whole span for scanning. Before we scan such objects
// in bulk, we copy the marks to the scans, computing a diff along the
// way. The resulting bitmap tells us which objects we should scan.
//
// The inlineMarkBits also hold state sufficient for scanning any
// object in the span, as well as state for acquiring ownership of
// the span for queuing. This avoids the need to look at the mspan when
// scanning.
type spanInlineMarkBits struct {
	// The four fields below total exactly 128 bytes (63+1+63+1); the
	// struct is stored at the very end of the span's page (see
	// spanInlineMarkBitsFromBase).
	scans [63]uint8         // scanned bits.
	owned spanScanOwnership // see the comment on spanScanOwnership.
	marks [63]uint8         // mark bits.
	class spanClass         // size class and noscan bit; lets scanning proceed without the mspan.
}
// spanScanOwnership indicates whether some thread has acquired
// the span for scanning, and whether there has been one or more
// attempts to acquire the span. The latter information helps to
// fast-track span scans that only apply to a single mark, skipping
// the relatively costly merge-and-diff process for scans and marks
// by allowing one to just set the mark directly.
//
// Values are manipulated atomically via the load, or, and release
// methods below.
type spanScanOwnership uint8

const (
	spanScanUnowned  spanScanOwnership = 0 // Indicates the span is not acquired for scanning.
	spanScanOneMark  = 1 << iota           // Indicates that only one mark bit is set relative to the scan bits.
	spanScanManyMark                       // Indicates one or more scan bits may be set relative to the mark bits.
	// "ManyMark" need not be exactly the value it has. In practice we just
	// want to distinguish "none" from "one" from "many," so a comparison is
	// sufficient (as opposed to a bit test) to check between these cases.
)
// load atomically loads from a pointer to a spanScanOwnership.
//
// spanScanOwnership is a single byte, so a plain 8-bit atomic load
// suffices here.
func (o *spanScanOwnership) load() spanScanOwnership {
	return spanScanOwnership(atomic.Load8((*uint8)(unsafe.Pointer(o))))
}
// or atomically ORs v into *o and returns the value *o held immediately
// before the OR (an atomic fetch-or); callers rely on observing the old
// value (see tryAcquire).
func (o *spanScanOwnership) or(v spanScanOwnership) spanScanOwnership {
	// N.B. We round down the address and use Or32 because Or8 doesn't
	// return a result, and it's strictly necessary for this protocol.
	//
	// Making Or8 return a result, while making the code look nicer, would
	// not be strictly better on any supported platform, as an Or8 that
	// returns a result is not a common instruction. On many platforms it
	// would be implemented exactly as it is here, and since Or8 is
	// exclusively used in the runtime and a hot function, we want to keep
	// using its no-result version elsewhere for performance.
	o32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(o)) &^ 0b11))
	off := (uintptr(unsafe.Pointer(o)) & 0b11) * 8
	if goarch.BigEndian {
		// Byte order within the 32-bit word is reversed on big-endian
		// machines, so the bit offset of our byte flips accordingly.
		off = 32 - off - 8
	}
	return spanScanOwnership(atomic.Or32(o32, uint32(v)<<off) >> off)
}
// init initializes the inline mark bits for a span of the given class,
// zeroing the bits first when needzero is set, and records the class so
// scanning can recover it without consulting the mspan.
func (imb *spanInlineMarkBits) init(class spanClass, needzero bool) {
	if imb == nil {
		// This nil check and throw is almost pointless. Normally we would
		// expect imb to never be nil. However, this is called on potentially
		// freshly-allocated virtual memory. As of 2025, the compiler-inserted
		// nil check is not a branch but a memory read that we expect to fault
		// if the pointer really is nil.
		//
		// However, this causes a read of the page, and operating systems may
		// take it as a hint to back the accessed memory with a read-only zero
		// page. However, we immediately write to this memory, which can then
		// force operating systems to have to update the page table and flush
		// the TLB, causing a lot of churn for programs that are short-lived
		// and monotonically grow in size.
		//
		// This nil check is thus an explicit branch instead of what the compiler
		// would insert circa 2025, which is a memory read instruction.
		//
		// See go.dev/issue/74375 for details.
		throw("runtime: span inline mark bits nil?")
	}
	if needzero {
		// Use memclrNoHeapPointers to avoid having the compiler make a worse
		// decision. We know that imb is both aligned and a nice power-of-two
		// size that works well for wider SIMD instructions. The compiler likely
		// has no idea that imb is aligned to 128 bytes.
		memclrNoHeapPointers(unsafe.Pointer(imb), unsafe.Sizeof(spanInlineMarkBits{}))
	}
	imb.class = class
}
// tryAcquire attempts to acquire the span for scanning. On success, the caller
// must queue the span for scanning or scan the span immediately.
//
// It returns false when ownership could not be obtained, in which case
// whichever thread did obtain it is responsible for queuing the span.
func (imb *spanInlineMarkBits) tryAcquire() bool {
	switch imb.owned.load() {
	case spanScanUnowned:
		// Try to mark the span as having only one object marked.
		if imb.owned.or(spanScanOneMark) == spanScanUnowned {
			return true
		}
		// If we didn't see an old value of spanScanUnowned, then we must
		// have raced with someone else and seen spanScanOneMark or greater.
		// Fall through and try to set spanScanManyMark.
		fallthrough
	case spanScanOneMark:
		// We may be the first to set *any* bit on owned. In such a case,
		// we still need to make sure the span is queued.
		return imb.owned.or(spanScanManyMark) == spanScanUnowned
	}
	return false
}
// release releases the span for scanning, allowing another thread to queue the span.
//
// Returns an upper bound on the number of mark bits set since the span was queued. The
// upper bound is described as "one" (spanScanOneMark) or "many" (spanScanManyMark, with or
// without spanScanOneMark). If the return value indicates only one mark bit was set, the
// caller can be certain that it was the same mark bit that caused the span to get queued.
// Take note of the fact that this is *only* an upper-bound. In particular, it may still
// turn out that only one mark bit was set, even if the return value indicates "many".
func (imb *spanInlineMarkBits) release() spanScanOwnership {
	// Xchg8 swaps in spanScanUnowned and hands back the previous owned
	// value, which encodes the upper bound described above.
	return spanScanOwnership(atomic.Xchg8((*uint8)(unsafe.Pointer(&imb.owned)), uint8(spanScanUnowned)))
}
// spanInlineMarkBitsFromBase returns the spanInlineMarkBits for a span whose start address is base.
//
// The span must be gcUsesSpanInlineMarkBits(span.elemsize).
func spanInlineMarkBitsFromBase(base uintptr) *spanInlineMarkBits {
	// The inline mark bits occupy the final Sizeof(spanInlineMarkBits{})
	// bytes of the span's page.
	return (*spanInlineMarkBits)(unsafe.Pointer(base + gc.PageSize - unsafe.Sizeof(spanInlineMarkBits{})))
}
// initInlineMarkBits initializes the inlineMarkBits stored at the end of the span.
//
// gcUsesSpanInlineMarkBits(s.elemsize) must be true.
func (s *mspan) initInlineMarkBits() {
	if doubleCheckGreenTea && !gcUsesSpanInlineMarkBits(s.elemsize) {
		throw("expected span with inline mark bits")
	}
	// Zeroing is only necessary if this span wasn't just freshly allocated from the OS.
	s.inlineMarkBits().init(s.spanclass, s.needzero != 0)
}
// moveInlineMarks merges the span's inline mark bits into dst and clears them.
//
// gcUsesSpanInlineMarkBits(s.elemsize) must be true.
func (s *mspan) moveInlineMarks(dst *gcBits) {
	if doubleCheckGreenTea && !gcUsesSpanInlineMarkBits(s.elemsize) {
		throw("expected span with inline mark bits")
	}
	bytes := divRoundUp(uintptr(s.nelems), 8)
	imb := s.inlineMarkBits()
	imbMarks := (*gc.ObjMask)(unsafe.Pointer(&imb.marks))
	// OR the marks into dst one pointer-sized word at a time.
	for i := uintptr(0); i < bytes; i += goarch.PtrSize {
		marks := bswapIfBigEndian(imbMarks[i/goarch.PtrSize])
		if i/goarch.PtrSize == uintptr(len(imb.marks)+1)/goarch.PtrSize-1 {
			// The final word overlaps the class byte that follows
			// the marks array in the struct; exclude it.
			marks &^= 0xff << ((goarch.PtrSize - 1) * 8) // mask out class
		}
		*(*uintptr)(unsafe.Pointer(dst.bytep(i))) |= bswapIfBigEndian(marks)
	}
	if doubleCheckGreenTea && !s.spanclass.noscan() && imb.marks != imb.scans {
		throw("marks don't match scans for span with pointer")
	}

	// Reset the inline mark bits.
	imb.init(s.spanclass, true /* We know these bits are always dirty now. */)
}
// inlineMarkBits returns the inline mark bits for the span.
//
// The bits live in span memory itself, at the end of the span's page
// (see spanInlineMarkBitsFromBase).
//
// gcUsesSpanInlineMarkBits(s.elemsize) must be true.
func (s *mspan) inlineMarkBits() *spanInlineMarkBits {
	if doubleCheckGreenTea && !gcUsesSpanInlineMarkBits(s.elemsize) {
		throw("expected span with inline mark bits")
	}
	return spanInlineMarkBitsFromBase(s.base())
}
// markBitsForIndex returns the markBits for the object at objIndex,
// selecting between the span's inline mark bits and the heap-side
// gcmarkBits depending on the span's element size.
func (s *mspan) markBitsForIndex(objIndex uintptr) (bits markBits) {
	var bytep *uint8
	if gcUsesSpanInlineMarkBits(s.elemsize) {
		bytep = &s.inlineMarkBits().marks[objIndex/8]
	} else {
		bytep = s.gcmarkBits.bytep(objIndex / 8)
	}
	return markBits{bytep, uint8(1) << (objIndex % 8), objIndex}
}
// markBitsForBase returns the markBits for the span's first object
// (index 0): bit 0 of the first mark byte, inline or heap-side.
func (s *mspan) markBitsForBase() markBits {
	bytep := &s.gcmarkBits.x
	if gcUsesSpanInlineMarkBits(s.elemsize) {
		bytep = &s.inlineMarkBits().marks[0]
	}
	return markBits{bytep, uint8(1), 0}
}
// scannedBitsForIndex returns a markBits representing the scanned bit
// for objIndex in the inline mark bits.
//
// The scans array parallels the marks array: one bit per object,
// indexed the same way.
func (s *mspan) scannedBitsForIndex(objIndex uintptr) markBits {
	return markBits{&s.inlineMarkBits().scans[objIndex/8], uint8(1) << (objIndex % 8), objIndex}
}
// gcUsesSpanInlineMarkBits returns true if a span holding objects of a certain size
// has inline mark bits. size must be the span's elemsize.
//
// nosplit because this is called from gcmarknewobject, which is nosplit.
//
//go:nosplit
func gcUsesSpanInlineMarkBits(size uintptr) bool {
	// Only spans whose heap bits are stored in-span and whose objects
	// are at least 16 bytes qualify.
	return heapBitsInSpan(size) && size >= 16
}
// tryDeferToSpanScan tries to queue p on the span it points to, if it
// points to a small object span (gcUsesSpanQueue size).
//
// On success, the object's mark bit has been set and, if the span holds
// pointers, the span has been queued (or was already queued) for scanning.
// Returns false if p is not eligible (checkmark mode, not heap-arena
// memory, or not a span with inline mark bits), in which case the caller
// must fall back to the regular object-marking path.
func tryDeferToSpanScan(p uintptr, gcw *gcWork) bool {
	if useCheckmark {
		return false
	}

	// Quickly check whether this is a span that has inline mark bits.
	ha := heapArenaOf(p)
	if ha == nil {
		return false
	}
	pageIdx := ((p / pageSize) / 8) % uintptr(len(ha.pageInUse))
	pageMask := byte(1 << ((p / pageSize) % 8))
	if ha.pageUseSpanInlineMarkBits[pageIdx]&pageMask == 0 {
		return false
	}

	// Find the object's index from the span class info stored in the inline mark bits.
	base := alignDown(p, gc.PageSize)
	q := spanInlineMarkBitsFromBase(base)
	objIndex := uint16((uint64(p-base) * uint64(gc.SizeClassToDivMagic[q.class.sizeclass()])) >> 32)

	// Set mark bit. If it was already set, somebody else marked the
	// object and there is nothing more to do.
	idx, mask := objIndex/8, uint8(1)<<(objIndex%8)
	if atomic.Load8(&q.marks[idx])&mask != 0 {
		return true
	}
	atomic.Or8(&q.marks[idx], mask)

	// Fast-track noscan objects: account their bytes, but never queue
	// the span since there is nothing to scan.
	if q.class.noscan() {
		gcw.bytesMarked += uint64(gc.SizeClassToSize[q.class.sizeclass()])
		return true
	}

	// Queue up the pointer (as a representative for its span).
	// tryAcquire ensures only one marker queues the span at a time.
	if q.tryAcquire() {
		if gcw.spanq.put(makeObjPtr(base, objIndex)) {
			if gcphase == _GCmark {
				// This is intentionally racy; the bit set here might get
				// stomped on by a stealing P. See the comment in tryStealSpan
				// for an explanation as to why this is OK.
				if !work.spanqMask.read(uint32(gcw.id)) {
					work.spanqMask.set(gcw.id)
				}
				gcw.mayNeedWorker = true
			}
			gcw.flushedWork = true
		}
	}
	return true
}
// tryGetSpanFast attempts to get an entire span to scan.
//
// Only consults the local ring; returns 0 if it is empty.
func (w *gcWork) tryGetSpanFast() objptr {
	s := w.spanq.tryGetFast()
	return s
}
// tryGetSpan attempts to get an entire span to scan.
//
// Checks the local ring first, then steals from this P's own spmc chain.
// If both are empty, clears this P's bit in work.spanqMask before
// returning 0, since nobody else will add work to this queue.
func (w *gcWork) tryGetSpan() objptr {
	if s := w.spanq.tryGetFast(); s != 0 {
		return s
	}

	// "Steal" from ourselves.
	if s := w.spanq.steal(&w.spanq); s != 0 {
		return s
	}

	// We failed to get any local work, so we're fresh out.
	// Nobody else is going to add work for us. Clear our bit.
	if work.spanqMask.read(uint32(w.id)) {
		work.spanqMask.clear(w.id)
	}
	return 0
}
// spanQueue is a P-local stealable span queue.
//
// The local ring buffer is owned exclusively by this P; overflow spills
// into a chain of single-producer multi-consumer rings that other Ps may
// steal from.
type spanQueue struct {
	// head, tail, and ring represent a local non-thread-safe ring buffer.
	head, tail uint32
	ring       [256]objptr

	// putsSinceDrain counts the number of put calls since the last drain.
	// Used by put to periodically spill work to the chain, generating
	// parallelism.
	putsSinceDrain int

	// chain contains state visible to other Ps.
	//
	// In particular, that means a linked chain of single-producer multi-consumer
	// ring buffers where the single producer is this P only.
	//
	// This linked chain structure is based off the sync.Pool dequeue.
	chain struct {
		// head is the spanSPMC to put to. This is only accessed
		// by the producer, so doesn't need to be synchronized.
		head *spanSPMC

		// tail is the spanSPMC to steal from. This is accessed
		// by consumers, so reads and writes must be atomic.
		tail atomic.UnsafePointer // *spanSPMC
	}
}
// putFast tries to put s onto the queue, but may fail if it's full.
func (q *spanQueue) putFast(s objptr) (ok bool) {
	n := uint32(len(q.ring))
	if q.tail-q.head == n {
		// Ring is full.
		return false
	}
	q.ring[q.tail%n] = s
	q.tail++
	return true
}
// put puts s onto the queue.
//
// Returns whether the caller should spin up a new worker.
func (q *spanQueue) put(s objptr) bool {
	// The constants below define the period of and volume of
	// spans we spill to the spmc chain when the local queue is
	// not full.
	//
	// spillPeriod must be > spillMax, otherwise that sets the
	// effective maximum size of our local span queue. Even if
	// we have a span ring of size N, but we flush K spans every
	// K puts, then K becomes our effective maximum length. When
	// spillPeriod > spillMax, then we're always spilling spans
	// at a slower rate than we're accumulating them.
	const (
		// spillPeriod defines how often to check if we should
		// spill some spans, counted in the number of calls to put.
		spillPeriod = 64

		// spillMax defines, at most, how many spans to drain with
		// each spill.
		spillMax = 16
	)
	if q.putFast(s) {
		// Occasionally try to spill some work to generate parallelism.
		q.putsSinceDrain++
		if q.putsSinceDrain >= spillPeriod {
			// Reset even if we don't drain, so we don't check every time.
			q.putsSinceDrain = 0

			// Try to drain some spans. Don't bother if there's very
			// few of them or there's already spans in the spmc chain.
			n := min((q.tail-q.head)/2, spillMax)
			if n > 4 && q.chainEmpty() {
				q.drain(n)
				return true
			}
		}
		return false
	}

	// We're out of space. Drain out our local spans.
	// Draining half the ring leaves room for this put and more.
	q.drain(uint32(len(q.ring)) / 2)
	if !q.putFast(s) {
		throw("failed putFast after drain")
	}
	return true
}
// flush publishes all spans in the local queue to the spmc chain.
func (q *spanQueue) flush() {
	// Drain everything currently buffered locally, if anything.
	if n := q.tail - q.head; n != 0 {
		q.drain(n)
	}
}
// empty returns true if there's no more work on the queue.
//
// Not thread-safe. Must only be called by the owner of q.
func (q *spanQueue) empty() bool {
	// Any local work? (head and tail only ever differ by at most
	// len(q.ring), so inequality means non-empty.)
	if q.tail != q.head {
		return false
	}
	// Fall back to checking the published chain.
	return q.chainEmpty()
}
// chainEmpty returns true if the spmc chain is empty.
//
// Thread-safe.
func (q *spanQueue) chainEmpty() bool {
	// Walk from the tail of the chain toward the head, checking each
	// ring for buffered work.
	for r := (*spanSPMC)(q.chain.tail.Load()); r != nil; r = (*spanSPMC)(r.prev.Load()) {
		if !r.empty() {
			return false
		}
	}
	return true
}
// drain publishes n spans from the local queue to the spmc chain,
// allocating a first ring, or a bigger one, as needed.
func (q *spanQueue) drain(n uint32) {
	q.putsSinceDrain = 0

	if q.chain.head == nil {
		// First drain ever: set up the initial ring.
		//
		// N.B. We target 1024, but this may be bigger if the physical
		// page size is bigger, or if we can fit more uintptrs into a
		// physical page. See newSpanSPMC docs.
		r := newSpanSPMC(1024)
		q.chain.head = r
		q.chain.tail.StoreNoWB(unsafe.Pointer(r))
	}

	// Try to drain some of the queue to the head spmc.
	if q.tryDrain(q.chain.head, n) {
		return
	}

	// No space. Create a bigger spmc and add it to the chain.
	// Double the size of the next one, up to a maximum.
	//
	// We double each time so we can avoid taking this slow path
	// in the future, which involves a global lock. Ideally we want
	// to hit a steady-state where the deepest any queue goes during
	// a mark phase can fit in the ring.
	//
	// However, we still set a maximum on this. We set the maximum
	// to something large to amortize the cost of lock acquisition, but
	// still at a reasonable size for big heaps and/or a lot of Ps (which
	// tend to be correlated).
	//
	// It's not too bad to burn relatively large-but-fixed amounts of per-P
	// memory if we need to deal with really, really deep queues, since the
	// constants of proportionality are small. Simultaneously, we want to
	// avoid a situation where a single worker ends up queuing O(heap)
	// work and then forever retains a queue of that size.
	const maxCap = 1 << 20 / goarch.PtrSize
	newCap := q.chain.head.cap * 2
	if newCap > maxCap {
		newCap = maxCap
	}
	newHead := newSpanSPMC(newCap)
	// A fresh, empty ring always has room for n (n <= len(q.ring) <= cap).
	if !q.tryDrain(newHead, n) {
		throw("failed to put span on newly-allocated spanSPMC")
	}
	// Link the new ring in as the head of the chain.
	q.chain.head.prev.StoreNoWB(unsafe.Pointer(newHead))
	q.chain.head = newHead
}
// tryDrain attempts to drain n spans from q's local queue into the ring r.
//
// Returns whether it succeeded; fails only if r lacks space for n items.
func (q *spanQueue) tryDrain(r *spanSPMC, n uint32) bool {
	if q.head+n > q.tail {
		throw("attempt to drain too many elements")
	}
	h := r.head.Load() // synchronize with consumers
	t := r.tail.Load()
	rn := t - h
	if rn+n <= r.cap {
		// There's room: copy n spans from the local ring into r,
		// then publish them by advancing r's tail.
		for i := uint32(0); i < n; i++ {
			*r.slot(t + i) = q.ring[(q.head+i)%uint32(len(q.ring))]
		}
		r.tail.Store(t + n) // Makes the items avail for consumption.
		q.head += n
		return true
	}
	return false
}
// tryGetFast attempts to get a span from the local queue, but may fail if it's empty,
// returning false.
func (q *spanQueue) tryGetFast() objptr {
	h := q.head
	if h == q.tail {
		// Local ring is empty.
		return 0
	}
	v := q.ring[h%uint32(len(q.ring))]
	q.head = h + 1
	return v
}
// steal takes some spans from the ring chain of another span queue.
//
// Walks q2's chain from tail to head, refilling q's local ring from the
// first non-empty ring it finds, and pruning permanently-empty rings from
// the chain along the way.
//
// q == q2 is OK.
func (q *spanQueue) steal(q2 *spanQueue) objptr {
	r := (*spanSPMC)(q2.chain.tail.Load())
	if r == nil {
		return 0
	}
	for {
		// It's important that we load the next pointer
		// *before* popping the tail. In general, r may be
		// transiently empty, but if next is non-nil before
		// the pop and the pop fails, then r is permanently
		// empty, which is the only condition under which it's
		// safe to drop r from the chain.
		r2 := (*spanSPMC)(r.prev.Load())

		// Try to refill from one of the rings
		if s := q.refill(r); s != 0 {
			return s
		}
		if r2 == nil {
			// This is the only ring. It's empty right
			// now, but could be pushed to in the future.
			return 0
		}

		// The tail of the chain has been drained, so move on
		// to the next ring. Try to drop it from the chain
		// so the next consumer doesn't have to look at the empty
		// ring again.
		if q2.chain.tail.CompareAndSwapNoWB(unsafe.Pointer(r), unsafe.Pointer(r2)) {
			// Winning the CAS grants ownership of marking r dead;
			// it will be reclaimed by freeDeadSpanSPMCs.
			r.dead.Store(true)
		}
		r = r2
	}
}
// refill takes some spans from r and puts them into q's local queue.
//
// One span is removed from the stolen spans and returned on success.
// Failure to steal returns a zero objptr.
//
// steal is thread-safe with respect to r.
func (q *spanQueue) refill(r *spanSPMC) objptr {
	// refill overwrites the local ring, so it must be empty.
	if q.tail-q.head != 0 {
		throw("steal with local work available")
	}

	// Steal some spans: standard lock-free consume loop, retried
	// until the head CAS claims a batch.
	var n uint32
	for {
		h := r.head.Load() // load-acquire, synchronize with other consumers
		t := r.tail.Load() // load-acquire, synchronize with the producer
		n = t - h
		n = n - n/2 // steal half, rounding up
		if n == 0 {
			return 0
		}
		if n > r.cap { // read inconsistent h and t
			continue
		}
		// Never take more than half our local ring can hold.
		n = min(n, uint32(len(q.ring)/2))

		// Copy the batch out before claiming it; a failed CAS
		// simply discards the (unpublished) copies.
		for i := uint32(0); i < n; i++ {
			q.ring[i] = *r.slot(h + i)
		}
		if r.head.CompareAndSwap(h, h+n) {
			break
		}
	}

	// Update local queue head and tail to reflect new buffered values.
	q.head = 0
	q.tail = n

	// Pop off the head of the queue and return it.
	return q.tryGetFast()
}
// destroy frees all chains in an empty spanQueue.
//
// Preconditions:
//   - World is stopped.
//   - GC is outside of the mark phase.
//   - (Therefore) the queue is empty.
func (q *spanQueue) destroy() {
	assertWorldStopped()
	if gcphase != _GCoff {
		throw("spanQueue.destroy during the mark phase")
	}
	if !q.empty() {
		throw("spanQueue.destroy on non-empty queue")
	}
	// The lock protects the global work.spanSPMCs list; freeing outside
	// the mark phase is required by the spanSPMC memory-management
	// invariants (see the spanSPMC type docs).
	lock(&work.spanSPMCs.lock)

	// Remove, deinitialize, and free each ring.
	for r := (*spanSPMC)(q.chain.tail.Load()); r != nil; r = (*spanSPMC)(r.prev.Load()) {
		work.spanSPMCs.list.remove(unsafe.Pointer(r))
		r.deinit()
		mheap_.spanSPMCAlloc.free(unsafe.Pointer(r))
	}
	// Reset the queue's chain state to its zero value.
	q.chain.head = nil
	q.chain.tail.Store(nil)
	q.putsSinceDrain = 0
	unlock(&work.spanSPMCs.lock)
}
// spanSPMC is a ring buffer of objptrs that represent spans.
// Accessed without a lock.
//
// Single-producer, multi-consumer. The only producer is the P that owns this
// queue, but any other P may consume from it.
//
// ## Invariants for memory management
//
// 1. All spanSPMCs are allocated from mheap_.spanSPMCAlloc.
// 2. All allocated spanSPMCs must be on the work.spanSPMCs list.
// 3. spanSPMCs may only be allocated if gcphase != _GCoff.
// 4. spanSPMCs may only be deallocated if gcphase == _GCoff.
//
// Invariants (3) and (4) ensure that we do not need to concern ourselves with
// tricky reuse issues that stem from not knowing when a thread is truly done
// with a spanSPMC. For example, two threads could load the same spanSPMC from
// the tail of the chain. One thread is then paused while the other steals the
// last few elements off of it. It's not safe to free at that point since the
// other thread will still inspect that spanSPMC, and we have no way of knowing
// without more complex and/or heavyweight synchronization.
//
// Instead, we rely on the global synchronization inherent to GC phases, and
// the fact that spanSPMCs are only ever used during the mark phase, to ensure
// memory safety. This means we temporarily waste some memory, but it's only
// until the end of the mark phase.
type spanSPMC struct {
	_ sys.NotInHeap

	// allnode is the linked list node for work.spanSPMCs list. This is
	// used to find and free dead spanSPMCs. Protected by
	// work.spanSPMCs.lock.
	allnode listNodeManual

	// dead indicates whether the spanSPMC is no longer in use.
	// Protected by the CAS to the prev field of the spanSPMC pointing
	// to this spanSPMC. That is, whoever wins that CAS takes ownership
	// of marking this spanSPMC as dead. See spanQueue.steal for details.
	dead atomic.Bool

	// prev is the next link up a spanQueue's SPMC chain, from tail to head,
	// hence the name "prev." Set by a spanQueue's producer, cleared by a
	// CAS in spanQueue.steal.
	prev atomic.UnsafePointer // *spanSPMC

	// head, tail, cap, and ring together represent a fixed-size SPMC lock-free
	// ring buffer of size cap. The ring buffer contains objptr values.
	head atomic.Uint32
	tail atomic.Uint32
	cap  uint32 // cap(ring); always a power of two, see newSpanSPMC.
	ring *objptr
}
// newSpanSPMC allocates and initializes a new spmc with the provided capacity.
//
// newSpanSPMC may override the capacity with a larger one if the provided one would
// waste memory. The final capacity must be a power of two (slot relies on
// mask-based indexing).
func newSpanSPMC(cap uint32) *spanSPMC {
	// Register the new ring on the global list under the lock, per the
	// spanSPMC memory-management invariants.
	lock(&work.spanSPMCs.lock)
	r := (*spanSPMC)(mheap_.spanSPMCAlloc.alloc())
	work.spanSPMCs.list.push(unsafe.Pointer(r))
	unlock(&work.spanSPMCs.lock)

	// If cap < the capacity of a single physical page, round up.
	pageCap := uint32(physPageSize / goarch.PtrSize) // capacity of a single page
	if cap < pageCap {
		cap = pageCap
	}
	if cap&(cap-1) != 0 {
		throw("spmc capacity must be a power of 2")
	}
	r.cap = cap
	// Publish the ring pointer with an atomic store; r is NotInHeap so no
	// write barrier is needed (or allowed).
	ring := sysAlloc(uintptr(cap)*unsafe.Sizeof(objptr(0)), &memstats.gcMiscSys, "GC span queue")
	atomic.StorepNoWB(unsafe.Pointer(&r.ring), ring)
	return r
}
// empty returns true if the spmc is empty.
//
// empty is thread-safe.
func (r *spanSPMC) empty() bool {
	// Load head before tail, matching the original ordering; equal
	// indices mean no buffered items.
	h := r.head.Load()
	return r.tail.Load() == h
}
// deinit frees any resources the spanSPMC is holding onto and zeroes it,
// returning it to a state safe for reuse by the fixed allocator.
func (r *spanSPMC) deinit() {
	// Free the ring's backing memory first, then clear every field.
	sysFree(unsafe.Pointer(r.ring), uintptr(r.cap)*unsafe.Sizeof(objptr(0)), &memstats.gcMiscSys)
	r.ring = nil
	r.dead.Store(false)
	r.prev.StoreNoWB(nil)
	r.head.Store(0)
	r.tail.Store(0)
	r.cap = 0
	r.allnode = listNodeManual{}
}
// slot returns a pointer to slot i%r.cap.
func (r *spanSPMC) slot(i uint32) *objptr {
	// r.cap is a power of two, so masking is equivalent to modulo.
	off := uintptr(i&(r.cap-1)) * unsafe.Sizeof(objptr(0))
	return (*objptr)(unsafe.Add(unsafe.Pointer(r.ring), off))
}
// freeDeadSpanSPMCs frees dead spanSPMCs back to the OS.
func freeDeadSpanSPMCs() {
	// According to the SPMC memory management invariants, we can only free
	// spanSPMCs outside of the mark phase. We ensure we do this in two ways.
	//
	// 1. We take the work.spanSPMCs lock, which we need anyway. This ensures
	//    that we are non-preemptible. If this path becomes lock-free, we will
	//    need to become non-preemptible in some other way.
	// 2. Once we are non-preemptible, we check the gcphase, and back out if
	//    it's not safe.
	//
	// This way, we ensure that we don't start freeing if we're in the wrong
	// phase, and the phase can't change on us while we're freeing.
	//
	// TODO(go.dev/issue/75771): Due to the grow semantics in
	// spanQueue.drain, we expect a steady-state of around one spanSPMC per
	// P, with some spikes higher when Ps have more than one. For high
	// GOMAXPROCS, or if this list otherwise gets long, it would be nice to
	// have a way to batch work that allows preemption during processing.
	lock(&work.spanSPMCs.lock)
	if gcphase != _GCoff || work.spanSPMCs.list.empty() {
		unlock(&work.spanSPMCs.lock)
		return
	}
	// Walk the global list, reclaiming every ring marked dead.
	// Capture next before removal since remove unlinks r.
	r := (*spanSPMC)(work.spanSPMCs.list.head())
	for r != nil {
		next := (*spanSPMC)(unsafe.Pointer(r.allnode.next))
		if r.dead.Load() {
			// It's dead. Remove, deinitialize and free it.
			work.spanSPMCs.list.remove(unsafe.Pointer(r))
			r.deinit()
			mheap_.spanSPMCAlloc.free(unsafe.Pointer(r))
		}
		r = next
	}
	unlock(&work.spanSPMCs.lock)
}
// tryStealSpan attempts to steal a span from another P's local queue.
//
// Iterates over candidate Ps in a random order, skipping Ps whose
// spanqMask bit is clear, and clearing the bit of any P it finds empty.
//
// Returns a non-zero objptr on success.
func (w *gcWork) tryStealSpan() objptr {
	pp := getg().m.p.ptr()

	for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
		// Only bother with Ps that have advertised span work.
		if !work.spanqMask.read(enum.position()) {
			continue
		}
		p2 := allp[enum.position()]
		if pp == p2 {
			continue
		}
		if s := w.spanq.steal(&p2.gcw.spanq); s != 0 {
			return s
		}
		// The steal failed, so this P's queue looks empty; clear its bit.
		//
		// N.B. This is intentionally racy. We may stomp on a mask set by
		// a P that just put a bunch of work into its local queue.
		//
		// This is OK because the ragged barrier in gcMarkDone will set
		// the bit on each P if there's local work we missed. This race
		// should generally be rare, since the window between noticing
		// an empty local queue and this bit being set is quite small.
		work.spanqMask.clear(int32(enum.position()))
	}
	return 0
}
// objptr consists of a span base and the index of the object in the span.
//
// The page-aligned span base occupies the high bits and the object index
// the low gc.PageShift bits; see makeObjPtr, spanBase, and objIndex.
type objptr uintptr
// makeObjPtr creates an objptr from a span base address and an object index.
func makeObjPtr(spanBase uintptr, objIndex uint16) objptr {
	// spanBase must be page-aligned so the index fits in the low bits.
	if doubleCheckGreenTea && spanBase&((1<<gc.PageShift)-1) != 0 {
		throw("created objptr with address that is incorrectly aligned")
	}
	return objptr(spanBase) | objptr(objIndex)
}
// spanBase extracts the page-aligned span base address from p.
func (p objptr) spanBase() uintptr {
	// Clear the low in-page bits that hold the object index.
	const pageMask = (uintptr(1) << gc.PageShift) - 1
	return uintptr(p) &^ pageMask
}
// objIndex extracts the object index from the low bits of p.
func (p objptr) objIndex() uint16 {
	const idxMask = (1 << gc.PageShift) - 1
	return uint16(p) & idxMask
}
// scanSpan greys the objects indicated by marks&^scans and then scans those
// objects, queuing the resulting pointers into gcw.
//
// p must have been produced by tryDeferToSpanScan, so the span's inline
// mark bits are held acquired by the queue entry.
func scanSpan(p objptr, gcw *gcWork) {
	spanBase := p.spanBase()
	imb := spanInlineMarkBitsFromBase(spanBase)
	spanclass := imb.class
	if spanclass.noscan() {
		throw("noscan object in scanSpan")
	}
	elemsize := uintptr(gc.SizeClassToSize[spanclass.sizeclass()])

	// Release span.
	if imb.release() == spanScanOneMark {
		// Nobody else set any mark bits on this span while it was acquired.
		// That means p is the sole object we need to handle. Fast-track it.
		objIndex := p.objIndex()
		bytep := &imb.scans[objIndex/8]
		mask := uint8(1) << (objIndex % 8)
		if atomic.Load8(bytep)&mask != 0 {
			// Already scanned by someone else.
			return
		}
		atomic.Or8(bytep, mask)
		gcw.bytesMarked += uint64(elemsize)
		if debug.gctrace > 1 {
			gcw.stats[spanclass.sizeclass()].sparseObjsScanned++
		}
		b := spanBase + uintptr(objIndex)*elemsize
		scanObjectSmall(spanBase, b, elemsize, gcw)
		return
	}

	// Compute nelems from the size class's division magic, accounting for
	// the space the inline mark bits (and, for pointerful spans, the
	// pointer bitmap) consume at the end of the page.
	divMagic := uint64(gc.SizeClassToDivMagic[spanclass.sizeclass()])
	usableSpanSize := uint64(gc.PageSize - unsafe.Sizeof(spanInlineMarkBits{}))
	if !spanclass.noscan() {
		usableSpanSize -= gc.PageSize / goarch.PtrSize / 8
	}
	nelems := uint16((usableSpanSize * divMagic) >> 32)

	// Grey objects and return if there's nothing else to do.
	var toScan gc.ObjMask
	objsMarked := spanSetScans(spanBase, nelems, imb, &toScan)
	if objsMarked == 0 {
		return
	}
	gcw.bytesMarked += uint64(objsMarked) * uint64(elemsize)

	// Check if we have enough density to make a dartboard scan
	// worthwhile. If not, just do what scanobject does, but
	// localized to the span, using the dartboard.
	if !scan.HasFastScanSpanPacked() || objsMarked < int(nelems/8) {
		if debug.gctrace > 1 {
			gcw.stats[spanclass.sizeclass()].spansSparseScanned++
			gcw.stats[spanclass.sizeclass()].spanObjsSparseScanned += uint64(objsMarked)
		}
		scanObjectsSmall(spanBase, elemsize, nelems, gcw, &toScan)
		return
	}

	// Scan the span.
	//
	// N.B. Use gcw.ptrBuf as the output buffer. This is a bit different
	// from scanObjectsSmall, which puts addresses to dereference. ScanSpanPacked
	// on the other hand, fills gcw.ptrBuf with already dereferenced pointers.
	nptrs := scan.ScanSpanPacked(
		unsafe.Pointer(spanBase),
		&gcw.ptrBuf[0],
		&toScan,
		uintptr(spanclass.sizeclass()),
		spanPtrMaskUnsafe(spanBase),
	)
	gcw.heapScanWork += int64(objsMarked) * int64(elemsize)

	if debug.gctrace > 1 {
		// Write down some statistics.
		gcw.stats[spanclass.sizeclass()].spansDenseScanned++
		gcw.stats[spanclass.sizeclass()].spanObjsDenseScanned += uint64(objsMarked)
	}

	// Process all the pointers we just got.
	for _, p := range gcw.ptrBuf[:nptrs] {
		if !tryDeferToSpanScan(p, gcw) {
			if obj, span, objIndex := findObject(p, 0, 0); obj != 0 {
				greyobject(obj, 0, 0, span, gcw, objIndex)
			}
		}
	}
}
// spanSetScans sets any unset scan bits that have their mark bits set in the
// inline mark bits.
//
// toScan is populated with bits indicating whether a particular mark bit was
// newly set (i.e. marked but not yet scanned).
//
// Returns the number of objects marked, which could be zero.
func spanSetScans(spanBase uintptr, nelems uint16, imb *spanInlineMarkBits, toScan *gc.ObjMask) int {
	// Make sure the page-level mark is set so sweeping sees this span.
	arena, pageIdx, pageMask := pageIndexOf(spanBase)
	if arena.pageMarks[pageIdx]&pageMask == 0 {
		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
	}

	bytes := divRoundUp(uintptr(nelems), 8)
	objsMarked := 0

	// Careful: these two structures alias since ObjMask is much bigger
	// than marks or scans. We do these unsafe shenanigans so that we can
	// access the marks and scans by uintptrs rather than by byte.
	imbMarks := (*gc.ObjMask)(unsafe.Pointer(&imb.marks))
	imbScans := (*gc.ObjMask)(unsafe.Pointer(&imb.scans))

	// Iterate over one uintptr-sized chunks at a time, computing both
	// the union and intersection of marks and scans. Store the union
	// into scans, and the intersection into toScan.
	for i := uintptr(0); i < bytes; i += goarch.PtrSize {
		scans := atomic.Loaduintptr(&imbScans[i/goarch.PtrSize])
		marks := imbMarks[i/goarch.PtrSize]
		scans = bswapIfBigEndian(scans)
		marks = bswapIfBigEndian(marks)
		if i/goarch.PtrSize == uintptr(len(imb.marks)+1)/goarch.PtrSize-1 {
			// The last word aliases the metadata bytes; strip them.
			scans &^= 0xff << ((goarch.PtrSize - 1) * 8) // mask out owned
			marks &^= 0xff << ((goarch.PtrSize - 1) * 8) // mask out class
		}
		toGrey := marks &^ scans
		toScan[i/goarch.PtrSize] = toGrey

		// If there's anything left to grey, do it.
		if toGrey != 0 {
			toGrey = bswapIfBigEndian(toGrey)
			if goarch.PtrSize == 4 {
				atomic.Or32((*uint32)(unsafe.Pointer(&imbScans[i/goarch.PtrSize])), uint32(toGrey))
			} else {
				atomic.Or64((*uint64)(unsafe.Pointer(&imbScans[i/goarch.PtrSize])), uint64(toGrey))
			}
		}
		objsMarked += sys.OnesCount64(uint64(toGrey))
	}
	return objsMarked
}
// scanObjectSmall scans the single object at b within the span at spanBase,
// greying any heap pointers it finds via gcw.
//
// Collects candidate pointer addresses first (with prefetching), then
// dereferences and processes them in a second pass.
func scanObjectSmall(spanBase, b, objSize uintptr, gcw *gcWork) {
	hbitsBase, _ := spanHeapBitsRange(spanBase, gc.PageSize, objSize)
	hbits := (*byte)(unsafe.Pointer(hbitsBase))
	ptrBits := extractHeapBitsSmall(hbits, spanBase, b, objSize)
	gcw.heapScanWork += int64(sys.Len64(uint64(ptrBits)) * goarch.PtrSize)
	nptrs := 0
	n := sys.OnesCount64(uint64(ptrBits))
	// Peel off each set pointer bit, lowest first.
	for range n {
		k := sys.TrailingZeros64(uint64(ptrBits))
		ptrBits &^= 1 << k
		addr := b + uintptr(k)*goarch.PtrSize

		// Prefetch addr since we're about to use it. This point for prefetching
		// was chosen empirically.
		sys.Prefetch(addr)

		// N.B. ptrBuf is always large enough to hold pointers for an entire 1-page span.
		gcw.ptrBuf[nptrs] = addr
		nptrs++
	}

	// Process all the pointers we just got.
	for _, p := range gcw.ptrBuf[:nptrs] {
		p = *(*uintptr)(unsafe.Pointer(p))
		if p == 0 {
			// Skip nil pointers.
			continue
		}
		if !tryDeferToSpanScan(p, gcw) {
			if obj, span, objIndex := findObject(p, 0, 0); obj != 0 {
				greyobject(obj, 0, 0, span, gcw, objIndex)
			}
		}
	}
}
// scanObjectsSmall scans every object selected by scans in the span at base,
// greying any heap pointers found via gcw.
//
// scans is an object mask (one bit per object, up to elems objects); for
// each selected object its pointer/scalar heap bits are consulted to find
// candidate pointer slots.
func scanObjectsSmall(base, objSize uintptr, elems uint16, gcw *gcWork, scans *gc.ObjMask) {
	nptrs := 0
	for i, bits := range scans {
		// Stop once we're past the last word that can hold live objects.
		if i*(goarch.PtrSize*8) > int(elems) {
			break
		}
		n := sys.OnesCount64(uint64(bits))
		hbitsBase, _ := spanHeapBitsRange(base, gc.PageSize, objSize)
		hbits := (*byte)(unsafe.Pointer(hbitsBase))
		// Peel off each selected object, lowest bit first.
		for range n {
			j := sys.TrailingZeros64(uint64(bits))
			bits &^= 1 << j

			b := base + uintptr(i*(goarch.PtrSize*8)+j)*objSize
			ptrBits := extractHeapBitsSmall(hbits, base, b, objSize)
			gcw.heapScanWork += int64(sys.Len64(uint64(ptrBits)) * goarch.PtrSize)

			// Collect this object's pointer slot addresses.
			n := sys.OnesCount64(uint64(ptrBits))
			for range n {
				k := sys.TrailingZeros64(uint64(ptrBits))
				ptrBits &^= 1 << k
				addr := b + uintptr(k)*goarch.PtrSize

				// Prefetch addr since we're about to use it. This point for prefetching
				// was chosen empirically.
				sys.Prefetch(addr)

				// N.B. ptrBuf is always large enough to hold pointers for an entire 1-page span.
				gcw.ptrBuf[nptrs] = addr
				nptrs++
			}
		}
	}

	// Process all the pointers we just got.
	for _, p := range gcw.ptrBuf[:nptrs] {
		p = *(*uintptr)(unsafe.Pointer(p))
		if p == 0 {
			// Skip nil pointers.
			continue
		}
		if !tryDeferToSpanScan(p, gcw) {
			if obj, span, objIndex := findObject(p, 0, 0); obj != 0 {
				greyobject(obj, 0, 0, span, gcw, objIndex)
			}
		}
	}
}
// extractHeapBitsSmall returns the pointer/scalar bitmap for the object at
// addr in the span at spanBase, read from the span's packed heap bits.
//
// hbits points to the base of the span's heap bitmap; elemsize is the
// object size in bytes. The result has one bit per word of the object.
func extractHeapBitsSmall(hbits *byte, spanBase, addr, elemsize uintptr) uintptr {
	// These objects are always small enough that their bitmaps
	// fit in a single word, so just load the word or two we need.
	//
	// Mirrors mspan.writeHeapBitsSmall.
	//
	// We should be using heapBits(), but unfortunately it introduces
	// both bounds checks panics and throw which causes us to exceed
	// the nosplit limit in quite a few cases.
	i := (addr - spanBase) / goarch.PtrSize / ptrBits
	j := (addr - spanBase) / goarch.PtrSize % ptrBits
	bits := elemsize / goarch.PtrSize
	word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
	word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))

	var read uintptr
	if j+bits > ptrBits {
		// Two reads: the object's bitmap straddles a word boundary.
		bits0 := ptrBits - j
		bits1 := bits - bits0
		read = *word0 >> j
		read |= (*word1 & ((1 << bits1) - 1)) << bits0
	} else {
		// One read.
		read = (*word0 >> j) & ((1 << bits) - 1)
	}
	return read
}
// spanPtrMaskUnsafe returns the pointer mask for a span with inline mark bits.
//
// The caller must ensure spanBase is the base of a span that:
//   - 1 page in size,
//   - Uses inline mark bits,
//   - Contains pointers.
func spanPtrMaskUnsafe(spanBase uintptr) *gc.PtrMask {
	// The pointer mask sits at the end of the page, just below the
	// inline mark bits.
	offset := gc.PageSize - unsafe.Sizeof(gc.PtrMask{}) - unsafe.Sizeof(spanInlineMarkBits{})
	return (*gc.PtrMask)(unsafe.Pointer(spanBase + offset))
}
// sizeClassScanStats accumulates per-size-class GC scan counters,
// reported by dumpScanStats when debug.gctrace > 1.
type sizeClassScanStats struct {
	spansDenseScanned     uint64 // Spans scanned with ScanSpanPacked.
	spanObjsDenseScanned  uint64 // Objects scanned with ScanSpanPacked.
	spansSparseScanned    uint64 // Spans scanned with scanObjectsSmall.
	spanObjsSparseScanned uint64 // Objects scanned with scanObjectsSmall.
	sparseObjsScanned     uint64 // Objects scanned with scanobject or scanObjectSmall.
	// Note: sparseObjsScanned is sufficient for both cases because
	// a particular size class either uses scanobject or scanObjectSmall,
	// not both. In the latter case, we also know that there was one
	// object scanned per span, so no need for a span counter.
}
// dumpScanStats prints the scan statistics accumulated over the last GC
// cycle: a whole-heap total line followed by one line per non-empty size
// class (class 0 is reported as "L", i.e. large objects).
func dumpScanStats() {
	var (
		spansDenseScanned     uint64
		spanObjsDenseScanned  uint64
		spansSparseScanned    uint64
		spanObjsSparseScanned uint64
		sparseObjsScanned     uint64
	)
	// First pass: sum across all size classes for the totals line.
	for _, stats := range memstats.lastScanStats {
		spansDenseScanned += stats.spansDenseScanned
		spanObjsDenseScanned += stats.spanObjsDenseScanned
		spansSparseScanned += stats.spansSparseScanned
		spanObjsSparseScanned += stats.spanObjsSparseScanned
		sparseObjsScanned += stats.sparseObjsScanned
	}
	totalObjs := sparseObjsScanned + spanObjsSparseScanned + spanObjsDenseScanned
	totalSpans := spansSparseScanned + spansDenseScanned
	print("scan: total ", sparseObjsScanned, "+", spanObjsSparseScanned, "+", spanObjsDenseScanned, "=", totalObjs, " objs")
	print(", ", spansSparseScanned, "+", spansDenseScanned, "=", totalSpans, " spans\n")
	// Second pass: per-size-class breakdown, skipping empty classes.
	for i, stats := range memstats.lastScanStats {
		if stats == (sizeClassScanStats{}) {
			continue
		}
		totalObjs := stats.sparseObjsScanned + stats.spanObjsSparseScanned + stats.spanObjsDenseScanned
		totalSpans := stats.spansSparseScanned + stats.spansDenseScanned
		if i == 0 {
			print("scan: class L ")
		} else {
			print("scan: class ", gc.SizeClassToSize[i], "B ")
		}
		print(stats.sparseObjsScanned, "+", stats.spanObjsSparseScanned, "+", stats.spanObjsDenseScanned, "=", totalObjs, " objs")
		print(", ", stats.spansSparseScanned, "+", stats.spansDenseScanned, "=", totalSpans, " spans\n")
	}
}
// flushScanStats accumulates w's per-size-class scan counters into dst
// and resets w's counters.
func (w *gcWork) flushScanStats(dst *[gc.NumSizeClasses]sizeClassScanStats) {
	for i, s := range w.stats {
		d := &dst[i]
		d.spansDenseScanned += s.spansDenseScanned
		d.spanObjsDenseScanned += s.spanObjsDenseScanned
		d.spansSparseScanned += s.spansSparseScanned
		d.spanObjsSparseScanned += s.spanObjsSparseScanned
		d.sparseObjsScanned += s.sparseObjsScanned
	}
	clear(w.stats[:])
}
// gcMarkWorkAvailable reports whether there's any non-local work available to do.
//
// This is a heavyweight check and must only be used for correctness, not
// as a hint.
func gcMarkWorkAvailable() bool {
	switch {
	case !work.full.empty():
		// Global work available.
		return true
	case work.markrootNext.Load() < work.markrootJobs.Load():
		// Root scan work available.
		return true
	case work.spanqMask.any():
		// Stealable local span work available.
		return true
	}
	return false
}
// scanObject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanObject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
// Used only for !gcUsesSpanInlineMarkBits spans, but supports all
// object sizes and is safe to be called on all heap objects.
//
//go:nowritebarrier
func scanObject(b uintptr, gcw *gcWork) {
	// Prefetch object before we scan it.
	//
	// This will overlap fetching the beginning of the object with initial
	// setup before we start scanning the object.
	sys.Prefetch(b)

	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanObject n == 0")
	}
	if s.spanclass.noscan() {
		// Correctness-wise this is ok, but it's inefficient
		// if noscan objects reach here.
		throw("scanObject of a noscan object")
	}

	var tp typePointers
	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putObjFast(oblet) {
					gcw.putObj(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		n = min(n, maxObletBytes)
		tp = s.typePointersOfUnchecked(s.base())
		tp = tp.fastForward(b-tp.addr, b+n)
	} else {
		tp = s.typePointersOfUnchecked(b)
	}

	// Walk the object's pointer slots, greying whatever they reference.
	var scanSize uintptr
	for {
		var addr uintptr
		if tp, addr = tp.nextFast(); addr == 0 {
			if tp, addr = tp.next(b + n); addr == 0 {
				break
			}
		}

		// Keep track of farthest pointer we found, so we can
		// update heapScanWork. TODO: is there a better metric,
		// now that we can skip scalar portions pretty efficiently?
		scanSize = addr - b + goarch.PtrSize

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(addr))

		// At this point we have extracted the next potential pointer.
		// Quickly filter out nil and pointers back to the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so,
			// mark the object.
			//
			// Note that it's possible for findObject to
			// fail if obj points to a just-allocated heap
			// object because of a race with growing the
			// heap. In this case, we know the object was
			// just allocated and hence will be marked by
			// allocation itself.
			if !tryDeferToSpanScan(obj, gcw) {
				if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
					greyobject(obj, b, addr-b, span, gcw, objIndex)
				}
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.heapScanWork += int64(scanSize)
	if debug.gctrace > 1 {
		gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++
	}
}
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/cpu"
"internal/goexperiment"
"internal/runtime/atomic"
"internal/runtime/math"
"internal/strconv"
_ "unsafe"
)
const (
	// gcGoalUtilization is the goal CPU utilization for
	// marking as a fraction of GOMAXPROCS.
	//
	// Increasing the goal utilization will shorten GC cycles as the GC
	// has more resources behind it, lessening costs from the write barrier,
	// but comes at the cost of increasing mutator latency.
	gcGoalUtilization = gcBackgroundUtilization

	// gcBackgroundUtilization is the fixed CPU utilization for background
	// marking. It must be <= gcGoalUtilization. The difference between
	// gcGoalUtilization and gcBackgroundUtilization will be made up by
	// mark assists. The scheduler will aim to use within 50% of this
	// goal.
	//
	// As a general rule, there's little reason to set gcBackgroundUtilization
	// < gcGoalUtilization. One reason might be in mostly idle applications,
	// where goroutines are unlikely to assist at all, so the actual
	// utilization will be lower than the goal. But this is a moot point
	// because the idle mark workers already soak up idle CPU resources.
	// These two values are still kept separate however because they are
	// distinct conceptually, and in previous iterations of the pacer the
	// distinction was more important.
	gcBackgroundUtilization = 0.25

	// gcCreditSlack is the amount of scan work credit that can
	// accumulate locally before updating gcController.heapScanWork and,
	// optionally, gcController.bgScanCredit. Lower values give a more
	// accurate assist ratio and make it more likely that assists will
	// successfully steal background credit. Higher values reduce memory
	// contention.
	gcCreditSlack = 2000

	// gcAssistTimeSlack is the nanoseconds of mutator assist time that
	// can accumulate on a P before updating gcController.assistTime.
	gcAssistTimeSlack = 5000

	// gcOverAssistWork determines how many extra units of scan work a GC
	// assist does when an assist happens. This amortizes the cost of an
	// assist by pre-paying for this many bytes of future allocations.
	gcOverAssistWork = 64 << 10

	// defaultHeapMinimum is the value of heapMinimum for GOGC==100.
	// Selects 512 KiB or 4 MiB depending on the HeapMinimum512KiB experiment.
	defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) +
		(1-goexperiment.HeapMinimum512KiBInt)*(4<<20)

	// maxStackScanSlack is the bytes of stack space allocated or freed
	// that can accumulate on a P before updating gcController.stackSize.
	maxStackScanSlack = 8 << 10

	// memoryLimitMinHeapGoalHeadroom is the minimum amount of headroom the
	// pacer gives to the heap goal when operating in the memory-limited regime.
	// That is, it'll reduce the heap goal by this many extra bytes off of the
	// base calculation, at minimum.
	memoryLimitMinHeapGoalHeadroom = 1 << 20

	// memoryLimitHeapGoalHeadroomPercent is how much headroom the
	// memory-limit-based heap goal should have as a percent of the maximum
	// possible heap goal allowed to maintain the memory limit.
	memoryLimitHeapGoalHeadroomPercent = 3
)
// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It calculates the ratio between the allocation rate (in terms of CPU
// time) and the GC scan throughput to determine the heap size at which to
// trigger a GC cycle such that no GC assists are required to finish on time.
// This algorithm thus optimizes GC CPU utilization to the dedicated background
// mark utilization of 25% of GOMAXPROCS by minimizing GC assists.
// The high-level design of this algorithm is documented
// at https://github.com/golang/proposal/blob/master/design/44167-gc-pacer-redesign.md.
// See https://golang.org/s/go15gcpacing for additional historical context.
var gcController gcControllerState
// gcControllerState is the state of the GC pacing controller. There is a
// single global instance, gcController, plus test-only copies (see the
// test field below).
type gcControllerState struct {
	// Initialized from GOGC. GOGC=off means no GC.
	gcPercent atomic.Int32

	// memoryLimit is the soft memory limit in bytes.
	//
	// Initialized from GOMEMLIMIT. GOMEMLIMIT=off is equivalent to MaxInt64
	// which means no soft memory limit in practice.
	//
	// This is an int64 instead of a uint64 to more easily maintain parity with
	// the SetMemoryLimit API, which sets a maximum at MaxInt64. This value
	// should never be negative.
	memoryLimit atomic.Int64

	// heapMinimum is the minimum heap size at which to trigger GC.
	// For small heaps, this overrides the usual GOGC*live set rule.
	//
	// When there is a very small live set but a lot of allocation, simply
	// collecting when the heap reaches GOGC*live results in many GC
	// cycles and high total per-GC overhead. This minimum amortizes this
	// per-GC overhead while keeping the heap reasonably small.
	//
	// During initialization this is set to 4MB*GOGC/100. In the case of
	// GOGC==0, this will set heapMinimum to 0, resulting in constant
	// collection even when the heap size is small, which is useful for
	// debugging.
	heapMinimum uint64

	// runway is the amount of runway in heap bytes allocated by the
	// application that we want to give the GC once it starts.
	//
	// This is computed from consMark during mark termination.
	runway atomic.Uint64

	// consMark is the estimated per-CPU consMark ratio for the application.
	//
	// It represents the ratio between the application's allocation
	// rate, as bytes allocated per CPU-time, and the GC's scan rate,
	// as bytes scanned per CPU-time.
	// The units of this ratio are (B / cpu-ns) / (B / cpu-ns).
	//
	// At a high level, this value is computed as the bytes of memory
	// allocated (cons) per unit of scan work completed (mark) in a GC
	// cycle, divided by the CPU time spent on each activity.
	//
	// Updated at the end of each GC cycle, in endCycle.
	consMark float64

	// lastConsMark is the computed cons/mark value for the previous 4 GC
	// cycles. Note that this is *not* the last value of consMark, but the
	// measured cons/mark value in endCycle.
	lastConsMark [4]float64

	// gcPercentHeapGoal is the goal heapLive for when next GC ends derived
	// from gcPercent.
	//
	// Set to ^uint64(0) if gcPercent is disabled.
	gcPercentHeapGoal atomic.Uint64

	// sweepDistMinTrigger is the minimum trigger to ensure a minimum
	// sweep distance.
	//
	// This bound is also special because it applies to both the trigger
	// *and* the goal (all other trigger bounds must be based *on* the goal).
	//
	// It is computed ahead of time, at commit time. The theory is that,
	// absent a sudden change to a parameter like gcPercent, the trigger
	// will be chosen to always give the sweeper enough headroom. However,
	// such a change might dramatically and suddenly move up the trigger,
	// in which case we need to ensure the sweeper still has enough headroom.
	sweepDistMinTrigger atomic.Uint64

	// triggered is the point at which the current GC cycle actually triggered.
	// Only valid during the mark phase of a GC cycle, otherwise set to ^uint64(0).
	//
	// Updated while the world is stopped.
	triggered uint64

	// lastHeapGoal is the value of heapGoal at the moment the last GC
	// ended. Note that this is distinct from the last value heapGoal had,
	// because it could change if e.g. gcPercent changes.
	//
	// Read and written with the world stopped or with mheap_.lock held.
	lastHeapGoal uint64

	// heapLive is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heapLive ≤ memstats.totalAlloc-memstats.totalFree, since
	// heapAlloc includes unmarked objects that have not yet been swept (and
	// hence goes up as we allocate and down as we sweep) while heapLive
	// excludes these objects (and hence only goes up between GCs).
	//
	// To reduce contention, this is updated only when obtaining a span
	// from an mcentral and at this point it counts all of the unallocated
	// slots in that span (which will be allocated before that mcache
	// obtains another span from that mcentral). Hence, it slightly
	// overestimates the "true" live heap size. It's better to overestimate
	// than to underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this leads to a
	// conservative GC rate rather than a GC rate that is potentially too
	// low.
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// this gcControllerState's revise() method.
	heapLive atomic.Uint64

	// heapScan is the number of bytes of "scannable" heap. This is the
	// live heap (as counted by heapLive), but omitting no-scan objects and
	// no-scan tails of objects.
	//
	// This value is fixed at the start of a GC cycle. It represents the
	// maximum scannable heap.
	heapScan atomic.Uint64

	// lastHeapScan is the number of bytes of heap that were scanned
	// last GC cycle. It is the same as heapMarked, but only
	// includes the "scannable" parts of objects.
	//
	// Updated when the world is stopped.
	lastHeapScan uint64

	// lastStackScan is the number of bytes of stack that were scanned
	// last GC cycle.
	lastStackScan atomic.Uint64

	// maxStackScan is the amount of allocated goroutine stack space in
	// use by goroutines.
	//
	// This number tracks allocated goroutine stack space rather than used
	// goroutine stack space (i.e. what is actually scanned) because used
	// goroutine stack space is much harder to measure cheaply. By using
	// allocated space, we make an overestimate; this is OK, it's better
	// to conservatively overcount than undercount.
	maxStackScan atomic.Uint64

	// globalsScan is the total amount of global variable space
	// that is scannable.
	globalsScan atomic.Uint64

	// heapMarked is the number of bytes marked by the previous
	// GC. After mark termination, heapLive == heapMarked, but
	// unlike heapLive, heapMarked does not change until the
	// next mark termination.
	heapMarked uint64

	// heapScanWork is the total heap scan work performed this cycle.
	// stackScanWork is the total stack scan work performed this cycle.
	// globalsScanWork is the total globals scan work performed this cycle.
	//
	// These are updated atomically during the cycle. Updates occur in
	// bounded batches, since they are both written and read
	// throughout the cycle. At the end of the cycle, heapScanWork is how
	// much of the retained heap is scannable.
	//
	// Currently these are measured in bytes. For most uses, this is an
	// opaque unit of work, but for estimation the definition is important.
	//
	// Note that stackScanWork includes only stack space scanned, not all
	// of the allocated stack.
	heapScanWork    atomic.Int64
	stackScanWork   atomic.Int64
	globalsScanWork atomic.Int64

	// bgScanCredit is the scan work credit accumulated by the concurrent
	// background scan. This credit is accumulated by the background scan
	// and stolen by mutator assists. Updates occur in bounded batches,
	// since it is both written and read throughout the cycle.
	bgScanCredit atomic.Int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically, and must also
	// be updated atomically even during a STW, because it is read
	// by sysmon. Updates occur in bounded batches, since it is both
	// written and read throughout the cycle.
	assistTime atomic.Int64

	// dedicatedMarkTime is the nanoseconds spent in dedicated mark workers
	// during this cycle. This is updated at the end of the concurrent mark
	// phase.
	dedicatedMarkTime atomic.Int64

	// fractionalMarkTime is the nanoseconds spent in the fractional mark
	// worker during this cycle. This is updated throughout the cycle and
	// will be up-to-date if the fractional mark worker is not currently
	// running.
	fractionalMarkTime atomic.Int64

	// idleMarkTime is the nanoseconds spent in idle marking during this
	// cycle. This is updated throughout the cycle.
	idleMarkTime atomic.Int64

	// markStartTime is the absolute start time in nanoseconds
	// that assists and background mark workers started.
	markStartTime int64

	// dedicatedMarkWorkersNeeded is the number of dedicated mark workers
	// that need to be started. This is computed at the beginning of each
	// cycle and decremented as dedicated mark workers get started.
	dedicatedMarkWorkersNeeded atomic.Int64

	// idleMarkWorkers is two packed int32 values in a single uint64.
	// These two values are always updated simultaneously.
	//
	// The bottom int32 is the current number of idle mark workers executing.
	//
	// The top int32 is the maximum number of idle mark workers allowed to
	// execute concurrently. Normally, this number is just gomaxprocs. However,
	// during periodic GC cycles it is set to 0 because the system is idle
	// anyway; there's no need to go full blast on all of GOMAXPROCS.
	//
	// The maximum number of idle mark workers is used to prevent new workers
	// from starting, but it is not a hard maximum. It is possible (but
	// exceedingly rare) for the current number of idle mark workers to
	// transiently exceed the maximum. This could happen if the maximum changes
	// just after a GC ends, and an M with no P.
	//
	// Note that if we have no dedicated mark workers, we set this value to 1,
	// because in this case we only have fractional GC workers, which aren't
	// scheduled strictly enough to ensure GC progress. As a result,
	// idle-priority mark workers are vital to GC progress in these situations.
	//
	// For example, consider a situation in which goroutines block on the GC
	// (such as via runtime.GOMAXPROCS) and only fractional mark workers are
	// scheduled (e.g. GOMAXPROCS=1). Without idle-priority mark workers, the
	// last running M might skip scheduling a fractional mark worker if its
	// utilization goal is met, such that once it goes to sleep (because there's
	// nothing to do), there will be nothing else to spin up a new M for the
	// fractional worker in the future, stalling GC progress and causing a
	// deadlock. However, idle-priority workers will *always* run when there is
	// nothing left to do, ensuring the GC makes progress.
	//
	// See github.com/golang/go/issues/44163 for more details.
	idleMarkWorkers atomic.Uint64

	// assistWorkPerByte is the ratio of scan work to allocated
	// bytes that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heapScan is updated.
	assistWorkPerByte atomic.Float64

	// assistBytesPerWork is 1/assistWorkPerByte.
	//
	// Note that because this is read and written independently
	// from assistWorkPerByte users may notice a skew between
	// the two values, and such a state should be safe.
	assistBytesPerWork atomic.Float64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker on
	// each P that isn't running a dedicated worker.
	//
	// For example, if the utilization goal is 25% and there are
	// no dedicated workers, this will be 0.25. If the goal is
	// 25%, there is one dedicated worker, and GOMAXPROCS is 5,
	// this will be 0.05 to make up the missing 5%.
	//
	// If this is zero, no fractional workers are needed.
	fractionalUtilizationGoal float64

	// These memory stats are effectively duplicates of fields from
	// memstats.heapStats but are updated atomically or with the world
	// stopped and don't provide the same consistency guarantees.
	//
	// Because the runtime is responsible for managing a memory limit, it's
	// useful to couple these stats more tightly to the gcController, which
	// is intimately connected to how that memory limit is maintained.
	heapInUse    sysMemStat    // bytes in mSpanInUse spans
	heapReleased sysMemStat    // bytes released to the OS
	heapFree     sysMemStat    // bytes not in any span, but not released to the OS
	totalAlloc   atomic.Uint64 // total bytes allocated
	totalFree    atomic.Uint64 // total bytes freed
	mappedReady  atomic.Uint64 // total virtual memory in the Ready state (see mem.go).

	// test indicates that this is a test-only copy of gcControllerState.
	test bool

	_ cpu.CacheLinePad
}
// init initializes the pacer state from the given GOGC and GOMEMLIMIT
// values. Called once during runtime startup (and by pacer tests on
// test-only copies). The commit call must come after both setters so the
// derived values (trigger, goals) see the configured parameters.
func (c *gcControllerState) init(gcPercent int32, memoryLimit int64) {
	c.heapMinimum = defaultHeapMinimum
	c.triggered = ^uint64(0) // "not in a cycle" sentinel; see the field doc.
	c.setGCPercent(gcPercent)
	c.setMemoryLimit(memoryLimit)
	c.commit(true) // No sweep phase in the first GC cycle.

	// N.B. Don't bother calling traceHeapGoal. Tracing is never enabled at
	// initialization time.
	// N.B. No need to call revise; there's no GC enabled during
	// initialization.
}
// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema and the world
// must be stopped.
//
// markStartTime is the absolute time marking begins, procs is the current
// GOMAXPROCS, and trigger describes what kicked off this cycle.
func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger gcTrigger) {
	// Zero all per-cycle accumulators.
	c.heapScanWork.Store(0)
	c.stackScanWork.Store(0)
	c.globalsScanWork.Store(0)
	c.bgScanCredit.Store(0)
	c.assistTime.Store(0)
	c.dedicatedMarkTime.Store(0)
	c.fractionalMarkTime.Store(0)
	c.idleMarkTime.Store(0)
	c.markStartTime = markStartTime
	// Snapshot the heap size at which this cycle triggered; endCycle and
	// revise both measure growth relative to this point.
	c.triggered = c.heapLive.Load()

	// Compute the background mark utilization goal. In general,
	// this may not come out exactly. We round the number of
	// dedicated workers so that the utilization is closest to
	// 25%. For small GOMAXPROCS, this would introduce too much
	// error, so we add fractional workers in that case.
	totalUtilizationGoal := float64(procs) * gcBackgroundUtilization
	dedicatedMarkWorkersNeeded := int64(totalUtilizationGoal + 0.5) // round to nearest
	utilError := float64(dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
	const maxUtilError = 0.3
	if utilError < -maxUtilError || utilError > maxUtilError {
		// Rounding put us more than 30% off our goal. With
		// gcBackgroundUtilization of 25%, this happens for
		// GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional
		// workers to compensate.
		if float64(dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
			// Too many dedicated workers.
			dedicatedMarkWorkersNeeded--
		}
		c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(dedicatedMarkWorkersNeeded)) / float64(procs)
	} else {
		c.fractionalUtilizationGoal = 0
	}

	// In STW mode, we just want dedicated workers.
	if debug.gcstoptheworld > 0 {
		dedicatedMarkWorkersNeeded = int64(procs)
		c.fractionalUtilizationGoal = 0
	}

	// Clear per-P state
	for _, p := range allp {
		p.gcAssistTime = 0
		p.gcFractionalMarkTime.Store(0)
	}

	if trigger.kind == gcTriggerTime {
		// During a periodic GC cycle, reduce the number of idle mark workers
		// required. However, we need at least one dedicated mark worker or
		// idle GC worker to ensure GC progress in some scenarios (see comment
		// on maxIdleMarkWorkers).
		if dedicatedMarkWorkersNeeded > 0 {
			c.setMaxIdleMarkWorkers(0)
		} else {
			// TODO(mknyszek): The fundamental reason why we need this is because
			// we can't count on the fractional mark worker to get scheduled.
			// Fix that by ensuring it gets scheduled according to its quota even
			// if the rest of the application is idle.
			c.setMaxIdleMarkWorkers(1)
		}
	} else {
		// N.B. gomaxprocs and dedicatedMarkWorkersNeeded are guaranteed not to
		// change during a GC cycle.
		c.setMaxIdleMarkWorkers(int32(procs) - int32(dedicatedMarkWorkersNeeded))
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.dedicatedMarkWorkersNeeded.Store(dedicatedMarkWorkersNeeded)
	c.revise()

	if debug.gcpacertrace > 0 {
		heapGoal := c.heapGoal()
		assistRatio := c.assistWorkPerByte.Load()
		print("pacer: assist ratio=", assistRatio,
			" (scan ", gcController.heapScan.Load()>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			heapGoal>>20, " MB)",
			" workers=", dedicatedMarkWorkersNeeded,
			"+", c.fractionalUtilizationGoal, "\n")
	}
}
// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called whenever gcController.heapScan,
// gcController.heapLive, or if any inputs to gcController.heapGoal are
// updated. It is safe to call concurrently, but it may race with other
// calls to revise.
//
// The result of this race is that the two assist ratio values may not line
// up or may be stale. In practice this is OK because the assist ratio
// moves slowly throughout a GC cycle, and the assist ratio is a best-effort
// heuristic anyway. Furthermore, no part of the heuristic depends on
// the two assist ratio values being exact reciprocals of one another, since
// the two values are used to convert values from different sources.
//
// The worst case result of this raciness is that we may miss a larger shift
// in the ratio (say, if we decide to pace more aggressively against the
// hard heap goal) but even this "hard goal" is best-effort (see #40460).
// The dedicated GC should ensure we don't exceed the hard goal by too much
// in the rare case we do exceed it.
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
func (c *gcControllerState) revise() {
	gcPercent := c.gcPercent.Load()
	if gcPercent < 0 {
		// If GC is disabled but we're running a forced GC,
		// act like GOGC is huge for the below calculations.
		gcPercent = 100000
	}
	live := c.heapLive.Load()
	scan := c.heapScan.Load()
	// Total scan work completed so far this cycle, across all sources.
	work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()

	// Assume we're under the soft goal. Pace GC to complete at
	// heapGoal assuming the heap is in steady-state.
	heapGoal := int64(c.heapGoal())

	// The expected scan work is computed as the amount of bytes scanned last
	// GC cycle (both heap and stack), plus our estimate of globals work for this cycle.
	scanWorkExpected := int64(c.lastHeapScan + c.lastStackScan.Load() + c.globalsScan.Load())

	// maxScanWork is a worst-case estimate of the amount of scan work that
	// needs to be performed in this GC cycle. Specifically, it represents
	// the case where *all* scannable memory turns out to be live, and
	// *all* allocated stack space is scannable.
	maxStackScan := c.maxStackScan.Load()
	maxScanWork := int64(scan + maxStackScan + c.globalsScan.Load())
	if work > scanWorkExpected {
		// We've already done more scan work than expected. Because our expectation
		// is based on a steady-state scannable heap size, we assume this means our
		// heap is growing. Compute a new heap goal that takes our existing runway
		// computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case
		// scan work. This keeps our assist ratio stable if the heap continues to grow.
		//
		// The effect of this mechanism is that assists stay flat in the face of heap
		// growths. It's OK to use more memory this cycle to scan all the live heap,
		// because the next GC cycle is inevitably going to use *at least* that much
		// memory anyway.
		extHeapGoal := int64(float64(heapGoal-int64(c.triggered))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.triggered)
		scanWorkExpected = maxScanWork

		// hardGoal is a hard limit on the amount that we're willing to push back the
		// heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or
		// stacks and/or globals grow to twice their size, this limits the current GC cycle's
		// growth to 4x the original live heap's size).
		//
		// This maintains the invariant that we use no more memory than the next GC cycle
		// will anyway.
		hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal))
		if extHeapGoal > hardGoal {
			extHeapGoal = hardGoal
		}
		heapGoal = extHeapGoal
	}
	if int64(live) > heapGoal {
		// We're already past our heap goal, even the extrapolated one.
		// Leave ourselves some extra runway, so in the worst case we
		// finish by that point.
		const maxOvershoot = 1.1
		heapGoal = int64(float64(heapGoal) * maxOvershoot)

		// Compute the upper bound on the scan work remaining.
		scanWorkExpected = maxScanWork
	}

	// Compute the remaining scan work estimate.
	//
	// Note that we currently count allocations during GC as both
	// scannable heap (heapScan) and scan work completed
	// (scanWork), so allocation will change this difference
	// slowly in the soft regime and not at all in the hard
	// regime.
	scanWorkRemaining := scanWorkExpected - work
	if scanWorkRemaining < 1000 {
		// We set a somewhat arbitrary lower bound on
		// remaining scan work since if we aim a little high,
		// we can miss by a little.
		//
		// We *do* need to enforce that this is at least 1,
		// since marking is racy and double-scanning objects
		// may legitimately make the remaining scan work
		// negative, even in the hard goal regime.
		scanWorkRemaining = 1000
	}

	// Compute the heap distance remaining.
	heapRemaining := heapGoal - int64(live)
	if heapRemaining <= 0 {
		// This shouldn't happen, but if it does, avoid
		// dividing by zero or setting the assist negative.
		heapRemaining = 1
	}

	// Compute the mutator assist ratio so by the time the mutator
	// allocates the remaining heap bytes up to heapGoal, it will
	// have done (or stolen) the remaining amount of scan work.
	// Note that the assist ratio values are updated atomically
	// but not together. This means there may be some degree of
	// skew between the two values. This is generally OK as the
	// values shift relatively slowly over the course of a GC
	// cycle.
	assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
	assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining)
	c.assistWorkPerByte.Store(assistWorkPerByte)
	c.assistBytesPerWork.Store(assistBytesPerWork)
}
// endCycle computes the consMark estimate for the next cycle.
//
// now is the absolute time mark termination began and procs is the
// GOMAXPROCS value in effect for the cycle. Called with the world stopped
// at the end of the mark phase.
func (c *gcControllerState) endCycle(now int64, procs int) {
	// Record last heap goal for the scavenger.
	// We'll be updating the heap goal soon.
	//
	// Write through the receiver (not the package-level gcController) so
	// that test-only copies of gcControllerState update their own state
	// rather than clobbering the live pacer. For the production controller
	// c == &gcController, so behavior there is unchanged.
	c.lastHeapGoal = c.heapGoal()

	// Compute the duration of time for which assists were turned on.
	assistDuration := now - c.markStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcBackgroundUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime.Load()) / float64(assistDuration*int64(procs))
	}

	if c.heapLive.Load() <= c.triggered {
		// Shouldn't happen, but let's be very safe about this in case the
		// GC is somehow extremely short.
		//
		// In this case though, the only reasonable value for c.heapLive-c.triggered
		// would be 0, which isn't really all that useful, i.e. the GC was so short
		// that it didn't matter.
		//
		// Ignore this case and don't update anything.
		return
	}
	idleUtilization := 0.0
	if assistDuration > 0 {
		idleUtilization = float64(c.idleMarkTime.Load()) / float64(assistDuration*int64(procs))
	}
	// Determine the cons/mark ratio.
	//
	// The units we want for the numerator and denominator are both B / cpu-ns.
	// We get this by taking the bytes allocated or scanned, and divide by the amount of
	// CPU time it took for those operations. For allocations, that CPU time is
	//
	//    assistDuration * procs * (1 - utilization)
	//
	// Where utilization includes just background GC workers and assists. It does *not*
	// include idle GC work time, because in theory the mutator is free to take that at
	// any point.
	//
	// For scanning, that CPU time is
	//
	//    assistDuration * procs * (utilization + idleUtilization)
	//
	// In this case, we *include* idle utilization, because that is additional CPU time that
	// the GC had available to it.
	//
	// In effect, idle GC time is sort of double-counted here, but it's very weird compared
	// to other kinds of GC work, because of how fluid it is. Namely, because the mutator is
	// *always* free to take it.
	//
	// So this calculation is really:
	//     (heapLive-trigger) / (assistDuration * procs * (1-utilization)) /
	//         (scanWork) / (assistDuration * procs * (utilization+idleUtilization))
	//
	// Note that because we only care about the ratio, assistDuration and procs cancel out.
	scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
	currentConsMark := (float64(c.heapLive.Load()-c.triggered) * (utilization + idleUtilization)) /
		(float64(scanWork) * (1 - utilization))

	// Update our cons/mark estimate. This is the maximum of the value we just computed and the last
	// 4 cons/mark values we measured. The reason we take the maximum here is to bias a noisy
	// cons/mark measurement toward fewer assists at the expense of additional GC cycles (starting
	// earlier).
	oldConsMark := c.consMark
	c.consMark = currentConsMark
	for i := range c.lastConsMark {
		if c.lastConsMark[i] > c.consMark {
			c.consMark = c.lastConsMark[i]
		}
	}
	// Shift the measurement window and append the newest value.
	copy(c.lastConsMark[:], c.lastConsMark[1:])
	c.lastConsMark[len(c.lastConsMark)-1] = currentConsMark

	if debug.gcpacertrace > 0 {
		printlock()
		goal := gcGoalUtilization * 100
		print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
		print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load(), " B exp.) ")
		live := c.heapLive.Load()
		print("in ", c.triggered, " B -> ", live, " B (∆goal ", int64(live)-int64(c.lastHeapGoal), ", cons/mark ", oldConsMark, ")")
		println()
		printunlock()
	}
}
// enlistWorker encourages another dedicated mark worker to start on
// another P if there are spare worker slots. It is used by putfull
// when more work is made available.
//
// If goexperiment.GreenTeaGC, the caller must not hold a G's scan bit,
// otherwise this could cause a deadlock. This is already enforced by
// the static lock ranking.
//
//go:nowritebarrier
func (c *gcControllerState) enlistWorker() {
	wantDedicated := c.dedicatedMarkWorkersNeeded.Load() > 0

	// Under goexperiment.GreenTeaGC, prefer spinning up a worker on an
	// idle P before resorting to preemption.
	//
	// Note: with Green Tea, this places a requirement on enlistWorker
	// that it must not be called while a G's scan bit is held.
	if goexperiment.GreenTeaGC {
		wantIdle := c.needIdleMarkWorker()
		if !wantDedicated && !wantIdle {
			// Dedicated and idle worker slots are all satisfied;
			// nothing to do.
			return
		}
		// Waking an idle P lets the scheduler pick up a worker (it
		// already prefers starting a new dedicated worker over an
		// idle one).
		if sched.npidle.Load() != 0 && sched.nmspinning.Load() == 0 {
			wakep() // Likely to consume our worker request.
			return
		}
	}

	// Beyond this point, preemption is only useful for dedicated workers.
	if !wantDedicated {
		return
	}

	// With a single P there is no "other" P to preempt.
	if gomaxprocs <= 1 {
		return
	}

	gp := getg()
	if gp == nil || gp.m == nil || gp.m.p == 0 {
		return
	}
	selfID := gp.m.p.ptr().id

	// Try a handful of random victims, skipping our own P, and preempt
	// the first one that's actually running.
	for attempt := 0; attempt < 5; attempt++ {
		victim := int32(cheaprandn(uint32(gomaxprocs - 1)))
		if victim >= selfID {
			victim++ // Shift past our own P's id.
		}
		pp := allp[victim]
		if pp.status != _Prunning {
			continue
		}
		if preemptone(pp) {
			return
		}
	}
}
// assignWaitingGCWorker assigns a background mark worker to pp if one should
// be run.
//
// If a worker is selected, it is assigned to pp.nextGCMarkWorker and the P is
// wired as a GC mark worker. The G is still in _Gwaiting. If no worker is
// selected, ok returns false.
//
// If assignWaitingGCWorker returns true, this P must either:
//   - Mark the G as runnable and run it, clearing pp.nextGCMarkWorker.
//   - Or, call c.releaseNextGCMarkWorker.
//
// This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) assignWaitingGCWorker(pp *p, now int64) (bool, int64) {
	if gcBlackenEnabled == 0 {
		// N.B. The message names this function so the crash points at the
		// right place (it previously said "findRunnable").
		throw("gcControllerState.assignWaitingGCWorker: blackening not enabled")
	}
	if now == 0 {
		now = nanotime()
	}
	if !gcShouldScheduleWorker(pp) {
		// No good reason to schedule a worker. This can happen at
		// the end of the mark phase when there are still
		// assists tapering off. Don't bother running a worker
		// now because it'll just return immediately.
		return false, now
	}
	if c.dedicatedMarkWorkersNeeded.Load() <= 0 && c.fractionalUtilizationGoal == 0 {
		// No current need for dedicated workers, and no need at all for
		// fractional workers. Check before trying to acquire a worker; when
		// GOMAXPROCS is large, that can be expensive and is often unnecessary.
		//
		// When a dedicated worker stops running, the gcBgMarkWorker loop notes
		// the need for the worker before returning it to the pool. If we don't
		// see the need now, we wouldn't have found it in the pool anyway.
		return false, now
	}

	// Grab a worker before we commit to running below.
	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// There is at least one worker per P, so normally there are
		// enough workers to run on all Ps, if necessary. However, once
		// a worker enters gcMarkDone it may park without rejoining the
		// pool, thus freeing a P with no corresponding worker.
		// gcMarkDone never depends on another worker doing work, so it
		// is safe to simply do nothing here.
		//
		// If gcMarkDone bails out without completing the mark phase,
		// it will always do so with queued global work. Thus, that P
		// will be immediately eligible to re-run the worker G it was
		// just using, ensuring work can complete.
		return false, now
	}

	// decIfPositive atomically decrements val if it is positive, reporting
	// whether the decrement happened. Loops on CAS failure.
	decIfPositive := func(val *atomic.Int64) bool {
		for {
			v := val.Load()
			if v <= 0 {
				return false
			}
			if val.CompareAndSwap(v, v-1) {
				return true
			}
		}
	}
	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		pp.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
	} else if c.fractionalUtilizationGoal == 0 {
		// No need for fractional workers.
		gcBgMarkWorkerPool.push(&node.node)
		return false, now
	} else {
		// Is this P behind on the fractional utilization
		// goal?
		//
		// This should be kept in sync with pollFractionalWorkerExit.
		delta := now - c.markStartTime
		if delta > 0 && float64(pp.gcFractionalMarkTime.Load())/float64(delta) > c.fractionalUtilizationGoal {
			// Nope. No need to run a fractional worker.
			gcBgMarkWorkerPool.push(&node.node)
			return false, now
		}
		// Run a fractional worker.
		pp.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}
	pp.nextGCMarkWorker = node
	return true, now
}
// findRunnableGCWorker returns a background mark worker for pp if it
// should be run.
//
// If findRunnableGCWorker returns a G, this P is wired as a GC mark worker and
// must run the G.
//
// This must only be called when gcBlackenEnabled != 0.
//
// This function is allowed to have write barriers because it is called from
// the portion of findRunnable that always has a P.
//
//go:yeswritebarrierrec
func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
	// Since we have the current time, check if the GC CPU limiter
	// hasn't had an update in a while. This check is necessary in
	// case the limiter is on but hasn't been checked in a while and
	// so may have left sufficient headroom to turn off again.
	if now == 0 {
		now = nanotime()
	}
	if gcCPULimiter.needUpdate(now) {
		gcCPULimiter.update(now)
	}

	// If a worker wasn't already assigned by procresize, assign one now.
	if pp.nextGCMarkWorker == nil {
		// Assign to the outer now rather than shadowing it with :=,
		// so any time update made by assignWaitingGCWorker isn't
		// discarded on the success path.
		var ok bool
		ok, now = c.assignWaitingGCWorker(pp, now)
		if !ok {
			return nil, now
		}
	}
	node := pp.nextGCMarkWorker
	pp.nextGCMarkWorker = nil

	// Run the background mark worker.
	gp := node.gp.ptr()
	trace := traceAcquire()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.ok() {
		trace.GoUnpark(gp, 0)
		traceRelease(trace)
	}
	return gp, now
}
// releaseNextGCMarkWorker releases an unused pp.nextGCMarkWorker, if any,
// returning the worker node to gcBgMarkWorkerPool.
//
// This function is allowed to have write barriers because it is called from
// the portion of schedule that always has a P.
//
//go:yeswritebarrierrec
func (c *gcControllerState) releaseNextGCMarkWorker(pp *p) {
	node := pp.nextGCMarkWorker
	if node == nil {
		return
	}
	// The worker never ran, so report a zero-duration stop to keep the
	// dedicated/fractional/idle worker bookkeeping balanced.
	c.markWorkerStop(pp.gcMarkWorkerMode, 0)
	gcBgMarkWorkerPool.push(&node.node)
	pp.nextGCMarkWorker = nil
}
// resetLive sets up the controller state for the next mark phase after the end
// of the previous one. Must be called after endCycle and before commit, before
// the world is started.
//
// The world must be stopped.
func (c *gcControllerState) resetLive(bytesMarked uint64) {
	c.heapMarked = bytesMarked
	c.heapLive.Store(bytesMarked)
	// Snapshot the scan work accumulated over the cycle that just ended.
	c.heapScan.Store(uint64(c.heapScanWork.Load()))
	c.lastHeapScan = uint64(c.heapScanWork.Load())
	c.lastStackScan.Store(uint64(c.stackScanWork.Load()))
	// ^uint64(0) is the sentinel meaning "no GC triggered this cycle";
	// heapGoalInternal checks for it.
	c.triggered = ^uint64(0) // Reset triggered.

	// heapLive was updated, so emit a trace event.
	trace := traceAcquire()
	if trace.ok() {
		trace.HeapAlloc(bytesMarked)
		traceRelease(trace)
	}
}
// markWorkerStop must be called whenever a mark worker stops executing.
//
// It updates mark work accounting in the controller by a duration of
// work in nanoseconds and other bookkeeping.
//
// Safe to execute at any time.
func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64) {
	switch mode {
	case gcMarkWorkerDedicatedMode:
		c.dedicatedMarkTime.Add(duration)
		// The dedicated worker slot is available again.
		c.dedicatedMarkWorkersNeeded.Add(1)
	case gcMarkWorkerFractionalMode:
		c.fractionalMarkTime.Add(duration)
	case gcMarkWorkerIdleMode:
		c.idleMarkTime.Add(duration)
		// Remove this worker from the idle worker count.
		c.removeIdleMarkWorker()
	default:
		throw("markWorkerStop: unknown mark worker mode")
	}
}
// update adds dHeapLive to heapLive and, outside of a GC cycle, dHeapScan
// to heapScan. During a mark phase (gcBlackenEnabled != 0) heapScan is
// fixed, so the assist ratio is revised instead.
func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
	if dHeapLive != 0 {
		trace := traceAcquire()
		live := gcController.heapLive.Add(dHeapLive)
		if trace.ok() {
			// gcController.heapLive changed.
			trace.HeapAlloc(live)
			traceRelease(trace)
		}
	}
	if gcBlackenEnabled == 0 {
		// Update heapScan when we're not in a current GC. It is fixed
		// at the beginning of a cycle.
		if dHeapScan != 0 {
			gcController.heapScan.Add(dHeapScan)
		}
	} else {
		// gcController.heapLive changed.
		c.revise()
	}
}
// addScannableStack adds amount (which may be negative) to the scannable
// stack total, buffering small updates in a per-P delta to limit traffic
// on the global counter.
func (c *gcControllerState) addScannableStack(pp *p, amount int64) {
	// Without a P to buffer through, update the global count directly.
	if pp == nil {
		c.maxStackScan.Add(amount)
		return
	}
	// Accumulate into the P-local delta, flushing to the global counter
	// only once it drifts beyond the slack in either direction.
	pp.maxStackScanDelta += amount
	if d := pp.maxStackScanDelta; d >= maxStackScanSlack || d <= -maxStackScanSlack {
		c.maxStackScan.Add(d)
		pp.maxStackScanDelta = 0
	}
}
// addGlobals adds amount to the total scannable globals size.
func (c *gcControllerState) addGlobals(amount int64) {
	c.globalsScan.Add(amount)
}
// heapGoal returns the current heap goal.
func (c *gcControllerState) heapGoal() uint64 {
	// Discard the minimum-trigger bound; only the goal is needed here.
	goal, _ := c.heapGoalInternal()
	return goal
}
// heapGoalInternal is the implementation of heapGoal which returns additional
// information that is necessary for computing the trigger.
//
// The returned minTrigger is always <= goal.
func (c *gcControllerState) heapGoalInternal() (goal, minTrigger uint64) {
	// Start with the goal calculated for gcPercent.
	goal = c.gcPercentHeapGoal.Load()

	// Check if the memory-limit-based goal is smaller, and if so, pick that.
	if newGoal := c.memoryLimitHeapGoal(); newGoal < goal {
		goal = newGoal
	} else {
		// We're not limited by the memory limit goal, so perform a series of
		// adjustments that might move the goal forward in a variety of circumstances.

		sweepDistTrigger := c.sweepDistMinTrigger.Load()
		if sweepDistTrigger > goal {
			// Set the goal to maintain a minimum sweep distance since
			// the last call to commit. Note that we never want to do this
			// if we're in the memory limit regime, because it could push
			// the goal up.
			goal = sweepDistTrigger
		}
		// Since we ignore the sweep distance trigger in the memory
		// limit regime, we need to ensure we don't propagate it to
		// the trigger, because it could cause a violation of the
		// invariant that the trigger < goal.
		minTrigger = sweepDistTrigger

		// Ensure that the heap goal is at least a little larger than
		// the point at which we triggered. This may not be the case if GC
		// start is delayed or if the allocation that pushed gcController.heapLive
		// over trigger is large or if the trigger is really close to
		// GOGC. Assist is proportional to this distance, so enforce a
		// minimum distance, even if it means going over the GOGC goal
		// by a tiny bit.
		//
		// Ignore this if we're in the memory limit regime: we'd prefer to
		// have the GC respond hard about how close we are to the goal than to
		// push the goal back in such a manner that it could cause us to exceed
		// the memory limit.
		//
		// c.triggered == ^uint64(0) means no GC has triggered this cycle
		// (see resetLive).
		const minRunway = 64 << 10
		if c.triggered != ^uint64(0) && goal < c.triggered+minRunway {
			goal = c.triggered + minRunway
		}
	}
	return
}
// memoryLimitHeapGoal returns a heap goal derived from memoryLimit.
func (c *gcControllerState) memoryLimitHeapGoal() uint64 {
	// Start by pulling out some values we'll need. Be careful about overflow.
	var heapFree, heapAlloc, mappedReady uint64
	for {
		heapFree = c.heapFree.load()                         // Free and unscavenged memory.
		heapAlloc = c.totalAlloc.Load() - c.totalFree.Load() // Heap object bytes in use.
		mappedReady = c.mappedReady.Load()                   // Total unreleased mapped memory.
		if heapFree+heapAlloc <= mappedReady {
			break
		}
		// It is impossible for total unreleased mapped memory to exceed heap memory, but
		// because these stats are updated independently, we may observe a partial update
		// including only some values. Thus, we appear to break the invariant. However,
		// this condition is necessarily transient, so just try again. In the case of a
		// persistent accounting error, we'll deadlock here.
	}

	// Below we compute a goal from memoryLimit. There are a few things to be aware of.
	// Firstly, the memoryLimit does not easily compare to the heap goal: the former
	// is total mapped memory by the runtime that hasn't been released, while the latter is
	// only heap object memory. Intuitively, the way we convert from one to the other is to
	// subtract everything from memoryLimit that both contributes to the memory limit (so,
	// ignore scavenged memory) and doesn't contain heap objects. This isn't quite what
	// lines up with reality, but it's a good starting point.
	//
	// In practice this computation looks like the following:
	//
	//    goal := memoryLimit - ((mappedReady - heapFree - heapAlloc) + max(mappedReady - memoryLimit, 0))
	//                    ^1                                    ^2
	//    goal -= goal / 100 * memoryLimitHeapGoalHeadroomPercent
	//    ^3
	//
	// Let's break this down.
	//
	// The first term (marker 1) is everything that contributes to the memory limit and isn't
	// or couldn't become heap objects. It represents, broadly speaking, non-heap overheads.
	// One oddity you may have noticed is that we also subtract out heapFree, i.e. unscavenged
	// memory that may contain heap objects in the future.
	//
	// Let's take a step back. In an ideal world, this term would look something like just
	// the heap goal. That is, we "reserve" enough space for the heap to grow to the heap
	// goal, and subtract out everything else. This is of course impossible; the definition
	// is circular! However, this impossible definition contains a key insight: the amount
	// we're *going* to use matters just as much as whatever we're currently using.
	//
	// Consider if the heap shrinks to 1/10th its size, leaving behind lots of free and
	// unscavenged memory. mappedReady - heapAlloc will be quite large, because of that free
	// and unscavenged memory, pushing the goal down significantly.
	//
	// heapFree is also safe to exclude from the memory limit because in the steady-state, it's
	// just a pool of memory for future heap allocations, and making new allocations from heapFree
	// memory doesn't increase overall memory use. In transient states, the scavenger and the
	// allocator actively manage the pool of heapFree memory to maintain the memory limit.
	//
	// The second term (marker 2) is the amount of memory we've exceeded the limit by, and is
	// intended to help recover from such a situation. By pushing the heap goal down, we also
	// push the trigger down, triggering and finishing a GC sooner in order to make room for
	// other memory sources. Note that since we're effectively reducing the heap goal by X bytes,
	// we're actually giving more than X bytes of headroom back, because the heap goal is in
	// terms of heap objects, but it takes more than X bytes (e.g. due to fragmentation) to store
	// X bytes worth of objects.
	//
	// The final adjustment (marker 3) reduces the maximum possible memory limit heap goal by
	// memoryLimitHeapGoalHeadroomPercent. As the name implies, this is to provide additional
	// headroom in the face of pacing inaccuracies, and also to leave a buffer of unscavenged
	// memory so the allocator isn't constantly scavenging. The reduction amount also has a
	// fixed minimum (memoryLimitMinHeapGoalHeadroom, not pictured) because the aforementioned
	// pacing inaccuracies disproportionately affect small heaps: as heaps get smaller, the
	// pacer's inputs get fuzzier. Shorter GC cycles and less GC work means noisy external
	// factors like the OS scheduler have a greater impact.

	memoryLimit := uint64(c.memoryLimit.Load())

	// Compute term 1.
	nonHeapMemory := mappedReady - heapFree - heapAlloc

	// Compute term 2.
	var overage uint64
	if mappedReady > memoryLimit {
		overage = mappedReady - memoryLimit
	}

	if nonHeapMemory+overage >= memoryLimit {
		// We're at a point where non-heap memory exceeds the memory limit on its own.
		// There's honestly not much we can do here but just trigger GCs continuously
		// and let the CPU limiter rein that in. Something has to give at this point.
		// Set it to heapMarked, the lowest possible goal.
		return c.heapMarked
	}

	// Compute the goal.
	goal := memoryLimit - (nonHeapMemory + overage)

	// Apply some headroom to the goal to account for pacing inaccuracies and to reduce
	// the impact of scavenging at allocation time in response to a high allocation rate
	// when GOGC=off. See issue #57069. Also, be careful about small limits.
	headroom := goal / 100 * memoryLimitHeapGoalHeadroomPercent
	if headroom < memoryLimitMinHeapGoalHeadroom {
		// Set a fixed minimum to deal with the particularly large effect pacing inaccuracies
		// have for smaller heaps.
		headroom = memoryLimitMinHeapGoalHeadroom
	}
	// Subtract the headroom, clamping at headroom itself so the
	// subtraction can't underflow for tiny goals.
	if goal < headroom || goal-headroom < headroom {
		goal = headroom
	} else {
		goal = goal - headroom
	}

	// Don't let us go below the live heap. A heap goal below the live heap doesn't make sense.
	if goal < c.heapMarked {
		goal = c.heapMarked
	}
	return goal
}
const (
	// These constants determine the bounds on the GC trigger as a fraction
	// of heap bytes allocated between the start of a GC (heapLive == heapMarked)
	// and the end of a GC (heapLive == heapGoal).
	//
	// The constants are obscured in this way for efficiency. The denominator
	// of the fraction is always a power-of-two for a quick division, so that
	// the numerator is a single constant integer multiplication.
	triggerRatioDen = 64

	// The minimum trigger constant was chosen empirically: given a sufficiently
	// fast/scalable allocator with 48 Ps that could drive the trigger ratio
	// to <0.05, this constant causes applications to retain the same peak
	// RSS compared to not having this allocator.
	minTriggerRatioNum = 45 // ~0.7 (45/64)

	// The maximum trigger constant is chosen somewhat arbitrarily, but the
	// current constant has served us well over the years.
	maxTriggerRatioNum = 61 // ~0.95 (61/64)
)
// trigger returns the current point at which a GC should trigger along with
// the heap goal.
//
// The returned value may be compared against heapLive to determine whether
// the GC should trigger. Thus, the GC trigger condition should be (but may
// not be, in the case of small movements for efficiency) checked whenever
// the heap goal may change.
func (c *gcControllerState) trigger() (uint64, uint64) {
	goal, minTrigger := c.heapGoalInternal()

	// Invariant: the trigger must always be less than the heap goal.
	//
	// Note that the memory limit sets a hard maximum on our heap goal,
	// but the live heap may grow beyond it.

	if c.heapMarked >= goal {
		// The goal should never be smaller than heapMarked, but let's be
		// defensive about it. The only reasonable trigger here is one that
		// causes a continuous GC cycle at heapMarked, but respect the goal
		// if it came out as smaller than that.
		return goal, goal
	}

	// Below this point, c.heapMarked < goal.

	// heapMarked is our absolute minimum, and it's possible the trigger
	// bound we get from heapGoalInternal is less than that.
	if minTrigger < c.heapMarked {
		minTrigger = c.heapMarked
	}

	// If we let the trigger go too low, then if the application
	// is allocating very rapidly we might end up in a situation
	// where we're allocating black during a nearly always-on GC.
	// The result of this is a growing heap and ultimately an
	// increase in RSS. By capping us at a point >0, we're essentially
	// saying that we're OK using more CPU during the GC to prevent
	// this growth in RSS.
	triggerLowerBound := ((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum + c.heapMarked
	if minTrigger < triggerLowerBound {
		minTrigger = triggerLowerBound
	}

	// For small heaps, set the max trigger point at maxTriggerRatio of the way
	// from the live heap to the heap goal. This ensures we always have *some*
	// headroom when the GC actually starts. For larger heaps, set the max trigger
	// point at the goal, minus the minimum heap size.
	//
	// This choice follows from the fact that the minimum heap size is chosen
	// to reflect the costs of a GC with no work to do. With a large heap but
	// very little scan work to perform, this gives us exactly as much runway
	// as we would need, in the worst case.
	maxTrigger := ((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum + c.heapMarked
	if goal > defaultHeapMinimum && goal-defaultHeapMinimum > maxTrigger {
		maxTrigger = goal - defaultHeapMinimum
	}
	maxTrigger = max(maxTrigger, minTrigger)

	// Compute the trigger from our bounds and the runway stored by commit.
	var trigger uint64
	runway := c.runway.Load()
	if runway > goal {
		trigger = minTrigger
	} else {
		trigger = goal - runway
	}
	// Clamp the trigger into [minTrigger, maxTrigger].
	trigger = max(trigger, minTrigger)
	trigger = min(trigger, maxTrigger)
	if trigger > goal {
		print("trigger=", trigger, " heapGoal=", goal, "\n")
		print("minTrigger=", minTrigger, " maxTrigger=", maxTrigger, "\n")
		throw("produced a trigger greater than the heap goal")
	}
	return trigger, goal
}
// commit recomputes all pacing parameters needed to derive the
// trigger and the heap goal. Namely, the gcPercent-based heap goal,
// and the amount of runway we want to give the GC this cycle.
//
// This can be called any time. If GC is in the middle of a
// concurrent phase, it will adjust the pacing of that phase.
//
// isSweepDone should be the result of calling isSweepDone(),
// unless we're testing or we know we're executing during a GC cycle.
//
// This depends on gcPercent, gcController.heapMarked, and
// gcController.heapLive. These must be up to date.
//
// Callers must call gcControllerState.revise after calling this
// function if the GC is enabled.
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) commit(isSweepDone bool) {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	if isSweepDone {
		// The sweep is done, so there aren't any restrictions on the trigger
		// we need to think about.
		c.sweepDistMinTrigger.Store(0)
	} else {
		// Concurrent sweep happens in the heap growth
		// from gcController.heapLive to trigger. Make sure we
		// give the sweeper some runway if it doesn't have enough.
		c.sweepDistMinTrigger.Store(c.heapLive.Load() + sweepMinHeapDistance)
	}

	// Compute the next GC goal, which is when the allocated heap
	// has grown by GOGC/100 over where it started the last cycle,
	// plus additional runway for non-heap sources of GC work.
	gcPercentHeapGoal := ^uint64(0)
	if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
		gcPercentHeapGoal = c.heapMarked + (c.heapMarked+c.lastStackScan.Load()+c.globalsScan.Load())*uint64(gcPercent)/100
	}
	// Apply the minimum heap size here. It's defined in terms of gcPercent
	// and is only updated by functions that call commit.
	if gcPercentHeapGoal < c.heapMinimum {
		gcPercentHeapGoal = c.heapMinimum
	}
	c.gcPercentHeapGoal.Store(gcPercentHeapGoal)

	// Compute the amount of runway we want the GC to have by using our
	// estimate of the cons/mark ratio.
	//
	// The idea is to take our expected scan work, and multiply it by
	// the cons/mark ratio to determine how long it'll take to complete
	// that scan work in terms of bytes allocated. This gives us our GC's
	// runway.
	//
	// However, the cons/mark ratio is a ratio of rates per CPU-second, but
	// here we care about the relative rates for some division of CPU
	// resources among the mutator and the GC.
	//
	// To summarize, we have B / cpu-ns, and we want B / ns. We get that
	// by multiplying by our desired division of CPU resources. We choose
	// to express CPU resources as GOMAXPROCS*fraction. Note that because
	// we're working with a ratio here, we can omit the number of CPU cores,
	// because they'll appear in the numerator and denominator and cancel out.
	// As a result, this is basically just "weighing" the cons/mark ratio by
	// our desired division of resources.
	//
	// Furthermore, by setting the runway so that CPU resources are divided
	// this way, assuming that the cons/mark ratio is correct, we make that
	// division a reality.
	c.runway.Store(uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load())))
}
// setGCPercent updates gcPercent. commit must be called after.
// Returns the old value of gcPercent.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setGCPercent(in int32) int32 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	prev := c.gcPercent.Load()
	// Normalize every negative input to -1, the canonical "off" value.
	if in < 0 {
		in = -1
	}
	// heapMinimum is defined in terms of gcPercent; keep them in sync.
	c.heapMinimum = defaultHeapMinimum * uint64(in) / 100
	c.gcPercent.Store(in)
	return prev
}
// setGCPercent is the runtime side of runtime/debug.SetGCPercent, linked
// into that package via go:linkname. It updates the pacer's gcPercent,
// recommits pacing parameters, and, when disabling GC, waits for any
// in-flight mark phase to finish.
//
//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setGCPercent(in)
		gcControllerCommit()
		unlock(&mheap_.lock)
	})

	// If we just disabled GC, wait for any concurrent GC mark to
	// finish so we always return with no GC running.
	if in < 0 {
		gcWaitOnMark(work.cycles.Load())
	}

	return out
}
// readGOGC reads the GOGC environment variable and returns the GC
// percentage it selects: -1 for "off", the parsed value for a valid
// 32-bit integer, and the default of 100 for anything else (including
// an unset variable).
func readGOGC() int32 {
	v := gogetenv("GOGC")
	if v == "off" {
		return -1
	}
	n, err := strconv.ParseInt(v, 10, 32)
	if err != nil {
		return 100
	}
	return int32(n)
}
// setMemoryLimit updates memoryLimit. commit must be called after.
// Returns the old value of memoryLimit.
//
// A negative input leaves the limit unchanged, so callers may use it to
// query the current value.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setMemoryLimit(in int64) int64 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	prev := c.memoryLimit.Load()
	if in >= 0 {
		c.memoryLimit.Store(in)
	}
	return prev
}
// setMemoryLimit is the runtime side of runtime/debug.SetMemoryLimit,
// linked into that package via go:linkname. A negative input only reads
// the current limit.
//
//go:linkname setMemoryLimit runtime/debug.setMemoryLimit
func setMemoryLimit(in int64) (out int64) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setMemoryLimit(in)
		if in < 0 || out == in {
			// If we're just checking the value or not changing
			// it, there's no point in doing the rest.
			unlock(&mheap_.lock)
			return
		}
		gcControllerCommit()
		unlock(&mheap_.lock)
	})

	return out
}
// readGOMEMLIMIT reads the GOMEMLIMIT environment variable and returns
// the limit in bytes. An unset or "off" value means no limit
// (math.MaxInt64); a malformed value is a fatal error.
func readGOMEMLIMIT() int64 {
	v := gogetenv("GOMEMLIMIT")
	if v == "" || v == "off" {
		return math.MaxInt64
	}
	limit, ok := parseByteCount(v)
	if !ok {
		print("GOMEMLIMIT=", v, "\n")
		throw("malformed GOMEMLIMIT; see `go doc runtime/debug.SetMemoryLimit`")
	}
	return limit
}
// addIdleMarkWorker attempts to add a new idle mark worker.
//
// If this returns true, the caller must become an idle mark worker unless
// there's no background mark worker goroutines in the pool. This case is
// harmless because there are already background mark workers running.
// If this returns false, the caller must NOT become an idle mark worker.
//
// nosplit because it may be called without a P.
//
//go:nosplit
func (c *gcControllerState) addIdleMarkWorker() bool {
	for {
		old := c.idleMarkWorkers.Load()
		// idleMarkWorkers packs the current worker count into the low
		// 32 bits and the maximum into the high 32 bits.
		n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
		if n >= max {
			// See the comment on idleMarkWorkers for why
			// n > max is tolerated. (setMaxIdleMarkWorkers may
			// lower max without waiting for n to drop below it.)
			return false
		}
		if n < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n+1)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return true
		}
	}
}
// needIdleMarkWorker is a hint as to whether another idle mark worker is needed.
//
// The caller must still call addIdleMarkWorker to become one. This is mainly
// useful for a quick check before an expensive operation.
//
// nosplit because it may be called without a P.
//
//go:nosplit
func (c *gcControllerState) needIdleMarkWorker() bool {
	// Unpack the worker count (low 32 bits) and the cap (high 32 bits).
	packed := c.idleMarkWorkers.Load()
	count := int32(packed & uint64(^uint32(0)))
	limit := int32(packed >> 32)
	return count < limit
}
// removeIdleMarkWorker must be called when an idle mark worker stops executing.
func (c *gcControllerState) removeIdleMarkWorker() {
	for {
		old := c.idleMarkWorkers.Load()
		// Worker count in the low 32 bits, maximum in the high 32 bits.
		n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
		if n-1 < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n-1)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return
		}
	}
}
// setMaxIdleMarkWorkers sets the maximum number of idle mark workers allowed.
//
// This method is optimistic in that it does not wait for the number of
// idle mark workers to reduce to max before returning; it assumes the workers
// will deschedule themselves.
func (c *gcControllerState) setMaxIdleMarkWorkers(max int32) {
	for {
		// The worker count lives in the low 32 bits; the cap in the
		// high 32 bits. Preserve the count, replace the cap.
		prev := c.idleMarkWorkers.Load()
		n := int32(prev & uint64(^uint32(0)))
		if n < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		next := uint64(uint32(n)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(prev, next) {
			return
		}
	}
}
// gcControllerCommit is gcController.commit, but passes arguments from live
// (non-test) data. It also updates any consumers of the GC pacing, such as
// sweep pacing and the background scavenger.
//
// Calls gcController.commit.
//
// The heap lock must be held, so this must be executed on the system stack.
//
//go:systemstack
func gcControllerCommit() {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	gcController.commit(isSweepDone())

	// Update mark pacing.
	if gcphase != _GCoff {
		gcController.revise()
	}

	// TODO(mknyszek): This isn't really accurate any longer because the heap
	// goal is computed dynamically. Still useful to snapshot, but not as useful.
	trace := traceAcquire()
	if trace.ok() {
		trace.HeapGoal()
		traceRelease(trace)
	}

	// Propagate the new pacing to the sweeper and the scavenger.
	trigger, heapGoal := gcController.trigger()
	gcPaceSweeper(trigger)
	gcPaceScavenger(gcController.memoryLimit.Load(), heapGoal, gcController.lastHeapGoal)
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Scavenging free pages.
//
// This file implements scavenging (the release of physical pages backing mapped
// memory) of free and unused pages in the heap as a way to deal with page-level
// fragmentation and reduce the RSS of Go applications.
//
// Scavenging in Go happens on two fronts: there's the background
// (asynchronous) scavenger and the allocation-time (synchronous) scavenger.
//
// The former happens on a goroutine much like the background sweeper which is
// soft-capped at using scavengePercent of the mutator's time, based on
// order-of-magnitude estimates of the costs of scavenging. The latter happens
// when allocating pages from the heap.
//
// The scavenger's primary goal is to bring the estimated heap RSS of the
// application down to a goal.
//
// Before we consider what this looks like, we need to split the world into two
// halves. One in which a memory limit is not set, and one in which it is.
//
// For the former, the goal is defined as:
// (retainExtraPercent+100) / 100 * (heapGoal / lastHeapGoal) * lastHeapInUse
//
// Essentially, we wish to have the application's RSS track the heap goal, but
// the heap goal is defined in terms of bytes of objects, rather than pages like
// RSS. As a result, we need to take into account for fragmentation internal to
// spans. heapGoal / lastHeapGoal defines the ratio between the current heap goal
// and the last heap goal, which tells us by how much the heap is growing and
// shrinking. We estimate what the heap will grow to in terms of pages by taking
// this ratio and multiplying it by heapInUse at the end of the last GC, which
// allows us to account for this additional fragmentation. Note that this
// procedure makes the assumption that the degree of fragmentation won't change
// dramatically over the next GC cycle. Overestimating the amount of
// fragmentation simply results in higher memory use, which will be accounted
// for by the next pacing update. Underestimating the fragmentation, however,
// could lead to performance degradation. Handling this case is not within the
// scope of the scavenger. Situations where the amount of fragmentation balloons
// over the course of a single GC cycle should be considered pathologies,
// flagged as bugs, and fixed appropriately.
//
// An additional factor of retainExtraPercent is added as a buffer to help ensure
// that there's more unscavenged memory to allocate out of, since each allocation
// out of scavenged memory incurs a potentially expensive page fault.
//
// If a memory limit is set, then we wish to pick a scavenge goal that maintains
// that memory limit. For that, we look at total memory that has been committed
// (memstats.mappedReady) and try to bring that down below the limit. In this case,
// we want to give buffer space in the *opposite* direction. When the application
// is close to the limit, we want to make sure we push harder to keep it under, so
// if we target below the memory limit, we ensure that the background scavenger is
// giving the situation the urgency it deserves.
//
// In this case, the goal is defined as:
// (100-reduceExtraPercent) / 100 * memoryLimit
//
// We compute both of these goals, and check whether either of them have been met.
// The background scavenger continues operating as long as either one of the goals
// has not been met.
//
// The goals are updated after each GC.
//
// Synchronous scavenging happens for one of two reasons: if an allocation would
// exceed the memory limit or whenever the heap grows in size, for some
// definition of heap-growth. The intuition behind this second reason is that the
// application had to grow the heap because existing fragments were not sufficiently
// large to satisfy a page-level memory allocation, so we scavenge those fragments
// eagerly to offset the growth in RSS that results.
//
// Lastly, not all pages are available for scavenging at all times and in all cases.
// The background scavenger and heap-growth scavenger only release memory in chunks
// that have not been densely-allocated for at least 1 full GC cycle. The reason
// behind this is likelihood of reuse: the Go heap is allocated in a first-fit order
// and by the end of the GC mark phase, the heap tends to be densely packed. Releasing
// memory in these densely packed chunks while they're being packed is counter-productive,
// and worse, it breaks up huge pages on systems that support them. The scavenger (invoked
// during memory allocation) further ensures that chunks it identifies as "dense" are
// immediately eligible for being backed by huge pages. Note that for the most part these
// density heuristics are best-effort heuristics. It's totally possible (but unlikely)
// that a chunk that just became dense is scavenged in the case of a race between memory
// allocation and scavenging.
//
// When synchronously scavenging for the memory limit or for debug.FreeOSMemory, these
// "dense" packing heuristics are ignored (in other words, scavenging is "forced") because
// in these scenarios returning memory to the OS is more important than keeping CPU
// overheads low.
package runtime
import (
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
const (
	// The background scavenger is paced according to these parameters.
	//
	// scavengePercent represents the portion of mutator time we're willing
	// to spend on scavenging in percent.
	scavengePercent = 1 // 1%

	// retainExtraPercent represents the amount of memory over the heap goal
	// that the scavenger should keep as a buffer space for the allocator.
	// This constant is used when we do not have a memory limit set.
	//
	// The purpose of maintaining this overhead is to have a greater pool of
	// unscavenged memory available for allocation (since using scavenged memory
	// incurs an additional cost), to account for heap fragmentation and
	// the ever-changing layout of the heap.
	retainExtraPercent = 10

	// reduceExtraPercent represents the amount of memory under the limit
	// that the scavenger should target. For example, 5 means we target 95%
	// of the limit.
	//
	// The purpose of shooting lower than the limit is to ensure that, once
	// close to the limit, the scavenger is working hard to maintain it. If
	// we have a memory limit set but are far away from it, there's no harm
	// in leaving up to 100-retainExtraPercent live, and it's more efficient
	// anyway, for the same reasons that retainExtraPercent exists.
	reduceExtraPercent = 5

	// maxPagesPerPhysPage is the maximum number of supported runtime pages per
	// physical page, based on maxPhysPageSize.
	maxPagesPerPhysPage = maxPhysPageSize / pageSize

	// scavengeCostRatio is the approximate ratio between the costs of using previously
	// scavenged memory and scavenging memory.
	//
	// For most systems the cost of scavenging greatly outweighs the costs
	// associated with using scavenged memory, making this constant 0. On other systems
	// (especially ones where "sysUsed" is not just a no-op) this cost is non-trivial.
	//
	// This ratio is used as part of multiplicative factor to help the scavenger account
	// for the additional costs of using scavenged memory in its pacing.
	scavengeCostRatio = 0.7 * (goos.IsDarwin + goos.IsIos)

	// scavChunkHiOccFrac indicates the fraction of pages that need to be allocated
	// in the chunk in a single GC cycle for it to be considered high density.
	scavChunkHiOccFrac  = 0.96875
	scavChunkHiOccPages = uint16(scavChunkHiOccFrac * pallocChunkPages)
)
// heapRetained returns an estimate of the current heap RSS: the sum of
// heap memory that is currently in use and heap memory that is free but
// has not yet been returned to the OS.
func heapRetained() uint64 {
	inUse := gcController.heapInUse.load()
	free := gcController.heapFree.load()
	return inUse + free
}
// gcPaceScavenger updates the scavenger's pacing, particularly
// its rate and RSS goal. For this, it requires the current heapGoal,
// and the heapGoal for the previous GC cycle.
//
// Two independent goals are maintained (stored in the scavenge var): one
// derived from gcPercent (proportional to the heap goal) and one derived
// from memoryLimit. Storing ^uint64(0) into a goal disables it.
//
// The RSS goal is based on the current heap goal with a small overhead
// to accommodate non-determinism in the allocator.
//
// Must be called whenever GC pacing is updated.
//
// mheap_.lock must be held or the world must be stopped.
func gcPaceScavenger(memoryLimit int64, heapGoal, lastHeapGoal uint64) {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	// As described at the top of this file, there are two scavenge goals here: one
	// for gcPercent and one for memoryLimit. Let's handle the latter first because
	// it's simpler.

	// We want to target retaining (100-reduceExtraPercent)% of the heap.
	// Note: reduceExtraPercent/100.0 is untyped constant arithmetic, so it is
	// computed exactly (0.05), not with integer truncation.
	memoryLimitGoal := uint64(float64(memoryLimit) * (1 - reduceExtraPercent/100.0))

	// mappedReady is comparable to memoryLimit, and represents how much total memory
	// the Go runtime has committed now (estimated).
	mappedReady := gcController.mappedReady.Load()

	// If we're below the goal already indicate that we don't need the background
	// scavenger for the memory limit. This may seem worrisome at first, but note
	// that the allocator will assist the background scavenger in the face of a memory
	// limit, so we'll be safe even if we stop the scavenger when we shouldn't have.
	if mappedReady <= memoryLimitGoal {
		scavenge.memoryLimitGoal.Store(^uint64(0))
	} else {
		scavenge.memoryLimitGoal.Store(memoryLimitGoal)
	}

	// Now handle the gcPercent goal.

	// If we're called before the first GC completed, disable scavenging.
	// We never scavenge before the 2nd GC cycle anyway (we don't have enough
	// information about the heap yet) so this is fine, and avoids a fault
	// or garbage data later.
	if lastHeapGoal == 0 {
		scavenge.gcPercentGoal.Store(^uint64(0))
		return
	}
	// Compute our scavenging goal: last cycle's in-use heap scaled by how much
	// the heap goal grew (or shrank) this cycle.
	goalRatio := float64(heapGoal) / float64(lastHeapGoal)
	gcPercentGoal := uint64(float64(memstats.lastHeapInUse) * goalRatio)
	// Add retainExtraPercent overhead to retainedGoal. This calculation
	// looks strange but the purpose is to arrive at an integer division
	// (e.g. if retainExtraPercent = 12.5, then we get a divisor of 8)
	// that also avoids the overflow from a multiplication.
	gcPercentGoal += gcPercentGoal / (1.0 / (retainExtraPercent / 100.0))
	// Align it to a physical page boundary to make the following calculations
	// a bit more exact.
	gcPercentGoal = (gcPercentGoal + uint64(physPageSize) - 1) &^ (uint64(physPageSize) - 1)

	// Represents where we are now in the heap's contribution to RSS in bytes.
	//
	// Guaranteed to always be a multiple of physPageSize on systems where
	// physPageSize <= pageSize since we map new heap memory at a size larger than
	// any physPageSize and released memory in multiples of the physPageSize.
	//
	// However, certain functions recategorize heap memory as other stats (e.g.
	// stacks) and this happens in multiples of pageSize, so on systems
	// where physPageSize > pageSize the calculations below will not be exact.
	// Generally this is OK since we'll be off by at most one regular
	// physical page.
	heapRetainedNow := heapRetained()

	// If we're already below our goal, or within one page of our goal, then indicate
	// that we don't need the background scavenger for maintaining a memory overhead
	// proportional to the heap goal.
	if heapRetainedNow <= gcPercentGoal || heapRetainedNow-gcPercentGoal < uint64(physPageSize) {
		scavenge.gcPercentGoal.Store(^uint64(0))
	} else {
		scavenge.gcPercentGoal.Store(gcPercentGoal)
	}
}
// scavenge holds the global pacing state for scavenging: the two goals
// computed by gcPaceScavenger, plus per-GC-cycle time accounting.
var scavenge struct {
	// gcPercentGoal is the amount of retained heap memory (measured by
	// heapRetained) that the runtime will try to maintain by returning
	// memory to the OS. This goal is derived from gcController.gcPercent
	// by choosing to retain enough memory to allocate heap memory up to
	// the heap goal. A value of ^uint64(0) means the goal is disabled.
	gcPercentGoal atomic.Uint64

	// memoryLimitGoal is the amount of memory retained by the runtime (
	// measured by gcController.mappedReady) that the runtime will try to
	// maintain by returning memory to the OS. This goal is derived from
	// gcController.memoryLimit by choosing to target the memory limit or
	// some lower target to keep the scavenger working. A value of
	// ^uint64(0) means the goal is disabled.
	memoryLimitGoal atomic.Uint64

	// assistTime is the time spent by the allocator scavenging in the last GC cycle.
	//
	// This is reset once a GC cycle ends.
	assistTime atomic.Int64

	// backgroundTime is the time spent by the background scavenger in the last GC cycle.
	//
	// This is reset once a GC cycle ends.
	backgroundTime atomic.Int64
}
const (
	// startingScavSleepRatio is the initial (and fallback) work:sleep ratio.
	//
	// It doesn't really matter what value we start at, but we can't be zero, because
	// that'll cause divide-by-zero issues. Pick something conservative which we'll
	// also use as a fallback.
	startingScavSleepRatio = 0.001

	// minScavWorkTime is the minimum amount of work time, in nanoseconds,
	// attributed to one scavenging batch.
	//
	// Spend at least 1 ms scavenging, otherwise the corresponding
	// sleep time to maintain our desired utilization is too low to
	// be reliable.
	minScavWorkTime = 1e6
)
// Sleep/wait state of the background scavenger.
//
// There is a single background scavenger goroutine (see bgscavenge), which is
// wired to this state via scavengerState.init.
var scavenger scavengerState
// scavengerState holds the sleep/wait state and the (stubbable) hooks of the
// background scavenger.
type scavengerState struct {
	// lock protects all fields below.
	lock mutex

	// g is the goroutine the scavenger is bound to.
	g *g

	// timer is the timer used for the scavenger to sleep.
	timer *timer

	// sysmonWake signals to sysmon that it should wake the scavenger.
	sysmonWake atomic.Uint32

	// parked is whether or not the scavenger is parked.
	parked bool

	// printControllerReset instructs printScavTrace to signal that
	// the controller was reset.
	printControllerReset bool

	// targetCPUFraction is the target CPU overhead for the scavenger.
	targetCPUFraction float64

	// sleepRatio is the ratio of time spent doing scavenging work to
	// time spent sleeping. This is used to decide how long the scavenger
	// should sleep for in between batches of work. It is set by
	// critSleepController in order to maintain a CPU overhead of
	// targetCPUFraction.
	//
	// Lower means more sleep, higher means more aggressive scavenging.
	sleepRatio float64

	// sleepController controls sleepRatio.
	//
	// See sleepRatio for more details.
	sleepController piController

	// controllerCooldown is the time left in nanoseconds during which we avoid
	// using the controller and we hold sleepRatio at a conservative
	// value. Used if the controller's assumptions fail to hold.
	controllerCooldown int64

	// sleepStub is a stub used for testing to avoid actually having
	// the scavenger sleep.
	//
	// Unlike the other stubs, this is not populated if left nil.
	// Instead, it is called when non-nil because any valid implementation
	// of this function basically requires closing over this scavenger
	// state, and allocating a closure is not allowed in the runtime as
	// a matter of policy.
	sleepStub func(n int64) int64

	// scavenge is a function that scavenges n bytes of memory.
	// Returns how many bytes of memory it actually scavenged, as
	// well as the time it took in nanoseconds. Usually mheap.pages.scavenge
	// with nanotime called around it, but stubbed out for testing.
	// Like mheap.pages.scavenge, if it scavenges less than n bytes of
	// memory, the caller may assume the heap is exhausted of scavengable
	// memory for now.
	//
	// If this is nil, it is populated with the real thing in init.
	scavenge func(n uintptr) (uintptr, int64)

	// shouldStop is a callback called in the work loop and provides a
	// point that can force the scavenger to stop early, for example because
	// the scavenge policy dictates too much has been scavenged already.
	//
	// If this is nil, it is populated with the real thing in init.
	shouldStop func() bool

	// gomaxprocs returns the current value of gomaxprocs. Stub for testing.
	//
	// If this is nil, it is populated with the real thing in init.
	gomaxprocs func() int32
}
// init initializes a scavenger state and wires to the current G.
//
// Must be called from a regular goroutine that can allocate.
func (s *scavengerState) init() {
	if s.g != nil {
		throw("scavenger state is already wired")
	}
	lockInit(&s.lock, lockRankScavenge)
	s.g = getg()

	s.timer = new(timer)
	// The timer callback wakes the scavenger. The state is threaded through
	// the timer's argument rather than captured by the closure: closing over
	// s would allocate, which is disallowed by runtime policy.
	f := func(s any, _ uintptr, _ int64) {
		s.(*scavengerState).wake()
	}
	s.timer.init(f, s)

	// input: fraction of CPU time actually used.
	// setpoint: ideal CPU fraction.
	// output: ratio of time worked to time slept (determines sleep time).
	//
	// The output of this controller is somewhat indirect to what we actually
	// want to achieve: how much time to sleep for. The reason for this definition
	// is to ensure that the controller's outputs have a direct relationship with
	// its inputs (as opposed to an inverse relationship), making it somewhat
	// easier to reason about for tuning purposes.
	s.sleepController = piController{
		// Tuned loosely via Ziegler-Nichols process.
		kp: 0.3375,
		ti: 3.2e6,
		tt: 1e9, // 1 second reset time.

		// These ranges seem wide, but we want to give the controller plenty of
		// room to hunt for the optimal value.
		min: 0.001,  // 1:1000
		max: 1000.0, // 1000:1
	}
	s.sleepRatio = startingScavSleepRatio

	// Install real functions if stubs aren't present.
	if s.scavenge == nil {
		s.scavenge = func(n uintptr) (uintptr, int64) {
			start := nanotime()
			r := mheap_.pages.scavenge(n, nil, false)
			end := nanotime()
			if start >= end {
				// The clock reported no measurable elapsed time (coarse
				// granularity or a clock bug); report a duration of 0.
				return r, 0
			}
			scavenge.backgroundTime.Add(end - start)
			return r, end - start
		}
	}
	if s.shouldStop == nil {
		s.shouldStop = func() bool {
			// If background scavenging is disabled or if there's no work to do just stop.
			return heapRetained() <= scavenge.gcPercentGoal.Load() &&
				gcController.mappedReady.Load() <= scavenge.memoryLimitGoal.Load()
		}
	}
	if s.gomaxprocs == nil {
		s.gomaxprocs = func() int32 {
			return gomaxprocs
		}
	}
}
// park parks the scavenger goroutine.
//
// Must be called by the scavenger goroutine itself (s.g). The lock is
// acquired here and released by goparkunlock once the goroutine is parked.
func (s *scavengerState) park() {
	lock(&s.lock)
	if getg() != s.g {
		throw("tried to park scavenger from another goroutine")
	}
	s.parked = true
	goparkunlock(&s.lock, waitReasonGCScavengeWait, traceBlockSystemGoroutine, 2)
}
// ready signals to sysmon that the scavenger should be awoken.
//
// The flag is cleared by wake once the scavenger is actually unparked.
func (s *scavengerState) ready() {
	s.sysmonWake.Store(1)
}
// wake immediately unparks the scavenger if necessary.
//
// Safe to run without a P.
func (s *scavengerState) wake() {
	lock(&s.lock)
	if s.parked {
		// Unset sysmonWake, since the scavenger is now being awoken.
		s.sysmonWake.Store(0)

		// s.parked is unset to prevent a double wake-up.
		s.parked = false

		// Ready the goroutine by injecting it. We use injectglist instead
		// of ready or goready in order to allow us to run this function
		// without a P. injectglist also avoids placing the goroutine in
		// the current P's runnext slot, which is desirable to prevent
		// the scavenger from interfering with user goroutine scheduling
		// too much.
		var list gList
		list.push(s.g)
		injectglist(&list)
	}
	unlock(&s.lock)
}
// sleep puts the scavenger to sleep based on the amount of time that it worked
// in nanoseconds.
//
// Note that this function should only be called by the scavenger.
//
// The scavenger may be woken up earlier by a pacing change, and it may not go
// to sleep at all if there's a pending pacing change.
func (s *scavengerState) sleep(worked float64) {
	lock(&s.lock)
	if getg() != s.g {
		throw("tried to sleep scavenger from another goroutine")
	}

	if worked < minScavWorkTime {
		// This means there wasn't enough work to actually fill up minScavWorkTime.
		// That's fine; we shouldn't try to do anything with this information
		// because it's going to result in a short enough sleep request that things
		// will get messy. Just assume we did at least this much work.
		// All this means is that we'll sleep longer than we otherwise would have.
		worked = minScavWorkTime
	}

	// Multiply the critical time by 1 + the ratio of the costs of using
	// scavenged memory vs. scavenging memory. This forces us to pay down
	// the cost of reusing this memory eagerly by sleeping for a longer period
	// of time and scavenging less frequently. More concretely, we avoid situations
	// where we end up scavenging so often that we hurt allocation performance
	// because of the additional overheads of using scavenged memory.
	worked *= 1 + scavengeCostRatio

	// sleepTime is the amount of time we're going to sleep, based on the amount
	// of time we worked, and the sleepRatio.
	sleepTime := int64(worked / s.sleepRatio)

	var slept int64
	if s.sleepStub == nil {
		// Set the timer.
		//
		// This must happen here instead of inside gopark
		// because we can't close over any variables without
		// failing escape analysis.
		start := nanotime()
		s.timer.reset(start+sleepTime, 0)

		// Mark ourselves as asleep and go to sleep.
		s.parked = true
		goparkunlock(&s.lock, waitReasonSleep, traceBlockSleep, 2)

		// How long we actually slept for.
		slept = nanotime() - start

		lock(&s.lock)
		// Stop the timer here because s.wake is unable to do it for us.
		// We don't really care if we succeed in stopping the timer. One
		// reason we might fail is that we've already woken up, but the timer
		// might be in the process of firing on some other P; essentially we're
		// racing with it. That's totally OK. Double wake-ups are perfectly safe.
		s.timer.stop()
		unlock(&s.lock)
	} else {
		unlock(&s.lock)
		slept = s.sleepStub(sleepTime)
	}

	// Stop here if we're cooling down from the controller.
	if s.controllerCooldown > 0 {
		// worked and slept aren't exact measures of time, but it's OK to be a bit
		// sloppy here. We're just hoping we're avoiding some transient bad behavior.
		t := slept + int64(worked)
		if t > s.controllerCooldown {
			s.controllerCooldown = 0
		} else {
			s.controllerCooldown -= t
		}
		return
	}

	// idealFraction is the ideal % of overall application CPU time that we
	// spend scavenging.
	idealFraction := float64(scavengePercent) / 100.0

	// Calculate the CPU time spent.
	//
	// This may be slightly inaccurate with respect to GOMAXPROCS, but we're
	// recomputing this often enough relative to GOMAXPROCS changes in general
	// (it only changes when the world is stopped, and not during a GC) that
	// that small inaccuracy is in the noise.
	cpuFraction := worked / ((float64(slept) + worked) * float64(s.gomaxprocs()))

	// Update the critSleepRatio, adjusting until we reach our ideal fraction.
	var ok bool
	s.sleepRatio, ok = s.sleepController.next(cpuFraction, idealFraction, float64(slept)+worked)
	if !ok {
		// The core assumption of the controller, that we can get a proportional
		// response, broke down. This may be transient, so temporarily switch to
		// sleeping a fixed, conservative amount.
		s.sleepRatio = startingScavSleepRatio
		s.controllerCooldown = 5e9 // 5 seconds.

		// Signal the scav trace printer to output this.
		s.controllerFailed()
	}
}
// controllerFailed indicates that the scavenger's scheduling
// controller failed.
//
// The flag is consumed (and cleared) by printScavTrace.
func (s *scavengerState) controllerFailed() {
	lock(&s.lock)
	s.printControllerReset = true
	unlock(&s.lock)
}
// run is the body of the main scavenging loop.
//
// Returns the number of bytes released and the estimated time spent
// releasing those bytes.
//
// Must be run on the scavenger goroutine.
func (s *scavengerState) run() (released uintptr, worked float64) {
	lock(&s.lock)
	if getg() != s.g {
		throw("tried to run scavenger from another goroutine")
	}
	unlock(&s.lock)

	for worked < minScavWorkTime {
		// If something from outside tells us to stop early, stop.
		if s.shouldStop() {
			break
		}

		// scavengeQuantum is the amount of memory we try to scavenge
		// in one go. A smaller value means the scavenger is more responsive
		// to the scheduler in case of e.g. preemption. A larger value means
		// that the overheads of scavenging are better amortized, so better
		// scavenging throughput.
		//
		// The current value is chosen assuming a cost of ~10µs/physical page
		// (this is somewhat pessimistic), which implies a worst-case latency of
		// about 160µs for 4 KiB physical pages. The current value is biased
		// toward latency over throughput.
		const scavengeQuantum = 64 << 10

		// Accumulate the amount of time spent scavenging.
		r, duration := s.scavenge(scavengeQuantum)

		// On some platforms we may see duration == 0 (i.e. start >= end in
		// s.scavenge) if the time it takes to scavenge memory is less than
		// the minimum granularity of its clock (e.g. Windows) or due to
		// clock bugs.
		//
		// In this case, just assume scavenging takes 10 µs per regular physical page
		// (determined empirically), and conservatively ignore the impact of huge pages
		// on timing.
		const approxWorkedNSPerPhysicalPage = 10e3
		if duration == 0 {
			worked += approxWorkedNSPerPhysicalPage * float64(r/physPageSize)
		} else {
			// TODO(mknyszek): If duration is small compared to worked, it could be
			// rounded down to zero. Probably not a problem in practice because the
			// values are all within a few orders of magnitude of each other but maybe
			// worth worrying about.
			worked += float64(duration)
		}
		released += r

		// scavenge does not return until it either finds the requisite amount of
		// memory to scavenge, or exhausts the heap. If we haven't found enough
		// to scavenge, then the heap must be exhausted.
		if r < scavengeQuantum {
			break
		}
		// When using fake time just do one loop.
		if faketime != 0 {
			break
		}
	}
	if released > 0 && released < physPageSize {
		// If this happens, it means that we may have attempted to release part
		// of a physical page, but the likely effect of that is that it released
		// the whole physical page, some of which may have still been in-use.
		// This could lead to memory corruption. Throw.
		throw("released less than one physical page of memory")
	}
	return
}
// Background scavenger.
//
// The background scavenger maintains the RSS of the application below
// the line described by the proportional scavenging statistics in
// the mheap struct.
func bgscavenge(c chan int) {
	scavenger.init()

	// Signal the creator that initialization is complete, then park until
	// woken (see wake/ready).
	c <- 1
	scavenger.park()

	for {
		released, workTime := scavenger.run()
		if released == 0 {
			// No scavengable memory right now; park instead of spinning,
			// and wait to be woken when there may be work again.
			scavenger.park()
			continue
		}
		mheap_.pages.scav.releasedBg.Add(released)
		scavenger.sleep(workTime)
	}
}
// scavenge scavenges nbytes worth of free pages, starting with the
// highest address first. Successive calls continue from where it left
// off until the heap is exhausted. force makes all memory available to
// scavenge, ignoring huge page heuristics.
//
// Returns the amount of memory scavenged in bytes.
//
// scavenge always tries to scavenge nbytes worth of memory, and will
// only fail to do so if the heap is exhausted for now.
func (p *pageAlloc) scavenge(nbytes uintptr, shouldStop func() bool, force bool) uintptr {
	released := uintptr(0)
	for released < nbytes {
		ci, pageIdx := p.scav.index.find(force)
		if ci == 0 {
			// The index found no chunk with scavengable pages.
			break
		}
		// scavengeOne must run on the system stack because it acquires
		// p.mheapLock.
		systemstack(func() {
			released += p.scavengeOne(ci, pageIdx, nbytes-released)
		})
		// shouldStop is an optional early-exit hook, e.g. for policy that
		// decides enough has been scavenged already.
		if shouldStop != nil && shouldStop() {
			break
		}
	}
	return released
}
// printScavTrace prints a scavenge trace line to standard error.
//
// releasedBg and releasedEager should be the amounts of memory released by the
// background scavenger and by eager scavenging, respectively, since the last
// time this was called. forced indicates whether the scavenge was forced by
// the application.
//
// scavenger.lock must be held.
func printScavTrace(releasedBg, releasedEager uintptr, forced bool) {
	assertLockHeld(&scavenger.lock)

	printlock()
	print("scav ",
		releasedBg>>10, " KiB work (bg), ",
		releasedEager>>10, " KiB work (eager), ",
		gcController.heapReleased.load()>>10, " KiB now, ",
		(gcController.heapInUse.load()*100)/heapRetained(), "% util",
	)
	if forced {
		print(" (forced)")
	} else if scavenger.printControllerReset {
		// One-shot marker set by controllerFailed; clear it once reported.
		print(" [controller reset]")
		scavenger.printControllerReset = false
	}
	println()
	printunlock()
}
// scavengeOne walks over the chunk at chunk index ci and searches for
// a contiguous run of pages to scavenge. It will try to scavenge
// at most max bytes at once, but may scavenge more to avoid
// breaking huge pages. Once it scavenges some memory it returns
// how much it scavenged in bytes.
//
// searchIdx is the page index to start searching from in ci.
//
// Returns the number of bytes scavenged.
//
// Must run on the systemstack because it acquires p.mheapLock.
//
//go:systemstack
func (p *pageAlloc) scavengeOne(ci chunkIdx, searchIdx uint, max uintptr) uintptr {
	// Calculate the maximum number of pages to scavenge.
	//
	// This should be alignUp(max, pageSize) / pageSize but max can and will
	// be ^uintptr(0), so we need to be very careful not to overflow here.
	// Rather than use alignUp, calculate the number of pages rounded down
	// first, then add back one if necessary.
	maxPages := max / pageSize
	if max%pageSize != 0 {
		maxPages++
	}

	// Calculate the minimum number of pages we can scavenge.
	//
	// Because we can only scavenge whole physical pages, we must
	// ensure that we scavenge at least minPages each time, aligned
	// to minPages*pageSize.
	minPages := physPageSize / pageSize
	if minPages < 1 {
		minPages = 1
	}

	lock(p.mheapLock)
	if p.summary[len(p.summary)-1][ci].max() >= uint(minPages) {
		// We only bother looking for a candidate if there are at least
		// minPages free pages at all.
		base, npages := p.chunkOf(ci).findScavengeCandidate(searchIdx, minPages, maxPages)

		// If we found something, scavenge it and return!
		if npages != 0 {
			// Compute the full address for the start of the range.
			addr := chunkBase(ci) + uintptr(base)*pageSize

			// Mark the range we're about to scavenge as allocated, because
			// we don't want any allocating goroutines to grab it while
			// the scavenging is in progress. Be careful here -- just do the
			// bare minimum to avoid stepping on our own scavenging stats.
			p.chunkOf(ci).allocRange(base, npages)
			p.update(addr, uintptr(npages), true, true)

			// With that done, it's safe to unlock.
			unlock(p.mheapLock)

			if !p.test {
				// Only perform sys* operations if we're not in a test.
				// It's dangerous to do so otherwise.
				sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)

				// Update global accounting only when not in test, otherwise
				// the runtime's accounting will be wrong.
				nbytes := int64(npages * pageSize)
				gcController.heapReleased.add(nbytes)
				gcController.heapFree.add(-nbytes)

				stats := memstats.heapStats.acquire()
				atomic.Xaddint64(&stats.committed, -nbytes)
				atomic.Xaddint64(&stats.released, nbytes)
				memstats.heapStats.release()
			}

			// Relock the heap, because now we need to make these pages
			// available for allocation. Free them back to the page allocator.
			lock(p.mheapLock)
			if b := (offAddr{addr}); b.lessThan(p.searchAddr) {
				p.searchAddr = b
			}
			p.chunkOf(ci).free(base, npages)
			p.update(addr, uintptr(npages), true, false)

			// Mark the range as scavenged.
			p.chunkOf(ci).scavenged.setRange(base, npages)
			unlock(p.mheapLock)

			return uintptr(npages) * pageSize
		}
	}
	// Mark this chunk as having no free pages.
	p.scav.index.setEmpty(ci)
	unlock(p.mheapLock)

	return 0
}
// fillAligned returns x but with all zeroes in m-aligned
// groups of m bits set to 1 if any bit in the group is non-zero.
//
// For example, fillAligned(0x0100a3, 8) == 0xff00ff.
//
// Note that if m == 1, this is a no-op.
//
// m must be a power of 2 <= maxPagesPerPhysPage.
func fillAligned(x uint64, m uint) uint64 {
	// Groups of a single bit are already "filled" by definition.
	if m == 1 {
		return x
	}
	// Pick the constant whose bit pattern has a 0 at the top of each
	// m-bit group and 1s everywhere else, matching the group width.
	var c uint64
	switch m {
	case 2:
		c = 0x5555555555555555
	case 4:
		c = 0x7777777777777777
	case 8:
		c = 0x7f7f7f7f7f7f7f7f
	case 16:
		c = 0x7fff7fff7fff7fff
	case 32:
		c = 0x7fffffff7fffffff
	case 64: // == maxPagesPerPhysPage
		c = 0x7fffffffffffffff
	default:
		throw("bad m value")
	}
	// The technique used here is derived from
	// https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
	// and extended for more than just bytes (like nibbles
	// and uint16s) by using an appropriate constant.
	//
	// To summarize the technique, quoting from that page:
	// "[It] works by first zeroing the high bits of the [8]
	// bytes in the word. Subsequently, it adds a number that
	// will result in an overflow to the high bit of a byte if
	// any of the low bits were initially set. Next the high
	// bits of the original word are ORed with these values;
	// thus, the high bit of a byte is set iff any bit in the
	// byte was set. Finally, we determine if any of these high
	// bits are zero by ORing with ones everywhere except the
	// high bits and inverting the result."
	//
	// After this step, the top bit of each m-aligned group in x is
	// set iff that group was all zero in the original x.
	x = ^((((x & c) + c) | x) | c)
	// From each group of m bits subtract 1.
	// Because we know only the top bits of each
	// m-aligned group are set, this turns each set top bit into a
	// group with every bit set except the top one; OR with the
	// intermediate result to fill the whole group, then invert to
	// produce the final answer.
	return ^((x - (x >> (m - 1))) | x)
}
// findScavengeCandidate returns a start index and a size for this pallocData
// segment which represents a contiguous region of free and unscavenged memory.
//
// searchIdx indicates the page index within this chunk to start the search, but
// note that findScavengeCandidate searches backwards through the pallocData. As
// a result, it will return the highest scavenge candidate in address order.
//
// min indicates a hard minimum size and alignment for runs of pages. That is,
// findScavengeCandidate will not return a region smaller than min pages in size,
// or that is min pages or greater in size but not aligned to min. min must be
// a non-zero power of 2 <= maxPagesPerPhysPage.
//
// max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
// findScavengeCandidate effectively returns entire free and unscavenged regions.
// If max < pallocChunkPages, it may truncate the returned region such that size is
// max. However, findScavengeCandidate may still return a larger region if, for
// example, it chooses to preserve huge pages, or if max is not aligned to min (it
// will round up). That is, even if max is small, the returned size is not guaranteed
// to be equal to max. max is allowed to be less than min, in which case it is as if
// max == min.
func (m *pallocData) findScavengeCandidate(searchIdx uint, minimum, max uintptr) (uint, uint) {
	if minimum&(minimum-1) != 0 || minimum == 0 {
		print("runtime: min = ", minimum, "\n")
		throw("min must be a non-zero power of 2")
	} else if minimum > maxPagesPerPhysPage {
		print("runtime: min = ", minimum, "\n")
		throw("min too large")
	}
	// max may not be min-aligned, so we might accidentally truncate to
	// a max value which causes us to return a non-min-aligned value.
	// To prevent this, align max up to a multiple of min (which is always
	// a power of 2). This also prevents max from ever being less than
	// min, unless it's zero, so handle that explicitly.
	if max == 0 {
		max = minimum
	} else {
		max = alignUp(max, minimum)
	}

	i := int(searchIdx / 64)
	// Start by quickly skipping over blocks of non-free or scavenged pages.
	for ; i >= 0; i-- {
		// 1s are scavenged OR non-free => 0s are unscavenged AND free
		x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(minimum))
		if x != ^uint64(0) {
			break
		}
	}
	if i < 0 {
		// Failed to find any free/unscavenged pages.
		return 0, 0
	}
	// We have something in the 64-bit chunk at i, but it could
	// extend further. Loop until we find the extent of it.

	// 1s are scavenged OR non-free => 0s are unscavenged AND free
	x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(minimum))
	// z1 counts the leading 1s in ^x, i.e. the leading run of
	// scavenged/non-free pages above the candidate.
	z1 := uint(sys.LeadingZeros64(^x))
	run, end := uint(0), uint(i)*64+(64-z1)
	if x<<z1 != 0 {
		// After shifting out z1 bits, we still have 1s,
		// so the run ends inside this word.
		run = uint(sys.LeadingZeros64(x << z1))
	} else {
		// After shifting out z1 bits, we have no more 1s.
		// This means the run extends to the bottom of the
		// word so it may extend into further words.
		run = 64 - z1
		for j := i - 1; j >= 0; j-- {
			x := fillAligned(m.scavenged[j]|m.pallocBits[j], uint(minimum))
			run += uint(sys.LeadingZeros64(x))
			if x != 0 {
				// The run stopped in this word.
				break
			}
		}
	}

	// Split the run we found if it's larger than max but hold on to
	// our original length, since we may need it later.
	size := min(run, uint(max))
	start := end - size

	// Each huge page is guaranteed to fit in a single palloc chunk.
	//
	// TODO(mknyszek): Support larger huge page sizes.
	// TODO(mknyszek): Consider taking pages-per-huge-page as a parameter
	// so we can write tests for this.
	if physHugePageSize > pageSize && physHugePageSize > physPageSize {
		// We have huge pages, so let's ensure we don't break one by scavenging
		// over a huge page boundary. If the range [start, start+size) overlaps with
		// a free-and-unscavenged huge page, we want to grow the region we scavenge
		// to include that huge page.

		// Compute the huge page boundary above our candidate.
		pagesPerHugePage := physHugePageSize / pageSize
		hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))

		// If that boundary is within our current candidate, then we may be breaking
		// a huge page.
		if hugePageAbove <= end {
			// Compute the huge page boundary below our candidate.
			hugePageBelow := uint(alignDown(uintptr(start), pagesPerHugePage))

			if hugePageBelow >= end-run {
				// We're in danger of breaking apart a huge page since start+size crosses
				// a huge page boundary and rounding down start to the nearest huge
				// page boundary is included in the full run we found. Include the entire
				// huge page in the bound by rounding down to the huge page size.
				size = size + (start - hugePageBelow)
				start = hugePageBelow
			}
		}
	}
	return start, size
}
// scavengeIndex is a structure for efficiently managing which pageAlloc chunks have
// memory available to scavenge.
type scavengeIndex struct {
	// chunks is a scavChunkData-per-chunk structure that indicates the presence of pages
	// available for scavenging. Updates to the index are serialized by the pageAlloc lock.
	//
	// It tracks chunk occupancy and a generation counter per chunk. If a chunk's occupancy
	// never exceeds the high-density threshold (see scavChunkHiOccPages) over the course of
	// a single GC cycle, the chunk becomes eligible for scavenging on the next cycle. If a
	// chunk ever hits this density threshold it immediately becomes unavailable for
	// scavenging in the current cycle as well as the next.
	//
	// [min, max) represents the range of chunks that is safe to access (i.e. will not cause
	// a fault). As an optimization minHeapIdx represents the true minimum chunk that has been
	// mapped, since min is likely rounded down to include the system page containing minHeapIdx.
	//
	// For a chunk size of 4 MiB this structure will only use 2 MiB for a 1 TiB contiguous heap.
	chunks     []atomicScavChunkData
	min, max   atomic.Uintptr
	minHeapIdx atomic.Uintptr

	// searchAddr* is the maximum address (in the offset address space, so we have a linear
	// view of the address space; see mranges.go:offAddr) containing memory available to
	// scavenge. It is a hint to the find operation to avoid O(n^2) behavior in repeated lookups.
	//
	// searchAddr* is always inclusive and should be the base address of the highest runtime
	// page available for scavenging.
	//
	// searchAddrForce is managed by find and free.
	// searchAddrBg is managed by find and nextGen.
	//
	// Normally, find monotonically decreases searchAddr* as it finds no more free pages to
	// scavenge. However, mark, when marking a new chunk at an index greater than the current
	// searchAddr, sets searchAddr to the *negative* index into chunks of that page. The trick here
	// is that concurrent calls to find will fail to monotonically decrease searchAddr*, and so they
	// won't barge over new memory becoming available to scavenge. Furthermore, this ensures
	// that some future caller of find *must* observe the new high index. That caller
	// (or any other racing with it), then makes searchAddr positive before continuing, bringing
	// us back to our monotonically decreasing steady-state.
	//
	// A pageAlloc lock serializes updates between min, max, and searchAddr, so abs(searchAddr)
	// is always guaranteed to be >= min and < max (converted to heap addresses).
	//
	// searchAddrBg is increased only on each new generation and is mainly used by the
	// background scavenger and heap-growth scavenging. searchAddrForce is increased continuously
	// as memory gets freed and is mainly used by eager memory reclaim such as debug.FreeOSMemory
	// and scavenging to maintain the memory limit.
	searchAddrBg    atomicOffAddr
	searchAddrForce atomicOffAddr

	// freeHWM is the highest address (in offset address space) that was freed
	// this generation.
	freeHWM offAddr

	// gen is the generation counter. Updated by nextGen at the end of each mark phase.
	gen uint32

	// test indicates whether or not we're in a test.
	test bool
}
// init initializes the scavengeIndex.
//
// Returns the amount added to sysStat.
func (s *scavengeIndex) init(test bool, sysStat *sysMemStat) uintptr {
	// Reset both search-address hints to their cleared state.
	s.searchAddrForce.Clear()
	s.searchAddrBg.Clear()
	// No memory has been freed yet this generation.
	s.freeHWM = minOffAddr
	s.test = test
	// Platform-specific setup of the backing store.
	return s.sysInit(test, sysStat)
}
// grow updates the index's backing store in response to a heap growth.
//
// Returns the amount of memory added to sysStat.
func (s *scavengeIndex) grow(base, limit uintptr, sysStat *sysMemStat) uintptr {
	// Even when no mapping work is needed, the heap may now begin at a
	// lower address, so keep minHeapIdx current. Zero means "unset";
	// the 0'th chunk is never mapped (see the comment in find).
	newIdx := uintptr(chunkIndex(base))
	if cur := s.minHeapIdx.Load(); cur == 0 || newIdx < cur {
		s.minHeapIdx.Store(newIdx)
	}
	// Delegate the actual mapping work to the platform-specific sysGrow.
	return s.sysGrow(base, limit, sysStat)
}
// find returns the highest chunk index that may contain pages available to scavenge.
// It also returns an offset to start searching in the highest chunk.
func (s *scavengeIndex) find(force bool) (chunkIdx, uint) {
	// Pick the search-address hint for this kind of scavenging:
	// eager (forced) scavenging uses searchAddrForce, the background
	// scavenger uses searchAddrBg.
	cursor := &s.searchAddrBg
	if force {
		cursor = &s.searchAddrForce
	}
	searchAddr, marked := cursor.Load()
	if searchAddr == minOffAddr.addr() {
		// We got a cleared search addr.
		return 0, 0
	}

	// Starting from searchAddr's chunk, iterate until we find a chunk with pages to scavenge.
	gen := s.gen
	min := chunkIdx(s.minHeapIdx.Load())
	start := chunkIndex(searchAddr)
	// N.B. We'll never map the 0'th chunk, so minHeapIdx ensures this loop never overflows.
	for i := start; i >= min; i-- {
		// Skip over chunks with nothing to scavenge this generation.
		if !s.chunks[i].load().shouldScavenge(gen, force) {
			continue
		}
		// We're still scavenging this chunk.
		if i == start {
			return i, chunkPageIndex(searchAddr)
		}
		// Try to reduce searchAddr to newSearchAddr.
		newSearchAddr := chunkBase(i) + pallocChunkBytes - pageSize
		if marked {
			// Attempt to be the first one to decrease the searchAddr
			// after an increase. If we fail, that means there was another
			// increase, or somebody else got to it before us. Either way,
			// it doesn't matter. We may lose some performance having an
			// incorrect search address, but it's far more important that
			// we don't miss updates.
			cursor.StoreUnmark(searchAddr, newSearchAddr)
		} else {
			// Decrease searchAddr.
			cursor.StoreMin(newSearchAddr)
		}
		return i, pallocChunkPages - 1
	}
	// Clear searchAddr, because we've exhausted the heap.
	cursor.Clear()
	return 0, 0
}
// alloc updates metadata for chunk at index ci with the fact that
// an allocation of npages occurred. It also eagerly attempts to collapse
// the chunk's memory into hugepage if the chunk has become sufficiently
// dense and we're not allocating the whole chunk at once (which suggests
// the allocation is part of a bigger one and it's probably not worth
// eagerly collapsing).
//
// alloc may only run concurrently with find.
func (s *scavengeIndex) alloc(ci chunkIdx, npages uint) {
	// Read-modify-write of the packed per-chunk state. This is safe
	// against a concurrent find, which only loads chunk state.
	sc := s.chunks[ci].load()
	sc.alloc(npages, s.gen)
	// TODO(mknyszek): Consider eagerly backing memory with huge pages
	// here and track whether we believe this chunk is backed by huge pages.
	// In the past we've attempted to use sysHugePageCollapse (which uses
	// MADV_COLLAPSE on Linux, and is unsupported elsewhere) for this purpose,
	// but that caused performance issues in production environments.
	s.chunks[ci].store(sc)
}
// free updates metadata for chunk at index ci with the fact that
// a free of npages occurred, starting at page within the chunk.
//
// free may only run concurrently with find.
func (s *scavengeIndex) free(ci chunkIdx, page, npages uint) {
	sc := s.chunks[ci].load()
	sc.free(npages, s.gen)
	s.chunks[ci].store(sc)

	// Update scavenge search addresses.
	addr := chunkBase(ci) + uintptr(page+npages-1)*pageSize
	// Track the high-water mark of freed memory this generation;
	// nextGen uses it to bump searchAddrBg.
	if s.freeHWM.lessThan(offAddr{addr}) {
		s.freeHWM = offAddr{addr}
	}
	// N.B. Because free is serialized, it's not necessary to do a
	// full CAS here. free only ever increases searchAddr, while
	// find only ever decreases it. Since we only ever race with
	// decreases, even if the value we loaded is stale, the actual
	// value will never be larger.
	searchAddr, _ := s.searchAddrForce.Load()
	if (offAddr{searchAddr}).lessThan(offAddr{addr}) {
		s.searchAddrForce.StoreMarked(addr)
	}
}
// nextGen moves the scavenger forward one generation. Must be called
// once per GC cycle, but may be called more often to force more memory
// to be released.
//
// nextGen may only run concurrently with find.
func (s *scavengeIndex) nextGen() {
	s.gen++
	// If anything was freed above the background search address this
	// generation, move the address back up so the background scavenger
	// revisits that memory next cycle.
	searchAddr, _ := s.searchAddrBg.Load()
	if (offAddr{searchAddr}).lessThan(s.freeHWM) {
		s.searchAddrBg.StoreMarked(s.freeHWM.addr())
	}
	// Reset the high-water mark for the new generation.
	s.freeHWM = minOffAddr
}
// setEmpty marks that the scavenger has finished looking at ci
// for now to prevent the scavenger from getting stuck looking
// at the same chunk.
//
// setEmpty may only run concurrently with find.
func (s *scavengeIndex) setEmpty(ci chunkIdx) {
	// Read-modify-write: clear the hasFree flag and publish the result.
	sc := s.chunks[ci].load()
	sc.setEmpty()
	s.chunks[ci].store(sc)
}
// atomicScavChunkData is an atomic wrapper around a scavChunkData
// that stores it in its packed form.
type atomicScavChunkData struct {
	value atomic.Uint64 // packed scavChunkData; see pack and unpackScavChunkData
}
// load loads and unpacks a scavChunkData. It is safe to call
// concurrently with store (see scavengeIndex.alloc and free).
func (sc *atomicScavChunkData) load() scavChunkData {
	return unpackScavChunkData(sc.value.Load())
}
// store packs and writes a new scavChunkData. store must be serialized
// with other calls to store.
func (sc *atomicScavChunkData) store(ssc scavChunkData) {
	// A single atomic write keeps concurrent loads consistent.
	sc.value.Store(ssc.pack())
}
// scavChunkData tracks information about a palloc chunk for
// scavenging. It packs well into 64 bits.
//
// Convert to and from the packed form with pack and unpackScavChunkData.
//
// The zero value always represents a valid newly-grown chunk.
type scavChunkData struct {
	// inUse indicates how many pages in this chunk are currently
	// allocated.
	//
	// Only the first 10 bits are used.
	inUse uint16

	// lastInUse indicates how many pages in this chunk were allocated
	// when we transitioned from gen-1 to gen.
	//
	// Only the first 10 bits are used.
	lastInUse uint16

	// gen is the generation counter from a scavengeIndex from the
	// last time this scavChunkData was updated.
	gen uint32

	// scavChunkFlags represents additional flags
	//
	// Note: only 6 bits are available.
	scavChunkFlags
}
// unpackScavChunkData unpacks a scavChunkData from a uint64.
func unpackScavChunkData(sc uint64) scavChunkData {
	// Layout, low to high bits: inUse (16 bits), lastInUse (16 bits, of
	// which only logScavChunkInUseMax are meaningful), flags, gen (32 bits).
	var out scavChunkData
	out.inUse = uint16(sc)
	out.lastInUse = uint16(sc>>16) & scavChunkInUseMask
	out.gen = uint32(sc >> 32)
	out.scavChunkFlags = scavChunkFlags(uint8(sc>>(16+logScavChunkInUseMax)) & scavChunkFlagsMask)
	return out
}
// pack returns sc packed into a uint64. It is the inverse of
// unpackScavChunkData.
func (sc scavChunkData) pack() uint64 {
	packed := uint64(sc.inUse)
	packed |= uint64(sc.lastInUse) << 16
	packed |= uint64(sc.scavChunkFlags) << (16 + logScavChunkInUseMax)
	packed |= uint64(sc.gen) << 32
	return packed
}
const (
	// scavChunkHasFree indicates whether the chunk has anything left to
	// scavenge. This is the opposite of "empty," used elsewhere in this
	// file. The reason we say "HasFree" here is so the zero value is
	// correct for a newly-grown chunk. (New memory is scavenged.)
	scavChunkHasFree scavChunkFlags = 1 << iota

	// scavChunkMaxFlags is the maximum number of flags we can have, given how
	// a scavChunkData is packed into 8 bytes.
	scavChunkMaxFlags = 6
	// scavChunkFlagsMask selects the flag bits of a packed scavChunkData.
	scavChunkFlagsMask = (1 << scavChunkMaxFlags) - 1

	// logScavChunkInUseMax is the number of bits needed to represent the number
	// of pages allocated in a single chunk. This is 1 more than log2 of the
	// number of pages in the chunk because we need to represent a fully-allocated
	// chunk.
	logScavChunkInUseMax = logPallocChunkPages + 1
	// scavChunkInUseMask selects the page-count bits of a packed scavChunkData.
	scavChunkInUseMask = (1 << logScavChunkInUseMax) - 1
)
// scavChunkFlags is a set of bit-flags for the scavenger for each palloc chunk.
// It occupies the flag bits of a packed scavChunkData (at most scavChunkMaxFlags bits).
type scavChunkFlags uint8
// isEmpty returns true if the hasFree flag is unset, i.e. the chunk
// has nothing left for the scavenger.
func (sc *scavChunkFlags) isEmpty() bool {
	return (*sc)&scavChunkHasFree == 0
}
// setEmpty clears the hasFree flag, marking the chunk as having nothing
// left to scavenge.
func (sc *scavChunkFlags) setEmpty() {
	*sc &^= scavChunkHasFree
}
// setNonEmpty sets the hasFree flag, marking the chunk as having memory
// available to scavenge.
func (sc *scavChunkFlags) setNonEmpty() {
	*sc |= scavChunkHasFree
}
// shouldScavenge returns true if the corresponding chunk should be interrogated
// by the scavenger.
func (sc scavChunkData) shouldScavenge(currGen uint32, force bool) bool {
	switch {
	case sc.isEmpty():
		// Nothing to scavenge.
		return false
	case force:
		// We're forcing the memory to be scavenged.
		return true
	case sc.gen == currGen:
		// In the current generation, scavenge only if neither the
		// current nor the last generation saw dense occupancy.
		return sc.inUse < scavChunkHiOccPages && sc.lastInUse < scavChunkHiOccPages
	default:
		// One or more generations ahead: inUse reflects the chunk's
		// current state, since otherwise it would've been updated already.
		return sc.inUse < scavChunkHiOccPages
	}
}
// alloc updates sc given that npages were allocated in the corresponding chunk.
func (sc *scavChunkData) alloc(npages uint, newGen uint32) {
	if uint(sc.inUse)+npages > pallocChunkPages {
		print("runtime: inUse=", sc.inUse, " npages=", npages, "\n")
		throw("too many pages allocated in chunk?")
	}
	// On the first update in a new generation, snapshot the occupancy
	// the chunk had when the generation changed.
	if sc.gen != newGen {
		sc.lastInUse = sc.inUse
		sc.gen = newGen
	}
	sc.inUse += uint16(npages)
	if sc.inUse == pallocChunkPages {
		// There's nothing for the scavenger to take from here.
		sc.setEmpty()
	}
}
// free updates sc given that npages was freed in the corresponding chunk.
func (sc *scavChunkData) free(npages uint, newGen uint32) {
	if uint(sc.inUse) < npages {
		print("runtime: inUse=", sc.inUse, " npages=", npages, "\n")
		throw("allocated pages below zero?")
	}
	// On the first update in a new generation, snapshot the occupancy
	// the chunk had when the generation changed.
	if sc.gen != newGen {
		sc.lastInUse = sc.inUse
		sc.gen = newGen
	}
	sc.inUse -= uint16(npages)
	// The scavenger can no longer be done with this chunk now that
	// new memory has been freed into it.
	sc.setNonEmpty()
}
// piController implements a simple PI (proportional-integral) controller.
// The time units of ti, tt, and the period passed to next are whatever
// the caller chooses, as long as they are consistent.
type piController struct {
	kp float64 // Proportional constant.
	ti float64 // Integral time constant.
	tt float64 // Reset time.

	min, max float64 // Output boundaries.

	// PI controller state.
	errIntegral float64 // Integral of the error from t=0 to now.

	// Error flags.
	errOverflow   bool // Set if errIntegral ever overflowed.
	inputOverflow bool // Set if an operation with the input overflowed.
}
// next provides a new sample to the controller.
//
// input is the sample, setpoint is the desired point, and period is how much
// time (in whatever unit makes the most sense) has passed since the last sample.
//
// Returns a new value for the variable it's controlling, and whether the operation
// completed successfully. One reason this might fail is if error has been growing
// in an unbounded manner, to the point of overflow.
//
// In the specific case of an error overflow occurs, the errOverflow field will be
// set and the rest of the controller's internal state will be fully reset.
func (c *piController) next(input, setpoint, period float64) (float64, bool) {
	// Raw output: proportional term plus the accumulated integral term.
	// Keep the multiply and add as separate statements so the result is
	// deterministic (no fused multiply-add).
	err := setpoint - input
	prop := c.kp * err
	rawOutput := prop + c.errIntegral

	// A non-finite result means the input (or some operation on it)
	// overflowed. Set a flag and reset; that's the safest thing to do.
	if isInf(rawOutput) || isNaN(rawOutput) {
		c.reset()
		c.inputOverflow = true
		return c.min, false
	}

	// Clamp rawOutput into the configured output range.
	output := rawOutput
	switch {
	case output < c.min:
		output = c.min
	case output > c.max:
		output = c.max
	}

	// Update the integral state. The (output - rawOutput) term feeds any
	// clamping back into the integral via the reset time tt.
	if c.ti != 0 && c.tt != 0 {
		c.errIntegral += (c.kp*period/c.ti)*err + (period/c.tt)*(output-rawOutput)
		if isInf(c.errIntegral) || isNaN(c.errIntegral) {
			// So much error has accumulated that we managed to overflow.
			// The assumptions around the controller have likely broken down.
			// Set a flag and reset. That's the safest thing to do.
			c.reset()
			c.errOverflow = true
			return c.min, false
		}
	}
	return output, true
}
// reset resets the controller state, except for controller error flags.
func (c *piController) reset() {
	// Only the integral accumulates across samples; the error flags are
	// deliberately preserved so callers can observe past failures.
	c.errIntegral = 0
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector: stack objects and stack tracing
// See the design doc at https://docs.google.com/document/d/1un-Jn47yByHL7I0aVIP_uVCMxjdM5mpelJhiKlIqxkE/edit?usp=sharing
// Also see issue 22350.
// Stack tracing solves the problem of determining which parts of the
// stack are live and should be scanned. It runs as part of scanning
// a single goroutine stack.
//
// Normally determining which parts of the stack are live is easy to
// do statically, as user code has explicit references (reads and
// writes) to stack variables. The compiler can do a simple dataflow
// analysis to determine liveness of stack variables at every point in
// the code. See cmd/compile/internal/gc/plive.go for that analysis.
//
// However, when we take the address of a stack variable, determining
// whether that variable is still live is less clear. We can still
// look for static accesses, but accesses through a pointer to the
// variable are difficult in general to track statically. That pointer
// can be passed among functions on the stack, conditionally retained,
// etc.
//
// Instead, we will track pointers to stack variables dynamically.
// All pointers to stack-allocated variables will themselves be on the
// stack somewhere (or in associated locations, like defer records), so
// we can find them all efficiently.
//
// Stack tracing is organized as a mini garbage collection tracing
// pass. The objects in this garbage collection are all the variables
// on the stack whose address is taken, and which themselves contain a
// pointer. We call these variables "stack objects".
//
// We begin by determining all the stack objects on the stack and all
// the statically live pointers that may point into the stack. We then
// process each pointer to see if it points to a stack object. If it
// does, we scan that stack object. It may contain pointers into the
// heap, in which case those pointers are passed to the main garbage
// collection. It may also contain pointers into the stack, in which
// case we add them to our set of stack pointers.
//
// Once we're done processing all the pointers (including the ones we
// added during processing), we've found all the stack objects that
// are live. Any dead stack objects are not scanned and their contents
// will not keep heap objects live. Unlike the main garbage
// collection, we can't sweep the dead stack objects; they live on in
// a moribund state until the stack frame that contains them is
// popped.
//
// A stack can look like this:
//
// +----------+
// | foo() |
// | +------+ |
// | | A | | <---\
// | +------+ | |
// | | |
// | +------+ | |
// | | B | | |
// | +------+ | |
// | | |
// +----------+ |
// | bar() | |
// | +------+ | |
// | | C | | <-\ |
// | +----|-+ | | |
// | | | | |
// | +----v-+ | | |
// | | D ---------/
// | +------+ | |
// | | |
// +----------+ |
// | baz() | |
// | +------+ | |
// | | E -------/
// | +------+ |
// | ^ |
// | F: --/ |
// | |
// +----------+
//
// foo() calls bar() calls baz(). Each has a frame on the stack.
// foo() has stack objects A and B.
// bar() has stack objects C and D, with C pointing to D and D pointing to A.
// baz() has a stack object E pointing to C, and a local variable F pointing to E.
//
// Starting from the pointer in local variable F, we will eventually
// scan all of E, C, D, and A (in that order). B is never scanned
// because there is no live pointer to it. If B is also statically
// dead (meaning that foo() never accesses B again after it calls
// bar()), then B's pointers into the heap are not considered live.
package runtime
import (
"internal/goarch"
"internal/runtime/sys"
"unsafe"
)
// stackTraceDebug gates extra debugging behavior in the stack tracing
// code; its uses lie outside this excerpt.
const stackTraceDebug = false
// Buffer for pointers found during stack tracing.
// Must be smaller than or equal to workbuf.
type stackWorkBuf struct {
	_ sys.NotInHeap
	stackWorkBufHdr
	// obj is sized so the whole struct fits within a workbuf,
	// which is where these buffers are allocated from (see putPtr).
	obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
}
// stackWorkBufHdr is the header for a stackWorkBuf, layered on workbufhdr.
//
// Header declaration must come after the buf declaration above, because of issue #14620.
type stackWorkBufHdr struct {
	_ sys.NotInHeap
	workbufhdr
	next *stackWorkBuf // linked list of workbufs
	// Note: we could theoretically repurpose lfnode.next as this next pointer.
	// It would save 1 word, but that probably isn't worth busting open
	// the lfnode API.
}
// Buffer for stack objects found on a goroutine stack.
// Must be smaller than or equal to workbuf.
type stackObjectBuf struct {
	_ sys.NotInHeap
	stackObjectBufHdr
	// obj is sized so the whole struct fits within a workbuf,
	// which is where these buffers are allocated from (see addObject).
	obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
}
// stackObjectBufHdr is the header for a stackObjectBuf, layered on workbufhdr.
type stackObjectBufHdr struct {
	_ sys.NotInHeap
	workbufhdr
	next *stackObjectBuf // linked list of stack object buffers
}
// init checks at startup that the stack tracing buffer types fit in a
// workbuf, since buffers of both kinds are allocated from the workbuf
// pool (see the getempty calls in putPtr and addObject).
func init() {
	if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) {
		panic("stackWorkBuf too big")
	}
	if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) {
		panic("stackObjectBuf too big")
	}
}
// A stackObject represents a variable on the stack that has had
// its address taken.
type stackObject struct {
	_    sys.NotInHeap
	off  uint32             // offset above stack.lo
	size uint32             // size of object
	r    *stackObjectRecord // info of the object (for ptr/nonptr bits). nil if object has been scanned.
	// left and right are the children in the binary search tree keyed by
	// off, built by buildIndex and traversed by findObject.
	left  *stackObject // objects with lower addresses
	right *stackObject // objects with higher addresses
}
// setRecord stores r into obj.r, i.e. obj.r = r, but with no write barrier.
//
//go:nowritebarrier
func (obj *stackObject) setRecord(r *stackObjectRecord) {
	// Types of stack objects are always in read-only memory, not the heap.
	// So not using a write barrier is ok.
	*(*uintptr)(unsafe.Pointer(&obj.r)) = uintptr(unsafe.Pointer(r))
}
// A stackScanState keeps track of the state used during the GC walk
// of a goroutine.
type stackScanState struct {
	// stack limits
	stack stack

	// conservative indicates that the next frame must be scanned conservatively.
	// This applies only to the innermost frame at an async safe-point.
	conservative bool

	// buf contains the set of possible pointers to stack objects.
	// Organized as a LIFO linked list of buffers.
	// All buffers except possibly the head buffer are full.
	buf     *stackWorkBuf
	freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis

	// cbuf contains conservative pointers to stack objects. If
	// all pointers to a stack object are obtained via
	// conservative scanning, then the stack object may be dead
	// and may contain dead pointers, so it must be scanned
	// defensively.
	cbuf *stackWorkBuf

	// list of stack objects
	// Objects are in increasing address order.
	head  *stackObjectBuf
	tail  *stackObjectBuf
	nobjs int

	// root of binary tree for fast object lookup by address
	// Initialized by buildIndex.
	root *stackObject
}
// putPtr adds p as a potential pointer to a stack object.
// p must be a stack address.
func (s *stackScanState) putPtr(p uintptr, conservative bool) {
	if p < s.stack.lo || p >= s.stack.hi {
		throw("address not a stack address")
	}
	// Conservative pointers go into cbuf, precise ones into buf.
	head := &s.buf
	if conservative {
		head = &s.cbuf
	}
	buf := *head
	if buf == nil {
		// Initial setup.
		buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
		buf.nobj = 0
		buf.next = nil
		*head = buf
	} else if buf.nobj == len(buf.obj) {
		// Head buffer is full; push a fresh one, preferring the cached
		// free buffer to avoid a round trip through the workbuf pool.
		if s.freeBuf != nil {
			buf = s.freeBuf
			s.freeBuf = nil
		} else {
			buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
		}
		buf.nobj = 0
		buf.next = *head
		*head = buf
	}
	buf.obj[buf.nobj] = p
	buf.nobj++
}
// getPtr removes and returns a potential pointer to a stack object.
// Returns 0 if there are no more pointers available.
//
// This prefers non-conservative pointers so we scan stack objects
// precisely if there are any non-conservative pointers to them.
func (s *stackScanState) getPtr() (p uintptr, conservative bool) {
	// Drain s.buf (precise pointers) before s.cbuf (conservative ones).
	for _, head := range []**stackWorkBuf{&s.buf, &s.cbuf} {
		buf := *head
		if buf == nil {
			// Never had any data.
			continue
		}
		if buf.nobj == 0 {
			if s.freeBuf != nil {
				// Free old freeBuf.
				putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
			}
			// Move buf to the freeBuf.
			s.freeBuf = buf
			buf = buf.next
			*head = buf
			if buf == nil {
				// No more data in this list.
				continue
			}
		}
		// Pop from the end of the head buffer (LIFO order).
		buf.nobj--
		return buf.obj[buf.nobj], head == &s.cbuf
	}
	// No more data in either list.
	if s.freeBuf != nil {
		putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
		s.freeBuf = nil
	}
	return 0, false
}
// addObject adds a stack object at addr, with layout described by r,
// to the set of stack objects. Objects must be added in increasing
// address order without overlap.
func (s *stackScanState) addObject(addr uintptr, r *stackObjectRecord) {
	x := s.tail
	if x == nil {
		// initial setup
		x = (*stackObjectBuf)(unsafe.Pointer(getempty()))
		x.next = nil
		s.head = x
		s.tail = x
	}
	if x.nobj > 0 && uint32(addr-s.stack.lo) < x.obj[x.nobj-1].off+x.obj[x.nobj-1].size {
		throw("objects added out of order or overlapping")
	}
	if x.nobj == len(x.obj) {
		// full buffer - allocate a new buffer, add to end of linked list
		y := (*stackObjectBuf)(unsafe.Pointer(getempty()))
		y.next = nil
		x.next = y
		s.tail = y
		x = y
	}
	obj := &x.obj[x.nobj]
	x.nobj++
	// Offsets are stored relative to stack.lo so they fit in 32 bits.
	obj.off = uint32(addr - s.stack.lo)
	obj.size = uint32(r.size)
	obj.setRecord(r)
	// obj.left and obj.right will be initialized by buildIndex before use.
	s.nobjs++
}
// buildIndex initializes s.root to a binary search tree.
// It should be called after all addObject calls but before
// any call of findObject.
func (s *stackScanState) buildIndex() {
	// The returned rest position is ignored because the entire
	// object list is consumed.
	s.root, _, _ = binarySearchTree(s.head, 0, s.nobjs)
}
// binarySearchTree builds a binary search tree with the n objects in the list
// x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ...
// Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx].
// (The first object that was not included in the binary search tree.)
// If n == 0, returns nil, x.
func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int) {
	if n == 0 {
		return nil, x, idx
	}
	var left, right *stackObject
	// Build the left subtree from the first n/2 objects, take the next
	// object as the root, then build the right subtree from the
	// remaining n-n/2-1. Because the input list is in increasing
	// address order, the result is a balanced BST keyed by offset.
	left, x, idx = binarySearchTree(x, idx, n/2)
	root = &x.obj[idx]
	idx++
	if idx == len(x.obj) {
		// Advance to the next buffer in the list.
		x = x.next
		idx = 0
	}
	right, x, idx = binarySearchTree(x, idx, n-n/2-1)
	root.left = left
	root.right = right
	return root, x, idx
}
// findObject returns the stack object containing address a, if any.
// Must have called buildIndex previously.
func (s *stackScanState) findObject(a uintptr) *stackObject {
	// Walk the binary search tree built by buildIndex, keyed by the
	// object's offset above stack.lo.
	target := uint32(a - s.stack.lo)
	node := s.root
	for node != nil {
		if target < node.off {
			node = node.left
		} else if target >= node.off+node.size {
			node = node.right
		} else {
			return node
		}
	}
	return nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector: sweeping
// The sweeper consists of two different algorithms:
//
// * The object reclaimer finds and frees unmarked slots in spans. It
// can free a whole span if none of the objects are marked, but that
// isn't its goal. This can be driven either synchronously by
// mcentral.cacheSpan for mcentral spans, or asynchronously by
// sweepone, which looks at all the mcentral lists.
//
// * The span reclaimer looks for spans that contain no marked objects
// and frees whole spans. This is a separate algorithm because
// freeing whole spans is the hardest task for the object reclaimer,
// but is critical when allocating new spans. The entry point for
// this is mheap_.reclaim and it's driven by a sequential scan of
// the page marks bitmap in the heap arenas.
//
// Both algorithms ultimately call mspan.sweep, which sweeps a single
// heap span.
package runtime
import (
"internal/runtime/atomic"
"unsafe"
)
// sweep is the global state of the background sweeper.
var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock   mutex
	g      *g   // the background sweeper goroutine (set by bgsweep)
	parked bool // whether the sweeper goroutine is parked on lock

	// active tracks outstanding sweepers and the sweep
	// termination condition.
	active activeSweep

	// centralIndex is the current unswept span class.
	// It represents an index into the mcentral span
	// sets. Accessed and updated via its load and
	// update methods. Not protected by a lock.
	//
	// Reset at mark termination.
	// Used by mheap.nextSpanForSweep.
	centralIndex sweepClass
}
// sweepClass is a spanClass and one bit to represent whether we're currently
// sweeping partial or full spans.
type sweepClass uint32

const (
	// numSweepClasses is two states (full/partial) per span class.
	numSweepClasses = numSpanClasses * 2
	// sweepClassDone is a sentinel meaning there is nothing left to sweep.
	sweepClassDone sweepClass = sweepClass(^uint32(0))
)
// load atomically reads the current sweep class.
func (s *sweepClass) load() sweepClass {
	return sweepClass(atomic.Load((*uint32)(s)))
}
// update raises *s to sNew, atomically, if sNew is larger.
func (s *sweepClass) update(sNew sweepClass) {
	// Only update *s if its current value is less than sNew,
	// since *s increases monotonically. The CAS loop retries
	// against concurrent updaters until one of them wins or
	// *s has already reached sNew.
	sOld := s.load()
	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
		sOld = s.load()
	}
	// TODO(mknyszek): This isn't the only place we have
	// an atomic monotonically increasing counter. It would
	// be nice to have an "atomic max" which is just implemented
	// as the above on most architectures. Some architectures
	// like RISC-V however have native support for an atomic max.
}
// clear atomically resets the sweep class to zero (the first class),
// as done at mark termination (see the sweepdata.centralIndex docs).
func (s *sweepClass) clear() {
	atomic.Store((*uint32)(s), 0)
}
// split returns the underlying span class as well as
// whether we're interested in the full or partial
// unswept lists for that class, indicated as a boolean
// (true means "full").
func (s sweepClass) split() (spc spanClass, full bool) {
	// The low bit selects the list (even sweep classes map to the full
	// list); the remaining bits are the span class itself.
	return spanClass(s >> 1), s&1 == 0
}
// nextSpanForSweep finds and pops the next span for sweeping from the
// central sweep buffers. It returns ownership of the span to the caller.
// Returns nil if no such span exists.
func (h *mheap) nextSpanForSweep() *mspan {
	sg := h.sweepgen
	// Resume scanning from the last recorded position rather than
	// from class 0 every time.
	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
		spc, full := sc.split()
		c := &h.central[spc].mcentral
		var s *mspan
		if full {
			s = c.fullUnswept(sg).pop()
		} else {
			s = c.partialUnswept(sg).pop()
		}
		if s != nil {
			// Write down that we found something so future sweepers
			// can start from here.
			sweep.centralIndex.update(sc)
			return s
		}
	}
	// Write down that we found nothing.
	sweep.centralIndex.update(sweepClassDone)
	return nil
}
// sweepDrainedMask is the top bit of activeSweep.state; see below.
const sweepDrainedMask = 1 << 31

// activeSweep is a type that captures whether sweeping
// is done, and whether there are any outstanding sweepers.
//
// Every potential sweeper must call begin() before they look
// for work, and end() after they've finished sweeping.
type activeSweep struct {
	// state is divided into two parts.
	//
	// The top bit (masked by sweepDrainedMask) is a boolean
	// value indicating whether all the sweep work has been
	// drained from the queue.
	//
	// The rest of the bits are a counter, indicating the
	// number of outstanding concurrent sweepers.
	state atomic.Uint32
}
// begin registers a new sweeper. Returns a sweepLocker
// for acquiring spans for sweeping. Any outstanding sweeper blocks
// sweep termination.
//
// If the sweepLocker is invalid, the caller can be sure that all
// outstanding sweep work has been drained, so there is nothing left
// to sweep. Note that there may be sweepers currently running, so
// this does not indicate that all sweeping has completed.
//
// Even if the sweepLocker is invalid, its sweepGen is always valid.
func (a *activeSweep) begin() sweepLocker {
	// CAS loop: atomically increment the sweeper count, unless the
	// drained bit is set, in which case there's nothing to sweep.
	for {
		state := a.state.Load()
		if state&sweepDrainedMask != 0 {
			return sweepLocker{mheap_.sweepgen, false}
		}
		if a.state.CompareAndSwap(state, state+1) {
			return sweepLocker{mheap_.sweepgen, true}
		}
	}
}
// end deregisters a sweeper. Must be called once for each time
// begin is called if the sweepLocker is valid.
func (a *activeSweep) end(sl sweepLocker) {
	if sl.sweepGen != mheap_.sweepgen {
		throw("sweeper left outstanding across sweep generations")
	}
	for {
		state := a.state.Load()
		// (state &^ sweepDrainedMask) is the sweeper count; if it was
		// already zero, subtracting 1 underflows into (or past) the
		// drained bit, which this comparison catches.
		if (state&^sweepDrainedMask)-1 >= sweepDrainedMask {
			throw("mismatched begin/end of activeSweep")
		}
		if a.state.CompareAndSwap(state, state-1) {
			// state-1 == sweepDrainedMask means the counter hit zero
			// with the drained bit set: sweeping is fully complete.
			if state-1 != sweepDrainedMask {
				return
			}
			// We're the last sweeper, and there's nothing left to sweep.
			if debug.gcpacertrace > 0 {
				live := gcController.heapLive.Load()
				print("pacer: sweep done at heap size ", live>>20, "MB; allocated ", (live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
			}
			// Now that sweeping is completely done, flush remaining cleanups.
			gcCleanups.flush()
			return
		}
	}
}
// markDrained marks the active sweep cycle as having drained
// all remaining work. This is safe to be called concurrently
// with all other methods of activeSweep, though may race.
//
// Returns true if this call was the one that actually performed
// the mark.
func (a *activeSweep) markDrained() bool {
	// CAS loop: set the drained bit exactly once; lose the race if
	// someone else set it first.
	for {
		state := a.state.Load()
		if state&sweepDrainedMask != 0 {
			return false
		}
		if a.state.CompareAndSwap(state, state|sweepDrainedMask) {
			return true
		}
	}
}
// sweepers returns the current number of active sweepers
// (the state counter with the drained bit masked off).
func (a *activeSweep) sweepers() uint32 {
	return a.state.Load() &^ sweepDrainedMask
}
// isDone returns true if all sweep work has been drained and no more
// outstanding sweepers exist. That is, when the sweep phase is
// completely done.
func (a *activeSweep) isDone() bool {
	// Drained bit set and counter zero, checked in one atomic load.
	return a.state.Load() == sweepDrainedMask
}
// reset sets up the activeSweep for the next sweep cycle:
// no sweepers, drained bit clear.
//
// The world must be stopped.
func (a *activeSweep) reset() {
	assertWorldStopped()
	a.state.Store(0)
}
// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures there are no sweeps in
// progress.
//
//go:nowritebarrier
func finishsweep_m() {
	assertWorldStopped()

	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	// (sweepone returns ^uintptr(0) when there is nothing left.)
	for sweepone() != ^uintptr(0) {
	}

	// Make sure there aren't any outstanding sweepers left.
	// At this point, with the world stopped, it means one of two
	// things. Either we were able to preempt a sweeper, or that
	// a sweeper didn't call sweep.active.end when it should have.
	// Both cases indicate a bug, so throw.
	if sweep.active.sweepers() != 0 {
		throw("active sweepers found at start of mark phase")
	}

	// Reset all the unswept buffers, which should be empty.
	// Do this in sweep termination as opposed to mark termination
	// so that we can catch unswept spans and reclaim blocks as
	// soon as possible.
	sg := mheap_.sweepgen
	for i := range mheap_.central {
		c := &mheap_.central[i].mcentral
		c.partialUnswept(sg).reset()
		c.fullUnswept(sg).reset()
	}

	// Sweeping is done, so there won't be any new memory to
	// scavenge for a bit.
	//
	// If the scavenger isn't already awake, wake it up. There's
	// definitely work for it to do at this point.
	scavenger.wake()

	nextMarkBitArenaEpoch()
}
// bgsweep is the body of the background sweeper goroutine. It signals
// readiness on c once initialized, then repeatedly parks until woken
// to sweep spans and free workbufs.
func bgsweep(c chan int) {
	sweep.g = getg()

	lockInit(&sweep.lock, lockRankSweep)
	lock(&sweep.lock)
	sweep.parked = true
	// Signal the caller that the sweeper is set up, then park until
	// we're woken to do sweep work.
	c <- 1
	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1)

	for {
		// bgsweep attempts to be a "low priority" goroutine by intentionally
		// yielding time. It's OK if it doesn't run, because goroutines allocating
		// memory will sweep and ensure that all spans are swept before the next
		// GC cycle. We really only want to run when we're idle.
		//
		// However, calling Gosched after each span swept produces a tremendous
		// amount of tracing events, sometimes up to 50% of events in a trace. It's
		// also inefficient to call into the scheduler so much because sweeping a
		// single span is in general a very fast operation, taking as little as 30 ns
		// on modern hardware. (See #54767.)
		//
		// As a result, bgsweep sweeps in batches, and only calls into the scheduler
		// at the end of every batch. Furthermore, it only yields its time if there
		// isn't spare idle time available on other cores. If there's available idle
		// time, helping to sweep can reduce allocation latencies by getting ahead of
		// the proportional sweeper and having spans ready to go for allocation.
		const sweepBatchSize = 10
		nSwept := 0
		for sweepone() != ^uintptr(0) {
			nSwept++
			if nSwept%sweepBatchSize == 0 {
				goschedIfBusy()
			}
		}
		for freeSomeWbufs(true) {
			// N.B. freeSomeWbufs is already batched internally.
			goschedIfBusy()
		}
		freeDeadSpanSPMCs()
		lock(&sweep.lock)
		if !isSweepDone() {
			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			// This goroutine must preempt when we have no work to do
			// but isSweepDone returns false because of another existing sweeper.
			// See issue #73499.
			goschedIfBusy()
			continue
		}
		// Park until the next sweep phase wakes us.
		sweep.parked = true
		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1)
	}
}
// sweepLocker acquires sweep ownership of spans.
type sweepLocker struct {
	// sweepGen is the sweep generation of the heap.
	sweepGen uint32
	// valid reports whether this locker may be used to acquire spans.
	// When false, sweeping for this generation has already completed.
	valid bool
}
// sweepLocked represents sweep ownership of a span. Embedding *mspan
// gives the owner direct span access while making the ownership
// requirement explicit in method receivers (see sweep).
type sweepLocked struct {
	*mspan
}
// tryAcquire attempts to acquire sweep ownership of span s. If it
// successfully acquires ownership, it blocks sweep completion.
// It reports false if some other sweeper already owns or has swept s
// for this generation.
func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
	if !l.valid {
		throw("use of invalid sweepLocker")
	}
	// Check before attempting to CAS.
	// sweepGen-2 means "needs sweeping"; any other value means the
	// span has already been claimed or swept.
	if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
		return sweepLocked{}, false
	}
	// Attempt to acquire sweep ownership of s.
	// Moving sweepgen to sweepGen-1 marks the span as "being swept".
	if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
		return sweepLocked{}, false
	}
	return sweepLocked{s}, true
}
// sweepone sweeps some unswept heap span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
	gp := getg()

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep thus leaving the span in an inconsistent state for next GC
	gp.m.locks++

	// TODO(austin): sweepone is almost always called in a loop;
	// lift the sweepLocker into its callers.
	sl := sweep.active.begin()
	if !sl.valid {
		// Sweeping for this cycle is already complete.
		gp.m.locks--
		return ^uintptr(0)
	}

	// Find a span to sweep.
	npages := ^uintptr(0)
	var noMoreWork bool
	for {
		s := mheap_.nextSpanForSweep()
		if s == nil {
			// The unswept list is drained; record whether we were
			// the one to notice so we can signal the scavenger.
			noMoreWork = sweep.active.markDrained()
			break
		}
		if state := s.state.get(); state != mSpanInUse {
			// This can happen if direct sweeping already
			// swept this span, but in that case the sweep
			// generation should always be up-to-date.
			if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) {
				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n")
				throw("non in-use span in unswept list")
			}
			continue
		}
		if s, ok := sl.tryAcquire(s); ok {
			// Sweep the span we found.
			npages = s.npages
			if s.sweep(false) {
				// Whole span was freed. Count it toward the
				// page reclaimer credit since these pages can
				// now be used for span allocation.
				mheap_.reclaimCredit.Add(npages)
			} else {
				// Span is still in-use, so this returned no
				// pages to the heap and the span needs to
				// move to the swept in-use list.
				npages = 0
			}
			break
		}
	}
	sweep.active.end(sl)

	if noMoreWork {
		// The sweep list is empty. There may still be
		// concurrent sweeps running, but we're at least very
		// close to done sweeping.

		// Move the scavenge gen forward (signaling
		// that there's new work to do) and wake the scavenger.
		//
		// The scavenger is signaled by the last sweeper because once
		// sweeping is done, we will definitely have useful work for
		// the scavenger to do, since the scavenger only runs over the
		// heap once per GC cycle. This update is not done during sweep
		// termination because in some cases there may be a long delay
		// between sweep done and sweep termination (e.g. not enough
		// allocations to trigger a GC) which would be nice to fill in
		// with scavenging work.
		if debug.scavtrace > 0 {
			systemstack(func() {
				lock(&mheap_.lock)

				// Get released stats.
				releasedBg := mheap_.pages.scav.releasedBg.Load()
				releasedEager := mheap_.pages.scav.releasedEager.Load()

				// Print the line.
				printScavTrace(releasedBg, releasedEager, false)

				// Update the stats.
				mheap_.pages.scav.releasedBg.Add(-releasedBg)
				mheap_.pages.scav.releasedEager.Add(-releasedEager)
				unlock(&mheap_.lock)
			})
		}
		scavenger.ready()
	}

	gp.m.locks--
	return npages
}
// isSweepDone reports whether all spans are swept.
//
// Note that this condition may transition from false to true at any
// time as the sweeper runs. It may transition from true to false if a
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
	// Thin wrapper over the global active-sweep state.
	return sweep.active.isDone()
}
// Returns only when span s has been swept.
//
//go:nowritebarrier
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	gp := getg()
	if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 {
		throw("mspan.ensureSwept: m is not locked")
	}

	// If this operation fails, then that means that there are
	// no more spans to be swept. In this case, either s has already
	// been swept, or is about to be acquired for sweeping and swept.
	sl := sweep.active.begin()
	if sl.valid {
		// The caller must be sure that the span is a mSpanInUse span.
		if s, ok := sl.tryAcquire(s); ok {
			// We got ownership: sweep it ourselves.
			s.sweep(false)
			sweep.active.end(sl)
			return
		}
		sweep.active.end(sl)
	}

	// Unfortunately we can't sweep the span ourselves. Somebody else
	// got to it first. We don't have efficient means to wait, but that's
	// OK, it will be swept fairly soon.
	//
	// NOTE(review): the loop below reads sl.sweepGen even when sl.valid
	// is false; this relies on begin() populating sweepGen
	// unconditionally — confirm against activeSweep.begin.
	for {
		spangen := atomic.Load(&s.sweepgen)
		// sweepGen means "swept"; sweepGen+3 means "swept and cached".
		if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
			break
		}
		osyield()
	}
}
// sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.
func (sl *sweepLocked) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	gp := getg()
	if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 {
		throw("mspan.sweep: m is not locked")
	}

	s := sl.mspan
	if !preserve {
		// We'll release ownership of this span. Nil it out to
		// prevent the caller from accidentally using it.
		sl.mspan = nil
	}

	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
	}

	// Emit a sweep event for the execution tracer, covering the
	// whole span.
	trace := traceAcquire()
	if trace.ok() {
		trace.GCSweepSpan(s.npages * pageSize)
		traceRelease(trace)
	}

	mheap_.pagesSwept.Add(int64(s.npages))

	spc := s.spanclass
	size := s.elemsize

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked then the object remains unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such case we need to queue finalizer for execution,
	//    mark the object as live and preserve the profile special.
	// 2. A tiny object can have several finalizers setup for different offsets.
	//    If such object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
	hadSpecials := s.specials != nil
	siter := newSpecialsIter(s)
	for siter.valid() {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := siter.s.offset / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has a finalizer.
			hasFinAndRevived := false
			endOffset := p - s.base() + size
			for tmp := siter.s; tmp != nil && tmp.offset < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFinAndRevived = true
					break
				}
			}
			if hasFinAndRevived {
				// Pass 2: queue all finalizers and clear any weak handles. Weak handles are cleared
				// before finalization as specified by the weak package. See the documentation
				// for that package for more details.
				for siter.valid() && siter.s.offset < endOffset {
					// Find the exact byte for which the special was setup
					// (as opposed to object beginning).
					special := siter.s
					p := s.base() + special.offset
					if special.kind == _KindSpecialFinalizer || special.kind == _KindSpecialWeakHandle {
						siter.unlinkAndNext()
						freeSpecial(special, unsafe.Pointer(p), size)
					} else {
						// All other specials only apply when an object is freed,
						// so just keep the special record.
						siter.next()
					}
				}
			} else {
				// Pass 2: the object is truly dead, free (and handle) all specials.
				for siter.valid() && siter.s.offset < endOffset {
					// Find the exact byte for which the special was setup
					// (as opposed to object beginning).
					special := siter.s
					p := s.base() + special.offset
					siter.unlinkAndNext()
					freeSpecial(special, unsafe.Pointer(p), size)
				}
			}
		} else {
			// object is still live
			if siter.s.kind == _KindSpecialReachable {
				special := siter.unlinkAndNext()
				(*specialReachable)(unsafe.Pointer(special)).reachable = true
				freeSpecial(special, unsafe.Pointer(p), size)
			} else {
				// keep special record
				siter.next()
			}
		}
	}
	if hadSpecials && s.specials == nil {
		// The span no longer has any specials; clear its flag so
		// later sweeps can skip the specials scan.
		spanHasNoSpecials(s)
	}

	if traceAllocFreeEnabled() || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
		// Find all newly freed objects.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < uintptr(s.nelems); i++ {
			if !mbits.isMarked() && (abits.index < uintptr(s.freeindex) || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if traceAllocFreeEnabled() {
					trace := traceAcquire()
					if trace.ok() {
						trace.HeapObjectFree(x)
						traceRelease(trace)
					}
				}
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
				}
				// User arenas are handled on explicit free.
				if raceenabled && !s.isUserArenaChunk {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled && !s.isUserArenaChunk {
					msanfree(unsafe.Pointer(x), size)
				}
				if asanenabled && !s.isUserArenaChunk {
					asanpoison(unsafe.Pointer(x), size)
				}
				if valgrindenabled && !s.isUserArenaChunk {
					valgrindFree(unsafe.Pointer(x))
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Copy over and clear the inline mark bits if necessary.
	if gcUsesSpanInlineMarkBits(s.elemsize) {
		s.moveInlineMarks(s.gcmarkBits)
	}

	// Check for zombie objects.
	if s.freeindex < s.nelems {
		// Everything < freeindex is allocated and hence
		// cannot be zombies.
		//
		// Check the first bitmap byte, where we have to be
		// careful with freeindex.
		obj := uintptr(s.freeindex)
		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
			s.reportZombies()
		}
		// Check remaining bytes.
		for i := obj/8 + 1; i < divRoundUp(uintptr(s.nelems), 8); i++ {
			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
				s.reportZombies()
			}
		}
	}

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		// The zombie check above should have caught this in
		// more detail.
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	s.freeindex = 0 // reset allocation index to start of span.
	s.freeIndexForScan = 0
	if traceEnabled() {
		getg().m.p.ptr().trace.reclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// get a fresh cleared gcmarkBits in preparation for next GC
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(uintptr(s.nelems))

	// refresh pinnerBits if they exists
	if s.pinnerBits != nil {
		s.refreshPinnerBits()
	}

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// The span must be in our exclusive ownership until we update sweepgen,
	// check for potential races.
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state after sweep")
	}
	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
		throw("swept cached span")
	}

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	//
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	//
	// Serialization point.
	// At this point the mark bits are cleared and allocation ready
	// to go so release the span.
	atomic.Store(&s.sweepgen, sweepgen)

	if s.isUserArenaChunk {
		if preserve {
			// This is a case that should never be handled by a sweeper that
			// preserves the span for reuse.
			throw("sweep: tried to preserve a user arena span")
		}
		if nalloc > 0 {
			// There still exist pointers into the span or the span hasn't been
			// freed yet. It's not ready to be reused. Put it back on the
			// full swept list for the next cycle.
			mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
			return false
		}

		// It's only at this point that the sweeper doesn't actually need to look
		// at this arena anymore, so subtract from pagesInUse now.
		mheap_.pagesInUse.Add(-s.npages)
		s.state.set(mSpanDead)

		// The arena is ready to be recycled. Remove it from the quarantine list
		// and place it on the ready list. Don't add it back to any sweep lists.
		systemstack(func() {
			// It's the arena code's responsibility to get the chunk on the quarantine
			// list by the time all references to the chunk are gone.
			if s.list != &mheap_.userArena.quarantineList {
				throw("user arena span is on the wrong list")
			}
			lock(&mheap_.lock)
			mheap_.userArena.quarantineList.remove(s)
			mheap_.userArena.readyList.insert(s)
			unlock(&mheap_.lock)
		})
		return false
	}

	if spc.sizeclass() != 0 {
		// Handle spans for small objects.
		if nfreed > 0 {
			// Only mark the span as needing zeroing if we've freed any
			// objects, because a fresh span that had been allocated into,
			// wasn't totally filled, but then swept, still has all of its
			// free slots zeroed.
			s.needzero = 1
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
			memstats.heapStats.release()

			// Count the frees in the inconsistent, internal stats.
			gcController.totalFree.Add(int64(nfreed) * int64(s.elemsize))
		}
		if !preserve {
			// The caller may not have removed this span from whatever
			// unswept set its on but taken ownership of the span for
			// sweeping by updating sweepgen. If this span still is in
			// an unswept set, then the mcentral will pop it off the
			// set, check its sweepgen, and ignore it.
			if nalloc == 0 {
				// Free totally free span directly back to the heap.
				mheap_.freeSpan(s)
				return true
			}
			// Return span back to the right mcentral list.
			if nalloc == s.nelems {
				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
			} else {
				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
			}
		}
	} else if !preserve {
		// Handle spans for large objects.
		if nfreed != 0 {
			// Free large object span to heap.

			// Count the free in the consistent, external stats.
			//
			// Do this before freeSpan, which might update heapStats' inHeap
			// value. If it does so, then metrics that subtract object footprint
			// from inHeap might overflow. See #67019.
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.largeFreeCount, 1)
			atomic.Xadd64(&stats.largeFree, int64(size))
			memstats.heapStats.release()

			// Count the free in the inconsistent, internal stats.
			gcController.totalFree.Add(int64(size))

			// NOTE(rsc,dvyukov): The original implementation of efence
			// in CL 22060046 used sysFree instead of sysFault, so that
			// the operating system would eventually give the memory
			// back to us again, so that an efence program could run
			// longer without running out of memory. Unfortunately,
			// calling sysFree here without any kind of adjustment of the
			// heap data structures means that when the memory does
			// come back to us, we have the wrong metadata for it, either in
			// the mspan structures or in the garbage collection bitmap.
			// Using sysFault here means that the program will run out of
			// memory fairly quickly in efence mode, but at least it won't
			// have mysterious crashes due to confused memory reuse.
			// It should be possible to switch back to sysFree if we also
			// implement and then call some kind of mheap.deleteSpan.
			if debug.efence > 0 {
				s.limit = 0 // prevent mlookup from finding this span
				sysFault(unsafe.Pointer(s.base()), size)
			} else {
				mheap_.freeSpan(s)
			}
			return true
		}

		// Add a large span directly onto the full+swept list.
		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
	}
	return false
}
// reportZombies reports any marked but free objects in s and throws.
//
// This generally means one of the following:
//
// 1. User code converted a pointer to a uintptr and then back
// unsafely, and a GC ran while the uintptr was the only reference to
// an object.
//
// 2. User code (or a compiler bug) constructed a bad pointer that
// points to a free slot, often a past-the-end pointer.
//
// 3. The GC two cycles ago missed a pointer and freed a live object,
// but it was still live in the last cycle, so this GC cycle found a
// pointer to that object and marked it.
func (s *mspan) reportZombies() {
	printlock()
	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer or having race conditions? try -d=checkptr or -race)\n")
	mbits := s.markBitsForBase()
	abits := s.allocBitsForIndex(0)
	// Dump every object slot in the span with its alloc/mark state,
	// hexdumping the contents of any zombie (marked but free) slot.
	for i := uintptr(0); i < uintptr(s.nelems); i++ {
		addr := s.base() + i*s.elemsize
		print(hex(addr))
		// A slot is allocated if it's below freeindex or its alloc
		// bit is set.
		alloc := i < uintptr(s.freeindex) || abits.isMarked()
		if alloc {
			print(" alloc")
		} else {
			print(" free ")
		}
		if mbits.isMarked() {
			print(" marked ")
		} else {
			print(" unmarked")
		}
		zombie := mbits.isMarked() && !alloc
		if zombie {
			print(" zombie")
		}
		print("\n")
		if zombie {
			// Cap the dump at 1 KiB per object to keep output bounded.
			length := s.elemsize
			if length > 1024 {
				length = 1024
			}
			hexdumpWords(addr, length, nil)
		}
		mbits.advance()
		abits.advance()
	}
	throw("found pointer to free object")
}
// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going in to debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	trace := traceAcquire()
	if trace.ok() {
		trace.GCSweepStart()
		traceRelease(trace)
	}

	// Fix debt if necessary.
retry:
	sweptBasis := mheap_.pagesSweptBasis.Load()
	live := gcController.heapLive.Load()
	liveBasis := mheap_.sweepHeapLiveBasis
	newHeapLive := spanBytes
	if liveBasis < live {
		// Only do this subtraction when we don't overflow. Otherwise, pagesTarget
		// might be computed as something really huge, causing us to get stuck
		// sweeping here until the next mark phase.
		//
		// Overflow can happen here if gcPaceSweeper is called concurrently with
		// sweeping (i.e. not during a STW, like it usually is) because this code
		// is intentionally racy. A concurrent call to gcPaceSweeper can happen
		// if a GC tuning parameter is modified and we read an older value of
		// heapLive than what was used to set the basis.
		//
		// This state should be transient, so it's fine to just let newHeapLive
		// be a relatively small number. We'll probably just skip this attempt to
		// sweep.
		//
		// See issue #57523.
		newHeapLive += uintptr(live - liveBasis)
	}
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	// Sweep until we've paid off the debt implied by the allocation.
	for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) {
		if sweepone() == ^uintptr(0) {
			// Nothing left to sweep; disable proportional sweep.
			mheap_.sweepPagesPerByte = 0
			break
		}
		if mheap_.pagesSweptBasis.Load() != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}

	trace = traceAcquire()
	if trace.ok() {
		trace.GCSweepDone()
		traceRelease(trace)
	}
}
// clobberfree sets the memory content at x to bad content, for debugging
// purposes. Overwriting freed memory with a recognizable pattern makes
// use-after-free bugs surface as obviously-bogus values.
func clobberfree(x unsafe.Pointer, size uintptr) {
	// size (span.elemsize) is always a multiple of 4, so the object
	// can be filled one 32-bit word at a time.
	for off := uintptr(0); off < size; off += 4 {
		word := (*uint32)(add(x, off))
		*word = 0xdeadbeef
	}
}
// gcPaceSweeper updates the sweeper's pacing parameters.
//
// Must be called whenever the GC's pacing is updated.
//
// The world must be stopped, or mheap_.lock must be held.
//
// trigger is the heap size (in bytes) at which the next GC cycle
// should start; the sweeper must finish before the heap grows to it.
func gcPaceSweeper(trigger uint64) {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	// Update sweep pacing.
	if isSweepDone() {
		mheap_.sweepPagesPerByte = 0
	} else {
		// Concurrent sweep needs to sweep all of the in-use
		// pages by the time the allocated heap reaches the GC
		// trigger. Compute the ratio of in-use pages to sweep
		// per byte allocated, accounting for the fact that
		// some might already be swept.
		heapLiveBasis := gcController.heapLive.Load()
		heapDistance := int64(trigger) - int64(heapLiveBasis)
		// Add a little margin so rounding errors and
		// concurrent sweep are less likely to leave pages
		// unswept when GC starts.
		heapDistance -= 1024 * 1024
		if heapDistance < pageSize {
			// Avoid setting the sweep ratio extremely high
			heapDistance = pageSize
		}
		pagesSwept := mheap_.pagesSwept.Load()
		pagesInUse := mheap_.pagesInUse.Load()
		sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
		if sweepDistancePages <= 0 {
			// Already swept everything in use; nothing to pace.
			mheap_.sweepPagesPerByte = 0
		} else {
			mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
			mheap_.sweepHeapLiveBasis = heapLiveBasis
			// Write pagesSweptBasis last, since this
			// signals concurrent sweeps to recompute
			// their debt.
			mheap_.pagesSweptBasis.Store(pagesSwept)
		}
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/goarch"
"internal/goexperiment"
"internal/runtime/atomic"
"internal/runtime/gc"
"internal/runtime/sys"
"unsafe"
)
const (
	// _WorkbufSize is the total size of a single workbuf, header
	// included.
	_WorkbufSize = 2048 // in bytes; larger values result in less contention

	// workbufAlloc is the number of bytes to allocate at a time
	// for new workbufs. This must be a multiple of pageSize and
	// should be a multiple of _WorkbufSize.
	//
	// Larger values reduce workbuf allocation overhead. Smaller
	// values reduce heap fragmentation.
	workbufAlloc = 32 << 10
)
// init sanity-checks at startup that workbufAlloc is a multiple of both
// the page size and the workbuf size; a mismatch is a configuration bug
// in the constants above, so crash immediately.
func init() {
	misaligned := workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0
	if misaligned {
		throw("bad workbufAlloc")
	}
}
// Garbage collector work pool abstraction.
//
// This implements a producer/consumer model for pointers to grey
// objects.
//
// For objects in workbufs, a grey object is one that is marked and
// on a work queue. A black object is marked and not on a work queue.
//
// For objects in the span queue, a grey object is one that is marked
// and has an unset scan bit. A black object is marked and has its scan
// bit set. (Green Tea GC only.)
//
// Write barriers, root discovery, stack scanning, and object scanning
// produce pointers to grey objects. Scanning consumes pointers to
// grey objects, thus blackening them, and then scans them,
// potentially producing new pointers to grey objects.
//
// Work queues must be prioritized in the following order wherever work
// is processed.
//
// +----------------------------------------------------------+
// | Priority | Work queue | Restrictions | Function |
// |----------------------------------------------------------|
// | 1 | Workbufs | P-local | tryGetObjFast |
// | 2 | Span queue | P-local | tryGetSpanFast | [greenteagc]
// | 3 | Workbufs | None | tryGetObj |
// | 4 | Span queue | None | tryGetSpan | [greenteagc]
// | 5 | Span queue | None | tryStealSpan | [greenteagc]
// +----------------------------------------------------------+
//
// The rationale behind this ordering comes from two insights:
// 1. It's always preferable to look for P-local work first to avoid hammering on
// global lists.
// 2. It's always preferable to scan individual objects first to increase the
// likelihood that spans will accumulate more objects to scan.
// A gcWork provides the interface to produce and consume work for the
// garbage collector.
//
// A gcWork can be used on the stack as follows:
//
//	(preemption must be disabled)
//	gcw := &getg().m.p.ptr().gcw
//	.. call gcw.put() to produce and gcw.tryGet() to consume ..
//
// It's important that any use of gcWork during the mark phase prevent
// the garbage collector from transitioning to mark termination since
// gcWork may locally hold GC work buffers. This can be done by
// disabling preemption (systemstack or acquirem).
type gcWork struct {
	// id is the same ID as the parent P.
	id int32 // same ID as the parent P

	// wbuf1 and wbuf2 are the primary and secondary work buffers.
	//
	// This can be thought of as a stack of both work buffers'
	// pointers concatenated. When we pop the last pointer, we
	// shift the stack up by one work buffer by bringing in a new
	// full buffer and discarding an empty one. When we fill both
	// buffers, we shift the stack down by one work buffer by
	// bringing in a new empty buffer and discarding a full one.
	// This way we have one buffer's worth of hysteresis, which
	// amortizes the cost of getting or putting a work buffer over
	// at least one buffer of work and reduces contention on the
	// global work lists.
	//
	// wbuf1 is always the buffer we're currently pushing to and
	// popping from and wbuf2 is the buffer that will be discarded
	// next.
	//
	// Invariant: Both wbuf1 and wbuf2 are nil or neither are.
	wbuf1, wbuf2 *workbuf

	// spanq is a queue of spans to process.
	//
	// Only used if goexperiment.GreenTeaGC.
	spanq spanQueue

	// ptrBuf is a temporary buffer used by span scanning.
	ptrBuf *[pageSize / goarch.PtrSize]uintptr

	// Bytes marked (blackened) on this gcWork. This is aggregated
	// into work.bytesMarked by dispose.
	bytesMarked uint64

	// Heap scan work performed on this gcWork. This is aggregated into
	// gcController by dispose and may also be flushed by callers.
	// Other types of scan work are flushed immediately.
	heapScanWork int64

	// flushedWork indicates that a non-empty work buffer was
	// flushed to the global work list since the last gcMarkDone
	// termination check. Specifically, this indicates that this
	// gcWork may have communicated work to another gcWork.
	flushedWork bool

	// mayNeedWorker is a hint that we may need to spin up a new
	// worker, and that gcDrain* should call enlistWorker. This flag
	// is set only if goexperiment.GreenTeaGC. If !goexperiment.GreenTeaGC,
	// enlistWorker is called directly instead.
	mayNeedWorker bool

	// stats are scan stats broken down by size class.
	stats [gc.NumSizeClasses]sizeClassScanStats
}
// Most of the methods of gcWork are go:nowritebarrierrec because the
// write barrier itself can invoke gcWork methods but the methods are
// not generally re-entrant. Hence, if a gcWork method invoked the
// write barrier while the gcWork was in an inconsistent state, and
// the write barrier in turn invoked a gcWork method, it could
// permanently corrupt the gcWork.
// init lazily populates both work buffers. The primary buffer starts
// out empty, while the secondary is seeded from the global full list
// when possible so there is work readily available to pop.
func (w *gcWork) init() {
	w.wbuf1 = getempty()
	if full := trygetfull(); full != nil {
		w.wbuf2 = full
	} else {
		w.wbuf2 = getempty()
	}
}
// putObj enqueues a pointer for the garbage collector to trace.
// obj must point to the beginning of a heap object or an oblet.
//
//go:nowritebarrierrec
func (w *gcWork) putObj(obj uintptr) {
	flushed := false
	wbuf := w.wbuf1
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	} else if wbuf.nobj == len(wbuf.obj) {
		// Primary buffer is full: swap in the secondary. If that is
		// full too, flush it to the global full list and replace it
		// with an empty one.
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			wbuf = getempty()
			w.wbuf1 = wbuf
			flushed = true
		}
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++

	// If we put a buffer on full, let the GC controller know so
	// it can encourage more workers to run. We delay this until
	// the end of put so that w is in a consistent state, since
	// enlistWorker may itself manipulate w.
	if flushed && gcphase == _GCmark {
		if goexperiment.GreenTeaGC {
			w.mayNeedWorker = true
		} else {
			gcController.enlistWorker()
		}
	}
}
// putObjFast does a put and reports whether it can be done quickly
// otherwise it returns false and the caller needs to call put.
//
//go:nowritebarrierrec
func (w *gcWork) putObjFast(obj uintptr) bool {
	buf := w.wbuf1
	if buf == nil {
		// Buffers not initialized yet; the slow path handles that.
		return false
	}
	if buf.nobj == len(buf.obj) {
		// Primary buffer is full; the slow path must rotate/flush.
		return false
	}
	buf.obj[buf.nobj] = obj
	buf.nobj++
	return true
}
// putObjBatch performs a put on every pointer in obj. See put for
// constraints on these pointers.
//
//go:nowritebarrierrec
func (w *gcWork) putObjBatch(obj []uintptr) {
	if len(obj) == 0 {
		return
	}

	flushed := false
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
	}

	for len(obj) > 0 {
		// Flush full buffers until wbuf1 has room for at least one
		// pointer.
		for wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			w.wbuf1, w.wbuf2 = w.wbuf2, getempty()
			wbuf = w.wbuf1
			flushed = true
		}
		// Copy as many pointers as fit into the current buffer.
		n := copy(wbuf.obj[wbuf.nobj:], obj)
		wbuf.nobj += n
		obj = obj[n:]
	}

	// If we flushed a buffer to the global full list, notify the GC
	// controller so it can encourage more workers to run. Deferred to
	// here so w is in a consistent state if enlistWorker manipulates it.
	if flushed && gcphase == _GCmark {
		if goexperiment.GreenTeaGC {
			w.mayNeedWorker = true
		} else {
			gcController.enlistWorker()
		}
	}
}
// tryGetObj dequeues a pointer for the garbage collector to trace.
//
// If there are no pointers remaining in this gcWork or in the global
// queue, tryGet returns 0. Note that there may still be pointers in
// other gcWork instances or other caches.
//
//go:nowritebarrierrec
func (w *gcWork) tryGetObj() uintptr {
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	}
	if wbuf.nobj == 0 {
		// Primary buffer is empty: try the secondary, and failing
		// that, trade the empty buffer for a full one from the
		// global list.
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == 0 {
			owbuf := wbuf
			wbuf = trygetfull()
			if wbuf == nil {
				return 0
			}
			putempty(owbuf)
			w.wbuf1 = wbuf
		}
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}
// tryGetObjFast dequeues a pointer for the garbage collector to trace
// if one is readily available. Otherwise it returns 0 and
// the caller is expected to call tryGet().
//
//go:nowritebarrierrec
func (w *gcWork) tryGetObjFast() uintptr {
	buf := w.wbuf1
	if buf == nil {
		// Buffers not initialized; the slow path handles that.
		return 0
	}
	if buf.nobj == 0 {
		// Primary buffer drained; the slow path must rotate/refill.
		return 0
	}
	buf.nobj--
	return buf.obj[buf.nobj]
}
// dispose returns any cached pointers to the global queue.
// The buffers are being put on the full queue so that the
// write barriers will not simply reacquire them before the
// GC can inspect them. This helps reduce the mutator's
// ability to hide pointers during the concurrent mark phase.
//
//go:nowritebarrierrec
func (w *gcWork) dispose() {
	if wbuf := w.wbuf1; wbuf != nil {
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf1 = nil

		// wbuf1 and wbuf2 are both nil or both non-nil, so wbuf2
		// is safe to dereference here.
		wbuf = w.wbuf2
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf2 = nil
	}
	if !w.spanq.empty() {
		w.spanq.flush() // Flush any local work.

		// There's globally-visible work now, so make everyone aware of it.
		//
		// Note that we need to make everyone aware even if flush didn't
		// flush any local work. The global work was always visible, but
		// the bitmap bit may have been unset.
		//
		// See the comment in tryStealSpan, which explains how it relies
		// on this behavior.
		work.spanqMask.set(w.id)
		w.flushedWork = true
	}
	if w.bytesMarked != 0 {
		// dispose happens relatively infrequently. If this
		// atomic becomes a problem, we should first try to
		// dispose less and if necessary aggregate in a per-P
		// counter.
		atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
		w.bytesMarked = 0
	}
	if w.heapScanWork != 0 {
		gcController.heapScanWork.Add(w.heapScanWork)
		w.heapScanWork = 0
	}
}
// balance moves some work that's cached in this gcWork back on the
// global queue.
//
//go:nowritebarrierrec
func (w *gcWork) balance() {
	if w.wbuf1 == nil {
		// Not initialized; nothing is cached.
		return
	}
	if wbuf := w.wbuf2; wbuf.nobj != 0 {
		// Donate the whole secondary buffer and replace it with
		// an empty one.
		putfull(wbuf)
		w.flushedWork = true
		w.wbuf2 = getempty()
	} else if wbuf := w.wbuf1; wbuf.nobj > 4 {
		// Split the primary buffer, keeping half locally.
		w.wbuf1 = handoff(wbuf)
		w.flushedWork = true // handoff did putfull
	} else {
		// Too little cached work to be worth sharing.
		return
	}
	// We flushed a buffer to the full list, so wake a worker.
	if gcphase == _GCmark {
		if goexperiment.GreenTeaGC {
			w.mayNeedWorker = true
		} else {
			gcController.enlistWorker()
		}
	}
}
// empty reports whether w has no mark work available.
//
//go:nowritebarrierrec
func (w *gcWork) empty() bool {
	if w.wbuf1 != nil && (w.wbuf1.nobj != 0 || w.wbuf2.nobj != 0) {
		// At least one pointer is buffered locally.
		return false
	}
	// No buffered pointers; work remains only if the local span
	// queue has entries.
	return w.spanq.empty()
}
// Internally, the GC work pool is kept in arrays in work buffers.
// The gcWork interface caches a work buffer until full (or empty) to
// avoid contending on the global work buffer lists.

// workbufhdr is the header shared by every workbuf.
type workbufhdr struct {
	node lfnode // must be first
	nobj int    // number of valid entries in the obj array
}

type workbuf struct {
	_ sys.NotInHeap
	workbufhdr
	// account for the above fields
	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
}
// workbuf factory routines. These funcs are used to manage the
// workbufs.
// If the GC asks for some work these are the only routines that
// make wbufs available to the GC.

// checknonempty throws if b holds no pointers; used as a sanity
// check where a buffer is required to contain work.
func (b *workbuf) checknonempty() {
	if b.nobj != 0 {
		return
	}
	throw("workbuf is empty")
}
// checkempty throws if b still holds pointers; used as a sanity
// check where a buffer is required to be empty.
func (b *workbuf) checkempty() {
	if b.nobj == 0 {
		return
	}
	throw("workbuf is not empty")
}
// getempty pops an empty work buffer off the work.empty list,
// allocating new buffers if none are available.
//
//go:nowritebarrier
func getempty() *workbuf {
	var b *workbuf
	if work.empty != 0 {
		b = (*workbuf)(work.empty.pop())
		if b != nil {
			b.checkempty()
		}
	}
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if b == nil {
		// Allocate more workbufs.
		var s *mspan
		if work.wbufSpans.free.first != nil {
			// Racy fast check above; recheck under the lock.
			lock(&work.wbufSpans.lock)
			s = work.wbufSpans.free.first
			if s != nil {
				work.wbufSpans.free.remove(s)
				work.wbufSpans.busy.insert(s)
			}
			unlock(&work.wbufSpans.lock)
		}
		if s == nil {
			// No reusable span; carve a fresh one out of the
			// heap (on the system stack).
			systemstack(func() {
				s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
			})
			if s == nil {
				throw("out of memory")
			}
			// Record the new span in the busy list.
			lock(&work.wbufSpans.lock)
			work.wbufSpans.busy.insert(s)
			unlock(&work.wbufSpans.lock)
		}
		// Slice up the span into new workbufs. Return one and
		// put the rest on the empty list.
		for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
			newb := (*workbuf)(unsafe.Pointer(s.base() + i))
			newb.nobj = 0
			lfnodeValidate(&newb.node)
			if i == 0 {
				b = newb
			} else {
				putempty(newb)
			}
		}
	}
	return b
}
// putempty puts a workbuf onto the work.empty list.
// Upon entry this goroutine owns b. The lfstack.push relinquishes ownership.
//
//go:nowritebarrier
func putempty(b *workbuf) {
	b.checkempty()
	work.empty.push(&b.node) // ownership passes to the list here
}
// putfull puts the workbuf on the work.full list for the GC.
// putfull accepts partially full buffers so the GC can avoid competing
// with the mutators for ownership of partially full buffers.
//
//go:nowritebarrier
func putfull(b *workbuf) {
	b.checknonempty()
	work.full.push(&b.node) // ownership passes to the list here
}
// trygetfull tries to get a full or partially empty workbuffer.
// If one is not immediately available return nil.
//
//go:nowritebarrier
func trygetfull() *workbuf {
	b := (*workbuf)(work.full.pop())
	if b == nil {
		// Nothing available right now.
		return nil
	}
	b.checknonempty()
	return b
}
// handoff splits b, moving the upper half of its pointers into a
// fresh buffer, and publishes b on the full list so the remaining
// half can be stolen. It returns the retained buffer.
//
//go:nowritebarrier
func handoff(b *workbuf) *workbuf {
	half := getempty()
	n := b.nobj / 2
	// Shrink b first so &b.obj[b.nobj] is the start of the moved region.
	b.nobj -= n
	half.nobj = n
	memmove(unsafe.Pointer(&half.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(half.obj[0]))
	// Put b on full list - let first half of b get stolen.
	putfull(b)
	return half
}
// prepareFreeWorkbufs moves busy workbuf spans to free list so they
// can be freed to the heap. This must only be called when all
// workbufs are on the empty list.
func prepareFreeWorkbufs() {
	lock(&work.wbufSpans.lock)
	if work.full != 0 {
		// A non-zero full list means some workbufs still hold
		// work, violating the precondition above.
		throw("cannot free workbufs when work.full != 0")
	}
	// Since all workbufs are on the empty list, we don't care
	// which ones are in which spans. We can wipe the entire empty
	// list and move all workbuf spans to the free list.
	work.empty = 0
	work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
	unlock(&work.wbufSpans.lock)
}
// freeSomeWbufs frees some workbufs back to the heap and returns
// true if it should be called again to free more.
func freeSomeWbufs(preemptible bool) bool {
	const batchSize = 64 // ~1–2 µs per span.
	lock(&work.wbufSpans.lock)
	if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
		// Workbuf spans are only freed while the GC is off.
		unlock(&work.wbufSpans.lock)
		return false
	}
	systemstack(func() {
		gp := getg().m.curg
		// Free up to batchSize spans, stopping early if a
		// preemption was requested (and we're preemptible).
		for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
			span := work.wbufSpans.free.first
			if span == nil {
				break
			}
			work.wbufSpans.free.remove(span)
			mheap_.freeManual(span, spanAllocWorkBuf)
		}
	})
	more := !work.wbufSpans.free.isEmpty()
	unlock(&work.wbufSpans.lock)
	return more
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Page heap.
//
// See malloc.go for overview.
package runtime
import (
"internal/abi"
"internal/cpu"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/atomic"
"internal/runtime/gc"
"internal/runtime/sys"
"unsafe"
)
const (
	// minPhysPageSize is a lower-bound on the physical page size. The
	// true physical page size may be larger than this. In contrast,
	// sys.PhysPageSize is an upper-bound on the physical page size.
	minPhysPageSize = 4096

	// maxPhysPageSize is the maximum page size the runtime supports
	// (512 KiB).
	maxPhysPageSize = 512 << 10

	// maxPhysHugePageSize sets an upper-bound on the maximum huge page size
	// that the runtime supports.
	maxPhysHugePageSize = pallocChunkBytes

	// pagesPerReclaimerChunk indicates how many pages to scan from the
	// pageInUse bitmap at a time. Used by the page reclaimer.
	//
	// Higher values reduce contention on scanning indexes (such as
	// h.reclaimIndex), but increase the minimum latency of the
	// operation.
	//
	// The time required to scan this many pages can vary a lot depending
	// on how many spans are actually freed. Experimentally, it can
	// scan for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only
	// free spans at ~32 MB/ms. Using 512 pages bounds this at
	// roughly 100µs.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerReclaimerChunk = min(512, pagesPerArena)

	// physPageAlignedStacks indicates whether stack allocations must be
	// physical page aligned. This is a requirement for MAP_STACK on
	// OpenBSD.
	physPageAlignedStacks = GOOS == "openbsd"
)
// Main malloc heap.
// The heap uses pageAlloc to manage free and scavenged pages,
// but all the other global data is here too.
//
// mheap must not be heap-allocated because it contains mSpanLists,
// which must not be heap-allocated.
type mheap struct {
	_ sys.NotInHeap

	// lock must only be acquired on the system stack, otherwise a g
	// could self-deadlock if its stack grows with the lock held.
	lock mutex

	pages    pageAlloc // page allocation data structure
	sweepgen uint32    // sweep generation, see comment in mspan; written during STW

	// allspans is a slice of all mspans ever created. Each mspan
	// appears exactly once.
	//
	// The memory for allspans is manually managed and can be
	// reallocated and move as the heap grows.
	//
	// In general, allspans is protected by mheap_.lock, which
	// prevents concurrent access as well as freeing the backing
	// store. Accesses during STW might not hold the lock, but
	// must ensure that allocation cannot happen around the
	// access (since that may free the backing store).
	allspans []*mspan // all spans out there

	// Proportional sweep
	//
	// These parameters represent a linear function from gcController.heapLive
	// to page sweep count. The proportional sweep system works to
	// stay in the black by keeping the current page sweep count
	// above this line at the current gcController.heapLive.
	//
	// The line has slope sweepPagesPerByte and passes through a
	// basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
	// any given time, the system is at (gcController.heapLive,
	// pagesSwept) in this space.
	//
	// It is important that the line pass through a point we
	// control rather than simply starting at a 0,0 origin
	// because that lets us adjust sweep pacing at any time while
	// accounting for current progress. If we could only adjust
	// the slope, it would create a discontinuity in debt if any
	// progress has already been made.
	pagesInUse         atomic.Uintptr // pages of spans in stats mSpanInUse
	pagesSwept         atomic.Uint64  // pages swept this cycle
	pagesSweptBasis    atomic.Uint64  // pagesSwept to use as the origin of the sweep ratio
	sweepHeapLiveBasis uint64         // value of gcController.heapLive to use as the origin of sweep ratio; written with lock, read without
	sweepPagesPerByte  float64        // proportional sweep ratio; written with lock, read without

	// Page reclaimer state

	// reclaimIndex is the page index in heapArenas of next page to
	// reclaim. Specifically, it refers to page (i %
	// pagesPerArena) of arena heapArenas[i / pagesPerArena].
	//
	// If this is >= 1<<63, the page reclaimer is done scanning
	// the page marks.
	reclaimIndex atomic.Uint64

	// reclaimCredit is spare credit for extra pages swept. Since
	// the page reclaimer works in large chunks, it may reclaim
	// more than requested. Any spare pages released go to this
	// credit pool.
	reclaimCredit atomic.Uintptr

	_ cpu.CacheLinePad // prevents false-sharing between arenas and preceding variables

	// arenas is the heap arena map. It points to the metadata for
	// the heap for every arena frame of the entire usable virtual
	// address space.
	//
	// Use arenaIndex to compute indexes into this array.
	//
	// For regions of the address space that are not backed by the
	// Go heap, the arena map contains nil.
	//
	// Modifications are protected by mheap_.lock. Reads can be
	// performed without locking; however, a given entry can
	// transition from nil to non-nil at any time when the lock
	// isn't held. (Entries never transition back to nil.)
	//
	// In general, this is a two-level mapping consisting of an L1
	// map and possibly many L2 maps. This saves space when there
	// are a huge number of arena frames. However, on many
	// platforms (even 64-bit), arenaL1Bits is 0, making this
	// effectively a single-level map. In this case, arenas[0]
	// will never be nil.
	arenas [1 << arenaL1Bits]*[1 << arenaL2Bits]*heapArena

	// arenasHugePages indicates whether arenas' L2 entries are eligible
	// to be backed by huge pages.
	arenasHugePages bool

	// heapArenaAlloc is pre-reserved space for allocating heapArena
	// objects. This is only used on 32-bit, where we pre-reserve
	// this space to avoid interleaving it with the heap itself.
	heapArenaAlloc linearAlloc

	// arenaHints is a list of addresses at which to attempt to
	// add more heap arenas. This is initially populated with a
	// set of general hint addresses, and grown with the bounds of
	// actual heap arena ranges.
	arenaHints *arenaHint

	// arena is a pre-reserved space for allocating heap arenas
	// (the actual arenas). This is only used on 32-bit.
	arena linearAlloc

	// heapArenas is the arenaIndex of every mapped arena mapped for the heap.
	// This can be used to iterate through the heap address space.
	//
	// Access is protected by mheap_.lock. However, since this is
	// append-only and old backing arrays are never freed, it is
	// safe to acquire mheap_.lock, copy the slice header, and
	// then release mheap_.lock.
	heapArenas []arenaIdx

	// userArenaArenas is the arenaIndex of every mapped arena mapped for
	// user arenas.
	//
	// Access is protected by mheap_.lock. However, since this is
	// append-only and old backing arrays are never freed, it is
	// safe to acquire mheap_.lock, copy the slice header, and
	// then release mheap_.lock.
	userArenaArenas []arenaIdx

	// sweepArenas is a snapshot of heapArenas taken at the
	// beginning of the sweep cycle. This can be read safely by
	// simply blocking GC (by disabling preemption).
	sweepArenas []arenaIdx

	// markArenas is a snapshot of heapArenas taken at the beginning
	// of the mark cycle. Because heapArenas is append-only, neither
	// this slice nor its contents will change during the mark, so
	// it can be read safely.
	markArenas []arenaIdx

	// curArena is the arena that the heap is currently growing
	// into. This should always be physPageSize-aligned.
	curArena struct {
		base, end uintptr
	}

	// central free lists for small size classes.
	// the padding makes sure that the mcentrals are
	// spaced CacheLinePadSize bytes apart, so that each mcentral.lock
	// gets its own cache line.
	// central is indexed by spanClass.
	central [numSpanClasses]struct {
		mcentral mcentral
		pad      [(cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
	}

	// Fixed-size allocators for runtime-internal structures.
	spanalloc                  fixalloc // allocator for span
	spanSPMCAlloc              fixalloc // allocator for spanSPMC, protected by work.spanSPMCs.lock
	cachealloc                 fixalloc // allocator for mcache
	specialfinalizeralloc      fixalloc // allocator for specialfinalizer
	specialCleanupAlloc        fixalloc // allocator for specialCleanup
	specialCheckFinalizerAlloc fixalloc // allocator for specialCheckFinalizer
	specialTinyBlockAlloc      fixalloc // allocator for specialTinyBlock
	specialprofilealloc        fixalloc // allocator for specialprofile
	specialReachableAlloc      fixalloc // allocator for specialReachable
	specialPinCounterAlloc     fixalloc // allocator for specialPinCounter
	specialWeakHandleAlloc     fixalloc // allocator for specialWeakHandle
	specialBubbleAlloc         fixalloc // allocator for specialBubble
	specialSecretAlloc         fixalloc // allocator for specialSecret
	speciallock                mutex    // lock for special record allocators.
	arenaHintAlloc             fixalloc // allocator for arenaHints

	// User arena state.
	//
	// Protected by mheap_.lock.
	userArena struct {
		// arenaHints is a list of addresses at which to attempt to
		// add more heap arenas for user arena chunks. This is initially
		// populated with a set of general hint addresses, and grown with
		// the bounds of actual heap arena ranges.
		arenaHints *arenaHint

		// quarantineList is a list of user arena spans that have been set to fault, but
		// are waiting for all pointers into them to go away. Sweeping handles
		// identifying when this is true, and moves the span to the ready list.
		quarantineList mSpanList

		// readyList is a list of empty user arena spans that are ready for reuse.
		readyList mSpanList
	}

	// cleanupID is a counter which is incremented each time a cleanup special is added
	// to a span. It's used to create globally unique identifiers for individual cleanup.
	// cleanupID is protected by mheap_.speciallock. It must only be incremented while holding
	// the lock. ID 0 is reserved. Users should increment first, then read the value.
	cleanupID uint64

	_ cpu.CacheLinePad

	immortalWeakHandles immortalWeakHandleMap

	unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
}
// mheap_ is the global heap. See the mheap type for locking rules.
var mheap_ mheap
// A heapArena stores metadata for a heap arena. heapArenas are stored
// outside of the Go heap and accessed via the mheap_.arenas index.
type heapArena struct {
	_ sys.NotInHeap

	// spans maps from virtual address page ID within this arena to *mspan.
	// For allocated spans, their pages map to the span itself.
	// For free spans, only the lowest and highest pages map to the span itself.
	// Internal pages map to an arbitrary span.
	// For pages that have never been allocated, spans entries are nil.
	//
	// Modifications are protected by mheap.lock. Reads can be
	// performed without locking, but ONLY from indexes that are
	// known to contain in-use or stack spans. This means there
	// must not be a safe-point between establishing that an
	// address is live and looking it up in the spans array.
	spans [pagesPerArena]*mspan

	// pageInUse is a bitmap that indicates which spans are in
	// state mSpanInUse. This bitmap is indexed by page number,
	// but only the bit corresponding to the first page in each
	// span is used.
	//
	// Reads and writes are atomic.
	pageInUse [pagesPerArena / 8]uint8

	// pageMarks is a bitmap that indicates which spans have any
	// marked objects on them. Like pageInUse, only the bit
	// corresponding to the first page in each span is used.
	//
	// Writes are done atomically during marking. Reads are
	// non-atomic and lock-free since they only occur during
	// sweeping (and hence never race with writes).
	//
	// This is used to quickly find whole spans that can be freed.
	//
	// TODO(austin): It would be nice if this was uint64 for
	// faster scanning, but we don't have 64-bit atomic bit
	// operations.
	pageMarks [pagesPerArena / 8]uint8

	// pageSpecials is a bitmap that indicates which spans have
	// specials (finalizers or other). Like pageInUse, only the bit
	// corresponding to the first page in each span is used.
	//
	// Writes are done atomically whenever a special is added to
	// a span and whenever the last special is removed from a span.
	// Reads are done atomically to find spans containing specials
	// during marking.
	pageSpecials [pagesPerArena / 8]uint8

	// pageUseSpanInlineMarkBits is a bitmap where each bit corresponds
	// to a span, as only spans one page in size can have inline mark bits.
	// The bit indicates that the span has a spanInlineMarkBits struct
	// stored directly at the top end of the span's memory.
	pageUseSpanInlineMarkBits [pagesPerArena / 8]uint8

	// checkmarks stores the debug.gccheckmark state. It is only
	// used if debug.gccheckmark > 0 or debug.checkfinalizers > 0.
	checkmarks *checkmarksMap

	// zeroedBase marks the first byte of the first page in this
	// arena which hasn't been used yet and is therefore already
	// zero. zeroedBase is relative to the arena base.
	// Increases monotonically until it hits heapArenaBytes.
	//
	// This field is sufficient to determine if an allocation
	// needs to be zeroed because the page allocator follows an
	// address-ordered first-fit policy.
	//
	// Read atomically and written with an atomic CAS.
	zeroedBase uintptr
}
// arenaHint is a hint for where to grow the heap arenas. See
// mheap_.arenaHints.
type arenaHint struct {
	_    sys.NotInHeap
	addr uintptr    // candidate address for the next arena reservation
	down bool       // presumably: grow toward lower addresses from addr — confirm in callers
	next *arenaHint // next hint in the arenaHints list
}
// An mspan is a run of pages.
//
// When a mspan is in the heap free treap, state == mSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
// If the mspan is in the heap scav treap, then in addition to the
// above scavenged == true. scavenged == false in all other cases.
//
// When a mspan is allocated, state == mSpanInUse or mSpanManual
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
// Every mspan is in one doubly-linked list, either in the mheap's
// busy list or one of the mcentral's span lists.
// An mspan representing actual memory has state mSpanInUse,
// mSpanManual, or mSpanFree. Transitions between these states are
// constrained as follows:
//
// - A span may transition from free to in-use or manual during any GC
// phase.
//
// - During sweeping (gcphase == _GCoff), a span may transition from
// in-use to free (as a result of sweeping) or manual to free (as a
// result of stacks being freed).
//
// - During GC (gcphase != _GCoff), a span *must not* transition from
// manual or in-use to free. Because concurrent GC may read a pointer
// and then look up its span, the span state must be monotonic.
//
// Setting mspan.state to mSpanInUse or mSpanManual must be done
// atomically and only after all other span fields are valid.
// Likewise, if inspecting a span is contingent on it being
// mSpanInUse, the state should be loaded atomically and checked
// before depending on other fields. This allows the garbage collector
// to safely deal with potentially invalid pointers, since resolving
// such pointers may race with a span being allocated.
//
// (The comment above documents the mspan type, declared further
// below; mSpanState is the allocation state it describes.)
type mSpanState uint8

const (
	mSpanDead   mSpanState = iota
	mSpanInUse             // allocated for garbage collected heap
	mSpanManual            // allocated for manual management (e.g., stack allocator)
)
// mSpanStateNames are the names of the span states, indexed by
// mSpanState. Keep in sync with the mSpanState constants above.
var mSpanStateNames = []string{
	"mSpanDead",
	"mSpanInUse",
	"mSpanManual",
}
// mSpanStateBox holds an atomic.Uint8 to provide atomic operations on
// an mSpanState. This is a separate type to disallow accidental comparison
// or assignment with mSpanState.
type mSpanStateBox struct {
	s atomic.Uint8 // the boxed mSpanState value; use get/set
}
// set atomically stores s into the box.
//
// It is nosplit to match get, below.
//
//go:nosplit
func (b *mSpanStateBox) set(s mSpanState) {
	b.s.Store(uint8(s))
}
// get atomically loads the boxed mSpanState.
//
// It is nosplit because it's called indirectly by typedmemclr,
// which must not be preempted.
//
//go:nosplit
func (b *mSpanStateBox) get() mSpanState {
	return mSpanState(b.s.Load())
}
type mspan struct {
	_    sys.NotInHeap
	next *mspan     // next span in list, or nil if none
	prev *mspan     // previous span in list, or nil if none
	list *mSpanList // For debugging.

	startAddr      uintptr   // address of first byte of span aka s.base()
	npages         uintptr   // number of pages in span
	manualFreeList gclinkptr // list of free objects in mSpanManual spans

	// freeindex is the slot index between 0 and nelems at which to begin scanning
	// for the next free object in this span.
	// Each allocation scans allocBits starting at freeindex until it encounters a 0
	// indicating a free object. freeindex is then adjusted so that subsequent scans begin
	// just past the newly discovered free object.
	//
	// If freeindex == nelems, this span has no free objects, though might have reusable objects.
	//
	// allocBits is a bitmap of objects in this span.
	// If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0
	// then object n is free;
	// otherwise, object n is allocated. Bits starting at nelems are
	// undefined and should never be referenced.
	//
	// Object n starts at address n*elemsize + (start << pageShift).
	freeindex uint16
	// TODO: Look up nelems from sizeclass and remove this field if it
	// helps performance.
	nelems uint16 // number of objects in the span.
	// freeIndexForScan is like freeindex, except that freeindex is
	// used by the allocator whereas freeIndexForScan is used by the
	// GC scanner. They are two fields so that the GC sees the object
	// is allocated only when the object and the heap bits are
	// initialized (see also the assignment of freeIndexForScan in
	// mallocgc, and issue 54596).
	freeIndexForScan uint16

	// Cache of the allocBits at freeindex. allocCache is shifted
	// such that the lowest bit corresponds to the bit freeindex.
	// allocCache holds the complement of allocBits, thus allowing
	// ctz (count trailing zero) to use it directly.
	// allocCache may contain bits beyond s.nelems; the caller must ignore
	// these.
	allocCache uint64

	// allocBits and gcmarkBits hold pointers to a span's mark and
	// allocation bits. The pointers are 8 byte aligned.
	// There are three arenas where this data is held.
	// free: Dirty arenas that are no longer accessed
	// and can be reused.
	// next: Holds information to be used in the next GC cycle.
	// current: Information being used during this GC cycle.
	// previous: Information being used during the last GC cycle.
	// A new GC cycle starts with the call to finishsweep_m.
	// finishsweep_m moves the previous arena to the free arena,
	// the current arena to the previous arena, and
	// the next arena to the current arena.
	// The next arena is populated as the spans request
	// memory to hold gcmarkBits for the next GC cycle as well
	// as allocBits for newly allocated spans.
	//
	// The pointer arithmetic is done "by hand" instead of using
	// arrays to avoid bounds checks along critical performance
	// paths.
	// The sweep will free the old allocBits and set allocBits to the
	// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
	// out memory.
	allocBits  *gcBits
	gcmarkBits *gcBits
	pinnerBits *gcBits // bitmap for pinned objects; accessed atomically

	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping
	// if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached
	// h->sweepgen is incremented by 2 after every GC
	sweepgen              uint32
	divMul                uint32        // for divide by elemsize
	allocCount            uint16        // number of allocated objects
	spanclass             spanClass     // size class and noscan (uint8)
	state                 mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods)
	needzero              uint8         // needs to be zeroed before allocation
	isUserArenaChunk      bool          // whether or not this span represents a user arena
	allocCountBeforeCache uint16        // a copy of allocCount that is stored just before this span is cached
	elemsize              uintptr       // computed from sizeclass or from npages
	limit                 uintptr       // end of data in span
	speciallock           mutex         // guards specials list and changes to pinnerBits
	specials              *special      // linked list of special records sorted by offset.
	userArenaChunkFree    addrRange     // interval for managing chunk allocation
	largeType             *_type        // malloc header for large objects.
}
// base returns the address of the first byte of the span.
func (s *mspan) base() uintptr {
	return s.startAddr
}
// recordspan adds a newly allocated span to h.allspans.
//
// This only happens the first time a span is allocated from
// mheap.spanalloc (it is not called when a span is reused).
//
// Write barriers are disallowed here because it can be called from
// gcWork when allocating new workbufs. However, because it's an
// indirect call from the fixalloc initializer, the compiler can't see
// this.
//
// The heap lock must be held.
//
//go:nowritebarrierrec
func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)

	assertLockHeld(&h.lock)

	if len(h.allspans) >= cap(h.allspans) {
		// Grow to max(64KiB worth of pointers, 1.5x the current
		// capacity).
		n := 64 * 1024 / goarch.PtrSize
		if n < cap(h.allspans)*3/2 {
			n = cap(h.allspans) * 3 / 2
		}
		// Build the new backing store manually (sysAlloc, not the
		// Go heap) since allspans must not be heap-allocated.
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys, "allspans array")
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h.allspans)
		sp.cap = n
		if len(h.allspans) > 0 {
			copy(new, h.allspans)
		}
		oldAllspans := h.allspans
		// Assign via notInHeapSlice to avoid a write barrier on
		// the slice's array pointer.
		*(*notInHeapSlice)(unsafe.Pointer(&h.allspans)) = *(*notInHeapSlice)(unsafe.Pointer(&new))
		if len(oldAllspans) != 0 {
			sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
		}
	}
	h.allspans = h.allspans[:len(h.allspans)+1]
	h.allspans[len(h.allspans)-1] = s
}
// A spanClass represents the size class and noscan-ness of a span.
//
// Each size class has a noscan spanClass and a scan spanClass. The
// noscan spanClass contains only noscan objects, which do not contain
// pointers and thus do not need to be scanned by the garbage
// collector.
//
// The low bit is the noscan flag; the remaining bits are the size
// class (see makeSpanClass).
type spanClass uint8

const (
	numSpanClasses = gc.NumSizeClasses << 1
	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
)
// makeSpanClass packs a size class and a noscan flag into a
// spanClass: the size class occupies the high bits and noscan the
// low bit.
func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
	sc := spanClass(sizeclass << 1)
	if noscan {
		sc |= 1
	}
	return sc
}
// sizeclass extracts the size class, dropping the noscan bit.
//
//go:nosplit
func (sc spanClass) sizeclass() int8 {
	return int8(sc / 2)
}
// noscan reports whether the low (noscan) bit is set.
//
//go:nosplit
func (sc spanClass) noscan() bool {
	return sc&1 == 1
}
// arenaIndex returns the index into mheap_.arenas of the arena
// containing metadata for p. This index combines an index into the
// L1 map and an index into the L2 map and should be used as
// mheap_.arenas[ai.l1()][ai.l2()].
//
// If p is outside the range of valid heap addresses, either l1() or
// l2() will be out of bounds.
//
// It is nosplit because it's called by spanOf and several other
// nosplit functions.
//
//go:nosplit
func arenaIndex(p uintptr) arenaIdx {
	return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
}
// arenaBase returns the low address of the region covered by heap
// arena i. It is the inverse of arenaIndex for arena-aligned
// addresses.
func arenaBase(i arenaIdx) uintptr {
	return arenaBaseOffset + heapArenaBytes*uintptr(i)
}
// arenaIdx is an index into mheap_.arenas; see arenaIndex.
type arenaIdx uint
// l1 returns the "l1" portion of an arenaIdx.
//
// Marked nosplit because it's called by spanOf and other nosplit
// functions.
//
//go:nosplit
func (i arenaIdx) l1() uint {
	if arenaL1Bits == 0 {
		// Let the compiler optimize this away if there's no
		// L1 map.
		return 0
	} else {
		return uint(i) >> arenaL1Shift
	}
}
// l2 returns the "l2" portion of an arenaIdx.
//
// Marked nosplit because it's called by spanOf and other nosplit
// functions.
//
//go:nosplit
func (i arenaIdx) l2() uint {
	if arenaL1Bits == 0 {
		// No L1 map: the whole index is the L2 index.
		return uint(i)
	} else {
		return uint(i) & (1<<arenaL2Bits - 1)
	}
}
// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into mSpanManual spans.
// Non-preemptible because it is used by write barriers.
//
//go:nowritebarrier
//go:nosplit
func inheap(b uintptr) bool {
	// Delegates entirely to spanOfHeap, which performs the state
	// and bounds checks.
	return spanOfHeap(b) != nil
}
// inHeapOrStack is a variant of inheap that returns true for pointers
// into any allocated heap span.
//
//go:nowritebarrier
//go:nosplit
func inHeapOrStack(b uintptr) bool {
	span := spanOf(b)
	if span == nil || b < span.base() {
		return false
	}
	state := span.state.get()
	if state != mSpanInUse && state != mSpanManual {
		return false
	}
	return b < span.limit
}
// spanOf returns the span of p. If p does not point into the heap
// arena or no span has ever contained p, spanOf returns nil.
//
// If p does not point to allocated memory, this may return a non-nil
// span that does *not* contain p. If this is a possibility, the
// caller should either call spanOfHeap or check the span bounds
// explicitly.
//
// Must be nosplit because it has callers that are nosplit.
//
//go:nosplit
func spanOf(p uintptr) *mspan {
	// This function looks big, but we use a lot of constant
	// folding around arenaL1Bits to get it under the inlining
	// budget. Also, many of the checks here are safety checks
	// that Go needs to do anyway, so the generated code is quite
	// short.
	ri := arenaIndex(p)
	if arenaL1Bits == 0 {
		// If there's no L1, then ri.l1() can't be out of bounds but ri.l2() can.
		if ri.l2() >= uint(len(mheap_.arenas[0])) {
			return nil
		}
	} else {
		// If there's an L1, then ri.l1() can be out of bounds but ri.l2() can't.
		if ri.l1() >= uint(len(mheap_.arenas)) {
			return nil
		}
	}
	l2 := mheap_.arenas[ri.l1()]
	if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1.
		return nil
	}
	ha := l2[ri.l2()]
	if ha == nil {
		return nil
	}
	return ha.spans[(p/pageSize)%pagesPerArena]
}
// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into an allocated heap arena. No bounds or nil checks
// are performed.
//
// Must be nosplit because it has callers that are nosplit.
//
//go:nosplit
func spanOfUnchecked(p uintptr) *mspan {
	ai := arenaIndex(p)
	return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
}
// spanOfHeap is like spanOf, but returns nil if p does not point to a
// heap object.
//
// Must be nosplit because it has callers that are nosplit.
//
//go:nosplit
func spanOfHeap(p uintptr) *mspan {
	s := spanOf(p)
	if s == nil {
		// No span has ever contained p.
		return nil
	}
	// We don't trust this pointer, so check the span's state first
	// to synchronize with span initialization.
	if s.state.get() != mSpanInUse {
		return nil
	}
	// The span pointer could still be stale, so also verify that p
	// lies within the span's bounds.
	if p < s.base() || p >= s.limit {
		return nil
	}
	return s
}
// pageIndexOf returns the arena, page index, and page mask for pointer p.
// The caller must ensure p is in the heap.
func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8) {
	ai := arenaIndex(p)
	arena = mheap_.arenas[ai.l1()][ai.l2()]
	// Each byte of pageInUse covers 8 pages; select the byte and
	// the bit within it for p's page.
	page := p / pageSize
	pageIdx = (page / 8) % uintptr(len(arena.pageInUse))
	pageMask = uint8(1) << (page % 8)
	return arena, pageIdx, pageMask
}
// heapArenaOf returns the heap arena for p, if one exists.
func heapArenaOf(p uintptr) *heapArena {
	ai := arenaIndex(p)
	// Bounds-check the arena index. Exactly one of the two levels
	// can be out of range, depending on whether an L1 map exists
	// (arenaL1Bits is a constant, so one branch folds away).
	if arenaL1Bits == 0 {
		if ai.l2() >= uint(len(mheap_.arenas[0])) {
			return nil
		}
	} else if ai.l1() >= uint(len(mheap_.arenas)) {
		return nil
	}
	l2 := mheap_.arenas[ai.l1()]
	if arenaL1Bits != 0 && l2 == nil {
		// A nil L2 slice can only occur when an L1 map exists.
		return nil
	}
	return l2[ai.l2()]
}
// Initialize the heap.
//
// Sets up lock ranking, the fixed-size allocators for all heap-internal
// structures (mspans, mcaches, special records, arena hints), the
// per-size-class mcentrals, and the page allocator.
func (h *mheap) init() {
	lockInit(&h.lock, lockRankMheap)
	lockInit(&h.speciallock, lockRankMheapSpecial)

	// Fixed-size allocators for runtime-internal structures. Each is
	// charged to the appropriate sysMemStat.
	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.spanSPMCAlloc.init(unsafe.Sizeof(spanSPMC{}), nil, nil, &memstats.gcMiscSys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialCleanupAlloc.init(unsafe.Sizeof(specialCleanup{}), nil, nil, &memstats.other_sys)
	h.specialCheckFinalizerAlloc.init(unsafe.Sizeof(specialCheckFinalizer{}), nil, nil, &memstats.other_sys)
	h.specialTinyBlockAlloc.init(unsafe.Sizeof(specialTinyBlock{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
	h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
	h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys)
	h.specialSecretAlloc.init(unsafe.Sizeof(specialSecret{}), nil, nil, &memstats.other_sys)
	h.specialWeakHandleAlloc.init(unsafe.Sizeof(specialWeakHandle{}), nil, nil, &memstats.gcMiscSys)
	h.specialBubbleAlloc.init(unsafe.Sizeof(specialBubble{}), nil, nil, &memstats.other_sys)
	h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)

	// Don't zero mspan allocations. Background sweeping can
	// inspect a span concurrently with allocating it, so it's
	// important that the span's sweepgen survive across freeing
	// and re-allocating a span to prevent background sweeping
	// from improperly cas'ing it from 0.
	//
	// This is safe because mspan contains no heap pointers.
	h.spanalloc.zero = false

	// h->mapcache needs no init

	// One mcentral per span class.
	for i := range h.central {
		h.central[i].mcentral.init(spanClass(i))
	}

	h.pages.init(&h.lock, &memstats.gcMiscSys, false)
	xRegInitAlloc()
}
// reclaim sweeps and reclaims at least npage pages into the heap.
// It is called before allocating npage pages to keep growth in check.
//
// reclaim implements the page-reclaimer half of the sweeper.
//
// h.lock must NOT be held.
func (h *mheap) reclaim(npage uintptr) {
	// TODO(austin): Half of the time spent freeing spans is in
	// locking/unlocking the heap (even with low contention). We
	// could make the slow path here several times faster by
	// batching heap frees.

	// Bail early if there's no more reclaim work. 1<<63 is the
	// sentinel value reclaimIndex is set to when all arenas have
	// been scanned.
	if h.reclaimIndex.Load() >= 1<<63 {
		return
	}

	// Disable preemption so the GC can't start while we're
	// sweeping, so we can read h.sweepArenas, and so
	// traceGCSweepStart/Done pair on the P.
	mp := acquirem()

	trace := traceAcquire()
	if trace.ok() {
		trace.GCSweepStart()
		traceRelease(trace)
	}

	arenas := h.sweepArenas
	locked := false
	for npage > 0 {
		// Pull from accumulated credit first. Credit is pages other
		// reclaimers freed beyond their own need.
		if credit := h.reclaimCredit.Load(); credit > 0 {
			take := credit
			if take > npage {
				// Take only what we need.
				take = npage
			}
			if h.reclaimCredit.CompareAndSwap(credit, credit-take) {
				npage -= take
			}
			// On CAS failure, retry the whole credit check.
			continue
		}

		// Claim a chunk of work via an atomic fetch-and-add on the
		// global reclaim cursor.
		idx := uintptr(h.reclaimIndex.Add(pagesPerReclaimerChunk) - pagesPerReclaimerChunk)
		if idx/pagesPerArena >= uintptr(len(arenas)) {
			// Page reclaiming is done.
			h.reclaimIndex.Store(1 << 63)
			break
		}

		if !locked {
			// Lock the heap for reclaimChunk.
			lock(&h.lock)
			locked = true
		}

		// Scan this chunk.
		nfound := h.reclaimChunk(arenas, idx, pagesPerReclaimerChunk)
		if nfound <= npage {
			npage -= nfound
		} else {
			// Put spare pages toward global credit.
			h.reclaimCredit.Add(nfound - npage)
			npage = 0
		}
	}
	if locked {
		unlock(&h.lock)
	}

	trace = traceAcquire()
	if trace.ok() {
		trace.GCSweepDone()
		traceRelease(trace)
	}
	releasem(mp)
}
// reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n).
// It returns the number of pages returned to the heap.
//
// h.lock must be held and the caller must be non-preemptible. Note: h.lock may be
// temporarily unlocked and re-locked in order to do sweeping or if tracing is
// enabled.
func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
	// The heap lock must be held because this accesses the
	// heapArena.spans arrays using potentially non-live pointers.
	// In particular, if a span were freed and merged concurrently
	// with this probing heapArena.spans, it would be possible to
	// observe arbitrary, stale span pointers.
	assertLockHeld(&h.lock)

	n0 := n
	var nFreed uintptr
	sl := sweep.active.begin()
	if !sl.valid {
		// Sweeping is already done for this cycle.
		return 0
	}
	for n > 0 {
		ai := arenas[pageIdx/pagesPerArena]
		ha := h.arenas[ai.l1()][ai.l2()]

		// Get a chunk of the bitmap to work on.
		arenaPage := uint(pageIdx % pagesPerArena)
		inUse := ha.pageInUse[arenaPage/8:]
		marked := ha.pageMarks[arenaPage/8:]
		if uintptr(len(inUse)) > n/8 {
			inUse = inUse[:n/8]
			marked = marked[:n/8]
		}

		// Scan this bitmap chunk for spans that are in-use
		// but have no marked objects on them.
		for i := range inUse {
			inUseUnmarked := atomic.Load8(&inUse[i]) &^ marked[i]
			if inUseUnmarked == 0 {
				continue
			}

			for j := uint(0); j < 8; j++ {
				if inUseUnmarked&(1<<j) != 0 {
					s := ha.spans[arenaPage+uint(i)*8+j]
					if s, ok := sl.tryAcquire(s); ok {
						npages := s.npages
						// s.sweep may block; drop the heap lock around it.
						unlock(&h.lock)
						if s.sweep(false) {
							nFreed += npages
						}
						lock(&h.lock)
						// Reload inUse. It's possible nearby
						// spans were freed when we dropped the
						// lock and we don't want to get stale
						// pointers from the spans array.
						inUseUnmarked = atomic.Load8(&inUse[i]) &^ marked[i]
					}
				}
			}
		}

		// Advance.
		pageIdx += uintptr(len(inUse) * 8)
		n -= uintptr(len(inUse) * 8)
	}
	sweep.active.end(sl)

	trace := traceAcquire()
	if trace.ok() {
		unlock(&h.lock)
		// Account for pages scanned but not reclaimed.
		trace.GCSweepSpan((n0 - nFreed) * pageSize)
		traceRelease(trace)
		lock(&h.lock)
	}

	assertLockHeld(&h.lock) // Must be locked on return.
	return nFreed
}
// spanAllocType represents the type of allocation to make, or
// the type of allocation to be freed.
//
// The type determines how a span is accounted in heap statistics and
// whether it is GC-managed (heap) or manually managed (stack, workbuf).
type spanAllocType uint8

const (
	spanAllocHeap    spanAllocType = iota // heap span
	spanAllocStack                        // stack span
	spanAllocWorkBuf                      // work buf span
)
// manual returns true if the span allocation is manually managed
// (i.e. anything other than a GC'd heap span).
func (s spanAllocType) manual() bool {
	switch s {
	case spanAllocHeap:
		return false
	default:
		return true
	}
}
// alloc allocates a new span of npage pages from the GC'd heap.
//
// spanclass indicates the span's size class and scannability.
//
// Returns a span that has been fully initialized. span.needzero indicates
// whether the span has been zeroed. Note that it may not be.
func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		// To prevent excessive heap growth, before allocating n pages
		// we need to sweep and reclaim at least n pages.
		if !isSweepDone() {
			h.reclaim(npages)
		}
		s = h.allocSpan(npages, spanAllocHeap, spanclass)
	})
	return s
}
// allocManual allocates a manually-managed span of npage pages.
// allocManual returns nil if allocation fails.
//
// allocManual adds the bytes used to *stat, which should be a
// memstats in-use field. Unlike allocations in the GC'd heap, the
// allocation does *not* count toward heapInUse.
//
// The memory backing the returned span may not be zeroed if
// span.needzero is set.
//
// allocManual must be called on the system stack because it may
// acquire the heap lock via allocSpan. See mheap for details.
//
// If new code is written to call allocManual, do NOT use an
// existing spanAllocType value and instead declare a new one.
//
//go:systemstack
func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
	// Guard against callers passing spanAllocHeap here; heap spans
	// must go through alloc so they get GC accounting.
	if !typ.manual() {
		throw("manual span allocation called with non-manually-managed type")
	}
	// spanclass 0 is ignored for manual spans.
	return h.allocSpan(npages, typ, 0)
}
// setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
// is s.
func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
	firstPage := base / pageSize
	ai := arenaIndex(base)
	ha := h.arenas[ai.l1()][ai.l2()]
	for off := uintptr(0); off < npage; off++ {
		slot := (firstPage + off) % pagesPerArena
		if slot == 0 {
			// Wrapped around to the start of a new arena; fetch
			// that arena's metadata before writing.
			ai = arenaIndex(base + off*pageSize)
			ha = h.arenas[ai.l1()][ai.l2()]
		}
		ha.spans[slot] = s
	}
}
// allocNeedsZero checks if the region of address space [base, base+npage*pageSize),
// assumed to be allocated, needs to be zeroed, updating heap arena metadata for
// future allocations.
//
// This must be called each time pages are allocated from the heap, even if the page
// allocator can otherwise prove the memory it's allocating is already zero because
// they're fresh from the operating system. It updates heapArena metadata that is
// critical for future page allocations.
//
// There are no locking constraints on this method.
func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
	// The region may straddle multiple arenas; process one arena per
	// loop iteration.
	for npage > 0 {
		ai := arenaIndex(base)
		ha := h.arenas[ai.l1()][ai.l2()]

		// zeroedBase is the low-water mark below which this arena is
		// known to have been used (and thus possibly dirtied).
		zeroedBase := atomic.Loaduintptr(&ha.zeroedBase)
		arenaBase := base % heapArenaBytes
		if arenaBase < zeroedBase {
			// We extended into the non-zeroed part of the
			// arena, so this region needs to be zeroed before use.
			//
			// zeroedBase is monotonically increasing, so if we see this now then
			// we can be sure we need to zero this memory region.
			//
			// We still need to update zeroedBase for this arena, and
			// potentially more arenas.
			needZero = true
		}
		// We may observe arenaBase > zeroedBase if we're racing with one or more
		// allocations which are acquiring memory directly before us in the address
		// space. But, because we know no one else is acquiring *this* memory, it's
		// still safe to not zero.

		// Compute how far into the arena we extend into, capped
		// at heapArenaBytes.
		arenaLimit := arenaBase + npage*pageSize
		if arenaLimit > heapArenaBytes {
			arenaLimit = heapArenaBytes
		}
		// Increase ha.zeroedBase so it's >= arenaLimit.
		// We may be racing with other updates.
		for arenaLimit > zeroedBase {
			if atomic.Casuintptr(&ha.zeroedBase, zeroedBase, arenaLimit) {
				break
			}
			zeroedBase = atomic.Loaduintptr(&ha.zeroedBase)
			// Double check basic conditions of zeroedBase.
			if zeroedBase <= arenaLimit && zeroedBase > arenaBase {
				// The zeroedBase moved into the space we were trying to
				// claim. That's very bad, and indicates someone allocated
				// the same region we did.
				throw("potentially overlapping in-use allocations detected")
			}
		}

		// Move base forward and subtract from npage to move into
		// the next arena, or finish.
		base += arenaLimit - arenaBase
		npage -= (arenaLimit - arenaBase) / pageSize
	}
	return
}
// tryAllocMSpan attempts to allocate an mspan object from
// the P-local cache, but may fail.
//
// h.lock need not be held.
//
// This caller must ensure that its P won't change underneath
// it during this function. Currently to ensure that we enforce
// that the function is run on the system stack, because that's
// the only place it is used now. In the future, this requirement
// may be relaxed if its use is necessary elsewhere.
//
//go:systemstack
func (h *mheap) tryAllocMSpan() *mspan {
	pp := getg().m.p.ptr()
	// Without a P there is no cache to pull from.
	if pp == nil {
		return nil
	}
	n := pp.mspancache.len
	if n == 0 {
		// Cache is empty; caller must fall back to the locked path.
		return nil
	}
	// Pop the last cached entry.
	s := pp.mspancache.buf[n-1]
	pp.mspancache.len = n - 1
	return s
}
// allocMSpanLocked allocates an mspan object.
//
// h.lock must be held.
//
// allocMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
// Running on the system stack also ensures that we won't
// switch Ps during this function. See tryAllocMSpan for details.
//
//go:systemstack
func (h *mheap) allocMSpanLocked() *mspan {
	assertLockHeld(&h.lock)

	pp := getg().m.p.ptr()
	if pp == nil {
		// No P: allocate straight from the fixed-size allocator.
		return (*mspan)(h.spanalloc.alloc())
	}
	if pp.mspancache.len == 0 {
		// Refill the P-local cache to half capacity while we hold
		// the heap lock anyway.
		const refillCount = len(pp.mspancache.buf) / 2
		for j := 0; j < refillCount; j++ {
			pp.mspancache.buf[j] = (*mspan)(h.spanalloc.alloc())
		}
		pp.mspancache.len = refillCount
	}
	// Pop the last cached entry.
	top := pp.mspancache.len - 1
	s := pp.mspancache.buf[top]
	pp.mspancache.len = top
	return s
}
// freeMSpanLocked free an mspan object.
//
// h.lock must be held.
//
// freeMSpanLocked must be called on the system stack because
// its caller holds the heap lock. See mheap for details.
// Running on the system stack also ensures that we won't
// switch Ps during this function. See tryAllocMSpan for details.
//
//go:systemstack
func (h *mheap) freeMSpanLocked(s *mspan) {
	assertLockHeld(&h.lock)

	pp := getg().m.p.ptr()
	// If there's no P or its cache is full, release the mspan to the
	// heap's fixed-size allocator instead.
	if pp == nil || pp.mspancache.len >= len(pp.mspancache.buf) {
		h.spanalloc.free(unsafe.Pointer(s))
		return
	}
	// Otherwise stash it in the P-local cache for cheap reuse.
	pp.mspancache.buf[pp.mspancache.len] = s
	pp.mspancache.len++
}
// allocSpan allocates an mspan which owns npages worth of memory.
//
// If typ.manual() == false, allocSpan allocates a heap span of class spanclass
// and updates heap accounting. If manual == true, allocSpan allocates a
// manually-managed span (spanclass is ignored), and the caller is
// responsible for any accounting related to its use of the span. Either
// way, allocSpan will atomically add the bytes in the newly allocated
// span to *sysStat.
//
// The returned span is fully initialized.
//
// h.lock must not be held.
//
// allocSpan must be called on the system stack both because it acquires
// the heap lock and because it must block GC transitions.
//
//go:systemstack
func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
	// Function-global state.
	gp := getg()
	base, scav := uintptr(0), uintptr(0)
	growth := uintptr(0)

	// On some platforms we need to provide physical page aligned stack
	// allocations. Where the page size is less than the physical page
	// size, we already manage to do this by default.
	needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize

	// If the allocation is small enough, try the page cache!
	// The page cache does not support aligned allocations, so we cannot use
	// it if we need to provide a physical page aligned stack allocation.
	pp := gp.m.p.ptr()
	if !needPhysPageAlign && pp != nil && npages < pageCachePages/4 {
		c := &pp.pcache

		// If the cache is empty, refill it.
		if c.empty() {
			lock(&h.lock)
			*c = h.pages.allocToCache()
			unlock(&h.lock)
		}

		// Try to allocate from the cache.
		base, scav = c.alloc(npages)
		if base != 0 {
			s = h.tryAllocMSpan()
			if s != nil {
				// Fast path: got both pages and an mspan without
				// touching the heap lock.
				goto HaveSpan
			}
			// We have a base but no mspan, so we need
			// to lock the heap.
		}
	}

	// For one reason or another, we couldn't get the
	// whole job done without the heap lock.
	lock(&h.lock)

	if needPhysPageAlign {
		// Overallocate by a physical page to allow for later alignment.
		extraPages := physPageSize / pageSize

		// Find a big enough region first, but then only allocate the
		// aligned portion. We can't just allocate and then free the
		// edges because we need to account for scavenged memory, and
		// that's difficult with alloc.
		//
		// Note that we skip updates to searchAddr here. It's OK if
		// it's stale and higher than normal; it'll operate correctly,
		// just come with a performance cost.
		base, _ = h.pages.find(npages + extraPages)
		if base == 0 {
			var ok bool
			growth, ok = h.grow(npages + extraPages)
			if !ok {
				unlock(&h.lock)
				return nil
			}
			base, _ = h.pages.find(npages + extraPages)
			if base == 0 {
				throw("grew heap, but no adequate free space found")
			}
		}
		base = alignUp(base, physPageSize)
		scav = h.pages.allocRange(base, npages)
	}

	if base == 0 {
		// Try to acquire a base address.
		base, scav = h.pages.alloc(npages)
		if base == 0 {
			var ok bool
			growth, ok = h.grow(npages)
			if !ok {
				unlock(&h.lock)
				return nil
			}
			base, scav = h.pages.alloc(npages)
			if base == 0 {
				throw("grew heap, but no adequate free space found")
			}
		}
	}
	if s == nil {
		// We failed to get an mspan earlier, so grab
		// one now that we have the heap lock.
		s = h.allocMSpanLocked()
	}
	unlock(&h.lock)

HaveSpan:
	// At this point, both base != 0 and s != nil, and the heap lock
	// is no longer held.

	// Decide if we need to scavenge in response to what we just allocated.
	// Specifically, we track the maximum amount of memory to scavenge of all
	// the alternatives below, assuming that the maximum satisfies *all*
	// conditions we check (e.g. if we need to scavenge X to satisfy the
	// memory limit and Y to satisfy heap-growth scavenging, and Y > X, then
	// it's fine to pick Y, because the memory limit is still satisfied).
	//
	// It's fine to do this after allocating because we expect any scavenged
	// pages not to get touched until we return. Simultaneously, it's important
	// to do this before calling sysUsed because that may commit address space.
	bytesToScavenge := uintptr(0)
	forceScavenge := false
	if limit := gcController.memoryLimit.Load(); !gcCPULimiter.limiting() {
		// Assist with scavenging to maintain the memory limit by the amount
		// that we expect to page in.
		inuse := gcController.mappedReady.Load()
		// Be careful about overflow, especially with uintptrs. Even on 32-bit platforms
		// someone can set a really big memory limit that isn't math.MaxInt64.
		if uint64(scav)+inuse > uint64(limit) {
			bytesToScavenge = uintptr(uint64(scav) + inuse - uint64(limit))
			forceScavenge = true
		}
	}
	if goal := scavenge.gcPercentGoal.Load(); goal != ^uint64(0) && growth > 0 {
		// We just caused a heap growth, so scavenge down what will soon be used.
		// By scavenging inline we deal with the failure to allocate out of
		// memory fragments by scavenging the memory fragments that are least
		// likely to be re-used.
		//
		// Only bother with this because we're not using a memory limit. We don't
		// care about heap growths as long as we're under the memory limit, and the
		// previous check for scaving already handles that.
		if retained := heapRetained(); retained+uint64(growth) > goal {
			// The scavenging algorithm requires the heap lock to be dropped so it
			// can acquire it only sparingly. This is a potentially expensive operation
			// so it frees up other goroutines to allocate in the meanwhile. In fact,
			// they can make use of the growth we just created.
			todo := growth
			if overage := uintptr(retained + uint64(growth) - goal); todo > overage {
				todo = overage
			}
			if todo > bytesToScavenge {
				bytesToScavenge = todo
			}
		}
	}
	// There are a few very limited circumstances where we won't have a P here.
	// It's OK to simply skip scavenging in these cases. Something else will notice
	// and pick up the tab.
	var now int64
	if pp != nil && bytesToScavenge > 0 {
		// Measure how long we spent scavenging and add that measurement to the assist
		// time so we can track it for the GC CPU limiter.
		//
		// Limiter event tracking might be disabled if we end up here
		// while on a mark worker.
		start := nanotime()
		track := pp.limiterEvent.start(limiterEventScavengeAssist, start)

		// Scavenge, but back out if the limiter turns on.
		released := h.pages.scavenge(bytesToScavenge, func() bool {
			return gcCPULimiter.limiting()
		}, forceScavenge)

		mheap_.pages.scav.releasedEager.Add(released)

		// Finish up accounting.
		now = nanotime()
		if track {
			pp.limiterEvent.stop(limiterEventScavengeAssist, now)
		}
		scavenge.assistTime.Add(now - start)
	}

	// Initialize the span.
	h.initSpan(s, typ, spanclass, base, npages, scav)

	if valgrindenabled {
		valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize)
	}

	// Commit and account for any scavenged memory that the span now owns.
	nbytes := npages * pageSize
	if scav != 0 {
		// sysUsed all the pages that are actually available
		// in the span since some of them might be scavenged.
		sysUsed(unsafe.Pointer(base), nbytes, scav)
		gcController.heapReleased.add(-int64(scav))
	}

	// Update stats.
	gcController.heapFree.add(-int64(nbytes - scav))
	if typ == spanAllocHeap {
		gcController.heapInUse.add(int64(nbytes))
	}
	// Update consistent stats.
	stats := memstats.heapStats.acquire()
	atomic.Xaddint64(&stats.committed, int64(scav))
	atomic.Xaddint64(&stats.released, -int64(scav))
	switch typ {
	case spanAllocHeap:
		atomic.Xaddint64(&stats.inHeap, int64(nbytes))
	case spanAllocStack:
		atomic.Xaddint64(&stats.inStacks, int64(nbytes))
	case spanAllocWorkBuf:
		atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
	}
	memstats.heapStats.release()

	// Trace the span alloc.
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.SpanAlloc(s)
			traceRelease(trace)
		}
	}
	return s
}
// initSpan initializes a blank span s which will represent the range
// [base, base+npages*pageSize). typ is the type of span being allocated.
func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages, scav uintptr) {
	// At this point, both s != nil and base != 0, and the heap
	// lock is no longer held. Initialize the span.
	s.init(base, npages)

	// Always call allocNeedsZero to update the arena's zeroedBase watermark
	// and determine if the memory is considered dirty.
	needZero := h.allocNeedsZero(base, npages)
	// If these pages were scavenged (returned to the OS), the kernel guarantees
	// they will be zero-filled on next use (fault-in), so we can treat them as
	// already zeroed and skip explicit clearing.
	if (needZeroAfterSysUnused() || scav != npages*pageSize) && needZero {
		s.needzero = 1
	}
	nbytes := npages * pageSize
	if typ.manual() {
		// Manually-managed spans carry no size-class layout.
		s.manualFreeList = 0
		s.nelems = 0
		s.state.set(mSpanManual)
	} else {
		// We must set span properties before the span is published anywhere
		// since we're not holding the heap lock.
		s.spanclass = spanclass
		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
			// Size class 0 means a large object: one element spanning
			// the whole span.
			s.elemsize = nbytes
			s.nelems = 1
			s.divMul = 0
		} else {
			s.elemsize = uintptr(gc.SizeClassToSize[sizeclass])
			if goexperiment.GreenTeaGC {
				// Carve reserved metadata space out of the span before
				// computing how many elements fit.
				var reserve uintptr
				if gcUsesSpanInlineMarkBits(s.elemsize) {
					// Reserve space for the inline mark bits.
					reserve += unsafe.Sizeof(spanInlineMarkBits{})
				}
				if heapBitsInSpan(s.elemsize) && !s.spanclass.noscan() {
					// Reserve space for the pointer/scan bitmap at the end.
					reserve += nbytes / goarch.PtrSize / 8
				}
				s.nelems = uint16((nbytes - reserve) / s.elemsize)
			} else {
				if !s.spanclass.noscan() && heapBitsInSpan(s.elemsize) {
					// Reserve space for the pointer/scan bitmap at the end.
					s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
				} else {
					s.nelems = uint16(nbytes / s.elemsize)
				}
			}
			s.divMul = gc.SizeClassToDivMagic[sizeclass]
		}

		// Initialize mark and allocation structures.
		s.freeindex = 0
		s.freeIndexForScan = 0
		s.allocCache = ^uint64(0) // all 1s indicating all free.
		s.gcmarkBits = newMarkBits(uintptr(s.nelems))
		s.allocBits = newAllocBits(uintptr(s.nelems))

		// Adjust s.limit down to the object-containing part of the span.
		s.limit = s.base() + s.elemsize*uintptr(s.nelems)

		// It's safe to access h.sweepgen without the heap lock because it's
		// only ever updated with the world stopped and we run on the
		// systemstack which blocks a STW transition.
		atomic.Store(&s.sweepgen, h.sweepgen)

		// Now that the span is filled in, set its state. This
		// is a publication barrier for the other fields in
		// the span. While valid pointers into this span
		// should never be visible until the span is returned,
		// if the garbage collector finds an invalid pointer,
		// access to the span may race with initialization of
		// the span. We resolve this race by atomically
		// setting the state after the span is fully
		// initialized, and atomically checking the state in
		// any situation where a pointer is suspect.
		s.state.set(mSpanInUse)
	}

	// Publish the span in various locations.

	// This is safe to call without the lock held because the slots
	// related to this span will only ever be read or modified by
	// this thread until pointers into the span are published (and
	// we execute a publication barrier at the end of this function
	// before that happens) or pageInUse is updated.
	h.setSpans(s.base(), npages, s)

	if !typ.manual() {
		// Mark in-use span in arena page bitmap.
		//
		// This publishes the span to the page sweeper, so
		// it's imperative that the span be completely initialized
		// prior to this line.
		arena, pageIdx, pageMask := pageIndexOf(s.base())
		atomic.Or8(&arena.pageInUse[pageIdx], pageMask)

		// Mark packed span.
		if gcUsesSpanInlineMarkBits(s.elemsize) {
			atomic.Or8(&arena.pageUseSpanInlineMarkBits[pageIdx], pageMask)
		}

		// Update related page sweeper stats.
		h.pagesInUse.Add(npages)
	}

	// Make sure the newly allocated span will be observed
	// by the GC before pointers into the span are published.
	publicationBarrier()
}
// Try to add at least npage pages of memory to the heap,
// returning how much the heap grew by and whether it worked.
//
// h.lock must be held.
func (h *mheap) grow(npage uintptr) (uintptr, bool) {
	assertLockHeld(&h.lock)

	// firstGrow is true on the very first growth, when heap-base
	// randomization (if enabled) must be applied.
	firstGrow := h.curArena.base == 0

	// We must grow the heap in whole palloc chunks.
	// We call sysMap below but note that because we
	// round up to pallocChunkPages which is on the order
	// of MiB (generally >= to the huge page size) we
	// won't be calling it too much.
	ask := alignUp(npage, pallocChunkPages) * pageSize

	totalGrowth := uintptr(0)
	// This may overflow because ask could be very large
	// and is otherwise unrelated to h.curArena.base.
	end := h.curArena.base + ask
	nBase := alignUp(end, physPageSize)
	if nBase > h.curArena.end || /* overflow */ end < h.curArena.base {
		// Not enough room in the current arena. Allocate more
		// arena space. This may not be contiguous with the
		// current arena, so we have to request the full ask.
		av, asize := h.sysAlloc(ask, &h.arenaHints, &h.heapArenas)
		if av == nil {
			inUse := gcController.heapFree.load() + gcController.heapReleased.load() + gcController.heapInUse.load()
			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", inUse, " in use)\n")
			return 0, false
		}

		if uintptr(av) == h.curArena.end {
			// The new space is contiguous with the old
			// space, so just extend the current space.
			h.curArena.end = uintptr(av) + asize
		} else {
			// The new space is discontiguous. Track what
			// remains of the current space and switch to
			// the new space. This should be rare.
			if size := h.curArena.end - h.curArena.base; size != 0 {
				// Transition this space from Reserved to Prepared and mark it
				// as released since we'll be able to start using it after updating
				// the page allocator and releasing the lock at any time.
				sysMap(unsafe.Pointer(h.curArena.base), size, &gcController.heapReleased, "heap")
				// Update stats.
				stats := memstats.heapStats.acquire()
				atomic.Xaddint64(&stats.released, int64(size))
				memstats.heapStats.release()
				// Update the page allocator's structures to make this
				// space ready for allocation.
				h.pages.grow(h.curArena.base, size)
				totalGrowth += size
			}
			// Switch to the new space.
			h.curArena.base = uintptr(av)
			h.curArena.end = uintptr(av) + asize

			if firstGrow && randomizeHeapBase {
				// The top heapAddrBits-logHeapArenaBytes are randomized, we now
				// want to randomize the next
				// logHeapArenaBytes-log2(pallocChunkBytes) bits, making sure
				// h.curArena.base is aligned to pallocChunkBytes.
				bits := logHeapArenaBytes - logPallocChunkBytes
				offset := nextHeapRandBits(bits)
				h.curArena.base = alignDown(h.curArena.base|(offset<<logPallocChunkBytes), pallocChunkBytes)
			}
		}

		// Recalculate nBase.
		// We know this won't overflow, because sysAlloc returned
		// a valid region starting at h.curArena.base which is at
		// least ask bytes in size.
		nBase = alignUp(h.curArena.base+ask, physPageSize)
	}

	// Grow into the current arena.
	v := h.curArena.base
	h.curArena.base = nBase

	// Transition the space we're going to use from Reserved to Prepared.
	//
	// The allocation is always aligned to the heap arena
	// size which is always > physPageSize, so its safe to
	// just add directly to heapReleased.
	sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased, "heap")

	// The memory just allocated counts as both released
	// and idle, even though it's not yet backed by spans.
	stats := memstats.heapStats.acquire()
	atomic.Xaddint64(&stats.released, int64(nBase-v))
	memstats.heapStats.release()

	// Update the page allocator's structures to make this
	// space ready for allocation.
	h.pages.grow(v, nBase-v)
	totalGrowth += nBase - v

	if firstGrow && randomizeHeapBase {
		// The top heapAddrBits-log2(pallocChunkBytes) bits are now randomized,
		// we finally want to randomize the next
		// log2(pallocChunkBytes)-log2(pageSize) bits, while maintaining
		// alignment to pageSize. We do this by calculating a random number of
		// pages into the current arena, and marking them as allocated. The
		// address of the next available page becomes our fully randomized base
		// heap address.
		randOffset := nextHeapRandBits(logPallocChunkBytes)
		randNumPages := alignDown(randOffset, pageSize) / pageSize
		if randNumPages != 0 {
			h.pages.markRandomPaddingPages(v, randNumPages)
		}
	}

	return totalGrowth, true
}
// Free the span back into the heap.
//
// Runs on the system stack because it acquires the heap lock;
// also notifies the sanitizers (msan/asan/valgrind) that the
// span's memory is no longer in use.
func (h *mheap) freeSpan(s *mspan) {
	systemstack(func() {
		// Trace the span free.
		if traceAllocFreeEnabled() {
			trace := traceAcquire()
			if trace.ok() {
				trace.SpanFree(s)
				traceRelease(trace)
			}
		}

		lock(&h.lock)
		if msanenabled {
			// Tell msan that this entire span is no longer in use.
			base := unsafe.Pointer(s.base())
			bytes := s.npages << gc.PageShift
			msanfree(base, bytes)
		}
		if asanenabled {
			// Tell asan that this entire span is no longer in use.
			base := unsafe.Pointer(s.base())
			bytes := s.npages << gc.PageShift
			asanpoison(base, bytes)
		}
		if valgrindenabled {
			base := s.base()
			valgrindMempoolFree(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base))
		}
		h.freeSpanLocked(s, spanAllocHeap)
		unlock(&h.lock)
	})
}
// freeManual frees a manually-managed span returned by allocManual.
// typ must be the same as the spanAllocType passed to the allocManual that
// allocated s.
//
// This must only be called when gcphase == _GCoff. See mSpanState for
// an explanation.
//
// freeManual must be called on the system stack because it acquires
// the heap lock. See mheap for details.
//
//go:systemstack
func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
	// Trace the span free.
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.SpanFree(s)
			traceRelease(trace)
		}
	}

	// The memory may have been written by its manual user, so it
	// must be zeroed before reuse.
	s.needzero = 1
	lock(&h.lock)
	if valgrindenabled {
		base := s.base()
		valgrindMempoolFree(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base))
	}
	h.freeSpanLocked(s, typ)
	unlock(&h.lock)
}
// freeSpanLocked returns span s to the page allocator and releases its
// mspan structure. typ must match the spanAllocType the span was
// allocated with so statistics are reversed correctly.
//
// h.lock must be held.
func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
	assertLockHeld(&h.lock)

	// Sanity-check the span's state against the kind of free requested.
	switch s.state.get() {
	case mSpanManual:
		if s.allocCount != 0 {
			throw("mheap.freeSpanLocked - invalid stack free")
		}
	case mSpanInUse:
		if s.isUserArenaChunk {
			throw("mheap.freeSpanLocked - invalid free of user arena chunk")
		}
		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
			print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("mheap.freeSpanLocked - invalid free")
		}
		h.pagesInUse.Add(-s.npages)

		// Clear in-use bit in arena page bitmap.
		arena, pageIdx, pageMask := pageIndexOf(s.base())
		atomic.And8(&arena.pageInUse[pageIdx], ^pageMask)

		// Clear small heap span bit if necessary.
		if gcUsesSpanInlineMarkBits(s.elemsize) {
			atomic.And8(&arena.pageUseSpanInlineMarkBits[pageIdx], ^pageMask)
		}
	default:
		throw("mheap.freeSpanLocked - invalid span state")
	}

	// Update stats.
	//
	// Mirrors the code in allocSpan.
	nbytes := s.npages * pageSize
	gcController.heapFree.add(int64(nbytes))
	if typ == spanAllocHeap {
		gcController.heapInUse.add(-int64(nbytes))
	}
	// Update consistent stats.
	stats := memstats.heapStats.acquire()
	switch typ {
	case spanAllocHeap:
		atomic.Xaddint64(&stats.inHeap, -int64(nbytes))
	case spanAllocStack:
		atomic.Xaddint64(&stats.inStacks, -int64(nbytes))
	case spanAllocWorkBuf:
		atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes))
	}
	memstats.heapStats.release()

	// Mark the space as free.
	h.pages.free(s.base(), s.npages)

	// Free the span structure. We no longer have a use for it.
	s.state.set(mSpanDead)
	h.freeMSpanLocked(s)
}
// scavengeAll acquires the heap lock (blocking any additional
// manipulation of the page allocator) and iterates over the whole
// heap, scavenging every free page available.
//
// Must run on the system stack because it acquires the heap lock.
//
//go:systemstack
func (h *mheap) scavengeAll() {
	// Disallow malloc or panic while holding the heap lock. We do
	// this here because this is a non-mallocgc entry-point to
	// the mheap API.
	gp := getg()
	gp.m.mallocing++

	// Force scavenge everything.
	released := h.pages.scavenge(^uintptr(0), nil, true)

	gp.m.mallocing--

	if debug.scavtrace > 0 {
		printScavTrace(0, released, true)
	}
}

// runtime_debug_freeOSMemory is the implementation of
// runtime/debug.FreeOSMemory: run a full GC, then return as much memory
// to the OS as possible by scavenging the whole heap.
//
//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	GC()
	systemstack(func() { mheap_.scavengeAll() })
}
// init initializes span to describe the run of npages pages starting
// at base. The mspan memory is *not* assumed to be zeroed, so every
// field that matters is written explicitly.
func (span *mspan) init(base uintptr, npages uintptr) {
	// List linkage: the span starts out on no list.
	span.next = nil
	span.prev = nil
	span.list = nil

	// Extent of the span.
	span.startAddr = base
	span.npages = npages
	span.limit = base + npages*gc.PageSize // see go.dev/issue/74288; adjusted later for heap spans

	// Allocation bookkeeping starts out empty.
	span.allocCount = 0
	span.spanclass = 0
	span.elemsize = 0
	span.needzero = 0
	span.freeindex = 0
	span.freeIndexForScan = 0
	span.allocBits = nil
	span.gcmarkBits = nil
	span.pinnerBits = nil

	// No specials are attached yet.
	span.speciallock.key = 0
	span.specials = nil

	span.state.set(mSpanDead)
	lockInit(&span.speciallock, lockRankMspanSpecial)
}
// inList reports whether s is currently linked into an mSpanList.
func (s *mspan) inList() bool {
	return s.list != nil
}
// mSpanList heads a doubly-linked list of spans.
//
// The list is intrusive: the links live in mspan.next/prev, and
// mspan.list points back at the containing mSpanList.
type mSpanList struct {
	_     sys.NotInHeap
	first *mspan // first span in list, or nil if none
	last  *mspan // last span in list, or nil if none
}
// init resets l to the empty list.
func (l *mSpanList) init() {
	l.first = nil
	l.last = nil
}
// remove unlinks span from l. It is a fatal error if span is not
// actually on l.
func (l *mSpanList) remove(span *mspan) {
	if span.list != l {
		print("runtime: failed mSpanList.remove span.npages=", span.npages,
			" span=", span, " prev=", span.prev, " span.list=", span.list, " list=", l, "\n")
		throw("mSpanList.remove")
	}
	// Patch the forward chain (or the list head).
	if l.first == span {
		l.first = span.next
	} else {
		span.prev.next = span.next
	}
	// Patch the backward chain (or the list tail).
	if l.last == span {
		l.last = span.prev
	} else {
		span.next.prev = span.prev
	}
	// The span is now on no list.
	span.next, span.prev, span.list = nil, nil, nil
}
// isEmpty reports whether l contains no spans.
func (l *mSpanList) isEmpty() bool {
	return l.first == nil
}
// insert links span in at the head of l.
// span must not currently be on any list.
func (l *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed mSpanList.insert", span, span.next, span.prev, span.list)
		throw("mSpanList.insert")
	}
	span.next = l.first
	if l.first == nil {
		// The list was empty, so span is also the new tail.
		l.last = span
	} else {
		// Non-empty list: back-link the old head to span.
		// The tail does not change.
		l.first.prev = span
	}
	l.first = span
	span.list = l
}
// insertBack links span in at the tail of l.
// span must not currently be on any list.
func (l *mSpanList) insertBack(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed mSpanList.insertBack", span, span.next, span.prev, span.list)
		throw("mSpanList.insertBack")
	}
	span.prev = l.last
	if l.last == nil {
		// The list was empty, so span is also the new head.
		l.first = span
	} else {
		// Non-empty list: forward-link the old tail to span.
		l.last.next = span
	}
	l.last = span
	span.list = l
}
// takeAll moves every span from other to the front of l, leaving
// other empty.
func (l *mSpanList) takeAll(other *mSpanList) {
	if other.isEmpty() {
		return
	}

	// Every moved span must point back at its new owning list.
	for s := other.first; s != nil; s = s.next {
		s.list = l
	}

	// Splice other's chain ahead of l's.
	if l.first == nil {
		// l was empty; it simply adopts other's head and tail.
		*l = *other
	} else {
		// Both non-empty: other's tail links to l's old head.
		other.last.next = l.first
		l.first.prev = other.last
		l.first = other.first
	}

	other.first, other.last = nil, nil
}
// Kinds of specials. A special is out-of-band metadata attached to a
// heap object; each span keeps a linked list of specials sorted by
// (offset, kind), so the relative order of these constants matters.
const (
	// _KindSpecialTinyBlock indicates that a given allocation is a tiny block.
	// Ordered before KindSpecialFinalizer and KindSpecialCleanup so that it
	// always appears first in the specials list.
	// Used only if debug.checkfinalizers != 0.
	_KindSpecialTinyBlock = 1
	// _KindSpecialFinalizer is for tracking finalizers.
	_KindSpecialFinalizer = 2
	// _KindSpecialWeakHandle is used for creating weak pointers.
	_KindSpecialWeakHandle = 3
	// _KindSpecialProfile is for memory profiling.
	_KindSpecialProfile = 4
	// _KindSpecialReachable is a special used for tracking
	// reachability during testing.
	_KindSpecialReachable = 5
	// _KindSpecialPinCounter is a special used for objects that are pinned
	// multiple times
	_KindSpecialPinCounter = 6
	// _KindSpecialCleanup is for tracking cleanups.
	_KindSpecialCleanup = 7
	// _KindSpecialCheckFinalizer adds additional context to a finalizer or cleanup.
	// Used only if debug.checkfinalizers != 0.
	_KindSpecialCheckFinalizer = 8
	// _KindSpecialBubble is used to associate objects with synctest bubbles.
	_KindSpecialBubble = 9
	// _KindSpecialSecret is a special used to mark an object
	// as needing zeroing immediately upon freeing.
	_KindSpecialSecret = 10
)

// special is the header shared by all special records. Concrete special
// types (specialfinalizer, specialCleanup, ...) embed it as their first
// field, so a *special may be converted to the concrete type.
type special struct {
	_      sys.NotInHeap
	next   *special // linked list in span
	offset uintptr  // span offset of object
	kind   byte     // kind of special
}
// spanHasSpecials marks a span as having specials in the arena bitmap
// by atomically setting the bit for the span's first page.
func spanHasSpecials(s *mspan) {
	base := s.base()
	page := (base / pageSize) % pagesPerArena
	ai := arenaIndex(base)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	atomic.Or8(&ha.pageSpecials[page/8], uint8(1)<<(page%8))
}
// spanHasNoSpecials marks a span as having no specials in the arena
// bitmap by atomically clearing the bit for the span's first page.
func spanHasNoSpecials(s *mspan) {
	base := s.base()
	page := (base / pageSize) % pagesPerArena
	ai := arenaIndex(base)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	atomic.And8(&ha.pageSpecials[page/8], ^(uint8(1) << (page % 8)))
}
// addspecial adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists unless force is set to true.)
func addspecial(p unsafe.Pointer, s *special, force bool) bool {
	span := spanOfHeap(uintptr(p))
	if span == nil {
		throw("addspecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	//
	// acquirem keeps us non-preemptible so the span can't be swept
	// out from under us between ensureSwept and the list update.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()
	kind := s.kind

	lock(&span.speciallock)

	// Find splice point, check for existing record.
	iter, exists := span.specialFindSplicePoint(offset, kind)
	if !exists || force {
		// Splice in record, fill in offset.
		s.offset = offset
		s.next = *iter
		*iter = s
		// Flag the span in the arena bitmap so sweeping knows to
		// walk its specials list.
		spanHasSpecials(span)
	}

	unlock(&span.speciallock)
	releasem(mp)
	// We're converting p to a uintptr and looking it up, and we
	// don't want it to die and get swept while we're doing so.
	KeepAlive(p)
	return !exists || force // already exists or addition was forced
}
// removespecial removes the special record of the given kind for the
// object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := spanOfHeap(uintptr(p))
	if span == nil {
		throw("removespecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()

	var result *special
	lock(&span.speciallock)

	// Unlink the record if an exact (offset, kind) match exists.
	iter, exists := span.specialFindSplicePoint(offset, kind)
	if exists {
		s := *iter
		*iter = s.next
		result = s
	}
	// If that was the last special on the span, clear the arena
	// bitmap flag so sweeping can skip the (now empty) list.
	if span.specials == nil {
		spanHasNoSpecials(span)
	}
	unlock(&span.speciallock)
	releasem(mp)
	return result
}
// specialFindSplicePoint walks span's sorted specials list looking for
// the record with the given (offset, kind). It returns a pointer to the
// next-reference of the list predecessor (the splice point), and true
// when the referenced record is an exact match.
func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special, bool) {
	iter := &span.specials
	for s := *iter; s != nil; s = *iter {
		if s.offset == offset && s.kind == kind {
			// Exact match: iter references the matching record.
			return iter, true
		}
		if s.offset > offset || (s.offset == offset && s.kind > kind) {
			// Passed the insertion position; no match exists.
			break
		}
		iter = &s.next
	}
	return iter, false
}
// specialfinalizer records that the described object has a finalizer
// set for it.
//
// specialfinalizer is allocated from non-GC'd memory, so any heap
// pointers must be specially handled (see the scanblock call in
// addfinalizer).
type specialfinalizer struct {
	_       sys.NotInHeap
	special special
	fn      *funcval // May be a heap pointer.
	nret    uintptr  // Byte size of the finalizer's return values.
	fint    *_type   // May be a heap pointer, but always live.
	ot      *ptrtype // May be a heap pointer, but always live.
}
// addfinalizer adds a finalizer to the object p. Returns true if it
// succeeded, or false if p already had a finalizer (in which case the
// newly allocated record is returned to the fixalloc).
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	s.fn = f
	s.nret = nret
	s.fint = fint
	s.ot = ot
	if addspecial(p, &s.special, false) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			base, span, _ := findObject(uintptr(p), 0, 0)
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object
			// so it's retained for the finalizer.
			if !span.spanclass.noscan() {
				scanObject(base, gcw)
			}
			// Mark the finalizer itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
			releasem(mp)
		}
		return true
	}

	// There was an old finalizer; give the unused record back.
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
	return false
}
// removefinalizer removes the finalizer (if any) attached to the
// object p and returns its record to the finalizer fixalloc.
func removefinalizer(p unsafe.Pointer) {
	sf := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
	if sf == nil {
		// Nothing was attached.
		return
	}
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
	unlock(&mheap_.speciallock)
}
// specialCleanup records that the described object has a cleanup
// attached to it.
type specialCleanup struct {
	_       sys.NotInHeap
	special special
	cleanup cleanupFn
	// Globally unique ID for the cleanup, obtained from mheap_.cleanupID.
	id uint64
}
// addCleanup attaches a cleanup function to the object. Multiple
// cleanups are allowed on an object, and even the same pointer.
// A cleanup id is returned which can be used to uniquely identify
// the cleanup.
func addCleanup(p unsafe.Pointer, c cleanupFn) uint64 {
	// TODO(mknyszek): Consider pooling specialCleanups on the P
	// so we don't have to take the lock every time. Just locking
	// is a considerable part of the cost of AddCleanup. This
	// would also require reserving some cleanup IDs on the P.
	lock(&mheap_.speciallock)
	s := (*specialCleanup)(mheap_.specialCleanupAlloc.alloc())
	mheap_.cleanupID++ // Increment first. ID 0 is reserved.
	id := mheap_.cleanupID
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialCleanup
	s.cleanup = c
	s.id = id

	mp := acquirem()
	// force=true: unlike finalizers, multiple cleanups may coexist
	// on the same object, so this insert never fails.
	addspecial(p, &s.special, true)
	// This is responsible for maintaining the same
	// GC-related invariants as markrootSpans in any
	// situation where it's possible that markrootSpans
	// has already run but mark termination hasn't yet.
	if gcphase != _GCoff {
		// Mark the cleanup itself, since the
		// special isn't part of the GC'd heap.
		gcScanCleanup(s, &mp.p.ptr().gcw)
	}
	releasem(mp)
	// Keep c and its referents alive. There's a window in this function
	// where it's only reachable via the special while the special hasn't
	// been added to the specials list yet. This is similar to a bug
	// discovered for weak handles, see #70455.
	KeepAlive(c)
	return id
}
// specialCheckFinalizer is always paired with a specialCleanup or
// specialfinalizer and adds debugging context (where the finalizer or
// cleanup was created) for debug.checkfinalizers reporting.
type specialCheckFinalizer struct {
	_         sys.NotInHeap
	special   special
	cleanupID uint64 // Needed to disambiguate cleanups; 0 means finalizer.
	createPC  uintptr
	funcPC    uintptr
	ptrType   *_type
}

// setFinalizerContext adds a specialCheckFinalizer to ptr. ptr must already have a
// finalizer special attached. (A cleanupID of 0 denotes a finalizer.)
func setFinalizerContext(ptr unsafe.Pointer, ptrType *_type, createPC, funcPC uintptr) {
	setCleanupContext(ptr, ptrType, createPC, funcPC, 0)
}
// setCleanupContext adds a specialCheckFinalizer to ptr. ptr must already have a
// finalizer or cleanup special attached. Pass 0 for the cleanupID to indicate
// a finalizer.
func setCleanupContext(ptr unsafe.Pointer, ptrType *_type, createPC, funcPC uintptr, cleanupID uint64) {
	lock(&mheap_.speciallock)
	s := (*specialCheckFinalizerAlloc := error)(nil)
	unlock(&mheap_.speciallock)
}
// getCleanupContext returns the specialCheckFinalizer attached to ptr
// for the cleanup identified by cleanupID, or nil if there is none.
// Pass 0 for cleanupID to look up a finalizer's context.
//
// This is a read-only lookup: the special stays linked on the span's
// specials list. Removal and freeing are clearCleanupContext's job;
// unlinking here would leak the fixalloc-backed record and hide it
// from clearCleanupContext.
//
// The world must be stopped, which is why the specials list can be
// walked without holding span.speciallock.
func getCleanupContext(ptr uintptr, cleanupID uint64) *specialCheckFinalizer {
	assertWorldStopped()

	span := spanOfHeap(ptr)
	if span == nil {
		return nil
	}
	var found *specialCheckFinalizer
	offset := ptr - span.base()
	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCheckFinalizer)
	if exists {
		for {
			s := *iter
			if s == nil {
				// Reached the end of the linked list. Stop searching at this point.
				break
			}
			if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
				(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
				// The special is a cleanup and contains a matching cleanup id.
				// Note: deliberately not unlinked (see doc comment above).
				found = (*specialCheckFinalizer)(unsafe.Pointer(s))
				break
			}
			if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
				// The special is outside the region specified for that kind of
				// special. The specials are sorted by kind.
				break
			}
			// Try the next special.
			iter = &s.next
		}
	}
	return found
}
// clearFinalizerContext removes the specialCheckFinalizer for the given pointer, if any.
// (A cleanupID of 0 denotes a finalizer.)
func clearFinalizerContext(ptr uintptr) {
	clearCleanupContext(ptr, 0)
}

// clearCleanupContext removes the specialCheckFinalizer for the given pointer
// and cleanup ID, if any, and returns its record to the fixalloc.
func clearCleanupContext(ptr uintptr, cleanupID uint64) {
	// The following block removes the Special record of type cleanup for the object c.ptr.
	span := spanOfHeap(ptr)
	if span == nil {
		return
	}
	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := ptr - span.base()

	var found *special
	lock(&span.speciallock)

	// Walk the run of _KindSpecialCheckFinalizer records at this
	// offset looking for the one carrying the matching cleanup ID,
	// and unlink it if found.
	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCheckFinalizer)
	if exists {
		for {
			s := *iter
			if s == nil {
				// Reached the end of the linked list. Stop searching at this point.
				break
			}
			if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
				(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
				// The special is a cleanup and contains a matching cleanup id.
				*iter = s.next
				found = s
				break
			}
			if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
				// The special is outside the region specified for that kind of
				// special. The specials are sorted by kind.
				break
			}
			// Try the next special.
			iter = &s.next
		}
	}
	// If the span's specials list is now empty, clear its flag in
	// the arena bitmap.
	if span.specials == nil {
		spanHasNoSpecials(span)
	}
	unlock(&span.speciallock)
	releasem(mp)

	if found == nil {
		return
	}
	// Return the unlinked record's storage to the fixalloc.
	lock(&mheap_.speciallock)
	mheap_.specialCheckFinalizerAlloc.free(unsafe.Pointer(found))
	unlock(&mheap_.speciallock)
}
// specialTinyBlock indicates that an allocation is a tiny block.
// Used only if debug.checkfinalizers != 0.
type specialTinyBlock struct {
	_       sys.NotInHeap
	special special
}

// setTinyBlockContext marks an allocation as a tiny block to diagnostics like
// checkfinalizer.
//
// A tiny block is only marked if it actually contains more than one distinct
// value, since we're using this for debugging.
func setTinyBlockContext(ptr unsafe.Pointer) {
	lock(&mheap_.speciallock)
	s := (*specialTinyBlock)(mheap_.specialTinyBlockAlloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialTinyBlock

	mp := acquirem()
	// force=false: if the block is already marked, the duplicate
	// insert is simply dropped (the record is leaked to the fixalloc
	// caller's benefit — NOTE(review): the record is not freed on
	// failure here, unlike addfinalizer; confirm this is intended
	// for this debug-only path).
	addspecial(ptr, &s.special, false)
	releasem(mp)
	KeepAlive(ptr)
}
// inTinyBlock reports whether ptr lies in a tiny-allocator block that
// was, at some point, grouped with other distinct values (i.e. one that
// carries a _KindSpecialTinyBlock special).
//
// The world must be stopped.
func inTinyBlock(ptr uintptr) bool {
	assertWorldStopped()

	// Tiny-block specials are attached at the block's base address.
	blockBase := alignDown(ptr, maxTinySize)
	span := spanOfHeap(blockBase)
	if span == nil {
		return false
	}
	_, found := span.specialFindSplicePoint(blockBase-span.base(), _KindSpecialTinyBlock)
	return found
}
// specialWeakHandle records that the described object has a weak pointer.
//
// Weak pointers in the GC have the following invariants:
//
//   - Strong-to-weak conversions must ensure the strong pointer
//     remains live until the weak handle is installed. This ensures
//     that creating a weak pointer cannot fail.
//
//   - Weak-to-strong conversions require the weakly-referenced
//     object to be swept before the conversion may proceed. This
//     ensures that weak-to-strong conversions cannot resurrect
//     dead objects by sweeping them before that happens.
//
//   - Weak handles are unique and canonical for each byte offset into
//     an object that a strong pointer may point to, until an object
//     becomes unreachable.
//
//   - Weak handles contain nil as soon as an object becomes unreachable
//     the first time, before a finalizer makes it reachable again. New
//     weak handles created after resurrection are newly unique.
//
// specialWeakHandle is allocated from non-GC'd memory, so any heap
// pointers must be specially handled.
type specialWeakHandle struct {
	_       sys.NotInHeap
	special special
	// handle is a reference to the actual weak pointer.
	// It is always heap-allocated and must be explicitly kept
	// live so long as this special exists.
	handle *atomic.Uintptr
}

// internal_weak_runtime_registerWeakPointer creates (or retrieves) the
// canonical weak handle for p on behalf of the weak package.
//
//go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer
func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer {
	return unsafe.Pointer(getOrAddWeakHandle(p))
}
// internal_weak_runtime_makeStrongFromWeak converts the weak handle u
// back into a strong pointer on behalf of the weak package, returning
// nil if the referent has become unreachable.
//
//go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak
func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
	handle := (*atomic.Uintptr)(u)

	// Prevent preemption. We want to make sure that another GC cycle can't start
	// and that work.strongFromWeak.block can't change out from under us.
	mp := acquirem()

	// Yield to the GC if necessary.
	if work.strongFromWeak.block {
		releasem(mp)

		// Try to park and wait for mark termination.
		// N.B. gcParkStrongFromWeak calls acquirem before returning.
		mp = gcParkStrongFromWeak()
	}

	p := handle.Load()
	if p == 0 {
		// The object was already reclaimed.
		releasem(mp)
		return nil
	}
	// Be careful. p may or may not refer to valid memory anymore, as it could've been
	// swept and released already. It's always safe to ensure a span is swept, though,
	// even if it's just some random span.
	span := spanOfHeap(p)
	if span == nil {
		// If it's immortal, then just return the pointer.
		//
		// Stay non-preemptible so the GC can't see us convert this potentially
		// completely bogus value to an unsafe.Pointer.
		if isGoPointerWithoutSpan(unsafe.Pointer(p)) {
			releasem(mp)
			return unsafe.Pointer(p)
		}
		// It's heap-allocated, so the span probably just got swept and released.
		releasem(mp)
		return nil
	}
	// Ensure the span is swept.
	span.ensureSwept()

	// Now we can trust whatever we get from handle, so make a strong pointer.
	//
	// Even if we just swept some random span that doesn't contain this object, because
	// this object is long dead and its memory has since been reused, we'll just observe nil.
	ptr := unsafe.Pointer(handle.Load())

	// This is responsible for maintaining the same GC-related
	// invariants as the Yuasa part of the write barrier. During
	// the mark phase, it's possible that we just created the only
	// valid pointer to the object pointed to by ptr. If it's only
	// ever referenced from our stack, and our stack is blackened
	// already, we could fail to mark it. So, mark it now.
	if gcphase != _GCoff {
		shade(uintptr(ptr))
	}
	releasem(mp)

	// Explicitly keep ptr alive. This seems unnecessary since we return ptr,
	// but let's be explicit since it's important we keep ptr alive across the
	// call to shade.
	KeepAlive(ptr)
	return ptr
}
// gcParkStrongFromWeak puts the current goroutine on the weak->strong
// queue and parks until the GC unblocks weak->strong conversions. It
// returns with the M acquired (the caller must releasem).
func gcParkStrongFromWeak() *m {
	// Prevent preemption as we check strongFromWeak, so it can't change out from under us.
	mp := acquirem()

	for work.strongFromWeak.block {
		lock(&work.strongFromWeak.lock)
		releasem(mp) // N.B. Holding the lock prevents preemption.

		// Queue ourselves up.
		work.strongFromWeak.q.pushBack(getg())

		// Park.
		goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2)

		// Re-acquire the current M since we're going to check the condition again.
		mp = acquirem()

		// Re-check condition. We may have awoken in the next GC's mark termination phase.
	}
	return mp
}
// gcWakeAllStrongFromWeak wakes all currently blocked weak->strong
// conversions. This is used at the end of a GC cycle.
//
// work.strongFromWeak.block must be false to prevent woken goroutines
// from immediately going back to sleep.
func gcWakeAllStrongFromWeak() {
	lock(&work.strongFromWeak.lock)
	// Move every parked goroutine onto a run queue in one shot.
	list := work.strongFromWeak.q.popList()
	injectglist(&list)
	unlock(&work.strongFromWeak.lock)
}
// getOrAddWeakHandle retrieves or creates the canonical weak pointer
// handle for the object p.
func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so it'll never go nil. However, we do still
		// need a weak handle that's specific to p. Use the immortal weak handle map.
		// Keep p alive across the call to getOrAdd defensively, though it doesn't
		// really matter in this particular case.
		handle := mheap_.immortalWeakHandles.getOrAdd(uintptr(p))
		KeepAlive(p)
		return handle
	}

	// First try to retrieve without allocating.
	if handle := getWeakHandle(p); handle != nil {
		// Keep p alive for the duration of the function to ensure
		// that it cannot die while we're trying to do this.
		KeepAlive(p)
		return handle
	}

	lock(&mheap_.speciallock)
	s := (*specialWeakHandle)(mheap_.specialWeakHandleAlloc.alloc())
	unlock(&mheap_.speciallock)

	// N.B. Pad the weak handle to ensure it doesn't share a tiny
	// block with any other allocations. This can lead to leaks, such
	// as in go.dev/issue/76007. As an alternative, we could consider
	// using the currently-unused 8-byte noscan size class.
	type weakHandleBox struct {
		h atomic.Uintptr
		_ [maxTinySize - unsafe.Sizeof(atomic.Uintptr{})]byte
	}
	handle := &(new(weakHandleBox).h)

	s.special.kind = _KindSpecialWeakHandle
	s.handle = handle
	handle.Store(uintptr(p))
	if addspecial(p, &s.special, false) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark the weak handle itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
			releasem(mp)
		}

		// Keep p alive for the duration of the function to ensure
		// that it cannot die while we're trying to do this.
		//
		// Same for handle, which is only stored in the special.
		// There's a window where it might die if we don't keep it
		// alive explicitly. Returning it here is probably good enough,
		// but let's be defensive and explicit. See #70455.
		KeepAlive(p)
		KeepAlive(handle)
		return handle
	}

	// There was an existing handle. Free the special
	// and try again. We must succeed because we're explicitly
	// keeping p live until the end of this function. Either
	// we, or someone else, must have succeeded, because we can
	// only fail in the event of a race, and p will still be
	// be valid no matter how much time we spend here.
	lock(&mheap_.speciallock)
	mheap_.specialWeakHandleAlloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)

	handle = getWeakHandle(p)
	if handle == nil {
		throw("failed to get or create weak handle")
	}

	// Keep p alive for the duration of the function to ensure
	// that it cannot die while we're trying to do this.
	//
	// Same for handle, just to be defensive.
	KeepAlive(p)
	KeepAlive(handle)
	return handle
}
// getWeakHandle returns the existing weak handle for p, or nil if p has
// none. For pointers into immortal (non-heap Go) memory it falls back to
// the immortal weak handle map, which always succeeds.
func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
	span := spanOfHeap(uintptr(p))
	if span == nil {
		if isGoPointerWithoutSpan(p) {
			return mheap_.immortalWeakHandles.getOrAdd(uintptr(p))
		}
		throw("getWeakHandle on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()

	lock(&span.speciallock)

	// Find the existing record and return the handle if one exists.
	var handle *atomic.Uintptr
	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialWeakHandle)
	if exists {
		handle = ((*specialWeakHandle)(unsafe.Pointer(*iter))).handle
	}
	unlock(&span.speciallock)
	releasem(mp)

	// Keep p alive for the duration of the function to ensure
	// that it cannot die while we're trying to do this.
	KeepAlive(p)
	return handle
}
// immortalWeakHandleMap is the root of a lock-free map from immortal
// (non-heap) pointers to their canonical weak handles.
type immortalWeakHandleMap struct {
	root atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap)
}

// immortalWeakHandle is a lock-free append-only hash-trie.
//
// Key features:
//   - 2-ary trie. Child nodes are indexed by the highest bit (remaining) of the hash of the address.
//   - New nodes are placed at the first empty level encountered.
//   - When the first child is added to a node, the existing value is not moved into a child.
//     This means that we must check the value at each level, not just at the leaf.
//   - No deletion or rebalancing.
//   - Intentionally devolves into a linked list on hash collisions (the hash bits will all
//     get shifted out during iteration, and new nodes will just be appended to the 0th child).
type immortalWeakHandle struct {
	_ sys.NotInHeap

	children [2]atomic.UnsafePointer // *immortalObjectMapNode (can't use generics because it's notinheap)
	ptr      uintptr                 // &ptr is the weak handle
}

// handle returns a canonical weak handle for the node's pointer.
func (h *immortalWeakHandle) handle() *atomic.Uintptr {
	// N.B. Since we just need an *atomic.Uintptr that never changes, we can trivially
	// reference ptr to save on some memory in immortalWeakHandle and avoid extra atomics
	// in getOrAdd.
	return (*atomic.Uintptr)(unsafe.Pointer(&h.ptr))
}
// getOrAdd introduces p, which must be a pointer to immortal memory (for example, a linker-allocated
// object) and returns a weak handle. The weak handle will never become nil.
func (tab *immortalWeakHandleMap) getOrAdd(p uintptr) *atomic.Uintptr {
	var newNode *immortalWeakHandle
	m := &tab.root
	hash := memhash(abi.NoEscape(unsafe.Pointer(&p)), 0, goarch.PtrSize)
	hashIter := hash
	for {
		n := (*immortalWeakHandle)(m.Load())
		if n == nil {
			// Try to insert a new map node. We may end up discarding
			// this node if we fail to insert because it turns out the
			// value is already in the map.
			//
			// The discard will only happen if two threads race on inserting
			// the same value. Both might create nodes, but only one will
			// succeed on insertion. If two threads race to insert two
			// different values, then both nodes will *always* get inserted,
			// because the equality checking below will always fail.
			//
			// Performance note: contention on insertion is likely to be
			// higher for small maps, but since this data structure is
			// append-only, either the map stays small because there isn't
			// much activity, or the map gets big and races to insert on
			// the same node are much less likely.
			if newNode == nil {
				// persistentalloc: nodes are immortal, matching the
				// referents they describe.
				newNode = (*immortalWeakHandle)(persistentalloc(unsafe.Sizeof(immortalWeakHandle{}), goarch.PtrSize, &memstats.gcMiscSys))
				newNode.ptr = p
			}
			if m.CompareAndSwapNoWB(nil, unsafe.Pointer(newNode)) {
				return newNode.handle()
			}
			// Reload n. Because pointers are only stored once,
			// we must have lost the race, and therefore n is not nil
			// anymore.
			n = (*immortalWeakHandle)(m.Load())
		}
		if n.ptr == p {
			return n.handle()
		}
		// Descend by the top remaining hash bit.
		m = &n.children[hashIter>>(8*goarch.PtrSize-1)]
		hashIter <<= 1
	}
}
// specialprofile records that the described object is being heap profiled.
type specialprofile struct {
	_       sys.NotInHeap
	special special
	b       *bucket // profile bucket charged for this allocation
}

// setprofilebucket sets the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	s.b = b
	// Each profiled object gets exactly one profile special.
	if !addspecial(p, &s.special, false) {
		throw("setprofilebucket: profile already set")
	}
}
// specialReachable tracks whether an object is reachable on the next
// GC cycle. This is used by testing.
type specialReachable struct {
	special   special
	done      bool // set by freeSpecial when the object is freed
	reachable bool
}

// specialPinCounter tracks whether an object is pinned multiple times.
type specialPinCounter struct {
	special special
	counter uintptr // number of extra pins beyond the first
}

// specialSecret tracks whether we need to zero an object immediately
// upon freeing.
type specialSecret struct {
	_       sys.NotInHeap
	special special
	size    uintptr // user-visible size to zero (see freeSpecial)
}
// specialsIter helps iterate over specials lists, supporting in-place
// unlinking of the current element.
type specialsIter struct {
	pprev **special // pointer to the link referencing s
	s     *special  // current special, or nil at end of list
}
// newSpecialsIter returns an iterator positioned at the head of span's
// specials list.
func newSpecialsIter(span *mspan) specialsIter {
	return specialsIter{pprev: &span.specials, s: span.specials}
}

// valid reports whether the iterator currently points at a special.
func (i *specialsIter) valid() bool {
	return i.s != nil
}

// next advances the iterator to the following special.
func (i *specialsIter) next() {
	i.pprev = &i.s.next
	i.s = *i.pprev
}

// unlinkAndNext removes the current special from the list and moves
// the iterator to the next special. It returns the unlinked special.
func (i *specialsIter) unlinkAndNext() *special {
	unlinked := i.s
	i.s = unlinked.next
	*i.pprev = i.s
	return unlinked
}
// freeSpecial performs any cleanup on special s and deallocates it.
// s must already be unlinked from the specials list.
//
// TODO(mknyszek): p and size together DO NOT represent a valid allocation.
// size is the size of the allocation block in the span (mspan.elemsize), and p is
// whatever pointer the special was attached to, which need not point to the
// beginning of the block, though it may.
// Consider passing the arguments differently to avoid giving the impression
// that p and size together represent an address range.
func freeSpecial(s *special, p unsafe.Pointer, size uintptr) {
	switch s.kind {
	case _KindSpecialFinalizer:
		// Queue the finalizer to run, then return the record's storage.
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialWeakHandle:
		// Clear the handle so existing weak pointers observe nil.
		sw := (*specialWeakHandle)(unsafe.Pointer(s))
		sw.handle.Store(0)
		lock(&mheap_.speciallock)
		mheap_.specialWeakHandleAlloc.free(unsafe.Pointer(s))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		// Record the free against the object's profile bucket.
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
	case _KindSpecialReachable:
		sp := (*specialReachable)(unsafe.Pointer(s))
		sp.done = true
		// The creator frees these.
	case _KindSpecialPinCounter:
		lock(&mheap_.speciallock)
		mheap_.specialPinCounterAlloc.free(unsafe.Pointer(s))
		unlock(&mheap_.speciallock)
	case _KindSpecialCleanup:
		sc := (*specialCleanup)(unsafe.Pointer(s))
		// Cleanups, unlike finalizers, do not resurrect the objects
		// they're attached to, so we only need to pass the cleanup
		// function, not the object.
		gcCleanups.enqueue(sc.cleanup)
		lock(&mheap_.speciallock)
		mheap_.specialCleanupAlloc.free(unsafe.Pointer(sc))
		unlock(&mheap_.speciallock)
	case _KindSpecialCheckFinalizer:
		sc := (*specialCheckFinalizer)(unsafe.Pointer(s))
		lock(&mheap_.speciallock)
		mheap_.specialCheckFinalizerAlloc.free(unsafe.Pointer(sc))
		unlock(&mheap_.speciallock)
	case _KindSpecialTinyBlock:
		st := (*specialTinyBlock)(unsafe.Pointer(s))
		lock(&mheap_.speciallock)
		mheap_.specialTinyBlockAlloc.free(unsafe.Pointer(st))
		unlock(&mheap_.speciallock)
	case _KindSpecialBubble:
		st := (*specialBubble)(unsafe.Pointer(s))
		lock(&mheap_.speciallock)
		mheap_.specialBubbleAlloc.free(unsafe.Pointer(st))
		unlock(&mheap_.speciallock)
	case _KindSpecialSecret:
		ss := (*specialSecret)(unsafe.Pointer(s))
		// p is the actual byte location that the special was
		// attached to, but the size argument is the span
		// element size. If we were to zero out using the size
		// argument, we'd trounce over adjacent memory in cases
		// where the allocation contains a header. Hence, we use
		// the user-visible size which we stash in the special itself.
		//
		// p always points to the beginning of the user-visible
		// allocation since the only way to attach a secret special
		// is via the allocation path. This isn't universal for
		// tiny allocs, but we avoid them in mallocgc anyway.
		memclrNoHeapPointers(p, ss.size)
		lock(&mheap_.speciallock)
		mheap_.specialSecretAlloc.free(unsafe.Pointer(s))
		unlock(&mheap_.speciallock)
	default:
		throw("bad special kind")
		// throw does not return, but the compiler doesn't know that;
		// this keeps the switch well-formed as an expression of intent.
		panic("not reached")
	}
}
// gcBits is an alloc/mark bitmap. This is always used as gcBits.x.
type gcBits struct {
	_ sys.NotInHeap
	x uint8 // first byte of the bitmap; the rest follows contiguously
}

// bytep returns a pointer to the n'th byte of b.
func (b *gcBits) bytep(n uintptr) *uint8 {
	return addb(&b.x, n)
}

// bitp returns a pointer to the byte containing bit n and a mask for
// selecting that bit from *bytep.
func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
	return b.bytep(n / 8), 1 << (n % 8)
}

// gcBitsChunkBytes is the size of one gcBitsArena, header included.
const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})

// gcBitsHeader mirrors the header fields of gcBitsArena so that
// gcBitsHeaderBytes can be computed.
type gcBitsHeader struct {
	free uintptr // free is the index into bits of the next free byte.
	next uintptr // *gcBits triggers recursive type bug. (issue 14620)
}

// gcBitsArena is a chunk of memory from which mark/alloc bitmaps are
// bump-allocated (see tryAlloc).
type gcBitsArena struct {
	_ sys.NotInHeap
	// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
	free uintptr // free is the index into bits of the next free byte; read/write atomically
	next *gcBitsArena
	bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
}

// gcBitsArenas tracks the bitmap arenas for the current, next, and
// previous GC cycles, plus a free list of retired arenas.
var gcBitsArenas struct {
	lock     mutex
	free     *gcBitsArena
	next     *gcBitsArena // Read atomically. Write atomically under lock.
	current  *gcBitsArena
	previous *gcBitsArena
}
// tryAlloc allocates from b or returns nil if b does not have enough room.
// This is safe to call concurrently.
func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
	// Racy pre-check: bail out cheaply if the arena is clearly full,
	// avoiding the atomic add below.
	if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
		return nil
	}
	// Try to allocate from this block.
	end := atomic.Xadduintptr(&b.free, bytes)
	if end > uintptr(len(b.bits)) {
		// Lost a race and overflowed the arena. b.free stays past the
		// end, but any later attempt fails this same check.
		return nil
	}
	// There was enough room.
	start := end - bytes
	return &b.bits[start]
}
// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
func newMarkBits(nelems uintptr) *gcBits {
	// Round the element count up to whole 64-bit blocks.
	blocksNeeded := (nelems + 63) / 64
	bytesNeeded := blocksNeeded * 8
	// Try directly allocating from the current head arena.
	head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
	if p := head.tryAlloc(bytesNeeded); p != nil {
		return p
	}
	// There's not enough room in the head arena. We may need to
	// allocate a new arena.
	lock(&gcBitsArenas.lock)
	// Try the head arena again, since it may have changed. Now
	// that we hold the lock, the list head can't change, but its
	// free position still can.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		unlock(&gcBitsArenas.lock)
		return p
	}
	// Allocate a new arena. This may temporarily drop the lock.
	fresh := newArenaMayUnlock()
	// If newArenaMayUnlock dropped the lock, another thread may
	// have put a fresh arena on the "next" list. Try allocating
	// from next again.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		// Put fresh back on the free list.
		// TODO: Mark it "already zeroed"
		fresh.next = gcBitsArenas.free
		gcBitsArenas.free = fresh
		unlock(&gcBitsArenas.lock)
		return p
	}
	// Allocate from the fresh arena. We haven't linked it in yet, so
	// this cannot race and is guaranteed to succeed.
	p := fresh.tryAlloc(bytesNeeded)
	if p == nil {
		throw("markBits overflow")
	}
	// Add the fresh arena to the "next" list.
	fresh.next = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))
	unlock(&gcBitsArenas.lock)
	return p
}
// newAllocBits returns a pointer to 8 byte aligned bytes
// to be used for this span's alloc bits.
// newAllocBits is used to provide newly initialized spans
// allocation bits. For spans not being initialized the
// mark bits are repurposed as allocation bits when
// the span is swept.
func newAllocBits(nelems uintptr) *gcBits {
	// Alloc bits and mark bits have the same layout, so they share
	// the same allocator.
	return newMarkBits(nelems)
}
// nextMarkBitArenaEpoch establishes a new epoch for the arenas
// holding the mark bits. The arenas are named relative to the
// current GC cycle which is demarcated by the call to finishweep_m.
//
// All current spans have been swept.
// During that sweep each span allocated room for its gcmarkBits in
// gcBitsArenas.next block. gcBitsArenas.next becomes the gcBitsArenas.current
// where the GC will mark objects and after each span is swept these bits
// will be used to allocate objects.
// gcBitsArenas.current becomes gcBitsArenas.previous where the span's
// gcAllocBits live until all the spans have been swept during this GC cycle.
// The span's sweep extinguishes all the references to gcBitsArenas.previous
// by pointing gcAllocBits into the gcBitsArenas.current.
// The gcBitsArenas.previous is released to the gcBitsArenas.free list.
func nextMarkBitArenaEpoch() {
	lock(&gcBitsArenas.lock)
	if gcBitsArenas.previous != nil {
		if gcBitsArenas.free == nil {
			gcBitsArenas.free = gcBitsArenas.previous
		} else {
			// Find end of previous arenas, then prepend the whole
			// previous list onto the free list.
			// (Previously `last` was initialized twice — once at
			// declaration and again in the for-init clause; the
			// redundant assignment is removed.)
			last := gcBitsArenas.previous
			for last.next != nil {
				last = last.next
			}
			last.next = gcBitsArenas.free
			gcBitsArenas.free = gcBitsArenas.previous
		}
	}
	// Rotate the epochs: next -> current -> previous.
	gcBitsArenas.previous = gcBitsArenas.current
	gcBitsArenas.current = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
	unlock(&gcBitsArenas.lock)
}
// newArenaMayUnlock allocates and zeroes a gcBits arena.
// The caller must hold gcBitsArenas.lock. This may temporarily release it.
func newArenaMayUnlock() *gcBitsArena {
	var result *gcBitsArena
	if gcBitsArenas.free == nil {
		// No retired arena available; get fresh memory from the OS.
		// Drop the lock across the system allocation.
		unlock(&gcBitsArenas.lock)
		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys, "gc bits"))
		if result == nil {
			throw("runtime: cannot allocate memory")
		}
		lock(&gcBitsArenas.lock)
	} else {
		// Reuse an arena from the free list. It may contain stale
		// bits, so clear it before use.
		result = gcBitsArenas.free
		gcBitsArenas.free = gcBitsArenas.free.next
		memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
	}
	result.next = nil
	// If result.bits is not 8 byte aligned adjust index so
	// that &result.bits[result.free] is 8 byte aligned.
	if unsafe.Offsetof(gcBitsArena{}.bits)&7 == 0 {
		result.free = 0
	} else {
		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
	}
	return result
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// strmin returns the lexically smaller of x and y.
// When the two are equal, x is returned.
func strmin(x, y string) string {
	if x <= y {
		return x
	}
	return y
}
// strmax returns the lexically larger of x and y.
// When the two are equal, x is returned.
func strmax(x, y string) string {
	if x >= y {
		return x
	}
	return y
}
// Sized wrappers around the generic fmin/fmax helpers below.
func fmin32(x, y float32) float32 { return fmin(x, y) }
func fmin64(x, y float64) float64 { return fmin(x, y) }
func fmax32(x, y float32) float32 { return fmax(x, y) }
func fmax64(x, y float64) float64 { return fmax(x, y) }

// floaty is the constraint shared by the generic float min/max helpers.
type floaty interface{ ~float32 | ~float64 }
// fmin returns the smaller of x and y. NaNs propagate: if either
// operand is NaN, a NaN is returned. -0 is considered smaller than +0.
func fmin[F floaty](x, y F) F {
	// y wins if it is NaN (y != y) or strictly smaller than x.
	if y != y || y < x {
		return y
	}
	// x wins if it is NaN, strictly smaller, or nonzero (ruling out
	// the ambiguous ±0 pair).
	if x != x || x < y || x != 0 {
		return x
	}
	// x and y are both ±0
	// if either is -0, return -0; else return +0
	return forbits(x, y)
}
// fmax returns the larger of x and y. NaNs propagate: if either
// operand is NaN, a NaN is returned. +0 is considered larger than -0.
func fmax[F floaty](x, y F) F {
	// y wins if it is NaN (y != y) or strictly larger than x.
	if y != y || y > x {
		return y
	}
	// x wins if it is NaN, strictly larger, or nonzero (ruling out
	// the ambiguous ±0 pair).
	if x != x || x > y || x != 0 {
		return x
	}
	// x and y are both ±0
	// if both are -0, return -0; else return +0
	return fandbits(x, y)
}
// forbits returns the float whose bit pattern is the bitwise OR of the
// patterns of x and y. For a ±0 pair this yields -0 if either input is
// -0, since only the sign bit can differ.
func forbits[F floaty](x, y F) F {
	// floaty admits only 4- and 8-byte floats, so a two-way branch
	// on the size is exhaustive.
	if unsafe.Sizeof(x) == 4 {
		*(*uint32)(unsafe.Pointer(&x)) |= *(*uint32)(unsafe.Pointer(&y))
	} else {
		*(*uint64)(unsafe.Pointer(&x)) |= *(*uint64)(unsafe.Pointer(&y))
	}
	return x
}
// fandbits returns the float whose bit pattern is the bitwise AND of
// the patterns of x and y. For a ±0 pair this yields -0 only when both
// inputs are -0, since only the sign bit can differ.
func fandbits[F floaty](x, y F) F {
	// floaty admits only 4- and 8-byte floats, so a two-way branch
	// on the size is exhaustive.
	if unsafe.Sizeof(x) == 4 {
		*(*uint32)(unsafe.Pointer(&x)) &= *(*uint32)(unsafe.Pointer(&y))
	} else {
		*(*uint64)(unsafe.Pointer(&x)) &= *(*uint64)(unsafe.Pointer(&y))
	}
	return x
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Page allocator.
//
// The page allocator manages mapped pages (defined by pageSize, NOT
// physPageSize) for allocation and re-use. It is embedded into mheap.
//
// Pages are managed using a bitmap that is sharded into chunks.
// In the bitmap, 1 means in-use, and 0 means free. The bitmap spans the
// process's address space. Chunks are managed in a sparse-array-style structure
// similar to mheap.arenas, since the bitmap may be large on some systems.
//
// The bitmap is efficiently searched by using a radix tree in combination
// with fast bit-wise intrinsics. Allocation is performed using an address-ordered
// first-fit approach.
//
// Each entry in the radix tree is a summary that describes three properties of
// a particular region of the address space: the number of contiguous free pages
// at the start and end of the region it represents, and the maximum number of
// contiguous free pages found anywhere in that region.
//
// Each level of the radix tree is stored as one contiguous array, which represents
// a different granularity of subdivision of the processes' address space. Thus, this
// radix tree is actually implicit in these large arrays, as opposed to having explicit
// dynamically-allocated pointer-based node structures. Naturally, these arrays may be
// quite large for system with large address spaces, so in these cases they are mapped
// into memory as needed. The leaf summaries of the tree correspond to a bitmap chunk.
//
// The root level (referred to as L0 and index 0 in pageAlloc.summary) has each
// summary represent the largest section of address space (16 GiB on 64-bit systems),
// with each subsequent level representing successively smaller subsections until we
// reach the finest granularity at the leaves, a chunk.
//
// More specifically, each summary in each level (except for leaf summaries)
// represents some number of entries in the following level. For example, each
// summary in the root level may represent a 16 GiB region of address space,
// and in the next level there could be 8 corresponding entries which represent 2
// GiB subsections of that 16 GiB region, each of which could correspond to 8
// entries in the next level which each represent 256 MiB regions, and so on.
//
// Thus, this design only scales to heaps so large, but can always be extended to
// larger heaps by simply adding levels to the radix tree, which mostly costs
// additional virtual address space. The choice of managing large arrays also means
// that a large amount of virtual address space may be reserved by the runtime.
package runtime
import (
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/gc"
"unsafe"
)
const (
	// The size of a bitmap chunk, i.e. the amount of bits (that is, pages) to consider
	// in the bitmap at once. It is 4MB on most platforms, except on Wasm it is 512KB.
	// We use a smaller chunk size on Wasm for the same reason as the smaller arena
	// size (see heapArenaBytes).
	pallocChunkPages = 1 << logPallocChunkPages
	pallocChunkBytes = pallocChunkPages * pageSize
	// 2^9 = 512 pages per chunk normally; 2^6 = 64 pages on Wasm.
	logPallocChunkPages = 9*(1-goarch.IsWasm) + 6*goarch.IsWasm
	logPallocChunkBytes = logPallocChunkPages + gc.PageShift
	// The number of radix bits for each level.
	//
	// The value of 3 is chosen such that the block of summaries we need to scan at
	// each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is
	// close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree
	// levels perfectly into the 21-bit pallocBits summary field at the root level.
	//
	// The following equation explains how each of the constants relate:
	// summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits
	//
	// summaryLevels is an architecture-dependent value defined in mpagealloc_*.go.
	summaryLevelBits = 3
	summaryL0Bits    = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits
	// pallocChunksL2Bits is the number of bits of the chunk index number
	// covered by the second level of the chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this change.
	pallocChunksL2Bits  = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
	pallocChunksL1Shift = pallocChunksL2Bits
	// vmaNamePageAllocIndex is the name given to mappings created for the
	// sparse chunk index (passed to sysAlloc in (*pageAlloc).grow).
	vmaNamePageAllocIndex = "page alloc index"
)
// maxSearchAddr returns the maximum searchAddr value, which indicates
// that the heap has no free space.
//
// This function exists just to make it clear that this is the maximum address
// for the page allocator's search space. See maxOffAddr for details.
//
// It's a function (rather than a variable) because it needs to be
// usable before package runtime's dynamic initialization is complete.
// See #51913 for details.
func maxSearchAddr() offAddr { return maxOffAddr }

// Global chunk index.
//
// Represents an index into the leaf level of the radix tree.
// Similar to arenaIndex, except instead of arenas, it divides the address
// space into chunks.
type chunkIdx uint
// chunkIndex returns the global index of the palloc chunk containing the
// pointer p.
func chunkIndex(p uintptr) chunkIdx {
	// Work in the offset address space, then divide down to chunks.
	off := p - arenaBaseOffset
	return chunkIdx(off / pallocChunkBytes)
}
// chunkBase returns the base address of the palloc chunk at index ci.
// It is the inverse of chunkIndex for chunk-aligned addresses.
func chunkBase(ci chunkIdx) uintptr {
	return arenaBaseOffset + uintptr(ci)*pallocChunkBytes
}
// chunkPageIndex computes the index of the page that contains p,
// relative to the chunk which contains p.
func chunkPageIndex(p uintptr) uint {
	offsetInChunk := p % pallocChunkBytes
	return uint(offsetInChunk / pageSize)
}
// l1 returns the index into the first level of (*pageAlloc).chunks.
func (i chunkIdx) l1() uint {
	if pallocChunksL1Bits == 0 {
		// Let the compiler optimize this away if there's no
		// L1 map.
		return 0
	}
	return uint(i) >> pallocChunksL1Shift
}
// l2 returns the index into the second level of (*pageAlloc).chunks.
func (i chunkIdx) l2() uint {
	if pallocChunksL1Bits == 0 {
		// No L1 map: the whole chunk index addresses L2 directly.
		return uint(i)
	}
	return uint(i) & (1<<pallocChunksL2Bits - 1)
}
// offAddrToLevelIndex converts an address in the offset address space
// to the index into summary[level] containing addr.
func offAddrToLevelIndex(level int, addr offAddr) int {
	off := addr.a - arenaBaseOffset
	return int(off >> levelShift[level])
}
// levelIndexToOffAddr converts an index into summary[level] into
// the corresponding address in the offset address space.
func levelIndexToOffAddr(level, idx int) offAddr {
	off := uintptr(idx) << levelShift[level]
	return offAddr{off + arenaBaseOffset}
}
// addrsToSummaryRange converts base and limit pointers into a range
// of entries for the given summary level.
//
// The returned range is inclusive on the lower bound and exclusive on
// the upper bound.
func addrsToSummaryRange(level int, base, limit uintptr) (lo int, hi int) {
// This is slightly more nuanced than just a shift for the exclusive
// upper-bound. Note that the exclusive upper bound may be within a
// summary at this level, meaning if we just do the obvious computation
// hi will end up being an inclusive upper bound. Unfortunately, just
// adding 1 to that is too broad since we might be on the very edge
// of a summary's max page count boundary for this level
// (1 << levelLogPages[level]). So, make limit an inclusive upper bound
// then shift, then add 1, so we get an exclusive upper bound at the end.
lo = int((base - arenaBaseOffset) >> levelShift[level])
hi = int(((limit-1)-arenaBaseOffset)>>levelShift[level]) + 1
return
}
// blockAlignSummaryRange aligns indices into the given level to that
// level's block width (1 << levelBits[level]). It assumes lo is inclusive
// and hi is exclusive, and so aligns them down and up respectively.
func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
	blockWidth := uintptr(1) << levelBits[level]
	alignedLo := int(alignDown(uintptr(lo), blockWidth))
	alignedHi := int(alignUp(uintptr(hi), blockWidth))
	return alignedLo, alignedHi
}
// pageAlloc is the page allocator's state: the sharded page bitmap,
// the radix-tree summaries over it, and the scavenger bookkeeping.
// Unless a field says otherwise, access is protected by mheapLock.
type pageAlloc struct {
	// Radix tree of summaries.
	//
	// Each slice's cap represents the whole memory reservation.
	// Each slice's len reflects the allocator's maximum known
	// mapped heap address for that level.
	//
	// The backing store of each summary level is reserved in init
	// and may or may not be committed in grow (small address spaces
	// may commit all the memory in init).
	//
	// The purpose of keeping len <= cap is to enforce bounds checks
	// on the top end of the slice so that instead of an unknown
	// runtime segmentation fault, we get a much friendlier out-of-bounds
	// error.
	//
	// To iterate over a summary level, use inUse to determine which ranges
	// are currently available. Otherwise one might try to access
	// memory which is only Reserved which may result in a hard fault.
	//
	// We may still get segmentation faults < len since some of that
	// memory may not be committed yet.
	summary [summaryLevels][]pallocSum
	// chunks is a slice of bitmap chunks.
	//
	// The total size of chunks is quite large on most 64-bit platforms
	// (O(GiB) or more) if flattened, so rather than making one large mapping
	// (which has problems on some platforms, even when PROT_NONE) we use a
	// two-level sparse array approach similar to the arena index in mheap.
	//
	// To find the chunk containing a memory address `a`, do:
	// chunkOf(chunkIndex(a))
	//
	// Below is a table describing the configuration for chunks for various
	// heapAddrBits supported by the runtime.
	//
	// heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
	// ------------------------------------------------
	// 32           | 0       | 10      | 128 KiB
	// 32 (wasm)    | 0       | 13      | 128 KiB
	// 33 (iOS)     | 0       | 11      | 256 KiB
	// 48           | 13      | 13      | 1 MiB
	//
	// There's no reason to use the L1 part of chunks on 32-bit, the
	// address space is small so the L2 is small. For platforms with a
	// 48-bit address space, we pick the L1 such that the L2 is 1 MiB
	// in size, which is a good balance between low granularity without
	// making the impact on BSS too high (note the L1 is stored directly
	// in pageAlloc).
	//
	// To iterate over the bitmap, use inUse to determine which ranges
	// are currently available. Otherwise one might iterate over unused
	// ranges.
	//
	// Protected by mheapLock.
	//
	// TODO(mknyszek): Consider changing the definition of the bitmap
	// such that 1 means free and 0 means in-use so that summaries and
	// the bitmaps align better on zero-values.
	chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData
	// The address to start an allocation search with. It must never
	// point to any memory that is not contained in inUse, i.e.
	// inUse.contains(searchAddr.addr()) must always be true. The one
	// exception to this rule is that it may take on the value of
	// maxOffAddr to indicate that the heap is exhausted.
	//
	// We guarantee that all valid heap addresses below this value
	// are allocated and not worth searching.
	searchAddr offAddr
	// start and end represent the chunk indices
	// which pageAlloc knows about. It assumes
	// chunks in the range [start, end) are
	// currently ready to use.
	start, end chunkIdx
	// inUse is a slice of ranges of address space which are
	// known by the page allocator to be currently in-use (passed
	// to grow).
	//
	// We care much more about having a contiguous heap in these cases
	// and take additional measures to ensure that, so in nearly all
	// cases this should have just 1 element.
	//
	// All access is protected by the mheapLock.
	inUse addrRanges
	// scav stores the scavenger state.
	scav struct {
		// index is an efficient index of chunks that have pages available to
		// scavenge.
		index scavengeIndex
		// releasedBg is the amount of memory released in the background this
		// scavenge cycle.
		releasedBg atomic.Uintptr
		// releasedEager is the amount of memory released eagerly this scavenge
		// cycle.
		releasedEager atomic.Uintptr
	}
	// mheapLock is normally mheap_.lock. This level of indirection makes
	// it possible to test pageAlloc independently of the runtime allocator.
	mheapLock *mutex
	// sysStat is the runtime memstat to update when new system
	// memory is committed by the pageAlloc for allocation metadata.
	sysStat *sysMemStat
	// summaryMappedReady is the number of bytes mapped in the Ready state
	// in the summary structure. Used only for testing currently.
	//
	// Protected by mheapLock.
	summaryMappedReady uintptr
	// chunkHugePages indicates whether page bitmap chunks should be backed
	// by huge pages.
	chunkHugePages bool
	// Whether or not this struct is being used in tests.
	test bool
}
// init initializes p, wiring it to mheapLock for locking and sysStat
// for memory accounting. test indicates whether p is being used from
// tests rather than the live runtime.
func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat, test bool) {
	if levelLogPages[0] > logMaxPackedValue {
		// We can't represent 1<<levelLogPages[0] pages, the maximum number
		// of pages we need to represent at the root level, in a summary, which
		// is a big problem. Throw.
		print("runtime: root level max pages = ", 1<<levelLogPages[0], "\n")
		print("runtime: summary max pages = ", maxPackedValue, "\n")
		throw("root level max pages doesn't fit in summary")
	}
	p.sysStat = sysStat
	// Initialize p.inUse.
	p.inUse.init(sysStat)
	// System-dependent initialization.
	p.sysInit(test)
	// Start with the searchAddr in a state indicating there's no free memory.
	p.searchAddr = maxSearchAddr()
	// Set the mheapLock.
	p.mheapLock = mheapLock
	// Initialize the scavenge index.
	p.summaryMappedReady += p.scav.index.init(test, sysStat)
	// Set if we're in a test.
	p.test = test
}
// tryChunkOf returns the bitmap data for the given chunk.
//
// Returns nil if the chunk data has not been mapped.
func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
	if l2 := p.chunks[ci.l1()]; l2 != nil {
		return &l2[ci.l2()]
	}
	return nil
}
// chunkOf returns the chunk at the given chunk index.
//
// The chunk index must be valid or this method may throw.
func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
	l2 := p.chunks[ci.l1()]
	return &l2[ci.l2()]
}
// grow sets up the metadata for the address range [base, base+size).
// It may allocate metadata, in which case *p.sysStat will be updated.
//
// p.mheapLock must be held.
func (p *pageAlloc) grow(base, size uintptr) {
	assertLockHeld(p.mheapLock)
	// Round up to chunks, since we can't deal with increments smaller
	// than chunks. Also, sysGrow expects aligned values.
	limit := alignUp(base+size, pallocChunkBytes)
	base = alignDown(base, pallocChunkBytes)
	// Grow the summary levels in a system-dependent manner.
	// We just update a bunch of additional metadata here.
	p.sysGrow(base, limit)
	// Grow the scavenge index.
	p.summaryMappedReady += p.scav.index.grow(base, limit, p.sysStat)
	// Update p.start and p.end.
	// If no growth happened yet, start == 0. This is generally
	// safe since the zero page is unmapped.
	firstGrowth := p.start == 0
	start, end := chunkIndex(base), chunkIndex(limit)
	if firstGrowth || start < p.start {
		p.start = start
	}
	if end > p.end {
		p.end = end
	}
	// Note that [base, limit) will never overlap with any existing
	// range inUse because grow only ever adds never-used memory
	// regions to the page allocator.
	p.inUse.add(makeAddrRange(base, limit))
	// A grow operation is a lot like a free operation, so if our
	// chunk ends up below p.searchAddr, update p.searchAddr to the
	// new address, just like in free.
	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
	// Add entries into chunks, which is sparse, if needed. Then,
	// initialize the bitmap.
	//
	// Newly-grown memory is always considered scavenged.
	// Set all the bits in the scavenged bitmaps high.
	for c := chunkIndex(base); c < chunkIndex(limit); c++ {
		if p.chunks[c.l1()] == nil {
			// Create the necessary l2 entry.
			const l2Size = unsafe.Sizeof(*p.chunks[0])
			r := sysAlloc(l2Size, p.sysStat, vmaNamePageAllocIndex)
			if r == nil {
				throw("pageAlloc: out of memory")
			}
			if !p.test {
				// Make the chunk mapping eligible or ineligible
				// for huge pages, depending on what our current
				// state is.
				if p.chunkHugePages {
					sysHugePage(r, l2Size)
				} else {
					sysNoHugePage(r, l2Size)
				}
			}
			// Store the new chunk block but avoid a write barrier.
			// grow is used in call chains that disallow write barriers.
			*(*uintptr)(unsafe.Pointer(&p.chunks[c.l1()])) = uintptr(r)
		}
		// Mark the whole new chunk as scavenged.
		p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
	}
	// Update summaries accordingly. The grow acts like a free, so
	// we need to ensure this newly-free memory is visible in the
	// summaries.
	p.update(base, size/pageSize, true, false)
}
// enableChunkHugePages enables huge pages for the chunk bitmap mappings (disabled by default).
//
// This function is idempotent.
//
// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
// time, but may take time proportional to the size of the mapped heap beyond that.
//
// The heap lock must not be held over this operation, since it will briefly acquire
// the heap lock.
//
// Must be called on the system stack because it acquires the heap lock.
//
//go:systemstack
func (p *pageAlloc) enableChunkHugePages() {
	// Grab the heap lock to turn on huge pages for new chunks and clone the current
	// heap address space ranges.
	//
	// After the lock is released, we can be sure that bitmaps for any new chunks may
	// be backed with huge pages, and we have the address space for the rest of the
	// chunks. At the end of this function, all chunk metadata should be backed by huge
	// pages.
	lock(&mheap_.lock)
	if p.chunkHugePages {
		// Already enabled; nothing to do.
		unlock(&mheap_.lock)
		return
	}
	p.chunkHugePages = true
	var inUse addrRanges
	inUse.sysStat = p.sysStat
	p.inUse.cloneInto(&inUse)
	unlock(&mheap_.lock)
	// This might seem like a lot of work, but all these loops are for generality.
	//
	// For a 1 GiB contiguous heap, a 48-bit address space, 13 L1 bits, a palloc chunk size
	// of 4 MiB, and adherence to the default set of heap address hints, this will result in
	// exactly 1 call to sysHugePage.
	//
	// NOTE(review): the loop below ranges over p.inUse.ranges even though a
	// snapshot was just cloned into the local inUse above and the heap lock
	// has been dropped — presumably the clone was intended to be iterated
	// here; confirm (p.inUse may grow concurrently once unlocked).
	//
	// NOTE(review): the inner loop's upper bound uses '<' against the l1()
	// index of the range's last chunk — verify whether the final L1 entry
	// should also be processed ('<=').
	for _, r := range p.inUse.ranges {
		for i := chunkIndex(r.base.addr()).l1(); i < chunkIndex(r.limit.addr()-1).l1(); i++ {
			// N.B. We can assume that p.chunks[i] is non-nil and in a mapped part of p.chunks
			// because it's derived from inUse, which never shrinks.
			sysHugePage(unsafe.Pointer(p.chunks[i]), unsafe.Sizeof(*p.chunks[0]))
		}
	}
}
// update updates heap metadata. It must be called each time the bitmap
// is updated.
//
// If contig is true, update does some optimizations assuming that there was
// a contiguous allocation or free between addr and addr+npages. alloc indicates
// whether the operation performed was an allocation or a free.
//
// p.mheapLock must be held.
func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
	assertLockHeld(p.mheapLock)
	// base, limit, start, and end are inclusive.
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	// Handle updating the lowest level first.
	if sc == ec {
		// Fast path: the allocation doesn't span more than one chunk,
		// so update this one and if the summary didn't change, return.
		x := p.summary[len(p.summary)-1][sc]
		y := p.chunkOf(sc).summarize()
		if x == y {
			return
		}
		p.summary[len(p.summary)-1][sc] = y
	} else if contig {
		// Slow contiguous path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		summary := p.summary[len(p.summary)-1]
		// Update the summary for chunk sc.
		summary[sc] = p.chunkOf(sc).summarize()
		// Update the summaries for chunks in between, which are
		// either totally allocated or freed.
		// (Consistency: reuse the summary local computed above rather
		// than re-deriving p.summary[len(p.summary)-1].)
		whole := summary[sc+1 : ec]
		if alloc {
			// Fully-allocated interior chunks summarize to zero.
			clear(whole)
		} else {
			// Fully-free interior chunks get the canonical free sum.
			for i := range whole {
				whole[i] = freeChunkSum
			}
		}
		// Update the summary for chunk ec.
		summary[ec] = p.chunkOf(ec).summarize()
	} else {
		// Slow general path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		//
		// We can't assume a contiguous allocation happened, so walk over
		// every chunk in the range and manually recompute the summary.
		summary := p.summary[len(p.summary)-1]
		for c := sc; c <= ec; c++ {
			summary[c] = p.chunkOf(c).summarize()
		}
	}
	// Walk up the radix tree and update the summaries appropriately.
	changed := true
	for l := len(p.summary) - 2; l >= 0 && changed; l-- {
		// Update summaries at level l from summaries at level l+1.
		changed = false
		// "Constants" for the previous level which we
		// need to compute the summary from that level.
		logEntriesPerBlock := levelBits[l+1]
		logMaxPages := levelLogPages[l+1]
		// lo and hi describe all the parts of the level we need to look at.
		lo, hi := addrsToSummaryRange(l, base, limit+1)
		// Iterate over each block, updating the corresponding summary in the less-granular level.
		for i := lo; i < hi; i++ {
			children := p.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
			sum := mergeSummaries(children, logMaxPages)
			old := p.summary[l][i]
			if old != sum {
				changed = true
				p.summary[l][i] = sum
			}
		}
	}
}
// allocRange marks the range of memory [base, base+npages*pageSize) as
// allocated. It also updates the summaries to reflect the newly-updated
// bitmap.
//
// Returns the amount of scavenged memory in bytes present in the
// allocated range.
//
// p.mheapLock must be held.
func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
	assertLockHeld(p.mheapLock)
	// limit is inclusive.
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)
	scav := uint(0)
	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		chunk := p.chunkOf(sc)
		// Count scavenged pages before marking them allocated.
		scav += chunk.scavenged.popcntRange(si, ei+1-si)
		chunk.allocRange(si, ei+1-si)
		p.scav.index.alloc(sc, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		// First (possibly partial) chunk.
		chunk := p.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
		chunk.allocRange(si, pallocChunkPages-si)
		p.scav.index.alloc(sc, pallocChunkPages-si)
		// Interior chunks are entirely covered by the range.
		for c := sc + 1; c < ec; c++ {
			chunk := p.chunkOf(c)
			scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
			chunk.allocAll()
			p.scav.index.alloc(c, pallocChunkPages)
		}
		// Last (possibly partial) chunk.
		chunk = p.chunkOf(ec)
		scav += chunk.scavenged.popcntRange(0, ei+1)
		chunk.allocRange(0, ei+1)
		p.scav.index.alloc(ec, ei+1)
	}
	p.update(base, npages, true, true)
	return uintptr(scav) * pageSize
}
// findMappedAddr returns the smallest mapped offAddr that is
// >= addr. That is, if addr refers to mapped memory, then it is
// returned. If addr is higher than any mapped region, then
// it returns maxOffAddr.
//
// p.mheapLock must be held.
func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
	assertLockHeld(p.mheapLock)
	// If we're not in a test, validate first by checking mheap_.arenas.
	// This is a fast path which is only safe to use outside of testing.
	ai := arenaIndex(addr.addr())
	if !p.test && mheap_.arenas[ai.l1()] != nil && mheap_.arenas[ai.l1()][ai.l2()] != nil {
		// addr lands in a mapped arena; it is itself the answer.
		return addr
	}
	// Fall back to searching inUse for the first range at or above addr.
	vAddr, ok := p.inUse.findAddrGreaterEqual(addr.addr())
	if !ok {
		// The candidate search address is greater than any
		// known address, which means we definitely have no
		// free memory left.
		return maxOffAddr
	}
	return offAddr{vAddr}
}
// find searches for the first (address-ordered) contiguous free region of
// npages in size and returns a base address for that region.
//
// It uses p.searchAddr to prune its search and assumes that no palloc chunks
// below chunkIndex(p.searchAddr) contain any free memory at all.
//
// find also computes and returns a candidate p.searchAddr, which may or
// may not prune more of the address space than p.searchAddr already does.
// This candidate is always a valid p.searchAddr.
//
// find represents the slow path and the full radix tree search.
//
// Returns a base address of 0 on failure, in which case the candidate
// searchAddr returned is invalid and must be ignored.
//
// p.mheapLock must be held.
func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
	assertLockHeld(p.mheapLock)

	// Search algorithm.
	//
	// This algorithm walks each level l of the radix tree from the root level
	// to the leaf level. It iterates over at most 1 << levelBits[l] of entries
	// in a given level in the radix tree, and uses the summary information to
	// find either:
	//  1) That a given subtree contains a large enough contiguous region, at
	//     which point it continues iterating on the next level, or
	//  2) That there are enough contiguous boundary-crossing bits to satisfy
	//     the allocation, at which point it knows exactly where to start
	//     allocating from.
	//
	// i tracks the index into the current level l's structure for the
	// contiguous 1 << levelBits[l] entries we're actually interested in.
	//
	// NOTE: Technically this search could allocate a region which crosses
	// the arenaBaseOffset boundary, which when arenaBaseOffset != 0, is
	// a discontinuity. However, the only way this could happen is if the
	// page at the zero address is mapped, and this is impossible on
	// every system we support where arenaBaseOffset != 0. So, the
	// discontinuity is already encoded in the fact that the OS will never
	// map the zero page for us, and this function doesn't try to handle
	// this case in any way.

	// i is the beginning of the block of entries we're searching at the
	// current level.
	i := 0

	// firstFree is the region of address space that we are certain to
	// find the first free page in the heap. base and bound are the inclusive
	// bounds of this window, and both are addresses in the linearized, contiguous
	// view of the address space (with arenaBaseOffset pre-added). At each level,
	// this window is narrowed as we find the memory region containing the
	// first free page of memory. To begin with, the range reflects the
	// full process address space.
	//
	// firstFree is updated by calling foundFree each time free space in the
	// heap is discovered.
	//
	// At the end of the search, base.addr() is the best new
	// searchAddr we could deduce in this search.
	firstFree := struct {
		base, bound offAddr
	}{
		base:  minOffAddr,
		bound: maxOffAddr,
	}
	// foundFree takes the given address range [addr, addr+size) and
	// updates firstFree if it is a narrower range. The input range must
	// either be fully contained within firstFree or not overlap with it
	// at all.
	//
	// This way, we'll record the first summary we find with any free
	// pages on the root level and narrow that down if we descend into
	// that summary. But as soon as we need to iterate beyond that summary
	// in a level to find a large enough range, we'll stop narrowing.
	foundFree := func(addr offAddr, size uintptr) {
		if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
			// This range fits within the current firstFree window, so narrow
			// down the firstFree window to the base and bound of this range.
			firstFree.base = addr
			firstFree.bound = addr.add(size - 1)
		} else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
			// This range only partially overlaps with the firstFree range,
			// so throw.
			print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
			print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
			throw("range partially overlaps")
		}
	}

	// lastSum is the summary which we saw on the previous level that made us
	// move on to the next level. Used to print additional information in the
	// case of a catastrophic failure.
	// lastSumIdx is that summary's index in the previous level.
	lastSum := packPallocSum(0, 0, 0)
	lastSumIdx := -1

nextLevel:
	for l := 0; l < len(p.summary); l++ {
		// For the root level, entriesPerBlock is the whole level.
		entriesPerBlock := 1 << levelBits[l]
		logMaxPages := levelLogPages[l]

		// We've moved into a new level, so let's update i to our new
		// starting index. This is a no-op for level 0.
		i <<= levelBits[l]

		// Slice out the block of entries we care about.
		entries := p.summary[l][i : i+entriesPerBlock]

		// Determine j0, the first index we should start iterating from.
		// The searchAddr may help us eliminate iterations if we followed the
		// searchAddr on the previous level or we're on the root level, in which
		// case the searchAddr should be the same as i after levelShift.
		j0 := 0
		if searchIdx := offAddrToLevelIndex(l, p.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
			j0 = searchIdx & (entriesPerBlock - 1)
		}

		// Run over the level entries looking for
		// a contiguous run of at least npages either
		// within an entry or across entries.
		//
		// base contains the page index (relative to
		// the first entry's first page) of the currently
		// considered run of consecutive pages.
		//
		// size contains the size of the currently considered
		// run of consecutive pages.
		var base, size uint
		for j := j0; j < len(entries); j++ {
			sum := entries[j]
			if sum == 0 {
				// A full entry means we broke any streak and
				// that we should skip it altogether.
				size = 0
				continue
			}

			// We've encountered a non-zero summary which means
			// free memory, so update firstFree.
			foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)

			// s is the count of free pages at the very start of this
			// entry, which may extend a run ending at the previous entry.
			s := sum.start()
			if size+s >= uint(npages) {
				// If size == 0 we don't have a run yet,
				// which means base isn't valid. So, set
				// base to the first page in this block.
				if size == 0 {
					base = uint(j) << logMaxPages
				}
				// We hit npages; we're done!
				size += s
				break
			}
			if sum.max() >= uint(npages) {
				// The entry itself contains npages contiguous
				// free pages, so continue on the next level
				// to find that run.
				i += j
				lastSumIdx = i
				lastSum = sum
				continue nextLevel
			}
			if size == 0 || s < 1<<logMaxPages {
				// We either don't have a current run started, or this entry
				// isn't totally free (meaning we can't continue the current
				// one), so try to begin a new run by setting size and base
				// based on sum.end.
				size = sum.end()
				base = uint(j+1)<<logMaxPages - size
				continue
			}
			// The entry is completely free, so continue the run.
			size += 1 << logMaxPages
		}
		if size >= uint(npages) {
			// We found a sufficiently large run of free pages straddling
			// some boundary, so compute the address and return it.
			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
			return addr, p.findMappedAddr(firstFree.base)
		}
		if l == 0 {
			// We're at level zero, so that means we've exhausted our search.
			return 0, maxSearchAddr()
		}

		// We're not at level zero, and we exhausted the level we were looking in.
		// This means that either our calculations were wrong or the level above
		// lied to us. In either case, dump some useful state and throw.
		print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
		print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
		print("runtime: p.searchAddr = ", hex(p.searchAddr.addr()), ", i = ", i, "\n")
		print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
		for j := 0; j < len(entries); j++ {
			sum := entries[j]
			print("runtime: summary[", l, "][", i+j, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
		}
		throw("bad summary data")
	}

	// Since we've gotten to this point, that means we haven't found a
	// sufficiently-sized free region straddling some boundary (chunk or larger).
	// This means the last summary we inspected must have had a large enough "max"
	// value, so look inside the chunk to find a suitable run.
	//
	// After iterating over all levels, i must contain a chunk index which
	// is what the final level represents.
	ci := chunkIdx(i)
	j, searchIdx := p.chunkOf(ci).find(npages, 0)
	if j == ^uint(0) {
		// We couldn't find any space in this chunk despite the summaries telling
		// us it should be there. There's likely a bug, so dump some state and throw.
		sum := p.summary[len(p.summary)-1][i]
		print("runtime: summary[", len(p.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
		print("runtime: npages = ", npages, "\n")
		throw("bad summary data")
	}

	// Compute the address at which the free space starts.
	addr := chunkBase(ci) + uintptr(j)*pageSize

	// Since we actually searched the chunk, we may have
	// found an even narrower free window.
	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
	foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
	return addr, p.findMappedAddr(firstFree.base)
}
// alloc allocates npages worth of memory from the page heap, returning the base
// address for the allocation and the amount of scavenged memory in bytes
// contained in the region [base address, base address + npages*pageSize).
//
// Returns a 0 base address on failure, in which case other returned values
// should be ignored.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
	assertLockHeld(p.mheapLock)

	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(p.searchAddr.addr()) >= p.end {
		return 0, 0
	}

	// If npages has a chance of fitting in the chunk where the searchAddr is,
	// search it directly.
	searchAddr := minOffAddr
	if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
		// npages is guaranteed to be no greater than pallocChunkPages here.
		i := chunkIndex(p.searchAddr.addr())
		if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
			j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
			if j == ^uint(0) {
				// The summary promised a fit but the chunk-level search
				// failed: the data structures are inconsistent.
				print("runtime: max = ", max, ", npages = ", npages, "\n")
				print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
				throw("bad summary data")
			}
			addr = chunkBase(i) + uintptr(j)*pageSize
			searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
			goto Found
		}
	}
	// We failed to use a searchAddr for one reason or another, so try
	// the slow path.
	addr, searchAddr = p.find(npages)
	if addr == 0 {
		if npages == 1 {
			// We failed to find a single free page, the smallest unit
			// of allocation. This means we know the heap is completely
			// exhausted. Otherwise, the heap still might have free
			// space in it, just not enough contiguous space to
			// accommodate npages.
			p.searchAddr = maxSearchAddr()
		}
		return 0, 0
	}
Found:
	// Go ahead and actually mark the bits now that we have an address.
	scav = p.allocRange(addr, npages)

	// If we found a higher searchAddr, we know that all the
	// heap memory before that searchAddr in an offset address space is
	// allocated, so bump p.searchAddr up to the new one.
	if p.searchAddr.lessThan(searchAddr) {
		p.searchAddr = searchAddr
	}
	return addr, scav
}
// free returns npages worth of memory starting at base back to the page heap.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) free(base, npages uintptr) {
	assertLockHeld(p.mheapLock)

	// If we're freeing pages below the p.searchAddr, update searchAddr.
	// searchAddr must lower-bound the first free page in the heap.
	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
	// limit is the address of the last byte in the freed range (inclusive).
	limit := base + npages*pageSize - 1
	if npages == 1 {
		// Fast path: we're clearing a single bit, and we know exactly
		// where it is, so mark it directly.
		i := chunkIndex(base)
		pi := chunkPageIndex(base)
		p.chunkOf(i).free1(pi)
		p.scav.index.free(i, pi, 1)
	} else {
		// Slow path: we're clearing more bits so we may need to iterate.
		sc, ec := chunkIndex(base), chunkIndex(limit)
		si, ei := chunkPageIndex(base), chunkPageIndex(limit)

		if sc == ec {
			// The range doesn't cross any chunk boundaries.
			p.chunkOf(sc).free(si, ei+1-si)
			p.scav.index.free(sc, si, ei+1-si)
		} else {
			// The range crosses at least one chunk boundary.
			// First (possibly partial) chunk.
			p.chunkOf(sc).free(si, pallocChunkPages-si)
			p.scav.index.free(sc, si, pallocChunkPages-si)
			// Fully-covered interior chunks.
			for c := sc + 1; c < ec; c++ {
				p.chunkOf(c).freeAll()
				p.scav.index.free(c, 0, pallocChunkPages)
			}
			// Last (possibly partial) chunk.
			p.chunkOf(ec).free(0, ei+1)
			p.scav.index.free(ec, 0, ei+1)
		}
	}
	// Propagate the cleared alloc bits up through the summaries.
	p.update(base, npages, true, false)
}
// markRandomPaddingPages marks the range of memory [base, base+npages*pageSize]
// as both allocated and scavenged. This is used for randomizing the base heap
// address. Both the alloc and scav bits are set so that the pages are not used
// and so the memory accounting stats are correctly calculated.
//
// Similar to allocRange, it also updates the summaries to reflect the
// newly-updated bitmap.
//
// p.mheapLock must be held.
func (p *pageAlloc) markRandomPaddingPages(base uintptr, npages uintptr) {
	assertLockHeld(p.mheapLock)

	// limit is the address of the last byte in the range (inclusive).
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		chunk := p.chunkOf(sc)
		chunk.allocRange(si, ei+1-si)
		p.scav.index.alloc(sc, ei+1-si)
		chunk.scavenged.setRange(si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		// First (possibly partial) chunk.
		chunk := p.chunkOf(sc)
		chunk.allocRange(si, pallocChunkPages-si)
		p.scav.index.alloc(sc, pallocChunkPages-si)
		chunk.scavenged.setRange(si, pallocChunkPages-si)
		// Fully-covered interior chunks.
		for c := sc + 1; c < ec; c++ {
			chunk := p.chunkOf(c)
			chunk.allocAll()
			p.scav.index.alloc(c, pallocChunkPages)
			chunk.scavenged.setAll()
		}
		// Last (possibly partial) chunk.
		chunk = p.chunkOf(ec)
		chunk.allocRange(0, ei+1)
		p.scav.index.alloc(ec, ei+1)
		chunk.scavenged.setRange(0, ei+1)
	}
	// Propagate the new alloc and scav bits into the summaries.
	p.update(base, npages, true, true)
}
const (
	// pallocSumBytes is the size of a pallocSum in bytes.
	pallocSumBytes = unsafe.Sizeof(pallocSum(0))

	// maxPackedValue is the maximum value that any of the three fields in
	// the pallocSum may take on.
	maxPackedValue    = 1 << logMaxPackedValue
	logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits

	// freeChunkSum is the pallocSum of a completely free chunk: start,
	// max, and end all equal to pallocChunkPages.
	freeChunkSum = pallocSum(uint64(pallocChunkPages) |
		uint64(pallocChunkPages<<logMaxPackedValue) |
		uint64(pallocChunkPages<<(2*logMaxPackedValue)))
)
// pallocSum is a packed summary type which packs three numbers: start, max,
// and end into a single 8-byte value. Each of these values are a summary of
// a bitmap and are thus counts, each of which may have a maximum value of
// 2^21 - 1, or all three may be equal to 2^21. The latter case is represented
// by just setting the 64th bit.
//
// Layout (low bits to high): start, then max, then end, each
// logMaxPackedValue bits wide; bit 63 is the "completely free" sentinel.
type pallocSum uint64
// packPallocSum takes a start, max, and end value and produces a pallocSum.
func packPallocSum(start, max, end uint) pallocSum {
	if max == maxPackedValue {
		// Saturated case: all three values are maxPackedValue, which
		// is encoded by setting only bit 63.
		return pallocSum(uint64(1 << 63))
	}
	// Mask each count into its field and pack the three fields together.
	const mask = maxPackedValue - 1
	s := uint64(start) & mask
	m := uint64(max) & mask
	e := uint64(end) & mask
	return pallocSum(s | m<<logMaxPackedValue | e<<(2*logMaxPackedValue))
}
// start extracts the start value from a packed sum.
func (p pallocSum) start() uint {
	// Bit 63 set means the summary is saturated: all fields read as
	// maxPackedValue.
	if uint64(p)>>63 != 0 {
		return maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1))
}
// max extracts the max value from a packed sum.
func (p pallocSum) max() uint {
	// Bit 63 set means the summary is saturated: all fields read as
	// maxPackedValue.
	if uint64(p)>>63 != 0 {
		return maxPackedValue
	}
	v := uint64(p) >> logMaxPackedValue
	return uint(v & (maxPackedValue - 1))
}
// end extracts the end value from a packed sum.
func (p pallocSum) end() uint {
	// Bit 63 set means the summary is saturated: all fields read as
	// maxPackedValue.
	if uint64(p)>>63 != 0 {
		return maxPackedValue
	}
	v := uint64(p) >> (2 * logMaxPackedValue)
	return uint(v & (maxPackedValue - 1))
}
// unpack unpacks all three values from the summary.
func (p pallocSum) unpack() (uint, uint, uint) {
	// Bit 63 set means the summary is saturated: all fields read as
	// maxPackedValue.
	if uint64(p)>>63 != 0 {
		return maxPackedValue, maxPackedValue, maxPackedValue
	}
	const mask = maxPackedValue - 1
	v := uint64(p)
	return uint(v & mask),
		uint((v >> logMaxPackedValue) & mask),
		uint((v >> (2 * logMaxPackedValue)) & mask)
}
// mergeSummaries merges consecutive summaries which may each represent at
// most 1 << logMaxPagesPerSum pages each together into one.
//
// The result summarizes the concatenation of the bitmaps that the input
// summaries describe.
func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum {
	// Merge the summaries in sums into one.
	//
	// We do this by keeping a running summary representing the merged
	// summaries of sums[:i] in start, most, and end.
	start, most, end := sums[0].unpack()
	for i := 1; i < len(sums); i++ {
		// Merge in sums[i].
		si, mi, ei := sums[i].unpack()

		// Merge in sums[i].start only if the running summary is
		// completely free (start covers all i summaries so far),
		// otherwise this summary's start plays no role in the
		// combined sum.
		if start == uint(i)<<logMaxPagesPerSum {
			start += si
		}

		// Recompute the max value of the running sum by looking
		// across the boundary between the running sum and sums[i]
		// (end+si) and at the max of sums[i], taking the greatest
		// of those two and the max of the running sum.
		most = max(most, end+si, mi)

		// Merge in end by checking if this new summary is totally
		// free. If it is, then we want to extend the running sum's
		// end by the new summary. If not, then we have some alloc'd
		// pages in there and we just want to take the end value in
		// sums[i].
		if ei == 1<<logMaxPagesPerSum {
			end += 1 << logMaxPagesPerSum
		} else {
			end = ei
		}
	}
	return packPallocSum(start, most, end)
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x
package runtime
import (
"unsafe"
)
const (
	// The number of levels in the radix tree on 64-bit platforms
	// (this file is built only for them; see the build constraint above).
	summaryLevels = 5

	// Constants for testing.
	pageAlloc32Bit = 0
	pageAlloc64Bit = 1

	// Number of bits needed to represent all indices into the L1 of the
	// chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this number change.
	pallocChunksL1Bits = 13
)
// levelBits is the number of bits in the radix for a given level in the super summary
// structure.
//
// The sum of all the entries of levelBits should equal heapAddrBits.
//
// Only the root level has its own width (summaryL0Bits); every deeper
// level uses summaryLevelBits.
var levelBits = [summaryLevels]uint{
	summaryL0Bits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
}
// levelShift is the number of bits to shift to acquire the radix for a given level
// in the super summary structure.
//
// With levelShift, one can compute the index of the summary at level l related to a
// pointer p by doing:
//
//	p >> levelShift[l]
//
// Each deeper level strips off summaryLevelBits fewer bits.
var levelShift = [summaryLevels]uint{
	heapAddrBits - summaryL0Bits,
	heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 4*summaryLevelBits,
}
// levelLogPages is log2 the maximum number of runtime pages in the address space
// a summary in the given level represents.
//
// The leaf level always represents exactly log2 of 1 chunk's worth of pages
// (logPallocChunkPages); each level above covers summaryLevelBits more.
var levelLogPages = [summaryLevels]uint{
	logPallocChunkPages + 4*summaryLevelBits,
	logPallocChunkPages + 3*summaryLevelBits,
	logPallocChunkPages + 2*summaryLevelBits,
	logPallocChunkPages + 1*summaryLevelBits,
	logPallocChunkPages,
}
// sysInit performs architecture-dependent initialization of fields
// in pageAlloc. pageAlloc should be uninitialized except for sysStat
// if any runtime statistic should be updated.
func (p *pageAlloc) sysInit(test bool) {
	// Reserve memory for each level. This will get mapped in
	// as R/W by setArenas.
	for l, shift := range levelShift {
		entries := 1 << (heapAddrBits - shift)

		// Reserve b bytes of memory anywhere in the address space.
		b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
		r := sysReserve(nil, b, "page summary")
		if r == nil {
			throw("failed to reserve page summary memory")
		}

		// Put this reservation into a slice.
		// The slice starts with length 0; sysGrow extends the length
		// as the heap grows.
		sl := notInHeapSlice{(*notInHeap)(r), 0, entries}
		p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
	}
}
// sysGrow performs architecture-dependent operations on heap
// growth for the page allocator, such as mapping in new memory
// for summaries. It also updates the length of the slices in
// p.summary.
//
// base is the base of the newly-added heap memory and limit is
// the first address past the end of the newly-added heap memory.
// Both must be aligned to pallocChunkBytes.
//
// The caller must update p.start and p.end after calling sysGrow.
func (p *pageAlloc) sysGrow(base, limit uintptr) {
	if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
		print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
		throw("sysGrow bounds not aligned to pallocChunkBytes")
	}

	// addrRangeToSummaryRange converts a range of addresses into a range
	// of summary indices which must be mapped to support those addresses
	// in the summary range.
	addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
		sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr())
		return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
	}

	// summaryRangeToSumAddrRange converts a range of indices in any
	// level of p.summary into page-aligned addresses which cover that
	// range of indices.
	summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
		baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
		limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
		base := unsafe.Pointer(&p.summary[level][0])
		return addrRange{
			offAddr{uintptr(add(base, baseOffset))},
			offAddr{uintptr(add(base, limitOffset))},
		}
	}

	// addrRangeToSumAddrRange is a convenience function that converts
	// an address range r to the address range of the given summary level
	// that stores the summaries for r.
	addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
		sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r)
		return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit)
	}

	// Find the first inUse index which is strictly greater than base.
	//
	// Because this function will never be asked to remap the same memory
	// twice, this index is effectively the index at which we would insert
	// this new growth, and base will never overlap/be contained within
	// any existing range.
	//
	// This will be used to look at what memory in the summary array is already
	// mapped before and after this new range.
	inUseIndex := p.inUse.findSucc(base)

	// Walk up the radix tree and map summaries in as needed.
	for l := range p.summary {
		// Figure out what part of the summary array this new address space needs.
		needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit))

		// Update the summary slices with a new upper-bound. This ensures
		// we get tight bounds checks on at least the top bound.
		//
		// We must do this regardless of whether we map new memory.
		if needIdxLimit > len(p.summary[l]) {
			p.summary[l] = p.summary[l][:needIdxLimit]
		}

		// Compute the needed address range in the summary array for level l.
		need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit)

		// Prune need down to what needs to be newly mapped. Some parts of it may
		// already be mapped by what inUse describes due to page alignment requirements
		// for mapping. Because this function will never be asked to remap the same
		// memory twice, it should never be possible to prune in such a way that causes
		// need to be split.
		if inUseIndex > 0 {
			need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex-1]))
		}
		if inUseIndex < len(p.inUse.ranges) {
			need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex]))
		}
		// It's possible that after our pruning above, there's nothing new to map.
		if need.size() == 0 {
			continue
		}

		// Map and commit need.
		sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat, "page alloc")
		sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
		p.summaryMappedReady += need.size()
	}
}
// sysGrow increases the index's backing store in response to a heap growth.
//
// Returns the amount of memory added to sysStat.
func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintptr {
	if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
		print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
		throw("sysGrow bounds not aligned to pallocChunkBytes")
	}
	// scSize is the size of one element of the chunks backing store.
	scSize := unsafe.Sizeof(atomicScavChunkData{})
	// Map and commit the pieces of chunks that we need.
	//
	// We always map the full range of the minimum heap address to the
	// maximum heap address. We don't do this for the summary structure
	// because it's quite large and a discontiguous heap could cause a
	// lot of memory to be used. In this situation, the worst case overhead
	// is in the single-digit MiB if we map the whole thing.
	//
	// The base address of the backing store is always page-aligned,
	// because it comes from the OS, so it's sufficient to align the
	// index.
	haveMin := s.min.Load()
	haveMax := s.max.Load()
	needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize)
	needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)

	// We need a contiguous range, so extend the range if there's no overlap.
	if needMax < haveMin {
		needMax = haveMin
	}
	if haveMax != 0 && needMin > haveMax {
		needMin = haveMax
	}

	// Avoid a panic from indexing one past the last element.
	chunksBase := uintptr(unsafe.Pointer(&s.chunks[0]))
	have := makeAddrRange(chunksBase+haveMin*scSize, chunksBase+haveMax*scSize)
	need := makeAddrRange(chunksBase+needMin*scSize, chunksBase+needMax*scSize)

	// Subtract any overlap from rounding. We can't re-map memory because
	// it'll be zeroed.
	need = need.subtract(have)

	// If we've got something to map, map it, and update the slice bounds.
	if need.size() != 0 {
		sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat, "scavenge index")
		sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
		// Update the indices only after the new memory is valid.
		// haveMax == 0 indicates nothing has been mapped yet, so min
		// must be set unconditionally on the first grow.
		if haveMax == 0 || needMin < haveMin {
			s.min.Store(needMin)
		}
		if needMax > haveMax {
			s.max.Store(needMax)
		}
	}
	return need.size()
}
// sysInit initializes the scavengeIndex' chunks array.
//
// Returns the amount of memory added to sysStat.
func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) uintptr {
	// One atomicScavChunkData per possible chunk in the address space.
	n := uintptr(1<<heapAddrBits) / pallocChunkBytes
	nbytes := n * unsafe.Sizeof(atomicScavChunkData{})
	r := sysReserve(nil, nbytes, "scavenge index")
	// The full length is visible immediately; sysGrow maps pieces of it
	// in as the heap grows.
	sl := notInHeapSlice{(*notInHeap)(r), int(n), int(n)}
	s.chunks = *(*[]atomicScavChunkData)(unsafe.Pointer(&sl))
	return 0 // All memory above is mapped Reserved.
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/sys"
"unsafe"
)
// pageCachePages is the number of pages a single pageCache can track:
// one bit per page in the 64-bit cache bitmap.
const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)

// pageCache represents a per-p cache of pages the allocator can
// allocate from without a lock. More specifically, it represents
// a pageCachePages*pageSize chunk of memory with 0 or more free
// pages in it.
type pageCache struct {
	base  uintptr // base address of the chunk
	cache uint64  // 64-bit bitmap representing free pages (1 means free)
	scav  uint64  // 64-bit bitmap representing scavenged pages (1 means scavenged)
}
// empty reports whether the page cache has no free pages.
// A zero cache bitmap means every page it covers is in use.
func (c *pageCache) empty() bool {
	return c.cache == 0
}
// alloc allocates npages from the page cache and is the main entry
// point for allocation.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
//
// Returns a base address of zero on failure, in which case the
// amount of scavenged memory should be ignored.
func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) {
	if c.cache == 0 {
		// No free pages in the cache at all.
		return 0, 0
	}
	if npages == 1 {
		// Fast path: take the lowest free page.
		i := uintptr(sys.TrailingZeros64(c.cache))
		scav := (c.scav >> i) & 1
		c.cache &^= 1 << i // set bit to mark in-use
		c.scav &^= 1 << i  // clear bit to mark unscavenged
		return c.base + i*pageSize, uintptr(scav) * pageSize
	}
	// General case: find a contiguous run of npages bits.
	return c.allocN(npages)
}
// allocN is a helper which attempts to allocate npages worth of pages
// from the cache. It represents the general case for allocating from
// the page cache.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
	// Find a run of npages set (free) bits in the cache bitmap.
	i := findBitRange64(c.cache, uint(npages))
	if i >= 64 {
		// No such run exists.
		return 0, 0
	}
	// mask covers exactly the npages bits starting at i.
	mask := ((uint64(1) << npages) - 1) << i
	// Count how many of the allocated pages were scavenged.
	scav := sys.OnesCount64(c.scav & mask)
	c.cache &^= mask // mark in-use bits
	c.scav &^= mask  // clear scavenged bits
	return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
}
// flush empties out unallocated free pages in the given cache
// into s. Then, it clears the cache, such that empty returns
// true.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (c *pageCache) flush(p *pageAlloc) {
	assertLockHeld(p.mheapLock)

	if c.empty() {
		return
	}
	ci := chunkIndex(c.base)
	pi := chunkPageIndex(c.base)

	// This method is called very infrequently, so just do the
	// slower, safer thing by iterating over each bit individually.
	for i := uint(0); i < 64; i++ {
		if c.cache&(1<<i) != 0 {
			// Page i is still free in the cache: return it to the heap.
			p.chunkOf(ci).free1(pi + i)

			// Update density statistics.
			p.scav.index.free(ci, pi+i, 1)
		}
		if c.scav&(1<<i) != 0 {
			// Restore the scavenged bit the cache was tracking for page i.
			p.chunkOf(ci).scavenged.setRange(pi+i, 1)
		}
	}

	// Since this is a lot like a free, we need to make sure
	// we update the searchAddr just like free does.
	if b := (offAddr{c.base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
	p.update(c.base, pageCachePages, false, false)
	*c = pageCache{}
}
// allocToCache acquires a pageCachePages-aligned chunk of free pages which
// may not be contiguous, and returns a pageCache structure which owns the
// chunk.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) allocToCache() pageCache {
	assertLockHeld(p.mheapLock)

	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(p.searchAddr.addr()) >= p.end {
		return pageCache{}
	}
	c := pageCache{}
	ci := chunkIndex(p.searchAddr.addr()) // chunk index
	var chunk *pallocData
	if p.summary[len(p.summary)-1][ci] != 0 {
		// Fast path: there's free pages at or near the searchAddr address.
		chunk = p.chunkOf(ci)
		j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
		if j == ^uint(0) {
			// The leaf summary said there was free space in this chunk,
			// but the chunk-level search found none: inconsistent state.
			throw("bad summary data")
		}
		c = pageCache{
			base:  chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
			cache: ^chunk.pages64(j),
			scav:  chunk.scavenged.block64(j),
		}
	} else {
		// Slow path: the searchAddr address had nothing there, so go find
		// the first free page the slow way.
		addr, _ := p.find(1)
		if addr == 0 {
			// We failed to find adequate free space, so mark the searchAddr as OoM
			// and return an empty pageCache.
			p.searchAddr = maxSearchAddr()
			return pageCache{}
		}
		ci = chunkIndex(addr)
		chunk = p.chunkOf(ci)
		c = pageCache{
			base:  alignDown(addr, 64*pageSize),
			cache: ^chunk.pages64(chunkPageIndex(addr)),
			scav:  chunk.scavenged.block64(chunkPageIndex(addr)),
		}
	}

	// Set the page bits as allocated and clear the scavenged bits, but
	// be careful to only set and clear the relevant bits.
	cpi := chunkPageIndex(c.base)
	chunk.allocPages64(cpi, c.cache)
	chunk.scavenged.clearBlock64(cpi, c.cache&c.scav /* free and scavenged */)

	// Update as an allocation, but note that it's not contiguous.
	p.update(c.base, pageCachePages, false, true)

	// Update density statistics.
	p.scav.index.alloc(ci, uint(sys.OnesCount64(c.cache)))

	// Set the search address to the last page represented by the cache.
	// Since all of the pages in this block are going to the cache, and we
	// searched for the first free page, we can confidently start at the
	// next page.
	//
	// However, p.searchAddr is not allowed to point into unmapped heap memory
	// unless it is maxSearchAddr, so make it the last page as opposed to
	// the page after.
	p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
	return c
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/sys"
)
// pageBits is a bitmap representing one bit per page in a palloc chunk.
// Bit i (word i/64, bit i%64) corresponds to the i'th page of the chunk.
type pageBits [pallocChunkPages / 64]uint64
// get returns the value (0 or 1) of the i'th bit in the bitmap.
func (b *pageBits) get(i uint) uint {
	return uint((b[i/64] >> (i % 64)) & 1)
}
// block64 returns the 64-bit aligned block of bits containing the i'th bit.
func (b *pageBits) block64(i uint) uint64 {
	return b[i/64]
}
// set sets bit i of pageBits.
func (b *pageBits) set(i uint) {
	b[i/64] |= 1 << (i % 64)
}
// setRange sets bits in the range [i, i+n).
func (b *pageBits) setRange(i, n uint) {
	_ = b[i/64] // lift the nil/bounds check out of the code below
	if n == 1 {
		// Fast path for the n == 1 case.
		b.set(i)
		return
	}
	// Set bits [i, j].
	j := i + n - 1
	if i/64 == j/64 {
		// The whole range falls within a single word.
		b[i/64] |= ((uint64(1) << n) - 1) << (i % 64)
		return
	}
	_ = b[j/64] // one bounds check covers the remaining writes
	// Set leading bits.
	b[i/64] |= ^uint64(0) << (i % 64)
	// Fill whole words strictly between the first and last.
	for k := i/64 + 1; k < j/64; k++ {
		b[k] = ^uint64(0)
	}
	// Set trailing bits.
	b[j/64] |= (uint64(1) << (j%64 + 1)) - 1
}
// setAll sets all the bits of b.
func (b *pageBits) setAll() {
	for i := range b {
		b[i] = ^uint64(0)
	}
}
// setBlock64 sets the bits in the 64-bit aligned block containing the i'th
// bit that are set in v (bits clear in v are left untouched).
func (b *pageBits) setBlock64(i uint, v uint64) {
	b[i/64] |= v
}
// clear clears bit i of pageBits.
func (b *pageBits) clear(i uint) {
	b[i/64] &^= 1 << (i % 64)
}
// clearRange clears bits in the range [i, i+n).
func (b *pageBits) clearRange(i, n uint) {
	_ = b[i/64] // lift the nil/bounds check out of the code below
	if n == 1 {
		// Fast path for the n == 1 case.
		b.clear(i)
		return
	}
	// Clear bits [i, j].
	j := i + n - 1
	if i/64 == j/64 {
		// The whole range falls within a single word.
		b[i/64] &^= ((uint64(1) << n) - 1) << (i % 64)
		return
	}
	_ = b[j/64] // one bounds check covers the remaining writes
	// Clear leading bits.
	b[i/64] &^= ^uint64(0) << (i % 64)
	// Zero whole words strictly between the first and last.
	clear(b[i/64+1 : j/64])
	// Clear trailing bits.
	b[j/64] &^= (uint64(1) << (j%64 + 1)) - 1
}
// clearAll clears all the bits of b.
func (b *pageBits) clearAll() {
	clear(b[:])
}
// clearBlock64 clears the bits in the 64-bit aligned block containing the
// i'th bit that are set in v (bits clear in v are left untouched).
func (b *pageBits) clearBlock64(i uint, v uint64) {
	b[i/64] &^= v
}
// popcntRange counts the number of set bits in the
// range [i, i+n).
func (b *pageBits) popcntRange(i, n uint) (s uint) {
	if n == 1 {
		// Single bit: just extract it.
		return uint((b[i/64] >> (i % 64)) & 1)
	}
	_ = b[i/64] // lift the nil/bounds check out of the code below
	j := i + n - 1
	if i/64 == j/64 {
		// The whole range falls within a single word.
		return uint(sys.OnesCount64((b[i/64] >> (i % 64)) & ((1 << n) - 1)))
	}
	_ = b[j/64]
	// Partial first word, whole middle words, partial last word.
	s += uint(sys.OnesCount64(b[i/64] >> (i % 64)))
	for k := i/64 + 1; k < j/64; k++ {
		s += uint(sys.OnesCount64(b[k]))
	}
	s += uint(sys.OnesCount64(b[j/64] & ((1 << (j%64 + 1)) - 1)))
	return
}
// pallocBits is a bitmap that tracks page allocations for at most one
// palloc chunk.
//
// The precise representation is an implementation detail, but for the
// sake of documentation, 0s are free pages and 1s are allocated pages.
type pallocBits pageBits
// summarize returns a packed summary of the bitmap in pallocBits:
// the lengths of the leading run of free pages (start), the longest run
// of free pages anywhere (most/max), and the trailing run of free pages
// (cur/end).
func (b *pallocBits) summarize() pallocSum {
	var start, most, cur uint
	const notSetYet = ^uint(0) // sentinel for start value
	start = notSetYet
	// Pass 1: track zero runs only at word granularity, i.e. runs that
	// begin or end on a 64-bit boundary.
	for i := 0; i < len(b); i++ {
		x := b[i]
		if x == 0 {
			// Entirely free word: the current run continues through it.
			cur += 64
			continue
		}
		t := uint(sys.TrailingZeros64(x))
		l := uint(sys.LeadingZeros64(x))

		// Finish any region spanning the uint64s
		cur += t
		if start == notSetYet {
			start = cur
		}
		most = max(most, cur)
		// Final region that might span to next uint64
		cur = l
	}
	if start == notSetYet {
		// Made it all the way through without finding a single 1 bit.
		const n = uint(64 * len(b))
		return packPallocSum(n, n, n)
	}
	most = max(most, cur)

	if most >= 64-2 {
		// There is no way an internal run of zeros could beat max.
		return packPallocSum(start, most, cur)
	}
	// Now look inside each uint64 for runs of zeros.
	// All uint64s must be nonzero, or we would have aborted above.
outer:
	for i := 0; i < len(b); i++ {
		x := b[i]

		// Look inside this uint64. We have a pattern like
		// 000000 1xxxxx1 000000
		// We need to look inside the 1xxxxx1 for any contiguous
		// region of zeros.

		// We already know the trailing zeros are no larger than max. Remove them.
		x >>= sys.TrailingZeros64(x) & 63
		if x&(x+1) == 0 { // no more zeros (except at the top).
			continue
		}

		// Strategy: shrink all runs of zeros by max. If any runs of zero
		// remain, then we've identified a larger maximum zero run.
		p := most    // number of zeros we still need to shrink by.
		k := uint(1) // current minimum length of runs of ones in x.
		for {
			// Shrink all runs of zeros by p places (except the top zeros).
			for p > 0 {
				if p <= k {
					// Shift p ones down into the top of each run of zeros.
					x |= x >> (p & 63)
					if x&(x+1) == 0 { // no more zeros (except at the top).
						continue outer
					}
					break
				}
				// Shift k ones down into the top of each run of zeros.
				x |= x >> (k & 63)
				if x&(x+1) == 0 { // no more zeros (except at the top).
					continue outer
				}
				p -= k
				// We've just doubled the minimum length of 1-runs.
				// This allows us to shift farther in the next iteration.
				k *= 2
			}

			// The length of the lowest-order zero run is an increment to our maximum.
			j := uint(sys.TrailingZeros64(^x)) // count contiguous trailing ones
			x >>= j & 63                       // remove trailing ones
			j = uint(sys.TrailingZeros64(x))   // count contiguous trailing zeros
			x >>= j & 63                       // remove zeros
			most += j                          // we have a new maximum!
			if x&(x+1) == 0 { // no more zeros (except at the top).
				continue outer
			}
			p = j // remove j more zeros from each zero run.
		}
	}
	return packPallocSum(start, most, cur)
}
// find searches for npages contiguous free pages in pallocBits and returns
// the index where that run starts, as well as the index of the first free page
// it found in the search. searchIdx represents the first known free page and
// where to begin the next search from.
//
// If find fails to find any free space, it returns an index of ^uint(0) and
// the new searchIdx should be ignored.
//
// Note that if npages == 1, the two returned values will always be identical.
func (b *pallocBits) find(npages uintptr, searchIdx uint) (uint, uint) {
	// Dispatch to a helper specialized for how many aligned 64-bit
	// boundaries a run of npages pages may cross.
	switch {
	case npages == 1:
		idx := b.find1(searchIdx)
		return idx, idx
	case npages <= 64:
		return b.findSmallN(npages, searchIdx)
	default:
		return b.findLargeN(npages, searchIdx)
	}
}
// find1 is a helper for find which searches for a single free page
// in the pallocBits and returns the index.
//
// See find for an explanation of the searchIdx parameter.
func (b *pallocBits) find1(searchIdx uint) uint {
	_ = b[0] // lift nil check out of loop
	for i := searchIdx / 64; i < uint(len(b)); i++ {
		x := b[i]
		if ^x == 0 {
			// Fully allocated word; nothing free here.
			continue
		}
		// The lowest 0 bit in x is the first free page.
		return i*64 + uint(sys.TrailingZeros64(^x))
	}
	return ^uint(0)
}
// findSmallN is a helper for find which searches for npages contiguous free pages
// in this pallocBits and returns the index where that run of contiguous pages
// starts as well as the index of the first free page it finds in its search.
//
// See find for an explanation of the searchIdx parameter.
//
// Returns a ^uint(0) index on failure and the new searchIdx should be ignored.
//
// findSmallN assumes npages <= 64, where any such contiguous run of pages
// crosses at most one aligned 64-bit boundary in the bits.
func (b *pallocBits) findSmallN(npages uintptr, searchIdx uint) (uint, uint) {
	// end tracks the number of free (zero) bits at the top of the
	// previous word, for runs that straddle a word boundary.
	end, newSearchIdx := uint(0), ^uint(0)
	for i := searchIdx / 64; i < uint(len(b)); i++ {
		bi := b[i]
		if ^bi == 0 {
			// Fully allocated word: any straddling run is broken.
			end = 0
			continue
		}
		// First see if we can pack our allocation in the trailing
		// zeros plus the end of the last 64 bits.
		if newSearchIdx == ^uint(0) {
			// The new searchIdx is going to be at these 64 bits after any
			// 1s we find, so count trailing 1s.
			newSearchIdx = i*64 + uint(sys.TrailingZeros64(^bi))
		}
		start := uint(sys.TrailingZeros64(bi))
		if end+start >= uint(npages) {
			// The run straddling the previous word boundary fits.
			return i*64 - end, newSearchIdx
		}
		// Next, check the interior of the 64-bit chunk.
		j := findBitRange64(^bi, uint(npages))
		if j < 64 {
			return i*64 + j, newSearchIdx
		}
		end = uint(sys.LeadingZeros64(bi))
	}
	return ^uint(0), newSearchIdx
}
// findLargeN is a helper for find which searches for npages contiguous free pages
// in this pallocBits and returns the index where that run starts, as well as the
// index of the first free page it found in its search.
//
// See find for an explanation of the searchIdx parameter.
//
// Returns a ^uint(0) index on failure and the new searchIdx should be ignored.
//
// findLargeN assumes npages > 64, where any such run of free pages
// crosses at least one aligned 64-bit boundary in the bits.
func (b *pallocBits) findLargeN(npages uintptr, searchIdx uint) (uint, uint) {
	// start/size track the current candidate run of free pages; size is 0
	// when there is no candidate.
	start, size, newSearchIdx := ^uint(0), uint(0), ^uint(0)
	for i := searchIdx / 64; i < uint(len(b)); i++ {
		x := b[i]
		if x == ^uint64(0) {
			// Fully allocated word: candidate run is broken.
			size = 0
			continue
		}
		if newSearchIdx == ^uint(0) {
			// The new searchIdx is going to be at these 64 bits after any
			// 1s we find, so count trailing 1s.
			newSearchIdx = i*64 + uint(sys.TrailingZeros64(^x))
		}
		if size == 0 {
			// Start a new candidate run from the free bits at the top
			// of this word.
			size = uint(sys.LeadingZeros64(x))
			start = i*64 + 64 - size
			continue
		}
		s := uint(sys.TrailingZeros64(x))
		if s+size >= uint(npages) {
			// The candidate run plus this word's trailing zeros is enough.
			return start, newSearchIdx
		}
		if s < 64 {
			// There's a 1 in this word, so the candidate run ends here;
			// restart from this word's leading zeros.
			size = uint(sys.LeadingZeros64(x))
			start = i*64 + 64 - size
			continue
		}
		// x == 0: the candidate run extends across the whole word.
		size += 64
	}
	if size < uint(npages) {
		return ^uint(0), newSearchIdx
	}
	return start, newSearchIdx
}
// allocRange allocates the range [i, i+n).
func (b *pallocBits) allocRange(i, n uint) {
	(*pageBits)(b).setRange(i, n)
}
// allocAll allocates all the bits of b.
func (b *pallocBits) allocAll() {
	(*pageBits)(b).setAll()
}
// free1 frees a single page in the pallocBits at i.
func (b *pallocBits) free1(i uint) {
	(*pageBits)(b).clear(i)
}
// free frees the range [i, i+n) of pages in the pallocBits.
func (b *pallocBits) free(i, n uint) {
	(*pageBits)(b).clearRange(i, n)
}
// freeAll frees all the bits of b.
func (b *pallocBits) freeAll() {
	(*pageBits)(b).clearAll()
}
// pages64 returns a 64-bit bitmap representing a block of 64 pages aligned
// to 64 pages. The returned block of pages is the one containing the i'th
// page in this pallocBits. Each bit represents whether the page is in-use.
func (b *pallocBits) pages64(i uint) uint64 {
	return (*pageBits)(b).block64(i)
}
// allocPages64 allocates a 64-bit block of 64 pages aligned to 64 pages according
// to the bits set in alloc. The block set is the one containing the i'th page.
func (b *pallocBits) allocPages64(i uint, alloc uint64) {
	(*pageBits)(b).setBlock64(i, alloc)
}
// findBitRange64 returns the bit index of the first set of
// n consecutive 1 bits. If no consecutive set of 1 bits of
// size n may be found in c, then it returns an integer >= 64.
// n must be > 0.
func findBitRange64(c uint64, n uint) uint {
	// This implementation is based on shrinking the length of
	// runs of contiguous 1 bits. We remove the top n-1 1 bits
	// from each run of 1s, then look for the first remaining 1 bit.
	p := n - 1   // number of 1s we want to remove.
	k := uint(1) // current minimum width of runs of 0 in c.
	for p > 0 {
		if p <= k {
			// Shift p 0s down into the top of each run of 1s.
			c &= c >> (p & 63)
			break
		}
		// Shift k 0s down into the top of each run of 1s.
		c &= c >> (k & 63)
		if c == 0 {
			// All runs were shorter than n; report failure.
			return 64
		}
		p -= k
		// We've just doubled the minimum length of 0-runs.
		// This allows us to shift farther in the next iteration.
		k *= 2
	}
	// Find first remaining 1.
	// Since we shrunk from the top down, the first 1 is in
	// its correct original position.
	return uint(sys.TrailingZeros64(c))
}
// pallocData encapsulates pallocBits and a bitmap for
// whether or not a given page is scavenged in a single
// structure. It's effectively a pallocBits with
// additional functionality.
//
// Update the comment on (*pageAlloc).chunks should this
// structure change.
type pallocData struct {
	pallocBits           // allocation bitmap (embedded; 1 = allocated)
	scavenged  pageBits  // 1 = page has been returned to the OS
}
// allocRange sets bits [i, i+n) in the bitmap to 1 and
// updates the scavenged bits appropriately.
func (m *pallocData) allocRange(i, n uint) {
	// Clear the scavenged bits when we alloc the range.
	m.pallocBits.allocRange(i, n)
	m.scavenged.clearRange(i, n)
}
// allocAll sets every bit in the bitmap to 1 and updates
// the scavenged bits appropriately.
func (m *pallocData) allocAll() {
	// Clear the scavenged bits when we alloc the range.
	m.pallocBits.allocAll()
	m.scavenged.clearAll()
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/profilerecord"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// NOTE(rsc): Everything here could use cas if contention became an issue.
var (
	// profInsertLock protects changes to the start of all *bucket linked lists
	profInsertLock mutex
	// profBlockLock protects the contents of every blockRecord struct
	profBlockLock mutex
	// profMemActiveLock protects the active field of every memRecord struct
	profMemActiveLock mutex
	// profMemFutureLock is a set of locks that protect the respective elements
	// of the future array of every memRecord struct, one lock per cycle slot.
	profMemFutureLock [len(memRecord{}.future)]mutex
)
// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.
const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// size of bucket hash table (prime, to spread the simple hash below)
	buckHashSize = 179999

	// maxSkip is to account for deferred inline expansion
	// when using frame pointer unwinding. We record the stack
	// with "physical" frame pointers but handle skipping "logical"
	// frames at some point after collecting the stack. So
	// we need extra space in order to avoid getting fewer than the
	// desired maximum number of frames after expansion.
	// This should be at least as large as the largest skip value
	// used for profiling; otherwise stacks may be truncated inconsistently
	maxSkip = 6

	// maxProfStackDepth is the highest valid value for debug.profstackdepth.
	// It's used for the bucket.stk func.
	// TODO(fg): can we get rid of this?
	maxProfStackDepth = 1024
)
// bucketType identifies the kind of record (memRecord or blockRecord)
// stored after a bucket's header and stack.
type bucketType int
// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
//
// None of the fields in this bucket header are modified after
// creation, including its next and allnext links.
//
// No heap pointers.
type bucket struct {
	_       sys.NotInHeap
	next    *bucket    // next bucket in the same hash chain
	allnext *bucket    // next bucket in the global list for this profile type
	typ     bucketType // memBucket or blockBucket (includes mutexProfile)
	hash    uintptr    // hash of the stack plus size (see stkbucket)
	size    uintptr    // allocation size class key (0 for block/mutex)
	nstk    uintptr    // number of stack words following the header
}
// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// The following complex 3-stage scheme of stats accumulation
	// is required to obtain a consistent picture of mallocs and frees
	// for some point in time.
	// The problem is that mallocs come in real time, while frees
	// come only after a GC during concurrent sweeping. So if we would
	// naively count them, we would get a skew toward mallocs.
	//
	// Hence, we delay information to get consistent snapshots as
	// of mark termination. Allocations count toward the next mark
	// termination's snapshot, while sweep frees count toward the
	// previous mark termination's snapshot:
	//
	// MT MT MT MT
	// .·| .·| .·| .·|
	// .·˙ | .·˙ | .·˙ | .·˙ |
	// .·˙ | .·˙ | .·˙ | .·˙ |
	// .·˙ |.·˙ |.·˙ |.·˙ |
	//
	// alloc → ▲ ← free
	// ┠┅┅┅┅┅┅┅┅┅┅┅P
	// C+2 → C+1 → C
	//
	// alloc → ▲ ← free
	// ┠┅┅┅┅┅┅┅┅┅┅┅P
	// C+2 → C+1 → C
	//
	// Since we can't publish a consistent snapshot until all of
	// the sweep frees are accounted for, we wait until the next
	// mark termination ("MT" above) to publish the previous mark
	// termination's snapshot ("P" above). To do this, allocation
	// and free events are accounted to *future* heap profile
	// cycles ("C+n" above) and we only publish a cycle once all
	// of the events from that cycle must be done. Specifically:
	//
	// Mallocs are accounted to cycle C+2.
	// Explicit frees are accounted to cycle C+2.
	// GC frees (done during sweeping) are accounted to cycle C+1.
	//
	// After mark termination, we increment the global heap
	// profile cycle counter and accumulate the stats from cycle C
	// into the active profile.

	// active is the currently published profile. A profiling
	// cycle can be accumulated into active once its complete.
	active memRecordCycle

	// future records the profile events we're counting for cycles
	// that have not yet been published. This is ring buffer
	// indexed by the global heap profile cycle C and stores
	// cycles C, C+1, and C+2. Unlike active, these counts are
	// only for a single cycle; they are not cumulative across
	// cycles.
	//
	// We store cycle C here because there's a window between when
	// C becomes the active cycle and when we've flushed it to
	// active.
	future [3]memRecordCycle
}
// memRecordCycle is the event counts accumulated for a single heap
// profiling cycle.
type memRecordCycle struct {
	allocs, frees uintptr
}
// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
	a.allocs += b.allocs
	a.frees += b.frees
}
// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  float64 // event count; float64 presumably to hold sampling-adjusted values — confirm at call sites
	cycles int64   // total CPU ticks attributed to these events
}
// Global heads of the per-type bucket lists and the shared hash table.
// All are manipulated under profInsertLock; reads may be lock-free.
var (
	mbuckets  atomic.UnsafePointer // *bucket, memory profile buckets
	bbuckets  atomic.UnsafePointer // *bucket, blocking profile buckets
	xbuckets  atomic.UnsafePointer // *bucket, mutex profile buckets
	buckhash  atomic.UnsafePointer // *buckhashArray, lazily allocated in stkbucket
	mProfCycle mProfCycleHolder
)
// buckhashArray is the bucket hash table: chains of *bucket indexed by
// stack hash modulo buckHashSize.
type buckhashArray [buckHashSize]atomic.UnsafePointer // *bucket
// mProfCycleWrap is the point at which the cycle counter wraps. It is a
// multiple of len(memRecord{}.future) so that wrapping keeps the
// future-ring index (cycle % 3) consistent.
const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
// mProfCycleHolder holds the global heap profile cycle number (wrapped at
// mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to
// indicate whether future[cycle] in all buckets has been queued to flush into
// the active profile.
type mProfCycleHolder struct {
	value atomic.Uint32
}
// read returns the current cycle count. The flushed flag occupies bit 0
// of the stored word, so the count is recovered by dropping that bit.
func (c *mProfCycleHolder) read() (cycle uint32) {
	return c.value.Load() >> 1
}
// setFlushed sets the flushed flag. It returns the current cycle count and the
// previous value of the flushed flag.
func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
	// CAS loop: retry until we set bit 0 without racing another updater.
	for {
		prev := c.value.Load()
		cycle = prev >> 1
		alreadyFlushed = (prev & 0x1) != 0
		next := prev | 0x1
		if c.value.CompareAndSwap(prev, next) {
			return cycle, alreadyFlushed
		}
	}
}
// increment increases the cycle count by one, wrapping the value at
// mProfCycleWrap. It clears the flushed flag.
func (c *mProfCycleHolder) increment() {
	// We explicitly wrap mProfCycle rather than depending on
	// uint wraparound because the memRecord.future ring does not
	// itself wrap at a power of two.
	for {
		prev := c.value.Load()
		cycle := prev >> 1
		cycle = (cycle + 1) % mProfCycleWrap
		// Shifting left by one leaves bit 0 (the flushed flag) clear.
		next := cycle << 1
		if c.value.CompareAndSwap(prev, next) {
			break
		}
	}
}
// newBucket allocates a bucket with the given type and number of stack entries.
// The header, stack words, and type-specific record are laid out in a single
// off-heap allocation (see the bucket type comment).
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	// persistentalloc returns zeroed, non-GC'd memory, consistent with
	// the bucket's sys.NotInHeap marker.
	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}
// stk returns the slice in b holding the stack. The caller can assume that the
// backing array is immutable.
func (b *bucket) stk() []uintptr {
	// The stack words immediately follow the bucket header in memory.
	stk := (*[maxProfStackDepth]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	if b.nstk > maxProfStackDepth {
		// prove that slicing works; otherwise a failure requires a P
		throw("bad profile stack count")
	}
	// Full slice expression pins both len and cap to nstk.
	return stk[:b.nstk:b.nstk]
}
// mp returns the memRecord associated with the memProfile bucket b.
// The record sits after the header and the nstk stack words.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}
// bp returns the blockRecord associated with the blockProfile bucket b.
// The record sits after the header and the nstk stack words.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}
// stkbucket returns the bucket for stk[0:nstk], allocating a new bucket if
// needed (and alloc is true). Lookups are lock-free; insertions take
// profInsertLock and re-check under the lock.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	bh := (*buckhashArray)(buckhash.Load())
	if bh == nil {
		// Lazily allocate the hash table on first use.
		lock(&profInsertLock)
		// check again under the lock
		bh = (*buckhashArray)(buckhash.Load())
		if bh == nil {
			bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys, "profiler hash buckets"))
			if bh == nil {
				throw("runtime: cannot allocate memory")
			}
			buckhash.StoreNoWB(unsafe.Pointer(bh))
		}
		unlock(&profInsertLock)
	}

	// Hash stack (simple shift/xor mixing of each PC).
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	// first check optimistically, without the lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	lock(&profInsertLock)
	// check again under the insertion lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			unlock(&profInsertLock)
			return b
		}
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size

	var allnext *atomic.UnsafePointer
	if typ == memProfile {
		allnext = &mbuckets
	} else if typ == mutexProfile {
		allnext = &xbuckets
	} else {
		allnext = &bbuckets
	}

	// Link into the hash chain and the all-buckets list; publishing the
	// pointers is the linearization point for lock-free readers.
	b.next = (*bucket)(bh[i].Load())
	b.allnext = (*bucket)(allnext.Load())

	bh[i].StoreNoWB(unsafe.Pointer(b))
	allnext.StoreNoWB(unsafe.Pointer(b))

	unlock(&profInsertLock)
	return b
}
// eqslice reports whether x and y contain exactly the same values in the
// same order. (The runtime avoids importing package slices here.)
func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i := range x {
		if x[i] != y[i] {
			return false
		}
	}
	return true
}
// mProf_NextCycle publishes the next heap profile cycle and creates a
// fresh heap profile cycle. This operation is fast and can be done
// during STW. The caller must call mProf_Flush before calling
// mProf_NextCycle again.
//
// This is called by mark termination during STW so allocations and
// frees after the world is started again count towards a new heap
// profiling cycle.
func mProf_NextCycle() {
	mProfCycle.increment()
}
// mProf_Flush flushes the events from the current heap profiling
// cycle into the active profile. After this it is safe to start a new
// heap profiling cycle with mProf_NextCycle.
//
// This is called by GC after mark termination starts the world. In
// contrast with mProf_NextCycle, this is somewhat expensive, but safe
// to do concurrently.
func mProf_Flush() {
	cycle, alreadyFlushed := mProfCycle.setFlushed()
	if alreadyFlushed {
		// Another flusher already queued this cycle; nothing to do.
		return
	}

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}
// mProf_FlushLocked flushes the events from the heap profiling cycle at index
// into the active profile. The caller must hold the lock for the active profile
// (profMemActiveLock) and for the profiling cycle at index
// (profMemFutureLock[index]).
func mProf_FlushLocked(index uint32) {
	assertLockHeld(&profMemActiveLock)
	assertLockHeld(&profMemFutureLock[index])

	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()

		// Flush cycle C into the published profile and clear
		// it for reuse.
		mpc := &mp.future[index]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
}
// mProf_PostSweep records that all sweep frees for this GC cycle have
// completed. This has the effect of publishing the heap profile
// snapshot as of the last mark termination without advancing the heap
// profile cycle.
func mProf_PostSweep() {
	// Flush cycle C+1 to the active profile so everything as of
	// the last mark termination becomes visible. *Don't* advance
	// the cycle, since we're still accumulating allocs in cycle
	// C+2, which have to become C+1 in the next mark termination
	// and so on.
	cycle := mProfCycle.read() + 1

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}
// mProf_Malloc is called by malloc to record a profiled block.
// The allocation is credited to future cycle C+2 (see memRecord).
func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
	if mp.profStack == nil {
		// mp.profStack is nil if we happen to sample an allocation during the
		// initialization of mp. This case is rare, so we just ignore such
		// allocations. Change MemProfileRate to 1 if you need to reproduce such
		// cases for testing purposes.
		return
	}
	// Only use the part of mp.profStack we need and ignore the extra space
	// reserved for delayed inline expansion with frame pointer unwinding.
	nstk := callers(3, mp.profStack[:debug.profstackdepth+2])
	index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))

	b := stkbucket(memProfile, size, mp.profStack[:nstk], true)
	mr := b.mp()
	mpc := &mr.future[index]

	lock(&profMemFutureLock[index])
	mpc.allocs++
	unlock(&profMemFutureLock[index])

	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of
	// the profiler locks. This reduces potential contention and chances of
	// deadlocks. Since the object must be alive during the call to
	// mProf_Malloc, it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}
// mProf_Free is called when freeing a profiled block.
// The free is credited to future cycle C+1 (sweep frees; see memRecord).
func mProf_Free(b *bucket) {
	index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))

	mp := b.mp()
	mpc := &mp.future[index]

	lock(&profMemFutureLock[index])
	mpc.frees++
	unlock(&profMemFutureLock[index])
}
// blockprofilerate is the block profile sampling rate in CPU ticks;
// 0 disables profiling. Accessed atomically (see SetBlockProfileRate).
var blockprofilerate uint64 // in CPU ticks
// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(ticksPerSecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			// Round a sub-tick rate up so profiling stays enabled.
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}
// blockevent records a blocking event of the given duration (in CPU
// ticks) if the block profiler's sampling decision selects it.
func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		// Clamp so every event carries at least one tick.
		cycles = 1
	}

	rate := int64(atomic.Load64(&blockprofilerate))
	if blocksampled(cycles, rate) {
		// skip+1 also hides this frame from the recorded stack.
		saveblockevent(cycles, rate, skip+1, blockProfile)
	}
}
// blocksampled returns true for all events where cycles >= rate. Shorter
// events have a cycles/rate random chance of returning true.
func blocksampled(cycles, rate int64) bool {
	if rate <= 0 {
		// Profiling disabled.
		return false
	}
	if cycles >= rate {
		// Long enough to always be recorded.
		return true
	}
	// Short event: keep it with probability cycles/rate.
	return cheaprand64()%rate <= cycles
}
// saveblockevent records a profile event of the type specified by which.
// cycles is the quantity associated with this event and rate is the sampling rate,
// used to adjust the cycles value in the manner determined by the profile type.
// skip is the number of frames to omit from the traceback associated with the event.
// The traceback will be recorded from the stack of the goroutine associated with the current m.
// skip should be positive if this event is recorded from the current stack
// (e.g. when this is not called from a system stack)
func saveblockevent(cycles, rate int64, skip int, which bucketType) {
	if debug.profstackdepth == 0 {
		// profstackdepth is set to 0 by the user, so mp.profStack is nil and we
		// can't record a stack trace.
		return
	}
	if skip > maxSkip {
		print("requested skip=", skip)
		throw("invalid skip value")
	}
	gp := getg()
	mp := acquirem() // we must not be preempted while accessing profstack

	var nstk int
	if tracefpunwindoff() || gp.m.hasCgoOnStack() {
		// Conservative (non-frame-pointer) unwinding.
		if gp.m.curg == nil || gp.m.curg == gp {
			nstk = callers(skip, mp.profStack)
		} else {
			// Record the user goroutine's stack, not the system stack.
			nstk = gcallers(gp.m.curg, skip, mp.profStack)
		}
	} else {
		// Fast frame-pointer unwinding.
		if gp.m.curg == nil || gp.m.curg == gp {
			if skip > 0 {
				// We skip one fewer frame than the provided value for frame
				// pointer unwinding because the skip value includes the current
				// frame, whereas the saved frame pointer will give us the
				// caller's return address first (so, not including
				// saveblockevent)
				skip -= 1
			}
			nstk = fpTracebackPartialExpand(skip, unsafe.Pointer(getfp()), mp.profStack)
		} else {
			// Start from the user goroutine's saved PC, then unwind from
			// its saved frame pointer.
			mp.profStack[0] = gp.m.curg.sched.pc
			nstk = 1 + fpTracebackPartialExpand(skip, unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[1:])
		}
	}

	saveBlockEventStack(cycles, rate, mp.profStack[:nstk], which)
	releasem(mp)
}
// fpTracebackPartialExpand records a call stack obtained starting from fp.
// This function will skip the given number of frames, properly accounting for
// inlining, and save remaining frames as "physical" return addresses. The
// consumer should later use CallersFrames or similar to expand inline frames.
func fpTracebackPartialExpand(skip int, fp unsafe.Pointer, pcBuf []uintptr) int {
	var n int
	lastFuncID := abi.FuncIDNormal
	// skipOrAdd consumes one logical frame: either decrement the skip
	// budget or store the PC. It reports whether there is room for more.
	skipOrAdd := func(retPC uintptr) bool {
		if skip > 0 {
			skip--
		} else if n < len(pcBuf) {
			pcBuf[n] = retPC
			n++
		}
		return n < len(pcBuf)
	}
	for n < len(pcBuf) && fp != nil {
		// return addr sits one word above the frame pointer
		pc := *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))

		if skip > 0 {
			// While still skipping, we must expand inline frames so that
			// skip counts logical frames, not physical ones.
			// pc-1 points into the call instruction rather than the
			// return site.
			callPC := pc - 1
			fi := findfunc(callPC)
			u, uf := newInlineUnwinder(fi, callPC)
			for ; uf.valid(); uf = u.next(uf) {
				sf := u.srcFunc(uf)
				if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
					// ignore wrappers
				} else if more := skipOrAdd(uf.pc + 1); !more {
					return n
				}
				lastFuncID = sf.funcID
			}
		} else {
			// We've skipped the desired number of frames, so no need
			// to perform further inline expansion now.
			pcBuf[n] = pc
			n++
		}

		// follow the frame pointer to the next one
		fp = unsafe.Pointer(*(*uintptr)(fp))
	}
	return n
}
// mLockProfile holds information about the runtime-internal lock contention
// experienced and caused by this M, to report in metrics and profiles.
//
// These measurements are subject to some notable constraints: First, the fast
// path for lock and unlock must remain very fast, with a minimal critical
// section. Second, the critical section during contention has to remain small
// too, so low levels of contention are less likely to snowball into large ones.
// The reporting code cannot acquire new locks until the M has released all
// other locks, which means no memory allocations and encourages use of
// (temporary) M-local storage.
//
// The M has space for storing one call stack that caused contention, and the
// magnitude of that contention. It also has space to store the magnitude of
// additional contention the M caused, since it might encounter several
// contention events before it releases all of its locks and is thus able to
// transfer the locally buffered call stack and magnitude into the profile.
//
// The M collects the call stack when it unlocks the contended lock. The
// traceback takes place outside of the lock's critical section.
//
// The profile for contention on sync.Mutex blames the caller of Unlock for the
// amount of contention experienced by the callers of Lock which had to wait.
// When there are several critical sections, this allows identifying which of
// them is responsible. We must match that reporting behavior for contention on
// runtime-internal locks.
//
// When the M unlocks its last mutex, it transfers the locally buffered call
// stack and magnitude into the profile. As part of that step, it also transfers
// any "additional contention" time to the profile. Any lock contention that it
// experiences while adding samples to the profile will be recorded later as
// "additional contention" and not include a call stack, to avoid an echo.
type mLockProfile struct {
	waitTime   atomic.Int64 // (nanotime) total time this M has spent waiting in runtime.lockWithRank. Read by runtime/metrics.
	stack      []uintptr    // call stack at the point of this M's unlock call, when other Ms had to wait
	cycles     int64        // (cputicks) cycles attributable to "stack"
	cyclesLost int64        // (cputicks) contention for which we weren't able to record a call stack
	haveStack  bool         // stack and cycles are to be added to the mutex profile (even if cycles is 0)
	disabled   bool         // attribute all time to "lost"
}
// start returns a sampled timestamp for measuring time spent waiting on a
// runtime-internal lock, or 0 when this event was not chosen for sampling.
// Only about 1 in gTrackingPeriod calls pays for the nanotime() read,
// keeping the lock fast path cheap.
func (prof *mLockProfile) start() int64 {
	if cheaprandn(gTrackingPeriod) != 0 {
		return 0
	}
	return nanotime()
}
// end accounts the wait that began at start (a nanotime() value returned by
// prof.start). The elapsed time is scaled up by gTrackingPeriod to
// compensate for the sampling done in start. A zero start means the event
// was not sampled, and nothing is recorded.
func (prof *mLockProfile) end(start int64) {
	if start == 0 {
		return
	}
	prof.waitTime.Add((nanotime() - start) * gTrackingPeriod)
}
// recordUnlock prepares data for later addition to the mutex contention
// profile. The M may hold arbitrary locks during this call.
//
// From unlock2, we might not be holding a p in this code.
//
//go:nowritebarrierrec
func (prof *mLockProfile) recordUnlock(cycles int64) {
	// Clamp: negative cycle counts can't be meaningfully attributed.
	if cycles < 0 {
		cycles = 0
	}
	if prof.disabled {
		// We're experiencing contention while attempting to report contention.
		// Make a note of its magnitude, but don't allow it to be the sole cause
		// of another contention report.
		prof.cyclesLost += cycles
		return
	}
	if prev := prof.cycles; prev > 0 {
		// We can only store one call stack for runtime-internal lock contention
		// on this M, and we've already got one. Decide which should stay, and
		// add the other to the report for runtime._LostContendedRuntimeLock.
		if cycles == 0 {
			return
		}
		// Random choice between the buffered event and the new one, with each
		// event's chance of winning scaled by its magnitude (larger scores
		// come from larger cycle counts, on average).
		prevScore := cheaprandu64() % uint64(prev)
		thisScore := cheaprandu64() % uint64(cycles)
		if prevScore > thisScore {
			// Keep the previously buffered stack; count this event as lost.
			prof.cyclesLost += cycles
			return
		} else {
			// Replace the buffered stack below; count the old one as lost.
			prof.cyclesLost += prev
		}
	}
	prof.captureStack()
	prof.cycles = cycles
}
// captureStack records the call stack of the contended unlock into
// prof.stack, for later transfer into the mutex profile by storeSlow. The
// runtime's own unlock frames are skipped so the recorded leaf is the
// user-visible caller of runtime.unlock.
func (prof *mLockProfile) captureStack() {
	if debug.profstackdepth == 0 {
		// profstackdepth is set to 0 by the user, so mp.profStack is nil and we
		// can't record a stack trace.
		return
	}
	skip := 4 // runtime.(*mLockProfile).recordUnlock runtime.unlock2Wake runtime.unlock2 runtime.unlockWithRank
	if staticLockRanking {
		// When static lock ranking is enabled, we'll always be on the system
		// stack at this point. There will be a runtime.unlockWithRank.func1
		// frame, and if the call to runtime.unlock took place on a user stack
		// then there'll also be a runtime.systemstack frame. To keep stack
		// traces somewhat consistent whether or not static lock ranking is
		// enabled, we'd like to skip those. But it's hard to tell how long
		// we've been on the system stack so accept an extra frame in that case,
		// with a leaf of "runtime.unlockWithRank runtime.unlock" instead of
		// "runtime.unlock".
		skip += 1 // runtime.unlockWithRank.func1
	}
	// Mark the buffered sample as present before unwinding, so storeSlow
	// knows there is a stack to flush even if it holds zero PCs.
	prof.haveStack = true
	var nstk int
	gp := getg()
	sp := sys.GetCallerSP()
	pc := sys.GetCallerPC()
	// Unwind on the system stack; errors are silenced and stack switches
	// are followed (unwindJumpStack).
	systemstack(func() {
		var u unwinder
		u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack)
		nstk = tracebackPCs(&u, skip, prof.stack)
	})
	// Zero-terminate a short trace so later readers know where it ends.
	if nstk < len(prof.stack) {
		prof.stack[nstk] = 0
	}
}
// store adds the M's local record to the mutex contention profile.
//
// It only flushes once the M is releasing its final lock (m.locks == 1) and
// there is a buffered stack to report; see mLockProfile for why reporting
// must wait until then.
//
// From unlock2, we might not be holding a p in this code.
//
//go:nowritebarrierrec
func (prof *mLockProfile) store() {
	if gp := getg(); gp.m.locks == 1 && gp.m.mLockProfile.haveStack {
		prof.storeSlow()
	}
}
// storeSlow transfers the M-local buffered contention sample (stack plus
// magnitude) into the mutex profile, along with any stack-less "lost"
// contention accumulated while the buffer was occupied.
func (prof *mLockProfile) storeSlow() {
	// Report any contention we experience within this function as "lost"; it's
	// important that the act of reporting a contention event not lead to a
	// reportable contention event. This also means we can use prof.stack
	// without copying, since it won't change during this function.
	mp := acquirem()
	prof.disabled = true
	// Trim the stack at its zero terminator, if any (see captureStack).
	nstk := int(debug.profstackdepth)
	for i := 0; i < nstk; i++ {
		if pc := prof.stack[i]; pc == 0 {
			nstk = i
			break
		}
	}
	// Consume the local buffer before publishing, so a re-entrant
	// recordUnlock sees an empty buffer.
	cycles, lost := prof.cycles, prof.cyclesLost
	prof.cycles, prof.cyclesLost = 0, 0
	prof.haveStack = false
	rate := int64(atomic.Load64(&mutexprofilerate))
	saveBlockEventStack(cycles, rate, prof.stack[:nstk], mutexProfile)
	if lost > 0 {
		// Attribute stack-less contention to the sentinel
		// _LostContendedRuntimeLock location.
		lostStk := [...]uintptr{
			abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum,
		}
		saveBlockEventStack(lost, rate, lostStk[:], mutexProfile)
	}
	prof.disabled = false
	releasem(mp)
}
// saveBlockEventStack adds one sampled event (with the given duration in
// cycles, sampling rate, and call stack) to the bucket for the block or
// mutex profile indicated by which, scaling the recorded count and cycles
// to undo the sampling bias.
func saveBlockEventStack(cycles, rate int64, stk []uintptr, which bucketType) {
	// Find (or create) the bucket before taking profBlockLock: stkbucket
	// has its own locking.
	b := stkbucket(which, 0, stk, true)
	bp := b.bp()
	lock(&profBlockLock)
	// We want to up-scale the count and cycles according to the
	// probability that the event was sampled. For block profile events,
	// the sample probability is 1 if cycles >= rate, and cycles / rate
	// otherwise. For mutex profile events, the sample probability is 1 / rate.
	// We scale the events by 1 / (probability the event was sampled).
	if which == blockProfile && cycles < rate {
		// Remove sampling bias, see discussion on http://golang.org/cl/299991.
		bp.count += float64(rate) / float64(cycles)
		bp.cycles += rate
	} else if which == mutexProfile {
		bp.count += float64(rate)
		bp.cycles += rate * cycles
	} else {
		bp.count++
		bp.cycles += cycles
	}
	unlock(&profBlockLock)
}
// mutexprofilerate is the mutex profile sampling rate: on average 1 in
// mutexprofilerate contention events is sampled. It is written with
// atomic.Store64 in SetMutexProfileFraction and read with atomic.Load64 in
// the profiling paths (mutexevent, storeSlow).
var mutexprofilerate uint64 // fraction sampled
// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate < 0.
// (For n>1 the details of sampling may change.)
func SetMutexProfileFraction(rate int) int {
if rate < 0 {
return int(mutexprofilerate)
}
old := mutexprofilerate
atomic.Store64(&mutexprofilerate, uint64(rate))
return int(old)
}
// mutexevent records one sync.Mutex contention event of the given duration
// (in cycles) into the mutex profile, sampling on average 1 in
// mutexprofilerate events. skip counts stack frames to omit, not including
// mutexevent's own frame.
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	if rate <= 0 {
		// Profiling is off.
		return
	}
	if cheaprand64()%rate != 0 {
		// Not selected by the sampler.
		return
	}
	saveblockevent(cycles, rate, skip+1, mutexProfile)
}
// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}
// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	// The trace ends at the first zero entry; if there is none, the whole
	// array is the trace.
	n := len(r.Stack0)
	for i := 0; i < n; i++ {
		if r.Stack0[i] == 0 {
			n = i
			break
		}
	}
	return r.Stack0[0:n]
}
// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024 // default: sample ~1 allocation per 512 KiB
// disableMemoryProfiling is set by the linker if memory profiling
// is not used and the link type guarantees nobody else could use it
// elsewhere.
// We check if the runtime.memProfileInternal symbol is present.
// See memProfileInternal's //go:noinline comment for the other half of
// this arrangement.
var disableMemoryProfiling bool
// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}
// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}
// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	// The trace ends at the first zero entry; if there is none, the whole
	// array is the trace.
	n := len(r.Stack0)
	for i := 0; i < n; i++ {
		if r.Stack0[i] == 0 {
			n = i
			break
		}
	}
	return r.Stack0[0:n]
}
// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	// The copy callback is only invoked when the profile fits in p, so
	// p[0] is always valid here; the slice is advanced once per record.
	return memProfileInternal(len(p), inuseZero, func(r profilerecord.MemProfileRecord) {
		copyMemProfileRecord(&p[0], r)
		p = p[1:]
	})
}
// memProfileInternal returns the number of records n in the profile. If there
// are less than size records, copyFn is invoked for each record, and ok returns
// true.
//
// The linker set disableMemoryProfiling to true to disable memory profiling
// if this function is not reachable. Mark it noinline to ensure the symbol exists.
// (This function is big and normally not inlined anyway.)
// See also disableMemoryProfiling above and cmd/link/internal/ld/lib.go:linksetup.
//
//go:noinline
func memProfileInternal(size int, inuseZero bool, copyFn func(profilerecord.MemProfileRecord)) (n int, ok bool) {
	cycle := mProfCycle.read()
	// If we're between mProf_NextCycle and mProf_Flush, take care
	// of flushing to the active profile so we only have to look
	// at the active profile below.
	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	// First pass: count matching records, and note whether the active
	// profile holds any data at all.
	clear := true
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.active.allocs != mp.active.frees {
			n++
		}
		if mp.active.allocs != 0 || mp.active.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate all of the cycles, and recount buckets.
		n = 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			for c := range mp.future {
				// Fold each pending future cycle into the active
				// record, holding that cycle's lock while doing so.
				lock(&profMemFutureLock[c])
				mp.active.add(&mp.future[c])
				mp.future[c] = memRecordCycle{}
				unlock(&profMemFutureLock[c])
			}
			if inuseZero || mp.active.allocs != mp.active.frees {
				n++
			}
		}
	}
	// Second pass: if the caller's buffer is large enough, hand each
	// matching record to copyFn.
	if n <= size {
		ok = true
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.active.allocs != mp.active.frees {
				r := profilerecord.MemProfileRecord{
					AllocBytes:   int64(mp.active.alloc_bytes),
					FreeBytes:    int64(mp.active.free_bytes),
					AllocObjects: int64(mp.active.allocs),
					FreeObjects:  int64(mp.active.frees),
					Stack:        b.stk(),
				}
				copyFn(r)
			}
		}
	}
	unlock(&profMemActiveLock)
	return
}
// copyMemProfileRecord copies the sample values and call stack from the
// internal record src into the exported record dst, notifying the race,
// msan, and asan runtimes (when enabled) about the write to dst.Stack0.
func copyMemProfileRecord(dst *MemProfileRecord, src profilerecord.MemProfileRecord) {
	// Byte totals are reconstructed from per-object counts and the
	// bucket's object size.
	dst.AllocBytes = src.AllocObjects * src.ObjectSize
	dst.FreeBytes = src.FreeObjects * src.ObjectSize
	dst.AllocObjects = src.AllocObjects
	dst.FreeObjects = src.FreeObjects
	// Tell the sanitizers about the upcoming write before performing it.
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), sys.GetCallerPC(), abi.FuncPCABIInternal(MemProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
	}
	if asanenabled {
		asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
	}
	// Copy the trace and zero the tail so Stack() finds the terminator.
	i := copy(dst.Stack0[:], src.Stack)
	clear(dst.Stack0[i:])
}
// pprof_memProfileInternal exposes memProfileInternal to runtime/pprof via
// linkname, filling p with internal-format records (no conversion to the
// exported MemProfileRecord type).
//
//go:linkname pprof_memProfileInternal
func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool) {
	return memProfileInternal(len(p), inuseZero, func(r profilerecord.MemProfileRecord) {
		p[0] = r
		p = p[1:]
	})
}
// iterate_memprof invokes fn for every memory-profile bucket while holding
// profMemActiveLock, passing the bucket, its stack depth and first stack
// slot, its object size, and the active alloc/free counts.
// NOTE(review): presumably linknamed/consumed outside this file — confirm
// callers before changing the callback signature.
func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&profMemActiveLock)
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
	}
	unlock(&profMemActiveLock)
}
// BlockProfileRecord describes blocking events originated
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64 // number of events at this call sequence
	Cycles int64 // total cycles spent blocked at this call sequence
	StackRecord
}
// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the [runtime/pprof] package or
// the [testing] package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	var m int
	n, ok = blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
		copyBlockProfileRecord(&p[m], r)
		m++
	})
	// Inline expansion happens here, after the profile lock is released
	// inside blockProfileInternal (it may allocate; see
	// copyBlockProfileRecord).
	if ok {
		expandFrames(p[:n])
	}
	return
}
// expandFrames rewrites each record's call stack with inline frames
// expanded, as needed when the stack was collected by frame pointer
// unwinding (which records only physical frames). Stacks are truncated to
// the size of the scratch buffer from makeProfStack.
func expandFrames(p []BlockProfileRecord) {
	expandedStack := makeProfStack()
	for i := range p {
		cf := CallersFrames(p[i].Stack())
		j := 0
		for j < len(expandedStack) {
			f, more := cf.Next()
			// f.PC is a "call PC", but later consumers will expect
			// "return PCs"
			expandedStack[j] = f.PC + 1
			j++
			if !more {
				break
			}
		}
		// Write the expanded trace back and zero the tail so Stack()
		// finds the terminator.
		k := copy(p[i].Stack0[:], expandedStack[:j])
		clear(p[i].Stack0[k:])
	}
}
// blockProfileInternal returns the number of records n in the profile. If there
// are less than size records, copyFn is invoked for each record, and ok returns
// true.
//
// copyFn runs with profBlockLock held, so it must not allocate or otherwise
// violate lock ordering.
func blockProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool) {
	lock(&profBlockLock)
	// First pass: count the buckets.
	head := (*bucket)(bbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	// Second pass: copy out only if the caller's buffer can hold them all.
	if n <= size {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := profilerecord.BlockProfileRecord{
				Count:  int64(bp.count),
				Cycles: bp.cycles,
				Stack:  b.stk(),
			}
			// Prevent callers from having to worry about division by zero errors.
			// See discussion on http://golang.org/cl/299991.
			if r.Count == 0 {
				r.Count = 1
			}
			copyFn(r)
		}
	}
	unlock(&profBlockLock)
	return
}
// copyBlockProfileRecord copies the sample values and call stack from src to dst.
// The call stack is copied as-is. The caller is responsible for handling inline
// expansion, needed when the call stack was collected with frame pointer unwinding.
func copyBlockProfileRecord(dst *BlockProfileRecord, src profilerecord.BlockProfileRecord) {
	dst.Count = src.Count
	dst.Cycles = src.Cycles
	// Tell the sanitizers about the upcoming write before performing it.
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), sys.GetCallerPC(), abi.FuncPCABIInternal(BlockProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
	}
	if asanenabled {
		asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
	}
	// We just copy the stack here without inline expansion
	// (needed if frame pointer unwinding is used)
	// since this function is called under the profile lock,
	// and doing something that might allocate can violate lock ordering.
	i := copy(dst.Stack0[:], src.Stack)
	clear(dst.Stack0[i:])
}
// pprof_blockProfileInternal exposes blockProfileInternal to runtime/pprof
// via linkname, filling p with internal-format records.
//
//go:linkname pprof_blockProfileInternal
func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) {
	return blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
		p[0] = r
		p = p[1:]
	})
}
// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
// Most clients should use the [runtime/pprof] package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
	var m int
	n, ok = mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
		copyBlockProfileRecord(&p[m], r)
		m++
	})
	// Inline expansion happens outside the profile lock; see BlockProfile.
	if ok {
		expandFrames(p[:n])
	}
	return
}
// mutexProfileInternal returns the number of records n in the profile. If there
// are less than size records, copyFn is invoked for each record, and ok returns
// true.
//
// copyFn runs with profBlockLock held, so it must not allocate or otherwise
// violate lock ordering.
func mutexProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool) {
	lock(&profBlockLock)
	// First pass: count the buckets.
	head := (*bucket)(xbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	// Second pass: copy out only if the caller's buffer can hold them all.
	if n <= size {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := profilerecord.BlockProfileRecord{
				Count:  int64(bp.count),
				Cycles: bp.cycles,
				Stack:  b.stk(),
			}
			copyFn(r)
		}
	}
	unlock(&profBlockLock)
	return
}
// pprof_mutexProfileInternal exposes mutexProfileInternal to runtime/pprof
// via linkname, filling p with internal-format records.
//
//go:linkname pprof_mutexProfileInternal
func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) {
	return mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
		p[0] = r
		p = p[1:]
	})
}
// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	// The copy callback fills p[0] and advances the slice; it is only
	// invoked when the whole profile fits (see threadCreateProfileInternal).
	return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
		i := copy(p[0].Stack0[:], r.Stack)
		clear(p[0].Stack0[i:])
		p = p[1:]
	})
}
// threadCreateProfileInternal returns the number of records n in the profile.
// If there are less than size records, copyFn is invoked for each record, and
// ok returns true.
func threadCreateProfileInternal(size int, copyFn func(profilerecord.StackRecord)) (n int, ok bool) {
	// Snapshot the head of the M list; Ms are never removed from allm, so
	// walking via alllink without a lock is safe here.
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= size {
		ok = true
		for mp := first; mp != nil; mp = mp.alllink {
			r := profilerecord.StackRecord{Stack: mp.createstack[:]}
			copyFn(r)
		}
	}
	return
}
// pprof_threadCreateInternal exposes threadCreateProfileInternal to
// runtime/pprof via linkname, filling p with internal-format records.
//
//go:linkname pprof_threadCreateInternal
func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool) {
	return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
		p[0] = r
		p = p[1:]
	})
}
// pprof_goroutineProfileWithLabels exposes goroutineProfileWithLabels to
// runtime/pprof via linkname.
//
//go:linkname pprof_goroutineProfileWithLabels
func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	return goroutineProfileWithLabels(p, labels)
}
// labels may be nil. If labels is non-nil, it must have the same length as p.
func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	lbl := labels
	if len(lbl) != len(p) {
		// A mismatched label slice cannot be filled in lockstep with p;
		// drop it rather than risk indexing out of bounds.
		lbl = nil
	}
	return goroutineProfileWithLabelsConcurrent(p, lbl)
}
// pprof_goroutineLeakProfileWithLabels exposes
// goroutineLeakProfileWithLabels to runtime/pprof via linkname.
//
//go:linkname pprof_goroutineLeakProfileWithLabels
func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	return goroutineLeakProfileWithLabels(p, labels)
}
// labels may be nil. If labels is non-nil, it must have the same length as p.
func goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	lbl := labels
	if len(lbl) != len(p) {
		// A mismatched label slice cannot be filled in lockstep with p;
		// drop it rather than risk indexing out of bounds.
		lbl = nil
	}
	return goroutineLeakProfileWithLabelsConcurrent(p, lbl)
}
// goroutineProfile is the state shared between the goroutine coordinating a
// goroutine profile and the scheduler paths that record stacks on its
// behalf (see tryRecordGoroutineProfile).
var goroutineProfile = struct {
	sema    uint32 // semaphore serializing profile collection; initialized to 1 (unlocked)
	active  bool   // a goroutine profile is in progress; set/cleared with the world stopped
	offset  atomic.Int64 // next free index in records
	records []profilerecord.StackRecord // destination buffer supplied by the profiling caller
	labels  []unsafe.Pointer            // parallel label buffer; may be nil
}{
	sema: 1,
}
// goroutineProfileState indicates the status of a goroutine's stack for the
// current in-progress goroutine profile. Goroutines' stacks are initially
// "Absent" from the profile, and end up "Satisfied" by the time the profile is
// complete. While a goroutine's stack is being captured, its
// goroutineProfileState will be "InProgress" and it will not be able to run
// until the capture completes and the state moves to "Satisfied".
//
// Some goroutines (the finalizer goroutine, which at various times can be
// either a "system" or a "user" goroutine, and the goroutine that is
// coordinating the profile, any goroutines created during the profile) move
// directly to the "Satisfied" state.
type goroutineProfileState uint32

const (
	goroutineProfileAbsent     goroutineProfileState = iota // not yet in the profile
	goroutineProfileInProgress                              // stack capture underway; goroutine must not run
	goroutineProfileSatisfied                               // in the profile, or deliberately excluded
)
// goroutineProfileStateHolder wraps an atomic.Uint32 to hold a
// goroutineProfileState with typed atomic accessors.
type goroutineProfileStateHolder atomic.Uint32

// Load atomically reads the current state.
func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
	return goroutineProfileState((*atomic.Uint32)(p).Load())
}

// Store atomically replaces the current state.
func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
	(*atomic.Uint32)(p).Store(uint32(value))
}

// CompareAndSwap atomically replaces old with new, reporting success.
func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
	return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
}
// goroutineLeakProfileWithLabelsConcurrent collects stacks (and labels, if
// labels is non-nil) of all goroutines currently marked as leaked
// (_Gleaked). It returns the number of leaked goroutines and whether p was
// large enough to hold them all.
func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	if len(p) == 0 {
		// An empty slice is obviously too small. Return a rough
		// allocation estimate.
		return work.goroutineLeak.count, false
	}

	pcbuf := makeProfStack() // see saveg() for explanation

	// Prepare a profile large enough to store all leaked goroutines.
	n = work.goroutineLeak.count

	if n > len(p) {
		// There's not enough space in p to store the whole profile, so
		// we're not allowed to write to p at all and must return n, false.
		return n, false
	}

	// Visit each leaked goroutine and try to record its stack.
	var offset int
	forEachGRace(func(gp1 *g) {
		// Mask off the _Gscan bit so a concurrently scanned leaked
		// goroutine is still matched.
		if readgstatus(gp1)&^_Gscan == _Gleaked {
			// ^uintptr(0) for pc/sp makes saveg unwind from the
			// goroutine's saved scheduling state.
			systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &p[offset], pcbuf) })
			if labels != nil {
				labels[offset] = gp1.labels
			}
			offset++
		}
	})

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	return n, true
}
// goroutineProfileWithLabelsConcurrent collects the stacks (and labels, if
// labels is non-nil) of all user goroutines into p, stopping the world only
// briefly at the start and end of collection. It returns the number of
// goroutines and whether p was large enough to hold them all.
func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	if len(p) == 0 {
		// An empty slice is obviously too small. Return a rough
		// allocation estimate without bothering to STW. As long as
		// this is close, then we'll only need to STW once (on the next
		// call).
		return int(gcount(false)), false
	}

	// Serialize concurrent profile attempts.
	semacquire(&goroutineProfile.sema)

	ourg := getg()

	pcbuf := makeProfStack() // see saveg() for explanation
	stw := stopTheWorld(stwGoroutineProfile)
	// Using gcount while the world is stopped should give us a consistent view
	// of the number of live goroutines, minus the number of goroutines that are
	// alive and permanently marked as "system". But to make this count agree
	// with what we'd get from isSystemGoroutine, we need special handling for
	// goroutines that can vary between user and system to ensure that the count
	// doesn't change during the collection. So, check the finalizer goroutine
	// and cleanup goroutines in particular.
	n = int(gcount(false))
	if fingStatus.Load()&fingRunningFinalizer != 0 {
		n++
	}
	n += int(gcCleanups.running.Load())

	if n > len(p) {
		// There's not enough space in p to store the whole profile, so (per the
		// contract of runtime.GoroutineProfile) we're not allowed to write to p
		// at all and must return n, false.
		startTheWorld(stw)
		semrelease(&goroutineProfile.sema)
		return n, false
	}

	// Save current goroutine.
	sp := sys.GetCallerSP()
	pc := sys.GetCallerPC()
	systemstack(func() {
		saveg(pc, sp, ourg, &p[0], pcbuf)
	})
	if labels != nil {
		labels[0] = ourg.labels
	}
	ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
	// Slot 0 is ours; everyone else claims slots from 1 upward.
	goroutineProfile.offset.Store(1)

	// Prepare for all other goroutines to enter the profile. Aside from ourg,
	// every goroutine struct in the allgs list has its goroutineProfiled field
	// cleared. Any goroutine created from this point on (while
	// goroutineProfile.active is set) will start with its goroutineProfiled
	// field set to goroutineProfileSatisfied.
	goroutineProfile.active = true
	goroutineProfile.records = p
	goroutineProfile.labels = labels
	startTheWorld(stw)

	// Visit each goroutine that existed as of the startTheWorld call above.
	//
	// New goroutines may not be in this list, but we didn't want to know about
	// them anyway. If they do appear in this list (via reusing a dead goroutine
	// struct, or racing to launch between the world restarting and us getting
	// the list), they will already have their goroutineProfiled field set to
	// goroutineProfileSatisfied before their state transitions out of _Gdead.
	//
	// Any goroutine that the scheduler tries to execute concurrently with this
	// call will start by adding itself to the profile (before the act of
	// executing can cause any changes in its stack).
	forEachGRace(func(gp1 *g) {
		tryRecordGoroutineProfile(gp1, pcbuf, Gosched)
	})

	// Briefly stop the world again to tear down the shared profile state.
	stw = stopTheWorld(stwGoroutineProfileCleanup)
	endOffset := goroutineProfile.offset.Swap(0)
	goroutineProfile.active = false
	goroutineProfile.records = nil
	goroutineProfile.labels = nil
	startTheWorld(stw)

	// Restore the invariant that every goroutine struct in allgs has its
	// goroutineProfiled field cleared.
	forEachGRace(func(gp1 *g) {
		gp1.goroutineProfiled.Store(goroutineProfileAbsent)
	})

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	if n != int(endOffset) {
		// It's a big surprise that the number of goroutines changed while we
		// were collecting the profile. But probably better to return a
		// truncated profile than to crash the whole process.
		//
		// For instance, needm moves a goroutine out of the _Gdeadextra state and so
		// might be able to change the goroutine count without interacting with
		// the scheduler. For code like that, the race windows are small and the
		// combination of features is uncommon, so it's hard to be (and remain)
		// sure we've caught them all.
	}

	semrelease(&goroutineProfile.sema)
	return n, true
}
// tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
// tryRecordGoroutineProfile.
//
//go:yeswritebarrierrec
func tryRecordGoroutineProfileWB(gp1 *g) {
	// Write barriers require a P; fail loudly rather than corrupt memory.
	if getg().m.p.ptr() == nil {
		throw("no P available, write barriers are forbidden")
	}
	// nil pcbuf: doRecordGoroutineProfile will fall back accordingly;
	// osyield is the wait primitive while another M captures gp1.
	tryRecordGoroutineProfile(gp1, nil, osyield)
}
// tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
// in the current goroutine profile: either that it should not be profiled, or
// that a snapshot of its call stack and labels are now in the profile.
func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
	if status := readgstatus(gp1); status == _Gdead || status == _Gdeadextra {
		// Dead goroutines should not appear in the profile. Goroutines that
		// start while profile collection is active will get goroutineProfiled
		// set to goroutineProfileSatisfied before transitioning out of _Gdead,
		// so here we check _Gdead first.
		return
	}

	// State machine on gp1.goroutineProfiled: loop until this goroutine is
	// Satisfied, either by us or by whoever currently holds InProgress.
	for {
		prev := gp1.goroutineProfiled.Load()
		if prev == goroutineProfileSatisfied {
			// This goroutine is already in the profile (or is new since the
			// start of collection, so shouldn't appear in the profile).
			break
		}
		if prev == goroutineProfileInProgress {
			// Something else is adding gp1 to the goroutine profile right now.
			// Give that a moment to finish.
			yield()
			continue
		}

		// While we have gp1.goroutineProfiled set to
		// goroutineProfileInProgress, gp1 may appear _Grunnable but will not
		// actually be able to run. Disable preemption for ourselves, to make
		// sure we finish profiling gp1 right away instead of leaving it stuck
		// in this limbo.
		mp := acquirem()
		if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
			doRecordGoroutineProfile(gp1, pcbuf)
			gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
		// If the CAS failed, loop and re-observe the state.
		releasem(mp)
	}
}
// doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress
// goroutine profile. Preemption is disabled.
//
// This may be called via tryRecordGoroutineProfile in two ways: by the
// goroutine that is coordinating the goroutine profile (running on its own
// stack), or from the scheduler in preparation to execute gp1 (running on the
// system stack).
func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
	if isSystemGoroutine(gp1, false) {
		// System goroutines should not appear in the profile.
		// Check this here and not in tryRecordGoroutineProfile because isSystemGoroutine
		// may change on a goroutine while it is executing, so while the scheduler might
		// see a system goroutine, goroutineProfileWithLabelsConcurrent might not, and
		// this inconsistency could cause invariants to be violated, such as trying to
		// record the stack of a running goroutine below. In short, we still want system
		// goroutines to participate in the same state machine on gp1.goroutineProfiled as
		// everything else, we just don't record the stack in the profile.
		return
	}

	// Double-check that we didn't make a grave mistake. If the G is running then in
	// general, we cannot safely read its stack.
	//
	// However, there is one case where it's OK. There's a small window of time in
	// exitsyscall where a goroutine could be in _Grunning as it's exiting a syscall.
	// This is OK because the goroutine will not exit the syscall until it passes
	// through a call to tryRecordGoroutineProfile. (An explicit one on the fast path,
	// an implicit one via the scheduler on the slow path.)
	//
	// This is also why it's safe to check syscallsp here. The syscall path mutates
	// syscallsp only after passing through tryRecordGoroutineProfile.
	if readgstatus(gp1) == _Grunning && gp1.syscallsp == 0 {
		print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
		throw("cannot read stack of running goroutine")
	}

	// Claim the next free slot in the shared profile buffer.
	offset := int(goroutineProfile.offset.Add(1)) - 1

	if offset >= len(goroutineProfile.records) {
		// Should be impossible, but better to return a truncated profile than
		// to crash the entire process at this point. Instead, deal with it in
		// goroutineProfileWithLabelsConcurrent where we have more context.
		return
	}

	// saveg calls gentraceback, which may call cgo traceback functions. When
	// called from the scheduler, this is on the system stack already so
	// traceback.go:cgoContextPCs will avoid calling back into the scheduler.
	//
	// When called from the goroutine coordinating the profile, we still have
	// set gp1.goroutineProfiled to goroutineProfileInProgress and so are still
	// preventing it from being truly _Grunnable. So we'll use the system stack
	// to avoid schedule delays.
	systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset], pcbuf) })

	if goroutineProfile.labels != nil {
		goroutineProfile.labels[offset] = gp1.labels
	}
}
// goroutineProfileWithLabelsSync collects the stacks (and labels, if labels
// is non-nil) of all user goroutines into p while the world is stopped for
// the entire collection. It returns the number of goroutines and whether p
// was large enough to hold them all.
func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	gp := getg()

	// isOK reports whether gp1 should appear in the profile (everything
	// except ourselves, dead goroutines, and system goroutines).
	isOK := func(gp1 *g) bool {
		// Checking isSystemGoroutine here makes GoroutineProfile
		// consistent with both NumGoroutine and Stack.
		if gp1 == gp {
			return false
		}
		if status := readgstatus(gp1); status == _Gdead || status == _Gdeadextra {
			return false
		}
		if isSystemGoroutine(gp1, false) {
			return false
		}
		return true
	}

	pcbuf := makeProfStack() // see saveg() for explanation
	stw := stopTheWorld(stwGoroutineProfile)

	// World is stopped, no locking required.
	n = 1 // ourselves
	forEachGRace(func(gp1 *g) {
		if isOK(gp1) {
			n++
		}
	})

	if n <= len(p) {
		ok = true
		r, lbl := p, labels

		// Save current goroutine.
		sp := sys.GetCallerSP()
		pc := sys.GetCallerPC()
		systemstack(func() {
			saveg(pc, sp, gp, &r[0], pcbuf)
		})
		r = r[1:]

		// If we have a place to put our goroutine labelmap, insert it there.
		if labels != nil {
			lbl[0] = gp.labels
			lbl = lbl[1:]
		}

		// Save other goroutines.
		forEachGRace(func(gp1 *g) {
			if !isOK(gp1) {
				return
			}

			if len(r) == 0 {
				// Should be impossible, but better to return a
				// truncated profile than to crash the entire process.
				return
			}

			// saveg calls gentraceback, which may call cgo traceback functions.
			// The world is stopped, so it cannot use cgocall (which will be
			// blocked at exitsyscall). Do it on the system stack so it won't
			// call into the scheduler (see traceback.go:cgoContextPCs).
			systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0], pcbuf) })
			if labels != nil {
				lbl[0] = gp1.labels
				lbl = lbl[1:]
			}
			r = r[1:]
		})
	}

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	startTheWorld(stw)
	return n, ok
}
// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the [runtime/pprof] package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
	// Collect into the internal record type, then translate each
	// variable-length Stack into the fixed-size Stack0 array.
	scratch := make([]profilerecord.StackRecord, len(p))
	n, ok = goroutineProfileInternal(scratch)
	if !ok {
		return n, ok
	}
	for i := range scratch[:n] {
		copied := copy(p[i].Stack0[:], scratch[i].Stack)
		// Zero the tail so stale PCs from a previous use of p don't leak.
		clear(p[i].Stack0[copied:])
	}
	return n, ok
}
// goroutineProfileInternal is the label-free entry point backing
// GoroutineProfile; it delegates to goroutineProfileWithLabels with
// no label slice.
func goroutineProfileInternal(p []profilerecord.StackRecord) (n int, ok bool) {
	return goroutineProfileWithLabels(p, nil)
}
// saveg records gp's stack trace, unwound from pc/sp, into a freshly
// allocated r.Stack sized exactly to the trace. Callers above pass
// ^uintptr(0) for pc and sp when profiling a goroutine that is not
// currently running (interpreted by u.initAt; see traceback.go).
func saveg(pc, sp uintptr, gp *g, r *profilerecord.StackRecord, pcbuf []uintptr) {
	// To reduce memory usage, we want to allocate a r.Stack that is just big
	// enough to hold gp's stack trace. Naively we might achieve this by
	// recording our stack trace into mp.profStack, and then allocating a
	// r.Stack of the right size. However, mp.profStack is also used for
	// allocation profiling, so it could get overwritten if the slice allocation
	// gets profiled. So instead we record the stack trace into a temporary
	// pcbuf which is usually given to us by our caller. When it's not, we have
	// to allocate one here. This will only happen for goroutines that were in a
	// syscall when the goroutine profile started or for goroutines that manage
	// to execute before we finish iterating over all the goroutines.
	if pcbuf == nil {
		pcbuf = makeProfStack()
	}

	var u unwinder
	u.initAt(pc, sp, 0, gp, unwindSilentErrors)
	n := tracebackPCs(&u, 0, pcbuf)
	r.Stack = make([]uintptr, n)
	copy(r.Stack, pcbuf)
}
// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	var stw worldStop
	if all {
		// Other goroutines must not run while we read their stacks.
		stw = stopTheWorld(stwAllGoroutinesStack)
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := sys.GetCallerSP()
		pc := sys.GetCallerPC()
		systemstack(func() {
			g0 := getg()
			// Force traceback=1 to override GOTRACEBACK setting,
			// so that Stack's results are consistent.
			// GOTRACEBACK is only about crash dumps.
			g0.m.traceback = 1
			// Redirect print output into buf. The three-index slice
			// caps writes at len(buf), so the traceback cannot
			// overflow the caller's buffer.
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			g0.m.traceback = 0
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld(stw)
	}
	return n
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Address range data structure.
//
// This file contains an implementation of a data structure which
// manages ordered address ranges.
package runtime
import (
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
)
// addrRange represents a region of address space.
//
// An addrRange must never span a gap in the address space.
type addrRange struct {
	// base and limit together represent the region of address space
	// [base, limit). That is, base is inclusive, limit is exclusive.
	// These are addresses over an offset view of the address space on
	// platforms with a segmented address space, that is, on platforms
	// where arenaBaseOffset != 0.
	base, limit offAddr
}
// makeAddrRange creates a new address range from two virtual addresses.
//
// Throws if the base and limit are not in the same memory segment.
func makeAddrRange(base, limit uintptr) addrRange {
	// Rebasing by arenaBaseOffset must wrap (or not wrap) identically
	// for both endpoints; a mismatch means the range crosses a
	// segment boundary.
	baseWraps := base-arenaBaseOffset >= base
	limitWraps := limit-arenaBaseOffset >= limit
	if baseWraps != limitWraps {
		throw("addr range base and limit are not in the same memory segment")
	}
	return addrRange{offAddr{base}, offAddr{limit}}
}
// size returns the size of the range represented in bytes.
func (a addrRange) size() uintptr {
	if a.base.lessThan(a.limit) {
		// Subtraction is safe because limit and base must be in the
		// same segment of the address space.
		return a.limit.diff(a.base)
	}
	// An inverted or empty range has zero size.
	return 0
}
// contains returns whether or not the range contains a given address.
func (a addrRange) contains(addr uintptr) bool {
	// Half-open interval check: base <= addr < limit, in offset space.
	p := offAddr{addr}
	return a.base.lessEqual(p) && p.lessThan(a.limit)
}
// subtract removes any overlap between a and b from a and returns what
// remains of a. subtract assumes that a and b either don't overlap at
// all, only overlap on one side, or are equal.
// If b is strictly contained in a, thus forcing a split, it will throw.
func (a addrRange) subtract(b addrRange) addrRange {
	if b.base.lessEqual(a.base) && a.limit.lessEqual(b.limit) {
		// b fully covers a: nothing remains.
		return addrRange{}
	} else if a.base.lessThan(b.base) && b.limit.lessThan(a.limit) {
		// b is strictly inside a; removing it would split a in two.
		throw("bad prune")
	} else if b.limit.lessThan(a.limit) && a.base.lessThan(b.limit) {
		// b overlaps a's low end; trim a's base up.
		a.base = b.limit
	} else if a.base.lessThan(b.base) && b.base.lessThan(a.limit) {
		// b overlaps a's high end; trim a's limit down.
		a.limit = b.base
	}
	return a
}
// takeFromFront takes len bytes from the front of the address range, aligning
// the base to align first. On success, returns the aligned start of the region
// taken and true.
func (a *addrRange) takeFromFront(len uintptr, align uint8) (uintptr, bool) {
	start := alignUp(a.base.addr(), uintptr(align))
	end := start + len
	if end > a.limit.addr() {
		// Not enough room after alignment.
		return 0, false
	}
	a.base = offAddr{end}
	return start, true
}
// takeFromBack takes len bytes from the end of the address range, aligning
// the limit to align after subtracting len. On success, returns the aligned
// start of the region taken and true.
func (a *addrRange) takeFromBack(len uintptr, align uint8) (uintptr, bool) {
	start := alignDown(a.limit.addr()-len, uintptr(align))
	if start < a.base.addr() {
		// The aligned region would fall below the range's base.
		return 0, false
	}
	a.limit = offAddr{start}
	return start, true
}
// removeGreaterEqual removes all addresses in a greater than or equal
// to addr and returns the new range.
func (a addrRange) removeGreaterEqual(addr uintptr) addrRange {
	cut := offAddr{addr}
	switch {
	case cut.lessEqual(a.base):
		// The cut point is at or below the base: everything goes.
		return addrRange{}
	case a.limit.lessEqual(cut):
		// The cut point is at or above the limit: nothing to remove.
		return a
	default:
		// The cut point splits the range; keep the low portion.
		return makeAddrRange(a.base.addr(), addr)
	}
}
var (
	// minOffAddr is the minimum address in the offset space, and
	// it corresponds to the virtual address arenaBaseOffset.
	minOffAddr = offAddr{arenaBaseOffset}

	// maxOffAddr is the maximum address in the offset address
	// space. It corresponds to the highest virtual address representable
	// by the page alloc chunk and heap arena maps. The uintptrMask
	// truncation keeps the value representable on 32-bit platforms.
	maxOffAddr = offAddr{(((1 << heapAddrBits) - 1) + arenaBaseOffset) & uintptrMask}
)
// offAddr represents an address in a contiguous view
// of the address space on systems where the address space is
// segmented. On other systems, it's just a normal address.
type offAddr struct {
	// a is just the virtual address, but should never be used
	// directly. Call addr() to get this value instead.
	a uintptr
}

// add adds a uintptr offset to the offAddr.
func (l offAddr) add(bytes uintptr) offAddr {
	return offAddr{a: l.a + bytes}
}

// sub subtracts a uintptr offset from the offAddr.
func (l offAddr) sub(bytes uintptr) offAddr {
	return offAddr{a: l.a - bytes}
}

// diff returns the amount of bytes in between the
// two offAddrs.
func (l1 offAddr) diff(l2 offAddr) uintptr {
	return l1.a - l2.a
}

// lessThan returns true if l1 is less than l2 in the offset
// address space. Subtracting arenaBaseOffset before comparing
// makes the comparison correct even when the virtual addresses
// straddle a segment boundary.
func (l1 offAddr) lessThan(l2 offAddr) bool {
	return (l1.a - arenaBaseOffset) < (l2.a - arenaBaseOffset)
}

// lessEqual returns true if l1 is less than or equal to l2 in
// the offset address space.
func (l1 offAddr) lessEqual(l2 offAddr) bool {
	return (l1.a - arenaBaseOffset) <= (l2.a - arenaBaseOffset)
}

// equal returns true if the two offAddr values are equal.
func (l1 offAddr) equal(l2 offAddr) bool {
	// No need to compare in the offset space, it
	// means the same thing.
	return l1 == l2
}

// addr returns the virtual address for this offset address.
func (l offAddr) addr() uintptr {
	return l.a
}
// atomicOffAddr is like offAddr, but operations on it are atomic.
// It also contains operations to be able to store marked addresses
// to ensure that they're not overridden until they've been seen.
// A marked address is stored negated, so marked values are exactly
// the negative values of a.
type atomicOffAddr struct {
	// a contains the offset address, unlike offAddr.
	a atomic.Int64
}

// Clear attempts to store minOffAddr in atomicOffAddr. It may fail
// if a marked value is placed in the box in the meanwhile.
func (b *atomicOffAddr) Clear() {
	for {
		old := b.a.Load()
		if old < 0 {
			// The value is marked; leave it for its reader.
			return
		}
		if b.a.CompareAndSwap(old, int64(minOffAddr.addr()-arenaBaseOffset)) {
			return
		}
		// CAS lost a race; reload and retry.
	}
}

// StoreMin stores addr if it's less than the current value in the
// offset address space if the current value is not marked. A marked
// (negative) value always compares less than any unmarked value, so
// marked values are preserved.
func (b *atomicOffAddr) StoreMin(addr uintptr) {
	new := int64(addr - arenaBaseOffset)
	for {
		old := b.a.Load()
		if old < new {
			// Current value is already smaller (or marked).
			return
		}
		if b.a.CompareAndSwap(old, new) {
			return
		}
	}
}

// StoreUnmark attempts to unmark the value in atomicOffAddr and
// replace it with newAddr. markedAddr must be a marked address
// returned by Load. This function will not store newAddr if the
// box no longer contains markedAddr.
func (b *atomicOffAddr) StoreUnmark(markedAddr, newAddr uintptr) {
	b.a.CompareAndSwap(-int64(markedAddr-arenaBaseOffset), int64(newAddr-arenaBaseOffset))
}

// StoreMarked stores addr but first converted to the offset address
// space and then negated.
func (b *atomicOffAddr) StoreMarked(addr uintptr) {
	b.a.Store(-int64(addr - arenaBaseOffset))
}

// Load returns the address in the box as a virtual address. It also
// returns if the value was marked or not.
func (b *atomicOffAddr) Load() (uintptr, bool) {
	v := b.a.Load()
	wasMarked := false
	if v < 0 {
		// Negative means marked; recover the magnitude.
		wasMarked = true
		v = -v
	}
	return uintptr(v) + arenaBaseOffset, wasMarked
}
// addrRanges is a data structure holding a collection of ranges of
// address space.
//
// The ranges are coalesced eagerly to reduce the
// number of ranges it holds.
//
// The slice backing store for this field is persistentalloc'd
// and thus there is no way to free it.
//
// addrRanges is not thread-safe.
type addrRanges struct {
	// ranges is a slice of ranges sorted by base.
	ranges []addrRange

	// totalBytes is the total amount of address space in bytes counted by
	// this addrRanges.
	totalBytes uintptr

	// sysStat is the stat to track allocations by this type
	sysStat *sysMemStat
}
// init initializes a as an empty addrRanges whose backing array is
// persistentalloc'd with an initial capacity of 16. The slice header is
// built by hand because the array lives off-heap and must be typed as
// notInHeap so the GC never treats it as a heap pointer.
func (a *addrRanges) init(sysStat *sysMemStat) {
	ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
	ranges.len = 0
	ranges.cap = 16
	ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
	a.sysStat = sysStat
	a.totalBytes = 0
}
// findSucc returns the first index in a such that addr is
// less than the base of the addrRange at that index.
// If no such index exists (addr is >= every range's base), it
// returns len(a.ranges).
func (a *addrRanges) findSucc(addr uintptr) int {
	base := offAddr{addr}

	// Narrow down the search space via a binary search
	// for large addrRanges until we have at most iterMax
	// candidates left.
	const iterMax = 8
	bot, top := 0, len(a.ranges)
	for top-bot > iterMax {
		i := int(uint(bot+top) >> 1) // midpoint, written to avoid overflow
		if a.ranges[i].contains(base.addr()) {
			// a.ranges[i] contains base, so
			// its successor is the next index.
			return i + 1
		}
		if base.lessThan(a.ranges[i].base) {
			// In this case i might actually be
			// the successor, but we can't be sure
			// until we check the ones before it.
			top = i
		} else {
			// In this case we know base is
			// greater than or equal to a.ranges[i].limit-1,
			// so i is definitely not the successor.
			// We already checked i, so pick the next
			// one.
			bot = i + 1
		}
	}
	// There are top-bot candidates left, so
	// iterate over them and find the first that
	// base is strictly less than.
	for i := bot; i < top; i++ {
		if base.lessThan(a.ranges[i].base) {
			return i
		}
	}
	return top
}
// findAddrGreaterEqual returns the smallest address represented by a
// that is >= addr. Thus, if the address is represented by a,
// then it returns addr. The second return value indicates whether
// such an address exists for addr in a. That is, if addr is larger than
// any address known to a, the second return value will be false.
func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool) {
	i := a.findSucc(addr)
	if i == 0 {
		// NOTE(review): findSucc also returns 0 when a.ranges is empty,
		// in which case this index would fault — callers presumably only
		// query a non-empty set; confirm at call sites.
		return a.ranges[0].base.addr(), true
	}
	if a.ranges[i-1].contains(addr) {
		// addr itself is represented by a.
		return addr, true
	}
	if i < len(a.ranges) {
		// addr falls in the gap before range i; its base is the answer.
		return a.ranges[i].base.addr(), true
	}
	return 0, false
}
// contains returns true if a covers the address addr.
func (a *addrRanges) contains(addr uintptr) bool {
	// findSucc returns the index after the only range that
	// could possibly hold addr; index 0 means addr precedes
	// every range.
	i := a.findSucc(addr)
	return i > 0 && a.ranges[i-1].contains(addr)
}
// add inserts a new address range to a.
//
// r must not overlap with any address range in a and r.size() must be > 0.
func (a *addrRanges) add(r addrRange) {
	// The copies in this function are potentially expensive, but this data
	// structure is meant to represent the Go heap. At worst, copying this
	// would take ~160µs assuming a conservative copying rate of 25 GiB/s (the
	// copy will almost never trigger a page fault) for a 1 TiB heap with 4 MiB
	// arenas which is completely discontiguous. ~160µs is still a lot, but in
	// practice most platforms have 64 MiB arenas (which cuts this by a factor
	// of 16) and Go heaps are usually mostly contiguous, so the chance that
	// an addrRanges even grows to that size is extremely low.

	// An empty range has no effect on the set of addresses represented
	// by a, but passing a zero-sized range is almost always a bug.
	if r.size() == 0 {
		print("runtime: range = {", hex(r.base.addr()), ", ", hex(r.limit.addr()), "}\n")
		throw("attempted to add zero-sized address range")
	}
	// Because we assume r is not currently represented in a,
	// findSucc gives us our insertion index.
	i := a.findSucc(r.base.addr())
	coalescesDown := i > 0 && a.ranges[i-1].limit.equal(r.base)
	coalescesUp := i < len(a.ranges) && r.limit.equal(a.ranges[i].base)
	if coalescesUp && coalescesDown {
		// We have neighbors and they both border us.
		// Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1].
		a.ranges[i-1].limit = a.ranges[i].limit

		// Delete a.ranges[i].
		copy(a.ranges[i:], a.ranges[i+1:])
		a.ranges = a.ranges[:len(a.ranges)-1]
	} else if coalescesDown {
		// We have a neighbor at a lower address only and it borders us.
		// Merge the new space into a.ranges[i-1].
		a.ranges[i-1].limit = r.limit
	} else if coalescesUp {
		// We have a neighbor at a higher address only and it borders us.
		// Merge the new space into a.ranges[i].
		a.ranges[i].base = r.base
	} else {
		// We may or may not have neighbors which don't border us.
		// Add the new range.
		if len(a.ranges)+1 > cap(a.ranges) {
			// Grow the array. Note that this leaks the old array, but since
			// we're doubling we have at most 2x waste. For a 1 TiB heap and
			// 4 MiB arenas which are all discontiguous (both very conservative
			// assumptions), this would waste at most 4 MiB of memory.
			oldRanges := a.ranges
			ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
			ranges.len = len(oldRanges) + 1
			ranges.cap = cap(oldRanges) * 2
			ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))

			// Copy in the old array, but make space for the new range.
			copy(a.ranges[:i], oldRanges[:i])
			copy(a.ranges[i+1:], oldRanges[i:])
		} else {
			a.ranges = a.ranges[:len(a.ranges)+1]
			copy(a.ranges[i+1:], a.ranges[i:])
		}
		a.ranges[i] = r
	}
	a.totalBytes += r.size()
}
// removeLast removes and returns the highest-addressed contiguous range
// of a, or the last nBytes of that range, whichever is smaller. If a is
// empty, it returns an empty range.
func (a *addrRanges) removeLast(nBytes uintptr) addrRange {
	count := len(a.ranges)
	if count == 0 {
		return addrRange{}
	}
	last := a.ranges[count-1]
	avail := last.size()
	if avail <= nBytes {
		// The whole last range is consumed.
		a.ranges = a.ranges[:count-1]
		a.totalBytes -= avail
		return last
	}
	// Carve nBytes off the high end of the last range.
	newEnd := last.limit.sub(nBytes)
	a.ranges[count-1].limit = newEnd
	a.totalBytes -= nBytes
	return addrRange{newEnd, last.limit}
}
// removeGreaterEqual removes the ranges of a which are above addr, and additionally
// splits any range containing addr.
func (a *addrRanges) removeGreaterEqual(addr uintptr) {
	pivot := a.findSucc(addr)
	if pivot == 0 {
		// addr is before all ranges in a.
		a.totalBytes = 0
		a.ranges = a.ranges[:0]
		return
	}
	// Tally the bytes in every range at or above pivot; they are all
	// dropped wholesale below.
	removed := uintptr(0)
	for _, r := range a.ranges[pivot:] {
		removed += r.size()
	}
	if r := a.ranges[pivot-1]; r.contains(addr) {
		// The range just below pivot straddles addr; trim it and
		// account for only the part actually removed.
		removed += r.size()
		r = r.removeGreaterEqual(addr)
		if r.size() == 0 {
			pivot--
		} else {
			removed -= r.size()
			a.ranges[pivot-1] = r
		}
	}
	a.ranges = a.ranges[:pivot]
	a.totalBytes -= removed
}
// cloneInto makes a deep clone of a's state into b, re-using
// b's ranges if able.
func (a *addrRanges) cloneInto(b *addrRanges) {
	if len(a.ranges) > cap(b.ranges) {
		// Grow the array. As in init, the backing array is
		// persistentalloc'd (off-heap), so the slice header is
		// rebuilt by hand; the old array is leaked.
		ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
		ranges.len = 0
		ranges.cap = cap(a.ranges)
		ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
	}
	b.ranges = b.ranges[:len(a.ranges)]
	b.totalBytes = a.totalBytes
	copy(b.ranges, a.ranges)
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !msan
// Dummy MSan support API, used when not built with -msan.
package runtime
import (
"unsafe"
)
// msanenabled is false in this build; the stubs below exist only so the
// rest of the runtime compiles without -msan.
const msanenabled = false

// Because msanenabled is false, none of these functions should be called.
// Reaching any of them indicates a runtime bug, hence the throw.

func msanread(addr unsafe.Pointer, sz uintptr)     { throw("msan") }
func msanwrite(addr unsafe.Pointer, sz uintptr)    { throw("msan") }
func msanmalloc(addr unsafe.Pointer, sz uintptr)   { throw("msan") }
func msanfree(addr unsafe.Pointer, sz uintptr)     { throw("msan") }
func msanmove(dst, src unsafe.Pointer, sz uintptr) { throw("msan") }
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Malloc small size classes.
//
// See malloc.go for overview.
// See also mksizeclasses.go for how we decide what size classes to use.
package runtime
import "internal/runtime/gc"
// Returns size of the memory block that mallocgc will allocate if you ask for the size,
// minus any inline space for metadata.
func roundupsize(size uintptr, noscan bool) (reqSize uintptr) {
	reqSize = size
	if reqSize <= maxSmallSize-gc.MallocHeaderSize {
		// Small object.
		// Scanned objects above MinSizeForMallocHeader carry an inline
		// malloc header, which must be included before size-class lookup.
		if !noscan && reqSize > gc.MinSizeForMallocHeader { // !noscan && !heapBitsInSpan(reqSize)
			reqSize += gc.MallocHeaderSize
		}
		// (reqSize - size) is either mallocHeaderSize or 0. We need to subtract mallocHeaderSize
		// from the result if we have one, since mallocgc will add it back in.
		if reqSize <= gc.SmallSizeMax-8 {
			return uintptr(gc.SizeClassToSize[gc.SizeToSizeClass8[divRoundUp(reqSize, gc.SmallSizeDiv)]]) - (reqSize - size)
		}
		return uintptr(gc.SizeClassToSize[gc.SizeToSizeClass128[divRoundUp(reqSize-gc.SmallSizeMax, gc.LargeSizeDiv)]]) - (reqSize - size)
	}
	// Large object. Align reqSize up to the next page. Check for overflow.
	reqSize += pageSize - 1
	if reqSize < size {
		// Addition wrapped around; return the request unrounded.
		return size
	}
	return reqSize &^ (pageSize - 1)
}
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/cpu"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
)
// A spanSet is a set of *mspans.
//
// spanSet is safe for concurrent push and pop operations.
type spanSet struct {
	// A spanSet is a two-level data structure consisting of a
	// growable spine that points to fixed-sized blocks. The spine
	// can be accessed without locks, but adding a block or
	// growing it requires taking the spine lock.
	//
	// Because each mspan covers at least 8K of heap and takes at
	// most 8 bytes in the spanSet, the growth of the spine is
	// quite limited.
	//
	// The spine and all blocks are allocated off-heap, which
	// allows this to be used in the memory manager and avoids the
	// need for write barriers on all of these. spanSetBlocks are
	// managed in a pool, though never freed back to the operating
	// system. We never release spine memory because there could be
	// concurrent lock-free access and we're likely to reuse it
	// anyway. (In principle, we could do this during STW.)

	spineLock mutex
	spine     atomicSpanSetSpinePointer // *[N]atomic.Pointer[spanSetBlock]
	spineLen  atomic.Uintptr            // Spine array length
	spineCap  uintptr                   // Spine array cap, accessed under spineLock

	// index is the head and tail of the spanSet in a single field.
	// The head and the tail both represent an index into the logical
	// concatenation of all blocks, with the head always behind or
	// equal to the tail (indicating an empty set). This field is
	// always accessed atomically.
	//
	// The head and the tail are only 32 bits wide, which means we
	// can only support up to 2^32 pushes before a reset. If every
	// span in the heap were stored in this set, and each span were
	// the minimum size (1 runtime page, 8 KiB), then roughly the
	// smallest heap which would be unrepresentable is 32 TiB in size.
	index atomicHeadTailIndex
}
const (
	spanSetBlockEntries = 512 // 4KB on 64-bit
	spanSetInitSpineCap = 256 // Enough for 1GB heap on 64-bit
)

// spanSetBlockHeader holds the block bookkeeping that precedes the
// span array in a spanSetBlock.
type spanSetBlockHeader struct {
	// Free spanSetBlocks are managed via a lock-free stack.
	lfnode

	// popped is the number of pop operations that have occurred on
	// this block. This number is used to help determine when a block
	// may be safely recycled.
	popped atomic.Uint32
}

// spanSetBlockHeader2 pads the header out to tagAlign bytes.
type spanSetBlockHeader2 struct {
	spanSetBlockHeader
	pad [tagAlign - unsafe.Sizeof(spanSetBlockHeader{})]byte
}

type spanSetBlock struct {
	spanSetBlockHeader2

	// spans is the set of spans in this block.
	spans [spanSetBlockEntries]atomicMSpanPointer
}
// push adds span s to buffer b. push is safe to call concurrently
// with other push and pop operations.
func (b *spanSet) push(s *mspan) {
	// Obtain our slot. The tail increment reserves a unique logical
	// index for this push.
	cursor := uintptr(b.index.incTail().tail() - 1)
	top, bottom := cursor/spanSetBlockEntries, cursor%spanSetBlockEntries

	// Do we need to add a block?
	spineLen := b.spineLen.Load()
	var block *spanSetBlock
retry:
	if top < spineLen {
		block = b.spine.Load().lookup(top).Load()
	} else {
		// Add a new block to the spine, potentially growing
		// the spine.
		lock(&b.spineLock)
		// spineLen cannot change until we release the lock,
		// but may have changed while we were waiting.
		spineLen = b.spineLen.Load()
		if top < spineLen {
			// Another pusher added our block while we waited.
			unlock(&b.spineLock)
			goto retry
		}

		spine := b.spine.Load()
		if spineLen == b.spineCap {
			// Grow the spine.
			newCap := b.spineCap * 2
			if newCap == 0 {
				newCap = spanSetInitSpineCap
			}
			newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
			if b.spineCap != 0 {
				// Blocks are allocated off-heap, so
				// no write barriers.
				memmove(newSpine, spine.p, b.spineCap*goarch.PtrSize)
			}
			spine = spanSetSpinePointer{newSpine}

			// Spine is allocated off-heap, so no write barrier.
			b.spine.StoreNoWB(spine)
			b.spineCap = newCap
			// We can't immediately free the old spine
			// since a concurrent push with a lower index
			// could still be reading from it. We let it
			// leak because even a 1TB heap would waste
			// less than 2MB of memory on old spines. If
			// this is a problem, we could free old spines
			// during STW.
		}

		// Allocate a new block from the pool.
		block = spanSetBlockPool.alloc()

		// Add it to the spine.
		// Blocks are allocated off-heap, so no write barrier.
		spine.lookup(top).StoreNoWB(block)
		// Publish the new length only after the block is in place,
		// so readers that see the length always see the block.
		b.spineLen.Store(spineLen + 1)
		unlock(&b.spineLock)
	}

	// We have a block. Insert the span atomically, since there may be
	// concurrent readers via the block API.
	block.spans[bottom].StoreNoWB(s)
}
// pop removes and returns a span from buffer b, or nil if b is empty.
// pop is safe to call concurrently with other pop and push operations.
func (b *spanSet) pop() *mspan {
	var head, tail uint32
	var backoff uint32
	// TODO: tweak backoff parameters on other architectures.
	if GOARCH == "arm64" {
		backoff = 128
	}
claimLoop:
	for {
		headtail := b.index.load()
		head, tail = headtail.split()
		if head >= tail {
			// The buf is empty, as far as we can tell.
			return nil
		}
		// Check if the head position we want to claim is actually
		// backed by a block.
		spineLen := b.spineLen.Load()
		if spineLen <= uintptr(head)/spanSetBlockEntries {
			// We're racing with a spine growth and the allocation of
			// a new block (and maybe a new spine!), and trying to grab
			// the span at the index which is currently being pushed.
			// Instead of spinning, let's just notify the caller that
			// there's nothing currently here. Spinning on this is
			// almost definitely not worth it.
			return nil
		}
		// Try to claim the current head by CASing in an updated head.
		// This may fail transiently due to a push which modifies the
		// tail, so keep trying while the head isn't changing.
		want := head
		for want == head {
			if b.index.cas(headtail, makeHeadTailIndex(want+1, tail)) {
				break claimLoop
			}
			// Use a backoff approach to reduce demand to the shared memory location
			// decreases memory contention and allows for other threads to make quicker
			// progress.
			// Read more in this Arm blog post:
			// https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/multi-threaded-applications-arm
			procyield(backoff)
			// Increase backoff time.
			backoff += backoff / 2
			headtail = b.index.load()
			head, tail = headtail.split()
		}
		// We failed to claim the spot we were after and the head changed,
		// meaning a popper got ahead of us. Try again from the top because
		// the buf may not be empty.
	}
	top, bottom := head/spanSetBlockEntries, head%spanSetBlockEntries

	// We may be reading a stale spine pointer, but because the length
	// grows monotonically and we've already verified it, we'll definitely
	// be reading from a valid block.
	blockp := b.spine.Load().lookup(uintptr(top))

	// Given that the spine length is correct, we know we will never
	// see a nil block here, since the length is always updated after
	// the block is set.
	block := blockp.Load()
	s := block.spans[bottom].Load()
	for s == nil {
		// We raced with the span actually being set, but given that we
		// know a block for this span exists, the race window here is
		// extremely small. Try again.
		s = block.spans[bottom].Load()
	}
	// Clear the pointer. This isn't strictly necessary, but defensively
	// avoids accidentally re-using blocks which could lead to memory
	// corruption. This way, we'll get a nil pointer access instead.
	block.spans[bottom].StoreNoWB(nil)

	// Increase the popped count. If we are the last possible popper
	// in the block (note that bottom need not equal spanSetBlockEntries-1
	// due to races) then it's our responsibility to free the block.
	//
	// If we increment popped to spanSetBlockEntries, we can be sure that
	// we're the last popper for this block, and it's thus safe to free it.
	// Every other popper must have crossed this barrier (and thus finished
	// popping its corresponding mspan) by the time we get here. Because
	// we're the last popper, we also don't have to worry about concurrent
	// pushers (there can't be any). Note that we may not be the popper
	// which claimed the last slot in the block, we're just the last one
	// to finish popping.
	if block.popped.Add(1) == spanSetBlockEntries {
		// Clear the block's pointer.
		blockp.StoreNoWB(nil)

		// Return the block to the block pool.
		spanSetBlockPool.free(block)
	}
	return s
}
// reset resets a spanSet which is empty. It will also clean up
// any left over blocks.
//
// Throws if the buf is not empty.
//
// reset may not be called concurrently with any other operations
// on the span set.
func (b *spanSet) reset() {
	head, tail := b.index.load().split()
	if head < tail {
		print("head = ", head, ", tail = ", tail, "\n")
		throw("attempt to clear non-empty span set")
	}
	top := head / spanSetBlockEntries
	if uintptr(top) < b.spineLen.Load() {
		// If the head catches up to the tail and the set is empty,
		// we may not clean up the block containing the head and tail
		// since it may be pushed into again. In order to avoid leaking
		// memory since we're going to reset the head and tail, clean
		// up such a block now, if it exists.
		blockp := b.spine.Load().lookup(uintptr(top))
		block := blockp.Load()
		if block != nil {
			// Check the popped value.
			if block.popped.Load() == 0 {
				// popped should never be zero because that means we have
				// pushed at least one value but not yet popped if this
				// block pointer is not nil.
				throw("span set block with unpopped elements found in reset")
			}
			if block.popped.Load() == spanSetBlockEntries {
				// popped should also never be equal to spanSetBlockEntries
				// because the last popper should have made the block pointer
				// in this slot nil.
				throw("fully empty unfreed span set block found in reset")
			}

			// Clear the pointer to the block.
			blockp.StoreNoWB(nil)

			// Return the block to the block pool.
			spanSetBlockPool.free(block)
		}
	}
	b.index.reset()
	b.spineLen.Store(0)
}
// atomicSpanSetSpinePointer is an atomically-accessed spanSetSpinePointer.
//
// It has the same semantics as atomic.UnsafePointer.
type atomicSpanSetSpinePointer struct {
	a atomic.UnsafePointer
}

// Load loads the spanSetSpinePointer and returns it.
//
// It has the same semantics as atomic.UnsafePointer.
func (s *atomicSpanSetSpinePointer) Load() spanSetSpinePointer {
	return spanSetSpinePointer{s.a.Load()}
}

// StoreNoWB stores the spanSetSpinePointer without a write barrier
// (the spine is allocated off-heap; see spanSet).
//
// It has the same semantics as [atomic.UnsafePointer].
func (s *atomicSpanSetSpinePointer) StoreNoWB(p spanSetSpinePointer) {
	s.a.StoreNoWB(p.p)
}

// spanSetSpinePointer represents a pointer to a contiguous block of atomic.Pointer[spanSetBlock].
type spanSetSpinePointer struct {
	p unsafe.Pointer
}

// lookup returns &s[idx], computed by raw pointer arithmetic since the
// spine is an untyped off-heap array.
func (s spanSetSpinePointer) lookup(idx uintptr) *atomic.Pointer[spanSetBlock] {
	return (*atomic.Pointer[spanSetBlock])(add(s.p, goarch.PtrSize*idx))
}
// spanSetBlockPool is a global pool of spanSetBlocks.
var spanSetBlockPool spanSetBlockAlloc

// spanSetBlockAlloc represents a concurrent pool of spanSetBlocks.
type spanSetBlockAlloc struct {
	stack lfstack
}

// alloc tries to grab a spanSetBlock out of the pool, and if it fails
// persistentallocs a new one and returns it. Blocks are never returned
// to the operating system.
func (p *spanSetBlockAlloc) alloc() *spanSetBlock {
	if s := (*spanSetBlock)(p.stack.pop()); s != nil {
		return s
	}
	return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), max(cpu.CacheLineSize, tagAlign), &memstats.gcMiscSys))
}

// free returns a spanSetBlock back to the pool, resetting its popped
// counter for reuse.
func (p *spanSetBlockAlloc) free(block *spanSetBlock) {
	block.popped.Store(0)
	p.stack.push(&block.lfnode)
}
// headTailIndex packs a queue's 32-bit head (high half) and 32-bit
// tail (low half) into a single 64-bit value so both can be read or
// updated by one atomic operation.
type headTailIndex uint64

// makeHeadTailIndex packs a separate head and tail into a headTailIndex.
func makeHeadTailIndex(head, tail uint32) headTailIndex {
	packed := uint64(head)<<32 | uint64(tail)
	return headTailIndex(packed)
}

// head extracts the head (high 32 bits) of a headTailIndex value.
func (h headTailIndex) head() uint32 {
	return uint32(h >> 32)
}

// tail extracts the tail (low 32 bits) of a headTailIndex value.
func (h headTailIndex) tail() uint32 {
	return uint32(h & 0xffffffff)
}

// split unpacks both halves of a headTailIndex at once.
func (h headTailIndex) split() (head uint32, tail uint32) {
	return h.head(), h.tail()
}
// atomicHeadTailIndex is an atomically-accessed headTailIndex.
type atomicHeadTailIndex struct {
	u atomic.Uint64
}

// load atomically reads a headTailIndex value.
func (h *atomicHeadTailIndex) load() headTailIndex {
	return headTailIndex(h.u.Load())
}

// cas atomically compares-and-swaps a headTailIndex value.
func (h *atomicHeadTailIndex) cas(old, new headTailIndex) bool {
	return h.u.CompareAndSwap(uint64(old), uint64(new))
}

// incHead atomically increments the head of a headTailIndex.
func (h *atomicHeadTailIndex) incHead() headTailIndex {
	return headTailIndex(h.u.Add(1 << 32))
}

// decHead atomically decrements the head of a headTailIndex
// (the negative delta wraps via two's complement on the high half).
func (h *atomicHeadTailIndex) decHead() headTailIndex {
	return headTailIndex(h.u.Add(-(1 << 32)))
}

// incTail atomically increments the tail of a headTailIndex and
// returns the new value. Throws if the 32-bit tail wraps to zero.
func (h *atomicHeadTailIndex) incTail() headTailIndex {
	ht := headTailIndex(h.u.Add(1))
	// Check for overflow.
	if ht.tail() == 0 {
		print("runtime: head = ", ht.head(), ", tail = ", ht.tail(), "\n")
		throw("headTailIndex overflow")
	}
	return ht
}

// reset clears the headTailIndex to (0, 0).
func (h *atomicHeadTailIndex) reset() {
	h.u.Store(0)
}
// atomicMSpanPointer is an atomic.Pointer[mspan]. Can't use generics because it's NotInHeap.
type atomicMSpanPointer struct {
	p atomic.UnsafePointer
}

// Load returns the *mspan.
func (p *atomicMSpanPointer) Load() *mspan {
	return (*mspan)(p.p.Load())
}

// StoreNoWB stores an *mspan without a write barrier (see
// atomic.UnsafePointer.StoreNoWB for the caller's obligations).
func (p *atomicMSpanPointer) StoreNoWB(s *mspan) {
	p.p.StoreNoWB(unsafe.Pointer(s))
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Memory statistics
package runtime
import (
"internal/runtime/atomic"
"internal/runtime/gc"
"unsafe"
)
// mstats holds the runtime-internal memory statistics that back the
// public MemStats structure (see readmemstats_m).
type mstats struct {
	// Statistics about malloc heap.
	heapStats consistentHeapStats

	// Statistics about stacks.
	stacks_sys sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys

	// Statistics about allocation of low-level fixed-size structures.
	mspan_sys    sysMemStat
	mcache_sys   sysMemStat
	buckhash_sys sysMemStat // profiling bucket hash table

	// Statistics about GC overhead.
	gcMiscSys sysMemStat // updated atomically or during STW

	// Miscellaneous statistics.
	other_sys sysMemStat // updated atomically or during STW

	// Statistics about the garbage collector.

	// Protected by mheap or worldsema during GC.
	last_gc_unix    uint64 // last gc (in unix time)
	pause_total_ns  uint64
	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
	pause_end       [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
	numgc           uint32
	numforcedgc     uint32  // number of user-forced GCs
	gc_cpu_fraction float64 // fraction of CPU time used by GC

	last_gc_nanotime uint64 // last gc (monotonic time)
	lastHeapInUse    uint64 // heapInUse at mark termination of the previous GC
	lastScanStats    [gc.NumSizeClasses]sizeClassScanStats

	enablegc bool
}

// memstats is the global instance of the runtime's memory statistics.
var memstats mstats
// A MemStats records statistics about the memory allocator.
type MemStats struct {
	// General statistics.

	// Alloc is bytes of allocated heap objects.
	//
	// This is the same as HeapAlloc (see below).
	Alloc uint64

	// TotalAlloc is cumulative bytes allocated for heap objects.
	//
	// TotalAlloc increases as heap objects are allocated, but
	// unlike Alloc and HeapAlloc, it does not decrease when
	// objects are freed.
	TotalAlloc uint64

	// Sys is the total bytes of memory obtained from the OS.
	//
	// Sys is the sum of the XSys fields below. Sys measures the
	// virtual address space reserved by the Go runtime for the
	// heap, stacks, and other internal data structures. It's
	// likely that not all of the virtual address space is backed
	// by physical memory at any given moment, though in general
	// it all was at some point.
	Sys uint64

	// Lookups is the number of pointer lookups performed by the
	// runtime.
	//
	// This is primarily useful for debugging runtime internals.
	//
	// NOTE(review): readmemstats_m in this file never assigns this
	// field, so it is always reported as 0.
	Lookups uint64

	// Mallocs is the cumulative count of heap objects allocated.
	// The number of live objects is Mallocs - Frees.
	Mallocs uint64

	// Frees is the cumulative count of heap objects freed.
	Frees uint64

	// Heap memory statistics.
	//
	// Interpreting the heap statistics requires some knowledge of
	// how Go organizes memory. Go divides the virtual address
	// space of the heap into "spans", which are contiguous
	// regions of memory 8K or larger. A span may be in one of
	// three states:
	//
	// An "idle" span contains no objects or other data. The
	// physical memory backing an idle span can be released back
	// to the OS (but the virtual address space never is), or it
	// can be converted into an "in use" or "stack" span.
	//
	// An "in use" span contains at least one heap object and may
	// have free space available to allocate more heap objects.
	//
	// A "stack" span is used for goroutine stacks. Stack spans
	// are not considered part of the heap. A span can change
	// between heap and stack memory; it is never used for both
	// simultaneously.

	// HeapAlloc is bytes of allocated heap objects.
	//
	// "Allocated" heap objects include all reachable objects, as
	// well as unreachable objects that the garbage collector has
	// not yet freed. Specifically, HeapAlloc increases as heap
	// objects are allocated and decreases as the heap is swept
	// and unreachable objects are freed. Sweeping occurs
	// incrementally between GC cycles, so these two processes
	// occur simultaneously, and as a result HeapAlloc tends to
	// change smoothly (in contrast with the sawtooth that is
	// typical of stop-the-world garbage collectors).
	HeapAlloc uint64

	// HeapSys is bytes of heap memory obtained from the OS.
	//
	// HeapSys measures the amount of virtual address space
	// reserved for the heap. This includes virtual address space
	// that has been reserved but not yet used, which consumes no
	// physical memory, but tends to be small, as well as virtual
	// address space for which the physical memory has been
	// returned to the OS after it became unused (see HeapReleased
	// for a measure of the latter).
	//
	// HeapSys estimates the largest size the heap has had.
	HeapSys uint64

	// HeapIdle is bytes in idle (unused) spans.
	//
	// Idle spans have no objects in them. These spans could be
	// (and may already have been) returned to the OS, or they can
	// be reused for heap allocations, or they can be reused as
	// stack memory.
	//
	// HeapIdle minus HeapReleased estimates the amount of memory
	// that could be returned to the OS, but is being retained by
	// the runtime so it can grow the heap without requesting more
	// memory from the OS. If this difference is significantly
	// larger than the heap size, it indicates there was a recent
	// transient spike in live heap size.
	HeapIdle uint64

	// HeapInuse is bytes in in-use spans.
	//
	// In-use spans have at least one object in them. These spans
	// can only be used for other objects of roughly the same
	// size.
	//
	// HeapInuse minus HeapAlloc estimates the amount of memory
	// that has been dedicated to particular size classes, but is
	// not currently being used. This is an upper bound on
	// fragmentation, but in general this memory can be reused
	// efficiently.
	HeapInuse uint64

	// HeapReleased is bytes of physical memory returned to the OS.
	//
	// This counts heap memory from idle spans that was returned
	// to the OS and has not yet been reacquired for the heap.
	HeapReleased uint64

	// HeapObjects is the number of allocated heap objects.
	//
	// Like HeapAlloc, this increases as objects are allocated and
	// decreases as the heap is swept and unreachable objects are
	// freed.
	HeapObjects uint64

	// Stack memory statistics.
	//
	// Stacks are not considered part of the heap, but the runtime
	// can reuse a span of heap memory for stack memory, and
	// vice-versa.

	// StackInuse is bytes in stack spans.
	//
	// In-use stack spans have at least one stack in them. These
	// spans can only be used for other stacks of the same size.
	//
	// There is no StackIdle because unused stack spans are
	// returned to the heap (and hence counted toward HeapIdle).
	StackInuse uint64

	// StackSys is bytes of stack memory obtained from the OS.
	//
	// StackSys is StackInuse, plus any memory obtained directly
	// from the OS for OS thread stacks.
	//
	// In non-cgo programs this metric is currently equal to StackInuse
	// (but this should not be relied upon, and the value may change in
	// the future).
	//
	// In cgo programs this metric includes OS thread stacks allocated
	// directly from the OS. Currently, this only accounts for one stack in
	// c-shared and c-archive build modes and other sources of stacks from
	// the OS (notably, any allocated by C code) are not currently measured.
	// Note this too may change in the future.
	StackSys uint64

	// Off-heap memory statistics.
	//
	// The following statistics measure runtime-internal
	// structures that are not allocated from heap memory (usually
	// because they are part of implementing the heap). Unlike
	// heap or stack memory, any memory allocated to these
	// structures is dedicated to these structures.
	//
	// These are primarily useful for debugging runtime memory
	// overheads.

	// MSpanInuse is bytes of allocated mspan structures.
	MSpanInuse uint64

	// MSpanSys is bytes of memory obtained from the OS for mspan
	// structures.
	MSpanSys uint64

	// MCacheInuse is bytes of allocated mcache structures.
	MCacheInuse uint64

	// MCacheSys is bytes of memory obtained from the OS for
	// mcache structures.
	MCacheSys uint64

	// BuckHashSys is bytes of memory in profiling bucket hash tables.
	BuckHashSys uint64

	// GCSys is bytes of memory in garbage collection metadata.
	GCSys uint64

	// OtherSys is bytes of memory in miscellaneous off-heap
	// runtime allocations.
	OtherSys uint64

	// Garbage collector statistics.

	// NextGC is the target heap size of the next GC cycle.
	//
	// The garbage collector's goal is to keep HeapAlloc ≤ NextGC.
	// At the end of each GC cycle, the target for the next cycle
	// is computed based on the amount of reachable data and the
	// value of GOGC.
	NextGC uint64

	// LastGC is the time the last garbage collection finished, as
	// nanoseconds since 1970 (the UNIX epoch).
	LastGC uint64

	// PauseTotalNs is the cumulative nanoseconds in GC
	// stop-the-world pauses since the program started.
	//
	// During a stop-the-world pause, all goroutines are paused
	// and only the garbage collector can run.
	PauseTotalNs uint64

	// PauseNs is a circular buffer of recent GC stop-the-world
	// pause times in nanoseconds.
	//
	// The most recent pause is at PauseNs[(NumGC+255)%256]. In
	// general, PauseNs[N%256] records the time paused in the most
	// recent N%256th GC cycle. There may be multiple pauses per
	// GC cycle; this is the sum of all pauses during a cycle.
	PauseNs [256]uint64

	// PauseEnd is a circular buffer of recent GC pause end times,
	// as nanoseconds since 1970 (the UNIX epoch).
	//
	// This buffer is filled the same way as PauseNs. There may be
	// multiple pauses per GC cycle; this records the end of the
	// last pause in a cycle.
	PauseEnd [256]uint64

	// NumGC is the number of completed GC cycles.
	NumGC uint32

	// NumForcedGC is the number of GC cycles that were forced by
	// the application calling the GC function.
	NumForcedGC uint32

	// GCCPUFraction is the fraction of this program's available
	// CPU time used by the GC since the program started.
	//
	// GCCPUFraction is expressed as a number between 0 and 1,
	// where 0 means GC has consumed none of this program's CPU. A
	// program's available CPU time is defined as the integral of
	// GOMAXPROCS since the program started. That is, if
	// GOMAXPROCS is 2 and a program has been running for 10
	// seconds, its "available CPU" is 20 seconds. GCCPUFraction
	// does not include CPU time used for write barrier activity.
	//
	// This is the same as the fraction of CPU reported by
	// GODEBUG=gctrace=1.
	GCCPUFraction float64

	// EnableGC indicates that GC is enabled. It is always true,
	// even if GOGC=off.
	EnableGC bool

	// DebugGC is currently unused.
	DebugGC bool

	// BySize reports per-size class allocation statistics.
	//
	// BySize[N] gives statistics for allocations of size S where
	// BySize[N-1].Size < S ≤ BySize[N].Size.
	//
	// This does not report allocations larger than BySize[60].Size.
	BySize [61]struct {
		// Size is the maximum byte size of an object in this
		// size class.
		Size uint32

		// Mallocs is the cumulative count of heap objects
		// allocated in this size class. The cumulative bytes
		// of allocation is Size*Mallocs. The number of live
		// objects in this size class is Mallocs - Frees.
		Mallocs uint64

		// Frees is the cumulative count of heap objects freed
		// in this size class.
		Frees uint64
	}
}
// init verifies alignment invariants that atomic access to the
// consistent heap stats relies on (notably on 32-bit platforms).
func init() {
	// heapStats must be 8-byte aligned within mstats for its atomic
	// 64-bit fields to be safely accessible.
	if offset := unsafe.Offsetof(memstats.heapStats); offset%8 != 0 {
		println(offset)
		throw("memstats.heapStats not aligned to 8 bytes")
	}
	// Ensure the size of heapStatsDelta causes adjacent fields/slots (e.g.
	// [3]heapStatsDelta) to be 8-byte aligned.
	if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
		println(size)
		throw("heapStatsDelta not a multiple of 8 bytes in size")
	}
}
// ReadMemStats populates m with memory allocator statistics.
//
// The returned memory allocator statistics are up to date as of the
// call to ReadMemStats. This is in contrast with a heap profile,
// which is a snapshot as of the most recently completed garbage
// collection cycle.
func ReadMemStats(m *MemStats) {
	_ = m.Alloc // nil check test before we switch stacks, see issue 61158
	// Stop the world so the stats are globally consistent, then do the
	// actual collection on the system stack.
	stw := stopTheWorld(stwReadMemStats)

	systemstack(func() {
		readmemstats_m(m)
	})

	startTheWorld(stw)
}
// doubleCheckReadMemStats controls a double-check mode for ReadMemStats that
// ensures consistency between the values that ReadMemStats is using and the
// runtime-internal stats.
var doubleCheckReadMemStats = false
// readmemstats_m populates stats for internal runtime values.
//
// The world must be stopped.
func readmemstats_m(stats *MemStats) {
	assertWorldStopped()

	// Flush mcaches to mcentral before doing anything else.
	//
	// Flushing to the mcentral may in general cause stats to
	// change as mcentral data structures are manipulated.
	systemstack(flushallmcaches)

	// Calculate memory allocator stats.
	// During program execution we only count number of frees and amount of freed memory.
	// Current number of alive objects in the heap and amount of alive heap memory
	// are calculated by scanning all spans.
	// Total number of mallocs is calculated as number of frees plus number of alive objects.
	// Similarly, total amount of allocated memory is calculated as amount of freed memory
	// plus amount of alive heap memory.

	// Collect consistent stats, which are the source-of-truth in some cases.
	var consStats heapStatsDelta
	memstats.heapStats.unsafeRead(&consStats)

	// Collect large allocation stats.
	totalAlloc := consStats.largeAlloc
	nMalloc := consStats.largeAllocCount
	totalFree := consStats.largeFree
	nFree := consStats.largeFreeCount

	// Collect per-sizeclass stats.
	var bySize [gc.NumSizeClasses]struct {
		Size    uint32
		Mallocs uint64
		Frees   uint64
	}
	for i := range bySize {
		bySize[i].Size = uint32(gc.SizeClassToSize[i])

		// Malloc stats.
		a := consStats.smallAllocCount[i]
		totalAlloc += a * uint64(gc.SizeClassToSize[i])
		nMalloc += a
		bySize[i].Mallocs = a

		// Free stats.
		f := consStats.smallFreeCount[i]
		totalFree += f * uint64(gc.SizeClassToSize[i])
		nFree += f
		bySize[i].Frees = f
	}

	// Account for tiny allocations.
	// For historical reasons, MemStats includes tiny allocations
	// in both the total free and total alloc count. This double-counts
	// memory in some sense because their tiny allocation block is also
	// counted. Tracking the lifetime of individual tiny allocations is
	// currently not done because it would be too expensive.
	nFree += consStats.tinyAllocCount
	nMalloc += consStats.tinyAllocCount

	// Calculate derived stats.

	stackInUse := uint64(consStats.inStacks)
	gcWorkBufInUse := uint64(consStats.inWorkBufs)

	totalMapped := gcController.heapInUse.load() + gcController.heapFree.load() + gcController.heapReleased.load() +
		memstats.stacks_sys.load() + memstats.mspan_sys.load() + memstats.mcache_sys.load() +
		memstats.buckhash_sys.load() + memstats.gcMiscSys.load() + memstats.other_sys.load() +
		stackInUse + gcWorkBufInUse

	heapGoal := gcController.heapGoal()

	if doubleCheckReadMemStats {
		// Only check this if we're debugging. It would be bad to crash an application
		// just because the debugging stats are wrong. We mostly rely on tests to catch
		// these issues, and we enable the double check mode for tests.
		//
		// The world is stopped, so the consistent stats (after aggregation)
		// should be identical to some combination of memstats. In particular:
		//
		// * memstats.heapInUse == inHeap
		// * memstats.heapReleased == released
		// * memstats.heapInUse + memstats.heapFree == committed - inStacks - inWorkBufs
		// * memstats.totalAlloc == totalAlloc
		// * memstats.totalFree == totalFree
		//
		// Check if that's actually true.
		//
		// Prevent sysmon and the tracer from skewing the stats since they can
		// act without synchronizing with a STW. See #64401.
		lock(&sched.sysmonlock)
		lock(&trace.lock)
		if gcController.heapInUse.load() != uint64(consStats.inHeap) {
			print("runtime: heapInUse=", gcController.heapInUse.load(), "\n")
			print("runtime: consistent value=", consStats.inHeap, "\n")
			throw("heapInUse and consistent stats are not equal")
		}
		if gcController.heapReleased.load() != uint64(consStats.released) {
			print("runtime: heapReleased=", gcController.heapReleased.load(), "\n")
			print("runtime: consistent value=", consStats.released, "\n")
			throw("heapReleased and consistent stats are not equal")
		}
		heapRetained := gcController.heapInUse.load() + gcController.heapFree.load()
		consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs)
		if heapRetained != consRetained {
			print("runtime: global value=", heapRetained, "\n")
			print("runtime: consistent value=", consRetained, "\n")
			throw("measures of the retained heap are not equal")
		}
		if gcController.totalAlloc.Load() != totalAlloc {
			print("runtime: totalAlloc=", gcController.totalAlloc.Load(), "\n")
			print("runtime: consistent value=", totalAlloc, "\n")
			throw("totalAlloc and consistent stats are not equal")
		}
		if gcController.totalFree.Load() != totalFree {
			print("runtime: totalFree=", gcController.totalFree.Load(), "\n")
			print("runtime: consistent value=", totalFree, "\n")
			throw("totalFree and consistent stats are not equal")
		}
		// Also check that mappedReady lines up with totalMapped - released.
		// This isn't really the same type of "make sure consistent stats line up" situation,
		// but this is an opportune time to check.
		if gcController.mappedReady.Load() != totalMapped-uint64(consStats.released) {
			print("runtime: mappedReady=", gcController.mappedReady.Load(), "\n")
			print("runtime: totalMapped=", totalMapped, "\n")
			print("runtime: released=", uint64(consStats.released), "\n")
			print("runtime: totalMapped-released=", totalMapped-uint64(consStats.released), "\n")
			throw("mappedReady and other memstats are not equal")
		}
		unlock(&trace.lock)
		unlock(&sched.sysmonlock)
	}

	// We've calculated all the values we need. Now, populate stats.

	stats.Alloc = totalAlloc - totalFree
	stats.TotalAlloc = totalAlloc
	stats.Sys = totalMapped
	stats.Mallocs = nMalloc
	stats.Frees = nFree
	stats.HeapAlloc = totalAlloc - totalFree
	stats.HeapSys = gcController.heapInUse.load() + gcController.heapFree.load() + gcController.heapReleased.load()
	// By definition, HeapIdle is memory that was mapped
	// for the heap but is not currently used to hold heap
	// objects. It also specifically is memory that can be
	// used for other purposes, like stacks, but this memory
	// is subtracted out of HeapSys before it makes that
	// transition. Put another way:
	//
	// HeapSys = bytes allocated from the OS for the heap - bytes ultimately used for non-heap purposes
	// HeapIdle = bytes allocated from the OS for the heap - bytes ultimately used for any purpose
	//
	// or
	//
	// HeapSys = sys - stacks_inuse - gcWorkBufInUse
	// HeapIdle = sys - stacks_inuse - gcWorkBufInUse - heapInUse
	//
	// => HeapIdle = HeapSys - heapInUse = heapFree + heapReleased
	stats.HeapIdle = gcController.heapFree.load() + gcController.heapReleased.load()
	stats.HeapInuse = gcController.heapInUse.load()
	stats.HeapReleased = gcController.heapReleased.load()
	stats.HeapObjects = nMalloc - nFree
	stats.StackInuse = stackInUse
	// memstats.stacks_sys is only memory mapped directly for OS stacks.
	// Add in heap-allocated stack memory for user consumption.
	stats.StackSys = stackInUse + memstats.stacks_sys.load()
	stats.MSpanInuse = uint64(mheap_.spanalloc.inuse)
	stats.MSpanSys = memstats.mspan_sys.load()
	stats.MCacheInuse = uint64(mheap_.cachealloc.inuse)
	stats.MCacheSys = memstats.mcache_sys.load()
	stats.BuckHashSys = memstats.buckhash_sys.load()
	// MemStats defines GCSys as an aggregate of all memory related
	// to the memory management system, but we track this memory
	// at a more granular level in the runtime.
	stats.GCSys = memstats.gcMiscSys.load() + gcWorkBufInUse
	stats.OtherSys = memstats.other_sys.load()
	stats.NextGC = heapGoal
	stats.LastGC = memstats.last_gc_unix
	stats.PauseTotalNs = memstats.pause_total_ns
	stats.PauseNs = memstats.pause_ns
	stats.PauseEnd = memstats.pause_end
	stats.NumGC = memstats.numgc
	stats.NumForcedGC = memstats.numforcedgc
	stats.GCCPUFraction = memstats.gc_cpu_fraction
	stats.EnableGC = true

	// stats.BySize and bySize might not match in length.
	// That's OK, stats.BySize cannot change due to backwards
	// compatibility issues. copy will copy the minimum amount
	// of values between the two of them.
	copy(stats.BySize[:], bySize[:])
}
// readGCStats is linknamed into runtime/debug to back debug.ReadGCStats.
// It must run on the system stack (see readGCStats_m).
//
//go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) {
	systemstack(func() {
		readGCStats_m(pauses)
	})
}
// readGCStats_m must be called on the system stack because it acquires the heap
// lock. See mheap for details.
//
// On return, *pauses holds n pause lengths, then n pause-end times, then
// three trailing values: last GC (unix ns), GC count, and total pause ns.
//
//go:systemstack
func readGCStats_m(pauses *[]uint64) {
	p := *pauses
	// Calling code in runtime/debug should make the slice large enough.
	if cap(p) < len(memstats.pause_ns)+3 {
		throw("short slice passed to readGCStats")
	}

	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
	lock(&mheap_.lock)

	// At most len(pause_ns) entries are retained by the circular buffer.
	n := memstats.numgc
	if n > uint32(len(memstats.pause_ns)) {
		n = uint32(len(memstats.pause_ns))
	}

	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%len(pause_ns)], and then backward
	// from there to go back farther in time. We deliver the times
	// most recent first (in p[0]).
	p = p[:cap(p)]
	for i := uint32(0); i < n; i++ {
		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
		p[i] = memstats.pause_ns[j]
		p[n+i] = memstats.pause_end[j]
	}

	p[n+n] = memstats.last_gc_unix
	p[n+n+1] = uint64(memstats.numgc)
	p[n+n+2] = memstats.pause_total_ns
	unlock(&mheap_.lock)
	*pauses = p[:n+n+3]
}
// flushmcache flushes the mcache of allp[i].
//
// The world must be stopped.
//
//go:nowritebarrier
func flushmcache(i int) {
	assertWorldStopped()

	p := allp[i]
	c := p.mcache
	if c == nil {
		// P has no mcache; nothing to flush.
		return
	}
	c.releaseAll()
	stackcache_clear(c)
}
// flushallmcaches flushes the mcaches of all Ps.
//
// The world must be stopped.
//
//go:nowritebarrier
func flushallmcaches() {
	assertWorldStopped()

	for i := 0; i < int(gomaxprocs); i++ {
		flushmcache(i)
	}
}
// sysMemStat represents a global system statistic that is managed atomically.
//
// This type must structurally be a uint64 so that mstats aligns with MemStats.
type sysMemStat uint64
// load atomically reads the value of the stat.
//
// Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
//
//go:nosplit
func (s *sysMemStat) load() uint64 {
	return atomic.Load64((*uint64)(s))
}
// add atomically adds the sysMemStat by n. n may be negative; throws
// if the addition would make the stat go negative or overflow.
//
// Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
//
//go:nosplit
func (s *sysMemStat) add(n int64) {
	val := atomic.Xadd64((*uint64)(s), n)
	if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) {
		print("runtime: val=", val, " n=", n, "\n")
		throw("sysMemStat overflow")
	}
}
// heapStatsDelta contains deltas of various runtime memory statistics
// that need to be updated together in order for them to be kept
// consistent with one another.
type heapStatsDelta struct {
	// Memory stats.
	committed  int64 // byte delta of memory committed
	released   int64 // byte delta of released memory generated
	inHeap     int64 // byte delta of memory placed in the heap
	inStacks   int64 // byte delta of memory reserved for stacks
	inWorkBufs int64 // byte delta of memory reserved for work bufs

	// Allocator stats.
	//
	// These are all uint64 because they're cumulative, and could quickly wrap
	// around otherwise.
	tinyAllocCount  uint64                    // number of tiny allocations
	largeAlloc      uint64                    // bytes allocated for large objects
	largeAllocCount uint64                    // number of large object allocations
	smallAllocCount [gc.NumSizeClasses]uint64 // number of allocs for small objects
	largeFree       uint64                    // bytes freed for large objects (>maxSmallSize)
	largeFreeCount  uint64                    // number of frees for large objects (>maxSmallSize)
	smallFreeCount  [gc.NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize)

	// NOTE: This struct must be a multiple of 8 bytes in size because it
	// is stored in an array. If it's not, atomic accesses to the above
	// fields may be unaligned and fail on 32-bit platforms.
}
// merge accumulates every delta in b into a, field by field.
func (a *heapStatsDelta) merge(b *heapStatsDelta) {
	// Memory stats.
	a.committed += b.committed
	a.released += b.released
	a.inHeap += b.inHeap
	a.inStacks += b.inStacks
	a.inWorkBufs += b.inWorkBufs

	// Scalar allocator stats.
	a.tinyAllocCount += b.tinyAllocCount
	a.largeAlloc += b.largeAlloc
	a.largeAllocCount += b.largeAllocCount
	a.largeFree += b.largeFree
	a.largeFreeCount += b.largeFreeCount

	// The per-size-class alloc and free arrays have identical lengths
	// ([gc.NumSizeClasses]uint64), so one loop folds in both.
	for i := range b.smallAllocCount {
		a.smallAllocCount[i] += b.smallAllocCount[i]
		a.smallFreeCount[i] += b.smallFreeCount[i]
	}
}
// consistentHeapStats represents a set of various memory statistics
// whose updates must be viewed completely to get a consistent
// state of the world.
//
// To write updates to memory stats use the acquire and release
// methods. To obtain a consistent global snapshot of these statistics,
// use read.
type consistentHeapStats struct {
	// stats is a ring buffer of heapStatsDelta values.
	// Writers always atomically update the delta at index gen.
	//
	// Readers operate by rotating gen (0 -> 1 -> 2 -> 0 -> ...)
	// and synchronizing with writers by observing each P's
	// statsSeq field. If the reader observes a P not writing,
	// it can be sure that it will pick up the new gen value the
	// next time it writes.
	//
	// The reader then takes responsibility by clearing space
	// in the ring buffer for the next reader to rotate gen to
	// that space (i.e. it merges in values from index (gen-2) mod 3
	// to index (gen-1) mod 3, then clears the former).
	//
	// Note that this means only one reader can be reading at a time.
	// There is no way for readers to synchronize.
	//
	// This process is why we need a ring buffer of size 3 instead
	// of 2: one is for the writers, one contains the most recent
	// data, and the last one is clear so writers can begin writing
	// to it the moment gen is updated.
	stats [3]heapStatsDelta

	// gen represents the current index into which writers
	// are writing, and can take on the value of 0, 1, or 2.
	gen atomic.Uint32

	// noPLock is intended to provide mutual exclusion for updating
	// stats when no P is available. It does not block other writers
	// with a P, only other writers without a P and the reader. Because
	// stats are usually updated when a P is available, contention on
	// this lock should be minimal.
	noPLock mutex
}
// acquire returns a heapStatsDelta to be updated. In effect,
// it acquires the shard for writing. release must be called
// as soon as the relevant deltas are updated.
//
// The returned heapStatsDelta must be updated atomically.
//
// The caller's P must not change between acquire and
// release. This also means that the caller should not
// acquire a P or release its P in between. A P also must
// not acquire a given consistentHeapStats if it hasn't
// yet released it.
//
// nosplit because a stack growth in this function could
// lead to a stack allocation that could reenter the
// function.
//
//go:nosplit
func (m *consistentHeapStats) acquire() *heapStatsDelta {
	if pp := getg().m.p.ptr(); pp != nil {
		// With a P: mark the P as writing by moving its statsSeq
		// to an odd value (seqlock-style).
		seq := pp.statsSeq.Add(1)
		if seq%2 == 0 {
			// Should have been incremented to odd.
			print("runtime: seq=", seq, "\n")
			throw("bad sequence number")
		}
	} else {
		// Without a P: serialize against other P-less writers and
		// against the reader's gen rotation.
		lock(&m.noPLock)
	}
	gen := m.gen.Load() % 3
	return &m.stats[gen]
}
// release indicates that the writer is done modifying
// the delta. The value returned by the corresponding
// acquire must no longer be accessed or modified after
// release is called.
//
// The caller's P must not change between acquire and
// release. This also means that the caller should not
// acquire a P or release its P in between.
//
// nosplit because a stack growth in this function could
// lead to a stack allocation that causes another acquire
// before this operation has completed.
//
//go:nosplit
func (m *consistentHeapStats) release() {
	if pp := getg().m.p.ptr(); pp != nil {
		// With a P: move statsSeq back to an even value,
		// signalling "not writing".
		seq := pp.statsSeq.Add(1)
		if seq%2 != 0 {
			// Should have been incremented to even.
			print("runtime: seq=", seq, "\n")
			throw("bad sequence number")
		}
	} else {
		unlock(&m.noPLock)
	}
}
// unsafeRead aggregates the delta for this shard into out.
//
// Unsafe because it does so without any synchronization. The
// world must be stopped.
func (m *consistentHeapStats) unsafeRead(out *heapStatsDelta) {
	assertWorldStopped()

	// With the world stopped there are no concurrent writers, so
	// summing all three generations of the ring buffer is safe.
	for i := range m.stats {
		out.merge(&m.stats[i])
	}
}
// unsafeClear clears the shard.
//
// Unsafe because the world must be stopped and values should
// be donated elsewhere before clearing.
func (m *consistentHeapStats) unsafeClear() {
	assertWorldStopped()

	clear(m.stats[:])
}
// read takes a globally consistent snapshot of m
// and puts the aggregated value in out. Even though out is a
// heapStatsDelta, the resulting values should be complete and
// valid statistic values.
//
// Not safe to call concurrently. The world must be stopped
// or metricsSema must be held.
func (m *consistentHeapStats) read(out *heapStatsDelta) {
	// Getting preempted after this point is not safe because
	// we read allp. We need to make sure a STW can't happen
	// so it doesn't change out from under us.
	mp := acquirem()

	// Get the current generation. We can be confident that this
	// will not change since read is serialized and is the only
	// one that modifies currGen.
	currGen := m.gen.Load()
	prevGen := currGen - 1
	if currGen == 0 {
		// gen wraps 0 -> 2 going backward.
		prevGen = 2
	}

	// Prevent writers without a P from writing while we update gen.
	lock(&m.noPLock)

	// Rotate gen, effectively taking a snapshot of the state of
	// these statistics at the point of the exchange by moving
	// writers to the next set of deltas.
	//
	// This exchange is safe to do because we won't race
	// with anyone else trying to update this value.
	m.gen.Swap((currGen + 1) % 3)

	// Allow P-less writers to continue. They'll be writing to the
	// next generation now.
	unlock(&m.noPLock)

	for _, p := range allp {
		// Spin until there are no more writers.
		for p.statsSeq.Load()%2 != 0 {
		}
	}

	// At this point we've observed that each sequence
	// number is even, so any future writers will observe
	// the new gen value. That means it's safe to read from
	// the other deltas in the stats buffer.

	// Perform our responsibilities and free up
	// stats[prevGen] for the next time we want to take
	// a snapshot.
	m.stats[currGen].merge(&m.stats[prevGen])
	m.stats[prevGen] = heapStatsDelta{}

	// Finally, copy out the complete delta.
	*out = m.stats[currGen]

	releasem(mp)
}
// cpuStats aggregates GC- and scavenger-related CPU time.
type cpuStats struct {
	// All fields are CPU time in nanoseconds computed by comparing
	// calls of nanotime. This means they're all overestimates, because
	// they don't accurately compute on-CPU time (so some of the time
	// could be spent scheduled away by the OS).

	GCAssistTime    int64 // GC assists
	GCDedicatedTime int64 // GC dedicated mark workers + pauses
	GCIdleTime      int64 // GC idle mark workers
	GCPauseTime     int64 // GC pauses (all GOMAXPROCS, even if just 1 is running)
	GCTotalTime     int64

	// NOTE: the comments on the two fields below were previously swapped;
	// accumulate adds scavenge.assistTime into ScavengeAssistTime and
	// scavenge.backgroundTime into ScavengeBgTime.
	ScavengeAssistTime int64 // scavenge assists
	ScavengeBgTime     int64 // background scavenger
	ScavengeTotalTime  int64

	IdleTime int64 // Time Ps spent in _Pidle.
	UserTime int64 // Time Ps spent in _Prunning that's not any of the above.

	TotalTime int64 // GOMAXPROCS * (monotonic wall clock time elapsed)
}
// accumulateGCPauseTime adds dt*maxProcs to the GC CPU pause time stats.
// dt should be the actual time spent paused, for orthogonality. maxProcs
// should be GOMAXPROCS, not work.stwprocs, since this number must be
// comparable to a total time computed from GOMAXPROCS.
func (s *cpuStats) accumulateGCPauseTime(dt int64, maxProcs int32) {
	pause := int64(maxProcs) * dt
	s.GCPauseTime += pause
	s.GCTotalTime += pause
}
// accumulate takes a cpuStats and adds in the current state of all GC CPU
// counters.
//
// gcMarkPhase indicates that we're in the mark phase and that certain counter
// values should be used.
func (s *cpuStats) accumulate(now int64, gcMarkPhase bool) {
	// N.B. Mark termination and sweep termination pauses are
	// accumulated in work.cpuStats at the end of their respective pauses.
	var (
		markAssistCpu     int64
		markDedicatedCpu  int64
		markFractionalCpu int64
		markIdleCpu       int64
	)
	if gcMarkPhase {
		// N.B. These stats may have stale values if the GC is not
		// currently in the mark phase.
		markAssistCpu = gcController.assistTime.Load()
		markDedicatedCpu = gcController.dedicatedMarkTime.Load()
		markFractionalCpu = gcController.fractionalMarkTime.Load()
		markIdleCpu = gcController.idleMarkTime.Load()
	}

	// The rest of the stats below are either derived from the above or
	// are reset on each mark termination.

	scavAssistCpu := scavenge.assistTime.Load()
	scavBgCpu := scavenge.backgroundTime.Load()

	// Update cumulative GC CPU stats.
	s.GCAssistTime += markAssistCpu
	// Fractional mark workers are folded into the dedicated bucket.
	s.GCDedicatedTime += markDedicatedCpu + markFractionalCpu
	s.GCIdleTime += markIdleCpu
	s.GCTotalTime += markAssistCpu + markDedicatedCpu + markFractionalCpu + markIdleCpu

	// Update cumulative scavenge CPU stats.
	s.ScavengeAssistTime += scavAssistCpu
	s.ScavengeBgTime += scavBgCpu
	s.ScavengeTotalTime += scavAssistCpu + scavBgCpu

	// Update total CPU.
	// Note: TotalTime is recomputed absolutely (=, not +=) from the
	// scheduler's running total each time accumulate is called.
	s.TotalTime = sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
	s.IdleTime += sched.idleTime.Load()

	// Compute userTime. We compute this indirectly as everything that's not the above.
	//
	// Since time spent in _Pgcstop is covered by gcPauseTime, and time spent in _Pidle
	// is covered by idleTime, what we're left with is time spent in _Prunning,
	// the latter of which is fine because the P will either go idle or get used for something
	// else via sysmon. Meanwhile if we subtract GC time from whatever's left, we get non-GC
	// _Prunning time. Note that this still leaves time spent in sweeping and in the scheduler,
	// but that's fine. The overwhelming majority of this time will be actual user time.
	s.UserTime = s.TotalTime - (s.GCTotalTime + s.ScavengeTotalTime + s.IdleTime)
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This implements the write barrier buffer. The write barrier itself
// is gcWriteBarrier and is implemented in assembly.
//
// See mbarrier.go for algorithmic details on the write barrier. This
// file deals only with the buffer.
//
// The write barrier has a fast path and a slow path. The fast path
// simply enqueues to a per-P write barrier buffer. It's written in
// assembly and doesn't clobber any general purpose registers, so it
// doesn't have the usual overheads of a Go call.
//
// When the buffer fills up, the write barrier invokes the slow path
// (wbBufFlush) to flush the buffer to the GC work queues. In this
// path, since the compiler didn't spill registers, we spill *all*
// registers and disallow any GC safe points that could observe the
// stack frame (since we don't know the types of the spilled
// registers).
package runtime
import (
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
)
// testSmallBuf forces a small write barrier buffer to stress write
// barrier flushing. See (*wbBuf).reset for how the buffer is shrunk.
const testSmallBuf = false
// wbBuf is a per-P buffer of pointers queued by the write barrier.
// This buffer is flushed to the GC workbufs when it fills up and on
// various GC transitions.
//
// This is closely related to a "sequential store buffer" (SSB),
// except that SSBs are usually used for maintaining remembered sets,
// while this is used for marking.
type wbBuf struct {
	// next points to the next slot in buf. It must not be a
	// pointer type because it can point past the end of buf and
	// must be updated without write barriers.
	//
	// This is a pointer rather than an index to optimize the
	// write barrier assembly.
	next uintptr

	// end points to just past the end of buf. It must not be a
	// pointer type because it points past the end of buf and must
	// be updated without write barriers.
	end uintptr

	// buf stores a series of pointers to execute write barriers on.
	buf [wbBufEntries]uintptr
}
const (
	// wbBufEntries is the maximum number of pointers that can be
	// stored in the write barrier buffer.
	//
	// This trades latency for throughput amortization. Higher
	// values amortize flushing overhead more, but increase the
	// latency of flushing. Higher values also increase the cache
	// footprint of the buffer.
	//
	// TODO: What is the latency cost of this? Tune this value.
	wbBufEntries = 512

	// wbMaxEntriesPerCall is the maximum number of entries that we
	// need to ask from the buffer in a single call (see get1/get2).
	wbMaxEntriesPerCall = 8
)
// reset empties b by resetting its next and end pointers.
func (b *wbBuf) reset() {
	slotSize := unsafe.Sizeof(b.buf[0])
	bufStart := uintptr(unsafe.Pointer(&b.buf[0]))
	b.next = bufStart
	if testSmallBuf {
		// For testing, make the buffer smaller but more than
		// 1 write barrier's worth, so it tests both the
		// immediate flush and delayed flush cases.
		b.end = uintptr(unsafe.Pointer(&b.buf[wbMaxEntriesPerCall+1]))
	} else {
		b.end = bufStart + uintptr(len(b.buf))*slotSize
	}

	// Sanity check: the active region must hold a whole number of slots.
	if (b.end-b.next)%slotSize != 0 {
		throw("bad write barrier buffer bounds")
	}
}
// discard resets b's next pointer, but not its end pointer.
//
// This must be nosplit because it's called by wbBufFlush.
//
//go:nosplit
func (b *wbBuf) discard() {
	bufStart := unsafe.Pointer(&b.buf[0])
	b.next = uintptr(bufStart)
}
// empty reports whether b contains no pointers.
func (b *wbBuf) empty() bool {
	bufStart := uintptr(unsafe.Pointer(&b.buf[0]))
	return b.next == bufStart
}
// getX returns space in the write barrier buffer to store X pointers.
// getX will flush the buffer if necessary. Callers should use this as:
//
// buf := &getg().m.p.ptr().wbBuf
// p := buf.get2()
// p[0], p[1] = old, new
// ... actual memory write ...
//
// The caller must ensure there are no preemption points during the
// above sequence. There must be no preemption points while buf is in
// use because it is a per-P resource. There must be no preemption
// points between the buffer put and the write to memory because this
// could allow a GC phase change, which could result in missed write
// barriers.
//
// getX must be nowritebarrierrec to because write barriers here would
// corrupt the write barrier buffer. It (and everything it calls, if
// it called anything) has to be nosplit to avoid scheduling on to a
// different P and a different buffer.
//
//go:nowritebarrierrec
//go:nosplit
func (b *wbBuf) get1() *[1]uintptr {
if b.next+goarch.PtrSize > b.end {
wbBufFlush()
}
p := (*[1]uintptr)(unsafe.Pointer(b.next))
b.next += goarch.PtrSize
return p
}
//go:nowritebarrierrec
//go:nosplit
func (b *wbBuf) get2() *[2]uintptr {
if b.next+2*goarch.PtrSize > b.end {
wbBufFlush()
}
p := (*[2]uintptr)(unsafe.Pointer(b.next))
b.next += 2 * goarch.PtrSize
return p
}
// wbBufFlush flushes the current P's write barrier buffer to the GC
// workbufs.
//
// This must not have write barriers because it is part of the write
// barrier implementation.
//
// This and everything it calls must be nosplit because 1) the stack
// contains untyped slots from gcWriteBarrier and 2) there must not be
// a GC safe point between the write barrier test in the caller and
// flushing the buffer.
//
// TODO: A "go:nosplitrec" annotation would be perfect for this.
//
//go:nowritebarrierrec
//go:nosplit
func wbBufFlush() {
	// Note: Every possible return from this function must reset
	// the buffer's next pointer to prevent buffer overflow.

	if getg().m.dying > 0 {
		// We're going down. Not much point in write barriers
		// and this way we can allow write barriers in the
		// panic path.
		getg().m.p.ptr().wbBuf.discard()
		return
	}

	// Switch to the system stack so we don't have to worry about
	// safe points.
	systemstack(func() {
		wbBufFlush1(getg().m.p.ptr())
	})
}
// wbBufFlush1 flushes p's write barrier buffer to the GC work queue.
//
// This must not have write barriers because it is part of the write
// barrier implementation, so this may lead to infinite loops or
// buffer corruption.
//
// This must be non-preemptible because it uses the P's workbuf.
//
//go:nowritebarrierrec
//go:systemstack
func wbBufFlush1(pp *p) {
	// Get the buffered pointers.
	start := uintptr(unsafe.Pointer(&pp.wbBuf.buf[0]))
	n := (pp.wbBuf.next - start) / unsafe.Sizeof(pp.wbBuf.buf[0])
	ptrs := pp.wbBuf.buf[:n]

	// Poison the buffer to make extra sure nothing is enqueued
	// while we're processing the buffer.
	pp.wbBuf.next = 0

	if useCheckmark {
		// Slow path for checkmark mode.
		for _, ptr := range ptrs {
			shade(ptr)
		}
		pp.wbBuf.reset()
		return
	}

	// Mark all of the pointers in the buffer and record only the
	// pointers we greyed. We use the buffer itself to temporarily
	// record greyed pointers.
	//
	// TODO: Should scanObject/scanblock just stuff pointers into
	// the wbBuf? Then this would become the sole greying path.
	//
	// TODO: We could avoid shading any of the "new" pointers in
	// the buffer if the stack has been shaded, or even avoid
	// putting them in the buffer at all (which would double its
	// capacity). This is slightly complicated with the buffer; we
	// could track whether any un-shaded goroutine has used the
	// buffer, or just track globally whether there are any
	// un-shaded stacks and flush after each stack scan.
	gcw := &pp.gcw
	// pos compacts greyed object pointers into the front of ptrs.
	pos := 0
	for _, ptr := range ptrs {
		if ptr < minLegalPointer {
			// nil pointers are very common, especially
			// for the "old" values. Filter out these and
			// other "obvious" non-heap pointers ASAP.
			//
			// TODO: Should we filter out nils in the fast
			// path to reduce the rate of flushes?
			continue
		}
		if tryDeferToSpanScan(ptr, gcw) {
			continue
		}
		obj, span, objIndex := findObject(ptr, 0, 0)
		if obj == 0 {
			continue
		}
		// TODO: Consider making two passes where the first
		// just prefetches the mark bits.
		mbits := span.markBitsForIndex(objIndex)
		if mbits.isMarked() {
			continue
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		if span.spanclass.noscan() {
			// No pointers inside; count it marked but don't enqueue.
			gcw.bytesMarked += uint64(span.elemsize)
			continue
		}
		ptrs[pos] = obj
		pos++
	}

	// Enqueue the greyed objects.
	gcw.putObjBatch(ptrs[:pos])

	pp.wbBuf.reset()
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
package runtime
// nonblockingPipe creates a pipe via pipe2 with both ends in
// non-blocking, close-on-exec mode.
func nonblockingPipe() (r, w int32, errno int32) {
	return pipe2(_O_CLOEXEC | _O_NONBLOCK)
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix || (js && wasm) || wasip1 || windows
package runtime
import (
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue/port/AIX/Windows)
// must define the following functions:
//
// func netpollinit()
// Initialize the poller. Only called once.
//
// func netpollopen(fd uintptr, pd *pollDesc) int32
// Arm edge-triggered notifications for fd. The pd argument is to pass
// back to netpollready when fd is ready. Return an errno value.
//
// func netpollclose(fd uintptr) int32
// Disable notifications for fd. Return an errno value.
//
// func netpoll(delta int64) (gList, int32)
// Poll the network. If delta < 0, block indefinitely. If delta == 0,
// poll without blocking. If delta > 0, block for up to delta nanoseconds.
// Return a list of goroutines built by calling netpollready,
// and a delta to add to netpollWaiters when all goroutines are ready.
// This must never return an empty list with a non-zero delta.
//
// func netpollBreak()
// Wake up the network poller, assumed to be blocked in netpoll.
//
// func netpollIsPollDescriptor(fd uintptr) bool
// Reports whether fd is a file descriptor used by the poller.
// Error codes returned by runtime_pollReset and runtime_pollWait.
// These must match the values in internal/poll/fd_poll_runtime.go.
const (
	pollNoError        = 0 // no error
	pollErrClosing     = 1 // descriptor is closed
	pollErrTimeout     = 2 // I/O timeout
	pollErrNotPollable = 3 // general error polling descriptor
)
// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
// goroutines respectively. The semaphore can be in the following states:
//
//	pdReady - io readiness notification is pending;
//	          a goroutine consumes the notification by changing the state to pdNil.
//	pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
//	         the goroutine commits to park by changing the state to G pointer,
//	         or, alternatively, concurrent io notification changes the state to pdReady,
//	         or, alternatively, concurrent timeout/close changes the state to pdNil.
//	G pointer - the goroutine is blocked on the semaphore;
//	            io notification or timeout/close changes the state to pdReady or pdNil respectively
//	            and unparks the goroutine.
//	pdNil - none of the above.
const (
	pdNil   uintptr = 0
	pdReady uintptr = 1
	pdWait  uintptr = 2
)

// pollBlockSize is the allocation unit for batches of pollDescs in
// (*pollCache).alloc.
const pollBlockSize = 4 * 1024
// Network poller descriptor.
//
// No heap pointers.
type pollDesc struct {
	_    sys.NotInHeap
	link *pollDesc      // in pollcache, protected by pollcache.lock
	fd   uintptr        // constant for pollDesc usage lifetime
	fdseq atomic.Uintptr // protects against stale pollDesc

	// atomicInfo holds bits from closing, rd, and wd,
	// which are only ever written while holding the lock,
	// summarized for use by netpollcheckerr,
	// which cannot acquire the lock.
	// After writing these fields under lock in a way that
	// might change the summary, code must call publishInfo
	// before releasing the lock.
	// Code that changes fields and then calls netpollunblock
	// (while still holding the lock) must call publishInfo
	// before calling netpollunblock, because publishInfo is what
	// stops netpollblock from blocking anew
	// (by changing the result of netpollcheckerr).
	// atomicInfo also holds the eventErr bit,
	// recording whether a poll event on the fd got an error;
	// atomicInfo is the only source of truth for that bit.
	atomicInfo atomic.Uint32 // atomic pollInfo

	// rg, wg are accessed atomically and hold g pointers.
	// (Using atomic.Uintptr here is similar to using guintptr elsewhere.)
	rg atomic.Uintptr // pdReady, pdWait, G waiting for read or pdNil
	wg atomic.Uintptr // pdReady, pdWait, G waiting for write or pdNil

	lock    mutex // protects the following fields
	closing bool
	rrun    bool      // whether rt is running
	wrun    bool      // whether wt is running
	user    uint32    // user settable cookie
	rseq    uintptr   // protects from stale read timers
	rt      timer     // read deadline timer
	rd      int64     // read deadline (a nanotime in the future, -1 when expired)
	wseq    uintptr   // protects from stale write timers
	wt      timer     // write deadline timer
	wd      int64     // write deadline (a nanotime in the future, -1 when expired)
	self    *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
}
// pollInfo is the bits needed by netpollcheckerr, stored atomically,
// mostly duplicating state that is manipulated under lock in pollDesc.
// The one exception is the pollEventErr bit, which is maintained only
// in the pollInfo.
type pollInfo uint32

const (
	pollClosing = 1 << iota
	pollEventErr
	pollExpiredReadDeadline
	pollExpiredWriteDeadline
	pollFDSeq // 20 bit field, low 20 bits of fdseq field
)

const (
	pollFDSeqBits = 20                   // number of bits in pollFDSeq
	pollFDSeqMask = 1<<pollFDSeqBits - 1 // mask for pollFDSeq
)
// closing reports whether the descriptor is being closed.
func (i pollInfo) closing() bool {
	return i&pollClosing != 0
}

// eventErr reports whether a poll event on the fd got an error.
func (i pollInfo) eventErr() bool {
	return i&pollEventErr != 0
}

// expiredReadDeadline reports whether the read deadline has expired.
func (i pollInfo) expiredReadDeadline() bool {
	return i&pollExpiredReadDeadline != 0
}

// expiredWriteDeadline reports whether the write deadline has expired.
func (i pollInfo) expiredWriteDeadline() bool {
	return i&pollExpiredWriteDeadline != 0
}
// info returns the pollInfo corresponding to pd.
func (pd *pollDesc) info() pollInfo {
	bits := pd.atomicInfo.Load()
	return pollInfo(bits)
}
// publishInfo updates pd.atomicInfo (returned by pd.info)
// using the other values in pd.
// It must be called while holding pd.lock,
// and it must be called after changing anything
// that might affect the info bits.
// In practice this means after changing closing
// or changing rd or wd from < 0 to >= 0.
func (pd *pollDesc) publishInfo() {
	var info uint32
	if pd.closing {
		info |= pollClosing
	}
	if pd.rd < 0 {
		info |= pollExpiredReadDeadline
	}
	if pd.wd < 0 {
		info |= pollExpiredWriteDeadline
	}
	info |= uint32(pd.fdseq.Load()&pollFDSeqMask) << pollFDSeq

	// Set all of x except the pollEventErr bit.
	// CAS loop: the eventErr bit may be flipped concurrently by
	// setEventErr, so preserve whatever value it has at commit time.
	x := pd.atomicInfo.Load()
	for !pd.atomicInfo.CompareAndSwap(x, (x&pollEventErr)|info) {
		x = pd.atomicInfo.Load()
	}
}
// setEventErr sets the result of pd.info().eventErr() to b.
// We only change the error bit if seq == 0 or if seq matches pollFDSeq
// (issue #59545).
func (pd *pollDesc) setEventErr(b bool, seq uintptr) {
	mSeq := uint32(seq & pollFDSeqMask)
	x := pd.atomicInfo.Load()
	xSeq := (x >> pollFDSeq) & pollFDSeqMask
	if seq != 0 && xSeq != mSeq {
		// Stale notification for a reused descriptor; ignore.
		return
	}
	// CAS loop: flip only the pollEventErr bit, re-checking the
	// sequence on every retry in case the descriptor was reused.
	for (x&pollEventErr != 0) != b && !pd.atomicInfo.CompareAndSwap(x, x^pollEventErr) {
		x = pd.atomicInfo.Load()
		xSeq := (x >> pollFDSeq) & pollFDSeqMask
		if seq != 0 && xSeq != mSeq {
			return
		}
	}
}
// pollCache is a free list of pollDesc objects allocated from
// non-GC memory (see alloc).
type pollCache struct {
	lock  mutex
	first *pollDesc
	// PollDesc objects must be type-stable,
	// because we can get ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using seq variable,
	// seq is incremented when deadlines are changed or descriptor is reused.
}
var (
	netpollInitLock mutex
	netpollInited   atomic.Uint32 // set to 1 by netpollGenericInit once netpollinit has run

	pollcache      pollCache
	netpollWaiters atomic.Uint32 // count of goroutines parked for I/O; see netpollAdjustWaiters
)
// netpollWaiters is accessed in tests
//go:linkname netpollWaiters
// poll_runtime_pollServerInit is the internal/poll entry point for
// one-time initialization of the network poller.
//
//go:linkname poll_runtime_pollServerInit internal/poll.runtime_pollServerInit
func poll_runtime_pollServerInit() {
	netpollGenericInit()
}
// netpollGenericInit performs one-time initialization of the poller
// using double-checked locking on netpollInited.
func netpollGenericInit() {
	if netpollInited.Load() != 0 {
		// Fast path: already initialized.
		return
	}
	lockInit(&netpollInitLock, lockRankNetpollInit)
	lockInit(&pollcache.lock, lockRankPollCache)
	lock(&netpollInitLock)
	// Re-check under the lock in case we raced with another initializer.
	if netpollInited.Load() == 0 {
		netpollinit()
		netpollInited.Store(1)
	}
	unlock(&netpollInitLock)
}
// netpollinited reports whether the network poller has been initialized.
func netpollinited() bool {
	inited := netpollInited.Load()
	return inited != 0
}
//go:linkname poll_runtime_isPollServerDescriptor internal/poll.runtime_isPollServerDescriptor

// poll_runtime_isPollServerDescriptor reports whether fd is a
// descriptor being used by netpoll. Exposed to internal/poll via linkname.
func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
	return netpollIsPollDescriptor(fd)
}
// poll_runtime_pollOpen allocates and initializes a pollDesc for fd and
// registers fd with the platform poller. It returns the descriptor and an
// errno value (0 on success).
//
//go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen
func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
	pd := pollcache.alloc()
	lock(&pd.lock)
	// A recycled pollDesc must have quiescent semaphores (pdNil or an
	// unconsumed pdReady); anything else means a goroutine is still
	// parked on a freed descriptor.
	wg := pd.wg.Load()
	if wg != pdNil && wg != pdReady {
		throw("runtime: blocked write on free polldesc")
	}
	rg := pd.rg.Load()
	if rg != pdNil && rg != pdReady {
		throw("runtime: blocked read on free polldesc")
	}
	pd.fd = fd
	if pd.fdseq.Load() == 0 {
		// The value 0 is special in setEventErr, so don't use it.
		pd.fdseq.Store(1)
	}
	pd.closing = false
	pd.setEventErr(false, 0)
	// Bump both timer sequences so any stale deadline timers are ignored.
	pd.rseq++
	pd.rg.Store(pdNil)
	pd.rd = 0
	pd.wseq++
	pd.wg.Store(pdNil)
	pd.wd = 0
	pd.self = pd
	pd.publishInfo()
	unlock(&pd.lock)

	errno := netpollopen(fd, pd)
	if errno != 0 {
		pollcache.free(pd)
		return nil, int(errno)
	}
	return pd, 0
}
// poll_runtime_pollClose unregisters fd from the poller and returns pd to
// the cache. Callers must have already called poll_runtime_pollUnblock
// (which sets pd.closing).
//
//go:linkname poll_runtime_pollClose internal/poll.runtime_pollClose
func poll_runtime_pollClose(pd *pollDesc) {
	if !pd.closing {
		throw("runtime: close polldesc w/o unblock")
	}
	// Both semaphores must be quiescent: no goroutine may still be
	// parked on a descriptor that's being closed.
	wg := pd.wg.Load()
	if wg != pdNil && wg != pdReady {
		throw("runtime: blocked write on closing polldesc")
	}
	rg := pd.rg.Load()
	if rg != pdNil && rg != pdReady {
		throw("runtime: blocked read on closing polldesc")
	}
	netpollclose(pd.fd)
	pollcache.free(pd)
}
// free returns pd to the cache's free list after invalidating its fdseq so
// that in-flight poller notifications for the old fd are ignored.
func (c *pollCache) free(pd *pollDesc) {
	// pd can't be shared here, but lock anyhow because
	// that's what publishInfo documents.
	lock(&pd.lock)

	// Increment the fdseq field, so that any currently
	// running netpoll calls will not mark pd as ready.
	fdseq := pd.fdseq.Load()
	fdseq = (fdseq + 1) & (1<<tagBits - 1)
	pd.fdseq.Store(fdseq)

	pd.publishInfo()

	unlock(&pd.lock)

	lock(&c.lock)
	pd.link = c.first
	c.first = pd
	unlock(&c.lock)
}
// poll_runtime_pollReset, which is internal/poll.runtime_pollReset,
// prepares a descriptor for polling in mode, which is 'r' or 'w'.
// This returns an error code; the codes are defined above.
//
//go:linkname poll_runtime_pollReset internal/poll.runtime_pollReset
func poll_runtime_pollReset(pd *pollDesc, mode int) int {
	if errcode := netpollcheckerr(pd, int32(mode)); errcode != pollNoError {
		return errcode
	}
	// Clear the semaphore for the requested direction.
	switch mode {
	case 'r':
		pd.rg.Store(pdNil)
	case 'w':
		pd.wg.Store(pdNil)
	}
	return pollNoError
}
// poll_runtime_pollWait, which is internal/poll.runtime_pollWait,
// waits for a descriptor to be ready for reading or writing,
// according to mode, which is 'r' or 'w'.
// This returns an error code; the codes are defined above.
//
//go:linkname poll_runtime_pollWait internal/poll.runtime_pollWait
func poll_runtime_pollWait(pd *pollDesc, mode int) int {
	errcode := netpollcheckerr(pd, int32(mode))
	if errcode != pollNoError {
		return errcode
	}
	// As for now only Solaris, illumos, AIX and wasip1 use level-triggered IO.
	if GOOS == "solaris" || GOOS == "illumos" || GOOS == "aix" || GOOS == "wasip1" {
		netpollarm(pd, mode)
	}
	for !netpollblock(pd, int32(mode), false) {
		errcode = netpollcheckerr(pd, int32(mode))
		if errcode != pollNoError {
			return errcode
		}
		// Can happen if timeout has fired and unblocked us,
		// but before we had a chance to run, timeout has been reset.
		// Pretend it has not happened and retry.
	}
	return pollNoError
}
//go:linkname poll_runtime_pollWaitCanceled internal/poll.runtime_pollWaitCanceled
func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
	// This function is used only on windows after a failed attempt to cancel
	// a pending async IO operation. Wait for ioready, ignore closing or timeouts.
	for {
		if netpollblock(pd, int32(mode), true) {
			return
		}
	}
}
// poll_runtime_pollSetDeadline sets the read and/or write deadline for pd.
// d is a relative timeout in nanoseconds (<= 0 means no deadline / already
// expired) and mode is 'r', 'w', or 'r'+'w' for both.
//
//go:linkname poll_runtime_pollSetDeadline internal/poll.runtime_pollSetDeadline
func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
	lock(&pd.lock)
	if pd.closing {
		unlock(&pd.lock)
		return
	}
	rd0, wd0 := pd.rd, pd.wd
	// combo0: the old deadlines were combined into one timer.
	combo0 := rd0 > 0 && rd0 == wd0
	if d > 0 {
		// Convert the relative timeout to an absolute nanotime.
		d += nanotime()
		if d <= 0 {
			// If the user has a deadline in the future, but the delay calculation
			// overflows, then set the deadline to the maximum possible value.
			d = 1<<63 - 1
		}
	}
	if mode == 'r' || mode == 'r'+'w' {
		pd.rd = d
	}
	if mode == 'w' || mode == 'r'+'w' {
		pd.wd = d
	}
	pd.publishInfo()
	// combo: use a single timer (netpollDeadline) when read and write
	// deadlines coincide.
	combo := pd.rd > 0 && pd.rd == pd.wd
	rtf := netpollReadDeadline
	if combo {
		rtf = netpollDeadline
	}
	if !pd.rrun {
		if pd.rd > 0 {
			// Copy current seq into the timer arg.
			// Timer func will check the seq against current descriptor seq,
			// if they differ the descriptor was reused or timers were reset.
			pd.rt.modify(pd.rd, 0, rtf, pd.makeArg(), pd.rseq)
			pd.rrun = true
		}
	} else if pd.rd != rd0 || combo != combo0 {
		pd.rseq++ // invalidate current timers
		if pd.rd > 0 {
			pd.rt.modify(pd.rd, 0, rtf, pd.makeArg(), pd.rseq)
		} else {
			pd.rt.stop()
			pd.rrun = false
		}
	}
	if !pd.wrun {
		if pd.wd > 0 && !combo {
			pd.wt.modify(pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq)
			pd.wrun = true
		}
	} else if pd.wd != wd0 || combo != combo0 {
		pd.wseq++ // invalidate current timers
		if pd.wd > 0 && !combo {
			pd.wt.modify(pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq)
		} else {
			pd.wt.stop()
			pd.wrun = false
		}
	}
	// If we set the new deadline in the past, unblock currently pending IO if any.
	// Note that pd.publishInfo has already been called, above, immediately after modifying rd and wd.
	delta := int32(0)
	var rg, wg *g
	if pd.rd < 0 {
		rg = netpollunblock(pd, 'r', false, &delta)
	}
	if pd.wd < 0 {
		wg = netpollunblock(pd, 'w', false, &delta)
	}
	unlock(&pd.lock)
	// Ready the goroutines outside the lock.
	if rg != nil {
		netpollgoready(rg, 3)
	}
	if wg != nil {
		netpollgoready(wg, 3)
	}
	netpollAdjustWaiters(delta)
}
// poll_runtime_pollUnblock marks pd as closing, wakes any goroutines blocked
// on it, and stops its deadline timers. It must be called exactly once
// before poll_runtime_pollClose.
//
//go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock
func poll_runtime_pollUnblock(pd *pollDesc) {
	lock(&pd.lock)
	if pd.closing {
		throw("runtime: unblock on closing polldesc")
	}
	pd.closing = true
	// Invalidate any pending deadline timers.
	pd.rseq++
	pd.wseq++
	var rg, wg *g
	// publishInfo must precede netpollunblock so netpollblock sees closing.
	pd.publishInfo()
	delta := int32(0)
	rg = netpollunblock(pd, 'r', false, &delta)
	wg = netpollunblock(pd, 'w', false, &delta)
	if pd.rrun {
		pd.rt.stop()
		pd.rrun = false
	}
	if pd.wrun {
		pd.wt.stop()
		pd.wrun = false
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 3)
	}
	if wg != nil {
		netpollgoready(wg, 3)
	}
	netpollAdjustWaiters(delta)
}
// netpollready is called by the platform-specific netpoll function.
// It declares that the fd associated with pd is ready for I/O.
// The toRun argument is used to build a list of goroutines to return
// from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate
// whether the fd is ready for reading or writing or both.
//
// This returns a delta to apply to netpollWaiters.
//
// This may run while the world is stopped, so write barriers are not allowed.
//
//go:nowritebarrier
func netpollready(toRun *gList, pd *pollDesc, mode int32) int32 {
	delta := int32(0)
	var rg, wg *g
	if mode == 'r' || mode == 'r'+'w' {
		rg = netpollunblock(pd, 'r', true, &delta)
	}
	if mode == 'w' || mode == 'r'+'w' {
		wg = netpollunblock(pd, 'w', true, &delta)
	}
	if rg != nil {
		toRun.push(rg)
	}
	if wg != nil {
		toRun.push(wg)
	}
	return delta
}
// netpollcheckerr translates pd's published state into one of the poll
// error codes for an operation in mode ('r' or 'w').
func netpollcheckerr(pd *pollDesc, mode int32) int {
	info := pd.info()
	switch {
	case info.closing():
		return pollErrClosing
	case mode == 'r' && info.expiredReadDeadline(),
		mode == 'w' && info.expiredWriteDeadline():
		return pollErrTimeout
	case mode == 'r' && info.eventErr():
		// Report an event scanning error only on a read event.
		// An error on a write event will be captured in a subsequent
		// write call that is able to report a more specific error.
		return pollErrNotPollable
	}
	return pollNoError
}
// netpollblockcommit is the gopark commit function for netpollblock.
// It transitions the semaphore from pdWait to the parked goroutine's
// pointer; if the CAS fails (a concurrent event fired), the park aborts.
func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
	r := atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
	if r {
		// Bump the count of goroutines waiting for the poller.
		// The scheduler uses this to decide whether to block
		// waiting for the poller if there is nothing else to do.
		netpollAdjustWaiters(1)
	}
	return r
}
// netpollgoready makes gp runnable after an I/O readiness or deadline
// event. traceskip is bumped to account for this wrapper frame.
func netpollgoready(gp *g, traceskip int) {
	goready(gp, traceskip+1)
}
// netpollblock returns true if IO is ready, or false if timed out or closed.
// waitio - wait only for completed IO, ignore errors.
// Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc
// can hold only a single waiting goroutine for each mode.
func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	// set the gpp semaphore to pdWait
	for {
		// Consume notification if already ready.
		if gpp.CompareAndSwap(pdReady, pdNil) {
			return true
		}
		if gpp.CompareAndSwap(pdNil, pdWait) {
			break
		}

		// Double check that this isn't corrupt; otherwise we'd loop
		// forever.
		if v := gpp.Load(); v != pdReady && v != pdNil {
			throw("runtime: double wait")
		}
	}

	// need to recheck error states after setting gpp to pdWait
	// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
	// do the opposite: store to closing/rd/wd, publishInfo, load of rg/wg
	if waitio || netpollcheckerr(pd, mode) == pollNoError {
		gopark(netpollblockcommit, unsafe.Pointer(gpp), waitReasonIOWait, traceBlockNet, 5)
	}
	// be careful to not lose concurrent pdReady notification
	old := gpp.Swap(pdNil)
	if old > pdWait {
		// A G pointer left behind would mean a still-parked goroutine.
		throw("runtime: corrupted polldesc")
	}
	return old == pdReady
}
// netpollunblock moves either pd.rg (if mode == 'r') or
// pd.wg (if mode == 'w') into the pdReady state.
// This returns any goroutine blocked on pd.{rg,wg}.
// It adds any adjustment to netpollWaiters to *delta;
// this adjustment should be applied after the goroutine has
// been marked ready.
func netpollunblock(pd *pollDesc, mode int32, ioready bool, delta *int32) *g {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	// CAS loop: retry until we observe and replace a consistent state.
	for {
		old := gpp.Load()
		if old == pdReady {
			// Notification already pending; nothing to do.
			return nil
		}
		if old == pdNil && !ioready {
			// Only set pdReady for ioready. runtime_pollWait
			// will check for timeout/cancel before waiting.
			return nil
		}
		new := pdNil
		if ioready {
			new = pdReady
		}
		if gpp.CompareAndSwap(old, new) {
			if old == pdWait {
				// Goroutine was committing to park but hadn't yet;
				// there is no G to return.
				old = pdNil
			} else if old != pdNil {
				// We removed a parked goroutine; the waiter count
				// must drop once it has been readied.
				*delta -= 1
			}
			return (*g)(unsafe.Pointer(old))
		}
	}
}
// netpolldeadlineimpl is the shared body of the deadline timer callbacks.
// read/write select which deadline(s) fired; seq is the descriptor sequence
// captured when the timer was armed, used to reject stale timers.
func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
	lock(&pd.lock)
	// Seq arg is seq when the timer was set.
	// If it's stale, ignore the timer event.
	currentSeq := pd.rseq
	if !read {
		currentSeq = pd.wseq
	}
	if seq != currentSeq {
		// The descriptor was reused or timers were reset.
		unlock(&pd.lock)
		return
	}
	delta := int32(0)
	var rg *g
	if read {
		if pd.rd <= 0 || !pd.rrun {
			throw("runtime: inconsistent read deadline")
		}
		// Mark the deadline expired and publish before unblocking.
		pd.rd = -1
		pd.publishInfo()
		rg = netpollunblock(pd, 'r', false, &delta)
	}
	var wg *g
	if write {
		if pd.wd <= 0 || !pd.wrun && !read {
			throw("runtime: inconsistent write deadline")
		}
		pd.wd = -1
		pd.publishInfo()
		wg = netpollunblock(pd, 'w', false, &delta)
	}
	unlock(&pd.lock)
	// Ready the goroutines outside the lock.
	if rg != nil {
		netpollgoready(rg, 0)
	}
	if wg != nil {
		netpollgoready(wg, 0)
	}
	netpollAdjustWaiters(delta)
}
// netpollDeadline is the timer callback for a combined read+write deadline.
func netpollDeadline(arg any, seq uintptr, delta int64) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
}

// netpollReadDeadline is the timer callback for a read deadline.
func netpollReadDeadline(arg any, seq uintptr, delta int64) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
}

// netpollWriteDeadline is the timer callback for a write deadline.
func netpollWriteDeadline(arg any, seq uintptr, delta int64) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
}
// netpollAnyWaiters reports whether any goroutines are waiting for I/O.
func netpollAnyWaiters() bool {
	waiters := netpollWaiters.Load()
	return waiters != 0
}
// netpollAdjustWaiters adds delta to netpollWaiters.
func netpollAdjustWaiters(delta int32) {
	// Avoid touching the shared counter when there's nothing to add.
	if delta == 0 {
		return
	}
	netpollWaiters.Add(delta)
}
// alloc returns a pollDesc from the cache, refilling the free list with a
// block of new descriptors from non-GC memory when it is empty.
func (c *pollCache) alloc() *pollDesc {
	lock(&c.lock)
	if c.first == nil {
		// Pad pollDesc up to tagAlign so each descriptor's address
		// can carry a tag (see taggedPointerPack in netpollopen).
		type pollDescPadded struct {
			pollDesc
			pad [tagAlign - unsafe.Sizeof(pollDesc{})]byte
		}

		const pdSize = unsafe.Sizeof(pollDescPadded{})
		n := pollBlockSize / pdSize
		if n == 0 {
			n = 1
		}
		// Must be in non-GC memory because can be referenced
		// only from epoll/kqueue internals.
		mem := persistentalloc(n*pdSize, tagAlign, &memstats.other_sys)
		for i := uintptr(0); i < n; i++ {
			pd := (*pollDesc)(add(mem, i*pdSize))
			lockInit(&pd.lock, lockRankPollDesc)
			pd.rt.init(nil, nil)
			pd.wt.init(nil, nil)
			pd.link = c.first
			c.first = pd
		}
	}
	pd := c.first
	c.first = pd.link
	unlock(&c.lock)
	return pd
}
// makeArg converts pd to an interface{}.
// makeArg does not do any allocation. Normally, such
// a conversion requires an allocation because pointers to
// types which embed internal/runtime/sys.NotInHeap (which pollDesc is)
// must be stored in interfaces indirectly. See issue 42076.
func (pd *pollDesc) makeArg() (i any) {
	// Build the eface by hand: point its data at pd.self (indirect
	// storage) and use the precomputed *pollDesc type descriptor.
	x := (*eface)(unsafe.Pointer(&i))
	x._type = pdType
	x.data = unsafe.Pointer(&pd.self)
	return
}
var (
	// pdEface exists only so pdType can capture the *_type descriptor
	// for *pollDesc, used by makeArg's no-alloc interface construction.
	pdEface any    = (*pollDesc)(nil)
	pdType  *_type = efaceOf(&pdEface)._type
)
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
package runtime
import (
"internal/runtime/atomic"
"internal/runtime/syscall/linux"
"unsafe"
)
var (
	epfd           int32 = -1 // epoll descriptor
	netpollEventFd uintptr    // eventfd for netpollBreak
	netpollWakeSig atomic.Uint32 // used to avoid duplicate calls of netpollBreak
)
// netpollinit creates the epoll instance and registers an eventfd with it
// so netpollBreak can wake a blocked epollwait. Only called once.
func netpollinit() {
	var errno uintptr
	epfd, errno = linux.EpollCreate1(linux.EPOLL_CLOEXEC)
	if errno != 0 {
		println("runtime: epollcreate failed with", errno)
		throw("runtime: netpollinit failed")
	}
	efd, errno := linux.Eventfd(0, linux.EFD_CLOEXEC|linux.EFD_NONBLOCK)
	if errno != 0 {
		println("runtime: eventfd failed with", errno)
		throw("runtime: eventfd failed")
	}
	ev := linux.EpollEvent{
		Events: linux.EPOLLIN,
	}
	// Store &netpollEventFd in the event data so netpoll can recognize
	// wakeup events from the eventfd.
	*(**uintptr)(unsafe.Pointer(&ev.Data)) = &netpollEventFd
	errno = linux.EpollCtl(epfd, linux.EPOLL_CTL_ADD, efd, &ev)
	if errno != 0 {
		println("runtime: epollctl failed with", errno)
		throw("runtime: epollctl failed")
	}
	netpollEventFd = uintptr(efd)
}
// netpollIsPollDescriptor reports whether fd is one of the descriptors
// owned by the netpoller itself: the epoll fd or the wakeup eventfd.
func netpollIsPollDescriptor(fd uintptr) bool {
	switch fd {
	case uintptr(epfd), netpollEventFd:
		return true
	}
	return false
}
// netpollopen registers fd with the epoll instance in edge-triggered
// mode, packing pd together with its fdseq tag into the epoll data word
// so stale events for a reused pollDesc can be detected later.
// It returns 0 on success or an errno value.
func netpollopen(fd uintptr, pd *pollDesc) uintptr {
	var ev linux.EpollEvent
	ev.Events = linux.EPOLLIN | linux.EPOLLOUT | linux.EPOLLRDHUP | linux.EPOLLET
	tp := taggedPointerPack(unsafe.Pointer(pd), pd.fdseq.Load())
	*(*taggedPointer)(unsafe.Pointer(&ev.Data)) = tp
	return linux.EpollCtl(epfd, linux.EPOLL_CTL_ADD, int32(fd), &ev)
}

// netpollclose removes fd from the epoll instance.
// It returns 0 on success or an errno value.
func netpollclose(fd uintptr) uintptr {
	// The event argument is ignored for EPOLL_CTL_DEL but must be
	// non-nil on old kernels (see epoll_ctl(2)), so pass a zeroed one.
	var ev linux.EpollEvent
	return linux.EpollCtl(epfd, linux.EPOLL_CTL_DEL, int32(fd), &ev)
}

// netpollarm is unused on epoll-based systems.
func netpollarm(pd *pollDesc, mode int) {
	throw("runtime: unused")
}
// netpollBreak interrupts an epollwait.
func netpollBreak() {
	// Failing to cas indicates there is an in-flight wakeup, so we're done here.
	if !netpollWakeSig.CompareAndSwap(0, 1) {
		return
	}

	// Write one 8-byte value to the eventfd: this makes it readable
	// and so wakes the thread blocked in EpollWait.
	var one uint64 = 1
	oneSize := int32(unsafe.Sizeof(one))
	for {
		n := write(netpollEventFd, noescape(unsafe.Pointer(&one)), oneSize)
		if n == oneSize {
			break
		}
		if n == -_EINTR {
			// Interrupted by a signal; retry the write.
			continue
		}
		if n == -_EAGAIN {
			// The eventfd counter is already non-zero, so a
			// wakeup is pending anyway.
			return
		}
		println("runtime: netpollBreak write failed with", -n)
		throw("runtime: netpollBreak write failed")
	}
}
// netpoll checks for ready network connections.
// Returns a list of goroutines that become runnable,
// and a delta to add to netpollWaiters.
// This must never return an empty list with a non-zero delta.
//
//	delay < 0: blocks indefinitely
//	delay == 0: does not block, just polls
//	delay > 0: block for up to that many nanoseconds
func netpoll(delay int64) (gList, int32) {
	if epfd == -1 {
		// netpollinit has not run yet; nothing to poll.
		return gList{}, 0
	}

	// Convert the nanosecond delay into epoll's millisecond timeout.
	var waitms int32
	if delay < 0 {
		waitms = -1
	} else if delay == 0 {
		waitms = 0
	} else if delay < 1e6 {
		// Round sub-millisecond delays up to 1ms rather than busy-poll.
		waitms = 1
	} else if delay < 1e15 {
		waitms = int32(delay / 1e6)
	} else {
		// An arbitrary cap on how long to wait for a timer.
		// 1e9 ms == ~11.5 days.
		waitms = 1e9
	}
	var events [128]linux.EpollEvent
retry:
	n, errno := linux.EpollWait(epfd, events[:], int32(len(events)), waitms)
	if errno != 0 {
		if errno != _EINTR {
			println("runtime: epollwait on fd", epfd, "failed with", errno)
			throw("runtime: netpoll failed")
		}
		// If a timed sleep was interrupted, just return to
		// recalculate how long we should sleep now.
		if waitms > 0 {
			return gList{}, 0
		}
		goto retry
	}
	var toRun gList
	delta := int32(0)
	for i := int32(0); i < n; i++ {
		ev := events[i]
		if ev.Events == 0 {
			continue
		}

		// The wakeup eventfd was registered with &netpollEventFd as
		// its data word; consume its events here instead of handing
		// them to a goroutine.
		if *(**uintptr)(unsafe.Pointer(&ev.Data)) == &netpollEventFd {
			if ev.Events != linux.EPOLLIN {
				println("runtime: netpoll: eventfd ready for", ev.Events)
				throw("runtime: netpoll: eventfd ready for something unexpected")
			}
			if delay != 0 {
				// netpollBreak could be picked up by a
				// nonblocking poll. Only read the 8-byte
				// integer if blocking.
				// Since EFD_SEMAPHORE was not specified,
				// the eventfd counter will be reset to 0.
				var one uint64
				read(int32(netpollEventFd), noescape(unsafe.Pointer(&one)), int32(unsafe.Sizeof(one)))
				netpollWakeSig.Store(0)
			}
			continue
		}

		// Translate the epoll event bits into the runtime's
		// 'r'/'w' readiness modes.
		var mode int32
		if ev.Events&(linux.EPOLLIN|linux.EPOLLRDHUP|linux.EPOLLHUP|linux.EPOLLERR) != 0 {
			mode += 'r'
		}
		if ev.Events&(linux.EPOLLOUT|linux.EPOLLHUP|linux.EPOLLERR) != 0 {
			mode += 'w'
		}
		if mode != 0 {
			tp := *(*taggedPointer)(unsafe.Pointer(&ev.Data))
			pd := (*pollDesc)(tp.pointer())
			tag := tp.tag()
			// Skip stale events: if the fd was closed and the
			// pollDesc reused, fdseq no longer matches the tag
			// packed at netpollopen time.
			if pd.fdseq.Load() == tag {
				pd.setEventErr(ev.Events == linux.EPOLLERR, tag)
				delta += netpollready(&toRun, pd, mode)
			}
		}
	}
	return toRun, delta
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
package runtime
// osRelaxMinNS is the number of nanoseconds of idleness to tolerate
// without performing an osRelax. Since osRelax may reduce the
// precision of timers, this should be enough larger than the relaxed
// timer precision to keep the timer error acceptable.
const osRelaxMinNS = 0

// haveHighResSleep reports that high-resolution timed sleep is
// available; on non-Windows platforms it always is.
var haveHighResSleep = true
// osRelax is called by the scheduler when transitioning to and from
// all Ps being idle.
//
// On non-Windows platforms there is no timer resolution to relax,
// so this is a no-op.
func osRelax(relax bool) {}

// enableWER is called by setTraceback("wer").
// Windows Error Reporting (WER) is only supported on Windows.
func enableWER() {}

// winlibcall is not implemented on non-Windows systems,
// but it is used in non-OS-specific parts of the runtime.
// Define it as an empty struct to avoid wasting stack space.
type winlibcall struct{}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/syscall/linux"
"internal/strconv"
"unsafe"
)
// sigPerThreadSyscall is the same signal (SIGSETXID) used by glibc for
// per-thread syscalls on Linux. We use it for the same purpose in non-cgo
// binaries. See syscall_runtime_doAllThreadsSyscall and
// runPerThreadSyscall for sender and handler.
const sigPerThreadSyscall = _SIGRTMIN + 1
// mOS holds the Linux-specific per-M state.
type mOS struct {
	// profileTimer holds the ID of the POSIX interval timer for profiling CPU
	// usage on this thread.
	//
	// It is valid when the profileTimerValid field is true. A thread
	// creates and manages its own timer, and these fields are read and written
	// only by this thread. But because some of the reads on profileTimerValid
	// are in signal handling code, this field should be atomic type.
	profileTimer      int32
	profileTimerValid atomic.Bool

	// needPerThreadSyscall indicates that a per-thread syscall is required
	// for doAllThreadsSyscall.
	needPerThreadSyscall atomic.Uint8

	// This is a pointer to a chunk of memory allocated with a special
	// mmap invocation in vgetrandomGetState().
	vgetrandomState uintptr

	waitsema uint32 // semaphore for parking on locks
}
// Linux futex.
//
//	futexsleep(uint32 *addr, uint32 val)
//	futexwakeup(uint32 *addr)
//
// Futexsleep atomically checks if *addr == val and if so, sleeps on addr.
// Futexwakeup wakes up threads sleeping on addr.
// Futexsleep is allowed to wake up spuriously.

const (
	// FUTEX_PRIVATE_FLAG tells the kernel the futex is process-local,
	// skipping the cross-process bookkeeping (see futex(2)).
	_FUTEX_PRIVATE_FLAG = 128
	_FUTEX_WAIT_PRIVATE = 0 | _FUTEX_PRIVATE_FLAG
	_FUTEX_WAKE_PRIVATE = 1 | _FUTEX_PRIVATE_FLAG
)
// Atomically,
//
//	if(*addr == val) sleep
//
// Might be woken up spuriously; that's allowed.
// Don't sleep longer than ns; ns < 0 means forever.
//
//go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64) {
	// Some Linux kernels have a bug where futex of
	// FUTEX_WAIT returns an internal error code
	// as an errno. Libpthread ignores the return value
	// here, and so can we: as it says a few lines up,
	// spurious wakeups are allowed.
	if ns < 0 {
		// No timeout: pass a nil timespec.
		futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, nil, nil, 0)
		return
	}

	var ts timespec
	ts.setNsec(ns)
	futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, &ts, nil, 0)
}
// If any procs are sleeping on addr, wake up at most cnt.
//
//go:nosplit
func futexwakeup(addr *uint32, cnt uint32) {
	ret := futex(unsafe.Pointer(addr), _FUTEX_WAKE_PRIVATE, cnt, nil, nil, 0)
	if ret >= 0 {
		return
	}

	// I don't know that futex wakeup can return
	// EAGAIN or EINTR, but if it does, it would be
	// safe to loop and call futex again.
	systemstack(func() {
		print("futexwakeup addr=", addr, " returned ", ret, "\n")
	})

	// Crash deliberately by storing to the recognizable address 0x1006
	// so the failure is identifiable in the resulting fault.
	*(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
}
// getCPUCount returns the number of CPUs this process may run on,
// derived from the scheduler affinity mask. It returns at least 1,
// including when the affinity mask cannot be read.
func getCPUCount() int32 {
	// This buffer is huge (8 kB) but we are on the system stack
	// and there should be plenty of space (64 kB).
	// Also this is a leaf, so we're not holding up the memory for long.
	// See golang.org/issue/11823.
	// The suggested behavior here is to keep trying with ever-larger
	// buffers, but we don't have a dynamic memory allocator at the
	// moment, so that's a bit tricky and seems like overkill.
	const maxCPUs = 64 * 1024
	var mask [maxCPUs / 8]byte
	ret := sched_getaffinity(0, unsafe.Sizeof(mask), &mask[0])
	if ret < 0 {
		return 1
	}
	// Count the set bits in the returned affinity mask using
	// Kernighan's method: each iteration clears the lowest set bit.
	count := int32(0)
	for _, b := range mask[:ret] {
		for ; b != 0; b &= b - 1 {
			count++
		}
	}
	if count == 0 {
		count = 1
	}
	return count
}
// Clone, the Linux rfork.
const (
	_CLONE_VM             = 0x100
	_CLONE_FS             = 0x200
	_CLONE_FILES          = 0x400
	_CLONE_SIGHAND        = 0x800
	_CLONE_PTRACE         = 0x2000
	_CLONE_VFORK          = 0x4000
	_CLONE_PARENT         = 0x8000
	_CLONE_THREAD         = 0x10000
	_CLONE_NEWNS          = 0x20000
	_CLONE_SYSVSEM        = 0x40000
	_CLONE_SETTLS         = 0x80000
	_CLONE_PARENT_SETTID  = 0x100000
	_CLONE_CHILD_CLEARTID = 0x200000
	_CLONE_UNTRACED       = 0x800000
	_CLONE_CHILD_SETTID   = 0x1000000
	_CLONE_STOPPED        = 0x2000000
	_CLONE_NEWUTS         = 0x4000000
	_CLONE_NEWIPC         = 0x8000000

	// As of QEMU 2.8.0 (5ea2fc84d), user emulation requires all six of these
	// flags to be set when creating a thread; attempts to share the other
	// five but leave SYSVSEM unshared will fail with -EINVAL.
	//
	// In non-QEMU environments CLONE_SYSVSEM is inconsequential as we do not
	// use System V semaphores.
	cloneFlags = _CLONE_VM | /* share memory */
		_CLONE_FS | /* share cwd, etc */
		_CLONE_FILES | /* share fd table */
		_CLONE_SIGHAND | /* share sig handler table */
		_CLONE_SYSVSEM | /* share SysV semaphore undo lists (see issue #20763) */
		_CLONE_THREAD /* revisit - okay for now */
)

// clone creates a new OS thread; implemented in assembly.
//
//go:noescape
func clone(flags int32, stk, mp, gp, fn unsafe.Pointer) int32
// newosproc creates a new OS thread for mp running mstart, using the
// top of mp.g0's stack. May run with m.p==nil, so write barriers are
// not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
	stk := unsafe.Pointer(mp.g0.stack.hi)
	/*
	 * note: strace gets confused if we use CLONE_PTRACE here.
	 */
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", abi.FuncPCABI0(clone), " id=", mp.id, " ostk=", &mp, "\n")
	}

	// Disable signals during clone, so that the new thread starts
	// with signals disabled. It will enable them in minit.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	ret := retryOnEAGAIN(func() int32 {
		r := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(abi.FuncPCABI0(mstart)))
		// clone returns positive TID, negative errno.
		// We don't care about the TID.
		if r >= 0 {
			return 0
		}
		return -r
	})
	sigprocmask(_SIG_SETMASK, &oset, nil)

	if ret != 0 {
		print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", ret, ")\n")
		if ret == _EAGAIN {
			println("runtime: may need to increase max user processes (ulimit -u)")
		}
		throw("newosproc")
	}
}
// Version of newosproc that doesn't require a valid G.
// Allocates a fresh stack and starts fn on a new thread;
// failure to allocate or clone exits the process.
//
//go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
	stack := sysAlloc(stacksize, &memstats.stacks_sys, "OS thread stack")
	if stack == nil {
		writeErrStr(failallocatestack)
		exit(1)
	}
	// The stack grows down, so pass the top of the allocation to clone.
	ret := clone(cloneFlags, unsafe.Pointer(uintptr(stack)+stacksize), nil, nil, fn)
	if ret < 0 {
		writeErrStr(failthreadcreate)
		exit(1)
	}
}
const (
	_AT_NULL     = 0  // End of vector
	_AT_PAGESZ   = 6  // System physical page size
	_AT_PLATFORM = 15 // string identifying platform
	_AT_HWCAP    = 16 // hardware capability bit vector
	_AT_SECURE   = 23 // secure mode boolean
	_AT_RANDOM   = 25 // introduced in 2.6.29
	_AT_HWCAP2   = 26 // hardware capability bit vector 2
)

// procAuxv is the NUL-terminated path used by sysargs as a fallback
// source of the auxiliary vector.
var procAuxv = []byte("/proc/self/auxv\x00")

// addrspace_vec receives the per-page status byte from mincore in the
// page-size probe in sysargs.
var addrspace_vec [1]byte

func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32

// auxvreadbuf backs the /proc/self/auxv fallback read in sysargs.
var auxvreadbuf [128]uintptr
// sysargs locates the kernel-supplied auxiliary vector, which follows
// argv and envp on the initial stack, and processes it via sysauxv.
// If none is present (e.g. when loaded as a library), it falls back to
// /proc/self/auxv, and failing that probes the physical page size with
// mincore.
func sysargs(argc int32, argv **byte) {
	n := argc + 1

	// skip over argv, envp to get to auxv
	for argv_index(argv, n) != nil {
		n++
	}

	// skip NULL separator
	n++

	// now argv+n is auxv
	auxvp := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
	if pairs := sysauxv(auxvp[:]); pairs != 0 {
		// Keep a reference to the loader-provided auxv for later readers.
		auxv = auxvp[: pairs*2 : pairs*2]
		return
	}
	// In some situations we don't get a loader-provided
	// auxv, such as when loaded as a library on Android.
	// Fall back to /proc/self/auxv.
	fd := open(&procAuxv[0], 0 /* O_RDONLY */, 0)
	if fd < 0 {
		// On Android, /proc/self/auxv might be unreadable (issue 9229), so we fallback to
		// try using mincore to detect the physical page size.
		// mincore should return EINVAL when address is not a multiple of system page size.
		const size = 256 << 10 // size of memory region to allocate
		p, err := mmap(nil, size, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
		if err != 0 {
			return
		}
		// The first offset at which mincore succeeds is a multiple of
		// the page size, hence the page size itself.
		var n uintptr
		for n = 4 << 10; n < size; n <<= 1 {
			err := mincore(unsafe.Pointer(uintptr(p)+n), 1, &addrspace_vec[0])
			if err == 0 {
				physPageSize = n
				break
			}
		}
		if physPageSize == 0 {
			physPageSize = size
		}
		munmap(p, size)
		return
	}
	n = read(fd, noescape(unsafe.Pointer(&auxvreadbuf[0])), int32(unsafe.Sizeof(auxvreadbuf)))
	closefd(fd)
	if n < 0 {
		return
	}
	// Make sure buf is terminated, even if we didn't read
	// the whole file.
	auxvreadbuf[len(auxvreadbuf)-2] = _AT_NULL
	pairs := sysauxv(auxvreadbuf[:])
	auxv = auxvreadbuf[: pairs*2 : pairs*2]
}
// secureMode holds the value of AT_SECURE passed in the auxiliary vector.
var secureMode bool

// sysauxv processes the auxiliary vector entries provided by the kernel
// when the program is executed, stopping at the terminating _AT_NULL
// entry. It returns the number of tag/value pairs consumed.
// See getauxval(3).
func sysauxv(auxv []uintptr) (pairs int) {
	var i int
	for ; auxv[i] != _AT_NULL; i += 2 {
		tag, val := auxv[i], auxv[i+1]
		switch tag {
		case _AT_RANDOM:
			// The kernel provides a pointer to 16 bytes of cryptographically
			// random data. Note that in cgo programs this value may have
			// already been used by libc at this point, and in particular glibc
			// and musl use the value as-is for stack and pointer protector
			// cookies from libc_start_main and/or dl_start. Also, cgo programs
			// may use the value after we do.
			startupRand = (*[16]byte)(unsafe.Pointer(val))[:]
		case _AT_PAGESZ:
			physPageSize = val
		case _AT_SECURE:
			secureMode = val == 1
		}

		// Give the architecture- and vdso-specific handlers a look too.
		archauxv(tag, val)
		vdsoauxv(tag, val)
	}
	return i / 2
}
// sysTHPSizePath is the sysfs file exposing the transparent huge page size.
var sysTHPSizePath = []byte("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size\x00")

// getHugePageSize reads the transparent huge page size from sysfs.
// It returns 0 if the file is unavailable, unparsable, or holds a
// value that is not a power of 2.
func getHugePageSize() uintptr {
	var numbuf [20]byte
	fd := open(&sysTHPSizePath[0], 0 /* O_RDONLY */, 0)
	if fd < 0 {
		return 0
	}
	ptr := noescape(unsafe.Pointer(&numbuf[0]))
	n := read(fd, ptr, int32(len(numbuf)))
	closefd(fd)
	if n <= 0 {
		return 0
	}
	n-- // remove trailing newline

	v, err := strconv.Atoi(slicebytetostringtmp((*byte)(ptr), int(n)))
	if err != nil || v < 0 {
		v = 0
	}
	if v&(v-1) != 0 {
		// v is not a power of 2
		return 0
	}
	return uintptr(v)
}
// osinit performs OS-specific startup initialization: CPU count, huge
// page size, vgetrandom state, and 64-bit time configuration on 32-bit
// architectures.
func osinit() {
	numCPUStartup = getCPUCount()
	physHugePageSize = getHugePageSize()
	vgetrandomInit()
	configure64bitsTimeOn32BitsArchitectures()
}
var urandom_dev = []byte("/dev/urandom\x00")

// readRandom fills r from /dev/urandom and returns the number of bytes
// read (possibly negative on error).
// Note that all supported Linux kernels should provide AT_RANDOM which
// populates startupRand, so this fallback should be unreachable.
func readRandom(r []byte) int {
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	return int(n)
}
// goenvs populates the environment; Linux uses the generic Unix
// implementation.
func goenvs() {
	goenvs_unix()
}
// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
// Only signal handling is set up here.
//
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
	initsig(true)
}
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024) // Linux wants >= 2K
	mp.gsignal.m = mp
}

// gettid returns the caller's OS thread ID; implemented in assembly.
func gettid() uint32
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	minitSignals()

	// Cgo-created threads and the bootstrap m are missing a
	// procid. We need this for asynchronous preemption and it's
	// useful in debuggers.
	getg().m.procid = uint64(gettid())
}
// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
	unminitSignals()
	// Clear procid so a stale thread ID is not observed after the
	// thread is dropped.
	getg().m.procid = 0
}
// Called from mexit, but not from dropm, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
//
// On Linux there is currently nothing to release here.
//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}
// #ifdef GOARCH_386
// #define sa_handler k_sa_handler
// #endif

// sigreturn__sigaction is used as sa_restorer on x86 (see setsig);
// implemented in assembly.
func sigreturn__sigaction()

// sigtramp is the signal entry trampoline; implemented in assembly.
func sigtramp() // Called via C ABI

// cgoSigtramp is the cgo-aware signal trampoline; implemented in assembly.
func cgoSigtramp()

//go:noescape
func sigaltstack(new, old *stackt)

//go:noescape
func setitimer(mode int32, new, old *itimerval)

//go:noescape
func timer_create(clockid int32, sevp *sigevent, timerid *int32) int32

//go:noescape
func timer_delete(timerid int32) int32

//go:noescape
func rtsigprocmask(how int32, new, old *sigset, size int32)

// sigprocmask wraps rtsigprocmask, supplying the sigset size.
//
//go:nosplit
//go:nowritebarrierrec
func sigprocmask(how int32, new, old *sigset) {
	rtsigprocmask(how, new, old, int32(unsafe.Sizeof(*new)))
}

func raise(sig uint32)
func raiseproc(sig uint32)

//go:noescape
func sched_getaffinity(pid, len uintptr, buf *byte) int32

func osyield()

// osyield_no_g is a variant of osyield that is safe to call with no g.
//
//go:nosplit
func osyield_no_g() {
	osyield()
}
func pipe2(flags int32) (r, w int32, errno int32)

// fcntl issues the fcntl syscall on fd with the given command and
// argument, returning the result and errno.
//
//go:nosplit
func fcntl(fd, cmd, arg int32) (ret int32, errno int32) {
	r, _, err := linux.Syscall6(linux.SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0)
	return int32(r), int32(err)
}

const (
	// Kernel-side structure sizes for siginfo and sigevent.
	_si_max_size    = 128
	_sigev_max_size = 64
)
// setsig installs fn as the handler for signal i, routing the runtime's
// sighandler through the appropriate assembly trampoline.
//
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa sigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTORER | _SA_RESTART
	sigfillset(&sa.sa_mask)
	// Although Linux manpage says "sa_restorer element is obsolete and
	// should not be used". x86_64 kernel requires it. Only use it on
	// x86. Note that on 386 this is cleared when using the C sigaction
	// function via cgo; see fixSigactionForCgo.
	if GOARCH == "386" || GOARCH == "amd64" {
		sa.sa_restorer = abi.FuncPCABI0(sigreturn__sigaction)
	}
	if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
		if iscgo {
			fn = abi.FuncPCABI0(cgoSigtramp)
		} else {
			fn = abi.FuncPCABI0(sigtramp)
		}
	}
	sa.sa_handler = fn
	sigaction(i, &sa, nil)
}
// setsigstack ensures the handler for signal i runs on the alternate
// signal stack, leaving handlers that already have SA_ONSTACK untouched.
//
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	var sa sigactiont
	sigaction(i, nil, &sa)
	if sa.sa_flags&_SA_ONSTACK != 0 {
		return
	}
	sa.sa_flags |= _SA_ONSTACK
	sigaction(i, &sa, nil)
}
// getsig returns the currently installed handler for signal i.
//
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa sigactiont
	sigaction(i, nil, &sa)
	return sa.sa_handler
}
// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	// Written via unsafe because ss_sp's declared type varies by platform.
	*(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
}

// fixsigcode adjusts the signal code if needed; Linux needs no
// adjustment, so this is a no-op.
//
//go:nosplit
func (c *sigctxt) fixsigcode(sig uint32) {
}
// sysSigaction calls the rt_sigaction system call.
//
//go:nosplit
func sysSigaction(sig uint32, new, old *sigactiont) {
	if rt_sigaction(uintptr(sig), new, old, unsafe.Sizeof(sigactiont{}.sa_mask)) != 0 {
		// Workaround for bugs in QEMU user mode emulation.
		//
		// QEMU turns calls to the sigaction system call into
		// calls to the C library sigaction call; the C
		// library call rejects attempts to call sigaction for
		// SIGCANCEL (32) or SIGSETXID (33).
		//
		// QEMU rejects calling sigaction on SIGRTMAX (64).
		//
		// Just ignore the error in these case. There isn't
		// anything we can do about it anyhow.
		if sig != 32 && sig != 33 && sig != 64 {
			// Use system stack to avoid split stack overflow on ppc64/ppc64le.
			systemstack(func() {
				throw("sigaction failed")
			})
		}
	}
}

// rt_sigaction is implemented in assembly.
//
//go:noescape
func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
// fixSigactionForCgo is called when we are using cgo to call the
// C sigaction function. On 386 the C function does not expect the
// SA_RESTORER flag to be set, and in some cases will fail if it is set:
// it will pass the SA_RESTORER flag to the kernel without passing
// the sa_restorer field. Since the C function will handle SA_RESTORER
// for us, we need not pass it. See issue #75253.
//
//go:nosplit
func fixSigactionForCgo(new *sigactiont) {
	// Only a non-nil action on 386 needs fixing.
	if GOARCH != "386" || new == nil {
		return
	}
	new.sa_flags &^= _SA_RESTORER
	new.sa_restorer = 0
}
func getpid() int
func tgkill(tgid, tid, sig int)

// signalM sends a signal to mp.
func signalM(mp *m, sig int) {
	// tgkill targets the specific thread (mp.procid) in this process.
	tgkill(getpid(), int(mp.procid), sig)
}
// validSIGPROF compares this signal delivery's code against the signal sources
// that the profiler uses, returning whether the delivery should be processed.
// To be processed, a signal delivery from a known profiling mechanism should
// correspond to the best profiling mechanism available to this thread. Signals
// from other sources are always considered valid.
//
//go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool {
	code := int32(c.sigcode())
	setitimer := code == _SI_KERNEL  // from the process-wide setitimer profiler
	timer_create := code == _SI_TIMER // from this thread's timer_create timer

	if !(setitimer || timer_create) {
		// The signal doesn't correspond to a profiling mechanism that the
		// runtime enables itself. There's no reason to process it, but there's
		// no reason to ignore it either.
		return true
	}

	if mp == nil {
		// Since we don't have an M, we can't check if there's an active
		// per-thread timer for this thread. We don't know how long this thread
		// has been around, and if it happened to interact with the Go scheduler
		// at a time when profiling was active (causing it to have a per-thread
		// timer). But it may have never interacted with the Go scheduler, or
		// never while profiling was active. To avoid double-counting, process
		// only signals from setitimer.
		//
		// When a custom cgo traceback function has been registered (on
		// platforms that support runtime.SetCgoTraceback), SIGPROF signals
		// delivered to a thread that cannot find a matching M do this check in
		// the assembly implementations of runtime.cgoSigtramp.
		return setitimer
	}

	// Having an M means the thread interacts with the Go scheduler, and we can
	// check whether there's an active per-thread timer for this thread.
	if mp.profileTimerValid.Load() {
		// If this M has its own per-thread CPU profiling interval timer, we
		// should track the SIGPROF signals that come from that timer (for
		// accurate reporting of its CPU usage; see issue 35057) and ignore any
		// that it gets from the process-wide setitimer (to not over-count its
		// CPU consumption).
		return timer_create
	}

	// No active per-thread timer means the only valid profiler is setitimer.
	return setitimer
}
// setProcessCPUProfiler starts or stops the process-wide CPU profiler
// at hz samples per second.
func setProcessCPUProfiler(hz int32) {
	setProcessCPUProfilerTimer(hz)
}
// setThreadCPUProfiler configures a per-thread CPU profiling timer
// (timer_create on CLOCK_THREAD_CPUTIME_ID) for the current M at hz
// samples per second. hz == 0 disables the per-thread timer. If a
// timer cannot be created, profileTimerValid stays false and the
// process-wide setitimer profiler is used instead.
func setThreadCPUProfiler(hz int32) {
	mp := getg().m
	mp.profilehz = hz

	// destroy any active timer
	if mp.profileTimerValid.Load() {
		timerid := mp.profileTimer
		mp.profileTimerValid.Store(false)
		mp.profileTimer = 0

		ret := timer_delete(timerid)
		if ret != 0 {
			print("runtime: failed to disable profiling timer; timer_delete(", timerid, ") errno=", -ret, "\n")
			throw("timer_delete")
		}
	}

	if hz == 0 {
		// If the goal was to disable profiling for this thread, then the job's done.
		return
	}

	// The period of the timer should be 1/Hz. For every "1/Hz" of additional
	// work, the user should expect one additional sample in the profile.
	//
	// But to scale down to very small amounts of application work, to observe
	// even CPU usage of "one tenth" of the requested period, set the initial
	// timing delay in a different way: So that "one tenth" of a period of CPU
	// spend shows up as a 10% chance of one sample (for an expected value of
	// 0.1 samples), and so that "two and six tenths" periods of CPU spend show
	// up as a 60% chance of 3 samples and a 40% chance of 2 samples (for an
	// expected value of 2.6). Set the initial delay to a value in the uniform
	// random distribution between 0 and the desired period. And because "0"
	// means "disable timer", add 1 so the half-open interval [0,period) turns
	// into (0,period].
	//
	// Otherwise, this would show up as a bias away from short-lived threads and
	// from threads that are only occasionally active: for example, when the
	// garbage collector runs on a mostly-idle system, the additional threads it
	// activates may do a couple milliseconds of GC-related work and nothing
	// else in the few seconds that the profiler observes.
	spec := new(itimerspec)
	spec.it_value.setNsec(1 + int64(cheaprandn(uint32(1e9/hz))))
	spec.it_interval.setNsec(1e9 / int64(hz))

	var timerid int32
	var sevp sigevent
	sevp.notify = _SIGEV_THREAD_ID
	sevp.signo = _SIGPROF
	sevp.sigev_notify_thread_id = int32(mp.procid)
	ret := timer_create(_CLOCK_THREAD_CPUTIME_ID, &sevp, &timerid)
	if ret != 0 {
		// If we cannot create a timer for this M, leave profileTimerValid false
		// to fall back to the process-wide setitimer profiler.
		return
	}

	ret = timer_settime(timerid, 0, spec, nil)
	if ret != 0 {
		print("runtime: failed to configure profiling timer; timer_settime(", timerid,
			", 0, {interval: {",
			spec.it_interval.tv_sec, "s + ", spec.it_interval.tv_nsec, "ns} value: {",
			spec.it_value.tv_sec, "s + ", spec.it_value.tv_nsec, "ns}}, nil) errno=", -ret, "\n")
		throw("timer_settime")
	}

	mp.profileTimer = timerid
	mp.profileTimerValid.Store(true)
}
// perThreadSyscallArgs contains the system call number, arguments, and
// expected return values for a system call to be executed on all threads.
type perThreadSyscallArgs struct {
	trap uintptr
	a1   uintptr
	a2   uintptr
	a3   uintptr
	a4   uintptr
	a5   uintptr
	a6   uintptr
	r1   uintptr // expected first return value
	r2   uintptr // expected second return value
}

// perThreadSyscall is the system call to execute for the ongoing
// doAllThreadsSyscall.
//
// perThreadSyscall may only be written while mp.needPerThreadSyscall == 0 on
// all Ms.
var perThreadSyscall perThreadSyscallArgs
// syscall_runtime_doAllThreadsSyscall executes a specified system call on
// all Ms.
//
// The system call is expected to succeed and return the same value on every
// thread. If any threads do not match, the runtime throws.
//
//go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall
//go:uintptrescapes
func syscall_runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
	if iscgo {
		// In cgo, we are not aware of threads created in C, so this approach will not work.
		panic("doAllThreadsSyscall not supported with cgo enabled")
	}

	// STW to guarantee that user goroutines see an atomic change to thread
	// state. Without STW, goroutines could migrate Ms while change is in
	// progress and e.g., see state old -> new -> old -> new.
	//
	// N.B. Internally, this function does not depend on STW to
	// successfully change every thread. It is only needed for user
	// expectations, per above.
	stw := stopTheWorld(stwAllThreadsSyscall)

	// This function depends on several properties:
	//
	// 1. All OS threads that already exist are associated with an M in
	//    allm. i.e., we won't miss any pre-existing threads.
	// 2. All Ms listed in allm will eventually have an OS thread exist.
	//    i.e., they will set procid and be able to receive signals.
	// 3. OS threads created after we read allm will clone from a thread
	//    that has executed the system call. i.e., they inherit the
	//    modified state.
	//
	// We achieve these through different mechanisms:
	//
	// 1. Addition of new Ms to allm in allocm happens before clone of its
	//    OS thread later in newm.
	// 2. newm does acquirem to avoid being preempted, ensuring that new Ms
	//    created in allocm will eventually reach OS thread clone later in
	//    newm.
	// 3. We take allocmLock for write here to prevent allocation of new Ms
	//    while this function runs. Per (1), this prevents clone of OS
	//    threads that are not yet in allm.
	allocmLock.lock()

	// Disable preemption, preventing us from changing Ms, as we handle
	// this M specially.
	//
	// N.B. STW and lock() above do this as well, this is added for extra
	// clarity.
	acquirem()

	// N.B. allocmLock also prevents concurrent execution of this function,
	// serializing use of perThreadSyscall, mp.needPerThreadSyscall, and
	// ensuring all threads execute system calls from multiple calls in the
	// same order.

	// Execute the syscall on this thread first.
	r1, r2, errno := linux.Syscall6(trap, a1, a2, a3, a4, a5, a6)
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		// TODO(https://go.dev/issue/51192): ppc64 doesn't use r2.
		r2 = 0
	}
	if errno != 0 {
		// Local failure: unwind and report without touching other threads.
		releasem(getg().m)
		allocmLock.unlock()
		startTheWorld(stw)
		return r1, r2, errno
	}

	perThreadSyscall = perThreadSyscallArgs{
		trap: trap,
		a1:   a1,
		a2:   a2,
		a3:   a3,
		a4:   a4,
		a5:   a5,
		a6:   a6,
		r1:   r1,
		r2:   r2,
	}

	// Wait for all threads to start.
	//
	// As described above, some Ms have been added to allm prior to
	// allocmLock, but not yet completed OS clone and set procid.
	//
	// At minimum we must wait for a thread to set procid before we can
	// send it a signal.
	//
	// We take this one step further and wait for all threads to start
	// before sending any signals. This prevents system calls from getting
	// applied twice: once in the parent and once in the child, like so:
	//
	//	A                     B  C
	//
	//	           add C to allm
	//	doAllThreadsSyscall
	//	  allocmLock.lock()
	//	  signal B
	//	                      <receive signal>
	//	                      execute syscall
	//	                      <signal return>
	//	                         clone C
	//	                                 <thread start>
	//	                                 set procid
	//	  signal C
	//	                                 <receive signal>
	//	                                 execute syscall
	//	                                 <signal return>
	//
	// In this case, thread C inherited the syscall-modified state from
	// thread B and did not need to execute the syscall, but did anyway
	// because doAllThreadsSyscall could not be sure whether it was
	// required.
	//
	// Some system calls may not be idempotent, so we ensure each thread
	// executes the system call exactly once.
	for mp := allm; mp != nil; mp = mp.alllink {
		for atomic.Load64(&mp.procid) == 0 {
			// Thread is starting.
			osyield()
		}
	}

	// Signal every other thread, where they will execute perThreadSyscall
	// from the signal handler.
	gp := getg()
	tid := gp.m.procid
	for mp := allm; mp != nil; mp = mp.alllink {
		if atomic.Load64(&mp.procid) == tid {
			// Our thread already performed the syscall.
			continue
		}
		mp.needPerThreadSyscall.Store(1)
		signalM(mp, sigPerThreadSyscall)
	}

	// Wait for all threads to complete.
	for mp := allm; mp != nil; mp = mp.alllink {
		if mp.procid == tid {
			continue
		}
		for mp.needPerThreadSyscall.Load() != 0 {
			osyield()
		}
	}

	perThreadSyscall = perThreadSyscallArgs{}

	releasem(getg().m)
	allocmLock.unlock()
	startTheWorld(stw)

	return r1, r2, errno
}
// runPerThreadSyscall runs perThreadSyscall for this M if required.
//
// This function throws if the system call returns with anything other than the
// expected values.
//
//go:nosplit
func runPerThreadSyscall() {
	gp := getg()
	if gp.m.needPerThreadSyscall.Load() == 0 {
		return
	}

	args := perThreadSyscall
	r1, r2, errno := linux.Syscall6(args.trap, args.a1, args.a2, args.a3, args.a4, args.a5, args.a6)
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		// TODO(https://go.dev/issue/51192): ppc64 doesn't use r2.
		r2 = 0
	}
	// Every thread must observe the identical result recorded by the
	// initiating thread; any divergence indicates corrupted state.
	if errno != 0 || r1 != args.r1 || r2 != args.r2 {
		print("trap:", args.trap, ", a123456=[", args.a1, ",", args.a2, ",", args.a3, ",", args.a4, ",", args.a5, ",", args.a6, "]\n")
		print("results: got {r1=", r1, ",r2=", r2, ",errno=", errno, "}, want {r1=", args.r1, ",r2=", args.r2, ",errno=0}\n")
		fatal("AllThreadsSyscall6 results differ between threads; runtime corrupted")
	}

	gp.m.needPerThreadSyscall.Store(0)
}
const (
	_SI_USER     = 0  // si_code for signals sent by kill
	_SI_TKILL    = -6 // si_code for signals sent by tkill/tgkill
	_SYS_SECCOMP = 1  // si_code for SIGSYS raised by seccomp
)
// sigFromUser reports whether the signal was sent because of a call
// to kill or tgkill.
//
//go:nosplit
func (c *sigctxt) sigFromUser() bool {
	switch int32(c.sigcode()) {
	case _SI_USER, _SI_TKILL:
		return true
	}
	return false
}
// sigFromSeccomp reports whether the signal was sent from seccomp.
//
//go:nosplit
func (c *sigctxt) sigFromSeccomp() bool {
	return int32(c.sigcode()) == _SYS_SECCOMP
}
// mprotect changes the protection of the n bytes of memory at addr to
// prot, returning the syscall result and errno.
//
//go:nosplit
func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) {
	r, _, err := linux.Syscall6(linux.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0)
	return int32(r), int32(err)
}
// kernelVersion records a major.minor Linux kernel version.
type kernelVersion struct {
	major int
	minor int
}
// getKernelVersion returns major and minor kernel version numbers
// parsed from the uname release field. Failure to read or parse the
// release string is fatal.
func getKernelVersion() kernelVersion {
	var buf linux.Utsname
	if e := linux.Uname(&buf); e != 0 {
		throw("uname failed")
	}
	// Release is NUL-terminated; convert it without copying.
	rel := gostringnocopy(&buf.Release[0])
	major, minor, _, ok := parseRelease(rel)
	if !ok {
		throw("failed to parse kernel version from uname")
	}
	return kernelVersion{major: major, minor: minor}
}
// parseRelease parses a dot-separated version number of the form
// "major[.minor[.patch]]". It follows the semver syntax, but allows
// the minor and patch versions to be elided (they are reported as 0).
// Any suffix introduced by '-' or '+' (e.g. "-generic") is ignored.
// ok reports whether every present component parsed as an integer.
func parseRelease(rel string) (major, minor, patch int, ok bool) {
	// Ignore everything from the first dash or plus onward.
	for i := 0; i < len(rel); i++ {
		if c := rel[i]; c == '-' || c == '+' {
			rel = rel[:i]
			break
		}
	}

	// next consumes the leading numeric component of rel (up to the
	// first '.', which is discarded) and reports whether it parsed.
	next := func() (int, bool) {
		field := rel
		rel = ""
		for i := 0; i < len(field); i++ {
			if field[i] == '.' {
				rel = field[i+1:]
				field = field[:i]
				break
			}
		}
		n, err := strconv.Atoi(field)
		return n, err == nil
	}

	major, ok = next()
	if !ok || rel == "" {
		return
	}
	minor, ok = next()
	if !ok || rel == "" {
		return
	}
	patch, ok = next()
	return
}
// GE reports whether the running kernel version is greater than or
// equal to x.y.
func (kv kernelVersion) GE(x, y int) bool {
	if kv.major != x {
		return kv.major > x
	}
	return kv.minor >= y
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && !(386 || arm || mips || mipsle || (gccgo && (ppc || s390)))
package runtime
import (
"unsafe"
)
// configure64bitsTimeOn32BitsArchitectures is a no-op for the
// platforms matched by the build tag above; presumably they need no
// 64-bit-time_t switch-over (TODO confirm against the 32-bit variant).
func configure64bitsTimeOn32BitsArchitectures() {}
// futex invokes the futex(2) system call. Implemented in assembly
// (no Go body).
//
//go:noescape
func futex(addr unsafe.Pointer, op int32, val uint32, ts *timespec, addr2 unsafe.Pointer, val3 uint32) int32

// timer_settime invokes the timer_settime(2) system call for timer
// timerid. Implemented in assembly (no Go body).
//
//go:noescape
func timer_settime(timerid int32, flags int32, new, old *itimerspec) int32
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !mips && !mipsle && !mips64 && !mips64le && !s390x && !ppc64 && linux
package runtime
const (
	_SS_DISABLE  = 2  // sigaltstack ss_flags: alternate stack disabled
	_NSIG        = 65 // number of signal numbers (1.._NSIG-1 valid)
	_SIG_BLOCK   = 0  // rt_sigprocmask how: add to blocked set
	_SIG_UNBLOCK = 1  // rt_sigprocmask how: remove from blocked set
	_SIG_SETMASK = 2  // rt_sigprocmask how: replace blocked set
)

// It's hard to tease out exactly how big a Sigset is, but
// rt_sigprocmask crashes if we get it wrong, so if binaries
// are running, this is right.
type sigset [2]uint32

// sigset_all has every bit set: a mask covering all signals.
var sigset_all = sigset{^uint32(0), ^uint32(0)}
// sigaddset sets the bit for signal i (1-based) in mask.
//
//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
	word, bit := (i-1)/32, (uint32(i)-1)&31
	(*mask)[word] |= 1 << bit
}
// sigdelset clears the bit for signal i (1-based) in mask.
func sigdelset(mask *sigset, i int) {
	word, bit := (i-1)/32, (uint32(i)-1)&31
	(*mask)[word] &^= 1 << bit
}
// sigfillset sets every bit of the 64-bit signal mask pointed to by
// mask.
//
//go:nosplit
func sigfillset(mask *uint64) {
	*mask = 1<<64 - 1 // all signals
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && !arm && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !s390x && !ppc64 && !ppc64le
package runtime
// archauxv is called for each ELF auxiliary vector entry at startup.
// The architectures matched by the build tag above have no
// arch-specific auxv handling, so this is a no-op.
func archauxv(tag, val uintptr) {
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !openbsd
package runtime
// osStackAlloc performs OS-specific initialization before s is used
// as stack memory. On non-OpenBSD systems (see build tag) nothing is
// required, so it is a no-op.
func osStackAlloc(s *mspan) {
}

// osStackFree undoes the effect of osStackAlloc before s is returned
// to the heap. A no-op, matching osStackAlloc.
func osStackFree(s *mspan) {
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package runtime
const (
	// These values are the same on all known Unix systems.
	// If we find a discrepancy some day, we can split them out.
	_F_SETFD    = 2 // fcntl command: set file descriptor flags
	_FD_CLOEXEC = 1 // descriptor flag: close on exec
)

// closeonexec marks fd close-on-exec so it is not inherited across
// exec. The fcntl result is deliberately ignored (best effort).
//
//go:nosplit
func closeonexec(fd int32) {
	fcntl(fd, _F_SETFD, _FD_CLOEXEC)
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"internal/stringslite"
"unsafe"
)
// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32

const (
	// throwTypeNone means that we are not throwing.
	throwTypeNone throwType = iota

	// throwTypeUser is a throw due to a problem with the application.
	//
	// These throws do not include runtime frames, system goroutines, or
	// frame metadata.
	throwTypeUser

	// throwTypeRuntime is a throw due to a problem with Go itself.
	//
	// These throws include as much information as possible to aid in
	// debugging the runtime, including runtime frames, system goroutines,
	// and frame metadata.
	throwTypeRuntime
)
// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.
// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	// A panic raised from within the runtime itself indicates a runtime
	// bug; escalate to a throw so the full runtime stack is dumped.
	if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? internal/runtime/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		// Panicking allocates; panicking during malloc would recurse.
		throw(msg)
	}
}
// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}
// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by negative.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The goPanic{Index,Slice} functions are only used by wasm. All the other architectures
// use panic{Bounds,Extend} in assembly, which then call to panicBounds{64,32,32X}.
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	// Used only on wasm (see comment above); x is the signed index, y
	// the length.
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsIndex})
}

// goPanicIndexU is the unsigned-index variant of goPanicIndex.
//
//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsIndex})
}
// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceAlen})
}

// goPanicSliceAlenU is the unsigned variant of goPanicSliceAlen.
//
//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceAlen})
}

// goPanicSliceAcap reports s[:x] with x > cap(s).
//
//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceAcap})
}

// goPanicSliceAcapU is the unsigned variant of goPanicSliceAcap.
//
//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceB})
}

// goPanicSliceBU is the unsigned variant of goPanicSliceB.
//
//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceB})
}
// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3Alen})
}

// goPanicSlice3AlenU is the unsigned variant of goPanicSlice3Alen.
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3Alen})
}

// goPanicSlice3Acap reports s[::x] with x > cap(s).
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3Acap})
}

// goPanicSlice3AcapU is the unsigned variant of goPanicSlice3Acap.
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3B})
}

// goPanicSlice3BU is the unsigned variant of goPanicSlice3B.
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3C})
}

// goPanicSlice3CU is the unsigned variant of goPanicSlice3C.
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3C})
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice length too short to convert to array or pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsConvert})
}
// Implemented in assembly. Declared here to mark them as ABIInternal.
func panicBounds() // in asm_GOARCH.s files, called from generated code
func panicExtend() // in asm_GOARCH.s files, called from generated code (on 32-bit archs)

// panicBounds64 builds and raises a boundsError on 64-bit
// architectures. pc identifies the faulting call site; the encoded
// bounds info is looked up in PCDATA and operand values are recovered
// from the register snapshot in regs.
func panicBounds64(pc uintptr, regs *[16]int64) { // called from panicBounds on 64-bit archs
	f := findfunc(pc)
	// pc-1: look up the PCDATA entry covering the instruction before
	// the return address.
	v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)
	code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v))

	if code == abi.BoundsIndex {
		panicCheck1(pc, "index out of range")
	} else {
		panicCheck1(pc, "slice bounds out of range")
	}

	var e boundsError
	e.code = code
	e.signed = signed
	// x and y may live in a register or be encoded as immediates.
	if xIsReg {
		e.x = regs[xVal]
	} else {
		e.x = int64(xVal)
	}
	if yIsReg {
		e.y = int(regs[yVal])
	} else {
		e.y = yVal
	}
	panic(e)
}
// panicBounds32 is the 32-bit analogue of panicBounds64: it recovers
// the bounds-check operands from 32-bit registers, sign- or
// zero-extending x according to the encoded signedness.
func panicBounds32(pc uintptr, regs *[16]int32) { // called from panicBounds on 32-bit archs
	f := findfunc(pc)
	v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)
	code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v))

	if code == abi.BoundsIndex {
		panicCheck1(pc, "index out of range")
	} else {
		panicCheck1(pc, "slice bounds out of range")
	}

	var e boundsError
	e.code = code
	e.signed = signed
	if xIsReg {
		if signed {
			e.x = int64(regs[xVal]) // sign-extend
		} else {
			e.x = int64(uint32(regs[xVal])) // zero-extend
		}
	} else {
		e.x = int64(xVal)
	}
	if yIsReg {
		e.y = int(regs[yVal])
	} else {
		e.y = yVal
	}
	panic(e)
}
// panicBounds32X handles the 32-bit case where x is a 64-bit value
// split across two 32-bit registers (hence panicExtend).
func panicBounds32X(pc uintptr, regs *[16]int32) { // called from panicExtend on 32-bit archs
	f := findfunc(pc)
	v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)
	code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v))

	if code == abi.BoundsIndex {
		panicCheck1(pc, "index out of range")
	} else {
		panicCheck1(pc, "slice bounds out of range")
	}

	var e boundsError
	e.code = code
	e.signed = signed
	if xIsReg {
		// Our 4-bit register numbers are actually 2 2-bit register numbers.
		lo := xVal & 3
		hi := xVal >> 2
		// Reassemble the 64-bit x from its high and low 32-bit halves.
		e.x = int64(regs[hi])<<32 + int64(uint32(regs[lo]))
	} else {
		e.x = int64(xVal)
	}
	if yIsReg {
		e.y = int(regs[yVal])
	} else {
		e.y = yVal
	}
	panic(e)
}
var shiftError = error(errorString("negative shift amount"))

// panicshift is called by compiler-generated code for a shift by a
// negative amount.
//
//go:yeswritebarrierrec
func panicshift() {
	panicCheck1(sys.GetCallerPC(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

// panicdivide is called for integer division by zero. It may be
// reached via the signal handler (sigpanic), hence panicCheck2.
//
//go:yeswritebarrierrec
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

// panicoverflow is called by the signal handler for integer overflow
// traps.
func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

// panicfloat is called by the signal handler for floating point traps.
func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem is called by the signal handler for faulting memory
// accesses.
func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

// panicmemAddr is like panicmem but records the faulting address in
// the error value.
func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}

var simdImmError = error(errorString("out-of-range immediate for simd intrinsic"))

// panicSimdImm is called for an out-of-range immediate passed to a
// simd intrinsic.
func panicSimdImm() {
	panicCheck2("simd immediate error")
	panic(simdImmError)
}
// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// Push a heap-allocated defer record onto this goroutine's chain.
	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()
}
// Errors reported for misuse of a range-over-func iterator, keyed by
// the abi.RF_* state the compiler-generated loop recorded.
var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
var rangePanicError = error(errorString("range function continued iteration after loop body panic"))
var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit"))
var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking"))

// panicrangestate panics with the error corresponding to the given
// abi.RF_State value; it throws on an unrecognized state.
//
//go:noinline
func panicrangestate(state int) {
	switch abi.RF_State(state) {
	case abi.RF_DONE:
		panic(rangeDoneError)
	case abi.RF_PANIC:
		panic(rangePanicError)
	case abi.RF_EXHAUSTED:
		panic(rangeExhaustedError)
	case abi.RF_MISSING_PANIC:
		panic(rangeMissingPanicError)
	}
	throw("unexpected state passed to panicrangestate")
}
// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// but an atomic list hanging off:
//
// g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
// | .head
// |
// +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then adds them to the main list.
//
// That is, deferconvert changes this list:
//
// g._defer => drangefunc -> d2 -> d1 -> nil
// | .head
// |
// +--> dY -> dX -> nil
//
// into this list:
//
// g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
func deferrangefunc() any {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// Push a marker defer (rangefunc=true) onto the ordinary chain;
	// deferprocat will hang extra defers off d.head (see the long
	// comment above).
	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()

	d.rangefunc = true
	d.head = new(atomic.Pointer[_defer])

	// The returned token is passed to deferprocat by the loop body.
	return d.head
}
// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
// The sentinel is address 1, which can never be a real *_defer.
func badDefer() *_defer {
	return (*_defer)(unsafe.Pointer(uintptr(1)))
}
// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
	head := frame.(*atomic.Pointer[_defer])
	if raceenabled {
		racewritepc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferprocat))
	}
	d1 := newdefer()
	d1.fn = fn
	// CAS-push d1 onto the atomic list; throw if the head has been
	// poisoned (the range func already returned — see deferconvert).
	for {
		d1.link = head.Load()
		if d1.link == badDefer() {
			throw("defer after range func returned")
		}
		if head.CompareAndSwap(d1.link, d1) {
			break
		}
	}
}
// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// following d0.
// See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer) {
	head := d0.head
	if raceenabled {
		racereadpc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferconvert))
	}
	tail := d0.link
	d0.rangefunc = false

	// Atomically detach the list, replacing it with the poison value
	// so later deferprocat calls throw instead of racing with us.
	var d *_defer
	for {
		d = head.Load()
		if head.CompareAndSwap(d, badDefer()) {
			break
		}
	}
	if d == nil {
		return
	}
	// Stamp each extra defer with d0's frame (sp/pc) and splice the
	// detached list in front of d0's successors.
	for d1 := d; ; d1 = d1.link {
		d1.sp = d0.sp
		d1.pc = d0.pc
		if d1.link == nil {
			d1.link = tail
			break
		}
	}
	d0.link = d
	return
}
// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.heap = false
	d.rangefunc = false
	d.sp = sys.GetCallerSP()
	d.pc = sys.GetCallerPC()
	// The lines below implement:
	//   d.link = gp._defer
	//   d.head = nil
	//   gp._defer = d
	// But without write barriers. The first two are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The third write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&d.head)) = 0
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
}
// Each P holds a pool for defers.

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	mp := acquirem()
	pp := mp.p.ptr()
	// Local pool empty: refill up to half capacity from the global pool.
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	// Pop from the local pool if it has anything.
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	mp, pp = nil, nil // don't use after releasem

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}
// popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g) {
	d := gp._defer
	d.fn = nil // Can in theory point to the stack
	// We must not copy the stack between the updating gp._defer and setting
	// d.link to nil. Between these two steps, d is not on any defer list, so
	// stack copying won't adjust stack pointers in it (namely, d.link). Hence,
	// if we were to copy the stack, d could then contain a stale pointer.
	gp._defer = d.link
	d.link = nil
	// After this point we can copy the stack.

	// Stack-allocated records are not pooled.
	if !d.heap {
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	// Clear the record so the pool holds no stale pointers.
	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	mp, pp = nil, nil // don't use after releasem
}
// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	// Drive the defer state machine in deferreturn mode: only defers
	// belonging to the caller's frame are run (see _panic.start).
	var p _panic
	p.deferreturn = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}
}
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
//
// It crashes if called from a thread not created by the Go runtime.
func Goexit() {
	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true

	// Run every deferred call on the goroutine, then exit.
	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	goexit1()
}
// Call all Error and String methods before freezing the world.
// Used when crashing with panicking.
func preprintpanics(p *_panic) {
	// User Error/String methods may themselves panic; convert any such
	// panic into a throw so crash reporting cannot recurse.
	defer func() {
		text := "panic while printing panic value"
		switch r := recover().(type) {
		case nil:
			// nothing to do
		case string:
			throw(text + ": " + r)
		default:
			throw(text + ": type " + toRType(efaceOf(&r)._type).string())
		}
	}()
	for p != nil {
		if p.link != nil && *efaceOf(&p.link.arg) == *efaceOf(&p.arg) {
			// This panic contains the same value as the next one in the chain.
			// Mark it as repanicked. We will skip printing it twice in a row.
			p.link.repanicked = true
			p = p.link
			continue
		}
		// Replace the value with its string form now, while calling
		// user code is still safe.
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}
// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	// Recurse first so panics print oldest-to-newest.
	if p.link != nil {
		printpanics(p.link)
		if p.link.repanicked {
			return
		}
		if !p.link.goexit {
			print("\t")
		}
	}
	// Goexit markers are not real panics; nothing to print.
	if p.goexit {
		return
	}
	print("panic: ")
	printpanicval(p.arg)
	if p.recovered && p.repanicked {
		print(" [recovered, repanicked]")
	} else if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}
// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// The implementation is the same with runtime.readvarint, except that this function
// uses unsafe.Pointer for speed.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = add(fd, unsafe.Sizeof(b))
		// High bit clear: this is the final byte.
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		// Accumulate the low 7 bits; & 31 keeps the shift in range
		// for the compiler (shift is bounded by the check below).
		r += uint32(b&0x7F) << (shift & 31)
		shift += 7
		// A uint32 varint needs at most 5 bytes (shifts 0..28).
		if shift > 28 {
			panic("Bad varint")
		}
	}
}
// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
type PanicNilError struct {
	// This field makes PanicNilError structurally different from
	// any other struct in this package, and the _ makes it different
	// from any struct in other packages too.
	// This avoids any accidental conversions being possible
	// between this struct and some other struct sharing the same fields,
	// like happened in go.dev/issue/56603.
	_ [0]*PanicNilError
}

// Error implements the error interface.
func (*PanicNilError) Error() string { return "panic called with nil argument" }

// RuntimeError marks this as a runtime error (runtime.Error interface).
func (*PanicNilError) RuntimeError() {}

// panicnil counts non-default uses of the panicnil GODEBUG setting.
var panicnil = &godebugInc{name: "panicnil"}
// The implementation of the predeclared function panic.
// The compiler emits calls to this function.
//
// gopanic should be an internal detail,
// but historically, widely used packages access it using linkname.
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopanic
func gopanic(e any) {
	// panic(nil) becomes *PanicNilError unless GODEBUG=panicnil=1.
	if e == nil {
		if debug.panicnil.Load() != 1 {
			e = new(PanicNilError)
		} else {
			panicnil.IncNonDefault()
		}
	}

	// Panics are not allowed in contexts where running deferred user
	// code is unsafe; turn them into throws.
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printpanicval(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.gopanicFP = unsafe.Pointer(sys.GetCallerSP())

	runningPanicDefers.Add(1)

	// Run deferred calls until one recovers (nextDefer then never
	// returns; see recovery) or the chain is exhausted.
	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	// If we're tracing, flush the current generation to make the trace more
	// readable.
	//
	// TODO(aktau): Handle a panic from within traceAdvance more gracefully.
	// Currently it would hang. Not handled now because it is very unlikely, and
	// already unrecoverable.
	if traceEnabled() {
		traceAdvance(false)
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(&p)

	fatalpanic(&p)   // should not return
	*(*int)(nil) = 0 // not reached
}
// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
	gp := getg()

	// Record the caller's PC and SP, so recovery can identify panics
	// that have been recovered. Also, so that if p is from Goexit, we
	// can restart its defer processing loop if a recovered panic tries
	// to jump past it.
	p.startPC = sys.GetCallerPC()
	p.startSP = unsafe.Pointer(sys.GetCallerSP())

	if p.deferreturn {
		// deferreturn mode: stay in the caller's frame; do not push
		// onto the panic chain or unwind.
		p.sp = sp

		if s := (*savedOpenDeferState)(gp.param); s != nil {
			// recovery saved some state for us, so that we can resume
			// calling open-coded defers without unwinding the stack.
			gp.param = nil

			p.retpc = s.retpc
			p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
			p.slotsPtr = add(sp, s.slotsOffset)
		}

		return
	}

	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))

	// Initialize state machine, and find the first frame with a defer.
	//
	// Note: We could use startPC and startSP here, but callers will
	// never have defer statements themselves. By starting at their
	// caller instead, we avoid needing to unwind through an extra
	// frame. It also somewhat simplifies the terminating condition for
	// deferreturn.
	p.pc, p.sp = pc, sp
	p.nextFrame()
}
// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
	gp := getg()

	if !p.deferreturn {
		if gp._panic != p {
			throw("bad panic stack")
		}

		// A deferred call recovered this panic; resume execution at
		// the recovery point instead of returning.
		if p.recovered {
			mcall(recovery) // does not return
			throw("recovery failed")
		}
	}

	for {
		// First drain any pending open-coded defers in the current frame.
		for p.deferBitsPtr != nil {
			bits := *p.deferBitsPtr

			// Check whether any open-coded defers are still pending.
			//
			// Note: We need to check this upfront (rather than after
			// clearing the top bit) because it's possible that Goexit
			// invokes a deferred call, and there were still more pending
			// open-coded defers in the frame; but then the deferred call
			// panic and invoked the remaining defers in the frame, before
			// recovering and restarting the Goexit loop.
			if bits == 0 {
				p.deferBitsPtr = nil
				break
			}

			// Find index of top bit set.
			i := 7 - uintptr(sys.LeadingZeros8(bits))

			// Clear bit and store it back.
			bits &^= 1 << i
			*p.deferBitsPtr = bits

			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
		}

	Recheck:
		// Then take linked defers belonging to the current frame (d.sp
		// matches), expanding rangefunc markers in place.
		if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
			if d.rangefunc {
				deferconvert(d)
				popDefer(gp)
				goto Recheck
			}

			fn := d.fn

			p.retpc = d.pc

			// Unlink and free.
			popDefer(gp)

			return fn, true
		}

		// No defers left in this frame; unwind to the next one.
		if !p.nextFrame() {
			return nil, false
		}
	}
}
// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
	if p.pc == 0 {
		return false
	}

	gp := getg()
	systemstack(func() {
		// limit is the SP of the innermost linked defer, if any; a
		// frame at that SP has linked defers to run.
		var limit uintptr
		if d := gp._defer; d != nil {
			limit = d.sp
		}

		var u unwinder
		u.initAt(p.pc, uintptr(p.sp), 0, gp, 0)
		for {
			if !u.valid() {
				// Ran off the top of the stack: no more defers.
				p.pc = 0
				return // ok == false
			}

			// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
			// every frame containing a defer (not just open-coded defers),
			// then we can simply loop until we find the next frame where
			// it's non-zero.

			if u.frame.sp == limit {
				break // found a frame with linked defers
			}

			if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
				break // found a frame with open-coded defers
			}
			if p.link != nil && uintptr(u.frame.sp) == uintptr(p.link.startSP) && uintptr(p.link.sp) > u.frame.sp {
				// Skip ahead to where the next panic up the stack was last looking
				// for defers. See issue 77062.
				//
				// The startSP condition is to check when we have walked up the stack
				// to where the next panic up the stack started. If so, the processing
				// of that panic has run all the defers up to its current scanning
				// position.
				//
				// The final condition is just to make sure that the line below
				// is actually helpful.
				u.initAt(p.link.pc, uintptr(p.link.sp), 0, gp, 0)
				continue
			}
			u.next()
		}

		// Record the found frame as the panic's current position.
		p.pc = u.frame.pc
		p.sp = unsafe.Pointer(u.frame.sp)
		p.fp = unsafe.Pointer(u.frame.fp)

		ok = true
	})

	return
}
// initOpenCodedDefers reports whether fn has pending open-coded defers
// and, if so, initializes p's open-coded defer state (retpc,
// deferBitsPtr, slotsPtr) from fn's FUNCDATA so nextDefer can run them.
// varp is the frame's variable pointer; offsets in the funcdata are
// relative to it.
func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
	fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
	if fd == nil {
		// fn has no open-coded defers at all.
		return false
	}

	if fn.deferreturn == 0 {
		throw("missing deferreturn")
	}

	// First varint: frame offset of the active-defer bitmask.
	deferBitsOffset, fd := readvarintUnsafe(fd)
	deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
	if *deferBitsPtr == 0 {
		return false // has open-coded defers, but none pending
	}

	// Second varint: frame offset of the defer closure slots.
	slotsOffset, fd := readvarintUnsafe(fd)

	p.retpc = fn.entry() + uintptr(fn.deferreturn)
	p.deferBitsPtr = deferBitsPtr
	p.slotsPtr = add(varp, -uintptr(slotsOffset))

	return true
}
// The implementation of the predeclared function recover.
//
// Returns the current panic value and marks the panic as recovered when
// called from a valid position (directly from a deferred function of
// the panicking frame, modulo compiler-generated wrappers); otherwise
// returns nil.
func gorecover() any {
	gp := getg()
	p := gp._panic
	if p == nil || p.goexit || p.recovered {
		// Nothing to recover: no active panic, a Goexit in progress,
		// or the panic was already recovered.
		return nil
	}

	// Check to see if the function that called recover() was
	// deferred directly from the panicking function.
	// For code like:
	// func foo() {
	//     defer bar()
	//     panic("panic")
	// }
	// func bar() {
	//     recover()
	// }
	// Normally the stack would look like this:
	// foo
	// runtime.gopanic
	// bar
	// runtime.gorecover
	//
	// However, if the function we deferred requires a wrapper
	// of some sort, we need to ignore the wrapper. In that case,
	// the stack looks like:
	// foo
	// runtime.gopanic
	// wrapper
	// bar
	// runtime.gorecover
	// And we should also successfully recover.
	//
	// Finally, in the weird case "defer recover()", the stack looks like:
	// foo
	// runtime.gopanic
	// wrapper
	// runtime.gorecover
	// And we should not recover in that case.
	//
	// So our criteria is, there must be exactly one non-wrapper
	// frame between gopanic and gorecover.
	//
	// We don't recover this:
	// defer func() { func() { recover() }() }()
	// because there are 2 non-wrapper frames.
	//
	// We don't recover this:
	// defer recover()
	// because there are 0 non-wrapper frames.
	canRecover := false
	systemstack(func() {
		// Unwind the stack, counting non-wrapper frames (including
		// inlined ones) between gorecover and the matching gopanic.
		var u unwinder
		u.init(gp, 0)
		u.next() // skip systemstack_switch
		u.next() // skip gorecover
		nonWrapperFrames := 0
	loop:
		for ; u.valid(); u.next() {
			for iu, f := newInlineUnwinder(u.frame.fn, u.symPC()); f.valid(); f = iu.next(f) {
				sf := iu.srcFunc(f)
				switch sf.funcID {
				case abi.FuncIDWrapper:
					// Compiler-generated wrappers don't count.
					continue
				case abi.FuncID_gopanic:
					// Must be the gopanic frame for *this* panic
					// (matching frame pointer), with exactly one
					// non-wrapper frame in between.
					if u.frame.fp == uintptr(p.gopanicFP) && nonWrapperFrames > 0 {
						canRecover = true
					}
					break loop
				default:
					nonWrapperFrames++
					if nonWrapperFrames > 1 {
						// More than one: recover() is too deep.
						break loop
					}
				}
			}
		}
	})
	if !canRecover {
		return nil
	}
	p.recovered = true
	return p.arg
}
// The following are linknamed trampolines that give specific std
// packages access to the runtime's unexported throw and fatal.

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
	fatal(s)
}

//go:linkname rand_fatal crypto/rand.fatal
func rand_fatal(s string) {
	fatal(s)
}

//go:linkname sysrand_fatal crypto/internal/sysrand.fatal
func sysrand_fatal(s string) {
	fatal(s)
}

//go:linkname fips_fatal crypto/internal/fips140.fatal
func fips_fatal(s string) {
	fatal(s)
}

//go:linkname maps_fatal internal/runtime/maps.fatal
func maps_fatal(s string) {
	fatal(s)
}

//go:linkname internal_sync_throw internal/sync.throw
func internal_sync_throw(s string) {
	throw(s)
}

//go:linkname internal_sync_fatal internal/sync.fatal
func internal_sync_fatal(s string) {
	fatal(s)
}

//go:linkname cgroup_throw internal/runtime/cgroup.throw
func cgroup_throw(s string) {
	throw(s)
}
// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
// throw does not return.
//
// throw should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cockroachdb/pebble
//   - github.com/dgraph-io/ristretto
//   - github.com/outcaste-io/ristretto
//   - github.com/pingcap/br
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname throw
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	// Freezes the world and exits; does not return.
	fatalthrow(throwTypeRuntime)
}
// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
	// If a panic is already in flight, its chain is reported first
	// (see printPreFatalDeferPanic below).
	p := getg()._panic
	// Everything fatal does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	printlock() // Prevent multiple interleaved fatal reports. See issue 69447.
	systemstack(func() {
		printPreFatalDeferPanic(p)
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeUser)
	// NOTE(review): fatalthrow does not return, so this printunlock is
	// unreachable; presumably kept so the printlock call above is
	// textually paired — confirm before removing.
	printunlock()
}
// printPreFatalDeferPanic prints the in-flight panic chain
// when fatal occurs in panics while running defer.
func printPreFatalDeferPanic(p *_panic) {
	// Don't call preprintpanics, because we
	// don't want to call String/Error on the panicked values.
	// When we fatal we really want to just print and exit,
	// no more executing user Go code.
	for x := p; x != nil; x = x.link {
		if x.link != nil && *efaceOf(&x.link.arg) == *efaceOf(&x.arg) {
			// This panic contains the same value as the next one in the chain.
			// Mark it as repanicked. We will skip printing it twice in a row.
			x.link.repanicked = true
		}
	}
	if p != nil {
		printpanics(p)
		// make fatal have the same indentation as non-first panics.
		print("\t")
	}
}
// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
// It is decremented in recovery and fatalpanic below.
var runningPanicDefers atomic.Uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// Incremented in startpanic_m, decremented in dopanic_m.
var panicking atomic.Uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers instead.
//
// recovery runs via mcall (see nextDefer) and does not return; it ends
// by gogo'ing back onto gp's stack.
func recovery(gp *g) {
	p := gp._panic
	pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
	// Remember whether the frame being recovered into still has pending
	// open-coded defers; if so, state is saved for deferreturn below.
	p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0

	// The linker records the f-relative address of a call to deferreturn in f's funcInfo.
	// Assuming a "normal" call to recover() inside one of f's deferred functions
	// invoked for a panic, that is the desired PC for exiting f.
	f := findfunc(pc)
	if f.deferreturn == 0 {
		throw("no deferreturn")
	}
	gotoPc := f.entry() + uintptr(f.deferreturn)

	// Unwind the panic stack: pop every panic that started within the
	// portion of the stack being unwound.
	for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
		// Don't allow jumping past a pending Goexit.
		// Instead, have its _panic.start() call return again.
		//
		// TODO(mdempsky): In this case, Goexit will resume walking the
		// stack where it left off, which means it will need to rewalk
		// frames that we've already processed.
		//
		// There's a similar issue with nested panics, when the inner
		// panic supersedes the outer panic. Again, we end up needing to
		// walk the same stack frames.
		//
		// These are probably pretty rare occurrences in practice, and
		// they don't seem any worse than the existing logic. But if we
		// move the unwinding state into _panic, we could detect when we
		// run into where the last panic started, and then just pick up
		// where it left off instead.
		//
		// With how subtle defer handling is, this might not actually be
		// worthwhile though.
		if p.goexit {
			gotoPc, sp = p.startPC, uintptr(p.startSP)
			saveOpenDeferState = false // goexit is unwinding the stack anyway
			break
		}

		runningPanicDefers.Add(-1)
	}
	gp._panic = p

	if p == nil { // must be done with signal
		gp.sig = 0
	}

	if gp.param != nil {
		throw("unexpected gp.param")
	}
	if saveOpenDeferState {
		// If we're returning to deferreturn and there are more open-coded
		// defers for it to call, save enough state for it to be able to
		// pick up where p0 left off.
		gp.param = unsafe.Pointer(&savedOpenDeferState{
			retpc: p0.retpc,

			// We need to save deferBitsPtr and slotsPtr too, but those are
			// stack pointers. To avoid issues around heap objects pointing
			// to the stack, save them as offsets from SP.
			deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
			slotsOffset:     uintptr(p0.slotsPtr) - uintptr(p0.sp),
		})
	}

	// TODO(mdempsky): Currently, we rely on frames containing "defer"
	// to end with "CALL deferreturn; RET". This allows deferreturn to
	// finish running any pending defers in the frame.
	//
	// But we should be able to tell whether there are still pending
	// defers here. If there aren't, we can just jump directly to the
	// "RET" instruction. And if there are, we don't need an actual
	// "CALL deferreturn" instruction; we can simulate it with something
	// like:
	//
	//	if usesLR {
	//		lr = pc
	//	} else {
	//		sp -= sizeof(pc)
	//		*(*uintptr)(sp) = pc
	//	}
	//	pc = funcPC(deferreturn)
	//
	// So that we effectively tail call into deferreturn, such that it
	// then returns to the simple "RET" epilogue. That would save the
	// overhead of the "deferreturn" call when there aren't actually any
	// pending defers left, and shrink the TEXT size of compiled
	// binaries. (Admittedly, both of these are modest savings.)

	// Ensure we're recovering within the appropriate stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// branch directly to the deferreturn
	gp.sched.sp = sp
	gp.sched.pc = gotoPc
	gp.sched.lr = 0

	// Restore the bp on platforms that support frame pointers.
	// N.B. It's fine to not set anything for platforms that don't
	// support frame pointers, since nothing consumes them.
	switch {
	case goarch.IsAmd64 != 0:
		// on x86, fp actually points one word higher than the top of
		// the frame since the return address is saved on the stack by
		// the caller
		gp.sched.bp = fp - 2*goarch.PtrSize
	case goarch.IsArm64 != 0:
		// on arm64, the architectural bp points one word higher
		// than the sp. fp is totally useless to us here, because it
		// only gets us to the caller's fp.
		gp.sched.bp = sp - goarch.PtrSize
	}
	// Resume execution at gotoPc on gp's stack; never returns here.
	gogo(&gp.sched)
}
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
// fatalthrow does not return.
//
//go:nosplit
func fatalthrow(t throwType) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()

	// Record the kind of throw, but don't clobber one already in progress.
	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
		if isSecureMode() {
			// In secure mode, exit without printing anything.
			exit(2)
		}

		startpanic_m()

		if dopanic_m(gp, pc, sp, nil) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
// fatalpanic does not return.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			runningPanicDefers.Add(-1)

			printpanics(msgs)
		}

		// If this panic is the result of a synctest bubble deadlock,
		// print stacks for the goroutines in the bubble.
		// NOTE(review): this assertion dereferences msgs without a nil
		// check (unlike the guarded use above) — it assumes every caller
		// passes a non-nil msgs; verify against callers.
		var bubble *synctestBubble
		if de, ok := msgs.arg.(synctestDeadlockError); ok {
			bubble = de.bubble
		}

		docrash = dopanic_m(gp, pc, sp, bubble)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	gp := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}

	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	gp.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if gp.m.locks < 0 {
		gp.m.locks = 1
	}

	// m.dying counts how many times this M has failed while panicking,
	// so nested failures degrade gracefully instead of looping.
	switch gp.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		gp.m.dying = 1
		panicking.Add(1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		gp.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		gp.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}
// didothers records whether tracebackothers already ran during this
// crash, so concurrent panicking Ms don't dump all stacks twice.
var didothers bool

// deadlock is locked twice in dopanic_m to park an M forever while
// another M finishes printing its panic report.
var deadlock mutex
// dopanic_m prints the crash report (signal info and stack traces,
// subject to GOTRACEBACK) and reports whether the process should crash
// (dump core) rather than exit.
//
// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
// If bubble is non-nil, print the stacks for goroutines in this group as well.
func dopanic_m(gp *g, pc, sp uintptr, bubble *synctestBubble) bool {
	if gp.sig != 0 {
		// Crash was triggered by a signal; report it first.
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
			// Only show the runtime (g0) stack for system-level
			// tracebacks or runtime-internal throws.
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers {
			if all {
				didothers = true
				tracebackothers(gp)
			} else if bubble != nil {
				// This panic is caused by a synctest bubble deadlock.
				// Print stacks for goroutines in the deadlocked bubble.
				tracebacksomeothers(gp, func(other *g) bool {
					return bubble == other.bubble
				})
			}
		}
	}
	unlock(&paniclk)

	if panicking.Add(-1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		// (Locking an already-held mutex blocks forever.)
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}
// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
	gp := getg()
	mp := acquirem()

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp != mp.curg {
		// Running on g0 or a gsignal stack, not a user goroutine.
		releasem(mp)
		return false
	}
	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
	if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
		// The M is in the middle of runtime work where a panic
		// would be unsafe.
		releasem(mp)
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		// Not running Go code, or inside a system call.
		releasem(mp)
		return false
	}
	if GOOS == "windows" && mp.libcallsp != 0 {
		// In the middle of a Windows library call.
		releasem(mp)
		return false
	}
	releasem(mp)
	return true
}
// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC or LR is good. Hopefully pushing a frame
	// will work.
	return true
}
// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	// abort's faulting PC resolves to a function tagged FuncID_abort;
	// any PC outside known code trivially isn't it.
	f := findfunc(pc)
	return f.valid() && f.funcID == abi.FuncID_abort
}
// dumpPanicDeferState prints gp's panic and defer state, frame by frame,
// interleaving each frame with the panics and defers associated with it.
// For debugging only.
//
//go:noinline
//go:nosplit
func dumpPanicDeferState(where string, gp *g) {
	systemstack(func() {
		println("DUMPPANICDEFERSTATE", where)

		p := gp._panic
		d := gp._defer

		var u unwinder
		for u.init(gp, 0); u.valid(); u.next() {
			// Print frame.
			println("  frame sp=", hex(u.frame.sp), "fp=", hex(u.frame.fp), "pc=", pcName(u.frame.pc), "+", pcOff(u.frame.pc))

			// Print panic.
			for p != nil && uintptr(p.sp) == u.frame.sp {
				println("    panic", p, "sp=", p.sp, "fp=", p.fp, "arg=", p.arg, "recovered=", p.recovered, "pc=", pcName(p.pc), "+", pcOff(p.pc), "retpc=", pcName(p.retpc), "+", pcOff(p.retpc), "startsp=", p.startSP, "gopanicfp=", p.gopanicFP, "startPC=", hex(p.startPC), pcName(p.startPC), "+", pcOff(p.startPC))
				p = p.link
			}

			// Print linked defers.
			for d != nil && d.sp == u.frame.sp {
				println("    defer(link)", "heap=", d.heap, "rangefunc=", d.rangefunc, fnName(d.fn))
				d = d.link
			}

			// Print open-coded defers.
			// (A function is all linked or all open-coded, so we don't
			// need to interleave this loop with the one above.)
			fd := funcdata(u.frame.fn, abi.FUNCDATA_OpenCodedDeferInfo)
			if fd != nil {
				// Decode the deferBits mask and the slots base from the
				// funcdata, same layout as initOpenCodedDefers reads.
				deferBitsOffset, fd := readvarintUnsafe(fd)
				m := *(*uint8)(unsafe.Pointer(u.frame.varp - uintptr(deferBitsOffset)))

				slotsOffset, fd := readvarintUnsafe(fd)
				slots := u.frame.varp - uintptr(slotsOffset)

				for i := 7; i >= 0; i-- {
					if m>>i&1 == 0 {
						continue
					}
					fn := *(*func())(unsafe.Pointer(slots + uintptr(i)*goarch.PtrSize))
					println("    defer(open)", fnName(fn))
				}
			}
		}

		// Anything left over wasn't matched to a frame above.
		if p != nil {
			println("  REMAINING PANICS!", p)
		}
		if d != nil {
			println("  REMAINING DEFERS!")
		}
	})
}
// pcName returns the name of the function containing pc, or "<unk>"
// when pc does not fall within any known function. For debugging only.
func pcName(pc uintptr) string {
	if fn := findfunc(pc); fn.valid() {
		return funcname(fn)
	}
	return "<unk>"
}
// pcOff returns pc's offset from the entry point of the function
// containing it, or 0 when pc is not in any known function.
// For debugging only.
func pcOff(pc uintptr) hex {
	if fn := findfunc(pc); fn.valid() {
		return hex(pc - fn.entry())
	}
	return hex(0)
}
// fnName returns the name of the function that fn refers to, by reading
// the code pointer out of the func value's closure word. For debugging only.
func fnName(fn func()) string {
	return pcName(**(**uintptr)(unsafe.Pointer(&fn)))
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/runtime/atomic"
"unsafe"
)
// A Pinner is a set of Go objects each pinned to a fixed location in memory. The
// [Pinner.Pin] method pins one object, while [Pinner.Unpin] unpins all pinned
// objects.
//
// The purpose of a Pinner is two-fold.
// First, it allows C code to safely use Go pointers that have not been passed
// explicitly to the C code via a cgo call.
// For example, for safely interacting with a pointer stored inside of a struct
// whose pointer is passed to a C function.
// Second, it allows C memory to safely retain that Go pointer even after the
// cgo call returns, provided the object remains pinned.
//
// A Pinner arranges for its objects to be automatically unpinned some time after
// it becomes unreachable, so its referents will not leak. However, this means the
// Pinner itself must be kept alive across a cgo call, or as long as C retains a
// reference to the pinned Go pointers.
//
// Reusing a Pinner is safe, and in fact encouraged, to avoid the cost of
// initializing new Pinners on first use.
//
// The zero value of Pinner is ready to use.
type Pinner struct {
	// Lazily allocated on first Pin (possibly taken from the P-local
	// pinnerCache); nil in the zero value.
	*pinner
}
// Pin pins a Go object, preventing it from being moved or freed by the garbage
// collector until the [Pinner.Unpin] method has been called.
//
// A pointer to a pinned object can be directly stored in C memory or can be
// contained in Go memory passed to C functions. If the pinned object itself
// contains pointers to Go objects, these objects must be pinned separately if they
// are going to be accessed from C code.
//
// The argument must be a pointer of any type or an [unsafe.Pointer].
//
// It's safe to call Pin on non-Go pointers, in which case Pin will do nothing.
func (p *Pinner) Pin(pointer any) {
	if p.pinner == nil {
		// First use of this Pinner: obtain backing storage.
		// Check the pinner cache first.
		mp := acquirem()
		if pp := mp.p.ptr(); pp != nil {
			p.pinner = pp.pinnerCache
			pp.pinnerCache = nil
		}
		releasem(mp)

		if p.pinner == nil {
			// Didn't get anything from the pinner cache.
			p.pinner = new(pinner)
			p.refs = p.refStore[:0]

			// We set this finalizer once and never clear it. Thus, if the
			// pinner gets cached, we'll reuse it, along with its finalizer.
			// This lets us avoid the relatively expensive SetFinalizer call
			// when reusing from the cache. The finalizer however has to be
			// resilient to an empty pinner being finalized, which is done
			// by checking p.refs' length.
			SetFinalizer(p.pinner, func(i *pinner) {
				if len(i.refs) != 0 {
					// A non-empty pinner became unreachable without
					// Unpin: release the pins, then report the leak.
					i.unpin() // only required to make the test idempotent
					pinnerLeakPanic()
				}
			})
		}
	}
	ptr := pinnerGetPtr(&pointer)
	if setPinned(ptr, true) {
		// Only Go heap pointers are recorded; non-Go pointers return false.
		p.refs = append(p.refs, ptr)
	}
}
// Unpin unpins all pinned objects of the [Pinner].
// It's safe and encouraged to reuse a Pinner after calling Unpin.
func (p *Pinner) Unpin() {
	p.pinner.unpin()

	mp := acquirem()
	if pp := mp.p.ptr(); pp != nil && pp.pinnerCache == nil {
		// Put the pinner back in the cache, but only if the
		// cache is empty. If application code is reusing Pinners
		// on its own, we want to leave the backing store in place
		// so reuse is more efficient.
		pp.pinnerCache = p.pinner
		p.pinner = nil
	}
	releasem(mp)
}
const (
	// pinnerSize is the target total size in bytes of a pinner value;
	// the inline refStore fills whatever the refs slice header leaves.
	pinnerSize         = 64
	pinnerRefStoreSize = (pinnerSize - unsafe.Sizeof([]unsafe.Pointer{})) / unsafe.Sizeof(unsafe.Pointer(nil))
)
// pinner is the backing store for a Pinner: the set of pinned pointers,
// with a small inline array to avoid heap allocation for few pins.
type pinner struct {
	refs     []unsafe.Pointer                     // pinned pointers; initially refStore[:0]
	refStore [pinnerRefStoreSize]unsafe.Pointer   // inline backing array for refs
}
// unpin clears the pin state of every recorded pointer and resets the
// pinner for reuse. Safe to call on a nil or never-used pinner.
func (p *pinner) unpin() {
	if p == nil || p.refs == nil {
		return
	}
	for i := range p.refs {
		setPinned(p.refs[i], false)
	}
	// The following two lines make all pointers to references
	// in p.refs unreachable, either by deleting them or dropping
	// p.refs' backing store (if it was not backed by refStore).
	p.refStore = [pinnerRefStoreSize]unsafe.Pointer{}
	p.refs = p.refStore[:0]
}
// pinnerGetPtr validates i as a pinnable argument and returns the raw
// pointer it carries. It panics when i is nil, is not a pointer or
// unsafe.Pointer, or points into a user arena chunk.
func pinnerGetPtr(i *any) unsafe.Pointer {
	e := efaceOf(i)
	t := e._type
	switch {
	case t == nil:
		panic(errorString("runtime.Pinner: argument is nil"))
	case t.Kind() != abi.Pointer && t.Kind() != abi.UnsafePointer:
		panic(errorString("runtime.Pinner: argument is not a pointer: " + toRType(t).string()))
	case inUserArenaChunk(uintptr(e.data)):
		// Arena-allocated objects are not eligible for pinning.
		panic(errorString("runtime.Pinner: object was allocated into an arena"))
	}
	return e.data
}
// isPinned checks if a Go pointer is pinned.
// nosplit, because it's called from nosplit code in cgocheck.
//
//go:nosplit
func isPinned(ptr unsafe.Pointer) bool {
	span := spanOfHeap(uintptr(ptr))
	if span == nil {
		// this code is only called for a Go pointer, so this must be a
		// linker-allocated global object.
		return true
	}
	pinnerBits := span.getPinnerBits()
	// these pinnerBits might get unlinked by a concurrently running sweep, but
	// that's OK because gcBits don't get cleared until the following GC cycle
	// (nextMarkBitArenaEpoch)
	if pinnerBits == nil {
		return false
	}
	objIndex := span.objIndex(uintptr(ptr))
	pinState := pinnerBits.ofObject(objIndex)
	KeepAlive(ptr) // make sure ptr is alive until we are done so the span can't be freed
	return pinState.isPinned()
}
// setPinned marks or unmarks a Go pointer as pinned, when the ptr is a Go pointer.
// It will be ignored while trying to pin a non-Go pointer,
// in which case it returns false.
// It will panic while trying to unpin a non-Go pointer,
// which should not happen in normal usage.
func setPinned(ptr unsafe.Pointer, pin bool) bool {
	span := spanOfHeap(uintptr(ptr))
	if span == nil {
		if !pin {
			panic(errorString("tried to unpin non-Go pointer"))
		}
		// This is a linker-allocated, zero size object or other object,
		// nothing to do, silently ignore it.
		return false
	}

	// ensure that the span is swept, b/c sweeping accesses the specials list
	// w/o locks.
	mp := acquirem()
	span.ensureSwept()
	KeepAlive(ptr) // make sure ptr is still alive after span is swept

	objIndex := span.objIndex(uintptr(ptr))

	lock(&span.speciallock) // guard against concurrent calls of setPinned on same span

	// Allocate the span's pinner bits on first pin of any of its objects.
	pinnerBits := span.getPinnerBits()
	if pinnerBits == nil {
		pinnerBits = span.newPinnerBits()
		span.setPinnerBits(pinnerBits)
	}
	pinState := pinnerBits.ofObject(objIndex)
	if pin {
		if pinState.isPinned() {
			// multiple pins on same object, set multipin bit
			pinState.setMultiPinned(true)
			// and increase the pin counter
			// TODO(mknyszek): investigate if systemstack is necessary here
			systemstack(func() {
				offset := objIndex * span.elemsize
				span.incPinCounter(offset)
			})
		} else {
			// set pin bit
			pinState.setPinned(true)
		}
	} else {
		// unpin
		if pinState.isPinned() {
			if pinState.isMultiPinned() {
				var exists bool
				// TODO(mknyszek): investigate if systemstack is necessary here
				systemstack(func() {
					offset := objIndex * span.elemsize
					exists = span.decPinCounter(offset)
				})
				if !exists {
					// counter is 0, clear multipin bit
					pinState.setMultiPinned(false)
				}
			} else {
				// no multipins recorded. unpin object.
				pinState.setPinned(false)
			}
		} else {
			// unpinning unpinned object, bail out
			throw("runtime.Pinner: object already unpinned")
		}
	}
	unlock(&span.speciallock)
	releasem(mp)
	return true
}
// pinState is a handle for one object's two pinner bits.
type pinState struct {
	bytep   *uint8 // pointer into the span's pinner bits
	byteVal uint8  // snapshot of *bytep taken when the state was loaded
	mask    uint8  // selects the pin bit; mask<<1 selects the multipin bit
}
// isPinned reports whether the object's pin bit was set in the snapshot.
// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (v *pinState) isPinned() bool {
	return (v.byteVal & v.mask) != 0
}
// isMultiPinned reports whether the object's multipin bit was set in
// the snapshot (i.e. it was pinned more than once).
func (v *pinState) isMultiPinned() bool {
	return (v.byteVal & (v.mask << 1)) != 0
}

// setPinned sets or clears the object's pin bit.
func (v *pinState) setPinned(val bool) {
	v.set(val, false)
}

// setMultiPinned sets or clears the object's multipin bit.
func (v *pinState) setMultiPinned(val bool) {
	v.set(val, true)
}
// set sets the pin bit of the pinState to val. If multipin is true, it
// sets/unsets the multipin bit instead.
func (v *pinState) set(val bool, multipin bool) {
	// The multipin bit sits directly above the pin bit.
	bit := v.mask
	if multipin {
		bit <<= 1
	}
	if !val {
		atomic.And8(v.bytep, ^bit)
		return
	}
	atomic.Or8(v.bytep, bit)
}
// pinnerBits is the same type as gcBits but has different methods.
// Each object gets two consecutive bits: pin (even) and multipin (odd).
type pinnerBits gcBits

// ofObject returns the pinState of the n'th object.
// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (p *pinnerBits) ofObject(n uintptr) pinState {
	// 2 bits per object, hence index n*2.
	bytep, mask := (*gcBits)(p).bitp(n * 2)
	byteVal := atomic.Load8(bytep)
	return pinState{bytep, byteVal, mask}
}
// pinnerBitSize returns the number of bytes needed for this span's
// pinner bits (2 bits per object, rounded up to whole bytes).
func (s *mspan) pinnerBitSize() uintptr {
	return divRoundUp(uintptr(s.nelems)*2, 8)
}
// newPinnerBits returns a pointer to 8 byte aligned bytes to be used for this
// span's pinner bits. newPinnerBits is used to mark objects that are pinned.
// They are copied when the span is swept.
func (s *mspan) newPinnerBits() *pinnerBits {
	// Allocated from the mark-bits arenas; 2 bits per object.
	return (*pinnerBits)(newMarkBits(uintptr(s.nelems) * 2))
}
// getPinnerBits atomically loads the span's pinner bits pointer
// (may be nil if no object in the span has ever been pinned).
// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (s *mspan) getPinnerBits() *pinnerBits {
	return (*pinnerBits)(atomic.Loadp(unsafe.Pointer(&s.pinnerBits)))
}
// setPinnerBits atomically stores p as the span's pinner bits pointer.
func (s *mspan) setPinnerBits(p *pinnerBits) {
	atomicstorep(unsafe.Pointer(&s.pinnerBits), unsafe.Pointer(p))
}
// refreshPinnerBits replaces pinnerBits with a fresh copy in the arenas for the
// next GC cycle. If it does not contain any pinned objects, pinnerBits of the
// span is set to nil.
func (s *mspan) refreshPinnerBits() {
	p := s.getPinnerBits()
	if p == nil {
		return
	}

	hasPins := false
	bytes := alignUp(s.pinnerBitSize(), 8)

	// Iterate over each 8-byte chunk and check for pins. Note that
	// newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we
	// don't have to worry about edge cases, irrelevant bits will simply be
	// zero.
	for _, x := range unsafe.Slice((*uint64)(unsafe.Pointer(&p.x)), bytes/8) {
		if x != 0 {
			hasPins = true
			break
		}
	}

	if hasPins {
		// Copy the bits forward into storage for the next cycle.
		newPinnerBits := s.newPinnerBits()
		memmove(unsafe.Pointer(&newPinnerBits.x), unsafe.Pointer(&p.x), bytes)
		s.setPinnerBits(newPinnerBits)
	} else {
		// No pins left; drop the bits entirely.
		s.setPinnerBits(nil)
	}
}
// incPinCounter is only called for multiple pins of the same object and records
// the _additional_ pins in a specialPinCounter attached to the span.
// Caller holds span.speciallock (called from setPinned).
func (span *mspan) incPinCounter(offset uintptr) {
	var rec *specialPinCounter
	ref, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		// First extra pin for this object: allocate and splice in a
		// counter special at the found splice point.
		lock(&mheap_.speciallock)
		rec = (*specialPinCounter)(mheap_.specialPinCounterAlloc.alloc())
		unlock(&mheap_.speciallock)
		// splice in record, fill in offset.
		rec.special.offset = offset
		rec.special.kind = _KindSpecialPinCounter
		rec.special.next = *ref
		*ref = (*special)(unsafe.Pointer(rec))
		spanHasSpecials(span)
	} else {
		rec = (*specialPinCounter)(unsafe.Pointer(*ref))
	}
	rec.counter++
}
// decPinCounter decreases the counter. If the counter reaches 0, the counter
// special is deleted and false is returned. Otherwise true is returned.
// Caller holds span.speciallock (called from setPinned).
func (span *mspan) decPinCounter(offset uintptr) bool {
	ref, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		throw("runtime.Pinner: decreased non-existing pin counter")
	}
	counter := (*specialPinCounter)(unsafe.Pointer(*ref))
	counter.counter--
	if counter.counter == 0 {
		// Unsplice the special from the span's list and free it.
		*ref = counter.special.next
		if span.specials == nil {
			spanHasNoSpecials(span)
		}
		lock(&mheap_.speciallock)
		mheap_.specialPinCounterAlloc.free(unsafe.Pointer(counter))
		unlock(&mheap_.speciallock)
		return false
	}
	return true
}
// pinnerGetPinCounter returns a pointer to the multi-pin counter for the
// object at addr, or nil if no counter special exists.
//
// only for tests
func pinnerGetPinCounter(addr unsafe.Pointer) *uintptr {
	_, span, objIndex := findObject(uintptr(addr), 0, 0)
	offset := objIndex * span.elemsize
	t, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		return nil
	}
	counter := (*specialPinCounter)(unsafe.Pointer(*t))
	return &counter.counter
}
// to be able to test that the GC panics when a pinned pointer is leaking, this
// panic function is a variable, that can be overwritten by a test.
var pinnerLeakPanic = func() {
	panic(errorString("runtime.Pinner: found leaking pinned pointer; forgot to call Unpin()?"))
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"unsafe"
)
// plugin_lastmoduleinit validates and initializes the most recently loaded
// plugin module and returns its path, exported symbol map, and init tasks.
// On failure it returns a non-empty errstr (and may mark the module bad).
//
// Called from the plugin package via linkname.
//
//go:linkname plugin_lastmoduleinit plugin.lastmoduleinit
func plugin_lastmoduleinit() (path string, syms map[string]any, initTasks []*initTask, errstr string) {
	// Find the last module in the list that has not been marked bad.
	var md *moduledata
	for pmd := firstmoduledata.next; pmd != nil; pmd = pmd.next {
		if pmd.bad {
			md = nil // we only want the last module
			continue
		}
		md = pmd
	}
	if md == nil {
		throw("runtime: no plugin module data")
	}
	if md.pluginpath == "" {
		throw("runtime: plugin has empty pluginpath")
	}
	if md.typemap != nil {
		// typemap is only set once a module is initialized below.
		return "", nil, nil, "plugin already loaded"
	}

	for _, pmd := range activeModules() {
		if pmd.pluginpath == md.pluginpath {
			md.bad = true
			return "", nil, nil, "plugin already loaded"
		}

		// Reject modules whose memory ranges overlap an active module;
		// that indicates corrupt or duplicated module data.
		if inRange(pmd.text, pmd.etext, md.text, md.etext) ||
			inRange(pmd.bss, pmd.ebss, md.bss, md.ebss) ||
			inRange(pmd.data, pmd.edata, md.data, md.edata) ||
			inRange(pmd.types, pmd.etypes, md.types, md.etypes) {
			println("plugin: new module data overlaps with previous moduledata")
			println("\tpmd.text-etext=", hex(pmd.text), "-", hex(pmd.etext))
			println("\tpmd.bss-ebss=", hex(pmd.bss), "-", hex(pmd.ebss))
			println("\tpmd.data-edata=", hex(pmd.data), "-", hex(pmd.edata))
			println("\tpmd.types-etypes=", hex(pmd.types), "-", hex(pmd.etypes))
			println("\tmd.text-etext=", hex(md.text), "-", hex(md.etext))
			println("\tmd.bss-ebss=", hex(md.bss), "-", hex(md.ebss))
			println("\tmd.data-edata=", hex(md.data), "-", hex(md.edata))
			println("\tmd.types-etypes=", hex(md.types), "-", hex(md.etypes))
			throw("plugin: new module data overlaps with previous moduledata")
		}
	}
	// Reject plugins built against different versions of shared packages.
	for _, pkghash := range md.pkghashes {
		if pkghash.linktimehash != *pkghash.runtimehash {
			md.bad = true
			return "", nil, nil, "plugin was built with a different version of package " + pkghash.modulename
		}
	}

	// Initialize the freshly loaded module.
	modulesinit()
	typelinksinit()

	pluginftabverify(md)
	moduledataverify1(md)

	lock(&itabLock)
	addModuleItabs(md)
	unlock(&itabLock)

	// Build a map of symbol names to symbols. Here in the runtime
	// we fill out the first word of the interface, the type. We
	// pass these zero value interfaces to the plugin package,
	// where the symbol value is filled in (usually via cgo).
	//
	// Because functions are handled specially in the plugin package,
	// function symbol names are prefixed here with '.' to avoid
	// a dependency on the reflect package.
	syms = make(map[string]any, len(md.ptab))
	for _, ptab := range md.ptab {
		symName := resolveNameOff(unsafe.Pointer(md.types), ptab.name)
		t := toRType((*_type)(unsafe.Pointer(md.types))).typeOff(ptab.typ) // TODO can this stack of conversions be simpler?
		var val any
		valp := (*[2]unsafe.Pointer)(unsafe.Pointer(&val))
		(*valp)[0] = unsafe.Pointer(t)
		name := symName.Name()
		if t.Kind() == abi.Func {
			name = "." + name
		}
		syms[name] = val
	}
	return md.pluginpath, syms, md.inittasks, ""
}
// pluginftabverify checks that every function-table entry of md lies within
// the module's PC range, printing diagnostics for any entry that does not,
// and throws if the table is bad.
func pluginftabverify(md *moduledata) {
	badtable := false
	for i := 0; i < len(md.ftab); i++ {
		entry := md.textAddr(md.ftab[i].entryoff)
		if md.minpc <= entry && entry <= md.maxpc {
			continue
		}

		f := funcInfo{(*_func)(unsafe.Pointer(&md.pclntable[md.ftab[i].funcoff])), md}
		name := funcname(f)

		// A common bug is f.entry has a relocation to a duplicate
		// function symbol, meaning if we search for its PC we get
		// a valid entry with a name that is useful for debugging.
		name2 := "none"
		entry2 := uintptr(0)
		f2 := findfunc(entry)
		if f2.valid() {
			name2 = funcname(f2)
			entry2 = f2.entry()
		}
		badtable = true
		println("ftab entry", hex(entry), "/", hex(entry2), ": ",
			name, "/", name2, "outside pc range:[", hex(md.minpc), ",", hex(md.maxpc), "], modulename=", md.modulename, ", pluginpath=", md.pluginpath)
	}
	if badtable {
		throw("runtime: plugin has bad symbol table")
	}
}
// inRange reports whether v0 or v1 are in the range [r0, r1].
func inRange(r0, r1, v0, v1 uintptr) bool {
	if r0 <= v0 && v0 <= r1 {
		return true
	}
	return r0 <= v1 && v1 <= r1
}
// A ptabEntry is generated by the compiler for each exported function
// and global variable in the main package of a plugin. It is used to
// initialize the plugin module's symbol map.
type ptabEntry struct {
	name nameOff // offset of the symbol's name in the module's name data
	typ  typeOff // offset of the symbol's type descriptor
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Goroutine preemption
//
// A goroutine can be preempted at any safe-point. Currently, there
// are a few categories of safe-points:
//
// 1. A blocked safe-point occurs for the duration that a goroutine is
// descheduled, blocked on synchronization, or in a system call.
//
// 2. Synchronous safe-points occur when a running goroutine checks
// for a preemption request.
//
// 3. Asynchronous safe-points occur at any instruction in user code
// where the goroutine can be safely paused and a conservative
// stack and register scan can find stack roots. The runtime can
// stop a goroutine at an async safe-point using a signal.
//
// At both blocked and synchronous safe-points, a goroutine's CPU
// state is minimal and the garbage collector has complete information
// about its entire stack. This makes it possible to deschedule a
// goroutine with minimal space, and to precisely scan a goroutine's
// stack.
//
// Synchronous safe-points are implemented by overloading the stack
// bound check in function prologues. To preempt a goroutine at the
// next synchronous safe-point, the runtime poisons the goroutine's
// stack bound to a value that will cause the next stack bound check
// to fail and enter the stack growth implementation, which will
// detect that it was actually a preemption and redirect to preemption
// handling.
//
// Preemption at asynchronous safe-points is implemented by suspending
// the thread using an OS mechanism (e.g., signals) and inspecting its
// state to determine if the goroutine was at an asynchronous
// safe-point. Since the thread suspension itself is generally
// asynchronous, it also checks if the running goroutine wants to be
// preempted, since this could have changed. If all conditions are
// satisfied, it adjusts the signal context to make it look like the
// signaled thread just called asyncPreempt and resumes the thread.
// asyncPreempt spills all registers and enters the scheduler.
//
// (An alternative would be to preempt in the signal handler itself.
// This would let the OS save and restore the register state and the
// runtime would only need to know how to extract potentially
// pointer-containing registers from the signal context. However, this
// would consume an M for every preempted G, and the scheduler itself
// is not designed to run from a signal handler, as it tends to
// allocate memory and start threads in the preemption path.)
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/stringslite"
)
// suspendGState is the result of suspendG, recording which goroutine was
// suspended and what cleanup resumeG must perform.
type suspendGState struct {
	g *g // the suspended goroutine; nil if dead is set

	// dead indicates the goroutine was not suspended because it
	// is dead. This goroutine could be reused after the dead
	// state was observed, so the caller must not assume that it
	// remains dead.
	dead bool

	// stopped indicates that this suspendG transitioned the G to
	// _Gwaiting via g.preemptStop and thus is responsible for
	// readying it when done.
	stopped bool
}
// suspendG suspends goroutine gp at a safe-point and returns the
// state of the suspended goroutine. The caller gets read access to
// the goroutine until it calls resumeG.
//
// It is safe for multiple callers to attempt to suspend the same
// goroutine at the same time. The goroutine may execute between
// subsequent successful suspend operations. The current
// implementation grants exclusive access to the goroutine, and hence
// multiple callers will serialize. However, the intent is to grant
// shared read access, so please don't depend on exclusive access.
//
// This must be called from the system stack and the user goroutine on
// the current M (if any) must be in a preemptible state. This
// prevents deadlocks where two goroutines attempt to suspend each
// other and both are in non-preemptible states. There are other ways
// to resolve this deadlock, but this seems simplest.
//
// TODO(austin): What if we instead required this to be called from a
// user goroutine? Then we could deschedule the goroutine while
// waiting instead of blocking the thread. If two goroutines tried to
// suspend each other, one of them would win and the other wouldn't
// complete the suspend until it was resumed. We would have to be
// careful that they couldn't actually queue up suspend for each other
// and then both be suspended. This would also avoid the need for a
// kernel context switch in the synchronous case because we could just
// directly schedule the waiter. The context switch is unavoidable in
// the signal case.
//
//go:systemstack
func suspendG(gp *g) suspendGState {
	if mp := getg().m; mp.curg != nil && readgstatus(mp.curg) == _Grunning {
		// Since we're on the system stack of this M, the user
		// G is stuck at an unsafe point. If another goroutine
		// were to try to preempt m.curg, it could deadlock.
		throw("suspendG from non-preemptible goroutine")
	}

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 10 * 1000
	var nextYield int64

	// Drive the goroutine to a preemption point.
	stopped := false
	var asyncM *m       // last M we requested async preemption of
	var asyncGen uint32 // that M's preemptGen at request time
	var nextPreemptM int64
	for i := 0; ; i++ {
		switch s := readgstatus(gp); s {
		default:
			if s&_Gscan != 0 {
				// Someone else is suspending it. Wait
				// for them to finish.
				//
				// TODO: It would be nicer if we could
				// coalesce suspends.
				break
			}

			dumpgstatus(gp)
			throw("invalid g status")

		case _Gdead, _Gdeadextra:
			// Nothing to suspend.
			//
			// preemptStop may need to be cleared, but
			// doing that here could race with goroutine
			// reuse. Instead, goexit0 clears it.
			return suspendGState{dead: true}

		case _Gcopystack:
			// The stack is being copied. We need to wait
			// until this is done.

		case _Gpreempted:
			// We (or someone else) suspended the G. Claim
			// ownership of it by transitioning it to
			// _Gwaiting.
			if !casGFromPreempted(gp, _Gpreempted, _Gwaiting) {
				break
			}

			// We stopped the G, so we have to ready it later.
			stopped = true

			s = _Gwaiting
			fallthrough

		case _Grunnable, _Gsyscall, _Gwaiting, _Gleaked:
			// Claim goroutine by setting scan bit.
			// This may race with execution or readying of gp.
			// The scan bit keeps it from transition state.
			if !castogscanstatus(gp, s, s|_Gscan) {
				break
			}

			// Clear the preemption request. It's safe to
			// reset the stack guard because we hold the
			// _Gscan bit and thus own the stack.
			gp.preemptStop = false
			gp.preempt = false
			gp.stackguard0 = gp.stack.lo + stackGuard

			// The goroutine was already at a safe-point
			// and we've now locked that in.
			//
			// TODO: It would be much better if we didn't
			// leave it in _Gscan, but instead gently
			// prevented its scheduling until resumption.
			// Maybe we only use this to bump a suspended
			// count and the scheduler skips suspended
			// goroutines? That wouldn't be enough for
			// {_Gsyscall,_Gwaiting} -> _Grunning. Maybe
			// for all those transitions we need to check
			// suspended and deschedule?
			return suspendGState{g: gp, stopped: stopped}

		case _Grunning:
			// Optimization: if there is already a pending preemption request
			// (from the previous loop iteration), don't bother with the atomics.
			if gp.preemptStop && gp.preempt && gp.stackguard0 == stackPreempt && asyncM == gp.m && asyncM.preemptGen.Load() == asyncGen {
				break
			}

			// Temporarily block state transitions.
			if !castogscanstatus(gp, _Grunning, _Gscanrunning) {
				break
			}

			// Request synchronous preemption.
			gp.preemptStop = true
			gp.preempt = true
			gp.stackguard0 = stackPreempt

			// Prepare for asynchronous preemption.
			asyncM2 := gp.m
			asyncGen2 := asyncM2.preemptGen.Load()
			// Only re-send an async preemption if the target M or its
			// preemption generation changed since the last request.
			needAsync := asyncM != asyncM2 || asyncGen != asyncGen2
			asyncM = asyncM2
			asyncGen = asyncGen2

			casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)

			// Send asynchronous preemption. We do this
			// after CASing the G back to _Grunning
			// because preemptM may be synchronous and we
			// don't want to catch the G just spinning on
			// its status.
			if preemptMSupported && debug.asyncpreemptoff == 0 && needAsync {
				// Rate limit preemptM calls. This is
				// particularly important on Windows
				// where preemptM is actually
				// synchronous and the spin loop here
				// can lead to live-lock.
				now := nanotime()
				if now >= nextPreemptM {
					nextPreemptM = now + yieldDelay/2
					preemptM(asyncM)
				}
			}
		}

		// TODO: Don't busy wait. This loop should really only
		// be a simple read/decide/CAS loop that only fails if
		// there's an active race. Once the CAS succeeds, we
		// should queue up the preemption (which will require
		// it to be reliable in the _Grunning case, not
		// best-effort) and then sleep until we're notified
		// that the goroutine is suspended.
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			procyield(10)
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}
}
// resumeG undoes the effects of suspendG, allowing the suspended
// goroutine to continue from its current safe-point.
func resumeG(state suspendGState) {
	if state.dead {
		// We didn't actually stop anything.
		return
	}

	gp := state.g
	switch s := readgstatus(gp); s {
	default:
		dumpgstatus(gp)
		throw("unexpected g status")

	case _Grunnable | _Gscan,
		_Gwaiting | _Gscan,
		_Gleaked | _Gscan,
		_Gsyscall | _Gscan:
		// Release ownership by clearing the scan bit.
		casfrom_Gscanstatus(gp, s, s&^_Gscan)
	}

	if state.stopped {
		// We stopped it, so we need to re-schedule it.
		ready(gp, 0, true)
	}
}
// canPreemptM reports whether mp is in a state that is safe to preempt.
//
// It is nosplit because it has nosplit callers.
//
//go:nosplit
func canPreemptM(mp *m) bool {
	return mp.locks == 0 && mp.mallocing == 0 && mp.preemptoff == "" && mp.p.ptr().status == _Prunning && mp.curg != nil && readgstatus(mp.curg)&^_Gscan != _Gsyscall
}
//go:generate go run mkpreempt.go

// asyncPreempt saves all user registers and calls asyncPreempt2.
//
// It saves GP registers (anything that might contain a pointer) to the G stack.
// Hence, when stack scanning encounters an asyncPreempt frame, it scans that
// frame and its parent frame conservatively.
//
// On some platforms, it saves large additional scalar-only register state such
// as vector registers to an "extended register state" on the P.
//
// asyncPreempt is implemented in assembly.
func asyncPreempt()
// asyncPreempt2 is the Go continuation of asyncPreempt.
//
// It must be deeply nosplit because there's untyped data on the stack from
// asyncPreempt.
//
// It must not have any write barriers because we need to limit the amount of
// stack it uses.
//
//go:nosplit
//go:nowritebarrierrec
func asyncPreempt2() {
	// We can't grow the stack with untyped data from asyncPreempt, so switch to
	// the system stack right away.
	mcall(func(gp *g) {
		gp.asyncSafePoint = true

		// Move the extended register state from the P to the G. We do this now that
		// we're on the system stack to avoid stack splits.
		xRegSave(gp)

		if gp.preemptStop {
			preemptPark(gp)
		} else {
			gopreempt_m(gp)
		}
		// The above functions never return.
	})

	// Do not grow the stack below here!
	gp := getg()

	// Put the extended register state back on the M so resumption can find it.
	// We can't do this in asyncPreemptM because the park calls never return.
	xRegRestore(gp)

	gp.asyncSafePoint = false
}
// asyncPreemptStack is the bytes of stack space required to inject an
// asyncPreempt call. Computed in init below; starts at the maximum uintptr
// so no preemption is injected before initialization.
var asyncPreemptStack = ^uintptr(0)
// init computes asyncPreemptStack from the maximum stack use of the
// preemption entry points plus slack for return PCs.
func init() {
	f := findfunc(abi.FuncPCABI0(asyncPreempt))
	total := funcMaxSPDelta(f)
	f = findfunc(abi.FuncPCABIInternal(asyncPreempt2))
	total += funcMaxSPDelta(f)
	f = findfunc(abi.FuncPCABIInternal(xRegRestore))
	total += funcMaxSPDelta(f)
	// Add some overhead for return PCs, etc.
	asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize

	if asyncPreemptStack > stackNosplit {
		// We need more than the nosplit limit. This isn't unsafe, but it may
		// limit asynchronous preemption. Consider moving state into xRegState.
		print("runtime: asyncPreemptStack=", asyncPreemptStack, "\n")
		throw("async stack too large")
	}
}
// wantAsyncPreempt returns whether an asynchronous preemption is
// queued for gp.
func wantAsyncPreempt(gp *g) bool {
	// Check both the G and the P.
	return (gp.preempt || gp.m.p != 0 && gp.m.p.ptr().preempt) && readgstatus(gp)&^_Gscan == _Grunning
}
// isAsyncSafePoint reports whether gp at instruction PC is an
// asynchronous safe point. This indicates that:
//
// 1. It's safe to suspend gp and conservatively scan its stack and
// registers. There are no potentially hidden pointer values and it's
// not in the middle of an atomic sequence like a write barrier.
//
// 2. gp has enough stack space to inject the asyncPreempt call.
//
// 3. It's generally safe to interact with the runtime, even if we're
// in a signal handler stopped here. For example, there are no runtime
// locks held, so acquiring a runtime lock won't self-deadlock.
//
// In some cases the PC is safe for asynchronous preemption but it
// also needs to adjust the resumption PC. The new PC is returned in
// the second result.
func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) {
	mp := gp.m

	// Only user Gs can have safe-points. We check this first
	// because it's extremely common that we'll catch mp in the
	// scheduler processing this G preemption.
	if mp.curg != gp {
		return false, 0
	}

	// Check M state.
	if mp.p == 0 || !canPreemptM(mp) {
		return false, 0
	}

	// Check stack space.
	if sp < gp.stack.lo || sp-gp.stack.lo < asyncPreemptStack {
		return false, 0
	}

	// If we're in the middle of a secret computation, we can't
	// allow any conservative scanning of stacks, as that may lead
	// to secrets leaking out from the stack into work buffers.
	// Additionally, the preemption code will store the
	// machine state (including registers which may contain confidential
	// information) into the preemption buffers.
	//
	// TODO(dmo): there's technically nothing stopping us from doing the
	// preemption, granted that don't conservatively scan and we clean up after
	// ourselves. This is made slightly harder by the xRegs cached allocations
	// that can move between Gs and Ps. In any case, for the intended users (cryptography code)
	// they are unlikely get stuck in unterminating loops.
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		return false, 0
	}

	// Check if PC is an unsafe-point.
	f := findfunc(pc)
	if !f.valid() {
		// Not Go code.
		return false, 0
	}
	if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc) == 0 {
		// We probably stopped at a half-executed CALL instruction,
		// where the LR is updated but the PC has not. If we preempt
		// here we'll see a seemingly self-recursive call, which is in
		// fact not.
		// This is normally ok, as we use the return address saved on
		// stack for unwinding, not the LR value. But if this is a
		// call to morestack, we haven't created the frame, and we'll
		// use the LR for unwinding, which will be bad.
		return false, 0
	}
	up, startpc := pcdatavalue2(f, abi.PCDATA_UnsafePoint, pc)
	if up == abi.UnsafePointUnsafe {
		// Unsafe-point marked by compiler. This includes
		// atomic sequences (e.g., write barrier) and nosplit
		// functions (except at calls).
		return false, 0
	}
	if fd := funcdata(f, abi.FUNCDATA_LocalsPointerMaps); fd == nil || f.flag&abi.FuncFlagAsm != 0 {
		// This is assembly code. Don't assume it's well-formed.
		// TODO: Empirically we still need the fd == nil check. Why?
		//
		// TODO: Are there cases that are safe but don't have a
		// locals pointer map, like empty frame functions?
		// It might be possible to preempt any assembly functions
		// except the ones that have funcFlag_SPWRITE set in f.flag.
		return false, 0
	}
	// Check the inner-most name
	u, uf := newInlineUnwinder(f, pc)
	name := u.srcFunc(uf).name()
	if stringslite.HasPrefix(name, "runtime.") ||
		stringslite.HasPrefix(name, "internal/runtime/") ||
		stringslite.HasPrefix(name, "reflect.") {
		// For now we never async preempt the runtime or
		// anything closely tied to the runtime. Known issues
		// include: various points in the scheduler ("don't
		// preempt between here and here"), much of the defer
		// implementation (untyped info on stack), bulk write
		// barriers (write barrier check), atomic functions in
		// internal/runtime/atomic, reflect.{makeFuncStub,methodValueCall}.
		//
		// Note that this is a subset of the runtimePkgs in pkgspecial.go
		// and these checks are theoretically redundant because the compiler
		// marks "all points" in runtime functions as unsafe for async preemption.
		// But for some reason, we can't eliminate these checks until https://go.dev/issue/72031
		// is resolved.
		//
		// TODO(austin): We should improve this, or opt things
		// in incrementally.
		return false, 0
	}
	switch up {
	case abi.UnsafePointRestart1, abi.UnsafePointRestart2:
		// Restartable instruction sequence. Back off PC to
		// the start PC.
		if startpc == 0 || startpc > pc || pc-startpc > 20 {
			throw("bad restart PC")
		}
		return true, startpc
	case abi.UnsafePointRestartAtEntry:
		// Restart from the function entry at resumption.
		return true, f.entry()
	}
	return true, pc
}
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
package runtime
// osPreemptExtEnter is a no-op on non-Windows platforms; Windows uses it to
// bracket externally-suspended execution.
//
//go:nosplit
func osPreemptExtEnter(mp *m) {}
// osPreemptExtExit is a no-op on non-Windows platforms; see osPreemptExtEnter.
//
//go:nosplit
func osPreemptExtExit(mp *m) {}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 || arm64 || loong64
// This provides common support for architectures that use extended register
// state in asynchronous preemption.
//
// While asynchronous preemption stores general-purpose (GP) registers on the
// preempted goroutine's own stack, extended register state can be used to save
// non-GP state off the stack. In particular, this is meant for large vector
// register files. This memory is conservatively scanned to enable using
// non-GP registers for operations that may involve pointers.
//
// For an architecture to support extended register state, it must provide a Go
// definition of an xRegState type for storing the state, and its asyncPreempt
// implementation must write this register state to p.xRegs.scratch.
package runtime
import (
"internal/abi"
"internal/runtime/sys"
"unsafe"
)
// xRegState is long-lived extended register state. It is allocated off-heap and
// manually managed.
type xRegState struct {
	_    sys.NotInHeap // Allocated from xRegAlloc
	regs xRegs
}
// xRegPerG stores extended register state while a goroutine is asynchronously
// preempted. This is nil otherwise, so we can reuse a (likely small) pool of
// xRegState objects.
type xRegPerG struct {
	state *xRegState
}
// xRegPerP is the per-P side of extended register state management.
type xRegPerP struct {
	// scratch temporary per-P space where [asyncPreempt] saves the register
	// state before entering Go. It's quickly copied to per-G state.
	scratch xRegs

	// cache is a 1-element allocation cache of extended register state used by
	// asynchronous preemption. On entry to preemption, this is used as a simple
	// allocation cache. On exit from preemption, the G's xRegState is always
	// stored here where it can be restored, and later either freed or reused
	// for another preemption. On exit, this serves the dual purpose of
	// delay-freeing the allocated xRegState until after we've definitely
	// restored it.
	cache *xRegState
}
// xRegAlloc allocates xRegState objects.
var xRegAlloc struct {
	lock  mutex
	alloc fixalloc
}
// xRegInitAlloc initializes the fixalloc pool and lock used for xRegState
// allocation. Called once during runtime initialization.
func xRegInitAlloc() {
	lockInit(&xRegAlloc.lock, lockRankXRegAlloc)
	xRegAlloc.alloc.init(unsafe.Sizeof(xRegState{}), nil, nil, &memstats.other_sys)
}
// xRegSave saves the extended register state on this P to gp.
//
// This must run on the system stack because it assumes the P won't change.
//
//go:systemstack
func xRegSave(gp *g) {
	if gp.xRegs.state != nil {
		// Double preempt?
		throw("gp.xRegState.p != nil on async preempt")
	}

	// Get the place to save the register state.
	var dest *xRegState
	pp := gp.m.p.ptr()
	if pp.xRegs.cache != nil {
		// Use the cached allocation.
		dest = pp.xRegs.cache
		pp.xRegs.cache = nil
	} else {
		// Allocate a new save block.
		lock(&xRegAlloc.lock)
		dest = (*xRegState)(xRegAlloc.alloc.alloc())
		unlock(&xRegAlloc.lock)
	}

	// Copy state saved in the scratchpad to dest.
	//
	// If we ever need to save less state (e.g., avoid saving vector registers
	// that aren't in use), we could have multiple allocation pools for
	// different size states and copy only the registers we need.
	dest.regs = pp.xRegs.scratch

	// Save on the G.
	gp.xRegs.state = dest
}
// xRegRestore prepares the extended register state on gp to be restored.
//
// It moves the state to gp.m.p.xRegs.cache where [asyncPreempt] expects to find
// it. This means nothing else may use the cache between this call and the
// return to asyncPreempt. This is not quite symmetric with [xRegSave], which
// uses gp.m.p.xRegs.scratch. By using cache instead, we save a block copy.
//
// This is called with asyncPreempt on the stack and thus must not grow the
// stack.
//
//go:nosplit
func xRegRestore(gp *g) {
	if gp.xRegs.state == nil {
		throw("gp.xRegState.p == nil on return from async preempt")
	}
	// If the P has a block cached on it, free that so we can replace it.
	pp := gp.m.p.ptr()
	if pp.xRegs.cache != nil {
		// Don't grow the G stack.
		systemstack(func() {
			pp.xRegs.free()
		})
	}
	// Hand the G's state to the P; asyncPreempt restores from there and the
	// block is delay-freed or reused on the next preemption.
	pp.xRegs.cache = gp.xRegs.state
	gp.xRegs.state = nil
}
// free returns the cached xRegState block (if any) to the global allocator.
func (xRegs *xRegPerP) free() {
	if xRegs.cache != nil {
		lock(&xRegAlloc.lock)
		xRegAlloc.alloc.free(unsafe.Pointer(xRegs.cache))
		xRegs.cache = nil
		unlock(&xRegAlloc.lock)
	}
}
// xRegScan conservatively scans the extended register state.
//
// This is supposed to be called only by scanstack when it handles async preemption.
func xRegScan(gp *g, gcw *gcWork, state *stackScanState) {
	// Regular async preemption always provides the extended register state.
	if gp.xRegs.state == nil {
		// Tolerate a missing state only when the G was stopped by the
		// debug call injection machinery rather than regular preemption.
		var u unwinder
		for u.init(gp, 0); u.valid(); u.next() {
			if u.frame.fn.valid() && u.frame.fn.funcID == abi.FuncID_debugCallV2 {
				return
			}
		}
		println("runtime: gp=", gp, ", goid=", gp.goid)
		throw("gp.xRegs.state == nil on a scanstack attempt during async preemption")
	}
	b := uintptr(unsafe.Pointer(&gp.xRegs.state.regs))
	n := uintptr(unsafe.Sizeof(gp.xRegs.state.regs))
	if debugScanConservative {
		print("begin scan xRegs of goroutine ", gp.goid, " at [", hex(b), ",", hex(b+n), ")\n")
	}
	scanConservative(b, n, nil, gcw, state)
	if debugScanConservative {
		print("end scan xRegs of goroutine ", gp.goid, "\n")
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/strconv"
"unsafe"
)
// The compiler knows that a print of a value of this type
// should use printhex instead of printuint (decimal).
type hex uint64

// The compiler knows that a print of a value of this type should use
// printquoted instead of printstring.
type quoted string
// bytes returns a []byte that aliases the bytes of s without copying.
// The result must not be modified, since it shares storage with the
// (immutable) string.
func bytes(s string) (ret []byte) {
	rp := (*slice)(unsafe.Pointer(&ret))
	sp := stringStructOf(&s)
	rp.array = sp.str
	rp.len = sp.len
	rp.cap = sp.len
	return
}
var (
	// printBacklog is a circular buffer of messages written with the builtin
	// print* functions, for use in postmortem analysis of core dumps.
	printBacklog [512]byte
	// printBacklogIndex is the next write position in printBacklog.
	printBacklogIndex int
)
// recordForPanic maintains a circular buffer of messages written by the
// runtime leading up to a process crash, allowing the messages to be
// extracted from a core dump.
//
// The text written during a process crash (following "panic" or "fatal
// error") is not saved, since the goroutine stacks will generally be readable
// from the runtime data structures in the core file.
func recordForPanic(b []byte) {
	printlock()

	if panicking.Load() == 0 {
		// Not actively crashing: maintain circular buffer of print output.
		// copy may fill only part of the buffer, so loop and wrap.
		for i := 0; i < len(b); {
			n := copy(printBacklog[printBacklogIndex:], b[i:])
			i += n
			printBacklogIndex += n
			printBacklogIndex %= len(printBacklog)
		}
	}

	printunlock()
}
// debuglock serializes all print output; see printlock for the recursive
// acquisition protocol.
var debuglock mutex

// The compiler emits calls to printlock and printunlock around
// the multiple calls that implement a single Go print or println
// statement. Some of the print helpers (printslice, for example)
// call print recursively. There is also the problem of a crash
// happening during the print routines and needing to acquire
// the print lock to print information about the crash.
// For both these reasons, let a thread acquire the printlock 'recursively'.
func printlock() {
	mp := getg().m
	mp.locks++ // do not reschedule between printlock++ and lock(&debuglock).
	mp.printlock++
	if mp.printlock == 1 {
		lock(&debuglock)
	}
	mp.locks-- // now we know debuglock is held and holding up mp.locks for us.
}
// printunlock undoes one level of printlock, releasing debuglock when the
// outermost level is exited.
func printunlock() {
	mp := getg().m
	mp.printlock--
	if mp.printlock == 0 {
		unlock(&debuglock)
	}
}
// write to goroutine-local buffer if diverting output,
// or else standard error.
func gwrite(b []byte) {
	if len(b) == 0 {
		return
	}
	recordForPanic(b)
	gp := getg()
	// Don't use the writebuf if gp.m is dying. We want anything
	// written through gwrite to appear in the terminal rather
	// than be written to in some buffer, if we're in a panicking state.
	// Note that we can't just clear writebuf in the gp.m.dying case
	// because a panic isn't allowed to have any write barriers.
	if gp == nil || gp.writebuf == nil || gp.m.dying > 0 {
		writeErr(b)
		return
	}

	// Append to the goroutine's write buffer, up to its capacity.
	n := copy(gp.writebuf[len(gp.writebuf):cap(gp.writebuf)], b)
	gp.writebuf = gp.writebuf[:len(gp.writebuf)+n]
}
// printsp prints a single space.
func printsp() {
	printstring(" ")
}
// printnl prints a newline.
func printnl() {
	printstring("\n")
}
// printbool prints the literal "true" or "false" for v.
func printbool(v bool) {
	s := "false"
	if v {
		s = "true"
	}
	printstring(s)
}
// float64 requires 1+17+1+1+1+3 = 24 bytes max (sign+digits+decimal point+e+sign+exponent digits).
const float64Bytes = 24

// printfloat64 prints v in Go 'g' format using a fixed stack buffer.
func printfloat64(v float64) {
	var buf [float64Bytes]byte
	gwrite(strconv.AppendFloat(buf[:0], v, 'g', -1, 64))
}
// float32 requires 1+9+1+1+1+2 = 15 bytes max (sign+digits+decimal point+e+sign+exponent digits).
const float32Bytes = 15

// printfloat32 prints v in Go 'g' format at float32 precision.
func printfloat32(v float32) {
	var buf [float32Bytes]byte
	gwrite(strconv.AppendFloat(buf[:0], float64(v), 'g', -1, 32))
}
// complex128 requires 24+24+1+1+1 = 51 bytes max (paren+float64+float64+i+paren).
const complex128Bytes = 2*float64Bytes + 3

// printcomplex128 prints c as "(re+imi)" using a fixed stack buffer.
func printcomplex128(c complex128) {
	var buf [complex128Bytes]byte
	gwrite(strconv.AppendComplex(buf[:0], c, 'g', -1, 128))
}
// complex64 requires 15+15+1+1+1 = 33 bytes max (paren+float32+float32+i+paren).
const complex64Bytes = 2*float32Bytes + 3

// printcomplex64 prints c as "(re+imi)" at complex64 precision.
func printcomplex64(c complex64) {
	var buf [complex64Bytes]byte
	gwrite(strconv.AppendComplex(buf[:0], complex128(c), 'g', -1, 64))
}
// printuint prints v in decimal using a fixed stack buffer.
func printuint(v uint64) {
	// Note: Avoiding strconv.AppendUint so that it's clearer
	// that there are no allocations in this routine.
	// cmd/link/internal/ld.TestAbstractOriginSanity
	// sees the append and doesn't realize it doesn't allocate.
	var buf [20]byte
	i := strconv.RuntimeFormatBase10(buf[:], v)
	gwrite(buf[i:])
}
// printint prints v in decimal, with a leading '-' for negative values.
func printint(v int64) {
	// Note: Avoiding strconv.AppendUint so that it's clearer
	// that there are no allocations in this routine.
	// cmd/link/internal/ld.TestAbstractOriginSanity
	// sees the append and doesn't realize it doesn't allocate.
	neg := v < 0
	u := uint64(v)
	if neg {
		// Two's-complement negation in uint64; correct even for
		// the most negative int64.
		u = -u
	}
	var buf [20]byte
	i := strconv.RuntimeFormatBase10(buf[:], u)
	if neg {
		i--
		buf[i] = '-'
	}
	gwrite(buf[i:])
}
// minhexdigits is the minimum digit count used by printhex.
var minhexdigits = 0 // protected by printlock

// printhexopts prints v in hexadecimal, zero-padded to at least
// mindigits digits, with an optional "0x" prefix.
func printhexopts(include0x bool, mindigits int, v uint64) {
	const dig = "0123456789abcdef"
	var buf [100]byte
	i := len(buf)
	// Fill buf from the end, least significant digit first.
	for i--; i > 0; i-- {
		buf[i] = dig[v%16]
		// Stop once v fits in one digit and the minimum width is met.
		if v < 16 && len(buf)-i >= mindigits {
			break
		}
		v /= 16
	}
	if include0x {
		i--
		buf[i] = 'x'
		i--
		buf[i] = '0'
	}
	gwrite(buf[i:])
}
// printhex prints v as 0x-prefixed hexadecimal, padded to minhexdigits.
func printhex(v uint64) {
	printhexopts(true, minhexdigits, v)
}
// printquoted prints s surrounded by double quotes, escaping control
// characters, backslashes, quotes, non-ASCII runes, and invalid UTF-8
// bytes so that the output is unambiguous in tracebacks.
//
// Fix: removed a stray empty print() call in the '\t' case; it emitted
// nothing and only performed a pointless printlock/printunlock pair.
func printquoted(s string) {
	printlock()
	gwrite([]byte(`"`))
	for i, r := range s {
		switch r {
		case '\n':
			gwrite([]byte(`\n`))
			continue
		case '\r':
			gwrite([]byte(`\r`))
			continue
		case '\t':
			gwrite([]byte(`\t`))
			continue
		case '\\', '"':
			gwrite([]byte{byte('\\'), byte(r)})
			continue
		case runeError:
			// Distinguish errors from a valid encoding of U+FFFD.
			if _, j := decoderune(s, uint(i)); j == uint(i+1) {
				// Invalid byte: print it as a \x escape.
				gwrite(bytes(`\x`))
				printhexopts(false, 2, uint64(s[i]))
				continue
			}
			// Fall through to quoting.
		}
		// For now, only allow basic printable ascii through unescaped
		if r >= ' ' && r <= '~' {
			gwrite([]byte{byte(r)})
		} else if r < 127 {
			gwrite(bytes(`\x`))
			printhexopts(false, 2, uint64(r))
		} else if r < 0x1_0000 {
			gwrite(bytes(`\u`))
			printhexopts(false, 4, uint64(r))
		} else {
			gwrite(bytes(`\U`))
			printhexopts(false, 8, uint64(r))
		}
	}
	gwrite([]byte{byte('"')})
	printunlock()
}
// printpointer prints p as a hexadecimal address.
func printpointer(p unsafe.Pointer) {
	printhex(uint64(uintptr(p)))
}
// printuintptr prints p as a hexadecimal address.
func printuintptr(p uintptr) {
	printhex(uint64(p))
}
// printstring prints s unquoted and unescaped.
func printstring(s string) {
	gwrite(bytes(s))
}
// printslice prints a byte slice as [len/cap] followed by the
// address of its backing array.
func printslice(s []byte) {
	sp := (*slice)(unsafe.Pointer(&s))
	print("[", len(s), "/", cap(s), "]")
	printpointer(sp.array)
}
// printeface prints an empty interface as (type,data) pointers.
func printeface(e eface) {
	print("(", e._type, ",", e.data, ")")
}
// printiface prints a non-empty interface as (itab,data) pointers.
func printiface(i iface) {
	print("(", i.tab, ",", i.data, ")")
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/cpu"
"internal/goarch"
"internal/goexperiment"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/exithook"
"internal/runtime/sys"
"internal/strconv"
"internal/stringslite"
"unsafe"
)
// modinfo holds the build/module info blob embedded by the linker;
// set using cmd/go/internal/modload.ModInfoProg
var modinfo string
// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
// M must have an associated P to execute Go code, however it can be
// blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.
// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power. This is not simple for two reasons:
// (1) scheduler state is intentionally distributed (in particular, per-P work
// queues), so it is not possible to compute global predicates on fast paths;
// (2) for optimal thread management we would need to know the future (don't park
// a worker thread when a new goroutine will be readied in near future).
//
// Three rejected approaches that would work badly:
// 1. Centralize all scheduler state (would inhibit scalability).
// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
// is a spare P, unpark a thread and handoff it the thread and the goroutine.
// This would lead to thread state thrashing, as the thread that readied the
// goroutine can be out of work the very next moment, we will need to park it.
// Also, it would destroy locality of computation as we want to preserve
// dependent goroutines on the same thread; and introduce additional latency.
// 3. Unpark an additional thread whenever we ready a goroutine and there is an
// idle P, but don't do handoff. This would lead to excessive thread parking/
// unparking as the additional threads will instantly park without discovering
// any work to do.
//
// The current approach:
//
// This approach applies to three primary sources of potential work: readying a
// goroutine, new/modified-earlier timers, and idle-priority GC. See below for
// additional details.
//
// We unpark an additional thread when we submit work if (this is wakep()):
// 1. There is an idle P, and
// 2. There are no "spinning" worker threads.
//
// A worker thread is considered spinning if it is out of local work and did
// not find work in the global run queue or netpoller; the spinning state is
// denoted in m.spinning and in sched.nmspinning. Threads unparked this way are
// also considered spinning; we don't do goroutine handoff so such threads are
// out of work initially. Spinning threads spin on looking for work in per-P
// run queues and timer heaps or from the GC before parking. If a spinning
// thread finds work it takes itself out of the spinning state and proceeds to
// execution. If it does not find work it takes itself out of the spinning
// state and then parks.
//
// If there is at least one spinning thread (sched.nmspinning>1), we don't
// unpark new threads when submitting work. To compensate for that, if the last
// spinning thread finds work and stops spinning, it must unpark a new spinning
// thread. This approach smooths out unjustified spikes of thread unparking,
// but at the same time guarantees eventual maximal CPU parallelism
// utilization.
//
// The main implementation complication is that we need to be very careful
// during spinning->non-spinning thread transition. This transition can race
// with submission of new work, and either one part or another needs to unpark
// another worker thread. If they both fail to do that, we can end up with
// semi-persistent CPU underutilization.
//
// The general pattern for submission is:
// 1. Submit work to the local or global run queue, timer heap, or GC state.
// 2. #StoreLoad-style memory barrier.
// 3. Check sched.nmspinning.
//
// The general pattern for spinning->non-spinning transition is:
// 1. Decrement nmspinning.
// 2. #StoreLoad-style memory barrier.
// 3. Check all per-P work queues and GC for new work.
//
// Note that all this complexity does not apply to global run queue as we are
// not sloppy about thread unparking when submitting to global queue. Also see
// comments for nmspinning manipulation.
//
// How these different sources of work behave varies, though it doesn't affect
// the synchronization approach:
// * Ready goroutine: this is an obvious source of work; the goroutine is
// immediately ready and must run on some thread eventually.
// * New/modified-earlier timer: The current timer implementation (see time.go)
// uses netpoll in a thread with no work available to wait for the soonest
// timer. If there is no thread waiting, we want a new spinning thread to go
// wait.
// * Idle-priority GC: The GC wakes a stopped idle thread to contribute to
// background GC work (note: currently disabled per golang.org/issue/19112).
// Also see golang.org/issue/44313, as this should be extended to all GC
// workers.
// Bootstrap globals for the initial thread and goroutine.
var (
	m0           m       // the M for the program's initial (main) OS thread
	g0           g       // m0's system-stack goroutine
	mcache0      *mcache // bootstrap mcache, used before P 0 exists
	raceprocctx0 uintptr // race detector context for proc 0
	raceFiniLock mutex
)

// This slice records the initializing tasks that need to be
// done to start up the runtime. It is built by the linker.
var runtime_inittasks []*initTask

// mainInitDone is a signal used by cgocallbackg that initialization
// has been completed. If this is false, wait on mainInitDoneChan.
var mainInitDone atomic.Bool

// mainInitDoneChan is closed after initialization has been completed.
// It is made before _cgo_notify_runtime_init_done, so all cgo
// calls can rely on it existing.
var mainInitDoneChan chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset
// The main goroutine. Runs package init, then main.main, then handles
// orderly shutdown. Called on m0 once the scheduler is up.
func main() {
	mp := getg().m
	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	mp.g0.racectx = 0
	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}
	// An upper limit for max stack size. Used to avoid random crashes
	// after calling SetMaxStack and trying to allocate a stack that is too big,
	// since stackalloc works with 32-bit sizes.
	maxstackceiling = 2 * maxstacksize
	// Allow newproc to start new Ms.
	mainStarted = true
	if haveSysmon {
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}
	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()
	if mp != &m0 {
		throw("runtime.main not on m0")
	}
	// Record when the world started.
	// Must be before doInit for tracing init.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}
	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}
	doInit(runtime_inittasks) // Must be before defer.
	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()
	gcenable()
	defaultGOMAXPROCSUpdateEnable() // don't STW before runtime initialized.
	mainInitDoneChan = make(chan bool)
	if iscgo {
		// Sanity-check that the cgo support pointers the runtime
		// needs were all registered by the cgo startup code.
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}
		if GOOS != "windows" {
			if _cgo_thread_start == nil {
				throw("_cgo_thread_start missing")
			}
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		// Set the x_crosscall2_ptr C function pointer variable point to crosscall2.
		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()
		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}
	// Run the initializing tasks. Depending on build mode this
	// list can arrive a few different ways, but it will always
	// contain the init tasks computed by the linker for all the
	// packages in the program (excluding those added at runtime
	// by package plugin). Run through the modules in dependency
	// order (the order they are initialized by the dynamic
	// loader, i.e. they are added to the moduledata linked list).
	last := lastmoduledatap // grab before loop starts. Any added modules after this point will do their own doInit calls.
	for m := &firstmoduledata; true; m = m.next {
		doInit(m.inittasks)
		if m == last {
			break
		}
	}
	// Disable init tracing after main init done to avoid overhead
	// of collecting statistics in malloc and newproc
	inittrace.active = false
	mainInitDone.Store(true)
	close(mainInitDoneChan)
	needUnlock = false
	unlockOSThread()
	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		if GOARCH == "wasm" {
			// On Wasm, pause makes it return to the host.
			// Unlike cgo callbacks where Ms are created on demand,
			// on Wasm we have only one M. So we keep this M (and this
			// G) for callbacks.
			// Using the caller's SP unwinds this frame and backs to
			// goexit. The -16 is: 8 for goexit's (fake) return PC,
			// and pause's epilogue pops 8.
			pause(sys.GetCallerSP() - 16) // should not return
			panic("unreachable")
		}
		return
	}
	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	// Check for C memory leaks if using ASAN and we've made cgo calls,
	// or if we are running as a library in a C program.
	// We always make one cgo call, above, to notify_runtime_init_done,
	// so we ignore that one.
	// No point in leak checking if no cgo calls, since leak checking
	// just looks for objects allocated using malloc and friends.
	// Just checking iscgo doesn't help because asan implies iscgo.
	exitHooksRun := false
	if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		runExitHooks(0) // lsandoleakcheck may not return
		exitHooksRun = true
		lsandoleakcheck()
	}
	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if runningPanicDefers.Load() != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	if panicking.Load() != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	if !exitHooksRun {
		runExitHooks(0)
	}
	if raceenabled {
		racefini() // does not return
	}
	exit(0)
	// Unreachable: exit does not return. Fault loudly if it somehow does.
	for {
		var x *int32
		*x = 0
	}
}
// os_beforeExit is called from os.Exit(0). It runs exit hooks and, on a
// clean exit, the race-detector and leak-check finalizers.
//
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit(exitCode int) {
	runExitHooks(exitCode)
	if exitCode == 0 && raceenabled {
		racefini()
	}
	// See comment in main, above.
	if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		lsandoleakcheck()
	}
}
// Wire the runtime's scheduling primitives into the exithook package,
// which cannot import runtime directly.
func init() {
	exithook.Gosched = Gosched
	exithook.Goid = func() uint64 { return getg().goid }
	exithook.Throw = throw
}
// runExitHooks runs any registered exit hooks with the given exit code.
func runExitHooks(code int) {
	exithook.Run(code)
}
// start forcegc helper goroutine
func init() {
	go forcegchelper()
}
// forcegchelper parks until sysmon decides a GC is overdue, then
// triggers a time-based GC cycle and parks again. It runs for the
// lifetime of the program.
func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		forcegc.idle.Store(true)
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
//
//go:nosplit
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}
// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//
//go:nosplit
func goschedguarded() {
	mcall(goschedguarded_m)
}
// goschedIfBusy yields the processor like gosched, but only does so if
// there are no idle Ps or if we're on the only P and there's nothing in
// the run queue. In both cases, there is freely available idle time.
//
//go:nosplit
func goschedIfBusy() {
	gp := getg()
	// Call gosched if gp.preempt is set; we may be in a tight loop that
	// doesn't otherwise yield.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}
// Puts the current goroutine into a waiting state and calls unlockf on the
// system stack.
//
// If unlockf returns false, the goroutine is resumed.
//
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
//
// Note that because unlockf is called after putting the G into a waiting
// state, the G may have already been readied by the time unlockf is called
// unless there is external synchronization preventing the G from being
// readied. If unlockf returns false, it must guarantee that the G cannot be
// externally readied.
//
// Reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive. Do not
// re-use reasons, add new ones.
//
// gopark should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopark
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	// Pin to this M while staging the park parameters on it.
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}
// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}
// goready marks gp runnable again after a gopark.
//
// goready should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname goready
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
// acquireSudog returns a sudog from the per-P cache, refilling from the
// central cache (or the heap) as needed.
//
//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		// Fill to half capacity so frees have room before spilling back.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	// Pop from the tail of the local cache, clearing the slot so the
	// cache doesn't keep the sudog reachable.
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem.get() != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}
// releaseSudog returns s to the per-P cache, spilling half the cache to
// the central list when the local cache is full. s must be fully reset.
//
//go:nosplit
func releaseSudog(s *sudog) {
	// A sudog returned to the cache must carry no dangling state.
	if s.elem.get() != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c.get() != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		// Build a linked list off the tail of the local cache first,
		// then splice it into the central list under one lock.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
// called from assembly when mcall is invoked on the g0 stack.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}
// badmcall2 is called from assembly if an mcall'd function returns,
// which is never allowed.
func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}
// badreflectcall panics when reflect.call is given an oversized frame.
func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}
// badmorestackg0 reports a stack split on g0, printing a traceback via
// the crash stack when the platform supports it.
//
//go:nosplit
//go:nowritebarrierrec
func badmorestackg0() {
	if !crashStackImplemented {
		writeErrStr("fatal: morestack on g0\n")
		return
	}
	g := getg()
	switchToCrashStack(func() {
		print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
		g.m.traceback = 2 // include pc and sp in stack trace
		traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
		print("\n")
		throw("morestack on g0")
	})
}
// badmorestackgsignal reports a stack split on the signal stack, which
// must never grow.
//
//go:nosplit
//go:nowritebarrierrec
func badmorestackgsignal() {
	writeErrStr("fatal: morestack on gsignal\n")
}
// badctxt is called from assembly when a context register that must be
// zero is not.
//
//go:nosplit
func badctxt() {
	throw("ctxt != 0")
}
// gcrash is a fake g that can be used when crashing due to bad
// stack conditions.
var gcrash g

// crashingG records the single g currently allowed on the crash stack.
var crashingG atomic.Pointer[g]
// Switch to crashstack and call fn, with special handling of
// concurrent and recursive cases.
//
// Nosplit as it is called in a bad stack condition (we know
// morestack would fail).
//
//go:nosplit
//go:nowritebarrierrec
func switchToCrashStack(fn func()) {
	me := getg()
	// Claim the crash stack; only one g may use it at a time.
	if crashingG.CompareAndSwapNoWB(nil, me) {
		switchToCrashStack0(fn) // should never return
		abort()
	}
	if crashingG.Load() == me {
		// recursive crashing. too bad.
		writeErrStr("fatal: recursive switchToCrashStack\n")
		abort()
	}
	// Another g is crashing. Give it some time, hopefully it will finish traceback.
	usleep_no_g(100)
	writeErrStr("fatal: concurrent switchToCrashStack\n")
	abort()
}
// Disable crash stack on Windows for now. Apparently, throwing an exception
// on a non-system-allocated crash stack causes EXCEPTION_STACK_OVERFLOW and
// hangs the process (see issue 63938).
const crashStackImplemented = GOOS != "windows"
// switchToCrashStack0 switches SP to the crash stack and calls fn.
//
//go:noescape
func switchToCrashStack0(fn func()) // in assembly
func lockedOSThread() bool {
gp := getg()
return gp.lockedm != 0 && gp.m.lockedg != 0
}
// Bookkeeping for the global list of goroutines.
var (
	// allgs contains all Gs ever created (including dead Gs), and thus
	// never shrinks.
	//
	// Access via the slice is protected by allglock or stop-the-world.
	// Readers that cannot take the lock may (carefully!) use the atomic
	// variables below.
	allglock mutex
	allgs    []*g

	// allglen and allgptr are atomic variables that contain len(allgs) and
	// &allgs[0] respectively. Proper ordering depends on totally-ordered
	// loads and stores. Writes are protected by allglock.
	//
	// allgptr is updated before allglen. Readers should read allglen
	// before allgptr to ensure that allglen is always <= len(allgptr). New
	// Gs appended during the race can be missed. For a consistent view of
	// all Gs, allglock must be held.
	//
	// allgptr copies should always be stored as a concrete type or
	// unsafe.Pointer, not uintptr, to ensure that GC can still reach it
	// even if it points to a stale array.
	allglen uintptr
	allgptr **g
)
// allgadd appends gp to allgs, publishing the new array pointer and
// length for lock-free readers (atomicAllG).
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}
	lock(&allglock)
	allgs = append(allgs, gp)
	// Publish the (possibly reallocated) array pointer before the new
	// length; see the ordering contract on allglen/allgptr.
	if &allgs[0] != allgptr {
		atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
	}
	atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
	unlock(&allglock)
}
// allGsSnapshot returns a snapshot of the slice of all Gs.
//
// The world must be stopped or allglock must be held.
func allGsSnapshot() []*g {
	assertWorldStoppedOrLockHeld(&allglock)
	// Because the world is stopped or allglock is held, allgadd
	// cannot happen concurrently with this. allgs grows
	// monotonically and existing entries never change, so we can
	// simply return a copy of the slice header. For added safety,
	// we trim everything past len because that can still change.
	return allgs[:len(allgs):len(allgs)]
}
// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
func atomicAllG() (**g, uintptr) {
	// Load length first: allgptr is published before allglen, so this
	// order guarantees length <= the array behind ptr.
	length := atomic.Loaduintptr(&allglen)
	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
	return ptr, length
}
// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}
// forEachG calls fn on every G from allgs.
//
// forEachG takes a lock to exclude concurrent addition of new Gs.
func forEachG(fn func(gp *g)) {
	lock(&allglock)
	for _, gp := range allgs {
		fn(gp)
	}
	unlock(&allglock)
}
// forEachGRace calls fn on every G from allgs.
//
// forEachGRace avoids locking, but does not exclude addition of new Gs during
// execution, which may be missed.
func forEachGRace(fn func(gp *g)) {
	// Snapshot the array pointer and length atomically so iteration is
	// safe against concurrent allgadd calls.
	ptr, length := atomicAllG()
	for i := uintptr(0); i < length; i++ {
		fn(atomicAllGIndex(ptr, i))
	}
}
const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
	_GoidCacheBatch = 16
)
// cpuinit sets up CPU feature flags and calls internal/cpu.Initialize. env should be the complete
// value of the GODEBUG environment variable.
func cpuinit(env string) {
	cpu.Initialize(env)
	// Support cpu feature variables are used in code generated by the compiler
	// to guard execution of instructions that can not be assumed to be always supported.
	switch GOARCH {
	case "386", "amd64":
		x86HasAVX = cpu.X86.HasAVX
		x86HasFMA = cpu.X86.HasFMA
		x86HasPOPCNT = cpu.X86.HasPOPCNT
		x86HasSSE41 = cpu.X86.HasSSE41
	case "arm":
		armHasVFPv4 = cpu.ARM.HasVFPv4
	case "arm64":
		arm64HasATOMICS = cpu.ARM64.HasATOMICS
	case "loong64":
		loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
		loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
		loong64HasLSX = cpu.Loong64.HasLSX
	case "riscv64":
		riscv64HasZbb = cpu.RISCV64.HasZbb
	}
}
// getGodebugEarly extracts the environment variable GODEBUG from the environment on
// Unix-like operating systems and returns it. This function exists to extract GODEBUG
// early before much of the runtime is initialized.
//
// Returns "", false if the OS doesn't provide env vars early in the init sequence.
// (Doc fix: the first result is a string and can never be nil.)
func getGodebugEarly() (string, bool) {
	const prefix = "GODEBUG="
	var env string
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		// Similar to goenv_unix but extracts the environment value for
		// GODEBUG directly.
		// TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
		// The environment block follows argv: entries start at
		// argv[argc+1] and are NULL-terminated.
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}
		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := unsafe.String(p, findnull(p))
			if stringslite.HasPrefix(s, prefix) {
				env = gostringnocopy(p)[len(prefix):]
				break
			}
		}
		// Note: the redundant trailing `break` was removed; Go switch
		// cases never fall through.
	default:
		return "", false
	}
	return env, true
}
// The bootstrap sequence is:
//
// call osinit
// call schedinit
// make & queue new G
// call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	// Register lock ranks for lock-order checking before first use.
	lockInit(&sched.lock, lockRankSched)
	lockInit(&sched.sysmonlock, lockRankSysmon)
	lockInit(&sched.deferlock, lockRankDefer)
	lockInit(&sched.sudoglock, lockRankSudog)
	lockInit(&deadlock, lockRankDeadlock)
	lockInit(&paniclk, lockRankPanic)
	lockInit(&allglock, lockRankAllg)
	lockInit(&allpLock, lockRankAllp)
	lockInit(&reflectOffs.lock, lockRankReflectOffs)
	lockInit(&finlock, lockRankFin)
	lockInit(&cpuprof.lock, lockRankCpuprof)
	lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
	allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
	execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
	traceLockInit()
	// Enforce that this lock is always a leaf lock.
	// All of this lock's critical sections should be
	// extremely short.
	lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
	lockVerifyMSize()
	sched.midle.init(unsafe.Offsetof(m{}.idleNode))
	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	gp := getg()
	if raceenabled {
		gp.racectx, raceprocctx0 = raceinit()
	}
	sched.maxmcount = 10000
	crashFD.Store(^uintptr(0))
	// The world starts stopped.
	worldStopped()
	godebug, parsedGodebug := getGodebugEarly()
	if parsedGodebug {
		parseRuntimeDebugVars(godebug)
	}
	ticks.init() // run as early as possible
	moduledataverify()
	stackinit()
	randinit() // must run before mallocinit, alginit, mcommoninit
	mallocinit()
	cpuinit(godebug) // must run before alginit
	alginit()        // maps, hash, rand must not be used before this call
	mcommoninit(gp.m, -1)
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules
	stkobjinit()    // must run before GC starts
	sigsave(&gp.m.sigmask)
	initSigmask = gp.m.sigmask
	goargs()
	goenvs()
	secure()
	checkfds()
	if !parsedGodebug {
		// Some platforms, e.g., Windows, didn't make env vars available "early",
		// so try again now.
		parseRuntimeDebugVars(gogetenv("GODEBUG"))
	}
	finishDebugVarsSetup()
	gcinit()
	// Allocate stack space that can be used when crashing due to bad stack
	// conditions, e.g. morestack on g0.
	gcrash.stack = stackalloc(16384)
	gcrash.stackguard0 = gcrash.stack.lo + 1000
	gcrash.stackguard1 = gcrash.stack.lo + 1000
	// if disableMemoryProfiling is set, update MemProfileRate to 0 to turn off memprofile.
	// Note: parsedebugvars may update MemProfileRate, but when disableMemoryProfiling is
	// set to true by the linker, it means that nothing is consuming the profile, it is
	// safe to set MemProfileRate to 0.
	if disableMemoryProfiling {
		MemProfileRate = 0
	}
	// mcommoninit runs before parsedebugvars, so init profstacks again.
	mProfStackInit(gp.m)
	defaultGOMAXPROCSInit()
	lock(&sched.lock)
	sched.lastpoll.Store(nanotime())
	var procs int32
	if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
		procs = int32(n)
		sched.customGOMAXPROCS = true
	} else {
		// Use numCPUStartup for initial GOMAXPROCS for two reasons:
		//
		// 1. We just computed it in osinit, recomputing is (minorly) wasteful.
		//
		// 2. More importantly, if debug.containermaxprocs == 0 &&
		//    debug.updatemaxprocs == 0, we want to guarantee that
		//    runtime.GOMAXPROCS(0) always equals runtime.NumCPU (which is
		//    just numCPUStartup).
		procs = defaultGOMAXPROCS(numCPUStartup)
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}
	unlock(&sched.lock)
	// World is effectively started now, as P's can run.
	worldStarted()
	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
	if len(modinfo) == 1 {
		// Condition should never trigger. This code just serves
		// to ensure runtime·modinfo is kept in the resulting binary.
		modinfo = ""
	}
}
// dumpgstatus prints debugging information about gp and the current g.
func dumpgstatus(gp *g) {
	thisg := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
}
// checkmcount throws if the number of Go-created threads exceeds the
// SetMaxThreads limit.
//
// sched.lock must be held.
func checkmcount() {
	assertLockHeld(&sched.lock)

	// Exclude extra M's, which are used for cgocallback from threads
	// created in C.
	//
	// The purpose of the SetMaxThreads limit is to avoid accidental fork
	// bomb from something like millions of goroutines blocking on system
	// calls, causing the runtime to create millions of threads. By
	// definition, this isn't a problem for threads created in C, so we
	// exclude them from the limit. See https://go.dev/issue/60004.
	count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
	if count > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}
// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.
func mReserveID() int64 {
	assertLockHeld(&sched.lock)

	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	id := sched.mnext
	sched.mnext++
	checkmcount()
	return id
}
// mcommoninit performs the common (OS-independent) parts of initializing
// a new m and publishes it on allm.
//
// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
func mcommoninit(mp *m, id int64) {
	gp := getg()

	// g0 stack won't make sense for user (and is not necessary unwindable).
	if gp != gp.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)

	// Use the caller-reserved ID if one was provided, otherwise reserve
	// a fresh one under sched.lock.
	if id >= 0 {
		mp.id = id
	} else {
		mp.id = mReserveID()
	}

	mp.self = newMWeakPointer(mp)
	mrandinit(mp)
	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall and others iterate over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
	mProfStackInit(mp)
}
// mProfStackInit is used to eagerly initialize stack trace buffers for
// profiling. Lazy allocation would have to deal with reentrancy issues in
// malloc and runtime locks for mLockProfile.
// TODO(mknyszek): Implement lazy allocation if this becomes a problem.
func mProfStackInit(mp *m) {
	if debug.profstackdepth == 0 {
		// Either the user set debug.profstack to 0, or we're being
		// called from schedinit before parsedebugvars; nothing to
		// allocate.
		return
	}
	mp.profStack = makeProfStackFP()
	mp.mLockProfile.stack = makeProfStackFP()
}
// makeProfStackFP creates a buffer large enough to hold a maximum-sized stack
// trace as well as any additional frames needed for frame pointer unwinding
// with delayed inline expansion.
func makeProfStackFP() []uintptr {
	// The "1" term is to account for the first stack entry being
	// taken up by a "skip" sentinel value for profilers which
	// defer inline frame expansion until the profile is reported.
	// The "maxSkip" term is for frame pointer unwinding, where we
	// want to end up with debug.profstackdepth frames but will discard
	// some "physical" frames to account for skipping.
	return make([]uintptr, 1+maxSkip+debug.profstackdepth)
}
// makeProfStack returns a buffer large enough to hold a maximum-sized stack
// trace (debug.profstackdepth entries).
func makeProfStack() []uintptr {
	return make([]uintptr, debug.profstackdepth)
}
// pprof_makeProfStack is linknamed so that runtime/pprof can allocate a
// profiling stack buffer of the configured depth.
//
//go:linkname pprof_makeProfStack
func pprof_makeProfStack() []uintptr { return makeProfStack() }
// becomeSpinning marks mp as spinning: it increments the global count of
// spinning Ms and clears the needspinning request flag.
func (mp *m) becomeSpinning() {
	mp.spinning = true
	sched.nmspinning.Add(1)
	sched.needspinning.Store(0)
}
// Take a snapshot of allp, for use after dropping the P.
//
// Must be called with a P, but the returned slice may be used after dropping
// the P. The M holds a reference on the snapshot to keep the backing array
// alive.
//
//go:yeswritebarrierrec
func (mp *m) snapshotAllp() []*p {
	// Storing the slice on the M (not just returning it) is what keeps the
	// backing array reachable once the P is released.
	mp.allpSnapshot = allp
	return mp.allpSnapshot
}
// Clear the saved allp snapshot. Should be called as soon as the snapshot is
// no longer required.
//
// Must be called after reacquiring a P, as it requires a write barrier.
//
//go:yeswritebarrierrec
func (mp *m) clearAllpSnapshot() {
	mp.allpSnapshot = nil
}
// hasCgoOnStack reports whether mp has a nonzero count of active cgo calls
// or is an extra M (created for callbacks from C threads).
func (mp *m) hasCgoOnStack() bool {
	if mp.ncgo > 0 {
		return true
	}
	return mp.isextra
}
const (
	// osHasLowResTimer indicates that the platform's internal timer system has a low resolution,
	// typically on the order of 1 ms or more.
	osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"

	// osHasLowResClockInt is osHasLowResClock but in integer form, so it can be used to create
	// constants conditionally.
	osHasLowResClockInt = goos.IsWindows

	// osHasLowResClock indicates that timestamps produced by nanotime on the platform have a
	// low resolution, typically on the order of 1 ms or more.
	osHasLowResClock = osHasLowResClockInt > 0
)
// Mark gp ready to run: transition it from _Gwaiting to _Grunnable and put it
// on the current P's run queue (as the next G to run if next is true).
func ready(gp *g, traceskip int, next bool) {
	status := readgstatus(gp)

	// Mark runnable.
	mp := acquirem() // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	trace := traceAcquire()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.ok() {
		trace.GoUnpark(gp, traceskip)
		traceRelease(trace)
	}
	runqput(mp.p.ptr(), gp, next)
	// Wake a P if there may be idle capacity to run the new G.
	wakep()
	releasem(mp)
}
// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world.
var freezing atomic.Bool
// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	freezing.Store(true)
	if debug.dontfreezetheworld > 0 {
		// Don't preempt Ps to stop goroutines. That will perturb
		// scheduler state, making debugging more difficult. Instead,
		// allow goroutines to continue execution.
		//
		// fatalpanic will tracebackothers to trace all goroutines. It
		// is unsafe to trace a running goroutine, so tracebackothers
		// will skip running goroutines. That is OK and expected, we
		// expect users of dontfreezetheworld to use core files anyway.
		//
		// However, allowing the scheduler to continue running free
		// introduces a race: a goroutine may be stopped when
		// tracebackothers checks its status, and then start running
		// later when we are in the middle of traceback, potentially
		// causing a crash.
		//
		// To mitigate this, when an M naturally enters the scheduler,
		// schedule checks if freezing is set and if so stops
		// execution. This guarantees that while Gs can transition from
		// running to stopped, they can never transition from stopped
		// to running.
		//
		// The sleep here allows racing Ms that missed freezing and are
		// about to run a G to complete the transition to running
		// before we start traceback.
		usleep(1000)
		return
	}

	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		sched.gcwaiting.Store(true)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}
// All reads and writes of g's status go through readgstatus, casgstatus
// castogscanstatus, casfrom_Gscanstatus.
//
//go:nosplit
func readgstatus(gp *g) uint32 {
	return gp.atomicstatus.Load()
}
// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall,
		_Gscanleaked,
		_Gscanpreempted,
		_Gscandeadextra:
		// The only legal exit from a scan state is to the matching
		// non-scan state (oldval with the _Gscan bit cleared).
		if newval == oldval&^_Gscan {
			success = gp.atomicstatus.CompareAndSwap(oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	// Release the lock-rank "lock" acquired when entering the scan state.
	releaseLockRankAndM(lockRankGscan)
}
// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gleaked,
		_Gsyscall,
		_Gdeadextra:
		// The only legal entry into a scan state is from the matching
		// non-scan state (oldval with the _Gscan bit set).
		if newval == oldval|_Gscan {
			r := gp.atomicstatus.CompareAndSwap(oldval, newval)
			if r {
				acquireLockRankAndM(lockRankGscan)
			}
			return r
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("bad oldval passed to castogscanstatus")
	return false
}
// casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track
// various latencies on every transition instead of sampling them.
var casgstatusAlwaysTrack = false
// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			// Call on the systemstack to prevent print and throw from counting
			// against the nosplit stack reservation.
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	lockWithRankMayAcquire(nil, lockRankGscan)

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
			systemstack(func() {
				// Call on the systemstack to prevent throw from counting
				// against the nosplit stack reservation.
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			// Spin briefly before falling back to an OS yield.
			for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	// Notify the goroutine's synctest bubble, if any, of the transition.
	if gp.bubble != nil {
		systemstack(func() {
			gp.bubble.changegstatus(gp, oldval, newval)
		})
	}

	if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
		// Track every gTrackingPeriod time a goroutine transitions out of _Grunning or _Gsyscall.
		// Do not track _Grunning <-> _Gsyscall transitions, since they're two very similar states.
		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
			gp.tracking = true
		}
		gp.trackingSeq++
	}
	if !gp.tracking {
		return
	}

	// Handle various kinds of tracking.
	//
	// Currently:
	// - Time spent in runnable.
	// - Time spent blocked on a sync.Mutex or sync.RWMutex.
	switch oldval {
	case _Grunnable:
		// We transitioned out of runnable, so measure how much
		// time we spent in this state and add it to
		// runnableTime.
		now := nanotime()
		gp.runnableTime += now - gp.trackingStamp
		gp.trackingStamp = 0
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock, measure it. Note that because we're
		// sampling, we have to multiply by our sampling period to get
		// a more representative estimate of the absolute value.
		// gTrackingPeriod also represents an accurate sampling period
		// because we can only enter this state from _Grunning.
		now := nanotime()
		sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
		gp.trackingStamp = 0
	}
	switch newval {
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock. Write down the timestamp.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunnable:
		// We just transitioned into runnable, so record what
		// time that happened.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunning:
		// We're transitioning into running, so turn off
		// tracking and record how much time we spent in
		// runnable.
		gp.tracking = false
		sched.timeToRun.record(gp.runnableTime)
		gp.runnableTime = 0
	}
}
// casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason.
//
// Use this over casgstatus when possible to ensure that a waitreason is set.
func casGToWaiting(gp *g, old uint32, reason waitReason) {
	// Set the wait reason before calling casgstatus, because casgstatus will use it.
	gp.waitreason = reason
	casgstatus(gp, old, _Gwaiting)
}
// casGToWaitingForSuspendG transitions gp from old to _Gwaiting, and sets the wait reason.
// The wait reason must be a valid isWaitingForSuspendG wait reason.
//
// While a goroutine is in this state, its stack is effectively pinned.
// The garbage collector must not shrink or otherwise mutate the goroutine's stack.
//
// Use this over casgstatus when possible to ensure that a waitreason is set.
func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
	if !reason.isWaitingForSuspendG() {
		throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
	}
	casGToWaiting(gp, old, reason)
}
// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
//
// TODO(austin): This is the only status operation that both changes
// the status and locks the _Gscan bit. Rethink this.
func casGToPreemptScan(gp *g, old, new uint32) {
	if old != _Grunning || new != _Gscan|_Gpreempted {
		throw("bad g transition")
	}
	acquireLockRankAndM(lockRankGscan)
	// Spin until the CAS succeeds; gp is running, so its status will
	// become _Grunning-observable eventually.
	for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
	}
	// We never notify gp.bubble that the goroutine state has moved
	// from _Grunning to _Gpreempted. We call bubble.changegstatus
	// after status changes happen, but doing so here would violate the
	// ordering between the gscan and synctest locks. The bubble doesn't
	// distinguish between _Grunning and _Gpreempted anyway, so not
	// notifying it is fine.
}
// casGFromPreempted attempts to transition gp from _Gpreempted to
// _Gwaiting. If successful, the caller is responsible for
// re-scheduling gp.
func casGFromPreempted(gp *g, old, new uint32) bool {
	if old != _Gpreempted || new != _Gwaiting {
		throw("bad g transition")
	}
	// Set the wait reason before the CAS, consistent with casGToWaiting.
	gp.waitreason = waitReasonPreempted
	if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
		return false
	}
	if bubble := gp.bubble; bubble != nil {
		bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
	}
	return true
}
// stwReason is an enumeration of reasons the world is stopping.
type stwReason uint8

// Reasons to stop-the-world.
//
// Avoid reusing reasons and add new ones instead.
const (
	stwUnknown                     stwReason = iota // "unknown"
	stwGCMarkTerm                                   // "GC mark termination"
	stwGCSweepTerm                                  // "GC sweep termination"
	stwWriteHeapDump                                // "write heap dump"
	stwGoroutineProfile                             // "goroutine profile"
	stwGoroutineProfileCleanup                      // "goroutine profile cleanup"
	stwAllGoroutinesStack                           // "all goroutines stack trace"
	stwReadMemStats                                 // "read mem stats"
	stwAllThreadsSyscall                            // "AllThreadsSyscall"
	stwGOMAXPROCS                                   // "GOMAXPROCS"
	stwStartTrace                                   // "start trace"
	stwStopTrace                                    // "stop trace"
	stwForTestCountPagesInUse                       // "CountPagesInUse (test)"
	stwForTestReadMetricsSlow                       // "ReadMetricsSlow (test)"
	stwForTestReadMemStatsSlow                      // "ReadMemStatsSlow (test)"
	stwForTestPageCachePagesLeaked                  // "PageCachePagesLeaked (test)"
	stwForTestResetDebugLog                         // "ResetDebugLog (test)"
)
// String returns the human-readable name of the stop-the-world reason.
func (r stwReason) String() string {
	return stwReasonStrings[r]
}
// isGC reports whether the stop-the-world reason is one of the GC
// termination phases (mark or sweep termination).
func (r stwReason) isGC() bool {
	switch r {
	case stwGCMarkTerm, stwGCSweepTerm:
		return true
	}
	return false
}
// stwReasonStrings maps each stwReason to its display string.
//
// If you add to this list, also add it to src/internal/trace/parser.go.
// If you change the values of any of the stw* constants, bump the trace
// version number and make a copy of this.
var stwReasonStrings = [...]string{
	stwUnknown:                     "unknown",
	stwGCMarkTerm:                  "GC mark termination",
	stwGCSweepTerm:                 "GC sweep termination",
	stwWriteHeapDump:               "write heap dump",
	stwGoroutineProfile:            "goroutine profile",
	stwGoroutineProfileCleanup:     "goroutine profile cleanup",
	stwAllGoroutinesStack:          "all goroutines stack trace",
	stwReadMemStats:                "read mem stats",
	stwAllThreadsSyscall:           "AllThreadsSyscall",
	stwGOMAXPROCS:                  "GOMAXPROCS",
	stwStartTrace:                  "start trace",
	stwStopTrace:                   "stop trace",
	stwForTestCountPagesInUse:      "CountPagesInUse (test)",
	stwForTestReadMetricsSlow:      "ReadMetricsSlow (test)",
	stwForTestReadMemStatsSlow:     "ReadMemStatsSlow (test)",
	stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
	stwForTestResetDebugLog:        "ResetDebugLog (test)",
}
// worldStop provides context from the stop-the-world required by the
// start-the-world.
type worldStop struct {
	reason           stwReason // why the world was stopped
	startedStopping  int64     // nanotime when stopping began (after sched.lock was acquired)
	finishedStopping int64     // nanotime when all Ps were confirmed stopped
	stoppingCPUTime  int64     // total CPU time spent by Ps in _Pgcstop up to finishedStopping
}

// Temporary variable for stopTheWorld, when it can't write to the stack.
//
// Protected by worldsema.
var stopTheWorldContext worldStop
// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and records reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
//
// Returns the STW context. When starting the world, this context must be
// passed to startTheWorld.
func stopTheWorld(reason stwReason) worldStop {
	semacquire(&worldsema)
	gp := getg()
	// Disable preemption of this goroutine for the duration of the stop.
	gp.m.preemptoff = reason.String()
	systemstack(func() {
		stopTheWorldContext = stopTheWorldWithSema(reason) // avoid write to stack
	})
	return stopTheWorldContext
}
// startTheWorld undoes the effects of stopTheWorld.
//
// w must be the worldStop returned by stopTheWorld.
func startTheWorld(w worldStop) {
	systemstack(func() { startTheWorldWithSema(0, w) })

	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	//
	// Release worldsema with direct handoff to the next waiter, but
	// acquirem so that semrelease1 doesn't try to yield our time.
	//
	// Otherwise if e.g. ReadMemStats is being called in a loop,
	// it might stomp on other attempts to stop the world, such as
	// for starting or ending GC. The operation this blocks is
	// so heavy-weight that we should just try to be as fair as
	// possible here.
	//
	// We don't want to just allow us to get preempted between now
	// and releasing the semaphore because then we keep everyone
	// (including, for example, GCs) waiting longer.
	mp := acquirem()
	mp.preemptoff = ""
	semrelease1(&worldsema, true, 0)
	releasem(mp)
}
// stopTheWorldGC has the same effect as stopTheWorld, but blocks
// until the GC is not running. It also blocks a GC from starting
// until startTheWorldGC is called.
func stopTheWorldGC(reason stwReason) worldStop {
	// Acquire gcsema first to block out the GC before stopping the world.
	semacquire(&gcsema)
	return stopTheWorld(reason)
}
// startTheWorldGC undoes the effects of stopTheWorldGC.
//
// w must be the worldStop returned by stopTheWorld.
func startTheWorldGC(w worldStop) {
	startTheWorld(w)
	// Release gcsema last, mirroring the acquisition order in stopTheWorldGC.
	semrelease(&gcsema)
}
// Holding worldsema grants an M the right to try to stop the world.
var worldsema uint32 = 1

// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done. In particular, it prevents gomaxprocs
// from changing concurrently.
//
// TODO(mknyszek): Once gomaxprocs and the execution tracer can handle
// being changed/enabled during a GC, remove this.
var gcsema uint32 = 1
// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should stopTheWorldWithSema on the system
// stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason"
//	var stw worldStop
//	systemstack(func() {
//		stw = stopTheWorldWithSema(reason)
//	})
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(func() {
//		now = startTheWorldWithSema(stw)
//	})
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
//
// Returns the STW context. When starting the world, this context must be
// passed to startTheWorldWithSema.
//
//go:systemstack
func stopTheWorldWithSema(reason stwReason) worldStop {
	// Mark the goroutine which called stopTheWorld preemptible so its
	// stack may be scanned by the GC or observed by the execution tracer.
	//
	// This lets a mark worker scan us or the execution tracer take our
	// stack while we try to stop the world since otherwise we could get
	// in a mutual preemption deadlock.
	//
	// casGToWaitingForSuspendG marks the goroutine as ineligible for a
	// stack shrink, effectively pinning the stack in memory for the duration.
	//
	// N.B. The execution tracer is not aware of this status transition and
	// handles it specially based on the wait reason.
	casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)

	trace := traceAcquire()
	if trace.ok() {
		trace.STWStart(reason)
		traceRelease(trace)
	}
	gp := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if gp.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	start := nanotime() // exclude time waiting for sched.lock from start and total time metrics.
	sched.stopwait = gomaxprocs
	sched.gcwaiting.Store(true)
	preemptall()

	// Stop current P.
	gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	gp.m.p.ptr().gcStopTime = start
	sched.stopwait--

	// Try to retake all P's in syscalls.
	for _, pp := range allp {
		if thread, ok := setBlockOnExitSyscall(pp); ok {
			thread.gcstopP()
			thread.resume()
		}
	}

	// Stop idle Ps.
	now := nanotime()
	for {
		pp, _ := pidleget(now)
		if pp == nil {
			break
		}
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// Wait for remaining Ps to stop voluntarily.
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	finish := nanotime()
	startTime := finish - start
	if reason.isGC() {
		sched.stwStoppingTimeGC.record(startTime)
	} else {
		sched.stwStoppingTimeOther.record(startTime)
	}

	// Double-check we actually stopped everything, and all the invariants hold.
	// Also accumulate all the time spent by each P in _Pgcstop up to the point
	// where everything was stopped. This will be accumulated into the total pause
	// CPU time by the caller.
	stoppingCPUTime := int64(0)
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, pp := range allp {
			if pp.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
			if pp.gcStopTime == 0 && bad == "" {
				bad = "stopTheWorld: broken CPU time accounting"
			}
			stoppingCPUTime += finish - pp.gcStopTime
			pp.gcStopTime = 0
		}
	}
	if freezing.Load() {
		// Some other thread is panicking. This can cause the
		// sanity checks above to fail if the panic happens in
		// the signal handler on a stopped thread. Either way,
		// we should halt this thread.
		//
		// Acquiring deadlock twice can never succeed; this wedges
		// the thread permanently.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}

	worldStopped()

	// Switch back to _Grunning, now that the world is stopped.
	casgstatus(getg().m.curg, _Gwaiting, _Grunning)

	return worldStop{
		reason:           reason,
		startedStopping:  start,
		finishedStopping: finish,
		stoppingCPUTime:  stoppingCPUTime,
	}
}
// startTheWorldWithSema is the core implementation of startTheWorld.
//
// w must be the worldStop returned by the matching stopTheWorld call.
//
// now is the current time; prefer to pass 0 to capture a fresh timestamp.
//
// startTheWorldWithSema returns now.
func startTheWorldWithSema(now int64, w worldStop) int64 {
	assertWorldStopped()

	mp := acquirem() // disable preemption because it can be holding p in a local var
	if netpollinited() {
		list, delta := netpoll(0) // non-blocking
		injectglist(&list)
		netpollAdjustWaiters(delta)
	}
	lock(&sched.lock)

	// Apply a pending GOMAXPROCS change, if any.
	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting.Store(false)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	worldStarted()

	// Hand each runnable P to an M: wake a parked M that already claimed
	// the P, or start a new one.
	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p, -1)
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	if now == 0 {
		now = nanotime()
	}
	totalTime := now - w.startedStopping
	if w.reason.isGC() {
		sched.stwTotalTimeGC.record(totalTime)
	} else {
		sched.stwTotalTimeOther.record(totalTime)
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.STWDone()
		traceRelease(trace)
	}

	// Wakeup an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	wakep()

	releasem(mp)

	return now
}
// usesLibcall indicates whether this runtime performs system calls
// via libcall.
func usesLibcall() bool {
	return GOOS == "aix" || GOOS == "darwin" || GOOS == "illumos" ||
		GOOS == "ios" || GOOS == "openbsd" || GOOS == "solaris" ||
		GOOS == "windows"
}
// mStackIsSystemAllocated indicates whether this runtime starts on a
// system-allocated stack.
func mStackIsSystemAllocated() bool {
	return GOOS == "aix" || GOOS == "darwin" || GOOS == "plan9" ||
		GOOS == "illumos" || GOOS == "ios" || GOOS == "openbsd" ||
		GOOS == "solaris" || GOOS == "windows"
}
// mstart is the entry-point for new Ms.
// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0.
func mstart()
// mstart0 is the Go entry-point for new Ms.
// This must not split the stack because we may not even have stack
// bounds set up yet.
//
// May run during STW (because it doesn't have a P yet), so write
// barriers are not allowed.
//
//go:nosplit
//go:nowritebarrierrec
func mstart0() {
	gp := getg()

	osStack := gp.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		// minit may update the stack bounds.
		//
		// Note: these bounds may not be very accurate.
		// We set hi to &size, but there are things above
		// it. The 1024 is supposed to compensate this,
		// but is somewhat arbitrary.
		size := gp.stack.hi
		if size == 0 {
			size = 16384 * sys.StackGuardMultiplier
		}
		gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		gp.stack.lo = gp.stack.hi - size + 1024
	}
	// Initialize stack guard so that we can start calling regular
	// Go code.
	gp.stackguard0 = gp.stack.lo + stackGuard
	// This is the g0, so we can also call go:systemstack
	// functions, which check stackguard1.
	gp.stackguard1 = gp.stackguard0
	mstart1()

	// Exit this thread.
	if mStackIsSystemAllocated() {
		// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
		// the stack, but put it in gp.stack before mstart,
		// so the logic above hasn't set osStack yet.
		osStack = true
	}
	mexit(osStack)
}
// mstart1 completes M initialization (asminit, minit, signal setup) and
// enters the scheduler; it never returns via a normal call return.
//
// The go:noinline is to guarantee the sys.GetCallerPC/sys.GetCallerSP below are safe,
// so that we can set up g0.sched to return to the call of mstart1 above.
//
//go:noinline
func mstart1() {
	gp := getg()

	if gp != gp.m.g0 {
		throw("bad runtime·mstart")
	}

	// Set up m.g0.sched as a label returning to just
	// after the mstart1 call in mstart0 above, for use by goexit0 and mcall.
	// We're never coming back to mstart1 after we call schedule,
	// so other calls can reuse the current frame.
	// And goexit0 does a gogo that needs to return from mstart1
	// and let mstart0 exit the thread.
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.sched.pc = sys.GetCallerPC()
	gp.sched.sp = sys.GetCallerSP()

	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if gp.m == &m0 {
		mstartm0()
	}

	if debug.dataindependenttiming == 1 {
		sys.EnableDIT()
	}

	// Run the M's start function, if any, before acquiring a P.
	if fn := gp.m.mstartfn; fn != nil {
		fn()
	}

	if gp.m != &m0 {
		acquirep(gp.m.nextp.ptr())
		gp.m.nextp = 0
	}
	schedule()
}
// mstartm0 implements part of mstart1 that only runs on the m0.
//
// Write barriers are allowed here because we know the GC can't be
// running yet, so they'll be no-ops.
//
//go:yeswritebarrierrec
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	// An extra M is also needed on Windows for callbacks created by
	// syscall.NewCallback. See issue #6751 for details.
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}
// mPark causes a thread to park itself, returning once woken.
//
//go:nosplit
func mPark() {
	gp := getg()
	// This M might stay parked through an entire GC cycle.
	// Erase any leftovers on the signal stack.
	if goexperiment.RuntimeSecret {
		eraseSecretsSignalStk()
	}
	// Sleep on the M's park note, then clear it for the next park.
	notesleep(&gp.m.park)
	noteclear(&gp.m.park)
}
// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.
//
//go:yeswritebarrierrec
func mexit(osStack bool) {
	mp := getg().m

	if mp == &m0 {
		// This is the main thread. Just wedge it.
		//
		// On Linux, exiting the main thread puts the process
		// into a non-waitable zombie state. On Plan 9,
		// exiting the main thread unblocks wait even though
		// other threads are still running. On Solaris we can
		// neither exitThread nor return from mstart. Other
		// bad things probably happen on other platforms.
		//
		// We could try to clean up this M more before wedging
		// it, but that complicates signal handling.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		mPark()
		throw("locked m0 woke up")
	}

	sigblock(true)
	unminit()

	// Free the gsignal stack.
	if mp.gsignal != nil {
		stackfree(mp.gsignal.stack)
		if valgrindenabled {
			valgrindDeregisterStack(mp.gsignal.valgrindStackID)
			mp.gsignal.valgrindStackID = 0
		}
		// On some platforms, when calling into VDSO (e.g. nanotime)
		// we store our g on the gsignal stack, if there is one.
		// Now the stack is freed, unlink it from the m, so we
		// won't write to it when calling VDSO code.
		mp.gsignal = nil
	}

	// Free vgetrandom state.
	vgetrandomDestroy(mp)

	// Clear the self pointer so Ps don't access this M after it is freed,
	// or keep it alive.
	mp.self.clear()

	// Remove m from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == mp {
			*pprev = mp.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	// Events must not be traced after this point.

	// Delay reaping m until it's done with the stack.
	//
	// Put mp on the free list, though it will not be reaped while freeWait
	// is freeMWait. mp is no longer reachable via allm, so even if it is
	// on an OS stack, we must keep a reference to mp alive so that the GC
	// doesn't free mp while we are still using it.
	//
	// Note that the free list must not be linked through alllink because
	// some functions walk allm without locking, so may be using alllink.
	//
	// N.B. It's important that the M appears on the free list simultaneously
	// with it being removed so that the tracer can find it.
	mp.freeWait.Store(freeMWait)
	mp.freelink = sched.freem
	sched.freem = mp
	unlock(&sched.lock)

	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
	sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())

	// Release the P.
	handoffp(releasep())
	// After this point we must not have write barriers.

	// Invoke the deadlock detector. This must happen after
	// handoffp because it may have started a new M to take our
	// P's work.
	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if GOOS == "darwin" || GOOS == "ios" {
		// Make sure pendingPreemptSignals is correct when an M exits.
		// For #41702.
		if mp.signalPending.Load() != 0 {
			pendingPreemptSignals.Add(-1)
		}
	}

	// Destroy all allocated resources. After this is called, we may no
	// longer take any locks.
	mdestroy(mp)

	if osStack {
		// No more uses of mp, so it is safe to drop the reference.
		mp.freeWait.Store(freeMRef)

		// Return from mstart and let the system thread
		// library free the g0 stack and terminate the thread.
		return
	}

	// mstart is the thread's entry point, so there's nothing to
	// return to. Exit the thread directly. exitThread will clear
	// m.freeWait when it's done with the stack and the m can be
	// reaped.
	exitThread(&mp.freeWait)
}
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema. fn must not refer to any
// part of the current goroutine's stack, since the GC may move it.
//
// reason is recorded as the current goroutine's wait reason for the
// duration of the ragged barrier (while it sits in _Gwaiting).
func forEachP(reason waitReason, fn func(*p)) {
	systemstack(func() {
		gp := getg().m.curg
		// Mark the user stack as preemptible so that it may be scanned
		// by the GC or observed by the execution tracer. Otherwise, our
		// attempt to force all P's to a safepoint could result in a
		// deadlock as we attempt to preempt a goroutine that's trying
		// to preempt us (e.g. for a stack scan).
		//
		// casGToWaitingForSuspendG marks the goroutine as ineligible for a
		// stack shrink, effectively pinning the stack in memory for the duration.
		//
		// N.B. The execution tracer is not aware of this status transition and
		// handles it specially based on the wait reason.
		casGToWaitingForSuspendG(gp, _Grunning, reason)
		forEachPInternal(fn)
		// Restore the goroutine to _Grunning once the barrier is complete.
		casgstatus(gp, _Gwaiting, _Grunning)
	})
}
// forEachPInternal calls fn(p) for every P p when p reaches a GC safe point.
// It is the internal implementation of forEachP.
//
// The caller must hold worldsema and either must ensure that a GC is not
// running (otherwise this may deadlock with the GC trying to preempt this P)
// or it must leave its goroutine in a preemptible state before it switches
// to the systemstack. Due to these restrictions, prefer forEachP when possible.
//
//go:systemstack
func forEachPInternal(fn func(*p)) {
	mp := acquirem()
	pp := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	// We run fn on our own P directly below, so only the other
	// gomaxprocs-1 Ps need to be accounted for in safePointWait.
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p2 := range allp {
		if p2 != pp {
			atomic.Store(&p2.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or a system call from now on will observe
	// p.runSafePointFn == 1 and will call runSafePointFn when
	// changing its status to _Pidle.

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(pp)

	// Force Ps currently in a system call into _Pidle and hand them
	// off to induce safe point function execution.
	for _, p2 := range allp {
		if atomic.Load(&p2.runSafePointFn) != 1 {
			// Already ran it.
			continue
		}
		if thread, ok := setBlockOnExitSyscall(p2); ok {
			// Take the P away from the syscalling thread; handoffp
			// will run the safe point function on its behalf.
			thread.takeP()
			thread.resume()
			handoffp(p2)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			//
			// Requires system stack.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	// Sanity checks: everyone must have run fn by now.
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p2 := range allp {
		if p2.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}
// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//	    runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition in to _Pidle or
// when entering a system call to avoid a race where forEachP sees
// that the P is running just before the P goes into _Pidle/system call
// and neither forEachP nor the P run the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		// forEachP (or another path) already claimed it; nothing to do.
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		// We were the last P; wake the waiter in forEachPInternal.
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}
// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
//
// Set by cgo-generated code; nil when cgo is not in use.
var cgoThreadStart unsafe.Pointer
// cgothreadstart is the argument record passed to _cgo_thread_start
// (see newm1); layout must match the C side.
type cgothreadstart struct {
	g   guintptr       // g0 of the new M
	tls *uint64        // pointer to the new M's TLS slot
	fn  unsafe.Pointer // entry function for the new thread (mstart)
}
// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
// id is optional pre-allocated m ID. Omit by passing -1.
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows pp.
//
//go:yeswritebarrierrec
func allocm(pp *p, fn func(), id int64) *m {
	allocmLock.rlock()

	// The caller owns pp, but we may borrow (i.e., acquirep) it. We must
	// disable preemption to ensure it is not stolen, which would make the
	// caller lose ownership.
	acquirem()

	gp := getg()
	if gp.m.p == 0 {
		acquirep(pp) // temporarily borrow p for mallocs in this function
	}

	// Release the free M list. We need to do this somewhere and
	// this may free up a stack we can use.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			// Wait for freeWait to indicate that freem's stack is unused.
			wait := freem.freeWait.Load()
			if wait == freeMWait {
				// Still in use; keep it on the (rebuilt) free list.
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			// Drop any remaining trace resources.
			// Ms can continue to emit events all the way until wait != freeMWait,
			// so it's only safe to call traceThreadDestroy at this point.
			if traceEnabled() || traceShuttingDown() {
				traceThreadDestroy(freem)
			}
			// Free the stack if needed. For freeMRef, there is
			// nothing to do except drop freem from the sched.freem
			// list.
			if wait == freeMStack {
				// stackfree must be on the system stack, but allocm is
				// reachable off the system stack transitively from
				// startm.
				systemstack(func() {
					stackfree(freem.g0.stack)
					if valgrindenabled {
						valgrindDeregisterStack(freem.g0.valgrindStackID)
						freem.g0.valgrindStackID = 0
					}
				})
			}
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	// Allocate the m via mPadded (padded wrapper — see its definition);
	// only the embedded m is used here.
	mp := &new(mPadded).m
	mp.mstartfn = fn
	mcommoninit(mp, id)

	// In case of cgo or Solaris or illumos or Darwin, pthread_create will make us a stack.
	// Windows and Plan 9 will layout sched stack on OS stack.
	if iscgo || mStackIsSystemAllocated() {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(16384 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	// Drop the P only if we borrowed it above.
	if pp == gp.m.p.ptr() {
		releasep()
	}

	releasem(gp.m)
	allocmLock.runlock()
	return mp
}
// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via Casuintptr) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// It calls dropm to put the m back on the list,
// 1. when the callback is done with the m in non-pthread platforms,
// 2. or when the C thread exiting on pthread platforms.
//
// The signal argument indicates whether we're called from a signal
// handler.
//
//go:nosplit
func needm(signal bool) {
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Can also happen on Windows if a global ctor uses a
		// callback created by syscall.NewCallback. See issue #6751
		// for details.
		//
		// Can not throw, because scheduler is not initialized yet.
		writeErrStr("fatal error: cgo callback before cgo call\n")
		exit(1)
	}

	// Save and block signals before getting an M.
	// The signal handler may call needm itself,
	// and we must avoid a deadlock. Also, once g is installed,
	// any incoming signals will try to execute,
	// but we won't have the sigaltstack settings and other data
	// set up appropriately until the end of minit, which will
	// unblock the signals. This is the same dance as when
	// starting a new m to run Go code via newosproc.
	var sigmask sigset
	sigsave(&sigmask)
	sigblock(false)

	// getExtraM is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp, last := getExtraM()

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = last

	// Store the original signal mask for use by minit.
	mp.sigmask = sigmask

	// Install TLS on some platforms (previously setg
	// would do this if necessary).
	osSetupTLS(mp)

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack.
	setg(mp.g0)
	sp := sys.GetCallerSP()
	callbackUpdateSystemStack(mp, sp, signal)

	// We must mark that we are already in Go now.
	// Otherwise, we may call needm again when we get a signal, before cgocallbackg1,
	// which means the extram list may be empty, that will cause a deadlock.
	mp.isExtraInC = false

	// Initialize this thread to use the m.
	asminit()
	minit()

	// Emit a trace event for this dead -> syscall transition,
	// but only if we're not in a signal handler.
	//
	// N.B. the tracer can run on a bare M just fine, we just have
	// to make sure to do this before setg(nil) and unminit.
	var trace traceLocker
	if !signal {
		trace = traceAcquire()
	}

	// mp.curg is now a real goroutine.
	casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
	sched.ngsys.Add(-1)

	// This is technically inaccurate, but we set isExtraInC to false above,
	// and so we need to update addGSyscallNoP to keep the two pieces of state
	// consistent (it's only updated when isExtraInC is false). More specifically,
	// When we get to cgocallbackg and exitsyscall, we'll be looking for a P, and
	// since isExtraInC is false, we will decrement this metric.
	//
	// The inaccuracy is thankfully transient: only until this thread can get a P.
	// We're going into Go anyway, so it's okay to pretend we're a real goroutine now.
	addGSyscallNoP(mp)

	if !signal {
		if trace.ok() {
			trace.GoCreateSyscall(mp.curg)
			traceRelease(trace)
		}
	}
	// Remember whether we entered via a signal handler; dropm consults this.
	mp.isExtraInSig = signal
}
// needAndBindM acquires an extra m (outside a signal handler) and, when
// the C side has created the pthread key, binds the m to the current C
// thread so its exit destructor can return the m via dropm.
//
//go:nosplit
func needAndBindM() {
	needm(false)
	// Bind only when the pthread key exists and has been created.
	if key := _cgo_pthread_key_created; key != nil {
		if *(*uintptr)(key) != 0 {
			cgoBindM()
		}
	}
}
// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	// Claim all pending waiters in one shot; allocate one m per waiter.
	// If nobody is waiting, still guarantee the list is non-empty.
	switch waiters := extraMWaiters.Swap(0); {
	case waiters > 0:
		for ; waiters > 0; waiters-- {
			oneNewExtraM()
		}
	case extraMLength.Load() == 0:
		// Make sure there is at least one extra M.
		oneNewExtraM()
	}
}
// oneNewExtraM allocates an m and puts it on the extra list.
func oneNewExtraM() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil, -1)
	gp := malg(4096)
	gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	// Make the goroutine look like it is sitting in a syscall,
	// mirroring how cgo callbacks will find it.
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	// malg returns status as _Gidle. Change to _Gdeadextra before
	// adding to allg where GC can see it. _Gdeadextra hides this
	// from traceback and stack scans.
	casgstatus(gp, _Gidle, _Gdeadextra)
	// Lock gp and mp to each other, as for LockOSThread.
	gp.m = mp
	mp.curg = gp
	mp.isextra = true
	// mark we are in C by default.
	mp.isExtraInC = true
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = sched.goidgen.Add(1)
	if raceenabled {
		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
	}
	// put on allg for garbage collector
	allgadd(gp)

	// gp is now on the allg list, but we don't want it to be
	// counted by gcount. It would be more "proper" to increment
	// sched.ngfree, but that requires locking. Incrementing ngsys
	// has the same effect.
	sched.ngsys.Add(1)

	// Add m to the extra list.
	addExtraM(mp)
}
// dropm puts the current m back onto the extra list.
//
// 1. On systems without pthreads, like Windows
// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// 2. On systems with pthreads
// dropm is called while a non-Go thread is exiting.
// We allocate a pthread per-thread variable using pthread_key_create,
// to register a thread-exit-time destructor.
// And store the g into a thread-specific value associated with the pthread key,
// when first return back to C.
// So that the destructor would invoke dropm while the non-Go thread is exiting.
// This is much faster since it avoids expensive signal-related syscalls.
//
// This may run without a P, so //go:nowritebarrierrec is required.
//
// This may run with a different stack than was recorded in g0 (there is no
// call to callbackUpdateSystemStack prior to dropm), so this must be
// //go:nosplit to avoid the stack bounds check.
//
//go:nowritebarrierrec
//go:nosplit
func dropm() {
	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m

	// Emit a trace event for this syscall -> dead transition.
	//
	// N.B. the tracer can run on a bare M just fine, we just have
	// to make sure to do this before setg(nil) and unminit.
	var trace traceLocker
	if !mp.isExtraInSig {
		trace = traceAcquire()
	}

	// Return mp.curg to _Gdeadextra state.
	casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
	mp.curg.preemptStop = false
	sched.ngsys.Add(1)
	// Undo the accounting performed by needm's addGSyscallNoP.
	decGSyscallNoP(mp)

	if !mp.isExtraInSig {
		if trace.ok() {
			trace.GoDestroySyscall()
			traceRelease(trace)
		}
	}

	// Trash syscalltick so that it doesn't line up with mp.old.syscalltick anymore.
	//
	// In the new tracer, we model needm and dropm and a goroutine being created and
	// destroyed respectively. The m then might get reused with a different procid but
	// still with a reference to oldp, and still with the same syscalltick. The next
	// time a G is "created" in needm, it'll return and quietly reacquire its P from a
	// different m with a different procid, which will confuse the trace parser. By
	// trashing syscalltick, we ensure that it'll appear as if we lost the P to the
	// tracer parser and that we just reacquired it.
	//
	// Trash the value by decrementing because that gets us as far away from the value
	// the syscall exit code expects as possible. Setting to zero is risky because
	// syscalltick could already be zero (and in fact, is initialized to zero).
	mp.syscalltick--

	// Reset trace state unconditionally. This goroutine is being 'destroyed'
	// from the perspective of the tracer.
	mp.curg.trace.reset()

	// Flush all the M's buffers. This is necessary because the M might
	// be used on a different thread with a different procid, so we have
	// to make sure we don't write into the same buffer.
	if traceEnabled() || traceShuttingDown() {
		// Acquire sched.lock across thread destruction. One of the invariants of the tracer
		// is that a thread cannot disappear from the tracer's view (allm or freem) without
		// it noticing, so it requires that sched.lock be held over traceThreadDestroy.
		//
		// This isn't strictly necessary in this case, because this thread never leaves allm,
		// but the critical section is short and dropm is rare on pthread platforms, so just
		// take the lock and play it safe. traceThreadDestroy also asserts that the lock is held.
		lock(&sched.lock)
		traceThreadDestroy(mp)
		unlock(&sched.lock)
	}
	mp.isExtraInSig = false

	// Block signals before unminit.
	// Unminit unregisters the signal handling stack (but needs g on some systems).
	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
	// It's important not to try to handle a signal between those two steps.
	sigmask := mp.sigmask
	sigblock(false)
	unminit()
	setg(nil)

	// Clear g0 stack bounds to ensure that needm always refreshes the
	// bounds when reusing this M.
	g0 := mp.g0
	g0.stack.hi = 0
	g0.stack.lo = 0
	g0.stackguard0 = 0
	g0.stackguard1 = 0
	mp.g0StackAccurate = false

	putExtraM(mp)

	// Restore the original signal mask saved by needm.
	msigrestore(sigmask)
}
// cgoBindM stores the g0 of the current m into a thread-specific value.
//
// We allocate a pthread per-thread variable using pthread_key_create,
// to register a thread-exit-time destructor.
// We are here setting the thread-specific value of the pthread key, to enable the destructor.
// So that the pthread_key_destructor would dropm while the C thread is exiting.
//
// And the saved g will be used in pthread_key_destructor,
// since the g stored in the TLS by Go might be cleared in some platforms,
// before the destructor invoked, so, we restore g by the stored g, before dropm.
//
// We store g0 instead of m, to make the assembly code simpler,
// since we need to restore g0 in runtime.cgocallback.
//
// On systems without pthreads, like Windows, bindm shouldn't be used.
//
// NOTE: this always runs without a P, so, nowritebarrierrec required.
//
//go:nosplit
//go:nowritebarrierrec
func cgoBindM() {
	switch GOOS {
	case "windows", "plan9":
		fatal("bindm in unexpected GOOS")
	}
	gp := getg()
	if gp.m.g0 != gp {
		fatal("the current g is not g0")
	}
	if _cgo_bindm != nil {
		asmcgocall(_cgo_bindm, unsafe.Pointer(gp))
	}
}
// A helper function for EnsureDropM.
//
// getm should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - fortio.org/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname getm
func getm() uintptr {
	// Returned as uintptr (not *m) so callers cannot keep the m alive.
	return uintptr(unsafe.Pointer(getg().m))
}
var (
	// Locking linked list of extra M's, via mp.schedlink. Must be accessed
	// only via lockextra/unlockextra.
	//
	// Can't be atomic.Pointer[m] because we use an invalid pointer as a
	// "locked" sentinel value. M's on this list remain visible to the GC
	// because their mp.curg is on allgs.
	extraM atomic.Uintptr
	// Number of M's in the extraM list.
	extraMLength atomic.Uint32
	// Number of waiters in lockextra.
	extraMWaiters atomic.Uint32
	// Number of extra M's in use by threads.
	extraMInUse atomic.Uint32
)
// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//
//go:nosplit
func lockextra(nilokay bool) *m {
	// The sentinel value 1 marks the list as locked; it can never be a
	// valid m pointer.
	const locked = 1

	incr := false
	for {
		old := extraM.Load()
		if old == locked {
			// Someone else holds the list lock; spin.
			osyield_no_g()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Add 1 to the number of threads
				// waiting for an M.
				// This is cleared by newextram.
				extraMWaiters.Add(1)
				incr = true
			}
			usleep_no_g(1)
			continue
		}
		if extraM.CompareAndSwap(old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		osyield_no_g()
		continue
	}
}
// unlockextra unlocks the extra M list by installing mp as the new list
// head, adjusting the list length by delta. The Store is the unlock, so
// the length update must happen first.
//
//go:nosplit
func unlockextra(mp *m, delta int32) {
	extraMLength.Add(delta)
	extraM.Store(uintptr(unsafe.Pointer(mp)))
}
// getExtraM returns an M from the extra M list. Returns last == true if the
// list becomes empty because of this call.
//
// Spins waiting for an extra M, so caller must ensure that the list always
// contains or will soon contain at least one M.
//
//go:nosplit
func getExtraM() (mp *m, last bool) {
	mp = lockextra(false)
	extraMInUse.Add(1)
	// Pop mp off the list: its successor becomes the new head.
	// mp is ours now, so its schedlink cannot change after unlock.
	next := mp.schedlink.ptr()
	unlockextra(next, -1)
	return mp, next == nil
}
// Returns an extra M back to the list. mp must be from getExtraM. Newly
// allocated M's should use addExtraM.
//
//go:nosplit
func putExtraM(mp *m) {
	extraMInUse.Add(-1)
	addExtraM(mp)
}
// Adds a newly allocated M to the extra M list.
//
//go:nosplit
func addExtraM(mp *m) {
	// Push mp onto the list head; link to the old head before unlocking.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp, 1)
}
var (
	// allocmLock is locked for read when creating new Ms in allocm and their
	// addition to allm. Thus acquiring this lock for write blocks the
	// creation of new Ms.
	allocmLock rwmutex

	// execLock serializes exec and clone to avoid bugs or unspecified
	// behaviour around exec'ing while creating/destroying threads. See
	// issue #19546.
	execLock rwmutex
)
// These errors are reported (via writeErrStr) by some OS-specific
// versions of newosproc and newosproc0.
const (
	failthreadcreate  = "runtime: failed to create new OS thread\n"
	failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
)
// newmHandoff contains a list of m structures that need new OS threads.
// This is used by newm in situations where newm itself can't safely
// start an OS thread.
var newmHandoff struct {
	lock mutex

	// newm points to a list of M structures that need new OS
	// threads. The list is linked through m.schedlink.
	newm muintptr

	// waiting indicates that wake needs to be notified when an m
	// is put on the list.
	waiting bool
	wake    note

	// haveTemplateThread indicates that the templateThread has
	// been started. This is not protected by lock. Use cas to set
	// to 1.
	haveTemplateThread uint32
}
// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//
// id is optional pre-allocated m ID. Omit by passing -1.
//
//go:nowritebarrierrec
func newm(fn func(), pp *p, id int64) {
	// allocm adds a new M to allm, but they do not start until created by
	// the OS in newm1 or the template thread.
	//
	// doAllThreadsSyscall requires that every M in allm will eventually
	// start and be signal-able, even with a STW.
	//
	// Disable preemption here until we start the thread to ensure that
	// newm is not preempted between allocm and starting the new thread,
	// ensuring that anything added to allm is guaranteed to eventually
	// start.
	acquirem()

	mp := allocm(pp, fn, id)
	mp.nextp.set(pp)
	mp.sigmask = initSigmask
	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
		// We're on a locked M or a thread that may have been
		// started by C. The kernel state of this thread may
		// be strange (the user may have locked it for that
		// purpose). We don't want to clone that into another
		// thread. Instead, ask a known-good thread to create
		// the thread for us.
		//
		// This is disabled on Plan 9. See golang.org/issue/22227.
		//
		// TODO: This may be unnecessary on Windows, which
		// doesn't model thread creation off fork.
		lock(&newmHandoff.lock)
		if newmHandoff.haveTemplateThread == 0 {
			throw("on a locked thread with no template thread")
		}
		// Push mp onto the handoff list and wake the template thread
		// if it is parked waiting for work.
		mp.schedlink = newmHandoff.newm
		newmHandoff.newm.set(mp)
		if newmHandoff.waiting {
			newmHandoff.waiting = false
			notewakeup(&newmHandoff.wake)
		}
		unlock(&newmHandoff.lock)
		// The M has not started yet, but the template thread does not
		// participate in STW, so it will always process queued Ms and
		// it is safe to releasem.
		releasem(getg().m)
		return
	}
	newm1(mp)
	releasem(getg().m)
}
// newm1 starts the OS thread for mp, either via cgo's thread-start
// helper (so foreign code sees a properly initialized thread) or via
// newosproc directly. execLock is held for read around thread creation
// to serialize against exec (see issue #19546).
func newm1(mp *m) {
	if iscgo && _cgo_thread_start != nil {
		var ts cgothreadstart
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		if asanenabled {
			asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		execLock.rlock() // Prevent process clone.
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		execLock.runlock()
		return
	}
	execLock.rlock() // Prevent process clone.
	newosproc(mp)
	execLock.runlock()
}
// startTemplateThread starts the template thread if it is not already
// running.
//
// The calling thread must itself be in a known-good state.
func startTemplateThread() {
	if GOARCH == "wasm" { // no threads on wasm yet
		return
	}

	// Disable preemption to guarantee that the template thread will be
	// created before a park once haveTemplateThread is set.
	mp := acquirem()
	// Only the thread that wins the CAS creates the template thread.
	if atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
		newm(templateThread, nil, -1)
	}
	releasem(mp)
}
// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
//
// Many programs never need this, so templateThread is started lazily
// when we first enter a state that might lead to running on a thread
// in an unknown state.
//
// templateThread runs on an M without a P, so it must not have write
// barriers.
//
//go:nowritebarrierrec
func templateThread() {
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	// Loop forever: drain the handoff list, then park until woken.
	for {
		lock(&newmHandoff.lock)
		for newmHandoff.newm != 0 {
			newm := newmHandoff.newm.ptr()
			newmHandoff.newm = 0
			unlock(&newmHandoff.lock)
			// Start every M on the snapshot we took; the list is
			// linked through schedlink.
			for newm != nil {
				next := newm.schedlink.ptr()
				newm.schedlink = 0
				newm1(newm)
				newm = next
			}
			lock(&newmHandoff.lock)
		}
		// Announce that we're about to sleep so newm knows to wake us.
		newmHandoff.waiting = true
		noteclear(&newmHandoff.wake)
		unlock(&newmHandoff.lock)
		notesleep(&newmHandoff.wake)
	}
}
// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
	gp := getg()

	// Sanity checks: an M must not park while holding locks, a P, or
	// while marked spinning.
	if gp.m.locks != 0 {
		throw("stopm holding locks")
	}
	if gp.m.p != 0 {
		throw("stopm holding p")
	}
	if gp.m.spinning {
		throw("stopm spinning")
	}

	lock(&sched.lock)
	mput(gp.m)
	unlock(&sched.lock)
	mPark()
	// Whoever woke us handed us a P in nextp.
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
// mspinning is used as the mstartfn for Ms started by startm with
// spinning=true.
func mspinning() {
	// startm's caller incremented nmspinning. Set the new M's spinning.
	getg().m.spinning = true
}
// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and must provide a
// P. startm will set m.spinning in the newly started M.
//
// Callers passing a non-nil P must call from a non-preemptible context. See
// comment on acquirem below.
//
// Argument lockheld indicates whether the caller already acquired the
// scheduler lock. Callers holding the lock when making the call must pass
// true. The lock might be temporarily dropped, but will be reacquired before
// returning.
//
// Must not have write barriers because this may be called without a P.
//
//go:nowritebarrierrec
func startm(pp *p, spinning, lockheld bool) {
	// Disable preemption.
	//
	// Every owned P must have an owner that will eventually stop it in the
	// event of a GC stop request. startm takes transient ownership of a P
	// (either from argument or pidleget below) and transfers ownership to
	// a started M, which will be responsible for performing the stop.
	//
	// Preemption must be disabled during this transient ownership,
	// otherwise the P this is running on may enter GC stop while still
	// holding the transient P, leaving that P in limbo and deadlocking the
	// STW.
	//
	// Callers passing a non-nil P must already be in non-preemptible
	// context, otherwise such preemption could occur on function entry to
	// startm. Callers passing a nil P may be preemptible, so we must
	// disable preemption before acquiring a P from pidleget below.
	mp := acquirem()
	if !lockheld {
		lock(&sched.lock)
	}
	if pp == nil {
		if spinning {
			// TODO(prattmic): All remaining calls to this function
			// with _p_ == nil could be cleaned up to find a P
			// before calling startm.
			throw("startm: P required for spinning=true")
		}
		pp, _ = pidleget(0)
		if pp == nil {
			// No idle P available; nothing to do.
			if !lockheld {
				unlock(&sched.lock)
			}
			releasem(mp)
			return
		}
	}
	nmp := mget()
	if nmp == nil {
		// No M is available, we must drop sched.lock and call newm.
		// However, we already own a P to assign to the M.
		//
		// Once sched.lock is released, another G (e.g., in a syscall),
		// could find no idle P while checkdead finds a runnable G but
		// no running M's because this new M hasn't started yet, thus
		// throwing in an apparent deadlock.
		// This apparent deadlock is possible when startm is called
		// from sysmon, which doesn't count as a running M.
		//
		// Avoid this situation by pre-allocating the ID for the new M,
		// thus marking it as 'running' before we drop sched.lock. This
		// new M will eventually run the scheduler to execute any
		// queued G's.
		id := mReserveID()
		unlock(&sched.lock)

		var fn func()
		if spinning {
			// The caller incremented nmspinning, so set m.spinning in the new M.
			fn = mspinning
		}
		newm(fn, pp, id)

		if lockheld {
			lock(&sched.lock)
		}
		// Ownership transfer of pp committed by start in newm.
		// Preemption is now safe.
		releasem(mp)
		return
	}
	if !lockheld {
		unlock(&sched.lock)
	}
	// Sanity checks on the parked M we're about to wake.
	if nmp.spinning {
		throw("startm: m is spinning")
	}
	if nmp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(pp) {
		throw("startm: p has runnable gs")
	}
	// The caller incremented nmspinning, so set m.spinning in the new M.
	nmp.spinning = spinning
	nmp.nextp.set(pp)
	notewakeup(&nmp.park)
	// Ownership transfer of pp committed by wakeup. Preemption is now
	// safe.
	releasem(mp)
}
// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec
func handoffp(pp *p) {
	// handoffp must start an M in any situation where
	// findRunnable would return a G to run on pp.

	// if it has local work, start it straight away
	if !runqempty(pp) || !sched.runq.empty() {
		startm(pp, false, false)
		return
	}
	// if there's trace work to do, start it straight away
	if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
		startm(pp, false, false)
		return
	}
	// if it has GC work, start it straight away
	if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
		startm(pp, false, false)
		return
	}
	// no local work, check that there are no spinning/idle M's,
	// otherwise our help is not required
	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) { // TODO: fast atomic
		sched.needspinning.Store(0)
		startm(pp, true, false)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting.Load() {
		// A STW is in progress; park the P in _Pgcstop and notify
		// the stopper if we're the last one.
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	// Run any pending safe-point function on pp's behalf (see forEachP).
	if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
		sched.safePointFn(pp)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if !sched.runq.empty() {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}
	// If this is the last running P and nobody is polling network,
	// need to wakeup another M to poll network.
	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}

	// The scheduler lock cannot be held when calling wakeNetPoller below
	// because wakeNetPoller may call wakep which may call startm.
	when := pp.timers.wakeTime()
	pidleput(pp, 0)
	unlock(&sched.lock)

	if when != 0 {
		wakeNetPoller(when)
	}
}
// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
// Must be called with a P.
//
// wakep should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname wakep
func wakep() {
	// Be conservative about spinning threads, only start one if none exist
	// already.
	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		return
	}

	// Disable preemption until ownership of pp transfers to the next M in
	// startm. Otherwise preemption here would leave pp stuck waiting to
	// enter _Pgcstop.
	//
	// See preemption comment on acquirem in startm for more details.
	mp := acquirem()

	var pp *p
	lock(&sched.lock)
	pp, _ = pidlegetSpinning(0)
	if pp == nil {
		// No idle P available; undo the nmspinning increment taken above.
		if sched.nmspinning.Add(-1) < 0 {
			throw("wakep: negative nmspinning")
		}
		unlock(&sched.lock)
		releasem(mp)
		return
	}
	// Since we always have a P, the race in the "No M is available"
	// comment in startm doesn't apply during the small window between the
	// unlock here and lock in startm. A checkdead in between will always
	// see at least one running M (ours).
	unlock(&sched.lock)

	startm(pp, true, false)

	releasem(mp)
}
// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
func stoplockedm() {
	gp := getg()
	if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
		throw("stoplockedm: inconsistent locking")
	}
	if gp.m.p != 0 {
		// Schedule another M to run this p.
		pp := releasep()
		handoffp(pp)
	}
	incidlelocked(1)
	// Wait until another thread schedules lockedg again.
	mPark()
	// Sanity-check that the locked g was actually made runnable before we
	// were woken (ignoring a possible _Gscan bit).
	status := readgstatus(gp.m.lockedg.ptr())
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
		dumpgstatus(gp.m.lockedg.ptr())
		throw("stoplockedm: not runnable")
	}
	// Wire the P handed to us (via nextp) by whoever woke us.
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
// Schedules the locked m to run the locked gp.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func startlockedm(gp *g) {
	mp := gp.lockedm.ptr()
	if mp == getg().m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// directly handoff current P to the locked m
	incidlelocked(-1)
	pp := releasep()
	mp.nextp.set(pp)
	// Wake the locked M parked in stoplockedm, then park ourselves.
	notewakeup(&mp.park)
	stopm()
}
// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
func gcstopm() {
	gp := getg()

	if !sched.gcwaiting.Load() {
		throw("gcstopm: not waiting for gc")
	}
	if gp.m.spinning {
		gp.m.spinning = false
		// OK to just drop nmspinning here,
		// startTheWorld will unpark threads as necessary.
		if sched.nmspinning.Add(-1) < 0 {
			throw("gcstopm: negative nmspinning")
		}
	}
	// Release our P, mark it stopped for the GC, and notify the
	// stop-the-world initiator when the last P stops.
	pp := releasep()
	lock(&sched.lock)
	pp.status = _Pgcstop
	pp.gcStopTime = nanotime()
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}
// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
//
// Write barriers are allowed because this is called immediately after
// acquiring a P in several places.
//
//go:yeswritebarrierrec
func execute(gp *g, inheritTime bool) {
	mp := getg().m

	if goroutineProfile.active {
		// Make sure that gp has had its stack written out to the goroutine
		// profile, exactly as it was when the goroutine profiler first stopped
		// the world.
		tryRecordGoroutineProfile(gp, nil, osyield)
	}

	// Assign gp.m before entering _Grunning so running Gs have an M.
	mp.curg = gp
	gp.m = mp
	gp.syncSafePoint = false // Clear the flag, which may have been set by morestack.
	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + stackGuard
	if !inheritTime {
		// New time slice: bump the P's schedule tick.
		mp.p.ptr().schedtick++
	}

	if sys.DITSupported && debug.dataindependenttiming != 1 {
		if gp.ditWanted && !mp.ditEnabled {
			// The current M doesn't have DIT enabled, but the goroutine we're
			// executing does need it, so turn it on.
			sys.EnableDIT()
			mp.ditEnabled = true
		} else if !gp.ditWanted && mp.ditEnabled {
			// The current M has DIT enabled, but the goroutine we're executing does
			// not need it, so turn it off.
			// NOTE: turning off DIT here means that the scheduler will have DIT enabled
			// when it runs after this goroutine yields or is preempted. This may have
			// a minor performance impact on the scheduler.
			sys.DisableDIT()
			mp.ditEnabled = false
		}
	}

	// Check whether the profiler needs to be turned on or off.
	hz := sched.profilehz
	if mp.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	trace := traceAcquire()
	if trace.ok() {
		trace.GoStart()
		traceRelease(trace)
	}

	// Jump into gp's saved scheduling context; does not return.
	gogo(&gp.sched)
}
// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from local or global queue, poll network.
// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
// reader) so the caller should try to wake a P.
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
	mp := getg().m

	// The conditions here and in handoffp must agree: if
	// findRunnable would return a G to run, handoffp must start
	// an M.

top:
	// We may have collected an allp snapshot below. The snapshot is only
	// required in each loop iteration. Clear it to allow the GC to collect
	// the slice.
	mp.clearAllpSnapshot()

	pp := mp.p.ptr()
	if sched.gcwaiting.Load() {
		gcstopm()
		goto top
	}
	if pp.runSafePointFn != 0 {
		runSafePointFn()
	}

	// now and pollUntil are saved for work stealing later,
	// which may steal timers. It's important that between now
	// and then, nothing blocks, so these numbers remain mostly
	// relevant.
	now, pollUntil, _ := pp.timers.check(0, nil)

	// Try to schedule the trace reader.
	if traceEnabled() || traceShuttingDown() {
		gp := traceReader()
		if gp != nil {
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, true
		}
	}

	// Try to schedule a GC worker.
	if gcBlackenEnabled != 0 {
		gp, tnow := gcController.findRunnableGCWorker(pp, now)
		if gp != nil {
			return gp, false, true
		}
		now = tnow
	}

	// Check the global runnable queue once in a while to ensure fairness.
	// Otherwise two goroutines can completely occupy the local runqueue
	// by constantly respawning each other.
	if pp.schedtick%61 == 0 && !sched.runq.empty() {
		lock(&sched.lock)
		gp := globrunqget()
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Wake up the finalizer G.
	if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
		if gp := wakefing(); gp != nil {
			ready(gp, 0, true)
		}
	}

	// Wake up one or more cleanup Gs.
	if gcCleanups.needsWake() {
		gcCleanups.wake()
	}

	// Yield to any pending cgo callbacks.
	if *cgo_yield != nil {
		asmcgocall(*cgo_yield, nil)
	}

	// local runq
	if gp, inheritTime := runqget(pp); gp != nil {
		return gp, inheritTime, false
	}

	// global runq
	if !sched.runq.empty() {
		lock(&sched.lock)
		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
		unlock(&sched.lock)
		if gp != nil {
			if runqputbatch(pp, &q); !q.empty() {
				throw("Couldn't put Gs into empty local runq")
			}
			return gp, false, false
		}
	}

	// Poll network.
	// This netpoll is only an optimization before we resort to stealing.
	// We can safely skip it if there are no waiters or a thread is blocked
	// in netpoll already. If there is any kind of logical race with that
	// blocked thread (e.g. it has already returned from netpoll, but does
	// not set lastpoll yet), this thread will do blocking netpoll below
	// anyway.
	// We only poll from one thread at a time to avoid kernel contention
	// on machines with many cores.
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
		list, delta := netpoll(0)
		sched.pollingNet.Store(0)
		if !list.empty() { // non-blocking
			gp := list.pop()
			injectglist(&list)
			netpollAdjustWaiters(delta)
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
	}

	// Spinning Ms: steal work from other Ps.
	//
	// Limit the number of spinning Ms to half the number of busy Ps.
	// This is necessary to prevent excessive CPU consumption when
	// GOMAXPROCS>>1 but the program parallelism is low.
	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
		if !mp.spinning {
			mp.becomeSpinning()
		}

		gp, inheritTime, tnow, w, newWork := stealWork(now)
		if gp != nil {
			// Successfully stole.
			return gp, inheritTime, false
		}
		if newWork {
			// There may be new timer or GC work; restart to
			// discover.
			goto top
		}

		now = tnow
		if w != 0 && (pollUntil == 0 || w < pollUntil) {
			// Earlier timer to wait for.
			pollUntil = w
		}
	}

	// We have nothing to do.
	//
	// If we're in the GC mark phase, can safely scan and blacken objects,
	// and have work to do, run idle-time marking rather than give up the P.
	if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
		if node != nil {
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			gp := node.gp.ptr()

			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
		gcController.removeIdleMarkWorker()
	}

	// wasm only:
	// If a callback returned and no other goroutine is awake,
	// then wake event handler goroutine which pauses execution
	// until a callback was triggered.
	gp, otherReady := beforeIdle(now, pollUntil)
	if gp != nil {
		trace := traceAcquire()
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
			traceRelease(trace)
		}
		return gp, false, false
	}
	if otherReady {
		goto top
	}

	// Before we drop our P, make a snapshot of the allp slice,
	// which can change underfoot once we no longer block
	// safe-points. We don't need to snapshot the contents because
	// everything up to cap(allp) is immutable.
	//
	// We clear the snapshot from the M after return via
	// mp.clearAllpSnapshot (in schedule) and on each iteration of the top
	// loop.
	allpSnapshot := mp.snapshotAllp()
	// Also snapshot masks. Value changes are OK, but we can't allow
	// len to change out from under us.
	idlepMaskSnapshot := idlepMask
	timerpMaskSnapshot := timerpMask

	// return P and block
	lock(&sched.lock)
	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if !sched.runq.empty() {
		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
		unlock(&sched.lock)
		if gp == nil {
			throw("global runq empty with non-zero runqsize")
		}
		if runqputbatch(pp, &q); !q.empty() {
			throw("Couldn't put Gs into empty local runq")
		}
		return gp, false, false
	}
	if !mp.spinning && sched.needspinning.Load() == 1 {
		// See "Delicate dance" comment below.
		mp.becomeSpinning()
		unlock(&sched.lock)
		goto top
	}
	if releasep() != pp {
		throw("findRunnable: wrong p")
	}
	now = pidleput(pp, now)
	unlock(&sched.lock)

	// Delicate dance: thread transitions from spinning to non-spinning
	// state, potentially concurrently with submission of new work. We must
	// drop nmspinning first and then check all sources again (with
	// #StoreLoad memory barrier in between). If we do it the other way
	// around, another thread can submit work after we've checked all
	// sources but before we drop nmspinning; as a result nobody will
	// unpark a thread to run the work.
	//
	// This applies to the following sources of work:
	//
	// * Goroutines added to the global or a per-P run queue.
	// * New/modified-earlier timers on a per-P timer heap.
	// * Idle-priority GC work (barring golang.org/issue/19112).
	//
	// If we discover new work below, we need to restore m.spinning as a
	// signal for resetspinning to unpark a new worker thread (because
	// there can be more than one starving goroutine).
	//
	// However, if after discovering new work we also observe no idle Ps
	// (either here or in resetspinning), we have a problem. We may be
	// racing with a non-spinning M in the block above, having found no
	// work and preparing to release its P and park. Allowing that P to go
	// idle will result in loss of work conservation (idle P while there is
	// runnable work). This could result in complete deadlock in the
	// unlikely event that we discover new work (from netpoll) right as we
	// are racing with _all_ other Ps going idle.
	//
	// We use sched.needspinning to synchronize with non-spinning Ms going
	// idle. If needspinning is set when they are about to drop their P,
	// they abort the drop and instead become a new spinning M on our
	// behalf. If we are not racing and the system is truly fully loaded
	// then no spinning threads are required, and the next thread to
	// naturally become spinning will clear the flag.
	//
	// Also see "Worker thread parking/unparking" comment at the top of the
	// file.
	wasSpinning := mp.spinning
	if mp.spinning {
		mp.spinning = false
		if sched.nmspinning.Add(-1) < 0 {
			throw("findRunnable: negative nmspinning")
		}

		// Note that for correctness, only the last M transitioning from
		// spinning to non-spinning must perform these rechecks to
		// ensure no missed work. However, the runtime has some cases
		// of transient increments of nmspinning that are decremented
		// without going through this path, so we must be conservative
		// and perform the check on all spinning Ms.
		//
		// See https://go.dev/issue/43997.

		// Check global and P runqueues again.
		lock(&sched.lock)
		if !sched.runq.empty() {
			pp, _ := pidlegetSpinning(0)
			if pp != nil {
				gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
				unlock(&sched.lock)
				if gp == nil {
					throw("global runq empty with non-zero runqsize")
				}
				if runqputbatch(pp, &q); !q.empty() {
					throw("Couldn't put Gs into empty local runq")
				}
				acquirep(pp)
				mp.becomeSpinning()
				return gp, false, false
			}
		}
		unlock(&sched.lock)

		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()
			goto top
		}

		// Check for idle-priority GC work again.
		pp, gp := checkIdleGCNoP()
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()

			// Run the idle worker.
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}

		// Finally, check for timer creation or expiry concurrently with
		// transitioning from spinning to non-spinning.
		//
		// Note that we cannot use checkTimers here because it calls
		// adjusttimers which may need to allocate memory, and that isn't
		// allowed when we don't have an active P.
		pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
	}

	// We don't need allp anymore at this point, but can't clear the
	// snapshot without a P for the write barrier.

	// Poll network until next timer.
	if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
		sched.pollUntil.Store(pollUntil)
		if mp.p != 0 {
			throw("findRunnable: netpoll with p")
		}
		if mp.spinning {
			throw("findRunnable: netpoll with spinning")
		}
		delay := int64(-1)
		if pollUntil != 0 {
			if now == 0 {
				now = nanotime()
			}
			delay = pollUntil - now
			if delay < 0 {
				delay = 0
			}
		}
		if faketime != 0 {
			// When using fake time, just poll.
			delay = 0
		}
		list, delta := netpoll(delay) // block until new work is available
		// Refresh now again, after potentially blocking.
		now = nanotime()
		sched.pollUntil.Store(0)
		sched.lastpoll.Store(now)
		if faketime != 0 && list.empty() {
			// Using fake time and nothing is ready; stop M.
			// When all M's stop, checkdead will call timejump.
			stopm()
			goto top
		}
		lock(&sched.lock)
		pp, _ := pidleget(now)
		unlock(&sched.lock)
		if pp == nil {
			// No P available: hand the ready Gs to the global queue.
			injectglist(&list)
			netpollAdjustWaiters(delta)
		} else {
			acquirep(pp)
			if !list.empty() {
				gp := list.pop()
				injectglist(&list)
				netpollAdjustWaiters(delta)
				trace := traceAcquire()
				casgstatus(gp, _Gwaiting, _Grunnable)
				if trace.ok() {
					trace.GoUnpark(gp, 0)
					traceRelease(trace)
				}
				return gp, false, false
			}
			if wasSpinning {
				mp.becomeSpinning()
			}
			goto top
		}
	} else if pollUntil != 0 && netpollinited() {
		// Someone else is polling; make sure they wake up no later than
		// our earliest timer.
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
			netpollBreak()
		}
	}
	stopm()
	goto top
}
// pollWork reports whether there is non-background work this P could
// be doing. This is a fairly lightweight check to be used for
// background work loops, like idle GC. It checks a subset of the
// conditions checked by the actual scheduler.
func pollWork() bool {
	if !sched.runq.empty() {
		return true
	}
	p := getg().m.p.ptr()
	if !runqempty(p) {
		return true
	}
	// As a side effect, ready Gs found by the non-blocking netpoll are
	// injected into the run queues.
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
		if list, delta := netpoll(0); !list.empty() {
			injectglist(&list)
			netpollAdjustWaiters(delta)
			return true
		}
	}
	return false
}
// stealWork attempts to steal a runnable goroutine or timer from any P.
//
// If newWork is true, new work may have been readied.
//
// If now is not 0 it is the current time. stealWork returns the passed time or
// the current time if now was passed as 0.
func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
	pp := getg().m.p.ptr()

	ranTimer := false

	// Up to four sweeps over all Ps in random order; only the last sweep
	// steals timers and runnext (see below).
	const stealTries = 4
	for i := 0; i < stealTries; i++ {
		stealTimersOrRunNextG := i == stealTries-1

		for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
			if sched.gcwaiting.Load() {
				// GC work may be available.
				return nil, false, now, pollUntil, true
			}
			p2 := allp[enum.position()]
			if pp == p2 {
				continue
			}

			// Steal timers from p2. This call to checkTimers is the only place
			// where we might hold a lock on a different P's timers. We do this
			// once on the last pass before checking runnext because stealing
			// from the other P's runnext should be the last resort, so if there
			// are timers to steal do that first.
			//
			// We only check timers on one of the stealing iterations because
			// the time stored in now doesn't change in this loop and checking
			// the timers for each P more than once with the same value of now
			// is probably a waste of time.
			//
			// timerpMask tells us whether the P may have timers at all. If it
			// can't, no need to check at all.
			if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
				tnow, w, ran := p2.timers.check(now, nil)
				now = tnow
				if w != 0 && (pollUntil == 0 || w < pollUntil) {
					pollUntil = w
				}
				if ran {
					// Running the timers may have
					// made an arbitrary number of G's
					// ready and added them to this P's
					// local run queue. That invalidates
					// the assumption of runqsteal
					// that it always has room to add
					// stolen G's. So check now if there
					// is a local G to run.
					if gp, inheritTime := runqget(pp); gp != nil {
						return gp, inheritTime, now, pollUntil, ranTimer
					}
					ranTimer = true
				}
			}

			// Don't bother to attempt to steal if p2 is idle.
			if !idlepMask.read(enum.position()) {
				if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
					return gp, false, now, pollUntil, ranTimer
				}
			}
		}
	}

	// No goroutines found to steal. Regardless, running a timer may have
	// made some goroutine ready that we missed. Indicate the next timer to
	// wait for.
	return nil, false, now, pollUntil, ranTimer
}
// Check all Ps for a runnable G to steal.
//
// On entry we have no P. If a G is available to steal and a P is available,
// the P is returned which the caller should acquire and attempt to steal the
// work to.
func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
	for id, p2 := range allpSnapshot {
		// Only non-idle Ps with a non-empty local runq are candidates.
		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
			lock(&sched.lock)
			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				// Can't get a P, don't bother checking remaining Ps.
				unlock(&sched.lock)
				return nil
			}
			unlock(&sched.lock)
			return pp
		}
	}

	// No work available.
	return nil
}
// checkTimersNoP scans the snapshotted Ps for the timer that expires
// soonest, considering only Ps flagged in timerpMaskSnapshot as possibly
// having timers.
//
// Returns the updated pollUntil value: the earliest non-zero wake time
// found, or the original pollUntil if none beats it.
func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
	earliest := pollUntil
	for i, p2 := range allpSnapshot {
		if !timerpMaskSnapshot.read(uint32(i)) {
			// This P cannot have timers; skip it.
			continue
		}
		if when := p2.timers.wakeTime(); when != 0 && (earliest == 0 || when < earliest) {
			earliest = when
		}
	}
	return earliest
}
// Check for idle-priority GC, without a P on entry.
//
// If some GC work, a P, and a worker G are all available, the P and G will be
// returned. The returned P has not been wired yet.
func checkIdleGCNoP() (*p, *g) {
	// N.B. Since we have no P, gcBlackenEnabled may change at any time; we
	// must check again after acquiring a P. As an optimization, we also check
	// if an idle mark worker is needed at all. This is OK here, because if we
	// observe that one isn't needed, at least one is currently running. Even if
	// it stops running, its own journey into the scheduler should schedule it
	// again, if need be (at which point, this check will pass, if relevant).
	if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
		return nil, nil
	}
	if !gcShouldScheduleWorker(nil) {
		return nil, nil
	}

	// Work is available; we can start an idle GC worker only if there is
	// an available P and available worker G.
	//
	// We can attempt to acquire these in either order, though both have
	// synchronization concerns (see below). Workers are almost always
	// available (see comment in findRunnableGCWorker for the one case
	// there may be none). Since we're slightly less likely to find a P,
	// check for that first.
	//
	// Synchronization: note that we must hold sched.lock until we are
	// committed to keeping it. Otherwise we cannot put the unnecessary P
	// back in sched.pidle without performing the full set of idle
	// transition checks.
	//
	// If we were to check gcBgMarkWorkerPool first, we must somehow handle
	// the assumption in gcControllerState.findRunnableGCWorker that an
	// empty gcBgMarkWorkerPool is only possible if gcMarkDone is running.
	lock(&sched.lock)
	pp, now := pidlegetSpinning(0)
	if pp == nil {
		unlock(&sched.lock)
		return nil, nil
	}

	// Now that we own a P, gcBlackenEnabled can't change (as it requires STW).
	if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
		pidleput(pp, now)
		unlock(&sched.lock)
		return nil, nil
	}

	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// No worker G available; undo the P acquisition and the idle
		// mark worker reservation.
		pidleput(pp, now)
		unlock(&sched.lock)
		gcController.removeIdleMarkWorker()
		return nil, nil
	}

	unlock(&sched.lock)

	return pp, node.gp.ptr()
}
// wakeNetPoller wakes up the thread sleeping in the network poller if it isn't
// going to wake up before the when argument; or it wakes an idle P to service
// timers and the network poller if there isn't one already.
func wakeNetPoller(when int64) {
	if sched.lastpoll.Load() != 0 {
		// There are no threads in the network poller, try to get
		// one there so it can handle new timers.
		if GOOS != "plan9" { // Temporary workaround - see issue #42303.
			wakep()
		}
		return
	}

	// A thread is blocked in netpoll. In findRunnable we ensure that when
	// polling the pollUntil field is either zero or the time to which the
	// current poll is expected to run. This can have a spurious wakeup
	// but should never miss a wakeup.
	sleepUntil := sched.pollUntil.Load()
	if sleepUntil == 0 || sleepUntil > when {
		netpollBreak()
	}
}
// resetspinning clears the spinning state of the current M after it has
// found work, decrementing sched.nmspinning accordingly, and then
// conservatively wakes another P in case more work was submitted while we
// were spinning.
func resetspinning() {
	gp := getg()
	if !gp.m.spinning {
		throw("resetspinning: not a spinning m")
	}
	gp.m.spinning = false
	nmspinning := sched.nmspinning.Add(-1)
	if nmspinning < 0 {
		// Fixed: this throw previously said "findRunnable", which was
		// misleading in crash reports; the underflow is detected here.
		throw("resetspinning: negative nmspinning")
	}
	// M wakeup policy is deliberately somewhat conservative, so check if we
	// need to wakeup another P here. See "Worker thread parking/unparking"
	// comment at the top of the file for details.
	wakep()
}
// injectglist adds each runnable G on the list to some run queue,
// and clears glist. If there is no current P, they are added to the
// global queue, and up to npidle M's are started to run them.
// Otherwise, for each idle P, this adds a G to the global queue
// and starts an M. Any remaining G's are added to the current P's
// local run queue.
// This may temporarily acquire sched.lock.
// Can run concurrently with GC.
func injectglist(glist *gList) {
	if glist.empty() {
		return
	}

	// Mark all the goroutines as runnable before we put them
	// on the run queues.
	var tail *g
	trace := traceAcquire()
	for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		tail = gp
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
		}
	}
	if trace.ok() {
		traceRelease(trace)
	}

	// Turn the gList into a gQueue.
	q := gQueue{glist.head, tail.guintptr(), glist.size}
	*glist = gList{}

	// startIdle starts up to n Ms on idle Ps to run the queued work.
	startIdle := func(n int32) {
		for ; n > 0; n-- {
			mp := acquirem() // See comment in startm.
			lock(&sched.lock)

			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				unlock(&sched.lock)
				releasem(mp)
				break
			}

			startm(pp, false, true)
			unlock(&sched.lock)
			releasem(mp)
		}
	}

	pp := getg().m.p.ptr()
	if pp == nil {
		// No current P: everything goes to the global queue.
		n := q.size
		lock(&sched.lock)
		globrunqputbatch(&q)
		unlock(&sched.lock)
		startIdle(n)
		return
	}

	// Push one G to the global queue per idle P so started Ms find work.
	var globq gQueue
	npidle := sched.npidle.Load()
	for ; npidle > 0 && !q.empty(); npidle-- {
		g := q.pop()
		globq.pushBack(g)
	}
	if !globq.empty() {
		n := globq.size
		lock(&sched.lock)
		globrunqputbatch(&globq)
		unlock(&sched.lock)
		startIdle(n)
	}

	// Put the rest on the local runq; overflow goes to the global queue.
	if runqputbatch(pp, &q); !q.empty() {
		lock(&sched.lock)
		globrunqputbatch(&q)
		unlock(&sched.lock)
	}

	// Some P's might have become idle after we loaded `sched.npidle`
	// but before any goroutines were added to the queue, which could
	// lead to idle P's when there is work available in the global queue.
	// That could potentially last until other goroutines become ready
	// to run. That said, we need to find a way to hedge
	//
	// Calling wakep() here is the best bet, it will do nothing in the
	// common case (no racing on `sched.npidle`), while it could wake one
	// more P to execute G's, which might end up with >1 P's: the first one
	// wakes another P and so forth until there is no more work, but this
	// ought to be an extremely rare case.
	//
	// Also see "Worker thread parking/unparking" comment at the top of the file for details.
	wakep()
}
// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.
func schedule() {
	mp := getg().m

	if mp.locks != 0 {
		throw("schedule: holding locks")
	}

	if mp.lockedg != 0 {
		// This M is dedicated to a locked G: park until that G is
		// runnable, then run it directly.
		stoplockedm()
		execute(mp.lockedg.ptr(), false) // Never returns.
	}

	// We should not schedule away from a g that is executing a cgo call,
	// since the cgo call is using the m's g0 stack.
	if mp.incgo {
		throw("schedule: in cgo")
	}

top:
	pp := mp.p.ptr()
	pp.preempt = false

	// Safety check: if we are spinning, the run queue should be empty.
	// Check this before calling checkTimers, as that might call
	// goready to put a ready goroutine on the local run queue.
	if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
		throw("schedule: spinning with local work")
	}

	gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available

	// May be on a new P.
	pp = mp.p.ptr()

	// findRunnable may have collected an allp snapshot. The snapshot is
	// only required within findRunnable. Clear it to allow the GC to
	// collect the slice.
	mp.clearAllpSnapshot()

	// If the P was assigned a next GC mark worker but findRunnable
	// selected anything else, release the worker so another P may run it.
	//
	// N.B. If this occurs because a higher-priority goroutine was selected
	// (trace reader), then tryWakeP is set, which will wake another P to
	// run the worker. If this occurs because the GC is no longer active,
	// there is no need to wakep.
	gcController.releaseNextGCMarkWorker(pp)

	if debug.dontfreezetheworld > 0 && freezing.Load() {
		// See comment in freezetheworld. We don't want to perturb
		// scheduler state, so we didn't gcstopm in findRunnable, but
		// also don't want to allow new goroutines to run.
		//
		// Deadlock here rather than in the findRunnable loop so if
		// findRunnable is stuck in a loop we don't perturb that
		// either.
		lock(&deadlock)
		lock(&deadlock)
	}

	// This thread is going to run a goroutine and is not spinning anymore,
	// so if it was marked as spinning we need to reset it now and potentially
	// start a new spinning M.
	if mp.spinning {
		resetspinning()
	}

	if sched.disable.user && !schedEnabled(gp) {
		// Scheduling of this goroutine is disabled. Put it on
		// the list of pending runnable goroutines for when we
		// re-enable user scheduling and look again.
		lock(&sched.lock)
		if schedEnabled(gp) {
			// Something re-enabled scheduling while we
			// were acquiring the lock.
			unlock(&sched.lock)
		} else {
			sched.disable.runnable.pushBack(gp)
			unlock(&sched.lock)
			goto top
		}
	}

	// If about to schedule a not-normal goroutine (a GCworker or tracereader),
	// wake a P if there is one.
	if tryWakeP {
		wakep()
	}
	if gp.lockedm != 0 {
		// Hands off own p to the locked m,
		// then blocks waiting for a new p.
		startlockedm(gp)
		goto top
	}

	execute(gp, inheritTime)
}
// dropg removes the association between m and the current goroutine m->curg (gp for short).
// Typically a caller sets gp's status away from Grunning and then
// immediately calls dropg to finish the job. The caller is also responsible
// for arranging that gp will be restarted using ready at an
// appropriate time. After calling dropg and arranging for gp to be
// readied later, the caller can do other work but eventually should
// call schedule to restart the scheduling of goroutines on this m.
func dropg() {
	gp := getg()

	// Clear both directions of the m<->curg link using the no-write-barrier
	// setters, since this may run in contexts where write barriers are not
	// allowed.
	setMNoWB(&gp.m.curg.m, nil)
	setGNoWB(&gp.m.curg, nil)
}
// parkunlock_c unlocks the mutex passed as an unsafe.Pointer and reports
// true, committing to the park. It is used as a waitunlockf callback (see
// the mp.waitunlockf call in park_m).
func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	return true
}
// park continuation on g0.
//
// Transitions gp from _Grunning to _Gwaiting, drops it from this M, and
// consults mp.waitunlockf (if set) for permission to park; if the callback
// refuses, gp is made runnable again and rescheduled immediately.
func park_m(gp *g) {
	mp := getg().m

	trace := traceAcquire()

	// If g is in a synctest group, we don't want to let the group
	// become idle until after the waitunlockf (if any) has confirmed
	// that the park is happening.
	// We need to record gp.bubble here, since waitunlockf can change it.
	bubble := gp.bubble
	if bubble != nil {
		bubble.incActive()
	}

	if trace.ok() {
		// Trace the event before the transition. It may take a
		// stack trace, but we won't own the stack after the
		// transition anymore.
		trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
	}
	// N.B. Not using casGToWaiting here because the waitreason is
	// set by park_m's caller.
	casgstatus(gp, _Grunning, _Gwaiting)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()

	if fn := mp.waitunlockf; fn != nil {
		ok := fn(gp, mp.waitlock)
		mp.waitunlockf = nil
		mp.waitlock = nil
		if !ok {
			// The callback vetoed the park: make gp runnable again
			// and run it right away on this M.
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if bubble != nil {
				bubble.decActive()
			}
			if trace.ok() {
				trace.GoUnpark(gp, 2)
				traceRelease(trace)
			}
			execute(gp, true) // Schedule it back, never returns.
		}
	}

	if bubble != nil {
		bubble.decActive()
	}

	schedule()
}
// goschedImpl is the shared continuation for Gosched and preemption on g0.
// It moves gp from _Grunning to _Grunnable, detaches it from this M, and
// requeues it: on the local P's runnext when preempted for a pending STW,
// otherwise on the global run queue. Never returns to the caller's frame;
// it ends by entering the scheduler.
func goschedImpl(gp *g, preempted bool) {
	pp := gp.m.p.ptr()
	trace := traceAcquire()
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	if trace.ok() {
		// Trace the event before the transition. It may take a
		// stack trace, but we won't own the stack after the
		// transition anymore.
		if preempted {
			trace.GoPreempt()
		} else {
			trace.GoSched()
		}
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()

	if preempted && sched.gcwaiting.Load() {
		// If preempted for STW, keep the G on the local P in runnext
		// so it can keep running immediately after the STW.
		runqput(pp, gp, true)
	} else {
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)
	}

	if mainStarted {
		wakep()
	}

	schedule()
}
// Gosched continuation on g0.
func gosched_m(gp *g) {
	goschedImpl(gp, false)
}
// goschedguarded is a forbidden-states-avoided version of gosched_m.
// If the M cannot be preempted right now (per canPreemptM), it resumes
// gp directly instead of yielding.
func goschedguarded_m(gp *g) {
	if !canPreemptM(gp.m) {
		gogo(&gp.sched) // never return
	}
	goschedImpl(gp, false)
}
// gopreempt_m is the preemption continuation on g0: like gosched_m but
// records the yield as a preemption (GoPreempt trace event).
func gopreempt_m(gp *g) {
	goschedImpl(gp, true)
}
// preemptPark parks gp and puts it in _Gpreempted.
//
// The _Gscan bit is held across the transition so that no other part of
// the runtime can claim gp until this M has fully disassociated from it.
//
//go:systemstack
func preemptPark(gp *g) {
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}

	if gp.asyncSafePoint {
		// Double-check that async preemption does not
		// happen in SPWRITE assembly functions.
		// isAsyncSafePoint must exclude this case.
		f := findfunc(gp.sched.pc)
		if !f.valid() {
			throw("preempt at unknown pc")
		}
		if f.flag&abi.FuncFlagSPWrite != 0 {
			println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
			throw("preempt SPWRITE")
		}
	}

	// Transition from _Grunning to _Gscan|_Gpreempted. We can't
	// be in _Grunning when we dropg because then we'd be running
	// without an M, but the moment we're in _Gpreempted,
	// something could claim this G before we've fully cleaned it
	// up. Hence, we set the scan bit to lock down further
	// transitions until we can dropg.
	casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)

	// Be careful about ownership as we trace this next event.
	//
	// According to the tracer invariants (trace.go) it's unsafe
	// for us to emit an event for a goroutine we do not own.
	// The moment we CAS into _Gpreempted, suspendG could CAS the
	// goroutine to _Gwaiting, effectively taking ownership. All of
	// this could happen before we even get the chance to emit
	// an event. The end result is that the events could appear
	// out of order, and the tracer generally assumes the scheduler
	// takes care of the ordering between GoPark and GoUnpark.
	//
	// The answer here is simple: emit the event while we still hold
	// the _Gscan bit on the goroutine, since the _Gscan bit means
	// ownership over transitions.
	//
	// We still need to traceAcquire and traceRelease across the CAS
	// because the tracer could be what's calling suspendG in the first
	// place. This also upholds the tracer invariant that we must hold
	// traceAcquire/traceRelease across the transition. However, we
	// specifically *only* emit the event while we still have ownership.
	trace := traceAcquire()
	if trace.ok() {
		trace.GoPark(traceBlockPreempted, 0)
	}

	// Drop the goroutine from the M. Only do this after the tracer has
	// emitted an event, because it needs the association for GoPark to
	// work correctly.
	dropg()

	// Drop the scan bit and release the trace locker if necessary.
	casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
	if trace.ok() {
		traceRelease(trace)
	}

	// All done.
	schedule()
}
// goyield is like Gosched, but it:
// - emits a GoPreempt trace event instead of a GoSched trace event
// - puts the current G on the runq of the current P instead of the globrunq
//
// goyield should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname goyield
func goyield() {
	checkTimeouts()
	mcall(goyield_m)
}
// goyield_m is the goyield continuation on g0: it moves gp to _Grunnable
// and requeues it at the tail of the current P's local run queue (rather
// than the global queue used by gosched_m).
func goyield_m(gp *g) {
	trace := traceAcquire()
	pp := gp.m.p.ptr()
	if trace.ok() {
		// Trace the event before the transition. It may take a
		// stack trace, but we won't own the stack after the
		// transition anymore.
		trace.GoPreempt()
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}
	dropg()
	runqput(pp, gp, false)
	schedule()
}
// Finishes execution of the current goroutine.
//
// Emits race-detector and tracer end events for the exiting goroutine,
// then switches to g0 to run goexit0.
func goexit1() {
	if raceenabled {
		// Synchronize the exit with the goroutine's bubble (if any)
		// before reporting the goroutine end to the race detector.
		if gp := getg(); gp.bubble != nil {
			racereleasemergeg(gp, gp.bubble.raceaddr())
		}
		racegoend()
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.GoEnd()
		traceRelease(trace)
	}
	mcall(goexit0)
}
// goexit continuation on g0.
//
// Scrubs the exiting goroutine's stack when it ran under runtime/secret,
// then destroys the goroutine and re-enters the scheduler.
func goexit0(gp *g) {
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Erase the whole stack. This path only occurs when
		// runtime.Goexit is called from within a runtime/secret.Do call.
		memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)

		// Since this is running on g0, our registers are already zeroed from going through
		// mcall in secret mode.
	}
	gdestroy(gp)
	schedule()
}
// gdestroy tears down a goroutine that has finished running: it moves gp
// to _Gdead, clears its per-run state, returns it to the P's free list,
// and handles the case where gp was locked to its OS thread.
func gdestroy(gp *g) {
	mp := getg().m
	pp := mp.p.ptr()

	casgstatus(gp, _Grunning, _Gdead)
	gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
	if isSystemGoroutine(gp, false) {
		sched.ngsys.Add(-1)
	}
	// Reset per-run state so the g can be safely reused from the free list.
	gp.m = nil
	locked := gp.lockedm != 0
	gp.lockedm = 0
	mp.lockedg = 0
	gp.preemptStop = false
	gp.paniconfault = false
	gp._defer = nil // should be nil already but just in case.
	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
	gp.writebuf = nil
	gp.waitreason = waitReasonZero
	gp.param = nil
	gp.labels = nil
	gp.timer = nil
	gp.bubble = nil
	gp.fipsOnlyBypass = false
	gp.secret = 0

	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
		// Flush assist credit to the global pool. This gives
		// better information to pacing if the application is
		// rapidly creating and exiting goroutines.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
		gcController.bgScanCredit.Add(scanCredit)
		gp.gcAssistBytes = 0
	}

	dropg()

	if GOARCH == "wasm" { // no threads yet on wasm
		gfput(pp, gp)
		return
	}

	if locked && mp.lockedInt != 0 {
		print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
		if mp.isextra {
			throw("runtime.Goexit called in a thread that was not created by the Go runtime")
		}
		throw("exited a goroutine internally locked to the OS thread")
	}
	gfput(pp, gp)
	if locked {
		// The goroutine may have locked this thread because
		// it put it in an unusual kernel state. Kill it
		// rather than returning it to the thread pool.

		// Return to mstart, which will release the P and exit
		// the thread.
		if GOOS != "plan9" { // See golang.org/issue/22227.
			gogo(&mp.g0.sched)
		} else {
			// Clear lockedExt on plan9 since we may end up re-using
			// this thread.
			mp.lockedExt = 0
		}
	}
}
// save updates getg().sched to refer to pc and sp so that a following
// gogo will restore pc and sp.
//
// save must not have write barriers because invoking a write barrier
// can clobber getg().sched.
//
//go:nosplit
//go:nowritebarrierrec
func save(pc, sp, bp uintptr) {
	gp := getg()

	if gp == gp.m.g0 || gp == gp.m.gsignal {
		// m.g0.sched is special and must describe the context
		// for exiting the thread. mstart1 writes to it directly.
		// m.gsignal.sched should not be used at all.
		// This check makes sure save calls do not accidentally
		// run in contexts where they'd write to system g's.
		throw("save on system g not allowed")
	}

	gp.sched.pc = pc
	gp.sched.sp = sp
	gp.sched.lr = 0
	gp.sched.bp = bp
	// We need to ensure ctxt is zero, but can't have a write
	// barrier here. However, it should always already be zero.
	// Assert that.
	if gp.sched.ctxt != nil {
		badctxt()
	}
}
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the save must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
//
// Nothing entersyscall calls can split the stack either.
// We cannot safely move the stack during an active call to syscall,
// because we do not know which of the uintptr arguments are
// really pointers (back into the stack).
// In practice, this means that we make the fast path run through
// entersyscall doing no-split things, and the slow path has to use systemstack
// to run bigger things on the system stack.
//
// reentersyscall is the entry point used by cgo callbacks, where explicitly
// saved SP and PC are restored. This is needed when exitsyscall will be called
// from a function further up in the call stack than the parent, as g->syscallsp
// must always point to a valid stack frame. entersyscall below is the normal
// entry point for syscalls, which obtains the SP and PC from the caller.
//
//go:nosplit
func reentersyscall(pc, sp, bp uintptr) {
	gp := getg()

	// Disable preemption because during this function g is in Gsyscall status,
	// but can have inconsistent g->sched, do not let GC observe it.
	gp.m.locks++

	// This M may have a signal stack that is dirtied with secret information
	// (see package "runtime/secret"). Since it's about to go into a syscall for
	// an arbitrary amount of time and the G that put the secret info there
	// might have returned from secret.Do, we have to zero it out now, lest we
	// break the guarantee that secrets are purged by the next GC after a return
	// to secret.Do.
	//
	// It might be tempting to think that we only need to zero out this if we're
	// not running in secret mode anymore, but that leaves an ABA problem. The G
	// that put the secrets onto our signal stack may not be the one that is
	// currently executing.
	//
	// Logically, we should erase this when we lose our P, not when we enter the
	// syscall. This would avoid a zeroing in the case where the call returns
	// almost immediately. Since we use this path for cgo calls as well, these
	// fast "syscalls" are quite common. However, since we only erase the signal
	// stack if we were delivered a signal in secret mode and considering the
	// cross-thread synchronization cost for the P, it hardly seems worth it.
	//
	// TODO(dmo): can we encode the goid into mp.signalSecret and avoid the ABA problem?
	if goexperiment.RuntimeSecret {
		eraseSecretsSignalStk()
	}

	// Entersyscall must not call any function that might split/grow the stack.
	// (See details in comment above.)
	// Catch calls that might, by replacing the stack guard with something that
	// will trip any stack check and leaving a flag to tell newstack to die.
	gp.stackguard0 = stackPreempt
	gp.throwsplit = true

	// Copy the syscalltick over so we can identify if the P got stolen later.
	gp.m.syscalltick = gp.m.p.ptr().syscalltick
	pp := gp.m.p.ptr()
	if pp.runSafePointFn != 0 {
		// runSafePointFn may stack split if run on this stack
		systemstack(runSafePointFn)
	}
	// Remember the P we're leaving behind so exitsyscall can try to
	// re-acquire it on the way out.
	gp.m.oldp.set(pp)

	// Leave SP around for GC and traceback.
	save(pc, sp, bp)
	gp.syscallsp = sp
	gp.syscallpc = pc
	gp.syscallbp = bp

	// Double-check sp and bp.
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscall")
		})
	}
	if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
		systemstack(func() {
			print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscall")
		})
	}

	trace := traceAcquire()
	if trace.ok() {
		// Emit a trace event. Notably, actually emitting the event must happen before
		// the casgstatus because it mutates the P, but the traceLocker must be held
		// across the casgstatus since we're transitioning out of _Grunning
		// (see trace.go invariants).
		systemstack(func() {
			trace.GoSysCall()
		})
		// systemstack clobbered gp.sched, so restore it.
		save(pc, sp, bp)
	}

	if sched.gcwaiting.Load() {
		// Optimization: If there's a pending STW, do the equivalent of
		// entersyscallblock here at the last minute and immediately give
		// away our P.
		systemstack(func() {
			entersyscallHandleGCWait(trace)
		})
		// systemstack clobbered gp.sched, so restore it.
		save(pc, sp, bp)
	}

	// As soon as we switch to _Gsyscall, we are in danger of losing our P.
	// We must not touch it after this point.
	//
	// Try to do a quick CAS to avoid calling into casgstatus in the common case.
	// If we have a bubble, we need to fall into casgstatus.
	if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
		casgstatus(gp, _Grunning, _Gsyscall)
	}
	if staticLockRanking {
		// casgstatus clobbers gp.sched via systemstack under staticLockRanking. Restore it.
		save(pc, sp, bp)
	}
	if trace.ok() {
		// N.B. We don't need to go on the systemstack because traceRelease is very
		// carefully recursively nosplit. This also means we don't need to worry
		// about clobbering gp.sched.
		traceRelease(trace)
	}
	if sched.sysmonwait.Load() {
		systemstack(entersyscallWakeSysmon)
		// systemstack clobbered gp.sched, so restore it.
		save(pc, sp, bp)
	}

	gp.m.locks--
}
// debugExtendGrunningNoP is a debug mode that extends the windows in which
// we're _Grunning without a P in order to try to shake out bugs with code
// assuming this state is impossible. Must be false in production builds.
const debugExtendGrunningNoP = false
// Standard syscall entry used by the go syscall library and normal cgo calls.
//
// This is exported via linkname to assembly in the syscall package and x/sys.
//
// Other packages should not be accessing entersyscall directly,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:nosplit
//go:linkname entersyscall
func entersyscall() {
	// N.B. getcallerfp cannot be written directly as argument in the call
	// to reentersyscall because it forces spilling the other arguments to
	// the stack. This results in exceeding the nosplit stack requirements
	// on some platforms.
	fp := getcallerfp()
	reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
}
// entersyscallWakeSysmon wakes sysmon if it is sleeping. The sysmonwait
// flag is re-checked under sched.lock to avoid a spurious wakeup race.
// Called via systemstack from the syscall entry path.
func entersyscallWakeSysmon() {
	lock(&sched.lock)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
}
// entersyscallHandleGCWait hands this M's P directly to a pending
// stop-the-world: it detaches the P, marks it _Pgcstop, and wakes the
// STW initiator if this was the last P it was waiting for. Called via
// systemstack from reentersyscall when sched.gcwaiting is set.
func entersyscallHandleGCWait(trace traceLocker) {
	gp := getg()

	lock(&sched.lock)
	if sched.stopwait > 0 {
		// Set our P to _Pgcstop so the STW can take it.
		pp := gp.m.p.ptr()
		pp.m = 0
		gp.m.p = 0
		atomic.Store(&pp.status, _Pgcstop)
		if trace.ok() {
			trace.ProcStop(pp)
		}
		addGSyscallNoP(gp.m) // We gave up our P voluntarily.
		pp.gcStopTime = nanotime()
		pp.syscalltick++
		// Decrement stopwait; if we were the last P the STW was
		// waiting on, wake the initiator.
		if sched.stopwait--; sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
	}
	unlock(&sched.lock)
}
// The same as entersyscall(), but with a hint that the syscall is blocking.
// Because the call is expected to block, the P is handed off eagerly
// (handoffp) before the _Gsyscall transition instead of being kept for
// exitsyscall to reclaim.
//
// entersyscallblock should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname entersyscallblock
//go:nosplit
func entersyscallblock() {
	gp := getg()

	gp.m.locks++ // see comment in entersyscall
	gp.throwsplit = true
	gp.stackguard0 = stackPreempt // see comment in entersyscall
	gp.m.syscalltick = gp.m.p.ptr().syscalltick
	gp.m.p.ptr().syscalltick++
	addGSyscallNoP(gp.m) // We're going to give up our P.

	// Leave SP around for GC and traceback.
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	bp := getcallerfp()
	save(pc, sp, bp)
	gp.syscallsp = gp.sched.sp
	gp.syscallpc = gp.sched.pc
	gp.syscallbp = gp.sched.bp
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		sp1 := sp
		sp2 := gp.sched.sp
		sp3 := gp.syscallsp
		systemstack(func() {
			print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}

	// Once we switch to _Gsyscall, we can't safely touch
	// our P anymore, so we need to hand it off beforehand.
	// The tracer also needs to see the syscall before the P
	// handoff, so the order here must be (1) trace,
	// (2) handoff, (3) _Gsyscall switch.
	trace := traceAcquire()
	systemstack(func() {
		if trace.ok() {
			trace.GoSysCall()
		}
		handoffp(releasep())
	})
	// <--
	// Caution: we're in a small window where we are in _Grunning without a P.
	// -->
	if debugExtendGrunningNoP {
		usleep(10)
	}
	casgstatus(gp, _Grunning, _Gsyscall)
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
		systemstack(func() {
			print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	if trace.ok() {
		systemstack(func() {
			traceRelease(trace)
		})
	}

	// Resave for traceback during blocked call.
	save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())

	gp.m.locks--
}
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
//
// Write barriers are not allowed because our P may have been stolen.
//
// This is exported via linkname to assembly in the syscall package.
//
// exitsyscall should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:nosplit
//go:nowritebarrierrec
//go:linkname exitsyscall
func exitsyscall() {
	gp := getg()

	gp.m.locks++ // see comment in entersyscall
	if sys.GetCallerSP() > gp.syscallsp {
		throw("exitsyscall: syscall frame is no longer valid")
	}

	gp.waitsince = 0

	if sched.stopwait == freezeStopWait {
		// Wedge ourselves if there's an outstanding freezetheworld.
		// If we transition to running, we might end up with our traceback
		// being taken twice.
		systemstack(func() {
			// Deliberately deadlock: acquiring the same lock twice
			// blocks this M forever during a world freeze.
			lock(&deadlock)
			lock(&deadlock)
		})
	}

	// Optimistically assume we're going to keep running, and switch to running.
	// Before this point, our P wiring is not ours. Once we get past this point,
	// we can access our P if we have it, otherwise we lost it.
	//
	// N.B. Because we're transitioning to _Grunning here, traceAcquire doesn't
	// need to be held ahead of time. We're effectively atomic with respect to
	// the tracer because we're non-preemptible and in the runtime. It can't stop
	// us to read a bad status.
	//
	// Try to do a quick CAS to avoid calling into casgstatus in the common case.
	// If we have a bubble, we need to fall into casgstatus.
	if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
		casgstatus(gp, _Gsyscall, _Grunning)
	}

	// Caution: we're in a window where we may be in _Grunning without a P.
	// Either we will grab a P or call exitsyscall0, where we'll switch to
	// _Grunnable.
	if debugExtendGrunningNoP {
		usleep(10)
	}

	// Grab and clear our old P.
	oldp := gp.m.oldp.ptr()
	gp.m.oldp.set(nil)

	// Check if we still have a P, and if not, try to acquire an idle P.
	pp := gp.m.p.ptr()
	if pp != nil {
		// Fast path: we still have our P. Just emit a syscall exit event.
		if trace := traceAcquire(); trace.ok() {
			systemstack(func() {
				// The truth is we truly never lost the P, but syscalltick
				// is used to indicate whether the P should be treated as
				// lost anyway. For example, when syscalltick is trashed by
				// dropm.
				//
				// TODO(mknyszek): Consider a more explicit mechanism for this.
				// Then syscalltick doesn't need to be trashed, and can be used
				// exclusively by sysmon for deciding when it's time to retake.
				if pp.syscalltick == gp.m.syscalltick {
					trace.GoSysExit(false)
				} else {
					// Since we need to pretend we lost the P, but nobody ever
					// took it, we need a ProcSteal event to model the loss.
					// Then, continue with everything else we'd do if we lost
					// the P.
					trace.ProcSteal(pp)
					trace.ProcStart()
					trace.GoSysExit(true)
					trace.GoStart()
				}
				traceRelease(trace)
			})
		}
	} else {
		// Slow path: we lost our P. Try to get another one.
		systemstack(func() {
			// Try to get some other P.
			if pp := exitsyscallTryGetP(oldp); pp != nil {
				// Install the P.
				acquirepNoTrace(pp)

				// We're going to start running again, so emit all the relevant events.
				if trace := traceAcquire(); trace.ok() {
					trace.ProcStart()
					trace.GoSysExit(true)
					trace.GoStart()
					traceRelease(trace)
				}
			}
		})
		pp = gp.m.p.ptr()
	}

	// If we have a P, clean up and exit.
	if pp != nil {
		if goroutineProfile.active {
			// Make sure that gp has had its stack written out to the goroutine
			// profile, exactly as it was when the goroutine profiler first
			// stopped the world.
			systemstack(func() {
				tryRecordGoroutineProfileWB(gp)
			})
		}
		// Increment the syscalltick for P, since we're exiting a syscall.
		pp.syscalltick++

		// Garbage collector isn't running (since we are),
		// so okay to clear syscallsp.
		gp.syscallsp = 0
		gp.m.locks--
		if gp.preempt {
			// Restore the preemption request in case we cleared it in newstack.
			gp.stackguard0 = stackPreempt
		} else {
			// Otherwise restore the real stackGuard, we clobbered it in entersyscall/entersyscallblock.
			gp.stackguard0 = gp.stack.lo + stackGuard
		}
		gp.throwsplit = false

		if sched.disable.user && !schedEnabled(gp) {
			// Scheduling of this goroutine is disabled.
			Gosched()
		}
		return
	}

	// Slowest path: We couldn't get a P, so call into the scheduler.
	gp.m.locks--

	// Call the scheduler.
	mcall(exitsyscallNoP)

	// Scheduler returned, so we're allowed to run now.
	// Delete the syscallsp information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	gp.syscallsp = 0
	gp.m.p.ptr().syscalltick++
	gp.throwsplit = false
}
// exitsyscallTryGetP is exitsyscall's attempt to get any P, if it's
// missing one. It first tries to steal the old P back from whichever
// thread holds it, then falls back to the idle P list.
// Returns the acquired P on success, or nil if none was available.
//
// Must execute on the systemstack because exitsyscall is nosplit.
//
//go:systemstack
func exitsyscallTryGetP(oldp *p) *p {
	// Try to steal our old P back.
	if oldp != nil {
		if thread, ok := setBlockOnExitSyscall(oldp); ok {
			thread.takeP()
			decGSyscallNoP(getg().m) // We got a P for ourselves.
			thread.resume()
			return oldp
		}
	}

	// Try to get an idle P.
	if sched.pidle != 0 {
		lock(&sched.lock)
		pp, _ := pidleget(0)
		if pp != nil && sched.sysmonwait.Load() {
			sched.sysmonwait.Store(false)
			notewakeup(&sched.sysmonnote)
		}
		unlock(&sched.lock)
		if pp != nil {
			decGSyscallNoP(getg().m) // We got a P for ourselves.
			return pp
		}
	}
	return nil
}
// exitsyscall slow path on g0.
// Failed to acquire P, enqueue gp as runnable.
//
// Called via mcall, so gp is the calling g from this M.
//
//go:nowritebarrierrec
func exitsyscallNoP(gp *g) {
	traceExitingSyscall()

	trace := traceAcquire()
	casgstatus(gp, _Grunning, _Grunnable)
	traceExitedSyscall()
	if trace.ok() {
		// Write out syscall exit eagerly.
		//
		// It's important that we write this *after* we know whether we
		// lost our P or not (determined by exitsyscallfast).
		trace.GoSysExit(true)
		traceRelease(trace)
	}
	decGSyscallNoP(getg().m)

	dropg()
	lock(&sched.lock)
	var pp *p
	if schedEnabled(gp) {
		// One last chance: grab an idle P now that we hold sched.lock.
		pp, _ = pidleget(0)
	}
	var locked bool
	if pp == nil {
		globrunqput(gp)

		// Below, we stoplockedm if gp is locked. globrunqput releases
		// ownership of gp, so we must check if gp is locked prior to
		// committing the release by unlocking sched.lock, otherwise we
		// could race with another M transitioning gp from unlocked to
		// locked.
		locked = gp.lockedm != 0
	} else if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if pp != nil {
		acquirep(pp)
		execute(gp, false) // Never returns.
	}
	if locked {
		// Wait until another thread schedules gp and so m again.
		//
		// N.B. lockedm must be this M, as this g was running on this M
		// before entersyscall.
		stoplockedm()
		execute(gp, false) // Never returns.
	}
	stopm()
	schedule() // Never returns.
}
// addGSyscallNoP must be called when a goroutine in a syscall loses its P.
// This function updates all relevant accounting.
//
// nosplit because it's called on the syscall paths.
//
//go:nosplit
func addGSyscallNoP(mp *m) {
	// It's safe to read isExtraInC here because it's only mutated
	// outside of _Gsyscall, and we know this thread is attached
	// to a goroutine in _Gsyscall and blocked from exiting.
	if !mp.isExtraInC {
		// Increment nGsyscallNoP since we're taking away a P
		// from a _Gsyscall goroutine, but only if isExtraInC
		// is not set on the M. If it is, then this thread is
		// back to being a full C thread, and will just inflate
		// the count of not-in-go goroutines. See go.dev/issue/76435.
		sched.nGsyscallNoP.Add(1)
	}
}
// decGSyscallNoP must be called whenever a goroutine in a syscall without
// a P exits the system call. This function updates all relevant accounting.
//
// nosplit because it's called from dropm.
//
//go:nosplit
func decGSyscallNoP(mp *m) {
	// Update nGsyscallNoP, but only if this is not a thread coming
	// out of C. See the comment in addGSyscallNoP. This logic must match,
	// to avoid unmatched increments and decrements.
	if !mp.isExtraInC {
		sched.nGsyscallNoP.Add(-1)
	}
}
// Called from syscall package before fork.
//
// syscall_runtime_BeforeFork is for package syscall,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
//go:nosplit
func syscall_runtime_BeforeFork() {
	gp := getg().m.curg

	// Block signals during a fork, so that the child does not run
	// a signal handler before exec if a signal is sent to the process
	// group. See issue #18600.
	gp.m.locks++
	sigsave(&gp.m.sigmask)
	sigblock(false)

	// This function is called before fork in syscall package.
	// Code between fork and exec must not allocate memory nor even try to grow stack.
	// Here we spoil g.stackguard0 to reliably detect any attempts to grow stack.
	// runtime_AfterFork will undo this in parent process, but not in child.
	gp.stackguard0 = stackFork
}
// Called from syscall package after fork in parent.
// Undoes the stack-guard spoiling and signal blocking done by
// syscall_runtime_BeforeFork.
//
// syscall_runtime_AfterFork is for package syscall,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
//go:nosplit
func syscall_runtime_AfterFork() {
	gp := getg().m.curg

	// See the comments in beforefork.
	gp.stackguard0 = gp.stack.lo + stackGuard

	msigrestore(gp.m.sigmask)

	gp.m.locks--
}
// inForkedChild is true while manipulating signals in the child process.
// This is used to avoid calling libc functions in case we are using vfork.
var inForkedChild bool
// Called from syscall package after fork in child.
// It resets non-sigignored signals to the default handler, and
// restores the signal mask in preparation for the exec.
//
// Because this might be called during a vfork, and therefore may be
// temporarily sharing address space with the parent process, this must
// not change any global variables or calling into C code that may do so.
//
// syscall_runtime_AfterForkInChild is for package syscall,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
//go:nosplit
//go:nowritebarrierrec
func syscall_runtime_AfterForkInChild() {
	// It's OK to change the global variable inForkedChild here
	// because we are going to change it back. There is no race here,
	// because if we are sharing address space with the parent process,
	// then the parent process can not be running concurrently.
	inForkedChild = true

	clearSignalHandlers()

	// When we are the child we are the only thread running,
	// so we know that nothing else has changed gp.m.sigmask.
	msigrestore(getg().m.sigmask)

	inForkedChild = false
}
// pendingPreemptSignals is the number of preemption signals
// that have been sent but not received. This is only used on Darwin.
// For #41702.
var pendingPreemptSignals atomic.Int32
// Called from syscall package before Exec.
// Takes execLock to prevent thread creation during exec, and on Darwin
// spin-waits until all in-flight preemption signals have been received.
//
//go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
func syscall_runtime_BeforeExec() {
	// Prevent thread creation during exec.
	execLock.lock()

	// On Darwin, wait for all pending preemption signals to
	// be received. See issue #41702.
	if GOOS == "darwin" || GOOS == "ios" {
		for pendingPreemptSignals.Load() > 0 {
			osyield()
		}
	}
}
// Called from syscall package after Exec.
// Releases the execLock taken by syscall_runtime_BeforeExec.
//
//go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
func syscall_runtime_AfterExec() {
	execLock.unlock()
}
// Allocate a new g, with a stack big enough for stacksize bytes.
// A negative stacksize yields a g with no stack (used for gs whose
// stacks are provided externally); otherwise the requested size is
// padded by stackSystem and rounded up to a power of two.
func malg(stacksize int32) *g {
	newg := new(g)
	if stacksize >= 0 {
		stacksize = round2(stackSystem + stacksize)
		// Stack allocation must happen on the system stack.
		systemstack(func() {
			newg.stack = stackalloc(uint32(stacksize))
			if valgrindenabled {
				newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
			}
		})
		newg.stackguard0 = newg.stack.lo + stackGuard
		newg.stackguard1 = ^uintptr(0)
		// Clear the bottom word of the stack. We record g
		// there on gsignal stack during VDSO on ARM and ARM64.
		*(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
	}
	return newg
}
// Create a new g running fn.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
func newproc(fn *funcval) {
	gp := getg()
	// Record the go statement's call site for gopc/tracebacks.
	pc := sys.GetCallerPC()
	systemstack(func() {
		newg := newproc1(fn, gp, pc, false, waitReasonZero)

		pp := getg().m.p.ptr()
		// Queue on the local P with runnext priority so the new
		// goroutine tends to run soon.
		runqput(pp, newg, true)

		if mainStarted {
			wakep()
		}
	})
}
// Create a new g in state _Grunnable (or _Gwaiting if parked is true), starting at fn.
// callerpc is the address of the go statement that created this. The caller is responsible
// for adding the new g to the scheduler. If parked is true, waitreason must be non-zero.
func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
	if fn == nil {
		fatal("go of nil func value")
	}

	mp := acquirem() // disable preemption because we hold M and P in local vars.
	pp := mp.p.ptr()
	// Prefer a recycled g from the P's free list; allocate only on miss.
	newg := gfget(pp)
	if newg == nil {
		newg = malg(stackMin)
		casgstatus(newg, _Gidle, _Gdead)
		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
	}
	if newg.stack.hi == 0 {
		throw("newproc1: newg missing stack")
	}

	if readgstatus(newg) != _Gdead {
		throw("newproc1: new g is not Gdead")
	}

	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
	totalSize = alignUp(totalSize, sys.StackAlign)
	sp := newg.stack.hi - totalSize
	if usesLR {
		// caller's LR
		*(*uintptr)(unsafe.Pointer(sp)) = 0
		prepGoExitFrame(sp)
	}
	if GOARCH == "arm64" {
		// caller's FP
		*(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
	}

	// Set up the initial scheduling context so the g starts in fn and
	// "returns" into goexit when fn completes.
	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
	newg.sched.sp = sp
	newg.stktopsp = sp
	newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
	newg.sched.g = guintptr(unsafe.Pointer(newg))
	gostartcallfn(&newg.sched, fn)
	newg.parentGoid = callergp.goid
	newg.gopc = callerpc
	newg.ancestors = saveAncestors(callergp)
	newg.startpc = fn.fn
	newg.runningCleanups.Store(false)
	if isSystemGoroutine(newg, false) {
		sched.ngsys.Add(1)
	} else {
		// Only user goroutines inherit synctest groups and pprof labels.
		newg.bubble = callergp.bubble
		if mp.curg != nil {
			newg.labels = mp.curg.labels
		}
		if goroutineProfile.active {
			// A concurrent goroutine profile is running. It should include
			// exactly the set of goroutines that were alive when the goroutine
			// profiler first stopped the world. That does not include newg, so
			// mark it as not needing a profile before transitioning it from
			// _Gdead.
			newg.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
	}
	// Track initial transition?
	newg.trackingSeq = uint8(cheaprand())
	if newg.trackingSeq%gTrackingPeriod == 0 {
		newg.tracking = true
	}
	gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))

	// Get a goid and switch to runnable. This needs to happen under traceAcquire
	// since it's a goroutine transition. See tracer invariants in trace.go.
	trace := traceAcquire()
	var status uint32 = _Grunnable
	if parked {
		status = _Gwaiting
		newg.waitreason = waitreason
	}
	if pp.goidcache == pp.goidcacheend {
		// Sched.goidgen is the last allocated id,
		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
		// At startup sched.goidgen=0, so main goroutine receives goid=1.
		pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
		pp.goidcache -= _GoidCacheBatch - 1
		pp.goidcacheend = pp.goidcache + _GoidCacheBatch
	}
	newg.goid = pp.goidcache
	casgstatus(newg, _Gdead, status)
	pp.goidcache++
	newg.trace.reset()
	if trace.ok() {
		trace.GoCreate(newg, newg.startpc, parked)
		traceRelease(trace)
	}

	// fips140 bubble
	newg.fipsOnlyBypass = callergp.fipsOnlyBypass

	// dit bubble
	newg.ditWanted = callergp.ditWanted

	// Set up race context.
	if raceenabled {
		newg.racectx = racegostart(callerpc)
		newg.raceignore = 0
		if newg.labels != nil {
			// See note in proflabel.go on labelSync's role in synchronizing
			// with the reads in the signal handler.
			racereleasemergeg(newg, unsafe.Pointer(&labelSync))
		}
	}
	pp.goroutinesCreated++
	releasem(mp)

	return newg
}
// saveAncestors builds the ancestor traceback list for a goroutine being
// created by callergp: the caller's own current stack becomes the newest
// entry, followed by the caller's prior ancestors, capped at
// debug.tracebackancestors entries. Returns nil when ancestor tracking
// is disabled or the caller is the root goroutine (goid 0).
func saveAncestors(callergp *g) *[]ancestorInfo {
	// Tracking is opt-in; the root goroutine records nothing.
	if debug.tracebackancestors <= 0 || callergp.goid == 0 {
		return nil
	}

	var prior []ancestorInfo
	if callergp.ancestors != nil {
		prior = *callergp.ancestors
	}

	// Room for all prior entries plus one new one, capped at the limit.
	n := int32(len(prior)) + 1
	if n > debug.tracebackancestors {
		n = debug.tracebackancestors
	}

	out := make([]ancestorInfo, n)
	copy(out[1:], prior) // oldest entries fall off the end when capped

	// Capture the caller's current call stack as the newest entry.
	var pcBuf [tracebackInnerFrames]uintptr
	depth := gcallers(callergp, 0, pcBuf[:])
	framePCs := make([]uintptr, depth)
	copy(framePCs, pcBuf[:])
	out[0] = ancestorInfo{
		pcs:  framePCs,
		goid: callergp.goid,
		gopc: callergp.gopc,
	}

	result := new([]ancestorInfo)
	*result = out
	return result
}
// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
//
// gp must be in state _Gdead. A stack of non-standard size is freed
// immediately so the free lists only ever cache standard-size stacks.
func gfput(pp *p, gp *g) {
	if readgstatus(gp) != _Gdead {
		throw("gfput: bad status (not Gdead)")
	}

	stksize := gp.stack.hi - gp.stack.lo

	if stksize != uintptr(startingStackSize) {
		// non-standard stack size - free it.
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		gp.stackguard0 = 0
		if valgrindenabled {
			// Tell Valgrind the cached stack registration is gone.
			valgrindDeregisterStack(gp.valgrindStackID)
			gp.valgrindStackID = 0
		}
	}

	pp.gFree.push(gp)
	// Bound the per-P free list: when it reaches 64, drain it down to 32,
	// splitting the drained gs by whether they still carry a stack, and
	// hand the batch to the corresponding global lists under one lock.
	if pp.gFree.size >= 64 {
		var (
			stackQ   gQueue
			noStackQ gQueue
		)
		for pp.gFree.size >= 32 {
			gp := pp.gFree.pop()
			if gp.stack.lo == 0 {
				noStackQ.push(gp)
			} else {
				stackQ.push(gp)
			}
		}
		lock(&sched.gFree.lock)
		sched.gFree.noStack.pushAll(noStackQ)
		sched.gFree.stack.pushAll(stackQ)
		unlock(&sched.gFree.lock)
	}
}
// Get from gfree list.
// If local list is empty, grab a batch from global list.
//
// Returns nil when no free g is available anywhere. On success, the
// returned g has a usable stack of the current standard starting size.
func gfget(pp *p) *g {
retry:
	if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
		lock(&sched.gFree.lock)
		// Move a batch of free Gs to the P.
		for pp.gFree.size < 32 {
			// Prefer Gs with stacks.
			gp := sched.gFree.stack.pop()
			if gp == nil {
				gp = sched.gFree.noStack.pop()
				if gp == nil {
					break
				}
			}
			pp.gFree.push(gp)
		}
		unlock(&sched.gFree.lock)
		// Re-evaluate from the top after refilling.
		goto retry
	}
	gp := pp.gFree.pop()
	if gp == nil {
		return nil
	}
	if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
		// Deallocate old stack. We kept it in gfput because it was the
		// right size when the goroutine was put on the free list, but
		// the right size has changed since then.
		systemstack(func() {
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stackguard0 = 0
			if valgrindenabled {
				valgrindDeregisterStack(gp.valgrindStackID)
				gp.valgrindStackID = 0
			}
		})
	}
	if gp.stack.lo == 0 {
		// Stack was deallocated in gfput or just above. Allocate a new one.
		systemstack(func() {
			gp.stack = stackalloc(startingStackSize)
			if valgrindenabled {
				gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
			}
		})
		gp.stackguard0 = gp.stack.lo + stackGuard
	} else {
		// Reusing a cached stack: inform the sanitizers that this
		// memory is live again.
		if raceenabled {
			racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if msanenabled {
			msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if asanenabled {
			asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
	}
	return gp
}
// gfpurge drains every cached dead g from pp's local free list into the
// global free lists, partitioned by whether each g still has a stack.
// The global lists are updated under a single acquisition of their lock.
func gfpurge(pp *p) {
	var withStack, withoutStack gQueue
	for {
		gp := pp.gFree.pop()
		if gp == nil {
			break // local list drained
		}
		if gp.stack.lo != 0 {
			withStack.push(gp)
		} else {
			withoutStack.push(gp)
		}
	}
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(withoutStack)
	sched.gFree.stack.pushAll(withStack)
	unlock(&sched.gFree.lock)
}
// Breakpoint executes a breakpoint trap.
// It forwards to the architecture-specific breakpoint implementation.
func Breakpoint() {
	breakpoint()
}
// dolockOSThread is called by LockOSThread and lockOSThread below
// after they modify m.locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
//
//go:nosplit
func dolockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	// Wire in both directions: the m records its locked g and the g
	// records its locked m.
	gp.m.lockedg.set(gp)
	gp.lockedm.set(gp.m)
}
// LockOSThread wires the calling goroutine to its current operating system thread.
// The calling goroutine will always execute in that thread,
// and no other goroutine will execute in it,
// until the calling goroutine has made as many calls to
// [UnlockOSThread] as to LockOSThread.
// If the calling goroutine exits without unlocking the thread,
// the thread will be terminated.
//
// All init functions are run on the startup thread. Calling LockOSThread
// from an init function will cause the main function to be invoked on
// that thread.
//
// A goroutine should call LockOSThread before calling OS services or
// non-Go library functions that depend on per-thread state.
//
//go:nosplit
func LockOSThread() {
	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
		// If we need to start a new thread from the locked
		// thread, we need the template thread. Start it now
		// while we're in a known-good state.
		startTemplateThread()
	}
	gp := getg()
	gp.m.lockedExt++
	if gp.m.lockedExt == 0 {
		// The external nesting counter wrapped around. Undo the
		// increment and report the misuse instead of silently
		// behaving as if the thread were unlocked.
		gp.m.lockedExt--
		panic("LockOSThread nesting overflow")
	}
	dolockOSThread()
}
// lockOSThread is the runtime-internal variant of LockOSThread. It
// tracks its nesting in m.lockedInt, separate from the lockedExt
// counter used by the public API.
//
//go:nosplit
func lockOSThread() {
	getg().m.lockedInt++
	dolockOSThread()
}
// dounlockOSThread is called by UnlockOSThread and unlockOSThread below
// after they update m->locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
//
//go:nosplit
func dounlockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	// Only fully unwire the g from the m once both the internal and
	// external lock counts have dropped to zero.
	if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
		return
	}
	gp.m.lockedg = 0
	gp.lockedm = 0
}
// UnlockOSThread undoes an earlier call to LockOSThread.
// If this drops the number of active LockOSThread calls on the
// calling goroutine to zero, it unwires the calling goroutine from
// its fixed operating system thread.
// If there are no active LockOSThread calls, this is a no-op.
//
// Before calling UnlockOSThread, the caller must ensure that the OS
// thread is suitable for running other goroutines. If the caller made
// any permanent changes to the state of the thread that would affect
// other goroutines, it should not call this function and thus leave
// the goroutine locked to the OS thread until the goroutine (and
// hence the thread) exits.
//
//go:nosplit
func UnlockOSThread() {
	gp := getg()
	// Unbalanced external unlocks are deliberately a no-op.
	if gp.m.lockedExt == 0 {
		return
	}
	gp.m.lockedExt--
	dounlockOSThread()
}
// unlockOSThread is the runtime-internal counterpart of UnlockOSThread.
// Unlike the public API, an unbalanced internal unlock is a runtime bug
// and throws.
//
//go:nosplit
func unlockOSThread() {
	gp := getg()
	if gp.m.lockedInt == 0 {
		// NOTE(review): the throw runs via systemstack, presumably to
		// keep this nosplit frame small — confirm.
		systemstack(badunlockosthread)
	}
	gp.m.lockedInt--
	dounlockOSThread()
}
// badunlockosthread reports an unlockOSThread call with no matching
// lockOSThread. Split out so unlockOSThread can invoke it via systemstack.
func badunlockosthread() {
	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
}
// gcount returns an estimate of the number of live goroutines:
// everything in allgs minus the dead gs parked on the global and
// per-P free lists, and minus system goroutines unless includeSys is
// set. The inputs change concurrently, so the result may be
// inconsistent, but it is clamped to at least 1 because the calling
// goroutine is certainly running.
func gcount(includeSys bool) int32 {
	n := int32(atomic.Loaduintptr(&allglen))
	n -= sched.gFree.stack.size + sched.gFree.noStack.size
	if !includeSys {
		n -= sched.ngsys.Load()
	}
	for _, pp := range allp {
		n -= pp.gFree.size
	}
	if n < 1 {
		return 1
	}
	return n
}
// goroutineleakcount returns the number of leaked goroutines last reported by
// the runtime.
//
// Exposed to runtime/pprof via linkname; do not rename.
//
//go:linkname goroutineleakcount runtime/pprof.runtime_goroutineleakcount
func goroutineleakcount() int {
	return work.goroutineLeak.count
}
// mcount returns the number of existing ms: sched.mnext (ids handed out
// so far) minus sched.nmfreed (ms already freed).
func mcount() int32 {
	return int32(sched.mnext - sched.nmfreed)
}
// prof holds the global CPU-profiling state shared between
// setcpuprofilerate and the SIGPROF handler (sigprof).
var prof struct {
	// signalLock is a hand-rolled spin lock; it must be signal-safe,
	// which the ordinary runtime mutex is not.
	signalLock atomic.Uint32

	// Must hold signalLock to write. Reads may be lock-free, but
	// signalLock should be taken to synchronize with changes.
	hz atomic.Int32
}
// The functions below exist only so their addresses can serve as
// sentinel PCs in CPU profiles (see sigprof, which records e.g.
// abi.FuncPCABIInternal(_System)+sys.PCQuantum when a real traceback is
// impossible). Their recursive bodies are never meant to execute.
func _System()                    { _System() }
func _ExternalCode()              { _ExternalCode() }
func _LostExternalCode()          { _LostExternalCode() }
func _GC()                        { _GC() }
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
func _LostContendedRuntimeLock()  { _LostContendedRuntimeLock() }
func _VDSO()                      { _VDSO() }
// Called if we receive a SIGPROF signal.
// Called by the signal handler, may run during STW.
//
// pc/sp/lr describe the interrupted context; gp is the interrupted
// goroutine (may be nil on Windows, see below) and mp its m.
// Records one CPU profile sample, stitching together cgo, libcall and
// VDSO frames with the Go stack where applicable.
//
//go:nowritebarrierrec
func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
	if prof.hz.Load() == 0 {
		return
	}

	// If mp.profilehz is 0, then profiling is not enabled for this thread.
	// We must check this to avoid a deadlock between setcpuprofilerate
	// and the call to cpuprof.add, below.
	if mp != nil && mp.profilehz == 0 {
		return
	}

	// On mips{,le}/arm, 64bit atomics are emulated with spinlocks, in
	// internal/runtime/atomic. If SIGPROF arrives while the program is inside
	// the critical section, it creates a deadlock (when writing the sample).
	// As a workaround, create a counter of SIGPROFs while in critical section
	// to store the count, and pass it to sigprof.add() later when SIGPROF is
	// received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc).
	if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
		if f := findfunc(pc); f.valid() {
			if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
				cpuprof.lostAtomic++
				return
			}
		}
		if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
			// internal/runtime/atomic functions call into kernel
			// helpers on arm < 7. See
			// internal/runtime/atomic/sys_linux_arm.s.
			cpuprof.lostAtomic++
			return
		}
	}

	// Profiling runs concurrently with GC, so it must not allocate.
	// Set a trap in case the code does allocate.
	// Note that on windows, one thread takes profiles of all the
	// other threads, so mp is usually not getg().m.
	// In fact mp may not even be stopped.
	// See golang.org/issue/17165.
	getg().m.mallocing++

	var u unwinder
	var stk [maxCPUProfStack]uintptr
	n := 0
	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
		cgoOff := 0
		// Check cgoCallersUse to make sure that we are not
		// interrupting other code that is fiddling with
		// cgoCallers. We are running in a signal handler
		// with all signals blocked, so we don't have to worry
		// about any other code interrupting us.
		if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
				cgoOff++
			}
			// Prepend the saved cgo frames, then consume them.
			n += copy(stk[:], mp.cgoCallers[:cgoOff])
			mp.cgoCallers[0] = 0
		}

		// Collect Go stack that leads to the cgo call.
		u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
	} else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
		// Libcall, i.e. runtime syscall on windows.
		// Collect Go stack that leads to the call.
		u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
	} else if mp != nil && mp.vdsoSP != 0 {
		// VDSO call, e.g. nanotime1 on Linux.
		// Collect Go stack that leads to the call.
		u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
	} else {
		u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
	}
	n += tracebackPCs(&u, 0, stk[n:])

	if n <= 0 {
		// Normal traceback is impossible or has failed.
		// Account it against abstract "System" or "GC".
		n = 2
		if inVDSOPage(pc) {
			pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
		} else if pc > firstmoduledata.etext {
			// "ExternalCode" is better than "etext".
			pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
		}
		stk[0] = pc
		if mp.preemptoff != "" {
			stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
		} else {
			stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
		}
	}

	if prof.hz.Load() != 0 {
		// Note: it can happen on Windows that we interrupted a system thread
		// with no g, so gp could be nil. The other nil checks are done out of
		// caution, but not expected to be nil in practice.
		var tagPtr *unsafe.Pointer
		if gp != nil && gp.m != nil && gp.m.curg != nil {
			tagPtr = &gp.m.curg.labels
		}
		cpuprof.add(tagPtr, stk[:n])

		gprof := gp
		// Deliberate shadowing: from here on, mp/pp refer to the values
		// derived from gp for the trace sample, not the parameter.
		var mp *m
		var pp *p
		if gp != nil && gp.m != nil {
			if gp.m.curg != nil {
				gprof = gp.m.curg
			}
			mp = gp.m
			pp = gp.m.p.ptr()
		}
		traceCPUSample(gprof, mp, pp, stk[:n])
	}
	getg().m.mallocing--
}
// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, setcpuprofilerate turns off CPU profiling.
func setcpuprofilerate(hz int32) {
	// Force sane arguments.
	if hz < 0 {
		hz = 0
	}

	// Disable preemption, otherwise we can be rescheduled to another thread
	// that has profiling enabled.
	gp := getg()
	gp.m.locks++

	// Stop profiler on this thread so that it is safe to lock prof.
	// if a profiling signal came in while we had prof locked,
	// it would deadlock.
	setThreadCPUProfiler(0)

	// Spin on the signal-safe lock; osyield keeps the spin polite.
	for !prof.signalLock.CompareAndSwap(0, 1) {
		osyield()
	}
	if prof.hz.Load() != hz { // ignore requests to set the same rate
		setProcessCPUProfiler(hz)
		prof.hz.Store(hz)
	}
	prof.signalLock.Store(0)

	lock(&sched.lock)
	sched.profilehz = hz
	unlock(&sched.lock)

	if hz != 0 {
		// Re-enable the per-thread timer at the new rate.
		setThreadCPUProfiler(hz)
	}

	gp.m.locks--
}
// init initializes pp, which may be a freshly allocated p or a
// previously destroyed p, and transitions it to status _Pgcstop.
//
// id is the P's index in allp; it is also propagated to the P's GC
// work state and used to pick mcache0 for P 0 during bootstrap.
func (pp *p) init(id int32) {
	pp.id = id
	pp.gcw.id = id
	pp.status = _Pgcstop
	// Point the caches at their fixed-size inline buffers.
	pp.sudogcache = pp.sudogbuf[:0]
	pp.deferpool = pp.deferpoolbuf[:0]
	pp.wbBuf.reset()
	if pp.mcache == nil {
		if id == 0 {
			if mcache0 == nil {
				throw("missing mcache?")
			}
			// Use the bootstrap mcache0. Only one P will get
			// mcache0: the one with ID 0.
			pp.mcache = mcache0
		} else {
			pp.mcache = allocmcache()
		}
	}
	if raceenabled && pp.raceprocctx == 0 {
		if id == 0 {
			pp.raceprocctx = raceprocctx0
			raceprocctx0 = 0 // bootstrap
		} else {
			pp.raceprocctx = raceproccreate()
		}
	}
	lockInit(&pp.timers.mu, lockRankTimers)

	// This P may get timers when it starts running. Set the mask here
	// since the P may not go through pidleget (notably P 0 on startup).
	timerpMask.set(id)
	// Similarly, we may not go through pidleget before this P starts
	// running if it is P 0 on startup.
	idlepMask.clear(id)
}
// destroy releases all of the resources associated with pp and
// transitions it to status _Pdead.
//
// sched.lock must be held and the world must be stopped.
func (pp *p) destroy() {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	// Move all runnable goroutines to the global queue
	for pp.runqhead != pp.runqtail {
		// Pop from tail of local queue
		pp.runqtail--
		gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
		// Push onto head of global queue
		globrunqputhead(gp)
	}
	if pp.runnext != 0 {
		globrunqputhead(pp.runnext.ptr())
		pp.runnext = 0
	}

	// Move all timers to the local P.
	getg().m.p.ptr().timers.take(&pp.timers)

	// No need to flush p's write barrier buffer or span queue, as Ps
	// cannot be destroyed during the mark phase.
	if phase := gcphase; phase != _GCoff {
		println("runtime: p id", pp.id, "destroyed during GC phase", phase)
		throw("P destroyed while GC is running")
	}
	// We should free the queues though.
	pp.gcw.spanq.destroy()

	// Drop cached sudogs, pinners and defers so they can be collected.
	clear(pp.sudogbuf[:])
	pp.sudogcache = pp.sudogbuf[:0]
	pp.pinnerCache = nil
	clear(pp.deferpoolbuf[:])
	pp.deferpool = pp.deferpoolbuf[:0]
	systemstack(func() {
		for i := 0; i < pp.mspancache.len; i++ {
			// Safe to call since the world is stopped.
			mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
		}
		pp.mspancache.len = 0
		lock(&mheap_.lock)
		pp.pcache.flush(&mheap_.pages)
		unlock(&mheap_.lock)
	})
	freemcache(pp.mcache)
	pp.mcache = nil

	// Hand the P's cached dead gs back to the global free lists.
	gfpurge(pp)
	if raceenabled {
		if pp.timers.raceCtx != 0 {
			// The race detector code uses a callback to fetch
			// the proc context, so arrange for that callback
			// to see the right thing.
			// This hack only works because we are the only
			// thread running.
			mp := getg().m
			phold := mp.p.ptr()
			mp.p.set(pp)

			racectxend(pp.timers.raceCtx)
			pp.timers.raceCtx = 0

			mp.p.set(phold)
		}
		raceprocdestroy(pp.raceprocctx)
		pp.raceprocctx = 0
	}
	pp.gcAssistTime = 0

	// Fold the P's local statistics into the global accumulators
	// before the P goes away.
	gcCleanups.queued += pp.cleanupsQueued
	pp.cleanupsQueued = 0
	sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
	pp.goroutinesCreated = 0
	pp.xRegs.free()
	pp.status = _Pdead
}
// Change number of processors.
//
// sched.lock must be held, and the world must be stopped.
//
// gcworkbufs must not be being modified by either the GC or the write barrier
// code, so the GC must not be running if the number of Ps actually changes.
//
// Returns list of Ps with local work, they need to be scheduled by the caller.
func procresize(nprocs int32) *p {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	old := gomaxprocs
	if old < 0 || nprocs <= 0 {
		throw("procresize: invalid arg")
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.Gomaxprocs(nprocs)
		traceRelease(trace)
	}

	// update statistics
	now := nanotime()
	if sched.procresizetime != 0 {
		sched.totaltime += int64(old) * (now - sched.procresizetime)
	}
	sched.procresizetime = now

	// Grow allp if necessary.
	if nprocs > int32(len(allp)) {
		// Synchronize with retake, which could be running
		// concurrently since it doesn't run on a P.
		lock(&allpLock)
		if nprocs <= int32(cap(allp)) {
			allp = allp[:nprocs]
		} else {
			nallp := make([]*p, nprocs)
			// Copy everything up to allp's cap so we
			// never lose old allocated Ps.
			copy(nallp, allp[:cap(allp)])
			allp = nallp
		}

		// Keep the per-P bitmasks in sync with allp's length.
		idlepMask = idlepMask.resize(nprocs)
		timerpMask = timerpMask.resize(nprocs)
		work.spanqMask = work.spanqMask.resize(nprocs)
		unlock(&allpLock)
	}

	// initialize new P's
	for i := old; i < nprocs; i++ {
		pp := allp[i]
		if pp == nil {
			pp = new(p)
		}
		pp.init(i)
		atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
	}

	gp := getg()
	if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
		// continue to use the current P
		gp.m.p.ptr().status = _Prunning
		gp.m.p.ptr().mcache.prepareForSweep()
	} else {
		// release the current P and acquire allp[0].
		//
		// We must do this before destroying our current P
		// because p.destroy itself has write barriers, so we
		// need to do that from a valid P.
		if gp.m.p != 0 {
			trace := traceAcquire()
			if trace.ok() {
				// Pretend that we were descheduled
				// and then scheduled again to keep
				// the trace consistent.
				trace.GoSched()
				trace.ProcStop(gp.m.p.ptr())
				traceRelease(trace)
			}
			gp.m.p.ptr().m = 0
		}
		gp.m.p = 0
		pp := allp[0]
		pp.m = 0
		pp.status = _Pidle
		acquirep(pp)
		trace := traceAcquire()
		if trace.ok() {
			trace.GoStart()
			traceRelease(trace)
		}
	}

	// g.m.p is now set, so we no longer need mcache0 for bootstrapping.
	mcache0 = nil

	// release resources from unused P's
	for i := nprocs; i < old; i++ {
		pp := allp[i]
		pp.destroy()
		// can't free P itself because it can be referenced by an M in syscall
	}

	// Trim allp.
	if int32(len(allp)) != nprocs {
		lock(&allpLock)
		allp = allp[:nprocs]
		idlepMask = idlepMask.resize(nprocs)
		timerpMask = timerpMask.resize(nprocs)
		work.spanqMask = work.spanqMask.resize(nprocs)
		unlock(&allpLock)
	}

	// Assign Ms to Ps with runnable goroutines.
	var runnablePs *p
	var runnablePsNeedM *p
	var idlePs *p
	for i := nprocs - 1; i >= 0; i-- {
		pp := allp[i]
		if gp.m.p.ptr() == pp {
			// Skip the P we are running on; it stays with us.
			continue
		}
		pp.status = _Pidle
		if runqempty(pp) {
			pp.link.set(idlePs)
			idlePs = pp
			continue
		}

		// Prefer to run on the most recent M if it is
		// available.
		//
		// Ps with no oldm (or for which oldm is already taken
		// by an earlier P), we delay until all oldm Ps are
		// handled. Otherwise, mget may return an M that a
		// later P has in oldm.
		var mp *m
		if oldm := pp.oldm.get(); oldm != nil {
			// Returns nil if oldm is not idle.
			mp = mgetSpecific(oldm)
		}
		if mp == nil {
			// Call mget later.
			pp.link.set(runnablePsNeedM)
			runnablePsNeedM = pp
			continue
		}
		pp.m.set(mp)
		pp.link.set(runnablePs)
		runnablePs = pp
	}
	// Assign Ms to remaining runnable Ps without usable oldm. See comment
	// above.
	for runnablePsNeedM != nil {
		pp := runnablePsNeedM
		runnablePsNeedM = pp.link.ptr()

		mp := mget()
		pp.m.set(mp)
		pp.link.set(runnablePs)
		runnablePs = pp
	}

	// Now that we've assigned Ms to Ps with runnable goroutines, assign GC
	// mark workers to remaining idle Ps, if needed.
	//
	// By assigning GC workers to Ps here, we slightly speed up starting
	// the world, as we will start enough Ps to run all of the user
	// goroutines and GC mark workers all at once, rather than using a
	// sequence of wakep calls as each P's findRunnable realizes it needs
	// to run a mark worker instead of a user goroutine.
	//
	// By assigning GC workers to Ps only _after_ previously-running Ps are
	// assigned Ms, we ensure that goroutines previously running on a P
	// continue to run on the same P, with GC mark workers preferring
	// previously-idle Ps. This helps prevent goroutines from shuffling
	// around too much across STW.
	//
	// N.B., if there aren't enough Ps left in idlePs for all of the GC
	// mark workers, then findRunnable will still choose to run mark
	// workers on Ps assigned above.
	//
	// N.B., we do this during any STW in the mark phase, not just the
	// sweep termination STW that starts the mark phase. gcBgMarkWorker
	// always preempts by removing itself from the P, so even unrelated
	// STWs during the mark require that Ps reselect mark workers upon
	// restart.
	if gcBlackenEnabled != 0 {
		for idlePs != nil {
			pp := idlePs
			ok, _ := gcController.assignWaitingGCWorker(pp, now)
			if !ok {
				// No more mark workers needed.
				break
			}

			// Got a worker, P is now runnable.
			//
			// mget may return nil if there aren't enough Ms, in
			// which case startTheWorldWithSema will start one.
			//
			// N.B. findRunnableGCWorker will make the worker G
			// itself runnable.
			idlePs = pp.link.ptr()
			mp := mget()
			pp.m.set(mp)
			pp.link.set(runnablePs)
			runnablePs = pp
		}
	}

	// Finally, any remaining Ps are truly idle.
	for idlePs != nil {
		pp := idlePs
		idlePs = pp.link.ptr()
		pidleput(pp, now)
	}
	stealOrder.reset(uint32(nprocs))
	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
	if old != nprocs {
		// Notify the limiter that the amount of procs has changed.
		gcCPULimiter.resetCapacity(now, nprocs)
	}
	return runnablePs
}
// Associate p and the current m.
//
// This function is allowed to have write barriers even if the caller
// isn't because it immediately acquires pp.
//
//go:yeswritebarrierrec
func acquirep(pp *p) {
	// Do the work.
	acquirepNoTrace(pp)

	// Emit the event.
	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStart()
		traceRelease(trace)
	}
}
// Internals of acquirep, just skipping the trace events.
//
//go:yeswritebarrierrec
func acquirepNoTrace(pp *p) {
	// Do the part that isn't allowed to have write barriers.
	wirep(pp)

	// Have p; write barriers now allowed.

	// The M we're associating with will be the old M after the next
	// releasep. We must set this here because write barriers are not
	// allowed in releasep.
	pp.oldm = pp.m.ptr().self

	// Perform deferred mcache flush before this P can allocate
	// from a potentially stale mcache.
	pp.mcache.prepareForSweep()
}
// wirep is the first step of acquirep, which actually associates the
// current M to pp. This is broken out so we can disallow write
// barriers for this part, since we don't yet have a P.
//
// Throws if the M already has a P, or if pp is not idle / already has
// an M.
//
//go:nowritebarrierrec
//go:nosplit
func wirep(pp *p) {
	gp := getg()

	if gp.m.p != 0 {
		// Call on the systemstack to avoid a nosplit overflow build failure
		// on some platforms when built with -N -l. See #64113.
		systemstack(func() {
			throw("wirep: already in go")
		})
	}
	if pp.m != 0 || pp.status != _Pidle {
		// Call on the systemstack to avoid a nosplit overflow build failure
		// on some platforms when built with -N -l. See #64113.
		systemstack(func() {
			id := int64(0)
			if pp.m != 0 {
				id = pp.m.ptr().id
			}
			print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
			throw("wirep: invalid p state")
		})
	}
	// Wire both directions and mark the P as running.
	gp.m.p.set(pp)
	pp.m.set(gp.m)
	pp.status = _Prunning
}
// Disassociate p and the current m.
// Emits a ProcStop trace event, then delegates to releasepNoTrace.
func releasep() *p {
	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStop(getg().m.p.ptr())
		traceRelease(trace)
	}
	return releasepNoTrace()
}
// Disassociate p and the current m without tracing an event.
// Returns the released P, now in state _Pidle with no M.
func releasepNoTrace() *p {
	gp := getg()

	if gp.m.p == 0 {
		throw("releasep: invalid arg")
	}
	pp := gp.m.p.ptr()
	if pp.m.ptr() != gp.m || pp.status != _Prunning {
		print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
		throw("releasep: invalid p state")
	}

	// A stopping P must release any GC mark worker it has reserved.
	gcController.releaseNextGCMarkWorker(pp)

	gp.m.p = 0
	pp.m = 0
	pp.status = _Pidle
	return pp
}
// incidlelocked adjusts sched.nmidlelocked by v under sched.lock.
// When the count increases (one more locked m has gone idle), run the
// deadlock check, since the system may now have no runnable work.
func incidlelocked(v int32) {
	lock(&sched.lock)
	sched.nmidlelocked += v
	if v > 0 {
		checkdead()
	}
	unlock(&sched.lock)
}
// Check for deadlock situation.
// The check is based on number of running M's, if 0 -> deadlock.
// sched.lock must be held.
//
// On detecting a deadlock this calls fatal and does not return; note
// that it unlocks sched.lock first so that GODEBUG=scheddetail=1 output
// can reacquire it.
func checkdead() {
	assertLockHeld(&sched.lock)

	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
	// there are no running goroutines. The calling program is
	// assumed to be running.
	// One exception is Wasm, which is single-threaded. If we are
	// in Go and all goroutines are blocked, it deadlocks.
	if (islibrary || isarchive) && GOARCH != "wasm" {
		return
	}

	// If we are dying because of a signal caught on an already idle thread,
	// freezetheworld will cause all running threads to block.
	// And runtime will essentially enter into deadlock state,
	// except that there is a thread that will call exit soon.
	if panicking.Load() > 0 {
		return
	}

	// If we are not running under cgo, but we have an extra M then account
	// for it. (It is possible to have an extra M on Windows without cgo to
	// accommodate callbacks created by syscall.NewCallback. See issue #6751
	// for details.)
	var run0 int32
	if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
		run0 = 1
	}

	// run is the number of ms doing real work right now.
	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
	if run > run0 {
		return
	}
	if run < 0 {
		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
		unlock(&sched.lock)
		throw("checkdead: inconsistent counts")
	}

	// Count goroutines that are merely blocked (could in principle run
	// again); finding any g that is actually runnable/running here is a
	// scheduler bug, because no m is running it.
	grunning := 0
	forEachG(func(gp *g) {
		if isSystemGoroutine(gp, false) {
			return
		}
		s := readgstatus(gp)
		switch s &^ _Gscan {
		case _Gwaiting,
			_Gpreempted:
			grunning++
		case _Grunnable,
			_Grunning,
			_Gsyscall:
			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
			unlock(&sched.lock)
			throw("checkdead: runnable g")
		}
	})
	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
		unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
		fatal("no goroutines (main called runtime.Goexit) - deadlock!")
	}

	// Maybe jump time forward for playground.
	if faketime != 0 {
		if when := timeSleepUntil(); when < maxWhen {
			faketime = when

			// Start an M to steal the timer.
			pp, _ := pidleget(faketime)
			if pp == nil {
				// There should always be a free P since
				// nothing is running.
				unlock(&sched.lock)
				throw("checkdead: no p for timer")
			}
			mp := mget()
			if mp == nil {
				// There should always be a free M since
				// nothing is running.
				unlock(&sched.lock)
				throw("checkdead: no m for timer")
			}
			// M must be spinning to steal. We set this to be
			// explicit, but since this is the only M it would
			// become spinning on its own anyways.
			sched.nmspinning.Add(1)
			mp.spinning = true
			mp.nextp.set(pp)
			notewakeup(&mp.park)
			return
		}
	}

	// There are no goroutines running, so we can look at the P's.
	// A pending timer means a goroutine will wake up eventually.
	for _, pp := range allp {
		if len(pp.timers.heap) > 0 {
			return
		}
	}

	unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
	fatal("all goroutines are asleep - deadlock!")
}
// forcegcperiod is the maximum time in nanoseconds between garbage
// collections. If we go this long without a garbage collection, one
// is forced to run.
//
// This is a variable for testing purposes. It normally doesn't change.
var forcegcperiod int64 = 2 * 60 * 1e9 // 2 minutes

// haveSysmon indicates whether there is sysmon thread support.
//
// No threads on wasm yet, so no sysmon.
const haveSysmon = GOARCH != "wasm"
// Always runs without a P, so write barriers are not allowed.
//
// sysmon is the background monitor loop: it polls the network, retakes
// Ps stuck in syscalls, preempts long-running goroutines, kicks the
// scavenger and forced GC, and optionally prints scheduler traces. It
// adapts its own sleep interval between 20us and 10ms depending on how
// much work it finds.
//
//go:nowritebarrierrec
func sysmon() {
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	lastgomaxprocs := int64(0)
	lasttrace := int64(0)
	idle := 0 // how many cycles in succession we had not wokeup somebody
	delay := uint32(0)

	for {
		if idle == 0 { // start with 20us sleep...
			delay = 20
		} else if idle > 50 { // start doubling the sleep after 1ms...
			delay *= 2
		}
		if delay > 10*1000 { // up to 10ms
			delay = 10 * 1000
		}
		usleep(delay)

		// sysmon should not enter deep sleep if schedtrace is enabled so that
		// it can print that information at the right time.
		//
		// It should also not enter deep sleep if there are any active P's so
		// that it can retake P's from syscalls, preempt long running G's, and
		// poll the network if all P's are busy for long stretches.
		//
		// It should wakeup from deep sleep if any P's become active either due
		// to exiting a syscall or waking up due to a timer expiring so that it
		// can resume performing those duties. If it wakes from a syscall it
		// resets idle and delay as a bet that since it had retaken a P from a
		// syscall before, it may need to do it again shortly after the
		// application starts work again. It does not reset idle when waking
		// from a timer to avoid adding system load to applications that spend
		// most of their time sleeping.
		now := nanotime()
		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
			lock(&sched.lock)
			// Re-check the condition under sched.lock before parking.
			if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
				syscallWake := false
				next := timeSleepUntil()
				if next > now {
					sched.sysmonwait.Store(true)
					unlock(&sched.lock)
					// Make wake-up period small enough
					// for the sampling to be correct.
					sleep := forcegcperiod / 2
					if next-now < sleep {
						sleep = next - now
					}
					shouldRelax := sleep >= osRelaxMinNS
					if shouldRelax {
						osRelax(true)
					}
					syscallWake = notetsleep(&sched.sysmonnote, sleep)
					if shouldRelax {
						osRelax(false)
					}
					lock(&sched.lock)
					sched.sysmonwait.Store(false)
					noteclear(&sched.sysmonnote)
				}
				if syscallWake {
					idle = 0
					delay = 20
				}
			}
			unlock(&sched.lock)
		}

		lock(&sched.sysmonlock)
		// Update now in case we blocked on sysmonnote or spent a long time
		// blocked on schedlock or sysmonlock above.
		now = nanotime()

		// trigger libc interceptors if needed
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		// poll network if not polled for more than 10ms
		lastpoll := sched.lastpoll.Load()
		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
			sched.lastpoll.CompareAndSwap(lastpoll, now)
			list, delta := netpoll(0) // non-blocking - returns list of goroutines
			if !list.empty() {
				// Need to decrement number of idle locked M's
				// (pretending that one more is running) before injectglist.
				// Otherwise it can lead to the following situation:
				// injectglist grabs all P's but before it starts M's to run the P's,
				// another M returns from syscall, finishes running its G,
				// observes that there is no work to do and no other running M's
				// and reports deadlock.
				incidlelocked(-1)
				injectglist(&list)
				incidlelocked(1)
				netpollAdjustWaiters(delta)
			}
		}
		// Check if we need to update GOMAXPROCS at most once per second.
		if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
			sysmonUpdateGOMAXPROCS()
			lastgomaxprocs = now
		}
		if scavenger.sysmonWake.Load() != 0 {
			// Kick the scavenger awake if someone requested it.
			scavenger.wake()
		}
		// retake P's blocked in syscalls
		// and preempt long running G's
		if retake(now) != 0 {
			idle = 0
		} else {
			idle++
		}
		// check if we need to force a GC
		if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
			lock(&forcegc.lock)
			forcegc.idle.Store(false)
			var list gList
			list.push(forcegc.g)
			injectglist(&list)
			unlock(&forcegc.lock)
		}
		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
			lasttrace = now
			schedtrace(debug.scheddetail > 0)
		}
		unlock(&sched.sysmonlock)
	}
}
// sysmontick records the per-P scheduler and syscall state most recently
// observed by sysmon (see retake). The *tick fields hold the last observed
// values of p.schedtick/p.syscalltick; the *when fields record the time at
// which the corresponding tick was last seen to change.
type sysmontick struct {
	schedtick   uint32 // last observed p.schedtick
	syscalltick uint32 // last observed p.syscalltick
	schedwhen   int64  // time schedtick was last observed to change
	syscallwhen int64  // time syscalltick was last observed to change
}

// forcePreemptNS is the time slice given to a G before it is
// preempted.
const forcePreemptNS = 10 * 1000 * 1000 // 10ms
// retake is called periodically by sysmon with a recent nanotime() value.
// It does two things:
//
//  1. Preempts Gs that have been running on the same schedtick for longer
//     than forcePreemptNS (via preemptone).
//  2. Retakes Ps that have been blocked in a system call for at least one
//     sysmon tick, handing them off so other work can run.
//
// It returns the number of Ps taken from syscalls; sysmon uses a nonzero
// result to reset its idle backoff.
func retake(now int64) uint32 {
	n := 0
	// Prevent allp slice changes. This lock will be completely
	// uncontended unless we're already stopping the world.
	lock(&allpLock)
	// We can't use a range loop over allp because we may
	// temporarily drop the allpLock. Hence, we need to re-fetch
	// allp each time around the loop.
	for i := 0; i < len(allp); i++ {
		// Quickly filter out non-running Ps. Running Ps are either
		// in a syscall or are actually executing. Idle Ps don't
		// need to be retaken.
		//
		// This is best-effort, so it's OK that it's racy. Our target
		// is to retake Ps that have been running or in a syscall for
		// a long time (milliseconds), so the state has plenty of time
		// to stabilize.
		pp := allp[i]
		if pp == nil || atomic.Load(&pp.status) != _Prunning {
			// pp can be nil if procresize has grown
			// allp but not yet created new Ps.
			continue
		}
		pd := &pp.sysmontick
		sysretake := false
		// Preempt G if it's running on the same schedtick for
		// too long. This could be from a single long-running
		// goroutine or a sequence of goroutines run via
		// runnext, which share a single schedtick time slice.
		schedt := int64(pp.schedtick)
		if int64(pd.schedtick) != schedt {
			// Tick advanced: remember the new tick and when we saw it.
			pd.schedtick = uint32(schedt)
			pd.schedwhen = now
		} else if pd.schedwhen+forcePreemptNS <= now {
			preemptone(pp)
			// If pp is in a syscall, preemptone doesn't work.
			// The goroutine nor the thread can respond to a
			// preemption request because they're not in Go code,
			// so we need to take the P ourselves.
			sysretake = true
		}
		// Drop allpLock so we can take sched.lock.
		unlock(&allpLock)
		// Need to decrement number of idle locked M's (pretending that
		// one more is running) before we take the P and resume.
		// Otherwise the M from which we retake can exit the syscall,
		// increment nmidle and report deadlock.
		//
		// Can't call incidlelocked once we setBlockOnExitSyscall, due
		// to a lock ordering violation between sched.lock and _Gscan.
		incidlelocked(-1)
		// Try to prevent the P from continuing in the syscall, if it's in one at all.
		thread, ok := setBlockOnExitSyscall(pp)
		if !ok {
			// Not in a syscall, or something changed out from under us.
			goto done
		}
		// Retake the P if it's there for more than 1 sysmon tick (at least 20us).
		if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
			// Syscall tick advanced: just record it and let the thread go.
			pd.syscalltick = uint32(syst)
			pd.syscallwhen = now
			thread.resume()
			goto done
		}
		// On the one hand we don't want to retake Ps if there is no other work to do,
		// but on the other hand we want to retake them eventually
		// because they can prevent the sysmon thread from deep sleep.
		if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
			thread.resume()
			goto done
		}
		// Take the P. Note: because we have the scan bit, the goroutine
		// is at worst stuck spinning in exitsyscall.
		thread.takeP()
		thread.resume()
		n++
		// Handoff the P for some other thread to run it.
		handoffp(pp)
		// The P has been handed off to another thread, so risk of a false
		// deadlock report while we hold onto it is gone.
	done:
		incidlelocked(1)
		lock(&allpLock)
	}
	unlock(&allpLock)
	return uint32(n)
}
// syscallingThread represents a thread in a system call that temporarily
// cannot advance out of the system call.
//
// It is produced by setBlockOnExitSyscall, which acquires gp's _Gscan bit;
// holding that bit is what keeps the thread from advancing. Release it
// with resume.
type syscallingThread struct {
	gp     *g     // goroutine in _Gsyscall or _Gdeadextra, with _Gscan held
	mp     *m     // thread running gp (validated gp.m == mp at capture)
	pp     *p     // P wired to mp at capture (validated mp.p == pp)
	status uint32 // gp's status without _Gscan, restored by resume
}
// setBlockOnExitSyscall prevents pp's thread from advancing out of
// exitsyscall. On success, returns the g/m/p state of the thread
// and true. At that point, the caller owns the g/m/p links referenced,
// the goroutine is in _Gsyscall, and prevented from transitioning out
// of it. On failure, it returns false, and none of these guarantees are
// made.
//
// Callers must call resume on the resulting thread state once
// they're done with thread, otherwise it will remain blocked forever.
//
// This function races with state changes on pp, and thus may fail
// if pp is not in a system call, or exits a system call concurrently
// with this function. However, this function is safe to call without
// any additional synchronization.
func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
	if pp.status != _Prunning {
		return syscallingThread{}, false
	}
	// Be very careful here, these reads are intentionally racy.
	// Once we notice the G is in _Gsyscall, acquire its scan bit,
	// and validate that it's still connected to the *same* M and P,
	// we can actually get to work. Holding the scan bit will prevent
	// the G from exiting the syscall.
	//
	// Our goal here is to interrupt long syscalls. If it turns out
	// that we're wrong and the G switched to another syscall while
	// we were trying to do this, that's completely fine. It's
	// probably making more frequent syscalls and the typical
	// preemption paths should be effective.
	mp := pp.m.ptr()
	if mp == nil {
		// Nothing to do.
		return syscallingThread{}, false
	}
	gp := mp.curg
	if gp == nil {
		// Nothing to do.
		return syscallingThread{}, false
	}
	status := readgstatus(gp) &^ _Gscan
	// A goroutine is considered in a syscall, and may have a corresponding
	// P, if it's in _Gsyscall *or* _Gdeadextra. In the latter case, it's an
	// extra M goroutine.
	if status != _Gsyscall && status != _Gdeadextra {
		// Not in a syscall, nothing to do.
		return syscallingThread{}, false
	}
	// Acquire the scan bit; this is the step that actually blocks the
	// goroutine from leaving the syscall.
	if !castogscanstatus(gp, status, status|_Gscan) {
		// Not in _Gsyscall or _Gdeadextra anymore. Nothing to do.
		return syscallingThread{}, false
	}
	// Re-validate the racy reads above now that the scan bit pins gp.
	if gp.m != mp || gp.m.p.ptr() != pp {
		// This is not what we originally observed. Nothing to do.
		casfrom_Gscanstatus(gp, status|_Gscan, status)
		return syscallingThread{}, false
	}
	return syscallingThread{gp, mp, pp, status}, true
}
// gcstopP unwires the P attached to the syscalling thread
// and moves it into the _Pgcstop state.
//
// The caller must be stopping the world.
func (s syscallingThread) gcstopP() {
	assertLockHeld(&sched.lock)
	s.releaseP(_Pgcstop)
	// Account this P as stopped for the in-progress stop-the-world.
	s.pp.gcStopTime = nanotime()
	sched.stopwait--
}

// takeP unwires the P attached to the syscalling thread
// and moves it into the _Pidle state.
func (s syscallingThread) takeP() {
	s.releaseP(_Pidle)
}

// releaseP unwires the P from the syscalling thread, moving
// it to the provided state. Callers should prefer to use
// takeP and gcstopP.
func (s syscallingThread) releaseP(state uint32) {
	if state != _Pidle && state != _Pgcstop {
		throw("attempted to release P into a bad state")
	}
	trace := traceAcquire()
	// Break the mutual m<->p links before publishing the new status.
	s.pp.m = 0
	s.mp.p = 0
	atomic.Store(&s.pp.status, state)
	if trace.ok() {
		trace.ProcSteal(s.pp)
		traceRelease(trace)
	}
	addGSyscallNoP(s.mp)
	// NOTE(review): advancing syscalltick here appears to let the syscall
	// exit path detect that the P changed hands — confirm against
	// exitsyscall before relying on this.
	s.pp.syscalltick++
}

// resume allows a syscalling thread to advance beyond exitsyscall.
func (s syscallingThread) resume() {
	// Drop the scan bit acquired by setBlockOnExitSyscall, restoring
	// gp's original status.
	casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
}
// preemptall requests that every goroutine currently running on some P be
// preempted. It is purely best-effort: it can miss a goroutine if a
// processor just started running it.
// No locks need to be held.
// It reports whether a preemption request was issued to at least one
// goroutine.
func preemptall() bool {
	preempted := false
	for _, pp := range allp {
		// Only Ps observed running can have a goroutine to preempt.
		if pp.status != _Prunning {
			continue
		}
		preempted = preemptone(pp) || preempted
	}
	return preempted
}
// Tell the goroutine running on processor P to stop.
// This function is purely best-effort. It can incorrectly fail to inform the
// goroutine. It can inform the wrong goroutine. Even if it informs the
// correct goroutine, that goroutine might ignore the request if it is
// simultaneously executing newstack.
// No lock needs to be held.
// Returns true if preemption request was issued.
// The actual preemption will happen at some point in the future
// and will be indicated by the gp->status no longer being
// Grunning
func preemptone(pp *p) bool {
	mp := pp.m.ptr()
	// Never try to preempt ourselves, and bail if the P has no thread.
	if mp == nil || mp == getg().m {
		return false
	}
	gp := mp.curg
	// No user goroutine (or running on the system stack): nothing to preempt.
	if gp == nil || gp == mp.g0 {
		return false
	}
	if readgstatus(gp)&^_Gscan == _Gsyscall {
		// Don't bother trying to preempt a goroutine in a syscall.
		return false
	}
	gp.preempt = true
	// Every call in a goroutine checks for stack overflow by
	// comparing the current stack pointer to gp->stackguard0.
	// Setting gp->stackguard0 to StackPreempt folds
	// preemption into the normal stack overflow check.
	gp.stackguard0 = stackPreempt
	// Request an async preemption of this P.
	if preemptMSupported && debug.asyncpreemptoff == 0 {
		pp.preempt = true
		preemptM(mp)
	}
	return true
}
// starttime is the time of the first schedtrace call; schedtrace reports
// timestamps relative to it.
var starttime int64
// schedtrace prints a snapshot of scheduler state: global counters, per-P
// run queue lengths and ticks, and — in detailed mode — per-P, per-M, and
// per-G state. Times are reported relative to the first schedtrace call
// (starttime).
//
// It holds sched.lock while printing, but much of the P/M/G data it reads
// can still change concurrently, so the output is best-effort.
func schedtrace(detailed bool) {
	now := nanotime()
	if starttime == 0 {
		starttime = now
	}

	lock(&sched.lock)
	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
	if detailed {
		print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
	}
	// We must be careful while reading data from P's, M's and G's.
	// Even if we hold schedlock, most data can be changed concurrently.
	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
	for i, pp := range allp {
		h := atomic.Load(&pp.runqhead)
		t := atomic.Load(&pp.runqtail)
		if detailed {
			print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
			mp := pp.m.ptr()
			if mp != nil {
				print(mp.id)
			} else {
				print("nil")
			}
			print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
		} else {
			// In non-detailed mode format lengths of per-P run queues as:
			// [ len1 len2 len3 len4 ]
			print(" ")
			if i == 0 {
				print("[ ")
			}
			print(t - h)
			if i == len(allp)-1 {
				print(" ]")
			}
		}
	}
	// Non-detailed mode ends here. (Previously two back-to-back
	// "if !detailed" blocks; merged since nothing ran between them.)
	if !detailed {
		// Format per-P schedticks as: schedticks=[ tick1 tick2 tick3 tick4 ].
		print(" schedticks=[ ")
		for _, pp := range allp {
			print(pp.schedtick)
			print(" ")
		}
		print("]\n")
		unlock(&sched.lock)
		return
	}

	for mp := allm; mp != nil; mp = mp.alllink {
		pp := mp.p.ptr()
		print(" M", mp.id, ": p=")
		if pp != nil {
			print(pp.id)
		} else {
			print("nil")
		}
		print(" curg=")
		if mp.curg != nil {
			print(mp.curg.goid)
		} else {
			print("nil")
		}
		print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
		if lockedg := mp.lockedg.ptr(); lockedg != nil {
			print(lockedg.goid)
		} else {
			print("nil")
		}
		print("\n")
	}

	forEachG(func(gp *g) {
		print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
		if gp.m != nil {
			print(gp.m.id)
		} else {
			print("nil")
		}
		print(" lockedm=")
		if lockedm := gp.lockedm.ptr(); lockedm != nil {
			print(lockedm.id)
		} else {
			print("nil")
		}
		print("\n")
	})
	unlock(&sched.lock)
}
// updateMaxProcsGState is the synchronization and hand-off state between
// sysmon and updateMaxProcsGoroutine. sysmon writes procs and clears idle
// to schedule the goroutine; the goroutine sets idle before parking.
type updateMaxProcsGState struct {
	lock mutex
	g    *g          // the updateMaxProcsGoroutine goroutine
	idle atomic.Bool // true while the goroutine is parked awaiting work

	// Readable when idle == false, writable when idle == true.
	procs int32 // new GOMAXPROCS value
}
var (
	// GOMAXPROCS update godebug metric. Incremented if automatic
	// GOMAXPROCS updates actually change the value of GOMAXPROCS.
	updatemaxprocs = &godebugInc{name: "updatemaxprocs"}

	// Synchronization and state between updateMaxProcsGoroutine and
	// sysmon.
	updateMaxProcsG updateMaxProcsGState

	// Synchronization between GOMAXPROCS and sysmon.
	//
	// Setting GOMAXPROCS via a call to GOMAXPROCS disables automatic
	// GOMAXPROCS updates.
	//
	// We want to make two guarantees to callers of GOMAXPROCS. After
	// GOMAXPROCS returns:
	//
	// 1. The runtime will not make any automatic changes to GOMAXPROCS.
	//
	// 2. The runtime will not perform any of the system calls used to
	//    determine the appropriate value of GOMAXPROCS (i.e., it won't
	//    call defaultGOMAXPROCS).
	//
	// (1) is the baseline guarantee that everyone needs. The GOMAXPROCS
	// API isn't useful to anyone if automatic updates may occur after it
	// returns. This is easily achieved by double-checking the state under
	// STW before committing an automatic GOMAXPROCS update.
	//
	// (2) doesn't matter to most users, as it isn't observable as long
	// as (1) holds. However, it can be important to users sandboxing Go.
	// They want to disable these system calls and need some way to know
	// when they are guaranteed the calls will stop.
	//
	// This would be simple to achieve if we simply called
	// defaultGOMAXPROCS under STW in updateMaxProcsGoroutine below.
	// However, we would like to avoid scheduling this goroutine every
	// second when it will almost never do anything. Instead, sysmon calls
	// defaultGOMAXPROCS to decide whether to schedule
	// updateMaxProcsGoroutine. Thus we need to synchronize between sysmon
	// and GOMAXPROCS calls.
	//
	// GOMAXPROCS can't hold a runtime mutex across STW. It could hold a
	// semaphore, but sysmon cannot take semaphores. Instead, we have a
	// more complex scheme:
	//
	// * sysmon holds computeMaxProcsLock while calling defaultGOMAXPROCS.
	// * sysmon skips the current update if sched.customGOMAXPROCS is
	//   set.
	// * GOMAXPROCS sets sched.customGOMAXPROCS once it is committed to
	//   changing GOMAXPROCS.
	// * GOMAXPROCS takes computeMaxProcsLock to wait for outstanding
	//   defaultGOMAXPROCS calls to complete.
	//
	// N.B. computeMaxProcsLock could simply be sched.lock, but we want to
	// avoid holding that lock during the potentially slow
	// defaultGOMAXPROCS.
	computeMaxProcsLock mutex
)
// defaultGOMAXPROCSUpdateEnable starts the GOMAXPROCS update helper
// goroutine, or — when updates are disabled via GODEBUG — records that in
// the godebug metric and does nothing else.
//
// This is based on forcegchelper.
func defaultGOMAXPROCSUpdateEnable() {
	if debug.updatemaxprocs == 0 {
		// Unconditionally increment the metric when updates are disabled.
		//
		// It would be more descriptive if we did a dry run of the
		// complete update, determining the appropriate value of
		// GOMAXPROCS and then bailing out and just incrementing the
		// metric if a change would occur.
		//
		// Not only is that a lot of ongoing work for a disabled
		// feature, but some users need to be able to completely
		// disable the update system calls (such as sandboxes).
		// Currently, updatemaxprocs=0 serves that purpose.
		updatemaxprocs.IncNonDefault()
		return
	}

	go updateMaxProcsGoroutine()
}
// updateMaxProcsGoroutine applies automatic GOMAXPROCS updates on behalf
// of sysmon, which cannot stop the world itself. It parks until sysmon
// (sysmonUpdateGOMAXPROCS) stores the new value in updateMaxProcsG.procs
// and readies it, then commits the change under stop-the-world. It exits
// permanently once a manual GOMAXPROCS call has disabled automatic
// updates.
func updateMaxProcsGoroutine() {
	updateMaxProcsG.g = getg()
	lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
	for {
		lock(&updateMaxProcsG.lock)
		if updateMaxProcsG.idle.Load() {
			// We should only ever run non-idle; sysmon clears idle
			// before waking us.
			throw("updateMaxProcsGoroutine: phase error")
		}
		updateMaxProcsG.idle.Store(true)
		goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
		// This goroutine is explicitly resumed by sysmon.

		stw := stopTheWorldGC(stwGOMAXPROCS)

		// Still OK to update? A manual GOMAXPROCS call may have raced
		// with sysmon's decision to wake us.
		lock(&sched.lock)
		custom := sched.customGOMAXPROCS
		unlock(&sched.lock)
		if custom {
			// Manual setting wins; automatic updates stop for good.
			startTheWorldGC(stw)
			return
		}

		// newprocs will be processed by startTheWorld
		//
		// TODO(prattmic): this could use a nicer API. Perhaps add it to the
		// stw parameter?
		newprocs = updateMaxProcsG.procs
		lock(&sched.lock)
		sched.customGOMAXPROCS = false
		unlock(&sched.lock)
		startTheWorldGC(stw)
	}
}
// sysmonUpdateGOMAXPROCS checks (from sysmon) whether the default
// GOMAXPROCS value has changed and, if so, wakes updateMaxProcsGoroutine
// to commit the change under STW. It does nothing if GOMAXPROCS was set
// manually or if a previous update is still pending.
func sysmonUpdateGOMAXPROCS() {
	// Synchronize with GOMAXPROCS. See comment on computeMaxProcsLock.
	lock(&computeMaxProcsLock)

	// No update if GOMAXPROCS was set manually.
	lock(&sched.lock)
	custom := sched.customGOMAXPROCS
	curr := gomaxprocs
	unlock(&sched.lock)
	if custom {
		unlock(&computeMaxProcsLock)
		return
	}

	// Don't hold sched.lock while we read the filesystem.
	procs := defaultGOMAXPROCS(0)
	unlock(&computeMaxProcsLock)
	if procs == curr {
		// Nothing to do.
		return
	}

	// Sysmon can't directly stop the world. Run the helper to do so on our
	// behalf. If updateMaxProcsG.idle is false, then a previous update is
	// still pending.
	if updateMaxProcsG.idle.Load() {
		lock(&updateMaxProcsG.lock)
		updateMaxProcsG.procs = procs
		updateMaxProcsG.idle.Store(false)
		var list gList
		list.push(updateMaxProcsG.g)
		injectglist(&list)
		unlock(&updateMaxProcsG.lock)
	}
}
// schedEnableUser enables or disables the scheduling of user
// goroutines.
//
// This does not stop already running user goroutines, so the caller
// should first stop the world when disabling user goroutines.
func schedEnableUser(enable bool) {
	lock(&sched.lock)
	if sched.disable.user == !enable {
		// Already in the requested state.
		unlock(&sched.lock)
		return
	}
	sched.disable.user = !enable
	if enable {
		// Move all goroutines parked while user scheduling was disabled
		// onto the global run queue, then wake idle Ps (at most one per
		// released goroutine) to pick them up.
		n := sched.disable.runnable.size
		globrunqputbatch(&sched.disable.runnable)
		unlock(&sched.lock)
		for ; n != 0 && sched.npidle.Load() != 0; n-- {
			startm(nil, false, false)
		}
	} else {
		unlock(&sched.lock)
	}
}
// schedEnabled reports whether gp should be scheduled. It returns
// false if scheduling of gp is disabled.
//
// sched.lock must be held.
func schedEnabled(gp *g) bool {
	assertLockHeld(&sched.lock)
	// While user goroutine scheduling is disabled, only system
	// goroutines may run.
	return !sched.disable.user || isSystemGoroutine(gp, true)
}
// Put mp on midle list.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func mput(mp *m) {
	assertLockHeld(&sched.lock)

	sched.midle.push(unsafe.Pointer(mp))
	sched.nmidle++
	// An M going idle may expose a deadlock; check after the count update.
	checkdead()
}
// Try to get an m from midle list.
// Returns nil if the list is empty.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func mget() *m {
	assertLockHeld(&sched.lock)

	mp := (*m)(sched.midle.pop())
	if mp != nil {
		sched.nmidle--
	}
	return mp
}
// Try to get a specific m from midle list. Returns nil if it isn't on the
// midle list.
//
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func mgetSpecific(mp *m) *m {
	assertLockHeld(&sched.lock)

	// Zero prev and next links mean the node is unlinked. NOTE(review):
	// this assumes a list node at neither end has nonzero links, i.e. the
	// list implementation never leaves a linked node with both zero.
	if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
		// Not on the list.
		return nil
	}
	sched.midle.remove(unsafe.Pointer(mp))
	sched.nmidle--
	return mp
}
// Put gp on the tail of the global runnable queue.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func globrunqput(gp *g) {
	assertLockHeld(&sched.lock)

	sched.runq.pushBack(gp)
}
// Put gp at the head of the global runnable queue.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func globrunqputhead(gp *g) {
	assertLockHeld(&sched.lock)

	sched.runq.push(gp)
}
// Put a batch of runnable goroutines on the global runnable queue.
// This clears *batch.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func globrunqputbatch(batch *gQueue) {
	assertLockHeld(&sched.lock)

	sched.runq.pushBackAll(*batch)
	// The Gs now belong to sched.runq; empty the caller's queue.
	*batch = gQueue{}
}
// globrunqget dequeues a single G from the head of the global runnable
// queue, or returns nil if the queue is empty.
// sched.lock must be held.
func globrunqget() *g {
	assertLockHeld(&sched.lock)

	if sched.runq.size > 0 {
		return sched.runq.pop()
	}
	return nil
}
// Try get a batch of G's from the global runnable queue.
// Returns the first G directly in gp and up to n-1 more in q.
// sched.lock must be held.
func globrunqgetbatch(n int32) (gp *g, q gQueue) {
	assertLockHeld(&sched.lock)

	if sched.runq.size == 0 {
		return
	}

	// Cap the batch: no more than requested, no more than available, and
	// no more than a fair per-P share (size/gomaxprocs+1) so one P cannot
	// drain the whole global queue.
	n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)

	gp = sched.runq.pop()
	n--

	for ; n > 0; n-- {
		gp1 := sched.runq.pop()
		q.pushBack(gp1)
	}
	return
}
// pMask is an atomic bitstring with one bit per P, packed into uint32
// words. Individual bit operations are atomic; resize is not.
type pMask []uint32

// read returns true if P id's bit is set.
func (p pMask) read(id uint32) bool {
	return atomic.Load(&p[id/32])&(uint32(1)<<(id%32)) != 0
}

// set sets P id's bit.
func (p pMask) set(id int32) {
	atomic.Or(&p[id/32], uint32(1)<<(id%32))
}

// clear clears P id's bit.
func (p pMask) clear(id int32) {
	atomic.And(&p[id/32], ^(uint32(1) << (id % 32)))
}

// any returns true if any bit in p is set.
func (p pMask) any() bool {
	for i := 0; i < len(p); i++ {
		if atomic.Load(&p[i]) != 0 {
			return true
		}
	}
	return false
}

// resize resizes the pMask and returns a new one.
//
// The result may alias p, so callers are encouraged to
// discard p. Not safe for concurrent use.
func (p pMask) resize(nprocs int32) pMask {
	words := (nprocs + 31) / 32
	if words <= int32(cap(p)) {
		// Existing backing array is large enough; just reslice.
		return p[:words]
	}
	grown := make([]uint32, words)
	// No need to copy beyond len, old Ps are irrelevant.
	copy(grown, p)
	return grown
}
// pidleput puts p on the _Pidle list. now must be a relatively recent call
// to nanotime or zero. Returns now or the current time if now was zero.
//
// This releases ownership of p. Once sched.lock is released it is no longer
// safe to use p.
//
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func pidleput(pp *p, now int64) int64 {
	assertLockHeld(&sched.lock)

	if !runqempty(pp) {
		throw("pidleput: P has non-empty run queue")
	}
	if now == 0 {
		now = nanotime()
	}
	// Keep the timer/idle P masks in sync with this P's new state.
	if pp.timers.len.Load() == 0 {
		timerpMask.clear(pp.id)
	}
	idlepMask.set(pp.id)
	// Push onto the idle list (linked through pp.link).
	pp.link = sched.pidle
	sched.pidle.set(pp)
	sched.npidle.Add(1)
	if !pp.limiterEvent.start(limiterEventIdle, now) {
		throw("must be able to track idle limiter event")
	}
	return now
}
// pidleget tries to get a p from the _Pidle list, acquiring ownership.
// Returns nil if the list is empty. now must be a relatively recent call
// to nanotime or zero; returns now, or the current time if now was zero.
//
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func pidleget(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp := sched.pidle.ptr()
	if pp != nil {
		// Timer may get added at any time now.
		if now == 0 {
			now = nanotime()
		}
		// Flip the P's mask bits back to active before unlinking it.
		timerpMask.set(pp.id)
		idlepMask.clear(pp.id)
		sched.pidle = pp.link
		sched.npidle.Add(-1)
		pp.limiterEvent.stop(limiterEventIdle, now)
	}
	return pp, now
}
// pidlegetSpinning tries to get a p from the _Pidle list, acquiring ownership.
// This is called by spinning Ms (or callers that need a spinning M) that have
// found work. If no P is available, this must synchronize with non-spinning
// Ms that may be preparing to drop their P without discovering this work.
//
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
func pidlegetSpinning(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp, now := pidleget(now)
	if pp == nil {
		// See "Delicate dance" comment in findRunnable. We found work
		// that we cannot take, we must synchronize with non-spinning
		// Ms that may be preparing to drop their P.
		sched.needspinning.Store(1)
		return nil, now
	}

	return pp, now
}
// runqempty reports whether pp has no Gs on its local run queue.
// It never returns true spuriously.
func runqempty(pp *p) bool {
	// Defend against a race where 1) pp has G1 in runqnext but runqhead == runqtail,
	// 2) runqput on pp kicks G1 to the runq, 3) runqget on pp empties runqnext.
	// Simply observing that runqhead == runqtail and then observing that runqnext == nil
	// does not mean the queue is empty.
	for {
		head := atomic.Load(&pp.runqhead)
		tail := atomic.Load(&pp.runqtail)
		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
		// Only trust the three reads above if the tail did not move while
		// we made them; otherwise retry.
		if tail == atomic.Load(&pp.runqtail) {
			return head == tail && runnext == 0
		}
	}
}
// To shake out latent assumptions about scheduling order,
// we introduce some randomness into scheduling decisions
// when running with the race detector.
// The need for this was made obvious by changing the
// (deterministic) scheduling order in Go 1.5 and breaking
// many poorly-written tests.
// With the randomness here, as long as the tests pass
// consistently with -race, they shouldn't have latent scheduling
// assumptions.
const randomizeScheduler = raceenabled
// runqput tries to put g on the local runnable queue.
// If next is false, runqput adds g to the tail of the runnable queue.
// If next is true, runqput puts g in the pp.runnext slot.
// If the run queue is full, runnext puts g on the global queue.
// Executed only by the owner P.
func runqput(pp *p, gp *g, next bool) {
	if !haveSysmon && next {
		// A runnext goroutine shares the same time slice as the
		// current goroutine (inheritTime from runqget). To prevent a
		// ping-pong pair of goroutines from starving all others, we
		// depend on sysmon to preempt "long-running goroutines". That
		// is, any set of goroutines sharing the same time slice.
		//
		// If there is no sysmon, we must avoid runnext entirely or
		// risk starvation.
		next = false
	}

	if randomizeScheduler && next && randn(2) == 0 {
		next = false
	}

	if next {
	retryNext:
		oldnext := pp.runnext
		// CAS loop: stealers may concurrently clear runnext.
		if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
			goto retryNext
		}
		if oldnext == 0 {
			return
		}
		// Kick the old runnext out to the regular run queue.
		gp = oldnext.ptr()
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	t := pp.runqtail
	if t-h < uint32(len(pp.runq)) {
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		atomic.StoreRel(&pp.runqtail, t+1) // store-release, makes the item available for consumption
		return
	}
	if runqputslow(pp, gp, h, t) {
		return
	}
	// the queue is not full, now the put above must succeed
	goto retry
}
// Put g and a batch of work from local runnable queue on global queue.
// Called by runqput when the local queue is full (t-h == len(runq)).
// Returns false if the commit CAS lost to a concurrent consumer, in which
// case the caller retries the fast path.
// Executed only by the owner P.
func runqputslow(pp *p, gp *g, h, t uint32) bool {
	var batch [len(pp.runq)/2 + 1]*g

	// First, grab a batch from local queue.
	n := t - h
	n = n / 2
	if n != uint32(len(pp.runq)/2) {
		throw("runqputslow: queue is not full")
	}
	for i := uint32(0); i < n; i++ {
		batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
	}
	if !atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
		return false
	}
	// gp itself goes at the end of the batch.
	batch[n] = gp

	if randomizeScheduler {
		// Fisher-Yates shuffle over the n+1 batched goroutines.
		for i := uint32(1); i <= n; i++ {
			j := cheaprandn(i + 1)
			batch[i], batch[j] = batch[j], batch[i]
		}
	}

	// Link the goroutines.
	for i := uint32(0); i < n; i++ {
		batch[i].schedlink.set(batch[i+1])
	}
	q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}

	// Now put the batch on global queue.
	lock(&sched.lock)
	globrunqputbatch(&q)
	unlock(&sched.lock)
	return true
}
// runqputbatch tries to put all the G's on q on the local runnable queue.
// If the local runq is full the input queue still contains unqueued Gs.
// Executed only by the owner P.
func runqputbatch(pp *p, q *gQueue) {
	if q.empty() {
		return
	}
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	t := pp.runqtail
	n := uint32(0)
	// Move Gs from q into the free space of the local ring buffer.
	for !q.empty() && t-h < uint32(len(pp.runq)) {
		gp := q.pop()
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		t++
		n++
	}

	if randomizeScheduler {
		// Shuffle only the newly written slots; they are not yet visible
		// to consumers until the runqtail store below.
		off := func(o uint32) uint32 {
			return (pp.runqtail + o) % uint32(len(pp.runq))
		}
		for i := uint32(1); i < n; i++ {
			j := cheaprandn(i + 1)
			pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
		}
	}

	// Publish the new entries for consumption.
	atomic.StoreRel(&pp.runqtail, t)
}
// Get g from local runnable queue.
// If inheritTime is true, gp should inherit the remaining time in the
// current time slice. Otherwise, it should start a new time slice.
// Executed only by the owner P.
func runqget(pp *p) (gp *g, inheritTime bool) {
	// If there's a runnext, it's the next G to run.
	next := pp.runnext
	// If the runnext is non-0 and the CAS fails, it could only have been stolen by another P,
	// because other Ps can race to set runnext to 0, but only the current P can set it to non-0.
	// Hence, there's no need to retry this CAS if it fails.
	if next != 0 && pp.runnext.cas(next, 0) {
		// runnext inherits the current time slice.
		return next.ptr(), true
	}

	for {
		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
		t := pp.runqtail
		if t == h {
			// Local queue is empty.
			return nil, false
		}
		gp := pp.runq[h%uint32(len(pp.runq))].ptr()
		if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume
			return gp, false
		}
		// Lost the race with a stealer; reload head and retry.
	}
}
// runqdrain drains the local runnable queue of pp and returns all goroutines in it.
// Executed only by the owner P.
func runqdrain(pp *p) (drainQ gQueue) {
	// Take runnext first; a failed CAS means it was stolen concurrently.
	oldNext := pp.runnext
	if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
		drainQ.pushBack(oldNext.ptr())
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
	t := pp.runqtail
	qn := t - h
	if qn == 0 {
		return
	}
	if qn > uint32(len(pp.runq)) { // read inconsistent h and t
		goto retry
	}

	if !atomic.CasRel(&pp.runqhead, h, h+qn) { // cas-release, commits consume
		goto retry
	}

	// We've inverted the order in which it gets G's from the local P's runnable queue
	// and then advances the head pointer because we don't want to mess up the statuses of G's
	// while runqdrain() and runqsteal() are running in parallel.
	// Thus we should advance the head pointer before draining the local P into a gQueue,
	// so that we can update any gp.schedlink only after we take the full ownership of G,
	// meanwhile, other P's can't access to all G's in local P's runnable queue and steal them.
	// See https://groups.google.com/g/golang-dev/c/0pTKxEKhHSc/m/6Q85QjdVBQAJ for more details.
	for i := uint32(0); i < qn; i++ {
		gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
		drainQ.pushBack(gp)
	}
	return
}
// Grabs a batch of goroutines from pp's runnable queue into batch.
// Batch is a ring buffer starting at batchHead.
// Returns number of grabbed goroutines.
// Can be executed by any P.
func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
	for {
		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
		t := atomic.LoadAcq(&pp.runqtail) // load-acquire, synchronize with the producer
		n := t - h
		// Grab half of the queue (rounding up), leaving the rest for pp.
		n = n - n/2
		if n == 0 {
			if stealRunNextG {
				// Try to steal from pp.runnext.
				if next := pp.runnext; next != 0 {
					if pp.status == _Prunning {
						if mp := pp.m.ptr(); mp != nil {
							if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
								// Sleep to ensure that pp isn't about to run the g
								// we are about to steal.
								// The important use case here is when the g running
								// on pp ready()s another g and then almost
								// immediately blocks. Instead of stealing runnext
								// in this window, back off to give pp a chance to
								// schedule runnext. This will avoid thrashing gs
								// between different Ps.
								// A sync chan send/recv takes ~50ns as of time of
								// writing, so 3us gives ~50x overshoot.
								// If curg is nil, we assume that the P is likely
								// to be in the scheduler. If curg isn't nil and isn't
								// in a syscall, then it's either running, waiting, or
								// runnable. In this case we want to sleep because the
								// P might either call into the scheduler soon (running),
								// or already is (since we found a waiting or runnable
								// goroutine hanging off of a running P, suggesting it
								// either recently transitioned out of running, or will
								// transition to running shortly).
								if !osHasLowResTimer {
									usleep(3)
								} else {
									// On some platforms system timer granularity is
									// 1-15ms, which is way too much for this
									// optimization. So just yield.
									osyield()
								}
							}
						}
					}
					if !pp.runnext.cas(next, 0) {
						// runnext changed while we backed off; re-examine
						// the whole queue.
						continue
					}
					batch[batchHead%uint32(len(batch))] = next
					return 1
				}
			}
			return 0
		}
		if n > uint32(len(pp.runq)/2) { // read inconsistent h and t
			continue
		}
		for i := uint32(0); i < n; i++ {
			g := pp.runq[(h+i)%uint32(len(pp.runq))]
			batch[(batchHead+i)%uint32(len(batch))] = g
		}
		if atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
			return n
		}
		// Lost the race with another consumer; retry from the top.
	}
}
// Steal half of elements from local runnable queue of p2
// and put onto local runnable queue of p.
// Returns one of the stolen elements (or nil if failed).
func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
	t := pp.runqtail
	// Grab directly into pp's ring buffer starting at its tail.
	n := runqgrab(p2, &pp.runq, t, stealRunNextG)
	if n == 0 {
		return nil
	}
	// Return the last stolen G to the caller rather than queueing it.
	n--
	gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
	if n == 0 {
		// Only one G was stolen; nothing to publish.
		return gp
	}
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	if t-h+n >= uint32(len(pp.runq)) {
		throw("runqsteal: runq overflow")
	}
	atomic.StoreRel(&pp.runqtail, t+n) // store-release, makes the item available for consumption
	return gp
}
// A gQueue is a dequeue of Gs linked through g.schedlink. A G can only
// be on one gQueue or gList at a time.
type gQueue struct {
	head guintptr // first G in the queue, or 0 if empty
	tail guintptr // last G in the queue, or 0 if empty
	size int32    // number of Gs currently in the queue
}
// empty reports whether q contains no Gs.
func (q *gQueue) empty() bool {
	return q.head.ptr() == nil
}
// push adds gp to the head of q.
func (q *gQueue) push(gp *g) {
	if q.tail == 0 {
		// Queue was empty, so gp is also the new tail.
		q.tail.set(gp)
	}
	gp.schedlink = q.head
	q.head.set(gp)
	q.size++
}
// pushBack adds gp to the tail of q.
func (q *gQueue) pushBack(gp *g) {
	gp.schedlink = 0
	if q.tail == 0 {
		// Queue was empty, so gp becomes the head as well.
		q.head.set(gp)
	} else {
		q.tail.ptr().schedlink.set(gp)
	}
	q.tail.set(gp)
	q.size++
}
// pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
// not be used.
func (q *gQueue) pushBackAll(q2 gQueue) {
	if q2.tail == 0 {
		// q2 is empty; nothing to transfer.
		return
	}
	// Terminate q2's chain, then splice it after q's current tail.
	q2.tail.ptr().schedlink = 0
	if q.tail == 0 {
		q.head = q2.head
	} else {
		q.tail.ptr().schedlink = q2.head
	}
	q.tail = q2.tail
	q.size += q2.size
}
// pop removes and returns the head of queue q. It returns nil if
// q is empty.
func (q *gQueue) pop() *g {
	gp := q.head.ptr()
	if gp == nil {
		return nil
	}
	q.head = gp.schedlink
	if q.head == 0 {
		// Removed the last element; the tail must be cleared too.
		q.tail = 0
	}
	q.size--
	return gp
}
// popList takes all Gs in q and returns them as a gList,
// leaving q empty.
func (q *gQueue) popList() gList {
	all := gList{q.head, q.size}
	*q = gQueue{}
	return all
}
// A gList is a list of Gs linked through g.schedlink. A G can only be
// on one gQueue or gList at a time.
type gList struct {
	head guintptr // first G in the list, or 0 if empty
	size int32    // number of Gs currently in the list
}
// empty reports whether l contains no Gs.
func (l *gList) empty() bool {
	return l.head.ptr() == nil
}
// push adds gp to the head of l.
func (l *gList) push(gp *g) {
	l.size++
	gp.schedlink = l.head
	l.head.set(gp)
}
// pushAll prepends all Gs in q to l. After this q must not be used.
func (l *gList) pushAll(q gQueue) {
	if q.empty() {
		return
	}
	// Link q's tail to l's old head and adopt q's head.
	q.tail.ptr().schedlink = l.head
	l.head = q.head
	l.size += q.size
}
// pop removes and returns the head of l. If l is empty, it returns nil.
func (l *gList) pop() *g {
	gp := l.head.ptr()
	if gp == nil {
		return nil
	}
	l.head = gp.schedlink
	l.size--
	return gp
}
//go:linkname setMaxThreads runtime/debug.setMaxThreads
func setMaxThreads(in int) (out int) {
	lock(&sched.lock)
	out = int(sched.maxmcount)
	// sched.maxmcount is an int32, so clamp the requested value
	// to MaxInt32 (only reachable on 64-bit platforms).
	limit := int32(0x7fffffff)
	if in <= 0x7fffffff {
		limit = int32(in)
	}
	sched.maxmcount = limit
	checkmcount()
	unlock(&sched.lock)
	return
}
// procPin should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/gopkg
//   - github.com/choleraehyq/pid
//   - github.com/songzhibin97/gkit
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname procPin
//go:nosplit
func procPin() int {
	gp := getg()
	mp := gp.m
	// Incrementing m.locks pins the goroutine to its current P
	// until a matching procUnpin decrements it.
	mp.locks++
	return int(mp.p.ptr().id)
}
// procUnpin should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/gopkg
//   - github.com/choleraehyq/pid
//   - github.com/songzhibin97/gkit
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname procUnpin
//go:nosplit
func procUnpin() {
	// Undo the m.locks increment performed by procPin.
	gp := getg()
	gp.m.locks--
}
// sync_runtime_procPin is the linkname entry point for sync.runtime_procPin.
//
//go:linkname sync_runtime_procPin sync.runtime_procPin
//go:nosplit
func sync_runtime_procPin() int {
	return procPin()
}

// sync_runtime_procUnpin is the linkname entry point for sync.runtime_procUnpin.
//
//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
//go:nosplit
func sync_runtime_procUnpin() {
	procUnpin()
}

// sync_atomic_runtime_procPin is the linkname entry point for sync/atomic.runtime_procPin.
//
//go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
//go:nosplit
func sync_atomic_runtime_procPin() int {
	return procPin()
}

// sync_atomic_runtime_procUnpin is the linkname entry point for sync/atomic.runtime_procUnpin.
//
//go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
//go:nosplit
func sync_atomic_runtime_procUnpin() {
	procUnpin()
}
// Active spinning for sync.Mutex.
//
//go:linkname internal_sync_runtime_canSpin internal/sync.runtime_canSpin
//go:nosplit
func internal_sync_runtime_canSpin(i int) bool {
	// sync.Mutex is cooperative, so we are conservative with spinning.
	// Spin only few times and only if running on a multicore machine and
	// GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
	// As opposed to runtime mutex we don't do passive spinning here,
	// because there can be work on global runq or on other Ps.
	if i >= active_spin {
		return false
	}
	if numCPUStartup <= 1 {
		return false
	}
	if gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
		return false
	}
	// Only spin if our own local run queue has no pending work.
	return runqempty(getg().m.p.ptr())
}
// internal_sync_runtime_doSpin performs one bounded burst of busy
// spinning (active_spin_cnt iterations of procyield).
//
//go:linkname internal_sync_runtime_doSpin internal/sync.runtime_doSpin
//go:nosplit
func internal_sync_runtime_doSpin() {
	procyield(active_spin_cnt)
}
// Active spinning for sync.Mutex.
//
// sync_runtime_canSpin should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/livekit/protocol
//   - github.com/sagernet/gvisor
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname sync_runtime_canSpin sync.runtime_canSpin
//go:nosplit
func sync_runtime_canSpin(i int) bool {
	// Thin shim; the logic lives in internal_sync_runtime_canSpin.
	return internal_sync_runtime_canSpin(i)
}

// sync_runtime_doSpin should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/livekit/protocol
//   - github.com/sagernet/gvisor
//   - gvisor.dev/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname sync_runtime_doSpin sync.runtime_doSpin
//go:nosplit
func sync_runtime_doSpin() {
	internal_sync_runtime_doSpin()
}
// stealOrder is the global enumeration order used for work stealing.
var stealOrder randomOrder

// randomOrder/randomEnum are helper types for randomized work stealing.
// They allow to enumerate all Ps in different pseudo-random orders without repetitions.
// The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
// are coprime, then a sequence of (i + X) % GOMAXPROCS gives the required enumeration.
type randomOrder struct {
	count    uint32   // number of elements to enumerate
	coprimes []uint32 // all values in [1, count] coprime with count
}

type randomEnum struct {
	i     uint32 // how many positions have been produced so far
	count uint32 // total number of positions to produce
	pos   uint32 // current position
	inc   uint32 // step per next(); coprime with count
}
// reset recomputes, for the given element count, the set of step values
// that are coprime with count (and therefore enumerate all elements).
func (ord *randomOrder) reset(count uint32) {
	ord.count = count
	// Reuse the existing backing array where possible.
	coprimes := ord.coprimes[:0]
	for x := uint32(1); x <= count; x++ {
		if gcd(x, count) == 1 {
			coprimes = append(coprimes, x)
		}
	}
	ord.coprimes = coprimes
}
// start begins a new enumeration, using i to pick both the starting
// position and the (coprime) increment.
func (ord *randomOrder) start(i uint32) randomEnum {
	pos := i % ord.count
	inc := ord.coprimes[i/ord.count%uint32(len(ord.coprimes))]
	return randomEnum{
		count: ord.count,
		pos:   pos,
		inc:   inc,
	}
}
// done reports whether the enumeration has produced all count positions.
func (enum *randomEnum) done() bool {
	return enum.i == enum.count
}

// next advances the enumeration by one step. Because inc is coprime with
// count, repeated calls visit every position exactly once.
func (enum *randomEnum) next() {
	enum.i++
	enum.pos = (enum.pos + enum.inc) % enum.count
}

// position returns the current position in the enumeration.
func (enum *randomEnum) position() uint32 {
	return enum.pos
}
// gcd returns the greatest common divisor of a and b, computed with the
// Euclidean algorithm. gcd(a, 0) == a by convention.
func gcd(a, b uint32) uint32 {
	if b == 0 {
		return a
	}
	return gcd(b, a%b)
}
// An initTask represents the set of initializations that need to be done for a package.
// Keep in sync with ../../test/noinit.go:initTask
type initTask struct {
	state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
	nfns  uint32 // number of init function PCs following this struct
	// followed by nfns pcs, uintptr sized, one per init function to run
}

// inittrace stores statistics for init functions which are
// updated by malloc and newproc when active is true.
var inittrace tracestat

type tracestat struct {
	active bool   // init tracing activation status
	id     uint64 // init goroutine id
	allocs uint64 // heap allocations
	bytes  uint64 // heap allocated bytes
}
// doInit runs each init task in ts, in order.
func doInit(ts []*initTask) {
	for i := range ts {
		doInit1(ts[i])
	}
}
// doInit1 runs a single package's init task: it marks the task in
// progress, invokes each of its nfns init functions in order, records
// timing/allocation statistics when inittrace is active, and marks the
// task done. A task already done is a no-op; a task already in progress
// indicates broken init ordering from the linker.
func doInit1(t *initTask) {
	switch t.state {
	case 2: // fully initialized
		return
	case 1: // initialization in progress
		throw("recursive call during initialization - linker skew")
	default: // not initialized yet
		t.state = 1 // initialization in progress

		var (
			start  int64
			before tracestat
		)

		if inittrace.active {
			start = nanotime()
			// Load stats non-atomically since tracinit is updated only by this init goroutine.
			before = inittrace
		}

		if t.nfns == 0 {
			// We should have pruned all of these in the linker.
			throw("inittask with no functions")
		}

		// The init function PCs are laid out immediately after the
		// state and nfns fields (2 x uint32 = 8 bytes).
		firstFunc := add(unsafe.Pointer(t), 8)
		for i := uint32(0); i < t.nfns; i++ {
			p := add(firstFunc, uintptr(i)*goarch.PtrSize)
			// Reinterpret the stored PC as a func value and call it.
			f := *(*func())(unsafe.Pointer(&p))
			f()
		}

		if inittrace.active {
			end := nanotime()
			// Load stats non-atomically since tracinit is updated only by this init goroutine.
			after := inittrace

			// Use the first function's PC to recover the package path for the trace line.
			f := *(*func())(unsafe.Pointer(&firstFunc))
			pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))

			var sbuf [24]byte
			print("init ", pkg, " @")
			print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
			print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
			print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
			print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
			print("\n")
		}

		t.state = 2 // initialization done
	}
}
// libInit is common startup code for most architectures when
// using -buildmode=c-archive or -buildmode=c-shared.
//
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrierrec
//go:nosplit
func libInit() {
	// Synchronous initialization.
	libpreinit()

	// Asynchronous initialization.
	// Prefer creating a thread via cgo if it is available.
	if _cgo_sys_thread_create != nil {
		asmcgocall(_cgo_sys_thread_create, unsafe.Pointer(abi.FuncPCABIInternal(rt0_lib_go)))
	} else {
		// No cgo thread-creation hook; fall back to a raw OS thread.
		const stackSize = 0x800000 // 8192KB
		newosproc0(stackSize, unsafe.Pointer(abi.FuncPCABIInternal(rt0_lib_go)))
	}
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/atomic"
"unsafe"
)
// A profBuf is a lock-free buffer for profiling events,
// safe for concurrent use by one reader and one writer.
// The writer may be a signal handler running without a user g.
// The reader is assumed to be a user g.
//
// Each logged event corresponds to a fixed size header, a list of
// uintptrs (typically a stack), and exactly one unsafe.Pointer tag.
// The header and uintptrs are stored in the circular buffer data and the
// tag is stored in a circular buffer tags, running in parallel.
// In the circular buffer data, each event takes 2+hdrsize+len(stk)
// words: the value 2+hdrsize+len(stk), then the time of the event, then
// hdrsize words giving the fixed-size header, and then len(stk) words
// for the stack.
//
// The current effective offsets into the tags and data circular buffers
// for reading and writing are stored in the high 30 and low 32 bits of r and w.
// The bottom bits of the high 32 are additional flag bits in w, unused in r.
// "Effective" offsets means the total number of reads or writes, mod 2^length.
// The offset in the buffer is the effective offset mod the length of the buffer.
// To make wraparound mod 2^length match wraparound mod length of the buffer,
// the length of the buffer must be a power of two.
//
// If the reader catches up to the writer, a flag passed to read controls
// whether the read blocks until more data is available. A read returns a
// pointer to the buffer data itself; the caller is assumed to be done with
// that data at the next read. The read offset rNext tracks the next offset to
// be returned by read. By definition, r ≤ rNext ≤ w (before wraparound),
// and rNext is only used by the reader, so it can be accessed without atomics.
//
// If the reader is blocked waiting for more data, the writer will wake it up if
// either the buffer is more than half full, or when the writer sets the eof
// marker or writes overflow entries (described below.)
//
// If the writer gets ahead of the reader, so that the buffer fills,
// future writes are discarded and replaced in the output stream by an
// overflow entry, which has size 2+hdrsize+1, time set to the time of
// the first discarded write, a header of all zeroed words, and a "stack"
// containing one word, the number of discarded writes.
//
// Between the time the buffer fills and the buffer becomes empty enough
// to hold more data, the overflow entry is stored as a pending overflow
// entry in the fields overflow and overflowTime. The pending overflow
// entry can be turned into a real record by either the writer or the
// reader. If the writer is called to write a new record and finds that
// the output buffer has room for both the pending overflow entry and the
// new record, the writer emits the pending overflow entry and the new
// record into the buffer. If the reader is called to read data and finds
// that the output buffer is empty but that there is a pending overflow
// entry, the reader will return a synthesized record for the pending
// overflow entry.
//
// Only the writer can create or add to a pending overflow entry, but
// either the reader or the writer can clear the pending overflow entry.
// A pending overflow entry is indicated by the low 32 bits of 'overflow'
// holding the number of discarded writes, and overflowTime holding the
// time of the first discarded write. The high 32 bits of 'overflow'
// increment each time the low 32 bits transition from zero to non-zero
// or vice versa. This sequence number avoids ABA problems in the use of
// compare-and-swap to coordinate between reader and writer.
// The overflowTime is only written when the low 32 bits of overflow are
// zero, that is, only when there is no pending overflow entry, in
// preparation for creating a new one. The reader can therefore fetch and
// clear the entry atomically using
//
// for {
// overflow = load(&b.overflow)
// if uint32(overflow) == 0 {
// // no pending entry
// break
// }
// time = load(&b.overflowTime)
// if cas(&b.overflow, overflow, ((overflow>>32)+1)<<32) {
// // pending entry cleared
// break
// }
// }
// if uint32(overflow) > 0 {
// emit entry for uint32(overflow), time
// }
type profBuf struct {
	// accessed atomically
	r, w         profAtomic    // packed read/write offsets plus flag bits (see comment above)
	overflow     atomic.Uint64 // low 32 bits: discarded-write count; high 32 bits: generation
	overflowTime atomic.Uint64 // time of the first discarded write, valid while overflow count != 0
	eof          atomic.Uint32 // nonzero once close has been called

	// immutable (excluding slice content)
	hdrsize uintptr          // fixed header words per record
	data    []uint64         // circular buffer of record words; len is a power of two
	tags    []unsafe.Pointer // circular buffer of tags, parallel to data; len is a power of two

	// owned by reader
	rNext       profIndex // next offset to be returned by read; r <= rNext <= w
	overflowBuf []uint64  // for use by reader to return overflow record
	wait        note      // reader sleeps here; writer wakes it
}
// A profAtomic is the atomically-accessed word holding a profIndex.
type profAtomic uint64

// A profIndex is the packed tag and data counts and flags bits, described above.
type profIndex uint64

const (
	profReaderSleeping profIndex = 1 << 32 // reader is sleeping and must be woken up
	profWriteExtra     profIndex = 1 << 33 // overflow or eof waiting
)
// load atomically loads the packed index word.
func (x *profAtomic) load() profIndex {
	return profIndex(atomic.Load64((*uint64)(x)))
}

// store atomically stores the packed index word.
func (x *profAtomic) store(new profIndex) {
	atomic.Store64((*uint64)(x), uint64(new))
}

// cas atomically compare-and-swaps the packed index word.
func (x *profAtomic) cas(old, new profIndex) bool {
	return atomic.Cas64((*uint64)(x), uint64(old), uint64(new))
}

// dataCount extracts the effective data offset (low 32 bits).
func (x profIndex) dataCount() uint32 {
	return uint32(x)
}

// tagCount extracts the effective tag offset (30 bits, starting at bit 34).
func (x profIndex) tagCount() uint32 {
	return uint32(x >> 34)
}
// countSub subtracts two counts obtained from profIndex.dataCount or profIndex.tagCount,
// assuming that they are no more than 2^29 apart (guaranteed since they are never more than
// len(data) or len(tags) apart, respectively).
// tagCount wraps at 2^30, while dataCount wraps at 2^32.
// This function works for both.
func countSub(x, y uint32) int {
	// The true difference fits in 30 signed bits, so shifting the raw
	// 32-bit difference left two places and arithmetic-shifting back
	// sign-extends correctly for both the 30-bit and 32-bit counters.
	d := int32(x-y) << 2
	return int(d >> 2)
}
// addCountsAndClearFlags returns the packed form of "x + (data, tag) - all flags".
// The tag count occupies bits 34 and up (the tag delta is truncated to 30 bits);
// the data count occupies the low 32 bits. Rebuilding the word from only these
// two fields drops the flag bits 32-33.
func (x profIndex) addCountsAndClearFlags(data, tag int) profIndex {
	return profIndex((uint64(x)>>34+uint64(uint32(tag)<<2>>2))<<34 | uint64(uint32(x)+uint32(data)))
}
// hasOverflow reports whether b has any overflow records pending.
// The low 32 bits of b.overflow hold the discarded-write count.
func (b *profBuf) hasOverflow() bool {
	return uint32(b.overflow.Load()) > 0
}
// takeOverflow consumes the pending overflow records, returning the overflow count
// and the time of the first overflow.
// When called by the reader, it is racing against incrementOverflow.
func (b *profBuf) takeOverflow() (count uint32, time uint64) {
	overflow := b.overflow.Load()
	time = b.overflowTime.Load()
	for {
		count = uint32(overflow)
		if count == 0 {
			time = 0
			break
		}
		// Increment generation, clear overflow count in low bits.
		if b.overflow.CompareAndSwap(overflow, ((overflow>>32)+1)<<32) {
			break
		}
		// CAS lost the race with the writer; reload both values and retry.
		overflow = b.overflow.Load()
		time = b.overflowTime.Load()
	}
	// overflow still holds the value consumed by the successful CAS
	// (or zero), so its low 32 bits are the count actually taken.
	return uint32(overflow), time
}
// incrementOverflow records a single overflow at time now.
// It is racing against a possible takeOverflow in the reader.
func (b *profBuf) incrementOverflow(now int64) {
	for {
		overflow := b.overflow.Load()

		// Once we see b.overflow reach 0, it's stable: no one else is changing it underfoot.
		// We need to set overflowTime if we're incrementing b.overflow from 0.
		if uint32(overflow) == 0 {
			// Store overflowTime first so it's always available when overflow != 0.
			b.overflowTime.Store(uint64(now))
			// Bump the generation (high 32 bits) and set the count to 1.
			b.overflow.Store((((overflow >> 32) + 1) << 32) + 1)
			break
		}
		// Otherwise we're racing to increment against reader
		// who wants to set b.overflow to 0.
		// Out of paranoia, leave 2³²-1 a sticky overflow value,
		// to avoid wrapping around. Extremely unlikely.
		if int32(overflow) == -1 {
			break
		}
		if b.overflow.CompareAndSwap(overflow, overflow+1) {
			break
		}
	}
}
// newProfBuf returns a new profiling buffer with room for
// a header of hdrsize words and a buffer of at least bufwords words.
func newProfBuf(hdrsize, bufwords, tags int) *profBuf {
	// The data buffer must be able to hold at least one
	// overflow-sized record (2 + hdrsize + 1 words).
	if min := 2 + hdrsize + 1; bufwords < min {
		bufwords = min
	}

	// Buffer sizes must be power of two, so that we don't have to
	// worry about uint32 wraparound changing the effective position
	// within the buffers. We store 30 bits of count; limiting to 28
	// gives us some room for intermediate calculations.
	if bufwords >= 1<<28 || tags >= 1<<28 {
		throw("newProfBuf: buffer too large")
	}
	// Round both sizes up to the next power of two.
	roundPow2 := func(n int) int {
		p := 1
		for p < n {
			p <<= 1
		}
		return p
	}
	bufwords = roundPow2(bufwords)
	tags = roundPow2(tags)

	b := new(profBuf)
	b.hdrsize = uintptr(hdrsize)
	b.data = make([]uint64, bufwords)
	b.tags = make([]unsafe.Pointer, tags)
	b.overflowBuf = make([]uint64, 2+b.hdrsize+1)
	return b
}
// canWriteRecord reports whether the buffer has room
// for a single contiguous record with a stack of length nstk.
func (b *profBuf) canWriteRecord(nstk int) bool {
	br := b.r.load()
	bw := b.w.load()

	// room for tag?
	if countSub(br.tagCount(), bw.tagCount())+len(b.tags) < 1 {
		return false
	}

	// room for data?
	// nd is the number of free data words remaining in the ring.
	nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
	want := 2 + int(b.hdrsize) + nstk
	i := int(bw.dataCount() % uint32(len(b.data)))
	if i+want > len(b.data) {
		// Can't fit in trailing fragment of slice.
		// Skip over that and start over at beginning of slice.
		nd -= len(b.data) - i
	}
	return nd >= want
}
// canWriteTwoRecords reports whether the buffer has room
// for two records with stack lengths nstk1, nstk2, in that order.
// Each record must be contiguous on its own, but the two
// records need not be contiguous (one can be at the end of the buffer
// and the other can wrap around and start at the beginning of the buffer).
func (b *profBuf) canWriteTwoRecords(nstk1, nstk2 int) bool {
	br := b.r.load()
	bw := b.w.load()

	// room for tag?
	if countSub(br.tagCount(), bw.tagCount())+len(b.tags) < 2 {
		return false
	}

	// room for data?
	// nd is the number of free data words remaining in the ring.
	nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)

	// first record
	want := 2 + int(b.hdrsize) + nstk1
	i := int(bw.dataCount() % uint32(len(b.data)))
	if i+want > len(b.data) {
		// Can't fit in trailing fragment of slice.
		// Skip over that and start over at beginning of slice.
		nd -= len(b.data) - i
		i = 0
	}
	i += want
	nd -= want

	// second record
	want = 2 + int(b.hdrsize) + nstk2
	if i+want > len(b.data) {
		// Can't fit in trailing fragment of slice.
		// Skip over that and start over at beginning of slice.
		nd -= len(b.data) - i
		i = 0
	}
	return nd >= want
}
// write writes an entry to the profiling buffer b.
// The entry begins with a fixed hdr, which must have
// length b.hdrsize, followed by a variable-sized stack
// and a single tag pointer *tagPtr (or nil if tagPtr is nil).
// No write barriers allowed because this might be called from a signal handler.
func (b *profBuf) write(tagPtr *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	if b == nil {
		return
	}
	if len(hdr) > int(b.hdrsize) {
		throw("misuse of profBuf.write")
	}

	if hasOverflow := b.hasOverflow(); hasOverflow && b.canWriteTwoRecords(1, len(stk)) {
		// Room for both an overflow record and the one being written.
		// Write the overflow record if the reader hasn't gotten to it yet.
		// Only racing against reader, not other writers.
		count, time := b.takeOverflow()
		if count > 0 {
			// Recursive call emits the synthesized overflow record
			// (one-word "stack" holding the discarded-write count;
			// the local stk deliberately shadows the parameter).
			var stk [1]uintptr
			stk[0] = uintptr(count)
			b.write(nil, int64(time), nil, stk[:])
		}
	} else if hasOverflow || !b.canWriteRecord(len(stk)) {
		// Pending overflow without room to write overflow and new records
		// or no overflow but also no room for new record.
		b.incrementOverflow(now)
		b.wakeupExtra()
		return
	}

	// There's room: write the record.
	br := b.r.load()
	bw := b.w.load()

	// Profiling tag
	//
	// The tag is a pointer, but we can't run a write barrier here.
	// We have interrupted the OS-level execution of gp, but the
	// runtime still sees gp as executing. In effect, we are running
	// in place of the real gp. Since gp is the only goroutine that
	// can overwrite gp.labels, the value of gp.labels is stable during
	// this signal handler: it will still be reachable from gp when
	// we finish executing. If a GC is in progress right now, it must
	// keep gp.labels alive, because gp.labels is reachable from gp.
	// If gp were to overwrite gp.labels, the deletion barrier would
	// still shade that pointer, which would preserve it for the
	// in-progress GC, so all is well. Any future GC will see the
	// value we copied when scanning b.tags (heap-allocated).
	// We arrange that the store here is always overwriting a nil,
	// so there is no need for a deletion barrier on b.tags[wt].
	wt := int(bw.tagCount() % uint32(len(b.tags)))
	if tagPtr != nil {
		*(*uintptr)(unsafe.Pointer(&b.tags[wt])) = uintptr(*tagPtr)
	}

	// Main record.
	// It has to fit in a contiguous section of the slice, so if it doesn't fit at the end,
	// leave a rewind marker (0) and start over at the beginning of the slice.
	wd := int(bw.dataCount() % uint32(len(b.data)))
	nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
	skip := 0
	if wd+2+int(b.hdrsize)+len(stk) > len(b.data) {
		b.data[wd] = 0
		skip = len(b.data) - wd
		nd -= skip
		wd = 0
	}
	data := b.data[wd:]
	data[0] = uint64(2 + b.hdrsize + uintptr(len(stk))) // length
	data[1] = uint64(now)                               // time stamp
	// header, zero-padded
	i := copy(data[2:2+b.hdrsize], hdr)
	clear(data[2+i : 2+b.hdrsize])
	for i, pc := range stk {
		data[2+b.hdrsize+uintptr(i)] = uint64(pc)
	}

	for {
		// Commit write.
		// Racing with reader setting flag bits in b.w, to avoid lost wakeups.
		old := b.w.load()
		new := old.addCountsAndClearFlags(skip+2+len(stk)+int(b.hdrsize), 1)
		// We re-load b.r here to reduce the likelihood of early wakeups
		// if the reader already consumed some data between the last
		// time we read b.r and now. This isn't strictly necessary.
		unread := countSub(new.dataCount(), b.r.load().dataCount())
		if unread < 0 {
			// The new count overflowed and wrapped around.
			unread += len(b.data)
		}
		wakeupThreshold := len(b.data) / 2
		if unread < wakeupThreshold {
			// Carry over the sleeping flag since we're not planning
			// to wake the reader yet
			new |= old & profReaderSleeping
		}
		if !b.w.cas(old, new) {
			continue
		}
		// If we've hit our high watermark for data in the buffer,
		// and there is a reader, wake it up.
		if unread >= wakeupThreshold && old&profReaderSleeping != 0 {
			// NB: if we reach this point, then the sleeping bit is
			// cleared in the new b.w value
			notewakeup(&b.wait)
		}
		break
	}
}
// close signals that there will be no more writes on the buffer.
// Once all the data has been read from the buffer, reads will return eof=true.
func (b *profBuf) close() {
	if b.eof.Load() > 0 {
		throw("runtime: profBuf already closed")
	}
	b.eof.Store(1)
	// Publish the change in b.w and wake a sleeping reader, if any.
	b.wakeupExtra()
}
// wakeupExtra must be called after setting one of the "extra"
// atomic fields b.overflow or b.eof.
// It records the change in b.w and wakes up the reader if needed.
func (b *profBuf) wakeupExtra() {
	for {
		old := b.w.load()
		new := old | profWriteExtra
		// Clear profReaderSleeping. We're going to wake up the reader
		// if it was sleeping and we don't want double wakeups in case
		// we, for example, attempt to write into a full buffer multiple
		// times before the reader wakes up.
		new &^= profReaderSleeping
		if !b.w.cas(old, new) {
			// b.w changed underfoot (reader or another flag update);
			// retry with the fresh value.
			continue
		}
		if old&profReaderSleeping != 0 {
			notewakeup(&b.wait)
		}
		break
	}
}
// profBufReadMode specifies whether to block when no data is available to read.
type profBufReadMode int

const (
	profBufBlocking    profBufReadMode = iota // sleep until data, overflow, or eof
	profBufNonBlocking                        // return immediately when nothing is available
)

// overflowTag is the tag slice returned alongside a synthesized overflow record.
var overflowTag [1]unsafe.Pointer // always nil
// read returns the next batch of profiling records from b, along with
// their parallel tags. The returned slices alias the buffer and are valid
// only until the next call to read, which commits the previous batch back
// to the writer. eof is true once the buffer is closed and drained.
func (b *profBuf) read(mode profBufReadMode) (data []uint64, tags []unsafe.Pointer, eof bool) {
	if b == nil {
		return nil, nil, true
	}

	br := b.rNext

	// Commit previous read, returning that part of the ring to the writer.
	// First clear tags that have now been read, both to avoid holding
	// up the memory they point at for longer than necessary
	// and so that b.write can assume it is always overwriting
	// nil tag entries (see comment in b.write).
	rPrev := b.r.load()
	if rPrev != br {
		ntag := countSub(br.tagCount(), rPrev.tagCount())
		ti := int(rPrev.tagCount() % uint32(len(b.tags)))
		for i := 0; i < ntag; i++ {
			b.tags[ti] = nil
			if ti++; ti == len(b.tags) {
				ti = 0
			}
		}
		b.r.store(br)
	}

Read:
	bw := b.w.load()
	numData := countSub(bw.dataCount(), br.dataCount())
	if numData == 0 {
		if b.hasOverflow() {
			// No data to read, but there is overflow to report.
			// Racing with writer flushing b.overflow into a real record.
			count, time := b.takeOverflow()
			if count == 0 {
				// Lost the race, go around again.
				goto Read
			}
			// Won the race, report overflow.
			dst := b.overflowBuf
			dst[0] = uint64(2 + b.hdrsize + 1)
			dst[1] = time
			clear(dst[2 : 2+b.hdrsize])
			dst[2+b.hdrsize] = uint64(count)
			return dst[:2+b.hdrsize+1], overflowTag[:1], false
		}
		if b.eof.Load() > 0 {
			// No data, no overflow, EOF set: done.
			return nil, nil, true
		}
		if bw&profWriteExtra != 0 {
			// Writer claims to have published extra information (overflow or eof).
			// Attempt to clear notification and then check again.
			// If we fail to clear the notification it means b.w changed,
			// so we still need to check again.
			b.w.cas(bw, bw&^profWriteExtra)
			goto Read
		}

		// Nothing to read right now.
		// Return or sleep according to mode.
		if mode == profBufNonBlocking {
			// Necessary on Darwin, notetsleepg below does not work in signal handler, root cause of #61768.
			return nil, nil, false
		}
		if !b.w.cas(bw, bw|profReaderSleeping) {
			goto Read
		}
		// Committed to sleeping.
		notetsleepg(&b.wait, -1)
		noteclear(&b.wait)
		goto Read
	}
	data = b.data[br.dataCount()%uint32(len(b.data)):]
	if len(data) > numData {
		data = data[:numData]
	} else {
		numData -= len(data) // available in case of wraparound
	}
	skip := 0
	if data[0] == 0 {
		// Wraparound record. Go back to the beginning of the ring.
		skip = len(data)
		data = b.data
		if len(data) > numData {
			data = data[:numData]
		}
	}

	ntag := countSub(bw.tagCount(), br.tagCount())
	if ntag == 0 {
		throw("runtime: malformed profBuf buffer - tag and data out of sync")
	}
	tags = b.tags[br.tagCount()%uint32(len(b.tags)):]
	if len(tags) > ntag {
		tags = tags[:ntag]
	}

	// Count out whole data records until either data or tags is done.
	// They are always in sync in the buffer, but due to an end-of-slice
	// wraparound we might need to stop early and return the rest
	// in the next call.
	di := 0
	ti := 0
	for di < len(data) && data[di] != 0 && ti < len(tags) {
		if uintptr(di)+uintptr(data[di]) > uintptr(len(data)) {
			throw("runtime: malformed profBuf buffer - invalid size")
		}
		di += int(data[di])
		ti++
	}

	// Remember how much we returned, to commit read on next call.
	b.rNext = br.addCountsAndClearFlags(skip+di, ti)

	if raceenabled {
		// Match racereleasemerge in runtime_setProfLabel,
		// so that the setting of the labels in runtime_setProfLabel
		// is treated as happening before any use of the labels
		// by our caller. The synchronization on labelSync itself is a fiction
		// for the race detector. The actual synchronization is handled
		// by the fact that the signal handler only reads from the current
		// goroutine and uses atomics to write the updated queue indices,
		// and then the read-out from the signal handler buffer uses
		// atomics to read those queue indices.
		raceacquire(unsafe.Pointer(&labelSync))
	}

	return data[:di], tags[:ti], false
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// labelSync is a dummy synchronization address used only to create a
// race-detector edge between setProfLabel and the profile reader.
var labelSync uintptr

// runtime_setProfLabel should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cloudwego/localsession
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname runtime_setProfLabel runtime/pprof.runtime_setProfLabel
func runtime_setProfLabel(labels unsafe.Pointer) {
	// Introduce race edge for read-back via profile.
	// This would more properly use &getg().labels as the sync address,
	// but we do the read in a signal handler and can't call the race runtime then.
	//
	// This uses racereleasemerge rather than just racerelease so
	// the acquire in profBuf.read synchronizes with *all* prior
	// setProfLabel operations, not just the most recent one. This
	// is important because profBuf.read will observe different
	// labels set by different setProfLabel operations on
	// different goroutines, so it needs to synchronize with all
	// of them (this wouldn't be an issue if we could synchronize
	// on &getg().labels since we would synchronize with each
	// most-recent labels write separately.)
	//
	// racereleasemerge is like a full read-modify-write on
	// labelSync, rather than just a store-release, so it carries
	// a dependency on the previous racereleasemerge, which
	// ultimately carries forward to the acquire in profBuf.read.
	if raceenabled {
		racereleasemerge(unsafe.Pointer(&labelSync))
	}
	getg().labels = labels
}
// runtime_getProfLabel should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cloudwego/localsession
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname runtime_getProfLabel runtime/pprof.runtime_getProfLabel
func runtime_getProfLabel() unsafe.Pointer {
	// Returns the current goroutine's label set, as stored by runtime_setProfLabel.
	return getg().labels
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !race
// Dummy race detection API, used when not built with -race.
package runtime
import (
"unsafe"
)
const raceenabled = false

// Because raceenabled is false, none of these functions should be called.
// Each one throws so that an accidental call in a non-race build fails
// loudly instead of silently doing nothing.

func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr)  { throw("race") }
func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }

func raceinit() (uintptr, uintptr)                                      { throw("race"); return 0, 0 }
func racefini()                                                         { throw("race") }
func raceproccreate() uintptr                                           { throw("race"); return 0 }
func raceprocdestroy(ctx uintptr)                                       { throw("race") }
func racemapshadow(addr unsafe.Pointer, size uintptr)                   { throw("race") }
func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr)             { throw("race") }
func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr)              { throw("race") }
func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr)     { throw("race") }
func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr)    { throw("race") }
func raceacquire(addr unsafe.Pointer)                                   { throw("race") }
func raceacquireg(gp *g, addr unsafe.Pointer)                           { throw("race") }
func raceacquirectx(racectx uintptr, addr unsafe.Pointer)               { throw("race") }
func racerelease(addr unsafe.Pointer)                                   { throw("race") }
func racereleaseg(gp *g, addr unsafe.Pointer)                           { throw("race") }
func racereleaseacquire(addr unsafe.Pointer)                            { throw("race") }
func racereleaseacquireg(gp *g, addr unsafe.Pointer)                    { throw("race") }
func racereleasemerge(addr unsafe.Pointer)                              { throw("race") }
func racereleasemergeg(gp *g, addr unsafe.Pointer)                      { throw("race") }
func racefingo()                                                        { throw("race") }
func racemalloc(p unsafe.Pointer, sz uintptr)                           { throw("race") }
func racefree(p unsafe.Pointer, sz uintptr)                             { throw("race") }
func racegostart(pc uintptr) uintptr                                    { throw("race"); return 0 }
func racegoend()                                                        { throw("race") }
func racectxstart(spawnctx, racectx uintptr) uintptr                    { throw("race"); return 0 }
func racectxend(racectx uintptr)                                        { throw("race") }
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Random number generation
package runtime
import (
"internal/byteorder"
"internal/chacha8rand"
"internal/goarch"
"math/bits"
"unsafe"
_ "unsafe" // for go:linkname
)
// OS-specific startup can set startupRand if the OS passes
// random data to the process at startup time.
// For example Linux passes 16 bytes in the auxv vector.
var startupRand []byte

// globalRand holds the global random state.
// It is only used at startup and for creating new m's.
// Otherwise the per-m random state should be used
// by calling goodrand.
var globalRand struct {
	lock  mutex
	seed  [32]byte          // ChaCha8 seed material; cleared once state is initialized
	state chacha8rand.State // the generator itself
	init  bool              // set by randinit; guards against use before / double init
}

// readRandomFailed is set if readRandom could not supply a full seed
// and the time-based fallback was used instead (see randinit).
var readRandomFailed bool
// randinit initializes the global random state.
// It must be called before any use of grand.
func randinit() {
	lock(&globalRand.lock)
	if globalRand.init {
		fatal("randinit twice")
	}

	seed := &globalRand.seed
	if len(startupRand) >= 16 &&
		// Check that at least the first two words of startupRand weren't
		// cleared by any libc initialization.
		!allZero(startupRand[:8]) && !allZero(startupRand[8:16]) {
		// Fold all of startupRand into the 32-byte seed, wrapping
		// around if the OS supplied more than 32 bytes.
		for i, c := range startupRand {
			seed[i%len(seed)] ^= c
		}
	} else {
		if readRandom(seed[:]) != len(seed) || allZero(seed[:]) {
			// readRandom should never fail, but if it does we'd rather
			// not make Go binaries completely unusable, so make up
			// some random data based on the current time.
			readRandomFailed = true
			readTimeRandom(seed[:])
		}
	}
	globalRand.state.Init(*seed)
	// Erase the seed so it cannot be recovered from memory later.
	clear(seed[:])

	if startupRand != nil {
		// Overwrite startupRand instead of clearing it, in case cgo programs
		// access it after we used it.
		for len(startupRand) > 0 {
			// Draw 8 fresh bytes from the generator, refilling as needed.
			buf := make([]byte, 8)
			for {
				if x, ok := globalRand.state.Next(); ok {
					byteorder.BEPutUint64(buf, x)
					break
				}
				globalRand.state.Refill()
			}
			n := copy(startupRand, buf)
			startupRand = startupRand[n:]
		}
		startupRand = nil
	}

	globalRand.init = true
	unlock(&globalRand.lock)
}
// readTimeRandom stretches any entropy in the current time
// into entropy the length of r and XORs it into r.
// This is a fallback for when readRandom does not read
// the full requested amount.
// Whatever entropy r already contained is preserved.
func readTimeRandom(r []byte) {
	// Inspired by wyrand.
	// An earlier version of this code used getg().m.procid as well,
	// but note that this is called so early in startup that procid
	// is not initialized yet.
	x := uint64(nanotime())
	for len(r) > 0 {
		// Advance the wyrand-style state.
		x ^= 0xa0761d6478bd642f
		x *= 0xe7037ed1a0b428db
		// XOR up to 8 little-endian bytes of state into r.
		n := min(8, len(r))
		for i := 0; i < n; i++ {
			r[i] ^= byte(x >> (8 * i))
		}
		r = r[n:]
		// Rotate so the next chunk draws from the other half.
		x = x>>32 | x<<32
	}
}
// allZero reports whether every byte of b is zero.
// It ORs all bytes into a single accumulator and compares once,
// mirroring the original accumulate-then-test approach rather than
// returning early at the first nonzero byte.
func allZero(b []byte) bool {
	var or byte
	for i := range b {
		or |= b[i]
	}
	return or == 0
}
// bootstrapRand returns a random uint64 from the global random generator.
func bootstrapRand() uint64 {
	lock(&globalRand.lock)
	if !globalRand.init {
		fatal("randinit missed")
	}
	for {
		if x, ok := globalRand.state.Next(); ok {
			unlock(&globalRand.lock)
			return x
		}
		// Generator buffer exhausted; refill and retry.
		globalRand.state.Refill()
	}
}

// bootstrapRandReseed reseeds the bootstrap random number generator,
// clearing from memory any trace of previously returned random numbers.
func bootstrapRandReseed() {
	lock(&globalRand.lock)
	if !globalRand.init {
		fatal("randinit missed")
	}
	globalRand.state.Reseed()
	unlock(&globalRand.lock)
}
// rand32 is uint32(rand()), called from compiler-generated code.
//
//go:nosplit
func rand32() uint32 {
	return uint32(rand())
}

// rand returns a random uint64 from the per-m chacha8 state.
// This is called from compiler-generated code.
//
// Do not change signature: used via linkname from other packages.
//
//go:nosplit
//go:linkname rand
func rand() uint64 {
	// Note: We avoid acquirem here so that in the fast path
	// there is just a getg, an inlined c.Next, and a return.
	// The performance difference on a 16-core AMD is
	// 3.7ns/call this way versus 4.3ns/call with acquirem (+16%).
	mp := getg().m
	c := &mp.chacha8
	for {
		// Note: c.Next is marked nosplit,
		// so we don't need to use mp.locks
		// on the fast path, which is that the
		// first attempt succeeds.
		x, ok := c.Next()
		if ok {
			return x
		}
		mp.locks++ // hold m even though c.Refill may do stack split checks
		c.Refill()
		mp.locks--
	}
}

// maps_rand exposes rand to internal/runtime/maps via linkname.
//
//go:linkname maps_rand internal/runtime/maps.rand
func maps_rand() uint64 {
	return rand()
}
// mrandinit initializes the random state of an m.
func mrandinit(mp *m) {
	// Seed the per-m ChaCha8 state from the global bootstrap generator.
	var seed [4]uint64
	for i := range seed {
		seed[i] = bootstrapRand()
	}
	bootstrapRandReseed() // erase key we just extracted
	mp.chacha8.Init64(seed)
	// Seed the cheap (non-cryptographic) generators from the good one.
	mp.cheaprand = uint32(rand())
	mp.cheaprand64 = rand()
}

// randn is like rand() % n but faster.
// Do not change signature: used via linkname from other packages.
//
//go:nosplit
//go:linkname randn
func randn(n uint32) uint32 {
	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
	return uint32((uint64(uint32(rand())) * uint64(n)) >> 32)
}
// cheaprand is a non-cryptographic-quality 32-bit random generator
// suitable for calling at very high frequency (such as during scheduling decisions)
// and at sensitive moments in the runtime (such as during stack unwinding).
// it is "cheap" in the sense of both expense and quality.
//
// cheaprand must not be exported to other packages:
// the rule is that other packages using runtime-provided
// randomness must always use rand.
//
// cheaprand should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/gopkg
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname cheaprand
//go:nosplit
func cheaprand() uint32 {
	mp := getg().m
	// Implement wyrand: https://github.com/wangyi-fudan/wyhash
	// Only the platform that supports 64-bit multiplication
	// natively should be allowed.
	// NOTE(review): this branch uses a 32-bit multiply (bits.Mul32) and
	// 32-bit constants, while the wyrand reference uses a 64-bit multiply
	// with 64-bit constants — confirm the width/constants against the
	// wyhash reference before relying on its statistical properties.
	if bits.UintSize == 64 {
		mp.cheaprand += 0x53c5ca59
		hi, lo := bits.Mul32(mp.cheaprand, mp.cheaprand^0x74743c1b)
		return hi ^ lo
	}

	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
	// This generator passes the SmallCrush suite, part of TestU01 framework:
	// http://simul.iro.umontreal.ca/testu01/tu01.html
	t := (*[2]uint32)(unsafe.Pointer(&mp.cheaprand64))
	s1, s0 := t[0], t[1]
	s1 ^= s1 << 17
	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
	t[0], t[1] = s0, s1
	return s0 + s1
}
// cheaprand64 is a non-cryptographic-quality 63-bit random generator
// suitable for calling at very high frequency (such as during sampling decisions).
// it is "cheap" in the sense of both expense and quality.
//
// cheaprand64 must not be exported to other packages:
// the rule is that other packages using runtime-provided
// randomness must always use rand.
//
// cheaprand64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/zhangyunhao116/fastrand
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname cheaprand64
//go:nosplit
func cheaprand64() int64 {
	// Mask off the sign bit so the result is a non-negative 63-bit value.
	return int64(cheaprandu64() & ^(uint64(1) << 63))
}

// cheaprandu64 is a non-cryptographic-quality 64-bit random generator
// suitable for calling at very high frequency (such as during sampling decisions).
// it is "cheap" in the sense of both expense and quality.
//
// cheaprandu64 must not be exported to other packages:
// the rule is that other packages using runtime-provided
// randomness must always use rand.
//
//go:nosplit
func cheaprandu64() uint64 {
	// Implement wyrand: https://github.com/wangyi-fudan/wyhash
	// Only the platform that bits.Mul64 can be lowered
	// by the compiler should be in this list.
	if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
		goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
		goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 {
		mp := getg().m
		// Implement wyrand: https://github.com/wangyi-fudan/wyhash
		mp.cheaprand64 += 0xa0761d6478bd642f
		hi, lo := bits.Mul64(mp.cheaprand64, mp.cheaprand64^0xe7037ed1a0b428db)
		return hi ^ lo
	}
	// Fallback: glue two 32-bit cheaprand outputs together.
	return uint64(cheaprand())<<32 | uint64(cheaprand())
}

// cheaprandn is like cheaprand() % n but faster.
//
// cheaprandn must not be exported to other packages:
// the rule is that other packages using runtime-provided
// randomness must always use randn.
//
// cheaprandn should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/phuslu/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname cheaprandn
//go:nosplit
func cheaprandn(n uint32) uint32 {
	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
	return uint32((uint64(cheaprand()) * uint64(n)) >> 32)
}
// Too much legacy code has go:linkname references
// to runtime.fastrand and friends, so keep these around for now.
// Code should migrate to math/rand/v2.Uint64,
// which is just as fast, but that's only available in Go 1.22+.
// It would be reasonable to remove these in Go 1.24.
// Do not call these from package runtime.
// Each wrapper simply forwards to the modern generators above.

//go:linkname legacy_fastrand runtime.fastrand
func legacy_fastrand() uint32 {
	return uint32(rand())
}

//go:linkname legacy_fastrandn runtime.fastrandn
func legacy_fastrandn(n uint32) uint32 {
	return randn(n)
}

//go:linkname legacy_fastrand64 runtime.fastrand64
func legacy_fastrand64() uint64 {
	return rand()
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import _ "unsafe" // for go:linkname
// setMaxStack sets maxstacksize and returns the previous value.
// Linknamed from runtime/debug.SetMaxStack.
//
//go:linkname setMaxStack runtime/debug.setMaxStack
func setMaxStack(in int) (out int) {
	out = int(maxstacksize)
	maxstacksize = uintptr(in)
	return out
}

// setPanicOnFault sets the current goroutine's paniconfault flag and
// returns the previous setting. Linknamed from runtime/debug.SetPanicOnFault.
//
//go:linkname setPanicOnFault runtime/debug.setPanicOnFault
func setPanicOnFault(new bool) (old bool) {
	gp := getg()
	old = gp.paniconfault
	gp.paniconfault = new
	return old
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package runtime
// retryOnEAGAIN retries a function until it does not return EAGAIN.
// It will use an increasing delay between calls, and retry up to 20 times.
// The function argument is expected to return an errno value,
// and retryOnEAGAIN will return any errno value other than EAGAIN.
// If all retries return EAGAIN, then retryOnEAGAIN will return EAGAIN.
func retryOnEAGAIN(fn func() int32) int32 {
	const maxTries = 20
	for attempt := 1; attempt <= maxTries; attempt++ {
		if errno := fn(); errno != _EAGAIN {
			return errno
		}
		// Linear backoff: 1ms after the first failure, 2ms after the
		// second, and so on.
		usleep_no_g(uint32(attempt) * 1000) // milliseconds
	}
	return _EAGAIN
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/runtime/atomic"
"unsafe"
)
//go:generate go run wincallback.go
//go:generate go run mkduff.go
//go:generate go run mkfastlog2table.go
//go:generate go run mklockrank.go -o lockrank.go
// ticks holds the state used to convert between the cputicks and
// nanotime clocks; see ticksPerSecond.
var ticks ticksType

type ticksType struct {
	// lock protects access to start* and val.
	lock       mutex
	startTicks int64        // cputicks() recorded by init
	startTime  int64        // nanotime() recorded by init
	val        atomic.Int64 // cached conversion rate; zero means "not computed yet"
}

// init initializes ticks to maximize the chance that we have a good ticksPerSecond reference.
//
// Must not run concurrently with ticksPerSecond.
func (t *ticksType) init() {
	lock(&ticks.lock)
	t.startTime = nanotime()
	t.startTicks = cputicks()
	unlock(&ticks.lock)
}

// minTimeForTicksPerSecond is the minimum elapsed time we require to consider our ticksPerSecond
// measurement to be of decent enough quality for profiling.
//
// There's a linear relationship here between minimum time and error from the true value.
// The error from the true ticks-per-second in a linux/amd64 VM seems to be:
// - 1 ms -> ~0.02% error
// - 5 ms -> ~0.004% error
// - 10 ms -> ~0.002% error
// - 50 ms -> ~0.0003% error
// - 100 ms -> ~0.0001% error
//
// We're willing to take 0.004% error here, because ticksPerSecond is intended to be used for
// converting durations, not timestamps. Durations are usually going to be much larger, and so
// the tiny error doesn't matter. The error is definitely going to be a problem when trying to
// use this for timestamps, as it'll make those timestamps much less likely to line up.
//
// The expression selects 5ms normally and 100ms when osHasLowResClockInt
// is 1 (low-resolution OS clock).
const minTimeForTicksPerSecond = 5_000_000*(1-osHasLowResClockInt) + 100_000_000*osHasLowResClockInt
// ticksPerSecond returns a conversion rate between the cputicks clock and the nanotime clock.
//
// Note: Clocks are hard. Using this as an actual conversion rate for timestamps is ill-advised
// and should be avoided when possible. Use only for durations, where a tiny error term isn't going
// to make a meaningful difference in even a 1ms duration. If an accurate timestamp is needed,
// use nanotime instead. (The entire Windows platform is a broad exception to this rule, where nanotime
// produces timestamps on such a coarse granularity that the error from this conversion is actually
// preferable.)
//
// The strategy for computing the conversion rate is to write down nanotime and cputicks as
// early in process startup as possible. From then, we just need to wait until we get values
// from nanotime that we can use (some platforms have a really coarse system time granularity).
// We require some amount of time to pass to ensure that the conversion rate is fairly accurate
// in aggregate. But because we compute this rate lazily, there's a pretty good chance a decent
// amount of time has passed by the time we get here.
//
// Must be called from a normal goroutine context (running regular goroutine with a P).
//
// Called by runtime/pprof in addition to runtime code.
//
// TODO(mknyszek): This doesn't account for things like CPU frequency scaling. Consider
// a more sophisticated and general approach in the future.
func ticksPerSecond() int64 {
	// Get the conversion rate if we've already computed it.
	r := ticks.val.Load()
	if r != 0 {
		return r
	}

	// Compute the conversion rate.
	for {
		lock(&ticks.lock)
		// Re-check under the lock: another goroutine may have computed it.
		r = ticks.val.Load()
		if r != 0 {
			unlock(&ticks.lock)
			return r
		}

		// Grab the current time in both clocks.
		nowTime := nanotime()
		nowTicks := cputicks()

		// See if we can use these times.
		if nowTicks > ticks.startTicks && nowTime-ticks.startTime > minTimeForTicksPerSecond {
			// Perform the calculation with floats. We don't want to risk overflow.
			r = int64(float64(nowTicks-ticks.startTicks) * 1e9 / float64(nowTime-ticks.startTime))
			if r == 0 {
				// Zero is both a sentinel value and it would be bad if callers used this as
				// a divisor. We tried our best, so just make it 1.
				r++
			}
			ticks.val.Store(r)
			unlock(&ticks.lock)
			break
		}
		unlock(&ticks.lock)

		// Sleep in one millisecond increments until we have a reliable time.
		timeSleep(1_000_000)
	}
	return r
}
// envs and argslice hold copies of the process environment and
// command-line arguments, populated during startup (see goenvs_unix
// and goargs below).
var envs []string
var argslice []string

// syscall_runtime_envs returns a fresh copy of envs so callers cannot
// mutate the runtime's view of the environment.
//
//go:linkname syscall_runtime_envs syscall.runtime_envs
func syscall_runtime_envs() []string { return append([]string{}, envs...) }

//go:linkname syscall_Getpagesize syscall.Getpagesize
func syscall_Getpagesize() int { return int(physPageSize) }

// os_runtime_args returns a fresh copy of argslice for package os.
//
//go:linkname os_runtime_args os.runtime_args
func os_runtime_args() []string { return append([]string{}, argslice...) }

//go:linkname syscall_Exit syscall.Exit
//go:nosplit
func syscall_Exit(code int) {
	exit(int32(code))
}
// godebugDefault holds the compile-time default GODEBUG settings
// (applied by parseRuntimeDebugVars before the environment value).
var godebugDefault string

// These atomics are the registration points through which
// internal/godebug observes runtime GODEBUG state; nil means
// "not registered yet".
var godebugUpdate atomic.Pointer[func(string, string)]
var godebugEnv atomic.Pointer[string] // set by parsedebugvars
var godebugNewIncNonDefault atomic.Pointer[func(string) func()]

// godebug_setUpdate records internal/godebug's update callback and
// immediately notifies it of the current settings.
//
//go:linkname godebug_setUpdate internal/godebug.setUpdate
func godebug_setUpdate(update func(string, string)) {
	p := new(func(string, string))
	*p = update
	godebugUpdate.Store(p)
	godebugNotify(false)
}

// godebug_setNewIncNonDefault records internal/godebug's
// non-default-counter factory (used by godebugInc.IncNonDefault).
//
//go:linkname godebug_setNewIncNonDefault internal/godebug.setNewIncNonDefault
func godebug_setNewIncNonDefault(newIncNonDefault func(string) func()) {
	p := new(func(string) func())
	*p = newIncNonDefault
	godebugNewIncNonDefault.Store(p)
	defaultGOMAXPROCSUpdateGODEBUG()
}
// A godebugInc provides access to internal/godebug's IncNonDefault function
// for a given GODEBUG setting.
// Calls before internal/godebug registers itself are dropped on the floor.
type godebugInc struct {
	name string                 // GODEBUG setting name this counter belongs to
	inc  atomic.Pointer[func()] // lazily created increment closure
}

// IncNonDefault increments the non-default-behavior counter for g.name,
// creating the underlying closure on first use. If internal/godebug has
// not registered its factory yet, the call is dropped.
func (g *godebugInc) IncNonDefault() {
	inc := g.inc.Load()
	if inc == nil {
		newInc := godebugNewIncNonDefault.Load()
		if newInc == nil {
			// internal/godebug not registered yet; drop the count.
			return
		}
		inc = new(func())
		*inc = (*newInc)(g.name)
		if raceenabled {
			racereleasemerge(unsafe.Pointer(&g.inc))
		}
		// Publish the closure; if another goroutine won the race,
		// use its closure instead of ours.
		if !g.inc.CompareAndSwap(nil, inc) {
			inc = g.inc.Load()
		}
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(&g.inc))
	}
	(*inc)()
}
// godebugNotify pushes the current GODEBUG default and environment
// values to internal/godebug's registered update callback, reparsing
// the runtime's own variables first if the environment changed.
func godebugNotify(envChanged bool) {
	update := godebugUpdate.Load()
	var env string
	if p := godebugEnv.Load(); p != nil {
		env = *p
	}
	if envChanged {
		reparsedebugvars(env)
	}
	if update != nil {
		(*update)(godebugDefault, env)
	}
}

// syscall_runtimeSetenv mirrors a Setenv into the C environment and,
// for GODEBUG, re-propagates the new value through godebugNotify.
//
//go:linkname syscall_runtimeSetenv syscall.runtimeSetenv
func syscall_runtimeSetenv(key, value string) {
	setenv_c(key, value)
	if key == "GODEBUG" {
		p := new(string)
		*p = value
		godebugEnv.Store(p)
		godebugNotify(true)
	}
}

// syscall_runtimeUnsetenv is the unsetenv counterpart of syscall_runtimeSetenv.
//
//go:linkname syscall_runtimeUnsetenv syscall.runtimeUnsetenv
func syscall_runtimeUnsetenv(key string) {
	unsetenv_c(key)
	if key == "GODEBUG" {
		godebugEnv.Store(nil)
		godebugNotify(true)
	}
}
// writeErrStr writes a string to descriptor 2.
// If SetCrashOutput(f) was called, it also writes to f.
//
//go:nosplit
func writeErrStr(s string) {
	writeErrData(unsafe.StringData(s), int32(len(s)))
}

// writeErrData is the common parts of writeErr{,Str}.
//
//go:nosplit
func writeErrData(data *byte, n int32) {
	write(2, unsafe.Pointer(data), n)

	// If crashing, print a copy to the SetCrashOutput fd.
	// When there is no g, fall back to the global panicking counter
	// instead of m.dying.
	gp := getg()
	if gp != nil && gp.m.dying > 0 ||
		gp == nil && panicking.Load() > 0 {
		// ^uintptr(0) (i.e. -1) means no crash fd is installed.
		if fd := crashFD.Load(); fd != ^uintptr(0) {
			write(fd, unsafe.Pointer(data), n)
		}
	}
}
// crashFD is an optional file descriptor to use for fatal panics, as
// set by debug.SetCrashOutput (see #42888). If it is a valid fd (not
// all ones), writeErr and related functions write to it in addition
// to standard error.
//
// Initialized to -1 in schedinit.
var crashFD atomic.Uintptr

// setCrashFD installs fd as the crash-output descriptor and returns the
// previous one, or ^uintptr(0) when a panic is already in progress (in
// which case the caller must not close the old fd; see below).
//
//go:linkname setCrashFD
func setCrashFD(fd uintptr) uintptr {
	// Don't change the crash FD if a crash is already in progress.
	//
	// Unlike the case below, this is not required for correctness, but it
	// is generally nicer to have all of the crash output go to the same
	// place rather than getting split across two different FDs.
	if panicking.Load() > 0 {
		return ^uintptr(0)
	}
	old := crashFD.Swap(fd)

	// If we are panicking, don't return the old FD to runtime/debug for
	// closing. writeErrData may have already read the old FD from crashFD
	// before the swap and closing it would cause the write to be lost [1].
	// The old FD will never be closed, but we are about to crash anyway.
	//
	// On the writeErrData thread, panicking.Add(1) happens-before
	// crashFD.Load() [2].
	//
	// On this thread, swapping old FD for new in crashFD happens-before
	// panicking.Load() > 0.
	//
	// Therefore, if panicking.Load() == 0 here (old FD will be closed), it
	// is impossible for the writeErrData thread to observe
	// crashFD.Load() == old FD.
	//
	// [1] Or, if really unlucky, another concurrent open could reuse the
	// FD, sending the write into an unrelated file.
	//
	// [2] If gp != nil, it occurs when incrementing gp.m.dying in
	// startpanic_m. If gp == nil, we read panicking.Load() > 0, so an Add
	// must have happened-before.
	if panicking.Load() > 0 {
		return ^uintptr(0)
	}
	return old
}
// auxv is populated on relevant platforms but defined here for all platforms
// so x/sys/cpu and x/sys/unix can assume the getAuxv symbol exists without
// keeping its list of auxv-using GOOS build tags in sync.
//
// It contains an even number of elements, (tag, value) pairs.
var auxv []uintptr

// getAuxv returns the raw auxiliary vector (tag, value pairs).
//
// golang.org/x/sys/cpu and golang.org/x/sys/unix use getAuxv via linkname.
// Do not remove or change the type signature.
// See go.dev/issue/57336 and go.dev/issue/67401.
//
//go:linkname getAuxv
func getAuxv() []uintptr { return auxv }

// zeroVal is used by reflect via linkname.
//
// zeroVal should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname zeroVal
var zeroVal [abi.ZeroValSize]byte
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/bytealg"
"internal/goarch"
"internal/runtime/atomic"
"internal/strconv"
"unsafe"
)
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota // crash (e.g. core dump) after printing tracebacks
	tracebackAll               // print all goroutines, not just the current one
	tracebackShift = iota      // number of flag bits below the level field
)

// traceback_cache is the packed current setting (level 2, no flags,
// until setTraceback applies GOTRACEBACK); traceback_env preserves the
// environment-derived value.
var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	gp := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = gp.m.throwing > throwTypeUser || t&tracebackAll != 0
	if gp.m.traceback != 0 {
		// A per-m override takes precedence over the cached setting.
		level = int32(gp.m.traceback)
	} else if gp.m.throwing >= throwTypeRuntime {
		// Always include runtime frames in runtime throws unless
		// otherwise overridden by m.traceback.
		level = 2
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
// Raw C argc/argv as handed to the runtime at startup (see args).
var (
	argc int32
	argv **byte
)

// argv_index returns argv[i] from a C-style array of byte pointers.
//
// nosplit for use in linux startup sysargs.
//
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}

// args records the raw argc/argv and gives the OS layer a chance to
// extract anything else it needs from them (via sysargs).
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

// goargs copies the C argv into argslice as Go strings.
func goargs() {
	if GOOS == "windows" {
		// Windows arguments are presumably populated by OS-specific
		// code not visible here — skip the argv copy.
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

// goenvs_unix copies the C environment, which follows argv after its
// NULL terminator, into envs as Go strings.
func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

// environ returns the runtime's view of the environment.
func environ() []string {
	return envs
}
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

// testAtomic64 sanity-checks the 64-bit atomic operations
// (Cas64, Load64, Store64, Xadd64, Xchg64) and throws on any failure.
// Called from check below.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Cas64 must fail here: test_z64 (42) != old value (0).
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	// Now the old value matches, so the swap must succeed.
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Values above 1<<32 verify that the full 64 bits are handled.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
// check verifies basic assumptions the runtime makes about the compiler
// and architecture: primitive type sizes, struct layout and padding,
// 32- and 64-bit atomic operations, and IEEE 754 NaN comparison
// semantics. It throws on the first violated assumption.
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != goarch.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != goarch.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	// 32-bit compare-and-swap: success, failure, and all-ones patterns.
	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	// Byte-granular atomic OR/AND must not disturb neighboring bytes.
	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	// NaN must compare unequal to everything, including itself,
	// under both == and !=.
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	// Exercise the != operator as well, mirroring the float64 checks
	// above. (This previously repeated the == test and so never
	// validated float32 !=.)
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if fixedStack != round2(fixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}
// dbgVar describes a single GODEBUG-settable runtime variable.
// Exactly one of value and atomic is set for each entry (see dbgvars).
type dbgVar struct {
	name   string
	value  *int32        // for variables that can only be set at startup
	atomic *atomic.Int32 // for variables that can be changed during execution
	def    int32         // default value (ideally zero)
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck                 int32
	clobberfree              int32
	containermaxprocs        int32
	decoratemappings         int32
	disablethp               int32
	dontfreezetheworld       int32
	efence                   int32
	gccheckmark              int32
	gcpacertrace             int32
	gcshrinkstackoff         int32
	gcstoptheworld           int32
	gctrace                  int32
	invalidptr               int32
	madvdontneed             int32 // for Linux; issue 28466
	scavtrace                int32
	scheddetail              int32
	schedtrace               int32
	tracebackancestors       int32
	updatemaxprocs           int32
	asyncpreemptoff          int32
	harddecommit             int32
	adaptivestackstart       int32
	tracefpunwindoff         int32
	traceadvanceperiod       int32
	traceCheckStackOwnership int32
	profstackdepth           int32
	dataindependenttiming    int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc          bool
	inittrace       int32
	sbrk            int32
	checkfinalizers int32

	// traceallocfree controls whether execution traces contain
	// detailed trace data about memory allocation. This value
	// affects debug.malloc only if it is != 0 and the execution
	// tracer is enabled, in which case debug.malloc will be
	// set to "true" if it isn't already while tracing is enabled.
	// It will be set while the world is stopped, so it's safe.
	// The value of traceallocfree can be changed any time in response
	// to os.Setenv("GODEBUG").
	traceallocfree atomic.Int32

	panicnil atomic.Int32

	// asynctimerchan controls whether timer channels
	// behave asynchronously (as in Go 1.22 and earlier)
	// instead of their Go 1.23+ synchronous behavior.
	// The value can change at any time (in response to os.Setenv("GODEBUG"))
	// and affects all extant timer channels immediately.
	// Programs wouldn't normally change over an execution,
	// but allowing it is convenient for testing and for programs
	// that do an os.Setenv in main.init or main.main.
	asynctimerchan atomic.Int32

	// tracebacklabels controls the inclusion of goroutine labels in the
	// goroutine status header line.
	tracebacklabels atomic.Int32
}
// dbgvars maps GODEBUG setting names to the runtime variables they
// control. Lookup is by name, so ordering does not affect behavior;
// the list is kept alphabetical for readability (the "checkfinalizers"
// entry was previously out of order).
var dbgvars = []*dbgVar{
	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
	{name: "asynctimerchan", atomic: &debug.asynctimerchan},
	{name: "cgocheck", value: &debug.cgocheck},
	{name: "checkfinalizers", value: &debug.checkfinalizers},
	{name: "clobberfree", value: &debug.clobberfree},
	{name: "containermaxprocs", value: &debug.containermaxprocs, def: 1},
	{name: "dataindependenttiming", value: &debug.dataindependenttiming},
	{name: "decoratemappings", value: &debug.decoratemappings, def: 1},
	{name: "disablethp", value: &debug.disablethp},
	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
	{name: "efence", value: &debug.efence},
	{name: "gccheckmark", value: &debug.gccheckmark},
	{name: "gcpacertrace", value: &debug.gcpacertrace},
	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
	{name: "gctrace", value: &debug.gctrace},
	{name: "harddecommit", value: &debug.harddecommit},
	{name: "inittrace", value: &debug.inittrace},
	{name: "invalidptr", value: &debug.invalidptr},
	{name: "madvdontneed", value: &debug.madvdontneed},
	{name: "panicnil", atomic: &debug.panicnil},
	{name: "profstackdepth", value: &debug.profstackdepth, def: 128},
	{name: "sbrk", value: &debug.sbrk},
	{name: "scavtrace", value: &debug.scavtrace},
	{name: "scheddetail", value: &debug.scheddetail},
	{name: "schedtrace", value: &debug.schedtrace},
	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
	{name: "traceallocfree", atomic: &debug.traceallocfree},
	{name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
	{name: "tracebackancestors", value: &debug.tracebackancestors},
	{name: "tracebacklabels", atomic: &debug.tracebacklabels, def: 1},
	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
	{name: "updatemaxprocs", value: &debug.updatemaxprocs, def: 1},
}
// parseRuntimeDebugVars initializes the runtime's debug variables at startup.
// Settings are applied in order of increasing precedence:
//  1. the hard-coded defaults set directly below,
//  2. per-variable defaults from dbgvars (the def field),
//  3. compile-time GODEBUG settings (godebugDefault),
//  4. the GODEBUG environment string passed in as godebug.
func parseRuntimeDebugVars(godebug string) {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that
		// MADV_DONTNEED does until the memory is actually
		// reclaimed. This generally leads to poor user
		// experience, like confusing stats in top and other
		// monitoring tools; and bad integration with
		// management systems that respond to memory usage.
		// Hence, default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	// apply runtime defaults, if any
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every var should have either v.value or v.atomic set.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// apply compile-time GODEBUG settings
	parsegodebug(godebugDefault, nil)

	// apply environment settings
	parsegodebug(godebug, nil)

	// debug.malloc is a derived flag: any of these modes requires
	// extra bookkeeping in the allocator.
	debug.malloc = (debug.inittrace | debug.sbrk | debug.checkfinalizers) != 0

	// Clamp the requested profiling stack depth to the supported maximum.
	debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)

	// Disable async preemption in checkmark mode. The following situation is
	// problematic with checkmark mode:
	//
	// - The GC doesn't mark object A because it is truly dead.
	// - The GC stops the world, asynchronously preempting G1 which has a reference
	//   to A in its top stack frame
	// - During the stop the world, we run the second checkmark GC. It marks the roots
	//   and discovers A through G1.
	// - Checkmark mode reports a failure since there's a discrepancy in mark metadata.
	//
	// We could disable just conservative scanning during the checkmark scan, which is
	// safe but makes checkmark slightly less powerful, but that's a lot more invasive
	// than just disabling async preemption altogether.
	if debug.gccheckmark > 0 {
		debug.asyncpreemptoff = 1
	}
}
// finishDebugVarsSetup captures the process environment for debug settings:
// it publishes the GODEBUG string via godebugEnv and seeds the traceback
// configuration from GOTRACEBACK.
func finishDebugVarsSetup() {
	env := new(string)
	*env = gogetenv("GODEBUG")
	godebugEnv.Store(env)
	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
// reparsedebugvars reparses the runtime's debug variables
// because the environment variable has been changed to env.
func reparsedebugvars(env string) {
	updated := make(map[string]bool)
	// Environment settings take precedence over compile-time defaults,
	// so they are parsed first; parsegodebug records every key it
	// applies in updated and skips keys already present.
	parsegodebug(env, updated)
	parsegodebug(godebugDefault, updated)
	// Any atomic variable never mentioned reverts to its zero value.
	for _, dv := range dbgvars {
		if dv.atomic == nil || updated[dv.name] {
			continue
		}
		dv.atomic.Store(0)
	}
}
// parsegodebug parses the godebug string, updating variables listed in dbgvars.
// If seen == nil, this is startup time and we process the string left to right
// overwriting older settings with newer ones.
// If seen != nil, $GODEBUG has changed and we are doing an
// incremental update. To avoid flapping in the case where a value is
// set multiple times (perhaps in the default and the environment,
// or perhaps twice in the environment), we process the string right-to-left
// and only change values not already seen. After doing this for both
// the environment and the default settings, the caller must also call
// cleargodebug(seen) to reset any now-unset values back to their defaults.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// startup: process left to right, overwriting older settings with newer
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// incremental update: process right to left, updating and skipping seen
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		// Each field must be a key=value pair; malformed fields
		// (no '=') are silently ignored.
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]
		// In incremental mode, skip keys already applied (seen lookup on a
		// nil map is safe and always false in startup mode).
		if seen[key] {
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if seen == nil && key == "memprofilerate" {
			if n, err := strconv.Atoi(value); err == nil {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, err := strconv.ParseInt(value, 10, 32); err == nil {
						// At startup prefer the plain value; otherwise
						// (or when only atomic exists) store atomically.
						if seen == nil && v.value != nil {
							*v.value = int32(n)
						} else if v.atomic != nil {
							v.atomic.Store(int32(n))
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
// setTraceback updates the runtime traceback level from a GOTRACEBACK-style
// string ("none", "single", "all", "system", "crash", "wer", or a numeric
// level) and publishes the result atomically in traceback_cache.
// It is the implementation of runtime/debug.SetTraceback.
//
//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		// Windows Error Reporting: like "crash", plus enabling WER.
		// On non-Windows systems, fall through to the default
		// (numeric) handling.
		if GOOS == "windows" {
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		fallthrough
	default:
		// Unrecognized strings that parse as a non-negative number
		// that fits in uint32 set the level directly.
		t = tracebackAll
		if n, err := strconv.Atoi(level); err == nil && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}
	t |= traceback_env
	atomic.Store(&traceback_cache, t)
}
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

// acquirem pins the calling goroutine to its current M by incrementing the
// M's lock count, and returns that M. Pair every call with releasem.
//
//go:nosplit
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}
// releasem undoes a prior acquirem. When the M's lock count drops to zero
// and a preemption request is pending on the current G, it re-arms
// stackguard0 so the preemption takes effect at the next stack check.
//
//go:nosplit
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
// reflect_typelinks is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/goccy/json
//   - github.com/modern-go/reflect2
//   - github.com/vmware/govmomi
//   - github.com/pinpoint-apm/pinpoint-go-agent
//   - github.com/timandy/routine
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
// This is obsolete and only remains for external packages.
// New code should use reflect_compiledTypelinks.
//
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	// offsetsOf converts a module's typelink pointers into offsets
	// relative to that module's types section base.
	offsetsOf := func(md *moduledata) []int32 {
		links := moduleTypelinks(md)
		offs := make([]int32, len(links))
		for i, t := range links {
			offs[i] = int32(uintptr(unsafe.Pointer(t)) - md.types)
		}
		return offs
	}
	sections := make([]unsafe.Pointer, len(modules))
	offsets := make([][]int32, len(modules))
	for i, md := range modules {
		sections[i] = unsafe.Pointer(md.types)
		offsets[i] = offsetsOf(md)
	}
	return sections, offsets
}
// reflect_compiledTypelinks returns the typelink types
// generated by the compiler for all current modules.
// The normal case is a single module, so this returns one
// slice for the main module, and a slice of slices, normally nil,
// for other modules.
//
//go:linkname reflect_compiledTypelinks reflect.compiledTypelinks
func reflect_compiledTypelinks() ([]*abi.Type, [][]*abi.Type) {
	modules := activeModules()
	mainLinks := moduleTypelinks(modules[0])
	// Gather the remaining modules, if any. extra stays nil in the
	// common single-module case.
	var extra [][]*abi.Type
	for i := 1; i < len(modules); i++ {
		extra = append(extra, moduleTypelinks(modules[i]))
	}
	return mainLinks, extra
}
// reflect_resolveNameOff resolves a name offset from a base pointer.
//
// reflect_resolveNameOff is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/agiledragon/gomonkey/v2
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	n := resolveNameOff(ptrInModule, nameOff(off))
	return unsafe.Pointer(n.Bytes)
}
// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//
// reflect_resolveTypeOff is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//   - github.com/timandy/routine
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	rt := toRType((*_type)(rtype))
	return unsafe.Pointer(rt.typeOff(typeOff(off)))
}
// reflect_resolveTextOff resolves a function pointer offset from a base type.
//
// reflect_resolveTextOff is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/agiledragon/gomonkey/v2
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	rt := toRType((*_type)(rtype))
	return rt.textOff(textOff(off))
}
// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	n := resolveNameOff(ptrInModule, nameOff(off))
	return unsafe.Pointer(n.Bytes)
}
// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	rt := toRType((*_type)(rtype))
	return unsafe.Pointer(rt.typeOff(typeOff(off)))
}
// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		// Lazily create both directions of the mapping on first use.
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	if id, ok := reflectOffs.minv[ptr]; ok {
		// Already registered; return the existing ID.
		reflectOffsUnlock()
		return id
	}
	// Use negative offsets as IDs to aid debugging.
	id := reflectOffs.next
	reflectOffs.next--
	reflectOffs.m[id] = ptr
	reflectOffs.minv[ptr] = id
	reflectOffsUnlock()
	return id
}
// reflect_adjustAIXGCDataForRuntime takes a type.GCData address and returns
// the new address to use. This is only called on AIX.
// See getGCMaskOnDemand.
//
//go:linkname reflect_adjustAIXGCDataForRuntime reflect.adjustAIXGCDataForRuntime
func reflect_adjustAIXGCDataForRuntime(addr *byte) *byte {
return (*byte)(add(unsafe.Pointer(addr), aixStaticDataBase-firstmoduledata.data))
}
// fips_getIndicator reports the FIPS service indicator recorded on the
// current goroutine.
//
//go:linkname fips_getIndicator crypto/internal/fips140.getIndicator
func fips_getIndicator() uint8 {
	return getg().fipsIndicator
}
// fips_setIndicator records the FIPS service indicator on the current
// goroutine.
//
//go:linkname fips_setIndicator crypto/internal/fips140.setIndicator
func fips_setIndicator(indicator uint8) {
	getg().fipsIndicator = indicator
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/chacha8rand"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// defined constants
const (
	// G status
	//
	// A G's status is stored in its atomicstatus field.
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	//
	// TODO(austin): The _Gscan bit could be much lighter-weight.
	// For example, we could choose not to run _Gscanrunnable
	// goroutines found in the run queue, rather than CAS-looping
	// until they become _Grunnable. And transitions like
	// _Gscanwaiting -> _Gscanrunnable are actually okay because
	// they don't affect stack ownership.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M (g.m is valid) and it usually has a P
	// (g.m.p is valid), but there are small windows of time where
	// it might not, namely upon entering and exiting _Gsyscall.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	// It may have a P attached, but it does not own it. Code
	// executing in this state must not touch g.m.p.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gpreempted means this goroutine stopped itself for a
	// suspendG preemption. It is like _Gwaiting, but nothing is
	// yet responsible for ready()ing it. Some suspendG must CAS
	// the status to _Gwaiting to take responsibility for
	// ready()ing this G.
	_Gpreempted // 9

	// _Gleaked represents a leaked goroutine caught by the GC.
	_Gleaked // 10

	// _Gdeadextra is a _Gdead goroutine that's attached to an extra M
	// used for cgo callbacks.
	_Gdeadextra // 11

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan          = 0x1000
	_Gscanrunnable  = _Gscan + _Grunnable  // 0x1001
	_Gscanrunning   = _Gscan + _Grunning   // 0x1002
	_Gscansyscall   = _Gscan + _Gsyscall   // 0x1003
	_Gscanwaiting   = _Gscan + _Gwaiting   // 0x1004
	_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
	_Gscanleaked    = _Gscan + _Gleaked    // 0x100a
	_Gscandeadextra = _Gscan + _Gdeadextra // 0x100b
)
const (
	// P status
	//
	// (The P struct itself is defined elsewhere in this package.)

	// _Pidle means a P is not being used to run user code or the
	// scheduler. Typically, it's on the idle P list and available
	// to the scheduler, but it may just be transitioning between
	// other states.
	//
	// The P is owned by the idle list or by whatever is
	// transitioning its state. Its run queue is empty.
	_Pidle = iota

	// _Prunning means a P is owned by an M and is being used to
	// run user code or the scheduler. Only the M that owns this P
	// is allowed to change the P's status from _Prunning. The M
	// may transition the P to _Pidle (if it has no more work to
	// do), or _Pgcstop (to halt for the GC). The M may also hand
	// ownership of the P off directly to another M (for example,
	// to schedule a locked G).
	_Prunning

	// _Psyscall_unused is a now-defunct state for a P. A P is
	// identified as "in a system call" by looking at the goroutine's
	// state.
	_Psyscall_unused

	// _Pgcstop means a P is halted for STW and owned by the M
	// that stopped the world. The M that stopped the world
	// continues to use its P, even in _Pgcstop. Transitioning
	// from _Prunning to _Pgcstop causes an M to release its P and
	// park.
	//
	// The P retains its run queue and startTheWorld will restart
	// the scheduler on Ps with non-empty run queues.
	_Pgcstop

	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	// stripped of its resources, though a few things remain
	// (e.g., trace buffers).
	_Pdead
)
// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
// Initialization is helpful for static lock ranking, but not required.
type mutex struct {
	// Empty struct if lock ranking is disabled, otherwise includes the lock rank
	lockRankStruct
	// key is the single state word shared by both lock implementations:
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
// funcval is the runtime representation of a function value: the function
// address fn, optionally followed in memory by fn-specific data
// (e.g. a closure's captured variables).
type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}
// iface is the memory layout of an interface value that carries an itab:
// a type/method table pointer and the data word.
type iface struct {
	tab  *itab
	data unsafe.Pointer
}
// eface is the memory layout of an empty-interface (any) value:
// a type descriptor and the data word.
type eface struct {
	_type *_type
	data  unsafe.Pointer
}
// efaceOf reinterprets the memory of *ep as an eface in place (no copy).
func efaceOf(ep *any) *eface {
	return (*eface)(unsafe.Pointer(ep))
}
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
// Note that pollDesc.rg, pollDesc.wg also store g in uintptr form,
// so they would need to be updated too if g's start moving.
type guintptr uintptr

// ptr converts the stored address back to a *g.
//
//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

// set stores g's address, deliberately without a write barrier.
//
//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

// cas atomically compare-and-swaps the stored goroutine address.
//
//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

// guintptr returns gp's address as a write-barrier-free guintptr.
//
//go:nosplit
func (gp *g) guintptr() guintptr {
	return guintptr(unsafe.Pointer(gp))
}
// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
// The //go:nowritebarrier annotation makes the compiler reject any
// accidental write barrier in the body.
//
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}
// puintptr is a *p typed as uintptr to bypass write barriers,
// mirroring guintptr. This is safe because Ps are always reachable
// via true pointers in allp (see the comment above guintptr).
type puintptr uintptr

// ptr converts the stored address back to a *p.
//
//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

// set stores p's address, deliberately without a write barrier.
//
//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constrains on
// muintptrs:
//
//  1. Never hold an muintptr locally across a safe point.
//
//  2. Any muintptr in the heap must be owned by the M itself so it can
//     ensure it is not in use when the last true *m is released.
type muintptr uintptr

// ptr converts the stored address back to a *m.
//
//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

// set stores m's address, deliberately without a write barrier.
//
//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
// The //go:nowritebarrier annotation makes the compiler reject any
// accidental write barrier in the body.
//
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}
// gobuf holds the saved execution state used to suspend and resume a
// goroutine (this is the "Gobuf goroutine state" referenced by guintptr).
type gobuf struct {
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr        // saved stack pointer
	pc   uintptr        // saved program counter
	g    guintptr       // the goroutine this buffer belongs to (see guintptr)
	ctxt unsafe.Pointer // saved context register; see comment above
	lr   uintptr        // saved link register, on architectures that have one
	bp   uintptr        // for framepointer-enabled architectures
}
// maybeTraceablePtr is a special pointer that is conditionally trackable
// by the GC. It consists of an address as a uintptr (vu) and a pointer
// to a data element (vp).
//
// maybeTraceablePtr values can be in one of three states:
//  1. Unset: vu == 0 && vp == nil
//  2. Untracked: vu != 0 && vp == nil
//  3. Tracked: vu != 0 && vp != nil
//
// Do not set fields manually. Use methods instead.
// Extend this type with additional methods if needed.
type maybeTraceablePtr struct {
	vp unsafe.Pointer // For liveness only.
	vu uintptr        // Source of truth.
}

// setUntraceable unsets the pointer (vp) but preserves the address (vu).
// This is used to hide the pointer from the GC.
//
//go:nosplit
func (p *maybeTraceablePtr) setUntraceable() {
	p.vp = nil
}

// setTraceable resets the pointer to the stored address.
// This is used to make the pointer visible to the GC.
//
//go:nosplit
func (p *maybeTraceablePtr) setTraceable() {
	p.vp = unsafe.Pointer(p.vu)
}

// set sets the pointer to the data element and updates the address.
// The resulting state is Tracked (or Unset, if v is nil).
//
//go:nosplit
func (p *maybeTraceablePtr) set(v unsafe.Pointer) {
	p.vp = v
	p.vu = uintptr(v)
}

// get retrieves the pointer to the data element.
// It reads vu (the source of truth), so it returns the address even
// in the Untracked state.
//
//go:nosplit
func (p *maybeTraceablePtr) get() unsafe.Pointer {
	return unsafe.Pointer(p.vu)
}

// uintptr returns the uintptr address of the pointer.
//
//go:nosplit
func (p *maybeTraceablePtr) uintptr() uintptr {
	return p.vu
}
// maybeTraceableChan extends conditionally trackable pointers (maybeTraceablePtr)
// to track hchan pointers.
//
// Do not set fields manually. Use methods instead.
type maybeTraceableChan struct {
	maybeTraceablePtr
}

// set stores the channel pointer (see maybeTraceablePtr.set).
//
//go:nosplit
func (p *maybeTraceableChan) set(c *hchan) {
	p.maybeTraceablePtr.set(unsafe.Pointer(c))
}

// get retrieves the stored channel pointer (see maybeTraceablePtr.get).
//
//go:nosplit
func (p *maybeTraceableChan) get() *hchan {
	return (*hchan)(p.maybeTraceablePtr.get())
}
// sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g // the waiting goroutine

	// next and prev link this sudog into a wait list.
	next *sudog
	prev *sudog
	elem maybeTraceablePtr // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool

	// success indicates whether communication over channel c
	// succeeded. It is true if the goroutine was awoken because a
	// value was delivered over channel c, and false if awoken
	// because c was closed.
	success bool

	// waiters is a count of semaRoot waiting list other than head of list,
	// clamped to a uint16 to fit in unused space.
	// Only meaningful at the head of the list.
	// (If we wanted to be overly clever, we could store a high 16 bits
	// in the second entry in the list.)
	waiters uint16

	parent   *sudog // semaRoot binary tree
	waitlink *sudog // g.waiting list or semaRoot
	waittail *sudog // semaRoot
	c        maybeTraceableChan // channel
}
// libcall is an argument/result block for a call into a system library
// function: the function address, its parameters, and the returned
// values plus error number.
type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}
// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr // lowest address of the stack memory
	hi uintptr // one past the highest address of the stack memory
}
// heldLockInfo gives info on a held lock and the rank of that lock
type heldLockInfo struct {
	lockAddr uintptr  // address of the held lock
	rank     lockRank // its rank, for static lock-ranking checks
}
// g is the runtime representation of a goroutine.
type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to cmd/internal/obj/*
	stackguard1 uintptr // offset known to cmd/internal/obj/*

	_panic    *_panic // innermost panic
	_defer    *_defer // innermost defer
	m         *m      // current m
	sched     gobuf   // saved execution state used to resume this g
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	syscallbp uintptr // if status==Gsyscall, syscallbp = sched.bp to use in fpTraceback
	stktopsp  uintptr // expected sp at top of stack, to check in traceback

	// param is a generic pointer parameter field used to pass
	// values in particular contexts where other storage for the
	// parameter would be difficult to find. It is currently used
	// in four ways:
	// 1. When a channel operation wakes up a blocked goroutine, it sets param to
	//    point to the sudog of the completed blocking operation.
	// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
	//    the GC cycle. It is unsafe to do so in any other way, because the goroutine's
	//    stack may have moved in the meantime.
	// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
	//    closure in the runtime is forbidden.
	// 4. When a panic is recovered and control returns to the respective frame,
	//    param may point to a savedOpenDeferState.
	param        unsafe.Pointer
	atomicstatus atomic.Uint32 // G status; see the _G* constants
	stackLock    uint32        // sigprof/scang lock; TODO: fold in to atomicstatus
	goid         uint64        // goroutine ID
	schedlink    guintptr      // link in scheduling lists (see guintptr)
	waitsince    int64         // approx time when the g become blocked
	waitreason   waitReason    // if status==Gwaiting

	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point

	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack

	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool

	// parkingOnChan indicates that the goroutine is about to
	// park on a chansend or chanrecv. Used to signal an unsafe point
	// for stack shrinking.
	parkingOnChan atomic.Bool

	// inMarkAssist indicates whether the goroutine is in mark assist.
	// Used by the execution tracer.
	inMarkAssist bool
	coroexit     bool // argument to coroswitch_m

	raceignore    int8  // ignore race detection events
	nocgocallback bool  // whether disable callback from C
	tracking      bool  // whether we're tracking this G for sched latency statistics
	trackingSeq   uint8 // used to decide whether to track this G
	trackingStamp int64 // timestamp of when the G last started being tracked
	runnableTime  int64 // the amount of time spent runnable, cleared when running, only used when tracking

	lockedm         muintptr
	fipsIndicator   uint8
	fipsOnlyBypass  bool
	ditWanted       bool // set if g wants to be executed with DIT enabled
	syncSafePoint   bool // set if g is stopped at a synchronous safe point.
	runningCleanups atomic.Bool

	// sig, sigcode0, sigcode1, and sigpc record signal information.
	// NOTE(review): exact producer/consumer is in signal-handling code
	// not visible in this file section.
	sig      uint32
	secret   int32 // current nesting of runtime/secret.Do calls.
	writebuf []byte
	sigcode0 uintptr
	sigcode1 uintptr
	sigpc    uintptr

	parentGoid uint64          // goid of goroutine that created this goroutine
	gopc       uintptr         // pc of go statement that created this goroutine
	ancestors  *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc    uintptr         // pc of goroutine function
	racectx    uintptr         // race detector context
	waiting    *sudog          // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt    []uintptr       // cgo traceback context
	labels     unsafe.Pointer  // profiler labels
	timer      *timer          // cached timer for time.Sleep
	sleepWhen  int64           // when to sleep until
	selectDone atomic.Uint32   // are we participating in a select and did someone win the race?

	// goroutineProfiled indicates the status of this goroutine's stack for the
	// current in-progress goroutine profile
	goroutineProfiled goroutineProfileStateHolder

	coroarg *coro           // argument during coroutine transfers
	bubble  *synctestBubble

	// xRegs stores the extended register state if this G has been
	// asynchronously preempted.
	xRegs xRegPerG

	// Per-G tracer state.
	trace gTraceState

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64

	// valgrindStackID is used to track what memory is used for stacks when a program is
	// built with the "valgrind" build tag, otherwise it is unused.
	valgrindStackID uintptr
}
// gTrackingPeriod is the number of transitions out of _Grunning between
// latency tracking runs.
const gTrackingPeriod = 8

const (
	// tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
	// like Windows.
	tlsSlots = 6
	// tlsSize is the byte size of that reserved TLS area.
	tlsSize = tlsSlots * goarch.PtrSize
)

// Values for m.freeWait.
const (
	freeMStack = 0 // M done, free stack and reference.
	freeMRef   = 1 // M done, free reference.
	freeMWait  = 2 // M still in use.
)
// An m represents an OS thread known to the runtime. Field order at the top
// of the struct is significant: divmod's offset is known to liblink.
type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink (cmd/internal/obj/arm/obj5.go)

	// Fields whose offsets are not known to debuggers.
	procid     uint64            // for debuggers, but offset not hard-coded
	gsignal    *g                // signal-handling g
	goSigStack gsignalStack      // Go-allocated signal handling stack
	sigmask    sigset            // storage for saved signal mask
	tls        [tlsSlots]uintptr // thread-local storage (for x86 extern register)
	mstartfn   func()            // NOTE(review): presumably the function run when this M starts — confirm against newm callers
	curg       *g                // current running goroutine
	caughtsig  guintptr          // goroutine running during fatal signal

	// Indicates whether we've received a signal while
	// running in secret mode.
	signalSecret bool

	// p is the currently attached P for executing Go code, nil if not executing user Go code.
	//
	// A non-nil p implies exclusive ownership of the P, unless curg is in _Gsyscall.
	// In _Gsyscall the scheduler may mutate this instead. The point of synchronization
	// is the _Gscan bit on curg's status. The scheduler must arrange to prevent curg
	// from transitioning out of _Gsyscall if it intends to mutate p.
	p     puintptr
	nextp puintptr // The next P to install before executing. Implies exclusive ownership of this P.
	oldp  puintptr // The P that was attached before executing a syscall.

	id              int64
	mallocing       int32  // non-zero while allocating; guards against reentrant malloc — TODO confirm semantics
	throwing        throwType
	preemptoff      string // if != "", keep curg running on this m
	locks           int32
	dying           int32
	profilehz       int32
	spinning        bool // m is out of work and is actively looking for work
	blocked         bool // m is blocked on a note
	newSigstack     bool // minit on C thread called sigaltstack
	printlock       int8
	incgo           bool          // m is executing a cgo call
	isextra         bool          // m is an extra m
	isExtraInC      bool          // m is an extra m that does not have any Go frames
	isExtraInSig    bool          // m is an extra m in a signal handler
	freeWait        atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
	needextram      bool
	g0StackAccurate bool // whether the g0 stack has accurate bounds
	traceback       uint8
	allpSnapshot    []*p          // Snapshot of allp for use after dropping P in findRunnable, nil otherwise.
	ncgocall        uint64        // number of cgo calls in total
	ncgo            int32         // number of cgo calls currently in progress
	cgoCallersUse   atomic.Uint32 // if non-zero, cgoCallers in use temporarily
	cgoCallers      *cgoCallers   // cgo traceback if crashing in cgo call
	park            note
	alllink         *m // on allm
	schedlink       muintptr
	idleNode        listNodeManual
	lockedg         guintptr
	createstack     [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
	lockedExt       uint32      // tracking for external LockOSThread
	lockedInt       uint32      // tracking for internal lockOSThread
	mWaitList       mWaitList   // list of runtime lock waiters
	ditEnabled      bool        // set if DIT is currently enabled on this M

	mLockProfile mLockProfile // fields relating to runtime.lock contention
	profStack    []uintptr    // used for memory/block/mutex stack traces

	// wait* are used to carry arguments from gopark into park_m, because
	// there's no stack to put them on. That is their sole purpose.
	waitunlockf          func(*g, unsafe.Pointer) bool
	waitlock             unsafe.Pointer
	waitTraceSkip        int
	waitTraceBlockReason traceBlockReason

	syscalltick uint32
	freelink    *m // on sched.freem
	trace       mTraceState

	// These are here to avoid using the G stack so the stack can move during the call.
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr

	winsyscall winlibcall // stores syscall parameters on windows

	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	vdsoPC uintptr // PC for traceback while in VDSO call

	// preemptGen counts the number of completed preemption
	// signals. This is used to detect when a preemption is
	// requested, but fails.
	preemptGen atomic.Uint32

	// Whether this is a pending preemption signal on this M.
	signalPending atomic.Uint32

	// pcvalue lookup cache
	pcvalueCache pcvalueCache

	dlogPerM

	mOS

	chacha8     chacha8rand.State
	cheaprand   uint32
	cheaprand64 uint64

	// Up to 10 locks held by this m, maintained by the lock ranking code.
	locksHeldLen int
	locksHeld    [10]heldLockInfo

	// self points this M until mexit clears it to return nil.
	self mWeakPointer
}
const mRedZoneSize = (16 << 3) * asanenabledBit // redZoneSize(2048)
// mPadded is an m padded out to a specific allocation size class.
// Do not change the padding expression without understanding the
// size-class constraint described below.
type mPadded struct {
	m

	// Size the runtime.m structure so it fits in the 2048-byte size class, and
	// not in the next-smallest (1792-byte) size class. That leaves the 11 low
	// bits of muintptr values available for flags, as required by
	// lock_spinbit.go.
	_ [(1 - goarch.IsWasm) * (2048 - mallocHeaderSize - mRedZoneSize - unsafe.Sizeof(m{}))]byte
}
// mWeakPointer is a "weak" pointer to an M. A weak pointer for each M is
// available as m.self. Users may copy mWeakPointer arbitrarily, and get will
// return the M if it is still live, or nil after mexit.
//
// The zero value is treated as a nil pointer.
//
// Note that get may race with M exit. A successful get will keep the m object
// alive, but the M itself may be exited and thus not actually usable.
type mWeakPointer struct {
	// m is shared by all copies of this mWeakPointer, so clearing it
	// in one copy clears it in all of them.
	m *atomic.Pointer[m]
}
// newMWeakPointer returns a weak pointer that initially refers to mp.
func newMWeakPointer(mp *m) mWeakPointer {
	p := new(atomic.Pointer[m])
	p.Store(mp)
	return mWeakPointer{m: p}
}
// get returns the referenced M, or nil if the pointer is the zero value
// or has been cleared.
func (w mWeakPointer) get() *m {
	if w.m != nil {
		return w.m.Load()
	}
	return nil
}
// clear sets the weak pointer to nil. It cannot be used on zero value
// mWeakPointers.
func (w mWeakPointer) clear() {
	// Clearing affects every copy of this mWeakPointer, since they
	// all share the same atomic cell.
	w.m.Store(nil)
}
// A p represents a logical processor: the resource required to execute
// user Go code. See the scheduler design described in proc.go.
type p struct {
	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	pcache      pageCache
	raceprocctx uintptr

	// oldm is the previous m this p ran on.
	//
	// We are not associated with this m, so we have no control over its
	// lifecycle. This value is an m.self object which points to the m
	// until the m exits.
	//
	// Note that this m may be idle, running, or exiting. It should only be
	// used with mgetSpecific, which will take ownership of the m only if
	// it is idle.
	oldm mWeakPointer

	deferpool    []*_defer // pool of available defer structs (see panic.go)
	deferpoolbuf [32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	//
	// Note that while other P's may atomically CAS this to zero,
	// only the owner P can CAS it to a valid G.
	runnext guintptr

	// Available G's (status == Gdead)
	gFree gList

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	// Cache of mspan objects from the heap.
	mspancache struct {
		// We need an explicit length here because this field is used
		// in allocation codepaths where write barriers are not allowed,
		// and eliminating the write barrier/keeping it eliminated from
		// slice updates is tricky, more so than just managing the length
		// ourselves.
		len int
		buf [128]*mspan
	}

	// Cache of a single pinner object to reduce allocations from repeated
	// pinner creation.
	pinnerCache *pinner

	trace pTraceState

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime         int64        // Nanoseconds in assistAlloc
	gcFractionalMarkTime atomic.Int64 // Nanoseconds in fractional mark worker

	// limiterEvent tracks events for the GC CPU limiter.
	limiterEvent limiterEvent

	// gcMarkWorkerMode is the mode for the next mark worker to run in.
	// That is, this is used to communicate with the worker goroutine
	// selected for immediate execution by
	// gcController.findRunnableGCWorker. When scheduling other goroutines,
	// this field must be set to gcMarkWorkerNotWorker.
	gcMarkWorkerMode gcMarkWorkerMode
	// gcMarkWorkerStartTime is the nanotime() at which the most recent
	// mark worker started.
	gcMarkWorkerStartTime int64

	// nextGCMarkWorker is the next mark worker to run. This may be set
	// during start-the-world to assign a worker to this P. The P runs this
	// worker on the next call to gcController.findRunnableGCWorker. If the
	// P runs something else or stops, it must release this worker via
	// gcController.releaseNextGCMarkWorker.
	//
	// See comment in gcBgMarkWorker about the lifetime of
	// gcBgMarkWorkerNode.
	//
	// Only accessed by this P or during STW.
	nextGCMarkWorker *gcBgMarkWorkerNode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	// statsSeq is a counter indicating whether this P is currently
	// writing any stats. Its value is even when not, odd when it is.
	statsSeq atomic.Uint32

	// Timer heap.
	timers timers

	// Cleanups.
	cleanups       *cleanupBlock
	cleanupsQueued uint64 // monotonic count of cleanups queued by this P

	// maxStackScanDelta accumulates the amount of stack space held by
	// live goroutines (i.e. those eligible for stack scanning).
	// Flushed to gcController.maxStackScan once maxStackScanSlack
	// or -maxStackScanSlack is reached.
	maxStackScanDelta int64

	// gc-time statistics about current goroutines
	// Note that this differs from maxStackScan in that this
	// accumulates the actual stack observed to be used at GC time (hi - sp),
	// not an instantaneous measure of the total stack size that might need
	// to be scanned (hi - lo).
	scannedStackSize uint64 // stack size of goroutines scanned by this P
	scannedStacks    uint64 // number of goroutines scanned by this P

	// preempt is set to indicate that this P should enter the
	// scheduler ASAP (regardless of what G is running on it).
	preempt bool

	// gcStopTime is the nanotime timestamp that this P last entered _Pgcstop.
	gcStopTime int64

	// goroutinesCreated is the total count of goroutines created by this P.
	goroutinesCreated uint64

	// xRegs is the per-P extended register state used by asynchronous
	// preemption. This is an empty struct on platforms that don't use extended
	// register state.
	xRegs xRegPerP

	// Padding is no longer needed. False sharing is now not a worry because p is large enough
	// that its size class is an integer multiple of the cache line size (for any of our architectures).
}
// schedt holds global scheduler state. There is a single instance, sched.
// Most fields are protected by sched.lock unless noted otherwise.
type schedt struct {
	goidgen    atomic.Uint64
	lastpoll   atomic.Int64 // time of last network poll, 0 if currently polling
	pollUntil  atomic.Int64 // time to which current poll is sleeping
	pollingNet atomic.Int32 // 1 if some P doing non-blocking network poll

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        listHeadManual // idle m's waiting for work
	nmidle       int32          // number of idle m's waiting for work
	nmidlelocked int32          // number of locked m's waiting for work
	mnext        int64          // number of m's that have been created and next M ID
	maxmcount    int32          // maximum number of m's allowed (or die)
	nmsys        int32          // number of system m's not counted for deadlock
	nmfreed      int64          // cumulative number of freed m's

	ngsys        atomic.Int32 // number of system goroutines
	nGsyscallNoP atomic.Int32 // number of goroutines in syscalls without a P but whose M is not isExtraInC

	pidle        puintptr // idle p's
	npidle       atomic.Int32
	nmspinning   atomic.Int32  // See "Worker thread parking/unparking" comment in proc.go.
	needspinning atomic.Uint32 // See "Delicate dance" comment in proc.go. Boolean. Must hold sched.lock to set to 1.

	// Global runnable queue.
	runq gQueue

	// disable controls selective disabling of the scheduler.
	//
	// Use schedEnableUser to control this.
	//
	// disable is protected by sched.lock.
	disable struct {
		// user disables scheduling of user goroutines.
		user     bool
		runnable gQueue // pending runnable Gs
	}

	// Global cache of dead G's.
	gFree struct {
		lock    mutex
		stack   gList // Gs with stacks
		noStack gList // Gs without stacks
	}

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs.
	deferlock mutex
	deferpool *_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  atomic.Bool // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait atomic.Bool
	sysmonnote note

	// safePointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime   int64 // nanotime() of last change to gomaxprocs
	totaltime        int64 // ∫gomaxprocs dt up to procresizetime
	customGOMAXPROCS bool  // GOMAXPROCS was manually set from the environment or runtime.GOMAXPROCS

	// sysmonlock protects sysmon's actions on the runtime.
	//
	// Acquire and hold this mutex to block sysmon from interacting
	// with the rest of the runtime.
	sysmonlock mutex

	// timeToRun is a distribution of scheduling latencies, defined
	// as the sum of time a G spends in the _Grunnable state before
	// it transitions to _Grunning.
	timeToRun timeHistogram

	// idleTime is the total CPU time Ps have "spent" idle.
	//
	// Reset on each GC cycle.
	idleTime atomic.Int64

	// totalMutexWaitTime is the sum of time goroutines have spent in _Gwaiting
	// with a waitreason of the form waitReasonSync{RW,}Mutex{R,}Lock.
	totalMutexWaitTime atomic.Int64

	// stwStoppingTimeGC/Other are distributions of stop-the-world stopping
	// latencies, defined as the time taken by stopTheWorldWithSema to get
	// all Ps to stop. stwStoppingTimeGC covers all GC-related STWs,
	// stwStoppingTimeOther covers the others.
	stwStoppingTimeGC    timeHistogram
	stwStoppingTimeOther timeHistogram

	// stwTotalTimeGC/Other are distributions of stop-the-world total
	// latencies, defined as the total time from stopTheWorldWithSema to
	// startTheWorldWithSema. This is a superset of
	// stwStoppingTimeGC/Other. stwTotalTimeGC covers all GC-related STWs,
	// stwTotalTimeOther covers the others.
	stwTotalTimeGC    timeHistogram
	stwTotalTimeOther timeHistogram

	// totalRuntimeLockWaitTime (plus the value of lockWaitTime on each M in
	// allm) is the sum of time goroutines have spent in _Grunnable and with an
	// M, but waiting for locks within the runtime. This field stores the value
	// for Ms that have exited.
	totalRuntimeLockWaitTime atomic.Int64

	// goroutinesCreated (plus the value of goroutinesCreated on each P in allp)
	// is the sum of all goroutines created by the program.
	goroutinesCreated atomic.Uint64
}
// Values for the flags field of a sigTabT. These are bit flags and may be
// combined for a single signal.
const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
	_SigUnblock              // always unblock; see blockableSig
	_SigIgn                  // _SIG_DFL action is to ignore the signal
)
// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
//
// Do not reorder or resize fields: the layout is fixed by the linker.
type _func struct {
	sys.NotInHeap // Only in static data

	entryOff uint32 // start pc, as offset from moduledata.text
	nameOff  int32  // function name, as index into moduledata.funcnametab.

	args        int32  // in/out args size
	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.

	pcsp      uint32
	pcfile    uint32
	pcln      uint32
	npcdata   uint32
	cuOffset  uint32     // runtime.cutab offset of this function's CU
	startLine int32      // line number of start of function (func keyword/TEXT directive)
	funcID    abi.FuncID // set for certain special runtime functions
	flag      abi.FuncFlag
	_         [1]byte // pad
	nfuncdata uint8   // must be last, must end on a uint32-aligned boundary

	// The end of the struct is followed immediately by two variable-length
	// arrays that reference the pcdata and funcdata locations for this
	// function.

	// pcdata contains the offset into moduledata.pctab for the start of
	// that index's table. e.g.,
	// &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
	// the unsafe point table.
	//
	// An offset of 0 indicates that there is no table.
	//
	// pcdata [npcdata]uint32

	// funcdata contains the offset past moduledata.gofunc which contains a
	// pointer to that index's funcdata. e.g.,
	// *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
	// the argument pointer map.
	//
	// An offset of ^uint32(0) indicates that there is no entry.
	//
	// funcdata [nfuncdata]uint32
}
// Pseudo-Func that is returned for PCs that occur in inlined code.
// A *Func can be either a *_func or a *funcinl, and they are distinguished
// by the first uintptr.
//
// TODO(austin): Can we merge this with inlinedCall?
type funcinl struct {
	ones      uint32  // set to ^0 to distinguish from _func
	entry     uintptr // entry of the real (the "outermost") frame
	name      string
	file      string
	line      int32
	startLine int32
}
type itab = abi.ITab
// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr // push count; NOTE(review): presumably an ABA-prevention counter — confirm in lfstack.go
}
// forcegcstate holds the state of the forced-GC helper goroutine.
type forcegcstate struct {
	lock mutex
	g    *g          // the forcegc helper goroutine
	idle atomic.Bool // whether the helper is currently idle
}
// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in deferProcStack.
// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	heap      bool
	rangefunc bool    // true for rangefunc list
	sp        uintptr // sp at time of defer
	pc        uintptr // pc at time of defer
	fn        func()  // can be nil for open-coded defers
	link      *_defer // next defer on G; can point to either heap or stack!

	// If rangefunc is true, *head is the head of the atomic linked list
	// during a range-over-func execution.
	head *atomic.Pointer[_defer]
}
// A _panic holds information about an active panic.
//
// A _panic value must only ever live on the stack.
//
// The gopanicFP and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
type _panic struct {
	arg  any    // argument to panic
	link *_panic // link to earlier panic

	// startPC and startSP track where _panic.start was called.
	// (These are the SP and PC of the gopanic frame itself.)
	startPC uintptr
	startSP unsafe.Pointer

	// The current stack frame that we're running deferred calls for.
	pc uintptr
	sp unsafe.Pointer
	fp unsafe.Pointer

	// retpc stores the PC where the panic should jump back to, if the
	// function last returned by _panic.nextDefer() recovers the panic.
	retpc uintptr

	// Extra state for handling open-coded defers.
	deferBitsPtr *uint8
	slotsPtr     unsafe.Pointer

	recovered   bool // whether this panic has been recovered
	repanicked  bool // whether this panic repanicked
	goexit      bool
	deferreturn bool

	gopanicFP unsafe.Pointer // frame pointer of the gopanic frame
}
// savedOpenDeferState tracks the extra state from _panic that's
// necessary for deferreturn to pick up where gopanic left off,
// without needing to unwind the stack.
type savedOpenDeferState struct {
	retpc           uintptr
	deferBitsOffset uintptr
	slotsOffset     uintptr
}
// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid uint64    // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}
// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
//
// waitReason is unsigned; values index into waitReasonStrings.
type waitReason uint8
const (
	waitReasonZero                  waitReason = iota // ""
	waitReasonGCAssistMarking                         // "GC assist marking"
	waitReasonIOWait                                  // "IO wait"
	waitReasonDumpingHeap                             // "dumping heap"
	waitReasonGarbageCollection                       // "garbage collection"
	waitReasonGarbageCollectionScan                   // "garbage collection scan"
	waitReasonPanicWait                               // "panicwait"
	waitReasonGCAssistWait                            // "GC assist wait"
	waitReasonGCSweepWait                             // "GC sweep wait"
	waitReasonGCScavengeWait                          // "GC scavenge wait"
	waitReasonFinalizerWait                           // "finalizer wait"
	waitReasonForceGCIdle                             // "force gc (idle)"
	waitReasonUpdateGOMAXPROCSIdle                    // "GOMAXPROCS updater (idle)"
	waitReasonSemacquire                              // "semacquire"
	waitReasonSleep                                   // "sleep"
	waitReasonChanReceiveNilChan                      // "chan receive (nil chan)"
	waitReasonChanSendNilChan                         // "chan send (nil chan)"
	waitReasonSelectNoCases                           // "select (no cases)"
	waitReasonSelect                                  // "select"
	waitReasonChanReceive                             // "chan receive"
	waitReasonChanSend                                // "chan send"
	// The waitReasonSync* reasons from waitReasonSyncCondWait through
	// waitReasonSyncWaitGroupWait must remain contiguous: isSyncWait
	// tests membership with a range check over this span.
	waitReasonSyncCondWait            // "sync.Cond.Wait"
	waitReasonSyncMutexLock           // "sync.Mutex.Lock"
	waitReasonSyncRWMutexRLock        // "sync.RWMutex.RLock"
	waitReasonSyncRWMutexLock         // "sync.RWMutex.Lock"
	waitReasonSyncWaitGroupWait       // "sync.WaitGroup.Wait"
	waitReasonTraceReaderBlocked      // "trace reader (blocked)"
	waitReasonWaitForGCCycle          // "wait for GC cycle"
	waitReasonGCWorkerIdle            // "GC worker (idle)"
	waitReasonGCWorkerActive          // "GC worker (active)"
	waitReasonPreempted               // "preempted"
	waitReasonDebugCall               // "debug call"
	waitReasonGCMarkTermination       // "GC mark termination"
	waitReasonStoppingTheWorld        // "stopping the world"
	waitReasonFlushProcCaches         // "flushing proc caches"
	waitReasonTraceGoroutineStatus    // "trace goroutine status"
	waitReasonTraceProcStatus         // "trace proc status"
	waitReasonPageTraceFlush          // "page trace flush"
	waitReasonCoroutine               // "coroutine"
	waitReasonGCWeakToStrongWait      // "GC weak to strong wait"
	waitReasonSynctestRun             // "synctest.Run"
	waitReasonSynctestWait            // "synctest.Wait"
	waitReasonSynctestChanReceive     // "chan receive (durable)"
	waitReasonSynctestChanSend        // "chan send (durable)"
	waitReasonSynctestSelect          // "select (durable)"
	waitReasonSynctestWaitGroupWait   // "sync.WaitGroup.Wait (durable)"
	waitReasonCleanupWait             // "cleanup wait"
)
// waitReasonStrings maps each waitReason to its user-visible string.
// Every waitReason constant must have an entry here.
var waitReasonStrings = [...]string{
	waitReasonZero:                  "",
	waitReasonGCAssistMarking:       "GC assist marking",
	waitReasonIOWait:                "IO wait",
	waitReasonChanReceiveNilChan:    "chan receive (nil chan)",
	waitReasonChanSendNilChan:       "chan send (nil chan)",
	waitReasonDumpingHeap:           "dumping heap",
	waitReasonGarbageCollection:     "garbage collection",
	waitReasonGarbageCollectionScan: "garbage collection scan",
	waitReasonPanicWait:             "panicwait",
	waitReasonSelect:                "select",
	waitReasonSelectNoCases:         "select (no cases)",
	waitReasonGCAssistWait:          "GC assist wait",
	waitReasonGCSweepWait:           "GC sweep wait",
	waitReasonGCScavengeWait:        "GC scavenge wait",
	waitReasonChanReceive:           "chan receive",
	waitReasonChanSend:              "chan send",
	waitReasonFinalizerWait:         "finalizer wait",
	waitReasonForceGCIdle:           "force gc (idle)",
	waitReasonUpdateGOMAXPROCSIdle:  "GOMAXPROCS updater (idle)",
	waitReasonSemacquire:            "semacquire",
	waitReasonSleep:                 "sleep",
	waitReasonSyncCondWait:          "sync.Cond.Wait",
	waitReasonSyncMutexLock:         "sync.Mutex.Lock",
	waitReasonSyncRWMutexRLock:      "sync.RWMutex.RLock",
	waitReasonSyncRWMutexLock:       "sync.RWMutex.Lock",
	waitReasonSyncWaitGroupWait:     "sync.WaitGroup.Wait",
	waitReasonTraceReaderBlocked:    "trace reader (blocked)",
	waitReasonWaitForGCCycle:        "wait for GC cycle",
	waitReasonGCWorkerIdle:          "GC worker (idle)",
	waitReasonGCWorkerActive:        "GC worker (active)",
	waitReasonPreempted:             "preempted",
	waitReasonDebugCall:             "debug call",
	waitReasonGCMarkTermination:     "GC mark termination",
	waitReasonStoppingTheWorld:      "stopping the world",
	waitReasonFlushProcCaches:       "flushing proc caches",
	waitReasonTraceGoroutineStatus:  "trace goroutine status",
	waitReasonTraceProcStatus:       "trace proc status",
	waitReasonPageTraceFlush:        "page trace flush",
	waitReasonCoroutine:             "coroutine",
	waitReasonGCWeakToStrongWait:    "GC weak to strong wait",
	waitReasonSynctestRun:           "synctest.Run",
	waitReasonSynctestWait:          "synctest.Wait",
	waitReasonSynctestChanReceive:   "chan receive (durable)",
	waitReasonSynctestChanSend:      "chan send (durable)",
	waitReasonSynctestSelect:        "select (durable)",
	waitReasonSynctestWaitGroupWait: "sync.WaitGroup.Wait (durable)",
	waitReasonCleanupWait:           "cleanup wait",
}
// String returns the human-readable description of the wait reason, or
// "unknown wait reason" for values outside waitReasonStrings.
func (w waitReason) String() string {
	// waitReason is an unsigned type, so only the upper bound needs
	// checking; the previous `w < 0` comparison was always false
	// (staticcheck SA4003).
	if w >= waitReason(len(waitReasonStrings)) {
		return "unknown wait reason"
	}
	return waitReasonStrings[w]
}
// isMutexWait returns true if the goroutine is blocked because of
// sync.Mutex.Lock or sync.RWMutex.[R]Lock.
//
//go:nosplit
func (w waitReason) isMutexWait() bool {
	switch w {
	case waitReasonSyncMutexLock,
		waitReasonSyncRWMutexRLock,
		waitReasonSyncRWMutexLock:
		return true
	}
	return false
}
// isSyncWait returns true if the goroutine is blocked because of
// sync library primitive operations. It relies on the waitReasonSync*
// constants being declared contiguously.
//
//go:nosplit
func (w waitReason) isSyncWait() bool {
	return w >= waitReasonSyncCondWait && w <= waitReasonSyncWaitGroupWait
}
// isChanWait is true if the goroutine is blocked because of non-nil
// channel operations or a select statement with at least one case.
//
//go:nosplit
func (w waitReason) isChanWait() bool {
	switch w {
	case waitReasonSelect, waitReasonChanReceive, waitReasonChanSend:
		return true
	}
	return false
}
// isWaitingForSuspendG reports whether this wait reason is only used to
// allow suspendG to take ownership of the goroutine's stack; see the
// isWaitingForSuspendG table for details.
func (w waitReason) isWaitingForSuspendG() bool {
	return isWaitingForSuspendG[w]
}
// isWaitingForSuspendG indicates that a goroutine is only entering _Gwaiting and
// setting a waitReason because it needs to be able to let the suspendG
// (used by the GC and the execution tracer) take ownership of its stack.
// The G is always actually executing on the system stack in these cases.
//
// TODO(mknyszek): Consider replacing this with a new dedicated G status.
var isWaitingForSuspendG = [len(waitReasonStrings)]bool{
	waitReasonStoppingTheWorld:      true,
	waitReasonGCMarkTermination:     true,
	waitReasonGarbageCollection:     true,
	waitReasonGarbageCollectionScan: true,
	waitReasonTraceGoroutineStatus:  true,
	waitReasonTraceProcStatus:       true,
	waitReasonPageTraceFlush:        true,
	waitReasonGCAssistMarking:       true,
	waitReasonGCWorkerActive:        true,
	waitReasonFlushProcCaches:       true,
}
// isIdleInSynctest reports whether a goroutine blocked for this reason is
// considered idle by synctest.Wait; see the isIdleInSynctest table.
func (w waitReason) isIdleInSynctest() bool {
	return isIdleInSynctest[w]
}
// isIdleInSynctest indicates that a goroutine is considered idle by synctest.Wait.
var isIdleInSynctest = [len(waitReasonStrings)]bool{
	waitReasonChanReceiveNilChan:    true,
	waitReasonChanSendNilChan:       true,
	waitReasonSelectNoCases:         true,
	waitReasonSleep:                 true,
	waitReasonSyncCondWait:          true,
	waitReasonSynctestWaitGroupWait: true,
	waitReasonCoroutine:             true,
	waitReasonSynctestRun:           true,
	waitReasonSynctestWait:          true,
	waitReasonSynctestChanReceive:   true,
	waitReasonSynctestChanSend:      true,
	waitReasonSynctestSelect:        true,
}
var (
	// Linked-list of all Ms. Written under sched.lock, read atomically.
	allm *m

	gomaxprocs    int32
	numCPUStartup int32 // number of CPUs observed at startup — TODO confirm against osinit
	forcegc       forcegcstate
	sched         schedt
	newprocs      int32
)
var (
	// allpLock protects P-less reads and size changes of allp, idlepMask,
	// and timerpMask, and all writes to allp.
	allpLock mutex

	// len(allp) == gomaxprocs; may change at safe points, otherwise
	// immutable.
	allp []*p

	// Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
	// be atomic. Length may change at safe points.
	//
	// Each P must update only its own bit. In order to maintain
	// consistency, a P going idle must set the idle mask simultaneously with
	// updates to the idle P list under the sched.lock, otherwise a racing
	// pidleget may clear the mask before pidleput sets the mask,
	// corrupting the bitmap.
	//
	// N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.
	idlepMask pMask

	// Bitmask of Ps that may have a timer, one bit per P. Reads and writes
	// must be atomic. Length may change at safe points.
	//
	// Ideally, the timer mask would be kept immediately consistent on any timer
	// operations. Unfortunately, updating a shared global data structure in the
	// timer hot path adds too much overhead in applications frequently switching
	// between no timers and some timers.
	//
	// As a compromise, the timer mask is updated only on pidleget / pidleput. A
	// running P (returned by pidleget) may add a timer at any time, so its mask
	// must be set. An idle P (passed to pidleput) cannot add new timers while
	// idle, so if it has no timers at that time, its mask may be cleared.
	//
	// Thus, we get the following effects on timer-stealing in findRunnable:
	//
	// - Idle Ps with no timers when they go idle are never checked in findRunnable
	//   (for work- or timer-stealing; this is the ideal case).
	// - Running Ps must always be checked.
	// - Idle Ps whose timers are stolen must continue to be checked until they run
	//   again, even after timer expiration.
	//
	// When the P starts running again, the mask should be set, as a timer may be
	// added at any time.
	//
	// TODO(prattmic): Additional targeted updates may improve the above cases.
	// e.g., updating the mask when stealing a timer.
	timerpMask pMask
)
// goarmsoftfp is used by runtime/cgo assembly.
//
// NOTE(review): this doc comment and the linkname directive below name
// goarmsoftfp, which is declared in a later var block, not in the block
// that immediately follows — consider moving them next to that
// declaration. The directive is left in place here to avoid any
// behavior change.
//
//go:linkname goarmsoftfp
var (
	// Pool of GC parked background workers. Entries are type
	// *gcBgMarkWorkerNode.
	gcBgMarkWorkerPool lfstack

	// Total number of gcBgMarkWorker goroutines. Protected by worldsema.
	gcBgMarkWorkerCount int32

	// Information about what cpu features are available.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	// Set on startup in asm_{386,amd64}.s
	processorVersionInfo uint32
	isIntel              bool
)
// set by cmd/link on arm systems
// accessed using linkname by internal/runtime/atomic.
//
// goarm should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/creativeprojects/go-selfupdate
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname goarm
var (
	goarm       uint8
	goarmsoftfp uint8 // non-zero selects the softfloat ABI — TODO confirm against cmd/link
)
// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)
// Must agree with internal/buildcfg.FramePointerEnabled.
// True only on architectures where the compiler maintains frame pointers.
const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"
// getcallerfp returns the frame pointer of the caller of the caller
// of this function. Returns 0 if frame pointers are unavailable.
//
//go:nosplit
//go:noinline
func getcallerfp() uintptr {
	frame := getfp() // This frame's FP.
	if frame == 0 {
		return 0
	}
	frame = *(*uintptr)(unsafe.Pointer(frame)) // The caller's FP.
	return *(*uintptr)(unsafe.Pointer(frame))  // The caller's caller's FP.
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import _ "unsafe" // for go:linkname
//go:linkname boring_runtime_arg0 crypto/internal/boring.runtime_arg0
func boring_runtime_arg0() string {
	// On Windows, argslice is not set, and it's too much work to find argv0.
	if len(argslice) > 0 {
		return argslice[0]
	}
	return ""
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
package runtime
import "unsafe"
var _cgo_clearenv unsafe.Pointer // pointer to C function
// clearenv_c clears the C environment if cgo is loaded; it is a no-op
// otherwise.
func clearenv_c() {
	if _cgo_clearenv != nil {
		asmcgocall(_cgo_clearenv, nil)
	}
}
// syscall_runtimeClearenv clears the C environment and, if GODEBUG was
// among the removed variables, resets the runtime's cached GODEBUG state.
//
//go:linkname syscall_runtimeClearenv syscall.runtimeClearenv
func syscall_runtimeClearenv(env map[string]int) {
	clearenv_c()
	// Did we just unset GODEBUG?
	_, hadGodebug := env["GODEBUG"]
	if hadGodebug {
		godebugEnv.Store(nil)
		godebugNotify(true)
	}
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/atomic"
)
// This is a copy of sync/rwmutex.go rewritten to work in the runtime.

// A rwmutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers or a single writer.
// This is a variant of sync.RWMutex, for the runtime package.
// Like mutex, rwmutex blocks the calling M.
// It does not interact with the goroutine scheduler.
type rwmutex struct {
	rLock      mutex    // protects readers, readerPass, writer
	readers    muintptr // list of pending readers
	readerPass uint32   // number of pending readers to skip readers list

	wLock  mutex    // serializes writers
	writer muintptr // pending writer waiting for completing readers

	readerCount atomic.Int32 // number of pending readers; offset by -rwmutexMaxReaders while a writer is pending (see lock)
	readerWait  atomic.Int32 // number of departing readers

	readRank lockRank // semantic lock rank for read locking
}
// Lock ranking an rwmutex has two aspects:
//
// Semantic ranking: this rwmutex represents some higher level lock that
// protects some resource (e.g., allocmLock protects creation of new Ms). The
// read and write locks of that resource need to be represented in the lock
// rank.
//
// Internal ranking: as an implementation detail, rwmutex uses two mutexes:
// rLock and wLock. These have lock order requirements: wLock must be locked
// before rLock. This also needs to be represented in the lock rank.
//
// Semantic ranking is represented by acquiring readRank during read lock and
// writeRank during write lock.
//
// wLock is held for the duration of a write lock, so it uses writeRank
// directly, both for semantic and internal ranking. rLock is only held
// temporarily inside the rlock/lock methods, so it uses readRankInternal to
// represent internal ranking. Semantic ranking is represented by a separate
// acquire of readRank for the duration of a read lock.
//
// The lock ranking must document this ordering:
// - readRankInternal is a leaf lock.
// - readRank is taken before readRankInternal.
// - writeRank is taken before readRankInternal.
// - readRank is placed in the lock order wherever a read lock of this rwmutex
// belongs.
// - writeRank is placed in the lock order wherever a write lock of this
// rwmutex belongs.
// init initializes rw's lock ranking: readRank is the semantic rank
// acquired for the duration of a read lock, readRankInternal ranks
// the internal rLock, and writeRank ranks wLock (serving both the
// semantic and internal role for writes). See the ordering
// requirements documented above.
func (rw *rwmutex) init(readRank, readRankInternal, writeRank lockRank) {
	rw.readRank = readRank

	lockInit(&rw.rLock, readRankInternal)
	lockInit(&rw.wLock, writeRank)
}
// rwmutexMaxReaders bounds the number of concurrent readers; it also
// serves as the readerCount offset that announces a pending writer
// (see lock/unlock).
const rwmutexMaxReaders = 1 << 30
// rlock locks rw for reading.
//
// If a writer is pending (readerCount went negative after the Add),
// the calling M itself — not just the goroutine — parks on m.park
// until the writer wakes it, or consumes a pass left in readerPass.
func (rw *rwmutex) rlock() {
	// The reader must not be allowed to lose its P or else other
	// things blocking on the lock may consume all of the Ps and
	// deadlock (issue #20903). Alternatively, we could drop the P
	// while sleeping.
	acquireLockRankAndM(rw.readRank)
	lockWithRankMayAcquire(&rw.rLock, getLockRank(&rw.rLock))
	if rw.readerCount.Add(1) < 0 {
		// A writer is pending. Park on the reader queue.
		systemstack(func() {
			lock(&rw.rLock)
			if rw.readerPass > 0 {
				// Writer finished.
				rw.readerPass -= 1
				unlock(&rw.rLock)
			} else {
				// Queue this reader to be woken by
				// the writer.
				m := getg().m
				m.schedlink = rw.readers
				rw.readers.set(m)
				unlock(&rw.rLock)
				notesleep(&m.park)
				noteclear(&m.park)
			}
		})
	}
}
// runlock undoes a single rlock call on rw.
//
// When a writer is pending, the last departing reader (readerWait
// reaching zero) wakes the parked writer.
func (rw *rwmutex) runlock() {
	if r := rw.readerCount.Add(-1); r < 0 {
		if r+1 == 0 || r+1 == -rwmutexMaxReaders {
			throw("runlock of unlocked rwmutex")
		}
		// A writer is pending.
		if rw.readerWait.Add(-1) == 0 {
			// The last reader unblocks the writer.
			lock(&rw.rLock)
			w := rw.writer.ptr()
			if w != nil {
				notewakeup(&w.park)
			}
			unlock(&rw.rLock)
		}
	}
	releaseLockRankAndM(rw.readRank)
}
// lock locks rw for writing.
//
// It announces the pending write by offsetting readerCount by
// -rwmutexMaxReaders (r recovers the count of readers still active at
// that instant), then parks the M until the last active reader wakes
// it (see runlock). If no readers are active, it proceeds immediately.
func (rw *rwmutex) lock() {
	// Resolve competition with other writers and stick to our P.
	lock(&rw.wLock)
	m := getg().m
	// Announce that there is a pending writer.
	r := rw.readerCount.Add(-rwmutexMaxReaders) + rwmutexMaxReaders
	// Wait for any active readers to complete.
	lock(&rw.rLock)
	if r != 0 && rw.readerWait.Add(r) != 0 {
		// Wait for reader to wake us up.
		systemstack(func() {
			rw.writer.set(m)
			unlock(&rw.rLock)
			notesleep(&m.park)
			noteclear(&m.park)
		})
	} else {
		unlock(&rw.rLock)
	}
}
// unlock unlocks rw for writing.
//
// It wakes every reader that queued while the write lock was held,
// and credits readerPass for pending readers that incremented
// readerCount but have not queued yet, so they skip parking.
func (rw *rwmutex) unlock() {
	// Announce to readers that there is no active writer.
	r := rw.readerCount.Add(rwmutexMaxReaders)
	if r >= rwmutexMaxReaders {
		throw("unlock of unlocked rwmutex")
	}
	// Unblock blocked readers.
	lock(&rw.rLock)
	for rw.readers.ptr() != nil {
		reader := rw.readers.ptr()
		rw.readers = reader.schedlink
		reader.schedlink.set(nil)
		notewakeup(&reader.park)
		r -= 1
	}
	// If r > 0, there are pending readers that aren't on the
	// queue. Tell them to skip waiting.
	rw.readerPass += uint32(r)
	unlock(&rw.rLock)
	// Allow other writers to proceed.
	unlock(&rw.wLock)
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (amd64 || arm64) && linux
package runtime
import (
"internal/goarch"
"unsafe"
)
// secret_count reports the current goroutine's secret-mode nesting
// count.
//
//go:linkname secret_count runtime/secret.count
func secret_count() int32 {
	gp := getg()
	return gp.secret
}
// secret_inc increments the current goroutine's secret-mode nesting
// count.
//
//go:linkname secret_inc runtime/secret.inc
func secret_inc() {
	getg().secret++
}
// secret_dec decrements the current goroutine's secret-mode nesting
// count.
//
//go:linkname secret_dec runtime/secret.dec
func secret_dec() {
	getg().secret--
}
// secret_eraseSecrets zeroes the goroutine's stack, from stack.lo up
// to just below the saved system-stack SP, so that secrets spilled to
// the stack do not linger after secret mode ends.
//
//go:linkname secret_eraseSecrets runtime/secret.eraseSecrets
func secret_eraseSecrets() {
	// zero all the stack memory that might be dirtied with
	// secrets. We do this from the systemstack so that we
	// don't have to figure out which holes we have to keep
	// to ensure that we can return from memclr. gp.sched will
	// act as a pigeonhole for our actual return.
	lo := getg().stack.lo
	systemstack(func() {
		// Note, this systemstack call happens within the secret mode,
		// so we don't have to call out to erase our registers, the systemstack
		// code will do that.
		mp := acquirem()
		sp := mp.curg.sched.sp
		// we need to keep systemstack return on top of the stack being cleared
		// for traceback
		sp -= goarch.PtrSize
		// TODO: keep some sort of low water mark so that we don't have
		// to zero a potentially large stack if we used just a little
		// bit of it. That will allow us to use a higher value for
		// lo than gp.stack.lo.
		memclrNoHeapPointers(unsafe.Pointer(lo), sp-lo)
		releasem(mp)
	})
	// Don't put any code here: the stack frame's contents are gone!
}
// addSecret records the fact that we need to zero p immediately
// when it is freed. It does so by attaching a _KindSpecialSecret
// special of the given size to p.
func addSecret(p unsafe.Pointer, size uintptr) {
	// TODO(dmo): figure out the cost of these. These are mostly
	// intended to catch allocations that happen via the runtime
	// that the user has no control over and not big buffers that user
	// code is allocating. The cost should be relatively low,
	// but we have run into a wall with other special allocations before.
	lock(&mheap_.speciallock)
	s := (*specialSecret)(mheap_.specialSecretAlloc.alloc())
	s.special.kind = _KindSpecialSecret
	s.size = size
	unlock(&mheap_.speciallock)
	addspecial(p, &s.special, false)
}
// secret_getStack returns the memory range of the
// current goroutine's stack.
// For testing only.
// Note that this is kind of tricky, as the goroutine can
// be copied and/or exit before the result is used, at which
// point it may no longer be valid.
//
//go:linkname secret_getStack runtime/secret.getStack
func secret_getStack() (uintptr, uintptr) {
	stk := getg().stack
	return stk.lo, stk.hi
}
// eraseSecretsSignalStk erases any secrets that may have been spilled
// onto the signal stack during signal handling, by zeroing the M's
// entire gsignal stack when the signalSecret flag is set. Must be
// called on g0 or inside STW to make sure we don't get rescheduled
// onto a different M.
//
//go:nosplit
func eraseSecretsSignalStk() {
	mp := getg().m
	if mp.signalSecret {
		mp.signalSecret = false
		// signal handlers get invoked atomically
		// so it's fine for us to zero out the stack while a signal
		// might get delivered. Worst case is we are currently running
		// in secret mode and the signal spills fresh secret info onto
		// the stack, but since we haven't returned from the secret.Do
		// yet, we make no guarantees about that information.
		//
		// It might be tempting to only erase the part of the signal
		// stack that has the context, but when running with forwarded
		// signals, they might pull arbitrary data out of the context and
		// store it elsewhere on the stack. We can't stop them from storing
		// the data in arbitrary places, but we can erase the stack where
		// they are likely to put it in cases of a register spill.
		size := mp.gsignal.stack.hi - mp.gsignal.stack.lo
		memclrNoHeapPointers(unsafe.Pointer(mp.gsignal.stack.lo), size)
	}
}
// secret_appendSignalStacks appends the signal stack of every M to
// sigstacks and returns the result.
// For testing only.
//
// It stops the world and holds allocmLock while walking allm so the
// list cannot change underneath it.
//
//go:linkname secret_appendSignalStacks runtime/secret.appendSignalStacks
func secret_appendSignalStacks(sigstacks []stack) []stack {
	// This is probably overkill, but it's what
	// doAllThreadsSyscall does
	stw := stopTheWorld(stwAllThreadsSyscall)
	allocmLock.lock()
	acquirem()
	for mp := allm; mp != nil; mp = mp.alllink {
		sigstacks = append(sigstacks, mp.gsignal.stack)
	}
	releasem(getg().m)
	allocmLock.unlock()
	startTheWorld(stw)
	return sigstacks
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import _ "unsafe"
// initSecureMode prepares the secure-mode state. On this platform it
// is a no-op because secureMode was already set during sysauxv
// processing.
func initSecureMode() {
	// We have already initialized the secureMode bool in sysauxv.
}
// isSecureMode reports whether the runtime started in secure mode
// (recorded at startup; see initSecureMode).
func isSecureMode() bool {
	return secureMode
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package runtime
import (
"internal/stringslite"
)
// secure applies the runtime's secure-mode policy at startup: when
// secure mode is detected, it forces specific environment variable
// values (currently only GOTRACEBACK=none). Otherwise it does nothing.
func secure() {
	initSecureMode()

	if isSecureMode() {
		// When secure mode is enabled, we do one thing: enforce specific
		// environment variable values (currently we only force GOTRACEBACK=none)
		//
		// Other packages may also disable specific functionality when secure mode
		// is enabled (determined by using linkname to call isSecureMode).
		secureEnv()
	}
}
// secureEnv forces GOTRACEBACK=none in the runtime's copy of the
// environment: any existing GOTRACEBACK entry is overwritten, and one
// is appended if none was present.
func secureEnv() {
	found := false
	for i := range envs {
		if stringslite.HasPrefix(envs[i], "GOTRACEBACK=") {
			envs[i] = "GOTRACEBACK=none"
			found = true
		}
	}
	if !found {
		envs = append(envs, "GOTRACEBACK=none")
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// This file contains the implementation of Go select statements.
import (
"internal/abi"
"internal/runtime/sys"
"unsafe"
)
// debugSelect enables verbose print-based tracing in selectgo.
const debugSelect = false
// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/compile/internal/walk/select.go's scasetype.
type scase struct {
	c    *hchan         // chan
	elem unsafe.Pointer // data element (source for a send, destination for a receive)
}
// PCs of chansend/chanrecv, used to attribute race-detector events
// from select cases to the corresponding channel operation.
var (
	chansendpc = abi.FuncPCABIInternal(chansend)
	chanrecvpc = abi.FuncPCABIInternal(chanrecv)
)
// selectsetpc records the caller's PC into *pc. It is used to collect
// per-case PCs for the race detector (see reflect_rselect).
func selectsetpc(pc *uintptr) {
	*pc = sys.GetCallerPC()
}
// sellock locks every channel appearing in scases, visiting them in
// lockorder. lockorder is sorted by channel, so duplicate entries for
// the same channel are adjacent and each channel is locked only once.
func sellock(scases []scase, lockorder []uint16) {
	var prev *hchan
	for _, idx := range lockorder {
		ch := scases[idx].c
		if ch == prev {
			continue // already locked this channel
		}
		prev = ch
		lock(&ch.lock)
	}
}
// selunlock releases the channel locks taken by sellock, in reverse
// lock order, unlocking each distinct channel exactly once.
//
// We must be very careful here to not touch sel after we have unlocked
// the last lock, because sel can be freed right after the last unlock.
// Consider the following situation.
// First M calls runtime·park() in runtime·selectgo() passing the sel.
// Once runtime·park() has unlocked the last lock, another M makes
// the G that calls select runnable again and schedules it for execution.
// When the G runs on another M, it locks all the locks and frees sel.
// Now if the first M touches sel, it will access freed memory.
func selunlock(scases []scase, lockorder []uint16) {
	for i := len(lockorder) - 1; i >= 0; i-- {
		ch := scases[lockorder[i]].c
		// Unlock only at the last (lowest-index) occurrence of each
		// channel; earlier duplicates are skipped.
		if i == 0 || ch != scases[lockorder[i-1]].c {
			unlock(&ch.lock)
		}
	}
}
// selparkcommit is the gopark commit function used by selectgo. It is
// invoked with every channel in gp.waiting still locked; it publishes
// the activeStackChans/parkingOnChan state needed by stack shrinking
// and then unlocks each distinct channel exactly once (gp.waiting is
// in lock order). It always returns true (commit the park).
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
	// There are unlocked sudogs that point into gp's stack. Stack
	// copying must lock the channels of those sudogs.
	// Set activeStackChans here instead of before we try parking
	// because we could self-deadlock in stack growth on a
	// channel lock.
	gp.activeStackChans = true
	// Mark that it's safe for stack shrinking to occur now,
	// because any thread acquiring this G's stack for shrinking
	// is guaranteed to observe activeStackChans after this store.
	gp.parkingOnChan.Store(false)
	// Make sure we unlock after setting activeStackChans and
	// unsetting parkingOnChan. The moment we unlock any of the
	// channel locks we risk gp getting readied by a channel operation
	// and so gp could continue running before everything before the
	// unlock is visible (even to gp itself).

	// This must not access gp's stack (see gopark). In
	// particular, it must not access the *hselect. That's okay,
	// because by the time this is called, gp.waiting has all
	// channels in lock order.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c.get() != lastc && lastc != nil {
			// As soon as we unlock the channel, fields in
			// any sudog with that channel may change,
			// including c and waitlink. Since multiple
			// sudogs may have the same channel, we unlock
			// only after we've passed the last instance
			// of a channel.
			unlock(&lastc.lock)
		}
		lastc = sg.c.get()
	}
	if lastc != nil {
		unlock(&lastc.lock)
	}
	return true
}
// block parks the calling goroutine forever; it implements a select
// statement with no cases.
func block() {
	gopark(nil, nil, waitReasonSelectNoCases, traceBlockForever, 1) // forever
}
// selectgo implements the select statement.
//
// cas0 points to an array of type [ncases]scase, and order0 points to
// an array of type [2*ncases]uint16 where ncases must be <= 65536.
// Both reside on the goroutine's stack (regardless of any escaping in
// selectgo).
//
// For race detector builds, pc0 points to an array of type
// [ncases]uintptr (also on the stack); for other builds, it's set to
// nil.
//
// selectgo returns the index of the chosen scase, which matches the
// ordinal position of its respective select{recv,send,default} call.
// Also, if the chosen scase was a receive operation, it reports whether
// a value was received.
func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) {
	gp := getg()
	if debugSelect {
		print("select: cas0=", cas0, "\n")
	}

	// NOTE: In order to maintain a lean stack size, the number of scases
	// is capped at 65536.
	cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0))
	order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0))

	ncases := nsends + nrecvs
	scases := cas1[:ncases:ncases]
	// order1 holds both orderings back to back: poll order first,
	// lock order second.
	pollorder := order1[:ncases:ncases]
	lockorder := order1[ncases:][:ncases:ncases]
	// NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler.

	// Even when raceenabled is true, there might be select
	// statements in packages compiled without -race (e.g.,
	// ensureSigM in runtime/signal_unix.go).
	var pcs []uintptr
	if raceenabled && pc0 != nil {
		pc1 := (*[1 << 16]uintptr)(unsafe.Pointer(pc0))
		pcs = pc1[:ncases:ncases]
	}
	// casePC returns the recorded PC for case casi, or 0 when PCs
	// were not collected.
	casePC := func(casi int) uintptr {
		if pcs == nil {
			return 0
		}
		return pcs[casi]
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
	norder := 0
	allSynctest := true
	for i := range scases {
		cas := &scases[i]

		// Omit cases without channels from the poll and lock orders.
		if cas.c == nil {
			cas.elem = nil // allow GC
			continue
		}

		if cas.c.bubble != nil {
			if getg().bubble != cas.c.bubble {
				fatal("select on synctest channel from outside bubble")
			}
		} else {
			allSynctest = false
		}

		if cas.c.timer != nil {
			cas.c.timer.maybeRunChan(cas.c)
		}

		// Insert case i at a uniformly random position among the
		// cases collected so far (inside-out shuffle).
		j := cheaprandn(uint32(norder + 1))
		pollorder[norder] = pollorder[j]
		pollorder[j] = uint16(i)
		norder++
	}
	pollorder = pollorder[:norder]
	lockorder = lockorder[:norder]

	waitReason := waitReasonSelect
	if gp.bubble != nil && allSynctest {
		// Every channel selected on is in a synctest bubble,
		// so this goroutine will count as idle while selecting.
		waitReason = waitReasonSynctestSelect
	}

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	// Phase 1: build a max-heap by sifting each new element up.
	for i := range lockorder {
		j := i
		// Start with the pollorder to permute cases on the same channel.
		c := scases[pollorder[i]].c
		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = pollorder[i]
	}
	// Phase 2: repeatedly move the max to the end and sift down.
	for i := len(lockorder) - 1; i >= 0; i-- {
		o := lockorder[i]
		c := scases[o].c
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
				k++
			}
			if c.sortkey() < scases[lockorder[k]].c.sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = o
	}

	if debugSelect {
		for i := 0; i+1 < len(lockorder); i++ {
			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	}

	// lock all the channels involved in the select
	sellock(scases, lockorder)

	var (
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		qp     unsafe.Pointer
		nextp  **sudog
	)

	// pass 1 - look for something already waiting
	var casi int
	var cas *scase
	var caseSuccess bool
	var caseReleaseTime int64 = -1
	var recvOK bool
	for _, casei := range pollorder {
		casi = int(casei)
		cas = &scases[casi]
		c = cas.c

		if casi >= nsends {
			// Receive case: try a waiting sender, then the buffer,
			// then closed-channel receive.
			sg = c.sendq.dequeue()
			if sg != nil {
				goto recv
			}
			if c.qcount > 0 {
				goto bufrecv
			}
			if c.closed != 0 {
				goto rclose
			}
		} else {
			// Send case: closed check, then a waiting receiver,
			// then buffer space.
			if raceenabled {
				racereadpc(c.raceaddr(), casePC(casi), chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			sg = c.recvq.dequeue()
			if sg != nil {
				goto send
			}
			if c.qcount < c.dataqsiz {
				goto bufsend
			}
		}
	}

	if !block {
		// Nothing ready and the select has a default case.
		selunlock(scases, lockorder)
		casi = -1
		goto retc
	}

	// pass 2 - enqueue on all chans
	if gp.waiting != nil {
		throw("gp.waiting != nil")
	}
	nextp = &gp.waiting
	for _, casei := range lockorder {
		casi = int(casei)
		cas = &scases[casi]
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		sg.isSelect = true
		// No stack splits between assigning elem and enqueuing
		// sg on gp.waiting where copystack can find it.
		sg.elem.set(cas.elem)
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.c.set(c)
		// Construct waiting list in lock order.
		*nextp = sg
		nextp = &sg.waitlink

		if casi < nsends {
			c.sendq.enqueue(sg)
		} else {
			c.recvq.enqueue(sg)
		}

		if c.timer != nil {
			blockTimerChan(c)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	// Signal to anyone trying to shrink our stack that we're about
	// to park on a channel. The window between when this G's status
	// changes and when we set gp.activeStackChans is not safe for
	// stack shrinking.
	gp.parkingOnChan.Store(true)
	gopark(selparkcommit, nil, waitReason, traceBlockSelect, 1)
	gp.activeStackChans = false

	sellock(scases, lockorder)

	gp.selectDone.Store(0)
	// gp.param carries the sudog of the case that woke us (set by
	// the waking channel operation).
	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in lock order.
	casi = -1
	cas = nil
	caseSuccess = false
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.isSelect = false
		sg1.elem.set(nil)
		sg1.c.set(nil)
	}
	gp.waiting = nil

	for _, casei := range lockorder {
		k = &scases[casei]
		if k.c.timer != nil {
			unblockTimerChan(k.c)
		}
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			casi = int(casei)
			cas = k
			caseSuccess = sglist.success
			if sglist.releasetime > 0 {
				caseReleaseTime = sglist.releasetime
			}
		} else {
			c = k.c
			if int(casei) < nsends {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		throw("selectgo: bad wakeup")
	}

	c = cas.c

	if debugSelect {
		print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n")
	}

	if casi < nsends {
		if !caseSuccess {
			goto sclose
		}
	} else {
		recvOK = caseSuccess
	}

	if raceenabled {
		if casi < nsends {
			raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
		} else if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
		}
	}
	if msanenabled {
		if casi < nsends {
			msanread(cas.elem, c.elemtype.Size_)
		} else if cas.elem != nil {
			msanwrite(cas.elem, c.elemtype.Size_)
		}
	}
	if asanenabled {
		if casi < nsends {
			asanread(cas.elem, c.elemtype.Size_)
		} else if cas.elem != nil {
			asanwrite(cas.elem, c.elemtype.Size_)
		}
	}

	selunlock(scases, lockorder)
	goto retc

bufrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
		}
		racenotify(c, c.recvx, nil)
	}
	if msanenabled && cas.elem != nil {
		msanwrite(cas.elem, c.elemtype.Size_)
	}
	if asanenabled && cas.elem != nil {
		asanwrite(cas.elem, c.elemtype.Size_)
	}
	recvOK = true
	qp = chanbuf(c, c.recvx)
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, qp)
	}
	typedmemclr(c.elemtype, qp)
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	selunlock(scases, lockorder)
	goto retc

bufsend:
	// can send to buffer
	if raceenabled {
		racenotify(c, c.sendx, nil)
		raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.Size_)
	}
	if asanenabled {
		asanread(cas.elem, c.elemtype.Size_)
	}
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	selunlock(scases, lockorder)
	goto retc

recv:
	// can receive from sleeping sender (sg)
	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncrecv: cas0=", cas0, " c=", c, "\n")
	}
	recvOK = true
	goto retc

rclose:
	// read at end of closed channel
	selunlock(scases, lockorder)
	recvOK = false
	if cas.elem != nil {
		typedmemclr(c.elemtype, cas.elem)
	}
	if raceenabled {
		raceacquire(c.raceaddr())
	}
	goto retc

send:
	// can send to a sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.Size_)
	}
	if asanenabled {
		asanread(cas.elem, c.elemtype.Size_)
	}
	send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncsend: cas0=", cas0, " c=", c, "\n")
	}
	goto retc

retc:
	if caseReleaseTime > 0 {
		blockevent(caseReleaseTime-t0, 1)
	}
	return casi, recvOK

sclose:
	// send on closed channel
	selunlock(scases, lockorder)
	panic(plainError("send on closed channel"))
}
// sortkey returns the channel's address as an integer; selectgo uses
// it to order channels for locking.
func (c *hchan) sortkey() uintptr {
	p := unsafe.Pointer(c)
	return uintptr(p)
}
// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir      // direction of this case (send/recv/default)
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}
// selectDir describes the direction of a reflect-level select case.
// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)
// reflect_rselect implements reflect.Select. It converts the
// reflect-level cases into the layout selectgo requires (sends in the
// front of the scase array, receives at the back), runs selectgo, and
// translates the chosen index back into the caller's case ordering.
//
//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (int, bool) {
	if len(cases) == 0 {
		block() // parks forever; never returns
	}
	sel := make([]scase, len(cases))
	// orig[j] remembers the caller's index of the case placed in slot j.
	orig := make([]int, len(cases))
	nsends, nrecvs := 0, 0
	dflt := -1
	for i, rc := range cases {
		var j int
		switch rc.dir {
		case selectDefault:
			dflt = i
			continue
		case selectSend:
			j = nsends
			nsends++
		case selectRecv:
			nrecvs++
			j = len(cases) - nrecvs
		}

		sel[j] = scase{c: rc.ch, elem: rc.val}
		orig[j] = i
	}

	// Only a default case.
	if nsends+nrecvs == 0 {
		return dflt, false
	}

	// Compact sel and orig if necessary (a default case left a gap
	// between the sends and the receives).
	if nsends+nrecvs < len(cases) {
		copy(sel[nsends:], sel[len(cases)-nrecvs:])
		copy(orig[nsends:], orig[len(cases)-nrecvs:])
	}

	order := make([]uint16, 2*(nsends+nrecvs))
	var pc0 *uintptr
	if raceenabled {
		pcs := make([]uintptr, nsends+nrecvs)
		for i := range pcs {
			selectsetpc(&pcs[i])
		}
		pc0 = &pcs[0]
	}

	chosen, recvOK := selectgo(&sel[0], &order[0], pc0, nsends, nrecvs, dflt == -1)

	// Translate chosen back to caller's ordering.
	if chosen < 0 {
		chosen = dflt
	} else {
		chosen = orig[chosen]
	}
	return chosen, recvOK
}
// dequeueSudoG removes sgp from the doubly-linked wait queue q,
// splicing its neighbors together and clearing sgp's links. It is a
// no-op if sgp was already removed.
func (q *waitq) dequeueSudoG(sgp *sudog) {
	prev, next := sgp.prev, sgp.next
	switch {
	case prev != nil && next != nil:
		// Interior node: splice the neighbors together.
		prev.next = next
		next.prev = prev
		sgp.next = nil
		sgp.prev = nil
	case prev != nil:
		// Tail of the queue.
		prev.next = nil
		q.last = prev
		sgp.prev = nil
	case next != nil:
		// Head of the queue.
		next.prev = nil
		q.first = next
		sgp.next = nil
	default:
		// prev==next==nil: sgp is either the sole element of the
		// queue or has already been removed. q.first disambiguates.
		if q.first == sgp {
			q.first = nil
			q.last = nil
		}
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Semaphore implementation exposed to Go.
// Intended use is provide a sleep and wakeup
// primitive that can be used in the contended case
// of other synchronization primitives.
// Thus it targets the same goal as Linux's futex,
// but it has much simpler semantics.
//
// That is, don't think of these as semaphores.
// Think of them as a way to implement sleep and wakeup
// such that every sleep is paired with a single wakeup,
// even if, due to races, the wakeup happens before the sleep.
//
// See Mullender and Cox, ``Semaphores in Plan 9,''
// https://swtch.com/semaphore.pdf
package runtime
import (
"internal/cpu"
"internal/runtime/atomic"
"unsafe"
)
// Asynchronous semaphore for sync.Mutex.
// A semaRoot holds a balanced tree of sudog with distinct addresses (s.elem).
// Each of those sudog may in turn point (through s.waitlink) to a list
// of other sudogs waiting on the same address.
// The operations on the inner lists of sudogs with the same address
// are all O(1). The scanning of the top-level semaRoot list is O(log n),
// where n is the number of distinct addresses with goroutines blocked
// on them that hash to the given semaRoot.
// See golang.org/issue/17953 for a program that worked badly
// before we introduced the second level of list, and
// BenchmarkSemTable/OneAddrCollision/* for a benchmark that exercises this.
type semaRoot struct {
	lock  mutex         // guards treap (see semacquire1/semrelease1)
	treap *sudog        // root of balanced tree of unique waiters.
	nwait atomic.Uint32 // Number of waiters. Read w/o the lock.
}
// semtable hashes semaphore addresses to semaRoots; see rootFor.
var semtable semTable

// Prime to not correlate with any user patterns.
const semTabSize = 251

type semTable [semTabSize]struct {
	root semaRoot
	// pad keeps each root on its own cache line to avoid false sharing.
	pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte
}
// rootFor returns the semaRoot bucket responsible for the semaphore at
// addr. The address is shifted right by 3 to discard low alignment
// bits before hashing into the table.
func (t *semTable) rootFor(addr *uint32) *semaRoot {
	h := uintptr(unsafe.Pointer(addr)) >> 3
	return &t[h%semTabSize].root
}
// sync_runtime_Semacquire blocks until the semaphore at addr can be
// acquired, counting the wait in the block profile.
//
// sync_runtime_Semacquire should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
func sync_runtime_Semacquire(addr *uint32) {
	semacquire1(addr, false, semaBlockProfile, 0, waitReasonSemacquire)
}
// poll_runtime_Semacquire is the semaphore acquire used by
// internal/poll.
//
//go:linkname poll_runtime_Semacquire internal/poll.runtime_Semacquire
func poll_runtime_Semacquire(addr *uint32) {
	semacquire1(addr, false, semaBlockProfile, 0, waitReasonSemacquire)
}
// sync_runtime_Semrelease increments the semaphore at addr and wakes a
// waiter if one exists; handoff and skipframes are passed through to
// semrelease1.
//
// sync_runtime_Semrelease should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
func sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int) {
	semrelease1(addr, handoff, skipframes)
}
// internal_sync_runtime_SemacquireMutex is the semaphore acquire used
// by internal/sync mutexes; it feeds both the block and mutex
// profiles.
//
//go:linkname internal_sync_runtime_SemacquireMutex internal/sync.runtime_SemacquireMutex
func internal_sync_runtime_SemacquireMutex(addr *uint32, lifo bool, skipframes int) {
	semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncMutexLock)
}
// sync_runtime_SemacquireRWMutexR is the semaphore acquire used by
// sync.RWMutex read locking.
//
//go:linkname sync_runtime_SemacquireRWMutexR sync.runtime_SemacquireRWMutexR
func sync_runtime_SemacquireRWMutexR(addr *uint32, lifo bool, skipframes int) {
	semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncRWMutexRLock)
}
// sync_runtime_SemacquireRWMutex is the semaphore acquire used by
// sync.RWMutex write locking.
//
//go:linkname sync_runtime_SemacquireRWMutex sync.runtime_SemacquireRWMutex
func sync_runtime_SemacquireRWMutex(addr *uint32, lifo bool, skipframes int) {
	semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncRWMutexLock)
}
//go:linkname sync_runtime_SemacquireWaitGroup sync.runtime_SemacquireWaitGroup
func sync_runtime_SemacquireWaitGroup(addr *uint32, synctestDurable bool) {
reason := waitReasonSyncWaitGroupWait
if synctestDurable {
reason = waitReasonSynctestWaitGroupWait
}
semacquire1(addr, false, semaBlockProfile, 0, reason)
}
// poll_runtime_Semrelease is the semaphore release used by
// internal/poll.
//
//go:linkname poll_runtime_Semrelease internal/poll.runtime_Semrelease
func poll_runtime_Semrelease(addr *uint32) {
	semrelease(addr)
}
// internal_sync_runtime_Semrelease is the semaphore release used by
// internal/sync.
//
//go:linkname internal_sync_runtime_Semrelease internal/sync.runtime_Semrelease
func internal_sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int) {
	semrelease1(addr, handoff, skipframes)
}
// readyWithTime marks s's goroutine runnable, first stamping
// s.releasetime with the current cputicks when release timing was
// requested (releasetime != 0).
func readyWithTime(s *sudog, traceskip int) {
	if s.releasetime != 0 {
		s.releasetime = cputicks()
	}
	goready(s.g, traceskip)
}
// semaProfileFlags selects which profiles (block and/or mutex) a
// semaphore acquisition feeds; see semacquire1.
type semaProfileFlags int

const (
	semaBlockProfile semaProfileFlags = 1 << iota
	semaMutexProfile
)
// Called from runtime.
//
// semacquire blocks until the semaphore at addr can be acquired, with
// no profiling and default queueing.
func semacquire(addr *uint32) {
	semacquire1(addr, false, 0, 0, waitReasonSemacquire)
}
// semacquire1 is the general semaphore acquire path.
//
// lifo is passed through to root.queue to control queue placement;
// profile selects block/mutex profiling; skipframes adjusts the
// recorded call stacks; reason is the park reason shown in
// tracebacks. Must run on a user goroutine stack.
func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes int, reason waitReason) {
	gp := getg()
	if gp != gp.m.curg {
		throw("semacquire not on the G stack")
	}

	// Easy case.
	if cansemacquire(addr) {
		return
	}

	// Harder case:
	//	increment waiter count
	//	try cansemacquire one more time, return if succeeded
	//	enqueue itself as a waiter
	//	sleep
	//	(waiter descriptor is dequeued by signaler)
	s := acquireSudog()
	root := semtable.rootFor(addr)
	t0 := int64(0)
	s.releasetime = 0
	s.acquiretime = 0
	s.ticket = 0
	if profile&semaBlockProfile != 0 && blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1
	}
	if profile&semaMutexProfile != 0 && mutexprofilerate > 0 {
		if t0 == 0 {
			t0 = cputicks()
		}
		s.acquiretime = t0
	}
	for {
		lockWithRank(&root.lock, lockRankRoot)
		// Add ourselves to nwait to disable "easy case" in semrelease.
		root.nwait.Add(1)
		// Check cansemacquire to avoid missed wakeup.
		if cansemacquire(addr) {
			root.nwait.Add(-1)
			unlock(&root.lock)
			break
		}
		// Any semrelease after the cansemacquire knows we're waiting
		// (we set nwait above), so go to sleep.
		root.queue(addr, s, lifo)
		goparkunlock(&root.lock, reason, traceBlockSync, 4+skipframes)
		// A nonzero ticket means the releaser handed the semaphore
		// directly to us; otherwise retry the acquire.
		if s.ticket != 0 || cansemacquire(addr) {
			break
		}
	}
	if s.releasetime > 0 {
		blockevent(s.releasetime-t0, 3+skipframes)
	}
	releaseSudog(s)
}
// semrelease releases the semaphore at addr with no handoff and no
// extra skipped frames.
func semrelease(addr *uint32) {
	semrelease1(addr, false, 0)
}
// semrelease1 increments the semaphore at addr and, if any goroutine
// is blocked on it, dequeues and readies one. If handoff is true and
// the count is still available, the semaphore is passed directly to
// the woken goroutine (ticket=1) and the current G yields its time
// slice to it. skipframes adjusts profiling stack traces.
func semrelease1(addr *uint32, handoff bool, skipframes int) {
	root := semtable.rootFor(addr)
	atomic.Xadd(addr, 1)

	// Easy case: no waiters?
	// This check must happen after the xadd, to avoid a missed wakeup
	// (see loop in semacquire).
	if root.nwait.Load() == 0 {
		return
	}

	// Harder case: search for a waiter and wake it.
	lockWithRank(&root.lock, lockRankRoot)
	if root.nwait.Load() == 0 {
		// The count is already consumed by another goroutine,
		// so no need to wake up another goroutine.
		unlock(&root.lock)
		return
	}
	s, t0, tailtime := root.dequeue(addr)
	if s != nil {
		root.nwait.Add(-1)
	}
	unlock(&root.lock)
	if s != nil { // May be slow or even yield, so unlock first
		acquiretime := s.acquiretime
		if acquiretime != 0 {
			// Charge contention that this (delayed) unlock caused.
			// If there are N more goroutines waiting beyond the
			// one that's waking up, charge their delay as well, so that
			// contention holding up many goroutines shows up as
			// more costly than contention holding up a single goroutine.
			// It would take O(N) time to calculate how long each goroutine
			// has been waiting, so instead we charge avg(head-wait, tail-wait)*N.
			// head-wait is the longest wait and tail-wait is the shortest.
			// (When we do a lifo insertion, we preserve this property by
			// copying the old head's acquiretime into the inserted new head.
			// In that case the overall average may be slightly high, but that's fine:
			// the average of the ends is only an approximation to the actual
			// average anyway.)
			// The root.dequeue above changed the head and tail acquiretime
			// to the current time, so the next unlock will not re-count this contention.
			dt0 := t0 - acquiretime
			dt := dt0
			if s.waiters != 0 {
				dtail := t0 - tailtime
				dt += (dtail + dt0) / 2 * int64(s.waiters)
			}
			mutexevent(dt, 3+skipframes)
		}
		if s.ticket != 0 {
			throw("corrupted semaphore ticket")
		}
		if handoff && cansemacquire(addr) {
			// Hand the count we just released directly to s.
			s.ticket = 1
		}
		readyWithTime(s, 5+skipframes)
		if s.ticket == 1 && getg().m.locks == 0 && getg() != getg().m.g0 {
			// Direct G handoff
			//
			// readyWithTime has added the waiter G as runnext in the
			// current P; we now call the scheduler so that we start running
			// the waiter G immediately.
			//
			// Note that waiter inherits our time slice: this is desirable
			// to avoid having a highly contended semaphore hog the P
			// indefinitely. goyield is like Gosched, but it emits a
			// "preempted" trace event instead and, more importantly, puts
			// the current G on the local runq instead of the global one.
			// We only do this in the starving regime (handoff=true), as in
			// the non-starving case it is possible for a different waiter
			// to acquire the semaphore while we are yielding/scheduling,
			// and this would be wasteful. We wait instead to enter starving
			// regime, and then we start to do direct handoffs of ticket and P.
			//
			// See issue 33747 for discussion.
			//
			// We don't handoff directly if we're holding locks or on the
			// system stack, since it's not safe to enter the scheduler.
			goyield()
		}
	}
}
// cansemacquire tries to decrement the semaphore count at addr without
// blocking. It reports true after a successful decrement and false if
// the count is (or becomes) zero. Lost CAS races against other
// acquirers/releasers are simply retried.
func cansemacquire(addr *uint32) bool {
	for v := atomic.Load(addr); v != 0; v = atomic.Load(addr) {
		if atomic.Cas(addr, v, v-1) {
			return true
		}
	}
	return false
}
// queue adds s to the blocked goroutines in semaRoot.
// Waiters on distinct addresses live in a treap keyed by address; all
// waiters on the same address hang off a single treap node as a linked
// wait list. lifo inserts s at the head of that list (replacing the
// treap node); otherwise s is appended at the tail.
func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
	s.g = getg()
	s.elem.set(unsafe.Pointer(addr))
	// Storing this pointer so that we can trace the semaphore address
	// from the blocked goroutine when checking for goroutine leaks.
	s.g.waiting = s
	s.next = nil
	s.prev = nil
	s.waiters = 0

	var last *sudog
	pt := &root.treap
	for t := *pt; t != nil; t = *pt {
		if uintptr(unsafe.Pointer(addr)) == t.elem.uintptr() {
			// Already have addr in list.
			if lifo {
				// Substitute s in t's place in treap.
				*pt = s
				s.ticket = t.ticket
				s.acquiretime = t.acquiretime // preserve head acquiretime as oldest time
				s.parent = t.parent
				s.prev = t.prev
				s.next = t.next
				if s.prev != nil {
					s.prev.parent = s
				}
				if s.next != nil {
					s.next.parent = s
				}
				// Add t first in s's wait list.
				s.waitlink = t
				s.waittail = t.waittail
				if s.waittail == nil {
					s.waittail = t
				}
				// waiters is a saturating-style count: skip the
				// increment only if it would wrap to zero.
				s.waiters = t.waiters
				if s.waiters+1 != 0 {
					s.waiters++
				}
				t.parent = nil
				t.prev = nil
				t.next = nil
				t.waittail = nil
			} else {
				// Add s to end of t's wait list.
				if t.waittail == nil {
					t.waitlink = s
				} else {
					t.waittail.waitlink = s
				}
				t.waittail = s
				s.waitlink = nil
				if t.waiters+1 != 0 {
					t.waiters++
				}
			}
			return
		}
		last = t
		if uintptr(unsafe.Pointer(addr)) < t.elem.uintptr() {
			pt = &t.prev
		} else {
			pt = &t.next
		}
	}

	// Add s as new leaf in tree of unique addrs.
	// The balanced tree is a treap using ticket as the random heap priority.
	// That is, it is a binary tree ordered according to the elem addresses,
	// but then among the space of possible binary trees respecting those
	// addresses, it is kept balanced on average by maintaining a heap ordering
	// on the ticket: s.ticket <= both s.prev.ticket and s.next.ticket.
	// https://en.wikipedia.org/wiki/Treap
	// https://faculty.washington.edu/aragon/pubs/rst89.pdf
	//
	// s.ticket compared with zero in couple of places, therefore set lowest bit.
	// It will not affect treap's quality noticeably.
	s.ticket = cheaprand() | 1
	s.parent = last
	*pt = s

	// Rotate up into tree according to ticket (priority).
	for s.parent != nil && s.parent.ticket > s.ticket {
		if s.parent.prev == s {
			root.rotateRight(s.parent)
		} else {
			if s.parent.next != s {
				panic("semaRoot queue")
			}
			root.rotateLeft(s.parent)
		}
	}
}
// dequeue searches for and finds the first goroutine
// in semaRoot blocked on addr.
// If the sudog was being profiled, dequeue returns the time
// at which it was woken up as now. Otherwise now is 0.
// If there are additional entries in the wait list, dequeue
// returns tailtime set to the last entry's acquiretime.
// Otherwise tailtime is found.acquiretime.
func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now, tailtime int64) {
	// Binary-search the treap by address.
	ps := &root.treap
	s := *ps
	for ; s != nil; s = *ps {
		if uintptr(unsafe.Pointer(addr)) == s.elem.uintptr() {
			goto Found
		}
		if uintptr(unsafe.Pointer(addr)) < s.elem.uintptr() {
			ps = &s.prev
		} else {
			ps = &s.next
		}
	}
	return nil, 0, 0

Found:
	now = int64(0)
	if s.acquiretime != 0 {
		now = cputicks()
	}
	if t := s.waitlink; t != nil {
		// Substitute t, also waiting on addr, for s in root tree of unique addrs.
		*ps = t
		t.ticket = s.ticket
		t.parent = s.parent
		t.prev = s.prev
		if t.prev != nil {
			t.prev.parent = t
		}
		t.next = s.next
		if t.next != nil {
			t.next.parent = t
		}
		if t.waitlink != nil {
			t.waittail = s.waittail
		} else {
			t.waittail = nil
		}
		t.waiters = s.waiters
		if t.waiters > 1 {
			t.waiters--
		}
		// Set head and tail acquire time to 'now',
		// because the caller will take care of charging
		// the delays before now for all entries in the list.
		t.acquiretime = now
		tailtime = s.waittail.acquiretime
		s.waittail.acquiretime = now
		s.waitlink = nil
		s.waittail = nil
	} else {
		// Rotate s down to be leaf of tree for removal, respecting priorities.
		for s.next != nil || s.prev != nil {
			if s.next == nil || s.prev != nil && s.prev.ticket < s.next.ticket {
				root.rotateRight(s)
			} else {
				root.rotateLeft(s)
			}
		}
		// Remove s, now a leaf.
		if s.parent != nil {
			if s.parent.prev == s {
				s.parent.prev = nil
			} else {
				s.parent.next = nil
			}
		} else {
			root.treap = nil
		}
		tailtime = s.acquiretime
	}
	// Goroutine is no longer blocked. Clear the waiting pointer.
	s.g.waiting = nil
	s.parent = nil
	s.elem.set(nil)
	s.next = nil
	s.prev = nil
	s.ticket = 0
	return s, now, tailtime
}
// rotateLeft rotates the tree rooted at node x.
// turning (x a (y b c)) into (y (x a b) c).
// Used to restore the treap's heap order on ticket after insertion
// and to sink a node before removal.
func (root *semaRoot) rotateLeft(x *sudog) {
	// p -> (x a (y b c))
	p := x.parent
	y := x.next
	b := y.prev

	y.prev = x
	x.parent = y
	x.next = b
	if b != nil {
		b.parent = x
	}

	y.parent = p
	if p == nil {
		root.treap = y
	} else if p.prev == x {
		p.prev = y
	} else {
		if p.next != x {
			throw("semaRoot rotateLeft")
		}
		p.next = y
	}
}
// rotateRight rotates the tree rooted at node y.
// turning (y (x a b) c) into (x a (y b c)).
// Mirror image of rotateLeft.
func (root *semaRoot) rotateRight(y *sudog) {
	// p -> (y (x a b) c)
	p := y.parent
	x := y.prev
	b := x.next

	x.next = y
	y.parent = x
	y.prev = b
	if b != nil {
		b.parent = y
	}

	x.parent = p
	if p == nil {
		root.treap = x
	} else if p.prev == y {
		p.prev = x
	} else {
		if p.next != y {
			throw("semaRoot rotateRight")
		}
		p.next = x
	}
}
// notifyList is a ticket-based notification list used to implement sync.Cond.
//
// It must be kept in sync with the sync package.
type notifyList struct {
	// wait is the ticket number of the next waiter. It is atomically
	// incremented outside the lock.
	wait atomic.Uint32

	// notify is the ticket number of the next waiter to be notified. It can
	// be read outside the lock, but is only written to with lock held.
	//
	// Both wait & notify can wrap around, and such cases will be correctly
	// handled as long as their "unwrapped" difference is bounded by 2^31.
	// For this not to be the case, we'd need to have 2^31+ goroutines
	// blocked on the same condvar, which is currently not possible.
	notify uint32

	// List of parked waiters (singly linked via sudog.next).
	lock mutex
	head *sudog
	tail *sudog
}
// less reports whether ticket a was issued before ticket b, treating
// both as free-running 32-bit counters that may wrap around. The result
// is correct as long as the true ("unwrapped") difference between the
// two counts is below 2^31.
func less(a, b uint32) bool {
	// a precedes b exactly when the wrapped difference a-b lands in the
	// upper half of the uint32 range (i.e. would be negative as int32).
	return a-b >= 1<<31
}
// notifyListAdd adds the caller to a notify list such that it can receive
// notifications. The caller must eventually call notifyListWait to wait for
// such a notification, passing the returned ticket number.
//
//go:linkname notifyListAdd sync.runtime_notifyListAdd
func notifyListAdd(l *notifyList) uint32 {
	// This may be called concurrently, for example, when called from
	// sync.Cond.Wait while holding a RWMutex in read mode.
	// Add returns the incremented value, so subtract 1 to get this
	// caller's ticket.
	return l.wait.Add(1) - 1
}
// notifyListWait waits for a notification. If one has been sent since
// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
//
//go:linkname notifyListWait sync.runtime_notifyListWait
func notifyListWait(l *notifyList, t uint32) {
	lockWithRank(&l.lock, lockRankNotifyList)

	// Return right away if this ticket has already been notified.
	if less(t, l.notify) {
		unlock(&l.lock)
		return
	}

	// Enqueue itself.
	s := acquireSudog()
	s.g = getg()
	// Storing this pointer so that we can trace the condvar address
	// from the blocked goroutine when checking for goroutine leaks.
	s.elem.set(unsafe.Pointer(l))
	s.g.waiting = s
	s.ticket = t
	s.releasetime = 0
	t0 := int64(0)
	if blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1
	}
	if l.tail == nil {
		l.head = s
	} else {
		l.tail.next = s
	}
	l.tail = s
	goparkunlock(&l.lock, waitReasonSyncCondWait, traceBlockCondWait, 3)
	if t0 != 0 {
		blockevent(s.releasetime-t0, 2)
	}
	// Goroutine is no longer blocked. Clear up its waiting pointer,
	// and clean up the sudog before releasing it.
	s.g.waiting = nil
	s.elem.set(nil)
	releaseSudog(s)
}
// notifyListNotifyAll notifies all entries in the list.
//
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func notifyListNotifyAll(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock.
	if l.wait.Load() == atomic.Load(&l.notify) {
		return
	}

	// Pull the list out into a local variable, waiters will be readied
	// outside the lock.
	lockWithRank(&l.lock, lockRankNotifyList)
	s := l.head
	l.head = nil
	l.tail = nil

	// Update the next ticket to be notified. We can set it to the current
	// value of wait because any previous waiters are already in the list
	// or will notice that they have already been notified when trying to
	// add themselves to the list.
	atomic.Store(&l.notify, l.wait.Load())
	unlock(&l.lock)

	// Go through the local list and ready all waiters.
	for s != nil {
		next := s.next
		s.next = nil
		// Waking a synctest-bubble goroutine from outside its bubble
		// is a fatal error; report it before readying.
		if s.g.bubble != nil && getg().bubble != s.g.bubble {
			println("semaphore wake of synctest goroutine", s.g.goid, "from outside bubble")
			fatal("semaphore wake of synctest goroutine from outside bubble")
		}
		readyWithTime(s, 4)
		s = next
	}
}
// notifyListNotifyOne notifies one entry in the list.
//
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func notifyListNotifyOne(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock at all.
	if l.wait.Load() == atomic.Load(&l.notify) {
		return
	}

	lockWithRank(&l.lock, lockRankNotifyList)

	// Re-check under the lock if we need to do anything.
	t := l.notify
	if t == l.wait.Load() {
		unlock(&l.lock)
		return
	}

	// Update the next notify ticket number.
	atomic.Store(&l.notify, t+1)

	// Try to find the g that needs to be notified.
	// If it hasn't made it to the list yet we won't find it,
	// but it won't park itself once it sees the new notify number.
	//
	// This scan looks linear but essentially always stops quickly.
	// Because g's queue separately from taking numbers,
	// there may be minor reorderings in the list, but we
	// expect the g we're looking for to be near the front.
	// The g has others in front of it on the list only to the
	// extent that it lost the race, so the iteration will not
	// be too long. This applies even when the g is missing:
	// it hasn't yet gotten to sleep and has lost the race to
	// the (few) other g's that we find on the list.
	for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next {
		if s.ticket == t {
			// Unlink s from the list before unlocking.
			n := s.next
			if p != nil {
				p.next = n
			} else {
				l.head = n
			}
			if n == nil {
				l.tail = p
			}
			unlock(&l.lock)
			s.next = nil
			if s.g.bubble != nil && getg().bubble != s.g.bubble {
				println("semaphore wake of synctest goroutine", s.g.goid, "from outside bubble")
				fatal("semaphore wake of synctest goroutine from outside bubble")
			}
			readyWithTime(s, 4)
			return
		}
	}
	unlock(&l.lock)
}
// notifyListCheck verifies, at sync package init time, that the sync
// package's copy of notifyList has the same size as the runtime's;
// a mismatch means the two mirrored definitions have drifted apart.
//
//go:linkname notifyListCheck sync.runtime_notifyListCheck
func notifyListCheck(sz uintptr) {
	want := unsafe.Sizeof(notifyList{})
	if sz == want {
		return
	}
	print("runtime: bad notifyList size - sync=", sz, " runtime=", want, "\n")
	throw("bad notifyList size")
}
// internal_sync_nanotime exposes the runtime's monotonic clock to the
// internal/sync package via linkname.
//
//go:linkname internal_sync_nanotime internal/sync.runtime_nanotime
func internal_sync_nanotime() int64 {
	return nanotime()
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
package runtime
import (
"internal/runtime/atomic"
"internal/runtime/syscall/linux"
"unsafe"
)
// prSetVMAUnsupported is set once prctl(PR_SET_VMA, ...) returns EINVAL,
// so we stop issuing the syscall on kernels that don't support it.
var prSetVMAUnsupported atomic.Bool

// setVMANameSupported reports whether we still believe the kernel
// supports PR_SET_VMA_ANON_NAME.
func setVMANameSupported() bool {
	return !prSetVMAUnsupported.Load()
}
// setVMAName calls prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, len, name)
// to label the anonymous mapping [start, start+length) with a " Go: "-prefixed
// name (visible in /proc/*/maps). It is a no-op unless the decoratemappings
// debug setting is on and the kernel supports the prctl.
func setVMAName(start unsafe.Pointer, length uintptr, name string) {
	if debug.decoratemappings == 0 || !setVMANameSupported() {
		return
	}

	// The kernel requires a NUL-terminated name; copy at most 79 bytes
	// of prefix+name and leave the final byte zero.
	var sysName [80]byte
	n := copy(sysName[:], " Go: ")
	copy(sysName[n:79], name) // leave final byte zero

	_, _, err := linux.Syscall6(linux.SYS_PRCTL, linux.PR_SET_VMA, linux.PR_SET_VMA_ANON_NAME, uintptr(start), length, uintptr(unsafe.Pointer(&sysName[0])), 0)
	if err == _EINVAL {
		// Kernel too old (or feature compiled out); don't try again.
		prSetVMAUnsupported.Store(true)
	}
	// ignore other errors
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 && (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris)
package runtime
import (
"internal/abi"
"internal/goarch"
"unsafe"
)
// dumpregs prints the amd64 general-purpose registers, rip, rflags and
// segment registers saved in the signal context c, one per line. Used
// when crashing on a fatal signal.
func dumpregs(c *sigctxt) {
	print("rax    ", hex(c.rax()), "\n")
	print("rbx    ", hex(c.rbx()), "\n")
	print("rcx    ", hex(c.rcx()), "\n")
	print("rdx    ", hex(c.rdx()), "\n")
	print("rdi    ", hex(c.rdi()), "\n")
	print("rsi    ", hex(c.rsi()), "\n")
	print("rbp    ", hex(c.rbp()), "\n")
	print("rsp    ", hex(c.rsp()), "\n")
	print("r8     ", hex(c.r8()), "\n")
	print("r9     ", hex(c.r9()), "\n")
	print("r10    ", hex(c.r10()), "\n")
	print("r11    ", hex(c.r11()), "\n")
	print("r12    ", hex(c.r12()), "\n")
	print("r13    ", hex(c.r13()), "\n")
	print("r14    ", hex(c.r14()), "\n")
	print("r15    ", hex(c.r15()), "\n")
	print("rip    ", hex(c.rip()), "\n")
	print("rflags ", hex(c.rflags()), "\n")
	print("cs     ", hex(c.cs()), "\n")
	print("fs     ", hex(c.fs()), "\n")
	print("gs     ", hex(c.gs()), "\n")
}
// Architecture-independent views of the faulting context.
// sigpc/sigsp are the PC and SP at the time of the signal; siglr is 0
// because amd64 has no link register; fault is the faulting address.
//
//go:nosplit
//go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr { return uintptr(c.rip()) }

func (c *sigctxt) setsigpc(x uint64) { c.set_rip(x) }
func (c *sigctxt) sigsp() uintptr    { return uintptr(c.rsp()) }
func (c *sigctxt) siglr() uintptr    { return 0 }
func (c *sigctxt) fault() uintptr    { return uintptr(c.sigaddr()) }
// preparePanic sets up the stack to look like a call to sigpanic.
// sig is the signal number and gp the goroutine that received it.
func (c *sigctxt) preparePanic(sig uint32, gp *g) {
	// Work around Leopard bug that doesn't set FPE_INTDIV.
	// Look at instruction to see if it is a divide.
	// Not necessary in Snow Leopard (si_code will be != 0).
	if GOOS == "darwin" && sig == _SIGFPE && gp.sigcode0 == 0 {
		pc := (*[4]byte)(unsafe.Pointer(gp.sigpc))
		i := 0
		if pc[i]&0xF0 == 0x40 { // 64-bit REX prefix
			i++
		} else if pc[i] == 0x66 { // 16-bit instruction prefix
			i++
		}
		// 0xF6/0xF7 are the opcode bytes of the x86 DIV/IDIV family.
		if pc[i] == 0xF6 || pc[i] == 0xF7 {
			gp.sigcode0 = _FPE_INTDIV
		}
	}

	pc := uintptr(c.rip())
	sp := uintptr(c.rsp())

	// In case we are panicking from external code, we need to initialize
	// Go special registers. We inject sigpanic0 (instead of sigpanic),
	// which takes care of that.
	if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
		c.pushCall(abi.FuncPCABI0(sigpanic0), pc)
	} else {
		// Not safe to push the call. Just clobber the frame.
		c.set_rip(uint64(abi.FuncPCABI0(sigpanic0)))
	}
}
// pushCall rewrites the signal context so that, on return from the
// signal, execution resumes in targetPC as if it had been called from
// resumePC: resumePC is pushed as the return address and rip is set
// to targetPC.
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
	// Make it look like we called target at resumePC.
	sp := uintptr(c.rsp())
	sp -= goarch.PtrSize
	*(*uintptr)(unsafe.Pointer(sp)) = resumePC
	c.set_rsp(uint64(sp))
	c.set_rip(uint64(targetPC))
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/goarch"
"unsafe"
)
// sigctxt bundles the two pieces of machine context delivered to a
// signal handler: the siginfo record and the opaque ucontext pointer.
type sigctxt struct {
	info *siginfo
	ctxt unsafe.Pointer
}

// regs returns the saved machine registers embedded in the ucontext.
//
//go:nosplit
//go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext {
	return (*sigcontext)(unsafe.Pointer(&(*ucontext)(c.ctxt).uc_mcontext))
}
// Accessors for the amd64 registers and siginfo fields saved in the
// signal context. Each reads or writes the corresponding sigcontext or
// siginfo field directly.
func (c *sigctxt) rax() uint64 { return c.regs().rax }
func (c *sigctxt) rbx() uint64 { return c.regs().rbx }
func (c *sigctxt) rcx() uint64 { return c.regs().rcx }
func (c *sigctxt) rdx() uint64 { return c.regs().rdx }
func (c *sigctxt) rdi() uint64 { return c.regs().rdi }
func (c *sigctxt) rsi() uint64 { return c.regs().rsi }
func (c *sigctxt) rbp() uint64 { return c.regs().rbp }
func (c *sigctxt) rsp() uint64 { return c.regs().rsp }
func (c *sigctxt) r8() uint64  { return c.regs().r8 }
func (c *sigctxt) r9() uint64  { return c.regs().r9 }
func (c *sigctxt) r10() uint64 { return c.regs().r10 }
func (c *sigctxt) r11() uint64 { return c.regs().r11 }
func (c *sigctxt) r12() uint64 { return c.regs().r12 }
func (c *sigctxt) r13() uint64 { return c.regs().r13 }
func (c *sigctxt) r14() uint64 { return c.regs().r14 }
func (c *sigctxt) r15() uint64 { return c.regs().r15 }

// rip must be nosplit because it is called from sigtrampgo before the
// G is switched to the signal stack.
//
//go:nosplit
//go:nowritebarrierrec
func (c *sigctxt) rip() uint64 { return c.regs().rip }

func (c *sigctxt) rflags() uint64  { return c.regs().eflags }
func (c *sigctxt) cs() uint64      { return uint64(c.regs().cs) }
func (c *sigctxt) fs() uint64      { return uint64(c.regs().fs) }
func (c *sigctxt) gs() uint64      { return uint64(c.regs().gs) }
func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }

func (c *sigctxt) set_rip(x uint64)     { c.regs().rip = x }
func (c *sigctxt) set_rsp(x uint64)     { c.regs().rsp = x }
func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
	// si_addr lives two pointer-words into siginfo; written via raw
	// pointer arithmetic rather than a struct field.
	*(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
// dumpSigStack prints a signal stack with the context, fpstate pointer field within that context and
// the beginning of the fpstate annotated by C/F/S respectively.
// s is a label printed first; the dump covers [sp, stackhi).
func dumpSigStack(s string, sp uintptr, stackhi uintptr, ctx uintptr) {
	println(s)
	println("SP:\t", hex(sp))
	println("ctx:\t", hex(ctx))
	// Address of the fpregs pointer field inside the ucontext's mcontext.
	fpfield := ctx + unsafe.Offsetof(ucontext{}.uc_mcontext) + unsafe.Offsetof(mcontext{}.fpregs)
	println("fpfield:\t", hex(fpfield))
	// Address the fpregs pointer actually points at.
	fpbegin := uintptr(unsafe.Pointer((&sigctxt{nil, unsafe.Pointer(ctx)}).regs().fpstate))
	println("fpstate:\t", hex(fpbegin))
	hexdumpWords(sp, stackhi, func(p uintptr, hm hexdumpMarker) {
		switch p {
		case ctx:
			hm.start()
			print("C")
			println()
		case fpfield:
			hm.start()
			print("F")
			println()
		case fpbegin:
			hm.start()
			print("S")
			println()
		}
	})
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package runtime
import (
"internal/abi"
"internal/goexperiment"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// sigTabT is the type of an entry in the global sigtable array.
// sigtable is inherently system dependent, and appears in OS-specific files,
// but sigTabT is the same for all Unixy systems.
// The sigtable array is indexed by a system signal number to get the flags
// and printable name of each signal.
type sigTabT struct {
	flags int32 // _Sig* disposition flags for this signal
	name  string
}
// os_sigpipe is the runtime side of os.sigpipe: it handles a SIGPIPE
// raised by a write, running sigpipe on the system stack.
//
//go:linkname os_sigpipe os.sigpipe
func os_sigpipe() {
	systemstack(sigpipe)
}
// signame returns the printable name of signal sig from sigtable,
// or the empty string if sig is out of range.
func signame(sig uint32) string {
	if sig < uint32(len(sigtable)) {
		return sigtable[sig].name
	}
	return ""
}
// Standard POSIX signal dispositions, as stored in sa_handler.
const (
	_SIG_DFL uintptr = 0 // default disposition
	_SIG_IGN uintptr = 1 // ignore the signal
)

// sigPreempt is the signal used for non-cooperative preemption.
//
// There's no good way to choose this signal, but there are some
// heuristics:
//
// 1. It should be a signal that's passed-through by debuggers by
// default. On Linux, this is SIGALRM, SIGURG, SIGCHLD, SIGIO,
// SIGVTALRM, SIGPROF, and SIGWINCH, plus some glibc-internal signals.
//
// 2. It shouldn't be used internally by libc in mixed Go/C binaries
// because libc may assume it's the only thing that can handle these
// signals. For example SIGCANCEL or SIGSETXID.
//
// 3. It should be a signal that can happen spuriously without
// consequences. For example, SIGALRM is a bad choice because the
// signal handler can't tell if it was caused by the real process
// alarm or not (arguably this means the signal is broken, but I
// digress). SIGUSR1 and SIGUSR2 are also bad because those are often
// used in meaningful ways by applications.
//
// 4. We need to deal with platforms without real-time signals (like
// macOS), so those are out.
//
// We use SIGURG because it meets all of these criteria, is extremely
// unlikely to be used by an application for its "real" meaning (both
// because out-of-band data is basically unused and because SIGURG
// doesn't report which socket has the condition, making it pretty
// useless), and even if it is, the application has to be ready for
// spurious SIGURG. SIGIO wouldn't be a bad choice either, but is more
// likely to be used for real.
const sigPreempt = _SIGURG
// Stores the signal handlers registered before Go installed its own.
// These signal handlers will be invoked in cases where Go doesn't want to
// handle a particular signal (e.g., signal occurred on a non-Go thread).
// See sigfwdgo for more information on when the signals are forwarded.
//
// This is read by the signal handler; accesses should use
// atomic.Loaduintptr and atomic.Storeuintptr.
var fwdSig [_NSIG]uintptr

// handlingSig is indexed by signal number and is non-zero if we are
// currently handling the signal. Or, to put it another way, whether
// the signal handler is currently set to the Go signal handler or not.
// This is uint32 rather than bool so that we can use atomic instructions.
var handlingSig [_NSIG]uint32

// channels for synchronizing signal mask updates with the signal mask
// thread (see ensureSigM/sigenable/sigdisable)
var (
	disableSigChan  chan uint32
	enableSigChan   chan uint32
	maskUpdatedChan chan struct{}
)
// init sanity-checks that sigtable covers every signal number.
func init() {
	// _NSIG is the number of signals on this operating system.
	// sigtable should describe what to do for all the possible signals.
	if len(sigtable) != _NSIG {
		print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
		throw("bad sigtable len")
	}
}
// signalsOK reports whether it is safe for signal handlers to run.
var signalsOK bool

// Initialize signals.
// Called by libpreinit so runtime may not be initialized.
//
//go:nosplit
//go:nowritebarrierrec
func initsig(preinit bool) {
	if !preinit {
		// It's now OK for signal handlers to run.
		signalsOK = true
	}

	// For c-archive/c-shared this is called by libpreinit with
	// preinit == true.
	if (isarchive || islibrary) && !preinit {
		return
	}

	for i := uint32(0); i < _NSIG; i++ {
		t := &sigtable[i]
		if t.flags == 0 || t.flags&_SigDefault != 0 {
			continue
		}

		// We don't need to use atomic operations here because
		// there shouldn't be any other goroutines running yet.
		fwdSig[i] = getsig(i)

		if !sigInstallGoHandler(i) {
			// Even if we are not installing a signal handler,
			// set SA_ONSTACK if necessary.
			if fwdSig[i] != _SIG_DFL && fwdSig[i] != _SIG_IGN {
				setsigstack(i)
			} else if fwdSig[i] == _SIG_IGN {
				sigInitIgnored(i)
			}
			continue
		}

		handlingSig[i] = 1
		setsig(i, abi.FuncPCABIInternal(sighandler))
	}
}
// sigInstallGoHandler reports whether the Go runtime should install
// its own handler for signal sig, considering inherited SIG_IGN
// dispositions, per-thread syscall signals, _SigSetStack flags, and
// c-archive/c-shared build modes.
//
//go:nosplit
//go:nowritebarrierrec
func sigInstallGoHandler(sig uint32) bool {
	// For some signals, we respect an inherited SIG_IGN handler
	// rather than insist on installing our own default handler.
	// Even these signals can be fetched using the os/signal package.
	switch sig {
	case _SIGHUP, _SIGINT:
		if atomic.Loaduintptr(&fwdSig[sig]) == _SIG_IGN {
			return false
		}
	}

	if (GOOS == "linux" || GOOS == "android") && !iscgo && sig == sigPerThreadSyscall {
		// sigPerThreadSyscall is the same signal used by glibc for
		// per-thread syscalls on Linux. We use it for the same purpose
		// in non-cgo binaries.
		return true
	}

	t := &sigtable[sig]
	if t.flags&_SigSetStack != 0 {
		return false
	}

	// When built using c-archive or c-shared, only install signal
	// handlers for synchronous signals and SIGPIPE and sigPreempt.
	if (isarchive || islibrary) && t.flags&_SigPanic == 0 && sig != _SIGPIPE && sig != sigPreempt {
		return false
	}

	return true
}
// sigenable enables the Go signal handler to catch the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.enableSignal and signal_enable.
func sigenable(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		// Route the mask update through the dedicated signal-mask
		// thread, then wait until it has been applied.
		ensureSigM()
		enableSigChan <- sig
		<-maskUpdatedChan
		if atomic.Cas(&handlingSig[sig], 0, 1) {
			// Save the current handler for forwarding before
			// installing ours.
			atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
			setsig(sig, abi.FuncPCABIInternal(sighandler))
		}
	}
}
// sigdisable disables the Go signal handler for the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.disableSignal and signal_disable.
func sigdisable(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		ensureSigM()
		disableSigChan <- sig
		<-maskUpdatedChan

		// If initsig does not install a signal handler for a
		// signal, then to go back to the state before Notify
		// we should remove the one we installed.
		if !sigInstallGoHandler(sig) {
			atomic.Store(&handlingSig[sig], 0)
			setsig(sig, atomic.Loaduintptr(&fwdSig[sig]))
		}
	}
}
// sigignore ignores the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.ignoreSignal and signal_ignore.
func sigignore(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		// Mark the Go handler as uninstalled and set SIG_IGN.
		atomic.Store(&handlingSig[sig], 0)
		setsig(sig, _SIG_IGN)
	}
}
// clearSignalHandlers clears all signal handlers that are not ignored
// back to the default. This is called by the child after a fork, so that
// we can enable the signal mask for the exec without worrying about
// running a signal handler in the child.
//
//go:nosplit
//go:nowritebarrierrec
func clearSignalHandlers() {
	for i := uint32(0); i < _NSIG; i++ {
		if atomic.Load(&handlingSig[i]) != 0 {
			setsig(i, _SIG_DFL)
		}
	}
}
// setProcessCPUProfilerTimer is called when the profiling timer changes.
// It is called with prof.signalLock held. hz is the new timer, and is 0 if
// profiling is being disabled. Enable or disable the signal as
// required for -buildmode=c-archive.
func setProcessCPUProfilerTimer(hz int32) {
	if hz != 0 {
		// Enable the Go signal handler if not enabled.
		if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
			h := getsig(_SIGPROF)
			// If no signal handler was installed before, then we record
			// _SIG_IGN here. When we turn off profiling (below) we'll start
			// ignoring SIGPROF signals. We do this, rather than change
			// to SIG_DFL, because there may be a pending SIGPROF
			// signal that has not yet been delivered to some other thread.
			// If we change to SIG_DFL when turning off profiling, the
			// program will crash when that SIGPROF is delivered. We assume
			// that programs that use profiling don't want to crash on a
			// stray SIGPROF. See issue 19320.

			// We do the change here instead of when turning off profiling,
			// because there we may race with a signal handler running
			// concurrently, in particular, sigfwdgo may observe _SIG_DFL and
			// die. See issue 43828.
			if h == _SIG_DFL {
				h = _SIG_IGN
			}
			atomic.Storeuintptr(&fwdSig[_SIGPROF], h)
			setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
		}

		// Arm the process-wide profiling interval timer at hz ticks/sec.
		var it itimerval
		it.it_interval.tv_sec = 0
		it.it_interval.set_usec(1000000 / hz)
		it.it_value = it.it_interval
		setitimer(_ITIMER_PROF, &it, nil)
	} else {
		// Disarm the timer.
		setitimer(_ITIMER_PROF, &itimerval{}, nil)

		// If the Go signal handler should be disabled by default,
		// switch back to the signal handler that was installed
		// when we enabled profiling. We don't try to handle the case
		// of a program that changes the SIGPROF handler while Go
		// profiling is enabled.
		if !sigInstallGoHandler(_SIGPROF) {
			if atomic.Cas(&handlingSig[_SIGPROF], 1, 0) {
				h := atomic.Loaduintptr(&fwdSig[_SIGPROF])
				setsig(_SIGPROF, h)
			}
		}
	}
}
// setThreadCPUProfilerHz makes any thread-specific changes required to
// implement profiling at a rate of hz.
// No changes required on Unix systems when using setitimer.
func setThreadCPUProfilerHz(hz int32) {
	getg().m.profilehz = hz
}
// sigpipe delivers a SIGPIPE: if the signal is neither ignored nor
// consumed by an os/signal listener (sigsend), the process dies with
// the default SIGPIPE disposition.
func sigpipe() {
	if signal_ignored(_SIGPIPE) || sigsend(_SIGPIPE) {
		return
	}
	dieFromSignal(_SIGPIPE)
}
// doSigPreempt handles a preemption signal on gp.
func doSigPreempt(gp *g, ctxt *sigctxt) {
	// Check if this G wants to be preempted and is safe to
	// preempt.
	if wantAsyncPreempt(gp) {
		if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
			// Adjust the PC and inject a call to asyncPreempt.
			ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
		}
	}

	// Acknowledge the preemption.
	gp.m.preemptGen.Add(1)
	gp.m.signalPending.Store(0)

	if GOOS == "darwin" || GOOS == "ios" {
		pendingPreemptSignals.Add(-1)
	}
}
// preemptMSupported reports that signal-based preemption of Ms is
// available on this platform.
const preemptMSupported = true

// preemptM sends a preemption request to mp. This request may be
// handled asynchronously and may be coalesced with other requests to
// the M. When the request is received, if the running G or P are
// marked for preemption and the goroutine is at an asynchronous
// safe-point, it will preempt the goroutine. It always atomically
// increments mp.preemptGen after handling a preemption request.
func preemptM(mp *m) {
	// On Darwin, don't try to preempt threads during exec.
	// Issue #41702.
	if GOOS == "darwin" || GOOS == "ios" {
		execLock.rlock()
	}

	if mp.signalPending.CompareAndSwap(0, 1) {
		if GOOS == "darwin" || GOOS == "ios" {
			pendingPreemptSignals.Add(1)
		}

		// If multiple threads are preempting the same M, it may send many
		// signals to the same M such that it hardly make progress, causing
		// live-lock problem. Apparently this could happen on darwin. See
		// issue #37741.
		// Only send a signal if there isn't already one pending.
		signalM(mp, sigPreempt)
	}

	if GOOS == "darwin" || GOOS == "ios" {
		execLock.runlock()
	}
}
// sigFetchG fetches the value of G safely when running in a signal handler.
// On some architectures, the g value may be clobbered when running in a VDSO.
// See issue #32912.
//
//go:nosplit
func sigFetchG(c *sigctxt) *g {
	switch GOARCH {
	case "arm", "arm64", "loong64", "ppc64", "ppc64le", "riscv64", "s390x":
		if !iscgo && inVDSOPage(c.sigpc()) {
			// When using cgo, we save the g on TLS and load it from there
			// in sigtramp. Just use that.
			// Otherwise, before making a VDSO call we save the g to the
			// bottom of the signal stack. Fetch from there.
			// TODO: in efence mode, stack is sysAlloc'd, so this wouldn't
			// work.
			sp := sys.GetCallerSP()
			s := spanOf(sp)
			// Only trust the saved g if sp is within a manually-managed
			// (signal stack) span.
			if s != nil && s.state.get() == mSpanManual && s.base() < sp && sp < s.limit {
				gp := *(**g)(unsafe.Pointer(s.base()))
				return gp
			}
			return nil
		}
	}
	return getg()
}
// sigtrampgo is called from the signal handler function, sigtramp,
// written in assembly code.
// This is called by the signal handler, and the world may be stopped.
//
// It must be nosplit because getg() is still the G that was running
// (if any) when the signal was delivered, but it's (usually) called
// on the gsignal stack. Until this switches the G to gsignal, the
// stack bounds check won't work.
//
//go:nosplit
//go:nowritebarrierrec
func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
	// If a pre-Go handler wants this signal, forward it and stop.
	if sigfwdgo(sig, info, ctx) {
		return
	}
	c := &sigctxt{info, ctx}
	gp := sigFetchG(c)
	setg(gp)
	// No usable Go g: the signal arrived on a non-Go thread (or an
	// extra M currently running C code). Handle the few signals we
	// can without a g, then hand the rest to badsignal.
	if gp == nil || (gp.m != nil && gp.m.isExtraInC) {
		if sig == _SIGPROF {
			// Some platforms (Linux) have per-thread timers, which we use in
			// combination with the process-wide timer. Avoid double-counting.
			if validSIGPROF(nil, c) {
				sigprofNonGoPC(c.sigpc())
			}
			return
		}
		if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
			// This is probably a signal from preemptM sent
			// while executing Go code but received while
			// executing non-Go code.
			// We got past sigfwdgo, so we know that there is
			// no non-Go signal handler for sigPreempt.
			// The default behavior for sigPreempt is to ignore
			// the signal, so badsignal will be a no-op anyway.
			if GOOS == "darwin" || GOOS == "ios" {
				pendingPreemptSignals.Add(-1)
			}
			return
		}
		c.fixsigcode(sig)
		// Set g to nil here and badsignal will use g0 by needm.
		// TODO: reuse the current m here by using the gsignal and adjustSignalStack,
		// since the current g maybe a normal goroutine and actually running on the signal stack,
		// it may hit stack split that is not expected here.
		if gp != nil {
			setg(nil)
		}
		badsignal(uintptr(sig), c)
		// Restore g
		if gp != nil {
			setg(gp)
		}
		return
	}

	// Switch to the gsignal goroutine for the rest of the handler.
	setg(gp.m.gsignal)

	// If some non-Go code called sigaltstack, adjust.
	var gsignalStack gsignalStack
	setStack := adjustSignalStack(sig, gp.m, &gsignalStack)
	if setStack {
		gp.m.gsignal.stktopsp = sys.GetCallerSP()
	}

	// stackguard0 is set to stackFork during fork; signals must not
	// arrive then (see signalDuringFork).
	if gp.stackguard0 == stackFork {
		signalDuringFork(sig)
	}

	c.fixsigcode(sig)
	sighandler(sig, info, ctx, gp)
	// NOTE(review): gp.secret / signalSecret appear to come from the
	// RuntimeSecret experiment — confirm semantics against goexperiment docs.
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		gp.m.signalSecret = true
	}
	setg(gp)
	if setStack {
		restoreGsignalStack(&gsignalStack)
	}
}
// If the signal handler receives a SIGPROF signal on a non-Go thread,
// it tries to collect a traceback into sigprofCallers.
// sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
// The assembly signal trampolines fill sigprofCallers; sigprofNonGo
// consumes it and clears sigprofCallersUse when done.
var sigprofCallers cgoCallers
var sigprofCallersUse uint32
// sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
// and the signal handler collected a stack trace in sigprofCallers.
// When this is called, sigprofCallersUse will be non-zero.
// g is nil, and what we can do is very limited.
//
// It is called from the signal handling functions written in assembly code that
// are active for cgo programs, cgoSigtramp and sigprofNonGoWrapper, which have
// not verified that the SIGPROF delivery corresponds to the best available
// profiling source for this thread.
//
//go:nosplit
//go:nowritebarrierrec
func sigprofNonGo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
	if prof.hz.Load() != 0 {
		c := &sigctxt{info, ctx}
		// Some platforms (Linux) have per-thread timers, which we use in
		// combination with the process-wide timer. Avoid double-counting.
		if validSIGPROF(nil, c) {
			// Find the length of the collected traceback
			// (zero-terminated within the fixed-size array).
			n := 0
			for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
				n++
			}
			cpuprof.addNonGo(sigprofCallers[:n])
		}
	}

	// Release sigprofCallers for reuse by the assembly trampoline.
	atomic.Store(&sigprofCallersUse, 0)
}
// sigprofNonGoPC is called when a profiling signal arrived on a
// non-Go thread and we have a single PC value, not a stack trace.
// g is nil, and what we can do is very limited.
//
//go:nosplit
//go:nowritebarrierrec
func sigprofNonGoPC(pc uintptr) {
	if prof.hz.Load() == 0 {
		// Profiling is off; drop the sample.
		return
	}
	// Record a two-frame stack: the raw PC plus a synthetic
	// _ExternalCode frame marking this as non-Go execution.
	stk := []uintptr{
		pc,
		abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
	}
	cpuprof.addNonGo(stk)
}
// adjustSignalStack adjusts the current stack guard based on the
// stack pointer that is actually in use while handling a signal.
// We do this in case some non-Go code called sigaltstack.
// This reports whether the stack was adjusted, and if so stores the old
// signal stack in *gsigstack.
//
//go:nosplit
func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
	// The address of a local is (approximately) the SP in use here.
	sp := uintptr(unsafe.Pointer(&sig))
	// Already on the m's gsignal stack: nothing to adjust.
	if sp >= mp.gsignal.stack.lo && sp < mp.gsignal.stack.hi {
		return false
	}

	var st stackt
	sigaltstack(nil, &st)
	stsp := uintptr(unsafe.Pointer(st.ss_sp))
	// Running on an alternate stack installed by non-Go code:
	// temporarily make it the gsignal stack.
	if st.ss_flags&_SS_DISABLE == 0 && sp >= stsp && sp < stsp+st.ss_size {
		setGsignalStack(&st, gsigStack)
		return true
	}

	if sp >= mp.g0.stack.lo && sp < mp.g0.stack.hi {
		// The signal was delivered on the g0 stack.
		// This can happen when linked with C code
		// using the thread sanitizer, which collects
		// signals then delivers them itself by calling
		// the signal handler directly when C code,
		// including C code called via cgo, calls a
		// TSAN-intercepted function such as malloc.
		//
		// We check this condition last as g0.stack.lo
		// may be not very accurate (see mstart).
		st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo}
		setSignalstackSP(&st, mp.g0.stack.lo)
		setGsignalStack(&st, gsigStack)
		return true
	}

	// sp is not within gsignal stack, g0 stack, or sigaltstack. Bad.
	// Call indirectly to avoid nosplit stack overflow on OpenBSD.
	adjustSignalStack2Indirect(sig, sp, mp, st.ss_flags&_SS_DISABLE != 0)
	return false
}
// adjustSignalStack2Indirect exists so adjustSignalStack can call
// adjustSignalStack2 through a variable, avoiding a nosplit stack
// overflow on OpenBSD (see the call site).
var adjustSignalStack2Indirect = adjustSignalStack2

// adjustSignalStack2 reports a fatal sp-not-on-any-known-stack
// condition. It temporarily acquires an M (needm/dropm) so the
// throwing paths have a g to run on.
//
//go:nosplit
func adjustSignalStack2(sig uint32, sp uintptr, mp *m, ssDisable bool) {
	setg(nil)
	needm(true)
	if ssDisable {
		noSignalStack(sig)
	} else {
		sigNotOnStack(sig, sp, mp)
	}
	dropm()
}
// crashing is the number of m's we have waited for when implementing
// GOTRACEBACK=crash when a signal is received.
var crashing atomic.Int32
// testSigtrap and testSigusr1 are used by the runtime tests. If
// non-nil, it is called on SIGTRAP/SIGUSR1. If it returns true, the
// normal behavior on this signal is suppressed.
var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
var testSigusr1 func(gp *g) bool
// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065.
var sigsysIgnored uint32

// ignoreSIGSYS tells the signal handler to drop seccomp-originated
// SIGSYS. Linknamed into os; do not rename.
//
//go:linkname ignoreSIGSYS os.ignoreSIGSYS
func ignoreSIGSYS() {
	atomic.Store(&sigsysIgnored, 1)
}

// restoreSIGSYS undoes ignoreSIGSYS. Linknamed into os; do not rename.
//
//go:linkname restoreSIGSYS os.restoreSIGSYS
func restoreSIGSYS() {
	atomic.Store(&sigsysIgnored, 0)
}
// sighandler is invoked when a signal occurs. The global g will be
// set to a gsignal goroutine and we will be running on the alternate
// signal stack. The parameter gp will be the value of the global g
// when the signal occurred. The sig, info, and ctxt parameters are
// from the system signal handler: they are the parameters passed when
// the SA is passed to the sigaction system call.
//
// The garbage collector may have stopped the world, so write barriers
// are not allowed.
//
//go:nowritebarrierrec
func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
	// The g executing the signal handler. This is almost always
	// mp.gsignal. See delayedSignal for an exception.
	gsignal := getg()
	mp := gsignal.m
	c := &sigctxt{info, ctxt}

	// Cgo TSAN (not the Go race detector) intercepts signals and calls the
	// signal handler at a later time. When the signal handler is called, the
	// memory may have changed, but the signal context remains old. The
	// unmatched signal context and memory makes it unsafe to unwind or inspect
	// the stack. So we ignore delayed non-fatal signals that will cause a stack
	// inspection (profiling signal and preemption signal).
	// cgo_yield is only non-nil for TSAN, and is specifically used to trigger
	// signal delivery. We use that as an indicator of delayed signals.
	// For delayed signals, the handler is called on the g0 stack (see
	// adjustSignalStack).
	delayedSignal := *cgo_yield != nil && mp != nil && gsignal.stack == mp.g0.stack

	if sig == _SIGPROF {
		// Some platforms (Linux) have per-thread timers, which we use in
		// combination with the process-wide timer. Avoid double-counting.
		if !delayedSignal && validSIGPROF(mp, c) {
			sigprof(c.sigpc(), c.sigsp(), c.siglr(), gp, mp)
		}
		return
	}

	// Test hooks may swallow SIGTRAP/SIGUSR1 entirely.
	if sig == _SIGTRAP && testSigtrap != nil && testSigtrap(info, (*sigctxt)(noescape(unsafe.Pointer(c))), gp) {
		return
	}

	if sig == _SIGUSR1 && testSigusr1 != nil && testSigusr1(gp) {
		return
	}

	if (GOOS == "linux" || GOOS == "android") && sig == sigPerThreadSyscall {
		// sigPerThreadSyscall is the same signal used by glibc for
		// per-thread syscalls on Linux. We use it for the same purpose
		// in non-cgo binaries. Since this signal is not _SigNotify,
		// there is nothing more to do once we run the syscall.
		runPerThreadSyscall()
		return
	}

	if sig == sigPreempt && debug.asyncpreemptoff == 0 && !delayedSignal {
		// Might be a preemption signal.
		doSigPreempt(gp, c)
		// Even if this was definitely a preemption signal, it
		// may have been coalesced with another signal, so we
		// still let it through to the application.
	}

	// Decide the disposition from the signal table; signals outside
	// the table are treated as fatal (_SigThrow).
	flags := int32(_SigThrow)
	if sig < uint32(len(sigtable)) {
		flags = sigtable[sig].flags
	}
	if !c.sigFromUser() && flags&_SigPanic != 0 && (gp.throwsplit || gp != mp.curg) {
		// We can't safely sigpanic because it may grow the
		// stack. Abort in the signal handler instead.
		//
		// Also don't inject a sigpanic if we are not on a
		// user G stack. Either we're in the runtime, or we're
		// running C code. Either way we cannot recover.
		flags = _SigThrow
	}
	if isAbortPC(c.sigpc()) {
		// On many architectures, the abort function just
		// causes a memory fault. Don't turn that into a panic.
		flags = _SigThrow
	}
	if !c.sigFromUser() && flags&_SigPanic != 0 {
		// The signal is going to cause a panic.
		// Arrange the stack so that it looks like the point
		// where the signal occurred made a call to the
		// function sigpanic. Then set the PC to sigpanic.

		// Have to pass arguments out of band since
		// augmenting the stack frame would break
		// the unwinding code.
		gp.sig = sig
		gp.sigcode0 = uintptr(c.sigcode())
		gp.sigcode1 = c.fault()
		gp.sigpc = c.sigpc()

		c.preparePanic(sig, gp)
		return
	}

	// Queue the signal for os/signal delivery if anyone is listening.
	if c.sigFromUser() || flags&_SigNotify != 0 {
		if sigsend(sig) {
			return
		}
	}

	if c.sigFromUser() && signal_ignored(sig) {
		return
	}

	// Drop seccomp SIGSYS while os.ignoreSIGSYS is in effect (issue #69065).
	if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 {
		return
	}

	if flags&_SigKill != 0 {
		dieFromSignal(sig)
	}

	// _SigThrow means that we should exit now.
	// If we get here with _SigPanic, it means that the signal
	// was sent to us by a program (c.sigFromUser() is true);
	// in that case, if we didn't handle it in sigsend, we exit now.
	if flags&(_SigThrow|_SigPanic) == 0 {
		return
	}

	mp.throwing = throwTypeRuntime
	mp.caughtsig.set(gp)

	if crashing.Load() == 0 {
		startpanic_m()
	}

	gp = fatalsignal(sig, c, gp, mp)

	level, _, docrash := gotraceback()
	if level > 0 {
		goroutineheader(gp)
		tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp)
		if crashing.Load() > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning {
			// tracebackothers on original m skipped this one; trace it now.
			goroutineheader(mp.curg)
			traceback(^uintptr(0), ^uintptr(0), 0, mp.curg)
		} else if crashing.Load() == 0 {
			tracebackothers(gp)
			print("\n")
		}
		dumpregs(c)
	}

	if docrash {
		// Poll every 5ms; the watchdog bounds the total wait at
		// 2000 * 5000µs = 10 seconds (reset on progress, see #64752).
		var crashSleepMicros uint32 = 5000
		var watchdogTimeoutMicros uint32 = 2000 * crashSleepMicros

		isCrashThread := false
		if crashing.CompareAndSwap(0, 1) {
			isCrashThread = true
		} else {
			crashing.Add(1)
		}
		if crashing.Load() < mcount()-int32(extraMLength.Load()) {
			// There are other m's that need to dump their stacks.
			// Relay SIGQUIT to the next m by sending it to the current process.
			// All m's that have already received SIGQUIT have signal masks blocking
			// receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
			// The first m will wait until all ms received the SIGQUIT, then crash/exit.
			// Just in case the relaying gets botched, each m involved in
			// the relay sleeps for 5 seconds and then does the crash/exit itself.
			// The faulting m is crashing first so it is the faulting thread in the core dump (see issue #63277):
			// in expected operation, the first m will wait until the last m has received the SIGQUIT,
			// and then run crash/exit and the process is gone.
			// However, if it spends more than 10 seconds to send SIGQUIT to all ms,
			// any of ms may crash/exit the process after waiting for 10 seconds.
			print("\n-----\n\n")
			raiseproc(_SIGQUIT)
		}
		if isCrashThread {
			// Sleep for short intervals so that we can crash quickly after all ms have received SIGQUIT.
			// Reset the timer whenever we see more ms received SIGQUIT
			// to make it have enough time to crash (see issue #64752).
			timeout := watchdogTimeoutMicros
			maxCrashing := crashing.Load()
			for timeout > 0 && (crashing.Load() < mcount()-int32(extraMLength.Load())) {
				usleep(crashSleepMicros)
				timeout -= crashSleepMicros

				if c := crashing.Load(); c > maxCrashing {
					// We make progress, so reset the watchdog timeout
					maxCrashing = c
					timeout = watchdogTimeoutMicros
				}
			}
		} else {
			// Non-crash threads wait until the crashing count stops
			// growing, then fall through to crash themselves.
			maxCrashing := int32(0)
			c := crashing.Load()
			for c > maxCrashing {
				maxCrashing = c
				usleep(watchdogTimeoutMicros)
				c = crashing.Load()
			}
		}
		printDebugLog()
		crash()
	}

	printDebugLog()

	exit(2)
}
// fatalsignal prints the fatal-signal banner (signal name, PC, m id,
// sigcode, fault address, and for SIGILL/SIGFPE the instruction bytes)
// and returns the g whose stack should be tracebacked — mp.curg when
// the signal arrived during cgo execution on g0, otherwise gp unchanged.
// In secure mode it exits immediately after printing the signal name.
func fatalsignal(sig uint32, c *sigctxt, gp *g, mp *m) *g {
	if sig < uint32(len(sigtable)) {
		print(sigtable[sig].name, "\n")
	} else {
		print("Signal ", sig, "\n")
	}

	if isSecureMode() {
		exit(2)
	}

	print("PC=", hex(c.sigpc()), " m=", mp.id, " sigcode=", c.sigcode())
	if sig == _SIGSEGV || sig == _SIGBUS {
		print(" addr=", hex(c.fault()))
	}
	print("\n")
	if mp.incgo && gp == mp.g0 && mp.curg != nil {
		print("signal arrived during cgo execution\n")
		// Switch to curg so that we get a traceback of the Go code
		// leading up to the cgocall, which switched from curg to g0.
		gp = mp.curg
	}
	if sig == _SIGILL || sig == _SIGFPE {
		// It would be nice to know how long the instruction is.
		// Unfortunately, that's complicated to do in general (mostly for x86
		// and s390x, but other archs have non-standard instruction lengths also).
		// Opt to print 16 bytes, which covers most instructions.
		const maxN = 16
		n := uintptr(maxN)
		// We have to be careful, though. If we're near the end of
		// a page and the following page isn't mapped, we could
		// segfault. So make sure we don't straddle a page (even though
		// that could lead to printing an incomplete instruction).
		// We're assuming here we can read at least the page containing the PC.
		// I suppose it is possible that the page is mapped executable but not readable?
		pc := c.sigpc()
		if n > physPageSize-pc%physPageSize {
			n = physPageSize - pc%physPageSize
		}
		print("instruction bytes:")
		b := (*[maxN]byte)(unsafe.Pointer(pc))
		for i := uintptr(0); i < n; i++ {
			print(" ", hex(b[i]))
		}
		println()
	}
	print("\n")
	return gp
}
// sigpanic turns a synchronous signal into a run-time panic.
// If the signal handler sees a synchronous panic, it arranges the
// stack to look like the function where the signal occurred called
// sigpanic, sets the signal's PC value to sigpanic, and returns from
// the signal handler. The effect is that the program will act as
// though the function that got the signal simply called sigpanic
// instead.
//
// This must NOT be nosplit because the linker doesn't know where
// sigpanic calls can be injected.
//
// The signal handler must not inject a call to sigpanic if
// getg().throwsplit, since sigpanic may need to grow the stack.
//
// This is exported via linkname to assembly in runtime/cgo.
//
//go:linkname sigpanic
func sigpanic() {
	gp := getg()
	if !canpanic() {
		throw("unexpected signal during runtime execution")
	}

	// gp.sig/sigcode0/sigcode1 were stashed by sighandler before it
	// redirected the PC here (arguments passed out of band).
	switch gp.sig {
	case _SIGBUS:
		// Low fault addresses are treated as nil-pointer dereferences.
		if gp.sigcode0 == _BUS_ADRERR && gp.sigcode1 < 0x1000 {
			panicmem()
		}
		// Support runtime/debug.SetPanicOnFault.
		if gp.paniconfault {
			panicmemAddr(gp.sigcode1)
		}
		print("unexpected fault address ", hex(gp.sigcode1), "\n")
		throw("fault")
	case _SIGSEGV:
		if (gp.sigcode0 == 0 || gp.sigcode0 == _SEGV_MAPERR || gp.sigcode0 == _SEGV_ACCERR) && gp.sigcode1 < 0x1000 {
			panicmem()
		}
		// Support runtime/debug.SetPanicOnFault.
		if gp.paniconfault {
			panicmemAddr(gp.sigcode1)
		}
		if inUserArenaChunk(gp.sigcode1) {
			// We could check that the arena chunk is explicitly set to fault,
			// but the fact that we faulted on accessing it is enough to prove
			// that it is.
			print("accessed data from freed user arena ", hex(gp.sigcode1), "\n")
		} else {
			print("unexpected fault address ", hex(gp.sigcode1), "\n")
		}
		throw("fault")
	case _SIGFPE:
		switch gp.sigcode0 {
		case _FPE_INTDIV:
			panicdivide()
		case _FPE_INTOVF:
			panicoverflow()
		}
		panicfloat()
	}

	if gp.sig >= uint32(len(sigtable)) {
		// can't happen: we looked up gp.sig in sigtable to decide to call sigpanic
		throw("unexpected signal value")
	}
	panic(errorString(sigtable[gp.sig].name))
}
// dieFromSignal kills the program with a signal.
// This provides the expected exit status for the shell.
// This is only called with fatal signals expected to kill the process.
//
//go:nosplit
//go:nowritebarrierrec
func dieFromSignal(sig uint32) {
	unblocksig(sig)
	// Mark the signal as unhandled to ensure it is forwarded.
	atomic.Store(&handlingSig[sig], 0)
	raise(sig)

	// That should have killed us. On some systems, though, raise
	// sends the signal to the whole process rather than to just
	// the current thread, which means that the signal may not yet
	// have been delivered. Give other threads a chance to run and
	// pick up the signal.
	osyield()
	osyield()
	osyield()

	// If that didn't work, try _SIG_DFL.
	setsig(sig, _SIG_DFL)
	raise(sig)

	osyield()
	osyield()
	osyield()

	// If we are still somehow running, just exit with the wrong status.
	exit(2)
}
// raisebadsignal is called when a signal is received on a non-Go
// thread, and the Go program does not want to handle it (that is, the
// program has not called os/signal.Notify for the signal).
func raisebadsignal(sig uint32, c *sigctxt) {
	if sig == _SIGPROF {
		// Ignore profiling signals that arrive on non-Go threads.
		return
	}

	// Look up the pre-Go handler (or default) for this signal.
	var handler uintptr
	var flags int32
	if sig >= _NSIG {
		handler = _SIG_DFL
	} else {
		handler = atomic.Loaduintptr(&fwdSig[sig])
		flags = sigtable[sig].flags
	}

	// If the signal is ignored, raising the signal is no-op.
	if handler == _SIG_IGN || (handler == _SIG_DFL && flags&_SigIgn != 0) {
		return
	}

	// Reset the signal handler and raise the signal.
	// We are currently running inside a signal handler, so the
	// signal is blocked. We need to unblock it before raising the
	// signal, or the signal we raise will be ignored until we return
	// from the signal handler. We know that the signal was unblocked
	// before entering the handler, or else we would not have received
	// it. That means that we don't have to worry about blocking it
	// again.
	unblocksig(sig)
	setsig(sig, handler)

	// If we're linked into a non-Go program we want to try to
	// avoid modifying the original context in which the signal
	// was raised. If the handler is the default, we know it
	// is non-recoverable, so we don't have to worry about
	// re-installing sighandler. At this point we can just
	// return and the signal will be re-raised and caught by
	// the default handler with the correct context.
	//
	// On FreeBSD, the libthr sigaction code prevents
	// this from working so we fall through to raise.
	if GOOS != "freebsd" && (isarchive || islibrary) && handler == _SIG_DFL && !c.sigFromUser() {
		return
	}

	raise(sig)

	// Give the signal a chance to be delivered.
	// In almost all real cases the program is about to crash,
	// so sleeping here is not a waste of time.
	usleep(1000)

	// If the signal didn't cause the program to exit, restore the
	// Go signal handler and carry on.
	//
	// We may receive another instance of the signal before we
	// restore the Go handler, but that is not so bad: we know
	// that the Go program has been ignoring the signal.
	setsig(sig, abi.FuncPCABIInternal(sighandler))
}
// crash aborts the process with SIGABRT so that GOTRACEBACK=crash
// produces a core dump with the expected signal status.
//
//go:nosplit
func crash() {
	dieFromSignal(_SIGABRT)
}
// ensureSigM starts one global, sleeping thread to make sure at least one thread
// is available to catch signals enabled for os/signal.
// It is idempotent: a non-nil maskUpdatedChan means the thread already exists.
func ensureSigM() {
	if maskUpdatedChan != nil {
		return
	}
	maskUpdatedChan = make(chan struct{})
	disableSigChan = make(chan uint32)
	enableSigChan = make(chan uint32)
	go func() {
		// Signal masks are per-thread, so make sure this goroutine stays on one
		// thread.
		LockOSThread()
		defer UnlockOSThread()
		// The sigBlocked mask contains the signals not active for os/signal,
		// initially all signals except the essential. When signal.Notify()/Stop is called,
		// sigenable/sigdisable in turn notify this thread to update its signal
		// mask accordingly.
		sigBlocked := sigset_all
		for i := range sigtable {
			if !blockableSig(uint32(i)) {
				sigdelset(&sigBlocked, i)
			}
		}
		sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
		for {
			// Wait for an enable/disable request, update the mask,
			// then acknowledge on maskUpdatedChan so the requester
			// knows the mask change has taken effect.
			select {
			case sig := <-enableSigChan:
				if sig > 0 {
					sigdelset(&sigBlocked, int(sig))
				}
			case sig := <-disableSigChan:
				if sig > 0 && blockableSig(sig) {
					sigaddset(&sigBlocked, int(sig))
				}
			}
			sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
			maskUpdatedChan <- struct{}{}
		}
	}()
}
// noSignalStack is called when we receive a signal when there is no signal stack.
// This can only happen if non-Go code calls sigaltstack to disable the
// signal stack. It aborts the process.
func noSignalStack(sig uint32) {
	println("signal", sig, "received on thread with no signal stack")
	throw("non-Go code disabled sigaltstack")
}
// sigNotOnStack is called if we receive a signal when there is a signal stack
// but we are not on it. This can only happen if non-Go code called
// sigaction without setting the SS_ONSTACK flag. It prints the known
// stack ranges plus the offending sp, then aborts.
func sigNotOnStack(sig uint32, sp uintptr, mp *m) {
	println("signal", sig, "received but handler not on signal stack")
	print("mp.gsignal stack [", hex(mp.gsignal.stack.lo), " ", hex(mp.gsignal.stack.hi), "], ")
	print("mp.g0 stack [", hex(mp.g0.stack.lo), " ", hex(mp.g0.stack.hi), "], sp=", hex(sp), "\n")
	throw("non-Go code set up signal handler without SA_ONSTACK flag")
}
// signalDuringFork is called if we receive a signal while doing a fork.
// We do not want signals at that time, as a signal sent to the process
// group may be delivered to the child process, causing confusion.
// This should never be called, because we block signals across the fork;
// this function is just a safety check. See issue 18600 for background.
func signalDuringFork(sig uint32) {
	println("signal", sig, "received during fork")
	throw("signal received during fork")
}
// badsignal handles a signal delivered to a thread with no m or g.
// This runs on a foreign stack, without an m or a g. No stack split.
//
//go:nosplit
//go:norace
//go:nowritebarrierrec
func badsignal(sig uintptr, c *sigctxt) {
	if !iscgo && !cgoHasExtraM {
		// There is no extra M. needm will not be able to grab
		// an M. Instead of hanging, just crash.
		// Cannot call split-stack function as there is no G.
		writeErrStr("fatal: bad g in signal handler\n")
		exit(2)
		// Deliberate wild store in case exit somehow returns:
		// guarantees a crash rather than continuing without a g.
		*(*uintptr)(unsafe.Pointer(uintptr(123))) = 2
	}
	// Borrow an extra M so we have a g to run sigsend/raisebadsignal on.
	needm(true)
	if !sigsend(uint32(sig)) {
		// A foreign thread received the signal sig, and the
		// Go code does not want to handle it.
		raisebadsignal(uint32(sig), c)
	}
	dropm()
}
// sigfwd invokes the signal handler fn (a pre-Go handler saved in
// fwdSig) with the raw signal arguments. Implemented in assembly.
//
//go:noescape
func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
// sigfwdgo determines if the signal should be handled by Go and if not, forwards the
// signal to the handler that was installed before Go's. Returns whether the
// signal was forwarded.
// This is called by the signal handler, and the world may be stopped.
//
//go:nosplit
//go:nowritebarrierrec
func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
	if sig >= uint32(len(sigtable)) {
		return false
	}
	fwdFn := atomic.Loaduintptr(&fwdSig[sig])
	flags := sigtable[sig].flags

	// If we aren't handling the signal, forward it.
	if atomic.Load(&handlingSig[sig]) == 0 || !signalsOK {
		// If the signal is ignored, doing nothing is the same as forwarding.
		if fwdFn == _SIG_IGN || (fwdFn == _SIG_DFL && flags&_SigIgn != 0) {
			return true
		}
		// We are not handling the signal and there is no other handler to forward to.
		// Crash with the default behavior.
		if fwdFn == _SIG_DFL {
			setsig(sig, _SIG_DFL)
			dieFromSignal(sig)
			return false
		}

		sigfwd(fwdFn, sig, info, ctx)
		return true
	}

	// This function and its caller sigtrampgo assumes SIGPIPE is delivered on the
	// originating thread. This property does not hold on macOS (golang.org/issue/33384),
	// so we have no choice but to ignore SIGPIPE.
	if (GOOS == "darwin" || GOOS == "ios") && sig == _SIGPIPE {
		return true
	}

	// If there is no handler to forward to, no need to forward.
	if fwdFn == _SIG_DFL {
		return false
	}

	c := &sigctxt{info, ctx}
	// Only forward synchronous signals and SIGPIPE.
	// Unfortunately, user generated SIGPIPEs will also be forwarded, because si_code
	// is set to _SI_USER even for a SIGPIPE raised from a write to a closed socket
	// or pipe.
	if (c.sigFromUser() || flags&_SigPanic == 0) && sig != _SIGPIPE {
		return false
	}
	// Determine if the signal occurred inside Go code. We test that:
	//   (1) we weren't in VDSO page,
	//   (2) we were in a goroutine (i.e., m.curg != nil), and
	//   (3) we weren't in CGO.
	//   (4) we weren't in dropped extra m.
	gp := sigFetchG(c)
	if gp != nil && gp.m != nil && gp.m.curg != nil && !gp.m.isExtraInC && !gp.m.incgo {
		return false
	}

	// Signal not handled by Go, forward it.
	if fwdFn != _SIG_IGN {
		sigfwd(fwdFn, sig, info, ctx)
	}

	return true
}
// sigsave saves the current thread's signal mask into *p.
// This is used to preserve the non-Go signal mask when a non-Go
// thread calls a Go function.
// This is nosplit and nowritebarrierrec because it is called by needm
// which may be called on a non-Go thread with no g available.
//
//go:nosplit
//go:nowritebarrierrec
func sigsave(p *sigset) {
	// With a nil new-mask argument, sigprocmask only reads the
	// current mask into p.
	sigprocmask(_SIG_SETMASK, nil, p)
}
// msigrestore sets the current thread's signal mask to sigmask.
// This is used to restore the non-Go signal mask when a non-Go thread
// calls a Go function.
// This is nosplit and nowritebarrierrec because it is called by dropm
// after g has been cleared.
//
//go:nosplit
//go:nowritebarrierrec
func msigrestore(sigmask sigset) {
	sigprocmask(_SIG_SETMASK, &sigmask, nil)
}
// sigsetAllExiting is used by sigblock(true) when a thread is
// exiting.
var sigsetAllExiting = func() sigset {
	res := sigset_all

	// Apply GOOS-specific overrides here, rather than in osinit,
	// because osinit may be called before sigsetAllExiting is
	// initialized (#51913).
	if GOOS == "linux" && iscgo {
		// #42494 glibc and musl reserve some signals for
		// internal use and require they not be blocked by
		// the rest of a normal C runtime. When the go runtime
		// blocks...unblocks signals, temporarily, the blocked
		// interval of time is generally very short. As such,
		// these expectations of *libc code are mostly met by
		// the combined go+cgo system of threads. However,
		// when go causes a thread to exit, via a return from
		// mstart(), the combined runtime can deadlock if
		// these signals are blocked. Thus, don't block these
		// signals when exiting threads.
		// - glibc: SIGCANCEL (32), SIGSETXID (33)
		// - musl: SIGTIMER (32), SIGCANCEL (33), SIGSYNCCALL (34)
		sigdelset(&res, 32)
		sigdelset(&res, 33)
		sigdelset(&res, 34)
	}

	return res
}()
// sigblock blocks signals in the current thread's signal mask.
// This is used to block signals while setting up and tearing down g
// when a non-Go thread calls a Go function. When a thread is exiting
// we use the sigsetAllExiting value, otherwise the OS specific
// definition of sigset_all is used.
// This is nosplit and nowritebarrierrec because it is called by needm
// which may be called on a non-Go thread with no g available.
//
//go:nosplit
//go:nowritebarrierrec
func sigblock(exiting bool) {
if exiting {
sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil)
return
}
sigprocmask(_SIG_SETMASK, &sigset_all, nil)
}
// unblocksig removes sig from the current thread's signal mask.
// This is nosplit and nowritebarrierrec because it is called from
// dieFromSignal, which can be called by sigfwdgo while running in the
// signal handler, on the signal stack, with no g available.
//
//go:nosplit
//go:nowritebarrierrec
func unblocksig(sig uint32) {
	// Build a mask containing only sig and unblock just that one.
	var set sigset
	sigaddset(&set, int(sig))
	sigprocmask(_SIG_UNBLOCK, &set, nil)
}
// minitSignals is called when initializing a new m to set the
// thread's alternate signal stack and signal mask.
func minitSignals() {
	minitSignalStack()
	minitSignalMask()
}
// minitSignalStack is called when initializing a new m to set the
// alternate signal stack.
//
// Two cases:
//   - No alternate stack is installed for this thread, or we are a
//     pure-Go build: install the Go-allocated gsignal stack as the
//     thread's alternate stack (the normal case).
//   - A non-Go thread already installed an alternate stack (cgo) and
//     then called into Go: adopt that stack as the gsignal stack,
//     saving the Go one in goSigStack.
//
// The choice is recorded in newSigstack so unminit can undo it.
func minitSignalStack() {
	mp := getg().m
	var st stackt
	sigaltstack(nil, &st)
	foreignAltStack := iscgo && st.ss_flags&_SS_DISABLE == 0
	if foreignAltStack {
		setGsignalStack(&st, &mp.goSigStack)
		mp.newSigstack = false
	} else {
		signalstack(&mp.gsignal.stack)
		mp.newSigstack = true
	}
}
// minitSignalMask is called when initializing a new m to set the
// thread's signal mask. When this is called all signals have been
// blocked for the thread. This starts with m.sigmask, which was set
// either from initSigmask for a newly created thread or by calling
// sigsave if this is a non-Go thread calling a Go function. It
// removes all essential signals from the mask, thus causing those
// signals to not be blocked. Then it sets the thread's signal mask.
// After this is called the thread can receive signals.
func minitSignalMask() {
	nmask := getg().m.sigmask
	for i := range sigtable {
		if !blockableSig(uint32(i)) {
			sigdelset(&nmask, i)
		}
	}
	sigprocmask(_SIG_SETMASK, &nmask, nil)
}
// unminitSignals is called from dropm, via unminit, to undo the
// effect of calling minit on a non-Go thread.
//
//go:nosplit
func unminitSignals() {
	if getg().m.newSigstack {
		// We installed the Go signal stack in minit; disable it.
		st := stackt{ss_flags: _SS_DISABLE}
		sigaltstack(&st, nil)
	} else {
		// We got the signal stack from someone else. Restore
		// the Go-allocated stack in case this M gets reused
		// for another thread (e.g., it's an extram). Also, on
		// Android, libc allocates a signal stack for all
		// threads, so it's important to restore the Go stack
		// even on Go-created threads so we can free it.
		restoreGsignalStack(&getg().m.goSigStack)
	}
}
// blockableSig reports whether sig may be blocked by the signal mask.
// We never want to block the signals marked _SigUnblock;
// these are the synchronous signals that turn into a Go panic.
// We never want to block the preemption signal if it is being used.
// In a Go program--not a c-archive/c-shared--we never want to block
// the signals marked _SigKill or _SigThrow, as otherwise it's possible
// for all running threads to block them and delay their delivery until
// we start a new thread. When linked into a C program we let the C code
// decide on the disposition of those signals.
func blockableSig(sig uint32) bool {
	flags := sigtable[sig].flags
	switch {
	case flags&_SigUnblock != 0:
		// Synchronous panic signal: must stay deliverable.
		return false
	case sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0:
		// Preemption signal in active use: must stay deliverable.
		return false
	case isarchive || islibrary:
		// c-archive/c-shared: the C side owns fatal-signal policy.
		return true
	default:
		return flags&(_SigKill|_SigThrow) == 0
	}
}
// gsignalStack saves the fields of the gsignal stack changed by
// setGsignalStack, so restoreGsignalStack can put them back.
type gsignalStack struct {
	stack       stack   // saved stack bounds
	stackguard0 uintptr // saved stackguard0
	stackguard1 uintptr // saved stackguard1
	stktopsp    uintptr // saved top-of-stack sp
}
// setGsignalStack sets the gsignal stack of the current m to an
// alternate signal stack returned from the sigaltstack system call.
// It saves the old values in *old (when old is non-nil) for use by
// restoreGsignalStack. This is used when handling a signal if non-Go
// code has set the alternate signal stack.
//
//go:nosplit
//go:nowritebarrierrec
func setGsignalStack(st *stackt, old *gsignalStack) {
	sg := getg().m.gsignal
	if old != nil {
		// Snapshot the current gsignal stack configuration.
		old.stack = sg.stack
		old.stackguard0 = sg.stackguard0
		old.stackguard1 = sg.stackguard1
		old.stktopsp = sg.stktopsp
	}
	// Point gsignal at the foreign alternate stack.
	sp := uintptr(unsafe.Pointer(st.ss_sp))
	sg.stack.lo = sp
	sg.stack.hi = sp + st.ss_size
	sg.stackguard0 = sp + stackGuard
	sg.stackguard1 = sp + stackGuard
}
// restoreGsignalStack restores the gsignal stack to the value it had
// before entering the signal handler, from the snapshot saved by
// setGsignalStack.
//
//go:nosplit
//go:nowritebarrierrec
func restoreGsignalStack(st *gsignalStack) {
	gsig := getg().m.gsignal
	gsig.stack = st.stack
	gsig.stackguard0 = st.stackguard0
	gsig.stackguard1 = st.stackguard1
	gsig.stktopsp = st.stktopsp
}
// signalstack sets the current thread's alternate signal stack to s
// via the sigaltstack system call.
//
//go:nosplit
func signalstack(s *stack) {
	var st stackt
	st.ss_size = s.hi - s.lo
	setSignalstackSP(&st, s.lo)
	sigaltstack(&st, nil)
}
// setsigsegv is used on darwin/arm64 to fake a segmentation fault.
//
// This is exported via linkname to assembly in runtime/cgo.
//
//go:nosplit
//go:linkname setsigsegv
func setsigsegv(pc uintptr) {
	// Fill in the signal fields of the current g as if the kernel
	// had delivered a real SIGSEGV at pc.
	gp := getg()
	gp.sig = _SIGSEGV
	gp.sigpc = pc
	gp.sigcode0 = _SEGV_MAPERR
	gp.sigcode1 = 0 // TODO: emulate si_addr
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements runtime support for signal handling.
//
// Most synchronization primitives are not available from
// the signal handler (it cannot block, allocate memory, or use locks)
// so the handler communicates with a processing goroutine
// via struct sig, below.
//
// sigsend is called by the signal handler to queue a new signal.
// signal_recv is called by the Go program to receive a newly queued signal.
//
// Synchronization between sigsend and signal_recv is based on the sig.state
// variable. It can be in three states:
// * sigReceiving means that signal_recv is blocked on sig.Note and there are
// no new pending signals.
// * sigSending means that sig.mask *may* contain new pending signals,
// signal_recv can't be blocked in this state.
// * sigIdle means that there are no new pending signals and signal_recv is not
// blocked.
//
// Transitions between states are done atomically with CAS.
//
// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
// If several sigsends and signal_recv execute concurrently, it can lead to
// unnecessary rechecks of sig.mask, but it cannot lead to missed signals
// nor deadlocks.
//go:build !plan9
package runtime
import (
"internal/runtime/atomic"
_ "unsafe" // for go:linkname
)
// sig handles communication between the signal handler and os/signal.
// Other than the inuse and recv fields, the fields are accessed atomically.
//
// The wanted and ignored fields are only written by one goroutine at
// a time; access is controlled by the handlers Mutex in os/signal.
// The fields are only read by that one goroutine and by the signal handler.
// We access them atomically to minimize the race between setting them
// in the goroutine calling os/signal and the signal handler,
// which may be running in a different thread. That race is unavoidable,
// as there is no connection between handling a signal and receiving one,
// but atomic instructions should minimize it.
var sig struct {
	note       note                      // sleep/wake note for signal_recv (sigNote* on Darwin)
	mask       [(_NSIG + 31) / 32]uint32 // pending signals, set by sigsend, drained by signal_recv
	wanted     [(_NSIG + 31) / 32]uint32 // signals enabled via signal_enable
	ignored    [(_NSIG + 31) / 32]uint32 // signals marked ignored via signal_ignore
	recv       [(_NSIG + 31) / 32]uint32 // signal_recv's local copy of mask
	state      atomic.Uint32             // sigIdle, sigReceiving, or sigSending
	delivering atomic.Uint32             // count of sigsend calls in flight
	inuse      bool                      // set once by the first signal_enable
}
const (
	sigIdle      = iota // no new pending signals; signal_recv not blocked
	sigReceiving        // signal_recv is blocked on sig.note; no new pending signals
	sigSending          // sig.mask may contain new pending signals; signal_recv not blocked
)
// sigsend delivers a signal from sighandler to the internal signal delivery queue.
// It reports whether the signal was sent. If not, the caller typically crashes the program.
// It runs from the signal handler, so it's limited in what it can do.
func sigsend(s uint32) bool {
	bit := uint32(1) << uint(s&31)
	if s >= uint32(32*len(sig.wanted)) {
		// Signal number does not fit in the bitmaps; cannot deliver.
		return false
	}
	// delivering tracks in-flight sends for signalWaitUntilIdle.
	sig.delivering.Add(1)
	// We are running in the signal handler; defer is not available.
	if w := atomic.Load(&sig.wanted[s/32]); w&bit == 0 {
		// Nobody has enabled this signal via signal_enable.
		sig.delivering.Add(-1)
		return false
	}
	// Add signal to outgoing queue.
	for {
		mask := sig.mask[s/32]
		if mask&bit != 0 {
			sig.delivering.Add(-1)
			return true // signal already in queue
		}
		if atomic.Cas(&sig.mask[s/32], mask, mask|bit) {
			break
		}
	}
	// Notify receiver that queue has new bit.
Send:
	for {
		switch sig.state.Load() {
		default:
			throw("sigsend: inconsistent state")
		case sigIdle:
			if sig.state.CompareAndSwap(sigIdle, sigSending) {
				break Send
			}
		case sigSending:
			// notification already pending
			break Send
		case sigReceiving:
			if sig.state.CompareAndSwap(sigReceiving, sigIdle) {
				// Receiver is asleep; wake it. Darwin cannot use
				// notes from a signal handler, so it has its own
				// async-signal-safe wakeup (see sigqueue note file).
				if GOOS == "darwin" || GOOS == "ios" {
					sigNoteWakeup(&sig.note)
					break Send
				}
				notewakeup(&sig.note)
				break Send
			}
		}
	}
	sig.delivering.Add(-1)
	return true
}
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//
//go:linkname signal_recv os/signal.signal_recv
func signal_recv() uint32 {
	for {
		// Serve any signals from local copy.
		for i := uint32(0); i < _NSIG; i++ {
			if sig.recv[i/32]&(1<<(i&31)) != 0 {
				sig.recv[i/32] &^= 1 << (i & 31)
				return i
			}
		}
		// Wait for updates to be available from signal sender.
	Receive:
		for {
			switch sig.state.Load() {
			default:
				throw("signal_recv: inconsistent state")
			case sigIdle:
				if sig.state.CompareAndSwap(sigIdle, sigReceiving) {
					// Block until sigsend wakes us. Darwin uses its
					// own async-signal-safe note implementation.
					if GOOS == "darwin" || GOOS == "ios" {
						sigNoteSleep(&sig.note)
						break Receive
					}
					notetsleepg(&sig.note, -1)
					noteclear(&sig.note)
					break Receive
				}
			case sigSending:
				// A sender got in ahead of us; new bits are already
				// in sig.mask, so consume them instead of sleeping.
				if sig.state.CompareAndSwap(sigSending, sigIdle) {
					break Receive
				}
			}
		}
		// Incorporate updates from sender into local copy.
		for i := range sig.mask {
			sig.recv[i] = atomic.Xchg(&sig.mask[i], 0)
		}
	}
}
// signalWaitUntilIdle waits until the signal delivery mechanism is idle.
// This is used to ensure that we do not drop a signal notification due
// to a race between disabling a signal and receiving a signal.
// This assumes that signal delivery has already been disabled for
// the signal(s) in question, and here we are just waiting to make sure
// that all the signals have been delivered to the user channels
// by the os/signal package.
//
//go:linkname signalWaitUntilIdle os/signal.signalWaitUntilIdle
func signalWaitUntilIdle() {
	// Although the signals we care about have been removed from
	// sig.wanted, it is possible that another thread has received
	// a signal, has read from sig.wanted, is now updating sig.mask,
	// and has not yet woken up the processor thread. We need to wait
	// until all current signal deliveries have completed.
	for sig.delivering.Load() != 0 {
		Gosched()
	}
	// Although WaitUntilIdle seems like the right name for this
	// function, the state we are looking for is sigReceiving, not
	// sigIdle. The sigIdle state is really more like sigProcessing.
	// sigReceiving means signal_recv is parked with nothing queued.
	for sig.state.Load() != sigReceiving {
		Gosched()
	}
}
// Must only be called from a single goroutine at a time.
//
//go:linkname signal_enable os/signal.signal_enable
func signal_enable(s uint32) {
	if !sig.inuse {
		// This is the first call to signal_enable. Initialize.
		sig.inuse = true // enable reception of signals; cannot disable
		if GOOS == "darwin" || GOOS == "ios" {
			sigNoteSetup(&sig.note)
		} else {
			noteclear(&sig.note)
		}
	}
	if s >= uint32(len(sig.wanted)*32) {
		// Out of range of the bitmaps; nothing to enable.
		return
	}
	// Mark the signal wanted and not ignored before asking the OS to
	// deliver it (sigenable), so sigsend accepts it as soon as it
	// can arrive.
	w := sig.wanted[s/32]
	w |= 1 << (s & 31)
	atomic.Store(&sig.wanted[s/32], w)
	i := sig.ignored[s/32]
	i &^= 1 << (s & 31)
	atomic.Store(&sig.ignored[s/32], i)
	sigenable(s)
}
// Must only be called from a single goroutine at a time.
//
//go:linkname signal_disable os/signal.signal_disable
func signal_disable(s uint32) {
	if s >= uint32(32*len(sig.wanted)) {
		return
	}
	// Tell the OS first, then stop wanting the signal.
	sigdisable(s)
	cleared := sig.wanted[s/32] &^ (1 << (s & 31))
	atomic.Store(&sig.wanted[s/32], cleared)
}
// Must only be called from a single goroutine at a time.
//
//go:linkname signal_ignore os/signal.signal_ignore
func signal_ignore(s uint32) {
	if s >= uint32(len(sig.wanted)*32) {
		// Out of range of the bitmaps; nothing to ignore.
		return
	}
	// Tell the OS first, then clear the wanted bit and set the
	// ignored bit so the handlers drop the signal.
	sigignore(s)
	w := sig.wanted[s/32]
	w &^= 1 << (s & 31)
	atomic.Store(&sig.wanted[s/32], w)
	i := sig.ignored[s/32]
	i |= 1 << (s & 31)
	atomic.Store(&sig.ignored[s/32], i)
}
// sigInitIgnored marks the signal as already ignored. This is called at
// program start by initsig. In a shared library initsig is called by
// libpreinit, so the runtime may not be initialized yet.
//
//go:nosplit
func sigInitIgnored(s uint32) {
	word := &sig.ignored[s/32]
	atomic.Store(word, *word|1<<(s&31))
}
// Checked by signal handlers.
//
//go:linkname signal_ignored os/signal.signal_ignored
func signal_ignored(s uint32) bool {
	word := atomic.Load(&sig.ignored[s/32])
	bit := uint32(1) << (s & 31)
	return word&bit != 0
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The current implementation of notes on Darwin is not async-signal-safe,
// so on Darwin the sigqueue code uses different functions to wake up the
// signal_recv thread. This file holds the non-Darwin implementations of
// those functions. These functions will never be called.
//go:build !darwin && !plan9
package runtime
// sigNoteSetup is a non-Darwin stub; only Darwin's sigqueue uses sigNotes.
func sigNoteSetup(*note) {
	throw("sigNoteSetup")
}
// sigNoteSleep is a non-Darwin stub; only Darwin's sigqueue uses sigNotes.
func sigNoteSleep(*note) {
	throw("sigNoteSleep")
}
// sigNoteWakeup is a non-Darwin stub; only Darwin's sigqueue uses sigNotes.
func sigNoteWakeup(*note) {
	throw("sigNoteWakeup")
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
)
// slice is the runtime representation of a Go slice header.
type slice struct {
	array unsafe.Pointer // backing array
	len   int            // number of elements in use
	cap   int            // number of elements allocated
}
// A notInHeapSlice is a slice backed by internal/runtime/sys.NotInHeap memory.
type notInHeapSlice struct {
	array *notInHeap // backing array, outside the GC-managed heap
	len   int        // number of elements in use
	cap   int        // number of elements allocated
}
// panicmakeslicelen panics for a make([]T, len) whose length is out of range.
func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}
// panicmakeslicecap panics for a make([]T, len, cap) whose capacity is out of range.
func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}
// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		var overflow bool
		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		// fromlen < tolen here, so this product cannot overflow either.
		copymem = et.Size_ * uintptr(fromlen)
	} else {
		// fromlen is a known good length, equal to or greater than tolen,
		// thereby making tolen a good slice length too as from and to slices
		// have the same element width.
		tomem = et.Size_ * uintptr(tolen)
		copymem = tomem
	}
	var to unsafe.Pointer
	if !et.Pointers() {
		// Pointer-free elements: take uninitialized memory and clear
		// only the tail beyond the copied region.
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in the source since we know the
			// destination only contains nil pointers: it has been cleared
			// during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem, et)
		}
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}
	if asanenabled {
		asanread(from, copymem)
	}
	memmove(to, from, copymem)
	return to
}
// makeslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makeslice
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	// Reject overflow, over-large allocations, and invalid len/cap
	// combinations before allocating.
	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}
	return mallocgc(mem, et, true)
}
// makeslice64 is the 64-bit-argument entry point for make([]T, len, cap).
// It rejects lengths and capacities that do not fit in int on this
// platform, then defers to makeslice.
func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	l := int(len64)
	if int64(l) != len64 {
		panicmakeslicelen()
	}
	c := int(cap64)
	if int64(c) != cap64 {
		panicmakeslicecap()
	}
	return makeslice(et, l, c)
}
// growslice allocates new backing store for a slice.
//
// arguments:
//
// oldPtr = pointer to the slice's backing array
// newLen = new length (= oldLen + num)
// oldCap = original slice's capacity.
// num = number of elements being added
// et = element type
//
// return values:
//
// newPtr = pointer to the new backing store
// newLen = same value as the argument
// newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - num
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
//
// growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/bytedance/sonic
// - github.com/chenzhuoyu/iasm
// - github.com/cloudwego/dynamicgo
// - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname growslice
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	oldLen := newLen - num
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}
	if asanenabled {
		asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}
	if newLen < 0 {
		// newLen overflowed int in the caller's addition.
		panic(errorString("growslice: len out of range"))
	}
	if et.Size_ == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return slice{unsafe.Pointer(&zerobase), newLen, newLen}
	}
	newcap := nextslicecap(newLen, oldCap)
	var overflow bool
	// lenmem/newlenmem/capmem are the byte sizes of the old length,
	// new length, and new capacity respectively.
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.Size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	noscan := !et.Pointers()
	switch {
	case et.Size_ == 1:
		lenmem = uintptr(oldLen)
		newlenmem = uintptr(newLen)
		capmem = roundupsize(uintptr(newcap), noscan)
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.Size_ == goarch.PtrSize:
		lenmem = uintptr(oldLen) * goarch.PtrSize
		newlenmem = uintptr(newLen) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.Size_):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
		} else {
			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
		}
		lenmem = uintptr(oldLen) << shift
		newlenmem = uintptr(newLen) << shift
		capmem = roundupsize(uintptr(newcap)<<shift, noscan)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
		capmem = uintptr(newcap) << shift
	default:
		lenmem = uintptr(oldLen) * et.Size_
		newlenmem = uintptr(newLen) * et.Size_
		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
		capmem = roundupsize(capmem, noscan)
		newcap = int(capmem / et.Size_)
		capmem = uintptr(newcap) * et.Size_
	}
	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	// type T [1<<27 + 1]int64
	//
	// var d T
	// var s []T
	//
	// func main() {
	//   s = append(s, d, d, d, d)
	//   print(len(s), "\n")
	// }
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: len out of range"))
	}
	var p unsafe.Pointer
	if !et.Pointers() {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et)
		}
	}
	memmove(p, oldPtr, lenmem)
	return slice{p, newLen, newcap}
}
// growsliceNoAlias is like growslice but only for the case where
// we know that oldPtr is not aliased.
//
// In other words, the caller must know that there are no other references
// to the backing memory of the slice being grown aside from the slice header
// that will be updated with new backing memory when growsliceNoAlias
// returns, and therefore oldPtr must be the only pointer to its referent
// aside from the slice header updated by the returned slice.
//
// In addition, oldPtr must point to the start of the allocation and match
// the pointer that was returned by mallocgc. In particular, oldPtr must not
// be an interior pointer, such as after a reslice.
//
// See freegc for details.
func growsliceNoAlias(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	s := growslice(oldPtr, newLen, oldCap, num, et)
	// Under the freegc experiment, the old (now unreferenced) backing
	// store can be freed eagerly.
	if goexperiment.RuntimeFreegc && oldPtr != nil && oldPtr != s.array {
		if gp := getg(); uintptr(oldPtr) < gp.stack.lo || gp.stack.hi <= uintptr(oldPtr) {
			// oldPtr does not point into the current stack, and it is not
			// the data pointer for s after the grow, so attempt to free it.
			// (Note that freegc also verifies that oldPtr does not point into our stack,
			// but checking here first is slightly cheaper for the case when
			// oldPtr is on the stack and freegc would be a no-op.)
			//
			// TODO(thepudds): it may be that oldPtr==s.array only when elemsize==0,
			// so perhaps we could prohibit growsliceNoAlias being called in that case
			// and eliminate that check here, or alternatively, we could lean into
			// freegc being a no-op for zero-sized allocations (that is, no check of
			// oldPtr != s.array here and just let freegc return quickly).
			noscan := !et.Pointers()
			freegc(oldPtr, uintptr(oldCap)*et.Size_, noscan)
		}
	}
	return s
}
// nextslicecap computes the next appropriate slice capacity when
// growing a slice of capacity oldCap to hold at least newLen elements.
func nextslicecap(newLen, oldCap int) int {
	doubled := oldCap + oldCap
	if newLen > doubled {
		// Growing past double the old capacity: jump straight to newLen.
		return newLen
	}

	const threshold = 256
	if oldCap < threshold {
		// Small slices grow 2x.
		return doubled
	}

	// Large slices grow roughly 1.25x per step; the formula blends
	// smoothly from the 2x regime near the threshold.
	grown := oldCap
	for {
		grown += (grown + 3*threshold) >> 2
		// One unsigned comparison checks both "big enough" and overflow:
		// newLen > 0, so an overflowed (negative) grown compares larger
		// than newLen as a uint.
		if uint(grown) >= uint(newLen) {
			break
		}
	}

	if grown <= 0 {
		// The computation overflowed; fall back to the requested length.
		return newLen
	}
	return grown
}
// reflect_growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - github.com/cloudwego/dynamicgo
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice {
	// Semantically equivalent to slices.Grow, except that the caller
	// is responsible for ensuring that old.len+num > old.cap.
	num -= old.cap - old.len // preserve memory of old[old.len:old.cap]
	new := growslice(old.array, old.cap+num, old.cap, num, et)
	// growslice does not zero out new[old.cap:new.len] since it assumes that
	// the memory will be overwritten by an append() that called growslice.
	// Since the caller of reflect_growslice is not append(),
	// zero out this region before returning the slice to the reflect package.
	// (Pointer-containing elements were already zeroed by mallocgc.)
	if !et.Pointers() {
		oldcapmem := uintptr(old.cap) * et.Size_
		newlenmem := uintptr(new.len) * et.Size_
		memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)
	}
	new.len = old.len // preserve the old length
	return new
}
// isPowerOfTwo reports whether v has at most one bit set.
// Note that v == 0 also reports true; the caller in growslice has
// already excluded zero element sizes before calling this.
func isPowerOfTwo(v uintptr) bool {
	return (v-1)&v == 0
}
// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
// It returns the number of elements copied: min(toLen, fromLen).
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}
	n := fromLen
	if toLen < n {
		n = toLen
	}
	if width == 0 {
		// Zero-width elements: nothing to move, but the count still matters.
		return n
	}
	size := uintptr(n) * width
	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}
	if asanenabled {
		asanread(fromPtr, size)
		asanwrite(toPtr, size)
	}
	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}
// bytealg_MakeNoZero allocates a []byte of the given length without
// zeroing its contents; callers must overwrite every byte before
// exposing the slice. The capacity is the allocation's size class,
// which may exceed len.
//
//go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero
func bytealg_MakeNoZero(len int) []byte {
	if uintptr(len) > maxAlloc {
		panicmakeslicelen()
	}
	cap := roundupsize(uintptr(len), true)
	return unsafe.Slice((*byte)(mallocgc(cap, nil, false)), cap)[:len]
}
// moveSlice copies the input slice to the heap and returns it.
// et is the element type of the slice.
func moveSlice(et *_type, old unsafe.Pointer, len, cap int) (unsafe.Pointer, int, int) {
	if cap == 0 {
		if old != nil {
			// Preserve non-nil-ness without allocating: zero-sized
			// allocations all share zerobase.
			old = unsafe.Pointer(&zerobase)
		}
		return old, 0, 0
	}
	capmem := uintptr(cap) * et.Size_
	new := mallocgc(capmem, et, true)
	// Shade the source pointers; the fresh allocation is all nil.
	bulkBarrierPreWriteSrcOnly(uintptr(new), uintptr(old), capmem, et)
	memmove(new, old, capmem)
	return new, len, cap
}
// moveSliceNoScan is like moveSlice except the element type is known to
// not have any pointers. We instead pass in the size of the element.
func moveSliceNoScan(elemSize uintptr, old unsafe.Pointer, len, cap int) (unsafe.Pointer, int, int) {
	if cap == 0 {
		if old != nil {
			// Preserve non-nil-ness without allocating.
			old = unsafe.Pointer(&zerobase)
		}
		return old, 0, 0
	}
	capmem := uintptr(cap) * elemSize
	// No pointers: no type info, no zeroing, no write barrier needed.
	new := mallocgc(capmem, nil, false)
	memmove(new, old, capmem)
	return new, len, cap
}
// moveSliceNoCap is like moveSlice, but can pick any appropriate capacity
// for the returned slice.
// Elements between len and cap in the returned slice will be zeroed.
func moveSliceNoCap(et *_type, old unsafe.Pointer, len int) (unsafe.Pointer, int, int) {
	if len == 0 {
		if old != nil {
			// Preserve non-nil-ness without allocating.
			old = unsafe.Pointer(&zerobase)
		}
		return old, 0, 0
	}
	lenmem := uintptr(len) * et.Size_
	// Round up to the allocation's size class; the extra elements
	// become the returned capacity.
	capmem := roundupsize(lenmem, false)
	new := mallocgc(capmem, et, true)
	bulkBarrierPreWriteSrcOnly(uintptr(new), uintptr(old), lenmem, et)
	memmove(new, old, lenmem)
	return new, len, int(capmem / et.Size_)
}
// moveSliceNoCapNoScan is a combination of moveSliceNoScan and moveSliceNoCap.
func moveSliceNoCapNoScan(elemSize uintptr, old unsafe.Pointer, len int) (unsafe.Pointer, int, int) {
	if len == 0 {
		if old != nil {
			// Preserve non-nil-ness without allocating.
			old = unsafe.Pointer(&zerobase)
		}
		return old, 0, 0
	}
	lenmem := uintptr(len) * elemSize
	capmem := roundupsize(lenmem, true)
	// mallocgc did not zero the memory (needzero=false), so clear the
	// tail between len and the rounded-up capacity ourselves.
	new := mallocgc(capmem, nil, false)
	memmove(new, old, lenmem)
	if capmem > lenmem {
		memclrNoHeapPointers(add(new, lenmem), capmem-lenmem)
	}
	return new, len, int(capmem / elemSize)
}
// growsliceBuf is like growslice, but we can use the given buffer
// as a backing store if we want. bufPtr must be on the stack.
func growsliceBuf(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type, bufPtr unsafe.Pointer, bufLen int) slice {
	if newLen > bufLen {
		// Doesn't fit, process like a normal growslice.
		return growslice(oldPtr, newLen, oldCap, num, et)
	}
	oldLen := newLen - num
	if oldPtr != bufPtr && oldLen != 0 {
		// Move data to start of buffer.
		// Note: bufPtr is on the stack, so no write barrier needed.
		memmove(bufPtr, oldPtr, uintptr(oldLen)*et.Size_)
	}
	// Pick a new capacity.
	//
	// Unlike growslice, we don't need to double the size each time.
	// The work done here is not proportional to the length of the slice.
	// (Unless the memmove happens above, but that is rare, and in any
	// case there are not many elements on this path.)
	//
	// Instead, we try to just bump up to the next size class.
	// This will ensure that we don't waste any space when we eventually
	// call moveSlice with the resulting slice.
	newCap := int(roundupsize(uintptr(newLen)*et.Size_, !et.Pointers()) / et.Size_)
	// Zero slice beyond newLen.
	// The buffer is stack memory, so NoHeapPointers is ok.
	// Caller will overwrite [oldLen:newLen], so we don't need to zero that portion.
	// If et.Pointers(), buffer is at least initialized so we don't need to
	// worry about the caller overwriting junk in [oldLen:newLen].
	if newLen < newCap {
		memclrNoHeapPointers(add(bufPtr, uintptr(newLen)*et.Size_), uintptr(newCap-newLen)*et.Size_)
	}
	return slice{bufPtr, newLen, newCap}
}
// growsliceBufNoAlias is a combination of growsliceBuf and growsliceNoAlias.
// bufPtr must be on the stack.
func growsliceBufNoAlias(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type, bufPtr unsafe.Pointer, bufLen int) slice {
	s := growsliceBuf(oldPtr, newLen, oldCap, num, et, bufPtr, bufLen)
	// Under the freegc experiment, eagerly free the old backing store
	// once it is no longer referenced.
	if goexperiment.RuntimeFreegc && oldPtr != bufPtr && oldPtr != nil && oldPtr != s.array {
		// oldPtr is not bufPtr (the stack buffer) and it is not
		// the data pointer for s after the grow, so attempt to free it.
		// (Note that freegc does a broader check that oldPtr does not point into our stack,
		// but checking here first is slightly cheaper for a common case when oldPtr is bufPtr
		// and freegc would be a no-op.)
		//
		// TODO(thepudds): see related TODO in growsliceNoAlias about possibly eliminating
		// the oldPtr != s.array check.
		noscan := !et.Pointers()
		freegc(oldPtr, uintptr(oldCap)*et.Size_, noscan)
	}
	return s
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Software IEEE754 64-bit floating point.
// Only referred to (and thus linked in) by softfloat targets
// and by tests in this directory.
package runtime
// IEEE-754 binary64 and binary32 field layout constants.
const (
	mantbits64 uint = 52
	expbits64  uint = 11
	bias64          = -1<<(expbits64-1) + 1 // exponent bias: -1023

	nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1<<(mantbits64-1) // quiet NaN, 0 payload
	inf64 uint64 = (1<<expbits64 - 1) << mantbits64
	neg64 uint64 = 1 << (expbits64 + mantbits64) // sign bit

	mantbits32 uint = 23
	expbits32  uint = 8
	bias32          = -1<<(expbits32-1) + 1 // exponent bias: -127

	nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1<<(mantbits32-1) // quiet NaN, 0 payload
	inf32 uint32 = (1<<expbits32 - 1) << mantbits32
	neg32 uint32 = 1 << (expbits32 + mantbits32) // sign bit
)
// funpack64 splits the float64 bit pattern f into its sign bit (in
// place), mantissa, and unbiased exponent, and reports infinity/NaN.
// For normal numbers the implicit leading mantissa bit is made
// explicit; denormals are normalized with a correspondingly smaller
// exponent.
func funpack64(f uint64) (sign, mant uint64, exp int, inf, nan bool) {
	sign = f & (1 << (mantbits64 + expbits64))
	mant = f & (1<<mantbits64 - 1)
	exp = int(f>>mantbits64) & (1<<expbits64 - 1)

	if exp == 1<<expbits64-1 {
		// All-ones exponent: NaN if any mantissa bit is set, else Inf.
		if mant != 0 {
			nan = true
		} else {
			inf = true
		}
		return
	}
	if exp == 0 {
		// Zero or denormalized; normalize a nonzero mantissa.
		if mant != 0 {
			exp += bias64 + 1
			for mant < 1<<mantbits64 {
				mant <<= 1
				exp--
			}
		}
		return
	}
	// Normal number: restore the implicit top bit and unbias.
	mant |= 1 << mantbits64
	exp += bias64
	return
}
// funpack32 splits the float32 bit pattern f into its sign bit (in
// place), mantissa, and unbiased exponent, and reports infinity/NaN.
// It mirrors funpack64 for the 32-bit format.
func funpack32(f uint32) (sign, mant uint32, exp int, inf, nan bool) {
	sign = f & (1 << (mantbits32 + expbits32))
	mant = f & (1<<mantbits32 - 1)
	exp = int(f>>mantbits32) & (1<<expbits32 - 1)

	if exp == 1<<expbits32-1 {
		// All-ones exponent: NaN if any mantissa bit is set, else Inf.
		if mant != 0 {
			nan = true
		} else {
			inf = true
		}
		return
	}
	if exp == 0 {
		// Zero or denormalized; normalize a nonzero mantissa.
		if mant != 0 {
			exp += bias32 + 1
			for mant < 1<<mantbits32 {
				mant <<= 1
				exp--
			}
		}
		return
	}
	// Normal number: restore the implicit top bit and unbias.
	mant |= 1 << mantbits32
	exp += bias32
	return
}
// fpack64 assembles a float64 bit pattern from a sign bit, mantissa,
// and unbiased exponent, rounding to nearest-even. A nonzero trunc
// records ("sticky") that low-order bits were already discarded.
func fpack64(sign, mant uint64, exp int, trunc uint64) uint64 {
	// Keep the originals in case rounding must be redone as a denormal.
	mant0, exp0, trunc0 := mant, exp, trunc
	if mant == 0 {
		return sign
	}
	// Normalize so that 1<<mantbits64 <= mant < 4<<mantbits64.
	for mant < 1<<mantbits64 {
		mant <<= 1
		exp--
	}
	for mant >= 4<<mantbits64 {
		trunc |= mant & 1
		mant >>= 1
		exp++
	}
	if mant >= 2<<mantbits64 {
		// One extra bit: round to nearest, ties to even, then drop it.
		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
			mant++
			if mant >= 4<<mantbits64 {
				mant >>= 1
				exp++
			}
		}
		mant >>= 1
		exp++
	}
	if exp >= 1<<expbits64-1+bias64 {
		// Exponent overflow: infinity of the given sign.
		return sign ^ inf64
	}
	if exp < bias64+1 {
		if exp < bias64-int(mantbits64) {
			// Underflows past the smallest denormal: signed zero.
			return sign | 0
		}
		// repeat expecting denormal
		mant, exp, trunc = mant0, exp0, trunc0
		for exp < bias64 {
			trunc |= mant & 1
			mant >>= 1
			exp++
		}
		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
			mant++
		}
		mant >>= 1
		exp++
		if mant < 1<<mantbits64 {
			// Still denormal: the exponent field is zero.
			return sign | mant
		}
		// Rounding carried into a normal number; fall through to pack.
	}
	return sign | uint64(exp-bias64)<<mantbits64 | mant&(1<<mantbits64-1)
}
// fpack32 assembles a float32 bit pattern from a sign bit, mantissa,
// and unbiased exponent, rounding to nearest-even. A nonzero trunc
// records ("sticky") that low-order bits were already discarded.
// It mirrors fpack64 for the 32-bit format.
func fpack32(sign, mant uint32, exp int, trunc uint32) uint32 {
	// Keep the originals in case rounding must be redone as a denormal.
	mant0, exp0, trunc0 := mant, exp, trunc
	if mant == 0 {
		return sign
	}
	// Normalize so that 1<<mantbits32 <= mant < 4<<mantbits32.
	for mant < 1<<mantbits32 {
		mant <<= 1
		exp--
	}
	for mant >= 4<<mantbits32 {
		trunc |= mant & 1
		mant >>= 1
		exp++
	}
	if mant >= 2<<mantbits32 {
		// One extra bit: round to nearest, ties to even, then drop it.
		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
			mant++
			if mant >= 4<<mantbits32 {
				mant >>= 1
				exp++
			}
		}
		mant >>= 1
		exp++
	}
	if exp >= 1<<expbits32-1+bias32 {
		// Exponent overflow: infinity of the given sign.
		return sign ^ inf32
	}
	if exp < bias32+1 {
		if exp < bias32-int(mantbits32) {
			// Underflows past the smallest denormal: signed zero.
			return sign | 0
		}
		// repeat expecting denormal
		mant, exp, trunc = mant0, exp0, trunc0
		for exp < bias32 {
			trunc |= mant & 1
			mant >>= 1
			exp++
		}
		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
			mant++
		}
		mant >>= 1
		exp++
		if mant < 1<<mantbits32 {
			// Still denormal: the exponent field is zero.
			return sign | mant
		}
		// Rounding carried into a normal number; fall through to pack.
	}
	return sign | uint32(exp-bias32)<<mantbits32 | mant&(1<<mantbits32-1)
}
// fadd64 returns the IEEE-754 sum of the float64 bit patterns f and g.
func fadd64(f, g uint64) uint64 {
	fs, fm, fe, fi, fn := funpack64(f)
	gs, gm, ge, gi, gn := funpack64(g)

	// Special cases.
	switch {
	case fn || gn: // NaN + x or x + NaN = NaN
		return nan64
	case fi && gi && fs != gs: // +Inf + -Inf or -Inf + +Inf = NaN
		return nan64
	case fi: // ±Inf + g = ±Inf
		return f
	case gi: // f + ±Inf = ±Inf
		return g
	case fm == 0 && gm == 0 && fs != 0 && gs != 0: // -0 + -0 = -0
		return f
	case fm == 0: // 0 + g = g but 0 + -0 = +0
		if gm == 0 {
			g ^= gs
		}
		return g
	case gm == 0: // f + 0 = f
		return f
	}
	// Make f the operand with the larger magnitude.
	if fe < ge || fe == ge && fm < gm {
		f, g, fs, fm, fe, gs, gm, ge = g, f, gs, gm, ge, fs, fm, fe
	}
	// Align g's mantissa with f's, keeping two guard bits; the bits
	// shifted away are recorded in trunc (the sticky bits).
	shift := uint(fe - ge)
	fm <<= 2
	gm <<= 2
	trunc := gm & (1<<shift - 1)
	gm >>= shift
	if fs == gs {
		fm += gm
	} else {
		fm -= gm
		if trunc != 0 {
			fm--
		}
	}
	if fm == 0 {
		// Exact cancellation produces +0.
		fs = 0
	}
	return fpack64(fs, fm, fe-2, trunc)
}
func fsub64(f, g uint64) uint64 {
return fadd64(f, fneg64(g))
}
// fneg64 returns f with its sign bit inverted, i.e. the bit pattern of -f.
func fneg64(f uint64) uint64 {
	const signBit = uint64(1) << (mantbits64 + expbits64)
	return f ^ signBit
}
// fmul64 returns the IEEE 754 double-precision product of the bit
// patterns f and g.
func fmul64(f, g uint64) uint64 {
	fs, fm, fe, fi, fn := funpack64(f)
	gs, gm, ge, gi, gn := funpack64(g)
	// Special cases.
	switch {
	case fn || gn: // NaN * g or f * NaN = NaN
		return nan64
	case fi && gi: // Inf * Inf = Inf (with sign adjusted)
		return f ^ gs
	case fi && gm == 0, fm == 0 && gi: // 0 * Inf = Inf * 0 = NaN
		return nan64
	case fm == 0: // 0 * x = 0 (with sign adjusted)
		return f ^ gs
	case gm == 0: // x * 0 = 0 (with sign adjusted)
		return g ^ fs
	}
	// 53-bit * 53-bit = 107- or 108-bit
	lo, hi := mullu(fm, gm)
	shift := mantbits64 - 1
	// Low-order product bits that fall below the kept mantissa feed
	// the sticky bit for rounding in fpack64.
	trunc := lo & (1<<shift - 1)
	mant := hi<<(64-shift) | lo>>shift
	return fpack64(fs^gs, mant, fe+ge-1, trunc)
}
// fdiv64 returns the IEEE 754 double-precision quotient of the bit
// patterns f and g.
func fdiv64(f, g uint64) uint64 {
	fs, fm, fe, fi, fn := funpack64(f)
	gs, gm, ge, gi, gn := funpack64(g)
	// Special cases.
	switch {
	case fn || gn: // NaN / g = f / NaN = NaN
		return nan64
	case fi && gi: // ±Inf / ±Inf = NaN
		return nan64
	case !fi && !gi && fm == 0 && gm == 0: // 0 / 0 = NaN
		return nan64
	case fi, !gi && gm == 0: // Inf / g = f / 0 = Inf
		return fs ^ gs ^ inf64
	case gi, fm == 0: // f / Inf = 0 / g = 0 (sign from operands)
		return fs ^ gs ^ 0
	}
	// No-op: all four flags are already referenced in the switch above.
	_, _, _, _ = fi, fn, gi, gn
	// 53-bit<<54 / 53-bit = 53- or 54-bit.
	shift := mantbits64 + 2
	q, r := divlu(fm>>(64-shift), fm<<shift, gm)
	// The remainder serves as the sticky bits for rounding.
	return fpack64(fs^gs, q, fe-ge-2, r)
}
// f64to32 narrows a double-precision bit pattern to single precision,
// rounding through fpack32.
func f64to32(f uint64) uint32 {
	sign, mant, exp, isInf, isNaN := funpack64(f)
	if isNaN {
		return nan32
	}
	sign32 := uint32(sign >> 32)
	if isInf {
		return sign32 ^ inf32
	}
	// Bits dropped by the narrowing become the sticky input to fpack32.
	const drop = mantbits64 - mantbits32 - 1
	lost := uint32(mant & (1<<drop - 1))
	return fpack32(sign32, uint32(mant>>drop), exp-1, lost)
}
func f32to64(f uint32) uint64 {
const d = mantbits64 - mantbits32
fs, fm, fe, fi, fn := funpack32(f)
if fn {
return nan64
}
fs64 := uint64(fs) << 32
if fi {
return fs64 ^ inf64
}
return fpack64(fs64, uint64(fm)<<d, fe, 0)
}
// fcmp64 compares the double-precision bit patterns f and g.
// cmp is -1, 0, or +1 as f < g, f == g, or f > g; isnan reports
// (with cmp == 0) that at least one operand is NaN and the comparison
// is unordered.
func fcmp64(f, g uint64) (cmp int32, isnan bool) {
	fs, fm, _, fi, fn := funpack64(f)
	gs, gm, _, gi, gn := funpack64(g)
	switch {
	case fn, gn: // flag NaN
		return 0, true
	case !fi && !gi && fm == 0 && gm == 0: // ±0 == ±0
		return 0, false
	case fs > gs: // f < 0, g > 0
		return -1, false
	case fs < gs: // f > 0, g < 0
		return +1, false
	// Same sign, not NaN.
	// Can compare encodings directly now.
	// (IEEE bit patterns of same-sign values order like unsigned ints.)
	// Reverse for sign.
	case fs == 0 && f < g, fs != 0 && f > g:
		return -1, false
	case fs == 0 && f > g, fs != 0 && f < g:
		return +1, false
	}
	// f == g
	return 0, false
}
// f64toint converts the double-precision bit pattern f to an int64,
// truncating toward zero. ok is false (and val is 0) if f is NaN,
// infinite, smaller in magnitude than 0.5, or outside the int64 range;
// -2^63 itself converts exactly.
func f64toint(f uint64) (val int64, ok bool) {
	fs, fm, fe, fi, fn := funpack64(f)
	switch {
	case fi, fn: // Inf or NaN
		return 0, false
	case fe < -1: // f < 0.5
		return 0, false
	case fe > 63: // |f| >= 2^63
		if fs != 0 && fm == 0 { // f == -2^63 exactly
			return -1 << 63, true
		}
		// Any other value of this magnitude overflows int64.
		// (The original had two identical return paths here.)
		return 0, false
	}
	// Scale the mantissa so fe == mantbits64, truncating toward zero.
	for fe > int(mantbits64) {
		fe--
		fm <<= 1
	}
	for fe < int(mantbits64) {
		fe++
		fm >>= 1
	}
	val = int64(fm)
	if fs != 0 {
		val = -val
	}
	return val, true
}
// fintto64 returns the double-precision bit pattern representing the
// integer val.
func fintto64(val int64) (f uint64) {
	sign := uint64(val) & (1 << 63)
	mag := uint64(val)
	if sign != 0 {
		// Two's-complement negation gives the magnitude.
		mag = -mag
	}
	return fpack64(sign, mag, int(mantbits64), 0)
}
// fintto32 returns the single-precision bit pattern representing the
// integer val, rounding when the magnitude does not fit in 32 bits.
func fintto32(val int64) (f uint32) {
	sign := uint64(val) & (1 << 63)
	mag := uint64(val)
	if sign != 0 {
		// Two's-complement negation gives the magnitude.
		mag = -mag
	}
	// Shrink the magnitude until it fits in a uint32, folding any
	// nonzero discarded bits into the sticky bit for rounding.
	exp := int(mantbits32)
	var sticky uint32
	for mag >= 1<<32 {
		sticky |= uint32(mag) & 1
		mag >>= 1
		exp++
	}
	return fpack32(uint32(sign>>32), uint32(mag), exp, sticky)
}
// mullu computes the full 128-bit product u*v, returning the low and
// high 64-bit halves. Adapted from Hacker's Delight: each operand is
// split into 32-bit digits and the cross products are summed with
// explicit carry propagation.
func mullu(u, v uint64) (lo, hi uint64) {
	const (
		half    = 32
		lowMask = 1<<half - 1
	)
	// Split each operand into 32-bit halves.
	uLo, uHi := u&lowMask, u>>half
	vLo, vHi := v&lowMask, v>>half
	// Accumulate the partial products column by column.
	low := uLo * vLo
	mid := uHi*vLo + low>>half
	carry := mid >> half
	mid = mid&lowMask + uLo*vHi
	// The low half is just the wrapping product; the high half gathers
	// the remaining carries.
	return u * v, uHi*vHi + carry + mid>>half
}
// 128/64 -> 64 quotient, 64 remainder.
// adapted from hacker's delight
//
// divlu divides the 128-bit value u1<<64 | u0 by v. The caller must
// ensure u1 < v so the quotient fits in 64 bits; on violation both
// results are the all-ones overflow sentinel. v must be nonzero or the
// normalization loop below never terminates.
func divlu(u1, u0, v uint64) (q, r uint64) {
	const b = 1 << 32
	// Overflow: the quotient would need more than 64 bits.
	if u1 >= v {
		return 1<<64 - 1, 1<<64 - 1
	}
	// s = nlz(v); v <<= s
	s := uint(0)
	for v&(1<<63) == 0 {
		s++
		v <<= 1
	}
	// Split the normalized divisor and dividend into 32-bit digits.
	vn1 := v >> 32
	vn0 := v & (1<<32 - 1)
	// Note: when s == 0, u0>>(64-s) is u0>>64, which Go defines as 0.
	un32 := u1<<s | u0>>(64-s)
	un10 := u0 << s
	un1 := un10 >> 32
	un0 := un10 & (1<<32 - 1)
	// Estimate the first quotient digit, then correct the estimate
	// (the loop body runs at most twice).
	q1 := un32 / vn1
	rhat := un32 - q1*vn1
again1:
	if q1 >= b || q1*vn0 > b*rhat+un1 {
		q1--
		rhat += vn1
		if rhat < b {
			goto again1
		}
	}
	// Remainder after the first digit, then the second quotient digit.
	un21 := un32*b + un1 - q1*v
	q0 := un21 / vn1
	rhat = un21 - q0*vn1
again2:
	if q0 >= b || q0*vn0 > b*rhat+un0 {
		q0--
		rhat += vn1
		if rhat < b {
			goto again2
		}
	}
	// Combine the digits; shift undoes the normalization on the remainder.
	return q1*b + q0, (un21*b + un0 - q0*v) >> s
}
func fadd32(x, y uint32) uint32 {
return f64to32(fadd64(f32to64(x), f32to64(y)))
}
func fmul32(x, y uint32) uint32 {
return f64to32(fmul64(f32to64(x), f32to64(y)))
}
func fdiv32(x, y uint32) uint32 {
// TODO: are there double-rounding problems here? See issue 48807.
return f64to32(fdiv64(f32to64(x), f32to64(y)))
}
func feq32(x, y uint32) bool {
cmp, nan := fcmp64(f32to64(x), f32to64(y))
return cmp == 0 && !nan
}
func fgt32(x, y uint32) bool {
cmp, nan := fcmp64(f32to64(x), f32to64(y))
return cmp >= 1 && !nan
}
func fge32(x, y uint32) bool {
cmp, nan := fcmp64(f32to64(x), f32to64(y))
return cmp >= 0 && !nan
}
func feq64(x, y uint64) bool {
cmp, nan := fcmp64(x, y)
return cmp == 0 && !nan
}
func fgt64(x, y uint64) bool {
cmp, nan := fcmp64(x, y)
return cmp >= 1 && !nan
}
func fge64(x, y uint64) bool {
cmp, nan := fcmp64(x, y)
return cmp >= 0 && !nan
}
// Integer <-> floating-point conversion entry points, thin adapters
// over fintto32/fintto64 and f64toint.

func fint32to32(x int32) uint32 { return fintto32(int64(x)) }

func fint32to64(x int32) uint64 { return fintto64(int64(x)) }

func fint64to32(x int64) uint32 { return fintto32(x) }

func fint64to64(x int64) uint64 { return fintto64(x) }

func f32toint32(x uint32) int32 {
	v, _ := f64toint(f32to64(x))
	return int32(v)
}

func f32toint64(x uint32) int64 {
	v, _ := f64toint(f32to64(x))
	return v
}

func f64toint32(x uint64) int32 {
	v, _ := f64toint(x)
	return int32(v)
}

func f64toint64(x uint64) int64 {
	v, _ := f64toint(x)
	return v
}
// f64touint64 converts the double-precision bit pattern x to a uint64.
// Values below 2^63 use the signed conversion directly; larger values
// subtract 2^63 first and restore the high bit afterwards.
func f64touint64(x uint64) uint64 {
	var m uint64 = 0x43e0000000000000 // float64 1<<63
	if fgt64(m, x) {
		// x < 2^63: the signed conversion is exact.
		return uint64(f64toint64(x))
	}
	// x >= 2^63: compute x - 2^63 in float arithmetic by adding -2^63.
	// The negation must flip the float sign bit (fneg64), not negate
	// the raw integer bit pattern.
	y := fadd64(x, fneg64(m))
	z := uint64(f64toint64(y))
	return z | (1 << 63)
}
// f32touint64 converts the single-precision bit pattern x to a uint64.
// Values below 2^63 use the signed conversion directly; larger values
// subtract 2^63 first and restore the high bit afterwards.
func f32touint64(x uint32) uint64 {
	var m uint32 = 0x5f000000 // float32 1<<63
	if fgt32(m, x) {
		// x < 2^63: the signed conversion is exact.
		return uint64(f32toint64(x))
	}
	// x >= 2^63: compute x - 2^63 in float arithmetic by adding -2^63.
	// The negation must flip the float32 sign bit (bit 31), not negate
	// the raw integer bit pattern.
	y := fadd32(x, m^(1<<31))
	z := uint64(f32toint64(y))
	return z | (1 << 63)
}
// fuint64to64 converts the unsigned integer x to a double-precision
// bit pattern.
func fuint64to64(x uint64) uint64 {
	if int64(x) >= 0 {
		// Fits in int64: direct signed conversion.
		return fint64to64(int64(x))
	}
	// See ../cmd/compile/internal/ssagen/ssa.go:uint64Tofloat
	// Halve with a sticky low bit, convert, then double the result.
	half := x>>1 | x&1
	r := fint64to64(int64(half))
	return fadd64(r, r)
}
// fuint64to32 converts the unsigned integer x to a single-precision
// bit pattern.
func fuint64to32(x uint64) uint32 {
	if int64(x) >= 0 {
		// Fits in int64: direct signed conversion.
		return fint64to32(int64(x))
	}
	// See ../cmd/compile/internal/ssagen/ssa.go:uint64Tofloat
	// Halve with a sticky low bit, convert, then double the result.
	half := x>>1 | x&1
	r := fint64to32(int64(half))
	return fadd32(r, r)
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/cpu"
"internal/goarch"
"internal/goexperiment"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/gc"
"internal/runtime/sys"
"math/bits"
"unsafe"
)
/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).
The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):
guard = g->stackguard
frame = function's stack frame size
argsize = size of function arguments (call + return)
stack frame size <= StackSmall:
CMPQ guard, SP
JHI 3(PC)
MOVQ m->morearg, $(argsize << 32)
CALL morestack(SB)
stack frame size > StackSmall but < StackBig
LEAQ (frame-StackSmall)(SP), R0
CMPQ guard, R0
JHI 3(PC)
MOVQ m->morearg, $(argsize << 32)
CALL morestack(SB)
stack frame size >= StackBig:
MOVQ m->morearg, $((argsize << 32) | frame)
CALL morestack(SB)
The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).
For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/
const (
	// stackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code
	stackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds fixedStack0 up to a power of 2 by
	// smearing the highest set bit into all lower positions, then
	// adding one.
	fixedStack0 = stackMin + stackSystem
	fixedStack1 = fixedStack0 - 1
	fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
	fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
	fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
	fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
	fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
	fixedStack  = fixedStack6 + 1

	// stackNosplit is the maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackNosplit.
	stackNosplit = abi.StackNosplitBase * sys.StackGuardMultiplier

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	//
	// The guard leaves enough room for a stackNosplit chain of NOSPLIT calls
	// plus one stackSmall frame plus stackSystem bytes for the OS.
	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackLimit.
	stackGuard = stackNosplit + stackSystem + abi.StackSmall
)

// Debug and testing knobs; all zero/false in normal builds.
const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

var (
	stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
)

const (
	// uintptrMask has every bit of a uintptr set.
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack check to fail.
	// These are all larger than any real SP.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	// 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//
//	order = log_2(size/FixedStack)
//
// There is a free list for each order.
// The trailing padding keeps each entry on its own cache line so the
// per-order locks do not falsely share.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}

// stackpoolItem is a mutex together with the span list it protects.
type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - gc.PageShift]mSpanList // free lists by log_2(s.npages)
}
// stackinit initializes the global stack allocator state: the
// per-order stack pools, the large-stack free lists, and their locks.
func stackinit() {
	if _StackCacheSize&pageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
	// stackLarge has a single lock; initialize it once rather than
	// once per free list.
	lockInit(&stackLarge.lock, lockRankStackLarge)
}
// stacklog2 returns ⌊log_2(n)⌋; it returns 0 for n == 0.
func stacklog2(n uintptr) int {
	if n == 0 {
		return 0
	}
	// bits.Len64 returns the position of the highest set bit plus one,
	// so subtract one to get the floor of the base-2 logarithm.
	return bits.Len64(uint64(n)) - 1
}
// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	// Declare up front that the heap lock may be taken below
	// (in mheap_.allocManual) for lock-rank checking.
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>gc.PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = fixedStack << order
		// Carve the span into elemsize-sized stacks and thread them
		// onto the span's manual free list.
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			if valgrindenabled {
				// The address of x.ptr() becomes the base of stacks. We need to
				// mark it allocated here and in stackfree and stackpoolfree, and free'd in
				// stackalloc in order to avoid overlapping allocations and
				// uninitialized memory errors in valgrind.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	// Pop one stack off the span's free list.
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}
// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	// Push x onto the span's free list.
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}
// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
// stackcacherefill moves stacks from the global pool into c's
// per-order cache until the cache holds half its allowed capacity.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}
	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
// stackcacherelease returns stacks from c's per-order cache to the
// global pool until the cache is back down to half capacity.
//
//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
// stackcache_clear returns every cached stack in c, at every order,
// to the global pool, leaving c's stack cache empty.
//
//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}
// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
// n must be a power of 2. The returned stack spans [lo, hi).
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Debug modes bypass the pools entirely and take pages straight
	// from the OS so that tooling (efence) can catch bad accesses.
	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys, "goroutine stack (system)")
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		// Compute the order: smallest k with fixedStack<<k >= n.
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			// Fast path: take from this P's stack cache, refilling
			// it from the global pool if empty.
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		if valgrindenabled {
			// We're about to allocate the stack region starting at x.ptr().
			// To prevent valgrind from complaining about overlapping allocations,
			// we need to mark the (previously allocated) memory as free'd.
			valgrindFree(unsafe.Pointer(x.ptr()))
		}
		v = unsafe.Pointer(x)
	} else {
		// Large stack: use a dedicated span, preferring the large
		// stack cache over a fresh heap allocation.
		var s *mspan
		npage := uintptr(n) >> gc.PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	// Notify tracing and the various sanitizers of the new stack region.
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
			traceRelease(trace)
		}
	}
	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if valgrindenabled {
		valgrindMalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		// lo+n overflowed, so the bounds are inconsistent.
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	// Debug modes: the stack came straight from the OS, so either fault
	// it (to catch use-after-free) or return it to the OS.
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	// Notify tracing and the sanitizers that the region is gone.
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackFree(uintptr(v))
			traceRelease(trace)
		}
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if valgrindenabled {
		valgrindFree(v)
	}
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		// Small stack: return it to the per-P cache or the global pool.
		// Compute the order exactly as stackalloc did.
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			if valgrindenabled {
				// x.ptr() is the head of the list of free stacks, and will be used
				// when allocating a new stack, so it has to be marked allocated.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			// Release half the cache first if it is already full.
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			if valgrindenabled {
				// x.ptr() is the head of the list of free stacks, and will
				// be used when allocating a new stack, so it has to be
				// marked allocated.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		// Large stack: free the dedicated span.
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}
// maxstacksize is the goroutine stack size limit.
var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

// maxstackceiling is an additional cap on maxstacksize.
var maxstackceiling = maxstacksize

// ptrnames labels bitvector bits for debug printing (see adjustpointers).
var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp > sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+
// |  caller's FP (*) | (*) on ARM64, if framepointer_enabled && varp > sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp
//
// varp > sp means that the function has a frame;
// varp == sp means frameless function.

// adjustinfo carries the parameters of one stack move: the old stack's
// bounds and the pointer delta to apply to anything pointing into it.
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}
// adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if valgrindenabled {
		// p is a pointer on a stack, it is inherently initialized, as
		// everything on the stack is, but valgrind for _some unknown reason_
		// sometimes thinks it's uninitialized, and flags operations on p below
		// as uninitialized. We just initialize it if valgrind thinks its
		// uninitialized.
		//
		// See go.dev/issues/73801.
		valgrindMakeMemDefined(unsafe.Pointer(&p), unsafe.Sizeof(&p))
	}
	// Only pointers into [old.lo, old.hi) are relocated by delta.
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	// Select the byte holding bit i, then extract bit i%8 from it.
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
// f is the function owning the frame (used only for error reporting);
// it may be invalid for argument areas.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	// Walk the bitmap one byte (8 slots) at a time.
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		// Visit only the set bits, clearing each as it is handled.
		for b != 0 {
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					// Racing with a channel send: only adjust if the
					// slot still holds the old-stack pointer.
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
// Note: the argument/return area is adjusted by the callee.
//
// adjustframe rewrites every old-stack pointer held in one stack frame:
// the saved frame pointer, the locals, the incoming arguments, and all
// stack objects the compiler recorded for the frame.
func adjustframe(frame *stkframe, adjinfo *adjustinfo) {
	if frame.continpc == 0 {
		// Frame is dead.
		return
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}

	// Adjust saved frame pointer if there is one.
	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		// On AMD64, this is the caller's frame pointer saved in the current
		// frame.
		// On ARM64, this is the frame pointer of the caller's caller saved
		// by the caller in its frame (one word below its SP).
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	locals, args, objs := frame.getStackMap(true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			// Walk the object's pointer bitmap word by word.
			ptrBytes, gcData := obj.gcdata()
			for i := uintptr(0); i < ptrBytes; i += goarch.PtrSize {
				if *addb(gcData, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
		}
	}
}
// adjustctxt rewrites the scheduler-saved context pointer and frame
// pointer in gp.sched after a stack move.
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	oldfp := gp.sched.bp
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
	if GOARCH == "arm64" {
		// On ARM64, the frame pointer is saved one word *below* the SP,
		// which is not copied or adjusted in any frame. Do it explicitly
		// here.
		if oldfp == gp.sched.sp-goarch.PtrSize {
			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))
		}
	}
}
// adjustdefers rewrites the stack pointers held by gp's deferred calls.
func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
	}
}
// adjustpanics rewrites gp's pointer to the head of its panic list.
func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}
// adjustsudogs rewrites the element pointers of every sudog gp is
// waiting on, since those may point into gp's (old) stack.
func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		// NOTE(review): elem appears to hold two pointer words (vu, vp);
		// both must be adjusted. Confirm against the sudog definition.
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vu))
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vp))
	}
}
// fillstack writes the byte b over every byte of stk
// (used to poison stacks when stackPoisonCopy is set).
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}
// findsghi returns the highest address within stk that is the end of
// some waiting sudog's element slot (elem + elemsize), or 0 if none of
// gp's sudog elements point into stk.
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		// End address of this sudog's element slot.
		p := sg.elem.uintptr() + uintptr(sg.c.get().elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}
// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// The waitlink list is channel-sorted, so comparing with lastc
	// skips duplicate channels and locks each one only once.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c.get() != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// happen because the G involved must already be
			// suspended. So, we get a special hchan lock rank here
			// that is lower than gscan, but doesn't allow acquiring
			// any other locks other than hchan.
			lockWithRank(&sg.c.get().lock, lockRankHchanLeaf)
		}
		lastc = sg.c.get()
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels (same duplicate-skipping walk as above).
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c.get() != lastc {
			unlock(&sg.c.get().lock)
		}
		lastc = sg.c.get()
	}

	return sgsize
}
// copystack copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		// A syscall may hold raw pointers into the stack that we
		// cannot find or adjust.
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	// Bytes of the old stack in use: from the top (old.hi) down to SP.
	used := old.hi - gp.sched.sp
	// Add just the difference to gcController.addScannableStack.
	// g0 stacks never move, so this will never account for them.
	// It's also fine if we have no P, addScannableStack can deal with
	// that case.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi // amount every stack address moves by

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		// syncadjustsudogs copied that low region itself, so
		// exclude it from the memmove below.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		adjustframe(&u.frame, &adjinfo)
	}

	if valgrindenabled {
		// Tell valgrind about the new stack region (register it the
		// first time, retarget the existing registration after that).
		if gp.valgrindStackID == 0 {
			gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(new.lo), unsafe.Pointer(new.hi))
		} else {
			valgrindChangeStack(gp.valgrindStackID, unsafe.Pointer(new.lo), unsafe.Pointer(new.hi))
		}
	}

	// free old stack
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Some portion of the old stack has secret stuff on it.
		// We don't really know where we entered secret mode,
		// so just clear the whole thing.
		// TODO(dmo): traceback until we hit secret.Do? clearing
		// is fast and optimized, might not be worth it.
		memclrNoHeapPointers(unsafe.Pointer(old.lo), old.hi-old.lo)
		// The memmove call above might put secrets from the stack into registers.
		secretEraseRegisters()
	}
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
// round2 rounds x up to the next power of 2.
// For x <= 1 (including zero and negative values) the result is 1.
func round2(x int32) int32 {
	p := int32(1)
	s := uint(0)
	for p < x {
		s++
		p = 1 << s
	}
	return p
}
// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// If we're entering here from a secret context, clear
		// all the registers. This is important because we
		// might context switch to a different goroutine which
		// is not in secret mode, and it will not be careful
		// about clearing its registers.
		secretEraseRegisters()
	}

	if thisg.m.curg.throwsplit {
		// A split here is fatal: report as much state as possible.
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	// Clear morebuf so it can't be mistaken for a live frame later.
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + stackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		// Set a flag indicated that we've been synchronously preempted.
		gp.syncSafePoint = true

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + stackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
// nilfunc stands in for a nil function value (see gostartcallfn).
// If it is ever actually executed it faults immediately with a nil
// pointer dereference.
//
//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}
// gostartcallfn adjusts gobuf as if it executed a call to the function
// value fv and then stopped before the first instruction in fv.
// A nil fv is replaced by nilfunc, which faults if ever run.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	fn := unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack. The caller must hold the
// _Gscan bit for gp or must be running gp itself.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	if gp.syscallsp != 0 {
		return false
	}
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	if gp.asyncSafePoint {
		return false
	}
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	if gp.parkingOnChan.Load() {
		return false
	}
	// We also can't copy the stack while a gp is in _Gwaiting solely
	// to make itself available to suspendG.
	//
	// In these cases, the G is actually executing on the system
	// stack, and the execution tracer, mutex profiler, etc. may want
	// to take a stack trace of the G's stack.
	//
	// Note: it's safe to access gp.waitreason here.
	// We're only calling isShrinkStackSafe if we took ownership of the
	// G with the _Gscan bit. This prevents the goroutine from transitioning,
	// which prevents gp.waitreason from changing.
	if readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForSuspendG() {
		return false
	}
	// All checks passed: every frame has a precise pointer map.
	return true
}
// shrinkstack maybe shrinks the stack being used by gp, halving the
// allocation when less than a quarter of it is in use.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		// Shrinking disabled via GODEBUG.
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < fixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + stackNosplit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
// freeStackSpans frees unused stack spans at the end of GC.
// It releases fully-unused spans from the fixed-order stack pools and
// all spans on the large-stack free lists back to the heap.
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			// Save next before removing s from the list.
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}
// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32  // size of the object in bytes
	ptrBytes  int32  // number of leading bytes that contain pointers
	gcdataoff uint32 // offset to gcdata from moduledata.rodata
}
// gcdata returns the number of bytes that contain pointers, and
// a ptr/nonptr bitmask covering those bytes.
// Note that this bitmask might be larger than internal/abi.MaxPtrmaskBytes.
//
// The receiver must be the original record emitted by the compiler
// (or methodValueCallFrameObjs); its own address is used to find the
// module whose rodata holds the mask.
func (r *stackObjectRecord) gcdata() (uintptr, *byte) {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		// The normal case: stackObjectRecord is in funcdata.
		if datap.gofunc <= ptr && ptr < datap.epclntab {
			mod = datap
			break
		}
		// A special case: methodValueCallFrameObjs.
		if datap.noptrbss <= ptr && ptr < datap.enoptrbss {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return uintptr(r.ptrBytes), (*byte)(unsafe.Pointer(res))
}
// morestackc crashes the program: it is installed as the stack-growth
// handler for system-stack code, which must never need to grow.
//
// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}
// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between fixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC (see gcComputeStartingStackSize).
var startingStackSize uint32 = fixedStack
// gcComputeStartingStackSize recomputes startingStackSize from the
// per-P stack-scan statistics gathered during the last GC cycle,
// resetting those statistics as it goes. It is a no-op unless
// GODEBUG=adaptivestackstart is enabled.
func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
	// For details, see the design doc at
	// https://docs.google.com/document/d/1YDlGIdVTPnmUiTAavlZxBI1d9pwGQgZT7IKFKlIXohQ/edit?usp=sharing
	// The basic algorithm is to track the average size of stacks
	// and start goroutines with stack equal to that average size.
	// Starting at the average size uses at most 2x the space that
	// an ideal algorithm would have used.
	// This is just a heuristic to avoid excessive stack growth work
	// early in a goroutine's lifetime. See issue 18138. Stacks that
	// are allocated too small can still grow, and stacks allocated
	// too large can still shrink.
	var totalSize, totalCount uint64
	for _, p := range allp {
		totalSize += p.scannedStackSize
		totalCount += p.scannedStacks
		// Reset the per-P statistics for the next cycle.
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if totalCount == 0 {
		startingStackSize = fixedStack
		return
	}
	// Add stackGuard so a goroutine that uses exactly the average
	// space will not immediately trigger a growth.
	mean := totalSize/totalCount + stackGuard
	if mean > uint64(maxstacksize) {
		mean = uint64(maxstacksize)
	}
	if mean < fixedStack {
		mean = fixedStack
	}
	// Note: maxstacksize fits in 30 bits, so mean also does.
	startingStackSize = uint32(round2(int32(mean)))
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/sys"
"unsafe"
)
// A stkframe holds information about a single physical stack frame.
// It is filled in by the stack unwinder and consumed by GC stack
// scanning, traceback, and stack copying.
type stkframe struct {
	// fn is the function being run in this frame. If there is
	// inlining, this is the outermost function.
	fn funcInfo

	// pc is the program counter within fn.
	//
	// The meaning of this is subtle:
	//
	// - Typically, this frame performed a regular function call
	//   and this is the return PC (just after the CALL
	//   instruction). In this case, pc-1 reflects the CALL
	//   instruction itself and is the correct source of symbolic
	//   information.
	//
	// - If this frame "called" sigpanic, then pc is the
	//   instruction that panicked, and pc is the correct address
	//   to use for symbolic information.
	//
	// - If this is the innermost frame, then PC is where
	//   execution will continue, but it may not be the
	//   instruction following a CALL. This may be from
	//   cooperative preemption, in which case this is the
	//   instruction after the call to morestack. Or this may be
	//   from a signal or an un-started goroutine, in which case
	//   PC could be any instruction, including the first
	//   instruction in a function. Conventionally, we use pc-1
	//   for symbolic information, unless pc == fn.entry(), in
	//   which case we use pc.
	pc uintptr

	// continpc is the PC where execution will continue in fn, or
	// 0 if execution will not continue in this frame.
	//
	// This is usually the same as pc, unless this frame "called"
	// sigpanic, in which case it's either the address of
	// deferreturn or 0 if this frame will never execute again.
	//
	// This is the PC to use to look up GC liveness for this frame.
	continpc uintptr

	lr   uintptr // program counter at caller aka link register
	sp   uintptr // stack pointer at pc
	fp   uintptr // stack pointer at caller aka frame pointer
	varp uintptr // top of local variables
	argp uintptr // pointer to function arguments
}
// reflectMethodValue is a partial duplicate of reflect.makeFuncImpl
// and reflect.methodValue. The field layout must stay in sync with
// those types; argMapInternal reads it through an unsafe cast.
type reflectMethodValue struct {
	fn     uintptr
	stack  *bitvector // ptrmap for both args and results
	argLen uintptr    // just args
}
// argBytes returns the size in bytes of the argument frame for a
// call to frame.fn.
func (frame *stkframe) argBytes() uintptr {
	if frame.fn.args == abi.ArgsSizeUnknown {
		// Uncommon, complicated case: derive the size from the
		// fully-fetched argument map.
		argMap, _ := frame.argMapInternal()
		return uintptr(argMap.n) * goarch.PtrSize
	}
	return uintptr(frame.fn.args)
}
// argMapInternal is used internally by stkframe to fetch special
// argument maps.
//
// argMap.n is always populated with the size of the argument map.
//
// argMap.bytedata is only populated for dynamic argument maps (used
// by reflect). If the caller requires the argument map, it should use
// this if non-nil, and otherwise fetch the argument map using the
// current PC.
//
// hasReflectStackObj indicates that this frame also has a reflect
// function stack object, which the caller must synthesize.
func (frame *stkframe) argMapInternal() (argMap bitvector, hasReflectStackObj bool) {
	f := frame.fn
	if f.args != abi.ArgsSizeUnknown {
		// Common case: the compiler recorded a fixed argument size.
		argMap.n = f.args / goarch.PtrSize
		return
	}
	// Extract argument bitmaps for reflect stubs from the calls they made to reflect.
	switch funcname(f) {
	case "reflect.makeFuncStub", "reflect.methodValueCall":
		// These take a *reflect.methodValue as their
		// context register and immediately save it to 0(SP).
		// Get the methodValue from 0(SP).
		arg0 := frame.sp + sys.MinFrameSize

		minSP := frame.fp
		if !usesLR {
			// The CALL itself pushes a word.
			// Undo that adjustment.
			minSP -= goarch.PtrSize
		}
		if arg0 >= minSP {
			// The function hasn't started yet.
			// This only happens if f was the
			// start function of a new goroutine
			// that hasn't run yet *and* f takes
			// no arguments and has no results
			// (otherwise it will get wrapped in a
			// closure). In this case, we can't
			// reach into its locals because it
			// doesn't have locals yet, but we
			// also know its argument map is
			// empty.
			if frame.pc != f.entry() {
				print("runtime: confused by ", funcname(f), ": no frame (sp=", hex(frame.sp), " fp=", hex(frame.fp), ") at entry+", hex(frame.pc-f.entry()), "\n")
				throw("reflect mismatch")
			}
			return bitvector{}, false // No locals, so also no stack objects
		}
		hasReflectStackObj = true
		mv := *(**reflectMethodValue)(unsafe.Pointer(arg0))
		// Figure out whether the return values are valid.
		// Reflect will update this value after it copies
		// in the return values.
		retValid := *(*bool)(unsafe.Pointer(arg0 + 4*goarch.PtrSize))
		if mv.fn != f.entry() {
			print("runtime: confused by ", funcname(f), "\n")
			throw("reflect mismatch")
		}
		argMap = *mv.stack
		if !retValid {
			// argMap.n includes the results, but
			// those aren't valid, so drop them.
			n := int32((mv.argLen &^ (goarch.PtrSize - 1)) / goarch.PtrSize)
			if n < argMap.n {
				argMap.n = n
			}
		}
	}
	return
}
// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame. The debug flag only enables extra
// diagnostic printing when stackDebug is also high.
func (frame *stkframe) getStackMap(debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead. Return empty bitvectors.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry() {
		// Back up to the CALL. If we're at the function entry
		// point, we want to use the entry map (-1), even if
		// the first instruction of the function changes the
		// stack map.
		targetpc--
		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, targetpc)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch goarch.ArchFamily {
	case goarch.ARM64:
		minsize = sys.StackAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stackid := pcdata
		stkmap := (*stackmap)(funcdata(f, abi.FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// If nbit == 0, there's no work to do.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print("      no locals to adjust\n")
		}
	}

	// Arguments. First fetch frame size and special-case argument maps.
	var isReflect bool
	args, isReflect = frame.argMapInternal()
	if args.n > 0 && args.bytedata == nil {
		// Non-empty argument frame, but not a special map.
		// Fetch the argument map at pcdata.
		stackmap := (*stackmap)(funcdata(f, abi.FUNCDATA_ArgsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(args.n*goarch.PtrSize), "\n")
			throw("missing stackmap")
		}
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
			throw("bad symbol table")
		}
		if stackmap.nbit == 0 {
			args.n = 0
		} else {
			args = stackmapdata(stackmap, pcdata)
		}
	}

	// stack objects.
	if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "loong64" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64" || GOARCH == "s390x") &&
		unsafe.Sizeof(abi.RegArgs{}) > 0 && isReflect {
		// For reflect.makeFuncStub and reflect.methodValueCall,
		// we need to fake the stack object record.
		// These frames contain an internal/abi.RegArgs at a hard-coded offset.
		// This offset matches the assembly code on amd64 and arm64.
		objs = methodValueCallFrameObjs[:]
	} else {
		p := funcdata(f, abi.FUNCDATA_StackObjects)
		if p != nil {
			n := *(*uintptr)(p)
			p = add(p, goarch.PtrSize)
			r0 := (*stackObjectRecord)(noescape(p))
			objs = unsafe.Slice(r0, int(n))
			// Note: the noescape above is needed to keep
			// getStackMap from "leaking param content:
			// frame".  That leak propagates up to getgcmask, then
			// GCMask, then verifyGCInfo, which converts the stack
			// gcinfo tests into heap gcinfo tests :(
		}
	}

	return
}
// methodValueCallFrameObjs holds the synthesized stack object record
// for reflect.makeFuncStub/methodValueCall frames (see getStackMap).
var methodValueCallFrameObjs [1]stackObjectRecord // initialized in stkobjinit

// stkobjinit populates methodValueCallFrameObjs with a record
// describing the internal/abi.RegArgs stack object those frames hold.
func stkobjinit() {
	var abiRegArgsEface any = abi.RegArgs{}
	abiRegArgsType := efaceOf(&abiRegArgsEface)._type
	// Set methodValueCallFrameObjs[0].gcdataoff so that
	// stackObjectRecord.gcdata() will work correctly with it.
	ptr := uintptr(unsafe.Pointer(&methodValueCallFrameObjs[0]))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.noptrbss <= ptr && ptr < datap.enoptrbss {
			mod = datap
			break
		}
	}
	if mod == nil {
		throw("methodValueCallFrameObjs is not in a module")
	}
	methodValueCallFrameObjs[0] = stackObjectRecord{
		off:       -int32(alignUp(abiRegArgsType.Size_, 8)), // It's always the highest address local.
		size:      int32(abiRegArgsType.Size_),
		ptrBytes:  int32(abiRegArgsType.PtrBytes),
		gcdataoff: uint32(uintptr(unsafe.Pointer(getGCMask(abiRegArgsType))) - mod.rodata),
	}
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/bytealg"
"internal/goarch"
"internal/goos"
"internal/runtime/math"
"internal/runtime/sys"
"internal/strconv"
"unsafe"
)
// The constant is known to the compiler.
// There is no fundamental theory behind this number.
const tmpStringBufSize = 32

// tmpBuf is a caller-provided scratch buffer for small string/byte
// results that the compiler has proven do not escape.
type tmpBuf [tmpStringBufSize]byte
// concatstrings implements a Go string concatenation x+y+z+...
// The operands are passed in the slice a.
// If buf != nil, the compiler has determined that the result does not
// escape the calling function, so the string data can be stored in buf
// if small enough.
func concatstrings(buf *tmpBuf, a []string) string {
	var (
		total    int // total byte length of the result
		nonEmpty int // number of non-empty operands
		last     int // index of the last non-empty operand
	)
	for i, s := range a {
		if len(s) == 0 {
			continue
		}
		if total+len(s) < total {
			// Length overflowed int.
			throw("string concatenation too long")
		}
		total += len(s)
		nonEmpty++
		last = i
	}
	if nonEmpty == 0 {
		return ""
	}

	// With a single non-empty operand we may return it directly:
	// either the result does not escape (buf != nil) or the operand's
	// bytes are not on our stack and will outlive the frame.
	if nonEmpty == 1 && (buf != nil || !stringDataOnStack(a[last])) {
		return a[last]
	}

	out, dst := rawstringtmp(buf, total)
	for _, s := range a {
		dst = dst[copy(dst, s):]
	}
	return out
}
// concatstring2 helps make the callsite smaller (compared to concatstrings),
// and we think this is currently more valuable than omitting one call in the
// chain, the same goes for concatstring{3,4,5}.
func concatstring2(buf *tmpBuf, a0, a1 string) string {
	return concatstrings(buf, []string{a0, a1})
}

// concatstring3 is the three-operand specialization of concatstrings.
func concatstring3(buf *tmpBuf, a0, a1, a2 string) string {
	return concatstrings(buf, []string{a0, a1, a2})
}

// concatstring4 is the four-operand specialization of concatstrings.
func concatstring4(buf *tmpBuf, a0, a1, a2, a3 string) string {
	return concatstrings(buf, []string{a0, a1, a2, a3})
}

// concatstring5 is the five-operand specialization of concatstrings.
func concatstring5(buf *tmpBuf, a0, a1, a2, a3, a4 string) string {
	return concatstrings(buf, []string{a0, a1, a2, a3, a4})
}
// concatbytes implements a Go string concatenation x+y+z+... returning
// a slice of bytes. The operands are passed in the slice a.
// If buf != nil and the result fits, it is stored in buf.
func concatbytes(buf *tmpBuf, a []string) []byte {
	total := 0
	for _, s := range a {
		if total+len(s) < total {
			// Length overflowed int.
			throw("string concatenation too long")
		}
		total += len(s)
	}
	if total == 0 {
		// This is to match the return type of the non-optimized
		// concatenation: an empty, non-nil slice.
		return []byte{}
	}

	var out []byte
	if buf != nil && total <= len(buf) {
		*buf = tmpBuf{}
		out = buf[:total]
	} else {
		out = rawbyteslice(total)
	}
	pos := 0
	for _, s := range a {
		pos += copy(out[pos:], s)
	}
	return out
}
// concatbyte2 helps make the callsite smaller (compared to concatbytes),
// and we think this is currently more valuable than omitting one call in
// the chain, the same goes for concatbyte{3,4,5}.
func concatbyte2(buf *tmpBuf, a0, a1 string) []byte {
	return concatbytes(buf, []string{a0, a1})
}

// concatbyte3 is the three-operand specialization of concatbytes.
func concatbyte3(buf *tmpBuf, a0, a1, a2 string) []byte {
	return concatbytes(buf, []string{a0, a1, a2})
}

// concatbyte4 is the four-operand specialization of concatbytes.
func concatbyte4(buf *tmpBuf, a0, a1, a2, a3 string) []byte {
	return concatbytes(buf, []string{a0, a1, a2, a3})
}

// concatbyte5 is the five-operand specialization of concatbytes.
func concatbyte5(buf *tmpBuf, a0, a1, a2, a3, a4 string) []byte {
	return concatbytes(buf, []string{a0, a1, a2, a3, a4})
}
// slicebytetostring converts a byte slice to a string.
// It is inserted by the compiler into generated code.
// ptr is a pointer to the first element of the slice;
// n is the length of the slice.
// Buf is a fixed-size buffer for the result,
// it is not nil if the result does not escape.
func slicebytetostring(buf *tmpBuf, ptr *byte, n int) string {
	if n == 0 {
		// Turns out to be a relatively common case.
		// Consider that you want to parse out data between parens in "foo()bar",
		// you find the indices and convert the subslice to string.
		return ""
	}
	if raceenabled {
		racereadrangepc(unsafe.Pointer(ptr),
			uintptr(n),
			sys.GetCallerPC(),
			abi.FuncPCABIInternal(slicebytetostring))
	}
	if msanenabled {
		msanread(unsafe.Pointer(ptr), uintptr(n))
	}
	if asanenabled {
		asanread(unsafe.Pointer(ptr), uintptr(n))
	}
	if n == 1 {
		// Single-byte strings alias the static staticuint64s table
		// instead of allocating.
		p := unsafe.Pointer(&staticuint64s[*ptr])
		if goarch.BigEndian {
			// The byte lives in the last byte of the uint64 on
			// big-endian machines.
			p = add(p, 7)
		}
		return unsafe.String((*byte)(p), 1)
	}

	var p unsafe.Pointer
	if buf != nil && n <= len(buf) {
		p = unsafe.Pointer(buf)
	} else {
		p = mallocgc(uintptr(n), nil, false)
	}
	memmove(p, unsafe.Pointer(ptr), uintptr(n))
	return unsafe.String((*byte)(p), n)
}
// stringDataOnStack reports whether s's backing bytes are stored on
// the current goroutine's stack.
func stringDataOnStack(s string) bool {
	p := uintptr(unsafe.Pointer(unsafe.StringData(s)))
	bounds := getg().stack
	if p < bounds.lo {
		return false
	}
	return p < bounds.hi
}
// rawstringtmp returns a string s and byte slice b of length l that
// alias the same storage, placing it in buf when the result fits.
func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte) {
	if buf == nil || l > len(buf) {
		return rawstring(l)
	}
	b = buf[:l]
	s = slicebytetostringtmp(&b[0], len(b))
	return
}
// slicebytetostringtmp returns a "string" referring to the actual []byte bytes.
//
// Callers need to ensure that the returned string will not be used after
// the calling goroutine modifies the original slice or synchronizes with
// another goroutine.
//
// The function is only called when instrumenting
// and otherwise intrinsified by the compiler.
//
// Some internal compiler optimizations use this function.
//   - Used for m[T1{... Tn{..., string(k), ...} ...}] and m[string(k)]
//     where k is []byte, T1 to Tn is a nesting of struct and array literals.
//   - Used for "<"+string(b)+">" concatenation where b is []byte.
//   - Used for string(b)=="foo" comparison where b is []byte.
func slicebytetostringtmp(ptr *byte, n int) string {
	// Only the sanitizer hooks run here; the conversion itself is a
	// zero-copy aliasing of ptr's bytes.
	if raceenabled && n > 0 {
		racereadrangepc(unsafe.Pointer(ptr),
			uintptr(n),
			sys.GetCallerPC(),
			abi.FuncPCABIInternal(slicebytetostringtmp))
	}
	if msanenabled && n > 0 {
		msanread(unsafe.Pointer(ptr), uintptr(n))
	}
	if asanenabled && n > 0 {
		asanread(unsafe.Pointer(ptr), uintptr(n))
	}
	return unsafe.String(ptr, n)
}
// stringtoslicebyte converts a string to a byte slice, copying the
// contents. If the caller-provided buf is large enough, it is used as
// the backing storage (after being zeroed); otherwise a new slice is
// allocated via rawbyteslice.
func stringtoslicebyte(buf *tmpBuf, s string) []byte {
	var b []byte
	if buf != nil && len(s) <= len(buf) {
		// Clear the temporary buffer before slicing it down to size.
		*buf = tmpBuf{}
		b = buf[:len(s)]
	} else {
		b = rawbyteslice(len(s))
	}
	copy(b, s)
	return b
}
// stringtoslicerune converts a string to a slice of its runes.
// If the caller-provided buf is large enough, it is used as the
// backing storage (after being zeroed); otherwise a new slice is
// allocated via rawruneslice.
func stringtoslicerune(buf *[tmpStringBufSize]rune, s string) []rune {
	// two passes.
	// unlike slicerunetostring, no race because strings are immutable.
	// First pass: count the runes (range over a string yields runes).
	n := 0
	for range s {
		n++
	}
	var a []rune
	if buf != nil && n <= len(buf) {
		*buf = [tmpStringBufSize]rune{}
		a = buf[:n]
	} else {
		a = rawruneslice(n)
	}
	// Second pass: decode each rune into the destination.
	n = 0
	for _, r := range s {
		a[n] = r
		n++
	}
	return a
}
// slicerunetostring converts a rune slice to a string, UTF-8 encoding
// each rune. If the caller-provided buf is large enough it backs the
// result; otherwise new storage is allocated.
func slicerunetostring(buf *tmpBuf, a []rune) string {
	if raceenabled && len(a) > 0 {
		racereadrangepc(unsafe.Pointer(&a[0]),
			uintptr(len(a))*unsafe.Sizeof(a[0]),
			sys.GetCallerPC(),
			abi.FuncPCABIInternal(slicerunetostring))
	}
	if msanenabled && len(a) > 0 {
		msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
	}
	if asanenabled && len(a) > 0 {
		asanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
	}
	// First pass: measure the encoded size using a scratch buffer.
	var dum [4]byte
	size1 := 0
	for _, r := range a {
		size1 += encoderune(dum[:], r)
	}
	// +3 slack: a can be mutated concurrently. The size2 >= size1 guard
	// below stops before writing past size1, but the final encoderune
	// can still emit up to 4 bytes starting at size1-1, i.e. up to
	// size1+3 bytes total.
	s, b := rawstringtmp(buf, size1+3)
	size2 := 0
	for _, r := range a {
		// check for race
		if size2 >= size1 {
			break
		}
		size2 += encoderune(b[size2:], r)
	}
	return s[:size2]
}
// stringStruct is the runtime's view of a string header: a pointer to
// the bytes and a length. stringStructOf below relies on it having the
// same layout as a string value.
type stringStruct struct {
	str unsafe.Pointer
	len int
}

// Variant with *byte pointer type for DWARF debugging.
type stringStructDWARF struct {
	str *byte
	len int
}

// stringStructOf reinterprets a *string as a *stringStruct so the
// pointer and length fields can be read and written directly.
func stringStructOf(sp *string) *stringStruct {
	return (*stringStruct)(unsafe.Pointer(sp))
}
// intstring converts a rune value v to the string containing that
// single rune, UTF-8 encoded. Values outside the rune range encode as
// runeError. buf, when non-nil, backs the result (4 bytes is enough
// for any UTF-8 rune).
func intstring(buf *[4]byte, v int64) (s string) {
	var b []byte
	if buf != nil {
		b = buf[:]
		s = slicebytetostringtmp(&b[0], len(b))
	} else {
		s, b = rawstring(4)
	}
	// Out-of-range values become the replacement rune.
	if int64(rune(v)) != v {
		v = runeError
	}
	n := encoderune(b, rune(v))
	return s[:n]
}
// rawstring allocates storage for a new string. The returned
// string and byte slice both refer to the same storage.
// The storage is not zeroed. Callers should use
// b to set the string contents and then drop b.
func rawstring(size int) (s string, b []byte) {
	// noscan allocation: string payloads contain no pointers.
	p := mallocgc(uintptr(size), nil, false)
	return unsafe.String((*byte)(p), size), unsafe.Slice((*byte)(p), size)
}
// rawbyteslice allocates a new byte slice. The byte slice is not zeroed.
func rawbyteslice(size int) (b []byte) {
	// Round up to the allocator's size class; the slice capacity is
	// the full rounded-up allocation.
	cap := roundupsize(uintptr(size), true)
	p := mallocgc(cap, nil, false)
	if cap != uintptr(size) {
		// Zero only the tail beyond the requested length so that
		// growing into the capacity later sees clean memory.
		memclrNoHeapPointers(add(p, uintptr(size)), cap-uintptr(size))
	}
	*(*slice)(unsafe.Pointer(&b)) = slice{p, size, int(cap)}
	return
}
// rawruneslice allocates a new rune slice. The rune slice is not zeroed.
func rawruneslice(size int) (b []rune) {
	// Guard against overflow: each rune occupies 4 bytes.
	if uintptr(size) > maxAlloc/4 {
		throw("out of memory")
	}
	mem := roundupsize(uintptr(size)*4, true)
	p := mallocgc(mem, nil, false)
	if mem != uintptr(size)*4 {
		// Zero the tail between the requested length and the
		// rounded-up allocation size.
		memclrNoHeapPointers(add(p, uintptr(size)*4), mem-uintptr(size)*4)
	}
	*(*slice)(unsafe.Pointer(&b)) = slice{p, size, int(mem / 4)}
	return
}
// used by cmd/cgo
//
// gobytes copies n bytes starting at p into a freshly allocated
// []byte. It panics if n is negative or exceeds the maximum
// allocation size.
func gobytes(p *byte, n int) (b []byte) {
	if n == 0 {
		return make([]byte, 0)
	}
	if n < 0 || uintptr(n) > maxAlloc {
		panic(errorString("gobytes: length out of range"))
	}
	// noscan allocation: the copied bytes contain no pointers.
	bp := mallocgc(uintptr(n), nil, false)
	memmove(bp, unsafe.Pointer(p), uintptr(n))
	*(*slice)(unsafe.Pointer(&b)) = slice{bp, n, n}
	return
}
// This is exported via linkname to assembly in syscall (for Plan9) and cgo.
//
// gostring copies the NUL-terminated C string at p into a new Go string.
//
//go:linkname gostring
func gostring(p *byte) string {
	l := findnull(p)
	if l == 0 {
		return ""
	}
	s, b := rawstring(l)
	memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l))
	return s
}
// internal_syscall_gostring is a version of gostring for internal/syscall/unix.
// It exists only as a linkname target; do not change its signature.
//
//go:linkname internal_syscall_gostring internal/syscall/unix.gostring
func internal_syscall_gostring(p *byte) string {
	return gostring(p)
}
// gostringn copies exactly l bytes starting at p into a new Go string.
// Unlike gostring it does not look for a NUL terminator.
func gostringn(p *byte, l int) string {
	if l == 0 {
		return ""
	}
	s, b := rawstring(l)
	memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l))
	return s
}
// parseByteCount parses a string that represents a count of bytes.
//
// s must match the following regular expression:
//
//	^[0-9]+(([KMGT]i)?B)?$
//
// In other words, an integer byte count with an optional unit
// suffix. Acceptable suffixes include one of
// - KiB, MiB, GiB, TiB which represent binary IEC/ISO 80000 units, or
// - B, which just represents bytes.
//
// Returns an int64 because that's what its callers want and receive,
// but the result is always non-negative.
func parseByteCount(s string) (int64, bool) {
	// The empty string is not valid.
	if s == "" {
		return 0, false
	}
	// The grammar requires the count to begin with a digit.
	// strconv.ParseInt would otherwise also accept a leading sign
	// (e.g. "+1B"), which the regular expression above rejects.
	if s[0] < '0' || s[0] > '9' {
		return 0, false
	}
	// Handle the easy non-suffix case.
	last := s[len(s)-1]
	if last >= '0' && last <= '9' {
		n, err := strconv.ParseInt(s, 10, 64)
		if err != nil || n < 0 {
			return 0, false
		}
		return n, true
	}
	// Failing a trailing digit, this must always end in 'B'.
	// Also at this point there must be at least one digit before
	// that B.
	if last != 'B' || len(s) < 2 {
		return 0, false
	}
	// The one before that must always be a digit or 'i'.
	if c := s[len(s)-2]; c >= '0' && c <= '9' {
		// Trivial 'B' suffix.
		n, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
		if err != nil || n < 0 {
			return 0, false
		}
		return n, true
	} else if c != 'i' {
		return 0, false
	}
	// Finally, we need at least 4 characters now, for the unit
	// prefix and at least one digit.
	if len(s) < 4 {
		return 0, false
	}
	power := 0
	switch s[len(s)-3] {
	case 'K':
		power = 1
	case 'M':
		power = 2
	case 'G':
		power = 3
	case 'T':
		power = 4
	default:
		// Invalid suffix.
		return 0, false
	}
	// m is the multiplier 1024^power.
	m := uint64(1) << (10 * power)
	n, err := strconv.ParseInt(s[:len(s)-3], 10, 64)
	if err != nil || n < 0 {
		return 0, false
	}
	un := uint64(n)
	if un > math.MaxUint64/m {
		// Overflow.
		return 0, false
	}
	un *= m
	if un > uint64(math.MaxInt64) {
		// Overflow.
		return 0, false
	}
	return int64(un), true
}
// findnull returns the length of the NUL-terminated byte sequence
// starting at s, or 0 if s is nil.
//
//go:nosplit
func findnull(s *byte) int {
	if s == nil {
		return 0
	}
	// Avoid IndexByteString on Plan 9 because it uses SSE instructions
	// on x86 machines, and those are classified as floating point instructions,
	// which are illegal in a note handler.
	if GOOS == "plan9" {
		p := (*[maxAlloc/2 - 1]byte)(unsafe.Pointer(s))
		l := 0
		for p[l] != 0 {
			l++
		}
		return l
	}
	// pageSize is the unit we scan at a time looking for NULL.
	// It must be the minimum page size for any architecture Go
	// runs on. It's okay (just a minor performance loss) if the
	// actual system page size is larger than this value.
	// For Android, we set the page size to the MTE size, as MTE
	// might be enforced. See issue 59090.
	const pageSize = 4096*(1-goos.IsAndroid) + 16*goos.IsAndroid
	offset := 0
	ptr := unsafe.Pointer(s)
	// IndexByteString uses wide reads, so we need to be careful
	// with page boundaries. Call IndexByteString on
	// [ptr, endOfPage) interval.
	safeLen := int(pageSize - uintptr(ptr)%pageSize)
	for {
		// Fabricate a string header over the current page fragment so
		// IndexByteString can scan it without copying.
		t := *(*string)(unsafe.Pointer(&stringStruct{ptr, safeLen}))
		// Check one page at a time.
		if i := bytealg.IndexByteString(t, 0); i != -1 {
			return offset + i
		}
		// Move to next page
		ptr = unsafe.Pointer(uintptr(ptr) + uintptr(safeLen))
		offset += safeLen
		safeLen = pageSize
	}
}
// findnullw returns the length, in uint16 units, of the
// NUL-terminated wide-character sequence starting at s.
// It returns 0 if s is nil.
func findnullw(s *uint16) int {
	if s == nil {
		return 0
	}
	// View the memory as a maximally-sized array so it can be indexed.
	p := (*[maxAlloc/2/2 - 1]uint16)(unsafe.Pointer(s))
	var n int
	for ; p[n] != 0; n++ {
	}
	return n
}
// gostringnocopy returns a Go string aliasing the NUL-terminated
// bytes at str — no copy is made, so the memory must stay valid and
// unmodified for the lifetime of the string.
//
//go:nosplit
func gostringnocopy(str *byte) string {
	ss := stringStruct{str: unsafe.Pointer(str), len: findnull(str)}
	s := *(*string)(unsafe.Pointer(&ss))
	return s
}
// gostringw converts the NUL-terminated UTF-16-style sequence at strw
// to a UTF-8 Go string. Like slicerunetostring, it tolerates racy
// mutation of the source by bounding the second pass with the size
// measured in the first.
func gostringw(strw *uint16) string {
	var buf [8]byte
	str := (*[maxAlloc/2/2 - 1]uint16)(unsafe.Pointer(strw))
	// First pass: measure the UTF-8 encoded size.
	n1 := 0
	for i := 0; str[i] != 0; i++ {
		n1 += encoderune(buf[:], rune(str[i]))
	}
	// +4 slack so the final encoderune (up to 4 bytes) and the
	// trailing NUL below cannot write past the allocation even if the
	// source changed between passes.
	s, b := rawstring(n1 + 4)
	n2 := 0
	for i := 0; str[i] != 0; i++ {
		// check for race
		if n2 >= n1 {
			break
		}
		n2 += encoderune(b[n2:], rune(str[i]))
	}
	b[n2] = 0 // for luck
	return s[:n2]
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"unsafe"
)
// Should be a built-in for unsafe.Pointer?
//
// add returns the pointer p advanced by x bytes.
//
// add should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - fortio.org/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname add
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
// getg returns the pointer to the current g.
// The compiler rewrites calls to this function into instructions
// that fetch the g directly (from TLS or from the dedicated register).
// There is no Go or assembly body: the declaration exists only so the
// intrinsic has a type.
func getg() *g

// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))
// systemstack runs fn on a system stack.
// If systemstack is called from the per-OS-thread (g0) stack, or
// if systemstack is called from the signal handling (gsignal) stack,
// systemstack calls fn directly and returns.
// Otherwise, systemstack is being called from the limited stack
// of an ordinary goroutine. In this case, systemstack switches
// to the per-OS-thread stack, calls fn, and switches back.
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to system stack:
//
//	... set up y ...
//	systemstack(func() {
//		x = bigcall(y)
//	})
//	... use x ...
//
//go:noescape
func systemstack(fn func())

// badsystemstack reports a fatal misuse of systemstack. It is marked
// nosplit/nowritebarrierrec because it may run in contexts where stack
// growth and write barriers are not allowed; presumably it is reached
// from the assembly implementation of systemstack — confirm there.
//
//go:nosplit
//go:nowritebarrierrec
func badsystemstack() {
	writeErrStr("fatal: systemstack called from unexpected goroutine")
}
// memclrNoHeapPointers clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// *ptr is initialized memory and its type is pointer-free, or
//
// *ptr is uninitialized memory (e.g., memory that's being reused
// for a new allocation) and hence contains only "junk".
//
// memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
// is a multiple of the pointer size, then any pointer-aligned,
// pointer-sized portion is cleared atomically. Despite the function
// name, this is necessary because this function is the underlying
// implementation of typedmemclr and memclrHasPointers. See the doc of
// memmove for more details.
//
// The (CPU-specific) implementations of this function are in memclr_*.s.
//
// memclrNoHeapPointers should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/chenzhuoyu/iasm
//   - github.com/dgraph-io/ristretto
//   - github.com/outcaste-io/ristretto
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname memclrNoHeapPointers
//go:noescape
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)

// reflect_memclrNoHeapPointers is the entry point linknamed into
// package reflect; it simply forwards to memclrNoHeapPointers.
//
//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
	memclrNoHeapPointers(ptr, n)
}
// memmove copies n bytes from "from" to "to".
//
// memmove ensures that any pointer in "from" is written to "to" with
// an indivisible write, so that racy reads cannot observe a
// half-written pointer. This is necessary to prevent the garbage
// collector from observing invalid pointers, and differs from memmove
// in unmanaged languages. However, memmove is only required to do
// this if "from" and "to" may contain pointers, which can only be the
// case if "from", "to", and "n" are all be word-aligned.
//
// Implementations are in memmove_*.s.
//
// Outside assembly calls memmove.
//
// memmove should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/dynamicgo
//   - github.com/ebitengine/purego
//   - github.com/tetratelabs/wazero
//   - github.com/ugorji/go/codec
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname memmove
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)

// reflect_memmove is the entry point linknamed into package reflect;
// it simply forwards to memmove.
//
//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
	memmove(to, from, n)
}
// exported value for testing
//
// hashLoad is the map load factor as a float32
// (loadFactorNum/loadFactorDen — presumably the map implementation's
// thresholds; see the map sources).
const hashLoad = float32(loadFactorNum) / float32(loadFactorDen)

// memequal reports whether the size bytes at a and b are identical.
//
// in internal/bytealg/equal_*.s
//
// memequal should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname memequal
//go:noescape
func memequal(a, b unsafe.Pointer, size uintptr) bool
// noescape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to zero instructions.
// USE CAREFULLY!
//
// noescape should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/gopkg
//   - github.com/ebitengine/purego
//   - github.com/hamba/avro/v2
//   - github.com/puzpuzpuz/xsync/v3
//   - github.com/songzhibin97/gkit
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname noescape
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	// x ^ 0 is the identity, but breaks the data-flow link that
	// escape analysis would otherwise follow from input to output.
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

// noEscapePtr hides a pointer from escape analysis. See noescape.
// USE CAREFULLY!
//
//go:nosplit
func noEscapePtr[T any](p *T) *T {
	// Same identity-xor trick as noescape, typed.
	x := uintptr(unsafe.Pointer(p))
	return (*T)(unsafe.Pointer(x ^ 0))
}
// Not all cgocallback frames are actually cgocallback,
// so not all have these arguments. Mark them uintptr so that the GC
// does not misinterpret memory when the arguments are not present.
// cgocallback is not called from Go, only from crosscall2.
// This in turn calls cgocallbackg, which is where we'll find
// pointer-declared arguments.
//
// When fn is nil (frame is saved g), call dropm instead,
// this is used when the C thread is exiting.
func cgocallback(fn, frame, ctxt uintptr)

// gogo resumes execution from the scheduling state in buf
// (implemented in assembly).
func gogo(buf *gobuf)

// asminit performs architecture-specific initialization
// (implemented in assembly).
func asminit()

// setg sets the current g (implemented in assembly; see getg above
// for how g is normally fetched).
func setg(gg *g)

// breakpoint executes a breakpoint trap (implemented in assembly).
func breakpoint()
// reflectcall calls fn with arguments described by stackArgs, stackArgsSize,
// frameSize, and regArgs.
//
// Arguments passed on the stack and space for return values passed on the stack
// must be laid out at the space pointed to by stackArgs (with total length
// stackArgsSize) according to the ABI.
//
// stackRetOffset must be some value <= stackArgsSize that indicates the
// offset within stackArgs where the return value space begins.
//
// frameSize is the total size of the argument frame at stackArgs and must
// therefore be >= stackArgsSize. It must include additional space for spilling
// register arguments for stack growth and preemption.
//
// TODO(mknyszek): Once we don't need the additional spill space, remove frameSize,
// since frameSize will be redundant with stackArgsSize.
//
// Arguments passed in registers must be laid out in regArgs according to the ABI.
// regArgs will hold any return values passed in registers after the call.
//
// reflectcall copies stack arguments from stackArgs to the goroutine stack, and
// then copies back stackArgsSize-stackRetOffset bytes back to the return space
// in stackArgs once fn has completed. It also "unspills" argument registers from
// regArgs before calling fn, and spills them back into regArgs immediately
// following the call to fn. If there are results being returned on the stack,
// the caller should pass the argument frame type as stackArgsType so that
// reflectcall can execute appropriate write barriers during the copy.
//
// reflectcall expects regArgs.ReturnIsPtr to be populated indicating which
// registers on the return path will contain Go pointers. It will then store
// these pointers in regArgs.Ptrs such that they are visible to the GC.
//
// Package reflect passes a frame type. In package runtime, there is only
// one call that copies results back, in callbackWrap in syscall_windows.go, and it
// does NOT pass a frame type, meaning there are no write barriers invoked. See that
// call site for justification.
//
// Package reflect accesses this symbol through a linkname.
//
// Arguments passed through to reflectcall do not escape. The type is used
// only in a very limited callee of reflectcall, the stackArgs are copied, and
// regArgs is only used in the reflectcall frame.
//
// Implemented in assembly; it dispatches to the callN variant
// declared below whose size fits the frame.
//
//go:noescape
func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
// procyield executes cycles iterations of a CPU-level busy-wait hint
// (delegated to assembly). Calling with 0 is a no-op.
//
// procyield should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/sagernet/sing-tun
//   - github.com/slackhq/nebula
//   - golang.zx2c4.com/wireguard
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname procyield
//go:nosplit
func procyield(cycles uint32) {
	// Skip the assembly call entirely for a zero count.
	if cycles == 0 {
		return
	}
	procyieldAsm(cycles)
}

// procyieldAsm is the assembly implementation of procyield.
func procyieldAsm(cycles uint32)
// neverCallThisFunction is an uninstantiable-on-purpose parameter type
// that makes goexit impossible to call accidentally from ordinary code.
type neverCallThisFunction struct{}

// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there is leftover state it may panic.
func goexit(neverCallThisFunction)
// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()

// asmcgocall calls the C function fn with argument arg on the system
// stack (implemented in assembly; see cgocall for usage — the int32
// return is consumed there).
//
//go:noescape
func asmcgocall(fn, arg unsafe.Pointer) int32
// morestack is the assembly stack-growth trampoline inserted by the
// compiler at function prologues.
func morestack()

// morestack_noctxt should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issues/67401.
// See go.dev/issues/71672.
//
//go:linkname morestack_noctxt
func morestack_noctxt()

// rt0_go is the runtime entry point (implemented in assembly).
func rt0_go()

// rt0_lib_go is the entry point used when Go is built as a library
// (implemented in assembly).
func rt0_lib_go()
// in asm_*.s
// not called directly; definitions here supply type information for traceback.
// These must have the same signature (arg pointer map) as reflectcall.
// Each callN variant presumably handles argument frames up to N bytes
// (see reflectcall in asm_*.s for the dispatch).
func call16(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call32(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call64(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call128(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call256(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call512(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call1024(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call2048(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call4096(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call8192(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call16384(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call32768(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call65536(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call131072(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call262144(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call524288(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call1048576(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call2097152(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call4194304(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call8388608(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call16777216(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call33554432(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call67108864(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call134217728(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call268435456(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call536870912(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call1073741824(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)

// systemstack_switch is an assembly helper used by systemstack.
func systemstack_switch()
// alignUp rounds n up to a multiple of a. a must be a power of 2.
//
//go:nosplit
func alignUp(n, a uintptr) uintptr {
	// With a a power of two, adding a-1 then clearing the low bits
	// rounds up without any division.
	mask := a - 1
	return (n + mask) &^ mask
}
// alignDown rounds n down to a multiple of a. a must be a power of 2.
//
//go:nosplit
func alignDown(n, a uintptr) uintptr {
	// Clearing the low log2(a) bits truncates to the nearest
	// lower multiple of a.
	mask := a - 1
	return n &^ mask
}
// divRoundUp returns ceil(n / a).
// Note: n + a - 1 may wrap for n near the top of the uintptr range;
// callers keep n well below that.
//
//go:nosplit
func divRoundUp(n, a uintptr) uintptr {
	// a is generally a power of two. This will get inlined and
	// the compiler will optimize the division.
	return (n + a - 1) / a
}
// checkASM reports whether assembly runtime checks have passed.
// Implemented in assembly.
func checkASM() bool

// memequal_varlen compares two memory regions whose length is carried
// in a closure context (implemented in assembly; used as the equality
// function for fixed-size types).
func memequal_varlen(a, b unsafe.Pointer) bool
// bool2int returns 0 if x is false or 1 if x is true.
//
// Reading the bool's underlying byte directly keeps the conversion
// branch-free; the SSA compiler lowers it to a simple move.
func bool2int(x bool) int {
	u := *(*uint8)(unsafe.Pointer(&x))
	return int(u)
}
// abort crashes the runtime in situations where even throw might not
// work. In general it should do something a debugger will recognize
// (e.g., an INT3 on x86). A crash in abort is recognized by the
// signal handler, which will attempt to tear down the runtime
// immediately.
func abort()

// Called from compiled code; declared for vet; do NOT call from Go.
func gcWriteBarrier1()

// gcWriteBarrier2 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gcWriteBarrier2
func gcWriteBarrier2()

// Like gcWriteBarrier1: called from compiled code; do NOT call from Go.
func gcWriteBarrier3()
func gcWriteBarrier4()
func gcWriteBarrier5()
func gcWriteBarrier6()
func gcWriteBarrier7()
func gcWriteBarrier8()

// duffzero and duffcopy are assembly routines invoked by compiled
// code; declared here for type information only.
func duffzero()
func duffcopy()

// Called from linker-generated .initarray; declared for go vet; do NOT call from Go.
func addmoduledata()

// Injected by the signal handler for panicking signals.
// Initializes any registers that have fixed meaning at calls but
// are scratch in bodies and calls sigpanic.
// On many platforms it just jumps to sigpanic.
func sigpanic0()
// intArgRegs is used by the various register assignment
// algorithm implementations in the runtime. These include:.
//   - Finalizers (mfinal.go)
//   - Windows callbacks (syscall_windows.go)
//
// Both are stripped-down versions of the algorithm since they
// only have to deal with a subset of cases (finalizers only
// take a pointer or interface argument, Go Windows callbacks
// don't support floating point).
//
// It should be modified with care and are generally only
// modified when testing this package.
//
// It should never be set higher than its internal/abi
// constant counterparts, because the system relies on a
// structure that is at least large enough to hold the
// registers the system supports.
//
// It defaults to the ABI's full integer register count.
//
// Protected by finlock.
var intArgRegs = abi.IntArgRegs
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !js && !openbsd && !plan9 && !solaris && !wasip1 && !windows
package runtime
import (
"internal/runtime/atomic"
"unsafe"
)
// read calls the read system call.
// It returns a non-negative number of bytes read or a negative errno value.
func read(fd int32, p unsafe.Pointer, n int32) int32

// closefd closes the file descriptor fd via the close system call.
func closefd(fd int32) int32

// exit terminates the process with the given status code.
func exit(code int32)

// usleep sleeps for usec microseconds.
func usleep(usec uint32)

// usleep_no_g is a nosplit wrapper around usleep; presumably safe to
// call without a valid g — confirm against the per-OS implementations.
//
//go:nosplit
func usleep_no_g(usec uint32) {
	usleep(usec)
}

// write1 calls the write system call.
// It returns a non-negative number of bytes written or a negative errno value.
//
//go:noescape
func write1(fd uintptr, p unsafe.Pointer, n int32) int32

// open calls the open system call and returns a file descriptor or a
// negative value on error.
//
//go:noescape
func open(name *byte, mode, perm int32) int32

// return value is only set on linux to be used in osinit().
func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32

// exitThread terminates the current thread, writing *wait = freeMStack when
// the stack is safe to reclaim.
//
//go:noescape
func exitThread(wait *atomic.Uint32)
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !wasm
package runtime
// pause is only used on wasm.
// This file is built with !wasm, so reaching this stub is a runtime bug.
func pause(newsp uintptr) { panic("unreachable") }
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// Frames may be used to get function/file/line information for a
// slice of PC values returned by [Callers].
type Frames struct {
	// callers is a slice of PCs that have not yet been expanded to frames.
	callers []uintptr
	// nextPC is a next PC to expand ahead of processing callers.
	nextPC uintptr
	// frames is a slice of Frames that have yet to be returned.
	frames []Frame
	// frameStore is inline backing storage for frames, so the common
	// case avoids a heap allocation (see CallersFrames).
	frameStore [2]Frame
}
// Frame is the information returned by [Frames] for each call frame.
type Frame struct {
	// PC is the program counter for the location in this frame.
	// For a frame that calls another frame, this will be the
	// program counter of a call instruction. Because of inlining,
	// multiple frames may have the same PC value, but different
	// symbolic information.
	PC uintptr
	// Func is the Func value of this call frame. This may be nil
	// for non-Go code or fully inlined functions.
	Func *Func
	// Function is the package path-qualified function name of
	// this call frame. If non-empty, this string uniquely
	// identifies a single function in the program.
	// This may be the empty string if not known.
	// If Func is not nil then Function == Func.Name().
	Function string
	// File and Line are the file name and line number of the
	// location in this frame. For non-leaf frames, this will be
	// the location of a call. These may be the empty string and
	// zero, respectively, if not known. The file name uses
	// forward slashes, even on Windows.
	File string
	Line int
	// startLine is the line number of the beginning of the function in
	// this frame. Specifically, it is the line number of the func keyword
	// for Go functions. Note that //line directives can change the
	// filename and/or line number arbitrarily within a function, meaning
	// that the Line - startLine offset is not always meaningful.
	//
	// This may be zero if not known.
	startLine int
	// Entry point program counter for the function; may be zero
	// if not known. If Func is not nil then Entry ==
	// Func.Entry().
	Entry uintptr
	// The runtime's internal view of the function. This field
	// is set (funcInfo.valid() returns true) only for Go functions,
	// not for C functions.
	funcInfo funcInfo
}
// CallersFrames takes a slice of PC values returned by [Callers] and
// prepares to return function/file/line information.
// Do not change the slice until you are done with the [Frames].
func CallersFrames(callers []uintptr) *Frames {
	ci := &Frames{callers: callers}
	// Seed frames with the inline store so small expansions avoid
	// allocating.
	ci.frames = ci.frameStore[:0]
	return ci
}
// Next returns a [Frame] representing the next call frame in the slice
// of PC values. If it has already returned all call frames, Next
// returns a zero [Frame].
//
// The more result indicates whether the next call to Next will return
// a valid [Frame]. It does not necessarily indicate whether this call
// returned one.
//
// See the [Frames] example for idiomatic usage.
func (ci *Frames) Next() (frame Frame, more bool) {
	for len(ci.frames) < 2 {
		// Find the next frame.
		// We need to look for 2 frames so we know what
		// to return for the "more" result.
		if len(ci.callers) == 0 {
			break
		}
		// Take the next PC to expand: either one queued by the
		// inline-expansion logic below (nextPC), or the next raw
		// caller PC.
		var pc uintptr
		if ci.nextPC != 0 {
			pc, ci.nextPC = ci.nextPC, 0
		} else {
			pc, ci.callers = ci.callers[0], ci.callers[1:]
		}
		funcInfo := findfunc(pc)
		if !funcInfo.valid() {
			// Not a Go function (no metadata for this PC).
			if cgoSymbolizerAvailable() {
				// Pre-expand cgo frames. We could do this
				// incrementally, too, but there's no way to
				// avoid allocation in this case anyway.
				ci.frames = append(ci.frames, expandCgoFrames(pc)...)
			}
			continue
		}
		f := funcInfo._Func()
		entry := f.Entry()
		// We store the pc of the start of the instruction following
		// the instruction in question (the call or the inline mark).
		// This is done for historical reasons, and to make FuncForPC
		// work correctly for entries in the result of runtime.Callers.
		// Decrement to get back to the instruction we care about.
		//
		// It is not possible to get pc == entry from runtime.Callers,
		// but if the caller does provide one, provide best-effort
		// results by avoiding backing out of the function entirely.
		if pc > entry {
			pc--
		}
		// It's important that interpret pc non-strictly as cgoTraceback may
		// have added bogus PCs with a valid funcInfo but invalid PCDATA.
		u, uf := newInlineUnwinder(funcInfo, pc)
		sf := u.srcFunc(uf)
		if u.isInlined(uf) {
			// Note: entry is not modified. It always refers to a real frame, not an inlined one.
			// File/line from funcline1 below are already correct.
			f = nil

			// When CallersFrame is invoked using the PC list returned by Callers,
			// the PC list includes virtual PCs corresponding to each outer frame
			// around an innermost real inlined PC.
			// We also want to support code passing in a PC list extracted from a
			// stack trace, and there only the real PCs are printed, not the virtual ones.
			// So check to see if the implied virtual PC for this PC (obtained from the
			// unwinder itself) is the next PC in ci.callers. If not, insert it.
			// The +1 here correspond to the pc-- above: the output of Callers
			// and therefore the input to CallersFrames is return PCs from the stack;
			// The pc-- backs up into the CALL instruction (not the first byte of the CALL
			// instruction, but good enough to find it nonetheless).
			// There are no cycles in implied virtual PCs (some number of frames were
			// inlined, but that number is finite), so this unpacking cannot cause an infinite loop.
			for unext := u.next(uf); unext.valid() && len(ci.callers) > 0 && ci.callers[0] != unext.pc+1; unext = u.next(unext) {
				snext := u.srcFunc(unext)
				if snext.funcID == abi.FuncIDWrapper && elideWrapperCalling(sf.funcID) {
					// Skip, because tracebackPCs (inside runtime.Callers) would too.
					continue
				}
				// Queue the missing virtual PC; it will be expanded on
				// the next loop iteration (see nextPC above).
				ci.nextPC = unext.pc + 1
				break
			}
		}

		ci.frames = append(ci.frames, Frame{
			PC:        pc,
			Func:      f,
			Function:  funcNameForPrint(sf.name()),
			Entry:     entry,
			startLine: int(sf.startLine),
			funcInfo:  funcInfo,
			// Note: File,Line set below
		})
	}

	// Pop one frame from the frame list. Keep the rest.
	// Avoid allocation in the common case, which is 1 or 2 frames.
	switch len(ci.frames) {
	case 0: // In the rare case when there are no frames at all, we return Frame{}.
		return
	case 1:
		frame = ci.frames[0]
		ci.frames = ci.frameStore[:0]
	case 2:
		frame = ci.frames[0]
		ci.frameStore[0] = ci.frames[1]
		ci.frames = ci.frameStore[:1]
	default:
		frame = ci.frames[0]
		ci.frames = ci.frames[1:]
	}
	more = len(ci.frames) > 0
	if frame.funcInfo.valid() {
		// Compute file/line just before we need to return it,
		// as it can be expensive. This avoids computing file/line
		// for the Frame we find but don't return. See issue 32093.
		file, line := funcline1(frame.funcInfo, frame.PC, false)
		frame.File, frame.Line = file, int(line)
	}
	return
}
// runtime_FrameStartLine returns the start line of the function in a Frame.
// It simply exposes the unexported Frame.startLine field; no recomputation
// is done here.
//
// runtime_FrameStartLine should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/grafana/pyroscope-go/godeltaprof
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname runtime_FrameStartLine runtime/pprof.runtime_FrameStartLine
func runtime_FrameStartLine(f *Frame) int {
	return f.startLine
}
// runtime_FrameSymbolName returns the full symbol name of the function in a Frame.
// For generic functions this differs from f.Function in that this doesn't replace
// the shape name to "...".
//
// runtime_FrameSymbolName should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/grafana/pyroscope-go/godeltaprof
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname runtime_FrameSymbolName runtime/pprof.runtime_FrameSymbolName
func runtime_FrameSymbolName(f *Frame) string {
	// Frames without funcInfo (e.g. cgo frames from expandCgoFrames)
	// have only the pre-formatted Function string to offer.
	if !f.funcInfo.valid() {
		return f.Function
	}
	// Re-resolve the (possibly inlined) source function at f.PC and
	// return its raw, unprettified name.
	u, uf := newInlineUnwinder(f.funcInfo, f.PC)
	sf := u.srcFunc(uf)
	return sf.name()
}
// runtime_expandFinalInlineFrame expands the final pc in stk to include all
// "callers" if pc is inline.
//
// runtime_expandFinalInlineFrame should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/grafana/pyroscope-go/godeltaprof
//   - github.com/pyroscope-io/godeltaprof
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame
func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr {
	// TODO: It would be more efficient to report only physical PCs to pprof and
	// just expand the whole stack.
	if len(stk) == 0 {
		return stk
	}
	pc := stk[len(stk)-1]
	// stk holds return PCs; back up by one to land inside the call
	// instruction (same convention as Frames.Next).
	tracepc := pc - 1

	f := findfunc(tracepc)
	if !f.valid() {
		// Not a Go function.
		return stk
	}

	u, uf := newInlineUnwinder(f, tracepc)
	if !u.isInlined(uf) {
		// Nothing inline at tracepc.
		return stk
	}

	// Treat the previous func as normal. We haven't actually checked, but
	// since this pc was included in the stack, we know it shouldn't be
	// elided.
	calleeID := abi.FuncIDNormal

	// Remove pc from stk; we'll re-add it below.
	stk = stk[:len(stk)-1]

	// Walk the inline tree outward from the innermost frame, re-adding
	// one (virtual) return PC per non-elided frame.
	for ; uf.valid(); uf = u.next(uf) {
		funcID := u.srcFunc(uf).funcID
		if funcID == abi.FuncIDWrapper && elideWrapperCalling(calleeID) {
			// ignore wrappers
		} else {
			// +1 undoes the tracepc = pc - 1 above, restoring the
			// return-PC convention.
			stk = append(stk, uf.pc+1)
		}
		calleeID = funcID
	}

	return stk
}
// expandCgoFrames expands frame information for pc, known to be
// a non-Go function, using the cgoSymbolizer hook. expandCgoFrames
// returns nil if pc could not be expanded.
//
// Preconditions: cgoSymbolizerAvailable returns true.
func expandCgoFrames(pc uintptr) []Frame {
	arg := cgoSymbolizerArg{pc: pc}
	callCgoSymbolizer(&arg)

	if arg.file == nil && arg.funcName == nil {
		// No useful information from symbolizer.
		return nil
	}

	var frames []Frame
	// The symbolizer reports one frame per call; arg.more != 0 means
	// it has additional (e.g. inlined) frames for the same pc.
	for {
		frames = append(frames, Frame{
			PC:       pc,
			Func:     nil,
			Function: gostring(arg.funcName),
			File:     gostring(arg.file),
			Line:     int(arg.lineno),
			Entry:    arg.entry,
			// funcInfo is zero, which implies !funcInfo.valid().
			// That ensures that we use the File/Line info given here.
		})
		if arg.more == 0 {
			break
		}
		callCgoSymbolizer(&arg)
	}

	// No more frames for this PC. Tell the symbolizer we are done.
	// We don't try to maintain a single cgoSymbolizerArg for the
	// whole use of Frames, because there would be no good way to tell
	// the symbolizer when we are done.
	arg.pc = 0
	callCgoSymbolizer(&arg)

	return frames
}
// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
// with (say) *f = Func{}.
// All code operating on a *Func must call raw() to get the *_func
// or funcInfo() to get the funcInfo instead.

// A Func represents a Go function in the running binary.
type Func struct {
	// opaque intentionally has no fields: a *Func is really a *_func
	// (or *funcinl) in disguise; see raw() and isInlined().
	opaque struct{} // unexported field to disallow conversions
}
// raw reinterprets f as the *_func it actually points to.
// Callers must check isInlined() before using the result as a _func,
// since a *Func may also be a disguised *funcinl.
func (f *Func) raw() *_func {
	return (*_func)(unsafe.Pointer(f))
}
// funcInfo returns the funcInfo (the _func plus its containing module)
// for f.
func (f *Func) funcInfo() funcInfo {
	return f.raw().funcInfo()
}
// funcInfo pairs f with the moduledata whose pclntable contains it.
// The module pointer is nil if f is not inside any module's pclntable
// (which should not happen for a genuine *_func).
func (f *_func) funcInfo() funcInfo {
	// Find the module containing f. f is located in the pclntable.
	// The unsafe.Pointer to uintptr conversions and arithmetic
	// are safe because we are working with module addresses.
	addr := uintptr(unsafe.Pointer(f))
	var found *moduledata
	for md := &firstmoduledata; md != nil; md = md.next {
		if len(md.pclntable) == 0 {
			continue
		}
		lo := uintptr(unsafe.Pointer(&md.pclntable[0]))
		hi := lo + uintptr(len(md.pclntable))
		if lo <= addr && addr < hi {
			found = md
			break
		}
	}
	return funcInfo{f, found}
}
// pcHeader holds data used by the pclntab lookups.
// Its layout is written by the linker and must be kept in sync with it.
type pcHeader struct {
	magic      abi.PCLnTabMagic // abi.Go1NNPcLnTabMagic
	pad1, pad2 uint8            // 0,0
	minLC      uint8            // min instruction size
	ptrSize    uint8            // size of a ptr in bytes
	nfunc      int              // number of functions in the module
	nfiles     uint             // number of entries in the file tab
	// The next field used to be textStart. This is no longer stored
	// as it requires a relocation. Code should use the moduledata text
	// field instead. This unused field can be removed in coordination
	// with Delve.
	_              uintptr
	funcnameOffset uintptr // offset to the funcnametab variable from pcHeader
	cuOffset       uintptr // offset to the cutab variable from pcHeader
	filetabOffset  uintptr // offset to the filetab variable from pcHeader
	pctabOffset    uintptr // offset to the pctab variable from pcHeader
	pclnOffset     uintptr // offset to the pclntab variable from pcHeader
}
// moduledata records information about the layout of the executable
// image. It is written by the linker. Any changes here must be
// matched changes to the code in cmd/link/internal/ld/symtab.go:symtab.
// moduledata is stored in statically allocated non-pointer memory;
// none of the pointers here are visible to the garbage collector.
type moduledata struct {
	sys.NotInHeap // Only in static data

	pcHeader     *pcHeader
	funcnametab  []byte   // function name strings, referenced by nameOff
	cutab        []uint32 // compilation-unit file-index table (see funcfile)
	filetab      []byte   // file name strings, referenced via cutab
	pctab        []byte   // pc-value tables (see pcvalue)
	pclntable    []byte   // _func records, referenced by ftab funcoff
	ftab         []functab
	findfunctab  uintptr // address of the findfuncbucket array (see findfunc)
	minpc, maxpc uintptr // PC range covered by this module

	text, etext           uintptr
	noptrdata, enoptrdata uintptr
	data, edata           uintptr
	bss, ebss             uintptr
	noptrbss, enoptrbss   uintptr
	covctrs, ecovctrs     uintptr
	end, gcdata, gcbss    uintptr
	types, typedesclen, etypes uintptr
	itaboffset            uintptr
	rodata                uintptr
	gofunc                uintptr // go.func.*
	epclntab              uintptr

	textsectmap []textsect
	ptab        []ptabEntry
	pluginpath  string
	pkghashes   []modulehash
	// This slice records the initializing tasks that need to be
	// done to start up the program. It is built by the linker.
	inittasks []*initTask

	modulename   string
	modulehashes []modulehash

	hasmain uint8 // 1 if module contains the main function, 0 otherwise
	bad     bool  // module failed to load and should be ignored

	gcdatamask, gcbssmask bitvector         // pointer masks; assembled by modulesinit
	typemap               map[*_type]*_type // *_type to use from previous module
	next                  *moduledata       // linked list; head is firstmoduledata
}
// A modulehash is used to compare the ABI of a new module or a
// package in a new module with the loaded program.
//
// For each shared library a module links against, the linker creates an entry in the
// moduledata.modulehashes slice containing the name of the module, the abi hash seen
// at link time and a pointer to the runtime abi hash. These are checked in
// moduledataverify1 below.
//
// For each loaded plugin, the pkghashes slice has a modulehash of the
// newly loaded package that can be used to check the plugin's version of
// a package against any previously loaded version of the package.
// This is done in plugin.lastmoduleinit.
type modulehash struct {
	modulename   string  // name of the module or package being checked
	linktimehash string  // ABI hash recorded at link time
	runtimehash  *string // ABI hash observed at run time; compared in moduledataverify1
}
// pinnedTypemaps are the map[*_type]*_type from the moduledata objects.
//
// These typemap objects are allocated at run time on the heap, but the
// only direct reference to them is in the moduledata, created by the
// linker and marked SNOPTRDATA so it is ignored by the GC.
//
// To make sure the map isn't collected, we keep a second reference here.
var pinnedTypemaps []map[*_type]*_type
// aixStaticDataBase (used only on AIX) holds the unrelocated address
// of the data section, set by the linker.
//
// On AIX, an R_ADDR relocation from an RODATA symbol to a DATA symbol
// does not work, as the dynamic loader can change the address of the
// data section, and it is not possible to apply a dynamic relocation
// to RODATA. In order to get the correct address, we need to apply
// the delta between unrelocated and relocated data section addresses.
// aixStaticDataBase is the unrelocated address, and moduledata.data is
// the relocated one.
var aixStaticDataBase uintptr // linker symbol

// firstmoduledata is the head of the linked list of all loaded modules.
var firstmoduledata moduledata // linker symbol
// lastmoduledatap should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issues/67401.
// See go.dev/issues/71672.
//
//go:linkname lastmoduledatap
var lastmoduledatap *moduledata // linker symbol

// modulesSlice is the current active-module list, published atomically
// by modulesinit; read via activeModules. See activeModules.
var modulesSlice *[]*moduledata // see activeModules
// activeModules returns a slice of active modules.
//
// A module is active once its gcdatamask and gcbssmask have been
// assembled and it is usable by the GC.
//
// This is nosplit/nowritebarrier because it is called by the
// cgo pointer checking code.
//
//go:nosplit
//go:nowritebarrier
func activeModules() []*moduledata {
	// Atomic load pairs with the atomicstorep in modulesinit, so a
	// reader never sees a partially built slice.
	p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
	if p == nil {
		return nil
	}
	return *p
}
// modulesinit creates the active modules slice out of all loaded modules.
//
// When a module is first loaded by the dynamic linker, an .init_array
// function (written by cmd/link) is invoked to call addmoduledata,
// appending to the module to the linked list that starts with
// firstmoduledata.
//
// There are two times this can happen in the lifecycle of a Go
// program. First, if compiled with -linkshared, a number of modules
// built with -buildmode=shared can be loaded at program initialization.
// Second, a Go program can load a module while running that was built
// with -buildmode=plugin.
//
// After loading, this function is called which initializes the
// moduledata so it is usable by the GC and creates a new activeModules
// list.
//
// Only one goroutine may call modulesinit at a time.
func modulesinit() {
	modules := new([]*moduledata)
	for md := &firstmoduledata; md != nil; md = md.next {
		if md.bad {
			continue
		}
		*modules = append(*modules, md)
		// A zero gcdatamask marks a module whose GC pointer masks
		// have not been expanded yet; do it once here.
		if md.gcdatamask == (bitvector{}) {
			scanDataSize := md.edata - md.data
			md.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(md.gcdata)), scanDataSize)
			scanBSSSize := md.ebss - md.bss
			md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), scanBSSSize)
			gcController.addGlobals(int64(scanDataSize + scanBSSSize))
		}
	}

	// Modules appear in the moduledata linked list in the order they are
	// loaded by the dynamic loader, with one exception: the
	// firstmoduledata itself the module that contains the runtime. This
	// is not always the first module (when using -buildmode=shared, it
	// is typically libstd.so, the second module). The order matters for
	// typelinksinit, so we swap the first module with whatever module
	// contains the main function.
	//
	// See Issue #18729.
	for i, md := range *modules {
		if md.hasmain != 0 {
			(*modules)[0] = md
			(*modules)[i] = &firstmoduledata
			break
		}
	}

	// Publish atomically so nosplit readers (activeModules) are safe.
	atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
}
// functab is one entry of the per-module function table (moduledata.ftab),
// sorted by entryoff; see findfunc and moduledataverify1.
type functab struct {
	entryoff uint32 // relative to runtime.text
	funcoff  uint32 // offset of the _func record in moduledata.pclntable
}
// Mapping information for secondary text sections.
// See moduledata.textAddr/textOff for how these are used.
type textsect struct {
	vaddr    uintptr // prelinked section vaddr
	end      uintptr // vaddr + section length
	baseaddr uintptr // relocated section address
}
// findfuncbucket is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index. Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
	idx        uint32   // base index into moduledata.ftab for this bucket
	subbuckets [16]byte // per-256-byte deltas added to idx
}
// moduledataverify runs consistency checks over every loaded module.
func moduledataverify() {
	for md := &firstmoduledata; md != nil; md = md.next {
		moduledataverify1(md)
	}
}
// debugPcln enables extra pclntab consistency checking
// (see the spdelta alignment check in funcspdelta).
const debugPcln = false
// moduledataverify1 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issues/67401.
// See go.dev/issues/71672.
//
//go:linkname moduledataverify1
func moduledataverify1(datap *moduledata) {
	// Check that the pclntab's format is valid.
	hdr := datap.pcHeader
	if hdr.magic != abi.CurrentPCLnTabMagic || hdr.pad1 != 0 || hdr.pad2 != 0 ||
		hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize {
		println("runtime: pcHeader: magic=", hex(hdr.magic), "pad1=", hdr.pad1, "pad2=", hdr.pad2,
			"minLC=", hdr.minLC, "ptrSize=", hdr.ptrSize, "pluginpath=", datap.pluginpath)
		throw("invalid function symbol table")
	}

	// ftab is lookup table for function by program counter.
	nftab := len(datap.ftab) - 1
	for i := 0; i < nftab; i++ {
		// NOTE: ftab[nftab].entry is legal; it is the address beyond the final function.
		// Verify the table is sorted; findfunc relies on this.
		if datap.ftab[i].entryoff > datap.ftab[i+1].entryoff {
			f1 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i].funcoff])), datap}
			f2 := funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[i+1].funcoff])), datap}
			f2name := "end"
			if i+1 < nftab {
				f2name = funcname(f2)
			}
			println("function symbol table not sorted by PC offset:", hex(datap.ftab[i].entryoff), funcname(f1), ">", hex(datap.ftab[i+1].entryoff), f2name, ", plugin:", datap.pluginpath)
			for j := 0; j <= i; j++ {
				println("\t", hex(datap.ftab[j].entryoff), funcname(funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[datap.ftab[j].funcoff])), datap}))
			}
			if GOOS == "aix" && isarchive {
				println("-Wl,-bnoobjreorder is mandatory on aix/ppc64 with c-archive")
			}
			throw("invalid runtime symbol table")
		}
	}

	// Check the ftab's PC range against the module's recorded range.
	min := datap.textAddr(datap.ftab[0].entryoff)
	max := datap.textAddr(datap.ftab[nftab].entryoff)
	minpc := datap.minpc
	maxpc := datap.maxpc
	if GOARCH == "wasm" {
		// On Wasm, the func table contains the function index, whereas
		// the "PC" is function index << 16 + block index.
		maxpc = alignUp(maxpc, 1<<16) // round up for end PC
	}
	if minpc != min || maxpc != max {
		println("minpc=", hex(minpc), "min=", hex(min), "maxpc=", hex(maxpc), "max=", hex(max))
		throw("minpc or maxpc invalid")
	}

	// Verify ABI hashes of all modules this one links against.
	for _, modulehash := range datap.modulehashes {
		if modulehash.linktimehash != *modulehash.runtimehash {
			println("abi mismatch detected between", datap.modulename, "and", modulehash.modulename)
			throw("abi mismatch")
		}
	}
}
// textAddr returns md.text + off, with special handling for multiple text sections.
// off is a (virtual) offset computed at internal linking time,
// before the external linker adjusts the sections' base addresses.
//
// The text, or instruction stream is generated as one large buffer.
// The off (offset) for a function is its offset within this buffer.
// If the total text size gets too large, there can be issues on platforms like ppc64
// if the target of calls are too far for the call instruction.
// To resolve the large text issue, the text is split into multiple text sections
// to allow the linker to generate long calls when necessary.
// When this happens, the vaddr for each text section is set to its offset within the text.
// Each function's offset is compared against the section vaddrs and ends to determine the containing section.
// Then the section relative offset is added to the section's
// relocated baseaddr to compute the function address.
//
// It is nosplit because it is part of the findfunc implementation.
//
//go:nosplit
func (md *moduledata) textAddr(off32 uint32) uintptr {
	off := uintptr(off32)
	// Single-section fast path: the section base is md.text itself.
	res := md.text + off
	if len(md.textsectmap) > 1 {
		for i, sect := range md.textsectmap {
			// For the last section, include the end address (etext), as it is included in the functab.
			if off >= sect.vaddr && off < sect.end || (i == len(md.textsectmap)-1 && off == sect.end) {
				res = sect.baseaddr + off - sect.vaddr
				break
			}
		}
		if res > md.etext && GOARCH != "wasm" { // on wasm, functions do not live in the same address space as the linear memory
			println("runtime: textAddr", hex(res), "out of range", hex(md.text), "-", hex(md.etext))
			throw("runtime: text offset out of range")
		}
	}
	if GOARCH == "wasm" {
		// On Wasm, a text offset (e.g. in the method table) is function index, whereas
		// the "PC" is function index << 16 + block index.
		res <<= 16
	}
	return res
}
// textOff is the opposite of textAddr. It converts a PC to a (virtual) offset
// to md.text, and returns if the PC is in any Go text section.
//
// It is nosplit because it is part of the findfunc implementation.
//
//go:nosplit
func (md *moduledata) textOff(pc uintptr) (uint32, bool) {
	off := pc - md.text
	if GOARCH == "wasm" {
		// On Wasm, the func table contains the function index, whereas
		// the "PC" is function index << 16 + block index.
		off >>= 16
	}
	// Single-section default; overridden below when there are
	// multiple text sections.
	res := uint32(off)
	if len(md.textsectmap) > 1 {
		if GOARCH == "wasm" {
			fatal("unexpected multiple text sections on Wasm")
		}
		// Sections are scanned in order; a pc below the current
		// section's relocated base lies in no section at all.
		for i, sect := range md.textsectmap {
			if sect.baseaddr > pc {
				// pc is not in any section.
				return 0, false
			}
			end := sect.baseaddr + (sect.end - sect.vaddr)
			// For the last section, include the end address (etext), as it is included in the functab.
			if i == len(md.textsectmap)-1 {
				end++
			}
			if pc < end {
				res = uint32(pc - sect.baseaddr + sect.vaddr)
				break
			}
		}
	}
	return res, true
}
// funcName returns the string at nameOff in the function name table.
// nameOff == 0 is treated as "no name" and returns the empty string.
func (md *moduledata) funcName(nameOff int32) string {
	if nameOff == 0 {
		return ""
	}
	// Names in funcnametab are NUL-terminated; gostringnocopy aliases
	// the table's bytes rather than copying them.
	return gostringnocopy(&md.funcnametab[nameOff])
}
// Despite being an exported symbol,
// FuncForPC is linknamed by widely used packages.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
// Note that this comment is not part of the doc comment.
//
//go:linkname FuncForPC

// FuncForPC returns a *[Func] describing the function that contains the
// given program counter address, or else nil.
//
// If pc represents multiple functions because of inlining, it returns
// the *Func describing the innermost function, but with an entry of
// the outermost function.
func FuncForPC(pc uintptr) *Func {
	f := findfunc(pc)
	if !f.valid() {
		return nil
	}
	// This must interpret PC non-strictly so bad PCs (those between functions) don't crash the runtime.
	// We just report the preceding function in that situation. See issue 29735.
	// TODO: Perhaps we should report no function at all in that case.
	// The runtime currently doesn't have function end info, alas.
	u, uf := newInlineUnwinder(f, pc)
	if !u.isInlined(uf) {
		return f._Func()
	}
	// pc is inside an inlined call: return a heap-allocated funcinl
	// disguised as a *Func (distinguished later via _func.isInlined).
	sf := u.srcFunc(uf)
	file, line := u.fileLine(uf)
	fi := &funcinl{
		ones:      ^uint32(0),
		entry:     f.entry(), // entry of the real (the outermost) function.
		name:      sf.name(),
		file:      file,
		line:      int32(line),
		startLine: sf.startLine,
	}
	return (*Func)(unsafe.Pointer(fi))
}
// Name returns the name of the function.
// It returns "" for a nil *Func.
func (f *Func) Name() string {
	if f == nil {
		return ""
	}
	fn := f.raw()
	if fn.isInlined() { // inlined version
		// f is really a *funcinl created by FuncForPC.
		fi := (*funcinl)(unsafe.Pointer(fn))
		return funcNameForPrint(fi.name)
	}
	return funcNameForPrint(funcname(f.funcInfo()))
}
// Entry returns the entry address of the function.
func (f *Func) Entry() uintptr {
	fn := f.raw()
	if fn.isInlined() { // inlined version
		// f is really a *funcinl; its entry is that of the
		// outermost (real) function.
		fi := (*funcinl)(unsafe.Pointer(fn))
		return fi.entry
	}
	return fn.funcInfo().entry()
}
// FileLine returns the file name and line number of the
// source code corresponding to the program counter pc.
// The result will not be accurate if pc is not a program
// counter within f.
func (f *Func) FileLine(pc uintptr) (file string, line int) {
	fn := f.raw()
	if fn.isInlined() { // inlined version
		// f is really a *funcinl with file/line precomputed by
		// FuncForPC; pc is ignored in this case.
		fi := (*funcinl)(unsafe.Pointer(fn))
		return fi.file, int(fi.line)
	}
	// Pass strict=false here, because anyone can call this function,
	// and they might just be wrong about targetpc belonging to f.
	file, line32 := funcline1(f.funcInfo(), pc, false)
	return file, int(line32)
}
// startLine returns the starting line number of the function. i.e., the line
// number of the func keyword.
func (f *Func) startLine() int32 {
	fn := f.raw()
	if fn.isInlined() { // inlined version
		fi := (*funcinl)(unsafe.Pointer(fn))
		return fi.startLine
	}
	return fn.funcInfo().startLine
}
// findmoduledatap looks up the moduledata for a PC.
// It returns nil if pc is outside every module's [minpc, maxpc) range.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func findmoduledatap(pc uintptr) *moduledata {
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.minpc <= pc && pc < datap.maxpc {
			return datap
		}
	}
	return nil
}
// funcInfo is a _func together with the module that contains it,
// which is needed to resolve its various table offsets.
type funcInfo struct {
	*_func
	datap *moduledata
}
// valid reports whether f refers to an actual function
// (i.e. findfunc succeeded).
func (f funcInfo) valid() bool {
	return f._func != nil
}
// _Func converts f into the opaque *Func form handed to users.
func (f funcInfo) _Func() *Func {
	return (*Func)(unsafe.Pointer(f._func))
}
// isInlined reports whether f should be re-interpreted as a *funcinl.
func (f *_func) isInlined() bool {
	return f.entryOff == ^uint32(0) // see comment for funcinl.ones
}
// entry returns the entry PC for f, resolving the module-relative
// entryOff via textAddr.
//
// entry should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/phuslu/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
func (f funcInfo) entry() uintptr {
	return f.datap.textAddr(f.entryOff)
}
// badFuncInfoEntry exists so that a linkname of runtime.funcInfo.entry
// resolves to this stub rather than the real method; it has no body and
// will fail at link time if actually referenced. TODO confirm intent —
// this mirrors badSrcFuncName below.
//
//go:linkname badFuncInfoEntry runtime.funcInfo.entry
func badFuncInfoEntry(funcInfo) uintptr
// findfunc looks up function metadata for a PC.
// It returns the zero funcInfo (valid() == false) if pc belongs to no
// module or to no Go text section.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
// findfunc should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/phuslu/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:nosplit
//go:linkname findfunc
func findfunc(pc uintptr) funcInfo {
	datap := findmoduledatap(pc)
	if datap == nil {
		return funcInfo{}
	}
	const nsub = uintptr(len(findfuncbucket{}.subbuckets))

	pcOff, ok := datap.textOff(pc)
	if !ok {
		return funcInfo{}
	}

	x := uintptr(pcOff) + datap.text - datap.minpc // TODO: are datap.text and datap.minpc always equal?
	if GOARCH == "wasm" {
		// On Wasm, pcOff is the function index, whereas
		// the "PC" is function index << 16 + block index.
		x = uintptr(pcOff)<<16 + datap.text - datap.minpc
	}
	// Locate the bucket (one per FuncTabBucketSize bytes of text) and
	// the subbucket within it; see the findfuncbucket comment.
	b := x / abi.FuncTabBucketSize
	i := x % abi.FuncTabBucketSize / (abi.FuncTabBucketSize / nsub)

	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
	idx := ffb.idx + uint32(ffb.subbuckets[i])

	// Find the ftab entry: the bucket gives a starting index at or
	// before the target; scan forward to the last entry whose entryoff
	// is <= pcOff.
	for datap.ftab[idx+1].entryoff <= pcOff {
		idx++
	}

	funcoff := datap.ftab[idx].funcoff
	return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[funcoff])), datap}
}
// A srcFunc represents a logical function in the source code. This may
// correspond to an actual symbol in the binary text, or it may correspond to a
// source function that has been inlined.
type srcFunc struct {
	datap     *moduledata // module whose tables nameOff refers into
	nameOff   int32       // offset of the name in datap's funcnametab
	startLine int32       // line number of the func keyword
	funcID    abi.FuncID  // special-function ID, if any
}
// srcFunc returns the srcFunc view of f, or the zero srcFunc if f is
// not valid.
func (f funcInfo) srcFunc() srcFunc {
	if !f.valid() {
		return srcFunc{}
	}
	return srcFunc{
		datap:     f.datap,
		nameOff:   f.nameOff,
		startLine: f.startLine,
		funcID:    f.funcID,
	}
}
// name returns the function name recorded for s, or "" if s has no
// associated module (the zero srcFunc).
//
// name should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/phuslu/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
func (s srcFunc) name() string {
	if s.datap == nil {
		return ""
	}
	return s.datap.funcName(s.nameOff)
}
// badSrcFuncName exists so that a linkname of runtime.srcFunc.name
// resolves to this bodiless stub rather than the real method; see
// badFuncInfoEntry for the same pattern.
//
//go:linkname badSrcFuncName runtime.srcFunc.name
func badSrcFuncName(srcFunc) string
// pcvalueCache is a small per-M cache of pcvalue results; see pcvalue.
type pcvalueCache struct {
	entries [2][8]pcvalueCacheEnt // outer index chosen by pcvalueCacheKey
	inUse   int                   // re-entrancy guard (signal handlers share this cache)
}
// pcvalueCacheEnt is one cached pcvalue lookup.
type pcvalueCacheEnt struct {
	// targetpc and off together are the key of this cache entry.
	targetpc uintptr
	off      uint32
	val      int32   // The value of this entry.
	valPC    uintptr // The PC at which val starts
}
// pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc.
// It must be very cheap to calculate.
// For now, align to goarch.PtrSize and reduce mod the number of entries.
// In practice, this appears to be fairly randomly and evenly distributed.
func pcvalueCacheKey(targetpc uintptr) uintptr {
	return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
}
// Returns the PCData value, and the PC where this value starts.
// off is an offset into the module's pctab identifying which pc-value
// table to walk; off == 0 means "no table" and yields (-1, 0).
// If strict is set, a missing table entry for targetpc is fatal;
// otherwise (-1, 0) is returned.
func pcvalue(f funcInfo, off uint32, targetpc uintptr, strict bool) (int32, uintptr) {
	// If true, when we get a cache hit, still look up the data and make sure it
	// matches the cached contents.
	const debugCheckCache = false
	// If true, skip checking the cache entirely.
	const skipCache = false

	if off == 0 {
		return -1, 0
	}

	// Check the cache. This speeds up walks of deep stacks, which
	// tend to have the same recursive functions over and over,
	// or repetitive stacks between goroutines.
	var checkVal int32
	var checkPC uintptr
	ck := pcvalueCacheKey(targetpc)
	if !skipCache {
		mp := acquirem()
		cache := &mp.pcvalueCache
		// The cache can be used by the signal handler on this M. Avoid
		// re-entrant use of the cache. The signal handler can also write inUse,
		// but will always restore its value, so we can use a regular increment
		// even if we get signaled in the middle of it.
		cache.inUse++
		if cache.inUse == 1 {
			for i := range cache.entries[ck] {
				// We check off first because we're more
				// likely to have multiple entries with
				// different offsets for the same targetpc
				// than the other way around, so we'll usually
				// fail in the first clause.
				ent := &cache.entries[ck][i]
				if ent.off == off && ent.targetpc == targetpc {
					val, pc := ent.val, ent.valPC
					if debugCheckCache {
						// Remember the hit, then fall through to the
						// table walk below to cross-check it.
						checkVal, checkPC = ent.val, ent.valPC
						break
					} else {
						cache.inUse--
						releasem(mp)
						return val, pc
					}
				}
			}
		} else if debugCheckCache && (cache.inUse < 1 || cache.inUse > 2) {
			// Catch accounting errors or deeply reentrant use. In principle
			// "inUse" should never exceed 2.
			throw("cache.inUse out of range")
		}
		cache.inUse--
		releasem(mp)
	}

	if !f.valid() {
		if strict && panicking.Load() == 0 {
			println("runtime: no module data for", hex(f.entry()))
			throw("no module data")
		}
		return -1, 0
	}
	datap := f.datap
	p := datap.pctab[off:]
	pc := f.entry()
	prevpc := pc
	val := int32(-1)
	// Walk the delta-encoded table until the segment containing
	// targetpc is found.
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry())
		if !ok {
			break
		}
		if targetpc < pc {
			// Replace a random entry in the cache. Random
			// replacement prevents a performance cliff if
			// a recursive stack's cycle is slightly
			// larger than the cache.
			// Put the new element at the beginning,
			// since it is the most likely to be newly used.
			if debugCheckCache && checkPC != 0 {
				if checkVal != val || checkPC != prevpc {
					print("runtime: table value ", val, "@", prevpc, " != cache value ", checkVal, "@", checkPC, " at PC ", targetpc, " off ", off, "\n")
					throw("bad pcvalue cache")
				}
			} else {
				mp := acquirem()
				cache := &mp.pcvalueCache
				cache.inUse++
				if cache.inUse == 1 {
					e := &cache.entries[ck]
					ci := cheaprandn(uint32(len(cache.entries[ck])))
					e[ci] = e[0]
					e[0] = pcvalueCacheEnt{
						targetpc: targetpc,
						off:      off,
						val:      val,
						valPC:    prevpc,
					}
				}
				cache.inUse--
				releasem(mp)
			}

			return val, prevpc
		}
		prevpc = pc
	}

	// If there was a table, it should have covered all program counters.
	// If not, something is wrong.
	if panicking.Load() != 0 || !strict {
		return -1, 0
	}

	// Dump the whole table before crashing, for diagnosis.
	print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")

	p = datap.pctab[off:]
	pc = f.entry()
	val = -1
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.entry())
		if !ok {
			break
		}
		print("\tvalue=", val, " until pc=", hex(pc), "\n")
	}

	throw("invalid runtime symbol table")
	return -1, 0
}
// funcname returns the name of f, or "" if f is not valid.
func funcname(f funcInfo) string {
	if !f.valid() {
		return ""
	}
	return f.datap.funcName(f.nameOff)
}
// funcpkgpath returns the package-path prefix of f's printed name:
// everything up to (but not including) the first '.' that follows the
// last '/' in the name.
func funcpkgpath(f funcInfo) string {
	name := funcNameForPrint(funcname(f))
	// Scan backward to the last path separator (stopping at index 0
	// if there is none).
	pos := len(name) - 1
	for pos > 0 && name[pos] != '/' {
		pos--
	}
	// Then scan forward to the '.' separating the package path from
	// the identifier.
	for pos < len(name) && name[pos] != '.' {
		pos++
	}
	return name[:pos]
}
// funcfile returns the source file name for file index fileno of f,
// or "?" if f is invalid or the pcln data is corrupt.
func funcfile(f funcInfo, fileno int32) string {
	datap := f.datap
	if !f.valid() {
		return "?"
	}
	// Make sure the cu index and file offset are valid
	// (^uint32(0) in cutab marks a missing entry).
	if fileoff := datap.cutab[f.cuOffset+uint32(fileno)]; fileoff != ^uint32(0) {
		return gostringnocopy(&datap.filetab[fileoff])
	}
	// pcln section is corrupt.
	return "?"
}
// funcline1 returns the source file and line number for targetpc within
// f, returning "?", 0 on any lookup failure. strict is passed through to
// pcvalue and controls how decoding failures are handled there.
//
// funcline1 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/phuslu/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname funcline1
func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) {
	datap := f.datap
	if !f.valid() {
		return "?", 0
	}
	// Consult the per-function file-index and line-number pc tables.
	fileno, _ := pcvalue(f, f.pcfile, targetpc, strict)
	line, _ = pcvalue(f, f.pcln, targetpc, strict)
	if fileno == -1 || line == -1 || int(fileno) >= len(datap.filetab) {
		// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
		return "?", 0
	}
	file = funcfile(f, fileno)
	return
}
// funcline is funcline1 with strict PC handling enabled.
func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
	return funcline1(f, targetpc, true)
}
// funcspdelta returns the stack pointer delta recorded in f's pcsp
// table at targetpc.
func funcspdelta(f funcInfo, targetpc uintptr) int32 {
	x, _ := pcvalue(f, f.pcsp, targetpc, true)
	// SP deltas must be pointer-aligned; a misaligned value indicates
	// corrupt pcsp data.
	if debugPcln && x&(goarch.PtrSize-1) != 0 {
		print("invalid spdelta ", funcname(f), " ", hex(f.entry()), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
		throw("bad spdelta")
	}
	return x
}
// funcMaxSPDelta returns the maximum spdelta at any point in f,
// found by walking f's entire pcsp table.
func funcMaxSPDelta(f funcInfo) int32 {
	tab := f.datap.pctab[f.pcsp:]
	pc := f.entry()
	val := int32(-1)
	var most int32
	for ok := true; ok; {
		tab, ok = step(tab, &pc, &val, pc == f.entry())
		if ok && val > most {
			most = val
		}
	}
	return most
}
// pcdatastart returns the pctab offset of the table'th pcdata table for f.
// The pcdata offsets are stored as uint32s immediately after f's _func
// header (the arithmetic starts just past the nfuncdata field).
func pcdatastart(f funcInfo, table uint32) uint32 {
	return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
}
// pcdatavalue returns the value of the given pcdata table at targetpc,
// or -1 if f has no such table. Uses strict PC handling.
func pcdatavalue(f funcInfo, table uint32, targetpc uintptr) int32 {
	if table >= f.npcdata {
		return -1
	}
	r, _ := pcvalue(f, pcdatastart(f, table), targetpc, true)
	return r
}
// pcdatavalue1 is pcdatavalue with caller-controlled strictness,
// for use on paths (e.g. symbolic debugging) that must not throw.
func pcdatavalue1(f funcInfo, table uint32, targetpc uintptr, strict bool) int32 {
	if table >= f.npcdata {
		return -1
	}
	r, _ := pcvalue(f, pcdatastart(f, table), targetpc, strict)
	return r
}
// Like pcdatavalue, but also return the start PC of this PCData value.
// Returns -1, 0 if f has no such table.
func pcdatavalue2(f funcInfo, table uint32, targetpc uintptr) (int32, uintptr) {
	if table >= f.npcdata {
		return -1, 0
	}
	return pcvalue(f, pcdatastart(f, table), targetpc, true)
}
// funcdata returns a pointer to the ith funcdata for f,
// or nil if i is out of range or the entry is the "no data" sentinel.
// funcdata should be kept in sync with cmd/link:writeFuncs.
func funcdata(f funcInfo, i uint8) unsafe.Pointer {
	// i is unsigned, so only the upper bound needs checking
	// (the former "i < 0" comparison was always false for uint8).
	if i >= f.nfuncdata {
		return nil
	}
	base := f.datap.gofunc // load gofunc address early so that we calculate during cache misses
	// The funcdata offsets follow the _func header and the npcdata
	// pcdata offsets, each stored as a uint32.
	p := uintptr(unsafe.Pointer(&f.nfuncdata)) + unsafe.Sizeof(f.nfuncdata) + uintptr(f.npcdata)*4 + uintptr(i)*4
	off := *(*uint32)(unsafe.Pointer(p))
	// Return off == ^uint32(0) ? 0 : f.datap.gofunc + uintptr(off), but without branches.
	// The compiler calculates mask on most architectures using conditional assignment.
	var mask uintptr
	if off == ^uint32(0) {
		mask = 1
	}
	mask--
	raw := base + uintptr(off)
	return unsafe.Pointer(raw & mask)
}
// step advances to the next pc, value pair in the encoded table.
//
// Each pair is a zig-zag varint value delta followed by a varint pc delta
// (scaled by sys.PCQuantum). A zero value delta terminates the table,
// except on the first entry, where zero is a legitimate delta.
func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {
	// For both uvdelta and pcdelta, the common case (~70%)
	// is that they are a single byte. If so, avoid calling readvarint.
	uvdelta := uint32(p[0])
	if uvdelta == 0 && !first {
		return nil, false
	}
	n := uint32(1)
	if uvdelta&0x80 != 0 {
		// Multi-byte varint: decode it fully.
		n, uvdelta = readvarint(p)
	}
	// Zig-zag decode: low bit is the sign, remaining bits the magnitude.
	*val += int32(-(uvdelta & 1) ^ (uvdelta >> 1))
	p = p[n:]
	pcdelta := uint32(p[0])
	n = 1
	if pcdelta&0x80 != 0 {
		n, pcdelta = readvarint(p)
	}
	p = p[n:]
	*pc += uintptr(pcdelta * sys.PCQuantum)
	return p, true
}
// readvarint decodes a little-endian base-128 varint from the start of p.
// It returns the number of bytes consumed and the decoded value.
// Bits beyond 32 are masked off via the shift (shift & 31).
func readvarint(p []byte) (read uint32, val uint32) {
	var n, shift uint32
	for {
		b := p[n]
		n++
		val |= uint32(b&0x7F) << (shift & 31)
		if b&0x80 == 0 {
			return n, val
		}
		shift += 7
	}
}
// A stackmap is a sequence of equal-length bitmaps, as produced by the
// compiler for stack liveness information. Individual bitmaps are
// retrieved with stackmapdata.
type stackmap struct {
	n        int32   // number of bitmaps
	nbit     int32   // number of bits in each bitmap
	bytedata [1]byte // bitmaps, each starting on a byte boundary
}
// stackmapdata returns the n'th bitmap of stkmap as a bitvector.
// Each bitmap occupies (nbit+7)/8 bytes, starting on a byte boundary.
//
//go:nowritebarrier
func stackmapdata(stkmap *stackmap, n int32) bitvector {
	// Check this invariant only when stackDebug is on at all.
	// The invariant is already checked by many of stackmapdata's callers,
	// and disabling it by default allows stackmapdata to be inlined.
	if stackDebug > 0 && (n < 0 || n >= stkmap.n) {
		throw("stackmapdata: index out of range")
	}
	return bitvector{stkmap.nbit, addb(&stkmap.bytedata[0], uintptr(n*((stkmap.nbit+7)>>3)))}
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
_ "unsafe" // for linkname
)
// inlinedCall is the encoding of entries in the FUNCDATA_InlTree table.
// Field layout must match what the compiler/linker emit.
type inlinedCall struct {
	funcID    abi.FuncID // type of the called function
	_         [3]byte    // explicit padding to align the following int32s
	nameOff   int32      // offset into pclntab for name of called function
	parentPc  int32      // position of an instruction whose source position is the call site (offset from entry)
	startLine int32      // line number of start of function (func keyword/TEXT directive)
}
// An inlineUnwinder iterates over the stack of inlined calls at a PC by
// decoding the inline table. The last step of iteration is always the frame of
// the physical function, so there's always at least one frame.
//
// This is typically used as:
//
//	for u, uf := newInlineUnwinder(...); uf.valid(); uf = u.next(uf) { ... }
//
// Implementation note: This is used in contexts that disallow write barriers.
// Hence, the constructor returns this by value and pointer receiver methods
// must not mutate pointer fields. Also, we keep the mutable state in a separate
// struct mostly to keep both structs SSA-able, which generates much better
// code.
type inlineUnwinder struct {
	f funcInfo
	// inlTree points at the FUNCDATA_InlTree table; the large fixed array
	// type exists only to allow indexing, not to imply actual size.
	inlTree *[1 << 20]inlinedCall
}
// An inlineFrame is a position in an inlineUnwinder.
type inlineFrame struct {
	// pc is the PC giving the file/line metadata of the current frame. This is
	// always a "call PC" (not a "return PC"). This is 0 when the iterator is
	// exhausted.
	pc uintptr
	// index is the index of the current record in inlTree, or -1 if we are in
	// the outermost function.
	index int32
}
// newInlineUnwinder creates an inlineUnwinder initially set to the inner-most
// inlined frame at PC. PC should be a "call PC" (not a "return PC").
//
// This unwinder uses non-strict handling of PC because it's assumed this is
// only ever used for symbolic debugging. If things go really wrong, it'll just
// fall back to the outermost frame.
//
// newInlineUnwinder should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/phuslu/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname newInlineUnwinder
func newInlineUnwinder(f funcInfo, pc uintptr) (inlineUnwinder, inlineFrame) {
	inldata := funcdata(f, abi.FUNCDATA_InlTree)
	if inldata == nil {
		// No inline table: iteration yields only the physical frame.
		return inlineUnwinder{f: f}, inlineFrame{pc: pc, index: -1}
	}
	inlTree := (*[1 << 20]inlinedCall)(inldata)
	u := inlineUnwinder{f: f, inlTree: inlTree}
	return u, u.resolveInternal(pc)
}
// resolveInternal returns the inlineFrame for pc by looking up the
// PCDATA_InlTreeIndex table (non-strict, so it never throws).
func (u *inlineUnwinder) resolveInternal(pc uintptr) inlineFrame {
	return inlineFrame{
		pc: pc,
		// Conveniently, this returns -1 if there's an error, which is the same
		// value we use for the outermost frame.
		index: pcdatavalue1(u.f, abi.PCDATA_InlTreeIndex, pc, false),
	}
}
// valid reports whether uf is a live iteration position (pc is zeroed
// when the iterator is exhausted; see inlineFrame.pc).
func (uf inlineFrame) valid() bool {
	return uf.pc != 0
}
// next returns the frame representing uf's logical caller.
// After the outermost frame (index < 0) it returns an invalid frame,
// terminating iteration.
func (u *inlineUnwinder) next(uf inlineFrame) inlineFrame {
	if uf.index < 0 {
		uf.pc = 0
		return uf
	}
	// The parent's PC is recorded as an offset from the function entry.
	parentPc := u.inlTree[uf.index].parentPc
	return u.resolveInternal(u.f.entry() + uintptr(parentPc))
}
// isInlined returns whether uf is an inlined frame
// (index -1 marks the outermost, physical frame).
func (u *inlineUnwinder) isInlined(uf inlineFrame) bool {
	return uf.index >= 0
}
// srcFunc returns the srcFunc representing the given frame.
//
// srcFunc should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/phuslu/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
// The go:linkname is below.
func (u *inlineUnwinder) srcFunc(uf inlineFrame) srcFunc {
	if uf.index < 0 {
		// Outermost frame: use the physical function's metadata.
		return u.f.srcFunc()
	}
	// Inlined frame: synthesize a srcFunc from the inline tree entry.
	t := &u.inlTree[uf.index]
	return srcFunc{
		u.f.datap,
		t.nameOff,
		t.startLine,
		t.funcID,
	}
}
// NOTE(review): badSrcFunc appears to exist only to carry the
// go:linkname pin for the srcFunc method above (see go.dev/issue/67401);
// it has no body and is not meant to be called. Confirm before changing.
//
//go:linkname badSrcFunc runtime.(*inlineUnwinder).srcFunc
func badSrcFunc(*inlineUnwinder, inlineFrame) srcFunc
// fileLine returns the file name and line number of the call within the given
// frame. As a convenience, for the innermost frame, it returns the file and
// line of the PC this unwinder was started at (often this is a call to another
// physical function).
//
// It returns "?", 0 if something goes wrong.
func (u *inlineUnwinder) fileLine(uf inlineFrame) (file string, line int) {
	// Non-strict lookup: this path is used for symbolic debugging
	// and must not throw.
	file, line32 := funcline1(u.f, uf.pc, false)
	return file, int(line32)
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// A synctestBubble is a set of goroutines started by synctest.Run.
// All mutable fields below are protected by mu unless noted otherwise.
type synctestBubble struct {
	mu      mutex
	timers  timers
	id      uint64 // unique id
	now     int64  // current fake time
	root    *g     // caller of synctest.Run
	waiter  *g     // caller of synctest.Wait
	main    *g     // goroutine started by synctest.Run
	waiting bool   // true if a goroutine is calling synctest.Wait
	done    bool   // true if main has exited
	// The bubble is active (not blocked) so long as running > 0 || active > 0.
	//
	// running is the number of goroutines which are not "durably blocked":
	// Goroutines which are either running, runnable, or non-durably blocked
	// (for example, blocked in a syscall).
	//
	// active is used to keep the bubble from becoming blocked,
	// even if all goroutines in the bubble are blocked.
	// For example, park_m can choose to immediately unpark a goroutine after parking it.
	// It increments the active count to keep the bubble active until it has determined
	// that the park operation has completed.
	total   int // total goroutines
	running int // non-blocked goroutines
	active  int // other sources of activity
}
// changegstatus is called when the non-lock status of a g changes.
// It is never called with a Gscanstatus.
func (bubble *synctestBubble) changegstatus(gp *g, oldval, newval uint32) {
	// Determine whether this change in status affects the idleness of the bubble.
	// If this isn't a goroutine starting, stopping, durably blocking,
	// or waking up after durably blocking, then return immediately without
	// locking bubble.mu.
	//
	// For example, stack growth (newstack) will changegstatus
	// from _Grunning to _Gcopystack. This is uninteresting to synctest,
	// but if stack growth occurs while bubble.mu is held, we must not recursively lock.
	totalDelta := 0
	wasRunning := true
	switch oldval {
	case _Gdead, _Gdeadextra:
		// Goroutine is starting: it joins the bubble's total count.
		wasRunning = false
		totalDelta++
	case _Gwaiting:
		if gp.waitreason.isIdleInSynctest() {
			wasRunning = false
		}
	}
	isRunning := true
	switch newval {
	case _Gdead, _Gdeadextra:
		// Goroutine is exiting: it leaves the bubble's total count.
		isRunning = false
		totalDelta--
		if gp == bubble.main {
			bubble.done = true
		}
	case _Gwaiting:
		if gp.waitreason.isIdleInSynctest() {
			isRunning = false
		}
	}
	// It's possible for wasRunning == isRunning while totalDelta != 0;
	// for example, if a new goroutine is created in a non-running state.
	if wasRunning == isRunning && totalDelta == 0 {
		return
	}
	lock(&bubble.mu)
	bubble.total += totalDelta
	if wasRunning != isRunning {
		if isRunning {
			bubble.running++
		} else {
			bubble.running--
			if raceenabled && newval != _Gdead && newval != _Gdeadextra {
				// Record that this goroutine parking happens before
				// any subsequent Wait.
				racereleasemergeg(gp, bubble.raceaddr())
			}
		}
	}
	// Counter invariants; a violation means bookkeeping is corrupt.
	if bubble.total < 0 {
		fatal("total < 0")
	}
	if bubble.running < 0 {
		fatal("running < 0")
	}
	// If the bubble just became durably blocked, pick a goroutine to wake.
	// goready must be called after dropping bubble.mu.
	wake := bubble.maybeWakeLocked()
	unlock(&bubble.mu)
	if wake != nil {
		goready(wake, 0)
	}
}
// incActive increments the active-count for the bubble.
// A bubble does not become durably blocked while the active-count is non-zero.
func (bubble *synctestBubble) incActive() {
	lock(&bubble.mu)
	bubble.active++
	unlock(&bubble.mu)
}
// decActive decrements the active-count for the bubble,
// waking a goroutine if the bubble thereby became durably blocked.
func (bubble *synctestBubble) decActive() {
	lock(&bubble.mu)
	bubble.active--
	if bubble.active < 0 {
		throw("active < 0")
	}
	// goready must be called after dropping bubble.mu.
	wake := bubble.maybeWakeLocked()
	unlock(&bubble.mu)
	if wake != nil {
		goready(wake, 0)
	}
}
// maybeWakeLocked returns a g to wake if the bubble is durably blocked,
// or nil if the bubble is still active. Caller must hold bubble.mu.
func (bubble *synctestBubble) maybeWakeLocked() *g {
	if bubble.running > 0 || bubble.active > 0 {
		return nil
	}
	// Increment the bubble active count, since we've determined to wake something.
	// The woken goroutine will decrement the count.
	// We can't just call goready and let it increment bubble.running,
	// since we can't call goready with bubble.mu held.
	//
	// Incrementing the active count here is only necessary if something has gone wrong,
	// and a goroutine that we considered durably blocked wakes up unexpectedly.
	// Two wakes happening at the same time leads to very confusing failure modes,
	// so we take steps to avoid it happening.
	bubble.active++
	next := bubble.timers.wakeTime()
	if next > 0 && next <= bubble.now {
		// A timer is scheduled to fire. Wake the root goroutine to handle it.
		return bubble.root
	}
	if gp := bubble.waiter; gp != nil {
		// A goroutine is blocked in Wait. Wake it.
		return gp
	}
	// All goroutines in the bubble are durably blocked, and nothing has called Wait.
	// Wake the root goroutine.
	return bubble.root
}
// raceaddr returns the address used by the race detector to model this
// bubble's happens-before edges (the bubble itself serves as the token).
func (bubble *synctestBubble) raceaddr() unsafe.Pointer {
	// Address used to record happens-before relationships created by the bubble.
	//
	// Wait creates a happens-before relationship between itself and
	// the blocking operations which caused other goroutines in the bubble to park.
	return unsafe.Pointer(bubble)
}
// bubbleGen issues unique bubble IDs; incremented once per synctest.Run.
var bubbleGen atomic.Uint64 // bubble ID counter
// synctestRun implements synctest.Run: it runs f in a new bubble with a
// fake clock, advancing time to the next pending timer whenever every
// bubbled goroutine is durably blocked, and panicking on deadlock.
//
//go:linkname synctestRun internal/synctest.Run
func synctestRun(f func()) {
	if debug.asynctimerchan.Load() != 0 {
		panic("synctest.Run not supported with asynctimerchan!=0")
	}
	gp := getg()
	if gp.bubble != nil {
		panic("synctest.Run called from within a synctest bubble")
	}
	// The root goroutine counts toward total/running from the start.
	bubble := &synctestBubble{
		id:      bubbleGen.Add(1),
		total:   1,
		running: 1,
		root:    gp,
	}
	const synctestBaseTime = 946684800000000000 // midnight UTC 2000-01-01
	bubble.now = synctestBaseTime
	lockInit(&bubble.mu, lockRankSynctest)
	lockInit(&bubble.timers.mu, lockRankTimers)
	gp.bubble = bubble
	defer func() {
		gp.bubble = nil
	}()
	// This is newproc, but also records the new g in bubble.main.
	pc := sys.GetCallerPC()
	systemstack(func() {
		fv := *(**funcval)(unsafe.Pointer(&f))
		bubble.main = newproc1(fv, gp, pc, false, waitReasonZero)
		pp := getg().m.p.ptr()
		runqput(pp, bubble.main, true)
		wakep()
	})
	// Event loop: run due timers, park until the bubble durably blocks,
	// then advance the fake clock to the next timer's wake time.
	lock(&bubble.mu)
	bubble.active++
	for {
		unlock(&bubble.mu)
		systemstack(func() {
			// Clear gp.m.curg while running timers,
			// so timer goroutines inherit their child race context from g0.
			curg := gp.m.curg
			gp.m.curg = nil
			gp.bubble.timers.check(bubble.now, bubble)
			gp.m.curg = curg
		})
		gopark(synctestidle_c, nil, waitReasonSynctestRun, traceBlockSynctest, 0)
		lock(&bubble.mu)
		if bubble.active < 0 {
			throw("active < 0")
		}
		next := bubble.timers.wakeTime()
		if next == 0 {
			// No timers left; the bubble is finished or deadlocked.
			break
		}
		if next < bubble.now {
			throw("time went backwards")
		}
		if bubble.done {
			// Time stops once the bubble's main goroutine has exited.
			break
		}
		bubble.now = next
	}
	total := bubble.total
	unlock(&bubble.mu)
	if raceenabled {
		// Establish a happens-before relationship between bubbled goroutines exiting
		// and Run returning.
		raceacquireg(gp, gp.bubble.raceaddr())
	}
	// Exactly the root goroutine should remain; anything else means
	// bubbled goroutines are still blocked: report a deadlock.
	if total != 1 {
		var reason string
		if bubble.done {
			reason = "deadlock: main bubble goroutine has exited but blocked goroutines remain"
		} else {
			reason = "deadlock: all goroutines in bubble are blocked"
		}
		panic(synctestDeadlockError{reason: reason, bubble: bubble})
	}
	if gp.timer != nil && gp.timer.isFake {
		// Verify that we haven't marked this goroutine's sleep timer as fake.
		// This could happen if something in Run were to call timeSleep.
		throw("synctest root goroutine has a fake timer")
	}
}
// synctestDeadlockError is the panic value raised by synctestRun when
// goroutines remain blocked in the bubble after Run should have finished.
type synctestDeadlockError struct {
	reason string
	bubble *synctestBubble
}
// Error implements the error interface, returning the deadlock reason.
func (e synctestDeadlockError) Error() string {
	return e.reason
}
// synctestidle_c is the gopark unlockf for the root goroutine in
// synctestRun. It reports whether the root may actually park: if every
// other goroutine in the bubble is already blocked or exited, parking
// would deadlock, so the park is refused and the root keeps running.
func synctestidle_c(gp *g, _ unsafe.Pointer) bool {
	lock(&gp.bubble.mu)
	canIdle := true
	if gp.bubble.running == 0 && gp.bubble.active == 1 {
		// All goroutines in the bubble have blocked or exited.
		// (active == 1 is the root's own count taken in synctestRun.)
		canIdle = false
	} else {
		gp.bubble.active--
	}
	unlock(&gp.bubble.mu)
	return canIdle
}
// synctestWait implements synctest.Wait: it blocks until every other
// goroutine in the caller's bubble is durably blocked.
//
//go:linkname synctestWait internal/synctest.Wait
func synctestWait() {
	gp := getg()
	if gp.bubble == nil {
		panic("goroutine is not in a bubble")
	}
	lock(&gp.bubble.mu)
	// We use a bubble.waiting bool to detect simultaneous calls to Wait rather than
	// checking to see if bubble.waiter is non-nil. This avoids a race between unlocking
	// bubble.mu and setting bubble.waiter while parking.
	if gp.bubble.waiting {
		unlock(&gp.bubble.mu)
		panic("wait already in progress")
	}
	gp.bubble.waiting = true
	unlock(&gp.bubble.mu)
	gopark(synctestwait_c, nil, waitReasonSynctestWait, traceBlockSynctest, 0)
	// We were woken by maybeWakeLocked, which incremented active on our
	// behalf; drop that count now that we're running again.
	lock(&gp.bubble.mu)
	gp.bubble.active--
	if gp.bubble.active < 0 {
		throw("active < 0")
	}
	gp.bubble.waiter = nil
	gp.bubble.waiting = false
	unlock(&gp.bubble.mu)
	// Establish a happens-before relationship on the activity of the now-blocked
	// goroutines in the bubble.
	if raceenabled {
		raceacquireg(gp, gp.bubble.raceaddr())
	}
}
// synctestwait_c is the gopark unlockf for synctestWait. It records gp as
// the bubble's waiter and always allows the park to proceed.
func synctestwait_c(gp *g, _ unsafe.Pointer) bool {
	lock(&gp.bubble.mu)
	if gp.bubble.running == 0 && gp.bubble.active == 0 {
		// This shouldn't be possible, since gopark increments active during unlockf.
		throw("running == 0 && active == 0")
	}
	gp.bubble.waiter = gp
	unlock(&gp.bubble.mu)
	return true
}
// synctest_isInBubble reports whether the calling goroutine runs inside
// a synctest bubble.
//
//go:linkname synctest_isInBubble internal/synctest.IsInBubble
func synctest_isInBubble() bool {
	return getg().bubble != nil
}
// synctest_acquire marks the caller's bubble as active (preventing it
// from becoming durably blocked) and returns an opaque handle for a
// matching synctest_release. Returns nil outside a bubble.
//
//go:linkname synctest_acquire internal/synctest.acquire
func synctest_acquire() any {
	if bubble := getg().bubble; bubble != nil {
		bubble.incActive()
		return bubble
	}
	return nil
}
// synctest_release undoes a synctest_acquire on the handle it returned.
//
//go:linkname synctest_release internal/synctest.release
func synctest_release(bubble any) {
	bubble.(*synctestBubble).decActive()
}
// synctest_inBubble runs f with the calling goroutine temporarily made a
// member of the given bubble, restoring the unbubbled state afterwards
// (even if f panics, via the deferred reset).
//
//go:linkname synctest_inBubble internal/synctest.inBubble
func synctest_inBubble(bubble any, f func()) {
	gp := getg()
	if gp.bubble != nil {
		panic("goroutine is already bubbled")
	}
	gp.bubble = bubble.(*synctestBubble)
	defer func() {
		gp.bubble = nil
	}()
	f()
}
// specialBubble is a special used to associate objects with bubbles.
// It is attached to a heap span's specials list by getOrSetBubbleSpecial.
type specialBubble struct {
	_        sys.NotInHeap
	special  special
	bubbleid uint64 // id of the owning synctestBubble
}
// Association states returned by getOrSetBubbleSpecial.
// Keep these in sync with internal/synctest.
const (
	bubbleAssocUnbubbled     = iota // not associated with any bubble
	bubbleAssocCurrentBubble        // associated with the current bubble
	bubbleAssocOtherBubble          // associated with a different bubble
)
// getOrSetBubbleSpecial checks the special record for p's bubble membership.
//
// If add is true and p is not associated with any bubble,
// it adds a special record for p associating it with bubbleid.
//
// It returns ok==true if p is associated with bubbleid
// (including if a new association was added),
// and ok==false if not.
func getOrSetBubbleSpecial(p unsafe.Pointer, bubbleid uint64, add bool) (assoc int) {
	span := spanOfHeap(uintptr(p))
	if span == nil {
		// This is probably a package var.
		// We can't attach a special to it, so always consider it unbubbled.
		return bubbleAssocUnbubbled
	}
	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()
	offset := uintptr(p) - span.base()
	lock(&span.speciallock)
	// Find splice point, check for existing record.
	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialBubble)
	if exists {
		// p is already associated with a bubble.
		// Return true iff it's the same bubble.
		s := (*specialBubble)((unsafe.Pointer)(*iter))
		if s.bubbleid == bubbleid {
			assoc = bubbleAssocCurrentBubble
		} else {
			assoc = bubbleAssocOtherBubble
		}
	} else if add {
		// p is not associated with a bubble,
		// and we've been asked to add an association.
		// Allocate the record from the fixed allocator and splice it
		// into the span's specials list at iter.
		lock(&mheap_.speciallock)
		s := (*specialBubble)(mheap_.specialBubbleAlloc.alloc())
		unlock(&mheap_.speciallock)
		s.bubbleid = bubbleid
		s.special.kind = _KindSpecialBubble
		s.special.offset = offset
		s.special.next = *iter
		*iter = (*special)(unsafe.Pointer(s))
		spanHasSpecials(span)
		assoc = bubbleAssocCurrentBubble
	} else {
		// p is not associated with a bubble.
		assoc = bubbleAssocUnbubbled
	}
	unlock(&span.speciallock)
	releasem(mp)
	return assoc
}
// synctest_associate associates p with the current bubble.
// It returns false if p is already associated with a different bubble.
//
//go:linkname synctest_associate internal/synctest.associate
func synctest_associate(p unsafe.Pointer) int {
	return getOrSetBubbleSpecial(p, getg().bubble.id, true)
}
// synctest_disassociate disassociates p from its bubble
// by removing its bubble special record, if any.
//
//go:linkname synctest_disassociate internal/synctest.disassociate
func synctest_disassociate(p unsafe.Pointer) {
	removespecial(p, _KindSpecialBubble)
}
// synctest_isAssociated reports whether p is associated with the current bubble.
// It never adds an association (add=false).
//
//go:linkname synctest_isAssociated internal/synctest.isAssociated
func synctest_isAssociated(p unsafe.Pointer) bool {
	return getOrSetBubbleSpecial(p, getg().bubble.id, false) == bubbleAssocCurrentBubble
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !ppc64 && !ppc64le
package runtime
// prepGoExitFrame is a no-op here; per the build constraint above
// (!ppc64 && !ppc64le), a real implementation presumably exists for
// ppc64/ppc64le elsewhere — confirm against the runtime's ppc64 sources.
func prepGoExitFrame(sp uintptr) {
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 || 386
package runtime
import (
"internal/goarch"
"unsafe"
)
// adjust Gobuf as if it executed a call to fn with context ctxt
// and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
	sp := buf.sp
	// Push the current PC as the return address, mimicking a CALL
	// instruction on amd64/386.
	sp -= goarch.PtrSize
	*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
	buf.sp = sp
	buf.pc = uintptr(fn)
	buf.ctxt = ctxt
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm
package runtime
import (
"internal/goarch"
"internal/goos"
"unsafe"
)
const (
	// addrBits is the number of bits needed to represent a virtual address.
	//
	// See heapAddrBits for a table of address space sizes on
	// various architectures. 48 bits is enough for all
	// arch/os combos except s390x, aix, and riscv64.
	//
	// On AMD64, virtual addresses are 48-bit (or 57-bit) sign-extended.
	// Other archs are 48-bit zero-extended.
	//
	// We use one extra bit to placate systems which simulate amd64 binaries on
	// an arm64 host. Allocated arm64 addresses could be as high as 1<<48-1,
	// which would be invalid if we assumed 48-bit sign-extended addresses.
	// See issue 69255.
	// (Note that this does not help the other way around, simulating arm64
	// on amd64, but we don't have that problem at the moment.)
	//
	// On s390x, virtual addresses are 64-bit. There's not much we
	// can do about this, so we just hope that the kernel doesn't
	// get to really high addresses and panic if it does.
	defaultAddrBits = 48 + 1

	// On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit
	// offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF(LSA)
	// are available for mmap.
	// We assume all tagged addresses are from memory allocated with mmap.
	// We use one bit to distinguish between the two ranges.
	aixAddrBits = 57

	// Later versions of FreeBSD enable amd64's la57 by default.
	freebsdAmd64AddrBits = 57

	// riscv64 SV57 mode gives 56 bits of userspace VA.
	// tagged pointer code supports it,
	// but broader support for SV57 mode is incomplete,
	// and there may be other issues (see #54104).
	riscv64AddrBits = 56

	// addrBits selects among the per-platform values above using the
	// branchless goos/goarch 0/1 constants.
	addrBits = goos.IsAix*aixAddrBits + goarch.IsRiscv64*riscv64AddrBits + goos.IsFreebsd*goarch.IsAmd64*freebsdAmd64AddrBits + (1-goos.IsAix)*(1-goarch.IsRiscv64)*(1-goos.IsFreebsd*goarch.IsAmd64)*defaultAddrBits

	// In addition to the 16 bits (or other, depending on arch/os) taken from the top,
	// we can take 9 from the bottom, because we require pointers to be well-aligned
	// (see tagptr.go:tagAlignBits). That gives us a total of 25 bits for the tag.
	tagBits = 64 - addrBits + tagAlignBits
)
// taggedPointerPack creates a taggedPointer from a pointer and a tag.
// Tag bits that don't fit in the result are discarded.
func taggedPointerPack(ptr unsafe.Pointer, tag uintptr) taggedPointer {
	t := taggedPointer(uint64(uintptr(ptr))<<(tagBits-tagAlignBits) | uint64(tag&(1<<tagBits-1)))
	// Round-trip check: unpacking must reproduce the inputs exactly.
	if t.pointer() != ptr || t.tag() != tag {
		print("runtime: taggedPointerPack invalid packing: ptr=", ptr, " tag=", hex(tag), " packed=", hex(t), " -> ptr=", t.pointer(), " tag=", hex(t.tag()), "\n")
		throw("taggedPointerPack")
	}
	return t
}
// pointer returns the pointer from a taggedPointer.
func (tp taggedPointer) pointer() unsafe.Pointer {
	if GOARCH == "amd64" {
		// amd64 systems can place the stack above the VA hole, so we need to sign extend
		// val before unpacking.
		return unsafe.Pointer(uintptr(int64(tp) >> tagBits << tagAlignBits))
	}
	if GOOS == "aix" {
		// Restore the 0xa segment prefix stripped at pack time
		// (see aixAddrBits).
		return unsafe.Pointer(uintptr((tp >> tagBits << tagAlignBits) | 0xa<<56))
	}
	return unsafe.Pointer(uintptr(tp >> tagBits << tagAlignBits))
}
// tag returns the tag (the low tagBits bits) from a taggedPointer.
func (tp taggedPointer) tag() uintptr {
	return uintptr(tp & (1<<tagBits - 1))
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Time-related runtime and pieces of package time.
package runtime
import (
"internal/abi"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// time_runtimeNow implements time.runtimeNow. Inside a synctest bubble
// it returns the bubble's fake clock split into seconds/nanoseconds and
// no monotonic reading; otherwise it defers to time_now.
//
//go:linkname time_runtimeNow time.runtimeNow
func time_runtimeNow() (sec int64, nsec int32, mono int64) {
	if bubble := getg().bubble; bubble != nil {
		// bubble.now is in nanoseconds.
		sec = bubble.now / (1000 * 1000 * 1000)
		nsec = int32(bubble.now % (1000 * 1000 * 1000))
		// Don't return a monotonic time inside a synctest bubble.
		// If we return a monotonic time based on the fake clock,
		// arithmetic on times created inside/outside bubbles is confusing.
		// If we return a monotonic time based on the real monotonic clock,
		// arithmetic on times created in the same bubble is confusing.
		// Simplest is to omit the monotonic time within a bubble.
		return sec, nsec, 0
	}
	return time_now()
}
// time_runtimeNano implements time.runtimeNano: the bubble's fake clock
// inside a synctest bubble, nanotime otherwise.
//
//go:linkname time_runtimeNano time.runtimeNano
func time_runtimeNano() int64 {
	gp := getg()
	if gp.bubble != nil {
		return gp.bubble.now
	}
	return nanotime()
}
// time_runtimeIsBubbled reports whether the caller is in a synctest bubble.
//
//go:linkname time_runtimeIsBubbled time.runtimeIsBubbled
func time_runtimeIsBubbled() bool {
	return getg().bubble != nil
}
// A timer is a potentially repeating trigger for calling t.f(t.arg, t.seq).
// Timers are allocated by client code, often as part of other data structures.
// Each P has a heap of pointers to timers that it manages.
//
// A timer is expected to be used by only one client goroutine at a time,
// but there will be concurrent access by the P managing that timer.
// Timer accesses are protected by the lock t.mu, with a snapshot of
// t's state bits published in t.astate to enable certain fast paths to make
// decisions about a timer without acquiring the lock.
type timer struct {
	// mu protects reads and writes to all fields, with exceptions noted below.
	mu mutex

	astate  atomic.Uint8 // atomic copy of state bits at last unlock
	state   uint8        // state bits
	isChan  bool         // timer has a channel; immutable; can be read without lock
	isFake  bool         // timer is using fake time; immutable; can be read without lock
	blocked uint32       // number of goroutines blocked on timer's channel

	// rand randomizes order of timers at same instant; only set when isFake
	// (compared in timerWhen.less).
	rand uint32

	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
	// each time calling f(arg, seq, delay) in the timer goroutine, so f must be
	// a well-behaved function and not block.
	//
	// The arg and seq are client-specified opaque arguments passed back to f.
	// When used from netpoll, arg and seq have meanings defined by netpoll
	// and are completely opaque to this code; in that context, seq is a sequence
	// number to recognize and squelch stale function invocations.
	// When used from package time, arg is a channel (for After, NewTicker)
	// or the function to call (for AfterFunc) and seq is unused (0).
	//
	// Package time does not know about seq, but if this is a channel timer (t.isChan == true),
	// this file uses t.seq as a sequence number to recognize and squelch
	// sends that correspond to an earlier (stale) timer configuration,
	// similar to its use in netpoll. In this usage (that is, when t.isChan == true),
	// writes to seq are protected by both t.mu and t.sendLock,
	// so reads are allowed when holding either of the two mutexes.
	//
	// The delay argument is nanotime() - t.when, meaning the delay in ns between
	// when the timer should have gone off and now. Normally that amount is
	// small enough not to matter, but for channel timers that are fed lazily,
	// the delay can be arbitrarily long; package time subtracts it out to make
	// it look like the send happened earlier than it actually did.
	// (No one looked at the channel since then, or the send would have
	// not happened so late, so no one can tell the difference.)
	when   int64
	period int64
	f      func(arg any, seq uintptr, delay int64)
	arg    any
	seq    uintptr

	// If non-nil, the timers containing t.
	ts *timers

	// sendLock protects sends on the timer's channel.
	// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
	sendLock mutex

	// isSending is used to handle races between running a
	// channel timer and stopping or resetting the timer.
	// It is used only for channel timers (t.isChan == true).
	// It is not used for tickers.
	// The value is incremented when about to send a value on the channel,
	// and decremented after sending the value.
	// The stop/reset code uses this to detect whether it
	// stopped the channel send.
	//
	// isSending is incremented only when t.mu is held.
	// isSending is decremented only when t.sendLock is held.
	// isSending is read only when both t.mu and t.sendLock are held.
	isSending atomic.Int32
}
// init initializes a newly allocated timer t.
// Any code that allocates a timer must call t.init before using it.
// The arg and f can be set during init, or they can be nil in init
// and set by a future call to t.modify.
func (t *timer) init(f func(arg any, seq uintptr, delay int64), arg any) {
	lockInit(&t.mu, lockRankTimer)
	t.f = f
	t.arg = arg
}
// A timers is a per-P set of timers.
type timers struct {
	// mu protects timers; timers are per-P, but the scheduler can
	// access the timers of another P, so we have to lock.
	mu mutex

	// heap is the set of timers, ordered by heap[i].when.
	// Must hold lock to access.
	heap []timerWhen

	// len is an atomic copy of len(heap), maintained by unlock
	// so lock-free readers see the last stable length.
	len atomic.Uint32

	// zombies is the number of timers in the heap
	// that are marked for removal.
	zombies atomic.Int32

	// raceCtx is the race context used while executing timer functions.
	raceCtx uintptr

	// minWhenHeap is the minimum heap[i].when value (= heap[0].when).
	// The wakeTime method uses minWhenHeap and minWhenModified
	// to determine the next wake time.
	// If minWhenHeap = 0, it means there are no timers in the heap.
	minWhenHeap atomic.Int64

	// minWhenModified is a lower bound on the minimum
	// heap[i].when over timers with the timerModified bit set.
	// If minWhenModified = 0, it means there are no timerModified timers in the heap.
	minWhenModified atomic.Int64
}
// timerWhen is one entry in a timers heap: a timer together with a
// cached copy of its when field. The cached when can lag behind t.when
// while the timer has timerModified set; timers.adjust resynchronizes it.
type timerWhen struct {
	timer *timer
	when  int64
}
// less reports whether tw should sort before other in the timer heap.
func (tw timerWhen) less(other timerWhen) bool {
	if tw.when != other.when {
		return tw.when < other.when
	}
	// Equal wake times: fall back to each timer's random tiebreaker.
	// The tiebreaker is only populated for timers using fake time, since
	// there's no practical way to schedule real-time timers for the same
	// instant.
	return tw.timer.rand < other.timer.rand
}
// lock acquires ts.mu, which protects ts.heap and related state.
func (ts *timers) lock() {
	lock(&ts.mu)
}
// unlock refreshes the atomic length copy and releases ts.mu.
func (ts *timers) unlock() {
	// Update atomic copy of len(ts.heap).
	// We only update at unlock so that the len is always
	// the most recent unlocked length, not an ephemeral length.
	// This matters if we lock ts, delete the only timer from the heap,
	// add it back, and unlock. We want ts.len.Load to return 1 the
	// entire time, never 0. This is important for pidleput deciding
	// whether ts is empty.
	ts.len.Store(uint32(len(ts.heap)))
	unlock(&ts.mu)
}
// Timer state field.
// These bits live in timer.state; timer.unlock publishes an atomic copy
// in timer.astate for lock-free fast-path checks.
const (
	// timerHeaped is set when the timer is stored in some P's heap.
	timerHeaped uint8 = 1 << iota

	// timerModified is set when t.when has been modified
	// but the heap's heap[i].when entry still needs to be updated.
	// That change waits until the heap in which
	// the timer appears can be locked and rearranged.
	// timerModified is only set when timerHeaped is also set.
	timerModified

	// timerZombie is set when the timer has been stopped
	// but is still present in some P's heap.
	// Only set when timerHeaped is also set.
	// It is possible for timerModified and timerZombie to both
	// be set, meaning that the timer was modified and then stopped.
	// A timer sending to a channel may be placed in timerZombie
	// to take it out of the heap even though the timer is not stopped,
	// as long as nothing is reading from the channel.
	timerZombie
)
// timerDebug enables printing a textual debug trace of all timer operations to stderr.
// Set to true only when debugging the runtime's timer code itself.
const timerDebug = false
// trace emits a debug line for operation op on t when timerDebug is
// enabled; it is a no-op otherwise.
func (t *timer) trace(op string) {
	if timerDebug {
		t.trace1(op)
	}
}
// trace1 prints one debug trace line for t: a marker per state flag
// (letter when set, "-" when clear), the blocked count, and op.
func (t *timer) trace1(op string) {
	if !timerDebug {
		return
	}
	// mark renders a single flag: its letter when set, "-" otherwise.
	mark := func(set bool, letter string) string {
		if set {
			return letter
		}
		return "-"
	}
	h := mark(t.state&timerHeaped != 0, "h")
	m := mark(t.state&timerModified != 0, "m")
	z := mark(t.state&timerZombie != 0, "z")
	c := mark(t.isChan, "c")
	print("T ", t, " ", h, m, z, c, " b=", t.blocked, " ", op, "\n")
}
// trace emits a debug line for operation op on ts when timerDebug is enabled.
func (ts *timers) trace(op string) {
	if timerDebug {
		println("TS", ts, op)
	}
}
// lock locks the timer, allowing reading or writing any of the timer fields.
func (t *timer) lock() {
	lock(&t.mu)
	t.trace("lock")
}
// unlock updates t.astate and unlocks the timer.
// The astate store must happen before the unlock so that other goroutines
// observing astate see a value no older than the state under this lock.
func (t *timer) unlock() {
	t.trace("unlock")
	// Let heap fast paths know whether heap[i].when is accurate.
	// Also let maybeRunChan know whether channel is in heap.
	t.astate.Store(t.state)
	unlock(&t.mu)
}
// hchan returns the channel in t.arg.
// t must be a timer with a channel (throws via badTimer otherwise).
func (t *timer) hchan() *hchan {
	if !t.isChan {
		badTimer()
	}
	// Note: t.arg is a chan time.Time,
	// and runtime cannot refer to that type,
	// so we cannot use a type assertion.
	// Instead, pull the channel pointer out of the eface data word.
	return (*hchan)(efaceOf(&t.arg).data)
}
// updateHeap updates t as directed by t.state, updating t.state
// and returning a bool indicating whether the state (and ts.heap[0].when) changed.
// The caller must hold t's lock, or the world can be stopped instead.
// The timer set t.ts must be non-nil and locked, t must be t.ts.heap[0], and updateHeap
// takes care of moving t within the timers heap to preserve the heap invariants.
// (The body throws via badTimer if t.ts is nil or t is not heap[0];
// the earlier claim that ts could be nil here was stale.)
func (t *timer) updateHeap() (updated bool) {
	assertWorldStoppedOrLockHeld(&t.mu)
	t.trace("updateHeap")
	ts := t.ts
	if ts == nil || t != ts.heap[0].timer {
		badTimer()
	}
	assertLockHeld(&ts.mu)
	if t.state&timerZombie != 0 {
		// Take timer out of heap.
		t.state &^= timerHeaped | timerZombie | timerModified
		ts.zombies.Add(-1)
		ts.deleteMin()
		return true
	}
	if t.state&timerModified != 0 {
		// Update ts.heap[0].when and move within heap.
		t.state &^= timerModified
		ts.heap[0].when = t.when
		ts.siftDown(0)
		ts.updateMinWhenHeap()
		return true
	}
	return false
}
// maxWhen is the maximum value for timer's when field (math.MaxInt64).
const maxWhen = 1<<63 - 1
// verifyTimers can be set to true to add debugging checks that the
// timer heaps are valid. Expensive: verify walks the entire heap on
// every adjust.
const verifyTimers = false
// Package time APIs.
// Godoc uses the comments in package time, not these.
// time.now is implemented in assembly.
// timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
//
//go:linkname timeSleep time.Sleep
func timeSleep(ns int64) {
	if ns <= 0 {
		return
	}
	gp := getg()
	t := gp.timer
	if t == nil {
		// Lazily allocate one timer per goroutine and reuse it across sleeps.
		t = new(timer)
		t.init(goroutineReady, gp)
		if gp.bubble != nil {
			// Goroutines in a synctest bubble sleep on fake time.
			t.isFake = true
		}
		gp.timer = t
	}
	var now int64
	if bubble := gp.bubble; bubble != nil {
		now = bubble.now
	} else {
		now = nanotime()
	}
	when := now + ns
	if when < 0 { // check for overflow.
		when = maxWhen
	}
	// Stash the deadline for resetForSleep, which runs after the park.
	gp.sleepWhen = when
	if t.isFake {
		// Call timer.reset in this goroutine, since it's the one in a bubble.
		// We don't need to worry about the timer function running before the goroutine
		// is parked, because time won't advance until we park.
		resetForSleep(gp, nil)
		gopark(nil, nil, waitReasonSleep, traceBlockSleep, 1)
	} else {
		gopark(resetForSleep, nil, waitReasonSleep, traceBlockSleep, 1)
	}
}
// resetForSleep is called after the goroutine is parked for timeSleep.
// We can't call timer.reset in timeSleep itself because if this is a short
// sleep and there are many goroutines then the P can wind up running the
// timer function, goroutineReady, before the goroutine has been parked.
// It always returns true, committing the park.
func resetForSleep(gp *g, _ unsafe.Pointer) bool {
	gp.timer.reset(gp.sleepWhen, 0)
	return true
}
// A timeTimer is a runtime-allocated time.Timer or time.Ticker
// with the additional runtime state following it.
// The runtime state is inaccessible to package time.
type timeTimer struct {
	c    unsafe.Pointer // <-chan time.Time
	init bool           // set by newTimer once the timer is fully configured
	timer
}
// newTimer allocates and returns a new time.Timer or time.Ticker (same layout)
// with the given parameters.
//
//go:linkname newTimer time.newTimer
func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer {
	t := new(timeTimer)
	// f and arg are installed below by t.modify, not here.
	t.timer.init(nil, nil)
	t.trace("new")
	if raceenabled {
		racerelease(unsafe.Pointer(&t.timer))
	}
	if c != nil {
		lockInit(&t.sendLock, lockRankTimerSend)
		t.isChan = true
		c.timer = &t.timer
		if c.dataqsiz == 0 {
			throw("invalid timer channel: no capacity")
		}
	}
	if bubble := getg().bubble; bubble != nil {
		// Timers created inside a synctest bubble run on fake time.
		t.isFake = true
	}
	t.modify(when, period, f, arg, 0)
	t.init = true
	return t
}
// stopTimer stops a timer.
// It reports whether t was stopped before being run.
//
//go:linkname stopTimer time.stopTimer
func stopTimer(t *timeTimer) bool {
	// Fake (bubbled) timers may only be manipulated from inside their bubble.
	if t.isFake && getg().bubble == nil {
		fatal("stop of synctest timer from outside bubble")
	}
	return t.stop()
}
// resetTimer resets an inactive timer, adding it to the timer heap.
//
// Reports whether the timer was modified before it was run.
//
//go:linkname resetTimer time.resetTimer
func resetTimer(t *timeTimer, when, period int64) bool {
	if raceenabled {
		racerelease(unsafe.Pointer(&t.timer))
	}
	// Fake (bubbled) timers may only be manipulated from inside their bubble.
	if t.isFake && getg().bubble == nil {
		fatal("reset of synctest timer from outside bubble")
	}
	return t.reset(when, period)
}
// Go runtime.
// goroutineReady is the timer function used by timeSleep:
// it makes the goroutine in arg runnable again.
func goroutineReady(arg any, _ uintptr, _ int64) {
	goready(arg.(*g), 0)
}
// addHeap adds t to the timers heap.
// The caller must hold ts.lock or the world must be stopped.
// The caller must also have checked that t belongs in the heap.
// Callers that are not sure can call t.maybeAdd instead,
// but note that maybeAdd has different locking requirements.
func (ts *timers) addHeap(t *timer) {
	assertWorldStoppedOrLockHeld(&ts.mu)
	// Timers rely on the network poller, so make sure the poller
	// has started.
	if netpollInited.Load() == 0 {
		netpollGenericInit()
	}
	if t.ts != nil {
		throw("ts set in timer")
	}
	t.ts = ts
	ts.heap = append(ts.heap, timerWhen{t, t.when})
	ts.siftUp(len(ts.heap) - 1)
	if t == ts.heap[0].timer {
		// t is the new minimum; refresh the cached earliest when.
		ts.updateMinWhenHeap()
	}
}
// maybeRunAsync checks whether t needs to be triggered and runs it if so.
// The caller is responsible for locking the timer and for checking that we
// are running timers in async mode. If the timer needs to be run,
// maybeRunAsync will unlock and re-lock it.
// The timer is always locked on return.
func (t *timer) maybeRunAsync() {
	assertLockHeld(&t.mu)
	if t.state&timerHeaped == 0 && t.isChan && t.when > 0 {
		// If timer should have triggered already (but nothing looked at it yet),
		// trigger now, so that a receive after the stop sees the "old" value
		// that should be there.
		// (It is possible to have t.blocked > 0 if there is a racing receive
		// in blockTimerChan, but timerHeaped not being set means
		// it hasn't run t.maybeAdd yet; in that case, running the
		// timer ourselves now is fine.)
		if now := nanotime(); t.when <= now {
			systemstack(func() {
				t.unlockAndRun(now, nil) // resets t.when
			})
			// unlockAndRun returned with t unlocked; restore the caller's lock.
			t.lock()
		}
	}
}
// stop stops the timer t. It may be on some other P, so we can't
// actually remove it from the timers heap. We can only mark it as stopped.
// It will be removed in due course by the P whose heap it is on.
// Reports whether the timer was stopped before it was run.
func (t *timer) stop() bool {
	async := debug.asynctimerchan.Load() != 0
	if !async && t.isChan {
		// Lock order: sendLock before t.mu (see unlockAndRun).
		lock(&t.sendLock)
	}
	t.lock()
	t.trace("stop")
	if async {
		t.maybeRunAsync()
	}
	if t.state&timerHeaped != 0 {
		// Leave in heap, marked for eventual removal by the owning P.
		t.state |= timerModified
		if t.state&timerZombie == 0 {
			t.state |= timerZombie
			t.ts.zombies.Add(1)
		}
	}
	pending := t.when > 0
	t.when = 0
	if !async && t.isChan {
		// Stop any future sends with stale values.
		// See timer.unlockAndRun.
		t.seq++
		// If there is currently a send in progress,
		// incrementing seq is going to prevent that
		// send from actually happening. That means
		// that we should return true: the timer was
		// stopped, even though t.when may be zero.
		if t.period == 0 && t.isSending.Load() > 0 {
			pending = true
		}
	}
	t.unlock()
	if !async && t.isChan {
		unlock(&t.sendLock)
		if timerchandrain(t.hchan()) {
			pending = true
		}
	}
	return pending
}
// deleteMin removes timer 0 from ts.
// ts must be locked.
func (ts *timers) deleteMin() {
	assertLockHeld(&ts.mu)
	t := ts.heap[0].timer
	if t.ts != ts {
		throw("wrong timers")
	}
	t.ts = nil
	last := len(ts.heap) - 1
	if last > 0 {
		// Move the tail entry to the root, then restore heap order below.
		ts.heap[0] = ts.heap[last]
	}
	// Zero the vacated slot so the timer pointer is not retained.
	ts.heap[last] = timerWhen{}
	ts.heap = ts.heap[:last]
	if last > 0 {
		ts.siftDown(0)
	}
	ts.updateMinWhenHeap()
	if last == 0 {
		// If there are no timers, then clearly there are no timerModified timers.
		ts.minWhenModified.Store(0)
	}
}
// modify modifies an existing timer.
// This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset.
// Reports whether the timer was modified before it was run.
// If f == nil, then t.f, t.arg, and t.seq are not modified.
func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, seq uintptr) bool {
	if when <= 0 {
		throw("timer when must be positive")
	}
	if period < 0 {
		throw("timer period must be non-negative")
	}
	async := debug.asynctimerchan.Load() != 0
	if !async && t.isChan {
		// Lock order: sendLock before t.mu (see unlockAndRun).
		lock(&t.sendLock)
	}
	t.lock()
	if async {
		t.maybeRunAsync()
	}
	t.trace("modify")
	oldPeriod := t.period
	t.period = period
	if f != nil {
		t.f = f
		t.arg = arg
		t.seq = seq
	}
	wake := false
	pending := t.when > 0
	t.when = when
	if t.state&timerHeaped != 0 {
		t.state |= timerModified
		if t.state&timerZombie != 0 {
			// In the heap but marked for removal (by a Stop).
			// Unmark it, since it has been Reset and will be running again.
			t.ts.zombies.Add(-1)
			t.state &^= timerZombie
		}
		// The corresponding heap[i].when is updated later.
		// See comment in type timer above and in timers.adjust below.
		if min := t.ts.minWhenModified.Load(); min == 0 || when < min {
			wake = true
			// Force timerModified bit out to t.astate before updating t.minWhenModified,
			// to synchronize with t.ts.adjust. See comment in adjust.
			t.astate.Store(t.state)
			t.ts.updateMinWhenModified(when)
		}
	}
	add := t.needsAdd()
	if add && t.isFake {
		// If this is a bubbled timer scheduled to fire immediately,
		// run it now rather than waiting for the bubble's timer scheduler.
		// This avoids deferring timer execution until after the bubble
		// becomes durably blocked.
		//
		// Don't do this for non-bubbled timers: It isn't necessary,
		// and there may be cases where the runtime executes timers with
		// the expectation the timer func will not run in the current goroutine.
		// Bubbled timers are always created by the time package, and are
		// safe to run in the current goroutine.
		bubble := getg().bubble
		if bubble == nil {
			throw("fake timer executing with no bubble")
		}
		if t.state&timerHeaped == 0 && when <= bubble.now {
			systemstack(func() {
				if !async && t.isChan {
					unlock(&t.sendLock)
				}
				// unlockAndRun consumes t's lock and returns with it released.
				t.unlockAndRun(bubble.now, bubble)
			})
			return pending
		}
	}
	if !async && t.isChan {
		// Stop any future sends with stale values.
		// See timer.unlockAndRun.
		t.seq++
		// If there is currently a send in progress,
		// incrementing seq is going to prevent that
		// send from actually happening. That means
		// that we should return true: the timer was
		// stopped, even though t.when may be zero.
		if oldPeriod == 0 && t.isSending.Load() > 0 {
			pending = true
		}
	}
	t.unlock()
	if !async && t.isChan {
		if timerchandrain(t.hchan()) {
			pending = true
		}
		unlock(&t.sendLock)
	}
	if add {
		t.maybeAdd()
	}
	if wake {
		wakeNetPoller(when)
	}
	return pending
}
// needsAdd reports whether t needs to be added to a timers heap:
// it is not already heaped, its when is positive, and — for channel
// timers — at least one goroutine is blocked on the channel.
// t must be locked.
func (t *timer) needsAdd() bool {
	assertLockHeld(&t.mu)
	need := t.state&timerHeaped == 0 && t.when > 0
	if need && t.isChan {
		// Channel timers stay out of the heap until a reader blocks.
		need = t.blocked > 0
	}
	if need {
		t.trace("needsAdd+")
	} else {
		t.trace("needsAdd-")
	}
	return need
}
// maybeAdd adds t to the local timers heap if it needs to be in a heap.
// The caller must not hold t's lock nor any timers heap lock.
// The caller probably just unlocked t, but that lock must be dropped
// in order to acquire a ts.lock, to avoid lock inversions.
// (timers.adjust holds ts.lock while acquiring each t's lock,
// so we cannot hold any t's lock while acquiring ts.lock).
//
// Strictly speaking it *might* be okay to hold t.lock and
// acquire ts.lock at the same time, because we know that
// t is not in any ts.heap, so nothing holding a ts.lock would
// be acquiring the t.lock at the same time, meaning there
// isn't a possible deadlock. But it is easier and safer not to be
// too clever and respect the static ordering.
// (If we don't, we have to change the static lock checking of t and ts.)
//
// Concurrent calls to time.Timer.Reset or blockTimerChan
// may result in concurrent calls to t.maybeAdd,
// so we cannot assume that t is not in a heap on entry to t.maybeAdd.
func (t *timer) maybeAdd() {
	// Note: Not holding any locks on entry to t.maybeAdd,
	// so the current g can be rescheduled to a different M and P
	// at any time, including between the ts := assignment and the
	// call to ts.lock. If a reschedule happened then, we would be
	// adding t to some other P's timers, perhaps even a P that the scheduler
	// has marked as idle with no timers, in which case the timer could
	// go unnoticed until long after t.when.
	// Calling acquirem instead of using getg().m makes sure that
	// we end up locking and inserting into the current P's timers.
	mp := acquirem()
	var ts *timers
	if t.isFake {
		bubble := getg().bubble
		if bubble == nil {
			throw("invalid timer: fake time but no syncgroup")
		}
		// Fake timers go in the bubble's heap, not the P's.
		ts = &bubble.timers
	} else {
		ts = &mp.p.ptr().timers
	}
	ts.lock()
	ts.cleanHead()
	t.lock()
	t.trace("maybeAdd")
	when := int64(0)
	wake := false
	if t.needsAdd() {
		if t.isFake {
			// Re-randomize timer order.
			// We could do this for all timers, but unbubbled timers are highly
			// unlikely to have the same when.
			t.rand = cheaprand()
		}
		t.state |= timerHeaped
		when = t.when
		wakeTime := ts.wakeTime()
		wake = wakeTime == 0 || when < wakeTime
		ts.addHeap(t)
	}
	t.unlock()
	ts.unlock()
	releasem(mp)
	if wake {
		wakeNetPoller(when)
	}
}
// reset resets the time when a timer should fire.
// If used for an inactive timer, the timer will become active.
// Reports whether the timer was active and was stopped.
// It is a thin wrapper over modify that keeps t.f, t.arg, and t.seq.
func (t *timer) reset(when, period int64) bool {
	return t.modify(when, period, nil, nil, 0)
}
// cleanHead cleans up the head of the timer queue. This speeds up
// programs that create and delete timers; leaving them in the heap
// slows down heap operations.
// The caller must have locked ts.
func (ts *timers) cleanHead() {
	ts.trace("cleanHead")
	assertLockHeld(&ts.mu)
	gp := getg()
	for {
		if len(ts.heap) == 0 {
			return
		}
		// This loop can theoretically run for a while, and because
		// it is holding ts.mu it cannot be preempted.
		// If someone is trying to preempt us, just return.
		// We can clean the timers later.
		if gp.preemptStop {
			return
		}
		// Delete zombies from tail of heap. It requires no heap adjustments at all,
		// and doing so increases the chances that when we swap out a zombie
		// in heap[0] for the tail of the heap, we'll get a non-zombie timer,
		// shortening this loop.
		n := len(ts.heap)
		if t := ts.heap[n-1].timer; t.astate.Load()&timerZombie != 0 {
			t.lock()
			// Re-check under t's lock: astate is only a hint.
			if t.state&timerZombie != 0 {
				t.state &^= timerHeaped | timerZombie | timerModified
				t.ts = nil
				ts.zombies.Add(-1)
				ts.heap[n-1] = timerWhen{}
				ts.heap = ts.heap[:n-1]
			}
			t.unlock()
			continue
		}
		t := ts.heap[0].timer
		if t.ts != ts {
			throw("bad ts")
		}
		if t.astate.Load()&(timerModified|timerZombie) == 0 {
			// Fast path: head of timers does not need adjustment.
			return
		}
		t.lock()
		updated := t.updateHeap()
		t.unlock()
		if !updated {
			// Head of timers does not need adjustment.
			return
		}
	}
}
// take moves any timers from src into ts
// and then clears the timer state from src,
// because src is being destroyed.
// The caller must not have locked either timers.
// For now this is only called when the world is stopped.
func (ts *timers) take(src *timers) {
	ts.trace("take")
	assertWorldStopped()
	if len(src.heap) > 0 {
		// The world is stopped, so we ignore the locking of ts and src here.
		// That would introduce a sched < timers lock ordering,
		// which we'd rather avoid in the static ranking.
		for _, tw := range src.heap {
			t := tw.timer
			t.ts = nil
			if t.state&timerZombie != 0 {
				// Stopped timer: drop it rather than migrating it.
				t.state &^= timerHeaped | timerZombie | timerModified
			} else {
				t.state &^= timerModified
				ts.addHeap(t)
			}
		}
		// src is being destroyed: reset all of its derived state.
		src.heap = nil
		src.zombies.Store(0)
		src.minWhenHeap.Store(0)
		src.minWhenModified.Store(0)
		src.len.Store(0)
		ts.len.Store(uint32(len(ts.heap)))
	}
}
// adjust looks through the timers in ts.heap for
// any timers that have been modified to run earlier, and puts them in
// the correct place in the heap. While looking for those timers,
// it also moves timers that have been modified to run later,
// and removes deleted timers. The caller must have locked ts.
func (ts *timers) adjust(now int64, force bool) {
	ts.trace("adjust")
	assertLockHeld(&ts.mu)
	// If we haven't yet reached the time of the earliest modified
	// timer, don't do anything. This speeds up programs that adjust
	// a lot of timers back and forth if the timers rarely expire.
	// We'll postpone looking through all the adjusted timers until
	// one would actually expire.
	if !force {
		first := ts.minWhenModified.Load()
		if first == 0 || first > now {
			if verifyTimers {
				ts.verify()
			}
			return
		}
	}
	// minWhenModified is a lower bound on the earliest t.when
	// among the timerModified timers. We want to make it more precise:
	// we are going to scan the heap and clean out all the timerModified bits,
	// at which point minWhenModified can be set to 0 (indicating none at all).
	//
	// Other P's can be calling ts.wakeTime concurrently, and we'd like to
	// keep ts.wakeTime returning an accurate value throughout this entire process.
	//
	// Setting minWhenModified = 0 *before* the scan could make wakeTime
	// return an incorrect value: if minWhenModified < minWhenHeap, then clearing
	// it to 0 will make wakeTime return minWhenHeap (too late) until the scan finishes.
	// To avoid that, we want to set minWhenModified to 0 *after* the scan.
	//
	// Setting minWhenModified = 0 *after* the scan could result in missing
	// concurrent timer modifications in other goroutines; those will lock
	// the specific timer, set the timerModified bit, and set t.when.
	// To avoid that, we want to set minWhenModified to 0 *before* the scan.
	//
	// The way out of this dilemma is to preserve wakeTime a different way.
	// wakeTime is min(minWhenHeap, minWhenModified), and minWhenHeap
	// is protected by ts.lock, which we hold, so we can modify it however we like
	// in service of keeping wakeTime accurate.
	//
	// So we can:
	//
	//	1. Set minWhenHeap = min(minWhenHeap, minWhenModified)
	//	2. Set minWhenModified = 0
	//	   (Other goroutines may modify timers and update minWhenModified now.)
	//	3. Scan timers
	//	4. Set minWhenHeap = heap[0].when
	//
	// That order preserves a correct value of wakeTime throughout the entire
	// operation:
	// Step 1 “locks in” an accurate wakeTime even with minWhenModified cleared.
	// Step 2 makes sure concurrent t.when updates are not lost during the scan.
	// Step 3 processes all modified timer values, justifying minWhenModified = 0.
	// Step 4 corrects minWhenHeap to a precise value.
	//
	// The wakeTime method implementation reads minWhenModified *before* minWhenHeap,
	// so that if the minWhenModified is observed to be 0, that means the minWhenHeap that
	// follows will include the information that was zeroed out of it.
	//
	// Originally Step 3 locked every timer, which made sure any timer update that was
	// already in progress during Steps 1+2 completed and was observed by Step 3.
	// All that locking was too expensive, so now we do an atomic load of t.astate to
	// decide whether we need to do a full lock. To make sure that we still observe any
	// timer update already in progress during Steps 1+2, t.modify sets timerModified
	// in t.astate *before* calling t.updateMinWhenModified. That ensures that the
	// overwrite in Step 2 cannot lose an update: if it does overwrite an update, Step 3
	// will see the timerModified and do a full lock.
	ts.minWhenHeap.Store(ts.wakeTime())
	ts.minWhenModified.Store(0)
	changed := false
	for i := 0; i < len(ts.heap); i++ {
		tw := &ts.heap[i]
		t := tw.timer
		if t.ts != ts {
			throw("bad ts")
		}
		if t.astate.Load()&(timerModified|timerZombie) == 0 {
			// Does not need adjustment.
			continue
		}
		t.lock()
		switch {
		case t.state&timerHeaped == 0:
			badTimer()
		case t.state&timerZombie != 0:
			// Remove the stopped timer by swapping in the heap tail.
			ts.zombies.Add(-1)
			t.state &^= timerHeaped | timerZombie | timerModified
			n := len(ts.heap)
			ts.heap[i] = ts.heap[n-1]
			ts.heap[n-1] = timerWhen{}
			ts.heap = ts.heap[:n-1]
			t.ts = nil
			// Reexamine slot i, which now holds the former tail entry.
			i--
			changed = true
		case t.state&timerModified != 0:
			tw.when = t.when
			t.state &^= timerModified
			changed = true
		}
		t.unlock()
	}
	if changed {
		// In-place when updates and removals may have broken heap order.
		ts.initHeap()
	}
	ts.updateMinWhenHeap()
	if verifyTimers {
		ts.verify()
	}
}
// wakeTime looks at ts's timers and returns the time when we
// should wake up the netpoller. It returns 0 if there are no timers.
// This function is invoked when dropping a P, so it must run without
// any write barriers.
//
//go:nowritebarrierrec
func (ts *timers) wakeTime() int64 {
	// Note that the order of these two loads matters:
	// adjust updates minWhenHeap to make it safe to clear minWhenModified.
	// We read minWhenHeap after reading minWhenModified so that
	// if we see a cleared minWhenModified, we are guaranteed to see
	// the updated minWhenHeap. (See the Step 1-4 comment in adjust.)
	nextWhen := ts.minWhenModified.Load()
	when := ts.minWhenHeap.Load()
	if when == 0 || (nextWhen != 0 && nextWhen < when) {
		when = nextWhen
	}
	return when
}
// check runs any timers in ts that are ready.
// If now is not 0 it is the current time.
// It returns the passed time or the current time if now was passed as 0.
// and the time when the next timer should run or 0 if there is no next timer,
// and reports whether it ran any timers.
// If the time when the next timer should run is not 0,
// it is always larger than the returned time.
// We pass now in and out to avoid extra calls of nanotime.
//
//go:yeswritebarrierrec
func (ts *timers) check(now int64, bubble *synctestBubble) (rnow, pollUntil int64, ran bool) {
	ts.trace("check")
	// If it's not yet time for the first timer, or the first adjusted
	// timer, then there is nothing to do.
	next := ts.wakeTime()
	if next == 0 {
		// No timers to run or adjust.
		return now, 0, false
	}
	if now == 0 {
		now = nanotime()
	}
	// If this is the local P, and there are a lot of deleted timers,
	// clear them out. We only do this for the local P to reduce
	// lock contention on timersLock.
	zombies := ts.zombies.Load()
	if zombies < 0 {
		badTimer()
	}
	// Force a cleanup when zombies exceed a quarter of the heap.
	force := ts == &getg().m.p.ptr().timers && int(zombies) > int(ts.len.Load())/4
	if now < next && !force {
		// Next timer is not ready to run, and we don't need to clear deleted timers.
		return now, next, false
	}
	ts.lock()
	if len(ts.heap) > 0 {
		ts.adjust(now, false)
		for len(ts.heap) > 0 {
			// Note that runtimer may temporarily unlock ts.
			if tw := ts.run(now, bubble); tw != 0 {
				if tw > 0 {
					pollUntil = tw
				}
				break
			}
			ran = true
		}
		// Note: Delaying the forced adjustment until after the ts.run
		// (as opposed to calling ts.adjust(now, force) above)
		// is significantly faster under contention, such as in
		// package time's BenchmarkTimerAdjust10000,
		// though we do not fully understand why.
		force = ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4
		if force {
			ts.adjust(now, true)
		}
	}
	ts.unlock()
	return now, pollUntil, ran
}
// run examines the first timer in ts. If it is ready based on now,
// it runs the timer and removes or updates it.
// Returns 0 if it ran a timer, -1 if there are no more timers, or the time
// when the first timer should run.
// The caller must have locked ts.
// If a timer is run, this will temporarily unlock ts.
//
//go:systemstack
func (ts *timers) run(now int64, bubble *synctestBubble) int64 {
	ts.trace("run")
	assertLockHeld(&ts.mu)
Redo:
	if len(ts.heap) == 0 {
		return -1
	}
	tw := ts.heap[0]
	t := tw.timer
	if t.ts != ts {
		throw("bad ts")
	}
	if t.astate.Load()&(timerModified|timerZombie) == 0 && tw.when > now {
		// Fast path: not ready to run.
		return tw.when
	}
	t.lock()
	if t.updateHeap() {
		// Head changed (zombie removed or when refreshed); look again.
		t.unlock()
		goto Redo
	}
	if t.state&timerHeaped == 0 || t.state&timerModified != 0 {
		badTimer()
	}
	if t.when > now {
		// Not ready to run.
		t.unlock()
		return t.when
	}
	t.unlockAndRun(now, bubble)
	assertLockHeld(&ts.mu) // t is unlocked now, but not ts
	return 0
}
// unlockAndRun unlocks and runs the timer t (which must be locked).
// If t is in a timer set (t.ts != nil), the caller must also have locked the timer set,
// and this call will temporarily unlock the timer set while running the timer function.
// unlockAndRun returns with t unlocked and t.ts (re-)locked.
//
//go:systemstack
func (t *timer) unlockAndRun(now int64, bubble *synctestBubble) {
	t.trace("unlockAndRun")
	assertLockHeld(&t.mu)
	if t.ts != nil {
		assertLockHeld(&t.ts.mu)
	}
	if raceenabled {
		// Note that we are running on a system stack,
		// so there is no chance of getg().m being reassigned
		// out from under us while this function executes.
		gp := getg()
		var tsLocal *timers
		if bubble == nil {
			tsLocal = &gp.m.p.ptr().timers
		} else {
			tsLocal = &bubble.timers
		}
		if tsLocal.raceCtx == 0 {
			tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum)
		}
		raceacquirectx(tsLocal.raceCtx, unsafe.Pointer(t))
	}
	if t.state&(timerModified|timerZombie) != 0 {
		badTimer()
	}
	// Capture the callback, argument, and sequence while t.mu is held.
	f := t.f
	arg := t.arg
	seq := t.seq
	var next int64
	delay := now - t.when
	if t.period > 0 {
		// Leave in heap but adjust next time to fire.
		// Skip whole periods already missed (delay / t.period of them).
		next = t.when + t.period*(1+delay/t.period)
		if next < 0 { // check for overflow.
			next = maxWhen
		}
	} else {
		next = 0
	}
	ts := t.ts
	t.when = next
	if t.state&timerHeaped != 0 {
		t.state |= timerModified
		if next == 0 {
			// One-shot timer: mark for removal from the heap.
			t.state |= timerZombie
			t.ts.zombies.Add(1)
		}
		t.updateHeap()
	}
	async := debug.asynctimerchan.Load() != 0
	if !async && t.isChan && t.period == 0 {
		// Tell Stop/Reset that we are sending a value.
		if t.isSending.Add(1) < 0 {
			throw("too many concurrent timer firings")
		}
	}
	t.unlock()
	if raceenabled {
		// Temporarily use the current P's racectx for g0.
		gp := getg()
		if gp.racectx != 0 {
			throw("unexpected racectx")
		}
		if bubble == nil {
			gp.racectx = gp.m.p.ptr().timers.raceCtx
		} else {
			gp.racectx = bubble.timers.raceCtx
		}
	}
	if ts != nil {
		ts.unlock()
	}
	if bubble != nil {
		// Temporarily use the timer's synctest group for the G running this timer.
		gp := getg()
		if gp.bubble != nil {
			throw("unexpected syncgroup set")
		}
		gp.bubble = bubble
		bubble.changegstatus(gp, _Gdead, _Grunning)
	}
	if !async && t.isChan {
		// For a timer channel, we want to make sure that no stale sends
		// happen after a t.stop or t.modify, but we cannot hold t.mu
		// during the actual send (which f does) due to lock ordering.
		// It can happen that we are holding t's lock above, we decide
		// it's time to send a time value (by calling f), grab the parameters,
		// unlock above, and then a t.stop or t.modify changes the timer
		// and returns. At that point, the send needs not to happen after all.
		// The way we arrange for it not to happen is that t.stop and t.modify
		// both increment t.seq while holding both t.mu and t.sendLock.
		// We copied the seq value above while holding t.mu.
		// Now we can acquire t.sendLock (which will be held across the send)
		// and double-check that t.seq is still the seq value we saw above.
		// If not, the timer has been updated and we should skip the send.
		// We skip the send by reassigning f to a no-op function.
		//
		// The isSending field tells t.stop or t.modify that we have
		// started to send the value. That lets them correctly return
		// true meaning that no value was sent.
		lock(&t.sendLock)
		if t.period == 0 {
			// We are committed to possibly sending a value
			// based on seq, so no need to keep telling
			// stop/modify that we are sending.
			if t.isSending.Add(-1) < 0 {
				throw("mismatched isSending updates")
			}
		}
		if t.seq != seq {
			// Timer was stopped or reset since we captured seq: skip the send.
			f = func(any, uintptr, int64) {}
		}
	}
	f(arg, seq, delay)
	if !async && t.isChan {
		unlock(&t.sendLock)
	}
	if bubble != nil {
		gp := getg()
		bubble.changegstatus(gp, _Grunning, _Gdead)
		if raceenabled {
			// Establish a happens-before between this timer event and
			// the next synctest.Wait call.
			racereleasemergeg(gp, bubble.raceaddr())
		}
		gp.bubble = nil
	}
	if ts != nil {
		ts.lock()
	}
	if raceenabled {
		gp := getg()
		gp.racectx = 0
	}
}
// verify checks every parent/child ordering in ts.heap and the atomic
// length copy, throwing on any violation.
// This is only for debugging, and is only called if verifyTimers is true.
// The caller must have locked ts.
func (ts *timers) verify() {
	assertLockHeld(&ts.mu)
	// Index 0 is the root and has no parent, so start checking at 1.
	for i := 1; i < len(ts.heap); i++ {
		tw := ts.heap[i]
		// The heap is timerHeapN-ary. See siftUp and siftDown.
		p := int(uint(i-1) / timerHeapN)
		if tw.less(ts.heap[p]) {
			print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].when, ", ", i, ": ", tw.when, "\n")
			throw("bad timer heap")
		}
	}
	if n := int(ts.len.Load()); len(ts.heap) != n {
		println("timer heap len", len(ts.heap), "!= atomic len", n)
		throw("bad timer heap len")
	}
}
// updateMinWhenHeap sets ts.minWhenHeap to ts.heap[0].when,
// or to 0 when the heap is empty.
// The caller must have locked ts or the world must be stopped.
func (ts *timers) updateMinWhenHeap() {
	assertWorldStoppedOrLockHeld(&ts.mu)
	when := int64(0)
	if len(ts.heap) > 0 {
		when = ts.heap[0].when
	}
	ts.minWhenHeap.Store(when)
}
// updateMinWhenModified lowers ts.minWhenModified to at most when.
// A stored 0 means "no timerModified timers", so 0 is always replaced.
// ts need not be (and usually is not) locked.
func (ts *timers) updateMinWhenModified(when int64) {
	for {
		cur := ts.minWhenModified.Load()
		needsUpdate := cur == 0 || cur >= when
		if !needsUpdate {
			// Already at least as small as when.
			return
		}
		// Racing updaters may beat us; retry until the CAS lands.
		if ts.minWhenModified.CompareAndSwap(cur, when) {
			return
		}
	}
}
// timeSleepUntil returns the time when the next timer should fire. Returns
// maxWhen if there are no timers.
// This is only called by sysmon and checkdead.
func timeSleepUntil() int64 {
	next := int64(maxWhen)
	// Prevent allp slice changes. This is like retake.
	lock(&allpLock)
	for _, pp := range allp {
		if pp == nil {
			// This can happen if procresize has grown
			// allp but not yet created new Ps.
			continue
		}
		// Keep the earliest nonzero wake time across all Ps.
		if w := pp.timers.wakeTime(); w != 0 && w < next {
			next = w
		}
	}
	unlock(&allpLock)
	return next
}
// timerHeapN is the arity of the timers heap: each node has up to
// timerHeapN children. See siftUp and siftDown.
const timerHeapN = 4
// Heap maintenance algorithms.
// These algorithms check for slice index errors manually.
// Slice index error can happen if the program is using racy
// access to timers. We don't want to panic here, because
// it will cause the program to crash with a mysterious
// "panic holding locks" message. Instead, we panic while not
// holding a lock.
// siftUp moves the timer at position i up toward the top of the heap
// until the heap ordering is restored.
func (ts *timers) siftUp(i int) {
	heap := ts.heap
	if i >= len(heap) {
		badTimer()
	}
	entry := heap[i]
	if entry.when <= 0 {
		// A timer in the heap must have a positive trigger time.
		badTimer()
	}
	for i > 0 {
		parent := int(uint(i-1) / timerHeapN)
		if !entry.less(heap[parent]) {
			break
		}
		// Pull the parent down a level rather than swapping;
		// entry is written once into its final slot below.
		heap[i] = heap[parent]
		i = parent
	}
	// Only write if the slot actually changed, to avoid a
	// redundant store when the timer did not move.
	if heap[i].timer != entry.timer {
		heap[i] = entry
	}
}
// siftDown puts the timer at position i in the right place
// in the heap by moving it down toward the bottom of the heap.
func (ts *timers) siftDown(i int) {
	heap := ts.heap
	n := len(heap)
	if i >= n {
		badTimer()
	}
	if i*timerHeapN+1 >= n {
		// No children: already in place.
		return
	}
	tw := heap[i]
	if tw.when <= 0 {
		// A timer in the heap must have a positive trigger time.
		badTimer()
	}
	for {
		leftChild := i*timerHeapN + 1
		if leftChild >= n {
			break
		}
		// Find the smallest among tw and the up-to-timerHeapN children.
		// Note: the range variable deliberately shadows tw.
		w := tw
		c := -1
		for j, tw := range heap[leftChild:min(leftChild+timerHeapN, n)] {
			if tw.less(w) {
				w = tw
				c = leftChild + j
			}
		}
		if c < 0 {
			// tw is no larger than any child: it belongs at i.
			break
		}
		// Move the smallest child up and continue from its slot.
		heap[i] = heap[c]
		i = c
	}
	// Only write if the slot actually changed, to avoid a
	// redundant store when the timer did not move.
	if heap[i].timer != tw.timer {
		heap[i] = tw
	}
}
// initHeap reestablishes the heap order in the slice ts.heap.
// It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations.
func (ts *timers) initHeap() {
	n := len(ts.heap)
	if n <= 1 {
		// Zero or one element is trivially a heap.
		return
	}
	// Sift down every node that can have children, starting from the
	// parent of the last element, (n-1-1)/timerHeapN, back to the root.
	for i := int(uint(n-2) / timerHeapN); i >= 0; i-- {
		ts.siftDown(i)
	}
}
// badTimer is called if the timer data structures have been corrupted,
// presumably due to racy use by the program. We panic here rather than
// panicking due to invalid slice access while holding locks.
// See issue #25686.
func badTimer() {
	// throw (not panic) so this is reported as fatal runtime
	// corruption rather than a recoverable user panic.
	throw("timer data corruption")
}
// Timer channels.
// maybeRunChan checks whether the timer needs to run
// to send a value to its associated channel. If so, it does.
// The timer must not be locked.
func (t *timer) maybeRunChan(c *hchan) {
	if t.isFake && getg().bubble != c.bubble {
		// This should have been checked by the caller, but check just in case.
		fatal("synctest timer accessed from outside bubble")
	}
	if t.astate.Load()&timerHeaped != 0 {
		// If the timer is in the heap, the ordinary timer code
		// is in charge of sending when appropriate.
		return
	}
	t.lock()
	now := nanotime()
	if t.isFake {
		// Fake (synctest bubble) timers run on the bubble's clock,
		// not the real monotonic clock.
		now = getg().bubble.now
	}
	if t.state&timerHeaped != 0 || t.when == 0 || t.when > now {
		t.trace("maybeRunChan-")
		// Timer in the heap, or not running at all, or not triggered.
		t.unlock()
		return
	}
	t.trace("maybeRunChan+")
	// unlockAndRun drops t's lock and fires the timer.
	// It must run on the system stack.
	systemstack(func() {
		t.unlockAndRun(now, c.bubble)
	})
}
// blockTimerChan is called when a channel op has decided to block on c.
// The caller holds the channel lock for c and possibly other channels.
// blockTimerChan makes sure that c is in a timer heap,
// adding it if needed.
func blockTimerChan(c *hchan) {
	t := c.timer
	if t.isFake && c.bubble != getg().bubble {
		// This should have been checked by the caller, but check just in case.
		fatal("synctest timer accessed from outside bubble")
	}
	t.lock()
	t.trace("blockTimerChan")
	if !t.isChan {
		badTimer()
	}
	// Count this blocked channel operation; unblockTimerChan
	// decrements it.
	t.blocked++

	// If this is the first enqueue after a recent dequeue,
	// the timer may still be in the heap but marked as a zombie.
	// Unmark it in this case, if the timer is still pending.
	if t.state&timerHeaped != 0 && t.state&timerZombie != 0 && t.when > 0 {
		t.state &^= timerZombie
		t.ts.zombies.Add(-1)
	}

	// t.maybeAdd must be called with t unlocked,
	// because it needs to lock t.ts before t.
	// Then it will do nothing if t.needsAdd() is false.
	// Check that now before the unlock,
	// avoiding the extra lock-lock-unlock-unlock
	// inside maybeAdd when t does not need to be added.
	add := t.needsAdd()
	t.unlock()
	if add {
		t.maybeAdd()
	}
}
// unblockTimerChan is called when a channel op that was blocked on c
// is no longer blocked. Every call to blockTimerChan must be paired with
// a call to unblockTimerChan.
// The caller holds the channel lock for c and possibly other channels.
// unblockTimerChan removes c from the timer heap when nothing is
// blocked on it anymore.
func unblockTimerChan(c *hchan) {
	t := c.timer
	t.lock()
	t.trace("unblockTimerChan")
	// A non-channel timer, or more unblocks than blocks, indicates
	// corruption (see blockTimerChan for the pairing invariant).
	if !t.isChan || t.blocked == 0 {
		badTimer()
	}
	t.blocked--
	if t.blocked == 0 && t.state&timerHeaped != 0 && t.state&timerZombie == 0 {
		// Last goroutine that was blocked on this timer.
		// Mark for removal from heap but do not clear t.when,
		// so that we know what time it is still meant to trigger.
		t.state |= timerZombie
		t.ts.zombies.Add(1)
	}
	t.unlock()
}
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !faketime
package runtime
import "unsafe"
// faketime is the simulated time in nanoseconds since 1970 for the
// playground.
//
// Zero means not to use faketime.
//
// NOTE(review): this file is compiled with the !faketime build
// constraint, so here the value presumably remains zero — confirm
// against the faketime build of this file.
var faketime int64
// nanotime returns the current value of the runtime's monotonic
// clock in nanoseconds.
//
// Exported via linkname for use by time and internal/poll.
//
// Many external packages also linkname nanotime for a fast monotonic time.
// Such code should be updated to use:
//
//	var start = time.Now() // at init time
//
// and then replace nanotime() with time.Since(start), which is equally fast.
//
// However, all the code linknaming nanotime is never going to go away.
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname nanotime
//go:nosplit
func nanotime() int64 {
	// Delegates to the OS-specific clock implementation.
	return nanotime1()
}
// overrideWrite allows write to be redirected externally, by
// linkname'ing this and set it to a write function.
//
// overrideWrite should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
// - golang.zx2c4.com/wireguard/windows
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname overrideWrite
var overrideWrite func(fd uintptr, p unsafe.Pointer, n int32) int32
// write writes n bytes from p to fd, routing through overrideWrite
// when one has been installed via linkname.
//
// write must be nosplit on Windows (see write1)
//
//go:nosplit
func write(fd uintptr, p uintptr, n int32) int32 {
	if overrideWrite == nil {
		return write1(fd, p, n)
	}
	// An external override is installed; noescape hides p from
	// escape analysis, matching the original behavior.
	return overrideWrite(fd, noescape(p), n)
}
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (windows && !amd64) || !windows
package runtime
// osSetupTLS is a no-op on platforms that need no OS-specific TLS
// setup for new Ms (everything except windows/amd64, per the build
// constraint above).
//
//go:nosplit
func osSetupTLS(mp *m) {}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// # Go execution tracer
//
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace is captured for most events.
//
// ## Design
//
// The basic idea behind the execution tracer is to have per-M buffers that
// trace data may be written into. Each M maintains a write flag indicating whether
// its trace buffer is currently in use.
//
// Tracing is initiated by StartTrace, and proceeds in "generations," with each
// generation being marked by a call to traceAdvance, to advance to the next
// generation. Generations are a global synchronization point for trace data,
// and we proceed to a new generation by moving forward trace.gen. Each M reads
// trace.gen under its own write flag to determine which generation it is writing
// trace data for. To this end, each M has 2 slots for buffers: one slot for the
// previous generation, one slot for the current one. It uses tl.gen to select
// which buffer slot to write to. Simultaneously, traceAdvance uses the write flag
// to determine whether every thread is guaranteed to observe an updated
// trace.gen. Once it is sure, it may then flush any buffers that are left over
// from the previous generation safely, since it knows the Ms will not mutate
// it.
//
// Flushed buffers are processed by the ReadTrace function, which is called by
// the trace reader goroutine. The first goroutine to call ReadTrace is designated
// as the trace reader goroutine until tracing completes. (There may only be one at
// a time.)
//
// Once all buffers are flushed, any extra post-processing is complete, and flushed
// buffers are processed by the trace reader goroutine, the trace emits an
// EndOfGeneration event to mark the global synchronization point in the trace.
//
// All other trace features, including CPU profile samples, stack information,
// string tables, etc. all revolve around this generation system, and typically
// appear in pairs: one for the previous generation, and one for the current one.
// Like the per-M buffers, which of the two is written to is selected using trace.gen,
// and anything managed this way must similarly be mutated only in traceAdvance or
// under the M's write flag.
//
// Trace events themselves are simple. They consist of a single byte for the event type,
// followed by zero or more LEB128-encoded unsigned varints. They are decoded using
// a pre-determined table for each trace version: internal/trace/tracev2.specs.
//
// To avoid relying on timestamps for correctness and validation, each G and P have
// sequence counters that are written into trace events to encode a partial order.
// The sequence counters reset on each generation. Ms do not need sequence counters
// because they are the source of truth for execution: trace events, and even whole
// buffers, are guaranteed to appear in order in the trace data stream, simply because
// that's the order the thread emitted them in.
//
// See traceruntime.go for the API the tracer exposes to the runtime for emitting events.
//
// In each generation, we ensure that we enumerate all goroutines, such that each
// generation's data is fully self-contained. This makes features like the flight
// recorder easy to implement. To this end, we guarantee that every live goroutine is
// listed at least once by emitting a status event for the goroutine, indicating its
// starting state. These status events are emitted based on context, generally based
// on the event that's about to be emitted.
//
// The traceEventWriter type encapsulates these details, and is the backbone of
// the API exposed in traceruntime.go, though there are deviations where necessary.
//
// This is the overall design, but as always, there are many details. Beyond this,
// look to the invariants and select corner cases below and the code itself for the
// source of truth.
//
// See https://go.dev/issue/60773 for a link to a more complete design with rationale,
// though parts of it are out-of-date.
//
// ## Invariants
//
// 1. An m that has a trace buffer MUST be on either the allm or sched.freem lists.
//
// Otherwise, traceAdvance might miss an M with a buffer that needs to be flushed.
//
// 2. Trace buffers MUST only be mutated in traceAdvance or under a traceAcquire/traceRelease.
//
// Otherwise, traceAdvance may race with Ms writing trace data when trying to flush buffers.
//
// 3. traceAdvance MUST NOT return until all of the current generation's buffers are flushed.
//
// Otherwise, callers cannot rely on all the data they need being available (for example, for
// the flight recorder).
//
// 4. P and goroutine state transition events MUST be emitted by an M that owns its ability
// to transition.
//
// What this means is that the M must either be the owner of the P, the owner of the goroutine,
// or owner of a non-running goroutine's _Gscan bit. There are a lot of bad things that can
// happen if this invariant isn't maintained, mostly around generating inconsistencies in the
// trace due to racy emission of events.
//
// 5. Acquisition of a P (pidleget or takeP/gcstopP) MUST NOT be performed under a traceAcquire/traceRelease pair.
//
// Notably, it's important that traceAcquire/traceRelease not cover a state in which the
// goroutine or P is not yet owned. For example, if traceAcquire is held across both wirep and
// pidleget, then we could end up emitting an event in the wrong generation. Suppose T1
// traceAcquires in generation 1, a generation transition happens, T2 emits a ProcStop and
// executes pidleput in generation 2, and finally T1 calls pidleget and emits ProcStart.
// The ProcStart must follow the ProcStop in the trace to make any sense, but ProcStop was
// emitted in a later generation.
//
// 6. Goroutine state transitions, with the exception of transitions into _Grunning, MUST be
// performed under the traceAcquire/traceRelease pair where the event is emitted.
//
// Otherwise, traceAdvance may observe a goroutine state that is inconsistent with the
// events being emitted. traceAdvance inspects all goroutines' states in order to emit
// a status event for any goroutine that did not have an event emitted for it already.
// If the generation then advances in between that observation and the event being emitted,
// then the trace will contain a status that doesn't line up with the event. For example,
// if the event is emitted after the state transition _Gwaiting -> _Grunnable, then
// traceAdvance may observe the goroutine in _Grunnable, emit a status event, advance the
// generation, and the following generation contains a GoUnblock event. The trace parser
// will get confused because it sees that goroutine in _Grunnable in the previous generation
// trying to be transitioned from _Gwaiting into _Grunnable in the following one. Something
// similar happens if the trace event is emitted before the state transition, so that does
// not help either.
//
// Transitions to _Grunning do not have the same problem because traceAdvance is unable to
// observe running goroutines directly. It must stop them, or wait for them to emit an event.
// Note that it cannot even stop them with asynchronous preemption in any "bad" window between
// the state transition to _Grunning and the event emission because async preemption cannot
// stop goroutines in the runtime.
//
// 7. Goroutine state transitions into _Grunning MUST emit an event for the transition after
// the state transition.
//
// This follows from invariants (4), (5), and the explanation of (6).
// The relevant part of the previous invariant is that in order for the tracer to be unable to
// stop a goroutine, it must be in _Grunning and in the runtime. So to close any windows between
// event emission and the state transition, the event emission must happen *after* the transition
// to _Grunning.
//
// ## Select corner cases
//
// ### CGO calls / system calls
//
// CGO calls and system calls are mostly straightforward, except for P stealing. For historical
// reasons, this introduces a new trace-level P state called ProcSyscall which used to model
// _Psyscall (now _Psyscall_unused). This state is used to indicate in the trace that a P
// is eligible for stealing as part of the parser's ordering logic.
//
// Another quirk of this corner case is the ProcSyscallAbandoned trace-level P state, which
// is used only in status events to indicate a relaxation of verification requirements. It
// means that if the execution trace parser can't find the corresponding thread that the P
// was stolen from in the state it expects it to be, to accept the trace anyway. This is also
// historical. When _Psyscall still existed, one would steal and then ProcSteal, and there
// was no ordering between the ProcSteal and the subsequent GoSyscallEndBlocked. One clearly
// happened before the other, but since P stealing was a single atomic, there was no way
// to enforce the order. The GoSyscallEndBlocked thread could move on and end up in any
// state, and the GoSyscallEndBlocked could be in a completely different generation to the
// ProcSteal. Today this is no longer possible as the ProcSteal is always ordered before
// the GoSyscallEndBlocked event in the runtime.
//
// Both ProcSyscall and ProcSyscallAbandoned are likely no longer necessary.
//
// ### CGO callbacks
//
// When a C thread calls into Go, the execution tracer models that as the creation of a new
// goroutine. When the thread exits back into C, that is modeled as the destruction of that
// goroutine. These are the GoCreateSyscall and GoDestroySyscall events, which represent the
// creation and destruction of a goroutine with its starting and ending states being _Gsyscall.
//
// This model is simple to reason about but contradicts the runtime implementation, which
// doesn't do this directly for performance reasons. The runtime implementation instead caches
// a G on the M created for the C thread. On Linux this M is then cached in the thread's TLS,
// and on other systems, the M is put on a global list on exit from Go. We need to do some
// extra work to make sure that this is modeled correctly in the tracer. For example,
// a C thread exiting Go may leave a P hanging off of its M (whether that M is kept in TLS
// or placed back on a list). In order to correctly model goroutine creation and destruction,
// we must behave as if the P was at some point stolen by the runtime, if the C thread
// reenters Go with the same M (and thus, same P) once more.
package runtime
import (
"internal/runtime/atomic"
"internal/trace/tracev2"
"unsafe"
)
// Trace state.

// trace is the global tracing context.
var trace struct {
	// trace.lock must only be acquired on the system stack where
	// stack splits cannot happen while it is held.
	lock mutex

	// Trace buffer management.
	//
	// First we check the empty list for any free buffers. If not, buffers
	// are allocated directly from the OS. Once they're filled up and/or
	// flushed, they end up on the full queue for trace.gen%2.
	//
	// The trace reader takes buffers off the full list one-by-one and
	// places them into reading until they're finished being read from.
	// Then they're placed onto the empty list.
	//
	// Protected by trace.lock.
	reading       *traceBuf // buffer currently handed off to user
	empty         *traceBuf // stack of empty buffers
	full          [2]traceBufQueue
	workAvailable atomic.Bool

	// State for the trace reader goroutine.
	//
	// Protected by trace.lock.
	readerGen              atomic.Uintptr // the generation the reader is currently reading for
	flushedGen             atomic.Uintptr // the last completed generation
	headerWritten          bool           // whether ReadTrace has emitted trace header
	endOfGenerationWritten bool           // whether ReadTrace has emitted the end of the generation signal

	// doneSema is used to synchronize the reader and traceAdvance. Specifically,
	// it notifies traceAdvance that the reader is done with a generation.
	// Both semaphores are 0 by default (so, acquires block). traceAdvance
	// attempts to acquire for gen%2 after flushing the last buffers for gen.
	// Meanwhile the reader releases the sema for gen%2 when it has finished
	// processing gen.
	doneSema [2]uint32

	// Trace data tables for deduplicating data going into the trace.
	// There are 2 of each: one for gen%2, one for 1-gen%2.
	stackTab  [2]traceStackTable  // maps stack traces to unique ids
	stringTab [2]traceStringTable // maps strings to unique ids
	typeTab   [2]traceTypeTable   // maps type pointers to unique ids

	// cpuLogRead accepts CPU profile samples from the signal handler where
	// they're generated. There are two profBufs here: one for gen%2, one for
	// 1-gen%2. These profBufs use a three-word header to hold the IDs of the P, G,
	// and M (respectively) that were active at the time of the sample. Because
	// profBuf uses a record with all zeros in its header to indicate overflow,
	// we make sure to make the P field always non-zero: The ID of a real P will
	// start at bit 1, and bit 0 will be set. Samples that arrive while no P is
	// running (such as near syscalls) will set the first header field to 0b10.
	// This careful handling of the first header field allows us to store ID of
	// the active G directly in the second field, even though that will be 0
	// when sampling g0.
	//
	// Initialization and teardown of these fields is protected by traceAdvanceSema.
	cpuLogRead  [2]*profBuf
	signalLock  atomic.Uint32              // protects use of the following member, only usable in signal handlers
	cpuLogWrite [2]atomic.Pointer[profBuf] // copy of cpuLogRead for use in signal handlers, set without signalLock
	cpuSleep    *wakeableSleep
	cpuLogDone  <-chan struct{}
	cpuBuf      [2]*traceBuf

	reader atomic.Pointer[g] // goroutine that called ReadTrace, or nil

	// Fast mappings from enumerations to string IDs that are prepopulated
	// in the trace.
	markWorkerLabels [2][len(gcMarkWorkerModeStrings)]traceArg
	goStopReasons    [2][len(traceGoStopReasonStrings)]traceArg
	goBlockReasons   [2][len(traceBlockReasonStrings)]traceArg

	// enabled indicates whether tracing is enabled, but it is only an optimization,
	// NOT the source of truth on whether tracing is enabled. Tracing is only truly
	// enabled if gen != 0. This is used as an optimistic fast path check.
	//
	// Transitioning this value from true -> false is easy (once gen is 0)
	// because it's OK for enabled to have a stale "true" value. traceAcquire will
	// always double-check gen.
	//
	// Transitioning this value from false -> true is harder. We need to make sure
	// this is observable as true strictly before gen != 0. To maintain this invariant
	// we only make this transition with the world stopped and use the store to gen
	// as a publication barrier.
	enabled bool

	// enabledWithAllocFree is set if debug.traceallocfree is != 0 when tracing begins.
	// It follows the same synchronization protocol as enabled.
	enabledWithAllocFree bool

	// Trace generation counter.
	gen            atomic.Uintptr
	lastNonZeroGen uintptr // last non-zero value of gen

	// shutdown is set when we are waiting for trace reader to finish after setting gen to 0
	//
	// Writes protected by trace.lock.
	shutdown atomic.Bool

	// Number of goroutines in syscall exiting slow path.
	exitingSyscall atomic.Int32

	// seqGC is the sequence counter for GC begin/end.
	//
	// Mutated only during stop-the-world.
	seqGC uint64

	// minPageHeapAddr is the minimum address of the page heap when tracing started.
	minPageHeapAddr uint64

	// debugMalloc is the value of debug.malloc before tracing began.
	debugMalloc bool
}
// Trace public API.

// traceAdvanceSema serializes generation advancement (traceAdvance);
// traceShutdownSema serializes cleanup after tracing stops.
// Both start at 1, i.e. available.
var (
	traceAdvanceSema  uint32 = 1
	traceShutdownSema uint32 = 1
)
// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via [ReadTrace].
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the [runtime/trace] package or the [testing] package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	if traceEnabled() || traceShuttingDown() {
		return errorString("tracing is already enabled")
	}
	// Block until cleanup of the last trace is done.
	semacquire(&traceShutdownSema)
	semrelease(&traceShutdownSema)

	// Hold traceAdvanceSema across trace start, since we'll want it on
	// the other side of tracing being enabled globally.
	semacquire(&traceAdvanceSema)

	// Initialize CPU profile -> trace ingestion.
	traceInitReadCPU()

	// Compute the first generation for this StartTrace.
	//
	// Note: we start from the last non-zero generation rather than 1 so we
	// can avoid resetting all the arrays indexed by gen%2 or gen%3. There's
	// more than one of each per m, p, and goroutine.
	firstGen := traceNextGen(trace.lastNonZeroGen)

	// Reset GC sequencer.
	trace.seqGC = 1

	// Reset trace reader state.
	trace.headerWritten = false
	trace.readerGen.Store(firstGen)
	trace.flushedGen.Store(0)

	// Register some basic strings in the string tables.
	traceRegisterLabelsAndReasons(firstGen)

	// N.B. This may block for quite a while to get a frequency estimate. Do it
	// here to minimize the time that the world is stopped.
	frequency := traceClockUnitsPerSecond()

	// Stop the world.
	//
	// What we need to successfully begin tracing is to make sure that the next time
	// *any goroutine* hits a traceAcquire, it sees that the trace is enabled.
	//
	// Stopping the world gets us most of the way there, since it makes sure that goroutines
	// stop executing. There is however one exception: goroutines without Ps concurrently
	// exiting a syscall. We handle this by making sure that, after we update trace.gen,
	// there isn't a single goroutine calling traceAcquire on the syscall slow path by checking
	// trace.exitingSyscall. See the comment on the check below for more details.
	//
	// Note also that stopping the world is necessary to make sure sweep-related events are
	// coherent. Since the world is stopped and sweeps are non-preemptible, we can never start
	// the world and see an unpaired sweep 'end' event. Other parts of the tracer rely on this.
	stw := stopTheWorld(stwStartTrace)

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// Grab the minimum page heap address. All Ps are stopped, so it's safe to read this since
	// nothing can allocate heap memory.
	trace.minPageHeapAddr = uint64(mheap_.pages.inUse.ranges[0].base.addr())

	// Reset mSyscallID on all Ps while we have them stationary and the trace is disabled.
	for _, pp := range allp {
		pp.trace.mSyscallID = -1
	}

	// Start tracing.
	//
	// Set trace.enabled. This is *very* subtle. We need to maintain the invariant that if
	// trace.gen != 0, then trace.enabled is always observed as true. Simultaneously, for
	// performance, we need trace.enabled to be read without any synchronization.
	//
	// We ensure this is safe by stopping the world, which acts a global barrier on almost
	// every M, and explicitly synchronize with any other Ms that could be running concurrently
	// with us. Today, there are only two such cases:
	// - sysmon, which we synchronized with by acquiring sysmonlock.
	// - goroutines exiting syscalls, which we synchronize with via trace.exitingSyscall.
	//
	// After trace.gen is updated, other Ms may start creating trace buffers and emitting
	// data into them.
	trace.enabled = true
	if debug.traceallocfree.Load() != 0 {
		// Enable memory events since the GODEBUG is set.
		// Save the old debug.malloc so it can be restored at shutdown.
		trace.debugMalloc = debug.malloc
		trace.enabledWithAllocFree = true
		debug.malloc = true
	}
	// The store to gen is the publication barrier for trace.enabled (see above).
	trace.gen.Store(firstGen)

	// Wait for exitingSyscall to drain.
	//
	// It may not monotonically decrease to zero, but in the limit it will always become
	// zero because the world is stopped and there are no available Ps for syscall-exited
	// goroutines to run on.
	//
	// Because we set gen before checking this, and because exitingSyscall is always incremented
	// *before* traceAcquire (which checks gen), we can be certain that when exitingSyscall is zero
	// that any goroutine that goes to exit a syscall from then on *must* observe the new gen as
	// well as trace.enabled being set to true.
	//
	// The critical section on each goroutine here is going to be quite short, so the likelihood
	// that we observe a zero value is high.
	for trace.exitingSyscall.Load() != 0 {
		osyield()
	}

	// Record some initial pieces of information.
	//
	// N.B. This will also emit a status event for this goroutine.
	tl := traceAcquire()
	traceSyncBatch(firstGen, frequency) // Get this as early in the trace as possible. See comment in traceAdvance.
	tl.Gomaxprocs(gomaxprocs)           // Get this as early in the trace as possible. See comment in traceAdvance.
	tl.STWStart(stwStartTrace)          // We didn't trace this above, so trace it now.

	// Record the fact that a GC is active, if applicable.
	if gcphase == _GCmark || gcphase == _GCmarktermination {
		tl.GCActive()
	}

	// Dump a snapshot of memory, if enabled.
	if trace.enabledWithAllocFree {
		traceSnapshotMemory(firstGen)
	}

	// Record the heap goal so we have it at the very beginning of the trace.
	tl.HeapGoal()
	traceRelease(tl)

	unlock(&sched.sysmonlock)
	startTheWorld(stw)

	traceStartReadCPU()
	traceAdvancer.start()

	semrelease(&traceAdvanceSema)
	return nil
}
// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Advancing with stopTrace=true disables tracing entirely
	// instead of moving to the next generation.
	traceAdvance(true)
}
// traceAdvance moves tracing to the next generation, and cleans up the current generation,
// ensuring that it's flushed out before returning. If stopTrace is true, it disables tracing
// altogether instead of advancing to the next generation.
//
// traceAdvanceSema must not be held.
//
// traceAdvance is called by runtime/trace and golang.org/x/exp/trace using linkname.
//
//go:linkname traceAdvance
func traceAdvance(stopTrace bool) {
semacquire(&traceAdvanceSema)
// Get the gen that we're advancing from. In this function we don't really care much
// about the generation we're advancing _into_ since we'll do all the cleanup in this
// generation for the next advancement.
gen := trace.gen.Load()
if gen == 0 {
// We may end up here if traceAdvance is called concurrently with StopTrace.
semrelease(&traceAdvanceSema)
return
}
// Collect all the untraced Gs.
type untracedG struct {
gp *g
goid uint64
mid int64
stackID uint64
status uint32
waitreason waitReason
inMarkAssist bool
}
var untracedGs []untracedG
forEachGRace(func(gp *g) {
// Make absolutely sure all Gs are ready for the next
// generation. We need to do this even for dead Gs because
// they may come alive with a new identity, and its status
// traced bookkeeping might end up being stale.
// We may miss totally new goroutines, but they'll always
// have clean bookkeeping.
gp.trace.readyNextGen(gen)
// If the status was traced, nothing else to do.
if gp.trace.statusWasTraced(gen) {
return
}
// Scribble down information about this goroutine.
ug := untracedG{gp: gp, mid: -1}
systemstack(func() {
me := getg().m.curg
// We don't have to handle this G status transition because we
// already eliminated ourselves from consideration above.
casGToWaitingForSuspendG(me, _Grunning, waitReasonTraceGoroutineStatus)
// We need to suspend and take ownership of the G to safely read its
// goid. Note that we can't actually emit the event at this point
// because we might stop the G in a window where it's unsafe to write
// events based on the G's status. We need the global trace buffer flush
// coming up to make sure we're not racing with the G.
//
// It should be very unlikely that we try to preempt a running G here.
// The only situation that we might is that we're racing with a G
// that's running for the first time in this generation. Therefore,
// this should be relatively fast.
s := suspendG(gp)
if !s.dead {
ug.goid = s.g.goid
if s.g.m != nil {
ug.mid = int64(s.g.m.procid)
}
ug.status = readgstatus(s.g) &^ _Gscan
ug.waitreason = s.g.waitreason
ug.inMarkAssist = s.g.inMarkAssist
ug.stackID = traceStack(0, gp, &trace.stackTab[gen%2])
}
resumeG(s)
casgstatus(me, _Gwaiting, _Grunning)
})
if ug.goid != 0 {
untracedGs = append(untracedGs, ug)
}
})
if !stopTrace {
// Re-register runtime goroutine labels and stop/block reasons.
traceRegisterLabelsAndReasons(traceNextGen(gen))
}
// N.B. This may block for quite a while to get a frequency estimate. Do it
// here to minimize the time that we prevent the world from stopping.
frequency := traceClockUnitsPerSecond()
// Prevent the world from stopping.
//
// This is necessary to ensure the consistency of the STW events. If we're feeling
// adventurous we could lift this restriction and add a STWActive event, but the
// cost of maintaining this consistency is low.
//
// This is also a good time to preempt all the Ps and ensure they had a status traced.
semacquire(&worldsema)
// Go over each P and emit a status event for it if necessary.
//
// TODO(mknyszek): forEachP is very heavyweight. We could do better by integrating
// the statusWasTraced check into it, to avoid preempting unnecessarily.
forEachP(waitReasonTraceProcStatus, func(pp *p) {
tl := traceAcquire()
if !pp.trace.statusWasTraced(tl.gen) {
tl.writer().writeProcStatusForP(pp, false).end()
}
traceRelease(tl)
})
// While we're still holding worldsema (preventing a STW and thus a
// change in the number of Ps), reset the status on dead Ps.
// They just appear as idle.
//
// TODO(mknyszek): Consider explicitly emitting ProcCreate and ProcDestroy
// events to indicate whether a P exists, rather than just making its
// existence implicit.
for _, pp := range allp[len(allp):cap(allp)] {
pp.trace.readyNextGen(gen)
}
// Prevent preemption to make sure we're not interrupted.
//
// We want to get through the rest as soon as possible.
mp := acquirem()
// Advance the generation or stop the trace.
trace.lastNonZeroGen = gen
if stopTrace {
systemstack(func() {
// Ordering is important here. Set shutdown first, then disable tracing,
// so that conditions like (traceEnabled() || traceShuttingDown()) have
// no opportunity to be false. Hold the trace lock so this update appears
// atomic to the trace reader.
lock(&trace.lock)
trace.shutdown.Store(true)
trace.gen.Store(0)
unlock(&trace.lock)
// Clear trace.enabled. It is totally OK for this value to be stale,
// because traceAcquire will always double-check gen.
trace.enabled = false
})
} else {
trace.gen.Store(traceNextGen(gen))
}
// Emit a sync batch which contains a ClockSnapshot. Also emit a ProcsChange
// event so we have one on record for each generation. Let's emit it as soon
// as possible so that downstream tools can rely on the value being there
// fairly soon in a generation.
//
// It's important that we do this before allowing stop-the-worlds again,
// because the procs count could change.
if !stopTrace {
tl := traceAcquire()
traceSyncBatch(tl.gen, frequency)
tl.Gomaxprocs(gomaxprocs)
traceRelease(tl)
}
// Emit a GCActive event in the new generation if necessary.
//
// It's important that we do this before allowing stop-the-worlds again,
// because that could emit global GC-related events.
if !stopTrace && (gcphase == _GCmark || gcphase == _GCmarktermination) {
tl := traceAcquire()
tl.GCActive()
traceRelease(tl)
}
// Preemption is OK again after this. If the world stops or whatever it's fine.
// We're just cleaning up the last generation after this point.
//
// We also don't care if the GC starts again after this for the same reasons.
releasem(mp)
semrelease(&worldsema)
// Snapshot allm and freem.
//
// Snapshotting after the generation counter update is sufficient.
// Because an m must be on either allm or sched.freem if it has an active trace
// buffer, new threads added to allm after this point must necessarily observe
// the new generation number (sched.lock acts as a barrier).
//
// Threads that exit before this point and are on neither list explicitly
// flush their own buffers in traceThreadDestroy.
//
// Snapshotting freem is necessary because Ms can continue to emit events
// while they're still on that list. Removal from sched.freem is serialized with
// this snapshot, so either we'll capture an m on sched.freem and race with
// the removal to flush its buffers (resolved by traceThreadDestroy acquiring
// the thread's write flag, which one of us must win, so at least its old gen buffer
// will be flushed in time for the new generation) or it will have flushed its
// buffers before we snapshotted it to begin with.
lock(&sched.lock)
mToFlush := allm
for mp := mToFlush; mp != nil; mp = mp.alllink {
mp.trace.link = mp.alllink
}
for mp := sched.freem; mp != nil; mp = mp.freelink {
mp.trace.link = mToFlush
mToFlush = mp
}
unlock(&sched.lock)
// Iterate over our snapshot, flushing every buffer until we're done.
//
// Because trace writers read the generation while the write flag is
// held, we can be certain that when there are no writers there are
// also no stale generation values left. Therefore, it's safe to flush
// any buffers that remain in that generation's slot.
const debugDeadlock = false
systemstack(func() {
// Track iterations for some rudimentary deadlock detection.
i := 0
detectedDeadlock := false
for mToFlush != nil {
prev := &mToFlush
for mp := *prev; mp != nil; {
if mp.trace.writing.Load() {
// The M is writing. Come back to it later.
prev = &mp.trace.link
mp = mp.trace.link
continue
}
// Flush the trace buffer.
//
// trace.lock needed for traceBufFlush, but also to synchronize
// with traceThreadDestroy, which flushes both buffers unconditionally.
lock(&trace.lock)
for exp, buf := range mp.trace.buf[gen%2] {
if buf != nil {
traceBufFlush(buf, gen)
mp.trace.buf[gen%2][exp] = nil
}
}
unlock(&trace.lock)
// Remove the m from the flush list.
*prev = mp.trace.link
mp.trace.link = nil
mp = *prev
}
// Yield only if we're going to be going around the loop again.
if mToFlush != nil {
osyield()
}
if debugDeadlock {
// Try to detect a deadlock. We probably shouldn't loop here
// this many times.
if i > 100000 && !detectedDeadlock {
detectedDeadlock = true
println("runtime: failing to flush")
for mp := mToFlush; mp != nil; mp = mp.trace.link {
print("runtime: m=", mp.id, "\n")
}
}
i++
}
}
})
// At this point, the old generation is fully flushed minus stack and string
// tables, CPU samples, and goroutines that haven't run at all during the last
// generation.
// Check to see if any Gs still haven't had events written out for them.
statusWriter := unsafeTraceWriter(gen, nil)
for _, ug := range untracedGs {
if ug.gp.trace.statusWasTraced(gen) {
// It was traced, we don't need to do anything.
continue
}
// It still wasn't traced. Because we ensured all Ms stopped writing trace
// events to the last generation, that must mean the G never had its status
// traced in gen between when we recorded it and now. If that's true, the goid
// and status we recorded then is exactly what we want right now.
status := goStatusToTraceGoStatus(ug.status, ug.waitreason)
statusWriter = statusWriter.writeGoStatus(ug.goid, ug.mid, status, ug.inMarkAssist, ug.stackID)
}
statusWriter.flush().end()
// Read everything out of the last gen's CPU profile buffer.
traceReadCPU(gen)
// Flush CPU samples, stacks, and strings for the last generation. This is safe,
// because we're now certain no M is writing to the last generation.
//
// Ordering is important here. traceCPUFlush may generate new stacks and dumping
// stacks may generate new strings.
traceCPUFlush(gen)
trace.stackTab[gen%2].dump(gen)
trace.typeTab[gen%2].dump(gen)
trace.stringTab[gen%2].reset(gen)
// That's it. This generation is done producing buffers.
systemstack(func() {
lock(&trace.lock)
trace.flushedGen.Store(gen)
unlock(&trace.lock)
})
if stopTrace {
// Acquire the shutdown sema to begin the shutdown process.
semacquire(&traceShutdownSema)
// Finish off CPU profile reading.
traceStopReadCPU()
// Reset debug.malloc if necessary. Note that this is set in a racy
// way; that's OK. Some mallocs may still enter into the debug.malloc
// block, but they won't generate events because tracing is disabled.
// That is, it's OK if mallocs read a stale debug.malloc or
// trace.enabledWithAllocFree value.
if trace.enabledWithAllocFree {
trace.enabledWithAllocFree = false
debug.malloc = trace.debugMalloc
}
}
// Block until the trace reader has finished processing the last generation.
semacquire(&trace.doneSema[gen%2])
if raceenabled {
raceacquire(unsafe.Pointer(&trace.doneSema[gen%2]))
}
// Double-check that things look as we expect after advancing and perform some
// final cleanup if the trace has fully stopped.
systemstack(func() {
lock(&trace.lock)
if !trace.full[gen%2].empty() {
throw("trace: non-empty full trace buffer for done generation")
}
if stopTrace {
if !trace.full[1-(gen%2)].empty() {
throw("trace: non-empty full trace buffer for next generation")
}
if trace.reading != nil || trace.reader.Load() != nil {
throw("trace: reading after shutdown")
}
// Free all the empty buffers.
for trace.empty != nil {
buf := trace.empty
trace.empty = buf.link
sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf), &memstats.other_sys)
}
// Clear trace.shutdown and other flags.
trace.headerWritten = false
trace.shutdown.Store(false)
}
unlock(&trace.lock)
})
if stopTrace {
// Clear the sweep state on every P for the next time tracing is enabled.
//
// It may be stale in the next trace because we may have ended tracing in
// the middle of a sweep on a P.
//
// It's fine not to call forEachP here because tracing is disabled and we
// know at this point that nothing is calling into the tracer, but we do
// need to look at dead Ps too just because GOMAXPROCS could have been called
// at any point since we stopped tracing, and we have to ensure there's no
// bad state on dead Ps too. Prevent a STW and a concurrent GOMAXPROCS that
// might mutate allp by making ourselves briefly non-preemptible.
mp := acquirem()
for _, pp := range allp[:cap(allp)] {
pp.trace.inSweep = false
pp.trace.maySweep = false
pp.trace.swept = 0
pp.trace.reclaimed = 0
}
releasem(mp)
}
// Release the advance semaphore. If stopTrace is true we're still holding onto
// traceShutdownSema.
//
// Do a direct handoff. Don't let one caller of traceAdvance starve
// other calls to traceAdvance.
semrelease1(&traceAdvanceSema, true, 0)
if stopTrace {
// Stop the traceAdvancer. We can't be holding traceAdvanceSema here because
// we'll deadlock (we're blocked on the advancer goroutine exiting, but it
// may be currently trying to acquire traceAdvanceSema).
traceAdvancer.stop()
semrelease(&traceShutdownSema)
}
}
// traceNextGen returns the generation number that follows gen.
//
// Generation numbers are consumed both mod 2 and mod 3, so when gen wraps
// around, both residue patterns must continue. ^uintptr(0) is odd and a
// multiple of 3 (for both 32-bit and 64-bit uintptr), so its successor must
// be even and one more than a multiple of 3. The smallest such value is 4.
func traceNextGen(gen uintptr) uintptr {
	if gen != ^uintptr(0) {
		return gen + 1
	}
	return 4
}
// traceRegisterLabelsAndReasons re-registers mark worker labels and
// goroutine stop/block reasons in the string table for the provided
// generation. Note: the provided generation must not have started yet.
func traceRegisterLabelsAndReasons(gen uintptr) {
for i, label := range gcMarkWorkerModeStrings[:] {
trace.markWorkerLabels[gen%2][i] = traceArg(trace.stringTab[gen%2].put(gen, label))
}
for i, str := range traceBlockReasonStrings[:] {
trace.goBlockReasons[gen%2][i] = traceArg(trace.stringTab[gen%2].put(gen, str))
}
for i, str := range traceGoStopReasonStrings[:] {
trace.goStopReasons[gen%2][i] = traceArg(trace.stringTab[gen%2].put(gen, str))
}
}
// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() (buf []byte) {
top:
	var park bool
	// The real work happens in readTrace0, which must run on the system
	// stack because it acquires trace.lock.
	systemstack(func() {
		buf, park = readTrace0()
	})
	if park {
		// No data was available. Park this goroutine as the trace reader
		// until new data arrives. Returning false from the gopark callback
		// aborts the park and makes this goroutine runnable again.
		gopark(func(gp *g, _ unsafe.Pointer) bool {
			if !trace.reader.CompareAndSwapNoWB(nil, gp) {
				// We're racing with another reader.
				// Wake up and handle this case.
				return false
			}

			if g2 := traceReader(); gp == g2 {
				// New data arrived between unlocking
				// and the CAS and we won the wake-up
				// race, so wake up directly.
				return false
			} else if g2 != nil {
				printlock()
				println("runtime: got trace reader", g2, g2.goid)
				throw("unexpected trace reader")
			}
			return true
		}, nil, waitReasonTraceReaderBlocked, traceBlockSystemGoroutine, 2)
		// Woken up: there may be data now. Try again from the top.
		goto top
	}
	return buf
}
// readTrace0 is ReadTrace's continuation on g0. This must run on the
// system stack because it acquires trace.lock.
//
// It returns either the next chunk of trace data (park=false) or, when no
// data is currently available, park=true so that ReadTrace blocks the
// calling goroutine as the trace reader.
//
//go:systemstack
func readTrace0() (buf []byte, park bool) {
	if raceenabled {
		// g0 doesn't have a race context. Borrow the user G's.
		if getg().racectx != 0 {
			throw("expected racectx == 0")
		}
		getg().racectx = getg().m.curg.racectx
		// (This defer should get open-coded, which is safe on
		// the system stack.)
		defer func() { getg().racectx = 0 }()
	}

	// This function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)

	if trace.reader.Load() != nil {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil, false
	}
	// Recycle the old buffer; the caller is done with it as of this call.
	if buf := trace.reading; buf != nil {
		buf.link = trace.empty
		trace.empty = buf
		trace.reading = nil
	}
	// Write trace header on the very first read.
	if !trace.headerWritten {
		trace.headerWritten = true
		unlock(&trace.lock)
		return []byte("go 1.26 trace\x00\x00\x00"), false
	}
	// Read the next buffer.

	if trace.readerGen.Load() == 0 {
		trace.readerGen.Store(1)
	}
	var gen uintptr
	for {
		assertLockHeld(&trace.lock)
		gen = trace.readerGen.Load()

		// Check to see if we need to block for more data in this generation
		// or if we need to move our generation forward.
		if !trace.full[gen%2].empty() {
			break
		}
		// Most of the time readerGen is one generation ahead of flushedGen, as the
		// current generation is being read from. Then, once the last buffer is flushed
		// into readerGen, flushedGen will rise to meet it. At this point, the tracer
		// is waiting on the reader to finish flushing the last generation so that it
		// can continue to advance.
		if trace.flushedGen.Load() == gen {
			// Write out the internal in-band end-of-generation signal.
			if !trace.endOfGenerationWritten {
				trace.endOfGenerationWritten = true
				unlock(&trace.lock)
				return []byte{byte(tracev2.EvEndOfGeneration)}, false
			}
			// Reset the flag for the next generation.
			trace.endOfGenerationWritten = false

			// Handle shutdown.
			if trace.shutdown.Load() {
				unlock(&trace.lock)

				// Wake up anyone waiting for us to be done with this generation.
				//
				// Do this after reading trace.shutdown, because the thread we're
				// waking up is going to clear trace.shutdown.
				if raceenabled {
					// Model synchronization on trace.doneSema, which the race
					// detector does not see. This is required to avoid false
					// race reports on writer passed to trace.Start.
					racerelease(unsafe.Pointer(&trace.doneSema[gen%2]))
				}
				semrelease(&trace.doneSema[gen%2])

				// We're shutting down, and the last generation is fully
				// read. We're done.
				return nil, false
			}
			// Handle advancing to the next generation.
			// The previous gen has had all of its buffers flushed, and
			// there's nothing else for us to read. Advance the generation
			// we're reading from and try again.
			trace.readerGen.Store(trace.gen.Load())
			unlock(&trace.lock)

			// Wake up anyone waiting for us to be done with this generation.
			//
			// Do this after reading gen to make sure we can't have the trace
			// advance until we've read it.
			if raceenabled {
				// See comment above in the shutdown case.
				racerelease(unsafe.Pointer(&trace.doneSema[gen%2]))
			}
			semrelease(&trace.doneSema[gen%2])

			// Reacquire the lock and go back to the top of the loop.
			lock(&trace.lock)
			continue
		}
		// Wait for new data.
		//
		// We don't simply use a note because the scheduler
		// executes this goroutine directly when it wakes up
		// (also a note would consume an M).
		//
		// Before we drop the lock, clear the workAvailable flag. Work can
		// only be queued with trace.lock held, so this is at least true until
		// we drop the lock.
		trace.workAvailable.Store(false)
		unlock(&trace.lock)
		return nil, true
	}
	// Pull a buffer. It is recycled on the next call (see top of function).
	tbuf := trace.full[gen%2].pop()
	trace.reading = tbuf
	unlock(&trace.lock)
	return tbuf.arr[:tbuf.pos], false
}
// traceReader returns the trace reader that should be woken up, if any.
// Callers should first check (traceEnabled() || traceShuttingDown()).
//
// This must run on the system stack because it acquires trace.lock.
//
//go:systemstack
func traceReader() *g {
	gp := traceReaderAvailable()
	if gp == nil {
		// No reader is parked, or it shouldn't be scheduled.
		return nil
	}
	if !trace.reader.CompareAndSwapNoWB(gp, nil) {
		// Someone else claimed the reader first.
		return nil
	}
	return gp
}
// traceReaderAvailable returns the trace reader if it is not currently
// scheduled and should be. Callers should first check that
// (traceEnabled() || traceShuttingDown()) is true.
func traceReaderAvailable() *g {
	// There are exactly two conditions under which we definitely want to
	// schedule the reader, and we must be careful not to schedule it otherwise:
	//
	//   1. The reader is lagging behind in finishing off the last generation.
	//      In this case, trace buffers could even be empty, but the trace
	//      advancer will be waiting on the reader, so we have to make sure
	//      to schedule the reader ASAP.
	//   2. The reader has pending work to process for its reader generation
	//      (assuming readerGen is not lagging behind).
	readerLagging := trace.flushedGen.Load() == trace.readerGen.Load()
	if readerLagging || trace.workAvailable.Load() {
		return trace.reader.Load()
	}
	return nil
}
// Trace advancer goroutine.
var traceAdvancer traceAdvancerState

// traceAdvancerState owns the background goroutine that periodically calls
// traceAdvance to move the tracer to a new generation. See start and stop.
type traceAdvancerState struct {
	// timer wakes the advancer goroutine periodically, and early on stop.
	timer *wakeableSleep
	// done is signaled exactly once by the advancer goroutine when it exits.
	done chan struct{}
}
// start starts a new traceAdvancer.
func (s *traceAdvancerState) start() {
	// Start a goroutine to periodically advance the trace generation.
	s.done = make(chan struct{})
	s.timer = newWakeableSleep()
	go func() {
		for {
			if !traceEnabled() {
				break
			}
			// Sleep until the next advance period, or until woken early.
			s.timer.sleep(int64(debug.traceadvanceperiod))
			// Try to advance the trace.
			traceAdvance(false)
		}
		// Tell stop that we're done.
		s.done <- struct{}{}
	}()
}
// stop stops a traceAdvancer and blocks until it exits.
//
// Must only be called after a corresponding call to start.
func (s *traceAdvancerState) stop() {
	// Wake the advancer out of its sleep so it can observe that tracing
	// is disabled and exit its loop.
	s.timer.wake()
	// Wait for the advancer goroutine to signal that it has exited.
	<-s.done
	close(s.done)
	s.timer.close()
}
// defaultTraceAdvancePeriod is the approximate period between
// new generations.
const defaultTraceAdvancePeriod = 1e9 // 1 second.
// wakeableSleep manages a wakeable goroutine sleep.
//
// Users of this type must call init before first use and
// close to free up resources. Once close is called, init
// must be called before another use.
//
// NOTE(review): instances are created via newWakeableSleep, which performs
// the initialization referred to above — confirm there is no separate init.
type wakeableSleep struct {
	// timer fires after the sleep duration and calls wake.
	timer *timer

	// lock protects access to wakeup, but not send/recv on it.
	lock   mutex
	wakeup chan struct{}
}
// newWakeableSleep initializes a new wakeableSleep and returns it.
func newWakeableSleep() *wakeableSleep {
	s := &wakeableSleep{
		wakeup: make(chan struct{}, 1),
		timer:  new(timer),
	}
	lockInit(&s.lock, lockRankWakeableSleep)
	// When the timer fires, it wakes whoever is sleeping on s.
	s.timer.init(func(arg any, _ uintptr, _ int64) {
		arg.(*wakeableSleep).wake()
	}, s)
	return s
}
// sleep sleeps for the provided duration in nanoseconds or until
// another goroutine calls wake.
//
// Must not be called by more than one goroutine at a time and
// must not be called concurrently with close.
func (s *wakeableSleep) sleep(ns int64) {
	// Arm the timer to fire (and wake us via the timer callback) after ns.
	s.timer.reset(nanotime()+ns, 0)
	lock(&s.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&s.lock))
	}
	// Snapshot the channel under the lock (close may set s.wakeup to nil);
	// the receive below is deliberately performed without holding the lock.
	wakeup := s.wakeup
	if raceenabled {
		racerelease(unsafe.Pointer(&s.lock))
	}
	unlock(&s.lock)
	// Block until the timer fires or wake is called.
	<-wakeup
	s.timer.stop()
}
// wake awakens any goroutine sleeping on the timer.
//
// Safe for concurrent use with all other methods.
func (s *wakeableSleep) wake() {
	// Grab the wakeup channel, which may be nil if we're
	// racing with close.
	lock(&s.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&s.lock))
	}
	if s.wakeup != nil {
		// Non-blocking send.
		//
		// Others may also write to this channel and we don't
		// want to block on the receiver waking up. This also
		// effectively batches together wakeup notifications.
		select {
		case s.wakeup <- struct{}{}:
		default:
		}
	}
	if raceenabled {
		racerelease(unsafe.Pointer(&s.lock))
	}
	unlock(&s.lock)
}
// close wakes any goroutine sleeping on the timer and prevents
// further sleeping on it.
//
// Once close is called, the wakeableSleep must no longer be used.
//
// It must only be called once no goroutine is sleeping on the
// timer *and* nothing else will call wake concurrently.
func (s *wakeableSleep) close() {
	// Set wakeup to nil so that a late timer ends up being a no-op.
	lock(&s.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&s.lock))
	}
	wakeup := s.wakeup
	s.wakeup = nil

	// Close the channel. Any sleeper blocked on it unblocks immediately.
	close(wakeup)

	if raceenabled {
		racerelease(unsafe.Pointer(&s.lock))
	}
	unlock(&s.lock)
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"context"
"fmt"
"sync/atomic"
_ "unsafe"
)
// traceContextKey is the private context key under which a *Task is stored
// in a context.Context by NewTask.
type traceContextKey struct{}
// NewTask creates a task instance with the type taskType and returns
// it along with a Context that carries the task.
// If the input context contains a task, the new task is its subtask.
//
// The taskType is used to classify task instances. Analysis tools
// like the Go execution tracer may assume there are only a bounded
// number of unique task types in the system.
//
// The returned Task's [Task.End] method is used to mark the task's end.
// The trace tool measures task latency as the time between task creation
// and when the End method is called, and provides the latency
// distribution per task type.
// If the End method is called multiple times, only the first
// call is used in the latency measurement.
//
//	ctx, task := trace.NewTask(ctx, "awesomeTask")
//	trace.WithRegion(ctx, "preparation", prepWork)
//	// preparation of the task
//	go func() { // continue processing the task in a separate goroutine.
//		defer task.End()
//		trace.WithRegion(ctx, "remainingWork", remainingWork)
//	}()
func NewTask(pctx context.Context, taskType string) (ctx context.Context, task *Task) {
	// We allocate a new task even when tracing is disabled, because the
	// context and task can be used across trace enable/disable boundaries,
	// which complicates the problem.
	//
	// For example, consider the following scenario:
	//   - trace is enabled.
	//   - trace.WithRegion is called, so a new context ctx
	//     with a new region is created.
	//   - trace is disabled.
	//   - trace is enabled again.
	//   - trace APIs with the ctx is called. Is the ID in the task
	//     a valid one to use?
	//
	// TODO(hyangah): reduce the overhead at least when
	// tracing is disabled. Maybe the id can embed a tracing
	// round number and ignore ids generated from previous
	// tracing round.
	parentID := fromContext(pctx).id
	t := &Task{id: newID()}
	userTaskCreate(t.id, parentID, taskType)
	return context.WithValue(pctx, traceContextKey{}, t), t
}
// fromContext returns the Task carried by ctx, or the shared background
// task if ctx carries none.
func fromContext(ctx context.Context) *Task {
	task, ok := ctx.Value(traceContextKey{}).(*Task)
	if !ok {
		return &bgTask
	}
	return task
}
// Task is a data type for tracing a user-defined, logical operation.
type Task struct {
	// id is the process-unique task ID issued by newID; 0 is the
	// background task (bgTask).
	id uint64
	// TODO(hyangah): record parent id?
}
// End marks the end of the operation represented by the [Task].
func (t *Task) End() {
	// Emits a UserTaskEnd event for this task's ID.
	userTaskEnd(t.id)
}
var lastTaskID uint64 = 0 // task id issued last time

// newID returns a new, process-unique task ID. IDs start at 1;
// 0 is reserved for the background task (bgTask).
func newID() uint64 {
	// TODO(hyangah): use per-P cache
	return atomic.AddUint64(&lastTaskID, 1)
}
// bgTask is the background task with the reserved ID 0, used when a context
// carries no explicit task (see fromContext). The zero value suffices; the
// previous explicit uint64(0) conversion was redundant.
var bgTask = Task{}
// Log emits a one-off event with the given category and message.
// Category can be empty and the API assumes there are only a handful of
// unique categories in the system.
func Log(ctx context.Context, category, message string) {
id := fromContext(ctx).id
userLog(id, category, message)
}
// Logf is like [Log], but the value is formatted using the specified format spec.
func Logf(ctx context.Context, category, format string, args ...any) {
	if !IsEnabled() {
		return
	}
	// Ideally this would simply call Log, but that would add one more
	// frame in the stack trace.
	taskID := fromContext(ctx).id
	userLog(taskID, category, fmt.Sprintf(format, args...))
}
// Mode values passed to userRegion: a region event marks either the start
// or the end of a region.
const (
	regionStartCode = uint64(0)
	regionEndCode   = uint64(1)
)
// WithRegion starts a region associated with its calling goroutine, runs fn,
// and then ends the region. If the context carries a task, the region is
// associated with the task. Otherwise, the region is attached to the background
// task.
//
// The regionType is used to classify regions, so there should be only a
// handful of unique region types.
func WithRegion(ctx context.Context, regionType string, fn func()) {
	// NOTE:
	// WithRegion helps avoid misuse of the API, but in practice it is
	// quite restrictive:
	// - The stack traces captured at region start and at region end
	//   are identical.
	// - Refactoring existing code to use WithRegion can be hard and can
	//   hurt readability, e.g. for code blocks nested deep in loops with
	//   multiple exit points and return values.
	// - Wrapping code in a closure can change GC behavior, such as
	//   retaining some parameters for longer.
	// This causes more churn in code than hoped, and sometimes makes the
	// code less readable.
	taskID := fromContext(ctx).id
	userRegion(taskID, regionStartCode, regionType)
	defer userRegion(taskID, regionEndCode, regionType)
	fn()
}
// StartRegion starts a region and returns it.
// The returned Region's [Region.End] method must be called
// from the same goroutine where the region was started.
// Within each goroutine, regions must nest. That is, regions started
// after this region must be ended before this region can be ended.
// Recommended usage is
//
//	defer trace.StartRegion(ctx, "myTracedRegion").End()
func StartRegion(ctx context.Context, regionType string) *Region {
	if !IsEnabled() {
		// Avoid any work (and allocation) when tracing is off.
		return noopRegion
	}
	taskID := fromContext(ctx).id
	userRegion(taskID, regionStartCode, regionType)
	return &Region{id: taskID, regionType: regionType}
}
// Region is a region of code whose execution time interval is traced.
type Region struct {
	// id is the ID of the task this region is attached to.
	id uint64
	// regionType classifies the region; passed back to userRegion at End.
	regionType string
}

// noopRegion is the sentinel returned by StartRegion when tracing is
// disabled; its End method does nothing.
var noopRegion = &Region{}
// End marks the end of the traced code region.
func (r *Region) End() {
	// The sentinel returned by StartRegion when tracing was off does nothing.
	if r != noopRegion {
		userRegion(r.id, regionEndCode, r.regionType)
	}
}
// IsEnabled reports whether tracing is enabled.
// The information is advisory only. The tracing status
// may have changed by the time this function returns.
func IsEnabled() bool {
	// Atomic load; safe to call from any goroutine.
	return tracing.enabled.Load()
}
//
// Function bodies are defined in runtime/trace.go
//

// userTaskCreate emits a UserTaskCreate event. Body provided by the runtime
// (see note above).
func userTaskCreate(id, parentID uint64, taskType string)

// userTaskEnd emits a UserTaskEnd event.
func userTaskEnd(id uint64)

// userRegion emits a UserRegion event. mode is regionStartCode or regionEndCode.
func userRegion(id, mode uint64, regionType string)

// userLog emits a UserLog event.
func userLog(id uint64, category, message string)
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"fmt"
"internal/trace/tracev2"
)
// timestamp is an unprocessed timestamp, as read directly from a batch
// header (not yet converted to wall-clock time).
type timestamp uint64
// batch is a single trace batch along with the fields parsed from its header.
type batch struct {
	time timestamp // timestamp from the batch header
	gen  uint64    // generation the batch belongs to
	data []byte    // copy of the raw batch bytes, including the header
}
// readBatch copies b and parses the trace batch header inside.
// Returns the batch, bytes read, and an error.
//
// b must contain exactly one complete batch: the final size check below
// rejects input that is shorter or longer than the single batch described
// by its header.
func readBatch(b []byte) (batch, uint64, error) {
	if len(b) == 0 {
		return batch{}, 0, fmt.Errorf("batch is empty")
	}
	// Take a private copy up front so the returned batch never aliases the
	// caller's buffer.
	data := make([]byte, len(b))
	copy(data, b)

	// Read batch header byte.
	if typ := tracev2.EventType(b[0]); typ == tracev2.EvEndOfGeneration {
		// The end-of-generation signal is a 1-byte pseudo-batch.
		if len(b) != 1 {
			return batch{}, 1, fmt.Errorf("unexpected end of generation in batch of size >1")
		}
		return batch{data: data}, 1, nil
	}
	if typ := tracev2.EventType(b[0]); typ != tracev2.EvEventBatch && typ != tracev2.EvExperimentalBatch {
		return batch{}, 1, fmt.Errorf("expected batch event, got event %d", typ)
	}

	// total tracks bytes consumed so far, so error returns can report how
	// far parsing got.
	total := 1
	b = b[1:]

	// Read the generation
	gen, n, err := readUvarint(b)
	if err != nil {
		return batch{}, uint64(total + n), fmt.Errorf("error reading batch gen: %w", err)
	}
	total += n
	b = b[n:]

	// Read the M (discard it).
	_, n, err = readUvarint(b)
	if err != nil {
		return batch{}, uint64(total + n), fmt.Errorf("error reading batch M ID: %w", err)
	}
	total += n
	b = b[n:]

	// Read the timestamp.
	ts, n, err := readUvarint(b)
	if err != nil {
		return batch{}, uint64(total + n), fmt.Errorf("error reading batch timestamp: %w", err)
	}
	total += n
	b = b[n:]

	// Read the size of the batch to follow.
	size, n, err := readUvarint(b)
	if err != nil {
		return batch{}, uint64(total + n), fmt.Errorf("error reading batch size: %w", err)
	}
	if size > tracev2.MaxBatchSize {
		return batch{}, uint64(total + n), fmt.Errorf("invalid batch size %d, maximum is %d", size, tracev2.MaxBatchSize)
	}
	total += n
	total += int(size)
	// Require that b contained exactly this one batch (header + payload).
	if total != len(data) {
		return batch{}, uint64(total), fmt.Errorf("expected complete batch")
	}
	// NOTE(review): given the equality check above, this slicing is a no-op;
	// presumably kept for safety if the check is ever relaxed.
	data = data[:total]

	// Return the batch.
	return batch{
		gen:  gen,
		time: timestamp(ts),
		data: data,
	}, uint64(total), nil
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"errors"
)
// maxVarintLen64 is the maximum length of a varint-encoded 64-bit integer.
const maxVarintLen64 = 10

var (
	errOverflow = errors.New("binary: varint overflows a 64-bit integer")
	errEOB      = errors.New("binary: end of buffer")
)

// readUvarint decodes a uvarint from the front of b, returning the value,
// the number of bytes consumed, and an error.
//
// It returns errOverflow if the encoding exceeds 64 bits, and errEOB if b
// ends before the terminating byte (previously this case was misreported
// as errOverflow, leaving errEOB unused).
//
// TODO deduplicate this function.
func readUvarint(b []byte) (uint64, int, error) {
	var x uint64
	var s uint
	for i := 0; i < maxVarintLen64 && i < len(b); i++ {
		byt := b[i]
		if byt < 0x80 {
			// Terminating byte. A 10th byte may only contribute one bit.
			if i == maxVarintLen64-1 && byt > 1 {
				return x, i, errOverflow
			}
			return x | uint64(byt)<<s, i + 1, nil
		}
		// Continuation byte: accumulate its low 7 bits.
		x |= uint64(byt&0x7f) << s
		s += 7
	}
	if len(b) < maxVarintLen64 {
		// Buffer ran out before we saw a terminating byte.
		return x, len(b), errEOB
	}
	// Ten continuation bytes in a row: the value cannot fit in 64 bits.
	return x, maxVarintLen64, errOverflow
}
// putUvarint encodes a uint64 into buf and returns the number of bytes written.
// If the buffer is too small, putUvarint will panic.
// TODO deduplicate this function.
func putUvarint(buf []byte, x uint64) int {
	// Emit 7 bits per byte, least-significant group first, setting the
	// continuation bit (0x80) on all but the final byte.
	n := 0
	for ; x >= 0x80; n++ {
		buf[n] = byte(x) | 0x80
		x >>= 7
	}
	buf[n] = byte(x)
	return n + 1
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"fmt"
"io"
"sync"
"time"
_ "unsafe" // added for go linkname usage
)
// FlightRecorder represents a single consumer of a Go execution
// trace.
// It tracks a moving window over the execution trace produced by
// the runtime, always containing the most recent trace data.
//
// At most one flight recorder may be active at any given time,
// though flight recording is allowed to be concurrently active
// with a trace consumer using trace.Start.
// This restriction of only a single flight recorder may be removed
// in the future.
type FlightRecorder struct {
	// err holds a sticky error from recording, cleared on Start.
	err error

	// State specific to the recorder.
	header [16]byte      // trace file header bytes written first by WriteTo
	active rawGeneration // generation currently being accumulated
	ringMu sync.Mutex    // guards ring
	ring   []rawGeneration
	freq   frequency // timestamp conversion factor, from the runtime

	// Externally-set options.
	targetSize   uint64
	targetPeriod time.Duration

	enabled bool       // whether the flight recorder is enabled.
	writing sync.Mutex // protects concurrent calls to WriteTo

	// The values of targetSize and targetPeriod we've committed to since the last Start.
	wantSize uint64
	wantDur  time.Duration
}
// NewFlightRecorder creates a new flight recorder from the provided configuration.
func NewFlightRecorder(cfg FlightRecorderConfig) *FlightRecorder {
	// Start from the defaults, then apply whatever the config overrides.
	fr := &FlightRecorder{
		targetSize:   10 << 20, // 10 MiB.
		targetPeriod: 10 * time.Second,
	}
	if cfg.MaxBytes != 0 {
		fr.targetSize = cfg.MaxBytes
	}
	if cfg.MinAge != 0 {
		fr.targetPeriod = cfg.MinAge
	}
	return fr
}
// Start activates the flight recorder and begins recording trace data.
// Only one call to trace.Start may be active at any given time.
// In addition, currently only one flight recorder may be active in the program.
// Returns an error if the flight recorder cannot be started or is already started.
func (fr *FlightRecorder) Start() error {
	if fr.enabled {
		// Fixed grammar in this message ("a enabled" -> "an already enabled").
		return fmt.Errorf("cannot enable an already enabled flight recorder")
	}

	// Latch the configured window parameters for this run.
	fr.wantSize = fr.targetSize
	fr.wantDur = fr.targetPeriod
	fr.err = nil
	// Conversion factor from trace clock units to nanoseconds.
	fr.freq = frequency(1.0 / (float64(runtime_traceClockUnitsPerSecond()) / 1e9))

	// Start tracing, data is sent to a recorder which forwards it to our own
	// storage.
	if err := tracing.subscribeFlightRecorder(&recorder{r: fr}); err != nil {
		return err
	}

	fr.enabled = true
	return nil
}
// Stop ends recording of trace data. It blocks until any concurrent WriteTo calls complete.
func (fr *FlightRecorder) Stop() {
	// Stopping a recorder that isn't running is a no-op.
	if !fr.enabled {
		return
	}
	fr.enabled = false
	tracing.unsubscribeFlightRecorder()

	// Reset all state. No need to lock because the reader has already exited.
	fr.active = rawGeneration{}
	fr.ring = nil
}
// Enabled returns true if the flight recorder is active.
// Specifically, it will return true if Start did not return an error, and Stop has not yet been called.
// It is safe to call from multiple goroutines simultaneously.
//
// NOTE(review): fr.enabled is a plain bool read without synchronization here;
// confirm that racing with Start/Stop is acceptable for this API's guarantee.
func (fr *FlightRecorder) Enabled() bool { return fr.enabled }
// WriteTo snapshots the moving window tracked by the flight recorder.
// The snapshot is expected to contain data that is up-to-date as of when WriteTo is called,
// though this is not a hard guarantee.
// Only one goroutine may execute WriteTo at a time.
// An error is returned upon failure to write to w, if another WriteTo call is already in-progress,
// or if the flight recorder is inactive.
func (fr *FlightRecorder) WriteTo(w io.Writer) (n int64, err error) {
	if !fr.enabled {
		return 0, fmt.Errorf("cannot snapshot a disabled flight recorder")
	}
	if !fr.writing.TryLock() {
		// Indicates that a call to WriteTo was made while one was already in progress.
		// If the caller of WriteTo sees this error, they should use the result from the other call to WriteTo.
		return 0, fmt.Errorf("call to WriteTo for trace.FlightRecorder already in progress")
	}
	defer fr.writing.Unlock()

	// Force a global buffer flush twice. The guarantee documented below
	// depends on two flushes: the second flush cannot complete until the
	// first has been fully processed by the recorder goroutine.
	runtime_traceAdvance(false)
	runtime_traceAdvance(false)

	// Now that everything has been flushed and written, grab whatever we have.
	//
	// N.B. traceAdvance blocks until the tracer goroutine has actually written everything
	// out, which means the generation we just flushed must have already been observed
	// by the recorder goroutine. Because we flushed twice, the first flush is guaranteed to
	// have been both completed *and* processed by the recorder goroutine.
	fr.ringMu.Lock()
	gens := fr.ring
	fr.ringMu.Unlock()

	// Write the header.
	nw, err := w.Write(fr.header[:])
	if err != nil {
		return int64(nw), err
	}
	n += int64(nw)

	// Write all the data.
	for _, gen := range gens {
		for _, data := range gen.batches {
			// Write batch data.
			nw, err = w.Write(data)
			n += int64(nw)
			if err != nil {
				return n, err
			}
		}
	}
	return n, nil
}
// FlightRecorderConfig provides configuration for a FlightRecorder.
// The zero value selects an implementation-defined default for every option
// (see NewFlightRecorder).
type FlightRecorderConfig struct {
	// MinAge is a lower bound on the age of an event in the flight recorder's window.
	//
	// The flight recorder will strive to promptly discard events older than the minimum age,
	// but older events may appear in the window snapshot. The age setting will always be
	// overridden by MaxBytes.
	//
	// If this is 0, the minimum age is implementation defined, but can be assumed to be on the order
	// of seconds.
	MinAge time.Duration

	// MaxBytes is an upper bound on the size of the window in bytes.
	//
	// This setting takes precedence over MinAge.
	// However, it does not make any guarantees on the size of the data WriteTo will write,
	// nor does it guarantee memory overheads will always stay below MaxBytes. Treat it
	// as a hint.
	//
	// If this is 0, the maximum size is implementation defined.
	MaxBytes uint64
}
// runtime_traceClockUnitsPerSecond reports the rate of the runtime's trace
// clock; Start uses it to derive the trace-clock-to-nanoseconds conversion
// factor. Note the one-argument linkname form: presumably the runtime pushes
// its implementation to this symbol with its own //go:linkname — TODO confirm.
//
//go:linkname runtime_traceClockUnitsPerSecond
func runtime_traceClockUnitsPerSecond() uint64

// runtime_traceAdvance pulls runtime.traceAdvance, which forces the current
// trace generation to be flushed (optionally stopping tracing entirely).
//
//go:linkname runtime_traceAdvance runtime.traceAdvance
func runtime_traceAdvance(stopTrace bool)
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"fmt"
"slices"
"time"
_ "unsafe" // added for go linkname usage
)
// A recorder receives bytes from the runtime tracer, processes it.
type recorder struct {
	r *FlightRecorder // destination for parsed batches and latched errors.

	headerReceived bool // whether the 16-byte trace header has been consumed.
}
// Write implements io.Writer, receiving trace bytes from the runtime's trace
// reader goroutine. The first write must contain at least the 16-byte trace
// header; subsequent bytes are expected to arrive one complete batch per
// write. Any error is also latched into r.err for the FlightRecorder.
func (w *recorder) Write(b []byte) (n int, err error) {
	r := w.r

	defer func() {
		if err != nil {
			// Propagate errors to the flightrecorder.
			if r.err == nil {
				r.err = err
			}
		}
	}()

	if !w.headerReceived {
		if len(b) < len(r.header) {
			return 0, fmt.Errorf("expected at least %d bytes in the first write", len(r.header))
		}
		r.header = ([16]byte)(b[:16])
		n += 16
		w.headerReceived = true
	}
	if len(b) == n {
		// Header-only write; nothing else to parse.
		return n, nil
	}
	ba, nb, err := readBatch(b[n:]) // Every write from the runtime is guaranteed to be a complete batch.
	if err != nil {
		// NOTE(review): this reports len(b)-nb-n consumed bytes on a parse
		// error; confirm this accounting is intended rather than n+nb.
		return len(b) - int(nb) - n, err
	}
	n += int(nb)

	// Append the batch to the current generation.
	if ba.gen != 0 && r.active.gen == 0 {
		// First numbered batch of this generation: adopt its generation number.
		r.active.gen = ba.gen
	}
	if ba.time != 0 && (r.active.minTime == 0 || r.active.minTime > r.freq.mul(ba.time)) {
		// Track the earliest (nanosecond-converted) batch timestamp seen.
		r.active.minTime = r.freq.mul(ba.time)
	}
	r.active.size += len(ba.data)
	r.active.batches = append(r.active.batches, ba.data)

	return len(b), nil
}
// endGeneration rotates the active generation into the ring, trimming older
// generations once both the size and duration targets are exceeded. It is
// called by the trace reader goroutine on an end-of-generation signal.
func (w *recorder) endGeneration() {
	r := w.r

	// Check if we're entering a new generation.
	r.ringMu.Lock()

	// Get the current trace clock time.
	now := traceTimeNow(r.freq)

	// Add the current generation to the ring. Make sure we always have at least one
	// complete generation by putting the active generation onto the new list, regardless
	// of whatever our settings are.
	//
	// N.B. Let's completely replace the ring here, so that WriteTo can just make a copy
	// and not worry about aliasing. This creates allocations, but at a very low rate.
	newRing := []rawGeneration{r.active}
	size := r.active.size
	for i := len(r.ring) - 1; i >= 0; i-- {
		// Stop adding older generations if the new ring already exceeds the thresholds.
		// This ensures we keep generations that cross a threshold, but not any that lie
		// entirely outside it.
		if uint64(size) > r.wantSize || now.Sub(newRing[len(newRing)-1].minTime) > r.wantDur {
			break
		}
		size += r.ring[i].size
		newRing = append(newRing, r.ring[i])
	}
	// The loop appended newest-to-oldest; restore chronological order.
	slices.Reverse(newRing)
	r.ring = newRing
	r.ringMu.Unlock()

	// Start a new active generation.
	r.active = rawGeneration{}
}
// rawGeneration holds one trace generation's worth of unparsed batch data,
// as accumulated by the recorder.
type rawGeneration struct {
	gen     uint64    // generation number, taken from the first numbered batch.
	size    int       // total bytes across all batches.
	minTime eventTime // earliest converted batch timestamp seen; 0 means unset.
	batches [][]byte
}
// traceTimeNow returns the current reading of the runtime's trace clock,
// converted to nanoseconds using the supplied conversion factor.
func traceTimeNow(freq frequency) eventTime {
	now := timestamp(runtime_traceClockNow())
	return freq.mul(now)
}
// runtime_traceClockNow pulls runtime.traceClockNow: the current reading of
// the trace clock in raw, unconverted trace-clock units (see traceTimeNow).
//
//go:linkname runtime_traceClockNow runtime.traceClockNow
func runtime_traceClockNow() uint64
// frequency is nanoseconds per timestamp unit.
// It is computed once per Start from runtime_traceClockUnitsPerSecond.
type frequency float64
// mul converts an unprocessed timestamp t into nanoseconds by scaling it
// with the conversion factor f (nanoseconds per timestamp unit).
func (f frequency) mul(t timestamp) eventTime {
	return eventTime(float64(f) * float64(t))
}
// eventTime is a timestamp in nanoseconds.
//
// It corresponds to the monotonic clock on the platform that the
// trace was taken, and so is possible to correlate with timestamps
// for other traces taken on the same machine using the same clock
// (i.e. no reboots in between).
//
// The actual absolute value of the timestamp is only meaningful in
// relation to other timestamps from the same clock.
//
// The zero value is used by the recorder to mean "unset" (see
// rawGeneration.minTime).
//
// BUG: Timestamps coming from traces on Windows platforms are
// only comparable with timestamps from the same trace. Timestamps
// across traces cannot be compared, because the system clock is
// not used as of Go 1.22.
//
// BUG: Traces produced by Go versions 1.21 and earlier cannot be
// compared with timestamps from other traces taken on the same
// machine. This is because the system clock was not used at all
// to collect those timestamps.
type eventTime int64
// Sub returns the difference t - t0 as a duration in nanoseconds.
func (t eventTime) Sub(t0 eventTime) time.Duration {
	return time.Duration(t) - time.Duration(t0)
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"fmt"
"internal/trace/tracev2"
"io"
"runtime"
"sync"
"sync/atomic"
_ "unsafe"
)
// tracing is the process-global multiplexer shared by trace.Start and the
// flight recorder.
var tracing traceMultiplexer

// traceMultiplexer fans runtime trace output out to up to two subscribers:
// a trace.Start writer and a flight recorder. The embedded Mutex serializes
// subscribe/unsubscribe operations; subscribersMu guards the subscriber
// pointers, which are also read by the trace reader goroutine (startLocked).
type traceMultiplexer struct {
	sync.Mutex
	enabled          atomic.Bool // whether the runtime tracer is currently running.
	subscribers      int         // number of active subscribers; mutated under the embedded Mutex.
	subscribersMu    sync.Mutex
	traceStartWriter io.Writer
	flightRecorder   *recorder
}
// subscribeFlightRecorder attaches r as the flight recorder subscriber,
// starting the runtime tracer if it was not already running. It returns an
// error if a flight recorder is already subscribed or tracing cannot start.
func (t *traceMultiplexer) subscribeFlightRecorder(r *recorder) error {
	t.Lock()
	defer t.Unlock()

	t.subscribersMu.Lock()
	if t.flightRecorder != nil {
		t.subscribersMu.Unlock()
		return fmt.Errorf("flight recorder already enabled")
	}
	// Publish the subscriber before starting, so the reader goroutine
	// spawned (or advanced) by addedSubscriber can observe it.
	t.flightRecorder = r
	t.subscribersMu.Unlock()

	if err := t.addedSubscriber(); err != nil {
		// Roll back the registration on failure.
		t.subscribersMu.Lock()
		t.flightRecorder = nil
		t.subscribersMu.Unlock()
		return err
	}
	return nil
}
// unsubscribeFlightRecorder detaches the flight recorder subscriber, stopping
// the runtime tracer if it was the last subscriber.
//
// NOTE(review): removingSubscriber decrements the subscriber count before the
// nil check below; confirm the early-error path cannot be reached with a live
// traceStartWriter, since that would unbalance the count.
func (t *traceMultiplexer) unsubscribeFlightRecorder() error {
	t.Lock()
	defer t.Unlock()

	t.removingSubscriber()

	t.subscribersMu.Lock()
	if t.flightRecorder == nil {
		t.subscribersMu.Unlock()
		return fmt.Errorf("attempt to unsubscribe missing flight recorder")
	}
	t.flightRecorder = nil
	t.subscribersMu.Unlock()

	t.removedSubscriber()
	return nil
}
// subscribeTraceStartWriter attaches w as the trace.Start subscriber,
// starting the runtime tracer if it was not already running. It returns an
// error if a writer is already subscribed or tracing cannot start.
func (t *traceMultiplexer) subscribeTraceStartWriter(w io.Writer) error {
	t.Lock()
	defer t.Unlock()

	t.subscribersMu.Lock()
	if t.traceStartWriter != nil {
		t.subscribersMu.Unlock()
		return fmt.Errorf("execution tracer already enabled")
	}
	// Publish the subscriber before starting, so the reader goroutine
	// spawned (or advanced) by addedSubscriber can observe it.
	t.traceStartWriter = w
	t.subscribersMu.Unlock()

	if err := t.addedSubscriber(); err != nil {
		// Roll back the registration on failure.
		t.subscribersMu.Lock()
		t.traceStartWriter = nil
		t.subscribersMu.Unlock()
		return err
	}
	return nil
}
// unsubscribeTraceStartWriter detaches the trace.Start writer, stopping the
// runtime tracer if it was the last subscriber. It is a no-op if no writer
// is currently subscribed.
func (t *traceMultiplexer) unsubscribeTraceStartWriter() {
	t.Lock()
	defer t.Unlock()

	t.removingSubscriber()

	t.subscribersMu.Lock()
	if t.traceStartWriter == nil {
		t.subscribersMu.Unlock()
		return
	}
	t.traceStartWriter = nil
	t.subscribersMu.Unlock()

	t.removedSubscriber()
	// (A redundant trailing return was removed; staticcheck S1023.)
}
// addedSubscriber accounts for a newly registered subscriber, starting the
// runtime tracer if this is the first one. Callers must hold t.Mutex.
func (t *traceMultiplexer) addedSubscriber() error {
	if !t.enabled.Load() {
		// Tracing is off; bring it up for this first subscriber.
		if err := t.startLocked(); err != nil {
			return err
		}
	} else {
		// Tracing is already running; force a generation boundary so the
		// trace reader goroutine picks up on the new subscriber.
		runtime_traceAdvance(false)
	}
	t.subscribers++
	return nil
}
// removingSubscriber accounts for a subscriber that is being removed,
// stopping the runtime tracer entirely when the last one goes away.
// Callers must hold t.Mutex.
func (t *traceMultiplexer) removingSubscriber() {
	if t.subscribers == 0 {
		return
	}
	t.subscribers--
	if t.subscribers > 0 {
		// Other subscribers remain. Force a generation boundary; this is
		// necessary to avoid missing trace data when the system is under
		// high load.
		runtime_traceAdvance(false)
		return
	}
	// That was the last subscriber: shut the tracer down.
	runtime.StopTrace()
	t.enabled.Store(false)
}
// removedSubscriber finalizes a subscriber removal after the subscriber
// pointer has been cleared. Callers must hold t.Mutex.
func (t *traceMultiplexer) removedSubscriber() {
	if t.subscribers > 0 {
		// This is necessary for the trace reader goroutine to pick up on the
		// subscriber change (tracing continues for the remaining subscribers).
		runtime_traceAdvance(false)
	}
}
// startLocked starts the runtime tracer and spawns the trace reader
// goroutine, which forwards every chunk of trace data to the current
// subscribers. Callers must hold t.Mutex.
func (t *traceMultiplexer) startLocked() error {
	if err := runtime.StartTrace(); err != nil {
		return err
	}
	// Grab the trace reader goroutine's subscribers.
	//
	// We only update our subscribers if we see an end-of-generation
	// signal from the runtime after this, so any new subscriptions
	// or unsubscriptions must call traceAdvance to ensure the reader
	// goroutine sees an end-of-generation signal.
	t.subscribersMu.Lock()
	flightRecorder := t.flightRecorder
	traceStartWriter := t.traceStartWriter
	t.subscribersMu.Unlock()
	go func() {
		// The first chunk is the trace header; every subscriber needs it
		// before any data.
		header := runtime.ReadTrace()
		if traceStartWriter != nil {
			traceStartWriter.Write(header)
		}
		if flightRecorder != nil {
			flightRecorder.Write(header)
		}
		for {
			data := runtime.ReadTrace()
			if data == nil {
				// Tracing was stopped; exit the reader goroutine.
				break
			}
			if traceStartWriter != nil {
				traceStartWriter.Write(data)
			}
			if flightRecorder != nil {
				flightRecorder.Write(data)
			}
			if len(data) == 1 && tracev2.EventType(data[0]) == tracev2.EvEndOfGeneration {
				if flightRecorder != nil {
					flightRecorder.endGeneration()
				}
				// Pick up any changes.
				t.subscribersMu.Lock()
				frIsNew := flightRecorder != t.flightRecorder && t.flightRecorder != nil
				trIsNew := traceStartWriter != t.traceStartWriter && t.traceStartWriter != nil
				flightRecorder = t.flightRecorder
				traceStartWriter = t.traceStartWriter
				t.subscribersMu.Unlock()
				// Subscribers added mid-trace still need the header first.
				if trIsNew {
					traceStartWriter.Write(header)
				}
				if frIsNew {
					flightRecorder.Write(header)
				}
			}
		}
	}()
	t.enabled.Store(true)
	return nil
}
// runtime_readTrace is linked to the runtime's trace reading entry point.
// NOTE(review): nothing in this file calls it — startLocked uses
// runtime.ReadTrace directly; confirm this declaration is still required
// (e.g. as the target of a push-style linkname from the runtime).
//
//go:linkname runtime_readTrace
func runtime_readTrace() (buf []byte)
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package trace contains facilities for programs to generate traces
// for the Go execution tracer.
//
// # Tracing runtime activities
//
// The execution trace captures a wide range of execution events such as
// goroutine creation/blocking/unblocking, syscall enter/exit/block,
// GC-related events, changes of heap size, processor start/stop, etc.
// When CPU profiling is active, the execution tracer makes an effort to
// include those samples as well.
// A precise nanosecond-precision timestamp and a stack trace is
// captured for most events. The generated trace can be interpreted
// using `go tool trace`.
//
// Support for tracing tests and benchmarks built with the standard
// testing package is built into `go test`. For example, the following
// command runs the test in the current directory and writes the trace
// file (trace.out).
//
// go test -trace=trace.out
//
// This runtime/trace package provides APIs to add equivalent tracing
// support to a standalone program. See the Example that demonstrates
// how to use this API to enable tracing.
//
// There is also a standard HTTP interface to trace data. Adding the
// following line will install a handler under the /debug/pprof/trace URL
// to download a live trace:
//
// import _ "net/http/pprof"
//
// See the [net/http/pprof] package for more details about all of the
// debug endpoints installed by this import.
//
// # User annotation
//
// Package trace provides user annotation APIs that can be used to
// log interesting events during execution.
//
// There are three types of user annotations: log messages, regions,
// and tasks.
//
// [Log] emits a timestamped message to the execution trace along with
// additional information such as the category of the message and
// which goroutine called [Log]. The execution tracer provides UIs to filter
// and group goroutines using the log category and the message supplied
// in [Log].
//
// A region is for logging a time interval during a goroutine's execution.
// By definition, a region starts and ends in the same goroutine.
// Regions can be nested to represent subintervals.
// For example, the following code records four regions in the execution
// trace to trace the durations of sequential steps in a cappuccino making
// operation.
//
// trace.WithRegion(ctx, "makeCappuccino", func() {
//
// // orderID allows identifying a specific order
// // among many cappuccino order region records.
// trace.Log(ctx, "orderID", orderID)
//
// trace.WithRegion(ctx, "steamMilk", steamMilk)
// trace.WithRegion(ctx, "extractCoffee", extractCoffee)
// trace.WithRegion(ctx, "mixMilkCoffee", mixMilkCoffee)
// })
//
// A task is a higher-level component that aids tracing of logical
// operations such as an RPC request, an HTTP request, or an
// interesting local operation which may require multiple goroutines
// working together. Since tasks can involve multiple goroutines,
// they are tracked via a [context.Context] object. [NewTask] creates
// a new task and embeds it in the returned [context.Context] object.
// Log messages and regions are attached to the task, if any, in the
// Context passed to [Log] and [WithRegion].
//
// For example, assume that we decided to froth milk, extract coffee,
// and mix milk and coffee in separate goroutines. With a task,
// the trace tool can identify the goroutines involved in a specific
// cappuccino order.
//
// ctx, task := trace.NewTask(ctx, "makeCappuccino")
// trace.Log(ctx, "orderID", orderID)
//
// milk := make(chan bool)
// espresso := make(chan bool)
//
// go func() {
// trace.WithRegion(ctx, "steamMilk", steamMilk)
// milk <- true
// }()
// go func() {
// trace.WithRegion(ctx, "extractCoffee", extractCoffee)
// espresso <- true
// }()
// go func() {
// defer task.End() // When assemble is done, the order is complete.
// <-espresso
// <-milk
// trace.WithRegion(ctx, "mixMilkCoffee", mixMilkCoffee)
// }()
//
// The trace tool computes the latency of a task by measuring the
// time between the task creation and the task end and provides
// latency distributions for each task type found in the trace.
package trace
import (
"io"
)
// Start enables tracing for the current program.
// While tracing, the trace will be buffered and written to w.
// Start returns an error if tracing is already enabled.
func Start(w io.Writer) error {
	// Delegate to the process-global multiplexer, which also arbitrates
	// with any active flight recorder.
	return tracing.subscribeTraceStartWriter(w)
}
// Stop stops the current tracing, if any.
// Stop only returns after all the writes for the trace have completed.
func Stop() {
	// Unsubscribing stops the runtime tracer outright only if this was the
	// last subscriber; a flight recorder may keep tracing alive.
	tracing.unsubscribeTraceStartWriter()
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Runtime -> tracer API for memory events.
package runtime
import (
"internal/abi"
"internal/runtime/gc"
"internal/runtime/sys"
"internal/trace/tracev2"
)
// Batch type values for the alloc/free experiment.
// A batch's leading byte identifies its kind (see traceSnapshotMemory).
const (
	traceAllocFreeTypesBatch = iota // Contains types. [{id, address, size, ptrspan, name length, name string} ...]
	traceAllocFreeInfoBatch         // Contains info for interpreting events. [min heap addr, page size, min heap align, min stack align]
)
// traceSnapshotMemory takes a snapshot of all runtime memory that there are events for
// (heap spans, heap objects, goroutine stacks, etc.) and writes out events for them.
//
// The world must be stopped and tracing must be enabled when this function is called.
func traceSnapshotMemory(gen uintptr) {
	assertWorldStopped()

	// Write a batch containing information that'll be necessary to
	// interpret the events.
	var flushed bool
	w := unsafeTraceExpWriter(gen, nil, tracev2.AllocFree)
	w, flushed = w.ensure(1 + 4*traceBytesPerNumber)
	if flushed {
		// Annotate the batch as containing additional info.
		w.byte(byte(traceAllocFreeInfoBatch))
	}

	// Emit info.
	// ("trace" here is the global runtime trace state; it is shadowed by the
	// local traceAcquire result below.)
	w.varint(trace.minPageHeapAddr)
	w.varint(uint64(pageSize))
	w.varint(uint64(gc.MinHeapAlign))
	w.varint(uint64(fixedStack))

	// Finish writing the batch.
	w.flush().end()

	// Start tracing.
	trace := traceAcquire()
	if !trace.ok() {
		throw("traceSnapshotMemory: tracing is not enabled")
	}

	// Write out all the heap spans and heap objects.
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanDead {
			continue
		}
		// It's some kind of span, so trace that it exists.
		trace.SpanExists(s)

		// Write out allocated objects if it's a heap span.
		if s.state.get() != mSpanInUse {
			continue
		}

		// Find all allocated objects.
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < uintptr(s.nelems); i++ {
			if abits.index < uintptr(s.freeindex) || abits.isMarked() {
				x := s.base() + i*s.elemsize
				trace.HeapObjectExists(x, s.typePointersOfUnchecked(x).typ)
			}
			abits.advance()
		}
	}

	// Write out all the goroutine stacks.
	forEachGRace(func(gp *g) {
		trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo)
	})
	traceRelease(trace)
}
// traceSpanTypeAndClass encodes a span's kind for the trace: an in-use heap
// span encodes its span class shifted left by one (low bit clear); any other
// live span encodes as 1.
func traceSpanTypeAndClass(s *mspan) traceArg {
	if s.state.get() != mSpanInUse {
		return traceArg(1)
	}
	return traceArg(s.spanclass) << 1
}
// SpanExists records an event indicating that the span exists.
// It is emitted during a memory snapshot (see traceSnapshotMemory), as
// opposed to SpanAlloc/SpanFree which record live transitions.
func (tl traceLocker) SpanExists(s *mspan) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvSpan, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
}
// SpanAlloc records an event indicating that the span has just been allocated.
// The event carries the span's ID, page count, and type/class encoding.
func (tl traceLocker) SpanAlloc(s *mspan) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
}
// SpanFree records an event indicating that the span is about to be freed.
// Only the span's ID is recorded; its attributes were captured at alloc time.
func (tl traceLocker) SpanFree(s *mspan) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvSpanFree, traceSpanID(s))
}
// traceSpanID creates a trace ID for the span s for the trace.
// The ID is the span's page-granularity offset from the minimum heap address.
func traceSpanID(s *mspan) traceArg {
	return traceArg(uint64(s.base())-trace.minPageHeapAddr) / pageSize
}
// HeapObjectExists records that an object already exists at addr with the provided type.
// The type is optional, and the size of the slot occupied by the object is inferred from the
// span containing it.
func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvHeapObject, traceHeapObjectID(addr), tl.rtype(typ))
}
// HeapObjectAlloc records that an object was newly allocated at addr with the provided type.
// The type is optional, and the size of the slot occupied by the object is inferred from the
// span containing it.
func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvHeapObjectAlloc, traceHeapObjectID(addr), tl.rtype(typ))
}
// HeapObjectFree records that an object at addr is about to be freed.
// Only the object's ID is recorded; its type was captured at alloc time.
func (tl traceLocker) HeapObjectFree(addr uintptr) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvHeapObjectFree, traceHeapObjectID(addr))
}
// traceHeapObjectID creates a trace ID for a heap object at address addr.
// The ID is the object's offset from the minimum heap address, in units of
// the minimum heap alignment.
func traceHeapObjectID(addr uintptr) traceArg {
	return traceArg(uint64(addr)-trace.minPageHeapAddr) / gc.MinHeapAlign
}
// GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
// The size is compressed to its power-of-two order (see traceCompressStackSize).
func (tl traceLocker) GoroutineStackExists(base, size uintptr) {
	order := traceCompressStackSize(size)
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoroutineStack, traceGoroutineStackID(base), order)
}
// GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size.
// The size is compressed to its power-of-two order (see traceCompressStackSize).
func (tl traceLocker) GoroutineStackAlloc(base, size uintptr) {
	order := traceCompressStackSize(size)
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoroutineStackAlloc, traceGoroutineStackID(base), order)
}
// GoroutineStackFree records that a goroutine stack at address base is about to be freed.
// Only the stack's ID is recorded; its size was captured at alloc time.
func (tl traceLocker) GoroutineStackFree(base uintptr) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoroutineStackFree, traceGoroutineStackID(base))
}
// traceGoroutineStackID creates a trace ID for the goroutine stack from its base address.
// The ID is the stack's offset from the minimum heap address, in units of the
// fixed (minimum) stack size.
func traceGoroutineStackID(base uintptr) traceArg {
	return traceArg(uint64(base)-trace.minPageHeapAddr) / fixedStack
}
// traceCompressStackSize assumes size is a power of 2 and returns a compact
// encoding of it: sys.Len64(size), which is log2(size)+1 for any power of two.
// NOTE(review): the previous comment claimed log2(size); whichever is intended,
// the trace decoder must agree — confirm.
func traceCompressStackSize(size uintptr) traceArg {
	// A power of two has exactly one bit set, so size&(size-1) must be 0.
	// (size == 0 also passes this check and encodes as 0.)
	if size&(size-1) != 0 {
		throw("goroutine stack size is not a power of 2")
	}
	return traceArg(sys.Len64(uint64(size)))
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"internal/bytealg"
"internal/goarch"
"internal/runtime/pprof/label"
"internal/runtime/sys"
"internal/stringslite"
"unsafe"
)
// The code in this file implements stack trace walking for all architectures.
// The most important fact about a given architecture is whether it uses a link register.
// On systems with link registers, the prologue for a non-leaf function stores the
// incoming value of LR at the bottom of the newly allocated stack frame.
// On systems without link registers (x86), the architecture pushes a return PC during
// the call instruction, so the return PC ends up above the stack frame.
// In this file, the return PC is always called LR, no matter how it was found.

// usesLR reports whether this architecture uses a link register: a nonzero
// minimum frame size implies the prologue reserves a slot for the saved LR.
const usesLR = sys.MinFrameSize > 0
const (
	// tracebackInnerFrames is the number of innermost frames to print in a
	// stack trace. The total maximum frames is tracebackInnerFrames +
	// tracebackOuterFrames.
	tracebackInnerFrames = 50

	// tracebackOuterFrames is the number of outermost frames to print in a
	// stack trace.
	tracebackOuterFrames = 50
)
// unwindFlags control the behavior of various unwinders.
type unwindFlags uint8

const (
	// unwindPrintErrors indicates that if unwinding encounters an error, it
	// should print a message and stop without throwing. This is used for things
	// like stack printing, where it's better to get incomplete information than
	// to crash. This is also used in situations where everything may not be
	// stopped nicely and the stack walk may not be able to complete, such as
	// during profiling signals or during a crash.
	//
	// If neither unwindPrintErrors nor unwindSilentErrors are set, unwinding
	// performs extra consistency checks and throws on any error.
	//
	// Note that there are a small number of fatal situations that will throw
	// regardless of unwindPrintErrors or unwindSilentErrors.
	unwindPrintErrors unwindFlags = 1 << iota

	// unwindSilentErrors silently ignores errors during unwinding.
	unwindSilentErrors

	// unwindTrap indicates that the initial PC and SP are from a trap, not a
	// return PC from a call.
	//
	// The unwindTrap flag is updated during unwinding. If set, frame.pc is the
	// address of a faulting instruction instead of the return address of a
	// call. It also means the liveness at pc may not be known.
	//
	// TODO: Distinguish frame.continpc, which is really the stack map PC, from
	// the actual continuation PC, which is computed differently depending on
	// this flag and a few other things.
	unwindTrap

	// unwindJumpStack indicates that, if the traceback is on a system stack, it
	// should resume tracing at the user stack when the system stack is
	// exhausted.
	unwindJumpStack
)
// An unwinder iterates the physical stack frames of a Go stack.
//
// Typical use of an unwinder looks like:
//
//	var u unwinder
//	for u.init(gp, 0); u.valid(); u.next() {
//		// ... use frame info in u ...
//	}
//
// Implementation note: This is carefully structured to be pointer-free because
// tracebacks happen in places that disallow write barriers (e.g., signals).
// Even if this is stack-allocated, its pointer-receiver methods don't know that
// their receiver is on the stack, so they still emit write barriers. Here we
// address that by carefully avoiding any pointers in this type. Another
// approach would be to split this into a mutable part that's passed by pointer
// but contains no pointers itself and an immutable part that's passed and
// returned by value and can contain pointers. We could potentially hide that
// we're doing that in trivial methods that are inlined into the caller that has
// the stack allocation, but that's fragile.
type unwinder struct {
	// frame is the current physical stack frame, or all 0s if
	// there is no frame.
	frame stkframe

	// g is the G whose stack is being unwound. If the
	// unwindJumpStack flag is set and the unwinder jumps stacks,
	// this will be different from the initial G.
	g guintptr

	// cgoCtxt is the index into g.cgoCtxt of the next frame on the cgo stack.
	// The cgo stack is unwound in tandem with the Go stack as we find marker frames.
	cgoCtxt int

	// calleeFuncID is the function ID of the caller of the current
	// frame.
	calleeFuncID abi.FuncID

	// flags are the flags to this unwind. Some of these are updated as we
	// unwind (see the flags documentation).
	flags unwindFlags
}
// init initializes u to start unwinding gp's stack and positions the
// iterator on gp's innermost frame. gp must not be the current G.
//
// A single unwinder can be reused for multiple unwinds.
func (u *unwinder) init(gp *g, flags unwindFlags) {
	// Implementation note: This starts the iterator on the first frame and we
	// provide a "valid" method. Alternatively, this could start in a "before
	// the first frame" state and "next" could return whether it was able to
	// move to the next frame, but that's both more awkward to use in a "for"
	// loop and is harder to implement because we have to do things differently
	// for the first frame.
	//
	// The ^uintptr(0) sentinels tell initAt to fetch the saved PC/SP/LR from
	// gp itself (syscall state if present, otherwise gp.sched).
	u.initAt(^uintptr(0), ^uintptr(0), ^uintptr(0), gp, flags)
}
// initAt initializes u to start unwinding gp's stack at the given PC, SP,
// and (on LR machines) LR. Passing ^uintptr(0) for both pc0 and sp0 fetches
// the saved values from gp instead (syscall state if present, otherwise
// gp.sched). gp must not be running on its own stack when this is called.
func (u *unwinder) initAt(pc0, sp0, lr0 uintptr, gp *g, flags unwindFlags) {
	// Don't call this "g"; it's too easy to get "g" and "gp" confused.
	if ourg := getg(); ourg == gp && ourg == ourg.m.curg {
		// The starting sp has been passed in as a uintptr, and the caller may
		// have other uintptr-typed stack references as well.
		// If during one of the calls that got us here or during one of the
		// callbacks below the stack must be grown, all these uintptr references
		// to the stack will not be updated, and traceback will continue
		// to inspect the old stack memory, which may no longer be valid.
		// Even if all the variables were updated correctly, it is not clear that
		// we want to expose a traceback that begins on one stack and ends
		// on another stack. That could confuse callers quite a bit.
		// Instead, we require that initAt and any other function that
		// accepts an sp for the current goroutine (typically obtained by
		// calling GetCallerSP) must not run on that goroutine's stack but
		// instead on the g0 stack.
		throw("cannot trace user goroutine on its own stack")
	}

	if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
		if gp.syscallsp != 0 {
			pc0 = gp.syscallpc
			sp0 = gp.syscallsp
			if usesLR {
				lr0 = 0
			}
		} else {
			pc0 = gp.sched.pc
			sp0 = gp.sched.sp
			if usesLR {
				lr0 = gp.sched.lr
			}
		}
	}

	var frame stkframe
	frame.pc = pc0
	frame.sp = sp0
	if usesLR {
		frame.lr = lr0
	}

	// If the PC is zero, it's likely a nil function call.
	// Start in the caller's frame.
	if frame.pc == 0 {
		if usesLR {
			frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.lr = 0
		} else {
			frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.sp += goarch.PtrSize
		}
	}

	// internal/runtime/atomic functions call into kernel helpers on
	// arm < 7. See internal/runtime/atomic/sys_linux_arm.s.
	//
	// Start in the caller's frame.
	if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && frame.pc&0xffff0000 == 0xffff0000 {
		// Note that the calls are simple BL without pushing the return
		// address, so we use LR directly.
		//
		// The kernel helpers are frameless leaf functions, so SP and
		// LR are not touched.
		frame.pc = frame.lr
		frame.lr = 0
	}

	f := findfunc(frame.pc)
	if !f.valid() {
		if flags&unwindSilentErrors == 0 {
			print("runtime: g ", gp.goid, " gp=", gp, ": unknown pc ", hex(frame.pc), "\n")
			tracebackHexdump(gp.stack, &frame, 0)
		}
		if flags&(unwindPrintErrors|unwindSilentErrors) == 0 {
			throw("unknown pc")
		}
		// Leave the unwinder zeroed so valid() reports false.
		*u = unwinder{}
		return
	}
	frame.fn = f

	// Populate the unwinder.
	*u = unwinder{
		frame:        frame,
		g:            gp.guintptr(),
		cgoCtxt:      len(gp.cgoCtxt) - 1,
		calleeFuncID: abi.FuncIDNormal,
		flags:        flags,
	}

	isSyscall := frame.pc == pc0 && frame.sp == sp0 && pc0 == gp.syscallpc && sp0 == gp.syscallsp
	u.resolveInternal(true, isSyscall)
}
// valid reports whether the unwinder is positioned on a frame.
// It is false after initialization fails (initAt zeroes *u in that case).
func (u *unwinder) valid() bool {
	return u.frame.pc != 0
}
// resolveInternal fills in u.frame based on u.frame.fn, pc, and sp.
//
// innermost indicates that this is the first resolve on this stack. If
// innermost is set, isSyscall indicates that the PC/SP was retrieved from
// gp.syscall*; this is otherwise ignored.
//
// On entry, u.frame contains:
//   - fn is the running function.
//   - pc is the PC in the running function.
//   - sp is the stack pointer at that program counter.
//   - For the innermost frame on LR machines, lr is the program counter that called fn.
//
// On return, u.frame contains:
//   - fp is the stack pointer of the caller.
//   - lr is the program counter that called fn.
//   - varp, argp, and continpc are populated for the current frame.
//
// If fn is a stack-jumping function, resolveInternal can change the entire
// frame state to follow that stack jump.
//
// This is internal to unwinder.
func (u *unwinder) resolveInternal(innermost, isSyscall bool) {
	frame := &u.frame
	gp := u.g.ptr()

	f := frame.fn
	if f.pcsp == 0 {
		// No frame information, must be external function, like race support.
		// See golang.org/issue/13568.
		u.finishInternal()
		return
	}

	// Compute function info flags.
	flag := f.flag
	if f.funcID == abi.FuncID_cgocallback {
		// cgocallback does write SP to switch from the g0 to the curg stack,
		// but it carefully arranges that during the transition BOTH stacks
		// have cgocallback frame valid for unwinding through.
		// So we don't need to exclude it with the other SP-writing functions.
		flag &^= abi.FuncFlagSPWrite
	}
	if isSyscall {
		// Some Syscall functions write to SP, but they do so only after
		// saving the entry PC/SP using entersyscall.
		// Since we are using the entry PC/SP, the later SP write doesn't matter.
		flag &^= abi.FuncFlagSPWrite
	}

	// Found an actual function.
	// Derive frame pointer.
	if frame.fp == 0 {
		// Jump over system stack transitions. If we're on g0 and there's a user
		// goroutine, try to jump. Otherwise this is a regular call.
		// We also defensively check that this won't switch M's on us,
		// which could happen at critical points in the scheduler.
		// This ensures gp.m doesn't change from a stack jump.
		if u.flags&unwindJumpStack != 0 && gp == gp.m.g0 && gp.m.curg != nil && gp.m.curg.m == gp.m {
			switch f.funcID {
			case abi.FuncID_morestack:
				// morestack does not return normally -- newstack()
				// gogo's to curg.sched. Match that.
				// This keeps morestack() from showing up in the backtrace,
				// but that makes some sense since it'll never be returned
				// to.
				gp = gp.m.curg
				u.g.set(gp)
				// Resume unwinding from curg's saved scheduling state.
				frame.pc = gp.sched.pc
				frame.fn = findfunc(frame.pc)
				f = frame.fn
				flag = f.flag
				frame.lr = gp.sched.lr
				frame.sp = gp.sched.sp
				u.cgoCtxt = len(gp.cgoCtxt) - 1
			case abi.FuncID_systemstack:
				// systemstack returns normally, so just follow the
				// stack transition.
				if usesLR && funcspdelta(f, frame.pc) == 0 {
					// We're at the function prologue and the stack
					// switch hasn't happened, or epilogue where we're
					// about to return. Just unwind normally.
					// Do this only on LR machines because on x86
					// systemstack doesn't have an SP delta (the CALL
					// instruction opens the frame), therefore no way
					// to check.
					flag &^= abi.FuncFlagSPWrite
					break
				}
				gp = gp.m.curg
				u.g.set(gp)
				frame.sp = gp.sched.sp
				u.cgoCtxt = len(gp.cgoCtxt) - 1
				flag &^= abi.FuncFlagSPWrite
			}
		}
		frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc))
		if !usesLR {
			// On x86, call instruction pushes return PC before entering new function.
			frame.fp += goarch.PtrSize
		}
	}

	// Derive link register.
	if flag&abi.FuncFlagTopFrame != 0 {
		// This function marks the top of the stack. Stop the traceback.
		frame.lr = 0
	} else if flag&abi.FuncFlagSPWrite != 0 && (!innermost || u.flags&(unwindPrintErrors|unwindSilentErrors) != 0) {
		// The function we are in does a write to SP that we don't know
		// how to encode in the spdelta table. Examples include context
		// switch routines like runtime.gogo but also any code that switches
		// to the g0 stack to run host C code.
		// We can't reliably unwind the SP (we might not even be on
		// the stack we think we are), so stop the traceback here.
		//
		// The one exception (encoded in the complex condition above) is that
		// we assume if we're doing a precise traceback, and this is the
		// innermost frame, that the SPWRITE function voluntarily preempted itself on entry
		// during the stack growth check. In that case, the function has
		// not yet had a chance to do any writes to SP and is safe to unwind.
		// isAsyncSafePoint does not allow assembly functions to be async preempted,
		// and preemptPark double-checks that SPWRITE functions are not async preempted.
		// So for GC stack traversal, we can safely ignore SPWRITE for the innermost frame,
		// but farther up the stack we'd better not find any.
		// This is somewhat imprecise because we're just guessing that we're in the stack
		// growth check. It would be better if SPWRITE were encoded in the spdelta
		// table so we would know for sure that we were still in safe code.
		//
		// uSE uPE inn | action
		//  T   _   _  | frame.lr = 0
		//  F   T   _  | frame.lr = 0
		//  F   F   F  | print; panic
		//  F   F   T  | ignore SPWrite
		if u.flags&(unwindPrintErrors|unwindSilentErrors) == 0 && !innermost {
			println("traceback: unexpected SPWRITE function", funcname(f))
			throw("traceback")
		}
		frame.lr = 0
	} else {
		// Normal call frame: load the saved return address from the stack.
		var lrPtr uintptr
		if usesLR {
			if innermost && frame.sp < frame.fp || frame.lr == 0 {
				lrPtr = frame.sp
				frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
			}
		} else {
			if frame.lr == 0 {
				lrPtr = frame.fp - goarch.PtrSize
				frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
			}
		}
	}

	frame.varp = frame.fp
	if !usesLR {
		// On x86, call instruction pushes return PC before entering new function.
		frame.varp -= goarch.PtrSize
	}

	// For architectures with frame pointers, if there's
	// a frame, then there's a saved frame pointer here.
	//
	// NOTE: This code is not as general as it looks.
	// On x86, the ABI is to save the frame pointer word at the
	// top of the stack frame, so we have to back down over it.
	// On arm64, the frame pointer should be at the bottom of
	// the stack (with R29 (aka FP) = RSP), in which case we would
	// not want to do the subtraction here. But we started out without
	// any frame pointer, and when we wanted to add it, we didn't
	// want to break all the assembly doing direct writes to 8(RSP)
	// to set the first parameter to a called function.
	// So we decided to write the FP link *below* the stack pointer
	// (with R29 = RSP - 8 in Go functions).
	// This is technically ABI-compatible but not standard.
	// And it happens to end up mimicking the x86 layout.
	// Other architectures may make different decisions.
	if frame.varp > frame.sp && framepointer_enabled {
		frame.varp -= goarch.PtrSize
	}

	frame.argp = frame.fp + sys.MinFrameSize

	// Determine frame's 'continuation PC', where it can continue.
	// Normally this is the return address on the stack, but if sigpanic
	// is immediately below this function on the stack, then the frame
	// stopped executing due to a trap, and frame.pc is probably not
	// a safe point for looking up liveness information. In this panicking case,
	// the function either doesn't return at all (if it has no defers or if the
	// defers do not recover) or it returns from one of the calls to
	// deferproc a second time (if the corresponding deferred func recovers).
	// In the latter case, use a deferreturn call site as the continuation pc.
	frame.continpc = frame.pc
	if u.calleeFuncID == abi.FuncID_sigpanic {
		if frame.fn.deferreturn != 0 {
			frame.continpc = frame.fn.entry() + uintptr(frame.fn.deferreturn) + 1
			// Note: this may perhaps keep return variables alive longer than
			// strictly necessary, as we are using "function has a defer statement"
			// as a proxy for "function actually deferred something". It seems
			// to be a minor drawback. (We used to actually look through the
			// gp._defer for a defer corresponding to this function, but that
			// is hard to do with defer records on the stack during a stack copy.)
			// Note: the +1 is to offset the -1 that
			// (*stkframe).getStackMap does to back up a return
			// address make sure the pc is in the CALL instruction.
		} else {
			frame.continpc = 0
		}
	}
}
// next advances the unwinder to the caller's frame: it resolves the
// function containing frame.lr, shifts pc/sp to the caller, and calls
// resolveInternal to populate the new frame. If the bottom of the stack
// has been reached (frame.lr == 0) or the return PC cannot be resolved,
// it finishes the walk via finishInternal.
func (u *unwinder) next() {
	frame := &u.frame
	f := frame.fn
	gp := u.g.ptr()

	// Do not unwind past the bottom of the stack.
	if frame.lr == 0 {
		u.finishInternal()
		return
	}
	flr := findfunc(frame.lr)
	if !flr.valid() {
		// This happens if you get a profiling interrupt at just the wrong time.
		// In that context it is okay to stop early.
		// But if no error flags are set, we're doing a garbage collection and must
		// get everything, so crash loudly.
		fail := u.flags&(unwindPrintErrors|unwindSilentErrors) == 0
		doPrint := u.flags&unwindSilentErrors == 0
		if doPrint && gp.m != nil && gp.m.incgo && f.funcID == abi.FuncID_sigpanic {
			// We can inject sigpanic
			// calls directly into C code,
			// in which case we'll see a C
			// return PC. Don't complain.
			doPrint = false
		}
		if fail || doPrint {
			print("runtime: g ", gp.goid, ": unexpected return pc for ", funcname(f), " called from ", hex(frame.lr), "\n")
			tracebackHexdump(gp.stack, frame, 0)
		}
		if fail {
			throw("unknown caller pc")
		}
		frame.lr = 0
		u.finishInternal()
		return
	}

	// A frame that unwinds to itself would loop forever; crash instead.
	if frame.pc == frame.lr && frame.sp == frame.fp {
		// If the next frame is identical to the current frame, we cannot make progress.
		print("runtime: traceback stuck. pc=", hex(frame.pc), " sp=", hex(frame.sp), "\n")
		tracebackHexdump(gp.stack, frame, frame.sp)
		throw("traceback stuck")
	}

	injectedCall := f.funcID == abi.FuncID_sigpanic || f.funcID == abi.FuncID_asyncPreempt || f.funcID == abi.FuncID_debugCallV2
	if injectedCall {
		u.flags |= unwindTrap
	} else {
		u.flags &^= unwindTrap
	}

	// Unwind to next frame.
	u.calleeFuncID = f.funcID
	frame.fn = flr
	frame.pc = frame.lr
	frame.lr = 0
	frame.sp = frame.fp
	frame.fp = 0

	// On link register architectures, sighandler saves the LR on stack
	// before faking a call.
	if usesLR && injectedCall {
		x := *(*uintptr)(unsafe.Pointer(frame.sp))
		frame.sp += alignUp(sys.MinFrameSize, sys.StackAlign)
		f = findfunc(frame.pc)
		frame.fn = f
		if !f.valid() {
			frame.pc = x
		} else if funcspdelta(f, frame.pc) == 0 {
			frame.lr = x
		}
	}

	u.resolveInternal(false, false)
}
// finishInternal is an unwinder-internal helper called after the stack has been
// exhausted. It sets the unwinder to an invalid state and checks that it
// successfully unwound the entire stack.
func (u *unwinder) finishInternal() {
	// pc == 0 marks the unwinder as invalid (the walk is over).
	u.frame.pc = 0

	// Note that panic != nil is okay here: there can be leftover panics,
	// because the defers on the panic stack do not nest in frame order as
	// they do on the defer stack. If you have:
	//
	//	frame 1 defers d1
	//	frame 2 defers d2
	//	frame 3 defers d3
	//	frame 4 panics
	//	frame 4's panic starts running defers
	//	frame 5, running d3, defers d4
	//	frame 5 panics
	//	frame 5's panic starts running defers
	//	frame 6, running d4, garbage collects
	//	frame 6, running d2, garbage collects
	//
	// During the execution of d4, the panic stack is d4 -> d3, which
	// is nested properly, and we'll treat frame 3 as resumable, because we
	// can find d3. (And in fact frame 3 is resumable. If d4 recovers
	// and frame 5 continues running, d3, d3 can recover and we'll
	// resume execution in (returning from) frame 3.)
	//
	// During the execution of d2, however, the panic stack is d2 -> d3,
	// which is inverted. The scan will match d2 to frame 2 but having
	// d2 on the stack until then means it will not match d3 to frame 3.
	// This is okay: if we're running d2, then all the defers after d2 have
	// completed and their corresponding frames are dead. Not finding d3
	// for frame 3 means we'll set frame 3's continpc == 0, which is correct
	// (frame 3 is dead). At the end of the walk the panic stack can thus
	// contain defers (d3 in this case) for dead frames. The inversion here
	// always indicates a dead frame, and the effect of the inversion on the
	// scan is to hide those dead frames, so the scan is still okay:
	// what's left on the panic stack are exactly (and only) the dead frames.
	//
	// We require callback != nil here because only when callback != nil
	// do we know that gentraceback is being called in a "must be correct"
	// context as opposed to a "best effort" context. The tracebacks with
	// callbacks only happen when everything is stopped nicely.
	// At other times, such as when gathering a stack for a profiling signal
	// or when printing a traceback during a crash, everything may not be
	// stopped nicely, and the stack walk may not be able to complete.
	gp := u.g.ptr()
	// In "must be correct" mode (no error flags), the walk must have ended
	// exactly at the recorded top of stack; anything else is a fatal bug.
	if u.flags&(unwindPrintErrors|unwindSilentErrors) == 0 && u.frame.sp != gp.stktopsp {
		print("runtime: g", gp.goid, ": frame.sp=", hex(u.frame.sp), " top=", hex(gp.stktopsp), "\n")
		print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "\n")
		throw("traceback did not unwind completely")
	}
}
// symPC returns the PC that should be used for symbolizing the current frame.
// Specifically, this is the PC of the last instruction executed in this frame.
//
// If this frame did a normal call, then frame.pc is a return PC, so this will
// return frame.pc-1, which points into the CALL instruction. If the frame was
// interrupted by a signal (e.g., profiler, segv, etc) then frame.pc is for the
// trapped instruction, so this returns frame.pc. See issue #34123. Finally,
// frame.pc can be at function entry when the frame is initialized without
// actually running code, like in runtime.mstart, in which case this returns
// frame.pc because that's the best we can do.
func (u *unwinder) symPC() uintptr {
	// A trapped frame, or one parked at its entry point, already has the
	// PC of the relevant instruction.
	if u.flags&unwindTrap != 0 || u.frame.pc <= u.frame.fn.entry() {
		return u.frame.pc
	}
	// Regular call: back up into the CALL instruction.
	return u.frame.pc - 1
}
// cgoCallers populates pcBuf with the cgo callers of the current frame using
// the registered cgo unwinder. It returns the number of PCs written to pcBuf.
// If the current frame is not a cgo frame or if there's no registered cgo
// unwinder, it returns 0.
func (u *unwinder) cgoCallers(pcBuf []uintptr) int {
	if !cgoTracebackAvailable() || u.frame.fn.funcID != abi.FuncID_cgocallback || u.cgoCtxt < 0 {
		// We don't have a cgo unwinder (typical case), or we do but we're not
		// in a cgo frame or we're out of cgo context.
		return 0
	}

	// Consume one saved cgo context per cgocallback frame, outermost last.
	ctxt := u.g.ptr().cgoCtxt[u.cgoCtxt]
	u.cgoCtxt--
	cgoContextPCs(ctxt, pcBuf)
	// The buffer is treated as zero-terminated: the count is the index of
	// the first zero entry, or the full buffer if none is zero.
	for i, pc := range pcBuf {
		if pc == 0 {
			return i
		}
	}
	return len(pcBuf)
}
// tracebackPCs populates pcBuf with the return addresses for each frame from u
// and returns the number of PCs written to pcBuf. The returned PCs correspond
// to "logical frames" rather than "physical frames"; that is if A is inlined
// into B, this will still return a PCs for both A and B. This also includes PCs
// generated by the cgo unwinder, if one is registered.
//
// If skip != 0, this skips this many logical frames.
//
// Callers should set the unwindSilentErrors flag on u.
func tracebackPCs(u *unwinder, skip int, pcBuf []uintptr) int {
	var cgoBuf [32]uintptr
	n := 0
	for ; n < len(pcBuf) && u.valid(); u.next() {
		f := u.frame.fn
		// Capture any cgo frames belonging to this physical frame before
		// expanding its inlined Go frames.
		cgoN := u.cgoCallers(cgoBuf[:])

		// TODO: Why does &u.cache cause u to escape? (Same in traceback2)
		for iu, uf := newInlineUnwinder(f, u.symPC()); n < len(pcBuf) && uf.valid(); uf = iu.next(uf) {
			sf := iu.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(u.calleeFuncID) {
				// ignore wrappers
			} else if skip > 0 {
				skip--
			} else {
				// Callers expect the pc buffer to contain return addresses
				// and do the -1 themselves, so we add 1 to the call pc to
				// create a "return pc". Since there is no actual call, here
				// "return pc" just means a pc you subtract 1 from to get
				// the pc of the "call". The actual no-op we insert may or
				// may not be 1 byte.
				pcBuf[n] = uf.pc + 1
				n++
			}
			u.calleeFuncID = sf.funcID
		}
		// Add cgo frames (if we're done skipping over the requested number of
		// Go frames).
		if skip == 0 {
			n += copy(pcBuf[n:], cgoBuf[:cgoN])
		}
	}
	return n
}
// printArgs prints function arguments in traceback.
//
// f is the function being printed, argp points at its argument area on the
// stack, and pc selects the liveness information to apply. The argument
// metadata is a byte-coded sequence (abi.TraceArgs* opcodes interleaved with
// offset/size pairs) read from FUNCDATA_ArgInfo.
func printArgs(f funcInfo, argp unsafe.Pointer, pc uintptr) {
	p := (*[abi.TraceArgsMaxLen]uint8)(funcdata(f, abi.FUNCDATA_ArgInfo))
	if p == nil {
		// No argument metadata recorded for this function.
		return
	}

	liveInfo := funcdata(f, abi.FUNCDATA_ArgLiveInfo)
	liveIdx := pcdatavalue(f, abi.PCDATA_ArgLiveIndex, pc)
	startOffset := uint8(0xff) // smallest offset that needs liveness info (slots with a lower offset is always live)
	if liveInfo != nil {
		startOffset = *(*uint8)(liveInfo)
	}

	// isLive reports whether the slot at the given offset/index is live at pc
	// according to the liveness bitmap, defaulting to live when no info exists.
	isLive := func(off, slotIdx uint8) bool {
		if liveInfo == nil || liveIdx <= 0 {
			return true // no liveness info, always live
		}
		if off < startOffset {
			return true
		}
		bits := *(*uint8)(add(liveInfo, uintptr(liveIdx)+uintptr(slotIdx/8)))
		return bits&(1<<(slotIdx%8)) != 0
	}

	// print1 prints the sz-byte value at offset off from argp, appending "?"
	// when the slot may be dead at pc.
	print1 := func(off, sz, slotIdx uint8) {
		x := readUnaligned64(add(argp, uintptr(off)))
		// mask out irrelevant bits
		if sz < 8 {
			shift := 64 - sz*8
			if goarch.BigEndian {
				x = x >> shift
			} else {
				x = x << shift >> shift
			}
		}
		print(hex(x))
		if !isLive(off, slotIdx) {
			print("?")
		}
	}

	start := true
	printcomma := func() {
		if !start {
			print(", ")
		}
	}
	pi := 0
	slotIdx := uint8(0) // register arg spill slot index
printloop:
	for {
		o := p[pi]
		pi++
		switch o {
		case abi.TraceArgsEndSeq:
			break printloop
		case abi.TraceArgsStartAgg:
			printcomma()
			print("{")
			start = true
			continue
		case abi.TraceArgsEndAgg:
			print("}")
		case abi.TraceArgsDotdotdot:
			printcomma()
			print("...")
		case abi.TraceArgsOffsetTooLarge:
			printcomma()
			print("_")
		default:
			// Opcode is an offset; the next byte is the size of the value.
			printcomma()
			sz := p[pi]
			pi++
			print1(o, sz, slotIdx)
			if o >= startOffset {
				slotIdx++
			}
		}
		start = false
	}
}
// funcNamePiecesForPrint returns the function name for printing to the user.
// It returns three pieces so it doesn't need an allocation for string
// concatenation.
func funcNamePiecesForPrint(name string) (string, string, string) {
	// Generic instantiations embed a shape name in brackets; collapse
	// everything between the first '[' and the last ']' into "...".
	open := bytealg.IndexByteString(name, '[')
	if open < 0 {
		// Not a generic function; print the name as-is.
		return name, "", ""
	}
	end := len(name) - 1
	for name[end] != ']' {
		end--
	}
	if end <= open {
		return name, "", ""
	}
	return name[:open], "[...]", name[end+1:]
}
// funcNameForPrint returns the function name for printing to the user,
// concatenating the pieces produced by funcNamePiecesForPrint.
func funcNameForPrint(name string) string {
	prefix, ellipsis, suffix := funcNamePiecesForPrint(name)
	return prefix + ellipsis + suffix
}
// printFuncName prints a function name. name is the function name in
// the binary's func data table.
func printFuncName(name string) {
	// runtime.gopanic is reported to users as simply "panic".
	if name == "runtime.gopanic" {
		print("panic")
		return
	}
	pre, mid, post := funcNamePiecesForPrint(name)
	print(pre, mid, post)
}
// printcreatedby prints the "created by" line for gp's creation site,
// skipping the main goroutine (goid 1) and creators that showframe elides.
func printcreatedby(gp *g) {
	// Show what created goroutine, except main goroutine (goid 1).
	pc := gp.gopc
	f := findfunc(pc)
	if f.valid() && showframe(f.srcFunc(), gp, false, abi.FuncIDNormal) && gp.goid != 1 {
		printcreatedby1(f, pc, gp.parentGoid)
	}
}
// printcreatedby1 prints a single "created by" entry: the creating function's
// name, the creator goroutine id (omitted when goid is 0), and the file:line
// (plus PC offset) of the go statement.
func printcreatedby1(f funcInfo, pc uintptr, goid uint64) {
	print("created by ")
	printFuncName(funcname(f))
	if goid != 0 {
		print(" in goroutine ", goid)
	}
	print("\n")

	tracepc := pc // back up to CALL instruction for funcline.
	if pc > f.entry() {
		tracepc -= sys.PCQuantum
	}
	file, line := funcline(f, tracepc)
	print("\t", file, ":", line)
	if pc > f.entry() {
		print(" +", hex(pc-f.entry()))
	}
	print("\n")
}
// traceback prints a traceback of gp starting at pc/sp/lr with no special
// unwind flags. See traceback1 for the details.
func traceback(pc, sp, lr uintptr, gp *g) {
	traceback1(pc, sp, lr, gp, 0)
}
// tracebacktrap is like traceback but expects that the PC and SP were obtained
// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or GetCallerPC/GetCallerSP.
// Because they are from a trap instead of from a saved pair,
// the initial PC must not be rewound to the previous instruction.
// (All the saved pairs record a PC that is a return address, so we
// rewind it into the CALL instruction.)
// If gp.m.libcall{g,pc,sp} information is available, it uses that information in preference to
// the pc/sp/lr passed in.
func tracebacktrap(pc, sp, lr uintptr, gp *g) {
	if gp.m.libcallsp != 0 {
		// We're in C code somewhere, traceback from the saved position.
		traceback1(gp.m.libcallpc, gp.m.libcallsp, 0, gp.m.libcallg.ptr(), 0)
		return
	}
	// unwindTrap tells the unwinder not to rewind the trapped PC.
	traceback1(pc, sp, lr, gp, unwindTrap)
}
// traceback1 prints a traceback of gp starting at pc/sp/lr, preceded by any
// pending cgo traceback and followed by the "created by" line and ancestor
// tracebacks. It prints the innermost tracebackInnerFrames frames and the
// outermost tracebackOuterFrames frames, eliding the middle (see the big
// comment below for the strategy).
func traceback1(pc, sp, lr uintptr, gp *g, flags unwindFlags) {
	// If the goroutine is in cgo, and we have a cgo traceback, print that.
	if iscgo && gp.m != nil && gp.m.ncgo > 0 && gp.syscallsp != 0 && gp.m.cgoCallers != nil && gp.m.cgoCallers[0] != 0 {
		// Lock cgoCallers so that a signal handler won't
		// change it, copy the array, reset it, unlock it.
		// We are locked to the thread and are not running
		// concurrently with a signal handler.
		// We just have to stop a signal handler from interrupting
		// in the middle of our copy.
		gp.m.cgoCallersUse.Store(1)
		cgoCallers := *gp.m.cgoCallers
		gp.m.cgoCallers[0] = 0
		gp.m.cgoCallersUse.Store(0)

		printCgoTraceback(&cgoCallers)
	}

	if readgstatus(gp)&^_Gscan == _Gsyscall {
		// Override registers if blocked in system call.
		pc = gp.syscallpc
		sp = gp.syscallsp
		flags &^= unwindTrap
	}
	if gp.m != nil && gp.m.vdsoSP != 0 {
		// Override registers if running in VDSO. This comes after the
		// _Gsyscall check to cover VDSO calls after entersyscall.
		pc = gp.m.vdsoPC
		sp = gp.m.vdsoSP
		flags &^= unwindTrap
	}

	// Print traceback.
	//
	// We print the first tracebackInnerFrames frames, and the last
	// tracebackOuterFrames frames. There are many possible approaches to this.
	// There are various complications to this:
	//
	// - We'd prefer to walk the stack once because in really bad situations
	//   traceback may crash (and we want as much output as possible) or the stack
	//   may be changing.
	//
	// - Each physical frame can represent several logical frames, so we might
	//   have to pause in the middle of a physical frame and pick up in the middle
	//   of a physical frame.
	//
	// - The cgo symbolizer can expand a cgo PC to more than one logical frame,
	//   and involves juggling state on the C side that we don't manage. Since its
	//   expansion state is managed on the C side, we can't capture the expansion
	//   state part way through, and because the output strings are managed on the
	//   C side, we can't capture the output. Thus, our only choice is to replay a
	//   whole expansion, potentially discarding some of it.
	//
	// Rejected approaches:
	//
	// - Do two passes where the first pass just counts and the second pass does
	//   all the printing. This is undesirable if the stack is corrupted or changing
	//   because we won't see a partial stack if we panic.
	//
	// - Keep a ring buffer of the last N logical frames and use this to print
	//   the bottom frames once we reach the end of the stack. This works, but
	//   requires keeping a surprising amount of state on the stack, and we have
	//   to run the cgo symbolizer twice—once to count frames, and a second to
	//   print them—since we can't retain the strings it returns.
	//
	// Instead, we print the outer frames, and if we reach that limit, we clone
	// the unwinder, count the remaining frames, and then skip forward and
	// finish printing from the clone. This makes two passes over the outer part
	// of the stack, but the single pass over the inner part ensures that's
	// printed immediately and not revisited. It keeps minimal state on the
	// stack. And through a combination of skip counts and limits, we can do all
	// of the steps we need with a single traceback printer implementation.
	//
	// We could be more lax about exactly how many frames we print, for example
	// always stopping and resuming on physical frame boundaries, or at least
	// cgo expansion boundaries. It's not clear that's much simpler.
	flags |= unwindPrintErrors
	var u unwinder
	tracebackWithRuntime := func(showRuntime bool) int {
		const maxInt int = 0x7fffffff
		u.initAt(pc, sp, lr, gp, flags)
		n, lastN := traceback2(&u, showRuntime, 0, tracebackInnerFrames)
		if n < tracebackInnerFrames {
			// We printed the whole stack.
			return n
		}
		// Clone the unwinder and figure out how many frames are left. This
		// count will include any logical frames already printed for u's current
		// physical frame.
		u2 := u
		remaining, _ := traceback2(&u, showRuntime, maxInt, 0)
		elide := remaining - lastN - tracebackOuterFrames
		if elide > 0 {
			print("...", elide, " frames elided...\n")
			traceback2(&u2, showRuntime, lastN+elide, tracebackOuterFrames)
		} else if elide <= 0 {
			// There are tracebackOuterFrames or fewer frames left to print.
			// Just print the rest of the stack.
			traceback2(&u2, showRuntime, lastN, tracebackOuterFrames)
		}
		return n
	}
	// By default, omits runtime frames. If that means we print nothing at all,
	// repeat forcing all frames printed.
	if tracebackWithRuntime(false) == 0 {
		tracebackWithRuntime(true)
	}
	printcreatedby(gp)

	if gp.ancestors == nil {
		return
	}
	for _, ancestor := range *gp.ancestors {
		printAncestorTraceback(ancestor)
	}
}
// traceback2 prints a stack trace starting at u. It skips the first "skip"
// logical frames, after which it prints at most "max" logical frames. It
// returns n, which is the number of logical frames skipped and printed, and
// lastN, which is the number of logical frames skipped or printed just in the
// physical frame that u references.
func traceback2(u *unwinder, showRuntime bool, skip, max int) (n, lastN int) {
	// commitFrame commits to a logical frame and returns whether this frame
	// should be printed and whether iteration should stop.
	commitFrame := func() (pr, stop bool) {
		if skip == 0 && max == 0 {
			// Stop
			return false, true
		}
		n++
		lastN++
		if skip > 0 {
			// Skip
			skip--
			return false, false
		}
		// Print
		max--
		return true, false
	}

	gp := u.g.ptr()
	level, _, _ := gotraceback()
	var cgoBuf [32]uintptr
	for ; u.valid(); u.next() {
		lastN = 0
		f := u.frame.fn
		for iu, uf := newInlineUnwinder(f, u.symPC()); uf.valid(); uf = iu.next(uf) {
			sf := iu.srcFunc(uf)
			callee := u.calleeFuncID
			u.calleeFuncID = sf.funcID
			if !(showRuntime || showframe(sf, gp, n == 0, callee)) {
				continue
			}

			if pr, stop := commitFrame(); stop {
				return
			} else if !pr {
				continue
			}

			name := sf.name()
			file, line := iu.fileLine(uf)
			// Print during crash.
			//	main(0x1, 0x2, 0x3)
			//		/home/rsc/go/src/runtime/x.go:23 +0xf
			//
			printFuncName(name)
			print("(")
			if iu.isInlined(uf) {
				// Inlined frames have no argument area of their own.
				print("...")
			} else {
				argp := unsafe.Pointer(u.frame.argp)
				printArgs(f, argp, u.symPC())
			}
			print(")\n")
			print("\t", file, ":", line)
			if !iu.isInlined(uf) {
				if u.frame.pc > f.entry() {
					print(" +", hex(u.frame.pc-f.entry()))
				}
				if gp.m != nil && gp.m.throwing >= throwTypeRuntime && gp == gp.m.curg || level >= 2 {
					print(" fp=", hex(u.frame.fp), " sp=", hex(u.frame.sp), " pc=", hex(u.frame.pc))
				}
			}
			print("\n")
		}

		// Print cgo frames.
		if cgoN := u.cgoCallers(cgoBuf[:]); cgoN > 0 {
			var arg cgoSymbolizerArg
			anySymbolized := false
			stop := false
			for _, pc := range cgoBuf[:cgoN] {
				if !cgoSymbolizerAvailable() {
					// NOTE(review): the := here shadows the outer stop, so a
					// stop on this path only breaks this loop; the outer walk
					// then stops at the next commitFrame call. Presumably
					// intentional — confirm before "fixing".
					if pr, stop := commitFrame(); stop {
						break
					} else if pr {
						print("non-Go function at pc=", hex(pc), "\n")
					}
				} else {
					stop = printOneCgoTraceback(pc, commitFrame, &arg)
					anySymbolized = true
					if stop {
						break
					}
				}
			}
			if anySymbolized {
				// Free symbolization state.
				arg.pc = 0
				callCgoSymbolizer(&arg)
			}
			if stop {
				return
			}
		}
	}
	return n, 0
}
// printAncestorTraceback prints the traceback of the given ancestor.
// TODO: Unify this with gentraceback and CallersFrames.
func printAncestorTraceback(ancestor ancestorInfo) {
	print("[originating from goroutine ", ancestor.goid, "]:\n")
	for fidx, pc := range ancestor.pcs {
		f := findfunc(pc) // f previously validated
		if showfuncinfo(f.srcFunc(), fidx == 0, abi.FuncIDNormal) {
			printAncestorTracebackFuncInfo(f, pc)
		}
	}
	// A full pcs buffer means the recorded trace may have been truncated.
	if len(ancestor.pcs) == tracebackInnerFrames {
		print("...additional frames elided...\n")
	}
	// Show what created goroutine, except main goroutine (goid 1).
	f := findfunc(ancestor.gopc)
	if f.valid() && showfuncinfo(f.srcFunc(), false, abi.FuncIDNormal) && ancestor.goid != 1 {
		// In ancestor mode, we'll already print the goroutine ancestor.
		// Pass 0 for the goid parameter so we don't print it again.
		printcreatedby1(f, ancestor.gopc, 0)
	}
}
// printAncestorTracebackFuncInfo prints the given function info at a given pc
// within an ancestor traceback. The precision of this info is reduced
// due to only have access to the pcs at the time of the caller
// goroutine being created.
func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) {
	u, uf := newInlineUnwinder(f, pc)
	file, line := u.fileLine(uf)
	printFuncName(u.srcFunc(uf).name())
	// Arguments are unavailable for ancestor frames; print "(...)".
	print("(...)\n")
	print("\t", file, ":", line)
	if pc > f.entry() {
		print(" +", hex(pc-f.entry()))
	}
	print("\n")
}
// callers fills pcbuf with the return PCs of the calling goroutine's stack,
// skipping the first skip logical frames, and returns the number written.
// The walk runs on the system stack with unwind errors silenced.
//
// callers should be an internal detail,
// (and is almost identical to Callers),
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/phuslu/log
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname callers
func callers(skip int, pcbuf []uintptr) int {
	sp := sys.GetCallerSP()
	pc := sys.GetCallerPC()
	gp := getg()
	var n int
	systemstack(func() {
		var u unwinder
		u.initAt(pc, sp, 0, gp, unwindSilentErrors)
		n = tracebackPCs(&u, skip, pcbuf)
	})
	return n
}
// gcallers is like callers but records the stack of gp rather than the
// calling goroutine, silently ignoring unwind errors.
func gcallers(gp *g, skip int, pcbuf []uintptr) int {
	var u unwinder
	u.init(gp, unwindSilentErrors)
	return tracebackPCs(&u, skip, pcbuf)
}
// showframe reports whether the frame with the given characteristics should
// be printed during a traceback.
func showframe(sf srcFunc, gp *g, firstFrame bool, calleeID abi.FuncID) bool {
	mp := getg().m
	// During a runtime throw, show every frame of the crashing goroutine
	// (or the goroutine that caught the signal).
	if mp.throwing >= throwTypeRuntime && gp != nil && (gp == mp.curg || gp == mp.caughtsig.ptr()) {
		return true
	}
	return showfuncinfo(sf, firstFrame, calleeID)
}
// showfuncinfo reports whether a function with the given characteristics should
// be printed during a traceback.
func showfuncinfo(sf srcFunc, firstFrame bool, calleeID abi.FuncID) bool {
	level, _, _ := gotraceback()
	if level > 1 {
		// Show all frames.
		return true
	}

	// Hide autogenerated wrappers unless they called a panic function.
	if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(calleeID) {
		return false
	}

	// Always show runtime.runFinalizers and runtime.runCleanups as
	// context that this goroutine is running finalizers or cleanups,
	// otherwise there is no obvious indicator.
	//
	// TODO(prattmic): A more general approach would be to always show the
	// outermost frame (besides runtime.goexit), even if it is a runtime.
	// Hiding the outermost frame allows the apparent outermost frame to
	// change across different traces, which seems impossible.
	//
	// Unfortunately, implementing this requires looking ahead at the next
	// frame, which goes against traceback's incremental approach (see big
	// comment in traceback1).
	if sf.funcID == abi.FuncID_runFinalizers || sf.funcID == abi.FuncID_runCleanups {
		return true
	}

	name := sf.name()

	// Special case: always show runtime.gopanic frame
	// in the middle of a stack trace, so that we can
	// see the boundary between ordinary code and
	// panic-induced deferred code.
	// See golang.org/issue/5832.
	if name == "runtime.gopanic" && !firstFrame {
		return true
	}

	// Otherwise show only non-runtime functions and exported runtime functions.
	return bytealg.IndexByteString(name, '.') >= 0 && (!stringslite.HasPrefix(name, "runtime.") || isExportedRuntime(name))
}
// isExportedRuntime reports whether name is an exported runtime function.
// It is only for runtime functions, so ASCII A-Z is fine.
func isExportedRuntime(name string) bool {
	// Strip the package qualifier; anything outside package runtime
	// cannot be an exported runtime function.
	sym, ok := stringslite.CutPrefix(name, "runtime.")
	if !ok {
		return false
	}

	// Split an optional receiver type from the method name at the last
	// dot, e.g. runtime.(*Func).Entry -> receiver "(*Func)", name "Entry".
	recv := ""
	dot := -1
	for k := len(sym) - 1; k >= 0; k-- {
		if sym[k] == '.' {
			dot = k
			break
		}
	}
	if dot >= 0 {
		recv = sym[:dot]
		sym = sym[dot+1:]
		// Unwrap "(*T)" to "T" for pointer receivers.
		if len(recv) >= 3 && recv[0] == '(' && recv[1] == '*' && recv[len(recv)-1] == ')' {
			recv = recv[2 : len(recv)-1]
		}
	}

	// Exported functions and exported methods on exported types.
	if len(sym) == 0 || sym[0] < 'A' || sym[0] > 'Z' {
		return false
	}
	return len(recv) == 0 || ('A' <= recv[0] && recv[0] <= 'Z')
}
// elideWrapperCalling reports whether a wrapper function that called
// function id should be elided from stack traces.
func elideWrapperCalling(id abi.FuncID) bool {
	// If the wrapper called a panic function instead of the
	// wrapped function, we want to include it in stacks.
	switch id {
	case abi.FuncID_gopanic, abi.FuncID_sigpanic, abi.FuncID_panicwrap:
		return false
	}
	return true
}
// gStatusStrings maps a goroutine status value (with the _Gscan bit
// dropped) to the short description printed in goroutine headers.
var gStatusStrings = [...]string{
	_Gidle:      "idle",
	_Grunnable:  "runnable",
	_Grunning:   "running",
	_Gsyscall:   "syscall",
	_Gwaiting:   "waiting",
	_Gdead:      "dead",
	_Gcopystack: "copystack",
	_Gleaked:    "leaked",
	_Gpreempted: "preempted",
	_Gdeadextra: "waiting for cgo callback",
}
// goroutineheader prints the header line for a goroutine traceback:
// the goroutine id, optional gp/m debug info, its status (with wait
// reason, scan/leaked/durable markers, block time, thread lock, and
// synctest bubble id where applicable), and any profiler labels.
func goroutineheader(gp *g) {
	level, _, _ := gotraceback()

	gpstatus := readgstatus(gp)

	isScan := gpstatus&_Gscan != 0
	gpstatus &^= _Gscan // drop the scan bit

	// Basic string status.
	// Note: gpstatus is unsigned, so only the upper bound needs checking
	// (a "0 <= gpstatus" test would be tautological; see staticcheck SA4003).
	var status string
	if gpstatus < uint32(len(gStatusStrings)) {
		status = gStatusStrings[gpstatus]
	} else {
		status = "???"
	}

	// Override.
	if (gpstatus == _Gwaiting || gpstatus == _Gleaked) && gp.waitreason != waitReasonZero {
		status = gp.waitreason.String()
	}

	// approx time the G is blocked, in minutes
	var waitfor int64
	if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
		waitfor = (nanotime() - gp.waitsince) / 60e9
	}
	print("goroutine ", gp.goid)
	if gp.m != nil && gp.m.throwing >= throwTypeRuntime && gp == gp.m.curg || level >= 2 {
		print(" gp=", gp)
		if gp.m != nil {
			print(" m=", gp.m.id, " mp=", gp.m)
		} else {
			print(" m=nil")
		}
	}
	print(" [", status)
	if gpstatus == _Gleaked {
		print(" (leaked)")
	}
	if isScan {
		print(" (scan)")
	}
	if bubble := gp.bubble; bubble != nil &&
		gpstatus == _Gwaiting &&
		gp.waitreason.isIdleInSynctest() &&
		!stringslite.HasSuffix(status, "(durable)") {
		// If this isn't a status where the name includes a (durable)
		// suffix to distinguish it from the non-durable form, add it here.
		print(" (durable)")
	}
	if waitfor >= 1 {
		print(", ", waitfor, " minutes")
	}
	if gp.lockedm != 0 {
		print(", locked to thread")
	}
	if bubble := gp.bubble; bubble != nil {
		print(", synctest bubble ", bubble.id)
	}
	print("]")
	if gp.labels != nil && debug.tracebacklabels.Load() == 1 {
		labels := (*label.Set)(gp.labels).List
		if len(labels) > 0 {
			print(" {")
			for i, kv := range labels {
				// Try to be nice and only quote the keys/values if one of them has characters that need quoting or escaping.
				printq := func(s string) {
					if tracebackStringNeedsQuoting(s) {
						print(quoted(s))
					} else {
						print(s)
					}
				}
				printq(kv.Key)
				print(": ")
				printq(kv.Value)
				if i < len(labels)-1 {
					print(", ")
				}
			}
			print("}")
		}
	}
	print(":\n")
}
// tracebackStringNeedsQuoting reports whether s contains any rune
// outside the safe set [a-zA-Z0-9./_] and therefore must be quoted
// before being printed as a traceback label.
func tracebackStringNeedsQuoting(s string) bool {
	for _, r := range s {
		switch {
		case 'a' <= r && r <= 'z':
		case 'A' <= r && r <= 'Z':
		case '0' <= r && r <= '9':
		case r == '.', r == '/', r == '_':
		default:
			return true
		}
	}
	return false
}
// tracebackothers prints the stack trace of every live goroutine
// other than me, with no additional filtering.
func tracebackothers(me *g) {
	all := func(*g) bool { return true }
	tracebacksomeothers(me, all)
}
// tracebacksomeothers prints the stack trace of the current M's
// goroutine (if it isn't me) followed by every other live goroutine
// for which showf returns true. System goroutines are skipped unless
// GOTRACEBACK requests them (level >= 2).
func tracebacksomeothers(me *g, showf func(*g) bool) {
	level, _, _ := gotraceback()

	// Show the current goroutine first, if we haven't already.
	curgp := getg().m.curg
	if curgp != nil && curgp != me {
		print("\n")
		goroutineheader(curgp)
		traceback(^uintptr(0), ^uintptr(0), 0, curgp)
	}

	// We can't call locking forEachG here because this may be during fatal
	// throw/panic, where locking could be out-of-order or a direct
	// deadlock.
	//
	// Instead, use forEachGRace, which requires no locking. We don't lock
	// against concurrent creation of new Gs, but even with allglock we may
	// miss Gs created after this loop.
	forEachGRace(func(gp *g) {
		if gp == me || gp == curgp {
			// Already printed above.
			return
		}
		if status := readgstatus(gp); status == _Gdead || status == _Gdeadextra {
			// Dead goroutines have no stack to print.
			return
		}
		if !showf(gp) {
			return
		}
		if isSystemGoroutine(gp, false) && level < 2 {
			return
		}
		print("\n")
		goroutineheader(gp)
		// Note: gp.m == getg().m occurs when tracebackothers is called
		// from a signal handler initiated during a systemstack call.
		// The original G is still in the running state, and we want to
		// print its stack.
		//
		// There's a small window of time in exitsyscall where a goroutine could be
		// in _Grunning as it's exiting a syscall. This could be the case even if the
		// world is stopped or frozen.
		//
		// This is OK because the goroutine will not exit the syscall while the world
		// is stopped or frozen. This is also why it's safe to check syscallsp here,
		// and safe to take the goroutine's stack trace. The syscall path mutates
		// syscallsp only just before exiting the syscall.
		if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning && gp.syscallsp == 0 {
			print("\tgoroutine running on other thread; stack unavailable\n")
			printcreatedby(gp)
		} else {
			traceback(^uintptr(0), ^uintptr(0), 0, gp)
		}
	})
}
// tracebackHexdump hexdumps part of stk around frame.sp and frame.fp
// for debugging purposes. If the address bad is included in the
// hexdumped range, it will mark it as well.
func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
	const expand = 32 * goarch.PtrSize
	const maxExpand = 256 * goarch.PtrSize

	// Start with the window [frame.sp, frame.sp] and widen it to take
	// in frame.fp when that is set.
	lo, hi := frame.sp, frame.sp
	if frame.fp != 0 {
		lo = min(lo, frame.fp)
		hi = max(hi, frame.fp)
	}

	// Pad the window by expand on each side, but keep it within
	// maxExpand of frame.sp and within the stack bounds. Sequential
	// clamping is exactly a max/min over the candidates.
	lo = max(lo-expand, frame.sp-maxExpand, stk.lo)
	hi = min(hi+expand, frame.sp+maxExpand, stk.hi)

	// Print the hex dump, marking SP, FP, and the bad address as the
	// dump passes them.
	print("stack: frame={sp:", hex(frame.sp), ", fp:", hex(frame.fp), "} stack=[", hex(stk.lo), ",", hex(stk.hi), ")\n")
	hexdumpWords(lo, hi-lo, func(p uintptr, m hexdumpMarker) {
		if p == frame.fp {
			m.start()
			println("FP")
		}
		if p == frame.sp {
			m.start()
			println("SP")
		}
		if p == bad {
			m.start()
			println("bad")
		}
	})
}
// isSystemGoroutine reports whether the goroutine g must be omitted
// in stack dumps and deadlock detector. This is any goroutine that
// starts at a runtime.* entry point, except for runtime.main,
// runtime.handleAsyncEvent (wasm only) and sometimes
// runtime.runFinalizers/runtime.runCleanups.
//
// If fixed is true, any goroutine that can vary between user and
// system (that is, the finalizer goroutine) is considered a user
// goroutine.
func isSystemGoroutine(gp *g, fixed bool) bool {
	// Keep this in sync with internal/trace.IsSystemGoroutine.
	f := findfunc(gp.startpc)
	if !f.valid() {
		return false
	}
	switch f.funcID {
	case abi.FuncID_runtime_main, abi.FuncID_corostart, abi.FuncID_handleAsyncEvent:
		// Always user goroutines.
		return false
	case abi.FuncID_runFinalizers:
		// The finalizer goroutine counts as a user goroutine while
		// it's calling back into user code.
		if fixed {
			// This goroutine can vary. In fixed mode,
			// always consider it a user goroutine.
			return false
		}
		return fingStatus.Load()&fingRunningFinalizer == 0
	case abi.FuncID_runCleanups:
		// Likewise, the cleanup goroutines count as user goroutines
		// while they're calling back into user code.
		if fixed {
			// This goroutine can vary. In fixed mode,
			// always consider it a user goroutine.
			return false
		}
		return !gp.runningCleanups.Load()
	}
	return stringslite.HasPrefix(funcname(f), "runtime.")
}
// SetCgoTraceback records three C functions to use to gather
// traceback information from C code and to convert that traceback
// information into symbolic information. These are used when printing
// stack traces for a program that uses cgo.
//
// The traceback and context functions may be called from a signal
// handler, and must therefore use only async-signal safe functions.
// The symbolizer function may be called while the program is
// crashing, and so must be cautious about using memory. None of the
// functions may call back into Go.
//
// The context function will be called with a single argument, a
// pointer to a struct:
//
// struct {
// Context uintptr
// }
//
// In C syntax, this struct will be
//
// struct {
// uintptr_t Context;
// };
//
// If the Context field is 0, the context function is being called to
// record the current traceback context. It should record in the
// Context field whatever information is needed about the current
// point of execution to later produce a stack trace, probably the
// stack pointer and PC. In this case the context function will be
// called from C code.
//
// If the Context field is not 0, then it is a value returned by a
// previous call to the context function. This case is called when the
// context is no longer needed; that is, when the Go code is returning
// to its C code caller. This permits the context function to release
// any associated resources.
//
// While it would be correct for the context function to record a
// complete a stack trace whenever it is called, and simply copy that
// out in the traceback function, in a typical program the context
// function will be called many times without ever recording a
// traceback for that context. Recording a complete stack trace in a
// call to the context function is likely to be inefficient.
//
// The traceback function will be called with a single argument, a
// pointer to a struct:
//
// struct {
// Context uintptr
// SigContext uintptr
// Buf *uintptr
// Max uintptr
// }
//
// In C syntax, this struct will be
//
// struct {
// uintptr_t Context;
// uintptr_t SigContext;
// uintptr_t* Buf;
// uintptr_t Max;
// };
//
// The Context field will be zero to gather a traceback from the
// current program execution point. In this case, the traceback
// function will be called from C code.
//
// Otherwise Context will be a value previously returned by a call to
// the context function. The traceback function should gather a stack
// trace from that saved point in the program execution. The traceback
// function may be called from an execution thread other than the one
// that recorded the context, but only when the context is known to be
// valid and unchanging. The traceback function may also be called
// deeper in the call stack on the same thread that recorded the
// context. The traceback function may be called multiple times with
// the same Context value; it will usually be appropriate to cache the
// result, if possible, the first time this is called for a specific
// context value.
//
// If the traceback function is called from a signal handler on a Unix
// system, SigContext will be the signal context argument passed to
// the signal handler (a C ucontext_t* cast to uintptr_t). This may be
// used to start tracing at the point where the signal occurred. If
// the traceback function is not called from a signal handler,
// SigContext will be zero.
//
// Buf is where the traceback information should be stored. It should
// be PC values, such that Buf[0] is the PC of the caller, Buf[1] is
// the PC of that function's caller, and so on. Max is the maximum
// number of entries to store. The function should store a zero to
// indicate the top of the stack, or that the caller is on a different
// stack, presumably a Go stack.
//
// Unlike runtime.Callers, the PC values returned should, when passed
// to the symbolizer function, return the file/line of the call
// instruction. No additional subtraction is required or appropriate.
//
// On all platforms, the traceback function is invoked when a call from
// Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le,
// linux/arm64, and freebsd/amd64, the traceback function is also invoked
// when a signal is received by a thread that is executing a cgo call.
// The traceback function should not make assumptions about when it is
// called, as future versions of Go may make additional calls.
//
// The symbolizer function will be called with a single argument, a
// pointer to a struct:
//
// struct {
// PC uintptr // program counter to fetch information for
// File *byte // file name (NUL terminated)
// Lineno uintptr // line number
// Func *byte // function name (NUL terminated)
// Entry uintptr // function entry point
// More uintptr // set non-zero if more info for this PC
// Data uintptr // unused by runtime, available for function
// }
//
// In C syntax, this struct will be
//
// struct {
// uintptr_t PC;
// char* File;
// uintptr_t Lineno;
// char* Func;
// uintptr_t Entry;
// uintptr_t More;
// uintptr_t Data;
// };
//
// The PC field will be a value returned by a call to the traceback
// function.
//
// The first time the function is called for a particular traceback,
// all the fields except PC will be 0. The function should fill in the
// other fields if possible, setting them to 0/nil if the information
// is not available. The Data field may be used to store any useful
// information across calls. The More field should be set to non-zero
// if there is more information for this PC, zero otherwise. If More
// is set non-zero, the function will be called again with the same
// PC, and may return different information (this is intended for use
// with inlined functions). If More is zero, the function will be
// called with the next PC value in the traceback. When the traceback
// is complete, the function will be called once more with PC set to
// zero; this may be used to free any information. Each call will
// leave the fields of the struct set to the same values they had upon
// return, except for the PC field when the More field is zero. The
// function must not keep a copy of the struct pointer between calls.
//
// When calling SetCgoTraceback, the version argument is the version
// number of the structs that the functions expect to receive.
// Currently this must be zero.
//
// The symbolizer function may be nil, in which case the results of
// the traceback function will be displayed as numbers. If the
// traceback function is nil, the symbolizer function will never be
// called. The context function may be nil, in which case the
// traceback function will only be called with the context field set
// to zero. If the context function is nil, then calls from Go to C
// to Go will not show a traceback for the C portion of the call stack.
//
// SetCgoTraceback should be called only once, ideally from an init function.
func SetCgoTraceback(version int, traceback, context, symbolizer unsafe.Pointer) {
	// Only version 0 of the callback structs is defined so far.
	if version != 0 {
		panic("unsupported version")
	}

	// Repeated calls are tolerated only if they re-register the exact
	// same function pointers.
	if cgoTraceback != nil && cgoTraceback != traceback ||
		cgoContext != nil && cgoContext != context ||
		cgoSymbolizer != nil && cgoSymbolizer != symbolizer {
		panic("call SetCgoTraceback only once")
	}

	cgoTraceback = traceback
	cgoContext = context
	cgoSymbolizer = symbolizer

	// Forward the registered functions to the cgo support code, if it
	// is linked in.
	if _cgo_set_traceback_functions != nil {
		// Pass all three pointers to the C side in one struct.
		type cgoSetTracebackFunctionsArg struct {
			traceback  unsafe.Pointer
			context    unsafe.Pointer
			symbolizer unsafe.Pointer
		}
		arg := cgoSetTracebackFunctionsArg{
			traceback:  traceback,
			context:    context,
			symbolizer: symbolizer,
		}
		cgocall(_cgo_set_traceback_functions, noescape(unsafe.Pointer(&arg)))
	}
}
// C function pointers registered by SetCgoTraceback; nil until then.
var cgoTraceback unsafe.Pointer
var cgoContext unsafe.Pointer
var cgoSymbolizer unsafe.Pointer
// cgoTracebackAvailable reports whether cgo tracebacks can be collected.
func cgoTracebackAvailable() bool {
	// - The traceback function must be registered via SetCgoTraceback.
	// - This must be a cgo binary (providing _cgo_call_traceback_function).
	return cgoTraceback != nil && _cgo_call_traceback_function != nil
}
// cgoSymbolizerAvailable reports whether cgo frames can be symbolized.
func cgoSymbolizerAvailable() bool {
	// - The symbolizer function must be registered via SetCgoTraceback.
	// - This must be a cgo binary (providing _cgo_call_symbolizer_function).
	return cgoSymbolizer != nil && _cgo_call_symbolizer_function != nil
}
// cgoTracebackArg is the type passed to cgoTraceback.
//
// Field meanings match the traceback struct documented in SetCgoTraceback.
type cgoTracebackArg struct {
	context    uintptr  // saved context value, or 0 to trace the current point
	sigContext uintptr  // signal context (ucontext_t*), or 0 outside signal handlers
	buf        *uintptr // output: PC values, zero-terminated
	max        uintptr  // capacity of buf
}

// cgoContextArg is the type passed to the context function.
type cgoContextArg struct {
	context uintptr // 0 to record a new context; non-zero to release one
}

// cgoSymbolizerArg is the type passed to cgoSymbolizer.
//
// Field meanings match the symbolizer struct documented in SetCgoTraceback.
type cgoSymbolizerArg struct {
	pc       uintptr // program counter to fetch information for
	file     *byte   // file name (NUL terminated)
	lineno   uintptr // line number
	funcName *byte   // function name (NUL terminated)
	entry    uintptr // function entry point
	more     uintptr // set non-zero if more info for this PC
	data     uintptr // unused by runtime, available for function
}
// printCgoTraceback prints a traceback of callers. A zero PC
// terminates the caller list.
func printCgoTraceback(callers *cgoCallers) {
	if !cgoSymbolizerAvailable() {
		// No symbolizer registered: print the raw PCs.
		for _, c := range callers {
			if c == 0 {
				break
			}
			print("non-Go function at pc=", hex(c), "\n")
		}
		return
	}

	// Print every frame unconditionally.
	commitFrame := func() (pr, stop bool) { return true, false }
	var arg cgoSymbolizerArg
	for _, c := range callers {
		if c == 0 {
			break
		}
		printOneCgoTraceback(c, commitFrame, &arg)
	}
	// Final call with pc == 0 so the symbolizer can free any
	// per-traceback state (see the SetCgoTraceback contract).
	arg.pc = 0
	callCgoSymbolizer(&arg)
}
// printOneCgoTraceback prints the traceback of a single cgo caller.
// This can print more than one line because of inlining.
// It returns the "stop" result of commitFrame.
//
// Preconditions: cgoSymbolizerAvailable returns true.
func printOneCgoTraceback(pc uintptr, commitFrame func() (pr, stop bool), arg *cgoSymbolizerArg) bool {
	arg.pc = pc
	// Loop because the symbolizer may report several (inlined) frames
	// for a single PC, signaled via arg.more.
	for {
		if pr, stop := commitFrame(); stop {
			return true
		} else if !pr {
			// This frame should not be printed; ask again.
			continue
		}
		callCgoSymbolizer(arg)
		if arg.funcName != nil {
			// Note that we don't print any argument
			// information here, not even parentheses.
			// The symbolizer must add that if appropriate.
			println(gostringnocopy(arg.funcName))
		} else {
			println("non-Go function")
		}
		print("\t")
		if arg.file != nil {
			print(gostringnocopy(arg.file), ":", arg.lineno, " ")
		}
		print("pc=", hex(pc), "\n")
		if arg.more == 0 {
			// No more inlined frames for this PC.
			return false
		}
	}
}
// callCgoSymbolizer calls the cgoSymbolizer function.
//
// Preconditions: cgoSymbolizerAvailable returns true.
func callCgoSymbolizer(arg *cgoSymbolizerArg) {
	call := cgocall
	if panicking.Load() > 0 || getg().m.curg != getg() {
		// We do not want to call into the scheduler when panicking
		// or when on the system stack.
		call = asmcgocall
	}
	// Tell the sanitizers the C symbolizer is about to write to *arg.
	if msanenabled {
		msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
	}
	if asanenabled {
		asanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
	}
	call(_cgo_call_symbolizer_function, noescape(unsafe.Pointer(arg)))
}
// cgoContextPCs gets the PC values from a cgo traceback into buf,
// starting from the saved context ctxt.
//
// Preconditions: cgoTracebackAvailable returns true.
func cgoContextPCs(ctxt uintptr, buf []uintptr) {
	call := cgocall
	if panicking.Load() > 0 || getg().m.curg != getg() {
		// We do not want to call into the scheduler when panicking
		// or when on the system stack.
		call = asmcgocall
	}
	arg := cgoTracebackArg{
		context: ctxt,
		buf:     (*uintptr)(noescape(unsafe.Pointer(&buf[0]))),
		max:     uintptr(len(buf)),
	}
	// Tell the sanitizers C code is about to write to arg.
	if msanenabled {
		msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
	}
	if asanenabled {
		asanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
	}
	call(_cgo_call_traceback_function, noescape(unsafe.Pointer(&arg)))
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Trace buffer management.
package runtime
import (
"internal/runtime/sys"
"internal/trace/tracev2"
"unsafe"
)
// Maximum number of bytes required to encode uint64 in base-128
// (64 bits at 7 bits per byte rounds up to 10 bytes).
const traceBytesPerNumber = 10
// traceWriter is the interface for writing all trace data.
//
// This type is passed around as a value, and all of its methods return
// a new traceWriter. This allows for chaining together calls in a fluent-style
// API. This is partly stylistic, and very slightly for performance, since
// the compiler can destructure this value and pass it between calls as
// just regular arguments. However, this style is not load-bearing, and
// we can change it if it's deemed too error-prone.
type traceWriter struct {
	traceLocker                    // generation and M context this writer operates under
	exp         tracev2.Experiment // experiment stream written to (NoExperiment for the main stream)
	*traceBuf                      // destination buffer; may be nil until ensure/refill
}
// writer returns a traceWriter that writes into the current M's stream.
//
// Once this is called, the caller must guard against stack growth until
// end is called on it. Therefore, it's highly recommended to use this
// API in a "fluent" style, for example tl.writer().event(...).end().
// Better yet, callers just looking to write events should use eventWriter
// when possible, which is a much safer wrapper around this function.
//
// nosplit to allow for safe reentrant tracing from stack growth paths.
//
//go:nosplit
func (tl traceLocker) writer() traceWriter {
	if debugTraceReentrancy {
		// Checks that the invariants of this function are being upheld.
		// Setting throwsplit turns any stack growth while the writer is
		// live into a fatal error, enforcing the rule above; end restores
		// the saved value.
		gp := getg()
		if gp == gp.m.curg {
			tl.mp.trace.oldthrowsplit = gp.throwsplit
			gp.throwsplit = true
		}
	}
	return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2][tracev2.NoExperiment]}
}
// unsafeTraceWriter produces a traceWriter that doesn't lock the trace.
//
// It should only be used in contexts where either:
// - Another traceLocker is held.
// - trace.gen is prevented from advancing.
//
// This does not have the same stack growth restrictions as traceLocker.writer.
//
// buf may be nil.
func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter {
	// The traceLocker carries only the generation; it holds no M and
	// takes no lock.
	return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
}
// event writes out the bytes of an event into the event stream.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) event(ev tracev2.EventType, args ...traceArg) traceWriter {
	// N.B. Everything in this call must be nosplit to maintain
	// the stack growth related invariants for writing events.

	// Make sure we have room: 1 byte for the event type plus up to
	// traceBytesPerNumber for the timestamp and for each argument.
	w, _ = w.ensure(1 + (len(args)+1)*traceBytesPerNumber)

	// Compute the timestamp diff that we'll put in the trace.
	// Timestamps are forced to be strictly increasing within a buffer,
	// so the delta is always at least 1.
	ts := traceClockNow()
	if ts <= w.traceBuf.lastTime {
		ts = w.traceBuf.lastTime + 1
	}
	tsDiff := uint64(ts - w.traceBuf.lastTime)
	w.traceBuf.lastTime = ts

	// Write out event: type byte, timestamp delta, then the arguments.
	w.byte(byte(ev))
	w.varint(tsDiff)
	for _, arg := range args {
		w.varint(uint64(arg))
	}
	return w
}
// end writes the buffer back into the m.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) end() {
	if w.mp == nil {
		// Tolerate a nil mp. It makes code that creates traceWriters directly
		// less error-prone.
		return
	}
	// Hand the buffer back to the M's per-generation, per-experiment slot.
	w.mp.trace.buf[w.gen%2][w.exp] = w.traceBuf
	if debugTraceReentrancy {
		// The writer is no longer live, we can drop throwsplit (if it wasn't
		// already set upon entry).
		gp := getg()
		if gp == gp.m.curg {
			gp.throwsplit = w.mp.trace.oldthrowsplit
		}
	}
}
// ensure makes sure that at least maxSize bytes are available to write.
//
// Returns whether the buffer was flushed.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
	// Refill if there's no buffer yet or the current one lacks room.
	refill := w.traceBuf == nil || !w.available(maxSize)
	if refill {
		w = w.refill()
	}
	return w, refill
}
// flush puts w.traceBuf on the queue of full buffers.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) flush() traceWriter {
	systemstack(func() {
		// trace.lock protects the full-buffer queues.
		lock(&trace.lock)
		if w.traceBuf != nil {
			traceBufFlush(w.traceBuf, w.gen)
		}
		unlock(&trace.lock)
	})
	// The writer no longer owns a buffer; the next write will refill.
	w.traceBuf = nil
	return w
}
// refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer
// from the free list, allocating a new one if the free list is empty.
func (w traceWriter) refill() traceWriter {
	systemstack(func() {
		// trace.lock protects both the full-buffer queues and the
		// free list (trace.empty).
		lock(&trace.lock)
		if w.traceBuf != nil {
			traceBufFlush(w.traceBuf, w.gen)
		}
		if trace.empty != nil {
			// Reuse a buffer from the free list.
			w.traceBuf = trace.empty
			trace.empty = w.traceBuf.link
			unlock(&trace.lock)
		} else {
			// No free buffers: allocate a fresh one outside the lock.
			unlock(&trace.lock)
			w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys, "trace buffer"))
			if w.traceBuf == nil {
				throw("trace: out of memory")
			}
		}
	})
	// Initialize the buffer. Timestamps are strictly increasing per buffer.
	ts := traceClockNow()
	if ts <= w.traceBuf.lastTime {
		ts = w.traceBuf.lastTime + 1
	}
	w.traceBuf.lastTime = ts
	w.traceBuf.link = nil
	w.traceBuf.pos = 0

	// Tolerate a nil mp. ^uint64(0) is the sentinel for "no M".
	mID := ^uint64(0)
	if w.mp != nil {
		mID = w.mp.procid
	}

	// Write the buffer's header.
	if w.exp == tracev2.NoExperiment {
		w.byte(byte(tracev2.EvEventBatch))
	} else {
		// Experimental batches carry the experiment ID after the type.
		w.byte(byte(tracev2.EvExperimentalBatch))
		w.byte(byte(w.exp))
	}
	w.varint(uint64(w.gen))
	w.varint(mID)
	w.varint(uint64(ts))
	// Reserve space for the batch length; traceBufFlush fills it in.
	w.traceBuf.lenPos = w.varintReserve()
	return w
}
// expWriter returns a traceWriter that writes into the current M's stream for
// the given experiment.
//
// Unlike writer, this performs no reentrancy bookkeeping.
func (tl traceLocker) expWriter(exp tracev2.Experiment) traceWriter {
	return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2][exp], exp: exp}
}
// unsafeTraceExpWriter produces a traceWriter for experimental trace batches
// that doesn't lock the trace. Data written to experimental batches need not
// conform to the standard trace format.
//
// It should only be used in contexts where either:
// - Another traceLocker is held.
// - trace.gen is prevented from advancing.
//
// This does not have the same stack growth restrictions as traceLocker.writer.
//
// buf may be nil.
func unsafeTraceExpWriter(gen uintptr, buf *traceBuf, exp tracev2.Experiment) traceWriter {
	// Like unsafeTraceWriter, but tagged with an experiment ID.
	return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf, exp: exp}
}
// traceBufQueue is a FIFO of traceBufs.
type traceBufQueue struct {
	head, tail *traceBuf // singly linked via traceBuf.link; both nil when empty
}
// push appends buf to the tail of the queue.
func (q *traceBufQueue) push(buf *traceBuf) {
	buf.link = nil
	last := q.tail
	q.tail = buf
	if q.head == nil {
		q.head = buf
		return
	}
	last.link = buf
}
// pop removes and returns the buffer at the head of the queue,
// or nil if the queue is empty.
func (q *traceBufQueue) pop() *traceBuf {
	buf := q.head
	if buf == nil {
		return nil
	}
	if q.head = buf.link; q.head == nil {
		// Queue is now empty; clear the tail too.
		q.tail = nil
	}
	buf.link = nil
	return buf
}
// empty reports whether q has no buffers queued.
func (q *traceBufQueue) empty() bool {
	return q.head == nil
}
// traceBufHeader is the bookkeeping state at the start of a traceBuf.
// (traceBufs themselves are per-M; see traceBuf below.)
type traceBufHeader struct {
	link     *traceBuf // in trace.empty/full
	lastTime traceTime // when we wrote the last event
	pos      int       // next write offset in arr
	lenPos   int       // position of batch length value
}
// traceBuf is per-M tracing buffer.
//
// traceBufs are allocated with sysAlloc (see traceWriter.refill),
// never from the Go heap, hence the NotInHeap marker.
//
// TODO(mknyszek): Rename traceBuf to traceBatch, since they map 1:1 with event batches.
type traceBuf struct {
	_ sys.NotInHeap
	traceBufHeader
	arr [tracev2.MaxBatchSize - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}
// byte appends v to buf.
//
// The caller must have guaranteed room (see traceWriter.ensure); there
// is no room check here beyond the implicit bounds check on arr.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
// varint appends v to buf in little-endian-base-128 encoding.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	// Slice out the maximal encoding up front; this also acts as a
	// single bounds check for all the writes below.
	arr := buf.arr[pos : pos+traceBytesPerNumber]
	for i := range arr {
		if v < 0x80 {
			// Final byte: continuation bit clear terminates the varint.
			pos += i + 1
			arr[i] = byte(v)
			break
		}
		// More bytes follow: set the continuation bit.
		arr[i] = 0x80 | byte(v)
		v >>= 7
	}
	buf.pos = pos
}
// varintReserve reserves enough space in buf to hold any varint.
//
// Space reserved this way can be filled in with the varintAt method.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) varintReserve() int {
	// Skip traceBytesPerNumber bytes and return where they start.
	p := buf.pos
	buf.pos += traceBytesPerNumber
	return p
}
// stringData appends s's data directly to buf.
//
// Assumes the caller already ensured there is room for all of s;
// copy silently truncates at the end of arr otherwise.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) stringData(s string) {
	buf.pos += copy(buf.arr[buf.pos:], s)
}
// available reports whether buf has at least size bytes of room left.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) available(size int) bool {
	return len(buf.arr)-buf.pos >= size
}
// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the caller
// needs to reserve space for a varint but can't populate it until later.
// Use varintReserve to reserve this space.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (buf *traceBuf) varintAt(pos int, v uint64) {
	// Emit a fixed-width encoding: continuation bit set on all but the
	// final byte, so the value always occupies traceBytesPerNumber bytes.
	for i := 0; i < traceBytesPerNumber; i++ {
		if i < traceBytesPerNumber-1 {
			buf.arr[pos] = 0x80 | byte(v)
		} else {
			buf.arr[pos] = byte(v)
		}
		v >>= 7
		pos++
	}
	// Defensive check: after ten 7-bit shifts a uint64 is always 0,
	// so this throw should be unreachable.
	if v != 0 {
		throw("v could not fit in traceBytesPerNumber")
	}
}
// traceBufFlush flushes a trace buffer: fills in the batch length
// reserved by refill and moves buf onto the full queue for gen.
//
// Must run on the system stack because trace.lock must be held.
//
//go:systemstack
func traceBufFlush(buf *traceBuf, gen uintptr) {
	assertLockHeld(&trace.lock)

	// Write out the non-header length of the batch in the header.
	//
	// Note: the length of the header is not included to make it easier
	// to calculate this value when deserializing and reserializing the
	// trace. Varints can have additional padding of zero bits that is
	// quite difficult to preserve, and if we include the header we
	// force serializers to do more work. Nothing else actually needs
	// padding.
	buf.varintAt(buf.lenPos, uint64(buf.pos-(buf.lenPos+traceBytesPerNumber)))
	trace.full[gen%2].push(buf)

	// Notify the scheduler that there's work available and that the trace
	// reader should be scheduled.
	if !trace.workAvailable.Load() {
		trace.workAvailable.Store(true)
	}
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CPU profile -> trace
package runtime
import "internal/trace/tracev2"
// traceInitReadCPU initializes CPU profile -> tracer state for tracing.
//
// Stores fresh profBufs in trace.cpuLogRead (one per generation parity)
// for traceReadCPU to read from.
func traceInitReadCPU() {
	if traceEnabled() {
		throw("traceInitReadCPU called with trace enabled")
	}
	// Create new profBuf for CPU samples that will be emitted as events.
	// Format: after the timestamp, header is [pp.id, gp.goid, mp.procid].
	trace.cpuLogRead[0] = newProfBuf(3, profBufWordCount, profBufTagCount)
	trace.cpuLogRead[1] = newProfBuf(3, profBufWordCount, profBufTagCount)
	// We must not acquire trace.signalLock outside of a signal handler: a
	// profiling signal may arrive at any time and try to acquire it, leading to
	// deadlock. Because we can't use that lock to protect updates to
	// trace.cpuLogWrite (only use of the structure it references), reads and
	// writes of the pointer must be atomic. (And although this field is never
	// the sole pointer to the profBuf value, it's best to allow a write barrier
	// here.)
	trace.cpuLogWrite[0].Store(trace.cpuLogRead[0])
	trace.cpuLogWrite[1].Store(trace.cpuLogRead[1])
}
// traceStartReadCPU creates a goroutine to start reading CPU profile
// data into an active trace.
//
// traceAdvanceSema must be held.
func traceStartReadCPU() {
	if !traceEnabled() {
		throw("traceStartReadCPU called with trace disabled")
	}
	// Spin up the logger goroutine.
	trace.cpuSleep = newWakeableSleep()
	// Buffered so the goroutine can signal completion without a waiting
	// receiver; traceStopReadCPU receives from trace.cpuLogDone.
	done := make(chan struct{}, 1)
	go func() {
		for traceEnabled() {
			// Sleep here because traceReadCPU is non-blocking. This mirrors
			// how the runtime/pprof package obtains CPU profile data.
			//
			// We can't do a blocking read here because Darwin can't do a
			// wakeup from a signal handler, so all CPU profiling is just
			// non-blocking. See #61768 for more details.
			//
			// Like the runtime/pprof package, even if that bug didn't exist
			// we would still want to do a goroutine-level sleep in between
			// reads to avoid frequent wakeups.
			trace.cpuSleep.sleep(100_000_000)

			// Hold a traceLocker across the read so gen can't advance.
			tl := traceAcquire()
			if !tl.ok() {
				// Tracing disabled.
				break
			}
			keepGoing := traceReadCPU(tl.gen)
			traceRelease(tl)
			if !keepGoing {
				break
			}
		}
		done <- struct{}{}
	}()
	trace.cpuLogDone = done
}
// traceStopReadCPU blocks until the trace CPU reading goroutine exits.
//
// traceAdvanceSema must be held, and tracing must be disabled.
func traceStopReadCPU() {
	if traceEnabled() {
		throw("traceStopReadCPU called with trace enabled")
	}

	// Once we close the profbuf, we'll be in one of two situations:
	// - The logger goroutine has already exited because it observed
	//   that the trace is disabled.
	// - The logger goroutine is asleep.
	//
	// Wake the goroutine so it can observe that the buffer is
	// closed and exit.
	trace.cpuLogWrite[0].Store(nil)
	trace.cpuLogWrite[1].Store(nil)
	trace.cpuLogRead[0].close()
	trace.cpuLogRead[1].close()
	trace.cpuSleep.wake()

	// Wait until the logger goroutine exits.
	<-trace.cpuLogDone

	// Clear state for the next trace.
	trace.cpuLogDone = nil
	trace.cpuLogRead[0] = nil
	trace.cpuLogRead[1] = nil
	trace.cpuSleep.close()
}
// traceReadCPU attempts to read from the provided profBuf[gen%2] and write
// into the trace. Returns true if there might be more to read or false
// if the profBuf is closed or the caller should otherwise stop reading.
//
// The caller is responsible for ensuring that gen does not change. Either
// the caller must be in a traceAcquire/traceRelease block, or must be calling
// with traceAdvanceSema held.
//
// No more than one goroutine may be in traceReadCPU for the same
// profBuf at a time.
//
// Must not run on the system stack because profBuf.read performs race
// operations.
func traceReadCPU(gen uintptr) bool {
	var pcBuf [tracev2.MaxFramesPerStack]uintptr

	data, tags, eof := trace.cpuLogRead[gen%2].read(profBufNonBlocking)
	for len(data) > 0 {
		if len(data) < 4 || data[0] > uint64(len(data)) {
			break // truncated profile
		}
		if data[0] < 4 || tags != nil && len(tags) < 1 {
			break // malformed profile
		}
		if len(tags) < 1 {
			break // mismatched profile records and tags
		}

		// Deserialize the data in the profile buffer.
		// Record layout: [len, timestamp, ppid<<1|hasP, goid, mpid, stk...].
		recordLen := data[0]
		timestamp := data[1]
		ppid := data[2] >> 1
		if hasP := (data[2] & 0b1) != 0; !hasP {
			// No P: use the sentinel ID.
			ppid = ^uint64(0)
		}
		goid := data[3]
		mpid := data[4]
		// NOTE(review): data[4] is read here, but the malformed check above
		// only guarantees data[0] >= 4. This relies on the profBuf always
		// writing 3 header words (so records have at least 5 words) — confirm.
		stk := data[5:recordLen]

		// Overflow records always have their headers contain
		// all zeroes.
		isOverflowRecord := len(stk) == 1 && data[2] == 0 && data[3] == 0 && data[4] == 0

		// Move the data iterator forward.
		data = data[recordLen:]
		// No support here for reporting goroutine tags at the moment; if
		// that information is to be part of the execution trace, we'd
		// probably want to see when the tags are applied and when they
		// change, instead of only seeing them when we get a CPU sample.
		tags = tags[1:]

		if isOverflowRecord {
			// Looks like an overflow record from the profBuf. Not much to
			// do here, we only want to report full records.
			continue
		}

		// Construct the stack for insertion to the stack table.
		// pcBuf[0] is a sentinel marking this as a logical stack.
		nstk := 1
		pcBuf[0] = logicalStackSentinel
		for ; nstk < len(pcBuf) && nstk-1 < len(stk); nstk++ {
			pcBuf[nstk] = uintptr(stk[nstk-1])
		}

		// Write out a trace event.
		w := unsafeTraceWriter(gen, trace.cpuBuf[gen%2])

		// Ensure we have a place to write to.
		var flushed bool
		w, flushed = w.ensure(2 + 5*traceBytesPerNumber /* tracev2.EvCPUSamples + tracev2.EvCPUSample + timestamp + g + m + p + stack ID */)
		if flushed {
			// Annotate the batch as containing strings.
			w.byte(byte(tracev2.EvCPUSamples))
		}

		// Add the stack to the table.
		stackID := trace.stackTab[gen%2].put(pcBuf[:nstk])

		// Write out the CPU sample.
		w.byte(byte(tracev2.EvCPUSample))
		w.varint(timestamp)
		w.varint(mpid)
		w.varint(ppid)
		w.varint(goid)
		w.varint(stackID)

		// Remember the (possibly refilled) buffer for next time.
		trace.cpuBuf[gen%2] = w.traceBuf
	}
	return !eof
}
// traceCPUFlush flushes trace.cpuBuf[gen%2]. The caller must be certain that gen
// has completed and that there are no more writers to it.
func traceCPUFlush(gen uintptr) {
	// Nothing to do if no CPU-sample buffer was ever created for this generation.
	buf := trace.cpuBuf[gen%2]
	if buf == nil {
		return
	}
	// Flush the remaining buffer on the system stack while holding trace.lock.
	systemstack(func() {
		lock(&trace.lock)
		traceBufFlush(buf, gen)
		unlock(&trace.lock)
		trace.cpuBuf[gen%2] = nil
	})
}
// traceCPUSample writes a CPU profile sample stack to the execution tracer's
// profiling buffer. It is called from a signal handler, so is limited in what
// it can do. mp must be the thread that is currently stopped in a signal.
//
// gp and pp may be nil (e.g. near syscalls, or on g0); stk is the sampled
// call stack to record.
func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
	if !traceEnabled() {
		// Tracing is usually turned off; don't spend time acquiring the signal
		// lock unless it's active.
		return
	}
	if mp == nil {
		// Drop samples that don't have an identifiable thread. We can't render
		// this in any useful way anyway.
		return
	}
	// We're going to conditionally write to one of two buffers based on the
	// generation. To make sure we write to the correct one, we need to make
	// sure this thread's trace write flag is set. If it already is, then we're
	// in the tracer and we can just take advantage of that. If it isn't, then
	// we need to acquire it and read the generation.
	locked := false
	if !mp.trace.writing.Load() {
		mp.trace.writing.Store(true)
		locked = true
	}
	gen := trace.gen.Load()
	if gen == 0 {
		// Tracing is disabled, as it turns out. Clear the write flag if necessary
		// and exit.
		if locked {
			mp.trace.writing.Store(false)
		}
		return
	}
	now := traceClockNow()
	// The "header" here is the ID of the M that was running the profiled code,
	// followed by the IDs of the P and goroutine. (For normal CPU profiling, it's
	// usually the number of samples with the given stack.) Near syscalls, pp
	// may be nil. Reporting goid of 0 is fine for either g0 or a nil gp.
	var hdr [3]uint64
	if pp != nil {
		// Overflow records in profBuf have all header values set to zero. Make
		// sure that real headers have at least one bit set.
		hdr[0] = uint64(pp.id)<<1 | 0b1
	} else {
		hdr[0] = 0b10
	}
	if gp != nil {
		hdr[1] = gp.goid
	}
	hdr[2] = mp.procid
	// Allow only one writer at a time. Spinning with CAS (rather than a
	// mutex) is what's available from a signal handler.
	for !trace.signalLock.CompareAndSwap(0, 1) {
		// TODO: Is it safe to osyield here? https://go.dev/issue/52672
		osyield()
	}
	if log := trace.cpuLogWrite[gen%2].Load(); log != nil {
		// Note: we don't pass a tag pointer here (how should profiling tags
		// interact with the execution tracer?), but if we did we'd need to be
		// careful about write barriers. See the long comment in profBuf.write.
		log.write(nil, int64(now), hdr[:], stk)
	}
	trace.signalLock.Store(0)
	// Clear the write flag if we set it earlier.
	if locked {
		mp.trace.writing.Store(false)
	}
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Trace event writing API for trace2runtime.go.
package runtime
import (
"internal/abi"
"internal/runtime/sys"
"internal/trace/tracev2"
)
// traceArg is a simple wrapper type to help ensure that arguments passed
// to traces are well-formed.
type traceArg uint64

// traceEventWriter is the high-level API for writing trace events.
//
// See the comment on traceWriter about style for more details as to why
// this type and its methods are structured the way they are.
type traceEventWriter struct {
	tl traceLocker // The M and generation this writer is bound to.
}
// eventWriter creates a new traceEventWriter. It is the main entrypoint for writing trace events.
//
// Before creating the event writer, this method will emit a status for the current goroutine
// or proc if it exists, and if it hasn't had its status emitted yet. goStatus and procStatus indicate
// what the status of goroutine or P should be immediately *before* the events that are about to
// be written using the eventWriter (if they exist). No status will be written if there's no active
// goroutine or P.
//
// Callers can elect to pass a constant value here if the status is clear (e.g. a goroutine must have
// been Runnable before a GoStart). Otherwise, callers can query the status of either the goroutine
// or P and pass the appropriate status.
//
// In this case, the default status should be tracev2.GoBad or tracev2.ProcBad to help identify bugs sooner.
func (tl traceLocker) eventWriter(goStatus tracev2.GoStatus, procStatus tracev2.ProcStatus) traceEventWriter {
	// Emit the current P's status once per generation, if it hasn't been yet.
	if pp := tl.mp.p.ptr(); pp != nil && !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
		tl.writer().writeProcStatus(uint64(pp.id), procStatus, pp.trace.inSweep).end()
	}
	// Likewise for the current goroutine, if there is one.
	if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
		tl.writer().writeGoStatus(gp.goid, int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
	}
	return tl.rawEventWriter()
}
// rawEventWriter creates a new traceEventWriter without emitting any status events.
//
// It is the caller's responsibility to emit any status events, if necessary.
func (tl traceLocker) rawEventWriter() traceEventWriter {
	return traceEventWriter{tl}
}

// event writes out a single trace event of type ev with the given arguments.
func (e traceEventWriter) event(ev tracev2.EventType, args ...traceArg) {
	e.tl.writer().event(ev, args...).end()
}
// stack takes a stack trace skipping the provided number of frames.
// It then returns a traceArg representing that stack which may be
// passed to write.
func (tl traceLocker) stack(skip int) traceArg {
	return traceArg(traceStack(skip, nil, &trace.stackTab[tl.gen%2]))
}

// startPC takes a start PC for a goroutine and produces a unique
// stack ID for it.
//
// It then returns a traceArg representing that stack which may be
// passed to write.
func (tl traceLocker) startPC(pc uintptr) traceArg {
	// +PCQuantum because makeTraceFrame expects return PCs and subtracts PCQuantum.
	return traceArg(trace.stackTab[tl.gen%2].put([]uintptr{
		logicalStackSentinel,
		startPCForTrace(pc) + sys.PCQuantum,
	}))
}

// string returns a traceArg representing s which may be passed to write.
// The string is assumed to be relatively short and popular, so it may be
// stored for a while in the string dictionary.
func (tl traceLocker) string(s string) traceArg {
	return traceArg(trace.stringTab[tl.gen%2].put(tl.gen, s))
}

// uniqueString returns a traceArg representing s which may be passed to write.
// The string is assumed to be unique or long, so it will be written out to
// the trace eagerly.
func (tl traceLocker) uniqueString(s string) traceArg {
	return traceArg(trace.stringTab[tl.gen%2].emit(tl.gen, s))
}

// rtype returns a traceArg representing typ which may be passed to write.
func (tl traceLocker) rtype(typ *abi.Type) traceArg {
	return traceArg(trace.typeTab[tl.gen%2].put(typ))
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Simple append-only thread-safe hash map for tracing.
// Provides a mapping between variable-length data and a
// unique ID. Subsequent puts of the same data will return
// the same ID. The zero value is ready to use.
//
// Uses a region-based allocation scheme internally, and
// reset clears the whole map.
//
// It avoids doing any high-level Go operations so it's safe
// to use even in sensitive contexts.
package runtime
import (
"internal/cpu"
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// traceMap is a map of a variable-sized array of bytes to a unique ID.
//
// Because traceMap just operates on raw bytes, this type is used as the
// backing store for both the trace string table and trace stack table,
// the latter of which is just an array of PCs.
//
// ID 0 is reserved for arrays of bytes of size zero.
type traceMap struct {
	root atomic.UnsafePointer // *traceMapNode (can't use generics because it's notinheap)
	_    cpu.CacheLinePad     // Padding to keep root and seq on separate cache lines.
	seq  atomic.Uint64        // Monotonic ID allocator; also advanced by stealID.
	_    cpu.CacheLinePad
	mem traceRegionAlloc // Backing storage for nodes and their data.
}

// traceMapNode is an implementation of a lock-free append-only hash-trie
// (a trie of the hash bits).
//
// Key features:
//   - 4-ary trie. Child nodes are indexed by the upper 2 (remaining) bits of the hash.
//     For example, top level uses bits [63:62], next level uses [61:60] and so on.
//   - New nodes are placed at the first empty level encountered.
//   - When the first child is added to a node, the existing value is not moved into a child.
//     This means that you must check the key at each level, not just at the leaf.
//   - No deletion or rebalancing.
//   - Intentionally devolves into a linked list on hash collisions (the hash bits will all
//     get shifted out during iteration, and new nodes will just be appended to the 0th child).
type traceMapNode struct {
	_        sys.NotInHeap
	children [4]atomic.UnsafePointer // *traceMapNode (can't use generics because it's notinheap)
	hash     uintptr                 // Full hash of data; compared before the bytes themselves.
	id       uint64                  // Unique ID assigned from traceMap.seq.
	data     []byte                  // Key bytes, stored in traceMap.mem.
}
// stealID steals an ID from the table, ensuring that it will not
// appear in the table anymore.
//
// It advances the same counter that put uses to assign IDs, so the
// returned ID is never handed out to any data inserted later.
func (tab *traceMap) stealID() uint64 {
	return tab.seq.Add(1)
}
// put inserts the data into the table.
//
// It's always safe for callers to noescape data because put copies its bytes.
//
// Returns a unique ID for the data and whether this is the first time
// the data has been added to the map.
func (tab *traceMap) put(data unsafe.Pointer, size uintptr) (uint64, bool) {
	// ID 0 is reserved for zero-length data; nothing to store.
	if size == 0 {
		return 0, false
	}
	hash := memhash(data, 0, size)

	var newNode *traceMapNode
	m := &tab.root
	hashIter := hash
	for {
		n := (*traceMapNode)(m.Load())
		if n == nil {
			// Try to insert a new map node. We may end up discarding
			// this node if we fail to insert because it turns out the
			// value is already in the map.
			//
			// The discard will only happen if two threads race on inserting
			// the same value. Both might create nodes, but only one will
			// succeed on insertion. If two threads race to insert two
			// different values, then both nodes will *always* get inserted,
			// because the equality checking below will always fail.
			//
			// Performance note: contention on insertion is likely to be
			// higher for small maps, but since this data structure is
			// append-only, either the map stays small because there isn't
			// much activity, or the map gets big and races to insert on
			// the same node are much less likely.
			if newNode == nil {
				newNode = tab.newTraceMapNode(data, size, hash, tab.seq.Add(1))
			}
			if m.CompareAndSwapNoWB(nil, unsafe.Pointer(newNode)) {
				return newNode.id, true
			}
			// Reload n. Because pointers are only stored once,
			// we must have lost the race, and therefore n is not nil
			// anymore.
			n = (*traceMapNode)(m.Load())
		}
		// Check for a match at this level; cheap hash/length compare first,
		// then the bytes themselves.
		if n.hash == hash && uintptr(len(n.data)) == size {
			if memequal(unsafe.Pointer(&n.data[0]), data, size) {
				return n.id, false
			}
		}
		// Descend: pick the child by the top two remaining hash bits, then
		// shift them out for the next level.
		m = &n.children[hashIter>>(8*goarch.PtrSize-2)]
		hashIter <<= 2
	}
}
// newTraceMapNode allocates a fresh node from the map's region allocator,
// copying the key bytes out of data. The node and its data both live in
// non-GC'd memory (tab.mem), so no write barriers are needed.
func (tab *traceMap) newTraceMapNode(data unsafe.Pointer, size, hash uintptr, id uint64) *traceMapNode {
	// Create data array.
	sl := notInHeapSlice{
		array: tab.mem.alloc(size),
		len:   int(size),
		cap:   int(size),
	}
	memmove(unsafe.Pointer(sl.array), data, size)

	// Create metadata structure.
	meta := (*traceMapNode)(unsafe.Pointer(tab.mem.alloc(unsafe.Sizeof(traceMapNode{}))))
	// Install the slice header via notInHeapSlice to avoid a write barrier
	// on the not-in-heap data pointer.
	*(*notInHeapSlice)(unsafe.Pointer(&meta.data)) = sl
	meta.id = id
	meta.hash = hash
	return meta
}
// reset drops all allocated memory from the table and resets it.
//
// The caller must ensure that there are no put operations executing concurrently
// with this function.
func (tab *traceMap) reset() {
	tab.root.Store(nil)
	tab.seq.Store(0)
	// Free all region-allocated nodes and key data in one shot.
	tab.mem.drop()
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Simple not-in-heap bump-pointer traceRegion allocator.
package runtime
import (
"internal/runtime/atomic"
"internal/runtime/sys"
"unsafe"
)
// traceRegionAlloc is a thread-safe region allocator.
// It holds a linked list of traceRegionAllocBlock.
type traceRegionAlloc struct {
	lock     mutex
	dropping atomic.Bool          // For checking invariants.
	current  atomic.UnsafePointer // *traceRegionAllocBlock; the block being bump-allocated from.
	full     *traceRegionAllocBlock // Linked list of exhausted blocks, freed by drop.
}

// traceRegionAllocBlock is a block in traceRegionAlloc.
//
// traceRegionAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceRegionAllocBlocks do
// not need write barriers.
type traceRegionAllocBlock struct {
	_ sys.NotInHeap
	traceRegionAllocBlockHeader
	data [traceRegionAllocBlockData]byte
}

type traceRegionAllocBlockHeader struct {
	next *traceRegionAllocBlock
	off  atomic.Uintptr // Bump-pointer offset into data.
}

// traceRegionAllocBlockData sizes each block so the whole
// traceRegionAllocBlock (header included) is 64 KiB.
const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})
// alloc allocates n-byte block. The block is always aligned to 8 bytes, regardless of platform.
func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
	n = alignUp(n, 8)
	if n > traceRegionAllocBlockData {
		throw("traceRegion: alloc too large")
	}
	if a.dropping.Load() {
		throw("traceRegion: alloc with concurrent drop")
	}

	// Try to bump-pointer allocate into the current block (lock-free fast path).
	block := (*traceRegionAllocBlock)(a.current.Load())
	if block != nil {
		r := block.off.Add(n)
		if r <= uintptr(len(block.data)) {
			return (*notInHeap)(unsafe.Pointer(&block.data[r-n]))
		}
	}

	// Try to install a new block.
	var x *notInHeap
	systemstack(func() {
		// Acquire a.lock on the systemstack to avoid stack growth
		// and accidentally entering the tracer again.
		lock(&a.lock)

		// Check block again under the lock. Someone may
		// have gotten here first.
		block = (*traceRegionAllocBlock)(a.current.Load())
		if block != nil {
			r := block.off.Add(n)
			if r <= uintptr(len(block.data)) {
				unlock(&a.lock)
				x = (*notInHeap)(unsafe.Pointer(&block.data[r-n]))
				return
			}

			// Add the existing block to the full list.
			block.next = a.full
			a.full = block
		}

		// Allocate a new block from non-GC'd memory.
		block = (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys, "trace arena alloc"))
		if block == nil {
			throw("traceRegion: out of memory")
		}

		// Allocate space for our current request, so we always make
		// progress.
		block.off.Store(n)
		x = (*notInHeap)(unsafe.Pointer(&block.data[0]))

		// Publish the new block. No write barrier as the memory is off heap.
		a.current.StoreNoWB(unsafe.Pointer(block))
		unlock(&a.lock)
	})
	return x
}
// drop frees all previously allocated memory and resets the allocator.
//
// drop is not safe to call concurrently with other calls to drop or with calls to alloc. The caller
// must ensure that it is not possible for anything else to be using the same structure.
func (a *traceRegionAlloc) drop() {
	// Set the dropping flag so a concurrent alloc (an invariant violation)
	// throws instead of racing with the frees below.
	a.dropping.Store(true)
	// Free the full-block list.
	for a.full != nil {
		block := a.full
		a.full = block.next
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
	}
	// Free the current block, if any.
	if current := a.current.Load(); current != nil {
		sysFree(current, unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
		a.current.StoreNoWB(nil)
	}
	a.dropping.Store(false)
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Runtime -> tracer API.
package runtime
import (
"internal/runtime/atomic"
"internal/trace/tracev2"
_ "unsafe" // for go:linkname
)
// gTraceState is per-G state for the tracer.
type gTraceState struct {
	traceSchedResourceState
}

// reset resets the gTraceState for a new goroutine.
func (s *gTraceState) reset() {
	// Clear the per-generation sequence counters.
	s.seq = [2]uint64{}
	// N.B. s.statusTraced is managed and cleared separately.
}
// mTraceState is per-M state for the tracer.
type mTraceState struct {
	writing       atomic.Bool                           // flag indicating that this M is writing to a trace buffer.
	buf           [2][tracev2.NumExperiments]*traceBuf  // Per-M traceBuf for writing. Indexed by trace.gen%2.
	link          *m                                    // Snapshot of alllink or freelink.
	reentered     uint32                                // Whether we've reentered tracing from within tracing.
	entryGen      uintptr                               // The generation value on first entry.
	oldthrowsplit bool                                  // gp.throwsplit upon calling traceLocker.writer. For debugging.
}

// pTraceState is per-P state for the tracer.
type pTraceState struct {
	traceSchedResourceState

	// mSyscallID is the ID of the M this was bound to before entering a syscall.
	mSyscallID int64

	// maySweep indicates the sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	maySweep bool

	// inSweep indicates that at least one sweep event has been traced.
	inSweep bool

	// swept and reclaimed track the number of bytes swept and reclaimed
	// by sweeping in the current sweep loop (while maySweep was true).
	swept, reclaimed uintptr
}
// traceLockInit initializes global trace locks.
func traceLockInit() {
	// Sharing a lock rank here is fine because they should never be accessed
	// together. If they are, we want to find out immediately.
	//
	// Initialize the per-generation table locks in a loop; the two
	// generations are symmetric.
	for i := 0; i < 2; i++ {
		lockInit(&trace.stringTab[i].lock, lockRankTraceStrings)
		lockInit(&trace.stringTab[i].tab.mem.lock, lockRankTraceStrings)
		lockInit(&trace.stackTab[i].tab.mem.lock, lockRankTraceStackTab)
		lockInit(&trace.typeTab[i].tab.mem.lock, lockRankTraceTypeTab)
	}
	lockInit(&trace.lock, lockRankTrace)
}
// lockRankMayTraceFlush records the lock ranking effects of a
// potential call to traceFlush.
//
// nosplit because traceAcquire is nosplit.
//
//go:nosplit
func lockRankMayTraceFlush() {
	lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
}
// traceBlockReason is an enumeration of reasons a goroutine might block.
// This is the interface the rest of the runtime uses to tell the
// tracer why a goroutine blocked. The tracer then propagates this information
// into the trace however it sees fit.
//
// Note that traceBlockReasons should not be compared, since reasons that are
// distinct by name may *not* be distinct by value.
type traceBlockReason uint8

const (
	traceBlockGeneric traceBlockReason = iota
	traceBlockForever
	traceBlockNet
	traceBlockSelect
	traceBlockCondWait
	traceBlockSync
	traceBlockChanSend
	traceBlockChanRecv
	traceBlockGCMarkAssist
	traceBlockGCSweep
	traceBlockSystemGoroutine
	traceBlockPreempted
	traceBlockDebugCall
	traceBlockUntilGCEnds
	traceBlockSleep
	traceBlockGCWeakToStrongWait
	traceBlockSynctest
)

// traceBlockReasonStrings maps each traceBlockReason to the human-readable
// string recorded in the trace.
var traceBlockReasonStrings = [...]string{
	traceBlockGeneric:            "unspecified",
	traceBlockForever:            "forever",
	traceBlockNet:                "network",
	traceBlockSelect:             "select",
	traceBlockCondWait:           "sync.(*Cond).Wait",
	traceBlockSync:               "sync",
	traceBlockChanSend:           "chan send",
	traceBlockChanRecv:           "chan receive",
	traceBlockGCMarkAssist:       "GC mark assist wait for work",
	traceBlockGCSweep:            "GC background sweeper wait",
	traceBlockSystemGoroutine:    "system goroutine wait",
	traceBlockPreempted:          "preempted",
	traceBlockDebugCall:          "wait for debug call",
	traceBlockUntilGCEnds:        "wait until GC ends",
	traceBlockSleep:              "sleep",
	traceBlockGCWeakToStrongWait: "GC weak to strong wait",
	traceBlockSynctest:           "synctest",
}
// traceGoStopReason is an enumeration of reasons a goroutine might yield.
//
// Note that traceGoStopReasons should not be compared, since reasons that are
// distinct by name may *not* be distinct by value.
type traceGoStopReason uint8

const (
	traceGoStopGeneric traceGoStopReason = iota
	traceGoStopGoSched
	traceGoStopPreempted
)

// traceGoStopReasonStrings maps each traceGoStopReason to the human-readable
// string recorded in the trace.
var traceGoStopReasonStrings = [...]string{
	traceGoStopGeneric:   "unspecified",
	traceGoStopGoSched:   "runtime.Gosched",
	traceGoStopPreempted: "preempted",
}
// traceEnabled returns true if the trace is currently enabled.
//
//go:nosplit
func traceEnabled() bool {
	return trace.enabled
}

// traceAllocFreeEnabled returns true if the trace is currently enabled
// and alloc/free events are also enabled.
//
//go:nosplit
func traceAllocFreeEnabled() bool {
	return trace.enabledWithAllocFree
}

// traceShuttingDown returns true if the trace is currently shutting down.
func traceShuttingDown() bool {
	return trace.shutdown.Load()
}
// traceLocker represents an M writing trace events. While a traceLocker value
// is valid, the tracer observes all operations on the G/M/P or trace events being
// written as happening atomically.
type traceLocker struct {
	mp  *m      // The M whose write critical section this locker represents.
	gen uintptr // The trace generation acquired; zero means tracing is off.
}

// debugTraceReentrancy checks if the trace is reentrant.
//
// This is optional because throwing in a function makes it instantly
// not inlineable, and we want traceAcquire to be inlineable for
// low overhead when the trace is disabled.
const debugTraceReentrancy = false
// traceAcquire prepares this M for writing one or more trace events.
//
// Returns a zero traceLocker (ok() == false) when tracing is disabled.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceAcquire() traceLocker {
	// Fast, inlineable path: bail immediately when tracing is off.
	if !traceEnabled() {
		return traceLocker{}
	}
	return traceAcquireEnabled()
}
// traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
// broken out to make traceAcquire inlineable to keep the overhead of the tracer
// when it's disabled low.
//
// nosplit because it's called by traceAcquire, which is nosplit.
//
//go:nosplit
func traceAcquireEnabled() traceLocker {
	// Any time we acquire a traceLocker, we may flush a trace buffer. But
	// buffer flushes are rare. Record the lock edge even if it doesn't happen
	// this time.
	lockRankMayTraceFlush()

	// Prevent preemption.
	mp := acquirem()

	// Check if we're already tracing. It's safe to be reentrant in general,
	// because this function (and the invariants of traceLocker.writer) ensure
	// that it is.
	if mp.trace.writing.Load() {
		// Reentrant acquire: bump the count and reuse the generation
		// recorded on first entry.
		mp.trace.reentered++
		return traceLocker{mp, mp.trace.entryGen}
	}

	// Set the write flag. This prevents traceAdvance from moving forward
	// until all Ms are observed to be outside of a write critical section.
	//
	// Note: The write flag is mutated here and also in traceCPUSample. If you update
	// usage of the write flag here, make sure to also look at what traceCPUSample is
	// doing.
	mp.trace.writing.Store(true)

	// N.B. This load of gen appears redundant with the one in traceEnabled.
	// However, it's very important that the gen we use for writing to the trace
	// is acquired under a traceLocker so traceAdvance can make sure no stale
	// gen values are being used.
	//
	// Because we're doing this load again, it also means that the trace
	// might end up being disabled when we load it. In that case we need to undo
	// what we did and bail.
	gen := trace.gen.Load()
	if gen == 0 {
		mp.trace.writing.Store(false)
		releasem(mp)
		return traceLocker{}
	}
	// Remember the generation for reentrant acquires.
	mp.trace.entryGen = gen
	return traceLocker{mp, gen}
}
// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func (tl traceLocker) ok() bool {
	return tl.gen != 0
}

// traceRelease indicates that this M is done writing trace events.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceRelease(tl traceLocker) {
	if tl.mp.trace.reentered > 0 {
		// Unwind one level of reentrancy; the write flag stays set until
		// the outermost release.
		tl.mp.trace.reentered--
	} else {
		tl.mp.trace.writing.Store(false)
	}
	releasem(tl.mp)
}
// traceExitingSyscall marks a goroutine as exiting the syscall slow path.
//
// Must be paired with a traceExitedSyscall call.
func traceExitingSyscall() {
	trace.exitingSyscall.Add(1)
}

// traceExitedSyscall marks a goroutine as having exited the syscall slow path.
func traceExitedSyscall() {
	trace.exitingSyscall.Add(-1)
}
// Gomaxprocs emits a ProcsChange event recording the new procs count.
func (tl traceLocker) Gomaxprocs(procs int32) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvProcsChange, traceArg(procs), tl.stack(1))
}

// ProcStart traces a ProcStart event.
//
// Must be called with a valid P.
func (tl traceLocker) ProcStart() {
	pp := tl.mp.p.ptr()
	// Procs are typically started within the scheduler when there is no user goroutine. If there is a user goroutine,
	// it must be in _Gsyscall because the only time a goroutine is allowed to have its Proc moved around from under it
	// is during a syscall.
	tl.eventWriter(tracev2.GoSyscall, tracev2.ProcIdle).event(tracev2.EvProcStart, traceArg(pp.id), pp.trace.nextSeq(tl.gen))
}

// ProcStop traces a ProcStop event.
func (tl traceLocker) ProcStop(pp *p) {
	// The only time a goroutine is allowed to have its Proc moved around
	// from under it is during a syscall.
	tl.eventWriter(tracev2.GoSyscall, tracev2.ProcRunning).event(tracev2.EvProcStop)
}
// GCActive traces a GCActive event.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCActive() {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGCActive, traceArg(trace.seqGC))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}

// GCStart traces a GCBegin event.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCStart() {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGCBegin, traceArg(trace.seqGC), tl.stack(3))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}

// GCDone traces a GCEnd event.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCDone() {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGCEnd, traceArg(trace.seqGC))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}

// STWStart traces a STWBegin event with the given reason string.
func (tl traceLocker) STWStart(reason stwReason) {
	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvSTWBegin, tl.string(reason.String()), tl.stack(2))
}

// STWDone traces a STWEnd event.
func (tl traceLocker) STWDone() {
	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvSTWEnd)
}
// GCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// GCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepStart() {
	// Delay the actual GCSweepBegin event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	pp := tl.mp.p.ptr()
	if pp.trace.maySweep {
		// Unbalanced start: starts must be paired with dones.
		throw("double traceGCSweepStart")
	}
	pp.trace.maySweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
}
// GCSweepSpan traces the sweep of a single span. If this is
// the first span swept since traceGCSweepStart was called, this
// will emit a GCSweepBegin event.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepSpan(bytesSwept uintptr) {
	pp := tl.mp.p.ptr()
	// Outside a sweep start/done pair: record nothing.
	if !pp.trace.maySweep {
		return
	}
	// First span since GCSweepStart: emit the deferred begin event.
	if pp.trace.swept == 0 {
		tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGCSweepBegin, tl.stack(1))
		pp.trace.inSweep = true
	}
	pp.trace.swept += bytesSwept
}
// GCSweepDone finishes tracing a sweep loop. If any memory was
// swept (i.e. traceGCSweepSpan emitted an event) then this will emit
// a GCSweepEnd event.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepDone() {
	pp := tl.mp.p.ptr()
	if !pp.trace.maySweep {
		// Unbalanced done: dones must be paired with starts.
		throw("missing traceGCSweepStart")
	}
	// Only emit the end event if a begin event was emitted (inSweep).
	if pp.trace.inSweep {
		tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGCSweepEnd, traceArg(pp.trace.swept), traceArg(pp.trace.reclaimed))
		pp.trace.inSweep = false
	}
	pp.trace.maySweep = false
}
// GCMarkAssistStart emits a MarkAssistBegin event.
func (tl traceLocker) GCMarkAssistStart() {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGCMarkAssistBegin, tl.stack(1))
}

// GCMarkAssistDone emits a MarkAssistEnd event.
func (tl traceLocker) GCMarkAssistDone() {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGCMarkAssistEnd)
}
// GoCreate emits a GoCreate event for newg, created at pc. If blocked is
// true, the goroutine is created in a blocked state and a GoCreateBlocked
// event is emitted instead.
func (tl traceLocker) GoCreate(newg *g, pc uintptr, blocked bool) {
	// The new goroutine's status is implied by the create event; mark it
	// as traced for this generation so no separate status event is emitted.
	newg.trace.setStatusTraced(tl.gen)
	ev := tracev2.EvGoCreate
	if blocked {
		ev = tracev2.EvGoCreateBlocked
	}
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(ev, traceArg(newg.goid), tl.startPC(pc), tl.stack(2))
}
// GoStart emits a GoStart event.
//
// Must be called with a valid P.
func (tl traceLocker) GoStart() {
	gp := getg().m.curg
	pp := gp.m.p
	w := tl.eventWriter(tracev2.GoRunnable, tracev2.ProcRunning)
	w.event(tracev2.EvGoStart, traceArg(gp.goid), gp.trace.nextSeq(tl.gen))
	// If this goroutine is a GC mark worker, also emit a label identifying
	// the worker mode.
	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		w.event(tracev2.EvGoLabel, trace.markWorkerLabels[tl.gen%2][pp.ptr().gcMarkWorkerMode])
	}
}
// GoEnd emits a GoDestroy event.
//
// TODO(mknyszek): Rename this to GoDestroy.
func (tl traceLocker) GoEnd() {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoDestroy)
}

// GoSched emits a GoStop event with a GoSched reason.
func (tl traceLocker) GoSched() {
	tl.GoStop(traceGoStopGoSched)
}

// GoPreempt emits a GoStop event with a GoPreempted reason.
func (tl traceLocker) GoPreempt() {
	tl.GoStop(traceGoStopPreempted)
}

// GoStop emits a GoStop event with the provided reason.
func (tl traceLocker) GoStop(reason traceGoStopReason) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, trace.goStopReasons[tl.gen%2][reason], tl.stack(0))
}
// GoPark emits a GoBlock event with the provided reason, with a stack
// trace skipping skip frames.
//
// TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
// that we have both, and waitReason is way more descriptive.
func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, trace.goBlockReasons[tl.gen%2][reason], tl.stack(skip))
}

// GoUnpark emits a GoUnblock event for gp, with a stack trace skipping
// skip frames.
func (tl traceLocker) GoUnpark(gp *g, skip int) {
	// Emit a GoWaiting status if necessary for the unblocked goroutine.
	tl.emitUnblockStatus(gp, tl.gen)
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoUnblock, traceArg(gp.goid), gp.trace.nextSeq(tl.gen), tl.stack(skip))
}
// GoSwitch emits a GoSwitch event. If destroy is true, the calling goroutine
// is simultaneously being destroyed.
func (tl traceLocker) GoSwitch(nextg *g, destroy bool) {
	// Emit a GoWaiting status if necessary for the unblocked goroutine.
	tl.emitUnblockStatus(nextg, tl.gen)
	// Choose the event type up front; GoSwitchDestroy additionally marks
	// the current goroutine as destroyed.
	ev := tracev2.EvGoSwitch
	if destroy {
		ev = tracev2.EvGoSwitchDestroy
	}
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(ev, traceArg(nextg.goid), nextg.trace.nextSeq(tl.gen))
}
// emitUnblockStatus emits a GoStatus GoWaiting event for a goroutine about to be
// unblocked to the trace writer, if its status hasn't been traced this generation yet.
func (tl traceLocker) emitUnblockStatus(gp *g, gen uintptr) {
	if !gp.trace.statusWasTraced(gen) && gp.trace.acquireStatus(gen) {
		// TODO(go.dev/issue/65634): Although it would be nice to add a stack trace here of gp,
		// we cannot safely do so. gp is in _Gwaiting and so we don't have ownership of its stack.
		// We can fix this by acquiring the goroutine's scan bit.
		tl.writer().writeGoStatus(gp.goid, -1, tracev2.GoWaiting, gp.inMarkAssist, 0).end()
	}
}
// GoSysCall emits a GoSyscallBegin event.
//
// Must be called with a valid P.
func (tl traceLocker) GoSysCall() {
	// Scribble down the M that the P is currently attached to, so that a
	// later ProcSteal can record which M it stole the P from.
	pp := tl.mp.p.ptr()
	pp.trace.mSyscallID = int64(tl.mp.procid)
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(1))
}
// GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
// if lostP is true.
//
// lostP must be true in all cases that a goroutine loses its P during a syscall.
// This means it's not sufficient to check if it has no P. In particular, it needs to be
// true in the following cases:
// - The goroutine lost its P, it ran some other code, and then got it back. It's now running with that P.
// - The goroutine lost its P and was unable to reacquire it, and is now running without a P.
// - The goroutine lost its P and acquired a different one, and is now running with that P.
func (tl traceLocker) GoSysExit(lostP bool) {
	ev := tracev2.EvGoSyscallEnd
	procStatus := tracev2.ProcSyscall // Procs implicitly enter tracev2.ProcSyscall on GoSyscallBegin.
	if lostP {
		ev = tracev2.EvGoSyscallEndBlocked
		procStatus = tracev2.ProcRunning // If a G has a P when emitting this event, it reacquired a P and is indeed running.
	} else {
		// We kept our P through the syscall: clear the M ID scribbled
		// down by GoSysCall.
		tl.mp.p.ptr().trace.mSyscallID = -1
	}
	tl.eventWriter(tracev2.GoSyscall, procStatus).event(ev)
}
// ProcSteal indicates that our current M stole a P from another M.
//
// The caller must have ownership of pp.
func (tl traceLocker) ProcSteal(pp *p) {
	// Grab the M ID we stole from (scribbled down by GoSysCall), then clear it.
	mStolenFrom := pp.trace.mSyscallID
	pp.trace.mSyscallID = -1
	// Emit the status of the P we're stealing. We may be just about to do this when creating the event
	// writer but it's not guaranteed, even if we're stealing from a syscall. Although it might seem like
	// from a syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so
	// it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves
	// at all and plan to hand it back to the runtime.
	if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
		// Careful: don't use the event writer. We never want status or in-progress events
		// to trigger more in-progress events.
		tl.writer().writeProcStatus(uint64(pp.id), tracev2.ProcSyscallAbandoned, pp.trace.inSweep).end()
	}
	// The status of the proc and goroutine, if we need to emit one here, is not evident from the
	// context of just emitting this event alone. There are two cases. Either we're trying to steal
	// the P just to get its attention (e.g. STW or sysmon retake) or we're trying to steal a P for
	// ourselves specifically to keep running. The two contexts look different, but can be summarized
	// fairly succinctly. In the former, we're a regular running goroutine and proc, if we have either.
	// In the latter, we're a goroutine in a syscall.
	goStatus := tracev2.GoRunning
	procStatus := tracev2.ProcRunning
	if tl.mp.curg != nil && tl.mp.curg.syscallsp != 0 {
		goStatus = tracev2.GoSyscall
		procStatus = tracev2.ProcSyscallAbandoned
	}
	// Record the P, its next sequence number, and the M we stole it from.
	tl.eventWriter(goStatus, procStatus).event(tracev2.EvProcSteal, traceArg(pp.id), pp.trace.nextSeq(tl.gen), traceArg(mStolenFrom))
}
// HeapAlloc emits a HeapAlloc event.
//
// live is the heap value recorded with the event.
func (tl traceLocker) HeapAlloc(live uint64) {
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvHeapAlloc, traceArg(live))
}
// HeapGoal reads the current heap goal and emits a HeapGoal event.
func (tl traceLocker) HeapGoal() {
	heapGoal := gcController.heapGoal()
	// The heapGoal calculations will result in strange numbers if the GC is off. See go.dev/issue/63864.
	// Check gcPercent before using the heapGoal in the trace.
	if heapGoal == ^uint64(0) || gcController.gcPercent.Load() < 0 {
		// Heap-based triggering is disabled; record 0 instead of a bogus goal.
		heapGoal = 0
	}
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvHeapGoal, traceArg(heapGoal))
}
// GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
//
// Unlike GoCreate, the caller must be running on gp.
//
// This occurs when C code calls into Go. On pthread platforms it occurs only when
// a C thread calls into Go code for the first time.
func (tl traceLocker) GoCreateSyscall(gp *g) {
	// N.B. We should never trace a status for this goroutine (which we're currently running on),
	// since we want this to appear like goroutine creation.
	gp.trace.setStatusTraced(tl.gen)
	// We might have a P left over on the thread from the last cgo callback,
	// but in a syscall context, it is NOT ours. Act as if we do not have a P,
	// and don't record a status. (rawEventWriter skips the status machinery.)
	tl.rawEventWriter().event(tracev2.EvGoCreateSyscall, traceArg(gp.goid))
}
// GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead.
//
// This occurs when Go code returns back to C. On pthread platforms it occurs only when
// the C thread is destroyed.
func (tl traceLocker) GoDestroySyscall() {
	// Write the status for the goroutine if necessary: first writer per
	// generation to acquire the status emits it.
	if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
		tl.writer().writeGoStatus(gp.goid, int64(tl.mp.procid), tracev2.GoSyscall, false, 0 /* no stack */).end()
	}
	// We might have a P left over on the thread from the last cgo callback,
	// but in a syscall context, it is NOT ours. Act as if we do not have a P,
	// and don't record a status. (rawEventWriter skips the status machinery.)
	tl.rawEventWriter().event(tracev2.EvGoDestroySyscall)
}
// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go
// trace_userTaskCreate emits a UserTaskBegin event on behalf of runtime/trace.
//
//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	// tl.string records taskType in the trace string table; stack(3)
	// omits the tracer/linkname frames between the user call and here.
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvUserTaskBegin, traceArg(id), traceArg(parentID), tl.string(taskType), tl.stack(3))
	traceRelease(tl)
}
// trace_userTaskEnd emits a UserTaskEnd event on behalf of runtime/trace.
//
//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvUserTaskEnd, traceArg(id), tl.stack(2))
	traceRelease(tl)
}
// trace_userRegion emits a UserRegionBegin or UserRegionEnd event,
// depending on mode (0 == Begin, 1 == End). Any other mode is ignored.
//
// TODO(mknyszek): Just make this two functions.
//
//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	var ev tracev2.EventType
	switch mode {
	case 0:
		ev = tracev2.EvUserRegionBegin
	case 1:
		ev = tracev2.EvUserRegionEnd
	default:
		// Unknown mode: drop the event, but don't leak the acquisition.
		// (Previously this returned without traceRelease, leaving the
		// trace locker held.)
		traceRelease(tl)
		return
	}
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(ev, traceArg(id), tl.string(name), tl.stack(3))
	traceRelease(tl)
}
// trace_userLog emits a UserLog event on behalf of runtime/trace.
// (The previous doc comment incorrectly described UserRegion events.)
//
//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	// category is deduplicated via the string table; message uses
	// uniqueString since log messages rarely repeat.
	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvUserLog, traceArg(id), tl.string(category), tl.uniqueString(message), tl.stack(3))
	traceRelease(tl)
}
// traceThreadDestroy is called when a thread is removed from
// sched.freem.
//
// mp must not be able to emit trace events anymore.
//
// sched.lock must be held to synchronize with traceAdvance.
func traceThreadDestroy(mp *m) {
	assertLockHeld(&sched.lock)
	// Flush all outstanding buffers to maintain the invariant
	// that an M only has active buffers while on sched.freem
	// or allm.
	//
	// Perform a traceAcquire/traceRelease on behalf of mp to
	// synchronize with the tracer trying to flush our buffer
	// as well.
	if debugTraceReentrancy && mp.trace.writing.Load() {
		throw("bad use of trace.writing")
	}
	mp.trace.writing.Store(true)
	systemstack(func() {
		lock(&trace.lock)
		// mp.trace.buf is indexed by generation parity (i) and
		// experiment (exp); flush every non-nil buffer.
		for i := range mp.trace.buf {
			for exp, buf := range mp.trace.buf[i] {
				if buf != nil {
					// N.B. traceBufFlush accepts a generation, but it
					// really just cares about gen%2.
					traceBufFlush(buf, uintptr(i))
					mp.trace.buf[i][exp] = nil
				}
			}
		}
		unlock(&trace.lock)
	})
	mp.trace.writing.Store(false)
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Trace stack table and acquisition.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/trace/tracev2"
"unsafe"
)
const (
	// logicalStackSentinel is a sentinel value at pcBuf[0] signifying that
	// pcBuf[1:] holds a logical stack requiring no further processing. Any other
	// value at pcBuf[0] represents a skip value to apply to the physical stack in
	// pcBuf[1:] after inline expansion.
	//
	// See traceStack (producer) and fpunwindExpand (consumer) for this encoding.
	logicalStackSentinel = ^uintptr(0)
)
// traceStack captures a stack trace from a goroutine and registers it in the trace
// stack table. It then returns its unique ID. If gp == nil, then traceStack will
// attempt to use the current execution context.
//
// skip controls the number of leaf frames to omit in order to hide tracer internals
// from stack traces, see CL 5523.
//
// Avoid calling this function directly. Prefer traceEventWriter.stack.
func traceStack(skip int, gp *g, tab *traceStackTable) uint64 {
	// pcBuf[0] is reserved for the sentinel/skip value written below, which is
	// why nstk starts at 1.
	pcBuf := getg().m.profStack
	// Figure out gp and mp for the backtrace.
	var mp *m
	if gp == nil {
		mp = getg().m
		gp = mp.curg
	}
	// Double-check that we own the stack we're about to trace.
	if debug.traceCheckStackOwnership != 0 && gp != nil {
		status := readgstatus(gp)
		// If the scan bit is set, assume we're the ones that acquired it.
		if status&_Gscan == 0 {
			// Use the trace status to check this. There are a number of cases
			// where a running goroutine might be in _Gwaiting, and these cases
			// are totally fine for taking a stack trace. They're captured
			// correctly in goStatusToTraceGoStatus.
			switch goStatusToTraceGoStatus(status, gp.waitreason) {
			case tracev2.GoRunning, tracev2.GoSyscall:
				if getg() == gp || mp.curg == gp {
					break
				}
				fallthrough
			default:
				print("runtime: gp=", unsafe.Pointer(gp), " gp.goid=", gp.goid, " status=", gStatusStrings[status], "\n")
				throw("attempted to trace stack of a goroutine this thread does not own")
			}
		}
	}
	if gp != nil && mp == nil {
		// We're getting the backtrace for a G that's not currently executing.
		// It may still have an M, if it's locked to some M.
		mp = gp.lockedm.ptr()
	}
	nstk := 1
	if tracefpunwindoff() || (mp != nil && mp.hasCgoOnStack()) {
		// Slow path: Unwind using default unwinder. Used when frame pointer
		// unwinding is unavailable or disabled (tracefpunwindoff), or might
		// produce incomplete results or crashes (hasCgoOnStack). Note that no
		// cgo callback related crashes have been observed yet. The main
		// motivation is to take advantage of a potentially registered cgo
		// symbolizer.
		pcBuf[0] = logicalStackSentinel
		if getg() == gp {
			nstk += callers(skip+1, pcBuf[1:])
		} else if gp != nil {
			nstk += gcallers(gp, skip, pcBuf[1:])
		}
	} else {
		// Fast path: Unwind using frame pointers. The skip is applied later,
		// during fpunwindExpand, hence storing it in pcBuf[0].
		pcBuf[0] = uintptr(skip)
		if getg() == gp {
			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:])
		} else if gp != nil {
			// Three cases:
			//
			// (1) We're called on the g0 stack through mcall(fn) or systemstack(fn). To
			// behave like gcallers above, we start unwinding from sched.bp, which
			// points to the caller frame of the leaf frame on g's stack. The return
			// address of the leaf frame is stored in sched.pc, which we manually
			// capture here.
			//
			// (2) We're called against a gp that we're not currently executing on, but that isn't
			// in a syscall, in which case it's currently not executing. gp.sched contains the most
			// up-to-date information about where it stopped, and like case (1), we match gcallers
			// here.
			//
			// (3) We're called against a gp that we're not currently executing on, but that is in
			// a syscall, in which case gp.syscallsp != 0. gp.syscall* contains the most up-to-date
			// information about where it stopped, and like case (1), we match gcallers here.
			if gp.syscallsp != 0 {
				pcBuf[1] = gp.syscallpc
				nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.syscallbp), pcBuf[2:])
			} else {
				pcBuf[1] = gp.sched.pc
				if gp.syncSafePoint {
					// We're stopped in morestack, which is an odd state because gp.sched.bp
					// refers to our parent frame, since we haven't had the chance to push our
					// frame pointer to the stack yet. If we just start walking from gp.sched.bp,
					// we'll skip a frame as a result. Luckily, we can find the PC we want right
					// at gp.sched.sp on non-LR platforms, and we have it directly on LR platforms.
					// See issue go.dev/issue/68090.
					if usesLR {
						pcBuf[2] = gp.sched.lr
					} else {
						pcBuf[2] = *(*uintptr)(unsafe.Pointer(gp.sched.sp))
					}
					nstk += 2 + fpTracebackPCs(unsafe.Pointer(gp.sched.bp), pcBuf[3:])
				} else {
					nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.sched.bp), pcBuf[2:])
				}
			}
		}
	}
	// Trim the frames no consumer wants to see. N.B. if gp is nil, nstk is 1
	// here and both conditions below short-circuit safely.
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := tab.put(pcBuf[:nstk])
	return id
}
// traceStackTable maps stack traces (arrays of PC's) to unique IDs.
//
// ID 0 is reserved for a zero-length stack.
type traceStackTable struct {
	// tab stores the PC arrays keyed by their raw bytes; see put.
	tab traceMap
}
// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (t *traceStackTable) put(pcs []uintptr) uint64 {
	// Even though put will handle this for us, taking the address of pcs forces a bounds check
	// that will fail if len(pcs) == 0.
	if len(pcs) == 0 {
		return 0 // ID 0 is reserved for zero-length stacks.
	}
	// Key on the raw bytes of the PC array (pointer + byte length).
	// noescape keeps pcs from being considered escaping here.
	id, _ := t.tab.put(noescape(unsafe.Pointer(&pcs[0])), uintptr(len(pcs))*unsafe.Sizeof(uintptr(0)))
	return id
}
// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state. It must only be called once the caller
// can guarantee that there are no more writers to the table.
func (t *traceStackTable) dump(gen uintptr) {
	// Scratch space for inline-expanded stacks, reused across all records.
	stackBuf := make([]uintptr, tracev2.MaxFramesPerStack)
	w := unsafeTraceWriter(gen, nil)
	// Walk the traceMap's tree from the root; the writer is threaded through
	// the recursion so buffers can be flushed/replaced along the way.
	if root := (*traceMapNode)(t.tab.root.Load()); root != nil {
		w = dumpStacksRec(root, w, stackBuf)
	}
	w.flush().end()
	t.tab.reset()
}
// dumpStacksRec writes the stack stored at node (and, recursively, at all of
// its children) to the trace as EvStack events, returning the (possibly
// replaced) writer.
func dumpStacksRec(node *traceMapNode, w traceWriter, stackBuf []uintptr) traceWriter {
	// Reinterpret the node's raw bytes as the []uintptr PC array stored by put.
	stack := unsafe.Slice((*uintptr)(unsafe.Pointer(&node.data[0])), uintptr(len(node.data))/unsafe.Sizeof(uintptr(0)))
	// N.B. This might allocate, but that's OK because we're not writing to the M's buffer,
	// but one we're about to create (with ensure).
	n := fpunwindExpand(stackBuf, stack)
	frames := makeTraceFrames(w.gen, stackBuf[:n])
	// The maximum number of bytes required to hold the encoded stack, given that
	// it contains N frames: event type + ID + length + 4 varints per frame.
	maxBytes := 1 + (2+4*len(frames))*traceBytesPerNumber
	// Estimate the size of this record. This
	// bound is pretty loose, but avoids counting
	// lots of varint sizes.
	//
	// Add 1 because we might also write tracev2.EvStacks.
	var flushed bool
	w, flushed = w.ensure(1 + maxBytes)
	if flushed {
		// Annotate the freshly-started batch as containing stacks.
		w.byte(byte(tracev2.EvStacks))
	}
	// Emit stack event.
	w.byte(byte(tracev2.EvStack))
	w.varint(node.id)
	w.varint(uint64(len(frames)))
	for _, frame := range frames {
		w.varint(uint64(frame.PC))
		w.varint(frame.funcID)
		w.varint(frame.fileID)
		w.varint(frame.line)
	}
	// Recursively walk all child nodes.
	for i := range node.children {
		child := node.children[i].Load()
		if child == nil {
			continue
		}
		w = dumpStacksRec((*traceMapNode)(child), w, stackBuf)
	}
	return w
}
// makeTraceFrames returns the frames corresponding to pcs. It may
// allocate and may emit trace events.
func makeTraceFrames(gen uintptr, pcs []uintptr) []traceFrame {
	frames := make([]traceFrame, 0, len(pcs))
	ci := CallersFrames(pcs)
	// Drain the frame iterator; Next reports via more whether frames remain.
	for more := true; more; {
		var f Frame
		f, more = ci.Next()
		frames = append(frames, makeTraceFrame(gen, f))
	}
	return frames
}
// traceFrame is the flattened form of a single stack frame as encoded in the
// trace: a PC plus string-table IDs for the function and file names.
type traceFrame struct {
	PC     uintptr
	funcID uint64 // string-table ID of the function name.
	fileID uint64 // string-table ID of the file name.
	line   uint64
}
// makeTraceFrame sets up a traceFrame for a frame, registering the function
// and file names in gen's trace string table.
func makeTraceFrame(gen uintptr, f Frame) traceFrame {
	var frame traceFrame
	frame.PC = f.PC
	fn := f.Function
	const maxLen = 1 << 10
	// Truncate overly long names, keeping the (more distinctive) suffix.
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID = trace.stringTab[gen%2].put(gen, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID = trace.stringTab[gen%2].put(gen, file)
	return frame
}
// tracefpunwindoff reports whether frame pointer unwinding for the tracer is
// disabled via GODEBUG or not supported by the architecture.
func tracefpunwindoff() bool {
	if debug.tracefpunwindoff != 0 {
		return true
	}
	// Frame pointer unwinding is only implemented on amd64 and arm64.
	switch goarch.ArchFamily {
	case goarch.AMD64, goarch.ARM64:
		return false
	}
	return true
}
// fpTracebackPCs populates pcBuf with the return addresses for each frame and
// returns the number of PCs written to pcBuf. The returned PCs correspond to
// "physical frames" rather than "logical frames"; that is if A is inlined into
// B, this will return a PC for only B.
func fpTracebackPCs(fp unsafe.Pointer, pcBuf []uintptr) (i int) {
	// Walk the frame pointer chain until it terminates (nil) or pcBuf fills.
	for fp != nil && i < len(pcBuf) {
		// The return address sits one word above the frame pointer.
		pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
		// The saved frame pointer of the caller sits at *fp.
		fp = unsafe.Pointer(*(*uintptr)(fp))
		i++
	}
	return i
}
// pprof_fpunwindExpand exposes fpunwindExpand to runtime/pprof via linkname.
//
//go:linkname pprof_fpunwindExpand
func pprof_fpunwindExpand(dst, src []uintptr) int {
	return fpunwindExpand(dst, src)
}
// fpunwindExpand expands a call stack from pcBuf into dst,
// returning the number of PCs written to dst.
// pcBuf and dst should not overlap.
//
// fpunwindExpand checks if pcBuf contains logical frames (which include inlined
// frames) or physical frames (produced by frame pointer unwinding) using a
// sentinel value in pcBuf[0]. Logical frames are simply returned without the
// sentinel. Physical frames are turned into logical frames via inline unwinding
// and by applying the skip value that's stored in pcBuf[0].
func fpunwindExpand(dst, pcBuf []uintptr) int {
	if len(pcBuf) == 0 {
		return 0
	}
	// Note: the len(pcBuf) > 0 guard that used to sit here was redundant
	// given the early return above.
	if pcBuf[0] == logicalStackSentinel {
		// pcBuf contains logical rather than inlined frames, skip has already been
		// applied, just return it without the sentinel value in pcBuf[0].
		return copy(dst, pcBuf[1:])
	}
	var (
		n          int
		lastFuncID = abi.FuncIDNormal
		skip       = pcBuf[0]
		// skipOrAdd skips or appends retPC to dst and returns true if more
		// pcs can be added.
		skipOrAdd = func(retPC uintptr) bool {
			if skip > 0 {
				skip--
			} else if n < len(dst) {
				dst[n] = retPC
				n++
			}
			return n < len(dst)
		}
	)
outer:
	for _, retPC := range pcBuf[1:] {
		// retPC points past the call instruction; back up by one so lookups
		// resolve to the call site itself.
		callPC := retPC - 1
		fi := findfunc(callPC)
		if !fi.valid() {
			// There is no funcInfo if callPC belongs to a C function. In this case
			// we still keep the pc, but don't attempt to expand inlined frames.
			if more := skipOrAdd(retPC); !more {
				break outer
			}
			continue
		}
		// Expand any frames inlined at callPC into logical frames.
		u, uf := newInlineUnwinder(fi, callPC)
		for ; uf.valid(); uf = u.next(uf) {
			sf := u.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
				// ignore wrappers
			} else if more := skipOrAdd(uf.pc + 1); !more {
				break outer
			}
			lastFuncID = sf.funcID
		}
	}
	return n
}
// startPCForTrace returns the start PC of a goroutine for tracing purposes.
// If pc is a wrapper, it returns the PC of the wrapped function. Otherwise it
// returns pc.
func startPCForTrace(pc uintptr) uintptr {
	f := findfunc(pc)
	if !f.valid() {
		return pc // may happen for locked g in extra M since its pc is 0.
	}
	// FUNCDATA_WrapInfo is only present on wrapper functions; it holds the
	// text offset of the wrapped function.
	w := funcdata(f, abi.FUNCDATA_WrapInfo)
	if w == nil {
		return pc // not a wrapper
	}
	return f.datap.textAddr(*(*uint32)(w))
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Trace goroutine and P status management.
package runtime
import (
"internal/runtime/atomic"
"internal/trace/tracev2"
)
// writeGoStatus emits a GoStatus event as well as any active ranges on the goroutine.
//
// mid is the ID of the M the goroutine is on (-1 for none), and stackID is the
// trace stack-table ID to attach (0 for no stack).
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) writeGoStatus(goid uint64, mid int64, status tracev2.GoStatus, markAssist bool, stackID uint64) traceWriter {
	// The status should never be bad. Some invariant must have been violated.
	if status == tracev2.GoBad {
		print("runtime: goid=", goid, "\n")
		throw("attempted to trace a bad status for a goroutine")
	}
	// Trace the status. The stack-carrying variant is only used when a stack
	// was actually captured.
	if stackID == 0 {
		w = w.event(tracev2.EvGoStatus, traceArg(goid), traceArg(uint64(mid)), traceArg(status))
	} else {
		w = w.event(tracev2.EvGoStatusStack, traceArg(goid), traceArg(uint64(mid)), traceArg(status), traceArg(stackID))
	}
	// Trace any special ranges that are in-progress.
	if markAssist {
		w = w.event(tracev2.EvGCMarkAssistActive, traceArg(goid))
	}
	return w
}
// writeProcStatusForP emits a ProcStatus event for the provided p based on its status.
//
// The caller must fully own pp and it must be prevented from transitioning (e.g. this can be
// called by a forEachP callback or from a STW).
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) writeProcStatusForP(pp *p, inSTW bool) traceWriter {
	// If someone already traced pp's status this generation, there's nothing to do.
	if !pp.trace.acquireStatus(w.gen) {
		return w
	}
	var status tracev2.ProcStatus
	switch pp.status {
	case _Pidle, _Pgcstop:
		status = tracev2.ProcIdle
		if pp.status == _Pgcstop && inSTW {
			// N.B. a P that is running and currently has the world stopped will be
			// in _Pgcstop, but we model it as running in the tracer.
			status = tracev2.ProcRunning
		}
	case _Prunning:
		status = tracev2.ProcRunning
		// A P is considered to be in a syscall if its attached G is. Since we fully
		// own P, then the goroutine isn't going to transition and we can trivially
		// check if the goroutine is in a syscall. This used to be just a small problematic
		// window, but this is now the default since _Psyscall no longer exists. See #64318
		// for the history on why it was needed while _Psyscall still existed.
		if w.mp.p.ptr() == pp && w.mp.curg != nil && readgstatus(w.mp.curg)&^_Gscan == _Gsyscall {
			status = tracev2.ProcSyscall
		}
	default:
		throw("attempt to trace invalid or unsupported P status")
	}
	w = w.writeProcStatus(uint64(pp.id), status, pp.trace.inSweep)
	return w
}
// writeProcStatus emits a ProcStatus event with all the provided information.
//
// The caller must have taken ownership of a P's status writing, and the P must be
// prevented from transitioning.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (w traceWriter) writeProcStatus(pid uint64, status tracev2.ProcStatus, inSweep bool) traceWriter {
	// The status should never be bad. Some invariant must have been violated.
	if status == tracev2.ProcBad {
		print("runtime: pid=", pid, "\n")
		throw("attempted to trace a bad status for a proc")
	}
	// Trace the status.
	w = w.event(tracev2.EvProcStatus, traceArg(pid), traceArg(status))
	// Trace any special ranges that are in-progress.
	if inSweep {
		w = w.event(tracev2.EvGCSweepActive, traceArg(pid))
	}
	return w
}
// goStatusToTraceGoStatus translates the internal status to a tracev2.GoStatus.
//
// status must not be _Gdead or any status whose name has the suffix "_unused."
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func goStatusToTraceGoStatus(status uint32, wr waitReason) tracev2.GoStatus {
	// N.B. Ignore the _Gscan bit. We don't model it in the tracer.
	var tgs tracev2.GoStatus
	switch status &^ _Gscan {
	case _Grunnable:
		tgs = tracev2.GoRunnable
	case _Grunning, _Gcopystack:
		tgs = tracev2.GoRunning
	case _Gsyscall:
		tgs = tracev2.GoSyscall
	case _Gwaiting, _Gpreempted, _Gleaked:
		// There are a number of cases where a G might end up in
		// _Gwaiting but it's actually running in a non-preemptive
		// state but needs to present itself as preempted to the
		// garbage collector and traceAdvance (via suspendG). In
		// these cases, we're not going to emit an event, and we
		// want these goroutines to appear in the final trace as
		// if they're running, not blocked.
		tgs = tracev2.GoWaiting
		if status == _Gwaiting && wr.isWaitingForSuspendG() {
			tgs = tracev2.GoRunning
		}
	case _Gdead, _Gdeadextra:
		throw("tried to trace dead goroutine")
	default:
		throw("tried to trace goroutine with invalid or unsupported status")
	}
	return tgs
}
// traceSchedResourceState is shared state for scheduling resources (i.e. fields common to
// both Gs and Ps).
type traceSchedResourceState struct {
	// statusTraced indicates whether a status event was traced for this resource
	// for a particular generation.
	//
	// There are 3 of these because when transitioning across generations, traceAdvance
	// needs to be able to reliably observe whether a status was traced for the previous
	// generation, while we need to clear the value for the next generation.
	statusTraced [3]atomic.Uint32
	// seq is the sequence counter for this scheduling resource's events.
	// The purpose of the sequence counter is to establish a partial order between
	// events that don't obviously happen serially (same M) in the stream of events.
	//
	// There are two of these so that we can reset the counter on each generation.
	// This saves space in the resulting trace by keeping the counter small and allows
	// GoStatus and GoCreate events to omit a sequence number (implicitly 0).
	seq [2]uint64
}
// acquireStatus acquires the right to emit a Status event for the scheduling resource.
//
// Returns false if some other writer already acquired it for generation gen.
//
// nosplit because it's part of writing an event for an M, which must not
// have any stack growth.
//
//go:nosplit
func (r *traceSchedResourceState) acquireStatus(gen uintptr) bool {
	// CAS ensures exactly one winner per generation.
	if !r.statusTraced[gen%3].CompareAndSwap(0, 1) {
		return false
	}
	// The winner also readies state for the following generation.
	r.readyNextGen(gen)
	return true
}
// readyNextGen readies r for the generation following gen: it zeroes the
// next generation's sequence counter and clears its statusTraced slot.
func (r *traceSchedResourceState) readyNextGen(gen uintptr) {
	nextGen := traceNextGen(gen)
	r.seq[nextGen%2] = 0
	r.statusTraced[nextGen%3].Store(0)
}
// statusWasTraced returns true if the sched resource's status was already acquired for tracing
// in generation gen.
func (r *traceSchedResourceState) statusWasTraced(gen uintptr) bool {
	return r.statusTraced[gen%3].Load() != 0
}
// setStatusTraced indicates that the resource's status was already traced, for example
// when a goroutine is created. Unlike acquireStatus, this is an unconditional store.
func (r *traceSchedResourceState) setStatusTraced(gen uintptr) {
	r.statusTraced[gen%3].Store(1)
}
// nextSeq returns the next sequence number for the resource, incrementing
// gen's counter. The caller must serialize calls for a given resource.
func (r *traceSchedResourceState) nextSeq(gen uintptr) traceArg {
	r.seq[gen%2]++
	return traceArg(r.seq[gen%2])
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Trace string management.
package runtime
import "internal/trace/tracev2"
// Trace strings.
// traceStringTable is a map of string -> unique ID that also manages
// writing strings out into the trace.
//
// ID 0 is reserved for the empty string.
type traceStringTable struct {
	// lock protects buf.
	lock mutex
	buf *traceBuf // string batches to write out to the trace.
	// tab is a mapping of string -> unique ID.
	tab traceMap
}
// put adds a string to the table, emits it, and returns a unique ID for it.
//
// The string is only written to the trace buffer the first time it is added.
func (t *traceStringTable) put(gen uintptr, s string) uint64 {
	// Truncate the string now to avoid wasting space in the
	// traceMap and to stay within traceRegionAlloc's block size limit.
	if len(s) > tracev2.MaxEventTrailerDataSize {
		s = s[:tracev2.MaxEventTrailerDataSize]
	}
	// Put the string in the table, keyed by its raw bytes.
	ss := stringStructOf(&s)
	id, added := t.tab.put(ss.str, uintptr(ss.len))
	if added {
		// First time we've seen this string: write it to the buffer.
		// systemstack because writeString acquires t.lock.
		systemstack(func() {
			t.writeString(gen, id, s)
		})
	}
	return id
}
// emit emits a string and creates an ID for it, but doesn't add it to the table. Returns the ID.
//
// Unlike put, the string is always written out and never deduplicated.
func (t *traceStringTable) emit(gen uintptr, s string) uint64 {
	if len(s) == 0 {
		return 0 // Empty strings are implicitly assigned ID 0 already.
	}
	// Grab an ID and write the string to the buffer.
	// systemstack because writeString acquires t.lock.
	id := t.tab.stealID()
	systemstack(func() {
		t.writeString(gen, id, s)
	})
	return id
}
// writeString writes the string to t.buf as an EvString record.
//
// Must run on the systemstack because it acquires t.lock.
//
//go:systemstack
func (t *traceStringTable) writeString(gen uintptr, id uint64, s string) {
	// Truncate the string if necessary (emit's callers may not have done so).
	if len(s) > tracev2.MaxEventTrailerDataSize {
		s = s[:tracev2.MaxEventTrailerDataSize]
	}
	lock(&t.lock)
	w := unsafeTraceWriter(gen, t.buf)
	// Ensure we have a place to write to.
	var flushed bool
	w, flushed = w.ensure(2 + 2*traceBytesPerNumber + len(s) /* tracev2.EvStrings + tracev2.EvString + ID + len + string data */)
	if flushed {
		// Annotate the batch as containing strings.
		w.byte(byte(tracev2.EvStrings))
	}
	// Write out the string.
	w.byte(byte(tracev2.EvString))
	w.varint(id)
	w.varint(uint64(len(s)))
	w.stringData(s)
	// Store back buf in case it was updated during ensure.
	t.buf = w.traceBuf
	unlock(&t.lock)
}
// reset clears the string table and flushes any buffers it has.
//
// Must be called only once the caller is certain nothing else will be
// added to this table.
func (t *traceStringTable) reset(gen uintptr) {
	// Flush the pending string batch, if any. systemstack because
	// trace.lock is a runtime-internal lock.
	if t.buf != nil {
		systemstack(func() {
			lock(&trace.lock)
			traceBufFlush(t.buf, gen)
			unlock(&trace.lock)
		})
		t.buf = nil
	}
	// Reset the table.
	t.tab.reset()
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Trace time and clock.
package runtime
import (
"internal/goarch"
"internal/trace/tracev2"
_ "unsafe"
)
// Timestamps in trace are produced through either nanotime or cputicks
// and divided by traceTimeDiv. nanotime is used everywhere except on
// platforms where osHasLowResClock is true, because the system clock
// isn't granular enough to get useful information out of a trace in
// many cases.
//
// This makes absolute values of timestamp diffs smaller, and so they are
// encoded in fewer bytes.
//
// The target resolution in all cases is 64 nanoseconds.
// This is based on the fact that fundamentally the execution tracer won't emit
// events more frequently than roughly every 200 ns or so, because that's roughly
// how long it takes to call through the scheduler.
// We could be more aggressive and bump this up to 128 ns while still getting
// useful data, but the extra bit doesn't save us that much and the headroom is
// nice to have.
//
// Hitting this target resolution is easy in the nanotime case: just pick a
// division of 64. In the cputicks case it's a bit more complex.
//
// For x86, on a 3 GHz machine, we'd want to divide by 3*64 to hit our target.
// To keep the division operation efficient, we round that up to 4*64, or 256.
// Given what cputicks represents, we use this on all other platforms except
// for PowerPC.
// The suggested increment frequency for PowerPC's time base register is
// 512 MHz according to Power ISA v2.07 section 6.2, so we use 32 on ppc64
// and ppc64le.
//
// The expression below evaluates to 64 when nanotime is used
// (osHasLowResClockInt == 0), and otherwise to 256 — or 32 on
// ppc64/ppc64le, since 256-224 == 32.
const traceTimeDiv = (1-osHasLowResClockInt)*64 + osHasLowResClockInt*(256-224*(goarch.IsPpc64|goarch.IsPpc64le))
// traceTime represents a timestamp for the trace, in trace clock units
// (the underlying clock divided by traceTimeDiv).
type traceTime uint64
// traceClockNow returns a monotonic timestamp. The clock this function gets
// the timestamp from is specific to tracing, and shouldn't be mixed with other
// clock sources.
//
// nosplit because it's called from exitsyscall and various trace writing functions,
// which are nosplit.
//
// traceClockNow is called by runtime/trace and golang.org/x/exp/trace using linkname.
//
//go:linkname traceClockNow
//go:nosplit
func traceClockNow() traceTime {
	// See the comment on traceTimeDiv for the choice of clock source.
	if osHasLowResClock {
		return traceTime(cputicks() / traceTimeDiv)
	}
	return traceTime(nanotime() / traceTimeDiv)
}
// traceClockUnitsPerSecond estimates the number of trace clock units per
// second that elapse.
//
//go:linkname traceClockUnitsPerSecond runtime/trace.runtime_traceClockUnitsPerSecond
func traceClockUnitsPerSecond() uint64 {
	if !osHasLowResClock {
		// Our clock is nanotime, so it's just the constant time division.
		// (trace clock units / nanoseconds) * (1e9 nanoseconds / 1 second)
		return uint64(1.0 / float64(traceTimeDiv) * 1e9)
	}
	// We're using cputicks as our clock, so we need a real estimate.
	return uint64(ticksPerSecond() / traceTimeDiv)
}
// traceSyncBatch emits a sync batch for generation gen: an EvSync containing
// an EvFrequency event (the trace clock's units-per-second) and an
// EvClockSnapshot event correlating the trace clock with the wall clock
// (sec/nsec) and the monotonic clock. The buffer is flushed immediately so
// the sync data appears early in the trace stream.
func traceSyncBatch(gen uintptr, frequency uint64) {
	w := unsafeTraceWriter(gen, nil)

	// Ensure we have a place to write to.
	w, _ = w.ensure(3 /* EvSync + EvFrequency + EvClockSnapshot */ + 5*traceBytesPerNumber /* frequency, timestamp, mono, sec, nsec */)

	// Write out the sync batch event.
	w.byte(byte(tracev2.EvSync))

	// Write out the frequency event.
	w.byte(byte(tracev2.EvFrequency))
	w.varint(frequency)

	// Write out the clock snapshot event.
	sec, nsec, mono := time_now()
	ts := traceClockNow()
	// Timestamps in a buffer are delta-encoded and must advance; nudge ts
	// forward so the delta below is always at least 1.
	if ts <= w.traceBuf.lastTime {
		ts = w.traceBuf.lastTime + 1
	}
	tsDiff := uint64(ts - w.traceBuf.lastTime)
	w.traceBuf.lastTime = ts
	w.byte(byte(tracev2.EvClockSnapshot))
	w.varint(tsDiff)
	w.varint(uint64(mono))
	w.varint(uint64(sec))
	w.varint(uint64(nsec))

	// Immediately flush the buffer.
	systemstack(func() {
		lock(&trace.lock)
		traceBufFlush(w.traceBuf, gen)
		unlock(&trace.lock)
	})
}
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Trace type table.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/trace/tracev2"
"unsafe"
)
// traceTypeTable maps types to unique uint32 ids.
// It is lock-free for reading.
type traceTypeTable struct {
	tab traceMap // maps type pointer bytes -> id; see put
}
// put returns a unique id for the type typ and caches it in the table,
// if it's seeing it for the first time. A nil typ maps to id 0.
//
// N.B. typ must be kept alive forever for this to work correctly.
func (t *traceTypeTable) put(typ *abi.Type) uint64 {
	if typ == nil {
		return 0
	}
	// Insert the pointer to the type itself. traceMap.put keys on the raw
	// bytes at the given address, so passing &typ with length PtrSize keys
	// on the pointer value of typ.
	id, _ := t.tab.put(noescape(unsafe.Pointer(&typ)), goarch.PtrSize)
	return id
}
// dump writes all previously cached types to trace buffers and
// releases all memory and resets state. It must only be called once the caller
// can guarantee that there are no more writers to the table.
func (t *traceTypeTable) dump(gen uintptr) {
	// Types are emitted under the AllocFree experiment.
	w := unsafeTraceExpWriter(gen, nil, tracev2.AllocFree)
	if root := (*traceMapNode)(t.tab.root.Load()); root != nil {
		w = dumpTypesRec(root, w)
	}
	w.flush().end()
	t.tab.reset()
}
// dumpTypesRec emits the type record stored in node (id, address, size,
// pointer bytes, and name) to w, then recursively emits all of node's
// children. It returns the (possibly flushed/replaced) writer.
func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter {
	// node.data holds the *abi.Type pointer stored by traceTypeTable.put.
	typ := (*abi.Type)(*(*unsafe.Pointer)(unsafe.Pointer(&node.data[0])))
	typName := toRType(typ).string()

	// The maximum number of bytes required to hold the encoded type.
	maxBytes := 1 + 5*traceBytesPerNumber + len(typName)

	// Estimate the size of this record. This
	// bound is pretty loose, but avoids counting
	// lots of varint sizes.
	//
	// Add 1 because we might also write a traceAllocFreeTypesBatch byte.
	var flushed bool
	w, flushed = w.ensure(1 + maxBytes)
	if flushed {
		// Annotate the batch as containing types.
		w.byte(byte(traceAllocFreeTypesBatch))
	}

	// Emit type.
	w.varint(node.id)
	w.varint(uint64(uintptr(unsafe.Pointer(typ))))
	w.varint(uint64(typ.Size()))
	w.varint(uint64(typ.PtrBytes))
	w.varint(uint64(len(typName)))
	w.stringData(typName)

	// Recursively walk all child nodes.
	for i := range node.children {
		child := node.children[i].Load()
		if child == nil {
			continue
		}
		w = dumpTypesRec((*traceMapNode)(child), w)
	}
	return w
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Runtime type representation.
package runtime
import (
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
"unsafe"
)
// maps_typeString returns the string representation of typ.
// It is linknamed into internal/runtime/maps for use in map diagnostics.
//
//go:linkname maps_typeString internal/runtime/maps.typeString
func maps_typeString(typ *abi.Type) string {
	return toRType(typ).string()
}
// Convenience aliases for the internal/abi offset and type representations.
type nameOff = abi.NameOff
type typeOff = abi.TypeOff
type textOff = abi.TextOff

type _type = abi.Type
// rtype is a wrapper that allows us to define additional methods.
type rtype struct {
	*abi.Type // embedding is okay here (unlike reflect) because none of this is public
}
// string returns the type's string representation, resolving the name
// offset and stripping the leading '*' that TFlagExtraStar indicates was
// stored with the name.
func (t rtype) string() string {
	s := t.nameOff(t.Str).Name()
	if t.TFlag&abi.TFlagExtraStar == 0 {
		return s
	}
	return s[1:]
}
// uncommon returns the type's UncommonType data (method/pkgpath info),
// or nil if the type has none.
func (t rtype) uncommon() *uncommontype {
	return t.Uncommon()
}
// name returns the tail of the type's string representation: everything
// after the last '.' that is not inside square brackets (so bracketed
// segments such as type-parameter lists don't confuse the search).
// It returns "" for unnamed types, and the whole string if no such '.'
// exists.
func (t rtype) name() string {
	if t.TFlag&abi.TFlagNamed == 0 {
		return ""
	}
	s := t.string()
	depth := 0 // current "[...]" nesting level, scanning right to left
	for j := len(s) - 1; j >= 0; j-- {
		switch s[j] {
		case ']':
			depth++
		case '[':
			depth--
		case '.':
			if depth == 0 {
				return s[j+1:]
			}
		}
	}
	return s
}
// pkgpath returns the path of the package where t was defined, if
// available. This is not the same as the reflect package's PkgPath
// method, in that it returns the package path for struct and interface
// types, not just named types.
func (t rtype) pkgpath() string {
	// Named types carry the package path in their uncommon data.
	if u := t.uncommon(); u != nil {
		return t.nameOff(u.PkgPath).Name()
	}
	// Unnamed struct/interface types store it directly in the type.
	switch t.Kind() {
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t.Type))
		return st.PkgPath.Name()
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t.Type))
		return it.PkgPath.Name()
	}
	return ""
}
// getGCMask returns the pointer/nonpointer bitmask for type t.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMask(t *_type) *byte {
	if t.TFlag&abi.TFlagGCMaskOnDemand != 0 {
		// Split the rest into getGCMaskOnDemand so getGCMask itself is inlineable.
		return getGCMaskOnDemand(t)
	}
	// Common case: the mask is stored directly in the type descriptor.
	return t.GCData
}
// inProgress is a byte whose address is a sentinel indicating that
// some thread is currently building the GC bitmask for a type.
// Only its address matters; its value is never used.
var inProgress byte
// getGCMaskOnDemand returns the pointer bitmask for a type whose mask is
// built lazily on first use (TFlagGCMaskOnDemand). Concurrent callers
// coordinate through a CAS on the mask slot, using &inProgress as a
// "being built" sentinel.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMaskOnDemand(t *_type) *byte {
	// For large types, GCData doesn't point directly to a bitmask.
	// Instead it points to a pointer to a bitmask, and the runtime
	// is responsible for (on first use) creating the bitmask and
	// storing a pointer to it in that slot.
	// TODO: we could use &t.GCData as the slot, but types are
	// in read-only memory currently.
	addr := unsafe.Pointer(t.GCData)

	if GOOS == "aix" {
		addr = add(addr, firstmoduledata.data-aixStaticDataBase)
	}

	for {
		p := (*byte)(atomic.Loadp(addr))
		switch p {
		default: // Already built.
			return p
		case &inProgress: // Someone else is currently building it.
			// Just wait until the builder is done.
			// We can't block here, so spinning while having
			// the OS thread yield is about the best we can do.
			osyield()
			continue
		case nil: // Not built yet.
			// Attempt to get exclusive access to build it.
			if !atomic.Casp1((*unsafe.Pointer)(addr), nil, unsafe.Pointer(&inProgress)) {
				continue
			}

			// Build gcmask for this type.
			// Allocate enough pointer-aligned bytes for one bit per word
			// of the type's pointer-containing prefix.
			bytes := goarch.PtrSize * divRoundUp(t.PtrBytes/goarch.PtrSize, 8*goarch.PtrSize)
			p = (*byte)(persistentalloc(bytes, goarch.PtrSize, &memstats.other_sys))
			systemstack(func() {
				buildGCMask(t, bitCursor{ptr: p, n: 0})
			})

			// Store the newly-built gcmask for future callers.
			atomic.StorepNoWB(addr, unsafe.Pointer(p))
			return p
		}
	}
}
// A bitCursor is a simple cursor to memory to which we
// can write a set of bits.
type bitCursor struct {
	ptr *byte   // base of region
	n   uintptr // cursor points to bit n of region
}
// Write to b cnt bits starting at bit 0 of data.
// Requires cnt>0.
// Bits outside the written range in the final byte are preserved below the
// cursor and cleared above it.
func (b bitCursor) write(data *byte, cnt uintptr) {
	// Starting byte for writing.
	p := addb(b.ptr, b.n/8)

	// Note: if we're starting halfway through a byte, we load the
	// existing lower bits so we don't clobber them.
	n := b.n % 8                    // # of valid bits in buf
	buf := uintptr(*p) & (1<<n - 1) // buffered bits to start

	// Work 8 bits at a time.
	for cnt > 8 {
		// Read 8 more bits, now buf has 8-15 valid bits in it.
		buf |= uintptr(*data) << n
		n += 8
		data = addb(data, 1)
		cnt -= 8
		// Write 8 of the buffered bits out.
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	// Read remaining bits (1-8 of them).
	buf |= (uintptr(*data) & (1<<cnt - 1)) << n
	n += cnt

	// Flush remaining bits.
	if n > 8 {
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	// Clear the tail of the last byte, then merge in the leftover bits.
	*p &^= 1<<n - 1
	*p |= byte(buf)
}
// offset returns a new cursor positioned cnt bits past b.
func (b bitCursor) offset(cnt uintptr) bitCursor {
	return bitCursor{ptr: b.ptr, n: b.n + cnt}
}
// buildGCMask writes the ptr/nonptr bitmap for t to dst.
// t must have a pointer.
func buildGCMask(t *_type, dst bitCursor) {
	// Note: we want to avoid a situation where buildGCMask gets into a
	// very deep recursion, because M stacks are fixed size and pretty small
	// (16KB). We do that by ensuring that any recursive
	// call operates on a type at most half the size of its parent.
	// Thus, the recursive chain can be at most 64 calls deep (on a
	// 64-bit machine).
	// Recursion is avoided by using a "tail call" (jumping to the
	// "top" label) for any recursive call with a large subtype.
top:
	if t.PtrBytes == 0 {
		throw("pointerless type")
	}
	if t.TFlag&abi.TFlagGCMaskOnDemand == 0 {
		// copy t.GCData to dst: the mask is precomputed, one bit per word
		// of the pointer-containing prefix.
		dst.write(t.GCData, t.PtrBytes/goarch.PtrSize)
		return
	}
	// The above case should handle all kinds except
	// possibly arrays and structs.
	switch t.Kind() {
	case abi.Array:
		a := t.ArrayType()
		if a.Len == 1 {
			// Avoid recursive call for element type that
			// isn't smaller than the parent type.
			t = a.Elem
			goto top
		}
		// Len >= 2 implies each element is at most half the parent's
		// size, so plain recursion is bounded.
		e := a.Elem
		for i := uintptr(0); i < a.Len; i++ {
			buildGCMask(e, dst)
			dst = dst.offset(e.Size_ / goarch.PtrSize)
		}
	case abi.Struct:
		s := t.StructType()
		var bigField abi.StructField
		for _, f := range s.Fields {
			ft := f.Typ
			if !ft.Pointers() {
				continue
			}
			if ft.Size_ > t.Size_/2 {
				// Avoid recursive call for field type that
				// is larger than half of the parent type.
				// There can be only one.
				bigField = f
				continue
			}
			buildGCMask(ft, dst.offset(f.Offset/goarch.PtrSize))
		}
		if bigField.Typ != nil {
			// Note: this case causes bits to be written out of order.
			t = bigField.Typ
			dst = dst.offset(bigField.Offset / goarch.PtrSize)
			goto top
		}
	default:
		throw("unexpected kind")
	}
}
// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There are a wide range of possible addresses the heap may use, that
// may not be representable as a 32-bit offset. Moreover the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we add pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
var reflectOffs struct {
	lock mutex                     // guards next, m, and minv
	next int32                     // next offset identifier to hand out (negative; see above)
	m    map[int32]unsafe.Pointer  // offset -> pinned *rtype
	minv map[unsafe.Pointer]int32  // pinned *rtype -> offset (inverse of m)
}
// reflectOffsLock acquires reflectOffs.lock, informing the race detector
// of the acquire when it is enabled.
func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}
// reflectOffsUnlock releases reflectOffs.lock, informing the race detector
// of the release (before unlocking) when it is enabled.
func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}
// resolveNameOff resolves the name offset off relative to the module
// containing ptrInModule. If ptrInModule is not in any module's type data,
// the offset is looked up in the run-time reflectOffs table instead.
// Throws if the offset is out of range or unknown.
func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	// Find the module whose type data contains base; the name lives at
	// that module's types base plus off.
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{Bytes: (*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. see if it is a run time name.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{Bytes: (*byte)(res)}
}
// nameOff resolves the name offset off relative to t's module.
func (t rtype) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t.Type), off)
}
// resolveTypeOff resolves the type offset off relative to the module
// containing ptrInModule, consulting the module's typemap (buildmode=shared
// de-duplication) and falling back to the run-time reflectOffs table when
// ptrInModule belongs to no module. Returns nil for the 0 and -1 sentinels.
func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 || off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		// Not in any module: must be a run-time (reflect-created) type.
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	res := md.types + uintptr(off)
	resType := (*_type)(unsafe.Pointer(res))
	// Prefer the canonical type from an earlier module, if any.
	// NOTE(review): the range check below runs after this map lookup; the
	// lookup only uses resType as a map key (nothing is dereferenced), so
	// an out-of-range offset is still caught before use — confirm the
	// ordering is intentional.
	if t := md.typemap[resType]; t != nil {
		return t
	}
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return resType
}
// typeOff resolves the type offset off relative to t's module.
func (t rtype) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t.Type), off)
}
// textOff resolves the text (code) offset off relative to t's module,
// returning a PC. -1 resolves to unreachableMethod; offsets for types not
// in any module are looked up in the run-time reflectOffs table.
func (t rtype) textOff(off textOff) unsafe.Pointer {
	if off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
	}
	base := uintptr(unsafe.Pointer(t.Type))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		// Not in any module: must be a run-time (reflect-created) type.
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	// textAddr handles the module's text section layout.
	res := md.textAddr(uint32(off))
	return unsafe.Pointer(res)
}
// Aliases for the abi package's type-descriptor representations, kept for
// the runtime's historical names.
type uncommontype = abi.UncommonType

type interfacetype = abi.InterfaceType

type arraytype = abi.ArrayType

type chantype = abi.ChanType

type slicetype = abi.SliceType

type functype = abi.FuncType

type ptrtype = abi.PtrType

type name = abi.Name

type structtype = abi.StructType
// pkgPath extracts the package path stored in the encoded name n, or ""
// if none is present. The encoding is abi.Name's: a flag byte, a
// varint-prefixed name, an optional varint-prefixed tag (flag bit 1<<1),
// and then — when flag bit 1<<2 is set — a 4-byte nameOff for the
// package path.
func pkgPath(n name) string {
	if n.Bytes == nil || *n.Data(0)&(1<<2) == 0 {
		return ""
	}
	// Skip past the name: i is the varint's byte length, l the name length.
	i, l := n.ReadVarint(1)
	off := 1 + i + l
	if *n.Data(0)&(1<<1) != 0 {
		// Skip past the tag blob as well — presumably matches
		// abi.Name.Tag's encoding; verify against internal/abi.
		i2, l2 := n.ReadVarint(off)
		off += i2 + l2
	}
	var nameOff nameOff
	// The nameOff is stored unaligned; copy it out byte-by-byte.
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.Data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.Bytes), nameOff)
	return pkgPathName.Name()
}
// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
// It is a no-op in the common single-module case.
func typelinksinit() {
	lockInit(&moduleToTypelinksLock, lockRankTypelinks)
	if firstmoduledata.next == nil {
		return
	}
	modules := activeModules()
	prev := modules[0]
	prevTypelinks := moduleTypelinks(modules[0])
	typehash := make(map[uint32][]*_type, len(prevTypelinks))

	// Process modules in order, accumulating all prior modules' types in
	// typehash (keyed by type hash) so each module can map its types back
	// to the earliest equal type.
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prevTypelinks {
			t := tl
			if prev.typemap != nil {
				// Use the canonical (de-duplicated) type.
				t = prev.typemap[tl]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.Hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.Hash] = append(tlist, t)
		}

		mdTypelinks := moduleTypelinks(md)
		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[*_type]*_type, len(mdTypelinks))
			// Pin the map so the GC keeps the canonical types reachable.
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, t := range mdTypelinks {
				set := t
				for _, candidate := range typehash[t.Hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						set = candidate
						break
					}
				}
				md.typemap[t] = set
			}
		}
		prev = md
		prevTypelinks = mdTypelinks
	}
}
// moduleToTypelinks maps from moduledata to typelinks.
// We build this lazily as needed, since most programs do not need it.
// moduleToTypelinksLock guards both the map and its construction.
var (
	moduleToTypelinks     map[*moduledata][]*_type
	moduleToTypelinksLock mutex
)
// moduleTypelinks takes a moduledata and returns the type
// descriptors that the reflect package needs to know about.
// These are the typelinks. They are the types that the user
// can construct. This is used to ensure that we use a unique
// type descriptor for all types. The returned types are sorted
// by type string; the sorting is done by the linker.
// This slice is constructed as needed and cached in moduleToTypelinks.
func moduleTypelinks(md *moduledata) []*_type {
	lock(&moduleToTypelinksLock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&moduleToTypelinksLock))
	}
	if typelinks, ok := moduleToTypelinks[md]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&moduleToTypelinksLock))
		}
		unlock(&moduleToTypelinksLock)
		return typelinks
	}

	// Allocate a very rough estimate of the number of types.
	ret := make([]*_type, 0, md.typedesclen/(2*unsafe.Sizeof(_type{})))
	td := md.types
	// We have to increment by the pointer size to match the
	// increment in cmd/link/internal/data.go createRelroSect
	// in allocateDataSections.
	//
	// The linker doesn't do that increment when runtime.types
	// has a non-zero size, but in that case the runtime.types
	// symbol itself pushes the other symbols forward.
	// So either way this increment is correct.
	td += goarch.PtrSize
	etypedesc := md.types + md.typedesclen
	// Walk the packed type descriptors, aligning before each one.
	for td < etypedesc {
		td = alignUp(td, goarch.PtrSize)
		typ := (*_type)(unsafe.Pointer(td))
		ret = append(ret, typ)
		td += uintptr(typ.DescriptorSize())
	}

	// Cache the result for subsequent callers.
	if moduleToTypelinks == nil {
		moduleToTypelinks = make(map[*moduledata][]*_type)
	}
	moduleToTypelinks[md] = ret
	if raceenabled {
		racerelease(unsafe.Pointer(&moduleToTypelinksLock))
	}
	unlock(&moduleToTypelinksLock)
	return ret
}
// _typePair is a pair of type descriptors, used as the "seen" key when
// typesEqual walks two possibly-recursive types in parallel.
type _typePair struct {
	t1 *_type
	t2 *_type
}
// toRType wraps t in an rtype so the runtime's helper methods
// (string, name, pkgpath, ...) can be used on it.
func toRType(t *abi.Type) rtype {
	return rtype{t}
}
// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// mark these types as seen, and thus equivalent which prevents an infinite loop if
	// the two types are identical, but recursively defined and loaded from
	// different modules
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.Kind()
	if kind != v.Kind() {
		return false
	}
	// Types with different string representations are never equal.
	rt, rv := toRType(t), toRType(v)
	if rt.string() != rv.string() {
		return false
	}
	// Both must have matching uncommon data (and matching package paths).
	ut := t.Uncommon()
	uv := v.Uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := rt.nameOff(ut.PkgPath).Name()
		pkgpathv := rv.nameOff(uv.PkgPath).Name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	// Basic kinds are fully described by kind + string, checked above.
	if abi.Bool <= kind && kind <= abi.Complex128 {
		return true
	}
	switch kind {
	case abi.String, abi.UnsafePointer:
		return true
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.Elem, av.Elem, seen) && at.Len == av.Len
	case abi.Chan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.Dir == cv.Dir && typesEqual(ct.Elem, cv.Elem, seen)
	case abi.Func:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.OutCount != fv.OutCount || ft.InCount != fv.InCount {
			return false
		}
		// Compare all parameter and result types pairwise.
		tin, vin := ft.InSlice(), fv.InSlice()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.OutSlice(), fv.OutSlice()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.PkgPath.Name() != iv.PkgPath.Name() {
			return false
		}
		if len(it.Methods) != len(iv.Methods) {
			return false
		}
		for i := range it.Methods {
			tm := &it.Methods[i]
			vm := &iv.Methods[i]
			// Note the mhdr array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.Name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.Name)
			if tname.Name() != vname.Name() {
				return false
			}
			if pkgPath(tname) != pkgPath(vname) {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.Typ)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.Typ)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case abi.Map:
		mt := (*abi.MapType)(unsafe.Pointer(t))
		mv := (*abi.MapType)(unsafe.Pointer(v))
		return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
	case abi.Pointer:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.Elem, pv.Elem, seen)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.Elem, sv.Elem, seen)
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.Fields) != len(sv.Fields) {
			return false
		}
		if st.PkgPath.Name() != sv.PkgPath.Name() {
			return false
		}
		// Fields must agree in name, type, tag, offset, and embedding.
		for i := range st.Fields {
			tf := &st.Fields[i]
			vf := &sv.Fields[i]
			if tf.Name.Name() != vf.Name.Name() {
				return false
			}
			if !typesEqual(tf.Typ, vf.Typ, seen) {
				return false
			}
			if tf.Name.Tag() != vf.Name.Tag() {
				return false
			}
			if tf.Offset != vf.Offset {
				return false
			}
			if tf.Name.IsEmbedded() != vf.Name.IsEmbedded() {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
)
// unsafestring validates the arguments of unsafe.String(ptr, len),
// panicking with the appropriate message on failure.
func unsafestring(ptr unsafe.Pointer, len int) {
	if len < 0 {
		panicunsafestringlen()
	}

	// -uintptr(ptr) is the number of bytes from ptr to the top of the
	// address space; a larger len would make ptr+len wrap around.
	if uintptr(len) > -uintptr(ptr) {
		if ptr == nil {
			panicunsafestringnilptr()
		}
		panicunsafestringlen()
	}
}
// Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeString
// unsafestring64 is the int64-length variant of unsafestring, used when the
// length expression's type is wider than int.
func unsafestring64(ptr unsafe.Pointer, len64 int64) {
	len := int(len64)
	// A round-trip mismatch means len64 doesn't fit in int.
	if int64(len) != len64 {
		panicunsafestringlen()
	}
	unsafestring(ptr, len)
}
// unsafestringcheckptr is the checkptr-instrumented variant of
// unsafestring64: after the usual validation it also verifies the result
// doesn't span multiple heap allocations.
func unsafestringcheckptr(ptr unsafe.Pointer, len64 int64) {
	unsafestring64(ptr, len64)

	// Check that underlying array doesn't straddle multiple heap objects.
	// unsafestring64 has already checked for overflow.
	if checkptrStraddles(ptr, uintptr(len64)) {
		throw("checkptr: unsafe.String result straddles multiple allocations")
	}
}
// panicunsafestringlen reports an out-of-range length to unsafe.String.
func panicunsafestringlen() {
	panic(errorString("unsafe.String: len out of range"))
}

// panicunsafestringnilptr reports a nil pointer with nonzero length to
// unsafe.String.
func panicunsafestringnilptr() {
	panic(errorString("unsafe.String: ptr is nil and len is not zero"))
}
// Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
// unsafeslice validates the arguments of unsafe.Slice(ptr, len) for element
// type et, panicking with the appropriate message on failure.
func unsafeslice(et *_type, ptr unsafe.Pointer, len int) {
	if len < 0 {
		panicunsafeslicelen1(sys.GetCallerPC())
	}

	if et.Size_ == 0 {
		// Zero-size elements: the only invalid case is nil ptr with
		// positive len (no overflow is possible).
		if ptr == nil && len > 0 {
			panicunsafeslicenilptr1(sys.GetCallerPC())
		}
	}

	// mem is the total byte size; -uintptr(ptr) is the room left between
	// ptr and the top of the address space.
	mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
	if overflow || mem > -uintptr(ptr) {
		if ptr == nil {
			panicunsafeslicenilptr1(sys.GetCallerPC())
		}
		panicunsafeslicelen1(sys.GetCallerPC())
	}
}
// Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
// unsafeslice64 is the int64-length variant of unsafeslice, used when the
// length expression's type is wider than int.
func unsafeslice64(et *_type, ptr unsafe.Pointer, len64 int64) {
	len := int(len64)
	// A round-trip mismatch means len64 doesn't fit in int.
	if int64(len) != len64 {
		panicunsafeslicelen1(sys.GetCallerPC())
	}
	unsafeslice(et, ptr, len)
}
// unsafeslicecheckptr is the checkptr-instrumented variant of
// unsafeslice64: after the usual validation it also verifies the result
// doesn't span multiple heap allocations.
func unsafeslicecheckptr(et *_type, ptr unsafe.Pointer, len64 int64) {
	unsafeslice64(et, ptr, len64)

	// Check that underlying array doesn't straddle multiple heap objects.
	// unsafeslice64 has already checked for overflow.
	if checkptrStraddles(ptr, uintptr(len64)*et.Size_) {
		throw("checkptr: unsafe.Slice result straddles multiple allocations")
	}
}
// panicunsafeslicelen reports an out-of-range length to unsafe.Slice.
func panicunsafeslicelen() {
	// This is called only from compiler-generated code, so we can get the
	// source of the panic.
	panicunsafeslicelen1(sys.GetCallerPC())
}

// panicunsafeslicelen1 is the pc-carrying implementation of
// panicunsafeslicelen; pc identifies the faulting call site.
//
//go:yeswritebarrierrec
func panicunsafeslicelen1(pc uintptr) {
	panicCheck1(pc, "unsafe.Slice: len out of range")
	panic(errorString("unsafe.Slice: len out of range"))
}

// panicunsafeslicenilptr reports a nil pointer with nonzero length to
// unsafe.Slice.
func panicunsafeslicenilptr() {
	// This is called only from compiler-generated code, so we can get the
	// source of the panic.
	panicunsafeslicenilptr1(sys.GetCallerPC())
}

// panicunsafeslicenilptr1 is the pc-carrying implementation of
// panicunsafeslicenilptr; pc identifies the faulting call site.
//
//go:yeswritebarrierrec
func panicunsafeslicenilptr1(pc uintptr) {
	panicCheck1(pc, "unsafe.Slice: ptr is nil and len is not zero")
	panic(errorString("unsafe.Slice: ptr is nil and len is not zero"))
}
// reflect_unsafeslice exposes unsafeslice's argument validation to the
// reflect package via linkname.
//
//go:linkname reflect_unsafeslice reflect.unsafeslice
func reflect_unsafeslice(et *_type, ptr unsafe.Pointer, len int) {
	unsafeslice(et, ptr, len)
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// Numbers fundamental to the encoding.
// These mirror the corresponding constants in unicode/utf8 — cf. that
// package if changing anything here.
const (
	runeError = '\uFFFD'     // the "error" Rune or "Unicode replacement character"
	runeSelf  = 0x80         // characters below runeSelf are represented as themselves in a single byte.
	maxRune   = '\U0010FFFF' // Maximum valid Unicode code point.
)

// Code points in the surrogate range are not valid for UTF-8.
const (
	surrogateMin = 0xD800
	surrogateMax = 0xDFFF
)

// Leading-byte templates (tN = first byte of an N-byte sequence) and the
// masks used to extract payload bits from each byte.
const (
	t1 = 0x00 // 0000 0000
	tx = 0x80 // 1000 0000
	t2 = 0xC0 // 1100 0000
	t3 = 0xE0 // 1110 0000
	t4 = 0xF0 // 1111 0000
	t5 = 0xF8 // 1111 1000

	maskx = 0x3F // 0011 1111
	mask2 = 0x1F // 0001 1111
	mask3 = 0x0F // 0000 1111
	mask4 = 0x07 // 0000 0111

	// Largest code points representable in 1, 2, and 3 bytes.
	rune1Max = 1<<7 - 1
	rune2Max = 1<<11 - 1
	rune3Max = 1<<16 - 1

	// The default lowest and highest continuation byte.
	locb = 0x80 // 1000 0000
	hicb = 0xBF // 1011 1111
)
// countrunes returns the number of runes in s.
// A range loop over a string advances one rune per iteration, so counting
// iterations counts runes.
func countrunes(s string) int {
	total := 0
	for range s {
		total++
	}
	return total
}
// decoderune returns the non-ASCII rune at the start of
// s[k:] and the index after the rune in s.
//
// decoderune assumes that caller has checked that
// the to be decoded rune is a non-ASCII rune.
//
// If the string appears to be incomplete or decoding problems
// are encountered (runeerror, k + 1) is returned to ensure
// progress when decoderune is used to iterate over a string.
//
// Each case below decodes only if the continuation bytes are in range AND
// the resulting value exceeds the previous length's maximum (rejecting
// overlong encodings); otherwise it falls through to the error return.
func decoderune(s string, k uint) (r rune, pos uint) {
	pos = k

	if k >= uint(len(s)) {
		return runeError, k + 1
	}

	s = s[k:]

	switch {
	case t2 <= s[0] && s[0] < t3:
		// 0080-07FF two byte sequence
		if len(s) > 1 && (locb <= s[1] && s[1] <= hicb) {
			r = rune(s[0]&mask2)<<6 | rune(s[1]&maskx)
			pos += 2
			if rune1Max < r {
				return
			}
		}
	case t3 <= s[0] && s[0] < t4:
		// 0800-FFFF three byte sequence
		if len(s) > 2 && (locb <= s[1] && s[1] <= hicb) && (locb <= s[2] && s[2] <= hicb) {
			r = rune(s[0]&mask3)<<12 | rune(s[1]&maskx)<<6 | rune(s[2]&maskx)
			pos += 3
			// Also reject surrogate code points.
			if rune2Max < r && !(surrogateMin <= r && r <= surrogateMax) {
				return
			}
		}
	case t4 <= s[0] && s[0] < t5:
		// 10000-1FFFFF four byte sequence
		if len(s) > 3 && (locb <= s[1] && s[1] <= hicb) && (locb <= s[2] && s[2] <= hicb) && (locb <= s[3] && s[3] <= hicb) {
			r = rune(s[0]&mask4)<<18 | rune(s[1]&maskx)<<12 | rune(s[2]&maskx)<<6 | rune(s[3]&maskx)
			pos += 4
			// Also reject values above the Unicode maximum.
			if rune3Max < r && r <= maxRune {
				return
			}
		}
	}

	return runeError, k + 1
}
// encoderune writes into p (which must be large enough) the UTF-8 encoding of the rune.
// It returns the number of bytes written.
// Invalid runes (negative, surrogate, or above maxRune) are encoded as
// runeError (3 bytes) via the fallthrough below.
func encoderune(p []byte, r rune) int {
	// Negative values are erroneous. Making it unsigned addresses the problem.
	switch i := uint32(r); {
	case i <= rune1Max:
		p[0] = byte(r)
		return 1
	case i <= rune2Max:
		_ = p[1] // eliminate bounds checks
		p[0] = t2 | byte(r>>6)
		p[1] = tx | byte(r)&maskx
		return 2
	case i > maxRune, surrogateMin <= i && i <= surrogateMax:
		// Replace invalid input with the error rune and encode it
		// as a three-byte sequence below.
		r = runeError
		fallthrough
	case i <= rune3Max:
		_ = p[2] // eliminate bounds checks
		p[0] = t3 | byte(r>>12)
		p[1] = tx | byte(r>>6)&maskx
		p[2] = tx | byte(r)&maskx
		return 3
	default:
		_ = p[3] // eliminate bounds checks
		p[0] = t4 | byte(r>>18)
		p[1] = tx | byte(r>>12)&maskx
		p[2] = tx | byte(r>>6)&maskx
		p[3] = tx | byte(r)&maskx
		return 4
	}
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Valgrind instrumentation is only available on linux amd64 and arm64.
//go:build !valgrind || !linux || (!amd64 && !arm64)
package runtime
import "unsafe"
// valgrindenabled is false in this build: per the build constraint above,
// Valgrind instrumentation is only compiled in on linux amd64/arm64 with
// the "valgrind" tag. All hooks below are no-op stubs.
const valgrindenabled = false

func valgrindRegisterStack(start, end unsafe.Pointer) uintptr       { return 0 }
func valgrindDeregisterStack(id uintptr)                            {}
func valgrindChangeStack(id uintptr, start, end unsafe.Pointer)     {}
func valgrindMalloc(addr unsafe.Pointer, size uintptr)              {}
func valgrindFree(addr unsafe.Pointer)                              {}
func valgrindCreateMempool(addr unsafe.Pointer)                     {}
func valgrindMempoolMalloc(pool, addr unsafe.Pointer, size uintptr) {}
func valgrindMempoolFree(pool, addr unsafe.Pointer)                 {}
func valgrindMakeMemUndefined(addr unsafe.Pointer, size uintptr)    {}
func valgrindMakeMemDefined(addr unsafe.Pointer, size uintptr)      {}
func valgrindMakeMemNoAccess(addr unsafe.Pointer, size uintptr)     {}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && (386 || amd64 || arm || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x)
package runtime
import "unsafe"
// Look up symbols in the Linux vDSO.
// This code was originally based on the sample Linux vDSO parser at
// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/vDSO/parse_vdso.c
// This implements the ELF dynamic linking spec at
// http://sco.com/developers/gabi/latest/ch5.dynamic.html
// The version section is documented at
// https://refspecs.linuxfoundation.org/LSB_3.2.0/LSB-Core-generic/LSB-Core-generic/symversion.html
// ELF constants needed to parse the vDSO image; see the spec links above.
const (
	_AT_SYSINFO_EHDR = 33 // auxv tag carrying the vDSO ELF header address

	_PT_LOAD    = 1 /* Loadable program segment */
	_PT_DYNAMIC = 2 /* Dynamic linking information */

	_DT_NULL     = 0          /* Marks end of dynamic section */
	_DT_HASH     = 4          /* Dynamic symbol hash table */
	_DT_STRTAB   = 5          /* Address of string table */
	_DT_SYMTAB   = 6          /* Address of symbol table */
	_DT_GNU_HASH = 0x6ffffef5 /* GNU-style dynamic symbol hash table */
	_DT_VERSYM   = 0x6ffffff0
	_DT_VERDEF   = 0x6ffffffc

	_VER_FLG_BASE = 0x1 /* Version definition of file itself */

	_SHN_UNDEF = 0 /* Undefined section */

	_SHT_DYNSYM = 11 /* Dynamic linker symbol table */

	_STT_FUNC   = 2 /* Symbol is a code object */
	_STT_NOTYPE = 0 /* Symbol type is not specified */

	_STB_GLOBAL = 1 /* Global symbol */
	_STB_WEAK   = 2 /* Weak symbol */

	_EI_NIDENT = 16

	// Maximum indices for the array types used when traversing the vDSO ELF structures.
	// Computed from architecture-specific max provided by vdso_linux_*.go
	vdsoSymTabSize     = vdsoArrayMax / unsafe.Sizeof(elfSym{})
	vdsoDynSize        = vdsoArrayMax / unsafe.Sizeof(elfDyn{})
	vdsoSymStringsSize = vdsoArrayMax     // byte
	vdsoVerSymSize     = vdsoArrayMax / 2 // uint16
	vdsoHashSize       = vdsoArrayMax / 4 // uint32

	// vdsoBloomSizeScale is a scaling factor for gnuhash tables which are uint32 indexed,
	// but contain uintptrs
	vdsoBloomSizeScale = unsafe.Sizeof(uintptr(0)) / 4 // uint32
)
/* How to extract and insert information held in the st_info field. */

// _ELF_ST_BIND extracts the symbol binding: the upper four bits of st_info.
func _ELF_ST_BIND(val byte) byte { return (val & 0xf0) >> 4 }
// _ELF_ST_TYPE extracts the symbol type: the lower four bits of st_info.
func _ELF_ST_TYPE(val byte) byte { return val &^ 0xf0 }
// vdsoSymbolKey identifies a symbol to look up in the vDSO: its name, its
// precomputed ELF and GNU hash values, and the destination to store the
// resolved address in.
type vdsoSymbolKey struct {
	name    string
	symHash uint32   // ELF (SysV) hash of name
	gnuHash uint32   // GNU hash of name
	ptr     *uintptr // where to store the resolved symbol address
}

// vdsoVersionKey identifies a symbol-version string and its precomputed
// ELF hash.
type vdsoVersionKey struct {
	version string
	verHash uint32
}
// vdsoInfo holds everything parsed out of the vDSO ELF image that is
// needed to look up symbols in it.
type vdsoInfo struct {
	valid bool

	/* Load information */
	loadAddr   uintptr
	loadOffset uintptr /* loadAddr - recorded vaddr */

	/* Symbol table */
	symtab     *[vdsoSymTabSize]elfSym
	symstrings *[vdsoSymStringsSize]byte
	chain      []uint32 // hash chains (layout depends on isGNUHash)
	bucket     []uint32 // hash buckets
	symOff     uint32   // first symbol index covered by a GNU hash table
	isGNUHash  bool     // true if chain/bucket come from DT_GNU_HASH

	/* Version table */
	versym *[vdsoVerSymSize]uint16
	verdef *elfVerdef
}
// vdsoLoadStart and vdsoLoadEnd delimit the vDSO's mapped address range,
// recorded by vdsoInitFromSysinfoEhdr and consulted by inVDSOPage.
var vdsoLoadStart, vdsoLoadEnd uintptr

// see vdso_linux_*.go for vdsoSymbolKeys[] and vdso*Sym vars
// vdsoInitFromSysinfoEhdr parses the vDSO ELF image whose header is hdr (the
// address delivered via the AT_SYSINFO_EHDR auxv entry) and fills in info.
// On any parse failure it returns with info.valid left false.
func vdsoInitFromSysinfoEhdr(info *vdsoInfo, hdr *elfEhdr) {
	info.valid = false
	info.loadAddr = uintptr(unsafe.Pointer(hdr))

	pt := unsafe.Pointer(info.loadAddr + uintptr(hdr.e_phoff))

	// We need two things from the segment table: the load offset
	// and the dynamic table.
	var foundVaddr bool
	var dyn *[vdsoDynSize]elfDyn
	for i := uint16(0); i < hdr.e_phnum; i++ {
		pt := (*elfPhdr)(add(pt, uintptr(i)*unsafe.Sizeof(elfPhdr{})))
		switch pt.p_type {
		case _PT_LOAD:
			if !foundVaddr {
				// Only the first PT_LOAD segment determines the load bias.
				foundVaddr = true
				info.loadOffset = info.loadAddr + uintptr(pt.p_offset-pt.p_vaddr)
				vdsoLoadStart = info.loadOffset
				vdsoLoadEnd = info.loadOffset + uintptr(pt.p_memsz)
			}

		case _PT_DYNAMIC:
			dyn = (*[vdsoDynSize]elfDyn)(unsafe.Pointer(info.loadAddr + uintptr(pt.p_offset)))
		}
	}

	if !foundVaddr || dyn == nil {
		return // Failed
	}

	// Fish out the useful bits of the dynamic table.

	var hash, gnuhash *[vdsoHashSize]uint32
	info.symstrings = nil
	info.symtab = nil
	info.versym = nil
	info.verdef = nil
	for i := 0; dyn[i].d_tag != _DT_NULL; i++ {
		dt := &dyn[i]
		p := info.loadOffset + uintptr(dt.d_val)
		switch dt.d_tag {
		case _DT_STRTAB:
			info.symstrings = (*[vdsoSymStringsSize]byte)(unsafe.Pointer(p))
		case _DT_SYMTAB:
			info.symtab = (*[vdsoSymTabSize]elfSym)(unsafe.Pointer(p))
		case _DT_HASH:
			hash = (*[vdsoHashSize]uint32)(unsafe.Pointer(p))
		case _DT_GNU_HASH:
			gnuhash = (*[vdsoHashSize]uint32)(unsafe.Pointer(p))
		case _DT_VERSYM:
			info.versym = (*[vdsoVerSymSize]uint16)(unsafe.Pointer(p))
		case _DT_VERDEF:
			info.verdef = (*elfVerdef)(unsafe.Pointer(p))
		}
	}

	// A string table, a symbol table, and at least one hash table are required.
	if info.symstrings == nil || info.symtab == nil || (hash == nil && gnuhash == nil) {
		return // Failed
	}

	if info.verdef == nil {
		// Without version definitions, version symbols are useless.
		info.versym = nil
	}

	if gnuhash != nil {
		// Parse the GNU hash table header.
		nbucket := gnuhash[0]
		info.symOff = gnuhash[1]
		bloomSize := gnuhash[2]
		// Buckets follow the bloom filter, whose words are uintptr-sized
		// even though the table is indexed as uint32s.
		info.bucket = gnuhash[4+bloomSize*uint32(vdsoBloomSizeScale):][:nbucket]
		info.chain = gnuhash[4+bloomSize*uint32(vdsoBloomSizeScale)+nbucket:]
		info.isGNUHash = true
	} else {
		// Parse the hash table header.
		nbucket := hash[0]
		nchain := hash[1]
		info.bucket = hash[2 : 2+nbucket]
		info.chain = hash[2+nbucket : 2+nbucket+nchain]
	}

	// That's all we need.
	info.valid = true
}
// vdsoFindVersion returns the version index for ver in the vDSO's version
// definition table, or -1 if no definition matches. It returns 0 when info
// was not successfully initialized.
func vdsoFindVersion(info *vdsoInfo, ver *vdsoVersionKey) int32 {
	if !info.valid {
		return 0
	}

	// Walk the verdef list; entries chain via vd_next byte offsets.
	def := info.verdef
	for {
		if def.vd_flags&_VER_FLG_BASE == 0 {
			// Not the file's own base version; compare hash first, then name.
			aux := (*elfVerdaux)(add(unsafe.Pointer(def), uintptr(def.vd_aux)))
			if def.vd_hash == ver.verHash && ver.version == gostringnocopy(&info.symstrings[aux.vda_name]) {
				// Mask off the hidden bit to get the version index.
				return int32(def.vd_ndx & 0x7fff)
			}
		}
		if def.vd_next == 0 {
			break
		}
		def = (*elfVerdef)(add(unsafe.Pointer(def), uintptr(def.vd_next)))
	}

	return -1 // cannot match any version
}
// vdsoParseSymbols looks up each symbol in vdsoSymbolKeys in the vDSO's
// dynamic symbol table and stores the resolved address through the key's
// ptr. version is the result of vdsoFindVersion; 0 disables version checks.
func vdsoParseSymbols(info *vdsoInfo, version int32) {
	if !info.valid {
		return
	}

	// apply checks whether symbol symIndex satisfies key k and, if so,
	// records its address. It reports whether the symbol matched.
	apply := func(symIndex uint32, k vdsoSymbolKey) bool {
		sym := &info.symtab[symIndex]
		typ := _ELF_ST_TYPE(sym.st_info)
		bind := _ELF_ST_BIND(sym.st_info)
		// On ppc64x, VDSO functions are of type _STT_NOTYPE.
		if typ != _STT_FUNC && typ != _STT_NOTYPE || bind != _STB_GLOBAL && bind != _STB_WEAK || sym.st_shndx == _SHN_UNDEF {
			return false
		}
		if k.name != gostringnocopy(&info.symstrings[sym.st_name]) {
			return false
		}
		// Check symbol version.
		if info.versym != nil && version != 0 && int32(info.versym[symIndex]&0x7fff) != version {
			return false
		}

		*k.ptr = info.loadOffset + uintptr(sym.st_value)

		return true
	}

	if !info.isGNUHash {
		// Old-style DT_HASH table.
		for _, k := range vdsoSymbolKeys {
			if len(info.bucket) > 0 {
				// chain == 0 (STN_UNDEF) terminates the bucket's chain.
				for chain := info.bucket[k.symHash%uint32(len(info.bucket))]; chain != 0; chain = info.chain[chain] {
					if apply(chain, k) {
						break
					}
				}
			}
		}
		return
	}

	// New-style DT_GNU_HASH table.
	for _, k := range vdsoSymbolKeys {
		symIndex := info.bucket[k.gnuHash%uint32(len(info.bucket))]
		if symIndex < info.symOff {
			// Empty bucket (indexes below symOff are not hashed).
			continue
		}
		for ; ; symIndex++ {
			hash := info.chain[symIndex-info.symOff]
			// The low bit of each chain entry is an end-of-chain flag, so
			// compare hashes with that bit forced on.
			if hash|1 == k.gnuHash|1 {
				// Found a hash match.
				if apply(symIndex, k) {
					break
				}
			}
			if hash&1 != 0 {
				// End of chain.
				break
			}
		}
	}
}
// vdsoauxv processes one auxiliary-vector entry. For AT_SYSINFO_EHDR it
// parses the vDSO image and resolves the symbols the runtime needs.
func vdsoauxv(tag, val uintptr) {
	switch tag {
	case _AT_SYSINFO_EHDR:
		if val == 0 {
			// Something went wrong
			return
		}
		var info vdsoInfo
		// TODO(rsc): I don't understand why the compiler thinks info escapes
		// when passed to the three functions below.
		info1 := (*vdsoInfo)(noescape(unsafe.Pointer(&info)))
		vdsoInitFromSysinfoEhdr(info1, (*elfEhdr)(unsafe.Pointer(val)))
		vdsoParseSymbols(info1, vdsoFindVersion(info1, &vdsoLinuxVersion))
	}
}
// inVDSOPage reports whether PC is on the VDSO page.
//
//go:nosplit
func inVDSOPage(pc uintptr) bool {
	return vdsoLoadStart <= pc && pc < vdsoLoadEnd
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && (amd64 || arm64 || arm64be || ppc64 || ppc64le || loong64 || s390x)
package runtime
import (
"internal/cpu"
"internal/goexperiment"
"unsafe"
)
// vgetrandom1 invokes the vDSO getrandom entry point with the given buffer,
// flags, and opaque per-thread state. It is implemented in assembly.
//
//go:noescape
func vgetrandom1(buf *byte, length uintptr, flags uint32, state uintptr, stateSize uintptr) int
// vgetrandomAlloc is the global freelist of vDSO getrandom opaque states,
// together with the mmap parameters the vDSO reported at init time.
var vgetrandomAlloc struct {
	states     []uintptr // unused state pointers, handed out per M
	statesLock mutex     // guards states
	stateSize  uintptr   // size of one opaque state; 0 means vgetrandom is unavailable
	mmapProt   int32
	mmapFlags  int32
}
func vgetrandomInit() {
if vdsoGetrandomSym == 0 {
return
}
var params struct {
SizeOfOpaqueState uint32
MmapProt uint32
MmapFlags uint32
reserved [13]uint32
}
if vgetrandom1(nil, 0, 0, uintptr(unsafe.Pointer(¶ms)), ^uintptr(0)) != 0 {
return
}
vgetrandomAlloc.stateSize = uintptr(params.SizeOfOpaqueState)
vgetrandomAlloc.mmapProt = int32(params.MmapProt)
vgetrandomAlloc.mmapFlags = int32(params.MmapFlags)
lockInit(&vgetrandomAlloc.statesLock, lockRankVgetrandom)
}
// vgetrandomGetState returns an opaque per-M state pointer for the vDSO
// getrandom, refilling the freelist with a freshly mmapped block of states
// when it is empty. It returns 0 if allocation fails.
func vgetrandomGetState() uintptr {
	lock(&vgetrandomAlloc.statesLock)
	if len(vgetrandomAlloc.states) == 0 {
		num := uintptr(numCPUStartup) // Just a reasonable size hint to start.
		// Round each state up to a cache line and the whole block up to a page.
		stateSizeCacheAligned := (vgetrandomAlloc.stateSize + cpu.CacheLineSize - 1) &^ (cpu.CacheLineSize - 1)
		allocSize := (num*stateSizeCacheAligned + physPageSize - 1) &^ (physPageSize - 1)
		// Recompute num as the number of whole states that fit, counting
		// only states that do not straddle a page (see the loop below).
		num = (physPageSize / stateSizeCacheAligned) * (allocSize / physPageSize)
		p, err := mmap(nil, allocSize, vgetrandomAlloc.mmapProt, vgetrandomAlloc.mmapFlags, -1, 0)
		if err != 0 {
			unlock(&vgetrandomAlloc.statesLock)
			return 0
		}
		setVMAName(p, allocSize, "getrandom states")
		newBlock := uintptr(p)
		if vgetrandomAlloc.states == nil {
			vgetrandomAlloc.states = make([]uintptr, 0, num)
		}
		for i := uintptr(0); i < num; i++ {
			// Never let one state straddle a page boundary; skip ahead to
			// the next page instead.
			if (newBlock&(physPageSize-1))+vgetrandomAlloc.stateSize > physPageSize {
				newBlock = (newBlock + physPageSize - 1) &^ (physPageSize - 1)
			}
			vgetrandomAlloc.states = append(vgetrandomAlloc.states, newBlock)
			newBlock += stateSizeCacheAligned
		}
	}
	// Pop one state off the freelist.
	state := vgetrandomAlloc.states[len(vgetrandomAlloc.states)-1]
	vgetrandomAlloc.states = vgetrandomAlloc.states[:len(vgetrandomAlloc.states)-1]
	unlock(&vgetrandomAlloc.statesLock)
	return state
}
// Free vgetrandom state from the M (if any) prior to destroying the M.
//
// This may allocate, so it must have a P.
func vgetrandomDestroy(mp *m) {
	if mp.vgetrandomState == 0 {
		return
	}
	lock(&vgetrandomAlloc.statesLock)
	// Return the state to the freelist for reuse by a future M.
	vgetrandomAlloc.states = append(vgetrandomAlloc.states, mp.vgetrandomState)
	unlock(&vgetrandomAlloc.statesLock)
}
// vgetrandom fills p with random bytes via the vDSO getrandom, returning the
// number of bytes written (or a negative errno) and whether the vDSO path is
// supported at all. On supported == false the caller should fall back to the
// getrandom syscall.
//
// This is exported for use in internal/syscall/unix as well as x/sys/unix.
//
//go:linkname vgetrandom
func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
	if vgetrandomAlloc.stateSize == 0 {
		// vgetrandomInit found no usable vDSO implementation.
		return -1, false
	}

	// vDSO code may spill registers to the stack
	// Make sure they're zeroed if we're running in secret mode
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		secretEraseRegisters()
	}

	// We use getg().m instead of acquirem() here, because always taking
	// the lock is slightly more expensive than not always taking the lock.
	// However, we *do* require that m doesn't migrate elsewhere during the
	// execution of the vDSO. So, we exploit two details:
	// 1) Asynchronous preemption is aborted when PC is in the runtime.
	// 2) Most of the time, this function only calls vgetrandom1(), which
	// does not have a preamble that synchronously preempts.
	// We do need to take the lock when getting a new state for m, but this
	// is very much the slow path, in the sense that it only ever happens
	// once over the entire lifetime of an m. So, a simple getg().m suffices.
	mp := getg().m
	if mp.vgetrandomState == 0 {
		// Slow path: first use on this M; grab a state under the lock.
		mp.locks++
		state := vgetrandomGetState()
		mp.locks--
		if state == 0 {
			return -1, false
		}
		mp.vgetrandomState = state
	}
	return vgetrandom1(unsafe.SliceData(p), uintptr(len(p)), flags, mp.vgetrandomState, vgetrandomAlloc.stateSize), true
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !android
package runtime
// writeErr writes b to the runtime's error output. Empty slices are a no-op.
//
//go:nosplit
func writeErr(b []byte) {
	if len(b) == 0 {
		return
	}
	writeErrData(&b[0], int32(len(b)))
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unify
import (
"fmt"
"iter"
"maps"
"slices"
)
// A Closure pairs a Value with the environment set that gives meaning to any
// variables the Value (transitively) references.
type Closure struct {
	val *Value
	env envSet
}
// NewSum returns a Closure representing the sum (union) of vs: a fresh
// variable that the environment binds to each of the given values.
func NewSum(vs ...*Value) Closure {
	id := &ident{name: "sum"}
	return Closure{NewValue(Var{id}), topEnv.bind(id, vs...)}
}
// IsBottom returns whether c consists of no values.
func (c Closure) IsBottom() bool {
	if c.val.Domain == nil {
		return true
	}
	return false
}
// Summands returns the top-level Values of c. This assumes the top-level of c
// was constructed as a sum, and is mostly useful for debugging.
func (c Closure) Summands() iter.Seq[*Value] {
	return func(yield func(*Value) bool) {
		// rec yields the non-Var leaves reachable from v, expanding each
		// variable by partitioning the environment on its binding. It
		// reports whether iteration should continue.
		var rec func(v *Value, env envSet) bool
		rec = func(v *Value, env envSet) bool {
			switch d := v.Domain.(type) {
			case Var:
				parts := env.partitionBy(d.id)
				for _, part := range parts {
					// It may be a sum of sums. Walk into this value.
					if !rec(part.value, part.env) {
						return false
					}
				}
				return true
			default:
				return yield(v)
			}
		}
		rec(c.val, c.env)
	}
}
// All enumerates all possible concrete values of c by substituting variables
// from the environment.
//
// E.g., enumerating this Value
//
// a: !sum [1, 2]
// b: !sum [3, 4]
//
// results in
//
// - {a: 1, b: 3}
// - {a: 1, b: 4}
// - {a: 2, b: 3}
// - {a: 2, b: 4}
func (c Closure) All() iter.Seq[*Value] {
	// In order to enumerate all concrete values under all possible variable
	// bindings, we use a "non-deterministic continuation passing style" to
	// implement this. We use CPS to traverse the Value tree, threading the
	// (possibly narrowing) environment through that CPS following an Euler
	// tour. Where the environment permits multiple choices, we invoke the same
	// continuation for each choice. Similar to a yield function, the
	// continuation can return false to stop the non-deterministic walk.
	return func(yield func(*Value) bool) {
		c.val.all1(c.env, func(v *Value, e envSet) bool {
			// The final environment is not needed; yield just the value.
			return yield(v)
		})
	}
}
// all1 invokes cont with each concrete value v can take under each
// environment in e, threading the (possibly narrowed) environment through.
// It reports whether the walk ran to completion (cont never returned false).
func (v *Value) all1(e envSet, cont func(*Value, envSet) bool) bool {
	switch d := v.Domain.(type) {
	default:
		panic(fmt.Sprintf("unknown domain type %T", d))
	case nil:
		// Bottom: no concrete values to enumerate.
		return true
	case Top, String:
		// Leaves: pass through unchanged.
		return cont(v, e)
	case Def:
		fields := d.keys()
		// We can reuse this parts slice because we're doing a DFS through the
		// state space. (Otherwise, we'd have to do some messy threading of an
		// immutable slice-like value through allElt.)
		parts := make(map[string]*Value, len(fields))
		// TODO: If there are no Vars or Sums under this Def, then nothing can
		// change the Value or env, so we could just cont(v, e).
		var allElt func(elt int, e envSet) bool
		allElt = func(elt int, e envSet) bool {
			if elt == len(fields) {
				// Build a new Def from the concrete parts. Clone parts because
				// we may reuse it on other non-deterministic branches.
				nVal := newValueFrom(Def{maps.Clone(parts)}, v)
				return cont(nVal, e)
			}
			// Enumerate field elt, then recurse to the next field with the
			// environment each choice leaves us in.
			return d.fields[fields[elt]].all1(e, func(v *Value, e envSet) bool {
				parts[fields[elt]] = v
				return allElt(elt+1, e)
			})
		}
		return allElt(0, e)
	case Tuple:
		// Essentially the same as Def.
		if d.repeat != nil {
			// There's nothing we can do with this.
			return cont(v, e)
		}
		parts := make([]*Value, len(d.vs))
		var allElt func(elt int, e envSet) bool
		allElt = func(elt int, e envSet) bool {
			if elt == len(d.vs) {
				// Build a new tuple from the concrete parts. Clone parts because
				// we may reuse it on other non-deterministic branches.
				nVal := newValueFrom(Tuple{vs: slices.Clone(parts)}, v)
				return cont(nVal, e)
			}
			return d.vs[elt].all1(e, func(v *Value, e envSet) bool {
				parts[elt] = v
				return allElt(elt+1, e)
			})
		}
		return allElt(0, e)
	case Var:
		// Go each way this variable can be bound.
		for _, ePart := range e.partitionBy(d.id) {
			// d.id is no longer bound in this environment partition. We may
			// need it later in the Euler tour, so bind it back to this single
			// value.
			env := ePart.env.bind(d.id, ePart.value)
			if !ePart.value.all1(env, cont) {
				return false
			}
		}
		return true
	}
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unify
import (
"fmt"
"iter"
"maps"
"reflect"
"regexp"
"slices"
"strconv"
"strings"
)
// A Domain is a non-empty set of values, all of the same kind.
//
// Domain may be a scalar:
//
// - [String] - Represents string-typed values.
//
// Or a composite:
//
// - [Def] - A mapping from fixed keys to [Domain]s.
//
// - [Tuple] - A fixed-length sequence of [Domain]s or
// all possible lengths repeating a [Domain].
//
// Or top or bottom:
//
// - [Top] - Represents all possible values of all kinds.
//
// - nil - Represents no values.
//
// Or a variable:
//
// - [Var] - A value captured in the environment.
type Domain interface {
	// Exact reports whether this Domain represents a single concrete value.
	Exact() bool
	// WhyNotExact returns a human-readable reason this Domain is not exact,
	// or "" if it is exact.
	WhyNotExact() string
	// decode stores this value in a Go value. If this value is not exact, this
	// returns a potentially wrapped *inexactError.
	decode(reflect.Value) error
}
// inexactError reports an attempt to decode a non-exact value (e.g. a regex
// or repeated tuple) into a concrete Go value.
type inexactError struct {
	valueType string
	goType    string
}

func (e *inexactError) Error() string {
	return "cannot store inexact " + e.valueType + " value in " + e.goType
}
// decodeError wraps an error with the dotted path of the field where
// decoding failed.
type decodeError struct {
	path string
	err  error
}

// newDecodeError prefixes path onto err. If err is itself a *decodeError,
// the two paths are joined with "." rather than nesting the errors.
func newDecodeError(path string, err error) *decodeError {
	if inner, ok := err.(*decodeError); ok {
		return &decodeError{path: path + "." + inner.path, err: inner.err}
	}
	return &decodeError{path: path, err: err}
}

func (e *decodeError) Unwrap() error {
	return e.err
}

func (e *decodeError) Error() string {
	return fmt.Sprintf("%s: %s", e.path, e.err)
}
// Top represents all possible values of all possible types.
type Top struct{}

func (t Top) Exact() bool { return false }

func (t Top) WhyNotExact() string { return "is top" }

func (t Top) decode(rv reflect.Value) error {
	// Only a pointer-typed destination can represent Top, as nil.
	if rv.Kind() == reflect.Pointer {
		rv.SetZero()
		return nil
	}
	return &inexactError{"top", rv.Type().String()}
}
// A Def is a mapping from field names to [Value]s. Any fields not explicitly
// listed have [Value] [Top].
type Def struct {
	// fields maps field name -> value. A nil map is an empty Def.
	fields map[string]*Value
}
// A DefBuilder builds a [Def] one field at a time. The zero value is an empty
// [Def].
type DefBuilder struct {
	fields map[string]*Value
}

// Add records field name with value v. It panics on a duplicate name, since
// silently overwriting a field would hide a construction bug.
func (b *DefBuilder) Add(name string, v *Value) {
	if b.fields == nil {
		// Lazily allocate so the zero DefBuilder works.
		b.fields = make(map[string]*Value)
	}
	if old, ok := b.fields[name]; ok {
		panic(fmt.Sprintf("duplicate field %q, added value is %v, old value is %v", name, v, old))
	}
	b.fields[name] = v
}
// Build constructs a [Def] from the fields added to this builder.
func (b *DefBuilder) Build() Def {
	// Clone so later Add calls cannot mutate the returned Def.
	fields := maps.Clone(b.fields)
	return Def{fields: fields}
}
// Exact returns true if all field Values are exact.
func (d Def) Exact() bool {
	exact := true
	for _, v := range d.fields {
		// && short-circuits, so v.Exact() is not called once a field has
		// already proven the Def inexact.
		exact = exact && v.Exact()
	}
	return exact
}
// WhyNotExact returns why the value is not exact, or "" if it is exact.
//
// Fields are visited in sorted key order so the reported field is
// deterministic; iterating the map directly would report a different field
// from run to run when several fields are inexact.
func (d Def) WhyNotExact() string {
	for _, s := range slices.Sorted(maps.Keys(d.fields)) {
		if v := d.fields[s]; !v.Exact() {
			w := v.WhyNotExact()
			return "field " + s + ": " + w
		}
	}
	return ""
}
// decode stores d into the struct rv. Exported struct fields are matched to
// d's fields first by exact name and then case-insensitively; fields with no
// match decode from Top. On failure it returns a *decodeError naming the
// offending field.
func (d Def) decode(rv reflect.Value) error {
	if rv.Kind() != reflect.Struct {
		return fmt.Errorf("cannot decode Def into %s", rv.Type())
	}
	var lowered map[string]string // Lower case -> canonical for d.fields.
	rt := rv.Type()
	for fi := range rv.NumField() {
		fType := rt.Field(fi)
		if fType.PkgPath != "" {
			// Skip unexported fields.
			continue
		}
		v := d.fields[fType.Name]
		if v == nil {
			v = topValue
			// Try a case-insensitive match
			canon, ok := d.fields[strings.ToLower(fType.Name)]
			if ok {
				v = canon
			} else {
				// Lazily build a map from lower-cased field names to their
				// canonical spellings (only entries that actually differ).
				if lowered == nil {
					lowered = make(map[string]string, len(d.fields))
					for k := range d.fields {
						l := strings.ToLower(k)
						if k != l {
							lowered[l] = k
						}
					}
				}
				canon, ok := lowered[strings.ToLower(fType.Name)]
				if ok {
					v = d.fields[canon]
				}
			}
		}
		if err := decodeReflect(v, rv.Field(fi)); err != nil {
			return newDecodeError(fType.Name, err)
		}
	}
	return nil
}
// keys returns d's field names in sorted order.
func (d Def) keys() []string {
	names := make([]string, 0, len(d.fields))
	for name := range d.fields {
		names = append(names, name)
	}
	slices.Sort(names)
	return names
}
// All returns d's fields as (name, value) pairs in sorted name order.
func (d Def) All() iter.Seq2[string, *Value] {
	// TODO: We call All fairly often. It's probably bad to sort this every
	// time.
	keys := d.keys()
	return func(yield func(string, *Value) bool) {
		for _, k := range keys {
			if !yield(k, d.fields[k]) {
				return
			}
		}
	}
}
// A Tuple is a sequence of Values in one of two forms: 1. a fixed-length tuple,
// where each Value can be different or 2. a "repeated tuple", which is a Value
// repeated 0 or more times.
type Tuple struct {
	// vs holds the fixed-length form; it is nil when repeat is non-nil.
	vs []*Value

	// repeat, if non-nil, means this Tuple consists of an element repeated 0 or
	// more times. If repeat is non-nil, vs must be nil. This is a generator
	// function because we don't necessarily want *exactly* the same Value
	// repeated. For example, in YAML encoding, a !sum in a repeated tuple needs
	// a fresh variable in each instance.
	repeat []func(envSet) (*Value, envSet)
}
// NewTuple returns a fixed-length Tuple of the given element Values.
func NewTuple(vs ...*Value) Tuple {
	var t Tuple
	t.vs = vs
	return t
}
// NewRepeat returns a repeated Tuple whose elements are produced by gens.
func NewRepeat(gens ...func(envSet) (*Value, envSet)) Tuple {
	var t Tuple
	t.repeat = gens
	return t
}
// Exact reports whether d is a fixed-length tuple all of whose elements are
// exact. A repeated tuple is never exact.
func (d Tuple) Exact() bool {
	if d.repeat != nil {
		return false
	}
	return !slices.ContainsFunc(d.vs, func(v *Value) bool { return !v.Exact() })
}
// WhyNotExact returns why the value is not exact, or "" if it is exact.
func (d Tuple) WhyNotExact() string {
	if d.repeat != nil {
		return "d.repeat is not nil"
	}
	for i, v := range d.vs {
		if v.Exact() {
			continue
		}
		return "index " + strconv.Itoa(i) + ": " + v.WhyNotExact()
	}
	return ""
}
// decode stores d into the slice rv, resizing it to len(d.vs). Repeated
// tuples have no fixed length and cannot be decoded.
func (d Tuple) decode(rv reflect.Value) error {
	if d.repeat != nil {
		return &inexactError{"repeated tuple", rv.Type().String()}
	}
	// TODO: We could also do arrays.
	if rv.Kind() != reflect.Slice {
		return fmt.Errorf("cannot decode Tuple into %s", rv.Type())
	}
	if rv.IsNil() || rv.Cap() < len(d.vs) {
		rv.Set(reflect.MakeSlice(rv.Type(), len(d.vs), len(d.vs)))
	} else {
		// Reuse the existing backing array.
		rv.SetLen(len(d.vs))
	}
	for i, v := range d.vs {
		if err := decodeReflect(v, rv.Index(i)); err != nil {
			return newDecodeError(fmt.Sprintf("%d", i), err)
		}
	}
	return nil
}
// A String represents a set of strings. It can represent the intersection of a
// set of regexps, or a single exact string. In general, the domain of a String
// is non-empty, but we do not attempt to prove emptiness of a regexp value.
type String struct {
	kind  stringKind
	re    []*regexp.Regexp // Intersection of regexps (kind == stringRegex)
	exact string           // The single member string (kind == stringExact)
}
// stringKind discriminates the two representations of a String domain.
type stringKind int

const (
	stringRegex stringKind = iota // intersection of the regexps in String.re
	stringExact                   // exactly the string in String.exact
)
// NewStringRegex returns a String domain that is the intersection of the
// given regular expressions. If any expression reduces to a literal string,
// the whole domain collapses to that exact string. No expressions is treated
// as the exact empty string.
func NewStringRegex(exprs ...string) (String, error) {
	if len(exprs) == 0 {
		exprs = []string{""}
	}
	// kind: -1 is a "not yet determined" sentinel; the loop below always
	// replaces it with stringExact or stringRegex.
	v := String{kind: -1}
	for _, expr := range exprs {
		if expr == "" {
			// Skip constructing the regexp. It won't have a "literal prefix"
			// and so we wind up thinking this is a regexp instead of an exact
			// (empty) string.
			v = String{kind: stringExact, exact: ""}
			continue
		}

		// Anchor the expression so it must match the entire string.
		re, err := regexp.Compile(`\A(?:` + expr + `)\z`)
		if err != nil {
			return String{}, fmt.Errorf("parsing value: %s", err)
		}

		// An exact value narrows the whole domain to exact, so we're done, but
		// should keep parsing.
		if v.kind == stringExact {
			continue
		}

		if exact, complete := re.LiteralPrefix(); complete {
			v = String{kind: stringExact, exact: exact}
		} else {
			v.kind = stringRegex
			v.re = append(v.re, re)
		}
	}
	return v, nil
}
// NewStringExact returns a String domain containing exactly s.
func NewStringExact(s string) String {
	var v String
	v.kind = stringExact
	v.exact = s
	return v
}
// Exact returns whether this Value is known to consist of a single string.
func (d String) Exact() bool {
	if d.kind == stringExact {
		return true
	}
	return false
}
// WhyNotExact returns why the value is not exact, or "" if it is exact.
func (d String) WhyNotExact() string {
	if d.kind != stringExact {
		return "string is not exact"
	}
	return ""
}
// decode stores the exact string d into rv, parsing it as an int or bool
// when rv has those kinds. Non-exact (regex) domains cannot be decoded.
func (d String) decode(rv reflect.Value) error {
	if d.kind != stringExact {
		return &inexactError{"regex", rv.Type().String()}
	}
	switch rv.Kind() {
	default:
		return fmt.Errorf("cannot decode String into %s", rv.Type())
	case reflect.String:
		rv.SetString(d.exact)
	case reflect.Int:
		i, err := strconv.Atoi(d.exact)
		if err != nil {
			return fmt.Errorf("cannot decode String into %s: %s", rv.Type(), err)
		}
		rv.SetInt(int64(i))
	case reflect.Bool:
		b, err := strconv.ParseBool(d.exact)
		if err != nil {
			return fmt.Errorf("cannot decode String into %s: %s", rv.Type(), err)
		}
		rv.SetBool(b)
	}
	return nil
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unify
import (
"bytes"
"fmt"
"html"
"io"
"os"
"os/exec"
"strings"
)
// maxNodes caps how many Value/envExpr nodes are rendered per subgraph.
const maxNodes = 30
// dotEncoder accumulates a Graphviz "dot" rendering of Values and
// environment sets for debugging.
type dotEncoder struct {
	w *bytes.Buffer

	idGen int // Node name generation

	valLimit int // Limit the number of Values in a subgraph

	idp identPrinter
}
// newDotEncoder returns an encoder with an empty output buffer.
func newDotEncoder() *dotEncoder {
	var enc dotEncoder
	enc.w = new(bytes.Buffer)
	return &enc
}
// clear resets the encoder for reuse, dropping all buffered output and
// restarting node-ID generation.
func (enc *dotEncoder) clear() {
	enc.idGen = 0
	enc.w.Reset()
}
// writeTo writes the complete dot document (header, accumulated body,
// trailer) to w.
func (enc *dotEncoder) writeTo(w io.Writer) {
	// Header: use the "new" ranking algorithm, which lets us put nodes from
	// different clusters in the same rank.
	for _, line := range []string{
		"digraph {",
		"newrank=true;",
		"node [shape=box, ordering=out];",
	} {
		fmt.Fprintln(w, line)
	}
	w.Write(enc.w.Bytes())
	fmt.Fprintln(w, "}")
}
// writeSvg renders the accumulated graph by piping it through the external
// Graphviz "dot" command and writes the resulting SVG to w, trimming the XML
// preamble so the output can be embedded directly in HTML.
func (enc *dotEncoder) writeSvg(w io.Writer) error {
	cmd := exec.Command("dot", "-Tsvg")
	in, err := cmd.StdinPipe()
	if err != nil {
		return err
	}
	var out bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		return err
	}
	// Feed the document to dot; closing stdin signals end of input.
	enc.writeTo(in)
	in.Close()
	if err := cmd.Wait(); err != nil {
		return err
	}

	// Trim SVG header so the result can be embedded
	//
	// TODO: In Graphviz 10.0.1, we could use -Tsvg_inline.
	svg := out.Bytes()
	if i := bytes.Index(svg, []byte("<svg ")); i >= 0 {
		svg = svg[i:]
	}

	_, err = w.Write(svg)
	return err
}
// newID formats the next generated ID using pattern f (e.g. "n%d") and
// advances the generator.
func (enc *dotEncoder) newID(f string) string {
	defer func() { enc.idGen++ }()
	return fmt.Sprintf(f, enc.idGen)
}
// node emits a box node with an HTML-escaped label and optional smaller
// sublabel, returning the node's generated ID.
func (enc *dotEncoder) node(label, sublabel string) string {
	id := enc.newID("n%d")
	text := html.EscapeString(label)
	if sublabel != "" {
		text += "<BR ALIGN=\"CENTER\"/><FONT POINT-SIZE=\"10\">" + html.EscapeString(sublabel) + "</FONT>"
	}
	fmt.Fprintf(enc.w, "%s [label=<%s>];\n", id, text)
	return id
}
// edge emits an edge from node ID from to node ID to, labeled with the
// formatted label.
func (enc *dotEncoder) edge(from, to string, label string, args ...any) {
	fmt.Fprintf(enc.w, "%s -> %s [label=%q];\n", from, to, fmt.Sprintf(label, args...))
}
// valueSubgraph emits v inside its own invisible cluster, resetting the node
// budget so each subgraph is truncated independently.
func (enc *dotEncoder) valueSubgraph(v *Value) {
	enc.valLimit = maxNodes
	cID := enc.newID("cluster_%d")
	fmt.Fprintf(enc.w, "subgraph %s {\n", cID)
	fmt.Fprintf(enc.w, "style=invis;")
	vID := enc.value(v)
	fmt.Fprintf(enc.w, "}\n")
	// We don't need the IDs right now.
	_, _ = cID, vID
}
// value emits dot nodes and edges for v and returns the name of the node
// representing v. Once enc.valLimit is exhausted, subtrees are elided as a
// triangle node labeled "...".
func (enc *dotEncoder) value(v *Value) string {
	if enc.valLimit <= 0 {
		id := enc.newID("n%d")
		fmt.Fprintf(enc.w, "%s [label=\"...\", shape=triangle];\n", id)
		return id
	}
	enc.valLimit--

	switch vd := v.Domain.(type) {
	default:
		panic(fmt.Sprintf("unknown domain type %T", vd))
	case nil:
		// Bottom.
		return enc.node("_|_", "")
	case Top:
		return enc.node("_", "")
	// TODO: Like in YAML, figure out if this is just a sum. In dot, we
	// could say any unentangled variable is a sum, and if it has more than
	// one reference just share the node.
	// case Sum:
	// 	node := enc.node("Sum", "")
	// 	for i, elt := range vd.vs {
	// 		enc.edge(node, enc.value(elt), "%d", i)
	// 		if enc.valLimit <= 0 {
	// 			break
	// 		}
	// 	}
	// 	return node
	case Def:
		node := enc.node("Def", "")
		for k, v := range vd.All() {
			enc.edge(node, enc.value(v), "%s", k)
			if enc.valLimit <= 0 {
				break
			}
		}
		return node
	case Tuple:
		if vd.repeat == nil {
			label := "Tuple"
			node := enc.node(label, "")
			for i, elt := range vd.vs {
				enc.edge(node, enc.value(elt), "%d", i)
				if enc.valLimit <= 0 {
					break
				}
			}
			return node
		} else {
			// TODO
			return enc.node("TODO: Repeat", "")
		}
	case String:
		switch vd.kind {
		case stringExact:
			return enc.node(fmt.Sprintf("%q", vd.exact), "")
		case stringRegex:
			// Render the intersection of regexps joined with "&".
			var parts []string
			for _, re := range vd.re {
				parts = append(parts, fmt.Sprintf("%q", re))
			}
			return enc.node(strings.Join(parts, "&"), "")
		}
		panic("bad String kind")
	case Var:
		return enc.node(fmt.Sprintf("Var %s", enc.idp.unique(vd.id)), "")
	}
}
// envSubgraph emits the environment set e inside its own invisible cluster,
// resetting the node budget like valueSubgraph.
func (enc *dotEncoder) envSubgraph(e envSet) {
	enc.valLimit = maxNodes
	cID := enc.newID("cluster_%d")
	fmt.Fprintf(enc.w, "subgraph %s {\n", cID)
	fmt.Fprintf(enc.w, "style=invis;")
	vID := enc.env(e.root)
	fmt.Fprintf(enc.w, "}\n")
	_, _ = cID, vID
}
// env emits dot nodes and edges for the environment expression e and returns
// the name of e's node. Zero, unit, product, and sum nodes are rendered as
// "0", "1", "⨯", and "+" respectively.
func (enc *dotEncoder) env(e *envExpr) string {
	switch e.kind {
	default:
		panic("bad kind")
	case envZero:
		return enc.node("0", "")
	case envUnit:
		return enc.node("1", "")
	case envBinding:
		node := enc.node(fmt.Sprintf("%q :", enc.idp.unique(e.id)), "")
		enc.edge(node, enc.value(e.val), "")
		return node
	case envProduct:
		node := enc.node("⨯", "")
		for _, op := range e.operands {
			enc.edge(node, enc.env(op), "")
		}
		return node
	case envSum:
		node := enc.node("+", "")
		for _, op := range e.operands {
			enc.edge(node, enc.env(op), "")
		}
		return node
	}
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unify
import (
"fmt"
"iter"
"reflect"
"strings"
)
// An envSet is an immutable set of environments, where each environment is a
// mapping from [ident]s to [Value]s.
//
// To keep this compact, we use an algebraic representation similar to
// relational algebra. The atoms are zero, unit, or a singular binding:
//
// - A singular binding {x: v} is an environment set consisting of a single
// environment that binds a single ident x to a single value v.
//
// - Zero (0) is the empty set.
//
// - Unit (1) is an environment set consisting of a single, empty environment
// (no bindings).
//
// From these, we build up more complex sets of environments using sums and
// cross products:
//
// - A sum, E + F, is simply the union of the two environment sets: E ∪ F
//
// - A cross product, E ⨯ F, is the Cartesian product of the two environment
// sets, followed by joining each pair of environments: {e ⊕ f | (e, f) ∊ E ⨯ F}
//
// The join of two environments, e ⊕ f, is an environment that contains all of
// the bindings in either e or f. To detect bugs, it is an error if an
// identifier is bound in both e and f (however, see below for what we could do
// differently).
//
// Environment sets form a commutative semiring and thus obey the usual
// commutative semiring rules:
//
// e + 0 = e
// e ⨯ 0 = 0
// e ⨯ 1 = e
// e + f = f + e
// e ⨯ f = f ⨯ e
//
// Furthermore, environments sets are additively and multiplicatively idempotent
// because + and ⨯ are themselves defined in terms of sets:
//
// e + e = e
// e ⨯ e = e
//
// # Examples
//
// To represent {{x: 1, y: 1}, {x: 2, y: 2}}, we build the two environments and
// sum them:
//
// ({x: 1} ⨯ {y: 1}) + ({x: 2} ⨯ {y: 2})
//
// If we add a third variable z that can be 1 or 2, independent of x and y, we
// get four logical environments:
//
// {x: 1, y: 1, z: 1}
// {x: 2, y: 2, z: 1}
// {x: 1, y: 1, z: 2}
// {x: 2, y: 2, z: 2}
//
// This could be represented as a sum of all four environments, but because z is
// independent, we can use a more compact representation:
//
// (({x: 1} ⨯ {y: 1}) + ({x: 2} ⨯ {y: 2})) ⨯ ({z: 1} + {z: 2})
//
// # Generalized cross product
//
// While cross-product is currently restricted to disjoint environments, we
// could generalize the definition of joining two environments to:
//
// {xₖ: vₖ} ⊕ {xₖ: wₖ} = {xₖ: vₖ ∩ wₖ} (where unbound idents are bound to the [Top] value, ⟙)
//
// where v ∩ w is the unification of v and w. This itself could be coarsened to
//
// v ∩ w = v if w = ⟙
// = w if v = ⟙
// = v if v = w
// = 0 otherwise
//
// We could use this rule to implement substitution. For example, E ⨯ {x: 1}
// narrows environment set E to only environments in which x is bound to 1. But
// we currently don't do this.
// envSet wraps the root of the algebraic expression tree described by the
// comment above.
type envSet struct {
	root *envExpr
}
// envExpr is one node in the algebraic representation of an [envSet].
type envExpr struct {
	// TODO: A tree-based data structure for this may not be ideal, since it
	// involves a lot of walking to find things and we often have to do deep
	// rewrites anyway for partitioning. Would some flattened array-style
	// representation be better, possibly combined with an index of ident uses?
	// We could even combine that with an immutable array abstraction (ala
	// Clojure) that could enable more efficient construction operations.

	kind envExprKind

	// For envBinding
	id  *ident
	val *Value

	// For sum or product. Len must be >= 2 and none of the elements can have
	// the same kind as this node.
	operands []*envExpr
}
// envExprKind identifies the five [envExpr] node types.
type envExprKind byte

const (
	envZero    envExprKind = iota // the empty set, 0
	envUnit                       // the set of one empty environment, 1
	envProduct                    // cross product of operands
	envSum                        // union of operands
	envBinding                    // a singular binding {id: val}
)
var (
	// topEnv is the unit value (multiplicative identity) of a [envSet].
	topEnv = envSet{envExprUnit}
	// bottomEnv is the zero value (additive identity) of a [envSet].
	bottomEnv = envSet{envExprZero}

	// Shared singleton leaves; zero and unit nodes always alias these.
	envExprZero = &envExpr{kind: envZero}
	envExprUnit = &envExpr{kind: envUnit}
)
// bind binds id to each of vals in e; the result is e ⨯ ({id: vals[0]} +
// {id: vals[1]} + ...). Binding zero vals yields [bottomEnv].
//
// It panics if id is already bound in e.
//
// Environments are typically initially constructed by starting with [topEnv]
// and calling bind one or more times.
func (e envSet) bind(id *ident, vals ...*Value) envSet {
	if e.isEmpty() {
		// 0 ⨯ anything = 0.
		return bottomEnv
	}
	// TODO: If any of vals are _, should we just drop that val? We're kind of
	// inconsistent about whether an id missing from e means id is invalid or
	// means id is _.

	// Check that id isn't present in e.
	for range e.root.bindings(id) {
		panic("id " + id.name + " already present in environment")
	}

	// Create a sum of all the values.
	//
	// Pre-size to len(vals): the previous capacity hint of 1 forced a
	// reallocation on every multi-value bind (e.g. from NewSum).
	bindings := make([]*envExpr, 0, len(vals))
	for _, val := range vals {
		bindings = append(bindings, &envExpr{kind: envBinding, id: id, val: val})
	}

	// Multiply it in.
	return envSet{newEnvExprProduct(e.root, newEnvExprSum(bindings...))}
}
// isEmpty reports whether e is the bottom (zero) environment set.
func (e envSet) isEmpty() bool {
	if e.root.kind == envZero {
		return true
	}
	return false
}
// bindings yields all [envBinding] nodes in e with the given id. If id is nil,
// it yields all binding nodes.
func (e *envExpr) bindings(id *ident) iter.Seq[*envExpr] {
	// This is just a pre-order walk and it happens this is the only thing we
	// need a pre-order walk for.
	return func(yield func(*envExpr) bool) {
		var rec func(e *envExpr) bool
		rec = func(e *envExpr) bool {
			if e.kind == envBinding && (id == nil || e.id == id) {
				if !yield(e) {
					return false
				}
			}
			// Only sums and products have operands; leaves fall through.
			for _, o := range e.operands {
				if !rec(o) {
					return false
				}
			}
			return true
		}
		rec(e)
	}
}
// newEnvExprProduct constructs a product node from exprs, performing
// simplifications. It does NOT check that bindings are disjoint.
func newEnvExprProduct(exprs ...*envExpr) *envExpr {
	var factors []*envExpr
	for _, expr := range exprs {
		switch expr.kind {
		case envZero:
			// Zero annihilates the whole product.
			return envExprZero
		case envUnit:
			// Unit is the identity; drop it.
		case envProduct:
			// Flatten nested products.
			factors = append(factors, expr.operands...)
		default:
			factors = append(factors, expr)
		}
	}
	switch len(factors) {
	case 0:
		return envExprUnit
	case 1:
		return factors[0]
	default:
		return &envExpr{kind: envProduct, operands: factors}
	}
}
// newEnvExprSum constructs a sum node from exprs, performing simplifications.
func newEnvExprSum(exprs ...*envExpr) *envExpr {
	// TODO: If all of envs are products (or bindings), factor any common terms.
	// E.g., x * y + x * z ==> x * (y + z). This is easy to do for binding
	// terms, but harder to do for more general terms.

	// have deduplicates terms by node identity, making the sum idempotent
	// (e + e = e).
	var have smallSet[*envExpr]
	terms := make([]*envExpr, 0, 2)
	for _, expr := range exprs {
		switch expr.kind {
		case envZero:
			// No effect on sum
		case envSum:
			// Flatten nested sums.
			for _, expr1 := range expr.operands {
				if have.Add(expr1) {
					terms = append(terms, expr1)
				}
			}
		default:
			if have.Add(expr) {
				terms = append(terms, expr)
			}
		}
	}
	if len(terms) == 0 {
		return envExprZero
	} else if len(terms) == 1 {
		return terms[0]
	}
	return &envExpr{kind: envSum, operands: terms}
}
// crossEnvs combines two environment sets into their cross product. The two
// sides must bind disjoint sets of idents; it panics otherwise.
func crossEnvs(env1, env2 envSet) envSet {
	// Confirm that envs have disjoint idents.
	var seen smallSet[*ident]
	for b := range env1.root.bindings(nil) {
		seen.Add(b.id)
	}
	for b := range env2.root.bindings(nil) {
		if seen.Has(b.id) {
			panic(fmt.Sprintf("%s bound on both sides of cross-product", b.id.name))
		}
	}
	return envSet{newEnvExprProduct(env1.root, env2.root)}
}
// unionEnvs combines environment sets into their union.
func unionEnvs(envs ...envSet) envSet {
	roots := make([]*envExpr, 0, len(envs))
	for _, env := range envs {
		roots = append(roots, env.root)
	}
	return envSet{newEnvExprSum(roots...)}
}
// envPartition is a subset of an env where id is bound to value in all
// deterministic environments.
type envPartition struct {
	id    *ident // the variable this partition isolates
	value *Value // the single value id takes throughout env
	env   envSet // the environments where id == value, with id substituted out
}
// partitionBy splits e by distinct bindings of id and removes id from each
// partition.
//
// If there are environments in e where id is not bound, they will not be
// reflected in any partition.
//
// It panics if e is bottom, since attempting to partition an empty environment
// set almost certainly indicates a bug.
func (e envSet) partitionBy(id *ident) []envPartition {
	if e.isEmpty() {
		// We could return zero partitions, but getting here at all almost
		// certainly indicates a bug.
		panic("cannot partition empty environment set")
	}
	// Emit one partition per distinct value of id.
	var (
		done  smallSet[*Value]
		parts []envPartition
	)
	for b := range e.root.bindings(id) {
		if !done.Add(b.val) {
			// Already emitted a partition for this value.
			continue
		}
		parts = append(parts, envPartition{
			id:    id,
			value: b.val,
			env:   envSet{e.root.substitute(id, b.val)},
		})
	}
	return parts
}
// substitute replaces bindings of id to val with 1 and bindings of id to any
// other value with 0 and simplifies the result.
func (e *envExpr) substitute(id *ident, val *Value) *envExpr {
	switch e.kind {
	default:
		panic("bad kind")
	case envZero, envUnit:
		// Constants are unaffected by substitution.
		return e
	case envBinding:
		if e.id != id {
			// A different variable; leave untouched.
			return e
		} else if e.val != val {
			// id bound to some other value: this term vanishes.
			return envExprZero
		} else {
			// id bound to val: this term is satisfied.
			return envExprUnit
		}
	case envProduct, envSum:
		// Substitute each operand. Sometimes, this won't change anything, so we
		// build the new operands list lazily.
		var nOperands []*envExpr
		for i, op := range e.operands {
			nOp := op.substitute(id, val)
			if nOperands == nil && op != nOp {
				// Operand diverged; initialize nOperands.
				nOperands = make([]*envExpr, 0, len(e.operands))
				nOperands = append(nOperands, e.operands[:i]...)
			}
			if nOperands != nil {
				nOperands = append(nOperands, nOp)
			}
		}
		if nOperands == nil {
			// Nothing changed.
			return e
		}
		// Rebuild through the constructors to re-apply their simplifications.
		if e.kind == envProduct {
			return newEnvExprProduct(nOperands...)
		} else {
			return newEnvExprSum(nOperands...)
		}
	}
}
// A smallSet is a set optimized for stack allocation when small.
type smallSet[T comparable] struct {
	array [32]T          // first elements, stored inline to avoid allocation
	n     int            // number of elements used in array
	m     map[T]struct{} // overflow once array is full; nil until needed
}
// Has returns whether val is in set.
func (s *smallSet[T]) Has(val T) bool {
	// Scan the inline portion first.
	for _, have := range s.array[:s.n] {
		if have == val {
			return true
		}
	}
	// Fall back to the overflow map; lookups on a nil map are safe.
	_, ok := s.m[val]
	return ok
}
// Add adds val to the set and returns true if it was added (not already
// present).
func (s *smallSet[T]) Add(val T) bool {
	if s.Has(val) {
		return false
	}
	// Not present; insert into the inline array if there's room, otherwise
	// into the overflow map.
	switch {
	case s.n < len(s.array):
		s.array[s.n] = val
		s.n++
	case s.m == nil:
		s.m = map[T]struct{}{val: {}}
	default:
		s.m[val] = struct{}{}
	}
	return true
}
// An ident is a unique variable identity. Distinct idents are distinct
// variables even if they share the same name; compare *ident pointers.
type ident struct {
	_    [0]func() // Not comparable (only compare *ident)
	name string
}
// A Var is a variable [Domain]. Its possible values are given by the
// bindings of id in the accompanying environment.
type Var struct {
	id *ident
}
// Exact implements [Domain]. Variables never appear in concrete Values, so
// this always panics.
func (d Var) Exact() bool {
	// These can't appear in concrete Values.
	panic("Exact called on non-concrete Value")
}
// WhyNotExact implements [Domain]. Variables never appear in concrete
// Values, so this returns a diagnostic message rather than panicking.
func (d Var) WhyNotExact() string {
	// These can't appear in concrete Values.
	return "WhyNotExact called on non-concrete Value"
}
// decode implements [Domain]. A variable is never exact, so decoding always
// fails with an inexactError naming the target type.
func (d Var) decode(rv reflect.Value) error {
	return &inexactError{"var", rv.Type().String()}
}
// unify unifies variable d with w under environment set e. The result is d
// itself: the effect of unification is captured entirely in the returned
// environment, which re-binds d's ident in each surviving branch.
func (d Var) unify(w *Value, e envSet, swap bool, uf *unifier) (Domain, envSet, error) {
	// TODO: Vars from !sums in the input can have a huge number of values.
	// Unifying these could be way more efficient with some indexes over any
	// exact values we can pull out, like Def fields that are exact Strings.
	// Maybe we try to produce an array of yes/no/maybe matches and then we only
	// have to do deeper evaluation of the maybes. We could probably cache this
	// on an envTerm. It may also help to special-case Var/Var unification to
	// pick which one to index versus enumerate.
	if vd, ok := w.Domain.(Var); ok && d.id == vd.id {
		// Unifying $x with $x results in $x. If we descend into this we'll have
		// problems because we strip $x out of the environment to keep ourselves
		// honest and then can't find it on the other side.
		//
		// TODO: I'm not positive this is the right fix.
		return vd, e, nil
	}
	// We need to unify w with the value of d in each possible environment. We
	// can save some work by grouping environments by the value of d, since
	// there will be a lot of redundancy here.
	var nEnvs []envSet
	envParts := e.partitionBy(d.id)
	for i, envPart := range envParts {
		exit := uf.enterVar(d.id, i)
		// Each branch logically gets its own copy of the initial environment
		// (narrowed down to just this binding of the variable), and each branch
		// may result in different changes to that starting environment.
		res, e2, err := w.unify(envPart.value, envPart.env, swap, uf)
		exit.exit()
		if err != nil {
			return nil, envSet{}, err
		}
		if res.Domain == nil {
			// This branch entirely failed to unify, so it's gone.
			continue
		}
		// Re-bind d to the narrowed result in this branch's environment.
		nEnv := e2.bind(d.id, res)
		nEnvs = append(nEnvs, nEnv)
	}
	if len(nEnvs) == 0 {
		// All branches failed
		return nil, bottomEnv, nil
	}
	// The effect of this is entirely captured in the environment. We can return
	// back the same Bind node.
	return d, unionEnvs(nEnvs...), nil
}
// An identPrinter maps [ident]s to unique string names.
type identPrinter struct {
	ids   map[*ident]string // assigned display name for each ident seen
	idGen map[string]int    // count of idents seen per base name, for #N suffixes
}
// unique returns a stable display name for id. The first ident with a given
// base name gets the name itself; later distinct idents with the same base
// name get a "#N" suffix.
func (p *identPrinter) unique(id *ident) string {
	if p.ids == nil {
		// Lazily initialize on first use.
		p.ids = make(map[*ident]string)
		p.idGen = make(map[string]int)
	}
	if name, ok := p.ids[id]; ok {
		return name
	}
	gen := p.idGen[id.name]
	p.idGen[id.name]++
	name := id.name
	if gen > 0 {
		name = fmt.Sprintf("%s#%d", id.name, gen)
	}
	p.ids[id] = name
	return name
}
// slice formats ids as a bracketed, comma-separated list of unique names.
func (p *identPrinter) slice(ids []*ident) string {
	names := make([]string, len(ids))
	for i, id := range ids {
		names[i] = p.unique(id)
	}
	return "[" + strings.Join(names, ", ") + "]"
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unify
import (
"fmt"
"html"
"io"
"strings"
)
// writeHTML renders the recorded trace trees as an HTML document to w. It
// requires that the tracer was created with saveTree set.
func (t *tracer) writeHTML(w io.Writer) {
	if !t.saveTree {
		panic("writeHTML called without tracer.saveTree")
	}
	fmt.Fprintf(w, "<html><head><style>%s</style></head>", htmlCSS)
	// Each top-level unification gets its own tree and dot encoder.
	for _, root := range t.trees {
		ht := htmlTracer{w: w, dot: newDotEncoder()}
		ht.writeTree(root)
	}
	fmt.Fprintf(w, "</html>\n")
}
// htmlCSS is the stylesheet embedded in the HTML trace output. The .unify
// grid lays out values and the environment side by side with sticky headers.
const htmlCSS = `
.unify {
	display: grid;
	grid-auto-columns: min-content;
	text-align: center;
}
.header {
	grid-row: 1;
	font-weight: bold;
	padding: 0.25em;
	position: sticky;
	top: 0;
	background: white;
}
.envFactor {
	display: grid;
	grid-auto-rows: min-content;
	grid-template-columns: subgrid;
	text-align: center;
}
`
// An htmlTracer renders one trace tree to HTML, memoizing rendered SVGs.
type htmlTracer struct {
	w    io.Writer
	dot  *dotEncoder
	svgs map[any]string // cache of rendered SVG keyed by value/env; see htmlSVG
}
// writeTree recursively renders node: its inputs, its children (nested in
// <details> elements, truncated after 10), and its result or error.
func (t *htmlTracer) writeTree(node *traceTree) {
	// TODO: This could be really nice.
	//
	// - Put nodes that were unified on the same rank with {rank=same; a; b}
	//
	// - On hover, highlight nodes that node was unified with and the result. If
	// it's a variable, highlight it in the environment, too.
	//
	// - On click, show the details of unifying that node.
	//
	// This could be the only way to navigate, without necessarily needing the
	// whole nest of <detail> nodes.
	// TODO: It might be possible to write this out on the fly.
	t.emit([]*Value{node.v, node.w}, []string{"v", "w"}, node.envIn)
	// Render children.
	for i, child := range node.children {
		if i >= 10 {
			// Truncate very wide trees.
			fmt.Fprintf(t.w, `<div style="margin-left: 4em">...</div>`)
			break
		}
		fmt.Fprintf(t.w, `<details style="margin-left: 4em"><summary>%s</summary>`, html.EscapeString(child.label))
		t.writeTree(child)
		fmt.Fprintf(t.w, "</details>\n")
	}
	// Render result.
	if node.err != nil {
		fmt.Fprintf(t.w, "Error: %s\n", html.EscapeString(node.err.Error()))
	} else {
		t.emit([]*Value{node.res}, []string{"res"}, node.env)
	}
}
// htmlSVG renders the dot subgraph that f(arg) adds to t.dot as SVG,
// memoizing the result by arg so repeated values/environments are only
// rendered once.
func htmlSVG[Key comparable](t *htmlTracer, f func(Key), arg Key) string {
	if s, ok := t.svgs[arg]; ok {
		return s
	}
	// f accumulates the subgraph for arg into t.dot; writeSvg renders it,
	// then we clear the encoder for the next call.
	var buf strings.Builder
	f(arg)
	t.dot.writeSvg(&buf)
	t.dot.clear()
	svg := buf.String()
	if t.svgs == nil {
		t.svgs = make(map[any]string)
	}
	t.svgs[arg] = svg
	// (Previous code called buf.Reset() here; buf is a local about to go out
	// of scope, so that was dead code and has been removed.)
	return svg
}
// emit writes one CSS-grid group showing each value in vs (headed by the
// corresponding label) side by side, with the environment env in the last
// column.
func (t *htmlTracer) emit(vs []*Value, labels []string, env envSet) {
	fmt.Fprintf(t.w, `<div class="unify">`)
	for i, v := range vs {
		// Grid columns are 1-based; column i+1 holds value i.
		fmt.Fprintf(t.w, `<div class="header" style="grid-column: %d">%s</div>`, i+1, html.EscapeString(labels[i]))
		fmt.Fprintf(t.w, `<div style="grid-area: 2 / %d">%s</div>`, i+1, htmlSVG(t, t.dot.valueSubgraph, v))
	}
	// The environment goes in the column after the values.
	col := len(vs)
	fmt.Fprintf(t.w, `<div class="header" style="grid-column: %d">in</div>`, col+1)
	fmt.Fprintf(t.w, `<div style="grid-area: 2 / %d">%s</div>`, col+1, htmlSVG(t, t.dot.envSubgraph, env))
	fmt.Fprintf(t.w, `</div>`)
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unify
import (
"fmt"
)
// A Pos identifies a position in an input file. The zero Pos renders as
// "?:?".
type Pos struct {
	Path string // file name, or "" if unknown
	Line int    // line number, or 0 if unknown
}

// String returns p in "path:line" form, substituting "?" for unknown parts.
func (p Pos) String() string {
	b, _ := p.AppendText(nil)
	return string(b)
}

// AppendText appends the textual form of p to b and returns the extended
// slice. The error is always nil.
func (p Pos) AppendText(b []byte) ([]byte, error) {
	switch {
	case p.Line == 0 && p.Path == "":
		return append(b, "?:?"...), nil
	case p.Line == 0:
		// Known file, unknown line: just the path.
		return append(b, p.Path...), nil
	case p.Path == "":
		return fmt.Appendf(b, "?:%d", p.Line), nil
	}
	return fmt.Appendf(b, "%s:%d", p.Path, p.Line), nil
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unify
import (
"fmt"
"io"
"strings"
"gopkg.in/yaml.v3"
)
// debugDotInHTML, if true, includes dot code for all graphs in the HTML. Useful
// for debugging the dot output itself.
const debugDotInHTML = false

// Debug configures optional global trace output from unification.
var Debug struct {
	// UnifyLog, if non-nil, receives a streaming text trace of unification.
	UnifyLog io.Writer

	// HTML, if non-nil, writes an HTML trace of unification to HTML.
	HTML io.Writer
}
// A tracer records a trace of unification as streaming text (logw) and/or an
// in-memory tree of traceTree nodes for HTML rendering. A nil *tracer is
// valid and discards everything.
type tracer struct {
	logw     io.Writer
	enc      yamlEncoder // Print consistent idents throughout
	saveTree bool        // if set, record tree; required for HTML output

	path  []string     // labels of the enter calls currently on the stack
	node  *traceTree   // current node of the tree being recorded, if saveTree
	trees []*traceTree // completed/active root nodes, one per top-level unify
}
// A traceTree records a single unification step and its nested sub-steps.
type traceTree struct {
	label string // Identifies this node as a child of parent
	v, w  *Value // Unification inputs
	envIn envSet // environment set the unification started from

	res *Value // Unification result
	env envSet // environment set after unification
	err error  // or error

	parent   *traceTree
	children []*traceTree
}
// A tracerExit undoes a tracer.enter when its exit method is called. The
// zero tracerExit is a no-op.
type tracerExit struct {
	t    *tracer
	len  int        // path length to restore on exit
	node *traceTree // tree node to restore on exit
}
// enter pushes a label (formatted from pat and vals) onto the trace path
// and, when tree recording is enabled, opens a child trace node. The caller
// must call exit on the returned tracerExit to restore the previous state.
// Safe to call on a nil tracer.
func (t *tracer) enter(pat string, vals ...any) tracerExit {
	if t == nil {
		return tracerExit{}
	}
	label := fmt.Sprintf(pat, vals...)
	var p *traceTree
	if t.saveTree {
		p = t.node
		if p != nil {
			// Attach a new child for this step. (The root node itself is
			// created by traceUnify, not here.)
			t.node = &traceTree{label: label, parent: p}
			p.children = append(p.children, t.node)
		}
	}
	t.path = append(t.path, label)
	return tracerExit{t, len(t.path) - 1, p}
}
// enterVar enters a trace scope for unifying one branch of variable id.
// Safe to call on a nil tracer.
func (t *tracer) enterVar(id *ident, branch int) tracerExit {
	if t == nil {
		return tracerExit{}
	}
	// Name the variable via the tracer's ident printer so names stay
	// consistent across the whole trace.
	name := t.enc.idp.unique(id)
	return t.enter("Var %s br %d", name, branch)
}
// exit restores the trace path and current tree node to their state at the
// matching enter call. Safe to call on the zero tracerExit.
func (te tracerExit) exit() {
	if te.t == nil {
		return
	}
	te.t.path = te.t.path[:te.len]
	te.t.node = te.node
}
// indentf formats pat with vals and puts prefix in front of the first line.
// For multi-line results, continuation lines are aligned under the first:
// an all-space prefix is repeated verbatim, while a prefix with visible
// characters is replaced by a space run of the same width.
func indentf(prefix string, pat string, vals ...any) string {
	msg := fmt.Sprintf(pat, vals...)
	switch {
	case prefix == "":
		return msg
	case !strings.Contains(msg, "\n"):
		// Single line: nothing to align.
		return prefix + msg
	}
	cont := prefix
	if strings.TrimLeft(prefix, " ") != "" {
		// Prefix has non-space characters in it. Construct an all-space
		// indent of the same width for continuation lines.
		cont = strings.Repeat(" ", len(prefix))
	}
	return prefix + strings.ReplaceAll(msg, "\n", "\n"+cont)
}
// yamlf marshals node to YAML, indents it with prefix via indentf, and trims
// trailing whitespace. Marshal errors are rendered into the string rather
// than returned, since this is only used for trace output.
func yamlf(prefix string, node *yaml.Node) string {
	b, err := yaml.Marshal(node)
	if err != nil {
		return fmt.Sprintf("<marshal failed: %s>", err)
	}
	return strings.TrimRight(indentf(prefix, "%s", b), " \n")
}
// logf writes one message to the streaming log, prefixed with the current
// enter/exit path. Safe to call on a nil tracer or without a log writer.
func (t *tracer) logf(pat string, vals ...any) {
	if t == nil || t.logw == nil {
		return
	}
	prefix := fmt.Sprintf("[%s] ", strings.Join(t.path, "/"))
	msg := strings.TrimRight(indentf(prefix, pat, vals...), " \n")
	fmt.Fprintf(t.logw, "%s\n", msg)
}
// traceUnify records the start of a unification of v and w in environment
// set e, logging it and (if saving the tree) storing the inputs on the
// current node. Safe to call on a nil tracer.
func (t *tracer) traceUnify(v, w *Value, e envSet) {
	if t == nil {
		return
	}
	t.enc.e = e // Interpret values w.r.t. e
	t.logf("Unify\n%s\nwith\n%s\nin\n%s",
		yamlf(" ", t.enc.value(v)),
		yamlf(" ", t.enc.value(w)),
		yamlf(" ", t.enc.env(e)))
	// Clear the encoder's environment again so later encodes don't use a
	// stale one.
	t.enc.e = envSet{}
	if t.saveTree {
		if t.node == nil {
			// This is a new top-level unification; create a root node.
			t.node = &traceTree{}
			t.trees = append(t.trees, t.node)
		}
		t.node.v, t.node.w, t.node.envIn = v, w, e
	}
}
// traceDone records the outcome of the unification most recently started
// with traceUnify: either the result res under environment set e, or err.
// Safe to call on a nil tracer.
func (t *tracer) traceDone(res *Value, e envSet, err error) {
	if t == nil {
		return
	}
	if err != nil {
		t.logf("==> %s", err)
	} else {
		t.logf("==>\n%s", yamlf(" ", t.enc.closure(Closure{res, e})))
	}
	if t.saveTree {
		node := t.node
		if node == nil {
			// traceDone without a matching traceUnify.
			panic("popped top of trace stack")
		}
		node.res, node.err = res, err
		node.env = e
	}
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package unify implements unification of structured values.
//
// A [Value] represents a possibly infinite set of concrete values, where a
// value is either a string ([String]), a tuple of values ([Tuple]), or a
// string-keyed map of values called a "def" ([Def]). These sets can be further
// constrained by variables ([Var]). A [Value] combined with bindings of
// variables is a [Closure].
//
// [Unify] finds a [Closure] that satisfies two or more other [Closure]s. This
// can be thought of as intersecting the sets represented by these Closures'
// values, or as the greatest lower bound/infimum of these Closures. If no such
// Closure exists, the result of unification is "bottom", or the empty set.
//
// # Examples
//
// The regular expression "a*" is the infinite set of strings of zero or more
// "a"s. "a*" can be unified with "a" or "aa" or "aaa", and the result is just
// "a", "aa", or "aaa", respectively. However, unifying "a*" with "b" fails
// because there are no values that satisfy both.
//
// Sums express sets directly. For example, !sum [a, b] is the set consisting of
// "a" and "b". Unifying this with !sum [b, c] results in just "b". This also
// makes it easy to demonstrate that unification isn't necessarily a single
// concrete value. For example, unifying !sum [a, b, c] with !sum [b, c, d]
// results in two concrete values: "b" and "c".
//
// The special value _ or "top" represents all possible values. Unifying _ with
// any value x results in x.
//
// Unifying composite values—tuples and defs—unifies their elements.
//
// The value [a*, aa] is an infinite set of tuples. If we unify that with the
// value [aaa, a*], the only possible value that satisfies both is [aaa, aa].
// Likewise, this is the intersection of the sets described by these two values.
//
// Defs are similar to tuples, but they are indexed by strings and don't have a
// fixed length. For example, {x: a, y: b} is a def with two fields. Any field
// not mentioned in a def is implicitly top. Thus, unifying this with {y: b, z:
// c} results in {x: a, y: b, z: c}.
//
// Variables constrain values. For example, the value [$x, $x] represents all
// tuples whose first and second values are the same, but doesn't otherwise
// constrain that value. Thus, this set includes [a, a] as well as [[b, c, d],
// [b, c, d]], but it doesn't include [a, b].
//
// Sums are internally implemented as fresh variables that are simultaneously
// bound to all values of the sum. That is !sum [a, b] is actually $var (where
// var is some fresh name), closed under the environment $var=a | $var=b.
package unify
import (
"errors"
"fmt"
"slices"
)
// Unify computes a Closure that satisfies each input Closure. If no such
// Closure exists, it returns bottom.
func Unify(closures ...Closure) (Closure, error) {
	if len(closures) == 0 {
		// The unification of nothing is top.
		return Closure{topValue, topEnv}, nil
	}
	// Set up tracing if either debug output is enabled; the one tracer is
	// shared across all pairwise unifications.
	var trace *tracer
	if Debug.UnifyLog != nil || Debug.HTML != nil {
		trace = &tracer{
			logw:     Debug.UnifyLog,
			saveTree: Debug.HTML != nil,
		}
	}
	// Fold the closures together left to right.
	unified := closures[0]
	for _, c := range closures[1:] {
		var err error
		uf := newUnifier()
		uf.tracer = trace
		// Combine the two environments; their variables must be disjoint.
		e := crossEnvs(unified.env, c.env)
		unified.val, unified.env, err = unified.val.unify(c.val, e, false, uf)
		if Debug.HTML != nil {
			uf.writeHTML(Debug.HTML)
		}
		if err != nil {
			return Closure{}, err
		}
	}
	return unified, nil
}
// A unifier carries per-unification state. Currently that is just the
// embedded tracer, which may be nil (nil tracers discard all trace calls).
type unifier struct {
	*tracer
}
// newUnifier returns a unifier with no tracer attached.
func newUnifier() *unifier {
	return new(unifier)
}
// errDomains is a sentinel error used between unify and unify1 to indicate that
// unify1 could not unify the domains of the two values. unify responds by
// retrying with the arguments swapped before reporting a real error.
var errDomains = errors.New("cannot unify domains")
// unify unifies v and w under environment set e, returning the combined
// Value and narrowed environment. swap indicates the caller passed the
// values in reversed order (see unify1's retry); they are swapped back here
// so tracing and results stay consistent.
func (v *Value) unify(w *Value, e envSet, swap bool, uf *unifier) (*Value, envSet, error) {
	if swap {
		// Put the values in order. This just happens to be a handy choke-point
		// to do this at.
		v, w = w, v
	}
	uf.traceUnify(v, w, e)
	d, e2, err := v.unify1(w, e, false, uf)
	if err == errDomains {
		// Try the other order.
		d, e2, err = w.unify1(v, e, true, uf)
		if err == errDomains {
			// Okay, we really can't unify these.
			err = fmt.Errorf("cannot unify %T (%s) and %T (%s): kind mismatch", v.Domain, v.PosString(), w.Domain, w.PosString())
		}
	}
	if err != nil {
		uf.traceDone(nil, envSet{}, err)
		return nil, envSet{}, err
	}
	// Record both parents in the result's provenance.
	res := unified(d, v, w)
	uf.traceDone(res, e2, nil)
	if d == nil {
		// Double check that a bottom Value also has a bottom env.
		if !e2.isEmpty() {
			panic("bottom Value has non-bottom environment")
		}
	}
	return res, e2, nil
}
// unify1 attempts one direction of unification, dispatching on v's domain
// kind. It returns errDomains when v's kind doesn't know how to unify with
// w's kind; the caller (unify) then retries with the arguments swapped.
func (v *Value) unify1(w *Value, e envSet, swap bool, uf *unifier) (Domain, envSet, error) {
	// TODO: If there's an error, attach position information to it.
	vd, wd := v.Domain, w.Domain
	// Bottom returns bottom, and eliminates all possible environments.
	if vd == nil || wd == nil {
		return nil, bottomEnv, nil
	}
	// Top always returns the other.
	if _, ok := vd.(Top); ok {
		return wd, e, nil
	}
	// Variables
	if vd, ok := vd.(Var); ok {
		return vd.unify(w, e, swap, uf)
	}
	// Composite values
	if vd, ok := vd.(Def); ok {
		if wd, ok := wd.(Def); ok {
			return vd.unify(wd, e, swap, uf)
		}
	}
	if vd, ok := vd.(Tuple); ok {
		if wd, ok := wd.(Tuple); ok {
			return vd.unify(wd, e, swap, uf)
		}
	}
	// Scalar values
	if vd, ok := vd.(String); ok {
		if wd, ok := wd.(String); ok {
			res := vd.unify(wd)
			if res == nil {
				// The strings don't intersect; the result is bottom.
				e = bottomEnv
			}
			return res, e, nil
		}
	}
	// v's kind can't handle w's kind; let the caller try the swapped order.
	return nil, envSet{}, errDomains
}
// unify unifies two defs field by field. A field present in only one def
// implicitly matches, since the missing side is implicitly Top. The
// environment is threaded through each field unification.
func (d Def) unify(o Def, e envSet, swap bool, uf *unifier) (Domain, envSet, error) {
	out := Def{fields: make(map[string]*Value)}
	// Check keys of d against o.
	for key, dv := range d.All() {
		ov, ok := o.fields[key]
		if !ok {
			// ov is implicitly Top. Bypass unification.
			out.fields[key] = dv
			continue
		}
		exit := uf.enter("%s", key)
		res, e2, err := dv.unify(ov, e, swap, uf)
		exit.exit()
		if err != nil {
			return nil, envSet{}, err
		} else if res.Domain == nil {
			// No match.
			return nil, bottomEnv, nil
		}
		out.fields[key] = res
		// Carry the narrowed environment into the next field.
		e = e2
	}
	// Check keys of o that we didn't already check. These all implicitly match
	// because we know the corresponding fields in d are all Top.
	for key, dv := range o.All() {
		if _, ok := d.fields[key]; !ok {
			out.fields[key] = dv
		}
	}
	return out, e, nil
}
// unify unifies two tuples. A repeated (lazily generated) tuple is first
// expanded to the other side's fixed length; two repeated tuples can't be
// expanded, so their generators are concatenated for later unification.
func (v Tuple) unify(w Tuple, e envSet, swap bool, uf *unifier) (Domain, envSet, error) {
	if v.repeat != nil && w.repeat != nil {
		// Since we generate the content of these lazily, there's not much we
		// can do but just stick them on a list to unify later.
		return Tuple{repeat: concat(v.repeat, w.repeat)}, e, nil
	}
	// Expand any repeated tuples.
	tuples := make([]Tuple, 0, 2)
	if v.repeat == nil {
		tuples = append(tuples, v)
	} else {
		v2, e2 := v.doRepeat(e, len(w.vs))
		tuples = append(tuples, v2...)
		e = e2
	}
	if w.repeat == nil {
		tuples = append(tuples, w)
	} else {
		w2, e2 := w.doRepeat(e, len(v.vs))
		tuples = append(tuples, w2...)
		e = e2
	}
	// Now unify all of the tuples (usually this will be just 2 tuples)
	out := tuples[0]
	for _, t := range tuples[1:] {
		if len(out.vs) != len(t.vs) {
			// Tuples of different lengths can't unify.
			uf.logf("tuple length mismatch")
			return nil, bottomEnv, nil
		}
		// Unify element-wise, threading the environment through.
		zs := make([]*Value, len(out.vs))
		for i, v1 := range out.vs {
			exit := uf.enter("%d", i)
			z, e2, err := v1.unify(t.vs[i], e, swap, uf)
			exit.exit()
			if err != nil {
				return nil, envSet{}, err
			} else if z.Domain == nil {
				return nil, bottomEnv, nil
			}
			zs[i] = z
			e = e2
		}
		out = Tuple{vs: zs}
	}
	return out, e, nil
}
// doRepeat creates a fixed-length tuple from a repeated tuple. The caller is
// expected to unify the returned tuples.
//
// One fixed tuple of length n is produced per generator in v.repeat; the
// environment is threaded through each element generation.
func (v Tuple) doRepeat(e envSet, n int) ([]Tuple, envSet) {
	res := make([]Tuple, len(v.repeat))
	for i, gen := range v.repeat {
		res[i].vs = make([]*Value, n)
		for j := range n {
			res[i].vs[j], e = gen(e)
		}
	}
	return res, e
}
// unify intersects the domains of two [String]s. If it can prove that this
// domain is empty, it returns nil (bottom).
//
// TODO: Consider splitting literals and regexps into two domains.
func (v String) unify(w String) Domain {
	// Unification is symmetric, so put them in order of string kind so we only
	// have to deal with half the cases.
	if v.kind > w.kind {
		v, w = w, v
	}
	switch v.kind {
	case stringRegex:
		switch w.kind {
		case stringRegex:
			// Construct a match against all of the regexps
			return String{kind: stringRegex, re: slices.Concat(v.re, w.re)}
		case stringExact:
			// An exact string unifies with a regex iff it matches every
			// pattern; the result is the exact string.
			for _, re := range v.re {
				if !re.MatchString(w.exact) {
					return nil
				}
			}
			return w
		}
	case stringExact:
		// Two exact strings unify iff they are equal.
		if v.exact != w.exact {
			return nil
		}
		return v
	}
	panic("bad string kind")
}
// concat returns the concatenation of s1 and s2, reusing one of the inputs
// when the other is empty.
func concat[T any](s1, s2 []T) []T {
	if len(s1) == 0 {
		return s2
	}
	// Clip s1's capacity so the append can't scribble over elements some
	// other slice may share beyond s1's length.
	clipped := s1[:len(s1):len(s1)]
	return append(clipped, s2...)
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unify
import (
"fmt"
"iter"
"reflect"
)
// A Value represents a structured, non-deterministic value consisting of
// strings, tuples of Values, and string-keyed maps of Values. A
// non-deterministic Value will also contain variables, which are resolved via
// an environment as part of a [Closure].
//
// For debugging, a Value can also track the source position it was read from in
// an input file, and its provenance from other Values.
type Value struct {
	Domain Domain

	// A Value has either a pos or parents (or neither).
	pos     *Pos         // source position, if read from a file
	parents *[2]*Value   // the two inputs this Value was unified from
}
var (
	// topValue is the Value of Top: the set of all values.
	topValue = &Value{Domain: Top{}}
	// bottomValue is the Value of bottom (a nil Domain): the empty set.
	bottomValue = &Value{Domain: nil}
)
// NewValue returns a new [Value] with the given domain and no position
// information.
func NewValue(d Domain) *Value {
	v := Value{Domain: d}
	return &v
}
// NewValuePos returns a new [Value] with the given domain at position p.
func NewValuePos(d Domain, p Pos) *Value {
	// Take a copy of p so the Value owns its position.
	pos := p
	return &Value{Domain: d, pos: &pos}
}
// newValueFrom returns a new [Value] with the given domain that copies the
// position information of p.
func newValueFrom(d Domain, p *Value) *Value {
	v := Value{Domain: d}
	v.pos, v.parents = p.pos, p.parents
	return &v
}
// unified returns a new [Value] with the given domain whose provenance is
// the two parent Values it was unified from.
func unified(d Domain, p1, p2 *Value) *Value {
	return &Value{Domain: d, parents: &[2]*Value{p1, p2}}
}
// Pos returns v's source position, or the zero Pos if it has none.
func (v *Value) Pos() Pos {
	if p := v.pos; p != nil {
		return *p
	}
	return Pos{}
}
// PosString returns a space-separated list of the source positions that
// contributed to v, gathered via [Value.Provenance].
func (v *Value) PosString() string {
	var buf []byte
	for src := range v.Provenance() {
		if len(buf) > 0 {
			buf = append(buf, ' ')
		}
		// Provenance only yields Values with a position, so pos is non-nil.
		buf, _ = src.pos.AppendText(buf)
	}
	return string(buf)
}
// WhyNotExact returns a human-readable reason v is not exact, delegating to
// the domain; a nil (bottom) domain is reported directly.
func (v *Value) WhyNotExact() string {
	if v.Domain == nil {
		return "v.Domain is nil"
	}
	return v.Domain.WhyNotExact()
}
// Exact reports whether v represents exactly one concrete value. Bottom (a
// nil domain) is never exact.
func (v *Value) Exact() bool {
	if v.Domain == nil {
		return false
	}
	return v.Domain.Exact()
}
// Decode decodes v into a Go value.
//
// v must be exact, except that it can include Top. into must be a pointer.
// [Def]s are decoded into structs. [Tuple]s are decoded into slices. [String]s
// are decoded into strings or ints. Any field can itself be a pointer to one of
// these types. Top can be decoded into a pointer-typed field and will set the
// field to nil. Anything else will allocate a value if necessary.
//
// Any type may implement [Decoder], in which case its DecodeUnified method will
// be called instead of using the default decoding scheme.
func (v *Value) Decode(into any) error {
	rv := reflect.ValueOf(into)
	if rv.Kind() != reflect.Pointer {
		return fmt.Errorf("cannot decode into non-pointer %T", into)
	}
	// Decode into the pointed-to value.
	return decodeReflect(v, rv.Elem())
}
// decodeReflect decodes v into the reflected Go value rv, allocating through
// pointers as needed. See [Value.Decode] for the decoding scheme.
func decodeReflect(v *Value, rv reflect.Value) error {
	var ptr reflect.Value
	if rv.Kind() == reflect.Pointer {
		if rv.IsNil() {
			// Transparently allocate through pointers, *except* for Top, which
			// wants to set the pointer to nil.
			//
			// TODO: Drop this condition if I switch to an explicit Optional[T]
			// or move the Top logic into Def.
			if _, ok := v.Domain.(Top); !ok {
				// Allocate the value to fill in, but don't actually store it in
				// the pointer until we successfully decode.
				ptr = rv
				rv = reflect.New(rv.Type().Elem()).Elem()
			}
		} else {
			// Pointer already set; decode into its target.
			rv = rv.Elem()
		}
	}
	var err error
	if reflect.PointerTo(rv.Type()).Implements(decoderType) {
		// Use the custom decoder.
		err = rv.Addr().Interface().(Decoder).DecodeUnified(v)
	} else {
		err = v.Domain.decode(rv)
	}
	if err == nil && ptr.IsValid() {
		// Decoding succeeded; publish the freshly allocated value.
		ptr.Set(rv.Addr())
	}
	return err
}
// Decoder can be implemented by types as a custom implementation of [Decode]
// for that type.
type Decoder interface {
	DecodeUnified(v *Value) error
}

// decoderType is the reflected [Decoder] interface type, used by
// decodeReflect to detect custom decoders.
var decoderType = reflect.TypeOf((*Decoder)(nil)).Elem()
// Provenance iterates over all of the source Values that have contributed to
// this Value.
func (v *Value) Provenance() iter.Seq[*Value] {
	// Pre-order walk of the parent DAG, yielding only Values that carry a
	// source position.
	return func(yield func(*Value) bool) {
		var walk func(n *Value) bool
		walk = func(n *Value) bool {
			if n.pos != nil && !yield(n) {
				return false
			}
			if n.parents == nil {
				return true
			}
			for _, parent := range n.parents {
				if !walk(parent) {
					return false
				}
			}
			return true
		}
		walk(v)
	}
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unify
import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"regexp"
"strings"
"gopkg.in/yaml.v3"
)
// ReadOpts provides options to [Read] and related functions. The zero value is
// the default options.
type ReadOpts struct {
	// FS, if non-nil, is the file system from which to resolve !import file
	// names.
	FS fs.FS
}
// Read reads a [Closure] in YAML format from r, using path for error messages.
//
// It maps YAML nodes into terminal Values as follows:
//
// - "_" or !top _ is the top value ([Top]).
//
// - "_|_" or !bottom _ is the bottom value. This is an error during
// unmarshaling, but can appear in marshaled values.
//
// - "$<name>" or !var <name> is a variable ([Var]). Everywhere the same name
// appears within a single unmarshal operation, it is mapped to the same
// variable. Different unmarshal operations get different variables, even if
// they have the same string name.
//
// - !regex "x" is a regular expression ([String]), as is any string that
// doesn't match "_", "_|_", or "$...". Regular expressions are implicitly
// anchored at the beginning and end. If the string doesn't contain any
// meta-characters (that is, it's a "literal" regular expression), then it's
// treated as an exact string.
//
// - !string "x", or any int, float, bool, or binary value is an exact string
// ([String]).
//
// - !regex [x, y, ...] is an intersection of regular expressions ([String]).
//
// It maps YAML nodes into non-terminal Values as follows:
//
// - Sequence nodes like [x, y, z] are tuples ([Tuple]).
//
// - !repeat [x] is a repeated tuple ([Tuple]), which is 0 or more instances of
// x. There must be exactly one element in the list.
//
// - Mapping nodes like {a: x, b: y} are defs ([Def]). Any fields not listed are
// implicitly top.
//
// - !sum [x, y, z] is a sum of its children. This can be thought of as a union
// of the values x, y, and z, or as a non-deterministic choice between x, y, and
// z. If a variable appears both inside the sum and outside of it, only the
// non-deterministic choice view really works. The unifier does not directly
// implement sums; instead, this is decoded as a fresh variable that's
// simultaneously bound to x, y, and z.
//
// - !import glob is like a !sum, but its children are read from all files
// matching the given glob pattern, which is interpreted relative to the current
// file path. Each file gets its own variable scope.
func Read(r io.Reader, path string, opts ReadOpts) (Closure, error) {
	// Decode with a fresh decoder: per-call variable scope, top environment.
	dec := yamlDecoder{opts: opts, path: path, env: topEnv}
	v, err := dec.read(r)
	if err != nil {
		return Closure{}, err
	}
	// Close the value over the environment accumulated while decoding.
	return dec.close(v), nil
}
// ReadFile reads a [Closure] in YAML format from a file.
//
// The file must consist of a single YAML document.
//
// If opts.FS is not set, this sets it to a FS rooted at path's directory.
//
// See [Read] for details.
func ReadFile(path string, opts ReadOpts) (Closure, error) {
	f, err := os.Open(path)
	if err != nil {
		return Closure{}, err
	}
	defer f.Close()
	if opts.FS == nil {
		// Resolve !import globs relative to the file's directory by default.
		opts.FS = os.DirFS(filepath.Dir(path))
	}
	return Read(f, path, opts)
}
// UnmarshalYAML implements [yaml.Unmarshaler].
//
// Since there is no way to pass [ReadOpts] to this function, it assumes default
// options.
func (c *Closure) UnmarshalYAML(node *yaml.Node) error {
	// Use a placeholder path since there's no file name available here.
	dec := yamlDecoder{path: "<yaml.Node>", env: topEnv}
	v, err := dec.root(node)
	if err != nil {
		return err
	}
	*c = dec.close(v)
	return nil
}
// A yamlDecoder holds the state for decoding one YAML document tree into a
// [Value] plus an accumulated variable environment.
type yamlDecoder struct {
	opts ReadOpts
	path string // used in error messages and recorded positions

	vars  map[string]*ident // per-file variable scope, keyed by "$name"
	nSums int               // per-file counter; presumably names fresh !sum variables — not visible in this chunk

	env envSet // bindings accumulated for every variable seen so far
}
// read reads a single YAML document from r and decodes it into a Value,
// prefixing any error with the decoder's path.
func (dec *yamlDecoder) read(r io.Reader) (*Value, error) {
	node, err := readOneNode(r)
	if err != nil {
		return nil, fmt.Errorf("%s: %w", dec.path, err)
	}
	// Decode YAML node to a Value.
	v, err := dec.root(node)
	if err != nil {
		return nil, fmt.Errorf("%s: %w", dec.path, err)
	}
	return v, nil
}
// readOneNode reads a single YAML document from r and returns an error if there
// are more documents in r.
func readOneNode(r io.Reader) (*yaml.Node, error) {
	yd := yaml.NewDecoder(r)
	// Decode as a YAML node
	var node yaml.Node
	if err := yd.Decode(&node); err != nil {
		return nil, err
	}
	np := &node
	if np.Kind == yaml.DocumentNode {
		// Unwrap the document wrapper to its content node.
		// NOTE(review): assumes Content is non-empty for a DocumentNode —
		// empty input appears to surface as an EOF from Decode above, but
		// confirm against the yaml.v3 behavior.
		np = node.Content[0]
	}
	// Ensure there are no more YAML docs in this file
	if err := yd.Decode(nil); err == nil {
		return nil, fmt.Errorf("must not contain multiple documents")
	} else if err != io.EOF {
		return nil, err
	}
	return np, nil
}
// root parses the root of a file.
func (dec *yamlDecoder) root(node *yaml.Node) (*Value, error) {
	// Variable names are scoped per file. This may be a nested root (e.g.
	// via !import), so save the enclosing scope and restore it on return.
	savedVars, savedNSums := dec.vars, dec.nSums
	defer func() {
		dec.vars, dec.nSums = savedVars, savedNSums
	}()
	dec.vars = make(map[string]*ident)
	dec.nSums = 0
	return dec.value(node)
}
// close wraps a decoded [Value] into a [Closure] under the environment
// accumulated during decoding.
func (dec *yamlDecoder) close(v *Value) Closure {
	return Closure{v, dec.env}
}
// value decodes node into a *Value, dispatching on the node's YAML kind and
// long tag. Decoding may extend dec.vars (newly seen variable names) and
// dec.env (bindings introduced by variables, !sum, !repeat, and !import).
func (dec *yamlDecoder) value(node *yaml.Node) (vOut *Value, errOut error) {
	pos := &Pos{Path: dec.path, Line: node.Line}
	// Resolve alias nodes.
	if node.Kind == yaml.AliasNode {
		node = node.Alias
	}
	// mk wraps a Domain in a *Value carrying this node's position.
	mk := func(d Domain) (*Value, error) {
		v := &Value{Domain: d, pos: pos}
		return v, nil
	}
	// mk2 is mk for Domain constructors that can fail.
	mk2 := func(d Domain, err error) (*Value, error) {
		if err != nil {
			return nil, err
		}
		return mk(d)
	}
	// is tests the kind and long tag of node.
	is := func(kind yaml.Kind, tag string) bool {
		return node.Kind == kind && node.LongTag() == tag
	}
	// isExact reports whether node is a scalar that should decode to an
	// exact (literal) string rather than a regex.
	isExact := func() bool {
		if node.Kind != yaml.ScalarNode {
			return false
		}
		// We treat any string-ish YAML node as a string.
		switch node.LongTag() {
		case "!string", "tag:yaml.org,2002:int", "tag:yaml.org,2002:float", "tag:yaml.org,2002:bool", "tag:yaml.org,2002:binary":
			return true
		}
		return false
	}
	// !!str nodes provide a short-hand syntax for several leaf domains that are
	// also available under explicit tags. To simplify checking below, we set
	// strVal to non-"" only for !!str nodes.
	strVal := ""
	isStr := is(yaml.ScalarNode, "tag:yaml.org,2002:str")
	if isStr {
		strVal = node.Value
	}
	switch {
	case is(yaml.ScalarNode, "!var"):
		// Explicit variable tag: normalize to the "$name" short-hand and
		// fall through to the variable case below.
		strVal = "$" + node.Value
		fallthrough
	case strings.HasPrefix(strVal, "$"):
		id, ok := dec.vars[strVal]
		if !ok {
			// We encode different idents with the same string name by adding a
			// #N suffix. Strip that off so it doesn't accumulate. This isn't
			// meant to be used in user-written input, though nothing stops that.
			name, _, _ := strings.Cut(strVal, "#")
			id = &ident{name: name}
			dec.vars[strVal] = id
			dec.env = dec.env.bind(id, topValue)
		}
		return mk(Var{id: id})
	case strVal == "_" || is(yaml.ScalarNode, "!top"):
		return mk(Top{})
	case strVal == "_|_" || is(yaml.ScalarNode, "!bottom"):
		return nil, errors.New("found bottom")
	case isExact():
		val := node.Value
		return mk(NewStringExact(val))
	case isStr || is(yaml.ScalarNode, "!regex"):
		// Any other string we treat as a regex. This will produce an exact
		// string anyway if the regex is literal.
		val := node.Value
		return mk2(NewStringRegex(val))
	case is(yaml.SequenceNode, "!regex"):
		// A sequence under !regex is a list of regex alternatives.
		var vals []string
		if err := node.Decode(&vals); err != nil {
			return nil, err
		}
		return mk2(NewStringRegex(vals...))
	case is(yaml.MappingNode, "tag:yaml.org,2002:map"):
		var db DefBuilder
		// node.Content holds alternating key/value nodes.
		for i := 0; i < len(node.Content); i += 2 {
			key := node.Content[i]
			if key.Kind != yaml.ScalarNode {
				return nil, fmt.Errorf("non-scalar key %q", key.Value)
			}
			val, err := dec.value(node.Content[i+1])
			if err != nil {
				return nil, err
			}
			db.Add(key.Value, val)
		}
		return mk(db.Build())
	case is(yaml.SequenceNode, "tag:yaml.org,2002:seq"):
		elts := node.Content
		vs := make([]*Value, 0, len(elts))
		for _, elt := range elts {
			v, err := dec.value(elt)
			if err != nil {
				return nil, err
			}
			vs = append(vs, v)
		}
		return mk(NewTuple(vs...))
	case is(yaml.SequenceNode, "!repeat") || is(yaml.SequenceNode, "!repeat-unify"):
		// !repeat must have one child. !repeat-unify is used internally for
		// delayed unification, and is the same, it's just allowed to have more
		// than one child.
		if node.LongTag() == "!repeat" && len(node.Content) != 1 {
			return nil, fmt.Errorf("!repeat must have exactly one child")
		}
		// Decode the children to make sure they're well-formed, but otherwise
		// discard that decoding and do it again every time we need a new
		// element.
		var gen []func(e envSet) (*Value, envSet)
		origEnv := dec.env
		elts := node.Content
		for i, elt := range elts {
			_, err := dec.value(elt)
			if err != nil {
				return nil, err
			}
			// Undo any effects on the environment. We *do* keep any named
			// variables that were added to the vars map in case they were
			// introduced within the element.
			//
			// TODO: If we change how we implement repeat nodes, we might be
			// able to drop yamlEncoder.env and yamlDecoder.env.
			dec.env = origEnv
			// Add a generator function that re-decodes element i on demand.
			gen = append(gen, func(e envSet) (*Value, envSet) {
				dec.env = e
				// TODO: If this is in a sum, this tends to generate a ton of
				// fresh variables that are different on each branch of the
				// parent sum. Does it make sense to hold on to the i'th value
				// of the tuple after we've generated it?
				v, err := dec.value(elts[i])
				if err != nil {
					// It worked the first time, so this really shouldn't happen.
					panic("decoding repeat element failed")
				}
				return v, dec.env
			})
		}
		return mk(NewRepeat(gen...))
	case is(yaml.SequenceNode, "!sum"):
		vs := make([]*Value, 0, len(node.Content))
		for _, elt := range node.Content {
			v, err := dec.value(elt)
			if err != nil {
				return nil, err
			}
			vs = append(vs, v)
		}
		// A one-element sum is just that element.
		if len(vs) == 1 {
			return vs[0], nil
		}
		// A sum is implemented as a fresh variable that's simultaneously bound
		// to each of the descendants.
		id := &ident{name: fmt.Sprintf("sum%d", dec.nSums)}
		dec.nSums++
		dec.env = dec.env.bind(id, vs...)
		return mk(Var{id: id})
	case is(yaml.ScalarNode, "!import"):
		if dec.opts.FS == nil {
			return nil, fmt.Errorf("!import not allowed (ReadOpts.FS not set)")
		}
		pat := node.Value
		if !fs.ValidPath(pat) {
			// This will result in Glob returning no results. Give a more useful
			// error message for this case.
			return nil, fmt.Errorf("!import path must not contain '.' or '..'")
		}
		ms, err := fs.Glob(dec.opts.FS, pat)
		if err != nil {
			return nil, fmt.Errorf("resolving !import: %w", err)
		}
		if len(ms) == 0 {
			return nil, fmt.Errorf("!import did not match any files")
		}
		// Parse each file
		vs := make([]*Value, 0, len(ms))
		for _, m := range ms {
			v, err := dec.import1(m)
			if err != nil {
				return nil, err
			}
			vs = append(vs, v)
		}
		// Create a sum. Like !sum above, a single match needs no variable.
		if len(vs) == 1 {
			return vs[0], nil
		}
		id := &ident{name: "import"}
		dec.env = dec.env.bind(id, vs...)
		return mk(Var{id: id})
	}
	return nil, fmt.Errorf("unknown node kind %d %v", node.Kind, node.Tag)
}
// import1 opens and parses one imported file, temporarily re-rooting
// dec.opts.FS and dec.path at the imported file's directory so that nested
// imports resolve relative to it.
func (dec *yamlDecoder) import1(path string) (*Value, error) {
	// Make sure we can open the path first.
	f, err := dec.opts.FS.Open(path)
	if err != nil {
		return nil, fmt.Errorf("!import failed: %w", err)
	}
	defer f.Close()
	// Prepare the enter path.
	oldFS, oldPath := dec.opts.FS, dec.path
	defer func() {
		dec.opts.FS, dec.path = oldFS, oldPath
	}()
	// Enter path, which is relative to the current path's directory.
	//
	// NOTE(review): io/fs paths are always slash-separated, but
	// filepath.Dir/filepath.Join are OS-dependent and could produce
	// backslashed paths on Windows, which fs.Sub would reject. Confirm
	// whether path.Dir/path.Join was intended here.
	newPath := filepath.Join(filepath.Dir(dec.path), path)
	subFS, err := fs.Sub(dec.opts.FS, filepath.Dir(path))
	if err != nil {
		return nil, err
	}
	dec.opts.FS, dec.path = subFS, newPath
	// Parse the file.
	return dec.read(f)
}
// yamlEncoder carries state shared across the encoding of one value tree.
type yamlEncoder struct {
	idp identPrinter // used to print unique names for idents
	e   envSet       // We track the environment for !repeat nodes.
}
// TODO: Switch some Value marshaling to Closure?

// MarshalYAML encodes the closure (its environment plus value) as YAML.
func (c Closure) MarshalYAML() (any, error) {
	// TODO: If the environment is trivial, just marshal the value.
	return new(yamlEncoder).closure(c), nil
}
// String renders the closure as YAML, or an error description if marshaling
// fails.
func (c Closure) String() string {
	out, err := yaml.Marshal(c)
	if err == nil {
		return string(out)
	}
	return fmt.Sprintf("marshal failed: %s", err)
}
// MarshalYAML encodes the value against the top-level environment.
func (v *Value) MarshalYAML() (any, error) {
	return (&yamlEncoder{e: topEnv}).value(v), nil
}
// String renders the value as YAML, or an error description if marshaling
// fails.
func (v *Value) String() string {
	out, err := yaml.Marshal(v)
	if err == nil {
		return string(out)
	}
	return fmt.Sprintf("marshal failed: %s", err)
}
// closure encodes c as a !closure mapping with two keys: "env" (the
// environment) and "in" (the value).
func (enc *yamlEncoder) closure(c Closure) *yaml.Node {
	enc.e = c.env
	var n yaml.Node
	n.Kind = yaml.MappingNode
	n.Tag = "!closure"
	// Content is laid out as [key "env", env node, key "in", value node].
	n.Content = make([]*yaml.Node, 4)
	n.Content[0] = new(yaml.Node)
	n.Content[0].SetString("env")
	n.Content[2] = new(yaml.Node)
	n.Content[2].SetString("in")
	n.Content[3] = enc.value(c.val)
	// Fill in the env after we've written the value in case value encoding
	// affects the env.
	n.Content[1] = enc.env(enc.e)
	enc.e = envSet{} // Allow GC'ing the env
	return &n
}
// env encodes an environment as nested YAML nodes: "0" for envZero, "1" for
// envUnit, a one-pair mapping for a binding, and tagged sequences for
// products and sums.
func (enc *yamlEncoder) env(e envSet) *yaml.Node {
	var encode func(e *envExpr) *yaml.Node
	encode = func(e *envExpr) *yaml.Node {
		var n yaml.Node
		switch e.kind {
		default:
			panic("bad kind")
		case envZero:
			n.SetString("0")
		case envUnit:
			n.SetString("1")
		case envBinding:
			// A binding is a single-entry mapping from the ident's printed
			// name to its value.
			var id yaml.Node
			id.SetString(enc.idp.unique(e.id))
			n.Kind = yaml.MappingNode
			n.Content = []*yaml.Node{&id, enc.value(e.val)}
		case envProduct, envSum:
			n.Kind = yaml.SequenceNode
			if e.kind == envProduct {
				n.Tag = "!product"
			} else {
				n.Tag = "!sum"
			}
			for _, e2 := range e.operands {
				n.Content = append(n.Content, encode(e2))
			}
		}
		return &n
	}
	return encode(e.root)
}
var yamlIntRe = regexp.MustCompile(`^-?[0-9]+$`)
// value encodes v into a YAML node. Encoding a repeat tuple consumes and
// updates enc.e as each element generator runs.
func (enc *yamlEncoder) value(v *Value) *yaml.Node {
	var n yaml.Node
	switch d := v.Domain.(type) {
	case nil:
		// Not allowed by unmarshaler, but useful for understanding when
		// something goes horribly wrong.
		//
		// TODO: We might be able to track useful provenance for this, which
		// would really help with debugging unexpected bottoms.
		n.SetString("_|_")
		return &n
	case Top:
		n.SetString("_")
		return &n
	case Def:
		// A Def becomes a YAML mapping of field name to encoded value.
		n.Kind = yaml.MappingNode
		for k, elt := range d.All() {
			var kn yaml.Node
			kn.SetString(k)
			n.Content = append(n.Content, &kn, enc.value(elt))
		}
		n.HeadComment = v.PosString()
		return &n
	case Tuple:
		n.Kind = yaml.SequenceNode
		if d.repeat == nil {
			for _, elt := range d.vs {
				n.Content = append(n.Content, enc.value(elt))
			}
		} else {
			if len(d.repeat) == 1 {
				n.Tag = "!repeat"
			} else {
				n.Tag = "!repeat-unify"
			}
			// TODO: I'm not positive this will round-trip everything correctly.
			for _, gen := range d.repeat {
				v, e := gen(enc.e)
				enc.e = e
				n.Content = append(n.Content, enc.value(v))
			}
		}
		return &n
	case String:
		switch d.kind {
		case stringExact:
			n.SetString(d.exact)
			switch {
			// Make this into a "nice" !!int node if I can.
			case yamlIntRe.MatchString(d.exact):
				n.Tag = "tag:yaml.org,2002:int"
			// Or a "nice" !!bool node.
			case d.exact == "false" || d.exact == "true":
				n.Tag = "tag:yaml.org,2002:bool"
			// If this doesn't require escaping, leave it as a str node to avoid
			// the annoying YAML tags. Otherwise, mark it as an exact string.
			// Alternatively, we could always emit a str node with regexp
			// quoting.
			case d.exact != regexp.QuoteMeta(d.exact):
				n.Tag = "!string"
			}
			return &n
		case stringRegex:
			o := make([]string, 0, 1)
			for _, re := range d.re {
				s := re.String()
				// Strip the `\A(?:` ... `)\z` wrapper, if present.
				s = strings.TrimSuffix(strings.TrimPrefix(s, `\A(?:`), `)\z`)
				o = append(o, s)
			}
			// A single pattern is emitted as a bare scalar; multiple
			// patterns become a !regex sequence.
			if len(o) == 1 {
				n.SetString(o[0])
				return &n
			}
			n.Encode(o)
			n.Tag = "!regex"
			return &n
		}
		panic("bad String kind")
	case Var:
		// TODO: If Var only appears once in the whole Value and is independent
		// in the environment (part of a term that is only over Var), then emit
		// this as a !sum instead.
		if false { // disabled; see the TODO above
			var vs []*Value // TODO: Get values of this var.
			if len(vs) == 1 {
				return enc.value(vs[0])
			}
			n.Kind = yaml.SequenceNode
			n.Tag = "!sum"
			for _, elt := range vs {
				n.Content = append(n.Content, enc.value(elt))
			}
			return &n
		}
		n.SetString(enc.idp.unique(d.id))
		if !strings.HasPrefix(d.id.name, "$") {
			n.Tag = "!var"
		}
		return &n
	}
	panic(fmt.Sprintf("unknown domain type %T", v.Domain))
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !compiler_bootstrap
package strconv
import "internal/bytealg"
// index returns the index of the first instance of c in s, or -1 if missing.
// It delegates to the optimized implementation in internal/bytealg.
func index(s string, c byte) int {
	return bytealg.IndexByteString(s, c)
}
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package strconv
import (
"errors"
"internal/strconv"
"internal/stringslite"
)
// IntSize is the size in bits of an int or uint value.
const IntSize = strconv.IntSize // re-exported from internal/strconv
// ParseBool returns the boolean value represented by the string.
// It accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False.
// Any other value returns an error.
func ParseBool(str string) (bool, error) {
	b, err := strconv.ParseBool(str)
	if err != nil {
		err = toError("ParseBool", str, 0, 0, err)
	}
	return b, err
}
// FormatBool and AppendBool are thin wrappers; the implementations live in
// internal/strconv.

// FormatBool returns "true" or "false" according to the value of b.
func FormatBool(b bool) string {
	return strconv.FormatBool(b)
}
// AppendBool appends "true" or "false", according to the value of b,
// to dst and returns the extended buffer.
func AppendBool(dst []byte, b bool) []byte {
	return strconv.AppendBool(dst, b)
}
// ParseComplex converts the string s to a complex number
// with the precision specified by bitSize: 64 for complex64, or 128 for complex128.
// When bitSize=64, the result still has type complex128, but it will be
// convertible to complex64 without changing its value.
//
// The number represented by s must be of the form N, Ni, or N±Ni, where N stands
// for a floating-point number as recognized by [ParseFloat], and i is the imaginary
// component. If the second N is unsigned, a + sign is required between the two components
// as indicated by the ±. If the second N is NaN, only a + sign is accepted.
// The form may be parenthesized and cannot contain any spaces.
// The resulting complex number consists of the two components converted by ParseFloat.
//
// The errors that ParseComplex returns have concrete type [*NumError]
// and include err.Num = s.
//
// If s is not syntactically well-formed, ParseComplex returns err.Err = ErrSyntax.
//
// If s is syntactically well-formed but either component is more than 1/2 ULP
// away from the largest floating point number of the given component's size,
// ParseComplex returns err.Err = ErrRange and c = ±Inf for the respective component.
func ParseComplex(s string, bitSize int) (complex128, error) {
	c, err := strconv.ParseComplex(s, bitSize)
	if err != nil {
		err = toError("ParseComplex", s, 0, bitSize, err)
	}
	return c, err
}
// ParseFloat converts the string s to a floating-point number
// with the precision specified by bitSize: 32 for float32, or 64 for float64.
// When bitSize=32, the result still has type float64, but it will be
// convertible to float32 without changing its value.
//
// ParseFloat accepts decimal and hexadecimal floating-point numbers
// as defined by the Go syntax for [floating-point literals].
// If s is well-formed and near a valid floating-point number,
// ParseFloat returns the nearest floating-point number rounded
// using IEEE754 unbiased rounding.
// (Parsing a hexadecimal floating-point value only rounds when
// there are more bits in the hexadecimal representation than
// will fit in the mantissa.)
//
// The errors that ParseFloat returns have concrete type *NumError
// and include err.Num = s.
//
// If s is not syntactically well-formed, ParseFloat returns err.Err = ErrSyntax.
//
// If s is syntactically well-formed but is more than 1/2 ULP
// away from the largest floating point number of the given size,
// ParseFloat returns f = ±Inf, err.Err = ErrRange.
//
// ParseFloat recognizes the string "NaN", and the (possibly signed) strings "Inf" and "Infinity"
// as their respective special floating point values. It ignores case when matching.
//
// [floating-point literals]: https://go.dev/ref/spec#Floating-point_literals
func ParseFloat(s string, bitSize int) (float64, error) {
	f, err := strconv.ParseFloat(s, bitSize)
	if err != nil {
		err = toError("ParseFloat", s, 0, bitSize, err)
	}
	return f, err
}
// ParseUint is like [ParseInt] but for unsigned numbers.
//
// A sign prefix is not permitted.
func ParseUint(s string, base int, bitSize int) (uint64, error) {
	u, err := strconv.ParseUint(s, base, bitSize)
	if err != nil {
		err = toError("ParseUint", s, base, bitSize, err)
	}
	return u, err
}
// ParseInt interprets a string s in the given base (0, 2 to 36) and
// bit size (0 to 64) and returns the corresponding value i.
//
// The string may begin with a leading sign: "+" or "-".
//
// If the base argument is 0, the true base is implied by the string's
// prefix following the sign (if present): 2 for "0b", 8 for "0" or "0o",
// 16 for "0x", and 10 otherwise. Also, for argument base 0 only,
// underscore characters are permitted as defined by the Go syntax for
// [integer literals].
//
// The bitSize argument specifies the integer type
// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64
// correspond to int, int8, int16, int32, and int64.
// If bitSize is below 0 or above 64, an error is returned.
//
// The errors that ParseInt returns have concrete type [*NumError]
// and include err.Num = s. If s is empty or contains invalid
// digits, err.Err = [ErrSyntax] and the returned value is 0;
// if the value corresponding to s cannot be represented by a
// signed integer of the given size, err.Err = [ErrRange] and the
// returned value is the maximum magnitude integer of the
// appropriate bitSize and sign.
//
// [integer literals]: https://go.dev/ref/spec#Integer_literals
func ParseInt(s string, base int, bitSize int) (i int64, err error) {
	i, err = strconv.ParseInt(s, base, bitSize)
	if err != nil {
		err = toError("ParseInt", s, base, bitSize, err)
	}
	return i, err
}
// Atoi is equivalent to ParseInt(s, 10, 0), converted to type int.
func Atoi(s string) (int, error) {
	n, err := strconv.Atoi(s)
	if err != nil {
		err = toError("Atoi", s, 0, 0, err)
	}
	return n, err
}
// The formatting functions below are thin wrappers; the implementations
// live in internal/strconv.

// FormatComplex converts the complex number c to a string of the
// form (a+bi) where a and b are the real and imaginary parts,
// formatted according to the format fmt and precision prec.
//
// The format fmt and precision prec have the same meaning as in [FormatFloat].
// It rounds the result assuming that the original was obtained from a complex
// value of bitSize bits, which must be 64 for complex64 and 128 for complex128.
func FormatComplex(c complex128, fmt byte, prec, bitSize int) string {
	return strconv.FormatComplex(c, fmt, prec, bitSize)
}
// FormatFloat converts the floating-point number f to a string,
// according to the format fmt and precision prec. It rounds the
// result assuming that the original was obtained from a floating-point
// value of bitSize bits (32 for float32, 64 for float64).
//
// The format fmt is one of
// - 'b' (-ddddp±ddd, a binary exponent),
// - 'e' (-d.dddde±dd, a decimal exponent),
// - 'E' (-d.ddddE±dd, a decimal exponent),
// - 'f' (-ddd.dddd, no exponent),
// - 'g' ('e' for large exponents, 'f' otherwise),
// - 'G' ('E' for large exponents, 'f' otherwise),
// - 'x' (-0xd.ddddp±ddd, a hexadecimal fraction and binary exponent), or
// - 'X' (-0Xd.ddddP±ddd, a hexadecimal fraction and binary exponent).
//
// The precision prec controls the number of digits (excluding the exponent)
// printed by the 'e', 'E', 'f', 'g', 'G', 'x', and 'X' formats.
// For 'e', 'E', 'f', 'x', and 'X', it is the number of digits after the decimal point.
// For 'g' and 'G' it is the maximum number of significant digits (trailing
// zeros are removed).
// The special precision -1 uses the smallest number of digits
// necessary such that ParseFloat will return f exactly.
// The exponent is written as a decimal integer;
// for all formats other than 'b', it will be at least two digits.
func FormatFloat(f float64, fmt byte, prec, bitSize int) string {
	return strconv.FormatFloat(f, fmt, prec, bitSize)
}
// AppendFloat appends the string form of the floating-point number f,
// as generated by [FormatFloat], to dst and returns the extended buffer.
func AppendFloat(dst []byte, f float64, fmt byte, prec, bitSize int) []byte {
	return strconv.AppendFloat(dst, f, fmt, prec, bitSize)
}
// FormatUint returns the string representation of i in the given base,
// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z'
// for digit values >= 10.
func FormatUint(i uint64, base int) string {
	return strconv.FormatUint(i, base)
}
// FormatInt returns the string representation of i in the given base,
// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z'
// for digit values >= 10.
func FormatInt(i int64, base int) string {
	return strconv.FormatInt(i, base)
}
// Itoa is equivalent to [FormatInt](int64(i), 10).
func Itoa(i int) string {
	return strconv.Itoa(i)
}
// AppendInt appends the string form of the integer i,
// as generated by [FormatInt], to dst and returns the extended buffer.
func AppendInt(dst []byte, i int64, base int) []byte {
	return strconv.AppendInt(dst, i, base)
}
// AppendUint appends the string form of the unsigned integer i,
// as generated by [FormatUint], to dst and returns the extended buffer.
func AppendUint(dst []byte, i uint64, base int) []byte {
	return strconv.AppendUint(dst, i, base)
}
// toError converts from internal/strconv.Error to the error guaranteed by this package's APIs.
func toError(fn, s string, base, bitSize int, err error) error {
	switch {
	case err == strconv.ErrSyntax:
		return syntaxError(fn, s)
	case err == strconv.ErrRange:
		return rangeError(fn, s)
	case err == strconv.ErrBase:
		return baseError(fn, s, base)
	case err == strconv.ErrBitSize:
		return bitSizeError(fn, s, bitSize)
	default:
		return err
	}
}
var (
	// ErrRange indicates that a value is out of range for the target type.
	ErrRange = errors.New("value out of range")

	// ErrSyntax indicates that a value does not have the right syntax for the target type.
	ErrSyntax = errors.New("invalid syntax")
)
// A NumError records a failed conversion.
type NumError struct {
	Func string // the failing function (ParseBool, ParseInt, ParseUint, ParseFloat, ParseComplex)
	Num  string // the input
	Err  error  // the reason the conversion failed (e.g. ErrRange, ErrSyntax, etc.)
}

// Error formats the failure as "strconv.Func: parsing "Num": reason".
func (e *NumError) Error() string {
	return "strconv." + e.Func + ": parsing " + Quote(e.Num) + ": " + e.Err.Error()
}

// Unwrap returns the underlying reason, so errors.Is/As see through NumError.
func (e *NumError) Unwrap() error { return e.Err }
// All ParseXXX functions allow the input string to escape to the error value.
// This hurts strconv.ParseXXX(string(b)) calls where b is []byte since
// the conversion from []byte must allocate a string on the heap.
// If we assume errors are infrequent, then we can avoid escaping the input
// back to the output by copying it first. This allows the compiler to call
// strconv.ParseXXX without a heap allocation for most []byte to string
// conversions, since it can now prove that the string cannot escape Parse.

// syntaxError returns a *NumError wrapping [ErrSyntax] for fn(str).
func syntaxError(fn, str string) *NumError {
	return &NumError{fn, stringslite.Clone(str), ErrSyntax}
}

// rangeError returns a *NumError wrapping [ErrRange] for fn(str).
func rangeError(fn, str string) *NumError {
	return &NumError{fn, stringslite.Clone(str), ErrRange}
}

// baseError returns a *NumError reporting an invalid base argument.
func baseError(fn, str string, base int) *NumError {
	return &NumError{fn, stringslite.Clone(str), errors.New("invalid base " + Itoa(base))}
}

// bitSizeError returns a *NumError reporting an invalid bitSize argument.
func bitSizeError(fn, str string, bitSize int) *NumError {
	return &NumError{fn, stringslite.Clone(str), errors.New("invalid bit size " + Itoa(bitSize))}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run makeisprint.go -output isprint.go
package strconv
import (
"unicode/utf8"
)
const (
	lowerhex = "0123456789abcdef" // digits for lower-case hex escapes (\xNN, \uNNNN)
	upperhex = "0123456789ABCDEF" // upper-case hex digits
)
// contains reports whether the string contains the byte c.
func contains(s string, c byte) bool {
	return index(s, c) >= 0
}
// quoteWith returns s surrounded by quote characters, with escaping done by
// appendQuotedWith.
func quoteWith(s string, quote byte, ASCIIonly, graphicOnly bool) string {
	return string(appendQuotedWith(make([]byte, 0, 3*len(s)/2), s, quote, ASCIIonly, graphicOnly))
}
// quoteRuneWith returns r as a quoted character literal, with escaping done
// by appendQuotedRuneWith.
func quoteRuneWith(r rune, quote byte, ASCIIonly, graphicOnly bool) string {
	return string(appendQuotedRuneWith(nil, r, quote, ASCIIonly, graphicOnly))
}
// appendQuotedWith appends a quoted, escaped form of s to buf and returns
// the extended buffer.
func appendQuotedWith(buf []byte, s string, quote byte, ASCIIonly, graphicOnly bool) []byte {
	// Often called with big strings, so preallocate. If there's quoting,
	// this is conservative but still helps a lot.
	if cap(buf)-len(buf) < len(s) {
		grown := make([]byte, len(buf), len(buf)+1+len(s)+1)
		copy(grown, buf)
		buf = grown
	}
	buf = append(buf, quote)
	for len(s) > 0 {
		r, width := utf8.DecodeRuneInString(s)
		if width == 1 && r == utf8.RuneError {
			// Invalid UTF-8 byte: emit it as a \xNN escape.
			buf = append(buf, `\x`...)
			buf = append(buf, lowerhex[s[0]>>4], lowerhex[s[0]&0xF])
		} else {
			buf = appendEscapedRune(buf, r, quote, ASCIIonly, graphicOnly)
		}
		s = s[width:]
	}
	return append(buf, quote)
}
// appendQuotedRuneWith appends a quoted, escaped form of the single rune r
// to buf and returns the extended buffer. Invalid code points are replaced
// with U+FFFD.
func appendQuotedRuneWith(buf []byte, r rune, quote byte, ASCIIonly, graphicOnly bool) []byte {
	if !utf8.ValidRune(r) {
		r = utf8.RuneError
	}
	buf = append(buf, quote)
	buf = appendEscapedRune(buf, r, quote, ASCIIonly, graphicOnly)
	return append(buf, quote)
}
// appendEscapedRune appends the escaped form of r to buf for a literal
// delimited by quote. The quote character and backslash are always
// backslash-escaped; printable runes pass through unchanged; everything else
// becomes a \a..\v, \xNN, \uNNNN, or \UNNNNNNNN escape.
func appendEscapedRune(buf []byte, r rune, quote byte, ASCIIonly, graphicOnly bool) []byte {
	if r == rune(quote) || r == '\\' { // always backslashed
		buf = append(buf, '\\')
		buf = append(buf, byte(r))
		return buf
	}
	if ASCIIonly {
		// Only printable ASCII passes through unescaped in ASCII-only mode.
		if r < utf8.RuneSelf && IsPrint(r) {
			buf = append(buf, byte(r))
			return buf
		}
	} else if IsPrint(r) || graphicOnly && isInGraphicList(r) {
		return utf8.AppendRune(buf, r)
	}
	// Non-printable: use a named escape where one exists.
	switch r {
	case '\a':
		buf = append(buf, `\a`...)
	case '\b':
		buf = append(buf, `\b`...)
	case '\f':
		buf = append(buf, `\f`...)
	case '\n':
		buf = append(buf, `\n`...)
	case '\r':
		buf = append(buf, `\r`...)
	case '\t':
		buf = append(buf, `\t`...)
	case '\v':
		buf = append(buf, `\v`...)
	default:
		switch {
		case r < ' ' || r == 0x7f:
			buf = append(buf, `\x`...)
			buf = append(buf, lowerhex[byte(r)>>4])
			buf = append(buf, lowerhex[byte(r)&0xF])
		case !utf8.ValidRune(r):
			// Replace invalid code points with U+FFFD, then emit as \u.
			r = 0xFFFD
			fallthrough
		case r < 0x10000:
			buf = append(buf, `\u`...)
			for s := 12; s >= 0; s -= 4 {
				buf = append(buf, lowerhex[r>>uint(s)&0xF])
			}
		default:
			buf = append(buf, `\U`...)
			for s := 28; s >= 0; s -= 4 {
				buf = append(buf, lowerhex[r>>uint(s)&0xF])
			}
		}
	}
	return buf
}
// The functions below form the exported quoting API. Each delegates to
// quoteWith/appendQuotedWith (strings) or quoteRuneWith/appendQuotedRuneWith
// (runes) with the appropriate ASCIIonly/graphicOnly flags.

// Quote returns a double-quoted Go string literal representing s. The
// returned string uses Go escape sequences (\t, \n, \xFF, \u0100) for
// control characters and non-printable characters as defined by
// [IsPrint].
func Quote(s string) string {
	return quoteWith(s, '"', false, false)
}
// AppendQuote appends a double-quoted Go string literal representing s,
// as generated by [Quote], to dst and returns the extended buffer.
func AppendQuote(dst []byte, s string) []byte {
	return appendQuotedWith(dst, s, '"', false, false)
}
// QuoteToASCII returns a double-quoted Go string literal representing s.
// The returned string uses Go escape sequences (\t, \n, \xFF, \u0100) for
// non-ASCII characters and non-printable characters as defined by [IsPrint].
func QuoteToASCII(s string) string {
	return quoteWith(s, '"', true, false)
}
// AppendQuoteToASCII appends a double-quoted Go string literal representing s,
// as generated by [QuoteToASCII], to dst and returns the extended buffer.
func AppendQuoteToASCII(dst []byte, s string) []byte {
	return appendQuotedWith(dst, s, '"', true, false)
}
// QuoteToGraphic returns a double-quoted Go string literal representing s.
// The returned string leaves Unicode graphic characters, as defined by
// [IsGraphic], unchanged and uses Go escape sequences (\t, \n, \xFF, \u0100)
// for non-graphic characters.
func QuoteToGraphic(s string) string {
	return quoteWith(s, '"', false, true)
}
// AppendQuoteToGraphic appends a double-quoted Go string literal representing s,
// as generated by [QuoteToGraphic], to dst and returns the extended buffer.
func AppendQuoteToGraphic(dst []byte, s string) []byte {
	return appendQuotedWith(dst, s, '"', false, true)
}
// QuoteRune returns a single-quoted Go character literal representing the
// rune. The returned string uses Go escape sequences (\t, \n, \xFF, \u0100)
// for control characters and non-printable characters as defined by [IsPrint].
// If r is not a valid Unicode code point, it is interpreted as the Unicode
// replacement character U+FFFD.
func QuoteRune(r rune) string {
	return quoteRuneWith(r, '\'', false, false)
}
// AppendQuoteRune appends a single-quoted Go character literal representing the rune,
// as generated by [QuoteRune], to dst and returns the extended buffer.
func AppendQuoteRune(dst []byte, r rune) []byte {
	return appendQuotedRuneWith(dst, r, '\'', false, false)
}
// QuoteRuneToASCII returns a single-quoted Go character literal representing
// the rune. The returned string uses Go escape sequences (\t, \n, \xFF,
// \u0100) for non-ASCII characters and non-printable characters as defined
// by [IsPrint].
// If r is not a valid Unicode code point, it is interpreted as the Unicode
// replacement character U+FFFD.
func QuoteRuneToASCII(r rune) string {
	return quoteRuneWith(r, '\'', true, false)
}
// AppendQuoteRuneToASCII appends a single-quoted Go character literal representing the rune,
// as generated by [QuoteRuneToASCII], to dst and returns the extended buffer.
func AppendQuoteRuneToASCII(dst []byte, r rune) []byte {
	return appendQuotedRuneWith(dst, r, '\'', true, false)
}
// QuoteRuneToGraphic returns a single-quoted Go character literal representing
// the rune. If the rune is not a Unicode graphic character,
// as defined by [IsGraphic], the returned string will use a Go escape sequence
// (\t, \n, \xFF, \u0100).
// If r is not a valid Unicode code point, it is interpreted as the Unicode
// replacement character U+FFFD.
func QuoteRuneToGraphic(r rune) string {
	return quoteRuneWith(r, '\'', false, true)
}
// AppendQuoteRuneToGraphic appends a single-quoted Go character literal representing the rune,
// as generated by [QuoteRuneToGraphic], to dst and returns the extended buffer.
func AppendQuoteRuneToGraphic(dst []byte, r rune) []byte {
	return appendQuotedRuneWith(dst, r, '\'', false, true)
}
// CanBackquote reports whether the string s can be represented
// unchanged as a single-line backquoted string without control
// characters other than tab.
func CanBackquote(s string) bool {
	for len(s) > 0 {
		r, wid := utf8.DecodeRuneInString(s)
		s = s[wid:]
		switch {
		case wid > 1:
			// Multibyte runes are correctly encoded and assumed printable,
			// except for the invisible BOM.
			if r == '\ufeff' {
				return false
			}
		case r == utf8.RuneError:
			return false
		case (r < ' ' && r != '\t') || r == '`' || r == '\u007F':
			return false
		}
	}
	return true
}
// unhex converts an ASCII hex digit to its numeric value.
// ok reports whether b was a valid hex digit.
func unhex(b byte) (v rune, ok bool) {
	switch c := rune(b); {
	case '0' <= c && c <= '9':
		return c - '0', true
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10, true
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10, true
	default:
		return 0, false
	}
}
// UnquoteChar decodes the first character or byte in the escaped string
// or character literal represented by the string s.
// It returns four values:
//
//  1. value, the decoded Unicode code point or byte value;
//  2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
//  3. tail, the remainder of the string after the character; and
//  4. an error that will be nil if the character is syntactically valid.
//
// The second argument, quote, specifies the type of literal being parsed
// and therefore which escaped quote character is permitted.
// If set to a single quote, it permits the sequence \' and disallows unescaped '.
// If set to a double quote, it permits \" and disallows unescaped ".
// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
func UnquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
	// easy cases
	if len(s) == 0 {
		err = ErrSyntax
		return
	}
	switch c := s[0]; {
	case c == quote && (quote == '\'' || quote == '"'):
		// An unescaped quote character cannot start a character.
		err = ErrSyntax
		return
	case c >= utf8.RuneSelf:
		// Multibyte UTF-8 character: decode and return it as-is.
		r, size := utf8.DecodeRuneInString(s)
		return r, true, s[size:], nil
	case c != '\\':
		// Ordinary single-byte character.
		return rune(s[0]), false, s[1:], nil
	}
	// hard case: c is backslash
	if len(s) <= 1 {
		err = ErrSyntax
		return
	}
	c := s[1]
	s = s[2:]
	switch c {
	case 'a':
		value = '\a'
	case 'b':
		value = '\b'
	case 'f':
		value = '\f'
	case 'n':
		value = '\n'
	case 'r':
		value = '\r'
	case 't':
		value = '\t'
	case 'v':
		value = '\v'
	case 'x', 'u', 'U':
		// Hexadecimal escapes: \xNN, \uNNNN, \UNNNNNNNN.
		n := 0
		switch c {
		case 'x':
			n = 2
		case 'u':
			n = 4
		case 'U':
			n = 8
		}
		var v rune
		if len(s) < n {
			err = ErrSyntax
			return
		}
		for j := 0; j < n; j++ {
			x, ok := unhex(s[j])
			if !ok {
				err = ErrSyntax
				return
			}
			v = v<<4 | x
		}
		s = s[n:]
		if c == 'x' {
			// single-byte string, possibly not UTF-8
			value = v
			break
		}
		// \u and \U must denote a valid Unicode code point.
		if !utf8.ValidRune(v) {
			err = ErrSyntax
			return
		}
		value = v
		multibyte = true
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// Octal escape: exactly three digits, value at most 255.
		v := rune(c) - '0'
		if len(s) < 2 {
			err = ErrSyntax
			return
		}
		for j := 0; j < 2; j++ { // one digit already; two more
			x := rune(s[j]) - '0'
			if x < 0 || x > 7 {
				err = ErrSyntax
				return
			}
			v = (v << 3) | x
		}
		s = s[2:]
		if v > 255 {
			err = ErrSyntax
			return
		}
		value = v
	case '\\':
		value = '\\'
	case '\'', '"':
		// A quote may only be backslash-escaped inside a literal that uses
		// that same quote.
		if c != quote {
			err = ErrSyntax
			return
		}
		value = rune(c)
	default:
		err = ErrSyntax
		return
	}
	tail = s
	return
}
// QuotedPrefix returns the quoted string (as understood by [Unquote]) at the prefix of s.
// If s does not start with a valid quoted string, QuotedPrefix returns an error.
func QuotedPrefix(s string) (string, error) {
	prefix, _, err := unquote(s, false)
	return prefix, err
}
// Unquote interprets s as a single-quoted, double-quoted,
// or backquoted Go string literal, returning the string value
// that s quotes. (If s is single-quoted, it would be a Go
// character literal; Unquote returns the corresponding
// one-character string. For an empty character literal
// Unquote returns the empty string.)
func Unquote(s string) (string, error) {
	value, tail, err := unquote(s, true)
	if tail != "" {
		// Trailing characters after the closing quote are not allowed.
		return "", ErrSyntax
	}
	return value, err
}
// unquote parses a quoted string at the start of the input,
// returning the parsed prefix, the remaining suffix, and any parse errors.
// If unescape is true, the parsed prefix is unescaped,
// otherwise the input prefix is provided verbatim.
func unquote(in string, unescape bool) (out, rem string, err error) {
	// Determine the quote form and optimistically find the terminating quote.
	if len(in) < 2 {
		return "", in, ErrSyntax
	}
	// The first byte selects the quote form: backquote, double, or single.
	quote := in[0]
	end := index(in[1:], quote)
	if end < 0 {
		return "", in, ErrSyntax
	}
	end += 2 // position after terminating quote; may be wrong if escape sequences are present

	switch quote {
	case '`':
		switch {
		case !unescape:
			out = in[:end] // include quotes
		case !contains(in[:end], '\r'):
			out = in[len("`") : end-len("`")] // exclude quotes
		default:
			// Carriage return characters ('\r') inside raw string literals
			// are discarded from the raw string value.
			buf := make([]byte, 0, end-len("`")-len("\r")-len("`"))
			for i := len("`"); i < end-len("`"); i++ {
				if in[i] != '\r' {
					buf = append(buf, in[i])
				}
			}
			out = string(buf)
		}
		// NOTE: Prior implementations did not verify that raw strings consist
		// of valid UTF-8 characters and we continue to not verify it as such.
		// The Go specification does not explicitly require valid UTF-8,
		// but only mention that it is implicitly valid for Go source code
		// (which must be valid UTF-8).
		return out, in[end:], nil
	case '"', '\'':
		// Handle quoted strings without any escape sequences.
		// In that case the optimistic `end` found above is exact, so the
		// value can be sliced out without byte-by-byte processing.
		if !contains(in[:end], '\\') && !contains(in[:end], '\n') {
			var valid bool
			switch quote {
			case '"':
				valid = utf8.ValidString(in[len(`"`) : end-len(`"`)])
			case '\'':
				// A character literal must hold exactly one valid rune.
				r, n := utf8.DecodeRuneInString(in[len("'") : end-len("'")])
				valid = len("'")+n+len("'") == end && (r != utf8.RuneError || n != 1)
			}
			if valid {
				out = in[:end]
				if unescape {
					out = out[1 : end-1] // exclude quotes
				}
				return out, in[end:], nil
			}
		}

		// Handle quoted strings with escape sequences.
		var buf []byte
		in0 := in
		in = in[1:] // skip starting quote
		if unescape {
			buf = make([]byte, 0, 3*end/2) // try to avoid more allocations
		}
		for len(in) > 0 && in[0] != quote {
			// Process the next character,
			// rejecting any unescaped newline characters which are invalid.
			r, multibyte, rem, err := UnquoteChar(in, quote)
			if in[0] == '\n' || err != nil {
				return "", in0, ErrSyntax
			}
			in = rem

			// Append the character if unescaping the input.
			if unescape {
				if r < utf8.RuneSelf || !multibyte {
					// \xNN and octal escapes stay single bytes even if >= 0x80.
					buf = append(buf, byte(r))
				} else {
					buf = utf8.AppendRune(buf, r)
				}
			}

			// Single quoted strings must be a single character.
			if quote == '\'' {
				break
			}
		}

		// Verify that the string ends with a terminating quote.
		if !(len(in) > 0 && in[0] == quote) {
			return "", in0, ErrSyntax
		}
		in = in[1:] // skip terminating quote
		if unescape {
			return string(buf), in, nil
		}
		return in0[:len(in0)-len(in)], in, nil
	default:
		return "", in, ErrSyntax
	}
}
// bsearch returns the smallest index i in s such that s[i] >= v, along with
// whether v is present at that index. It is semantically the same as
// [slices.BinarySearch] (without NaN checks); a local copy is kept because
// this package cannot import "slices".
func bsearch[S ~[]E, E ~uint16 | ~uint32](s S, v E) (int, bool) {
	lo, hi := 0, len(s)
	for lo < hi {
		mid := int(uint(lo+hi) >> 1) // avoids overflow of lo+hi
		if s[mid] < v {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return lo, lo < len(s) && s[lo] == v
}
// TODO: IsPrint is a local implementation of unicode.IsPrint, verified by the tests
// to give the same answer. It allows this package not to depend on unicode,
// and therefore not pull in all the Unicode tables. If the linker were better
// at tossing unused tables, we could get rid of this implementation.
// That would be nice.

// IsPrint reports whether the rune is defined as printable by Go, with
// the same definition as [unicode.IsPrint]: letters, numbers, punctuation,
// symbols and ASCII space.
func IsPrint(r rune) bool {
	// Fast check for Latin-1
	if r <= 0xFF {
		if 0x20 <= r && r <= 0x7E {
			// All the ASCII is printable from space through DEL-1.
			return true
		}
		if 0xA1 <= r && r <= 0xFF {
			// Similarly for ¡ through ÿ...
			return r != 0xAD // ...except for the bizarre soft hyphen.
		}
		return false
	}

	// Same algorithm, either on uint16 or uint32 value.
	// First, find first i such that isPrint[i] >= x.
	// This is the index of either the start or end of a pair that might span x.
	// The start is even (isPrint[i&^1]) and the end is odd (isPrint[i|1]).
	// If we find x in a range, make sure x is not in isNotPrint list.

	if 0 <= r && r < 1<<16 {
		rr, isPrint, isNotPrint := uint16(r), isPrint16, isNotPrint16
		i, _ := bsearch(isPrint, rr)
		if i >= len(isPrint) || rr < isPrint[i&^1] || isPrint[i|1] < rr {
			return false
		}
		_, found := bsearch(isNotPrint, rr)
		return !found
	}

	rr, isPrint, isNotPrint := uint32(r), isPrint32, isNotPrint32
	i, _ := bsearch(isPrint, rr)
	if i >= len(isPrint) || rr < isPrint[i&^1] || isPrint[i|1] < rr {
		return false
	}
	if r >= 0x20000 {
		// The exception lookup below only covers [0x10000, 0x20000);
		// runes at or above 0x20000 inside a print range are printable.
		return true
	}
	// isNotPrint stores plane-1 exceptions as 16-bit offsets from 0x10000.
	r -= 0x10000
	_, found := bsearch(isNotPrint, uint16(r))
	return !found
}
// IsGraphic reports whether the rune is defined as a Graphic by Unicode. Such
// characters include letters, marks, numbers, punctuation, symbols, and
// spaces, from categories L, M, N, P, S, and Zs.
func IsGraphic(r rune) bool {
	// Graphic = printable plus the extra runes in the isGraphic table.
	return IsPrint(r) || isInGraphicList(r)
}
// isInGraphicList reports whether the rune appears in the isGraphic table.
// It is kept separate from IsGraphic so quoteWith can avoid calling IsPrint
// twice; call it only after IsPrint has returned false.
func isInGraphicList(r rune) bool {
	// The table only holds 16-bit values - see makeisprint.go.
	if r > 0xFFFF {
		return false
	}
	_, ok := bsearch(isGraphic, uint16(r))
	return ok
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package strings
import (
"internal/abi"
"internal/bytealg"
"unicode/utf8"
"unsafe"
)
// A Builder is used to efficiently build a string using [Builder.Write] methods.
// It minimizes memory copying. The zero value is ready to use.
// Do not copy a non-zero Builder.
type Builder struct {
	// addr points at the receiver of the first mutating call, letting
	// copyCheck detect a Builder that was copied by value afterwards.
	addr *Builder // of receiver, to detect copies by value

	// External users should never get direct access to this buffer, since
	// the slice at some point will be converted to a string using unsafe, also
	// data between len(buf) and cap(buf) might be uninitialized.
	buf []byte
}
// copyCheck implements a dynamic check to prevent modification after
// copying a non-zero Builder, which would be unsafe (see #25907, #47276).
//
// We cannot add a noCopy field to Builder, to cause vet's copylocks
// check to report copying, because copylocks cannot reliably
// discriminate the zero and nonzero cases.
func (b *Builder) copyCheck() {
	if b.addr == nil {
		// This hack works around a failing of Go's escape analysis
		// that was causing b to escape and be heap allocated.
		// See issue 23382.
		// TODO: once issue 7921 is fixed, this should be reverted to
		// just "b.addr = b".
		b.addr = (*Builder)(abi.NoEscape(unsafe.Pointer(b)))
	} else if b.addr != b {
		// addr was recorded against a different address: the Builder was
		// copied by value after use.
		panic("strings: illegal use of non-zero Builder copied by value")
	}
}
// String returns the accumulated string.
func (b *Builder) String() string {
	// Aliases the buffer's bytes as a string without copying. This is safe
	// because every write method only appends past len(b.buf); bytes already
	// handed out are never rewritten.
	return unsafe.String(unsafe.SliceData(b.buf), len(b.buf))
}
// Len returns the number of accumulated bytes; b.Len() == len(b.String()).
func (b *Builder) Len() int {
	return len(b.buf)
}
// Cap returns the capacity of the builder's underlying byte slice. It is the
// total space allocated for the string being built and includes any bytes
// already written.
func (b *Builder) Cap() int {
	return cap(b.buf)
}
// Reset resets the [Builder] to be empty.
func (b *Builder) Reset() {
	// Zeroing the whole struct clears the buffer and re-arms the copy check.
	*b = Builder{}
}
// grow copies the buffer to a new, larger buffer so that there are at least n
// bytes of capacity beyond len(b.buf).
func (b *Builder) grow(n int) {
	// MakeNoZero skips zeroing; only the copied prefix is exposed via len.
	bigger := bytealg.MakeNoZero(2*cap(b.buf) + n)[:len(b.buf)]
	copy(bigger, b.buf)
	b.buf = bigger
}
// Grow grows b's capacity, if necessary, to guarantee space for
// another n bytes. After Grow(n), at least n bytes can be written to b
// without another allocation. If n is negative, Grow panics.
func (b *Builder) Grow(n int) {
	b.copyCheck()
	if n < 0 {
		panic("strings.Builder.Grow: negative count")
	}
	if n > cap(b.buf)-len(b.buf) {
		b.grow(n)
	}
}
// Write appends the contents of p to b's buffer.
// Write always returns len(p), nil.
func (b *Builder) Write(p []byte) (int, error) {
	b.copyCheck()
	b.buf = append(b.buf, p...)
	// append cannot fail; the error return exists only to satisfy io.Writer.
	return len(p), nil
}
// WriteByte appends the byte c to b's buffer.
// The returned error is always nil.
func (b *Builder) WriteByte(c byte) error {
	b.copyCheck()
	b.buf = append(b.buf, c)
	// The error return exists only to satisfy io.ByteWriter.
	return nil
}
// WriteRune appends the UTF-8 encoding of Unicode code point r to b's buffer.
// It returns the length of r and a nil error.
func (b *Builder) WriteRune(r rune) (int, error) {
	b.copyCheck()
	before := len(b.buf)
	b.buf = utf8.AppendRune(b.buf, r)
	// The encoded width is however much AppendRune grew the buffer.
	return len(b.buf) - before, nil
}
// WriteString appends the contents of s to b's buffer.
// It returns the length of s and a nil error.
func (b *Builder) WriteString(s string) (int, error) {
	b.copyCheck()
	b.buf = append(b.buf, s...)
	// append cannot fail; the error return exists only to satisfy io.StringWriter.
	return len(s), nil
}
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package strings
import (
"internal/stringslite"
)
// Clone returns a fresh copy of s.
// It guarantees to make a copy of s into a new allocation,
// which can be important when retaining only a small substring
// of a much larger string. Using Clone can help such programs
// use less memory. Of course, since using Clone makes a copy,
// overuse of Clone can make programs use more memory.
// Clone should typically be used only rarely, and only when
// profiling indicates that it is needed.
// For strings of length zero the string "" will be returned
// and no allocation is made.
func Clone(s string) string {
	// Delegates to the shared internal implementation in internal/stringslite.
	return stringslite.Clone(s)
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package strings
import "internal/bytealg"
// Compare returns an integer comparing two strings lexicographically.
// The result will be 0 if a == b, -1 if a < b, and +1 if a > b.
//
// Use Compare when you need to perform a three-way comparison (with
// [slices.SortFunc], for example). It is usually clearer and always faster
// to use the built-in string comparison operators ==, <, >, and so on.
func Compare(a, b string) int {
	// Delegates to the optimized comparison in internal/bytealg.
	return bytealg.CompareString(a, b)
}
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package strings
import (
"iter"
"unicode"
"unicode/utf8"
)
// Lines returns an iterator over the newline-terminated lines in the string s.
// The lines yielded by the iterator include their terminating newlines.
// If s is empty, the iterator yields no lines at all.
// If s does not end in a newline, the final yielded line will not end in a newline.
// It returns a single-use iterator.
func Lines(s string) iter.Seq[string] {
	return func(yield func(string) bool) {
		for len(s) > 0 {
			// The next line runs through the first '\n' (inclusive),
			// or to the end of the string if there is none.
			line := s
			if i := IndexByte(s, '\n'); i >= 0 {
				line = s[:i+1]
			}
			s = s[len(line):]
			if !yield(line) {
				return
			}
		}
	}
}
// splitSeq is SplitSeq or SplitAfterSeq, configured by how many
// bytes of sep to include in the results (none or all).
func splitSeq(s, sep string, sepSave int) iter.Seq[string] {
	return func(yield func(string) bool) {
		if sep == "" {
			// An empty separator splits s into individual UTF-8 sequences.
			for len(s) > 0 {
				_, width := utf8.DecodeRuneInString(s)
				if !yield(s[:width]) {
					return
				}
				s = s[width:]
			}
			return
		}
		for {
			pos := Index(s, sep)
			if pos < 0 {
				break
			}
			if !yield(s[:pos+sepSave]) {
				return
			}
			s = s[pos+len(sep):]
		}
		// The final fragment (possibly empty) is always yielded; the result
		// can be ignored because iteration ends here regardless.
		yield(s)
	}
}
// SplitSeq returns an iterator over all substrings of s separated by sep.
// The iterator yields the same strings that would be returned by [Split](s, sep),
// but without constructing the slice.
// It returns a single-use iterator.
func SplitSeq(s, sep string) iter.Seq[string] {
	// sepSave of 0 excludes the separator from each yielded substring.
	return splitSeq(s, sep, 0)
}
// SplitAfterSeq returns an iterator over substrings of s split after each instance of sep.
// The iterator yields the same strings that would be returned by [SplitAfter](s, sep),
// but without constructing the slice.
// It returns a single-use iterator.
func SplitAfterSeq(s, sep string) iter.Seq[string] {
	// sepSave of len(sep) keeps the separator at the end of each substring.
	return splitSeq(s, sep, len(sep))
}
// FieldsSeq returns an iterator over substrings of s split around runs of
// whitespace characters, as defined by [unicode.IsSpace].
// The iterator yields the same strings that would be returned by [Fields](s),
// but without constructing the slice.
func FieldsSeq(s string) iter.Seq[string] {
	return func(yield func(string) bool) {
		start := -1 // byte offset where the current field began, or -1 between fields
		for i := 0; i < len(s); {
			size := 1
			r := rune(s[i])
			// Fast path: single-byte (ASCII) characters are classified via
			// the asciiSpace table; only multi-byte runes are fully decoded.
			isSpace := asciiSpace[s[i]] != 0
			if r >= utf8.RuneSelf {
				r, size = utf8.DecodeRuneInString(s[i:])
				isSpace = unicode.IsSpace(r)
			}
			if isSpace {
				if start >= 0 {
					if !yield(s[start:i]) {
						return
					}
					start = -1
				}
			} else if start < 0 {
				start = i
			}
			i += size
		}
		// Flush a trailing field that runs to the end of the string. The
		// yield result can be ignored because iteration ends here anyway.
		if start >= 0 {
			yield(s[start:])
		}
	}
}
// FieldsFuncSeq returns an iterator over substrings of s split around runs of
// Unicode code points satisfying f(c).
// The iterator yields the same strings that would be returned by [FieldsFunc](s),
// but without constructing the slice.
func FieldsFuncSeq(s string, f func(rune) bool) iter.Seq[string] {
return func(yield func(string) bool) {
start := -1
for i := 0; i < len(s); {
r, size := utf8.DecodeRuneInString(s[i:])
if f(r) {
if start >= 0 {
if !yield(s[start:i]) {
return
}
start = -1
}
} else if start < 0 {
start = i
}
i += size
}
if start >= 0 {
yield(s[start:])
}
}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package strings
import (
"errors"
"io"
"unicode/utf8"
)
// A Reader implements the [io.Reader], [io.ReaderAt], [io.ByteReader], [io.ByteScanner],
// [io.RuneReader], [io.RuneScanner], [io.Seeker], and [io.WriterTo] interfaces by reading
// from a string.
// The zero value for Reader operates like a Reader of an empty string.
type Reader struct {
	s        string // the source string; never modified after Reset/NewReader
	i        int64  // current reading index
	prevRune int    // index of previous rune; or < 0
}
// Len returns the number of bytes of the unread portion of the
// string.
func (r *Reader) Len() int {
	// r.i can be past the end after a Seek; report zero in that case.
	if remaining := int64(len(r.s)) - r.i; remaining > 0 {
		return int(remaining)
	}
	return 0
}
// Size returns the original length of the underlying string.
// Size is the number of bytes available for reading via [Reader.ReadAt].
// The returned value is always the same and is not affected by calls
// to any other method.
func (r *Reader) Size() int64 {
	return int64(len(r.s))
}
// Read implements the [io.Reader] interface.
func (r *Reader) Read(b []byte) (int, error) {
	if r.i >= int64(len(r.s)) {
		return 0, io.EOF
	}
	// Any byte-level read invalidates the recorded rune position.
	r.prevRune = -1
	n := copy(b, r.s[r.i:])
	r.i += int64(n)
	return n, nil
}
// ReadAt implements the [io.ReaderAt] interface.
func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
	// ReadAt must not mutate the reader's state - see io.ReaderAt.
	switch {
	case off < 0:
		return 0, errors.New("strings.Reader.ReadAt: negative offset")
	case off >= int64(len(r.s)):
		return 0, io.EOF
	}
	n = copy(b, r.s[off:])
	// A short copy means the string ended before b was filled.
	if n < len(b) {
		err = io.EOF
	}
	return n, err
}
// ReadByte implements the [io.ByteReader] interface.
func (r *Reader) ReadByte() (byte, error) {
	// A byte-level read always invalidates the recorded rune position.
	r.prevRune = -1
	if r.i >= int64(len(r.s)) {
		return 0, io.EOF
	}
	c := r.s[r.i]
	r.i++
	return c, nil
}
// UnreadByte implements the [io.ByteScanner] interface.
func (r *Reader) UnreadByte() error {
	if r.i <= 0 {
		return errors.New("strings.Reader.UnreadByte: at beginning of string")
	}
	// Stepping back a single byte invalidates any recorded rune position.
	r.prevRune = -1
	r.i--
	return nil
}
// ReadRune implements the [io.RuneReader] interface.
func (r *Reader) ReadRune() (ch rune, size int, err error) {
	if r.i >= int64(len(r.s)) {
		r.prevRune = -1
		return 0, 0, io.EOF
	}
	// Record where this rune starts so UnreadRune can step back to it.
	r.prevRune = int(r.i)
	ch, size = utf8.DecodeRuneInString(r.s[r.i:])
	r.i += int64(size)
	return
}
// UnreadRune implements the [io.RuneScanner] interface.
func (r *Reader) UnreadRune() error {
	if r.i <= 0 {
		return errors.New("strings.Reader.UnreadRune: at beginning of string")
	}
	// prevRune is only valid immediately after a successful ReadRune;
	// every other operation resets it to -1.
	if r.prevRune < 0 {
		return errors.New("strings.Reader.UnreadRune: previous operation was not ReadRune")
	}
	r.i = int64(r.prevRune)
	r.prevRune = -1
	return nil
}
// Seek implements the [io.Seeker] interface.
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
	// Seeking anywhere invalidates any recorded rune position.
	r.prevRune = -1
	var pos int64
	switch whence {
	case io.SeekStart:
		pos = offset
	case io.SeekCurrent:
		pos = r.i + offset
	case io.SeekEnd:
		pos = int64(len(r.s)) + offset
	default:
		return 0, errors.New("strings.Reader.Seek: invalid whence")
	}
	if pos < 0 {
		return 0, errors.New("strings.Reader.Seek: negative position")
	}
	// Seeking past the end is allowed; reads will simply return EOF.
	r.i = pos
	return pos, nil
}
// WriteTo implements the [io.WriterTo] interface.
func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
	r.prevRune = -1
	if r.i >= int64(len(r.s)) {
		return 0, nil
	}
	s := r.s[r.i:]
	m, err := io.WriteString(w, s)
	if m > len(s) {
		// A writer claiming to have written more than it was given violates
		// the io.Writer contract; fail loudly rather than corrupt r.i.
		panic("strings.Reader.WriteTo: invalid WriteString count")
	}
	r.i += int64(m)
	n = int64(m)
	// Per io.WriterTo, a partial write without an explicit error is reported
	// as ErrShortWrite.
	if m != len(s) && err == nil {
		err = io.ErrShortWrite
	}
	return
}
// Reset resets the [Reader] to be reading from s.
func (r *Reader) Reset(s string) {
	*r = Reader{s: s, i: 0, prevRune: -1}
}
// NewReader returns a new [Reader] reading from s.
// It is similar to [bytes.NewBufferString] but more efficient and non-writable.
func NewReader(s string) *Reader {
	return &Reader{s: s, i: 0, prevRune: -1}
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package strings
import (
"io"
"sync"
)
// Replacer replaces a list of strings with replacements.
// It is safe for concurrent use by multiple goroutines.
type Replacer struct {
	once   sync.Once // guards buildOnce method
	r      replacer  // the concrete algorithm; set exactly once by buildOnce
	oldnew []string  // the original old/new pairs; released after building
}
// replacer is the interface that a replacement algorithm needs to implement.
type replacer interface {
	// Replace returns a copy of s with all replacements performed.
	Replace(s string) string
	// WriteString writes s to w with all replacements performed,
	// returning the bytes written and any write error.
	WriteString(w io.Writer, s string) (n int, err error)
}
// NewReplacer returns a new [Replacer] from a list of old, new string
// pairs. Replacements are performed in the order they appear in the
// target string, without overlapping matches. The old string
// comparisons are done in argument order.
//
// NewReplacer panics if given an odd number of arguments.
func NewReplacer(oldnew ...string) *Replacer {
	if len(oldnew)%2 != 0 {
		panic("strings.NewReplacer: odd argument count")
	}
	// Copy the pairs so later mutation of the caller's slice has no effect.
	pairs := make([]string, len(oldnew))
	copy(pairs, oldnew)
	return &Replacer{oldnew: pairs}
}
// buildOnce constructs the concrete replacement algorithm on first use and
// releases the pair list so its memory can be reclaimed. Invoked via r.once.Do.
func (r *Replacer) buildOnce() {
	r.r = r.build()
	r.oldnew = nil
}
// build selects and constructs the replacer implementation best suited to
// the old/new pairs: a Boyer-Moore searcher for a single multi-byte pattern,
// a byte-to-byte table when all olds and news are single bytes, a
// byte-to-string table when only the olds are single bytes, and the generic
// trie otherwise.
func (b *Replacer) build() replacer {
	oldnew := b.oldnew
	if len(oldnew) == 2 && len(oldnew[0]) > 1 {
		return makeSingleStringReplacer(oldnew[0], oldnew[1])
	}

	allNewBytes := true
	for i := 0; i < len(oldnew); i += 2 {
		if len(oldnew[i]) != 1 {
			// Some old string is multi-byte: only the generic trie handles that.
			return makeGenericReplacer(oldnew)
		}
		if len(oldnew[i+1]) != 1 {
			allNewBytes = false
		}
	}

	if allNewBytes {
		// Start with the identity mapping, then overlay the pairs.
		r := byteReplacer{}
		for i := range r {
			r[i] = byte(i)
		}
		// The first occurrence of old->new map takes precedence
		// over the others with the same old string.
		for i := len(oldnew) - 2; i >= 0; i -= 2 {
			o := oldnew[i][0]
			n := oldnew[i+1][0]
			r[o] = n
		}
		return &r
	}

	r := byteStringReplacer{toReplace: make([]string, 0, len(oldnew)/2)}
	// The first occurrence of old->new map takes precedence
	// over the others with the same old string.
	for i := len(oldnew) - 2; i >= 0; i -= 2 {
		o := oldnew[i][0]
		n := oldnew[i+1]
		// To avoid counting repetitions multiple times.
		if r.replacements[o] == nil {
			// We need to use string([]byte{o}) instead of string(o),
			// to avoid utf8 encoding of o.
			// E. g. byte(150) produces string of length 2.
			r.toReplace = append(r.toReplace, string([]byte{o}))
		}
		r.replacements[o] = []byte(n)
	}
	return &r
}
// Replace returns a copy of s with all replacements performed.
func (r *Replacer) Replace(s string) string {
	// Lazily build the concrete algorithm on first use (thread-safe).
	r.once.Do(r.buildOnce)
	return r.r.Replace(s)
}
// WriteString writes s to w with all replacements performed.
func (r *Replacer) WriteString(w io.Writer, s string) (n int, err error) {
	// Lazily build the concrete algorithm on first use (thread-safe).
	r.once.Do(r.buildOnce)
	return r.r.WriteString(w, s)
}
// trieNode is a node in a lookup trie for prioritized key/value pairs. Keys
// and values may be empty. For example, the trie containing keys "ax", "ay",
// "bcbc", "x" and "xy" could have eight nodes:
//
//	n0  -
//	n1  a-
//	n2  .x+
//	n3  .y+
//	n4  b-
//	n5  .cbc+
//	n6  x+
//	n7  .y+
//
// n0 is the root node, and its children are n1, n4 and n6; n1's children are
// n2 and n3; n4's child is n5; n6's child is n7. Nodes n0, n1 and n4 (marked
// with a trailing "-") are partial keys, and nodes n2, n3, n5, n6 and n7
// (marked with a trailing "+") are complete keys.
type trieNode struct {
	// value is the value of the trie node's key/value pair. It is empty if
	// this node is not a complete key.
	value string
	// priority is the priority (higher is more important) of the trie node's
	// key/value pair; keys are not necessarily matched shortest- or longest-
	// first. Priority is positive if this node is a complete key, and zero
	// otherwise. In the example above, positive/zero priorities are marked
	// with a trailing "+" or "-".
	priority int

	// A trie node may have zero, one or more child nodes:
	//  * if the remaining fields are zero, there are no children.
	//  * if prefix and next are non-zero, there is one child in next.
	//  * if table is non-zero, it defines all the children.
	//
	// Prefixes are preferred over tables when there is one child, but the
	// root node always uses a table for lookup efficiency.

	// prefix is the difference in keys between this trie node and the next.
	// In the example above, node n4 has prefix "cbc" and n4's next node is n5.
	// Node n5 has no children and so has zero prefix, next and table fields.
	prefix string
	next   *trieNode

	// table is a lookup table indexed by the next byte in the key, after
	// remapping that byte through genericReplacer.mapping to create a dense
	// index. In the example above, the keys only use 'a', 'b', 'c', 'x' and
	// 'y', which remap to 0, 1, 2, 3 and 4. All other bytes remap to 5, and
	// genericReplacer.tableSize will be 5. Node n0's table will be
	// []*trieNode{ 0:n1, 1:n4, 3:n6 }, where the 0, 1 and 3 are the remapped
	// 'a', 'b' and 'x'.
	table []*trieNode
}
// add inserts the key/value pair with the given priority into the trie rooted
// at t. r supplies the byte-to-dense-index mapping and the table size used
// when a node is converted to table form.
func (t *trieNode) add(key, val string, priority int, r *genericReplacer) {
	if key == "" {
		// End of key: record the value here unless this node already holds
		// a complete key (earlier pairs win via their higher priority).
		if t.priority == 0 {
			t.value = val
			t.priority = priority
		}
		return
	}

	if t.prefix != "" {
		// Need to split the prefix among multiple nodes.
		var n int // length of the longest common prefix
		for ; n < len(t.prefix) && n < len(key); n++ {
			if t.prefix[n] != key[n] {
				break
			}
		}
		if n == len(t.prefix) {
			// key extends the whole prefix; continue in the child.
			t.next.add(key[n:], val, priority, r)
		} else if n == 0 {
			// First byte differs, start a new lookup table here. Looking up
			// what is currently t.prefix[0] will lead to prefixNode, and
			// looking up key[0] will lead to keyNode.
			var prefixNode *trieNode
			if len(t.prefix) == 1 {
				prefixNode = t.next
			} else {
				prefixNode = &trieNode{
					prefix: t.prefix[1:],
					next:   t.next,
				}
			}
			keyNode := new(trieNode)
			t.table = make([]*trieNode, r.tableSize)
			t.table[r.mapping[t.prefix[0]]] = prefixNode
			t.table[r.mapping[key[0]]] = keyNode
			t.prefix = ""
			t.next = nil
			keyNode.add(key[1:], val, priority, r)
		} else {
			// Insert new node after the common section of the prefix.
			next := &trieNode{
				prefix: t.prefix[n:],
				next:   t.next,
			}
			t.prefix = t.prefix[:n]
			t.next = next
			next.add(key[n:], val, priority, r)
		}
	} else if t.table != nil {
		// Insert into existing table.
		m := r.mapping[key[0]]
		if t.table[m] == nil {
			t.table[m] = new(trieNode)
		}
		t.table[m].add(key[1:], val, priority, r)
	} else {
		// Empty leaf: store the whole remaining key as a prefix edge.
		t.prefix = key
		t.next = new(trieNode)
		t.next.add("", val, priority, r)
	}
}
// lookup finds the highest-priority key in the trie that is a prefix of s,
// returning its value, the length of the matched key, and whether any match
// was found. If ignoreRoot is true, a complete key stored at the root (the
// empty pattern) is skipped; the caller uses this to avoid matching the
// empty pattern twice in a row.
func (r *genericReplacer) lookup(s string, ignoreRoot bool) (val string, keylen int, found bool) {
	// Iterate down the trie to the end, and grab the value and keylen with
	// the highest priority.
	bestPriority := 0
	node := &r.root
	n := 0
	for node != nil {
		if node.priority > bestPriority && !(ignoreRoot && node == &r.root) {
			bestPriority = node.priority
			val = node.value
			keylen = n
			found = true
		}

		if s == "" {
			break
		}
		if node.table != nil {
			index := r.mapping[s[0]]
			if int(index) == r.tableSize {
				// s[0] occurs in no key, so no deeper match is possible.
				break
			}
			node = node.table[index]
			s = s[1:]
			n++
		} else if node.prefix != "" && HasPrefix(s, node.prefix) {
			n += len(node.prefix)
			s = s[len(node.prefix):]
			node = node.next
		} else {
			break
		}
	}
	return
}
// genericReplacer is the fully generic algorithm.
// It's used as a fallback when nothing faster can be used.
type genericReplacer struct {
	root trieNode
	// tableSize is the size of a trie node's lookup table. It is the number
	// of unique key bytes.
	tableSize int
	// mapping maps from key bytes to a dense index for trieNode.table.
	// Bytes that occur in no key map to tableSize, an out-of-range sentinel.
	mapping [256]byte
}
// makeGenericReplacer builds the trie-based replacer from the flat old/new
// pair list. Earlier pairs receive higher priority, implementing the
// documented NewReplacer precedence.
func makeGenericReplacer(oldnew []string) *genericReplacer {
	r := new(genericReplacer)
	// Find each byte used, then assign them each an index.
	for i := 0; i < len(oldnew); i += 2 {
		key := oldnew[i]
		for j := 0; j < len(key); j++ {
			r.mapping[key[j]] = 1
		}
	}

	for _, b := range r.mapping {
		r.tableSize += int(b)
	}

	var index byte
	for i, b := range r.mapping {
		if b == 0 {
			// Bytes that appear in no key map to tableSize, the sentinel
			// that lookup treats as "no child".
			r.mapping[i] = byte(r.tableSize)
		} else {
			r.mapping[i] = index
			index++
		}
	}
	// Ensure root node uses a lookup table (for performance).
	r.root.table = make([]*trieNode, r.tableSize)

	for i := 0; i < len(oldnew); i += 2 {
		// len(oldnew)-i decreases with i, so earlier pairs get higher priority.
		r.root.add(oldnew[i], oldnew[i+1], len(oldnew)-i, r)
	}
	return r
}
// appendSliceWriter is a []byte that grows by appending. It lets
// genericReplacer.Replace reuse the WriteString code path with no real I/O.
type appendSliceWriter []byte

// Write writes to the buffer to satisfy [io.Writer]. It never fails.
func (w *appendSliceWriter) Write(p []byte) (int, error) {
	*w = append(*w, p...)
	return len(p), nil
}
// WriteString writes to the buffer without string->[]byte->string allocations.
// It never fails.
func (w *appendSliceWriter) WriteString(s string) (int, error) {
	*w = append(*w, s...)
	return len(s), nil
}
// stringWriter adapts a plain [io.Writer] to [io.StringWriter] by converting
// each string to a []byte (one allocation per call).
type stringWriter struct {
	w io.Writer
}

// WriteString implements [io.StringWriter] via a string-to-[]byte conversion.
func (w stringWriter) WriteString(s string) (int, error) {
	return w.w.Write([]byte(s))
}
// getStringWriter returns w itself when it already implements
// [io.StringWriter], and otherwise wraps it in a stringWriter adapter.
func getStringWriter(w io.Writer) io.StringWriter {
	if sw, ok := w.(io.StringWriter); ok {
		return sw
	}
	return stringWriter{w}
}
// Replace returns a copy of s with all replacements performed.
func (r *genericReplacer) Replace(s string) string {
	buf := make(appendSliceWriter, 0, len(s))
	// appendSliceWriter writes cannot fail, so the error is ignored.
	r.WriteString(&buf, s)
	return string(buf)
}
// WriteString writes s to w with all replacements performed, returning the
// total bytes written and the first write error encountered, if any.
func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) {
	sw := getStringWriter(w)
	var last, wn int
	var prevMatchEmpty bool
	// i runs to len(s) inclusive so an empty pattern can match at the end.
	for i := 0; i <= len(s); {
		// Fast path: s[i] is not a prefix of any pattern.
		if i != len(s) && r.root.priority == 0 {
			index := int(r.mapping[s[i]])
			if index == r.tableSize || r.root.table[index] == nil {
				i++
				continue
			}
		}

		// Ignore the empty match iff the previous loop found the empty match.
		val, keylen, match := r.lookup(s[i:], prevMatchEmpty)
		prevMatchEmpty = match && keylen == 0
		if match {
			// Flush the unmatched text before the match, then the replacement.
			wn, err = sw.WriteString(s[last:i])
			n += wn
			if err != nil {
				return
			}
			wn, err = sw.WriteString(val)
			n += wn
			if err != nil {
				return
			}
			i += keylen
			last = i
			continue
		}
		i++
	}
	// Flush any unmatched tail after the final match.
	if last != len(s) {
		wn, err = sw.WriteString(s[last:])
		n += wn
	}
	return
}
// singleStringReplacer is the implementation that's used when there is only
// one string to replace (and that string has more than one byte).
type singleStringReplacer struct {
	// finder locates occurrences of the pattern (Boyer-Moore search).
	finder *stringFinder
	// value is the new string that replaces that pattern when it's found.
	value string
}

// makeSingleStringReplacer builds a singleStringReplacer for the given
// pattern/value pair.
func makeSingleStringReplacer(pattern string, value string) *singleStringReplacer {
	return &singleStringReplacer{finder: makeStringFinder(pattern), value: value}
}
// Replace returns a copy of s with every occurrence of the pattern replaced
// by r.value. If the pattern does not occur, s is returned unchanged with no
// allocation.
func (r *singleStringReplacer) Replace(s string) string {
	var buf Builder
	i, matched := 0, false
	for {
		// next returns the offset of the next match in s[i:], or -1.
		match := r.finder.next(s[i:])
		if match == -1 {
			break
		}
		matched = true
		buf.Grow(match + len(r.value))
		buf.WriteString(s[i : i+match])
		buf.WriteString(r.value)
		i += match + len(r.finder.pattern)
	}
	if !matched {
		return s
	}
	// Append the tail after the last match.
	buf.WriteString(s[i:])
	return buf.String()
}
// WriteString writes s to w with every occurrence of the pattern replaced by
// r.value, returning the total bytes written and the first write error.
func (r *singleStringReplacer) WriteString(w io.Writer, s string) (n int, err error) {
	sw := getStringWriter(w)
	var i, wn int
	for {
		match := r.finder.next(s[i:])
		if match == -1 {
			break
		}
		// Write the unmatched prefix, then the replacement value.
		wn, err = sw.WriteString(s[i : i+match])
		n += wn
		if err != nil {
			return
		}
		wn, err = sw.WriteString(r.value)
		n += wn
		if err != nil {
			return
		}
		i += match + len(r.finder.pattern)
	}
	// Write the tail after the last match (possibly all of s).
	wn, err = sw.WriteString(s[i:])
	n += wn
	return
}
// byteReplacer is the implementation that's used when all the "old"
// and "new" values are single ASCII bytes.
// The array contains replacement bytes indexed by old byte.
type byteReplacer [256]byte

// Replace returns a copy of s with each byte replaced through the table.
// If no byte would change, s is returned unchanged with no allocation.
func (r *byteReplacer) Replace(s string) string {
	var buf []byte // lazily allocated
	for i := 0; i < len(s); i++ {
		b := s[i]
		if r[b] != b {
			if buf == nil {
				// First change: materialize a mutable copy of s.
				buf = []byte(s)
			}
			buf[i] = r[b]
		}
	}
	if buf == nil {
		return s
	}
	return string(buf)
}
// WriteString writes s to w with all byte replacements performed. Runs of
// unchanged bytes are written in single chunks; each replaced byte is written
// individually by slicing the table itself.
func (r *byteReplacer) WriteString(w io.Writer, s string) (n int, err error) {
	sw := getStringWriter(w)
	last := 0
	for i := 0; i < len(s); i++ {
		b := s[i]
		if r[b] == b {
			continue
		}
		// Flush the unchanged run preceding this byte.
		if last != i {
			wn, err := sw.WriteString(s[last:i])
			n += wn
			if err != nil {
				return n, err
			}
		}
		last = i + 1
		// r[b : int(b)+1] is a one-byte slice of the table holding the
		// replacement for b, so no temporary buffer is needed.
		nw, err := w.Write(r[b : int(b)+1])
		n += nw
		if err != nil {
			return n, err
		}
	}
	// Flush the unchanged tail.
	if last != len(s) {
		nw, err := sw.WriteString(s[last:])
		n += nw
		if err != nil {
			return n, err
		}
	}
	return n, nil
}
// byteStringReplacer is the implementation that's used when all the
// "old" values are single ASCII bytes but the "new" values vary in size.
type byteStringReplacer struct {
	// replacements contains replacement byte slices indexed by old byte.
	// A nil []byte means that the old byte should not be replaced.
	replacements [256][]byte
	// toReplace keeps a list of bytes to replace. Depending on length of toReplace
	// and length of target string it may be faster to use Count, or a plain loop.
	// We store single byte as a string, because Count takes a string.
	toReplace []string
}
// countCutOff controls the ratio of a string length to a number of replacements
// at which (*byteStringReplacer).Replace switches algorithms.
// For strings with a higher ratio of length to replacements than that value,
// we call Count, for each replacement from toReplace.
// For strings with a lower ratio we use a simple loop, because of Count overhead.
// countCutOff is an empirically determined overhead multiplier.
// TODO(tocarip) revisit once we have register-based abi/mid-stack inlining.
const countCutOff = 8
// Replace returns a copy of s with all replacements performed. It first
// computes the exact output size so the result is built in one allocation;
// if nothing would change, s is returned unchanged.
func (r *byteStringReplacer) Replace(s string) string {
	newSize := len(s)
	anyChanges := false
	// Is it faster to use Count?
	if len(r.toReplace)*countCutOff <= len(s) {
		for _, x := range r.toReplace {
			if c := Count(s, x); c != 0 {
				// The -1 is because we are replacing 1 byte with len(replacements[b]) bytes.
				newSize += c * (len(r.replacements[x[0]]) - 1)
				anyChanges = true
			}
		}
	} else {
		// Short input relative to the pattern count: a byte scan is cheaper.
		for i := 0; i < len(s); i++ {
			b := s[i]
			if r.replacements[b] != nil {
				// See above for explanation of -1
				newSize += len(r.replacements[b]) - 1
				anyChanges = true
			}
		}
	}
	if !anyChanges {
		return s
	}
	// Second pass: fill the exactly-sized output buffer.
	buf := make([]byte, newSize)
	j := 0
	for i := 0; i < len(s); i++ {
		b := s[i]
		if r.replacements[b] != nil {
			j += copy(buf[j:], r.replacements[b])
		} else {
			buf[j] = b
			j++
		}
	}
	return string(buf)
}
// WriteString writes s to w with all replacements performed, writing
// unchanged runs in single chunks and replacements directly from the table.
func (r *byteStringReplacer) WriteString(w io.Writer, s string) (n int, err error) {
	sw := getStringWriter(w)
	last := 0
	for i := 0; i < len(s); i++ {
		b := s[i]
		if r.replacements[b] == nil {
			continue
		}
		// Flush the unchanged run preceding this byte.
		if last != i {
			nw, err := sw.WriteString(s[last:i])
			n += nw
			if err != nil {
				return n, err
			}
		}
		last = i + 1
		nw, err := w.Write(r.replacements[b])
		n += nw
		if err != nil {
			return n, err
		}
	}
	// Flush the unchanged tail.
	if last != len(s) {
		var nw int
		nw, err = sw.WriteString(s[last:])
		n += nw
	}
	return
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package strings
// stringFinder efficiently finds strings in a source text. It's implemented
// using the Boyer-Moore string search algorithm:
// https://en.wikipedia.org/wiki/Boyer-Moore_string_search_algorithm
// https://www.cs.utexas.edu/~moore/publications/fstrpos.pdf (note: this aged
// document uses 1-based indexing)
//
// The two skip tables below are precomputed once per pattern and let the
// search advance by more than one byte after a mismatch.
type stringFinder struct {
	// pattern is the string that we are searching for in the text.
	pattern string

	// badCharSkip[b] contains the distance between the last byte of pattern
	// and the rightmost occurrence of b in pattern. If b is not in pattern,
	// badCharSkip[b] is len(pattern).
	//
	// Whenever a mismatch is found with byte b in the text, we can safely
	// shift the matching frame at least badCharSkip[b] until the next time
	// the matching char could be in alignment.
	badCharSkip [256]int

	// goodSuffixSkip[i] defines how far we can shift the matching frame given
	// that the suffix pattern[i+1:] matches, but the byte pattern[i] does
	// not. There are two cases to consider:
	//
	// 1. The matched suffix occurs elsewhere in pattern (with a different
	// byte preceding it that we might possibly match). In this case, we can
	// shift the matching frame to align with the next suffix chunk. For
	// example, the pattern "mississi" has the suffix "issi" next occurring
	// (in right-to-left order) at index 1, so goodSuffixSkip[3] ==
	// shift+len(suffix) == 3+4 == 7.
	//
	// 2. If the matched suffix does not occur elsewhere in pattern, then the
	// matching frame may share part of its prefix with the end of the
	// matching suffix. In this case, goodSuffixSkip[i] will contain how far
	// to shift the frame to align this portion of the prefix to the
	// suffix. For example, in the pattern "abcxxxabc", when the first
	// mismatch from the back is found to be in position 3, the matching
	// suffix "xxabc" is not found elsewhere in the pattern. However, its
	// rightmost "abc" (at position 6) is a prefix of the whole pattern, so
	// goodSuffixSkip[3] == shift+len(suffix) == 6+5 == 11.
	goodSuffixSkip []int
}
// makeStringFinder precomputes the Boyer-Moore skip tables for pattern.
func makeStringFinder(pattern string) *stringFinder {
	f := &stringFinder{
		pattern:        pattern,
		goodSuffixSkip: make([]int, len(pattern)),
	}
	// last is the index of the last character in the pattern.
	last := len(pattern) - 1

	// Build bad character table.
	// Bytes not in the pattern can skip one pattern's length.
	for i := range f.badCharSkip {
		f.badCharSkip[i] = len(pattern)
	}
	// The loop condition is < instead of <= so that the last byte does not
	// have a zero distance to itself. Finding this byte out of place implies
	// that it is not in the last position.
	for i := 0; i < last; i++ {
		f.badCharSkip[pattern[i]] = last - i
	}

	// Build good suffix table.
	// First pass: set each value to the next index which starts a prefix of
	// pattern.
	lastPrefix := last
	for i := last; i >= 0; i-- {
		if HasPrefix(pattern, pattern[i+1:]) {
			lastPrefix = i + 1
		}
		// lastPrefix is the shift, and (last-i) is len(suffix).
		f.goodSuffixSkip[i] = lastPrefix + last - i
	}
	// Second pass: find repeats of pattern's suffix starting from the front.
	for i := 0; i < last; i++ {
		// lenSuffix is the length of the suffix of pattern[:i+1] that also
		// ends the whole pattern.
		lenSuffix := longestCommonSuffix(pattern, pattern[1:i+1])
		if pattern[i-lenSuffix] != pattern[last-lenSuffix] {
			// (last-i) is the shift, and lenSuffix is len(suffix).
			f.goodSuffixSkip[last-lenSuffix] = lenSuffix + last - i
		}
	}
	return f
}
// longestCommonSuffix returns the length of the longest suffix shared by
// a and b, comparing bytes from the back of each string.
func longestCommonSuffix(a, b string) (i int) {
	for i < len(a) && i < len(b) && a[len(a)-1-i] == b[len(b)-1-i] {
		i++
	}
	return
}
// next returns the index in text of the first occurrence of the pattern. If
// the pattern is not found, it returns -1.
func (f *stringFinder) next(text string) int {
	// i is the text index aligned with the end of the pattern frame.
	i := len(f.pattern) - 1
	for i < len(text) {
		// Compare backwards from the end until the first unmatching character.
		j := len(f.pattern) - 1
		for j >= 0 && text[i] == f.pattern[j] {
			i--
			j--
		}
		if j < 0 {
			return i + 1 // match
		}
		// Advance by the larger of the two Boyer-Moore skip heuristics.
		i += max(f.badCharSkip[text[i]], f.goodSuffixSkip[j])
	}
	return -1
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package strings implements simple functions to manipulate UTF-8 encoded strings.
//
// For information about UTF-8 strings in Go, see https://blog.golang.org/strings.
package strings
import (
"internal/bytealg"
"internal/stringslite"
"math/bits"
"unicode"
"unicode/utf8"
)
const maxInt = int(^uint(0) >> 1)
// explode splits s into a slice of UTF-8 strings,
// one string per Unicode character up to a maximum of n (n < 0 means no limit).
// Invalid UTF-8 bytes are sliced individually.
func explode(s string, n int) []string {
	runes := utf8.RuneCountInString(s)
	if n < 0 || n > runes {
		n = runes
	}
	out := make([]string, n)
	// Peel one rune per slot; the final slot takes whatever remains.
	for i := range out {
		if i == n-1 {
			break
		}
		_, size := utf8.DecodeRuneInString(s)
		out[i] = s[:size]
		s = s[size:]
	}
	if n > 0 {
		out[n-1] = s
	}
	return out
}
// Count counts the number of non-overlapping instances of substr in s.
// If substr is an empty string, Count returns 1 + the number of Unicode code points in s.
func Count(s, substr string) int {
	switch len(substr) {
	case 0:
		// Empty pattern: one match before each rune plus one at the end.
		return utf8.RuneCountInString(s) + 1
	case 1:
		return bytealg.CountString(s, substr[0])
	}
	count := 0
	for {
		idx := Index(s, substr)
		if idx < 0 {
			return count
		}
		count++
		// Skip past the match so occurrences never overlap.
		s = s[idx+len(substr):]
	}
}
// Contains reports whether substr is within s.
func Contains(s, substr string) bool {
	return Index(s, substr) != -1
}
// ContainsAny reports whether any Unicode code points in chars are within s.
func ContainsAny(s, chars string) bool {
	return IndexAny(s, chars) != -1
}
// ContainsRune reports whether the Unicode code point r is within s.
func ContainsRune(s string, r rune) bool {
	return IndexRune(s, r) != -1
}
// ContainsFunc reports whether any Unicode code points r within s satisfy f(r).
// It stops as soon as a call to f returns true.
func ContainsFunc(s string, f func(rune) bool) bool {
	return IndexFunc(s, f) != -1
}
// LastIndex returns the index of the last instance of substr in s, or -1 if substr is not present in s.
func LastIndex(s, substr string) int {
	n := len(substr)
	if n == 0 {
		// Empty pattern matches at the very end.
		return len(s)
	}
	if n == 1 {
		return bytealg.LastIndexByteString(s, substr[0])
	}
	if n == len(s) {
		// Same length: either the whole string matches or nothing does.
		if substr == s {
			return 0
		}
		return -1
	}
	if n > len(s) {
		return -1
	}
	return bytealg.LastIndexRabinKarp(s, substr)
}
// IndexByte returns the index of the first instance of c in s, or -1 if c is not present in s.
func IndexByte(s string, c byte) int {
	// Delegates to the shared implementation in internal/stringslite.
	return stringslite.IndexByte(s, c)
}
// IndexRune returns the index of the first instance of the Unicode code point
// r, or -1 if rune is not present in s.
// If r is [utf8.RuneError], it returns the first instance of any
// invalid UTF-8 byte sequence.
func IndexRune(s string, r rune) int {
	const haveFastIndex = bytealg.MaxBruteForce > 0
	switch {
	case 0 <= r && r < utf8.RuneSelf:
		// ASCII rune: a plain byte search suffices.
		return IndexByte(s, byte(r))
	case r == utf8.RuneError:
		// Special contract: find the first invalid sequence (or literal U+FFFD).
		for i, r := range s {
			if r == utf8.RuneError {
				return i
			}
		}
		return -1
	case !utf8.ValidRune(r):
		return -1
	default:
		// Search for rune r using the last byte of its UTF-8 encoded form.
		// The distribution of the last byte is more uniform compared to the
		// first byte which has a 78% chance of being [240, 243, 244].
		rs := string(r)
		last := len(rs) - 1
		i := last
		fails := 0
		for i < len(s) {
			if s[i] != rs[last] {
				o := IndexByte(s[i+1:], rs[last])
				if o < 0 {
					return -1
				}
				i += o + 1
			}
			// Step backwards comparing bytes.
			for j := 1; j < len(rs); j++ {
				if s[i-j] != rs[last-j] {
					goto next
				}
			}
			return i - last
		next:
			// Too many false matches on the last byte: switch strategy.
			fails++
			i++
			if (haveFastIndex && fails > bytealg.Cutover(i)) && i < len(s) ||
				(!haveFastIndex && fails >= 4+i>>4 && i < len(s)) {
				goto fallback
			}
		}
		return -1

	fallback:
		// see comment in ../bytes/bytes.go
		if haveFastIndex {
			if j := bytealg.IndexString(s[i-last:], string(r)); j >= 0 {
				return i + j - last
			}
		} else {
			// Brute force using the last two bytes to cut false positives.
			c0 := rs[last]
			c1 := rs[last-1]
		loop:
			for ; i < len(s); i++ {
				if s[i] == c0 && s[i-1] == c1 {
					for k := 2; k < len(rs); k++ {
						if s[i-k] != rs[last-k] {
							continue loop
						}
					}
					return i - last
				}
			}
		}
		return -1
	}
}
// IndexAny returns the index of the first instance of any Unicode code point
// from chars in s, or -1 if no Unicode code point from chars is present in s.
func IndexAny(s, chars string) int {
	switch {
	case chars == "":
		// No candidates: avoid scanning all of s.
		return -1
	case len(chars) == 1:
		// One candidate byte: defer to IndexRune, mapping a non-ASCII
		// lead byte to RuneError per the IndexRune contract.
		r := rune(chars[0])
		if r >= utf8.RuneSelf {
			r = utf8.RuneError
		}
		return IndexRune(s, r)
	}
	if shouldUseASCIISet(len(s)) {
		if set, ok := makeASCIISet(chars); ok {
			// All-ASCII candidates: O(1) membership test per byte.
			for i := 0; i < len(s); i++ {
				if set.contains(s[i]) {
					return i
				}
			}
			return -1
		}
	}
	// General case: rune-by-rune membership probe.
	for i, r := range s {
		if IndexRune(chars, r) >= 0 {
			return i
		}
	}
	return -1
}
// LastIndexAny returns the index of the last instance of any Unicode code
// point from chars in s, or -1 if no Unicode code point from chars is
// present in s.
func LastIndexAny(s, chars string) int {
	if chars == "" {
		// No candidates: avoid scanning all of s.
		return -1
	}
	if len(s) == 1 {
		// One-byte haystack: a single membership test decides it.
		r := rune(s[0])
		if r >= utf8.RuneSelf {
			r = utf8.RuneError
		}
		if IndexRune(chars, r) >= 0 {
			return 0
		}
		return -1
	}
	if shouldUseASCIISet(len(s)) {
		if set, ok := makeASCIISet(chars); ok {
			// All-ASCII candidates: scan bytes from the back.
			for i := len(s) - 1; i >= 0; i-- {
				if set.contains(s[i]) {
					return i
				}
			}
			return -1
		}
	}
	if len(chars) == 1 {
		// One candidate rune: compare directly while decoding backwards.
		want := rune(chars[0])
		if want >= utf8.RuneSelf {
			want = utf8.RuneError
		}
		for i := len(s); i > 0; {
			r, size := utf8.DecodeLastRuneInString(s[:i])
			i -= size
			if r == want {
				return i
			}
		}
		return -1
	}
	// General case: decode backwards, probing chars for each rune.
	for i := len(s); i > 0; {
		r, size := utf8.DecodeLastRuneInString(s[:i])
		i -= size
		if IndexRune(chars, r) >= 0 {
			return i
		}
	}
	return -1
}
// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
func LastIndexByte(s string, c byte) int {
	// Delegates to the optimized scan in internal/bytealg.
	return bytealg.LastIndexByteString(s, c)
}
// genSplit is the generic splitter: it splits after each instance of sep,
// including sepSave bytes of sep in the subarrays. n bounds the number of
// substrings (n < 0 means all).
func genSplit(s, sep string, sepSave, n int) []string {
	if n == 0 {
		return nil
	}
	if sep == "" {
		return explode(s, n)
	}
	if n < 0 {
		n = Count(s, sep) + 1
	}
	if n > len(s)+1 {
		// Cannot produce more pieces than bytes plus one.
		n = len(s) + 1
	}

	out := make([]string, n)
	i := 0
	// Fill all but the last slot; the last slot takes the remainder.
	for i < n-1 {
		m := Index(s, sep)
		if m < 0 {
			break
		}
		out[i] = s[:m+sepSave]
		s = s[m+len(sep):]
		i++
	}
	out[i] = s
	return out[:i+1]
}
// SplitN slices s into substrings separated by sep and returns a slice of
// the substrings between those separators.
//
// The count determines the number of substrings to return:
//   - n > 0: at most n substrings; the last substring will be the unsplit remainder;
//   - n == 0: the result is nil (zero substrings);
//   - n < 0: all substrings.
//
// Edge cases for s and sep (for example, empty strings) are handled
// as described in the documentation for [Split].
//
// To split around the first instance of a separator, see [Cut].
func SplitN(s, sep string, n int) []string {
	// Separators are dropped (sepSave == 0).
	return genSplit(s, sep, 0, n)
}
// SplitAfterN slices s into substrings after each instance of sep and
// returns a slice of those substrings.
//
// The count determines the number of substrings to return:
//   - n > 0: at most n substrings; the last substring will be the unsplit remainder;
//   - n == 0: the result is nil (zero substrings);
//   - n < 0: all substrings.
//
// Edge cases for s and sep (for example, empty strings) are handled
// as described in the documentation for [SplitAfter].
func SplitAfterN(s, sep string, n int) []string {
	// Keep the full separator with the preceding substring.
	sepSave := len(sep)
	return genSplit(s, sep, sepSave, n)
}
// Split slices s into all substrings separated by sep and returns a slice of
// the substrings between those separators.
//
// If s does not contain sep and sep is not empty, Split returns a
// slice of length 1 whose only element is s.
//
// If sep is empty, Split splits after each UTF-8 sequence. If both s
// and sep are empty, Split returns an empty slice.
//
// It is equivalent to [SplitN] with a count of -1.
//
// To split around the first instance of a separator, see [Cut].
func Split(s, sep string) []string {
	// Unlimited count, separators dropped.
	return genSplit(s, sep, 0, -1)
}
// SplitAfter slices s into all substrings after each instance of sep and
// returns a slice of those substrings.
//
// If s does not contain sep and sep is not empty, SplitAfter returns
// a slice of length 1 whose only element is s.
//
// If sep is empty, SplitAfter splits after each UTF-8 sequence. If
// both s and sep are empty, SplitAfter returns an empty slice.
//
// It is equivalent to [SplitAfterN] with a count of -1.
func SplitAfter(s, sep string) []string {
	// Unlimited count, keeping each separator with its substring.
	sepSave := len(sep)
	return genSplit(s, sep, sepSave, -1)
}
var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
// Fields splits the string s around each instance of one or more consecutive white space
// characters, as defined by [unicode.IsSpace], returning a slice of substrings of s or an
// empty slice if s contains only white space. Every element of the returned slice is
// non-empty. Unlike [Split], leading and trailing runs of white space characters
// are discarded.
func Fields(s string) []string {
	// First count the fields.
	// This is an exact count if s is ASCII, otherwise it is an approximation.
	n := 0
	wasSpace := 1
	// setBits is used to track which bits are set in the bytes of s.
	setBits := uint8(0)
	for i := 0; i < len(s); i++ {
		r := s[i]
		setBits |= r
		isSpace := int(asciiSpace[r])
		// Increments n exactly when a non-space byte follows a space
		// (or the start of the string): wasSpace & ^isSpace is 1 only then.
		n += wasSpace & ^isSpace
		wasSpace = isSpace
	}

	if setBits >= utf8.RuneSelf {
		// Some runes in the input string are not ASCII.
		return FieldsFunc(s, unicode.IsSpace)
	}
	// ASCII fast path: n is exact, so a is sized exactly.
	a := make([]string, n)
	na := 0
	fieldStart := 0
	i := 0
	// Skip spaces in the front of the input.
	for i < len(s) && asciiSpace[s[i]] != 0 {
		i++
	}
	fieldStart = i
	for i < len(s) {
		if asciiSpace[s[i]] == 0 {
			i++
			continue
		}
		a[na] = s[fieldStart:i]
		na++
		i++
		// Skip spaces in between fields.
		for i < len(s) && asciiSpace[s[i]] != 0 {
			i++
		}
		fieldStart = i
	}
	if fieldStart < len(s) { // Last field might end at EOF.
		a[na] = s[fieldStart:]
	}
	return a
}
// FieldsFunc splits the string s at each run of Unicode code points c satisfying f(c)
// and returns an array of slices of s. If all code points in s satisfy f(c) or the
// string is empty, an empty slice is returned. Every element of the returned slice is
// non-empty. Unlike [Split], leading and trailing runs of code points satisfying f(c)
// are discarded.
//
// FieldsFunc makes no guarantees about the order in which it calls f(c)
// and assumes that f always returns the same value for a given c.
func FieldsFunc(s string, f func(rune) bool) []string {
	// A span records one field as the half-open byte range s[start:end].
	type span struct {
		start int
		end   int
	}
	spans := make([]span, 0, 32)

	// First pass: record field boundaries. Collecting indices before
	// materializing substrings is significantly faster, possibly due to
	// cache effects.
	start := -1 // >= 0 while inside a field
	for end, r := range s {
		if !f(r) {
			if start < 0 {
				start = end
			}
			continue
		}
		if start >= 0 {
			spans = append(spans, span{start, end})
			// Flip to a negative sentinel. (Assigning the constant -1 here
			// reproducibly costs a few percent on amd64.)
			start = ^start
		}
	}

	// Last field might end at EOF.
	if start >= 0 {
		spans = append(spans, span{start, len(s)})
	}

	// Second pass: slice out the recorded fields.
	out := make([]string, len(spans))
	for i, sp := range spans {
		out[i] = s[sp.start:sp.end]
	}
	return out
}
// Join concatenates the elements of its first argument to create a single string. The separator
// string sep is placed between elements in the resulting string.
func Join(elems []string, sep string) string {
	if len(elems) == 0 {
		return ""
	}
	if len(elems) == 1 {
		return elems[0]
	}

	// Compute the exact output size, panicking on int overflow.
	var size int
	if len(sep) > 0 {
		if len(sep) >= maxInt/(len(elems)-1) {
			panic("strings: Join output length overflow")
		}
		size += len(sep) * (len(elems) - 1)
	}
	for _, elem := range elems {
		if len(elem) > maxInt-size {
			panic("strings: Join output length overflow")
		}
		size += len(elem)
	}

	var b Builder
	b.Grow(size)
	b.WriteString(elems[0])
	for _, elem := range elems[1:] {
		b.WriteString(sep)
		b.WriteString(elem)
	}
	return b.String()
}
// HasPrefix reports whether the string s begins with prefix.
func HasPrefix(s, prefix string) bool {
	// Delegates to the shared implementation in internal/stringslite.
	return stringslite.HasPrefix(s, prefix)
}
// HasSuffix reports whether the string s ends with suffix.
func HasSuffix(s, suffix string) bool {
	// Delegates to the shared implementation in internal/stringslite.
	return stringslite.HasSuffix(s, suffix)
}
// Map returns a copy of the string s with all its characters modified
// according to the mapping function. If mapping returns a negative value, the character is
// dropped from the string with no replacement.
func Map(mapping func(rune) rune, s string) string {
	// In the worst case, the string can grow when mapped, making
	// things unpleasant. But it's so rare we barge in assuming it's
	// fine. It could also shrink but that falls out naturally.

	// The output buffer b is initialized on demand, the first
	// time a character differs.
	var b Builder

	for i, c := range s {
		r := mapping(c)
		if r == c && c != utf8.RuneError {
			continue
		}

		var width int
		if c == utf8.RuneError {
			// c may be a literal U+FFFD or a decode error; re-decode to
			// tell them apart (width 1 means an invalid byte).
			c, width = utf8.DecodeRuneInString(s[i:])
			if width != 1 && r == c {
				continue
			}
		} else {
			width = utf8.RuneLen(c)
		}

		// First difference found: copy the unchanged prefix and switch to
		// the buffered path below.
		b.Grow(len(s) + utf8.UTFMax)
		b.WriteString(s[:i])
		if r >= 0 {
			b.WriteRune(r)
		}

		s = s[i+width:]
		break
	}

	// Fast path for unchanged input
	if b.Cap() == 0 { // didn't call b.Grow above
		return s
	}

	for _, c := range s {
		r := mapping(c)

		if r >= 0 {
			// common case
			// Due to inlining, it is more performant to determine if WriteByte should be
			// invoked rather than always call WriteRune
			if r < utf8.RuneSelf {
				b.WriteByte(byte(r))
			} else {
				// r is not an ASCII rune.
				b.WriteRune(r)
			}
		}
	}

	return b.String()
}
// According to static analysis, spaces, dashes, zeros, equals, and tabs
// are the most commonly repeated string literal,
// often used for display on fixed-width terminal windows.
// Pre-declare constants for these for O(1) repetition in the common-case.
// Repeat slices a prefix of the matching constant instead of building
// the result.
const (
	repeatedSpaces = "" +
		" " +
		" "
	repeatedDashes = "" +
		"----------------------------------------------------------------" +
		"----------------------------------------------------------------"
	repeatedZeroes = "" +
		"0000000000000000000000000000000000000000000000000000000000000000"
	repeatedEquals = "" +
		"================================================================" +
		"================================================================"
	repeatedTabs = "" +
		"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t" +
		"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
)
// Repeat returns a new string consisting of count copies of the string s.
//
// It panics if count is negative or if the result of (len(s) * count)
// overflows.
func Repeat(s string, count int) string {
	switch count {
	case 0:
		return ""
	case 1:
		return s
	}

	// Since we cannot return an error on overflow,
	// we should panic if the repeat will generate an overflow.
	// See golang.org/issue/16237.
	if count < 0 {
		panic("strings: negative Repeat count")
	}
	// Full-width multiply so the overflow check is exact.
	hi, lo := bits.Mul(uint(len(s)), uint(count))
	if hi > 0 || lo > uint(maxInt) {
		panic("strings: Repeat output length overflow")
	}
	n := int(lo) // lo = len(s) * count

	if len(s) == 0 {
		return ""
	}

	// Optimize for commonly repeated strings of relatively short length.
	switch s[0] {
	case ' ', '-', '0', '=', '\t':
		switch {
		case n <= len(repeatedSpaces) && HasPrefix(repeatedSpaces, s):
			return repeatedSpaces[:n]
		case n <= len(repeatedDashes) && HasPrefix(repeatedDashes, s):
			return repeatedDashes[:n]
		case n <= len(repeatedZeroes) && HasPrefix(repeatedZeroes, s):
			return repeatedZeroes[:n]
		case n <= len(repeatedEquals) && HasPrefix(repeatedEquals, s):
			return repeatedEquals[:n]
		case n <= len(repeatedTabs) && HasPrefix(repeatedTabs, s):
			return repeatedTabs[:n]
		}
	}

	// Past a certain chunk size it is counterproductive to use
	// larger chunks as the source of the write, as when the source
	// is too large we are basically just thrashing the CPU D-cache.
	// So if the result length is larger than an empirically-found
	// limit (8KB), we stop growing the source string once the limit
	// is reached and keep reusing the same source string - that
	// should therefore be always resident in the L1 cache - until we
	// have completed the construction of the result.
	// This yields significant speedups (up to +100%) in cases where
	// the result length is large (roughly, over L2 cache size).
	const chunkLimit = 8 * 1024
	chunkMax := n
	if n > chunkLimit {
		// Round the chunk down to a whole number of copies of s.
		chunkMax = chunkLimit / len(s) * len(s)
		if chunkMax == 0 {
			chunkMax = len(s)
		}
	}

	var b Builder
	b.Grow(n)
	b.WriteString(s)
	for b.Len() < n {
		// Doubling copy out of the builder's own contents, capped at chunkMax.
		chunk := min(n-b.Len(), b.Len(), chunkMax)
		b.WriteString(b.String()[:chunk])
	}
	return b.String()
}
// ToUpper returns s with all Unicode letters mapped to their upper case.
func ToUpper(s string) string {
isASCII, hasLower := true, false
for i := 0; i < len(s); i++ {
c := s[i]
if c >= utf8.RuneSelf {
isASCII = false
break
}
hasLower = hasLower || ('a' <= c && c <= 'z')
}
if isASCII { // optimize for ASCII-only strings.
if !hasLower {
return s
}
var (
b Builder
pos int
)
b.Grow(len(s))
for i := 0; i < len(s); i++ {
c := s[i]
if 'a' <= c && c <= 'z' {
c -= 'a' - 'A'
if pos < i {
b.WriteString(s[pos:i])
}
b.WriteByte(c)
pos = i + 1
}
}
if pos < len(s) {
b.WriteString(s[pos:])
}
return b.String()
}
return Map(unicode.ToUpper, s)
}
// ToLower returns s with all Unicode letters mapped to their lower case.
func ToLower(s string) string {
isASCII, hasUpper := true, false
for i := 0; i < len(s); i++ {
c := s[i]
if c >= utf8.RuneSelf {
isASCII = false
break
}
hasUpper = hasUpper || ('A' <= c && c <= 'Z')
}
if isASCII { // optimize for ASCII-only strings.
if !hasUpper {
return s
}
var (
b Builder
pos int
)
b.Grow(len(s))
for i := 0; i < len(s); i++ {
c := s[i]
if 'A' <= c && c <= 'Z' {
c += 'a' - 'A'
if pos < i {
b.WriteString(s[pos:i])
}
b.WriteByte(c)
pos = i + 1
}
}
if pos < len(s) {
b.WriteString(s[pos:])
}
return b.String()
}
return Map(unicode.ToLower, s)
}
// ToTitle returns a copy of the string s with all Unicode letters mapped to
// their Unicode title case.
func ToTitle(s string) string {
	return Map(unicode.ToTitle, s)
}
// ToUpperSpecial returns a copy of the string s with all Unicode letters mapped to their
// upper case using the case mapping specified by c.
func ToUpperSpecial(c unicode.SpecialCase, s string) string {
	// c.ToUpper consults c's rules before the default Unicode mappings.
	return Map(c.ToUpper, s)
}
// ToLowerSpecial returns a copy of the string s with all Unicode letters mapped to their
// lower case using the case mapping specified by c.
func ToLowerSpecial(c unicode.SpecialCase, s string) string {
	// c.ToLower consults c's rules before the default Unicode mappings.
	return Map(c.ToLower, s)
}
// ToTitleSpecial returns a copy of the string s with all Unicode letters mapped to their
// Unicode title case, giving priority to the special casing rules.
func ToTitleSpecial(c unicode.SpecialCase, s string) string {
	// c.ToTitle consults c's rules before the default Unicode mappings.
	return Map(c.ToTitle, s)
}
// ToValidUTF8 returns a copy of the string s with each run of invalid UTF-8 byte sequences
// replaced by the replacement string, which may be empty.
func ToValidUTF8(s, replacement string) string {
	var b Builder

	// First pass: find the first invalid byte, if any; until then the
	// input can be returned unchanged.
	for i, c := range s {
		if c != utf8.RuneError {
			continue
		}

		// RuneError may be a literal U+FFFD; only width-1 decodes are
		// actually invalid bytes.
		_, wid := utf8.DecodeRuneInString(s[i:])
		if wid == 1 {
			b.Grow(len(s) + len(replacement))
			b.WriteString(s[:i])
			s = s[i:]
			break
		}
	}

	// Fast path for unchanged input
	if b.Cap() == 0 { // didn't call b.Grow above
		return s
	}

	// Second pass: copy valid sequences, collapsing each run of invalid
	// bytes into a single replacement.
	invalid := false // previous byte was from an invalid UTF-8 sequence
	for i := 0; i < len(s); {
		c := s[i]
		if c < utf8.RuneSelf {
			i++
			invalid = false
			b.WriteByte(c)
			continue
		}
		_, wid := utf8.DecodeRuneInString(s[i:])
		if wid == 1 {
			i++
			if !invalid {
				invalid = true
				b.WriteString(replacement)
			}
			continue
		}
		invalid = false
		b.WriteString(s[i : i+wid])
		i += wid
	}

	return b.String()
}
// isSeparator reports whether the rune could mark a word boundary.
// TODO: update when package unicode captures more of the properties.
func isSeparator(r rune) bool {
	if r <= 0x7F {
		// ASCII alphanumerics and underscore are not separators.
		switch {
		case '0' <= r && r <= '9',
			'a' <= r && r <= 'z',
			'A' <= r && r <= 'Z',
			r == '_':
			return false
		}
		// Every other ASCII rune is a separator.
		return true
	}
	// Letters and digits are not separators.
	if unicode.IsLetter(r) || unicode.IsDigit(r) {
		return false
	}
	// Otherwise, all we can do for now is treat spaces as separators.
	return unicode.IsSpace(r)
}
// Title returns a copy of the string s with all Unicode letters that begin words
// mapped to their Unicode title case.
//
// Deprecated: The rule Title uses for word boundaries does not handle Unicode
// punctuation properly. Use golang.org/x/text/cases instead.
func Title(s string) string {
// Use a closure here to remember state.
// Hackish but effective. Depends on Map scanning in order and calling
// the closure once per rune.
prev := ' '
return Map(
func(r rune) rune {
if isSeparator(prev) {
prev = r
return unicode.ToTitle(r)
}
prev = r
return r
},
s)
}
// TrimLeftFunc returns a slice of the string s with all leading
// Unicode code points c satisfying f(c) removed.
func TrimLeftFunc(s string, f func(rune) bool) string {
	// First rune NOT satisfying f starts the result.
	start := indexFunc(s, f, false)
	if start < 0 {
		return ""
	}
	return s[start:]
}
// TrimRightFunc returns a slice of the string s with all trailing
// Unicode code points c satisfying f(c) removed.
func TrimRightFunc(s string, f func(rune) bool) string {
	// Last rune NOT satisfying f ends the result.
	i := lastIndexFunc(s, f, false)
	if i < 0 {
		return ""
	}
	// Keep that rune: advance past its full UTF-8 encoding.
	_, wid := utf8.DecodeRuneInString(s[i:])
	return s[:i+wid]
}
// TrimFunc returns a slice of the string s with all leading
// and trailing Unicode code points c satisfying f(c) removed.
func TrimFunc(s string, f func(rune) bool) string {
return TrimRightFunc(TrimLeftFunc(s, f), f)
}
// IndexFunc returns the index into s of the first Unicode
// code point satisfying f(c), or -1 if none do.
func IndexFunc(s string, f func(rune) bool) int {
	// truth=true: look for the first rune where f reports true.
	return indexFunc(s, f, true)
}
// LastIndexFunc returns the index into s of the last
// Unicode code point satisfying f(c), or -1 if none do.
func LastIndexFunc(s string, f func(rune) bool) int {
	// truth=true: look for the last rune where f reports true.
	return lastIndexFunc(s, f, true)
}
// indexFunc is the same as IndexFunc except that if
// truth==false, the sense of the predicate function is
// inverted.
func indexFunc(s string, f func(rune) bool, truth bool) int {
	// Walk runes front to back and return the byte index of the first
	// rune whose predicate result equals truth.
	for byteIdx, c := range s {
		if f(c) == truth {
			return byteIdx
		}
	}
	return -1
}
// lastIndexFunc is the same as LastIndexFunc except that if
// truth==false, the sense of the predicate function is
// inverted.
func lastIndexFunc(s string, f func(rune) bool, truth bool) int {
	// Decode runes back to front; end tracks the byte index of the rune
	// currently under test.
	end := len(s)
	for end > 0 {
		r, size := utf8.DecodeLastRuneInString(s[:end])
		end -= size
		if f(r) == truth {
			return end
		}
	}
	return -1
}
// asciiSet is a 256-byte lookup table for fast ASCII character membership
// testing; element b is true iff byte b is in the set. bool entries let the
// compiler skip a comparison (bools are guaranteed 0 or 1), and the full
// 256-entry table lets lookups index directly with no bounds check, branch,
// or mask — measured alternatives ([128]bool with a bounds branch or with
// c&0x7f masking) were respectively ~70% and ~10% slower, so the extra 128
// bytes are a worthwhile tradeoff.
type asciiSet [256]bool

// makeASCIISet creates a set of ASCII characters and reports whether all
// characters in chars are ASCII.
func makeASCIISet(chars string) (as asciiSet, ok bool) {
	for _, c := range []byte(chars) {
		if c >= utf8.RuneSelf {
			// A non-ASCII byte invalidates the whole table.
			return as, false
		}
		as[c] = true
	}
	return as, true
}

// contains reports whether c is inside the set.
func (as *asciiSet) contains(c byte) bool {
	return as[c]
}

// shouldUseASCIISet returns whether to use the lookup table optimization.
// The threshold of 8 bytes balances initialization cost against per-byte
// search cost, performing well across all charset sizes.
//
// More complex heuristics (e.g. different thresholds per charset size)
// add branching overhead that eats away any theoretical improvements.
func shouldUseASCIISet(bufLen int) bool {
	const threshold = 8
	return bufLen > threshold
}
// Trim returns a slice of the string s with all leading and
// trailing Unicode code points contained in cutset removed.
func Trim(s, cutset string) string {
	if s == "" || cutset == "" {
		return s
	}
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		// Single ASCII byte: cheapest path.
		b := cutset[0]
		return trimLeftByte(trimRightByte(s, b), b)
	}
	if set, ok := makeASCIISet(cutset); ok {
		// All-ASCII cutset: table-driven trimming.
		return trimLeftASCII(trimRightASCII(s, &set), &set)
	}
	// General Unicode cutset.
	return trimLeftUnicode(trimRightUnicode(s, cutset), cutset)
}
// TrimLeft returns a slice of the string s with all leading
// Unicode code points contained in cutset removed.
//
// To remove a prefix, use [TrimPrefix] instead.
func TrimLeft(s, cutset string) string {
	if s == "" || cutset == "" {
		return s
	}
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		// Single ASCII byte: cheapest path.
		return trimLeftByte(s, cutset[0])
	}
	if set, ok := makeASCIISet(cutset); ok {
		// All-ASCII cutset: table-driven trimming.
		return trimLeftASCII(s, &set)
	}
	// General Unicode cutset.
	return trimLeftUnicode(s, cutset)
}
// trimLeftByte removes every leading occurrence of the byte c from s.
func trimLeftByte(s string, c byte) string {
	i := 0
	for i < len(s) && s[i] == c {
		i++
	}
	return s[i:]
}
// trimLeftASCII removes every leading byte of s that is in the set as.
func trimLeftASCII(s string, as *asciiSet) string {
	for len(s) > 0 && as.contains(s[0]) {
		s = s[1:]
	}
	return s
}
// trimLeftUnicode removes every leading rune of s contained in cutset,
// decoding one rune at a time.
func trimLeftUnicode(s, cutset string) string {
	for len(s) > 0 {
		r, n := utf8.DecodeRuneInString(s)
		if !ContainsRune(cutset, r) {
			return s
		}
		s = s[n:]
	}
	return s
}
// TrimRight returns a slice of the string s, with all trailing
// Unicode code points contained in cutset removed.
//
// To remove a suffix, use [TrimSuffix] instead.
func TrimRight(s, cutset string) string {
	if s == "" || cutset == "" {
		return s
	}
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		// Single ASCII byte: cheapest path.
		return trimRightByte(s, cutset[0])
	}
	if set, ok := makeASCIISet(cutset); ok {
		// All-ASCII cutset: table-driven trimming.
		return trimRightASCII(s, &set)
	}
	// General Unicode cutset.
	return trimRightUnicode(s, cutset)
}
// trimRightByte removes every trailing occurrence of the byte c from s.
func trimRightByte(s string, c byte) string {
	end := len(s)
	for end > 0 && s[end-1] == c {
		end--
	}
	return s[:end]
}
// trimRightASCII removes every trailing byte of s that is in the set as.
func trimRightASCII(s string, as *asciiSet) string {
	for len(s) > 0 && as.contains(s[len(s)-1]) {
		s = s[:len(s)-1]
	}
	return s
}
// trimRightUnicode removes every trailing rune of s contained in cutset,
// decoding backwards one rune at a time.
func trimRightUnicode(s, cutset string) string {
	for len(s) > 0 {
		// Fast path: a trailing ASCII byte needs no DecodeLastRune call.
		r, n := rune(s[len(s)-1]), 1
		if r >= utf8.RuneSelf {
			r, n = utf8.DecodeLastRuneInString(s)
		}
		if !ContainsRune(cutset, r) {
			return s
		}
		s = s[:len(s)-n]
	}
	return s
}
// TrimSpace returns a slice (substring) of the string s,
// with all leading and trailing white space removed,
// as defined by Unicode.
func TrimSpace(s string) string {
	// Fast path for ASCII: look for the first ASCII non-space byte.
	for lo, c := range []byte(s) {
		if c >= utf8.RuneSelf {
			// If we run into a non-ASCII byte, fall back to the
			// slower unicode-aware method on the remaining bytes.
			return TrimFunc(s[lo:], unicode.IsSpace)
		}
		if asciiSpace[c] != 0 {
			continue
		}
		// c is the first non-space byte; drop the leading spaces.
		s = s[lo:]
		// Now look for the first ASCII non-space byte from the end.
		for hi := len(s) - 1; hi >= 0; hi-- {
			c := s[hi]
			if c >= utf8.RuneSelf {
				// Non-ASCII near the end: only the right side still
				// needs the unicode-aware trim.
				return TrimRightFunc(s[:hi+1], unicode.IsSpace)
			}
			if asciiSpace[c] == 0 {
				// At this point, s[:hi+1] starts and ends with ASCII
				// non-space bytes, so we're done. Non-ASCII cases have
				// already been handled above.
				return s[:hi+1]
			}
		}
	}
	// The loop fell through: s was empty or all ASCII space.
	return ""
}
// TrimPrefix returns s without the provided leading prefix string.
// If s doesn't start with prefix, s is returned unchanged.
func TrimPrefix(s, prefix string) string {
	// Delegates to the shared implementation in internal/stringslite.
	return stringslite.TrimPrefix(s, prefix)
}
// TrimSuffix returns s without the provided trailing suffix string.
// If s doesn't end with suffix, s is returned unchanged.
func TrimSuffix(s, suffix string) string {
	// Delegates to the shared implementation in internal/stringslite.
	return stringslite.TrimSuffix(s, suffix)
}
// Replace returns a copy of the string s with the first n
// non-overlapping instances of old replaced by new.
// If old is empty, it matches at the beginning of the string
// and after each UTF-8 sequence, yielding up to k+1 replacements
// for a k-rune string.
// If n < 0, there is no limit on the number of replacements.
func Replace(s, old, new string, n int) string {
	if old == new || n == 0 {
		return s // avoid allocation
	}

	// Compute number of replacements.
	m := Count(s, old)
	if m == 0 {
		return s // avoid allocation
	}
	if n < 0 || m < n {
		n = m
	}

	// Apply replacements to buffer.
	var b Builder
	b.Grow(len(s) + n*(len(new)-len(old)))
	start := 0
	if len(old) == 0 {
		// Empty pattern: insert new at the start and after each of the
		// first n-1 runes.
		b.WriteString(new)
		for i := 0; i < n-1; i++ {
			_, wid := utf8.DecodeRuneInString(s[start:])
			b.WriteString(s[start : start+wid])
			b.WriteString(new)
			start += wid
		}
	} else {
		for i := 0; i < n; i++ {
			j := start + Index(s[start:], old)
			b.WriteString(s[start:j])
			b.WriteString(new)
			start = j + len(old)
		}
	}
	b.WriteString(s[start:])
	return b.String()
}
// ReplaceAll returns a copy of the string s with all
// non-overlapping instances of old replaced by new.
// If old is empty, it matches at the beginning of the string
// and after each UTF-8 sequence, yielding up to k+1 replacements
// for a k-rune string.
//
// ReplaceAll is equivalent to [Replace] with n = -1 (no limit).
func ReplaceAll(s, old, new string) string {
	return Replace(s, old, new, -1)
}
// EqualFold reports whether s and t, interpreted as UTF-8 strings,
// are equal under simple Unicode case-folding, which is a more general
// form of case-insensitivity.
func EqualFold(s, t string) bool {
	// Walk both strings byte-by-byte while they remain pure ASCII;
	// this avoids rune decoding for the common case.
	i := 0
	for n := min(len(s), len(t)); i < n; i++ {
		a := s[i]
		b := t[i]
		if a|b >= utf8.RuneSelf {
			goto unicodeSlow
		}
		if a == b {
			continue
		}
		// Order the pair so a <= b; then a case-insensitive match
		// requires a to be the upper-case form of b.
		if b < a {
			a, b = b, a
		}
		if 'A' <= a && a <= 'Z' && b == a+'a'-'A' {
			continue
		}
		return false
	}
	// All compared bytes matched; equal iff neither string has a tail.
	return len(s) == len(t)

unicodeSlow:
	// Non-ASCII encountered: fall back to rune-at-a-time folding on
	// the unprocessed tails.
	s = s[i:]
	t = t[i:]
	for _, a := range s {
		// If t runs out first the strings cannot be equal.
		if len(t) == 0 {
			return false
		}

		// Take the next rune off t.
		b, size := utf8.DecodeRuneInString(t)
		t = t[size:]

		if a == b {
			continue
		}

		// Order the pair so a < b, simplifying the checks below.
		if b < a {
			a, b = b, a
		}

		if b < utf8.RuneSelf {
			// Both ASCII: upper/lower pair is the only legal mismatch.
			if 'A' <= a && a <= 'Z' && b == a+'a'-'A' {
				continue
			}
			return false
		}

		// General case: SimpleFold(x) returns the next equivalent rune > x
		// or wraps around to smaller values, so iterate until we either
		// land on b or pass it.
		r := unicode.SimpleFold(a)
		for r != a && r < b {
			r = unicode.SimpleFold(r)
		}
		if r != b {
			return false
		}
	}

	// s is exhausted; equal iff t is too.
	return len(t) == 0
}
// Index returns the index of the first instance of substr in s, or -1 if substr is not present in s.
//
// The search itself lives in internal/stringslite so that other
// low-level packages can share the implementation.
func Index(s, substr string) int {
	return stringslite.Index(s, substr)
}
// Cut slices s around the first instance of sep,
// returning the text before and after sep.
// The found result reports whether sep appears in s.
// If sep does not appear in s, cut returns s, "", false.
//
// Delegates to the shared internal/stringslite implementation.
func Cut(s, sep string) (before, after string, found bool) {
	return stringslite.Cut(s, sep)
}
// CutPrefix returns s without the provided leading prefix string
// and reports whether it found the prefix.
// If s doesn't start with prefix, CutPrefix returns s, false.
// If prefix is the empty string, CutPrefix returns s, true.
func CutPrefix(s, prefix string) (after string, found bool) {
	if len(s) < len(prefix) || s[:len(prefix)] != prefix {
		return s, false
	}
	return s[len(prefix):], true
}
// CutSuffix returns s without the provided ending suffix string
// and reports whether it found the suffix.
// If s doesn't end with suffix, CutSuffix returns s, false.
// If suffix is the empty string, CutSuffix returns s, true.
func CutSuffix(s, suffix string) (before string, found bool) {
	if len(s) < len(suffix) || s[len(s)-len(suffix):] != suffix {
		return s, false
	}
	return s[:len(s)-len(suffix)], true
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package iotest
import (
"io"
"log"
)
// writeLogger wraps an io.Writer and logs every Write call; see
// NewWriteLogger.
type writeLogger struct {
	prefix string    // prefix printed before the hex dump of each write
	w      io.Writer // underlying writer
}
// Write forwards p to the underlying writer and logs the bytes that
// were actually written (and the error, if any).
func (l *writeLogger) Write(p []byte) (n int, err error) {
	n, err = l.w.Write(p)
	if err == nil {
		log.Printf("%s %x", l.prefix, p[0:n])
		return
	}
	log.Printf("%s %x: %v", l.prefix, p[0:n], err)
	return
}
// NewWriteLogger returns a writer that behaves like w except
// that it logs (using [log.Printf]) each write to standard error,
// printing the prefix and the hexadecimal data written.
func NewWriteLogger(prefix string, w io.Writer) io.Writer {
	return &writeLogger{prefix: prefix, w: w}
}
// readLogger wraps an io.Reader and logs every Read call; see
// NewReadLogger.
type readLogger struct {
	prefix string    // prefix printed before the hex dump of each read
	r      io.Reader // underlying reader
}
// Read fills p from the underlying reader and logs the bytes that
// were actually read (and the error, if any).
func (l *readLogger) Read(p []byte) (n int, err error) {
	n, err = l.r.Read(p)
	if err == nil {
		log.Printf("%s %x", l.prefix, p[0:n])
		return
	}
	log.Printf("%s %x: %v", l.prefix, p[0:n], err)
	return
}
// NewReadLogger returns a reader that behaves like r except
// that it logs (using [log.Printf]) each read to standard error,
// printing the prefix and the hexadecimal data read.
func NewReadLogger(prefix string, r io.Reader) io.Reader {
	return &readLogger{prefix: prefix, r: r}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package iotest implements Readers and Writers useful mainly for testing.
package iotest
import (
"bytes"
"errors"
"fmt"
"io"
)
// OneByteReader returns a Reader that implements
// each non-empty Read by reading one byte from r.
func OneByteReader(r io.Reader) io.Reader {
	return &oneByteReader{r: r}
}
// oneByteReader limits each Read on the wrapped reader to a single byte.
type oneByteReader struct {
	r io.Reader // underlying reader
}
// Read reads at most one byte from the underlying reader per call;
// an empty p is a no-op.
func (r *oneByteReader) Read(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	return r.r.Read(p[:1])
}
// HalfReader returns a Reader that implements Read
// by reading half as many requested bytes from r.
func HalfReader(r io.Reader) io.Reader {
	return &halfReader{r: r}
}
// halfReader asks the wrapped reader for only half of any requested amount.
type halfReader struct {
	r io.Reader // underlying reader
}
// Read forwards at most ceil(len(p)/2) bytes of the request, so a
// one-byte request still makes progress.
func (r *halfReader) Read(p []byte) (int, error) {
	half := (len(p) + 1) / 2
	return r.r.Read(p[:half])
}
// DataErrReader changes the way errors are handled by a Reader. Normally, a
// Reader returns an error (typically EOF) from the first Read call after the
// last piece of data is read. DataErrReader wraps a Reader and changes its
// behavior so the final error is returned along with the final data, instead
// of in the first call after the final data.
func DataErrReader(r io.Reader) io.Reader {
	return &dataErrReader{r: r, data: make([]byte, 1024)}
}
// dataErrReader buffers one read ahead of the caller so the final error
// can be delivered together with the final data.
type dataErrReader struct {
	r      io.Reader // underlying reader
	unread []byte    // data read from r but not yet returned to the caller
	data   []byte    // scratch buffer for reads from r
}
// Read returns buffered data, refilling from the underlying reader as
// needed, so that the underlying reader's error surfaces together with
// the last of its data rather than one call later.
func (r *dataErrReader) Read(p []byte) (n int, err error) {
	// loop because first call needs two reads:
	// one to get data and a second to look for an error.
	for {
		if len(r.unread) == 0 {
			// Buffer drained: refill and remember any error for
			// the exit check below.
			n1, err1 := r.r.Read(r.data)
			r.unread = r.data[0:n1]
			err = err1
		}
		if n > 0 || err != nil {
			// Done once we have copied data out, or the refill
			// reported an error.
			break
		}
		n = copy(p, r.unread)
		r.unread = r.unread[n:]
	}
	return
}
// ErrTimeout is a fake timeout error.
// It is the error returned by the reader created by [TimeoutReader].
var ErrTimeout = errors.New("timeout")
// TimeoutReader returns [ErrTimeout] on the second read
// with no data. Subsequent calls to read succeed.
func TimeoutReader(r io.Reader) io.Reader {
	return &timeoutReader{r: r}
}
// timeoutReader fails exactly the second Read call with ErrTimeout.
type timeoutReader struct {
	r     io.Reader // underlying reader
	count int       // number of Read calls seen so far
}
// Read counts calls and fails only the second one with ErrTimeout;
// all other calls are passed straight through.
func (r *timeoutReader) Read(p []byte) (int, error) {
	r.count++
	if r.count != 2 {
		return r.r.Read(p)
	}
	return 0, ErrTimeout
}
// ErrReader returns an [io.Reader] that returns 0, err from all Read calls.
func ErrReader(err error) io.Reader {
	reader := &errReader{err: err}
	return reader
}
// errReader always fails with the stored error.
type errReader struct {
	err error // error returned by every Read
}
// Read always returns 0 bytes and the configured error.
func (r *errReader) Read(p []byte) (int, error) {
	return 0, r.err
}
type smallByteReader struct {
r io.Reader
off int
n int
}
func (r *smallByteReader) Read(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
r.n = r.n%3 + 1
n := r.n
if n > len(p) {
n = len(p)
}
n, err := r.r.Read(p[0:n])
if err != nil && err != io.EOF {
err = fmt.Errorf("Read(%d bytes at offset %d): %v", n, r.off, err)
}
r.off += n
return n, err
}
// TestReader tests that reading from r returns the expected file content.
// It does reads of different sizes, until EOF.
// If r implements [io.ReaderAt] or [io.Seeker], TestReader also checks
// that those operations behave as they should.
//
// If TestReader finds any misbehaviors, it returns an error reporting them.
// The error text may span multiple lines.
func TestReader(r io.Reader, content []byte) error {
if len(content) > 0 {
n, err := r.Read(nil)
if n != 0 || err != nil {
return fmt.Errorf("Read(0) = %d, %v, want 0, nil", n, err)
}
}
data, err := io.ReadAll(&smallByteReader{r: r})
if err != nil {
return err
}
if !bytes.Equal(data, content) {
return fmt.Errorf("ReadAll(small amounts) = %q\n\twant %q", data, content)
}
n, err := r.Read(make([]byte, 10))
if n != 0 || err != io.EOF {
return fmt.Errorf("Read(10) at EOF = %v, %v, want 0, EOF", n, err)
}
if r, ok := r.(io.ReadSeeker); ok {
// Seek(0, 1) should report the current file position (EOF).
if off, err := r.Seek(0, 1); off != int64(len(content)) || err != nil {
return fmt.Errorf("Seek(0, 1) from EOF = %d, %v, want %d, nil", off, err, len(content))
}
// Seek backward partway through file, in two steps.
// If middle == 0, len(content) == 0, can't use the -1 and +1 seeks.
middle := len(content) - len(content)/3
if middle > 0 {
if off, err := r.Seek(-1, 1); off != int64(len(content)-1) || err != nil {
return fmt.Errorf("Seek(-1, 1) from EOF = %d, %v, want %d, nil", -off, err, len(content)-1)
}
if off, err := r.Seek(int64(-len(content)/3), 1); off != int64(middle-1) || err != nil {
return fmt.Errorf("Seek(%d, 1) from %d = %d, %v, want %d, nil", -len(content)/3, len(content)-1, off, err, middle-1)
}
if off, err := r.Seek(+1, 1); off != int64(middle) || err != nil {
return fmt.Errorf("Seek(+1, 1) from %d = %d, %v, want %d, nil", middle-1, off, err, middle)
}
}
// Seek(0, 1) should report the current file position (middle).
if off, err := r.Seek(0, 1); off != int64(middle) || err != nil {
return fmt.Errorf("Seek(0, 1) from %d = %d, %v, want %d, nil", middle, off, err, middle)
}
// Reading forward should return the last part of the file.
data, err := io.ReadAll(&smallByteReader{r: r})
if err != nil {
return fmt.Errorf("ReadAll from offset %d: %v", middle, err)
}
if !bytes.Equal(data, content[middle:]) {
return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:])
}
// Seek relative to end of file, but start elsewhere.
if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil {
return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2)
}
if off, err := r.Seek(int64(-len(content)/3), 2); off != int64(middle) || err != nil {
return fmt.Errorf("Seek(%d, 2) from %d = %d, %v, want %d, nil", -len(content)/3, middle/2, off, err, middle)
}
// Reading forward should return the last part of the file (again).
data, err = io.ReadAll(&smallByteReader{r: r})
if err != nil {
return fmt.Errorf("ReadAll from offset %d: %v", middle, err)
}
if !bytes.Equal(data, content[middle:]) {
return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:])
}
// Absolute seek & read forward.
if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil {
return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2)
}
data, err = io.ReadAll(r)
if err != nil {
return fmt.Errorf("ReadAll from offset %d: %v", middle/2, err)
}
if !bytes.Equal(data, content[middle/2:]) {
return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle/2, data, content[middle/2:])
}
}
if r, ok := r.(io.ReaderAt); ok {
data := make([]byte, len(content), len(content)+1)
for i := range data {
data[i] = 0xfe
}
n, err := r.ReadAt(data, 0)
if n != len(data) || err != nil && err != io.EOF {
return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, nil or EOF", len(data), n, err, len(data))
}
if !bytes.Equal(data, content) {
return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content)
}
n, err = r.ReadAt(data[:1], int64(len(data)))
if n != 0 || err != io.EOF {
return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 0, EOF", len(data), n, err)
}
for i := range data {
data[i] = 0xfe
}
n, err = r.ReadAt(data[:cap(data)], 0)
if n != len(data) || err != io.EOF {
return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, EOF", cap(data), n, err, len(data))
}
if !bytes.Equal(data, content) {
return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content)
}
for i := range data {
data[i] = 0xfe
}
for i := range data {
n, err = r.ReadAt(data[i:i+1], int64(i))
if n != 1 || err != nil && (i != len(data)-1 || err != io.EOF) {
want := "nil"
if i == len(data)-1 {
want = "nil or EOF"
}
return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 1, %s", i, n, err, want)
}
if data[i] != content[i] {
return fmt.Errorf("ReadAt(1, %d) = %q want %q", i, data[i:i+1], content[i:i+1])
}
}
}
return nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package iotest
import "io"
// TruncateWriter returns a Writer that writes to w
// but stops silently after n bytes.
func TruncateWriter(w io.Writer, n int64) io.Writer {
	return &truncateWriter{w: w, n: n}
}
// truncateWriter silently drops everything after the first n bytes.
type truncateWriter struct {
	w io.Writer // underlying writer
	n int64     // bytes remaining before writes are dropped
}
// Write forwards at most the remaining quota to the underlying writer
// and silently pretends the rest succeeded, so callers always see
// n == len(p) on success.
func (t *truncateWriter) Write(p []byte) (n int, err error) {
	if t.n <= 0 {
		// Quota exhausted: swallow the data but report success.
		return len(p), nil
	}
	// real write
	k := len(p)
	if int64(k) > t.n {
		k = int(t.n)
	}
	n, err = t.w.Write(p[:k])
	t.n -= int64(n)
	if err == nil {
		n = len(p)
	}
	return
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package scanner provides a scanner and tokenizer for UTF-8-encoded text.
// It takes an io.Reader providing the source, which then can be tokenized
// through repeated calls to the Scan function. For compatibility with
// existing tools, the NUL character is not allowed. If the first character
// in the source is a UTF-8 encoded byte order mark (BOM), it is discarded.
//
// By default, a [Scanner] skips white space and Go comments and recognizes all
// literals as defined by the Go language specification. It may be
// customized to recognize only a subset of those literals and to recognize
// different identifier and white space characters.
package scanner
import (
"bytes"
"fmt"
"io"
"os"
"unicode"
"unicode/utf8"
)
// Position is a value that represents a source position.
// A position is valid if Line > 0.
type Position struct {
	Filename string // filename, if any
	Offset   int    // byte offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count per line)
}

// IsValid reports whether the position is valid.
func (pos *Position) IsValid() bool { return pos.Line > 0 }

// String formats the position as "file:line:column". An empty filename
// is shown as "<input>"; line and column are omitted when the position
// is invalid.
func (pos Position) String() string {
	name := pos.Filename
	if name == "" {
		name = "<input>"
	}
	if !pos.IsValid() {
		return name
	}
	return name + fmt.Sprintf(":%d:%d", pos.Line, pos.Column)
}
// Predefined mode bits to control recognition of tokens. For instance,
// to configure a [Scanner] such that it only recognizes (Go) identifiers,
// integers, and skips comments, set the Scanner's Mode field to:
//
//	ScanIdents | ScanInts | ScanComments | SkipComments
//
// With the exceptions of comments, which are skipped if SkipComments is
// set, unrecognized tokens are not ignored. Instead, the scanner simply
// returns the respective individual characters (or possibly sub-tokens).
// For instance, if the mode is ScanIdents (not ScanStrings), the string
// "foo" is scanned as the token sequence '"' [Ident] '"'.
//
// Use GoTokens to configure the Scanner such that it accepts all Go
// literal tokens including Go identifiers. Comments will be skipped.
const (
	// Each bit is 1 shifted by the negated token constant, so
	// Mode&(1<<-tok) tests whether a token class is enabled.
	ScanIdents     = 1 << -Ident
	ScanInts       = 1 << -Int
	ScanFloats     = 1 << -Float // includes Ints and hexadecimal floats
	ScanChars      = 1 << -Char
	ScanStrings    = 1 << -String
	ScanRawStrings = 1 << -RawString
	ScanComments   = 1 << -Comment
	SkipComments   = 1 << -skipComment // if set with ScanComments, comments become white space
	GoTokens       = ScanIdents | ScanFloats | ScanChars | ScanStrings | ScanRawStrings | ScanComments | SkipComments
)
// The result of Scan is one of these tokens or a Unicode character.
// Token values are negative so they cannot collide with valid runes,
// and their negation is used as the shift for the Scan* mode bits.
const (
	EOF = -(iota + 1)
	Ident
	Int
	Float
	Char
	String
	RawString
	Comment

	// internal use only
	skipComment
)
// tokenString maps token values to printable names; used by TokenString.
var tokenString = map[rune]string{
	EOF:       "EOF",
	Ident:     "Ident",
	Int:       "Int",
	Float:     "Float",
	Char:      "Char",
	String:    "String",
	RawString: "RawString",
	Comment:   "Comment",
}
// TokenString returns a printable string for a token or Unicode character.
func TokenString(tok rune) string {
	if name, ok := tokenString[tok]; ok {
		return name
	}
	// Not a named token: quote the character itself.
	return fmt.Sprintf("%q", string(tok))
}
// GoWhitespace is the default value for the [Scanner]'s Whitespace field.
// Its value selects Go's white space characters (each set bit index is
// the character code of a white space character).
const GoWhitespace = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' '

// bufLen is the size of the scanner's source buffer.
const bufLen = 1024 // at least utf8.UTFMax
// A Scanner implements reading of Unicode characters and tokens from an [io.Reader].
type Scanner struct {
	// Input
	src io.Reader

	// Source buffer
	srcBuf [bufLen + 1]byte // +1 for sentinel for common case of s.next()
	srcPos int              // reading position (srcBuf index)
	srcEnd int              // source end (srcBuf index)

	// Source position
	srcBufOffset int // byte offset of srcBuf[0] in source
	line         int // line count
	column       int // character count
	lastLineLen  int // length of last line in characters (for correct column reporting)
	lastCharLen  int // length of last character in bytes

	// Token text buffer
	// Typically, token text is stored completely in srcBuf, but in general
	// the token text's head may be buffered in tokBuf while the token text's
	// tail is stored in srcBuf.
	tokBuf bytes.Buffer // token text head that is not in srcBuf anymore
	tokPos int          // token text tail position (srcBuf index); valid if >= 0
	tokEnd int          // token text tail end (srcBuf index)

	// One character look-ahead
	ch rune // character before current srcPos (-2 until the first read; see Init)

	// Error is called for each error encountered. If no Error
	// function is set, the error is reported to os.Stderr.
	Error func(s *Scanner, msg string)

	// ErrorCount is incremented by one for each error encountered.
	ErrorCount int

	// The Mode field controls which tokens are recognized. For instance,
	// to recognize Ints, set the ScanInts bit in Mode. The field may be
	// changed at any time.
	Mode uint

	// The Whitespace field controls which characters are recognized
	// as white space. To recognize a character ch <= ' ' as white space,
	// set the ch'th bit in Whitespace (the Scanner's behavior is undefined
	// for values ch > ' '). The field may be changed at any time.
	Whitespace uint64

	// IsIdentRune is a predicate controlling the characters accepted
	// as the ith rune in an identifier. The set of valid characters
	// must not intersect with the set of white space characters.
	// If no IsIdentRune function is set, regular Go identifiers are
	// accepted instead. The field may be changed at any time.
	IsIdentRune func(ch rune, i int) bool

	// Start position of most recently scanned token; set by Scan.
	// Calling Init or Next invalidates the position (Line == 0).
	// The Filename field is always left untouched by the Scanner.
	// If an error is reported (via Error) and Position is invalid,
	// the scanner is not inside a token. Call Pos to obtain an error
	// position in that case, or to obtain the position immediately
	// after the most recently scanned token.
	Position
}
// Init initializes a [Scanner] with a new source and returns s.
// [Scanner.Error] is set to nil, [Scanner.ErrorCount] is set to 0, [Scanner.Mode] is set to [GoTokens],
// and [Scanner.Whitespace] is set to [GoWhitespace].
// Note that Position.Filename is not reset here.
func (s *Scanner) Init(src io.Reader) *Scanner {
	s.src = src

	// initialize source buffer
	// (the first call to next() will fill it by calling src.Read)
	s.srcBuf[0] = utf8.RuneSelf // sentinel
	s.srcPos = 0
	s.srcEnd = 0

	// initialize source position
	s.srcBufOffset = 0
	s.line = 1
	s.column = 0
	s.lastLineLen = 0
	s.lastCharLen = 0

	// initialize token text buffer
	// (required for first call to next()).
	s.tokPos = -1

	// initialize one character look-ahead
	s.ch = -2 // no char read yet, not EOF

	// initialize public fields
	s.Error = nil
	s.ErrorCount = 0
	s.Mode = GoTokens
	s.Whitespace = GoWhitespace
	s.Line = 0 // invalidate token position

	return s
}
// next reads and returns the next Unicode character. It is designed such
// that only a minimal amount of work needs to be done in the common ASCII
// case (one test to check for both ASCII and end-of-buffer, and one test
// to check for newlines).
func (s *Scanner) next() rune {
	// The sentinel byte (utf8.RuneSelf) at srcBuf[srcEnd] makes this
	// single comparison detect both non-ASCII input and buffer end.
	ch, width := rune(s.srcBuf[s.srcPos]), 1

	if ch >= utf8.RuneSelf {
		// uncommon case: not ASCII or not enough bytes
		for s.srcPos+utf8.UTFMax > s.srcEnd && !utf8.FullRune(s.srcBuf[s.srcPos:s.srcEnd]) {
			// not enough bytes: read some more, but first
			// save away token text if any
			if s.tokPos >= 0 {
				s.tokBuf.Write(s.srcBuf[s.tokPos:s.srcPos])
				s.tokPos = 0
				// s.tokEnd is set by Scan()
			}
			// move unread bytes to beginning of buffer
			copy(s.srcBuf[0:], s.srcBuf[s.srcPos:s.srcEnd])
			s.srcBufOffset += s.srcPos
			// read more bytes
			// (an io.Reader must return io.EOF when it reaches
			// the end of what it is reading - simply returning
			// n == 0 will make this loop retry forever; but the
			// error is in the reader implementation in that case)
			i := s.srcEnd - s.srcPos
			n, err := s.src.Read(s.srcBuf[i:bufLen])
			s.srcPos = 0
			s.srcEnd = i + n
			s.srcBuf[s.srcEnd] = utf8.RuneSelf // sentinel
			if err != nil {
				if err != io.EOF {
					s.error(err.Error())
				}
				if s.srcEnd == 0 {
					if s.lastCharLen > 0 {
						// previous character was not EOF
						s.column++
					}
					s.lastCharLen = 0
					return EOF
				}
				// If err == EOF, we won't be getting more
				// bytes; break to avoid infinite loop. If
				// err is something else, we don't know if
				// we can get more bytes; thus also break.
				break
			}
		}
		// at least one byte
		ch = rune(s.srcBuf[s.srcPos])
		if ch >= utf8.RuneSelf {
			// uncommon case: not ASCII
			ch, width = utf8.DecodeRune(s.srcBuf[s.srcPos:s.srcEnd])
			if ch == utf8.RuneError && width == 1 {
				// advance for correct error position
				s.srcPos += width
				s.lastCharLen = width
				s.column++
				s.error("invalid UTF-8 encoding")
				return ch
			}
		}
	}

	// advance
	s.srcPos += width
	s.lastCharLen = width
	s.column++

	// special situations
	switch ch {
	case 0:
		// for compatibility with other tools
		s.error("invalid character NUL")
	case '\n':
		s.line++
		s.lastLineLen = s.column
		s.column = 0
	}

	return ch
}
// Next reads and returns the next Unicode character.
// It returns [EOF] at the end of the source. It reports
// a read error by calling s.Error, if not nil; otherwise
// it prints an error message to [os.Stderr]. Next does not
// update the [Scanner.Position] field; use [Scanner.Pos]() to
// get the current position.
func (s *Scanner) Next() rune {
	s.tokPos = -1 // don't collect token text
	s.Line = 0    // invalidate token position
	ch := s.Peek()
	if ch != EOF {
		// consume the character we are returning by advancing the
		// one-character look-ahead
		s.ch = s.next()
	}
	return ch
}
// Peek returns the next Unicode character in the source without advancing
// the scanner. It returns [EOF] if the scanner's position is at the last
// character of the source.
func (s *Scanner) Peek() rune {
	if s.ch == -2 {
		// this code is only run for the very first character
		// (-2 is the "no char read yet" sentinel set by Init)
		s.ch = s.next()
		if s.ch == '\uFEFF' {
			s.ch = s.next() // ignore BOM
		}
	}
	return s.ch
}
// error reports msg via s.Error if set, otherwise to os.Stderr, and
// increments ErrorCount. The reported position is the token position
// if valid, else the current reading position.
func (s *Scanner) error(msg string) {
	s.tokEnd = s.srcPos - s.lastCharLen // make sure token text is terminated
	s.ErrorCount++
	if s.Error != nil {
		s.Error(s, msg)
		return
	}
	pos := s.Position
	if !pos.IsValid() {
		pos = s.Pos()
	}
	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}
// errorf is like error but formats its message with fmt.Sprintf.
func (s *Scanner) errorf(format string, args ...any) {
	s.error(fmt.Sprintf(format, args...))
}
// isIdentRune reports whether ch may appear as the i'th rune of an
// identifier, using the user-supplied IsIdentRune predicate when set
// and Go's identifier rules otherwise.
func (s *Scanner) isIdentRune(ch rune, i int) bool {
	if s.IsIdentRune != nil {
		return ch != EOF && s.IsIdentRune(ch, i)
	}
	if ch == '_' || unicode.IsLetter(ch) {
		return true
	}
	// Digits are only allowed after the first position.
	return i > 0 && unicode.IsDigit(ch)
}
// scanIdentifier consumes the remainder of an identifier (the first rune
// was already accepted by the caller) and returns the first rune that is
// not part of it.
func (s *Scanner) scanIdentifier() rune {
	// we know the zero'th rune is OK; start scanning at the next one
	ch := s.next()
	for i := 1; s.isIdentRune(ch, i); i++ {
		ch = s.next()
	}
	return ch
}
// lower sets the ASCII case bit, mapping an ASCII upper-case letter to
// its lower-case form; callers must range-check the result themselves.
func lower(ch rune) rune { return ch | ('a' - 'A') }

// isDecimal reports whether ch is an ASCII decimal digit.
func isDecimal(ch rune) bool { return ch >= '0' && ch <= '9' }

// isHex reports whether ch is an ASCII hexadecimal digit.
func isHex(ch rune) bool {
	return isDecimal(ch) || ('a' <= lower(ch) && lower(ch) <= 'f')
}
// digits accepts the sequence { digit | '_' } starting with ch0.
// If base <= 10, digits accepts any decimal digit but records
// the first invalid digit >= base in *invalid if *invalid == 0.
// digits returns the first rune that is not part of the sequence
// anymore, and a bitset describing whether the sequence contained
// digits (bit 0 is set), or separators '_' (bit 1 is set).
func (s *Scanner) digits(ch0 rune, base int, invalid *rune) (ch rune, digsep int) {
	ch = ch0
	if base <= 10 {
		// Accept any decimal digit so that, e.g., an 8 in an octal
		// literal is consumed but flagged via *invalid.
		max := rune('0' + base)
		for isDecimal(ch) || ch == '_' {
			ds := 1
			if ch == '_' {
				ds = 2
			} else if ch >= max && *invalid == 0 {
				*invalid = ch
			}
			digsep |= ds
			ch = s.next()
		}
	} else {
		// base > 10 means hexadecimal; every hex digit is valid.
		for isHex(ch) || ch == '_' {
			ds := 1
			if ch == '_' {
				ds = 2
			}
			digsep |= ds
			ch = s.next()
		}
	}
	return
}
// scanNumber consumes an Int or Float literal starting at ch (seenDot
// indicates a '.' was already consumed) and returns the token type and
// the first rune past the literal.
func (s *Scanner) scanNumber(ch rune, seenDot bool) (rune, rune) {
	base := 10         // number base
	prefix := rune(0)  // one of 0 (decimal), '0' (0-octal), 'x', 'o', or 'b'
	digsep := 0        // bit 0: digit present, bit 1: '_' present
	invalid := rune(0) // invalid digit in literal, or 0

	// integer part
	var tok rune
	var ds int
	if !seenDot {
		tok = Int
		if ch == '0' {
			// A leading 0 introduces a base prefix (0x, 0o, 0b)
			// or a legacy 0-octal literal.
			ch = s.next()
			switch lower(ch) {
			case 'x':
				ch = s.next()
				base, prefix = 16, 'x'
			case 'o':
				ch = s.next()
				base, prefix = 8, 'o'
			case 'b':
				ch = s.next()
				base, prefix = 2, 'b'
			default:
				base, prefix = 8, '0'
				digsep = 1 // leading 0
			}
		}
		ch, ds = s.digits(ch, base, &invalid)
		digsep |= ds
		if ch == '.' && s.Mode&ScanFloats != 0 {
			ch = s.next()
			seenDot = true
		}
	}

	// fractional part
	if seenDot {
		tok = Float
		if prefix == 'o' || prefix == 'b' {
			s.error("invalid radix point in " + litname(prefix))
		}
		ch, ds = s.digits(ch, base, &invalid)
		digsep |= ds
	}

	if digsep&1 == 0 {
		s.error(litname(prefix) + " has no digits")
	}

	// exponent
	if e := lower(ch); (e == 'e' || e == 'p') && s.Mode&ScanFloats != 0 {
		// 'e' requires a decimal mantissa, 'p' a hexadecimal one.
		switch {
		case e == 'e' && prefix != 0 && prefix != '0':
			s.errorf("%q exponent requires decimal mantissa", ch)
		case e == 'p' && prefix != 'x':
			s.errorf("%q exponent requires hexadecimal mantissa", ch)
		}
		ch = s.next()
		tok = Float
		if ch == '+' || ch == '-' {
			ch = s.next()
		}
		ch, ds = s.digits(ch, 10, nil)
		digsep |= ds
		if ds&1 == 0 {
			s.error("exponent has no digits")
		}
	} else if prefix == 'x' && tok == Float {
		s.error("hexadecimal mantissa requires a 'p' exponent")
	}

	if tok == Int && invalid != 0 {
		s.errorf("invalid digit %q in %s", invalid, litname(prefix))
	}

	if digsep&2 != 0 {
		// The literal contained '_' separators; validate their placement.
		s.tokEnd = s.srcPos - s.lastCharLen // make sure token text is terminated
		if i := invalidSep(s.TokenText()); i >= 0 {
			s.error("'_' must separate successive digits")
		}
	}

	return tok, ch
}
// litname returns a human-readable name for the kind of numeric literal
// identified by prefix (0, '0', 'x', 'o', or 'b'); see scanNumber.
func litname(prefix rune) string {
	switch prefix {
	case 'b':
		return "binary literal"
	case 'o', '0':
		return "octal literal"
	case 'x':
		return "hexadecimal literal"
	}
	return "decimal literal"
}
// invalidSep returns the index of the first invalid separator in x, or -1.
// A separator '_' is valid only between successive digits.
func invalidSep(x string) int {
	x1 := ' ' // prefix char, we only care if it's 'x'
	d := '.'  // digit, one of '_', '0' (a digit), or '.' (anything else)
	i := 0

	// a prefix counts as a digit
	if len(x) >= 2 && x[0] == '0' {
		x1 = lower(rune(x[1]))
		if x1 == 'x' || x1 == 'o' || x1 == 'b' {
			d = '0'
			i = 2
		}
	}

	// mantissa and exponent
	for ; i < len(x); i++ {
		p := d // previous digit
		d = rune(x[i])
		switch {
		case d == '_':
			// '_' must follow a digit.
			if p != '0' {
				return i
			}
		case isDecimal(d) || x1 == 'x' && isHex(d):
			d = '0'
		default:
			// Any other character (e.g. '.', 'e', '+') must not
			// follow a '_'.
			if p == '_' {
				return i - 1
			}
			d = '.'
		}
	}
	// A trailing '_' is invalid too.
	if d == '_' {
		return len(x) - 1
	}

	return -1
}
// digitVal returns the numeric value of the hexadecimal digit ch,
// or 16 when ch is not a legal hex digit.
func digitVal(ch rune) int {
	if '0' <= ch && ch <= '9' {
		return int(ch - '0')
	}
	// Fold ASCII case before testing for 'a'..'f'.
	if c := ch | ('a' - 'A'); 'a' <= c && c <= 'f' {
		return int(c - 'a' + 10)
	}
	return 16 // larger than any legal digit val
}
// scanDigits consumes up to n digits of the given base starting with ch
// and returns the first rune past them; fewer than n digits is reported
// as an invalid char escape.
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
	for n > 0 && digitVal(ch) < base {
		ch = s.next()
		n--
	}
	if n > 0 {
		s.error("invalid char escape")
	}
	return ch
}
// scanEscape consumes an escape sequence inside a literal delimited by
// quote (the leading '\' has already been read) and returns the rune
// following the escape.
func (s *Scanner) scanEscape(quote rune) rune {
	ch := s.next() // read character after '\'
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
		// nothing to do
		ch = s.next()
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// octal escape: up to three octal digits
		ch = s.scanDigits(ch, 8, 3)
	case 'x':
		ch = s.scanDigits(s.next(), 16, 2)
	case 'u':
		ch = s.scanDigits(s.next(), 16, 4)
	case 'U':
		ch = s.scanDigits(s.next(), 16, 8)
	default:
		s.error("invalid char escape")
	}
	return ch
}
// scanString consumes a string or character literal body up to the
// closing quote (the opening quote has already been read) and returns
// the number of characters scanned; a newline or EOF terminates the
// literal with an error.
func (s *Scanner) scanString(quote rune) (n int) {
	ch := s.next() // read character after quote
	for ch != quote {
		if ch == '\n' || ch < 0 {
			s.error("literal not terminated")
			return
		}
		if ch == '\\' {
			ch = s.scanEscape(quote)
		} else {
			ch = s.next()
		}
		n++
	}
	return
}
// scanRawString consumes a raw string literal up to the closing
// back quote (the opening one has already been read); EOF before
// the closing quote is an error.
func (s *Scanner) scanRawString() {
	for ch := s.next(); ch != '`'; ch = s.next() {
		if ch < 0 {
			s.error("literal not terminated")
			return
		}
	}
}
// scanChar consumes a character literal after its opening quote;
// exactly one character is required.
func (s *Scanner) scanChar() {
	if s.scanString('\'') != 1 {
		s.error("invalid char literal")
	}
}
// scanComment consumes a line ("//") or general ("/* */") comment; ch is
// the character after the leading '/'. It returns the first rune past
// the comment (for a line comment, the terminating '\n' itself).
func (s *Scanner) scanComment(ch rune) rune {
	// ch == '/' || ch == '*'
	if ch == '/' {
		// line comment
		ch = s.next() // read character after "//"
		for ch != '\n' && ch >= 0 {
			ch = s.next()
		}
		return ch
	}

	// general comment
	ch = s.next() // read character after "/*"
	for {
		if ch < 0 {
			s.error("comment not terminated")
			break
		}
		ch0 := ch
		ch = s.next()
		// look for the closing "*/" across the character pair
		if ch0 == '*' && ch == '/' {
			ch = s.next()
			break
		}
	}
	return ch
}
// Scan reads the next token or Unicode character from source and returns it.
// It only recognizes tokens t for which the respective [Scanner.Mode] bit (1<<-t) is set.
// It returns [EOF] at the end of the source. It reports scanner errors (read and
// token errors) by calling s.Error, if not nil; otherwise it prints an error
// message to [os.Stderr].
func (s *Scanner) Scan() rune {
	ch := s.Peek()

	// reset token text position
	s.tokPos = -1
	s.Line = 0

redo:
	// skip white space
	for s.Whitespace&(1<<uint(ch)) != 0 {
		ch = s.next()
	}

	// start collecting token text
	s.tokBuf.Reset()
	s.tokPos = s.srcPos - s.lastCharLen

	// set token position
	// (this is a slightly optimized version of the code in Pos())
	s.Offset = s.srcBufOffset + s.tokPos
	if s.column > 0 {
		// common case: last character was not a '\n'
		s.Line = s.line
		s.Column = s.column
	} else {
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		s.Line = s.line - 1
		s.Column = s.lastLineLen
	}

	// determine token value
	tok := ch
	switch {
	case s.isIdentRune(ch, 0):
		if s.Mode&ScanIdents != 0 {
			tok = Ident
			ch = s.scanIdentifier()
		} else {
			ch = s.next()
		}
	case isDecimal(ch):
		if s.Mode&(ScanInts|ScanFloats) != 0 {
			tok, ch = s.scanNumber(ch, false)
		} else {
			ch = s.next()
		}
	default:
		switch ch {
		case EOF:
			break
		case '"':
			if s.Mode&ScanStrings != 0 {
				s.scanString('"')
				tok = String
			}
			ch = s.next()
		case '\'':
			if s.Mode&ScanChars != 0 {
				s.scanChar()
				tok = Char
			}
			ch = s.next()
		case '.':
			// '.' may start a float literal (".5") when floats are enabled
			ch = s.next()
			if isDecimal(ch) && s.Mode&ScanFloats != 0 {
				tok, ch = s.scanNumber(ch, true)
			}
		case '/':
			ch = s.next()
			if (ch == '/' || ch == '*') && s.Mode&ScanComments != 0 {
				if s.Mode&SkipComments != 0 {
					s.tokPos = -1 // don't collect token text
					ch = s.scanComment(ch)
					// skipped comments act as white space: rescan
					goto redo
				}
				ch = s.scanComment(ch)
				tok = Comment
			}
		case '`':
			if s.Mode&ScanRawStrings != 0 {
				s.scanRawString()
				tok = RawString
			}
			ch = s.next()
		default:
			ch = s.next()
		}
	}

	// end of token text
	s.tokEnd = s.srcPos - s.lastCharLen

	s.ch = ch
	return tok
}
// Pos returns the position of the character immediately after
// the character or token returned by the last call to [Scanner.Next] or [Scanner.Scan].
// Use the [Scanner.Position] field for the start position of the most
// recently scanned token.
func (s *Scanner) Pos() (pos Position) {
	pos.Filename = s.Filename
	pos.Offset = s.srcBufOffset + s.srcPos - s.lastCharLen
	switch {
	case s.column > 0:
		// common case: last character was not a '\n'
		pos.Line = s.line
		pos.Column = s.column
	case s.lastLineLen > 0:
		// last character was a '\n'
		pos.Line = s.line - 1
		pos.Column = s.lastLineLen
	default:
		// at the beginning of the source
		pos.Line = 1
		pos.Column = 1
	}
	return
}
// TokenText returns the string corresponding to the most recently scanned token.
// Valid after calling [Scanner.Scan] and in calls of [Scanner.Error].
func (s *Scanner) TokenText() string {
	if s.tokPos < 0 {
		// no token text
		return ""
	}

	if s.tokEnd < s.tokPos {
		// if EOF was reached, s.tokEnd is set to -1 (s.srcPos == 0)
		s.tokEnd = s.tokPos
	}
	// s.tokEnd >= s.tokPos

	if s.tokBuf.Len() == 0 {
		// common case: the entire token text is still in srcBuf
		return string(s.srcBuf[s.tokPos:s.tokEnd])
	}

	// part of the token text was saved in tokBuf: save the rest in
	// tokBuf as well and return its content
	s.tokBuf.Write(s.srcBuf[s.tokPos:s.tokEnd])
	s.tokPos = s.tokEnd // ensure idempotency of TokenText() call
	return s.tokBuf.String()
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tabwriter implements a write filter (tabwriter.Writer) that
// translates tabbed columns in input into properly aligned text.
//
// The package is using the Elastic Tabstops algorithm described at
// http://nickgravgaard.com/elastictabstops/index.html.
//
// The text/tabwriter package is frozen and is not accepting new features.
package tabwriter
import (
"fmt"
"io"
"unicode/utf8"
)
// ----------------------------------------------------------------------------
// Filter implementation
// A cell represents a segment of text terminated by tabs or line breaks.
// The text itself is stored in a separate buffer; cell only describes the
// segment's size in bytes, its width in runes, and whether it's an htab
// ('\t') terminated cell.
type cell struct {
	size  int  // cell size in bytes
	width int  // cell width in runes (excluding ignored HTML/escape sections)
	htab  bool // true if the cell is terminated by an htab ('\t')
}
// A Writer is a filter that inserts padding around tab-delimited
// columns in its input to align them in the output.
//
// The Writer treats incoming bytes as UTF-8-encoded text consisting
// of cells terminated by horizontal ('\t') or vertical ('\v') tabs,
// and newline ('\n') or formfeed ('\f') characters; both newline and
// formfeed act as line breaks.
//
// Tab-terminated cells in contiguous lines constitute a column. The
// Writer inserts padding as needed to make all cells in a column have
// the same width, effectively aligning the columns. It assumes that
// all characters have the same width, except for tabs for which a
// tabwidth must be specified. Column cells must be tab-terminated, not
// tab-separated: non-tab terminated trailing text at the end of a line
// forms a cell but that cell is not part of an aligned column.
// For instance, in this example (where | stands for a horizontal tab):
//
//	aaaa|bbb|d
//	aa  |b  |dd
//	a   |
//	aa  |cccc|eee
//
// the b and c are in distinct columns (the b column is not contiguous
// all the way). The d and e are not in a column at all (there's no
// terminating tab, nor would the column be contiguous).
//
// The Writer assumes that all Unicode code points have the same width;
// this may not be true in some fonts or if the string contains combining
// characters.
//
// If [DiscardEmptyColumns] is set, empty columns that are terminated
// entirely by vertical (or "soft") tabs are discarded. Columns
// terminated by horizontal (or "hard") tabs are not affected by
// this flag.
//
// If a Writer is configured to filter HTML, HTML tags and entities
// are passed through. The widths of tags and entities are
// assumed to be zero (tags) and one (entities) for formatting purposes.
//
// A segment of text may be escaped by bracketing it with [Escape]
// characters. The tabwriter passes escaped text segments through
// unchanged. In particular, it does not interpret any tabs or line
// breaks within the segment. If the [StripEscape] flag is set, the
// Escape characters are stripped from the output; otherwise they
// are passed through as well. For the purpose of formatting, the
// width of the escaped text is always computed excluding the Escape
// characters.
//
// The formfeed character acts like a newline but it also terminates
// all columns in the current line (effectively calling [Writer.Flush]). Tab-
// terminated cells in the next line start new columns. Unless found
// inside an HTML tag or inside an escaped text segment, formfeed
// characters appear as newlines in the output.
//
// The Writer must buffer input internally, because proper spacing
// of one line may depend on the cells in future lines. Clients must
// call Flush when done calling [Writer.Write].
type Writer struct {
	// configuration (set once by Init and never changed afterwards)
	output   io.Writer
	minwidth int
	tabwidth int
	padding  int
	padbytes [8]byte // filled with padchar; a chunk used for repeated writes
	flags    uint
	// current state
	buf     []byte   // collected text excluding tabs or line breaks
	pos     int      // buffer position up to which cell.width of incomplete cell has been computed
	cell    cell     // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
	endChar byte     // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
	lines   [][]cell // list of lines; each line is a list of cells
	widths  []int    // list of column widths in runes - re-used during formatting
}
// addLine adds a new line.
// flushed is a hint indicating whether the underlying writer was just flushed.
// If so, the previous line is not likely to be a good indicator of the new line's cells.
func (b *Writer) addLine(flushed bool) {
	// Prefer recycling spare capacity over appending so that an
	// existing []cell (and its backing array) can be re-used.
	if n := len(b.lines) + 1; n <= cap(b.lines) {
		b.lines = b.lines[:n]
		b.lines[n-1] = b.lines[n-1][:0]
	} else {
		b.lines = append(b.lines, nil)
	}
	if flushed {
		return
	}
	// The previous line is probably a good indicator of how many cells
	// the current line will have. If the recycled line's capacity falls
	// short of that, abandon it and allocate an adequately sized one.
	n := len(b.lines)
	if n < 2 {
		return
	}
	if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) {
		b.lines[n-1] = make([]cell, 0, prev)
	}
}
// Reset the current state, keeping allocated backing storage (buf, lines,
// widths) for re-use, and start a fresh first line.
func (b *Writer) reset() {
	b.buf = b.buf[:0]
	b.pos = 0
	b.cell = cell{}
	b.endChar = 0
	b.lines = b.lines[0:0]
	b.widths = b.widths[0:0]
	b.addLine(true)
}
// Internal representation (current state):
//
// - all text written is appended to buf; tabs and line breaks are stripped away
// - at any given time there is a (possibly empty) incomplete cell at the end
// (the cell starts after a tab or line break)
// - cell.size is the number of bytes belonging to the cell so far
// - cell.width is text width in runes of that cell from the start of the cell to
// position pos; html tags and entities are excluded from this width if html
// filtering is enabled
// - the sizes and widths of processed text are kept in the lines list
// which contains a list of cells for each line
// - the widths list is a temporary list with current widths used during
// formatting; it is kept in Writer because it's re-used
//
// |<---------- size ---------->|
// | |
// |<- width ->|<- ignored ->| |
// | | | |
// [---processed---tab------------<tag>...</tag>...]
// ^ ^ ^
// | | |
// buf start of incomplete cell pos
// Formatting can be controlled with these flags.
// They are bit flags and may be combined with bitwise OR.
const (
	// Ignore html tags and treat entities (starting with '&'
	// and ending in ';') as single characters (width = 1).
	FilterHTML uint = 1 << iota
	// Strip Escape characters bracketing escaped text segments
	// instead of passing them through unchanged with the text.
	StripEscape
	// Force right-alignment of cell content.
	// Default is left-alignment.
	AlignRight
	// Handle empty columns as if they were not present in
	// the input in the first place.
	DiscardEmptyColumns
	// Always use tabs for indentation columns (i.e., padding of
	// leading empty cells on the left) independent of padchar.
	TabIndent
	// Print a vertical bar ('|') between columns (after formatting).
	// Discarded columns appear as zero-width columns ("||").
	Debug
)
// A [Writer] must be initialized with a call to Init. The first parameter (output)
// specifies the filter output. The remaining parameters control the formatting:
//
//	minwidth	minimal cell width including any padding
//	tabwidth	width of tab characters (equivalent number of spaces)
//	padding		padding added to a cell before computing its width
//	padchar		ASCII char used for padding
//			if padchar == '\t', the Writer will assume that the
//			width of a '\t' in the formatted output is tabwidth,
//			and cells are left-aligned independent of align_left
//			(for correct-looking results, tabwidth must correspond
//			to the tab width in the viewer displaying the result)
//	flags		formatting control
func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
	if minwidth < 0 || tabwidth < 0 || padding < 0 {
		panic("negative minwidth, tabwidth, or padding")
	}
	b.output = output
	b.minwidth, b.tabwidth, b.padding = minwidth, tabwidth, padding
	// Pre-fill the padding chunk used for repeated pad writes.
	for i := range b.padbytes {
		b.padbytes[i] = padchar
	}
	if padchar == '\t' {
		// Padding with tabs enforces left-alignment.
		flags &^= AlignRight
	}
	b.flags = flags
	b.reset()
	return b
}
// dump prints the buffered lines and cells; debugging support (keep code around).
func (b *Writer) dump() {
	pos := 0
	for i, line := range b.lines {
		print("(", i, ") ")
		for _, c := range line {
			end := pos + c.size
			print("[", string(b.buf[pos:end]), "]")
			pos = end
		}
		print("\n")
	}
	print("\n")
}
// local error wrapper so we can distinguish errors we want to return
// as errors from genuine panics (which we don't want to return as errors)
type osError struct {
	err error
}
// write0 writes buf to the underlying writer. Any write failure (including
// a short write) is raised as a panic carrying osError, to be converted
// back into an ordinary error by handlePanic.
func (b *Writer) write0(buf []byte) {
	n, err := b.output.Write(buf)
	if err == nil && n != len(buf) {
		err = io.ErrShortWrite
	}
	if err != nil {
		panic(osError{err})
	}
}
// writeN writes n bytes taken by repeating src as often as needed.
func (b *Writer) writeN(src []byte, n int) {
	// Emit whole copies of src until fewer than len(src) bytes remain,
	// then write the tail.
	for n > len(src) {
		b.write0(src)
		n -= len(src)
	}
	b.write0(src[:n])
}
var (
	newline = []byte{'\n'}
	// tabs is a chunk of tab characters re-used by writePadding via writeN.
	tabs = []byte("\t\t\t\t\t\t\t\t")
)
// writePadding writes the padding required to grow a cell of text width
// textw to the column width cellw. If useTabs is set, or the configured
// pad character is a tab, the padding is written as tabs of width
// b.tabwidth; otherwise it consists of cellw-textw pad characters.
func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
	if b.padbytes[0] == '\t' || useTabs {
		// padding is done with tabs
		if b.tabwidth == 0 {
			return // tabs have no width - can't do any padding
		}
		// make cellw the smallest multiple of b.tabwidth
		cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
		n := cellw - textw // amount of padding
		if n < 0 {
			panic("internal error")
		}
		// write enough tabs to cover n columns (round up)
		b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
		return
	}
	// padding is done with non-tab characters
	b.writeN(b.padbytes[0:], cellw-textw)
}
var vbar = []byte{'|'}

// writeLines writes the lines [line0, line1) to the output, padding cells
// to the column widths currently stacked in b.widths. pos0 is the buffer
// position of the start of line0; the returned pos is the buffer position
// of the start of line1.
func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
	pos = pos0
	for i := line0; i < line1; i++ {
		line := b.lines[i]
		// if TabIndent is set, use tabs to pad leading empty cells
		useTabs := b.flags&TabIndent != 0
		for j, c := range line {
			if j > 0 && b.flags&Debug != 0 {
				// indicate column break
				b.write0(vbar)
			}
			if c.size == 0 {
				// empty cell
				if j < len(b.widths) {
					b.writePadding(c.width, b.widths[j], useTabs)
				}
			} else {
				// non-empty cell: disables tab-indentation for the rest of the line
				useTabs = false
				if b.flags&AlignRight == 0 { // align left
					b.write0(b.buf[pos : pos+c.size])
					pos += c.size
					if j < len(b.widths) {
						b.writePadding(c.width, b.widths[j], false)
					}
				} else { // align right
					if j < len(b.widths) {
						b.writePadding(c.width, b.widths[j], false)
					}
					b.write0(b.buf[pos : pos+c.size])
					pos += c.size
				}
			}
		}
		if i+1 == len(b.lines) {
			// last buffered line - we don't have a newline, so just write
			// any outstanding buffered data
			b.write0(b.buf[pos : pos+b.cell.size])
			pos += b.cell.size
		} else {
			// not the last line - write newline
			b.write0(newline)
		}
	}
	return
}
// Format the text between line0 and line1 (excluding line1); pos
// is the buffer position corresponding to the beginning of line0.
// Returns the buffer position corresponding to the beginning of
// line1 and an error, if any.
//
// format works column by column: it scans for lines that have a cell in
// the current column (len(b.widths)), computes that column's width over
// the contiguous block of such lines, pushes the width, and recurses to
// handle the columns to the right of it.
func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
	pos = pos0
	column := len(b.widths)
	for this := line0; this < line1; this++ {
		line := b.lines[this]
		if column >= len(line)-1 {
			continue
		}
		// cell exists in this column => this line
		// has more cells than the previous line
		// (the last cell per line is ignored because cells are
		// tab-terminated; the last cell per line describes the
		// text before the newline/formfeed and does not belong
		// to a column)
		// print unprinted lines until beginning of block
		pos = b.writeLines(pos, line0, this)
		line0 = this
		// column block begin
		width := b.minwidth // minimal column width
		discardable := true // true if all cells in this column are empty and "soft"
		for ; this < line1; this++ {
			line = b.lines[this]
			if column >= len(line)-1 {
				break
			}
			// cell exists in this column
			c := line[column]
			// update width
			if w := c.width + b.padding; w > width {
				width = w
			}
			// update discardable
			if c.width > 0 || c.htab {
				discardable = false
			}
		}
		// column block end
		// discard empty columns if necessary
		if discardable && b.flags&DiscardEmptyColumns != 0 {
			width = 0
		}
		// format and print all columns to the right of this column
		// (we know the widths of this column and all columns to the left)
		b.widths = append(b.widths, width) // push width
		pos = b.format(pos, line0, this)
		b.widths = b.widths[0 : len(b.widths)-1] // pop width
		line0 = this
	}
	// print unprinted lines until end
	return b.writeLines(pos, line0, line1)
}
// Append text to current cell. The text is accumulated in b.buf; only the
// cell's byte count is updated here (width is computed by updateWidth).
func (b *Writer) append(text []byte) {
	b.buf = append(b.buf, text...)
	b.cell.size += len(text)
}
// Update the cell width by counting the runes appended to b.buf since the
// last update (i.e. since position b.pos).
func (b *Writer) updateWidth() {
	b.cell.width += utf8.RuneCount(b.buf[b.pos:])
	b.pos = len(b.buf)
}
// To escape a text segment, bracket it with Escape characters.
// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
// does not terminate a cell and constitutes a single character of
// width one for formatting purposes.
//
// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
const Escape = '\xff'
// Start escaped mode. ch is the character that opened the escaped
// sequence; endChar records what will terminate it: a matching Escape,
// '>' for an HTML tag, or ';' for an HTML entity.
func (b *Writer) startEscape(ch byte) {
	switch ch {
	case Escape:
		b.endChar = Escape
	case '<':
		b.endChar = '>'
	case '&':
		b.endChar = ';'
	}
}
// Terminate escaped mode. If the escaped text was an HTML tag, its width
// is assumed to be zero for formatting purposes; if it was an HTML entity,
// its width is assumed to be one. In all other cases, the width is the
// unicode width of the text.
func (b *Writer) endEscape() {
	switch b.endChar {
	case Escape:
		b.updateWidth()
		if b.flags&StripEscape == 0 {
			// The two bracketing Escape bytes were kept in the buffer;
			// exclude them from the formatting width.
			b.cell.width -= 2 // don't count the Escape chars
		}
	case '>': // tag of zero width
	case ';':
		b.cell.width++ // entity, count as one rune
	}
	// Skip width-counting over the escaped section.
	b.pos = len(b.buf)
	b.endChar = 0
}
// Terminate the current cell by adding it to the list of cells of the
// current line. Returns the number of cells in that line.
func (b *Writer) terminateCell(htab bool) int {
	b.cell.htab = htab
	last := len(b.lines) - 1
	b.lines[last] = append(b.lines[last], b.cell)
	b.cell = cell{}
	return len(b.lines[last])
}
// handlePanic is deferred by Write and Flush. It converts an osError
// panic (raised by write0) into the ordinary error *err; any other panic
// is re-raised with a tabwriter-specific message. op names the public
// entry point, for the panic message and the Flush special case.
func (b *Writer) handlePanic(err *error, op string) {
	if e := recover(); e != nil {
		if op == "Flush" {
			// If Flush ran into a panic, we still need to reset.
			b.reset()
		}
		if nerr, ok := e.(osError); ok {
			*err = nerr.err
			return
		}
		panic(fmt.Sprintf("tabwriter: panic during %s (%v)", op, e))
	}
}
// Flush should be called after the last call to [Writer.Write] to ensure
// that any data buffered in the [Writer] is written to output. Any
// incomplete escape sequence at the end is considered
// complete for formatting purposes.
func (b *Writer) Flush() error {
	return b.flush()
}
// flush is the internal version of Flush, with a named return value which we
// don't want to expose. The named value is set by the deferred handlePanic
// when a write error surfaced as a panic.
func (b *Writer) flush() (err error) {
	defer b.handlePanic(&err, "Flush")
	b.flushNoDefers()
	return nil
}
// flushNoDefers is like flush, but without a deferred handlePanic call. This
// can be called from other methods which already have their own deferred
// handlePanic calls, such as Write, and avoid the extra defer work.
func (b *Writer) flushNoDefers() {
	// add current cell if not empty
	if b.cell.size > 0 {
		if b.endChar != 0 {
			// inside escape - terminate it even if incomplete
			b.endEscape()
		}
		b.terminateCell(false)
	}
	// format contents of buffer, then drop all buffered state
	b.format(0, 0, len(b.lines))
	b.reset()
}
var hbar = []byte("---\n")

// Write writes buf to the writer b.
// The only errors returned are ones encountered
// while writing to the underlying output stream.
//
// Write splits the input into cells and lines, tracking HTML tag/entity
// and Escape sections via b.endChar; n is the start of the not-yet-appended
// tail of buf.
func (b *Writer) Write(buf []byte) (n int, err error) {
	defer b.handlePanic(&err, "Write")
	// split text into cells
	n = 0
	for i, ch := range buf {
		if b.endChar == 0 {
			// outside escape
			switch ch {
			case '\t', '\v', '\n', '\f':
				// end of cell
				b.append(buf[n:i])
				b.updateWidth()
				n = i + 1 // ch consumed
				ncells := b.terminateCell(ch == '\t')
				if ch == '\n' || ch == '\f' {
					// terminate line
					b.addLine(ch == '\f')
					if ch == '\f' || ncells == 1 {
						// A '\f' always forces a flush. Otherwise, if the previous
						// line has only one cell which does not have an impact on
						// the formatting of the following lines (the last cell per
						// line is ignored by format()), thus we can flush the
						// Writer contents.
						b.flushNoDefers()
						if ch == '\f' && b.flags&Debug != 0 {
							// indicate section break
							b.write0(hbar)
						}
					}
				}
			case Escape:
				// start of escaped sequence
				b.append(buf[n:i])
				b.updateWidth()
				n = i
				if b.flags&StripEscape != 0 {
					n++ // strip Escape
				}
				b.startEscape(Escape)
			case '<', '&':
				// possibly an html tag/entity
				if b.flags&FilterHTML != 0 {
					// begin of tag/entity
					b.append(buf[n:i])
					b.updateWidth()
					n = i
					b.startEscape(ch)
				}
			}
		} else {
			// inside escape
			if ch == b.endChar {
				// end of tag/entity
				j := i + 1
				if ch == Escape && b.flags&StripEscape != 0 {
					j = i // strip Escape
				}
				b.append(buf[n:j])
				n = i + 1 // ch consumed
				b.endEscape()
			}
		}
	}
	// append leftover text
	b.append(buf[n:])
	n = len(buf)
	return
}
// NewWriter allocates and initializes a new [Writer].
// The parameters are the same as for the Init function.
func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
	w := new(Writer)
	return w.Init(output, minwidth, tabwidth, padding, padchar, flags)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"errors"
"fmt"
"internal/fmtsort"
"io"
"reflect"
"runtime"
"strings"
"text/template/parse"
)
// maxExecDepth specifies the maximum stack depth of templates within
// templates. This limit is only practically reached by accidentally
// recursive template invocations. This limit allows us to return
// an error instead of triggering a stack overflow.
var maxExecDepth = initMaxExecDepth()
func initMaxExecDepth() int {
if runtime.GOARCH == "wasm" {
return 1000
}
return 100000
}
// state represents the state of an execution. It's not part of the
// template so that multiple executions of the same template
// can execute in parallel.
type state struct {
	tmpl  *Template
	wr    io.Writer
	node  parse.Node // current node, for errors
	vars  []variable // push-down stack of variable values.
	depth int        // the height of the stack of executing templates.
}
// variable holds the dynamic value of a variable such as $, $x etc.
type variable struct {
	name  string
	value reflect.Value
}
// push pushes a new variable on the stack.
func (s *state) push(name string, value reflect.Value) {
	v := variable{name: name, value: value}
	s.vars = append(s.vars, v)
}
// mark returns the length of the variable stack, for a later pop.
func (s *state) mark() int {
	return len(s.vars)
}
// pop pops the variable stack up to the mark previously returned by mark.
func (s *state) pop(mark int) {
	s.vars = s.vars[0:mark]
}
// setVar overwrites the last declared variable with the given name.
// Used by variable assignments.
func (s *state) setVar(name string, value reflect.Value) {
	// Search from the innermost (most recently pushed) variable outward.
	for i := len(s.vars) - 1; i >= 0; i-- {
		if v := &s.vars[i]; v.name == name {
			v.value = value
			return
		}
	}
	s.errorf("undefined variable: %s", name)
}
// setTopVar overwrites the top-nth variable on the stack. Used by range iterations.
func (s *state) setTopVar(n int, value reflect.Value) {
	s.vars[len(s.vars)-n].value = value
}
// varValue returns the value of the named variable; the innermost
// (most recently pushed) declaration wins.
func (s *state) varValue(name string) reflect.Value {
	for i := len(s.vars) - 1; i >= 0; i-- {
		if v := s.vars[i]; v.name == name {
			return v.value
		}
	}
	s.errorf("undefined variable: %s", name)
	return zero
}
// zero is the invalid reflect.Value, returned on error paths.
var zero reflect.Value

// missingValType is a sentinel type: its value marks the absent "final"
// argument of a command in a pipeline.
type missingValType struct{}

var missingVal = reflect.ValueOf(missingValType{})
var missingValReflectType = reflect.TypeFor[missingValType]()
// isMissing reports whether v is the missing-value sentinel.
func isMissing(v reflect.Value) bool {
	if !v.IsValid() {
		return false
	}
	return v.Type() == missingValReflectType
}
// at marks the state to be on node n, for error reporting.
func (s *state) at(node parse.Node) {
	s.node = node
}
// doublePercent returns the string with %'s replaced by %%, if necessary,
// so it can be used safely inside a Printf format string.
func doublePercent(str string) string {
	if !strings.Contains(str, "%") {
		// Fast path: nothing to escape.
		return str
	}
	var b strings.Builder
	b.Grow(len(str) + 1)
	for i := 0; i < len(str); i++ {
		if str[i] == '%' {
			b.WriteByte('%')
		}
		b.WriteByte(str[i])
	}
	return b.String()
}
// TODO: It would be nice if ExecError was more broken down, but
// the way ErrorContext embeds the template name makes the
// processing too clumsy.

// ExecError is the custom error type returned when Execute has an
// error evaluating its template. (If a write error occurs, the actual
// error is returned; it will not be of type ExecError.)
type ExecError struct {
	Name string // Name of template.
	Err  error  // Pre-formatted error.
}
// Error implements the error interface, reporting the pre-formatted message.
func (e ExecError) Error() string {
	return e.Err.Error()
}
// Unwrap returns the underlying error, supporting errors.Is/errors.As.
func (e ExecError) Unwrap() error {
	return e.Err
}
// errorf records an ExecError and terminates processing.
// The panic is recovered at the top level by errRecover and turned into
// the error returned from Execute. Note that the location appears before
// the template name in the formatted message.
func (s *state) errorf(format string, args ...any) {
	name := doublePercent(s.tmpl.Name())
	if s.node == nil {
		format = fmt.Sprintf("template: %s: %s", name, format)
	} else {
		location, context := s.tmpl.ErrorContext(s.node)
		format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
	}
	panic(ExecError{
		Name: s.tmpl.Name(),
		Err:  fmt.Errorf(format, args...),
	})
}
// writeError is the wrapper type used internally when Execute has an
// error writing to its output. We strip the wrapper in errRecover.
// Note that this is not an implementation of error, so it cannot escape
// from the package as an error value.
type writeError struct {
	Err error // Original error.
}
// writeError terminates processing with a writeError panic; the wrapper is
// stripped by errRecover so the caller sees the original write error.
func (s *state) writeError(err error) {
	panic(writeError{
		Err: err,
	})
}
// errRecover is the handler that turns panics into returns from the top
// level of Parse. It must be called directly from a deferred function so
// that recover() is effective.
func errRecover(errp *error) {
	e := recover()
	if e != nil {
		switch err := e.(type) {
		case runtime.Error:
			// Genuine runtime failures (nil deref etc.) propagate.
			panic(e)
		case writeError:
			*errp = err.Err // Strip the wrapper.
		case ExecError:
			*errp = err // Keep the wrapper.
		default:
			panic(e)
		}
	}
}
// ExecuteTemplate applies the template associated with t that has the given name
// to the specified data object and writes the output to wr.
// If an error occurs executing the template or writing its output,
// execution stops, but partial results may already have been written to
// the output writer.
// A template may be executed safely in parallel, although if parallel
// executions share a Writer the output may be interleaved.
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data any) error {
	tmpl := t.Lookup(name)
	if tmpl != nil {
		return tmpl.Execute(wr, data)
	}
	return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
}
// Execute applies a parsed template to the specified data object,
// and writes the output to wr.
// If an error occurs executing the template or writing its output,
// execution stops, but partial results may already have been written to
// the output writer.
// A template may be executed safely in parallel, although if parallel
// executions share a Writer the output may be interleaved.
//
// If data is a [reflect.Value], the template applies to the concrete
// value that the reflect.Value holds, as in [fmt.Print].
func (t *Template) Execute(wr io.Writer, data any) error {
	return t.execute(wr, data)
}
// execute is the internal version of Execute; the named return value is
// set by the deferred errRecover when the walk panics with an ExecError
// or writeError.
func (t *Template) execute(wr io.Writer, data any) (err error) {
	defer errRecover(&err)
	// Unwrap a reflect.Value passed directly; otherwise reflect on data.
	var value reflect.Value
	if v, ok := data.(reflect.Value); ok {
		value = v
	} else {
		value = reflect.ValueOf(data)
	}
	state := &state{
		tmpl: t,
		wr:   wr,
		vars: []variable{{"$", value}},
	}
	if t.Tree == nil || t.Root == nil {
		state.errorf("%q is an incomplete or empty template", t.Name())
	}
	state.walk(value, t.Root)
	return
}
// DefinedTemplates returns a string listing the defined templates,
// prefixed by the string "; defined templates are: ". If there are none,
// it returns the empty string. For generating an error message here
// and in [html/template].
func (t *Template) DefinedTemplates() string {
	if t.common == nil {
		return ""
	}
	t.muTmpl.RLock()
	defer t.muTmpl.RUnlock()
	var b strings.Builder
	for name, tmpl := range t.tmpl {
		if tmpl.Tree == nil || tmpl.Root == nil {
			continue // named but never actually defined
		}
		if b.Len() > 0 {
			b.WriteString(", ")
		} else {
			b.WriteString("; defined templates are: ")
		}
		fmt.Fprintf(&b, "%q", name)
	}
	return b.String()
}
// Sentinel errors for use with panic to signal early exits from range loops.
// They are consumed by deferred recovers in walkRange and never escape.
var (
	walkBreak    = errors.New("break")
	walkContinue = errors.New("continue")
)
// Walk functions step through the major pieces of the template structure,
// generating output as they go.
//
// walk dispatches on the concrete node type; dot is the current data value.
func (s *state) walk(dot reflect.Value, node parse.Node) {
	s.at(node)
	switch node := node.(type) {
	case *parse.ActionNode:
		// Do not pop variables so they persist until next end.
		// Also, if the action declares variables, don't print the result.
		val := s.evalPipeline(dot, node.Pipe)
		if len(node.Pipe.Decl) == 0 {
			s.printValue(node, val)
		}
	case *parse.BreakNode:
		panic(walkBreak)
	case *parse.CommentNode:
		// Comments produce no output.
	case *parse.ContinueNode:
		panic(walkContinue)
	case *parse.IfNode:
		s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
	case *parse.ListNode:
		for _, node := range node.Nodes {
			s.walk(dot, node)
		}
	case *parse.RangeNode:
		s.walkRange(dot, node)
	case *parse.TemplateNode:
		s.walkTemplate(dot, node)
	case *parse.TextNode:
		if _, err := s.wr.Write(node.Text); err != nil {
			s.writeError(err)
		}
	case *parse.WithNode:
		s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
	default:
		s.errorf("unknown node: %s", node)
	}
}
// walkIfOrWith walks an 'if' or 'with' node. The two control structures
// are identical in behavior except that 'with' sets dot.
func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
	defer s.pop(s.mark())
	val := s.evalPipeline(dot, pipe)
	truth, ok := isTrue(indirectInterface(val))
	if !ok {
		s.errorf("if/with can't use %v", val)
	}
	if !truth {
		if elseList != nil {
			s.walk(dot, elseList)
		}
		return
	}
	// 'with' rebinds dot to the pipeline value; 'if' leaves it alone.
	body := dot
	if typ == parse.NodeWith {
		body = val
	}
	s.walk(body, list)
}
// IsTrue reports whether the value is 'true', in the sense of not the zero of its type,
// and whether the value has a meaningful truth value. This is the definition of
// truth used by if and other such actions.
func IsTrue(val any) (truth, ok bool) {
	return isTrue(reflect.ValueOf(val))
}
func isTrue(val reflect.Value) (truth, ok bool) {
if !val.IsValid() {
// Something like var x interface{}, never set. It's a form of nil.
return false, true
}
switch val.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
truth = val.Len() > 0
case reflect.Bool:
truth = val.Bool()
case reflect.Complex64, reflect.Complex128:
truth = val.Complex() != 0
case reflect.Chan, reflect.Func, reflect.Pointer, reflect.UnsafePointer, reflect.Interface:
truth = !val.IsNil()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
truth = val.Int() != 0
case reflect.Float32, reflect.Float64:
truth = val.Float() != 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
truth = val.Uint() != 0
case reflect.Struct:
truth = true // Struct values are always true.
default:
return
}
return truth, true
}
// walkRange walks a {{range}} node: it evaluates the pipeline, iterates
// over the result according to its kind, and runs the node's body once
// per element — or the {{else}} list if the iteration is empty.
// {{break}} and {{continue}} are implemented with panics carrying the
// walkBreak/walkContinue sentinels, consumed by the deferred recovers.
//
// Fix: the "more than one variable" error for func/Seq ranging was
// missing the word "to", making it inconsistent with the identical
// message used for integer ranging above.
func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
	s.at(r)
	defer func() {
		// Consume panic(walkBreak); anything else propagates.
		if r := recover(); r != nil && r != walkBreak {
			panic(r)
		}
	}()
	defer s.pop(s.mark())
	val, _ := indirect(s.evalPipeline(dot, r.Pipe))
	// mark top of stack before any variables in the body are pushed.
	mark := s.mark()
	// oneIteration binds the range variables and walks the body once.
	oneIteration := func(index, elem reflect.Value) {
		if len(r.Pipe.Decl) > 0 {
			if r.Pipe.IsAssign {
				// With two variables, index comes first.
				// With one, we use the element.
				if len(r.Pipe.Decl) > 1 {
					s.setVar(r.Pipe.Decl[0].Ident[0], index)
				} else {
					s.setVar(r.Pipe.Decl[0].Ident[0], elem)
				}
			} else {
				// Set top var (lexically the second if there
				// are two) to the element.
				s.setTopVar(1, elem)
			}
		}
		if len(r.Pipe.Decl) > 1 {
			if r.Pipe.IsAssign {
				s.setVar(r.Pipe.Decl[1].Ident[0], elem)
			} else {
				// Set next var (lexically the first if there
				// are two) to the index.
				s.setTopVar(2, index)
			}
		}
		defer s.pop(mark)
		defer func() {
			// Consume panic(walkContinue)
			if r := recover(); r != nil && r != walkContinue {
				panic(r)
			}
		}()
		s.walk(elem, r.List)
	}
	switch val.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		if len(r.Pipe.Decl) > 1 {
			s.errorf("can't use %v to iterate over more than one variable", val)
			break
		}
		run := false
		for v := range val.Seq() {
			run = true
			// Pass element as second value, as we do for channels.
			oneIteration(reflect.Value{}, v)
		}
		if !run {
			break
		}
		return
	case reflect.Array, reflect.Slice:
		if val.Len() == 0 {
			break
		}
		for i := 0; i < val.Len(); i++ {
			oneIteration(reflect.ValueOf(i), val.Index(i))
		}
		return
	case reflect.Map:
		if val.Len() == 0 {
			break
		}
		// Sort map keys for deterministic iteration order.
		om := fmtsort.Sort(val)
		for _, m := range om {
			oneIteration(m.Key, m.Value)
		}
		return
	case reflect.Chan:
		if val.IsNil() {
			break
		}
		if val.Type().ChanDir() == reflect.SendDir {
			s.errorf("range over send-only channel %v", val)
			break
		}
		i := 0
		for ; ; i++ {
			elem, ok := val.Recv()
			if !ok {
				break
			}
			oneIteration(reflect.ValueOf(i), elem)
		}
		if i == 0 {
			break
		}
		return
	case reflect.Invalid:
		break // An invalid value is likely a nil map, etc. and acts like an empty map.
	case reflect.Func:
		if val.Type().CanSeq() {
			if len(r.Pipe.Decl) > 1 {
				s.errorf("can't use %v to iterate over more than one variable", val)
				break
			}
			run := false
			for v := range val.Seq() {
				run = true
				// Pass element as second value,
				// as we do for channels.
				oneIteration(reflect.Value{}, v)
			}
			if !run {
				break
			}
			return
		}
		if val.Type().CanSeq2() {
			run := false
			for i, v := range val.Seq2() {
				run = true
				if len(r.Pipe.Decl) > 1 {
					oneIteration(i, v)
				} else {
					// If there is only one range variable,
					// oneIteration will use the
					// second value.
					oneIteration(reflect.Value{}, i)
				}
			}
			if !run {
				break
			}
			return
		}
		fallthrough
	default:
		s.errorf("range can't iterate over %v", val)
	}
	// Empty (or invalid) iteration: run the {{else}} list if present.
	if r.ElseList != nil {
		s.walk(dot, r.ElseList)
	}
}
// walkTemplate walks a {{template "name" pipeline}} node: it looks up the
// named template and executes it with a fresh variable stack.
func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
	s.at(t)
	tmpl := s.tmpl.Lookup(t.Name)
	if tmpl == nil {
		s.errorf("template %q not defined", t.Name)
	}
	if s.depth == maxExecDepth {
		// Guard against accidental recursion blowing the goroutine stack.
		s.errorf("exceeded maximum template depth (%v)", maxExecDepth)
	}
	// Variables declared by the pipeline persist.
	dot = s.evalPipeline(dot, t.Pipe)
	newState := *s
	newState.depth++
	newState.tmpl = tmpl
	// No dynamic scoping: template invocations inherit no variables.
	newState.vars = []variable{{"$", dot}}
	newState.walk(dot, tmpl.Root)
}
// Eval functions evaluate pipelines, commands, and their elements and extract
// values from the data structure by examining fields, calling methods, and so on.
// The printing of those values happens only through walk functions.

// evalPipeline returns the value acquired by evaluating a pipeline. If the
// pipeline has a variable declaration, the variable will be pushed on the
// stack. Callers should therefore pop the stack after they are finished
// executing commands depending on the pipeline value.
func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
	if pipe == nil {
		return
	}
	s.at(pipe)
	value = missingVal
	for _, cmd := range pipe.Cmds {
		value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
		// If the object has type interface{}, dig down one level to the thing inside.
		if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
			value = value.Elem()
		}
	}
	// Bind the pipeline's declared variables (":=" pushes, "=" assigns).
	for _, variable := range pipe.Decl {
		if pipe.IsAssign {
			s.setVar(variable.Ident[0], value)
		} else {
			s.push(variable.Ident[0], value)
		}
	}
	return value
}
// notAFunction errors out if extra arguments (or a piped final value)
// were supplied to something that is not a function.
func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
	if len(args) <= 1 && isMissing(final) {
		return
	}
	s.errorf("can't give argument to non-function %s", args[0])
}
// evalCommand evaluates a single command in a pipeline. final is the value
// piped in from the previous command (missingVal for the first command).
func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
	firstWord := cmd.Args[0]
	switch n := firstWord.(type) {
	case *parse.FieldNode:
		return s.evalFieldNode(dot, n, cmd.Args, final)
	case *parse.ChainNode:
		return s.evalChainNode(dot, n, cmd.Args, final)
	case *parse.IdentifierNode:
		// Must be a function.
		return s.evalFunction(dot, n, cmd, cmd.Args, final)
	case *parse.PipeNode:
		// Parenthesized pipeline. The arguments are all inside the pipeline; final must be absent.
		s.notAFunction(cmd.Args, final)
		return s.evalPipeline(dot, n)
	case *parse.VariableNode:
		return s.evalVariableNode(dot, n, cmd.Args, final)
	}
	// The remaining node types are literals; they take no arguments.
	s.at(firstWord)
	s.notAFunction(cmd.Args, final)
	switch word := firstWord.(type) {
	case *parse.BoolNode:
		return reflect.ValueOf(word.True)
	case *parse.DotNode:
		return dot
	case *parse.NilNode:
		s.errorf("nil is not a command")
	case *parse.NumberNode:
		return s.idealConstant(word)
	case *parse.StringNode:
		return reflect.ValueOf(word.Text)
	}
	s.errorf("can't evaluate command %q", firstWord)
	panic("not reached")
}
// idealConstant is called to return the value of a number in a context where
// we don't know the type. In that case, the syntax of the number tells us
// its type, and we use Go rules to resolve. Note there is no such thing as
// a uint ideal constant in this situation - the value must be of int type.
func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
	// These are ideal constants but we don't know the type
	// and we have no context. (If it was a method argument,
	// we'd know what we need.) The syntax guides us to some extent.
	s.at(constant)
	// Case order matters: a literal may set several Is* flags at once
	// (e.g. an integer is also a valid float), and the earlier, more
	// specific cases must win.
	switch {
	case constant.IsComplex:
		return reflect.ValueOf(constant.Complex128) // incontrovertible.
	case constant.IsFloat &&
		!isHexInt(constant.Text) && !isRuneInt(constant.Text) &&
		strings.ContainsAny(constant.Text, ".eEpP"):
		// Only treat it as a float if the text actually looks like one;
		// hex and rune literals can contain e/E/p/P without being floats.
		return reflect.ValueOf(constant.Float64)
	case constant.IsInt:
		n := int(constant.Int64)
		// Reject literals that do not fit in a plain int on this platform.
		if int64(n) != constant.Int64 {
			s.errorf("%s overflows int", constant.Text)
		}
		return reflect.ValueOf(n)
	case constant.IsUint:
		// A value only representable as uint64 cannot be an ideal int.
		s.errorf("%s overflows int", constant.Text)
	}
	return zero
}
// isRuneInt reports whether s is a rune literal such as 'x'.
func isRuneInt(s string) bool {
	if s == "" {
		return false
	}
	return s[0] == '\''
}
// isHexInt reports whether s is a hexadecimal integer literal (0x/0X prefix)
// as opposed to a hex float, which would contain a p/P exponent.
func isHexInt(s string) bool {
	if len(s) <= 2 {
		return false
	}
	if s[0] != '0' || (s[1] != 'x' && s[1] != 'X') {
		return false
	}
	return !strings.ContainsAny(s, "pP")
}
// evalFieldNode evaluates a field chain such as .A.B.C rooted at dot.
func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
	s.at(field)
	// The environment and the chain's starting receiver are both dot.
	return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
}
// evalChainNode evaluates an expression of the form (pipe).Field1.Field2:
// the parenthesized node first, then the trailing field chain.
func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
	s.at(chain)
	switch {
	case len(chain.Field) == 0:
		s.errorf("internal error: no fields in evalChainNode")
	case chain.Node.Type() == parse.NodeNil:
		s.errorf("indirection through explicit nil in %s", chain)
	}
	// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
	start := s.evalArg(dot, nil, chain.Node)
	return s.evalFieldChain(dot, start, chain, chain.Field, args, final)
}
// evalVariableNode evaluates $x or $x.Field1.Field2: the variable itself,
// then any trailing field chain.
func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
	// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
	s.at(variable)
	base := s.varValue(variable.Ident[0])
	rest := variable.Ident[1:]
	if len(rest) == 0 {
		// A bare variable cannot take arguments.
		s.notAFunction(args, final)
		return base
	}
	return s.evalFieldChain(dot, base, variable, rest, args, final)
}
// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
// dot is the environment in which to evaluate arguments, while
// receiver is the value being walked along the chain.
func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
	last := len(ident) - 1
	// All but the last element are plain accesses with no arguments.
	for _, name := range ident[:last] {
		receiver = s.evalField(dot, name, node, nil, missingVal, receiver)
	}
	// The last element is the one that receives the arguments, if it is a method.
	return s.evalField(dot, ident[last], node, args, final, receiver)
}
// evalFunction looks up the named function (template-local first, then
// builtin) and invokes it with the command's arguments.
func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
	s.at(node)
	name := node.Ident
	fn, isBuiltin, ok := findFunction(name, s.tmpl)
	if !ok {
		s.errorf("%q is not a defined function", name)
	}
	return s.evalCall(dot, fn, isBuiltin, cmd, name, args, final)
}
// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
// The 'final' argument represents the return value from the preceding
// value of the pipeline, if any.
// Resolution order: method (on T or *T), then struct field, then map key;
// anything else is an error.
func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
	if !receiver.IsValid() {
		if s.tmpl.option.missingKey == mapError { // Treat invalid value as missing map key.
			s.errorf("nil data; no entry for key %q", fieldName)
		}
		return zero
	}
	// Remember the original type for error messages before indirecting.
	typ := receiver.Type()
	receiver, isNil := indirect(receiver)
	if receiver.Kind() == reflect.Interface && isNil {
		// Calling a method on a nil interface can't work. The
		// MethodByName method call below would panic.
		s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
		return zero
	}
	// Unless it's an interface, need to get to a value of type *T to guarantee
	// we see all methods of T and *T.
	ptr := receiver
	if ptr.Kind() != reflect.Interface && ptr.Kind() != reflect.Pointer && ptr.CanAddr() {
		ptr = ptr.Addr()
	}
	if method := ptr.MethodByName(fieldName); method.IsValid() {
		return s.evalCall(dot, method, false, node, fieldName, args, final)
	}
	// args[0] is the field name itself, so arguments exist only beyond index 0.
	hasArgs := len(args) > 1 || !isMissing(final)
	// It's not a method; must be a field of a struct or an element of a map.
	switch receiver.Kind() {
	case reflect.Struct:
		tField, ok := receiver.Type().FieldByName(fieldName)
		if ok {
			field, err := receiver.FieldByIndexErr(tField.Index)
			if !tField.IsExported() {
				s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
			}
			if err != nil {
				// FieldByIndexErr reports e.g. a nil embedded pointer on the path.
				s.errorf("%v", err)
			}
			// If it's a function, we must call it.
			if hasArgs {
				s.errorf("%s has arguments but cannot be invoked as function", fieldName)
			}
			return field
		}
	case reflect.Map:
		// If it's a map, attempt to use the field name as a key.
		nameVal := reflect.ValueOf(fieldName)
		if nameVal.Type().AssignableTo(receiver.Type().Key()) {
			if hasArgs {
				s.errorf("%s is not a method but has arguments", fieldName)
			}
			result := receiver.MapIndex(nameVal)
			if !result.IsValid() {
				// Missing key: behavior is controlled by the missingkey option.
				switch s.tmpl.option.missingKey {
				case mapInvalid:
					// Just use the invalid value.
				case mapZeroValue:
					result = reflect.Zero(receiver.Type().Elem())
				case mapError:
					s.errorf("map has no entry for key %q", fieldName)
				}
			}
			return result
		}
	case reflect.Pointer:
		etyp := receiver.Type().Elem()
		if etyp.Kind() == reflect.Struct {
			if _, ok := etyp.FieldByName(fieldName); !ok {
				// If there's no such field, say "can't evaluate"
				// instead of "nil pointer evaluating".
				break
			}
		}
		if isNil {
			s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
		}
	}
	s.errorf("can't evaluate field %s in type %s", fieldName, typ)
	panic("not reached")
}
// Reflected types the executor must recognize specially, computed once at
// package init: error and fmt.Stringer for printing, and reflect.Value for
// functions that traffic in unwrapped values.
var (
	errorType        = reflect.TypeFor[error]()
	fmtStringerType  = reflect.TypeFor[fmt.Stringer]()
	reflectValueType = reflect.TypeFor[reflect.Value]()
)
// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
// as the function itself.
// final is the value piped in from the previous command (missingVal if none);
// when present it becomes the call's last argument.
func (s *state) evalCall(dot, fun reflect.Value, isBuiltin bool, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
	if args != nil {
		args = args[1:] // Zeroth arg is function name/node; not passed to function.
	}
	typ := fun.Type()
	// numIn counts the actual arguments, including the piped-in final value.
	numIn := len(args)
	if !isMissing(final) {
		numIn++
	}
	numFixed := len(args)
	if typ.IsVariadic() {
		numFixed = typ.NumIn() - 1 // last arg is the variadic one.
		if numIn < numFixed {
			s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
		}
	} else if numIn != typ.NumIn() {
		s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), numIn)
	}
	if err := goodFunc(name, typ); err != nil {
		s.errorf("%v", err)
	}
	// unwrap extracts the inner value when a function returns reflect.Value.
	unwrap := func(v reflect.Value) reflect.Value {
		if v.Type() == reflectValueType {
			v = v.Interface().(reflect.Value)
		}
		return v
	}
	// Special case for builtin and/or, which short-circuit.
	if isBuiltin && (name == "and" || name == "or") {
		argType := typ.In(0)
		var v reflect.Value
		for _, arg := range args {
			v = s.evalArg(dot, argType, arg).Interface().(reflect.Value)
			if truth(v) == (name == "or") {
				// This value was already unwrapped
				// by the .Interface().(reflect.Value).
				return v
			}
		}
		if !final.Equal(missingVal) {
			// The last argument to and/or is coming from
			// the pipeline. We didn't short circuit on an earlier
			// argument, so we are going to return this one.
			// We don't have to evaluate final, but we do
			// have to check its type. Then, since we are
			// going to return it, we have to unwrap it.
			v = unwrap(s.validateType(final, argType))
		}
		return v
	}
	// Build the arg list.
	argv := make([]reflect.Value, numIn)
	// Args must be evaluated. Fixed args first.
	i := 0
	for ; i < numFixed && i < len(args); i++ {
		argv[i] = s.evalArg(dot, typ.In(i), args[i])
	}
	// Now the ... args.
	if typ.IsVariadic() {
		argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
		for ; i < len(args); i++ {
			argv[i] = s.evalArg(dot, argType, args[i])
		}
	}
	// Add final value if necessary.
	if !isMissing(final) {
		t := typ.In(typ.NumIn() - 1)
		if typ.IsVariadic() {
			if numIn-1 < numFixed {
				// The added final argument corresponds to a fixed parameter of the function.
				// Validate against the type of the actual parameter.
				t = typ.In(numIn - 1)
			} else {
				// The added final argument corresponds to the variadic part.
				// Validate against the type of the elements of the variadic slice.
				t = t.Elem()
			}
		}
		argv[i] = s.validateType(final, t)
	}
	// Special case for the "call" builtin.
	// Insert the name of the callee function as the first argument.
	if isBuiltin && name == "call" {
		var calleeName string
		if len(args) == 0 {
			// final must be present or we would have errored out above.
			calleeName = final.String()
		} else {
			calleeName = args[0].String()
		}
		argv = append([]reflect.Value{reflect.ValueOf(calleeName)}, argv...)
		fun = reflect.ValueOf(call)
	}
	v, err := safeCall(fun, argv)
	// If we have an error that is not nil, stop execution and return that
	// error to the caller.
	if err != nil {
		s.at(node)
		s.errorf("error calling %s: %w", name, err)
	}
	return unwrap(v)
}
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
func canBeNil(typ reflect.Type) bool {
	switch typ.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Pointer, reflect.Slice:
		return true
	case reflect.Struct:
		// reflect.Value is the one struct type that accepts an untyped nil.
		return typ == reflectValueType
	default:
		return false
	}
}
// validateType guarantees that the value is valid and assignable to the type.
// If it is not directly assignable, one level of interface unwrapping,
// pointer dereference, or address-of is attempted before giving up.
func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
	if !value.IsValid() {
		if typ == nil {
			// An untyped nil interface{}. Accept as a proper nil value.
			return reflect.ValueOf(nil)
		}
		if canBeNil(typ) {
			// Like above, but use the zero value of the non-nil type.
			return reflect.Zero(typ)
		}
		s.errorf("invalid value; expected %s", typ)
	}
	if typ == reflectValueType && value.Type() != typ {
		// The parameter wants a reflect.Value; box the value up.
		return reflect.ValueOf(value)
	}
	if typ != nil && !value.Type().AssignableTo(typ) {
		if value.Kind() == reflect.Interface && !value.IsNil() {
			// Try the concrete value stored inside the interface.
			value = value.Elem()
			if value.Type().AssignableTo(typ) {
				return value
			}
			// fallthrough
		}
		// Does one dereference or indirection work? We could do more, as we
		// do with method receivers, but that gets messy and method receivers
		// are much more constrained, so it makes more sense there than here.
		// Besides, one is almost always all you need.
		switch {
		case value.Kind() == reflect.Pointer && value.Type().Elem().AssignableTo(typ):
			value = value.Elem()
			if !value.IsValid() {
				s.errorf("dereference of nil pointer of type %s", typ)
			}
		case reflect.PointerTo(value.Type()).AssignableTo(typ) && value.CanAddr():
			value = value.Addr()
		default:
			s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
		}
	}
	return value
}
// evalArg evaluates the parse node n as an argument of type typ.
// Structured nodes (fields, variables, pipelines, ...) are evaluated and then
// validated against typ; literal nodes are converted directly to typ's kind.
func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	switch arg := n.(type) {
	case *parse.DotNode:
		return s.validateType(dot, typ)
	case *parse.NilNode:
		if canBeNil(typ) {
			return reflect.Zero(typ)
		}
		s.errorf("cannot assign nil to %s", typ)
	case *parse.FieldNode:
		return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, missingVal), typ)
	case *parse.VariableNode:
		return s.validateType(s.evalVariableNode(dot, arg, nil, missingVal), typ)
	case *parse.PipeNode:
		return s.validateType(s.evalPipeline(dot, arg), typ)
	case *parse.IdentifierNode:
		return s.validateType(s.evalFunction(dot, arg, arg, nil, missingVal), typ)
	case *parse.ChainNode:
		return s.validateType(s.evalChainNode(dot, arg, nil, missingVal), typ)
	}
	// n is a literal; dispatch on the kind the parameter expects.
	switch typ.Kind() {
	case reflect.Bool:
		return s.evalBool(typ, n)
	case reflect.Complex64, reflect.Complex128:
		return s.evalComplex(typ, n)
	case reflect.Float32, reflect.Float64:
		return s.evalFloat(typ, n)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return s.evalInteger(typ, n)
	case reflect.Interface:
		if typ.NumMethod() == 0 {
			return s.evalEmptyInterface(dot, n)
		}
	case reflect.Struct:
		if typ == reflectValueType {
			// The parameter is a reflect.Value; wrap the evaluated value.
			return reflect.ValueOf(s.evalEmptyInterface(dot, n))
		}
	case reflect.String:
		return s.evalString(typ, n)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return s.evalUnsignedInteger(typ, n)
	}
	s.errorf("can't handle %s for arg of type %s", n, typ)
	panic("not reached")
}
// evalBool converts a boolean literal node to a value of type typ.
func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	b, ok := n.(*parse.BoolNode)
	if !ok {
		s.errorf("expected bool; found %s", n)
		panic("not reached")
	}
	v := reflect.New(typ).Elem()
	v.SetBool(b.True)
	return v
}
// evalString converts a string literal node to a value of type typ.
func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	str, ok := n.(*parse.StringNode)
	if !ok {
		s.errorf("expected string; found %s", n)
		panic("not reached")
	}
	v := reflect.New(typ).Elem()
	v.SetString(str.Text)
	return v
}
// evalInteger converts an integer literal node to a value of type typ.
func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	num, ok := n.(*parse.NumberNode)
	if !ok || !num.IsInt {
		s.errorf("expected integer; found %s", n)
		panic("not reached")
	}
	v := reflect.New(typ).Elem()
	v.SetInt(num.Int64)
	return v
}
// evalUnsignedInteger converts an unsigned integer literal node to a value of type typ.
func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	num, ok := n.(*parse.NumberNode)
	if !ok || !num.IsUint {
		s.errorf("expected unsigned integer; found %s", n)
		panic("not reached")
	}
	v := reflect.New(typ).Elem()
	v.SetUint(num.Uint64)
	return v
}
// evalFloat converts a floating-point literal node to a value of type typ.
func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
	s.at(n)
	num, ok := n.(*parse.NumberNode)
	if !ok || !num.IsFloat {
		s.errorf("expected float; found %s", n)
		panic("not reached")
	}
	v := reflect.New(typ).Elem()
	v.SetFloat(num.Float64)
	return v
}
// evalComplex converts a complex literal node to a value of type typ.
// It reports an error (via s.errorf) when n is not a complex literal.
func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
	// Record the node position for error reporting, matching the other
	// eval* helpers (evalBool, evalFloat, ...); without this, a type
	// mismatch here would be reported at whatever node was visited last.
	s.at(n)
	if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
		value := reflect.New(typ).Elem()
		value.SetComplex(n.Complex128)
		return value
	}
	s.errorf("expected complex; found %s", n)
	panic("not reached")
}
// evalEmptyInterface evaluates n for assignment to an empty-interface
// argument, producing the natural Go value for the node.
func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
	s.at(n)
	switch node := n.(type) {
	case *parse.BoolNode:
		return reflect.ValueOf(node.True)
	case *parse.DotNode:
		return dot
	case *parse.FieldNode:
		return s.evalFieldNode(dot, node, nil, missingVal)
	case *parse.IdentifierNode:
		return s.evalFunction(dot, node, node, nil, missingVal)
	case *parse.NilNode:
		// NilNode is handled in evalArg, the only place that calls here.
		s.errorf("evalEmptyInterface: nil (can't happen)")
	case *parse.NumberNode:
		return s.idealConstant(node)
	case *parse.StringNode:
		return reflect.ValueOf(node.Text)
	case *parse.VariableNode:
		return s.evalVariableNode(dot, node, nil, missingVal)
	case *parse.PipeNode:
		return s.evalPipeline(dot, node)
	}
	s.errorf("can't handle assignment of %s to empty interface argument", n)
	panic("not reached")
}
// indirect returns the item at the end of indirection, and a bool to indicate
// if it's nil. If the returned bool is true, the returned value's kind will be
// either a pointer or interface.
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
for ; v.Kind() == reflect.Pointer || v.Kind() == reflect.Interface; v = v.Elem() {
if v.IsNil() {
return v, true
}
}
return v, false
}
// indirectInterface returns the concrete value in an interface value,
// or else the zero reflect.Value.
// That is, if v represents the interface value x, the result is the same as reflect.ValueOf(x):
// the fact that x was an interface value is forgotten.
func indirectInterface(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Interface {
return v
}
if v.IsNil() {
return reflect.Value{}
}
return v.Elem()
}
// printValue writes the textual representation of the value to the output of
// the template.
func (s *state) printValue(n parse.Node, v reflect.Value) {
	s.at(n)
	printable, ok := printableValue(v)
	if !ok {
		s.errorf("can't print %s of type %s", n, v.Type())
	}
	if _, err := fmt.Fprint(s.wr, printable); err != nil {
		s.writeError(err)
	}
}
// printableValue returns the, possibly indirected, interface value inside v that
// is best for a call to formatted printer.
func printableValue(v reflect.Value) (any, bool) {
	if v.Kind() == reflect.Pointer {
		v, _ = indirect(v) // fmt.Fprint handles nil.
	}
	if !v.IsValid() {
		return "<no value>", true
	}
	printsItself := v.Type().Implements(errorType) || v.Type().Implements(fmtStringerType)
	if !printsItself {
		ptrType := reflect.PointerTo(v.Type())
		if v.CanAddr() && (ptrType.Implements(errorType) || ptrType.Implements(fmtStringerType)) {
			// The pointer form prints itself; use it.
			v = v.Addr()
		} else if k := v.Kind(); k == reflect.Chan || k == reflect.Func {
			// Channels and functions have no useful printed form.
			return nil, false
		}
	}
	return v.Interface(), true
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"errors"
"fmt"
"io"
"net/url"
"reflect"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// FuncMap is the type of the map defining the mapping from names to functions.
// Each function must have either a single return value, or two return values of
// which the second has type error. In that case, if the second (error)
// return value evaluates to non-nil during execution, execution terminates and
// Execute returns that error.
//
// Errors returned by Execute wrap the underlying error; call [errors.AsType] to
// unwrap them.
//
// When template execution invokes a function with an argument list, that list
// must be assignable to the function's parameter types. Functions meant to
// apply to arguments of arbitrary type can use parameters of type interface{} or
// of type [reflect.Value]. Similarly, functions meant to return a result of arbitrary
// type can return interface{} or [reflect.Value].
//
// Names must be valid identifiers (see goodName); invalid names or non-function
// values cause a panic when the map is installed (see addValueFuncs).
type FuncMap map[string]any
// builtins returns the FuncMap.
// It is not a global variable so the linker can dead code eliminate
// more when this isn't called. See golang.org/issue/36021.
// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
func builtins() FuncMap {
	return FuncMap{
		// Boolean logic and invocation.
		"and":  and,
		"call": emptyCall,
		"not":  not,
		"or":   or,
		// Indexing, slicing, length.
		"index": index,
		"len":   length,
		"slice": slice,
		// Escaping.
		"html":     HTMLEscaper,
		"js":       JSEscaper,
		"urlquery": URLQueryEscaper,
		// Printing.
		"print":   fmt.Sprint,
		"printf":  fmt.Sprintf,
		"println": fmt.Sprintln,
		// Comparisons
		"eq": eq, // ==
		"ge": ge, // >=
		"gt": gt, // >
		"le": le, // <=
		"lt": lt, // <
		"ne": ne, // !=
	}
}
// builtinFuncs lazily computes & caches the builtinFuncs map.
// The sync.OnceValue wrapper makes the computation happen at most once,
// on first use, and is safe for concurrent callers.
var builtinFuncs = sync.OnceValue(func() map[string]reflect.Value {
	funcMap := builtins()
	m := make(map[string]reflect.Value, len(funcMap))
	addValueFuncs(m, funcMap)
	return m
})
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
// It panics if a name is not a valid identifier, a value is not a function,
// or a function has a bad signature.
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
	for name, fn := range in {
		if !goodName(name) {
			panic(fmt.Errorf("function name %q is not a valid identifier", name))
		}
		fv := reflect.ValueOf(fn)
		if fv.Kind() != reflect.Func {
			panic("value for " + name + " not a function")
		}
		if err := goodFunc(name, fv.Type()); err != nil {
			panic(err)
		}
		out[name] = fv
	}
}
// addFuncs adds to values the functions in funcs. It does no checking of the input -
// call addValueFuncs first.
func addFuncs(out, in FuncMap) {
	for k, v := range in {
		out[k] = v
	}
}
// goodFunc reports whether the function or method has the right result signature.
// Allowed: exactly one result, or two results where the second is error.
func goodFunc(name string, typ reflect.Type) error {
	numOut := typ.NumOut()
	if numOut == 1 {
		return nil
	}
	if numOut == 2 {
		if typ.Out(1) == errorType {
			return nil
		}
		return fmt.Errorf("invalid function signature for %s: second return value should be error; is %s", name, typ.Out(1))
	}
	return fmt.Errorf("function %s has %d return values; should be 1 or 2", name, typ.NumOut())
}
// goodName reports whether the function name is a valid identifier:
// non-empty, starting with a letter or underscore, containing only
// letters, digits, and underscores.
func goodName(name string) bool {
	if name == "" {
		return false
	}
	for i, r := range name {
		if r == '_' {
			continue
		}
		if i == 0 {
			if !unicode.IsLetter(r) {
				return false
			}
			continue
		}
		if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
			return false
		}
	}
	return true
}
// findFunction looks for a function in the template, and global map.
// Template-local functions shadow builtins of the same name.
func findFunction(name string, tmpl *Template) (v reflect.Value, isBuiltin, ok bool) {
	if tmpl != nil && tmpl.common != nil {
		tmpl.muFuncs.RLock()
		defer tmpl.muFuncs.RUnlock()
		if f := tmpl.execFuncs[name]; f.IsValid() {
			return f, false, true
		}
	}
	if f := builtinFuncs()[name]; f.IsValid() {
		return f, true, true
	}
	return reflect.Value{}, false, false
}
// prepareArg checks if value can be used as an argument of type argType, and
// converts an invalid value to appropriate zero if possible.
func prepareArg(value reflect.Value, argType reflect.Type) (reflect.Value, error) {
	if !value.IsValid() {
		if !canBeNil(argType) {
			return reflect.Value{}, fmt.Errorf("value is nil; should be of type %s", argType)
		}
		// Substitute the typed zero value for the untyped nil.
		value = reflect.Zero(argType)
	}
	switch {
	case value.Type().AssignableTo(argType):
		return value, nil
	case intLike(value.Kind()) && intLike(argType.Kind()) && value.Type().ConvertibleTo(argType):
		// Allow implicit conversion between integer kinds.
		return value.Convert(argType), nil
	}
	return reflect.Value{}, fmt.Errorf("value has type %s; should be %s", value.Type(), argType)
}
func intLike(typ reflect.Kind) bool {
switch typ {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return true
}
return false
}
// indexArg checks if a reflect.Value can be used as an index, and converts it to int if possible.
func indexArg(index reflect.Value, cap int) (int, error) {
var x int64
switch index.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x = index.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
x = int64(index.Uint())
case reflect.Invalid:
return 0, fmt.Errorf("cannot index slice/array with nil")
default:
return 0, fmt.Errorf("cannot index slice/array with type %s", index.Type())
}
if x < 0 || int(x) < 0 || int(x) > cap {
return 0, fmt.Errorf("index out of range: %d", x)
}
return int(x), nil
}
// Indexing.

// index returns the result of indexing its first argument by the following
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
// indexed item must be a map, slice, or array.
// A missing map key yields the zero value of the map's element type.
func index(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
	item = indirectInterface(item)
	if !item.IsValid() {
		return reflect.Value{}, fmt.Errorf("index of untyped nil")
	}
	for _, index := range indexes {
		index = indirectInterface(index)
		var isNil bool
		// Dereference pointers/interfaces before each indexing step.
		if item, isNil = indirect(item); isNil {
			return reflect.Value{}, fmt.Errorf("index of nil pointer")
		}
		switch item.Kind() {
		case reflect.Array, reflect.Slice, reflect.String:
			x, err := indexArg(index, item.Len())
			if err != nil {
				return reflect.Value{}, err
			}
			item = item.Index(x)
		case reflect.Map:
			index, err := prepareArg(index, item.Type().Key())
			if err != nil {
				return reflect.Value{}, err
			}
			if x := item.MapIndex(index); x.IsValid() {
				item = x
			} else {
				item = reflect.Zero(item.Type().Elem())
			}
		case reflect.Invalid:
			// the loop holds invariant: item.IsValid()
			panic("unreachable")
		default:
			return reflect.Value{}, fmt.Errorf("can't index item of type %s", item.Type())
		}
	}
	return item, nil
}
// Slicing.

// slice returns the result of slicing its first argument by the remaining
// arguments. Thus "slice x 1 2" is, in Go syntax, x[1:2], while "slice x"
// is x[:], "slice x 1" is x[1:], and "slice x 1 2 3" is x[1:2:3]. The first
// argument must be a string, slice, or array.
func slice(item reflect.Value, indexes ...reflect.Value) (reflect.Value, error) {
	item = indirectInterface(item)
	if !item.IsValid() {
		return reflect.Value{}, fmt.Errorf("slice of untyped nil")
	}
	var isNil bool
	if item, isNil = indirect(item); isNil {
		return reflect.Value{}, fmt.Errorf("slice of nil pointer")
	}
	if len(indexes) > 3 {
		return reflect.Value{}, fmt.Errorf("too many slice indexes: %d", len(indexes))
	}
	// cap is the largest index any argument may take (inclusive).
	var cap int
	switch item.Kind() {
	case reflect.String:
		if len(indexes) == 3 {
			return reflect.Value{}, fmt.Errorf("cannot 3-index slice a string")
		}
		// Strings have no capacity distinct from their length.
		cap = item.Len()
	case reflect.Array, reflect.Slice:
		cap = item.Cap()
	default:
		return reflect.Value{}, fmt.Errorf("can't slice item of type %s", item.Type())
	}
	// set default values for cases item[:], item[i:].
	idx := [3]int{0, item.Len()}
	for i, index := range indexes {
		x, err := indexArg(index, cap)
		if err != nil {
			return reflect.Value{}, err
		}
		idx[i] = x
	}
	// given item[i:j], make sure i <= j.
	if idx[0] > idx[1] {
		return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[0], idx[1])
	}
	if len(indexes) < 3 {
		return item.Slice(idx[0], idx[1]), nil
	}
	// given item[i:j:k], make sure i <= j <= k.
	if idx[1] > idx[2] {
		return reflect.Value{}, fmt.Errorf("invalid slice index: %d > %d", idx[1], idx[2])
	}
	return item.Slice3(idx[0], idx[1], idx[2]), nil
}
// Length

// length returns the length of the item, with an error if it has no defined length.
func length(item reflect.Value) (int, error) {
	item, isNil := indirect(item)
	if isNil {
		return 0, fmt.Errorf("len of nil pointer")
	}
	switch item.Kind() {
	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
		return item.Len(), nil
	default:
		return 0, fmt.Errorf("len of type %s", item.Type())
	}
}
// Function invocation
func emptyCall(fn reflect.Value, args ...reflect.Value) reflect.Value {
panic("unreachable") // implemented as a special case in evalCall
}
// call returns the result of evaluating the first argument as a function.
// The function must return 1 result, or 2 results, the second of which is an error.
// name is the callee's name as written in the template, used in error messages.
func call(name string, fn reflect.Value, args ...reflect.Value) (reflect.Value, error) {
	fn = indirectInterface(fn)
	if !fn.IsValid() {
		return reflect.Value{}, fmt.Errorf("call of nil")
	}
	typ := fn.Type()
	if typ.Kind() != reflect.Func {
		return reflect.Value{}, fmt.Errorf("non-function %s of type %s", name, typ)
	}
	if err := goodFunc(name, typ); err != nil {
		return reflect.Value{}, err
	}
	numIn := typ.NumIn()
	// dddType is the element type of the variadic parameter, if any.
	var dddType reflect.Type
	if typ.IsVariadic() {
		if len(args) < numIn-1 {
			return reflect.Value{}, fmt.Errorf("wrong number of args for %s: got %d want at least %d", name, len(args), numIn-1)
		}
		dddType = typ.In(numIn - 1).Elem()
	} else {
		if len(args) != numIn {
			return reflect.Value{}, fmt.Errorf("wrong number of args for %s: got %d want %d", name, len(args), numIn)
		}
	}
	argv := make([]reflect.Value, len(args))
	for i, arg := range args {
		arg = indirectInterface(arg)
		// Compute the expected type. Clumsy because of variadics.
		argType := dddType
		if !typ.IsVariadic() || i < numIn-1 {
			argType = typ.In(i)
		}
		var err error
		if argv[i], err = prepareArg(arg, argType); err != nil {
			return reflect.Value{}, fmt.Errorf("arg %d: %w", i, err)
		}
	}
	return safeCall(fn, argv)
}
// safeCall runs fun.Call(args), and returns the resulting value and error, if
// any. If the call panics, the panic value is returned as an error.
func safeCall(fun reflect.Value, args []reflect.Value) (val reflect.Value, err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(error); ok {
err = e
} else {
err = fmt.Errorf("%v", r)
}
}
}()
ret := fun.Call(args)
if len(ret) == 2 && !ret[1].IsNil() {
return ret[0], ret[1].Interface().(error)
}
return ret[0], nil
}
// Boolean logic.

// truth reports whether arg is "true" in the template sense (non-zero,
// non-empty), after unwrapping any interface.
func truth(arg reflect.Value) bool {
	ok, _ := isTrue(indirectInterface(arg))
	return ok
}
// and computes the Boolean AND of its arguments, returning
// the first false argument it encounters, or the last argument.
func and(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
panic("unreachable") // implemented as a special case in evalCall
}
// or computes the Boolean OR of its arguments, returning
// the first true argument it encounters, or the last argument.
func or(arg0 reflect.Value, args ...reflect.Value) reflect.Value {
panic("unreachable") // implemented as a special case in evalCall
}
// not returns the Boolean negation of its argument.
func not(arg reflect.Value) bool {
	if truth(arg) {
		return false
	}
	return true
}
// Comparison.

// TODO: Perhaps allow comparison between signed and unsigned integers.

var (
	// errBadComparisonType: the operand's kind cannot take part in this comparison
	// (see basicKind and the ordered comparisons).
	errBadComparisonType = errors.New("invalid type for comparison")
	// errNoComparison: eq was invoked with only one operand.
	errNoComparison = errors.New("missing argument for comparison")
)
// kind classifies a reflect.Kind into one of the coarse categories the
// comparison builtins (eq, lt, ...) operate on.
type kind int

const (
	invalidKind kind = iota
	boolKind
	complexKind
	intKind
	floatKind
	stringKind
	uintKind
)
// basicKind maps v's reflect.Kind to a comparison category, or reports
// errBadComparisonType for kinds that cannot be compared.
func basicKind(v reflect.Value) (kind, error) {
	switch v.Kind() {
	case reflect.Bool:
		return boolKind, nil
	case reflect.String:
		return stringKind, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return intKind, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return uintKind, nil
	case reflect.Float32, reflect.Float64:
		return floatKind, nil
	case reflect.Complex64, reflect.Complex128:
		return complexKind, nil
	default:
		return invalidKind, errBadComparisonType
	}
}
// isNil returns true if v is the zero reflect.Value, or nil of its type.
func isNil(v reflect.Value) bool {
if !v.IsValid() {
return true
}
switch v.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Pointer, reflect.Slice:
return v.IsNil()
}
return false
}
// canCompare reports whether v1 and v2 are both the same kind, or one is nil.
// Called only when dealing with nillable types, or there's about to be an error.
func canCompare(v1, v2 reflect.Value) bool {
k1 := v1.Kind()
k2 := v2.Kind()
if k1 == k2 {
return true
}
// We know the type can be compared to nil.
return k1 == reflect.Invalid || k2 == reflect.Invalid
}
// eq evaluates the comparison a == b || a == c || ...
// Mixed signed/unsigned integer operands are compared by value; other
// mismatched kinds are an error. Nillable operands compare equal when both
// are nil.
func eq(arg1 reflect.Value, arg2 ...reflect.Value) (bool, error) {
	arg1 = indirectInterface(arg1)
	if len(arg2) == 0 {
		return false, errNoComparison
	}
	// The kind error is deliberately ignored here; non-basic kinds fall
	// through to the default case below.
	k1, _ := basicKind(arg1)
	for _, arg := range arg2 {
		arg = indirectInterface(arg)
		k2, _ := basicKind(arg)
		truth := false
		if k1 != k2 {
			// Special case: Can compare integer values regardless of type's sign.
			switch {
			case k1 == intKind && k2 == uintKind:
				truth = arg1.Int() >= 0 && uint64(arg1.Int()) == arg.Uint()
			case k1 == uintKind && k2 == intKind:
				truth = arg.Int() >= 0 && arg1.Uint() == uint64(arg.Int())
			default:
				if arg1.IsValid() && arg.IsValid() {
					return false, fmt.Errorf("incompatible types for comparison: %v and %v", arg1.Type(), arg.Type())
				}
			}
		} else {
			switch k1 {
			case boolKind:
				truth = arg1.Bool() == arg.Bool()
			case complexKind:
				truth = arg1.Complex() == arg.Complex()
			case floatKind:
				truth = arg1.Float() == arg.Float()
			case intKind:
				truth = arg1.Int() == arg.Int()
			case stringKind:
				truth = arg1.String() == arg.String()
			case uintKind:
				truth = arg1.Uint() == arg.Uint()
			default:
				// Non-basic kinds: nil-aware comparison, then Go equality.
				if !canCompare(arg1, arg) {
					return false, fmt.Errorf("non-comparable types %s: %v, %s: %v", arg1, arg1.Type(), arg.Type(), arg)
				}
				if isNil(arg1) || isNil(arg) {
					truth = isNil(arg) == isNil(arg1)
				} else {
					if !arg.Type().Comparable() {
						return false, fmt.Errorf("non-comparable type %s: %v", arg, arg.Type())
					}
					truth = arg1.Interface() == arg.Interface()
				}
			}
		}
		if truth {
			return true, nil
		}
	}
	return false, nil
}
// ne evaluates the comparison a != b.
func ne(arg1, arg2 reflect.Value) (bool, error) {
	// != is simply the negation of ==; any error from eq passes through.
	isEqual, err := eq(arg1, arg2)
	return !isEqual, err
}
// lt evaluates the comparison a < b.
// Like eq, it permits ordering a signed against an unsigned integer;
// bool and complex values cannot be ordered and yield errBadComparisonType.
func lt(arg1, arg2 reflect.Value) (bool, error) {
	arg1 = indirectInterface(arg1)
	k1, err := basicKind(arg1)
	if err != nil {
		return false, err
	}
	arg2 = indirectInterface(arg2)
	k2, err := basicKind(arg2)
	if err != nil {
		return false, err
	}
	truth := false
	if k1 != k2 {
		// Special case: Can compare integer values regardless of type's sign.
		switch {
		case k1 == intKind && k2 == uintKind:
			// A negative int is less than any uint.
			truth = arg1.Int() < 0 || uint64(arg1.Int()) < arg2.Uint()
		case k1 == uintKind && k2 == intKind:
			// A uint is less than an int only if the int is non-negative.
			truth = arg2.Int() >= 0 && arg1.Uint() < uint64(arg2.Int())
		default:
			return false, fmt.Errorf("incompatible types for comparison: %v and %v", arg1.Type(), arg2.Type())
		}
	} else {
		switch k1 {
		case boolKind, complexKind:
			return false, errBadComparisonType
		case floatKind:
			truth = arg1.Float() < arg2.Float()
		case intKind:
			truth = arg1.Int() < arg2.Int()
		case stringKind:
			truth = arg1.String() < arg2.String()
		case uintKind:
			truth = arg1.Uint() < arg2.Uint()
		default:
			// basicKind returned without error, so this is unreachable.
			panic("invalid kind")
		}
	}
	return truth, nil
}
// le evaluates the comparison a <= b.
func le(arg1, arg2 reflect.Value) (bool, error) {
	// <= is < or ==.
	lessThan, err := lt(arg1, arg2)
	if lessThan || err != nil {
		return lessThan, err
	}
	return eq(arg1, arg2)
}
// gt evaluates the comparison a > b.
func gt(arg1, arg2 reflect.Value) (bool, error) {
	// a > b is exactly !(a <= b).
	lessOrEqual, err := le(arg1, arg2)
	if err == nil {
		return !lessOrEqual, nil
	}
	return false, err
}
// ge evaluates the comparison a >= b.
func ge(arg1, arg2 reflect.Value) (bool, error) {
	// a >= b is exactly !(a < b).
	lessThan, err := lt(arg1, arg2)
	if err == nil {
		return !lessThan, nil
	}
	return false, err
}
// HTML escaping.

// Replacement byte sequences used by HTMLEscape. Where HTML offers a choice
// of entity spellings, the shorter (or more widely supported) one is used.
var (
	htmlQuot = []byte("&#34;") // shorter than "&quot;"
	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
	htmlAmp  = []byte("&amp;")
	htmlLt   = []byte("&lt;")
	htmlGt   = []byte("&gt;")
	htmlNull = []byte("\uFFFD") // NUL is replaced by the Unicode replacement character
)
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
// It replaces NUL, quote characters, '&', '<', and '>' with their HTML
// escape sequences and copies every other byte through unchanged.
// Errors from w are ignored.
func HTMLEscape(w io.Writer, b []byte) {
	last := 0 // start of the pending run of bytes that need no escaping
	for i, c := range b {
		var html []byte
		switch c {
		case '\000':
			html = htmlNull
		case '"':
			html = htmlQuot
		case '\'':
			html = htmlApos
		case '&':
			html = htmlAmp
		case '<':
			html = htmlLt
		case '>':
			html = htmlGt
		default:
			// Ordinary byte: extend the pending literal run.
			continue
		}
		// Flush the literal run preceding this byte, then its escape.
		w.Write(b[last:i])
		w.Write(html)
		last = i + 1
	}
	// Flush whatever literal tail remains.
	w.Write(b[last:])
}
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
func HTMLEscapeString(s string) string {
	// Fast path: if no byte needs escaping, return the input unchanged
	// and avoid any allocation.
	if !strings.ContainsAny(s, "'\"&<>\000") {
		return s
	}
	var sb strings.Builder
	HTMLEscape(&sb, []byte(s))
	return sb.String()
}
// HTMLEscaper returns the escaped HTML equivalent of the textual
// representation of its arguments.
func HTMLEscaper(args ...any) string {
return HTMLEscapeString(evalArgs(args))
}
// JavaScript escaping.

// Replacement sequences used by JSEscape. Angle brackets, '&', and '='
// are written as \u00XX escapes so the output is safe to embed in HTML
// script contexts as well.
var (
	jsLowUni = []byte(`\u00`)            // prefix for control-character escapes; hex digits follow
	hex      = []byte("0123456789ABCDEF")
	jsBackslash = []byte(`\\`)
	jsApos      = []byte(`\'`)
	jsQuot      = []byte(`\"`)
	jsLt        = []byte(`\u003C`)
	jsGt        = []byte(`\u003E`)
	jsAmp       = []byte(`\u0026`)
	jsEq        = []byte(`\u003D`)
)
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
// Special ASCII punctuation gets dedicated escapes, other special ASCII bytes
// become \u00XX, and non-printable multi-byte runes become \uXXXX.
// Errors from w are ignored.
func JSEscape(w io.Writer, b []byte) {
	last := 0 // start of the pending run of bytes that need no escaping
	for i := 0; i < len(b); i++ {
		c := b[i]
		if !jsIsSpecial(rune(c)) {
			// fast path: nothing to do
			continue
		}
		// Flush the literal run before this special byte.
		w.Write(b[last:i])
		if c < utf8.RuneSelf {
			// Quotes, slashes and angle brackets get quoted.
			// Control characters get written as \u00XX.
			switch c {
			case '\\':
				w.Write(jsBackslash)
			case '\'':
				w.Write(jsApos)
			case '"':
				w.Write(jsQuot)
			case '<':
				w.Write(jsLt)
			case '>':
				w.Write(jsGt)
			case '&':
				w.Write(jsAmp)
			case '=':
				w.Write(jsEq)
			default:
				// Control character: emit \u00 followed by two hex digits.
				w.Write(jsLowUni)
				t, b := c>>4, c&0x0f
				w.Write(hex[t : t+1])
				w.Write(hex[b : b+1])
			}
		} else {
			// Unicode rune.
			r, size := utf8.DecodeRune(b[i:])
			if unicode.IsPrint(r) {
				w.Write(b[i : i+size])
			} else {
				fmt.Fprintf(w, "\\u%04X", r)
			}
			// Skip the remaining bytes of this rune; the loop's i++ adds one.
			i += size - 1
		}
		last = i + 1
	}
	// Flush the literal tail.
	w.Write(b[last:])
}
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
func JSEscapeString(s string) string {
	// Fast path: no special rune means no allocation is needed.
	if strings.IndexFunc(s, jsIsSpecial) < 0 {
		return s
	}
	var sb strings.Builder
	JSEscape(&sb, []byte(s))
	return sb.String()
}
// jsIsSpecial reports whether r must be escaped by JSEscape: JavaScript
// punctuation with dedicated escapes, ASCII control characters, or any
// rune outside the single-byte (ASCII) range.
func jsIsSpecial(r rune) bool {
	if r == '\\' || r == '\'' || r == '"' || r == '<' || r == '>' || r == '&' || r == '=' {
		return true
	}
	return r < ' ' || r >= utf8.RuneSelf
}
// JSEscaper returns the escaped JavaScript equivalent of the textual
// representation of its arguments.
func JSEscaper(args ...any) string {
return JSEscapeString(evalArgs(args))
}
// URLQueryEscaper returns the escaped value of the textual representation of
// its arguments in a form suitable for embedding in a URL query.
func URLQueryEscaper(args ...any) string {
	formatted := evalArgs(args)
	return url.QueryEscape(formatted)
}
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
//
//	fmt.Sprint(args...)
//
// except that each argument is indirected (if a pointer), as required,
// using the same rules as the default string evaluation during template
// execution.
func evalArgs(args []any) string {
	// Fast path for the overwhelmingly common single-string case.
	if len(args) == 1 {
		if s, ok := args[0].(string); ok {
			return s
		}
	}
	// Replace each argument with its printable (indirected) form in place.
	for i, arg := range args {
		if a, ok := printableValue(reflect.ValueOf(arg)); ok {
			args[i] = a
		} // else let fmt do its thing
	}
	return fmt.Sprint(args...)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Helper functions to make constructing templates easier.
package template
import (
"fmt"
"io/fs"
"os"
"path"
"path/filepath"
)
// Functions and methods to parse templates.
// Must is a helper that wraps a call to a function returning ([*Template], error)
// and panics if the error is non-nil. It is intended for use in variable
// initializations such as
//
//	var t = template.Must(template.New("name").Parse("text"))
func Must(t *Template, err error) *Template {
	if err == nil {
		return t
	}
	panic(err)
}
// ParseFiles creates a new [Template] and parses the template definitions from
// the named files. The returned template's name will have the base name and
// parsed contents of the first file. There must be at least one file.
// If an error occurs, parsing stops and the returned *Template is nil.
//
// When parsing multiple files with the same name in different directories,
// the last one mentioned will be the one that results.
// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template
// named "foo", while "a/foo" is unavailable.
func ParseFiles(filenames ...string) (*Template, error) {
	// nil means parseFiles creates the root template from the first file;
	// readFileOS reads from the host file system.
	return parseFiles(nil, readFileOS, filenames...)
}
// ParseFiles parses the named files and associates the resulting templates with
// t. If an error occurs, parsing stops and the returned template is nil;
// otherwise it is t. There must be at least one file.
// Since the templates created by ParseFiles are named by the base
// (see [filepath.Base]) names of the argument files, t should usually have the
// name of one of the (base) names of the files. If it does not, depending on
// t's contents before calling ParseFiles, t.Execute may fail. In that
// case use t.ExecuteTemplate to execute a valid template.
//
// When parsing multiple files with the same name in different directories,
// the last one mentioned will be the one that results.
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
	// Ensure t's shared state exists before associating new templates with it.
	t.init()
	return parseFiles(t, readFileOS, filenames...)
}
// parseFiles is the helper for the method and function. If the argument
// template is nil, it is created from the first file.
// readFile abstracts the file source (OS file system or an fs.FS) and
// returns the template's base name along with the file contents.
func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) {
	if len(filenames) == 0 {
		// Not really a problem, but be consistent.
		return nil, fmt.Errorf("template: no files named in call to ParseFiles")
	}
	for _, filename := range filenames {
		name, b, err := readFile(filename)
		if err != nil {
			return nil, err
		}
		s := string(b)
		// First template becomes return value if not already defined,
		// and we use that one for subsequent New calls to associate
		// all the templates together. Also, if this file has the same name
		// as t, this file becomes the contents of t, so
		// t, err := New(name).Funcs(xxx).ParseFiles(name)
		// works. Otherwise we create a new template associated with t.
		var tmpl *Template
		if t == nil {
			t = New(name)
		}
		if name == t.Name() {
			tmpl = t
		} else {
			tmpl = t.New(name)
		}
		_, err = tmpl.Parse(s)
		if err != nil {
			return nil, err
		}
	}
	// All files parsed successfully; t is the root of the association.
	return t, nil
}
// ParseGlob creates a new [Template] and parses the template definitions from
// the files identified by the pattern. The files are matched according to the
// semantics of [filepath.Match], and the pattern must match at least one file.
// The returned template will have the [filepath.Base] name and (parsed)
// contents of the first file matched by the pattern. ParseGlob is equivalent to
// calling [ParseFiles] with the list of files matched by the pattern.
//
// When parsing multiple files with the same name in different directories,
// the last one mentioned will be the one that results.
func ParseGlob(pattern string) (*Template, error) {
	// nil: parseGlob creates the root template from the first matched file.
	return parseGlob(nil, pattern)
}
// ParseGlob parses the template definitions in the files identified by the
// pattern and associates the resulting templates with t. The files are matched
// according to the semantics of [filepath.Match], and the pattern must match at
// least one file. ParseGlob is equivalent to calling [Template.ParseFiles] with
// the list of files matched by the pattern.
//
// When parsing multiple files with the same name in different directories,
// the last one mentioned will be the one that results.
func (t *Template) ParseGlob(pattern string) (*Template, error) {
	// Ensure t's shared state exists before associating new templates with it.
	t.init()
	return parseGlob(t, pattern)
}
// parseGlob is the implementation of the function and method ParseGlob.
// It expands the pattern, rejects patterns that match nothing, and hands
// the file list to parseFiles.
func parseGlob(t *Template, pattern string) (*Template, error) {
	filenames, err := filepath.Glob(pattern)
	switch {
	case err != nil:
		return nil, err
	case len(filenames) == 0:
		return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
	}
	return parseFiles(t, readFileOS, filenames...)
}
// ParseFS is like [Template.ParseFiles] or [Template.ParseGlob] but reads from the file system fsys
// instead of the host operating system's file system.
// It accepts a list of glob patterns (see [path.Match]).
// (Note that most file names serve as glob patterns matching only themselves.)
func ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
	// nil: parseFS creates the root template from the first matched file.
	return parseFS(nil, fsys, patterns)
}
// ParseFS is like [Template.ParseFiles] or [Template.ParseGlob] but reads from the file system fsys
// instead of the host operating system's file system.
// It accepts a list of glob patterns (see [path.Match]).
// (Note that most file names serve as glob patterns matching only themselves.)
func (t *Template) ParseFS(fsys fs.FS, patterns ...string) (*Template, error) {
	// Ensure t's shared state exists before associating new templates with it.
	t.init()
	return parseFS(t, fsys, patterns)
}
// parseFS is the fs.FS-based implementation behind ParseFS: it expands every
// pattern with fs.Glob, requires each pattern to match at least one file, and
// parses the combined file list via parseFiles.
func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) {
	var filenames []string
	for _, pattern := range patterns {
		matches, err := fs.Glob(fsys, pattern)
		if err != nil {
			return nil, err
		}
		if len(matches) == 0 {
			return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
		}
		filenames = append(filenames, matches...)
	}
	return parseFiles(t, readFileFS(fsys), filenames...)
}
// readFileOS reads file from the host operating system's file system and
// returns its base name and contents.
func readFileOS(file string) (string, []byte, error) {
	b, err := os.ReadFile(file)
	return filepath.Base(file), b, err
}
// readFileFS returns a reader function over fsys with the same contract as
// readFileOS: given a file name it returns the slash-path base name and the
// file contents.
func readFileFS(fsys fs.FS) func(string) (string, []byte, error) {
	return func(file string) (string, []byte, error) {
		b, err := fs.ReadFile(fsys, file)
		return path.Base(file), b, err
	}
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the code to handle template options.
package template
import "strings"
// missingKeyAction defines how to respond to indexing a map with a key that is not present.
// It is set from the "missingkey" option string; see Option.
type missingKeyAction int

const (
	mapInvalid   missingKeyAction = iota // Return an invalid reflect.Value.
	mapZeroValue                         // Return the zero value for the map element.
	mapError                             // Error out
)
// option holds the per-template execution options settable via Option.
type option struct {
	missingKey missingKeyAction // behavior for map indexing with a missing key
}
// Option sets options for the template. Options are described by
// strings, either a simple string or "key=value". There can be at
// most one equals sign in an option string. If the option string
// is unrecognized or otherwise invalid, Option panics.
//
// Known options:
//
//	missingkey: Control the behavior during execution if a map is
//	indexed with a key that is not present in the map.
//
//	"missingkey=default" or "missingkey=invalid"
//		The default behavior: Do nothing and continue execution.
//		If printed, the result of the index operation is the string
//		"<no value>".
//	"missingkey=zero"
//		The operation returns the zero value for the map type's element.
//	"missingkey=error"
//		Execution stops immediately with an error.
func (t *Template) Option(opt ...string) *Template {
	t.init()
	// Each option string is validated and applied independently;
	// setOption panics on the first invalid one.
	for _, s := range opt {
		t.setOption(s)
	}
	return t
}
// setOption parses and applies a single option string, panicking if the
// string is empty or not a recognized "key=value" option.
func (t *Template) setOption(opt string) {
	if opt == "" {
		panic("empty option string")
	}
	// Only "key=value" forms with a known key are accepted.
	if key, value, ok := strings.Cut(opt, "="); ok && key == "missingkey" {
		switch value {
		case "invalid", "default":
			t.option.missingKey = mapInvalid
			return
		case "zero":
			t.option.missingKey = mapZeroValue
			return
		case "error":
			t.option.missingKey = mapError
			return
		}
	}
	panic("unrecognized option: " + opt)
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package parse
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
// item represents a token or text string returned from the scanner.
type item struct {
	typ  itemType // The type of this item.
	pos  Pos      // The starting position, in bytes, of this item in the input string.
	val  string   // The value of this item.
	line int      // The line number at the start of this item.
}
// String renders the item for debugging: EOF and errors specially, keywords
// in angle brackets, and long values truncated to ten quoted characters.
func (i item) String() string {
	switch {
	case i.typ == itemEOF:
		return "EOF"
	case i.typ == itemError:
		return i.val
	case i.typ > itemKeyword:
		return fmt.Sprintf("<%s>", i.val)
	}
	if len(i.val) > 10 {
		return fmt.Sprintf("%.10q...", i.val)
	}
	return fmt.Sprintf("%q", i.val)
}
// itemType identifies the type of lex items.
// The ordering is significant: all keyword item types are declared after
// itemKeyword, so "typ > itemKeyword" tests for any keyword.
type itemType int

const (
	itemError        itemType = iota // error occurred; value is text of error
	itemBool                         // boolean constant
	itemChar                         // printable ASCII character; grab bag for comma etc.
	itemCharConstant                 // character constant
	itemComment                      // comment text
	itemComplex                      // complex constant (1+2i); imaginary is just a number
	itemAssign                       // equals ('=') introducing an assignment
	itemDeclare                      // colon-equals (':=') introducing a declaration
	itemEOF
	itemField      // alphanumeric identifier starting with '.'
	itemIdentifier // alphanumeric identifier not starting with '.'
	itemLeftDelim  // left action delimiter
	itemLeftParen  // '(' inside action
	itemNumber     // simple number, including imaginary
	itemPipe       // pipe symbol
	itemRawString  // raw quoted string (includes quotes)
	itemRightDelim // right action delimiter
	itemRightParen // ')' inside action
	itemSpace      // run of spaces separating arguments
	itemString     // quoted string (includes quotes)
	itemText       // plain text
	itemVariable   // variable starting with '$', such as '$' or '$1' or '$hello'
	// Keywords appear after all the rest.
	itemKeyword  // used only to delimit the keywords
	itemBlock    // block keyword
	itemBreak    // break keyword
	itemContinue // continue keyword
	itemDot      // the cursor, spelled '.'
	itemDefine   // define keyword
	itemElse     // else keyword
	itemEnd      // end keyword
	itemIf       // if keyword
	itemNil      // the untyped nil constant, easiest to treat as a keyword
	itemRange    // range keyword
	itemTemplate // template keyword
	itemWith     // with keyword
)
// key maps each keyword's spelling to its item type. Only words present
// here are lexed as keywords (see lexIdentifier).
var key = map[string]itemType{
	".":        itemDot,
	"block":    itemBlock,
	"break":    itemBreak,
	"continue": itemContinue,
	"define":   itemDefine,
	"else":     itemElse,
	"end":      itemEnd,
	"if":       itemIf,
	"range":    itemRange,
	"nil":      itemNil,
	"template": itemTemplate,
	"with":     itemWith,
}

// eof is the sentinel rune returned by lexer.next at end of input.
const eof = -1
// Trimming spaces.
// If the action begins "{{- " rather than "{{", then all space/tab/newlines
// preceding the action are trimmed; conversely if it ends " -}}" the
// leading spaces are trimmed. This is done entirely in the lexer; the
// parser never sees it happen. We require an ASCII space (' ', \t, \r, \n)
// to be present to avoid ambiguity with things like "{{-3}}". It reads
// better with the space present anyway. For simplicity, only ASCII
// does the job.
const (
	spaceChars    = " \t\r\n"  // These are the space characters defined by Go itself.
	trimMarker    = '-'        // Attached to left/right delimiter, trims trailing spaces from preceding/following text.
	trimMarkerLen = Pos(1 + 1) // marker plus space before or after
)
// stateFn represents the state of the scanner as a function that returns the next state.
// A nil return terminates the scan loop in nextItem, which then delivers l.item.
type stateFn func(*lexer) stateFn

// lexer holds the state of the scanner.
// Items are produced one at a time: nextItem runs the state machine until
// a state function stores an item and returns nil.
type lexer struct {
	name         string // the name of the input; used only for error reports
	input        string // the string being scanned
	leftDelim    string // start of action marker
	rightDelim   string // end of action marker
	pos          Pos    // current position in the input
	start        Pos    // start position of this item
	atEOF        bool   // we have hit the end of input and returned eof
	parenDepth   int    // nesting depth of ( ) exprs
	line         int    // 1+number of newlines seen
	startLine    int    // start line of this item
	item         item   // item to return to parser
	insideAction bool   // are we inside an action?
	options      lexOptions
}

// lexOptions control behavior of the lexer. All default to false.
type lexOptions struct {
	emitComment bool // emit itemComment tokens.
	breakOK     bool // break keyword allowed
	continueOK  bool // continue keyword allowed
}
// next returns the next rune in the input, advancing pos past it and
// updating the line count. At end of input it sets atEOF and returns eof.
func (l *lexer) next() rune {
	if int(l.pos) >= len(l.input) {
		l.atEOF = true
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.pos += Pos(w)
	if r == '\n' {
		l.line++
	}
	return r
}
// peek returns but does not consume the next rune in the input.
// Implemented as next followed by backup, so it may set atEOF.
func (l *lexer) peek() rune {
	r := l.next()
	l.backup()
	return r
}
// backup steps back one rune.
// It is a no-op once eof has been returned (atEOF) or at the very start,
// so backing up after reading eof does not un-consume a real rune.
func (l *lexer) backup() {
	if !l.atEOF && l.pos > 0 {
		r, w := utf8.DecodeLastRuneInString(l.input[:l.pos])
		l.pos -= Pos(w)
		// Correct newline count.
		if r == '\n' {
			l.line--
		}
	}
}
// thisItem returns the item at the current input point with the specified type
// and advances the input.
// The item's value is the text between start and pos; start/startLine are
// reset so the next item begins here.
func (l *lexer) thisItem(t itemType) item {
	i := item{t, l.start, l.input[l.start:l.pos], l.startLine}
	l.start = l.pos
	l.startLine = l.line
	return i
}
// emit passes the trailing text as an item back to the parser.
func (l *lexer) emit(t itemType) stateFn {
	return l.emitItem(l.thisItem(t))
}

// emitItem passes the specified item to the parser.
// Returning nil stops the state loop in nextItem, which then delivers l.item.
func (l *lexer) emitItem(i item) stateFn {
	l.item = i
	return nil
}
// ignore skips over the pending input before this point.
// It tracks newlines in the ignored text, so use it only
// for text that is skipped without calling l.next.
func (l *lexer) ignore() {
	l.line += strings.Count(l.input[l.start:l.pos], "\n")
	l.start = l.pos
	l.startLine = l.line
}
// accept consumes the next rune if it's from the valid set,
// reporting whether a rune was consumed.
func (l *lexer) accept(valid string) bool {
	if !strings.ContainsRune(valid, l.next()) {
		l.backup()
		return false
	}
	return true
}
// acceptRun consumes a maximal run of runes from the valid set,
// leaving the input positioned at the first rune not in the set.
func (l *lexer) acceptRun(valid string) {
	r := l.next()
	for strings.ContainsRune(valid, r) {
		r = l.next()
	}
	l.backup()
}
// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
// It also empties the remaining input so any further scanning stops at EOF.
func (l *lexer) errorf(format string, args ...any) stateFn {
	l.item = item{itemError, l.start, fmt.Sprintf(format, args...), l.startLine}
	l.start = 0
	l.pos = 0
	l.input = l.input[:0]
	return nil
}
// nextItem returns the next item from the input.
// Called by the parser, not in the lexing goroutine.
// It runs the state machine from the state implied by insideAction until
// some state function stores an item (by returning nil).
func (l *lexer) nextItem() item {
	// Default to EOF: if the state machine finishes without emitting, we
	// are at end of input.
	l.item = item{itemEOF, l.pos, "EOF", l.startLine}
	state := lexText
	if l.insideAction {
		state = lexInsideAction
	}
	for {
		state = state(l)
		if state == nil {
			return l.item
		}
	}
}
// lex creates a new scanner for the input string.
// Empty left/right arguments select the standard "{{" and "}}" delimiters.
func lex(name, input, left, right string) *lexer {
	if left == "" {
		left = leftDelim
	}
	if right == "" {
		right = rightDelim
	}
	l := &lexer{
		name:         name,
		input:        input,
		leftDelim:    left,
		rightDelim:   right,
		line:         1,
		startLine:    1,
		insideAction: false,
	}
	return l
}
// state functions

// Default action delimiters and the comment markers that may appear
// immediately inside an action.
const (
	leftDelim    = "{{"
	rightDelim   = "}}"
	leftComment  = "/*"
	rightComment = "*/"
)
// lexText scans until an opening action delimiter, "{{".
func lexText(l *lexer) stateFn {
	if x := strings.Index(l.input[l.pos:], l.leftDelim); x >= 0 {
		if x > 0 {
			l.pos += Pos(x)
			// Do we trim any trailing space?
			trimLength := Pos(0)
			delimEnd := l.pos + Pos(len(l.leftDelim))
			if hasLeftTrimMarker(l.input[delimEnd:]) {
				trimLength = rightTrimLength(l.input[l.start:l.pos])
			}
			// Temporarily back pos up over the trimmed spaces so they are
			// excluded from the text item, then restore it so delimiter
			// scanning resumes at the right place.
			l.pos -= trimLength
			l.line += strings.Count(l.input[l.start:l.pos], "\n")
			i := l.thisItem(itemText)
			l.pos += trimLength
			l.ignore()
			if len(i.val) > 0 {
				return l.emitItem(i)
			}
		}
		return lexLeftDelim
	}
	l.pos = Pos(len(l.input))
	// Correctly reached EOF.
	if l.pos > l.start {
		l.line += strings.Count(l.input[l.start:l.pos], "\n")
		return l.emit(itemText)
	}
	return l.emit(itemEOF)
}
// rightTrimLength returns the length of the spaces at the end of the string.
func rightTrimLength(s string) Pos {
	trimmed := strings.TrimRight(s, spaceChars)
	return Pos(len(s) - len(trimmed))
}
// atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
// The trim-marker case (" -}}") is checked first since it subsumes the plain case.
func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
	if hasRightTrimMarker(l.input[l.pos:]) && strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) { // With trim marker.
		return true, true
	}
	if strings.HasPrefix(l.input[l.pos:], l.rightDelim) { // Without trim marker.
		return true, false
	}
	return false, false
}
// leftTrimLength returns the length of the spaces at the beginning of the string.
func leftTrimLength(s string) Pos {
	trimmed := strings.TrimLeft(s, spaceChars)
	return Pos(len(s) - len(trimmed))
}
// lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
// (The text to be trimmed has already been emitted.)
func lexLeftDelim(l *lexer) stateFn {
	l.pos += Pos(len(l.leftDelim))
	trimSpace := hasLeftTrimMarker(l.input[l.pos:])
	afterMarker := Pos(0)
	if trimSpace {
		afterMarker = trimMarkerLen
	}
	// A comment immediately after the delimiter is handled separately and
	// does not emit an itemLeftDelim.
	if strings.HasPrefix(l.input[l.pos+afterMarker:], leftComment) {
		l.pos += afterMarker
		l.ignore()
		return lexComment
	}
	// The delimiter item is captured before skipping the trim marker so
	// its value is exactly the delimiter text.
	i := l.thisItem(itemLeftDelim)
	l.insideAction = true
	l.pos += afterMarker
	l.ignore()
	l.parenDepth = 0
	return l.emitItem(i)
}
// lexComment scans a comment. The left comment marker is known to be present.
// A comment must be immediately followed by the right delimiter (optionally
// with a trim marker); the comment item, when emitted, covers "/*...*/" only.
func lexComment(l *lexer) stateFn {
	l.pos += Pos(len(leftComment))
	x := strings.Index(l.input[l.pos:], rightComment)
	if x < 0 {
		return l.errorf("unclosed comment")
	}
	l.pos += Pos(x + len(rightComment))
	delim, trimSpace := l.atRightDelim()
	if !delim {
		return l.errorf("comment ends before closing delimiter")
	}
	l.line += strings.Count(l.input[l.start:l.pos], "\n")
	i := l.thisItem(itemComment)
	// Skip the optional trim marker, the closing delimiter, and (when
	// trimming) the following whitespace, none of which become items.
	if trimSpace {
		l.pos += trimMarkerLen
	}
	l.pos += Pos(len(l.rightDelim))
	if trimSpace {
		l.pos += leftTrimLength(l.input[l.pos:])
	}
	l.ignore()
	if l.options.emitComment {
		return l.emitItem(i)
	}
	return lexText
}
// lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
func lexRightDelim(l *lexer) stateFn {
	_, trimSpace := l.atRightDelim()
	if trimSpace {
		// Drop the " -" trim marker so it is not part of the delimiter item.
		l.pos += trimMarkerLen
		l.ignore()
	}
	l.pos += Pos(len(l.rightDelim))
	i := l.thisItem(itemRightDelim)
	if trimSpace {
		// Swallow the whitespace following the delimiter.
		l.pos += leftTrimLength(l.input[l.pos:])
		l.ignore()
	}
	l.insideAction = false
	return l.emitItem(i)
}
// lexInsideAction scans the elements inside action delimiters.
func lexInsideAction(l *lexer) stateFn {
	// Either number, quoted string, or identifier.
	// Spaces separate arguments; runs of spaces turn into itemSpace.
	// Pipe symbols separate and are emitted.
	delim, _ := l.atRightDelim()
	if delim {
		if l.parenDepth == 0 {
			return lexRightDelim
		}
		return l.errorf("unclosed left paren")
	}
	switch r := l.next(); {
	case r == eof:
		return l.errorf("unclosed action")
	case isSpace(r):
		l.backup() // Put space back in case we have " -}}".
		return lexSpace
	case r == '=':
		return l.emit(itemAssign)
	case r == ':':
		// ':' is only legal as part of ':='.
		if l.next() != '=' {
			return l.errorf("expected :=")
		}
		return l.emit(itemDeclare)
	case r == '|':
		return l.emit(itemPipe)
	case r == '"':
		return lexQuote
	case r == '`':
		return lexRawQuote
	case r == '$':
		return lexVariable
	case r == '\'':
		return lexChar
	case r == '.':
		// special look-ahead for ".field" so we don't break l.backup().
		if l.pos < Pos(len(l.input)) {
			r := l.input[l.pos]
			if r < '0' || '9' < r {
				return lexField
			}
		}
		fallthrough // '.' can start a number.
	case r == '+' || r == '-' || ('0' <= r && r <= '9'):
		l.backup()
		return lexNumber
	case isAlphaNumeric(r):
		l.backup()
		return lexIdentifier
	case r == '(':
		l.parenDepth++
		return l.emit(itemLeftParen)
	case r == ')':
		l.parenDepth--
		if l.parenDepth < 0 {
			return l.errorf("unexpected right paren")
		}
		return l.emit(itemRightParen)
	case r <= unicode.MaxASCII && unicode.IsPrint(r):
		// Grab-bag: any other printable ASCII character (comma etc.).
		return l.emit(itemChar)
	default:
		return l.errorf("unrecognized character in action: %#U", r)
	}
}
// lexSpace scans a run of space characters.
// We have not consumed the first space, which is known to be present.
// Take care if there is a trim-marked right delimiter, which starts with a space.
func lexSpace(l *lexer) stateFn {
	var r rune
	var numSpaces int
	for {
		r = l.peek()
		if !isSpace(r) {
			break
		}
		l.next()
		numSpaces++
	}
	// Be careful about a trim-marked closing delimiter, which has a minus
	// after a space. We know there is a space, so check for the '-' that might follow.
	if hasRightTrimMarker(l.input[l.pos-1:]) && strings.HasPrefix(l.input[l.pos-1+trimMarkerLen:], l.rightDelim) {
		l.backup() // Before the space.
		if numSpaces == 1 {
			// The run was just the trim marker's space: no space item at all.
			return lexRightDelim // On the delim, so go right to that.
		}
		// Otherwise emit the space run minus its final space, which belongs
		// to the trim marker.
	}
	return l.emit(itemSpace)
}
// lexIdentifier scans an alphanumeric.
// The word is classified as a keyword, a field, a boolean, or a plain
// identifier. break/continue only count as keywords when enabled in options.
func lexIdentifier(l *lexer) stateFn {
	for {
		switch r := l.next(); {
		case isAlphaNumeric(r):
			// absorb.
		default:
			l.backup()
			word := l.input[l.start:l.pos]
			if !l.atTerminator() {
				return l.errorf("bad character %#U", r)
			}
			switch {
			case key[word] > itemKeyword:
				item := key[word]
				// break/continue degrade to identifiers unless allowed.
				if item == itemBreak && !l.options.breakOK || item == itemContinue && !l.options.continueOK {
					return l.emit(itemIdentifier)
				}
				return l.emit(item)
			case word[0] == '.':
				return l.emit(itemField)
			case word == "true", word == "false":
				return l.emit(itemBool)
			default:
				return l.emit(itemIdentifier)
			}
		}
	}
}
// lexField scans a field: .Alphanumeric.
// The . has been scanned.
func lexField(l *lexer) stateFn {
	return lexFieldOrVariable(l, itemField)
}

// lexVariable scans a Variable: $Alphanumeric.
// The $ has been scanned.
// A bare "$" (followed by a terminator) is itself a valid variable.
func lexVariable(l *lexer) stateFn {
	if l.atTerminator() { // Nothing interesting follows -> "$".
		return l.emit(itemVariable)
	}
	return lexFieldOrVariable(l, itemVariable)
}
// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
// The . or $ has been scanned.
// A bare "." becomes itemDot (the cursor); a bare "$" stays a variable.
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
		if typ == itemVariable {
			return l.emit(itemVariable)
		}
		return l.emit(itemDot)
	}
	// Absorb the alphanumeric name.
	var r rune
	for {
		r = l.next()
		if !isAlphaNumeric(r) {
			l.backup()
			break
		}
	}
	if !l.atTerminator() {
		return l.errorf("bad character %#U", r)
	}
	return l.emit(typ)
}
// atTerminator reports whether the input is at valid termination character to
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
// like "$x+2" not being acceptable without a space, in case we decide one
// day to implement arithmetic.
func (l *lexer) atTerminator() bool {
	r := l.peek()
	if isSpace(r) {
		return true
	}
	switch r {
	case eof, '.', ',', '|', ':', ')', '(':
		return true
	}
	// The right delimiter itself also terminates an identifier.
	return strings.HasPrefix(l.input[l.pos:], l.rightDelim)
}
// lexChar scans a character constant. The initial quote is already
// scanned. Syntax checking is done by the parser.
// A backslash escapes the following rune (unless it is EOF or newline,
// which fall through to the unterminated-constant error).
func lexChar(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			if r := l.next(); r != eof && r != '\n' {
				break
			}
			fallthrough
		case eof, '\n':
			return l.errorf("unterminated character constant")
		case '\'':
			break Loop
		}
	}
	return l.emit(itemCharConstant)
}
// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
// and "089" - but when it's wrong the input is invalid and the parser (via
// strconv) will notice.
func lexNumber(l *lexer) stateFn {
	if !l.scanNumber() {
		return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
	}
	if sign := l.peek(); sign == '+' || sign == '-' {
		// Complex: 1+2i. No spaces, must end in 'i'.
		if !l.scanNumber() || l.input[l.pos-1] != 'i' {
			return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
		}
		return l.emit(itemComplex)
	}
	return l.emit(itemNumber)
}
// scanNumber consumes one number literal (with optional sign, base prefix,
// fraction, exponent, and imaginary suffix), reporting whether what it
// consumed looks plausibly like a number. The digit-set string doubles as a
// base indicator: its length distinguishes decimal (11) from hex (23) below.
func (l *lexer) scanNumber() bool {
	// Optional leading sign.
	l.accept("+-")
	// Is it hex?
	digits := "0123456789_"
	if l.accept("0") {
		// Note: Leading 0 does not mean octal in floats.
		if l.accept("xX") {
			digits = "0123456789abcdefABCDEF_"
		} else if l.accept("oO") {
			digits = "01234567_"
		} else if l.accept("bB") {
			digits = "01_"
		}
	}
	l.acceptRun(digits)
	if l.accept(".") {
		l.acceptRun(digits)
	}
	// Decimal exponent ('e'/'E') only for decimal literals.
	if len(digits) == 10+1 && l.accept("eE") {
		l.accept("+-")
		l.acceptRun("0123456789_")
	}
	// Binary exponent ('p'/'P') only for hex literals.
	if len(digits) == 16+6+1 && l.accept("pP") {
		l.accept("+-")
		l.acceptRun("0123456789_")
	}
	// Is it imaginary?
	l.accept("i")
	// Next thing mustn't be alphanumeric.
	if isAlphaNumeric(l.peek()) {
		l.next()
		return false
	}
	return true
}
// lexQuote scans a quoted string. The opening quote is already scanned;
// backslash escapes are honored and an unescaped newline or EOF is an error.
func lexQuote(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			if r := l.next(); r != eof && r != '\n' {
				break
			}
			fallthrough
		case eof, '\n':
			return l.errorf("unterminated quoted string")
		case '"':
			break Loop
		}
	}
	return l.emit(itemString)
}
// lexRawQuote scans a raw quoted string. The opening backquote is already
// scanned; raw strings have no escapes and may span newlines.
func lexRawQuote(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case eof:
			return l.errorf("unterminated raw quoted string")
		case '`':
			break Loop
		}
	}
	return l.emit(itemRawString)
}
// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
	switch r {
	case ' ', '\t', '\r', '\n':
		return true
	}
	return false
}
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
	return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_'
}
// hasLeftTrimMarker reports whether s begins with the left trim marker:
// a '-' followed by an ASCII space character (as in "{{- ").
func hasLeftTrimMarker(s string) bool {
	if len(s) < 2 {
		return false
	}
	return s[0] == trimMarker && isSpace(rune(s[1]))
}
// hasRightTrimMarker reports whether s begins with the right trim marker:
// an ASCII space character followed by '-' (as in " -}}").
func hasRightTrimMarker(s string) bool {
	if len(s) < 2 {
		return false
	}
	return isSpace(rune(s[0])) && s[1] == trimMarker
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Parse nodes.
package parse
import (
"fmt"
"strconv"
"strings"
)
// textFormat is the verb used to format node text in String output;
// presumably referenced by text-node String/writeTo methods — confirm at use sites.
var textFormat = "%s" // Changed to "%q" in tests for better error messages.
// A Node is an element in the parse tree. The interface is trivial.
// The interface contains an unexported method so that only
// types local to this package can satisfy it.
type Node interface {
	Type() NodeType
	String() string
	// Copy does a deep copy of the Node and all its components.
	// To avoid type assertions, some XxxNodes also have specialized
	// CopyXxx methods that return *XxxNode.
	Copy() Node
	Position() Pos // byte position of start of node in full original input string
	// tree returns the containing *Tree.
	// It is unexported so all implementations of Node are in this package.
	tree() *Tree
	// writeTo writes the String output to the builder.
	writeTo(*strings.Builder)
}
// NodeType identifies the type of a parse tree node.
type NodeType int
// Pos represents a byte position in the original input text from which
// this template was parsed.
type Pos int
func (p Pos) Position() Pos {
return p
}
// Type returns itself and provides an easy default implementation
// for embedding in a Node. Embedded in all non-trivial Nodes.
func (t NodeType) Type() NodeType {
return t
}
const (
NodeText NodeType = iota // Plain text.
NodeAction // A non-control action such as a field evaluation.
NodeBool // A boolean constant.
NodeChain // A sequence of field accesses.
NodeCommand // An element of a pipeline.
NodeDot // The cursor, dot.
nodeElse // An else action. Not added to tree.
nodeEnd // An end action. Not added to tree.
NodeField // A field or method name.
NodeIdentifier // An identifier; always a function name.
NodeIf // An if action.
NodeList // A list of Nodes.
NodeNil // An untyped nil constant.
NodeNumber // A numerical constant.
NodePipe // A pipeline of commands.
NodeRange // A range action.
NodeString // A string constant.
NodeTemplate // A template invocation action.
NodeVariable // A $ variable.
NodeWith // A with action.
NodeComment // A comment.
NodeBreak // A break action.
NodeContinue // A continue action.
)
// Nodes.

// ListNode holds a sequence of nodes.
type ListNode struct {
	NodeType
	Pos
	tr    *Tree
	Nodes []Node // The element nodes in lexical order.
}

// newList returns an empty ListNode rooted at pos.
func (t *Tree) newList(pos Pos) *ListNode {
	return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
}

// append adds n to the end of the list.
func (l *ListNode) append(n Node) {
	l.Nodes = append(l.Nodes, n)
}

func (l *ListNode) tree() *Tree {
	return l.tr
}

// String renders the list by concatenating its elements.
func (l *ListNode) String() string {
	sb := new(strings.Builder)
	l.writeTo(sb)
	return sb.String()
}

func (l *ListNode) writeTo(sb *strings.Builder) {
	for i := range l.Nodes {
		l.Nodes[i].writeTo(sb)
	}
}

// CopyList returns a deep copy of the list; a nil receiver copies to nil.
func (l *ListNode) CopyList() *ListNode {
	if l == nil {
		return nil
	}
	dup := l.tr.newList(l.Pos)
	for _, elem := range l.Nodes {
		dup.append(elem.Copy())
	}
	return dup
}

// Copy satisfies the Node interface via CopyList.
func (l *ListNode) Copy() Node {
	return l.CopyList()
}
// TextNode holds plain text.
type TextNode struct {
	NodeType
	Pos
	tr   *Tree
	Text []byte // The text; may span newlines.
}

// newText returns a TextNode holding a copy of text as bytes.
func (t *Tree) newText(pos Pos, text string) *TextNode {
	return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
}

// String formats the text using textFormat ("%s" normally, "%q" in tests).
func (t *TextNode) String() string {
	return fmt.Sprintf(textFormat, t.Text)
}

func (t *TextNode) writeTo(sb *strings.Builder) {
	sb.WriteString(t.String())
}

func (t *TextNode) tree() *Tree {
	return t.tr
}

// Copy duplicates the node, cloning the Text slice so the copy is
// independent of the original's backing array.
func (t *TextNode) Copy() Node {
	return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
}

// CommentNode holds a comment.
type CommentNode struct {
	NodeType
	Pos
	tr   *Tree
	Text string // Comment text.
}

// newComment returns a CommentNode; only created when ParseComments is set.
func (t *Tree) newComment(pos Pos, text string) *CommentNode {
	return &CommentNode{tr: t, NodeType: NodeComment, Pos: pos, Text: text}
}

func (c *CommentNode) String() string {
	var sb strings.Builder
	c.writeTo(&sb)
	return sb.String()
}

// writeTo re-wraps the comment text in action delimiters.
func (c *CommentNode) writeTo(sb *strings.Builder) {
	sb.WriteString("{{")
	sb.WriteString(c.Text)
	sb.WriteString("}}")
}

func (c *CommentNode) tree() *Tree {
	return c.tr
}

func (c *CommentNode) Copy() Node {
	return &CommentNode{tr: c.tr, NodeType: NodeComment, Pos: c.Pos, Text: c.Text}
}
// PipeNode holds a pipeline with optional declaration
type PipeNode struct {
	NodeType
	Pos
	tr       *Tree
	Line     int             // The line number in the input. Deprecated: Kept for compatibility.
	IsAssign bool            // The variables are being assigned, not declared.
	Decl     []*VariableNode // Variables in lexical order.
	Cmds     []*CommandNode  // The commands in lexical order.
}

// newPipeline returns a PipeNode with the given declared variables and
// no commands yet.
func (t *Tree) newPipeline(pos Pos, line int, vars []*VariableNode) *PipeNode {
	return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: vars}
}

// append adds command to the end of the pipeline.
func (p *PipeNode) append(command *CommandNode) {
	p.Cmds = append(p.Cmds, command)
}

func (p *PipeNode) String() string {
	sb := new(strings.Builder)
	p.writeTo(sb)
	return sb.String()
}

// writeTo renders "v1, v2 := cmd1 | cmd2" (or "=" for assignment).
func (p *PipeNode) writeTo(sb *strings.Builder) {
	if len(p.Decl) > 0 {
		for i, v := range p.Decl {
			if i > 0 {
				sb.WriteString(", ")
			}
			v.writeTo(sb)
		}
		op := " := "
		if p.IsAssign {
			op = " = "
		}
		sb.WriteString(op)
	}
	for i, c := range p.Cmds {
		if i > 0 {
			sb.WriteString(" | ")
		}
		c.writeTo(sb)
	}
}

func (p *PipeNode) tree() *Tree {
	return p.tr
}

// CopyPipe returns a deep copy of the pipeline; a nil receiver copies to nil.
func (p *PipeNode) CopyPipe() *PipeNode {
	if p == nil {
		return nil
	}
	vars := make([]*VariableNode, 0, len(p.Decl))
	for _, d := range p.Decl {
		vars = append(vars, d.Copy().(*VariableNode))
	}
	dup := p.tr.newPipeline(p.Pos, p.Line, vars)
	dup.IsAssign = p.IsAssign
	for _, c := range p.Cmds {
		dup.append(c.Copy().(*CommandNode))
	}
	return dup
}

func (p *PipeNode) Copy() Node {
	return p.CopyPipe()
}
// ActionNode holds an action (something bounded by delimiters).
// Control actions have their own nodes; ActionNode represents simple
// ones such as field evaluations and parenthesized pipelines.
type ActionNode struct {
	NodeType
	Pos
	tr   *Tree
	Line int       // The line number in the input. Deprecated: Kept for compatibility.
	Pipe *PipeNode // The pipeline in the action.
}

// newAction returns an ActionNode wrapping pipe.
func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
	return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
}

func (a *ActionNode) String() string {
	var sb strings.Builder
	a.writeTo(&sb)
	return sb.String()
}

// writeTo renders the pipeline wrapped in the default "{{ }}" delimiters,
// regardless of the delimiters the template was parsed with.
func (a *ActionNode) writeTo(sb *strings.Builder) {
	sb.WriteString("{{")
	a.Pipe.writeTo(sb)
	sb.WriteString("}}")
}

func (a *ActionNode) tree() *Tree {
	return a.tr
}

func (a *ActionNode) Copy() Node {
	return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
}
// CommandNode holds a command (a pipeline inside an evaluating action).
type CommandNode struct {
	NodeType
	Pos
	tr   *Tree
	Args []Node // Arguments in lexical order: Identifier, field, or constant.
}

// newCommand returns a CommandNode with no arguments yet.
func (t *Tree) newCommand(pos Pos) *CommandNode {
	return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
}

// append adds arg to the end of the argument list.
func (c *CommandNode) append(arg Node) {
	c.Args = append(c.Args, arg)
}

func (c *CommandNode) String() string {
	sb := new(strings.Builder)
	c.writeTo(sb)
	return sb.String()
}

// writeTo renders the arguments space-separated; a nested pipeline
// argument is parenthesized.
func (c *CommandNode) writeTo(sb *strings.Builder) {
	for i, arg := range c.Args {
		if i > 0 {
			sb.WriteByte(' ')
		}
		if pipe, ok := arg.(*PipeNode); ok {
			sb.WriteByte('(')
			pipe.writeTo(sb)
			sb.WriteByte(')')
		} else {
			arg.writeTo(sb)
		}
	}
}

func (c *CommandNode) tree() *Tree {
	return c.tr
}

// Copy returns a deep copy. A nil receiver is returned as-is so the
// resulting interface keeps its typed-nil value, matching the original.
func (c *CommandNode) Copy() Node {
	if c == nil {
		return c
	}
	dup := c.tr.newCommand(c.Pos)
	for _, arg := range c.Args {
		dup.append(arg.Copy())
	}
	return dup
}
// IdentifierNode holds an identifier.
type IdentifierNode struct {
	NodeType
	Pos
	tr    *Tree
	Ident string // The identifier's name.
}

// NewIdentifier returns a new [IdentifierNode] with the given identifier name.
// Note: unlike the other constructors, this is exported and does not set
// Pos or tr; use SetPos and SetTree to complete the node.
func NewIdentifier(ident string) *IdentifierNode {
	return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
}

// SetPos sets the position. [NewIdentifier] is a public method so we can't modify its signature.
// Chained for convenience.
// TODO: fix one day?
func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
	i.Pos = pos
	return i
}

// SetTree sets the parent tree for the node. [NewIdentifier] is a public method so we can't modify its signature.
// Chained for convenience.
// TODO: fix one day?
func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
	i.tr = t
	return i
}

func (i *IdentifierNode) String() string {
	return i.Ident
}

func (i *IdentifierNode) writeTo(sb *strings.Builder) {
	sb.WriteString(i.String())
}

func (i *IdentifierNode) tree() *Tree {
	return i.tr
}

func (i *IdentifierNode) Copy() Node {
	return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
}
// VariableNode holds a list of variable names, possibly with chained field
// accesses. The dollar sign is part of the (first) name.
type VariableNode struct {
	NodeType
	Pos
	tr    *Tree
	Ident []string // Variable name and fields in lexical order.
}

// newVariable splits ident on '.' into the variable name and its fields.
func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
	return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
}

func (v *VariableNode) String() string {
	sb := new(strings.Builder)
	v.writeTo(sb)
	return sb.String()
}

// writeTo rejoins the name and fields with dots, reconstructing the
// original "$name.f1.f2" spelling.
func (v *VariableNode) writeTo(sb *strings.Builder) {
	sb.WriteString(strings.Join(v.Ident, "."))
}

func (v *VariableNode) tree() *Tree {
	return v.tr
}

// Copy duplicates the node with an independent Ident slice.
func (v *VariableNode) Copy() Node {
	idents := make([]string, len(v.Ident))
	copy(idents, v.Ident)
	return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: idents}
}
// DotNode holds the special identifier '.'.
type DotNode struct {
	NodeType
	Pos
	tr *Tree
}

func (t *Tree) newDot(pos Pos) *DotNode {
	return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
}

func (d *DotNode) Type() NodeType {
	// Override method on embedded NodeType for API compatibility.
	// TODO: Not really a problem; could change API without effect but
	// api tool complains.
	return NodeDot
}

func (d *DotNode) String() string {
	return "."
}

func (d *DotNode) writeTo(sb *strings.Builder) {
	sb.WriteString(d.String())
}

func (d *DotNode) tree() *Tree {
	return d.tr
}

func (d *DotNode) Copy() Node {
	return d.tr.newDot(d.Pos)
}

// NilNode holds the special identifier 'nil' representing an untyped nil constant.
type NilNode struct {
	NodeType
	Pos
	tr *Tree
}

func (t *Tree) newNil(pos Pos) *NilNode {
	return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
}

func (n *NilNode) Type() NodeType {
	// Override method on embedded NodeType for API compatibility.
	// TODO: Not really a problem; could change API without effect but
	// api tool complains.
	return NodeNil
}

func (n *NilNode) String() string {
	return "nil"
}

func (n *NilNode) writeTo(sb *strings.Builder) {
	sb.WriteString(n.String())
}

func (n *NilNode) tree() *Tree {
	return n.tr
}

func (n *NilNode) Copy() Node {
	return n.tr.newNil(n.Pos)
}
// FieldNode holds a field (identifier starting with '.').
// The names may be chained ('.x.y').
// The period is dropped from each ident.
type FieldNode struct {
	NodeType
	Pos
	tr    *Tree
	Ident []string // The identifiers in lexical order.
}

// newField splits ident (which starts with a period) into its
// dot-separated components, dropping the leading period.
func (t *Tree) newField(pos Pos, ident string) *FieldNode {
	return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
}

func (f *FieldNode) String() string {
	sb := new(strings.Builder)
	f.writeTo(sb)
	return sb.String()
}

// writeTo re-prefixes each component with a period: ".x.y".
func (f *FieldNode) writeTo(sb *strings.Builder) {
	for _, id := range f.Ident {
		sb.WriteByte('.')
		sb.WriteString(id)
	}
}

func (f *FieldNode) tree() *Tree {
	return f.tr
}

// Copy duplicates the node with an independent Ident slice.
func (f *FieldNode) Copy() Node {
	idents := make([]string, len(f.Ident))
	copy(idents, f.Ident)
	return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: idents}
}
// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
// The names may be chained ('.x.y').
// The periods are dropped from each ident.
type ChainNode struct {
	NodeType
	Pos
	tr    *Tree
	Node  Node
	Field []string // The identifiers in lexical order.
}

// newChain returns a ChainNode wrapping node, with no fields yet.
func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
	return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
}

// Add adds the named field (which should start with a period) to the end of the chain.
// It panics if the field is malformed; callers are expected to pass
// lexer-validated input.
func (c *ChainNode) Add(field string) {
	if !strings.HasPrefix(field, ".") {
		panic("no dot in field")
	}
	name := field[1:] // Remove leading dot.
	if name == "" {
		panic("empty field")
	}
	c.Field = append(c.Field, name)
}

func (c *ChainNode) String() string {
	sb := new(strings.Builder)
	c.writeTo(sb)
	return sb.String()
}

// writeTo renders the base term (parenthesized when it is a pipeline)
// followed by the dotted field chain.
func (c *ChainNode) writeTo(sb *strings.Builder) {
	_, parenthesize := c.Node.(*PipeNode)
	if parenthesize {
		sb.WriteByte('(')
	}
	c.Node.writeTo(sb)
	if parenthesize {
		sb.WriteByte(')')
	}
	for _, field := range c.Field {
		sb.WriteByte('.')
		sb.WriteString(field)
	}
}

func (c *ChainNode) tree() *Tree {
	return c.tr
}

// Copy duplicates the chain. Note the base Node is shared, not deep-copied,
// matching the original behavior.
func (c *ChainNode) Copy() Node {
	fields := make([]string, len(c.Field))
	copy(fields, c.Field)
	return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: fields}
}
// BoolNode holds a boolean constant.
type BoolNode struct {
	NodeType
	Pos
	tr   *Tree
	True bool // The value of the boolean constant.
}

// newBool returns a BoolNode holding the given constant value.
// The parameter is named b rather than "true" (as it once was),
// which shadowed the predeclared identifier.
func (t *Tree) newBool(pos Pos, b bool) *BoolNode {
	return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: b}
}

// String returns the Go spelling of the constant.
func (b *BoolNode) String() string {
	if b.True {
		return "true"
	}
	return "false"
}

func (b *BoolNode) writeTo(sb *strings.Builder) {
	sb.WriteString(b.String())
}

func (b *BoolNode) tree() *Tree {
	return b.tr
}

func (b *BoolNode) Copy() Node {
	return b.tr.newBool(b.Pos, b.True)
}
// NumberNode holds a number: signed or unsigned integer, float, or complex.
// The value is parsed and stored under all the types that can represent the value.
// This simulates in a small amount of code the behavior of Go's ideal constants.
type NumberNode struct {
	NodeType
	Pos
	tr         *Tree
	IsInt      bool       // Number has an integral value.
	IsUint     bool       // Number has an unsigned integral value.
	IsFloat    bool       // Number has a floating-point value.
	IsComplex  bool       // Number is complex.
	Int64      int64      // The signed integer value.
	Uint64     uint64     // The unsigned integer value.
	Float64    float64    // The floating-point value.
	Complex128 complex128 // The complex value.
	Text       string     // The original textual representation from the input.
}

// newNumber parses text (of the given lexical item type) and records the
// value under every numeric representation that can hold it exactly.
// It returns an error for malformed constants and for integer literals
// too large to fit in 64 bits.
func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
	n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
	switch typ {
	case itemCharConstant:
		// Note: rune is a variable here, shadowing the predeclared type.
		rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
		if err != nil {
			return nil, err
		}
		if tail != "'" {
			return nil, fmt.Errorf("malformed character constant: %s", text)
		}
		n.Int64 = int64(rune)
		n.IsInt = true
		n.Uint64 = uint64(rune)
		n.IsUint = true
		n.Float64 = float64(rune) // odd but those are the rules.
		n.IsFloat = true
		return n, nil
	case itemComplex:
		// fmt.Sscan can parse the pair, so let it do the work.
		if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
			return nil, err
		}
		n.IsComplex = true
		n.simplifyComplex()
		return n, nil
	}
	// Imaginary constants can only be complex unless they are zero.
	if len(text) > 0 && text[len(text)-1] == 'i' {
		f, err := strconv.ParseFloat(text[:len(text)-1], 64)
		if err == nil {
			n.IsComplex = true
			n.Complex128 = complex(0, f)
			n.simplifyComplex()
			return n, nil
		}
	}
	// Do integer test first so we get 0x123 etc.
	u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
	if err == nil {
		n.IsUint = true
		n.Uint64 = u
	}
	i, err := strconv.ParseInt(text, 0, 64)
	if err == nil {
		n.IsInt = true
		n.Int64 = i
		if i == 0 {
			n.IsUint = true // in case of -0.
			n.Uint64 = u
		}
	}
	// If an integer extraction succeeded, promote the float.
	if n.IsInt {
		n.IsFloat = true
		n.Float64 = float64(n.Int64)
	} else if n.IsUint {
		n.IsFloat = true
		n.Float64 = float64(n.Uint64)
	} else {
		f, err := strconv.ParseFloat(text, 64)
		if err == nil {
			// If we parsed it as a float but it looks like an integer,
			// it's a huge number too large to fit in an int. Reject it.
			if !strings.ContainsAny(text, ".eEpP") {
				return nil, fmt.Errorf("integer overflow: %q", text)
			}
			n.IsFloat = true
			n.Float64 = f
			// If a floating-point extraction succeeded, extract the int if needed.
			if !n.IsInt && float64(int64(f)) == f {
				n.IsInt = true
				n.Int64 = int64(f)
			}
			if !n.IsUint && float64(uint64(f)) == f {
				n.IsUint = true
				n.Uint64 = uint64(f)
			}
		}
	}
	// Nothing could represent the text: it is not a number.
	if !n.IsInt && !n.IsUint && !n.IsFloat {
		return nil, fmt.Errorf("illegal number syntax: %q", text)
	}
	return n, nil
}

// simplifyComplex pulls out any other types that are represented by the complex number.
// These all require that the imaginary part be zero.
func (n *NumberNode) simplifyComplex() {
	n.IsFloat = imag(n.Complex128) == 0
	if n.IsFloat {
		n.Float64 = real(n.Complex128)
		n.IsInt = float64(int64(n.Float64)) == n.Float64
		if n.IsInt {
			n.Int64 = int64(n.Float64)
		}
		n.IsUint = float64(uint64(n.Float64)) == n.Float64
		if n.IsUint {
			n.Uint64 = uint64(n.Float64)
		}
	}
}

// String returns the original textual representation of the constant.
func (n *NumberNode) String() string {
	return n.Text
}

func (n *NumberNode) writeTo(sb *strings.Builder) {
	sb.WriteString(n.String())
}

func (n *NumberNode) tree() *Tree {
	return n.tr
}

// Copy returns a shallow copy, which is a full copy since all fields
// are values.
func (n *NumberNode) Copy() Node {
	nn := new(NumberNode)
	*nn = *n // Easy, fast, correct.
	return nn
}
// StringNode holds a string constant. The value has been "unquoted".
type StringNode struct {
	NodeType
	Pos
	tr     *Tree
	Quoted string // The original text of the string, with quotes.
	Text   string // The string, after quote processing.
}

func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
	return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
}

// String returns the original quoted form, not the processed text.
func (s *StringNode) String() string {
	return s.Quoted
}

func (s *StringNode) writeTo(sb *strings.Builder) {
	sb.WriteString(s.String())
}

func (s *StringNode) tree() *Tree {
	return s.tr
}

func (s *StringNode) Copy() Node {
	return s.tr.newString(s.Pos, s.Quoted, s.Text)
}

// endNode represents an {{end}} action.
// It does not appear in the final parse tree.
type endNode struct {
	NodeType
	Pos
	tr *Tree
}

func (t *Tree) newEnd(pos Pos) *endNode {
	return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
}

func (e *endNode) String() string {
	return "{{end}}"
}

func (e *endNode) writeTo(sb *strings.Builder) {
	sb.WriteString(e.String())
}

func (e *endNode) tree() *Tree {
	return e.tr
}

func (e *endNode) Copy() Node {
	return e.tr.newEnd(e.Pos)
}

// elseNode represents an {{else}} action. Does not appear in the final tree.
type elseNode struct {
	NodeType
	Pos
	tr   *Tree
	Line int // The line number in the input. Deprecated: Kept for compatibility.
}

func (t *Tree) newElse(pos Pos, line int) *elseNode {
	return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
}

// Type overrides the embedded NodeType method.
func (e *elseNode) Type() NodeType {
	return nodeElse
}

func (e *elseNode) String() string {
	return "{{else}}"
}

func (e *elseNode) writeTo(sb *strings.Builder) {
	sb.WriteString(e.String())
}

func (e *elseNode) tree() *Tree {
	return e.tr
}

func (e *elseNode) Copy() Node {
	return e.tr.newElse(e.Pos, e.Line)
}
// BranchNode is the common representation of if, range, and with.
type BranchNode struct {
	NodeType
	Pos
	tr       *Tree
	Line     int       // The line number in the input. Deprecated: Kept for compatibility.
	Pipe     *PipeNode // The pipeline to be evaluated.
	List     *ListNode // What to execute if the value is non-empty.
	ElseList *ListNode // What to execute if the value is empty (nil if absent).
}

func (b *BranchNode) String() string {
	var sb strings.Builder
	b.writeTo(&sb)
	return sb.String()
}

// writeTo renders "{{keyword pipe}}list{{else}}elseList{{end}}", selecting
// the keyword from the concrete node type.
func (b *BranchNode) writeTo(sb *strings.Builder) {
	name := ""
	switch b.NodeType {
	case NodeIf:
		name = "if"
	case NodeRange:
		name = "range"
	case NodeWith:
		name = "with"
	default:
		panic("unknown branch type")
	}
	sb.WriteString("{{")
	sb.WriteString(name)
	sb.WriteByte(' ')
	b.Pipe.writeTo(sb)
	sb.WriteString("}}")
	b.List.writeTo(sb)
	if b.ElseList != nil {
		sb.WriteString("{{else}}")
		b.ElseList.writeTo(sb)
	}
	sb.WriteString("{{end}}")
}

func (b *BranchNode) tree() *Tree {
	return b.tr
}

// Copy dispatches on the concrete type so the copy is again an IfNode,
// RangeNode, or WithNode rather than a bare BranchNode.
// Note: unlike the typed Copy methods below, this shares Pipe/List rather
// than deep-copying them.
func (b *BranchNode) Copy() Node {
	switch b.NodeType {
	case NodeIf:
		return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
	case NodeRange:
		return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
	case NodeWith:
		return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
	default:
		panic("unknown branch type")
	}
}

// IfNode represents an {{if}} action and its commands.
type IfNode struct {
	BranchNode
}

func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
	return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
}

func (i *IfNode) Copy() Node {
	return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
}

// BreakNode represents a {{break}} action.
type BreakNode struct {
	tr *Tree
	NodeType
	Pos
	Line int
}

func (t *Tree) newBreak(pos Pos, line int) *BreakNode {
	return &BreakNode{tr: t, NodeType: NodeBreak, Pos: pos, Line: line}
}

func (b *BreakNode) Copy() Node                  { return b.tr.newBreak(b.Pos, b.Line) }
func (b *BreakNode) String() string              { return "{{break}}" }
func (b *BreakNode) tree() *Tree                 { return b.tr }
func (b *BreakNode) writeTo(sb *strings.Builder) { sb.WriteString("{{break}}") }

// ContinueNode represents a {{continue}} action.
type ContinueNode struct {
	tr *Tree
	NodeType
	Pos
	Line int
}

func (t *Tree) newContinue(pos Pos, line int) *ContinueNode {
	return &ContinueNode{tr: t, NodeType: NodeContinue, Pos: pos, Line: line}
}

func (c *ContinueNode) Copy() Node                  { return c.tr.newContinue(c.Pos, c.Line) }
func (c *ContinueNode) String() string              { return "{{continue}}" }
func (c *ContinueNode) tree() *Tree                 { return c.tr }
func (c *ContinueNode) writeTo(sb *strings.Builder) { sb.WriteString("{{continue}}") }

// RangeNode represents a {{range}} action and its commands.
type RangeNode struct {
	BranchNode
}

func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
	return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
}

func (r *RangeNode) Copy() Node {
	return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
}

// WithNode represents a {{with}} action and its commands.
type WithNode struct {
	BranchNode
}

func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
	return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
}

func (w *WithNode) Copy() Node {
	return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
}

// TemplateNode represents a {{template}} action.
type TemplateNode struct {
	NodeType
	Pos
	tr   *Tree
	Line int       // The line number in the input. Deprecated: Kept for compatibility.
	Name string    // The name of the template (unquoted).
	Pipe *PipeNode // The command to evaluate as dot for the template.
}

func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
	return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
}

func (t *TemplateNode) String() string {
	var sb strings.Builder
	t.writeTo(&sb)
	return sb.String()
}

// writeTo renders {{template "name"}} or {{template "name" pipe}}.
func (t *TemplateNode) writeTo(sb *strings.Builder) {
	sb.WriteString("{{template ")
	sb.WriteString(strconv.Quote(t.Name))
	if t.Pipe != nil {
		sb.WriteByte(' ')
		t.Pipe.writeTo(sb)
	}
	sb.WriteString("}}")
}

func (t *TemplateNode) tree() *Tree {
	return t.tr
}

func (t *TemplateNode) Copy() Node {
	return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package parse builds parse trees for templates as defined by text/template
// and html/template. Clients should use those packages to construct templates
// rather than this one, which provides shared internal data structures not
// intended for general use.
package parse
import (
"bytes"
"fmt"
"runtime"
"strconv"
"strings"
)
// Tree is the representation of a single parsed template.
type Tree struct {
	Name      string    // name of the template represented by the tree.
	ParseName string    // name of the top-level template during parsing, for error messages.
	Root      *ListNode // top-level root of the tree.
	Mode      Mode      // parsing mode.
	text      string    // text parsed to create the template (or its parent)
	// Parsing only; cleared after parse.
	funcs      []map[string]any
	lex        *lexer
	token      [3]item // three-token lookahead for parser.
	peekCount  int
	vars       []string // variables defined at the moment.
	treeSet    map[string]*Tree
	actionLine int // line of left delim starting action
	rangeDepth int
	stackDepth int // depth of nested parenthesized expressions
}

// A Mode value is a set of flags (or 0). Modes control parser behavior.
type Mode uint

const (
	ParseComments Mode = 1 << iota // parse comments and add them to AST
	SkipFuncCheck                  // do not check that functions are defined
)

// maxStackDepth is the maximum depth permitted for nested
// parenthesized expressions.
var maxStackDepth = 10000

// init reduces maxStackDepth for WebAssembly due to its smaller stack size.
func init() {
	if runtime.GOARCH == "wasm" {
		maxStackDepth = 1000
	}
}

// Copy returns a copy of the [Tree]. Any parsing state is discarded.
func (t *Tree) Copy() *Tree {
	if t == nil {
		return nil
	}
	return &Tree{
		Name:      t.Name,
		ParseName: t.ParseName,
		Root:      t.Root.CopyList(),
		text:      t.text,
	}
}

// Parse returns a map from template name to [Tree], created by parsing the
// templates described in the argument string. The top-level template will be
// given the specified name. If an error is encountered, parsing stops and an
// empty map is returned with the error.
func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]any) (map[string]*Tree, error) {
	treeSet := make(map[string]*Tree)
	t := New(name)
	t.text = text
	_, err := t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
	return treeSet, err
}
// next returns the next token, draining the pushback buffer before
// asking the lexer for a fresh item.
func (t *Tree) next() item {
	if t.peekCount > 0 {
		t.peekCount--
	} else {
		t.token[0] = t.lex.nextItem()
	}
	return t.token[t.peekCount]
}

// backup backs the input stream up one token.
func (t *Tree) backup() {
	t.peekCount++
}

// backup2 backs the input stream up two tokens.
// The zeroth token is already there.
func (t *Tree) backup2(t1 item) {
	t.token[1] = t1
	t.peekCount = 2
}

// backup3 backs the input stream up three tokens
// The zeroth token is already there.
func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
	t.token[1] = t1
	t.token[2] = t2
	t.peekCount = 3
}

// peek returns but does not consume the next token.
func (t *Tree) peek() item {
	if t.peekCount > 0 {
		return t.token[t.peekCount-1]
	}
	// Buffer empty: fetch one item and leave it pending.
	t.peekCount = 1
	t.token[0] = t.lex.nextItem()
	return t.token[0]
}
// nextNonSpace returns the next token, consuming and discarding any
// run of space tokens that precedes it.
func (t *Tree) nextNonSpace() item {
	tok := t.next()
	for tok.typ == itemSpace {
		tok = t.next()
	}
	return tok
}
// peekNonSpace returns but does not consume the next non-space token.
func (t *Tree) peekNonSpace() item {
	token := t.nextNonSpace()
	t.backup()
	return token
}

// Parsing.

// New allocates a new parse tree with the given name.
func New(name string, funcs ...map[string]any) *Tree {
	return &Tree{
		Name:  name,
		funcs: funcs,
	}
}

// ErrorContext returns a textual representation of the location of the node in the input text.
// The receiver is only used when the node does not have a pointer to the tree inside,
// which can occur in old code.
func (t *Tree) ErrorContext(n Node) (location, context string) {
	pos := int(n.Position())
	tree := n.tree()
	if tree == nil {
		tree = t
	}
	text := tree.text[:pos]
	// Column is the byte offset from the last newline before pos.
	byteNum := strings.LastIndex(text, "\n")
	if byteNum == -1 {
		byteNum = pos // On first line.
	} else {
		byteNum++ // After the newline.
		byteNum = pos - byteNum
	}
	lineNum := 1 + strings.Count(text, "\n")
	context = n.String()
	return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
}

// errorf formats the error and terminates processing.
// It panics; Tree.recover at the top of Parse converts the panic
// back into an error return.
func (t *Tree) errorf(format string, args ...any) {
	t.Root = nil
	format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.token[0].line, format)
	panic(fmt.Errorf(format, args...))
}

// error terminates processing.
func (t *Tree) error(err error) {
	t.errorf("%s", err)
}

// expect consumes the next token and guarantees it has the required type.
func (t *Tree) expect(expected itemType, context string) item {
	token := t.nextNonSpace()
	if token.typ != expected {
		t.unexpected(token, context)
	}
	return token
}

// expectOneOf consumes the next token and guarantees it has one of the required types.
func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
	token := t.nextNonSpace()
	if token.typ != expected1 && token.typ != expected2 {
		t.unexpected(token, context)
	}
	return token
}

// unexpected complains about the token and terminates processing.
// Both errorf calls panic, so the first branch never falls through
// to the second.
func (t *Tree) unexpected(token item, context string) {
	if token.typ == itemError {
		extra := ""
		if t.actionLine != 0 && t.actionLine != token.line {
			extra = fmt.Sprintf(" in action started at %s:%d", t.ParseName, t.actionLine)
			if strings.HasSuffix(token.val, " action") {
				extra = extra[len(" in action"):] // avoid "action in action"
			}
		}
		t.errorf("%s%s", token, extra)
	}
	t.errorf("unexpected %s in %s", token, context)
}
// recover is the handler that turns panics into returns from the top level of Parse.
// Runtime errors (e.g. nil dereference) are re-panicked; only the
// error values thrown by errorf are converted into *errp.
func (t *Tree) recover(errp *error) {
	e := recover()
	if e != nil {
		if _, ok := e.(runtime.Error); ok {
			panic(e)
		}
		if t != nil {
			t.stopParse()
		}
		*errp = e.(error)
	}
}

// startParse initializes the parser, using the lexer.
func (t *Tree) startParse(funcs []map[string]any, lex *lexer, treeSet map[string]*Tree) {
	t.Root = nil
	t.lex = lex
	t.vars = []string{"$"}
	t.funcs = funcs
	t.treeSet = treeSet
	t.stackDepth = 0
	lex.options = lexOptions{
		emitComment: t.Mode&ParseComments != 0,
		// break/continue stay plain identifiers if the user defined
		// functions with those names.
		breakOK:    !t.hasFunction("break"),
		continueOK: !t.hasFunction("continue"),
	}
}

// stopParse terminates parsing, releasing parse-only state.
func (t *Tree) stopParse() {
	t.lex = nil
	t.vars = nil
	t.funcs = nil
	t.treeSet = nil
}

// Parse parses the template definition string to construct a representation of
// the template for execution. If either action delimiter string is empty, the
// default ("{{" or "}}") is used. Embedded template definitions are added to
// the treeSet map.
func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]any) (tree *Tree, err error) {
	defer t.recover(&err)
	t.ParseName = t.Name
	lexer := lex(t.Name, text, leftDelim, rightDelim)
	t.startParse(funcs, lexer, treeSet)
	t.text = text
	t.parse()
	t.add()
	t.stopParse()
	return t, nil
}

// add adds tree to t.treeSet.
// An existing empty definition may be silently replaced; two non-empty
// definitions of the same name are an error.
func (t *Tree) add() {
	tree := t.treeSet[t.Name]
	if tree == nil || IsEmptyTree(tree.Root) {
		t.treeSet[t.Name] = t
		return
	}
	if !IsEmptyTree(t.Root) {
		t.errorf("template: multiple definition of template %q", t.Name)
	}
}

// IsEmptyTree reports whether this tree (node) is empty of everything but space or comments.
// Note the deliberately empty cases (ActionNode, IfNode, RangeNode,
// TemplateNode, WithNode): they fall out of the switch to the final
// "return false", since any of those makes the tree non-empty.
func IsEmptyTree(n Node) bool {
	switch n := n.(type) {
	case nil:
		return true
	case *ActionNode:
	case *CommentNode:
		return true
	case *IfNode:
	case *ListNode:
		for _, node := range n.Nodes {
			if !IsEmptyTree(node) {
				return false
			}
		}
		return true
	case *RangeNode:
	case *TemplateNode:
	case *TextNode:
		return len(bytes.TrimSpace(n.Text)) == 0
	case *WithNode:
	default:
		panic("unknown node: " + n.String())
	}
	return false
}

// parse is the top-level parser for a template, essentially the same
// as itemList except it also parses {{define}} actions.
// It runs to EOF.
func (t *Tree) parse() {
	t.Root = t.newList(t.peek().pos)
	for t.peek().typ != itemEOF {
		if t.peek().typ == itemLeftDelim {
			delim := t.next()
			if t.nextNonSpace().typ == itemDefine {
				newT := New("definition") // name will be updated once we know it.
				newT.text = t.text
				newT.Mode = t.Mode
				newT.ParseName = t.ParseName
				newT.startParse(t.funcs, t.lex, t.treeSet)
				newT.parseDefinition()
				continue
			}
			// Not a define: push the delimiter back and parse normally.
			t.backup2(delim)
		}
		switch n := t.textOrAction(); n.Type() {
		case nodeEnd, nodeElse:
			t.errorf("unexpected %s", n)
		default:
			t.Root.append(n)
		}
	}
}

// parseDefinition parses a {{define}} ... {{end}} template definition and
// installs the definition in t.treeSet. The "define" keyword has already
// been scanned.
func (t *Tree) parseDefinition() {
	const context = "define clause"
	name := t.expectOneOf(itemString, itemRawString, context)
	var err error
	t.Name, err = strconv.Unquote(name.val)
	if err != nil {
		t.error(err)
	}
	t.expect(itemRightDelim, context)
	var end Node
	t.Root, end = t.itemList()
	if end.Type() != nodeEnd {
		t.errorf("unexpected %s in %s", end, context)
	}
	t.add()
	t.stopParse()
}
// itemList:
//
// textOrAction*
//
// Terminates at {{end}} or {{else}}, returned separately.
func (t *Tree) itemList() (list *ListNode, next Node) {
list = t.newList(t.peekNonSpace().pos)
for t.peekNonSpace().typ != itemEOF {
n := t.textOrAction()
switch n.Type() {
case nodeEnd, nodeElse:
return list, n
}
list.append(n)
}
t.errorf("unexpected EOF")
return
}
// textOrAction:
//
//	text | comment | action
func (t *Tree) textOrAction() Node {
	tok := t.nextNonSpace()
	switch tok.typ {
	case itemText:
		return t.newText(tok.pos, tok.val)
	case itemComment:
		return t.newComment(tok.pos, tok.val)
	case itemLeftDelim:
		// Record the line the action starts on for error reporting,
		// and clear it once the action has been parsed.
		t.actionLine = tok.line
		defer t.clearActionLine()
		return t.action()
	default:
		t.unexpected(tok, "input")
	}
	return nil
}
// clearActionLine resets the recorded start line of the current action;
// textOrAction defers it so the field never outlives the action parse.
func (t *Tree) clearActionLine() {
	t.actionLine = 0
}
// Action:
//
//	control
//	command ("|" command)*
//
// Left delim is past. Now get actions.
// First word could be a keyword such as range.
func (t *Tree) action() (n Node) {
	tok := t.nextNonSpace()
	switch tok.typ {
	case itemBlock:
		return t.blockControl()
	case itemBreak:
		return t.breakControl(tok.pos, tok.line)
	case itemContinue:
		return t.continueControl(tok.pos, tok.line)
	case itemElse:
		return t.elseControl()
	case itemEnd:
		return t.endControl()
	case itemIf:
		return t.ifControl()
	case itemRange:
		return t.rangeControl()
	case itemTemplate:
		return t.templateControl()
	case itemWith:
		return t.withControl()
	}
	// Not a control keyword: this is a plain pipeline action.
	t.backup()
	tok = t.peek()
	// Do not pop variables; they persist until "end".
	return t.newAction(tok.pos, tok.line, t.pipeline("command", itemRightDelim))
}
// Break:
//
//	{{break}}
//
// Break keyword is past.
func (t *Tree) breakControl(pos Pos, line int) Node {
	tok := t.nextNonSpace()
	if tok.typ != itemRightDelim {
		t.unexpected(tok, "{{break}}")
	}
	// break is only meaningful while a {{range}} is open.
	if t.rangeDepth == 0 {
		t.errorf("{{break}} outside {{range}}")
	}
	return t.newBreak(pos, line)
}
// Continue:
//
//	{{continue}}
//
// Continue keyword is past.
func (t *Tree) continueControl(pos Pos, line int) Node {
	tok := t.nextNonSpace()
	if tok.typ != itemRightDelim {
		t.unexpected(tok, "{{continue}}")
	}
	// continue is only meaningful while a {{range}} is open.
	if t.rangeDepth == 0 {
		t.errorf("{{continue}} outside {{range}}")
	}
	return t.newContinue(pos, line)
}
// Pipeline:
//
//	declarations? command ('|' command)*
func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
	token := t.peekNonSpace()
	pipe = t.newPipeline(token.pos, token.line, nil)
	// Are there declarations or assignments?
decls:
	if v := t.peekNonSpace(); v.typ == itemVariable {
		t.next()
		// Since space is a token, we need 3-token look-ahead here in the worst case:
		// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
		// argument variable rather than a declaration. So remember the token
		// adjacent to the variable so we can push it back if necessary.
		tokenAfterVariable := t.peek()
		next := t.peekNonSpace()
		switch {
		case next.typ == itemAssign, next.typ == itemDeclare:
			// "$x =" or "$x :=": a declared/assigned variable.
			pipe.IsAssign = next.typ == itemAssign
			t.nextNonSpace()
			pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
			t.vars = append(t.vars, v.val)
		case next.typ == itemChar && next.val == ",":
			// "$x ,": possibly the first of two range variables.
			t.nextNonSpace()
			pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val))
			t.vars = append(t.vars, v.val)
			if context == "range" && len(pipe.Decl) < 2 {
				switch t.peekNonSpace().typ {
				case itemVariable, itemRightDelim, itemRightParen:
					// second initialized variable in a range pipeline
					goto decls
				default:
					t.errorf("range can only initialize variables")
				}
			}
			t.errorf("too many declarations in %s", context)
		case tokenAfterVariable.typ == itemSpace:
			// "$x foo": $x is an argument, not a declaration; push back
			// both the variable and the adjacent space.
			t.backup3(v, tokenAfterVariable)
		default:
			t.backup2(v)
		}
	}
	for {
		switch token := t.nextNonSpace(); token.typ {
		case end:
			// At this point, the pipeline is complete
			t.checkPipeline(pipe, context)
			return
		case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
			itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
			// The start of an operand: parse one command of the pipeline.
			t.backup()
			pipe.append(t.command())
		default:
			t.unexpected(token, context)
		}
	}
}
// checkPipeline validates a just-parsed pipeline: it must be non-empty,
// and every stage after the first must begin with something executable.
func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
	// Reject empty pipelines.
	if len(pipe.Cmds) == 0 {
		t.errorf("missing value for %s", context)
	}
	// Only the first command of a pipeline may start with a non-executable
	// operand; later stages receive the previous value as an argument.
	for i, cmd := range pipe.Cmds[1:] {
		switch cmd.Args[0].Type() {
		case NodeBool, NodeDot, NodeNil, NodeNumber, NodeString:
			// With A|B|C, pipeline stage 2 is B.
			t.errorf("non executable command in pipeline stage %d", i+2)
		}
	}
}
// parseControl parses the body shared by the if/range/with actions:
// the pipeline, the main item list, and an optional {{else}} list,
// terminated by {{end}}. The caller wraps the pieces in the right node type.
func (t *Tree) parseControl(context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
	// Variables declared in the pipeline go out of scope at {{end}}.
	defer t.popVars(len(t.vars))
	pipe = t.pipeline(context, itemRightDelim)
	if context == "range" {
		// Track nesting so {{break}}/{{continue}} can be validated.
		t.rangeDepth++
	}
	var next Node
	list, next = t.itemList()
	if context == "range" {
		t.rangeDepth--
	}
	switch next.Type() {
	case nodeEnd: //done
	case nodeElse:
		// Special case for "else if" and "else with".
		// If the "else" is followed immediately by an "if" or "with",
		// the elseControl will have left the "if" or "with" token pending. Treat
		//	{{if a}}_{{else if b}}_{{end}}
		//	{{with a}}_{{else with b}}_{{end}}
		// as
		//	{{if a}}_{{else}}{{if b}}_{{end}}{{end}}
		//	{{with a}}_{{else}}{{with b}}_{{end}}{{end}}.
		// To do this, parse the "if" or "with" as usual and stop at it {{end}};
		// the subsequent{{end}} is assumed. This technique works even for long if-else-if chains.
		if context == "if" && t.peek().typ == itemIf {
			t.next() // Consume the "if" token.
			elseList = t.newList(next.Position())
			elseList.append(t.ifControl())
		} else if context == "with" && t.peek().typ == itemWith {
			t.next()
			elseList = t.newList(next.Position())
			elseList.append(t.withControl())
		} else {
			elseList, next = t.itemList()
			if next.Type() != nodeEnd {
				t.errorf("expected end; found %s", next)
			}
		}
	}
	return pipe.Position(), pipe.Line, pipe, list, elseList
}
// If:
//
//	{{if pipeline}} itemList {{end}}
//	{{if pipeline}} itemList {{else}} itemList {{end}}
//
// If keyword is past.
func (t *Tree) ifControl() Node {
	return t.newIf(t.parseControl("if"))
}
// Range:
//
//	{{range pipeline}} itemList {{end}}
//	{{range pipeline}} itemList {{else}} itemList {{end}}
//
// Range keyword is past.
func (t *Tree) rangeControl() Node {
	return t.newRange(t.parseControl("range"))
}
// With:
//
//	{{with pipeline}} itemList {{end}}
//	{{with pipeline}} itemList {{else}} itemList {{end}}
//
// With keyword is past.
func (t *Tree) withControl() Node {
	return t.newWith(t.parseControl("with"))
}
// End:
//
//	{{end}}
//
// End keyword is past.
func (t *Tree) endControl() Node {
	// The only token allowed after "end" is the closing delimiter.
	return t.newEnd(t.expect(itemRightDelim, "end").pos)
}
// Else:
//
//	{{else}}
//
// Else keyword is past.
func (t *Tree) elseControl() Node {
	// "{{else if ..." and "{{else with ..." are treated as
	// "{{else}}{{if ..." and "{{else}}{{with ...": emit the else node
	// now and leave the if/with token pending for parseControl.
	if peek := t.peekNonSpace(); peek.typ == itemIf || peek.typ == itemWith {
		return t.newElse(peek.pos, peek.line)
	}
	tok := t.expect(itemRightDelim, "else")
	return t.newElse(tok.pos, tok.line)
}
// Block:
//
//	{{block stringValue pipeline}}
//
// Block keyword is past.
// The name must be something that can evaluate to a string.
// The pipeline is mandatory.
func (t *Tree) blockControl() Node {
	const context = "block clause"
	token := t.nextNonSpace()
	name := t.parseTemplateName(token, context)
	pipe := t.pipeline(context, itemRightDelim)
	block := New(name) // a fresh Tree for the block body, named for the block.
	block.text = t.text
	block.Mode = t.Mode
	block.ParseName = t.ParseName
	// The block body parses with its own Tree but shares the lexer
	// and the accumulated tree set.
	block.startParse(t.funcs, t.lex, t.treeSet)
	var end Node
	block.Root, end = block.itemList()
	if end.Type() != nodeEnd {
		t.errorf("unexpected %s in %s", end, context)
	}
	block.add()
	block.stopParse()
	// {{block ...}} is shorthand for a define plus an invocation:
	// the result here is the equivalent {{template}} node.
	return t.newTemplate(token.pos, token.line, name, pipe)
}
// Template:
//
//	{{template stringValue pipeline}}
//
// Template keyword is past. The name must be something that can evaluate
// to a string.
func (t *Tree) templateControl() Node {
	const context = "template clause"
	tok := t.nextNonSpace()
	name := t.parseTemplateName(tok, context)
	var pipe *PipeNode
	if t.nextNonSpace().typ != itemRightDelim {
		// There is a pipeline argument; back up and parse it.
		t.backup()
		// Do not pop variables; they persist until "end".
		pipe = t.pipeline(context, itemRightDelim)
	}
	return t.newTemplate(tok.pos, tok.line, name, pipe)
}
// parseTemplateName extracts and unquotes the template name from token,
// reporting an error unless the token is a (raw) string literal.
func (t *Tree) parseTemplateName(token item, context string) (name string) {
	if token.typ == itemString || token.typ == itemRawString {
		unquoted, err := strconv.Unquote(token.val)
		if err != nil {
			t.error(err)
		}
		return unquoted
	}
	t.unexpected(token, context)
	return
}
// command:
//
//	operand (space operand)*
//
// space-separated arguments up to a pipeline character or right delimiter.
// we consume the pipe character but leave the right delim to terminate the action.
func (t *Tree) command() *CommandNode {
	cmd := t.newCommand(t.peekNonSpace().pos)
	for {
		t.peekNonSpace() // skip leading spaces.
		operand := t.operand()
		if operand != nil {
			cmd.append(operand)
		}
		switch token := t.next(); token.typ {
		case itemSpace:
			continue
		case itemRightDelim, itemRightParen:
			// Leave the delimiter for the caller to consume.
			t.backup()
		case itemPipe:
			// nothing here; break loop below
		default:
			t.unexpected(token, "operand")
		}
		break
	}
	if len(cmd.Args) == 0 {
		t.errorf("empty command")
	}
	return cmd
}
// operand:
//
//	term .Field*
//
// An operand is a space-separated component of a command,
// a term possibly followed by field accesses.
// A nil return means the next item is not an operand.
func (t *Tree) operand() Node {
	node := t.term()
	if node == nil {
		return nil
	}
	if t.peek().typ == itemField {
		// Collect the chain of .Field accesses following the term.
		chain := t.newChain(t.peek().pos, node)
		for t.peek().typ == itemField {
			chain.Add(t.next().val)
		}
		// Compatibility with original API: If the term is of type NodeField
		// or NodeVariable, just put more fields on the original.
		// Otherwise, keep the Chain node.
		// Obvious parsing errors involving literal values are detected here.
		// More complex error cases will have to be handled at execution time.
		switch node.Type() {
		case NodeField:
			node = t.newField(chain.Position(), chain.String())
		case NodeVariable:
			node = t.newVariable(chain.Position(), chain.String())
		case NodeBool, NodeString, NodeNumber, NodeNil, NodeDot:
			t.errorf("unexpected . after term %q", node.String())
		default:
			node = chain
		}
	}
	return node
}
// term:
//
//	literal (number, string, nil, boolean)
//	function (identifier)
//	.
//	.Field
//	$
//	'(' pipeline ')'
//
// A term is a simple "expression".
// A nil return means the next item is not a term.
func (t *Tree) term() Node {
	switch token := t.nextNonSpace(); token.typ {
	case itemIdentifier:
		// An identifier must name a known function unless the Tree's
		// Mode disables the check (SkipFuncCheck).
		checkFunc := t.Mode&SkipFuncCheck == 0
		if checkFunc && !t.hasFunction(token.val) {
			t.errorf("function %q not defined", token.val)
		}
		return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
	case itemDot:
		return t.newDot(token.pos)
	case itemNil:
		return t.newNil(token.pos)
	case itemVariable:
		// Variables must have been declared earlier in the template.
		return t.useVar(token.pos, token.val)
	case itemField:
		return t.newField(token.pos, token.val)
	case itemBool:
		return t.newBool(token.pos, token.val == "true")
	case itemCharConstant, itemComplex, itemNumber:
		number, err := t.newNumber(token.pos, token.val, token.typ)
		if err != nil {
			t.error(err)
		}
		return number
	case itemLeftParen:
		// Parenthesized pipelines nest; bound the depth so deeply
		// nested input cannot exhaust the stack.
		if t.stackDepth >= maxStackDepth {
			t.errorf("max expression depth exceeded")
		}
		t.stackDepth++
		defer func() { t.stackDepth-- }()
		return t.pipeline("parenthesized pipeline", itemRightParen)
	case itemString, itemRawString:
		s, err := strconv.Unquote(token.val)
		if err != nil {
			t.error(err)
		}
		// Keep both the original quoted text and the unquoted value.
		return t.newString(token.pos, token.val, s)
	}
	// Not a term: push the token back for the caller.
	t.backup()
	return nil
}
// hasFunction reports if a function name exists in the Tree's maps.
func (t *Tree) hasFunction(name string) bool {
	for _, m := range t.funcs {
		if m != nil && m[name] != nil {
			return true
		}
	}
	return false
}
// popVars trims the variable list to the specified length,
// discarding variables declared after that point (scope exit).
func (t *Tree) popVars(n int) {
	t.vars = t.vars[:n]
}
// useVar returns a node for a variable reference. It errors if the
// variable is not defined.
func (t *Tree) useVar(pos Pos, name string) Node {
	node := t.newVariable(pos, name)
	// Only the base identifier ($x in $x.Field) needs to be declared.
	for _, declared := range t.vars {
		if declared == node.Ident[0] {
			return node
		}
	}
	t.errorf("undefined variable %q", node.Ident[0])
	return nil
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package template
import (
"maps"
"reflect"
"sync"
"text/template/parse"
)
// common holds the information shared by related templates.
type common struct {
	tmpl   map[string]*Template // Map from name to defined templates.
	muTmpl sync.RWMutex         // protects tmpl
	option option
	// We use two maps, one for parsing and one for execution.
	// This separation makes the API cleaner since it doesn't
	// expose reflection to the client.
	muFuncs    sync.RWMutex // protects parseFuncs and execFuncs
	parseFuncs FuncMap
	execFuncs  map[string]reflect.Value
}
// Template is the representation of a parsed template. The *parse.Tree
// field is exported only for use by [html/template] and should be treated
// as unexported by all other clients.
type Template struct {
	name string // name under which the template is registered in common.tmpl
	*parse.Tree
	*common
	leftDelim  string // action delimiters; "" selects the defaults {{ and }}
	rightDelim string
}
// New allocates a new, undefined template with the given name.
func New(name string) *Template {
	t := &Template{name: name}
	t.init()
	return t
}
// Name returns the name of the template.
func (t *Template) Name() string {
	return t.name
}
// New allocates a new, undefined template associated with the given one and with the same
// delimiters. The association, which is transitive, allows one template to
// invoke another with a {{template}} action.
//
// Because associated templates share underlying data, template construction
// cannot be done safely in parallel. Once the templates are constructed, they
// can be executed in parallel.
func (t *Template) New(name string) *Template {
	t.init()
	// The new template shares t's common structure and delimiters.
	return &Template{
		name:       name,
		common:     t.common,
		leftDelim:  t.leftDelim,
		rightDelim: t.rightDelim,
	}
}
// init guarantees that t has a valid common structure.
func (t *Template) init() {
	if t.common != nil {
		return
	}
	t.common = &common{
		tmpl:       make(map[string]*Template),
		parseFuncs: make(FuncMap),
		execFuncs:  make(map[string]reflect.Value),
	}
}
// Clone returns a duplicate of the template, including all associated
// templates. The actual representation is not copied, but the name space of
// associated templates is, so further calls to [Template.Parse] in the copy will add
// templates to the copy but not to the original. Clone can be used to prepare
// common templates and use them with variant definitions for other templates
// by adding the variants after the clone is made.
func (t *Template) Clone() (*Template, error) {
	nt := t.copy(nil)
	nt.init()
	if t.common == nil {
		// t has no associated templates or functions; nothing to copy.
		return nt, nil
	}
	nt.option = t.option
	t.muTmpl.RLock()
	defer t.muTmpl.RUnlock()
	for k, v := range t.tmpl {
		if k == t.name {
			// The clone itself stands in for t in the new name space.
			nt.tmpl[t.name] = nt
			continue
		}
		// The associated templates share nt's common structure.
		tmpl := v.copy(nt.common)
		nt.tmpl[k] = tmpl
	}
	t.muFuncs.RLock()
	defer t.muFuncs.RUnlock()
	maps.Copy(nt.parseFuncs, t.parseFuncs)
	maps.Copy(nt.execFuncs, t.execFuncs)
	return nt, nil
}
// copy returns a shallow copy of t, with common set to the argument.
func (t *Template) copy(c *common) *Template {
	nt := new(Template)
	nt.name = t.name
	nt.Tree = t.Tree
	nt.common = c
	nt.leftDelim = t.leftDelim
	nt.rightDelim = t.rightDelim
	return nt
}
// AddParseTree associates the argument parse tree with the template t, giving
// it the specified name. If the template has not been defined, this tree becomes
// its definition. If it has been defined and already has that name, the existing
// definition is replaced; otherwise a new template is created, defined, and returned.
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
	t.init()
	t.muTmpl.Lock()
	defer t.muTmpl.Unlock()
	nt := t
	if name != t.name {
		nt = t.New(name)
	}
	// Even if nt == t, we need to install it in the common.tmpl map.
	// associate reports whether the tree should replace nt's current one
	// (an empty tree never displaces an existing definition).
	if t.associate(nt, tree) || nt.Tree == nil {
		nt.Tree = tree
	}
	return nt, nil
}
// Templates returns a slice of defined templates associated with t.
func (t *Template) Templates() []*Template {
	if t.common == nil {
		return nil
	}
	// Return a slice so we don't expose the map.
	t.muTmpl.RLock()
	defer t.muTmpl.RUnlock()
	out := make([]*Template, 0, len(t.tmpl))
	for _, tmpl := range t.tmpl {
		out = append(out, tmpl)
	}
	return out
}
// Delims sets the action delimiters to the specified strings, to be used in
// subsequent calls to [Template.Parse], [Template.ParseFiles], or [Template.ParseGlob]. Nested template
// definitions will inherit the settings. An empty delimiter stands for the
// corresponding default: {{ or }}.
// The return value is the template, so calls can be chained.
func (t *Template) Delims(left, right string) *Template {
	t.init()
	t.leftDelim, t.rightDelim = left, right
	return t
}
// Funcs adds the elements of the argument map to the template's function map.
// It must be called before the template is parsed.
// It panics if a value in the map is not a function with appropriate return
// type or if the name cannot be used syntactically as a function in a template.
// It is legal to overwrite elements of the map. The return value is the template,
// so calls can be chained.
func (t *Template) Funcs(funcMap FuncMap) *Template {
	t.init()
	t.muFuncs.Lock()
	defer t.muFuncs.Unlock()
	// Install each function twice: as a reflect.Value for execution
	// and as-is for the parser's name check.
	addValueFuncs(t.execFuncs, funcMap)
	addFuncs(t.parseFuncs, funcMap)
	return t
}
// Lookup returns the template with the given name that is associated with t.
// It returns nil if there is no such template or the template has no definition.
func (t *Template) Lookup(name string) *Template {
	if t.common == nil {
		// No associated templates at all.
		return nil
	}
	t.muTmpl.RLock()
	defer t.muTmpl.RUnlock()
	return t.tmpl[name]
}
// Parse parses text as a template body for t.
// Named template definitions ({{define ...}} or {{block ...}} statements) in text
// define additional templates associated with t and are removed from the
// definition of t itself.
//
// Templates can be redefined in successive calls to Parse.
// A template definition with a body containing only white space and comments
// is considered empty and will not replace an existing template's body.
// This allows using Parse to add new named template definitions without
// overwriting the main template body.
func (t *Template) Parse(text string) (*Template, error) {
	t.init()
	// Hold the funcs read lock while the parser consults parseFuncs.
	t.muFuncs.RLock()
	trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins())
	t.muFuncs.RUnlock()
	if err != nil {
		return nil, err
	}
	// Add the newly parsed trees, including the one for t, into our common structure.
	for name, tree := range trees {
		if _, err := t.AddParseTree(name, tree); err != nil {
			return nil, err
		}
	}
	return t, nil
}
// associate installs the new template into the group of templates associated
// with t. The two are already known to share the common structure.
// The boolean return value reports whether to store this tree as t.Tree.
// The caller must hold t.muTmpl.
func (t *Template) associate(new *Template, tree *parse.Tree) bool {
	if new.common != t.common {
		panic("internal error: associate not common")
	}
	if old := t.tmpl[new.name]; old != nil && parse.IsEmptyTree(tree.Root) && old.Tree != nil {
		// If a template by that name exists,
		// don't replace it with an empty template.
		return false
	}
	t.tmpl[new.name] = new
	return true
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package time
import (
"errors"
"internal/stringslite"
_ "unsafe" // for linkname
)
// These are predefined layouts for use in [Time.Format] and [time.Parse].
// The reference time used in these layouts is the specific time stamp:
//
// 01/02 03:04:05PM '06 -0700
//
// (January 2, 15:04:05, 2006, in time zone seven hours west of GMT).
// That value is recorded as the constant named [Layout], listed below. As a Unix
// time, this is 1136239445. Since MST is GMT-0700, the reference would be
// printed by the Unix date command as:
//
// Mon Jan 2 15:04:05 MST 2006
//
// It is a regrettable historic error that the date uses the American convention
// of putting the numerical month before the day.
//
// The example for Time.Format demonstrates the working of the layout string
// in detail and is a good reference.
//
// Note that the [RFC822], [RFC850], and [RFC1123] formats should be applied
// only to local times. Applying them to UTC times will use "UTC" as the
// time zone abbreviation, while strictly speaking those RFCs require the
// use of "GMT" in that case.
// When using the [RFC1123] or [RFC1123Z] formats for parsing, note that these
// formats define a leading zero for the day-in-month portion, which is not
// strictly allowed by RFC 1123. This will result in an error when parsing
// date strings that occur in the first 9 days of a given month.
// In general [RFC1123Z] should be used instead of [RFC1123] for servers
// that insist on that format, and [RFC3339] should be preferred for new protocols.
// [RFC3339], [RFC822], [RFC822Z], [RFC1123], and [RFC1123Z] are useful for formatting;
// when used with time.Parse they do not accept all the time formats
// permitted by the RFCs and they do accept time formats not formally defined.
// The [RFC3339Nano] format removes trailing zeros from the seconds field
// and thus may not sort correctly once formatted.
//
// Most programs can use one of the defined constants as the layout passed to
// Format or Parse. The rest of this comment can be ignored unless you are
// creating a custom layout string.
//
// To define your own format, write down what the reference time would look like
// formatted your way; see the values of constants like [ANSIC], [StampMicro] or
// [Kitchen] for examples. The model is to demonstrate what the reference time
// looks like so that the Format and Parse methods can apply the same
// transformation to a general time value.
//
// Here is a summary of the components of a layout string. Each element shows by
// example the formatting of an element of the reference time. Only these values
// are recognized. Text in the layout string that is not recognized as part of
// the reference time is echoed verbatim during Format and expected to appear
// verbatim in the input to Parse.
//
// Year: "2006" "06"
// Month: "Jan" "January" "01" "1"
// Day of the week: "Mon" "Monday"
// Day of the month: "2" "_2" "02"
// Day of the year: "__2" "002"
// Hour: "15" "3" "03" (PM or AM)
// Minute: "4" "04"
// Second: "5" "05"
// AM/PM mark: "PM"
//
// Numeric time zone offsets format as follows:
//
// "-0700" ±hhmm
// "-07:00" ±hh:mm
// "-07" ±hh
// "-070000" ±hhmmss
// "-07:00:00" ±hh:mm:ss
//
// Replacing the sign in the format with a Z triggers
// the ISO 8601 behavior of printing Z instead of an
// offset for the UTC zone. Thus:
//
// "Z0700" Z or ±hhmm
// "Z07:00" Z or ±hh:mm
// "Z07" Z or ±hh
// "Z070000" Z or ±hhmmss
// "Z07:00:00" Z or ±hh:mm:ss
//
// Within the format string, the underscores in "_2" and "__2" represent spaces
// that may be replaced by digits if the following number has multiple digits,
// for compatibility with fixed-width Unix time formats. A leading zero represents
// a zero-padded value.
//
// The formats __2 and 002 are space-padded and zero-padded
// three-character day of year; there is no unpadded day of year format.
//
// A comma or decimal point followed by one or more zeros represents
// a fractional second, printed to the given number of decimal places.
// A comma or decimal point followed by one or more nines represents
// a fractional second, printed to the given number of decimal places, with
// trailing zeros removed.
// For example "15:04:05,000" or "15:04:05.000" formats or parses with
// millisecond precision.
//
// Some valid layouts are invalid time values for time.Parse, due to formats
// such as _ for space padding and Z for zone information.
const (
	Layout   = "01/02 03:04:05PM '06 -0700" // The reference time, in numerical order.
	ANSIC    = "Mon Jan _2 15:04:05 2006"
	UnixDate = "Mon Jan _2 15:04:05 MST 2006"
	RubyDate = "Mon Jan 02 15:04:05 -0700 2006"
	// RFC-derived layouts; see the caveats about time zones and
	// strictness in the package comment above.
	RFC822      = "02 Jan 06 15:04 MST"
	RFC822Z     = "02 Jan 06 15:04 -0700" // RFC822 with numeric zone
	RFC850      = "Monday, 02-Jan-06 15:04:05 MST"
	RFC1123     = "Mon, 02 Jan 2006 15:04:05 MST"
	RFC1123Z    = "Mon, 02 Jan 2006 15:04:05 -0700" // RFC1123 with numeric zone
	RFC3339     = "2006-01-02T15:04:05Z07:00"
	RFC3339Nano = "2006-01-02T15:04:05.999999999Z07:00"
	Kitchen     = "3:04PM"
	// Handy time stamps.
	Stamp      = "Jan _2 15:04:05"
	StampMilli = "Jan _2 15:04:05.000"
	StampMicro = "Jan _2 15:04:05.000000"
	StampNano  = "Jan _2 15:04:05.000000000"
	DateTime   = "2006-01-02 15:04:05"
	DateOnly   = "2006-01-02"
	TimeOnly   = "15:04:05"
)
// The std* values identify layout elements. The low bits are the element
// kind; stdNeedDate/stdNeedYday/stdNeedClock flag what information the
// element requires, and the high bits carry extra arguments (see
// stdFracSecond and stdArgShift/stdSeparatorShift below).
const (
	_                        = iota
	stdLongMonth             = iota + stdNeedDate  // "January"
	stdMonth                                       // "Jan"
	stdNumMonth                                    // "1"
	stdZeroMonth                                   // "01"
	stdLongWeekDay                                 // "Monday"
	stdWeekDay                                     // "Mon"
	stdDay                                         // "2"
	stdUnderDay                                    // "_2"
	stdZeroDay                                     // "02"
	stdUnderYearDay          = iota + stdNeedYday  // "__2"
	stdZeroYearDay                                 // "002"
	stdHour                  = iota + stdNeedClock // "15"
	stdHour12                                      // "3"
	stdZeroHour12                                  // "03"
	stdMinute                                      // "4"
	stdZeroMinute                                  // "04"
	stdSecond                                      // "5"
	stdZeroSecond                                  // "05"
	stdLongYear              = iota + stdNeedDate  // "2006"
	stdYear                                        // "06"
	stdPM                    = iota + stdNeedClock // "PM"
	stdpm                                          // "pm"
	stdTZ                    = iota                // "MST"
	stdISO8601TZ                                   // "Z0700"  // prints Z for UTC
	stdISO8601SecondsTZ                            // "Z070000"
	stdISO8601ShortTZ                              // "Z07"
	stdISO8601ColonTZ                              // "Z07:00" // prints Z for UTC
	stdISO8601ColonSecondsTZ                       // "Z07:00:00"
	stdNumTZ                                       // "-0700"  // always numeric
	stdNumSecondsTz                                // "-070000"
	stdNumShortTZ                                  // "-07"    // always numeric
	stdNumColonTZ                                  // "-07:00" // always numeric
	stdNumColonSecondsTZ                           // "-07:00:00"
	stdFracSecond0                                 // ".0", ".00", ... , trailing zeros included
	stdFracSecond9                                 // ".9", ".99", ..., trailing zeros omitted

	stdNeedDate       = 1 << 8             // need month, day, year
	stdNeedYday       = 1 << 9             // need yday
	stdNeedClock      = 1 << 10            // need hour, minute, second
	stdArgShift       = 16                 // extra argument in high bits, above low stdArgShift
	stdSeparatorShift = 28                 // extra argument in high 4 bits for fractional second separators
	stdMask           = 1<<stdArgShift - 1 // mask out argument
)
// std0x records the std values for "01", "02", ..., "06",
// indexed by the second digit minus '1' (used by nextStdChunk).
var std0x = [...]int{stdZeroMonth, stdZeroDay, stdZeroHour12, stdZeroMinute, stdZeroSecond, stdYear}
// startsWithLowerCase reports whether the string has a lower-case letter at the beginning.
// Its purpose is to prevent matching strings like "Month" when looking for "Mon".
func startsWithLowerCase(str string) bool {
	return len(str) > 0 && 'a' <= str[0] && str[0] <= 'z'
}
// nextStdChunk finds the first occurrence of a std string in
// layout and returns the text before, the std string, and the text after.
//
// nextStdChunk should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/searKing/golang/go
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname nextStdChunk
func nextStdChunk(layout string) (prefix string, std int, suffix string) {
	for i := 0; i < len(layout); i++ {
		switch c := int(layout[i]); c {
		case 'J': // January, Jan
			if len(layout) >= i+3 && layout[i:i+3] == "Jan" {
				if len(layout) >= i+7 && layout[i:i+7] == "January" {
					return layout[0:i], stdLongMonth, layout[i+7:]
				}
				// Reject "Jan" when it is the prefix of a longer
				// lower-case word (e.g. "Janet").
				if !startsWithLowerCase(layout[i+3:]) {
					return layout[0:i], stdMonth, layout[i+3:]
				}
			}
		case 'M': // Monday, Mon, MST
			if len(layout) >= i+3 {
				if layout[i:i+3] == "Mon" {
					if len(layout) >= i+6 && layout[i:i+6] == "Monday" {
						return layout[0:i], stdLongWeekDay, layout[i+6:]
					}
					if !startsWithLowerCase(layout[i+3:]) {
						return layout[0:i], stdWeekDay, layout[i+3:]
					}
				}
				if layout[i:i+3] == "MST" {
					return layout[0:i], stdTZ, layout[i+3:]
				}
			}
		case '0': // 01, 02, 03, 04, 05, 06, 002
			if len(layout) >= i+2 && '1' <= layout[i+1] && layout[i+1] <= '6' {
				return layout[0:i], std0x[layout[i+1]-'1'], layout[i+2:]
			}
			if len(layout) >= i+3 && layout[i+1] == '0' && layout[i+2] == '2' {
				return layout[0:i], stdZeroYearDay, layout[i+3:]
			}
		case '1': // 15, 1
			if len(layout) >= i+2 && layout[i+1] == '5' {
				return layout[0:i], stdHour, layout[i+2:]
			}
			return layout[0:i], stdNumMonth, layout[i+1:]
		case '2': // 2006, 2
			if len(layout) >= i+4 && layout[i:i+4] == "2006" {
				return layout[0:i], stdLongYear, layout[i+4:]
			}
			return layout[0:i], stdDay, layout[i+1:]
		case '_': // _2, _2006, __2
			if len(layout) >= i+2 && layout[i+1] == '2' {
				// _2006 is really a literal _, followed by stdLongYear
				if len(layout) >= i+5 && layout[i+1:i+5] == "2006" {
					return layout[0 : i+1], stdLongYear, layout[i+5:]
				}
				return layout[0:i], stdUnderDay, layout[i+2:]
			}
			if len(layout) >= i+3 && layout[i+1] == '_' && layout[i+2] == '2' {
				return layout[0:i], stdUnderYearDay, layout[i+3:]
			}
		case '3':
			return layout[0:i], stdHour12, layout[i+1:]
		case '4':
			return layout[0:i], stdMinute, layout[i+1:]
		case '5':
			return layout[0:i], stdSecond, layout[i+1:]
		case 'P': // PM
			if len(layout) >= i+2 && layout[i+1] == 'M' {
				return layout[0:i], stdPM, layout[i+2:]
			}
		case 'p': // pm
			if len(layout) >= i+2 && layout[i+1] == 'm' {
				return layout[0:i], stdpm, layout[i+2:]
			}
		case '-': // -070000, -07:00:00, -0700, -07:00, -07
			// Longer forms are checked first so e.g. "-07:00:00" is
			// not misread as "-07".
			if len(layout) >= i+7 && layout[i:i+7] == "-070000" {
				return layout[0:i], stdNumSecondsTz, layout[i+7:]
			}
			if len(layout) >= i+9 && layout[i:i+9] == "-07:00:00" {
				return layout[0:i], stdNumColonSecondsTZ, layout[i+9:]
			}
			if len(layout) >= i+5 && layout[i:i+5] == "-0700" {
				return layout[0:i], stdNumTZ, layout[i+5:]
			}
			if len(layout) >= i+6 && layout[i:i+6] == "-07:00" {
				return layout[0:i], stdNumColonTZ, layout[i+6:]
			}
			if len(layout) >= i+3 && layout[i:i+3] == "-07" {
				return layout[0:i], stdNumShortTZ, layout[i+3:]
			}
		case 'Z': // Z070000, Z07:00:00, Z0700, Z07:00,
			if len(layout) >= i+7 && layout[i:i+7] == "Z070000" {
				return layout[0:i], stdISO8601SecondsTZ, layout[i+7:]
			}
			if len(layout) >= i+9 && layout[i:i+9] == "Z07:00:00" {
				return layout[0:i], stdISO8601ColonSecondsTZ, layout[i+9:]
			}
			if len(layout) >= i+5 && layout[i:i+5] == "Z0700" {
				return layout[0:i], stdISO8601TZ, layout[i+5:]
			}
			if len(layout) >= i+6 && layout[i:i+6] == "Z07:00" {
				return layout[0:i], stdISO8601ColonTZ, layout[i+6:]
			}
			if len(layout) >= i+3 && layout[i:i+3] == "Z07" {
				return layout[0:i], stdISO8601ShortTZ, layout[i+3:]
			}
		case '.', ',': // ,000, or .000, or ,999, or .999 - repeated digits for fractional seconds.
			if i+1 < len(layout) && (layout[i+1] == '0' || layout[i+1] == '9') {
				ch := layout[i+1]
				j := i + 1
				for j < len(layout) && layout[j] == ch {
					j++
				}
				// String of digits must end here - only fractional second is all digits.
				if !isDigit(layout, j) {
					code := stdFracSecond0
					if layout[i+1] == '9' {
						code = stdFracSecond9
					}
					// Pack digit count and separator into the std value.
					std := stdFracSecond(code, j-(i+1), c)
					return layout[0:i], std, layout[j:]
				}
			}
		}
	}
	// No std element found: the entire layout is literal text.
	return layout, 0, ""
}
// longDayNames are the full English day names in Sunday-first order,
// matching the "Monday" layout element.
var longDayNames = []string{
	"Sunday",
	"Monday",
	"Tuesday",
	"Wednesday",
	"Thursday",
	"Friday",
	"Saturday",
}

// shortDayNames are the three-letter abbreviations in Sunday-first order,
// matching the "Mon" layout element.
var shortDayNames = []string{
	"Sun",
	"Mon",
	"Tue",
	"Wed",
	"Thu",
	"Fri",
	"Sat",
}

// shortMonthNames are the three-letter abbreviations in January-first
// order, matching the "Jan" layout element.
var shortMonthNames = []string{
	"Jan",
	"Feb",
	"Mar",
	"Apr",
	"May",
	"Jun",
	"Jul",
	"Aug",
	"Sep",
	"Oct",
	"Nov",
	"Dec",
}

// longMonthNames are the full English month names in January-first order,
// matching the "January" layout element.
var longMonthNames = []string{
	"January",
	"February",
	"March",
	"April",
	"May",
	"June",
	"July",
	"August",
	"September",
	"October",
	"November",
	"December",
}
// match reports whether s1 and s2 match ignoring case.
// It is assumed s1 and s2 are the same length.
func match(s1, s2 string) bool {
	for i := 0; i < len(s1); i++ {
		a, b := s1[i], s2[i]
		if a == b {
			continue
		}
		// Fold to lower case: 'a'-'A' is a single bit, so OR-ing it in
		// lower-cases an ASCII letter and leaves the bytes comparable.
		a |= 'a' - 'A'
		b |= 'a' - 'A'
		if a != b || a < 'a' || a > 'z' {
			return false
		}
	}
	return true
}
// lookup searches tab for a case-insensitive prefix match of val.
// On success it returns the matching index and the remainder of val;
// otherwise it returns -1, the original string, and errBad.
func lookup(tab []string, val string) (int, string, error) {
	for i, name := range tab {
		if len(val) >= len(name) && match(val[:len(name)], name) {
			return i, val[len(name):], nil
		}
	}
	return -1, val, errBad
}
// appendInt appends the decimal form of x to b and returns the result.
// If the decimal form (excluding sign) is shorter than width, the result is padded with leading 0's.
// Duplicates functionality in strconv, but avoids dependency.
func appendInt(b []byte, x int, width int) []byte {
	mag := uint(x)
	if x < 0 {
		b = append(b, '-')
		mag = uint(-x)
	}
	toDigit := func(v uint) byte { return '0' + byte(v) }
	// Fast paths for the 2- and 4-digit fields that dominate time layouts.
	if width == 2 && mag < 1e2 {
		return append(b, toDigit(mag/10), toDigit(mag%10))
	}
	if width == 4 && mag < 1e4 {
		return append(b, toDigit(mag/1000), toDigit(mag/100%10), toDigit(mag/10%10), toDigit(mag%10))
	}
	// Count the decimal digits of mag (zero still needs one digit).
	digits := 0
	if mag == 0 {
		digits = 1
	}
	for v := mag; v > 0; v /= 10 {
		digits++
	}
	// Emit any leading zero padding (the '-' sign does not count
	// toward width).
	for pad := width - digits; pad > 0; pad-- {
		b = append(b, '0')
	}
	// Grow b by exactly `digits` bytes, reusing capacity when possible.
	if len(b)+digits <= cap(b) {
		b = b[:len(b)+digits]
	} else {
		b = append(b, make([]byte, digits)...)
	}
	// Write the digits least-significant first, from the end backward.
	pos := len(b) - 1
	for mag >= 10 && pos > 0 {
		next := mag / 10
		b[pos] = toDigit(mag - next*10)
		mag = next
		pos--
	}
	b[pos] = toDigit(mag)
	return b
}
// errAtoi is the failure sentinel returned by atoi.
// Never printed, just needs to be non-nil for return by atoi.
var errAtoi = errors.New("time: invalid number")
// atoi parses s as an optionally signed decimal integer. Any unconsumed
// trailing characters (or an empty digit string) produce errAtoi.
// Duplicates functionality in strconv, but avoids dependency.
func atoi[bytes []byte | string](s bytes) (x int, err error) {
	neg := false
	if len(s) > 0 {
		if c := s[0]; c == '+' || c == '-' {
			neg = c == '-'
			s = s[1:]
		}
	}
	q, rem, err := leadingInt(s)
	if err != nil || len(rem) > 0 {
		return 0, errAtoi
	}
	x = int(q)
	if neg {
		x = -x
	}
	return x, nil
}
// The "std" value passed to appendNano contains two packed fields: the number of
// digits after the decimal and the separator character (period or comma).
// These functions pack and unpack that variable.
func stdFracSecond(code, n, c int) int {
	// Use 0xfff to make the failure case even more absurd.
	std := code | ((n & 0xfff) << stdArgShift)
	if c != '.' {
		// A comma separator is recorded as a flag bit.
		std |= 1 << stdSeparatorShift
	}
	return std
}
// digitsLen unpacks the fractional-second digit count that stdFracSecond
// packed into std.
func digitsLen(std int) int {
	return (std >> stdArgShift) & 0xfff
}
// separator unpacks the fractional-second separator recorded in std:
// '.' by default, ',' when the comma flag was set by stdFracSecond.
func separator(std int) byte {
	if std>>stdSeparatorShift != 0 {
		return ','
	}
	return '.'
}
// appendNano appends a fractional second, as nanoseconds, to b
// and returns the result. The nanosec must be within [0, 999999999].
func appendNano(b []byte, nanosec int, std int) []byte {
	trimZeros := std&stdMask == stdFracSecond9
	digits := digitsLen(std)
	if trimZeros && (digits == 0 || nanosec == 0) {
		return b
	}
	sep := separator(std)
	b = append(b, sep)
	// Write the full 9-digit nanosecond value, then truncate to the
	// requested number of digits.
	b = appendInt(b, nanosec, 9)
	if digits < 9 {
		b = b[:len(b)-(9-digits)]
	}
	if trimZeros {
		// Drop trailing zeros and, if nothing remains, the separator too.
		end := len(b)
		for end > 0 && b[end-1] == '0' {
			end--
		}
		if end > 0 && b[end-1] == sep {
			end--
		}
		b = b[:end]
	}
	return b
}
// String returns the time formatted using the format string
//
//	"2006-01-02 15:04:05.999999999 -0700 MST"
//
// If the time has a monotonic clock reading, the returned string
// includes a final field "m=±<value>", where value is the monotonic
// clock reading formatted as a decimal number of seconds.
//
// The returned string is meant for debugging; for a stable serialized
// representation, use t.MarshalText, t.MarshalBinary, or t.Format
// with an explicit format string.
func (t Time) String() string {
	s := t.Format("2006-01-02 15:04:05.999999999 -0700 MST")

	// Format monotonic clock reading as m=±ddd.nnnnnnnnn.
	if t.wall&hasMonotonic != 0 {
		m2 := uint64(t.ext)
		sign := byte('+')
		if t.ext < 0 {
			sign = '-'
			// Unsigned negation yields the magnitude even for the
			// minimum int64 value.
			m2 = -m2
		}
		// Split the nanosecond count: m2 becomes nanoseconds, m1 seconds,
		// and m0 a (rare) overflow word of seconds beyond 1e9.
		m1, m2 := m2/1e9, m2%1e9
		m0, m1 := m1/1e9, m1%1e9
		buf := make([]byte, 0, 24)
		buf = append(buf, " m="...)
		buf = append(buf, sign)
		wid := 0
		if m0 != 0 {
			buf = appendInt(buf, int(m0), 0)
			// m1 must be zero-padded to 9 digits when m0 is printed.
			wid = 9
		}
		buf = appendInt(buf, int(m1), wid)
		buf = append(buf, '.')
		buf = appendInt(buf, int(m2), 9)
		s += string(buf)
	}
	return s
}
// GoString implements [fmt.GoStringer] and formats t to be printed in Go source
// code.
func (t Time) GoString() string {
	abs := t.absSec()
	year, month, day := abs.days().date()
	hour, minute, second := abs.clock()
	// Capacity sized to the longest plausible output.
	buf := make([]byte, 0, len("time.Date(9999, time.September, 31, 23, 59, 59, 999999999, time.Local)"))
	buf = append(buf, "time.Date("...)
	buf = appendInt(buf, year, 0)
	if January <= month && month <= December {
		buf = append(buf, ", time."...)
		buf = append(buf, longMonthNames[month-1]...)
	} else {
		// It's difficult to construct a time.Time with a date outside the
		// standard range but we might as well try to handle the case.
		buf = appendInt(buf, int(month), 0)
	}
	buf = append(buf, ", "...)
	buf = appendInt(buf, day, 0)
	buf = append(buf, ", "...)
	buf = appendInt(buf, hour, 0)
	buf = append(buf, ", "...)
	buf = appendInt(buf, minute, 0)
	buf = append(buf, ", "...)
	buf = appendInt(buf, second, 0)
	buf = append(buf, ", "...)
	buf = appendInt(buf, t.Nanosecond(), 0)
	buf = append(buf, ", "...)
	switch loc := t.Location(); loc {
	case UTC, nil:
		buf = append(buf, "time.UTC"...)
	case Local:
		buf = append(buf, "time.Local"...)
	default:
		// there are several options for how we could display this, none of
		// which are great:
		//
		// - use Location(loc.name), which is not technically valid syntax
		// - use LoadLocation(loc.name), which will cause a syntax error when
		// embedded and also would require us to escape the string without
		// importing fmt or strconv
		// - try to use FixedZone, which would also require escaping the name
		// and would represent e.g. "America/Los_Angeles" daylight saving time
		// shifts inaccurately
		// - use the pointer format, which is no worse than you'd get with the
		// old fmt.Sprintf("%#v", t) format.
		//
		// Of these, Location(loc.name) is the least disruptive. This is an edge
		// case we hope not to hit too often.
		buf = append(buf, `time.Location(`...)
		buf = append(buf, quote(loc.name)...)
		buf = append(buf, ')')
	}
	buf = append(buf, ')')
	return string(buf)
}
// Format returns a textual representation of the time value formatted according
// to the layout defined by the argument. See the documentation for the
// constant called [Layout] to see how to represent the layout format.
//
// The executable example for [Time.Format] demonstrates the working
// of the layout string in detail and is a good reference.
func (t Time) Format(layout string) string {
	// A small stack buffer covers typical layouts; anything larger
	// falls back to a heap allocation of the estimated size.
	const bufSize = 64
	var b []byte
	if need := len(layout) + 10; need < bufSize {
		var buf [bufSize]byte
		b = buf[:0]
	} else {
		b = make([]byte, 0, need)
	}
	return string(t.AppendFormat(b, layout))
}
// AppendFormat is like [Time.Format] but appends the textual
// representation to b and returns the extended buffer.
func (t Time) AppendFormat(b []byte, layout string) []byte {
	// Optimize for RFC3339 as it accounts for over half of all representations.
	if layout == RFC3339 {
		return t.appendFormatRFC3339(b, false)
	}
	if layout == RFC3339Nano {
		return t.appendFormatRFC3339(b, true)
	}
	return t.appendFormat(b, layout)
}
// appendFormat is the general-purpose formatter behind Time.AppendFormat.
// It walks the layout string chunk by chunk via nextStdChunk, copying
// literal text and replacing each reference-time element with the
// corresponding component of t.
func (t Time) appendFormat(b []byte, layout string) []byte {
	name, offset, abs := t.locabs()
	days := abs.days()

	// Date and clock components are computed lazily; -1 marks
	// "not yet computed" for year, yday, and hour.
	var (
		year  int = -1
		month Month
		day   int
		yday  int = -1
		hour  int = -1
		min   int
		sec   int
	)

	// Each iteration generates one std value.
	for layout != "" {
		prefix, std, suffix := nextStdChunk(layout)
		if prefix != "" {
			b = append(b, prefix...)
		}
		if std == 0 {
			break
		}
		layout = suffix

		// Compute year, month, day if needed.
		if year < 0 && std&stdNeedDate != 0 {
			year, month, day = days.date()
		}

		if yday < 0 && std&stdNeedYday != 0 {
			_, yday = days.yearYday()
		}

		// Compute hour, minute, second if needed.
		if hour < 0 && std&stdNeedClock != 0 {
			hour, min, sec = abs.clock()
		}

		switch std & stdMask {
		case stdYear:
			y := year
			if y < 0 {
				y = -y
			}
			b = appendInt(b, y%100, 2)
		case stdLongYear:
			b = appendInt(b, year, 4)
		case stdMonth:
			b = append(b, month.String()[:3]...)
		case stdLongMonth:
			m := month.String()
			b = append(b, m...)
		case stdNumMonth:
			b = appendInt(b, int(month), 0)
		case stdZeroMonth:
			b = appendInt(b, int(month), 2)
		case stdWeekDay:
			b = append(b, days.weekday().String()[:3]...)
		case stdLongWeekDay:
			s := days.weekday().String()
			b = append(b, s...)
		case stdDay:
			b = appendInt(b, day, 0)
		case stdUnderDay:
			if day < 10 {
				b = append(b, ' ')
			}
			b = appendInt(b, day, 0)
		case stdZeroDay:
			b = appendInt(b, day, 2)
		case stdUnderYearDay:
			if yday < 100 {
				b = append(b, ' ')
				if yday < 10 {
					b = append(b, ' ')
				}
			}
			b = appendInt(b, yday, 0)
		case stdZeroYearDay:
			b = appendInt(b, yday, 3)
		case stdHour:
			b = appendInt(b, hour, 2)
		case stdHour12:
			// Noon is 12PM, midnight is 12AM.
			hr := hour % 12
			if hr == 0 {
				hr = 12
			}
			b = appendInt(b, hr, 0)
		case stdZeroHour12:
			// Noon is 12PM, midnight is 12AM.
			hr := hour % 12
			if hr == 0 {
				hr = 12
			}
			b = appendInt(b, hr, 2)
		case stdMinute:
			b = appendInt(b, min, 0)
		case stdZeroMinute:
			b = appendInt(b, min, 2)
		case stdSecond:
			b = appendInt(b, sec, 0)
		case stdZeroSecond:
			b = appendInt(b, sec, 2)
		case stdPM:
			if hour >= 12 {
				b = append(b, "PM"...)
			} else {
				b = append(b, "AM"...)
			}
		case stdpm:
			if hour >= 12 {
				b = append(b, "pm"...)
			} else {
				b = append(b, "am"...)
			}
		case stdISO8601TZ, stdISO8601ColonTZ, stdISO8601SecondsTZ, stdISO8601ShortTZ, stdISO8601ColonSecondsTZ, stdNumTZ, stdNumColonTZ, stdNumSecondsTz, stdNumShortTZ, stdNumColonSecondsTZ:
			// Ugly special case. We cheat and take the "Z" variants
			// to mean "the time zone as formatted for ISO 8601".
			if offset == 0 && (std == stdISO8601TZ || std == stdISO8601ColonTZ || std == stdISO8601SecondsTZ || std == stdISO8601ShortTZ || std == stdISO8601ColonSecondsTZ) {
				b = append(b, 'Z')
				break
			}
			zone := offset / 60 // convert to minutes
			absoffset := offset
			if zone < 0 {
				b = append(b, '-')
				zone = -zone
				absoffset = -absoffset
			} else {
				b = append(b, '+')
			}
			b = appendInt(b, zone/60, 2)
			if std == stdISO8601ColonTZ || std == stdNumColonTZ || std == stdISO8601ColonSecondsTZ || std == stdNumColonSecondsTZ {
				b = append(b, ':')
			}
			if std != stdNumShortTZ && std != stdISO8601ShortTZ {
				b = appendInt(b, zone%60, 2)
			}

			// append seconds if appropriate
			if std == stdISO8601SecondsTZ || std == stdNumSecondsTz || std == stdNumColonSecondsTZ || std == stdISO8601ColonSecondsTZ {
				if std == stdNumColonSecondsTZ || std == stdISO8601ColonSecondsTZ {
					b = append(b, ':')
				}
				b = appendInt(b, absoffset%60, 2)
			}
		case stdTZ:
			if name != "" {
				b = append(b, name...)
				break
			}
			// No time zone known for this time, but we must print one.
			// Use the -0700 format.
			zone := offset / 60 // convert to minutes
			if zone < 0 {
				b = append(b, '-')
				zone = -zone
			} else {
				b = append(b, '+')
			}
			b = appendInt(b, zone/60, 2)
			b = appendInt(b, zone%60, 2)
		case stdFracSecond0, stdFracSecond9:
			b = appendNano(b, t.Nanosecond(), std)
		}
	}
	return b
}
// errBad is an internal sentinel for a malformed field; callers convert it
// into a descriptive *ParseError before returning to users.
var errBad = errors.New("bad value for field") // placeholder not passed to user
// ParseError describes a problem parsing a time string.
type ParseError struct {
	Layout     string // the layout given to Parse
	Value      string // the full value being parsed
	LayoutElem string // the layout element that failed to match
	ValueElem  string // the portion of the value that could not be parsed
	Message    string // optional additional detail; used verbatim by Error
}
// newParseError creates a new ParseError.
// The provided value and valueElem are cloned to avoid escaping their values.
func newParseError(layout, value, layoutElem, valueElem, message string) *ParseError {
	// Cloning detaches the error from the caller's (possibly large)
	// backing string so the error does not keep it alive.
	valueCopy := stringslite.Clone(value)
	valueElemCopy := stringslite.Clone(valueElem)
	return &ParseError{layout, valueCopy, layoutElem, valueElemCopy, message}
}
// These are borrowed from unicode/utf8 and strconv and replicate behavior in
// that package, since we can't take a dependency on either.
const (
	lowerhex  = "0123456789abcdef"
	runeSelf  = 0x80      // first code point requiring multi-byte UTF-8 encoding
	runeError = '\uFFFD' // the Unicode replacement character
)
// quote returns s wrapped in double quotes, escaping quotes, backslashes,
// control characters, and non-ASCII bytes as \xNN sequences. It approximates
// strconv.Quote without importing strconv.
func quote(s string) string {
	buf := make([]byte, 1, len(s)+2) // slice will be at least len(s) + quotes
	buf[0] = '"'
	for i, c := range s {
		if c >= runeSelf || c < ' ' {
			// This means you are asking us to parse a time.Duration or
			// time.Location with unprintable or non-ASCII characters in it.
			// We don't expect to hit this case very often. We could try to
			// reproduce strconv.Quote's behavior with full fidelity but
			// given how rarely we expect to hit these edge cases, speed and
			// conciseness are better.
			var width int
			if c == runeError {
				// An invalid byte decodes as runeError with width 1, while
				// a literal U+FFFD in the input occupies 3 bytes.
				width = 1
				if i+2 < len(s) && s[i:i+3] == string(runeError) {
					width = 3
				}
			} else {
				width = len(string(c))
			}
			for j := 0; j < width; j++ {
				buf = append(buf, `\x`...)
				buf = append(buf, lowerhex[s[i+j]>>4])
				buf = append(buf, lowerhex[s[i+j]&0xF])
			}
		} else {
			if c == '"' || c == '\\' {
				buf = append(buf, '\\')
			}
			buf = append(buf, byte(c))
		}
	}
	buf = append(buf, '"')
	return string(buf)
}
// Error returns the string representation of a ParseError.
func (e *ParseError) Error() string {
	if e.Message != "" {
		return "parsing time " +
			quote(e.Value) + e.Message
	}
	return "parsing time " +
		quote(e.Value) + " as " +
		quote(e.Layout) + ": cannot parse " +
		quote(e.ValueElem) + " as " +
		quote(e.LayoutElem)
}
// isDigit reports whether s[i] is in range and is a decimal digit.
func isDigit[bytes []byte | string](s bytes, i int) bool {
	return i < len(s) && '0' <= s[i] && s[i] <= '9'
}
// getnum parses s[0:1] or s[0:2] (fixed forces s[0:2])
// as a decimal integer and returns the integer and the
// remainder of the string.
func getnum(s string, fixed bool) (int, string, error) {
	if !isDigit(s, 0) {
		return 0, s, errBad
	}
	if isDigit(s, 1) {
		return int(s[0]-'0')*10 + int(s[1]-'0'), s[2:], nil
	}
	if fixed {
		// Exactly two digits were required.
		return 0, s, errBad
	}
	return int(s[0] - '0'), s[1:], nil
}
// getnum3 parses s[0:1], s[0:2], or s[0:3] (fixed forces s[0:3])
// as a decimal integer and returns the integer and the remainder
// of the string.
func getnum3(s string, fixed bool) (int, string, error) {
	n := 0
	i := 0
	for ; i < 3 && isDigit(s, i); i++ {
		n = 10*n + int(s[i]-'0')
	}
	if i == 0 || (fixed && i != 3) {
		return 0, s, errBad
	}
	return n, s[i:], nil
}
// cutspace returns s with any leading run of ASCII spaces removed.
func cutspace(s string) string {
	i := 0
	for i < len(s) && s[i] == ' ' {
		i++
	}
	return s[i:]
}
// skip removes the given prefix from value,
// treating runs of space characters as equivalent.
func skip(value, prefix string) (string, error) {
	for len(prefix) > 0 {
		if prefix[0] != ' ' {
			// Literal character: must match exactly.
			if len(value) == 0 || value[0] != prefix[0] {
				return value, errBad
			}
			prefix, value = prefix[1:], value[1:]
			continue
		}
		// A run of spaces in the prefix matches a run of spaces in the
		// value; an empty value is tolerated here.
		if len(value) > 0 && value[0] != ' ' {
			return value, errBad
		}
		prefix = cutspace(prefix)
		value = cutspace(value)
	}
	return value, nil
}
// Parse parses a formatted string and returns the time value it represents.
// See the documentation for the constant called [Layout] to see how to
// represent the format. The second argument must be parseable using
// the format string (layout) provided as the first argument.
//
// The example for [Time.Format] demonstrates the working of the layout string
// in detail and is a good reference.
//
// When parsing (only), the input may contain a fractional second
// field immediately after the seconds field, even if the layout does not
// signify its presence. In that case either a comma or a decimal point
// followed by a maximal series of digits is parsed as a fractional second.
// Fractional seconds are truncated to nanosecond precision.
//
// Elements omitted from the layout are assumed to be zero or, when
// zero is impossible, one, so parsing "3:04pm" returns the time
// corresponding to Jan 1, year 0, 15:04:00 UTC (note that because the year is
// 0, this time is before the zero Time).
// Years must be in the range 0000..9999. The day of the week is checked
// for syntax but it is otherwise ignored.
//
// For layouts specifying the two-digit year 06, a value NN >= 69 will be treated
// as 19NN and a value NN < 69 will be treated as 20NN.
//
// Timestamps representing leap seconds (second 60) cannot be parsed.
// These are not representable by [Time].
//
// The remainder of this comment describes the handling of time zones.
//
// In the absence of a time zone indicator, Parse returns a time in UTC.
//
// When parsing a time with a zone offset like -0700, if the offset corresponds
// to a time zone used by the current location ([Local]), then Parse uses that
// location and zone in the returned time. Otherwise it records the time as
// being in a fabricated location with time fixed at the given zone offset.
//
// When parsing a time with a zone abbreviation like MST, if the zone abbreviation
// has a defined offset in the current location, then that offset is used.
// The zone abbreviation "UTC" is recognized as UTC regardless of location.
// If the zone abbreviation is unknown, Parse records the time as being
// in a fabricated location with the given zone abbreviation and a zero offset.
// This choice means that such a time can be parsed and reformatted with the
// same layout losslessly, but the exact instant used in the representation will
// differ by the actual zone offset. To avoid such problems, prefer time layouts
// that use a numeric zone offset, or use [ParseInLocation].
func Parse(layout, value string) (Time, error) {
	// Optimize for RFC3339 as it accounts for over half of all representations.
	if layout == RFC3339 || layout == RFC3339Nano {
		t, ok := parseRFC3339(value, Local)
		if ok {
			return t, nil
		}
	}
	return parse(layout, value, UTC, Local)
}
// ParseInLocation is like Parse but differs in two important ways.
// First, in the absence of time zone information, Parse interprets a time as UTC;
// ParseInLocation interprets the time as in the given location.
// Second, when given a zone offset or abbreviation, Parse tries to match it
// against the Local location; ParseInLocation uses the given location.
func ParseInLocation(layout, value string, loc *Location) (Time, error) {
	// Optimize for RFC3339 as it accounts for over half of all representations.
	if layout == RFC3339 || layout == RFC3339Nano {
		t, ok := parseRFC3339(value, loc)
		if ok {
			return t, nil
		}
	}
	return parse(layout, value, loc, loc)
}
// parse is the shared implementation behind Parse and ParseInLocation.
// It consumes layout and value chunk by chunk (via nextStdChunk/skip),
// accumulating date, clock, and zone components, then assembles a Time.
// Times without zone information are interpreted in defaultLocation;
// zone offsets and abbreviations are matched against local.
func parse(layout, value string, defaultLocation, local *Location) (Time, error) {
	// Keep the original arguments for error reporting.
	alayout, avalue := layout, value
	rangeErrString := "" // set if a value is out of range
	amSet := false       // do we need to subtract 12 from the hour for midnight?
	pmSet := false       // do we need to add 12 to the hour?

	// Time being constructed.
	var (
		year       int
		month      int = -1
		day        int = -1
		yday       int = -1
		hour       int
		min        int
		sec        int
		nsec       int
		z          *Location
		zoneOffset int = -1
		zoneName   string
	)

	// Each iteration processes one std value.
	for {
		var err error
		prefix, std, suffix := nextStdChunk(layout)
		stdstr := layout[len(prefix) : len(layout)-len(suffix)]
		value, err = skip(value, prefix)
		if err != nil {
			return Time{}, newParseError(alayout, avalue, prefix, value, "")
		}
		if std == 0 {
			if len(value) != 0 {
				return Time{}, newParseError(alayout, avalue, "", value, ": extra text: "+quote(value))
			}
			break
		}
		layout = suffix
		var p string
		// hold preserves the pre-parse value for error reporting.
		hold := value
		switch std & stdMask {
		case stdYear:
			if len(value) < 2 {
				err = errBad
				break
			}
			p, value = value[0:2], value[2:]
			year, err = atoi(p)
			if err != nil {
				break
			}
			if year >= 69 { // Unix time starts Dec 31 1969 in some time zones
				year += 1900
			} else {
				year += 2000
			}
		case stdLongYear:
			if len(value) < 4 || !isDigit(value, 0) {
				err = errBad
				break
			}
			p, value = value[0:4], value[4:]
			year, err = atoi(p)
		case stdMonth:
			month, value, err = lookup(shortMonthNames, value)
			month++
		case stdLongMonth:
			month, value, err = lookup(longMonthNames, value)
			month++
		case stdNumMonth, stdZeroMonth:
			month, value, err = getnum(value, std == stdZeroMonth)
			if err == nil && (month <= 0 || 12 < month) {
				rangeErrString = "month"
			}
		case stdWeekDay:
			// Ignore weekday except for error checking.
			_, value, err = lookup(shortDayNames, value)
		case stdLongWeekDay:
			_, value, err = lookup(longDayNames, value)
		case stdDay, stdUnderDay, stdZeroDay:
			if std == stdUnderDay && len(value) > 0 && value[0] == ' ' {
				value = value[1:]
			}
			day, value, err = getnum(value, std == stdZeroDay)
			// Note that we allow any one- or two-digit day here.
			// The month, day, year combination is validated after we've completed parsing.
		case stdUnderYearDay, stdZeroYearDay:
			// Skip up to two leading pad spaces for _2 style year-days.
			for i := 0; i < 2; i++ {
				if std == stdUnderYearDay && len(value) > 0 && value[0] == ' ' {
					value = value[1:]
				}
			}
			yday, value, err = getnum3(value, std == stdZeroYearDay)
			// Note that we allow any one-, two-, or three-digit year-day here.
			// The year-day, year combination is validated after we've completed parsing.
		case stdHour:
			hour, value, err = getnum(value, false)
			if hour < 0 || 24 <= hour {
				rangeErrString = "hour"
			}
		case stdHour12, stdZeroHour12:
			hour, value, err = getnum(value, std == stdZeroHour12)
			if hour < 0 || 12 < hour {
				rangeErrString = "hour"
			}
		case stdMinute, stdZeroMinute:
			min, value, err = getnum(value, std == stdZeroMinute)
			if min < 0 || 60 <= min {
				rangeErrString = "minute"
			}
		case stdSecond, stdZeroSecond:
			sec, value, err = getnum(value, std == stdZeroSecond)
			if err != nil {
				break
			}
			if sec < 0 || 60 <= sec {
				rangeErrString = "second"
				break
			}
			// Special case: do we have a fractional second but no
			// fractional second in the format?
			if len(value) >= 2 && commaOrPeriod(value[0]) && isDigit(value, 1) {
				_, std, _ = nextStdChunk(layout)
				std &= stdMask
				if std == stdFracSecond0 || std == stdFracSecond9 {
					// Fractional second in the layout; proceed normally
					break
				}
				// No fractional second in the layout but we have one in the input.
				n := 2
				for ; n < len(value) && isDigit(value, n); n++ {
				}
				nsec, rangeErrString, err = parseNanoseconds(value, n)
				value = value[n:]
			}
		case stdPM:
			if len(value) < 2 {
				err = errBad
				break
			}
			p, value = value[0:2], value[2:]
			switch p {
			case "PM":
				pmSet = true
			case "AM":
				amSet = true
			default:
				err = errBad
			}
		case stdpm:
			if len(value) < 2 {
				err = errBad
				break
			}
			p, value = value[0:2], value[2:]
			switch p {
			case "pm":
				pmSet = true
			case "am":
				amSet = true
			default:
				err = errBad
			}
		case stdISO8601TZ, stdISO8601ShortTZ, stdISO8601ColonTZ, stdISO8601SecondsTZ, stdISO8601ColonSecondsTZ:
			if len(value) >= 1 && value[0] == 'Z' {
				value = value[1:]
				z = UTC
				break
			}
			fallthrough
		case stdNumTZ, stdNumShortTZ, stdNumColonTZ, stdNumSecondsTz, stdNumColonSecondsTZ:
			// Split the offset into sign, hour, minute, second fields
			// according to the specific layout element.
			var sign, hour, min, seconds string
			if std == stdISO8601ColonTZ || std == stdNumColonTZ {
				if len(value) < 6 {
					err = errBad
					break
				}
				if value[3] != ':' {
					err = errBad
					break
				}
				sign, hour, min, seconds, value = value[0:1], value[1:3], value[4:6], "00", value[6:]
			} else if std == stdNumShortTZ || std == stdISO8601ShortTZ {
				if len(value) < 3 {
					err = errBad
					break
				}
				sign, hour, min, seconds, value = value[0:1], value[1:3], "00", "00", value[3:]
			} else if std == stdISO8601ColonSecondsTZ || std == stdNumColonSecondsTZ {
				if len(value) < 9 {
					err = errBad
					break
				}
				if value[3] != ':' || value[6] != ':' {
					err = errBad
					break
				}
				sign, hour, min, seconds, value = value[0:1], value[1:3], value[4:6], value[7:9], value[9:]
			} else if std == stdISO8601SecondsTZ || std == stdNumSecondsTz {
				if len(value) < 7 {
					err = errBad
					break
				}
				sign, hour, min, seconds, value = value[0:1], value[1:3], value[3:5], value[5:7], value[7:]
			} else {
				if len(value) < 5 {
					err = errBad
					break
				}
				sign, hour, min, seconds, value = value[0:1], value[1:3], value[3:5], "00", value[5:]
			}
			var hr, mm, ss int
			hr, _, err = getnum(hour, true)
			if err == nil {
				mm, _, err = getnum(min, true)
				if err == nil {
					ss, _, err = getnum(seconds, true)
				}
			}

			// The range test use > rather than >=,
			// as some people do write offsets of 24 hours
			// or 60 minutes or 60 seconds.
			if hr > 24 {
				rangeErrString = "time zone offset hour"
			}
			if mm > 60 {
				rangeErrString = "time zone offset minute"
			}
			if ss > 60 {
				rangeErrString = "time zone offset second"
			}

			zoneOffset = (hr*60+mm)*60 + ss // offset is in seconds
			switch sign[0] {
			case '+':
			case '-':
				zoneOffset = -zoneOffset
			default:
				err = errBad
			}
		case stdTZ:
			// Does it look like a time zone?
			if len(value) >= 3 && value[0:3] == "UTC" {
				z = UTC
				value = value[3:]
				break
			}
			n, ok := parseTimeZone(value)
			if !ok {
				err = errBad
				break
			}
			zoneName, value = value[:n], value[n:]

		case stdFracSecond0:
			// stdFracSecond0 requires the exact number of digits as specified in
			// the layout.
			ndigit := 1 + digitsLen(std)
			if len(value) < ndigit {
				err = errBad
				break
			}
			nsec, rangeErrString, err = parseNanoseconds(value, ndigit)
			value = value[ndigit:]

		case stdFracSecond9:
			if len(value) < 2 || !commaOrPeriod(value[0]) || value[1] < '0' || '9' < value[1] {
				// Fractional second omitted.
				break
			}
			// Take any number of digits, even more than asked for,
			// because it is what the stdSecond case would do.
			i := 0
			for i+1 < len(value) && '0' <= value[i+1] && value[i+1] <= '9' {
				i++
			}
			nsec, rangeErrString, err = parseNanoseconds(value, 1+i)
			value = value[1+i:]
		}
		if rangeErrString != "" {
			return Time{}, newParseError(alayout, avalue, stdstr, value, ": "+rangeErrString+" out of range")
		}
		if err != nil {
			return Time{}, newParseError(alayout, avalue, stdstr, hold, "")
		}
	}
	if pmSet && hour < 12 {
		hour += 12
	} else if amSet && hour == 12 {
		hour = 0
	}

	// Convert yday to day, month.
	if yday >= 0 {
		var d int
		var m int
		if isLeap(year) {
			if yday == 31+29 {
				m = int(February)
				d = 29
			} else if yday > 31+29 {
				// Shift past Feb 29 so the non-leap table below applies.
				yday--
			}
		}
		if yday < 1 || yday > 365 {
			return Time{}, newParseError(alayout, avalue, "", value, ": day-of-year out of range")
		}
		if m == 0 {
			// Estimate the month, then correct using cumulative day counts.
			m = (yday-1)/31 + 1
			if daysBefore(Month(m+1)) < yday {
				m++
			}
			d = yday - daysBefore(Month(m))
		}
		// If month, day already seen, yday's m, d must match.
		// Otherwise, set them from m, d.
		if month >= 0 && month != m {
			return Time{}, newParseError(alayout, avalue, "", value, ": day-of-year does not match month")
		}
		month = m
		if day >= 0 && day != d {
			return Time{}, newParseError(alayout, avalue, "", value, ": day-of-year does not match day")
		}
		day = d
	} else {
		if month < 0 {
			month = int(January)
		}
		if day < 0 {
			day = 1
		}
	}

	// Validate the day of the month.
	if day < 1 || day > daysIn(Month(month), year) {
		return Time{}, newParseError(alayout, avalue, "", value, ": day out of range")
	}

	if z != nil {
		return Date(year, Month(month), day, hour, min, sec, nsec, z), nil
	}

	if zoneOffset != -1 {
		t := Date(year, Month(month), day, hour, min, sec, nsec, UTC)
		t.addSec(-int64(zoneOffset))

		// Look for local zone with the given offset.
		// If that zone was in effect at the given time, use it.
		name, offset, _, _, _ := local.lookup(t.unixSec())
		if offset == zoneOffset && (zoneName == "" || name == zoneName) {
			t.setLoc(local)
			return t, nil
		}

		// Otherwise create fake zone to record offset.
		zoneNameCopy := stringslite.Clone(zoneName) // avoid leaking the input value
		t.setLoc(FixedZone(zoneNameCopy, zoneOffset))
		return t, nil
	}

	if zoneName != "" {
		t := Date(year, Month(month), day, hour, min, sec, nsec, UTC)
		// Look for local zone with the given offset.
		// If that zone was in effect at the given time, use it.
		offset, ok := local.lookupName(zoneName, t.unixSec())
		if ok {
			t.addSec(-int64(offset))
			t.setLoc(local)
			return t, nil
		}

		// Otherwise, create fake zone with unknown offset.
		if len(zoneName) > 3 && zoneName[:3] == "GMT" {
			offset, _ = atoi(zoneName[3:]) // Guaranteed OK by parseGMT.
			offset *= 3600
		}
		zoneNameCopy := stringslite.Clone(zoneName) // avoid leaking the input value
		t.setLoc(FixedZone(zoneNameCopy, offset))
		return t, nil
	}

	// Otherwise, fall back to default.
	return Date(year, Month(month), day, hour, min, sec, nsec, defaultLocation), nil
}
// parseTimeZone parses a time zone string and returns its length. Time zones
// are human-generated and unpredictable. We can't do precise error checking.
// On the other hand, for a correct parse there must be a time zone at the
// beginning of the string, so it's almost always true that there's one
// there. We look at the beginning of the string for a run of upper-case letters.
// If there are more than 5, it's an error.
// If there are 4 or 5 and the last is a T, it's a time zone.
// If there are 3, it's a time zone.
// Otherwise, other than special cases, it's not a time zone.
// GMT is special because it can have an hour offset.
func parseTimeZone(value string) (length int, ok bool) {
	if len(value) < 3 {
		return 0, false
	}
	// Special case 1: ChST and MeST are the only zones with a lower-case letter.
	if len(value) >= 4 && (value[:4] == "ChST" || value[:4] == "MeST") {
		return 4, true
	}
	// Special case 2: GMT may have an hour offset; treat it specially.
	if value[:3] == "GMT" {
		return parseGMT(value), true
	}
	// Special Case 3: Some time zones are not named, but have +/-00 format
	if c := value[0]; c == '+' || c == '-' {
		n := parseSignedOffset(value)
		// parseSignedOffset returns 0 in case of bad input.
		return n, n > 0
	}
	// How many upper-case letters are there? Need at least three, at most five.
	nUpper := 0
	for nUpper < 6 && nUpper < len(value) {
		if c := value[nUpper]; c < 'A' || 'Z' < c {
			break
		}
		nUpper++
	}
	switch nUpper {
	case 3:
		return 3, true
	case 4:
		// Must end in T, except one special case.
		if value[3] == 'T' || value[:4] == "WITA" {
			return 4, true
		}
	case 5:
		// Must end in T to match.
		if value[4] == 'T' {
			return 5, true
		}
	}
	// 0, 1, 2, or 6+ upper-case letters: not a zone abbreviation.
	return 0, false
}
// parseGMT parses a GMT time zone. The input string is known to start "GMT".
// The function checks whether that is followed by a sign and a number in the
// range -23 through +23 excluding zero.
func parseGMT(value string) int {
	if rest := value[3:]; rest != "" {
		// A trailing hour offset extends the match.
		return 3 + parseSignedOffset(rest)
	}
	return 3
}
// parseSignedOffset parses a signed timezone offset (e.g. "+03" or "-04").
// The function checks for a signed number in the range -23 through +23 excluding zero.
// Returns length of the found offset string or 0 otherwise.
func parseSignedOffset(value string) int {
	if c := value[0]; c != '-' && c != '+' {
		return 0
	}
	digits := value[1:]
	x, rem, err := leadingInt(digits)
	if err != nil || rem == digits {
		// Overflow, or nothing consumed by leadingInt.
		return 0
	}
	if x > 23 {
		return 0
	}
	return len(value) - len(rem)
}
// commaOrPeriod reports whether b is a fractional-second separator.
func commaOrPeriod(b byte) bool {
	switch b {
	case '.', ',':
		return true
	}
	return false
}
// parseNanoseconds parses a fractional second of the form ".ddd" or ",ddd"
// occupying the first nbytes of value, returning the value scaled up to
// nanoseconds. rangeErrString is set if the digits parse to a negative value.
func parseNanoseconds[bytes []byte | string](value bytes, nbytes int) (ns int, rangeErrString string, err error) {
	if !commaOrPeriod(value[0]) {
		return 0, "", errBad
	}
	if nbytes > 10 {
		// Only the separator plus 9 digits are significant; ignore the rest.
		value = value[:10]
		nbytes = 10
	}
	ns, err = atoi(value[1:nbytes])
	if err != nil {
		return ns, "", err
	}
	if ns < 0 {
		return ns, "fractional second", nil
	}
	// We need nanoseconds, which means scaling by the number
	// of missing digits in the format, maximum length 10.
	for i := nbytes; i < 10; i++ {
		ns *= 10
	}
	return ns, "", nil
}
var errLeadingInt = errors.New("time: bad [0-9]*") // never printed

// leadingInt consumes the leading [0-9]* from s, returning its value,
// the unconsumed remainder, and errLeadingInt if the value exceeds 1<<63.
func leadingInt[bytes []byte | string](s bytes) (x uint64, rem bytes, err error) {
	var i int
	for i = 0; i < len(s); i++ {
		c := s[i]
		if c < '0' || c > '9' {
			break
		}
		// Check before and after the accumulate step so that neither the
		// multiply nor the add can silently wrap past 1<<63.
		if x > 1<<63/10 {
			return 0, rem, errLeadingInt // overflow
		}
		x = x*10 + uint64(c-'0')
		if x > 1<<63 {
			return 0, rem, errLeadingInt // overflow
		}
	}
	return x, s[i:], nil
}
// leadingFraction consumes the leading [0-9]* from s.
// It is used only for fractions, so does not return an error on overflow,
// it just stops accumulating precision.
func leadingFraction(s string) (x uint64, scale float64, rem string) {
	scale = 1
	saturated := false // stop accumulating once precision is exhausted
	var i int
	for i < len(s) {
		c := s[i]
		if c < '0' || c > '9' {
			break
		}
		i++
		if saturated {
			continue
		}
		if x > (1<<63-1)/10 {
			// It's possible for overflow to give a positive number, so take care.
			saturated = true
			continue
		}
		y := x*10 + uint64(c-'0')
		if y > 1<<63 {
			saturated = true
			continue
		}
		x = y
		scale *= 10
	}
	return x, scale, s[i:]
}
// parseDurationError describes a problem parsing a duration string.
type parseDurationError struct {
	message string // description of the problem, e.g. "invalid duration"
	value   string // the original input string
}

// Error implements the error interface, quoting the offending input.
func (e *parseDurationError) Error() string {
	return "time: " + e.message + " " + quote(e.value)
}
// unitMap maps duration unit suffixes accepted by ParseDuration to their
// size in nanoseconds.
var unitMap = map[string]uint64{
	"ns": uint64(Nanosecond),
	"us": uint64(Microsecond),
	"µs": uint64(Microsecond), // U+00B5 = micro symbol
	"μs": uint64(Microsecond), // U+03BC = Greek letter mu
	"ms": uint64(Millisecond),
	"s":  uint64(Second),
	"m":  uint64(Minute),
	"h":  uint64(Hour),
}
// ParseDuration parses a duration string.
// A duration string is a possibly signed sequence of
// decimal numbers, each with optional fraction and a unit suffix,
// such as "300ms", "-1.5h" or "2h45m".
// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
func ParseDuration(s string) (Duration, error) {
	// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
	orig := s
	// d accumulates the magnitude in nanoseconds as an unsigned value so
	// that -1<<63 (the minimum Duration) remains representable.
	var d uint64
	neg := false

	// Consume [-+]?
	if s != "" {
		c := s[0]
		if c == '-' || c == '+' {
			neg = c == '-'
			s = s[1:]
		}
	}
	// Special case: if all that is left is "0", this is zero.
	if s == "0" {
		return 0, nil
	}
	if s == "" {
		return 0, &parseDurationError{"invalid duration", orig}
	}
	for s != "" {
		var (
			v, f  uint64      // integers before, after decimal point
			scale float64 = 1 // value = v + f/scale
		)

		var err error

		// The next character must be [0-9.]
		if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
			return 0, &parseDurationError{"invalid duration", orig}
		}
		// Consume [0-9]*
		pl := len(s)
		v, s, err = leadingInt(s)
		if err != nil {
			return 0, &parseDurationError{"invalid duration", orig}
		}
		pre := pl != len(s) // whether we consumed anything before a period

		// Consume (\.[0-9]*)?
		post := false
		if s != "" && s[0] == '.' {
			s = s[1:]
			pl := len(s)
			f, scale, s = leadingFraction(s)
			post = pl != len(s)
		}
		if !pre && !post {
			// no digits (e.g. ".s" or "-.s")
			return 0, &parseDurationError{"invalid duration", orig}
		}

		// Consume unit.
		i := 0
		for ; i < len(s); i++ {
			c := s[i]
			if c == '.' || '0' <= c && c <= '9' {
				break
			}
		}
		if i == 0 {
			return 0, &parseDurationError{"missing unit in duration", orig}
		}
		u := s[:i]
		s = s[i:]
		unit, ok := unitMap[u]
		if !ok {
			return 0, &parseDurationError{"unknown unit " + quote(u) + " in duration", orig}
		}
		if v > 1<<63/unit {
			// overflow
			return 0, &parseDurationError{"invalid duration", orig}
		}
		v *= unit
		if f > 0 {
			// float64 is needed to be nanosecond accurate for fractions of hours.
			// v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit)
			v += uint64(float64(f) * (float64(unit) / scale))
			if v > 1<<63 {
				// overflow
				return 0, &parseDurationError{"invalid duration", orig}
			}
		}
		d += v
		if d > 1<<63 {
			return 0, &parseDurationError{"invalid duration", orig}
		}
	}
	if neg {
		// d may be exactly 1<<63 here, which is representable only as the
		// negative value -1<<63.
		return -Duration(d), nil
	}
	if d > 1<<63-1 {
		return 0, &parseDurationError{"invalid duration", orig}
	}
	return Duration(d), nil
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package time
import "errors"
// RFC 3339 is the most commonly used format.
//
// It is implicitly used by the Time.(Marshal|Unmarshal)(Text|JSON) methods.
// Also, according to analysis on https://go.dev/issue/52746,
// RFC 3339 accounts for 57% of all explicitly specified time formats,
// with the second most popular format only being used 8% of the time.
// The overwhelming use of RFC 3339 compared to all other formats justifies
// the addition of logic to optimize formatting and parsing.
// appendFormatRFC3339 appends t to b in RFC 3339 layout
// ("2006-01-02T15:04:05Z07:00"), including a fractional-second
// field when nanos is true, and returns the extended buffer.
func (t Time) appendFormatRFC3339(b []byte, nanos bool) []byte {
	_, offset, abs := t.locabs()

	// Format date.
	year, month, day := abs.days().date()
	b = appendInt(b, year, 4)
	b = append(b, '-')
	b = appendInt(b, int(month), 2)
	b = append(b, '-')
	b = appendInt(b, day, 2)

	b = append(b, 'T')

	// Format time.
	hour, min, sec := abs.clock()
	b = appendInt(b, hour, 2)
	b = append(b, ':')
	b = appendInt(b, min, 2)
	b = append(b, ':')
	b = appendInt(b, sec, 2)

	if nanos {
		std := stdFracSecond(stdFracSecond9, 9, '.')
		b = appendNano(b, t.Nanosecond(), std)
	}

	if offset == 0 {
		// UTC is written as the single letter 'Z'.
		return append(b, 'Z')
	}

	// Format zone.
	zone := offset / 60 // convert to minutes
	if zone < 0 {
		b = append(b, '-')
		zone = -zone
	} else {
		b = append(b, '+')
	}
	b = appendInt(b, zone/60, 2)
	b = append(b, ':')
	b = appendInt(b, zone%60, 2)
	return b
}
// appendStrictRFC3339 appends t formatted as RFC 3339 with nanoseconds,
// returning an error for timestamps that format fine in Go's own syntax
// but are not representable as valid RFC 3339 (years outside [0,9999],
// zone hours outside [0,23]).
func (t Time) appendStrictRFC3339(b []byte) ([]byte, error) {
	n0 := len(b)
	b = t.appendFormatRFC3339(b, true)

	// Not all valid Go timestamps can be serialized as valid RFC 3339.
	// Explicitly check for these edge cases.
	// See https://go.dev/issue/4556 and https://go.dev/issue/54580.
	num2 := func(b []byte) byte { return 10*(b[0]-'0') + (b[1] - '0') }
	switch {
	case b[n0+len("9999")] != '-': // year must be exactly 4 digits wide
		return b, errors.New("year outside of range [0,9999]")
	case b[len(b)-1] != 'Z':
		// A numeric zone suffix was appended. A digit at the sign position
		// means the zone hour was wider than two digits.
		c := b[len(b)-len("Z07:00")]
		if ('0' <= c && c <= '9') || num2(b[len(b)-len("07:00"):]) >= 24 {
			return b, errors.New("timezone hour outside of range [0,23]")
		}
	}
	return b, nil
}
// parseRFC3339 parses s as an RFC 3339 timestamp
// (e.g. "2006-01-02T15:04:05.999999999-07:00"), returning ok=false on any
// syntax or range violation. It is a fast path that avoids the general
// layout-driven parser.
func parseRFC3339[bytes []byte | string](s bytes, local *Location) (Time, bool) {
	// parseUint parses s as an unsigned decimal integer and
	// verifies that it is within some range.
	// If it is invalid or out-of-range,
	// it sets ok to false and returns the min value.
	ok := true
	parseUint := func(s bytes, min, max int) (x int) {
		for _, c := range []byte(s) {
			if c < '0' || '9' < c {
				ok = false
				return min
			}
			x = x*10 + int(c) - '0'
		}
		if x < min || max < x {
			ok = false
			return min
		}
		return x
	}

	// Parse the date and time.
	if len(s) < len("2006-01-02T15:04:05") {
		return Time{}, false
	}
	year := parseUint(s[0:4], 0, 9999)                       // e.g., 2006
	month := parseUint(s[5:7], 1, 12)                        // e.g., 01
	day := parseUint(s[8:10], 1, daysIn(Month(month), year)) // e.g., 02
	hour := parseUint(s[11:13], 0, 23)                       // e.g., 15
	min := parseUint(s[14:16], 0, 59)                        // e.g., 04
	sec := parseUint(s[17:19], 0, 59)                        // e.g., 05
	// Separators are checked after the digit fields, so ok already
	// reflects every numeric field above.
	if !ok || !(s[4] == '-' && s[7] == '-' && s[10] == 'T' && s[13] == ':' && s[16] == ':') {
		return Time{}, false
	}
	s = s[19:]

	// Parse the fractional second.
	var nsec int
	if len(s) >= 2 && s[0] == '.' && isDigit(s, 1) {
		n := 2
		for ; n < len(s) && isDigit(s, n); n++ {
		}
		nsec, _, _ = parseNanoseconds(s, n)
		s = s[n:]
	}

	// Parse the time zone.
	t := Date(year, Month(month), day, hour, min, sec, nsec, UTC)
	if len(s) != 1 || s[0] != 'Z' {
		// Not "Z": must be a numeric offset of the exact form ±hh:mm.
		if len(s) != len("-07:00") {
			return Time{}, false
		}
		hr := parseUint(s[1:3], 0, 23) // e.g., 07
		mm := parseUint(s[4:6], 0, 59) // e.g., 00
		if !ok || !((s[0] == '-' || s[0] == '+') && s[3] == ':') {
			return Time{}, false
		}
		zoneOffset := (hr*60 + mm) * 60 // seconds east of UTC
		if s[0] == '-' {
			zoneOffset *= -1
		}
		t.addSec(-int64(zoneOffset))

		// Use local zone with the given offset if possible.
		if _, offset, _, _, _ := local.lookup(t.unixSec()); offset == zoneOffset {
			t.setLoc(local)
		} else {
			t.setLoc(FixedZone("", zoneOffset))
		}
	}
	return t, true
}
// parseStrictRFC3339 parses b as an RFC 3339 timestamp, intended to reject
// some inputs that the general [Parse] path accepts. It first tries the
// fast parseRFC3339 path and falls back to Parse for its error messages.
func parseStrictRFC3339(b []byte) (Time, error) {
	t, ok := parseRFC3339(b, Local)
	if !ok {
		t, err := Parse(RFC3339, string(b))
		if err != nil {
			return Time{}, err
		}

		// The parse template syntax cannot correctly validate RFC 3339.
		// Explicitly check for cases that Parse is unable to validate for.
		// See https://go.dev/issue/54580.
		num2 := func(b []byte) byte { return 10*(b[0]-'0') + (b[1] - '0') }
		switch {
		// TODO(https://go.dev/issue/54580): Strict parsing is disabled for now.
		// Enable this again with a GODEBUG opt-out.
		// NOTE: while `case true` is first, the stricter cases below are
		// deliberately unreachable.
		case true:
			return t, nil
		case b[len("2006-01-02T")+1] == ':': // hour must be two digits
			return Time{}, &ParseError{RFC3339, string(b), "15", string(b[len("2006-01-02T"):][:1]), ""}
		case b[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period
			return Time{}, &ParseError{RFC3339, string(b), ".", ",", ""}
		case b[len(b)-1] != 'Z':
			switch {
			case num2(b[len(b)-len("07:00"):]) >= 24: // timezone hour must be in range
				return Time{}, &ParseError{RFC3339, string(b), "Z07:00", string(b[len(b)-len("Z07:00"):]), ": timezone hour out of range"}
			case num2(b[len(b)-len("00"):]) >= 60: // timezone minute must be in range
				return Time{}, &ParseError{RFC3339, string(b), "Z07:00", string(b[len(b)-len("Z07:00"):]), ": timezone minute out of range"}
			}
		default: // unknown error; should not occur
			return Time{}, &ParseError{RFC3339, string(b), RFC3339, string(b), ""}
		}
	}
	return t, nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package time
import (
"internal/godebug"
"unsafe"
)
// Sleep pauses the current goroutine for at least the duration d.
// A negative or zero duration causes Sleep to return immediately.
//
// Sleep has no Go body here; its implementation is provided elsewhere
// (linked in by the runtime).
func Sleep(d Duration)

// asynctimerchan holds the GODEBUG setting selecting the pre-Go 1.23
// asynchronous timer-channel behavior; consulted by syncTimer.
var asynctimerchan = godebug.New("asynctimerchan")
// syncTimer returns c as an unsafe.Pointer, for passing to newTimer.
// If the GODEBUG asynctimerchan has disabled the async timer chan
// code, then syncTimer always returns nil, to disable the special
// channel code paths in the runtime.
func syncTimer(c chan Time) unsafe.Pointer {
	// If asynctimerchan=1, we don't even tell the runtime
	// about channel timers, so that we get the pre-Go 1.23 code paths.
	if asynctimerchan.Value() == "1" {
		asynctimerchan.IncNonDefault()
		return nil
	}

	// Otherwise pass to runtime.
	// This handles asynctimerchan=0, which is the default Go 1.23 behavior,
	// as well as asynctimerchan=2, which is like asynctimerchan=1
	// but implemented entirely by the runtime.
	// The only reason to use asynctimerchan=2 is for debugging
	// a problem fixed by asynctimerchan=1: it enables the new
	// GC-able timer channels (#61542) but not the sync channels (#37196).
	//
	// If we decide to roll back the sync channels, we will still have
	// a fully tested async runtime implementation (asynctimerchan=2)
	// and can make this function always return c.
	//
	// If we decide to keep the sync channels, we can delete all the
	// handling of asynctimerchan in the runtime and keep just this
	// function to handle asynctimerchan=1.
	//
	// A channel value is pointer-shaped, so this reinterprets c itself
	// as the unsafe.Pointer handed to the runtime.
	return *(*unsafe.Pointer)(unsafe.Pointer(&c))
}
// when computes the runtime-clock instant, in nanoseconds, at which a timer
// with Duration d in the future should fire: it is the value for the 'when'
// field of a runtimeTimer. A non-positive d means "now". If the addition
// overflows, the result saturates at the maximum int64 (the far future)
// rather than going negative.
func when(d Duration) int64 {
	now := runtimeNano()
	if d <= 0 {
		return now
	}
	deadline := now + int64(d)
	if deadline < 0 {
		// now and d are both positive here, so a negative sum can only be
		// overflow (and never lands exactly on 0); clamp to math.MaxInt64.
		deadline = 1<<63 - 1
	}
	return deadline
}
// These functions are pushed to package time from package runtime.
// The arg cp is a chan Time, but the declaration in runtime uses a pointer,
// so we use a pointer here too. This keeps some tools that aggressively
// compare linknamed symbol definitions happier.
//
// newTimer allocates and starts a timer that fires at when (runtime-clock
// nanoseconds), invoking f(arg, seq, delta); callers in this package pass
// period > 0 (see NewTicker) to request repeated firing.
//
//go:linkname newTimer
func newTimer(when, period int64, f func(any, uintptr, int64), arg any, cp unsafe.Pointer) *Timer

// stopTimer stops a timer, reporting whether it was still active.
//
//go:linkname stopTimer
func stopTimer(*Timer) bool

// resetTimer reschedules t to fire at when (repeating every period, if
// nonzero), reporting whether the timer had been active.
//
//go:linkname resetTimer
func resetTimer(t *Timer, when, period int64) bool
// Note: The runtime knows the layout of struct Timer, since newTimer allocates it.
// The runtime also knows that Ticker and Timer have the same layout.
// There are extra fields after the channel, reserved for the runtime
// and inaccessible to users.

// The Timer type represents a single event.
// When the Timer expires, the current time will be sent on C,
// unless the Timer was created by [AfterFunc].
// A Timer must be created with [NewTimer] or AfterFunc.
type Timer struct {
	C         <-chan Time
	initTimer bool // set for Timers created via NewTimer/AfterFunc; Stop and Reset panic when false
}
// Stop prevents the [Timer] from firing.
// It returns true if the call stops the timer, false if the timer has already
// expired or been stopped.
//
// For a func-based timer created with [AfterFunc](d, f),
// if t.Stop returns false, then the timer has already expired
// and the function f has been started in its own goroutine;
// Stop does not wait for f to complete before returning.
// If the caller needs to know whether f is completed,
// it must coordinate with f explicitly.
//
// For a chan-based timer created with NewTimer(d), as of Go 1.23,
// any receive from t.C after Stop has returned is guaranteed to block
// rather than receive a stale time value from before the Stop;
// if the program has not received from t.C already and the timer is
// running, Stop is guaranteed to return true.
// Before Go 1.23, the only safe way to use Stop was insert an extra
// <-t.C if Stop returned false to drain a potential stale value.
// See the [NewTimer] documentation for more details.
func (t *Timer) Stop() bool {
	if !t.initTimer {
		// A zero-value Timer was never registered with the runtime.
		panic("time: Stop called on uninitialized Timer")
	}
	return stopTimer(t)
}
// NewTimer creates a new Timer that will send
// the current time on its channel after at least duration d.
//
// Before Go 1.23, the garbage collector did not recover
// timers that had not yet expired or been stopped, so code often
// immediately deferred t.Stop after calling NewTimer, to make
// the timer recoverable when it was no longer needed.
// As of Go 1.23, the garbage collector can recover unreferenced
// timers, even if they haven't expired or been stopped.
// The Stop method is no longer necessary to help the garbage collector.
// (Code may of course still want to call Stop to stop the timer for other reasons.)
//
// Before Go 1.23, the channel associated with a Timer was
// asynchronous (buffered, capacity 1), which meant that
// stale time values could be received even after [Timer.Stop]
// or [Timer.Reset] returned.
// As of Go 1.23, the channel is synchronous (unbuffered, capacity 0),
// eliminating the possibility of those stale values.
//
// The GODEBUG setting asynctimerchan=1 restores both pre-Go 1.23
// behaviors: when set, unexpired timers won't be garbage collected, and
// channels will have buffered capacity. This setting may be removed
// in Go 1.27 or later.
func NewTimer(d Duration) *Timer {
	// The 1-element buffer matters for the asynctimerchan=1 compatibility
	// mode; in the default synchronous mode the channel behaves as
	// unbuffered (see the doc comment above and syncTimer).
	c := make(chan Time, 1)
	t := newTimer(when(d), 0, sendTime, c, syncTimer(c))
	t.C = c
	return t
}
// Reset changes the timer to expire after duration d.
// It returns true if the timer had been active, false if the timer had
// expired or been stopped.
//
// For a func-based timer created with [AfterFunc](d, f), Reset either reschedules
// when f will run, in which case Reset returns true, or schedules f
// to run again, in which case it returns false.
// When Reset returns false, Reset neither waits for the prior f to
// complete before returning nor does it guarantee that the subsequent
// goroutine running f does not run concurrently with the prior
// one. If the caller needs to know whether the prior execution of
// f is completed, it must coordinate with f explicitly.
//
// For a chan-based timer created with NewTimer, as of Go 1.23,
// any receive from t.C after Reset has returned is guaranteed not
// to receive a time value corresponding to the previous timer settings;
// if the program has not received from t.C already and the timer is
// running, Reset is guaranteed to return true.
// Before Go 1.23, the only safe way to use Reset was to call [Timer.Stop]
// and explicitly drain the timer first.
// See the [NewTimer] documentation for more details.
func (t *Timer) Reset(d Duration) bool {
	if !t.initTimer {
		// A zero-value Timer was never registered with the runtime.
		panic("time: Reset called on uninitialized Timer")
	}
	// Translate d to an absolute runtime-clock deadline and hand it off.
	return resetTimer(t, when(d), 0)
}
// sendTime does a non-blocking send of the current time on c.
// It is the timer callback registered by NewTimer and NewTicker;
// seq is unused here.
func sendTime(c any, seq uintptr, delta int64) {
	// delta is how long ago the channel send was supposed to happen.
	// The current time can be arbitrarily far into the future, because the runtime
	// can delay a sendTime call until a goroutine tries to receive from
	// the channel. Subtract delta to go back to the old time that we
	// used to send.
	select {
	case c.(chan Time) <- Now().Add(Duration(-delta)):
	default:
		// Receiver not ready (or buffer full): drop this tick on the floor.
	}
}
// After waits for the duration to elapse and then sends the current time
// on the returned channel.
// It is equivalent to [NewTimer](d).C.
//
// Before Go 1.23, this documentation warned that the underlying
// [Timer] would not be recovered by the garbage collector until the
// timer fired, and that if efficiency was a concern, code should use
// NewTimer instead and call [Timer.Stop] if the timer is no longer needed.
// As of Go 1.23, the garbage collector can recover unreferenced,
// unstopped timers. There is no reason to prefer NewTimer when After will do.
func After(d Duration) <-chan Time {
	// Only the channel escapes to the caller; the Timer itself is
	// unreferenced and (as of Go 1.23) recoverable by the GC.
	return NewTimer(d).C
}
// AfterFunc waits for the duration to elapse and then calls f
// in its own goroutine. It returns a [Timer] that can
// be used to cancel the call using its Stop method.
// The returned Timer's C field is not used and will be nil.
func AfterFunc(d Duration, f func()) *Timer {
	// cp == nil: there is no channel, so no runtime channel handling applies.
	return newTimer(when(d), 0, goFunc, f, nil)
}

// goFunc is the timer callback used by AfterFunc: it launches f in its own
// goroutine so the callback returns to the runtime immediately.
// seq and delta are unused.
func goFunc(arg any, seq uintptr, delta int64) {
	go arg.(func())()
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix || (js && wasm) || wasip1
package time
import (
"errors"
"runtime"
"syscall"
)
// for testing: whatever interrupts a sleep
func interrupt() {
	// There is no mechanism in wasi to interrupt the call to poll_oneoff
	// used to implement runtime.usleep so this function does nothing, which
	// somewhat defeats the purpose of TestSleep but we are still better off
	// validating that time elapses when the process calls time.Sleep than
	// skipping the test altogether.
	if runtime.GOOS != "wasip1" {
		// Send ourselves SIGCHLD, which is ignored by default but still
		// interrupts blocking syscalls such as sleep.
		syscall.Kill(syscall.Getpid(), syscall.SIGCHLD)
	}
}
// open opens the named file read-only and returns its descriptor as a
// uintptr. It is a minimal wrapper around syscall.Open.
func open(name string) (uintptr, error) {
	fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
	if err != nil {
		return 0, err
	}
	return uintptr(fd), nil
}

// read performs a single read on fd into buf, returning the byte count.
func read(fd uintptr, buf []byte) (int, error) {
	return syscall.Read(int(fd), buf)
}

// closefd closes fd, discarding any error.
func closefd(fd uintptr) {
	syscall.Close(int(fd))
}
func preadn(fd uintptr, buf []byte, off int) error {
whence := seekStart
if off < 0 {
whence = seekEnd
}
if _, err := syscall.Seek(int(fd), int64(off), whence); err != nil {
return err
}
for len(buf) > 0 {
m, err := syscall.Read(int(fd), buf)
if m <= 0 {
if err == nil {
return errors.New("short read")
}
return err
}
buf = buf[m:]
}
return nil
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package time
import "unsafe"
// Note: The runtime knows the layout of struct Ticker, since newTimer allocates it.
// Note also that Ticker and Timer have the same layout, so that newTimer can handle both.
// The initTimer and initTicker fields are named differently so that
// users cannot convert between the two without unsafe.

// A Ticker holds a channel that delivers “ticks” of a clock
// at intervals.
type Ticker struct {
	C          <-chan Time // The channel on which the ticks are delivered.
	initTicker bool        // set for Tickers created via NewTicker; guards Stop/Reset misuse
}
// NewTicker returns a new [Ticker] containing a channel that will send
// the current time on the channel after each tick. The period of the
// ticks is specified by the duration argument. The ticker will adjust
// the time interval or drop ticks to make up for slow receivers.
// The duration d must be greater than zero; if not, NewTicker will
// panic.
//
// Before Go 1.23, the garbage collector did not recover
// tickers that had not yet expired or been stopped, so code often
// immediately deferred t.Stop after calling NewTicker, to make
// the ticker recoverable when it was no longer needed.
// As of Go 1.23, the garbage collector can recover unreferenced
// tickers, even if they haven't been stopped.
// The Stop method is no longer necessary to help the garbage collector.
// (Code may of course still want to call Stop to stop the ticker for other reasons.)
func NewTicker(d Duration) *Ticker {
	if d <= 0 {
		panic("non-positive interval for NewTicker")
	}
	// Give the channel a 1-element time buffer.
	// If the client falls behind while reading, we drop ticks
	// on the floor until the client catches up.
	c := make(chan Time, 1)
	// Ticker and Timer share a layout, so the runtime's *Timer can be
	// reinterpreted as a *Ticker. Passing period = d requests repeat firing.
	t := (*Ticker)(unsafe.Pointer(newTimer(when(d), int64(d), sendTime, c, syncTimer(c))))
	t.C = c
	return t
}
// Stop turns off a ticker. After Stop, no more ticks will be sent.
// Stop does not close the channel, to permit calling [Ticker.Reset],
// and to prevent a concurrent goroutine reading from the channel
// from seeing an erroneous "tick".
func (t *Ticker) Stop() {
	if !t.initTicker {
		// This is misuse, and the same for time.Timer would panic,
		// but this didn't always panic, and we keep it not panicking
		// to avoid breaking old programs. See issue 21874.
		return
	}
	// Ticker and Timer share a layout, so the Timer stop path applies.
	stopTimer((*Timer)(unsafe.Pointer(t)))
}
// Reset stops a ticker and resets its period to the specified duration.
// The next tick will arrive after the new period elapses. The duration d
// must be greater than zero; if not, Reset will panic.
func (t *Ticker) Reset(d Duration) {
	// Validate d before the initTicker check so the documented panic for
	// non-positive intervals fires even on a zero-value Ticker.
	if d <= 0 {
		panic("non-positive interval for Ticker.Reset")
	}
	if !t.initTicker {
		panic("time: Reset called on uninitialized Ticker")
	}
	// Ticker and Timer share a layout, so the Timer reset path applies.
	resetTimer((*Timer)(unsafe.Pointer(t)), when(d), int64(d))
}
// Tick is a convenience wrapper for [NewTicker] providing access to the ticking
// channel only. Unlike NewTicker, Tick will return nil if d <= 0.
//
// Before Go 1.23, this documentation warned that the underlying
// [Ticker] would never be recovered by the garbage collector, and that
// if efficiency was a concern, code should use NewTicker instead and
// call [Ticker.Stop] when the ticker is no longer needed.
// As of Go 1.23, the garbage collector can recover unreferenced
// tickers, even if they haven't been stopped.
// The Stop method is no longer necessary to help the garbage collector.
// There is no longer any reason to prefer NewTicker when Tick will do.
func Tick(d Duration) <-chan Time {
	if d <= 0 {
		// Unlike NewTicker, a non-positive d yields a nil channel
		// (which blocks forever) instead of panicking.
		return nil
	}
	return NewTicker(d).C
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package time provides functionality for measuring and displaying time.
//
// The calendrical calculations always assume a Gregorian calendar, with
// no leap seconds.
//
// # Monotonic Clocks
//
// Operating systems provide both a “wall clock,” which is subject to
// changes for clock synchronization, and a “monotonic clock,” which is
// not. The general rule is that the wall clock is for telling time and
// the monotonic clock is for measuring time. Rather than split the API,
// in this package the Time returned by [time.Now] contains both a wall
// clock reading and a monotonic clock reading; later time-telling
// operations use the wall clock reading, but later time-measuring
// operations, specifically comparisons and subtractions, use the
// monotonic clock reading.
//
// For example, this code always computes a positive elapsed time of
// approximately 20 milliseconds, even if the wall clock is changed during
// the operation being timed:
//
// start := time.Now()
// ... operation that takes 20 milliseconds ...
// t := time.Now()
// elapsed := t.Sub(start)
//
// Other idioms, such as [time.Since](start), [time.Until](deadline), and
// time.Now().Before(deadline), are similarly robust against wall clock
// resets.
//
// The rest of this section gives the precise details of how operations
// use monotonic clocks, but understanding those details is not required
// to use this package.
//
// The Time returned by time.Now contains a monotonic clock reading.
// If Time t has a monotonic clock reading, t.Add adds the same duration to
// both the wall clock and monotonic clock readings to compute the result.
// Because t.AddDate(y, m, d), t.Round(d), and t.Truncate(d) are wall time
// computations, they always strip any monotonic clock reading from their results.
// Because t.In, t.Local, and t.UTC are used for their effect on the interpretation
// of the wall time, they also strip any monotonic clock reading from their results.
// The canonical way to strip a monotonic clock reading is to use t = t.Round(0).
//
// If Times t and u both contain monotonic clock readings, the operations
// t.After(u), t.Before(u), t.Equal(u), t.Compare(u), and t.Sub(u) are carried out
// using the monotonic clock readings alone, ignoring the wall clock
// readings. If either t or u contains no monotonic clock reading, these
// operations fall back to using the wall clock readings.
//
// On some systems the monotonic clock will stop if the computer goes to sleep.
// On such a system, t.Sub(u) may not accurately reflect the actual
// time that passed between t and u. The same applies to other functions and
// methods that subtract times, such as [Since], [Until], [Time.Before], [Time.After],
// [Time.Add], [Time.Equal] and [Time.Compare]. In some cases, you may need to strip
// the monotonic clock to get accurate results.
//
// Because the monotonic clock reading has no meaning outside
// the current process, the serialized forms generated by t.GobEncode,
// t.MarshalBinary, t.MarshalJSON, and t.MarshalText omit the monotonic
// clock reading, and t.Format provides no format for it. Similarly, the
// constructors [time.Date], [time.Parse], [time.ParseInLocation], and [time.Unix],
// as well as the unmarshalers t.GobDecode, t.UnmarshalBinary.
// t.UnmarshalJSON, and t.UnmarshalText always create times with
// no monotonic clock reading.
//
// The monotonic clock reading exists only in [Time] values. It is not
// a part of [Duration] values or the Unix times returned by t.Unix and
// friends.
//
// Note that the Go == operator compares not just the time instant but
// also the [Location] and the monotonic clock reading. See the
// documentation for the Time type for a discussion of equality
// testing for Time values.
//
// For debugging, the result of t.String does include the monotonic
// clock reading if present. If t != u because of different monotonic clock readings,
// that difference will be visible when printing t.String() and u.String().
//
// # Timer Resolution
//
// [Timer] resolution varies depending on the Go runtime, the operating system
// and the underlying hardware.
// On Unix, the resolution is ~1ms.
// On Windows version 1803 and newer, the resolution is ~0.5ms.
// On older Windows versions, the default resolution is ~16ms, but
// a higher resolution may be requested using [golang.org/x/sys/windows.TimeBeginPeriod].
package time
import (
"errors"
"math/bits"
_ "unsafe" // for go:linkname
)
// A Time represents an instant in time with nanosecond precision.
//
// Programs using times should typically store and pass them as values,
// not pointers. That is, time variables and struct fields should be of
// type [time.Time], not *time.Time.
//
// A Time value can be used by multiple goroutines simultaneously except
// that the methods [Time.GobDecode], [Time.UnmarshalBinary], [Time.UnmarshalJSON] and
// [Time.UnmarshalText] are not concurrency-safe.
//
// Time instants can be compared using the [Time.Before], [Time.After], and [Time.Equal] methods.
// The [Time.Sub] method subtracts two instants, producing a [Duration].
// The [Time.Add] method adds a Time and a Duration, producing a Time.
//
// The zero value of type Time is January 1, year 1, 00:00:00.000000000 UTC.
// As this time is unlikely to come up in practice, the [Time.IsZero] method gives
// a simple way of detecting a time that has not been initialized explicitly.
//
// Each time has an associated [Location]. The methods [Time.Local], [Time.UTC], and Time.In return a
// Time with a specific Location. Changing the Location of a Time value with
// these methods does not change the actual instant it represents, only the time
// zone in which to interpret it.
//
// Representations of a Time value saved by the [Time.GobEncode], [Time.MarshalBinary], [Time.AppendBinary],
// [Time.MarshalJSON], [Time.MarshalText] and [Time.AppendText] methods store the [Time.Location]'s offset,
// but not the location name. They therefore lose information about Daylight Saving Time.
//
// In addition to the required “wall clock” reading, a Time may contain an optional
// reading of the current process's monotonic clock, to provide additional precision
// for comparison or subtraction.
// See the “Monotonic Clocks” section in the package documentation for details.
//
// Note that the Go == operator compares not just the time instant but also the
// Location and the monotonic clock reading. Therefore, Time values should not
// be used as map or database keys without first guaranteeing that the
// identical Location has been set for all values, which can be achieved
// through use of the UTC or Local method, and that the monotonic clock reading
// has been stripped by setting t = t.Round(0). In general, prefer t.Equal(u)
// to t == u, since t.Equal uses the most accurate comparison available and
// correctly handles the case when only one of its arguments has a monotonic
// clock reading.
type Time struct {
	// wall and ext encode the wall time seconds, wall time nanoseconds,
	// and optional monotonic clock reading in nanoseconds.
	//
	// From high to low bit position, wall encodes a 1-bit flag (hasMonotonic),
	// a 33-bit seconds field, and a 30-bit wall time nanoseconds field.
	// The nanoseconds field is in the range [0, 999999999].
	// If the hasMonotonic bit is 0, then the 33-bit field must be zero
	// and the full signed 64-bit wall seconds since Jan 1 year 1 is stored in ext.
	// If the hasMonotonic bit is 1, then the 33-bit field holds a 33-bit
	// unsigned wall seconds since Jan 1 year 1885, and ext holds a
	// signed 64-bit monotonic clock reading, nanoseconds since process start.
	wall uint64
	ext  int64

	// loc specifies the Location that should be used to
	// determine the minute, hour, month, day, and year
	// that correspond to this Time.
	// The nil location means UTC.
	// All UTC times are represented with loc==nil, never loc==&utcLoc.
	loc *Location
}

const (
	hasMonotonic = 1 << 63                      // wall flag bit: monotonic reading present
	maxWall      = wallToInternal + (1<<33 - 1) // year 2157; largest second the packed field holds
	minWall      = wallToInternal               // year 1885; epoch of the packed seconds field
	nsecMask     = 1<<30 - 1                    // low 30 bits of wall: nanoseconds
	nsecShift    = 30                           // packed seconds start above the nanoseconds
)
// These helpers for manipulating the wall and monotonic clock readings
// take pointer receivers, even when they don't modify the time,
// to make them cheaper to call.
// nsec returns the time's nanoseconds.
func (t *Time) nsec() int32 {
	// The low 30 bits of wall always hold nanoseconds in [0, 999999999].
	return int32(t.wall & nsecMask)
}

// sec returns the time's seconds since Jan 1 year 1.
func (t *Time) sec() int64 {
	if t.wall&hasMonotonic != 0 {
		// Shift off the flag bit (high) and the nanoseconds (low 30 bits)
		// to recover the 33-bit seconds-since-1885 field, then rebase.
		return wallToInternal + int64(t.wall<<1>>(nsecShift+1))
	}
	return t.ext
}

// unixSec returns the time's seconds since Jan 1 1970 (Unix time).
func (t *Time) unixSec() int64 { return t.sec() + internalToUnix }
// addSec adds d seconds to the time.
// On overflow of ext, the result saturates at the extreme second values.
func (t *Time) addSec(d int64) {
	if t.wall&hasMonotonic != 0 {
		sec := int64(t.wall << 1 >> (nsecShift + 1))
		dsec := sec + d
		if 0 <= dsec && dsec <= 1<<33-1 {
			// Result still fits the 33-bit packed seconds field.
			t.wall = t.wall&nsecMask | uint64(dsec)<<nsecShift | hasMonotonic
			return
		}
		// Wall second now out of range for packed field.
		// Move to ext.
		t.stripMono()
	}

	// Check if the sum of t.ext and d overflows and handle it properly.
	sum := t.ext + d
	if (sum > t.ext) == (d > 0) {
		// Sum moved in the same direction as d: no overflow.
		t.ext = sum
	} else if d > 0 {
		// Overflowed toward +inf: saturate at the maximum second.
		t.ext = 1<<63 - 1
	} else {
		// Overflowed toward -inf: saturate (symmetrically) at the minimum.
		t.ext = -(1<<63 - 1)
	}
}
// setLoc sets the location associated with the time.
// It also strips any monotonic clock reading (changing the interpretation
// of the wall time discards monotonic comparability).
func (t *Time) setLoc(loc *Location) {
	if loc == &utcLoc {
		// Preserve the invariant that UTC is always represented by loc == nil.
		loc = nil
	}
	t.stripMono()
	t.loc = loc
}

// stripMono strips the monotonic clock reading in t.
func (t *Time) stripMono() {
	if t.wall&hasMonotonic != 0 {
		// Unpack the seconds into ext before clearing the flag and the
		// packed seconds field; only the nanoseconds remain in wall.
		t.ext = t.sec()
		t.wall &= nsecMask
	}
}

// setMono sets the monotonic clock reading in t.
// If t cannot hold a monotonic clock reading,
// because its wall time is too large,
// setMono is a no-op.
func (t *Time) setMono(m int64) {
	if t.wall&hasMonotonic == 0 {
		sec := t.ext
		if sec < minWall || maxWall < sec {
			// The wall seconds do not fit the 33-bit packed field; give up.
			return
		}
		t.wall |= hasMonotonic | uint64(sec-minWall)<<nsecShift
	}
	t.ext = m
}

// mono returns t's monotonic clock reading.
// It returns 0 for a missing reading.
// This function is used only for testing,
// so it's OK that technically 0 is a valid
// monotonic clock reading as well.
func (t *Time) mono() int64 {
	if t.wall&hasMonotonic == 0 {
		return 0
	}
	return t.ext
}
// IsZero reports whether t represents the zero time instant,
// January 1, year 1, 00:00:00 UTC.
// The Location is not consulted: only the instant is examined.
func (t Time) IsZero() bool {
	// If hasMonotonic is set in t.wall, then the time can't be before 1885, so it can't be the year 1.
	// If hasMonotonic is zero, then all the bits in wall other than the nanoseconds field should be 0.
	// So if there are no nanoseconds then t.wall == 0, and if there are no seconds then t.ext == 0.
	// This is equivalent to t.sec() == 0 && t.nsec() == 0, but is more efficient.
	return t.wall == 0 && t.ext == 0
}
// After reports whether the time instant t is after u.
func (t Time) After(u Time) bool {
	if t.wall&u.wall&hasMonotonic != 0 {
		// Both values carry monotonic readings; compare those alone.
		return t.ext > u.ext
	}
	ts, us := t.sec(), u.sec()
	if ts != us {
		return ts > us
	}
	// Seconds tie: the nanosecond fields decide.
	return t.nsec() > u.nsec()
}

// Before reports whether the time instant t is before u.
func (t Time) Before(u Time) bool {
	if t.wall&u.wall&hasMonotonic != 0 {
		// Both values carry monotonic readings; compare those alone.
		return t.ext < u.ext
	}
	ts, us := t.sec(), u.sec()
	if ts != us {
		return ts < us
	}
	// Seconds tie: the nanosecond fields decide.
	return t.nsec() < u.nsec()
}
// Compare compares the time instant t with u. If t is before u, it returns -1;
// if t is after u, it returns +1; if they're the same, it returns 0.
func (t Time) Compare(u Time) int {
	var a, b int64
	if t.wall&u.wall&hasMonotonic != 0 {
		// Both values carry monotonic readings; compare those alone.
		a, b = t.ext, u.ext
	} else {
		a, b = t.sec(), u.sec()
		if a == b {
			// Seconds tie: fall back to the nanosecond fields.
			a, b = int64(t.nsec()), int64(u.nsec())
		}
	}
	if a < b {
		return -1
	}
	if a > b {
		return +1
	}
	return 0
}
// Equal reports whether t and u represent the same time instant.
// Two times can be equal even if they are in different locations.
// For example, 6:00 +0200 and 4:00 UTC are Equal.
// See the documentation on the Time type for the pitfalls of using == with
// Time values; most code should use Equal instead.
func (t Time) Equal(u Time) bool {
	if t.wall&u.wall&hasMonotonic != 0 {
		// Both values carry monotonic readings; those compare exactly.
		return t.ext == u.ext
	}
	return t.sec() == u.sec() && t.nsec() == u.nsec()
}
// A Month specifies a month of the year (January = 1, ...).
type Month int

// Named months, numbered 1 (January) through 12 (December) to match
// human convention; Month(0) and out-of-range values are invalid.
const (
	January Month = 1 + iota
	February
	March
	April
	May
	June
	July
	August
	September
	October
	November
	December
)
// String returns the English name of the month ("January", "February", ...).
func (m Month) String() string {
	if m >= January && m <= December {
		return longMonthNames[m-1]
	}
	// Out-of-range month: render a fmt-style %! diagnostic.
	// 20 bytes is enough for the decimal digits of any uint64.
	var buf [20]byte
	n := fmtInt(buf[:], uint64(m))
	return "%!Month(" + string(buf[n:]) + ")"
}
// A Weekday specifies a day of the week (Sunday = 0, ...).
type Weekday int

// Named weekdays, numbered 0 (Sunday) through 6 (Saturday).
const (
	Sunday Weekday = iota
	Monday
	Tuesday
	Wednesday
	Thursday
	Friday
	Saturday
)
// String returns the English name of the day ("Sunday", "Monday", ...).
func (d Weekday) String() string {
	if d >= Sunday && d <= Saturday {
		return longDayNames[d]
	}
	// Out-of-range weekday: render a fmt-style %! diagnostic.
	// 20 bytes is enough for the decimal digits of any uint64.
	var buf [20]byte
	n := fmtInt(buf[:], uint64(d))
	return "%!Weekday(" + string(buf[n:]) + ")"
}
// Computations on Times
//
// The zero value for a Time is defined to be
// January 1, year 1, 00:00:00.000000000 UTC
// which (1) looks like a zero, or as close as you can get in a date
// (1-1-1 00:00:00 UTC), (2) is unlikely enough to arise in practice to
// be a suitable "not set" sentinel, unlike Jan 1 1970, and (3) has a
// non-negative year even in time zones west of UTC, unlike 1-1-0
// 00:00:00 UTC, which would be 12-31-(-1) 19:00:00 in New York.
//
// The zero Time value does not force a specific epoch for the time
// representation. For example, to use the Unix epoch internally, we
// could define that to distinguish a zero value from Jan 1 1970, that
// time would be represented by sec=-1, nsec=1e9. However, it does
// suggest a representation, namely using 1-1-1 00:00:00 UTC as the
// epoch, and that's what we do.
//
// The Add and Sub computations are oblivious to the choice of epoch.
//
// The presentation computations - year, month, minute, and so on - all
// rely heavily on division and modulus by positive constants. For
// calendrical calculations we want these divisions to round down, even
// for negative values, so that the remainder is always positive, but
// Go's division (like most hardware division instructions) rounds to
// zero. We can still do those computations and then adjust the result
// for a negative numerator, but it's annoying to write the adjustment
// over and over. Instead, we can change to a different epoch so long
// ago that all the times we care about will be positive, and then round
// to zero and round down coincide. These presentation routines already
// have to add the zone offset, so adding the translation to the
// alternate epoch is cheap. For example, having a non-negative time t
// means that we can write
//
// sec = t % 60
//
// instead of
//
// sec = t % 60
// if sec < 0 {
// sec += 60
// }
//
// everywhere.
//
// The calendar runs on an exact 400 year cycle: a 400-year calendar
// printed for 1970-2369 will apply as well to 2370-2769. Even the days
// of the week match up. It simplifies date computations to choose the
// cycle boundaries so that the exceptional years are always delayed as
// long as possible: March 1, year 0 is such a day:
// the first leap day (Feb 29) is four years minus one day away,
// the first multiple-of-4 year without a Feb 29 is 100 years minus one day away,
// and the first multiple-of-100 year with a Feb 29 is 400 years minus one day away.
// March 1 year Y for any Y = 0 mod 400 is also such a day.
//
// Finally, it's convenient if the delta between the Unix epoch and
// long-ago epoch is representable by an int64 constant.
//
// These three considerations—choose an epoch as early as possible, that
// starts on March 1 of a year equal to 0 mod 400, and that is no more than
// 2⁶³ seconds earlier than 1970—bring us to the year -292277022400.
// We refer to this moment as the absolute zero instant, and to times
// measured as a uint64 seconds since this year as absolute times.
//
// Times measured as an int64 seconds since the year 1—the representation
// used for Time's sec field—are called internal times.
//
// Times measured as an int64 seconds since the year 1970 are called Unix
// times.
//
// It is tempting to just use the year 1 as the absolute epoch, defining
// that the routines are only valid for years >= 1. However, the
// routines would then be invalid when displaying the epoch in time zones
// west of UTC, since it is year 0. It doesn't seem tenable to say that
// printing the zero time correctly isn't supported in half the time
// zones. By comparison, it's reasonable to mishandle some times in
// the year -292277022400.
//
// All this is opaque to clients of the API and can be changed if a
// better implementation presents itself.
//
// The date calculations are implemented using the following clever math from
// Cassio Neri and Lorenz Schneider, “Euclidean affine functions and their
// application to calendar algorithms,” SP&E 2023. https://doi.org/10.1002/spe.3172
//
// Define a “calendrical division” (f, f°, f*) to be a triple of functions converting
// one time unit into a whole number of larger units and the remainder and back.
// For example, in a calendar with no leap years, (d/365, d%365, y*365) is the
// calendrical division for days into years:
//
// (f) year := days/365
// (f°) yday := days%365
// (f*) days := year*365 (+ yday)
//
// Note that f* is usually the “easy” function to write: it's the
// calendrical multiplication that inverts the more complex division.
//
// Neri and Schneider prove that when f* takes the form
//
// f*(n) = (a n + b) / c
//
// using integer division rounding down with a ≥ c > 0,
// which they call a Euclidean affine function or EAF, then:
//
// f(n) = (c n + c - b - 1) / a
// f°(n) = (c n + c - b - 1) % a / c
//
// This gives a fairly direct calculation for any calendrical division for which
// we can write the calendrical multiplication in EAF form.
// Because the epoch has been shifted to March 1, all the calendrical
// multiplications turn out to be possible to write in EAF form.
// When a date is broken into [century, cyear, amonth, mday],
// with century, cyear, and mday 0-based,
// and amonth 3-based (March = 3, ..., January = 13, February = 14),
// the calendrical multiplications written in EAF form are:
//
// yday = (153 (amonth-3) + 2) / 5 = (153 amonth - 457) / 5
// cday = 365 cyear + cyear/4 = 1461 cyear / 4
// centurydays = 36524 century + century/4 = 146097 century / 4
// days = centurydays + cday + yday + mday.
//
// We can only handle one periodic cycle per equation, so the year
// calculation must be split into [century, cyear], handling both the
// 100-year cycle and the 400-year cycle.
//
// The yday calculation is not obvious but derives from the fact
// that the March through January calendar repeats the 5-month
// 153-day cycle 31, 30, 31, 30, 31 (we don't care about February
// because yday only ever counts the days _before_ February 1,
// since February is the last month).
//
// Using the rule for deriving f and f° from f*, these multiplications
// convert to these divisions:
//
// century := (4 days + 3) / 146097
// cdays := (4 days + 3) % 146097 / 4
// cyear := (4 cdays + 3) / 1461
// ayday := (4 cdays + 3) % 1461 / 4
// amonth := (5 ayday + 461) / 153
// mday := (5 ayday + 461) % 153 / 5
//
// The a in ayday and amonth stands for absolute (March 1-based)
// to distinguish from the standard yday (January 1-based).
//
// After computing these, we can translate from the March 1 calendar
// to the standard January 1 calendar with branch-free math assuming a
// branch-free conversion from bool to int 0 or 1, denoted int(b) here:
//
// isJanFeb := int(yday >= marchThruDecember)
// month := amonth - isJanFeb*12
// year := century*100 + cyear + isJanFeb
// isLeap := int(cyear%4 == 0) & (int(cyear != 0) | int(century%4 == 0))
// day := 1 + mday
// yday := 1 + ayday + 31 + 28 + isLeap&^isJanFeb - 365*isJanFeb
//
// isLeap is the standard leap-year rule, but the split year form
// makes the divisions all reduce to binary masking.
// Note that day and yday are 1-based, in contrast to mday and ayday.
// To keep the various units separate, we define integer types
// for each. These are never stored in interfaces nor allocated,
// so their type information does not appear in Go binaries.
// Seconds-per-unit conversions and the offsets relating the three epochs
// used in this file (absolute, internal/year-1, and Unix/1970).
const (
	secondsPerMinute = 60
	secondsPerHour   = 60 * secondsPerMinute
	secondsPerDay    = 24 * secondsPerHour
	secondsPerWeek   = 7 * secondsPerDay
	daysPer400Years  = 365*400 + 97 // the Gregorian calendar repeats exactly every 400 years

	// Days from March 1 through end of year
	marchThruDecember = 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31

	// absoluteYears is the number of years we subtract from internal time to get absolute time.
	// This value must be 0 mod 400, and it defines the “absolute zero instant”
	// mentioned in the “Computations on Times” comment above: March 1, -absoluteYears.
	// Dates before the absolute epoch will not compute correctly,
	// but otherwise the value can be changed as needed.
	absoluteYears = 292277022400

	// The year of the zero Time.
	// Assumed by the unixToInternal computation below.
	internalYear = 1

	// Offsets to convert between internal and absolute or Unix times.
	// (Untyped constant arithmetic in Go is exact arbitrary-precision
	// arithmetic, so the 365.2425 product below involves no
	// floating-point rounding.)
	absoluteToInternal int64 = -(absoluteYears*365.2425 + marchThruDecember) * secondsPerDay
	internalToAbsolute       = -absoluteToInternal

	unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * secondsPerDay
	internalToUnix int64 = -unixToInternal

	absoluteToUnix = absoluteToInternal + internalToUnix
	unixToAbsolute = unixToInternal + internalToAbsolute

	// wallToInternal is the number of seconds from year 1 to the base
	// year of the packed wall-seconds field (1885 per the comment in
	// IsZero above).
	wallToInternal int64 = (1884*365 + 1884/4 - 1884/100 + 1884/400) * secondsPerDay
)
// The following defined types keep the many different units used by the
// calendar computations from being mixed up accidentally. They are never
// stored in interfaces nor allocated, so their type information does not
// appear in Go binaries.

// An absSeconds counts the number of seconds since the absolute zero instant.
type absSeconds uint64

// An absDays counts the number of days since the absolute zero instant.
type absDays uint64

// An absCentury counts the number of centuries since the absolute zero instant.
type absCentury uint64

// An absCyear counts the number of years since the start of a century.
type absCyear int

// An absYday counts the number of days since the start of a year.
// Note that absolute years start on March 1.
type absYday int

// An absMonth counts the number of months since the start of a year.
// absMonth=0 denotes March.
type absMonth int

// An absLeap is a single bit (0 or 1) denoting whether a given year is a leap year.
type absLeap int

// An absJanFeb is a single bit (0 or 1) denoting whether a given day falls in January or February.
// That is a special case because the absolute years start in March (unlike normal calendar years).
type absJanFeb int
// dateToAbsDays takes a standard year/month/day and returns the
// number of days from the absolute epoch to that day.
// The days argument can be out of range and in particular can be negative.
func dateToAbsDays(year int64, month Month, day int) absDays {
	// See “Computations on Times” comment above.
	// Shift to the March-based calendar: January and February are
	// treated as months 13 and 14 of the previous year.
	amonth := uint32(month)
	janFeb := uint32(0)
	if amonth < 3 {
		janFeb = 1
	}
	amonth += 12 * janFeb
	y := uint64(year) - uint64(janFeb) + absoluteYears

	// For amonth in the range [3,14], we want:
	//
	//	ayday := (153*amonth - 457) / 5
	//
	// (See the “Computations on Times” comment above
	// as well as Neri and Schneider, section 7.)
	//
	// That is equivalent to:
	//
	//	ayday := (979*amonth - 2919) >> 5
	//
	// and the latter form uses a couple fewer instructions,
	// so use it, saving a few cycles.
	// See Neri and Schneider, section 8.3
	// for more about this optimization.
	//
	// (Note that there is no saved division, because the compiler
	// implements / 5 without division in all cases.)
	ayday := (979*amonth - 2919) >> 5

	// Split the year into century and year-of-century and apply the
	// EAF multiplications from the “Computations on Times” comment:
	// 1461 cyear / 4 = 365 cyear + cyear/4, and
	// 146097 century / 4 = 36524 century + century/4.
	century := y / 100
	cyear := uint32(y % 100)
	cday := 1461 * cyear / 4
	centurydays := 146097 * century / 4
	return absDays(centurydays + uint64(int64(cday+ayday)+int64(day)-1))
}
// days converts absolute seconds to absolute days.
// (absSeconds is unsigned, so truncating division is also floor division.)
func (abs absSeconds) days() absDays {
	return absDays(abs / secondsPerDay)
}
// split splits days into century, cyear, ayday.
func (days absDays) split() (century absCentury, cyear absCyear, ayday absYday) {
	// See “Computations on Times” comment above.
	d := 4*uint64(days) + 3
	century = absCentury(d / 146097)

	// This should be
	//	cday := uint32(d % 146097) / 4
	//	cd := 4*cday + 3
	// which is to say
	//	cday := uint32(d % 146097) >> 2
	//	cd := cday<<2 + 3
	// but of course (x>>2<<2)+3 == x|3,
	// so do that instead.
	cd := uint32(d%146097) | 3

	// For cdays in the range [0,146097] (100 years), we want:
	//
	//	cyear := (4 cdays + 3) / 1461
	//	yday := (4 cdays + 3) % 1461 / 4
	//
	// (See the “Computations on Times” comment above
	// as well as Neri and Schneider, section 7.)
	//
	// That is equivalent to:
	//
	//	cyear := (2939745 cdays) >> 32
	//	yday := (2939745 cdays) & 0xFFFFFFFF / 2939745 / 4
	//
	// so do that instead, saving a few cycles.
	// See Neri and Schneider, section 8.3
	// for more about this optimization.
	//
	// (bits.Mul32 gives the high and low 32 bits of the 64-bit
	// product in one step.)
	hi, lo := bits.Mul32(2939745, cd)
	cyear = absCyear(hi)
	ayday = absYday(lo / 2939745 / 4)
	return
}
// split splits ayday into absolute month and standard (1-based) day-in-month.
func (ayday absYday) split() (m absMonth, mday int) {
	// See “Computations on Times” comment above.
	//
	// For yday in the range [0,366],
	//
	//	amonth := (5 yday + 461) / 153
	//	mday := (5 yday + 461) % 153 / 5
	//
	// is equivalent to:
	//
	//	amonth = (2141 yday + 197913) >> 16
	//	mday = (2141 yday + 197913) & 0xFFFF / 2141
	//
	// so do that instead, saving a few cycles.
	// See Neri and Schneider, section 8.3.
	d := 2141*uint32(ayday) + 197913
	return absMonth(d >> 16), 1 + int((d&0xFFFF)/2141)
}
// janFeb returns 1 if the March 1-based ayday is in January or February, 0 otherwise.
func (ayday absYday) janFeb() absJanFeb {
	// See “Computations on Times” comment above: days at or past
	// marchThruDecember spill into January/February of the next
	// standard year.
	if ayday < marchThruDecember {
		return 0
	}
	return 1
}
// month returns the standard Month for (m, janFeb),
// undoing the March-based shift: January and February are stored
// as absolute months 13 and 14.
func (m absMonth) month(janFeb absJanFeb) Month {
	// See “Computations on Times” comment above.
	return Month(m) - Month(janFeb)*12
}
// leap returns 1 if (century, cyear) is a leap year, 0 otherwise.
func (century absCentury) leap(cyear absCyear) absLeap {
	// Standard Gregorian rule in split-year form (see “Computations
	// on Times” comment above): year divisible by 4, except century
	// boundaries (cyear == 0), which qualify only when the century
	// itself is divisible by 4. The %4 divisions reduce to masking.
	div4, notCentury, div400 := 0, 0, 0
	if cyear%4 == 0 {
		div4 = 1
	}
	if cyear != 0 {
		notCentury = 1
	}
	if century%4 == 0 {
		div400 = 1
	}
	return absLeap(div4 & (notCentury | div400))
}
// year returns the standard year for (century, cyear, janFeb).
func (century absCentury) year(cyear absCyear, janFeb absJanFeb) int {
	// See “Computations on Times” comment above.
	// janFeb bumps the year by one because absolute years begin on
	// March 1: January and February belong to the next standard year.
	return int(uint64(century)*100-absoluteYears) + int(cyear) + int(janFeb)
}
// yday returns the standard 1-based yday for (ayday, janFeb, leap).
func (ayday absYday) yday(janFeb absJanFeb, leap absLeap) int {
	// See “Computations on Times” comment above.
	// Shift the March 1-based ayday to January 1-based: add January
	// and February (31+28), count the leap day only when the year has
	// one and the day is not itself in Jan/Feb (leap &^ janFeb), and
	// pull Jan/Feb days back into the next standard year (-365).
	return int(ayday) + (1 + 31 + 28) + int(leap)&^int(janFeb) - 365*int(janFeb)
}
// date converts days into standard year, month, day.
func (days absDays) date() (year int, month Month, day int) {
	c, cy, yd := days.split()
	jf := yd.janFeb()
	var am absMonth
	am, day = yd.split()
	year = c.year(cy, jf)
	month = am.month(jf)
	return
}
// yearYday converts days into the standard year and 1-based yday.
func (days absDays) yearYday() (year, yday int) {
	c, cy, yd := days.split()
	jf := yd.janFeb()
	return c.year(cy, jf), yd.yday(jf, c.leap(cy))
}
// absSec returns the time t as an absolute seconds, adjusted by the zone offset.
// It is called when computing a presentation property like Month or Hour.
// We'd rather call it abs, but there are linknames to abs that make that problematic.
// See timeAbs below.
func (t Time) absSec() absSeconds {
	l := t.loc
	// Avoid function calls when possible.
	if l == nil || l == &localLoc {
		l = l.get()
	}
	sec := t.unixSec()
	if l != &utcLoc {
		// Apply the zone offset, using the cached zone when the
		// instant falls inside the cached window to skip a full lookup.
		if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
			sec += int64(l.cacheZone.offset)
		} else {
			_, offset, _, _, _ := l.lookup(sec)
			sec += int64(offset)
		}
	}
	// Rebase from Unix seconds to seconds since the absolute zero instant.
	return absSeconds(sec + (unixToInternal + internalToAbsolute))
}
// locabs is a combination of the Zone and abs methods,
// extracting both return values from a single zone lookup.
func (t Time) locabs() (name string, offset int, abs absSeconds) {
	l := t.loc
	if l == nil || l == &localLoc {
		l = l.get()
	}
	// Avoid function call if we hit the local time cache.
	sec := t.unixSec()
	if l != &utcLoc {
		if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
			name = l.cacheZone.name
			offset = l.cacheZone.offset
		} else {
			name, offset, _, _, _ = l.lookup(sec)
		}
		sec += int64(offset)
	} else {
		name = "UTC"
	}
	// Rebase from Unix seconds to seconds since the absolute zero instant.
	abs = absSeconds(sec + (unixToInternal + internalToAbsolute))
	return
}
// Date returns the year, month, and day in which t occurs.
// The computation uses t's zone-adjusted absolute time, so the result
// reflects t's Location.
func (t Time) Date() (year int, month Month, day int) {
	return t.absSec().days().date()
}
// Year returns the year in which t occurs.
func (t Time) Year() int {
	c, cy, yd := t.absSec().days().split()
	return c.year(cy, yd.janFeb())
}
// Month returns the month of the year specified by t.
func (t Time) Month() Month {
	_, _, yd := t.absSec().days().split()
	am, _ := yd.split()
	return am.month(yd.janFeb())
}
// Day returns the day of the month specified by t.
func (t Time) Day() int {
	_, _, yd := t.absSec().days().split()
	_, mday := yd.split()
	return mday
}
// Weekday returns the day of the week specified by t.
// (Zone-adjusted absolute days map directly to weekdays; see absDays.weekday.)
func (t Time) Weekday() Weekday {
	return t.absSec().days().weekday()
}
// weekday returns the day of the week specified by days.
func (days absDays) weekday() Weekday {
	// March 1 of the absolute year, like March 1 of 2000, was a Wednesday,
	// so offset by Wednesday before reducing mod 7.
	n := uint64(days) + uint64(Wednesday)
	return Weekday(n % 7)
}
// ISOWeek returns the ISO 8601 year and week number in which t occurs.
// Week ranges from 1 to 53. Jan 01 to Jan 03 of year n might belong to
// week 52 or 53 of year n-1, and Dec 29 to Dec 31 might belong to week 1
// of year n+1.
func (t Time) ISOWeek() (year, week int) {
	// According to the rule that the first calendar week of a calendar year is
	// the week including the first Thursday of that year, and that the last one is
	// the week immediately preceding the first calendar week of the next calendar year.
	// See https://www.iso.org/obp/ui#iso:std:iso:8601:-1:ed-1:v1:en:term:3.1.1.23 for details.

	// weeks start with Monday
	// Monday Tuesday Wednesday Thursday Friday Saturday Sunday
	// 1      2       3         4        5      6        7
	// +3     +2      +1        0        -1     -2       -3
	// the offset to Thursday
	days := t.absSec().days()
	// ((days-1).weekday()+1) maps the weekday onto the Monday-based
	// index 1..7 above; the Thursday of the same ISO week then owns
	// both the ISO year and the week number.
	thu := days + absDays(Thursday-((days-1).weekday()+1))
	year, yday := thu.yearYday()
	return year, (yday-1)/7 + 1
}
// Clock returns the hour, minute, and second within the day specified by t.
// The computation uses t's zone-adjusted absolute time.
func (t Time) Clock() (hour, min, sec int) {
	return t.absSec().clock()
}
// clock returns the hour, minute, and second within the day specified by abs.
func (abs absSeconds) clock() (hour, min, sec int) {
	rem := int(abs % secondsPerDay)
	hour = rem / secondsPerHour
	rem -= hour * secondsPerHour
	min = rem / secondsPerMinute
	sec = rem - min*secondsPerMinute
	return
}
// Hour returns the hour within the day specified by t, in the range [0, 23].
// (absSec is already zone-adjusted, so plain modular arithmetic suffices.)
func (t Time) Hour() int {
	return int(t.absSec()%secondsPerDay) / secondsPerHour
}

// Minute returns the minute offset within the hour specified by t, in the range [0, 59].
func (t Time) Minute() int {
	return int(t.absSec()%secondsPerHour) / secondsPerMinute
}

// Second returns the second offset within the minute specified by t, in the range [0, 59].
func (t Time) Second() int {
	return int(t.absSec() % secondsPerMinute)
}

// Nanosecond returns the nanosecond offset within the second specified by t,
// in the range [0, 999999999].
// (Nanoseconds are stored directly in the wall field; no zone math is needed.)
func (t Time) Nanosecond() int {
	return int(t.nsec())
}

// YearDay returns the day of the year specified by t, in the range [1,365] for non-leap years,
// and [1,366] in leap years.
func (t Time) YearDay() int {
	_, yday := t.absSec().days().yearYday()
	return yday
}
// A Duration represents the elapsed time between two instants
// as an int64 nanosecond count. The representation limits the
// largest representable duration to approximately 290 years.
type Duration int64

// Extreme Duration values, used to saturate on overflow (see Round, Sub).
const (
	minDuration Duration = -1 << 63
	maxDuration Duration = 1<<63 - 1
)
// Common durations. There is no definition for units of Day or larger
// to avoid confusion across daylight savings time zone transitions.
//
// To count the number of units in a [Duration], divide:
//
//	second := time.Second
//	fmt.Print(int64(second/time.Millisecond)) // prints 1000
//
// To convert an integer number of units to a Duration, multiply:
//
//	seconds := 10
//	fmt.Print(time.Duration(seconds)*time.Second) // prints 10s
const (
	Nanosecond Duration = 1
	// The remaining constants inherit type Duration from Nanosecond.
	Microsecond = 1000 * Nanosecond
	Millisecond = 1000 * Microsecond
	Second      = 1000 * Millisecond
	Minute      = 60 * Second
	Hour        = 60 * Minute
)
// String returns a string representing the duration in the form "72h3m0.5s".
// Leading zero units are omitted. As a special case, durations less than one
// second format use a smaller unit (milli-, micro-, or nanoseconds) to ensure
// that the leading digit is non-zero. The zero duration formats as 0s.
func (d Duration) String() string {
	// This is inlinable to take advantage of "function outlining".
	// Thus, the caller can decide whether a string must be heap allocated.
	// (32 bytes covers the largest formatting, 2540400h10m10.000000000s;
	// see format below.)
	var arr [32]byte
	n := d.format(&arr)
	return string(arr[n:])
}
// format formats the representation of d into the end of buf and
// returns the offset of the first character.
// The text is written right to left, so w always points at the first
// byte written so far.
func (d Duration) format(buf *[32]byte) int {
	// Largest time is 2540400h10m10.000000000s
	w := len(buf)

	u := uint64(d)
	neg := d < 0
	if neg {
		u = -u // unsigned negation is well-defined even for minDuration
	}

	if u < uint64(Second) {
		// Special case: if duration is smaller than a second,
		// use smaller units, like 1.2ms
		var prec int
		w--
		buf[w] = 's'
		w--
		switch {
		case u == 0:
			buf[w] = '0'
			return w
		case u < uint64(Microsecond):
			// print nanoseconds
			prec = 0
			buf[w] = 'n'
		case u < uint64(Millisecond):
			// print microseconds
			prec = 3
			// U+00B5 'µ' micro sign == 0xC2 0xB5
			w-- // Need room for two bytes.
			copy(buf[w:], "µ")
		default:
			// print milliseconds
			prec = 6
			buf[w] = 'm'
		}
		w, u = fmtFrac(buf[:w], u, prec)
		w = fmtInt(buf[:w], u)
	} else {
		w--
		buf[w] = 's'

		w, u = fmtFrac(buf[:w], u, 9)

		// u is now integer seconds
		w = fmtInt(buf[:w], u%60)
		u /= 60

		// u is now integer minutes
		if u > 0 {
			w--
			buf[w] = 'm'
			w = fmtInt(buf[:w], u%60)
			u /= 60

			// u is now integer hours
			// Stop at hours because days can be different lengths.
			if u > 0 {
				w--
				buf[w] = 'h'
				w = fmtInt(buf[:w], u)
			}
		}
	}

	if neg {
		w--
		buf[w] = '-'
	}

	return w
}
// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the
// tail of buf, omitting trailing zeros. It omits the decimal
// point too when the fraction is 0. It returns the index where the
// output bytes begin and the value v/10**prec.
func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) {
	pos := len(buf)
	started := false
	for i := 0; i < prec; i++ {
		d := v % 10
		v /= 10
		if !started && d == 0 {
			continue // still skipping trailing zeros
		}
		started = true
		pos--
		buf[pos] = '0' + byte(d)
	}
	if started {
		pos--
		buf[pos] = '.'
	}
	return pos, v
}
// fmtInt formats v into the tail of buf.
// It returns the index where the output begins.
func fmtInt(buf []byte, v uint64) int {
	pos := len(buf)
	if v == 0 {
		pos--
		buf[pos] = '0'
		return pos
	}
	for v > 0 {
		pos--
		buf[pos] = '0' + byte(v%10)
		v /= 10
	}
	return pos
}
// Nanoseconds returns the duration as an integer nanosecond count.
func (d Duration) Nanoseconds() int64 { return int64(d) }

// Microseconds returns the duration as an integer microsecond count.
// (Integer division truncates toward zero.)
func (d Duration) Microseconds() int64 { return int64(d) / 1e3 }

// Milliseconds returns the duration as an integer millisecond count.
// (Integer division truncates toward zero.)
func (d Duration) Milliseconds() int64 { return int64(d) / 1e6 }
// These methods return float64 because the dominant
// use case is for printing a floating point number like 1.5s, and
// a truncation to integer would make them not useful in those cases.
// Splitting the integer and fraction ourselves guarantees that
// converting the returned float64 to an integer rounds the same
// way that a pure integer conversion would have, even in cases
// where, say, float64(d.Nanoseconds())/1e9 would have rounded
// differently.
// Seconds returns the duration as a floating point number of seconds.
func (d Duration) Seconds() float64 {
	whole := d / Second
	frac := d % Second
	return float64(whole) + float64(frac)/1e9
}
// Minutes returns the duration as a floating point number of minutes.
func (d Duration) Minutes() float64 {
	whole := d / Minute
	frac := d % Minute
	return float64(whole) + float64(frac)/(60*1e9)
}
// Hours returns the duration as a floating point number of hours.
func (d Duration) Hours() float64 {
	whole := d / Hour
	frac := d % Hour
	return float64(whole) + float64(frac)/(60*60*1e9)
}
// Truncate returns the result of rounding d toward zero to a multiple of m.
// If m <= 0, Truncate returns d unchanged.
func (d Duration) Truncate(m Duration) Duration {
	if m > 0 {
		return d - d%m
	}
	return d
}
// lessThanHalf reports whether x+x < y but avoids overflow,
// assuming x and y are both positive (Duration is signed).
// A positive Duration is below 2⁶³, so x+x fits in a uint64 without wrapping.
func lessThanHalf(x, y Duration) bool {
	return uint64(x)+uint64(x) < uint64(y)
}
// Round returns the result of rounding d to the nearest multiple of m.
// The rounding behavior for halfway values is to round away from zero.
// If the result exceeds the maximum (or minimum)
// value that can be stored in a [Duration],
// Round returns the maximum (or minimum) duration.
// If m <= 0, Round returns d unchanged.
func (d Duration) Round(m Duration) Duration {
	if m <= 0 {
		return d
	}
	r := d % m
	if d < 0 {
		r = -r
		if lessThanHalf(r, m) {
			return d + r // round toward zero (up, since d < 0)
		}
		if d1 := d - m + r; d1 < d {
			return d1 // round away from zero
		}
		return minDuration // overflow
	}
	if lessThanHalf(r, m) {
		return d - r // round toward zero
	}
	if d1 := d + m - r; d1 > d {
		return d1 // round away from zero
	}
	return maxDuration // overflow
}
// Abs returns the absolute value of d.
// As a special case, Duration([math.MinInt64]) is converted to Duration([math.MaxInt64]),
// reducing its magnitude by 1 nanosecond.
func (d Duration) Abs() Duration {
	if d == minDuration {
		// -minDuration would overflow; clamp to the maximum instead.
		return maxDuration
	}
	if d < 0 {
		return -d
	}
	return d
}
// Add returns the time t+d.
func (t Time) Add(d Duration) Time {
	dsec := int64(d / 1e9)
	nsec := t.nsec() + int32(d%1e9)
	// Normalize nsec into [0, 1e9), carrying into the seconds.
	if nsec >= 1e9 {
		dsec++
		nsec -= 1e9
	} else if nsec < 0 {
		dsec--
		nsec += 1e9
	}
	t.wall = t.wall&^nsecMask | uint64(nsec) // update nsec
	t.addSec(dsec)
	if t.wall&hasMonotonic != 0 {
		// The monotonic reading moves by the full nanosecond delta.
		te := t.ext + int64(d)
		if d < 0 && te > t.ext || d > 0 && te < t.ext {
			// Monotonic clock reading now out of range; degrade to wall-only.
			t.stripMono()
		} else {
			t.ext = te
		}
	}
	return t
}
// Sub returns the duration t-u. If the result exceeds the maximum (or minimum)
// value that can be stored in a [Duration], the maximum (or minimum) duration
// will be returned.
// To compute t-d for a duration d, use t.Add(-d).
func (t Time) Sub(u Time) Duration {
	if t.wall&u.wall&hasMonotonic != 0 {
		// Both carry monotonic readings; subtract those (saturating).
		return subMono(t.ext, u.ext)
	}
	d := Duration(t.sec()-u.sec())*Second + Duration(t.nsec()-u.nsec())
	// Check for overflow or underflow by round-tripping through Add.
	switch {
	case u.Add(d).Equal(t):
		return d // d is correct
	case t.Before(u):
		return minDuration // t - u is negative out of range
	default:
		return maxDuration // t - u is positive out of range
	}
}
// subMono returns the Duration t-u for two monotonic clock readings,
// saturating at the Duration limits when the difference overflows.
func subMono(t, u int64) Duration {
	d := Duration(t - u)
	switch {
	case d < 0 && t > u:
		return maxDuration // t - u is positive out of range
	case d > 0 && t < u:
		return minDuration // t - u is negative out of range
	default:
		return d
	}
}
// Since returns the time elapsed since t.
// It is shorthand for time.Now().Sub(t).
func Since(t Time) Duration {
	if t.wall&hasMonotonic != 0 && !runtimeIsBubbled() {
		// Common case optimization: if t has monotonic time, then Sub will use only it.
		// (Inside a synctest bubble, fall through so the fake clock is used.)
		return subMono(runtimeNano()-startNano, t.ext)
	}
	return Now().Sub(t)
}
// Until returns the duration until t.
// It is shorthand for t.Sub(time.Now()).
func Until(t Time) Duration {
	if t.wall&hasMonotonic != 0 && !runtimeIsBubbled() {
		// Common case optimization: if t has monotonic time, then Sub will use only it.
		// (Inside a synctest bubble, fall through so the fake clock is used.)
		return subMono(t.ext, runtimeNano()-startNano)
	}
	return t.Sub(Now())
}
// AddDate returns the time corresponding to adding the
// given number of years, months, and days to t.
// For example, AddDate(-1, 2, 3) applied to January 1, 2011
// returns March 4, 2010.
//
// Note that dates are fundamentally coupled to timezones, and calendrical
// periods like days don't have fixed durations. AddDate uses the Location of
// the Time value to determine these durations. That means that the same
// AddDate arguments can produce a different shift in absolute time depending on
// the base Time value and its Location. For example, AddDate(0, 0, 1) applied
// to 12:00 on March 27 always returns 12:00 on March 28. At some locations and
// in some years this is a 24 hour shift. In others it's a 23 hour shift due to
// daylight savings time transitions.
//
// AddDate normalizes its result in the same way that Date does,
// so, for example, adding one month to October 31 yields
// December 1, the normalized form for November 31.
func (t Time) AddDate(years int, months int, days int) Time {
	year, month, day := t.Date()
	hour, min, sec := t.Clock()
	// Out-of-range fields (e.g. month 14, day 32) are renormalized by
	// Date, which is what produces the documented normalization.
	return Date(year+years, month+Month(months), day+days, hour, min, sec, int(t.nsec()), t.Location())
}
// daysBefore returns the number of days in a non-leap year before month m.
// daysBefore(December+1) returns 365.
func daysBefore(m Month) int {
	// (214×m − 211) / 7 reproduces the running sum of
	//	0 31 30 31 30 31 30 31 31 30 31 30 31
	// which is
	//	0 31 61 92 122 153 183 214 245 275 306 336 367
	// exactly (found by a brute-force search over small a, b, c in
	// (a×m + b)/c). That table pretends February has 30 days, so
	// subtract the 2-day overcount from March onward.
	n := (214*int(m) - 211) / 7
	if m >= March {
		n -= 2
	}
	return n
}
// daysIn returns the number of days in month m of the given year,
// accounting for leap-year February.
func daysIn(m Month, year int) int {
	if m == February {
		if isLeap(year) {
			return 29
		}
		return 28
	}
	// With the special case of February eliminated, the pattern is
	//	31 30 31 30 31 30 31 31 30 31 30 31
	// Adding m&1 produces the basic alternation;
	// adding (m>>3)&1 inverts the alternation starting in August.
	return 30 + int((m+m>>3)&1)
}
// Provided by package runtime.
//
// now returns the current real time, and is superseded by runtimeNow which returns
// the fake synctest clock when appropriate.
//
// now should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/phuslu/log
//   - github.com/sethvargo/go-limiter
//   - github.com/ulule/limiter/v3
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
func now() (sec int64, nsec int32, mono int64)

// runtimeNow returns the current time.
// When called within a synctest.Run bubble, it returns the group's fake clock.
//
//go:linkname runtimeNow
func runtimeNow() (sec int64, nsec int32, mono int64)

// runtimeNano returns the current value of the runtime clock in nanoseconds.
// When called within a synctest.Run bubble, it returns the group's fake clock.
//
//go:linkname runtimeNano
func runtimeNano() int64

// runtimeIsBubbled reports whether the caller is running within a
// synctest bubble (and therefore sees the fake clock); provided by
// package runtime.
//
//go:linkname runtimeIsBubbled
func runtimeIsBubbled() bool
// Monotonic times are reported as offsets from startNano.
// We initialize startNano to runtimeNano() - 1 so that on systems where
// monotonic time resolution is fairly low (e.g. Windows 2008
// which appears to have a default resolution of 15ms),
// we avoid ever reporting a monotonic time of 0.
// (Callers may want to use 0 as "time not set".)
// startNano is computed once at package initialization.
var startNano int64 = runtimeNano() - 1
// x/tools uses a linkname of time.Now in its tests. No harm done.
//go:linkname Now
// Now returns the current local time.
func Now() Time {
	sec, nsec, mono := runtimeNow()
	if mono == 0 {
		// No monotonic reading available: construct a wall-clock-only
		// Time (same layout as unixTime).
		return Time{uint64(nsec), sec + unixToInternal, Local}
	}
	// Report the monotonic reading as an offset from startNano so it is
	// never zero (see the startNano comment).
	mono -= startNano
	sec += unixToInternal - minWall
	if uint64(sec)>>33 != 0 {
		// Seconds field overflowed the 33 bits available when
		// storing a monotonic time. This will be true after
		// March 16, 2157.
		return Time{uint64(nsec), sec + minWall, Local}
	}
	// Pack the wall seconds and nanoseconds together with the
	// hasMonotonic flag; mono travels in the ext field.
	return Time{hasMonotonic | uint64(sec)<<nsecShift | uint64(nsec), mono, Local}
}
// unixTime converts a Unix time (sec seconds and nsec nanoseconds since
// January 1, 1970 UTC) into a wall-clock-only Time in the Local location.
func unixTime(sec int64, nsec int32) Time {
	return Time{uint64(nsec), sec + unixToInternal, Local}
}
// UTC returns t with the location set to UTC.
func (t Time) UTC() Time {
	// Use &utcLoc rather than the exported UTC variable so the result
	// is correct even if a badly behaved client reassigned UTC.
	t.setLoc(&utcLoc)
	return t
}
// Local returns t with the location set to local time.
func (t Time) Local() Time {
	t.setLoc(Local)
	return t
}
// In returns a copy of t representing the same time instant, but
// with the copy's location information set to loc for display
// purposes.
//
// In panics if loc is nil.
func (t Time) In(loc *Location) Time {
	if loc == nil {
		panic("time: missing Location in call to Time.In")
	}
	t.setLoc(loc)
	return t
}
// Location returns the time zone information associated with t.
func (t Time) Location() *Location {
	if t.loc == nil {
		// A zero Time has a nil loc, which denotes UTC.
		return UTC
	}
	return t.loc
}
// Zone computes the time zone in effect at time t, returning the abbreviated
// name of the zone (such as "CET") and its offset in seconds east of UTC.
func (t Time) Zone() (name string, offset int) {
	// Only the name and offset from the full lookup result are needed.
	zoneName, zoneOffset, _, _, _ := t.loc.lookup(t.unixSec())
	return zoneName, zoneOffset
}
// ZoneBounds returns the bounds of the time zone in effect at time t.
// The zone begins at start and the next zone begins at end.
// If the zone begins at the beginning of time, start will be returned as a zero Time.
// If the zone goes on forever, end will be returned as a zero Time.
// The Location of the returned times will be the same as t.
func (t Time) ZoneBounds() (start, end Time) {
	_, _, startSec, endSec, _ := t.loc.lookup(t.unixSec())
	// alpha/omega are the sentinel "beginning/end of time" values;
	// leave the corresponding result as the zero Time for those.
	if startSec != alpha {
		start = unixTime(startSec, 0)
		start.setLoc(t.loc)
	}
	if endSec != omega {
		end = unixTime(endSec, 0)
		end.setLoc(t.loc)
	}
	return
}
// Unix returns t as a Unix time, the number of seconds elapsed
// since January 1, 1970 UTC. The result does not depend on the
// location associated with t.
// Unix-like operating systems often record time as a 32-bit
// count of seconds, but since the method here returns a 64-bit
// value it is valid for billions of years into the past or future.
func (t Time) Unix() int64 {
	return t.unixSec()
}
// UnixMilli returns t as a Unix time, the number of milliseconds elapsed since
// January 1, 1970 UTC. The result is undefined if the Unix time in
// milliseconds cannot be represented by an int64 (a date more than 292 million
// years before or after 1970). The result does not depend on the
// location associated with t.
func (t Time) UnixMilli() int64 {
	// Whole seconds scaled to ms, plus the sub-second nanoseconds
	// truncated to ms.
	return t.unixSec()*1e3 + int64(t.nsec())/1e6
}
// UnixMicro returns t as a Unix time, the number of microseconds elapsed since
// January 1, 1970 UTC. The result is undefined if the Unix time in
// microseconds cannot be represented by an int64 (a date before year -290307 or
// after year 294246). The result does not depend on the location associated
// with t.
func (t Time) UnixMicro() int64 {
	// Whole seconds scaled to µs, plus the sub-second nanoseconds
	// truncated to µs.
	return t.unixSec()*1e6 + int64(t.nsec())/1e3
}
// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
// since January 1, 1970 UTC. The result is undefined if the Unix time
// in nanoseconds cannot be represented by an int64 (a date before the year
// 1678 or after 2262). Note that this means the result of calling UnixNano
// on the zero Time is undefined. The result does not depend on the
// location associated with t.
func (t Time) UnixNano() int64 {
	return (t.unixSec())*1e9 + int64(t.nsec())
}
// Version bytes written by AppendBinary/MarshalBinary and checked by
// UnmarshalBinary. V2 adds a trailing seconds byte for zone offsets
// that are not a whole number of minutes.
const (
	timeBinaryVersionV1 byte = iota + 1 // For general situation
	timeBinaryVersionV2                 // For LMT only
)
// AppendBinary implements the [encoding.BinaryAppender] interface.
//
// Layout: 1 version byte, 8 bytes big-endian seconds, 4 bytes big-endian
// nanoseconds, 2 bytes zone offset in minutes (-1 meaning UTC), and for
// version 2 only, 1 byte of leftover offset seconds.
func (t Time) AppendBinary(b []byte) ([]byte, error) {
	var offsetMin int16 // minutes east of UTC. -1 is UTC.
	var offsetSec int8
	version := timeBinaryVersionV1
	if t.Location() == UTC {
		offsetMin = -1
	} else {
		_, offset := t.Zone()
		if offset%60 != 0 {
			// Offset is not a whole number of minutes (e.g. LMT);
			// record the extra seconds with the V2 format.
			version = timeBinaryVersionV2
			offsetSec = int8(offset % 60)
		}
		offset /= 60
		// -1 is excluded because it is reserved to mean UTC above.
		if offset < -32768 || offset == -1 || offset > 32767 {
			return b, errors.New("Time.MarshalBinary: unexpected zone offset")
		}
		offsetMin = int16(offset)
	}
	sec := t.sec()
	nsec := t.nsec()
	b = append(b,
		version,       // byte 0 : version
		byte(sec>>56), // bytes 1-8: seconds
		byte(sec>>48),
		byte(sec>>40),
		byte(sec>>32),
		byte(sec>>24),
		byte(sec>>16),
		byte(sec>>8),
		byte(sec),
		byte(nsec>>24), // bytes 9-12: nanoseconds
		byte(nsec>>16),
		byte(nsec>>8),
		byte(nsec),
		byte(offsetMin>>8), // bytes 13-14: zone offset in minutes
		byte(offsetMin),
	)
	if version == timeBinaryVersionV2 {
		b = append(b, byte(offsetSec)) // byte 15: leftover offset seconds
	}
	return b, nil
}
// MarshalBinary implements the [encoding.BinaryMarshaler] interface.
// It delegates to AppendBinary with a buffer sized for the common
// 15/16-byte encoding.
func (t Time) MarshalBinary() ([]byte, error) {
	buf, err := t.AppendBinary(make([]byte, 0, 16))
	if err != nil {
		return nil, err
	}
	return buf, nil
}
// UnmarshalBinary implements the [encoding.BinaryUnmarshaler] interface.
// It decodes the layout produced by AppendBinary: version byte, 8-byte
// big-endian seconds, 4-byte big-endian nanoseconds, 2-byte offset in
// minutes, and (version 2 only) 1 byte of leftover offset seconds.
func (t *Time) UnmarshalBinary(data []byte) error {
	buf := data
	if len(buf) == 0 {
		return errors.New("Time.UnmarshalBinary: no data")
	}
	version := buf[0]
	if version != timeBinaryVersionV1 && version != timeBinaryVersionV2 {
		return errors.New("Time.UnmarshalBinary: unsupported version")
	}
	wantLen := /*version*/ 1 + /*sec*/ 8 + /*nsec*/ 4 + /*zone offset*/ 2
	if version == timeBinaryVersionV2 {
		wantLen++
	}
	if len(buf) != wantLen {
		return errors.New("Time.UnmarshalBinary: invalid length")
	}
	buf = buf[1:]
	sec := int64(buf[7]) | int64(buf[6])<<8 | int64(buf[5])<<16 | int64(buf[4])<<24 |
		int64(buf[3])<<32 | int64(buf[2])<<40 | int64(buf[1])<<48 | int64(buf[0])<<56
	buf = buf[8:]
	nsec := int32(buf[3]) | int32(buf[2])<<8 | int32(buf[1])<<16 | int32(buf[0])<<24
	buf = buf[4:]
	offset := int(int16(buf[1])|int16(buf[0])<<8) * 60
	if version == timeBinaryVersionV2 {
		// NOTE(review): AppendBinary writes this byte from an int8, but it is
		// decoded unsigned here; a negative sub-minute offset would not round-trip.
		// Confirm whether such offsets can occur upstream.
		offset += int(buf[2])
	}
	*t = Time{}
	t.wall = uint64(nsec)
	t.ext = sec
	// Prefer a known Location over a fabricated fixed zone:
	// -1 minutes is the marker for UTC; otherwise try Local.
	if offset == -1*60 {
		t.setLoc(&utcLoc)
	} else if _, localoff, _, _, _ := Local.lookup(t.unixSec()); offset == localoff {
		t.setLoc(Local)
	} else {
		t.setLoc(FixedZone("", offset))
	}
	return nil
}
// TODO(rsc): Remove GobEncoder, GobDecoder, MarshalJSON, UnmarshalJSON in Go 2.
// The same semantics will be provided by the generic MarshalBinary, MarshalText,
// UnmarshalBinary, UnmarshalText.

// GobEncode implements the gob.GobEncoder interface.
// It uses the same encoding as MarshalBinary.
func (t Time) GobEncode() ([]byte, error) {
	return t.MarshalBinary()
}
// GobDecode implements the gob.GobDecoder interface.
// It uses the same encoding as UnmarshalBinary.
func (t *Time) GobDecode(data []byte) error {
	return t.UnmarshalBinary(data)
}
// MarshalJSON implements the [encoding/json.Marshaler] interface.
// The time is a quoted string in the RFC 3339 format with sub-second precision.
// If the timestamp cannot be represented as valid RFC 3339
// (e.g., the year is out of range), then an error is reported.
func (t Time) MarshalJSON() ([]byte, error) {
	b := make([]byte, 0, len(RFC3339Nano)+len(`""`))
	b = append(b, '"')
	b, err := t.appendStrictRFC3339(b)
	// The closing quote is appended before the error check; harmless,
	// since b is discarded when err is non-nil.
	b = append(b, '"')
	if err != nil {
		return nil, errors.New("Time.MarshalJSON: " + err.Error())
	}
	return b, nil
}
// UnmarshalJSON implements the [encoding/json.Unmarshaler] interface.
// The time must be a quoted string in the RFC 3339 format.
func (t *Time) UnmarshalJSON(data []byte) error {
	// By convention, a JSON null leaves the value unchanged.
	if string(data) == "null" {
		return nil
	}
	// TODO(https://go.dev/issue/47353): Properly unescape a JSON string.
	if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
		return errors.New("Time.UnmarshalJSON: input is not a JSON string")
	}
	// Strip the surrounding quotes and parse the remainder.
	data = data[len(`"`) : len(data)-len(`"`)]
	var err error
	*t, err = parseStrictRFC3339(data)
	return err
}
// appendTo appends the strict RFC 3339 representation of t to b.
// On failure it returns nil and an error message prefixed with errPrefix.
func (t Time) appendTo(b []byte, errPrefix string) ([]byte, error) {
	out, err := t.appendStrictRFC3339(b)
	if err != nil {
		return nil, errors.New(errPrefix + err.Error())
	}
	return out, nil
}
// AppendText implements the [encoding.TextAppender] interface.
// The time is formatted in RFC 3339 format with sub-second precision.
// If the timestamp cannot be represented as valid RFC 3339
// (e.g., the year is out of range), then an error is returned.
func (t Time) AppendText(b []byte) ([]byte, error) {
	return t.appendTo(b, "Time.AppendText: ")
}
// MarshalText implements the [encoding.TextMarshaler] interface. The output
// matches that of calling the [Time.AppendText] method.
//
// See [Time.AppendText] for more information.
func (t Time) MarshalText() ([]byte, error) {
	return t.appendTo(make([]byte, 0, len(RFC3339Nano)), "Time.MarshalText: ")
}
// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
// The time must be in the RFC 3339 format.
func (t *Time) UnmarshalText(data []byte) error {
	var err error
	*t, err = parseStrictRFC3339(data)
	return err
}
// Unix returns the local Time corresponding to the given Unix time,
// sec seconds and nsec nanoseconds since January 1, 1970 UTC.
// It is valid to pass nsec outside the range [0, 999999999].
// Not all sec values have a corresponding time value. One such
// value is 1<<63-1 (the largest int64 value).
func Unix(sec int64, nsec int64) Time {
	if nsec < 0 || nsec >= 1e9 {
		// Fold whole seconds out of nsec, then fix up a negative
		// remainder so nsec lands in [0, 1e9).
		carry := nsec / 1e9
		sec += carry
		nsec -= carry * 1e9
		if nsec < 0 {
			nsec += 1e9
			sec--
		}
	}
	return unixTime(sec, int32(nsec))
}
// UnixMilli returns the local Time corresponding to the given Unix time,
// msec milliseconds since January 1, 1970 UTC.
func UnixMilli(msec int64) Time {
	// Split into seconds and leftover milliseconds converted to ns.
	return Unix(msec/1e3, (msec%1e3)*1e6)
}
// UnixMicro returns the local Time corresponding to the given Unix time,
// usec microseconds since January 1, 1970 UTC.
func UnixMicro(usec int64) Time {
	// Split into seconds and leftover microseconds converted to ns.
	return Unix(usec/1e6, (usec%1e6)*1e3)
}
// IsDST reports whether the time in the configured location is in Daylight Savings Time.
func (t Time) IsDST() bool {
	_, _, _, _, isDST := t.loc.lookup(t.Unix())
	return isDST
}
// isLeap reports whether year is a leap year in the proleptic
// Gregorian calendar.
func isLeap(year int) bool {
	// Equivalent to year%4 == 0 && (year%100 != 0 || year%400 == 0).
	// Bit trick (thanks to Cassio Neri): a leap year needs its bottom
	// 2 bits clear, except that multiples of 25 (covering all
	// multiples of 100) instead need the bottom 4 bits clear
	// (400 = 16*25, so only multiples of 400 qualify).
	mask := 3
	if year%25 == 0 {
		mask = 0xf
	}
	return year&mask == 0
}
// norm returns nhi, nlo such that
//
//	hi * base + lo == nhi * base + nlo
//	0 <= nlo < base
func norm(hi, lo, base int) (nhi, nlo int) {
	if lo < 0 {
		// Borrow enough whole units from hi to make lo non-negative.
		borrow := (-lo-1)/base + 1
		hi, lo = hi-borrow, lo+borrow*base
	}
	if lo >= base {
		// Carry the excess whole units into hi.
		carry := lo / base
		hi, lo = hi+carry, lo-carry*base
	}
	return hi, lo
}
// Date returns the Time corresponding to
//
//	yyyy-mm-dd hh:mm:ss + nsec nanoseconds
//
// in the appropriate zone for that time in the given location.
//
// The month, day, hour, min, sec, and nsec values may be outside
// their usual ranges and will be normalized during the conversion.
// For example, October 32 converts to November 1.
//
// A daylight savings time transition skips or repeats times.
// For example, in the United States, March 13, 2011 2:15am never occurred,
// while November 6, 2011 1:15am occurred twice. In such cases, the
// choice of time zone, and therefore the time, is not well-defined.
// Date returns a time that is correct in one of the two zones involved
// in the transition, but it does not guarantee which.
//
// Date panics if loc is nil.
func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) Time {
	if loc == nil {
		panic("time: missing Location in call to Date")
	}
	// Normalize month, overflowing into year.
	m := int(month) - 1
	year, m = norm(year, m, 12)
	month = Month(m) + 1
	// Normalize nsec, sec, min, hour, overflowing into day.
	sec, nsec = norm(sec, nsec, 1e9)
	min, sec = norm(min, sec, 60)
	hour, min = norm(hour, min, 60)
	day, hour = norm(day, hour, 24)
	// Convert to absolute time and then Unix time.
	unix := int64(dateToAbsDays(int64(year), month, day))*secondsPerDay +
		int64(hour*secondsPerHour+min*secondsPerMinute+sec) +
		absoluteToUnix
	// Look for zone offset for expected time, so we can adjust to UTC.
	// The lookup function expects UTC, so first we pass unix in the
	// hope that it will not be too close to a zone transition,
	// and then adjust if it is.
	_, offset, start, end, _ := loc.lookup(unix)
	if offset != 0 {
		utc := unix - int64(offset)
		// If utc is valid for the time zone we found, then we have the right offset.
		// If not, we get the correct offset by looking up utc in the location.
		if utc < start || utc >= end {
			_, offset, _, _, _ = loc.lookup(utc)
		}
		unix -= int64(offset)
	}
	t := unixTime(unix, int32(nsec))
	t.setLoc(loc)
	return t
}
// Truncate returns the result of rounding t down to a multiple of d (since the zero time).
// If d <= 0, Truncate returns t stripped of any monotonic clock reading but otherwise unchanged.
//
// Truncate operates on the time as an absolute duration since the
// zero time; it does not operate on the presentation form of the
// time. Thus, Truncate(Hour) may return a time with a non-zero
// minute, depending on the time's Location.
func (t Time) Truncate(d Duration) Time {
	// Rounding a monotonic reading is meaningless; drop it first.
	t.stripMono()
	if d <= 0 {
		return t
	}
	// Subtract the remainder of t/d to round down.
	_, r := div(t, d)
	return t.Add(-r)
}
// Round returns the result of rounding t to the nearest multiple of d (since the zero time).
// The rounding behavior for halfway values is to round up.
// If d <= 0, Round returns t stripped of any monotonic clock reading but otherwise unchanged.
//
// Round operates on the time as an absolute duration since the
// zero time; it does not operate on the presentation form of the
// time. Thus, Round(Hour) may return a time with a non-zero
// minute, depending on the time's Location.
func (t Time) Round(d Duration) Time {
	// Rounding a monotonic reading is meaningless; drop it first.
	t.stripMono()
	if d <= 0 {
		return t
	}
	_, r := div(t, d)
	// Round down when the remainder is less than half of d,
	// up otherwise (halfway rounds up).
	if lessThanHalf(r, d) {
		return t.Add(-r)
	}
	return t.Add(d - r)
}
// div divides t by d and returns the quotient parity and remainder.
// We don't use the quotient parity anymore (round half up instead of round to even)
// but it's still here in case we change our minds.
func div(t Time, d Duration) (qmod2 int, r Duration) {
	neg := false
	nsec := t.nsec()
	sec := t.sec()
	if sec < 0 {
		// Operate on absolute value.
		neg = true
		sec = -sec
		nsec = -nsec
		if nsec < 0 {
			nsec += 1e9
			sec-- // sec >= 1 before the -- so safe
		}
	}
	switch {
	// Special case: 2d divides 1 second.
	// (Only the sub-second part matters, so work on nsec alone.)
	case d < Second && Second%(d+d) == 0:
		qmod2 = int(nsec/int32(d)) & 1
		r = Duration(nsec % int32(d))
	// Special case: d is a multiple of 1 second.
	// (Only the whole seconds affect the quotient parity.)
	case d%Second == 0:
		d1 := int64(d / Second)
		qmod2 = int(sec/d1) & 1
		r = Duration(sec%d1)*Second + Duration(nsec)
	// General case.
	// This could be faster if more cleverness were applied,
	// but it's really only here to avoid special case restrictions in the API.
	// No one will care about these cases.
	default:
		// Compute nanoseconds as 128-bit number.
		// u1:u0 accumulates sec*1e9 + nsec with manual carry detection.
		sec := uint64(sec)
		tmp := (sec >> 32) * 1e9
		u1 := tmp >> 32
		u0 := tmp << 32
		tmp = (sec & 0xFFFFFFFF) * 1e9
		u0x, u0 := u0, u0+tmp
		if u0 < u0x {
			u1++
		}
		u0x, u0 = u0, u0+uint64(nsec)
		if u0 < u0x {
			u1++
		}
		// Compute remainder by subtracting r<<k for decreasing k.
		// Quotient parity is whether we subtract on last round.
		// First shift d left until its top bit is set.
		d1 := uint64(d)
		for d1>>63 != 1 {
			d1 <<= 1
		}
		d0 := uint64(0)
		for {
			qmod2 = 0
			if u1 > d1 || u1 == d1 && u0 >= d0 {
				// subtract (128-bit, with borrow from u0 into u1)
				qmod2 = 1
				u0x, u0 = u0, u0-d0
				if u0 > u0x {
					u1--
				}
				u1 -= d1
			}
			// Stop once d1:d0 has been shifted back down to d itself.
			if d1 == 0 && d0 == uint64(d) {
				break
			}
			d0 >>= 1
			d0 |= (d1 & 1) << 63
			d1 >>= 1
		}
		r = Duration(u0)
	}
	if neg && r != 0 {
		// If input was negative and not an exact multiple of d, we computed q, r such that
		// q*d + r = -t
		// But the right answers are given by -(q-1), d-r:
		// q*d + r = -t
		// -q*d - r = t
		// -(q-1)*d + (d - r) = t
		qmod2 ^= 1
		r = d - r
	}
	return
}
// Regrettable Linkname Compatibility
//
// timeAbs, absDate, and absClock mimic old internal details, no longer used.
// Widely used packages linknamed these to get “faster” time routines.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/phuslu/log
//
// phuslu hard-coded 'Unix time + 9223372028715321600' [sic]
// as the input to absDate and absClock, using the old Jan 1-based
// absolute times.
// quant1x linknamed the time.Time.abs method and passed the
// result of that method to absDate and absClock.
//
// Keeping both of these working forces us to provide these three
// routines here, operating on the old Jan 1-based epoch instead
// of the new March 1-based epoch. And the fact that time.Time.abs
// was linknamed means that we have to call the current abs method
// something different (time.Time.absSec, defined above) to make it
// possible to provide this simulation of the old routines here.
//
// None of this code is linked into the binary if not referenced by
// these linkname-happy packages. In particular, despite its name,
// time.Time.abs does not appear in the time.Time method table.
//
// Do not remove these routines or their linknames, or change the
// type signature or meaning of arguments.

// legacyTimeTimeAbs converts the current March 1-based absolute
// seconds to the old Jan 1-based epoch expected by linkname users.
//
//go:linkname legacyTimeTimeAbs time.Time.abs
func legacyTimeTimeAbs(t Time) uint64 {
	return uint64(t.absSec() - marchThruDecember*secondsPerDay)
}
// legacyAbsClock converts an old Jan 1-based absolute time back to the
// March 1-based epoch and returns the hour, minute, and second.
//
//go:linkname legacyAbsClock time.absClock
func legacyAbsClock(abs uint64) (hour, min, sec int) {
	return absSeconds(abs + marchThruDecember*secondsPerDay).clock()
}
// legacyAbsDate converts an old Jan 1-based absolute time back to the
// March 1-based epoch and returns the calendar date plus a 0-based yday.
//
//go:linkname legacyAbsDate time.absDate
func legacyAbsDate(abs uint64, full bool) (year int, month Month, day int, yday int) {
	d := absSeconds(abs + marchThruDecember*secondsPerDay).days()
	year, month, day = d.date()
	_, yday = d.yearYday()
	yday-- // yearYday is 1-based, old API was 0-based
	return
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package time
import (
"errors"
"sync"
"syscall"
)
//go:generate env ZONEINFO=$GOROOT/lib/time/zoneinfo.zip go run genzabbrs.go -output zoneinfo_abbrs_windows.go

// A Location maps time instants to the zone in use at that time.
// Typically, the Location represents the collection of time offsets
// in use in a geographical area. For many Locations the time offset varies
// depending on whether daylight savings time is in use at the time instant.
//
// Location is used to provide a time zone in a printed Time value and for
// calculations involving intervals that may cross daylight savings time
// boundaries.
type Location struct {
	name string      // location name, e.g. "America/New_York"
	zone []zone      // the distinct zones (name/offset/DST) seen in this location
	tx   []zoneTrans // transitions between zones, sorted by time
	// The tzdata information can be followed by a string that describes
	// how to handle DST transitions not recorded in zoneTrans.
	// The format is the TZ environment variable without a colon; see
	// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap08.html.
	// Example string, for America/Los_Angeles: PST8PDT,M3.2.0,M11.1.0
	extend string
	// Most lookups will be for the current time.
	// To avoid the binary search through tx, keep a
	// static one-element cache that gives the correct
	// zone for the time when the Location was created.
	// if cacheStart <= t < cacheEnd,
	// lookup can return cacheZone.
	// The units for cacheStart and cacheEnd are seconds
	// since January 1, 1970 UTC, to match the argument
	// to lookup.
	cacheStart int64
	cacheEnd   int64
	cacheZone  *zone
}
// A zone represents a single time zone such as CET.
type zone struct {
	name   string // abbreviated name, "CET"
	offset int    // seconds east of UTC
	isDST  bool   // is this zone Daylight Savings Time?
}

// A zoneTrans represents a single time zone transition.
type zoneTrans struct {
	when  int64 // transition time, in seconds since 1970 GMT
	index uint8 // the index of the zone that goes into effect at that time
	// isstd/isutc appear to be the TZif standard/wall and UT/local
	// indicator flags (see RFC 8536); nothing here reads them.
	isstd, isutc bool
}
// alpha and omega are the beginning and end of time for zone
// transitions: sentinel values used for the start of the first zone
// and the end of the last one.
const (
	alpha = -1 << 63  // math.MinInt64
	omega = 1<<63 - 1 // math.MaxInt64
)
// UTC represents Universal Coordinated Time (UTC).
var UTC *Location = &utcLoc

// utcLoc is separate so that get can refer to &utcLoc
// and ensure that it never returns a nil *Location,
// even if a badly behaved client has changed UTC.
var utcLoc = Location{name: "UTC"}

// Local represents the system's local time zone.
// On Unix systems, Local consults the TZ environment
// variable to find the time zone to use. No TZ means
// use the system default /etc/localtime.
// TZ="" means use UTC.
// TZ="foo" means use file foo in the system timezone directory.
var Local *Location = &localLoc

// localLoc is separate so that initLocal can initialize
// it even if a client has changed Local.
// It is populated lazily, exactly once, via localOnce (see get).
var localLoc Location
var localOnce sync.Once
// get returns a usable *Location: nil maps to UTC, and the process-local
// location is lazily initialized on first use.
func (l *Location) get() *Location {
	switch l {
	case nil:
		return &utcLoc
	case &localLoc:
		// Initialize the local zone data exactly once, on demand.
		localOnce.Do(initLocal)
	}
	return l
}
// String returns a descriptive name for the time zone information,
// corresponding to the name argument to [LoadLocation] or [FixedZone].
func (l *Location) String() string {
	// get() handles nil receivers and lazy Local initialization.
	return l.get().name
}
// unnamedFixedZones caches the whole-hour unnamed fixed zones
// (UTC-12 through UTC+14) handed out by FixedZone; built lazily once.
var unnamedFixedZones []*Location
var unnamedFixedZonesOnce sync.Once
// FixedZone returns a [Location] that always uses
// the given zone name and offset (seconds east of UTC).
func FixedZone(name string, offset int) *Location {
	// Most calls to FixedZone have an unnamed zone with an offset by the hour.
	// Optimize for that case by returning the same *Location for a given hour.
	const hoursBeforeUTC = 12
	const hoursAfterUTC = 14

	hour := offset / 60 / 60
	// hour*60*60 == offset verifies the offset is an exact whole hour.
	if name == "" && -hoursBeforeUTC <= hour && hour <= +hoursAfterUTC && hour*60*60 == offset {
		unnamedFixedZonesOnce.Do(func() {
			// Build all 27 cached zones (UTC-12 .. UTC+14) up front.
			unnamedFixedZones = make([]*Location, hoursBeforeUTC+1+hoursAfterUTC)
			for hr := -hoursBeforeUTC; hr <= +hoursAfterUTC; hr++ {
				unnamedFixedZones[hr+hoursBeforeUTC] = fixedZone("", hr*60*60)
			}
		})
		return unnamedFixedZones[hour+hoursBeforeUTC]
	}
	return fixedZone(name, offset)
}
// fixedZone constructs a Location with a single zone that is in effect
// for all of time, so every lookup hits the one-element cache.
func fixedZone(name string, offset int) *Location {
	loc := &Location{
		name:       name,
		zone:       []zone{{name, offset, false}},
		tx:         []zoneTrans{{alpha, 0, false, false}},
		cacheStart: alpha,
		cacheEnd:   omega,
	}
	// The cache zone must point into loc.zone, so set it after construction.
	loc.cacheZone = &loc.zone[0]
	return loc
}
// lookup returns information about the time zone in use at an
// instant in time expressed as seconds since January 1, 1970 00:00:00 UTC.
//
// The returned information gives the name of the zone (such as "CET"),
// the start and end times bracketing sec when that zone is in effect,
// the offset in seconds east of UTC (such as -5*60*60), and whether
// the daylight savings is being observed at that time.
func (l *Location) lookup(sec int64) (name string, offset int, start, end int64, isDST bool) {
	l = l.get()

	// No zone data at all: treat as UTC forever.
	if len(l.zone) == 0 {
		name = "UTC"
		offset = 0
		start = alpha
		end = omega
		isDST = false
		return
	}

	// Fast path: the one-element cache covering the common case.
	if zone := l.cacheZone; zone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
		name = zone.name
		offset = zone.offset
		start = l.cacheStart
		end = l.cacheEnd
		isDST = zone.isDST
		return
	}

	// Before the first recorded transition (or no transitions at all).
	if len(l.tx) == 0 || sec < l.tx[0].when {
		zone := &l.zone[l.lookupFirstZone()]
		name = zone.name
		offset = zone.offset
		start = alpha
		if len(l.tx) > 0 {
			end = l.tx[0].when
		} else {
			end = omega
		}
		isDST = zone.isDST
		return
	}

	// Binary search for entry with largest time <= sec.
	// Not using sort.Search to avoid dependencies.
	tx := l.tx
	end = omega
	lo := 0
	hi := len(tx)
	for hi-lo > 1 {
		m := int(uint(lo+hi) >> 1)
		lim := tx[m].when
		if sec < lim {
			end = lim
			hi = m
		} else {
			lo = m
		}
	}
	zone := &l.zone[tx[lo].index]
	name = zone.name
	offset = zone.offset
	start = tx[lo].when
	// end = maintained during the search
	isDST = zone.isDST

	// If we're at the end of the known zone transitions,
	// try the extend string.
	if lo == len(tx)-1 && l.extend != "" {
		if ename, eoffset, estart, eend, eisDST, ok := tzset(l.extend, start, sec); ok {
			return ename, eoffset, estart, eend, eisDST
		}
	}

	return
}
// lookupFirstZone returns the index of the time zone to use for times
// before the first transition time, or when there are no transition
// times.
//
// The reference implementation in localtime.c from
// https://www.iana.org/time-zones/repository/releases/tzcode2013g.tar.gz
// implements the following algorithm for these cases:
//  1. If the first zone is unused by the transitions, use it.
//  2. Otherwise, if there are transition times, and the first
//     transition is to a zone in daylight time, find the first
//     non-daylight-time zone before and closest to the first transition
//     zone.
//  3. Otherwise, use the first zone that is not daylight time, if
//     there is one.
//  4. Otherwise, use the first zone.
func (l *Location) lookupFirstZone() int {
	// Case 1.
	if !l.firstZoneUsed() {
		return 0
	}

	// Case 2.
	// Scan backward from the first transition's zone for a standard zone.
	if len(l.tx) > 0 && l.zone[l.tx[0].index].isDST {
		for zi := int(l.tx[0].index) - 1; zi >= 0; zi-- {
			if !l.zone[zi].isDST {
				return zi
			}
		}
	}

	// Case 3.
	for zi := range l.zone {
		if !l.zone[zi].isDST {
			return zi
		}
	}

	// Case 4.
	return 0
}
// firstZoneUsed reports whether the first zone is used by some
// transition.
func (l *Location) firstZoneUsed() bool {
	for i := range l.tx {
		if l.tx[i].index == 0 {
			return true
		}
	}
	return false
}
// tzset takes a timezone string like the one found in the TZ environment
// variable, the time of the last time zone transition expressed as seconds
// since January 1, 1970 00:00:00 UTC, and a time expressed the same way.
// We call this a tzset string since in C the function tzset reads TZ.
// The return values are as for lookup, plus ok which reports whether the
// parse succeeded.
func tzset(s string, lastTxSec, sec int64) (name string, offset int, start, end int64, isDST, ok bool) {
	var (
		stdName, dstName     string
		stdOffset, dstOffset int
	)

	// Parse the mandatory standard name and offset, e.g. "PST8".
	stdName, s, ok = tzsetName(s)
	if ok {
		stdOffset, s, ok = tzsetOffset(s)
	}
	if !ok {
		return "", 0, 0, 0, false, false
	}

	// The numbers in the tzset string are added to local time to get UTC,
	// but our offsets are added to UTC to get local time,
	// so we negate the number we see here.
	stdOffset = -stdOffset

	if len(s) == 0 || s[0] == ',' {
		// No daylight savings time.
		return stdName, stdOffset, lastTxSec, omega, false, true
	}

	// Parse the optional DST name and offset, e.g. "PDT" (offset
	// defaults to one hour ahead of standard time).
	dstName, s, ok = tzsetName(s)
	if ok {
		if len(s) == 0 || s[0] == ',' {
			dstOffset = stdOffset + secondsPerHour
		} else {
			dstOffset, s, ok = tzsetOffset(s)
			dstOffset = -dstOffset // as with stdOffset, above
		}
	}
	if !ok {
		return "", 0, 0, 0, false, false
	}

	if len(s) == 0 {
		// Default DST rules per tzcode.
		s = ",M3.2.0,M11.1.0"
	}
	// The TZ definition does not mention ';' here but tzcode accepts it.
	if s[0] != ',' && s[0] != ';' {
		return "", 0, 0, 0, false, false
	}
	s = s[1:]

	// Parse the pair of transition rules: DST start, then DST end.
	var startRule, endRule rule
	startRule, s, ok = tzsetRule(s)
	if !ok || len(s) == 0 || s[0] != ',' {
		return "", 0, 0, 0, false, false
	}
	s = s[1:]
	endRule, s, ok = tzsetRule(s)
	if !ok || len(s) > 0 {
		return "", 0, 0, 0, false, false
	}

	// Compute start of year in seconds since Unix epoch,
	// and seconds since then to get to sec.
	year, yday := absSeconds(sec + unixToInternal + internalToAbsolute).days().yearYday()
	ysec := int64((yday-1)*secondsPerDay) + sec%secondsPerDay
	ystart := sec - ysec

	startSec := int64(tzruleTime(year, startRule, stdOffset))
	endSec := int64(tzruleTime(year, endRule, dstOffset))
	dstIsDST, stdIsDST := true, false
	// Note: this is a flipping of "DST" and "STD" while retaining the labels
	// This happens in southern hemispheres. The labelling here thus is a little
	// inconsistent with the goal.
	if endSec < startSec {
		startSec, endSec = endSec, startSec
		stdName, dstName = dstName, stdName
		stdOffset, dstOffset = dstOffset, stdOffset
		stdIsDST, dstIsDST = dstIsDST, stdIsDST
	}

	// The start and end values that we return are accurate
	// close to a daylight savings transition, but are otherwise
	// just the start and end of the year. That suffices for
	// the only caller that cares, which is Date.
	if ysec < startSec {
		return stdName, stdOffset, ystart, startSec + ystart, stdIsDST, true
	} else if ysec >= endSec {
		return stdName, stdOffset, endSec + ystart, ystart + 365*secondsPerDay, stdIsDST, true
	} else {
		return dstName, dstOffset, startSec + ystart, endSec + ystart, dstIsDST, true
	}
}
// tzsetName returns the timezone name at the start of the tzset string s,
// and the remainder of s, and reports whether the parsing is OK.
// A name is either quoted, "<...>", or a run of at least three
// characters terminated by a digit, sign, or comma.
func tzsetName(s string) (string, string, bool) {
	if len(s) == 0 {
		return "", "", false
	}
	if s[0] == '<' {
		// Quoted form: everything up to the closing '>'.
		for i := 1; i < len(s); i++ {
			if s[i] == '>' {
				return s[1:i], s[i+1:], true
			}
		}
		return "", "", false
	}
	// Unquoted form: scan to the first terminator byte (all ASCII,
	// so a byte scan matches the rune positions exactly).
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ',', '-', '+':
			if i < 3 {
				return "", "", false
			}
			return s[:i], s[i:], true
		}
	}
	if len(s) < 3 {
		return "", "", false
	}
	return s, "", true
}
// tzsetOffset returns the timezone offset at the start of the tzset string s,
// and the remainder of s, and reports whether the parsing is OK.
// The timezone offset is returned as a number of seconds.
// The accepted form is [+-]hh[:mm[:ss]].
func tzsetOffset(s string) (offset int, rest string, ok bool) {
	if len(s) == 0 {
		return 0, "", false
	}
	neg := false
	if s[0] == '+' {
		s = s[1:]
	} else if s[0] == '-' {
		s = s[1:]
		neg = true
	}

	// The tzdata code permits values up to 24 * 7 here,
	// although POSIX does not.
	var hours int
	hours, s, ok = tzsetNum(s, 0, 24*7)
	if !ok {
		return 0, "", false
	}
	off := hours * secondsPerHour

	// Minutes and seconds are optional, each introduced by ':'.
	if len(s) == 0 || s[0] != ':' {
		if neg {
			off = -off
		}
		return off, s, true
	}

	var mins int
	mins, s, ok = tzsetNum(s[1:], 0, 59)
	if !ok {
		return 0, "", false
	}
	off += mins * secondsPerMinute

	if len(s) == 0 || s[0] != ':' {
		if neg {
			off = -off
		}
		return off, s, true
	}

	var secs int
	secs, s, ok = tzsetNum(s[1:], 0, 59)
	if !ok {
		return 0, "", false
	}
	off += secs

	if neg {
		off = -off
	}
	return off, s, true
}
// ruleKind is the kinds of rules that can be seen in a tzset string.
const (
	ruleJulian       ruleKind = iota // "Jn": day n (1-365), never counting Feb 29
	ruleDOY                          // "n": zero-based day of year, counting Feb 29
	ruleMonthWeekDay                 // "Mm.w.d": day d of week w of month m
)

type ruleKind int

// rule is a rule read from a tzset string.
type rule struct {
	kind ruleKind
	day  int
	week int
	mon  int
	time int // transition time
}
// tzsetRule parses a rule from a tzset string.
// It returns the rule, and the remainder of the string, and reports success.
// Forms: "Jn" (Julian day), "n" (day of year), "Mm.w.d" (month/week/day),
// each optionally followed by "/time".
func tzsetRule(s string) (rule, string, bool) {
	var r rule
	if len(s) == 0 {
		return rule{}, "", false
	}
	ok := false
	if s[0] == 'J' {
		// Julian day, 1-365; leap days are not counted.
		var jday int
		jday, s, ok = tzsetNum(s[1:], 1, 365)
		if !ok {
			return rule{}, "", false
		}
		r.kind = ruleJulian
		r.day = jday
	} else if s[0] == 'M' {
		// Month.week.day form, e.g. M3.2.0 = second Sunday of March.
		var mon int
		mon, s, ok = tzsetNum(s[1:], 1, 12)
		if !ok || len(s) == 0 || s[0] != '.' {
			return rule{}, "", false
		}
		var week int
		week, s, ok = tzsetNum(s[1:], 1, 5)
		if !ok || len(s) == 0 || s[0] != '.' {
			return rule{}, "", false
		}
		var day int
		day, s, ok = tzsetNum(s[1:], 0, 6)
		if !ok {
			return rule{}, "", false
		}
		r.kind = ruleMonthWeekDay
		r.day = day
		r.week = week
		r.mon = mon
	} else {
		// Plain zero-based day of year.
		var day int
		day, s, ok = tzsetNum(s, 0, 365)
		if !ok {
			return rule{}, "", false
		}
		r.kind = ruleDOY
		r.day = day
	}

	if len(s) == 0 || s[0] != '/' {
		r.time = 2 * secondsPerHour // 2am is the default
		return r, s, true
	}

	offset, s, ok := tzsetOffset(s[1:])
	if !ok {
		return rule{}, "", false
	}
	r.time = offset

	return r, s, true
}
// tzsetNum parses a number from a tzset string.
// It returns the number, and the remainder of the string, and reports success.
// The number must be between min and max.
func tzsetNum(s string, min, max int) (num int, rest string, ok bool) {
	if len(s) == 0 {
		return 0, "", false
	}
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c < '0' || c > '9' {
			// Stop at the first non-digit byte; require at least one
			// digit and a value within range.
			if i == 0 || num < min {
				return 0, "", false
			}
			return num, s[i:], true
		}
		num = num*10 + int(c-'0')
		if num > max {
			return 0, "", false
		}
	}
	if num < min {
		return 0, "", false
	}
	return num, "", true
}
// tzruleTime takes a year, a rule, and a timezone offset,
// and returns the number of seconds since the start of the year
// that the rule takes effect.
func tzruleTime(year int, r rule, off int) int {
	var s int
	switch r.kind {
	case ruleJulian:
		// Day 1-365, never counting Feb 29; shift later days in leap years.
		s = (r.day - 1) * secondsPerDay
		if isLeap(year) && r.day >= 60 {
			s += secondsPerDay
		}
	case ruleDOY:
		// Zero-based day of year, counting Feb 29.
		s = r.day * secondsPerDay
	case ruleMonthWeekDay:
		// Zeller's Congruence.
		m1 := (r.mon+9)%12 + 1
		yy0 := year
		if r.mon <= 2 {
			yy0--
		}
		yy1 := yy0 / 100
		yy2 := yy0 % 100
		dow := ((26*m1-2)/10 + 1 + yy2 + yy2/4 + yy1/4 - 2*yy1) % 7
		if dow < 0 {
			dow += 7
		}
		// Now dow is the day-of-week of the first day of r.mon.
		// Get the day-of-month of the first "dow" day.
		d := r.day - dow
		if d < 0 {
			d += 7
		}
		// Advance by whole weeks; week 5 means "last such day in the month".
		for i := 1; i < r.week; i++ {
			if d+7 >= daysIn(Month(r.mon), year) {
				break
			}
			d += 7
		}
		// Convert the day-of-month to a day-of-year.
		d += daysBefore(Month(r.mon))
		if isLeap(year) && r.mon > 2 {
			d++
		}
		s = d * secondsPerDay
	}

	// r.time is local; subtract the offset to express it in UTC.
	return s + r.time - off
}
// lookupName returns information about the time zone with
// the given name (such as "EST") at the given pseudo-Unix time
// (what the given time of day would be in UTC).
func (l *Location) lookupName(name string, unix int64) (offset int, ok bool) {
	l = l.get()

	// First pass: prefer a zone with the right name that was actually
	// in effect at the given time. (In Sydney, Australia, both standard
	// and daylight-savings time are abbreviated "EST". Using the
	// offset helps us pick the right one for the given time.
	// It's not perfect: during the backward transition we might pick
	// either one.)
	for i := range l.zone {
		z := &l.zone[i]
		if z.name != name {
			continue
		}
		if nam, off, _, _, _ := l.lookup(unix - int64(z.offset)); nam == z.name {
			return off, true
		}
	}

	// Second pass: settle for any zone whose name matches.
	for i := range l.zone {
		z := &l.zone[i]
		if z.name == name {
			return z.offset, true
		}
	}

	// No zone by that name.
	return 0, false
}
// NOTE(rsc): Eventually we will need to accept the POSIX TZ environment
// syntax too, but I don't feel like implementing it today.

// errLocation is returned by LoadLocation for syntactically invalid names.
var errLocation = errors.New("time: invalid location name")

// zoneinfo caches the value of the ZONEINFO environment variable;
// it is read exactly once, guarded by zoneinfoOnce (see LoadLocation).
var zoneinfo *string
var zoneinfoOnce sync.Once
// LoadLocation returns a [Location] with the given name.
//
// If the name is "" or "UTC", LoadLocation returns [UTC].
// If the name is "Local", LoadLocation returns [Local].
//
// Otherwise, a new [Location] is created where the name is taken
// to be a location name corresponding to a file
// in the IANA Time Zone database, such as "America/New_York".
//
// LoadLocation looks for the IANA Time Zone database in the following
// locations in order:
//
//   - the directory or uncompressed zip file named by the ZONEINFO environment variable
//   - on a Unix system, the system standard installation location
//   - $GOROOT/lib/time/zoneinfo.zip
//   - the time/tzdata package, if it was imported
func LoadLocation(name string) (*Location, error) {
	if name == "" || name == "UTC" {
		return UTC, nil
	}
	if name == "Local" {
		return Local, nil
	}
	if containsDotDot(name) || name[0] == '/' || name[0] == '\\' {
		// No valid IANA Time Zone name contains a single dot,
		// much less dot dot. Likewise, none begin with a slash.
		return nil, errLocation
	}
	// Read $ZONEINFO at most once.
	zoneinfoOnce.Do(func() {
		env, _ := syscall.Getenv("ZONEINFO")
		zoneinfo = &env
	})
	var firstErr error
	if *zoneinfo != "" {
		if zoneData, err := loadTzinfoFromDirOrZip(*zoneinfo, name); err == nil {
			if z, err := LoadLocationFromTZData(name, zoneData); err == nil {
				return z, nil
			}
			// NOTE(review): the LoadLocationFromTZData error is declared in
			// the inner if and is out of scope here; this `err` is the
			// (nil) error from loadTzinfoFromDirOrZip, so firstErr stays
			// nil and the parse error is dropped. Confirm whether that is
			// intended before changing it.
			firstErr = err
		} else if err != syscall.ENOENT {
			// A missing file under $ZONEINFO is not fatal — fall through
			// to the other sources. Any other error is remembered.
			firstErr = err
		}
	}
	if z, err := loadLocation(name, platformZoneSources); err == nil {
		return z, nil
	} else if firstErr == nil {
		firstErr = err
	}
	return nil, firstErr
}
// containsDotDot reports whether s contains "..".
func containsDotDot(s string) bool {
	// Scan adjacent byte pairs; strings shorter than two bytes
	// never enter the loop.
	for i := 1; i < len(s); i++ {
		if s[i-1] == '.' && s[i] == '.' {
			return true
		}
	}
	return false
}
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !ios && !android
package time
// gorootZoneSource returns the path of the zoneinfo.zip shipped under
// goroot, and reports whether goroot was non-empty.
func gorootZoneSource(goroot string) (string, bool) {
	if goroot != "" {
		return goroot + "/lib/time/zoneinfo.zip", true
	}
	return "", false
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Parse "zoneinfo" time zone file.
// This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others.
// See tzfile(5), https://en.wikipedia.org/wiki/Zoneinfo,
// and ftp://munnari.oz.au/pub/oldtz/
package time
import (
"errors"
"internal/bytealg"
"runtime"
"syscall"
_ "unsafe" // for linkname
)
// registerLoadFromEmbeddedTZData is called by the time/tzdata package,
// if it is imported.
//
//go:linkname registerLoadFromEmbeddedTZData
func registerLoadFromEmbeddedTZData(f func(string) (string, error)) {
	loadFromEmbeddedTZData = f
}

// loadFromEmbeddedTZData is used to load a specific tzdata file
// from tzdata information embedded in the binary itself.
// This is set when the time/tzdata package is imported,
// via registerLoadFromEmbeddedTZData; it is nil otherwise.
var loadFromEmbeddedTZData func(zipname string) (string, error)

// maxFileSize is the max permitted size of files read by readFile.
// As reference, the zoneinfo.zip distributed by Go is ~350 KB,
// so 10MB is overkill.
const maxFileSize = 10 << 20

// fileSizeError reports a file larger than maxFileSize; the string
// value is the offending file name.
type fileSizeError string

func (f fileSizeError) Error() string {
	return "time: file " + string(f) + " is too large"
}

// Copies of io.Seek* constants to avoid importing "io":
const (
	seekStart   = 0
	seekCurrent = 1
	seekEnd     = 2
)
// dataIO is a simple cursor over an in-memory binary blob. Reads consume
// bytes from the front of p; any read past the end sets the sticky error
// flag and drains the buffer.
type dataIO struct {
	p     []byte
	error bool
}

// read consumes and returns the next n bytes, or nil (setting the error
// flag) when fewer than n bytes remain.
func (d *dataIO) read(n int) []byte {
	if n > len(d.p) {
		d.p = nil
		d.error = true
		return nil
	}
	out := d.p[:n]
	d.p = d.p[n:]
	return out
}

// big4 decodes a big-endian unsigned 32-bit integer.
func (d *dataIO) big4() (n uint32, ok bool) {
	b := d.read(4)
	if len(b) < 4 {
		d.error = true
		return 0, false
	}
	n = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
	return n, true
}

// big8 decodes a big-endian unsigned 64-bit integer as two 32-bit halves.
func (d *dataIO) big8() (n uint64, ok bool) {
	hi, okHi := d.big4()
	lo, okLo := d.big4()
	if !okHi || !okLo {
		d.error = true
		return 0, false
	}
	return uint64(hi)<<32 | uint64(lo), true
}

// byte consumes and returns a single byte.
func (d *dataIO) byte() (n byte, ok bool) {
	b := d.read(1)
	if len(b) < 1 {
		d.error = true
		return 0, false
	}
	return b[0], true
}

// rest returns all remaining data, leaving the buffer empty.
func (d *dataIO) rest() []byte {
	remaining := d.p
	d.p = nil
	return remaining
}
// byteString converts p to a string, stopping at the first NUL byte
// if there is one (the TZif abbreviation block stores NUL-terminated
// strings back to back).
func byteString(p []byte) string {
	for i, c := range p {
		if c == 0 {
			return string(p[:i])
		}
	}
	return string(p)
}

// errBadData is returned for any structurally invalid TZif input.
var errBadData = errors.New("malformed time zone information")
// LoadLocationFromTZData returns a new [Location] with the given name
// initialized from the IANA Time Zone database-formatted data.
// The data should be in the format of a standard IANA time zone file
// (for example, the content of /etc/localtime on Unix systems).
func LoadLocationFromTZData(name string, data []byte) (*Location, error) {
	d := dataIO{data, false}

	// 4-byte magic "TZif"
	if magic := d.read(4); string(magic) != "TZif" {
		return nil, errBadData
	}

	// 1-byte version, then 15 bytes of padding
	var version int
	var p []byte
	if p = d.read(16); len(p) != 16 {
		return nil, errBadData
	} else {
		switch p[0] {
		case 0:
			version = 1
		case '2':
			version = 2
		case '3':
			version = 3
		default:
			return nil, errBadData
		}
	}

	// six big-endian 32-bit integers:
	//	number of UTC/local indicators
	//	number of standard/wall indicators
	//	number of leap seconds
	//	number of transition times
	//	number of local time zones
	//	number of characters of time zone abbrev strings
	const (
		NUTCLocal = iota
		NStdWall
		NLeap
		NTime
		NZone
		NChar
	)
	var n [6]int
	for i := 0; i < 6; i++ {
		nn, ok := d.big4()
		if !ok {
			return nil, errBadData
		}
		// Reject counts that do not round-trip through int
		// (possible on 32-bit platforms).
		if uint32(int(nn)) != nn {
			return nil, errBadData
		}
		n[i] = int(nn)
	}

	// If we have version 2 or 3, then the data is first written out
	// in a 32-bit format, then written out again in a 64-bit format.
	// Skip the 32-bit format and read the 64-bit one, as it can
	// describe a broader range of dates.
	is64 := false
	if version > 1 {
		// Skip the 32-bit data.
		skip := n[NTime]*4 +
			n[NTime] +
			n[NZone]*6 +
			n[NChar] +
			n[NLeap]*8 +
			n[NStdWall] +
			n[NUTCLocal]
		// Skip the version 2 header that we just read.
		skip += 4 + 16
		d.read(skip)

		is64 = true

		// Read the counts again, they can differ.
		for i := 0; i < 6; i++ {
			nn, ok := d.big4()
			if !ok {
				return nil, errBadData
			}
			if uint32(int(nn)) != nn {
				return nil, errBadData
			}
			n[i] = int(nn)
		}
	}

	// Transition times are 4 bytes each in v1 data, 8 bytes in v2+.
	size := 4
	if is64 {
		size = 8
	}

	// Transition times.
	txtimes := dataIO{d.read(n[NTime] * size), false}

	// Time zone indices for transition times.
	txzones := d.read(n[NTime])

	// Zone info structures
	zonedata := dataIO{d.read(n[NZone] * 6), false}

	// Time zone abbreviations.
	abbrev := d.read(n[NChar])

	// Leap-second time pairs
	d.read(n[NLeap] * (size + 4))

	// Whether tx times associated with local time types
	// are specified as standard time or wall time.
	isstd := d.read(n[NStdWall])

	// Whether tx times associated with local time types
	// are specified as UTC or local time.
	isutc := d.read(n[NUTCLocal])

	if d.error { // ran out of data
		return nil, errBadData
	}

	// Anything after the leap data, framed by newlines, is the
	// TZ-syntax extension string used for times past the last transition.
	var extend string
	rest := d.rest()
	if len(rest) > 2 && rest[0] == '\n' && rest[len(rest)-1] == '\n' {
		extend = string(rest[1 : len(rest)-1])
	}

	// Now we can build up a useful data structure.
	// First the zone information.
	//	utcoff[4] isdst[1] nameindex[1]
	nzone := n[NZone]
	if nzone == 0 {
		// Reject tzdata files with no zones. There's nothing useful in them.
		// This also avoids a panic later when we add and then use a fake transition (golang.org/issue/29437).
		return nil, errBadData
	}
	zones := make([]zone, nzone)
	for i := range zones {
		var ok bool
		var n uint32
		if n, ok = zonedata.big4(); !ok {
			return nil, errBadData
		}
		if uint32(int(n)) != n {
			return nil, errBadData
		}
		// Offsets are stored unsigned; reinterpret as signed seconds.
		zones[i].offset = int(int32(n))
		var b byte
		if b, ok = zonedata.byte(); !ok {
			return nil, errBadData
		}
		zones[i].isDST = b != 0
		if b, ok = zonedata.byte(); !ok || int(b) >= len(abbrev) {
			return nil, errBadData
		}
		zones[i].name = byteString(abbrev[b:])
		if runtime.GOOS == "aix" && len(name) > 8 && (name[:8] == "Etc/GMT+" || name[:8] == "Etc/GMT-") {
			// There is a bug with AIX 7.2 TL 0 with files in Etc,
			// GMT+1 will return GMT-1 instead of GMT+1 or -01.
			if name != "Etc/GMT+0" {
				// GMT+0 is OK
				zones[i].name = name[4:]
			}
		}
	}

	// Now the transition time info.
	tx := make([]zoneTrans, n[NTime])
	for i := range tx {
		var n int64
		if !is64 {
			if n4, ok := txtimes.big4(); !ok {
				return nil, errBadData
			} else {
				// Sign-extend the 32-bit transition time.
				n = int64(int32(n4))
			}
		} else {
			if n8, ok := txtimes.big8(); !ok {
				return nil, errBadData
			} else {
				n = int64(n8)
			}
		}
		tx[i].when = n
		if int(txzones[i]) >= len(zones) {
			return nil, errBadData
		}
		tx[i].index = txzones[i]
		if i < len(isstd) {
			tx[i].isstd = isstd[i] != 0
		}
		if i < len(isutc) {
			tx[i].isutc = isutc[i] != 0
		}
	}

	if len(tx) == 0 {
		// Build fake transition to cover all time.
		// This happens in fixed locations like "Etc/GMT0".
		tx = append(tx, zoneTrans{when: alpha, index: 0})
	}

	// Committed to succeed.
	l := &Location{zone: zones, tx: tx, name: name, extend: extend}

	// Fill in the cache with information about right now,
	// since that will be the most common lookup.
	sec, _, _ := runtimeNow()
	for i := range tx {
		if tx[i].when <= sec && (i+1 == len(tx) || sec < tx[i+1].when) {
			l.cacheStart = tx[i].when
			l.cacheEnd = omega
			l.cacheZone = &l.zone[tx[i].index]
			if i+1 < len(tx) {
				l.cacheEnd = tx[i+1].when
			} else if l.extend != "" {
				// If we're at the end of the known zone transitions,
				// try the extend string.
				if name, offset, estart, eend, isDST, ok := tzset(l.extend, l.cacheStart, sec); ok {
					l.cacheStart = estart
					l.cacheEnd = eend
					// Find the zone that is returned by tzset to avoid allocation if possible.
					if zoneIdx := findZone(l.zone, name, offset, isDST); zoneIdx != -1 {
						l.cacheZone = &l.zone[zoneIdx]
					} else {
						l.cacheZone = &zone{
							name:   name,
							offset: offset,
							isDST:  isDST,
						}
					}
				}
			}
			break
		}
	}

	return l, nil
}
// findZone returns the index in zones of the zone matching name, offset,
// and isDST exactly, or -1 if there is none.
func findZone(zones []zone, name string, offset int, isDST bool) int {
	for i := range zones {
		z := &zones[i]
		if z.name == name && z.offset == offset && z.isDST == isDST {
			return i
		}
	}
	return -1
}
// loadTzinfoFromDirOrZip returns the contents of the file with the given name
// in dir. dir can either be an uncompressed zip file, or a directory.
func loadTzinfoFromDirOrZip(dir, name string) ([]byte, error) {
	const zipSuffix = ".zip"
	if len(dir) > len(zipSuffix) && dir[len(dir)-len(zipSuffix):] == zipSuffix {
		return loadTzinfoFromZip(dir, name)
	}
	if dir == "" {
		return readFile(name)
	}
	return readFile(dir + "/" + name)
}
// There are 500+ zoneinfo files. Rather than distribute them all
// individually, we ship them in an uncompressed zip file.
// Used this way, the zip file format serves as a commonly readable
// container for the individual small files. We choose zip over tar
// because zip files have a contiguous table of contents, making
// individual file lookups faster, and because the per-file overhead
// in a zip file is considerably less than tar's 512 bytes.

// get4 returns the little-endian 32-bit value in b,
// or 0 when b holds fewer than 4 bytes.
func get4(b []byte) int {
	if len(b) < 4 {
		return 0
	}
	return int(b[3])<<24 | int(b[2])<<16 | int(b[1])<<8 | int(b[0])
}
// get2 returns the little-endian 16-bit value in b,
// or 0 when b holds fewer than 2 bytes.
func get2(b []byte) int {
	if len(b) < 2 {
		return 0
	}
	return int(b[1])<<8 | int(b[0])
}
// loadTzinfoFromZip returns the contents of the file with the given name
// in the given uncompressed zip file.
//
// The zip file is parsed by hand (rather than with archive/zip) and only
// uncompressed (method 0) entries are supported.
func loadTzinfoFromZip(zipfile, name string) ([]byte, error) {
	fd, err := open(zipfile)
	if err != nil {
		return nil, err
	}
	defer closefd(fd)

	const (
		zecheader = 0x06054b50 // end-of-central-directory signature
		zcheader  = 0x02014b50 // central-directory entry signature
		ztailsize = 22         // fixed size of the end-of-central-directory record

		zheadersize = 30         // fixed size of a local file header
		zheader     = 0x04034b50 // local file header signature
	)

	// Read the end-of-central-directory record from the tail of the
	// file (the negative offset addresses it from the end).
	buf := make([]byte, ztailsize)
	if err := preadn(fd, buf, -ztailsize); err != nil || get4(buf) != zecheader {
		return nil, errors.New("corrupt zip file " + zipfile)
	}
	n := get2(buf[10:])  // number of central-directory entries
	size := get4(buf[12:]) // size of the central directory in bytes
	off := get4(buf[16:])  // offset of the central directory in the file

	// Read the whole central directory at once.
	buf = make([]byte, size)
	if err := preadn(fd, buf, off); err != nil {
		return nil, errors.New("corrupt zip file " + zipfile)
	}

	for i := 0; i < n; i++ {
		// zip entry layout:
		//	0	magic[4]
		//	4	madevers[1]
		//	5	madeos[1]
		//	6	extvers[1]
		//	7	extos[1]
		//	8	flags[2]
		//	10	meth[2]
		//	12	modtime[2]
		//	14	moddate[2]
		//	16	crc[4]
		//	20	csize[4]
		//	24	uncsize[4]
		//	28	namelen[2]
		//	30	xlen[2]
		//	32	fclen[2]
		//	34	disknum[2]
		//	36	iattr[2]
		//	38	eattr[4]
		//	42	off[4]
		//	46	name[namelen]
		//	46+namelen+xlen+fclen - next header
		//
		if get4(buf) != zcheader {
			break
		}
		meth := get2(buf[10:])
		size := get4(buf[24:])
		namelen := get2(buf[28:])
		xlen := get2(buf[30:])
		fclen := get2(buf[32:])
		off := get4(buf[42:])
		zname := buf[46 : 46+namelen]
		// Advance buf to the next central-directory entry.
		buf = buf[46+namelen+xlen+fclen:]
		if string(zname) != name {
			continue
		}
		if meth != 0 {
			return nil, errors.New("unsupported compression for " + name + " in " + zipfile)
		}

		// zip per-file header layout:
		//	0	magic[4]
		//	4	extvers[1]
		//	5	extos[1]
		//	6	flags[2]
		//	8	meth[2]
		//	10	modtime[2]
		//	12	moddate[2]
		//	14	crc[4]
		//	18	csize[4]
		//	22	uncsize[4]
		//	26	namelen[2]
		//	28	xlen[2]
		//	30	name[namelen]
		//	30+namelen+xlen - file data
		//
		// Read the local file header and cross-check it against the
		// central-directory entry before trusting its offsets.
		buf = make([]byte, zheadersize+namelen)
		if err := preadn(fd, buf, off); err != nil ||
			get4(buf) != zheader ||
			get2(buf[8:]) != meth ||
			get2(buf[26:]) != namelen ||
			string(buf[30:30+namelen]) != name {
			return nil, errors.New("corrupt zip file " + zipfile)
		}
		// The local header's extra-field length can differ from the
		// central directory's; use the local value to locate the data.
		xlen = get2(buf[28:])

		buf = make([]byte, size)
		if err := preadn(fd, buf, off+30+namelen+xlen); err != nil {
			return nil, errors.New("corrupt zip file " + zipfile)
		}
		return buf, nil
	}

	return nil, syscall.ENOENT
}
// loadTzinfoFromTzdata returns the time zone information of the time zone
// with the given name, from a tzdata database file as they are typically
// found on android.
// NOTE(review): presumably assigned elsewhere on platforms whose zone
// sources include a tzdata file (see loadTzinfo); nil otherwise — confirm
// before calling directly.
var loadTzinfoFromTzdata func(file, name string) ([]byte, error)
// loadTzinfo returns the time zone information of the time zone
// with the given name, from a given source. A source may be a
// timezone database directory, tzdata database file or an uncompressed
// zip file, containing the contents of such a directory.
func loadTzinfo(name string, source string) ([]byte, error) {
	const tzdataSuffix = "tzdata"
	if len(source) >= len(tzdataSuffix) && source[len(source)-len(tzdataSuffix):] == tzdataSuffix {
		return loadTzinfoFromTzdata(source, name)
	}
	return loadTzinfoFromDirOrZip(source, name)
}
// loadLocation returns the Location with the given name from one of
// the specified sources. See loadTzinfo for a list of supported sources.
// The first timezone data matching the given name that is successfully loaded
// and parsed is returned as a Location.
//
// On failure it returns the first error seen that is not a plain
// "file not found" (ENOENT) — missing files are expected when probing
// multiple sources and are not recorded.
func loadLocation(name string, sources []string) (z *Location, firstErr error) {
	for _, source := range sources {
		zoneData, err := loadTzinfo(name, source)
		if err == nil {
			// Plain `=` (not `:=`): a parse failure overwrites err and
			// is therefore visible to the firstErr check below.
			if z, err = LoadLocationFromTZData(name, zoneData); err == nil {
				return z, nil
			}
		}
		if firstErr == nil && err != syscall.ENOENT {
			firstErr = err
		}
	}
	// Embedded tzdata, registered by importing time/tzdata.
	if loadFromEmbeddedTZData != nil {
		zoneData, err := loadFromEmbeddedTZData(name)
		if err == nil {
			if z, err = LoadLocationFromTZData(name, []byte(zoneData)); err == nil {
				return z, nil
			}
		}
		if firstErr == nil && err != syscall.ENOENT {
			firstErr = err
		}
	}
	// Last resort: $GOROOT/lib/time/zoneinfo.zip.
	if source, ok := gorootZoneSource(runtime.GOROOT()); ok {
		zoneData, err := loadTzinfo(name, source)
		if err == nil {
			if z, err = LoadLocationFromTZData(name, zoneData); err == nil {
				return z, nil
			}
		}
		if firstErr == nil && err != syscall.ENOENT {
			firstErr = err
		}
	}
	if firstErr != nil {
		return nil, firstErr
	}
	return nil, errors.New("unknown time zone " + name)
}
// readFile reads and returns the content of the named file.
// It is a trivial implementation of os.ReadFile, reimplemented
// here to avoid depending on io/ioutil or os.
// It returns an error if name exceeds maxFileSize bytes.
func readFile(name string) ([]byte, error) {
	f, err := open(name)
	if err != nil {
		return nil, err
	}
	defer closefd(f)

	chunk := make([]byte, 4096)
	var contents []byte
	for {
		var n int
		n, err = read(f, chunk)
		if n > 0 {
			contents = append(contents, chunk[:n]...)
		}
		if n == 0 || err != nil {
			break
		}
		// Checked after appending, so the limit may be exceeded by at
		// most one chunk before we notice.
		if len(contents) > maxFileSize {
			return nil, fileSizeError(name)
		}
	}
	return contents, err
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix && !ios && !android
// Parse "zoneinfo" time zone file.
// This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others.
// See tzfile(5), https://en.wikipedia.org/wiki/Zoneinfo,
// and ftp://munnari.oz.au/pub/oldtz/
package time
import (
"syscall"
)
// platformZoneSources lists candidate tzdata locations, tried in order
// by loadLocation.
//
// Many systems use /usr/share/zoneinfo, Solaris 2 has
// /usr/share/lib/zoneinfo, IRIX 6 has /usr/lib/locale/TZ,
// NixOS has /etc/zoneinfo.
var platformZoneSources = []string{
	"/usr/share/zoneinfo/",
	"/usr/share/lib/zoneinfo/",
	"/usr/lib/locale/TZ/",
	"/etc/zoneinfo",
}
// initLocal initializes localLoc from the TZ environment variable,
// following the POSIX conventions described below, falling back to UTC
// when nothing usable is found.
func initLocal() {
	// consult $TZ to find the time zone to use.
	// no $TZ means use the system default /etc/localtime.
	// $TZ="" means use UTC.
	// $TZ="foo" or $TZ=":foo" if foo is an absolute path, then the file pointed
	// by foo will be used to initialize timezone; otherwise, file
	// /usr/share/zoneinfo/foo will be used.

	tz, ok := syscall.Getenv("TZ")
	switch {
	case !ok:
		// TZ unset: use the system default zone.
		z, err := loadLocation("localtime", []string{"/etc"})
		if err == nil {
			localLoc = *z
			localLoc.name = "Local"
			return
		}
	case tz != "":
		// A leading ':' is allowed (and ignored) per POSIX.
		if tz[0] == ':' {
			tz = tz[1:]
		}
		if tz != "" && tz[0] == '/' {
			// Absolute path: load that file directly.
			if z, err := loadLocation(tz, []string{""}); err == nil {
				localLoc = *z
				if tz == "/etc/localtime" {
					localLoc.name = "Local"
				} else {
					localLoc.name = tz
				}
				return
			}
		} else if tz != "" && tz != "UTC" {
			// Zone name: search the platform sources.
			if z, err := loadLocation(tz, platformZoneSources); err == nil {
				localLoc = *z
				return
			}
		}
	}

	// Fall back to UTC.
	// (TZ="", TZ="UTC", and all load failures land here.)
	localLoc.name = "UTC"
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unicode
// IsDigit reports whether the rune is a decimal digit.
func IsDigit(r rune) bool {
	if r > MaxLatin1 {
		return isExcludingLatin(Digit, r)
	}
	return '0' <= r && r <= '9'
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unicode
// Bit masks for each code point under U+0100, for fast lookup.
// The properties table indexed by these masks backs the Latin-1 fast
// paths of the Is* predicates.
const (
	pC     = 1 << iota // a control character.
	pP                 // a punctuation character.
	pN                 // a numeral.
	pS                 // a symbolic character.
	pZ                 // a spacing character.
	pLu                // an upper-case letter.
	pLl                // a lower-case letter.
	pp                 // a printable character according to Go's definition.
	pg     = pp | pZ   // a graphical character according to the Unicode definition.
	pLo    = pLl | pLu // a letter that is neither upper nor lower case.
	pLmask = pLo       // mask of the letter bits; compare against pLu/pLl to test case (see IsUpper, IsLower).
)
// GraphicRanges defines the set of graphic characters according to Unicode.
var GraphicRanges = []*RangeTable{
	L, M, N, P, S, Zs,
}

// PrintRanges defines the set of printable characters according to Go.
// ASCII space, U+0020, is handled separately.
// (Identical to GraphicRanges except that the spacing category Zs is excluded.)
var PrintRanges = []*RangeTable{
	L, M, N, P, S,
}
// IsGraphic reports whether the rune is defined as a Graphic by Unicode.
// Such characters include letters, marks, numbers, punctuation, symbols, and
// spaces, from categories [L], [M], [N], [P], [S], [Zs].
func IsGraphic(r rune) bool {
	// The uint32 conversion folds negative runes into large values so a
	// single comparison suffices, and uint8 indexing avoids a range check.
	if uint32(r) > MaxLatin1 {
		return In(r, GraphicRanges...)
	}
	return properties[uint8(r)]&pg != 0
}

// IsPrint reports whether the rune is defined as printable by Go. Such
// characters include letters, marks, numbers, punctuation, symbols, and the
// ASCII space character, from categories [L], [M], [N], [P], [S] and the ASCII space
// character. This categorization is the same as [IsGraphic] except that the
// only spacing character is ASCII space, U+0020.
func IsPrint(r rune) bool {
	if uint32(r) > MaxLatin1 {
		return In(r, PrintRanges...)
	}
	return properties[uint8(r)]&pp != 0
}

// IsOneOf reports whether the rune is a member of one of the ranges.
// The function "In" provides a nicer signature and should be used in preference to IsOneOf.
func IsOneOf(ranges []*RangeTable, r rune) bool {
	return In(r, ranges...)
}

// In reports whether the rune is a member of one of the ranges.
func In(r rune, ranges ...*RangeTable) bool {
	for i := range ranges {
		if Is(ranges[i], r) {
			return true
		}
	}
	return false
}
// IsControl reports whether the rune is a control character.
// The [C] ([Other]) Unicode category includes more code points
// such as surrogates; use [Is](C, r) to test for them.
func IsControl(r rune) bool {
	if uint32(r) > MaxLatin1 {
		// All control characters are < MaxLatin1.
		return false
	}
	return properties[uint8(r)]&pC != 0
}

// IsLetter reports whether the rune is a letter (category [L]).
func IsLetter(r rune) bool {
	if uint32(r) > MaxLatin1 {
		return isExcludingLatin(Letter, r)
	}
	return properties[uint8(r)]&pLmask != 0
}

// IsMark reports whether the rune is a mark character (category [M]).
func IsMark(r rune) bool {
	// There are no mark characters in Latin-1, so no fast path.
	return isExcludingLatin(Mark, r)
}

// IsNumber reports whether the rune is a number (category [N]).
func IsNumber(r rune) bool {
	if uint32(r) > MaxLatin1 {
		return isExcludingLatin(Number, r)
	}
	return properties[uint8(r)]&pN != 0
}

// IsPunct reports whether the rune is a Unicode punctuation character
// (category [P]).
func IsPunct(r rune) bool {
	if uint32(r) > MaxLatin1 {
		return Is(Punct, r)
	}
	return properties[uint8(r)]&pP != 0
}
// IsSpace reports whether the rune is a space character as defined
// by Unicode's White Space property; in the Latin-1 space
// this is
//
//	'\t', '\n', '\v', '\f', '\r', ' ', U+0085 (NEL), U+00A0 (NBSP).
//
// Other definitions of spacing characters are set by category
// Z and property [Pattern_White_Space].
func IsSpace(r rune) bool {
	// This property isn't the same as Z; special-case it.
	if uint32(r) > MaxLatin1 {
		return isExcludingLatin(White_Space, r)
	}
	switch r {
	case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0:
		return true
	default:
		return false
	}
}

// IsSymbol reports whether the rune is a symbolic character.
func IsSymbol(r rune) bool {
	if uint32(r) > MaxLatin1 {
		return isExcludingLatin(Symbol, r)
	}
	return properties[uint8(r)]&pS != 0
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package unicode provides data and functions to test some properties of
// Unicode code points.
package unicode
// Limits of the Unicode code space and its common sub-ranges.
const (
	MaxRune         = '\U0010FFFF' // Maximum valid Unicode code point.
	ReplacementChar = '\uFFFD'     // Represents invalid code points.
	MaxASCII        = '\u007F'     // maximum ASCII value.
	MaxLatin1       = '\u00FF'     // maximum Latin-1 value.
)
// RangeTable defines a set of Unicode code points by listing the ranges of
// code points within the set. The ranges are listed in two slices
// to save space: a slice of 16-bit ranges and a slice of 32-bit ranges.
// The two slices must be in sorted order and non-overlapping.
// Also, R32 should contain only values >= 0x10000 (1<<16).
type RangeTable struct {
	R16         []Range16 // sorted, non-overlapping 16-bit ranges
	R32         []Range32 // sorted, non-overlapping ranges >= 1<<16
	LatinOffset int       // number of entries in R16 with Hi <= MaxLatin1
}

// Range16 represents a range of 16-bit Unicode code points. The range runs from Lo to Hi
// inclusive and has the specified stride.
type Range16 struct {
	Lo     uint16
	Hi     uint16
	Stride uint16 // distance between successive members; 1 means every code point
}

// Range32 represents a range of Unicode code points and is used when one or
// more of the values will not fit in 16 bits. The range runs from Lo to Hi
// inclusive and has the specified stride. Lo and Hi must always be >= 1<<16.
type Range32 struct {
	Lo     uint32
	Hi     uint32
	Stride uint32 // distance between successive members; 1 means every code point
}
// CaseRange represents a range of Unicode code points for simple (one
// code point to one code point) case conversion.
// The range runs from Lo to Hi inclusive, with a fixed stride of 1. Deltas
// are the number to add to the code point to reach the code point for a
// different case for that character. They may be negative. If zero, it
// means the character is in the corresponding case. There is a special
// case representing sequences of alternating corresponding Upper and Lower
// pairs. It appears with a fixed Delta of
//
//	{UpperLower, UpperLower, UpperLower}
//
// The constant UpperLower has an otherwise impossible delta value.
type CaseRange struct {
	Lo    uint32 // first code point in the range, inclusive
	Hi    uint32 // last code point in the range, inclusive
	Delta d      // per-case offsets, indexed by UpperCase/LowerCase/TitleCase
}

// SpecialCase represents language-specific case mappings such as Turkish.
// Methods of SpecialCase customize (by overriding) the standard mappings.
type SpecialCase []CaseRange
// BUG(r): There is no mechanism for full case folding, that is, for
// characters that involve multiple runes in the input or output.

// Indices into the Delta arrays inside CaseRanges for case mapping.
const (
	UpperCase = iota
	LowerCase
	TitleCase
	MaxCase // number of cases; also the length of each Delta array
)

type d [MaxCase]rune // to make the CaseRanges text shorter

// If the Delta field of a [CaseRange] is UpperLower, it means
// this CaseRange represents a sequence of the form (say)
// [Upper] [Lower] [Upper] [Lower].
const (
	UpperLower = MaxRune + 1 // (Cannot be a valid delta.)
)

// linearMax is the maximum size table for linear search for non-Latin1 rune.
// Derived by running 'go test -calibrate'.
const linearMax = 18
// is16 reports whether r is in the sorted slice of 16-bit ranges.
func is16(ranges []Range16, r uint16) bool {
	if len(ranges) <= linearMax || r <= MaxLatin1 {
		// Small table (or Latin-1 rune): linear scan, stopping early
		// because the ranges are sorted.
		for i := range ranges {
			rg := &ranges[i]
			if r < rg.Lo {
				return false
			}
			if r <= rg.Hi {
				return rg.Stride == 1 || (r-rg.Lo)%rg.Stride == 0
			}
		}
		return false
	}

	// Large table: binary search over ranges.
	lo, hi := 0, len(ranges)
	for lo < hi {
		mid := int(uint(lo+hi) >> 1)
		rg := &ranges[mid]
		switch {
		case r < rg.Lo:
			hi = mid
		case rg.Hi < r:
			lo = mid + 1
		default:
			return rg.Stride == 1 || (r-rg.Lo)%rg.Stride == 0
		}
	}
	return false
}
// is32 reports whether r is in the sorted slice of 32-bit ranges.
func is32(ranges []Range32, r uint32) bool {
	if len(ranges) <= linearMax {
		// Small table: linear scan, stopping early because the
		// ranges are sorted.
		for i := range ranges {
			rg := &ranges[i]
			if r < rg.Lo {
				return false
			}
			if r <= rg.Hi {
				return rg.Stride == 1 || (r-rg.Lo)%rg.Stride == 0
			}
		}
		return false
	}

	// Large table: binary search over ranges.
	lo, hi := 0, len(ranges)
	for lo < hi {
		mid := int(uint(lo+hi) >> 1)
		rg := ranges[mid]
		switch {
		case r < rg.Lo:
			hi = mid
		case rg.Hi < r:
			lo = mid + 1
		default:
			return rg.Stride == 1 || (r-rg.Lo)%rg.Stride == 0
		}
	}
	return false
}
// Is reports whether the rune is in the specified table of ranges.
func Is(rangeTab *RangeTable, r rune) bool {
	// Compare as uint32 to correctly handle negative runes.
	if r16 := rangeTab.R16; len(r16) > 0 && uint32(r) <= uint32(r16[len(r16)-1].Hi) {
		return is16(r16, uint16(r))
	}
	if r32 := rangeTab.R32; len(r32) > 0 && r >= rune(r32[0].Lo) {
		return is32(r32, uint32(r))
	}
	return false
}

// isExcludingLatin is like Is but skips the table's Latin-1 entries,
// for callers that have already handled runes <= MaxLatin1.
func isExcludingLatin(rangeTab *RangeTable, r rune) bool {
	// Compare as uint32 to correctly handle negative runes.
	if r16, off := rangeTab.R16, rangeTab.LatinOffset; len(r16) > off && uint32(r) <= uint32(r16[len(r16)-1].Hi) {
		return is16(r16[off:], uint16(r))
	}
	if r32 := rangeTab.R32; len(r32) > 0 && r >= rune(r32[0].Lo) {
		return is32(r32, uint32(r))
	}
	return false
}
// IsUpper reports whether the rune is an upper case letter.
func IsUpper(r rune) bool {
	// See comment in IsGraphic.
	if uint32(r) > MaxLatin1 {
		return isExcludingLatin(Upper, r)
	}
	return properties[uint8(r)]&pLmask == pLu
}

// IsLower reports whether the rune is a lower case letter.
func IsLower(r rune) bool {
	// See comment in IsGraphic.
	if uint32(r) > MaxLatin1 {
		return isExcludingLatin(Lower, r)
	}
	return properties[uint8(r)]&pLmask == pLl
}

// IsTitle reports whether the rune is a title case letter.
func IsTitle(r rune) bool {
	if r > MaxLatin1 {
		return isExcludingLatin(Title, r)
	}
	// No title-case letters exist in Latin-1.
	return false
}
// lookupCaseRange returns the CaseRange mapping for rune r or nil if no
// mapping exists for r.
func lookupCaseRange(r rune, caseRange []CaseRange) *CaseRange {
	// Binary search over the sorted ranges.
	lo, hi := 0, len(caseRange)
	for lo < hi {
		mid := int(uint(lo+hi) >> 1)
		cr := &caseRange[mid]
		switch {
		case r < rune(cr.Lo):
			hi = mid
		case rune(cr.Hi) < r:
			lo = mid + 1
		default:
			return cr
		}
	}
	return nil
}
// convertCase converts r to _case using CaseRange cr.
func convertCase(_case int, r rune, cr *CaseRange) rune {
	delta := cr.Delta[_case]
	if delta <= MaxRune {
		// Ordinary mapping: a simple signed offset.
		return r + delta
	}
	// delta is UpperLower: the range is a sequence of alternating
	// Upper/Lower pairs, always starting with an UpperCase letter, whose
	// real deltas look like:
	//	{0, 1, 0}    UpperCase (Lower is next)
	//	{-1, 0, -1}  LowerCase (Upper, Title are previous)
	// Characters at even offsets from the start of the sequence are upper
	// case; odd offsets are lower. The correct mapping is therefore
	// obtained by clearing or setting the low bit of the sequence offset.
	// UpperCase and TitleCase are even constants while LowerCase is odd,
	// so the low bit of _case supplies the target parity.
	return rune(cr.Lo) + ((r-rune(cr.Lo))&^1 | rune(_case&1))
}
// to maps the rune using the specified case mapping.
// It additionally reports whether caseRange contained a mapping for r.
func to(_case int, r rune, caseRange []CaseRange) (mappedRune rune, foundMapping bool) {
	if _case < 0 || MaxCase <= _case {
		return ReplacementChar, false // as reasonable an error as any
	}
	cr := lookupCaseRange(r, caseRange)
	if cr == nil {
		return r, false
	}
	return convertCase(_case, r, cr), true
}
// To maps the rune to the specified case: [UpperCase], [LowerCase], or [TitleCase].
func To(_case int, r rune) rune {
	mapped, _ := to(_case, r, CaseRanges)
	return mapped
}

// ToUpper maps the rune to upper case.
func ToUpper(r rune) rune {
	if r > MaxASCII {
		return To(UpperCase, r)
	}
	// ASCII fast path.
	if 'a' <= r && r <= 'z' {
		return r - ('a' - 'A')
	}
	return r
}

// ToLower maps the rune to lower case.
func ToLower(r rune) rune {
	if r > MaxASCII {
		return To(LowerCase, r)
	}
	// ASCII fast path.
	if 'A' <= r && r <= 'Z' {
		return r + ('a' - 'A')
	}
	return r
}

// ToTitle maps the rune to title case.
func ToTitle(r rune) rune {
	if r > MaxASCII {
		return To(TitleCase, r)
	}
	// For ASCII, title case coincides with upper case.
	if 'a' <= r && r <= 'z' {
		return r - ('a' - 'A')
	}
	return r
}
// ToUpper maps the rune to upper case giving priority to the special mapping.
func (special SpecialCase) ToUpper(r rune) rune {
r1, hadMapping := to(UpperCase, r, []CaseRange(special))
if r1 == r && !hadMapping {
r1 = ToUpper(r)
}
return r1
}
// ToTitle maps the rune to title case giving priority to the special mapping.
func (special SpecialCase) ToTitle(r rune) rune {
r1, hadMapping := to(TitleCase, r, []CaseRange(special))
if r1 == r && !hadMapping {
r1 = ToTitle(r)
}
return r1
}
// ToLower maps the rune to lower case giving priority to the special mapping.
func (special SpecialCase) ToLower(r rune) rune {
	if r1, ok := to(LowerCase, r, []CaseRange(special)); ok || r1 != r {
		return r1
	}
	// The special-case table had no mapping and left r unchanged;
	// fall back to the standard mapping.
	return ToLower(r)
}
// caseOrbit is defined in tables.go as []foldPair. Right now all the
// entries fit in uint16, so use uint16. If that changes, compilation
// will fail (the constants in the composite literal will not fit in uint16)
// and the types here can change to uint32.
//
// A foldPair links a code point (From) to the next member (To) of its
// simple case-folding orbit; SimpleFold walks these links.
type foldPair struct {
	From uint16
	To   uint16
}
// SimpleFold iterates over Unicode code points equivalent under
// the Unicode-defined simple case folding. Among the code points
// equivalent to rune (including rune itself), SimpleFold returns the
// smallest rune > r if one exists, or else the smallest rune >= 0.
// If r is not a valid Unicode code point, SimpleFold(r) returns r.
//
// For example:
//
//	SimpleFold('A') = 'a'
//	SimpleFold('a') = 'A'
//
//	SimpleFold('K') = 'k'
//	SimpleFold('k') = '\u212A' (Kelvin symbol, K)
//	SimpleFold('\u212A') = 'K'
//
//	SimpleFold('1') = '1'
//
//	SimpleFold(-2) = -2
func SimpleFold(r rune) rune {
	// Out-of-range code points fold to themselves.
	if r < 0 || r > MaxRune {
		return r
	}
	// ASCII is handled by a precomputed direct-lookup table.
	if int(r) < len(asciiFold) {
		return rune(asciiFold[r])
	}
	// Consult caseOrbit table for special cases.
	// Binary search for the first entry with From >= r.
	lo := 0
	hi := len(caseOrbit)
	for lo < hi {
		m := int(uint(lo+hi) >> 1) // midpoint without overflow
		if rune(caseOrbit[m].From) < r {
			lo = m + 1
		} else {
			hi = m
		}
	}
	if lo < len(caseOrbit) && rune(caseOrbit[lo].From) == r {
		// r is in a multi-member fold orbit; follow the link.
		return rune(caseOrbit[lo].To)
	}
	// No folding specified. This is a one- or two-element
	// equivalence class containing rune and ToLower(rune)
	// and ToUpper(rune) if they are different from rune.
	if cr := lookupCaseRange(r, CaseRanges); cr != nil {
		if l := convertCase(LowerCase, r, cr); l != r {
			return l
		}
		return convertCase(UpperCase, r, cr)
	}
	return r
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package utf16 implements encoding and decoding of UTF-16 sequences.
package utf16
// The conditions replacementChar==unicode.ReplacementChar and
// maxRune==unicode.MaxRune are verified in the tests.
// Defining them locally avoids this package depending on package unicode.
const (
	replacementChar = '\uFFFD'     // Unicode replacement character
	maxRune         = '\U0010FFFF' // Maximum valid Unicode code point.
)

const (
	// 0xd800-0xdc00 encodes the high 10 bits of a pair.
	// 0xdc00-0xe000 encodes the low 10 bits of a pair.
	// the value is those 20 bits plus 0x10000.
	surr1 = 0xd800 // start of high-surrogate range
	surr2 = 0xdc00 // start of low-surrogate range
	surr3 = 0xe000 // first code point past the surrogates

	surrSelf = 0x10000 // code points below this encode as a single unit
)
// IsSurrogate reports whether the specified Unicode code point
// can appear in a surrogate pair.
func IsSurrogate(r rune) bool {
	// The surrogate block is [surr1, surr3).
	return r >= surr1 && r < surr3
}
// DecodeRune returns the UTF-16 decoding of a surrogate pair.
// If the pair is not a valid UTF-16 surrogate pair, DecodeRune returns
// the Unicode replacement code point U+FFFD.
func DecodeRune(r1, r2 rune) rune {
	hiOK := surr1 <= r1 && r1 < surr2 // r1 must be a high surrogate
	loOK := surr2 <= r2 && r2 < surr3 // r2 must be a low surrogate
	if !hiOK || !loOK {
		return replacementChar
	}
	// Recombine the two 10-bit halves above the BMP.
	return surrSelf + (r1-surr1)<<10 + (r2 - surr2)
}
// EncodeRune returns the UTF-16 surrogate pair r1, r2 for the given rune.
// If the rune is not a valid Unicode code point or does not need encoding,
// EncodeRune returns U+FFFD, U+FFFD.
func EncodeRune(r rune) (r1, r2 rune) {
	if r < surrSelf || r > maxRune {
		return replacementChar, replacementChar
	}
	// Split the 20-bit offset above the BMP into two 10-bit halves.
	v := r - surrSelf
	return surr1 + (v>>10)&0x3ff, surr2 + v&0x3ff
}
// RuneLen returns the number of 16-bit words in the UTF-16 encoding of the rune.
// It returns -1 if the rune is not a valid value to encode in UTF-16.
func RuneLen(r rune) int {
	if (0 <= r && r < surr1) || (surr3 <= r && r < surrSelf) {
		return 1 // encodes as a single code unit
	}
	if surrSelf <= r && r <= maxRune {
		return 2 // needs a surrogate pair
	}
	return -1 // surrogate half, negative, or beyond maxRune
}
// Encode returns the UTF-16 encoding of the Unicode code point sequence s.
func Encode(s []rune) []uint16 {
	// Pre-count the runes needing a surrogate pair so the backing
	// array is allocated exactly once.
	extra := 0
	for _, r := range s {
		if r >= surrSelf {
			extra++
		}
	}
	out := make([]uint16, 0, len(s)+extra)
	// AppendRune applies the same classification as the original
	// per-rune switch: single unit, surrogate pair, or U+FFFD.
	for _, r := range s {
		out = AppendRune(out, r)
	}
	return out
}
// AppendRune appends the UTF-16 encoding of the Unicode code point r
// to the end of p and returns the extended buffer. If the rune is not
// a valid Unicode code point, it appends the encoding of U+FFFD.
func AppendRune(a []uint16, r rune) []uint16 {
	// Fast handling of BMP code points outside the surrogate block.
	if 0 <= r && r < surr1 || surr3 <= r && r < surrSelf {
		return append(a, uint16(r))
	}
	if surrSelf <= r && r <= maxRune {
		// Needs a surrogate sequence.
		r1, r2 := EncodeRune(r)
		return append(a, uint16(r1), uint16(r2))
	}
	// Invalid code point: append the replacement character.
	return append(a, replacementChar)
}
// Decode returns the Unicode code point sequence represented
// by the UTF-16 encoding s.
func Decode(s []uint16) []rune {
	// Scratch space for up to 64 runes; because Decode inlines,
	// this allocation can live on the caller's stack.
	scratch := make([]rune, 0, 64)
	return decode(s, scratch)
}
// decode appends to buf the Unicode code point sequence represented
// by the UTF-16 encoding s and returns the extended buffer.
func decode(s []uint16, buf []rune) []rune {
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case c < surr1, surr3 <= c:
			// Not a surrogate: the code unit is the code point.
			buf = append(buf, rune(c))
		case surr1 <= c && c < surr2 && i+1 < len(s) &&
			surr2 <= s[i+1] && s[i+1] < surr3:
			// High surrogate followed by a low surrogate: consume both.
			buf = append(buf, DecodeRune(rune(c), rune(s[i+1])))
			i++
		default:
			// Unpaired surrogate half.
			buf = append(buf, replacementChar)
		}
	}
	return buf
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package utf8 implements functions and constants to support text encoded in
// UTF-8. It includes functions to translate between runes and UTF-8 byte sequences.
// See https://en.wikipedia.org/wiki/UTF-8
package utf8
// The conditions RuneError==unicode.ReplacementChar and
// MaxRune==unicode.MaxRune are verified in the tests.
// Defining them locally avoids this package depending on package unicode.

// Numbers fundamental to the encoding.
const (
	RuneError = '\uFFFD'     // the "error" Rune or "Unicode replacement character"
	RuneSelf  = 0x80         // characters below RuneSelf are represented as themselves in a single byte.
	MaxRune   = '\U0010FFFF' // Maximum valid Unicode code point.
	UTFMax    = 4            // maximum number of bytes of a UTF-8 encoded Unicode character.
)

// Code points in the surrogate range are not valid for UTF-8.
const (
	surrogateMin = 0xD800
	surrogateMax = 0xDFFF
)

const (
	// Leading-byte templates: tN has the top N bits set (tx marks a
	// continuation byte); maskN extracts the payload bits of a leading byte.
	t1 = 0b00000000
	tx = 0b10000000
	t2 = 0b11000000
	t3 = 0b11100000
	t4 = 0b11110000
	t5 = 0b11111000

	maskx = 0b00111111
	mask2 = 0b00011111
	mask3 = 0b00001111
	mask4 = 0b00000111

	// Largest code point representable in 1, 2, and 3 bytes respectively.
	rune1Max = 1<<7 - 1
	rune2Max = 1<<11 - 1
	rune3Max = 1<<16 - 1

	// The default lowest and highest continuation byte.
	locb = 0b10000000
	hicb = 0b10111111

	// These names of these constants are chosen to give nice alignment in the
	// table below. The first nibble is an index into acceptRanges or F for
	// special one-byte cases. The second nibble is the Rune length or the
	// Status for the special one-byte case.
	xx = 0xF1 // invalid: size 1
	as = 0xF0 // ASCII: size 1
	s1 = 0x02 // accept 0, size 2
	s2 = 0x13 // accept 1, size 3
	s3 = 0x03 // accept 0, size 3
	s4 = 0x23 // accept 2, size 3
	s5 = 0x34 // accept 3, size 4
	s6 = 0x04 // accept 0, size 4
	s7 = 0x44 // accept 4, size 4
)

// The three bytes of the UTF-8 encoding of RuneError, precomputed.
const (
	runeErrorByte0 = t3 | (RuneError >> 12)
	runeErrorByte1 = tx | (RuneError>>6)&maskx
	runeErrorByte2 = tx | RuneError&maskx
)
// first is information about the first byte in a UTF-8 sequence.
// Indexed by the byte value; each entry packs the accept-range index
// (high nibble) and the sequence length (low nibble) — see xx/as/s1..s7.
var first = [256]uint8{
	//   1  2  3  4  5  6  7  8  9  A  B  C  D  E  F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
	//   1  2  3  4  5  6  7  8  9  A  B  C  D  E  F
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
	xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
	s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
	s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
	s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
}
// acceptRange gives the range of valid values for the second byte in a UTF-8
// sequence. The non-default ranges rule out overlong encodings (0xE0, 0xF0
// rows) and surrogates / values above MaxRune (0xED, 0xF4 rows).
type acceptRange struct {
	lo uint8 // lowest value for second byte.
	hi uint8 // highest value for second byte.
}

// acceptRanges has size 16 to avoid bounds checks in the code that uses it.
// Indexed by the high nibble of a `first` table entry.
var acceptRanges = [16]acceptRange{
	0: {locb, hicb},
	1: {0xA0, hicb},
	2: {locb, 0x9F},
	3: {0x90, hicb},
	4: {locb, 0x8F},
}
// FullRune reports whether the bytes in p begin with a full UTF-8 encoding of a rune.
// An invalid encoding is considered a full Rune since it will convert as a width-1 error rune.
func FullRune(p []byte) bool {
	n := len(p)
	if n == 0 {
		return false
	}
	x := first[p[0]]
	if n >= int(x&7) {
		// Enough bytes for the declared sequence length; this also
		// covers ASCII and invalid starters, whose length is 1.
		return true
	}
	// The sequence is short. It still counts as "full" (i.e. an error
	// rune) as soon as any byte seen so far is not a valid continuation.
	accept := acceptRanges[x>>4]
	if n > 1 && (p[1] < accept.lo || accept.hi < p[1]) {
		return true
	}
	if n > 2 && (p[2] < locb || hicb < p[2]) {
		return true
	}
	return false
}
// FullRuneInString is like FullRune but its input is a string.
func FullRuneInString(s string) bool {
	n := len(s)
	if n == 0 {
		return false
	}
	x := first[s[0]]
	if n >= int(x&7) {
		// Enough bytes for the declared sequence length; this also
		// covers ASCII and invalid starters, whose length is 1.
		return true
	}
	// The sequence is short. It still counts as "full" (i.e. an error
	// rune) as soon as any byte seen so far is not a valid continuation.
	accept := acceptRanges[x>>4]
	if n > 1 && (s[1] < accept.lo || accept.hi < s[1]) {
		return true
	}
	if n > 2 && (s[2] < locb || hicb < s[2]) {
		return true
	}
	return false
}
// DecodeRune unpacks the first UTF-8 encoding in p and returns the rune and
// its width in bytes. If p is empty it returns ([RuneError], 0). Otherwise, if
// the encoding is invalid, it returns (RuneError, 1). Both are impossible
// results for correct, non-empty UTF-8.
//
// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
// out of range, or is not the shortest possible UTF-8 encoding for the
// value. No other validation is performed.
func DecodeRune(p []byte) (r rune, size int) {
	// Inlineable fast path for ASCII characters; see #48195.
	// This implementation is weird but effective at rendering the
	// function inlineable. The one-iteration loop is a branch-shape
	// the inliner costs cheaply while still guarding len(p) > 0;
	// do not "simplify" it without re-checking inlining.
	for _, b := range p {
		if b < RuneSelf {
			return rune(b), 1
		}
		break
	}
	// Non-ASCII (or empty) input: defer to the out-of-line decoder.
	r, size = decodeRuneSlow(p)
	return
}
// decodeRuneSlow handles the empty, invalid, and multi-byte cases for
// DecodeRune. Returns (RuneError, 0) for empty input and (RuneError, 1)
// for any invalid or truncated sequence.
func decodeRuneSlow(p []byte) (r rune, size int) {
	n := len(p)
	if n < 1 {
		return RuneError, 0
	}
	p0 := p[0]
	x := first[p0]
	if x >= as {
		// x is either as (ASCII) or xx (invalid starter); both decode
		// with size 1. The following code simulates an additional check
		// for x == xx and handling the ASCII and invalid cases
		// accordingly. This mask-and-or approach prevents an additional
		// branch.
		mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF.
		return rune(p[0])&^mask | RuneError&mask, 1
	}
	sz := int(x & 7)             // declared sequence length (2..4)
	accept := acceptRanges[x>>4] // valid range for the second byte
	if n < sz {
		return RuneError, 1 // truncated sequence
	}
	b1 := p[1]
	if b1 < accept.lo || accept.hi < b1 {
		return RuneError, 1
	}
	if sz <= 2 { // <= instead of == to help the compiler eliminate some bounds checks
		return rune(p0&mask2)<<6 | rune(b1&maskx), 2
	}
	b2 := p[2]
	if b2 < locb || hicb < b2 {
		return RuneError, 1
	}
	if sz <= 3 {
		return rune(p0&mask3)<<12 | rune(b1&maskx)<<6 | rune(b2&maskx), 3
	}
	b3 := p[3]
	if b3 < locb || hicb < b3 {
		return RuneError, 1
	}
	return rune(p0&mask4)<<18 | rune(b1&maskx)<<12 | rune(b2&maskx)<<6 | rune(b3&maskx), 4
}
// DecodeRuneInString is like [DecodeRune] but its input is a string. If s is
// empty it returns ([RuneError], 0). Otherwise, if the encoding is invalid, it
// returns (RuneError, 1). Both are impossible results for correct, non-empty
// UTF-8.
//
// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
// out of range, or is not the shortest possible UTF-8 encoding for the
// value. No other validation is performed.
func DecodeRuneInString(s string) (r rune, size int) {
	// Inlineable fast path for ASCII characters; see #48195.
	// This implementation is a bit weird but effective at rendering the
	// function inlineable. The explicit else (instead of an early
	// return) keeps the inliner's cost estimate low; do not restructure
	// without re-checking inlining.
	if s != "" && s[0] < RuneSelf {
		return rune(s[0]), 1
	} else {
		r, size = decodeRuneInStringSlow(s)
	}
	return
}
// decodeRuneInStringSlow handles the empty, invalid, and multi-byte cases
// for DecodeRuneInString; it mirrors decodeRuneSlow exactly, for strings.
func decodeRuneInStringSlow(s string) (rune, int) {
	n := len(s)
	if n < 1 {
		return RuneError, 0
	}
	s0 := s[0]
	x := first[s0]
	if x >= as {
		// x is either as (ASCII) or xx (invalid starter); both decode
		// with size 1. The following code simulates an additional check
		// for x == xx and handling the ASCII and invalid cases
		// accordingly. This mask-and-or approach prevents an additional
		// branch.
		mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF.
		return rune(s[0])&^mask | RuneError&mask, 1
	}
	sz := int(x & 7)             // declared sequence length (2..4)
	accept := acceptRanges[x>>4] // valid range for the second byte
	if n < sz {
		return RuneError, 1 // truncated sequence
	}
	s1 := s[1]
	if s1 < accept.lo || accept.hi < s1 {
		return RuneError, 1
	}
	if sz <= 2 { // <= instead of == to help the compiler eliminate some bounds checks
		return rune(s0&mask2)<<6 | rune(s1&maskx), 2
	}
	s2 := s[2]
	if s2 < locb || hicb < s2 {
		return RuneError, 1
	}
	if sz <= 3 {
		return rune(s0&mask3)<<12 | rune(s1&maskx)<<6 | rune(s2&maskx), 3
	}
	s3 := s[3]
	if s3 < locb || hicb < s3 {
		return RuneError, 1
	}
	return rune(s0&mask4)<<18 | rune(s1&maskx)<<12 | rune(s2&maskx)<<6 | rune(s3&maskx), 4
}
// DecodeLastRune unpacks the last UTF-8 encoding in p and returns the rune and
// its width in bytes. If p is empty it returns ([RuneError], 0). Otherwise, if
// the encoding is invalid, it returns (RuneError, 1). Both are impossible
// results for correct, non-empty UTF-8.
//
// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
// out of range, or is not the shortest possible UTF-8 encoding for the
// value. No other validation is performed.
func DecodeLastRune(p []byte) (r rune, size int) {
	end := len(p)
	if end == 0 {
		return RuneError, 0
	}
	start := end - 1
	if c := p[start]; c < RuneSelf {
		return rune(c), 1
	}
	// Scan backwards for a start byte, but never more than UTFMax bytes:
	// this guards against O(n^2) behavior when traversing backwards
	// through strings with long sequences of invalid UTF-8.
	lim := end - UTFMax
	if lim < 0 {
		lim = 0
	}
	for start--; start >= lim; start-- {
		if RuneStart(p[start]) {
			break
		}
	}
	if start < 0 {
		start = 0
	}
	r, size = DecodeRune(p[start:end])
	if start+size != end {
		// The rune starting there does not extend exactly to the end:
		// the trailing bytes are invalid.
		return RuneError, 1
	}
	return r, size
}
// DecodeLastRuneInString is like [DecodeLastRune] but its input is a string. If
// s is empty it returns ([RuneError], 0). Otherwise, if the encoding is invalid,
// it returns (RuneError, 1). Both are impossible results for correct,
// non-empty UTF-8.
//
// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
// out of range, or is not the shortest possible UTF-8 encoding for the
// value. No other validation is performed.
func DecodeLastRuneInString(s string) (r rune, size int) {
	end := len(s)
	if end == 0 {
		return RuneError, 0
	}
	start := end - 1
	if c := s[start]; c < RuneSelf {
		return rune(c), 1
	}
	// Scan backwards for a start byte, but never more than UTFMax bytes:
	// this guards against O(n^2) behavior when traversing backwards
	// through strings with long sequences of invalid UTF-8.
	lim := end - UTFMax
	if lim < 0 {
		lim = 0
	}
	for start--; start >= lim; start-- {
		if RuneStart(s[start]) {
			break
		}
	}
	if start < 0 {
		start = 0
	}
	r, size = DecodeRuneInString(s[start:end])
	if start+size != end {
		// The rune starting there does not extend exactly to the end:
		// the trailing bytes are invalid.
		return RuneError, 1
	}
	return r, size
}
// RuneLen returns the number of bytes in the UTF-8 encoding of the rune.
// It returns -1 if the rune is not a valid value to encode in UTF-8.
func RuneLen(r rune) int {
	if r < 0 {
		return -1
	}
	if r <= rune1Max {
		return 1
	}
	if r <= rune2Max {
		return 2
	}
	if r <= rune3Max {
		// Surrogate halves fall in this size band but are not encodable.
		if surrogateMin <= r && r <= surrogateMax {
			return -1
		}
		return 3
	}
	if r <= MaxRune {
		return 4
	}
	return -1
}
// EncodeRune writes into p (which must be large enough) the UTF-8 encoding of the rune.
// If the rune is out of range, it writes the encoding of [RuneError].
// It returns the number of bytes written.
func EncodeRune(p []byte, r rune) int {
	// This function is inlineable for fast handling of ASCII;
	// the uint32 conversion makes negative runes take the slow path.
	if uint32(r) > rune1Max {
		return encodeRuneNonASCII(p, r)
	}
	p[0] = byte(r)
	return 1
}
// encodeRuneNonASCII writes the 2-, 3-, or 4-byte UTF-8 encoding of r into
// p; surrogates and out-of-range values get the 3-byte RuneError encoding.
func encodeRuneNonASCII(p []byte, r rune) int {
	// Negative values are erroneous. Making it unsigned addresses the problem.
	switch i := uint32(r); {
	case i <= rune2Max:
		_ = p[1] // eliminate bounds checks
		p[0] = t2 | byte(r>>6)
		p[1] = tx | byte(r)&maskx
		return 2
	case i < surrogateMin, surrogateMax < i && i <= rune3Max:
		// 3-byte range, excluding the surrogate block.
		_ = p[2] // eliminate bounds checks
		p[0] = t3 | byte(r>>12)
		p[1] = tx | byte(r>>6)&maskx
		p[2] = tx | byte(r)&maskx
		return 3
	case i > rune3Max && i <= MaxRune:
		_ = p[3] // eliminate bounds checks
		p[0] = t4 | byte(r>>18)
		p[1] = tx | byte(r>>12)&maskx
		p[2] = tx | byte(r>>6)&maskx
		p[3] = tx | byte(r)&maskx
		return 4
	default:
		// Surrogate half or beyond MaxRune: encode RuneError.
		_ = p[2] // eliminate bounds checks
		p[0] = runeErrorByte0
		p[1] = runeErrorByte1
		p[2] = runeErrorByte2
		return 3
	}
}
// AppendRune appends the UTF-8 encoding of r to the end of p and
// returns the extended buffer. If the rune is out of range,
// it appends the encoding of [RuneError].
func AppendRune(p []byte, r rune) []byte {
	// This function is inlineable for fast handling of ASCII;
	// the uint32 conversion makes negative runes take the slow path.
	if uint32(r) > rune1Max {
		return appendRuneNonASCII(p, r)
	}
	return append(p, byte(r))
}
// appendRuneNonASCII appends the 2-, 3-, or 4-byte UTF-8 encoding of r;
// surrogates and out-of-range values get the RuneError encoding.
func appendRuneNonASCII(p []byte, r rune) []byte {
	// Negative values are erroneous. Making it unsigned addresses the problem.
	u := uint32(r)
	if u <= rune2Max {
		return append(p, t2|byte(u>>6), tx|byte(u)&maskx)
	}
	if u < surrogateMin || (surrogateMax < u && u <= rune3Max) {
		return append(p, t3|byte(u>>12), tx|byte(u>>6)&maskx, tx|byte(u)&maskx)
	}
	if u > rune3Max && u <= MaxRune {
		return append(p, t4|byte(u>>18), tx|byte(u>>12)&maskx, tx|byte(u>>6)&maskx, tx|byte(u)&maskx)
	}
	// Surrogate half or beyond MaxRune.
	return append(p, runeErrorByte0, runeErrorByte1, runeErrorByte2)
}
// RuneCount returns the number of runes in p. Erroneous and short
// encodings are treated as single runes of width 1 byte.
func RuneCount(p []byte) int {
	// Count the leading ASCII bytes directly.
	i := 0
	for i < len(p) && p[i] < RuneSelf {
		i++
	}
	if i == len(p) {
		return i
	}
	// Hand the non-ASCII remainder to the string counter, which uses
	// the range loop's built-in UTF-8 decoding.
	return i + RuneCountInString(string(p[i:]))
}
// RuneCountInString is like [RuneCount] but its input is a string.
func RuneCountInString(s string) int {
	// A range loop over a string advances one rune (not one byte)
	// per iteration, so counting iterations counts runes.
	count := 0
	for range s {
		count++
	}
	return count
}
// RuneStart reports whether the byte could be the first byte of an encoded,
// possibly invalid rune. Second and subsequent bytes always have the top two
// bits set to 10.
func RuneStart(b byte) bool {
	// Continuation bytes occupy exactly [0x80, 0xC0); everything else
	// can start a (possibly invalid) rune.
	return b < 0x80 || b >= 0xC0
}
// ptrSize is the size in bytes of a uintptr: 4 on 32-bit, 8 on 64-bit targets.
const ptrSize = 4 << (^uintptr(0) >> 63)

// hiBits has the top bit of every byte set in a ptrSize-wide word;
// word(s)&hiBits == 0 proves all ptrSize bytes are ASCII.
const hiBits = 0x8080808080808080 >> (64 - 8*ptrSize)

// word loads the first ptrSize bytes of s as a little-endian machine word.
// The ptrSize comparison is resolved at compile time, so only one branch
// survives; callers guarantee len(s) > ptrSize.
func word[T string | []byte](s T) uintptr {
	if ptrSize == 4 {
		return uintptr(s[0]) | uintptr(s[1])<<8 | uintptr(s[2])<<16 | uintptr(s[3])<<24
	}
	return uintptr(uint64(s[0]) | uint64(s[1])<<8 | uint64(s[2])<<16 | uint64(s[3])<<24 | uint64(s[4])<<32 | uint64(s[5])<<40 | uint64(s[6])<<48 | uint64(s[7])<<56)
}
// Valid reports whether p consists entirely of valid UTF-8-encoded runes.
func Valid(p []byte) bool {
	// This optimization avoids the need to recompute the capacity
	// when generating code for slicing p, bringing it to parity with
	// ValidString, which was 20% faster on long ASCII strings.
	p = p[:len(p):len(p)]
	for len(p) > 0 {
		p0 := p[0]
		if p0 < RuneSelf {
			p = p[1:]
			// If there's one ASCII byte, there are probably more.
			// Advance quickly through ASCII-only data, escalating from
			// one machine word to two, then four per iteration.
			// Note: using > instead of >= here is intentional. That avoids
			// needing pointing-past-the-end fixup on the slice operations.
			if len(p) > ptrSize && word(p)&hiBits == 0 {
				p = p[ptrSize:]
				if len(p) > 2*ptrSize && (word(p)|word(p[ptrSize:]))&hiBits == 0 {
					p = p[2*ptrSize:]
					for len(p) > 4*ptrSize && ((word(p)|word(p[ptrSize:]))|(word(p[2*ptrSize:])|word(p[3*ptrSize:])))&hiBits == 0 {
						p = p[4*ptrSize:]
					}
				}
			}
			continue
		}
		// Multi-byte sequence: check declared length and each
		// continuation byte against the acceptance tables.
		x := first[p0]
		size := int(x & 7)
		accept := acceptRanges[x>>4]
		switch size {
		case 2:
			if len(p) < 2 || p[1] < accept.lo || accept.hi < p[1] {
				return false
			}
			p = p[2:]
		case 3:
			if len(p) < 3 || p[1] < accept.lo || accept.hi < p[1] || p[2] < locb || hicb < p[2] {
				return false
			}
			p = p[3:]
		case 4:
			if len(p) < 4 || p[1] < accept.lo || accept.hi < p[1] || p[2] < locb || hicb < p[2] || p[3] < locb || hicb < p[3] {
				return false
			}
			p = p[4:]
		default:
			return false // illegal starter byte
		}
	}
	return true
}
// ValidString reports whether s consists entirely of valid UTF-8-encoded runes.
// It mirrors Valid byte-for-byte, specialized for strings.
func ValidString(s string) bool {
	for len(s) > 0 {
		s0 := s[0]
		if s0 < RuneSelf {
			s = s[1:]
			// If there's one ASCII byte, there are probably more.
			// Advance quickly through ASCII-only data, escalating from
			// one machine word to two, then four per iteration.
			// Note: using > instead of >= here is intentional. That avoids
			// needing pointing-past-the-end fixup on the slice operations.
			if len(s) > ptrSize && word(s)&hiBits == 0 {
				s = s[ptrSize:]
				if len(s) > 2*ptrSize && (word(s)|word(s[ptrSize:]))&hiBits == 0 {
					s = s[2*ptrSize:]
					for len(s) > 4*ptrSize && ((word(s)|word(s[ptrSize:]))|(word(s[2*ptrSize:])|word(s[3*ptrSize:])))&hiBits == 0 {
						s = s[4*ptrSize:]
					}
				}
			}
			continue
		}
		// Multi-byte sequence: check declared length and each
		// continuation byte against the acceptance tables.
		x := first[s0]
		size := int(x & 7)
		accept := acceptRanges[x>>4]
		switch size {
		case 2:
			if len(s) < 2 || s[1] < accept.lo || accept.hi < s[1] {
				return false
			}
			s = s[2:]
		case 3:
			if len(s) < 3 || s[1] < accept.lo || accept.hi < s[1] || s[2] < locb || hicb < s[2] {
				return false
			}
			s = s[3:]
		case 4:
			if len(s) < 4 || s[1] < accept.lo || accept.hi < s[1] || s[2] < locb || hicb < s[2] || s[3] < locb || hicb < s[3] {
				return false
			}
			s = s[4:]
		default:
			return false // illegal starter byte
		}
	}
	return true
}
// ValidRune reports whether r can be legally encoded as UTF-8.
// Code points that are out of range or a surrogate half are illegal.
func ValidRune(r rune) bool {
switch {
case 0 <= r && r < surrogateMin:
return true
case surrogateMax < r && r <= MaxRune:
return true
}
return false
}
//go:build gofuzz
package fuzz_ng_archive_tar
import (
"google.golang.org/protobuf/proto"
"archive/tar"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in that serves a fixed
// byte buffer to Read and discards all Writes.
type FuzzingConn struct {
	buf    []byte // data served to Read
	offset int    // number of bytes already consumed
}

// Read copies the next unread bytes of c.buf into b, following the
// io.Reader contract: it returns the number of bytes actually copied
// and io.EOF once the buffer is exhausted.
//
// Bug fix: the original compared len(b) < len(c.buf)+c.offset (plus
// instead of minus). Near the end of the buffer that branch advanced
// offset by len(b) and reported n=len(b) even though only
// len(c.buf)-c.offset bytes had been copied, handing callers
// uninitialized bytes.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b is too small for the rest: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b can hold everything that is left.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write pretends to consume all of b; the data is discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}
// Close marks the buffer as fully consumed so later Reads return EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: deadlines are meaningless for an in-memory buffer.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzzer-provided bytes in a FuzzingConn.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helper constructors if needed.

// CreateBigInt builds a big.Int from the big-endian bytes a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the fuzzer-provided bytes in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a protobuf []int64 to the []int the API expects.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows a protobuf []int64 to []uint16 (values wrap
// modulo 2^16, as Go integer conversion defines).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or 0 for an empty string.
func GetRune(s string) rune {
	// A range loop decodes UTF-8; returning on the first iteration
	// yields the first rune.
	for _, r := range s {
		return r
	}
	return 0
}
// FormatNewFromFuzz maps the fuzzer's FormatEnum onto a tar.Format.
// Unknown values fall back to tar.FormatUnknown.
func FormatNewFromFuzz(p FormatEnum) tar.Format {
	table := [...]tar.Format{
		tar.FormatUnknown,
		tar.FormatUSTAR,
		tar.FormatPAX,
		tar.FormatGNU,
	}
	if p >= 0 && int(p) < len(table) {
		return table[p]
	}
	return tar.FormatUnknown
}
// ConvertFormatNewFromFuzz maps a slice of FormatEnum onto tar.Formats.
func ConvertFormatNewFromFuzz(a []FormatEnum) []tar.Format {
	out := make([]tar.Format, 0, len(a))
	for _, e := range a {
		out = append(out, FormatNewFromFuzz(e))
	}
	return out
}
// FuzzNG_valid is the entry point for inputs known to be valid protobuf;
// a failed unmarshal is therefore a harness bug and panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics come from the target's expected error paths;
		// anything else is a real crash and is re-raised.
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point when we are unsure the input is a
// valid protobuf: malformed inputs are simply rejected.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics come from the target's expected error paths;
		// anything else is a real crash and is re-raised.
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-dump setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the generated call list against archive/tar.
// Constructor results are kept in per-type pools; later calls pick their
// receiver from the matching pool round-robin. Returns 1 when the whole
// list was replayed, 0 when cut short (error from the target, or the
// per-input work cap).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On first use, optionally write a Go reproducer of this input
		// to the file named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Result pools and their round-robin cursors.
	// NOTE(review): nothing in the loop below appends to HeaderResults,
	// so the Header cases can never fire — presumably the generator
	// emits no Header constructor; verify against the .proto.
	var HeaderResults []*tar.Header
	HeaderResultsIndex := 0
	var ReaderResults []*tar.Reader
	ReaderResultsIndex := 0
	var WriterResults []*tar.Writer
	WriterResultsIndex := 0
	for l := range gen.List {
		if l > 4096 {
			// Cap the amount of work done for a single input.
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_HeaderNgdotFileInfo:
			if len(HeaderResults) == 0 {
				continue
			}
			arg0 := HeaderResults[HeaderResultsIndex]
			HeaderResultsIndex = (HeaderResultsIndex + 1) % len(HeaderResults)
			arg0.FileInfo()
		case *NgoloFuzzOne_FormatNgdotString:
			arg0 := FormatNewFromFuzz(a.FormatNgdotString.F)
			arg0.String()
		case *NgoloFuzzOne_NewReader:
			arg0 := bytes.NewReader(a.NewReader.R)
			r0 := tar.NewReader(arg0)
			if r0 != nil {
				ReaderResults = append(ReaderResults, r0)
			}
		case *NgoloFuzzOne_ReaderNgdotNext:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.Next()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotRead:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.Read(a.ReaderNgdotRead.B)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_NewWriter:
			arg0 := bytes.NewBuffer(a.NewWriter.W)
			r0 := tar.NewWriter(arg0)
			if r0 != nil {
				WriterResults = append(WriterResults, r0)
			}
		case *NgoloFuzzOne_WriterNgdotFlush:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Flush()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotWriteHeader:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			if len(HeaderResults) == 0 {
				continue
			}
			arg1 := HeaderResults[HeaderResultsIndex]
			HeaderResultsIndex = (HeaderResultsIndex + 1) % len(HeaderResults)
			r0 := arg0.WriteHeader(arg1)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotWrite:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			_, r1 := arg0.Write(a.WriterNgdotWrite.B)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotClose:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Close()
			if r0 != nil {
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes to w a Go-source reproducer of the call sequence
// FuzzNG_List would perform for gen. It must track the same round-robin
// pool indices as FuzzNG_List so variable numbers line up.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	HeaderNb := 0
	HeaderResultsIndex := 0
	ReaderNb := 0
	ReaderResultsIndex := 0
	WriterNb := 0
	WriterResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_HeaderNgdotFileInfo:
			if HeaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Header%d.FileInfo()\n", HeaderResultsIndex))
			HeaderResultsIndex = (HeaderResultsIndex + 1) % HeaderNb
		case *NgoloFuzzOne_FormatNgdotString:
			w.WriteString(fmt.Sprintf("FormatNewFromFuzz(%#+v).String()\n", a.FormatNgdotString.F))
		case *NgoloFuzzOne_NewReader:
			w.WriteString(fmt.Sprintf("Reader%d := tar.NewReader(bytes.NewReader(%#+v))\n", ReaderNb, a.NewReader.R))
			ReaderNb = ReaderNb + 1
		case *NgoloFuzzOne_ReaderNgdotNext:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Next()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotRead:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Read(%#+v)\n", ReaderResultsIndex, a.ReaderNgdotRead.B))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_NewWriter:
			w.WriteString(fmt.Sprintf("Writer%d := tar.NewWriter(bytes.NewBuffer(%#+v))\n", WriterNb, a.NewWriter.W))
			WriterNb = WriterNb + 1
		case *NgoloFuzzOne_WriterNgdotFlush:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Flush()\n", WriterResultsIndex))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotWriteHeader:
			if WriterNb == 0 {
				continue
			}
			if HeaderNb == 0 {
				continue
			}
			// The "+ 0" keeps the printed index aligned with the value
			// FuzzNG_List uses before advancing its cursor.
			w.WriteString(fmt.Sprintf("Writer%d.WriteHeader(Header%d)\n", WriterResultsIndex, (HeaderResultsIndex+0)%HeaderNb))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
			HeaderResultsIndex = (HeaderResultsIndex + 1) % HeaderNb
		case *NgoloFuzzOne_WriterNgdotWrite:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Write(%#+v)\n", WriterResultsIndex, a.WriterNgdotWrite.B))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotClose:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Close()\n", WriterResultsIndex))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_archive_tar
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// NOTE(review): protoc-generated code (see the DO NOT EDIT header);
// regenerate from ngolofuzz.proto rather than hand-editing.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// FormatEnum is the wire-level enum the fuzzer maps onto tar.Format.
type FormatEnum int32

const (
	FormatEnum_FormatUnknown FormatEnum = 0
	FormatEnum_FormatUSTAR   FormatEnum = 1
	FormatEnum_FormatPAX     FormatEnum = 2
	FormatEnum_FormatGNU     FormatEnum = 3
)

// Enum value maps for FormatEnum.
var (
	FormatEnum_name = map[int32]string{
		0: "FormatUnknown",
		1: "FormatUSTAR",
		2: "FormatPAX",
		3: "FormatGNU",
	}
	FormatEnum_value = map[string]int32{
		"FormatUnknown": 0,
		"FormatUSTAR":   1,
		"FormatPAX":     2,
		"FormatGNU":     3,
	}
)

// Enum returns a pointer to a copy of x.
func (x FormatEnum) Enum() *FormatEnum {
	p := new(FormatEnum)
	*p = x
	return p
}

// String returns the protobuf name of the enum value.
func (x FormatEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (FormatEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}

func (FormatEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[0]
}

// Number returns the enum's wire number.
func (x FormatEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use FormatEnum.Descriptor instead.
func (FormatEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// HeaderNgdotFileInfoArgs is the (empty) argument message for a
// Header.FileInfo call. Generated code; do not hand-edit.
type HeaderNgdotFileInfoArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *HeaderNgdotFileInfoArgs) Reset() {
	*x = HeaderNgdotFileInfoArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *HeaderNgdotFileInfoArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*HeaderNgdotFileInfoArgs) ProtoMessage() {}

func (x *HeaderNgdotFileInfoArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HeaderNgdotFileInfoArgs.ProtoReflect.Descriptor instead.
func (*HeaderNgdotFileInfoArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// FormatNgdotStringArgs is the generated binding for ngolofuzz.FormatNgdotStringArgs
// (single FormatEnum field F; protoc-gen-go output, do not hand-edit).
type FormatNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	F             FormatEnum             `protobuf:"varint,1,opt,name=f,proto3,enum=ngolofuzz.FormatEnum" json:"f,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *FormatNgdotStringArgs) Reset() {
	*x = FormatNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *FormatNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FormatNgdotStringArgs) ProtoMessage() {}

func (x *FormatNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FormatNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*FormatNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetF returns F, or FormatUnknown for a nil receiver.
func (x *FormatNgdotStringArgs) GetF() FormatEnum {
	if x != nil {
		return x.F
	}
	return FormatEnum_FormatUnknown
}
// NewReaderArgs is the generated binding for ngolofuzz.NewReaderArgs
// (bytes field R; protoc-gen-go output, do not hand-edit).
type NewReaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NewReaderArgs) Reset() {
	*x = NewReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NewReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewReaderArgs) ProtoMessage() {}

func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetR returns R, or nil for a nil receiver.
func (x *NewReaderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// ReaderNgdotNextArgs is the generated binding for the field-less
// ngolofuzz.ReaderNgdotNextArgs message (protoc-gen-go output; do not hand-edit).
type ReaderNgdotNextArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ReaderNgdotNextArgs) Reset() {
	*x = ReaderNgdotNextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ReaderNgdotNextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ReaderNgdotNextArgs) ProtoMessage() {}

func (x *ReaderNgdotNextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotNextArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotNextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// ReaderNgdotReadArgs is the generated binding for ngolofuzz.ReaderNgdotReadArgs
// (bytes field B; protoc-gen-go output, do not hand-edit).
type ReaderNgdotReadArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	B             []byte                 `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ReaderNgdotReadArgs) Reset() {
	*x = ReaderNgdotReadArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ReaderNgdotReadArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ReaderNgdotReadArgs) ProtoMessage() {}

func (x *ReaderNgdotReadArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetB returns B, or nil for a nil receiver.
func (x *ReaderNgdotReadArgs) GetB() []byte {
	if x != nil {
		return x.B
	}
	return nil
}
// NewWriterArgs is the generated binding for ngolofuzz.NewWriterArgs
// (bytes field W; protoc-gen-go output, do not hand-edit).
type NewWriterArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NewWriterArgs) Reset() {
	*x = NewWriterArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NewWriterArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewWriterArgs) ProtoMessage() {}

func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetW returns W, or nil for a nil receiver.
func (x *NewWriterArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}
// WriterNgdotFlushArgs is the generated binding for the field-less
// ngolofuzz.WriterNgdotFlushArgs message (protoc-gen-go output; do not hand-edit).
type WriterNgdotFlushArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *WriterNgdotFlushArgs) Reset() {
	*x = WriterNgdotFlushArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *WriterNgdotFlushArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotFlushArgs) ProtoMessage() {}

func (x *WriterNgdotFlushArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotFlushArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotFlushArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// WriterNgdotWriteHeaderArgs is the generated binding for the field-less
// ngolofuzz.WriterNgdotWriteHeaderArgs message (protoc-gen-go output; do not hand-edit).
type WriterNgdotWriteHeaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *WriterNgdotWriteHeaderArgs) Reset() {
	*x = WriterNgdotWriteHeaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *WriterNgdotWriteHeaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotWriteHeaderArgs) ProtoMessage() {}

func (x *WriterNgdotWriteHeaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotWriteHeaderArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteHeaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// WriterNgdotWriteArgs is the generated binding for ngolofuzz.WriterNgdotWriteArgs
// (bytes field B; protoc-gen-go output, do not hand-edit).
type WriterNgdotWriteArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	B             []byte                 `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *WriterNgdotWriteArgs) Reset() {
	*x = WriterNgdotWriteArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *WriterNgdotWriteArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotWriteArgs) ProtoMessage() {}

func (x *WriterNgdotWriteArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetB returns B, or nil for a nil receiver.
func (x *WriterNgdotWriteArgs) GetB() []byte {
	if x != nil {
		return x.B
	}
	return nil
}
// WriterNgdotCloseArgs is the generated binding for the field-less
// ngolofuzz.WriterNgdotCloseArgs message (protoc-gen-go output; do not hand-edit).
type WriterNgdotCloseArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *WriterNgdotCloseArgs) Reset() {
	*x = WriterNgdotCloseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *WriterNgdotCloseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotCloseArgs) ProtoMessage() {}

func (x *WriterNgdotCloseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCloseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// NgoloFuzzOne is the generated oneof wrapper selecting a single fuzzed API
// call (protoc-gen-go output; do not hand-edit).
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_HeaderNgdotFileInfo
	//	*NgoloFuzzOne_FormatNgdotString
	//	*NgoloFuzzOne_NewReader
	//	*NgoloFuzzOne_ReaderNgdotNext
	//	*NgoloFuzzOne_ReaderNgdotRead
	//	*NgoloFuzzOne_NewWriter
	//	*NgoloFuzzOne_WriterNgdotFlush
	//	*NgoloFuzzOne_WriterNgdotWriteHeader
	//	*NgoloFuzzOne_WriterNgdotWrite
	//	*NgoloFuzzOne_WriterNgdotClose
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetItem returns the populated oneof variant, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below each return the corresponding oneof payload,
// or nil when a different variant (or no variant) is set.
func (x *NgoloFuzzOne) GetHeaderNgdotFileInfo() *HeaderNgdotFileInfoArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_HeaderNgdotFileInfo); ok {
			return x.HeaderNgdotFileInfo
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetFormatNgdotString() *FormatNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FormatNgdotString); ok {
			return x.FormatNgdotString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
			return x.NewReader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotNext() *ReaderNgdotNextArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotNext); ok {
			return x.ReaderNgdotNext
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotRead() *ReaderNgdotReadArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotRead); ok {
			return x.ReaderNgdotRead
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
			return x.NewWriter
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotFlush() *WriterNgdotFlushArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotFlush); ok {
			return x.WriterNgdotFlush
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotWriteHeader() *WriterNgdotWriteHeaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWriteHeader); ok {
			return x.WriterNgdotWriteHeader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotWrite() *WriterNgdotWriteArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWrite); ok {
			return x.WriterNgdotWrite
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotClose() *WriterNgdotCloseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotClose); ok {
			return x.WriterNgdotClose
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof wrapper.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_HeaderNgdotFileInfo struct {
	HeaderNgdotFileInfo *HeaderNgdotFileInfoArgs `protobuf:"bytes,1,opt,name=HeaderNgdotFileInfo,proto3,oneof"`
}

type NgoloFuzzOne_FormatNgdotString struct {
	FormatNgdotString *FormatNgdotStringArgs `protobuf:"bytes,2,opt,name=FormatNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_NewReader struct {
	NewReader *NewReaderArgs `protobuf:"bytes,3,opt,name=NewReader,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotNext struct {
	ReaderNgdotNext *ReaderNgdotNextArgs `protobuf:"bytes,4,opt,name=ReaderNgdotNext,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotRead struct {
	ReaderNgdotRead *ReaderNgdotReadArgs `protobuf:"bytes,5,opt,name=ReaderNgdotRead,proto3,oneof"`
}

type NgoloFuzzOne_NewWriter struct {
	NewWriter *NewWriterArgs `protobuf:"bytes,6,opt,name=NewWriter,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotFlush struct {
	WriterNgdotFlush *WriterNgdotFlushArgs `protobuf:"bytes,7,opt,name=WriterNgdotFlush,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotWriteHeader struct {
	WriterNgdotWriteHeader *WriterNgdotWriteHeaderArgs `protobuf:"bytes,8,opt,name=WriterNgdotWriteHeader,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotWrite struct {
	WriterNgdotWrite *WriterNgdotWriteArgs `protobuf:"bytes,9,opt,name=WriterNgdotWrite,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotClose struct {
	WriterNgdotClose *WriterNgdotCloseArgs `protobuf:"bytes,10,opt,name=WriterNgdotClose,proto3,oneof"`
}

func (*NgoloFuzzOne_HeaderNgdotFileInfo) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_FormatNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotNext) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotRead) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotFlush) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotWriteHeader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotWrite) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotClose) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the generated oneof wrapper carrying one primitive fuzz
// value (protoc-gen-go output; do not hand-edit).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetItem returns the populated oneof variant, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each Get* accessor returns the variant's value, or the zero value when a
// different variant (or no variant) is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every oneof wrapper.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated top-level message: an ordered sequence of
// fuzzed calls (protoc-gen-go output; do not hand-edit).
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// GetList returns List, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto, emitted verbatim by protoc-gen-go. Do not hand-edit.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x19\n" +
	"\x17HeaderNgdotFileInfoArgs\"<\n" +
	"\x15FormatNgdotStringArgs\x12#\n" +
	"\x01f\x18\x01 \x01(\x0e2\x15.ngolofuzz.FormatEnumR\x01f\"\x1d\n" +
	"\rNewReaderArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\"\x15\n" +
	"\x13ReaderNgdotNextArgs\"#\n" +
	"\x13ReaderNgdotReadArgs\x12\f\n" +
	"\x01b\x18\x01 \x01(\fR\x01b\"\x1d\n" +
	"\rNewWriterArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\"\x16\n" +
	"\x14WriterNgdotFlushArgs\"\x1c\n" +
	"\x1aWriterNgdotWriteHeaderArgs\"$\n" +
	"\x14WriterNgdotWriteArgs\x12\f\n" +
	"\x01b\x18\x01 \x01(\fR\x01b\"\x16\n" +
	"\x14WriterNgdotCloseArgs\"\x9a\x06\n" +
	"\fNgoloFuzzOne\x12V\n" +
	"\x13HeaderNgdotFileInfo\x18\x01 \x01(\v2\".ngolofuzz.HeaderNgdotFileInfoArgsH\x00R\x13HeaderNgdotFileInfo\x12P\n" +
	"\x11FormatNgdotString\x18\x02 \x01(\v2 .ngolofuzz.FormatNgdotStringArgsH\x00R\x11FormatNgdotString\x128\n" +
	"\tNewReader\x18\x03 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12J\n" +
	"\x0fReaderNgdotNext\x18\x04 \x01(\v2\x1e.ngolofuzz.ReaderNgdotNextArgsH\x00R\x0fReaderNgdotNext\x12J\n" +
	"\x0fReaderNgdotRead\x18\x05 \x01(\v2\x1e.ngolofuzz.ReaderNgdotReadArgsH\x00R\x0fReaderNgdotRead\x128\n" +
	"\tNewWriter\x18\x06 \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriter\x12M\n" +
	"\x10WriterNgdotFlush\x18\a \x01(\v2\x1f.ngolofuzz.WriterNgdotFlushArgsH\x00R\x10WriterNgdotFlush\x12_\n" +
	"\x16WriterNgdotWriteHeader\x18\b \x01(\v2%.ngolofuzz.WriterNgdotWriteHeaderArgsH\x00R\x16WriterNgdotWriteHeader\x12M\n" +
	"\x10WriterNgdotWrite\x18\t \x01(\v2\x1f.ngolofuzz.WriterNgdotWriteArgsH\x00R\x10WriterNgdotWrite\x12M\n" +
	"\x10WriterNgdotClose\x18\n" +
	" \x01(\v2\x1f.ngolofuzz.WriterNgdotCloseArgsH\x00R\x10WriterNgdotCloseB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*N\n" +
	"\n" +
	"FormatEnum\x12\x11\n" +
	"\rFormatUnknown\x10\x00\x12\x0f\n" +
	"\vFormatUSTAR\x10\x01\x12\r\n" +
	"\tFormatPAX\x10\x02\x12\r\n" +
	"\tFormatGNU\x10\x03B\x18Z\x16./;fuzz_ng_archive_tarb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzips the raw descriptor exactly
// once and returns the cached compressed bytes.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Generated type/dependency tables used by the protobuf runtime to wire the
// file's enum, messages, and field references together. Do not hand-edit.
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
var file_ngolofuzz_proto_goTypes = []any{
	(FormatEnum)(0),                   // 0: ngolofuzz.FormatEnum
	(*HeaderNgdotFileInfoArgs)(nil),   // 1: ngolofuzz.HeaderNgdotFileInfoArgs
	(*FormatNgdotStringArgs)(nil),     // 2: ngolofuzz.FormatNgdotStringArgs
	(*NewReaderArgs)(nil),             // 3: ngolofuzz.NewReaderArgs
	(*ReaderNgdotNextArgs)(nil),       // 4: ngolofuzz.ReaderNgdotNextArgs
	(*ReaderNgdotReadArgs)(nil),       // 5: ngolofuzz.ReaderNgdotReadArgs
	(*NewWriterArgs)(nil),             // 6: ngolofuzz.NewWriterArgs
	(*WriterNgdotFlushArgs)(nil),      // 7: ngolofuzz.WriterNgdotFlushArgs
	(*WriterNgdotWriteHeaderArgs)(nil), // 8: ngolofuzz.WriterNgdotWriteHeaderArgs
	(*WriterNgdotWriteArgs)(nil),      // 9: ngolofuzz.WriterNgdotWriteArgs
	(*WriterNgdotCloseArgs)(nil),      // 10: ngolofuzz.WriterNgdotCloseArgs
	(*NgoloFuzzOne)(nil),              // 11: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),              // 12: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),             // 13: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.FormatNgdotStringArgs.f:type_name -> ngolofuzz.FormatEnum
	1,  // 1: ngolofuzz.NgoloFuzzOne.HeaderNgdotFileInfo:type_name -> ngolofuzz.HeaderNgdotFileInfoArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.FormatNgdotString:type_name -> ngolofuzz.FormatNgdotStringArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.ReaderNgdotNext:type_name -> ngolofuzz.ReaderNgdotNextArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.ReaderNgdotRead:type_name -> ngolofuzz.ReaderNgdotReadArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.WriterNgdotFlush:type_name -> ngolofuzz.WriterNgdotFlushArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.WriterNgdotWriteHeader:type_name -> ngolofuzz.WriterNgdotWriteHeaderArgs
	9,  // 9: ngolofuzz.NgoloFuzzOne.WriterNgdotWrite:type_name -> ngolofuzz.WriterNgdotWriteArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.WriterNgdotClose:type_name -> ngolofuzz.WriterNgdotCloseArgs
	11, // 11: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	12, // [12:12] is the sub-list for method output_type
	12, // [12:12] is the sub-list for method input_type
	12, // [12:12] is the sub-list for extension type_name
	12, // [12:12] is the sub-list for extension extendee
	0,  // [0:12] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the file's types with the protobuf
// runtime. It is idempotent: a second call returns immediately.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Oneof wrapper registration for NgoloFuzzOne (msgTypes[10]).
	file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
		(*NgoloFuzzOne_HeaderNgdotFileInfo)(nil),
		(*NgoloFuzzOne_FormatNgdotString)(nil),
		(*NgoloFuzzOne_NewReader)(nil),
		(*NgoloFuzzOne_ReaderNgdotNext)(nil),
		(*NgoloFuzzOne_ReaderNgdotRead)(nil),
		(*NgoloFuzzOne_NewWriter)(nil),
		(*NgoloFuzzOne_WriterNgdotFlush)(nil),
		(*NgoloFuzzOne_WriterNgdotWriteHeader)(nil),
		(*NgoloFuzzOne_WriterNgdotWrite)(nil),
		(*NgoloFuzzOne_WriterNgdotClose)(nil),
	}
	// Oneof wrapper registration for NgoloFuzzAny (msgTypes[11]).
	file_ngolofuzz_proto_msgTypes[11].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      1,
			NumMessages:   13,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		EnumInfos:         file_ngolofuzz_proto_enumTypes,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_archive_zip
import (
"google.golang.org/protobuf/proto"
"archive/zip"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in for fuzzing: Read replays a
// fixed byte slice, Write discards its input.
type FuzzingConn struct {
	buf    []byte // canned data replayed by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next chunk of c.buf into b, implementing io.Reader.
// It returns io.EOF once all of buf has been consumed.
//
// Bug fixed: the original compared len(b) against len(c.buf)+c.offset
// (instead of the remaining count len(c.buf)-c.offset), so after the first
// partial read it reported n = len(b) even when fewer bytes were copied and
// pushed offset past len(buf), violating the io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy returns the number of bytes actually transferred, which handles
	// both the partial-fill and final-chunk cases correctly.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written (the conn is a data sink).
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection as drained so subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr with fixed network/address strings.
type FuzzingAddr struct{}

// Network returns a constant fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a constant fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are irrelevant for in-memory fuzzing.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn returns a FuzzingConn that replays a as its read data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit the helper functions below only when the generated harness actually needs them.
// CreateBigInt builds a big.Int from the big-endian bytes in a; an empty or
// nil slice yields zero.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to int, preserving order.
func ConvertIntArray(a []int64) []int {
	r := make([]int, len(a))
	for i, v := range a {
		r[i] = int(v)
	}
	return r
}
// ConvertUint16Array truncates each int64 in a to uint16, preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	r := make([]uint16, len(a))
	for i, v := range a {
		r[i] = uint16(v)
	}
	return r
}
// GetRune returns the first rune of s, or NUL for an empty string.
// Invalid UTF-8 at the front decodes to U+FFFD, matching range-over-string.
func GetRune(s string) rune {
	runes := []rune(s)
	if len(runes) == 0 {
		return '\x00'
	}
	return runes[0]
}
// FuzzNG_valid is the fuzz entry point for inputs already known to be valid
// protobuf encodings of NgoloFuzzList: an unmarshal failure here is a harness
// bug, so it panics rather than returning.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised intentionally by the harness; re-raise
	// anything else so real crashes still surface to the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzz entry point for raw fuzzer bytes that may not be
// a valid protobuf: undecodable inputs are silently rejected (return 0)
// instead of panicking as FuzzNG_valid does.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics raised intentionally by the harness; re-raise
	// anything else so real crashes still surface to the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets the decoded call list against archive/zip: each
// NgoloFuzzOne item maps to one API call. Results (readers/writers/headers)
// are pooled in slices and picked round-robin by later calls. Returns 1 when
// the whole list ran, 0 on early exit (API error or oversized list).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// Optionally dump a Go-source reproducer of this run once per process.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var ReaderResults []*zip.Reader
	ReaderResultsIndex := 0
	var ReadCloserResults []*zip.ReadCloser
	ReadCloserResultsIndex := 0
	// NOTE(review): no case below ever appends to FileHeaderResults, so the
	// FileHeader/CreateHeader/CreateRaw cases always hit their empty-pool
	// `continue` — presumably a generator gap; verify against the generator.
	var FileHeaderResults []*zip.FileHeader
	FileHeaderResultsIndex := 0
	var WriterResults []*zip.Writer
	WriterResultsIndex := 0
	for l := range gen.List {
		// Cap work per input to keep iterations fast.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_OpenReader:
			r0, r1 := zip.OpenReader(a.OpenReader.Name)
			if r0 != nil {
				ReadCloserResults = append(ReadCloserResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_NewReader:
			arg0 := bytes.NewReader(a.NewReader.R)
			r0, r1 := zip.NewReader(arg0, a.NewReader.Size)
			if r0 != nil {
				ReaderResults = append(ReaderResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReadCloserNgdotClose:
			if len(ReadCloserResults) == 0 {
				continue
			}
			arg0 := ReadCloserResults[ReadCloserResultsIndex]
			ReadCloserResultsIndex = (ReadCloserResultsIndex + 1) % len(ReadCloserResults)
			r0 := arg0.Close()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotOpen:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.Open(a.ReaderNgdotOpen.Name)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_FileHeaderNgdotFileInfo:
			if len(FileHeaderResults) == 0 {
				continue
			}
			arg0 := FileHeaderResults[FileHeaderResultsIndex]
			FileHeaderResultsIndex = (FileHeaderResultsIndex + 1) % len(FileHeaderResults)
			arg0.FileInfo()
		case *NgoloFuzzOne_FileHeaderNgdotModTime:
			if len(FileHeaderResults) == 0 {
				continue
			}
			arg0 := FileHeaderResults[FileHeaderResultsIndex]
			FileHeaderResultsIndex = (FileHeaderResultsIndex + 1) % len(FileHeaderResults)
			arg0.ModTime()
		case *NgoloFuzzOne_FileHeaderNgdotMode:
			if len(FileHeaderResults) == 0 {
				continue
			}
			arg0 := FileHeaderResults[FileHeaderResultsIndex]
			FileHeaderResultsIndex = (FileHeaderResultsIndex + 1) % len(FileHeaderResults)
			arg0.Mode()
		case *NgoloFuzzOne_NewWriter:
			arg0 := bytes.NewBuffer(a.NewWriter.W)
			r0 := zip.NewWriter(arg0)
			if r0 != nil {
				WriterResults = append(WriterResults, r0)
			}
		case *NgoloFuzzOne_WriterNgdotSetOffset:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			arg0.SetOffset(a.WriterNgdotSetOffset.N)
		case *NgoloFuzzOne_WriterNgdotFlush:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Flush()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotSetComment:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.SetComment(a.WriterNgdotSetComment.Comment)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotClose:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Close()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotCreate:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			_, r1 := arg0.Create(a.WriterNgdotCreate.Name)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotCreateHeader:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			if len(FileHeaderResults) == 0 {
				continue
			}
			arg1 := FileHeaderResults[FileHeaderResultsIndex]
			FileHeaderResultsIndex = (FileHeaderResultsIndex + 1) % len(FileHeaderResults)
			_, r1 := arg0.CreateHeader(arg1)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotCreateRaw:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			if len(FileHeaderResults) == 0 {
				continue
			}
			arg1 := FileHeaderResults[FileHeaderResultsIndex]
			FileHeaderResultsIndex = (FileHeaderResultsIndex + 1) % len(FileHeaderResults)
			_, r1 := arg0.CreateRaw(arg1)
			if r1 != nil {
				r1.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List renders the fuzzed call sequence in gen as Go source
// statements, writing one line per reproduced archive/zip API call to w.
// The Nb counters count how many results of each type have been "created"
// so far; the Idx counters cycle through them modulo the corresponding Nb,
// mirroring the replay logic of the fuzz harness.
//
// NOTE(review): no case in this function ever increments the FileHeader
// counter, so the FileHeader / CreateHeader / CreateRaw cases below can
// never emit output — presumably the generator has no printed constructor
// for FileHeader values; confirm against the runtime harness.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	readerNb, readerIdx := 0, 0
	readCloserNb, readCloserIdx := 0, 0
	fileHeaderNb, fileHeaderIdx := 0, 0
	writerNb, writerIdx := 0, 0
	for _, one := range gen.List {
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_OpenReader:
			w.WriteString(fmt.Sprintf("ReadCloser%d, _ := zip.OpenReader(%#+v)\n", readCloserNb, a.OpenReader.Name))
			readCloserNb++
		case *NgoloFuzzOne_NewReader:
			w.WriteString(fmt.Sprintf("Reader%d, _ := zip.NewReader(bytes.NewReader(%#+v), %#+v)\n", readerNb, a.NewReader.R, a.NewReader.Size))
			readerNb++
		case *NgoloFuzzOne_ReadCloserNgdotClose:
			if readCloserNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("ReadCloser%d.Close()\n", readCloserIdx))
			readCloserIdx = (readCloserIdx + 1) % readCloserNb
		case *NgoloFuzzOne_ReaderNgdotOpen:
			if readerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Open(%#+v)\n", readerIdx, a.ReaderNgdotOpen.Name))
			readerIdx = (readerIdx + 1) % readerNb
		case *NgoloFuzzOne_FileHeaderNgdotFileInfo:
			if fileHeaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("FileHeader%d.FileInfo()\n", fileHeaderIdx))
			fileHeaderIdx = (fileHeaderIdx + 1) % fileHeaderNb
		case *NgoloFuzzOne_FileHeaderNgdotModTime:
			if fileHeaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("FileHeader%d.ModTime()\n", fileHeaderIdx))
			fileHeaderIdx = (fileHeaderIdx + 1) % fileHeaderNb
		case *NgoloFuzzOne_FileHeaderNgdotMode:
			if fileHeaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("FileHeader%d.Mode()\n", fileHeaderIdx))
			fileHeaderIdx = (fileHeaderIdx + 1) % fileHeaderNb
		case *NgoloFuzzOne_NewWriter:
			w.WriteString(fmt.Sprintf("Writer%d := zip.NewWriter(bytes.NewBuffer(%#+v))\n", writerNb, a.NewWriter.W))
			writerNb++
		case *NgoloFuzzOne_WriterNgdotSetOffset:
			if writerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.SetOffset(%#+v)\n", writerIdx, a.WriterNgdotSetOffset.N))
			writerIdx = (writerIdx + 1) % writerNb
		case *NgoloFuzzOne_WriterNgdotFlush:
			if writerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Flush()\n", writerIdx))
			writerIdx = (writerIdx + 1) % writerNb
		case *NgoloFuzzOne_WriterNgdotSetComment:
			if writerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.SetComment(%#+v)\n", writerIdx, a.WriterNgdotSetComment.Comment))
			writerIdx = (writerIdx + 1) % writerNb
		case *NgoloFuzzOne_WriterNgdotClose:
			if writerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Close()\n", writerIdx))
			writerIdx = (writerIdx + 1) % writerNb
		case *NgoloFuzzOne_WriterNgdotCreate:
			if writerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Create(%#+v)\n", writerIdx, a.WriterNgdotCreate.Name))
			writerIdx = (writerIdx + 1) % writerNb
		case *NgoloFuzzOne_WriterNgdotCreateHeader:
			if writerNb == 0 || fileHeaderNb == 0 {
				continue
			}
			// Argument index is read first, then both cursors advance.
			w.WriteString(fmt.Sprintf("Writer%d.CreateHeader(FileHeader%d)\n", writerIdx, fileHeaderIdx%fileHeaderNb))
			writerIdx = (writerIdx + 1) % writerNb
			fileHeaderIdx = (fileHeaderIdx + 1) % fileHeaderNb
		case *NgoloFuzzOne_WriterNgdotCreateRaw:
			if writerNb == 0 || fileHeaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.CreateRaw(FileHeader%d)\n", writerIdx, fileHeaderIdx%fileHeaderNb))
			writerIdx = (writerIdx + 1) % writerNb
			fileHeaderIdx = (fileHeaderIdx + 1) % fileHeaderNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_archive_zip
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Generated version guards: compilation fails here if the linked
// runtime/protoimpl package is older or newer than what this generated
// code was produced for.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// OpenReaderArgs is the generated protobuf message carrying the fuzzed
// argument for a zip.OpenReader call.
type OpenReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Name is the path handed to zip.OpenReader.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *OpenReaderArgs) Reset() {
*x = OpenReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *OpenReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OpenReaderArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *OpenReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OpenReaderArgs.ProtoReflect.Descriptor instead.
func (*OpenReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetName returns the name field; safe to call on a nil receiver.
func (x *OpenReaderArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
// NewReaderArgs is the generated protobuf message carrying the fuzzed
// arguments for a zip.NewReader call: the raw archive bytes and a size.
type NewReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// R is the raw byte content wrapped in a bytes.Reader by the harness.
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
// Size is the declared archive size passed to zip.NewReader.
Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *NewReaderArgs) Reset() {
*x = NewReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *NewReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetR returns the r field; safe to call on a nil receiver.
func (x *NewReaderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
// GetSize returns the size field; safe to call on a nil receiver.
func (x *NewReaderArgs) GetSize() int64 {
if x != nil {
return x.Size
}
return 0
}
// ReadCloserNgdotCloseArgs is the (empty) argument message for a fuzzed
// ReadCloser.Close call.
type ReadCloserNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *ReadCloserNgdotCloseArgs) Reset() {
*x = ReadCloserNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *ReadCloserNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadCloserNgdotCloseArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *ReadCloserNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadCloserNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*ReadCloserNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// ReaderNgdotOpenArgs carries the fuzzed file name for a Reader.Open call.
type ReaderNgdotOpenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Name is the entry name passed to Reader.Open.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *ReaderNgdotOpenArgs) Reset() {
*x = ReaderNgdotOpenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *ReaderNgdotOpenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotOpenArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *ReaderNgdotOpenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotOpenArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotOpenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetName returns the name field; safe to call on a nil receiver.
func (x *ReaderNgdotOpenArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
// FileHeaderNgdotFileInfoArgs is the (empty) argument message for a fuzzed
// FileHeader.FileInfo call.
type FileHeaderNgdotFileInfoArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *FileHeaderNgdotFileInfoArgs) Reset() {
*x = FileHeaderNgdotFileInfoArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *FileHeaderNgdotFileInfoArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileHeaderNgdotFileInfoArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *FileHeaderNgdotFileInfoArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileHeaderNgdotFileInfoArgs.ProtoReflect.Descriptor instead.
func (*FileHeaderNgdotFileInfoArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// FileHeaderNgdotModTimeArgs is the (empty) argument message for a fuzzed
// FileHeader.ModTime call.
type FileHeaderNgdotModTimeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *FileHeaderNgdotModTimeArgs) Reset() {
*x = FileHeaderNgdotModTimeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *FileHeaderNgdotModTimeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileHeaderNgdotModTimeArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *FileHeaderNgdotModTimeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileHeaderNgdotModTimeArgs.ProtoReflect.Descriptor instead.
func (*FileHeaderNgdotModTimeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// FileHeaderNgdotModeArgs is the (empty) argument message for a fuzzed
// FileHeader.Mode call.
type FileHeaderNgdotModeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *FileHeaderNgdotModeArgs) Reset() {
*x = FileHeaderNgdotModeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *FileHeaderNgdotModeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileHeaderNgdotModeArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *FileHeaderNgdotModeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileHeaderNgdotModeArgs.ProtoReflect.Descriptor instead.
func (*FileHeaderNgdotModeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// NewWriterArgs carries the fuzzed seed bytes for a zip.NewWriter call
// (wrapped in a bytes.Buffer by the harness).
type NewWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// W is the initial buffer content for the destination writer.
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *NewWriterArgs) Reset() {
*x = NewWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *NewWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// GetW returns the w field; safe to call on a nil receiver.
func (x *NewWriterArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
// WriterNgdotSetOffsetArgs carries the fuzzed offset for a Writer.SetOffset call.
type WriterNgdotSetOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// N is the byte offset passed to Writer.SetOffset.
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *WriterNgdotSetOffsetArgs) Reset() {
*x = WriterNgdotSetOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *WriterNgdotSetOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotSetOffsetArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *WriterNgdotSetOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotSetOffsetArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotSetOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// GetN returns the n field; safe to call on a nil receiver.
func (x *WriterNgdotSetOffsetArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// WriterNgdotFlushArgs is the (empty) argument message for a fuzzed
// Writer.Flush call.
type WriterNgdotFlushArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *WriterNgdotFlushArgs) Reset() {
*x = WriterNgdotFlushArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *WriterNgdotFlushArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotFlushArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *WriterNgdotFlushArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotFlushArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotFlushArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// WriterNgdotSetCommentArgs carries the fuzzed comment for a
// Writer.SetComment call.
type WriterNgdotSetCommentArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Comment is the archive comment passed to Writer.SetComment.
Comment string `protobuf:"bytes,1,opt,name=comment,proto3" json:"comment,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *WriterNgdotSetCommentArgs) Reset() {
*x = WriterNgdotSetCommentArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *WriterNgdotSetCommentArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotSetCommentArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *WriterNgdotSetCommentArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotSetCommentArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotSetCommentArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetComment returns the comment field; safe to call on a nil receiver.
func (x *WriterNgdotSetCommentArgs) GetComment() string {
if x != nil {
return x.Comment
}
return ""
}
// WriterNgdotCloseArgs is the (empty) argument message for a fuzzed
// Writer.Close call.
type WriterNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *WriterNgdotCloseArgs) Reset() {
*x = WriterNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *WriterNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotCloseArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *WriterNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// WriterNgdotCreateArgs carries the fuzzed entry name for a Writer.Create call.
type WriterNgdotCreateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Name is the entry name passed to Writer.Create.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *WriterNgdotCreateArgs) Reset() {
*x = WriterNgdotCreateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *WriterNgdotCreateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotCreateArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *WriterNgdotCreateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotCreateArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCreateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// GetName returns the name field; safe to call on a nil receiver.
func (x *WriterNgdotCreateArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
// WriterNgdotCreateHeaderArgs is the (empty) argument message for a fuzzed
// Writer.CreateHeader call; the FileHeader itself is drawn from the
// harness's result pool, not from this message.
type WriterNgdotCreateHeaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *WriterNgdotCreateHeaderArgs) Reset() {
*x = WriterNgdotCreateHeaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *WriterNgdotCreateHeaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotCreateHeaderArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *WriterNgdotCreateHeaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotCreateHeaderArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCreateHeaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// WriterNgdotCreateRawArgs is the (empty) argument message for a fuzzed
// Writer.CreateRaw call; the FileHeader is drawn from the harness's
// result pool.
type WriterNgdotCreateRawArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *WriterNgdotCreateRawArgs) Reset() {
*x = WriterNgdotCreateRawArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *WriterNgdotCreateRawArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotCreateRawArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *WriterNgdotCreateRawArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotCreateRawArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCreateRawArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// NgoloFuzzOne is one step of a fuzzed call sequence: a oneof selecting
// exactly one archive/zip API call together with its argument message.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
//	*NgoloFuzzOne_OpenReader
//	*NgoloFuzzOne_NewReader
//	*NgoloFuzzOne_ReadCloserNgdotClose
//	*NgoloFuzzOne_ReaderNgdotOpen
//	*NgoloFuzzOne_FileHeaderNgdotFileInfo
//	*NgoloFuzzOne_FileHeaderNgdotModTime
//	*NgoloFuzzOne_FileHeaderNgdotMode
//	*NgoloFuzzOne_NewWriter
//	*NgoloFuzzOne_WriterNgdotSetOffset
//	*NgoloFuzzOne_WriterNgdotFlush
//	*NgoloFuzzOne_WriterNgdotSetComment
//	*NgoloFuzzOne_WriterNgdotClose
//	*NgoloFuzzOne_WriterNgdotCreate
//	*NgoloFuzzOne_WriterNgdotCreateHeader
//	*NgoloFuzzOne_WriterNgdotCreateRaw
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetItem returns the populated oneof alternative (nil-safe).
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Each GetXxx accessor below returns the corresponding oneof alternative's
// payload, or nil when Item is unset or holds a different alternative.
func (x *NgoloFuzzOne) GetOpenReader() *OpenReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_OpenReader); ok {
return x.OpenReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
return x.NewReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetReadCloserNgdotClose() *ReadCloserNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReadCloserNgdotClose); ok {
return x.ReadCloserNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotOpen() *ReaderNgdotOpenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotOpen); ok {
return x.ReaderNgdotOpen
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileHeaderNgdotFileInfo() *FileHeaderNgdotFileInfoArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileHeaderNgdotFileInfo); ok {
return x.FileHeaderNgdotFileInfo
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileHeaderNgdotModTime() *FileHeaderNgdotModTimeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileHeaderNgdotModTime); ok {
return x.FileHeaderNgdotModTime
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileHeaderNgdotMode() *FileHeaderNgdotModeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileHeaderNgdotMode); ok {
return x.FileHeaderNgdotMode
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
return x.NewWriter
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotSetOffset() *WriterNgdotSetOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotSetOffset); ok {
return x.WriterNgdotSetOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotFlush() *WriterNgdotFlushArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotFlush); ok {
return x.WriterNgdotFlush
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotSetComment() *WriterNgdotSetCommentArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotSetComment); ok {
return x.WriterNgdotSetComment
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotClose() *WriterNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotClose); ok {
return x.WriterNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotCreate() *WriterNgdotCreateArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotCreate); ok {
return x.WriterNgdotCreate
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotCreateHeader() *WriterNgdotCreateHeaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotCreateHeader); ok {
return x.WriterNgdotCreateHeader
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotCreateRaw() *WriterNgdotCreateRawArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotCreateRaw); ok {
return x.WriterNgdotCreateRaw
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every
// NgoloFuzzOne oneof wrapper type below.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// The wrapper types below each hold exactly one oneof alternative;
// the protobuf field number is encoded in the struct tag.
type NgoloFuzzOne_OpenReader struct {
OpenReader *OpenReaderArgs `protobuf:"bytes,1,opt,name=OpenReader,proto3,oneof"`
}
type NgoloFuzzOne_NewReader struct {
NewReader *NewReaderArgs `protobuf:"bytes,2,opt,name=NewReader,proto3,oneof"`
}
type NgoloFuzzOne_ReadCloserNgdotClose struct {
ReadCloserNgdotClose *ReadCloserNgdotCloseArgs `protobuf:"bytes,3,opt,name=ReadCloserNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotOpen struct {
ReaderNgdotOpen *ReaderNgdotOpenArgs `protobuf:"bytes,4,opt,name=ReaderNgdotOpen,proto3,oneof"`
}
type NgoloFuzzOne_FileHeaderNgdotFileInfo struct {
FileHeaderNgdotFileInfo *FileHeaderNgdotFileInfoArgs `protobuf:"bytes,5,opt,name=FileHeaderNgdotFileInfo,proto3,oneof"`
}
type NgoloFuzzOne_FileHeaderNgdotModTime struct {
FileHeaderNgdotModTime *FileHeaderNgdotModTimeArgs `protobuf:"bytes,6,opt,name=FileHeaderNgdotModTime,proto3,oneof"`
}
type NgoloFuzzOne_FileHeaderNgdotMode struct {
FileHeaderNgdotMode *FileHeaderNgdotModeArgs `protobuf:"bytes,7,opt,name=FileHeaderNgdotMode,proto3,oneof"`
}
type NgoloFuzzOne_NewWriter struct {
NewWriter *NewWriterArgs `protobuf:"bytes,8,opt,name=NewWriter,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotSetOffset struct {
WriterNgdotSetOffset *WriterNgdotSetOffsetArgs `protobuf:"bytes,9,opt,name=WriterNgdotSetOffset,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotFlush struct {
WriterNgdotFlush *WriterNgdotFlushArgs `protobuf:"bytes,10,opt,name=WriterNgdotFlush,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotSetComment struct {
WriterNgdotSetComment *WriterNgdotSetCommentArgs `protobuf:"bytes,11,opt,name=WriterNgdotSetComment,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotClose struct {
WriterNgdotClose *WriterNgdotCloseArgs `protobuf:"bytes,12,opt,name=WriterNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotCreate struct {
WriterNgdotCreate *WriterNgdotCreateArgs `protobuf:"bytes,13,opt,name=WriterNgdotCreate,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotCreateHeader struct {
WriterNgdotCreateHeader *WriterNgdotCreateHeaderArgs `protobuf:"bytes,14,opt,name=WriterNgdotCreateHeader,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotCreateRaw struct {
WriterNgdotCreateRaw *WriterNgdotCreateRawArgs `protobuf:"bytes,15,opt,name=WriterNgdotCreateRaw,proto3,oneof"`
}
func (*NgoloFuzzOne_OpenReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReadCloserNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotOpen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileHeaderNgdotFileInfo) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileHeaderNgdotModTime) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileHeaderNgdotMode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotSetOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotFlush) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotSetComment) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotCreate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotCreateHeader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotCreateRaw) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a oneof over the primitive scalar kinds the fuzzer can
// generate (double, int64, bool, string, bytes).
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
//	*NgoloFuzzAny_DoubleArgs
//	*NgoloFuzzAny_Int64Args
//	*NgoloFuzzAny_BoolArgs
//	*NgoloFuzzAny_StringArgs
//	*NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetItem returns the populated oneof alternative (nil-safe).
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Each GetXxxArgs accessor returns the scalar payload when Item holds that
// alternative, or the type's zero value otherwise.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented by every
// NgoloFuzzAny oneof wrapper type below.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne steps replayed by the harness and printed by PrintNG_List.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
// List is the ordered call sequence.
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-binds its message info.
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, lazily caching message info.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// GetList returns the list field; safe to call on a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the protoreflect file descriptor for
// ngolofuzz.proto (presumably populated by the generated init code,
// which is not visible in this chunk).
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"$\n" +
"\x0eOpenReaderArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"1\n" +
"\rNewReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\x12\x12\n" +
"\x04size\x18\x02 \x01(\x03R\x04size\"\x1a\n" +
"\x18ReadCloserNgdotCloseArgs\")\n" +
"\x13ReaderNgdotOpenArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x1d\n" +
"\x1bFileHeaderNgdotFileInfoArgs\"\x1c\n" +
"\x1aFileHeaderNgdotModTimeArgs\"\x19\n" +
"\x17FileHeaderNgdotModeArgs\"\x1d\n" +
"\rNewWriterArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"(\n" +
"\x18WriterNgdotSetOffsetArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"\x16\n" +
"\x14WriterNgdotFlushArgs\"5\n" +
"\x19WriterNgdotSetCommentArgs\x12\x18\n" +
"\acomment\x18\x01 \x01(\tR\acomment\"\x16\n" +
"\x14WriterNgdotCloseArgs\"+\n" +
"\x15WriterNgdotCreateArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x1d\n" +
"\x1bWriterNgdotCreateHeaderArgs\"\x1a\n" +
"\x18WriterNgdotCreateRawArgs\"\xf3\t\n" +
"\fNgoloFuzzOne\x12;\n" +
"\n" +
"OpenReader\x18\x01 \x01(\v2\x19.ngolofuzz.OpenReaderArgsH\x00R\n" +
"OpenReader\x128\n" +
"\tNewReader\x18\x02 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12Y\n" +
"\x14ReadCloserNgdotClose\x18\x03 \x01(\v2#.ngolofuzz.ReadCloserNgdotCloseArgsH\x00R\x14ReadCloserNgdotClose\x12J\n" +
"\x0fReaderNgdotOpen\x18\x04 \x01(\v2\x1e.ngolofuzz.ReaderNgdotOpenArgsH\x00R\x0fReaderNgdotOpen\x12b\n" +
"\x17FileHeaderNgdotFileInfo\x18\x05 \x01(\v2&.ngolofuzz.FileHeaderNgdotFileInfoArgsH\x00R\x17FileHeaderNgdotFileInfo\x12_\n" +
"\x16FileHeaderNgdotModTime\x18\x06 \x01(\v2%.ngolofuzz.FileHeaderNgdotModTimeArgsH\x00R\x16FileHeaderNgdotModTime\x12V\n" +
"\x13FileHeaderNgdotMode\x18\a \x01(\v2\".ngolofuzz.FileHeaderNgdotModeArgsH\x00R\x13FileHeaderNgdotMode\x128\n" +
"\tNewWriter\x18\b \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriter\x12Y\n" +
"\x14WriterNgdotSetOffset\x18\t \x01(\v2#.ngolofuzz.WriterNgdotSetOffsetArgsH\x00R\x14WriterNgdotSetOffset\x12M\n" +
"\x10WriterNgdotFlush\x18\n" +
" \x01(\v2\x1f.ngolofuzz.WriterNgdotFlushArgsH\x00R\x10WriterNgdotFlush\x12\\\n" +
"\x15WriterNgdotSetComment\x18\v \x01(\v2$.ngolofuzz.WriterNgdotSetCommentArgsH\x00R\x15WriterNgdotSetComment\x12M\n" +
"\x10WriterNgdotClose\x18\f \x01(\v2\x1f.ngolofuzz.WriterNgdotCloseArgsH\x00R\x10WriterNgdotClose\x12P\n" +
"\x11WriterNgdotCreate\x18\r \x01(\v2 .ngolofuzz.WriterNgdotCreateArgsH\x00R\x11WriterNgdotCreate\x12b\n" +
"\x17WriterNgdotCreateHeader\x18\x0e \x01(\v2&.ngolofuzz.WriterNgdotCreateHeaderArgsH\x00R\x17WriterNgdotCreateHeader\x12Y\n" +
"\x14WriterNgdotCreateRaw\x18\x0f \x01(\v2#.ngolofuzz.WriterNgdotCreateRawArgsH\x00R\x14WriterNgdotCreateRawB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x18Z\x16./;fuzz_ng_archive_zipb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor
// exactly once (guarded by the sync.Once above) and returns the cached
// bytes; used only by the deprecated Descriptor methods.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
// unsafe.Slice over StringData avoids copying the constant string.
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Runtime type tables: one MessageInfo slot per message declared in
// ngolofuzz.proto (18 messages, indexed in declaration order).
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 18)
var file_ngolofuzz_proto_goTypes = []any{
(*OpenReaderArgs)(nil), // 0: ngolofuzz.OpenReaderArgs
(*NewReaderArgs)(nil), // 1: ngolofuzz.NewReaderArgs
(*ReadCloserNgdotCloseArgs)(nil), // 2: ngolofuzz.ReadCloserNgdotCloseArgs
(*ReaderNgdotOpenArgs)(nil), // 3: ngolofuzz.ReaderNgdotOpenArgs
(*FileHeaderNgdotFileInfoArgs)(nil), // 4: ngolofuzz.FileHeaderNgdotFileInfoArgs
(*FileHeaderNgdotModTimeArgs)(nil), // 5: ngolofuzz.FileHeaderNgdotModTimeArgs
(*FileHeaderNgdotModeArgs)(nil), // 6: ngolofuzz.FileHeaderNgdotModeArgs
(*NewWriterArgs)(nil), // 7: ngolofuzz.NewWriterArgs
(*WriterNgdotSetOffsetArgs)(nil), // 8: ngolofuzz.WriterNgdotSetOffsetArgs
(*WriterNgdotFlushArgs)(nil), // 9: ngolofuzz.WriterNgdotFlushArgs
(*WriterNgdotSetCommentArgs)(nil), // 10: ngolofuzz.WriterNgdotSetCommentArgs
(*WriterNgdotCloseArgs)(nil), // 11: ngolofuzz.WriterNgdotCloseArgs
(*WriterNgdotCreateArgs)(nil), // 12: ngolofuzz.WriterNgdotCreateArgs
(*WriterNgdotCreateHeaderArgs)(nil), // 13: ngolofuzz.WriterNgdotCreateHeaderArgs
(*WriterNgdotCreateRawArgs)(nil), // 14: ngolofuzz.WriterNgdotCreateRawArgs
(*NgoloFuzzOne)(nil), // 15: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 16: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 17: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs maps each message-typed field to its index
// in file_ngolofuzz_proto_goTypes; the trailing five entries delimit the
// method/extension sub-lists (all empty for this file).
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.OpenReader:type_name -> ngolofuzz.OpenReaderArgs
1, // 1: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
2, // 2: ngolofuzz.NgoloFuzzOne.ReadCloserNgdotClose:type_name -> ngolofuzz.ReadCloserNgdotCloseArgs
3, // 3: ngolofuzz.NgoloFuzzOne.ReaderNgdotOpen:type_name -> ngolofuzz.ReaderNgdotOpenArgs
4, // 4: ngolofuzz.NgoloFuzzOne.FileHeaderNgdotFileInfo:type_name -> ngolofuzz.FileHeaderNgdotFileInfoArgs
5, // 5: ngolofuzz.NgoloFuzzOne.FileHeaderNgdotModTime:type_name -> ngolofuzz.FileHeaderNgdotModTimeArgs
6, // 6: ngolofuzz.NgoloFuzzOne.FileHeaderNgdotMode:type_name -> ngolofuzz.FileHeaderNgdotModeArgs
7, // 7: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
8, // 8: ngolofuzz.NgoloFuzzOne.WriterNgdotSetOffset:type_name -> ngolofuzz.WriterNgdotSetOffsetArgs
9, // 9: ngolofuzz.NgoloFuzzOne.WriterNgdotFlush:type_name -> ngolofuzz.WriterNgdotFlushArgs
10, // 10: ngolofuzz.NgoloFuzzOne.WriterNgdotSetComment:type_name -> ngolofuzz.WriterNgdotSetCommentArgs
11, // 11: ngolofuzz.NgoloFuzzOne.WriterNgdotClose:type_name -> ngolofuzz.WriterNgdotCloseArgs
12, // 12: ngolofuzz.NgoloFuzzOne.WriterNgdotCreate:type_name -> ngolofuzz.WriterNgdotCreateArgs
13, // 13: ngolofuzz.NgoloFuzzOne.WriterNgdotCreateHeader:type_name -> ngolofuzz.WriterNgdotCreateHeaderArgs
14, // 14: ngolofuzz.NgoloFuzzOne.WriterNgdotCreateRaw:type_name -> ngolofuzz.WriterNgdotCreateRawArgs
15, // 15: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
16, // [16:16] is the sub-list for method output_type
16, // [16:16] is the sub-list for method input_type
16, // [16:16] is the sub-list for extension type_name
16, // [16:16] is the sub-list for extension extendee
0, // [0:16] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the file descriptor and Go types
// with the protobuf runtime. It is idempotent: a second call returns
// immediately once File_ngolofuzz_proto has been built.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Declare the wrapper types for the NgoloFuzzOne.item oneof (slot 15).
file_ngolofuzz_proto_msgTypes[15].OneofWrappers = []any{
(*NgoloFuzzOne_OpenReader)(nil),
(*NgoloFuzzOne_NewReader)(nil),
(*NgoloFuzzOne_ReadCloserNgdotClose)(nil),
(*NgoloFuzzOne_ReaderNgdotOpen)(nil),
(*NgoloFuzzOne_FileHeaderNgdotFileInfo)(nil),
(*NgoloFuzzOne_FileHeaderNgdotModTime)(nil),
(*NgoloFuzzOne_FileHeaderNgdotMode)(nil),
(*NgoloFuzzOne_NewWriter)(nil),
(*NgoloFuzzOne_WriterNgdotSetOffset)(nil),
(*NgoloFuzzOne_WriterNgdotFlush)(nil),
(*NgoloFuzzOne_WriterNgdotSetComment)(nil),
(*NgoloFuzzOne_WriterNgdotClose)(nil),
(*NgoloFuzzOne_WriterNgdotCreate)(nil),
(*NgoloFuzzOne_WriterNgdotCreateHeader)(nil),
(*NgoloFuzzOne_WriterNgdotCreateRaw)(nil),
}
// Declare the wrapper types for the NgoloFuzzAny.item oneof (slot 16).
file_ngolofuzz_proto_msgTypes[16].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 18,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release the construction-time tables so the GC can reclaim them.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_bufio
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn-like stub used by the fuzzing
// harness: Read serves bytes from buf starting at offset, Write discards
// its input, and the address/deadline methods are placeholders.
type FuzzingConn struct {
	buf    []byte // data handed out to readers
	offset int    // next unread position in buf
}

// Read copies up to len(b) unread bytes from the buffer into b, advances
// the offset by the number of bytes actually copied, and returns io.EOF
// once the buffer is exhausted.
//
// Bug fixed: the previous short-read branch compared len(b) against
// len(c.buf)+c.offset instead of the remaining byte count
// (len(c.buf)-c.offset), so when remaining < len(b) < len(buf)+offset it
// reported n = len(b) even though copy() had transferred fewer bytes, and
// pushed offset past len(buf). Relying on copy's return value handles the
// partial and full cases correctly in one path.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written, never failing.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return
}
// Close marks the buffer as fully consumed so that every subsequent Read
// reports io.EOF. It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr implementation returned by
// FuzzingConn's address accessors.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	const network = "fuzz_addr_net"
	return network
}

// String reports a fixed fake address string.
func (c *FuzzingAddr) String() string {
	const address = "fuzz_addr_string"
	return address
}
// LocalAddr returns a fresh placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return new(FuzzingAddr)
}

// RemoteAddr returns a fresh placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return new(FuzzingAddr)
}
// SetDeadline is a no-op: fuzzing connections never time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op: fuzzing connections never time out.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op: fuzzing connections never time out.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn wraps a in a FuzzingConn whose Read serves those
// bytes from the beginning.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt builds a big.Int from the big-endian byte slice a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to int, preserving order.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16, preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
	for _, first := range s {
		return first
	}
	return '\x00'
}
// FuzzNG_valid is the harness entry point for inputs known to be valid
// protobuf: a decode failure is fatal. Panics carrying a string value
// (raised deliberately during the replay) are swallowed, making the
// function return 0; any other panic is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			// Unexpected panic kind: propagate it.
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the harness entry point for arbitrary bytes: input
// that does not decode as a NgoloFuzzList is rejected with 0. Panics
// carrying a string value (raised deliberately during the replay) are
// swallowed, making the function return 0; any other panic is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			// Unexpected panic kind: propagate it.
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-shot reproducer-file setup in FuzzNG_List.
var initialized bool
// FuzzNG_List replays the decoded operation list against the bufio API.
// Constructor cases append their results to per-type pools (Reader,
// Writer, Scanner); method cases round-robin over the matching pool via
// the *ResultsIndex counters, and are skipped while the pool is empty.
// Returns 1 when the whole list was replayed, 0 as soon as any call
// reports an error (or when the list exceeds the iteration cap).
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// First call only: if FUZZ_NG_REPRODUCER names a file, dump a Go
// source reproducer of this input there for debugging.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Pools of values produced by constructor operations.
var WriterResults []*bufio.Writer
WriterResultsIndex := 0
var ScannerResults []*bufio.Scanner
ScannerResultsIndex := 0
var ReaderResults []*bufio.Reader
ReaderResultsIndex := 0
for l := range gen.List {
// Cap the number of replayed operations to keep each run short.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewReaderSize:
arg0 := bytes.NewReader(a.NewReaderSize.Rd)
arg1 := int(a.NewReaderSize.Size)
// Size is taken modulo 0x10001 to avoid huge buffer allocations.
r0 := bufio.NewReaderSize(arg0, arg1 % 0x10001)
if r0 != nil{
ReaderResults = append(ReaderResults, r0)
}
case *NgoloFuzzOne_NewReader:
arg0 := bytes.NewReader(a.NewReader.Rd)
r0 := bufio.NewReader(arg0)
if r0 != nil{
ReaderResults = append(ReaderResults, r0)
}
case *NgoloFuzzOne_ReaderNgdotSize:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg0.Size()
case *NgoloFuzzOne_ReaderNgdotReset:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg1 := bytes.NewReader(a.ReaderNgdotReset.R)
arg0.Reset(arg1)
case *NgoloFuzzOne_ReaderNgdotPeek:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg1 := int(a.ReaderNgdotPeek.N)
_, r1 := arg0.Peek(arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotDiscard:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg1 := int(a.ReaderNgdotDiscard.N)
_, r1 := arg0.Discard(arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotRead:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, r1 := arg0.Read(a.ReaderNgdotRead.P)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotReadByte:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, r1 := arg0.ReadByte()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotUnreadByte:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
r0 := arg0.UnreadByte()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotReadRune:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, _, r2 := arg0.ReadRune()
if r2 != nil{
r2.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotUnreadRune:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
r0 := arg0.UnreadRune()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotBuffered:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg0.Buffered()
case *NgoloFuzzOne_ReaderNgdotReadSlice:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg1 := byte(a.ReaderNgdotReadSlice.Delim)
_, r1 := arg0.ReadSlice(arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotReadLine:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, _, r2 := arg0.ReadLine()
if r2 != nil{
r2.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotReadBytes:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg1 := byte(a.ReaderNgdotReadBytes.Delim)
_, r1 := arg0.ReadBytes(arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotReadString:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg1 := byte(a.ReaderNgdotReadString.Delim)
_, r1 := arg0.ReadString(arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotWriteTo:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg1 := bytes.NewBuffer(a.ReaderNgdotWriteTo.W)
_, r1 := arg0.WriteTo(arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NewWriterSize:
arg0 := bytes.NewBuffer(a.NewWriterSize.W)
arg1 := int(a.NewWriterSize.Size)
// Size is taken modulo 0x10001 to avoid huge buffer allocations.
r0 := bufio.NewWriterSize(arg0, arg1 % 0x10001)
if r0 != nil{
WriterResults = append(WriterResults, r0)
}
case *NgoloFuzzOne_NewWriter:
arg0 := bytes.NewBuffer(a.NewWriter.W)
r0 := bufio.NewWriter(arg0)
if r0 != nil{
WriterResults = append(WriterResults, r0)
}
case *NgoloFuzzOne_WriterNgdotSize:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg0.Size()
case *NgoloFuzzOne_WriterNgdotReset:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg1 := bytes.NewBuffer(a.WriterNgdotReset.W)
arg0.Reset(arg1)
case *NgoloFuzzOne_WriterNgdotFlush:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
r0 := arg0.Flush()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotAvailable:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg0.Available()
case *NgoloFuzzOne_WriterNgdotAvailableBuffer:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg0.AvailableBuffer()
case *NgoloFuzzOne_WriterNgdotBuffered:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg0.Buffered()
case *NgoloFuzzOne_WriterNgdotWrite:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
_, r1 := arg0.Write(a.WriterNgdotWrite.P)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotWriteByte:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg1 := byte(a.WriterNgdotWriteByte.C)
r0 := arg0.WriteByte(arg1)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotWriteRune:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg1 := GetRune(a.WriterNgdotWriteRune.R)
_, r1 := arg0.WriteRune(arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotWriteString:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
_, r1 := arg0.WriteString(a.WriterNgdotWriteString.S)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotReadFrom:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg1 := bytes.NewReader(a.WriterNgdotReadFrom.R)
_, r1 := arg0.ReadFrom(arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NewReadWriter:
// Needs one value from each pool; skipped unless both are non-empty.
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
if len(WriterResults) == 0 {
continue
}
arg1 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
bufio.NewReadWriter(arg0, arg1)
case *NgoloFuzzOne_NewScanner:
arg0 := bytes.NewReader(a.NewScanner.R)
r0 := bufio.NewScanner(arg0)
if r0 != nil{
ScannerResults = append(ScannerResults, r0)
}
case *NgoloFuzzOne_ScannerNgdotErr:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
r0 := arg0.Err()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_ScannerNgdotBytes:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
arg0.Bytes()
case *NgoloFuzzOne_ScannerNgdotText:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
arg0.Text()
case *NgoloFuzzOne_ScannerNgdotScan:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
arg0.Scan()
case *NgoloFuzzOne_ScannerNgdotBuffer:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
arg2 := int(a.ScannerNgdotBuffer.Max)
arg0.Buffer(a.ScannerNgdotBuffer.Buf, arg2)
case *NgoloFuzzOne_ScanRunes:
_, _, r2 := bufio.ScanRunes(a.ScanRunes.Data, a.ScanRunes.AtEOF)
if r2 != nil{
r2.Error()
return 0
}
case *NgoloFuzzOne_ScanLines:
_, _, r2 := bufio.ScanLines(a.ScanLines.Data, a.ScanLines.AtEOF)
if r2 != nil{
r2.Error()
return 0
}
case *NgoloFuzzOne_ScanWords:
_, _, r2 := bufio.ScanWords(a.ScanWords.Data, a.ScanWords.AtEOF)
if r2 != nil{
r2.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w. It mirrors
// FuzzNG_List's replay logic — same round-robin indexes and same skip
// rules — but emits one source line per operation instead of executing
// the call, so the output can be pasted into a standalone program.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
// *Nb counts values constructed so far; *ResultsIndex round-robins the
// receiver used by method calls, exactly as in FuzzNG_List.
WriterNb := 0
WriterResultsIndex := 0
ScannerNb := 0
ScannerResultsIndex := 0
ReaderNb := 0
ReaderResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewReaderSize:
w.WriteString(fmt.Sprintf("Reader%d := bufio.NewReaderSize(bytes.NewReader(%#+v), int(%#+v) %% 0x10001)\n", ReaderNb, a.NewReaderSize.Rd, a.NewReaderSize.Size))
ReaderNb = ReaderNb + 1
case *NgoloFuzzOne_NewReader:
w.WriteString(fmt.Sprintf("Reader%d := bufio.NewReader(bytes.NewReader(%#+v))\n", ReaderNb, a.NewReader.Rd))
ReaderNb = ReaderNb + 1
case *NgoloFuzzOne_ReaderNgdotSize:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.Size()\n", ReaderResultsIndex))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotReset:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.Reset(bytes.NewReader(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotReset.R))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotPeek:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.Peek(int(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotPeek.N))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotDiscard:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.Discard(int(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotDiscard.N))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotRead:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.Read(%#+v)\n", ReaderResultsIndex, a.ReaderNgdotRead.P))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotReadByte:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.ReadByte()\n", ReaderResultsIndex))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotUnreadByte:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.UnreadByte()\n", ReaderResultsIndex))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotReadRune:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.ReadRune()\n", ReaderResultsIndex))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotUnreadRune:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.UnreadRune()\n", ReaderResultsIndex))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotBuffered:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.Buffered()\n", ReaderResultsIndex))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotReadSlice:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.ReadSlice(byte(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotReadSlice.Delim))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotReadLine:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.ReadLine()\n", ReaderResultsIndex))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotReadBytes:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.ReadBytes(byte(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotReadBytes.Delim))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotReadString:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.ReadString(byte(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotReadString.Delim))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotWriteTo:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.WriteTo(bytes.NewBuffer(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotWriteTo.W))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_NewWriterSize:
w.WriteString(fmt.Sprintf("Writer%d := bufio.NewWriterSize(bytes.NewBuffer(%#+v), int(%#+v) %% 0x10001)\n", WriterNb, a.NewWriterSize.W, a.NewWriterSize.Size))
WriterNb = WriterNb + 1
case *NgoloFuzzOne_NewWriter:
w.WriteString(fmt.Sprintf("Writer%d := bufio.NewWriter(bytes.NewBuffer(%#+v))\n", WriterNb, a.NewWriter.W))
WriterNb = WriterNb + 1
case *NgoloFuzzOne_WriterNgdotSize:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Size()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotReset:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Reset(bytes.NewBuffer(%#+v))\n", WriterResultsIndex, a.WriterNgdotReset.W))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotFlush:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Flush()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotAvailable:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Available()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotAvailableBuffer:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.AvailableBuffer()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotBuffered:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Buffered()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotWrite:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Write(%#+v)\n", WriterResultsIndex, a.WriterNgdotWrite.P))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotWriteByte:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.WriteByte(byte(%#+v))\n", WriterResultsIndex, a.WriterNgdotWriteByte.C))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotWriteRune:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.WriteRune(GetRune(%#+v))\n", WriterResultsIndex, a.WriterNgdotWriteRune.R))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotWriteString:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.WriteString(%#+v)\n", WriterResultsIndex, a.WriterNgdotWriteString.S))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotReadFrom:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.ReadFrom(bytes.NewReader(%#+v))\n", WriterResultsIndex, a.WriterNgdotReadFrom.R))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_NewReadWriter:
// Needs both a Reader and a Writer, matching FuzzNG_List's skips.
if ReaderNb == 0 {
continue
}
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("bufio.NewReadWriter(Reader%d, Writer%d)\n", (ReaderResultsIndex + 0) % ReaderNb, (WriterResultsIndex + 0) % WriterNb))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_NewScanner:
w.WriteString(fmt.Sprintf("Scanner%d := bufio.NewScanner(bytes.NewReader(%#+v))\n", ScannerNb, a.NewScanner.R))
ScannerNb = ScannerNb + 1
case *NgoloFuzzOne_ScannerNgdotErr:
if ScannerNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Scanner%d.Err()\n", ScannerResultsIndex))
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
case *NgoloFuzzOne_ScannerNgdotBytes:
if ScannerNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Scanner%d.Bytes()\n", ScannerResultsIndex))
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
case *NgoloFuzzOne_ScannerNgdotText:
if ScannerNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Scanner%d.Text()\n", ScannerResultsIndex))
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
case *NgoloFuzzOne_ScannerNgdotScan:
if ScannerNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Scanner%d.Scan()\n", ScannerResultsIndex))
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
case *NgoloFuzzOne_ScannerNgdotBuffer:
if ScannerNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Scanner%d.Buffer(%#+v, int(%#+v))\n", ScannerResultsIndex, a.ScannerNgdotBuffer.Buf, a.ScannerNgdotBuffer.Max))
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
case *NgoloFuzzOne_ScanRunes:
w.WriteString(fmt.Sprintf("bufio.ScanRunes(%#+v, %#+v)\n", a.ScanRunes.Data, a.ScanRunes.AtEOF))
case *NgoloFuzzOne_ScanLines:
w.WriteString(fmt.Sprintf("bufio.ScanLines(%#+v, %#+v)\n", a.ScanLines.Data, a.ScanLines.AtEOF))
case *NgoloFuzzOne_ScanWords:
w.WriteString(fmt.Sprintf("bufio.ScanWords(%#+v, %#+v)\n", a.ScanWords.Data, a.ScanWords.AtEOF))
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_bufio
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: fail the build if the linked protoimpl runtime is
// older or newer than the range this generated code supports.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// NewReaderSizeArgs carries the arguments for a bufio.NewReaderSize call:
// the backing bytes (rd) and the requested buffer size.
type NewReaderSizeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Rd []byte `protobuf:"bytes,1,opt,name=rd,proto3" json:"rd,omitempty"`
Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state and re-attaches the
// runtime message info (slot 0 of the generated type table).
func (x *NewReaderSizeArgs) Reset() {
*x = NewReaderSizeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message with the runtime's default text formatter.
func (x *NewReaderSizeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderSizeArgs) ProtoMessage() {}
// ProtoReflect exposes the reflection view, lazily caching message info.
func (x *NewReaderSizeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderSizeArgs.ProtoReflect.Descriptor instead.
func (*NewReaderSizeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetRd returns the reader payload; safe to call on a nil receiver.
func (x *NewReaderSizeArgs) GetRd() []byte {
if x != nil {
return x.Rd
}
return nil
}
// GetSize returns the requested buffer size; safe on a nil receiver.
func (x *NewReaderSizeArgs) GetSize() int64 {
if x != nil {
return x.Size
}
return 0
}
// NewReaderArgs carries the argument for a bufio.NewReader call: the
// bytes the new reader will serve.
type NewReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Rd []byte `protobuf:"bytes,1,opt,name=rd,proto3" json:"rd,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state and re-attaches the
// runtime message info (slot 1 of the generated type table).
func (x *NewReaderArgs) Reset() {
*x = NewReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message with the runtime's default text formatter.
func (x *NewReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderArgs) ProtoMessage() {}
// ProtoReflect exposes the reflection view, lazily caching message info.
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetRd returns the reader payload; safe to call on a nil receiver.
func (x *NewReaderArgs) GetRd() []byte {
if x != nil {
return x.Rd
}
return nil
}
// ReaderNgdotSizeArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotSizeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotSizeArgs) Reset() {
*x = ReaderNgdotSizeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotSizeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotSizeArgs) ProtoMessage() {}
func (x *ReaderNgdotSizeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotSizeArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotSizeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// ReaderNgdotResetArgs is the generated protobuf message with a single r (bytes) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotResetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotResetArgs) Reset() {
*x = ReaderNgdotResetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotResetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotResetArgs) ProtoMessage() {}
func (x *ReaderNgdotResetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotResetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetR returns the r field; safe on a nil receiver.
func (x *ReaderNgdotResetArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
// ReaderNgdotPeekArgs is the generated protobuf message with a single n (int64) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotPeekArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotPeekArgs) Reset() {
*x = ReaderNgdotPeekArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotPeekArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotPeekArgs) ProtoMessage() {}
func (x *ReaderNgdotPeekArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotPeekArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotPeekArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// GetN returns the n field; safe on a nil receiver.
func (x *ReaderNgdotPeekArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// ReaderNgdotDiscardArgs is the generated protobuf message with a single n (int64) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotDiscardArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotDiscardArgs) Reset() {
*x = ReaderNgdotDiscardArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotDiscardArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotDiscardArgs) ProtoMessage() {}
func (x *ReaderNgdotDiscardArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotDiscardArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotDiscardArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetN returns the n field; safe on a nil receiver.
func (x *ReaderNgdotDiscardArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// ReaderNgdotReadArgs is the generated protobuf message with a single p (bytes) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotReadArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadArgs) Reset() {
*x = ReaderNgdotReadArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadArgs) ProtoMessage() {}
func (x *ReaderNgdotReadArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// GetP returns the p field; safe on a nil receiver.
func (x *ReaderNgdotReadArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// ReaderNgdotReadByteArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotReadByteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadByteArgs) Reset() {
*x = ReaderNgdotReadByteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadByteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadByteArgs) ProtoMessage() {}
func (x *ReaderNgdotReadByteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadByteArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadByteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// ReaderNgdotUnreadByteArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotUnreadByteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotUnreadByteArgs) Reset() {
*x = ReaderNgdotUnreadByteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotUnreadByteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotUnreadByteArgs) ProtoMessage() {}
func (x *ReaderNgdotUnreadByteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotUnreadByteArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotUnreadByteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// ReaderNgdotReadRuneArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotReadRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadRuneArgs) Reset() {
*x = ReaderNgdotReadRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadRuneArgs) ProtoMessage() {}
func (x *ReaderNgdotReadRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadRuneArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// ReaderNgdotUnreadRuneArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotUnreadRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotUnreadRuneArgs) Reset() {
*x = ReaderNgdotUnreadRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotUnreadRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotUnreadRuneArgs) ProtoMessage() {}
func (x *ReaderNgdotUnreadRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotUnreadRuneArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotUnreadRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// ReaderNgdotBufferedArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotBufferedArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotBufferedArgs) Reset() {
*x = ReaderNgdotBufferedArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotBufferedArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotBufferedArgs) ProtoMessage() {}
func (x *ReaderNgdotBufferedArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotBufferedArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotBufferedArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// ReaderNgdotReadSliceArgs is the generated protobuf message with a single
// delim (uint32) field — proto3 has no byte scalar, so the delimiter is widened.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotReadSliceArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Delim uint32 `protobuf:"varint,1,opt,name=delim,proto3" json:"delim,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadSliceArgs) Reset() {
*x = ReaderNgdotReadSliceArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadSliceArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadSliceArgs) ProtoMessage() {}
func (x *ReaderNgdotReadSliceArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadSliceArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadSliceArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// GetDelim returns the delim field; safe on a nil receiver.
func (x *ReaderNgdotReadSliceArgs) GetDelim() uint32 {
if x != nil {
return x.Delim
}
return 0
}
// ReaderNgdotReadLineArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotReadLineArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadLineArgs) Reset() {
*x = ReaderNgdotReadLineArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadLineArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadLineArgs) ProtoMessage() {}
func (x *ReaderNgdotReadLineArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadLineArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadLineArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// ReaderNgdotReadBytesArgs is the generated protobuf message with a single
// delim (uint32) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotReadBytesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Delim uint32 `protobuf:"varint,1,opt,name=delim,proto3" json:"delim,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadBytesArgs) Reset() {
*x = ReaderNgdotReadBytesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadBytesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadBytesArgs) ProtoMessage() {}
func (x *ReaderNgdotReadBytesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadBytesArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadBytesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// GetDelim returns the delim field; safe on a nil receiver.
func (x *ReaderNgdotReadBytesArgs) GetDelim() uint32 {
if x != nil {
return x.Delim
}
return 0
}
// ReaderNgdotReadStringArgs is the generated protobuf message with a single
// delim (uint32) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotReadStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Delim uint32 `protobuf:"varint,1,opt,name=delim,proto3" json:"delim,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadStringArgs) Reset() {
*x = ReaderNgdotReadStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadStringArgs) ProtoMessage() {}
func (x *ReaderNgdotReadStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadStringArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetDelim returns the delim field; safe on a nil receiver.
func (x *ReaderNgdotReadStringArgs) GetDelim() uint32 {
if x != nil {
return x.Delim
}
return 0
}
// ReaderNgdotWriteToArgs is the generated protobuf message with a single w (bytes) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type ReaderNgdotWriteToArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotWriteToArgs) Reset() {
*x = ReaderNgdotWriteToArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotWriteToArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotWriteToArgs) ProtoMessage() {}
func (x *ReaderNgdotWriteToArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotWriteToArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotWriteToArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetW returns the w field; safe on a nil receiver.
func (x *ReaderNgdotWriteToArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
// NewWriterSizeArgs is the generated protobuf message with w (bytes) and
// size (int64) fields.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type NewWriterSizeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterSizeArgs) Reset() {
*x = NewWriterSizeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterSizeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterSizeArgs) ProtoMessage() {}
func (x *NewWriterSizeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterSizeArgs.ProtoReflect.Descriptor instead.
func (*NewWriterSizeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// GetW returns the w field; safe on a nil receiver.
func (x *NewWriterSizeArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
// GetSize returns the size field; safe on a nil receiver.
func (x *NewWriterSizeArgs) GetSize() int64 {
if x != nil {
return x.Size
}
return 0
}
// NewWriterArgs is the generated protobuf message with a single w (bytes) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type NewWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterArgs) Reset() {
*x = NewWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterArgs) ProtoMessage() {}
func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// GetW returns the w field; safe on a nil receiver.
func (x *NewWriterArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
// WriterNgdotSizeArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotSizeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotSizeArgs) Reset() {
*x = WriterNgdotSizeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotSizeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotSizeArgs) ProtoMessage() {}
func (x *WriterNgdotSizeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotSizeArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotSizeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// WriterNgdotResetArgs is the generated protobuf message with a single w (bytes) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotResetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotResetArgs) Reset() {
*x = WriterNgdotResetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotResetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotResetArgs) ProtoMessage() {}
func (x *WriterNgdotResetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotResetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// GetW returns the w field; safe on a nil receiver.
func (x *WriterNgdotResetArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
// WriterNgdotFlushArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotFlushArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotFlushArgs) Reset() {
*x = WriterNgdotFlushArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotFlushArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotFlushArgs) ProtoMessage() {}
func (x *WriterNgdotFlushArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotFlushArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotFlushArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// WriterNgdotAvailableArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotAvailableArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotAvailableArgs) Reset() {
*x = WriterNgdotAvailableArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotAvailableArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotAvailableArgs) ProtoMessage() {}
func (x *WriterNgdotAvailableArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotAvailableArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotAvailableArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// WriterNgdotAvailableBufferArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotAvailableBufferArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotAvailableBufferArgs) Reset() {
*x = WriterNgdotAvailableBufferArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotAvailableBufferArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotAvailableBufferArgs) ProtoMessage() {}
func (x *WriterNgdotAvailableBufferArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotAvailableBufferArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotAvailableBufferArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// WriterNgdotBufferedArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotBufferedArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotBufferedArgs) Reset() {
*x = WriterNgdotBufferedArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotBufferedArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotBufferedArgs) ProtoMessage() {}
func (x *WriterNgdotBufferedArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotBufferedArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotBufferedArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// WriterNgdotWriteArgs is the generated protobuf message with a single p (bytes) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWriteArgs) Reset() {
*x = WriterNgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWriteArgs) ProtoMessage() {}
func (x *WriterNgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// GetP returns the p field; safe on a nil receiver.
func (x *WriterNgdotWriteArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// WriterNgdotWriteByteArgs is the generated protobuf message with a single
// c (uint32) field — proto3 has no byte scalar, so the byte is widened.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotWriteByteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
C uint32 `protobuf:"varint,1,opt,name=c,proto3" json:"c,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWriteByteArgs) Reset() {
*x = WriterNgdotWriteByteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWriteByteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWriteByteArgs) ProtoMessage() {}
func (x *WriterNgdotWriteByteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWriteByteArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteByteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// GetC returns the c field; safe on a nil receiver.
func (x *WriterNgdotWriteByteArgs) GetC() uint32 {
if x != nil {
return x.C
}
return 0
}
// WriterNgdotWriteRuneArgs is the generated protobuf message with a single
// r (string) field — the rune travels as a string on the wire.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotWriteRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWriteRuneArgs) Reset() {
*x = WriterNgdotWriteRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWriteRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWriteRuneArgs) ProtoMessage() {}
func (x *WriterNgdotWriteRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWriteRuneArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// GetR returns the r field; safe on a nil receiver.
func (x *WriterNgdotWriteRuneArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
// WriterNgdotWriteStringArgs is the generated protobuf message with a single
// s (string) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotWriteStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWriteStringArgs) Reset() {
*x = WriterNgdotWriteStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWriteStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWriteStringArgs) ProtoMessage() {}
func (x *WriterNgdotWriteStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWriteStringArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
// GetS returns the s field; safe on a nil receiver.
func (x *WriterNgdotWriteStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// WriterNgdotReadFromArgs is the generated protobuf message with a single r (bytes) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type WriterNgdotReadFromArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotReadFromArgs) Reset() {
*x = WriterNgdotReadFromArgs{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotReadFromArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotReadFromArgs) ProtoMessage() {}
func (x *WriterNgdotReadFromArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotReadFromArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotReadFromArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
// GetR returns the r field; safe on a nil receiver.
func (x *WriterNgdotReadFromArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
// NewReadWriterArgs is a field-less generated protobuf message (no arguments).
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type NewReadWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewReadWriterArgs) Reset() {
*x = NewReadWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewReadWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReadWriterArgs) ProtoMessage() {}
func (x *NewReadWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReadWriterArgs.ProtoReflect.Descriptor instead.
func (*NewReadWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}
// NewScannerArgs is the generated protobuf message with a single r (bytes) field.
// Standard protoc-gen-go boilerplate; regenerate rather than hand-edit.
type NewScannerArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewScannerArgs) Reset() {
*x = NewScannerArgs{}
mi := &file_ngolofuzz_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewScannerArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewScannerArgs) ProtoMessage() {}
func (x *NewScannerArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewScannerArgs.ProtoReflect.Descriptor instead.
func (*NewScannerArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
// GetR returns the r field; safe on a nil receiver.
func (x *NewScannerArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
// ScannerNgdotErrArgs is the generated protobuf message (message index 32,
// no user fields). Code generated by protoc-gen-go; only comments added
// here — do not hand-edit the code.
type ScannerNgdotErrArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *ScannerNgdotErrArgs) Reset() {
*x = ScannerNgdotErrArgs{}
mi := &file_ngolofuzz_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotErrArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotErrArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *ScannerNgdotErrArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotErrArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotErrArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}
// ScannerNgdotBytesArgs is the generated protobuf message (message index 33,
// no user fields). Code generated by protoc-gen-go; only comments added
// here — do not hand-edit the code.
type ScannerNgdotBytesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *ScannerNgdotBytesArgs) Reset() {
*x = ScannerNgdotBytesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotBytesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotBytesArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *ScannerNgdotBytesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotBytesArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotBytesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{33}
}
// ScannerNgdotTextArgs is the generated protobuf message (message index 34,
// no user fields). Code generated by protoc-gen-go; only comments added
// here — do not hand-edit the code.
type ScannerNgdotTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *ScannerNgdotTextArgs) Reset() {
*x = ScannerNgdotTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotTextArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *ScannerNgdotTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[34]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotTextArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{34}
}
// ScannerNgdotScanArgs is the generated protobuf message (message index 35,
// no user fields). Code generated by protoc-gen-go; only comments added
// here — do not hand-edit the code.
type ScannerNgdotScanArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *ScannerNgdotScanArgs) Reset() {
*x = ScannerNgdotScanArgs{}
mi := &file_ngolofuzz_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotScanArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotScanArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *ScannerNgdotScanArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[35]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotScanArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotScanArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{35}
}
// ScannerNgdotBufferArgs is the generated protobuf message (message index 36)
// carrying a bytes field Buf and an int64 field Max. Code generated by
// protoc-gen-go; only comments added here — do not hand-edit the code.
type ScannerNgdotBufferArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
Max int64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *ScannerNgdotBufferArgs) Reset() {
*x = ScannerNgdotBufferArgs{}
mi := &file_ngolofuzz_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotBufferArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotBufferArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *ScannerNgdotBufferArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[36]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotBufferArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotBufferArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{36}
}
// GetBuf returns the Buf field, or nil if the receiver is nil.
func (x *ScannerNgdotBufferArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
// GetMax returns the Max field, or 0 if the receiver is nil.
func (x *ScannerNgdotBufferArgs) GetMax() int64 {
if x != nil {
return x.Max
}
return 0
}
// ScanRunesArgs is the generated protobuf message (message index 37)
// carrying a bytes field Data and a bool field AtEOF. Code generated by
// protoc-gen-go; only comments added here — do not hand-edit the code.
type ScanRunesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
AtEOF bool `protobuf:"varint,2,opt,name=atEOF,proto3" json:"atEOF,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *ScanRunesArgs) Reset() {
*x = ScanRunesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScanRunesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScanRunesArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *ScanRunesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[37]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScanRunesArgs.ProtoReflect.Descriptor instead.
func (*ScanRunesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{37}
}
// GetData returns the Data field, or nil if the receiver is nil.
func (x *ScanRunesArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
// GetAtEOF returns the AtEOF field, or false if the receiver is nil.
func (x *ScanRunesArgs) GetAtEOF() bool {
if x != nil {
return x.AtEOF
}
return false
}
// ScanLinesArgs is the generated protobuf message (message index 38)
// carrying a bytes field Data and a bool field AtEOF. Code generated by
// protoc-gen-go; only comments added here — do not hand-edit the code.
type ScanLinesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
AtEOF bool `protobuf:"varint,2,opt,name=atEOF,proto3" json:"atEOF,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *ScanLinesArgs) Reset() {
*x = ScanLinesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScanLinesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScanLinesArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *ScanLinesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[38]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScanLinesArgs.ProtoReflect.Descriptor instead.
func (*ScanLinesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{38}
}
// GetData returns the Data field, or nil if the receiver is nil.
func (x *ScanLinesArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
// GetAtEOF returns the AtEOF field, or false if the receiver is nil.
func (x *ScanLinesArgs) GetAtEOF() bool {
if x != nil {
return x.AtEOF
}
return false
}
// ScanWordsArgs is the generated protobuf message (message index 39)
// carrying a bytes field Data and a bool field AtEOF. Code generated by
// protoc-gen-go; only comments added here — do not hand-edit the code.
type ScanWordsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
AtEOF bool `protobuf:"varint,2,opt,name=atEOF,proto3" json:"atEOF,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *ScanWordsArgs) Reset() {
*x = ScanWordsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScanWordsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScanWordsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *ScanWordsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[39]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScanWordsArgs.ProtoReflect.Descriptor instead.
func (*ScanWordsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{39}
}
// GetData returns the Data field, or nil if the receiver is nil.
func (x *ScanWordsArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
// GetAtEOF returns the AtEOF field, or false if the receiver is nil.
func (x *ScanWordsArgs) GetAtEOF() bool {
if x != nil {
return x.AtEOF
}
return false
}
// NgoloFuzzOne is the generated protobuf message (message index 40) holding
// a oneof over every fuzzable operation; exactly one of the wrapper types
// listed below may be assigned to Item. Code generated by protoc-gen-go;
// only comments added here — do not hand-edit the code.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewReaderSize
// *NgoloFuzzOne_NewReader
// *NgoloFuzzOne_ReaderNgdotSize
// *NgoloFuzzOne_ReaderNgdotReset
// *NgoloFuzzOne_ReaderNgdotPeek
// *NgoloFuzzOne_ReaderNgdotDiscard
// *NgoloFuzzOne_ReaderNgdotRead
// *NgoloFuzzOne_ReaderNgdotReadByte
// *NgoloFuzzOne_ReaderNgdotUnreadByte
// *NgoloFuzzOne_ReaderNgdotReadRune
// *NgoloFuzzOne_ReaderNgdotUnreadRune
// *NgoloFuzzOne_ReaderNgdotBuffered
// *NgoloFuzzOne_ReaderNgdotReadSlice
// *NgoloFuzzOne_ReaderNgdotReadLine
// *NgoloFuzzOne_ReaderNgdotReadBytes
// *NgoloFuzzOne_ReaderNgdotReadString
// *NgoloFuzzOne_ReaderNgdotWriteTo
// *NgoloFuzzOne_NewWriterSize
// *NgoloFuzzOne_NewWriter
// *NgoloFuzzOne_WriterNgdotSize
// *NgoloFuzzOne_WriterNgdotReset
// *NgoloFuzzOne_WriterNgdotFlush
// *NgoloFuzzOne_WriterNgdotAvailable
// *NgoloFuzzOne_WriterNgdotAvailableBuffer
// *NgoloFuzzOne_WriterNgdotBuffered
// *NgoloFuzzOne_WriterNgdotWrite
// *NgoloFuzzOne_WriterNgdotWriteByte
// *NgoloFuzzOne_WriterNgdotWriteRune
// *NgoloFuzzOne_WriterNgdotWriteString
// *NgoloFuzzOne_WriterNgdotReadFrom
// *NgoloFuzzOne_NewReadWriter
// *NgoloFuzzOne_NewScanner
// *NgoloFuzzOne_ScannerNgdotErr
// *NgoloFuzzOne_ScannerNgdotBytes
// *NgoloFuzzOne_ScannerNgdotText
// *NgoloFuzzOne_ScannerNgdotScan
// *NgoloFuzzOne_ScannerNgdotBuffer
// *NgoloFuzzOne_ScanRunes
// *NgoloFuzzOne_ScanLines
// *NgoloFuzzOne_ScanWords
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[40]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{40}
}
// Generated oneof accessors for NgoloFuzzOne. Each GetXxx returns the
// wrapped value when Item currently holds the matching wrapper type, and
// nil otherwise (including on a nil receiver). Code generated by
// protoc-gen-go; only comments added here — do not hand-edit the code.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewReaderSize() *NewReaderSizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReaderSize); ok {
return x.NewReaderSize
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
return x.NewReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotSize() *ReaderNgdotSizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotSize); ok {
return x.ReaderNgdotSize
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotReset() *ReaderNgdotResetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReset); ok {
return x.ReaderNgdotReset
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotPeek() *ReaderNgdotPeekArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotPeek); ok {
return x.ReaderNgdotPeek
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotDiscard() *ReaderNgdotDiscardArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotDiscard); ok {
return x.ReaderNgdotDiscard
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotRead() *ReaderNgdotReadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotRead); ok {
return x.ReaderNgdotRead
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotReadByte() *ReaderNgdotReadByteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadByte); ok {
return x.ReaderNgdotReadByte
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotUnreadByte() *ReaderNgdotUnreadByteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotUnreadByte); ok {
return x.ReaderNgdotUnreadByte
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotReadRune() *ReaderNgdotReadRuneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadRune); ok {
return x.ReaderNgdotReadRune
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotUnreadRune() *ReaderNgdotUnreadRuneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotUnreadRune); ok {
return x.ReaderNgdotUnreadRune
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotBuffered() *ReaderNgdotBufferedArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotBuffered); ok {
return x.ReaderNgdotBuffered
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotReadSlice() *ReaderNgdotReadSliceArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadSlice); ok {
return x.ReaderNgdotReadSlice
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotReadLine() *ReaderNgdotReadLineArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadLine); ok {
return x.ReaderNgdotReadLine
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotReadBytes() *ReaderNgdotReadBytesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadBytes); ok {
return x.ReaderNgdotReadBytes
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotReadString() *ReaderNgdotReadStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadString); ok {
return x.ReaderNgdotReadString
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotWriteTo() *ReaderNgdotWriteToArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotWriteTo); ok {
return x.ReaderNgdotWriteTo
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriterSize() *NewWriterSizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriterSize); ok {
return x.NewWriterSize
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
return x.NewWriter
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotSize() *WriterNgdotSizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotSize); ok {
return x.WriterNgdotSize
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotReset() *WriterNgdotResetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotReset); ok {
return x.WriterNgdotReset
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotFlush() *WriterNgdotFlushArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotFlush); ok {
return x.WriterNgdotFlush
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotAvailable() *WriterNgdotAvailableArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotAvailable); ok {
return x.WriterNgdotAvailable
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotAvailableBuffer() *WriterNgdotAvailableBufferArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotAvailableBuffer); ok {
return x.WriterNgdotAvailableBuffer
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotBuffered() *WriterNgdotBufferedArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotBuffered); ok {
return x.WriterNgdotBuffered
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotWrite() *WriterNgdotWriteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWrite); ok {
return x.WriterNgdotWrite
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotWriteByte() *WriterNgdotWriteByteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWriteByte); ok {
return x.WriterNgdotWriteByte
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotWriteRune() *WriterNgdotWriteRuneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWriteRune); ok {
return x.WriterNgdotWriteRune
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotWriteString() *WriterNgdotWriteStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWriteString); ok {
return x.WriterNgdotWriteString
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotReadFrom() *WriterNgdotReadFromArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotReadFrom); ok {
return x.WriterNgdotReadFrom
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewReadWriter() *NewReadWriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReadWriter); ok {
return x.NewReadWriter
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewScanner() *NewScannerArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewScanner); ok {
return x.NewScanner
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotErr() *ScannerNgdotErrArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotErr); ok {
return x.ScannerNgdotErr
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotBytes() *ScannerNgdotBytesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotBytes); ok {
return x.ScannerNgdotBytes
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotText() *ScannerNgdotTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotText); ok {
return x.ScannerNgdotText
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotScan() *ScannerNgdotScanArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotScan); ok {
return x.ScannerNgdotScan
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotBuffer() *ScannerNgdotBufferArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotBuffer); ok {
return x.ScannerNgdotBuffer
}
}
return nil
}
func (x *NgoloFuzzOne) GetScanRunes() *ScanRunesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScanRunes); ok {
return x.ScanRunes
}
}
return nil
}
func (x *NgoloFuzzOne) GetScanLines() *ScanLinesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScanLines); ok {
return x.ScanLines
}
}
return nil
}
func (x *NgoloFuzzOne) GetScanWords() *ScanWordsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScanWords); ok {
return x.ScanWords
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented only by the
// generated oneof wrapper types below; the field numbers in the protobuf
// struct tags (1-40) identify each case on the wire. Code generated by
// protoc-gen-go; only comments added here — do not hand-edit the code.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewReaderSize struct {
NewReaderSize *NewReaderSizeArgs `protobuf:"bytes,1,opt,name=NewReaderSize,proto3,oneof"`
}
type NgoloFuzzOne_NewReader struct {
NewReader *NewReaderArgs `protobuf:"bytes,2,opt,name=NewReader,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotSize struct {
ReaderNgdotSize *ReaderNgdotSizeArgs `protobuf:"bytes,3,opt,name=ReaderNgdotSize,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReset struct {
ReaderNgdotReset *ReaderNgdotResetArgs `protobuf:"bytes,4,opt,name=ReaderNgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotPeek struct {
ReaderNgdotPeek *ReaderNgdotPeekArgs `protobuf:"bytes,5,opt,name=ReaderNgdotPeek,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotDiscard struct {
ReaderNgdotDiscard *ReaderNgdotDiscardArgs `protobuf:"bytes,6,opt,name=ReaderNgdotDiscard,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotRead struct {
ReaderNgdotRead *ReaderNgdotReadArgs `protobuf:"bytes,7,opt,name=ReaderNgdotRead,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReadByte struct {
ReaderNgdotReadByte *ReaderNgdotReadByteArgs `protobuf:"bytes,8,opt,name=ReaderNgdotReadByte,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotUnreadByte struct {
ReaderNgdotUnreadByte *ReaderNgdotUnreadByteArgs `protobuf:"bytes,9,opt,name=ReaderNgdotUnreadByte,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReadRune struct {
ReaderNgdotReadRune *ReaderNgdotReadRuneArgs `protobuf:"bytes,10,opt,name=ReaderNgdotReadRune,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotUnreadRune struct {
ReaderNgdotUnreadRune *ReaderNgdotUnreadRuneArgs `protobuf:"bytes,11,opt,name=ReaderNgdotUnreadRune,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotBuffered struct {
ReaderNgdotBuffered *ReaderNgdotBufferedArgs `protobuf:"bytes,12,opt,name=ReaderNgdotBuffered,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReadSlice struct {
ReaderNgdotReadSlice *ReaderNgdotReadSliceArgs `protobuf:"bytes,13,opt,name=ReaderNgdotReadSlice,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReadLine struct {
ReaderNgdotReadLine *ReaderNgdotReadLineArgs `protobuf:"bytes,14,opt,name=ReaderNgdotReadLine,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReadBytes struct {
ReaderNgdotReadBytes *ReaderNgdotReadBytesArgs `protobuf:"bytes,15,opt,name=ReaderNgdotReadBytes,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReadString struct {
ReaderNgdotReadString *ReaderNgdotReadStringArgs `protobuf:"bytes,16,opt,name=ReaderNgdotReadString,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotWriteTo struct {
ReaderNgdotWriteTo *ReaderNgdotWriteToArgs `protobuf:"bytes,17,opt,name=ReaderNgdotWriteTo,proto3,oneof"`
}
type NgoloFuzzOne_NewWriterSize struct {
NewWriterSize *NewWriterSizeArgs `protobuf:"bytes,18,opt,name=NewWriterSize,proto3,oneof"`
}
type NgoloFuzzOne_NewWriter struct {
NewWriter *NewWriterArgs `protobuf:"bytes,19,opt,name=NewWriter,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotSize struct {
WriterNgdotSize *WriterNgdotSizeArgs `protobuf:"bytes,20,opt,name=WriterNgdotSize,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotReset struct {
WriterNgdotReset *WriterNgdotResetArgs `protobuf:"bytes,21,opt,name=WriterNgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotFlush struct {
WriterNgdotFlush *WriterNgdotFlushArgs `protobuf:"bytes,22,opt,name=WriterNgdotFlush,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotAvailable struct {
WriterNgdotAvailable *WriterNgdotAvailableArgs `protobuf:"bytes,23,opt,name=WriterNgdotAvailable,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotAvailableBuffer struct {
WriterNgdotAvailableBuffer *WriterNgdotAvailableBufferArgs `protobuf:"bytes,24,opt,name=WriterNgdotAvailableBuffer,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotBuffered struct {
WriterNgdotBuffered *WriterNgdotBufferedArgs `protobuf:"bytes,25,opt,name=WriterNgdotBuffered,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotWrite struct {
WriterNgdotWrite *WriterNgdotWriteArgs `protobuf:"bytes,26,opt,name=WriterNgdotWrite,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotWriteByte struct {
WriterNgdotWriteByte *WriterNgdotWriteByteArgs `protobuf:"bytes,27,opt,name=WriterNgdotWriteByte,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotWriteRune struct {
WriterNgdotWriteRune *WriterNgdotWriteRuneArgs `protobuf:"bytes,28,opt,name=WriterNgdotWriteRune,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotWriteString struct {
WriterNgdotWriteString *WriterNgdotWriteStringArgs `protobuf:"bytes,29,opt,name=WriterNgdotWriteString,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotReadFrom struct {
WriterNgdotReadFrom *WriterNgdotReadFromArgs `protobuf:"bytes,30,opt,name=WriterNgdotReadFrom,proto3,oneof"`
}
type NgoloFuzzOne_NewReadWriter struct {
NewReadWriter *NewReadWriterArgs `protobuf:"bytes,31,opt,name=NewReadWriter,proto3,oneof"`
}
type NgoloFuzzOne_NewScanner struct {
NewScanner *NewScannerArgs `protobuf:"bytes,32,opt,name=NewScanner,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotErr struct {
ScannerNgdotErr *ScannerNgdotErrArgs `protobuf:"bytes,33,opt,name=ScannerNgdotErr,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotBytes struct {
ScannerNgdotBytes *ScannerNgdotBytesArgs `protobuf:"bytes,34,opt,name=ScannerNgdotBytes,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotText struct {
ScannerNgdotText *ScannerNgdotTextArgs `protobuf:"bytes,35,opt,name=ScannerNgdotText,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotScan struct {
ScannerNgdotScan *ScannerNgdotScanArgs `protobuf:"bytes,36,opt,name=ScannerNgdotScan,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotBuffer struct {
ScannerNgdotBuffer *ScannerNgdotBufferArgs `protobuf:"bytes,37,opt,name=ScannerNgdotBuffer,proto3,oneof"`
}
type NgoloFuzzOne_ScanRunes struct {
ScanRunes *ScanRunesArgs `protobuf:"bytes,38,opt,name=ScanRunes,proto3,oneof"`
}
type NgoloFuzzOne_ScanLines struct {
ScanLines *ScanLinesArgs `protobuf:"bytes,39,opt,name=ScanLines,proto3,oneof"`
}
type NgoloFuzzOne_ScanWords struct {
ScanWords *ScanWordsArgs `protobuf:"bytes,40,opt,name=ScanWords,proto3,oneof"`
}
// Marker methods sealing each wrapper type into the Item oneof.
func (*NgoloFuzzOne_NewReaderSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotPeek) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotDiscard) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotRead) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReadByte) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotUnreadByte) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReadRune) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotUnreadRune) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotBuffered) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReadSlice) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReadLine) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReadBytes) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReadString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotWriteTo) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewWriterSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotFlush) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotAvailable) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotAvailableBuffer) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotBuffered) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotWrite) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotWriteByte) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotWriteRune) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotWriteString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotReadFrom) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewReadWriter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewScanner) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotErr) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotBytes) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotScan) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotBuffer) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScanRunes) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScanLines) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScanWords) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the generated protobuf message (message index 41) holding
// a oneof over the primitive scalar kinds (double, int64, bool, string,
// bytes). Code generated by protoc-gen-go; only comments added here — do
// not hand-edit the code.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[41]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{41}
}
// Oneof accessors: each GetXxx returns the wrapped value when Item holds
// the matching case, else the zero value of its type.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the sealed interface for the Item oneof; the
// wrapper types below are its only implementations.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated protobuf message (message index 42)
// wrapping a repeated NgoloFuzzOne — the top-level container decoded by the
// fuzz harness. Code generated by protoc-gen-go; only comments added here —
// do not hand-edit the code.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[42]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{42}
}
// GetList returns the List field, or nil if the receiver is nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the protoreflect.FileDescriptor for ngolofuzz.proto.
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"7\n" +
"\x11NewReaderSizeArgs\x12\x0e\n" +
"\x02rd\x18\x01 \x01(\fR\x02rd\x12\x12\n" +
"\x04size\x18\x02 \x01(\x03R\x04size\"\x1f\n" +
"\rNewReaderArgs\x12\x0e\n" +
"\x02rd\x18\x01 \x01(\fR\x02rd\"\x15\n" +
"\x13ReaderNgdotSizeArgs\"$\n" +
"\x14ReaderNgdotResetArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"#\n" +
"\x13ReaderNgdotPeekArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"&\n" +
"\x16ReaderNgdotDiscardArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"#\n" +
"\x13ReaderNgdotReadArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\fR\x01p\"\x19\n" +
"\x17ReaderNgdotReadByteArgs\"\x1b\n" +
"\x19ReaderNgdotUnreadByteArgs\"\x19\n" +
"\x17ReaderNgdotReadRuneArgs\"\x1b\n" +
"\x19ReaderNgdotUnreadRuneArgs\"\x19\n" +
"\x17ReaderNgdotBufferedArgs\"0\n" +
"\x18ReaderNgdotReadSliceArgs\x12\x14\n" +
"\x05delim\x18\x01 \x01(\rR\x05delim\"\x19\n" +
"\x17ReaderNgdotReadLineArgs\"0\n" +
"\x18ReaderNgdotReadBytesArgs\x12\x14\n" +
"\x05delim\x18\x01 \x01(\rR\x05delim\"1\n" +
"\x19ReaderNgdotReadStringArgs\x12\x14\n" +
"\x05delim\x18\x01 \x01(\rR\x05delim\"&\n" +
"\x16ReaderNgdotWriteToArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"5\n" +
"\x11NewWriterSizeArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\x12\n" +
"\x04size\x18\x02 \x01(\x03R\x04size\"\x1d\n" +
"\rNewWriterArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"\x15\n" +
"\x13WriterNgdotSizeArgs\"$\n" +
"\x14WriterNgdotResetArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"\x16\n" +
"\x14WriterNgdotFlushArgs\"\x1a\n" +
"\x18WriterNgdotAvailableArgs\" \n" +
"\x1eWriterNgdotAvailableBufferArgs\"\x19\n" +
"\x17WriterNgdotBufferedArgs\"$\n" +
"\x14WriterNgdotWriteArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\fR\x01p\"(\n" +
"\x18WriterNgdotWriteByteArgs\x12\f\n" +
"\x01c\x18\x01 \x01(\rR\x01c\"(\n" +
"\x18WriterNgdotWriteRuneArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"*\n" +
"\x1aWriterNgdotWriteStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"'\n" +
"\x17WriterNgdotReadFromArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\x13\n" +
"\x11NewReadWriterArgs\"\x1e\n" +
"\x0eNewScannerArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\x15\n" +
"\x13ScannerNgdotErrArgs\"\x17\n" +
"\x15ScannerNgdotBytesArgs\"\x16\n" +
"\x14ScannerNgdotTextArgs\"\x16\n" +
"\x14ScannerNgdotScanArgs\"<\n" +
"\x16ScannerNgdotBufferArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\x12\x10\n" +
"\x03max\x18\x02 \x01(\x03R\x03max\"9\n" +
"\rScanRunesArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\x12\x14\n" +
"\x05atEOF\x18\x02 \x01(\bR\x05atEOF\"9\n" +
"\rScanLinesArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\x12\x14\n" +
"\x05atEOF\x18\x02 \x01(\bR\x05atEOF\"9\n" +
"\rScanWordsArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\x12\x14\n" +
"\x05atEOF\x18\x02 \x01(\bR\x05atEOF\"\xad\x19\n" +
"\fNgoloFuzzOne\x12D\n" +
"\rNewReaderSize\x18\x01 \x01(\v2\x1c.ngolofuzz.NewReaderSizeArgsH\x00R\rNewReaderSize\x128\n" +
"\tNewReader\x18\x02 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12J\n" +
"\x0fReaderNgdotSize\x18\x03 \x01(\v2\x1e.ngolofuzz.ReaderNgdotSizeArgsH\x00R\x0fReaderNgdotSize\x12M\n" +
"\x10ReaderNgdotReset\x18\x04 \x01(\v2\x1f.ngolofuzz.ReaderNgdotResetArgsH\x00R\x10ReaderNgdotReset\x12J\n" +
"\x0fReaderNgdotPeek\x18\x05 \x01(\v2\x1e.ngolofuzz.ReaderNgdotPeekArgsH\x00R\x0fReaderNgdotPeek\x12S\n" +
"\x12ReaderNgdotDiscard\x18\x06 \x01(\v2!.ngolofuzz.ReaderNgdotDiscardArgsH\x00R\x12ReaderNgdotDiscard\x12J\n" +
"\x0fReaderNgdotRead\x18\a \x01(\v2\x1e.ngolofuzz.ReaderNgdotReadArgsH\x00R\x0fReaderNgdotRead\x12V\n" +
"\x13ReaderNgdotReadByte\x18\b \x01(\v2\".ngolofuzz.ReaderNgdotReadByteArgsH\x00R\x13ReaderNgdotReadByte\x12\\\n" +
"\x15ReaderNgdotUnreadByte\x18\t \x01(\v2$.ngolofuzz.ReaderNgdotUnreadByteArgsH\x00R\x15ReaderNgdotUnreadByte\x12V\n" +
"\x13ReaderNgdotReadRune\x18\n" +
" \x01(\v2\".ngolofuzz.ReaderNgdotReadRuneArgsH\x00R\x13ReaderNgdotReadRune\x12\\\n" +
"\x15ReaderNgdotUnreadRune\x18\v \x01(\v2$.ngolofuzz.ReaderNgdotUnreadRuneArgsH\x00R\x15ReaderNgdotUnreadRune\x12V\n" +
"\x13ReaderNgdotBuffered\x18\f \x01(\v2\".ngolofuzz.ReaderNgdotBufferedArgsH\x00R\x13ReaderNgdotBuffered\x12Y\n" +
"\x14ReaderNgdotReadSlice\x18\r \x01(\v2#.ngolofuzz.ReaderNgdotReadSliceArgsH\x00R\x14ReaderNgdotReadSlice\x12V\n" +
"\x13ReaderNgdotReadLine\x18\x0e \x01(\v2\".ngolofuzz.ReaderNgdotReadLineArgsH\x00R\x13ReaderNgdotReadLine\x12Y\n" +
"\x14ReaderNgdotReadBytes\x18\x0f \x01(\v2#.ngolofuzz.ReaderNgdotReadBytesArgsH\x00R\x14ReaderNgdotReadBytes\x12\\\n" +
"\x15ReaderNgdotReadString\x18\x10 \x01(\v2$.ngolofuzz.ReaderNgdotReadStringArgsH\x00R\x15ReaderNgdotReadString\x12S\n" +
"\x12ReaderNgdotWriteTo\x18\x11 \x01(\v2!.ngolofuzz.ReaderNgdotWriteToArgsH\x00R\x12ReaderNgdotWriteTo\x12D\n" +
"\rNewWriterSize\x18\x12 \x01(\v2\x1c.ngolofuzz.NewWriterSizeArgsH\x00R\rNewWriterSize\x128\n" +
"\tNewWriter\x18\x13 \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriter\x12J\n" +
"\x0fWriterNgdotSize\x18\x14 \x01(\v2\x1e.ngolofuzz.WriterNgdotSizeArgsH\x00R\x0fWriterNgdotSize\x12M\n" +
"\x10WriterNgdotReset\x18\x15 \x01(\v2\x1f.ngolofuzz.WriterNgdotResetArgsH\x00R\x10WriterNgdotReset\x12M\n" +
"\x10WriterNgdotFlush\x18\x16 \x01(\v2\x1f.ngolofuzz.WriterNgdotFlushArgsH\x00R\x10WriterNgdotFlush\x12Y\n" +
"\x14WriterNgdotAvailable\x18\x17 \x01(\v2#.ngolofuzz.WriterNgdotAvailableArgsH\x00R\x14WriterNgdotAvailable\x12k\n" +
"\x1aWriterNgdotAvailableBuffer\x18\x18 \x01(\v2).ngolofuzz.WriterNgdotAvailableBufferArgsH\x00R\x1aWriterNgdotAvailableBuffer\x12V\n" +
"\x13WriterNgdotBuffered\x18\x19 \x01(\v2\".ngolofuzz.WriterNgdotBufferedArgsH\x00R\x13WriterNgdotBuffered\x12M\n" +
"\x10WriterNgdotWrite\x18\x1a \x01(\v2\x1f.ngolofuzz.WriterNgdotWriteArgsH\x00R\x10WriterNgdotWrite\x12Y\n" +
"\x14WriterNgdotWriteByte\x18\x1b \x01(\v2#.ngolofuzz.WriterNgdotWriteByteArgsH\x00R\x14WriterNgdotWriteByte\x12Y\n" +
"\x14WriterNgdotWriteRune\x18\x1c \x01(\v2#.ngolofuzz.WriterNgdotWriteRuneArgsH\x00R\x14WriterNgdotWriteRune\x12_\n" +
"\x16WriterNgdotWriteString\x18\x1d \x01(\v2%.ngolofuzz.WriterNgdotWriteStringArgsH\x00R\x16WriterNgdotWriteString\x12V\n" +
"\x13WriterNgdotReadFrom\x18\x1e \x01(\v2\".ngolofuzz.WriterNgdotReadFromArgsH\x00R\x13WriterNgdotReadFrom\x12D\n" +
"\rNewReadWriter\x18\x1f \x01(\v2\x1c.ngolofuzz.NewReadWriterArgsH\x00R\rNewReadWriter\x12;\n" +
"\n" +
"NewScanner\x18 \x01(\v2\x19.ngolofuzz.NewScannerArgsH\x00R\n" +
"NewScanner\x12J\n" +
"\x0fScannerNgdotErr\x18! \x01(\v2\x1e.ngolofuzz.ScannerNgdotErrArgsH\x00R\x0fScannerNgdotErr\x12P\n" +
"\x11ScannerNgdotBytes\x18\" \x01(\v2 .ngolofuzz.ScannerNgdotBytesArgsH\x00R\x11ScannerNgdotBytes\x12M\n" +
"\x10ScannerNgdotText\x18# \x01(\v2\x1f.ngolofuzz.ScannerNgdotTextArgsH\x00R\x10ScannerNgdotText\x12M\n" +
"\x10ScannerNgdotScan\x18$ \x01(\v2\x1f.ngolofuzz.ScannerNgdotScanArgsH\x00R\x10ScannerNgdotScan\x12S\n" +
"\x12ScannerNgdotBuffer\x18% \x01(\v2!.ngolofuzz.ScannerNgdotBufferArgsH\x00R\x12ScannerNgdotBuffer\x128\n" +
"\tScanRunes\x18& \x01(\v2\x18.ngolofuzz.ScanRunesArgsH\x00R\tScanRunes\x128\n" +
"\tScanLines\x18' \x01(\v2\x18.ngolofuzz.ScanLinesArgsH\x00R\tScanLines\x128\n" +
"\tScanWords\x18( \x01(\v2\x18.ngolofuzz.ScanWordsArgsH\x00R\tScanWordsB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x12Z\x10./;fuzz_ng_bufiob\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds runtime message info for the 43
// messages declared in ngolofuzz.proto.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 43)
// file_ngolofuzz_proto_goTypes maps descriptor type indices to Go types.
var file_ngolofuzz_proto_goTypes = []any{
(*NewReaderSizeArgs)(nil), // 0: ngolofuzz.NewReaderSizeArgs
(*NewReaderArgs)(nil), // 1: ngolofuzz.NewReaderArgs
(*ReaderNgdotSizeArgs)(nil), // 2: ngolofuzz.ReaderNgdotSizeArgs
(*ReaderNgdotResetArgs)(nil), // 3: ngolofuzz.ReaderNgdotResetArgs
(*ReaderNgdotPeekArgs)(nil), // 4: ngolofuzz.ReaderNgdotPeekArgs
(*ReaderNgdotDiscardArgs)(nil), // 5: ngolofuzz.ReaderNgdotDiscardArgs
(*ReaderNgdotReadArgs)(nil), // 6: ngolofuzz.ReaderNgdotReadArgs
(*ReaderNgdotReadByteArgs)(nil), // 7: ngolofuzz.ReaderNgdotReadByteArgs
(*ReaderNgdotUnreadByteArgs)(nil), // 8: ngolofuzz.ReaderNgdotUnreadByteArgs
(*ReaderNgdotReadRuneArgs)(nil), // 9: ngolofuzz.ReaderNgdotReadRuneArgs
(*ReaderNgdotUnreadRuneArgs)(nil), // 10: ngolofuzz.ReaderNgdotUnreadRuneArgs
(*ReaderNgdotBufferedArgs)(nil), // 11: ngolofuzz.ReaderNgdotBufferedArgs
(*ReaderNgdotReadSliceArgs)(nil), // 12: ngolofuzz.ReaderNgdotReadSliceArgs
(*ReaderNgdotReadLineArgs)(nil), // 13: ngolofuzz.ReaderNgdotReadLineArgs
(*ReaderNgdotReadBytesArgs)(nil), // 14: ngolofuzz.ReaderNgdotReadBytesArgs
(*ReaderNgdotReadStringArgs)(nil), // 15: ngolofuzz.ReaderNgdotReadStringArgs
(*ReaderNgdotWriteToArgs)(nil), // 16: ngolofuzz.ReaderNgdotWriteToArgs
(*NewWriterSizeArgs)(nil), // 17: ngolofuzz.NewWriterSizeArgs
(*NewWriterArgs)(nil), // 18: ngolofuzz.NewWriterArgs
(*WriterNgdotSizeArgs)(nil), // 19: ngolofuzz.WriterNgdotSizeArgs
(*WriterNgdotResetArgs)(nil), // 20: ngolofuzz.WriterNgdotResetArgs
(*WriterNgdotFlushArgs)(nil), // 21: ngolofuzz.WriterNgdotFlushArgs
(*WriterNgdotAvailableArgs)(nil), // 22: ngolofuzz.WriterNgdotAvailableArgs
(*WriterNgdotAvailableBufferArgs)(nil), // 23: ngolofuzz.WriterNgdotAvailableBufferArgs
(*WriterNgdotBufferedArgs)(nil), // 24: ngolofuzz.WriterNgdotBufferedArgs
(*WriterNgdotWriteArgs)(nil), // 25: ngolofuzz.WriterNgdotWriteArgs
(*WriterNgdotWriteByteArgs)(nil), // 26: ngolofuzz.WriterNgdotWriteByteArgs
(*WriterNgdotWriteRuneArgs)(nil), // 27: ngolofuzz.WriterNgdotWriteRuneArgs
(*WriterNgdotWriteStringArgs)(nil), // 28: ngolofuzz.WriterNgdotWriteStringArgs
(*WriterNgdotReadFromArgs)(nil), // 29: ngolofuzz.WriterNgdotReadFromArgs
(*NewReadWriterArgs)(nil), // 30: ngolofuzz.NewReadWriterArgs
(*NewScannerArgs)(nil), // 31: ngolofuzz.NewScannerArgs
(*ScannerNgdotErrArgs)(nil), // 32: ngolofuzz.ScannerNgdotErrArgs
(*ScannerNgdotBytesArgs)(nil), // 33: ngolofuzz.ScannerNgdotBytesArgs
(*ScannerNgdotTextArgs)(nil), // 34: ngolofuzz.ScannerNgdotTextArgs
(*ScannerNgdotScanArgs)(nil), // 35: ngolofuzz.ScannerNgdotScanArgs
(*ScannerNgdotBufferArgs)(nil), // 36: ngolofuzz.ScannerNgdotBufferArgs
(*ScanRunesArgs)(nil), // 37: ngolofuzz.ScanRunesArgs
(*ScanLinesArgs)(nil), // 38: ngolofuzz.ScanLinesArgs
(*ScanWordsArgs)(nil), // 39: ngolofuzz.ScanWordsArgs
(*NgoloFuzzOne)(nil), // 40: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 41: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 42: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs encodes field-to-type dependencies; the
// trailing entries delimit the method/extension sub-lists (all empty here).
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.NewReaderSize:type_name -> ngolofuzz.NewReaderSizeArgs
1, // 1: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
2, // 2: ngolofuzz.NgoloFuzzOne.ReaderNgdotSize:type_name -> ngolofuzz.ReaderNgdotSizeArgs
3, // 3: ngolofuzz.NgoloFuzzOne.ReaderNgdotReset:type_name -> ngolofuzz.ReaderNgdotResetArgs
4, // 4: ngolofuzz.NgoloFuzzOne.ReaderNgdotPeek:type_name -> ngolofuzz.ReaderNgdotPeekArgs
5, // 5: ngolofuzz.NgoloFuzzOne.ReaderNgdotDiscard:type_name -> ngolofuzz.ReaderNgdotDiscardArgs
6, // 6: ngolofuzz.NgoloFuzzOne.ReaderNgdotRead:type_name -> ngolofuzz.ReaderNgdotReadArgs
7, // 7: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadByte:type_name -> ngolofuzz.ReaderNgdotReadByteArgs
8, // 8: ngolofuzz.NgoloFuzzOne.ReaderNgdotUnreadByte:type_name -> ngolofuzz.ReaderNgdotUnreadByteArgs
9, // 9: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadRune:type_name -> ngolofuzz.ReaderNgdotReadRuneArgs
10, // 10: ngolofuzz.NgoloFuzzOne.ReaderNgdotUnreadRune:type_name -> ngolofuzz.ReaderNgdotUnreadRuneArgs
11, // 11: ngolofuzz.NgoloFuzzOne.ReaderNgdotBuffered:type_name -> ngolofuzz.ReaderNgdotBufferedArgs
12, // 12: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadSlice:type_name -> ngolofuzz.ReaderNgdotReadSliceArgs
13, // 13: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadLine:type_name -> ngolofuzz.ReaderNgdotReadLineArgs
14, // 14: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadBytes:type_name -> ngolofuzz.ReaderNgdotReadBytesArgs
15, // 15: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadString:type_name -> ngolofuzz.ReaderNgdotReadStringArgs
16, // 16: ngolofuzz.NgoloFuzzOne.ReaderNgdotWriteTo:type_name -> ngolofuzz.ReaderNgdotWriteToArgs
17, // 17: ngolofuzz.NgoloFuzzOne.NewWriterSize:type_name -> ngolofuzz.NewWriterSizeArgs
18, // 18: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
19, // 19: ngolofuzz.NgoloFuzzOne.WriterNgdotSize:type_name -> ngolofuzz.WriterNgdotSizeArgs
20, // 20: ngolofuzz.NgoloFuzzOne.WriterNgdotReset:type_name -> ngolofuzz.WriterNgdotResetArgs
21, // 21: ngolofuzz.NgoloFuzzOne.WriterNgdotFlush:type_name -> ngolofuzz.WriterNgdotFlushArgs
22, // 22: ngolofuzz.NgoloFuzzOne.WriterNgdotAvailable:type_name -> ngolofuzz.WriterNgdotAvailableArgs
23, // 23: ngolofuzz.NgoloFuzzOne.WriterNgdotAvailableBuffer:type_name -> ngolofuzz.WriterNgdotAvailableBufferArgs
24, // 24: ngolofuzz.NgoloFuzzOne.WriterNgdotBuffered:type_name -> ngolofuzz.WriterNgdotBufferedArgs
25, // 25: ngolofuzz.NgoloFuzzOne.WriterNgdotWrite:type_name -> ngolofuzz.WriterNgdotWriteArgs
26, // 26: ngolofuzz.NgoloFuzzOne.WriterNgdotWriteByte:type_name -> ngolofuzz.WriterNgdotWriteByteArgs
27, // 27: ngolofuzz.NgoloFuzzOne.WriterNgdotWriteRune:type_name -> ngolofuzz.WriterNgdotWriteRuneArgs
28, // 28: ngolofuzz.NgoloFuzzOne.WriterNgdotWriteString:type_name -> ngolofuzz.WriterNgdotWriteStringArgs
29, // 29: ngolofuzz.NgoloFuzzOne.WriterNgdotReadFrom:type_name -> ngolofuzz.WriterNgdotReadFromArgs
30, // 30: ngolofuzz.NgoloFuzzOne.NewReadWriter:type_name -> ngolofuzz.NewReadWriterArgs
31, // 31: ngolofuzz.NgoloFuzzOne.NewScanner:type_name -> ngolofuzz.NewScannerArgs
32, // 32: ngolofuzz.NgoloFuzzOne.ScannerNgdotErr:type_name -> ngolofuzz.ScannerNgdotErrArgs
33, // 33: ngolofuzz.NgoloFuzzOne.ScannerNgdotBytes:type_name -> ngolofuzz.ScannerNgdotBytesArgs
34, // 34: ngolofuzz.NgoloFuzzOne.ScannerNgdotText:type_name -> ngolofuzz.ScannerNgdotTextArgs
35, // 35: ngolofuzz.NgoloFuzzOne.ScannerNgdotScan:type_name -> ngolofuzz.ScannerNgdotScanArgs
36, // 36: ngolofuzz.NgoloFuzzOne.ScannerNgdotBuffer:type_name -> ngolofuzz.ScannerNgdotBufferArgs
37, // 37: ngolofuzz.NgoloFuzzOne.ScanRunes:type_name -> ngolofuzz.ScanRunesArgs
38, // 38: ngolofuzz.NgoloFuzzOne.ScanLines:type_name -> ngolofuzz.ScanLinesArgs
39, // 39: ngolofuzz.NgoloFuzzOne.ScanWords:type_name -> ngolofuzz.ScanWordsArgs
40, // 40: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
41, // [41:41] is the sub-list for method output_type
41, // [41:41] is the sub-list for method input_type
41, // [41:41] is the sub-list for extension type_name
41, // [41:41] is the sub-list for extension extendee
0, // [0:41] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the oneof wrapper types and builds the
// file descriptor exactly once; subsequent calls return immediately.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Message 40 (NgoloFuzzOne) carries the operation oneof.
file_ngolofuzz_proto_msgTypes[40].OneofWrappers = []any{
(*NgoloFuzzOne_NewReaderSize)(nil),
(*NgoloFuzzOne_NewReader)(nil),
(*NgoloFuzzOne_ReaderNgdotSize)(nil),
(*NgoloFuzzOne_ReaderNgdotReset)(nil),
(*NgoloFuzzOne_ReaderNgdotPeek)(nil),
(*NgoloFuzzOne_ReaderNgdotDiscard)(nil),
(*NgoloFuzzOne_ReaderNgdotRead)(nil),
(*NgoloFuzzOne_ReaderNgdotReadByte)(nil),
(*NgoloFuzzOne_ReaderNgdotUnreadByte)(nil),
(*NgoloFuzzOne_ReaderNgdotReadRune)(nil),
(*NgoloFuzzOne_ReaderNgdotUnreadRune)(nil),
(*NgoloFuzzOne_ReaderNgdotBuffered)(nil),
(*NgoloFuzzOne_ReaderNgdotReadSlice)(nil),
(*NgoloFuzzOne_ReaderNgdotReadLine)(nil),
(*NgoloFuzzOne_ReaderNgdotReadBytes)(nil),
(*NgoloFuzzOne_ReaderNgdotReadString)(nil),
(*NgoloFuzzOne_ReaderNgdotWriteTo)(nil),
(*NgoloFuzzOne_NewWriterSize)(nil),
(*NgoloFuzzOne_NewWriter)(nil),
(*NgoloFuzzOne_WriterNgdotSize)(nil),
(*NgoloFuzzOne_WriterNgdotReset)(nil),
(*NgoloFuzzOne_WriterNgdotFlush)(nil),
(*NgoloFuzzOne_WriterNgdotAvailable)(nil),
(*NgoloFuzzOne_WriterNgdotAvailableBuffer)(nil),
(*NgoloFuzzOne_WriterNgdotBuffered)(nil),
(*NgoloFuzzOne_WriterNgdotWrite)(nil),
(*NgoloFuzzOne_WriterNgdotWriteByte)(nil),
(*NgoloFuzzOne_WriterNgdotWriteRune)(nil),
(*NgoloFuzzOne_WriterNgdotWriteString)(nil),
(*NgoloFuzzOne_WriterNgdotReadFrom)(nil),
(*NgoloFuzzOne_NewReadWriter)(nil),
(*NgoloFuzzOne_NewScanner)(nil),
(*NgoloFuzzOne_ScannerNgdotErr)(nil),
(*NgoloFuzzOne_ScannerNgdotBytes)(nil),
(*NgoloFuzzOne_ScannerNgdotText)(nil),
(*NgoloFuzzOne_ScannerNgdotScan)(nil),
(*NgoloFuzzOne_ScannerNgdotBuffer)(nil),
(*NgoloFuzzOne_ScanRunes)(nil),
(*NgoloFuzzOne_ScanLines)(nil),
(*NgoloFuzzOne_ScanWords)(nil),
}
// Message 41 (NgoloFuzzAny) carries the scalar-value oneof.
file_ngolofuzz_proto_msgTypes[41].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 43,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release the construction-time tables; the built File retains what it needs.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_compress_bzip2
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"compress/bzip2"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is a fake net.Conn backed by an in-memory byte slice of fuzz
// data. Reads consume buf from offset; writes are discarded.
type FuzzingConn struct {
buf []byte
offset int
}
// Read copies buffered fuzz data into b and advances the read offset,
// returning io.EOF once the buffer is exhausted.
//
// Fix: the original partial-read branch tested
// len(b) < len(c.buf)+c.offset (wrong sign — the remaining byte count is
// len(c.buf)-c.offset) and then unconditionally reported n = len(b) and
// advanced offset by len(b). When fewer than len(b) bytes remained, that
// over-reported n (claiming unwritten bytes of b as read data) and pushed
// offset past len(c.buf), violating the io.Reader contract. copy already
// returns min(len(b), remaining), so we use its result directly.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and pretends the whole slice was written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the connection as drained so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a fixed placeholder net.Addr for FuzzingConn.
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// Deadlines are ignored: the conn is purely in-memory.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps fuzz input a in a FuzzingConn ready for reading.
// The slice is not copied; the caller must not mutate a afterwards.
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer and returns it
// as a *big.Int (zero for nil or empty input).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the fuzz bytes a through a *bufio.Reader, for
// fuzzed APIs that take buffered input.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each element of a from int64 to the platform int
// type, returning a new slice of equal length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each element of a from int64 to uint16
// (modulo 2^16), returning a new slice of equal length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s decoded as UTF-8 (U+FFFD for invalid
// bytes, matching range-over-string semantics), or NUL if s is empty.
func GetRune(s string) rune {
	rs := []rune(s)
	if len(rs) == 0 {
		return '\x00'
	}
	return rs[0]
}
// FuzzNG_valid replays a protobuf-encoded operation list that is expected to
// be valid; it panics if data does not unmarshal. Note the recover handler is
// installed after Unmarshal, so that panic is deliberately not swallowed.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
defer func() {
if r := recover(); r != nil {
// Swallow only string panics (raised by fuzzed stdlib code paths);
// re-raise anything else so real bugs still crash the fuzzer.
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the tolerant entry point: malformed protobuf input is
// rejected with 0 instead of panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
defer func() {
if r := recover(); r != nil {
// Same policy as FuzzNG_valid: ignore string panics, re-raise the rest.
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool
// FuzzNG_List executes the decoded operation list against compress/bzip2.
// On first call it optionally dumps a Go reproducer to the file named by
// the FUZZ_NG_REPRODUCER environment variable. Returns 1 if the list was
// replayed, 0 if it was truncated at the 4096-operation cap.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
for l := range gen.List {
// Bound the amount of work per input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewReader:
arg0 := bytes.NewReader(a.NewReader.R)
bzip2.NewReader(arg0)
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer for the operation list to w,
// one statement per recognized operation.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewReader:
w.WriteString(fmt.Sprintf("bzip2.NewReader(bytes.NewReader(%#+v))\n", a.NewReader.R))
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_compress_bzip2
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// NewReaderArgs carries the byte payload handed to bzip2.NewReader by the
// fuzz harness (message index 0).
type NewReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-binds its message info.
func (x *NewReaderArgs) Reset() {
*x = NewReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text representation of the message.
func (x *NewReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding message info.
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetR returns the payload, or nil for a nil receiver.
func (x *NewReaderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
// NgoloFuzzOne is a single fuzz operation; exactly one oneof member of Item
// is set (message index 1).
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewReader
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-binds its message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text representation of the message.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding message info.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetItem returns the raw oneof value, or nil for a nil receiver.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// GetNewReader returns the NewReader variant if that is the set member.
func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
return x.NewReader
}
}
return nil
}
// isNgoloFuzzOne_Item is the interface implemented by Item oneof wrappers.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// NgoloFuzzOne_NewReader wraps NewReaderArgs for the Item oneof (field 1).
type NgoloFuzzOne_NewReader struct {
NewReader *NewReaderArgs `protobuf:"bytes,1,opt,name=NewReader,proto3,oneof"`
}
func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny holds one scalar value of any supported kind in the Item
// oneof (message index 2).
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-binds its message info.
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text representation of the message.
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding message info.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// GetItem returns the raw oneof value, or nil for a nil receiver.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Each getter below returns the variant's value when that variant is set,
// and the zero value otherwise.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the interface implemented by Item oneof wrappers.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
// Oneof wrapper types, one per scalar kind (fields 1-5).
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level message: the ordered list of operations the
// harness replays (message index 3).
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-binds its message info.
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text representation of the message.
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding message info.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetList returns the operation list, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the parsed file descriptor for ngolofuzz.proto,
// populated once by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto. Do not edit; it must match the generator output exactly.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1d\n" +
"\rNewReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"P\n" +
"\fNgoloFuzzOne\x128\n" +
"\tNewReader\x18\x01 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReaderB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_compress_bzip2b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor exactly
// once (guarded by rawDescOnce) and returns the cached compressed bytes.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds runtime message info for the 4
// messages declared in ngolofuzz.proto.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
// file_ngolofuzz_proto_goTypes maps descriptor type indices to Go types.
var file_ngolofuzz_proto_goTypes = []any{
(*NewReaderArgs)(nil), // 0: ngolofuzz.NewReaderArgs
(*NgoloFuzzOne)(nil), // 1: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 2: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 3: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs encodes field-to-type dependencies; the
// trailing entries delimit the method/extension sub-lists (all empty here).
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
1, // 1: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the oneof wrapper types and builds the
// file descriptor exactly once; subsequent calls return immediately.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Message 1 (NgoloFuzzOne) carries the operation oneof.
file_ngolofuzz_proto_msgTypes[1].OneofWrappers = []any{
(*NgoloFuzzOne_NewReader)(nil),
}
// Message 2 (NgoloFuzzAny) carries the scalar-value oneof.
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release the construction-time tables; the built File retains what it needs.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_compress_flate
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"compress/flate"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is a fake net.Conn backed by an in-memory byte slice of fuzz
// data. Reads consume buf from offset; writes are discarded.
type FuzzingConn struct {
buf []byte
offset int
}
// Read copies buffered fuzz data into b and advances the read offset,
// returning io.EOF once the buffer is exhausted.
//
// Fix: the original partial-read branch tested
// len(b) < len(c.buf)+c.offset (wrong sign — the remaining byte count is
// len(c.buf)-c.offset) and then unconditionally reported n = len(b) and
// advanced offset by len(b). When fewer than len(b) bytes remained, that
// over-reported n (claiming unwritten bytes of b as read data) and pushed
// offset past len(c.buf), violating the io.Reader contract. copy already
// returns min(len(b), remaining), so we use its result directly.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr reports the fixed placeholder address of the fake connection.
func (c *FuzzingConn) LocalAddr() net.Addr {
	var a FuzzingAddr
	return &a
}

// RemoteAddr reports the same placeholder address as LocalAddr.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	var a FuzzingAddr
	return &a
}
// SetDeadline is a no-op: the in-memory connection cannot time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn builds a FuzzingConn that replays a from the beginning.
// The slice is aliased, not copied.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as an unsigned big-endian integer, as
// big.Int.SetBytes does. An empty slice yields zero.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the bytes of a through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewReader(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows every element of a to the platform int size,
// truncating values that do not fit on 32-bit targets.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates every element of a to its low 16 bits.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first UTF-8 rune of s, or NUL when s is empty.
// (A range loop is used deliberately: it decodes UTF-8 and the empty
// string falls through to the zero rune.)
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return 0
}
// FuzzNG_valid is the fuzz entry point for inputs known to be valid
// protobuf encodings of NgoloFuzzList (e.g. produced by a protobuf-aware
// mutator). A decode failure is treated as a harness bug and panics.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Swallow panics whose value is a plain string; any other value
// (including runtime errors) is re-raised so genuine crashes surface.
// Note the recover is installed AFTER the decode check above, so the
// unmarshal panic still aborts loudly.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the fuzz entry point for raw, untrusted input: unlike
// FuzzNG_valid it simply rejects (returns 0) data that does not decode as
// an NgoloFuzzList instead of panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string-valued panics from the interpreter below; re-raise
// everything else (including runtime errors) so real crashes surface.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
var initialized bool
// FuzzNG_List interprets the generated call list against the
// compress/flate API. Writers produced by constructor calls are pooled and
// later method calls pick one round-robin. Returns 1 when the whole list
// was consumed, 0 on any early stop (API error or oversized list).
func FuzzNG_List(gen *NgoloFuzzList) int {
// One-time setup: optionally dump a Go reproducer of this input to the
// file named by the FUZZ_NG_REPRODUCER environment variable.
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Pool of constructed writers plus the round-robin cursor into it.
var WriterResults []*flate.Writer
WriterResultsIndex := 0
for l := range gen.List {
// Hard cap on the number of interpreted operations per input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewWriter:
arg0 := bytes.NewBuffer(a.NewWriter.W)
arg1 := int(a.NewWriter.Level)
r0, r1 := flate.NewWriter(arg0, arg1)
if r0 != nil{
WriterResults = append(WriterResults, r0)
}
// A constructor error (e.g. an invalid level) ends the run.
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NewWriterDict:
arg0 := bytes.NewBuffer(a.NewWriterDict.W)
arg1 := int(a.NewWriterDict.Level)
r0, r1 := flate.NewWriterDict(arg0, arg1, a.NewWriterDict.Dict)
if r0 != nil{
WriterResults = append(WriterResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotWrite:
// Method ops are skipped until at least one Writer exists.
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
_, r1 := arg0.Write(a.WriterNgdotWrite.Data)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotFlush:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
r0 := arg0.Flush()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotClose:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
r0 := arg0.Close()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotReset:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg1 := bytes.NewBuffer(a.WriterNgdotReset.Dst)
arg0.Reset(arg1)
case *NgoloFuzzOne_NewReader:
// NOTE(review): the returned ReadCloser is dropped without being
// read or closed — confirm this is the intended coverage.
arg0 := bytes.NewReader(a.NewReader.R)
flate.NewReader(arg0)
case *NgoloFuzzOne_NewReaderDict:
arg0 := bytes.NewReader(a.NewReaderDict.R)
flate.NewReaderDict(arg0, a.NewReaderDict.Dict)
}
}
return 1
}
// PrintNG_List writes Go source to w that reproduces the sequence of
// compress/flate calls encoded in gen. It mirrors the dispatch of
// FuzzNG_List: the same round-robin writer selection and the same
// skip-until-a-writer-exists rule, so the emitted program replays the run.
// NOTE(review): WriteString errors are ignored throughout — best-effort
// output for a debugging aid.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
// WriterNb counts constructed writers; WriterResultsIndex cycles over them.
WriterNb := 0
WriterResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewWriter:
w.WriteString(fmt.Sprintf("Writer%d, _ := flate.NewWriter(bytes.NewBuffer(%#+v), int(%#+v))\n", WriterNb, a.NewWriter.W, a.NewWriter.Level))
WriterNb = WriterNb + 1
case *NgoloFuzzOne_NewWriterDict:
w.WriteString(fmt.Sprintf("Writer%d, _ := flate.NewWriterDict(bytes.NewBuffer(%#+v), int(%#+v), %#+v)\n", WriterNb, a.NewWriterDict.W, a.NewWriterDict.Level, a.NewWriterDict.Dict))
WriterNb = WriterNb + 1
case *NgoloFuzzOne_WriterNgdotWrite:
// Method calls are only printed once a writer exists.
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Write(%#+v)\n", WriterResultsIndex, a.WriterNgdotWrite.Data))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotFlush:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Flush()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotClose:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Close()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotReset:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Reset(bytes.NewBuffer(%#+v))\n", WriterResultsIndex, a.WriterNgdotReset.Dst))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_NewReader:
w.WriteString(fmt.Sprintf("flate.NewReader(bytes.NewReader(%#+v))\n", a.NewReader.R))
case *NgoloFuzzOne_NewReaderDict:
w.WriteString(fmt.Sprintf("flate.NewReaderDict(bytes.NewReader(%#+v), %#+v)\n", a.NewReaderDict.R, a.NewReaderDict.Dict))
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_compress_flate
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterArgs) Reset() {
*x = NewWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterArgs) ProtoMessage() {}
func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewWriterArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
func (x *NewWriterArgs) GetLevel() int64 {
if x != nil {
return x.Level
}
return 0
}
type NewWriterDictArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"`
Dict []byte `protobuf:"bytes,3,opt,name=dict,proto3" json:"dict,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterDictArgs) Reset() {
*x = NewWriterDictArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterDictArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterDictArgs) ProtoMessage() {}
func (x *NewWriterDictArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterDictArgs.ProtoReflect.Descriptor instead.
func (*NewWriterDictArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *NewWriterDictArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
func (x *NewWriterDictArgs) GetLevel() int64 {
if x != nil {
return x.Level
}
return 0
}
func (x *NewWriterDictArgs) GetDict() []byte {
if x != nil {
return x.Dict
}
return nil
}
type WriterNgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWriteArgs) Reset() {
*x = WriterNgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWriteArgs) ProtoMessage() {}
func (x *WriterNgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *WriterNgdotWriteArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type WriterNgdotFlushArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotFlushArgs) Reset() {
*x = WriterNgdotFlushArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotFlushArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotFlushArgs) ProtoMessage() {}
func (x *WriterNgdotFlushArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotFlushArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotFlushArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
type WriterNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotCloseArgs) Reset() {
*x = WriterNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotCloseArgs) ProtoMessage() {}
func (x *WriterNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type WriterNgdotResetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotResetArgs) Reset() {
*x = WriterNgdotResetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotResetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotResetArgs) ProtoMessage() {}
func (x *WriterNgdotResetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotResetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *WriterNgdotResetArgs) GetDst() []byte {
if x != nil {
return x.Dst
}
return nil
}
type NewReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewReaderArgs) Reset() {
*x = NewReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderArgs) ProtoMessage() {}
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NewReaderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type NewReaderDictArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
Dict []byte `protobuf:"bytes,2,opt,name=dict,proto3" json:"dict,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewReaderDictArgs) Reset() {
*x = NewReaderDictArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewReaderDictArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderDictArgs) ProtoMessage() {}
func (x *NewReaderDictArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderDictArgs.ProtoReflect.Descriptor instead.
func (*NewReaderDictArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NewReaderDictArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
func (x *NewReaderDictArgs) GetDict() []byte {
if x != nil {
return x.Dict
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewWriter
// *NgoloFuzzOne_NewWriterDict
// *NgoloFuzzOne_WriterNgdotWrite
// *NgoloFuzzOne_WriterNgdotFlush
// *NgoloFuzzOne_WriterNgdotClose
// *NgoloFuzzOne_WriterNgdotReset
// *NgoloFuzzOne_NewReader
// *NgoloFuzzOne_NewReaderDict
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
return x.NewWriter
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriterDict() *NewWriterDictArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriterDict); ok {
return x.NewWriterDict
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotWrite() *WriterNgdotWriteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWrite); ok {
return x.WriterNgdotWrite
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotFlush() *WriterNgdotFlushArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotFlush); ok {
return x.WriterNgdotFlush
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotClose() *WriterNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotClose); ok {
return x.WriterNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotReset() *WriterNgdotResetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotReset); ok {
return x.WriterNgdotReset
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
return x.NewReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewReaderDict() *NewReaderDictArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReaderDict); ok {
return x.NewReaderDict
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewWriter struct {
NewWriter *NewWriterArgs `protobuf:"bytes,1,opt,name=NewWriter,proto3,oneof"`
}
type NgoloFuzzOne_NewWriterDict struct {
NewWriterDict *NewWriterDictArgs `protobuf:"bytes,2,opt,name=NewWriterDict,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotWrite struct {
WriterNgdotWrite *WriterNgdotWriteArgs `protobuf:"bytes,3,opt,name=WriterNgdotWrite,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotFlush struct {
WriterNgdotFlush *WriterNgdotFlushArgs `protobuf:"bytes,4,opt,name=WriterNgdotFlush,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotClose struct {
WriterNgdotClose *WriterNgdotCloseArgs `protobuf:"bytes,5,opt,name=WriterNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotReset struct {
WriterNgdotReset *WriterNgdotResetArgs `protobuf:"bytes,6,opt,name=WriterNgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_NewReader struct {
NewReader *NewReaderArgs `protobuf:"bytes,7,opt,name=NewReader,proto3,oneof"`
}
type NgoloFuzzOne_NewReaderDict struct {
NewReaderDict *NewReaderDictArgs `protobuf:"bytes,8,opt,name=NewReaderDict,proto3,oneof"`
}
func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewWriterDict) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotWrite) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotFlush) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewReaderDict) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"3\n" +
"\rNewWriterArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\x14\n" +
"\x05level\x18\x02 \x01(\x03R\x05level\"K\n" +
"\x11NewWriterDictArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\x14\n" +
"\x05level\x18\x02 \x01(\x03R\x05level\x12\x12\n" +
"\x04dict\x18\x03 \x01(\fR\x04dict\"*\n" +
"\x14WriterNgdotWriteArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"\x16\n" +
"\x14WriterNgdotFlushArgs\"\x16\n" +
"\x14WriterNgdotCloseArgs\"(\n" +
"\x14WriterNgdotResetArgs\x12\x10\n" +
"\x03dst\x18\x01 \x01(\fR\x03dst\"\x1d\n" +
"\rNewReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"5\n" +
"\x11NewReaderDictArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\x12\x12\n" +
"\x04dict\x18\x02 \x01(\fR\x04dict\"\xd2\x04\n" +
"\fNgoloFuzzOne\x128\n" +
"\tNewWriter\x18\x01 \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriter\x12D\n" +
"\rNewWriterDict\x18\x02 \x01(\v2\x1c.ngolofuzz.NewWriterDictArgsH\x00R\rNewWriterDict\x12M\n" +
"\x10WriterNgdotWrite\x18\x03 \x01(\v2\x1f.ngolofuzz.WriterNgdotWriteArgsH\x00R\x10WriterNgdotWrite\x12M\n" +
"\x10WriterNgdotFlush\x18\x04 \x01(\v2\x1f.ngolofuzz.WriterNgdotFlushArgsH\x00R\x10WriterNgdotFlush\x12M\n" +
"\x10WriterNgdotClose\x18\x05 \x01(\v2\x1f.ngolofuzz.WriterNgdotCloseArgsH\x00R\x10WriterNgdotClose\x12M\n" +
"\x10WriterNgdotReset\x18\x06 \x01(\v2\x1f.ngolofuzz.WriterNgdotResetArgsH\x00R\x10WriterNgdotReset\x128\n" +
"\tNewReader\x18\a \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12D\n" +
"\rNewReaderDict\x18\b \x01(\v2\x1c.ngolofuzz.NewReaderDictArgsH\x00R\rNewReaderDictB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_compress_flateb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw file
// descriptor exactly once and caches the result for Descriptor() calls
// (protoc-gen-go boilerplate).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_ngolofuzz_proto_goTypes = []any{
(*NewWriterArgs)(nil), // 0: ngolofuzz.NewWriterArgs
(*NewWriterDictArgs)(nil), // 1: ngolofuzz.NewWriterDictArgs
(*WriterNgdotWriteArgs)(nil), // 2: ngolofuzz.WriterNgdotWriteArgs
(*WriterNgdotFlushArgs)(nil), // 3: ngolofuzz.WriterNgdotFlushArgs
(*WriterNgdotCloseArgs)(nil), // 4: ngolofuzz.WriterNgdotCloseArgs
(*WriterNgdotResetArgs)(nil), // 5: ngolofuzz.WriterNgdotResetArgs
(*NewReaderArgs)(nil), // 6: ngolofuzz.NewReaderArgs
(*NewReaderDictArgs)(nil), // 7: ngolofuzz.NewReaderDictArgs
(*NgoloFuzzOne)(nil), // 8: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 9: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 10: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
1, // 1: ngolofuzz.NgoloFuzzOne.NewWriterDict:type_name -> ngolofuzz.NewWriterDictArgs
2, // 2: ngolofuzz.NgoloFuzzOne.WriterNgdotWrite:type_name -> ngolofuzz.WriterNgdotWriteArgs
3, // 3: ngolofuzz.NgoloFuzzOne.WriterNgdotFlush:type_name -> ngolofuzz.WriterNgdotFlushArgs
4, // 4: ngolofuzz.NgoloFuzzOne.WriterNgdotClose:type_name -> ngolofuzz.WriterNgdotCloseArgs
5, // 5: ngolofuzz.NgoloFuzzOne.WriterNgdotReset:type_name -> ngolofuzz.WriterNgdotResetArgs
6, // 6: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
7, // 7: ngolofuzz.NgoloFuzzOne.NewReaderDict:type_name -> ngolofuzz.NewReaderDictArgs
8, // 8: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
9, // [9:9] is the sub-list for method output_type
9, // [9:9] is the sub-list for method input_type
9, // [9:9] is the sub-list for extension type_name
9, // [9:9] is the sub-list for extension extendee
0, // [0:9] is the sub-list for field type_name
}
// init registers the generated descriptor with the protobuf runtime at
// program startup (protoc-gen-go boilerplate).
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init builds the type/descriptor tables for
// ngolofuzz.proto. Idempotent: a non-nil File_ngolofuzz_proto means the
// registration already ran.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Declare the wrapper structs that may populate each message's oneof field.
file_ngolofuzz_proto_msgTypes[8].OneofWrappers = []any{
(*NgoloFuzzOne_NewWriter)(nil),
(*NgoloFuzzOne_NewWriterDict)(nil),
(*NgoloFuzzOne_WriterNgdotWrite)(nil),
(*NgoloFuzzOne_WriterNgdotFlush)(nil),
(*NgoloFuzzOne_WriterNgdotClose)(nil),
(*NgoloFuzzOne_WriterNgdotReset)(nil),
(*NgoloFuzzOne_NewReader)(nil),
(*NgoloFuzzOne_NewReaderDict)(nil),
}
file_ngolofuzz_proto_msgTypes[9].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 11,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release the construction-only tables so they can be garbage collected.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_compress_gzip
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"compress/gzip"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in for fuzzing: reads are
// served from buf starting at offset, writes are discarded.
type FuzzingConn struct {
	buf    []byte
	offset int
}

// Read copies the next chunk of buf into b and advances offset, returning
// io.EOF once the buffer has been fully consumed.
//
// Fixed: the short-read branch previously tested
// len(b) < len(c.buf)+c.offset instead of comparing against the REMAINING
// byte count len(c.buf)-c.offset. With a non-zero offset the old code
// reported more bytes than it actually copied and advanced offset past
// len(c.buf), violating the io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything: fill it and advance.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b is large enough for the rest of the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
//TODO only add these functions if needed
func CreateBigInt(a []byte) *big.Int {
r := new(big.Int)
r.SetBytes(a)
return r
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
func ConvertIntArray(a []int64) []int {
r := make([]int, len(a))
for i := range a {
r[i] = int(a[i])
}
return r
}
func ConvertUint16Array(a []int64) []uint16 {
r := make([]uint16, len(a))
for i := range a {
r[i] = uint16(a[i])
}
return r
}
func GetRune(s string) rune {
for _, c := range s {
return c
}
return '\x00'
}
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-dump setup across fuzz iterations.
var initialized bool

// FuzzNG_List interprets gen.List as a sequence of compress/gzip API calls
// and executes them against pools of live *gzip.Reader / *gzip.Writer
// values. It returns 1 when the whole list executed, 0 when execution
// stopped early (an API error or an oversized list).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first call only, optionally dump a Go-source reproducer of
		// this input to the file named by the FUZZ_NG_REPRODUCER env var.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Pools of values produced by constructors. Each method call targets the
	// pool entry at the rotating index, which then advances round-robin.
	var ReaderResults []*gzip.Reader
	ReaderResultsIndex := 0
	var WriterResults []*gzip.Writer
	WriterResultsIndex := 0
	for l := range gen.List {
		if l > 4096 {
			// Cap the number of executed operations per input.
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewReader:
			// gzip.NewReader over the fuzzed bytes; pool the reader on success,
			// stop on error.
			arg0 := bytes.NewReader(a.NewReader.R)
			r0, r1 := gzip.NewReader(arg0)
			if r0 != nil {
				ReaderResults = append(ReaderResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReset:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg1 := bytes.NewReader(a.ReaderNgdotReset.R)
			r0 := arg0.Reset(arg1)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotMultistream:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg0.Multistream(a.ReaderNgdotMultistream.Ok)
		case *NgoloFuzzOne_ReaderNgdotRead:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.Read(a.ReaderNgdotRead.P)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotClose:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			r0 := arg0.Close()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_NewWriter:
			// The fuzzed bytes seed the destination buffer; only the writer
			// object itself is pooled.
			arg0 := bytes.NewBuffer(a.NewWriter.W)
			r0 := gzip.NewWriter(arg0)
			if r0 != nil {
				WriterResults = append(WriterResults, r0)
			}
		case *NgoloFuzzOne_NewWriterLevel:
			arg0 := bytes.NewBuffer(a.NewWriterLevel.W)
			arg1 := int(a.NewWriterLevel.Level)
			r0, r1 := gzip.NewWriterLevel(arg0, arg1)
			if r0 != nil {
				WriterResults = append(WriterResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotReset:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			arg1 := bytes.NewBuffer(a.WriterNgdotReset.W)
			arg0.Reset(arg1)
		case *NgoloFuzzOne_WriterNgdotWrite:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			_, r1 := arg0.Write(a.WriterNgdotWrite.P)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotFlush:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Flush()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotClose:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Close()
			if r0 != nil {
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer of the call list to w,
// mirroring the dispatch and round-robin indexing of FuzzNG_List.
// NOTE(review): ReaderNb is incremented unconditionally here, while
// FuzzNG_List only pools a reader when gzip.NewReader returns non-nil, so
// the printed indices can diverge from the executed ones when construction
// fails — confirm whether that matters for reproducers. WriteString errors
// are intentionally ignored (best-effort diagnostic output).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	ReaderNb := 0
	ReaderResultsIndex := 0
	WriterNb := 0
	WriterResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewReader:
			w.WriteString(fmt.Sprintf("Reader%d, _ := gzip.NewReader(bytes.NewReader(%#+v))\n", ReaderNb, a.NewReader.R))
			ReaderNb = ReaderNb + 1
		case *NgoloFuzzOne_ReaderNgdotReset:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Reset(bytes.NewReader(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotReset.R))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotMultistream:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Multistream(%#+v)\n", ReaderResultsIndex, a.ReaderNgdotMultistream.Ok))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotRead:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Read(%#+v)\n", ReaderResultsIndex, a.ReaderNgdotRead.P))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotClose:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Close()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_NewWriter:
			w.WriteString(fmt.Sprintf("Writer%d := gzip.NewWriter(bytes.NewBuffer(%#+v))\n", WriterNb, a.NewWriter.W))
			WriterNb = WriterNb + 1
		case *NgoloFuzzOne_NewWriterLevel:
			w.WriteString(fmt.Sprintf("Writer%d, _ := gzip.NewWriterLevel(bytes.NewBuffer(%#+v), int(%#+v))\n", WriterNb, a.NewWriterLevel.W, a.NewWriterLevel.Level))
			WriterNb = WriterNb + 1
		case *NgoloFuzzOne_WriterNgdotReset:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Reset(bytes.NewBuffer(%#+v))\n", WriterResultsIndex, a.WriterNgdotReset.W))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotWrite:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Write(%#+v)\n", WriterResultsIndex, a.WriterNgdotWrite.P))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotFlush:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Flush()\n", WriterResultsIndex))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotClose:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Close()\n", WriterResultsIndex))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_compress_gzip
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewReaderArgs) Reset() {
*x = NewReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderArgs) ProtoMessage() {}
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewReaderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type ReaderNgdotResetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotResetArgs) Reset() {
*x = ReaderNgdotResetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotResetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotResetArgs) ProtoMessage() {}
func (x *ReaderNgdotResetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotResetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *ReaderNgdotResetArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type ReaderNgdotMultistreamArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotMultistreamArgs) Reset() {
*x = ReaderNgdotMultistreamArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotMultistreamArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotMultistreamArgs) ProtoMessage() {}
func (x *ReaderNgdotMultistreamArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotMultistreamArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotMultistreamArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *ReaderNgdotMultistreamArgs) GetOk() bool {
if x != nil {
return x.Ok
}
return false
}
type ReaderNgdotReadArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadArgs) Reset() {
*x = ReaderNgdotReadArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadArgs) ProtoMessage() {}
func (x *ReaderNgdotReadArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *ReaderNgdotReadArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
type ReaderNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotCloseArgs) Reset() {
*x = ReaderNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotCloseArgs) ProtoMessage() {}
func (x *ReaderNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type NewWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterArgs) Reset() {
*x = NewWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterArgs) ProtoMessage() {}
func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NewWriterArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type NewWriterLevelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterLevelArgs) Reset() {
*x = NewWriterLevelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterLevelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterLevelArgs) ProtoMessage() {}
func (x *NewWriterLevelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterLevelArgs.ProtoReflect.Descriptor instead.
func (*NewWriterLevelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NewWriterLevelArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
func (x *NewWriterLevelArgs) GetLevel() int64 {
if x != nil {
return x.Level
}
return 0
}
type WriterNgdotResetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotResetArgs) Reset() {
*x = WriterNgdotResetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotResetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotResetArgs) ProtoMessage() {}
func (x *WriterNgdotResetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotResetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *WriterNgdotResetArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type WriterNgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWriteArgs) Reset() {
*x = WriterNgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWriteArgs) ProtoMessage() {}
func (x *WriterNgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *WriterNgdotWriteArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
type WriterNgdotFlushArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotFlushArgs) Reset() {
*x = WriterNgdotFlushArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotFlushArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotFlushArgs) ProtoMessage() {}
func (x *WriterNgdotFlushArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotFlushArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotFlushArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
type WriterNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotCloseArgs) Reset() {
*x = WriterNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotCloseArgs) ProtoMessage() {}
func (x *WriterNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewReader
// *NgoloFuzzOne_ReaderNgdotReset
// *NgoloFuzzOne_ReaderNgdotMultistream
// *NgoloFuzzOne_ReaderNgdotRead
// *NgoloFuzzOne_ReaderNgdotClose
// *NgoloFuzzOne_NewWriter
// *NgoloFuzzOne_NewWriterLevel
// *NgoloFuzzOne_WriterNgdotReset
// *NgoloFuzzOne_WriterNgdotWrite
// *NgoloFuzzOne_WriterNgdotFlush
// *NgoloFuzzOne_WriterNgdotClose
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
return x.NewReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotReset() *ReaderNgdotResetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReset); ok {
return x.ReaderNgdotReset
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotMultistream() *ReaderNgdotMultistreamArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotMultistream); ok {
return x.ReaderNgdotMultistream
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotRead() *ReaderNgdotReadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotRead); ok {
return x.ReaderNgdotRead
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotClose() *ReaderNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotClose); ok {
return x.ReaderNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
return x.NewWriter
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriterLevel() *NewWriterLevelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriterLevel); ok {
return x.NewWriterLevel
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotReset() *WriterNgdotResetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotReset); ok {
return x.WriterNgdotReset
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotWrite() *WriterNgdotWriteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWrite); ok {
return x.WriterNgdotWrite
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotFlush() *WriterNgdotFlushArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotFlush); ok {
return x.WriterNgdotFlush
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotClose() *WriterNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotClose); ok {
return x.WriterNgdotClose
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewReader struct {
NewReader *NewReaderArgs `protobuf:"bytes,1,opt,name=NewReader,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReset struct {
ReaderNgdotReset *ReaderNgdotResetArgs `protobuf:"bytes,2,opt,name=ReaderNgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotMultistream struct {
ReaderNgdotMultistream *ReaderNgdotMultistreamArgs `protobuf:"bytes,3,opt,name=ReaderNgdotMultistream,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotRead struct {
ReaderNgdotRead *ReaderNgdotReadArgs `protobuf:"bytes,4,opt,name=ReaderNgdotRead,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotClose struct {
ReaderNgdotClose *ReaderNgdotCloseArgs `protobuf:"bytes,5,opt,name=ReaderNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_NewWriter struct {
NewWriter *NewWriterArgs `protobuf:"bytes,6,opt,name=NewWriter,proto3,oneof"`
}
type NgoloFuzzOne_NewWriterLevel struct {
NewWriterLevel *NewWriterLevelArgs `protobuf:"bytes,7,opt,name=NewWriterLevel,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotReset struct {
WriterNgdotReset *WriterNgdotResetArgs `protobuf:"bytes,8,opt,name=WriterNgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotWrite struct {
WriterNgdotWrite *WriterNgdotWriteArgs `protobuf:"bytes,9,opt,name=WriterNgdotWrite,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotFlush struct {
WriterNgdotFlush *WriterNgdotFlushArgs `protobuf:"bytes,10,opt,name=WriterNgdotFlush,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotClose struct {
WriterNgdotClose *WriterNgdotCloseArgs `protobuf:"bytes,11,opt,name=WriterNgdotClose,proto3,oneof"`
}
func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotMultistream) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotRead) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewWriterLevel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotWrite) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotFlush) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotClose) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1d\n" +
"\rNewReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"$\n" +
"\x14ReaderNgdotResetArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\",\n" +
"\x1aReaderNgdotMultistreamArgs\x12\x0e\n" +
"\x02ok\x18\x01 \x01(\bR\x02ok\"#\n" +
"\x13ReaderNgdotReadArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\fR\x01p\"\x16\n" +
"\x14ReaderNgdotCloseArgs\"\x1d\n" +
"\rNewWriterArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"8\n" +
"\x12NewWriterLevelArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\x14\n" +
"\x05level\x18\x02 \x01(\x03R\x05level\"$\n" +
"\x14WriterNgdotResetArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"$\n" +
"\x14WriterNgdotWriteArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\fR\x01p\"\x16\n" +
"\x14WriterNgdotFlushArgs\"\x16\n" +
"\x14WriterNgdotCloseArgs\"\xda\x06\n" +
"\fNgoloFuzzOne\x128\n" +
"\tNewReader\x18\x01 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12M\n" +
"\x10ReaderNgdotReset\x18\x02 \x01(\v2\x1f.ngolofuzz.ReaderNgdotResetArgsH\x00R\x10ReaderNgdotReset\x12_\n" +
"\x16ReaderNgdotMultistream\x18\x03 \x01(\v2%.ngolofuzz.ReaderNgdotMultistreamArgsH\x00R\x16ReaderNgdotMultistream\x12J\n" +
"\x0fReaderNgdotRead\x18\x04 \x01(\v2\x1e.ngolofuzz.ReaderNgdotReadArgsH\x00R\x0fReaderNgdotRead\x12M\n" +
"\x10ReaderNgdotClose\x18\x05 \x01(\v2\x1f.ngolofuzz.ReaderNgdotCloseArgsH\x00R\x10ReaderNgdotClose\x128\n" +
"\tNewWriter\x18\x06 \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriter\x12G\n" +
"\x0eNewWriterLevel\x18\a \x01(\v2\x1d.ngolofuzz.NewWriterLevelArgsH\x00R\x0eNewWriterLevel\x12M\n" +
"\x10WriterNgdotReset\x18\b \x01(\v2\x1f.ngolofuzz.WriterNgdotResetArgsH\x00R\x10WriterNgdotReset\x12M\n" +
"\x10WriterNgdotWrite\x18\t \x01(\v2\x1f.ngolofuzz.WriterNgdotWriteArgsH\x00R\x10WriterNgdotWrite\x12M\n" +
"\x10WriterNgdotFlush\x18\n" +
" \x01(\v2\x1f.ngolofuzz.WriterNgdotFlushArgsH\x00R\x10WriterNgdotFlush\x12M\n" +
"\x10WriterNgdotClose\x18\v \x01(\v2\x1f.ngolofuzz.WriterNgdotCloseArgsH\x00R\x10WriterNgdotCloseB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_compress_gzipb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
var file_ngolofuzz_proto_goTypes = []any{
(*NewReaderArgs)(nil), // 0: ngolofuzz.NewReaderArgs
(*ReaderNgdotResetArgs)(nil), // 1: ngolofuzz.ReaderNgdotResetArgs
(*ReaderNgdotMultistreamArgs)(nil), // 2: ngolofuzz.ReaderNgdotMultistreamArgs
(*ReaderNgdotReadArgs)(nil), // 3: ngolofuzz.ReaderNgdotReadArgs
(*ReaderNgdotCloseArgs)(nil), // 4: ngolofuzz.ReaderNgdotCloseArgs
(*NewWriterArgs)(nil), // 5: ngolofuzz.NewWriterArgs
(*NewWriterLevelArgs)(nil), // 6: ngolofuzz.NewWriterLevelArgs
(*WriterNgdotResetArgs)(nil), // 7: ngolofuzz.WriterNgdotResetArgs
(*WriterNgdotWriteArgs)(nil), // 8: ngolofuzz.WriterNgdotWriteArgs
(*WriterNgdotFlushArgs)(nil), // 9: ngolofuzz.WriterNgdotFlushArgs
(*WriterNgdotCloseArgs)(nil), // 10: ngolofuzz.WriterNgdotCloseArgs
(*NgoloFuzzOne)(nil), // 11: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 12: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 13: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
1, // 1: ngolofuzz.NgoloFuzzOne.ReaderNgdotReset:type_name -> ngolofuzz.ReaderNgdotResetArgs
2, // 2: ngolofuzz.NgoloFuzzOne.ReaderNgdotMultistream:type_name -> ngolofuzz.ReaderNgdotMultistreamArgs
3, // 3: ngolofuzz.NgoloFuzzOne.ReaderNgdotRead:type_name -> ngolofuzz.ReaderNgdotReadArgs
4, // 4: ngolofuzz.NgoloFuzzOne.ReaderNgdotClose:type_name -> ngolofuzz.ReaderNgdotCloseArgs
5, // 5: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
6, // 6: ngolofuzz.NgoloFuzzOne.NewWriterLevel:type_name -> ngolofuzz.NewWriterLevelArgs
7, // 7: ngolofuzz.NgoloFuzzOne.WriterNgdotReset:type_name -> ngolofuzz.WriterNgdotResetArgs
8, // 8: ngolofuzz.NgoloFuzzOne.WriterNgdotWrite:type_name -> ngolofuzz.WriterNgdotWriteArgs
9, // 9: ngolofuzz.NgoloFuzzOne.WriterNgdotFlush:type_name -> ngolofuzz.WriterNgdotFlushArgs
10, // 10: ngolofuzz.NgoloFuzzOne.WriterNgdotClose:type_name -> ngolofuzz.WriterNgdotCloseArgs
11, // 11: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
12, // [12:12] is the sub-list for method output_type
12, // [12:12] is the sub-list for method input_type
12, // [12:12] is the sub-list for extension type_name
12, // [12:12] is the sub-list for extension extendee
0, // [0:12] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[11].OneofWrappers = []any{
(*NgoloFuzzOne_NewReader)(nil),
(*NgoloFuzzOne_ReaderNgdotReset)(nil),
(*NgoloFuzzOne_ReaderNgdotMultistream)(nil),
(*NgoloFuzzOne_ReaderNgdotRead)(nil),
(*NgoloFuzzOne_ReaderNgdotClose)(nil),
(*NgoloFuzzOne_NewWriter)(nil),
(*NgoloFuzzOne_NewWriterLevel)(nil),
(*NgoloFuzzOne_WriterNgdotReset)(nil),
(*NgoloFuzzOne_WriterNgdotWrite)(nil),
(*NgoloFuzzOne_WriterNgdotFlush)(nil),
(*NgoloFuzzOne_WriterNgdotClose)(nil),
}
file_ngolofuzz_proto_msgTypes[12].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 14,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_compress_lzw
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"compress/lzw"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is a fake net.Conn whose read side is served from a fixed
// byte slice and whose write side discards everything, letting fuzzed
// code exercise connection-based APIs deterministically.
type FuzzingConn struct {
buf []byte // data returned by Read
offset int // number of bytes of buf already consumed
}
// Read copies the next chunk of c.buf into b and reports exactly how many
// bytes were copied, returning io.EOF once the buffer is exhausted.
// Bug fix: the previous version compared len(b) against
// len(c.buf)+c.offset instead of the remaining length len(c.buf)-c.offset,
// so a short tail read returned n == len(b) even though fewer bytes were
// copied, and advanced offset past the end of buf — violating the
// io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
n = copy(b, c.buf[c.offset:])
c.offset += n
return n, nil
}
// Write pretends to consume all of b; data sent over a fuzzing
// connection is simply discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
n = len(b)
return n, err
}
// Close marks the connection as drained by moving the read offset to the
// end of the buffer; it never fails.
func (c *FuzzingConn) Close() (err error) {
c.offset = len(c.buf)
return
}
// FuzzingAddr is the placeholder net.Addr handed out by FuzzingConn.
type FuzzingAddr struct{}
// Network reports a fixed fake network name.
func (c *FuzzingAddr) Network() string { return "fuzz_addr_net" }
// String reports a fixed fake address string.
func (c *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns the placeholder fuzzing address.
func (c *FuzzingConn) LocalAddr() net.Addr { return &FuzzingAddr{} }
// RemoteAddr returns the placeholder fuzzing address.
func (c *FuzzingConn) RemoteAddr() net.Addr { return &FuzzingAddr{} }
// SetDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }
// SetReadDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }
// SetWriteDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn builds a FuzzingConn whose reads are served from a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
return &FuzzingConn{buf: a}
}
// TODO: emit these helper functions only when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a slice of int64 values to the platform int type.
func ConvertIntArray(a []int64) []int {
r := make([]int, 0, len(a))
for _, v := range a {
r = append(r, int(v))
}
return r
}
// ConvertUint16Array narrows a slice of int64 values to uint16,
// truncating each value modulo 2^16.
func ConvertUint16Array(a []int64) []uint16 {
r := make([]uint16, 0, len(a))
for _, v := range a {
r = append(r, uint16(v))
}
return r
}
// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
if len(s) == 0 {
return '\x00'
}
return []rune(s)[0]
}
// OrderNewFromFuzz maps the fuzzer enum onto lzw.Order, defaulting to LSB
// for any value other than 1.
func OrderNewFromFuzz(p OrderEnum) lzw.Order {
if p == 1 {
return lzw.MSB
}
return lzw.LSB
}
// ConvertOrderNewFromFuzz maps a slice of fuzzer enums onto lzw.Order values.
func ConvertOrderNewFromFuzz(a []OrderEnum) []lzw.Order {
r := make([]lzw.Order, 0, len(a))
for _, p := range a {
r = append(r, OrderNewFromFuzz(p))
}
return r
}
// FuzzNG_valid is the fuzzer entry point used when data is expected to be a
// valid protobuf: a failed Unmarshal is treated as a harness bug and panics.
// NOTE(review): the recover handler is installed only after the Unmarshal
// check, so that panic deliberately escapes.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Swallow panics whose value is a string — presumably those raised on
// purpose by the harness/target (TODO confirm); anything else is re-raised.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the tolerant fuzzer entry point: inputs that fail to
// unmarshal are silently rejected (return 0) instead of panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow panics whose value is a string — presumably those raised on
// purpose by the harness/target (TODO confirm); anything else is re-raised.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump in FuzzNG_List.
var initialized bool
// FuzzNG_List replays the fuzzer-generated call list against compress/lzw,
// invoking lzw.NewReader/lzw.NewWriter with fuzz-chosen arguments. Returns 1
// when the list executed, 0 when the list was cut short by the length cap.
func FuzzNG_List(gen *NgoloFuzzList) int {
// On first use, optionally write a Go reproducer for this input to the
// file named by the FUZZ_NG_REPRODUCER environment variable.
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
for l := range gen.List {
// Cap the amount of work done per fuzz input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewReader:
arg0 := bytes.NewReader(a.NewReader.R)
arg1 := OrderNewFromFuzz(a.NewReader.Order)
arg2 := int(a.NewReader.LitWidth)
lzw.NewReader(arg0, arg1, arg2)
case *NgoloFuzzOne_NewWriter:
arg0 := bytes.NewBuffer(a.NewWriter.W)
arg1 := OrderNewFromFuzz(a.NewWriter.Order)
arg2 := int(a.NewWriter.LitWidth)
lzw.NewWriter(arg0, arg1, arg2)
}
}
return 1
}
// PrintNG_List writes a human-readable Go reproducer for the call list to w,
// mirroring the calls FuzzNG_List would make for the same input.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewReader:
w.WriteString(fmt.Sprintf("lzw.NewReader(bytes.NewReader(%#+v), OrderNewFromFuzz(%#+v), int(%#+v))\n", a.NewReader.R, a.NewReader.Order, a.NewReader.LitWidth))
case *NgoloFuzzOne_NewWriter:
w.WriteString(fmt.Sprintf("lzw.NewWriter(bytes.NewBuffer(%#+v), OrderNewFromFuzz(%#+v), int(%#+v))\n", a.NewWriter.W, a.NewWriter.Order, a.NewWriter.LitWidth))
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_compress_lzw
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type OrderEnum int32
const (
OrderEnum_LSB OrderEnum = 0
OrderEnum_MSB OrderEnum = 1
)
// Enum value maps for OrderEnum.
var (
OrderEnum_name = map[int32]string{
0: "LSB",
1: "MSB",
}
OrderEnum_value = map[string]int32{
"LSB": 0,
"MSB": 1,
}
)
func (x OrderEnum) Enum() *OrderEnum {
p := new(OrderEnum)
*p = x
return p
}
func (x OrderEnum) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (OrderEnum) Descriptor() protoreflect.EnumDescriptor {
return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}
func (OrderEnum) Type() protoreflect.EnumType {
return &file_ngolofuzz_proto_enumTypes[0]
}
func (x OrderEnum) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use OrderEnum.Descriptor instead.
func (OrderEnum) EnumDescriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type NewReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
Order OrderEnum `protobuf:"varint,2,opt,name=order,proto3,enum=ngolofuzz.OrderEnum" json:"order,omitempty"`
LitWidth int64 `protobuf:"varint,3,opt,name=litWidth,proto3" json:"litWidth,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewReaderArgs) Reset() {
*x = NewReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderArgs) ProtoMessage() {}
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewReaderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
func (x *NewReaderArgs) GetOrder() OrderEnum {
if x != nil {
return x.Order
}
return OrderEnum_LSB
}
func (x *NewReaderArgs) GetLitWidth() int64 {
if x != nil {
return x.LitWidth
}
return 0
}
type NewWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
Order OrderEnum `protobuf:"varint,2,opt,name=order,proto3,enum=ngolofuzz.OrderEnum" json:"order,omitempty"`
LitWidth int64 `protobuf:"varint,3,opt,name=litWidth,proto3" json:"litWidth,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterArgs) Reset() {
*x = NewWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterArgs) ProtoMessage() {}
func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *NewWriterArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
func (x *NewWriterArgs) GetOrder() OrderEnum {
if x != nil {
return x.Order
}
return OrderEnum_LSB
}
func (x *NewWriterArgs) GetLitWidth() int64 {
if x != nil {
return x.LitWidth
}
return 0
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewReader
// *NgoloFuzzOne_NewWriter
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
return x.NewReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
return x.NewWriter
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewReader struct {
NewReader *NewReaderArgs `protobuf:"bytes,1,opt,name=NewReader,proto3,oneof"`
}
type NgoloFuzzOne_NewWriter struct {
NewWriter *NewWriterArgs `protobuf:"bytes,2,opt,name=NewWriter,proto3,oneof"`
}
func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"e\n" +
"\rNewReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\x12*\n" +
"\x05order\x18\x02 \x01(\x0e2\x14.ngolofuzz.OrderEnumR\x05order\x12\x1a\n" +
"\blitWidth\x18\x03 \x01(\x03R\blitWidth\"e\n" +
"\rNewWriterArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12*\n" +
"\x05order\x18\x02 \x01(\x0e2\x14.ngolofuzz.OrderEnumR\x05order\x12\x1a\n" +
"\blitWidth\x18\x03 \x01(\x03R\blitWidth\"\x8a\x01\n" +
"\fNgoloFuzzOne\x128\n" +
"\tNewReader\x18\x01 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x128\n" +
"\tNewWriter\x18\x02 \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriterB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*\x1d\n" +
"\tOrderEnum\x12\a\n" +
"\x03LSB\x10\x00\x12\a\n" +
"\x03MSB\x10\x01B\x19Z\x17./;fuzz_ng_compress_lzwb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_ngolofuzz_proto_goTypes = []any{
(OrderEnum)(0), // 0: ngolofuzz.OrderEnum
(*NewReaderArgs)(nil), // 1: ngolofuzz.NewReaderArgs
(*NewWriterArgs)(nil), // 2: ngolofuzz.NewWriterArgs
(*NgoloFuzzOne)(nil), // 3: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 4: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 5: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NewReaderArgs.order:type_name -> ngolofuzz.OrderEnum
0, // 1: ngolofuzz.NewWriterArgs.order:type_name -> ngolofuzz.OrderEnum
1, // 2: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
2, // 3: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
3, // 4: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
5, // [5:5] is the sub-list for method output_type
5, // [5:5] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzOne_NewReader)(nil),
(*NgoloFuzzOne_NewWriter)(nil),
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 1,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
EnumInfos: file_ngolofuzz_proto_enumTypes,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_compress_zlib
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"compress/zlib"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is a fake net.Conn whose read side is served from a fixed
// byte slice and whose write side discards everything, letting fuzzed
// code exercise connection-based APIs deterministically.
type FuzzingConn struct {
buf []byte // data returned by Read
offset int // number of bytes of buf already consumed
}
// Read copies the next chunk of c.buf into b and reports exactly how many
// bytes were copied, returning io.EOF once the buffer is exhausted.
// Bug fix: the previous version compared len(b) against
// len(c.buf)+c.offset instead of the remaining length len(c.buf)-c.offset,
// so a short tail read returned n == len(b) even though fewer bytes were
// copied, and advanced offset past the end of buf — violating the
// io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
n = copy(b, c.buf[c.offset:])
c.offset += n
return n, nil
}
// Write pretends to consume all of b; data sent over a fuzzing
// connection is simply discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
n = len(b)
return n, err
}
// Close marks the connection as drained by moving the read offset to the
// end of the buffer; it never fails.
func (c *FuzzingConn) Close() (err error) {
c.offset = len(c.buf)
return
}
// FuzzingAddr is the placeholder net.Addr handed out by FuzzingConn.
type FuzzingAddr struct{}
// Network reports a fixed fake network name.
func (c *FuzzingAddr) Network() string { return "fuzz_addr_net" }
// String reports a fixed fake address string.
func (c *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns the placeholder fuzzing address.
func (c *FuzzingConn) LocalAddr() net.Addr { return &FuzzingAddr{} }
// RemoteAddr returns the placeholder fuzzing address.
func (c *FuzzingConn) RemoteAddr() net.Addr { return &FuzzingAddr{} }
// SetDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }
// SetReadDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }
// SetWriteDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn builds a FuzzingConn whose reads are served from a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
return &FuzzingConn{buf: a}
}
// TODO: emit these helper functions only when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a slice of int64 values to the platform int type.
func ConvertIntArray(a []int64) []int {
r := make([]int, 0, len(a))
for _, v := range a {
r = append(r, int(v))
}
return r
}
// ConvertUint16Array narrows a slice of int64 values to uint16,
// truncating each value modulo 2^16.
func ConvertUint16Array(a []int64) []uint16 {
r := make([]uint16, 0, len(a))
for _, v := range a {
r = append(r, uint16(v))
}
return r
}
// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
if len(s) == 0 {
return '\x00'
}
return []rune(s)[0]
}
// FuzzNG_valid is the fuzzer entry point used when data is expected to be a
// valid protobuf: a failed Unmarshal is treated as a harness bug and panics.
// NOTE(review): the recover handler is installed only after the Unmarshal
// check, so that panic deliberately escapes.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Swallow panics whose value is a string — presumably those raised on
// purpose by the harness/target (TODO confirm); anything else is re-raised.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the tolerant fuzzer entry point: inputs that fail to
// unmarshal are silently rejected (return 0) instead of panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow panics whose value is a string — presumably those raised on
// purpose by the harness/target (TODO confirm); anything else is re-raised.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump in FuzzNG_List.
var initialized bool
// FuzzNG_List replays the fuzzer-generated call list against compress/zlib.
// Writers produced by the NewWriter* constructors are collected in
// WriterResults, and subsequent Writer method calls are dispatched to them
// round-robin via WriterResultsIndex. Returns 1 when the whole list was
// executed and 0 when execution stopped early (length cap or an error from
// the target API).
func FuzzNG_List(gen *NgoloFuzzList) int {
// On first use, optionally write a Go reproducer for this input to the
// file named by the FUZZ_NG_REPRODUCER environment variable.
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var WriterResults []*zlib.Writer
WriterResultsIndex := 0
for l := range gen.List {
// Cap the amount of work done per fuzz input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewReader:
arg0 := bytes.NewReader(a.NewReader.R)
_, r1 := zlib.NewReader(arg0)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NewReaderDict:
arg0 := bytes.NewReader(a.NewReaderDict.R)
_, r1 := zlib.NewReaderDict(arg0, a.NewReaderDict.Dict)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NewWriter:
arg0 := bytes.NewBuffer(a.NewWriter.W)
r0 := zlib.NewWriter(arg0)
if r0 != nil{
// Keep the writer so later Reset/Write/Flush/Close ops can use it.
WriterResults = append(WriterResults, r0)
}
case *NgoloFuzzOne_NewWriterLevel:
arg0 := bytes.NewBuffer(a.NewWriterLevel.W)
arg1 := int(a.NewWriterLevel.Level)
r0, r1 := zlib.NewWriterLevel(arg0, arg1)
if r0 != nil{
WriterResults = append(WriterResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NewWriterLevelDict:
arg0 := bytes.NewBuffer(a.NewWriterLevelDict.W)
arg1 := int(a.NewWriterLevelDict.Level)
r0, r1 := zlib.NewWriterLevelDict(arg0, arg1, a.NewWriterLevelDict.Dict)
if r0 != nil{
WriterResults = append(WriterResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotReset:
// Skip writer method ops until at least one writer exists.
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg1 := bytes.NewBuffer(a.WriterNgdotReset.W)
arg0.Reset(arg1)
case *NgoloFuzzOne_WriterNgdotWrite:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
_, r1 := arg0.Write(a.WriterNgdotWrite.P)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotFlush:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
r0 := arg0.Flush()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotClose:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
r0 := arg0.Close()
if r0 != nil{
r0.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a human-readable Go reproducer for the call list to w.
// WriterNb counts writers created so far and WriterResultsIndex mirrors the
// round-robin dispatch performed in FuzzNG_List, so the printed calls target
// the same writers the fuzzer exercised.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
WriterNb := 0
WriterResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewReader:
w.WriteString(fmt.Sprintf("zlib.NewReader(bytes.NewReader(%#+v))\n", a.NewReader.R))
case *NgoloFuzzOne_NewReaderDict:
w.WriteString(fmt.Sprintf("zlib.NewReaderDict(bytes.NewReader(%#+v), %#+v)\n", a.NewReaderDict.R, a.NewReaderDict.Dict))
case *NgoloFuzzOne_NewWriter:
w.WriteString(fmt.Sprintf("Writer%d := zlib.NewWriter(bytes.NewBuffer(%#+v))\n", WriterNb, a.NewWriter.W))
WriterNb = WriterNb + 1
case *NgoloFuzzOne_NewWriterLevel:
w.WriteString(fmt.Sprintf("Writer%d, _ := zlib.NewWriterLevel(bytes.NewBuffer(%#+v), int(%#+v))\n", WriterNb, a.NewWriterLevel.W, a.NewWriterLevel.Level))
WriterNb = WriterNb + 1
case *NgoloFuzzOne_NewWriterLevelDict:
w.WriteString(fmt.Sprintf("Writer%d, _ := zlib.NewWriterLevelDict(bytes.NewBuffer(%#+v), int(%#+v), %#+v)\n", WriterNb, a.NewWriterLevelDict.W, a.NewWriterLevelDict.Level, a.NewWriterLevelDict.Dict))
WriterNb = WriterNb + 1
case *NgoloFuzzOne_WriterNgdotReset:
// Writer method calls are only printed once a writer exists.
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Reset(bytes.NewBuffer(%#+v))\n", WriterResultsIndex, a.WriterNgdotReset.W))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotWrite:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Write(%#+v)\n", WriterResultsIndex, a.WriterNgdotWrite.P))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotFlush:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Flush()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotClose:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Close()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_compress_zlib
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewReaderArgs) Reset() {
*x = NewReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderArgs) ProtoMessage() {}
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewReaderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type NewReaderDictArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
Dict []byte `protobuf:"bytes,2,opt,name=dict,proto3" json:"dict,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewReaderDictArgs) Reset() {
*x = NewReaderDictArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewReaderDictArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderDictArgs) ProtoMessage() {}
func (x *NewReaderDictArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderDictArgs.ProtoReflect.Descriptor instead.
func (*NewReaderDictArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *NewReaderDictArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
func (x *NewReaderDictArgs) GetDict() []byte {
if x != nil {
return x.Dict
}
return nil
}
type NewWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterArgs) Reset() {
*x = NewWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterArgs) ProtoMessage() {}
func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NewWriterArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type NewWriterLevelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterLevelArgs) Reset() {
*x = NewWriterLevelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterLevelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterLevelArgs) ProtoMessage() {}
func (x *NewWriterLevelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterLevelArgs.ProtoReflect.Descriptor instead.
func (*NewWriterLevelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NewWriterLevelArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
func (x *NewWriterLevelArgs) GetLevel() int64 {
if x != nil {
return x.Level
}
return 0
}
// NewWriterLevelDictArgs carries the fuzzed arguments for a
// NewWriterLevelDict call: writer seed W, compression Level, and a
// preset dictionary Dict.
type NewWriterLevelDictArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	Level         int64                  `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"`
	Dict          []byte                 `protobuf:"bytes,3,opt,name=dict,proto3" json:"dict,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its runtime type information.
func (x *NewWriterLevelDictArgs) Reset() {
	*x = NewWriterLevelDictArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protobuf text formatter.
func (x *NewWriterLevelDictArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewWriterLevelDictArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching type info.
func (x *NewWriterLevelDictArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewWriterLevelDictArgs.ProtoReflect.Descriptor instead.
func (*NewWriterLevelDictArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetW returns the writer seed bytes; safe on a nil receiver.
func (x *NewWriterLevelDictArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}

// GetLevel returns the fuzzed compression level; safe on a nil receiver.
func (x *NewWriterLevelDictArgs) GetLevel() int64 {
	if x != nil {
		return x.Level
	}
	return 0
}

// GetDict returns the preset dictionary bytes; safe on a nil receiver.
func (x *NewWriterLevelDictArgs) GetDict() []byte {
	if x != nil {
		return x.Dict
	}
	return nil
}
// WriterNgdotResetArgs carries the fuzzed argument for a Writer.Reset call:
// a replacement destination seed W.
type WriterNgdotResetArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its runtime type information.
func (x *WriterNgdotResetArgs) Reset() {
	*x = WriterNgdotResetArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protobuf text formatter.
func (x *WriterNgdotResetArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotResetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching type info.
func (x *WriterNgdotResetArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotResetArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetW returns the destination seed bytes; safe on a nil receiver.
func (x *WriterNgdotResetArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}
// WriterNgdotWriteArgs carries the fuzzed argument for a Writer.Write call:
// the payload P to compress.
type WriterNgdotWriteArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	P             []byte                 `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its runtime type information.
func (x *WriterNgdotWriteArgs) Reset() {
	*x = WriterNgdotWriteArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protobuf text formatter.
func (x *WriterNgdotWriteArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotWriteArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching type info.
func (x *WriterNgdotWriteArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetP returns the payload bytes; safe on a nil receiver.
func (x *WriterNgdotWriteArgs) GetP() []byte {
	if x != nil {
		return x.P
	}
	return nil
}
// WriterNgdotFlushArgs is the (empty) argument message for a Writer.Flush
// call; Flush takes no parameters, so only the marker is needed.
type WriterNgdotFlushArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its runtime type information.
func (x *WriterNgdotFlushArgs) Reset() {
	*x = WriterNgdotFlushArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protobuf text formatter.
func (x *WriterNgdotFlushArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotFlushArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching type info.
func (x *WriterNgdotFlushArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotFlushArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotFlushArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// WriterNgdotCloseArgs is the (empty) argument message for a Writer.Close
// call; Close takes no parameters, so only the marker is needed.
type WriterNgdotCloseArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its runtime type information.
func (x *WriterNgdotCloseArgs) Reset() {
	*x = WriterNgdotCloseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protobuf text formatter.
func (x *WriterNgdotCloseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotCloseArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching type info.
func (x *WriterNgdotCloseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCloseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// NgoloFuzzOne wraps a single fuzzed API call. Exactly one oneof variant of
// Item is set, selecting which target function the harness will invoke.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_NewReader
	//	*NgoloFuzzOne_NewReaderDict
	//	*NgoloFuzzOne_NewWriter
	//	*NgoloFuzzOne_NewWriterLevel
	//	*NgoloFuzzOne_NewWriterLevelDict
	//	*NgoloFuzzOne_WriterNgdotReset
	//	*NgoloFuzzOne_WriterNgdotWrite
	//	*NgoloFuzzOne_WriterNgdotFlush
	//	*NgoloFuzzOne_WriterNgdotClose
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its runtime type information.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protobuf text formatter.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching type info.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetItem returns whichever oneof variant is set (nil if none).
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetNewReader returns the NewReader arguments if that variant is set.
func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
			return x.NewReader
		}
	}
	return nil
}

// GetNewReaderDict returns the NewReaderDict arguments if that variant is set.
func (x *NgoloFuzzOne) GetNewReaderDict() *NewReaderDictArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewReaderDict); ok {
			return x.NewReaderDict
		}
	}
	return nil
}

// GetNewWriter returns the NewWriter arguments if that variant is set.
func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
			return x.NewWriter
		}
	}
	return nil
}

// GetNewWriterLevel returns the NewWriterLevel arguments if that variant is set.
func (x *NgoloFuzzOne) GetNewWriterLevel() *NewWriterLevelArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewWriterLevel); ok {
			return x.NewWriterLevel
		}
	}
	return nil
}

// GetNewWriterLevelDict returns the NewWriterLevelDict arguments if that variant is set.
func (x *NgoloFuzzOne) GetNewWriterLevelDict() *NewWriterLevelDictArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewWriterLevelDict); ok {
			return x.NewWriterLevelDict
		}
	}
	return nil
}

// GetWriterNgdotReset returns the Writer.Reset arguments if that variant is set.
func (x *NgoloFuzzOne) GetWriterNgdotReset() *WriterNgdotResetArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotReset); ok {
			return x.WriterNgdotReset
		}
	}
	return nil
}

// GetWriterNgdotWrite returns the Writer.Write arguments if that variant is set.
func (x *NgoloFuzzOne) GetWriterNgdotWrite() *WriterNgdotWriteArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWrite); ok {
			return x.WriterNgdotWrite
		}
	}
	return nil
}

// GetWriterNgdotFlush returns the Writer.Flush arguments if that variant is set.
func (x *NgoloFuzzOne) GetWriterNgdotFlush() *WriterNgdotFlushArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotFlush); ok {
			return x.WriterNgdotFlush
		}
	}
	return nil
}

// GetWriterNgdotClose returns the Writer.Close arguments if that variant is set.
func (x *NgoloFuzzOne) GetWriterNgdotClose() *WriterNgdotCloseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotClose); ok {
			return x.WriterNgdotClose
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// Oneof wrapper types: each holds exactly one call's argument message.
type NgoloFuzzOne_NewReader struct {
	NewReader *NewReaderArgs `protobuf:"bytes,1,opt,name=NewReader,proto3,oneof"`
}

type NgoloFuzzOne_NewReaderDict struct {
	NewReaderDict *NewReaderDictArgs `protobuf:"bytes,2,opt,name=NewReaderDict,proto3,oneof"`
}

type NgoloFuzzOne_NewWriter struct {
	NewWriter *NewWriterArgs `protobuf:"bytes,3,opt,name=NewWriter,proto3,oneof"`
}

type NgoloFuzzOne_NewWriterLevel struct {
	NewWriterLevel *NewWriterLevelArgs `protobuf:"bytes,4,opt,name=NewWriterLevel,proto3,oneof"`
}

type NgoloFuzzOne_NewWriterLevelDict struct {
	NewWriterLevelDict *NewWriterLevelDictArgs `protobuf:"bytes,5,opt,name=NewWriterLevelDict,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotReset struct {
	WriterNgdotReset *WriterNgdotResetArgs `protobuf:"bytes,6,opt,name=WriterNgdotReset,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotWrite struct {
	WriterNgdotWrite *WriterNgdotWriteArgs `protobuf:"bytes,7,opt,name=WriterNgdotWrite,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotFlush struct {
	WriterNgdotFlush *WriterNgdotFlushArgs `protobuf:"bytes,8,opt,name=WriterNgdotFlush,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotClose struct {
	WriterNgdotClose *WriterNgdotCloseArgs `protobuf:"bytes,9,opt,name=WriterNgdotClose,proto3,oneof"`
}

func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewReaderDict) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewWriterLevel) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewWriterLevelDict) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotReset) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotWrite) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotFlush) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotClose) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a fuzzed scalar of any supported primitive kind; exactly
// one oneof variant of Item is set.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its runtime type information.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protobuf text formatter.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching type info.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetItem returns whichever oneof variant is set (nil if none).
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetDoubleArgs returns the float64 value if that variant is set, else 0.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

// GetInt64Args returns the int64 value if that variant is set, else 0.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

// GetBoolArgs returns the bool value if that variant is set, else false.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

// GetStringArgs returns the string value if that variant is set, else "".
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

// GetBytesArgs returns the bytes value if that variant is set, else nil.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of calls
// that the harness replays one by one.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its runtime type information.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protobuf text formatter.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching type info.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetList returns the ordered call sequence; safe on a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the registered file descriptor, populated once by
// file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto. The bytes are generated; do not edit by hand.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1d\n" +
	"\rNewReaderArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\"5\n" +
	"\x11NewReaderDictArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\x12\x12\n" +
	"\x04dict\x18\x02 \x01(\fR\x04dict\"\x1d\n" +
	"\rNewWriterArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\"8\n" +
	"\x12NewWriterLevelArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\x12\x14\n" +
	"\x05level\x18\x02 \x01(\x03R\x05level\"P\n" +
	"\x16NewWriterLevelDictArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\x12\x14\n" +
	"\x05level\x18\x02 \x01(\x03R\x05level\x12\x12\n" +
	"\x04dict\x18\x03 \x01(\fR\x04dict\"$\n" +
	"\x14WriterNgdotResetArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\"$\n" +
	"\x14WriterNgdotWriteArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\"\x16\n" +
	"\x14WriterNgdotFlushArgs\"\x16\n" +
	"\x14WriterNgdotCloseArgs\"\xaa\x05\n" +
	"\fNgoloFuzzOne\x128\n" +
	"\tNewReader\x18\x01 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12D\n" +
	"\rNewReaderDict\x18\x02 \x01(\v2\x1c.ngolofuzz.NewReaderDictArgsH\x00R\rNewReaderDict\x128\n" +
	"\tNewWriter\x18\x03 \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriter\x12G\n" +
	"\x0eNewWriterLevel\x18\x04 \x01(\v2\x1d.ngolofuzz.NewWriterLevelArgsH\x00R\x0eNewWriterLevel\x12S\n" +
	"\x12NewWriterLevelDict\x18\x05 \x01(\v2!.ngolofuzz.NewWriterLevelDictArgsH\x00R\x12NewWriterLevelDict\x12M\n" +
	"\x10WriterNgdotReset\x18\x06 \x01(\v2\x1f.ngolofuzz.WriterNgdotResetArgsH\x00R\x10WriterNgdotReset\x12M\n" +
	"\x10WriterNgdotWrite\x18\a \x01(\v2\x1f.ngolofuzz.WriterNgdotWriteArgsH\x00R\x10WriterNgdotWrite\x12M\n" +
	"\x10WriterNgdotFlush\x18\b \x01(\v2\x1f.ngolofuzz.WriterNgdotFlushArgsH\x00R\x10WriterNgdotFlush\x12M\n" +
	"\x10WriterNgdotClose\x18\t \x01(\v2\x1f.ngolofuzz.WriterNgdotCloseArgsH\x00R\x10WriterNgdotCloseB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_compress_zlibb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP returns the gzip-compressed raw
// descriptor, compressing it exactly once on first use.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds runtime type info for all 12 messages.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 12)

// file_ngolofuzz_proto_goTypes maps descriptor indices to Go message types.
var file_ngolofuzz_proto_goTypes = []any{
	(*NewReaderArgs)(nil),          // 0: ngolofuzz.NewReaderArgs
	(*NewReaderDictArgs)(nil),      // 1: ngolofuzz.NewReaderDictArgs
	(*NewWriterArgs)(nil),          // 2: ngolofuzz.NewWriterArgs
	(*NewWriterLevelArgs)(nil),     // 3: ngolofuzz.NewWriterLevelArgs
	(*NewWriterLevelDictArgs)(nil), // 4: ngolofuzz.NewWriterLevelDictArgs
	(*WriterNgdotResetArgs)(nil),   // 5: ngolofuzz.WriterNgdotResetArgs
	(*WriterNgdotWriteArgs)(nil),   // 6: ngolofuzz.WriterNgdotWriteArgs
	(*WriterNgdotFlushArgs)(nil),   // 7: ngolofuzz.WriterNgdotFlushArgs
	(*WriterNgdotCloseArgs)(nil),   // 8: ngolofuzz.WriterNgdotCloseArgs
	(*NgoloFuzzOne)(nil),           // 9: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),           // 10: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),          // 11: ngolofuzz.NgoloFuzzList
}

// file_ngolofuzz_proto_depIdxs encodes field -> type dependency edges into
// file_ngolofuzz_proto_goTypes, followed by the method/extension sub-lists.
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.NewReaderDict:type_name -> ngolofuzz.NewReaderDictArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.NewWriterLevel:type_name -> ngolofuzz.NewWriterLevelArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.NewWriterLevelDict:type_name -> ngolofuzz.NewWriterLevelDictArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.WriterNgdotReset:type_name -> ngolofuzz.WriterNgdotResetArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.WriterNgdotWrite:type_name -> ngolofuzz.WriterNgdotWriteArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.WriterNgdotFlush:type_name -> ngolofuzz.WriterNgdotFlushArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.WriterNgdotClose:type_name -> ngolofuzz.WriterNgdotCloseArgs
	9,  // 9: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	10, // [10:10] is the sub-list for method output_type
	10, // [10:10] is the sub-list for method input_type
	10, // [10:10] is the sub-list for extension type_name
	10, // [10:10] is the sub-list for extension extendee
	0,  // [0:10] is the sub-list for field type_name
}
// init registers the file's descriptor and types at program start.
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init wires the oneof wrapper types and builds the
// type registry exactly once; later calls are no-ops.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		// Already initialized.
		return
	}
	file_ngolofuzz_proto_msgTypes[9].OneofWrappers = []any{
		(*NgoloFuzzOne_NewReader)(nil),
		(*NgoloFuzzOne_NewReaderDict)(nil),
		(*NgoloFuzzOne_NewWriter)(nil),
		(*NgoloFuzzOne_NewWriterLevel)(nil),
		(*NgoloFuzzOne_NewWriterLevelDict)(nil),
		(*NgoloFuzzOne_WriterNgdotReset)(nil),
		(*NgoloFuzzOne_WriterNgdotWrite)(nil),
		(*NgoloFuzzOne_WriterNgdotFlush)(nil),
		(*NgoloFuzzOne_WriterNgdotClose)(nil),
	}
	file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   12,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release build-time tables; the runtime keeps its own references.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_container_list
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"container/list"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
//TODO only add these functions if needed

// CreateBigInt builds a big.Int from the big-endian bytes in a;
// an empty or nil slice yields zero.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the bytes in a with a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type,
// preserving order and length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (modulo 2^16),
// preserving order and length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// (Ranging over a string decodes UTF-8, so an invalid leading byte yields
// the replacement rune, same as the original implementation.)
func GetRune(s string) rune {
	first := rune('\x00')
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid is the entry point for inputs that are expected to already be
// valid protobuf (e.g. produced by a structure-aware mutator): a failure to
// unmarshal is itself treated as a harness bug and panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Deliberately before the recover below: a bad input here is fatal.
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// String panics are expected target behavior; swallow them.
				//do nothing
			default:
				// Runtime errors and other panic values are real crashes.
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the entry point for raw fuzzer bytes: inputs that fail to
// unmarshal are simply rejected (return 0) instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// String panics are expected target behavior; swallow them.
				//do nothing
			default:
				// Runtime errors and other panic values are real crashes.
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time FUZZ_NG_REPRODUCER setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets the generated call list against container/list.
// It keeps round-robin pools of previously produced *list.Element and
// *list.List values (ElementResults/ListResults) so later operations can
// reuse them as receivers and arguments; operations whose required pool is
// still empty are skipped. If FUZZ_NG_REPRODUCER names a file, an equivalent
// Go reproducer is written there once per process.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var ElementResults []*list.Element
	ElementResultsIndex := 0
	var ListResults []*list.List
	ListResultsIndex := 0
	for l := range gen.List {
		// Cap the number of interpreted operations per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ElementNgdotNext:
			if len(ElementResults) == 0 {
				continue
			}
			arg0 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			r0 := arg0.Next()
			if r0 != nil {
				ElementResults = append(ElementResults, r0)
			}
		case *NgoloFuzzOne_ElementNgdotPrev:
			if len(ElementResults) == 0 {
				continue
			}
			arg0 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			r0 := arg0.Prev()
			if r0 != nil {
				ElementResults = append(ElementResults, r0)
			}
		case *NgoloFuzzOne_ListNgdotInit:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			r0 := arg0.Init()
			if r0 != nil {
				ListResults = append(ListResults, r0)
			}
		case *NgoloFuzzOne_New:
			r0 := list.New()
			if r0 != nil {
				ListResults = append(ListResults, r0)
			}
		case *NgoloFuzzOne_ListNgdotLen:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			arg0.Len()
		case *NgoloFuzzOne_ListNgdotFront:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			r0 := arg0.Front()
			if r0 != nil {
				ElementResults = append(ElementResults, r0)
			}
		case *NgoloFuzzOne_ListNgdotBack:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			r0 := arg0.Back()
			if r0 != nil {
				ElementResults = append(ElementResults, r0)
			}
		case *NgoloFuzzOne_ListNgdotRemove:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			if len(ElementResults) == 0 {
				continue
			}
			arg1 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			arg0.Remove(arg1)
		case *NgoloFuzzOne_ListNgdotPushFront:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			r0 := arg0.PushFront(a.ListNgdotPushFront.V)
			if r0 != nil {
				ElementResults = append(ElementResults, r0)
			}
		case *NgoloFuzzOne_ListNgdotPushBack:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			r0 := arg0.PushBack(a.ListNgdotPushBack.V)
			if r0 != nil {
				ElementResults = append(ElementResults, r0)
			}
		case *NgoloFuzzOne_ListNgdotInsertBefore:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			if len(ElementResults) == 0 {
				continue
			}
			arg2 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			r0 := arg0.InsertBefore(a.ListNgdotInsertBefore.V, arg2)
			if r0 != nil {
				ElementResults = append(ElementResults, r0)
			}
		case *NgoloFuzzOne_ListNgdotInsertAfter:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			if len(ElementResults) == 0 {
				continue
			}
			arg2 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			r0 := arg0.InsertAfter(a.ListNgdotInsertAfter.V, arg2)
			if r0 != nil {
				ElementResults = append(ElementResults, r0)
			}
		case *NgoloFuzzOne_ListNgdotMoveToFront:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			if len(ElementResults) == 0 {
				continue
			}
			arg1 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			arg0.MoveToFront(arg1)
		case *NgoloFuzzOne_ListNgdotMoveToBack:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			if len(ElementResults) == 0 {
				continue
			}
			arg1 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			arg0.MoveToBack(arg1)
		case *NgoloFuzzOne_ListNgdotMoveBefore:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			if len(ElementResults) == 0 {
				continue
			}
			arg1 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			if len(ElementResults) == 0 {
				continue
			}
			arg2 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			arg0.MoveBefore(arg1, arg2)
		case *NgoloFuzzOne_ListNgdotMoveAfter:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			if len(ElementResults) == 0 {
				continue
			}
			arg1 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			if len(ElementResults) == 0 {
				continue
			}
			arg2 := ElementResults[ElementResultsIndex]
			ElementResultsIndex = (ElementResultsIndex + 1) % len(ElementResults)
			arg0.MoveAfter(arg1, arg2)
		case *NgoloFuzzOne_ListNgdotPushBackList:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			if len(ListResults) == 0 {
				continue
			}
			arg1 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			arg0.PushBackList(arg1)
		case *NgoloFuzzOne_ListNgdotPushFrontList:
			if len(ListResults) == 0 {
				continue
			}
			arg0 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			if len(ListResults) == 0 {
				continue
			}
			arg1 := ListResults[ListResultsIndex]
			ListResultsIndex = (ListResultsIndex + 1) % len(ListResults)
			arg0.PushFrontList(arg1)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer of the call list to w, mirroring
// the interpretation logic of FuzzNG_List with numbered ElementN/ListN
// variables. ElementNb/ListNb count values created so far; the *ResultsIndex
// counters replay the interpreter's round-robin selection.
//
// NOTE(review): the counter updates are not obviously in lockstep with
// FuzzNG_List (e.g. the modulo here uses the post-increment ElementNb) —
// presumably acceptable for a best-effort reproducer; verify against the
// generator if exact replay matters. WriteString errors are ignored
// throughout (best-effort output).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	ElementNb := 0
	ElementResultsIndex := 0
	ListNb := 0
	ListResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ElementNgdotNext:
			if ElementNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Element%d := Element%d.Next()\n", ElementNb, ElementResultsIndex))
			ElementNb = ElementNb + 1
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
		case *NgoloFuzzOne_ElementNgdotPrev:
			if ElementNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Element%d := Element%d.Prev()\n", ElementNb, ElementResultsIndex))
			ElementNb = ElementNb + 1
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
		case *NgoloFuzzOne_ListNgdotInit:
			if ListNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("List%d := List%d.Init()\n", ListNb, ListResultsIndex))
			ListNb = ListNb + 1
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
		case *NgoloFuzzOne_New:
			w.WriteString(fmt.Sprintf("List%d := list.New()\n", ListNb))
			ListNb = ListNb + 1
		case *NgoloFuzzOne_ListNgdotLen:
			if ListNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("List%d.Len()\n", ListResultsIndex))
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
		case *NgoloFuzzOne_ListNgdotFront:
			if ListNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Element%d := List%d.Front()\n", ElementNb, ListResultsIndex))
			ElementNb = ElementNb + 1
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
		case *NgoloFuzzOne_ListNgdotBack:
			if ListNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Element%d := List%d.Back()\n", ElementNb, ListResultsIndex))
			ElementNb = ElementNb + 1
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
		case *NgoloFuzzOne_ListNgdotRemove:
			if ListNb == 0 {
				continue
			}
			if ElementNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("List%d.Remove(Element%d)\n", ListResultsIndex, (ElementResultsIndex+0)%ElementNb))
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
		case *NgoloFuzzOne_ListNgdotPushFront:
			if ListNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Element%d := List%d.PushFront(%#+v)\n", ElementNb, ListResultsIndex, a.ListNgdotPushFront.V))
			ElementNb = ElementNb + 1
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
		case *NgoloFuzzOne_ListNgdotPushBack:
			if ListNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Element%d := List%d.PushBack(%#+v)\n", ElementNb, ListResultsIndex, a.ListNgdotPushBack.V))
			ElementNb = ElementNb + 1
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
		case *NgoloFuzzOne_ListNgdotInsertBefore:
			if ListNb == 0 {
				continue
			}
			if ElementNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Element%d := List%d.InsertBefore(%#+v, Element%d)\n", ElementNb, ListResultsIndex, a.ListNgdotInsertBefore.V, (ElementResultsIndex+0)%ElementNb))
			ElementNb = ElementNb + 1
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
		case *NgoloFuzzOne_ListNgdotInsertAfter:
			if ListNb == 0 {
				continue
			}
			if ElementNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Element%d := List%d.InsertAfter(%#+v, Element%d)\n", ElementNb, ListResultsIndex, a.ListNgdotInsertAfter.V, (ElementResultsIndex+0)%ElementNb))
			ElementNb = ElementNb + 1
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
		case *NgoloFuzzOne_ListNgdotMoveToFront:
			if ListNb == 0 {
				continue
			}
			if ElementNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("List%d.MoveToFront(Element%d)\n", ListResultsIndex, (ElementResultsIndex+0)%ElementNb))
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
		case *NgoloFuzzOne_ListNgdotMoveToBack:
			if ListNb == 0 {
				continue
			}
			if ElementNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("List%d.MoveToBack(Element%d)\n", ListResultsIndex, (ElementResultsIndex+0)%ElementNb))
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
		case *NgoloFuzzOne_ListNgdotMoveBefore:
			if ListNb == 0 {
				continue
			}
			if ElementNb == 0 {
				continue
			}
			if ElementNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("List%d.MoveBefore(Element%d, Element%d)\n", ListResultsIndex, (ElementResultsIndex+0)%ElementNb, (ElementResultsIndex+1)%ElementNb))
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
		case *NgoloFuzzOne_ListNgdotMoveAfter:
			if ListNb == 0 {
				continue
			}
			if ElementNb == 0 {
				continue
			}
			if ElementNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("List%d.MoveAfter(Element%d, Element%d)\n", ListResultsIndex, (ElementResultsIndex+0)%ElementNb, (ElementResultsIndex+1)%ElementNb))
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
			ElementResultsIndex = (ElementResultsIndex + 1) % ElementNb
		case *NgoloFuzzOne_ListNgdotPushBackList:
			if ListNb == 0 {
				continue
			}
			if ListNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("List%d.PushBackList(List%d)\n", ListResultsIndex, (ListResultsIndex+1)%ListNb))
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
		case *NgoloFuzzOne_ListNgdotPushFrontList:
			if ListNb == 0 {
				continue
			}
			if ListNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("List%d.PushFrontList(List%d)\n", ListResultsIndex, (ListResultsIndex+1)%ListNb))
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
			ListResultsIndex = (ListResultsIndex + 1) % ListNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_container_list
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards emitted by protoc-gen-go: each expression fails to
// compile unless the linked protoimpl runtime version is compatible with
// the generated-code version (20) this file was produced for.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// ElementNgdotNextArgs is a generated protobuf message with no user-visible
// fields; only the runtime bookkeeping fields (state, unknown fields, size
// cache) are present. It models an empty argument list for the fuzz harness.
type ElementNgdotNextArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-associates it with its
// generated type information (file_ngolofuzz_proto_msgTypes[0]).
func (x *ElementNgdotNextArgs) Reset() {
	*x = ElementNgdotNextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text format.
func (x *ElementNgdotNextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*ElementNgdotNextArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message. On a non-nil
// receiver it lazily caches the message info on first use; on a nil receiver
// it falls back to the type-only view.
func (x *ElementNgdotNextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ElementNgdotNextArgs.ProtoReflect.Descriptor instead.
func (*ElementNgdotNextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// ElementNgdotPrevArgs: generated empty-argument message; same accessor
// pattern as ElementNgdotNextArgs, bound to msgTypes[1].
type ElementNgdotPrevArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ElementNgdotPrevArgs) Reset() {
	*x = ElementNgdotPrevArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ElementNgdotPrevArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ElementNgdotPrevArgs) ProtoMessage() {}

func (x *ElementNgdotPrevArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ElementNgdotPrevArgs.ProtoReflect.Descriptor instead.
func (*ElementNgdotPrevArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// ListNgdotInitArgs: generated empty-argument message; same accessor pattern
// as ElementNgdotNextArgs, bound to msgTypes[2].
type ListNgdotInitArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotInitArgs) Reset() {
	*x = ListNgdotInitArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotInitArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotInitArgs) ProtoMessage() {}

func (x *ListNgdotInitArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotInitArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotInitArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// NewArgs: generated empty-argument message; same accessor pattern as
// ElementNgdotNextArgs, bound to msgTypes[3].
type NewArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NewArgs) Reset() {
	*x = NewArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NewArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewArgs) ProtoMessage() {}

func (x *NewArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// ListNgdotLenArgs: generated empty-argument message, bound to msgTypes[4].
type ListNgdotLenArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotLenArgs) Reset() {
	*x = ListNgdotLenArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotLenArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotLenArgs) ProtoMessage() {}

func (x *ListNgdotLenArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotLenArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotLenArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// ListNgdotFrontArgs: generated empty-argument message, bound to msgTypes[5].
type ListNgdotFrontArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotFrontArgs) Reset() {
	*x = ListNgdotFrontArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotFrontArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotFrontArgs) ProtoMessage() {}

func (x *ListNgdotFrontArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotFrontArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotFrontArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// ListNgdotBackArgs: generated empty-argument message, bound to msgTypes[6].
type ListNgdotBackArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotBackArgs) Reset() {
	*x = ListNgdotBackArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotBackArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotBackArgs) ProtoMessage() {}

func (x *ListNgdotBackArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotBackArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotBackArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// ListNgdotRemoveArgs: generated empty-argument message, bound to msgTypes[7].
type ListNgdotRemoveArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotRemoveArgs) Reset() {
	*x = ListNgdotRemoveArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotRemoveArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotRemoveArgs) ProtoMessage() {}

func (x *ListNgdotRemoveArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotRemoveArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotRemoveArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// ListNgdotPushFrontArgs is a generated protobuf message carrying a single
// optional value V (field 1, a NgoloFuzzAny); bound to msgTypes[8].
type ListNgdotPushFrontArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// V is the value argument for the call this message models.
	V             *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotPushFrontArgs) Reset() {
	*x = ListNgdotPushFrontArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotPushFrontArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotPushFrontArgs) ProtoMessage() {}

func (x *ListNgdotPushFrontArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotPushFrontArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotPushFrontArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetV returns the V field, or nil when the receiver is nil (nil-safe getter).
func (x *ListNgdotPushFrontArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}

// ListNgdotPushBackArgs: generated message with one optional V field; same
// pattern as ListNgdotPushFrontArgs, bound to msgTypes[9].
type ListNgdotPushBackArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	V             *NgoloFuzzAny          `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotPushBackArgs) Reset() {
	*x = ListNgdotPushBackArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotPushBackArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotPushBackArgs) ProtoMessage() {}

func (x *ListNgdotPushBackArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotPushBackArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotPushBackArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

func (x *ListNgdotPushBackArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}

// ListNgdotInsertBeforeArgs: generated message with one optional V field;
// bound to msgTypes[10].
type ListNgdotInsertBeforeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	V             *NgoloFuzzAny          `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotInsertBeforeArgs) Reset() {
	*x = ListNgdotInsertBeforeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotInsertBeforeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotInsertBeforeArgs) ProtoMessage() {}

func (x *ListNgdotInsertBeforeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotInsertBeforeArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotInsertBeforeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

func (x *ListNgdotInsertBeforeArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}

// ListNgdotInsertAfterArgs: generated message with one optional V field;
// bound to msgTypes[11].
type ListNgdotInsertAfterArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	V             *NgoloFuzzAny          `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotInsertAfterArgs) Reset() {
	*x = ListNgdotInsertAfterArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotInsertAfterArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotInsertAfterArgs) ProtoMessage() {}

func (x *ListNgdotInsertAfterArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotInsertAfterArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotInsertAfterArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

func (x *ListNgdotInsertAfterArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}
// ListNgdotMoveToFrontArgs: generated empty-argument message; standard
// protoc-gen-go accessor pattern, bound to msgTypes[12].
type ListNgdotMoveToFrontArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotMoveToFrontArgs) Reset() {
	*x = ListNgdotMoveToFrontArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotMoveToFrontArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotMoveToFrontArgs) ProtoMessage() {}

func (x *ListNgdotMoveToFrontArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotMoveToFrontArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotMoveToFrontArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// ListNgdotMoveToBackArgs: generated empty-argument message, msgTypes[13].
type ListNgdotMoveToBackArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotMoveToBackArgs) Reset() {
	*x = ListNgdotMoveToBackArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotMoveToBackArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotMoveToBackArgs) ProtoMessage() {}

func (x *ListNgdotMoveToBackArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotMoveToBackArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotMoveToBackArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// ListNgdotMoveBeforeArgs: generated empty-argument message, msgTypes[14].
type ListNgdotMoveBeforeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotMoveBeforeArgs) Reset() {
	*x = ListNgdotMoveBeforeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotMoveBeforeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotMoveBeforeArgs) ProtoMessage() {}

func (x *ListNgdotMoveBeforeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotMoveBeforeArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotMoveBeforeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// ListNgdotMoveAfterArgs: generated empty-argument message, msgTypes[15].
type ListNgdotMoveAfterArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotMoveAfterArgs) Reset() {
	*x = ListNgdotMoveAfterArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotMoveAfterArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotMoveAfterArgs) ProtoMessage() {}

func (x *ListNgdotMoveAfterArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotMoveAfterArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotMoveAfterArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// ListNgdotPushBackListArgs: generated empty-argument message, msgTypes[16].
type ListNgdotPushBackListArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotPushBackListArgs) Reset() {
	*x = ListNgdotPushBackListArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotPushBackListArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotPushBackListArgs) ProtoMessage() {}

func (x *ListNgdotPushBackListArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotPushBackListArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotPushBackListArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// ListNgdotPushFrontListArgs: generated empty-argument message, msgTypes[17].
type ListNgdotPushFrontListArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ListNgdotPushFrontListArgs) Reset() {
	*x = ListNgdotPushFrontListArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ListNgdotPushFrontListArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListNgdotPushFrontListArgs) ProtoMessage() {}

func (x *ListNgdotPushFrontListArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListNgdotPushFrontListArgs.ProtoReflect.Descriptor instead.
func (*ListNgdotPushFrontListArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// NgoloFuzzOne is a generated message holding exactly one fuzz operation,
// modeled as a protobuf oneof over the 18 *Args message types below;
// bound to msgTypes[18].
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_ElementNgdotNext
	//	*NgoloFuzzOne_ElementNgdotPrev
	//	*NgoloFuzzOne_ListNgdotInit
	//	*NgoloFuzzOne_New
	//	*NgoloFuzzOne_ListNgdotLen
	//	*NgoloFuzzOne_ListNgdotFront
	//	*NgoloFuzzOne_ListNgdotBack
	//	*NgoloFuzzOne_ListNgdotRemove
	//	*NgoloFuzzOne_ListNgdotPushFront
	//	*NgoloFuzzOne_ListNgdotPushBack
	//	*NgoloFuzzOne_ListNgdotInsertBefore
	//	*NgoloFuzzOne_ListNgdotInsertAfter
	//	*NgoloFuzzOne_ListNgdotMoveToFront
	//	*NgoloFuzzOne_ListNgdotMoveToBack
	//	*NgoloFuzzOne_ListNgdotMoveBefore
	//	*NgoloFuzzOne_ListNgdotMoveAfter
	//	*NgoloFuzzOne_ListNgdotPushBackList
	//	*NgoloFuzzOne_ListNgdotPushFrontList
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}

// GetItem returns the currently-set oneof wrapper, or nil (nil-safe).
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each GetXxx getter below returns the corresponding field when the oneof
// currently holds that variant, and nil otherwise; all are nil-receiver safe.
func (x *NgoloFuzzOne) GetElementNgdotNext() *ElementNgdotNextArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ElementNgdotNext); ok {
			return x.ElementNgdotNext
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetElementNgdotPrev() *ElementNgdotPrevArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ElementNgdotPrev); ok {
			return x.ElementNgdotPrev
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotInit() *ListNgdotInitArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotInit); ok {
			return x.ListNgdotInit
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNew() *NewArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
			return x.New
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotLen() *ListNgdotLenArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotLen); ok {
			return x.ListNgdotLen
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotFront() *ListNgdotFrontArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotFront); ok {
			return x.ListNgdotFront
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotBack() *ListNgdotBackArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotBack); ok {
			return x.ListNgdotBack
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotRemove() *ListNgdotRemoveArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotRemove); ok {
			return x.ListNgdotRemove
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotPushFront() *ListNgdotPushFrontArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotPushFront); ok {
			return x.ListNgdotPushFront
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotPushBack() *ListNgdotPushBackArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotPushBack); ok {
			return x.ListNgdotPushBack
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotInsertBefore() *ListNgdotInsertBeforeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotInsertBefore); ok {
			return x.ListNgdotInsertBefore
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotInsertAfter() *ListNgdotInsertAfterArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotInsertAfter); ok {
			return x.ListNgdotInsertAfter
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotMoveToFront() *ListNgdotMoveToFrontArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotMoveToFront); ok {
			return x.ListNgdotMoveToFront
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotMoveToBack() *ListNgdotMoveToBackArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotMoveToBack); ok {
			return x.ListNgdotMoveToBack
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotMoveBefore() *ListNgdotMoveBeforeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotMoveBefore); ok {
			return x.ListNgdotMoveBefore
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotMoveAfter() *ListNgdotMoveAfterArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotMoveAfter); ok {
			return x.ListNgdotMoveAfter
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotPushBackList() *ListNgdotPushBackListArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotPushBackList); ok {
			return x.ListNgdotPushBackList
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetListNgdotPushFrontList() *ListNgdotPushFrontListArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNgdotPushFrontList); ok {
			return x.ListNgdotPushFrontList
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type for NgoloFuzzOne's "item" oneof.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// Oneof wrapper types: each holds exactly one variant of the "item" oneof,
// with its protobuf field number encoded in the struct tag.
type NgoloFuzzOne_ElementNgdotNext struct {
	ElementNgdotNext *ElementNgdotNextArgs `protobuf:"bytes,1,opt,name=ElementNgdotNext,proto3,oneof"`
}

type NgoloFuzzOne_ElementNgdotPrev struct {
	ElementNgdotPrev *ElementNgdotPrevArgs `protobuf:"bytes,2,opt,name=ElementNgdotPrev,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotInit struct {
	ListNgdotInit *ListNgdotInitArgs `protobuf:"bytes,3,opt,name=ListNgdotInit,proto3,oneof"`
}

type NgoloFuzzOne_New struct {
	New *NewArgs `protobuf:"bytes,4,opt,name=New,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotLen struct {
	ListNgdotLen *ListNgdotLenArgs `protobuf:"bytes,5,opt,name=ListNgdotLen,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotFront struct {
	ListNgdotFront *ListNgdotFrontArgs `protobuf:"bytes,6,opt,name=ListNgdotFront,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotBack struct {
	ListNgdotBack *ListNgdotBackArgs `protobuf:"bytes,7,opt,name=ListNgdotBack,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotRemove struct {
	ListNgdotRemove *ListNgdotRemoveArgs `protobuf:"bytes,8,opt,name=ListNgdotRemove,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotPushFront struct {
	ListNgdotPushFront *ListNgdotPushFrontArgs `protobuf:"bytes,9,opt,name=ListNgdotPushFront,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotPushBack struct {
	ListNgdotPushBack *ListNgdotPushBackArgs `protobuf:"bytes,10,opt,name=ListNgdotPushBack,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotInsertBefore struct {
	ListNgdotInsertBefore *ListNgdotInsertBeforeArgs `protobuf:"bytes,11,opt,name=ListNgdotInsertBefore,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotInsertAfter struct {
	ListNgdotInsertAfter *ListNgdotInsertAfterArgs `protobuf:"bytes,12,opt,name=ListNgdotInsertAfter,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotMoveToFront struct {
	ListNgdotMoveToFront *ListNgdotMoveToFrontArgs `protobuf:"bytes,13,opt,name=ListNgdotMoveToFront,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotMoveToBack struct {
	ListNgdotMoveToBack *ListNgdotMoveToBackArgs `protobuf:"bytes,14,opt,name=ListNgdotMoveToBack,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotMoveBefore struct {
	ListNgdotMoveBefore *ListNgdotMoveBeforeArgs `protobuf:"bytes,15,opt,name=ListNgdotMoveBefore,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotMoveAfter struct {
	ListNgdotMoveAfter *ListNgdotMoveAfterArgs `protobuf:"bytes,16,opt,name=ListNgdotMoveAfter,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotPushBackList struct {
	ListNgdotPushBackList *ListNgdotPushBackListArgs `protobuf:"bytes,17,opt,name=ListNgdotPushBackList,proto3,oneof"`
}

type NgoloFuzzOne_ListNgdotPushFrontList struct {
	ListNgdotPushFrontList *ListNgdotPushFrontListArgs `protobuf:"bytes,18,opt,name=ListNgdotPushFrontList,proto3,oneof"`
}

// Marker methods tying each wrapper type to the sealed oneof interface.
func (*NgoloFuzzOne_ElementNgdotNext) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ElementNgdotPrev) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotInit) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotLen) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotFront) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotBack) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotRemove) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotPushFront) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotPushBack) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotInsertBefore) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotInsertAfter) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotMoveToFront) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotMoveToBack) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotMoveBefore) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotMoveAfter) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotPushBackList) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ListNgdotPushFrontList) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated message holding one scalar fuzz value, modeled
// as a oneof over double/int64/bool/string/bytes; bound to msgTypes[19].
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[19]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[19]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}

// GetItem returns the currently-set oneof wrapper, or nil (nil-safe).
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The GetXxxArgs getters return the variant's value when the oneof holds
// that variant, and the type's zero value otherwise; all are nil-safe.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface for NgoloFuzzAny's "item" oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// Oneof wrapper types, one per scalar variant (field numbers 1-5).
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated top-level message: a repeated sequence of
// NgoloFuzzOne operations (the whole fuzz program); bound to msgTypes[20].
type NgoloFuzzList struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// List is the ordered sequence of operations to replay.
	List          []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[20]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[20]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}

// GetList returns the operation slice, or nil on a nil receiver (nil-safe).
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled FileDescriptor for ngolofuzz.proto,
// populated by the generated init machinery from the raw descriptor below.
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x16\n" +
"\x14ElementNgdotNextArgs\"\x16\n" +
"\x14ElementNgdotPrevArgs\"\x13\n" +
"\x11ListNgdotInitArgs\"\t\n" +
"\aNewArgs\"\x12\n" +
"\x10ListNgdotLenArgs\"\x14\n" +
"\x12ListNgdotFrontArgs\"\x13\n" +
"\x11ListNgdotBackArgs\"\x15\n" +
"\x13ListNgdotRemoveArgs\"?\n" +
"\x16ListNgdotPushFrontArgs\x12%\n" +
"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\">\n" +
"\x15ListNgdotPushBackArgs\x12%\n" +
"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"B\n" +
"\x19ListNgdotInsertBeforeArgs\x12%\n" +
"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"A\n" +
"\x18ListNgdotInsertAfterArgs\x12%\n" +
"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"\x1a\n" +
"\x18ListNgdotMoveToFrontArgs\"\x19\n" +
"\x17ListNgdotMoveToBackArgs\"\x19\n" +
"\x17ListNgdotMoveBeforeArgs\"\x18\n" +
"\x16ListNgdotMoveAfterArgs\"\x1b\n" +
"\x19ListNgdotPushBackListArgs\"\x1c\n" +
"\x1aListNgdotPushFrontListArgs\"\xbf\v\n" +
"\fNgoloFuzzOne\x12M\n" +
"\x10ElementNgdotNext\x18\x01 \x01(\v2\x1f.ngolofuzz.ElementNgdotNextArgsH\x00R\x10ElementNgdotNext\x12M\n" +
"\x10ElementNgdotPrev\x18\x02 \x01(\v2\x1f.ngolofuzz.ElementNgdotPrevArgsH\x00R\x10ElementNgdotPrev\x12D\n" +
"\rListNgdotInit\x18\x03 \x01(\v2\x1c.ngolofuzz.ListNgdotInitArgsH\x00R\rListNgdotInit\x12&\n" +
"\x03New\x18\x04 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12A\n" +
"\fListNgdotLen\x18\x05 \x01(\v2\x1b.ngolofuzz.ListNgdotLenArgsH\x00R\fListNgdotLen\x12G\n" +
"\x0eListNgdotFront\x18\x06 \x01(\v2\x1d.ngolofuzz.ListNgdotFrontArgsH\x00R\x0eListNgdotFront\x12D\n" +
"\rListNgdotBack\x18\a \x01(\v2\x1c.ngolofuzz.ListNgdotBackArgsH\x00R\rListNgdotBack\x12J\n" +
"\x0fListNgdotRemove\x18\b \x01(\v2\x1e.ngolofuzz.ListNgdotRemoveArgsH\x00R\x0fListNgdotRemove\x12S\n" +
"\x12ListNgdotPushFront\x18\t \x01(\v2!.ngolofuzz.ListNgdotPushFrontArgsH\x00R\x12ListNgdotPushFront\x12P\n" +
"\x11ListNgdotPushBack\x18\n" +
" \x01(\v2 .ngolofuzz.ListNgdotPushBackArgsH\x00R\x11ListNgdotPushBack\x12\\\n" +
"\x15ListNgdotInsertBefore\x18\v \x01(\v2$.ngolofuzz.ListNgdotInsertBeforeArgsH\x00R\x15ListNgdotInsertBefore\x12Y\n" +
"\x14ListNgdotInsertAfter\x18\f \x01(\v2#.ngolofuzz.ListNgdotInsertAfterArgsH\x00R\x14ListNgdotInsertAfter\x12Y\n" +
"\x14ListNgdotMoveToFront\x18\r \x01(\v2#.ngolofuzz.ListNgdotMoveToFrontArgsH\x00R\x14ListNgdotMoveToFront\x12V\n" +
"\x13ListNgdotMoveToBack\x18\x0e \x01(\v2\".ngolofuzz.ListNgdotMoveToBackArgsH\x00R\x13ListNgdotMoveToBack\x12V\n" +
"\x13ListNgdotMoveBefore\x18\x0f \x01(\v2\".ngolofuzz.ListNgdotMoveBeforeArgsH\x00R\x13ListNgdotMoveBefore\x12S\n" +
"\x12ListNgdotMoveAfter\x18\x10 \x01(\v2!.ngolofuzz.ListNgdotMoveAfterArgsH\x00R\x12ListNgdotMoveAfter\x12\\\n" +
"\x15ListNgdotPushBackList\x18\x11 \x01(\v2$.ngolofuzz.ListNgdotPushBackListArgsH\x00R\x15ListNgdotPushBackList\x12_\n" +
"\x16ListNgdotPushFrontList\x18\x12 \x01(\v2%.ngolofuzz.ListNgdotPushFrontListArgsH\x00R\x16ListNgdotPushFrontListB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_container_listb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds runtime type info for the 21 messages
// declared in ngolofuzz.proto; indices match the goTypes table below.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
// file_ngolofuzz_proto_goTypes maps descriptor indices to Go types.
var file_ngolofuzz_proto_goTypes = []any{
(*ElementNgdotNextArgs)(nil), // 0: ngolofuzz.ElementNgdotNextArgs
(*ElementNgdotPrevArgs)(nil), // 1: ngolofuzz.ElementNgdotPrevArgs
(*ListNgdotInitArgs)(nil), // 2: ngolofuzz.ListNgdotInitArgs
(*NewArgs)(nil), // 3: ngolofuzz.NewArgs
(*ListNgdotLenArgs)(nil), // 4: ngolofuzz.ListNgdotLenArgs
(*ListNgdotFrontArgs)(nil), // 5: ngolofuzz.ListNgdotFrontArgs
(*ListNgdotBackArgs)(nil), // 6: ngolofuzz.ListNgdotBackArgs
(*ListNgdotRemoveArgs)(nil), // 7: ngolofuzz.ListNgdotRemoveArgs
(*ListNgdotPushFrontArgs)(nil), // 8: ngolofuzz.ListNgdotPushFrontArgs
(*ListNgdotPushBackArgs)(nil), // 9: ngolofuzz.ListNgdotPushBackArgs
(*ListNgdotInsertBeforeArgs)(nil), // 10: ngolofuzz.ListNgdotInsertBeforeArgs
(*ListNgdotInsertAfterArgs)(nil), // 11: ngolofuzz.ListNgdotInsertAfterArgs
(*ListNgdotMoveToFrontArgs)(nil), // 12: ngolofuzz.ListNgdotMoveToFrontArgs
(*ListNgdotMoveToBackArgs)(nil), // 13: ngolofuzz.ListNgdotMoveToBackArgs
(*ListNgdotMoveBeforeArgs)(nil), // 14: ngolofuzz.ListNgdotMoveBeforeArgs
(*ListNgdotMoveAfterArgs)(nil), // 15: ngolofuzz.ListNgdotMoveAfterArgs
(*ListNgdotPushBackListArgs)(nil), // 16: ngolofuzz.ListNgdotPushBackListArgs
(*ListNgdotPushFrontListArgs)(nil), // 17: ngolofuzz.ListNgdotPushFrontListArgs
(*NgoloFuzzOne)(nil), // 18: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 19: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 20: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs records, for each field with a message type,
// the goTypes index of that type; the trailing entries delimit the sub-lists
// used by the protobuf runtime.
var file_ngolofuzz_proto_depIdxs = []int32{
19, // 0: ngolofuzz.ListNgdotPushFrontArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
19, // 1: ngolofuzz.ListNgdotPushBackArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
19, // 2: ngolofuzz.ListNgdotInsertBeforeArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
19, // 3: ngolofuzz.ListNgdotInsertAfterArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
0, // 4: ngolofuzz.NgoloFuzzOne.ElementNgdotNext:type_name -> ngolofuzz.ElementNgdotNextArgs
1, // 5: ngolofuzz.NgoloFuzzOne.ElementNgdotPrev:type_name -> ngolofuzz.ElementNgdotPrevArgs
2, // 6: ngolofuzz.NgoloFuzzOne.ListNgdotInit:type_name -> ngolofuzz.ListNgdotInitArgs
3, // 7: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
4, // 8: ngolofuzz.NgoloFuzzOne.ListNgdotLen:type_name -> ngolofuzz.ListNgdotLenArgs
5, // 9: ngolofuzz.NgoloFuzzOne.ListNgdotFront:type_name -> ngolofuzz.ListNgdotFrontArgs
6, // 10: ngolofuzz.NgoloFuzzOne.ListNgdotBack:type_name -> ngolofuzz.ListNgdotBackArgs
7, // 11: ngolofuzz.NgoloFuzzOne.ListNgdotRemove:type_name -> ngolofuzz.ListNgdotRemoveArgs
8, // 12: ngolofuzz.NgoloFuzzOne.ListNgdotPushFront:type_name -> ngolofuzz.ListNgdotPushFrontArgs
9, // 13: ngolofuzz.NgoloFuzzOne.ListNgdotPushBack:type_name -> ngolofuzz.ListNgdotPushBackArgs
10, // 14: ngolofuzz.NgoloFuzzOne.ListNgdotInsertBefore:type_name -> ngolofuzz.ListNgdotInsertBeforeArgs
11, // 15: ngolofuzz.NgoloFuzzOne.ListNgdotInsertAfter:type_name -> ngolofuzz.ListNgdotInsertAfterArgs
12, // 16: ngolofuzz.NgoloFuzzOne.ListNgdotMoveToFront:type_name -> ngolofuzz.ListNgdotMoveToFrontArgs
13, // 17: ngolofuzz.NgoloFuzzOne.ListNgdotMoveToBack:type_name -> ngolofuzz.ListNgdotMoveToBackArgs
14, // 18: ngolofuzz.NgoloFuzzOne.ListNgdotMoveBefore:type_name -> ngolofuzz.ListNgdotMoveBeforeArgs
15, // 19: ngolofuzz.NgoloFuzzOne.ListNgdotMoveAfter:type_name -> ngolofuzz.ListNgdotMoveAfterArgs
16, // 20: ngolofuzz.NgoloFuzzOne.ListNgdotPushBackList:type_name -> ngolofuzz.ListNgdotPushBackListArgs
17, // 21: ngolofuzz.NgoloFuzzOne.ListNgdotPushFrontList:type_name -> ngolofuzz.ListNgdotPushFrontListArgs
18, // 22: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
23, // [23:23] is the sub-list for method output_type
23, // [23:23] is the sub-list for method input_type
23, // [23:23] is the sub-list for extension type_name
23, // [23:23] is the sub-list for extension extendee
0, // [0:23] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the oneof wrapper types and builds the
// file descriptor; it is idempotent (a second call returns immediately).
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Register the oneof wrapper types for NgoloFuzzOne (msgTypes index 18).
file_ngolofuzz_proto_msgTypes[18].OneofWrappers = []any{
(*NgoloFuzzOne_ElementNgdotNext)(nil),
(*NgoloFuzzOne_ElementNgdotPrev)(nil),
(*NgoloFuzzOne_ListNgdotInit)(nil),
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_ListNgdotLen)(nil),
(*NgoloFuzzOne_ListNgdotFront)(nil),
(*NgoloFuzzOne_ListNgdotBack)(nil),
(*NgoloFuzzOne_ListNgdotRemove)(nil),
(*NgoloFuzzOne_ListNgdotPushFront)(nil),
(*NgoloFuzzOne_ListNgdotPushBack)(nil),
(*NgoloFuzzOne_ListNgdotInsertBefore)(nil),
(*NgoloFuzzOne_ListNgdotInsertAfter)(nil),
(*NgoloFuzzOne_ListNgdotMoveToFront)(nil),
(*NgoloFuzzOne_ListNgdotMoveToBack)(nil),
(*NgoloFuzzOne_ListNgdotMoveBefore)(nil),
(*NgoloFuzzOne_ListNgdotMoveAfter)(nil),
(*NgoloFuzzOne_ListNgdotPushBackList)(nil),
(*NgoloFuzzOne_ListNgdotPushFrontList)(nil),
}
// Register the oneof wrapper types for NgoloFuzzAny (msgTypes index 19).
file_ngolofuzz_proto_msgTypes[19].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
// Build the file descriptor from the embedded raw bytes and the tables above.
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 21,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release the construction-time tables; the runtime keeps what it needs.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_container_ring
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"container/ring"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write discards b and reports the whole slice as successfully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}

// Close marks the connection as fully consumed so later Reads hit EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder address used by FuzzingConn; both of its
// accessors return fixed fake values.
type FuzzingAddr struct{}

// Network reports a constant fake network name.
func (c *FuzzingAddr) Network() (name string) {
	name = "fuzz_addr_net"
	return name
}

// String reports a constant fake address string.
func (c *FuzzingAddr) String() (addr string) {
	addr = "fuzz_addr_string"
	return addr
}
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return new(FuzzingAddr)
}

// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return new(FuzzingAddr)
}

// SetDeadline is a no-op: deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn returns a FuzzingConn whose reads are served from a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as big-endian unsigned bytes and returns the
// corresponding big.Int.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewBuffer(a))
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (modulo 1<<16).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or the NUL rune for an empty string.
func GetRune(s string) rune {
	var first rune
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid decodes data — which is expected to be a valid protobuf
// NgoloFuzzList — and replays it. A decode failure is a harness bug, so it
// panics rather than returning.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised while replaying; anything else (runtime
	// errors, typed panics) is re-raised so real crashes still surface.
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure decodes data as a NgoloFuzzList and replays it. Unlike
// FuzzNG_valid, the input is not assumed to be a valid protobuf: undecodable
// data is silently ignored.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if proto.Unmarshal(data, gen) != nil {
		return 0
	}
	// Swallow string panics raised while replaying; re-raise anything else.
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool
// FuzzNG_List replays gen.List as a sequence of container/ring API calls.
// Every *ring.Ring produced by a call is appended to RingResults, and
// receiver/ring arguments are consumed round-robin via RingResultsIndex.
// Operations requiring a ring are skipped while none exist yet.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// If FUZZ_NG_REPRODUCER names a file, write a Go-source reproducer of
// this input there (once per process).
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var RingResults []*ring.Ring
RingResultsIndex := 0
for l := range gen.List {
// Cap the number of replayed operations to bound runtime.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_RingNgdotNext:
if len(RingResults) == 0 {
continue
}
arg0 := RingResults[RingResultsIndex]
RingResultsIndex = (RingResultsIndex + 1) % len(RingResults)
r0 := arg0.Next()
if r0 != nil{
RingResults = append(RingResults, r0)
}
case *NgoloFuzzOne_RingNgdotPrev:
if len(RingResults) == 0 {
continue
}
arg0 := RingResults[RingResultsIndex]
RingResultsIndex = (RingResultsIndex + 1) % len(RingResults)
r0 := arg0.Prev()
if r0 != nil{
RingResults = append(RingResults, r0)
}
case *NgoloFuzzOne_RingNgdotMove:
if len(RingResults) == 0 {
continue
}
arg0 := RingResults[RingResultsIndex]
RingResultsIndex = (RingResultsIndex + 1) % len(RingResults)
arg1 := int(a.RingNgdotMove.N)
// The modulo bounds the move distance to keep Move from looping
// excessively on huge generated values.
r0 := arg0.Move(arg1 % 0x10001)
if r0 != nil{
RingResults = append(RingResults, r0)
}
case *NgoloFuzzOne_New:
arg0 := int(a.New.N)
// Bound the requested ring size the same way.
r0 := ring.New(arg0 % 0x10001)
if r0 != nil{
RingResults = append(RingResults, r0)
}
case *NgoloFuzzOne_RingNgdotLink:
if len(RingResults) == 0 {
continue
}
arg0 := RingResults[RingResultsIndex]
RingResultsIndex = (RingResultsIndex + 1) % len(RingResults)
if len(RingResults) == 0 {
continue
}
arg1 := RingResults[RingResultsIndex]
RingResultsIndex = (RingResultsIndex + 1) % len(RingResults)
r0 := arg0.Link(arg1)
if r0 != nil{
RingResults = append(RingResults, r0)
}
case *NgoloFuzzOne_RingNgdotUnlink:
if len(RingResults) == 0 {
continue
}
arg0 := RingResults[RingResultsIndex]
RingResultsIndex = (RingResultsIndex + 1) % len(RingResults)
arg1 := int(a.RingNgdotUnlink.N)
r0 := arg0.Unlink(arg1 % 0x10001)
if r0 != nil{
RingResults = append(RingResults, r0)
}
case *NgoloFuzzOne_RingNgdotLen:
if len(RingResults) == 0 {
continue
}
arg0 := RingResults[RingResultsIndex]
RingResultsIndex = (RingResultsIndex + 1) % len(RingResults)
arg0.Len()
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer of gen to w: one line of code
// per replayed operation, naming each produced ring Ring0, Ring1, ...
// RingNb counts rings produced so far; RingResultsIndex picks the receiver
// round-robin, mirroring FuzzNG_List's consumption order.
// NOTE(review): WriteString errors are deliberately ignored (best-effort
// reproducer output), and the index arithmetic below uses the incremented
// RingNb as the modulus — generated code; confirm against the generator
// before changing.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
RingNb := 0
RingResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_RingNgdotNext:
// Skip ring-consuming ops until at least one ring exists.
if RingNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Ring%d := Ring%d.Next()\n", RingNb, RingResultsIndex))
RingNb = RingNb + 1
RingResultsIndex = (RingResultsIndex + 1) % RingNb
case *NgoloFuzzOne_RingNgdotPrev:
if RingNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Ring%d := Ring%d.Prev()\n", RingNb, RingResultsIndex))
RingNb = RingNb + 1
RingResultsIndex = (RingResultsIndex + 1) % RingNb
case *NgoloFuzzOne_RingNgdotMove:
if RingNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Ring%d := Ring%d.Move(int(%#+v) %% 0x10001)\n", RingNb, RingResultsIndex, a.RingNgdotMove.N))
RingNb = RingNb + 1
RingResultsIndex = (RingResultsIndex + 1) % RingNb
case *NgoloFuzzOne_New:
w.WriteString(fmt.Sprintf("Ring%d := ring.New(int(%#+v) %% 0x10001)\n", RingNb, a.New.N))
RingNb = RingNb + 1
case *NgoloFuzzOne_RingNgdotLink:
// NOTE(review): duplicated guard emitted once per ring argument of
// Link (receiver and parameter) — harmless redundancy.
if RingNb == 0 {
continue
}
if RingNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Ring%d := Ring%d.Link(Ring%d)\n", RingNb, RingResultsIndex, (RingResultsIndex + 1) % RingNb))
RingNb = RingNb + 1
// Advance twice: Link consumed two rings.
RingResultsIndex = (RingResultsIndex + 1) % RingNb
RingResultsIndex = (RingResultsIndex + 1) % RingNb
case *NgoloFuzzOne_RingNgdotUnlink:
if RingNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Ring%d := Ring%d.Unlink(int(%#+v) %% 0x10001)\n", RingNb, RingResultsIndex, a.RingNgdotUnlink.N))
RingNb = RingNb + 1
RingResultsIndex = (RingResultsIndex + 1) % RingNb
case *NgoloFuzzOne_RingNgdotLen:
if RingNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Ring%d.Len()\n", RingResultsIndex))
RingResultsIndex = (RingResultsIndex + 1) % RingNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_container_ring
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: both expressions fail to compile if the generated
// code and the linked protoimpl runtime drift too far apart in version.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// RingNgdotNextArgs is the (empty) argument message for Ring.Next.
type RingNgdotNextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RingNgdotNextArgs) Reset() {
*x = RingNgdotNextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RingNgdotNextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RingNgdotNextArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info.
func (x *RingNgdotNextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RingNgdotNextArgs.ProtoReflect.Descriptor instead.
func (*RingNgdotNextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// RingNgdotPrevArgs is the (empty) argument message for Ring.Prev.
type RingNgdotPrevArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RingNgdotPrevArgs) Reset() {
*x = RingNgdotPrevArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RingNgdotPrevArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RingNgdotPrevArgs) ProtoMessage() {}
func (x *RingNgdotPrevArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RingNgdotPrevArgs.ProtoReflect.Descriptor instead.
func (*RingNgdotPrevArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// RingNgdotMoveArgs carries the n argument for Ring.Move.
type RingNgdotMoveArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RingNgdotMoveArgs) Reset() {
*x = RingNgdotMoveArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RingNgdotMoveArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RingNgdotMoveArgs) ProtoMessage() {}
func (x *RingNgdotMoveArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RingNgdotMoveArgs.ProtoReflect.Descriptor instead.
func (*RingNgdotMoveArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// GetN returns the move distance, or 0 for a nil receiver.
func (x *RingNgdotMoveArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// NewArgs carries the n argument for ring.New.
type NewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
*x = NewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetN returns the requested ring size, or 0 for a nil receiver.
func (x *NewArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// RingNgdotLinkArgs is the (empty) argument message for Ring.Link; both ring
// operands are taken from previously produced results.
type RingNgdotLinkArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RingNgdotLinkArgs) Reset() {
*x = RingNgdotLinkArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RingNgdotLinkArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RingNgdotLinkArgs) ProtoMessage() {}
func (x *RingNgdotLinkArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RingNgdotLinkArgs.ProtoReflect.Descriptor instead.
func (*RingNgdotLinkArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// RingNgdotUnlinkArgs carries the n argument for Ring.Unlink.
type RingNgdotUnlinkArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RingNgdotUnlinkArgs) Reset() {
*x = RingNgdotUnlinkArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RingNgdotUnlinkArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RingNgdotUnlinkArgs) ProtoMessage() {}
func (x *RingNgdotUnlinkArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RingNgdotUnlinkArgs.ProtoReflect.Descriptor instead.
func (*RingNgdotUnlinkArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetN returns the unlink count, or 0 for a nil receiver.
func (x *RingNgdotUnlinkArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// RingNgdotLenArgs is the (empty) argument message for Ring.Len.
type RingNgdotLenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RingNgdotLenArgs) Reset() {
*x = RingNgdotLenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RingNgdotLenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RingNgdotLenArgs) ProtoMessage() {}
func (x *RingNgdotLenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RingNgdotLenArgs.ProtoReflect.Descriptor instead.
func (*RingNgdotLenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// NgoloFuzzOne encodes a single container/ring operation as a oneof: exactly
// one of the wrapper types below is stored in Item.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_RingNgdotNext
// *NgoloFuzzOne_RingNgdotPrev
// *NgoloFuzzOne_RingNgdotMove
// *NgoloFuzzOne_New
// *NgoloFuzzOne_RingNgdotLink
// *NgoloFuzzOne_RingNgdotUnlink
// *NgoloFuzzOne_RingNgdotLen
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// GetItem returns the populated oneof wrapper, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Typed oneof accessors: each returns the payload if that variant is set,
// otherwise nil.
func (x *NgoloFuzzOne) GetRingNgdotNext() *RingNgdotNextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RingNgdotNext); ok {
return x.RingNgdotNext
}
}
return nil
}
func (x *NgoloFuzzOne) GetRingNgdotPrev() *RingNgdotPrevArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RingNgdotPrev); ok {
return x.RingNgdotPrev
}
}
return nil
}
func (x *NgoloFuzzOne) GetRingNgdotMove() *RingNgdotMoveArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RingNgdotMove); ok {
return x.RingNgdotMove
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew() *NewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
return x.New
}
}
return nil
}
func (x *NgoloFuzzOne) GetRingNgdotLink() *RingNgdotLinkArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RingNgdotLink); ok {
return x.RingNgdotLink
}
}
return nil
}
func (x *NgoloFuzzOne) GetRingNgdotUnlink() *RingNgdotUnlinkArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RingNgdotUnlink); ok {
return x.RingNgdotUnlink
}
}
return nil
}
func (x *NgoloFuzzOne) GetRingNgdotLen() *RingNgdotLenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RingNgdotLen); ok {
return x.RingNgdotLen
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by the oneof
// wrapper types below.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_RingNgdotNext struct {
RingNgdotNext *RingNgdotNextArgs `protobuf:"bytes,1,opt,name=RingNgdotNext,proto3,oneof"`
}
type NgoloFuzzOne_RingNgdotPrev struct {
RingNgdotPrev *RingNgdotPrevArgs `protobuf:"bytes,2,opt,name=RingNgdotPrev,proto3,oneof"`
}
type NgoloFuzzOne_RingNgdotMove struct {
RingNgdotMove *RingNgdotMoveArgs `protobuf:"bytes,3,opt,name=RingNgdotMove,proto3,oneof"`
}
type NgoloFuzzOne_New struct {
New *NewArgs `protobuf:"bytes,4,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_RingNgdotLink struct {
RingNgdotLink *RingNgdotLinkArgs `protobuf:"bytes,5,opt,name=RingNgdotLink,proto3,oneof"`
}
type NgoloFuzzOne_RingNgdotUnlink struct {
RingNgdotUnlink *RingNgdotUnlinkArgs `protobuf:"bytes,6,opt,name=RingNgdotUnlink,proto3,oneof"`
}
type NgoloFuzzOne_RingNgdotLen struct {
RingNgdotLen *RingNgdotLenArgs `protobuf:"bytes,7,opt,name=RingNgdotLen,proto3,oneof"`
}
// Marker methods tying each wrapper to the NgoloFuzzOne oneof.
func (*NgoloFuzzOne_RingNgdotNext) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RingNgdotPrev) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RingNgdotMove) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RingNgdotLink) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RingNgdotUnlink) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RingNgdotLen) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny holds one scalar value of arbitrary kind, encoded as a oneof.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// GetItem returns the populated oneof wrapper, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Typed oneof accessors: each returns the payload if that variant is set,
// otherwise the zero value.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented by the oneof
// wrapper types below.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
// Marker methods tying each wrapper to the NgoloFuzzAny oneof.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level generated message: a repeated sequence of
// NgoloFuzzOne operations replayed in order by FuzzNG_List.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetList returns the operation list, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto,
// populated by the generated init code.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto, embedded verbatim; do not modify its bytes.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x13\n" +
"\x11RingNgdotNextArgs\"\x13\n" +
"\x11RingNgdotPrevArgs\"!\n" +
"\x11RingNgdotMoveArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"\x17\n" +
"\aNewArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"\x13\n" +
"\x11RingNgdotLinkArgs\"#\n" +
"\x13RingNgdotUnlinkArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"\x12\n" +
"\x10RingNgdotLenArgs\"\xe5\x03\n" +
"\fNgoloFuzzOne\x12D\n" +
"\rRingNgdotNext\x18\x01 \x01(\v2\x1c.ngolofuzz.RingNgdotNextArgsH\x00R\rRingNgdotNext\x12D\n" +
"\rRingNgdotPrev\x18\x02 \x01(\v2\x1c.ngolofuzz.RingNgdotPrevArgsH\x00R\rRingNgdotPrev\x12D\n" +
"\rRingNgdotMove\x18\x03 \x01(\v2\x1c.ngolofuzz.RingNgdotMoveArgsH\x00R\rRingNgdotMove\x12&\n" +
"\x03New\x18\x04 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12D\n" +
"\rRingNgdotLink\x18\x05 \x01(\v2\x1c.ngolofuzz.RingNgdotLinkArgsH\x00R\rRingNgdotLink\x12J\n" +
"\x0fRingNgdotUnlink\x18\x06 \x01(\v2\x1e.ngolofuzz.RingNgdotUnlinkArgsH\x00R\x0fRingNgdotUnlink\x12A\n" +
"\fRingNgdotLen\x18\a \x01(\v2\x1b.ngolofuzz.RingNgdotLenArgsH\x00R\fRingNgdotLenB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_container_ringb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor once
// and caches it; used only by the deprecated Descriptor methods.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
var file_ngolofuzz_proto_goTypes = []any{
(*RingNgdotNextArgs)(nil), // 0: ngolofuzz.RingNgdotNextArgs
(*RingNgdotPrevArgs)(nil), // 1: ngolofuzz.RingNgdotPrevArgs
(*RingNgdotMoveArgs)(nil), // 2: ngolofuzz.RingNgdotMoveArgs
(*NewArgs)(nil), // 3: ngolofuzz.NewArgs
(*RingNgdotLinkArgs)(nil), // 4: ngolofuzz.RingNgdotLinkArgs
(*RingNgdotUnlinkArgs)(nil), // 5: ngolofuzz.RingNgdotUnlinkArgs
(*RingNgdotLenArgs)(nil), // 6: ngolofuzz.RingNgdotLenArgs
(*NgoloFuzzOne)(nil), // 7: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 8: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 9: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.RingNgdotNext:type_name -> ngolofuzz.RingNgdotNextArgs
1, // 1: ngolofuzz.NgoloFuzzOne.RingNgdotPrev:type_name -> ngolofuzz.RingNgdotPrevArgs
2, // 2: ngolofuzz.NgoloFuzzOne.RingNgdotMove:type_name -> ngolofuzz.RingNgdotMoveArgs
3, // 3: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
4, // 4: ngolofuzz.NgoloFuzzOne.RingNgdotLink:type_name -> ngolofuzz.RingNgdotLinkArgs
5, // 5: ngolofuzz.NgoloFuzzOne.RingNgdotUnlink:type_name -> ngolofuzz.RingNgdotUnlinkArgs
6, // 6: ngolofuzz.NgoloFuzzOne.RingNgdotLen:type_name -> ngolofuzz.RingNgdotLenArgs
7, // 7: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
8, // [8:8] is the sub-list for method output_type
8, // [8:8] is the sub-list for method input_type
8, // [8:8] is the sub-list for extension type_name
8, // [8:8] is the sub-list for extension extendee
0, // [0:8] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
(*NgoloFuzzOne_RingNgdotNext)(nil),
(*NgoloFuzzOne_RingNgdotPrev)(nil),
(*NgoloFuzzOne_RingNgdotMove)(nil),
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_RingNgdotLink)(nil),
(*NgoloFuzzOne_RingNgdotUnlink)(nil),
(*NgoloFuzzOne_RingNgdotLen)(nil),
}
file_ngolofuzz_proto_msgTypes[8].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 10,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_context
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"context"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stand-in for net.Conn: Read serves bytes from
// buf, Write discards its input, and deadlines are no-ops.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of the buffer into b and advances the
// read offset. It returns io.EOF once the buffer is exhausted.
//
// Bug fixed: the previous version compared len(b) against len(c.buf)+c.offset
// (instead of the remaining byte count len(c.buf)-c.offset) and then advanced
// the offset by len(b), so a Read whose destination was larger than the
// remaining data reported more bytes than were actually copied, violating the
// io.Reader contract and leaving garbage in b's tail.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports the whole slice as written; the fuzzing
// connection never fails or blocks on a write.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a stub net.Addr returned by FuzzingConn; both the network
// name and the address string are fixed placeholders.
type FuzzingAddr struct{}

// Network reports the synthetic network name.
func (fa *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports the synthetic address string.
func (fa *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for an in-memory buffer.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start of the
// buffer.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit these helper functions when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to the platform int type,
// preserving order.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16, preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL for the empty string.
func GetRune(s string) rune {
	var first rune
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid runs one fuzzing iteration on data that is assumed to be a
// valid protobuf encoding of NgoloFuzzList; it panics if unmarshaling fails
// (the corpus generator guarantees validity).
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the exercised API (treated as expected
	// failures); re-raise anything else so genuine bugs still crash the fuzzer.
	// Note the deferred recover is installed after the Unmarshal check on
	// purpose, so the panic above is never swallowed.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: expected, API-raised panic
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure runs one fuzzing iteration on data that may or may not be a
// valid protobuf encoding of NgoloFuzzList; invalid input is silently
// discarded instead of panicking (contrast FuzzNG_valid).
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Not a valid protobuf: ignore this input.
		return 0
	}
	// Swallow string panics raised by the exercised API; re-raise anything
	// else so genuine bugs still crash the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: expected, API-raised panic
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
var initialized bool // guards the one-time reproducer dump in FuzzNG_List

// FuzzNG_List replays gen as a sequence of context-package API calls.
// Produced contexts are accumulated in ContextResults and reused round-robin
// as parent arguments for later calls. Returns 1 when the whole list was
// consumed, 0 when the iteration bailed out early.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On first use, optionally dump a Go-source reproducer for this input
		// to the file named by the FUZZ_NG_REPRODUCER environment variable.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var ContextResults []*context.Context
	ContextResultsIndex := 0
	for l := range gen.List {
		// Cap the work per input; note the first 4097 items have already been
		// executed by the time this triggers.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Background:
			r0 := context.Background()
			ContextResults = append(ContextResults, &r0)
		case *NgoloFuzzOne_TODO:
			r0 := context.TODO()
			ContextResults = append(ContextResults, &r0)
		case *NgoloFuzzOne_WithCancel:
			if len(ContextResults) == 0 {
				continue // no parent context produced yet
			}
			arg0 := *ContextResults[ContextResultsIndex]
			ContextResultsIndex = (ContextResultsIndex + 1) % len(ContextResults)
			r0, _ := context.WithCancel(arg0)
			ContextResults = append(ContextResults, &r0)
		case *NgoloFuzzOne_WithCancelCause:
			if len(ContextResults) == 0 {
				continue
			}
			arg0 := *ContextResults[ContextResultsIndex]
			ContextResultsIndex = (ContextResultsIndex + 1) % len(ContextResults)
			r0, _ := context.WithCancelCause(arg0)
			ContextResults = append(ContextResults, &r0)
		case *NgoloFuzzOne_Cause:
			if len(ContextResults) == 0 {
				continue
			}
			arg0 := *ContextResults[ContextResultsIndex]
			ContextResultsIndex = (ContextResultsIndex + 1) % len(ContextResults)
			r0 := context.Cause(arg0)
			if r0 != nil {
				// Exercise the error's message, then stop this iteration.
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WithoutCancel:
			if len(ContextResults) == 0 {
				continue
			}
			arg0 := *ContextResults[ContextResultsIndex]
			ContextResultsIndex = (ContextResultsIndex + 1) % len(ContextResults)
			r0 := context.WithoutCancel(arg0)
			ContextResults = append(ContextResults, &r0)
		case *NgoloFuzzOne_WithValue:
			if len(ContextResults) == 0 {
				continue
			}
			arg0 := *ContextResults[ContextResultsIndex]
			ContextResultsIndex = (ContextResultsIndex + 1) % len(ContextResults)
			// NOTE(review): context.WithValue panics (with a string) on a nil
			// key; that panic is absorbed by the recover in the callers.
			r0 := context.WithValue(arg0, a.WithValue.Key, a.WithValue.Val)
			ContextResults = append(ContextResults, &r0)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w, mirroring the
// calls FuzzNG_List would perform. WriteString errors are deliberately
// ignored (best-effort output for debugging only).
//
// NOTE(review): the "(ContextResultsIndex + 0)" form is a generator artifact,
// and the printed parent indices can drift from the ones FuzzNG_List actually
// selects at runtime (it indexes into a results slice, not a plain counter) —
// confirm reproducer fidelity against the generator before relying on it.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	ContextNb := 0           // number of ContextN variables emitted so far
	ContextResultsIndex := 0 // round-robin cursor over emitted contexts
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Background:
			w.WriteString(fmt.Sprintf("Context%d := context.Background()\n", ContextNb))
			ContextNb = ContextNb + 1
		case *NgoloFuzzOne_TODO:
			w.WriteString(fmt.Sprintf("Context%d := context.TODO()\n", ContextNb))
			ContextNb = ContextNb + 1
		case *NgoloFuzzOne_WithCancel:
			if ContextNb == 0 {
				continue // no context variable emitted yet to use as parent
			}
			w.WriteString(fmt.Sprintf("Context%d, _ := context.WithCancel(Context%d)\n", ContextNb, (ContextResultsIndex+0)%ContextNb))
			ContextNb = ContextNb + 1
			ContextResultsIndex = (ContextResultsIndex + 1) % ContextNb
		case *NgoloFuzzOne_WithCancelCause:
			if ContextNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Context%d, _ := context.WithCancelCause(Context%d)\n", ContextNb, (ContextResultsIndex+0)%ContextNb))
			ContextNb = ContextNb + 1
			ContextResultsIndex = (ContextResultsIndex + 1) % ContextNb
		case *NgoloFuzzOne_Cause:
			if ContextNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("context.Cause(Context%d)\n", (ContextResultsIndex+0)%ContextNb))
			ContextResultsIndex = (ContextResultsIndex + 1) % ContextNb
		case *NgoloFuzzOne_WithoutCancel:
			if ContextNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Context%d := context.WithoutCancel(Context%d)\n", ContextNb, (ContextResultsIndex+0)%ContextNb))
			ContextNb = ContextNb + 1
			ContextResultsIndex = (ContextResultsIndex + 1) % ContextNb
		case *NgoloFuzzOne_WithValue:
			if ContextNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Context%d := context.WithValue(Context%d, %#+v, %#+v)\n", ContextNb, (ContextResultsIndex+0)%ContextNb, a.WithValue.Key, a.WithValue.Val))
			ContextNb = ContextNb + 1
			ContextResultsIndex = (ContextResultsIndex + 1) % ContextNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_context
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type BackgroundArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *BackgroundArgs) Reset() {
*x = BackgroundArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *BackgroundArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BackgroundArgs) ProtoMessage() {}
func (x *BackgroundArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BackgroundArgs.ProtoReflect.Descriptor instead.
func (*BackgroundArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type TODOArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TODOArgs) Reset() {
*x = TODOArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TODOArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TODOArgs) ProtoMessage() {}
func (x *TODOArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TODOArgs.ProtoReflect.Descriptor instead.
func (*TODOArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type WithCancelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WithCancelArgs) Reset() {
*x = WithCancelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WithCancelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WithCancelArgs) ProtoMessage() {}
func (x *WithCancelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WithCancelArgs.ProtoReflect.Descriptor instead.
func (*WithCancelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
type WithCancelCauseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WithCancelCauseArgs) Reset() {
*x = WithCancelCauseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WithCancelCauseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WithCancelCauseArgs) ProtoMessage() {}
func (x *WithCancelCauseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WithCancelCauseArgs.ProtoReflect.Descriptor instead.
func (*WithCancelCauseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
type CauseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CauseArgs) Reset() {
*x = CauseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CauseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CauseArgs) ProtoMessage() {}
func (x *CauseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CauseArgs.ProtoReflect.Descriptor instead.
func (*CauseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type WithoutCancelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WithoutCancelArgs) Reset() {
*x = WithoutCancelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WithoutCancelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WithoutCancelArgs) ProtoMessage() {}
func (x *WithoutCancelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WithoutCancelArgs.ProtoReflect.Descriptor instead.
func (*WithoutCancelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
type WithValueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key *NgoloFuzzAny `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Val *NgoloFuzzAny `protobuf:"bytes,2,opt,name=val,proto3" json:"val,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WithValueArgs) Reset() {
*x = WithValueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WithValueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WithValueArgs) ProtoMessage() {}
func (x *WithValueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WithValueArgs.ProtoReflect.Descriptor instead.
func (*WithValueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *WithValueArgs) GetKey() *NgoloFuzzAny {
if x != nil {
return x.Key
}
return nil
}
func (x *WithValueArgs) GetVal() *NgoloFuzzAny {
if x != nil {
return x.Val
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Background
// *NgoloFuzzOne_TODO
// *NgoloFuzzOne_WithCancel
// *NgoloFuzzOne_WithCancelCause
// *NgoloFuzzOne_Cause
// *NgoloFuzzOne_WithoutCancel
// *NgoloFuzzOne_WithValue
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetBackground() *BackgroundArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Background); ok {
return x.Background
}
}
return nil
}
func (x *NgoloFuzzOne) GetTODO() *TODOArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TODO); ok {
return x.TODO
}
}
return nil
}
func (x *NgoloFuzzOne) GetWithCancel() *WithCancelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WithCancel); ok {
return x.WithCancel
}
}
return nil
}
func (x *NgoloFuzzOne) GetWithCancelCause() *WithCancelCauseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WithCancelCause); ok {
return x.WithCancelCause
}
}
return nil
}
func (x *NgoloFuzzOne) GetCause() *CauseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Cause); ok {
return x.Cause
}
}
return nil
}
func (x *NgoloFuzzOne) GetWithoutCancel() *WithoutCancelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WithoutCancel); ok {
return x.WithoutCancel
}
}
return nil
}
func (x *NgoloFuzzOne) GetWithValue() *WithValueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WithValue); ok {
return x.WithValue
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Background struct {
Background *BackgroundArgs `protobuf:"bytes,1,opt,name=Background,proto3,oneof"`
}
type NgoloFuzzOne_TODO struct {
TODO *TODOArgs `protobuf:"bytes,2,opt,name=TODO,proto3,oneof"`
}
type NgoloFuzzOne_WithCancel struct {
WithCancel *WithCancelArgs `protobuf:"bytes,3,opt,name=WithCancel,proto3,oneof"`
}
type NgoloFuzzOne_WithCancelCause struct {
WithCancelCause *WithCancelCauseArgs `protobuf:"bytes,4,opt,name=WithCancelCause,proto3,oneof"`
}
type NgoloFuzzOne_Cause struct {
Cause *CauseArgs `protobuf:"bytes,5,opt,name=Cause,proto3,oneof"`
}
type NgoloFuzzOne_WithoutCancel struct {
WithoutCancel *WithoutCancelArgs `protobuf:"bytes,6,opt,name=WithoutCancel,proto3,oneof"`
}
type NgoloFuzzOne_WithValue struct {
WithValue *WithValueArgs `protobuf:"bytes,7,opt,name=WithValue,proto3,oneof"`
}
func (*NgoloFuzzOne_Background) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TODO) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WithCancel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WithCancelCause) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Cause) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WithoutCancel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WithValue) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x10\n" +
"\x0eBackgroundArgs\"\n" +
"\n" +
"\bTODOArgs\"\x10\n" +
"\x0eWithCancelArgs\"\x15\n" +
"\x13WithCancelCauseArgs\"\v\n" +
"\tCauseArgs\"\x13\n" +
"\x11WithoutCancelArgs\"e\n" +
"\rWithValueArgs\x12)\n" +
"\x03key\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x03key\x12)\n" +
"\x03val\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x03val\"\xb5\x03\n" +
"\fNgoloFuzzOne\x12;\n" +
"\n" +
"Background\x18\x01 \x01(\v2\x19.ngolofuzz.BackgroundArgsH\x00R\n" +
"Background\x12)\n" +
"\x04TODO\x18\x02 \x01(\v2\x13.ngolofuzz.TODOArgsH\x00R\x04TODO\x12;\n" +
"\n" +
"WithCancel\x18\x03 \x01(\v2\x19.ngolofuzz.WithCancelArgsH\x00R\n" +
"WithCancel\x12J\n" +
"\x0fWithCancelCause\x18\x04 \x01(\v2\x1e.ngolofuzz.WithCancelCauseArgsH\x00R\x0fWithCancelCause\x12,\n" +
"\x05Cause\x18\x05 \x01(\v2\x14.ngolofuzz.CauseArgsH\x00R\x05Cause\x12D\n" +
"\rWithoutCancel\x18\x06 \x01(\v2\x1c.ngolofuzz.WithoutCancelArgsH\x00R\rWithoutCancel\x128\n" +
"\tWithValue\x18\a \x01(\v2\x18.ngolofuzz.WithValueArgsH\x00R\tWithValueB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x14Z\x12./;fuzz_ng_contextb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
var file_ngolofuzz_proto_goTypes = []any{
(*BackgroundArgs)(nil), // 0: ngolofuzz.BackgroundArgs
(*TODOArgs)(nil), // 1: ngolofuzz.TODOArgs
(*WithCancelArgs)(nil), // 2: ngolofuzz.WithCancelArgs
(*WithCancelCauseArgs)(nil), // 3: ngolofuzz.WithCancelCauseArgs
(*CauseArgs)(nil), // 4: ngolofuzz.CauseArgs
(*WithoutCancelArgs)(nil), // 5: ngolofuzz.WithoutCancelArgs
(*WithValueArgs)(nil), // 6: ngolofuzz.WithValueArgs
(*NgoloFuzzOne)(nil), // 7: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 8: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 9: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
8, // 0: ngolofuzz.WithValueArgs.key:type_name -> ngolofuzz.NgoloFuzzAny
8, // 1: ngolofuzz.WithValueArgs.val:type_name -> ngolofuzz.NgoloFuzzAny
0, // 2: ngolofuzz.NgoloFuzzOne.Background:type_name -> ngolofuzz.BackgroundArgs
1, // 3: ngolofuzz.NgoloFuzzOne.TODO:type_name -> ngolofuzz.TODOArgs
2, // 4: ngolofuzz.NgoloFuzzOne.WithCancel:type_name -> ngolofuzz.WithCancelArgs
3, // 5: ngolofuzz.NgoloFuzzOne.WithCancelCause:type_name -> ngolofuzz.WithCancelCauseArgs
4, // 6: ngolofuzz.NgoloFuzzOne.Cause:type_name -> ngolofuzz.CauseArgs
5, // 7: ngolofuzz.NgoloFuzzOne.WithoutCancel:type_name -> ngolofuzz.WithoutCancelArgs
6, // 8: ngolofuzz.NgoloFuzzOne.WithValue:type_name -> ngolofuzz.WithValueArgs
7, // 9: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
10, // [10:10] is the sub-list for method output_type
10, // [10:10] is the sub-list for method input_type
10, // [10:10] is the sub-list for extension type_name
10, // [10:10] is the sub-list for extension extendee
0, // [0:10] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
(*NgoloFuzzOne_Background)(nil),
(*NgoloFuzzOne_TODO)(nil),
(*NgoloFuzzOne_WithCancel)(nil),
(*NgoloFuzzOne_WithCancelCause)(nil),
(*NgoloFuzzOne_Cause)(nil),
(*NgoloFuzzOne_WithoutCancel)(nil),
(*NgoloFuzzOne_WithValue)(nil),
}
file_ngolofuzz_proto_msgTypes[8].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 10,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_aes
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/aes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stand-in for net.Conn: Read serves bytes from
// buf, Write discards its input, and deadlines are no-ops.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of the buffer into b and advances the
// read offset. It returns io.EOF once the buffer is exhausted.
//
// Bug fixed: the previous version compared len(b) against len(c.buf)+c.offset
// (instead of the remaining byte count len(c.buf)-c.offset) and then advanced
// the offset by len(b), so a Read whose destination was larger than the
// remaining data reported more bytes than were actually copied, violating the
// io.Reader contract and leaving garbage in b's tail.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports the whole slice as written; the fuzzing
// connection never fails or blocks on a write.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a stub net.Addr returned by FuzzingConn; both the network
// name and the address string are fixed placeholders.
type FuzzingAddr struct{}

// Network reports the synthetic network name.
func (fa *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports the synthetic address string.
func (fa *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for an in-memory buffer.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start of the
// buffer.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit these helper functions when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to the platform int type,
// preserving order.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16, preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL for the empty string.
func GetRune(s string) rune {
	var first rune
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid runs one fuzzing iteration on data that is assumed to be a
// valid protobuf encoding of NgoloFuzzList; it panics if unmarshaling fails
// (the corpus generator guarantees validity).
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the exercised API (treated as expected
	// failures); re-raise anything else so genuine bugs still crash the fuzzer.
	// Note the deferred recover is installed after the Unmarshal check on
	// purpose, so the panic above is never swallowed.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: expected, API-raised panic
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure runs one fuzzing iteration on data that may or may not be a
// valid protobuf encoding of NgoloFuzzList; invalid input is silently
// discarded instead of panicking (contrast FuzzNG_valid).
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Not a valid protobuf: ignore this input.
		return 0
	}
	// Swallow string panics raised by the exercised API; re-raise anything
	// else so genuine bugs still crash the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: expected, API-raised panic
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
var initialized bool // guards the one-time reproducer dump in FuzzNG_List

// FuzzNG_List replays gen as a sequence of crypto/aes API calls. Returns 1
// when the whole list was consumed, 0 when the iteration bailed out early
// (length cap reached or aes.NewCipher rejected a key).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On first use, optionally dump a Go-source reproducer for this input
		// to the file named by the FUZZ_NG_REPRODUCER environment variable.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the work per input; note the first 4097 items have already been
		// executed by the time this triggers.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewCipher:
			_, r1 := aes.NewCipher(a.NewCipher.Key)
			if r1 != nil {
				// Exercise the error message (e.g. invalid key size), then
				// stop this iteration.
				r1.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the call list to w,
// one line per replayed API call.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for _, one := range gen.List {
		if a, ok := one.Item.(*NgoloFuzzOne_NewCipher); ok {
			w.WriteString(fmt.Sprintf("aes.NewCipher(%#+v)\n", a.NewCipher.Key))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_aes
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewCipherArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewCipherArgs) Reset() {
*x = NewCipherArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewCipherArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewCipherArgs) ProtoMessage() {}
func (x *NewCipherArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewCipherArgs.ProtoReflect.Descriptor instead.
func (*NewCipherArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewCipherArgs) GetKey() []byte {
if x != nil {
return x.Key
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewCipher
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewCipher() *NewCipherArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewCipher); ok {
return x.NewCipher
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewCipher struct {
NewCipher *NewCipherArgs `protobuf:"bytes,1,opt,name=NewCipher,proto3,oneof"`
}
func (*NgoloFuzzOne_NewCipher) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"!\n" +
"\rNewCipherArgs\x12\x10\n" +
"\x03key\x18\x01 \x01(\fR\x03key\"P\n" +
"\fNgoloFuzzOne\x128\n" +
"\tNewCipher\x18\x01 \x01(\v2\x18.ngolofuzz.NewCipherArgsH\x00R\tNewCipherB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x17Z\x15./;fuzz_ng_crypto_aesb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file descriptor
// exactly once (guarded by a sync.Once) and returns the cached bytes.
// Generated by protoc-gen-go; do not hand-edit the logic.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_ngolofuzz_proto_goTypes = []any{
(*NewCipherArgs)(nil), // 0: ngolofuzz.NewCipherArgs
(*NgoloFuzzOne)(nil), // 1: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 2: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 3: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.NewCipher:type_name -> ngolofuzz.NewCipherArgs
1, // 1: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[1].OneofWrappers = []any{
(*NgoloFuzzOne_NewCipher)(nil),
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_des
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/des"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
// Read copies buffered fuzz data into b, simulating a net.Conn read, and
// returns io.EOF once all data has been consumed.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset (instead
// of the remaining byte count len(c.buf)-c.offset), so a read near the end of
// the buffer reported n == len(b) even though fewer bytes were copied, and
// advanced offset past len(c.buf). Per the io.Reader contract, n must be the
// number of bytes actually placed in b; copy already returns exactly that.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends to send b: the data is discarded and a fully successful
// write is reported, which is all the fuzzed code observes.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the connection as drained by jumping the read offset to the
// end of the buffer, so subsequent Reads report io.EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps the raw fuzz bytes a in a FuzzingConn that
// replays them through Read.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
func CreateBigInt(a []byte) *big.Int {
r := new(big.Int)
r.SetBytes(a)
return r
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
func ConvertIntArray(a []int64) []int {
r := make([]int, len(a))
for i := range a {
r[i] = int(a[i])
}
return r
}
func ConvertUint16Array(a []int64) []uint16 {
r := make([]uint16, len(a))
for i := range a {
r[i] = uint16(a[i])
}
return r
}
func GetRune(s string) rune {
for _, c := range s {
return c
}
return '\x00'
}
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup across fuzz iterations.
var initialized bool

// FuzzNG_List replays the decoded call list against crypto/des.
// It returns 1 when the whole list was executed, 0 on early exit
// (over-long list or an API call that returned an error).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// Optionally dump a Go-source reproducer of this input to the
		// file named by FUZZ_NG_REPRODUCER.
		if repro := os.Getenv("FUZZ_NG_REPRODUCER"); repro != "" {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l, one := range gen.List {
		// Cap the number of replayed calls to keep one iteration fast.
		if l > 4096 {
			return 0
		}
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_NewCipher:
			if _, err := des.NewCipher(a.NewCipher.Key); err != nil {
				err.Error()
				return 0
			}
		case *NgoloFuzzOne_NewTripleDESCipher:
			if _, err := des.NewTripleDESCipher(a.NewTripleDESCipher.Key); err != nil {
				err.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the call list to w,
// one line per replayed API call.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for _, one := range gen.List {
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_NewCipher:
			w.WriteString(fmt.Sprintf("des.NewCipher(%#+v)\n", a.NewCipher.Key))
		case *NgoloFuzzOne_NewTripleDESCipher:
			w.WriteString(fmt.Sprintf("des.NewTripleDESCipher(%#+v)\n", a.NewTripleDESCipher.Key))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_des
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewCipherArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewCipherArgs) Reset() {
*x = NewCipherArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewCipherArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewCipherArgs) ProtoMessage() {}
func (x *NewCipherArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewCipherArgs.ProtoReflect.Descriptor instead.
func (*NewCipherArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewCipherArgs) GetKey() []byte {
if x != nil {
return x.Key
}
return nil
}
type NewTripleDESCipherArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewTripleDESCipherArgs) Reset() {
*x = NewTripleDESCipherArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewTripleDESCipherArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewTripleDESCipherArgs) ProtoMessage() {}
func (x *NewTripleDESCipherArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewTripleDESCipherArgs.ProtoReflect.Descriptor instead.
func (*NewTripleDESCipherArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *NewTripleDESCipherArgs) GetKey() []byte {
if x != nil {
return x.Key
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewCipher
// *NgoloFuzzOne_NewTripleDESCipher
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewCipher() *NewCipherArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewCipher); ok {
return x.NewCipher
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewTripleDESCipher() *NewTripleDESCipherArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewTripleDESCipher); ok {
return x.NewTripleDESCipher
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewCipher struct {
NewCipher *NewCipherArgs `protobuf:"bytes,1,opt,name=NewCipher,proto3,oneof"`
}
type NgoloFuzzOne_NewTripleDESCipher struct {
NewTripleDESCipher *NewTripleDESCipherArgs `protobuf:"bytes,2,opt,name=NewTripleDESCipher,proto3,oneof"`
}
func (*NgoloFuzzOne_NewCipher) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewTripleDESCipher) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"!\n" +
"\rNewCipherArgs\x12\x10\n" +
"\x03key\x18\x01 \x01(\fR\x03key\"*\n" +
"\x16NewTripleDESCipherArgs\x12\x10\n" +
"\x03key\x18\x01 \x01(\fR\x03key\"\xa5\x01\n" +
"\fNgoloFuzzOne\x128\n" +
"\tNewCipher\x18\x01 \x01(\v2\x18.ngolofuzz.NewCipherArgsH\x00R\tNewCipher\x12S\n" +
"\x12NewTripleDESCipher\x18\x02 \x01(\v2!.ngolofuzz.NewTripleDESCipherArgsH\x00R\x12NewTripleDESCipherB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x17Z\x15./;fuzz_ng_crypto_desb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_ngolofuzz_proto_goTypes = []any{
(*NewCipherArgs)(nil), // 0: ngolofuzz.NewCipherArgs
(*NewTripleDESCipherArgs)(nil), // 1: ngolofuzz.NewTripleDESCipherArgs
(*NgoloFuzzOne)(nil), // 2: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 3: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 4: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.NewCipher:type_name -> ngolofuzz.NewCipherArgs
1, // 1: ngolofuzz.NgoloFuzzOne.NewTripleDESCipher:type_name -> ngolofuzz.NewTripleDESCipherArgs
2, // 2: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzOne_NewCipher)(nil),
(*NgoloFuzzOne_NewTripleDESCipher)(nil),
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_ecdsa
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/ecdsa"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
// Read copies buffered fuzz data into b, simulating a net.Conn read, and
// returns io.EOF once all data has been consumed.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset (instead
// of the remaining byte count len(c.buf)-c.offset), so a read near the end of
// the buffer reported n == len(b) even though fewer bytes were copied, and
// advanced offset past len(c.buf). Per the io.Reader contract, n must be the
// number of bytes actually placed in b; copy already returns exactly that.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
//TODO only add these functions if needed
func CreateBigInt(a []byte) *big.Int {
r := new(big.Int)
r.SetBytes(a)
return r
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
func ConvertIntArray(a []int64) []int {
r := make([]int, len(a))
for i := range a {
r[i] = int(a[i])
}
return r
}
func ConvertUint16Array(a []int64) []uint16 {
r := make([]uint16, len(a))
for i := range a {
r[i] = uint16(a[i])
}
return r
}
func GetRune(s string) rune {
for _, c := range s {
return c
}
return '\x00'
}
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup across fuzz iterations.
var initialized bool
// FuzzNG_List replays the decoded call list against crypto/ecdsa.
// It returns 1 when the whole list was executed, 0 on early exit
// (over-long list or an API call that returned an error).
//
// NOTE(review): no case in this switch ever appends to PrivateKeyResults or
// PublicKeyResults, so every key-consuming case always hits its empty-slice
// guard and is skipped — presumably the generator omitted a key-producing
// function (e.g. ecdsa.GenerateKey); confirm against the generator config.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// Optionally dump a Go-source reproducer of this input to the file
// named by FUZZ_NG_REPRODUCER (first iteration only).
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Round-robin pools of values produced by earlier calls in the list,
// reused as receivers/arguments for later calls.
var PrivateKeyResults []*ecdsa.PrivateKey
PrivateKeyResultsIndex := 0
var PublicKeyResults []*ecdsa.PublicKey
PublicKeyResultsIndex := 0
for l := range gen.List {
// Cap the number of replayed calls to keep one iteration fast.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_PublicKeyNgdotECDH:
if len(PublicKeyResults) == 0 {
continue
}
arg0 := PublicKeyResults[PublicKeyResultsIndex]
PublicKeyResultsIndex = (PublicKeyResultsIndex + 1) % len(PublicKeyResults)
_, r1 := arg0.ECDH()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_PublicKeyNgdotBytes:
if len(PublicKeyResults) == 0 {
continue
}
arg0 := PublicKeyResults[PublicKeyResultsIndex]
PublicKeyResultsIndex = (PublicKeyResultsIndex + 1) % len(PublicKeyResults)
_, r1 := arg0.Bytes()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_PrivateKeyNgdotECDH:
if len(PrivateKeyResults) == 0 {
continue
}
arg0 := PrivateKeyResults[PrivateKeyResultsIndex]
PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
_, r1 := arg0.ECDH()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_PrivateKeyNgdotPublic:
if len(PrivateKeyResults) == 0 {
continue
}
arg0 := PrivateKeyResults[PrivateKeyResultsIndex]
PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
arg0.Public()
case *NgoloFuzzOne_PrivateKeyNgdotBytes:
if len(PrivateKeyResults) == 0 {
continue
}
arg0 := PrivateKeyResults[PrivateKeyResultsIndex]
PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
_, r1 := arg0.Bytes()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_SignASN1:
// The fuzzed bytes feed the deterministic "random" source for signing.
arg0 := bytes.NewReader(a.SignASN1.R)
if len(PrivateKeyResults) == 0 {
continue
}
arg1 := PrivateKeyResults[PrivateKeyResultsIndex]
PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
_, r1 := ecdsa.SignASN1(arg0, arg1, a.SignASN1.Hash)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_VerifyASN1:
if len(PublicKeyResults) == 0 {
continue
}
arg0 := PublicKeyResults[PublicKeyResultsIndex]
PublicKeyResultsIndex = (PublicKeyResultsIndex + 1) % len(PublicKeyResults)
ecdsa.VerifyASN1(arg0, a.VerifyASN1.Hash, a.VerifyASN1.Sig)
case *NgoloFuzzOne_Sign:
arg0 := bytes.NewReader(a.Sign.Rand)
if len(PrivateKeyResults) == 0 {
continue
}
arg1 := PrivateKeyResults[PrivateKeyResultsIndex]
PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
_, _, r2 := ecdsa.Sign(arg0, arg1, a.Sign.Hash)
if r2 != nil{
r2.Error()
return 0
}
case *NgoloFuzzOne_Verify:
if len(PublicKeyResults) == 0 {
continue
}
arg0 := PublicKeyResults[PublicKeyResultsIndex]
PublicKeyResultsIndex = (PublicKeyResultsIndex + 1) % len(PublicKeyResults)
// R and S arrive as raw big-endian bytes; rebuild big.Ints for Verify.
arg2 := CreateBigInt(a.Verify.R)
arg3 := CreateBigInt(a.Verify.S)
ecdsa.Verify(arg0, a.Verify.Hash, arg2, arg3)
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer for the ecdsa call list to w.
// It mirrors FuzzNG_List's round-robin argument selection: the *Nb counters
// stand in for len(*Results) and the *ResultsIndex counters replay the same
// index sequence, so the printed PrivateKeyN/PublicKeyN names line up with
// the values used during execution.
//
// NOTE(review): no case in this switch increments PrivateKeyNb or
// PublicKeyNb, so every branch hits its `continue` guard and nothing is ever
// printed — the key-producing cases look missing; confirm against the
// ngolo-fuzz generator.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	PrivateKeyNb := 0
	PrivateKeyResultsIndex := 0
	PublicKeyNb := 0
	PublicKeyResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_PublicKeyNgdotECDH:
			// Skip any call that needs a key before one was produced.
			if PublicKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PublicKey%d.ECDH()\n", PublicKeyResultsIndex))
			PublicKeyResultsIndex = (PublicKeyResultsIndex + 1) % PublicKeyNb
		case *NgoloFuzzOne_PublicKeyNgdotBytes:
			if PublicKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PublicKey%d.Bytes()\n", PublicKeyResultsIndex))
			PublicKeyResultsIndex = (PublicKeyResultsIndex + 1) % PublicKeyNb
		case *NgoloFuzzOne_PrivateKeyNgdotECDH:
			if PrivateKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PrivateKey%d.ECDH()\n", PrivateKeyResultsIndex))
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
		case *NgoloFuzzOne_PrivateKeyNgdotPublic:
			if PrivateKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PrivateKey%d.Public()\n", PrivateKeyResultsIndex))
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
		case *NgoloFuzzOne_PrivateKeyNgdotBytes:
			if PrivateKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PrivateKey%d.Bytes()\n", PrivateKeyResultsIndex))
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
		case *NgoloFuzzOne_SignASN1:
			if PrivateKeyNb == 0 {
				continue
			}
			// (idx + 0) % Nb prints the index consumed by this call before it
			// is advanced on the following line.
			w.WriteString(fmt.Sprintf("ecdsa.SignASN1(bytes.NewReader(%#+v), PrivateKey%d, %#+v)\n", a.SignASN1.R, (PrivateKeyResultsIndex+0)%PrivateKeyNb, a.SignASN1.Hash))
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
		case *NgoloFuzzOne_VerifyASN1:
			if PublicKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("ecdsa.VerifyASN1(PublicKey%d, %#+v, %#+v)\n", (PublicKeyResultsIndex+0)%PublicKeyNb, a.VerifyASN1.Hash, a.VerifyASN1.Sig))
			PublicKeyResultsIndex = (PublicKeyResultsIndex + 1) % PublicKeyNb
		case *NgoloFuzzOne_Sign:
			if PrivateKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("ecdsa.Sign(bytes.NewReader(%#+v), PrivateKey%d, %#+v)\n", a.Sign.Rand, (PrivateKeyResultsIndex+0)%PrivateKeyNb, a.Sign.Hash))
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
		case *NgoloFuzzOne_Verify:
			if PublicKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("ecdsa.Verify(PublicKey%d, %#+v, CreateBigInt(%#+v), CreateBigInt(%#+v))\n", (PublicKeyResultsIndex+0)%PublicKeyNb, a.Verify.Hash, a.Verify.R, a.Verify.S))
			PublicKeyResultsIndex = (PublicKeyResultsIndex + 1) % PublicKeyNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_ecdsa
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time checks that this generated code and the linked protoimpl
// runtime agree on a supported protobuf API version (generated by
// protoc-gen-go; do not edit by hand).
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// The following five *Args message types are empty placeholders generated by
// protoc-gen-go for harness calls that carry no serialized arguments; each
// holds only the standard protobuf bookkeeping fields and boilerplate
// methods. Generated code — do not edit by hand.
type PublicKeyNgdotECDHArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PublicKeyNgdotECDHArgs) Reset() {
	*x = PublicKeyNgdotECDHArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PublicKeyNgdotECDHArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PublicKeyNgdotECDHArgs) ProtoMessage() {}

func (x *PublicKeyNgdotECDHArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PublicKeyNgdotECDHArgs.ProtoReflect.Descriptor instead.
func (*PublicKeyNgdotECDHArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

type PublicKeyNgdotBytesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PublicKeyNgdotBytesArgs) Reset() {
	*x = PublicKeyNgdotBytesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PublicKeyNgdotBytesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PublicKeyNgdotBytesArgs) ProtoMessage() {}

func (x *PublicKeyNgdotBytesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PublicKeyNgdotBytesArgs.ProtoReflect.Descriptor instead.
func (*PublicKeyNgdotBytesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

type PrivateKeyNgdotECDHArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PrivateKeyNgdotECDHArgs) Reset() {
	*x = PrivateKeyNgdotECDHArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PrivateKeyNgdotECDHArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PrivateKeyNgdotECDHArgs) ProtoMessage() {}

func (x *PrivateKeyNgdotECDHArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PrivateKeyNgdotECDHArgs.ProtoReflect.Descriptor instead.
func (*PrivateKeyNgdotECDHArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

type PrivateKeyNgdotPublicArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PrivateKeyNgdotPublicArgs) Reset() {
	*x = PrivateKeyNgdotPublicArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PrivateKeyNgdotPublicArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PrivateKeyNgdotPublicArgs) ProtoMessage() {}

func (x *PrivateKeyNgdotPublicArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PrivateKeyNgdotPublicArgs.ProtoReflect.Descriptor instead.
func (*PrivateKeyNgdotPublicArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

type PrivateKeyNgdotBytesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PrivateKeyNgdotBytesArgs) Reset() {
	*x = PrivateKeyNgdotBytesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PrivateKeyNgdotBytesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PrivateKeyNgdotBytesArgs) ProtoMessage() {}

func (x *PrivateKeyNgdotBytesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PrivateKeyNgdotBytesArgs.ProtoReflect.Descriptor instead.
func (*PrivateKeyNgdotBytesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// SignASN1Args carries the fuzzed inputs for ecdsa.SignASN1: R feeds the
// rand io.Reader and Hash is the digest to sign. Generated by
// protoc-gen-go — do not edit by hand.
type SignASN1Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	Hash          []byte                 `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *SignASN1Args) Reset() {
	*x = SignASN1Args{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *SignASN1Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SignASN1Args) ProtoMessage() {}

func (x *SignASN1Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SignASN1Args.ProtoReflect.Descriptor instead.
func (*SignASN1Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

func (x *SignASN1Args) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}

func (x *SignASN1Args) GetHash() []byte {
	if x != nil {
		return x.Hash
	}
	return nil
}
// VerifyASN1Args carries the fuzzed inputs for ecdsa.VerifyASN1: the digest
// and an ASN.1-encoded signature. Generated by protoc-gen-go — do not edit
// by hand.
type VerifyASN1Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Hash          []byte                 `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
	Sig           []byte                 `protobuf:"bytes,2,opt,name=sig,proto3" json:"sig,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *VerifyASN1Args) Reset() {
	*x = VerifyASN1Args{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *VerifyASN1Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*VerifyASN1Args) ProtoMessage() {}

func (x *VerifyASN1Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use VerifyASN1Args.ProtoReflect.Descriptor instead.
func (*VerifyASN1Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

func (x *VerifyASN1Args) GetHash() []byte {
	if x != nil {
		return x.Hash
	}
	return nil
}

func (x *VerifyASN1Args) GetSig() []byte {
	if x != nil {
		return x.Sig
	}
	return nil
}
// SignArgs carries the fuzzed inputs for the Sign call: Rand feeds the
// entropy io.Reader and Hash is the message digest. Generated by
// protoc-gen-go — do not edit by hand.
type SignArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Rand          []byte                 `protobuf:"bytes,1,opt,name=rand,proto3" json:"rand,omitempty"`
	Hash          []byte                 `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *SignArgs) Reset() {
	*x = SignArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *SignArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SignArgs) ProtoMessage() {}

func (x *SignArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SignArgs.ProtoReflect.Descriptor instead.
func (*SignArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

func (x *SignArgs) GetRand() []byte {
	if x != nil {
		return x.Rand
	}
	return nil
}

func (x *SignArgs) GetHash() []byte {
	if x != nil {
		return x.Hash
	}
	return nil
}
// VerifyArgs carries the fuzzed inputs for ecdsa.Verify: the digest plus the
// big-endian byte encodings of the signature's r and s values. Generated by
// protoc-gen-go — do not edit by hand.
type VerifyArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Hash          []byte                 `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
	R             []byte                 `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
	S             []byte                 `protobuf:"bytes,3,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *VerifyArgs) Reset() {
	*x = VerifyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *VerifyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*VerifyArgs) ProtoMessage() {}

func (x *VerifyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use VerifyArgs.ProtoReflect.Descriptor instead.
func (*VerifyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

func (x *VerifyArgs) GetHash() []byte {
	if x != nil {
		return x.Hash
	}
	return nil
}

func (x *VerifyArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}

func (x *VerifyArgs) GetS() []byte {
	if x != nil {
		return x.S
	}
	return nil
}
// NgoloFuzzOne selects exactly one harness API call per list entry via a
// protobuf oneof; the concrete wrapper type chosen determines which case the
// FuzzNG_List switch executes. Generated by protoc-gen-go — do not edit by
// hand.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_PublicKeyNgdotECDH
	//	*NgoloFuzzOne_PublicKeyNgdotBytes
	//	*NgoloFuzzOne_PrivateKeyNgdotECDH
	//	*NgoloFuzzOne_PrivateKeyNgdotPublic
	//	*NgoloFuzzOne_PrivateKeyNgdotBytes
	//	*NgoloFuzzOne_SignASN1
	//	*NgoloFuzzOne_VerifyASN1
	//	*NgoloFuzzOne_Sign
	//	*NgoloFuzzOne_Verify
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

func (x *NgoloFuzzOne) GetPublicKeyNgdotECDH() *PublicKeyNgdotECDHArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PublicKeyNgdotECDH); ok {
			return x.PublicKeyNgdotECDH
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPublicKeyNgdotBytes() *PublicKeyNgdotBytesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PublicKeyNgdotBytes); ok {
			return x.PublicKeyNgdotBytes
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPrivateKeyNgdotECDH() *PrivateKeyNgdotECDHArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PrivateKeyNgdotECDH); ok {
			return x.PrivateKeyNgdotECDH
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPrivateKeyNgdotPublic() *PrivateKeyNgdotPublicArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PrivateKeyNgdotPublic); ok {
			return x.PrivateKeyNgdotPublic
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPrivateKeyNgdotBytes() *PrivateKeyNgdotBytesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PrivateKeyNgdotBytes); ok {
			return x.PrivateKeyNgdotBytes
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetSignASN1() *SignASN1Args {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_SignASN1); ok {
			return x.SignASN1
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetVerifyASN1() *VerifyASN1Args {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_VerifyASN1); ok {
			return x.VerifyASN1
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetSign() *SignArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Sign); ok {
			return x.Sign
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetVerify() *VerifyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Verify); ok {
			return x.Verify
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed oneof interface; only the wrapper types
// below implement it.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_PublicKeyNgdotECDH struct {
	PublicKeyNgdotECDH *PublicKeyNgdotECDHArgs `protobuf:"bytes,1,opt,name=PublicKeyNgdotECDH,proto3,oneof"`
}

type NgoloFuzzOne_PublicKeyNgdotBytes struct {
	PublicKeyNgdotBytes *PublicKeyNgdotBytesArgs `protobuf:"bytes,2,opt,name=PublicKeyNgdotBytes,proto3,oneof"`
}

type NgoloFuzzOne_PrivateKeyNgdotECDH struct {
	PrivateKeyNgdotECDH *PrivateKeyNgdotECDHArgs `protobuf:"bytes,3,opt,name=PrivateKeyNgdotECDH,proto3,oneof"`
}

type NgoloFuzzOne_PrivateKeyNgdotPublic struct {
	PrivateKeyNgdotPublic *PrivateKeyNgdotPublicArgs `protobuf:"bytes,4,opt,name=PrivateKeyNgdotPublic,proto3,oneof"`
}

type NgoloFuzzOne_PrivateKeyNgdotBytes struct {
	PrivateKeyNgdotBytes *PrivateKeyNgdotBytesArgs `protobuf:"bytes,5,opt,name=PrivateKeyNgdotBytes,proto3,oneof"`
}

type NgoloFuzzOne_SignASN1 struct {
	SignASN1 *SignASN1Args `protobuf:"bytes,6,opt,name=SignASN1,proto3,oneof"`
}

type NgoloFuzzOne_VerifyASN1 struct {
	VerifyASN1 *VerifyASN1Args `protobuf:"bytes,7,opt,name=VerifyASN1,proto3,oneof"`
}

type NgoloFuzzOne_Sign struct {
	Sign *SignArgs `protobuf:"bytes,8,opt,name=Sign,proto3,oneof"`
}

type NgoloFuzzOne_Verify struct {
	Verify *VerifyArgs `protobuf:"bytes,9,opt,name=Verify,proto3,oneof"`
}

func (*NgoloFuzzOne_PublicKeyNgdotECDH) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PublicKeyNgdotBytes) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PrivateKeyNgdotECDH) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PrivateKeyNgdotPublic) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PrivateKeyNgdotBytes) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_SignASN1) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_VerifyASN1) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Sign) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Verify) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generic scalar oneof (double/int64/bool/string/bytes)
// the generator can use for untyped fuzz values. Generated by protoc-gen-go
// — do not edit by hand.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed oneof interface for NgoloFuzzAny.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne calls replayed by FuzzNG_List. Generated by protoc-gen-go —
// do not edit by hand.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the runtime descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-encoded FileDescriptorProto for
// ngolofuzz.proto. Generated by protoc-gen-go — do not edit by hand.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x18\n" +
	"\x16PublicKeyNgdotECDHArgs\"\x19\n" +
	"\x17PublicKeyNgdotBytesArgs\"\x19\n" +
	"\x17PrivateKeyNgdotECDHArgs\"\x1b\n" +
	"\x19PrivateKeyNgdotPublicArgs\"\x1a\n" +
	"\x18PrivateKeyNgdotBytesArgs\"0\n" +
	"\fSignASN1Args\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\x12\x12\n" +
	"\x04hash\x18\x02 \x01(\fR\x04hash\"6\n" +
	"\x0eVerifyASN1Args\x12\x12\n" +
	"\x04hash\x18\x01 \x01(\fR\x04hash\x12\x10\n" +
	"\x03sig\x18\x02 \x01(\fR\x03sig\"2\n" +
	"\bSignArgs\x12\x12\n" +
	"\x04rand\x18\x01 \x01(\fR\x04rand\x12\x12\n" +
	"\x04hash\x18\x02 \x01(\fR\x04hash\"<\n" +
	"\n" +
	"VerifyArgs\x12\x12\n" +
	"\x04hash\x18\x01 \x01(\fR\x04hash\x12\f\n" +
	"\x01r\x18\x02 \x01(\fR\x01r\x12\f\n" +
	"\x01s\x18\x03 \x01(\fR\x01s\"\xa4\x05\n" +
	"\fNgoloFuzzOne\x12S\n" +
	"\x12PublicKeyNgdotECDH\x18\x01 \x01(\v2!.ngolofuzz.PublicKeyNgdotECDHArgsH\x00R\x12PublicKeyNgdotECDH\x12V\n" +
	"\x13PublicKeyNgdotBytes\x18\x02 \x01(\v2\".ngolofuzz.PublicKeyNgdotBytesArgsH\x00R\x13PublicKeyNgdotBytes\x12V\n" +
	"\x13PrivateKeyNgdotECDH\x18\x03 \x01(\v2\".ngolofuzz.PrivateKeyNgdotECDHArgsH\x00R\x13PrivateKeyNgdotECDH\x12\\\n" +
	"\x15PrivateKeyNgdotPublic\x18\x04 \x01(\v2$.ngolofuzz.PrivateKeyNgdotPublicArgsH\x00R\x15PrivateKeyNgdotPublic\x12Y\n" +
	"\x14PrivateKeyNgdotBytes\x18\x05 \x01(\v2#.ngolofuzz.PrivateKeyNgdotBytesArgsH\x00R\x14PrivateKeyNgdotBytes\x125\n" +
	"\bSignASN1\x18\x06 \x01(\v2\x17.ngolofuzz.SignASN1ArgsH\x00R\bSignASN1\x12;\n" +
	"\n" +
	"VerifyASN1\x18\a \x01(\v2\x19.ngolofuzz.VerifyASN1ArgsH\x00R\n" +
	"VerifyASN1\x12)\n" +
	"\x04Sign\x18\b \x01(\v2\x13.ngolofuzz.SignArgsH\x00R\x04Sign\x12/\n" +
	"\x06Verify\x18\t \x01(\v2\x15.ngolofuzz.VerifyArgsH\x00R\x06VerifyB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x19Z\x17./;fuzz_ng_crypto_ecdsab\x06proto3"
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw descriptor
// once and caches it; it backs the deprecated Descriptor() methods.
// Generated by protoc-gen-go — do not edit by hand.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Type-registration tables consumed by protoimpl.TypeBuilder in
// file_ngolofuzz_proto_init. Generated by protoc-gen-go — do not edit by
// hand.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
var file_ngolofuzz_proto_goTypes = []any{
	(*PublicKeyNgdotECDHArgs)(nil),    // 0: ngolofuzz.PublicKeyNgdotECDHArgs
	(*PublicKeyNgdotBytesArgs)(nil),   // 1: ngolofuzz.PublicKeyNgdotBytesArgs
	(*PrivateKeyNgdotECDHArgs)(nil),   // 2: ngolofuzz.PrivateKeyNgdotECDHArgs
	(*PrivateKeyNgdotPublicArgs)(nil), // 3: ngolofuzz.PrivateKeyNgdotPublicArgs
	(*PrivateKeyNgdotBytesArgs)(nil),  // 4: ngolofuzz.PrivateKeyNgdotBytesArgs
	(*SignASN1Args)(nil),              // 5: ngolofuzz.SignASN1Args
	(*VerifyASN1Args)(nil),            // 6: ngolofuzz.VerifyASN1Args
	(*SignArgs)(nil),                  // 7: ngolofuzz.SignArgs
	(*VerifyArgs)(nil),                // 8: ngolofuzz.VerifyArgs
	(*NgoloFuzzOne)(nil),              // 9: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),              // 10: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),             // 11: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.PublicKeyNgdotECDH:type_name -> ngolofuzz.PublicKeyNgdotECDHArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.PublicKeyNgdotBytes:type_name -> ngolofuzz.PublicKeyNgdotBytesArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.PrivateKeyNgdotECDH:type_name -> ngolofuzz.PrivateKeyNgdotECDHArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.PrivateKeyNgdotPublic:type_name -> ngolofuzz.PrivateKeyNgdotPublicArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.PrivateKeyNgdotBytes:type_name -> ngolofuzz.PrivateKeyNgdotBytesArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.SignASN1:type_name -> ngolofuzz.SignASN1Args
	6,  // 6: ngolofuzz.NgoloFuzzOne.VerifyASN1:type_name -> ngolofuzz.VerifyASN1Args
	7,  // 7: ngolofuzz.NgoloFuzzOne.Sign:type_name -> ngolofuzz.SignArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.Verify:type_name -> ngolofuzz.VerifyArgs
	9,  // 9: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	10, // [10:10] is the sub-list for method output_type
	10, // [10:10] is the sub-list for method input_type
	10, // [10:10] is the sub-list for extension type_name
	10, // [10:10] is the sub-list for extension extendee
	0,  // [0:10] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init wires the oneof wrapper types into the message
// infos and builds the file descriptor exactly once; subsequent calls are
// no-ops. Generated by protoc-gen-go — do not edit by hand.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	file_ngolofuzz_proto_msgTypes[9].OneofWrappers = []any{
		(*NgoloFuzzOne_PublicKeyNgdotECDH)(nil),
		(*NgoloFuzzOne_PublicKeyNgdotBytes)(nil),
		(*NgoloFuzzOne_PrivateKeyNgdotECDH)(nil),
		(*NgoloFuzzOne_PrivateKeyNgdotPublic)(nil),
		(*NgoloFuzzOne_PrivateKeyNgdotBytes)(nil),
		(*NgoloFuzzOne_SignASN1)(nil),
		(*NgoloFuzzOne_VerifyASN1)(nil),
		(*NgoloFuzzOne_Sign)(nil),
		(*NgoloFuzzOne_Verify)(nil),
	}
	file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   12,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_ed25519
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/ed25519"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write discards b and reports it as fully written, so fuzzed code that
// writes to the connection always succeeds.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so subsequent Reads return EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a fixed placeholder net.Addr for FuzzingConn.
type FuzzingAddr struct{}

func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr/RemoteAddr return the placeholder address; the deadline setters
// are accepted and ignored, which is sufficient for deterministic fuzzing.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzz-provided bytes a in a FuzzingConn so they
// can be consumed through the net.Conn-style Read interface.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit the helper functions below when the generated harness actually needs them
// CreateBigInt interprets a as a big-endian unsigned integer and returns it
// as a *big.Int (zero for an empty or nil slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the byte slice a as a buffered reader, as
// expected by fuzzed APIs that take a *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to the platform int type,
// returning a new slice of equal length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (modulo 2^16),
// returning a new slice of equal length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the start of s decodes as utf8.RuneError, matching the
// behavior of a range loop over the string.
func GetRune(s string) rune {
	first := '\x00'
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid is the fuzz entry point for inputs that are expected to
// already be a valid protobuf encoding of NgoloFuzzList (e.g. when driven by
// a protobuf-aware mutator); it panics if unmarshalling fails.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the harness itself; any other panic
	// value is treated as a real finding and re-raised.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	// Collect garbage between inputs to keep memory usage stable across the
	// long-running fuzz loop.
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzz entry point for raw inputs that may not be a
// valid protobuf encoding of NgoloFuzzList; invalid inputs are rejected with
// return value 0 instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics raised by the harness itself; any other panic
	// value is treated as a real finding and re-raised.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the fuzzer-generated call list against the
// crypto/ed25519 API. Keys produced by GenerateKey/NewKeyFromSeed are stored
// in slices and consumed round-robin by later calls. Returns 1 when the
// whole list executed, 0 when execution stopped early.
func FuzzNG_List(gen *NgoloFuzzList) int {
	// On first use, optionally write a Go-source reproducer of this input to
	// the file named by the FUZZ_NG_REPRODUCER environment variable.
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var PrivateKeyResults []*ed25519.PrivateKey
	PrivateKeyResultsIndex := 0
	var PublicKeyResults []*ed25519.PublicKey
	PublicKeyResultsIndex := 0
	for l := range gen.List {
		// Cap the number of replayed calls to bound per-input runtime.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_PrivateKeyNgdotPublic:
			// Calls that need a key are skipped until one has been produced.
			if len(PrivateKeyResults) == 0 {
				continue
			}
			arg0 := PrivateKeyResults[PrivateKeyResultsIndex]
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
			arg0.Public()
		case *NgoloFuzzOne_PrivateKeyNgdotSeed:
			if len(PrivateKeyResults) == 0 {
				continue
			}
			arg0 := PrivateKeyResults[PrivateKeyResultsIndex]
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
			arg0.Seed()
		case *NgoloFuzzOne_GenerateKey:
			// Fuzz-controlled entropy source for deterministic key generation.
			arg0 := bytes.NewReader(a.GenerateKey.Random)
			r0, r1, r2 := ed25519.GenerateKey(arg0)
			PublicKeyResults = append(PublicKeyResults, &r0)
			PrivateKeyResults = append(PrivateKeyResults, &r1)
			if r2 != nil {
				r2.Error()
				return 0
			}
		case *NgoloFuzzOne_NewKeyFromSeed:
			r0 := ed25519.NewKeyFromSeed(a.NewKeyFromSeed.Seed)
			PrivateKeyResults = append(PrivateKeyResults, &r0)
		case *NgoloFuzzOne_Sign:
			if len(PrivateKeyResults) == 0 {
				continue
			}
			arg0 := *PrivateKeyResults[PrivateKeyResultsIndex]
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
			ed25519.Sign(arg0, a.Sign.Message)
		case *NgoloFuzzOne_Verify:
			if len(PublicKeyResults) == 0 {
				continue
			}
			arg0 := *PublicKeyResults[PublicKeyResultsIndex]
			PublicKeyResultsIndex = (PublicKeyResultsIndex + 1) % len(PublicKeyResults)
			ed25519.Verify(arg0, a.Verify.Message, a.Verify.Sig)
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go reproducer for gen to w, mirroring
// the dispatch logic of FuzzNG_List so variable numbering matches execution.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
PrivateKeyNb := 0
PrivateKeyResultsIndex := 0
PublicKeyNb := 0
PublicKeyResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_PrivateKeyNgdotPublic:
if PrivateKeyNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("PrivateKey%d.Public()\n", PrivateKeyResultsIndex))
PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
case *NgoloFuzzOne_PrivateKeyNgdotSeed:
if PrivateKeyNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("PrivateKey%d.Seed()\n", PrivateKeyResultsIndex))
PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
case *NgoloFuzzOne_GenerateKey:
w.WriteString(fmt.Sprintf("PublicKey%d, PrivateKey%d, _ := ed25519.GenerateKey(bytes.NewReader(%#+v))\n", PublicKeyNb, PrivateKeyNb, a.GenerateKey.Random))
PublicKeyNb = PublicKeyNb + 1
PrivateKeyNb = PrivateKeyNb + 1
case *NgoloFuzzOne_NewKeyFromSeed:
w.WriteString(fmt.Sprintf("PrivateKey%d := ed25519.NewKeyFromSeed(%#+v)\n", PrivateKeyNb, a.NewKeyFromSeed.Seed))
PrivateKeyNb = PrivateKeyNb + 1
case *NgoloFuzzOne_Sign:
if PrivateKeyNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("ed25519.Sign(PrivateKey%d, %#+v)\n", (PrivateKeyResultsIndex + 0) % PrivateKeyNb, a.Sign.Message))
PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
case *NgoloFuzzOne_Verify:
if PublicKeyNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("ed25519.Verify(PublicKey%d, %#+v, %#+v)\n", (PublicKeyResultsIndex + 0) % PublicKeyNb, a.Verify.Message, a.Verify.Sig))
PublicKeyResultsIndex = (PublicKeyResultsIndex + 1) % PublicKeyNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_ed25519
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards that the protoc-gen-go output and the protobuf
// runtime are mutually compatible.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// PrivateKeyNgdotPublicArgs is the (empty) protobuf message describing the
// arguments of a PrivateKey.Public call. protoc-gen-go boilerplate.
type PrivateKeyNgdotPublicArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PrivateKeyNgdotPublicArgs) Reset() {
*x = PrivateKeyNgdotPublicArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PrivateKeyNgdotPublicArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PrivateKeyNgdotPublicArgs) ProtoMessage() {}
func (x *PrivateKeyNgdotPublicArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PrivateKeyNgdotPublicArgs.ProtoReflect.Descriptor instead.
func (*PrivateKeyNgdotPublicArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// PrivateKeyNgdotSeedArgs is the (empty) protobuf message describing the
// arguments of a PrivateKey.Seed call. protoc-gen-go boilerplate.
type PrivateKeyNgdotSeedArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PrivateKeyNgdotSeedArgs) Reset() {
*x = PrivateKeyNgdotSeedArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PrivateKeyNgdotSeedArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PrivateKeyNgdotSeedArgs) ProtoMessage() {}
func (x *PrivateKeyNgdotSeedArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PrivateKeyNgdotSeedArgs.ProtoReflect.Descriptor instead.
func (*PrivateKeyNgdotSeedArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GenerateKeyArgs carries the fuzzer-chosen randomness for ed25519.GenerateKey.
// protoc-gen-go boilerplate.
type GenerateKeyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Random []byte `protobuf:"bytes,1,opt,name=random,proto3" json:"random,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GenerateKeyArgs) Reset() {
*x = GenerateKeyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GenerateKeyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GenerateKeyArgs) ProtoMessage() {}
func (x *GenerateKeyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GenerateKeyArgs.ProtoReflect.Descriptor instead.
func (*GenerateKeyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *GenerateKeyArgs) GetRandom() []byte {
if x != nil {
return x.Random
}
return nil
}
// NewKeyFromSeedArgs carries the seed bytes for ed25519.NewKeyFromSeed.
type NewKeyFromSeedArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Seed []byte `protobuf:"bytes,1,opt,name=seed,proto3" json:"seed,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewKeyFromSeedArgs) Reset() {
*x = NewKeyFromSeedArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewKeyFromSeedArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewKeyFromSeedArgs) ProtoMessage() {}
func (x *NewKeyFromSeedArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewKeyFromSeedArgs.ProtoReflect.Descriptor instead.
func (*NewKeyFromSeedArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NewKeyFromSeedArgs) GetSeed() []byte {
if x != nil {
return x.Seed
}
return nil
}
// SignArgs carries the message bytes for ed25519.Sign.
type SignArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Message []byte `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SignArgs) Reset() {
*x = SignArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SignArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SignArgs) ProtoMessage() {}
func (x *SignArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SignArgs.ProtoReflect.Descriptor instead.
func (*SignArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *SignArgs) GetMessage() []byte {
if x != nil {
return x.Message
}
return nil
}
// VerifyArgs carries the message and signature bytes for ed25519.Verify.
type VerifyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Message []byte `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
Sig []byte `protobuf:"bytes,2,opt,name=sig,proto3" json:"sig,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *VerifyArgs) Reset() {
*x = VerifyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *VerifyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*VerifyArgs) ProtoMessage() {}
func (x *VerifyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use VerifyArgs.ProtoReflect.Descriptor instead.
func (*VerifyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *VerifyArgs) GetMessage() []byte {
if x != nil {
return x.Message
}
return nil
}
func (x *VerifyArgs) GetSig() []byte {
if x != nil {
return x.Sig
}
return nil
}
// NgoloFuzzOne is a oneof wrapper selecting a single ed25519 API call and
// its arguments. protoc-gen-go boilerplate.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_PrivateKeyNgdotPublic
// *NgoloFuzzOne_PrivateKeyNgdotSeed
// *NgoloFuzzOne_GenerateKey
// *NgoloFuzzOne_NewKeyFromSeed
// *NgoloFuzzOne_Sign
// *NgoloFuzzOne_Verify
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Typed accessors below return the oneof member if set, nil otherwise.
func (x *NgoloFuzzOne) GetPrivateKeyNgdotPublic() *PrivateKeyNgdotPublicArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PrivateKeyNgdotPublic); ok {
return x.PrivateKeyNgdotPublic
}
}
return nil
}
func (x *NgoloFuzzOne) GetPrivateKeyNgdotSeed() *PrivateKeyNgdotSeedArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PrivateKeyNgdotSeed); ok {
return x.PrivateKeyNgdotSeed
}
}
return nil
}
func (x *NgoloFuzzOne) GetGenerateKey() *GenerateKeyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_GenerateKey); ok {
return x.GenerateKey
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewKeyFromSeed() *NewKeyFromSeedArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewKeyFromSeed); ok {
return x.NewKeyFromSeed
}
}
return nil
}
func (x *NgoloFuzzOne) GetSign() *SignArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sign); ok {
return x.Sign
}
}
return nil
}
func (x *NgoloFuzzOne) GetVerify() *VerifyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Verify); ok {
return x.Verify
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by all oneof
// wrapper types of NgoloFuzzOne.Item.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_PrivateKeyNgdotPublic struct {
PrivateKeyNgdotPublic *PrivateKeyNgdotPublicArgs `protobuf:"bytes,1,opt,name=PrivateKeyNgdotPublic,proto3,oneof"`
}
type NgoloFuzzOne_PrivateKeyNgdotSeed struct {
PrivateKeyNgdotSeed *PrivateKeyNgdotSeedArgs `protobuf:"bytes,2,opt,name=PrivateKeyNgdotSeed,proto3,oneof"`
}
type NgoloFuzzOne_GenerateKey struct {
GenerateKey *GenerateKeyArgs `protobuf:"bytes,3,opt,name=GenerateKey,proto3,oneof"`
}
type NgoloFuzzOne_NewKeyFromSeed struct {
NewKeyFromSeed *NewKeyFromSeedArgs `protobuf:"bytes,4,opt,name=NewKeyFromSeed,proto3,oneof"`
}
type NgoloFuzzOne_Sign struct {
Sign *SignArgs `protobuf:"bytes,5,opt,name=Sign,proto3,oneof"`
}
type NgoloFuzzOne_Verify struct {
Verify *VerifyArgs `protobuf:"bytes,6,opt,name=Verify,proto3,oneof"`
}
func (*NgoloFuzzOne_PrivateKeyNgdotPublic) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PrivateKeyNgdotSeed) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GenerateKey) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewKeyFromSeed) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sign) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Verify) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a oneof wrapper for a single scalar fuzz argument
// (double, int64, bool, string, or bytes). protoc-gen-go boilerplate.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Typed accessors below return the oneof member if set, or the zero value.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented by all oneof
// wrapper types of NgoloFuzzAny.Item.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of API
// calls to execute. protoc-gen-go boilerplate.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the runtime descriptor of ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-encoded FileDescriptorProto for
// ngolofuzz.proto. Do not edit by hand; regenerate with protoc.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1b\n" +
"\x19PrivateKeyNgdotPublicArgs\"\x19\n" +
"\x17PrivateKeyNgdotSeedArgs\")\n" +
"\x0fGenerateKeyArgs\x12\x16\n" +
"\x06random\x18\x01 \x01(\fR\x06random\"(\n" +
"\x12NewKeyFromSeedArgs\x12\x12\n" +
"\x04seed\x18\x01 \x01(\fR\x04seed\"$\n" +
"\bSignArgs\x12\x18\n" +
"\amessage\x18\x01 \x01(\fR\amessage\"8\n" +
"\n" +
"VerifyArgs\x12\x18\n" +
"\amessage\x18\x01 \x01(\fR\amessage\x12\x10\n" +
"\x03sig\x18\x02 \x01(\fR\x03sig\"\xb1\x03\n" +
"\fNgoloFuzzOne\x12\\\n" +
"\x15PrivateKeyNgdotPublic\x18\x01 \x01(\v2$.ngolofuzz.PrivateKeyNgdotPublicArgsH\x00R\x15PrivateKeyNgdotPublic\x12V\n" +
"\x13PrivateKeyNgdotSeed\x18\x02 \x01(\v2\".ngolofuzz.PrivateKeyNgdotSeedArgsH\x00R\x13PrivateKeyNgdotSeed\x12>\n" +
"\vGenerateKey\x18\x03 \x01(\v2\x1a.ngolofuzz.GenerateKeyArgsH\x00R\vGenerateKey\x12G\n" +
"\x0eNewKeyFromSeed\x18\x04 \x01(\v2\x1d.ngolofuzz.NewKeyFromSeedArgsH\x00R\x0eNewKeyFromSeed\x12)\n" +
"\x04Sign\x18\x05 \x01(\v2\x13.ngolofuzz.SignArgsH\x00R\x04Sign\x12/\n" +
"\x06Verify\x18\x06 \x01(\v2\x15.ngolofuzz.VerifyArgsH\x00R\x06VerifyB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_crypto_ed25519b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw descriptor
// once and returns the cached result (used by Descriptor methods).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Type and dependency tables consumed by the protobuf TypeBuilder below.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_ngolofuzz_proto_goTypes = []any{
(*PrivateKeyNgdotPublicArgs)(nil), // 0: ngolofuzz.PrivateKeyNgdotPublicArgs
(*PrivateKeyNgdotSeedArgs)(nil), // 1: ngolofuzz.PrivateKeyNgdotSeedArgs
(*GenerateKeyArgs)(nil), // 2: ngolofuzz.GenerateKeyArgs
(*NewKeyFromSeedArgs)(nil), // 3: ngolofuzz.NewKeyFromSeedArgs
(*SignArgs)(nil), // 4: ngolofuzz.SignArgs
(*VerifyArgs)(nil), // 5: ngolofuzz.VerifyArgs
(*NgoloFuzzOne)(nil), // 6: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 7: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 8: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.PrivateKeyNgdotPublic:type_name -> ngolofuzz.PrivateKeyNgdotPublicArgs
1, // 1: ngolofuzz.NgoloFuzzOne.PrivateKeyNgdotSeed:type_name -> ngolofuzz.PrivateKeyNgdotSeedArgs
2, // 2: ngolofuzz.NgoloFuzzOne.GenerateKey:type_name -> ngolofuzz.GenerateKeyArgs
3, // 3: ngolofuzz.NgoloFuzzOne.NewKeyFromSeed:type_name -> ngolofuzz.NewKeyFromSeedArgs
4, // 4: ngolofuzz.NgoloFuzzOne.Sign:type_name -> ngolofuzz.SignArgs
5, // 5: ngolofuzz.NgoloFuzzOne.Verify:type_name -> ngolofuzz.VerifyArgs
6, // 6: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
7, // [7:7] is the sub-list for method output_type
7, // [7:7] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the oneof wrappers and builds the
// file descriptor exactly once; subsequent calls are no-ops.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
(*NgoloFuzzOne_PrivateKeyNgdotPublic)(nil),
(*NgoloFuzzOne_PrivateKeyNgdotSeed)(nil),
(*NgoloFuzzOne_GenerateKey)(nil),
(*NgoloFuzzOne_NewKeyFromSeed)(nil),
(*NgoloFuzzOne_Sign)(nil),
(*NgoloFuzzOne_Verify)(nil),
}
file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release build-time tables; the runtime keeps what it needs.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_elliptic
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/elliptic"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stub of net.Conn backed by a fixed byte
// buffer: Read consumes the buffer, Write discards its input, and Close
// marks the buffer as fully consumed.
type FuzzingConn struct {
	buf    []byte // data served to readers
	offset int    // number of bytes of buf already consumed
}

// Read copies up to len(b) bytes from the unread portion of the buffer and
// advances the offset by the number of bytes actually copied. It returns
// io.EOF once the buffer is exhausted.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset instead
// of the remaining length len(c.buf)-c.offset, so when fewer bytes remained
// than len(b) it could still advance the offset by len(b) and report len(b)
// bytes read even though fewer were copied. Using copy's return value makes
// the count and offset always agree with what was written into b.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write discards b and reports it as fully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the whole buffer as consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr with fixed network and string forms,
// used by FuzzingConn's address accessors.
type FuzzingAddr struct{}

// Network reports the constant network name of the fuzzing address.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports the constant textual form of the fuzzing address.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr reports a fixed placeholder address for the fuzzing connection.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return new(FuzzingAddr)
}

// RemoteAddr reports the same fixed placeholder address as LocalAddr.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return new(FuzzingAddr)
}

// SetDeadline is a no-op; the in-memory connection never times out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op; reads never time out.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op; writes never time out.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn returns a FuzzingConn whose reads are served from a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer and returns it
// as a freshly allocated big.Int (zero for empty input).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader for APIs that take
// *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each element of a from int64 to int, preserving
// order and length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each element of a from int64 to uint16,
// preserving order and length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return 0
}
// FuzzNG_valid is the fuzz entry point for inputs known to be valid
// protobuf: a failure to unmarshal is itself treated as a harness bug.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Swallow string panics raised by the driver below; anything else is
// re-raised. Installed after Unmarshal so the panic above still escapes.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the fuzz entry point for raw inputs: unparseable data is
// simply rejected (returns 0) rather than treated as a failure.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics raised by the driver below; anything else is
// re-raised.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump performed by FuzzNG_List.
var initialized bool
// FuzzNG_List interprets gen as a sequence of crypto/elliptic API calls and
// executes them, threading curves produced by the P224/P256/P384/P521 cases
// into later operations round-robin. On first invocation it optionally
// writes a Go reproducer to the file named by FUZZ_NG_REPRODUCER.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Results produced so far; consumed round-robin by the cases below.
// NOTE(review): nothing appends to CurveParamsResults here, so the
// CurveParamsNgdot* cases below never execute — generated-code limitation.
var CurveResults []*elliptic.Curve
CurveResultsIndex := 0
var CurveParamsResults []*elliptic.CurveParams
CurveParamsResultsIndex := 0
for l := range gen.List {
// Cap the number of executed operations per input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_GenerateKey:
if len(CurveResults) == 0 {
continue
}
arg0 := *CurveResults[CurveResultsIndex]
CurveResultsIndex = (CurveResultsIndex + 1) % len(CurveResults)
arg1 := bytes.NewReader(a.GenerateKey.Rand)
_, _, _, r3 := elliptic.GenerateKey(arg0, arg1)
if r3 != nil{
r3.Error()
return 0
}
case *NgoloFuzzOne_Marshal_:
if len(CurveResults) == 0 {
continue
}
arg0 := *CurveResults[CurveResultsIndex]
CurveResultsIndex = (CurveResultsIndex + 1) % len(CurveResults)
arg1 := CreateBigInt(a.Marshal_.X)
arg2 := CreateBigInt(a.Marshal_.Y)
elliptic.Marshal(arg0, arg1, arg2)
case *NgoloFuzzOne_MarshalCompressed:
if len(CurveResults) == 0 {
continue
}
arg0 := *CurveResults[CurveResultsIndex]
CurveResultsIndex = (CurveResultsIndex + 1) % len(CurveResults)
arg1 := CreateBigInt(a.MarshalCompressed.X)
arg2 := CreateBigInt(a.MarshalCompressed.Y)
elliptic.MarshalCompressed(arg0, arg1, arg2)
case *NgoloFuzzOne_Unmarshal_:
if len(CurveResults) == 0 {
continue
}
arg0 := *CurveResults[CurveResultsIndex]
CurveResultsIndex = (CurveResultsIndex + 1) % len(CurveResults)
elliptic.Unmarshal(arg0, a.Unmarshal_.Data)
case *NgoloFuzzOne_UnmarshalCompressed:
if len(CurveResults) == 0 {
continue
}
arg0 := *CurveResults[CurveResultsIndex]
CurveResultsIndex = (CurveResultsIndex + 1) % len(CurveResults)
elliptic.UnmarshalCompressed(arg0, a.UnmarshalCompressed.Data)
case *NgoloFuzzOne_P224:
r0 := elliptic.P224()
CurveResults = append(CurveResults, &r0)
case *NgoloFuzzOne_P256:
r0 := elliptic.P256()
CurveResults = append(CurveResults, &r0)
case *NgoloFuzzOne_P384:
r0 := elliptic.P384()
CurveResults = append(CurveResults, &r0)
case *NgoloFuzzOne_P521:
r0 := elliptic.P521()
CurveResults = append(CurveResults, &r0)
case *NgoloFuzzOne_CurveParamsNgdotParams:
if len(CurveParamsResults) == 0 {
continue
}
arg0 := CurveParamsResults[CurveParamsResultsIndex]
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % len(CurveParamsResults)
arg0.Params()
case *NgoloFuzzOne_CurveParamsNgdotIsOnCurve:
if len(CurveParamsResults) == 0 {
continue
}
arg0 := CurveParamsResults[CurveParamsResultsIndex]
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % len(CurveParamsResults)
arg1 := CreateBigInt(a.CurveParamsNgdotIsOnCurve.X)
arg2 := CreateBigInt(a.CurveParamsNgdotIsOnCurve.Y)
arg0.IsOnCurve(arg1, arg2)
case *NgoloFuzzOne_CurveParamsNgdotAdd:
if len(CurveParamsResults) == 0 {
continue
}
arg0 := CurveParamsResults[CurveParamsResultsIndex]
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % len(CurveParamsResults)
arg1 := CreateBigInt(a.CurveParamsNgdotAdd.X1)
arg2 := CreateBigInt(a.CurveParamsNgdotAdd.Y1)
arg3 := CreateBigInt(a.CurveParamsNgdotAdd.X2)
arg4 := CreateBigInt(a.CurveParamsNgdotAdd.Y2)
arg0.Add(arg1, arg2, arg3, arg4)
case *NgoloFuzzOne_CurveParamsNgdotDouble:
if len(CurveParamsResults) == 0 {
continue
}
arg0 := CurveParamsResults[CurveParamsResultsIndex]
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % len(CurveParamsResults)
arg1 := CreateBigInt(a.CurveParamsNgdotDouble.X1)
arg2 := CreateBigInt(a.CurveParamsNgdotDouble.Y1)
arg0.Double(arg1, arg2)
case *NgoloFuzzOne_CurveParamsNgdotScalarMult:
if len(CurveParamsResults) == 0 {
continue
}
arg0 := CurveParamsResults[CurveParamsResultsIndex]
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % len(CurveParamsResults)
arg1 := CreateBigInt(a.CurveParamsNgdotScalarMult.Bx)
arg2 := CreateBigInt(a.CurveParamsNgdotScalarMult.By)
arg0.ScalarMult(arg1, arg2, a.CurveParamsNgdotScalarMult.K)
case *NgoloFuzzOne_CurveParamsNgdotScalarBaseMult:
if len(CurveParamsResults) == 0 {
continue
}
arg0 := CurveParamsResults[CurveParamsResultsIndex]
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % len(CurveParamsResults)
arg0.ScalarBaseMult(a.CurveParamsNgdotScalarBaseMult.K)
}
}
return 1
}
// PrintNG_List writes a human-readable Go reproducer for gen to w, mirroring
// the dispatch logic of FuzzNG_List so variable numbering matches execution.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
CurveNb := 0
CurveResultsIndex := 0
CurveParamsNb := 0
CurveParamsResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_GenerateKey:
if CurveNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("elliptic.GenerateKey(Curve%d, bytes.NewReader(%#+v))\n", (CurveResultsIndex + 0) % CurveNb, a.GenerateKey.Rand))
CurveResultsIndex = (CurveResultsIndex + 1) % CurveNb
case *NgoloFuzzOne_Marshal_:
if CurveNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("elliptic.Marshal(Curve%d, CreateBigInt(%#+v), CreateBigInt(%#+v))\n", (CurveResultsIndex + 0) % CurveNb, a.Marshal_.X, a.Marshal_.Y))
CurveResultsIndex = (CurveResultsIndex + 1) % CurveNb
case *NgoloFuzzOne_MarshalCompressed:
if CurveNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("elliptic.MarshalCompressed(Curve%d, CreateBigInt(%#+v), CreateBigInt(%#+v))\n", (CurveResultsIndex + 0) % CurveNb, a.MarshalCompressed.X, a.MarshalCompressed.Y))
CurveResultsIndex = (CurveResultsIndex + 1) % CurveNb
case *NgoloFuzzOne_Unmarshal_:
if CurveNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("elliptic.Unmarshal(Curve%d, %#+v)\n", (CurveResultsIndex + 0) % CurveNb, a.Unmarshal_.Data))
CurveResultsIndex = (CurveResultsIndex + 1) % CurveNb
case *NgoloFuzzOne_UnmarshalCompressed:
if CurveNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("elliptic.UnmarshalCompressed(Curve%d, %#+v)\n", (CurveResultsIndex + 0) % CurveNb, a.UnmarshalCompressed.Data))
CurveResultsIndex = (CurveResultsIndex + 1) % CurveNb
case *NgoloFuzzOne_P224:
w.WriteString(fmt.Sprintf("Curve%d := elliptic.P224()\n", CurveNb))
CurveNb = CurveNb + 1
case *NgoloFuzzOne_P256:
w.WriteString(fmt.Sprintf("Curve%d := elliptic.P256()\n", CurveNb))
CurveNb = CurveNb + 1
case *NgoloFuzzOne_P384:
w.WriteString(fmt.Sprintf("Curve%d := elliptic.P384()\n", CurveNb))
CurveNb = CurveNb + 1
case *NgoloFuzzOne_P521:
w.WriteString(fmt.Sprintf("Curve%d := elliptic.P521()\n", CurveNb))
CurveNb = CurveNb + 1
case *NgoloFuzzOne_CurveParamsNgdotParams:
if CurveParamsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CurveParams%d.Params()\n", CurveParamsResultsIndex))
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % CurveParamsNb
case *NgoloFuzzOne_CurveParamsNgdotIsOnCurve:
if CurveParamsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CurveParams%d.IsOnCurve(CreateBigInt(%#+v), CreateBigInt(%#+v))\n", CurveParamsResultsIndex, a.CurveParamsNgdotIsOnCurve.X, a.CurveParamsNgdotIsOnCurve.Y))
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % CurveParamsNb
case *NgoloFuzzOne_CurveParamsNgdotAdd:
if CurveParamsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CurveParams%d.Add(CreateBigInt(%#+v), CreateBigInt(%#+v), CreateBigInt(%#+v), CreateBigInt(%#+v))\n", CurveParamsResultsIndex, a.CurveParamsNgdotAdd.X1, a.CurveParamsNgdotAdd.Y1, a.CurveParamsNgdotAdd.X2, a.CurveParamsNgdotAdd.Y2))
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % CurveParamsNb
case *NgoloFuzzOne_CurveParamsNgdotDouble:
if CurveParamsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CurveParams%d.Double(CreateBigInt(%#+v), CreateBigInt(%#+v))\n", CurveParamsResultsIndex, a.CurveParamsNgdotDouble.X1, a.CurveParamsNgdotDouble.Y1))
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % CurveParamsNb
case *NgoloFuzzOne_CurveParamsNgdotScalarMult:
if CurveParamsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CurveParams%d.ScalarMult(CreateBigInt(%#+v), CreateBigInt(%#+v), %#+v)\n", CurveParamsResultsIndex, a.CurveParamsNgdotScalarMult.Bx, a.CurveParamsNgdotScalarMult.By, a.CurveParamsNgdotScalarMult.K))
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % CurveParamsNb
case *NgoloFuzzOne_CurveParamsNgdotScalarBaseMult:
if CurveParamsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CurveParams%d.ScalarBaseMult(%#+v)\n", CurveParamsResultsIndex, a.CurveParamsNgdotScalarBaseMult.K))
CurveParamsResultsIndex = (CurveParamsResultsIndex + 1) % CurveParamsNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_elliptic
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards that the protoc-gen-go output and the protobuf
// runtime are mutually compatible.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// GenerateKeyArgs is the protoc-generated Go mapping of the
// ngolofuzz.GenerateKeyArgs message; Rand carries the message's
// `rand` bytes field. Generated code — do not edit by hand.
type GenerateKeyArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Rand []byte `protobuf:"bytes,1,opt,name=rand,proto3" json:"rand,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *GenerateKeyArgs) Reset() {
	*x = GenerateKeyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *GenerateKeyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GenerateKeyArgs as a protobuf message.
func (*GenerateKeyArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *GenerateKeyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GenerateKeyArgs.ProtoReflect.Descriptor instead.
func (*GenerateKeyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetRand returns the rand field; safe to call on a nil receiver.
func (x *GenerateKeyArgs) GetRand() []byte {
	if x != nil {
		return x.Rand
	}
	return nil
}
// MarshalArgs is the protoc-generated Go mapping of the
// ngolofuzz.MarshalArgs message; X and Y carry the `x` and `y`
// bytes fields. Generated code — do not edit by hand.
type MarshalArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X []byte `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
	Y []byte `protobuf:"bytes,2,opt,name=y,proto3" json:"y,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *MarshalArgs) Reset() {
	*x = MarshalArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *MarshalArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *MarshalArgs as a protobuf message.
func (*MarshalArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *MarshalArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MarshalArgs.ProtoReflect.Descriptor instead.
func (*MarshalArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetX returns the x field; safe to call on a nil receiver.
func (x *MarshalArgs) GetX() []byte {
	if x != nil {
		return x.X
	}
	return nil
}

// GetY returns the y field; safe to call on a nil receiver.
func (x *MarshalArgs) GetY() []byte {
	if x != nil {
		return x.Y
	}
	return nil
}
// MarshalCompressedArgs is the protoc-generated Go mapping of the
// ngolofuzz.MarshalCompressedArgs message; X and Y carry the `x` and
// `y` bytes fields. Generated code — do not edit by hand.
type MarshalCompressedArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X []byte `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
	Y []byte `protobuf:"bytes,2,opt,name=y,proto3" json:"y,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *MarshalCompressedArgs) Reset() {
	*x = MarshalCompressedArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *MarshalCompressedArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *MarshalCompressedArgs as a protobuf message.
func (*MarshalCompressedArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *MarshalCompressedArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MarshalCompressedArgs.ProtoReflect.Descriptor instead.
func (*MarshalCompressedArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetX returns the x field; safe to call on a nil receiver.
func (x *MarshalCompressedArgs) GetX() []byte {
	if x != nil {
		return x.X
	}
	return nil
}

// GetY returns the y field; safe to call on a nil receiver.
func (x *MarshalCompressedArgs) GetY() []byte {
	if x != nil {
		return x.Y
	}
	return nil
}
// UnmarshalArgs is the protoc-generated Go mapping of the
// ngolofuzz.UnmarshalArgs message; Data carries the `data` bytes
// field. Generated code — do not edit by hand.
type UnmarshalArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *UnmarshalArgs) Reset() {
	*x = UnmarshalArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *UnmarshalArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *UnmarshalArgs as a protobuf message.
func (*UnmarshalArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *UnmarshalArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UnmarshalArgs.ProtoReflect.Descriptor instead.
func (*UnmarshalArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetData returns the data field; safe to call on a nil receiver.
func (x *UnmarshalArgs) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// UnmarshalCompressedArgs is the protoc-generated Go mapping of the
// ngolofuzz.UnmarshalCompressedArgs message; Data carries the `data`
// bytes field. Generated code — do not edit by hand.
type UnmarshalCompressedArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *UnmarshalCompressedArgs) Reset() {
	*x = UnmarshalCompressedArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *UnmarshalCompressedArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *UnmarshalCompressedArgs as a protobuf message.
func (*UnmarshalCompressedArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *UnmarshalCompressedArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UnmarshalCompressedArgs.ProtoReflect.Descriptor instead.
func (*UnmarshalCompressedArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetData returns the data field; safe to call on a nil receiver.
func (x *UnmarshalCompressedArgs) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// P224Args is the protoc-generated Go mapping of the empty
// ngolofuzz.P224Args message (no fields — the fuzzed call takes no
// arguments). Generated code — do not edit by hand.
type P224Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *P224Args) Reset() {
	*x = P224Args{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *P224Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *P224Args as a protobuf message.
func (*P224Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *P224Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use P224Args.ProtoReflect.Descriptor instead.
func (*P224Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// P256Args is the protoc-generated Go mapping of the empty
// ngolofuzz.P256Args message (no fields). Generated code — do not
// edit by hand.
type P256Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *P256Args) Reset() {
	*x = P256Args{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *P256Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *P256Args as a protobuf message.
func (*P256Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *P256Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use P256Args.ProtoReflect.Descriptor instead.
func (*P256Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// P384Args is the protoc-generated Go mapping of the empty
// ngolofuzz.P384Args message (no fields). Generated code — do not
// edit by hand.
type P384Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *P384Args) Reset() {
	*x = P384Args{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *P384Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *P384Args as a protobuf message.
func (*P384Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *P384Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use P384Args.ProtoReflect.Descriptor instead.
func (*P384Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// P521Args is the protoc-generated Go mapping of the empty
// ngolofuzz.P521Args message (no fields). Generated code — do not
// edit by hand.
type P521Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *P521Args) Reset() {
	*x = P521Args{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *P521Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *P521Args as a protobuf message.
func (*P521Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *P521Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use P521Args.ProtoReflect.Descriptor instead.
func (*P521Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// CurveParamsNgdotParamsArgs is the protoc-generated Go mapping of
// the empty ngolofuzz.CurveParamsNgdotParamsArgs message (no fields).
// Generated code — do not edit by hand.
type CurveParamsNgdotParamsArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *CurveParamsNgdotParamsArgs) Reset() {
	*x = CurveParamsNgdotParamsArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *CurveParamsNgdotParamsArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CurveParamsNgdotParamsArgs as a protobuf message.
func (*CurveParamsNgdotParamsArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *CurveParamsNgdotParamsArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CurveParamsNgdotParamsArgs.ProtoReflect.Descriptor instead.
func (*CurveParamsNgdotParamsArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// CurveParamsNgdotIsOnCurveArgs is the protoc-generated Go mapping of
// the ngolofuzz.CurveParamsNgdotIsOnCurveArgs message; X and Y carry
// the `x` and `y` bytes fields. Generated code — do not edit by hand.
type CurveParamsNgdotIsOnCurveArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X []byte `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
	Y []byte `protobuf:"bytes,2,opt,name=y,proto3" json:"y,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *CurveParamsNgdotIsOnCurveArgs) Reset() {
	*x = CurveParamsNgdotIsOnCurveArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *CurveParamsNgdotIsOnCurveArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CurveParamsNgdotIsOnCurveArgs as a protobuf message.
func (*CurveParamsNgdotIsOnCurveArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *CurveParamsNgdotIsOnCurveArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CurveParamsNgdotIsOnCurveArgs.ProtoReflect.Descriptor instead.
func (*CurveParamsNgdotIsOnCurveArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetX returns the x field; safe to call on a nil receiver.
func (x *CurveParamsNgdotIsOnCurveArgs) GetX() []byte {
	if x != nil {
		return x.X
	}
	return nil
}

// GetY returns the y field; safe to call on a nil receiver.
func (x *CurveParamsNgdotIsOnCurveArgs) GetY() []byte {
	if x != nil {
		return x.Y
	}
	return nil
}
// CurveParamsNgdotAddArgs is the protoc-generated Go mapping of the
// ngolofuzz.CurveParamsNgdotAddArgs message; X1/Y1/X2/Y2 carry the
// four bytes fields of the same names. Generated code — do not edit
// by hand.
type CurveParamsNgdotAddArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X1 []byte `protobuf:"bytes,1,opt,name=x1,proto3" json:"x1,omitempty"`
	Y1 []byte `protobuf:"bytes,2,opt,name=y1,proto3" json:"y1,omitempty"`
	X2 []byte `protobuf:"bytes,3,opt,name=x2,proto3" json:"x2,omitempty"`
	Y2 []byte `protobuf:"bytes,4,opt,name=y2,proto3" json:"y2,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *CurveParamsNgdotAddArgs) Reset() {
	*x = CurveParamsNgdotAddArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *CurveParamsNgdotAddArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CurveParamsNgdotAddArgs as a protobuf message.
func (*CurveParamsNgdotAddArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *CurveParamsNgdotAddArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CurveParamsNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*CurveParamsNgdotAddArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetX1 returns the x1 field; safe to call on a nil receiver.
func (x *CurveParamsNgdotAddArgs) GetX1() []byte {
	if x != nil {
		return x.X1
	}
	return nil
}

// GetY1 returns the y1 field; safe to call on a nil receiver.
func (x *CurveParamsNgdotAddArgs) GetY1() []byte {
	if x != nil {
		return x.Y1
	}
	return nil
}

// GetX2 returns the x2 field; safe to call on a nil receiver.
func (x *CurveParamsNgdotAddArgs) GetX2() []byte {
	if x != nil {
		return x.X2
	}
	return nil
}

// GetY2 returns the y2 field; safe to call on a nil receiver.
func (x *CurveParamsNgdotAddArgs) GetY2() []byte {
	if x != nil {
		return x.Y2
	}
	return nil
}
// CurveParamsNgdotDoubleArgs is the protoc-generated Go mapping of
// the ngolofuzz.CurveParamsNgdotDoubleArgs message; X1 and Y1 carry
// the `x1` and `y1` bytes fields. Generated code — do not edit by hand.
type CurveParamsNgdotDoubleArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X1 []byte `protobuf:"bytes,1,opt,name=x1,proto3" json:"x1,omitempty"`
	Y1 []byte `protobuf:"bytes,2,opt,name=y1,proto3" json:"y1,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *CurveParamsNgdotDoubleArgs) Reset() {
	*x = CurveParamsNgdotDoubleArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *CurveParamsNgdotDoubleArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CurveParamsNgdotDoubleArgs as a protobuf message.
func (*CurveParamsNgdotDoubleArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *CurveParamsNgdotDoubleArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CurveParamsNgdotDoubleArgs.ProtoReflect.Descriptor instead.
func (*CurveParamsNgdotDoubleArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// GetX1 returns the x1 field; safe to call on a nil receiver.
func (x *CurveParamsNgdotDoubleArgs) GetX1() []byte {
	if x != nil {
		return x.X1
	}
	return nil
}

// GetY1 returns the y1 field; safe to call on a nil receiver.
func (x *CurveParamsNgdotDoubleArgs) GetY1() []byte {
	if x != nil {
		return x.Y1
	}
	return nil
}
// CurveParamsNgdotScalarMultArgs is the protoc-generated Go mapping
// of the ngolofuzz.CurveParamsNgdotScalarMultArgs message; Bx/By/K
// carry the `Bx`, `By`, and `k` bytes fields. Generated code — do not
// edit by hand.
type CurveParamsNgdotScalarMultArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Bx []byte `protobuf:"bytes,1,opt,name=Bx,proto3" json:"Bx,omitempty"`
	By []byte `protobuf:"bytes,2,opt,name=By,proto3" json:"By,omitempty"`
	K []byte `protobuf:"bytes,3,opt,name=k,proto3" json:"k,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *CurveParamsNgdotScalarMultArgs) Reset() {
	*x = CurveParamsNgdotScalarMultArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *CurveParamsNgdotScalarMultArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CurveParamsNgdotScalarMultArgs as a protobuf message.
func (*CurveParamsNgdotScalarMultArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *CurveParamsNgdotScalarMultArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CurveParamsNgdotScalarMultArgs.ProtoReflect.Descriptor instead.
func (*CurveParamsNgdotScalarMultArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// GetBx returns the Bx field; safe to call on a nil receiver.
func (x *CurveParamsNgdotScalarMultArgs) GetBx() []byte {
	if x != nil {
		return x.Bx
	}
	return nil
}

// GetBy returns the By field; safe to call on a nil receiver.
func (x *CurveParamsNgdotScalarMultArgs) GetBy() []byte {
	if x != nil {
		return x.By
	}
	return nil
}

// GetK returns the k field; safe to call on a nil receiver.
func (x *CurveParamsNgdotScalarMultArgs) GetK() []byte {
	if x != nil {
		return x.K
	}
	return nil
}
// CurveParamsNgdotScalarBaseMultArgs is the protoc-generated Go
// mapping of the ngolofuzz.CurveParamsNgdotScalarBaseMultArgs message;
// K carries the `k` bytes field. Generated code — do not edit by hand.
type CurveParamsNgdotScalarBaseMultArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	K []byte `protobuf:"bytes,1,opt,name=k,proto3" json:"k,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *CurveParamsNgdotScalarBaseMultArgs) Reset() {
	*x = CurveParamsNgdotScalarBaseMultArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *CurveParamsNgdotScalarBaseMultArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CurveParamsNgdotScalarBaseMultArgs as a protobuf message.
func (*CurveParamsNgdotScalarBaseMultArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *CurveParamsNgdotScalarBaseMultArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CurveParamsNgdotScalarBaseMultArgs.ProtoReflect.Descriptor instead.
func (*CurveParamsNgdotScalarBaseMultArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetK returns the k field; safe to call on a nil receiver.
func (x *CurveParamsNgdotScalarBaseMultArgs) GetK() []byte {
	if x != nil {
		return x.K
	}
	return nil
}
// NgoloFuzzOne is the protoc-generated Go mapping of the
// ngolofuzz.NgoloFuzzOne message: a oneof (`item`) holding exactly one
// of the per-call argument messages above. Generated code — do not
// edit by hand.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_GenerateKey
	//	*NgoloFuzzOne_Marshal_
	//	*NgoloFuzzOne_MarshalCompressed
	//	*NgoloFuzzOne_Unmarshal_
	//	*NgoloFuzzOne_UnmarshalCompressed
	//	*NgoloFuzzOne_P224
	//	*NgoloFuzzOne_P256
	//	*NgoloFuzzOne_P384
	//	*NgoloFuzzOne_P521
	//	*NgoloFuzzOne_CurveParamsNgdotParams
	//	*NgoloFuzzOne_CurveParamsNgdotIsOnCurve
	//	*NgoloFuzzOne_CurveParamsNgdotAdd
	//	*NgoloFuzzOne_CurveParamsNgdotDouble
	//	*NgoloFuzzOne_CurveParamsNgdotScalarMult
	//	*NgoloFuzzOne_CurveParamsNgdotScalarBaseMult
	Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NgoloFuzzOne as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// GetItem returns the raw oneof wrapper currently stored in Item, or
// nil when the receiver is nil or no variant is set.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each GetXxx accessor below returns the payload of its oneof variant,
// or nil when the receiver is nil or a different variant is set.
func (x *NgoloFuzzOne) GetGenerateKey() *GenerateKeyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_GenerateKey); ok {
			return x.GenerateKey
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetMarshal_() *MarshalArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Marshal_); ok {
			return x.Marshal_
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetMarshalCompressed() *MarshalCompressedArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_MarshalCompressed); ok {
			return x.MarshalCompressed
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetUnmarshal_() *UnmarshalArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Unmarshal_); ok {
			return x.Unmarshal_
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetUnmarshalCompressed() *UnmarshalCompressedArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_UnmarshalCompressed); ok {
			return x.UnmarshalCompressed
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetP224() *P224Args {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_P224); ok {
			return x.P224
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetP256() *P256Args {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_P256); ok {
			return x.P256
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetP384() *P384Args {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_P384); ok {
			return x.P384
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetP521() *P521Args {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_P521); ok {
			return x.P521
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCurveParamsNgdotParams() *CurveParamsNgdotParamsArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CurveParamsNgdotParams); ok {
			return x.CurveParamsNgdotParams
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCurveParamsNgdotIsOnCurve() *CurveParamsNgdotIsOnCurveArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CurveParamsNgdotIsOnCurve); ok {
			return x.CurveParamsNgdotIsOnCurve
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCurveParamsNgdotAdd() *CurveParamsNgdotAddArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CurveParamsNgdotAdd); ok {
			return x.CurveParamsNgdotAdd
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCurveParamsNgdotDouble() *CurveParamsNgdotDoubleArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CurveParamsNgdotDouble); ok {
			return x.CurveParamsNgdotDouble
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCurveParamsNgdotScalarMult() *CurveParamsNgdotScalarMultArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CurveParamsNgdotScalarMult); ok {
			return x.CurveParamsNgdotScalarMult
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCurveParamsNgdotScalarBaseMult() *CurveParamsNgdotScalarBaseMultArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CurveParamsNgdotScalarBaseMult); ok {
			return x.CurveParamsNgdotScalarBaseMult
		}
	}
	return nil
}
// isNgoloFuzzOne_Item is the sealed marker interface implemented by
// every oneof wrapper type for NgoloFuzzOne.Item.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// One wrapper struct per oneof case; the protobuf tag records each
// case's field number in ngolofuzz.NgoloFuzzOne.
type NgoloFuzzOne_GenerateKey struct {
	GenerateKey *GenerateKeyArgs `protobuf:"bytes,1,opt,name=GenerateKey,proto3,oneof"`
}

type NgoloFuzzOne_Marshal_ struct {
	Marshal_ *MarshalArgs `protobuf:"bytes,2,opt,name=Marshal,proto3,oneof"`
}

type NgoloFuzzOne_MarshalCompressed struct {
	MarshalCompressed *MarshalCompressedArgs `protobuf:"bytes,3,opt,name=MarshalCompressed,proto3,oneof"`
}

type NgoloFuzzOne_Unmarshal_ struct {
	Unmarshal_ *UnmarshalArgs `protobuf:"bytes,4,opt,name=Unmarshal,proto3,oneof"`
}

type NgoloFuzzOne_UnmarshalCompressed struct {
	UnmarshalCompressed *UnmarshalCompressedArgs `protobuf:"bytes,5,opt,name=UnmarshalCompressed,proto3,oneof"`
}

type NgoloFuzzOne_P224 struct {
	P224 *P224Args `protobuf:"bytes,6,opt,name=P224,proto3,oneof"`
}

type NgoloFuzzOne_P256 struct {
	P256 *P256Args `protobuf:"bytes,7,opt,name=P256,proto3,oneof"`
}

type NgoloFuzzOne_P384 struct {
	P384 *P384Args `protobuf:"bytes,8,opt,name=P384,proto3,oneof"`
}

type NgoloFuzzOne_P521 struct {
	P521 *P521Args `protobuf:"bytes,9,opt,name=P521,proto3,oneof"`
}

type NgoloFuzzOne_CurveParamsNgdotParams struct {
	CurveParamsNgdotParams *CurveParamsNgdotParamsArgs `protobuf:"bytes,10,opt,name=CurveParamsNgdotParams,proto3,oneof"`
}

type NgoloFuzzOne_CurveParamsNgdotIsOnCurve struct {
	CurveParamsNgdotIsOnCurve *CurveParamsNgdotIsOnCurveArgs `protobuf:"bytes,11,opt,name=CurveParamsNgdotIsOnCurve,proto3,oneof"`
}

type NgoloFuzzOne_CurveParamsNgdotAdd struct {
	CurveParamsNgdotAdd *CurveParamsNgdotAddArgs `protobuf:"bytes,12,opt,name=CurveParamsNgdotAdd,proto3,oneof"`
}

type NgoloFuzzOne_CurveParamsNgdotDouble struct {
	CurveParamsNgdotDouble *CurveParamsNgdotDoubleArgs `protobuf:"bytes,13,opt,name=CurveParamsNgdotDouble,proto3,oneof"`
}

type NgoloFuzzOne_CurveParamsNgdotScalarMult struct {
	CurveParamsNgdotScalarMult *CurveParamsNgdotScalarMultArgs `protobuf:"bytes,14,opt,name=CurveParamsNgdotScalarMult,proto3,oneof"`
}

type NgoloFuzzOne_CurveParamsNgdotScalarBaseMult struct {
	CurveParamsNgdotScalarBaseMult *CurveParamsNgdotScalarBaseMultArgs `protobuf:"bytes,15,opt,name=CurveParamsNgdotScalarBaseMult,proto3,oneof"`
}

// Empty marker methods satisfying isNgoloFuzzOne_Item.
func (*NgoloFuzzOne_GenerateKey) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Marshal_) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MarshalCompressed) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Unmarshal_) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_UnmarshalCompressed) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_P224) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_P256) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_P384) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_P521) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CurveParamsNgdotParams) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CurveParamsNgdotIsOnCurve) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CurveParamsNgdotAdd) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CurveParamsNgdotDouble) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CurveParamsNgdotScalarMult) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CurveParamsNgdotScalarBaseMult) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the protoc-generated Go mapping of the
// ngolofuzz.NgoloFuzzAny message: a oneof (`item`) over one scalar
// value of each primitive kind. Generated code — do not edit by hand.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NgoloFuzzAny as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// GetItem returns the raw oneof wrapper currently stored in Item, or
// nil when the receiver is nil or no variant is set.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each GetXxxArgs accessor below returns its variant's value, or the
// type's zero value when the receiver is nil or another variant is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}
// isNgoloFuzzAny_Item is the sealed marker interface implemented by
// every oneof wrapper type for NgoloFuzzAny.Item.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// One wrapper struct per oneof case; the protobuf tag records each
// case's field number and wire type in ngolofuzz.NgoloFuzzAny.
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

// Empty marker methods satisfying isNgoloFuzzAny_Item.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the protoc-generated Go mapping of the
// ngolofuzz.NgoloFuzzList message: a repeated sequence of NgoloFuzzOne
// entries (the `list` field). Generated code — do not edit by hand.
type NgoloFuzzList struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-binds its generated message info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NgoloFuzzList as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info in the message state on first use.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}

// GetList returns the list field; safe to call on a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled protoreflect.FileDescriptor for
// ngolofuzz.proto (populated at init by the generated registration code).
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto
// for ngolofuzz.proto, embedded verbatim as a string constant.
// Generated data — do not edit by hand.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"%\n" +
	"\x0fGenerateKeyArgs\x12\x12\n" +
	"\x04rand\x18\x01 \x01(\fR\x04rand\")\n" +
	"\vMarshalArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\fR\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\fR\x01y\"3\n" +
	"\x15MarshalCompressedArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\fR\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\fR\x01y\"#\n" +
	"\rUnmarshalArgs\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\"-\n" +
	"\x17UnmarshalCompressedArgs\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\"\n" +
	"\n" +
	"\bP224Args\"\n" +
	"\n" +
	"\bP256Args\"\n" +
	"\n" +
	"\bP384Args\"\n" +
	"\n" +
	"\bP521Args\"\x1c\n" +
	"\x1aCurveParamsNgdotParamsArgs\";\n" +
	"\x1dCurveParamsNgdotIsOnCurveArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\fR\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\fR\x01y\"Y\n" +
	"\x17CurveParamsNgdotAddArgs\x12\x0e\n" +
	"\x02x1\x18\x01 \x01(\fR\x02x1\x12\x0e\n" +
	"\x02y1\x18\x02 \x01(\fR\x02y1\x12\x0e\n" +
	"\x02x2\x18\x03 \x01(\fR\x02x2\x12\x0e\n" +
	"\x02y2\x18\x04 \x01(\fR\x02y2\"<\n" +
	"\x1aCurveParamsNgdotDoubleArgs\x12\x0e\n" +
	"\x02x1\x18\x01 \x01(\fR\x02x1\x12\x0e\n" +
	"\x02y1\x18\x02 \x01(\fR\x02y1\"N\n" +
	"\x1eCurveParamsNgdotScalarMultArgs\x12\x0e\n" +
	"\x02Bx\x18\x01 \x01(\fR\x02Bx\x12\x0e\n" +
	"\x02By\x18\x02 \x01(\fR\x02By\x12\f\n" +
	"\x01k\x18\x03 \x01(\fR\x01k\"2\n" +
	"\"CurveParamsNgdotScalarBaseMultArgs\x12\f\n" +
	"\x01k\x18\x01 \x01(\fR\x01k\"\x84\t\n" +
	"\fNgoloFuzzOne\x12>\n" +
	"\vGenerateKey\x18\x01 \x01(\v2\x1a.ngolofuzz.GenerateKeyArgsH\x00R\vGenerateKey\x122\n" +
	"\aMarshal\x18\x02 \x01(\v2\x16.ngolofuzz.MarshalArgsH\x00R\aMarshal\x12P\n" +
	"\x11MarshalCompressed\x18\x03 \x01(\v2 .ngolofuzz.MarshalCompressedArgsH\x00R\x11MarshalCompressed\x128\n" +
	"\tUnmarshal\x18\x04 \x01(\v2\x18.ngolofuzz.UnmarshalArgsH\x00R\tUnmarshal\x12V\n" +
	"\x13UnmarshalCompressed\x18\x05 \x01(\v2\".ngolofuzz.UnmarshalCompressedArgsH\x00R\x13UnmarshalCompressed\x12)\n" +
	"\x04P224\x18\x06 \x01(\v2\x13.ngolofuzz.P224ArgsH\x00R\x04P224\x12)\n" +
	"\x04P256\x18\a \x01(\v2\x13.ngolofuzz.P256ArgsH\x00R\x04P256\x12)\n" +
	"\x04P384\x18\b \x01(\v2\x13.ngolofuzz.P384ArgsH\x00R\x04P384\x12)\n" +
	"\x04P521\x18\t \x01(\v2\x13.ngolofuzz.P521ArgsH\x00R\x04P521\x12_\n" +
	"\x16CurveParamsNgdotParams\x18\n" +
	" \x01(\v2%.ngolofuzz.CurveParamsNgdotParamsArgsH\x00R\x16CurveParamsNgdotParams\x12h\n" +
	"\x19CurveParamsNgdotIsOnCurve\x18\v \x01(\v2(.ngolofuzz.CurveParamsNgdotIsOnCurveArgsH\x00R\x19CurveParamsNgdotIsOnCurve\x12V\n" +
	"\x13CurveParamsNgdotAdd\x18\f \x01(\v2\".ngolofuzz.CurveParamsNgdotAddArgsH\x00R\x13CurveParamsNgdotAdd\x12_\n" +
	"\x16CurveParamsNgdotDouble\x18\r \x01(\v2%.ngolofuzz.CurveParamsNgdotDoubleArgsH\x00R\x16CurveParamsNgdotDouble\x12k\n" +
	"\x1aCurveParamsNgdotScalarMult\x18\x0e \x01(\v2).ngolofuzz.CurveParamsNgdotScalarMultArgsH\x00R\x1aCurveParamsNgdotScalarMult\x12w\n" +
	"\x1eCurveParamsNgdotScalarBaseMult\x18\x0f \x01(\v2-.ngolofuzz.CurveParamsNgdotScalarBaseMultArgsH\x00R\x1eCurveParamsNgdotScalarBaseMultB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1cZ\x1a./;fuzz_ng_crypto_ellipticb\x06proto3"
// Lazily-initialized gzip cache for the raw file descriptor; populated
// exactly once by file_ngolofuzz_proto_rawDescGZIP.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor on
// first use and returns the cached compressed bytes on every call. The
// unsafe.Slice over StringData gives a zero-copy []byte view of the
// descriptor constant for the compressor.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime type tables generated by protoc-gen-go: per-message reflection
// info, the Go types backing each descriptor index, and the dependency
// index list that maps field type references onto goTypes entries.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 18)
var file_ngolofuzz_proto_goTypes = []any{
	(*GenerateKeyArgs)(nil),                    // 0: ngolofuzz.GenerateKeyArgs
	(*MarshalArgs)(nil),                        // 1: ngolofuzz.MarshalArgs
	(*MarshalCompressedArgs)(nil),              // 2: ngolofuzz.MarshalCompressedArgs
	(*UnmarshalArgs)(nil),                      // 3: ngolofuzz.UnmarshalArgs
	(*UnmarshalCompressedArgs)(nil),            // 4: ngolofuzz.UnmarshalCompressedArgs
	(*P224Args)(nil),                           // 5: ngolofuzz.P224Args
	(*P256Args)(nil),                           // 6: ngolofuzz.P256Args
	(*P384Args)(nil),                           // 7: ngolofuzz.P384Args
	(*P521Args)(nil),                           // 8: ngolofuzz.P521Args
	(*CurveParamsNgdotParamsArgs)(nil),         // 9: ngolofuzz.CurveParamsNgdotParamsArgs
	(*CurveParamsNgdotIsOnCurveArgs)(nil),      // 10: ngolofuzz.CurveParamsNgdotIsOnCurveArgs
	(*CurveParamsNgdotAddArgs)(nil),            // 11: ngolofuzz.CurveParamsNgdotAddArgs
	(*CurveParamsNgdotDoubleArgs)(nil),         // 12: ngolofuzz.CurveParamsNgdotDoubleArgs
	(*CurveParamsNgdotScalarMultArgs)(nil),     // 13: ngolofuzz.CurveParamsNgdotScalarMultArgs
	(*CurveParamsNgdotScalarBaseMultArgs)(nil), // 14: ngolofuzz.CurveParamsNgdotScalarBaseMultArgs
	(*NgoloFuzzOne)(nil),                       // 15: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                       // 16: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                      // 17: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.GenerateKey:type_name -> ngolofuzz.GenerateKeyArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.Marshal:type_name -> ngolofuzz.MarshalArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.MarshalCompressed:type_name -> ngolofuzz.MarshalCompressedArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.Unmarshal:type_name -> ngolofuzz.UnmarshalArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.UnmarshalCompressed:type_name -> ngolofuzz.UnmarshalCompressedArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.P224:type_name -> ngolofuzz.P224Args
	6,  // 6: ngolofuzz.NgoloFuzzOne.P256:type_name -> ngolofuzz.P256Args
	7,  // 7: ngolofuzz.NgoloFuzzOne.P384:type_name -> ngolofuzz.P384Args
	8,  // 8: ngolofuzz.NgoloFuzzOne.P521:type_name -> ngolofuzz.P521Args
	9,  // 9: ngolofuzz.NgoloFuzzOne.CurveParamsNgdotParams:type_name -> ngolofuzz.CurveParamsNgdotParamsArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.CurveParamsNgdotIsOnCurve:type_name -> ngolofuzz.CurveParamsNgdotIsOnCurveArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.CurveParamsNgdotAdd:type_name -> ngolofuzz.CurveParamsNgdotAddArgs
	12, // 12: ngolofuzz.NgoloFuzzOne.CurveParamsNgdotDouble:type_name -> ngolofuzz.CurveParamsNgdotDoubleArgs
	13, // 13: ngolofuzz.NgoloFuzzOne.CurveParamsNgdotScalarMult:type_name -> ngolofuzz.CurveParamsNgdotScalarMultArgs
	14, // 14: ngolofuzz.NgoloFuzzOne.CurveParamsNgdotScalarBaseMult:type_name -> ngolofuzz.CurveParamsNgdotScalarBaseMultArgs
	15, // 15: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	16, // [16:16] is the sub-list for method output_type
	16, // [16:16] is the sub-list for method input_type
	16, // [16:16] is the sub-list for extension type_name
	16, // [16:16] is the sub-list for extension extendee
	0,  // [0:16] is the sub-list for field type_name
}
// init registers the ngolofuzz descriptor with the protobuf runtime at
// program start.
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init builds the file descriptor and message type
// information. It is idempotent: once File_ngolofuzz_proto is set,
// further calls return immediately.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// msgTypes[15] is NgoloFuzzOne: register its oneof wrapper types so
	// the runtime can decode the "item" oneof.
	file_ngolofuzz_proto_msgTypes[15].OneofWrappers = []any{
		(*NgoloFuzzOne_GenerateKey)(nil),
		(*NgoloFuzzOne_Marshal_)(nil),
		(*NgoloFuzzOne_MarshalCompressed)(nil),
		(*NgoloFuzzOne_Unmarshal_)(nil),
		(*NgoloFuzzOne_UnmarshalCompressed)(nil),
		(*NgoloFuzzOne_P224)(nil),
		(*NgoloFuzzOne_P256)(nil),
		(*NgoloFuzzOne_P384)(nil),
		(*NgoloFuzzOne_P521)(nil),
		(*NgoloFuzzOne_CurveParamsNgdotParams)(nil),
		(*NgoloFuzzOne_CurveParamsNgdotIsOnCurve)(nil),
		(*NgoloFuzzOne_CurveParamsNgdotAdd)(nil),
		(*NgoloFuzzOne_CurveParamsNgdotDouble)(nil),
		(*NgoloFuzzOne_CurveParamsNgdotScalarMult)(nil),
		(*NgoloFuzzOne_CurveParamsNgdotScalarBaseMult)(nil),
	}
	// msgTypes[16] is NgoloFuzzAny: wrapper types for its scalar oneof.
	file_ngolofuzz_proto_msgTypes[16].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	// x anchors reflect.TypeOf so the builder can discover this package's
	// import path.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   18,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the construction-only tables for the GC.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_hmac
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/hmac"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write discards b and reports full success; the fuzzing connection
// never stores written data.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}
// Close marks the connection drained by jumping the read offset to the
// end of the buffer; subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr implementation returned by
// FuzzingConn's address methods.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed fake address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; the in-memory connection cannot time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start of
// the data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helper constructors when a fuzzed target needs them.

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over a copy-free view of a.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to int, preserving order.
// Values outside the int range are truncated by the conversion.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows each int64 in a to uint16, preserving
// order; out-of-range values wrap via the usual integer truncation.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune decodes and returns the first rune of s, or NUL for an empty
// string. The range loop performs the UTF-8 decode; only the first
// iteration executes.
func GetRune(s string) rune {
	for _, first := range s {
		return first
	}
	return 0
}
// FuzzNG_valid is the fuzz entry point for corpora known to hold valid
// protobuf: a decode failure indicates a harness bug and panics.
// Returns FuzzNG_List's result (1 = executed, 0 = rejected).
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow only panic(string) values (expected library aborts);
	// runtime errors such as nil dereference or index out of range are
	// re-raised so the fuzzer reports them as crashes.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	// Reduce accumulated garbage between inputs.
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzz entry point for raw inputs that may not be
// valid protobuf: undecodable inputs are silently rejected (return 0)
// rather than treated as harness bugs.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow only panic(string) values (expected library aborts); any
	// other panic is re-raised so the fuzzer reports it as a crash.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	// Reduce accumulated garbage between inputs.
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets the decoded call list, performing one
// crypto/hmac API call per item. On the first invocation it optionally
// writes a Go-source reproducer to the file named by the
// FUZZ_NG_REPRODUCER environment variable. Returns 1 when the list was
// executed, 0 when it was truncated for being too long.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// l is the item index: cap per-input work at 4096 calls.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Equal:
			hmac.Equal(a.Equal.Mac1, a.Equal.Mac2)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the calls encoded in
// gen to w, one statement per list item.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Equal:
			w.WriteString(fmt.Sprintf("hmac.Equal(%#+v, %#+v)\n", a.Equal.Mac1, a.Equal.Mac2))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_hmac
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// EqualArgs carries the two MAC operands passed to hmac.Equal
// (generated from the EqualArgs message in ngolofuzz.proto).
type EqualArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Mac1 is the first MAC operand (proto field 1).
	Mac1 []byte `protobuf:"bytes,1,opt,name=mac1,proto3" json:"mac1,omitempty"`
	// Mac2 is the second MAC operand (proto field 2).
	Mac2          []byte `protobuf:"bytes,2,opt,name=mac2,proto3" json:"mac2,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
func (x *EqualArgs) Reset() {
*x = EqualArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EqualArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EqualArgs) ProtoMessage() {}
func (x *EqualArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EqualArgs.ProtoReflect.Descriptor instead.
func (*EqualArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *EqualArgs) GetMac1() []byte {
if x != nil {
return x.Mac1
}
return nil
}
func (x *EqualArgs) GetMac2() []byte {
if x != nil {
return x.Mac2
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Equal
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetEqual() *EqualArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Equal); ok {
return x.Equal
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Equal struct {
Equal *EqualArgs `protobuf:"bytes,1,opt,name=Equal,proto3,oneof"`
}
func (*NgoloFuzzOne_Equal) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"3\n" +
"\tEqualArgs\x12\x12\n" +
"\x04mac1\x18\x01 \x01(\fR\x04mac1\x12\x12\n" +
"\x04mac2\x18\x02 \x01(\fR\x04mac2\"D\n" +
"\fNgoloFuzzOne\x12,\n" +
"\x05Equal\x18\x01 \x01(\v2\x14.ngolofuzz.EqualArgsH\x00R\x05EqualB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x18Z\x16./;fuzz_ng_crypto_hmacb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_ngolofuzz_proto_goTypes = []any{
(*EqualArgs)(nil), // 0: ngolofuzz.EqualArgs
(*NgoloFuzzOne)(nil), // 1: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 2: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 3: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.Equal:type_name -> ngolofuzz.EqualArgs
1, // 1: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[1].OneofWrappers = []any{
(*NgoloFuzzOne_Equal)(nil),
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_md5
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/md5"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
//TODO only add these functions if needed
func CreateBigInt(a []byte) *big.Int {
r := new(big.Int)
r.SetBytes(a)
return r
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
func ConvertIntArray(a []int64) []int {
r := make([]int, len(a))
for i := range a {
r[i] = int(a[i])
}
return r
}
func ConvertUint16Array(a []int64) []uint16 {
r := make([]uint16, len(a))
for i := range a {
r[i] = uint16(a[i])
}
return r
}
func GetRune(s string) rune {
for _, c := range s {
return c
}
return '\x00'
}
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets the decoded call list, performing one
// crypto/md5 API call per item. On the first invocation it optionally
// writes a Go-source reproducer to the file named by the
// FUZZ_NG_REPRODUCER environment variable. Returns 1 when the list was
// executed, 0 when it was truncated for being too long.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// l is the item index: cap per-input work at 4096 calls.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			md5.New()
		case *NgoloFuzzOne_Sum:
			md5.Sum(a.Sum.Data)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the calls encoded in
// gen to w, one statement per list item.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			// Fix: the constant line needs no formatting; Sprintf with a
			// format string and no verbs/args is a go vet / staticcheck
			// (S1039) violation and an avoidable allocation.
			w.WriteString("md5.New()\n")
		case *NgoloFuzzOne_Sum:
			w.WriteString(fmt.Sprintf("md5.Sum(%#+v)\n", a.Sum.Data))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_md5
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
*x = NewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type SumArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SumArgs) Reset() {
*x = SumArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SumArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SumArgs) ProtoMessage() {}
func (x *SumArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SumArgs.ProtoReflect.Descriptor instead.
func (*SumArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *SumArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_New
// *NgoloFuzzOne_Sum
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNew() *NewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
return x.New
}
}
return nil
}
func (x *NgoloFuzzOne) GetSum() *SumArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum); ok {
return x.Sum
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_New struct {
New *NewArgs `protobuf:"bytes,1,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_Sum struct {
Sum *SumArgs `protobuf:"bytes,2,opt,name=Sum,proto3,oneof"`
}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sum) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\t\n" +
"\aNewArgs\"\x1d\n" +
"\aSumArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"f\n" +
"\fNgoloFuzzOne\x12&\n" +
"\x03New\x18\x01 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12&\n" +
"\x03Sum\x18\x02 \x01(\v2\x12.ngolofuzz.SumArgsH\x00R\x03SumB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x17Z\x15./;fuzz_ng_crypto_md5b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_ngolofuzz_proto_goTypes = []any{
(*NewArgs)(nil), // 0: ngolofuzz.NewArgs
(*SumArgs)(nil), // 1: ngolofuzz.SumArgs
(*NgoloFuzzOne)(nil), // 2: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 3: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 4: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
1, // 1: ngolofuzz.NgoloFuzzOne.Sum:type_name -> ngolofuzz.SumArgs
2, // 2: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_Sum)(nil),
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_rand
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/rand"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub: reads are served from a
// fixed byte buffer and writes are discarded. It lets fuzzed code that
// expects a network connection run deterministically on fuzzer input.
type FuzzingConn struct {
	buf    []byte // data returned by successive Read calls
	offset int    // number of bytes of buf already consumed
}

// Read copies the next chunk of buffered data into b and reports io.EOF
// once the buffer is exhausted.
//
// Fix: the original compared len(b) against len(c.buf)+c.offset instead
// of the remaining byte count (len(c.buf)-c.offset), so a partial read
// near the end of the buffer advanced offset by len(b) and reported more
// bytes read than were actually copied. Using copy's return value (the
// minimum of the two lengths) is both correct and simpler.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write pretends to send b and reports full success.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr for FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps a as the read buffer of a new FuzzingConn.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO: generate these helper functions only when the harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer and returns
// it as a *big.Int (zero for an empty or nil slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader so fuzzed APIs that
// take a *bufio.Reader can consume raw fuzzer bytes.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray converts each int64 element to int (truncating on
// 32-bit platforms) and returns the resulting slice of equal length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows each int64 element to uint16 (value taken
// modulo 2^16) and returns the resulting slice of equal length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or the NUL rune ('\x00') when s
// is empty. An invalid UTF-8 prefix decodes to the replacement rune,
// exactly as ranging over the string would.
func GetRune(s string) rune {
	rs := []rune(s)
	if len(rs) == 0 {
		return '\x00'
	}
	return rs[0]
}
// FuzzNG_valid is the fuzz entry point for inputs known to be valid
// protobuf (e.g. produced by a protobuf-aware mutator); a decode failure
// here indicates a harness bug and therefore panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow panics whose value is a string (presumably raised by the
	// package under test on bad input); any other panic value is
	// re-raised so the fuzzer reports it.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the fuzz entry point for raw (possibly invalid)
// input: undecodable data is simply rejected with coverage score 0.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow panics whose value is a string (presumably raised by the
	// package under test); any other panic value is re-raised.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the decoded sequence of calls against crypto/rand.
// It returns 1 when the whole sequence ran, 0 when it was cut short
// (error result or over-long list). On the first invocation, if
// FUZZ_NG_REPRODUCER names a file, a textual reproducer of this input is
// written there.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the number of replayed calls to keep iterations fast.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Read:
			_, r1 := rand.Read(a.Read.B)
			if r1 != nil{
				// Touch the error value, then stop replaying.
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Text:
			rand.Text()
		case *NgoloFuzzOne_Prime:
			arg0 := bytes.NewReader(a.Prime.R)
			arg1 := int(a.Prime.Bits)
			// Modulo keeps the requested bit length below 0x10001 so
			// Prime cannot be asked for an absurdly large prime.
			_, r1 := rand.Prime(arg0, arg1 % 0x10001)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Int:
			arg0 := bytes.NewReader(a.Int.Rand)
			arg1 := CreateBigInt(a.Int.Max)
			_, r1 := rand.Int(arg0, arg1)
			if r1 != nil{
				r1.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a human-readable, Go-like reproducer of the call
// sequence in gen to w, one line per replayed crypto/rand call. Write
// errors are ignored; the reproducer output is best-effort.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Read:
			w.WriteString(fmt.Sprintf("rand.Read(%#+v)\n", a.Read.B))
		case *NgoloFuzzOne_Text:
			w.WriteString(fmt.Sprintf("rand.Text()\n"))
		case *NgoloFuzzOne_Prime:
			w.WriteString(fmt.Sprintf("rand.Prime(bytes.NewReader(%#+v), int(%#+v) %% 0x10001)\n", a.Prime.R, a.Prime.Bits))
		case *NgoloFuzzOne_Int:
			w.WriteString(fmt.Sprintf("rand.Int(bytes.NewReader(%#+v), CreateBigInt(%#+v))\n", a.Int.Rand, a.Int.Max))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_rand
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ReadArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReadArgs) Reset() {
*x = ReadArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReadArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadArgs) ProtoMessage() {}
func (x *ReadArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadArgs.ProtoReflect.Descriptor instead.
func (*ReadArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *ReadArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
type TextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TextArgs) Reset() {
*x = TextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TextArgs) ProtoMessage() {}
func (x *TextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TextArgs.ProtoReflect.Descriptor instead.
func (*TextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type PrimeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
Bits int64 `protobuf:"varint,2,opt,name=bits,proto3" json:"bits,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PrimeArgs) Reset() {
*x = PrimeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PrimeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PrimeArgs) ProtoMessage() {}
func (x *PrimeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PrimeArgs.ProtoReflect.Descriptor instead.
func (*PrimeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *PrimeArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
func (x *PrimeArgs) GetBits() int64 {
if x != nil {
return x.Bits
}
return 0
}
type IntArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Rand []byte `protobuf:"bytes,1,opt,name=rand,proto3" json:"rand,omitempty"`
Max []byte `protobuf:"bytes,2,opt,name=max,proto3" json:"max,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntArgs) Reset() {
*x = IntArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntArgs) ProtoMessage() {}
func (x *IntArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntArgs.ProtoReflect.Descriptor instead.
func (*IntArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *IntArgs) GetRand() []byte {
if x != nil {
return x.Rand
}
return nil
}
func (x *IntArgs) GetMax() []byte {
if x != nil {
return x.Max
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Read
// *NgoloFuzzOne_Text
// *NgoloFuzzOne_Prime
// *NgoloFuzzOne_Int
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetRead() *ReadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Read); ok {
return x.Read
}
}
return nil
}
func (x *NgoloFuzzOne) GetText() *TextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Text); ok {
return x.Text
}
}
return nil
}
func (x *NgoloFuzzOne) GetPrime() *PrimeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Prime); ok {
return x.Prime
}
}
return nil
}
func (x *NgoloFuzzOne) GetInt() *IntArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Int); ok {
return x.Int
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Read struct {
Read *ReadArgs `protobuf:"bytes,1,opt,name=Read,proto3,oneof"`
}
type NgoloFuzzOne_Text struct {
Text *TextArgs `protobuf:"bytes,2,opt,name=Text,proto3,oneof"`
}
type NgoloFuzzOne_Prime struct {
Prime *PrimeArgs `protobuf:"bytes,3,opt,name=Prime,proto3,oneof"`
}
type NgoloFuzzOne_Int struct {
Int *IntArgs `protobuf:"bytes,4,opt,name=Int,proto3,oneof"`
}
func (*NgoloFuzzOne_Read) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Text) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Prime) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Int) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x18\n" +
"\bReadArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"\n" +
"\n" +
"\bTextArgs\"-\n" +
"\tPrimeArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\x12\x12\n" +
"\x04bits\x18\x02 \x01(\x03R\x04bits\"/\n" +
"\aIntArgs\x12\x12\n" +
"\x04rand\x18\x01 \x01(\fR\x04rand\x12\x10\n" +
"\x03max\x18\x02 \x01(\fR\x03max\"\xc2\x01\n" +
"\fNgoloFuzzOne\x12)\n" +
"\x04Read\x18\x01 \x01(\v2\x13.ngolofuzz.ReadArgsH\x00R\x04Read\x12)\n" +
"\x04Text\x18\x02 \x01(\v2\x13.ngolofuzz.TextArgsH\x00R\x04Text\x12,\n" +
"\x05Prime\x18\x03 \x01(\v2\x14.ngolofuzz.PrimeArgsH\x00R\x05Prime\x12&\n" +
"\x03Int\x18\x04 \x01(\v2\x12.ngolofuzz.IntArgsH\x00R\x03IntB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x18Z\x16./;fuzz_ng_crypto_randb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_ngolofuzz_proto_goTypes = []any{
(*ReadArgs)(nil), // 0: ngolofuzz.ReadArgs
(*TextArgs)(nil), // 1: ngolofuzz.TextArgs
(*PrimeArgs)(nil), // 2: ngolofuzz.PrimeArgs
(*IntArgs)(nil), // 3: ngolofuzz.IntArgs
(*NgoloFuzzOne)(nil), // 4: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 5: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 6: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.Read:type_name -> ngolofuzz.ReadArgs
1, // 1: ngolofuzz.NgoloFuzzOne.Text:type_name -> ngolofuzz.TextArgs
2, // 2: ngolofuzz.NgoloFuzzOne.Prime:type_name -> ngolofuzz.PrimeArgs
3, // 3: ngolofuzz.NgoloFuzzOne.Int:type_name -> ngolofuzz.IntArgs
4, // 4: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
5, // [5:5] is the sub-list for method output_type
5, // [5:5] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
(*NgoloFuzzOne_Read)(nil),
(*NgoloFuzzOne_Text)(nil),
(*NgoloFuzzOne_Prime)(nil),
(*NgoloFuzzOne_Int)(nil),
}
file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_rc4
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/rc4"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub: reads come from a fixed
// byte buffer and writes are discarded, so fuzzed code that expects a
// network connection runs deterministically on fuzzer input.
type FuzzingConn struct {
	buf    []byte // data served by successive Read calls
	offset int    // bytes of buf already consumed
}

// Read copies the next chunk of buffered data into b and reports io.EOF
// once the buffer is exhausted.
//
// Fix: the original branch condition len(b) < len(c.buf)+c.offset used
// the wrong sign — it should compare against the remaining bytes
// (len(c.buf)-c.offset). Near the end of the buffer it advanced offset
// by len(b) and returned a count larger than the bytes actually copied.
// copy already returns min(len(b), remaining), which is all we need.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write pretends to send b and reports full success.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr for FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps a as the read buffer of a new FuzzingConn.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO: generate these helper functions only when the harness actually needs them.
// CreateBigInt builds a *big.Int from big-endian unsigned bytes; an
// empty or nil slice yields zero.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a as a *bufio.Reader for fuzzed APIs that
// consume buffered input.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray maps a slice of int64 to a same-length slice of int
// (truncating on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array maps a slice of int64 to a same-length slice of
// uint16, taking each value modulo 2^16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune yields the first rune of s, or NUL ('\x00') for an empty
// string; an invalid UTF-8 prefix decodes to the replacement rune, just
// as a range loop over the string would produce.
func GetRune(s string) rune {
	rs := []rune(s)
	if len(rs) == 0 {
		return '\x00'
	}
	return rs[0]
}
// FuzzNG_valid is the fuzz entry point for inputs known to be valid
// protobuf (e.g. produced by a protobuf-aware mutator); a decode failure
// here indicates a harness bug and therefore panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow panics whose value is a string (presumably raised by the
	// package under test on bad input); any other panic value is
	// re-raised so the fuzzer reports it.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the fuzz entry point for raw (possibly invalid)
// input: undecodable data is simply rejected with coverage score 0.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow panics whose value is a string (presumably raised by the
	// package under test); any other panic value is re-raised.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the decoded call sequence against crypto/rc4.
// Ciphers produced by rc4.NewCipher are collected in CipherResults and
// later method calls are dispatched to them round-robin. Returns 1 when
// the whole sequence ran, 0 when it was cut short. On the first
// invocation, if FUZZ_NG_REPRODUCER names a file, a textual reproducer
// of this input is written there.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var CipherResults []*rc4.Cipher
	CipherResultsIndex := 0
	for l := range gen.List {
		// Cap the number of replayed calls to keep iterations fast.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewCipher:
			r0, r1 := rc4.NewCipher(a.NewCipher.Key)
			if r0 != nil{
				CipherResults = append(CipherResults, r0)
			}
			if r1 != nil{
				// Touch the error value, then stop replaying.
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_CipherNgdotReset:
			// Method calls are skipped until at least one Cipher exists.
			if len(CipherResults) == 0 {
				continue
			}
			arg0 := CipherResults[CipherResultsIndex]
			CipherResultsIndex = (CipherResultsIndex + 1) % len(CipherResults)
			arg0.Reset()
		case *NgoloFuzzOne_CipherNgdotXORKeyStream:
			if len(CipherResults) == 0 {
				continue
			}
			arg0 := CipherResults[CipherResultsIndex]
			CipherResultsIndex = (CipherResultsIndex + 1) % len(CipherResults)
			// Dst is allocated at twice len(Src) so it is always large
			// enough to receive the keystream output.
			a.CipherNgdotXORKeyStream.Dst = make([]byte, 2*len(a.CipherNgdotXORKeyStream.Src))
			arg0.XORKeyStream(a.CipherNgdotXORKeyStream.Dst, a.CipherNgdotXORKeyStream.Src)
		}
	}
	return 1
}
// PrintNG_List writes a human-readable, Go-like reproducer of the call
// sequence in gen to w. It mirrors FuzzNG_List's round-robin receiver
// selection so the printed CipherN variables line up with the replay.
// Write errors are ignored; the output is best-effort.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	CipherNb := 0
	CipherResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewCipher:
			w.WriteString(fmt.Sprintf("Cipher%d, _ := rc4.NewCipher(%#+v)\n", CipherNb, a.NewCipher.Key))
			CipherNb = CipherNb + 1
		case *NgoloFuzzOne_CipherNgdotReset:
			// Skip method calls that FuzzNG_List would also skip.
			if CipherNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Cipher%d.Reset()\n", CipherResultsIndex))
			CipherResultsIndex = (CipherResultsIndex + 1) % CipherNb
		case *NgoloFuzzOne_CipherNgdotXORKeyStream:
			if CipherNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Cipher%d.XORKeyStream(%#+v, %#+v)\n", CipherResultsIndex, a.CipherNgdotXORKeyStream.Dst, a.CipherNgdotXORKeyStream.Src))
			CipherResultsIndex = (CipherResultsIndex + 1) % CipherNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_rc4
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewCipherArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewCipherArgs) Reset() {
*x = NewCipherArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewCipherArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewCipherArgs) ProtoMessage() {}
func (x *NewCipherArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewCipherArgs.ProtoReflect.Descriptor instead.
func (*NewCipherArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewCipherArgs) GetKey() []byte {
if x != nil {
return x.Key
}
return nil
}
type CipherNgdotResetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CipherNgdotResetArgs) Reset() {
*x = CipherNgdotResetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CipherNgdotResetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CipherNgdotResetArgs) ProtoMessage() {}
func (x *CipherNgdotResetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CipherNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*CipherNgdotResetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type CipherNgdotXORKeyStreamArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
Src []byte `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CipherNgdotXORKeyStreamArgs) Reset() {
*x = CipherNgdotXORKeyStreamArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CipherNgdotXORKeyStreamArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CipherNgdotXORKeyStreamArgs) ProtoMessage() {}
func (x *CipherNgdotXORKeyStreamArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CipherNgdotXORKeyStreamArgs.ProtoReflect.Descriptor instead.
func (*CipherNgdotXORKeyStreamArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *CipherNgdotXORKeyStreamArgs) GetDst() []byte {
if x != nil {
return x.Dst
}
return nil
}
func (x *CipherNgdotXORKeyStreamArgs) GetSrc() []byte {
if x != nil {
return x.Src
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewCipher
// *NgoloFuzzOne_CipherNgdotReset
// *NgoloFuzzOne_CipherNgdotXORKeyStream
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewCipher() *NewCipherArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewCipher); ok {
return x.NewCipher
}
}
return nil
}
func (x *NgoloFuzzOne) GetCipherNgdotReset() *CipherNgdotResetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CipherNgdotReset); ok {
return x.CipherNgdotReset
}
}
return nil
}
func (x *NgoloFuzzOne) GetCipherNgdotXORKeyStream() *CipherNgdotXORKeyStreamArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CipherNgdotXORKeyStream); ok {
return x.CipherNgdotXORKeyStream
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewCipher struct {
NewCipher *NewCipherArgs `protobuf:"bytes,1,opt,name=NewCipher,proto3,oneof"`
}
type NgoloFuzzOne_CipherNgdotReset struct {
CipherNgdotReset *CipherNgdotResetArgs `protobuf:"bytes,2,opt,name=CipherNgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_CipherNgdotXORKeyStream struct {
CipherNgdotXORKeyStream *CipherNgdotXORKeyStreamArgs `protobuf:"bytes,3,opt,name=CipherNgdotXORKeyStream,proto3,oneof"`
}
func (*NgoloFuzzOne_NewCipher) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CipherNgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CipherNgdotXORKeyStream) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a protoc-generated message carrying exactly one primitive
// value (double/int64/bool/string/bytes) selected through the Item oneof.
// Generated code — do not edit by hand.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds it to its
// generated type info (message index 4 of ngolofuzz.proto).
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetItem returns whichever oneof wrapper is currently set, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetDoubleArgs returns the double value, or 0 when another case is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

// GetInt64Args returns the int64 value, or 0 when another case is set.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

// GetBoolArgs returns the bool value, or false when another case is set.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

// GetStringArgs returns the string value, or "" when another case is set.
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

// GetBytesArgs returns the bytes value, or nil when another case is set.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every oneof
// wrapper of NgoloFuzzAny.Item.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}

// NgoloFuzzList is the top-level fuzzing message: an ordered list of API
// calls (NgoloFuzzOne) to replay.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 5).
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetList returns the call list, or nil on a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the parsed file descriptor for ngolofuzz.proto,
// populated once by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto. The bytes are generated — do not edit.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"!\n" +
	"\rNewCipherArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\fR\x03key\"\x16\n" +
	"\x14CipherNgdotResetArgs\"A\n" +
	"\x1bCipherNgdotXORKeyStreamArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"\x83\x02\n" +
	"\fNgoloFuzzOne\x128\n" +
	"\tNewCipher\x18\x01 \x01(\v2\x18.ngolofuzz.NewCipherArgsH\x00R\tNewCipher\x12M\n" +
	"\x10CipherNgdotReset\x18\x02 \x01(\v2\x1f.ngolofuzz.CipherNgdotResetArgsH\x00R\x10CipherNgdotReset\x12b\n" +
	"\x17CipherNgdotXORKeyStream\x18\x03 \x01(\v2&.ngolofuzz.CipherNgdotXORKeyStreamArgsH\x00R\x17CipherNgdotXORKeyStreamB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x17Z\x15./;fuzz_ng_crypto_rc4b\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzips the raw descriptor exactly once and
// returns the cached bytes; used by the deprecated Descriptor methods.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}

// Per-message runtime type info; indices match the goTypes slice below.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 6)

// file_ngolofuzz_proto_goTypes maps descriptor indices to Go types.
var file_ngolofuzz_proto_goTypes = []any{
	(*NewCipherArgs)(nil),               // 0: ngolofuzz.NewCipherArgs
	(*CipherNgdotResetArgs)(nil),        // 1: ngolofuzz.CipherNgdotResetArgs
	(*CipherNgdotXORKeyStreamArgs)(nil), // 2: ngolofuzz.CipherNgdotXORKeyStreamArgs
	(*NgoloFuzzOne)(nil),                // 3: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                // 4: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),               // 5: ngolofuzz.NgoloFuzzList
}

// file_ngolofuzz_proto_depIdxs records type dependencies between messages.
var file_ngolofuzz_proto_depIdxs = []int32{
	0, // 0: ngolofuzz.NgoloFuzzOne.NewCipher:type_name -> ngolofuzz.NewCipherArgs
	1, // 1: ngolofuzz.NgoloFuzzOne.CipherNgdotReset:type_name -> ngolofuzz.CipherNgdotResetArgs
	2, // 2: ngolofuzz.NgoloFuzzOne.CipherNgdotXORKeyStream:type_name -> ngolofuzz.CipherNgdotXORKeyStreamArgs
	3, // 3: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	4, // [4:4] is the sub-list for method output_type
	4, // [4:4] is the sub-list for method input_type
	4, // [4:4] is the sub-list for extension type_name
	4, // [4:4] is the sub-list for extension extendee
	0, // [0:4] is the sub-list for field type_name
}

func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the generated file and its message
// types with the protobuf runtime; it is idempotent.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Declare the oneof wrapper types so the runtime can (de)serialize them.
	file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
		(*NgoloFuzzOne_NewCipher)(nil),
		(*NgoloFuzzOne_CipherNgdotReset)(nil),
		(*NgoloFuzzOne_CipherNgdotXORKeyStream)(nil),
	}
	file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   6,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release init-only tables so they can be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_rsa
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/rsa"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a placeholder net.Addr implementation reported by
// FuzzingConn for both ends of the fake connection.
type FuzzingAddr struct{}

// Network names the synthetic network of the fuzzing address.
func (*FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String names the synthetic endpoint of the fuzzing address.
func (*FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: in-memory fuzzing reads never block.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps the fuzz-provided bytes a in a FuzzingConn whose
// reads serve exactly those bytes.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the fuzz-provided bytes a through a buffered
// reader, for APIs that consume a *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a slice of int64 values to a slice of int,
// truncating each element on 32-bit platforms.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows a slice of int64 values to a slice of uint16,
// truncating each element to its low 16 bits.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the start of s decodes to utf8.RuneError, matching a
// range loop over the string.
func GetRune(s string) rune {
	var first rune
	for _, r := range s {
		first = r
		break
	}
	return first
}
// PublicKeyNewFromFuzz converts a fuzz-generated PublicKeyStruct into an
// rsa.PublicKey, returning nil when no struct was supplied. N is decoded as
// big-endian bytes and E is truncated to int.
func PublicKeyNewFromFuzz(p *PublicKeyStruct) *rsa.PublicKey {
	if p == nil {
		return nil
	}
	pub := &rsa.PublicKey{}
	pub.N = CreateBigInt(p.N)
	pub.E = int(p.E)
	return pub
}
// FuzzNG_valid runs one fuzzing iteration on data that is expected to be a
// valid protobuf encoding of NgoloFuzzList; it panics loudly if decoding
// fails (the corpus generator guarantees validity).
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the exercised API. The recover is
	// deliberately installed AFTER the unmarshal check above, so a decode
	// failure still aborts; non-string panics are re-raised for the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf

// FuzzNG_unsure is the entry point for raw (non-LPM) fuzzing: inputs that do
// not decode as NgoloFuzzList are simply rejected instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Same recovery policy as FuzzNG_valid: tolerate string panics from the
	// exercised API, re-raise anything else.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the decoded list of API calls against crypto/rsa.
// Private keys produced by GenerateKey / GenerateMultiPrimeKey are collected
// in PrivateKeyResults and fed round-robin into the calls that consume a
// *rsa.PrivateKey. Returns 1 when the whole list was executed, 0 when an API
// call failed and the iteration was cut short.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// Optionally dump a Go reproducer of this input, once per process.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var PrivateKeyResults []*rsa.PrivateKey
	PrivateKeyResultsIndex := 0
	for l := range gen.List {
		// Bound the amount of work a single input can trigger.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_DecryptPKCS1V15:
			arg0 := bytes.NewReader(a.DecryptPKCS1V15.Random)
			if len(PrivateKeyResults) == 0 {
				continue
			}
			arg1 := PrivateKeyResults[PrivateKeyResultsIndex]
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
			_, r1 := rsa.DecryptPKCS1v15(arg0, arg1, a.DecryptPKCS1V15.Ciphertext)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_DecryptPKCS1V15SessionKey:
			arg0 := bytes.NewReader(a.DecryptPKCS1V15SessionKey.Random)
			if len(PrivateKeyResults) == 0 {
				continue
			}
			arg1 := PrivateKeyResults[PrivateKeyResultsIndex]
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
			r0 := rsa.DecryptPKCS1v15SessionKey(arg0, arg1, a.DecryptPKCS1V15SessionKey.Ciphertext, a.DecryptPKCS1V15SessionKey.Key)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_PublicKeyNgdotSize:
			arg0 := PublicKeyNewFromFuzz(a.PublicKeyNgdotSize.Pub)
			if arg0 == nil {
				continue
			}
			arg0.Size()
		case *NgoloFuzzOne_PrivateKeyNgdotPublic:
			if len(PrivateKeyResults) == 0 {
				continue
			}
			arg0 := PrivateKeyResults[PrivateKeyResultsIndex]
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
			arg0.Public()
		case *NgoloFuzzOne_PrivateKeyNgdotValidate:
			if len(PrivateKeyResults) == 0 {
				continue
			}
			arg0 := PrivateKeyResults[PrivateKeyResultsIndex]
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
			r0 := arg0.Validate()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_GenerateKey:
			arg0 := bytes.NewReader(a.GenerateKey.Random)
			arg1 := int(a.GenerateKey.Bits)
			r0, r1 := rsa.GenerateKey(arg0, arg1%0x10001)
			if r1 != nil {
				r1.Error()
				return 0
			}
			// BUG FIX: the generated key was previously discarded
			// (`_, r1 := ...`), so PrivateKeyResults stayed empty and every
			// PrivateKey-consuming case above was dead code.
			PrivateKeyResults = append(PrivateKeyResults, r0)
		case *NgoloFuzzOne_GenerateMultiPrimeKey:
			arg0 := bytes.NewReader(a.GenerateMultiPrimeKey.Random)
			arg1 := int(a.GenerateMultiPrimeKey.Nprimes)
			arg2 := int(a.GenerateMultiPrimeKey.Bits)
			r0, r1 := rsa.GenerateMultiPrimeKey(arg0, arg1%0x10001, arg2%0x10001)
			if r1 != nil {
				r1.Error()
				return 0
			}
			// BUG FIX: collect the key, see GenerateKey above.
			PrivateKeyResults = append(PrivateKeyResults, r0)
		case *NgoloFuzzOne_PrivateKeyNgdotPrecompute:
			if len(PrivateKeyResults) == 0 {
				continue
			}
			arg0 := PrivateKeyResults[PrivateKeyResultsIndex]
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % len(PrivateKeyResults)
			arg0.Precompute()
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go-like reproducer of the call list
// to w, mirroring the replay logic of FuzzNG_List.
//
// NOTE(review): PrivateKeyNb is initialized to 0 and never incremented
// anywhere in this function (the GenerateKey / GenerateMultiPrimeKey cases
// do not bump it), so every PrivateKey-consuming branch below is skipped and
// reproducers never show those calls — verify against the generator's
// intended output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	PrivateKeyNb := 0
	PrivateKeyResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_DecryptPKCS1V15:
			if PrivateKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("rsa.DecryptPKCS1v15(bytes.NewReader(%#+v), PrivateKey%d, %#+v)\n", a.DecryptPKCS1V15.Random, (PrivateKeyResultsIndex+0)%PrivateKeyNb, a.DecryptPKCS1V15.Ciphertext))
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
		case *NgoloFuzzOne_DecryptPKCS1V15SessionKey:
			if PrivateKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("rsa.DecryptPKCS1v15SessionKey(bytes.NewReader(%#+v), PrivateKey%d, %#+v, %#+v)\n", a.DecryptPKCS1V15SessionKey.Random, (PrivateKeyResultsIndex+0)%PrivateKeyNb, a.DecryptPKCS1V15SessionKey.Ciphertext, a.DecryptPKCS1V15SessionKey.Key))
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
		case *NgoloFuzzOne_PublicKeyNgdotSize:
			w.WriteString(fmt.Sprintf("PublicKeyNewFromFuzz(%#+v).Size()\n", a.PublicKeyNgdotSize.Pub))
		case *NgoloFuzzOne_PrivateKeyNgdotPublic:
			if PrivateKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PrivateKey%d.Public()\n", PrivateKeyResultsIndex))
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
		case *NgoloFuzzOne_PrivateKeyNgdotValidate:
			if PrivateKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PrivateKey%d.Validate()\n", PrivateKeyResultsIndex))
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
		case *NgoloFuzzOne_GenerateKey:
			w.WriteString(fmt.Sprintf("rsa.GenerateKey(bytes.NewReader(%#+v), int(%#+v) %% 0x10001)\n", a.GenerateKey.Random, a.GenerateKey.Bits))
		case *NgoloFuzzOne_GenerateMultiPrimeKey:
			w.WriteString(fmt.Sprintf("rsa.GenerateMultiPrimeKey(bytes.NewReader(%#+v), int(%#+v) %% 0x10001, int(%#+v) %% 0x10001)\n", a.GenerateMultiPrimeKey.Random, a.GenerateMultiPrimeKey.Nprimes, a.GenerateMultiPrimeKey.Bits))
		case *NgoloFuzzOne_PrivateKeyNgdotPrecompute:
			if PrivateKeyNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PrivateKey%d.Precompute()\n", PrivateKeyResultsIndex))
			PrivateKeyResultsIndex = (PrivateKeyResultsIndex + 1) % PrivateKeyNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_rsa
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: fail the build if the protoimpl runtime is older or
// newer than this generated code supports.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// PublicKeyStruct is the fuzzer-side encoding of an RSA public key: N holds
// the big-endian modulus bytes and E the public exponent. Generated code —
// do not edit.
type PublicKeyStruct struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	N             []byte                 `protobuf:"bytes,1,opt,name=N,proto3" json:"N,omitempty"`
	E             int64                  `protobuf:"varint,2,opt,name=E,proto3" json:"E,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 0).
func (x *PublicKeyStruct) Reset() {
	*x = PublicKeyStruct{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *PublicKeyStruct) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PublicKeyStruct) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *PublicKeyStruct) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PublicKeyStruct.ProtoReflect.Descriptor instead.
func (*PublicKeyStruct) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetN returns the modulus bytes, or nil on a nil receiver.
func (x *PublicKeyStruct) GetN() []byte {
	if x != nil {
		return x.N
	}
	return nil
}

// GetE returns the exponent, or 0 on a nil receiver.
func (x *PublicKeyStruct) GetE() int64 {
	if x != nil {
		return x.E
	}
	return 0
}
// DecryptPKCS1V15Args carries the fuzz inputs for rsa.DecryptPKCS1v15:
// a random source and a ciphertext. Generated code — do not edit.
type DecryptPKCS1V15Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Random        []byte                 `protobuf:"bytes,1,opt,name=random,proto3" json:"random,omitempty"`
	Ciphertext    []byte                 `protobuf:"bytes,2,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 1).
func (x *DecryptPKCS1V15Args) Reset() {
	*x = DecryptPKCS1V15Args{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *DecryptPKCS1V15Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DecryptPKCS1V15Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *DecryptPKCS1V15Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecryptPKCS1V15Args.ProtoReflect.Descriptor instead.
func (*DecryptPKCS1V15Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetRandom returns the random-source bytes, or nil on a nil receiver.
func (x *DecryptPKCS1V15Args) GetRandom() []byte {
	if x != nil {
		return x.Random
	}
	return nil
}

// GetCiphertext returns the ciphertext, or nil on a nil receiver.
func (x *DecryptPKCS1V15Args) GetCiphertext() []byte {
	if x != nil {
		return x.Ciphertext
	}
	return nil
}
// DecryptPKCS1V15SessionKeyArgs carries the fuzz inputs for
// rsa.DecryptPKCS1v15SessionKey: random source, ciphertext and session key
// buffer. Generated code — do not edit.
type DecryptPKCS1V15SessionKeyArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Random        []byte                 `protobuf:"bytes,1,opt,name=random,proto3" json:"random,omitempty"`
	Ciphertext    []byte                 `protobuf:"bytes,2,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"`
	Key           []byte                 `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 2).
func (x *DecryptPKCS1V15SessionKeyArgs) Reset() {
	*x = DecryptPKCS1V15SessionKeyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *DecryptPKCS1V15SessionKeyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DecryptPKCS1V15SessionKeyArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *DecryptPKCS1V15SessionKeyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecryptPKCS1V15SessionKeyArgs.ProtoReflect.Descriptor instead.
func (*DecryptPKCS1V15SessionKeyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetRandom returns the random-source bytes, or nil on a nil receiver.
func (x *DecryptPKCS1V15SessionKeyArgs) GetRandom() []byte {
	if x != nil {
		return x.Random
	}
	return nil
}

// GetCiphertext returns the ciphertext, or nil on a nil receiver.
func (x *DecryptPKCS1V15SessionKeyArgs) GetCiphertext() []byte {
	if x != nil {
		return x.Ciphertext
	}
	return nil
}

// GetKey returns the session-key buffer, or nil on a nil receiver.
func (x *DecryptPKCS1V15SessionKeyArgs) GetKey() []byte {
	if x != nil {
		return x.Key
	}
	return nil
}
// PublicKeyNgdotSizeArgs carries the fuzz input for (*rsa.PublicKey).Size:
// the public key to query. Generated code — do not edit.
type PublicKeyNgdotSizeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Pub           *PublicKeyStruct       `protobuf:"bytes,1,opt,name=pub,proto3" json:"pub,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 3).
func (x *PublicKeyNgdotSizeArgs) Reset() {
	*x = PublicKeyNgdotSizeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *PublicKeyNgdotSizeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PublicKeyNgdotSizeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *PublicKeyNgdotSizeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PublicKeyNgdotSizeArgs.ProtoReflect.Descriptor instead.
func (*PublicKeyNgdotSizeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetPub returns the embedded public key, or nil on a nil receiver.
func (x *PublicKeyNgdotSizeArgs) GetPub() *PublicKeyStruct {
	if x != nil {
		return x.Pub
	}
	return nil
}
// PrivateKeyNgdotPublicArgs marks a call to (*rsa.PrivateKey).Public; the
// method takes no arguments, so the message has no fields. Generated code —
// do not edit.
type PrivateKeyNgdotPublicArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 4).
func (x *PrivateKeyNgdotPublicArgs) Reset() {
	*x = PrivateKeyNgdotPublicArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *PrivateKeyNgdotPublicArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PrivateKeyNgdotPublicArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *PrivateKeyNgdotPublicArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PrivateKeyNgdotPublicArgs.ProtoReflect.Descriptor instead.
func (*PrivateKeyNgdotPublicArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// PrivateKeyNgdotValidateArgs marks a call to (*rsa.PrivateKey).Validate;
// no arguments, so the message has no fields. Generated code — do not edit.
type PrivateKeyNgdotValidateArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 5).
func (x *PrivateKeyNgdotValidateArgs) Reset() {
	*x = PrivateKeyNgdotValidateArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *PrivateKeyNgdotValidateArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PrivateKeyNgdotValidateArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *PrivateKeyNgdotValidateArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PrivateKeyNgdotValidateArgs.ProtoReflect.Descriptor instead.
func (*PrivateKeyNgdotValidateArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GenerateKeyArgs carries the fuzz inputs for rsa.GenerateKey: a random
// source and the requested key size in bits. Generated code — do not edit.
type GenerateKeyArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Random        []byte                 `protobuf:"bytes,1,opt,name=random,proto3" json:"random,omitempty"`
	Bits          int64                  `protobuf:"varint,2,opt,name=bits,proto3" json:"bits,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 6).
func (x *GenerateKeyArgs) Reset() {
	*x = GenerateKeyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *GenerateKeyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GenerateKeyArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *GenerateKeyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GenerateKeyArgs.ProtoReflect.Descriptor instead.
func (*GenerateKeyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetRandom returns the random-source bytes, or nil on a nil receiver.
func (x *GenerateKeyArgs) GetRandom() []byte {
	if x != nil {
		return x.Random
	}
	return nil
}

// GetBits returns the requested bit size, or 0 on a nil receiver.
func (x *GenerateKeyArgs) GetBits() int64 {
	if x != nil {
		return x.Bits
	}
	return 0
}
// GenerateMultiPrimeKeyArgs carries the fuzz inputs for
// rsa.GenerateMultiPrimeKey: random source, number of primes and key size in
// bits. Generated code — do not edit.
type GenerateMultiPrimeKeyArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Random        []byte                 `protobuf:"bytes,1,opt,name=random,proto3" json:"random,omitempty"`
	Nprimes       int64                  `protobuf:"varint,2,opt,name=nprimes,proto3" json:"nprimes,omitempty"`
	Bits          int64                  `protobuf:"varint,3,opt,name=bits,proto3" json:"bits,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 7).
func (x *GenerateMultiPrimeKeyArgs) Reset() {
	*x = GenerateMultiPrimeKeyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *GenerateMultiPrimeKeyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GenerateMultiPrimeKeyArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *GenerateMultiPrimeKeyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GenerateMultiPrimeKeyArgs.ProtoReflect.Descriptor instead.
func (*GenerateMultiPrimeKeyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// GetRandom returns the random-source bytes, or nil on a nil receiver.
func (x *GenerateMultiPrimeKeyArgs) GetRandom() []byte {
	if x != nil {
		return x.Random
	}
	return nil
}

// GetNprimes returns the requested prime count, or 0 on a nil receiver.
func (x *GenerateMultiPrimeKeyArgs) GetNprimes() int64 {
	if x != nil {
		return x.Nprimes
	}
	return 0
}

// GetBits returns the requested bit size, or 0 on a nil receiver.
func (x *GenerateMultiPrimeKeyArgs) GetBits() int64 {
	if x != nil {
		return x.Bits
	}
	return 0
}

// PrivateKeyNgdotPrecomputeArgs marks a call to
// (*rsa.PrivateKey).Precompute; no arguments, so the message has no fields.
// Generated code — do not edit.
type PrivateKeyNgdotPrecomputeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 8).
func (x *PrivateKeyNgdotPrecomputeArgs) Reset() {
	*x = PrivateKeyNgdotPrecomputeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *PrivateKeyNgdotPrecomputeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PrivateKeyNgdotPrecomputeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *PrivateKeyNgdotPrecomputeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PrivateKeyNgdotPrecomputeArgs.ProtoReflect.Descriptor instead.
func (*PrivateKeyNgdotPrecomputeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// NgoloFuzzOne encodes a single crypto/rsa API call; the Item oneof selects
// which call and carries its arguments. Generated code — do not edit.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_DecryptPKCS1V15
	//	*NgoloFuzzOne_DecryptPKCS1V15SessionKey
	//	*NgoloFuzzOne_PublicKeyNgdotSize
	//	*NgoloFuzzOne_PrivateKeyNgdotPublic
	//	*NgoloFuzzOne_PrivateKeyNgdotValidate
	//	*NgoloFuzzOne_GenerateKey
	//	*NgoloFuzzOne_GenerateMultiPrimeKey
	//	*NgoloFuzzOne_PrivateKeyNgdotPrecompute
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 9).
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetItem returns whichever oneof wrapper is currently set, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetDecryptPKCS1V15 returns the call args, or nil when another case is set.
func (x *NgoloFuzzOne) GetDecryptPKCS1V15() *DecryptPKCS1V15Args {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecryptPKCS1V15); ok {
			return x.DecryptPKCS1V15
		}
	}
	return nil
}

// GetDecryptPKCS1V15SessionKey returns the call args, or nil when another
// case is set.
func (x *NgoloFuzzOne) GetDecryptPKCS1V15SessionKey() *DecryptPKCS1V15SessionKeyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecryptPKCS1V15SessionKey); ok {
			return x.DecryptPKCS1V15SessionKey
		}
	}
	return nil
}

// GetPublicKeyNgdotSize returns the call args, or nil when another case is set.
func (x *NgoloFuzzOne) GetPublicKeyNgdotSize() *PublicKeyNgdotSizeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PublicKeyNgdotSize); ok {
			return x.PublicKeyNgdotSize
		}
	}
	return nil
}

// GetPrivateKeyNgdotPublic returns the call args, or nil when another case is set.
func (x *NgoloFuzzOne) GetPrivateKeyNgdotPublic() *PrivateKeyNgdotPublicArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PrivateKeyNgdotPublic); ok {
			return x.PrivateKeyNgdotPublic
		}
	}
	return nil
}

// GetPrivateKeyNgdotValidate returns the call args, or nil when another case is set.
func (x *NgoloFuzzOne) GetPrivateKeyNgdotValidate() *PrivateKeyNgdotValidateArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PrivateKeyNgdotValidate); ok {
			return x.PrivateKeyNgdotValidate
		}
	}
	return nil
}

// GetGenerateKey returns the call args, or nil when another case is set.
func (x *NgoloFuzzOne) GetGenerateKey() *GenerateKeyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_GenerateKey); ok {
			return x.GenerateKey
		}
	}
	return nil
}

// GetGenerateMultiPrimeKey returns the call args, or nil when another case is set.
func (x *NgoloFuzzOne) GetGenerateMultiPrimeKey() *GenerateMultiPrimeKeyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_GenerateMultiPrimeKey); ok {
			return x.GenerateMultiPrimeKey
		}
	}
	return nil
}

// GetPrivateKeyNgdotPrecompute returns the call args, or nil when another
// case is set.
func (x *NgoloFuzzOne) GetPrivateKeyNgdotPrecompute() *PrivateKeyNgdotPrecomputeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PrivateKeyNgdotPrecompute); ok {
			return x.PrivateKeyNgdotPrecompute
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper of NgoloFuzzOne.Item.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_DecryptPKCS1V15 struct {
	DecryptPKCS1V15 *DecryptPKCS1V15Args `protobuf:"bytes,1,opt,name=DecryptPKCS1v15,proto3,oneof"`
}

type NgoloFuzzOne_DecryptPKCS1V15SessionKey struct {
	DecryptPKCS1V15SessionKey *DecryptPKCS1V15SessionKeyArgs `protobuf:"bytes,2,opt,name=DecryptPKCS1v15SessionKey,proto3,oneof"`
}

type NgoloFuzzOne_PublicKeyNgdotSize struct {
	PublicKeyNgdotSize *PublicKeyNgdotSizeArgs `protobuf:"bytes,3,opt,name=PublicKeyNgdotSize,proto3,oneof"`
}

type NgoloFuzzOne_PrivateKeyNgdotPublic struct {
	PrivateKeyNgdotPublic *PrivateKeyNgdotPublicArgs `protobuf:"bytes,4,opt,name=PrivateKeyNgdotPublic,proto3,oneof"`
}

type NgoloFuzzOne_PrivateKeyNgdotValidate struct {
	PrivateKeyNgdotValidate *PrivateKeyNgdotValidateArgs `protobuf:"bytes,5,opt,name=PrivateKeyNgdotValidate,proto3,oneof"`
}

type NgoloFuzzOne_GenerateKey struct {
	GenerateKey *GenerateKeyArgs `protobuf:"bytes,6,opt,name=GenerateKey,proto3,oneof"`
}

type NgoloFuzzOne_GenerateMultiPrimeKey struct {
	GenerateMultiPrimeKey *GenerateMultiPrimeKeyArgs `protobuf:"bytes,7,opt,name=GenerateMultiPrimeKey,proto3,oneof"`
}

type NgoloFuzzOne_PrivateKeyNgdotPrecompute struct {
	PrivateKeyNgdotPrecompute *PrivateKeyNgdotPrecomputeArgs `protobuf:"bytes,8,opt,name=PrivateKeyNgdotPrecompute,proto3,oneof"`
}

func (*NgoloFuzzOne_DecryptPKCS1V15) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_DecryptPKCS1V15SessionKey) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PublicKeyNgdotSize) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PrivateKeyNgdotPublic) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PrivateKeyNgdotValidate) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_GenerateKey) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_GenerateMultiPrimeKey) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PrivateKeyNgdotPrecompute) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a protoc-generated message carrying exactly one primitive
// value (double/int64/bool/string/bytes) selected through the Item oneof.
// Generated code — do not edit.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (message index 10).
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetItem returns whichever oneof wrapper is currently set, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetDoubleArgs returns the double value, or 0 when another case is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

// GetInt64Args returns the int64 value, or 0 when another case is set.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

// GetBoolArgs returns the bool value, or false when another case is set.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

// GetStringArgs returns the string value, or "" when another case is set.
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

// GetBytesArgs returns the bytes value, or nil when another case is set.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every oneof
// wrapper of NgoloFuzzAny.Item.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"-\n" +
"\x0fPublicKeyStruct\x12\f\n" +
"\x01N\x18\x01 \x01(\fR\x01N\x12\f\n" +
"\x01E\x18\x02 \x01(\x03R\x01E\"M\n" +
"\x13DecryptPKCS1v15Args\x12\x16\n" +
"\x06random\x18\x01 \x01(\fR\x06random\x12\x1e\n" +
"\n" +
"ciphertext\x18\x02 \x01(\fR\n" +
"ciphertext\"i\n" +
"\x1dDecryptPKCS1v15SessionKeyArgs\x12\x16\n" +
"\x06random\x18\x01 \x01(\fR\x06random\x12\x1e\n" +
"\n" +
"ciphertext\x18\x02 \x01(\fR\n" +
"ciphertext\x12\x10\n" +
"\x03key\x18\x03 \x01(\fR\x03key\"F\n" +
"\x16PublicKeyNgdotSizeArgs\x12,\n" +
"\x03pub\x18\x01 \x01(\v2\x1a.ngolofuzz.PublicKeyStructR\x03pub\"\x1b\n" +
"\x19PrivateKeyNgdotPublicArgs\"\x1d\n" +
"\x1bPrivateKeyNgdotValidateArgs\"=\n" +
"\x0fGenerateKeyArgs\x12\x16\n" +
"\x06random\x18\x01 \x01(\fR\x06random\x12\x12\n" +
"\x04bits\x18\x02 \x01(\x03R\x04bits\"a\n" +
"\x19GenerateMultiPrimeKeyArgs\x12\x16\n" +
"\x06random\x18\x01 \x01(\fR\x06random\x12\x18\n" +
"\anprimes\x18\x02 \x01(\x03R\anprimes\x12\x12\n" +
"\x04bits\x18\x03 \x01(\x03R\x04bits\"\x1f\n" +
"\x1dPrivateKeyNgdotPrecomputeArgs\"\xeb\x05\n" +
"\fNgoloFuzzOne\x12J\n" +
"\x0fDecryptPKCS1v15\x18\x01 \x01(\v2\x1e.ngolofuzz.DecryptPKCS1v15ArgsH\x00R\x0fDecryptPKCS1v15\x12h\n" +
"\x19DecryptPKCS1v15SessionKey\x18\x02 \x01(\v2(.ngolofuzz.DecryptPKCS1v15SessionKeyArgsH\x00R\x19DecryptPKCS1v15SessionKey\x12S\n" +
"\x12PublicKeyNgdotSize\x18\x03 \x01(\v2!.ngolofuzz.PublicKeyNgdotSizeArgsH\x00R\x12PublicKeyNgdotSize\x12\\\n" +
"\x15PrivateKeyNgdotPublic\x18\x04 \x01(\v2$.ngolofuzz.PrivateKeyNgdotPublicArgsH\x00R\x15PrivateKeyNgdotPublic\x12b\n" +
"\x17PrivateKeyNgdotValidate\x18\x05 \x01(\v2&.ngolofuzz.PrivateKeyNgdotValidateArgsH\x00R\x17PrivateKeyNgdotValidate\x12>\n" +
"\vGenerateKey\x18\x06 \x01(\v2\x1a.ngolofuzz.GenerateKeyArgsH\x00R\vGenerateKey\x12\\\n" +
"\x15GenerateMultiPrimeKey\x18\a \x01(\v2$.ngolofuzz.GenerateMultiPrimeKeyArgsH\x00R\x15GenerateMultiPrimeKey\x12h\n" +
"\x19PrivateKeyNgdotPrecompute\x18\b \x01(\v2(.ngolofuzz.PrivateKeyNgdotPrecomputeArgsH\x00R\x19PrivateKeyNgdotPrecomputeB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x17Z\x15./;fuzz_ng_crypto_rsab\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
var file_ngolofuzz_proto_goTypes = []any{
(*PublicKeyStruct)(nil), // 0: ngolofuzz.PublicKeyStruct
(*DecryptPKCS1V15Args)(nil), // 1: ngolofuzz.DecryptPKCS1v15Args
(*DecryptPKCS1V15SessionKeyArgs)(nil), // 2: ngolofuzz.DecryptPKCS1v15SessionKeyArgs
(*PublicKeyNgdotSizeArgs)(nil), // 3: ngolofuzz.PublicKeyNgdotSizeArgs
(*PrivateKeyNgdotPublicArgs)(nil), // 4: ngolofuzz.PrivateKeyNgdotPublicArgs
(*PrivateKeyNgdotValidateArgs)(nil), // 5: ngolofuzz.PrivateKeyNgdotValidateArgs
(*GenerateKeyArgs)(nil), // 6: ngolofuzz.GenerateKeyArgs
(*GenerateMultiPrimeKeyArgs)(nil), // 7: ngolofuzz.GenerateMultiPrimeKeyArgs
(*PrivateKeyNgdotPrecomputeArgs)(nil), // 8: ngolofuzz.PrivateKeyNgdotPrecomputeArgs
(*NgoloFuzzOne)(nil), // 9: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 10: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 11: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.PublicKeyNgdotSizeArgs.pub:type_name -> ngolofuzz.PublicKeyStruct
1, // 1: ngolofuzz.NgoloFuzzOne.DecryptPKCS1v15:type_name -> ngolofuzz.DecryptPKCS1v15Args
2, // 2: ngolofuzz.NgoloFuzzOne.DecryptPKCS1v15SessionKey:type_name -> ngolofuzz.DecryptPKCS1v15SessionKeyArgs
3, // 3: ngolofuzz.NgoloFuzzOne.PublicKeyNgdotSize:type_name -> ngolofuzz.PublicKeyNgdotSizeArgs
4, // 4: ngolofuzz.NgoloFuzzOne.PrivateKeyNgdotPublic:type_name -> ngolofuzz.PrivateKeyNgdotPublicArgs
5, // 5: ngolofuzz.NgoloFuzzOne.PrivateKeyNgdotValidate:type_name -> ngolofuzz.PrivateKeyNgdotValidateArgs
6, // 6: ngolofuzz.NgoloFuzzOne.GenerateKey:type_name -> ngolofuzz.GenerateKeyArgs
7, // 7: ngolofuzz.NgoloFuzzOne.GenerateMultiPrimeKey:type_name -> ngolofuzz.GenerateMultiPrimeKeyArgs
8, // 8: ngolofuzz.NgoloFuzzOne.PrivateKeyNgdotPrecompute:type_name -> ngolofuzz.PrivateKeyNgdotPrecomputeArgs
9, // 9: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
10, // [10:10] is the sub-list for method output_type
10, // [10:10] is the sub-list for method input_type
10, // [10:10] is the sub-list for extension type_name
10, // [10:10] is the sub-list for extension extendee
0, // [0:10] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[9].OneofWrappers = []any{
(*NgoloFuzzOne_DecryptPKCS1V15)(nil),
(*NgoloFuzzOne_DecryptPKCS1V15SessionKey)(nil),
(*NgoloFuzzOne_PublicKeyNgdotSize)(nil),
(*NgoloFuzzOne_PrivateKeyNgdotPublic)(nil),
(*NgoloFuzzOne_PrivateKeyNgdotValidate)(nil),
(*NgoloFuzzOne_GenerateKey)(nil),
(*NgoloFuzzOne_GenerateMultiPrimeKey)(nil),
(*NgoloFuzzOne_PrivateKeyNgdotPrecompute)(nil),
}
file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 12,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_sha1
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/sha1"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn implementation backed by a fixed
// byte buffer. It feeds fuzzer-generated bytes to code that expects a
// network connection: Reads drain buf, Writes are silently discarded.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of the buffer into b and reports how
// many bytes were actually copied. It returns io.EOF once the whole
// buffer has been consumed.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	// BUG FIX: the original condition was `len(b) < len(c.buf)+c.offset`
	// (plus instead of minus), which overstated the remaining data: when
	// len(b) exceeded the bytes left, Read returned len(b) even though
	// fewer bytes were copied, and advanced offset past the end of buf.
	if len(b) < remaining {
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write pretends to succeed and discards the data.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: the connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps a in a FuzzingConn ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO only add these functions if needed

// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}

// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewBuffer(a))
}

// ConvertIntArray narrows each int64 element of a to int.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}

// ConvertUint16Array narrows each int64 element of a to uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}

// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return '\x00'
}
// FuzzNG_valid drives FuzzNG_List with data that is expected to be a
// valid protobuf encoding of NgoloFuzzList; it panics if decoding fails.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics are expected failures raised by the fuzzed API;
		// anything else is a genuine crash and is re-thrown.
		if _, expected := r.(string); !expected {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
//
// FuzzNG_unsure behaves like FuzzNG_valid but tolerates undecodable
// input, returning 0 instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// Swallow string panics (expected API failures); rethrow the rest.
		if _, expected := r.(string); !expected {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays every call described in gen against crypto/sha1.
// On first use it optionally writes a Go-source reproducer to the file
// named by the FUZZ_NG_REPRODUCER environment variable.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		if repro := os.Getenv("FUZZ_NG_REPRODUCER"); len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for i, one := range gen.List {
		// Cap the number of replayed calls per input.
		if i > 4096 {
			return 0
		}
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_New:
			sha1.New()
		case *NgoloFuzzOne_Sum:
			sha1.Sum(a.Sum.Data)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the calls encoded in gen.
// WriteString errors are deliberately ignored: this is best-effort
// diagnostic output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			// Constant string: Sprintf with no args is a vet/staticcheck
			// (S1039) violation, so write it directly.
			w.WriteString("sha1.New()\n")
		case *NgoloFuzzOne_Sum:
			w.WriteString(fmt.Sprintf("sha1.Sum(%#+v)\n", a.Sum.Data))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_sha1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// NewArgs is the protoc-generated message describing the (empty) argument
// list of a sha1.New call in the fuzzing corpus.
type NewArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-attaches its generated type information.
func (x *NewArgs) Reset() {
	*x = NewArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *NewArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NewArgs as a protobuf message.
func (*NewArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily binding
// the generated message info on first use.
func (x *NewArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// SumArgs is the protoc-generated message carrying the data argument of a
// sha1.Sum call in the fuzzing corpus.
type SumArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Data is the byte slice handed to sha1.Sum.
	Data          []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-attaches its generated type information.
func (x *SumArgs) Reset() {
	*x = SumArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *SumArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SumArgs as a protobuf message.
func (*SumArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily binding
// the generated message info on first use.
func (x *SumArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SumArgs.ProtoReflect.Descriptor instead.
func (*SumArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetData returns the Data field, or nil for a nil receiver.
func (x *SumArgs) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// NgoloFuzzOne is the protoc-generated oneof wrapper describing a single
// sha1 API call (either New or Sum) in the fuzzing corpus.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzOne_New
	// *NgoloFuzzOne_Sum
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-attaches its generated type information.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NgoloFuzzOne as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily binding
// the generated message info on first use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetItem returns the populated oneof variant, or nil for a nil receiver.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetNew returns the New variant, or nil if another variant is set.
func (x *NgoloFuzzOne) GetNew() *NewArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
			return x.New
		}
	}
	return nil
}

// GetSum returns the Sum variant, or nil if another variant is set.
func (x *NgoloFuzzOne) GetSum() *SumArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Sum); ok {
			return x.Sum
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type for the "item" field.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// NgoloFuzzOne_New wraps the New call arguments for the oneof field.
type NgoloFuzzOne_New struct {
	New *NewArgs `protobuf:"bytes,1,opt,name=New,proto3,oneof"`
}

// NgoloFuzzOne_Sum wraps the Sum call arguments for the oneof field.
type NgoloFuzzOne_Sum struct {
	Sum *SumArgs `protobuf:"bytes,2,opt,name=Sum,proto3,oneof"`
}

func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Sum) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\t\n" +
"\aNewArgs\"\x1d\n" +
"\aSumArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"f\n" +
"\fNgoloFuzzOne\x12&\n" +
"\x03New\x18\x01 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12&\n" +
"\x03Sum\x18\x02 \x01(\v2\x12.ngolofuzz.SumArgsH\x00R\x03SumB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x18Z\x16./;fuzz_ng_crypto_sha1b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_ngolofuzz_proto_goTypes = []any{
(*NewArgs)(nil), // 0: ngolofuzz.NewArgs
(*SumArgs)(nil), // 1: ngolofuzz.SumArgs
(*NgoloFuzzOne)(nil), // 2: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 3: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 4: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
1, // 1: ngolofuzz.NgoloFuzzOne.Sum:type_name -> ngolofuzz.SumArgs
2, // 2: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_Sum)(nil),
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_sha256
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/sha256"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn implementation backed by a fixed
// byte buffer. It feeds fuzzer-generated bytes to code that expects a
// network connection: Reads drain buf, Writes are silently discarded.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of the buffer into b and reports how
// many bytes were actually copied. It returns io.EOF once the whole
// buffer has been consumed.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	// BUG FIX: the original condition was `len(b) < len(c.buf)+c.offset`
	// (plus instead of minus), which overstated the remaining data: when
	// len(b) exceeded the bytes left, Read returned len(b) even though
	// fewer bytes were copied, and advanced offset past the end of buf.
	if len(b) < remaining {
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write pretends to succeed and discards the data.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: the connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps a in a FuzzingConn ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO only add these functions if needed

// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}

// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewBuffer(a))
}

// ConvertIntArray narrows each int64 element of a to int.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}

// ConvertUint16Array narrows each int64 element of a to uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}

// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return '\x00'
}
// FuzzNG_valid drives FuzzNG_List with data that is expected to be a
// valid protobuf encoding of NgoloFuzzList; it panics if decoding fails.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics are expected failures raised by the fuzzed API;
		// anything else is a genuine crash and is re-thrown.
		if _, expected := r.(string); !expected {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
//
// FuzzNG_unsure behaves like FuzzNG_valid but tolerates undecodable
// input, returning 0 instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// Swallow string panics (expected API failures); rethrow the rest.
		if _, expected := r.(string); !expected {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays every call described in gen against crypto/sha256.
// On first use it optionally writes a Go-source reproducer to the file
// named by the FUZZ_NG_REPRODUCER environment variable.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		if repro := os.Getenv("FUZZ_NG_REPRODUCER"); len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for i, one := range gen.List {
		// Cap the number of replayed calls per input.
		if i > 4096 {
			return 0
		}
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_New:
			sha256.New()
		case *NgoloFuzzOne_New224:
			sha256.New224()
		case *NgoloFuzzOne_Sum256:
			sha256.Sum256(a.Sum256.Data)
		case *NgoloFuzzOne_Sum224:
			sha256.Sum224(a.Sum224.Data)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the calls encoded in gen.
// WriteString errors are deliberately ignored: this is best-effort
// diagnostic output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			// Constant strings: Sprintf with no args is a vet/staticcheck
			// (S1039) violation, so write them directly.
			w.WriteString("sha256.New()\n")
		case *NgoloFuzzOne_New224:
			w.WriteString("sha256.New224()\n")
		case *NgoloFuzzOne_Sum256:
			w.WriteString(fmt.Sprintf("sha256.Sum256(%#+v)\n", a.Sum256.Data))
		case *NgoloFuzzOne_Sum224:
			w.WriteString(fmt.Sprintf("sha256.Sum224(%#+v)\n", a.Sum224.Data))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_sha256
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
*x = NewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type New224Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *New224Args) Reset() {
*x = New224Args{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *New224Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*New224Args) ProtoMessage() {}
func (x *New224Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use New224Args.ProtoReflect.Descriptor instead.
func (*New224Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type Sum256Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Sum256Args) Reset() {
*x = Sum256Args{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Sum256Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Sum256Args) ProtoMessage() {}
func (x *Sum256Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Sum256Args.ProtoReflect.Descriptor instead.
func (*Sum256Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *Sum256Args) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type Sum224Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Sum224Args) Reset() {
*x = Sum224Args{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Sum224Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Sum224Args) ProtoMessage() {}
func (x *Sum224Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Sum224Args.ProtoReflect.Descriptor instead.
func (*Sum224Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *Sum224Args) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_New
// *NgoloFuzzOne_New224
// *NgoloFuzzOne_Sum256
// *NgoloFuzzOne_Sum224
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNew() *NewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
return x.New
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew224() *New224Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New224); ok {
return x.New224
}
}
return nil
}
func (x *NgoloFuzzOne) GetSum256() *Sum256Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum256); ok {
return x.Sum256
}
}
return nil
}
func (x *NgoloFuzzOne) GetSum224() *Sum224Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum224); ok {
return x.Sum224
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_New struct {
New *NewArgs `protobuf:"bytes,1,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_New224 struct {
New224 *New224Args `protobuf:"bytes,2,opt,name=New224,proto3,oneof"`
}
type NgoloFuzzOne_Sum256 struct {
Sum256 *Sum256Args `protobuf:"bytes,3,opt,name=Sum256,proto3,oneof"`
}
type NgoloFuzzOne_Sum224 struct {
Sum224 *Sum224Args `protobuf:"bytes,4,opt,name=Sum224,proto3,oneof"`
}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New224) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sum256) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sum224) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\t\n" +
"\aNewArgs\"\f\n" +
"\n" +
"New224Args\" \n" +
"\n" +
"Sum256Args\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\" \n" +
"\n" +
"Sum224Args\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"\xd1\x01\n" +
"\fNgoloFuzzOne\x12&\n" +
"\x03New\x18\x01 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12/\n" +
"\x06New224\x18\x02 \x01(\v2\x15.ngolofuzz.New224ArgsH\x00R\x06New224\x12/\n" +
"\x06Sum256\x18\x03 \x01(\v2\x15.ngolofuzz.Sum256ArgsH\x00R\x06Sum256\x12/\n" +
"\x06Sum224\x18\x04 \x01(\v2\x15.ngolofuzz.Sum224ArgsH\x00R\x06Sum224B\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_crypto_sha256b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file descriptor
// exactly once (guarded by rawDescOnce) and returns the cached bytes.
// It backs the deprecated Descriptor() methods on the generated messages.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		// unsafe.Slice over StringData avoids copying the descriptor string
		// before compression.
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_ngolofuzz_proto_goTypes = []any{
(*NewArgs)(nil), // 0: ngolofuzz.NewArgs
(*New224Args)(nil), // 1: ngolofuzz.New224Args
(*Sum256Args)(nil), // 2: ngolofuzz.Sum256Args
(*Sum224Args)(nil), // 3: ngolofuzz.Sum224Args
(*NgoloFuzzOne)(nil), // 4: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 5: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 6: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
1, // 1: ngolofuzz.NgoloFuzzOne.New224:type_name -> ngolofuzz.New224Args
2, // 2: ngolofuzz.NgoloFuzzOne.Sum256:type_name -> ngolofuzz.Sum256Args
3, // 3: ngolofuzz.NgoloFuzzOne.Sum224:type_name -> ngolofuzz.Sum224Args
4, // 4: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
5, // [5:5] is the sub-list for method output_type
5, // [5:5] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init builds and registers this file's descriptor and
// message types with the protobuf runtime. It is idempotent: a second call
// returns immediately once File_ngolofuzz_proto is set.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		// Already built; nothing to do.
		return
	}
	// Wrapper types for each case of the NgoloFuzzOne "item" oneof
	// (message index 4), so the runtime can decode fields 1-4.
	file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
		(*NgoloFuzzOne_New)(nil),
		(*NgoloFuzzOne_New224)(nil),
		(*NgoloFuzzOne_Sum256)(nil),
		(*NgoloFuzzOne_Sum224)(nil),
	}
	// Wrapper types for the NgoloFuzzAny "item" oneof (message index 5).
	file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   7,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Drop the temporary registration tables so they can be collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_sha3
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/sha3"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write discards b and reports the whole slice as successfully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}
// Close drains the connection: the read cursor jumps to the end of the
// buffer so every subsequent Read reports io.EOF. It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a fixed placeholder address reported by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a constant fake network name.
func (c *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String returns a constant fake address string.
func (c *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns the placeholder fuzzing address.
func (c *FuzzingConn) LocalAddr() net.Addr { return &FuzzingAddr{} }

// RemoteAddr returns the placeholder fuzzing address.
func (c *FuzzingConn) RemoteAddr() net.Addr { return &FuzzingAddr{} }

// SetDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn
// whose Read serves them from the beginning.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit the helper constructors below only when the fuzzed API actually needs them.
func CreateBigInt(a []byte) *big.Int {
r := new(big.Int)
r.SetBytes(a)
return r
}
// CreateBufioReader exposes the byte slice a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows every element of a to the platform int width,
// returning a new slice of equal length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates every element of a to uint16 (values wrap
// modulo 65536), returning a new slice of equal length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
func GetRune(s string) rune {
	r := rune('\x00')
	for _, c := range s {
		r = c
		break
	}
	return r
}
// FuzzNG_valid runs the call list encoded in data, which is assumed to be
// a valid protobuf NgoloFuzzList (as produced by libprotobuf-mutator);
// it panics if decoding fails. Returns FuzzNG_List's result, or 0 when a
// string panic from the driven API was swallowed.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the driven API; re-raise anything else.
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is like FuzzNG_valid but tolerates arbitrary input:
// bytes that do not decode as an NgoloFuzzList are silently ignored
// (returns 0) instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	// Swallow string panics raised by the driven API; re-raise anything else.
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets gen as an ordered sequence of crypto/sha3 API
// calls and executes them. Hash states created by New*/NewSHAKE*/NewCSHAKE*
// are appended to SHA3Results/SHAKEResults, and method-call items pick the
// target state round-robin via the matching *ResultsIndex counter.
// Returns 1 when the whole list executed, 0 when execution stopped early
// (list longer than 4096 items, or a driven method returned an error).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first call only: if FUZZ_NG_REPRODUCER names a file,
		// write a Go-source transcript of this input there.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var SHA3Results []*sha3.SHA3
	SHA3ResultsIndex := 0
	var SHAKEResults []*sha3.SHAKE
	SHAKEResultsIndex := 0
	for l := range gen.List {
		// Cap the number of executed operations to bound run time.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Sum224:
			sha3.Sum224(a.Sum224.Data)
		case *NgoloFuzzOne_Sum256:
			sha3.Sum256(a.Sum256.Data)
		case *NgoloFuzzOne_Sum384:
			sha3.Sum384(a.Sum384.Data)
		case *NgoloFuzzOne_Sum512:
			sha3.Sum512(a.Sum512.Data)
		case *NgoloFuzzOne_SumSHAKE128:
			// Output length is reduced modulo 0x10001 to keep it bounded.
			arg1 := int(a.SumSHAKE128.Length)
			sha3.SumSHAKE128(a.SumSHAKE128.Data, arg1 % 0x10001)
		case *NgoloFuzzOne_SumSHAKE256:
			arg1 := int(a.SumSHAKE256.Length)
			sha3.SumSHAKE256(a.SumSHAKE256.Data, arg1 % 0x10001)
		case *NgoloFuzzOne_New224:
			r0 := sha3.New224()
			if r0 != nil{
				SHA3Results = append(SHA3Results, r0)
			}
		case *NgoloFuzzOne_New256:
			r0 := sha3.New256()
			if r0 != nil{
				SHA3Results = append(SHA3Results, r0)
			}
		case *NgoloFuzzOne_New384:
			r0 := sha3.New384()
			if r0 != nil{
				SHA3Results = append(SHA3Results, r0)
			}
		case *NgoloFuzzOne_New512:
			r0 := sha3.New512()
			if r0 != nil{
				SHA3Results = append(SHA3Results, r0)
			}
		// Method-call cases below follow one pattern: skip if no state
		// exists yet, pick the current round-robin target, advance the
		// index, call the method; a returned error ends the run (return 0).
		case *NgoloFuzzOne_SHA3NgdotWrite:
			if len(SHA3Results) == 0 {
				continue
			}
			arg0 := SHA3Results[SHA3ResultsIndex]
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % len(SHA3Results)
			_, r1 := arg0.Write(a.SHA3NgdotWrite.P)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_SHA3NgdotSum:
			if len(SHA3Results) == 0 {
				continue
			}
			arg0 := SHA3Results[SHA3ResultsIndex]
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % len(SHA3Results)
			arg0.Sum(a.SHA3NgdotSum.B)
		case *NgoloFuzzOne_SHA3NgdotReset:
			if len(SHA3Results) == 0 {
				continue
			}
			arg0 := SHA3Results[SHA3ResultsIndex]
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % len(SHA3Results)
			arg0.Reset()
		case *NgoloFuzzOne_SHA3NgdotSize:
			if len(SHA3Results) == 0 {
				continue
			}
			arg0 := SHA3Results[SHA3ResultsIndex]
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % len(SHA3Results)
			arg0.Size()
		case *NgoloFuzzOne_SHA3NgdotBlockSize:
			if len(SHA3Results) == 0 {
				continue
			}
			arg0 := SHA3Results[SHA3ResultsIndex]
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % len(SHA3Results)
			arg0.BlockSize()
		case *NgoloFuzzOne_SHA3NgdotMarshalBinary:
			if len(SHA3Results) == 0 {
				continue
			}
			arg0 := SHA3Results[SHA3ResultsIndex]
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % len(SHA3Results)
			_, r1 := arg0.MarshalBinary()
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_SHA3NgdotAppendBinary:
			if len(SHA3Results) == 0 {
				continue
			}
			arg0 := SHA3Results[SHA3ResultsIndex]
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % len(SHA3Results)
			_, r1 := arg0.AppendBinary(a.SHA3NgdotAppendBinary.P)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_SHA3NgdotUnmarshalBinary:
			if len(SHA3Results) == 0 {
				continue
			}
			arg0 := SHA3Results[SHA3ResultsIndex]
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % len(SHA3Results)
			r0 := arg0.UnmarshalBinary(a.SHA3NgdotUnmarshalBinary.Data)
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_SHA3NgdotClone:
			if len(SHA3Results) == 0 {
				continue
			}
			arg0 := SHA3Results[SHA3ResultsIndex]
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % len(SHA3Results)
			_, r1 := arg0.Clone()
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_NewSHAKE128:
			r0 := sha3.NewSHAKE128()
			if r0 != nil{
				SHAKEResults = append(SHAKEResults, r0)
			}
		case *NgoloFuzzOne_NewSHAKE256:
			r0 := sha3.NewSHAKE256()
			if r0 != nil{
				SHAKEResults = append(SHAKEResults, r0)
			}
		case *NgoloFuzzOne_NewCSHAKE128:
			r0 := sha3.NewCSHAKE128(a.NewCSHAKE128.N, a.NewCSHAKE128.S)
			if r0 != nil{
				SHAKEResults = append(SHAKEResults, r0)
			}
		case *NgoloFuzzOne_NewCSHAKE256:
			r0 := sha3.NewCSHAKE256(a.NewCSHAKE256.N, a.NewCSHAKE256.S)
			if r0 != nil{
				SHAKEResults = append(SHAKEResults, r0)
			}
		case *NgoloFuzzOne_SHAKENgdotWrite:
			if len(SHAKEResults) == 0 {
				continue
			}
			arg0 := SHAKEResults[SHAKEResultsIndex]
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % len(SHAKEResults)
			_, r1 := arg0.Write(a.SHAKENgdotWrite.P)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_SHAKENgdotRead:
			if len(SHAKEResults) == 0 {
				continue
			}
			arg0 := SHAKEResults[SHAKEResultsIndex]
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % len(SHAKEResults)
			_, r1 := arg0.Read(a.SHAKENgdotRead.P)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_SHAKENgdotReset:
			if len(SHAKEResults) == 0 {
				continue
			}
			arg0 := SHAKEResults[SHAKEResultsIndex]
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % len(SHAKEResults)
			arg0.Reset()
		case *NgoloFuzzOne_SHAKENgdotBlockSize:
			if len(SHAKEResults) == 0 {
				continue
			}
			arg0 := SHAKEResults[SHAKEResultsIndex]
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % len(SHAKEResults)
			arg0.BlockSize()
		case *NgoloFuzzOne_SHAKENgdotMarshalBinary:
			if len(SHAKEResults) == 0 {
				continue
			}
			arg0 := SHAKEResults[SHAKEResultsIndex]
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % len(SHAKEResults)
			_, r1 := arg0.MarshalBinary()
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_SHAKENgdotAppendBinary:
			if len(SHAKEResults) == 0 {
				continue
			}
			arg0 := SHAKEResults[SHAKEResultsIndex]
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % len(SHAKEResults)
			_, r1 := arg0.AppendBinary(a.SHAKENgdotAppendBinary.P)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_SHAKENgdotUnmarshalBinary:
			if len(SHAKEResults) == 0 {
				continue
			}
			arg0 := SHAKEResults[SHAKEResultsIndex]
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % len(SHAKEResults)
			r0 := arg0.UnmarshalBinary(a.SHAKENgdotUnmarshalBinary.Data)
			if r0 != nil{
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source transcript of the call sequence in gen
// to w, mirroring exactly what FuzzNG_List would execute. It is used to
// emit human-readable reproducers (see FUZZ_NG_REPRODUCER). The counters
// track how many SHA3/SHAKE states exist (SHA3Nb/SHAKENb) and which one
// the round-robin would target next (SHA3ResultsIndex/SHAKEResultsIndex),
// so printed variable names match the execution order in FuzzNG_List.
// WriteString errors are intentionally ignored: this is best-effort output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	SHA3Nb := 0
	SHA3ResultsIndex := 0
	SHAKENb := 0
	SHAKEResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Sum224:
			w.WriteString(fmt.Sprintf("sha3.Sum224(%#+v)\n", a.Sum224.Data))
		case *NgoloFuzzOne_Sum256:
			w.WriteString(fmt.Sprintf("sha3.Sum256(%#+v)\n", a.Sum256.Data))
		case *NgoloFuzzOne_Sum384:
			w.WriteString(fmt.Sprintf("sha3.Sum384(%#+v)\n", a.Sum384.Data))
		case *NgoloFuzzOne_Sum512:
			w.WriteString(fmt.Sprintf("sha3.Sum512(%#+v)\n", a.Sum512.Data))
		case *NgoloFuzzOne_SumSHAKE128:
			w.WriteString(fmt.Sprintf("sha3.SumSHAKE128(%#+v, int(%#+v) %% 0x10001)\n", a.SumSHAKE128.Data, a.SumSHAKE128.Length))
		case *NgoloFuzzOne_SumSHAKE256:
			w.WriteString(fmt.Sprintf("sha3.SumSHAKE256(%#+v, int(%#+v) %% 0x10001)\n", a.SumSHAKE256.Data, a.SumSHAKE256.Length))
		case *NgoloFuzzOne_New224:
			w.WriteString(fmt.Sprintf("SHA3%d := sha3.New224()\n", SHA3Nb))
			SHA3Nb = SHA3Nb + 1
		case *NgoloFuzzOne_New256:
			w.WriteString(fmt.Sprintf("SHA3%d := sha3.New256()\n", SHA3Nb))
			SHA3Nb = SHA3Nb + 1
		case *NgoloFuzzOne_New384:
			w.WriteString(fmt.Sprintf("SHA3%d := sha3.New384()\n", SHA3Nb))
			SHA3Nb = SHA3Nb + 1
		case *NgoloFuzzOne_New512:
			w.WriteString(fmt.Sprintf("SHA3%d := sha3.New512()\n", SHA3Nb))
			SHA3Nb = SHA3Nb + 1
		// Method-call cases: skip when no state exists (matching the
		// corresponding skip in FuzzNG_List), print the call on the current
		// round-robin target, then advance the index.
		case *NgoloFuzzOne_SHA3NgdotWrite:
			if SHA3Nb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHA3%d.Write(%#+v)\n", SHA3ResultsIndex, a.SHA3NgdotWrite.P))
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % SHA3Nb
		case *NgoloFuzzOne_SHA3NgdotSum:
			if SHA3Nb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHA3%d.Sum(%#+v)\n", SHA3ResultsIndex, a.SHA3NgdotSum.B))
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % SHA3Nb
		case *NgoloFuzzOne_SHA3NgdotReset:
			if SHA3Nb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHA3%d.Reset()\n", SHA3ResultsIndex))
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % SHA3Nb
		case *NgoloFuzzOne_SHA3NgdotSize:
			if SHA3Nb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHA3%d.Size()\n", SHA3ResultsIndex))
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % SHA3Nb
		case *NgoloFuzzOne_SHA3NgdotBlockSize:
			if SHA3Nb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHA3%d.BlockSize()\n", SHA3ResultsIndex))
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % SHA3Nb
		case *NgoloFuzzOne_SHA3NgdotMarshalBinary:
			if SHA3Nb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHA3%d.MarshalBinary()\n", SHA3ResultsIndex))
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % SHA3Nb
		case *NgoloFuzzOne_SHA3NgdotAppendBinary:
			if SHA3Nb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHA3%d.AppendBinary(%#+v)\n", SHA3ResultsIndex, a.SHA3NgdotAppendBinary.P))
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % SHA3Nb
		case *NgoloFuzzOne_SHA3NgdotUnmarshalBinary:
			if SHA3Nb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHA3%d.UnmarshalBinary(%#+v)\n", SHA3ResultsIndex, a.SHA3NgdotUnmarshalBinary.Data))
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % SHA3Nb
		case *NgoloFuzzOne_SHA3NgdotClone:
			if SHA3Nb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHA3%d.Clone()\n", SHA3ResultsIndex))
			SHA3ResultsIndex = (SHA3ResultsIndex + 1) % SHA3Nb
		case *NgoloFuzzOne_NewSHAKE128:
			w.WriteString(fmt.Sprintf("SHAKE%d := sha3.NewSHAKE128()\n", SHAKENb))
			SHAKENb = SHAKENb + 1
		case *NgoloFuzzOne_NewSHAKE256:
			w.WriteString(fmt.Sprintf("SHAKE%d := sha3.NewSHAKE256()\n", SHAKENb))
			SHAKENb = SHAKENb + 1
		case *NgoloFuzzOne_NewCSHAKE128:
			w.WriteString(fmt.Sprintf("SHAKE%d := sha3.NewCSHAKE128(%#+v, %#+v)\n", SHAKENb, a.NewCSHAKE128.N, a.NewCSHAKE128.S))
			SHAKENb = SHAKENb + 1
		case *NgoloFuzzOne_NewCSHAKE256:
			w.WriteString(fmt.Sprintf("SHAKE%d := sha3.NewCSHAKE256(%#+v, %#+v)\n", SHAKENb, a.NewCSHAKE256.N, a.NewCSHAKE256.S))
			SHAKENb = SHAKENb + 1
		case *NgoloFuzzOne_SHAKENgdotWrite:
			if SHAKENb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHAKE%d.Write(%#+v)\n", SHAKEResultsIndex, a.SHAKENgdotWrite.P))
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % SHAKENb
		case *NgoloFuzzOne_SHAKENgdotRead:
			if SHAKENb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHAKE%d.Read(%#+v)\n", SHAKEResultsIndex, a.SHAKENgdotRead.P))
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % SHAKENb
		case *NgoloFuzzOne_SHAKENgdotReset:
			if SHAKENb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHAKE%d.Reset()\n", SHAKEResultsIndex))
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % SHAKENb
		case *NgoloFuzzOne_SHAKENgdotBlockSize:
			if SHAKENb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHAKE%d.BlockSize()\n", SHAKEResultsIndex))
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % SHAKENb
		case *NgoloFuzzOne_SHAKENgdotMarshalBinary:
			if SHAKENb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHAKE%d.MarshalBinary()\n", SHAKEResultsIndex))
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % SHAKENb
		case *NgoloFuzzOne_SHAKENgdotAppendBinary:
			if SHAKENb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHAKE%d.AppendBinary(%#+v)\n", SHAKEResultsIndex, a.SHAKENgdotAppendBinary.P))
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % SHAKENb
		case *NgoloFuzzOne_SHAKENgdotUnmarshalBinary:
			if SHAKENb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("SHAKE%d.UnmarshalBinary(%#+v)\n", SHAKEResultsIndex, a.SHAKENgdotUnmarshalBinary.Data))
			SHAKEResultsIndex = (SHAKEResultsIndex + 1) % SHAKENb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_sha3
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Sum224Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Sum224Args) Reset() {
*x = Sum224Args{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Sum224Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Sum224Args) ProtoMessage() {}
func (x *Sum224Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Sum224Args.ProtoReflect.Descriptor instead.
func (*Sum224Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *Sum224Args) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type Sum256Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Sum256Args) Reset() {
*x = Sum256Args{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Sum256Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Sum256Args) ProtoMessage() {}
func (x *Sum256Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Sum256Args.ProtoReflect.Descriptor instead.
func (*Sum256Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *Sum256Args) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type Sum384Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Sum384Args) Reset() {
*x = Sum384Args{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Sum384Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Sum384Args) ProtoMessage() {}
func (x *Sum384Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Sum384Args.ProtoReflect.Descriptor instead.
func (*Sum384Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *Sum384Args) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type Sum512Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Sum512Args) Reset() {
*x = Sum512Args{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Sum512Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Sum512Args) ProtoMessage() {}
func (x *Sum512Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Sum512Args.ProtoReflect.Descriptor instead.
func (*Sum512Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *Sum512Args) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type SumSHAKE128Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
Length int64 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SumSHAKE128Args) Reset() {
*x = SumSHAKE128Args{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SumSHAKE128Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SumSHAKE128Args) ProtoMessage() {}
func (x *SumSHAKE128Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SumSHAKE128Args.ProtoReflect.Descriptor instead.
func (*SumSHAKE128Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *SumSHAKE128Args) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
func (x *SumSHAKE128Args) GetLength() int64 {
if x != nil {
return x.Length
}
return 0
}
type SumSHAKE256Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
Length int64 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SumSHAKE256Args) Reset() {
*x = SumSHAKE256Args{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SumSHAKE256Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SumSHAKE256Args) ProtoMessage() {}
func (x *SumSHAKE256Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SumSHAKE256Args.ProtoReflect.Descriptor instead.
func (*SumSHAKE256Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *SumSHAKE256Args) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
func (x *SumSHAKE256Args) GetLength() int64 {
if x != nil {
return x.Length
}
return 0
}
type New224Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *New224Args) Reset() {
*x = New224Args{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *New224Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*New224Args) ProtoMessage() {}
func (x *New224Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use New224Args.ProtoReflect.Descriptor instead.
func (*New224Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
type New256Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *New256Args) Reset() {
*x = New256Args{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *New256Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*New256Args) ProtoMessage() {}
func (x *New256Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use New256Args.ProtoReflect.Descriptor instead.
func (*New256Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
type New384Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *New384Args) Reset() {
*x = New384Args{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *New384Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*New384Args) ProtoMessage() {}
func (x *New384Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use New384Args.ProtoReflect.Descriptor instead.
func (*New384Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// New512Args is the generated, field-less protobuf message carrying the
// arguments of a fuzzed New512 call. Generated code; do not edit by hand.
type New512Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *New512Args) Reset() {
*x = New512Args{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *New512Args) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks New512Args as a protobuf message (marker method).
func (*New512Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *New512Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use New512Args.ProtoReflect.Descriptor instead.
func (*New512Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// SHA3NgdotWriteArgs is the generated protobuf message carrying the byte
// slice argument of a fuzzed SHA3.Write call. Generated code; do not edit.
type SHA3NgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHA3NgdotWriteArgs) Reset() {
*x = SHA3NgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHA3NgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHA3NgdotWriteArgs as a protobuf message (marker method).
func (*SHA3NgdotWriteArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHA3NgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHA3NgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*SHA3NgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetP returns the p field, or nil if the receiver is nil.
func (x *SHA3NgdotWriteArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// SHA3NgdotSumArgs is the generated protobuf message carrying the byte
// slice argument of a fuzzed SHA3.Sum call. Generated code; do not edit.
type SHA3NgdotSumArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHA3NgdotSumArgs) Reset() {
*x = SHA3NgdotSumArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHA3NgdotSumArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHA3NgdotSumArgs as a protobuf message (marker method).
func (*SHA3NgdotSumArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHA3NgdotSumArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHA3NgdotSumArgs.ProtoReflect.Descriptor instead.
func (*SHA3NgdotSumArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetB returns the b field, or nil if the receiver is nil.
func (x *SHA3NgdotSumArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// SHA3NgdotResetArgs is the generated, field-less protobuf message for a
// fuzzed SHA3.Reset call. Generated code; do not edit by hand.
type SHA3NgdotResetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHA3NgdotResetArgs) Reset() {
*x = SHA3NgdotResetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHA3NgdotResetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHA3NgdotResetArgs as a protobuf message (marker method).
func (*SHA3NgdotResetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHA3NgdotResetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHA3NgdotResetArgs.ProtoReflect.Descriptor instead.
func (*SHA3NgdotResetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// SHA3NgdotSizeArgs is the generated, field-less protobuf message for a
// fuzzed SHA3.Size call. Generated code; do not edit by hand.
type SHA3NgdotSizeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHA3NgdotSizeArgs) Reset() {
*x = SHA3NgdotSizeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHA3NgdotSizeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHA3NgdotSizeArgs as a protobuf message (marker method).
func (*SHA3NgdotSizeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHA3NgdotSizeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHA3NgdotSizeArgs.ProtoReflect.Descriptor instead.
func (*SHA3NgdotSizeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// SHA3NgdotBlockSizeArgs is the generated, field-less protobuf message for a
// fuzzed SHA3.BlockSize call. Generated code; do not edit by hand.
type SHA3NgdotBlockSizeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHA3NgdotBlockSizeArgs) Reset() {
*x = SHA3NgdotBlockSizeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHA3NgdotBlockSizeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHA3NgdotBlockSizeArgs as a protobuf message (marker method).
func (*SHA3NgdotBlockSizeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHA3NgdotBlockSizeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHA3NgdotBlockSizeArgs.ProtoReflect.Descriptor instead.
func (*SHA3NgdotBlockSizeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// SHA3NgdotMarshalBinaryArgs is the generated, field-less protobuf message
// for a fuzzed SHA3.MarshalBinary call. Generated code; do not edit by hand.
type SHA3NgdotMarshalBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHA3NgdotMarshalBinaryArgs) Reset() {
*x = SHA3NgdotMarshalBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHA3NgdotMarshalBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHA3NgdotMarshalBinaryArgs as a protobuf message (marker method).
func (*SHA3NgdotMarshalBinaryArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHA3NgdotMarshalBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHA3NgdotMarshalBinaryArgs.ProtoReflect.Descriptor instead.
func (*SHA3NgdotMarshalBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// SHA3NgdotAppendBinaryArgs is the generated protobuf message carrying the
// destination byte slice of a fuzzed SHA3.AppendBinary call. Generated code.
type SHA3NgdotAppendBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHA3NgdotAppendBinaryArgs) Reset() {
*x = SHA3NgdotAppendBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHA3NgdotAppendBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHA3NgdotAppendBinaryArgs as a protobuf message (marker method).
func (*SHA3NgdotAppendBinaryArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHA3NgdotAppendBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHA3NgdotAppendBinaryArgs.ProtoReflect.Descriptor instead.
func (*SHA3NgdotAppendBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// GetP returns the p field, or nil if the receiver is nil.
func (x *SHA3NgdotAppendBinaryArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// SHA3NgdotUnmarshalBinaryArgs is the generated protobuf message carrying the
// encoded-state bytes of a fuzzed SHA3.UnmarshalBinary call. Generated code.
type SHA3NgdotUnmarshalBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHA3NgdotUnmarshalBinaryArgs) Reset() {
*x = SHA3NgdotUnmarshalBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHA3NgdotUnmarshalBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHA3NgdotUnmarshalBinaryArgs as a protobuf message (marker method).
func (*SHA3NgdotUnmarshalBinaryArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHA3NgdotUnmarshalBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHA3NgdotUnmarshalBinaryArgs.ProtoReflect.Descriptor instead.
func (*SHA3NgdotUnmarshalBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}

// GetData returns the data field, or nil if the receiver is nil.
func (x *SHA3NgdotUnmarshalBinaryArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
// SHA3NgdotCloneArgs is the generated, field-less protobuf message for a
// fuzzed SHA3.Clone call. Generated code; do not edit by hand.
type SHA3NgdotCloneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHA3NgdotCloneArgs) Reset() {
*x = SHA3NgdotCloneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHA3NgdotCloneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHA3NgdotCloneArgs as a protobuf message (marker method).
func (*SHA3NgdotCloneArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHA3NgdotCloneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHA3NgdotCloneArgs.ProtoReflect.Descriptor instead.
func (*SHA3NgdotCloneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// NewSHAKE128Args is the generated, field-less protobuf message for a fuzzed
// NewSHAKE128 call. Generated code; do not edit by hand.
type NewSHAKE128Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *NewSHAKE128Args) Reset() {
*x = NewSHAKE128Args{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *NewSHAKE128Args) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NewSHAKE128Args as a protobuf message (marker method).
func (*NewSHAKE128Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *NewSHAKE128Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewSHAKE128Args.ProtoReflect.Descriptor instead.
func (*NewSHAKE128Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// NewSHAKE256Args is the generated, field-less protobuf message for a fuzzed
// NewSHAKE256 call. Generated code; do not edit by hand.
type NewSHAKE256Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *NewSHAKE256Args) Reset() {
*x = NewSHAKE256Args{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *NewSHAKE256Args) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NewSHAKE256Args as a protobuf message (marker method).
func (*NewSHAKE256Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *NewSHAKE256Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewSHAKE256Args.ProtoReflect.Descriptor instead.
func (*NewSHAKE256Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// NewCSHAKE128Args is the generated protobuf message carrying the N
// (function name) and S (customization) byte slices for a fuzzed
// NewCSHAKE128 call. Generated code; do not edit by hand.
type NewCSHAKE128Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
N []byte `protobuf:"bytes,1,opt,name=N,proto3" json:"N,omitempty"`
S []byte `protobuf:"bytes,2,opt,name=S,proto3" json:"S,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *NewCSHAKE128Args) Reset() {
*x = NewCSHAKE128Args{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *NewCSHAKE128Args) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NewCSHAKE128Args as a protobuf message (marker method).
func (*NewCSHAKE128Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *NewCSHAKE128Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewCSHAKE128Args.ProtoReflect.Descriptor instead.
func (*NewCSHAKE128Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}

// GetN returns the N field, or nil if the receiver is nil.
func (x *NewCSHAKE128Args) GetN() []byte {
if x != nil {
return x.N
}
return nil
}

// GetS returns the S field, or nil if the receiver is nil.
func (x *NewCSHAKE128Args) GetS() []byte {
if x != nil {
return x.S
}
return nil
}
// NewCSHAKE256Args is the generated protobuf message carrying the N
// (function name) and S (customization) byte slices for a fuzzed
// NewCSHAKE256 call. Generated code; do not edit by hand.
type NewCSHAKE256Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
N []byte `protobuf:"bytes,1,opt,name=N,proto3" json:"N,omitempty"`
S []byte `protobuf:"bytes,2,opt,name=S,proto3" json:"S,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *NewCSHAKE256Args) Reset() {
*x = NewCSHAKE256Args{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *NewCSHAKE256Args) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NewCSHAKE256Args as a protobuf message (marker method).
func (*NewCSHAKE256Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *NewCSHAKE256Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewCSHAKE256Args.ProtoReflect.Descriptor instead.
func (*NewCSHAKE256Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}

// GetN returns the N field, or nil if the receiver is nil.
func (x *NewCSHAKE256Args) GetN() []byte {
if x != nil {
return x.N
}
return nil
}

// GetS returns the S field, or nil if the receiver is nil.
func (x *NewCSHAKE256Args) GetS() []byte {
if x != nil {
return x.S
}
return nil
}
// SHAKENgdotWriteArgs is the generated protobuf message carrying the byte
// slice argument of a fuzzed SHAKE.Write call. Generated code; do not edit.
type SHAKENgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHAKENgdotWriteArgs) Reset() {
*x = SHAKENgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHAKENgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHAKENgdotWriteArgs as a protobuf message (marker method).
func (*SHAKENgdotWriteArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHAKENgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHAKENgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*SHAKENgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}

// GetP returns the p field, or nil if the receiver is nil.
func (x *SHAKENgdotWriteArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// SHAKENgdotReadArgs is the generated protobuf message carrying the byte
// slice argument of a fuzzed SHAKE.Read call. Generated code; do not edit.
type SHAKENgdotReadArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHAKENgdotReadArgs) Reset() {
*x = SHAKENgdotReadArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHAKENgdotReadArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHAKENgdotReadArgs as a protobuf message (marker method).
func (*SHAKENgdotReadArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHAKENgdotReadArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHAKENgdotReadArgs.ProtoReflect.Descriptor instead.
func (*SHAKENgdotReadArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}

// GetP returns the p field, or nil if the receiver is nil.
func (x *SHAKENgdotReadArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// SHAKENgdotResetArgs is the generated, field-less protobuf message for a
// fuzzed SHAKE.Reset call. Generated code; do not edit by hand.
type SHAKENgdotResetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHAKENgdotResetArgs) Reset() {
*x = SHAKENgdotResetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHAKENgdotResetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHAKENgdotResetArgs as a protobuf message (marker method).
func (*SHAKENgdotResetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHAKENgdotResetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHAKENgdotResetArgs.ProtoReflect.Descriptor instead.
func (*SHAKENgdotResetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// SHAKENgdotBlockSizeArgs is the generated, field-less protobuf message for a
// fuzzed SHAKE.BlockSize call. Generated code; do not edit by hand.
type SHAKENgdotBlockSizeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHAKENgdotBlockSizeArgs) Reset() {
*x = SHAKENgdotBlockSizeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHAKENgdotBlockSizeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHAKENgdotBlockSizeArgs as a protobuf message (marker method).
func (*SHAKENgdotBlockSizeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHAKENgdotBlockSizeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHAKENgdotBlockSizeArgs.ProtoReflect.Descriptor instead.
func (*SHAKENgdotBlockSizeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// SHAKENgdotMarshalBinaryArgs is the generated, field-less protobuf message
// for a fuzzed SHAKE.MarshalBinary call. Generated code; do not edit by hand.
type SHAKENgdotMarshalBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHAKENgdotMarshalBinaryArgs) Reset() {
*x = SHAKENgdotMarshalBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHAKENgdotMarshalBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHAKENgdotMarshalBinaryArgs as a protobuf message (marker method).
func (*SHAKENgdotMarshalBinaryArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHAKENgdotMarshalBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHAKENgdotMarshalBinaryArgs.ProtoReflect.Descriptor instead.
func (*SHAKENgdotMarshalBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// SHAKENgdotAppendBinaryArgs is the generated protobuf message carrying the
// destination byte slice of a fuzzed SHAKE.AppendBinary call. Generated code.
type SHAKENgdotAppendBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHAKENgdotAppendBinaryArgs) Reset() {
*x = SHAKENgdotAppendBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHAKENgdotAppendBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHAKENgdotAppendBinaryArgs as a protobuf message (marker method).
func (*SHAKENgdotAppendBinaryArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHAKENgdotAppendBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHAKENgdotAppendBinaryArgs.ProtoReflect.Descriptor instead.
func (*SHAKENgdotAppendBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}

// GetP returns the p field, or nil if the receiver is nil.
func (x *SHAKENgdotAppendBinaryArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// SHAKENgdotUnmarshalBinaryArgs is the generated protobuf message carrying
// the encoded-state bytes of a fuzzed SHAKE.UnmarshalBinary call. Generated code.
type SHAKENgdotUnmarshalBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *SHAKENgdotUnmarshalBinaryArgs) Reset() {
*x = SHAKENgdotUnmarshalBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *SHAKENgdotUnmarshalBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SHAKENgdotUnmarshalBinaryArgs as a protobuf message (marker method).
func (*SHAKENgdotUnmarshalBinaryArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *SHAKENgdotUnmarshalBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SHAKENgdotUnmarshalBinaryArgs.ProtoReflect.Descriptor instead.
func (*SHAKENgdotUnmarshalBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}

// GetData returns the data field, or nil if the receiver is nil.
func (x *SHAKENgdotUnmarshalBinaryArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
// NgoloFuzzOne is the generated protobuf message holding a single fuzz
// operation: exactly one of the wrapped *Args messages below is set in the
// Item oneof, selecting which API call the fuzzer replays. Generated code;
// do not edit by hand.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Sum224
// *NgoloFuzzOne_Sum256
// *NgoloFuzzOne_Sum384
// *NgoloFuzzOne_Sum512
// *NgoloFuzzOne_SumSHAKE128
// *NgoloFuzzOne_SumSHAKE256
// *NgoloFuzzOne_New224
// *NgoloFuzzOne_New256
// *NgoloFuzzOne_New384
// *NgoloFuzzOne_New512
// *NgoloFuzzOne_SHA3NgdotWrite
// *NgoloFuzzOne_SHA3NgdotSum
// *NgoloFuzzOne_SHA3NgdotReset
// *NgoloFuzzOne_SHA3NgdotSize
// *NgoloFuzzOne_SHA3NgdotBlockSize
// *NgoloFuzzOne_SHA3NgdotMarshalBinary
// *NgoloFuzzOne_SHA3NgdotAppendBinary
// *NgoloFuzzOne_SHA3NgdotUnmarshalBinary
// *NgoloFuzzOne_SHA3NgdotClone
// *NgoloFuzzOne_NewSHAKE128
// *NgoloFuzzOne_NewSHAKE256
// *NgoloFuzzOne_NewCSHAKE128
// *NgoloFuzzOne_NewCSHAKE256
// *NgoloFuzzOne_SHAKENgdotWrite
// *NgoloFuzzOne_SHAKENgdotRead
// *NgoloFuzzOne_SHAKENgdotReset
// *NgoloFuzzOne_SHAKENgdotBlockSize
// *NgoloFuzzOne_SHAKENgdotMarshalBinary
// *NgoloFuzzOne_SHAKENgdotAppendBinary
// *NgoloFuzzOne_SHAKENgdotUnmarshalBinary
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}

// Reset clears x to the zero value and re-attaches its message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}

// String returns the protobuf text-format rendering of x.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NgoloFuzzOne as a protobuf message (marker method).
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info on first use (generated lock-free initialization; do not reorder).
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}

// GetItem returns the currently-set oneof wrapper, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}

// Each Get* accessor below returns the corresponding oneof variant's payload,
// or nil when the receiver is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetSum224() *Sum224Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum224); ok {
return x.Sum224
}
}
return nil
}
func (x *NgoloFuzzOne) GetSum256() *Sum256Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum256); ok {
return x.Sum256
}
}
return nil
}
func (x *NgoloFuzzOne) GetSum384() *Sum384Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum384); ok {
return x.Sum384
}
}
return nil
}
func (x *NgoloFuzzOne) GetSum512() *Sum512Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum512); ok {
return x.Sum512
}
}
return nil
}
func (x *NgoloFuzzOne) GetSumSHAKE128() *SumSHAKE128Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SumSHAKE128); ok {
return x.SumSHAKE128
}
}
return nil
}
func (x *NgoloFuzzOne) GetSumSHAKE256() *SumSHAKE256Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SumSHAKE256); ok {
return x.SumSHAKE256
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew224() *New224Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New224); ok {
return x.New224
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew256() *New256Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New256); ok {
return x.New256
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew384() *New384Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New384); ok {
return x.New384
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew512() *New512Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New512); ok {
return x.New512
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHA3NgdotWrite() *SHA3NgdotWriteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHA3NgdotWrite); ok {
return x.SHA3NgdotWrite
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHA3NgdotSum() *SHA3NgdotSumArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHA3NgdotSum); ok {
return x.SHA3NgdotSum
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHA3NgdotReset() *SHA3NgdotResetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHA3NgdotReset); ok {
return x.SHA3NgdotReset
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHA3NgdotSize() *SHA3NgdotSizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHA3NgdotSize); ok {
return x.SHA3NgdotSize
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHA3NgdotBlockSize() *SHA3NgdotBlockSizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHA3NgdotBlockSize); ok {
return x.SHA3NgdotBlockSize
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHA3NgdotMarshalBinary() *SHA3NgdotMarshalBinaryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHA3NgdotMarshalBinary); ok {
return x.SHA3NgdotMarshalBinary
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHA3NgdotAppendBinary() *SHA3NgdotAppendBinaryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHA3NgdotAppendBinary); ok {
return x.SHA3NgdotAppendBinary
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHA3NgdotUnmarshalBinary() *SHA3NgdotUnmarshalBinaryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHA3NgdotUnmarshalBinary); ok {
return x.SHA3NgdotUnmarshalBinary
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHA3NgdotClone() *SHA3NgdotCloneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHA3NgdotClone); ok {
return x.SHA3NgdotClone
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewSHAKE128() *NewSHAKE128Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewSHAKE128); ok {
return x.NewSHAKE128
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewSHAKE256() *NewSHAKE256Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewSHAKE256); ok {
return x.NewSHAKE256
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewCSHAKE128() *NewCSHAKE128Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewCSHAKE128); ok {
return x.NewCSHAKE128
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewCSHAKE256() *NewCSHAKE256Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewCSHAKE256); ok {
return x.NewCSHAKE256
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHAKENgdotWrite() *SHAKENgdotWriteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHAKENgdotWrite); ok {
return x.SHAKENgdotWrite
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHAKENgdotRead() *SHAKENgdotReadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHAKENgdotRead); ok {
return x.SHAKENgdotRead
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHAKENgdotReset() *SHAKENgdotResetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHAKENgdotReset); ok {
return x.SHAKENgdotReset
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHAKENgdotBlockSize() *SHAKENgdotBlockSizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHAKENgdotBlockSize); ok {
return x.SHAKENgdotBlockSize
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHAKENgdotMarshalBinary() *SHAKENgdotMarshalBinaryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHAKENgdotMarshalBinary); ok {
return x.SHAKENgdotMarshalBinary
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHAKENgdotAppendBinary() *SHAKENgdotAppendBinaryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHAKENgdotAppendBinary); ok {
return x.SHAKENgdotAppendBinary
}
}
return nil
}
func (x *NgoloFuzzOne) GetSHAKENgdotUnmarshalBinary() *SHAKENgdotUnmarshalBinaryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SHAKENgdotUnmarshalBinary); ok {
return x.SHAKENgdotUnmarshalBinary
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented only by the
// generated oneof wrapper types for NgoloFuzzOne.Item.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// Generated oneof wrapper types: each holds exactly one *Args payload and is
// assignable to NgoloFuzzOne.Item. Field numbers in the protobuf tags match
// the .proto oneof definition. Generated code; do not edit by hand.
type NgoloFuzzOne_Sum224 struct {
Sum224 *Sum224Args `protobuf:"bytes,1,opt,name=Sum224,proto3,oneof"`
}
type NgoloFuzzOne_Sum256 struct {
Sum256 *Sum256Args `protobuf:"bytes,2,opt,name=Sum256,proto3,oneof"`
}
type NgoloFuzzOne_Sum384 struct {
Sum384 *Sum384Args `protobuf:"bytes,3,opt,name=Sum384,proto3,oneof"`
}
type NgoloFuzzOne_Sum512 struct {
Sum512 *Sum512Args `protobuf:"bytes,4,opt,name=Sum512,proto3,oneof"`
}
type NgoloFuzzOne_SumSHAKE128 struct {
SumSHAKE128 *SumSHAKE128Args `protobuf:"bytes,5,opt,name=SumSHAKE128,proto3,oneof"`
}
type NgoloFuzzOne_SumSHAKE256 struct {
SumSHAKE256 *SumSHAKE256Args `protobuf:"bytes,6,opt,name=SumSHAKE256,proto3,oneof"`
}
type NgoloFuzzOne_New224 struct {
New224 *New224Args `protobuf:"bytes,7,opt,name=New224,proto3,oneof"`
}
type NgoloFuzzOne_New256 struct {
New256 *New256Args `protobuf:"bytes,8,opt,name=New256,proto3,oneof"`
}
type NgoloFuzzOne_New384 struct {
New384 *New384Args `protobuf:"bytes,9,opt,name=New384,proto3,oneof"`
}
type NgoloFuzzOne_New512 struct {
New512 *New512Args `protobuf:"bytes,10,opt,name=New512,proto3,oneof"`
}
type NgoloFuzzOne_SHA3NgdotWrite struct {
SHA3NgdotWrite *SHA3NgdotWriteArgs `protobuf:"bytes,11,opt,name=SHA3NgdotWrite,proto3,oneof"`
}
type NgoloFuzzOne_SHA3NgdotSum struct {
SHA3NgdotSum *SHA3NgdotSumArgs `protobuf:"bytes,12,opt,name=SHA3NgdotSum,proto3,oneof"`
}
type NgoloFuzzOne_SHA3NgdotReset struct {
SHA3NgdotReset *SHA3NgdotResetArgs `protobuf:"bytes,13,opt,name=SHA3NgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_SHA3NgdotSize struct {
SHA3NgdotSize *SHA3NgdotSizeArgs `protobuf:"bytes,14,opt,name=SHA3NgdotSize,proto3,oneof"`
}
type NgoloFuzzOne_SHA3NgdotBlockSize struct {
SHA3NgdotBlockSize *SHA3NgdotBlockSizeArgs `protobuf:"bytes,15,opt,name=SHA3NgdotBlockSize,proto3,oneof"`
}
type NgoloFuzzOne_SHA3NgdotMarshalBinary struct {
SHA3NgdotMarshalBinary *SHA3NgdotMarshalBinaryArgs `protobuf:"bytes,16,opt,name=SHA3NgdotMarshalBinary,proto3,oneof"`
}
type NgoloFuzzOne_SHA3NgdotAppendBinary struct {
SHA3NgdotAppendBinary *SHA3NgdotAppendBinaryArgs `protobuf:"bytes,17,opt,name=SHA3NgdotAppendBinary,proto3,oneof"`
}
type NgoloFuzzOne_SHA3NgdotUnmarshalBinary struct {
SHA3NgdotUnmarshalBinary *SHA3NgdotUnmarshalBinaryArgs `protobuf:"bytes,18,opt,name=SHA3NgdotUnmarshalBinary,proto3,oneof"`
}
type NgoloFuzzOne_SHA3NgdotClone struct {
SHA3NgdotClone *SHA3NgdotCloneArgs `protobuf:"bytes,19,opt,name=SHA3NgdotClone,proto3,oneof"`
}
type NgoloFuzzOne_NewSHAKE128 struct {
NewSHAKE128 *NewSHAKE128Args `protobuf:"bytes,20,opt,name=NewSHAKE128,proto3,oneof"`
}
type NgoloFuzzOne_NewSHAKE256 struct {
NewSHAKE256 *NewSHAKE256Args `protobuf:"bytes,21,opt,name=NewSHAKE256,proto3,oneof"`
}
type NgoloFuzzOne_NewCSHAKE128 struct {
NewCSHAKE128 *NewCSHAKE128Args `protobuf:"bytes,22,opt,name=NewCSHAKE128,proto3,oneof"`
}
type NgoloFuzzOne_NewCSHAKE256 struct {
NewCSHAKE256 *NewCSHAKE256Args `protobuf:"bytes,23,opt,name=NewCSHAKE256,proto3,oneof"`
}
type NgoloFuzzOne_SHAKENgdotWrite struct {
SHAKENgdotWrite *SHAKENgdotWriteArgs `protobuf:"bytes,24,opt,name=SHAKENgdotWrite,proto3,oneof"`
}
type NgoloFuzzOne_SHAKENgdotRead struct {
SHAKENgdotRead *SHAKENgdotReadArgs `protobuf:"bytes,25,opt,name=SHAKENgdotRead,proto3,oneof"`
}
type NgoloFuzzOne_SHAKENgdotReset struct {
SHAKENgdotReset *SHAKENgdotResetArgs `protobuf:"bytes,26,opt,name=SHAKENgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_SHAKENgdotBlockSize struct {
SHAKENgdotBlockSize *SHAKENgdotBlockSizeArgs `protobuf:"bytes,27,opt,name=SHAKENgdotBlockSize,proto3,oneof"`
}
type NgoloFuzzOne_SHAKENgdotMarshalBinary struct {
SHAKENgdotMarshalBinary *SHAKENgdotMarshalBinaryArgs `protobuf:"bytes,28,opt,name=SHAKENgdotMarshalBinary,proto3,oneof"`
}
type NgoloFuzzOne_SHAKENgdotAppendBinary struct {
SHAKENgdotAppendBinary *SHAKENgdotAppendBinaryArgs `protobuf:"bytes,29,opt,name=SHAKENgdotAppendBinary,proto3,oneof"`
}
type NgoloFuzzOne_SHAKENgdotUnmarshalBinary struct {
SHAKENgdotUnmarshalBinary *SHAKENgdotUnmarshalBinaryArgs `protobuf:"bytes,30,opt,name=SHAKENgdotUnmarshalBinary,proto3,oneof"`
}
// Marker methods: each wrapper type satisfies the sealed isNgoloFuzzOne_Item
// interface so the compiler restricts NgoloFuzzOne.Item to these types.
func (*NgoloFuzzOne_Sum224) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sum256) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sum384) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sum512) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SumSHAKE128) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SumSHAKE256) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New224) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New256) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New384) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New512) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHA3NgdotWrite) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHA3NgdotSum) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHA3NgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHA3NgdotSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHA3NgdotBlockSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHA3NgdotMarshalBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHA3NgdotAppendBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHA3NgdotUnmarshalBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHA3NgdotClone) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewSHAKE128) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewSHAKE256) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewCSHAKE128) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewCSHAKE256) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHAKENgdotWrite) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHAKENgdotRead) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHAKENgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHAKENgdotBlockSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHAKENgdotMarshalBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHAKENgdotAppendBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SHAKENgdotUnmarshalBinary) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated oneof container holding a single scalar value
// of one of the supported protobuf kinds (double, int64, bool, string, bytes).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzAny_DoubleArgs
	// *NgoloFuzzAny_Int64Args
	// *NgoloFuzzAny_BoolArgs
	// *NgoloFuzzAny_StringArgs
	// *NgoloFuzzAny_BytesArgs
	Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state while re-attaching the
// cached message info (index 31 in file_ngolofuzz_proto_msgTypes).
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[31]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[31]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
// GetItem returns whichever oneof member is set, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}
// GetDoubleArgs returns the double member, or 0 if another member is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}
// GetInt64Args returns the int64 member, or 0 if another member is set.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}
// GetBoolArgs returns the bool member, or false if another member is set.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}
// GetStringArgs returns the string member, or "" if another member is set.
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}
// GetBytesArgs returns the bytes member, or nil if another member is set.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented by all oneof
// wrapper types of NgoloFuzzAny.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzzing message: an ordered sequence of
// NgoloFuzzOne operations to replay against the target package.
type NgoloFuzzList struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state while re-attaching the
// cached message info (index 32 in file_ngolofuzz_proto_msgTypes).
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[32]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[32]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}
// GetList returns the operation sequence, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the runtime descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-encoded FileDescriptorProto for
// ngolofuzz.proto. The byte content is generated; do not edit by hand.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\" \n" +
	"\n" +
	"Sum224Args\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\" \n" +
	"\n" +
	"Sum256Args\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\" \n" +
	"\n" +
	"Sum384Args\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\" \n" +
	"\n" +
	"Sum512Args\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\"=\n" +
	"\x0fSumSHAKE128Args\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\x12\x16\n" +
	"\x06length\x18\x02 \x01(\x03R\x06length\"=\n" +
	"\x0fSumSHAKE256Args\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\x12\x16\n" +
	"\x06length\x18\x02 \x01(\x03R\x06length\"\f\n" +
	"\n" +
	"New224Args\"\f\n" +
	"\n" +
	"New256Args\"\f\n" +
	"\n" +
	"New384Args\"\f\n" +
	"\n" +
	"New512Args\"\"\n" +
	"\x12SHA3NgdotWriteArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\" \n" +
	"\x10SHA3NgdotSumArgs\x12\f\n" +
	"\x01b\x18\x01 \x01(\fR\x01b\"\x14\n" +
	"\x12SHA3NgdotResetArgs\"\x13\n" +
	"\x11SHA3NgdotSizeArgs\"\x18\n" +
	"\x16SHA3NgdotBlockSizeArgs\"\x1c\n" +
	"\x1aSHA3NgdotMarshalBinaryArgs\")\n" +
	"\x19SHA3NgdotAppendBinaryArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\"2\n" +
	"\x1cSHA3NgdotUnmarshalBinaryArgs\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\"\x14\n" +
	"\x12SHA3NgdotCloneArgs\"\x11\n" +
	"\x0fNewSHAKE128Args\"\x11\n" +
	"\x0fNewSHAKE256Args\".\n" +
	"\x10NewCSHAKE128Args\x12\f\n" +
	"\x01N\x18\x01 \x01(\fR\x01N\x12\f\n" +
	"\x01S\x18\x02 \x01(\fR\x01S\".\n" +
	"\x10NewCSHAKE256Args\x12\f\n" +
	"\x01N\x18\x01 \x01(\fR\x01N\x12\f\n" +
	"\x01S\x18\x02 \x01(\fR\x01S\"#\n" +
	"\x13SHAKENgdotWriteArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\"\"\n" +
	"\x12SHAKENgdotReadArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\"\x15\n" +
	"\x13SHAKENgdotResetArgs\"\x19\n" +
	"\x17SHAKENgdotBlockSizeArgs\"\x1d\n" +
	"\x1bSHAKENgdotMarshalBinaryArgs\"*\n" +
	"\x1aSHAKENgdotAppendBinaryArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\"3\n" +
	"\x1dSHAKENgdotUnmarshalBinaryArgs\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\"\xeb\x10\n" +
	"\fNgoloFuzzOne\x12/\n" +
	"\x06Sum224\x18\x01 \x01(\v2\x15.ngolofuzz.Sum224ArgsH\x00R\x06Sum224\x12/\n" +
	"\x06Sum256\x18\x02 \x01(\v2\x15.ngolofuzz.Sum256ArgsH\x00R\x06Sum256\x12/\n" +
	"\x06Sum384\x18\x03 \x01(\v2\x15.ngolofuzz.Sum384ArgsH\x00R\x06Sum384\x12/\n" +
	"\x06Sum512\x18\x04 \x01(\v2\x15.ngolofuzz.Sum512ArgsH\x00R\x06Sum512\x12>\n" +
	"\vSumSHAKE128\x18\x05 \x01(\v2\x1a.ngolofuzz.SumSHAKE128ArgsH\x00R\vSumSHAKE128\x12>\n" +
	"\vSumSHAKE256\x18\x06 \x01(\v2\x1a.ngolofuzz.SumSHAKE256ArgsH\x00R\vSumSHAKE256\x12/\n" +
	"\x06New224\x18\a \x01(\v2\x15.ngolofuzz.New224ArgsH\x00R\x06New224\x12/\n" +
	"\x06New256\x18\b \x01(\v2\x15.ngolofuzz.New256ArgsH\x00R\x06New256\x12/\n" +
	"\x06New384\x18\t \x01(\v2\x15.ngolofuzz.New384ArgsH\x00R\x06New384\x12/\n" +
	"\x06New512\x18\n" +
	" \x01(\v2\x15.ngolofuzz.New512ArgsH\x00R\x06New512\x12G\n" +
	"\x0eSHA3NgdotWrite\x18\v \x01(\v2\x1d.ngolofuzz.SHA3NgdotWriteArgsH\x00R\x0eSHA3NgdotWrite\x12A\n" +
	"\fSHA3NgdotSum\x18\f \x01(\v2\x1b.ngolofuzz.SHA3NgdotSumArgsH\x00R\fSHA3NgdotSum\x12G\n" +
	"\x0eSHA3NgdotReset\x18\r \x01(\v2\x1d.ngolofuzz.SHA3NgdotResetArgsH\x00R\x0eSHA3NgdotReset\x12D\n" +
	"\rSHA3NgdotSize\x18\x0e \x01(\v2\x1c.ngolofuzz.SHA3NgdotSizeArgsH\x00R\rSHA3NgdotSize\x12S\n" +
	"\x12SHA3NgdotBlockSize\x18\x0f \x01(\v2!.ngolofuzz.SHA3NgdotBlockSizeArgsH\x00R\x12SHA3NgdotBlockSize\x12_\n" +
	"\x16SHA3NgdotMarshalBinary\x18\x10 \x01(\v2%.ngolofuzz.SHA3NgdotMarshalBinaryArgsH\x00R\x16SHA3NgdotMarshalBinary\x12\\\n" +
	"\x15SHA3NgdotAppendBinary\x18\x11 \x01(\v2$.ngolofuzz.SHA3NgdotAppendBinaryArgsH\x00R\x15SHA3NgdotAppendBinary\x12e\n" +
	"\x18SHA3NgdotUnmarshalBinary\x18\x12 \x01(\v2'.ngolofuzz.SHA3NgdotUnmarshalBinaryArgsH\x00R\x18SHA3NgdotUnmarshalBinary\x12G\n" +
	"\x0eSHA3NgdotClone\x18\x13 \x01(\v2\x1d.ngolofuzz.SHA3NgdotCloneArgsH\x00R\x0eSHA3NgdotClone\x12>\n" +
	"\vNewSHAKE128\x18\x14 \x01(\v2\x1a.ngolofuzz.NewSHAKE128ArgsH\x00R\vNewSHAKE128\x12>\n" +
	"\vNewSHAKE256\x18\x15 \x01(\v2\x1a.ngolofuzz.NewSHAKE256ArgsH\x00R\vNewSHAKE256\x12A\n" +
	"\fNewCSHAKE128\x18\x16 \x01(\v2\x1b.ngolofuzz.NewCSHAKE128ArgsH\x00R\fNewCSHAKE128\x12A\n" +
	"\fNewCSHAKE256\x18\x17 \x01(\v2\x1b.ngolofuzz.NewCSHAKE256ArgsH\x00R\fNewCSHAKE256\x12J\n" +
	"\x0fSHAKENgdotWrite\x18\x18 \x01(\v2\x1e.ngolofuzz.SHAKENgdotWriteArgsH\x00R\x0fSHAKENgdotWrite\x12G\n" +
	"\x0eSHAKENgdotRead\x18\x19 \x01(\v2\x1d.ngolofuzz.SHAKENgdotReadArgsH\x00R\x0eSHAKENgdotRead\x12J\n" +
	"\x0fSHAKENgdotReset\x18\x1a \x01(\v2\x1e.ngolofuzz.SHAKENgdotResetArgsH\x00R\x0fSHAKENgdotReset\x12V\n" +
	"\x13SHAKENgdotBlockSize\x18\x1b \x01(\v2\".ngolofuzz.SHAKENgdotBlockSizeArgsH\x00R\x13SHAKENgdotBlockSize\x12b\n" +
	"\x17SHAKENgdotMarshalBinary\x18\x1c \x01(\v2&.ngolofuzz.SHAKENgdotMarshalBinaryArgsH\x00R\x17SHAKENgdotMarshalBinary\x12_\n" +
	"\x16SHAKENgdotAppendBinary\x18\x1d \x01(\v2%.ngolofuzz.SHAKENgdotAppendBinaryArgsH\x00R\x16SHAKENgdotAppendBinary\x12h\n" +
	"\x19SHAKENgdotUnmarshalBinary\x18\x1e \x01(\v2(.ngolofuzz.SHAKENgdotUnmarshalBinaryArgsH\x00R\x19SHAKENgdotUnmarshalBinaryB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x18Z\x16./;fuzz_ng_crypto_sha3b\x06proto3"
var (
	// file_ngolofuzz_proto_rawDescOnce guards the one-time gzip compression
	// of the raw descriptor below.
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw descriptor
// exactly once and returns the cached bytes (used by Descriptor methods).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Per-message runtime type information for all 33 messages in ngolofuzz.proto.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
// Go types referenced by the descriptor, indexed as commented below.
var file_ngolofuzz_proto_goTypes = []any{
	(*Sum224Args)(nil), // 0: ngolofuzz.Sum224Args
	(*Sum256Args)(nil), // 1: ngolofuzz.Sum256Args
	(*Sum384Args)(nil), // 2: ngolofuzz.Sum384Args
	(*Sum512Args)(nil), // 3: ngolofuzz.Sum512Args
	(*SumSHAKE128Args)(nil), // 4: ngolofuzz.SumSHAKE128Args
	(*SumSHAKE256Args)(nil), // 5: ngolofuzz.SumSHAKE256Args
	(*New224Args)(nil), // 6: ngolofuzz.New224Args
	(*New256Args)(nil), // 7: ngolofuzz.New256Args
	(*New384Args)(nil), // 8: ngolofuzz.New384Args
	(*New512Args)(nil), // 9: ngolofuzz.New512Args
	(*SHA3NgdotWriteArgs)(nil), // 10: ngolofuzz.SHA3NgdotWriteArgs
	(*SHA3NgdotSumArgs)(nil), // 11: ngolofuzz.SHA3NgdotSumArgs
	(*SHA3NgdotResetArgs)(nil), // 12: ngolofuzz.SHA3NgdotResetArgs
	(*SHA3NgdotSizeArgs)(nil), // 13: ngolofuzz.SHA3NgdotSizeArgs
	(*SHA3NgdotBlockSizeArgs)(nil), // 14: ngolofuzz.SHA3NgdotBlockSizeArgs
	(*SHA3NgdotMarshalBinaryArgs)(nil), // 15: ngolofuzz.SHA3NgdotMarshalBinaryArgs
	(*SHA3NgdotAppendBinaryArgs)(nil), // 16: ngolofuzz.SHA3NgdotAppendBinaryArgs
	(*SHA3NgdotUnmarshalBinaryArgs)(nil), // 17: ngolofuzz.SHA3NgdotUnmarshalBinaryArgs
	(*SHA3NgdotCloneArgs)(nil), // 18: ngolofuzz.SHA3NgdotCloneArgs
	(*NewSHAKE128Args)(nil), // 19: ngolofuzz.NewSHAKE128Args
	(*NewSHAKE256Args)(nil), // 20: ngolofuzz.NewSHAKE256Args
	(*NewCSHAKE128Args)(nil), // 21: ngolofuzz.NewCSHAKE128Args
	(*NewCSHAKE256Args)(nil), // 22: ngolofuzz.NewCSHAKE256Args
	(*SHAKENgdotWriteArgs)(nil), // 23: ngolofuzz.SHAKENgdotWriteArgs
	(*SHAKENgdotReadArgs)(nil), // 24: ngolofuzz.SHAKENgdotReadArgs
	(*SHAKENgdotResetArgs)(nil), // 25: ngolofuzz.SHAKENgdotResetArgs
	(*SHAKENgdotBlockSizeArgs)(nil), // 26: ngolofuzz.SHAKENgdotBlockSizeArgs
	(*SHAKENgdotMarshalBinaryArgs)(nil), // 27: ngolofuzz.SHAKENgdotMarshalBinaryArgs
	(*SHAKENgdotAppendBinaryArgs)(nil), // 28: ngolofuzz.SHAKENgdotAppendBinaryArgs
	(*SHAKENgdotUnmarshalBinaryArgs)(nil), // 29: ngolofuzz.SHAKENgdotUnmarshalBinaryArgs
	(*NgoloFuzzOne)(nil), // 30: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil), // 31: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil), // 32: ngolofuzz.NgoloFuzzList
}
// Dependency indexes into goTypes: which Args message each oneof field of
// NgoloFuzzOne (and NgoloFuzzList.list) refers to.
var file_ngolofuzz_proto_depIdxs = []int32{
	0, // 0: ngolofuzz.NgoloFuzzOne.Sum224:type_name -> ngolofuzz.Sum224Args
	1, // 1: ngolofuzz.NgoloFuzzOne.Sum256:type_name -> ngolofuzz.Sum256Args
	2, // 2: ngolofuzz.NgoloFuzzOne.Sum384:type_name -> ngolofuzz.Sum384Args
	3, // 3: ngolofuzz.NgoloFuzzOne.Sum512:type_name -> ngolofuzz.Sum512Args
	4, // 4: ngolofuzz.NgoloFuzzOne.SumSHAKE128:type_name -> ngolofuzz.SumSHAKE128Args
	5, // 5: ngolofuzz.NgoloFuzzOne.SumSHAKE256:type_name -> ngolofuzz.SumSHAKE256Args
	6, // 6: ngolofuzz.NgoloFuzzOne.New224:type_name -> ngolofuzz.New224Args
	7, // 7: ngolofuzz.NgoloFuzzOne.New256:type_name -> ngolofuzz.New256Args
	8, // 8: ngolofuzz.NgoloFuzzOne.New384:type_name -> ngolofuzz.New384Args
	9, // 9: ngolofuzz.NgoloFuzzOne.New512:type_name -> ngolofuzz.New512Args
	10, // 10: ngolofuzz.NgoloFuzzOne.SHA3NgdotWrite:type_name -> ngolofuzz.SHA3NgdotWriteArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.SHA3NgdotSum:type_name -> ngolofuzz.SHA3NgdotSumArgs
	12, // 12: ngolofuzz.NgoloFuzzOne.SHA3NgdotReset:type_name -> ngolofuzz.SHA3NgdotResetArgs
	13, // 13: ngolofuzz.NgoloFuzzOne.SHA3NgdotSize:type_name -> ngolofuzz.SHA3NgdotSizeArgs
	14, // 14: ngolofuzz.NgoloFuzzOne.SHA3NgdotBlockSize:type_name -> ngolofuzz.SHA3NgdotBlockSizeArgs
	15, // 15: ngolofuzz.NgoloFuzzOne.SHA3NgdotMarshalBinary:type_name -> ngolofuzz.SHA3NgdotMarshalBinaryArgs
	16, // 16: ngolofuzz.NgoloFuzzOne.SHA3NgdotAppendBinary:type_name -> ngolofuzz.SHA3NgdotAppendBinaryArgs
	17, // 17: ngolofuzz.NgoloFuzzOne.SHA3NgdotUnmarshalBinary:type_name -> ngolofuzz.SHA3NgdotUnmarshalBinaryArgs
	18, // 18: ngolofuzz.NgoloFuzzOne.SHA3NgdotClone:type_name -> ngolofuzz.SHA3NgdotCloneArgs
	19, // 19: ngolofuzz.NgoloFuzzOne.NewSHAKE128:type_name -> ngolofuzz.NewSHAKE128Args
	20, // 20: ngolofuzz.NgoloFuzzOne.NewSHAKE256:type_name -> ngolofuzz.NewSHAKE256Args
	21, // 21: ngolofuzz.NgoloFuzzOne.NewCSHAKE128:type_name -> ngolofuzz.NewCSHAKE128Args
	22, // 22: ngolofuzz.NgoloFuzzOne.NewCSHAKE256:type_name -> ngolofuzz.NewCSHAKE256Args
	23, // 23: ngolofuzz.NgoloFuzzOne.SHAKENgdotWrite:type_name -> ngolofuzz.SHAKENgdotWriteArgs
	24, // 24: ngolofuzz.NgoloFuzzOne.SHAKENgdotRead:type_name -> ngolofuzz.SHAKENgdotReadArgs
	25, // 25: ngolofuzz.NgoloFuzzOne.SHAKENgdotReset:type_name -> ngolofuzz.SHAKENgdotResetArgs
	26, // 26: ngolofuzz.NgoloFuzzOne.SHAKENgdotBlockSize:type_name -> ngolofuzz.SHAKENgdotBlockSizeArgs
	27, // 27: ngolofuzz.NgoloFuzzOne.SHAKENgdotMarshalBinary:type_name -> ngolofuzz.SHAKENgdotMarshalBinaryArgs
	28, // 28: ngolofuzz.NgoloFuzzOne.SHAKENgdotAppendBinary:type_name -> ngolofuzz.SHAKENgdotAppendBinaryArgs
	29, // 29: ngolofuzz.NgoloFuzzOne.SHAKENgdotUnmarshalBinary:type_name -> ngolofuzz.SHAKENgdotUnmarshalBinaryArgs
	30, // 30: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	31, // [31:31] is the sub-list for method output_type
	31, // [31:31] is the sub-list for method input_type
	31, // [31:31] is the sub-list for extension type_name
	31, // [31:31] is the sub-list for extension extendee
	0, // [0:31] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the oneof wrapper types and builds the
// runtime descriptor for ngolofuzz.proto. It is idempotent: a second call
// returns immediately once File_ngolofuzz_proto is set.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Message index 30 is NgoloFuzzOne; list every wrapper its oneof accepts.
	file_ngolofuzz_proto_msgTypes[30].OneofWrappers = []any{
		(*NgoloFuzzOne_Sum224)(nil),
		(*NgoloFuzzOne_Sum256)(nil),
		(*NgoloFuzzOne_Sum384)(nil),
		(*NgoloFuzzOne_Sum512)(nil),
		(*NgoloFuzzOne_SumSHAKE128)(nil),
		(*NgoloFuzzOne_SumSHAKE256)(nil),
		(*NgoloFuzzOne_New224)(nil),
		(*NgoloFuzzOne_New256)(nil),
		(*NgoloFuzzOne_New384)(nil),
		(*NgoloFuzzOne_New512)(nil),
		(*NgoloFuzzOne_SHA3NgdotWrite)(nil),
		(*NgoloFuzzOne_SHA3NgdotSum)(nil),
		(*NgoloFuzzOne_SHA3NgdotReset)(nil),
		(*NgoloFuzzOne_SHA3NgdotSize)(nil),
		(*NgoloFuzzOne_SHA3NgdotBlockSize)(nil),
		(*NgoloFuzzOne_SHA3NgdotMarshalBinary)(nil),
		(*NgoloFuzzOne_SHA3NgdotAppendBinary)(nil),
		(*NgoloFuzzOne_SHA3NgdotUnmarshalBinary)(nil),
		(*NgoloFuzzOne_SHA3NgdotClone)(nil),
		(*NgoloFuzzOne_NewSHAKE128)(nil),
		(*NgoloFuzzOne_NewSHAKE256)(nil),
		(*NgoloFuzzOne_NewCSHAKE128)(nil),
		(*NgoloFuzzOne_NewCSHAKE256)(nil),
		(*NgoloFuzzOne_SHAKENgdotWrite)(nil),
		(*NgoloFuzzOne_SHAKENgdotRead)(nil),
		(*NgoloFuzzOne_SHAKENgdotReset)(nil),
		(*NgoloFuzzOne_SHAKENgdotBlockSize)(nil),
		(*NgoloFuzzOne_SHAKENgdotMarshalBinary)(nil),
		(*NgoloFuzzOne_SHAKENgdotAppendBinary)(nil),
		(*NgoloFuzzOne_SHAKENgdotUnmarshalBinary)(nil),
	}
	// Message index 31 is NgoloFuzzAny.
	file_ngolofuzz_proto_msgTypes[31].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums: 0,
			NumMessages: 33,
			NumExtensions: 0,
			NumServices: 0,
		},
		GoTypes: file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos: file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the construction-only tables so they can be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_sha512
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/sha512"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn fed by a fixed byte slice:
// reads consume buf, writes are discarded.
type FuzzingConn struct {
	buf    []byte // fuzzer-provided input replayed by Read
	offset int    // next unread position in buf
}

// Read copies unread bytes from buf into b and advances the offset.
// It returns io.EOF once all of buf has been consumed.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b is too small for the rest of buf: fill it completely.
		// BUG FIX: the original condition was len(b) < len(c.buf)+c.offset,
		// which overstates the remaining data whenever offset > 0, so Read
		// reported len(b) bytes while copying fewer and pushed offset past
		// len(buf).
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b can hold everything that is left.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write discards b and reports full success.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection as fully consumed; subsequent reads see EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr for FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns the placeholder fuzzing address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns the placeholder fuzzing address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op so FuzzingConn satisfies net.Conn.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op so FuzzingConn satisfies net.Conn.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op so FuzzingConn satisfies net.Conn.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO: emit the helper functions below only when the generated harness actually needs them.
// CreateBigInt builds a big.Int whose absolute value is the big-endian
// interpretation of a (an empty slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader for APIs that consume
// a *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewReader(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to int, preserving order.
// Values outside the int range are truncated by the conversion.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows each int64 in a to uint16 (truncating to the
// low 16 bits), preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or '\x00' when s is empty.
// Invalid UTF-8 at the start decodes to utf8.RuneError, matching a
// for-range over the string.
func GetRune(s string) rune {
	runes := []rune(s)
	if len(runes) == 0 {
		return '\x00'
	}
	return runes[0]
}
// FuzzNG_valid replays a NgoloFuzzList that is expected to be valid
// protobuf: a decode failure is a harness bug and panics. String panics
// raised while replaying are swallowed (they encode expected rejections);
// any other panic is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure replays a NgoloFuzzList without assuming the input is a
// valid protobuf: undecodable input is simply skipped. String panics raised
// while replaying are swallowed; any other panic is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized records whether the one-time reproducer setup in FuzzNG_List
// has already run for this process.
var initialized bool
// FuzzNG_List executes each operation of gen against crypto/sha512,
// stopping after 4096 operations. On first use it optionally writes a Go
// reproducer to the file named by FUZZ_NG_REPRODUCER. Returns 1 when the
// list was replayed, 0 when truncated.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		if repro := os.Getenv("FUZZ_NG_REPRODUCER"); len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for i, one := range gen.List {
		// Cap the amount of work a single input can trigger.
		if i > 4096 {
			return 0
		}
		switch op := one.Item.(type) {
		case *NgoloFuzzOne_New:
			sha512.New()
		case *NgoloFuzzOne_New512_224:
			sha512.New512_224()
		case *NgoloFuzzOne_New512_256:
			sha512.New512_256()
		case *NgoloFuzzOne_New384:
			sha512.New384()
		case *NgoloFuzzOne_Sum512:
			sha512.Sum512(op.Sum512.Data)
		case *NgoloFuzzOne_Sum384:
			sha512.Sum384(op.Sum384.Data)
		case *NgoloFuzzOne_Sum512_224:
			sha512.Sum512_224(op.Sum512_224.Data)
		case *NgoloFuzzOne_Sum512_256:
			sha512.Sum512_256(op.Sum512_256.Data)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproduction of the calls encoded in gen
// to w, one statement per line. WriteString errors are deliberately
// ignored: reproducer output is best-effort.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			// Constant strings need no fmt.Sprintf (staticcheck S1039).
			w.WriteString("sha512.New()\n")
		case *NgoloFuzzOne_New512_224:
			w.WriteString("sha512.New512_224()\n")
		case *NgoloFuzzOne_New512_256:
			w.WriteString("sha512.New512_256()\n")
		case *NgoloFuzzOne_New384:
			w.WriteString("sha512.New384()\n")
		case *NgoloFuzzOne_Sum512:
			w.WriteString(fmt.Sprintf("sha512.Sum512(%#+v)\n", a.Sum512.Data))
		case *NgoloFuzzOne_Sum384:
			w.WriteString(fmt.Sprintf("sha512.Sum384(%#+v)\n", a.Sum384.Data))
		case *NgoloFuzzOne_Sum512_224:
			w.WriteString(fmt.Sprintf("sha512.Sum512_224(%#+v)\n", a.Sum512_224.Data))
		case *NgoloFuzzOne_Sum512_256:
			w.WriteString(fmt.Sprintf("sha512.Sum512_256(%#+v)\n", a.Sum512_256.Data))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_sha512
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// NewArgs is the (empty) argument message for sha512.New. The New*Args
// messages below carry no fields: the constructors they model take none.
type NewArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
	*x = NewArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// New512_224Args is the (empty) argument message for sha512.New512_224.
type New512_224Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *New512_224Args) Reset() {
	*x = New512_224Args{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *New512_224Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*New512_224Args) ProtoMessage() {}
func (x *New512_224Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use New512_224Args.ProtoReflect.Descriptor instead.
func (*New512_224Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// New512_256Args is the (empty) argument message for sha512.New512_256.
type New512_256Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *New512_256Args) Reset() {
	*x = New512_256Args{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *New512_256Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*New512_256Args) ProtoMessage() {}
func (x *New512_256Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use New512_256Args.ProtoReflect.Descriptor instead.
func (*New512_256Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// New384Args is the (empty) argument message for sha512.New384.
type New384Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *New384Args) Reset() {
	*x = New384Args{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *New384Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*New384Args) ProtoMessage() {}
func (x *New384Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use New384Args.ProtoReflect.Descriptor instead.
func (*New384Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// Sum512Args carries the data argument for sha512.Sum512. The Sum*Args
// messages below all share this single-field shape.
type Sum512Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *Sum512Args) Reset() {
	*x = Sum512Args{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *Sum512Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*Sum512Args) ProtoMessage() {}
func (x *Sum512Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use Sum512Args.ProtoReflect.Descriptor instead.
func (*Sum512Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// GetData returns the input bytes, or nil for a nil receiver.
func (x *Sum512Args) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// Sum384Args carries the data argument for sha512.Sum384.
type Sum384Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *Sum384Args) Reset() {
	*x = Sum384Args{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *Sum384Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*Sum384Args) ProtoMessage() {}
func (x *Sum384Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use Sum384Args.ProtoReflect.Descriptor instead.
func (*Sum384Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetData returns the input bytes, or nil for a nil receiver.
func (x *Sum384Args) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// Sum512_224Args carries the data argument for sha512.Sum512_224.
type Sum512_224Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *Sum512_224Args) Reset() {
	*x = Sum512_224Args{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *Sum512_224Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*Sum512_224Args) ProtoMessage() {}
func (x *Sum512_224Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use Sum512_224Args.ProtoReflect.Descriptor instead.
func (*Sum512_224Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// GetData returns the input bytes, or nil for a nil receiver.
func (x *Sum512_224Args) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// Sum512_256Args carries the data argument for sha512.Sum512_256.
type Sum512_256Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *Sum512_256Args) Reset() {
	*x = Sum512_256Args{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *Sum512_256Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*Sum512_256Args) ProtoMessage() {}
func (x *Sum512_256Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use Sum512_256Args.ProtoReflect.Descriptor instead.
func (*Sum512_256Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// GetData returns the input bytes, or nil for a nil receiver.
func (x *Sum512_256Args) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_New
// *NgoloFuzzOne_New512_224
// *NgoloFuzzOne_New512_256
// *NgoloFuzzOne_New384
// *NgoloFuzzOne_Sum512
// *NgoloFuzzOne_Sum384
// *NgoloFuzzOne_Sum512_224
// *NgoloFuzzOne_Sum512_256
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNew() *NewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
return x.New
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew512_224() *New512_224Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New512_224); ok {
return x.New512_224
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew512_256() *New512_256Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New512_256); ok {
return x.New512_256
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew384() *New384Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New384); ok {
return x.New384
}
}
return nil
}
func (x *NgoloFuzzOne) GetSum512() *Sum512Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum512); ok {
return x.Sum512
}
}
return nil
}
func (x *NgoloFuzzOne) GetSum384() *Sum384Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum384); ok {
return x.Sum384
}
}
return nil
}
func (x *NgoloFuzzOne) GetSum512_224() *Sum512_224Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum512_224); ok {
return x.Sum512_224
}
}
return nil
}
func (x *NgoloFuzzOne) GetSum512_256() *Sum512_256Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sum512_256); ok {
return x.Sum512_256
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_New struct {
New *NewArgs `protobuf:"bytes,1,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_New512_224 struct {
New512_224 *New512_224Args `protobuf:"bytes,2,opt,name=New512_224,json=New512224,proto3,oneof"`
}
type NgoloFuzzOne_New512_256 struct {
New512_256 *New512_256Args `protobuf:"bytes,3,opt,name=New512_256,json=New512256,proto3,oneof"`
}
type NgoloFuzzOne_New384 struct {
New384 *New384Args `protobuf:"bytes,4,opt,name=New384,proto3,oneof"`
}
type NgoloFuzzOne_Sum512 struct {
Sum512 *Sum512Args `protobuf:"bytes,5,opt,name=Sum512,proto3,oneof"`
}
type NgoloFuzzOne_Sum384 struct {
Sum384 *Sum384Args `protobuf:"bytes,6,opt,name=Sum384,proto3,oneof"`
}
type NgoloFuzzOne_Sum512_224 struct {
Sum512_224 *Sum512_224Args `protobuf:"bytes,7,opt,name=Sum512_224,json=Sum512224,proto3,oneof"`
}
type NgoloFuzzOne_Sum512_256 struct {
Sum512_256 *Sum512_256Args `protobuf:"bytes,8,opt,name=Sum512_256,json=Sum512256,proto3,oneof"`
}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New512_224) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New512_256) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New384) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sum512) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sum384) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sum512_224) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sum512_256) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\t\n" +
"\aNewArgs\"\x10\n" +
"\x0eNew512_224Args\"\x10\n" +
"\x0eNew512_256Args\"\f\n" +
"\n" +
"New384Args\" \n" +
"\n" +
"Sum512Args\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\" \n" +
"\n" +
"Sum384Args\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"$\n" +
"\x0eSum512_224Args\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"$\n" +
"\x0eSum512_256Args\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"\xc1\x03\n" +
"\fNgoloFuzzOne\x12&\n" +
"\x03New\x18\x01 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12:\n" +
"\n" +
"New512_224\x18\x02 \x01(\v2\x19.ngolofuzz.New512_224ArgsH\x00R\tNew512224\x12:\n" +
"\n" +
"New512_256\x18\x03 \x01(\v2\x19.ngolofuzz.New512_256ArgsH\x00R\tNew512256\x12/\n" +
"\x06New384\x18\x04 \x01(\v2\x15.ngolofuzz.New384ArgsH\x00R\x06New384\x12/\n" +
"\x06Sum512\x18\x05 \x01(\v2\x15.ngolofuzz.Sum512ArgsH\x00R\x06Sum512\x12/\n" +
"\x06Sum384\x18\x06 \x01(\v2\x15.ngolofuzz.Sum384ArgsH\x00R\x06Sum384\x12:\n" +
"\n" +
"Sum512_224\x18\a \x01(\v2\x19.ngolofuzz.Sum512_224ArgsH\x00R\tSum512224\x12:\n" +
"\n" +
"Sum512_256\x18\b \x01(\v2\x19.ngolofuzz.Sum512_256ArgsH\x00R\tSum512256B\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_crypto_sha512b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_ngolofuzz_proto_goTypes = []any{
(*NewArgs)(nil), // 0: ngolofuzz.NewArgs
(*New512_224Args)(nil), // 1: ngolofuzz.New512_224Args
(*New512_256Args)(nil), // 2: ngolofuzz.New512_256Args
(*New384Args)(nil), // 3: ngolofuzz.New384Args
(*Sum512Args)(nil), // 4: ngolofuzz.Sum512Args
(*Sum384Args)(nil), // 5: ngolofuzz.Sum384Args
(*Sum512_224Args)(nil), // 6: ngolofuzz.Sum512_224Args
(*Sum512_256Args)(nil), // 7: ngolofuzz.Sum512_256Args
(*NgoloFuzzOne)(nil), // 8: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 9: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 10: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
1, // 1: ngolofuzz.NgoloFuzzOne.New512_224:type_name -> ngolofuzz.New512_224Args
2, // 2: ngolofuzz.NgoloFuzzOne.New512_256:type_name -> ngolofuzz.New512_256Args
3, // 3: ngolofuzz.NgoloFuzzOne.New384:type_name -> ngolofuzz.New384Args
4, // 4: ngolofuzz.NgoloFuzzOne.Sum512:type_name -> ngolofuzz.Sum512Args
5, // 5: ngolofuzz.NgoloFuzzOne.Sum384:type_name -> ngolofuzz.Sum384Args
6, // 6: ngolofuzz.NgoloFuzzOne.Sum512_224:type_name -> ngolofuzz.Sum512_224Args
7, // 7: ngolofuzz.NgoloFuzzOne.Sum512_256:type_name -> ngolofuzz.Sum512_256Args
8, // 8: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
9, // [9:9] is the sub-list for method output_type
9, // [9:9] is the sub-list for method input_type
9, // [9:9] is the sub-list for extension type_name
9, // [9:9] is the sub-list for extension extendee
0, // [0:9] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[8].OneofWrappers = []any{
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_New512_224)(nil),
(*NgoloFuzzOne_New512_256)(nil),
(*NgoloFuzzOne_New384)(nil),
(*NgoloFuzzOne_Sum512)(nil),
(*NgoloFuzzOne_Sum384)(nil),
(*NgoloFuzzOne_Sum512_224)(nil),
(*NgoloFuzzOne_Sum512_256)(nil),
}
file_ngolofuzz_proto_msgTypes[9].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 11,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_crypto_subtle
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"crypto/subtle"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stand-in for a net.Conn: Reads are served from
// a fixed byte buffer supplied by the fuzzer and Writes are discarded. It
// lets fuzz targets drive network-consuming code without real sockets.
type FuzzingConn struct {
	buf    []byte // data returned by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next bytes of the buffer into b and advances the offset.
// It returns io.EOF once the buffer has been fully consumed.
//
// Fix: the original guard was `len(b) < len(c.buf)+c.offset` (a `+` instead
// of `-`). Whenever remaining <= len(b) < len(buf)+offset, it copied only
// `remaining` bytes yet reported n = len(b) and advanced offset past the end
// of the buffer, handing callers uninitialized bytes.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything that is left: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// Drain the rest of the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write pretends the whole of b was written and discards the data.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is the placeholder net.Addr reported by FuzzingConn.
type FuzzingAddr struct{}

// Network names the fake network this address belongs to.
func (a *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String renders the fake address.
func (a *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a fixed placeholder address for the fake connection.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address for the fake connection.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: deadlines are meaningless for an in-memory buffer.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn ready to be read from.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helper constructors when the target package needs them.

// CreateBigInt builds a big.Int from the big-endian unsigned bytes of a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (modulo 2^16).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
func GetRune(s string) rune {
	var first rune
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid is the fuzz entry point for inputs that are expected to be
// valid protobuf (e.g. produced by a structure-aware mutator): a decode
// failure indicates harness corruption and panics rather than being skipped.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Not caught by the recover below — the handler is installed only
		// after this point, so this panic aborts the run loudly.
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// string panics (typically stdlib argument validation) are
				// treated as benign and swallowed deliberately.
			default:
				panic(r)
			}
		}
	}()
	// NOTE(review): presumably forces a collection so memory bugs surface
	// closer to the triggering input — TODO confirm intent.
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the fuzz entry point for raw fuzzer bytes: inputs that do
// not decode as a NgoloFuzzList are silently skipped (return 0).
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// string panics (typically stdlib argument validation) are
				// treated as benign and swallowed deliberately.
			default:
				panic(r)
			}
		}
	}()
	// NOTE(review): presumably forces a collection so memory bugs surface
	// closer to the triggering input — TODO confirm intent.
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump performed on the first call.
var initialized bool

// FuzzNG_List replays each generated call description in gen against
// crypto/subtle. It returns 1 when the list was executed and 0 when the list
// was abandoned for being too long.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// If FUZZ_NG_REPRODUCER names a file, dump a Go-source reproducer of
		// this input there before executing it.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the number of replayed calls per input (l is the slice index).
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ConstantTimeCompare:
			subtle.ConstantTimeCompare(a.ConstantTimeCompare.X, a.ConstantTimeCompare.Y)
		case *NgoloFuzzOne_ConstantTimeSelect:
			arg0 := int(a.ConstantTimeSelect.V)
			arg1 := int(a.ConstantTimeSelect.X)
			arg2 := int(a.ConstantTimeSelect.Y)
			subtle.ConstantTimeSelect(arg0, arg1, arg2)
		case *NgoloFuzzOne_ConstantTimeByteEq:
			arg0 := uint8(a.ConstantTimeByteEq.X)
			arg1 := uint8(a.ConstantTimeByteEq.Y)
			subtle.ConstantTimeByteEq(arg0, arg1)
		case *NgoloFuzzOne_ConstantTimeEq:
			subtle.ConstantTimeEq(a.ConstantTimeEq.X, a.ConstantTimeEq.Y)
		case *NgoloFuzzOne_ConstantTimeCopy:
			arg0 := int(a.ConstantTimeCopy.V)
			subtle.ConstantTimeCopy(arg0, a.ConstantTimeCopy.X, a.ConstantTimeCopy.Y)
		case *NgoloFuzzOne_ConstantTimeLessOrEq:
			arg0 := int(a.ConstantTimeLessOrEq.X)
			arg1 := int(a.ConstantTimeLessOrEq.Y)
			subtle.ConstantTimeLessOrEq(arg0, arg1)
		case *NgoloFuzzOne_XORBytes:
			// NOTE(review): subtle.XORBytes panics (with a string) on
			// mismatched lengths; the callers' recover treats string panics
			// as benign, so that is an expected outcome here.
			subtle.XORBytes(a.XORBytes.Dst, a.XORBytes.X, a.XORBytes.Y)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w: one line of
// crypto/subtle call text per list item, mirroring the dispatch in
// FuzzNG_List. WriteString errors are deliberately ignored — the dump is
// best-effort debugging output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ConstantTimeCompare:
			w.WriteString(fmt.Sprintf("subtle.ConstantTimeCompare(%#+v, %#+v)\n", a.ConstantTimeCompare.X, a.ConstantTimeCompare.Y))
		case *NgoloFuzzOne_ConstantTimeSelect:
			w.WriteString(fmt.Sprintf("subtle.ConstantTimeSelect(int(%#+v), int(%#+v), int(%#+v))\n", a.ConstantTimeSelect.V, a.ConstantTimeSelect.X, a.ConstantTimeSelect.Y))
		case *NgoloFuzzOne_ConstantTimeByteEq:
			w.WriteString(fmt.Sprintf("subtle.ConstantTimeByteEq(uint8(%#+v), uint8(%#+v))\n", a.ConstantTimeByteEq.X, a.ConstantTimeByteEq.Y))
		case *NgoloFuzzOne_ConstantTimeEq:
			w.WriteString(fmt.Sprintf("subtle.ConstantTimeEq(%#+v, %#+v)\n", a.ConstantTimeEq.X, a.ConstantTimeEq.Y))
		case *NgoloFuzzOne_ConstantTimeCopy:
			w.WriteString(fmt.Sprintf("subtle.ConstantTimeCopy(int(%#+v), %#+v, %#+v)\n", a.ConstantTimeCopy.V, a.ConstantTimeCopy.X, a.ConstantTimeCopy.Y))
		case *NgoloFuzzOne_ConstantTimeLessOrEq:
			w.WriteString(fmt.Sprintf("subtle.ConstantTimeLessOrEq(int(%#+v), int(%#+v))\n", a.ConstantTimeLessOrEq.X, a.ConstantTimeLessOrEq.Y))
		case *NgoloFuzzOne_XORBytes:
			w.WriteString(fmt.Sprintf("subtle.XORBytes(%#+v, %#+v, %#+v)\n", a.XORBytes.Dst, a.XORBytes.X, a.XORBytes.Y))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_crypto_subtle
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ConstantTimeCompareArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X []byte `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
Y []byte `protobuf:"bytes,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ConstantTimeCompareArgs) Reset() {
*x = ConstantTimeCompareArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ConstantTimeCompareArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ConstantTimeCompareArgs) ProtoMessage() {}
func (x *ConstantTimeCompareArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ConstantTimeCompareArgs.ProtoReflect.Descriptor instead.
func (*ConstantTimeCompareArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *ConstantTimeCompareArgs) GetX() []byte {
if x != nil {
return x.X
}
return nil
}
func (x *ConstantTimeCompareArgs) GetY() []byte {
if x != nil {
return x.Y
}
return nil
}
type ConstantTimeSelectArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
V int64 `protobuf:"varint,1,opt,name=v,proto3" json:"v,omitempty"`
X int64 `protobuf:"varint,2,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,3,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ConstantTimeSelectArgs) Reset() {
*x = ConstantTimeSelectArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ConstantTimeSelectArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ConstantTimeSelectArgs) ProtoMessage() {}
func (x *ConstantTimeSelectArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ConstantTimeSelectArgs.ProtoReflect.Descriptor instead.
func (*ConstantTimeSelectArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *ConstantTimeSelectArgs) GetV() int64 {
if x != nil {
return x.V
}
return 0
}
func (x *ConstantTimeSelectArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *ConstantTimeSelectArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
type ConstantTimeByteEqArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y uint32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ConstantTimeByteEqArgs) Reset() {
*x = ConstantTimeByteEqArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ConstantTimeByteEqArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ConstantTimeByteEqArgs) ProtoMessage() {}
func (x *ConstantTimeByteEqArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ConstantTimeByteEqArgs.ProtoReflect.Descriptor instead.
func (*ConstantTimeByteEqArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *ConstantTimeByteEqArgs) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
func (x *ConstantTimeByteEqArgs) GetY() uint32 {
if x != nil {
return x.Y
}
return 0
}
type ConstantTimeEqArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ConstantTimeEqArgs) Reset() {
*x = ConstantTimeEqArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ConstantTimeEqArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ConstantTimeEqArgs) ProtoMessage() {}
func (x *ConstantTimeEqArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ConstantTimeEqArgs.ProtoReflect.Descriptor instead.
func (*ConstantTimeEqArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *ConstantTimeEqArgs) GetX() int32 {
if x != nil {
return x.X
}
return 0
}
func (x *ConstantTimeEqArgs) GetY() int32 {
if x != nil {
return x.Y
}
return 0
}
type ConstantTimeCopyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
V int64 `protobuf:"varint,1,opt,name=v,proto3" json:"v,omitempty"`
X []byte `protobuf:"bytes,2,opt,name=x,proto3" json:"x,omitempty"`
Y []byte `protobuf:"bytes,3,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ConstantTimeCopyArgs) Reset() {
*x = ConstantTimeCopyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ConstantTimeCopyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ConstantTimeCopyArgs) ProtoMessage() {}
func (x *ConstantTimeCopyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ConstantTimeCopyArgs.ProtoReflect.Descriptor instead.
func (*ConstantTimeCopyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *ConstantTimeCopyArgs) GetV() int64 {
if x != nil {
return x.V
}
return 0
}
func (x *ConstantTimeCopyArgs) GetX() []byte {
if x != nil {
return x.X
}
return nil
}
func (x *ConstantTimeCopyArgs) GetY() []byte {
if x != nil {
return x.Y
}
return nil
}
type ConstantTimeLessOrEqArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ConstantTimeLessOrEqArgs) Reset() {
*x = ConstantTimeLessOrEqArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ConstantTimeLessOrEqArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ConstantTimeLessOrEqArgs) ProtoMessage() {}
func (x *ConstantTimeLessOrEqArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ConstantTimeLessOrEqArgs.ProtoReflect.Descriptor instead.
func (*ConstantTimeLessOrEqArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *ConstantTimeLessOrEqArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *ConstantTimeLessOrEqArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
type XORBytesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
X []byte `protobuf:"bytes,2,opt,name=x,proto3" json:"x,omitempty"`
Y []byte `protobuf:"bytes,3,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *XORBytesArgs) Reset() {
*x = XORBytesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *XORBytesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*XORBytesArgs) ProtoMessage() {}
func (x *XORBytesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use XORBytesArgs.ProtoReflect.Descriptor instead.
func (*XORBytesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *XORBytesArgs) GetDst() []byte {
if x != nil {
return x.Dst
}
return nil
}
func (x *XORBytesArgs) GetX() []byte {
if x != nil {
return x.X
}
return nil
}
func (x *XORBytesArgs) GetY() []byte {
if x != nil {
return x.Y
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_ConstantTimeCompare
// *NgoloFuzzOne_ConstantTimeSelect
// *NgoloFuzzOne_ConstantTimeByteEq
// *NgoloFuzzOne_ConstantTimeEq
// *NgoloFuzzOne_ConstantTimeCopy
// *NgoloFuzzOne_ConstantTimeLessOrEq
// *NgoloFuzzOne_XORBytes
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetConstantTimeCompare() *ConstantTimeCompareArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ConstantTimeCompare); ok {
return x.ConstantTimeCompare
}
}
return nil
}
func (x *NgoloFuzzOne) GetConstantTimeSelect() *ConstantTimeSelectArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ConstantTimeSelect); ok {
return x.ConstantTimeSelect
}
}
return nil
}
func (x *NgoloFuzzOne) GetConstantTimeByteEq() *ConstantTimeByteEqArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ConstantTimeByteEq); ok {
return x.ConstantTimeByteEq
}
}
return nil
}
func (x *NgoloFuzzOne) GetConstantTimeEq() *ConstantTimeEqArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ConstantTimeEq); ok {
return x.ConstantTimeEq
}
}
return nil
}
func (x *NgoloFuzzOne) GetConstantTimeCopy() *ConstantTimeCopyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ConstantTimeCopy); ok {
return x.ConstantTimeCopy
}
}
return nil
}
func (x *NgoloFuzzOne) GetConstantTimeLessOrEq() *ConstantTimeLessOrEqArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ConstantTimeLessOrEq); ok {
return x.ConstantTimeLessOrEq
}
}
return nil
}
func (x *NgoloFuzzOne) GetXORBytes() *XORBytesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_XORBytes); ok {
return x.XORBytes
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_ConstantTimeCompare struct {
ConstantTimeCompare *ConstantTimeCompareArgs `protobuf:"bytes,1,opt,name=ConstantTimeCompare,proto3,oneof"`
}
type NgoloFuzzOne_ConstantTimeSelect struct {
ConstantTimeSelect *ConstantTimeSelectArgs `protobuf:"bytes,2,opt,name=ConstantTimeSelect,proto3,oneof"`
}
type NgoloFuzzOne_ConstantTimeByteEq struct {
ConstantTimeByteEq *ConstantTimeByteEqArgs `protobuf:"bytes,3,opt,name=ConstantTimeByteEq,proto3,oneof"`
}
type NgoloFuzzOne_ConstantTimeEq struct {
ConstantTimeEq *ConstantTimeEqArgs `protobuf:"bytes,4,opt,name=ConstantTimeEq,proto3,oneof"`
}
type NgoloFuzzOne_ConstantTimeCopy struct {
ConstantTimeCopy *ConstantTimeCopyArgs `protobuf:"bytes,5,opt,name=ConstantTimeCopy,proto3,oneof"`
}
type NgoloFuzzOne_ConstantTimeLessOrEq struct {
ConstantTimeLessOrEq *ConstantTimeLessOrEqArgs `protobuf:"bytes,6,opt,name=ConstantTimeLessOrEq,proto3,oneof"`
}
type NgoloFuzzOne_XORBytes struct {
XORBytes *XORBytesArgs `protobuf:"bytes,7,opt,name=XORBytes,proto3,oneof"`
}
func (*NgoloFuzzOne_ConstantTimeCompare) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ConstantTimeSelect) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ConstantTimeByteEq) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ConstantTimeEq) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ConstantTimeCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ConstantTimeLessOrEq) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_XORBytes) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated protobuf message holding exactly one
// primitive value (double, int64, bool, string, or bytes) in its oneof.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero state while re-binding its message info.
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect exposes the message through the protobuf reflection API.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// GetItem returns whichever oneof variant is set, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// The Get* accessors below return the variant's value when that variant
// is set, and the zero value otherwise.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented by every
// NgoloFuzzAny oneof wrapper type below.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
// Wrapper types for the NgoloFuzzAny.Item oneof, one per primitive kind.
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
// Marker methods tying each wrapper type to the oneof interface.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level generated message: an ordered sequence of
// NgoloFuzzOne operations for the fuzzing harness to execute.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero state while re-binding its message info.
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect exposes the message through the protobuf reflection API.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetList returns the operation list, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the file descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto, emitted verbatim by protoc-gen-go. Do not edit: the
// byte content must match the generator's output exactly.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"5\n" +
"\x17ConstantTimeCompareArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\fR\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\fR\x01y\"B\n" +
"\x16ConstantTimeSelectArgs\x12\f\n" +
"\x01v\x18\x01 \x01(\x03R\x01v\x12\f\n" +
"\x01x\x18\x02 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x03 \x01(\x03R\x01y\"4\n" +
"\x16ConstantTimeByteEqArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\rR\x01y\"0\n" +
"\x12ConstantTimeEqArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x05R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x05R\x01y\"@\n" +
"\x14ConstantTimeCopyArgs\x12\f\n" +
"\x01v\x18\x01 \x01(\x03R\x01v\x12\f\n" +
"\x01x\x18\x02 \x01(\fR\x01x\x12\f\n" +
"\x01y\x18\x03 \x01(\fR\x01y\"6\n" +
"\x18ConstantTimeLessOrEqArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"<\n" +
"\fXORBytesArgs\x12\x10\n" +
"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
"\x01x\x18\x02 \x01(\fR\x01x\x12\f\n" +
"\x01y\x18\x03 \x01(\fR\x01y\"\xc2\x04\n" +
"\fNgoloFuzzOne\x12V\n" +
"\x13ConstantTimeCompare\x18\x01 \x01(\v2\".ngolofuzz.ConstantTimeCompareArgsH\x00R\x13ConstantTimeCompare\x12S\n" +
"\x12ConstantTimeSelect\x18\x02 \x01(\v2!.ngolofuzz.ConstantTimeSelectArgsH\x00R\x12ConstantTimeSelect\x12S\n" +
"\x12ConstantTimeByteEq\x18\x03 \x01(\v2!.ngolofuzz.ConstantTimeByteEqArgsH\x00R\x12ConstantTimeByteEq\x12G\n" +
"\x0eConstantTimeEq\x18\x04 \x01(\v2\x1d.ngolofuzz.ConstantTimeEqArgsH\x00R\x0eConstantTimeEq\x12M\n" +
"\x10ConstantTimeCopy\x18\x05 \x01(\v2\x1f.ngolofuzz.ConstantTimeCopyArgsH\x00R\x10ConstantTimeCopy\x12Y\n" +
"\x14ConstantTimeLessOrEq\x18\x06 \x01(\v2#.ngolofuzz.ConstantTimeLessOrEqArgsH\x00R\x14ConstantTimeLessOrEq\x125\n" +
"\bXORBytes\x18\a \x01(\v2\x17.ngolofuzz.XORBytesArgsH\x00R\bXORBytesB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_crypto_subtleb\x06proto3"
// Lazily-computed gzip of the raw descriptor, shared by Descriptor()
// methods; guarded by a sync.Once so compression happens at most once.
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP returns the gzip-compressed raw
// descriptor, compressing it on first use.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Runtime type tables: one MessageInfo slot per message, the Go types for
// each descriptor index, and the dependency index list. The positions in
// these slices are significant and must match the descriptor.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
var file_ngolofuzz_proto_goTypes = []any{
(*ConstantTimeCompareArgs)(nil), // 0: ngolofuzz.ConstantTimeCompareArgs
(*ConstantTimeSelectArgs)(nil), // 1: ngolofuzz.ConstantTimeSelectArgs
(*ConstantTimeByteEqArgs)(nil), // 2: ngolofuzz.ConstantTimeByteEqArgs
(*ConstantTimeEqArgs)(nil), // 3: ngolofuzz.ConstantTimeEqArgs
(*ConstantTimeCopyArgs)(nil), // 4: ngolofuzz.ConstantTimeCopyArgs
(*ConstantTimeLessOrEqArgs)(nil), // 5: ngolofuzz.ConstantTimeLessOrEqArgs
(*XORBytesArgs)(nil), // 6: ngolofuzz.XORBytesArgs
(*NgoloFuzzOne)(nil), // 7: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 8: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 9: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.ConstantTimeCompare:type_name -> ngolofuzz.ConstantTimeCompareArgs
1, // 1: ngolofuzz.NgoloFuzzOne.ConstantTimeSelect:type_name -> ngolofuzz.ConstantTimeSelectArgs
2, // 2: ngolofuzz.NgoloFuzzOne.ConstantTimeByteEq:type_name -> ngolofuzz.ConstantTimeByteEqArgs
3, // 3: ngolofuzz.NgoloFuzzOne.ConstantTimeEq:type_name -> ngolofuzz.ConstantTimeEqArgs
4, // 4: ngolofuzz.NgoloFuzzOne.ConstantTimeCopy:type_name -> ngolofuzz.ConstantTimeCopyArgs
5, // 5: ngolofuzz.NgoloFuzzOne.ConstantTimeLessOrEq:type_name -> ngolofuzz.ConstantTimeLessOrEqArgs
6, // 6: ngolofuzz.NgoloFuzzOne.XORBytes:type_name -> ngolofuzz.XORBytesArgs
7, // 7: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
8, // [8:8] is the sub-list for method output_type
8, // [8:8] is the sub-list for method input_type
8, // [8:8] is the sub-list for extension type_name
8, // [8:8] is the sub-list for extension extendee
0, // [0:8] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the file descriptor and message
// types with the protobuf runtime. It is idempotent: a second call
// returns immediately once File_ngolofuzz_proto is set.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Declare the oneof wrapper types so the runtime can decode the
// NgoloFuzzOne (index 7) and NgoloFuzzAny (index 8) oneofs.
file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
(*NgoloFuzzOne_ConstantTimeCompare)(nil),
(*NgoloFuzzOne_ConstantTimeSelect)(nil),
(*NgoloFuzzOne_ConstantTimeByteEq)(nil),
(*NgoloFuzzOne_ConstantTimeEq)(nil),
(*NgoloFuzzOne_ConstantTimeCopy)(nil),
(*NgoloFuzzOne_ConstantTimeLessOrEq)(nil),
(*NgoloFuzzOne_XORBytes)(nil),
}
file_ngolofuzz_proto_msgTypes[8].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
// Anchor type used only to resolve this package's import path.
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 10,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release build-time tables now that the runtime holds them.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_database_sql
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"database/sql"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is a net.Conn-style stub that replays a fixed byte slice to
// readers and (per the methods below) discards writes, letting the fuzzer
// stand in for a network peer.
type FuzzingConn struct {
	buf    []byte // bytes the connection serves to Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread chunk of c.buf into b and reports how many
// bytes were actually written, returning io.EOF once the buffer is
// exhausted. It never blocks.
//
// Bug fix: the original short-read test compared len(b) against
// len(c.buf)+c.offset instead of the remaining byte count, so it could
// report n == len(b) while copying fewer bytes (leaving stale data in b)
// and advance offset past the end of buf — violating the io.Reader
// contract that n bytes were read into b[:n].
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything that is left: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// Serve all remaining bytes.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write pretends the whole buffer was sent; a fuzzing connection simply
// discards outgoing data.
func (c *FuzzingConn) Write(b []byte) (int, error) {
	return len(b), nil
}

// Close marks the connection as fully consumed so any later Read reports
// end-of-stream.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder address for the fake fuzzing connection.
type FuzzingAddr struct{}

// Network reports a fixed, fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed, fake address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns the fixed fake address of the fuzzing connection.
func (c *FuzzingConn) LocalAddr() net.Addr { return &FuzzingAddr{} }

// RemoteAddr returns the fixed fake address of the peer.
func (c *FuzzingConn) RemoteAddr() net.Addr { return &FuzzingAddr{} }

// SetDeadline is a no-op: the fuzzing connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op: reads are served from memory.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op: writes are discarded.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn wraps the fuzzer-provided bytes in a FuzzingConn that
// will replay them to readers.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the fuzzer-provided bytes through a buffered
// reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	buf := bytes.NewBuffer(a)
	return bufio.NewReader(buf)
}
// ConvertIntArray narrows each int64 to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 to uint16 (modulo 2^16).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') for an empty
// string. Invalid UTF-8 at the start decodes as utf8.RuneError, matching
// range-over-string semantics.
func GetRune(s string) rune {
	var first rune
	for _, r := range s {
		first = r
		break
	}
	return first
}
// IsolationLevelNewFromFuzz maps a fuzzer-chosen enum value onto the
// corresponding database/sql isolation level; anything outside 1..7 maps
// to sql.LevelDefault.
func IsolationLevelNewFromFuzz(p IsolationLevelEnum) sql.IsolationLevel {
	levels := map[IsolationLevelEnum]sql.IsolationLevel{
		1: sql.LevelReadUncommitted,
		2: sql.LevelReadCommitted,
		3: sql.LevelWriteCommitted,
		4: sql.LevelRepeatableRead,
		5: sql.LevelSnapshot,
		6: sql.LevelSerializable,
		7: sql.LevelLinearizable,
	}
	if lvl, ok := levels[p]; ok {
		return lvl
	}
	return sql.LevelDefault
}
// ConvertIsolationLevelNewFromFuzz converts a slice of fuzzer enum values
// element-wise via IsolationLevelNewFromFuzz.
func ConvertIsolationLevelNewFromFuzz(a []IsolationLevelEnum) []sql.IsolationLevel {
	out := make([]sql.IsolationLevel, 0, len(a))
	for _, p := range a {
		out = append(out, IsolationLevelNewFromFuzz(p))
	}
	return out
}
// NullFloat64NewFromFuzz builds a sql.NullFloat64 from its protobuf
// counterpart; a nil input yields a nil output.
func NullFloat64NewFromFuzz(p *NullFloat64Struct) *sql.NullFloat64 {
	if p == nil {
		return nil
	}
	r := new(sql.NullFloat64)
	r.Float64 = p.Float64
	r.Valid = p.Valid
	return r
}

// NullInt64NewFromFuzz builds a sql.NullInt64 from its protobuf
// counterpart; a nil input yields a nil output.
func NullInt64NewFromFuzz(p *NullInt64Struct) *sql.NullInt64 {
	if p == nil {
		return nil
	}
	r := new(sql.NullInt64)
	r.Int64 = p.Int64
	r.Valid = p.Valid
	return r
}

// NullInt32NewFromFuzz builds a sql.NullInt32 from its protobuf
// counterpart; a nil input yields a nil output.
func NullInt32NewFromFuzz(p *NullInt32Struct) *sql.NullInt32 {
	if p == nil {
		return nil
	}
	r := new(sql.NullInt32)
	r.Int32 = p.Int32
	r.Valid = p.Valid
	return r
}

// TxOptionsNewFromFuzz builds sql.TxOptions from its protobuf
// counterpart, translating the isolation level enum; nil yields nil.
func TxOptionsNewFromFuzz(p *TxOptionsStruct) *sql.TxOptions {
	if p == nil {
		return nil
	}
	r := new(sql.TxOptions)
	r.Isolation = IsolationLevelNewFromFuzz(p.Isolation)
	r.ReadOnly = p.ReadOnly
	return r
}

// NullStringNewFromFuzz builds a sql.NullString from its protobuf
// counterpart (whose string field is named String_ to avoid clashing
// with the String() method); nil yields nil.
func NullStringNewFromFuzz(p *NullStringStruct) *sql.NullString {
	if p == nil {
		return nil
	}
	r := new(sql.NullString)
	r.String = p.String_
	r.Valid = p.Valid
	return r
}

// NullByteNewFromFuzz builds a sql.NullByte from its protobuf
// counterpart, truncating the wire integer to a byte; nil yields nil.
func NullByteNewFromFuzz(p *NullByteStruct) *sql.NullByte {
	if p == nil {
		return nil
	}
	r := new(sql.NullByte)
	r.Byte = byte(p.Byte)
	r.Valid = p.Valid
	return r
}

// NullBoolNewFromFuzz builds a sql.NullBool from its protobuf
// counterpart; a nil input yields a nil output.
func NullBoolNewFromFuzz(p *NullBoolStruct) *sql.NullBool {
	if p == nil {
		return nil
	}
	r := new(sql.NullBool)
	r.Bool = p.Bool
	r.Valid = p.Valid
	return r
}
// FuzzNG_valid runs the harness on data that is assumed to be a valid
// protobuf encoding of NgoloFuzzList, panicking if it is not (for fuzzing
// modes whose generator only emits valid inputs). Panics whose value is a
// string are treated as expected harness signals and swallowed; any other
// panic is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			// Not a harness-signaled string panic: propagate.
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure runs the harness on data that may or may not be a valid
// protobuf encoding of NgoloFuzzList; undecodable inputs are simply
// rejected with 0. Panics whose value is a string are treated as expected
// harness signals and swallowed; any other panic is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			// Not a harness-signaled string panic: propagate.
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards one-time reproducer setup in FuzzNG_List.
var initialized bool
// FuzzNG_List interprets gen as an ordered list of database/sql API calls
// and executes them. Values produced by earlier calls (DBs, Txs, Stmts,
// Conns, Rows, Row) are kept in pools and consumed round-robin by later
// calls. Returns 1 when the whole list ran, 0 as soon as any call yields
// an error (or when the list exceeds 4096 operations).
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// First run only: if FUZZ_NG_REPRODUCER names a file, write a Go
// source reproducer of this input there.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Result pools plus a round-robin cursor for each pooled type.
var TxResults []*sql.Tx
TxResultsIndex := 0
var DBResults []*sql.DB
DBResultsIndex := 0
var StmtResults []*sql.Stmt
StmtResultsIndex := 0
var ConnResults []*sql.Conn
ConnResultsIndex := 0
var RowsResults []*sql.Rows
RowsResultsIndex := 0
var RowResults []*sql.Row
RowResultsIndex := 0
for l := range gen.List {
// Cap the number of executed operations to bound runtime.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Drivers:
sql.Drivers()
case *NgoloFuzzOne_Named:
sql.Named(a.Named.Name, a.Named.Value)
case *NgoloFuzzOne_IsolationLevelNgdotString:
arg0 := IsolationLevelNewFromFuzz(a.IsolationLevelNgdotString.I)
arg0.String()
// Null* Scan/Value pairs: construct the wrapper from the fuzzed
// struct (skipping nil), invoke the method, and stop on error.
case *NgoloFuzzOne_NullStringNgdotScan:
arg0 := NullStringNewFromFuzz(a.NullStringNgdotScan.Ns)
if arg0 == nil {
continue
}
r0 := arg0.Scan(a.NullStringNgdotScan.Value)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_NullStringNgdotValue:
arg0 := NullStringNewFromFuzz(a.NullStringNgdotValue.Ns)
if arg0 == nil {
continue
}
_, r1 := arg0.Value()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NullInt64NgdotScan:
arg0 := NullInt64NewFromFuzz(a.NullInt64NgdotScan.N)
if arg0 == nil {
continue
}
r0 := arg0.Scan(a.NullInt64NgdotScan.Value)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_NullInt64NgdotValue:
arg0 := NullInt64NewFromFuzz(a.NullInt64NgdotValue.N)
if arg0 == nil {
continue
}
_, r1 := arg0.Value()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NullInt32NgdotScan:
arg0 := NullInt32NewFromFuzz(a.NullInt32NgdotScan.N)
if arg0 == nil {
continue
}
r0 := arg0.Scan(a.NullInt32NgdotScan.Value)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_NullInt32NgdotValue:
arg0 := NullInt32NewFromFuzz(a.NullInt32NgdotValue.N)
if arg0 == nil {
continue
}
_, r1 := arg0.Value()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NullByteNgdotScan:
arg0 := NullByteNewFromFuzz(a.NullByteNgdotScan.N)
if arg0 == nil {
continue
}
r0 := arg0.Scan(a.NullByteNgdotScan.Value)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_NullByteNgdotValue:
arg0 := NullByteNewFromFuzz(a.NullByteNgdotValue.N)
if arg0 == nil {
continue
}
_, r1 := arg0.Value()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NullFloat64NgdotScan:
arg0 := NullFloat64NewFromFuzz(a.NullFloat64NgdotScan.N)
if arg0 == nil {
continue
}
r0 := arg0.Scan(a.NullFloat64NgdotScan.Value)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_NullFloat64NgdotValue:
arg0 := NullFloat64NewFromFuzz(a.NullFloat64NgdotValue.N)
if arg0 == nil {
continue
}
_, r1 := arg0.Value()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NullBoolNgdotScan:
arg0 := NullBoolNewFromFuzz(a.NullBoolNgdotScan.N)
if arg0 == nil {
continue
}
r0 := arg0.Scan(a.NullBoolNgdotScan.Value)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_NullBoolNgdotValue:
arg0 := NullBoolNewFromFuzz(a.NullBoolNgdotValue.N)
if arg0 == nil {
continue
}
_, r1 := arg0.Value()
if r1 != nil{
r1.Error()
return 0
}
// Producers: successful calls push their result into the matching pool.
case *NgoloFuzzOne_Open:
r0, r1 := sql.Open(a.Open.DriverName, a.Open.DataSourceName)
if r0 != nil{
DBResults = append(DBResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_DBNgdotPing:
if len(DBResults) == 0 {
continue
}
arg0 := DBResults[DBResultsIndex]
DBResultsIndex = (DBResultsIndex + 1) % len(DBResults)
r0 := arg0.Ping()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_DBNgdotClose:
if len(DBResults) == 0 {
continue
}
arg0 := DBResults[DBResultsIndex]
DBResultsIndex = (DBResultsIndex + 1) % len(DBResults)
r0 := arg0.Close()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_DBNgdotSetMaxIdleConns:
if len(DBResults) == 0 {
continue
}
arg0 := DBResults[DBResultsIndex]
DBResultsIndex = (DBResultsIndex + 1) % len(DBResults)
arg1 := int(a.DBNgdotSetMaxIdleConns.N)
arg0.SetMaxIdleConns(arg1)
case *NgoloFuzzOne_DBNgdotSetMaxOpenConns:
if len(DBResults) == 0 {
continue
}
arg0 := DBResults[DBResultsIndex]
DBResultsIndex = (DBResultsIndex + 1) % len(DBResults)
arg1 := int(a.DBNgdotSetMaxOpenConns.N)
arg0.SetMaxOpenConns(arg1)
case *NgoloFuzzOne_DBNgdotStats:
if len(DBResults) == 0 {
continue
}
arg0 := DBResults[DBResultsIndex]
DBResultsIndex = (DBResultsIndex + 1) % len(DBResults)
arg0.Stats()
case *NgoloFuzzOne_DBNgdotPrepare:
if len(DBResults) == 0 {
continue
}
arg0 := DBResults[DBResultsIndex]
DBResultsIndex = (DBResultsIndex + 1) % len(DBResults)
r0, r1 := arg0.Prepare(a.DBNgdotPrepare.Query)
if r0 != nil{
StmtResults = append(StmtResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_DBNgdotBegin:
if len(DBResults) == 0 {
continue
}
arg0 := DBResults[DBResultsIndex]
DBResultsIndex = (DBResultsIndex + 1) % len(DBResults)
r0, r1 := arg0.Begin()
if r0 != nil{
TxResults = append(TxResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_DBNgdotDriver:
if len(DBResults) == 0 {
continue
}
arg0 := DBResults[DBResultsIndex]
DBResultsIndex = (DBResultsIndex + 1) % len(DBResults)
arg0.Driver()
case *NgoloFuzzOne_ConnNgdotClose:
if len(ConnResults) == 0 {
continue
}
arg0 := ConnResults[ConnResultsIndex]
ConnResultsIndex = (ConnResultsIndex + 1) % len(ConnResults)
r0 := arg0.Close()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_TxNgdotCommit:
if len(TxResults) == 0 {
continue
}
arg0 := TxResults[TxResultsIndex]
TxResultsIndex = (TxResultsIndex + 1) % len(TxResults)
r0 := arg0.Commit()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_TxNgdotRollback:
if len(TxResults) == 0 {
continue
}
arg0 := TxResults[TxResultsIndex]
TxResultsIndex = (TxResultsIndex + 1) % len(TxResults)
r0 := arg0.Rollback()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_TxNgdotPrepare:
if len(TxResults) == 0 {
continue
}
arg0 := TxResults[TxResultsIndex]
TxResultsIndex = (TxResultsIndex + 1) % len(TxResults)
r0, r1 := arg0.Prepare(a.TxNgdotPrepare.Query)
if r0 != nil{
StmtResults = append(StmtResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TxNgdotStmt:
if len(TxResults) == 0 {
continue
}
arg0 := TxResults[TxResultsIndex]
TxResultsIndex = (TxResultsIndex + 1) % len(TxResults)
if len(StmtResults) == 0 {
continue
}
arg1 := StmtResults[StmtResultsIndex]
StmtResultsIndex = (StmtResultsIndex + 1) % len(StmtResults)
r0 := arg0.Stmt(arg1)
if r0 != nil{
StmtResults = append(StmtResults, r0)
}
case *NgoloFuzzOne_StmtNgdotClose:
if len(StmtResults) == 0 {
continue
}
arg0 := StmtResults[StmtResultsIndex]
StmtResultsIndex = (StmtResultsIndex + 1) % len(StmtResults)
r0 := arg0.Close()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_RowsNgdotNext:
if len(RowsResults) == 0 {
continue
}
arg0 := RowsResults[RowsResultsIndex]
RowsResultsIndex = (RowsResultsIndex + 1) % len(RowsResults)
arg0.Next()
case *NgoloFuzzOne_RowsNgdotNextResultSet:
if len(RowsResults) == 0 {
continue
}
arg0 := RowsResults[RowsResultsIndex]
RowsResultsIndex = (RowsResultsIndex + 1) % len(RowsResults)
arg0.NextResultSet()
case *NgoloFuzzOne_RowsNgdotErr:
if len(RowsResults) == 0 {
continue
}
arg0 := RowsResults[RowsResultsIndex]
RowsResultsIndex = (RowsResultsIndex + 1) % len(RowsResults)
r0 := arg0.Err()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_RowsNgdotColumns:
if len(RowsResults) == 0 {
continue
}
arg0 := RowsResults[RowsResultsIndex]
RowsResultsIndex = (RowsResultsIndex + 1) % len(RowsResults)
_, r1 := arg0.Columns()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_RowsNgdotColumnTypes:
if len(RowsResults) == 0 {
continue
}
arg0 := RowsResults[RowsResultsIndex]
RowsResultsIndex = (RowsResultsIndex + 1) % len(RowsResults)
_, r1 := arg0.ColumnTypes()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_RowsNgdotClose:
if len(RowsResults) == 0 {
continue
}
arg0 := RowsResults[RowsResultsIndex]
RowsResultsIndex = (RowsResultsIndex + 1) % len(RowsResults)
r0 := arg0.Close()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_RowNgdotErr:
if len(RowResults) == 0 {
continue
}
arg0 := RowResults[RowResultsIndex]
RowResultsIndex = (RowResultsIndex + 1) % len(RowResults)
r0 := arg0.Err()
if r0 != nil{
r0.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes to w a Go-source reproducer of the operation list:
// one line of code per operation, mirroring the dispatch in FuzzNG_List
// so a crashing input can be replayed as a standalone program. The *Nb
// counters track how many values of each type have been created; the
// *Index counters replay the same round-robin consumption order the
// executor uses.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
TxNb := 0
TxResultsIndex := 0
DBNb := 0
DBResultsIndex := 0
StmtNb := 0
StmtResultsIndex := 0
ConnNb := 0
ConnResultsIndex := 0
RowsNb := 0
RowsResultsIndex := 0
RowNb := 0
RowResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Drivers:
w.WriteString(fmt.Sprintf("sql.Drivers()\n"))
case *NgoloFuzzOne_Named:
w.WriteString(fmt.Sprintf("sql.Named(%#+v, %#+v)\n", a.Named.Name, a.Named.Value))
case *NgoloFuzzOne_IsolationLevelNgdotString:
w.WriteString(fmt.Sprintf("IsolationLevelNewFromFuzz(%#+v).String()\n", a.IsolationLevelNgdotString.I))
case *NgoloFuzzOne_NullStringNgdotScan:
w.WriteString(fmt.Sprintf("NullStringNewFromFuzz(%#+v).Scan(%#+v)\n", a.NullStringNgdotScan.Ns, a.NullStringNgdotScan.Value))
case *NgoloFuzzOne_NullStringNgdotValue:
w.WriteString(fmt.Sprintf("NullStringNewFromFuzz(%#+v).Value()\n", a.NullStringNgdotValue.Ns))
case *NgoloFuzzOne_NullInt64NgdotScan:
w.WriteString(fmt.Sprintf("NullInt64NewFromFuzz(%#+v).Scan(%#+v)\n", a.NullInt64NgdotScan.N, a.NullInt64NgdotScan.Value))
case *NgoloFuzzOne_NullInt64NgdotValue:
w.WriteString(fmt.Sprintf("NullInt64NewFromFuzz(%#+v).Value()\n", a.NullInt64NgdotValue.N))
case *NgoloFuzzOne_NullInt32NgdotScan:
w.WriteString(fmt.Sprintf("NullInt32NewFromFuzz(%#+v).Scan(%#+v)\n", a.NullInt32NgdotScan.N, a.NullInt32NgdotScan.Value))
case *NgoloFuzzOne_NullInt32NgdotValue:
w.WriteString(fmt.Sprintf("NullInt32NewFromFuzz(%#+v).Value()\n", a.NullInt32NgdotValue.N))
case *NgoloFuzzOne_NullByteNgdotScan:
w.WriteString(fmt.Sprintf("NullByteNewFromFuzz(%#+v).Scan(%#+v)\n", a.NullByteNgdotScan.N, a.NullByteNgdotScan.Value))
case *NgoloFuzzOne_NullByteNgdotValue:
w.WriteString(fmt.Sprintf("NullByteNewFromFuzz(%#+v).Value()\n", a.NullByteNgdotValue.N))
case *NgoloFuzzOne_NullFloat64NgdotScan:
w.WriteString(fmt.Sprintf("NullFloat64NewFromFuzz(%#+v).Scan(%#+v)\n", a.NullFloat64NgdotScan.N, a.NullFloat64NgdotScan.Value))
case *NgoloFuzzOne_NullFloat64NgdotValue:
w.WriteString(fmt.Sprintf("NullFloat64NewFromFuzz(%#+v).Value()\n", a.NullFloat64NgdotValue.N))
case *NgoloFuzzOne_NullBoolNgdotScan:
w.WriteString(fmt.Sprintf("NullBoolNewFromFuzz(%#+v).Scan(%#+v)\n", a.NullBoolNgdotScan.N, a.NullBoolNgdotScan.Value))
case *NgoloFuzzOne_NullBoolNgdotValue:
w.WriteString(fmt.Sprintf("NullBoolNewFromFuzz(%#+v).Value()\n", a.NullBoolNgdotValue.N))
case *NgoloFuzzOne_Open:
w.WriteString(fmt.Sprintf("DB%d, _ := sql.Open(%#+v, %#+v)\n", DBNb, a.Open.DriverName, a.Open.DataSourceName))
DBNb = DBNb + 1
case *NgoloFuzzOne_DBNgdotPing:
if DBNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("DB%d.Ping()\n", DBResultsIndex))
DBResultsIndex = (DBResultsIndex + 1) % DBNb
case *NgoloFuzzOne_DBNgdotClose:
if DBNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("DB%d.Close()\n", DBResultsIndex))
DBResultsIndex = (DBResultsIndex + 1) % DBNb
case *NgoloFuzzOne_DBNgdotSetMaxIdleConns:
if DBNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("DB%d.SetMaxIdleConns(int(%#+v))\n", DBResultsIndex, a.DBNgdotSetMaxIdleConns.N))
DBResultsIndex = (DBResultsIndex + 1) % DBNb
case *NgoloFuzzOne_DBNgdotSetMaxOpenConns:
if DBNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("DB%d.SetMaxOpenConns(int(%#+v))\n", DBResultsIndex, a.DBNgdotSetMaxOpenConns.N))
DBResultsIndex = (DBResultsIndex + 1) % DBNb
case *NgoloFuzzOne_DBNgdotStats:
if DBNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("DB%d.Stats()\n", DBResultsIndex))
DBResultsIndex = (DBResultsIndex + 1) % DBNb
case *NgoloFuzzOne_DBNgdotPrepare:
if DBNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Stmt%d, _ := DB%d.Prepare(%#+v)\n", StmtNb, DBResultsIndex, a.DBNgdotPrepare.Query))
StmtNb = StmtNb + 1
DBResultsIndex = (DBResultsIndex + 1) % DBNb
case *NgoloFuzzOne_DBNgdotBegin:
if DBNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Tx%d, _ := DB%d.Begin()\n", TxNb, DBResultsIndex))
TxNb = TxNb + 1
DBResultsIndex = (DBResultsIndex + 1) % DBNb
case *NgoloFuzzOne_DBNgdotDriver:
if DBNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("DB%d.Driver()\n", DBResultsIndex))
DBResultsIndex = (DBResultsIndex + 1) % DBNb
case *NgoloFuzzOne_ConnNgdotClose:
if ConnNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Conn%d.Close()\n", ConnResultsIndex))
ConnResultsIndex = (ConnResultsIndex + 1) % ConnNb
case *NgoloFuzzOne_TxNgdotCommit:
if TxNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Tx%d.Commit()\n", TxResultsIndex))
TxResultsIndex = (TxResultsIndex + 1) % TxNb
case *NgoloFuzzOne_TxNgdotRollback:
if TxNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Tx%d.Rollback()\n", TxResultsIndex))
TxResultsIndex = (TxResultsIndex + 1) % TxNb
case *NgoloFuzzOne_TxNgdotPrepare:
if TxNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Stmt%d, _ := Tx%d.Prepare(%#+v)\n", StmtNb, TxResultsIndex, a.TxNgdotPrepare.Query))
StmtNb = StmtNb + 1
TxResultsIndex = (TxResultsIndex + 1) % TxNb
case *NgoloFuzzOne_TxNgdotStmt:
if TxNb == 0 {
continue
}
if StmtNb == 0 {
continue
}
// NOTE(review): the executor consumes the Stmt argument before
// appending the new one; the "+ 0" keeps the pre-increment index —
// verify the printed index matches the executor's pool order.
w.WriteString(fmt.Sprintf("Stmt%d := Tx%d.Stmt(Stmt%d)\n", StmtNb, TxResultsIndex, (StmtResultsIndex + 0) % StmtNb))
StmtNb = StmtNb + 1
TxResultsIndex = (TxResultsIndex + 1) % TxNb
StmtResultsIndex = (StmtResultsIndex + 1) % StmtNb
case *NgoloFuzzOne_StmtNgdotClose:
if StmtNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Stmt%d.Close()\n", StmtResultsIndex))
StmtResultsIndex = (StmtResultsIndex + 1) % StmtNb
case *NgoloFuzzOne_RowsNgdotNext:
if RowsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rows%d.Next()\n", RowsResultsIndex))
RowsResultsIndex = (RowsResultsIndex + 1) % RowsNb
case *NgoloFuzzOne_RowsNgdotNextResultSet:
if RowsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rows%d.NextResultSet()\n", RowsResultsIndex))
RowsResultsIndex = (RowsResultsIndex + 1) % RowsNb
case *NgoloFuzzOne_RowsNgdotErr:
if RowsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rows%d.Err()\n", RowsResultsIndex))
RowsResultsIndex = (RowsResultsIndex + 1) % RowsNb
case *NgoloFuzzOne_RowsNgdotColumns:
if RowsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rows%d.Columns()\n", RowsResultsIndex))
RowsResultsIndex = (RowsResultsIndex + 1) % RowsNb
case *NgoloFuzzOne_RowsNgdotColumnTypes:
if RowsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rows%d.ColumnTypes()\n", RowsResultsIndex))
RowsResultsIndex = (RowsResultsIndex + 1) % RowsNb
case *NgoloFuzzOne_RowsNgdotClose:
if RowsNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rows%d.Close()\n", RowsResultsIndex))
RowsResultsIndex = (RowsResultsIndex + 1) % RowsNb
case *NgoloFuzzOne_RowNgdotErr:
if RowNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Row%d.Err()\n", RowResultsIndex))
RowResultsIndex = (RowResultsIndex + 1) % RowNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_database_sql
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: fail the build if the generated code and the
// protoimpl runtime are too far apart in version.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// IsolationLevelEnum is the generated protobuf enum mirroring
// database/sql isolation levels for the fuzzing harness.
type IsolationLevelEnum int32
const (
IsolationLevelEnum_LevelDefault IsolationLevelEnum = 0
IsolationLevelEnum_LevelReadUncommitted IsolationLevelEnum = 1
IsolationLevelEnum_LevelReadCommitted IsolationLevelEnum = 2
IsolationLevelEnum_LevelWriteCommitted IsolationLevelEnum = 3
IsolationLevelEnum_LevelRepeatableRead IsolationLevelEnum = 4
IsolationLevelEnum_LevelSnapshot IsolationLevelEnum = 5
IsolationLevelEnum_LevelSerializable IsolationLevelEnum = 6
IsolationLevelEnum_LevelLinearizable IsolationLevelEnum = 7
)
// Enum value maps for IsolationLevelEnum.
var (
IsolationLevelEnum_name = map[int32]string{
0: "LevelDefault",
1: "LevelReadUncommitted",
2: "LevelReadCommitted",
3: "LevelWriteCommitted",
4: "LevelRepeatableRead",
5: "LevelSnapshot",
6: "LevelSerializable",
7: "LevelLinearizable",
}
IsolationLevelEnum_value = map[string]int32{
"LevelDefault": 0,
"LevelReadUncommitted": 1,
"LevelReadCommitted": 2,
"LevelWriteCommitted": 3,
"LevelRepeatableRead": 4,
"LevelSnapshot": 5,
"LevelSerializable": 6,
"LevelLinearizable": 7,
}
)
// Enum returns a pointer to a copy of x, as required by the generated API.
func (x IsolationLevelEnum) Enum() *IsolationLevelEnum {
p := new(IsolationLevelEnum)
*p = x
return p
}
// String renders the enum value using its protobuf name.
func (x IsolationLevelEnum) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protobuf descriptor for this enum type.
func (IsolationLevelEnum) Descriptor() protoreflect.EnumDescriptor {
return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}
// Type returns the protobuf reflection type for this enum.
func (IsolationLevelEnum) Type() protoreflect.EnumType {
return &file_ngolofuzz_proto_enumTypes[0]
}
// Number returns the enum's wire value.
func (x IsolationLevelEnum) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use IsolationLevelEnum.Descriptor instead.
func (IsolationLevelEnum) EnumDescriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// NullFloat64Struct is the generated protobuf mirror of sql.NullFloat64.
type NullFloat64Struct struct {
state protoimpl.MessageState `protogen:"open.v1"`
Float64 float64 `protobuf:"fixed64,1,opt,name=Float64,proto3" json:"Float64,omitempty"`
Valid bool `protobuf:"varint,2,opt,name=Valid,proto3" json:"Valid,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero state while re-binding its message info.
func (x *NullFloat64Struct) Reset() {
*x = NullFloat64Struct{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NullFloat64Struct) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NullFloat64Struct) ProtoMessage() {}
// ProtoReflect exposes the message through the protobuf reflection API.
func (x *NullFloat64Struct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullFloat64Struct.ProtoReflect.Descriptor instead.
func (*NullFloat64Struct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetFloat64 returns the Float64 field, or 0 for a nil receiver.
func (x *NullFloat64Struct) GetFloat64() float64 {
if x != nil {
return x.Float64
}
return 0
}
// GetValid returns the Valid field, or false for a nil receiver.
func (x *NullFloat64Struct) GetValid() bool {
if x != nil {
return x.Valid
}
return false
}
// NullInt64Struct is a generated protobuf message (ngolofuzz.proto, msgTypes[1])
// holding an int64 value plus a Valid flag; presumably mirrors
// database/sql.NullInt64 — confirm against ngolofuzz.proto.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullInt64Struct struct {
state protoimpl.MessageState `protogen:"open.v1"`
Int64 int64 `protobuf:"varint,1,opt,name=Int64,proto3" json:"Int64,omitempty"`
Valid bool `protobuf:"varint,2,opt,name=Valid,proto3" json:"Valid,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullInt64Struct) Reset() {
*x = NullInt64Struct{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullInt64Struct) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullInt64Struct) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullInt64Struct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullInt64Struct.ProtoReflect.Descriptor instead.
func (*NullInt64Struct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetInt64 returns the Int64 field, or 0 when x is nil.
func (x *NullInt64Struct) GetInt64() int64 {
if x != nil {
return x.Int64
}
return 0
}
// GetValid returns the Valid field, or false when x is nil.
func (x *NullInt64Struct) GetValid() bool {
if x != nil {
return x.Valid
}
return false
}
// NullInt32Struct is a generated protobuf message (ngolofuzz.proto, msgTypes[2])
// holding an int32 value plus a Valid flag; presumably mirrors
// database/sql.NullInt32 — confirm against ngolofuzz.proto.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullInt32Struct struct {
state protoimpl.MessageState `protogen:"open.v1"`
Int32 int32 `protobuf:"varint,1,opt,name=Int32,proto3" json:"Int32,omitempty"`
Valid bool `protobuf:"varint,2,opt,name=Valid,proto3" json:"Valid,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullInt32Struct) Reset() {
*x = NullInt32Struct{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullInt32Struct) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullInt32Struct) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullInt32Struct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullInt32Struct.ProtoReflect.Descriptor instead.
func (*NullInt32Struct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// GetInt32 returns the Int32 field, or 0 when x is nil.
func (x *NullInt32Struct) GetInt32() int32 {
if x != nil {
return x.Int32
}
return 0
}
// GetValid returns the Valid field, or false when x is nil.
func (x *NullInt32Struct) GetValid() bool {
if x != nil {
return x.Valid
}
return false
}
// TxOptionsStruct is a generated protobuf message (ngolofuzz.proto, msgTypes[3])
// carrying a transaction isolation level and a read-only flag; presumably
// mirrors database/sql.TxOptions — confirm against ngolofuzz.proto.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type TxOptionsStruct struct {
state protoimpl.MessageState `protogen:"open.v1"`
Isolation IsolationLevelEnum `protobuf:"varint,1,opt,name=Isolation,proto3,enum=ngolofuzz.IsolationLevelEnum" json:"Isolation,omitempty"`
ReadOnly bool `protobuf:"varint,2,opt,name=ReadOnly,proto3" json:"ReadOnly,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *TxOptionsStruct) Reset() {
*x = TxOptionsStruct{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *TxOptionsStruct) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*TxOptionsStruct) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *TxOptionsStruct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TxOptionsStruct.ProtoReflect.Descriptor instead.
func (*TxOptionsStruct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetIsolation returns the Isolation field, or LevelDefault when x is nil.
func (x *TxOptionsStruct) GetIsolation() IsolationLevelEnum {
if x != nil {
return x.Isolation
}
return IsolationLevelEnum_LevelDefault
}
// GetReadOnly returns the ReadOnly field, or false when x is nil.
func (x *TxOptionsStruct) GetReadOnly() bool {
if x != nil {
return x.ReadOnly
}
return false
}
// NullStringStruct is a generated protobuf message (ngolofuzz.proto, msgTypes[4])
// holding a string value plus a Valid flag; presumably mirrors
// database/sql.NullString — confirm against ngolofuzz.proto. The field is
// named String_ because the generator appends "_" to avoid clashing with the
// String() method.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullStringStruct struct {
state protoimpl.MessageState `protogen:"open.v1"`
String_ string `protobuf:"bytes,1,opt,name=String,proto3" json:"String,omitempty"`
Valid bool `protobuf:"varint,2,opt,name=Valid,proto3" json:"Valid,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullStringStruct) Reset() {
*x = NullStringStruct{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullStringStruct) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullStringStruct) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullStringStruct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullStringStruct.ProtoReflect.Descriptor instead.
func (*NullStringStruct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// GetString_ returns the String_ field, or "" when x is nil.
func (x *NullStringStruct) GetString_() string {
if x != nil {
return x.String_
}
return ""
}
// GetValid returns the Valid field, or false when x is nil.
func (x *NullStringStruct) GetValid() bool {
if x != nil {
return x.Valid
}
return false
}
// NullByteStruct is a generated protobuf message (ngolofuzz.proto, msgTypes[5])
// holding a byte value (widened to uint32, since proto3 has no 8-bit scalar)
// plus a Valid flag; presumably mirrors database/sql.NullByte — confirm
// against ngolofuzz.proto.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullByteStruct struct {
state protoimpl.MessageState `protogen:"open.v1"`
Byte uint32 `protobuf:"varint,1,opt,name=Byte,proto3" json:"Byte,omitempty"`
Valid bool `protobuf:"varint,2,opt,name=Valid,proto3" json:"Valid,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullByteStruct) Reset() {
*x = NullByteStruct{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullByteStruct) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullByteStruct) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullByteStruct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullByteStruct.ProtoReflect.Descriptor instead.
func (*NullByteStruct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetByte returns the Byte field, or 0 when x is nil.
func (x *NullByteStruct) GetByte() uint32 {
if x != nil {
return x.Byte
}
return 0
}
// GetValid returns the Valid field, or false when x is nil.
func (x *NullByteStruct) GetValid() bool {
if x != nil {
return x.Valid
}
return false
}
// NullBoolStruct is a generated protobuf message (ngolofuzz.proto, msgTypes[6])
// holding a bool value plus a Valid flag; presumably mirrors
// database/sql.NullBool — confirm against ngolofuzz.proto.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullBoolStruct struct {
state protoimpl.MessageState `protogen:"open.v1"`
Bool bool `protobuf:"varint,1,opt,name=Bool,proto3" json:"Bool,omitempty"`
Valid bool `protobuf:"varint,2,opt,name=Valid,proto3" json:"Valid,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullBoolStruct) Reset() {
*x = NullBoolStruct{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullBoolStruct) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullBoolStruct) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullBoolStruct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullBoolStruct.ProtoReflect.Descriptor instead.
func (*NullBoolStruct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// GetBool returns the Bool field, or false when x is nil.
func (x *NullBoolStruct) GetBool() bool {
if x != nil {
return x.Bool
}
return false
}
// GetValid returns the Valid field, or false when x is nil.
func (x *NullBoolStruct) GetValid() bool {
if x != nil {
return x.Valid
}
return false
}
// DriversArgs is a generated, field-less protobuf message (ngolofuzz.proto,
// msgTypes[7]); apparently the empty argument bundle for a fuzzed call to a
// zero-argument Drivers function — confirm against the generator.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type DriversArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *DriversArgs) Reset() {
*x = DriversArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *DriversArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DriversArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *DriversArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DriversArgs.ProtoReflect.Descriptor instead.
func (*DriversArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// NamedArgs is a generated protobuf message (ngolofuzz.proto, msgTypes[8])
// pairing a name with an arbitrary value (NgoloFuzzAny, declared elsewhere in
// this file); presumably mirrors database/sql.Named's arguments — confirm
// against ngolofuzz.proto.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NamedArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value *NgoloFuzzAny `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NamedArgs) Reset() {
*x = NamedArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NamedArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NamedArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NamedArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NamedArgs.ProtoReflect.Descriptor instead.
func (*NamedArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// GetName returns the Name field, or "" when x is nil.
func (x *NamedArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
// GetValue returns the Value field, or nil when x is nil.
func (x *NamedArgs) GetValue() *NgoloFuzzAny {
if x != nil {
return x.Value
}
return nil
}
// IsolationLevelNgdotStringArgs is a generated protobuf message
// (ngolofuzz.proto, msgTypes[9]); apparently the argument bundle for a fuzzed
// IsolationLevel.String call ("Ngdot" appears to encode "." in Type.Method
// names — confirm with the generator).
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type IsolationLevelNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I IsolationLevelEnum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.IsolationLevelEnum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *IsolationLevelNgdotStringArgs) Reset() {
*x = IsolationLevelNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *IsolationLevelNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*IsolationLevelNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *IsolationLevelNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IsolationLevelNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*IsolationLevelNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetI returns the I field, or LevelDefault when x is nil.
func (x *IsolationLevelNgdotStringArgs) GetI() IsolationLevelEnum {
if x != nil {
return x.I
}
return IsolationLevelEnum_LevelDefault
}
// NullStringNgdotScanArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[10]); apparently the argument bundle for a fuzzed NullString.Scan
// call: the receiver (Ns) plus the value to scan.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullStringNgdotScanArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Ns *NullStringStruct `protobuf:"bytes,1,opt,name=ns,proto3" json:"ns,omitempty"`
Value *NgoloFuzzAny `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullStringNgdotScanArgs) Reset() {
*x = NullStringNgdotScanArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullStringNgdotScanArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullStringNgdotScanArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullStringNgdotScanArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullStringNgdotScanArgs.ProtoReflect.Descriptor instead.
func (*NullStringNgdotScanArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetNs returns the Ns field, or nil when x is nil.
func (x *NullStringNgdotScanArgs) GetNs() *NullStringStruct {
if x != nil {
return x.Ns
}
return nil
}
// GetValue returns the Value field, or nil when x is nil.
func (x *NullStringNgdotScanArgs) GetValue() *NgoloFuzzAny {
if x != nil {
return x.Value
}
return nil
}
// NullStringNgdotValueArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[11]); apparently the receiver-only argument bundle for a fuzzed
// NullString.Value call.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullStringNgdotValueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Ns *NullStringStruct `protobuf:"bytes,1,opt,name=ns,proto3" json:"ns,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullStringNgdotValueArgs) Reset() {
*x = NullStringNgdotValueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullStringNgdotValueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullStringNgdotValueArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullStringNgdotValueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullStringNgdotValueArgs.ProtoReflect.Descriptor instead.
func (*NullStringNgdotValueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// GetNs returns the Ns field, or nil when x is nil.
func (x *NullStringNgdotValueArgs) GetNs() *NullStringStruct {
if x != nil {
return x.Ns
}
return nil
}
// NullInt64NgdotScanArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[12]); apparently the argument bundle for a fuzzed NullInt64.Scan
// call: the receiver (N) plus the value to scan.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullInt64NgdotScanArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N *NullInt64Struct `protobuf:"bytes,1,opt,name=n,proto3" json:"n,omitempty"`
Value *NgoloFuzzAny `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullInt64NgdotScanArgs) Reset() {
*x = NullInt64NgdotScanArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullInt64NgdotScanArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullInt64NgdotScanArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullInt64NgdotScanArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullInt64NgdotScanArgs.ProtoReflect.Descriptor instead.
func (*NullInt64NgdotScanArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// GetN returns the N field, or nil when x is nil.
func (x *NullInt64NgdotScanArgs) GetN() *NullInt64Struct {
if x != nil {
return x.N
}
return nil
}
// GetValue returns the Value field, or nil when x is nil.
func (x *NullInt64NgdotScanArgs) GetValue() *NgoloFuzzAny {
if x != nil {
return x.Value
}
return nil
}
// NullInt64NgdotValueArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[13]); apparently the receiver-only argument bundle for a fuzzed
// NullInt64.Value call.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullInt64NgdotValueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N *NullInt64Struct `protobuf:"bytes,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullInt64NgdotValueArgs) Reset() {
*x = NullInt64NgdotValueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullInt64NgdotValueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullInt64NgdotValueArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullInt64NgdotValueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullInt64NgdotValueArgs.ProtoReflect.Descriptor instead.
func (*NullInt64NgdotValueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// GetN returns the N field, or nil when x is nil.
func (x *NullInt64NgdotValueArgs) GetN() *NullInt64Struct {
if x != nil {
return x.N
}
return nil
}
// NullInt32NgdotScanArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[14]); apparently the argument bundle for a fuzzed NullInt32.Scan
// call: the receiver (N) plus the value to scan.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullInt32NgdotScanArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N *NullInt32Struct `protobuf:"bytes,1,opt,name=n,proto3" json:"n,omitempty"`
Value *NgoloFuzzAny `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullInt32NgdotScanArgs) Reset() {
*x = NullInt32NgdotScanArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullInt32NgdotScanArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullInt32NgdotScanArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullInt32NgdotScanArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullInt32NgdotScanArgs.ProtoReflect.Descriptor instead.
func (*NullInt32NgdotScanArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// GetN returns the N field, or nil when x is nil.
func (x *NullInt32NgdotScanArgs) GetN() *NullInt32Struct {
if x != nil {
return x.N
}
return nil
}
// GetValue returns the Value field, or nil when x is nil.
func (x *NullInt32NgdotScanArgs) GetValue() *NgoloFuzzAny {
if x != nil {
return x.Value
}
return nil
}
// NullInt32NgdotValueArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[15]); apparently the receiver-only argument bundle for a fuzzed
// NullInt32.Value call.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullInt32NgdotValueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N *NullInt32Struct `protobuf:"bytes,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullInt32NgdotValueArgs) Reset() {
*x = NullInt32NgdotValueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullInt32NgdotValueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullInt32NgdotValueArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullInt32NgdotValueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullInt32NgdotValueArgs.ProtoReflect.Descriptor instead.
func (*NullInt32NgdotValueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetN returns the N field, or nil when x is nil.
func (x *NullInt32NgdotValueArgs) GetN() *NullInt32Struct {
if x != nil {
return x.N
}
return nil
}
// NullByteNgdotScanArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[16]); apparently the argument bundle for a fuzzed NullByte.Scan
// call: the receiver (N) plus the value to scan.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullByteNgdotScanArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N *NullByteStruct `protobuf:"bytes,1,opt,name=n,proto3" json:"n,omitempty"`
Value *NgoloFuzzAny `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullByteNgdotScanArgs) Reset() {
*x = NullByteNgdotScanArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullByteNgdotScanArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullByteNgdotScanArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullByteNgdotScanArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullByteNgdotScanArgs.ProtoReflect.Descriptor instead.
func (*NullByteNgdotScanArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetN returns the N field, or nil when x is nil.
func (x *NullByteNgdotScanArgs) GetN() *NullByteStruct {
if x != nil {
return x.N
}
return nil
}
// GetValue returns the Value field, or nil when x is nil.
func (x *NullByteNgdotScanArgs) GetValue() *NgoloFuzzAny {
if x != nil {
return x.Value
}
return nil
}
// NullByteNgdotValueArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[17]); apparently the receiver-only argument bundle for a fuzzed
// NullByte.Value call.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullByteNgdotValueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N *NullByteStruct `protobuf:"bytes,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullByteNgdotValueArgs) Reset() {
*x = NullByteNgdotValueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullByteNgdotValueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullByteNgdotValueArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullByteNgdotValueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullByteNgdotValueArgs.ProtoReflect.Descriptor instead.
func (*NullByteNgdotValueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// GetN returns the N field, or nil when x is nil.
func (x *NullByteNgdotValueArgs) GetN() *NullByteStruct {
if x != nil {
return x.N
}
return nil
}
// NullFloat64NgdotScanArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[18]); apparently the argument bundle for a fuzzed NullFloat64.Scan
// call: the receiver (N) plus the value to scan.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullFloat64NgdotScanArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N *NullFloat64Struct `protobuf:"bytes,1,opt,name=n,proto3" json:"n,omitempty"`
Value *NgoloFuzzAny `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullFloat64NgdotScanArgs) Reset() {
*x = NullFloat64NgdotScanArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullFloat64NgdotScanArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullFloat64NgdotScanArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullFloat64NgdotScanArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullFloat64NgdotScanArgs.ProtoReflect.Descriptor instead.
func (*NullFloat64NgdotScanArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// GetN returns the N field, or nil when x is nil.
func (x *NullFloat64NgdotScanArgs) GetN() *NullFloat64Struct {
if x != nil {
return x.N
}
return nil
}
// GetValue returns the Value field, or nil when x is nil.
func (x *NullFloat64NgdotScanArgs) GetValue() *NgoloFuzzAny {
if x != nil {
return x.Value
}
return nil
}
// NullFloat64NgdotValueArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[19]); apparently the receiver-only argument bundle for a fuzzed
// NullFloat64.Value call.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullFloat64NgdotValueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N *NullFloat64Struct `protobuf:"bytes,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullFloat64NgdotValueArgs) Reset() {
*x = NullFloat64NgdotValueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullFloat64NgdotValueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullFloat64NgdotValueArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullFloat64NgdotValueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullFloat64NgdotValueArgs.ProtoReflect.Descriptor instead.
func (*NullFloat64NgdotValueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// GetN returns the N field, or nil when x is nil.
func (x *NullFloat64NgdotValueArgs) GetN() *NullFloat64Struct {
if x != nil {
return x.N
}
return nil
}
// NullBoolNgdotScanArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[20]); apparently the argument bundle for a fuzzed NullBool.Scan
// call: the receiver (N) plus the value to scan.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullBoolNgdotScanArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N *NullBoolStruct `protobuf:"bytes,1,opt,name=n,proto3" json:"n,omitempty"`
Value *NgoloFuzzAny `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullBoolNgdotScanArgs) Reset() {
*x = NullBoolNgdotScanArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullBoolNgdotScanArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullBoolNgdotScanArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullBoolNgdotScanArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullBoolNgdotScanArgs.ProtoReflect.Descriptor instead.
func (*NullBoolNgdotScanArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// GetN returns the N field, or nil when x is nil.
func (x *NullBoolNgdotScanArgs) GetN() *NullBoolStruct {
if x != nil {
return x.N
}
return nil
}
// GetValue returns the Value field, or nil when x is nil.
func (x *NullBoolNgdotScanArgs) GetValue() *NgoloFuzzAny {
if x != nil {
return x.Value
}
return nil
}
// NullBoolNgdotValueArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[21]); apparently the receiver-only argument bundle for a fuzzed
// NullBool.Value call.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type NullBoolNgdotValueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N *NullBoolStruct `protobuf:"bytes,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *NullBoolNgdotValueArgs) Reset() {
*x = NullBoolNgdotValueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *NullBoolNgdotValueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NullBoolNgdotValueArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *NullBoolNgdotValueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NullBoolNgdotValueArgs.ProtoReflect.Descriptor instead.
func (*NullBoolNgdotValueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// GetN returns the N field, or nil when x is nil.
func (x *NullBoolNgdotValueArgs) GetN() *NullBoolStruct {
if x != nil {
return x.N
}
return nil
}
// OpenArgs is a generated protobuf message (ngolofuzz.proto, msgTypes[22])
// carrying a driver name and data source name; presumably the argument bundle
// for a fuzzed sql.Open call — confirm against ngolofuzz.proto.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type OpenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
DriverName string `protobuf:"bytes,1,opt,name=driverName,proto3" json:"driverName,omitempty"`
DataSourceName string `protobuf:"bytes,2,opt,name=dataSourceName,proto3" json:"dataSourceName,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *OpenArgs) Reset() {
*x = OpenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *OpenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*OpenArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *OpenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OpenArgs.ProtoReflect.Descriptor instead.
func (*OpenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// GetDriverName returns the DriverName field, or "" when x is nil.
func (x *OpenArgs) GetDriverName() string {
if x != nil {
return x.DriverName
}
return ""
}
// GetDataSourceName returns the DataSourceName field, or "" when x is nil.
func (x *OpenArgs) GetDataSourceName() string {
if x != nil {
return x.DataSourceName
}
return ""
}
// DBNgdotPingArgs is a generated, field-less protobuf message (ngolofuzz.proto,
// msgTypes[23]); apparently the empty argument bundle for a fuzzed DB.Ping call.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type DBNgdotPingArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *DBNgdotPingArgs) Reset() {
*x = DBNgdotPingArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *DBNgdotPingArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DBNgdotPingArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *DBNgdotPingArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBNgdotPingArgs.ProtoReflect.Descriptor instead.
func (*DBNgdotPingArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// DBNgdotCloseArgs is a generated, field-less protobuf message (ngolofuzz.proto,
// msgTypes[24]); apparently the empty argument bundle for a fuzzed DB.Close call.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type DBNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *DBNgdotCloseArgs) Reset() {
*x = DBNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *DBNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DBNgdotCloseArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *DBNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*DBNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// DBNgdotSetMaxIdleConnsArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[25]); apparently the argument bundle for a fuzzed
// DB.SetMaxIdleConns call (N carried as int64 on the wire).
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type DBNgdotSetMaxIdleConnsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *DBNgdotSetMaxIdleConnsArgs) Reset() {
*x = DBNgdotSetMaxIdleConnsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *DBNgdotSetMaxIdleConnsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DBNgdotSetMaxIdleConnsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *DBNgdotSetMaxIdleConnsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBNgdotSetMaxIdleConnsArgs.ProtoReflect.Descriptor instead.
func (*DBNgdotSetMaxIdleConnsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// GetN returns the N field, or 0 when x is nil.
func (x *DBNgdotSetMaxIdleConnsArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// DBNgdotSetMaxOpenConnsArgs is a generated protobuf message (ngolofuzz.proto,
// msgTypes[26]); apparently the argument bundle for a fuzzed
// DB.SetMaxOpenConns call (N carried as int64 on the wire).
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type DBNgdotSetMaxOpenConnsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *DBNgdotSetMaxOpenConnsArgs) Reset() {
*x = DBNgdotSetMaxOpenConnsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *DBNgdotSetMaxOpenConnsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DBNgdotSetMaxOpenConnsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *DBNgdotSetMaxOpenConnsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBNgdotSetMaxOpenConnsArgs.ProtoReflect.Descriptor instead.
func (*DBNgdotSetMaxOpenConnsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// GetN returns the N field, or 0 when x is nil.
func (x *DBNgdotSetMaxOpenConnsArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// DBNgdotStatsArgs is a generated, field-less protobuf message (ngolofuzz.proto,
// msgTypes[27]); apparently the empty argument bundle for a fuzzed DB.Stats call.
// NOTE(review): machine-generated code — change the .proto and regenerate; do not hand-edit.
type DBNgdotStatsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero message and stores its message info.
func (x *DBNgdotStatsArgs) Reset() {
*x = DBNgdotStatsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf runtime's textual rendering of x.
func (x *DBNgdotStatsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DBNgdotStatsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily attaching message info.
func (x *DBNgdotStatsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBNgdotStatsArgs.ProtoReflect.Descriptor instead.
func (*DBNgdotStatsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// DBNgdotPrepareArgs is the generated Go type for the DBNgdotPrepareArgs
// message declared in ngolofuzz.proto; it carries a single query string.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type DBNgdotPrepareArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *DBNgdotPrepareArgs) Reset() {
*x = DBNgdotPrepareArgs{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *DBNgdotPrepareArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DBNgdotPrepareArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *DBNgdotPrepareArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBNgdotPrepareArgs.ProtoReflect.Descriptor instead.
func (*DBNgdotPrepareArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
// GetQuery returns the query field, or "" when the receiver is nil.
func (x *DBNgdotPrepareArgs) GetQuery() string {
if x != nil {
return x.Query
}
return ""
}
// DBNgdotBeginArgs is the generated Go type for the (field-less)
// DBNgdotBeginArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type DBNgdotBeginArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *DBNgdotBeginArgs) Reset() {
*x = DBNgdotBeginArgs{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *DBNgdotBeginArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DBNgdotBeginArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *DBNgdotBeginArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBNgdotBeginArgs.ProtoReflect.Descriptor instead.
func (*DBNgdotBeginArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
// DBNgdotDriverArgs is the generated Go type for the (field-less)
// DBNgdotDriverArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type DBNgdotDriverArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *DBNgdotDriverArgs) Reset() {
*x = DBNgdotDriverArgs{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *DBNgdotDriverArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DBNgdotDriverArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *DBNgdotDriverArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DBNgdotDriverArgs.ProtoReflect.Descriptor instead.
func (*DBNgdotDriverArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}
// ConnNgdotCloseArgs is the generated Go type for the (field-less)
// ConnNgdotCloseArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type ConnNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *ConnNgdotCloseArgs) Reset() {
*x = ConnNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *ConnNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ConnNgdotCloseArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *ConnNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ConnNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*ConnNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
// TxNgdotCommitArgs is the generated Go type for the (field-less)
// TxNgdotCommitArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type TxNgdotCommitArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *TxNgdotCommitArgs) Reset() {
*x = TxNgdotCommitArgs{}
mi := &file_ngolofuzz_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *TxNgdotCommitArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*TxNgdotCommitArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *TxNgdotCommitArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TxNgdotCommitArgs.ProtoReflect.Descriptor instead.
func (*TxNgdotCommitArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}
// TxNgdotRollbackArgs is the generated Go type for the (field-less)
// TxNgdotRollbackArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type TxNgdotRollbackArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *TxNgdotRollbackArgs) Reset() {
*x = TxNgdotRollbackArgs{}
mi := &file_ngolofuzz_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *TxNgdotRollbackArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*TxNgdotRollbackArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *TxNgdotRollbackArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TxNgdotRollbackArgs.ProtoReflect.Descriptor instead.
func (*TxNgdotRollbackArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{33}
}
// TxNgdotPrepareArgs is the generated Go type for the TxNgdotPrepareArgs
// message declared in ngolofuzz.proto; it carries a single query string.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type TxNgdotPrepareArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *TxNgdotPrepareArgs) Reset() {
*x = TxNgdotPrepareArgs{}
mi := &file_ngolofuzz_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *TxNgdotPrepareArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*TxNgdotPrepareArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *TxNgdotPrepareArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[34]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TxNgdotPrepareArgs.ProtoReflect.Descriptor instead.
func (*TxNgdotPrepareArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{34}
}
// GetQuery returns the query field, or "" when the receiver is nil.
func (x *TxNgdotPrepareArgs) GetQuery() string {
if x != nil {
return x.Query
}
return ""
}
// TxNgdotStmtArgs is the generated Go type for the (field-less)
// TxNgdotStmtArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type TxNgdotStmtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *TxNgdotStmtArgs) Reset() {
*x = TxNgdotStmtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *TxNgdotStmtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*TxNgdotStmtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *TxNgdotStmtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[35]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TxNgdotStmtArgs.ProtoReflect.Descriptor instead.
func (*TxNgdotStmtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{35}
}
// StmtNgdotCloseArgs is the generated Go type for the (field-less)
// StmtNgdotCloseArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type StmtNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *StmtNgdotCloseArgs) Reset() {
*x = StmtNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *StmtNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*StmtNgdotCloseArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *StmtNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[36]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StmtNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*StmtNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{36}
}
// RowsNgdotNextArgs is the generated Go type for the (field-less)
// RowsNgdotNextArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type RowsNgdotNextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *RowsNgdotNextArgs) Reset() {
*x = RowsNgdotNextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *RowsNgdotNextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*RowsNgdotNextArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *RowsNgdotNextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[37]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RowsNgdotNextArgs.ProtoReflect.Descriptor instead.
func (*RowsNgdotNextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{37}
}
// RowsNgdotNextResultSetArgs is the generated Go type for the (field-less)
// RowsNgdotNextResultSetArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type RowsNgdotNextResultSetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *RowsNgdotNextResultSetArgs) Reset() {
*x = RowsNgdotNextResultSetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *RowsNgdotNextResultSetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*RowsNgdotNextResultSetArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *RowsNgdotNextResultSetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[38]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RowsNgdotNextResultSetArgs.ProtoReflect.Descriptor instead.
func (*RowsNgdotNextResultSetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{38}
}
// RowsNgdotErrArgs is the generated Go type for the (field-less)
// RowsNgdotErrArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type RowsNgdotErrArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *RowsNgdotErrArgs) Reset() {
*x = RowsNgdotErrArgs{}
mi := &file_ngolofuzz_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *RowsNgdotErrArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*RowsNgdotErrArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *RowsNgdotErrArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[39]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RowsNgdotErrArgs.ProtoReflect.Descriptor instead.
func (*RowsNgdotErrArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{39}
}
// RowsNgdotColumnsArgs is the generated Go type for the (field-less)
// RowsNgdotColumnsArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type RowsNgdotColumnsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *RowsNgdotColumnsArgs) Reset() {
*x = RowsNgdotColumnsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *RowsNgdotColumnsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*RowsNgdotColumnsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *RowsNgdotColumnsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[40]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RowsNgdotColumnsArgs.ProtoReflect.Descriptor instead.
func (*RowsNgdotColumnsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{40}
}
// RowsNgdotColumnTypesArgs is the generated Go type for the (field-less)
// RowsNgdotColumnTypesArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type RowsNgdotColumnTypesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *RowsNgdotColumnTypesArgs) Reset() {
*x = RowsNgdotColumnTypesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *RowsNgdotColumnTypesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*RowsNgdotColumnTypesArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *RowsNgdotColumnTypesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[41]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RowsNgdotColumnTypesArgs.ProtoReflect.Descriptor instead.
func (*RowsNgdotColumnTypesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{41}
}
// RowsNgdotCloseArgs is the generated Go type for the (field-less)
// RowsNgdotCloseArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type RowsNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *RowsNgdotCloseArgs) Reset() {
*x = RowsNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *RowsNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*RowsNgdotCloseArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *RowsNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[42]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RowsNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*RowsNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{42}
}
// RowNgdotErrArgs is the generated Go type for the (field-less)
// RowNgdotErrArgs message declared in ngolofuzz.proto.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type RowNgdotErrArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *RowNgdotErrArgs) Reset() {
*x = RowNgdotErrArgs{}
mi := &file_ngolofuzz_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *RowNgdotErrArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*RowNgdotErrArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *RowNgdotErrArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[43]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RowNgdotErrArgs.ProtoReflect.Descriptor instead.
func (*RowNgdotErrArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{43}
}
// NgoloFuzzOne is the generated Go type for the NgoloFuzzOne message declared
// in ngolofuzz.proto. Its single oneof field, Item, holds exactly one of the
// 37 per-call argument messages listed below.
// Machine-generated code — regenerate with protoc rather than hand-editing.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Drivers
// *NgoloFuzzOne_Named
// *NgoloFuzzOne_IsolationLevelNgdotString
// *NgoloFuzzOne_NullStringNgdotScan
// *NgoloFuzzOne_NullStringNgdotValue
// *NgoloFuzzOne_NullInt64NgdotScan
// *NgoloFuzzOne_NullInt64NgdotValue
// *NgoloFuzzOne_NullInt32NgdotScan
// *NgoloFuzzOne_NullInt32NgdotValue
// *NgoloFuzzOne_NullByteNgdotScan
// *NgoloFuzzOne_NullByteNgdotValue
// *NgoloFuzzOne_NullFloat64NgdotScan
// *NgoloFuzzOne_NullFloat64NgdotValue
// *NgoloFuzzOne_NullBoolNgdotScan
// *NgoloFuzzOne_NullBoolNgdotValue
// *NgoloFuzzOne_Open
// *NgoloFuzzOne_DBNgdotPing
// *NgoloFuzzOne_DBNgdotClose
// *NgoloFuzzOne_DBNgdotSetMaxIdleConns
// *NgoloFuzzOne_DBNgdotSetMaxOpenConns
// *NgoloFuzzOne_DBNgdotStats
// *NgoloFuzzOne_DBNgdotPrepare
// *NgoloFuzzOne_DBNgdotBegin
// *NgoloFuzzOne_DBNgdotDriver
// *NgoloFuzzOne_ConnNgdotClose
// *NgoloFuzzOne_TxNgdotCommit
// *NgoloFuzzOne_TxNgdotRollback
// *NgoloFuzzOne_TxNgdotPrepare
// *NgoloFuzzOne_TxNgdotStmt
// *NgoloFuzzOne_StmtNgdotClose
// *NgoloFuzzOne_RowsNgdotNext
// *NgoloFuzzOne_RowsNgdotNextResultSet
// *NgoloFuzzOne_RowsNgdotErr
// *NgoloFuzzOne_RowsNgdotColumns
// *NgoloFuzzOne_RowsNgdotColumnTypes
// *NgoloFuzzOne_RowsNgdotClose
// *NgoloFuzzOne_RowNgdotErr
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to its zero value and re-binds its generated message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text-format representation of the message.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, storing the
// message info lazily on first use; a nil receiver falls back to mi.MessageOf.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[44]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{44}
}
// GetItem returns the currently-populated oneof wrapper, or nil when the
// receiver is nil or no variant is set.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Each Get* accessor below returns the wrapped argument message when Item
// currently holds the corresponding variant, and nil otherwise; all are safe
// to call on a nil receiver.
func (x *NgoloFuzzOne) GetDrivers() *DriversArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Drivers); ok {
return x.Drivers
}
}
return nil
}
func (x *NgoloFuzzOne) GetNamed() *NamedArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Named); ok {
return x.Named
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsolationLevelNgdotString() *IsolationLevelNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsolationLevelNgdotString); ok {
return x.IsolationLevelNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullStringNgdotScan() *NullStringNgdotScanArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullStringNgdotScan); ok {
return x.NullStringNgdotScan
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullStringNgdotValue() *NullStringNgdotValueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullStringNgdotValue); ok {
return x.NullStringNgdotValue
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullInt64NgdotScan() *NullInt64NgdotScanArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullInt64NgdotScan); ok {
return x.NullInt64NgdotScan
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullInt64NgdotValue() *NullInt64NgdotValueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullInt64NgdotValue); ok {
return x.NullInt64NgdotValue
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullInt32NgdotScan() *NullInt32NgdotScanArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullInt32NgdotScan); ok {
return x.NullInt32NgdotScan
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullInt32NgdotValue() *NullInt32NgdotValueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullInt32NgdotValue); ok {
return x.NullInt32NgdotValue
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullByteNgdotScan() *NullByteNgdotScanArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullByteNgdotScan); ok {
return x.NullByteNgdotScan
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullByteNgdotValue() *NullByteNgdotValueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullByteNgdotValue); ok {
return x.NullByteNgdotValue
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullFloat64NgdotScan() *NullFloat64NgdotScanArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullFloat64NgdotScan); ok {
return x.NullFloat64NgdotScan
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullFloat64NgdotValue() *NullFloat64NgdotValueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullFloat64NgdotValue); ok {
return x.NullFloat64NgdotValue
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullBoolNgdotScan() *NullBoolNgdotScanArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullBoolNgdotScan); ok {
return x.NullBoolNgdotScan
}
}
return nil
}
func (x *NgoloFuzzOne) GetNullBoolNgdotValue() *NullBoolNgdotValueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NullBoolNgdotValue); ok {
return x.NullBoolNgdotValue
}
}
return nil
}
func (x *NgoloFuzzOne) GetOpen() *OpenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Open); ok {
return x.Open
}
}
return nil
}
func (x *NgoloFuzzOne) GetDBNgdotPing() *DBNgdotPingArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DBNgdotPing); ok {
return x.DBNgdotPing
}
}
return nil
}
func (x *NgoloFuzzOne) GetDBNgdotClose() *DBNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DBNgdotClose); ok {
return x.DBNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetDBNgdotSetMaxIdleConns() *DBNgdotSetMaxIdleConnsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DBNgdotSetMaxIdleConns); ok {
return x.DBNgdotSetMaxIdleConns
}
}
return nil
}
func (x *NgoloFuzzOne) GetDBNgdotSetMaxOpenConns() *DBNgdotSetMaxOpenConnsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DBNgdotSetMaxOpenConns); ok {
return x.DBNgdotSetMaxOpenConns
}
}
return nil
}
func (x *NgoloFuzzOne) GetDBNgdotStats() *DBNgdotStatsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DBNgdotStats); ok {
return x.DBNgdotStats
}
}
return nil
}
func (x *NgoloFuzzOne) GetDBNgdotPrepare() *DBNgdotPrepareArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DBNgdotPrepare); ok {
return x.DBNgdotPrepare
}
}
return nil
}
func (x *NgoloFuzzOne) GetDBNgdotBegin() *DBNgdotBeginArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DBNgdotBegin); ok {
return x.DBNgdotBegin
}
}
return nil
}
func (x *NgoloFuzzOne) GetDBNgdotDriver() *DBNgdotDriverArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DBNgdotDriver); ok {
return x.DBNgdotDriver
}
}
return nil
}
func (x *NgoloFuzzOne) GetConnNgdotClose() *ConnNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ConnNgdotClose); ok {
return x.ConnNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetTxNgdotCommit() *TxNgdotCommitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TxNgdotCommit); ok {
return x.TxNgdotCommit
}
}
return nil
}
func (x *NgoloFuzzOne) GetTxNgdotRollback() *TxNgdotRollbackArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TxNgdotRollback); ok {
return x.TxNgdotRollback
}
}
return nil
}
func (x *NgoloFuzzOne) GetTxNgdotPrepare() *TxNgdotPrepareArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TxNgdotPrepare); ok {
return x.TxNgdotPrepare
}
}
return nil
}
func (x *NgoloFuzzOne) GetTxNgdotStmt() *TxNgdotStmtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TxNgdotStmt); ok {
return x.TxNgdotStmt
}
}
return nil
}
func (x *NgoloFuzzOne) GetStmtNgdotClose() *StmtNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_StmtNgdotClose); ok {
return x.StmtNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetRowsNgdotNext() *RowsNgdotNextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RowsNgdotNext); ok {
return x.RowsNgdotNext
}
}
return nil
}
func (x *NgoloFuzzOne) GetRowsNgdotNextResultSet() *RowsNgdotNextResultSetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RowsNgdotNextResultSet); ok {
return x.RowsNgdotNextResultSet
}
}
return nil
}
func (x *NgoloFuzzOne) GetRowsNgdotErr() *RowsNgdotErrArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RowsNgdotErr); ok {
return x.RowsNgdotErr
}
}
return nil
}
func (x *NgoloFuzzOne) GetRowsNgdotColumns() *RowsNgdotColumnsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RowsNgdotColumns); ok {
return x.RowsNgdotColumns
}
}
return nil
}
func (x *NgoloFuzzOne) GetRowsNgdotColumnTypes() *RowsNgdotColumnTypesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RowsNgdotColumnTypes); ok {
return x.RowsNgdotColumnTypes
}
}
return nil
}
func (x *NgoloFuzzOne) GetRowsNgdotClose() *RowsNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RowsNgdotClose); ok {
return x.RowsNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetRowNgdotErr() *RowNgdotErrArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RowNgdotErr); ok {
return x.RowNgdotErr
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type below; only generated wrappers can be assigned to Item.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// One wrapper struct per oneof variant; the protobuf field numbers (1-37)
// come from the struct tags.
type NgoloFuzzOne_Drivers struct {
Drivers *DriversArgs `protobuf:"bytes,1,opt,name=Drivers,proto3,oneof"`
}
type NgoloFuzzOne_Named struct {
Named *NamedArgs `protobuf:"bytes,2,opt,name=Named,proto3,oneof"`
}
type NgoloFuzzOne_IsolationLevelNgdotString struct {
IsolationLevelNgdotString *IsolationLevelNgdotStringArgs `protobuf:"bytes,3,opt,name=IsolationLevelNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_NullStringNgdotScan struct {
NullStringNgdotScan *NullStringNgdotScanArgs `protobuf:"bytes,4,opt,name=NullStringNgdotScan,proto3,oneof"`
}
type NgoloFuzzOne_NullStringNgdotValue struct {
NullStringNgdotValue *NullStringNgdotValueArgs `protobuf:"bytes,5,opt,name=NullStringNgdotValue,proto3,oneof"`
}
type NgoloFuzzOne_NullInt64NgdotScan struct {
NullInt64NgdotScan *NullInt64NgdotScanArgs `protobuf:"bytes,6,opt,name=NullInt64NgdotScan,proto3,oneof"`
}
type NgoloFuzzOne_NullInt64NgdotValue struct {
NullInt64NgdotValue *NullInt64NgdotValueArgs `protobuf:"bytes,7,opt,name=NullInt64NgdotValue,proto3,oneof"`
}
type NgoloFuzzOne_NullInt32NgdotScan struct {
NullInt32NgdotScan *NullInt32NgdotScanArgs `protobuf:"bytes,8,opt,name=NullInt32NgdotScan,proto3,oneof"`
}
type NgoloFuzzOne_NullInt32NgdotValue struct {
NullInt32NgdotValue *NullInt32NgdotValueArgs `protobuf:"bytes,9,opt,name=NullInt32NgdotValue,proto3,oneof"`
}
type NgoloFuzzOne_NullByteNgdotScan struct {
NullByteNgdotScan *NullByteNgdotScanArgs `protobuf:"bytes,10,opt,name=NullByteNgdotScan,proto3,oneof"`
}
type NgoloFuzzOne_NullByteNgdotValue struct {
NullByteNgdotValue *NullByteNgdotValueArgs `protobuf:"bytes,11,opt,name=NullByteNgdotValue,proto3,oneof"`
}
type NgoloFuzzOne_NullFloat64NgdotScan struct {
NullFloat64NgdotScan *NullFloat64NgdotScanArgs `protobuf:"bytes,12,opt,name=NullFloat64NgdotScan,proto3,oneof"`
}
type NgoloFuzzOne_NullFloat64NgdotValue struct {
NullFloat64NgdotValue *NullFloat64NgdotValueArgs `protobuf:"bytes,13,opt,name=NullFloat64NgdotValue,proto3,oneof"`
}
type NgoloFuzzOne_NullBoolNgdotScan struct {
NullBoolNgdotScan *NullBoolNgdotScanArgs `protobuf:"bytes,14,opt,name=NullBoolNgdotScan,proto3,oneof"`
}
type NgoloFuzzOne_NullBoolNgdotValue struct {
NullBoolNgdotValue *NullBoolNgdotValueArgs `protobuf:"bytes,15,opt,name=NullBoolNgdotValue,proto3,oneof"`
}
type NgoloFuzzOne_Open struct {
Open *OpenArgs `protobuf:"bytes,16,opt,name=Open,proto3,oneof"`
}
type NgoloFuzzOne_DBNgdotPing struct {
DBNgdotPing *DBNgdotPingArgs `protobuf:"bytes,17,opt,name=DBNgdotPing,proto3,oneof"`
}
type NgoloFuzzOne_DBNgdotClose struct {
DBNgdotClose *DBNgdotCloseArgs `protobuf:"bytes,18,opt,name=DBNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_DBNgdotSetMaxIdleConns struct {
DBNgdotSetMaxIdleConns *DBNgdotSetMaxIdleConnsArgs `protobuf:"bytes,19,opt,name=DBNgdotSetMaxIdleConns,proto3,oneof"`
}
type NgoloFuzzOne_DBNgdotSetMaxOpenConns struct {
DBNgdotSetMaxOpenConns *DBNgdotSetMaxOpenConnsArgs `protobuf:"bytes,20,opt,name=DBNgdotSetMaxOpenConns,proto3,oneof"`
}
type NgoloFuzzOne_DBNgdotStats struct {
DBNgdotStats *DBNgdotStatsArgs `protobuf:"bytes,21,opt,name=DBNgdotStats,proto3,oneof"`
}
type NgoloFuzzOne_DBNgdotPrepare struct {
DBNgdotPrepare *DBNgdotPrepareArgs `protobuf:"bytes,22,opt,name=DBNgdotPrepare,proto3,oneof"`
}
type NgoloFuzzOne_DBNgdotBegin struct {
DBNgdotBegin *DBNgdotBeginArgs `protobuf:"bytes,23,opt,name=DBNgdotBegin,proto3,oneof"`
}
type NgoloFuzzOne_DBNgdotDriver struct {
DBNgdotDriver *DBNgdotDriverArgs `protobuf:"bytes,24,opt,name=DBNgdotDriver,proto3,oneof"`
}
type NgoloFuzzOne_ConnNgdotClose struct {
ConnNgdotClose *ConnNgdotCloseArgs `protobuf:"bytes,25,opt,name=ConnNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_TxNgdotCommit struct {
TxNgdotCommit *TxNgdotCommitArgs `protobuf:"bytes,26,opt,name=TxNgdotCommit,proto3,oneof"`
}
type NgoloFuzzOne_TxNgdotRollback struct {
TxNgdotRollback *TxNgdotRollbackArgs `protobuf:"bytes,27,opt,name=TxNgdotRollback,proto3,oneof"`
}
type NgoloFuzzOne_TxNgdotPrepare struct {
TxNgdotPrepare *TxNgdotPrepareArgs `protobuf:"bytes,28,opt,name=TxNgdotPrepare,proto3,oneof"`
}
type NgoloFuzzOne_TxNgdotStmt struct {
TxNgdotStmt *TxNgdotStmtArgs `protobuf:"bytes,29,opt,name=TxNgdotStmt,proto3,oneof"`
}
type NgoloFuzzOne_StmtNgdotClose struct {
StmtNgdotClose *StmtNgdotCloseArgs `protobuf:"bytes,30,opt,name=StmtNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_RowsNgdotNext struct {
RowsNgdotNext *RowsNgdotNextArgs `protobuf:"bytes,31,opt,name=RowsNgdotNext,proto3,oneof"`
}
type NgoloFuzzOne_RowsNgdotNextResultSet struct {
RowsNgdotNextResultSet *RowsNgdotNextResultSetArgs `protobuf:"bytes,32,opt,name=RowsNgdotNextResultSet,proto3,oneof"`
}
type NgoloFuzzOne_RowsNgdotErr struct {
RowsNgdotErr *RowsNgdotErrArgs `protobuf:"bytes,33,opt,name=RowsNgdotErr,proto3,oneof"`
}
type NgoloFuzzOne_RowsNgdotColumns struct {
RowsNgdotColumns *RowsNgdotColumnsArgs `protobuf:"bytes,34,opt,name=RowsNgdotColumns,proto3,oneof"`
}
type NgoloFuzzOne_RowsNgdotColumnTypes struct {
RowsNgdotColumnTypes *RowsNgdotColumnTypesArgs `protobuf:"bytes,35,opt,name=RowsNgdotColumnTypes,proto3,oneof"`
}
type NgoloFuzzOne_RowsNgdotClose struct {
RowsNgdotClose *RowsNgdotCloseArgs `protobuf:"bytes,36,opt,name=RowsNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_RowNgdotErr struct {
RowNgdotErr *RowNgdotErrArgs `protobuf:"bytes,37,opt,name=RowNgdotErr,proto3,oneof"`
}
// Marker methods sealing each wrapper type into the Item oneof.
func (*NgoloFuzzOne_Drivers) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Named) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsolationLevelNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullStringNgdotScan) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullStringNgdotValue) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullInt64NgdotScan) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullInt64NgdotValue) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullInt32NgdotScan) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullInt32NgdotValue) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullByteNgdotScan) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullByteNgdotValue) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullFloat64NgdotScan) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullFloat64NgdotValue) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullBoolNgdotScan) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NullBoolNgdotValue) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Open) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DBNgdotPing) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DBNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DBNgdotSetMaxIdleConns) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DBNgdotSetMaxOpenConns) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DBNgdotStats) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DBNgdotPrepare) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DBNgdotBegin) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DBNgdotDriver) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ConnNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TxNgdotCommit) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TxNgdotRollback) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TxNgdotPrepare) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TxNgdotStmt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_StmtNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RowsNgdotNext) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RowsNgdotNextResultSet) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RowsNgdotErr) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RowsNgdotColumns) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RowsNgdotColumnTypes) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RowsNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RowNgdotErr) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated oneof wrapper carrying one scalar argument
// value (double, int64, bool, string, or bytes) for a fuzzed call.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzAny_DoubleArgs
	// *NgoloFuzzAny_Int64Args
	// *NgoloFuzzAny_BoolArgs
	// *NgoloFuzzAny_StringArgs
	// *NgoloFuzzAny_BytesArgs
	Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset restores x to its zero value and re-arms the generated message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[45]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API, lazily
// caching the message info for nil-safe use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[45]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{45}
}

// GetItem returns the populated oneof member, or nil when unset.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetDoubleArgs returns the double value, or 0 when another member is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

// GetInt64Args returns the int64 value, or 0 when another member is set.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

// GetBoolArgs returns the bool value, or false when another member is set.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

// GetStringArgs returns the string value, or "" when another member is set.
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

// GetBytesArgs returns the bytes value, or nil when another member is set.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by all oneof members.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated top-level message: an ordered sequence of
// NgoloFuzzOne calls that the harness replays in order.
type NgoloFuzzList struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset restores x to its zero value and re-arms the generated message info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[46]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API, lazily
// caching the message info for nil-safe use.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[46]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{46}
}

// GetList returns the call sequence, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled file descriptor, populated by
// file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto, emitted verbatim by protoc-gen-go. Do not edit the bytes.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"C\n" +
	"\x11NullFloat64Struct\x12\x18\n" +
	"\aFloat64\x18\x01 \x01(\x01R\aFloat64\x12\x14\n" +
	"\x05Valid\x18\x02 \x01(\bR\x05Valid\"=\n" +
	"\x0fNullInt64Struct\x12\x14\n" +
	"\x05Int64\x18\x01 \x01(\x03R\x05Int64\x12\x14\n" +
	"\x05Valid\x18\x02 \x01(\bR\x05Valid\"=\n" +
	"\x0fNullInt32Struct\x12\x14\n" +
	"\x05Int32\x18\x01 \x01(\x05R\x05Int32\x12\x14\n" +
	"\x05Valid\x18\x02 \x01(\bR\x05Valid\"j\n" +
	"\x0fTxOptionsStruct\x12;\n" +
	"\tIsolation\x18\x01 \x01(\x0e2\x1d.ngolofuzz.IsolationLevelEnumR\tIsolation\x12\x1a\n" +
	"\bReadOnly\x18\x02 \x01(\bR\bReadOnly\"@\n" +
	"\x10NullStringStruct\x12\x16\n" +
	"\x06String\x18\x01 \x01(\tR\x06String\x12\x14\n" +
	"\x05Valid\x18\x02 \x01(\bR\x05Valid\":\n" +
	"\x0eNullByteStruct\x12\x12\n" +
	"\x04Byte\x18\x01 \x01(\rR\x04Byte\x12\x14\n" +
	"\x05Valid\x18\x02 \x01(\bR\x05Valid\":\n" +
	"\x0eNullBoolStruct\x12\x12\n" +
	"\x04Bool\x18\x01 \x01(\bR\x04Bool\x12\x14\n" +
	"\x05Valid\x18\x02 \x01(\bR\x05Valid\"\r\n" +
	"\vDriversArgs\"N\n" +
	"\tNamedArgs\x12\x12\n" +
	"\x04name\x18\x01 \x01(\tR\x04name\x12-\n" +
	"\x05value\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x05value\"L\n" +
	"\x1dIsolationLevelNgdotStringArgs\x12+\n" +
	"\x01i\x18\x01 \x01(\x0e2\x1d.ngolofuzz.IsolationLevelEnumR\x01i\"u\n" +
	"\x17NullStringNgdotScanArgs\x12+\n" +
	"\x02ns\x18\x01 \x01(\v2\x1b.ngolofuzz.NullStringStructR\x02ns\x12-\n" +
	"\x05value\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x05value\"G\n" +
	"\x18NullStringNgdotValueArgs\x12+\n" +
	"\x02ns\x18\x01 \x01(\v2\x1b.ngolofuzz.NullStringStructR\x02ns\"q\n" +
	"\x16NullInt64NgdotScanArgs\x12(\n" +
	"\x01n\x18\x01 \x01(\v2\x1a.ngolofuzz.NullInt64StructR\x01n\x12-\n" +
	"\x05value\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x05value\"C\n" +
	"\x17NullInt64NgdotValueArgs\x12(\n" +
	"\x01n\x18\x01 \x01(\v2\x1a.ngolofuzz.NullInt64StructR\x01n\"q\n" +
	"\x16NullInt32NgdotScanArgs\x12(\n" +
	"\x01n\x18\x01 \x01(\v2\x1a.ngolofuzz.NullInt32StructR\x01n\x12-\n" +
	"\x05value\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x05value\"C\n" +
	"\x17NullInt32NgdotValueArgs\x12(\n" +
	"\x01n\x18\x01 \x01(\v2\x1a.ngolofuzz.NullInt32StructR\x01n\"o\n" +
	"\x15NullByteNgdotScanArgs\x12'\n" +
	"\x01n\x18\x01 \x01(\v2\x19.ngolofuzz.NullByteStructR\x01n\x12-\n" +
	"\x05value\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x05value\"A\n" +
	"\x16NullByteNgdotValueArgs\x12'\n" +
	"\x01n\x18\x01 \x01(\v2\x19.ngolofuzz.NullByteStructR\x01n\"u\n" +
	"\x18NullFloat64NgdotScanArgs\x12*\n" +
	"\x01n\x18\x01 \x01(\v2\x1c.ngolofuzz.NullFloat64StructR\x01n\x12-\n" +
	"\x05value\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x05value\"G\n" +
	"\x19NullFloat64NgdotValueArgs\x12*\n" +
	"\x01n\x18\x01 \x01(\v2\x1c.ngolofuzz.NullFloat64StructR\x01n\"o\n" +
	"\x15NullBoolNgdotScanArgs\x12'\n" +
	"\x01n\x18\x01 \x01(\v2\x19.ngolofuzz.NullBoolStructR\x01n\x12-\n" +
	"\x05value\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x05value\"A\n" +
	"\x16NullBoolNgdotValueArgs\x12'\n" +
	"\x01n\x18\x01 \x01(\v2\x19.ngolofuzz.NullBoolStructR\x01n\"R\n" +
	"\bOpenArgs\x12\x1e\n" +
	"\n" +
	"driverName\x18\x01 \x01(\tR\n" +
	"driverName\x12&\n" +
	"\x0edataSourceName\x18\x02 \x01(\tR\x0edataSourceName\"\x11\n" +
	"\x0fDBNgdotPingArgs\"\x12\n" +
	"\x10DBNgdotCloseArgs\"*\n" +
	"\x1aDBNgdotSetMaxIdleConnsArgs\x12\f\n" +
	"\x01n\x18\x01 \x01(\x03R\x01n\"*\n" +
	"\x1aDBNgdotSetMaxOpenConnsArgs\x12\f\n" +
	"\x01n\x18\x01 \x01(\x03R\x01n\"\x12\n" +
	"\x10DBNgdotStatsArgs\"*\n" +
	"\x12DBNgdotPrepareArgs\x12\x14\n" +
	"\x05query\x18\x01 \x01(\tR\x05query\"\x12\n" +
	"\x10DBNgdotBeginArgs\"\x13\n" +
	"\x11DBNgdotDriverArgs\"\x14\n" +
	"\x12ConnNgdotCloseArgs\"\x13\n" +
	"\x11TxNgdotCommitArgs\"\x15\n" +
	"\x13TxNgdotRollbackArgs\"*\n" +
	"\x12TxNgdotPrepareArgs\x12\x14\n" +
	"\x05query\x18\x01 \x01(\tR\x05query\"\x11\n" +
	"\x0fTxNgdotStmtArgs\"\x14\n" +
	"\x12StmtNgdotCloseArgs\"\x13\n" +
	"\x11RowsNgdotNextArgs\"\x1c\n" +
	"\x1aRowsNgdotNextResultSetArgs\"\x12\n" +
	"\x10RowsNgdotErrArgs\"\x16\n" +
	"\x14RowsNgdotColumnsArgs\"\x1a\n" +
	"\x18RowsNgdotColumnTypesArgs\"\x14\n" +
	"\x12RowsNgdotCloseArgs\"\x11\n" +
	"\x0fRowNgdotErrArgs\"\xc5\x16\n" +
	"\fNgoloFuzzOne\x122\n" +
	"\aDrivers\x18\x01 \x01(\v2\x16.ngolofuzz.DriversArgsH\x00R\aDrivers\x12,\n" +
	"\x05Named\x18\x02 \x01(\v2\x14.ngolofuzz.NamedArgsH\x00R\x05Named\x12h\n" +
	"\x19IsolationLevelNgdotString\x18\x03 \x01(\v2(.ngolofuzz.IsolationLevelNgdotStringArgsH\x00R\x19IsolationLevelNgdotString\x12V\n" +
	"\x13NullStringNgdotScan\x18\x04 \x01(\v2\".ngolofuzz.NullStringNgdotScanArgsH\x00R\x13NullStringNgdotScan\x12Y\n" +
	"\x14NullStringNgdotValue\x18\x05 \x01(\v2#.ngolofuzz.NullStringNgdotValueArgsH\x00R\x14NullStringNgdotValue\x12S\n" +
	"\x12NullInt64NgdotScan\x18\x06 \x01(\v2!.ngolofuzz.NullInt64NgdotScanArgsH\x00R\x12NullInt64NgdotScan\x12V\n" +
	"\x13NullInt64NgdotValue\x18\a \x01(\v2\".ngolofuzz.NullInt64NgdotValueArgsH\x00R\x13NullInt64NgdotValue\x12S\n" +
	"\x12NullInt32NgdotScan\x18\b \x01(\v2!.ngolofuzz.NullInt32NgdotScanArgsH\x00R\x12NullInt32NgdotScan\x12V\n" +
	"\x13NullInt32NgdotValue\x18\t \x01(\v2\".ngolofuzz.NullInt32NgdotValueArgsH\x00R\x13NullInt32NgdotValue\x12P\n" +
	"\x11NullByteNgdotScan\x18\n" +
	" \x01(\v2 .ngolofuzz.NullByteNgdotScanArgsH\x00R\x11NullByteNgdotScan\x12S\n" +
	"\x12NullByteNgdotValue\x18\v \x01(\v2!.ngolofuzz.NullByteNgdotValueArgsH\x00R\x12NullByteNgdotValue\x12Y\n" +
	"\x14NullFloat64NgdotScan\x18\f \x01(\v2#.ngolofuzz.NullFloat64NgdotScanArgsH\x00R\x14NullFloat64NgdotScan\x12\\\n" +
	"\x15NullFloat64NgdotValue\x18\r \x01(\v2$.ngolofuzz.NullFloat64NgdotValueArgsH\x00R\x15NullFloat64NgdotValue\x12P\n" +
	"\x11NullBoolNgdotScan\x18\x0e \x01(\v2 .ngolofuzz.NullBoolNgdotScanArgsH\x00R\x11NullBoolNgdotScan\x12S\n" +
	"\x12NullBoolNgdotValue\x18\x0f \x01(\v2!.ngolofuzz.NullBoolNgdotValueArgsH\x00R\x12NullBoolNgdotValue\x12)\n" +
	"\x04Open\x18\x10 \x01(\v2\x13.ngolofuzz.OpenArgsH\x00R\x04Open\x12>\n" +
	"\vDBNgdotPing\x18\x11 \x01(\v2\x1a.ngolofuzz.DBNgdotPingArgsH\x00R\vDBNgdotPing\x12A\n" +
	"\fDBNgdotClose\x18\x12 \x01(\v2\x1b.ngolofuzz.DBNgdotCloseArgsH\x00R\fDBNgdotClose\x12_\n" +
	"\x16DBNgdotSetMaxIdleConns\x18\x13 \x01(\v2%.ngolofuzz.DBNgdotSetMaxIdleConnsArgsH\x00R\x16DBNgdotSetMaxIdleConns\x12_\n" +
	"\x16DBNgdotSetMaxOpenConns\x18\x14 \x01(\v2%.ngolofuzz.DBNgdotSetMaxOpenConnsArgsH\x00R\x16DBNgdotSetMaxOpenConns\x12A\n" +
	"\fDBNgdotStats\x18\x15 \x01(\v2\x1b.ngolofuzz.DBNgdotStatsArgsH\x00R\fDBNgdotStats\x12G\n" +
	"\x0eDBNgdotPrepare\x18\x16 \x01(\v2\x1d.ngolofuzz.DBNgdotPrepareArgsH\x00R\x0eDBNgdotPrepare\x12A\n" +
	"\fDBNgdotBegin\x18\x17 \x01(\v2\x1b.ngolofuzz.DBNgdotBeginArgsH\x00R\fDBNgdotBegin\x12D\n" +
	"\rDBNgdotDriver\x18\x18 \x01(\v2\x1c.ngolofuzz.DBNgdotDriverArgsH\x00R\rDBNgdotDriver\x12G\n" +
	"\x0eConnNgdotClose\x18\x19 \x01(\v2\x1d.ngolofuzz.ConnNgdotCloseArgsH\x00R\x0eConnNgdotClose\x12D\n" +
	"\rTxNgdotCommit\x18\x1a \x01(\v2\x1c.ngolofuzz.TxNgdotCommitArgsH\x00R\rTxNgdotCommit\x12J\n" +
	"\x0fTxNgdotRollback\x18\x1b \x01(\v2\x1e.ngolofuzz.TxNgdotRollbackArgsH\x00R\x0fTxNgdotRollback\x12G\n" +
	"\x0eTxNgdotPrepare\x18\x1c \x01(\v2\x1d.ngolofuzz.TxNgdotPrepareArgsH\x00R\x0eTxNgdotPrepare\x12>\n" +
	"\vTxNgdotStmt\x18\x1d \x01(\v2\x1a.ngolofuzz.TxNgdotStmtArgsH\x00R\vTxNgdotStmt\x12G\n" +
	"\x0eStmtNgdotClose\x18\x1e \x01(\v2\x1d.ngolofuzz.StmtNgdotCloseArgsH\x00R\x0eStmtNgdotClose\x12D\n" +
	"\rRowsNgdotNext\x18\x1f \x01(\v2\x1c.ngolofuzz.RowsNgdotNextArgsH\x00R\rRowsNgdotNext\x12_\n" +
	"\x16RowsNgdotNextResultSet\x18 \x01(\v2%.ngolofuzz.RowsNgdotNextResultSetArgsH\x00R\x16RowsNgdotNextResultSet\x12A\n" +
	"\fRowsNgdotErr\x18! \x01(\v2\x1b.ngolofuzz.RowsNgdotErrArgsH\x00R\fRowsNgdotErr\x12M\n" +
	"\x10RowsNgdotColumns\x18\" \x01(\v2\x1f.ngolofuzz.RowsNgdotColumnsArgsH\x00R\x10RowsNgdotColumns\x12Y\n" +
	"\x14RowsNgdotColumnTypes\x18# \x01(\v2#.ngolofuzz.RowsNgdotColumnTypesArgsH\x00R\x14RowsNgdotColumnTypes\x12G\n" +
	"\x0eRowsNgdotClose\x18$ \x01(\v2\x1d.ngolofuzz.RowsNgdotCloseArgsH\x00R\x0eRowsNgdotClose\x12>\n" +
	"\vRowNgdotErr\x18% \x01(\v2\x1a.ngolofuzz.RowNgdotErrArgsH\x00R\vRowNgdotErrB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*\xcb\x01\n" +
	"\x12IsolationLevelEnum\x12\x10\n" +
	"\fLevelDefault\x10\x00\x12\x18\n" +
	"\x14LevelReadUncommitted\x10\x01\x12\x16\n" +
	"\x12LevelReadCommitted\x10\x02\x12\x17\n" +
	"\x13LevelWriteCommitted\x10\x03\x12\x17\n" +
	"\x13LevelRepeatableRead\x10\x04\x12\x11\n" +
	"\rLevelSnapshot\x10\x05\x12\x15\n" +
	"\x11LevelSerializable\x10\x06\x12\x15\n" +
	"\x11LevelLinearizable\x10\aB\x19Z\x17./;fuzz_ng_database_sqlb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw descriptor
// exactly once and returns the cached result (used by Descriptor methods).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Generated runtime tables: one enum, 47 messages, and the Go type for each
// descriptor index (index 0 is the enum; messages follow in declaration order).
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 47)
var file_ngolofuzz_proto_goTypes = []any{
	(IsolationLevelEnum)(0), // 0: ngolofuzz.IsolationLevelEnum
	(*NullFloat64Struct)(nil), // 1: ngolofuzz.NullFloat64Struct
	(*NullInt64Struct)(nil), // 2: ngolofuzz.NullInt64Struct
	(*NullInt32Struct)(nil), // 3: ngolofuzz.NullInt32Struct
	(*TxOptionsStruct)(nil), // 4: ngolofuzz.TxOptionsStruct
	(*NullStringStruct)(nil), // 5: ngolofuzz.NullStringStruct
	(*NullByteStruct)(nil), // 6: ngolofuzz.NullByteStruct
	(*NullBoolStruct)(nil), // 7: ngolofuzz.NullBoolStruct
	(*DriversArgs)(nil), // 8: ngolofuzz.DriversArgs
	(*NamedArgs)(nil), // 9: ngolofuzz.NamedArgs
	(*IsolationLevelNgdotStringArgs)(nil), // 10: ngolofuzz.IsolationLevelNgdotStringArgs
	(*NullStringNgdotScanArgs)(nil), // 11: ngolofuzz.NullStringNgdotScanArgs
	(*NullStringNgdotValueArgs)(nil), // 12: ngolofuzz.NullStringNgdotValueArgs
	(*NullInt64NgdotScanArgs)(nil), // 13: ngolofuzz.NullInt64NgdotScanArgs
	(*NullInt64NgdotValueArgs)(nil), // 14: ngolofuzz.NullInt64NgdotValueArgs
	(*NullInt32NgdotScanArgs)(nil), // 15: ngolofuzz.NullInt32NgdotScanArgs
	(*NullInt32NgdotValueArgs)(nil), // 16: ngolofuzz.NullInt32NgdotValueArgs
	(*NullByteNgdotScanArgs)(nil), // 17: ngolofuzz.NullByteNgdotScanArgs
	(*NullByteNgdotValueArgs)(nil), // 18: ngolofuzz.NullByteNgdotValueArgs
	(*NullFloat64NgdotScanArgs)(nil), // 19: ngolofuzz.NullFloat64NgdotScanArgs
	(*NullFloat64NgdotValueArgs)(nil), // 20: ngolofuzz.NullFloat64NgdotValueArgs
	(*NullBoolNgdotScanArgs)(nil), // 21: ngolofuzz.NullBoolNgdotScanArgs
	(*NullBoolNgdotValueArgs)(nil), // 22: ngolofuzz.NullBoolNgdotValueArgs
	(*OpenArgs)(nil), // 23: ngolofuzz.OpenArgs
	(*DBNgdotPingArgs)(nil), // 24: ngolofuzz.DBNgdotPingArgs
	(*DBNgdotCloseArgs)(nil), // 25: ngolofuzz.DBNgdotCloseArgs
	(*DBNgdotSetMaxIdleConnsArgs)(nil), // 26: ngolofuzz.DBNgdotSetMaxIdleConnsArgs
	(*DBNgdotSetMaxOpenConnsArgs)(nil), // 27: ngolofuzz.DBNgdotSetMaxOpenConnsArgs
	(*DBNgdotStatsArgs)(nil), // 28: ngolofuzz.DBNgdotStatsArgs
	(*DBNgdotPrepareArgs)(nil), // 29: ngolofuzz.DBNgdotPrepareArgs
	(*DBNgdotBeginArgs)(nil), // 30: ngolofuzz.DBNgdotBeginArgs
	(*DBNgdotDriverArgs)(nil), // 31: ngolofuzz.DBNgdotDriverArgs
	(*ConnNgdotCloseArgs)(nil), // 32: ngolofuzz.ConnNgdotCloseArgs
	(*TxNgdotCommitArgs)(nil), // 33: ngolofuzz.TxNgdotCommitArgs
	(*TxNgdotRollbackArgs)(nil), // 34: ngolofuzz.TxNgdotRollbackArgs
	(*TxNgdotPrepareArgs)(nil), // 35: ngolofuzz.TxNgdotPrepareArgs
	(*TxNgdotStmtArgs)(nil), // 36: ngolofuzz.TxNgdotStmtArgs
	(*StmtNgdotCloseArgs)(nil), // 37: ngolofuzz.StmtNgdotCloseArgs
	(*RowsNgdotNextArgs)(nil), // 38: ngolofuzz.RowsNgdotNextArgs
	(*RowsNgdotNextResultSetArgs)(nil), // 39: ngolofuzz.RowsNgdotNextResultSetArgs
	(*RowsNgdotErrArgs)(nil), // 40: ngolofuzz.RowsNgdotErrArgs
	(*RowsNgdotColumnsArgs)(nil), // 41: ngolofuzz.RowsNgdotColumnsArgs
	(*RowsNgdotColumnTypesArgs)(nil), // 42: ngolofuzz.RowsNgdotColumnTypesArgs
	(*RowsNgdotCloseArgs)(nil), // 43: ngolofuzz.RowsNgdotCloseArgs
	(*RowNgdotErrArgs)(nil), // 44: ngolofuzz.RowNgdotErrArgs
	(*NgoloFuzzOne)(nil), // 45: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil), // 46: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil), // 47: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs maps each type-reference in the descriptor to
// an index in file_ngolofuzz_proto_goTypes; the trailing entries delimit the
// method/extension sub-lists (all empty here except field type_names [0:59]).
var file_ngolofuzz_proto_depIdxs = []int32{
	0, // 0: ngolofuzz.TxOptionsStruct.Isolation:type_name -> ngolofuzz.IsolationLevelEnum
	46, // 1: ngolofuzz.NamedArgs.value:type_name -> ngolofuzz.NgoloFuzzAny
	0, // 2: ngolofuzz.IsolationLevelNgdotStringArgs.i:type_name -> ngolofuzz.IsolationLevelEnum
	5, // 3: ngolofuzz.NullStringNgdotScanArgs.ns:type_name -> ngolofuzz.NullStringStruct
	46, // 4: ngolofuzz.NullStringNgdotScanArgs.value:type_name -> ngolofuzz.NgoloFuzzAny
	5, // 5: ngolofuzz.NullStringNgdotValueArgs.ns:type_name -> ngolofuzz.NullStringStruct
	2, // 6: ngolofuzz.NullInt64NgdotScanArgs.n:type_name -> ngolofuzz.NullInt64Struct
	46, // 7: ngolofuzz.NullInt64NgdotScanArgs.value:type_name -> ngolofuzz.NgoloFuzzAny
	2, // 8: ngolofuzz.NullInt64NgdotValueArgs.n:type_name -> ngolofuzz.NullInt64Struct
	3, // 9: ngolofuzz.NullInt32NgdotScanArgs.n:type_name -> ngolofuzz.NullInt32Struct
	46, // 10: ngolofuzz.NullInt32NgdotScanArgs.value:type_name -> ngolofuzz.NgoloFuzzAny
	3, // 11: ngolofuzz.NullInt32NgdotValueArgs.n:type_name -> ngolofuzz.NullInt32Struct
	6, // 12: ngolofuzz.NullByteNgdotScanArgs.n:type_name -> ngolofuzz.NullByteStruct
	46, // 13: ngolofuzz.NullByteNgdotScanArgs.value:type_name -> ngolofuzz.NgoloFuzzAny
	6, // 14: ngolofuzz.NullByteNgdotValueArgs.n:type_name -> ngolofuzz.NullByteStruct
	1, // 15: ngolofuzz.NullFloat64NgdotScanArgs.n:type_name -> ngolofuzz.NullFloat64Struct
	46, // 16: ngolofuzz.NullFloat64NgdotScanArgs.value:type_name -> ngolofuzz.NgoloFuzzAny
	1, // 17: ngolofuzz.NullFloat64NgdotValueArgs.n:type_name -> ngolofuzz.NullFloat64Struct
	7, // 18: ngolofuzz.NullBoolNgdotScanArgs.n:type_name -> ngolofuzz.NullBoolStruct
	46, // 19: ngolofuzz.NullBoolNgdotScanArgs.value:type_name -> ngolofuzz.NgoloFuzzAny
	7, // 20: ngolofuzz.NullBoolNgdotValueArgs.n:type_name -> ngolofuzz.NullBoolStruct
	8, // 21: ngolofuzz.NgoloFuzzOne.Drivers:type_name -> ngolofuzz.DriversArgs
	9, // 22: ngolofuzz.NgoloFuzzOne.Named:type_name -> ngolofuzz.NamedArgs
	10, // 23: ngolofuzz.NgoloFuzzOne.IsolationLevelNgdotString:type_name -> ngolofuzz.IsolationLevelNgdotStringArgs
	11, // 24: ngolofuzz.NgoloFuzzOne.NullStringNgdotScan:type_name -> ngolofuzz.NullStringNgdotScanArgs
	12, // 25: ngolofuzz.NgoloFuzzOne.NullStringNgdotValue:type_name -> ngolofuzz.NullStringNgdotValueArgs
	13, // 26: ngolofuzz.NgoloFuzzOne.NullInt64NgdotScan:type_name -> ngolofuzz.NullInt64NgdotScanArgs
	14, // 27: ngolofuzz.NgoloFuzzOne.NullInt64NgdotValue:type_name -> ngolofuzz.NullInt64NgdotValueArgs
	15, // 28: ngolofuzz.NgoloFuzzOne.NullInt32NgdotScan:type_name -> ngolofuzz.NullInt32NgdotScanArgs
	16, // 29: ngolofuzz.NgoloFuzzOne.NullInt32NgdotValue:type_name -> ngolofuzz.NullInt32NgdotValueArgs
	17, // 30: ngolofuzz.NgoloFuzzOne.NullByteNgdotScan:type_name -> ngolofuzz.NullByteNgdotScanArgs
	18, // 31: ngolofuzz.NgoloFuzzOne.NullByteNgdotValue:type_name -> ngolofuzz.NullByteNgdotValueArgs
	19, // 32: ngolofuzz.NgoloFuzzOne.NullFloat64NgdotScan:type_name -> ngolofuzz.NullFloat64NgdotScanArgs
	20, // 33: ngolofuzz.NgoloFuzzOne.NullFloat64NgdotValue:type_name -> ngolofuzz.NullFloat64NgdotValueArgs
	21, // 34: ngolofuzz.NgoloFuzzOne.NullBoolNgdotScan:type_name -> ngolofuzz.NullBoolNgdotScanArgs
	22, // 35: ngolofuzz.NgoloFuzzOne.NullBoolNgdotValue:type_name -> ngolofuzz.NullBoolNgdotValueArgs
	23, // 36: ngolofuzz.NgoloFuzzOne.Open:type_name -> ngolofuzz.OpenArgs
	24, // 37: ngolofuzz.NgoloFuzzOne.DBNgdotPing:type_name -> ngolofuzz.DBNgdotPingArgs
	25, // 38: ngolofuzz.NgoloFuzzOne.DBNgdotClose:type_name -> ngolofuzz.DBNgdotCloseArgs
	26, // 39: ngolofuzz.NgoloFuzzOne.DBNgdotSetMaxIdleConns:type_name -> ngolofuzz.DBNgdotSetMaxIdleConnsArgs
	27, // 40: ngolofuzz.NgoloFuzzOne.DBNgdotSetMaxOpenConns:type_name -> ngolofuzz.DBNgdotSetMaxOpenConnsArgs
	28, // 41: ngolofuzz.NgoloFuzzOne.DBNgdotStats:type_name -> ngolofuzz.DBNgdotStatsArgs
	29, // 42: ngolofuzz.NgoloFuzzOne.DBNgdotPrepare:type_name -> ngolofuzz.DBNgdotPrepareArgs
	30, // 43: ngolofuzz.NgoloFuzzOne.DBNgdotBegin:type_name -> ngolofuzz.DBNgdotBeginArgs
	31, // 44: ngolofuzz.NgoloFuzzOne.DBNgdotDriver:type_name -> ngolofuzz.DBNgdotDriverArgs
	32, // 45: ngolofuzz.NgoloFuzzOne.ConnNgdotClose:type_name -> ngolofuzz.ConnNgdotCloseArgs
	33, // 46: ngolofuzz.NgoloFuzzOne.TxNgdotCommit:type_name -> ngolofuzz.TxNgdotCommitArgs
	34, // 47: ngolofuzz.NgoloFuzzOne.TxNgdotRollback:type_name -> ngolofuzz.TxNgdotRollbackArgs
	35, // 48: ngolofuzz.NgoloFuzzOne.TxNgdotPrepare:type_name -> ngolofuzz.TxNgdotPrepareArgs
	36, // 49: ngolofuzz.NgoloFuzzOne.TxNgdotStmt:type_name -> ngolofuzz.TxNgdotStmtArgs
	37, // 50: ngolofuzz.NgoloFuzzOne.StmtNgdotClose:type_name -> ngolofuzz.StmtNgdotCloseArgs
	38, // 51: ngolofuzz.NgoloFuzzOne.RowsNgdotNext:type_name -> ngolofuzz.RowsNgdotNextArgs
	39, // 52: ngolofuzz.NgoloFuzzOne.RowsNgdotNextResultSet:type_name -> ngolofuzz.RowsNgdotNextResultSetArgs
	40, // 53: ngolofuzz.NgoloFuzzOne.RowsNgdotErr:type_name -> ngolofuzz.RowsNgdotErrArgs
	41, // 54: ngolofuzz.NgoloFuzzOne.RowsNgdotColumns:type_name -> ngolofuzz.RowsNgdotColumnsArgs
	42, // 55: ngolofuzz.NgoloFuzzOne.RowsNgdotColumnTypes:type_name -> ngolofuzz.RowsNgdotColumnTypesArgs
	43, // 56: ngolofuzz.NgoloFuzzOne.RowsNgdotClose:type_name -> ngolofuzz.RowsNgdotCloseArgs
	44, // 57: ngolofuzz.NgoloFuzzOne.RowNgdotErr:type_name -> ngolofuzz.RowNgdotErrArgs
	45, // 58: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	59, // [59:59] is the sub-list for method output_type
	59, // [59:59] is the sub-list for method input_type
	59, // [59:59] is the sub-list for extension type_name
	59, // [59:59] is the sub-list for extension extendee
	0, // [0:59] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the oneof wrapper types and builds the
// file descriptor exactly once; subsequent calls are no-ops.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// msgTypes[44] is NgoloFuzzOne: register all of its oneof wrappers.
	file_ngolofuzz_proto_msgTypes[44].OneofWrappers = []any{
		(*NgoloFuzzOne_Drivers)(nil),
		(*NgoloFuzzOne_Named)(nil),
		(*NgoloFuzzOne_IsolationLevelNgdotString)(nil),
		(*NgoloFuzzOne_NullStringNgdotScan)(nil),
		(*NgoloFuzzOne_NullStringNgdotValue)(nil),
		(*NgoloFuzzOne_NullInt64NgdotScan)(nil),
		(*NgoloFuzzOne_NullInt64NgdotValue)(nil),
		(*NgoloFuzzOne_NullInt32NgdotScan)(nil),
		(*NgoloFuzzOne_NullInt32NgdotValue)(nil),
		(*NgoloFuzzOne_NullByteNgdotScan)(nil),
		(*NgoloFuzzOne_NullByteNgdotValue)(nil),
		(*NgoloFuzzOne_NullFloat64NgdotScan)(nil),
		(*NgoloFuzzOne_NullFloat64NgdotValue)(nil),
		(*NgoloFuzzOne_NullBoolNgdotScan)(nil),
		(*NgoloFuzzOne_NullBoolNgdotValue)(nil),
		(*NgoloFuzzOne_Open)(nil),
		(*NgoloFuzzOne_DBNgdotPing)(nil),
		(*NgoloFuzzOne_DBNgdotClose)(nil),
		(*NgoloFuzzOne_DBNgdotSetMaxIdleConns)(nil),
		(*NgoloFuzzOne_DBNgdotSetMaxOpenConns)(nil),
		(*NgoloFuzzOne_DBNgdotStats)(nil),
		(*NgoloFuzzOne_DBNgdotPrepare)(nil),
		(*NgoloFuzzOne_DBNgdotBegin)(nil),
		(*NgoloFuzzOne_DBNgdotDriver)(nil),
		(*NgoloFuzzOne_ConnNgdotClose)(nil),
		(*NgoloFuzzOne_TxNgdotCommit)(nil),
		(*NgoloFuzzOne_TxNgdotRollback)(nil),
		(*NgoloFuzzOne_TxNgdotPrepare)(nil),
		(*NgoloFuzzOne_TxNgdotStmt)(nil),
		(*NgoloFuzzOne_StmtNgdotClose)(nil),
		(*NgoloFuzzOne_RowsNgdotNext)(nil),
		(*NgoloFuzzOne_RowsNgdotNextResultSet)(nil),
		(*NgoloFuzzOne_RowsNgdotErr)(nil),
		(*NgoloFuzzOne_RowsNgdotColumns)(nil),
		(*NgoloFuzzOne_RowsNgdotColumnTypes)(nil),
		(*NgoloFuzzOne_RowsNgdotClose)(nil),
		(*NgoloFuzzOne_RowNgdotErr)(nil),
	}
	// msgTypes[45] is NgoloFuzzAny: register its scalar oneof wrappers.
	file_ngolofuzz_proto_msgTypes[45].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums: 1,
			NumMessages: 47,
			NumExtensions: 0,
			NumServices: 0,
		},
		GoTypes: file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		EnumInfos: file_ngolofuzz_proto_enumTypes,
		MessageInfos: file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release build-time tables so they can be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_database_sql_driver
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"database/sql/driver"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in for fuzzing: Read serves the
// fuzzer-supplied bytes in buf and Write discards its input.
type FuzzingConn struct {
	buf []byte // bytes served to Read, supplied by CreateFuzzingConn
	offset int // current read position; == len(buf) once drained or closed
}
// Read copies up to len(b) buffered bytes into b and advances the read
// offset, returning the number of bytes actually copied. Once the buffer is
// exhausted it returns io.EOF, per the io.Reader contract.
//
// Fix: the original guard compared len(b) against len(c.buf)+c.offset (sum
// instead of remaining bytes), so near the end of the buffer it reported
// n = len(b) even though fewer bytes were copied and pushed offset past
// len(buf). Using copy's return value handles both the partial-fill and
// full-fill cases correctly.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends to transmit p, discarding the data and reporting that all
// of it was written successfully.
func (fc *FuzzingConn) Write(p []byte) (int, error) {
	return len(p), nil
}
// Close marks the connection as fully consumed so subsequent Reads hit EOF.
// It never fails.
func (fc *FuzzingConn) Close() error {
	fc.offset = len(fc.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr used for both endpoints of a
// FuzzingConn; it reports fixed fake values.
type FuzzingAddr struct{}

// Network reports the fixed fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports the fixed fake address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder local endpoint address.
func (fc *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder remote endpoint address.
func (fc *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for an in-memory conn.
func (fc *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (fc *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (fc *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a as the readable contents of a new FuzzingConn.
// The slice is retained, not copied.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helper constructors if needed.

// CreateBigInt interprets a as big-endian unsigned bytes and returns the
// corresponding non-negative big.Int (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over the bytes in a.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray returns a new slice holding each element of a narrowed to
// the platform int type (values may truncate on 32-bit targets).
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array returns a new slice holding each element of a truncated
// to uint16 (only the low 16 bits are kept).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the start of s decodes to utf8.RuneError, matching a
// range-loop over the string.
func GetRune(s string) rune {
	rs := []rune(s)
	if len(rs) == 0 {
		return '\x00'
	}
	return rs[0]
}
// FuzzNG_valid decodes data, which is assumed to be a valid protobuf-encoded
// NgoloFuzzList, and replays it. A decode failure is a harness bug and
// panics. String panics raised while replaying are treated as expected
// fuzzing outcomes and swallowed; any other panic value is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, expected := r.(string); !expected {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure handles input that may not be a valid protobuf: undecodable
// data is silently rejected (returns 0) instead of panicking. String panics
// raised while replaying are swallowed as expected; anything else re-raises.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, expected := r.(string); !expected {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays each decoded call in gen against database/sql/driver.
// On first use it optionally writes a human-readable reproducer to the file
// named by the FUZZ_NG_REPRODUCER environment variable. It returns 1 after a
// full replay and 0 when the list exceeds the per-input work cap.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the amount of replayed work per fuzz input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_IsValue:
			driver.IsValue(a.IsValue.V)
		case *NgoloFuzzOne_IsScanValue:
			driver.IsScanValue(a.IsScanValue.V)
		}
	}
	return 1
}
// PrintNG_List writes one Go-like reproducer line to w for every call in
// gen, in replay order. Write errors are ignored (best-effort output).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for _, one := range gen.List {
		switch item := one.Item.(type) {
		case *NgoloFuzzOne_IsValue:
			w.WriteString(fmt.Sprintf("driver.IsValue(%#+v)\n", item.IsValue.V))
		case *NgoloFuzzOne_IsScanValue:
			w.WriteString(fmt.Sprintf("driver.IsScanValue(%#+v)\n", item.IsScanValue.V))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_database_sql_driver
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Generated compile-time guard: fails the build if the protoimpl runtime is
// older or newer than what this generated code supports.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// IsValueArgs is the generated argument message for a fuzzed
// driver.IsValue call; V carries the candidate value.
type IsValueArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	V *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset restores x to its zero value and re-arms the generated message info.
func (x *IsValueArgs) Reset() {
	*x = IsValueArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *IsValueArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*IsValueArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API, lazily
// caching the message info for nil-safe use.
func (x *IsValueArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsValueArgs.ProtoReflect.Descriptor instead.
func (*IsValueArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetV returns the wrapped argument value, or nil for a nil receiver.
func (x *IsValueArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}
// IsScanValueArgs is the generated argument message for a fuzzed
// driver.IsScanValue call; V carries the candidate value.
type IsScanValueArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	V *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset restores x to its zero value and re-arms the generated message info.
func (x *IsScanValueArgs) Reset() {
	*x = IsScanValueArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *IsScanValueArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*IsScanValueArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API, lazily
// caching the message info for nil-safe use.
func (x *IsScanValueArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsScanValueArgs.ProtoReflect.Descriptor instead.
func (*IsScanValueArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetV returns the wrapped argument value, or nil for a nil receiver.
func (x *IsScanValueArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}
// NgoloFuzzOne is the generated oneof message describing a single fuzzed
// call against database/sql/driver (IsValue or IsScanValue).
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzOne_IsValue
	// *NgoloFuzzOne_IsScanValue
	Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset restores x to its zero value and re-arms the generated message info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API, lazily
// caching the message info for nil-safe use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetItem returns the populated oneof member, or nil when unset.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetIsValue returns the IsValue arguments, or nil when another member is set.
func (x *NgoloFuzzOne) GetIsValue() *IsValueArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IsValue); ok {
			return x.IsValue
		}
	}
	return nil
}

// GetIsScanValue returns the IsScanValue arguments, or nil when another
// member is set.
func (x *NgoloFuzzOne) GetIsScanValue() *IsScanValueArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IsScanValue); ok {
			return x.IsScanValue
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by all oneof members.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_IsValue struct {
	IsValue *IsValueArgs `protobuf:"bytes,1,opt,name=IsValue,proto3,oneof"`
}

type NgoloFuzzOne_IsScanValue struct {
	IsScanValue *IsScanValueArgs `protobuf:"bytes,2,opt,name=IsScanValue,proto3,oneof"`
}

func (*NgoloFuzzOne_IsValue) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsScanValue) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"4\n" +
"\vIsValueArgs\x12%\n" +
"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"8\n" +
"\x0fIsScanValueArgs\x12%\n" +
"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"\x8a\x01\n" +
"\fNgoloFuzzOne\x122\n" +
"\aIsValue\x18\x01 \x01(\v2\x16.ngolofuzz.IsValueArgsH\x00R\aIsValue\x12>\n" +
"\vIsScanValue\x18\x02 \x01(\v2\x1a.ngolofuzz.IsScanValueArgsH\x00R\vIsScanValueB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB Z\x1e./;fuzz_ng_database_sql_driverb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_ngolofuzz_proto_goTypes = []any{
(*IsValueArgs)(nil), // 0: ngolofuzz.IsValueArgs
(*IsScanValueArgs)(nil), // 1: ngolofuzz.IsScanValueArgs
(*NgoloFuzzOne)(nil), // 2: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 3: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 4: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
3, // 0: ngolofuzz.IsValueArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
3, // 1: ngolofuzz.IsScanValueArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
0, // 2: ngolofuzz.NgoloFuzzOne.IsValue:type_name -> ngolofuzz.IsValueArgs
1, // 3: ngolofuzz.NgoloFuzzOne.IsScanValue:type_name -> ngolofuzz.IsScanValueArgs
2, // 4: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
5, // [5:5] is the sub-list for method output_type
5, // [5:5] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzOne_IsValue)(nil),
(*NgoloFuzzOne_IsScanValue)(nil),
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_debug_buildinfo
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"debug/buildinfo"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
// Read copies buffered bytes into b, advancing the internal offset, and
// returns io.EOF once all buffered data has been consumed.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset
// (plus instead of minus), so once the buffer was partially consumed a
// short remainder was copied but Read still reported n = len(b) —
// claiming bytes it never wrote, which violates the io.Reader contract
// and feeds stale garbage from b to the fuzz target.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// Bytes still available in the buffer.
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b is too small for the rest of the buffer: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b can hold everything that is left; drain the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write discards b and reports it as fully written; the fuzzing
// connection never fails a write.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr implementation for FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; the in-memory connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn returns a FuzzingConn whose reads are served from a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only generate these helper functions when they are actually needed.
// CreateBigInt builds a big.Int from big-endian bytes supplied by the fuzzer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps fuzzer-supplied bytes in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray converts fuzzer-generated int64 values to int
// (truncating on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array converts fuzzer-generated int64 values to uint16,
// truncating each value modulo 2^16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') if s is empty.
func GetRune(s string) rune {
	first := rune('\x00')
	for _, c := range s {
		first = c
		break
	}
	return first
}
// FuzzNG_valid feeds a protobuf-encoded call list to the fuzz target.
// The input is expected to be valid protobuf (produced by an LPM-style
// mutator); a decode failure indicates a harness bug, hence the panic.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised deliberately by the harness; re-raise
	// anything else so genuine crashes in the target are still reported.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the tolerant variant of FuzzNG_valid: a decode
// failure simply rejects the input instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics raised deliberately by the harness; re-raise
	// anything else so genuine crashes in the target are still reported.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
var initialized bool
// FuzzNG_List replays the decoded list of calls against debug/buildinfo.
// On first use it optionally writes a source-code reproducer to the file
// named by the FUZZ_NG_REPRODUCER environment variable.
// It returns 1 when every call succeeded, 0 otherwise.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the amount of work done for a single input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ReadFile:
			_, r1 := buildinfo.ReadFile(a.ReadFile.Name)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Read:
			arg0 := bytes.NewReader(a.Read.R)
			_, r1 := buildinfo.Read(arg0)
			if r1 != nil{
				r1.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go reproducer for the call list
// to w, one statement per fuzzed call.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ReadFile:
			w.WriteString(fmt.Sprintf("buildinfo.ReadFile(%#+v)\n", a.ReadFile.Name))
		case *NgoloFuzzOne_Read:
			w.WriteString(fmt.Sprintf("buildinfo.Read(bytes.NewReader(%#+v))\n", a.Read.R))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_debug_buildinfo
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ReadFileArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReadFileArgs) Reset() {
*x = ReadFileArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReadFileArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadFileArgs) ProtoMessage() {}
func (x *ReadFileArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadFileArgs.ProtoReflect.Descriptor instead.
func (*ReadFileArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *ReadFileArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type ReadArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReadArgs) Reset() {
*x = ReadArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReadArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadArgs) ProtoMessage() {}
func (x *ReadArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadArgs.ProtoReflect.Descriptor instead.
func (*ReadArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *ReadArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_ReadFile
// *NgoloFuzzOne_Read
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetReadFile() *ReadFileArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReadFile); ok {
return x.ReadFile
}
}
return nil
}
func (x *NgoloFuzzOne) GetRead() *ReadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Read); ok {
return x.Read
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_ReadFile struct {
ReadFile *ReadFileArgs `protobuf:"bytes,1,opt,name=ReadFile,proto3,oneof"`
}
type NgoloFuzzOne_Read struct {
Read *ReadArgs `protobuf:"bytes,2,opt,name=Read,proto3,oneof"`
}
func (*NgoloFuzzOne_ReadFile) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Read) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\"\n" +
"\fReadFileArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x18\n" +
"\bReadArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"x\n" +
"\fNgoloFuzzOne\x125\n" +
"\bReadFile\x18\x01 \x01(\v2\x17.ngolofuzz.ReadFileArgsH\x00R\bReadFile\x12)\n" +
"\x04Read\x18\x02 \x01(\v2\x13.ngolofuzz.ReadArgsH\x00R\x04ReadB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1cZ\x1a./;fuzz_ng_debug_buildinfob\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_ngolofuzz_proto_goTypes = []any{
(*ReadFileArgs)(nil), // 0: ngolofuzz.ReadFileArgs
(*ReadArgs)(nil), // 1: ngolofuzz.ReadArgs
(*NgoloFuzzOne)(nil), // 2: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 3: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 4: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.ReadFile:type_name -> ngolofuzz.ReadFileArgs
1, // 1: ngolofuzz.NgoloFuzzOne.Read:type_name -> ngolofuzz.ReadArgs
2, // 2: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzOne_ReadFile)(nil),
(*NgoloFuzzOne_Read)(nil),
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_debug_dwarf
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"debug/dwarf"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
// Read copies buffered bytes into b, advancing the internal offset, and
// returns io.EOF once all buffered data has been consumed.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset
// (plus instead of minus), so once the buffer was partially consumed a
// short remainder was copied but Read still reported n = len(b) —
// claiming bytes it never wrote, which violates the io.Reader contract
// and feeds stale garbage from b to the fuzz target.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// Bytes still available in the buffer.
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b is too small for the rest of the buffer: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b can hold everything that is left; drain the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write discards b and reports it as fully written; the fuzzing
// connection never fails a write.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr implementation for FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}
// SetDeadline is a no-op; the in-memory connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn returns a FuzzingConn whose reads are served from a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO: only generate these helper functions when they are actually needed.
// CreateBigInt builds a big.Int from big-endian bytes supplied by the fuzzer.
func CreateBigInt(a []byte) *big.Int {
	r := new(big.Int)
	r.SetBytes(a)
	return r
}

// CreateBufioReader wraps fuzzer-supplied bytes in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewBuffer(a))
}
// ConvertIntArray converts fuzzer-generated int64 values to int
// (truncating on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	r := make([]int, len(a))
	for i := range a {
		r[i] = int(a[i])
	}
	return r
}

// ConvertUint16Array converts fuzzer-generated int64 values to uint16,
// truncating each value modulo 2^16.
func ConvertUint16Array(a []int64) []uint16 {
	r := make([]uint16, len(a))
	for i := range a {
		r[i] = uint16(a[i])
	}
	return r
}
// GetRune returns the first rune of s, or NUL ('\x00') if s is empty.
func GetRune(s string) rune {
	for _, c := range s {
		return c
	}
	return '\x00'
}
// ClassNewFromFuzz maps a fuzzer-chosen ClassEnum to the corresponding
// dwarf.Class constant; values outside 1..19 map to dwarf.ClassUnknown.
func ClassNewFromFuzz(p ClassEnum) dwarf.Class{
	switch p {
	case 1:
		return dwarf.ClassAddress
	case 2:
		return dwarf.ClassBlock
	case 3:
		return dwarf.ClassConstant
	case 4:
		return dwarf.ClassExprLoc
	case 5:
		return dwarf.ClassFlag
	case 6:
		return dwarf.ClassLinePtr
	case 7:
		return dwarf.ClassLocListPtr
	case 8:
		return dwarf.ClassMacPtr
	case 9:
		return dwarf.ClassRangeListPtr
	case 10:
		return dwarf.ClassReference
	case 11:
		return dwarf.ClassReferenceSig
	case 12:
		return dwarf.ClassString
	case 13:
		return dwarf.ClassReferenceAlt
	case 14:
		return dwarf.ClassStringAlt
	case 15:
		return dwarf.ClassAddrPtr
	case 16:
		return dwarf.ClassLocList
	case 17:
		return dwarf.ClassRngList
	case 18:
		return dwarf.ClassRngListsPtr
	case 19:
		return dwarf.ClassStrOffsetsPtr
	}
	// Default: the fuzzer picked a value with no dedicated mapping.
	return dwarf.ClassUnknown
}
// ConvertClassNewFromFuzz maps each fuzzer enum in a to its dwarf.Class.
func ConvertClassNewFromFuzz(a []ClassEnum) []dwarf.Class{
	out := make([]dwarf.Class, len(a))
	for i, v := range a {
		out[i] = ClassNewFromFuzz(v)
	}
	return out
}
func AttrNewFromFuzz(p AttrEnum) dwarf.Attr{
switch p {
case 1:
return dwarf.AttrLocation
case 2:
return dwarf.AttrName
case 3:
return dwarf.AttrOrdering
case 4:
return dwarf.AttrByteSize
case 5:
return dwarf.AttrBitOffset
case 6:
return dwarf.AttrBitSize
case 7:
return dwarf.AttrStmtList
case 8:
return dwarf.AttrLowpc
case 9:
return dwarf.AttrHighpc
case 10:
return dwarf.AttrLanguage
case 11:
return dwarf.AttrDiscr
case 12:
return dwarf.AttrDiscrValue
case 13:
return dwarf.AttrVisibility
case 14:
return dwarf.AttrImport
case 15:
return dwarf.AttrStringLength
case 16:
return dwarf.AttrCommonRef
case 17:
return dwarf.AttrCompDir
case 18:
return dwarf.AttrConstValue
case 19:
return dwarf.AttrContainingType
case 20:
return dwarf.AttrDefaultValue
case 21:
return dwarf.AttrInline
case 22:
return dwarf.AttrIsOptional
case 23:
return dwarf.AttrLowerBound
case 24:
return dwarf.AttrProducer
case 25:
return dwarf.AttrPrototyped
case 26:
return dwarf.AttrReturnAddr
case 27:
return dwarf.AttrStartScope
case 28:
return dwarf.AttrStrideSize
case 29:
return dwarf.AttrUpperBound
case 30:
return dwarf.AttrAbstractOrigin
case 31:
return dwarf.AttrAccessibility
case 32:
return dwarf.AttrAddrClass
case 33:
return dwarf.AttrArtificial
case 34:
return dwarf.AttrBaseTypes
case 35:
return dwarf.AttrCalling
case 36:
return dwarf.AttrCount
case 37:
return dwarf.AttrDataMemberLoc
case 38:
return dwarf.AttrDeclColumn
case 39:
return dwarf.AttrDeclFile
case 40:
return dwarf.AttrDeclLine
case 41:
return dwarf.AttrDeclaration
case 42:
return dwarf.AttrDiscrList
case 43:
return dwarf.AttrEncoding
case 44:
return dwarf.AttrExternal
case 45:
return dwarf.AttrFrameBase
case 46:
return dwarf.AttrFriend
case 47:
return dwarf.AttrIdentifierCase
case 48:
return dwarf.AttrMacroInfo
case 49:
return dwarf.AttrNamelistItem
case 50:
return dwarf.AttrPriority
case 51:
return dwarf.AttrSegment
case 52:
return dwarf.AttrSpecification
case 53:
return dwarf.AttrStaticLink
case 54:
return dwarf.AttrType
case 55:
return dwarf.AttrUseLocation
case 56:
return dwarf.AttrVarParam
case 57:
return dwarf.AttrVirtuality
case 58:
return dwarf.AttrVtableElemLoc
case 59:
return dwarf.AttrAllocated
case 60:
return dwarf.AttrAssociated
case 61:
return dwarf.AttrDataLocation
case 62:
return dwarf.AttrStride
case 63:
return dwarf.AttrEntrypc
case 64:
return dwarf.AttrUseUTF8
case 65:
return dwarf.AttrExtension
case 66:
return dwarf.AttrRanges
case 67:
return dwarf.AttrTrampoline
case 68:
return dwarf.AttrCallColumn
case 69:
return dwarf.AttrCallFile
case 70:
return dwarf.AttrCallLine
case 71:
return dwarf.AttrDescription
case 72:
return dwarf.AttrBinaryScale
case 73:
return dwarf.AttrDecimalScale
case 74:
return dwarf.AttrSmall
case 75:
return dwarf.AttrDecimalSign
case 76:
return dwarf.AttrDigitCount
case 77:
return dwarf.AttrPictureString
case 78:
return dwarf.AttrMutable
case 79:
return dwarf.AttrThreadsScaled
case 80:
return dwarf.AttrExplicit
case 81:
return dwarf.AttrObjectPointer
case 82:
return dwarf.AttrEndianity
case 83:
return dwarf.AttrElemental
case 84:
return dwarf.AttrPure
case 85:
return dwarf.AttrRecursive
case 86:
return dwarf.AttrSignature
case 87:
return dwarf.AttrMainSubprogram
case 88:
return dwarf.AttrDataBitOffset
case 89:
return dwarf.AttrConstExpr
case 90:
return dwarf.AttrEnumClass
case 91:
return dwarf.AttrLinkageName
case 92:
return dwarf.AttrStringLengthBitSize
case 93:
return dwarf.AttrStringLengthByteSize
case 94:
return dwarf.AttrRank
case 95:
return dwarf.AttrStrOffsetsBase
case 96:
return dwarf.AttrAddrBase
case 97:
return dwarf.AttrRnglistsBase
case 98:
return dwarf.AttrDwoName
case 99:
return dwarf.AttrReference
case 100:
return dwarf.AttrRvalueReference
case 101:
return dwarf.AttrMacros
case 102:
return dwarf.AttrCallAllCalls
case 103:
return dwarf.AttrCallAllSourceCalls
case 104:
return dwarf.AttrCallAllTailCalls
case 105:
return dwarf.AttrCallReturnPC
case 106:
return dwarf.AttrCallValue
case 107:
return dwarf.AttrCallOrigin
case 108:
return dwarf.AttrCallParameter
case 109:
return dwarf.AttrCallPC
case 110:
return dwarf.AttrCallTailCall
case 111:
return dwarf.AttrCallTarget
case 112:
return dwarf.AttrCallTargetClobbered
case 113:
return dwarf.AttrCallDataLocation
case 114:
return dwarf.AttrCallDataValue
case 115:
return dwarf.AttrNoreturn
case 116:
return dwarf.AttrAlignment
case 117:
return dwarf.AttrExportSymbols
case 118:
return dwarf.AttrDeleted
case 119:
return dwarf.AttrDefaulted
case 120:
return dwarf.AttrLoclistsBase
}
return dwarf.AttrSibling
}
// ConvertAttrNewFromFuzz maps each fuzzer enum in a to its dwarf.Attr.
func ConvertAttrNewFromFuzz(a []AttrEnum) []dwarf.Attr{
	out := make([]dwarf.Attr, len(a))
	for i, v := range a {
		out[i] = AttrNewFromFuzz(v)
	}
	return out
}
func TagNewFromFuzz(p TagEnum) dwarf.Tag{
switch p {
case 1:
return dwarf.TagClassType
case 2:
return dwarf.TagEntryPoint
case 3:
return dwarf.TagEnumerationType
case 4:
return dwarf.TagFormalParameter
case 5:
return dwarf.TagImportedDeclaration
case 6:
return dwarf.TagLabel
case 7:
return dwarf.TagLexDwarfBlock
case 8:
return dwarf.TagMember
case 9:
return dwarf.TagPointerType
case 10:
return dwarf.TagReferenceType
case 11:
return dwarf.TagCompileUnit
case 12:
return dwarf.TagStringType
case 13:
return dwarf.TagStructType
case 14:
return dwarf.TagSubroutineType
case 15:
return dwarf.TagTypedef
case 16:
return dwarf.TagUnionType
case 17:
return dwarf.TagUnspecifiedParameters
case 18:
return dwarf.TagVariant
case 19:
return dwarf.TagCommonDwarfBlock
case 20:
return dwarf.TagCommonInclusion
case 21:
return dwarf.TagInheritance
case 22:
return dwarf.TagInlinedSubroutine
case 23:
return dwarf.TagModule
case 24:
return dwarf.TagPtrToMemberType
case 25:
return dwarf.TagSetType
case 26:
return dwarf.TagSubrangeType
case 27:
return dwarf.TagWithStmt
case 28:
return dwarf.TagAccessDeclaration
case 29:
return dwarf.TagBaseType
case 30:
return dwarf.TagCatchDwarfBlock
case 31:
return dwarf.TagConstType
case 32:
return dwarf.TagConstant
case 33:
return dwarf.TagEnumerator
case 34:
return dwarf.TagFileType
case 35:
return dwarf.TagFriend
case 36:
return dwarf.TagNamelist
case 37:
return dwarf.TagNamelistItem
case 38:
return dwarf.TagPackedType
case 39:
return dwarf.TagSubprogram
case 40:
return dwarf.TagTemplateTypeParameter
case 41:
return dwarf.TagTemplateValueParameter
case 42:
return dwarf.TagThrownType
case 43:
return dwarf.TagTryDwarfBlock
case 44:
return dwarf.TagVariantPart
case 45:
return dwarf.TagVariable
case 46:
return dwarf.TagVolatileType
case 47:
return dwarf.TagDwarfProcedure
case 48:
return dwarf.TagRestrictType
case 49:
return dwarf.TagInterfaceType
case 50:
return dwarf.TagNamespace
case 51:
return dwarf.TagImportedModule
case 52:
return dwarf.TagUnspecifiedType
case 53:
return dwarf.TagPartialUnit
case 54:
return dwarf.TagImportedUnit
case 55:
return dwarf.TagMutableType
case 56:
return dwarf.TagCondition
case 57:
return dwarf.TagSharedType
case 58:
return dwarf.TagTypeUnit
case 59:
return dwarf.TagRvalueReferenceType
case 60:
return dwarf.TagTemplateAlias
case 61:
return dwarf.TagCoarrayType
case 62:
return dwarf.TagGenericSubrange
case 63:
return dwarf.TagDynamicType
case 64:
return dwarf.TagAtomicType
case 65:
return dwarf.TagCallSite
case 66:
return dwarf.TagCallSiteParameter
case 67:
return dwarf.TagSkeletonUnit
case 68:
return dwarf.TagImmutableType
}
return dwarf.TagArrayType
}
// ConvertTagNewFromFuzz maps every fuzz-generated TagEnum in a to its
// corresponding dwarf.Tag, preserving order and length.
func ConvertTagNewFromFuzz(a []TagEnum) []dwarf.Tag {
	out := make([]dwarf.Tag, 0, len(a))
	for _, e := range a {
		out = append(out, TagNewFromFuzz(e))
	}
	return out
}
// UnsupportedTypeNewFromFuzz builds a dwarf.UnsupportedType from the
// fuzz-generated struct p. A nil input yields a nil result.
func UnsupportedTypeNewFromFuzz(p *UnsupportedTypeStruct) *dwarf.UnsupportedType {
	if p == nil {
		return nil
	}
	u := new(dwarf.UnsupportedType)
	u.Tag = TagNewFromFuzz(p.Tag)
	return u
}
// FuzzNG_valid runs one fuzzing iteration on data that is expected to be a
// valid protobuf encoding of NgoloFuzzList; it panics if unmarshaling fails.
// Returns the result of FuzzNG_List on the decoded operation list.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics are swallowed as expected fuzz outcomes;
		// anything else is re-raised.
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure runs one fuzzing iteration on data that may or may not be a
// valid protobuf encoding of NgoloFuzzList; inputs that fail to decode are
// skipped (return 0) instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if proto.Unmarshal(data, gen) != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics are swallowed as expected fuzz outcomes;
		// anything else is re-raised.
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-dump setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the protobuf-decoded operation list gen against the
// debug/dwarf API. Each list item selects one API call. Values produced by
// earlier calls (Data, Reader, Entry, ...) are stored in per-type result
// slices and consumed round-robin by later calls via the matching
// *ResultsIndex cursor. Returns 1 if the whole list was processed, 0 if
// processing stopped early (list too long, or an API call returned an error).
// NOTE(review): within this function nothing appends to CommonTypeResults,
// EntryResults, or BasicTypeResults, so their consumer cases never fire here.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first call only: if FUZZ_NG_REPRODUCER names a file,
		// dump a Go-source reproducer of this input there.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Result pools plus a round-robin cursor into each pool.
	var CommonTypeResults []*dwarf.CommonType
	CommonTypeResultsIndex := 0
	var EntryResults []*dwarf.Entry
	EntryResultsIndex := 0
	var DataResults []*dwarf.Data
	DataResultsIndex := 0
	var LineReaderResults []*dwarf.LineReader
	LineReaderResultsIndex := 0
	var BasicTypeResults []*dwarf.BasicType
	BasicTypeResultsIndex := 0
	var ReaderResults []*dwarf.Reader
	ReaderResultsIndex := 0
	var LineReaderPosResults []*dwarf.LineReaderPos
	LineReaderPosResultsIndex := 0
	for l := range gen.List {
		// Bound the work done for a single fuzz input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_AttrNgdotString:
			arg0 := AttrNewFromFuzz(a.AttrNgdotString.I)
			arg0.String()
		case *NgoloFuzzOne_ClassNgdotString:
			arg0 := ClassNewFromFuzz(a.ClassNgdotString.I)
			arg0.String()
		case *NgoloFuzzOne_AttrNgdotGoString:
			arg0 := AttrNewFromFuzz(a.AttrNgdotGoString.A)
			arg0.GoString()
		case *NgoloFuzzOne_TagNgdotGoString:
			arg0 := TagNewFromFuzz(a.TagNgdotGoString.T)
			arg0.GoString()
		case *NgoloFuzzOne_ClassNgdotGoString:
			arg0 := ClassNewFromFuzz(a.ClassNgdotGoString.I)
			arg0.GoString()
		case *NgoloFuzzOne_EntryNgdotVal:
			if len(EntryResults) == 0 {
				continue
			}
			arg0 := EntryResults[EntryResultsIndex]
			EntryResultsIndex = (EntryResultsIndex + 1) % len(EntryResults)
			arg1 := AttrNewFromFuzz(a.EntryNgdotVal.A)
			arg0.Val(arg1)
		case *NgoloFuzzOne_EntryNgdotAttrField:
			if len(EntryResults) == 0 {
				continue
			}
			arg0 := EntryResults[EntryResultsIndex]
			EntryResultsIndex = (EntryResultsIndex + 1) % len(EntryResults)
			arg1 := AttrNewFromFuzz(a.EntryNgdotAttrField.A)
			arg0.AttrField(arg1)
		case *NgoloFuzzOne_DataNgdotReader:
			if len(DataResults) == 0 {
				continue
			}
			arg0 := DataResults[DataResultsIndex]
			DataResultsIndex = (DataResultsIndex + 1) % len(DataResults)
			r0 := arg0.Reader()
			// A non-nil Reader becomes available to later operations.
			if r0 != nil{
				ReaderResults = append(ReaderResults, r0)
			}
		case *NgoloFuzzOne_ReaderNgdotAddressSize:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg0.AddressSize()
		case *NgoloFuzzOne_ReaderNgdotByteOrder:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg0.ByteOrder()
		case *NgoloFuzzOne_ReaderNgdotNext:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.Next()
			// Any API error terminates this iteration early.
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotSkipChildren:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg0.SkipChildren()
		case *NgoloFuzzOne_ReaderNgdotSeekPC:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.SeekPC(a.ReaderNgdotSeekPC.Pc)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_DataNgdotRanges:
			if len(DataResults) == 0 {
				continue
			}
			arg0 := DataResults[DataResultsIndex]
			DataResultsIndex = (DataResultsIndex + 1) % len(DataResults)
			if len(EntryResults) == 0 {
				continue
			}
			arg1 := EntryResults[EntryResultsIndex]
			EntryResultsIndex = (EntryResultsIndex + 1) % len(EntryResults)
			_, r1 := arg0.Ranges(arg1)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_DataNgdotLineReader:
			if len(DataResults) == 0 {
				continue
			}
			arg0 := DataResults[DataResultsIndex]
			DataResultsIndex = (DataResultsIndex + 1) % len(DataResults)
			if len(EntryResults) == 0 {
				continue
			}
			arg1 := EntryResults[EntryResultsIndex]
			EntryResultsIndex = (EntryResultsIndex + 1) % len(EntryResults)
			r0, r1 := arg0.LineReader(arg1)
			if r0 != nil{
				LineReaderResults = append(LineReaderResults, r0)
			}
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_LineReaderNgdotTell:
			if len(LineReaderResults) == 0 {
				continue
			}
			arg0 := LineReaderResults[LineReaderResultsIndex]
			LineReaderResultsIndex = (LineReaderResultsIndex + 1) % len(LineReaderResults)
			r0 := arg0.Tell()
			// Tell returns a value; pool a pointer to this iteration's copy.
			LineReaderPosResults = append(LineReaderPosResults, &r0)
		case *NgoloFuzzOne_LineReaderNgdotSeek:
			if len(LineReaderResults) == 0 {
				continue
			}
			arg0 := LineReaderResults[LineReaderResultsIndex]
			LineReaderResultsIndex = (LineReaderResultsIndex + 1) % len(LineReaderResults)
			if len(LineReaderPosResults) == 0 {
				continue
			}
			arg1 := *LineReaderPosResults[LineReaderPosResultsIndex]
			LineReaderPosResultsIndex = (LineReaderPosResultsIndex + 1) % len(LineReaderPosResults)
			arg0.Seek(arg1)
		case *NgoloFuzzOne_LineReaderNgdotReset:
			if len(LineReaderResults) == 0 {
				continue
			}
			arg0 := LineReaderResults[LineReaderResultsIndex]
			LineReaderResultsIndex = (LineReaderResultsIndex + 1) % len(LineReaderResults)
			arg0.Reset()
		case *NgoloFuzzOne_LineReaderNgdotFiles:
			if len(LineReaderResults) == 0 {
				continue
			}
			arg0 := LineReaderResults[LineReaderResultsIndex]
			LineReaderResultsIndex = (LineReaderResultsIndex + 1) % len(LineReaderResults)
			arg0.Files()
		case *NgoloFuzzOne_New:
			// dwarf.New takes the raw section contents straight from the
			// fuzz input.
			r0, r1 := dwarf.New(a.New.Abbrev, a.New.Aranges, a.New.Frame, a.New.Info, a.New.Line, a.New.Pubnames, a.New.Ranges, a.New.Str)
			if r0 != nil{
				DataResults = append(DataResults, r0)
			}
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_DataNgdotAddTypes:
			if len(DataResults) == 0 {
				continue
			}
			arg0 := DataResults[DataResultsIndex]
			DataResultsIndex = (DataResultsIndex + 1) % len(DataResults)
			r0 := arg0.AddTypes(a.DataNgdotAddTypes.Name, a.DataNgdotAddTypes.Types)
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_DataNgdotAddSection:
			if len(DataResults) == 0 {
				continue
			}
			arg0 := DataResults[DataResultsIndex]
			DataResultsIndex = (DataResultsIndex + 1) % len(DataResults)
			r0 := arg0.AddSection(a.DataNgdotAddSection.Name, a.DataNgdotAddSection.Contents)
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_TagNgdotString:
			arg0 := TagNewFromFuzz(a.TagNgdotString.I)
			arg0.String()
		case *NgoloFuzzOne_CommonTypeNgdotCommon:
			if len(CommonTypeResults) == 0 {
				continue
			}
			arg0 := CommonTypeResults[CommonTypeResultsIndex]
			CommonTypeResultsIndex = (CommonTypeResultsIndex + 1) % len(CommonTypeResults)
			arg0.Common()
		case *NgoloFuzzOne_CommonTypeNgdotSize:
			if len(CommonTypeResults) == 0 {
				continue
			}
			arg0 := CommonTypeResults[CommonTypeResultsIndex]
			CommonTypeResultsIndex = (CommonTypeResultsIndex + 1) % len(CommonTypeResults)
			arg0.Size()
		case *NgoloFuzzOne_BasicTypeNgdotBasic:
			if len(BasicTypeResults) == 0 {
				continue
			}
			arg0 := BasicTypeResults[BasicTypeResultsIndex]
			BasicTypeResultsIndex = (BasicTypeResultsIndex + 1) % len(BasicTypeResults)
			arg0.Basic()
		case *NgoloFuzzOne_BasicTypeNgdotString:
			if len(BasicTypeResults) == 0 {
				continue
			}
			arg0 := BasicTypeResults[BasicTypeResultsIndex]
			BasicTypeResultsIndex = (BasicTypeResultsIndex + 1) % len(BasicTypeResults)
			arg0.String()
		case *NgoloFuzzOne_UnsupportedTypeNgdotString:
			arg0 := UnsupportedTypeNewFromFuzz(a.UnsupportedTypeNgdotString.T)
			if arg0 == nil {
				continue
			}
			arg0.String()
		}
	}
	return 1
}
// PrintNG_List writes to w a Go-source reproducer of the operation list gen.
// It mirrors the dispatch of FuzzNG_List: the *Nb counters track how many
// values of each type have been created so far (used to name new variables),
// and the *ResultsIndex cursors replicate FuzzNG_List's round-robin
// consumption so the printed program references the same values.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	CommonTypeNb := 0
	CommonTypeResultsIndex := 0
	EntryNb := 0
	EntryResultsIndex := 0
	DataNb := 0
	DataResultsIndex := 0
	LineReaderNb := 0
	LineReaderResultsIndex := 0
	BasicTypeNb := 0
	BasicTypeResultsIndex := 0
	ReaderNb := 0
	ReaderResultsIndex := 0
	LineReaderPosNb := 0
	LineReaderPosResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_AttrNgdotString:
			w.WriteString(fmt.Sprintf("AttrNewFromFuzz(%#+v).String()\n", a.AttrNgdotString.I))
		case *NgoloFuzzOne_ClassNgdotString:
			w.WriteString(fmt.Sprintf("ClassNewFromFuzz(%#+v).String()\n", a.ClassNgdotString.I))
		case *NgoloFuzzOne_AttrNgdotGoString:
			w.WriteString(fmt.Sprintf("AttrNewFromFuzz(%#+v).GoString()\n", a.AttrNgdotGoString.A))
		case *NgoloFuzzOne_TagNgdotGoString:
			w.WriteString(fmt.Sprintf("TagNewFromFuzz(%#+v).GoString()\n", a.TagNgdotGoString.T))
		case *NgoloFuzzOne_ClassNgdotGoString:
			w.WriteString(fmt.Sprintf("ClassNewFromFuzz(%#+v).GoString()\n", a.ClassNgdotGoString.I))
		case *NgoloFuzzOne_EntryNgdotVal:
			// Skip ops whose inputs do not exist yet, exactly as
			// FuzzNG_List does.
			if EntryNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Entry%d.Val(AttrNewFromFuzz(%#+v))\n", EntryResultsIndex, a.EntryNgdotVal.A))
			EntryResultsIndex = (EntryResultsIndex + 1) % EntryNb
		case *NgoloFuzzOne_EntryNgdotAttrField:
			if EntryNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Entry%d.AttrField(AttrNewFromFuzz(%#+v))\n", EntryResultsIndex, a.EntryNgdotAttrField.A))
			EntryResultsIndex = (EntryResultsIndex + 1) % EntryNb
		case *NgoloFuzzOne_DataNgdotReader:
			if DataNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d := Data%d.Reader()\n", ReaderNb, DataResultsIndex))
			ReaderNb = ReaderNb + 1
			DataResultsIndex = (DataResultsIndex + 1) % DataNb
		case *NgoloFuzzOne_ReaderNgdotAddressSize:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.AddressSize()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotByteOrder:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ByteOrder()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotNext:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Next()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotSkipChildren:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.SkipChildren()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotSeekPC:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.SeekPC(%#+v)\n", ReaderResultsIndex, a.ReaderNgdotSeekPC.Pc))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_DataNgdotRanges:
			if DataNb == 0 {
				continue
			}
			if EntryNb == 0 {
				continue
			}
			// "(x + 0) % n" is generated-code noise: the entry index
			// before its own post-increment.
			w.WriteString(fmt.Sprintf("Data%d.Ranges(Entry%d)\n", DataResultsIndex, (EntryResultsIndex + 0) % EntryNb))
			DataResultsIndex = (DataResultsIndex + 1) % DataNb
			EntryResultsIndex = (EntryResultsIndex + 1) % EntryNb
		case *NgoloFuzzOne_DataNgdotLineReader:
			if DataNb == 0 {
				continue
			}
			if EntryNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("LineReader%d, _ := Data%d.LineReader(Entry%d)\n", LineReaderNb, DataResultsIndex, (EntryResultsIndex + 0) % EntryNb))
			LineReaderNb = LineReaderNb + 1
			DataResultsIndex = (DataResultsIndex + 1) % DataNb
			EntryResultsIndex = (EntryResultsIndex + 1) % EntryNb
		case *NgoloFuzzOne_LineReaderNgdotTell:
			if LineReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("LineReaderPos%d := LineReader%d.Tell()\n", LineReaderPosNb, LineReaderResultsIndex))
			LineReaderPosNb = LineReaderPosNb + 1
			LineReaderResultsIndex = (LineReaderResultsIndex + 1) % LineReaderNb
		case *NgoloFuzzOne_LineReaderNgdotSeek:
			if LineReaderNb == 0 {
				continue
			}
			if LineReaderPosNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("LineReader%d.Seek(LineReaderPos%d)\n", LineReaderResultsIndex, (LineReaderPosResultsIndex + 0) % LineReaderPosNb))
			LineReaderResultsIndex = (LineReaderResultsIndex + 1) % LineReaderNb
			LineReaderPosResultsIndex = (LineReaderPosResultsIndex + 1) % LineReaderPosNb
		case *NgoloFuzzOne_LineReaderNgdotReset:
			if LineReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("LineReader%d.Reset()\n", LineReaderResultsIndex))
			LineReaderResultsIndex = (LineReaderResultsIndex + 1) % LineReaderNb
		case *NgoloFuzzOne_LineReaderNgdotFiles:
			if LineReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("LineReader%d.Files()\n", LineReaderResultsIndex))
			LineReaderResultsIndex = (LineReaderResultsIndex + 1) % LineReaderNb
		case *NgoloFuzzOne_New:
			w.WriteString(fmt.Sprintf("Data%d, _ := dwarf.New(%#+v, %#+v, %#+v, %#+v, %#+v, %#+v, %#+v, %#+v)\n", DataNb, a.New.Abbrev, a.New.Aranges, a.New.Frame, a.New.Info, a.New.Line, a.New.Pubnames, a.New.Ranges, a.New.Str))
			DataNb = DataNb + 1
		case *NgoloFuzzOne_DataNgdotAddTypes:
			if DataNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Data%d.AddTypes(%#+v, %#+v)\n", DataResultsIndex, a.DataNgdotAddTypes.Name, a.DataNgdotAddTypes.Types))
			DataResultsIndex = (DataResultsIndex + 1) % DataNb
		case *NgoloFuzzOne_DataNgdotAddSection:
			if DataNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Data%d.AddSection(%#+v, %#+v)\n", DataResultsIndex, a.DataNgdotAddSection.Name, a.DataNgdotAddSection.Contents))
			DataResultsIndex = (DataResultsIndex + 1) % DataNb
		case *NgoloFuzzOne_TagNgdotString:
			w.WriteString(fmt.Sprintf("TagNewFromFuzz(%#+v).String()\n", a.TagNgdotString.I))
		case *NgoloFuzzOne_CommonTypeNgdotCommon:
			if CommonTypeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("CommonType%d.Common()\n", CommonTypeResultsIndex))
			CommonTypeResultsIndex = (CommonTypeResultsIndex + 1) % CommonTypeNb
		case *NgoloFuzzOne_CommonTypeNgdotSize:
			if CommonTypeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("CommonType%d.Size()\n", CommonTypeResultsIndex))
			CommonTypeResultsIndex = (CommonTypeResultsIndex + 1) % CommonTypeNb
		case *NgoloFuzzOne_BasicTypeNgdotBasic:
			if BasicTypeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("BasicType%d.Basic()\n", BasicTypeResultsIndex))
			BasicTypeResultsIndex = (BasicTypeResultsIndex + 1) % BasicTypeNb
		case *NgoloFuzzOne_BasicTypeNgdotString:
			if BasicTypeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("BasicType%d.String()\n", BasicTypeResultsIndex))
			BasicTypeResultsIndex = (BasicTypeResultsIndex + 1) % BasicTypeNb
		case *NgoloFuzzOne_UnsupportedTypeNgdotString:
			w.WriteString(fmt.Sprintf("UnsupportedTypeNewFromFuzz(%#+v).String()\n", a.UnsupportedTypeNgdotString.T))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_debug_dwarf
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the build fails if this generated code and the
// linked protoimpl runtime disagree on the supported version range.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// ClassEnum is the protobuf enum the fuzzer uses to encode a choice of
// dwarf attribute class; values are mapped to dwarf.Class elsewhere
// (presumably by ClassNewFromFuzz — confirm against that function).
type ClassEnum int32

const (
	ClassEnum_ClassUnknown ClassEnum = 0
	ClassEnum_ClassAddress ClassEnum = 1
	ClassEnum_ClassBlock ClassEnum = 2
	ClassEnum_ClassConstant ClassEnum = 3
	ClassEnum_ClassExprLoc ClassEnum = 4
	ClassEnum_ClassFlag ClassEnum = 5
	ClassEnum_ClassLinePtr ClassEnum = 6
	ClassEnum_ClassLocListPtr ClassEnum = 7
	ClassEnum_ClassMacPtr ClassEnum = 8
	ClassEnum_ClassRangeListPtr ClassEnum = 9
	ClassEnum_ClassReference ClassEnum = 10
	ClassEnum_ClassReferenceSig ClassEnum = 11
	ClassEnum_ClassString ClassEnum = 12
	ClassEnum_ClassReferenceAlt ClassEnum = 13
	ClassEnum_ClassStringAlt ClassEnum = 14
	ClassEnum_ClassAddrPtr ClassEnum = 15
	ClassEnum_ClassLocList ClassEnum = 16
	ClassEnum_ClassRngList ClassEnum = 17
	ClassEnum_ClassRngListsPtr ClassEnum = 18
	ClassEnum_ClassStrOffsetsPtr ClassEnum = 19
)

// Enum value maps for ClassEnum.
// ClassEnum_name maps numeric values to names; ClassEnum_value is its inverse.
var (
	ClassEnum_name = map[int32]string{
		0: "ClassUnknown",
		1: "ClassAddress",
		2: "ClassBlock",
		3: "ClassConstant",
		4: "ClassExprLoc",
		5: "ClassFlag",
		6: "ClassLinePtr",
		7: "ClassLocListPtr",
		8: "ClassMacPtr",
		9: "ClassRangeListPtr",
		10: "ClassReference",
		11: "ClassReferenceSig",
		12: "ClassString",
		13: "ClassReferenceAlt",
		14: "ClassStringAlt",
		15: "ClassAddrPtr",
		16: "ClassLocList",
		17: "ClassRngList",
		18: "ClassRngListsPtr",
		19: "ClassStrOffsetsPtr",
	}
	ClassEnum_value = map[string]int32{
		"ClassUnknown": 0,
		"ClassAddress": 1,
		"ClassBlock": 2,
		"ClassConstant": 3,
		"ClassExprLoc": 4,
		"ClassFlag": 5,
		"ClassLinePtr": 6,
		"ClassLocListPtr": 7,
		"ClassMacPtr": 8,
		"ClassRangeListPtr": 9,
		"ClassReference": 10,
		"ClassReferenceSig": 11,
		"ClassString": 12,
		"ClassReferenceAlt": 13,
		"ClassStringAlt": 14,
		"ClassAddrPtr": 15,
		"ClassLocList": 16,
		"ClassRngList": 17,
		"ClassRngListsPtr": 18,
		"ClassStrOffsetsPtr": 19,
	}
)
// Enum returns a pointer to a fresh copy of x, per the protobuf
// generated-enum API convention.
func (x ClassEnum) Enum() *ClassEnum {
	v := x
	return &v
}
// String returns the protobuf name of x (e.g. "ClassAddress").
func (x ClassEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the protobuf enum descriptor for ClassEnum.
func (ClassEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}

// Type returns the protobuf enum type for ClassEnum.
func (ClassEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[0]
}

// Number returns x as a protoreflect.EnumNumber.
func (x ClassEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use ClassEnum.Descriptor instead.
func (ClassEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// AttrEnum is the protobuf enum the fuzzer uses to encode a choice of
// dwarf attribute; values are mapped to dwarf.Attr elsewhere
// (presumably by AttrNewFromFuzz — confirm against that function).
type AttrEnum int32

const (
	AttrEnum_AttrSibling AttrEnum = 0
	AttrEnum_AttrLocation AttrEnum = 1
	AttrEnum_AttrName AttrEnum = 2
	AttrEnum_AttrOrdering AttrEnum = 3
	AttrEnum_AttrByteSize AttrEnum = 4
	AttrEnum_AttrBitOffset AttrEnum = 5
	AttrEnum_AttrBitSize AttrEnum = 6
	AttrEnum_AttrStmtList AttrEnum = 7
	AttrEnum_AttrLowpc AttrEnum = 8
	AttrEnum_AttrHighpc AttrEnum = 9
	AttrEnum_AttrLanguage AttrEnum = 10
	AttrEnum_AttrDiscr AttrEnum = 11
	AttrEnum_AttrDiscrValue AttrEnum = 12
	AttrEnum_AttrVisibility AttrEnum = 13
	AttrEnum_AttrImport AttrEnum = 14
	AttrEnum_AttrStringLength AttrEnum = 15
	AttrEnum_AttrCommonRef AttrEnum = 16
	AttrEnum_AttrCompDir AttrEnum = 17
	AttrEnum_AttrConstValue AttrEnum = 18
	AttrEnum_AttrContainingType AttrEnum = 19
	AttrEnum_AttrDefaultValue AttrEnum = 20
	AttrEnum_AttrInline AttrEnum = 21
	AttrEnum_AttrIsOptional AttrEnum = 22
	AttrEnum_AttrLowerBound AttrEnum = 23
	AttrEnum_AttrProducer AttrEnum = 24
	AttrEnum_AttrPrototyped AttrEnum = 25
	AttrEnum_AttrReturnAddr AttrEnum = 26
	AttrEnum_AttrStartScope AttrEnum = 27
	AttrEnum_AttrStrideSize AttrEnum = 28
	AttrEnum_AttrUpperBound AttrEnum = 29
	AttrEnum_AttrAbstractOrigin AttrEnum = 30
	AttrEnum_AttrAccessibility AttrEnum = 31
	AttrEnum_AttrAddrClass AttrEnum = 32
	AttrEnum_AttrArtificial AttrEnum = 33
	AttrEnum_AttrBaseTypes AttrEnum = 34
	AttrEnum_AttrCalling AttrEnum = 35
	AttrEnum_AttrCount AttrEnum = 36
	AttrEnum_AttrDataMemberLoc AttrEnum = 37
	AttrEnum_AttrDeclColumn AttrEnum = 38
	AttrEnum_AttrDeclFile AttrEnum = 39
	AttrEnum_AttrDeclLine AttrEnum = 40
	AttrEnum_AttrDeclaration AttrEnum = 41
	AttrEnum_AttrDiscrList AttrEnum = 42
	AttrEnum_AttrEncoding AttrEnum = 43
	AttrEnum_AttrExternal AttrEnum = 44
	AttrEnum_AttrFrameBase AttrEnum = 45
	AttrEnum_AttrFriend AttrEnum = 46
	AttrEnum_AttrIdentifierCase AttrEnum = 47
	AttrEnum_AttrMacroInfo AttrEnum = 48
	AttrEnum_AttrNamelistItem AttrEnum = 49
	AttrEnum_AttrPriority AttrEnum = 50
	AttrEnum_AttrSegment AttrEnum = 51
	AttrEnum_AttrSpecification AttrEnum = 52
	AttrEnum_AttrStaticLink AttrEnum = 53
	AttrEnum_AttrType AttrEnum = 54
	AttrEnum_AttrUseLocation AttrEnum = 55
	AttrEnum_AttrVarParam AttrEnum = 56
	AttrEnum_AttrVirtuality AttrEnum = 57
	AttrEnum_AttrVtableElemLoc AttrEnum = 58
	AttrEnum_AttrAllocated AttrEnum = 59
	AttrEnum_AttrAssociated AttrEnum = 60
	AttrEnum_AttrDataLocation AttrEnum = 61
	AttrEnum_AttrStride AttrEnum = 62
	AttrEnum_AttrEntrypc AttrEnum = 63
	AttrEnum_AttrUseUTF8 AttrEnum = 64
	AttrEnum_AttrExtension AttrEnum = 65
	AttrEnum_AttrRanges AttrEnum = 66
	AttrEnum_AttrTrampoline AttrEnum = 67
	AttrEnum_AttrCallColumn AttrEnum = 68
	AttrEnum_AttrCallFile AttrEnum = 69
	AttrEnum_AttrCallLine AttrEnum = 70
	AttrEnum_AttrDescription AttrEnum = 71
	AttrEnum_AttrBinaryScale AttrEnum = 72
	AttrEnum_AttrDecimalScale AttrEnum = 73
	AttrEnum_AttrSmall AttrEnum = 74
	AttrEnum_AttrDecimalSign AttrEnum = 75
	AttrEnum_AttrDigitCount AttrEnum = 76
	AttrEnum_AttrPictureString AttrEnum = 77
	AttrEnum_AttrMutable AttrEnum = 78
	AttrEnum_AttrThreadsScaled AttrEnum = 79
	AttrEnum_AttrExplicit AttrEnum = 80
	AttrEnum_AttrObjectPointer AttrEnum = 81
	AttrEnum_AttrEndianity AttrEnum = 82
	AttrEnum_AttrElemental AttrEnum = 83
	AttrEnum_AttrPure AttrEnum = 84
	AttrEnum_AttrRecursive AttrEnum = 85
	AttrEnum_AttrSignature AttrEnum = 86
	AttrEnum_AttrMainSubprogram AttrEnum = 87
	AttrEnum_AttrDataBitOffset AttrEnum = 88
	AttrEnum_AttrConstExpr AttrEnum = 89
	AttrEnum_AttrEnumClass AttrEnum = 90
	AttrEnum_AttrLinkageName AttrEnum = 91
	AttrEnum_AttrStringLengthBitSize AttrEnum = 92
	AttrEnum_AttrStringLengthByteSize AttrEnum = 93
	AttrEnum_AttrRank AttrEnum = 94
	AttrEnum_AttrStrOffsetsBase AttrEnum = 95
	AttrEnum_AttrAddrBase AttrEnum = 96
	AttrEnum_AttrRnglistsBase AttrEnum = 97
	AttrEnum_AttrDwoName AttrEnum = 98
	AttrEnum_AttrReference AttrEnum = 99
	AttrEnum_AttrRvalueReference AttrEnum = 100
	AttrEnum_AttrMacros AttrEnum = 101
	AttrEnum_AttrCallAllCalls AttrEnum = 102
	AttrEnum_AttrCallAllSourceCalls AttrEnum = 103
	AttrEnum_AttrCallAllTailCalls AttrEnum = 104
	AttrEnum_AttrCallReturnPC AttrEnum = 105
	AttrEnum_AttrCallValue AttrEnum = 106
	AttrEnum_AttrCallOrigin AttrEnum = 107
	AttrEnum_AttrCallParameter AttrEnum = 108
	AttrEnum_AttrCallPC AttrEnum = 109
	AttrEnum_AttrCallTailCall AttrEnum = 110
	AttrEnum_AttrCallTarget AttrEnum = 111
	AttrEnum_AttrCallTargetClobbered AttrEnum = 112
	AttrEnum_AttrCallDataLocation AttrEnum = 113
	AttrEnum_AttrCallDataValue AttrEnum = 114
	AttrEnum_AttrNoreturn AttrEnum = 115
	AttrEnum_AttrAlignment AttrEnum = 116
	AttrEnum_AttrExportSymbols AttrEnum = 117
	AttrEnum_AttrDeleted AttrEnum = 118
	AttrEnum_AttrDefaulted AttrEnum = 119
	AttrEnum_AttrLoclistsBase AttrEnum = 120
)
// Enum value maps for AttrEnum.
// AttrEnum_name maps numeric values to names; AttrEnum_value is its inverse.
var (
	AttrEnum_name = map[int32]string{
		0: "AttrSibling",
		1: "AttrLocation",
		2: "AttrName",
		3: "AttrOrdering",
		4: "AttrByteSize",
		5: "AttrBitOffset",
		6: "AttrBitSize",
		7: "AttrStmtList",
		8: "AttrLowpc",
		9: "AttrHighpc",
		10: "AttrLanguage",
		11: "AttrDiscr",
		12: "AttrDiscrValue",
		13: "AttrVisibility",
		14: "AttrImport",
		15: "AttrStringLength",
		16: "AttrCommonRef",
		17: "AttrCompDir",
		18: "AttrConstValue",
		19: "AttrContainingType",
		20: "AttrDefaultValue",
		21: "AttrInline",
		22: "AttrIsOptional",
		23: "AttrLowerBound",
		24: "AttrProducer",
		25: "AttrPrototyped",
		26: "AttrReturnAddr",
		27: "AttrStartScope",
		28: "AttrStrideSize",
		29: "AttrUpperBound",
		30: "AttrAbstractOrigin",
		31: "AttrAccessibility",
		32: "AttrAddrClass",
		33: "AttrArtificial",
		34: "AttrBaseTypes",
		35: "AttrCalling",
		36: "AttrCount",
		37: "AttrDataMemberLoc",
		38: "AttrDeclColumn",
		39: "AttrDeclFile",
		40: "AttrDeclLine",
		41: "AttrDeclaration",
		42: "AttrDiscrList",
		43: "AttrEncoding",
		44: "AttrExternal",
		45: "AttrFrameBase",
		46: "AttrFriend",
		47: "AttrIdentifierCase",
		48: "AttrMacroInfo",
		49: "AttrNamelistItem",
		50: "AttrPriority",
		51: "AttrSegment",
		52: "AttrSpecification",
		53: "AttrStaticLink",
		54: "AttrType",
		55: "AttrUseLocation",
		56: "AttrVarParam",
		57: "AttrVirtuality",
		58: "AttrVtableElemLoc",
		59: "AttrAllocated",
		60: "AttrAssociated",
		61: "AttrDataLocation",
		62: "AttrStride",
		63: "AttrEntrypc",
		64: "AttrUseUTF8",
		65: "AttrExtension",
		66: "AttrRanges",
		67: "AttrTrampoline",
		68: "AttrCallColumn",
		69: "AttrCallFile",
		70: "AttrCallLine",
		71: "AttrDescription",
		72: "AttrBinaryScale",
		73: "AttrDecimalScale",
		74: "AttrSmall",
		75: "AttrDecimalSign",
		76: "AttrDigitCount",
		77: "AttrPictureString",
		78: "AttrMutable",
		79: "AttrThreadsScaled",
		80: "AttrExplicit",
		81: "AttrObjectPointer",
		82: "AttrEndianity",
		83: "AttrElemental",
		84: "AttrPure",
		85: "AttrRecursive",
		86: "AttrSignature",
		87: "AttrMainSubprogram",
		88: "AttrDataBitOffset",
		89: "AttrConstExpr",
		90: "AttrEnumClass",
		91: "AttrLinkageName",
		92: "AttrStringLengthBitSize",
		93: "AttrStringLengthByteSize",
		94: "AttrRank",
		95: "AttrStrOffsetsBase",
		96: "AttrAddrBase",
		97: "AttrRnglistsBase",
		98: "AttrDwoName",
		99: "AttrReference",
		100: "AttrRvalueReference",
		101: "AttrMacros",
		102: "AttrCallAllCalls",
		103: "AttrCallAllSourceCalls",
		104: "AttrCallAllTailCalls",
		105: "AttrCallReturnPC",
		106: "AttrCallValue",
		107: "AttrCallOrigin",
		108: "AttrCallParameter",
		109: "AttrCallPC",
		110: "AttrCallTailCall",
		111: "AttrCallTarget",
		112: "AttrCallTargetClobbered",
		113: "AttrCallDataLocation",
		114: "AttrCallDataValue",
		115: "AttrNoreturn",
		116: "AttrAlignment",
		117: "AttrExportSymbols",
		118: "AttrDeleted",
		119: "AttrDefaulted",
		120: "AttrLoclistsBase",
	}
	AttrEnum_value = map[string]int32{
		"AttrSibling": 0,
		"AttrLocation": 1,
		"AttrName": 2,
		"AttrOrdering": 3,
		"AttrByteSize": 4,
		"AttrBitOffset": 5,
		"AttrBitSize": 6,
		"AttrStmtList": 7,
		"AttrLowpc": 8,
		"AttrHighpc": 9,
		"AttrLanguage": 10,
		"AttrDiscr": 11,
		"AttrDiscrValue": 12,
		"AttrVisibility": 13,
		"AttrImport": 14,
		"AttrStringLength": 15,
		"AttrCommonRef": 16,
		"AttrCompDir": 17,
		"AttrConstValue": 18,
		"AttrContainingType": 19,
		"AttrDefaultValue": 20,
		"AttrInline": 21,
		"AttrIsOptional": 22,
		"AttrLowerBound": 23,
		"AttrProducer": 24,
		"AttrPrototyped": 25,
		"AttrReturnAddr": 26,
		"AttrStartScope": 27,
		"AttrStrideSize": 28,
		"AttrUpperBound": 29,
		"AttrAbstractOrigin": 30,
		"AttrAccessibility": 31,
		"AttrAddrClass": 32,
		"AttrArtificial": 33,
		"AttrBaseTypes": 34,
		"AttrCalling": 35,
		"AttrCount": 36,
		"AttrDataMemberLoc": 37,
		"AttrDeclColumn": 38,
		"AttrDeclFile": 39,
		"AttrDeclLine": 40,
		"AttrDeclaration": 41,
		"AttrDiscrList": 42,
		"AttrEncoding": 43,
		"AttrExternal": 44,
		"AttrFrameBase": 45,
		"AttrFriend": 46,
		"AttrIdentifierCase": 47,
		"AttrMacroInfo": 48,
		"AttrNamelistItem": 49,
		"AttrPriority": 50,
		"AttrSegment": 51,
		"AttrSpecification": 52,
		"AttrStaticLink": 53,
		"AttrType": 54,
		"AttrUseLocation": 55,
		"AttrVarParam": 56,
		"AttrVirtuality": 57,
		"AttrVtableElemLoc": 58,
		"AttrAllocated": 59,
		"AttrAssociated": 60,
		"AttrDataLocation": 61,
		"AttrStride": 62,
		"AttrEntrypc": 63,
		"AttrUseUTF8": 64,
		"AttrExtension": 65,
		"AttrRanges": 66,
		"AttrTrampoline": 67,
		"AttrCallColumn": 68,
		"AttrCallFile": 69,
		"AttrCallLine": 70,
		"AttrDescription": 71,
		"AttrBinaryScale": 72,
		"AttrDecimalScale": 73,
		"AttrSmall": 74,
		"AttrDecimalSign": 75,
		"AttrDigitCount": 76,
		"AttrPictureString": 77,
		"AttrMutable": 78,
		"AttrThreadsScaled": 79,
		"AttrExplicit": 80,
		"AttrObjectPointer": 81,
		"AttrEndianity": 82,
		"AttrElemental": 83,
		"AttrPure": 84,
		"AttrRecursive": 85,
		"AttrSignature": 86,
		"AttrMainSubprogram": 87,
		"AttrDataBitOffset": 88,
		"AttrConstExpr": 89,
		"AttrEnumClass": 90,
		"AttrLinkageName": 91,
		"AttrStringLengthBitSize": 92,
		"AttrStringLengthByteSize": 93,
		"AttrRank": 94,
		"AttrStrOffsetsBase": 95,
		"AttrAddrBase": 96,
		"AttrRnglistsBase": 97,
		"AttrDwoName": 98,
		"AttrReference": 99,
		"AttrRvalueReference": 100,
		"AttrMacros": 101,
		"AttrCallAllCalls": 102,
		"AttrCallAllSourceCalls": 103,
		"AttrCallAllTailCalls": 104,
		"AttrCallReturnPC": 105,
		"AttrCallValue": 106,
		"AttrCallOrigin": 107,
		"AttrCallParameter": 108,
		"AttrCallPC": 109,
		"AttrCallTailCall": 110,
		"AttrCallTarget": 111,
		"AttrCallTargetClobbered": 112,
		"AttrCallDataLocation": 113,
		"AttrCallDataValue": 114,
		"AttrNoreturn": 115,
		"AttrAlignment": 116,
		"AttrExportSymbols": 117,
		"AttrDeleted": 118,
		"AttrDefaulted": 119,
		"AttrLoclistsBase": 120,
	}
)
// Enum returns a pointer to a fresh copy of x, per the protobuf
// generated-enum API convention.
func (x AttrEnum) Enum() *AttrEnum {
	v := x
	return &v
}
// String returns the protobuf name of x (e.g. "AttrName").
func (x AttrEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the protobuf enum descriptor for AttrEnum.
func (AttrEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[1].Descriptor()
}

// Type returns the protobuf enum type for AttrEnum.
func (AttrEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[1]
}

// Number returns x as a protoreflect.EnumNumber.
func (x AttrEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use AttrEnum.Descriptor instead.
func (AttrEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// TagEnum is the protobuf enum the fuzzer uses to encode a choice of dwarf
// tag; TagNewFromFuzz maps these values to dwarf.Tag (unknown values fall
// back to dwarf.TagArrayType there).
type TagEnum int32

const (
	TagEnum_TagArrayType TagEnum = 0
	TagEnum_TagClassType TagEnum = 1
	TagEnum_TagEntryPoint TagEnum = 2
	TagEnum_TagEnumerationType TagEnum = 3
	TagEnum_TagFormalParameter TagEnum = 4
	TagEnum_TagImportedDeclaration TagEnum = 5
	TagEnum_TagLabel TagEnum = 6
	TagEnum_TagLexDwarfBlock TagEnum = 7
	TagEnum_TagMember TagEnum = 8
	TagEnum_TagPointerType TagEnum = 9
	TagEnum_TagReferenceType TagEnum = 10
	TagEnum_TagCompileUnit TagEnum = 11
	TagEnum_TagStringType TagEnum = 12
	TagEnum_TagStructType TagEnum = 13
	TagEnum_TagSubroutineType TagEnum = 14
	TagEnum_TagTypedef TagEnum = 15
	TagEnum_TagUnionType TagEnum = 16
	TagEnum_TagUnspecifiedParameters TagEnum = 17
	TagEnum_TagVariant TagEnum = 18
	TagEnum_TagCommonDwarfBlock TagEnum = 19
	TagEnum_TagCommonInclusion TagEnum = 20
	TagEnum_TagInheritance TagEnum = 21
	TagEnum_TagInlinedSubroutine TagEnum = 22
	TagEnum_TagModule TagEnum = 23
	TagEnum_TagPtrToMemberType TagEnum = 24
	TagEnum_TagSetType TagEnum = 25
	TagEnum_TagSubrangeType TagEnum = 26
	TagEnum_TagWithStmt TagEnum = 27
	TagEnum_TagAccessDeclaration TagEnum = 28
	TagEnum_TagBaseType TagEnum = 29
	TagEnum_TagCatchDwarfBlock TagEnum = 30
	TagEnum_TagConstType TagEnum = 31
	TagEnum_TagConstant TagEnum = 32
	TagEnum_TagEnumerator TagEnum = 33
	TagEnum_TagFileType TagEnum = 34
	TagEnum_TagFriend TagEnum = 35
	TagEnum_TagNamelist TagEnum = 36
	TagEnum_TagNamelistItem TagEnum = 37
	TagEnum_TagPackedType TagEnum = 38
	TagEnum_TagSubprogram TagEnum = 39
	TagEnum_TagTemplateTypeParameter TagEnum = 40
	TagEnum_TagTemplateValueParameter TagEnum = 41
	TagEnum_TagThrownType TagEnum = 42
	TagEnum_TagTryDwarfBlock TagEnum = 43
	TagEnum_TagVariantPart TagEnum = 44
	TagEnum_TagVariable TagEnum = 45
	TagEnum_TagVolatileType TagEnum = 46
	TagEnum_TagDwarfProcedure TagEnum = 47
	TagEnum_TagRestrictType TagEnum = 48
	TagEnum_TagInterfaceType TagEnum = 49
	TagEnum_TagNamespace TagEnum = 50
	TagEnum_TagImportedModule TagEnum = 51
	TagEnum_TagUnspecifiedType TagEnum = 52
	TagEnum_TagPartialUnit TagEnum = 53
	TagEnum_TagImportedUnit TagEnum = 54
	TagEnum_TagMutableType TagEnum = 55
	TagEnum_TagCondition TagEnum = 56
	TagEnum_TagSharedType TagEnum = 57
	TagEnum_TagTypeUnit TagEnum = 58
	TagEnum_TagRvalueReferenceType TagEnum = 59
	TagEnum_TagTemplateAlias TagEnum = 60
	TagEnum_TagCoarrayType TagEnum = 61
	TagEnum_TagGenericSubrange TagEnum = 62
	TagEnum_TagDynamicType TagEnum = 63
	TagEnum_TagAtomicType TagEnum = 64
	TagEnum_TagCallSite TagEnum = 65
	TagEnum_TagCallSiteParameter TagEnum = 66
	TagEnum_TagSkeletonUnit TagEnum = 67
	TagEnum_TagImmutableType TagEnum = 68
)
// Enum value maps for TagEnum.
var (
	// TagEnum_name maps each TagEnum numeric value to its protobuf identifier.
	TagEnum_name = map[int32]string{
		0:  "TagArrayType",
		1:  "TagClassType",
		2:  "TagEntryPoint",
		3:  "TagEnumerationType",
		4:  "TagFormalParameter",
		5:  "TagImportedDeclaration",
		6:  "TagLabel",
		7:  "TagLexDwarfBlock",
		8:  "TagMember",
		9:  "TagPointerType",
		10: "TagReferenceType",
		11: "TagCompileUnit",
		12: "TagStringType",
		13: "TagStructType",
		14: "TagSubroutineType",
		15: "TagTypedef",
		16: "TagUnionType",
		17: "TagUnspecifiedParameters",
		18: "TagVariant",
		19: "TagCommonDwarfBlock",
		20: "TagCommonInclusion",
		21: "TagInheritance",
		22: "TagInlinedSubroutine",
		23: "TagModule",
		24: "TagPtrToMemberType",
		25: "TagSetType",
		26: "TagSubrangeType",
		27: "TagWithStmt",
		28: "TagAccessDeclaration",
		29: "TagBaseType",
		30: "TagCatchDwarfBlock",
		31: "TagConstType",
		32: "TagConstant",
		33: "TagEnumerator",
		34: "TagFileType",
		35: "TagFriend",
		36: "TagNamelist",
		37: "TagNamelistItem",
		38: "TagPackedType",
		39: "TagSubprogram",
		40: "TagTemplateTypeParameter",
		41: "TagTemplateValueParameter",
		42: "TagThrownType",
		43: "TagTryDwarfBlock",
		44: "TagVariantPart",
		45: "TagVariable",
		46: "TagVolatileType",
		47: "TagDwarfProcedure",
		48: "TagRestrictType",
		49: "TagInterfaceType",
		50: "TagNamespace",
		51: "TagImportedModule",
		52: "TagUnspecifiedType",
		53: "TagPartialUnit",
		54: "TagImportedUnit",
		55: "TagMutableType",
		56: "TagCondition",
		57: "TagSharedType",
		58: "TagTypeUnit",
		59: "TagRvalueReferenceType",
		60: "TagTemplateAlias",
		61: "TagCoarrayType",
		62: "TagGenericSubrange",
		63: "TagDynamicType",
		64: "TagAtomicType",
		65: "TagCallSite",
		66: "TagCallSiteParameter",
		67: "TagSkeletonUnit",
		68: "TagImmutableType",
	}
	// TagEnum_value is the inverse of TagEnum_name: protobuf identifier to
	// numeric value.
	TagEnum_value = map[string]int32{
		"TagArrayType":              0,
		"TagClassType":              1,
		"TagEntryPoint":             2,
		"TagEnumerationType":        3,
		"TagFormalParameter":        4,
		"TagImportedDeclaration":    5,
		"TagLabel":                  6,
		"TagLexDwarfBlock":          7,
		"TagMember":                 8,
		"TagPointerType":            9,
		"TagReferenceType":          10,
		"TagCompileUnit":            11,
		"TagStringType":             12,
		"TagStructType":             13,
		"TagSubroutineType":         14,
		"TagTypedef":                15,
		"TagUnionType":              16,
		"TagUnspecifiedParameters":  17,
		"TagVariant":                18,
		"TagCommonDwarfBlock":       19,
		"TagCommonInclusion":        20,
		"TagInheritance":            21,
		"TagInlinedSubroutine":      22,
		"TagModule":                 23,
		"TagPtrToMemberType":        24,
		"TagSetType":                25,
		"TagSubrangeType":           26,
		"TagWithStmt":               27,
		"TagAccessDeclaration":      28,
		"TagBaseType":               29,
		"TagCatchDwarfBlock":        30,
		"TagConstType":              31,
		"TagConstant":               32,
		"TagEnumerator":             33,
		"TagFileType":               34,
		"TagFriend":                 35,
		"TagNamelist":               36,
		"TagNamelistItem":           37,
		"TagPackedType":             38,
		"TagSubprogram":             39,
		"TagTemplateTypeParameter":  40,
		"TagTemplateValueParameter": 41,
		"TagThrownType":             42,
		"TagTryDwarfBlock":          43,
		"TagVariantPart":            44,
		"TagVariable":               45,
		"TagVolatileType":           46,
		"TagDwarfProcedure":         47,
		"TagRestrictType":           48,
		"TagInterfaceType":          49,
		"TagNamespace":              50,
		"TagImportedModule":         51,
		"TagUnspecifiedType":        52,
		"TagPartialUnit":            53,
		"TagImportedUnit":           54,
		"TagMutableType":            55,
		"TagCondition":              56,
		"TagSharedType":             57,
		"TagTypeUnit":               58,
		"TagRvalueReferenceType":    59,
		"TagTemplateAlias":          60,
		"TagCoarrayType":            61,
		"TagGenericSubrange":        62,
		"TagDynamicType":            63,
		"TagAtomicType":             64,
		"TagCallSite":               65,
		"TagCallSiteParameter":      66,
		"TagSkeletonUnit":           67,
		"TagImmutableType":          68,
	}
)
// Enum returns a pointer to a freshly allocated copy of x.
func (x TagEnum) Enum() *TagEnum {
	p := new(TagEnum)
	*p = x
	return p
}
// String returns the protobuf name of x.
func (x TagEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the enum descriptor for TagEnum.
func (TagEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[2].Descriptor()
}
// Type returns the enum type for TagEnum.
func (TagEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[2]
}
// Number returns x as a protoreflect.EnumNumber.
func (x TagEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}
// Deprecated: Use TagEnum.Descriptor instead.
func (TagEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// UnsupportedTypeStruct is a generated protobuf message carrying a single
// TagEnum field, Tag.
type UnsupportedTypeStruct struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Tag           TagEnum                `protobuf:"varint,1,opt,name=Tag,proto3,enum=ngolofuzz.TagEnum" json:"Tag,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *UnsupportedTypeStruct) Reset() {
	*x = UnsupportedTypeStruct{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *UnsupportedTypeStruct) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*UnsupportedTypeStruct) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *UnsupportedTypeStruct) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use UnsupportedTypeStruct.ProtoReflect.Descriptor instead.
func (*UnsupportedTypeStruct) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetTag returns the Tag field, or the zero enum value when x is nil.
func (x *UnsupportedTypeStruct) GetTag() TagEnum {
	if x != nil {
		return x.Tag
	}
	return TagEnum_TagArrayType
}
// AttrNgdotStringArgs is a generated protobuf message carrying a single
// AttrEnum field, I.
type AttrNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	I             AttrEnum               `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.AttrEnum" json:"i,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *AttrNgdotStringArgs) Reset() {
	*x = AttrNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *AttrNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*AttrNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *AttrNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use AttrNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*AttrNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetI returns the I field, or the zero enum value when x is nil.
func (x *AttrNgdotStringArgs) GetI() AttrEnum {
	if x != nil {
		return x.I
	}
	return AttrEnum_AttrSibling
}
// ClassNgdotStringArgs is a generated protobuf message carrying a single
// ClassEnum field, I.
type ClassNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	I             ClassEnum              `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.ClassEnum" json:"i,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *ClassNgdotStringArgs) Reset() {
	*x = ClassNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *ClassNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ClassNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *ClassNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ClassNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*ClassNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// GetI returns the I field, or the zero enum value when x is nil.
func (x *ClassNgdotStringArgs) GetI() ClassEnum {
	if x != nil {
		return x.I
	}
	return ClassEnum_ClassUnknown
}
// AttrNgdotGoStringArgs is a generated protobuf message carrying a single
// AttrEnum field, A.
type AttrNgdotGoStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	A             AttrEnum               `protobuf:"varint,1,opt,name=a,proto3,enum=ngolofuzz.AttrEnum" json:"a,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *AttrNgdotGoStringArgs) Reset() {
	*x = AttrNgdotGoStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *AttrNgdotGoStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*AttrNgdotGoStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *AttrNgdotGoStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use AttrNgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*AttrNgdotGoStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetA returns the A field, or the zero enum value when x is nil.
func (x *AttrNgdotGoStringArgs) GetA() AttrEnum {
	if x != nil {
		return x.A
	}
	return AttrEnum_AttrSibling
}
// TagNgdotGoStringArgs is a generated protobuf message carrying a single
// TagEnum field, T.
type TagNgdotGoStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	T             TagEnum                `protobuf:"varint,1,opt,name=t,proto3,enum=ngolofuzz.TagEnum" json:"t,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *TagNgdotGoStringArgs) Reset() {
	*x = TagNgdotGoStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *TagNgdotGoStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*TagNgdotGoStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *TagNgdotGoStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use TagNgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*TagNgdotGoStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// GetT returns the T field, or the zero enum value when x is nil.
func (x *TagNgdotGoStringArgs) GetT() TagEnum {
	if x != nil {
		return x.T
	}
	return TagEnum_TagArrayType
}
// ClassNgdotGoStringArgs is a generated protobuf message carrying a single
// ClassEnum field, I.
type ClassNgdotGoStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	I             ClassEnum              `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.ClassEnum" json:"i,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *ClassNgdotGoStringArgs) Reset() {
	*x = ClassNgdotGoStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *ClassNgdotGoStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ClassNgdotGoStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *ClassNgdotGoStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ClassNgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*ClassNgdotGoStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetI returns the I field, or the zero enum value when x is nil.
func (x *ClassNgdotGoStringArgs) GetI() ClassEnum {
	if x != nil {
		return x.I
	}
	return ClassEnum_ClassUnknown
}
// EntryNgdotValArgs is a generated protobuf message carrying a single
// AttrEnum field, A.
type EntryNgdotValArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	A             AttrEnum               `protobuf:"varint,1,opt,name=a,proto3,enum=ngolofuzz.AttrEnum" json:"a,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *EntryNgdotValArgs) Reset() {
	*x = EntryNgdotValArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *EntryNgdotValArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*EntryNgdotValArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *EntryNgdotValArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use EntryNgdotValArgs.ProtoReflect.Descriptor instead.
func (*EntryNgdotValArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// GetA returns the A field, or the zero enum value when x is nil.
func (x *EntryNgdotValArgs) GetA() AttrEnum {
	if x != nil {
		return x.A
	}
	return AttrEnum_AttrSibling
}
// EntryNgdotAttrFieldArgs is a generated protobuf message carrying a single
// AttrEnum field, A.
type EntryNgdotAttrFieldArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	A             AttrEnum               `protobuf:"varint,1,opt,name=a,proto3,enum=ngolofuzz.AttrEnum" json:"a,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *EntryNgdotAttrFieldArgs) Reset() {
	*x = EntryNgdotAttrFieldArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *EntryNgdotAttrFieldArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*EntryNgdotAttrFieldArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *EntryNgdotAttrFieldArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use EntryNgdotAttrFieldArgs.ProtoReflect.Descriptor instead.
func (*EntryNgdotAttrFieldArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// GetA returns the A field, or the zero enum value when x is nil.
func (x *EntryNgdotAttrFieldArgs) GetA() AttrEnum {
	if x != nil {
		return x.A
	}
	return AttrEnum_AttrSibling
}
// DataNgdotReaderArgs is a generated, field-less protobuf message.
type DataNgdotReaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *DataNgdotReaderArgs) Reset() {
	*x = DataNgdotReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *DataNgdotReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DataNgdotReaderArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *DataNgdotReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use DataNgdotReaderArgs.ProtoReflect.Descriptor instead.
func (*DataNgdotReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// ReaderNgdotAddressSizeArgs is a generated, field-less protobuf message.
type ReaderNgdotAddressSizeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *ReaderNgdotAddressSizeArgs) Reset() {
	*x = ReaderNgdotAddressSizeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *ReaderNgdotAddressSizeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotAddressSizeArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *ReaderNgdotAddressSizeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotAddressSizeArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotAddressSizeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// ReaderNgdotByteOrderArgs is a generated, field-less protobuf message.
type ReaderNgdotByteOrderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *ReaderNgdotByteOrderArgs) Reset() {
	*x = ReaderNgdotByteOrderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *ReaderNgdotByteOrderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotByteOrderArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *ReaderNgdotByteOrderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotByteOrderArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotByteOrderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// ReaderNgdotNextArgs is a generated, field-less protobuf message.
type ReaderNgdotNextArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *ReaderNgdotNextArgs) Reset() {
	*x = ReaderNgdotNextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *ReaderNgdotNextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotNextArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *ReaderNgdotNextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotNextArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotNextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// ReaderNgdotSkipChildrenArgs is a generated, field-less protobuf message.
type ReaderNgdotSkipChildrenArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *ReaderNgdotSkipChildrenArgs) Reset() {
	*x = ReaderNgdotSkipChildrenArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *ReaderNgdotSkipChildrenArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotSkipChildrenArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *ReaderNgdotSkipChildrenArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotSkipChildrenArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotSkipChildrenArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// ReaderNgdotSeekPCArgs is a generated protobuf message carrying a single
// uint64 field, Pc.
type ReaderNgdotSeekPCArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Pc            uint64                 `protobuf:"varint,1,opt,name=pc,proto3" json:"pc,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *ReaderNgdotSeekPCArgs) Reset() {
	*x = ReaderNgdotSeekPCArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *ReaderNgdotSeekPCArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotSeekPCArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *ReaderNgdotSeekPCArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotSeekPCArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotSeekPCArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// GetPc returns the Pc field, or 0 when x is nil.
func (x *ReaderNgdotSeekPCArgs) GetPc() uint64 {
	if x != nil {
		return x.Pc
	}
	return 0
}
// DataNgdotRangesArgs is a generated, field-less protobuf message.
type DataNgdotRangesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *DataNgdotRangesArgs) Reset() {
	*x = DataNgdotRangesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *DataNgdotRangesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DataNgdotRangesArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *DataNgdotRangesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use DataNgdotRangesArgs.ProtoReflect.Descriptor instead.
func (*DataNgdotRangesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// DataNgdotLineReaderArgs is a generated, field-less protobuf message.
type DataNgdotLineReaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *DataNgdotLineReaderArgs) Reset() {
	*x = DataNgdotLineReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *DataNgdotLineReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DataNgdotLineReaderArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *DataNgdotLineReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use DataNgdotLineReaderArgs.ProtoReflect.Descriptor instead.
func (*DataNgdotLineReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// LineReaderNgdotTellArgs is a generated, field-less protobuf message.
type LineReaderNgdotTellArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *LineReaderNgdotTellArgs) Reset() {
	*x = LineReaderNgdotTellArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *LineReaderNgdotTellArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*LineReaderNgdotTellArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *LineReaderNgdotTellArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use LineReaderNgdotTellArgs.ProtoReflect.Descriptor instead.
func (*LineReaderNgdotTellArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// LineReaderNgdotSeekArgs is a generated, field-less protobuf message.
type LineReaderNgdotSeekArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *LineReaderNgdotSeekArgs) Reset() {
	*x = LineReaderNgdotSeekArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *LineReaderNgdotSeekArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*LineReaderNgdotSeekArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *LineReaderNgdotSeekArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use LineReaderNgdotSeekArgs.ProtoReflect.Descriptor instead.
func (*LineReaderNgdotSeekArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// LineReaderNgdotResetArgs is a generated, field-less protobuf message.
type LineReaderNgdotResetArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *LineReaderNgdotResetArgs) Reset() {
	*x = LineReaderNgdotResetArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *LineReaderNgdotResetArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*LineReaderNgdotResetArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *LineReaderNgdotResetArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use LineReaderNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*LineReaderNgdotResetArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// LineReaderNgdotFilesArgs is a generated, field-less protobuf message.
type LineReaderNgdotFilesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *LineReaderNgdotFilesArgs) Reset() {
	*x = LineReaderNgdotFilesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[19]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *LineReaderNgdotFilesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*LineReaderNgdotFilesArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *LineReaderNgdotFilesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[19]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use LineReaderNgdotFilesArgs.ProtoReflect.Descriptor instead.
func (*LineReaderNgdotFilesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// NewArgs is a generated protobuf message carrying eight byte-slice fields,
// one per DWARF-style section name (abbrev, aranges, frame, info, line,
// pubnames, ranges, str).
type NewArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Abbrev        []byte                 `protobuf:"bytes,1,opt,name=abbrev,proto3" json:"abbrev,omitempty"`
	Aranges       []byte                 `protobuf:"bytes,2,opt,name=aranges,proto3" json:"aranges,omitempty"`
	Frame         []byte                 `protobuf:"bytes,3,opt,name=frame,proto3" json:"frame,omitempty"`
	Info          []byte                 `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"`
	Line          []byte                 `protobuf:"bytes,5,opt,name=line,proto3" json:"line,omitempty"`
	Pubnames      []byte                 `protobuf:"bytes,6,opt,name=pubnames,proto3" json:"pubnames,omitempty"`
	Ranges        []byte                 `protobuf:"bytes,7,opt,name=ranges,proto3" json:"ranges,omitempty"`
	Str           []byte                 `protobuf:"bytes,8,opt,name=str,proto3" json:"str,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *NewArgs) Reset() {
	*x = NewArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[20]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *NewArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NewArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *NewArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[20]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// GetAbbrev returns the Abbrev field, or nil when x is nil.
func (x *NewArgs) GetAbbrev() []byte {
	if x != nil {
		return x.Abbrev
	}
	return nil
}
// GetAranges returns the Aranges field, or nil when x is nil.
func (x *NewArgs) GetAranges() []byte {
	if x != nil {
		return x.Aranges
	}
	return nil
}
// GetFrame returns the Frame field, or nil when x is nil.
func (x *NewArgs) GetFrame() []byte {
	if x != nil {
		return x.Frame
	}
	return nil
}
// GetInfo returns the Info field, or nil when x is nil.
func (x *NewArgs) GetInfo() []byte {
	if x != nil {
		return x.Info
	}
	return nil
}
// GetLine returns the Line field, or nil when x is nil.
func (x *NewArgs) GetLine() []byte {
	if x != nil {
		return x.Line
	}
	return nil
}
// GetPubnames returns the Pubnames field, or nil when x is nil.
func (x *NewArgs) GetPubnames() []byte {
	if x != nil {
		return x.Pubnames
	}
	return nil
}
// GetRanges returns the Ranges field, or nil when x is nil.
func (x *NewArgs) GetRanges() []byte {
	if x != nil {
		return x.Ranges
	}
	return nil
}
// GetStr returns the Str field, or nil when x is nil.
func (x *NewArgs) GetStr() []byte {
	if x != nil {
		return x.Str
	}
	return nil
}
// DataNgdotAddTypesArgs is a generated protobuf message carrying a Name
// string and a Types byte slice.
type DataNgdotAddTypesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Types         []byte                 `protobuf:"bytes,2,opt,name=types,proto3" json:"types,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *DataNgdotAddTypesArgs) Reset() {
	*x = DataNgdotAddTypesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[21]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *DataNgdotAddTypesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DataNgdotAddTypesArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *DataNgdotAddTypesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[21]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use DataNgdotAddTypesArgs.ProtoReflect.Descriptor instead.
func (*DataNgdotAddTypesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// GetName returns the Name field, or "" when x is nil.
func (x *DataNgdotAddTypesArgs) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// GetTypes returns the Types field, or nil when x is nil.
func (x *DataNgdotAddTypesArgs) GetTypes() []byte {
	if x != nil {
		return x.Types
	}
	return nil
}
// DataNgdotAddSectionArgs is a generated protobuf message carrying a Name
// string and a Contents byte slice.
type DataNgdotAddSectionArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Contents      []byte                 `protobuf:"bytes,2,opt,name=contents,proto3" json:"contents,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *DataNgdotAddSectionArgs) Reset() {
	*x = DataNgdotAddSectionArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[22]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *DataNgdotAddSectionArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DataNgdotAddSectionArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *DataNgdotAddSectionArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[22]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use DataNgdotAddSectionArgs.ProtoReflect.Descriptor instead.
func (*DataNgdotAddSectionArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// GetName returns the Name field, or "" when x is nil.
func (x *DataNgdotAddSectionArgs) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// GetContents returns the Contents field, or nil when x is nil.
func (x *DataNgdotAddSectionArgs) GetContents() []byte {
	if x != nil {
		return x.Contents
	}
	return nil
}
// TagNgdotStringArgs is a generated protobuf message carrying a single
// TagEnum field, I.
type TagNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	I             TagEnum                `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.TagEnum" json:"i,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *TagNgdotStringArgs) Reset() {
	*x = TagNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[23]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *TagNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*TagNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *TagNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[23]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use TagNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*TagNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// GetI returns the I field, or the zero enum value when x is nil.
func (x *TagNgdotStringArgs) GetI() TagEnum {
	if x != nil {
		return x.I
	}
	return TagEnum_TagArrayType
}
// CommonTypeNgdotCommonArgs is a generated, field-less protobuf message.
type CommonTypeNgdotCommonArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *CommonTypeNgdotCommonArgs) Reset() {
	*x = CommonTypeNgdotCommonArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[24]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *CommonTypeNgdotCommonArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*CommonTypeNgdotCommonArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *CommonTypeNgdotCommonArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[24]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use CommonTypeNgdotCommonArgs.ProtoReflect.Descriptor instead.
func (*CommonTypeNgdotCommonArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// CommonTypeNgdotSizeArgs is a generated, field-less protobuf message.
type CommonTypeNgdotSizeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the zero value and re-attaches the generated message info.
func (x *CommonTypeNgdotSizeArgs) Reset() {
	*x = CommonTypeNgdotSizeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[25]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String formats the message using the protobuf text representation.
func (x *CommonTypeNgdotSizeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*CommonTypeNgdotSizeArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the message info.
func (x *CommonTypeNgdotSizeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[25]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use CommonTypeNgdotSizeArgs.ProtoReflect.Descriptor instead.
func (*CommonTypeNgdotSizeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// BasicTypeNgdotBasicArgs is a field-less message generated from
// ngolofuzz.proto; only the protobuf runtime bookkeeping fields are present.
type BasicTypeNgdotBasicArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its message info.
func (x *BasicTypeNgdotBasicArgs) Reset() {
	*x = BasicTypeNgdotBasicArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[26]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *BasicTypeNgdotBasicArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags BasicTypeNgdotBasicArgs as a protobuf message type.
func (*BasicTypeNgdotBasicArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily binding msgTypes[26].
func (x *BasicTypeNgdotBasicArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[26]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BasicTypeNgdotBasicArgs.ProtoReflect.Descriptor instead.
func (*BasicTypeNgdotBasicArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// BasicTypeNgdotStringArgs is a field-less message generated from
// ngolofuzz.proto; only the protobuf runtime bookkeeping fields are present.
type BasicTypeNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its message info.
func (x *BasicTypeNgdotStringArgs) Reset() {
	*x = BasicTypeNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[27]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *BasicTypeNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags BasicTypeNgdotStringArgs as a protobuf message type.
func (*BasicTypeNgdotStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily binding msgTypes[27].
func (x *BasicTypeNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[27]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BasicTypeNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*BasicTypeNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// UnsupportedTypeNgdotStringArgs is a message generated from ngolofuzz.proto
// with a single message-valued field T (proto field 1).
type UnsupportedTypeNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	T             *UnsupportedTypeStruct `protobuf:"bytes,1,opt,name=t,proto3" json:"t,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its message info.
func (x *UnsupportedTypeNgdotStringArgs) Reset() {
	*x = UnsupportedTypeNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[28]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *UnsupportedTypeNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags UnsupportedTypeNgdotStringArgs as a protobuf message type.
func (*UnsupportedTypeNgdotStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily binding msgTypes[28].
func (x *UnsupportedTypeNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[28]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UnsupportedTypeNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*UnsupportedTypeNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}

// GetT returns the T field, or nil when the receiver is nil.
func (x *UnsupportedTypeNgdotStringArgs) GetT() *UnsupportedTypeStruct {
	if x != nil {
		return x.T
	}
	return nil
}
// NgoloFuzzOne is a message generated from ngolofuzz.proto holding a single
// oneof field Item; exactly one of the wrapper types listed below may be set.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_AttrNgdotString
	//	*NgoloFuzzOne_ClassNgdotString
	//	*NgoloFuzzOne_AttrNgdotGoString
	//	*NgoloFuzzOne_TagNgdotGoString
	//	*NgoloFuzzOne_ClassNgdotGoString
	//	*NgoloFuzzOne_EntryNgdotVal
	//	*NgoloFuzzOne_EntryNgdotAttrField
	//	*NgoloFuzzOne_DataNgdotReader
	//	*NgoloFuzzOne_ReaderNgdotAddressSize
	//	*NgoloFuzzOne_ReaderNgdotByteOrder
	//	*NgoloFuzzOne_ReaderNgdotNext
	//	*NgoloFuzzOne_ReaderNgdotSkipChildren
	//	*NgoloFuzzOne_ReaderNgdotSeekPC
	//	*NgoloFuzzOne_DataNgdotRanges
	//	*NgoloFuzzOne_DataNgdotLineReader
	//	*NgoloFuzzOne_LineReaderNgdotTell
	//	*NgoloFuzzOne_LineReaderNgdotSeek
	//	*NgoloFuzzOne_LineReaderNgdotReset
	//	*NgoloFuzzOne_LineReaderNgdotFiles
	//	*NgoloFuzzOne_New
	//	*NgoloFuzzOne_DataNgdotAddTypes
	//	*NgoloFuzzOne_DataNgdotAddSection
	//	*NgoloFuzzOne_TagNgdotString
	//	*NgoloFuzzOne_CommonTypeNgdotCommon
	//	*NgoloFuzzOne_CommonTypeNgdotSize
	//	*NgoloFuzzOne_BasicTypeNgdotBasic
	//	*NgoloFuzzOne_BasicTypeNgdotString
	//	*NgoloFuzzOne_UnsupportedTypeNgdotString
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its message info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[29]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags NgoloFuzzOne as a protobuf message type.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily binding msgTypes[29].
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[29]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}

// GetItem returns the currently-set oneof wrapper, or nil when none is set
// or the receiver is nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}
// Each GetXxx accessor below returns the payload of the corresponding oneof
// variant when Item currently holds that variant, and nil otherwise
// (including on a nil receiver). The inner `x` deliberately shadows the
// receiver with the type-asserted wrapper.
func (x *NgoloFuzzOne) GetAttrNgdotString() *AttrNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AttrNgdotString); ok {
			return x.AttrNgdotString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetClassNgdotString() *ClassNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClassNgdotString); ok {
			return x.ClassNgdotString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAttrNgdotGoString() *AttrNgdotGoStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AttrNgdotGoString); ok {
			return x.AttrNgdotGoString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetTagNgdotGoString() *TagNgdotGoStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_TagNgdotGoString); ok {
			return x.TagNgdotGoString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetClassNgdotGoString() *ClassNgdotGoStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClassNgdotGoString); ok {
			return x.ClassNgdotGoString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetEntryNgdotVal() *EntryNgdotValArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EntryNgdotVal); ok {
			return x.EntryNgdotVal
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetEntryNgdotAttrField() *EntryNgdotAttrFieldArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EntryNgdotAttrField); ok {
			return x.EntryNgdotAttrField
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDataNgdotReader() *DataNgdotReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DataNgdotReader); ok {
			return x.DataNgdotReader
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotAddressSize() *ReaderNgdotAddressSizeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotAddressSize); ok {
			return x.ReaderNgdotAddressSize
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotByteOrder() *ReaderNgdotByteOrderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotByteOrder); ok {
			return x.ReaderNgdotByteOrder
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotNext() *ReaderNgdotNextArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotNext); ok {
			return x.ReaderNgdotNext
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotSkipChildren() *ReaderNgdotSkipChildrenArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotSkipChildren); ok {
			return x.ReaderNgdotSkipChildren
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotSeekPC() *ReaderNgdotSeekPCArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotSeekPC); ok {
			return x.ReaderNgdotSeekPC
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDataNgdotRanges() *DataNgdotRangesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DataNgdotRanges); ok {
			return x.DataNgdotRanges
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDataNgdotLineReader() *DataNgdotLineReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DataNgdotLineReader); ok {
			return x.DataNgdotLineReader
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetLineReaderNgdotTell() *LineReaderNgdotTellArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_LineReaderNgdotTell); ok {
			return x.LineReaderNgdotTell
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetLineReaderNgdotSeek() *LineReaderNgdotSeekArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_LineReaderNgdotSeek); ok {
			return x.LineReaderNgdotSeek
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetLineReaderNgdotReset() *LineReaderNgdotResetArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_LineReaderNgdotReset); ok {
			return x.LineReaderNgdotReset
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetLineReaderNgdotFiles() *LineReaderNgdotFilesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_LineReaderNgdotFiles); ok {
			return x.LineReaderNgdotFiles
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetNew() *NewArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
			return x.New
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDataNgdotAddTypes() *DataNgdotAddTypesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DataNgdotAddTypes); ok {
			return x.DataNgdotAddTypes
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDataNgdotAddSection() *DataNgdotAddSectionArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DataNgdotAddSection); ok {
			return x.DataNgdotAddSection
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetTagNgdotString() *TagNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_TagNgdotString); ok {
			return x.TagNgdotString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetCommonTypeNgdotCommon() *CommonTypeNgdotCommonArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CommonTypeNgdotCommon); ok {
			return x.CommonTypeNgdotCommon
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetCommonTypeNgdotSize() *CommonTypeNgdotSizeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CommonTypeNgdotSize); ok {
			return x.CommonTypeNgdotSize
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetBasicTypeNgdotBasic() *BasicTypeNgdotBasicArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_BasicTypeNgdotBasic); ok {
			return x.BasicTypeNgdotBasic
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetBasicTypeNgdotString() *BasicTypeNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_BasicTypeNgdotString); ok {
			return x.BasicTypeNgdotString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetUnsupportedTypeNgdotString() *UnsupportedTypeNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_UnsupportedTypeNgdotString); ok {
			return x.UnsupportedTypeNgdotString
		}
	}
	return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every wrapper
// type of the NgoloFuzzOne "item" oneof. Each wrapper below carries exactly
// one field, tagged with its proto field number (1-28).
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_AttrNgdotString struct {
	AttrNgdotString *AttrNgdotStringArgs `protobuf:"bytes,1,opt,name=AttrNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_ClassNgdotString struct {
	ClassNgdotString *ClassNgdotStringArgs `protobuf:"bytes,2,opt,name=ClassNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_AttrNgdotGoString struct {
	AttrNgdotGoString *AttrNgdotGoStringArgs `protobuf:"bytes,3,opt,name=AttrNgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_TagNgdotGoString struct {
	TagNgdotGoString *TagNgdotGoStringArgs `protobuf:"bytes,4,opt,name=TagNgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_ClassNgdotGoString struct {
	ClassNgdotGoString *ClassNgdotGoStringArgs `protobuf:"bytes,5,opt,name=ClassNgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_EntryNgdotVal struct {
	EntryNgdotVal *EntryNgdotValArgs `protobuf:"bytes,6,opt,name=EntryNgdotVal,proto3,oneof"`
}
type NgoloFuzzOne_EntryNgdotAttrField struct {
	EntryNgdotAttrField *EntryNgdotAttrFieldArgs `protobuf:"bytes,7,opt,name=EntryNgdotAttrField,proto3,oneof"`
}
type NgoloFuzzOne_DataNgdotReader struct {
	DataNgdotReader *DataNgdotReaderArgs `protobuf:"bytes,8,opt,name=DataNgdotReader,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotAddressSize struct {
	ReaderNgdotAddressSize *ReaderNgdotAddressSizeArgs `protobuf:"bytes,9,opt,name=ReaderNgdotAddressSize,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotByteOrder struct {
	ReaderNgdotByteOrder *ReaderNgdotByteOrderArgs `protobuf:"bytes,10,opt,name=ReaderNgdotByteOrder,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotNext struct {
	ReaderNgdotNext *ReaderNgdotNextArgs `protobuf:"bytes,11,opt,name=ReaderNgdotNext,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotSkipChildren struct {
	ReaderNgdotSkipChildren *ReaderNgdotSkipChildrenArgs `protobuf:"bytes,12,opt,name=ReaderNgdotSkipChildren,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotSeekPC struct {
	ReaderNgdotSeekPC *ReaderNgdotSeekPCArgs `protobuf:"bytes,13,opt,name=ReaderNgdotSeekPC,proto3,oneof"`
}
type NgoloFuzzOne_DataNgdotRanges struct {
	DataNgdotRanges *DataNgdotRangesArgs `protobuf:"bytes,14,opt,name=DataNgdotRanges,proto3,oneof"`
}
type NgoloFuzzOne_DataNgdotLineReader struct {
	DataNgdotLineReader *DataNgdotLineReaderArgs `protobuf:"bytes,15,opt,name=DataNgdotLineReader,proto3,oneof"`
}
type NgoloFuzzOne_LineReaderNgdotTell struct {
	LineReaderNgdotTell *LineReaderNgdotTellArgs `protobuf:"bytes,16,opt,name=LineReaderNgdotTell,proto3,oneof"`
}
type NgoloFuzzOne_LineReaderNgdotSeek struct {
	LineReaderNgdotSeek *LineReaderNgdotSeekArgs `protobuf:"bytes,17,opt,name=LineReaderNgdotSeek,proto3,oneof"`
}
type NgoloFuzzOne_LineReaderNgdotReset struct {
	LineReaderNgdotReset *LineReaderNgdotResetArgs `protobuf:"bytes,18,opt,name=LineReaderNgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_LineReaderNgdotFiles struct {
	LineReaderNgdotFiles *LineReaderNgdotFilesArgs `protobuf:"bytes,19,opt,name=LineReaderNgdotFiles,proto3,oneof"`
}
type NgoloFuzzOne_New struct {
	New *NewArgs `protobuf:"bytes,20,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_DataNgdotAddTypes struct {
	DataNgdotAddTypes *DataNgdotAddTypesArgs `protobuf:"bytes,21,opt,name=DataNgdotAddTypes,proto3,oneof"`
}
type NgoloFuzzOne_DataNgdotAddSection struct {
	DataNgdotAddSection *DataNgdotAddSectionArgs `protobuf:"bytes,22,opt,name=DataNgdotAddSection,proto3,oneof"`
}
type NgoloFuzzOne_TagNgdotString struct {
	TagNgdotString *TagNgdotStringArgs `protobuf:"bytes,23,opt,name=TagNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_CommonTypeNgdotCommon struct {
	CommonTypeNgdotCommon *CommonTypeNgdotCommonArgs `protobuf:"bytes,24,opt,name=CommonTypeNgdotCommon,proto3,oneof"`
}
type NgoloFuzzOne_CommonTypeNgdotSize struct {
	CommonTypeNgdotSize *CommonTypeNgdotSizeArgs `protobuf:"bytes,25,opt,name=CommonTypeNgdotSize,proto3,oneof"`
}
type NgoloFuzzOne_BasicTypeNgdotBasic struct {
	BasicTypeNgdotBasic *BasicTypeNgdotBasicArgs `protobuf:"bytes,26,opt,name=BasicTypeNgdotBasic,proto3,oneof"`
}
type NgoloFuzzOne_BasicTypeNgdotString struct {
	BasicTypeNgdotString *BasicTypeNgdotStringArgs `protobuf:"bytes,27,opt,name=BasicTypeNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_UnsupportedTypeNgdotString struct {
	UnsupportedTypeNgdotString *UnsupportedTypeNgdotStringArgs `protobuf:"bytes,28,opt,name=UnsupportedTypeNgdotString,proto3,oneof"`
}
// Marker methods: each oneof wrapper satisfies isNgoloFuzzOne_Item,
// sealing the set of types assignable to NgoloFuzzOne.Item.
func (*NgoloFuzzOne_AttrNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ClassNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AttrNgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TagNgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ClassNgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EntryNgdotVal) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EntryNgdotAttrField) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DataNgdotReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotAddressSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotByteOrder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotNext) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotSkipChildren) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotSeekPC) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DataNgdotRanges) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DataNgdotLineReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LineReaderNgdotTell) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LineReaderNgdotSeek) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LineReaderNgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LineReaderNgdotFiles) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DataNgdotAddTypes) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DataNgdotAddSection) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TagNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CommonTypeNgdotCommon) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CommonTypeNgdotSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_BasicTypeNgdotBasic) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_BasicTypeNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UnsupportedTypeNgdotString) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a message generated from ngolofuzz.proto holding a single
// oneof field Item over the scalar wrapper types listed below.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[30]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags NgoloFuzzAny as a protobuf message type.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily binding msgTypes[30].
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[30]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}

// GetItem returns the currently-set oneof wrapper, or nil when none is set
// or the receiver is nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}
// Each GetXxx accessor below returns the value of the corresponding oneof
// variant when Item currently holds it, and the type's zero value otherwise
// (including on a nil receiver).
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented by every wrapper
// type of the NgoloFuzzAny "item" oneof (proto fields 1-5).
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

// Marker methods sealing the oneof wrapper set.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is a message generated from ngolofuzz.proto holding a
// repeated NgoloFuzzOne field (proto field 1).
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its message info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[31]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags NgoloFuzzList as a protobuf message type.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily binding msgTypes[31].
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[31]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}

// GetList returns the List field, or nil when the receiver is nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled FileDescriptor for ngolofuzz.proto,
// populated by the generated init machinery.
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"=\n" +
"\x15UnsupportedTypeStruct\x12$\n" +
"\x03Tag\x18\x01 \x01(\x0e2\x12.ngolofuzz.TagEnumR\x03Tag\"8\n" +
"\x13AttrNgdotStringArgs\x12!\n" +
"\x01i\x18\x01 \x01(\x0e2\x13.ngolofuzz.AttrEnumR\x01i\":\n" +
"\x14ClassNgdotStringArgs\x12\"\n" +
"\x01i\x18\x01 \x01(\x0e2\x14.ngolofuzz.ClassEnumR\x01i\":\n" +
"\x15AttrNgdotGoStringArgs\x12!\n" +
"\x01a\x18\x01 \x01(\x0e2\x13.ngolofuzz.AttrEnumR\x01a\"8\n" +
"\x14TagNgdotGoStringArgs\x12 \n" +
"\x01t\x18\x01 \x01(\x0e2\x12.ngolofuzz.TagEnumR\x01t\"<\n" +
"\x16ClassNgdotGoStringArgs\x12\"\n" +
"\x01i\x18\x01 \x01(\x0e2\x14.ngolofuzz.ClassEnumR\x01i\"6\n" +
"\x11EntryNgdotValArgs\x12!\n" +
"\x01a\x18\x01 \x01(\x0e2\x13.ngolofuzz.AttrEnumR\x01a\"<\n" +
"\x17EntryNgdotAttrFieldArgs\x12!\n" +
"\x01a\x18\x01 \x01(\x0e2\x13.ngolofuzz.AttrEnumR\x01a\"\x15\n" +
"\x13DataNgdotReaderArgs\"\x1c\n" +
"\x1aReaderNgdotAddressSizeArgs\"\x1a\n" +
"\x18ReaderNgdotByteOrderArgs\"\x15\n" +
"\x13ReaderNgdotNextArgs\"\x1d\n" +
"\x1bReaderNgdotSkipChildrenArgs\"'\n" +
"\x15ReaderNgdotSeekPCArgs\x12\x0e\n" +
"\x02pc\x18\x01 \x01(\x04R\x02pc\"\x15\n" +
"\x13DataNgdotRangesArgs\"\x19\n" +
"\x17DataNgdotLineReaderArgs\"\x19\n" +
"\x17LineReaderNgdotTellArgs\"\x19\n" +
"\x17LineReaderNgdotSeekArgs\"\x1a\n" +
"\x18LineReaderNgdotResetArgs\"\x1a\n" +
"\x18LineReaderNgdotFilesArgs\"\xbf\x01\n" +
"\aNewArgs\x12\x16\n" +
"\x06abbrev\x18\x01 \x01(\fR\x06abbrev\x12\x18\n" +
"\aaranges\x18\x02 \x01(\fR\aaranges\x12\x14\n" +
"\x05frame\x18\x03 \x01(\fR\x05frame\x12\x12\n" +
"\x04info\x18\x04 \x01(\fR\x04info\x12\x12\n" +
"\x04line\x18\x05 \x01(\fR\x04line\x12\x1a\n" +
"\bpubnames\x18\x06 \x01(\fR\bpubnames\x12\x16\n" +
"\x06ranges\x18\a \x01(\fR\x06ranges\x12\x10\n" +
"\x03str\x18\b \x01(\fR\x03str\"A\n" +
"\x15DataNgdotAddTypesArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x14\n" +
"\x05types\x18\x02 \x01(\fR\x05types\"I\n" +
"\x17DataNgdotAddSectionArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x1a\n" +
"\bcontents\x18\x02 \x01(\fR\bcontents\"6\n" +
"\x12TagNgdotStringArgs\x12 \n" +
"\x01i\x18\x01 \x01(\x0e2\x12.ngolofuzz.TagEnumR\x01i\"\x1b\n" +
"\x19CommonTypeNgdotCommonArgs\"\x19\n" +
"\x17CommonTypeNgdotSizeArgs\"\x19\n" +
"\x17BasicTypeNgdotBasicArgs\"\x1a\n" +
"\x18BasicTypeNgdotStringArgs\"P\n" +
"\x1eUnsupportedTypeNgdotStringArgs\x12.\n" +
"\x01t\x18\x01 \x01(\v2 .ngolofuzz.UnsupportedTypeStructR\x01t\"\xca\x12\n" +
"\fNgoloFuzzOne\x12J\n" +
"\x0fAttrNgdotString\x18\x01 \x01(\v2\x1e.ngolofuzz.AttrNgdotStringArgsH\x00R\x0fAttrNgdotString\x12M\n" +
"\x10ClassNgdotString\x18\x02 \x01(\v2\x1f.ngolofuzz.ClassNgdotStringArgsH\x00R\x10ClassNgdotString\x12P\n" +
"\x11AttrNgdotGoString\x18\x03 \x01(\v2 .ngolofuzz.AttrNgdotGoStringArgsH\x00R\x11AttrNgdotGoString\x12M\n" +
"\x10TagNgdotGoString\x18\x04 \x01(\v2\x1f.ngolofuzz.TagNgdotGoStringArgsH\x00R\x10TagNgdotGoString\x12S\n" +
"\x12ClassNgdotGoString\x18\x05 \x01(\v2!.ngolofuzz.ClassNgdotGoStringArgsH\x00R\x12ClassNgdotGoString\x12D\n" +
"\rEntryNgdotVal\x18\x06 \x01(\v2\x1c.ngolofuzz.EntryNgdotValArgsH\x00R\rEntryNgdotVal\x12V\n" +
"\x13EntryNgdotAttrField\x18\a \x01(\v2\".ngolofuzz.EntryNgdotAttrFieldArgsH\x00R\x13EntryNgdotAttrField\x12J\n" +
"\x0fDataNgdotReader\x18\b \x01(\v2\x1e.ngolofuzz.DataNgdotReaderArgsH\x00R\x0fDataNgdotReader\x12_\n" +
"\x16ReaderNgdotAddressSize\x18\t \x01(\v2%.ngolofuzz.ReaderNgdotAddressSizeArgsH\x00R\x16ReaderNgdotAddressSize\x12Y\n" +
"\x14ReaderNgdotByteOrder\x18\n" +
" \x01(\v2#.ngolofuzz.ReaderNgdotByteOrderArgsH\x00R\x14ReaderNgdotByteOrder\x12J\n" +
"\x0fReaderNgdotNext\x18\v \x01(\v2\x1e.ngolofuzz.ReaderNgdotNextArgsH\x00R\x0fReaderNgdotNext\x12b\n" +
"\x17ReaderNgdotSkipChildren\x18\f \x01(\v2&.ngolofuzz.ReaderNgdotSkipChildrenArgsH\x00R\x17ReaderNgdotSkipChildren\x12P\n" +
"\x11ReaderNgdotSeekPC\x18\r \x01(\v2 .ngolofuzz.ReaderNgdotSeekPCArgsH\x00R\x11ReaderNgdotSeekPC\x12J\n" +
"\x0fDataNgdotRanges\x18\x0e \x01(\v2\x1e.ngolofuzz.DataNgdotRangesArgsH\x00R\x0fDataNgdotRanges\x12V\n" +
"\x13DataNgdotLineReader\x18\x0f \x01(\v2\".ngolofuzz.DataNgdotLineReaderArgsH\x00R\x13DataNgdotLineReader\x12V\n" +
"\x13LineReaderNgdotTell\x18\x10 \x01(\v2\".ngolofuzz.LineReaderNgdotTellArgsH\x00R\x13LineReaderNgdotTell\x12V\n" +
"\x13LineReaderNgdotSeek\x18\x11 \x01(\v2\".ngolofuzz.LineReaderNgdotSeekArgsH\x00R\x13LineReaderNgdotSeek\x12Y\n" +
"\x14LineReaderNgdotReset\x18\x12 \x01(\v2#.ngolofuzz.LineReaderNgdotResetArgsH\x00R\x14LineReaderNgdotReset\x12Y\n" +
"\x14LineReaderNgdotFiles\x18\x13 \x01(\v2#.ngolofuzz.LineReaderNgdotFilesArgsH\x00R\x14LineReaderNgdotFiles\x12&\n" +
"\x03New\x18\x14 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12P\n" +
"\x11DataNgdotAddTypes\x18\x15 \x01(\v2 .ngolofuzz.DataNgdotAddTypesArgsH\x00R\x11DataNgdotAddTypes\x12V\n" +
"\x13DataNgdotAddSection\x18\x16 \x01(\v2\".ngolofuzz.DataNgdotAddSectionArgsH\x00R\x13DataNgdotAddSection\x12G\n" +
"\x0eTagNgdotString\x18\x17 \x01(\v2\x1d.ngolofuzz.TagNgdotStringArgsH\x00R\x0eTagNgdotString\x12\\\n" +
"\x15CommonTypeNgdotCommon\x18\x18 \x01(\v2$.ngolofuzz.CommonTypeNgdotCommonArgsH\x00R\x15CommonTypeNgdotCommon\x12V\n" +
"\x13CommonTypeNgdotSize\x18\x19 \x01(\v2\".ngolofuzz.CommonTypeNgdotSizeArgsH\x00R\x13CommonTypeNgdotSize\x12V\n" +
"\x13BasicTypeNgdotBasic\x18\x1a \x01(\v2\".ngolofuzz.BasicTypeNgdotBasicArgsH\x00R\x13BasicTypeNgdotBasic\x12Y\n" +
"\x14BasicTypeNgdotString\x18\x1b \x01(\v2#.ngolofuzz.BasicTypeNgdotStringArgsH\x00R\x14BasicTypeNgdotString\x12k\n" +
"\x1aUnsupportedTypeNgdotString\x18\x1c \x01(\v2).ngolofuzz.UnsupportedTypeNgdotStringArgsH\x00R\x1aUnsupportedTypeNgdotStringB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*\x8d\x03\n" +
"\tClassEnum\x12\x10\n" +
"\fClassUnknown\x10\x00\x12\x10\n" +
"\fClassAddress\x10\x01\x12\x0e\n" +
"\n" +
"ClassBlock\x10\x02\x12\x11\n" +
"\rClassConstant\x10\x03\x12\x10\n" +
"\fClassExprLoc\x10\x04\x12\r\n" +
"\tClassFlag\x10\x05\x12\x10\n" +
"\fClassLinePtr\x10\x06\x12\x13\n" +
"\x0fClassLocListPtr\x10\a\x12\x0f\n" +
"\vClassMacPtr\x10\b\x12\x15\n" +
"\x11ClassRangeListPtr\x10\t\x12\x12\n" +
"\x0eClassReference\x10\n" +
"\x12\x15\n" +
"\x11ClassReferenceSig\x10\v\x12\x0f\n" +
"\vClassString\x10\f\x12\x15\n" +
"\x11ClassReferenceAlt\x10\r\x12\x12\n" +
"\x0eClassStringAlt\x10\x0e\x12\x10\n" +
"\fClassAddrPtr\x10\x0f\x12\x10\n" +
"\fClassLocList\x10\x10\x12\x10\n" +
"\fClassRngList\x10\x11\x12\x14\n" +
"\x10ClassRngListsPtr\x10\x12\x12\x16\n" +
"\x12ClassStrOffsetsPtr\x10\x13*\xe3\x12\n" +
"\bAttrEnum\x12\x0f\n" +
"\vAttrSibling\x10\x00\x12\x10\n" +
"\fAttrLocation\x10\x01\x12\f\n" +
"\bAttrName\x10\x02\x12\x10\n" +
"\fAttrOrdering\x10\x03\x12\x10\n" +
"\fAttrByteSize\x10\x04\x12\x11\n" +
"\rAttrBitOffset\x10\x05\x12\x0f\n" +
"\vAttrBitSize\x10\x06\x12\x10\n" +
"\fAttrStmtList\x10\a\x12\r\n" +
"\tAttrLowpc\x10\b\x12\x0e\n" +
"\n" +
"AttrHighpc\x10\t\x12\x10\n" +
"\fAttrLanguage\x10\n" +
"\x12\r\n" +
"\tAttrDiscr\x10\v\x12\x12\n" +
"\x0eAttrDiscrValue\x10\f\x12\x12\n" +
"\x0eAttrVisibility\x10\r\x12\x0e\n" +
"\n" +
"AttrImport\x10\x0e\x12\x14\n" +
"\x10AttrStringLength\x10\x0f\x12\x11\n" +
"\rAttrCommonRef\x10\x10\x12\x0f\n" +
"\vAttrCompDir\x10\x11\x12\x12\n" +
"\x0eAttrConstValue\x10\x12\x12\x16\n" +
"\x12AttrContainingType\x10\x13\x12\x14\n" +
"\x10AttrDefaultValue\x10\x14\x12\x0e\n" +
"\n" +
"AttrInline\x10\x15\x12\x12\n" +
"\x0eAttrIsOptional\x10\x16\x12\x12\n" +
"\x0eAttrLowerBound\x10\x17\x12\x10\n" +
"\fAttrProducer\x10\x18\x12\x12\n" +
"\x0eAttrPrototyped\x10\x19\x12\x12\n" +
"\x0eAttrReturnAddr\x10\x1a\x12\x12\n" +
"\x0eAttrStartScope\x10\x1b\x12\x12\n" +
"\x0eAttrStrideSize\x10\x1c\x12\x12\n" +
"\x0eAttrUpperBound\x10\x1d\x12\x16\n" +
"\x12AttrAbstractOrigin\x10\x1e\x12\x15\n" +
"\x11AttrAccessibility\x10\x1f\x12\x11\n" +
"\rAttrAddrClass\x10 \x12\x12\n" +
"\x0eAttrArtificial\x10!\x12\x11\n" +
"\rAttrBaseTypes\x10\"\x12\x0f\n" +
"\vAttrCalling\x10#\x12\r\n" +
"\tAttrCount\x10$\x12\x15\n" +
"\x11AttrDataMemberLoc\x10%\x12\x12\n" +
"\x0eAttrDeclColumn\x10&\x12\x10\n" +
"\fAttrDeclFile\x10'\x12\x10\n" +
"\fAttrDeclLine\x10(\x12\x13\n" +
"\x0fAttrDeclaration\x10)\x12\x11\n" +
"\rAttrDiscrList\x10*\x12\x10\n" +
"\fAttrEncoding\x10+\x12\x10\n" +
"\fAttrExternal\x10,\x12\x11\n" +
"\rAttrFrameBase\x10-\x12\x0e\n" +
"\n" +
"AttrFriend\x10.\x12\x16\n" +
"\x12AttrIdentifierCase\x10/\x12\x11\n" +
"\rAttrMacroInfo\x100\x12\x14\n" +
"\x10AttrNamelistItem\x101\x12\x10\n" +
"\fAttrPriority\x102\x12\x0f\n" +
"\vAttrSegment\x103\x12\x15\n" +
"\x11AttrSpecification\x104\x12\x12\n" +
"\x0eAttrStaticLink\x105\x12\f\n" +
"\bAttrType\x106\x12\x13\n" +
"\x0fAttrUseLocation\x107\x12\x10\n" +
"\fAttrVarParam\x108\x12\x12\n" +
"\x0eAttrVirtuality\x109\x12\x15\n" +
"\x11AttrVtableElemLoc\x10:\x12\x11\n" +
"\rAttrAllocated\x10;\x12\x12\n" +
"\x0eAttrAssociated\x10<\x12\x14\n" +
"\x10AttrDataLocation\x10=\x12\x0e\n" +
"\n" +
"AttrStride\x10>\x12\x0f\n" +
"\vAttrEntrypc\x10?\x12\x0f\n" +
"\vAttrUseUTF8\x10@\x12\x11\n" +
"\rAttrExtension\x10A\x12\x0e\n" +
"\n" +
"AttrRanges\x10B\x12\x12\n" +
"\x0eAttrTrampoline\x10C\x12\x12\n" +
"\x0eAttrCallColumn\x10D\x12\x10\n" +
"\fAttrCallFile\x10E\x12\x10\n" +
"\fAttrCallLine\x10F\x12\x13\n" +
"\x0fAttrDescription\x10G\x12\x13\n" +
"\x0fAttrBinaryScale\x10H\x12\x14\n" +
"\x10AttrDecimalScale\x10I\x12\r\n" +
"\tAttrSmall\x10J\x12\x13\n" +
"\x0fAttrDecimalSign\x10K\x12\x12\n" +
"\x0eAttrDigitCount\x10L\x12\x15\n" +
"\x11AttrPictureString\x10M\x12\x0f\n" +
"\vAttrMutable\x10N\x12\x15\n" +
"\x11AttrThreadsScaled\x10O\x12\x10\n" +
"\fAttrExplicit\x10P\x12\x15\n" +
"\x11AttrObjectPointer\x10Q\x12\x11\n" +
"\rAttrEndianity\x10R\x12\x11\n" +
"\rAttrElemental\x10S\x12\f\n" +
"\bAttrPure\x10T\x12\x11\n" +
"\rAttrRecursive\x10U\x12\x11\n" +
"\rAttrSignature\x10V\x12\x16\n" +
"\x12AttrMainSubprogram\x10W\x12\x15\n" +
"\x11AttrDataBitOffset\x10X\x12\x11\n" +
"\rAttrConstExpr\x10Y\x12\x11\n" +
"\rAttrEnumClass\x10Z\x12\x13\n" +
"\x0fAttrLinkageName\x10[\x12\x1b\n" +
"\x17AttrStringLengthBitSize\x10\\\x12\x1c\n" +
"\x18AttrStringLengthByteSize\x10]\x12\f\n" +
"\bAttrRank\x10^\x12\x16\n" +
"\x12AttrStrOffsetsBase\x10_\x12\x10\n" +
"\fAttrAddrBase\x10`\x12\x14\n" +
"\x10AttrRnglistsBase\x10a\x12\x0f\n" +
"\vAttrDwoName\x10b\x12\x11\n" +
"\rAttrReference\x10c\x12\x17\n" +
"\x13AttrRvalueReference\x10d\x12\x0e\n" +
"\n" +
"AttrMacros\x10e\x12\x14\n" +
"\x10AttrCallAllCalls\x10f\x12\x1a\n" +
"\x16AttrCallAllSourceCalls\x10g\x12\x18\n" +
"\x14AttrCallAllTailCalls\x10h\x12\x14\n" +
"\x10AttrCallReturnPC\x10i\x12\x11\n" +
"\rAttrCallValue\x10j\x12\x12\n" +
"\x0eAttrCallOrigin\x10k\x12\x15\n" +
"\x11AttrCallParameter\x10l\x12\x0e\n" +
"\n" +
"AttrCallPC\x10m\x12\x14\n" +
"\x10AttrCallTailCall\x10n\x12\x12\n" +
"\x0eAttrCallTarget\x10o\x12\x1b\n" +
"\x17AttrCallTargetClobbered\x10p\x12\x18\n" +
"\x14AttrCallDataLocation\x10q\x12\x15\n" +
"\x11AttrCallDataValue\x10r\x12\x10\n" +
"\fAttrNoreturn\x10s\x12\x11\n" +
"\rAttrAlignment\x10t\x12\x15\n" +
"\x11AttrExportSymbols\x10u\x12\x0f\n" +
"\vAttrDeleted\x10v\x12\x11\n" +
"\rAttrDefaulted\x10w\x12\x14\n" +
"\x10AttrLoclistsBase\x10x*\x9c\v\n" +
"\aTagEnum\x12\x10\n" +
"\fTagArrayType\x10\x00\x12\x10\n" +
"\fTagClassType\x10\x01\x12\x11\n" +
"\rTagEntryPoint\x10\x02\x12\x16\n" +
"\x12TagEnumerationType\x10\x03\x12\x16\n" +
"\x12TagFormalParameter\x10\x04\x12\x1a\n" +
"\x16TagImportedDeclaration\x10\x05\x12\f\n" +
"\bTagLabel\x10\x06\x12\x14\n" +
"\x10TagLexDwarfBlock\x10\a\x12\r\n" +
"\tTagMember\x10\b\x12\x12\n" +
"\x0eTagPointerType\x10\t\x12\x14\n" +
"\x10TagReferenceType\x10\n" +
"\x12\x12\n" +
"\x0eTagCompileUnit\x10\v\x12\x11\n" +
"\rTagStringType\x10\f\x12\x11\n" +
"\rTagStructType\x10\r\x12\x15\n" +
"\x11TagSubroutineType\x10\x0e\x12\x0e\n" +
"\n" +
"TagTypedef\x10\x0f\x12\x10\n" +
"\fTagUnionType\x10\x10\x12\x1c\n" +
"\x18TagUnspecifiedParameters\x10\x11\x12\x0e\n" +
"\n" +
"TagVariant\x10\x12\x12\x17\n" +
"\x13TagCommonDwarfBlock\x10\x13\x12\x16\n" +
"\x12TagCommonInclusion\x10\x14\x12\x12\n" +
"\x0eTagInheritance\x10\x15\x12\x18\n" +
"\x14TagInlinedSubroutine\x10\x16\x12\r\n" +
"\tTagModule\x10\x17\x12\x16\n" +
"\x12TagPtrToMemberType\x10\x18\x12\x0e\n" +
"\n" +
"TagSetType\x10\x19\x12\x13\n" +
"\x0fTagSubrangeType\x10\x1a\x12\x0f\n" +
"\vTagWithStmt\x10\x1b\x12\x18\n" +
"\x14TagAccessDeclaration\x10\x1c\x12\x0f\n" +
"\vTagBaseType\x10\x1d\x12\x16\n" +
"\x12TagCatchDwarfBlock\x10\x1e\x12\x10\n" +
"\fTagConstType\x10\x1f\x12\x0f\n" +
"\vTagConstant\x10 \x12\x11\n" +
"\rTagEnumerator\x10!\x12\x0f\n" +
"\vTagFileType\x10\"\x12\r\n" +
"\tTagFriend\x10#\x12\x0f\n" +
"\vTagNamelist\x10$\x12\x13\n" +
"\x0fTagNamelistItem\x10%\x12\x11\n" +
"\rTagPackedType\x10&\x12\x11\n" +
"\rTagSubprogram\x10'\x12\x1c\n" +
"\x18TagTemplateTypeParameter\x10(\x12\x1d\n" +
"\x19TagTemplateValueParameter\x10)\x12\x11\n" +
"\rTagThrownType\x10*\x12\x14\n" +
"\x10TagTryDwarfBlock\x10+\x12\x12\n" +
"\x0eTagVariantPart\x10,\x12\x0f\n" +
"\vTagVariable\x10-\x12\x13\n" +
"\x0fTagVolatileType\x10.\x12\x15\n" +
"\x11TagDwarfProcedure\x10/\x12\x13\n" +
"\x0fTagRestrictType\x100\x12\x14\n" +
"\x10TagInterfaceType\x101\x12\x10\n" +
"\fTagNamespace\x102\x12\x15\n" +
"\x11TagImportedModule\x103\x12\x16\n" +
"\x12TagUnspecifiedType\x104\x12\x12\n" +
"\x0eTagPartialUnit\x105\x12\x13\n" +
"\x0fTagImportedUnit\x106\x12\x12\n" +
"\x0eTagMutableType\x107\x12\x10\n" +
"\fTagCondition\x108\x12\x11\n" +
"\rTagSharedType\x109\x12\x0f\n" +
"\vTagTypeUnit\x10:\x12\x1a\n" +
"\x16TagRvalueReferenceType\x10;\x12\x14\n" +
"\x10TagTemplateAlias\x10<\x12\x12\n" +
"\x0eTagCoarrayType\x10=\x12\x16\n" +
"\x12TagGenericSubrange\x10>\x12\x12\n" +
"\x0eTagDynamicType\x10?\x12\x11\n" +
"\rTagAtomicType\x10@\x12\x0f\n" +
"\vTagCallSite\x10A\x12\x18\n" +
"\x14TagCallSiteParameter\x10B\x12\x13\n" +
"\x0fTagSkeletonUnit\x10C\x12\x14\n" +
"\x10TagImmutableType\x10DB\x18Z\x16./;fuzz_ng_debug_dwarfb\x06proto3"
var (
	// Guard and cache for the lazily gzip-compressed raw descriptor below.
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP returns the gzip-compressed wire-format
// descriptor of ngolofuzz.proto, compressing it exactly once on first use.
// Generated by protoc-gen-go; do not edit by hand.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Generated runtime type-info tables: 3 enums and 32 messages, indexed in
// descriptor order. Do not edit; positions are referenced by depIdxs below.
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 32)

// file_ngolofuzz_proto_goTypes maps descriptor type indexes to the generated
// Go types (enums first, then message pointers).
var file_ngolofuzz_proto_goTypes = []any{
	(ClassEnum)(0),                          // 0: ngolofuzz.ClassEnum
	(AttrEnum)(0),                           // 1: ngolofuzz.AttrEnum
	(TagEnum)(0),                            // 2: ngolofuzz.TagEnum
	(*UnsupportedTypeStruct)(nil),           // 3: ngolofuzz.UnsupportedTypeStruct
	(*AttrNgdotStringArgs)(nil),             // 4: ngolofuzz.AttrNgdotStringArgs
	(*ClassNgdotStringArgs)(nil),            // 5: ngolofuzz.ClassNgdotStringArgs
	(*AttrNgdotGoStringArgs)(nil),           // 6: ngolofuzz.AttrNgdotGoStringArgs
	(*TagNgdotGoStringArgs)(nil),            // 7: ngolofuzz.TagNgdotGoStringArgs
	(*ClassNgdotGoStringArgs)(nil),          // 8: ngolofuzz.ClassNgdotGoStringArgs
	(*EntryNgdotValArgs)(nil),               // 9: ngolofuzz.EntryNgdotValArgs
	(*EntryNgdotAttrFieldArgs)(nil),         // 10: ngolofuzz.EntryNgdotAttrFieldArgs
	(*DataNgdotReaderArgs)(nil),             // 11: ngolofuzz.DataNgdotReaderArgs
	(*ReaderNgdotAddressSizeArgs)(nil),      // 12: ngolofuzz.ReaderNgdotAddressSizeArgs
	(*ReaderNgdotByteOrderArgs)(nil),        // 13: ngolofuzz.ReaderNgdotByteOrderArgs
	(*ReaderNgdotNextArgs)(nil),             // 14: ngolofuzz.ReaderNgdotNextArgs
	(*ReaderNgdotSkipChildrenArgs)(nil),     // 15: ngolofuzz.ReaderNgdotSkipChildrenArgs
	(*ReaderNgdotSeekPCArgs)(nil),           // 16: ngolofuzz.ReaderNgdotSeekPCArgs
	(*DataNgdotRangesArgs)(nil),             // 17: ngolofuzz.DataNgdotRangesArgs
	(*DataNgdotLineReaderArgs)(nil),         // 18: ngolofuzz.DataNgdotLineReaderArgs
	(*LineReaderNgdotTellArgs)(nil),         // 19: ngolofuzz.LineReaderNgdotTellArgs
	(*LineReaderNgdotSeekArgs)(nil),         // 20: ngolofuzz.LineReaderNgdotSeekArgs
	(*LineReaderNgdotResetArgs)(nil),        // 21: ngolofuzz.LineReaderNgdotResetArgs
	(*LineReaderNgdotFilesArgs)(nil),        // 22: ngolofuzz.LineReaderNgdotFilesArgs
	(*NewArgs)(nil),                         // 23: ngolofuzz.NewArgs
	(*DataNgdotAddTypesArgs)(nil),           // 24: ngolofuzz.DataNgdotAddTypesArgs
	(*DataNgdotAddSectionArgs)(nil),         // 25: ngolofuzz.DataNgdotAddSectionArgs
	(*TagNgdotStringArgs)(nil),              // 26: ngolofuzz.TagNgdotStringArgs
	(*CommonTypeNgdotCommonArgs)(nil),       // 27: ngolofuzz.CommonTypeNgdotCommonArgs
	(*CommonTypeNgdotSizeArgs)(nil),         // 28: ngolofuzz.CommonTypeNgdotSizeArgs
	(*BasicTypeNgdotBasicArgs)(nil),         // 29: ngolofuzz.BasicTypeNgdotBasicArgs
	(*BasicTypeNgdotStringArgs)(nil),        // 30: ngolofuzz.BasicTypeNgdotStringArgs
	(*UnsupportedTypeNgdotStringArgs)(nil),  // 31: ngolofuzz.UnsupportedTypeNgdotStringArgs
	(*NgoloFuzzOne)(nil),                    // 32: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                    // 33: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                   // 34: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs records, for each field whose type is another
// message or enum, the index of that type in file_ngolofuzz_proto_goTypes.
// The trailing five entries delimit sub-lists for methods/extensions/fields.
// Generated by protoc-gen-go; do not edit by hand.
var file_ngolofuzz_proto_depIdxs = []int32{
	2,  // 0: ngolofuzz.UnsupportedTypeStruct.Tag:type_name -> ngolofuzz.TagEnum
	1,  // 1: ngolofuzz.AttrNgdotStringArgs.i:type_name -> ngolofuzz.AttrEnum
	0,  // 2: ngolofuzz.ClassNgdotStringArgs.i:type_name -> ngolofuzz.ClassEnum
	1,  // 3: ngolofuzz.AttrNgdotGoStringArgs.a:type_name -> ngolofuzz.AttrEnum
	2,  // 4: ngolofuzz.TagNgdotGoStringArgs.t:type_name -> ngolofuzz.TagEnum
	0,  // 5: ngolofuzz.ClassNgdotGoStringArgs.i:type_name -> ngolofuzz.ClassEnum
	1,  // 6: ngolofuzz.EntryNgdotValArgs.a:type_name -> ngolofuzz.AttrEnum
	1,  // 7: ngolofuzz.EntryNgdotAttrFieldArgs.a:type_name -> ngolofuzz.AttrEnum
	2,  // 8: ngolofuzz.TagNgdotStringArgs.i:type_name -> ngolofuzz.TagEnum
	3,  // 9: ngolofuzz.UnsupportedTypeNgdotStringArgs.t:type_name -> ngolofuzz.UnsupportedTypeStruct
	4,  // 10: ngolofuzz.NgoloFuzzOne.AttrNgdotString:type_name -> ngolofuzz.AttrNgdotStringArgs
	5,  // 11: ngolofuzz.NgoloFuzzOne.ClassNgdotString:type_name -> ngolofuzz.ClassNgdotStringArgs
	6,  // 12: ngolofuzz.NgoloFuzzOne.AttrNgdotGoString:type_name -> ngolofuzz.AttrNgdotGoStringArgs
	7,  // 13: ngolofuzz.NgoloFuzzOne.TagNgdotGoString:type_name -> ngolofuzz.TagNgdotGoStringArgs
	8,  // 14: ngolofuzz.NgoloFuzzOne.ClassNgdotGoString:type_name -> ngolofuzz.ClassNgdotGoStringArgs
	9,  // 15: ngolofuzz.NgoloFuzzOne.EntryNgdotVal:type_name -> ngolofuzz.EntryNgdotValArgs
	10, // 16: ngolofuzz.NgoloFuzzOne.EntryNgdotAttrField:type_name -> ngolofuzz.EntryNgdotAttrFieldArgs
	11, // 17: ngolofuzz.NgoloFuzzOne.DataNgdotReader:type_name -> ngolofuzz.DataNgdotReaderArgs
	12, // 18: ngolofuzz.NgoloFuzzOne.ReaderNgdotAddressSize:type_name -> ngolofuzz.ReaderNgdotAddressSizeArgs
	13, // 19: ngolofuzz.NgoloFuzzOne.ReaderNgdotByteOrder:type_name -> ngolofuzz.ReaderNgdotByteOrderArgs
	14, // 20: ngolofuzz.NgoloFuzzOne.ReaderNgdotNext:type_name -> ngolofuzz.ReaderNgdotNextArgs
	15, // 21: ngolofuzz.NgoloFuzzOne.ReaderNgdotSkipChildren:type_name -> ngolofuzz.ReaderNgdotSkipChildrenArgs
	16, // 22: ngolofuzz.NgoloFuzzOne.ReaderNgdotSeekPC:type_name -> ngolofuzz.ReaderNgdotSeekPCArgs
	17, // 23: ngolofuzz.NgoloFuzzOne.DataNgdotRanges:type_name -> ngolofuzz.DataNgdotRangesArgs
	18, // 24: ngolofuzz.NgoloFuzzOne.DataNgdotLineReader:type_name -> ngolofuzz.DataNgdotLineReaderArgs
	19, // 25: ngolofuzz.NgoloFuzzOne.LineReaderNgdotTell:type_name -> ngolofuzz.LineReaderNgdotTellArgs
	20, // 26: ngolofuzz.NgoloFuzzOne.LineReaderNgdotSeek:type_name -> ngolofuzz.LineReaderNgdotSeekArgs
	21, // 27: ngolofuzz.NgoloFuzzOne.LineReaderNgdotReset:type_name -> ngolofuzz.LineReaderNgdotResetArgs
	22, // 28: ngolofuzz.NgoloFuzzOne.LineReaderNgdotFiles:type_name -> ngolofuzz.LineReaderNgdotFilesArgs
	23, // 29: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
	24, // 30: ngolofuzz.NgoloFuzzOne.DataNgdotAddTypes:type_name -> ngolofuzz.DataNgdotAddTypesArgs
	25, // 31: ngolofuzz.NgoloFuzzOne.DataNgdotAddSection:type_name -> ngolofuzz.DataNgdotAddSectionArgs
	26, // 32: ngolofuzz.NgoloFuzzOne.TagNgdotString:type_name -> ngolofuzz.TagNgdotStringArgs
	27, // 33: ngolofuzz.NgoloFuzzOne.CommonTypeNgdotCommon:type_name -> ngolofuzz.CommonTypeNgdotCommonArgs
	28, // 34: ngolofuzz.NgoloFuzzOne.CommonTypeNgdotSize:type_name -> ngolofuzz.CommonTypeNgdotSizeArgs
	29, // 35: ngolofuzz.NgoloFuzzOne.BasicTypeNgdotBasic:type_name -> ngolofuzz.BasicTypeNgdotBasicArgs
	30, // 36: ngolofuzz.NgoloFuzzOne.BasicTypeNgdotString:type_name -> ngolofuzz.BasicTypeNgdotStringArgs
	31, // 37: ngolofuzz.NgoloFuzzOne.UnsupportedTypeNgdotString:type_name -> ngolofuzz.UnsupportedTypeNgdotStringArgs
	32, // 38: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	39, // [39:39] is the sub-list for method output_type
	39, // [39:39] is the sub-list for method input_type
	39, // [39:39] is the sub-list for extension type_name
	39, // [39:39] is the sub-list for extension extendee
	0,  // [0:39] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init builds and registers the file descriptor, enum
// and message type information with the protobuf runtime. It is idempotent:
// a second call returns immediately once File_ngolofuzz_proto is set.
// Generated by protoc-gen-go; do not edit by hand.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Message index 29 is NgoloFuzzOne; register its oneof wrapper types.
	file_ngolofuzz_proto_msgTypes[29].OneofWrappers = []any{
		(*NgoloFuzzOne_AttrNgdotString)(nil),
		(*NgoloFuzzOne_ClassNgdotString)(nil),
		(*NgoloFuzzOne_AttrNgdotGoString)(nil),
		(*NgoloFuzzOne_TagNgdotGoString)(nil),
		(*NgoloFuzzOne_ClassNgdotGoString)(nil),
		(*NgoloFuzzOne_EntryNgdotVal)(nil),
		(*NgoloFuzzOne_EntryNgdotAttrField)(nil),
		(*NgoloFuzzOne_DataNgdotReader)(nil),
		(*NgoloFuzzOne_ReaderNgdotAddressSize)(nil),
		(*NgoloFuzzOne_ReaderNgdotByteOrder)(nil),
		(*NgoloFuzzOne_ReaderNgdotNext)(nil),
		(*NgoloFuzzOne_ReaderNgdotSkipChildren)(nil),
		(*NgoloFuzzOne_ReaderNgdotSeekPC)(nil),
		(*NgoloFuzzOne_DataNgdotRanges)(nil),
		(*NgoloFuzzOne_DataNgdotLineReader)(nil),
		(*NgoloFuzzOne_LineReaderNgdotTell)(nil),
		(*NgoloFuzzOne_LineReaderNgdotSeek)(nil),
		(*NgoloFuzzOne_LineReaderNgdotReset)(nil),
		(*NgoloFuzzOne_LineReaderNgdotFiles)(nil),
		(*NgoloFuzzOne_New)(nil),
		(*NgoloFuzzOne_DataNgdotAddTypes)(nil),
		(*NgoloFuzzOne_DataNgdotAddSection)(nil),
		(*NgoloFuzzOne_TagNgdotString)(nil),
		(*NgoloFuzzOne_CommonTypeNgdotCommon)(nil),
		(*NgoloFuzzOne_CommonTypeNgdotSize)(nil),
		(*NgoloFuzzOne_BasicTypeNgdotBasic)(nil),
		(*NgoloFuzzOne_BasicTypeNgdotString)(nil),
		(*NgoloFuzzOne_UnsupportedTypeNgdotString)(nil),
	}
	// Message index 30 is NgoloFuzzAny; register its oneof wrapper types.
	file_ngolofuzz_proto_msgTypes[30].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      3,
			NumMessages:   32,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		EnumInfos:         file_ngolofuzz_proto_enumTypes,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the construction-time tables; the runtime holds its own copies.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_debug_gosym
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"debug/gosym"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stand-in for a network connection: Read
// serves bytes from a fixed buffer, Write discards its input, and Close
// simply exhausts the buffer. It lets fuzzers feed arbitrary data to
// code that expects a net.Conn-like object.
type FuzzingConn struct {
	buf    []byte // data served by successive Read calls
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of c.buf into b, returning the
// number of bytes copied. Once the buffer is exhausted it returns io.EOF.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	// BUG FIX: this condition used to be len(b) < len(c.buf)+c.offset,
	// which made a short final read advance offset past len(buf) and
	// report more bytes than were actually copied into b, violating the
	// io.Reader contract (n must not exceed the bytes written).
	if len(b) < remaining {
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write discards b and reports it as fully written, so writers never block.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed; later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is the placeholder net.Addr returned by FuzzingConn's
// LocalAddr and RemoteAddr methods.
type FuzzingAddr struct{}

// Network reports a fixed, fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed, fake address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address; the connection is not real.
func (c *FuzzingConn) LocalAddr() net.Addr {
	var a FuzzingAddr
	return &a
}

// RemoteAddr returns a placeholder address; the connection is not real.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	var a FuzzingAddr
	return &a
}

// SetDeadline is a no-op; an in-memory connection cannot time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op; an in-memory connection cannot time out.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op; an in-memory connection cannot time out.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start of
// the buffer, ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader that yields the bytes of a.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each element of a to the platform int type,
// preserving order and length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each element of a to 16 unsigned bits,
// preserving order and length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s (decoded as UTF-8), or NUL when s
// is empty.
func GetRune(s string) rune {
	for _, first := range s {
		return first
	}
	return 0
}
// FuzzNG_valid decodes data as an NgoloFuzzList and executes the call
// sequence it describes. The input is expected to be a valid protobuf
// encoding; a decode failure panics (and is not recovered, since the
// recover handler is installed only after decoding succeeds).
func FuzzNG_valid(data []byte) int {
	list := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, list); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string-valued panics raised intentionally by the target;
	// re-raise anything else so real crashes still surface.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// FuzzNG_unsure is like FuzzNG_valid but tolerates input that is not a
// valid protobuf encoding, returning 0 instead of panicking.
func FuzzNG_unsure(data []byte) int {
	list := &NgoloFuzzList{}
	if proto.Unmarshal(data, list) != nil {
		return 0
	}
	// Swallow string-valued panics raised intentionally by the target;
	// re-raise anything else so real crashes still surface.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// initialized guards the one-time reproducer-file setup below.
var initialized bool

// FuzzNG_List interprets gen as a sequence of debug/gosym API calls and
// executes them. Results of constructors (LineTable, Table, Sym) are kept
// in slices and recycled round-robin as receivers for later method calls.
// Returns 0 when execution stopped early, 1 when the whole list ran.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// Optionally emit a Go-source reproducer for this input.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var LineTableResults []*gosym.LineTable
	LineTableResultsIndex := 0
	var TableResults []*gosym.Table
	TableResultsIndex := 0
	var SymResults []*gosym.Sym
	SymResultsIndex := 0
	for l := range gen.List {
		if l > 4096 {
			// Cap the number of executed calls per input.
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_LineTableNgdotPCToLine:
			if len(LineTableResults) == 0 {
				continue
			}
			arg0 := LineTableResults[LineTableResultsIndex]
			LineTableResultsIndex = (LineTableResultsIndex + 1) % len(LineTableResults)
			arg0.PCToLine(a.LineTableNgdotPCToLine.Pc)
		case *NgoloFuzzOne_LineTableNgdotLineToPC:
			if len(LineTableResults) == 0 {
				continue
			}
			arg0 := LineTableResults[LineTableResultsIndex]
			LineTableResultsIndex = (LineTableResultsIndex + 1) % len(LineTableResults)
			arg1 := int(a.LineTableNgdotLineToPC.Line)
			arg0.LineToPC(arg1, a.LineTableNgdotLineToPC.Maxpc)
		case *NgoloFuzzOne_NewLineTable:
			r0 := gosym.NewLineTable(a.NewLineTable.Data, a.NewLineTable.Text)
			if r0 != nil {
				LineTableResults = append(LineTableResults, r0)
			}
		case *NgoloFuzzOne_SymNgdotStatic:
			if len(SymResults) == 0 {
				continue
			}
			arg0 := SymResults[SymResultsIndex]
			SymResultsIndex = (SymResultsIndex + 1) % len(SymResults)
			arg0.Static()
		case *NgoloFuzzOne_SymNgdotPackageName:
			if len(SymResults) == 0 {
				continue
			}
			arg0 := SymResults[SymResultsIndex]
			SymResultsIndex = (SymResultsIndex + 1) % len(SymResults)
			arg0.PackageName()
		case *NgoloFuzzOne_SymNgdotReceiverName:
			if len(SymResults) == 0 {
				continue
			}
			arg0 := SymResults[SymResultsIndex]
			SymResultsIndex = (SymResultsIndex + 1) % len(SymResults)
			arg0.ReceiverName()
		case *NgoloFuzzOne_SymNgdotBaseName:
			if len(SymResults) == 0 {
				continue
			}
			arg0 := SymResults[SymResultsIndex]
			SymResultsIndex = (SymResultsIndex + 1) % len(SymResults)
			arg0.BaseName()
		case *NgoloFuzzOne_NewTable:
			if len(LineTableResults) == 0 {
				continue
			}
			arg1 := LineTableResults[LineTableResultsIndex]
			LineTableResultsIndex = (LineTableResultsIndex + 1) % len(LineTableResults)
			r0, r1 := gosym.NewTable(a.NewTable.Symtab, arg1)
			if r1 != nil {
				r1.Error()
				return 0
			}
			// BUG FIX: the constructed table used to be discarded
			// (`_, r1 :=`), so TableResults and SymResults stayed empty
			// and every Table*/Sym* case in this switch was dead code.
			// Keep the table and expose its symbols to the Sym* cases.
			if r0 != nil {
				TableResults = append(TableResults, r0)
				for i := range r0.Syms {
					SymResults = append(SymResults, &r0.Syms[i])
				}
			}
		case *NgoloFuzzOne_TableNgdotPCToFunc:
			if len(TableResults) == 0 {
				continue
			}
			arg0 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			arg0.PCToFunc(a.TableNgdotPCToFunc.Pc)
		case *NgoloFuzzOne_TableNgdotPCToLine:
			if len(TableResults) == 0 {
				continue
			}
			arg0 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			arg0.PCToLine(a.TableNgdotPCToLine.Pc)
		case *NgoloFuzzOne_TableNgdotLineToPC:
			if len(TableResults) == 0 {
				continue
			}
			arg0 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			arg2 := int(a.TableNgdotLineToPC.Line)
			_, _, r2 := arg0.LineToPC(a.TableNgdotLineToPC.File, arg2)
			if r2 != nil {
				r2.Error()
				return 0
			}
		case *NgoloFuzzOne_TableNgdotLookupSym:
			if len(TableResults) == 0 {
				continue
			}
			arg0 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			arg0.LookupSym(a.TableNgdotLookupSym.Name)
		case *NgoloFuzzOne_TableNgdotLookupFunc:
			if len(TableResults) == 0 {
				continue
			}
			arg0 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			arg0.LookupFunc(a.TableNgdotLookupFunc.Name)
		case *NgoloFuzzOne_TableNgdotSymByAddr:
			if len(TableResults) == 0 {
				continue
			}
			arg0 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			arg0.SymByAddr(a.TableNgdotSymByAddr.Addr)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the call sequence in gen
// to w: one line of source per executed call, mirroring FuzzNG_List.
// NOTE(review): TableNb and SymNb are never incremented anywhere in this
// function (the NewTable case below only prints and advances the LineTable
// index), so all Table*/Sym* branches are currently unreachable and those
// calls never appear in reproducers — presumably a generator defect; verify
// against FuzzNG_List before relying on the reproducer output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// Per-type counters of created values and round-robin reuse indexes.
	LineTableNb := 0
	LineTableResultsIndex := 0
	TableNb := 0
	TableResultsIndex := 0
	SymNb := 0
	SymResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_LineTableNgdotPCToLine:
			if LineTableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("LineTable%d.PCToLine(%#+v)\n", LineTableResultsIndex, a.LineTableNgdotPCToLine.Pc))
			LineTableResultsIndex = (LineTableResultsIndex + 1) % LineTableNb
		case *NgoloFuzzOne_LineTableNgdotLineToPC:
			if LineTableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("LineTable%d.LineToPC(int(%#+v), %#+v)\n", LineTableResultsIndex, a.LineTableNgdotLineToPC.Line, a.LineTableNgdotLineToPC.Maxpc))
			LineTableResultsIndex = (LineTableResultsIndex + 1) % LineTableNb
		case *NgoloFuzzOne_NewLineTable:
			w.WriteString(fmt.Sprintf("LineTable%d := gosym.NewLineTable(%#+v, %#+v)\n", LineTableNb, a.NewLineTable.Data, a.NewLineTable.Text))
			LineTableNb = LineTableNb + 1
		case *NgoloFuzzOne_SymNgdotStatic:
			if SymNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Sym%d.Static()\n", SymResultsIndex))
			SymResultsIndex = (SymResultsIndex + 1) % SymNb
		case *NgoloFuzzOne_SymNgdotPackageName:
			if SymNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Sym%d.PackageName()\n", SymResultsIndex))
			SymResultsIndex = (SymResultsIndex + 1) % SymNb
		case *NgoloFuzzOne_SymNgdotReceiverName:
			if SymNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Sym%d.ReceiverName()\n", SymResultsIndex))
			SymResultsIndex = (SymResultsIndex + 1) % SymNb
		case *NgoloFuzzOne_SymNgdotBaseName:
			if SymNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Sym%d.BaseName()\n", SymResultsIndex))
			SymResultsIndex = (SymResultsIndex + 1) % SymNb
		case *NgoloFuzzOne_NewTable:
			if LineTableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("gosym.NewTable(%#+v, LineTable%d)\n", a.NewTable.Symtab, (LineTableResultsIndex + 0) % LineTableNb))
			LineTableResultsIndex = (LineTableResultsIndex + 1) % LineTableNb
		case *NgoloFuzzOne_TableNgdotPCToFunc:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Table%d.PCToFunc(%#+v)\n", TableResultsIndex, a.TableNgdotPCToFunc.Pc))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		case *NgoloFuzzOne_TableNgdotPCToLine:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Table%d.PCToLine(%#+v)\n", TableResultsIndex, a.TableNgdotPCToLine.Pc))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		case *NgoloFuzzOne_TableNgdotLineToPC:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Table%d.LineToPC(%#+v, int(%#+v))\n", TableResultsIndex, a.TableNgdotLineToPC.File, a.TableNgdotLineToPC.Line))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		case *NgoloFuzzOne_TableNgdotLookupSym:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Table%d.LookupSym(%#+v)\n", TableResultsIndex, a.TableNgdotLookupSym.Name))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		case *NgoloFuzzOne_TableNgdotLookupFunc:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Table%d.LookupFunc(%#+v)\n", TableResultsIndex, a.TableNgdotLookupFunc.Name))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		case *NgoloFuzzOne_TableNgdotSymByAddr:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Table%d.SymByAddr(%#+v)\n", TableResultsIndex, a.TableNgdotSymByAddr.Addr))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_debug_gosym
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// LineTableNgdotPCToLineArgs is the generated protobuf message carrying the
// argument (pc) for a fuzzed LineTable.PCToLine call. Generated by
// protoc-gen-go from ngolofuzz.proto; do not edit by hand.
type LineTableNgdotPCToLineArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Pc            uint64                 `protobuf:"varint,1,opt,name=pc,proto3" json:"pc,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *LineTableNgdotPCToLineArgs) Reset() {
	*x = LineTableNgdotPCToLineArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *LineTableNgdotPCToLineArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LineTableNgdotPCToLineArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *LineTableNgdotPCToLineArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LineTableNgdotPCToLineArgs.ProtoReflect.Descriptor instead.
func (*LineTableNgdotPCToLineArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetPc returns the pc field, or 0 for a nil receiver.
func (x *LineTableNgdotPCToLineArgs) GetPc() uint64 {
	if x != nil {
		return x.Pc
	}
	return 0
}
// LineTableNgdotLineToPCArgs is the generated protobuf message carrying the
// arguments (line, maxpc) for a fuzzed LineTable.LineToPC call. Generated by
// protoc-gen-go from ngolofuzz.proto; do not edit by hand.
type LineTableNgdotLineToPCArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Line          int64                  `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"`
	Maxpc         uint64                 `protobuf:"varint,2,opt,name=maxpc,proto3" json:"maxpc,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *LineTableNgdotLineToPCArgs) Reset() {
	*x = LineTableNgdotLineToPCArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *LineTableNgdotLineToPCArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LineTableNgdotLineToPCArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *LineTableNgdotLineToPCArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LineTableNgdotLineToPCArgs.ProtoReflect.Descriptor instead.
func (*LineTableNgdotLineToPCArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetLine returns the line field, or 0 for a nil receiver.
func (x *LineTableNgdotLineToPCArgs) GetLine() int64 {
	if x != nil {
		return x.Line
	}
	return 0
}

// GetMaxpc returns the maxpc field, or 0 for a nil receiver.
func (x *LineTableNgdotLineToPCArgs) GetMaxpc() uint64 {
	if x != nil {
		return x.Maxpc
	}
	return 0
}
// NewLineTableArgs is the generated protobuf message carrying the arguments
// (data, text) for a fuzzed gosym.NewLineTable call. Generated by
// protoc-gen-go from ngolofuzz.proto; do not edit by hand.
type NewLineTableArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Data          []byte                 `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	Text          uint64                 `protobuf:"varint,2,opt,name=text,proto3" json:"text,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *NewLineTableArgs) Reset() {
	*x = NewLineTableArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *NewLineTableArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewLineTableArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *NewLineTableArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewLineTableArgs.ProtoReflect.Descriptor instead.
func (*NewLineTableArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetData returns the data field, or nil for a nil receiver.
func (x *NewLineTableArgs) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}

// GetText returns the text field, or 0 for a nil receiver.
func (x *NewLineTableArgs) GetText() uint64 {
	if x != nil {
		return x.Text
	}
	return 0
}
// SymNgdotStaticArgs is the generated, field-less protobuf message standing
// for a fuzzed Sym.Static call (the method takes no arguments). Generated
// by protoc-gen-go from ngolofuzz.proto; do not edit by hand.
type SymNgdotStaticArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *SymNgdotStaticArgs) Reset() {
	*x = SymNgdotStaticArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *SymNgdotStaticArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SymNgdotStaticArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *SymNgdotStaticArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SymNgdotStaticArgs.ProtoReflect.Descriptor instead.
func (*SymNgdotStaticArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// SymNgdotPackageNameArgs is the generated, field-less protobuf message
// standing for a fuzzed Sym.PackageName call. Generated by protoc-gen-go
// from ngolofuzz.proto; do not edit by hand.
type SymNgdotPackageNameArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *SymNgdotPackageNameArgs) Reset() {
	*x = SymNgdotPackageNameArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *SymNgdotPackageNameArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SymNgdotPackageNameArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *SymNgdotPackageNameArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SymNgdotPackageNameArgs.ProtoReflect.Descriptor instead.
func (*SymNgdotPackageNameArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// SymNgdotReceiverNameArgs is the generated, field-less protobuf message
// standing for a fuzzed Sym.ReceiverName call. Generated by protoc-gen-go
// from ngolofuzz.proto; do not edit by hand.
type SymNgdotReceiverNameArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *SymNgdotReceiverNameArgs) Reset() {
	*x = SymNgdotReceiverNameArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *SymNgdotReceiverNameArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SymNgdotReceiverNameArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *SymNgdotReceiverNameArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SymNgdotReceiverNameArgs.ProtoReflect.Descriptor instead.
func (*SymNgdotReceiverNameArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// SymNgdotBaseNameArgs is the generated, field-less protobuf message
// standing for a fuzzed Sym.BaseName call. Generated by protoc-gen-go
// from ngolofuzz.proto; do not edit by hand.
type SymNgdotBaseNameArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *SymNgdotBaseNameArgs) Reset() {
	*x = SymNgdotBaseNameArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *SymNgdotBaseNameArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SymNgdotBaseNameArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *SymNgdotBaseNameArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SymNgdotBaseNameArgs.ProtoReflect.Descriptor instead.
func (*SymNgdotBaseNameArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// NewTableArgs is the generated protobuf message carrying the symtab bytes
// for a fuzzed gosym.NewTable call. Generated by protoc-gen-go from
// ngolofuzz.proto; do not edit by hand.
type NewTableArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Symtab        []byte                 `protobuf:"bytes,1,opt,name=symtab,proto3" json:"symtab,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *NewTableArgs) Reset() {
	*x = NewTableArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *NewTableArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewTableArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *NewTableArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewTableArgs.ProtoReflect.Descriptor instead.
func (*NewTableArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// GetSymtab returns the symtab field, or nil for a nil receiver.
func (x *NewTableArgs) GetSymtab() []byte {
	if x != nil {
		return x.Symtab
	}
	return nil
}
// TableNgdotPCToFuncArgs is the generated protobuf message carrying the
// argument (pc) for a fuzzed Table.PCToFunc call. Generated by
// protoc-gen-go from ngolofuzz.proto; do not edit by hand.
type TableNgdotPCToFuncArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Pc            uint64                 `protobuf:"varint,1,opt,name=pc,proto3" json:"pc,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *TableNgdotPCToFuncArgs) Reset() {
	*x = TableNgdotPCToFuncArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *TableNgdotPCToFuncArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TableNgdotPCToFuncArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *TableNgdotPCToFuncArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TableNgdotPCToFuncArgs.ProtoReflect.Descriptor instead.
func (*TableNgdotPCToFuncArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetPc returns the pc field, or 0 for a nil receiver.
func (x *TableNgdotPCToFuncArgs) GetPc() uint64 {
	if x != nil {
		return x.Pc
	}
	return 0
}
// TableNgdotPCToLineArgs is the generated protobuf message carrying the
// argument (pc) for a fuzzed Table.PCToLine call. Generated by
// protoc-gen-go from ngolofuzz.proto; do not edit by hand.
type TableNgdotPCToLineArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Pc            uint64                 `protobuf:"varint,1,opt,name=pc,proto3" json:"pc,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *TableNgdotPCToLineArgs) Reset() {
	*x = TableNgdotPCToLineArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *TableNgdotPCToLineArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TableNgdotPCToLineArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *TableNgdotPCToLineArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TableNgdotPCToLineArgs.ProtoReflect.Descriptor instead.
func (*TableNgdotPCToLineArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetPc returns the pc field, or 0 for a nil receiver.
func (x *TableNgdotPCToLineArgs) GetPc() uint64 {
	if x != nil {
		return x.Pc
	}
	return 0
}
// TableNgdotLineToPCArgs is the generated protobuf message carrying the
// arguments (file, line) for a fuzzed Table.LineToPC call. Generated by
// protoc-gen-go from ngolofuzz.proto; do not edit by hand.
type TableNgdotLineToPCArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	File          string                 `protobuf:"bytes,1,opt,name=file,proto3" json:"file,omitempty"`
	Line          int64                  `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *TableNgdotLineToPCArgs) Reset() {
	*x = TableNgdotLineToPCArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *TableNgdotLineToPCArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TableNgdotLineToPCArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *TableNgdotLineToPCArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TableNgdotLineToPCArgs.ProtoReflect.Descriptor instead.
func (*TableNgdotLineToPCArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetFile returns the file field, or "" for a nil receiver.
func (x *TableNgdotLineToPCArgs) GetFile() string {
	if x != nil {
		return x.File
	}
	return ""
}

// GetLine returns the line field, or 0 for a nil receiver.
func (x *TableNgdotLineToPCArgs) GetLine() int64 {
	if x != nil {
		return x.Line
	}
	return 0
}
// TableNgdotLookupSymArgs is the generated protobuf message carrying the
// argument (name) for a fuzzed Table.LookupSym call. Generated by
// protoc-gen-go from ngolofuzz.proto; do not edit by hand.
type TableNgdotLookupSymArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *TableNgdotLookupSymArgs) Reset() {
	*x = TableNgdotLookupSymArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *TableNgdotLookupSymArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TableNgdotLookupSymArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *TableNgdotLookupSymArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TableNgdotLookupSymArgs.ProtoReflect.Descriptor instead.
func (*TableNgdotLookupSymArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetName returns the name field, or "" for a nil receiver.
func (x *TableNgdotLookupSymArgs) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
type TableNgdotLookupFuncArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TableNgdotLookupFuncArgs) Reset() {
*x = TableNgdotLookupFuncArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TableNgdotLookupFuncArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TableNgdotLookupFuncArgs) ProtoMessage() {}
func (x *TableNgdotLookupFuncArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TableNgdotLookupFuncArgs.ProtoReflect.Descriptor instead.
func (*TableNgdotLookupFuncArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
func (x *TableNgdotLookupFuncArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type TableNgdotSymByAddrArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Addr uint64 `protobuf:"varint,1,opt,name=addr,proto3" json:"addr,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TableNgdotSymByAddrArgs) Reset() {
*x = TableNgdotSymByAddrArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TableNgdotSymByAddrArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TableNgdotSymByAddrArgs) ProtoMessage() {}
func (x *TableNgdotSymByAddrArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TableNgdotSymByAddrArgs.ProtoReflect.Descriptor instead.
func (*TableNgdotSymByAddrArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
func (x *TableNgdotSymByAddrArgs) GetAddr() uint64 {
if x != nil {
return x.Addr
}
return 0
}
// NgoloFuzzOne selects a single fuzzed API call: exactly one of the
// *Args messages below is populated, mirroring the proto oneof "item".
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_LineTableNgdotPCToLine
// *NgoloFuzzOne_LineTableNgdotLineToPC
// *NgoloFuzzOne_NewLineTable
// *NgoloFuzzOne_SymNgdotStatic
// *NgoloFuzzOne_SymNgdotPackageName
// *NgoloFuzzOne_SymNgdotReceiverName
// *NgoloFuzzOne_SymNgdotBaseName
// *NgoloFuzzOne_NewTable
// *NgoloFuzzOne_TableNgdotPCToFunc
// *NgoloFuzzOne_TableNgdotPCToLine
// *NgoloFuzzOne_TableNgdotLineToPC
// *NgoloFuzzOne_TableNgdotLookupSym
// *NgoloFuzzOne_TableNgdotLookupFunc
// *NgoloFuzzOne_TableNgdotSymByAddr
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// GetItem returns whichever oneof wrapper is set, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// The Get* accessors below return the corresponding oneof variant's
// payload, or nil when a different variant (or none) is set.
func (x *NgoloFuzzOne) GetLineTableNgdotPCToLine() *LineTableNgdotPCToLineArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LineTableNgdotPCToLine); ok {
return x.LineTableNgdotPCToLine
}
}
return nil
}
func (x *NgoloFuzzOne) GetLineTableNgdotLineToPC() *LineTableNgdotLineToPCArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LineTableNgdotLineToPC); ok {
return x.LineTableNgdotLineToPC
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewLineTable() *NewLineTableArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewLineTable); ok {
return x.NewLineTable
}
}
return nil
}
func (x *NgoloFuzzOne) GetSymNgdotStatic() *SymNgdotStaticArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SymNgdotStatic); ok {
return x.SymNgdotStatic
}
}
return nil
}
func (x *NgoloFuzzOne) GetSymNgdotPackageName() *SymNgdotPackageNameArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SymNgdotPackageName); ok {
return x.SymNgdotPackageName
}
}
return nil
}
func (x *NgoloFuzzOne) GetSymNgdotReceiverName() *SymNgdotReceiverNameArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SymNgdotReceiverName); ok {
return x.SymNgdotReceiverName
}
}
return nil
}
func (x *NgoloFuzzOne) GetSymNgdotBaseName() *SymNgdotBaseNameArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SymNgdotBaseName); ok {
return x.SymNgdotBaseName
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewTable() *NewTableArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewTable); ok {
return x.NewTable
}
}
return nil
}
func (x *NgoloFuzzOne) GetTableNgdotPCToFunc() *TableNgdotPCToFuncArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TableNgdotPCToFunc); ok {
return x.TableNgdotPCToFunc
}
}
return nil
}
func (x *NgoloFuzzOne) GetTableNgdotPCToLine() *TableNgdotPCToLineArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TableNgdotPCToLine); ok {
return x.TableNgdotPCToLine
}
}
return nil
}
func (x *NgoloFuzzOne) GetTableNgdotLineToPC() *TableNgdotLineToPCArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TableNgdotLineToPC); ok {
return x.TableNgdotLineToPC
}
}
return nil
}
func (x *NgoloFuzzOne) GetTableNgdotLookupSym() *TableNgdotLookupSymArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TableNgdotLookupSym); ok {
return x.TableNgdotLookupSym
}
}
return nil
}
func (x *NgoloFuzzOne) GetTableNgdotLookupFunc() *TableNgdotLookupFuncArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TableNgdotLookupFunc); ok {
return x.TableNgdotLookupFunc
}
}
return nil
}
func (x *NgoloFuzzOne) GetTableNgdotSymByAddr() *TableNgdotSymByAddrArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TableNgdotSymByAddr); ok {
return x.TableNgdotSymByAddr
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every
// oneof wrapper type below; each wrapper holds one field of the oneof.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_LineTableNgdotPCToLine struct {
LineTableNgdotPCToLine *LineTableNgdotPCToLineArgs `protobuf:"bytes,1,opt,name=LineTableNgdotPCToLine,proto3,oneof"`
}
type NgoloFuzzOne_LineTableNgdotLineToPC struct {
LineTableNgdotLineToPC *LineTableNgdotLineToPCArgs `protobuf:"bytes,2,opt,name=LineTableNgdotLineToPC,proto3,oneof"`
}
type NgoloFuzzOne_NewLineTable struct {
NewLineTable *NewLineTableArgs `protobuf:"bytes,3,opt,name=NewLineTable,proto3,oneof"`
}
type NgoloFuzzOne_SymNgdotStatic struct {
SymNgdotStatic *SymNgdotStaticArgs `protobuf:"bytes,4,opt,name=SymNgdotStatic,proto3,oneof"`
}
type NgoloFuzzOne_SymNgdotPackageName struct {
SymNgdotPackageName *SymNgdotPackageNameArgs `protobuf:"bytes,5,opt,name=SymNgdotPackageName,proto3,oneof"`
}
type NgoloFuzzOne_SymNgdotReceiverName struct {
SymNgdotReceiverName *SymNgdotReceiverNameArgs `protobuf:"bytes,6,opt,name=SymNgdotReceiverName,proto3,oneof"`
}
type NgoloFuzzOne_SymNgdotBaseName struct {
SymNgdotBaseName *SymNgdotBaseNameArgs `protobuf:"bytes,7,opt,name=SymNgdotBaseName,proto3,oneof"`
}
type NgoloFuzzOne_NewTable struct {
NewTable *NewTableArgs `protobuf:"bytes,8,opt,name=NewTable,proto3,oneof"`
}
type NgoloFuzzOne_TableNgdotPCToFunc struct {
TableNgdotPCToFunc *TableNgdotPCToFuncArgs `protobuf:"bytes,9,opt,name=TableNgdotPCToFunc,proto3,oneof"`
}
type NgoloFuzzOne_TableNgdotPCToLine struct {
TableNgdotPCToLine *TableNgdotPCToLineArgs `protobuf:"bytes,10,opt,name=TableNgdotPCToLine,proto3,oneof"`
}
type NgoloFuzzOne_TableNgdotLineToPC struct {
TableNgdotLineToPC *TableNgdotLineToPCArgs `protobuf:"bytes,11,opt,name=TableNgdotLineToPC,proto3,oneof"`
}
type NgoloFuzzOne_TableNgdotLookupSym struct {
TableNgdotLookupSym *TableNgdotLookupSymArgs `protobuf:"bytes,12,opt,name=TableNgdotLookupSym,proto3,oneof"`
}
type NgoloFuzzOne_TableNgdotLookupFunc struct {
TableNgdotLookupFunc *TableNgdotLookupFuncArgs `protobuf:"bytes,13,opt,name=TableNgdotLookupFunc,proto3,oneof"`
}
type NgoloFuzzOne_TableNgdotSymByAddr struct {
TableNgdotSymByAddr *TableNgdotSymByAddrArgs `protobuf:"bytes,14,opt,name=TableNgdotSymByAddr,proto3,oneof"`
}
func (*NgoloFuzzOne_LineTableNgdotPCToLine) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LineTableNgdotLineToPC) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewLineTable) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SymNgdotStatic) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SymNgdotPackageName) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SymNgdotReceiverName) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SymNgdotBaseName) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewTable) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TableNgdotPCToFunc) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TableNgdotPCToLine) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TableNgdotLineToPC) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TableNgdotLookupSym) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TableNgdotLookupFunc) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TableNgdotSymByAddr) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a oneof of primitive scalar values (double, int64,
// bool, string, bytes) the fuzzer can feed as a generic argument.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Each accessor below returns its variant's value, or the zero value
// when a different variant (or none) is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the sealed interface for the oneof wrappers.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne calls replayed by the harness.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init below.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto
// for ngolofuzz.proto, embedded as an escaped string. Do not edit:
// any byte change breaks descriptor parsing.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\",\n" +
"\x1aLineTableNgdotPCToLineArgs\x12\x0e\n" +
"\x02pc\x18\x01 \x01(\x04R\x02pc\"F\n" +
"\x1aLineTableNgdotLineToPCArgs\x12\x12\n" +
"\x04line\x18\x01 \x01(\x03R\x04line\x12\x14\n" +
"\x05maxpc\x18\x02 \x01(\x04R\x05maxpc\":\n" +
"\x10NewLineTableArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\x12\x12\n" +
"\x04text\x18\x02 \x01(\x04R\x04text\"\x14\n" +
"\x12SymNgdotStaticArgs\"\x19\n" +
"\x17SymNgdotPackageNameArgs\"\x1a\n" +
"\x18SymNgdotReceiverNameArgs\"\x16\n" +
"\x14SymNgdotBaseNameArgs\"&\n" +
"\fNewTableArgs\x12\x16\n" +
"\x06symtab\x18\x01 \x01(\fR\x06symtab\"(\n" +
"\x16TableNgdotPCToFuncArgs\x12\x0e\n" +
"\x02pc\x18\x01 \x01(\x04R\x02pc\"(\n" +
"\x16TableNgdotPCToLineArgs\x12\x0e\n" +
"\x02pc\x18\x01 \x01(\x04R\x02pc\"@\n" +
"\x16TableNgdotLineToPCArgs\x12\x12\n" +
"\x04file\x18\x01 \x01(\tR\x04file\x12\x12\n" +
"\x04line\x18\x02 \x01(\x03R\x04line\"-\n" +
"\x17TableNgdotLookupSymArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\".\n" +
"\x18TableNgdotLookupFuncArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"-\n" +
"\x17TableNgdotSymByAddrArgs\x12\x12\n" +
"\x04addr\x18\x01 \x01(\x04R\x04addr\"\xa7\t\n" +
"\fNgoloFuzzOne\x12_\n" +
"\x16LineTableNgdotPCToLine\x18\x01 \x01(\v2%.ngolofuzz.LineTableNgdotPCToLineArgsH\x00R\x16LineTableNgdotPCToLine\x12_\n" +
"\x16LineTableNgdotLineToPC\x18\x02 \x01(\v2%.ngolofuzz.LineTableNgdotLineToPCArgsH\x00R\x16LineTableNgdotLineToPC\x12A\n" +
"\fNewLineTable\x18\x03 \x01(\v2\x1b.ngolofuzz.NewLineTableArgsH\x00R\fNewLineTable\x12G\n" +
"\x0eSymNgdotStatic\x18\x04 \x01(\v2\x1d.ngolofuzz.SymNgdotStaticArgsH\x00R\x0eSymNgdotStatic\x12V\n" +
"\x13SymNgdotPackageName\x18\x05 \x01(\v2\".ngolofuzz.SymNgdotPackageNameArgsH\x00R\x13SymNgdotPackageName\x12Y\n" +
"\x14SymNgdotReceiverName\x18\x06 \x01(\v2#.ngolofuzz.SymNgdotReceiverNameArgsH\x00R\x14SymNgdotReceiverName\x12M\n" +
"\x10SymNgdotBaseName\x18\a \x01(\v2\x1f.ngolofuzz.SymNgdotBaseNameArgsH\x00R\x10SymNgdotBaseName\x125\n" +
"\bNewTable\x18\b \x01(\v2\x17.ngolofuzz.NewTableArgsH\x00R\bNewTable\x12S\n" +
"\x12TableNgdotPCToFunc\x18\t \x01(\v2!.ngolofuzz.TableNgdotPCToFuncArgsH\x00R\x12TableNgdotPCToFunc\x12S\n" +
"\x12TableNgdotPCToLine\x18\n" +
" \x01(\v2!.ngolofuzz.TableNgdotPCToLineArgsH\x00R\x12TableNgdotPCToLine\x12S\n" +
"\x12TableNgdotLineToPC\x18\v \x01(\v2!.ngolofuzz.TableNgdotLineToPCArgsH\x00R\x12TableNgdotLineToPC\x12V\n" +
"\x13TableNgdotLookupSym\x18\f \x01(\v2\".ngolofuzz.TableNgdotLookupSymArgsH\x00R\x13TableNgdotLookupSym\x12Y\n" +
"\x14TableNgdotLookupFunc\x18\r \x01(\v2#.ngolofuzz.TableNgdotLookupFuncArgsH\x00R\x14TableNgdotLookupFunc\x12V\n" +
"\x13TableNgdotSymByAddr\x18\x0e \x01(\v2\".ngolofuzz.TableNgdotSymByAddrArgsH\x00R\x13TableNgdotSymByAddrB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x18Z\x16./;fuzz_ng_debug_gosymb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor
// exactly once (guarded by rawDescOnce) and returns the cached bytes;
// it backs the deprecated Descriptor methods above.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds one MessageInfo slot for each of
// the 17 messages declared in ngolofuzz.proto.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
// file_ngolofuzz_proto_goTypes maps descriptor message indices to the
// corresponding generated Go types.
var file_ngolofuzz_proto_goTypes = []any{
(*LineTableNgdotPCToLineArgs)(nil), // 0: ngolofuzz.LineTableNgdotPCToLineArgs
(*LineTableNgdotLineToPCArgs)(nil), // 1: ngolofuzz.LineTableNgdotLineToPCArgs
(*NewLineTableArgs)(nil), // 2: ngolofuzz.NewLineTableArgs
(*SymNgdotStaticArgs)(nil), // 3: ngolofuzz.SymNgdotStaticArgs
(*SymNgdotPackageNameArgs)(nil), // 4: ngolofuzz.SymNgdotPackageNameArgs
(*SymNgdotReceiverNameArgs)(nil), // 5: ngolofuzz.SymNgdotReceiverNameArgs
(*SymNgdotBaseNameArgs)(nil), // 6: ngolofuzz.SymNgdotBaseNameArgs
(*NewTableArgs)(nil), // 7: ngolofuzz.NewTableArgs
(*TableNgdotPCToFuncArgs)(nil), // 8: ngolofuzz.TableNgdotPCToFuncArgs
(*TableNgdotPCToLineArgs)(nil), // 9: ngolofuzz.TableNgdotPCToLineArgs
(*TableNgdotLineToPCArgs)(nil), // 10: ngolofuzz.TableNgdotLineToPCArgs
(*TableNgdotLookupSymArgs)(nil), // 11: ngolofuzz.TableNgdotLookupSymArgs
(*TableNgdotLookupFuncArgs)(nil), // 12: ngolofuzz.TableNgdotLookupFuncArgs
(*TableNgdotSymByAddrArgs)(nil), // 13: ngolofuzz.TableNgdotSymByAddrArgs
(*NgoloFuzzOne)(nil), // 14: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 15: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 16: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs records, for each message field, the
// index (into goTypes) of the field's type; the trailing entries
// delimit the method/extension sub-lists as commented inline.
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.LineTableNgdotPCToLine:type_name -> ngolofuzz.LineTableNgdotPCToLineArgs
1, // 1: ngolofuzz.NgoloFuzzOne.LineTableNgdotLineToPC:type_name -> ngolofuzz.LineTableNgdotLineToPCArgs
2, // 2: ngolofuzz.NgoloFuzzOne.NewLineTable:type_name -> ngolofuzz.NewLineTableArgs
3, // 3: ngolofuzz.NgoloFuzzOne.SymNgdotStatic:type_name -> ngolofuzz.SymNgdotStaticArgs
4, // 4: ngolofuzz.NgoloFuzzOne.SymNgdotPackageName:type_name -> ngolofuzz.SymNgdotPackageNameArgs
5, // 5: ngolofuzz.NgoloFuzzOne.SymNgdotReceiverName:type_name -> ngolofuzz.SymNgdotReceiverNameArgs
6, // 6: ngolofuzz.NgoloFuzzOne.SymNgdotBaseName:type_name -> ngolofuzz.SymNgdotBaseNameArgs
7, // 7: ngolofuzz.NgoloFuzzOne.NewTable:type_name -> ngolofuzz.NewTableArgs
8, // 8: ngolofuzz.NgoloFuzzOne.TableNgdotPCToFunc:type_name -> ngolofuzz.TableNgdotPCToFuncArgs
9, // 9: ngolofuzz.NgoloFuzzOne.TableNgdotPCToLine:type_name -> ngolofuzz.TableNgdotPCToLineArgs
10, // 10: ngolofuzz.NgoloFuzzOne.TableNgdotLineToPC:type_name -> ngolofuzz.TableNgdotLineToPCArgs
11, // 11: ngolofuzz.NgoloFuzzOne.TableNgdotLookupSym:type_name -> ngolofuzz.TableNgdotLookupSymArgs
12, // 12: ngolofuzz.NgoloFuzzOne.TableNgdotLookupFunc:type_name -> ngolofuzz.TableNgdotLookupFuncArgs
13, // 13: ngolofuzz.NgoloFuzzOne.TableNgdotSymByAddr:type_name -> ngolofuzz.TableNgdotSymByAddrArgs
14, // 14: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
15, // [15:15] is the sub-list for method output_type
15, // [15:15] is the sub-list for method input_type
15, // [15:15] is the sub-list for extension type_name
15, // [15:15] is the sub-list for extension extendee
0, // [0:15] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the raw descriptor, the oneof
// wrapper types and the Go type tables with the protobuf runtime.
// It is idempotent: once File_ngolofuzz_proto is set, later calls
// return immediately.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Declare the oneof wrapper types for NgoloFuzzOne (msgTypes[14])
// and NgoloFuzzAny (msgTypes[15]) so the runtime can decode them.
file_ngolofuzz_proto_msgTypes[14].OneofWrappers = []any{
(*NgoloFuzzOne_LineTableNgdotPCToLine)(nil),
(*NgoloFuzzOne_LineTableNgdotLineToPC)(nil),
(*NgoloFuzzOne_NewLineTable)(nil),
(*NgoloFuzzOne_SymNgdotStatic)(nil),
(*NgoloFuzzOne_SymNgdotPackageName)(nil),
(*NgoloFuzzOne_SymNgdotReceiverName)(nil),
(*NgoloFuzzOne_SymNgdotBaseName)(nil),
(*NgoloFuzzOne_NewTable)(nil),
(*NgoloFuzzOne_TableNgdotPCToFunc)(nil),
(*NgoloFuzzOne_TableNgdotPCToLine)(nil),
(*NgoloFuzzOne_TableNgdotLineToPC)(nil),
(*NgoloFuzzOne_TableNgdotLookupSym)(nil),
(*NgoloFuzzOne_TableNgdotLookupFunc)(nil),
(*NgoloFuzzOne_TableNgdotSymByAddr)(nil),
}
file_ngolofuzz_proto_msgTypes[15].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 17,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// The builder has consumed these tables; nil them so they can be
// garbage collected.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_debug_macho
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"debug/macho"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in: Read serves bytes from
// a fixed buffer and Write discards its input, letting the fuzzer feed
// attacker-controlled bytes to code that expects a connection.
type FuzzingConn struct {
	buf    []byte // bytes that Read serves
	offset int    // number of bytes of buf already consumed
}

// Read copies up to len(b) unread bytes from the backing buffer into b
// and advances the read offset. It returns io.EOF once the buffer is
// exhausted.
//
// Bug fix: the original partial-read branch tested
// `len(b) < len(c.buf)+c.offset` instead of the remaining length
// `len(c.buf)-c.offset`. Near the end of the buffer it copied fewer
// bytes than it reported (returning len(b) after copying only the
// tail) and pushed offset past len(buf). Using copy's return value
// makes both the full and partial cases correct by construction.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written, so the code under
// test sees a well-behaved sink.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the buffer as fully consumed; subsequent Reads hit EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is the fixed placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// The deadline setters accept any deadline and do nothing: the conn is
// purely in-memory, so no operation can block.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn positioned at offset 0.
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
// TODO: only generate these helpers when the harness actually needs them.
// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the byte slice a through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	backing := bytes.NewBuffer(a)
	return bufio.NewReader(backing)
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16, matching the
// proto wire type used by the generator.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL for the empty string.
func GetRune(s string) rune {
	runes := []rune(s)
	if len(runes) == 0 {
		return '\x00'
	}
	return runes[0]
}
// RelocTypeGenericNewFromFuzz maps a fuzzer-chosen enum value onto a
// concrete macho.RelocTypeGeneric; any unlisted value falls back to
// GENERIC_RELOC_VANILLA.
func RelocTypeGenericNewFromFuzz(p RelocTypeGenericEnum) macho.RelocTypeGeneric {
	relocs := map[RelocTypeGenericEnum]macho.RelocTypeGeneric{
		1: macho.GENERIC_RELOC_PAIR,
		2: macho.GENERIC_RELOC_SECTDIFF,
		3: macho.GENERIC_RELOC_PB_LA_PTR,
		4: macho.GENERIC_RELOC_LOCAL_SECTDIFF,
		5: macho.GENERIC_RELOC_TLV,
	}
	if r, ok := relocs[p]; ok {
		return r
	}
	return macho.GENERIC_RELOC_VANILLA
}
// ConvertRelocTypeGenericNewFromFuzz maps a slice of fuzzer enum values
// element-wise through RelocTypeGenericNewFromFuzz.
func ConvertRelocTypeGenericNewFromFuzz(a []RelocTypeGenericEnum) []macho.RelocTypeGeneric {
	out := make([]macho.RelocTypeGeneric, 0, len(a))
	for _, e := range a {
		out = append(out, RelocTypeGenericNewFromFuzz(e))
	}
	return out
}
// RelocTypeX86_64NewFromFuzz maps a fuzzer-chosen enum value onto a
// macho.RelocTypeX86_64; unlisted values fall back to X86_64_RELOC_UNSIGNED.
// The remaining *NewFromFuzz/Convert* pairs below follow the same
// generated pattern for each debug/macho enum type.
func RelocTypeX86_64NewFromFuzz(p RelocTypeX86_64Enum) macho.RelocTypeX86_64{
switch p {
case 1:
return macho.X86_64_RELOC_SIGNED
case 2:
return macho.X86_64_RELOC_BRANCH
case 3:
return macho.X86_64_RELOC_GOT_LOAD
case 4:
return macho.X86_64_RELOC_GOT
case 5:
return macho.X86_64_RELOC_SUBTRACTOR
case 6:
return macho.X86_64_RELOC_SIGNED_1
case 7:
return macho.X86_64_RELOC_SIGNED_2
case 8:
return macho.X86_64_RELOC_SIGNED_4
case 9:
return macho.X86_64_RELOC_TLV
}
return macho.X86_64_RELOC_UNSIGNED
}
// ConvertRelocTypeX86_64NewFromFuzz converts a slice element-wise.
func ConvertRelocTypeX86_64NewFromFuzz(a []RelocTypeX86_64Enum) []macho.RelocTypeX86_64{
r := make([]macho.RelocTypeX86_64, len(a))
for i := range a {
r[i] = RelocTypeX86_64NewFromFuzz(a[i])
}
return r
}
// RelocTypeARMNewFromFuzz: fallback is ARM_RELOC_VANILLA.
func RelocTypeARMNewFromFuzz(p RelocTypeARMEnum) macho.RelocTypeARM{
switch p {
case 1:
return macho.ARM_RELOC_PAIR
case 2:
return macho.ARM_RELOC_SECTDIFF
case 3:
return macho.ARM_RELOC_LOCAL_SECTDIFF
case 4:
return macho.ARM_RELOC_PB_LA_PTR
case 5:
return macho.ARM_RELOC_BR24
case 6:
return macho.ARM_THUMB_RELOC_BR22
case 7:
return macho.ARM_THUMB_32BIT_BRANCH
case 8:
return macho.ARM_RELOC_HALF
case 9:
return macho.ARM_RELOC_HALF_SECTDIFF
}
return macho.ARM_RELOC_VANILLA
}
func ConvertRelocTypeARMNewFromFuzz(a []RelocTypeARMEnum) []macho.RelocTypeARM{
r := make([]macho.RelocTypeARM, len(a))
for i := range a {
r[i] = RelocTypeARMNewFromFuzz(a[i])
}
return r
}
// LoadCmdNewFromFuzz: fallback is LoadCmdSegment.
func LoadCmdNewFromFuzz(p LoadCmdEnum) macho.LoadCmd{
switch p {
case 1:
return macho.LoadCmdSymtab
case 2:
return macho.LoadCmdThread
case 3:
return macho.LoadCmdUnixThread
case 4:
return macho.LoadCmdDysymtab
case 5:
return macho.LoadCmdDylib
case 6:
return macho.LoadCmdDylinker
case 7:
return macho.LoadCmdSegment64
case 8:
return macho.LoadCmdRpath
}
return macho.LoadCmdSegment
}
func ConvertLoadCmdNewFromFuzz(a []LoadCmdEnum) []macho.LoadCmd{
r := make([]macho.LoadCmd, len(a))
for i := range a {
r[i] = LoadCmdNewFromFuzz(a[i])
}
return r
}
// CpuNewFromFuzz: fallback is Cpu386.
func CpuNewFromFuzz(p CpuEnum) macho.Cpu{
switch p {
case 1:
return macho.CpuAmd64
case 2:
return macho.CpuArm
case 3:
return macho.CpuArm64
case 4:
return macho.CpuPpc
case 5:
return macho.CpuPpc64
}
return macho.Cpu386
}
func ConvertCpuNewFromFuzz(a []CpuEnum) []macho.Cpu{
r := make([]macho.Cpu, len(a))
for i := range a {
r[i] = CpuNewFromFuzz(a[i])
}
return r
}
// RelocTypeARM64NewFromFuzz: fallback is ARM64_RELOC_UNSIGNED.
func RelocTypeARM64NewFromFuzz(p RelocTypeARM64Enum) macho.RelocTypeARM64{
switch p {
case 1:
return macho.ARM64_RELOC_SUBTRACTOR
case 2:
return macho.ARM64_RELOC_BRANCH26
case 3:
return macho.ARM64_RELOC_PAGE21
case 4:
return macho.ARM64_RELOC_PAGEOFF12
case 5:
return macho.ARM64_RELOC_GOT_LOAD_PAGE21
case 6:
return macho.ARM64_RELOC_GOT_LOAD_PAGEOFF12
case 7:
return macho.ARM64_RELOC_POINTER_TO_GOT
case 8:
return macho.ARM64_RELOC_TLVP_LOAD_PAGE21
case 9:
return macho.ARM64_RELOC_TLVP_LOAD_PAGEOFF12
case 10:
return macho.ARM64_RELOC_ADDEND
}
return macho.ARM64_RELOC_UNSIGNED
}
func ConvertRelocTypeARM64NewFromFuzz(a []RelocTypeARM64Enum) []macho.RelocTypeARM64{
r := make([]macho.RelocTypeARM64, len(a))
for i := range a {
r[i] = RelocTypeARM64NewFromFuzz(a[i])
}
return r
}
// TypeNewFromFuzz maps a fuzzer-chosen enum value onto a macho.Type;
// any unlisted value falls back to TypeObj.
func TypeNewFromFuzz(p TypeEnum) macho.Type {
	types := map[TypeEnum]macho.Type{
		1: macho.TypeExec,
		2: macho.TypeDylib,
		3: macho.TypeBundle,
	}
	if t, ok := types[p]; ok {
		return t
	}
	return macho.TypeObj
}
// ConvertTypeNewFromFuzz maps a slice of fuzzer enum values
// element-wise through TypeNewFromFuzz.
func ConvertTypeNewFromFuzz(a []TypeEnum) []macho.Type {
	out := make([]macho.Type, 0, len(a))
	for _, e := range a {
		out = append(out, TypeNewFromFuzz(e))
	}
	return out
}
// FuzzNG_valid replays input that is assumed to be a valid protobuf
// encoding of NgoloFuzzList; it panics if the bytes do not decode,
// since this entry point is only fed generator-produced corpora.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics are swallowed; anything else is re-raised
		// as a real finding.
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure handles input that may not be a valid protobuf: bytes
// that fail to decode are silently rejected instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics are swallowed; anything else is re-raised
		// as a real finding.
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool
// FuzzNG_List replays the generated call sequence against debug/macho.
// Results of constructor-style calls are pooled in the *Results slices
// and later calls pick targets round-robin via the *Index counters.
// It returns 1 when the whole list was replayed and 0 when a call
// errored out early.
//
// NOTE(review): nothing in this loop appends to FileResults or
// FatFileResults (the success values of macho.NewFile/NewFatFile/Open/
// OpenFat are discarded with _), so the File*/FatFile* method cases
// below appear unreachable — confirm against the harness generator.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// On first entry, optionally dump a Go reproducer of this input
// to the path named by FUZZ_NG_REPRODUCER.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var FileResults []*macho.File
FileResultsIndex := 0
var SegmentResults []*macho.Segment
SegmentResultsIndex := 0
var FatFileResults []*macho.FatFile
FatFileResultsIndex := 0
var SectionResults []*macho.Section
SectionResultsIndex := 0
for l := range gen.List {
// Cap the amount of replayed calls per input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewFatFile:
arg0 := bytes.NewReader(a.NewFatFile.R)
_, r1 := macho.NewFatFile(arg0)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_OpenFat:
_, r1 := macho.OpenFat(a.OpenFat.Name)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FatFileNgdotClose:
if len(FatFileResults) == 0 {
continue
}
arg0 := FatFileResults[FatFileResultsIndex]
FatFileResultsIndex = (FatFileResultsIndex + 1) % len(FatFileResults)
r0 := arg0.Close()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_SegmentNgdotData:
if len(SegmentResults) == 0 {
continue
}
arg0 := SegmentResults[SegmentResultsIndex]
SegmentResultsIndex = (SegmentResultsIndex + 1) % len(SegmentResults)
_, r1 := arg0.Data()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_SegmentNgdotOpen:
if len(SegmentResults) == 0 {
continue
}
arg0 := SegmentResults[SegmentResultsIndex]
SegmentResultsIndex = (SegmentResultsIndex + 1) % len(SegmentResults)
arg0.Open()
case *NgoloFuzzOne_SectionNgdotData:
if len(SectionResults) == 0 {
continue
}
arg0 := SectionResults[SectionResultsIndex]
SectionResultsIndex = (SectionResultsIndex + 1) % len(SectionResults)
_, r1 := arg0.Data()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_SectionNgdotOpen:
if len(SectionResults) == 0 {
continue
}
arg0 := SectionResults[SectionResultsIndex]
SectionResultsIndex = (SectionResultsIndex + 1) % len(SectionResults)
arg0.Open()
case *NgoloFuzzOne_Open:
_, r1 := macho.Open(a.Open.Name)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FileNgdotClose:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
r0 := arg0.Close()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_NewFile:
arg0 := bytes.NewReader(a.NewFile.R)
_, r1 := macho.NewFile(arg0)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FileNgdotSegment:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
r0 := arg0.Segment(a.FileNgdotSegment.Name)
if r0 != nil{
SegmentResults = append(SegmentResults, r0)
}
case *NgoloFuzzOne_FileNgdotSection:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
r0 := arg0.Section(a.FileNgdotSection.Name)
if r0 != nil{
SectionResults = append(SectionResults, r0)
}
case *NgoloFuzzOne_FileNgdotDWARF:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
_, r1 := arg0.DWARF()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FileNgdotImportedSymbols:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
_, r1 := arg0.ImportedSymbols()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FileNgdotImportedLibraries:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
_, r1 := arg0.ImportedLibraries()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TypeNgdotString:
arg0 := TypeNewFromFuzz(a.TypeNgdotString.T)
arg0.String()
case *NgoloFuzzOne_TypeNgdotGoString:
arg0 := TypeNewFromFuzz(a.TypeNgdotGoString.T)
arg0.GoString()
case *NgoloFuzzOne_CpuNgdotString:
arg0 := CpuNewFromFuzz(a.CpuNgdotString.I)
arg0.String()
case *NgoloFuzzOne_CpuNgdotGoString:
arg0 := CpuNewFromFuzz(a.CpuNgdotGoString.I)
arg0.GoString()
case *NgoloFuzzOne_LoadCmdNgdotString:
arg0 := LoadCmdNewFromFuzz(a.LoadCmdNgdotString.I)
arg0.String()
case *NgoloFuzzOne_LoadCmdNgdotGoString:
arg0 := LoadCmdNewFromFuzz(a.LoadCmdNgdotGoString.I)
arg0.GoString()
case *NgoloFuzzOne_RelocTypeGenericNgdotGoString:
arg0 := RelocTypeGenericNewFromFuzz(a.RelocTypeGenericNgdotGoString.R)
arg0.GoString()
case *NgoloFuzzOne_RelocTypeX86_64NgdotGoString:
arg0 := RelocTypeX86_64NewFromFuzz(a.RelocTypeX86_64NgdotGoString.R)
arg0.GoString()
case *NgoloFuzzOne_RelocTypeARMNgdotGoString:
arg0 := RelocTypeARMNewFromFuzz(a.RelocTypeARMNgdotGoString.R)
arg0.GoString()
case *NgoloFuzzOne_RelocTypeARM64NgdotGoString:
arg0 := RelocTypeARM64NewFromFuzz(a.RelocTypeARM64NgdotGoString.R)
arg0.GoString()
case *NgoloFuzzOne_RelocTypeGenericNgdotString:
arg0 := RelocTypeGenericNewFromFuzz(a.RelocTypeGenericNgdotString.I)
arg0.String()
case *NgoloFuzzOne_RelocTypeX86_64NgdotString:
arg0 := RelocTypeX86_64NewFromFuzz(a.RelocTypeX86_64NgdotString.I)
arg0.String()
case *NgoloFuzzOne_RelocTypeARMNgdotString:
arg0 := RelocTypeARMNewFromFuzz(a.RelocTypeARMNgdotString.I)
arg0.String()
case *NgoloFuzzOne_RelocTypeARM64NgdotString:
arg0 := RelocTypeARM64NewFromFuzz(a.RelocTypeARM64NgdotString.I)
arg0.String()
}
}
return 1
}
// PrintNG_List writes a Go-like transcript of the fuzzed call sequence in
// gen to w so a crashing input can be inspected or replayed by hand.
//
// Constructor results are numbered (File0, FatFile1, Segment0, ...) and
// later method calls cycle through the previously created results with a
// round-robin modulo index, mirroring the result bookkeeping of the fuzz
// driver. Errors from w.WriteString are deliberately ignored: this is
// best-effort diagnostic output.
//
// NOTE(review): the original never incremented FileNb/FatFileNb, so every
// File*/FatFile* method case (and the Segment/Section cases they gate) was
// dead code and the transcript never assigned File%d/FatFile%d. The
// constructor cases below now print assignments and bump the counters, the
// same way the FileNgdotSegment/FileNgdotSection cases already bump
// SegmentNb/SectionNb. This assumes the runtime driver records a result for
// each of these four constructors — confirm against the driver loop.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// Counts of results produced so far, and round-robin cursors into them.
	FileNb := 0
	FileResultsIndex := 0
	SegmentNb := 0
	SegmentResultsIndex := 0
	FatFileNb := 0
	FatFileResultsIndex := 0
	SectionNb := 0
	SectionResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewFatFile:
			w.WriteString(fmt.Sprintf("FatFile%d := macho.NewFatFile(bytes.NewReader(%#+v))\n", FatFileNb, a.NewFatFile.R))
			FatFileNb = FatFileNb + 1
		case *NgoloFuzzOne_OpenFat:
			w.WriteString(fmt.Sprintf("FatFile%d := macho.OpenFat(%#+v)\n", FatFileNb, a.OpenFat.Name))
			FatFileNb = FatFileNb + 1
		case *NgoloFuzzOne_FatFileNgdotClose:
			// Skip method calls until at least one receiver exists.
			if FatFileNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("FatFile%d.Close()\n", FatFileResultsIndex))
			FatFileResultsIndex = (FatFileResultsIndex + 1) % FatFileNb
		case *NgoloFuzzOne_SegmentNgdotData:
			if SegmentNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Segment%d.Data()\n", SegmentResultsIndex))
			SegmentResultsIndex = (SegmentResultsIndex + 1) % SegmentNb
		case *NgoloFuzzOne_SegmentNgdotOpen:
			if SegmentNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Segment%d.Open()\n", SegmentResultsIndex))
			SegmentResultsIndex = (SegmentResultsIndex + 1) % SegmentNb
		case *NgoloFuzzOne_SectionNgdotData:
			if SectionNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Section%d.Data()\n", SectionResultsIndex))
			SectionResultsIndex = (SectionResultsIndex + 1) % SectionNb
		case *NgoloFuzzOne_SectionNgdotOpen:
			if SectionNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Section%d.Open()\n", SectionResultsIndex))
			SectionResultsIndex = (SectionResultsIndex + 1) % SectionNb
		case *NgoloFuzzOne_Open:
			w.WriteString(fmt.Sprintf("File%d := macho.Open(%#+v)\n", FileNb, a.Open.Name))
			FileNb = FileNb + 1
		case *NgoloFuzzOne_FileNgdotClose:
			if FileNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("File%d.Close()\n", FileResultsIndex))
			FileResultsIndex = (FileResultsIndex + 1) % FileNb
		case *NgoloFuzzOne_NewFile:
			w.WriteString(fmt.Sprintf("File%d := macho.NewFile(bytes.NewReader(%#+v))\n", FileNb, a.NewFile.R))
			FileNb = FileNb + 1
		case *NgoloFuzzOne_FileNgdotSegment:
			if FileNb == 0 {
				continue
			}
			// Segment lookup yields a new numbered Segment result.
			w.WriteString(fmt.Sprintf("Segment%d := File%d.Segment(%#+v)\n", SegmentNb, FileResultsIndex, a.FileNgdotSegment.Name))
			SegmentNb = SegmentNb + 1
			FileResultsIndex = (FileResultsIndex + 1) % FileNb
		case *NgoloFuzzOne_FileNgdotSection:
			if FileNb == 0 {
				continue
			}
			// Section lookup yields a new numbered Section result.
			w.WriteString(fmt.Sprintf("Section%d := File%d.Section(%#+v)\n", SectionNb, FileResultsIndex, a.FileNgdotSection.Name))
			SectionNb = SectionNb + 1
			FileResultsIndex = (FileResultsIndex + 1) % FileNb
		case *NgoloFuzzOne_FileNgdotDWARF:
			if FileNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("File%d.DWARF()\n", FileResultsIndex))
			FileResultsIndex = (FileResultsIndex + 1) % FileNb
		case *NgoloFuzzOne_FileNgdotImportedSymbols:
			if FileNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("File%d.ImportedSymbols()\n", FileResultsIndex))
			FileResultsIndex = (FileResultsIndex + 1) % FileNb
		case *NgoloFuzzOne_FileNgdotImportedLibraries:
			if FileNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("File%d.ImportedLibraries()\n", FileResultsIndex))
			FileResultsIndex = (FileResultsIndex + 1) % FileNb
		case *NgoloFuzzOne_TypeNgdotString:
			// Enum-receiver calls need no result bookkeeping.
			w.WriteString(fmt.Sprintf("TypeNewFromFuzz(%#+v).String()\n", a.TypeNgdotString.T))
		case *NgoloFuzzOne_TypeNgdotGoString:
			w.WriteString(fmt.Sprintf("TypeNewFromFuzz(%#+v).GoString()\n", a.TypeNgdotGoString.T))
		case *NgoloFuzzOne_CpuNgdotString:
			w.WriteString(fmt.Sprintf("CpuNewFromFuzz(%#+v).String()\n", a.CpuNgdotString.I))
		case *NgoloFuzzOne_CpuNgdotGoString:
			w.WriteString(fmt.Sprintf("CpuNewFromFuzz(%#+v).GoString()\n", a.CpuNgdotGoString.I))
		case *NgoloFuzzOne_LoadCmdNgdotString:
			w.WriteString(fmt.Sprintf("LoadCmdNewFromFuzz(%#+v).String()\n", a.LoadCmdNgdotString.I))
		case *NgoloFuzzOne_LoadCmdNgdotGoString:
			w.WriteString(fmt.Sprintf("LoadCmdNewFromFuzz(%#+v).GoString()\n", a.LoadCmdNgdotGoString.I))
		case *NgoloFuzzOne_RelocTypeGenericNgdotGoString:
			w.WriteString(fmt.Sprintf("RelocTypeGenericNewFromFuzz(%#+v).GoString()\n", a.RelocTypeGenericNgdotGoString.R))
		case *NgoloFuzzOne_RelocTypeX86_64NgdotGoString:
			w.WriteString(fmt.Sprintf("RelocTypeX86_64NewFromFuzz(%#+v).GoString()\n", a.RelocTypeX86_64NgdotGoString.R))
		case *NgoloFuzzOne_RelocTypeARMNgdotGoString:
			w.WriteString(fmt.Sprintf("RelocTypeARMNewFromFuzz(%#+v).GoString()\n", a.RelocTypeARMNgdotGoString.R))
		case *NgoloFuzzOne_RelocTypeARM64NgdotGoString:
			w.WriteString(fmt.Sprintf("RelocTypeARM64NewFromFuzz(%#+v).GoString()\n", a.RelocTypeARM64NgdotGoString.R))
		case *NgoloFuzzOne_RelocTypeGenericNgdotString:
			w.WriteString(fmt.Sprintf("RelocTypeGenericNewFromFuzz(%#+v).String()\n", a.RelocTypeGenericNgdotString.I))
		case *NgoloFuzzOne_RelocTypeX86_64NgdotString:
			w.WriteString(fmt.Sprintf("RelocTypeX86_64NewFromFuzz(%#+v).String()\n", a.RelocTypeX86_64NgdotString.I))
		case *NgoloFuzzOne_RelocTypeARMNgdotString:
			w.WriteString(fmt.Sprintf("RelocTypeARMNewFromFuzz(%#+v).String()\n", a.RelocTypeARMNgdotString.I))
		case *NgoloFuzzOne_RelocTypeARM64NgdotString:
			w.WriteString(fmt.Sprintf("RelocTypeARM64NewFromFuzz(%#+v).String()\n", a.RelocTypeARM64NgdotString.I))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_debug_macho
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guard: fails the build if the generated code and the linked
// protobuf runtime have drifted outside each other's supported version range.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// RelocTypeGenericEnum is a generated protobuf enum; its value names mirror
// debug/macho's generic relocation constants so the fuzzer can encode them.
type RelocTypeGenericEnum int32

const (
	RelocTypeGenericEnum_GENERIC_RELOC_VANILLA        RelocTypeGenericEnum = 0
	RelocTypeGenericEnum_GENERIC_RELOC_PAIR           RelocTypeGenericEnum = 1
	RelocTypeGenericEnum_GENERIC_RELOC_SECTDIFF       RelocTypeGenericEnum = 2
	RelocTypeGenericEnum_GENERIC_RELOC_PB_LA_PTR      RelocTypeGenericEnum = 3
	RelocTypeGenericEnum_GENERIC_RELOC_LOCAL_SECTDIFF RelocTypeGenericEnum = 4
	RelocTypeGenericEnum_GENERIC_RELOC_TLV            RelocTypeGenericEnum = 5
)

// Enum value maps for RelocTypeGenericEnum.
var (
	RelocTypeGenericEnum_name = map[int32]string{
		0: "GENERIC_RELOC_VANILLA",
		1: "GENERIC_RELOC_PAIR",
		2: "GENERIC_RELOC_SECTDIFF",
		3: "GENERIC_RELOC_PB_LA_PTR",
		4: "GENERIC_RELOC_LOCAL_SECTDIFF",
		5: "GENERIC_RELOC_TLV",
	}
	RelocTypeGenericEnum_value = map[string]int32{
		"GENERIC_RELOC_VANILLA":        0,
		"GENERIC_RELOC_PAIR":           1,
		"GENERIC_RELOC_SECTDIFF":       2,
		"GENERIC_RELOC_PB_LA_PTR":      3,
		"GENERIC_RELOC_LOCAL_SECTDIFF": 4,
		"GENERIC_RELOC_TLV":            5,
	}
)

// Enum returns a pointer to a new copy of x.
func (x RelocTypeGenericEnum) Enum() *RelocTypeGenericEnum {
	p := new(RelocTypeGenericEnum)
	*p = x
	return p
}

// String returns the protobuf name of the enum value.
func (x RelocTypeGenericEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor from the file's enum-type table.
func (RelocTypeGenericEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}

// Type returns the protoreflect enum type for this enum.
func (RelocTypeGenericEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[0]
}

// Number returns x as a protoreflect enum number.
func (x RelocTypeGenericEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use RelocTypeGenericEnum.Descriptor instead.
func (RelocTypeGenericEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// RelocTypeX86_64Enum is a generated protobuf enum; its value names mirror
// debug/macho's x86-64 relocation constants.
type RelocTypeX86_64Enum int32

const (
	RelocTypeX86_64Enum_X86_64_RELOC_UNSIGNED   RelocTypeX86_64Enum = 0
	RelocTypeX86_64Enum_X86_64_RELOC_SIGNED     RelocTypeX86_64Enum = 1
	RelocTypeX86_64Enum_X86_64_RELOC_BRANCH     RelocTypeX86_64Enum = 2
	RelocTypeX86_64Enum_X86_64_RELOC_GOT_LOAD   RelocTypeX86_64Enum = 3
	RelocTypeX86_64Enum_X86_64_RELOC_GOT        RelocTypeX86_64Enum = 4
	RelocTypeX86_64Enum_X86_64_RELOC_SUBTRACTOR RelocTypeX86_64Enum = 5
	RelocTypeX86_64Enum_X86_64_RELOC_SIGNED_1   RelocTypeX86_64Enum = 6
	RelocTypeX86_64Enum_X86_64_RELOC_SIGNED_2   RelocTypeX86_64Enum = 7
	RelocTypeX86_64Enum_X86_64_RELOC_SIGNED_4   RelocTypeX86_64Enum = 8
	RelocTypeX86_64Enum_X86_64_RELOC_TLV        RelocTypeX86_64Enum = 9
)

// Enum value maps for RelocTypeX86_64Enum.
var (
	RelocTypeX86_64Enum_name = map[int32]string{
		0: "X86_64_RELOC_UNSIGNED",
		1: "X86_64_RELOC_SIGNED",
		2: "X86_64_RELOC_BRANCH",
		3: "X86_64_RELOC_GOT_LOAD",
		4: "X86_64_RELOC_GOT",
		5: "X86_64_RELOC_SUBTRACTOR",
		6: "X86_64_RELOC_SIGNED_1",
		7: "X86_64_RELOC_SIGNED_2",
		8: "X86_64_RELOC_SIGNED_4",
		9: "X86_64_RELOC_TLV",
	}
	RelocTypeX86_64Enum_value = map[string]int32{
		"X86_64_RELOC_UNSIGNED":   0,
		"X86_64_RELOC_SIGNED":     1,
		"X86_64_RELOC_BRANCH":     2,
		"X86_64_RELOC_GOT_LOAD":   3,
		"X86_64_RELOC_GOT":        4,
		"X86_64_RELOC_SUBTRACTOR": 5,
		"X86_64_RELOC_SIGNED_1":   6,
		"X86_64_RELOC_SIGNED_2":   7,
		"X86_64_RELOC_SIGNED_4":   8,
		"X86_64_RELOC_TLV":        9,
	}
)

// Enum returns a pointer to a new copy of x.
func (x RelocTypeX86_64Enum) Enum() *RelocTypeX86_64Enum {
	p := new(RelocTypeX86_64Enum)
	*p = x
	return p
}

// String returns the protobuf name of the enum value.
func (x RelocTypeX86_64Enum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor from the file's enum-type table.
func (RelocTypeX86_64Enum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[1].Descriptor()
}

// Type returns the protoreflect enum type for this enum.
func (RelocTypeX86_64Enum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[1]
}

// Number returns x as a protoreflect enum number.
func (x RelocTypeX86_64Enum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use RelocTypeX86_64Enum.Descriptor instead.
func (RelocTypeX86_64Enum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// RelocTypeARMEnum is a generated protobuf enum; its value names mirror
// debug/macho's 32-bit ARM relocation constants.
type RelocTypeARMEnum int32

const (
	RelocTypeARMEnum_ARM_RELOC_VANILLA        RelocTypeARMEnum = 0
	RelocTypeARMEnum_ARM_RELOC_PAIR           RelocTypeARMEnum = 1
	RelocTypeARMEnum_ARM_RELOC_SECTDIFF       RelocTypeARMEnum = 2
	RelocTypeARMEnum_ARM_RELOC_LOCAL_SECTDIFF RelocTypeARMEnum = 3
	RelocTypeARMEnum_ARM_RELOC_PB_LA_PTR      RelocTypeARMEnum = 4
	RelocTypeARMEnum_ARM_RELOC_BR24           RelocTypeARMEnum = 5
	RelocTypeARMEnum_ARM_THUMB_RELOC_BR22     RelocTypeARMEnum = 6
	RelocTypeARMEnum_ARM_THUMB_32BIT_BRANCH   RelocTypeARMEnum = 7
	RelocTypeARMEnum_ARM_RELOC_HALF           RelocTypeARMEnum = 8
	RelocTypeARMEnum_ARM_RELOC_HALF_SECTDIFF  RelocTypeARMEnum = 9
)

// Enum value maps for RelocTypeARMEnum.
var (
	RelocTypeARMEnum_name = map[int32]string{
		0: "ARM_RELOC_VANILLA",
		1: "ARM_RELOC_PAIR",
		2: "ARM_RELOC_SECTDIFF",
		3: "ARM_RELOC_LOCAL_SECTDIFF",
		4: "ARM_RELOC_PB_LA_PTR",
		5: "ARM_RELOC_BR24",
		6: "ARM_THUMB_RELOC_BR22",
		7: "ARM_THUMB_32BIT_BRANCH",
		8: "ARM_RELOC_HALF",
		9: "ARM_RELOC_HALF_SECTDIFF",
	}
	RelocTypeARMEnum_value = map[string]int32{
		"ARM_RELOC_VANILLA":        0,
		"ARM_RELOC_PAIR":           1,
		"ARM_RELOC_SECTDIFF":       2,
		"ARM_RELOC_LOCAL_SECTDIFF": 3,
		"ARM_RELOC_PB_LA_PTR":      4,
		"ARM_RELOC_BR24":           5,
		"ARM_THUMB_RELOC_BR22":     6,
		"ARM_THUMB_32BIT_BRANCH":   7,
		"ARM_RELOC_HALF":           8,
		"ARM_RELOC_HALF_SECTDIFF":  9,
	}
)

// Enum returns a pointer to a new copy of x.
func (x RelocTypeARMEnum) Enum() *RelocTypeARMEnum {
	p := new(RelocTypeARMEnum)
	*p = x
	return p
}

// String returns the protobuf name of the enum value.
func (x RelocTypeARMEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor from the file's enum-type table.
func (RelocTypeARMEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[2].Descriptor()
}

// Type returns the protoreflect enum type for this enum.
func (RelocTypeARMEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[2]
}

// Number returns x as a protoreflect enum number.
func (x RelocTypeARMEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use RelocTypeARMEnum.Descriptor instead.
func (RelocTypeARMEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// LoadCmdEnum is a generated protobuf enum; its value names mirror
// debug/macho's LoadCmd constants.
type LoadCmdEnum int32

const (
	LoadCmdEnum_LoadCmdSegment    LoadCmdEnum = 0
	LoadCmdEnum_LoadCmdSymtab     LoadCmdEnum = 1
	LoadCmdEnum_LoadCmdThread     LoadCmdEnum = 2
	LoadCmdEnum_LoadCmdUnixThread LoadCmdEnum = 3
	LoadCmdEnum_LoadCmdDysymtab   LoadCmdEnum = 4
	LoadCmdEnum_LoadCmdDylib      LoadCmdEnum = 5
	LoadCmdEnum_LoadCmdDylinker   LoadCmdEnum = 6
	LoadCmdEnum_LoadCmdSegment64  LoadCmdEnum = 7
	LoadCmdEnum_LoadCmdRpath      LoadCmdEnum = 8
)

// Enum value maps for LoadCmdEnum.
var (
	LoadCmdEnum_name = map[int32]string{
		0: "LoadCmdSegment",
		1: "LoadCmdSymtab",
		2: "LoadCmdThread",
		3: "LoadCmdUnixThread",
		4: "LoadCmdDysymtab",
		5: "LoadCmdDylib",
		6: "LoadCmdDylinker",
		7: "LoadCmdSegment64",
		8: "LoadCmdRpath",
	}
	LoadCmdEnum_value = map[string]int32{
		"LoadCmdSegment":    0,
		"LoadCmdSymtab":     1,
		"LoadCmdThread":     2,
		"LoadCmdUnixThread": 3,
		"LoadCmdDysymtab":   4,
		"LoadCmdDylib":      5,
		"LoadCmdDylinker":   6,
		"LoadCmdSegment64":  7,
		"LoadCmdRpath":      8,
	}
)

// Enum returns a pointer to a new copy of x.
func (x LoadCmdEnum) Enum() *LoadCmdEnum {
	p := new(LoadCmdEnum)
	*p = x
	return p
}

// String returns the protobuf name of the enum value.
func (x LoadCmdEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor from the file's enum-type table.
func (LoadCmdEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[3].Descriptor()
}

// Type returns the protoreflect enum type for this enum.
func (LoadCmdEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[3]
}

// Number returns x as a protoreflect enum number.
func (x LoadCmdEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use LoadCmdEnum.Descriptor instead.
func (LoadCmdEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// CpuEnum is a generated protobuf enum; its value names mirror
// debug/macho's Cpu constants.
type CpuEnum int32

const (
	CpuEnum_Cpu386   CpuEnum = 0
	CpuEnum_CpuAmd64 CpuEnum = 1
	CpuEnum_CpuArm   CpuEnum = 2
	CpuEnum_CpuArm64 CpuEnum = 3
	CpuEnum_CpuPpc   CpuEnum = 4
	CpuEnum_CpuPpc64 CpuEnum = 5
)

// Enum value maps for CpuEnum.
var (
	CpuEnum_name = map[int32]string{
		0: "Cpu386",
		1: "CpuAmd64",
		2: "CpuArm",
		3: "CpuArm64",
		4: "CpuPpc",
		5: "CpuPpc64",
	}
	CpuEnum_value = map[string]int32{
		"Cpu386":   0,
		"CpuAmd64": 1,
		"CpuArm":   2,
		"CpuArm64": 3,
		"CpuPpc":   4,
		"CpuPpc64": 5,
	}
)

// Enum returns a pointer to a new copy of x.
func (x CpuEnum) Enum() *CpuEnum {
	p := new(CpuEnum)
	*p = x
	return p
}

// String returns the protobuf name of the enum value.
func (x CpuEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor from the file's enum-type table.
func (CpuEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[4].Descriptor()
}

// Type returns the protoreflect enum type for this enum.
func (CpuEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[4]
}

// Number returns x as a protoreflect enum number.
func (x CpuEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use CpuEnum.Descriptor instead.
func (CpuEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// RelocTypeARM64Enum is a generated protobuf enum; its value names mirror
// debug/macho's ARM64 relocation constants.
type RelocTypeARM64Enum int32

const (
	RelocTypeARM64Enum_ARM64_RELOC_UNSIGNED            RelocTypeARM64Enum = 0
	RelocTypeARM64Enum_ARM64_RELOC_SUBTRACTOR          RelocTypeARM64Enum = 1
	RelocTypeARM64Enum_ARM64_RELOC_BRANCH26            RelocTypeARM64Enum = 2
	RelocTypeARM64Enum_ARM64_RELOC_PAGE21              RelocTypeARM64Enum = 3
	RelocTypeARM64Enum_ARM64_RELOC_PAGEOFF12           RelocTypeARM64Enum = 4
	RelocTypeARM64Enum_ARM64_RELOC_GOT_LOAD_PAGE21     RelocTypeARM64Enum = 5
	RelocTypeARM64Enum_ARM64_RELOC_GOT_LOAD_PAGEOFF12  RelocTypeARM64Enum = 6
	RelocTypeARM64Enum_ARM64_RELOC_POINTER_TO_GOT      RelocTypeARM64Enum = 7
	RelocTypeARM64Enum_ARM64_RELOC_TLVP_LOAD_PAGE21    RelocTypeARM64Enum = 8
	RelocTypeARM64Enum_ARM64_RELOC_TLVP_LOAD_PAGEOFF12 RelocTypeARM64Enum = 9
	RelocTypeARM64Enum_ARM64_RELOC_ADDEND              RelocTypeARM64Enum = 10
)

// Enum value maps for RelocTypeARM64Enum.
var (
	RelocTypeARM64Enum_name = map[int32]string{
		0:  "ARM64_RELOC_UNSIGNED",
		1:  "ARM64_RELOC_SUBTRACTOR",
		2:  "ARM64_RELOC_BRANCH26",
		3:  "ARM64_RELOC_PAGE21",
		4:  "ARM64_RELOC_PAGEOFF12",
		5:  "ARM64_RELOC_GOT_LOAD_PAGE21",
		6:  "ARM64_RELOC_GOT_LOAD_PAGEOFF12",
		7:  "ARM64_RELOC_POINTER_TO_GOT",
		8:  "ARM64_RELOC_TLVP_LOAD_PAGE21",
		9:  "ARM64_RELOC_TLVP_LOAD_PAGEOFF12",
		10: "ARM64_RELOC_ADDEND",
	}
	RelocTypeARM64Enum_value = map[string]int32{
		"ARM64_RELOC_UNSIGNED":            0,
		"ARM64_RELOC_SUBTRACTOR":          1,
		"ARM64_RELOC_BRANCH26":            2,
		"ARM64_RELOC_PAGE21":              3,
		"ARM64_RELOC_PAGEOFF12":           4,
		"ARM64_RELOC_GOT_LOAD_PAGE21":     5,
		"ARM64_RELOC_GOT_LOAD_PAGEOFF12":  6,
		"ARM64_RELOC_POINTER_TO_GOT":      7,
		"ARM64_RELOC_TLVP_LOAD_PAGE21":    8,
		"ARM64_RELOC_TLVP_LOAD_PAGEOFF12": 9,
		"ARM64_RELOC_ADDEND":              10,
	}
)

// Enum returns a pointer to a new copy of x.
func (x RelocTypeARM64Enum) Enum() *RelocTypeARM64Enum {
	p := new(RelocTypeARM64Enum)
	*p = x
	return p
}

// String returns the protobuf name of the enum value.
func (x RelocTypeARM64Enum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor from the file's enum-type table.
func (RelocTypeARM64Enum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[5].Descriptor()
}

// Type returns the protoreflect enum type for this enum.
func (RelocTypeARM64Enum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[5]
}

// Number returns x as a protoreflect enum number.
func (x RelocTypeARM64Enum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use RelocTypeARM64Enum.Descriptor instead.
func (RelocTypeARM64Enum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// TypeEnum is a generated protobuf enum; its value names mirror
// debug/macho's file Type constants.
type TypeEnum int32

const (
	TypeEnum_TypeObj    TypeEnum = 0
	TypeEnum_TypeExec   TypeEnum = 1
	TypeEnum_TypeDylib  TypeEnum = 2
	TypeEnum_TypeBundle TypeEnum = 3
)

// Enum value maps for TypeEnum.
var (
	TypeEnum_name = map[int32]string{
		0: "TypeObj",
		1: "TypeExec",
		2: "TypeDylib",
		3: "TypeBundle",
	}
	TypeEnum_value = map[string]int32{
		"TypeObj":    0,
		"TypeExec":   1,
		"TypeDylib":  2,
		"TypeBundle": 3,
	}
)

// Enum returns a pointer to a new copy of x.
func (x TypeEnum) Enum() *TypeEnum {
	p := new(TypeEnum)
	*p = x
	return p
}

// String returns the protobuf name of the enum value.
func (x TypeEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor from the file's enum-type table.
func (TypeEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[6].Descriptor()
}

// Type returns the protoreflect enum type for this enum.
func (TypeEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[6]
}

// Number returns x as a protoreflect enum number.
func (x TypeEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use TypeEnum.Descriptor instead.
func (TypeEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// NewFatFileArgs is the generated protobuf message carrying the raw bytes
// argument for the fuzzed macho.NewFatFile call.
type NewFatFileArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *NewFatFileArgs) Reset() {
	*x = NewFatFileArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *NewFatFileArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewFatFileArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *NewFatFileArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewFatFileArgs.ProtoReflect.Descriptor instead.
func (*NewFatFileArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetR returns the raw input bytes, or nil for a nil receiver.
func (x *NewFatFileArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// OpenFatArgs is the generated protobuf message carrying the file-name
// argument for the fuzzed macho.OpenFat call.
type OpenFatArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *OpenFatArgs) Reset() {
	*x = OpenFatArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *OpenFatArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*OpenFatArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *OpenFatArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use OpenFatArgs.ProtoReflect.Descriptor instead.
func (*OpenFatArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetName returns the file name, or "" for a nil receiver.
func (x *OpenFatArgs) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// FatFileNgdotCloseArgs is the generated protobuf message for the fuzzed
// FatFile.Close call; the method takes no arguments, so it has no fields.
type FatFileNgdotCloseArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *FatFileNgdotCloseArgs) Reset() {
	*x = FatFileNgdotCloseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FatFileNgdotCloseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FatFileNgdotCloseArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *FatFileNgdotCloseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FatFileNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*FatFileNgdotCloseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// SegmentNgdotDataArgs is the generated protobuf message for the fuzzed
// Segment.Data call; the method takes no arguments, so it has no fields.
type SegmentNgdotDataArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *SegmentNgdotDataArgs) Reset() {
	*x = SegmentNgdotDataArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *SegmentNgdotDataArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SegmentNgdotDataArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *SegmentNgdotDataArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SegmentNgdotDataArgs.ProtoReflect.Descriptor instead.
func (*SegmentNgdotDataArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// SegmentNgdotOpenArgs is the generated protobuf message for the fuzzed
// Segment.Open call; the method takes no arguments, so it has no fields.
type SegmentNgdotOpenArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *SegmentNgdotOpenArgs) Reset() {
	*x = SegmentNgdotOpenArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *SegmentNgdotOpenArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SegmentNgdotOpenArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *SegmentNgdotOpenArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SegmentNgdotOpenArgs.ProtoReflect.Descriptor instead.
func (*SegmentNgdotOpenArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// SectionNgdotDataArgs is the generated protobuf message for the fuzzed
// Section.Data call; the method takes no arguments, so it has no fields.
type SectionNgdotDataArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *SectionNgdotDataArgs) Reset() {
	*x = SectionNgdotDataArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *SectionNgdotDataArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SectionNgdotDataArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *SectionNgdotDataArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SectionNgdotDataArgs.ProtoReflect.Descriptor instead.
func (*SectionNgdotDataArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// SectionNgdotOpenArgs is the generated protobuf message for the fuzzed
// Section.Open call; the method takes no arguments, so it has no fields.
type SectionNgdotOpenArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *SectionNgdotOpenArgs) Reset() {
	*x = SectionNgdotOpenArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *SectionNgdotOpenArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SectionNgdotOpenArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *SectionNgdotOpenArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SectionNgdotOpenArgs.ProtoReflect.Descriptor instead.
func (*SectionNgdotOpenArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// OpenArgs is the generated protobuf message carrying the file-name
// argument for the fuzzed macho.Open call.
type OpenArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *OpenArgs) Reset() {
	*x = OpenArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *OpenArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*OpenArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *OpenArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use OpenArgs.ProtoReflect.Descriptor instead.
func (*OpenArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// GetName returns the file name, or "" for a nil receiver.
func (x *OpenArgs) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// FileNgdotCloseArgs is the generated protobuf message for the fuzzed
// File.Close call; the method takes no arguments, so it has no fields.
type FileNgdotCloseArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *FileNgdotCloseArgs) Reset() {
	*x = FileNgdotCloseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FileNgdotCloseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FileNgdotCloseArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *FileNgdotCloseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FileNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotCloseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// NewFileArgs is the generated protobuf message carrying the raw bytes
// argument for the fuzzed macho.NewFile call.
type NewFileArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *NewFileArgs) Reset() {
	*x = NewFileArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *NewFileArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewFileArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *NewFileArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewFileArgs.ProtoReflect.Descriptor instead.
func (*NewFileArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetR returns the raw input bytes, or nil for a nil receiver.
func (x *NewFileArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// FileNgdotSegmentArgs is the generated protobuf message carrying the
// segment-name argument for the fuzzed File.Segment call.
type FileNgdotSegmentArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *FileNgdotSegmentArgs) Reset() {
	*x = FileNgdotSegmentArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FileNgdotSegmentArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FileNgdotSegmentArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *FileNgdotSegmentArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FileNgdotSegmentArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotSegmentArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetName returns the segment name, or "" for a nil receiver.
func (x *FileNgdotSegmentArgs) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// FileNgdotSectionArgs is the generated protobuf message carrying the
// section-name argument for the fuzzed File.Section call.
type FileNgdotSectionArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *FileNgdotSectionArgs) Reset() {
	*x = FileNgdotSectionArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FileNgdotSectionArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FileNgdotSectionArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *FileNgdotSectionArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FileNgdotSectionArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotSectionArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetName returns the section name, or "" for a nil receiver.
func (x *FileNgdotSectionArgs) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// FileNgdotDWARFArgs is the generated protobuf message for the fuzzed
// File.DWARF call; the method takes no arguments, so it has no fields.
type FileNgdotDWARFArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *FileNgdotDWARFArgs) Reset() {
	*x = FileNgdotDWARFArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FileNgdotDWARFArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FileNgdotDWARFArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *FileNgdotDWARFArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FileNgdotDWARFArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotDWARFArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// FileNgdotImportedSymbolsArgs is the generated protobuf message for the
// fuzzed File.ImportedSymbols call; no arguments, so no fields.
type FileNgdotImportedSymbolsArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *FileNgdotImportedSymbolsArgs) Reset() {
	*x = FileNgdotImportedSymbolsArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FileNgdotImportedSymbolsArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FileNgdotImportedSymbolsArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *FileNgdotImportedSymbolsArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FileNgdotImportedSymbolsArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotImportedSymbolsArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// FileNgdotImportedLibrariesArgs is the generated protobuf message for the
// fuzzed File.ImportedLibraries call; no arguments, so no fields.
type FileNgdotImportedLibrariesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its generated message info.
func (x *FileNgdotImportedLibrariesArgs) Reset() {
	*x = FileNgdotImportedLibrariesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FileNgdotImportedLibrariesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FileNgdotImportedLibrariesArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// installing the message info on first use.
func (x *FileNgdotImportedLibrariesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FileNgdotImportedLibrariesArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotImportedLibrariesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// TypeNgdotStringArgs is a generated protobuf message (index 15) carrying a
// single TypeEnum field t; presumably the receiver value for a fuzzed
// Type.String call — TODO confirm against ngolofuzz.proto. Generated code;
// do not edit by hand.
type TypeNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
T TypeEnum `protobuf:"varint,1,opt,name=t,proto3,enum=ngolofuzz.TypeEnum" json:"t,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *TypeNgdotStringArgs) Reset() {
*x = TypeNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *TypeNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*TypeNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *TypeNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TypeNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*TypeNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetT returns t, or the enum zero value TypeEnum_TypeObj on a nil receiver.
func (x *TypeNgdotStringArgs) GetT() TypeEnum {
if x != nil {
return x.T
}
return TypeEnum_TypeObj
}
// TypeNgdotGoStringArgs is a generated protobuf message (index 16) carrying a
// single TypeEnum field t; presumably the receiver value for a fuzzed
// Type.GoString call — TODO confirm against ngolofuzz.proto. Generated code;
// do not edit by hand.
type TypeNgdotGoStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
T TypeEnum `protobuf:"varint,1,opt,name=t,proto3,enum=ngolofuzz.TypeEnum" json:"t,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *TypeNgdotGoStringArgs) Reset() {
*x = TypeNgdotGoStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *TypeNgdotGoStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*TypeNgdotGoStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *TypeNgdotGoStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TypeNgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*TypeNgdotGoStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetT returns t, or the enum zero value TypeEnum_TypeObj on a nil receiver.
func (x *TypeNgdotGoStringArgs) GetT() TypeEnum {
if x != nil {
return x.T
}
return TypeEnum_TypeObj
}
// CpuNgdotStringArgs is a generated protobuf message (index 17) carrying a
// single CpuEnum field i; presumably the receiver value for a fuzzed
// Cpu.String call — TODO confirm against ngolofuzz.proto. Generated code;
// do not edit by hand.
type CpuNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I CpuEnum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.CpuEnum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *CpuNgdotStringArgs) Reset() {
*x = CpuNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *CpuNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*CpuNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *CpuNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CpuNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*CpuNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// GetI returns i, or the enum zero value CpuEnum_Cpu386 on a nil receiver.
func (x *CpuNgdotStringArgs) GetI() CpuEnum {
if x != nil {
return x.I
}
return CpuEnum_Cpu386
}
// CpuNgdotGoStringArgs is a generated protobuf message (index 18) carrying a
// single CpuEnum field i; presumably the receiver value for a fuzzed
// Cpu.GoString call — TODO confirm against ngolofuzz.proto. Generated code;
// do not edit by hand.
type CpuNgdotGoStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I CpuEnum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.CpuEnum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *CpuNgdotGoStringArgs) Reset() {
*x = CpuNgdotGoStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *CpuNgdotGoStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*CpuNgdotGoStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *CpuNgdotGoStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CpuNgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*CpuNgdotGoStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// GetI returns i, or the enum zero value CpuEnum_Cpu386 on a nil receiver.
func (x *CpuNgdotGoStringArgs) GetI() CpuEnum {
if x != nil {
return x.I
}
return CpuEnum_Cpu386
}
// LoadCmdNgdotStringArgs is a generated protobuf message (index 19) carrying
// a single LoadCmdEnum field i; presumably the receiver value for a fuzzed
// LoadCmd.String call — TODO confirm against ngolofuzz.proto. Generated
// code; do not edit by hand.
type LoadCmdNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I LoadCmdEnum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.LoadCmdEnum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *LoadCmdNgdotStringArgs) Reset() {
*x = LoadCmdNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *LoadCmdNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*LoadCmdNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *LoadCmdNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LoadCmdNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*LoadCmdNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// GetI returns i, or the enum zero value LoadCmdEnum_LoadCmdSegment on a nil
// receiver.
func (x *LoadCmdNgdotStringArgs) GetI() LoadCmdEnum {
if x != nil {
return x.I
}
return LoadCmdEnum_LoadCmdSegment
}
// LoadCmdNgdotGoStringArgs is a generated protobuf message (index 20)
// carrying a single LoadCmdEnum field i; presumably the receiver value for a
// fuzzed LoadCmd.GoString call — TODO confirm against ngolofuzz.proto.
// Generated code; do not edit by hand.
type LoadCmdNgdotGoStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I LoadCmdEnum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.LoadCmdEnum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *LoadCmdNgdotGoStringArgs) Reset() {
*x = LoadCmdNgdotGoStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *LoadCmdNgdotGoStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*LoadCmdNgdotGoStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *LoadCmdNgdotGoStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LoadCmdNgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*LoadCmdNgdotGoStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// GetI returns i, or the enum zero value LoadCmdEnum_LoadCmdSegment on a nil
// receiver.
func (x *LoadCmdNgdotGoStringArgs) GetI() LoadCmdEnum {
if x != nil {
return x.I
}
return LoadCmdEnum_LoadCmdSegment
}
// RelocTypeGenericNgdotGoStringArgs is a generated protobuf message
// (index 21) carrying a single RelocTypeGenericEnum field r; presumably the
// receiver value for a fuzzed RelocTypeGeneric.GoString call — TODO confirm
// against ngolofuzz.proto. Generated code; do not edit by hand.
type RelocTypeGenericNgdotGoStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R RelocTypeGenericEnum `protobuf:"varint,1,opt,name=r,proto3,enum=ngolofuzz.RelocTypeGenericEnum" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *RelocTypeGenericNgdotGoStringArgs) Reset() {
*x = RelocTypeGenericNgdotGoStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *RelocTypeGenericNgdotGoStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*RelocTypeGenericNgdotGoStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *RelocTypeGenericNgdotGoStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RelocTypeGenericNgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*RelocTypeGenericNgdotGoStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// GetR returns r, or the enum zero value GENERIC_RELOC_VANILLA on a nil
// receiver.
func (x *RelocTypeGenericNgdotGoStringArgs) GetR() RelocTypeGenericEnum {
if x != nil {
return x.R
}
return RelocTypeGenericEnum_GENERIC_RELOC_VANILLA
}
// RelocTypeX86_64NgdotGoStringArgs is a generated protobuf message (index 22)
// carrying a single RelocTypeX86_64Enum field r; presumably the receiver
// value for a fuzzed RelocTypeX86_64.GoString call — TODO confirm against
// ngolofuzz.proto. Generated code; do not edit by hand.
type RelocTypeX86_64NgdotGoStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R RelocTypeX86_64Enum `protobuf:"varint,1,opt,name=r,proto3,enum=ngolofuzz.RelocTypeX86_64Enum" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *RelocTypeX86_64NgdotGoStringArgs) Reset() {
*x = RelocTypeX86_64NgdotGoStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *RelocTypeX86_64NgdotGoStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*RelocTypeX86_64NgdotGoStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *RelocTypeX86_64NgdotGoStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RelocTypeX86_64NgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*RelocTypeX86_64NgdotGoStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// GetR returns r, or the enum zero value X86_64_RELOC_UNSIGNED on a nil
// receiver.
func (x *RelocTypeX86_64NgdotGoStringArgs) GetR() RelocTypeX86_64Enum {
if x != nil {
return x.R
}
return RelocTypeX86_64Enum_X86_64_RELOC_UNSIGNED
}
// RelocTypeARMNgdotGoStringArgs is a generated protobuf message (index 23)
// carrying a single RelocTypeARMEnum field r; presumably the receiver value
// for a fuzzed RelocTypeARM.GoString call — TODO confirm against
// ngolofuzz.proto. Generated code; do not edit by hand.
type RelocTypeARMNgdotGoStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R RelocTypeARMEnum `protobuf:"varint,1,opt,name=r,proto3,enum=ngolofuzz.RelocTypeARMEnum" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *RelocTypeARMNgdotGoStringArgs) Reset() {
*x = RelocTypeARMNgdotGoStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *RelocTypeARMNgdotGoStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*RelocTypeARMNgdotGoStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *RelocTypeARMNgdotGoStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RelocTypeARMNgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*RelocTypeARMNgdotGoStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// GetR returns r, or the enum zero value ARM_RELOC_VANILLA on a nil receiver.
func (x *RelocTypeARMNgdotGoStringArgs) GetR() RelocTypeARMEnum {
if x != nil {
return x.R
}
return RelocTypeARMEnum_ARM_RELOC_VANILLA
}
// RelocTypeARM64NgdotGoStringArgs is a generated protobuf message (index 24)
// carrying a single RelocTypeARM64Enum field r; presumably the receiver
// value for a fuzzed RelocTypeARM64.GoString call — TODO confirm against
// ngolofuzz.proto. Generated code; do not edit by hand.
type RelocTypeARM64NgdotGoStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R RelocTypeARM64Enum `protobuf:"varint,1,opt,name=r,proto3,enum=ngolofuzz.RelocTypeARM64Enum" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *RelocTypeARM64NgdotGoStringArgs) Reset() {
*x = RelocTypeARM64NgdotGoStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *RelocTypeARM64NgdotGoStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*RelocTypeARM64NgdotGoStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *RelocTypeARM64NgdotGoStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RelocTypeARM64NgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*RelocTypeARM64NgdotGoStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// GetR returns r, or the enum zero value ARM64_RELOC_UNSIGNED on a nil
// receiver.
func (x *RelocTypeARM64NgdotGoStringArgs) GetR() RelocTypeARM64Enum {
if x != nil {
return x.R
}
return RelocTypeARM64Enum_ARM64_RELOC_UNSIGNED
}
// RelocTypeGenericNgdotStringArgs is a generated protobuf message (index 25)
// carrying a single RelocTypeGenericEnum field i; presumably the receiver
// value for a fuzzed RelocTypeGeneric.String call — TODO confirm against
// ngolofuzz.proto. Generated code; do not edit by hand.
type RelocTypeGenericNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I RelocTypeGenericEnum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.RelocTypeGenericEnum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *RelocTypeGenericNgdotStringArgs) Reset() {
*x = RelocTypeGenericNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *RelocTypeGenericNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*RelocTypeGenericNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *RelocTypeGenericNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RelocTypeGenericNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*RelocTypeGenericNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// GetI returns i, or the enum zero value GENERIC_RELOC_VANILLA on a nil
// receiver.
func (x *RelocTypeGenericNgdotStringArgs) GetI() RelocTypeGenericEnum {
if x != nil {
return x.I
}
return RelocTypeGenericEnum_GENERIC_RELOC_VANILLA
}
// RelocTypeX86_64NgdotStringArgs is a generated protobuf message (index 26)
// carrying a single RelocTypeX86_64Enum field i; presumably the receiver
// value for a fuzzed RelocTypeX86_64.String call — TODO confirm against
// ngolofuzz.proto. Generated code; do not edit by hand.
type RelocTypeX86_64NgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I RelocTypeX86_64Enum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.RelocTypeX86_64Enum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *RelocTypeX86_64NgdotStringArgs) Reset() {
*x = RelocTypeX86_64NgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *RelocTypeX86_64NgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*RelocTypeX86_64NgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *RelocTypeX86_64NgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RelocTypeX86_64NgdotStringArgs.ProtoReflect.Descriptor instead.
func (*RelocTypeX86_64NgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// GetI returns i, or the enum zero value X86_64_RELOC_UNSIGNED on a nil
// receiver.
func (x *RelocTypeX86_64NgdotStringArgs) GetI() RelocTypeX86_64Enum {
if x != nil {
return x.I
}
return RelocTypeX86_64Enum_X86_64_RELOC_UNSIGNED
}
// RelocTypeARMNgdotStringArgs is a generated protobuf message (index 27)
// carrying a single RelocTypeARMEnum field i; presumably the receiver value
// for a fuzzed RelocTypeARM.String call — TODO confirm against
// ngolofuzz.proto. Generated code; do not edit by hand.
type RelocTypeARMNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I RelocTypeARMEnum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.RelocTypeARMEnum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *RelocTypeARMNgdotStringArgs) Reset() {
*x = RelocTypeARMNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *RelocTypeARMNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*RelocTypeARMNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *RelocTypeARMNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RelocTypeARMNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*RelocTypeARMNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// GetI returns i, or the enum zero value ARM_RELOC_VANILLA on a nil receiver.
func (x *RelocTypeARMNgdotStringArgs) GetI() RelocTypeARMEnum {
if x != nil {
return x.I
}
return RelocTypeARMEnum_ARM_RELOC_VANILLA
}
// RelocTypeARM64NgdotStringArgs is a generated protobuf message (index 28)
// carrying a single RelocTypeARM64Enum field i; presumably the receiver
// value for a fuzzed RelocTypeARM64.String call — TODO confirm against
// ngolofuzz.proto. Generated code; do not edit by hand.
type RelocTypeARM64NgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I RelocTypeARM64Enum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.RelocTypeARM64Enum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *RelocTypeARM64NgdotStringArgs) Reset() {
*x = RelocTypeARM64NgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *RelocTypeARM64NgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*RelocTypeARM64NgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *RelocTypeARM64NgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RelocTypeARM64NgdotStringArgs.ProtoReflect.Descriptor instead.
func (*RelocTypeARM64NgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
// GetI returns i, or the enum zero value ARM64_RELOC_UNSIGNED on a nil
// receiver.
func (x *RelocTypeARM64NgdotStringArgs) GetI() RelocTypeARM64Enum {
if x != nil {
return x.I
}
return RelocTypeARM64Enum_ARM64_RELOC_UNSIGNED
}
// NgoloFuzzOne is the generated message (index 29) selecting exactly one
// fuzzed operation: its Item oneof holds the *Args message for whichever of
// the 29 variants below the fuzzer chose. Generated code; do not edit by
// hand.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewFatFile
// *NgoloFuzzOne_OpenFat
// *NgoloFuzzOne_FatFileNgdotClose
// *NgoloFuzzOne_SegmentNgdotData
// *NgoloFuzzOne_SegmentNgdotOpen
// *NgoloFuzzOne_SectionNgdotData
// *NgoloFuzzOne_SectionNgdotOpen
// *NgoloFuzzOne_Open
// *NgoloFuzzOne_FileNgdotClose
// *NgoloFuzzOne_NewFile
// *NgoloFuzzOne_FileNgdotSegment
// *NgoloFuzzOne_FileNgdotSection
// *NgoloFuzzOne_FileNgdotDWARF
// *NgoloFuzzOne_FileNgdotImportedSymbols
// *NgoloFuzzOne_FileNgdotImportedLibraries
// *NgoloFuzzOne_TypeNgdotString
// *NgoloFuzzOne_TypeNgdotGoString
// *NgoloFuzzOne_CpuNgdotString
// *NgoloFuzzOne_CpuNgdotGoString
// *NgoloFuzzOne_LoadCmdNgdotString
// *NgoloFuzzOne_LoadCmdNgdotGoString
// *NgoloFuzzOne_RelocTypeGenericNgdotGoString
// *NgoloFuzzOne_RelocTypeX86_64NgdotGoString
// *NgoloFuzzOne_RelocTypeARMNgdotGoString
// *NgoloFuzzOne_RelocTypeARM64NgdotGoString
// *NgoloFuzzOne_RelocTypeGenericNgdotString
// *NgoloFuzzOne_RelocTypeX86_64NgdotString
// *NgoloFuzzOne_RelocTypeARMNgdotString
// *NgoloFuzzOne_RelocTypeARM64NgdotString
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
// GetItem returns the current oneof wrapper, or nil on a nil receiver.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Oneof accessors: each Get* below returns its payload only when Item
// currently holds the matching wrapper type, and nil otherwise; all are
// safe to call on a nil receiver.
func (x *NgoloFuzzOne) GetNewFatFile() *NewFatFileArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewFatFile); ok {
return x.NewFatFile
}
}
return nil
}
func (x *NgoloFuzzOne) GetOpenFat() *OpenFatArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_OpenFat); ok {
return x.OpenFat
}
}
return nil
}
func (x *NgoloFuzzOne) GetFatFileNgdotClose() *FatFileNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FatFileNgdotClose); ok {
return x.FatFileNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetSegmentNgdotData() *SegmentNgdotDataArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SegmentNgdotData); ok {
return x.SegmentNgdotData
}
}
return nil
}
func (x *NgoloFuzzOne) GetSegmentNgdotOpen() *SegmentNgdotOpenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SegmentNgdotOpen); ok {
return x.SegmentNgdotOpen
}
}
return nil
}
func (x *NgoloFuzzOne) GetSectionNgdotData() *SectionNgdotDataArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionNgdotData); ok {
return x.SectionNgdotData
}
}
return nil
}
func (x *NgoloFuzzOne) GetSectionNgdotOpen() *SectionNgdotOpenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionNgdotOpen); ok {
return x.SectionNgdotOpen
}
}
return nil
}
func (x *NgoloFuzzOne) GetOpen() *OpenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Open); ok {
return x.Open
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotClose() *FileNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotClose); ok {
return x.FileNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewFile() *NewFileArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewFile); ok {
return x.NewFile
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotSegment() *FileNgdotSegmentArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotSegment); ok {
return x.FileNgdotSegment
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotSection() *FileNgdotSectionArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotSection); ok {
return x.FileNgdotSection
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotDWARF() *FileNgdotDWARFArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotDWARF); ok {
return x.FileNgdotDWARF
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotImportedSymbols() *FileNgdotImportedSymbolsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotImportedSymbols); ok {
return x.FileNgdotImportedSymbols
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotImportedLibraries() *FileNgdotImportedLibrariesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotImportedLibraries); ok {
return x.FileNgdotImportedLibraries
}
}
return nil
}
func (x *NgoloFuzzOne) GetTypeNgdotString() *TypeNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TypeNgdotString); ok {
return x.TypeNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetTypeNgdotGoString() *TypeNgdotGoStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TypeNgdotGoString); ok {
return x.TypeNgdotGoString
}
}
return nil
}
func (x *NgoloFuzzOne) GetCpuNgdotString() *CpuNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CpuNgdotString); ok {
return x.CpuNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetCpuNgdotGoString() *CpuNgdotGoStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CpuNgdotGoString); ok {
return x.CpuNgdotGoString
}
}
return nil
}
func (x *NgoloFuzzOne) GetLoadCmdNgdotString() *LoadCmdNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LoadCmdNgdotString); ok {
return x.LoadCmdNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetLoadCmdNgdotGoString() *LoadCmdNgdotGoStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LoadCmdNgdotGoString); ok {
return x.LoadCmdNgdotGoString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRelocTypeGenericNgdotGoString() *RelocTypeGenericNgdotGoStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RelocTypeGenericNgdotGoString); ok {
return x.RelocTypeGenericNgdotGoString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRelocTypeX86_64NgdotGoString() *RelocTypeX86_64NgdotGoStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RelocTypeX86_64NgdotGoString); ok {
return x.RelocTypeX86_64NgdotGoString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRelocTypeARMNgdotGoString() *RelocTypeARMNgdotGoStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RelocTypeARMNgdotGoString); ok {
return x.RelocTypeARMNgdotGoString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRelocTypeARM64NgdotGoString() *RelocTypeARM64NgdotGoStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RelocTypeARM64NgdotGoString); ok {
return x.RelocTypeARM64NgdotGoString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRelocTypeGenericNgdotString() *RelocTypeGenericNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RelocTypeGenericNgdotString); ok {
return x.RelocTypeGenericNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRelocTypeX86_64NgdotString() *RelocTypeX86_64NgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RelocTypeX86_64NgdotString); ok {
return x.RelocTypeX86_64NgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRelocTypeARMNgdotString() *RelocTypeARMNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RelocTypeARMNgdotString); ok {
return x.RelocTypeARMNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRelocTypeARM64NgdotString() *RelocTypeARM64NgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RelocTypeARM64NgdotString); ok {
return x.RelocTypeARM64NgdotString
}
}
return nil
}
// isNgoloFuzzOne_Item is the closed interface implemented by every oneof
// wrapper type below; only those wrappers may be assigned to Item.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// Wrapper types for the Item oneof. Each carries exactly one field whose
// protobuf tag fixes its wire number (1-29).
type NgoloFuzzOne_NewFatFile struct {
NewFatFile *NewFatFileArgs `protobuf:"bytes,1,opt,name=NewFatFile,proto3,oneof"`
}
type NgoloFuzzOne_OpenFat struct {
OpenFat *OpenFatArgs `protobuf:"bytes,2,opt,name=OpenFat,proto3,oneof"`
}
type NgoloFuzzOne_FatFileNgdotClose struct {
FatFileNgdotClose *FatFileNgdotCloseArgs `protobuf:"bytes,3,opt,name=FatFileNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_SegmentNgdotData struct {
SegmentNgdotData *SegmentNgdotDataArgs `protobuf:"bytes,4,opt,name=SegmentNgdotData,proto3,oneof"`
}
type NgoloFuzzOne_SegmentNgdotOpen struct {
SegmentNgdotOpen *SegmentNgdotOpenArgs `protobuf:"bytes,5,opt,name=SegmentNgdotOpen,proto3,oneof"`
}
type NgoloFuzzOne_SectionNgdotData struct {
SectionNgdotData *SectionNgdotDataArgs `protobuf:"bytes,6,opt,name=SectionNgdotData,proto3,oneof"`
}
type NgoloFuzzOne_SectionNgdotOpen struct {
SectionNgdotOpen *SectionNgdotOpenArgs `protobuf:"bytes,7,opt,name=SectionNgdotOpen,proto3,oneof"`
}
type NgoloFuzzOne_Open struct {
Open *OpenArgs `protobuf:"bytes,8,opt,name=Open,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotClose struct {
FileNgdotClose *FileNgdotCloseArgs `protobuf:"bytes,9,opt,name=FileNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_NewFile struct {
NewFile *NewFileArgs `protobuf:"bytes,10,opt,name=NewFile,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotSegment struct {
FileNgdotSegment *FileNgdotSegmentArgs `protobuf:"bytes,11,opt,name=FileNgdotSegment,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotSection struct {
FileNgdotSection *FileNgdotSectionArgs `protobuf:"bytes,12,opt,name=FileNgdotSection,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotDWARF struct {
FileNgdotDWARF *FileNgdotDWARFArgs `protobuf:"bytes,13,opt,name=FileNgdotDWARF,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotImportedSymbols struct {
FileNgdotImportedSymbols *FileNgdotImportedSymbolsArgs `protobuf:"bytes,14,opt,name=FileNgdotImportedSymbols,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotImportedLibraries struct {
FileNgdotImportedLibraries *FileNgdotImportedLibrariesArgs `protobuf:"bytes,15,opt,name=FileNgdotImportedLibraries,proto3,oneof"`
}
type NgoloFuzzOne_TypeNgdotString struct {
TypeNgdotString *TypeNgdotStringArgs `protobuf:"bytes,16,opt,name=TypeNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_TypeNgdotGoString struct {
TypeNgdotGoString *TypeNgdotGoStringArgs `protobuf:"bytes,17,opt,name=TypeNgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_CpuNgdotString struct {
CpuNgdotString *CpuNgdotStringArgs `protobuf:"bytes,18,opt,name=CpuNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_CpuNgdotGoString struct {
CpuNgdotGoString *CpuNgdotGoStringArgs `protobuf:"bytes,19,opt,name=CpuNgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_LoadCmdNgdotString struct {
LoadCmdNgdotString *LoadCmdNgdotStringArgs `protobuf:"bytes,20,opt,name=LoadCmdNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_LoadCmdNgdotGoString struct {
LoadCmdNgdotGoString *LoadCmdNgdotGoStringArgs `protobuf:"bytes,21,opt,name=LoadCmdNgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_RelocTypeGenericNgdotGoString struct {
RelocTypeGenericNgdotGoString *RelocTypeGenericNgdotGoStringArgs `protobuf:"bytes,22,opt,name=RelocTypeGenericNgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_RelocTypeX86_64NgdotGoString struct {
RelocTypeX86_64NgdotGoString *RelocTypeX86_64NgdotGoStringArgs `protobuf:"bytes,23,opt,name=RelocTypeX86_64NgdotGoString,json=RelocTypeX8664NgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_RelocTypeARMNgdotGoString struct {
RelocTypeARMNgdotGoString *RelocTypeARMNgdotGoStringArgs `protobuf:"bytes,24,opt,name=RelocTypeARMNgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_RelocTypeARM64NgdotGoString struct {
RelocTypeARM64NgdotGoString *RelocTypeARM64NgdotGoStringArgs `protobuf:"bytes,25,opt,name=RelocTypeARM64NgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_RelocTypeGenericNgdotString struct {
RelocTypeGenericNgdotString *RelocTypeGenericNgdotStringArgs `protobuf:"bytes,26,opt,name=RelocTypeGenericNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_RelocTypeX86_64NgdotString struct {
RelocTypeX86_64NgdotString *RelocTypeX86_64NgdotStringArgs `protobuf:"bytes,27,opt,name=RelocTypeX86_64NgdotString,json=RelocTypeX8664NgdotString,proto3,oneof"`
}
type NgoloFuzzOne_RelocTypeARMNgdotString struct {
RelocTypeARMNgdotString *RelocTypeARMNgdotStringArgs `protobuf:"bytes,28,opt,name=RelocTypeARMNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_RelocTypeARM64NgdotString struct {
RelocTypeARM64NgdotString *RelocTypeARM64NgdotStringArgs `protobuf:"bytes,29,opt,name=RelocTypeARM64NgdotString,proto3,oneof"`
}
// Marker methods tying each wrapper type to the isNgoloFuzzOne_Item
// interface; they have no behavior.
func (*NgoloFuzzOne_NewFatFile) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_OpenFat) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FatFileNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SegmentNgdotData) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SegmentNgdotOpen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SectionNgdotData) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SectionNgdotOpen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Open) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewFile) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotSegment) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotSection) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotDWARF) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotImportedSymbols) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotImportedLibraries) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TypeNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TypeNgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CpuNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CpuNgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoadCmdNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoadCmdNgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RelocTypeGenericNgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RelocTypeX86_64NgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RelocTypeARMNgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RelocTypeARM64NgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RelocTypeGenericNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RelocTypeX86_64NgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RelocTypeARMNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RelocTypeARM64NgdotString) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the generated message (index 30) holding one arbitrary
// scalar value for the fuzzer: its Item oneof carries a double, int64, bool,
// string, or bytes payload. Generated code; do not edit by hand.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via protoimpl's standard text formatter.
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker identifying this type as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching message info for
// non-nil receivers.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}
// GetItem returns the current oneof wrapper, or nil on a nil receiver.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Oneof accessors: each Get* returns its payload only when Item currently
// holds the matching variant, and the Go zero value otherwise; all are
// nil-receiver safe.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the closed interface implemented by the scalar
// oneof wrapper types below.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
// Wrapper types for the Item oneof; each carries one scalar field with its
// wire number (1-5).
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level generated message: an ordered sequence of
// NgoloFuzzOne operations that the fuzz harness replays in order.
// Machine-generated by protoc-gen-go; do not edit.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its generated message info.
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect exposes the message via the protoreflect API.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
// GetList returns the operation sequence, or nil on a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the runtime descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-encoded FileDescriptorProto for
// ngolofuzz.proto. Opaque generated data — never modify the bytes below.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1e\n" +
"\x0eNewFatFileArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"!\n" +
"\vOpenFatArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x17\n" +
"\x15FatFileNgdotCloseArgs\"\x16\n" +
"\x14SegmentNgdotDataArgs\"\x16\n" +
"\x14SegmentNgdotOpenArgs\"\x16\n" +
"\x14SectionNgdotDataArgs\"\x16\n" +
"\x14SectionNgdotOpenArgs\"\x1e\n" +
"\bOpenArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x14\n" +
"\x12FileNgdotCloseArgs\"\x1b\n" +
"\vNewFileArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"*\n" +
"\x14FileNgdotSegmentArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"*\n" +
"\x14FileNgdotSectionArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x14\n" +
"\x12FileNgdotDWARFArgs\"\x1e\n" +
"\x1cFileNgdotImportedSymbolsArgs\" \n" +
"\x1eFileNgdotImportedLibrariesArgs\"8\n" +
"\x13TypeNgdotStringArgs\x12!\n" +
"\x01t\x18\x01 \x01(\x0e2\x13.ngolofuzz.TypeEnumR\x01t\":\n" +
"\x15TypeNgdotGoStringArgs\x12!\n" +
"\x01t\x18\x01 \x01(\x0e2\x13.ngolofuzz.TypeEnumR\x01t\"6\n" +
"\x12CpuNgdotStringArgs\x12 \n" +
"\x01i\x18\x01 \x01(\x0e2\x12.ngolofuzz.CpuEnumR\x01i\"8\n" +
"\x14CpuNgdotGoStringArgs\x12 \n" +
"\x01i\x18\x01 \x01(\x0e2\x12.ngolofuzz.CpuEnumR\x01i\">\n" +
"\x16LoadCmdNgdotStringArgs\x12$\n" +
"\x01i\x18\x01 \x01(\x0e2\x16.ngolofuzz.LoadCmdEnumR\x01i\"@\n" +
"\x18LoadCmdNgdotGoStringArgs\x12$\n" +
"\x01i\x18\x01 \x01(\x0e2\x16.ngolofuzz.LoadCmdEnumR\x01i\"R\n" +
"!RelocTypeGenericNgdotGoStringArgs\x12-\n" +
"\x01r\x18\x01 \x01(\x0e2\x1f.ngolofuzz.RelocTypeGenericEnumR\x01r\"P\n" +
" RelocTypeX86_64NgdotGoStringArgs\x12,\n" +
"\x01r\x18\x01 \x01(\x0e2\x1e.ngolofuzz.RelocTypeX86_64EnumR\x01r\"J\n" +
"\x1dRelocTypeARMNgdotGoStringArgs\x12)\n" +
"\x01r\x18\x01 \x01(\x0e2\x1b.ngolofuzz.RelocTypeARMEnumR\x01r\"N\n" +
"\x1fRelocTypeARM64NgdotGoStringArgs\x12+\n" +
"\x01r\x18\x01 \x01(\x0e2\x1d.ngolofuzz.RelocTypeARM64EnumR\x01r\"P\n" +
"\x1fRelocTypeGenericNgdotStringArgs\x12-\n" +
"\x01i\x18\x01 \x01(\x0e2\x1f.ngolofuzz.RelocTypeGenericEnumR\x01i\"N\n" +
"\x1eRelocTypeX86_64NgdotStringArgs\x12,\n" +
"\x01i\x18\x01 \x01(\x0e2\x1e.ngolofuzz.RelocTypeX86_64EnumR\x01i\"H\n" +
"\x1bRelocTypeARMNgdotStringArgs\x12)\n" +
"\x01i\x18\x01 \x01(\x0e2\x1b.ngolofuzz.RelocTypeARMEnumR\x01i\"L\n" +
"\x1dRelocTypeARM64NgdotStringArgs\x12+\n" +
"\x01i\x18\x01 \x01(\x0e2\x1d.ngolofuzz.RelocTypeARM64EnumR\x01i\"\xca\x13\n" +
"\fNgoloFuzzOne\x12;\n" +
"\n" +
"NewFatFile\x18\x01 \x01(\v2\x19.ngolofuzz.NewFatFileArgsH\x00R\n" +
"NewFatFile\x122\n" +
"\aOpenFat\x18\x02 \x01(\v2\x16.ngolofuzz.OpenFatArgsH\x00R\aOpenFat\x12P\n" +
"\x11FatFileNgdotClose\x18\x03 \x01(\v2 .ngolofuzz.FatFileNgdotCloseArgsH\x00R\x11FatFileNgdotClose\x12M\n" +
"\x10SegmentNgdotData\x18\x04 \x01(\v2\x1f.ngolofuzz.SegmentNgdotDataArgsH\x00R\x10SegmentNgdotData\x12M\n" +
"\x10SegmentNgdotOpen\x18\x05 \x01(\v2\x1f.ngolofuzz.SegmentNgdotOpenArgsH\x00R\x10SegmentNgdotOpen\x12M\n" +
"\x10SectionNgdotData\x18\x06 \x01(\v2\x1f.ngolofuzz.SectionNgdotDataArgsH\x00R\x10SectionNgdotData\x12M\n" +
"\x10SectionNgdotOpen\x18\a \x01(\v2\x1f.ngolofuzz.SectionNgdotOpenArgsH\x00R\x10SectionNgdotOpen\x12)\n" +
"\x04Open\x18\b \x01(\v2\x13.ngolofuzz.OpenArgsH\x00R\x04Open\x12G\n" +
"\x0eFileNgdotClose\x18\t \x01(\v2\x1d.ngolofuzz.FileNgdotCloseArgsH\x00R\x0eFileNgdotClose\x122\n" +
"\aNewFile\x18\n" +
" \x01(\v2\x16.ngolofuzz.NewFileArgsH\x00R\aNewFile\x12M\n" +
"\x10FileNgdotSegment\x18\v \x01(\v2\x1f.ngolofuzz.FileNgdotSegmentArgsH\x00R\x10FileNgdotSegment\x12M\n" +
"\x10FileNgdotSection\x18\f \x01(\v2\x1f.ngolofuzz.FileNgdotSectionArgsH\x00R\x10FileNgdotSection\x12G\n" +
"\x0eFileNgdotDWARF\x18\r \x01(\v2\x1d.ngolofuzz.FileNgdotDWARFArgsH\x00R\x0eFileNgdotDWARF\x12e\n" +
"\x18FileNgdotImportedSymbols\x18\x0e \x01(\v2'.ngolofuzz.FileNgdotImportedSymbolsArgsH\x00R\x18FileNgdotImportedSymbols\x12k\n" +
"\x1aFileNgdotImportedLibraries\x18\x0f \x01(\v2).ngolofuzz.FileNgdotImportedLibrariesArgsH\x00R\x1aFileNgdotImportedLibraries\x12J\n" +
"\x0fTypeNgdotString\x18\x10 \x01(\v2\x1e.ngolofuzz.TypeNgdotStringArgsH\x00R\x0fTypeNgdotString\x12P\n" +
"\x11TypeNgdotGoString\x18\x11 \x01(\v2 .ngolofuzz.TypeNgdotGoStringArgsH\x00R\x11TypeNgdotGoString\x12G\n" +
"\x0eCpuNgdotString\x18\x12 \x01(\v2\x1d.ngolofuzz.CpuNgdotStringArgsH\x00R\x0eCpuNgdotString\x12M\n" +
"\x10CpuNgdotGoString\x18\x13 \x01(\v2\x1f.ngolofuzz.CpuNgdotGoStringArgsH\x00R\x10CpuNgdotGoString\x12S\n" +
"\x12LoadCmdNgdotString\x18\x14 \x01(\v2!.ngolofuzz.LoadCmdNgdotStringArgsH\x00R\x12LoadCmdNgdotString\x12Y\n" +
"\x14LoadCmdNgdotGoString\x18\x15 \x01(\v2#.ngolofuzz.LoadCmdNgdotGoStringArgsH\x00R\x14LoadCmdNgdotGoString\x12t\n" +
"\x1dRelocTypeGenericNgdotGoString\x18\x16 \x01(\v2,.ngolofuzz.RelocTypeGenericNgdotGoStringArgsH\x00R\x1dRelocTypeGenericNgdotGoString\x12p\n" +
"\x1cRelocTypeX86_64NgdotGoString\x18\x17 \x01(\v2+.ngolofuzz.RelocTypeX86_64NgdotGoStringArgsH\x00R\x1bRelocTypeX8664NgdotGoString\x12h\n" +
"\x19RelocTypeARMNgdotGoString\x18\x18 \x01(\v2(.ngolofuzz.RelocTypeARMNgdotGoStringArgsH\x00R\x19RelocTypeARMNgdotGoString\x12n\n" +
"\x1bRelocTypeARM64NgdotGoString\x18\x19 \x01(\v2*.ngolofuzz.RelocTypeARM64NgdotGoStringArgsH\x00R\x1bRelocTypeARM64NgdotGoString\x12n\n" +
"\x1bRelocTypeGenericNgdotString\x18\x1a \x01(\v2*.ngolofuzz.RelocTypeGenericNgdotStringArgsH\x00R\x1bRelocTypeGenericNgdotString\x12j\n" +
"\x1aRelocTypeX86_64NgdotString\x18\x1b \x01(\v2).ngolofuzz.RelocTypeX86_64NgdotStringArgsH\x00R\x19RelocTypeX8664NgdotString\x12b\n" +
"\x17RelocTypeARMNgdotString\x18\x1c \x01(\v2&.ngolofuzz.RelocTypeARMNgdotStringArgsH\x00R\x17RelocTypeARMNgdotString\x12h\n" +
"\x19RelocTypeARM64NgdotString\x18\x1d \x01(\v2(.ngolofuzz.RelocTypeARM64NgdotStringArgsH\x00R\x19RelocTypeARM64NgdotStringB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*\xbb\x01\n" +
"\x14RelocTypeGenericEnum\x12\x19\n" +
"\x15GENERIC_RELOC_VANILLA\x10\x00\x12\x16\n" +
"\x12GENERIC_RELOC_PAIR\x10\x01\x12\x1a\n" +
"\x16GENERIC_RELOC_SECTDIFF\x10\x02\x12\x1b\n" +
"\x17GENERIC_RELOC_PB_LA_PTR\x10\x03\x12 \n" +
"\x1cGENERIC_RELOC_LOCAL_SECTDIFF\x10\x04\x12\x15\n" +
"\x11GENERIC_RELOC_TLV\x10\x05*\x97\x02\n" +
"\x13RelocTypeX86_64Enum\x12\x19\n" +
"\x15X86_64_RELOC_UNSIGNED\x10\x00\x12\x17\n" +
"\x13X86_64_RELOC_SIGNED\x10\x01\x12\x17\n" +
"\x13X86_64_RELOC_BRANCH\x10\x02\x12\x19\n" +
"\x15X86_64_RELOC_GOT_LOAD\x10\x03\x12\x14\n" +
"\x10X86_64_RELOC_GOT\x10\x04\x12\x1b\n" +
"\x17X86_64_RELOC_SUBTRACTOR\x10\x05\x12\x19\n" +
"\x15X86_64_RELOC_SIGNED_1\x10\x06\x12\x19\n" +
"\x15X86_64_RELOC_SIGNED_2\x10\a\x12\x19\n" +
"\x15X86_64_RELOC_SIGNED_4\x10\b\x12\x14\n" +
"\x10X86_64_RELOC_TLV\x10\t*\x87\x02\n" +
"\x10RelocTypeARMEnum\x12\x15\n" +
"\x11ARM_RELOC_VANILLA\x10\x00\x12\x12\n" +
"\x0eARM_RELOC_PAIR\x10\x01\x12\x16\n" +
"\x12ARM_RELOC_SECTDIFF\x10\x02\x12\x1c\n" +
"\x18ARM_RELOC_LOCAL_SECTDIFF\x10\x03\x12\x17\n" +
"\x13ARM_RELOC_PB_LA_PTR\x10\x04\x12\x12\n" +
"\x0eARM_RELOC_BR24\x10\x05\x12\x18\n" +
"\x14ARM_THUMB_RELOC_BR22\x10\x06\x12\x1a\n" +
"\x16ARM_THUMB_32BIT_BRANCH\x10\a\x12\x12\n" +
"\x0eARM_RELOC_HALF\x10\b\x12\x1b\n" +
"\x17ARM_RELOC_HALF_SECTDIFF\x10\t*\xc2\x01\n" +
"\vLoadCmdEnum\x12\x12\n" +
"\x0eLoadCmdSegment\x10\x00\x12\x11\n" +
"\rLoadCmdSymtab\x10\x01\x12\x11\n" +
"\rLoadCmdThread\x10\x02\x12\x15\n" +
"\x11LoadCmdUnixThread\x10\x03\x12\x13\n" +
"\x0fLoadCmdDysymtab\x10\x04\x12\x10\n" +
"\fLoadCmdDylib\x10\x05\x12\x13\n" +
"\x0fLoadCmdDylinker\x10\x06\x12\x14\n" +
"\x10LoadCmdSegment64\x10\a\x12\x10\n" +
"\fLoadCmdRpath\x10\b*W\n" +
"\aCpuEnum\x12\n" +
"\n" +
"\x06Cpu386\x10\x00\x12\f\n" +
"\bCpuAmd64\x10\x01\x12\n" +
"\n" +
"\x06CpuArm\x10\x02\x12\f\n" +
"\bCpuArm64\x10\x03\x12\n" +
"\n" +
"\x06CpuPpc\x10\x04\x12\f\n" +
"\bCpuPpc64\x10\x05*\xdb\x02\n" +
"\x12RelocTypeARM64Enum\x12\x18\n" +
"\x14ARM64_RELOC_UNSIGNED\x10\x00\x12\x1a\n" +
"\x16ARM64_RELOC_SUBTRACTOR\x10\x01\x12\x18\n" +
"\x14ARM64_RELOC_BRANCH26\x10\x02\x12\x16\n" +
"\x12ARM64_RELOC_PAGE21\x10\x03\x12\x19\n" +
"\x15ARM64_RELOC_PAGEOFF12\x10\x04\x12\x1f\n" +
"\x1bARM64_RELOC_GOT_LOAD_PAGE21\x10\x05\x12\"\n" +
"\x1eARM64_RELOC_GOT_LOAD_PAGEOFF12\x10\x06\x12\x1e\n" +
"\x1aARM64_RELOC_POINTER_TO_GOT\x10\a\x12 \n" +
"\x1cARM64_RELOC_TLVP_LOAD_PAGE21\x10\b\x12#\n" +
"\x1fARM64_RELOC_TLVP_LOAD_PAGEOFF12\x10\t\x12\x16\n" +
"\x12ARM64_RELOC_ADDEND\x10\n" +
"*D\n" +
"\bTypeEnum\x12\v\n" +
"\aTypeObj\x10\x00\x12\f\n" +
"\bTypeExec\x10\x01\x12\r\n" +
"\tTypeDylib\x10\x02\x12\x0e\n" +
"\n" +
"TypeBundle\x10\x03B\x18Z\x16./;fuzz_ng_debug_machob\x06proto3"
// Lazily-built, gzip-compressed copy of the raw descriptor, shared by all
// generated Descriptor() methods.
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP compresses the raw descriptor exactly
// once (guarded by sync.Once) and returns the cached result.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Runtime registration tables generated by protoc-gen-go; do not edit.
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 7)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 32)
// file_ngolofuzz_proto_goTypes maps descriptor indexes (enums first, then
// messages) to their concrete Go types.
var file_ngolofuzz_proto_goTypes = []any{
(RelocTypeGenericEnum)(0), // 0: ngolofuzz.RelocTypeGenericEnum
(RelocTypeX86_64Enum)(0), // 1: ngolofuzz.RelocTypeX86_64Enum
(RelocTypeARMEnum)(0), // 2: ngolofuzz.RelocTypeARMEnum
(LoadCmdEnum)(0), // 3: ngolofuzz.LoadCmdEnum
(CpuEnum)(0), // 4: ngolofuzz.CpuEnum
(RelocTypeARM64Enum)(0), // 5: ngolofuzz.RelocTypeARM64Enum
(TypeEnum)(0), // 6: ngolofuzz.TypeEnum
(*NewFatFileArgs)(nil), // 7: ngolofuzz.NewFatFileArgs
(*OpenFatArgs)(nil), // 8: ngolofuzz.OpenFatArgs
(*FatFileNgdotCloseArgs)(nil), // 9: ngolofuzz.FatFileNgdotCloseArgs
(*SegmentNgdotDataArgs)(nil), // 10: ngolofuzz.SegmentNgdotDataArgs
(*SegmentNgdotOpenArgs)(nil), // 11: ngolofuzz.SegmentNgdotOpenArgs
(*SectionNgdotDataArgs)(nil), // 12: ngolofuzz.SectionNgdotDataArgs
(*SectionNgdotOpenArgs)(nil), // 13: ngolofuzz.SectionNgdotOpenArgs
(*OpenArgs)(nil), // 14: ngolofuzz.OpenArgs
(*FileNgdotCloseArgs)(nil), // 15: ngolofuzz.FileNgdotCloseArgs
(*NewFileArgs)(nil), // 16: ngolofuzz.NewFileArgs
(*FileNgdotSegmentArgs)(nil), // 17: ngolofuzz.FileNgdotSegmentArgs
(*FileNgdotSectionArgs)(nil), // 18: ngolofuzz.FileNgdotSectionArgs
(*FileNgdotDWARFArgs)(nil), // 19: ngolofuzz.FileNgdotDWARFArgs
(*FileNgdotImportedSymbolsArgs)(nil), // 20: ngolofuzz.FileNgdotImportedSymbolsArgs
(*FileNgdotImportedLibrariesArgs)(nil), // 21: ngolofuzz.FileNgdotImportedLibrariesArgs
(*TypeNgdotStringArgs)(nil), // 22: ngolofuzz.TypeNgdotStringArgs
(*TypeNgdotGoStringArgs)(nil), // 23: ngolofuzz.TypeNgdotGoStringArgs
(*CpuNgdotStringArgs)(nil), // 24: ngolofuzz.CpuNgdotStringArgs
(*CpuNgdotGoStringArgs)(nil), // 25: ngolofuzz.CpuNgdotGoStringArgs
(*LoadCmdNgdotStringArgs)(nil), // 26: ngolofuzz.LoadCmdNgdotStringArgs
(*LoadCmdNgdotGoStringArgs)(nil), // 27: ngolofuzz.LoadCmdNgdotGoStringArgs
(*RelocTypeGenericNgdotGoStringArgs)(nil), // 28: ngolofuzz.RelocTypeGenericNgdotGoStringArgs
(*RelocTypeX86_64NgdotGoStringArgs)(nil), // 29: ngolofuzz.RelocTypeX86_64NgdotGoStringArgs
(*RelocTypeARMNgdotGoStringArgs)(nil), // 30: ngolofuzz.RelocTypeARMNgdotGoStringArgs
(*RelocTypeARM64NgdotGoStringArgs)(nil), // 31: ngolofuzz.RelocTypeARM64NgdotGoStringArgs
(*RelocTypeGenericNgdotStringArgs)(nil), // 32: ngolofuzz.RelocTypeGenericNgdotStringArgs
(*RelocTypeX86_64NgdotStringArgs)(nil), // 33: ngolofuzz.RelocTypeX86_64NgdotStringArgs
(*RelocTypeARMNgdotStringArgs)(nil), // 34: ngolofuzz.RelocTypeARMNgdotStringArgs
(*RelocTypeARM64NgdotStringArgs)(nil), // 35: ngolofuzz.RelocTypeARM64NgdotStringArgs
(*NgoloFuzzOne)(nil), // 36: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 37: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 38: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs records, for every field whose type is a
// message or enum, the goTypes index of that type; the trailing entries
// delimit the method/extension sub-lists (all empty here).
var file_ngolofuzz_proto_depIdxs = []int32{
6, // 0: ngolofuzz.TypeNgdotStringArgs.t:type_name -> ngolofuzz.TypeEnum
6, // 1: ngolofuzz.TypeNgdotGoStringArgs.t:type_name -> ngolofuzz.TypeEnum
4, // 2: ngolofuzz.CpuNgdotStringArgs.i:type_name -> ngolofuzz.CpuEnum
4, // 3: ngolofuzz.CpuNgdotGoStringArgs.i:type_name -> ngolofuzz.CpuEnum
3, // 4: ngolofuzz.LoadCmdNgdotStringArgs.i:type_name -> ngolofuzz.LoadCmdEnum
3, // 5: ngolofuzz.LoadCmdNgdotGoStringArgs.i:type_name -> ngolofuzz.LoadCmdEnum
0, // 6: ngolofuzz.RelocTypeGenericNgdotGoStringArgs.r:type_name -> ngolofuzz.RelocTypeGenericEnum
1, // 7: ngolofuzz.RelocTypeX86_64NgdotGoStringArgs.r:type_name -> ngolofuzz.RelocTypeX86_64Enum
2, // 8: ngolofuzz.RelocTypeARMNgdotGoStringArgs.r:type_name -> ngolofuzz.RelocTypeARMEnum
5, // 9: ngolofuzz.RelocTypeARM64NgdotGoStringArgs.r:type_name -> ngolofuzz.RelocTypeARM64Enum
0, // 10: ngolofuzz.RelocTypeGenericNgdotStringArgs.i:type_name -> ngolofuzz.RelocTypeGenericEnum
1, // 11: ngolofuzz.RelocTypeX86_64NgdotStringArgs.i:type_name -> ngolofuzz.RelocTypeX86_64Enum
2, // 12: ngolofuzz.RelocTypeARMNgdotStringArgs.i:type_name -> ngolofuzz.RelocTypeARMEnum
5, // 13: ngolofuzz.RelocTypeARM64NgdotStringArgs.i:type_name -> ngolofuzz.RelocTypeARM64Enum
7, // 14: ngolofuzz.NgoloFuzzOne.NewFatFile:type_name -> ngolofuzz.NewFatFileArgs
8, // 15: ngolofuzz.NgoloFuzzOne.OpenFat:type_name -> ngolofuzz.OpenFatArgs
9, // 16: ngolofuzz.NgoloFuzzOne.FatFileNgdotClose:type_name -> ngolofuzz.FatFileNgdotCloseArgs
10, // 17: ngolofuzz.NgoloFuzzOne.SegmentNgdotData:type_name -> ngolofuzz.SegmentNgdotDataArgs
11, // 18: ngolofuzz.NgoloFuzzOne.SegmentNgdotOpen:type_name -> ngolofuzz.SegmentNgdotOpenArgs
12, // 19: ngolofuzz.NgoloFuzzOne.SectionNgdotData:type_name -> ngolofuzz.SectionNgdotDataArgs
13, // 20: ngolofuzz.NgoloFuzzOne.SectionNgdotOpen:type_name -> ngolofuzz.SectionNgdotOpenArgs
14, // 21: ngolofuzz.NgoloFuzzOne.Open:type_name -> ngolofuzz.OpenArgs
15, // 22: ngolofuzz.NgoloFuzzOne.FileNgdotClose:type_name -> ngolofuzz.FileNgdotCloseArgs
16, // 23: ngolofuzz.NgoloFuzzOne.NewFile:type_name -> ngolofuzz.NewFileArgs
17, // 24: ngolofuzz.NgoloFuzzOne.FileNgdotSegment:type_name -> ngolofuzz.FileNgdotSegmentArgs
18, // 25: ngolofuzz.NgoloFuzzOne.FileNgdotSection:type_name -> ngolofuzz.FileNgdotSectionArgs
19, // 26: ngolofuzz.NgoloFuzzOne.FileNgdotDWARF:type_name -> ngolofuzz.FileNgdotDWARFArgs
20, // 27: ngolofuzz.NgoloFuzzOne.FileNgdotImportedSymbols:type_name -> ngolofuzz.FileNgdotImportedSymbolsArgs
21, // 28: ngolofuzz.NgoloFuzzOne.FileNgdotImportedLibraries:type_name -> ngolofuzz.FileNgdotImportedLibrariesArgs
22, // 29: ngolofuzz.NgoloFuzzOne.TypeNgdotString:type_name -> ngolofuzz.TypeNgdotStringArgs
23, // 30: ngolofuzz.NgoloFuzzOne.TypeNgdotGoString:type_name -> ngolofuzz.TypeNgdotGoStringArgs
24, // 31: ngolofuzz.NgoloFuzzOne.CpuNgdotString:type_name -> ngolofuzz.CpuNgdotStringArgs
25, // 32: ngolofuzz.NgoloFuzzOne.CpuNgdotGoString:type_name -> ngolofuzz.CpuNgdotGoStringArgs
26, // 33: ngolofuzz.NgoloFuzzOne.LoadCmdNgdotString:type_name -> ngolofuzz.LoadCmdNgdotStringArgs
27, // 34: ngolofuzz.NgoloFuzzOne.LoadCmdNgdotGoString:type_name -> ngolofuzz.LoadCmdNgdotGoStringArgs
28, // 35: ngolofuzz.NgoloFuzzOne.RelocTypeGenericNgdotGoString:type_name -> ngolofuzz.RelocTypeGenericNgdotGoStringArgs
29, // 36: ngolofuzz.NgoloFuzzOne.RelocTypeX86_64NgdotGoString:type_name -> ngolofuzz.RelocTypeX86_64NgdotGoStringArgs
30, // 37: ngolofuzz.NgoloFuzzOne.RelocTypeARMNgdotGoString:type_name -> ngolofuzz.RelocTypeARMNgdotGoStringArgs
31, // 38: ngolofuzz.NgoloFuzzOne.RelocTypeARM64NgdotGoString:type_name -> ngolofuzz.RelocTypeARM64NgdotGoStringArgs
32, // 39: ngolofuzz.NgoloFuzzOne.RelocTypeGenericNgdotString:type_name -> ngolofuzz.RelocTypeGenericNgdotStringArgs
33, // 40: ngolofuzz.NgoloFuzzOne.RelocTypeX86_64NgdotString:type_name -> ngolofuzz.RelocTypeX86_64NgdotStringArgs
34, // 41: ngolofuzz.NgoloFuzzOne.RelocTypeARMNgdotString:type_name -> ngolofuzz.RelocTypeARMNgdotStringArgs
35, // 42: ngolofuzz.NgoloFuzzOne.RelocTypeARM64NgdotString:type_name -> ngolofuzz.RelocTypeARM64NgdotStringArgs
36, // 43: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
44, // [44:44] is the sub-list for method output_type
44, // [44:44] is the sub-list for method input_type
44, // [44:44] is the sub-list for extension type_name
44, // [44:44] is the sub-list for extension extendee
0, // [0:44] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the file descriptor, oneof wrapper
// types, and message/enum infos with the protobuf runtime. It is idempotent:
// a second call returns immediately once File_ngolofuzz_proto is set.
// Machine-generated; do not edit.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// msgTypes[29] is NgoloFuzzOne: declare every wrapper allowed in its oneof.
file_ngolofuzz_proto_msgTypes[29].OneofWrappers = []any{
(*NgoloFuzzOne_NewFatFile)(nil),
(*NgoloFuzzOne_OpenFat)(nil),
(*NgoloFuzzOne_FatFileNgdotClose)(nil),
(*NgoloFuzzOne_SegmentNgdotData)(nil),
(*NgoloFuzzOne_SegmentNgdotOpen)(nil),
(*NgoloFuzzOne_SectionNgdotData)(nil),
(*NgoloFuzzOne_SectionNgdotOpen)(nil),
(*NgoloFuzzOne_Open)(nil),
(*NgoloFuzzOne_FileNgdotClose)(nil),
(*NgoloFuzzOne_NewFile)(nil),
(*NgoloFuzzOne_FileNgdotSegment)(nil),
(*NgoloFuzzOne_FileNgdotSection)(nil),
(*NgoloFuzzOne_FileNgdotDWARF)(nil),
(*NgoloFuzzOne_FileNgdotImportedSymbols)(nil),
(*NgoloFuzzOne_FileNgdotImportedLibraries)(nil),
(*NgoloFuzzOne_TypeNgdotString)(nil),
(*NgoloFuzzOne_TypeNgdotGoString)(nil),
(*NgoloFuzzOne_CpuNgdotString)(nil),
(*NgoloFuzzOne_CpuNgdotGoString)(nil),
(*NgoloFuzzOne_LoadCmdNgdotString)(nil),
(*NgoloFuzzOne_LoadCmdNgdotGoString)(nil),
(*NgoloFuzzOne_RelocTypeGenericNgdotGoString)(nil),
(*NgoloFuzzOne_RelocTypeX86_64NgdotGoString)(nil),
(*NgoloFuzzOne_RelocTypeARMNgdotGoString)(nil),
(*NgoloFuzzOne_RelocTypeARM64NgdotGoString)(nil),
(*NgoloFuzzOne_RelocTypeGenericNgdotString)(nil),
(*NgoloFuzzOne_RelocTypeX86_64NgdotString)(nil),
(*NgoloFuzzOne_RelocTypeARMNgdotString)(nil),
(*NgoloFuzzOne_RelocTypeARM64NgdotString)(nil),
}
// msgTypes[30] is NgoloFuzzAny.
file_ngolofuzz_proto_msgTypes[30].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 7,
NumMessages: 32,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
EnumInfos: file_ngolofuzz_proto_enumTypes,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Allow the registration tables to be garbage-collected after Build.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_debug_pe
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"debug/pe"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stub connection used by the fuzz harness:
// Read serves bytes from a fixed buffer, Write discards its input.
type FuzzingConn struct {
    buf    []byte // data served to readers
    offset int    // current read position within buf
}

// Read copies up to len(b) bytes of the remaining buffer into b, advances
// the read offset, and reports how many bytes were copied. Once the buffer
// is exhausted it returns (0, io.EOF).
//
// Bug fix: the original condition was `len(b) < len(c.buf)+c.offset`
// instead of `len(b) < len(c.buf)-c.offset`. With a non-zero offset the
// "fill b completely" branch was taken even when b could hold all
// remaining data, so Read reported len(b) bytes read while copying fewer,
// and advanced offset past the end of the buffer — violating the
// io.Reader contract (n must not exceed the bytes actually written to b).
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
    if c.offset >= len(c.buf) {
        return 0, io.EOF
    }
    remaining := len(c.buf) - c.offset
    if len(b) < remaining {
        // b cannot hold everything that is left: fill it completely.
        copy(b, c.buf[c.offset:])
        c.offset += len(b)
        return len(b), nil
    }
    // b is large enough: hand over everything that is left.
    copy(b, c.buf[c.offset:])
    c.offset = len(c.buf)
    return remaining, nil
}
// Write pretends to send b successfully; a FuzzingConn silently discards
// all written data and never reports an error.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
    n = len(b)
    return n, nil
}

// Close marks the stream as fully consumed, so any subsequent Read
// reports EOF.
func (c *FuzzingConn) Close() error {
    c.offset = len(c.buf)
    return nil
}
// FuzzingAddr is a placeholder net.Addr implementation handed out by
// FuzzingConn's LocalAddr and RemoteAddr methods.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (a *FuzzingAddr) Network() string {
    return "fuzz_addr_net"
}

// String reports a fixed fake address string.
func (a *FuzzingAddr) String() string {
    return "fuzz_addr_string"
}
// LocalAddr returns a fixed placeholder address for the fake connection.
func (c *FuzzingConn) LocalAddr() net.Addr {
    return new(FuzzingAddr)
}

// RemoteAddr returns the same fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
    return new(FuzzingAddr)
}

// SetDeadline is a no-op; deadlines are meaningless on an in-memory stub.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
    return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
    return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
    return nil
}
// CreateFuzzingConn builds a FuzzingConn that serves a to readers,
// starting at offset zero.
func CreateFuzzingConn(a []byte) *FuzzingConn {
    return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
    return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the bytes of a through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
    return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to an int, preserving order.
func ConvertIntArray(a []int64) []int {
    out := make([]int, 0, len(a))
    for _, v := range a {
        out = append(out, int(v))
    }
    return out
}
// ConvertUint16Array truncates each int64 in a to a uint16 (modulo 2^16),
// preserving order.
func ConvertUint16Array(a []int64) []uint16 {
    out := make([]uint16, 0, len(a))
    for _, v := range a {
        out = append(out, uint16(v))
    }
    return out
}
// GetRune yields the first rune of s, or NUL ('\x00') for an empty string.
// A malformed leading UTF-8 sequence decodes to the Unicode replacement
// rune, the same value a range loop over the string would produce.
func GetRune(s string) rune {
    if s == "" {
        return '\x00'
    }
    return []rune(s)[0]
}
// FuzzNG_valid is the harness entry point for inputs known to be valid
// protobuf (e.g. produced by a structure-aware mutator): a failed
// unmarshal is itself a harness bug, so it panics rather than returning.
// Returns FuzzNG_List's verdict, or 0 if a string panic was recovered.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Recover only string panics (the harness's bail-out mechanism); any
// other panic value is a genuine crash and is re-raised for the fuzzer.
// Note the defer is installed after the unmarshal check on purpose, so
// the unmarshal panic above is never swallowed.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the harness entry point for raw fuzzer input: bytes
// that fail to unmarshal are rejected with 0 instead of panicking.
// Otherwise behaves like FuzzNG_valid.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Recover string panics only; re-raise everything else (see FuzzNG_valid).
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool
// FuzzNG_List replays the decoded operation sequence against debug/pe.
// Results of producer calls are accumulated in per-type slices and
// consumed round-robin by later operations. Returns 1 when the whole
// sequence executed, 0 when an operation errored or the list was too long.
//
// NOTE(review): in the code visible here, the results of pe.Open and
// pe.NewFile are discarded (`_, r1 := ...`) and nothing ever appends to
// FileResults, so every File-consuming case hits the empty-slice guard
// and is skipped — confirm whether successful files were meant to be
// appended to FileResults.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// On first call, optionally dump a Go reproducer of this input to the
// file named by FUZZ_NG_REPRODUCER.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var SectionResults []*pe.Section
SectionResultsIndex := 0
var FileResults []*pe.File
FileResultsIndex := 0
for l := range gen.List {
// Cap the number of executed operations to bound run time.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Open:
_, r1 := pe.Open(a.Open.Name)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FileNgdotClose:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
r0 := arg0.Close()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_NewFile:
arg0 := bytes.NewReader(a.NewFile.R)
_, r1 := pe.NewFile(arg0)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FileNgdotSection:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
r0 := arg0.Section(a.FileNgdotSection.Name)
if r0 != nil{
SectionResults = append(SectionResults, r0)
}
case *NgoloFuzzOne_FileNgdotDWARF:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
_, r1 := arg0.DWARF()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FileNgdotImportedSymbols:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
_, r1 := arg0.ImportedSymbols()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FileNgdotImportedLibraries:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
_, r1 := arg0.ImportedLibraries()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_SectionNgdotData:
if len(SectionResults) == 0 {
continue
}
arg0 := SectionResults[SectionResultsIndex]
SectionResultsIndex = (SectionResultsIndex + 1) % len(SectionResults)
_, r1 := arg0.Data()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_SectionNgdotOpen:
if len(SectionResults) == 0 {
continue
}
arg0 := SectionResults[SectionResultsIndex]
SectionResultsIndex = (SectionResultsIndex + 1) % len(SectionResults)
arg0.Open()
case *NgoloFuzzOne_FileNgdotCOFFSymbolReadSectionDefAux:
if len(FileResults) == 0 {
continue
}
arg0 := FileResults[FileResultsIndex]
FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
arg1 := int(a.FileNgdotCOFFSymbolReadSectionDefAux.Idx)
_, r1 := arg0.COFFSymbolReadSectionDefAux(arg1)
if r1 != nil{
r1.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a human-readable Go-like reproducer of the operation
// sequence to w. It mirrors FuzzNG_List's dispatch and round-robin result
// indexing (so the printed File%d/Section%d names line up with execution)
// but performs no real debug/pe calls. Write errors on w are ignored.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
SectionNb := 0
SectionResultsIndex := 0
FileNb := 0
FileResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Open:
w.WriteString(fmt.Sprintf("pe.Open(%#+v)\n", a.Open.Name))
case *NgoloFuzzOne_FileNgdotClose:
if FileNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("File%d.Close()\n", FileResultsIndex))
FileResultsIndex = (FileResultsIndex + 1) % FileNb
case *NgoloFuzzOne_NewFile:
w.WriteString(fmt.Sprintf("pe.NewFile(bytes.NewReader(%#+v))\n", a.NewFile.R))
case *NgoloFuzzOne_FileNgdotSection:
if FileNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Section%d := File%d.Section(%#+v)\n", SectionNb, FileResultsIndex, a.FileNgdotSection.Name))
SectionNb = SectionNb + 1
FileResultsIndex = (FileResultsIndex + 1) % FileNb
case *NgoloFuzzOne_FileNgdotDWARF:
if FileNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("File%d.DWARF()\n", FileResultsIndex))
FileResultsIndex = (FileResultsIndex + 1) % FileNb
case *NgoloFuzzOne_FileNgdotImportedSymbols:
if FileNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("File%d.ImportedSymbols()\n", FileResultsIndex))
FileResultsIndex = (FileResultsIndex + 1) % FileNb
case *NgoloFuzzOne_FileNgdotImportedLibraries:
if FileNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("File%d.ImportedLibraries()\n", FileResultsIndex))
FileResultsIndex = (FileResultsIndex + 1) % FileNb
case *NgoloFuzzOne_SectionNgdotData:
if SectionNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Section%d.Data()\n", SectionResultsIndex))
SectionResultsIndex = (SectionResultsIndex + 1) % SectionNb
case *NgoloFuzzOne_SectionNgdotOpen:
if SectionNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Section%d.Open()\n", SectionResultsIndex))
SectionResultsIndex = (SectionResultsIndex + 1) % SectionNb
case *NgoloFuzzOne_FileNgdotCOFFSymbolReadSectionDefAux:
if FileNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("File%d.COFFSymbolReadSectionDefAux(int(%#+v))\n", FileResultsIndex, a.FileNgdotCOFFSymbolReadSectionDefAux.Idx))
FileResultsIndex = (FileResultsIndex + 1) % FileNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_debug_pe
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guard: fails the build if the linked protoimpl runtime is
// older or newer than this generated code supports (generated boilerplate).
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// OpenArgs is the generated argument message for the pe.Open operation;
// Name is the path passed to pe.Open. Machine-generated; do not edit.
type OpenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its generated message info.
func (x *OpenArgs) Reset() {
*x = OpenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *OpenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OpenArgs) ProtoMessage() {}
// ProtoReflect exposes the message via the protoreflect API.
func (x *OpenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OpenArgs.ProtoReflect.Descriptor instead.
func (*OpenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetName returns the path argument, or "" on a nil receiver.
func (x *OpenArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
// FileNgdotCloseArgs is the (empty) generated argument message for the
// File.Close operation. Machine-generated; do not edit.
type FileNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its generated message info.
func (x *FileNgdotCloseArgs) Reset() {
*x = FileNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *FileNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileNgdotCloseArgs) ProtoMessage() {}
// ProtoReflect exposes the message via the protoreflect API.
func (x *FileNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type NewFileArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewFileArgs) Reset() {
*x = NewFileArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewFileArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewFileArgs) ProtoMessage() {}
func (x *NewFileArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewFileArgs.ProtoReflect.Descriptor instead.
func (*NewFileArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NewFileArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type FileNgdotSectionArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FileNgdotSectionArgs) Reset() {
*x = FileNgdotSectionArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FileNgdotSectionArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileNgdotSectionArgs) ProtoMessage() {}
func (x *FileNgdotSectionArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileNgdotSectionArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotSectionArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *FileNgdotSectionArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type FileNgdotDWARFArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FileNgdotDWARFArgs) Reset() {
*x = FileNgdotDWARFArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FileNgdotDWARFArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileNgdotDWARFArgs) ProtoMessage() {}
func (x *FileNgdotDWARFArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileNgdotDWARFArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotDWARFArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type FileNgdotImportedSymbolsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FileNgdotImportedSymbolsArgs) Reset() {
*x = FileNgdotImportedSymbolsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FileNgdotImportedSymbolsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileNgdotImportedSymbolsArgs) ProtoMessage() {}
func (x *FileNgdotImportedSymbolsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileNgdotImportedSymbolsArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotImportedSymbolsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
type FileNgdotImportedLibrariesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FileNgdotImportedLibrariesArgs) Reset() {
*x = FileNgdotImportedLibrariesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FileNgdotImportedLibrariesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileNgdotImportedLibrariesArgs) ProtoMessage() {}
func (x *FileNgdotImportedLibrariesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileNgdotImportedLibrariesArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotImportedLibrariesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
type SectionNgdotDataArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SectionNgdotDataArgs) Reset() {
*x = SectionNgdotDataArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SectionNgdotDataArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SectionNgdotDataArgs) ProtoMessage() {}
func (x *SectionNgdotDataArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SectionNgdotDataArgs.ProtoReflect.Descriptor instead.
func (*SectionNgdotDataArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
type SectionNgdotOpenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SectionNgdotOpenArgs) Reset() {
*x = SectionNgdotOpenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SectionNgdotOpenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SectionNgdotOpenArgs) ProtoMessage() {}
func (x *SectionNgdotOpenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SectionNgdotOpenArgs.ProtoReflect.Descriptor instead.
func (*SectionNgdotOpenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
type FileNgdotCOFFSymbolReadSectionDefAuxArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Idx int64 `protobuf:"varint,1,opt,name=idx,proto3" json:"idx,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FileNgdotCOFFSymbolReadSectionDefAuxArgs) Reset() {
*x = FileNgdotCOFFSymbolReadSectionDefAuxArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FileNgdotCOFFSymbolReadSectionDefAuxArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileNgdotCOFFSymbolReadSectionDefAuxArgs) ProtoMessage() {}
func (x *FileNgdotCOFFSymbolReadSectionDefAuxArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileNgdotCOFFSymbolReadSectionDefAuxArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotCOFFSymbolReadSectionDefAuxArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
func (x *FileNgdotCOFFSymbolReadSectionDefAuxArgs) GetIdx() int64 {
if x != nil {
return x.Idx
}
return 0
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Open
// *NgoloFuzzOne_FileNgdotClose
// *NgoloFuzzOne_NewFile
// *NgoloFuzzOne_FileNgdotSection
// *NgoloFuzzOne_FileNgdotDWARF
// *NgoloFuzzOne_FileNgdotImportedSymbols
// *NgoloFuzzOne_FileNgdotImportedLibraries
// *NgoloFuzzOne_SectionNgdotData
// *NgoloFuzzOne_SectionNgdotOpen
// *NgoloFuzzOne_FileNgdotCOFFSymbolReadSectionDefAux
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetOpen() *OpenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Open); ok {
return x.Open
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotClose() *FileNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotClose); ok {
return x.FileNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewFile() *NewFileArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewFile); ok {
return x.NewFile
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotSection() *FileNgdotSectionArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotSection); ok {
return x.FileNgdotSection
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotDWARF() *FileNgdotDWARFArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotDWARF); ok {
return x.FileNgdotDWARF
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotImportedSymbols() *FileNgdotImportedSymbolsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotImportedSymbols); ok {
return x.FileNgdotImportedSymbols
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotImportedLibraries() *FileNgdotImportedLibrariesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotImportedLibraries); ok {
return x.FileNgdotImportedLibraries
}
}
return nil
}
func (x *NgoloFuzzOne) GetSectionNgdotData() *SectionNgdotDataArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionNgdotData); ok {
return x.SectionNgdotData
}
}
return nil
}
func (x *NgoloFuzzOne) GetSectionNgdotOpen() *SectionNgdotOpenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionNgdotOpen); ok {
return x.SectionNgdotOpen
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotCOFFSymbolReadSectionDefAux() *FileNgdotCOFFSymbolReadSectionDefAuxArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotCOFFSymbolReadSectionDefAux); ok {
return x.FileNgdotCOFFSymbolReadSectionDefAux
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Open struct {
Open *OpenArgs `protobuf:"bytes,1,opt,name=Open,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotClose struct {
FileNgdotClose *FileNgdotCloseArgs `protobuf:"bytes,2,opt,name=FileNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_NewFile struct {
NewFile *NewFileArgs `protobuf:"bytes,3,opt,name=NewFile,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotSection struct {
FileNgdotSection *FileNgdotSectionArgs `protobuf:"bytes,4,opt,name=FileNgdotSection,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotDWARF struct {
FileNgdotDWARF *FileNgdotDWARFArgs `protobuf:"bytes,5,opt,name=FileNgdotDWARF,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotImportedSymbols struct {
FileNgdotImportedSymbols *FileNgdotImportedSymbolsArgs `protobuf:"bytes,6,opt,name=FileNgdotImportedSymbols,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotImportedLibraries struct {
FileNgdotImportedLibraries *FileNgdotImportedLibrariesArgs `protobuf:"bytes,7,opt,name=FileNgdotImportedLibraries,proto3,oneof"`
}
type NgoloFuzzOne_SectionNgdotData struct {
SectionNgdotData *SectionNgdotDataArgs `protobuf:"bytes,8,opt,name=SectionNgdotData,proto3,oneof"`
}
type NgoloFuzzOne_SectionNgdotOpen struct {
SectionNgdotOpen *SectionNgdotOpenArgs `protobuf:"bytes,9,opt,name=SectionNgdotOpen,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotCOFFSymbolReadSectionDefAux struct {
FileNgdotCOFFSymbolReadSectionDefAux *FileNgdotCOFFSymbolReadSectionDefAuxArgs `protobuf:"bytes,10,opt,name=FileNgdotCOFFSymbolReadSectionDefAux,proto3,oneof"`
}
func (*NgoloFuzzOne_Open) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewFile) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotSection) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotDWARF) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotImportedSymbols) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotImportedLibraries) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SectionNgdotData) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SectionNgdotOpen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotCOFFSymbolReadSectionDefAux) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1e\n" +
"\bOpenArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x14\n" +
"\x12FileNgdotCloseArgs\"\x1b\n" +
"\vNewFileArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"*\n" +
"\x14FileNgdotSectionArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x14\n" +
"\x12FileNgdotDWARFArgs\"\x1e\n" +
"\x1cFileNgdotImportedSymbolsArgs\" \n" +
"\x1eFileNgdotImportedLibrariesArgs\"\x16\n" +
"\x14SectionNgdotDataArgs\"\x16\n" +
"\x14SectionNgdotOpenArgs\"<\n" +
"(FileNgdotCOFFSymbolReadSectionDefAuxArgs\x12\x10\n" +
"\x03idx\x18\x01 \x01(\x03R\x03idx\"\xd4\x06\n" +
"\fNgoloFuzzOne\x12)\n" +
"\x04Open\x18\x01 \x01(\v2\x13.ngolofuzz.OpenArgsH\x00R\x04Open\x12G\n" +
"\x0eFileNgdotClose\x18\x02 \x01(\v2\x1d.ngolofuzz.FileNgdotCloseArgsH\x00R\x0eFileNgdotClose\x122\n" +
"\aNewFile\x18\x03 \x01(\v2\x16.ngolofuzz.NewFileArgsH\x00R\aNewFile\x12M\n" +
"\x10FileNgdotSection\x18\x04 \x01(\v2\x1f.ngolofuzz.FileNgdotSectionArgsH\x00R\x10FileNgdotSection\x12G\n" +
"\x0eFileNgdotDWARF\x18\x05 \x01(\v2\x1d.ngolofuzz.FileNgdotDWARFArgsH\x00R\x0eFileNgdotDWARF\x12e\n" +
"\x18FileNgdotImportedSymbols\x18\x06 \x01(\v2'.ngolofuzz.FileNgdotImportedSymbolsArgsH\x00R\x18FileNgdotImportedSymbols\x12k\n" +
"\x1aFileNgdotImportedLibraries\x18\a \x01(\v2).ngolofuzz.FileNgdotImportedLibrariesArgsH\x00R\x1aFileNgdotImportedLibraries\x12M\n" +
"\x10SectionNgdotData\x18\b \x01(\v2\x1f.ngolofuzz.SectionNgdotDataArgsH\x00R\x10SectionNgdotData\x12M\n" +
"\x10SectionNgdotOpen\x18\t \x01(\v2\x1f.ngolofuzz.SectionNgdotOpenArgsH\x00R\x10SectionNgdotOpen\x12\x89\x01\n" +
"$FileNgdotCOFFSymbolReadSectionDefAux\x18\n" +
" \x01(\v23.ngolofuzz.FileNgdotCOFFSymbolReadSectionDefAuxArgsH\x00R$FileNgdotCOFFSymbolReadSectionDefAuxB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x15Z\x13./;fuzz_ng_debug_peb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file descriptor
// exactly once (guarded by a sync.Once) and returns the cached result.
// NOTE(review): this is protoc-gen-go output; comments here will be lost on
// regeneration.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
var file_ngolofuzz_proto_goTypes = []any{
(*OpenArgs)(nil), // 0: ngolofuzz.OpenArgs
(*FileNgdotCloseArgs)(nil), // 1: ngolofuzz.FileNgdotCloseArgs
(*NewFileArgs)(nil), // 2: ngolofuzz.NewFileArgs
(*FileNgdotSectionArgs)(nil), // 3: ngolofuzz.FileNgdotSectionArgs
(*FileNgdotDWARFArgs)(nil), // 4: ngolofuzz.FileNgdotDWARFArgs
(*FileNgdotImportedSymbolsArgs)(nil), // 5: ngolofuzz.FileNgdotImportedSymbolsArgs
(*FileNgdotImportedLibrariesArgs)(nil), // 6: ngolofuzz.FileNgdotImportedLibrariesArgs
(*SectionNgdotDataArgs)(nil), // 7: ngolofuzz.SectionNgdotDataArgs
(*SectionNgdotOpenArgs)(nil), // 8: ngolofuzz.SectionNgdotOpenArgs
(*FileNgdotCOFFSymbolReadSectionDefAuxArgs)(nil), // 9: ngolofuzz.FileNgdotCOFFSymbolReadSectionDefAuxArgs
(*NgoloFuzzOne)(nil), // 10: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 11: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 12: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.Open:type_name -> ngolofuzz.OpenArgs
1, // 1: ngolofuzz.NgoloFuzzOne.FileNgdotClose:type_name -> ngolofuzz.FileNgdotCloseArgs
2, // 2: ngolofuzz.NgoloFuzzOne.NewFile:type_name -> ngolofuzz.NewFileArgs
3, // 3: ngolofuzz.NgoloFuzzOne.FileNgdotSection:type_name -> ngolofuzz.FileNgdotSectionArgs
4, // 4: ngolofuzz.NgoloFuzzOne.FileNgdotDWARF:type_name -> ngolofuzz.FileNgdotDWARFArgs
5, // 5: ngolofuzz.NgoloFuzzOne.FileNgdotImportedSymbols:type_name -> ngolofuzz.FileNgdotImportedSymbolsArgs
6, // 6: ngolofuzz.NgoloFuzzOne.FileNgdotImportedLibraries:type_name -> ngolofuzz.FileNgdotImportedLibrariesArgs
7, // 7: ngolofuzz.NgoloFuzzOne.SectionNgdotData:type_name -> ngolofuzz.SectionNgdotDataArgs
8, // 8: ngolofuzz.NgoloFuzzOne.SectionNgdotOpen:type_name -> ngolofuzz.SectionNgdotOpenArgs
9, // 9: ngolofuzz.NgoloFuzzOne.FileNgdotCOFFSymbolReadSectionDefAux:type_name -> ngolofuzz.FileNgdotCOFFSymbolReadSectionDefAuxArgs
10, // 10: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
11, // [11:11] is the sub-list for method output_type
11, // [11:11] is the sub-list for method input_type
11, // [11:11] is the sub-list for extension type_name
11, // [11:11] is the sub-list for extension extendee
0, // [0:11] is the sub-list for field type_name
}
// init eagerly registers the generated protobuf file at program start.
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init builds and registers the type information for
// ngolofuzz.proto. It is idempotent: once File_ngolofuzz_proto is set,
// subsequent calls return immediately.
// NOTE(review): protoc-gen-go output; comments here will be lost on
// regeneration.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Wrapper types backing the NgoloFuzzOne "item" oneof.
	file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
		(*NgoloFuzzOne_Open)(nil),
		(*NgoloFuzzOne_FileNgdotClose)(nil),
		(*NgoloFuzzOne_NewFile)(nil),
		(*NgoloFuzzOne_FileNgdotSection)(nil),
		(*NgoloFuzzOne_FileNgdotDWARF)(nil),
		(*NgoloFuzzOne_FileNgdotImportedSymbols)(nil),
		(*NgoloFuzzOne_FileNgdotImportedLibraries)(nil),
		(*NgoloFuzzOne_SectionNgdotData)(nil),
		(*NgoloFuzzOne_SectionNgdotOpen)(nil),
		(*NgoloFuzzOne_FileNgdotCOFFSymbolReadSectionDefAux)(nil),
	}
	// Wrapper types backing the NgoloFuzzAny "item" oneof.
	file_ngolofuzz_proto_msgTypes[11].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   13,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Drop the registration tables so they can be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_debug_plan9obj
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"debug/plan9obj"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory connection stub that serves a fixed byte
// slice to Read and discards all writes. It is used to feed
// fuzzer-generated bytes to code that expects a net.Conn-like object.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // current read position within buf
}

// Read copies the next unread bytes of c.buf into b and advances the
// read offset. It returns io.EOF once the buffer is exhausted.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset
// (instead of the remaining length len(c.buf)-c.offset). After a partial
// read, a short destination buffer took the wrong branch: Read reported
// n = len(b) even though copy() had transferred fewer bytes, and pushed
// offset past len(buf) — violating the io.Reader contract that n is the
// number of bytes actually placed in b. Using copy's return value makes
// both branches collapse into one correct path.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write pretends to consume all of b and never fails.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr returned for both ends of a
// FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns the placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns the placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; a fuzzing connection never times out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn ready to be read from the start.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit these helper functions only when the generated harness actually needs them.
// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the bytes in a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for idx, v := range a {
		out[idx] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (mod 2^16).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for idx, v := range a {
		out[idx] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
func GetRune(s string) rune {
	first := rune('\x00')
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid runs one fuzzing iteration on data that is expected to be a
// valid protobuf encoding of NgoloFuzzList (presumably produced by an
// LPM-style structured mutator — hence the panic on decode failure, which
// would indicate a harness bug rather than a finding).
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Recover string panics raised while executing the list; any other
	// panic value is re-raised so real crashes still surface to the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: treated as an expected, non-reportable abort
			default:
				panic(r)
			}
		}
	}()
	// NOTE(review): the GC here presumably helps surface lifetime bugs
	// between iterations — confirm against the generator's documentation.
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure runs one fuzzing iteration on raw bytes that may or may not
// be a valid protobuf encoding of NgoloFuzzList. Unlike FuzzNG_valid, a
// decode failure is silently ignored (return 0) instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Not a valid protobuf: uninteresting input for this harness.
		return 0
	}
	// Recover string panics raised while executing the list; any other
	// panic value is re-raised so real crashes still surface to the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: treated as an expected, non-reportable abort
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets gen.List as a small program: each element selects
// one debug/plan9obj API call. Files and Sections produced by earlier calls
// are collected in slices and consumed round-robin by later operations.
// Returns 1 when the whole list executed, 0 on early exit (an API error or
// the operation cap).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first call, optionally dump a Go-source reproducer of this
		// input to the path named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Accumulated results of constructor-like calls; the *Index counters
	// cycle through them so every stored value eventually gets used.
	var FileResults []*plan9obj.File
	FileResultsIndex := 0
	var SectionResults []*plan9obj.Section
	SectionResultsIndex := 0
	for l := range gen.List {
		// Cap the number of operations executed per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_SectionNgdotData:
			if len(SectionResults) == 0 {
				continue // no Section produced yet; skip this op
			}
			arg0 := SectionResults[SectionResultsIndex]
			SectionResultsIndex = (SectionResultsIndex + 1) % len(SectionResults)
			_, r1 := arg0.Data()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_SectionNgdotOpen:
			if len(SectionResults) == 0 {
				continue
			}
			arg0 := SectionResults[SectionResultsIndex]
			SectionResultsIndex = (SectionResultsIndex + 1) % len(SectionResults)
			arg0.Open()
		case *NgoloFuzzOne_Open:
			// Opens a file by fuzzer-chosen path; normally fails, in which
			// case we stop early.
			r0, r1 := plan9obj.Open(a.Open.Name)
			if r0 != nil {
				FileResults = append(FileResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_FileNgdotClose:
			if len(FileResults) == 0 {
				continue
			}
			arg0 := FileResults[FileResultsIndex]
			FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
			r0 := arg0.Close()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_NewFile:
			// Parse fuzzer-provided bytes as a Plan 9 object file.
			arg0 := bytes.NewReader(a.NewFile.R)
			r0, r1 := plan9obj.NewFile(arg0)
			if r0 != nil {
				FileResults = append(FileResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_FileNgdotSymbols:
			if len(FileResults) == 0 {
				continue
			}
			arg0 := FileResults[FileResultsIndex]
			FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
			_, r1 := arg0.Symbols()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_FileNgdotSection:
			if len(FileResults) == 0 {
				continue
			}
			arg0 := FileResults[FileResultsIndex]
			FileResultsIndex = (FileResultsIndex + 1) % len(FileResults)
			r0 := arg0.Section(a.FileNgdotSection.Name)
			if r0 != nil {
				SectionResults = append(SectionResults, r0)
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w: one line of
// source per executed operation, mirroring the interpretation performed by
// FuzzNG_List. The *Nb counters track how many File/Section results would
// exist at each point, and the *Index counters mirror FuzzNG_List's
// round-robin consumption so the printed variable names match execution.
// Note: WriteString errors are deliberately ignored (best-effort output).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	FileNb := 0
	FileResultsIndex := 0
	SectionNb := 0
	SectionResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_SectionNgdotData:
			if SectionNb == 0 {
				continue // FuzzNG_List would skip this op too
			}
			w.WriteString(fmt.Sprintf("Section%d.Data()\n", SectionResultsIndex))
			SectionResultsIndex = (SectionResultsIndex + 1) % SectionNb
		case *NgoloFuzzOne_SectionNgdotOpen:
			if SectionNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Section%d.Open()\n", SectionResultsIndex))
			SectionResultsIndex = (SectionResultsIndex + 1) % SectionNb
		case *NgoloFuzzOne_Open:
			w.WriteString(fmt.Sprintf("File%d, _ := plan9obj.Open(%#+v)\n", FileNb, a.Open.Name))
			FileNb = FileNb + 1
		case *NgoloFuzzOne_FileNgdotClose:
			if FileNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("File%d.Close()\n", FileResultsIndex))
			FileResultsIndex = (FileResultsIndex + 1) % FileNb
		case *NgoloFuzzOne_NewFile:
			w.WriteString(fmt.Sprintf("File%d, _ := plan9obj.NewFile(bytes.NewReader(%#+v))\n", FileNb, a.NewFile.R))
			FileNb = FileNb + 1
		case *NgoloFuzzOne_FileNgdotSymbols:
			if FileNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("File%d.Symbols()\n", FileResultsIndex))
			FileResultsIndex = (FileResultsIndex + 1) % FileNb
		case *NgoloFuzzOne_FileNgdotSection:
			if FileNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Section%d := File%d.Section(%#+v)\n", SectionNb, FileResultsIndex, a.FileNgdotSection.Name))
			SectionNb = SectionNb + 1
			FileResultsIndex = (FileResultsIndex + 1) % FileNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_debug_plan9obj
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type SectionNgdotDataArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SectionNgdotDataArgs) Reset() {
*x = SectionNgdotDataArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SectionNgdotDataArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SectionNgdotDataArgs) ProtoMessage() {}
func (x *SectionNgdotDataArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SectionNgdotDataArgs.ProtoReflect.Descriptor instead.
func (*SectionNgdotDataArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type SectionNgdotOpenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SectionNgdotOpenArgs) Reset() {
*x = SectionNgdotOpenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SectionNgdotOpenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SectionNgdotOpenArgs) ProtoMessage() {}
func (x *SectionNgdotOpenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SectionNgdotOpenArgs.ProtoReflect.Descriptor instead.
func (*SectionNgdotOpenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type OpenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *OpenArgs) Reset() {
*x = OpenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *OpenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OpenArgs) ProtoMessage() {}
func (x *OpenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OpenArgs.ProtoReflect.Descriptor instead.
func (*OpenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *OpenArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type FileNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FileNgdotCloseArgs) Reset() {
*x = FileNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FileNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileNgdotCloseArgs) ProtoMessage() {}
func (x *FileNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
type NewFileArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewFileArgs) Reset() {
*x = NewFileArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewFileArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewFileArgs) ProtoMessage() {}
func (x *NewFileArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewFileArgs.ProtoReflect.Descriptor instead.
func (*NewFileArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NewFileArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type FileNgdotSymbolsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FileNgdotSymbolsArgs) Reset() {
*x = FileNgdotSymbolsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FileNgdotSymbolsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileNgdotSymbolsArgs) ProtoMessage() {}
func (x *FileNgdotSymbolsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileNgdotSymbolsArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotSymbolsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
type FileNgdotSectionArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FileNgdotSectionArgs) Reset() {
*x = FileNgdotSectionArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FileNgdotSectionArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FileNgdotSectionArgs) ProtoMessage() {}
func (x *FileNgdotSectionArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FileNgdotSectionArgs.ProtoReflect.Descriptor instead.
func (*FileNgdotSectionArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *FileNgdotSectionArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_SectionNgdotData
// *NgoloFuzzOne_SectionNgdotOpen
// *NgoloFuzzOne_Open
// *NgoloFuzzOne_FileNgdotClose
// *NgoloFuzzOne_NewFile
// *NgoloFuzzOne_FileNgdotSymbols
// *NgoloFuzzOne_FileNgdotSection
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetSectionNgdotData() *SectionNgdotDataArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionNgdotData); ok {
return x.SectionNgdotData
}
}
return nil
}
func (x *NgoloFuzzOne) GetSectionNgdotOpen() *SectionNgdotOpenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionNgdotOpen); ok {
return x.SectionNgdotOpen
}
}
return nil
}
func (x *NgoloFuzzOne) GetOpen() *OpenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Open); ok {
return x.Open
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotClose() *FileNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotClose); ok {
return x.FileNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewFile() *NewFileArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewFile); ok {
return x.NewFile
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotSymbols() *FileNgdotSymbolsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotSymbols); ok {
return x.FileNgdotSymbols
}
}
return nil
}
func (x *NgoloFuzzOne) GetFileNgdotSection() *FileNgdotSectionArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FileNgdotSection); ok {
return x.FileNgdotSection
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_SectionNgdotData struct {
SectionNgdotData *SectionNgdotDataArgs `protobuf:"bytes,1,opt,name=SectionNgdotData,proto3,oneof"`
}
type NgoloFuzzOne_SectionNgdotOpen struct {
SectionNgdotOpen *SectionNgdotOpenArgs `protobuf:"bytes,2,opt,name=SectionNgdotOpen,proto3,oneof"`
}
type NgoloFuzzOne_Open struct {
Open *OpenArgs `protobuf:"bytes,3,opt,name=Open,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotClose struct {
FileNgdotClose *FileNgdotCloseArgs `protobuf:"bytes,4,opt,name=FileNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_NewFile struct {
NewFile *NewFileArgs `protobuf:"bytes,5,opt,name=NewFile,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotSymbols struct {
FileNgdotSymbols *FileNgdotSymbolsArgs `protobuf:"bytes,6,opt,name=FileNgdotSymbols,proto3,oneof"`
}
type NgoloFuzzOne_FileNgdotSection struct {
FileNgdotSection *FileNgdotSectionArgs `protobuf:"bytes,7,opt,name=FileNgdotSection,proto3,oneof"`
}
func (*NgoloFuzzOne_SectionNgdotData) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SectionNgdotOpen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Open) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewFile) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotSymbols) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FileNgdotSection) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x16\n" +
"\x14SectionNgdotDataArgs\"\x16\n" +
"\x14SectionNgdotOpenArgs\"\x1e\n" +
"\bOpenArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x14\n" +
"\x12FileNgdotCloseArgs\"\x1b\n" +
"\vNewFileArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\x16\n" +
"\x14FileNgdotSymbolsArgs\"*\n" +
"\x14FileNgdotSectionArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\xfa\x03\n" +
"\fNgoloFuzzOne\x12M\n" +
"\x10SectionNgdotData\x18\x01 \x01(\v2\x1f.ngolofuzz.SectionNgdotDataArgsH\x00R\x10SectionNgdotData\x12M\n" +
"\x10SectionNgdotOpen\x18\x02 \x01(\v2\x1f.ngolofuzz.SectionNgdotOpenArgsH\x00R\x10SectionNgdotOpen\x12)\n" +
"\x04Open\x18\x03 \x01(\v2\x13.ngolofuzz.OpenArgsH\x00R\x04Open\x12G\n" +
"\x0eFileNgdotClose\x18\x04 \x01(\v2\x1d.ngolofuzz.FileNgdotCloseArgsH\x00R\x0eFileNgdotClose\x122\n" +
"\aNewFile\x18\x05 \x01(\v2\x16.ngolofuzz.NewFileArgsH\x00R\aNewFile\x12M\n" +
"\x10FileNgdotSymbols\x18\x06 \x01(\v2\x1f.ngolofuzz.FileNgdotSymbolsArgsH\x00R\x10FileNgdotSymbols\x12M\n" +
"\x10FileNgdotSection\x18\a \x01(\v2\x1f.ngolofuzz.FileNgdotSectionArgsH\x00R\x10FileNgdotSectionB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_debug_plan9objb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw file
// descriptor exactly once (guarded by sync.Once) and returns the cached
// compressed bytes for Descriptor() calls. Generated by protoc-gen-go;
// do not hand-edit.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
var file_ngolofuzz_proto_goTypes = []any{
(*SectionNgdotDataArgs)(nil), // 0: ngolofuzz.SectionNgdotDataArgs
(*SectionNgdotOpenArgs)(nil), // 1: ngolofuzz.SectionNgdotOpenArgs
(*OpenArgs)(nil), // 2: ngolofuzz.OpenArgs
(*FileNgdotCloseArgs)(nil), // 3: ngolofuzz.FileNgdotCloseArgs
(*NewFileArgs)(nil), // 4: ngolofuzz.NewFileArgs
(*FileNgdotSymbolsArgs)(nil), // 5: ngolofuzz.FileNgdotSymbolsArgs
(*FileNgdotSectionArgs)(nil), // 6: ngolofuzz.FileNgdotSectionArgs
(*NgoloFuzzOne)(nil), // 7: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 8: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 9: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.SectionNgdotData:type_name -> ngolofuzz.SectionNgdotDataArgs
1, // 1: ngolofuzz.NgoloFuzzOne.SectionNgdotOpen:type_name -> ngolofuzz.SectionNgdotOpenArgs
2, // 2: ngolofuzz.NgoloFuzzOne.Open:type_name -> ngolofuzz.OpenArgs
3, // 3: ngolofuzz.NgoloFuzzOne.FileNgdotClose:type_name -> ngolofuzz.FileNgdotCloseArgs
4, // 4: ngolofuzz.NgoloFuzzOne.NewFile:type_name -> ngolofuzz.NewFileArgs
5, // 5: ngolofuzz.NgoloFuzzOne.FileNgdotSymbols:type_name -> ngolofuzz.FileNgdotSymbolsArgs
6, // 6: ngolofuzz.NgoloFuzzOne.FileNgdotSection:type_name -> ngolofuzz.FileNgdotSectionArgs
7, // 7: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
8, // [8:8] is the sub-list for method output_type
8, // [8:8] is the sub-list for method input_type
8, // [8:8] is the sub-list for extension type_name
8, // [8:8] is the sub-list for extension extendee
0, // [0:8] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the generated message types, oneof
// wrappers, and dependency indexes with the protobuf runtime. Idempotent:
// returns immediately once File_ngolofuzz_proto is populated. Generated by
// protoc-gen-go; do not hand-edit.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Oneof wrapper types for NgoloFuzzOne.Item (message index 7).
	file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
		(*NgoloFuzzOne_SectionNgdotData)(nil),
		(*NgoloFuzzOne_SectionNgdotOpen)(nil),
		(*NgoloFuzzOne_Open)(nil),
		(*NgoloFuzzOne_FileNgdotClose)(nil),
		(*NgoloFuzzOne_NewFile)(nil),
		(*NgoloFuzzOne_FileNgdotSymbols)(nil),
		(*NgoloFuzzOne_FileNgdotSection)(nil),
	}
	// Oneof wrapper types for NgoloFuzzAny.Item (message index 8).
	file_ngolofuzz_proto_msgTypes[8].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   10,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release init-only tables so they can be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_ascii85
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/ascii85"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write discards b and reports that every byte was accepted.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}

// Close marks the buffer as fully consumed so later reads return EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	const network = "fuzz_addr_net"
	return network
}

// String reports a fixed fake address string.
func (c *FuzzingAddr) String() string {
	const address = "fuzz_addr_string"
	return address
}
// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr { return &FuzzingAddr{} }

// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr { return &FuzzingAddr{} }

// SetDeadline is a no-op; deadlines are meaningless for an in-memory conn.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn wraps a in a FuzzingConn whose reads serve those bytes.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
// ConvertIntArray narrows each int64 element to int, preserving order.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 element to uint16, preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
	if rs := []rune(s); len(rs) > 0 {
		return rs[0]
	}
	return '\x00'
}
// FuzzNG_valid decodes data — which is assumed to already be a valid
// protobuf-encoded call list — and replays it. A decode failure is a
// harness bug, hence the panic. String panics raised while replaying are
// swallowed (they are treated as expected library rejections); any other
// panic value is re-raised so the fuzzer reports it.
func FuzzNG_valid(data []byte) int {
	list := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, list); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			// Non-string panics are genuine findings: propagate.
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// FuzzNG_unsure is the entry point used when the input may not be a valid
// protobuf: a decode failure simply rejects the input instead of
// panicking. Otherwise identical to FuzzNG_valid — string panics during
// replay are swallowed, everything else is re-raised.
func FuzzNG_unsure(data []byte) int {
	list := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, list); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			// Non-string panics are genuine findings: propagate.
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
var initialized bool
// FuzzNG_List replays the decoded call list against encoding/ascii85.
// On the first call it optionally dumps a Go reproducer to the file named
// by FUZZ_NG_REPRODUCER. Returns 1 when the whole list was replayed,
// 0 when replay stopped early (decode error or list too long).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the number of replayed calls to keep iterations fast.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Encode:
			a.Encode.Dst = make([]byte, ascii85.MaxEncodedLen(len(a.Encode.Src)))
			ascii85.Encode(a.Encode.Dst, a.Encode.Src)
		case *NgoloFuzzOne_MaxEncodedLen:
			arg0 := int(a.MaxEncodedLen.N)
			ascii85.MaxEncodedLen(arg0)
		case *NgoloFuzzOne_NewEncoder:
			arg0 := bytes.NewBuffer(a.NewEncoder.W)
			ascii85.NewEncoder(arg0)
		case *NgoloFuzzOne_Decode:
			// Bug fix: ascii85's 'z' shorthand decodes a single source
			// byte into four zero bytes, so the worst-case output is
			// 4*len(src). The previous 2*len(src) buffer let Decode
			// index out of range and panic the harness on inputs like
			// "zzz", producing false-positive crashes.
			a.Decode.Dst = make([]byte, 4*len(a.Decode.Src))
			_, _, r2 := ascii85.Decode(a.Decode.Dst, a.Decode.Src, a.Decode.Flush)
			if r2 != nil {
				r2.Error()
				return 0
			}
		case *NgoloFuzzOne_NewDecoder:
			arg0 := bytes.NewReader(a.NewDecoder.R)
			ascii85.NewDecoder(arg0)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the call list to w,
// one ascii85 call per list entry.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for _, one := range gen.List {
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_Encode:
			w.WriteString(fmt.Sprintf("ascii85.Encode(%#+v, %#+v)\n", a.Encode.Dst, a.Encode.Src))
		case *NgoloFuzzOne_MaxEncodedLen:
			w.WriteString(fmt.Sprintf("ascii85.MaxEncodedLen(int(%#+v))\n", a.MaxEncodedLen.N))
		case *NgoloFuzzOne_NewEncoder:
			w.WriteString(fmt.Sprintf("ascii85.NewEncoder(bytes.NewBuffer(%#+v))\n", a.NewEncoder.W))
		case *NgoloFuzzOne_Decode:
			w.WriteString(fmt.Sprintf("ascii85.Decode(%#+v, %#+v, %#+v)\n", a.Decode.Dst, a.Decode.Src, a.Decode.Flush))
		case *NgoloFuzzOne_NewDecoder:
			w.WriteString(fmt.Sprintf("ascii85.NewDecoder(bytes.NewReader(%#+v))\n", a.NewDecoder.R))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_ascii85
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type EncodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
Src []byte `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodeArgs) Reset() {
*x = EncodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodeArgs) ProtoMessage() {}
func (x *EncodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodeArgs.ProtoReflect.Descriptor instead.
func (*EncodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *EncodeArgs) GetDst() []byte {
if x != nil {
return x.Dst
}
return nil
}
func (x *EncodeArgs) GetSrc() []byte {
if x != nil {
return x.Src
}
return nil
}
type MaxEncodedLenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MaxEncodedLenArgs) Reset() {
*x = MaxEncodedLenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MaxEncodedLenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MaxEncodedLenArgs) ProtoMessage() {}
func (x *MaxEncodedLenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MaxEncodedLenArgs.ProtoReflect.Descriptor instead.
func (*MaxEncodedLenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *MaxEncodedLenArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
type NewEncoderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewEncoderArgs) Reset() {
*x = NewEncoderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewEncoderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewEncoderArgs) ProtoMessage() {}
func (x *NewEncoderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewEncoderArgs.ProtoReflect.Descriptor instead.
func (*NewEncoderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NewEncoderArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type DecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
Src []byte `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
Flush bool `protobuf:"varint,3,opt,name=flush,proto3" json:"flush,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeArgs) Reset() {
*x = DecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeArgs) ProtoMessage() {}
func (x *DecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeArgs.ProtoReflect.Descriptor instead.
func (*DecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *DecodeArgs) GetDst() []byte {
if x != nil {
return x.Dst
}
return nil
}
func (x *DecodeArgs) GetSrc() []byte {
if x != nil {
return x.Src
}
return nil
}
func (x *DecodeArgs) GetFlush() bool {
if x != nil {
return x.Flush
}
return false
}
type NewDecoderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewDecoderArgs) Reset() {
*x = NewDecoderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewDecoderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewDecoderArgs) ProtoMessage() {}
func (x *NewDecoderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewDecoderArgs.ProtoReflect.Descriptor instead.
func (*NewDecoderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NewDecoderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Encode
// *NgoloFuzzOne_MaxEncodedLen
// *NgoloFuzzOne_NewEncoder
// *NgoloFuzzOne_Decode
// *NgoloFuzzOne_NewDecoder
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetEncode() *EncodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Encode); ok {
return x.Encode
}
}
return nil
}
func (x *NgoloFuzzOne) GetMaxEncodedLen() *MaxEncodedLenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MaxEncodedLen); ok {
return x.MaxEncodedLen
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewEncoder() *NewEncoderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewEncoder); ok {
return x.NewEncoder
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecode() *DecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Decode); ok {
return x.Decode
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewDecoder() *NewDecoderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewDecoder); ok {
return x.NewDecoder
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Encode struct {
Encode *EncodeArgs `protobuf:"bytes,1,opt,name=Encode,proto3,oneof"`
}
type NgoloFuzzOne_MaxEncodedLen struct {
MaxEncodedLen *MaxEncodedLenArgs `protobuf:"bytes,2,opt,name=MaxEncodedLen,proto3,oneof"`
}
type NgoloFuzzOne_NewEncoder struct {
NewEncoder *NewEncoderArgs `protobuf:"bytes,3,opt,name=NewEncoder,proto3,oneof"`
}
type NgoloFuzzOne_Decode struct {
Decode *DecodeArgs `protobuf:"bytes,4,opt,name=Decode,proto3,oneof"`
}
type NgoloFuzzOne_NewDecoder struct {
NewDecoder *NewDecoderArgs `protobuf:"bytes,5,opt,name=NewDecoder,proto3,oneof"`
}
func (*NgoloFuzzOne_Encode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MaxEncodedLen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewEncoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Decode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewDecoder) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"0\n" +
"\n" +
"EncodeArgs\x12\x10\n" +
"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
"\x03src\x18\x02 \x01(\fR\x03src\"!\n" +
"\x11MaxEncodedLenArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"\x1e\n" +
"\x0eNewEncoderArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"F\n" +
"\n" +
"DecodeArgs\x12\x10\n" +
"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
"\x03src\x18\x02 \x01(\fR\x03src\x12\x14\n" +
"\x05flush\x18\x03 \x01(\bR\x05flush\"\x1e\n" +
"\x0eNewDecoderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\xb8\x02\n" +
"\fNgoloFuzzOne\x12/\n" +
"\x06Encode\x18\x01 \x01(\v2\x15.ngolofuzz.EncodeArgsH\x00R\x06Encode\x12D\n" +
"\rMaxEncodedLen\x18\x02 \x01(\v2\x1c.ngolofuzz.MaxEncodedLenArgsH\x00R\rMaxEncodedLen\x12;\n" +
"\n" +
"NewEncoder\x18\x03 \x01(\v2\x19.ngolofuzz.NewEncoderArgsH\x00R\n" +
"NewEncoder\x12/\n" +
"\x06Decode\x18\x04 \x01(\v2\x15.ngolofuzz.DecodeArgsH\x00R\x06Decode\x12;\n" +
"\n" +
"NewDecoder\x18\x05 \x01(\v2\x19.ngolofuzz.NewDecoderArgsH\x00R\n" +
"NewDecoderB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1dZ\x1b./;fuzz_ng_encoding_ascii85b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw file
// descriptor exactly once (guarded by a sync.Once) and returns the cached
// compressed bytes on every subsequent call.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		// unsafe.Slice over StringData avoids copying the large descriptor
		// string before compressing it.
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_ngolofuzz_proto_goTypes = []any{
(*EncodeArgs)(nil), // 0: ngolofuzz.EncodeArgs
(*MaxEncodedLenArgs)(nil), // 1: ngolofuzz.MaxEncodedLenArgs
(*NewEncoderArgs)(nil), // 2: ngolofuzz.NewEncoderArgs
(*DecodeArgs)(nil), // 3: ngolofuzz.DecodeArgs
(*NewDecoderArgs)(nil), // 4: ngolofuzz.NewDecoderArgs
(*NgoloFuzzOne)(nil), // 5: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 6: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 7: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.Encode:type_name -> ngolofuzz.EncodeArgs
1, // 1: ngolofuzz.NgoloFuzzOne.MaxEncodedLen:type_name -> ngolofuzz.MaxEncodedLenArgs
2, // 2: ngolofuzz.NgoloFuzzOne.NewEncoder:type_name -> ngolofuzz.NewEncoderArgs
3, // 3: ngolofuzz.NgoloFuzzOne.Decode:type_name -> ngolofuzz.DecodeArgs
4, // 4: ngolofuzz.NgoloFuzzOne.NewDecoder:type_name -> ngolofuzz.NewDecoderArgs
5, // 5: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
6, // [6:6] is the sub-list for method output_type
6, // [6:6] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init builds and registers the file descriptor and
// message type metadata. It is idempotent: once File_ngolofuzz_proto is
// set, later calls return immediately.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Register oneof wrapper types for NgoloFuzzOne (msgTypes[5]).
	file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
		(*NgoloFuzzOne_Encode)(nil),
		(*NgoloFuzzOne_MaxEncodedLen)(nil),
		(*NgoloFuzzOne_NewEncoder)(nil),
		(*NgoloFuzzOne_Decode)(nil),
		(*NgoloFuzzOne_NewDecoder)(nil),
	}
	// Register oneof wrapper types for NgoloFuzzAny (msgTypes[6]).
	file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	// Throwaway type used only to derive this package's import path.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   8,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Drop the temporary bookkeeping slices so they can be collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_asn1
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/asn1"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn-like stub that serves the bytes in
// buf and discards writes, used to feed fuzzer-chosen data to APIs that
// expect a connection.
type FuzzingConn struct {
	buf    []byte
	offset int
}

// Read copies up to len(b) of the remaining buffered bytes into b and
// advances the read offset. It returns io.EOF once the buffer is drained.
//
// Fixes the original bounds test `len(b) < len(c.buf)+c.offset`, which
// should have compared against the remaining count len(c.buf)-c.offset;
// the wrong branch could report more bytes read than were copied and push
// offset past the end of the buffer.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy truncates to min(len(b), remaining), unifying the partial and
	// full cases.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written, like io.Discard.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection as drained so subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a fixed placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a constant fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a constant fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns the placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns the placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: deadlines are meaningless for an in-memory conn.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps a in a FuzzingConn positioned at offset 0.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
//TODO only add these functions if needed

// CreateBigInt interprets a as big-endian unsigned bytes and returns the
// corresponding big.Int.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the byte slice a through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	var src io.Reader = bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray widens/narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (mod 2^16).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 yields utf8.RuneError, matching range-over-string semantics.
func GetRune(s string) rune {
	r := rune(0)
	for _, first := range s {
		r = first
		break
	}
	return r
}
// FuzzNG_valid is the entry point for inputs that are guaranteed to be
// valid protobufs (e.g. from libprotobuf-mutator): a failed unmarshal is a
// harness bug and panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// NOTE: the recover is installed only after Unmarshal, so the panic
	// above intentionally escapes; only string panics raised while
	// replaying the call list below are swallowed.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point for arbitrary fuzzer bytes: input that
// fails to unmarshal as a NgoloFuzzList is rejected (returns 0) rather
// than treated as a harness bug.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics raised while replaying the call list; any
	// other panic value is a genuine crash and is re-raised.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards one-time setup of the optional reproducer dump.
var initialized bool

// FuzzNG_List replays the generated call list against encoding/asn1.
// It returns 1 when the whole list was executed and 0 on early exit
// (oversized list or an error from an asn1 call).
//
// Fixes: the reproducer file returned by os.Create was never closed
// (leaked one fd per fuzzing process); also gofmt-normalizes the
// `!= nil{` brace spacing.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On first run, optionally dump a reproducer of the call
		// sequence to the file named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
				f.Close()
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the number of executed calls to keep each run bounded.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Unmarshal_:
			_, r1 := asn1.Unmarshal(a.Unmarshal_.B, a.Unmarshal_.Val)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_UnmarshalWithParams:
			_, r1 := asn1.UnmarshalWithParams(a.UnmarshalWithParams.B, a.UnmarshalWithParams.Val, a.UnmarshalWithParams.Params)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Marshal_:
			_, r1 := asn1.Marshal(a.Marshal_.Val)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_MarshalWithParams:
			_, r1 := asn1.MarshalWithParams(a.MarshalWithParams.Val, a.MarshalWithParams.Params)
			if r1 != nil {
				r1.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a pseudo-Go reproducer of the call list to w.
// WriteString errors are deliberately ignored: this is best-effort debug
// output produced only when FUZZ_NG_REPRODUCER is set.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Unmarshal_:
			w.WriteString(fmt.Sprintf("asn1.Unmarshal(%#+v, %#+v)\n", a.Unmarshal_.B, a.Unmarshal_.Val))
		case *NgoloFuzzOne_UnmarshalWithParams:
			w.WriteString(fmt.Sprintf("asn1.UnmarshalWithParams(%#+v, %#+v, %#+v)\n", a.UnmarshalWithParams.B, a.UnmarshalWithParams.Val, a.UnmarshalWithParams.Params))
		case *NgoloFuzzOne_Marshal_:
			w.WriteString(fmt.Sprintf("asn1.Marshal(%#+v)\n", a.Marshal_.Val))
		case *NgoloFuzzOne_MarshalWithParams:
			w.WriteString(fmt.Sprintf("asn1.MarshalWithParams(%#+v, %#+v)\n", a.MarshalWithParams.Val, a.MarshalWithParams.Params))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_asn1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type UnmarshalArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
Val *NgoloFuzzAny `protobuf:"bytes,2,opt,name=val,proto3" json:"val,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UnmarshalArgs) Reset() {
*x = UnmarshalArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UnmarshalArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnmarshalArgs) ProtoMessage() {}
func (x *UnmarshalArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnmarshalArgs.ProtoReflect.Descriptor instead.
func (*UnmarshalArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *UnmarshalArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
func (x *UnmarshalArgs) GetVal() *NgoloFuzzAny {
if x != nil {
return x.Val
}
return nil
}
type UnmarshalWithParamsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
Val *NgoloFuzzAny `protobuf:"bytes,2,opt,name=val,proto3" json:"val,omitempty"`
Params string `protobuf:"bytes,3,opt,name=params,proto3" json:"params,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UnmarshalWithParamsArgs) Reset() {
*x = UnmarshalWithParamsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UnmarshalWithParamsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnmarshalWithParamsArgs) ProtoMessage() {}
func (x *UnmarshalWithParamsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnmarshalWithParamsArgs.ProtoReflect.Descriptor instead.
func (*UnmarshalWithParamsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *UnmarshalWithParamsArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
func (x *UnmarshalWithParamsArgs) GetVal() *NgoloFuzzAny {
if x != nil {
return x.Val
}
return nil
}
func (x *UnmarshalWithParamsArgs) GetParams() string {
if x != nil {
return x.Params
}
return ""
}
type MarshalArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Val *NgoloFuzzAny `protobuf:"bytes,1,opt,name=val,proto3" json:"val,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MarshalArgs) Reset() {
*x = MarshalArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MarshalArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MarshalArgs) ProtoMessage() {}
func (x *MarshalArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MarshalArgs.ProtoReflect.Descriptor instead.
func (*MarshalArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *MarshalArgs) GetVal() *NgoloFuzzAny {
if x != nil {
return x.Val
}
return nil
}
type MarshalWithParamsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Val *NgoloFuzzAny `protobuf:"bytes,1,opt,name=val,proto3" json:"val,omitempty"`
Params string `protobuf:"bytes,2,opt,name=params,proto3" json:"params,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MarshalWithParamsArgs) Reset() {
*x = MarshalWithParamsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MarshalWithParamsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MarshalWithParamsArgs) ProtoMessage() {}
func (x *MarshalWithParamsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MarshalWithParamsArgs.ProtoReflect.Descriptor instead.
func (*MarshalWithParamsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *MarshalWithParamsArgs) GetVal() *NgoloFuzzAny {
if x != nil {
return x.Val
}
return nil
}
func (x *MarshalWithParamsArgs) GetParams() string {
if x != nil {
return x.Params
}
return ""
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Unmarshal_
// *NgoloFuzzOne_UnmarshalWithParams
// *NgoloFuzzOne_Marshal_
// *NgoloFuzzOne_MarshalWithParams
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetUnmarshal_() *UnmarshalArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Unmarshal_); ok {
return x.Unmarshal_
}
}
return nil
}
func (x *NgoloFuzzOne) GetUnmarshalWithParams() *UnmarshalWithParamsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UnmarshalWithParams); ok {
return x.UnmarshalWithParams
}
}
return nil
}
func (x *NgoloFuzzOne) GetMarshal_() *MarshalArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Marshal_); ok {
return x.Marshal_
}
}
return nil
}
func (x *NgoloFuzzOne) GetMarshalWithParams() *MarshalWithParamsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MarshalWithParams); ok {
return x.MarshalWithParams
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Unmarshal_ struct {
Unmarshal_ *UnmarshalArgs `protobuf:"bytes,1,opt,name=Unmarshal,proto3,oneof"`
}
type NgoloFuzzOne_UnmarshalWithParams struct {
UnmarshalWithParams *UnmarshalWithParamsArgs `protobuf:"bytes,2,opt,name=UnmarshalWithParams,proto3,oneof"`
}
type NgoloFuzzOne_Marshal_ struct {
Marshal_ *MarshalArgs `protobuf:"bytes,3,opt,name=Marshal,proto3,oneof"`
}
type NgoloFuzzOne_MarshalWithParams struct {
MarshalWithParams *MarshalWithParamsArgs `protobuf:"bytes,4,opt,name=MarshalWithParams,proto3,oneof"`
}
func (*NgoloFuzzOne_Unmarshal_) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UnmarshalWithParams) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Marshal_) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MarshalWithParams) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"H\n" +
"\rUnmarshalArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\x12)\n" +
"\x03val\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x03val\"j\n" +
"\x17UnmarshalWithParamsArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\x12)\n" +
"\x03val\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x03val\x12\x16\n" +
"\x06params\x18\x03 \x01(\tR\x06params\"8\n" +
"\vMarshalArgs\x12)\n" +
"\x03val\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x03val\"Z\n" +
"\x15MarshalWithParamsArgs\x12)\n" +
"\x03val\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x03val\x12\x16\n" +
"\x06params\x18\x02 \x01(\tR\x06params\"\xae\x02\n" +
"\fNgoloFuzzOne\x128\n" +
"\tUnmarshal\x18\x01 \x01(\v2\x18.ngolofuzz.UnmarshalArgsH\x00R\tUnmarshal\x12V\n" +
"\x13UnmarshalWithParams\x18\x02 \x01(\v2\".ngolofuzz.UnmarshalWithParamsArgsH\x00R\x13UnmarshalWithParams\x122\n" +
"\aMarshal\x18\x03 \x01(\v2\x16.ngolofuzz.MarshalArgsH\x00R\aMarshal\x12P\n" +
"\x11MarshalWithParams\x18\x04 \x01(\v2 .ngolofuzz.MarshalWithParamsArgsH\x00R\x11MarshalWithParamsB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_encoding_asn1b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_ngolofuzz_proto_goTypes = []any{
(*UnmarshalArgs)(nil), // 0: ngolofuzz.UnmarshalArgs
(*UnmarshalWithParamsArgs)(nil), // 1: ngolofuzz.UnmarshalWithParamsArgs
(*MarshalArgs)(nil), // 2: ngolofuzz.MarshalArgs
(*MarshalWithParamsArgs)(nil), // 3: ngolofuzz.MarshalWithParamsArgs
(*NgoloFuzzOne)(nil), // 4: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 5: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 6: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
5, // 0: ngolofuzz.UnmarshalArgs.val:type_name -> ngolofuzz.NgoloFuzzAny
5, // 1: ngolofuzz.UnmarshalWithParamsArgs.val:type_name -> ngolofuzz.NgoloFuzzAny
5, // 2: ngolofuzz.MarshalArgs.val:type_name -> ngolofuzz.NgoloFuzzAny
5, // 3: ngolofuzz.MarshalWithParamsArgs.val:type_name -> ngolofuzz.NgoloFuzzAny
0, // 4: ngolofuzz.NgoloFuzzOne.Unmarshal:type_name -> ngolofuzz.UnmarshalArgs
1, // 5: ngolofuzz.NgoloFuzzOne.UnmarshalWithParams:type_name -> ngolofuzz.UnmarshalWithParamsArgs
2, // 6: ngolofuzz.NgoloFuzzOne.Marshal:type_name -> ngolofuzz.MarshalArgs
3, // 7: ngolofuzz.NgoloFuzzOne.MarshalWithParams:type_name -> ngolofuzz.MarshalWithParamsArgs
4, // 8: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
9, // [9:9] is the sub-list for method output_type
9, // [9:9] is the sub-list for method input_type
9, // [9:9] is the sub-list for extension type_name
9, // [9:9] is the sub-list for extension extendee
0, // [0:9] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
(*NgoloFuzzOne_Unmarshal_)(nil),
(*NgoloFuzzOne_UnmarshalWithParams)(nil),
(*NgoloFuzzOne_Marshal_)(nil),
(*NgoloFuzzOne_MarshalWithParams)(nil),
}
file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_base32
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/base32"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn-like stub that serves the bytes in
// buf and discards writes, used to feed fuzzer-chosen data to APIs that
// expect a connection.
type FuzzingConn struct {
	buf    []byte
	offset int
}

// Read copies up to len(b) of the remaining buffered bytes into b and
// advances the read offset. It returns io.EOF once the buffer is drained.
//
// Fixes the original bounds test `len(b) < len(c.buf)+c.offset`, which
// should have compared against the remaining count len(c.buf)-c.offset;
// the wrong branch could report more bytes read than were copied and push
// offset past the end of the buffer.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy truncates to min(len(b), remaining), unifying the partial and
	// full cases.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
//TODO only add these functions if needed
func CreateBigInt(a []byte) *big.Int {
r := new(big.Int)
r.SetBytes(a)
return r
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
func ConvertIntArray(a []int64) []int {
r := make([]int, len(a))
for i := range a {
r[i] = int(a[i])
}
return r
}
func ConvertUint16Array(a []int64) []uint16 {
r := make([]uint16, len(a))
for i := range a {
r[i] = uint16(a[i])
}
return r
}
func GetRune(s string) rune {
for _, c := range s {
return c
}
return '\x00'
}
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
var initialized bool
// FuzzNG_List interprets gen as an ordered sequence of encoding/base32 API
// calls and executes them. *base32.Encoding values produced by constructors
// (NewEncoding, WithPadding) are collected in EncodingResults and handed to
// later method calls round-robin via EncodingResultsIndex. Returns 1 when
// the whole sequence ran, 0 when it was cut short (sequence too long, or a
// Decode/DecodeString error).
//
// On first use, if FUZZ_NG_REPRODUCER names a file, a Go-source reproducer
// of the sequence is written there via PrintNG_List.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var EncodingResults []*base32.Encoding
	EncodingResultsIndex := 0
	for l := range gen.List {
		// Cap the number of replayed calls so a single input stays fast.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewEncoding:
			// NewEncoding panics (with a string, swallowed by the callers'
			// recover) when the alphabet is not exactly 32 bytes.
			r0 := base32.NewEncoding(a.NewEncoding.Encoder)
			if r0 != nil {
				EncodingResults = append(EncodingResults, r0)
			}
		case *NgoloFuzzOne_EncodingNgdotWithPadding:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg1 := GetRune(a.EncodingNgdotWithPadding.Padding)
			r0 := arg0.WithPadding(arg1)
			if r0 != nil {
				EncodingResults = append(EncodingResults, r0)
			}
		case *NgoloFuzzOne_EncodingNgdotEncode:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			// BUGFIX: size dst with EncodedLen, not 2*len(src). With padding
			// the output is (n+4)/5*8 bytes, which exceeds 2*n for short
			// inputs (n=1 needs 8 bytes), and an undersized dst makes
			// Encoding.Encode panic spuriously.
			a.EncodingNgdotEncode.Dst = make([]byte, arg0.EncodedLen(len(a.EncodingNgdotEncode.Src)))
			arg0.Encode(a.EncodingNgdotEncode.Dst, a.EncodingNgdotEncode.Src)
		case *NgoloFuzzOne_EncodingNgdotAppendEncode:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			// AppendEncode appends to dst, so the pre-sized prefix is kept
			// as-is; no overflow is possible here.
			a.EncodingNgdotAppendEncode.Dst = make([]byte, 2*len(a.EncodingNgdotAppendEncode.Src))
			arg0.AppendEncode(a.EncodingNgdotAppendEncode.Dst, a.EncodingNgdotAppendEncode.Src)
		case *NgoloFuzzOne_EncodingNgdotEncodeToString:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg0.EncodeToString(a.EncodingNgdotEncodeToString.Src)
		case *NgoloFuzzOne_NewEncoder:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg1 := bytes.NewBuffer(a.NewEncoder.W)
			base32.NewEncoder(arg0, arg1)
		case *NgoloFuzzOne_EncodingNgdotEncodedLen:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg1 := int(a.EncodingNgdotEncodedLen.N)
			arg0.EncodedLen(arg1)
		case *NgoloFuzzOne_EncodingNgdotDecode:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			// 2*len(src) is always sufficient here: DecodedLen never
			// exceeds len(src) for base32.
			a.EncodingNgdotDecode.Dst = make([]byte, 2*len(a.EncodingNgdotDecode.Src))
			_, r1 := arg0.Decode(a.EncodingNgdotDecode.Dst, a.EncodingNgdotDecode.Src)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_EncodingNgdotAppendDecode:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			a.EncodingNgdotAppendDecode.Dst = make([]byte, 2*len(a.EncodingNgdotAppendDecode.Src))
			_, r1 := arg0.AppendDecode(a.EncodingNgdotAppendDecode.Dst, a.EncodingNgdotAppendDecode.Src)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_EncodingNgdotDecodeString:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			_, r1 := arg0.DecodeString(a.EncodingNgdotDecodeString.S)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_NewDecoder:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg1 := bytes.NewReader(a.NewDecoder.R)
			base32.NewDecoder(arg0, arg1)
		case *NgoloFuzzOne_EncodingNgdotDecodedLen:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg1 := int(a.EncodingNgdotDecodedLen.N)
			arg0.DecodedLen(arg1)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer of the call sequence in gen to
// w, emitting one statement per list item in the same order FuzzNG_List
// replays them. WriteString errors are ignored (reproducer output is
// best-effort).
//
// NOTE(review): the round-robin bookkeeping here is close to, but not
// obviously identical with, FuzzNG_List's: in the WithPadding case
// FuzzNG_List takes the modulo against the result count *before* appending
// the new result, while here EncodingNb is incremented first. Confirm
// reproducers of WithPadding-chaining sequences against an actual replay.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	EncodingNb := 0           // number of Encoding%d variables declared so far
	EncodingResultsIndex := 0 // index of the Encoding%d used as the next receiver
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewEncoding:
			w.WriteString(fmt.Sprintf("Encoding%d := base32.NewEncoding(%#+v)\n", EncodingNb, a.NewEncoding.Encoder))
			EncodingNb = EncodingNb + 1
		case *NgoloFuzzOne_EncodingNgdotWithPadding:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d := Encoding%d.WithPadding(GetRune(%#+v))\n", EncodingNb, EncodingResultsIndex, a.EncodingNgdotWithPadding.Padding))
			EncodingNb = EncodingNb + 1
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotEncode:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.Encode(%#+v, %#+v)\n", EncodingResultsIndex, a.EncodingNgdotEncode.Dst, a.EncodingNgdotEncode.Src))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotAppendEncode:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.AppendEncode(%#+v, %#+v)\n", EncodingResultsIndex, a.EncodingNgdotAppendEncode.Dst, a.EncodingNgdotAppendEncode.Src))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotEncodeToString:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.EncodeToString(%#+v)\n", EncodingResultsIndex, a.EncodingNgdotEncodeToString.Src))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_NewEncoder:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("base32.NewEncoder(Encoding%d, bytes.NewBuffer(%#+v))\n", (EncodingResultsIndex+0)%EncodingNb, a.NewEncoder.W))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotEncodedLen:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.EncodedLen(int(%#+v))\n", EncodingResultsIndex, a.EncodingNgdotEncodedLen.N))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotDecode:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.Decode(%#+v, %#+v)\n", EncodingResultsIndex, a.EncodingNgdotDecode.Dst, a.EncodingNgdotDecode.Src))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotAppendDecode:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.AppendDecode(%#+v, %#+v)\n", EncodingResultsIndex, a.EncodingNgdotAppendDecode.Dst, a.EncodingNgdotAppendDecode.Src))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotDecodeString:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.DecodeString(%#+v)\n", EncodingResultsIndex, a.EncodingNgdotDecodeString.S))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_NewDecoder:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("base32.NewDecoder(Encoding%d, bytes.NewReader(%#+v))\n", (EncodingResultsIndex+0)%EncodingNb, a.NewDecoder.R))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotDecodedLen:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.DecodedLen(int(%#+v))\n", EncodingResultsIndex, a.EncodingNgdotDecodedLen.N))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_base32
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewEncodingArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Encoder string `protobuf:"bytes,1,opt,name=encoder,proto3" json:"encoder,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewEncodingArgs) Reset() {
*x = NewEncodingArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewEncodingArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewEncodingArgs) ProtoMessage() {}
func (x *NewEncodingArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewEncodingArgs.ProtoReflect.Descriptor instead.
func (*NewEncodingArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewEncodingArgs) GetEncoder() string {
if x != nil {
return x.Encoder
}
return ""
}
type EncodingNgdotWithPaddingArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Padding string `protobuf:"bytes,1,opt,name=padding,proto3" json:"padding,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodingNgdotWithPaddingArgs) Reset() {
*x = EncodingNgdotWithPaddingArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodingNgdotWithPaddingArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodingNgdotWithPaddingArgs) ProtoMessage() {}
func (x *EncodingNgdotWithPaddingArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodingNgdotWithPaddingArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotWithPaddingArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *EncodingNgdotWithPaddingArgs) GetPadding() string {
if x != nil {
return x.Padding
}
return ""
}
type EncodingNgdotEncodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
Src []byte `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodingNgdotEncodeArgs) Reset() {
*x = EncodingNgdotEncodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodingNgdotEncodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodingNgdotEncodeArgs) ProtoMessage() {}
func (x *EncodingNgdotEncodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodingNgdotEncodeArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotEncodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *EncodingNgdotEncodeArgs) GetDst() []byte {
if x != nil {
return x.Dst
}
return nil
}
func (x *EncodingNgdotEncodeArgs) GetSrc() []byte {
if x != nil {
return x.Src
}
return nil
}
type EncodingNgdotAppendEncodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
Src []byte `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodingNgdotAppendEncodeArgs) Reset() {
*x = EncodingNgdotAppendEncodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodingNgdotAppendEncodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodingNgdotAppendEncodeArgs) ProtoMessage() {}
func (x *EncodingNgdotAppendEncodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodingNgdotAppendEncodeArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotAppendEncodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *EncodingNgdotAppendEncodeArgs) GetDst() []byte {
if x != nil {
return x.Dst
}
return nil
}
func (x *EncodingNgdotAppendEncodeArgs) GetSrc() []byte {
if x != nil {
return x.Src
}
return nil
}
type EncodingNgdotEncodeToStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Src []byte `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodingNgdotEncodeToStringArgs) Reset() {
*x = EncodingNgdotEncodeToStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodingNgdotEncodeToStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodingNgdotEncodeToStringArgs) ProtoMessage() {}
func (x *EncodingNgdotEncodeToStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodingNgdotEncodeToStringArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotEncodeToStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *EncodingNgdotEncodeToStringArgs) GetSrc() []byte {
if x != nil {
return x.Src
}
return nil
}
type NewEncoderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewEncoderArgs) Reset() {
*x = NewEncoderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewEncoderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewEncoderArgs) ProtoMessage() {}
func (x *NewEncoderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewEncoderArgs.ProtoReflect.Descriptor instead.
func (*NewEncoderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NewEncoderArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type EncodingNgdotEncodedLenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodingNgdotEncodedLenArgs) Reset() {
*x = EncodingNgdotEncodedLenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodingNgdotEncodedLenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodingNgdotEncodedLenArgs) ProtoMessage() {}
func (x *EncodingNgdotEncodedLenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodingNgdotEncodedLenArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotEncodedLenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *EncodingNgdotEncodedLenArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
type EncodingNgdotDecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
Src []byte `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodingNgdotDecodeArgs) Reset() {
*x = EncodingNgdotDecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodingNgdotDecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodingNgdotDecodeArgs) ProtoMessage() {}
func (x *EncodingNgdotDecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodingNgdotDecodeArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotDecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *EncodingNgdotDecodeArgs) GetDst() []byte {
if x != nil {
return x.Dst
}
return nil
}
func (x *EncodingNgdotDecodeArgs) GetSrc() []byte {
if x != nil {
return x.Src
}
return nil
}
type EncodingNgdotAppendDecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
Src []byte `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodingNgdotAppendDecodeArgs) Reset() {
*x = EncodingNgdotAppendDecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodingNgdotAppendDecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodingNgdotAppendDecodeArgs) ProtoMessage() {}
func (x *EncodingNgdotAppendDecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodingNgdotAppendDecodeArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotAppendDecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *EncodingNgdotAppendDecodeArgs) GetDst() []byte {
if x != nil {
return x.Dst
}
return nil
}
func (x *EncodingNgdotAppendDecodeArgs) GetSrc() []byte {
if x != nil {
return x.Src
}
return nil
}
type EncodingNgdotDecodeStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodingNgdotDecodeStringArgs) Reset() {
*x = EncodingNgdotDecodeStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodingNgdotDecodeStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodingNgdotDecodeStringArgs) ProtoMessage() {}
func (x *EncodingNgdotDecodeStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodingNgdotDecodeStringArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotDecodeStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
func (x *EncodingNgdotDecodeStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
type NewDecoderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewDecoderArgs) Reset() {
*x = NewDecoderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewDecoderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewDecoderArgs) ProtoMessage() {}
func (x *NewDecoderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewDecoderArgs.ProtoReflect.Descriptor instead.
func (*NewDecoderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *NewDecoderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type EncodingNgdotDecodedLenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodingNgdotDecodedLenArgs) Reset() {
*x = EncodingNgdotDecodedLenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodingNgdotDecodedLenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodingNgdotDecodedLenArgs) ProtoMessage() {}
func (x *EncodingNgdotDecodedLenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodingNgdotDecodedLenArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotDecodedLenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *EncodingNgdotDecodedLenArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewEncoding
// *NgoloFuzzOne_EncodingNgdotWithPadding
// *NgoloFuzzOne_EncodingNgdotEncode
// *NgoloFuzzOne_EncodingNgdotAppendEncode
// *NgoloFuzzOne_EncodingNgdotEncodeToString
// *NgoloFuzzOne_NewEncoder
// *NgoloFuzzOne_EncodingNgdotEncodedLen
// *NgoloFuzzOne_EncodingNgdotDecode
// *NgoloFuzzOne_EncodingNgdotAppendDecode
// *NgoloFuzzOne_EncodingNgdotDecodeString
// *NgoloFuzzOne_NewDecoder
// *NgoloFuzzOne_EncodingNgdotDecodedLen
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewEncoding() *NewEncodingArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewEncoding); ok {
return x.NewEncoding
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodingNgdotWithPadding() *EncodingNgdotWithPaddingArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotWithPadding); ok {
return x.EncodingNgdotWithPadding
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodingNgdotEncode() *EncodingNgdotEncodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotEncode); ok {
return x.EncodingNgdotEncode
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodingNgdotAppendEncode() *EncodingNgdotAppendEncodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotAppendEncode); ok {
return x.EncodingNgdotAppendEncode
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodingNgdotEncodeToString() *EncodingNgdotEncodeToStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotEncodeToString); ok {
return x.EncodingNgdotEncodeToString
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewEncoder() *NewEncoderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewEncoder); ok {
return x.NewEncoder
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodingNgdotEncodedLen() *EncodingNgdotEncodedLenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotEncodedLen); ok {
return x.EncodingNgdotEncodedLen
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodingNgdotDecode() *EncodingNgdotDecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotDecode); ok {
return x.EncodingNgdotDecode
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodingNgdotAppendDecode() *EncodingNgdotAppendDecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotAppendDecode); ok {
return x.EncodingNgdotAppendDecode
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodingNgdotDecodeString() *EncodingNgdotDecodeStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotDecodeString); ok {
return x.EncodingNgdotDecodeString
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewDecoder() *NewDecoderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewDecoder); ok {
return x.NewDecoder
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodingNgdotDecodedLen() *EncodingNgdotDecodedLenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotDecodedLen); ok {
return x.EncodingNgdotDecodedLen
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewEncoding struct {
NewEncoding *NewEncodingArgs `protobuf:"bytes,1,opt,name=NewEncoding,proto3,oneof"`
}
type NgoloFuzzOne_EncodingNgdotWithPadding struct {
EncodingNgdotWithPadding *EncodingNgdotWithPaddingArgs `protobuf:"bytes,2,opt,name=EncodingNgdotWithPadding,proto3,oneof"`
}
type NgoloFuzzOne_EncodingNgdotEncode struct {
EncodingNgdotEncode *EncodingNgdotEncodeArgs `protobuf:"bytes,3,opt,name=EncodingNgdotEncode,proto3,oneof"`
}
type NgoloFuzzOne_EncodingNgdotAppendEncode struct {
EncodingNgdotAppendEncode *EncodingNgdotAppendEncodeArgs `protobuf:"bytes,4,opt,name=EncodingNgdotAppendEncode,proto3,oneof"`
}
type NgoloFuzzOne_EncodingNgdotEncodeToString struct {
EncodingNgdotEncodeToString *EncodingNgdotEncodeToStringArgs `protobuf:"bytes,5,opt,name=EncodingNgdotEncodeToString,proto3,oneof"`
}
type NgoloFuzzOne_NewEncoder struct {
NewEncoder *NewEncoderArgs `protobuf:"bytes,6,opt,name=NewEncoder,proto3,oneof"`
}
type NgoloFuzzOne_EncodingNgdotEncodedLen struct {
EncodingNgdotEncodedLen *EncodingNgdotEncodedLenArgs `protobuf:"bytes,7,opt,name=EncodingNgdotEncodedLen,proto3,oneof"`
}
type NgoloFuzzOne_EncodingNgdotDecode struct {
EncodingNgdotDecode *EncodingNgdotDecodeArgs `protobuf:"bytes,8,opt,name=EncodingNgdotDecode,proto3,oneof"`
}
type NgoloFuzzOne_EncodingNgdotAppendDecode struct {
EncodingNgdotAppendDecode *EncodingNgdotAppendDecodeArgs `protobuf:"bytes,9,opt,name=EncodingNgdotAppendDecode,proto3,oneof"`
}
type NgoloFuzzOne_EncodingNgdotDecodeString struct {
EncodingNgdotDecodeString *EncodingNgdotDecodeStringArgs `protobuf:"bytes,10,opt,name=EncodingNgdotDecodeString,proto3,oneof"`
}
type NgoloFuzzOne_NewDecoder struct {
NewDecoder *NewDecoderArgs `protobuf:"bytes,11,opt,name=NewDecoder,proto3,oneof"`
}
type NgoloFuzzOne_EncodingNgdotDecodedLen struct {
EncodingNgdotDecodedLen *EncodingNgdotDecodedLenArgs `protobuf:"bytes,12,opt,name=EncodingNgdotDecodedLen,proto3,oneof"`
}
func (*NgoloFuzzOne_NewEncoding) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotWithPadding) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotEncode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotAppendEncode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotEncodeToString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewEncoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotEncodedLen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotDecode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotAppendDecode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotDecodeString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewDecoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotDecodedLen) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne operations to be replayed by the harness.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime,
// lazily attaching the message info on first use.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetList returns the operation sequence, or nil if x is nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto, embedded verbatim. Do not modify: the bytes must match
// the compiled .proto exactly.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"+\n" +
	"\x0fNewEncodingArgs\x12\x18\n" +
	"\aencoder\x18\x01 \x01(\tR\aencoder\"8\n" +
	"\x1cEncodingNgdotWithPaddingArgs\x12\x18\n" +
	"\apadding\x18\x01 \x01(\tR\apadding\"=\n" +
	"\x17EncodingNgdotEncodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"C\n" +
	"\x1dEncodingNgdotAppendEncodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"3\n" +
	"\x1fEncodingNgdotEncodeToStringArgs\x12\x10\n" +
	"\x03src\x18\x01 \x01(\fR\x03src\"\x1e\n" +
	"\x0eNewEncoderArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\"+\n" +
	"\x1bEncodingNgdotEncodedLenArgs\x12\f\n" +
	"\x01n\x18\x01 \x01(\x03R\x01n\"=\n" +
	"\x17EncodingNgdotDecodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"C\n" +
	"\x1dEncodingNgdotAppendDecodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"-\n" +
	"\x1dEncodingNgdotDecodeStringArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\x1e\n" +
	"\x0eNewDecoderArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\"+\n" +
	"\x1bEncodingNgdotDecodedLenArgs\x12\f\n" +
	"\x01n\x18\x01 \x01(\x03R\x01n\"\xdd\b\n" +
	"\fNgoloFuzzOne\x12>\n" +
	"\vNewEncoding\x18\x01 \x01(\v2\x1a.ngolofuzz.NewEncodingArgsH\x00R\vNewEncoding\x12e\n" +
	"\x18EncodingNgdotWithPadding\x18\x02 \x01(\v2'.ngolofuzz.EncodingNgdotWithPaddingArgsH\x00R\x18EncodingNgdotWithPadding\x12V\n" +
	"\x13EncodingNgdotEncode\x18\x03 \x01(\v2\".ngolofuzz.EncodingNgdotEncodeArgsH\x00R\x13EncodingNgdotEncode\x12h\n" +
	"\x19EncodingNgdotAppendEncode\x18\x04 \x01(\v2(.ngolofuzz.EncodingNgdotAppendEncodeArgsH\x00R\x19EncodingNgdotAppendEncode\x12n\n" +
	"\x1bEncodingNgdotEncodeToString\x18\x05 \x01(\v2*.ngolofuzz.EncodingNgdotEncodeToStringArgsH\x00R\x1bEncodingNgdotEncodeToString\x12;\n" +
	"\n" +
	"NewEncoder\x18\x06 \x01(\v2\x19.ngolofuzz.NewEncoderArgsH\x00R\n" +
	"NewEncoder\x12b\n" +
	"\x17EncodingNgdotEncodedLen\x18\a \x01(\v2&.ngolofuzz.EncodingNgdotEncodedLenArgsH\x00R\x17EncodingNgdotEncodedLen\x12V\n" +
	"\x13EncodingNgdotDecode\x18\b \x01(\v2\".ngolofuzz.EncodingNgdotDecodeArgsH\x00R\x13EncodingNgdotDecode\x12h\n" +
	"\x19EncodingNgdotAppendDecode\x18\t \x01(\v2(.ngolofuzz.EncodingNgdotAppendDecodeArgsH\x00R\x19EncodingNgdotAppendDecode\x12h\n" +
	"\x19EncodingNgdotDecodeString\x18\n" +
	" \x01(\v2(.ngolofuzz.EncodingNgdotDecodeStringArgsH\x00R\x19EncodingNgdotDecodeString\x12;\n" +
	"\n" +
	"NewDecoder\x18\v \x01(\v2\x19.ngolofuzz.NewDecoderArgsH\x00R\n" +
	"NewDecoder\x12b\n" +
	"\x17EncodingNgdotDecodedLen\x18\f \x01(\v2&.ngolofuzz.EncodingNgdotDecodedLenArgsH\x00R\x17EncodingNgdotDecodedLenB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1cZ\x1a./;fuzz_ng_encoding_base32b\x06proto3"
var (
	// Guards the one-time compression of the raw descriptor.
	file_ngolofuzz_proto_rawDescOnce sync.Once
	// Cached gzip-compressed form of file_ngolofuzz_proto_rawDesc.
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP compresses the embedded descriptor
// exactly once and returns the cached bytes on every later call.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Per-message runtime type information, indexed in declaration order.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 15)

// Go types corresponding to each message index.
var file_ngolofuzz_proto_goTypes = []any{
	(*NewEncodingArgs)(nil),                 // 0: ngolofuzz.NewEncodingArgs
	(*EncodingNgdotWithPaddingArgs)(nil),    // 1: ngolofuzz.EncodingNgdotWithPaddingArgs
	(*EncodingNgdotEncodeArgs)(nil),         // 2: ngolofuzz.EncodingNgdotEncodeArgs
	(*EncodingNgdotAppendEncodeArgs)(nil),   // 3: ngolofuzz.EncodingNgdotAppendEncodeArgs
	(*EncodingNgdotEncodeToStringArgs)(nil), // 4: ngolofuzz.EncodingNgdotEncodeToStringArgs
	(*NewEncoderArgs)(nil),                  // 5: ngolofuzz.NewEncoderArgs
	(*EncodingNgdotEncodedLenArgs)(nil),     // 6: ngolofuzz.EncodingNgdotEncodedLenArgs
	(*EncodingNgdotDecodeArgs)(nil),         // 7: ngolofuzz.EncodingNgdotDecodeArgs
	(*EncodingNgdotAppendDecodeArgs)(nil),   // 8: ngolofuzz.EncodingNgdotAppendDecodeArgs
	(*EncodingNgdotDecodeStringArgs)(nil),   // 9: ngolofuzz.EncodingNgdotDecodeStringArgs
	(*NewDecoderArgs)(nil),                  // 10: ngolofuzz.NewDecoderArgs
	(*EncodingNgdotDecodedLenArgs)(nil),     // 11: ngolofuzz.EncodingNgdotDecodedLenArgs
	(*NgoloFuzzOne)(nil),                    // 12: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                    // 13: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                   // 14: ngolofuzz.NgoloFuzzList
}

// Dependency index table: field type references first, then the four
// method sub-lists (all empty here — the trailing bounds are [13:13]).
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.NewEncoding:type_name -> ngolofuzz.NewEncodingArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.EncodingNgdotWithPadding:type_name -> ngolofuzz.EncodingNgdotWithPaddingArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.EncodingNgdotEncode:type_name -> ngolofuzz.EncodingNgdotEncodeArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.EncodingNgdotAppendEncode:type_name -> ngolofuzz.EncodingNgdotAppendEncodeArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.EncodingNgdotEncodeToString:type_name -> ngolofuzz.EncodingNgdotEncodeToStringArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.NewEncoder:type_name -> ngolofuzz.NewEncoderArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.EncodingNgdotEncodedLen:type_name -> ngolofuzz.EncodingNgdotEncodedLenArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.EncodingNgdotDecode:type_name -> ngolofuzz.EncodingNgdotDecodeArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.EncodingNgdotAppendDecode:type_name -> ngolofuzz.EncodingNgdotAppendDecodeArgs
	9,  // 9: ngolofuzz.NgoloFuzzOne.EncodingNgdotDecodeString:type_name -> ngolofuzz.EncodingNgdotDecodeStringArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.NewDecoder:type_name -> ngolofuzz.NewDecoderArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.EncodingNgdotDecodedLen:type_name -> ngolofuzz.EncodingNgdotDecodedLenArgs
	12, // 12: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	13, // [13:13] is the sub-list for method output_type
	13, // [13:13] is the sub-list for method input_type
	13, // [13:13] is the sub-list for extension type_name
	13, // [13:13] is the sub-list for extension extendee
	0,  // [0:13] is the sub-list for field type_name
}
// Register the file descriptor at package load time.
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init wires the raw descriptor, Go types, dependency
// indexes, and oneof wrappers into the protobuf runtime. Idempotent: a
// second call returns immediately.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Message 12 (NgoloFuzzOne): register its oneof wrapper types.
	file_ngolofuzz_proto_msgTypes[12].OneofWrappers = []any{
		(*NgoloFuzzOne_NewEncoding)(nil),
		(*NgoloFuzzOne_EncodingNgdotWithPadding)(nil),
		(*NgoloFuzzOne_EncodingNgdotEncode)(nil),
		(*NgoloFuzzOne_EncodingNgdotAppendEncode)(nil),
		(*NgoloFuzzOne_EncodingNgdotEncodeToString)(nil),
		(*NgoloFuzzOne_NewEncoder)(nil),
		(*NgoloFuzzOne_EncodingNgdotEncodedLen)(nil),
		(*NgoloFuzzOne_EncodingNgdotDecode)(nil),
		(*NgoloFuzzOne_EncodingNgdotAppendDecode)(nil),
		(*NgoloFuzzOne_EncodingNgdotDecodeString)(nil),
		(*NgoloFuzzOne_NewDecoder)(nil),
		(*NgoloFuzzOne_EncodingNgdotDecodedLen)(nil),
	}
	// Message 13 (NgoloFuzzAny): register its oneof wrapper types.
	file_ngolofuzz_proto_msgTypes[13].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   15,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release construction-only tables so they can be collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_base64
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/base64"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
// TODO: only emit these helper constructors when the generated harness actually needs them.
// CreateBigInt interprets a as big-endian unsigned bytes and returns the
// corresponding non-negative big.Int.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the bytes in a through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows every int64 in a to the platform int type,
// preserving order. The result always has len(a) elements.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates every int64 in a to its low 16 bits,
// preserving order. The result always has len(a) elements.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid leading UTF-8 decodes to U+FFFD, as with any range over a string.
func GetRune(s string) rune {
	var first rune
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid decodes data as a NgoloFuzzList and replays it. The caller
// guarantees data is valid protobuf, so a decode failure panics loudly.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow only panic(string) — presumably argument-validation panics
	// raised by the fuzzed API; anything else is a genuine crash and is
	// re-raised.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure decodes data that may or may not be valid protobuf;
// undecodable inputs are simply skipped instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	// Swallow only panic(string) — presumably argument-validation panics
	// raised by the fuzzed API; anything else is a genuine crash and is
	// re-raised.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup below.
var initialized bool

// FuzzNG_List replays the generated operation sequence against
// encoding/base64. Encodings produced by NewEncoding/WithPadding/Strict are
// pooled in EncodingResults and consumed round-robin by the method calls.
// Returns 1 when the whole list was replayed, 0 on early exit.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first input, optionally dump a Go reproducer of it to the
		// file named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var EncodingResults []*base64.Encoding
	EncodingResultsIndex := 0
	for l := range gen.List {
		// Cap the replayed sequence length.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewEncoding:
			r0 := base64.NewEncoding(a.NewEncoding.Encoder)
			if r0 != nil {
				EncodingResults = append(EncodingResults, r0)
			}
		case *NgoloFuzzOne_EncodingNgdotWithPadding:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg1 := GetRune(a.EncodingNgdotWithPadding.Padding)
			r0 := arg0.WithPadding(arg1)
			if r0 != nil {
				EncodingResults = append(EncodingResults, r0)
			}
		case *NgoloFuzzOne_EncodingNgdotStrict:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			r0 := arg0.Strict()
			if r0 != nil {
				EncodingResults = append(EncodingResults, r0)
			}
		case *NgoloFuzzOne_EncodingNgdotEncode:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			// Bug fix: Encode requires dst to hold EncodedLen(len(src))
			// bytes. The previous 2*len(src) is too small for a 1-byte src
			// with padding (EncodedLen(1) == 4), making Encode panic with a
			// runtime index-out-of-range that the recover in the callers
			// re-raises as a crash.
			a.EncodingNgdotEncode.Dst = make([]byte, arg0.EncodedLen(len(a.EncodingNgdotEncode.Src)))
			arg0.Encode(a.EncodingNgdotEncode.Dst, a.EncodingNgdotEncode.Src)
		case *NgoloFuzzOne_EncodingNgdotAppendEncode:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			// AppendEncode appends after the preallocated prefix, so the
			// exact dst size is not a correctness issue here.
			a.EncodingNgdotAppendEncode.Dst = make([]byte, 2*len(a.EncodingNgdotAppendEncode.Src))
			arg0.AppendEncode(a.EncodingNgdotAppendEncode.Dst, a.EncodingNgdotAppendEncode.Src)
		case *NgoloFuzzOne_EncodingNgdotEncodeToString:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg0.EncodeToString(a.EncodingNgdotEncodeToString.Src)
		case *NgoloFuzzOne_NewEncoder:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg1 := bytes.NewBuffer(a.NewEncoder.W)
			base64.NewEncoder(arg0, arg1)
		case *NgoloFuzzOne_EncodingNgdotEncodedLen:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg1 := int(a.EncodingNgdotEncodedLen.N)
			arg0.EncodedLen(arg1)
		case *NgoloFuzzOne_EncodingNgdotAppendDecode:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			a.EncodingNgdotAppendDecode.Dst = make([]byte, 2*len(a.EncodingNgdotAppendDecode.Src))
			_, r1 := arg0.AppendDecode(a.EncodingNgdotAppendDecode.Dst, a.EncodingNgdotAppendDecode.Src)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_EncodingNgdotDecodeString:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			_, r1 := arg0.DecodeString(a.EncodingNgdotDecodeString.S)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_EncodingNgdotDecode:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			// DecodedLen(n) never exceeds n, so 2*len(src) is always ample.
			a.EncodingNgdotDecode.Dst = make([]byte, 2*len(a.EncodingNgdotDecode.Src))
			_, r1 := arg0.Decode(a.EncodingNgdotDecode.Dst, a.EncodingNgdotDecode.Src)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_NewDecoder:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg1 := bytes.NewReader(a.NewDecoder.R)
			base64.NewDecoder(arg0, arg1)
		case *NgoloFuzzOne_EncodingNgdotDecodedLen:
			if len(EncodingResults) == 0 {
				continue
			}
			arg0 := EncodingResults[EncodingResultsIndex]
			EncodingResultsIndex = (EncodingResultsIndex + 1) % len(EncodingResults)
			arg1 := int(a.EncodingNgdotDecodedLen.N)
			arg0.DecodedLen(arg1)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer of the generated operation
// sequence to w, mirroring the replay logic of FuzzNG_List: EncodingNb
// counts encodings created so far and EncodingResultsIndex tracks the
// round-robin cursor used to pick a receiver for each call.
// WriteString errors are deliberately ignored (best-effort debug output).
//
// NOTE(review): in the WithPadding/Strict cases EncodingNb is incremented
// before the modulo that advances EncodingResultsIndex, whereas FuzzNG_List
// advances its cursor modulo the pool length before appending — the printed
// receiver indices may diverge from the executed ones; verify against
// FuzzNG_List before relying on reproducers that chain derived encodings.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	EncodingNb := 0
	EncodingResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewEncoding:
			w.WriteString(fmt.Sprintf("Encoding%d := base64.NewEncoding(%#+v)\n", EncodingNb, a.NewEncoding.Encoder))
			EncodingNb = EncodingNb + 1
		case *NgoloFuzzOne_EncodingNgdotWithPadding:
			// Skip calls that had no receiver available at replay time.
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d := Encoding%d.WithPadding(GetRune(%#+v))\n", EncodingNb, EncodingResultsIndex, a.EncodingNgdotWithPadding.Padding))
			EncodingNb = EncodingNb + 1
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotStrict:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d := Encoding%d.Strict()\n", EncodingNb, EncodingResultsIndex))
			EncodingNb = EncodingNb + 1
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotEncode:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.Encode(%#+v, %#+v)\n", EncodingResultsIndex, a.EncodingNgdotEncode.Dst, a.EncodingNgdotEncode.Src))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotAppendEncode:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.AppendEncode(%#+v, %#+v)\n", EncodingResultsIndex, a.EncodingNgdotAppendEncode.Dst, a.EncodingNgdotAppendEncode.Src))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotEncodeToString:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.EncodeToString(%#+v)\n", EncodingResultsIndex, a.EncodingNgdotEncodeToString.Src))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_NewEncoder:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("base64.NewEncoder(Encoding%d, bytes.NewBuffer(%#+v))\n", (EncodingResultsIndex+0)%EncodingNb, a.NewEncoder.W))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotEncodedLen:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.EncodedLen(int(%#+v))\n", EncodingResultsIndex, a.EncodingNgdotEncodedLen.N))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotAppendDecode:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.AppendDecode(%#+v, %#+v)\n", EncodingResultsIndex, a.EncodingNgdotAppendDecode.Dst, a.EncodingNgdotAppendDecode.Src))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotDecodeString:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.DecodeString(%#+v)\n", EncodingResultsIndex, a.EncodingNgdotDecodeString.S))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotDecode:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.Decode(%#+v, %#+v)\n", EncodingResultsIndex, a.EncodingNgdotDecode.Dst, a.EncodingNgdotDecode.Src))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_NewDecoder:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("base64.NewDecoder(Encoding%d, bytes.NewReader(%#+v))\n", (EncodingResultsIndex+0)%EncodingNb, a.NewDecoder.R))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		case *NgoloFuzzOne_EncodingNgdotDecodedLen:
			if EncodingNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoding%d.DecodedLen(int(%#+v))\n", EncodingResultsIndex, a.EncodingNgdotDecodedLen.N))
			EncodingResultsIndex = (EncodingResultsIndex + 1) % EncodingNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_base64
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the build fails if the generated code and the
// protoimpl runtime have drifted too far apart in either direction.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// NewEncodingArgs carries the string argument for a base64.NewEncoding
// call. Generated from ngolofuzz.proto.
type NewEncodingArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Encoder       string                 `protobuf:"bytes,1,opt,name=encoder,proto3" json:"encoder,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *NewEncodingArgs) Reset() {
	*x = NewEncodingArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *NewEncodingArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NewEncodingArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime.
func (x *NewEncodingArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewEncodingArgs.ProtoReflect.Descriptor instead.
func (*NewEncodingArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetEncoder returns the encoder string, or "" if x is nil.
func (x *NewEncodingArgs) GetEncoder() string {
	if x != nil {
		return x.Encoder
	}
	return ""
}
// EncodingNgdotWithPaddingArgs carries the argument for an
// Encoding.WithPadding call. Generated from ngolofuzz.proto.
type EncodingNgdotWithPaddingArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Padding       string                 `protobuf:"bytes,1,opt,name=padding,proto3" json:"padding,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *EncodingNgdotWithPaddingArgs) Reset() {
	*x = EncodingNgdotWithPaddingArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *EncodingNgdotWithPaddingArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*EncodingNgdotWithPaddingArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime.
func (x *EncodingNgdotWithPaddingArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodingNgdotWithPaddingArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotWithPaddingArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetPadding returns the padding string, or "" if x is nil.
func (x *EncodingNgdotWithPaddingArgs) GetPadding() string {
	if x != nil {
		return x.Padding
	}
	return ""
}
// EncodingNgdotStrictArgs is the (empty) argument message for an
// Encoding.Strict call. Generated from ngolofuzz.proto.
type EncodingNgdotStrictArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *EncodingNgdotStrictArgs) Reset() {
	*x = EncodingNgdotStrictArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *EncodingNgdotStrictArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*EncodingNgdotStrictArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime.
func (x *EncodingNgdotStrictArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodingNgdotStrictArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotStrictArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// EncodingNgdotEncodeArgs carries the dst/src arguments for an
// Encoding.Encode call. Generated from ngolofuzz.proto.
type EncodingNgdotEncodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Dst           []byte                 `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	Src           []byte                 `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *EncodingNgdotEncodeArgs) Reset() {
	*x = EncodingNgdotEncodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *EncodingNgdotEncodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*EncodingNgdotEncodeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime.
func (x *EncodingNgdotEncodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodingNgdotEncodeArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotEncodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetDst returns the destination buffer, or nil if x is nil.
func (x *EncodingNgdotEncodeArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetSrc returns the source buffer, or nil if x is nil.
func (x *EncodingNgdotEncodeArgs) GetSrc() []byte {
	if x != nil {
		return x.Src
	}
	return nil
}
// EncodingNgdotAppendEncodeArgs carries the dst/src arguments for an
// Encoding.AppendEncode call. Generated from ngolofuzz.proto.
type EncodingNgdotAppendEncodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Dst           []byte                 `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	Src           []byte                 `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *EncodingNgdotAppendEncodeArgs) Reset() {
	*x = EncodingNgdotAppendEncodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *EncodingNgdotAppendEncodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*EncodingNgdotAppendEncodeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime.
func (x *EncodingNgdotAppendEncodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodingNgdotAppendEncodeArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotAppendEncodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetDst returns the destination buffer, or nil if x is nil.
func (x *EncodingNgdotAppendEncodeArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetSrc returns the source buffer, or nil if x is nil.
func (x *EncodingNgdotAppendEncodeArgs) GetSrc() []byte {
	if x != nil {
		return x.Src
	}
	return nil
}
// EncodingNgdotEncodeToStringArgs carries the src argument for an
// Encoding.EncodeToString call. Generated from ngolofuzz.proto.
type EncodingNgdotEncodeToStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Src           []byte                 `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *EncodingNgdotEncodeToStringArgs) Reset() {
	*x = EncodingNgdotEncodeToStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *EncodingNgdotEncodeToStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*EncodingNgdotEncodeToStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime.
func (x *EncodingNgdotEncodeToStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodingNgdotEncodeToStringArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotEncodeToStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetSrc returns the source buffer, or nil if x is nil.
func (x *EncodingNgdotEncodeToStringArgs) GetSrc() []byte {
	if x != nil {
		return x.Src
	}
	return nil
}
// NewEncoderArgs carries the writer seed bytes for a base64.NewEncoder
// call. Generated from ngolofuzz.proto.
type NewEncoderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *NewEncoderArgs) Reset() {
	*x = NewEncoderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *NewEncoderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NewEncoderArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime.
func (x *NewEncoderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewEncoderArgs.ProtoReflect.Descriptor instead.
func (*NewEncoderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetW returns the writer seed bytes, or nil if x is nil.
func (x *NewEncoderArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}
// EncodingNgdotEncodedLenArgs carries the n argument for an
// Encoding.EncodedLen call. Generated from ngolofuzz.proto.
type EncodingNgdotEncodedLenArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	N             int64                  `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *EncodingNgdotEncodedLenArgs) Reset() {
	*x = EncodingNgdotEncodedLenArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *EncodingNgdotEncodedLenArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*EncodingNgdotEncodedLenArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime.
func (x *EncodingNgdotEncodedLenArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodingNgdotEncodedLenArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotEncodedLenArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// GetN returns the length argument, or 0 if x is nil.
func (x *EncodingNgdotEncodedLenArgs) GetN() int64 {
	if x != nil {
		return x.N
	}
	return 0
}
// EncodingNgdotAppendDecodeArgs carries the dst/src arguments for an
// Encoding.AppendDecode call. Generated from ngolofuzz.proto.
type EncodingNgdotAppendDecodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Dst           []byte                 `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	Src           []byte                 `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *EncodingNgdotAppendDecodeArgs) Reset() {
	*x = EncodingNgdotAppendDecodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *EncodingNgdotAppendDecodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*EncodingNgdotAppendDecodeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime.
func (x *EncodingNgdotAppendDecodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodingNgdotAppendDecodeArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotAppendDecodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetDst returns the destination buffer, or nil if x is nil.
func (x *EncodingNgdotAppendDecodeArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetSrc returns the source buffer, or nil if x is nil.
func (x *EncodingNgdotAppendDecodeArgs) GetSrc() []byte {
	if x != nil {
		return x.Src
	}
	return nil
}
// EncodingNgdotDecodeStringArgs carries the s argument for an
// Encoding.DecodeString call. Generated from ngolofuzz.proto.
type EncodingNgdotDecodeStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *EncodingNgdotDecodeStringArgs) Reset() {
	*x = EncodingNgdotDecodeStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in compact text form.
func (x *EncodingNgdotDecodeStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*EncodingNgdotDecodeStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the protobuf runtime.
func (x *EncodingNgdotDecodeStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodingNgdotDecodeStringArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotDecodeStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetS returns the string to decode, or "" if x is nil.
func (x *EncodingNgdotDecodeStringArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// EncodingNgdotDecodeArgs carries the fuzzer-generated arguments for
// Encoding.Decode (message index 10 in ngolofuzz.proto).
type EncodingNgdotDecodeArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Dst is the destination buffer; Src is the encoded input.
	Dst           []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	Src           []byte `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its type info.
func (x *EncodingNgdotDecodeArgs) Reset() {
	*x = EncodingNgdotDecodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a protobuf-aware textual rendering (debugging only).
func (x *EncodingNgdotDecodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*EncodingNgdotDecodeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching the type info.
func (x *EncodingNgdotDecodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodingNgdotDecodeArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotDecodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetDst returns the dst field; nil-receiver safe.
func (x *EncodingNgdotDecodeArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetSrc returns the src field; nil-receiver safe.
func (x *EncodingNgdotDecodeArgs) GetSrc() []byte {
	if x != nil {
		return x.Src
	}
	return nil
}
// NewDecoderArgs carries the fuzzer-generated argument for NewDecoder
// (message index 11 in ngolofuzz.proto). R is the raw bytes fed to the reader.
type NewDecoderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its type info.
func (x *NewDecoderArgs) Reset() {
	*x = NewDecoderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a protobuf-aware textual rendering (debugging only).
func (x *NewDecoderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NewDecoderArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching the type info.
func (x *NewDecoderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewDecoderArgs.ProtoReflect.Descriptor instead.
func (*NewDecoderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetR returns the r field; nil-receiver safe.
func (x *NewDecoderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// EncodingNgdotDecodedLenArgs carries the fuzzer-generated argument for
// Encoding.DecodedLen (message index 12 in ngolofuzz.proto).
type EncodingNgdotDecodedLenArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	N             int64                  `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its type info.
func (x *EncodingNgdotDecodedLenArgs) Reset() {
	*x = EncodingNgdotDecodedLenArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a protobuf-aware textual rendering (debugging only).
func (x *EncodingNgdotDecodedLenArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*EncodingNgdotDecodedLenArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching the type info.
func (x *EncodingNgdotDecodedLenArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodingNgdotDecodedLenArgs.ProtoReflect.Descriptor instead.
func (*EncodingNgdotDecodedLenArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// GetN returns the n field; nil-receiver safe.
func (x *EncodingNgdotDecodedLenArgs) GetN() int64 {
	if x != nil {
		return x.N
	}
	return 0
}
// NgoloFuzzOne wraps exactly one fuzz operation as a protobuf oneof; each
// wrapper type below selects which encoding/base64 API call the harness makes.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_NewEncoding
	//	*NgoloFuzzOne_EncodingNgdotWithPadding
	//	*NgoloFuzzOne_EncodingNgdotStrict
	//	*NgoloFuzzOne_EncodingNgdotEncode
	//	*NgoloFuzzOne_EncodingNgdotAppendEncode
	//	*NgoloFuzzOne_EncodingNgdotEncodeToString
	//	*NgoloFuzzOne_NewEncoder
	//	*NgoloFuzzOne_EncodingNgdotEncodedLen
	//	*NgoloFuzzOne_EncodingNgdotAppendDecode
	//	*NgoloFuzzOne_EncodingNgdotDecodeString
	//	*NgoloFuzzOne_EncodingNgdotDecode
	//	*NgoloFuzzOne_NewDecoder
	//	*NgoloFuzzOne_EncodingNgdotDecodedLen
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its type info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a protobuf-aware textual rendering (debugging only).
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching the type info.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// GetItem returns the populated oneof member, or nil if none is set.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below each return the corresponding oneof member's
// payload when that member is set, and nil otherwise; all are nil-receiver safe.

func (x *NgoloFuzzOne) GetNewEncoding() *NewEncodingArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewEncoding); ok {
			return x.NewEncoding
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodingNgdotWithPadding() *EncodingNgdotWithPaddingArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotWithPadding); ok {
			return x.EncodingNgdotWithPadding
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodingNgdotStrict() *EncodingNgdotStrictArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotStrict); ok {
			return x.EncodingNgdotStrict
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodingNgdotEncode() *EncodingNgdotEncodeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotEncode); ok {
			return x.EncodingNgdotEncode
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodingNgdotAppendEncode() *EncodingNgdotAppendEncodeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotAppendEncode); ok {
			return x.EncodingNgdotAppendEncode
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodingNgdotEncodeToString() *EncodingNgdotEncodeToStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotEncodeToString); ok {
			return x.EncodingNgdotEncodeToString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewEncoder() *NewEncoderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewEncoder); ok {
			return x.NewEncoder
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodingNgdotEncodedLen() *EncodingNgdotEncodedLenArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotEncodedLen); ok {
			return x.EncodingNgdotEncodedLen
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodingNgdotAppendDecode() *EncodingNgdotAppendDecodeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotAppendDecode); ok {
			return x.EncodingNgdotAppendDecode
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodingNgdotDecodeString() *EncodingNgdotDecodeStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotDecodeString); ok {
			return x.EncodingNgdotDecodeString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodingNgdotDecode() *EncodingNgdotDecodeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotDecode); ok {
			return x.EncodingNgdotDecode
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewDecoder() *NewDecoderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewDecoder); ok {
			return x.NewDecoder
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodingNgdotDecodedLen() *EncodingNgdotDecodedLenArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodingNgdotDecodedLen); ok {
			return x.EncodingNgdotDecodedLen
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type for the `item` oneof.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// One wrapper struct per oneof member; the field number in the protobuf tag
// matches the declaration order in ngolofuzz.proto.

type NgoloFuzzOne_NewEncoding struct {
	NewEncoding *NewEncodingArgs `protobuf:"bytes,1,opt,name=NewEncoding,proto3,oneof"`
}

type NgoloFuzzOne_EncodingNgdotWithPadding struct {
	EncodingNgdotWithPadding *EncodingNgdotWithPaddingArgs `protobuf:"bytes,2,opt,name=EncodingNgdotWithPadding,proto3,oneof"`
}

type NgoloFuzzOne_EncodingNgdotStrict struct {
	EncodingNgdotStrict *EncodingNgdotStrictArgs `protobuf:"bytes,3,opt,name=EncodingNgdotStrict,proto3,oneof"`
}

type NgoloFuzzOne_EncodingNgdotEncode struct {
	EncodingNgdotEncode *EncodingNgdotEncodeArgs `protobuf:"bytes,4,opt,name=EncodingNgdotEncode,proto3,oneof"`
}

type NgoloFuzzOne_EncodingNgdotAppendEncode struct {
	EncodingNgdotAppendEncode *EncodingNgdotAppendEncodeArgs `protobuf:"bytes,5,opt,name=EncodingNgdotAppendEncode,proto3,oneof"`
}

type NgoloFuzzOne_EncodingNgdotEncodeToString struct {
	EncodingNgdotEncodeToString *EncodingNgdotEncodeToStringArgs `protobuf:"bytes,6,opt,name=EncodingNgdotEncodeToString,proto3,oneof"`
}

type NgoloFuzzOne_NewEncoder struct {
	NewEncoder *NewEncoderArgs `protobuf:"bytes,7,opt,name=NewEncoder,proto3,oneof"`
}

type NgoloFuzzOne_EncodingNgdotEncodedLen struct {
	EncodingNgdotEncodedLen *EncodingNgdotEncodedLenArgs `protobuf:"bytes,8,opt,name=EncodingNgdotEncodedLen,proto3,oneof"`
}

type NgoloFuzzOne_EncodingNgdotAppendDecode struct {
	EncodingNgdotAppendDecode *EncodingNgdotAppendDecodeArgs `protobuf:"bytes,9,opt,name=EncodingNgdotAppendDecode,proto3,oneof"`
}

type NgoloFuzzOne_EncodingNgdotDecodeString struct {
	EncodingNgdotDecodeString *EncodingNgdotDecodeStringArgs `protobuf:"bytes,10,opt,name=EncodingNgdotDecodeString,proto3,oneof"`
}

type NgoloFuzzOne_EncodingNgdotDecode struct {
	EncodingNgdotDecode *EncodingNgdotDecodeArgs `protobuf:"bytes,11,opt,name=EncodingNgdotDecode,proto3,oneof"`
}

type NgoloFuzzOne_NewDecoder struct {
	NewDecoder *NewDecoderArgs `protobuf:"bytes,12,opt,name=NewDecoder,proto3,oneof"`
}

type NgoloFuzzOne_EncodingNgdotDecodedLen struct {
	EncodingNgdotDecodedLen *EncodingNgdotDecodedLenArgs `protobuf:"bytes,13,opt,name=EncodingNgdotDecodedLen,proto3,oneof"`
}

// Marker methods tying each wrapper to the sealed oneof interface.
func (*NgoloFuzzOne_NewEncoding) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotWithPadding) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotStrict) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotEncode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotAppendEncode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotEncodeToString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewEncoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotEncodedLen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotAppendDecode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotDecodeString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotDecode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewDecoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodingNgdotDecodedLen) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny models a single fuzzer-chosen scalar value as a oneof over
// the protobuf scalar kinds (double, int64, bool, string, bytes).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its type info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a protobuf-aware textual rendering (debugging only).
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching the type info.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetItem returns the populated oneof member, or nil if none is set.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each Get* accessor returns the member's value when that member is set,
// and the scalar zero value otherwise; all are nil-receiver safe.

func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface for the `item` oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// One wrapper struct per scalar kind.

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

// Marker methods tying each wrapper to the sealed oneof interface.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne operations replayed by the harness.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its type info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a protobuf-aware textual rendering (debugging only).
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching the type info.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// GetList returns the operation slice; nil-receiver safe.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled file descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto embedded as a byte-exact string literal. Generated data —
// do not modify by hand.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"+\n" +
	"\x0fNewEncodingArgs\x12\x18\n" +
	"\aencoder\x18\x01 \x01(\tR\aencoder\"8\n" +
	"\x1cEncodingNgdotWithPaddingArgs\x12\x18\n" +
	"\apadding\x18\x01 \x01(\tR\apadding\"\x19\n" +
	"\x17EncodingNgdotStrictArgs\"=\n" +
	"\x17EncodingNgdotEncodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"C\n" +
	"\x1dEncodingNgdotAppendEncodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"3\n" +
	"\x1fEncodingNgdotEncodeToStringArgs\x12\x10\n" +
	"\x03src\x18\x01 \x01(\fR\x03src\"\x1e\n" +
	"\x0eNewEncoderArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\"+\n" +
	"\x1bEncodingNgdotEncodedLenArgs\x12\f\n" +
	"\x01n\x18\x01 \x01(\x03R\x01n\"C\n" +
	"\x1dEncodingNgdotAppendDecodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"-\n" +
	"\x1dEncodingNgdotDecodeStringArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"=\n" +
	"\x17EncodingNgdotDecodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"\x1e\n" +
	"\x0eNewDecoderArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\"+\n" +
	"\x1bEncodingNgdotDecodedLenArgs\x12\f\n" +
	"\x01n\x18\x01 \x01(\x03R\x01n\"\xb5\t\n" +
	"\fNgoloFuzzOne\x12>\n" +
	"\vNewEncoding\x18\x01 \x01(\v2\x1a.ngolofuzz.NewEncodingArgsH\x00R\vNewEncoding\x12e\n" +
	"\x18EncodingNgdotWithPadding\x18\x02 \x01(\v2'.ngolofuzz.EncodingNgdotWithPaddingArgsH\x00R\x18EncodingNgdotWithPadding\x12V\n" +
	"\x13EncodingNgdotStrict\x18\x03 \x01(\v2\".ngolofuzz.EncodingNgdotStrictArgsH\x00R\x13EncodingNgdotStrict\x12V\n" +
	"\x13EncodingNgdotEncode\x18\x04 \x01(\v2\".ngolofuzz.EncodingNgdotEncodeArgsH\x00R\x13EncodingNgdotEncode\x12h\n" +
	"\x19EncodingNgdotAppendEncode\x18\x05 \x01(\v2(.ngolofuzz.EncodingNgdotAppendEncodeArgsH\x00R\x19EncodingNgdotAppendEncode\x12n\n" +
	"\x1bEncodingNgdotEncodeToString\x18\x06 \x01(\v2*.ngolofuzz.EncodingNgdotEncodeToStringArgsH\x00R\x1bEncodingNgdotEncodeToString\x12;\n" +
	"\n" +
	"NewEncoder\x18\a \x01(\v2\x19.ngolofuzz.NewEncoderArgsH\x00R\n" +
	"NewEncoder\x12b\n" +
	"\x17EncodingNgdotEncodedLen\x18\b \x01(\v2&.ngolofuzz.EncodingNgdotEncodedLenArgsH\x00R\x17EncodingNgdotEncodedLen\x12h\n" +
	"\x19EncodingNgdotAppendDecode\x18\t \x01(\v2(.ngolofuzz.EncodingNgdotAppendDecodeArgsH\x00R\x19EncodingNgdotAppendDecode\x12h\n" +
	"\x19EncodingNgdotDecodeString\x18\n" +
	" \x01(\v2(.ngolofuzz.EncodingNgdotDecodeStringArgsH\x00R\x19EncodingNgdotDecodeString\x12V\n" +
	"\x13EncodingNgdotDecode\x18\v \x01(\v2\".ngolofuzz.EncodingNgdotDecodeArgsH\x00R\x13EncodingNgdotDecode\x12;\n" +
	"\n" +
	"NewDecoder\x18\f \x01(\v2\x19.ngolofuzz.NewDecoderArgsH\x00R\n" +
	"NewDecoder\x12b\n" +
	"\x17EncodingNgdotDecodedLen\x18\r \x01(\v2&.ngolofuzz.EncodingNgdotDecodedLenArgsH\x00R\x17EncodingNgdotDecodedLenB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1cZ\x1a./;fuzz_ng_encoding_base64b\x06proto3"

// Once-guarded cache for the gzip-compressed descriptor.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP compresses the raw descriptor exactly once
// and returns the cached bytes; used by the deprecated Descriptor methods.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds runtime type info for the 16 messages
// declared in ngolofuzz.proto; indexed in declaration order.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 16)

// file_ngolofuzz_proto_goTypes maps descriptor message indices to Go types.
var file_ngolofuzz_proto_goTypes = []any{
	(*NewEncodingArgs)(nil),                 // 0: ngolofuzz.NewEncodingArgs
	(*EncodingNgdotWithPaddingArgs)(nil),    // 1: ngolofuzz.EncodingNgdotWithPaddingArgs
	(*EncodingNgdotStrictArgs)(nil),         // 2: ngolofuzz.EncodingNgdotStrictArgs
	(*EncodingNgdotEncodeArgs)(nil),         // 3: ngolofuzz.EncodingNgdotEncodeArgs
	(*EncodingNgdotAppendEncodeArgs)(nil),   // 4: ngolofuzz.EncodingNgdotAppendEncodeArgs
	(*EncodingNgdotEncodeToStringArgs)(nil), // 5: ngolofuzz.EncodingNgdotEncodeToStringArgs
	(*NewEncoderArgs)(nil),                  // 6: ngolofuzz.NewEncoderArgs
	(*EncodingNgdotEncodedLenArgs)(nil),     // 7: ngolofuzz.EncodingNgdotEncodedLenArgs
	(*EncodingNgdotAppendDecodeArgs)(nil),   // 8: ngolofuzz.EncodingNgdotAppendDecodeArgs
	(*EncodingNgdotDecodeStringArgs)(nil),   // 9: ngolofuzz.EncodingNgdotDecodeStringArgs
	(*EncodingNgdotDecodeArgs)(nil),         // 10: ngolofuzz.EncodingNgdotDecodeArgs
	(*NewDecoderArgs)(nil),                  // 11: ngolofuzz.NewDecoderArgs
	(*EncodingNgdotDecodedLenArgs)(nil),     // 12: ngolofuzz.EncodingNgdotDecodedLenArgs
	(*NgoloFuzzOne)(nil),                    // 13: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                    // 14: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                   // 15: ngolofuzz.NgoloFuzzList
}

// file_ngolofuzz_proto_depIdxs records, for every message-typed field, the
// goTypes index of its type; the trailing entries delimit the sub-lists the
// protobuf runtime expects (methods/extensions are absent here).
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.NewEncoding:type_name -> ngolofuzz.NewEncodingArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.EncodingNgdotWithPadding:type_name -> ngolofuzz.EncodingNgdotWithPaddingArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.EncodingNgdotStrict:type_name -> ngolofuzz.EncodingNgdotStrictArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.EncodingNgdotEncode:type_name -> ngolofuzz.EncodingNgdotEncodeArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.EncodingNgdotAppendEncode:type_name -> ngolofuzz.EncodingNgdotAppendEncodeArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.EncodingNgdotEncodeToString:type_name -> ngolofuzz.EncodingNgdotEncodeToStringArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.NewEncoder:type_name -> ngolofuzz.NewEncoderArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.EncodingNgdotEncodedLen:type_name -> ngolofuzz.EncodingNgdotEncodedLenArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.EncodingNgdotAppendDecode:type_name -> ngolofuzz.EncodingNgdotAppendDecodeArgs
	9,  // 9: ngolofuzz.NgoloFuzzOne.EncodingNgdotDecodeString:type_name -> ngolofuzz.EncodingNgdotDecodeStringArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.EncodingNgdotDecode:type_name -> ngolofuzz.EncodingNgdotDecodeArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.NewDecoder:type_name -> ngolofuzz.NewDecoderArgs
	12, // 12: ngolofuzz.NgoloFuzzOne.EncodingNgdotDecodedLen:type_name -> ngolofuzz.EncodingNgdotDecodedLenArgs
	13, // 13: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	14, // [14:14] is the sub-list for method output_type
	14, // [14:14] is the sub-list for method input_type
	14, // [14:14] is the sub-list for extension type_name
	14, // [14:14] is the sub-list for extension extendee
	0,  // [0:14] is the sub-list for field type_name
}
// init registers the file descriptor with the protobuf runtime at load time.
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init builds the runtime representation of
// ngolofuzz.proto. Idempotent: subsequent calls return immediately.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Register the oneof wrapper types for NgoloFuzzOne (msg index 13).
	file_ngolofuzz_proto_msgTypes[13].OneofWrappers = []any{
		(*NgoloFuzzOne_NewEncoding)(nil),
		(*NgoloFuzzOne_EncodingNgdotWithPadding)(nil),
		(*NgoloFuzzOne_EncodingNgdotStrict)(nil),
		(*NgoloFuzzOne_EncodingNgdotEncode)(nil),
		(*NgoloFuzzOne_EncodingNgdotAppendEncode)(nil),
		(*NgoloFuzzOne_EncodingNgdotEncodeToString)(nil),
		(*NgoloFuzzOne_NewEncoder)(nil),
		(*NgoloFuzzOne_EncodingNgdotEncodedLen)(nil),
		(*NgoloFuzzOne_EncodingNgdotAppendDecode)(nil),
		(*NgoloFuzzOne_EncodingNgdotDecodeString)(nil),
		(*NgoloFuzzOne_EncodingNgdotDecode)(nil),
		(*NgoloFuzzOne_NewDecoder)(nil),
		(*NgoloFuzzOne_EncodingNgdotDecodedLen)(nil),
	}
	// Register the oneof wrapper types for NgoloFuzzAny (msg index 14).
	file_ngolofuzz_proto_msgTypes[14].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   16,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the construction-only tables so they can be collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_binary
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory connection stub used to feed fuzzer-generated
// bytes to code that expects a net.Conn-like object: reads serve a fixed
// payload, writes are discarded (see the sibling methods).
type FuzzingConn struct {
	buf    []byte // full payload to serve
	offset int    // number of bytes already consumed by Read
}

// Read copies the next unread bytes of c.buf into b, returning the number of
// bytes actually copied, and io.EOF once the payload is exhausted.
//
// Fix: the original partial-read branch compared len(b) against
// len(c.buf)+c.offset instead of the remaining length len(c.buf)-c.offset.
// Near the end of the payload it reported n=len(b) and advanced offset past
// len(c.buf) even though fewer bytes were copied, leaving stale data in b —
// a violation of the io.Reader contract. Using copy's return value handles
// both the partial and the final read correctly.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b while reporting a complete, successful write, so code
// under fuzz can send without side effects.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the payload as fully consumed so subsequent Reads hit EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr implementation returned by the
// address accessors of FuzzingConn.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (a *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String reports a fixed fake address string.
func (a *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a fixed placeholder address (net.Conn conformance).
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address (net.Conn conformance).
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: deadlines are meaningless for an in-memory buffer.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op (see SetDeadline).
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op (see SetDeadline).
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn
// ready for reading from the start of the payload.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the fuzzer-provided bytes a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each element of a from int64 to int,
// preserving order.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each element of a from int64 to uint16,
// preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL for an empty string.
func GetRune(s string) rune {
	var first rune = '\x00'
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid replays data that is assumed to be a valid protobuf encoding
// of NgoloFuzzList; a malformed input is a harness bug and panics.
// String panics raised by the target are swallowed; any other panic value
// is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
		// string panic: deliberately ignored
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure replays data that may or may not decode as an NgoloFuzzList;
// inputs that fail to unmarshal are silently skipped. String panics raised
// by the target are swallowed; any other panic value is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
		// string panic: deliberately ignored
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the generated operation list, invoking one
// encoding/binary API per entry. Returns 1 when the whole list was
// processed, 0 when it was truncated at the size cap.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first call only, optionally dump a Go-source reproducer of
		// the operation list to the file named by $FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the number of executed operations per input at 4096.
		if l > 4096 {
			return 0
		}
		// Dispatch on the populated oneof member and call the matching
		// encoding/binary function with the generated arguments.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Size:
			binary.Size(a.Size.V)
		case *NgoloFuzzOne_AppendUvarint:
			binary.AppendUvarint(a.AppendUvarint.Buf, a.AppendUvarint.X)
		case *NgoloFuzzOne_Uvarint:
			binary.Uvarint(a.Uvarint.Buf)
		case *NgoloFuzzOne_AppendVarint:
			binary.AppendVarint(a.AppendVarint.Buf, a.AppendVarint.X)
		case *NgoloFuzzOne_Varint:
			binary.Varint(a.Varint.Buf)
		}
	}
	return 1
}
// PrintNG_List writes one Go-like reproducer line per operation in gen,
// mirroring the dispatch performed by FuzzNG_List.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for _, one := range gen.List {
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_Size:
			w.WriteString(fmt.Sprintf("binary.Size(%#+v)\n", a.Size.V))
		case *NgoloFuzzOne_AppendUvarint:
			w.WriteString(fmt.Sprintf("binary.AppendUvarint(%#+v, %#+v)\n", a.AppendUvarint.Buf, a.AppendUvarint.X))
		case *NgoloFuzzOne_Uvarint:
			w.WriteString(fmt.Sprintf("binary.Uvarint(%#+v)\n", a.Uvarint.Buf))
		case *NgoloFuzzOne_AppendVarint:
			w.WriteString(fmt.Sprintf("binary.AppendVarint(%#+v, %#+v)\n", a.AppendVarint.Buf, a.AppendVarint.X))
		case *NgoloFuzzOne_Varint:
			w.WriteString(fmt.Sprintf("binary.Varint(%#+v)\n", a.Varint.Buf))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_binary
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guard: both checks fail to build if the generated code and
// the linked protoimpl runtime drift out of the supported version window.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// SizeArgs carries the fuzzer-generated argument for binary.Size
// (message index 0 in this package's ngolofuzz.proto).
type SizeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	V             *NgoloFuzzAny          `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its type info.
func (x *SizeArgs) Reset() {
	*x = SizeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a protobuf-aware textual rendering (debugging only).
func (x *SizeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*SizeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching the type info.
func (x *SizeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SizeArgs.ProtoReflect.Descriptor instead.
func (*SizeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetV returns the v field; nil-receiver safe.
func (x *SizeArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}
// AppendUvarintArgs carries the fuzzer-generated arguments for
// binary.AppendUvarint (message index 1).
type AppendUvarintArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Buf           []byte                 `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
	X             uint64                 `protobuf:"varint,2,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its type info.
func (x *AppendUvarintArgs) Reset() {
	*x = AppendUvarintArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a protobuf-aware textual rendering (debugging only).
func (x *AppendUvarintArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*AppendUvarintArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching the type info.
func (x *AppendUvarintArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendUvarintArgs.ProtoReflect.Descriptor instead.
func (*AppendUvarintArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetBuf returns the buf field; nil-receiver safe.
func (x *AppendUvarintArgs) GetBuf() []byte {
	if x != nil {
		return x.Buf
	}
	return nil
}

// GetX returns the x field; nil-receiver safe.
func (x *AppendUvarintArgs) GetX() uint64 {
	if x != nil {
		return x.X
	}
	return 0
}
// UvarintArgs carries the fuzzer-generated argument for binary.Uvarint
// (message index 2).
type UvarintArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Buf           []byte                 `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its type info.
func (x *UvarintArgs) Reset() {
	*x = UvarintArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a protobuf-aware textual rendering (debugging only).
func (x *UvarintArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*UvarintArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching the type info.
func (x *UvarintArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UvarintArgs.ProtoReflect.Descriptor instead.
func (*UvarintArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetBuf returns the buf field; nil-receiver safe.
func (x *UvarintArgs) GetBuf() []byte {
	if x != nil {
		return x.Buf
	}
	return nil
}
// AppendVarintArgs carries the fuzzer-generated arguments for
// binary.AppendVarint (message index 3).
type AppendVarintArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Buf           []byte                 `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
	X             int64                  `protobuf:"varint,2,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its type info.
func (x *AppendVarintArgs) Reset() {
	*x = AppendVarintArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a protobuf-aware textual rendering (debugging only).
func (x *AppendVarintArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*AppendVarintArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching the type info.
func (x *AppendVarintArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendVarintArgs.ProtoReflect.Descriptor instead.
func (*AppendVarintArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetBuf returns the buf field; nil-receiver safe.
func (x *AppendVarintArgs) GetBuf() []byte {
	if x != nil {
		return x.Buf
	}
	return nil
}

// GetX returns the x field; nil-receiver safe.
func (x *AppendVarintArgs) GetX() int64 {
	if x != nil {
		return x.X
	}
	return 0
}
type VarintArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *VarintArgs) Reset() {
*x = VarintArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *VarintArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*VarintArgs) ProtoMessage() {}
func (x *VarintArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use VarintArgs.ProtoReflect.Descriptor instead.
func (*VarintArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *VarintArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Size
// *NgoloFuzzOne_AppendUvarint
// *NgoloFuzzOne_Uvarint
// *NgoloFuzzOne_AppendVarint
// *NgoloFuzzOne_Varint
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetSize() *SizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Size); ok {
return x.Size
}
}
return nil
}
func (x *NgoloFuzzOne) GetAppendUvarint() *AppendUvarintArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AppendUvarint); ok {
return x.AppendUvarint
}
}
return nil
}
func (x *NgoloFuzzOne) GetUvarint() *UvarintArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Uvarint); ok {
return x.Uvarint
}
}
return nil
}
func (x *NgoloFuzzOne) GetAppendVarint() *AppendVarintArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AppendVarint); ok {
return x.AppendVarint
}
}
return nil
}
func (x *NgoloFuzzOne) GetVarint() *VarintArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Varint); ok {
return x.Varint
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Size struct {
Size *SizeArgs `protobuf:"bytes,1,opt,name=Size,proto3,oneof"`
}
type NgoloFuzzOne_AppendUvarint struct {
AppendUvarint *AppendUvarintArgs `protobuf:"bytes,2,opt,name=AppendUvarint,proto3,oneof"`
}
type NgoloFuzzOne_Uvarint struct {
Uvarint *UvarintArgs `protobuf:"bytes,3,opt,name=Uvarint,proto3,oneof"`
}
type NgoloFuzzOne_AppendVarint struct {
AppendVarint *AppendVarintArgs `protobuf:"bytes,4,opt,name=AppendVarint,proto3,oneof"`
}
type NgoloFuzzOne_Varint struct {
Varint *VarintArgs `protobuf:"bytes,5,opt,name=Varint,proto3,oneof"`
}
func (*NgoloFuzzOne_Size) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AppendUvarint) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Uvarint) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AppendVarint) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Varint) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"1\n" +
"\bSizeArgs\x12%\n" +
"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"3\n" +
"\x11AppendUvarintArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\x12\f\n" +
"\x01x\x18\x02 \x01(\x04R\x01x\"\x1f\n" +
"\vUvarintArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\"2\n" +
"\x10AppendVarintArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\x12\f\n" +
"\x01x\x18\x02 \x01(\x03R\x01x\"\x1e\n" +
"\n" +
"VarintArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\"\xaf\x02\n" +
"\fNgoloFuzzOne\x12)\n" +
"\x04Size\x18\x01 \x01(\v2\x13.ngolofuzz.SizeArgsH\x00R\x04Size\x12D\n" +
"\rAppendUvarint\x18\x02 \x01(\v2\x1c.ngolofuzz.AppendUvarintArgsH\x00R\rAppendUvarint\x122\n" +
"\aUvarint\x18\x03 \x01(\v2\x16.ngolofuzz.UvarintArgsH\x00R\aUvarint\x12A\n" +
"\fAppendVarint\x18\x04 \x01(\v2\x1b.ngolofuzz.AppendVarintArgsH\x00R\fAppendVarint\x12/\n" +
"\x06Varint\x18\x05 \x01(\v2\x15.ngolofuzz.VarintArgsH\x00R\x06VarintB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1cZ\x1a./;fuzz_ng_encoding_binaryb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_ngolofuzz_proto_goTypes = []any{
(*SizeArgs)(nil), // 0: ngolofuzz.SizeArgs
(*AppendUvarintArgs)(nil), // 1: ngolofuzz.AppendUvarintArgs
(*UvarintArgs)(nil), // 2: ngolofuzz.UvarintArgs
(*AppendVarintArgs)(nil), // 3: ngolofuzz.AppendVarintArgs
(*VarintArgs)(nil), // 4: ngolofuzz.VarintArgs
(*NgoloFuzzOne)(nil), // 5: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 6: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 7: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
6, // 0: ngolofuzz.SizeArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
0, // 1: ngolofuzz.NgoloFuzzOne.Size:type_name -> ngolofuzz.SizeArgs
1, // 2: ngolofuzz.NgoloFuzzOne.AppendUvarint:type_name -> ngolofuzz.AppendUvarintArgs
2, // 3: ngolofuzz.NgoloFuzzOne.Uvarint:type_name -> ngolofuzz.UvarintArgs
3, // 4: ngolofuzz.NgoloFuzzOne.AppendVarint:type_name -> ngolofuzz.AppendVarintArgs
4, // 5: ngolofuzz.NgoloFuzzOne.Varint:type_name -> ngolofuzz.VarintArgs
5, // 6: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
7, // [7:7] is the sub-list for method output_type
7, // [7:7] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
(*NgoloFuzzOne_Size)(nil),
(*NgoloFuzzOne_AppendUvarint)(nil),
(*NgoloFuzzOne_Uvarint)(nil),
(*NgoloFuzzOne_AppendVarint)(nil),
(*NgoloFuzzOne_Varint)(nil),
}
file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 8,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_csv
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/csv"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
// TODO only add these functions if needed
// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the bytes of a through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	backing := bytes.NewBuffer(a)
	return bufio.NewReader(backing)
}
// ConvertIntArray narrows each int64 in a to int, preserving order and
// length. Values outside the int range wrap per Go integer conversion.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows each int64 in a to uint16, preserving order
// and length. Out-of-range values wrap per Go integer conversion.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the start of s yields utf8.RuneError (U+FFFD), exactly
// as a range loop over the string would.
func GetRune(s string) rune {
	if rs := []rune(s); len(rs) > 0 {
		return rs[0]
	}
	return '\x00'
}
// FuzzNG_valid decodes data as a NgoloFuzzList protobuf and replays it via
// FuzzNG_List. The caller guarantees the input is valid protobuf, so a
// decode failure is treated as a fatal harness error.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the code under test; anything else
	// (runtime errors, typed panics) is a genuine crash and is re-raised.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure decodes data as a NgoloFuzzList protobuf and replays it via
// FuzzNG_List. Unlike FuzzNG_valid, the input may be arbitrary bytes, so an
// undecodable buffer is simply rejected (return 0) rather than fatal.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	// Swallow string panics raised by the code under test; anything else
	// (runtime errors, typed panics) is a genuine crash and is re-raised.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time FUZZ_NG_REPRODUCER dump below.
var initialized bool
// FuzzNG_List replays a decoded NgoloFuzzList against encoding/csv.
// Each list item is a oneof selecting a single API call; NewReader/NewWriter
// items grow pools of live csv.Reader/csv.Writer values, and method items
// are applied round-robin over the matching pool. Returns 1 when the whole
// list was replayed, 0 on early exit (list too long or an API error).
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// On the first call only, optionally write a standalone Go reproducer of
// this input to the file named by FUZZ_NG_REPRODUCER.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Pools of constructed values plus round-robin cursors into them.
var ReaderResults []*csv.Reader
ReaderResultsIndex := 0
var WriterResults []*csv.Writer
WriterResultsIndex := 0
for l := range gen.List {
// Cap the number of replayed operations to keep runs bounded.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewReader:
arg0 := bytes.NewReader(a.NewReader.R)
r0 := csv.NewReader(arg0)
if r0 != nil{
ReaderResults = append(ReaderResults, r0)
}
case *NgoloFuzzOne_ReaderNgdotRead:
// Method calls are skipped until a receiver exists in the pool.
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, r1 := arg0.Read()
// An API error ends the replay early (after touching the error value).
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ReaderNgdotFieldPos:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg1 := int(a.ReaderNgdotFieldPos.Field)
arg0.FieldPos(arg1)
case *NgoloFuzzOne_ReaderNgdotInputOffset:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg0.InputOffset()
case *NgoloFuzzOne_ReaderNgdotReadAll:
if len(ReaderResults) == 0 {
continue
}
arg0 := ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, r1 := arg0.ReadAll()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NewWriter:
arg0 := bytes.NewBuffer(a.NewWriter.W)
r0 := csv.NewWriter(arg0)
if r0 != nil{
WriterResults = append(WriterResults, r0)
}
case *NgoloFuzzOne_WriterNgdotWrite:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
r0 := arg0.Write(a.WriterNgdotWrite.Record)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotFlush:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg0.Flush()
case *NgoloFuzzOne_WriterNgdotError:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
r0 := arg0.Error()
if r0 != nil{
r0.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a human-readable Go reproducer of gen to w, mirroring
// the replay performed by FuzzNG_List line for line: constructor items print
// a `ReaderN := ...` / `WriterN := ...` declaration, and method items print
// the call on the round-robin receiver that FuzzNG_List would have used.
// The counters here must stay in lockstep with FuzzNG_List's pool logic so
// the printed program matches the executed one.
// NOTE(review): unlike FuzzNG_List, errors do not stop printing here, so a
// reproducer may continue past the operation where the replay returned 0.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
ReaderNb := 0
ReaderResultsIndex := 0
WriterNb := 0
WriterResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewReader:
w.WriteString(fmt.Sprintf("Reader%d := csv.NewReader(bytes.NewReader(%#+v))\n", ReaderNb, a.NewReader.R))
ReaderNb = ReaderNb + 1
case *NgoloFuzzOne_ReaderNgdotRead:
// Like the replay, method items are skipped until a receiver exists.
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.Read()\n", ReaderResultsIndex))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotFieldPos:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.FieldPos(int(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotFieldPos.Field))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotInputOffset:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.InputOffset()\n", ReaderResultsIndex))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReaderNgdotReadAll:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d.ReadAll()\n", ReaderResultsIndex))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_NewWriter:
w.WriteString(fmt.Sprintf("Writer%d := csv.NewWriter(bytes.NewBuffer(%#+v))\n", WriterNb, a.NewWriter.W))
WriterNb = WriterNb + 1
case *NgoloFuzzOne_WriterNgdotWrite:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Write(%#+v)\n", WriterResultsIndex, a.WriterNgdotWrite.Record))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotFlush:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Flush()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotError:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Error()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_csv
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewReaderArgs) Reset() {
*x = NewReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderArgs) ProtoMessage() {}
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewReaderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type ReaderNgdotReadArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadArgs) Reset() {
*x = ReaderNgdotReadArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadArgs) ProtoMessage() {}
func (x *ReaderNgdotReadArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type ReaderNgdotFieldPosArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Field int64 `protobuf:"varint,1,opt,name=field,proto3" json:"field,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotFieldPosArgs) Reset() {
*x = ReaderNgdotFieldPosArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotFieldPosArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotFieldPosArgs) ProtoMessage() {}
func (x *ReaderNgdotFieldPosArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotFieldPosArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotFieldPosArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *ReaderNgdotFieldPosArgs) GetField() int64 {
if x != nil {
return x.Field
}
return 0
}
type ReaderNgdotInputOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotInputOffsetArgs) Reset() {
*x = ReaderNgdotInputOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotInputOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotInputOffsetArgs) ProtoMessage() {}
func (x *ReaderNgdotInputOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotInputOffsetArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotInputOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
type ReaderNgdotReadAllArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadAllArgs) Reset() {
*x = ReaderNgdotReadAllArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadAllArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadAllArgs) ProtoMessage() {}
func (x *ReaderNgdotReadAllArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadAllArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadAllArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type NewWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterArgs) Reset() {
*x = NewWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterArgs) ProtoMessage() {}
func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NewWriterArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type WriterNgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Record []string `protobuf:"bytes,1,rep,name=record,proto3" json:"record,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWriteArgs) Reset() {
*x = WriterNgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWriteArgs) ProtoMessage() {}
func (x *WriterNgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *WriterNgdotWriteArgs) GetRecord() []string {
if x != nil {
return x.Record
}
return nil
}
type WriterNgdotFlushArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotFlushArgs) Reset() {
*x = WriterNgdotFlushArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotFlushArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotFlushArgs) ProtoMessage() {}
func (x *WriterNgdotFlushArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotFlushArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotFlushArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
type WriterNgdotErrorArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotErrorArgs) Reset() {
*x = WriterNgdotErrorArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotErrorArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotErrorArgs) ProtoMessage() {}
func (x *WriterNgdotErrorArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotErrorArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotErrorArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewReader
// *NgoloFuzzOne_ReaderNgdotRead
// *NgoloFuzzOne_ReaderNgdotFieldPos
// *NgoloFuzzOne_ReaderNgdotInputOffset
// *NgoloFuzzOne_ReaderNgdotReadAll
// *NgoloFuzzOne_NewWriter
// *NgoloFuzzOne_WriterNgdotWrite
// *NgoloFuzzOne_WriterNgdotFlush
// *NgoloFuzzOne_WriterNgdotError
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
return x.NewReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotRead() *ReaderNgdotReadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotRead); ok {
return x.ReaderNgdotRead
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotFieldPos() *ReaderNgdotFieldPosArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotFieldPos); ok {
return x.ReaderNgdotFieldPos
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotInputOffset() *ReaderNgdotInputOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotInputOffset); ok {
return x.ReaderNgdotInputOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotReadAll() *ReaderNgdotReadAllArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadAll); ok {
return x.ReaderNgdotReadAll
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
return x.NewWriter
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotWrite() *WriterNgdotWriteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWrite); ok {
return x.WriterNgdotWrite
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotFlush() *WriterNgdotFlushArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotFlush); ok {
return x.WriterNgdotFlush
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotError() *WriterNgdotErrorArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotError); ok {
return x.WriterNgdotError
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewReader struct {
NewReader *NewReaderArgs `protobuf:"bytes,1,opt,name=NewReader,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotRead struct {
ReaderNgdotRead *ReaderNgdotReadArgs `protobuf:"bytes,2,opt,name=ReaderNgdotRead,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotFieldPos struct {
ReaderNgdotFieldPos *ReaderNgdotFieldPosArgs `protobuf:"bytes,3,opt,name=ReaderNgdotFieldPos,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotInputOffset struct {
ReaderNgdotInputOffset *ReaderNgdotInputOffsetArgs `protobuf:"bytes,4,opt,name=ReaderNgdotInputOffset,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReadAll struct {
ReaderNgdotReadAll *ReaderNgdotReadAllArgs `protobuf:"bytes,5,opt,name=ReaderNgdotReadAll,proto3,oneof"`
}
type NgoloFuzzOne_NewWriter struct {
NewWriter *NewWriterArgs `protobuf:"bytes,6,opt,name=NewWriter,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotWrite struct {
WriterNgdotWrite *WriterNgdotWriteArgs `protobuf:"bytes,7,opt,name=WriterNgdotWrite,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotFlush struct {
WriterNgdotFlush *WriterNgdotFlushArgs `protobuf:"bytes,8,opt,name=WriterNgdotFlush,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotError struct {
WriterNgdotError *WriterNgdotErrorArgs `protobuf:"bytes,9,opt,name=WriterNgdotError,proto3,oneof"`
}
func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotRead) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotFieldPos) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotInputOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReadAll) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotWrite) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotFlush) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotError) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1d\n" +
"\rNewReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\x15\n" +
"\x13ReaderNgdotReadArgs\"/\n" +
"\x17ReaderNgdotFieldPosArgs\x12\x14\n" +
"\x05field\x18\x01 \x01(\x03R\x05field\"\x1c\n" +
"\x1aReaderNgdotInputOffsetArgs\"\x18\n" +
"\x16ReaderNgdotReadAllArgs\"\x1d\n" +
"\rNewWriterArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\".\n" +
"\x14WriterNgdotWriteArgs\x12\x16\n" +
"\x06record\x18\x01 \x03(\tR\x06record\"\x16\n" +
"\x14WriterNgdotFlushArgs\"\x16\n" +
"\x14WriterNgdotErrorArgs\"\xd1\x05\n" +
"\fNgoloFuzzOne\x128\n" +
"\tNewReader\x18\x01 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12J\n" +
"\x0fReaderNgdotRead\x18\x02 \x01(\v2\x1e.ngolofuzz.ReaderNgdotReadArgsH\x00R\x0fReaderNgdotRead\x12V\n" +
"\x13ReaderNgdotFieldPos\x18\x03 \x01(\v2\".ngolofuzz.ReaderNgdotFieldPosArgsH\x00R\x13ReaderNgdotFieldPos\x12_\n" +
"\x16ReaderNgdotInputOffset\x18\x04 \x01(\v2%.ngolofuzz.ReaderNgdotInputOffsetArgsH\x00R\x16ReaderNgdotInputOffset\x12S\n" +
"\x12ReaderNgdotReadAll\x18\x05 \x01(\v2!.ngolofuzz.ReaderNgdotReadAllArgsH\x00R\x12ReaderNgdotReadAll\x128\n" +
"\tNewWriter\x18\x06 \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriter\x12M\n" +
"\x10WriterNgdotWrite\x18\a \x01(\v2\x1f.ngolofuzz.WriterNgdotWriteArgsH\x00R\x10WriterNgdotWrite\x12M\n" +
"\x10WriterNgdotFlush\x18\b \x01(\v2\x1f.ngolofuzz.WriterNgdotFlushArgsH\x00R\x10WriterNgdotFlush\x12M\n" +
"\x10WriterNgdotError\x18\t \x01(\v2\x1f.ngolofuzz.WriterNgdotErrorArgsH\x00R\x10WriterNgdotErrorB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x19Z\x17./;fuzz_ng_encoding_csvb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
var file_ngolofuzz_proto_goTypes = []any{
(*NewReaderArgs)(nil), // 0: ngolofuzz.NewReaderArgs
(*ReaderNgdotReadArgs)(nil), // 1: ngolofuzz.ReaderNgdotReadArgs
(*ReaderNgdotFieldPosArgs)(nil), // 2: ngolofuzz.ReaderNgdotFieldPosArgs
(*ReaderNgdotInputOffsetArgs)(nil), // 3: ngolofuzz.ReaderNgdotInputOffsetArgs
(*ReaderNgdotReadAllArgs)(nil), // 4: ngolofuzz.ReaderNgdotReadAllArgs
(*NewWriterArgs)(nil), // 5: ngolofuzz.NewWriterArgs
(*WriterNgdotWriteArgs)(nil), // 6: ngolofuzz.WriterNgdotWriteArgs
(*WriterNgdotFlushArgs)(nil), // 7: ngolofuzz.WriterNgdotFlushArgs
(*WriterNgdotErrorArgs)(nil), // 8: ngolofuzz.WriterNgdotErrorArgs
(*NgoloFuzzOne)(nil), // 9: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 10: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 11: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
1, // 1: ngolofuzz.NgoloFuzzOne.ReaderNgdotRead:type_name -> ngolofuzz.ReaderNgdotReadArgs
2, // 2: ngolofuzz.NgoloFuzzOne.ReaderNgdotFieldPos:type_name -> ngolofuzz.ReaderNgdotFieldPosArgs
3, // 3: ngolofuzz.NgoloFuzzOne.ReaderNgdotInputOffset:type_name -> ngolofuzz.ReaderNgdotInputOffsetArgs
4, // 4: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadAll:type_name -> ngolofuzz.ReaderNgdotReadAllArgs
5, // 5: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
6, // 6: ngolofuzz.NgoloFuzzOne.WriterNgdotWrite:type_name -> ngolofuzz.WriterNgdotWriteArgs
7, // 7: ngolofuzz.NgoloFuzzOne.WriterNgdotFlush:type_name -> ngolofuzz.WriterNgdotFlushArgs
8, // 8: ngolofuzz.NgoloFuzzOne.WriterNgdotError:type_name -> ngolofuzz.WriterNgdotErrorArgs
9, // 9: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
10, // [10:10] is the sub-list for method output_type
10, // [10:10] is the sub-list for method input_type
10, // [10:10] is the sub-list for extension type_name
10, // [10:10] is the sub-list for extension extendee
0, // [0:10] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[9].OneofWrappers = []any{
(*NgoloFuzzOne_NewReader)(nil),
(*NgoloFuzzOne_ReaderNgdotRead)(nil),
(*NgoloFuzzOne_ReaderNgdotFieldPos)(nil),
(*NgoloFuzzOne_ReaderNgdotInputOffset)(nil),
(*NgoloFuzzOne_ReaderNgdotReadAll)(nil),
(*NgoloFuzzOne_NewWriter)(nil),
(*NgoloFuzzOne_WriterNgdotWrite)(nil),
(*NgoloFuzzOne_WriterNgdotFlush)(nil),
(*NgoloFuzzOne_WriterNgdotError)(nil),
}
file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 12,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_gob
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/gob"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stand-in for a network connection used by the
// fuzzing harness: Read serves a fixed byte slice and Write discards data,
// giving the fuzzer deterministic, syscall-free connection behavior.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // read cursor into buf
}

// Read copies the next portion of c.buf into b, advances the cursor, and
// reports exactly how many bytes were copied. Once the buffer is exhausted
// it returns (0, io.EOF).
//
// Fix: the original short-read branch tested len(b) < len(c.buf)+c.offset
// instead of the remaining length len(c.buf)-c.offset, so a read issued near
// the end of the buffer reported n = len(b) even though fewer bytes had been
// copied — violating the io.Reader contract and handing stale bytes in b to
// the caller. Using copy's return value makes n correct in every case.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b entirely and reports that every byte was accepted,
// so code under fuzz never blocks on output.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}
// Close marks the connection as drained by moving the read cursor past the
// end of the buffered data; subsequent Reads report io.EOF. It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr implementation; FuzzingConn hands it
// out for both endpoints of the fake connection.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a fresh placeholder address for the fake connection.
func (c *FuzzingConn) LocalAddr() net.Addr {
	var addr FuzzingAddr
	return &addr
}
// RemoteAddr returns a fresh placeholder address for the fake connection.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	var addr FuzzingAddr
	return &addr
}
// SetDeadline is a no-op: the in-memory fuzzing connection never times out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op; reads complete immediately or hit EOF.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op; writes always succeed immediately.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn whose Reads serve that data
// from the beginning.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit these generic helper constructors only when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer and returns it
// as a *big.Int (an empty slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader whose contents are the bytes
// of a.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type; values
// outside the int range are truncated by the conversion.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows each int64 in a to uint16; out-of-range values
// are truncated modulo 2^16 by the conversion.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune decodes and returns the first UTF-8 rune of s; an empty string
// yields the NUL rune '\x00'.
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return '\x00'
}
// FuzzNG_valid is the fuzzing entry point for drivers (e.g. a
// protobuf-aware mutator) that guarantee data is a valid NgoloFuzzList
// encoding; an unmarshal failure is therefore treated as a harness bug and
// aborts the process.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Input is supposed to be pre-validated; failing here means the
		// generator is broken, so fail loudly rather than skip.
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string-valued panics raised by the exercised API (treated as
	// expected failure modes); any other panic value is re-raised.
	// NOTE: this recover is installed after the unmarshal check above, so
	// that panic is intentionally NOT recovered.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzzing entry point for raw-byte drivers: the input
// may not be a valid protobuf encoding, so unmarshal failures are silently
// ignored instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Not a valid NgoloFuzzList encoding: uninteresting input.
		return 0
	}
	// Swallow string-valued panics raised by the exercised API (treated as
	// expected failure modes); any other panic value is re-raised.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets gen as a sequence of encoding/gob API calls and
// executes them: building Decoders/Encoders from fuzzer-supplied bytes and
// feeding NgoloFuzzAny values through Decode/Encode/Register. It returns 1
// when the whole list was processed and 0 when an operation failed early.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first call only, optionally dump a Go-source reproducer of
		// this input to the path named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Pools of live decoders/encoders created by earlier operations; the
	// *Index counters select targets round-robin.
	var DecoderResults []*gob.Decoder
	DecoderResultsIndex := 0
	var EncoderResults []*gob.Encoder
	EncoderResultsIndex := 0
	for l := range gen.List {
		// Bound the number of executed operations to keep runtime small.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewDecoder:
			// gob.NewDecoder over the fuzzer-chosen byte stream.
			arg0 := bytes.NewReader(a.NewDecoder.R)
			r0 := gob.NewDecoder(arg0)
			if r0 != nil{
				DecoderResults = append(DecoderResults, r0)
			}
		case *NgoloFuzzOne_DecoderNgdotDecode:
			if len(DecoderResults) == 0 {
				continue
			}
			arg0 := DecoderResults[DecoderResultsIndex]
			DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
			r0 := arg0.Decode(a.DecoderNgdotDecode.E)
			if r0 != nil{
				// Decode failed: touch the error and stop this input.
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_NewEncoder:
			// gob.NewEncoder writing into a buffer seeded with fuzzer bytes.
			arg0 := bytes.NewBuffer(a.NewEncoder.W)
			r0 := gob.NewEncoder(arg0)
			if r0 != nil{
				EncoderResults = append(EncoderResults, r0)
			}
		case *NgoloFuzzOne_EncoderNgdotEncode:
			if len(EncoderResults) == 0 {
				continue
			}
			arg0 := EncoderResults[EncoderResultsIndex]
			EncoderResultsIndex = (EncoderResultsIndex + 1) % len(EncoderResults)
			r0 := arg0.Encode(a.EncoderNgdotEncode.E)
			if r0 != nil{
				// Encode failed: touch the error and stop this input.
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_RegisterName:
			gob.RegisterName(a.RegisterName.Name, a.RegisterName.Value)
		case *NgoloFuzzOne_Register:
			gob.Register(a.Register.Value)
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go-source reproducer for gen to w:
// one line per operation, mirroring the execution order of FuzzNG_List
// (including its round-robin selection of decoders/encoders), so a crash
// can be replayed as a standalone program.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// Nb counts objects created so far (used to name them); the *Index
	// counters mirror FuzzNG_List's round-robin target selection.
	DecoderNb := 0
	DecoderResultsIndex := 0
	EncoderNb := 0
	EncoderResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewDecoder:
			w.WriteString(fmt.Sprintf("Decoder%d := gob.NewDecoder(bytes.NewReader(%#+v))\n", DecoderNb, a.NewDecoder.R))
			DecoderNb = DecoderNb + 1
		case *NgoloFuzzOne_DecoderNgdotDecode:
			// Skip, as FuzzNG_List does, when no decoder exists yet.
			if DecoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Decoder%d.Decode(%#+v)\n", DecoderResultsIndex, a.DecoderNgdotDecode.E))
			DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
		case *NgoloFuzzOne_NewEncoder:
			w.WriteString(fmt.Sprintf("Encoder%d := gob.NewEncoder(bytes.NewBuffer(%#+v))\n", EncoderNb, a.NewEncoder.W))
			EncoderNb = EncoderNb + 1
		case *NgoloFuzzOne_EncoderNgdotEncode:
			// Skip, as FuzzNG_List does, when no encoder exists yet.
			if EncoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoder%d.Encode(%#+v)\n", EncoderResultsIndex, a.EncoderNgdotEncode.E))
			EncoderResultsIndex = (EncoderResultsIndex + 1) % EncoderNb
		case *NgoloFuzzOne_RegisterName:
			w.WriteString(fmt.Sprintf("gob.RegisterName(%#+v, %#+v)\n", a.RegisterName.Name, a.RegisterName.Value))
		case *NgoloFuzzOne_Register:
			w.WriteString(fmt.Sprintf("gob.Register(%#+v)\n", a.Register.Value))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_gob
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewDecoderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewDecoderArgs) Reset() {
*x = NewDecoderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewDecoderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewDecoderArgs) ProtoMessage() {}
func (x *NewDecoderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewDecoderArgs.ProtoReflect.Descriptor instead.
func (*NewDecoderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewDecoderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type DecoderNgdotDecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
E *NgoloFuzzAny `protobuf:"bytes,1,opt,name=e,proto3" json:"e,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecoderNgdotDecodeArgs) Reset() {
*x = DecoderNgdotDecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecoderNgdotDecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecoderNgdotDecodeArgs) ProtoMessage() {}
func (x *DecoderNgdotDecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecoderNgdotDecodeArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotDecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *DecoderNgdotDecodeArgs) GetE() *NgoloFuzzAny {
if x != nil {
return x.E
}
return nil
}
type NewEncoderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewEncoderArgs) Reset() {
*x = NewEncoderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewEncoderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewEncoderArgs) ProtoMessage() {}
func (x *NewEncoderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewEncoderArgs.ProtoReflect.Descriptor instead.
func (*NewEncoderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NewEncoderArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type EncoderNgdotEncodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
E *NgoloFuzzAny `protobuf:"bytes,1,opt,name=e,proto3" json:"e,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncoderNgdotEncodeArgs) Reset() {
*x = EncoderNgdotEncodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncoderNgdotEncodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncoderNgdotEncodeArgs) ProtoMessage() {}
func (x *EncoderNgdotEncodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncoderNgdotEncodeArgs.ProtoReflect.Descriptor instead.
func (*EncoderNgdotEncodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *EncoderNgdotEncodeArgs) GetE() *NgoloFuzzAny {
if x != nil {
return x.E
}
return nil
}
type RegisterNameArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value *NgoloFuzzAny `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RegisterNameArgs) Reset() {
*x = RegisterNameArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RegisterNameArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegisterNameArgs) ProtoMessage() {}
func (x *RegisterNameArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegisterNameArgs.ProtoReflect.Descriptor instead.
func (*RegisterNameArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *RegisterNameArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *RegisterNameArgs) GetValue() *NgoloFuzzAny {
if x != nil {
return x.Value
}
return nil
}
type RegisterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Value *NgoloFuzzAny `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RegisterArgs) Reset() {
*x = RegisterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RegisterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegisterArgs) ProtoMessage() {}
func (x *RegisterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegisterArgs.ProtoReflect.Descriptor instead.
func (*RegisterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *RegisterArgs) GetValue() *NgoloFuzzAny {
if x != nil {
return x.Value
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewDecoder
// *NgoloFuzzOne_DecoderNgdotDecode
// *NgoloFuzzOne_NewEncoder
// *NgoloFuzzOne_EncoderNgdotEncode
// *NgoloFuzzOne_RegisterName
// *NgoloFuzzOne_Register
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewDecoder() *NewDecoderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewDecoder); ok {
return x.NewDecoder
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotDecode() *DecoderNgdotDecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotDecode); ok {
return x.DecoderNgdotDecode
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewEncoder() *NewEncoderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewEncoder); ok {
return x.NewEncoder
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncoderNgdotEncode() *EncoderNgdotEncodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncoderNgdotEncode); ok {
return x.EncoderNgdotEncode
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegisterName() *RegisterNameArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegisterName); ok {
return x.RegisterName
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegister() *RegisterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Register); ok {
return x.Register
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewDecoder struct {
NewDecoder *NewDecoderArgs `protobuf:"bytes,1,opt,name=NewDecoder,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotDecode struct {
DecoderNgdotDecode *DecoderNgdotDecodeArgs `protobuf:"bytes,2,opt,name=DecoderNgdotDecode,proto3,oneof"`
}
type NgoloFuzzOne_NewEncoder struct {
NewEncoder *NewEncoderArgs `protobuf:"bytes,3,opt,name=NewEncoder,proto3,oneof"`
}
type NgoloFuzzOne_EncoderNgdotEncode struct {
EncoderNgdotEncode *EncoderNgdotEncodeArgs `protobuf:"bytes,4,opt,name=EncoderNgdotEncode,proto3,oneof"`
}
type NgoloFuzzOne_RegisterName struct {
RegisterName *RegisterNameArgs `protobuf:"bytes,5,opt,name=RegisterName,proto3,oneof"`
}
type NgoloFuzzOne_Register struct {
Register *RegisterArgs `protobuf:"bytes,6,opt,name=Register,proto3,oneof"`
}
func (*NgoloFuzzOne_NewDecoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotDecode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewEncoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncoderNgdotEncode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegisterName) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Register) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the protoreflect descriptor for ngolofuzz.proto;
// it is populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto (generated data — do not edit by hand).
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1e\n" +
	"\x0eNewDecoderArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\"?\n" +
	"\x16DecoderNgdotDecodeArgs\x12%\n" +
	"\x01e\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01e\"\x1e\n" +
	"\x0eNewEncoderArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\"?\n" +
	"\x16EncoderNgdotEncodeArgs\x12%\n" +
	"\x01e\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01e\"U\n" +
	"\x10RegisterNameArgs\x12\x12\n" +
	"\x04name\x18\x01 \x01(\tR\x04name\x12-\n" +
	"\x05value\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x05value\"=\n" +
	"\fRegisterArgs\x12-\n" +
	"\x05value\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x05value\"\xb4\x03\n" +
	"\fNgoloFuzzOne\x12;\n" +
	"\n" +
	"NewDecoder\x18\x01 \x01(\v2\x19.ngolofuzz.NewDecoderArgsH\x00R\n" +
	"NewDecoder\x12S\n" +
	"\x12DecoderNgdotDecode\x18\x02 \x01(\v2!.ngolofuzz.DecoderNgdotDecodeArgsH\x00R\x12DecoderNgdotDecode\x12;\n" +
	"\n" +
	"NewEncoder\x18\x03 \x01(\v2\x19.ngolofuzz.NewEncoderArgsH\x00R\n" +
	"NewEncoder\x12S\n" +
	"\x12EncoderNgdotEncode\x18\x04 \x01(\v2!.ngolofuzz.EncoderNgdotEncodeArgsH\x00R\x12EncoderNgdotEncode\x12A\n" +
	"\fRegisterName\x18\x05 \x01(\v2\x1b.ngolofuzz.RegisterNameArgsH\x00R\fRegisterName\x125\n" +
	"\bRegister\x18\x06 \x01(\v2\x17.ngolofuzz.RegisterArgsH\x00R\bRegisterB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x19Z\x17./;fuzz_ng_encoding_gobb\x06proto3"
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor once
// and caches the result; used by the deprecated Descriptor() methods.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		// unsafe.Slice over StringData avoids copying the constant string.
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime message-info slots, one per message declared in ngolofuzz.proto.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 9)

// file_ngolofuzz_proto_goTypes maps descriptor indexes to Go types.
var file_ngolofuzz_proto_goTypes = []any{
	(*NewDecoderArgs)(nil),         // 0: ngolofuzz.NewDecoderArgs
	(*DecoderNgdotDecodeArgs)(nil), // 1: ngolofuzz.DecoderNgdotDecodeArgs
	(*NewEncoderArgs)(nil),         // 2: ngolofuzz.NewEncoderArgs
	(*EncoderNgdotEncodeArgs)(nil), // 3: ngolofuzz.EncoderNgdotEncodeArgs
	(*RegisterNameArgs)(nil),       // 4: ngolofuzz.RegisterNameArgs
	(*RegisterArgs)(nil),           // 5: ngolofuzz.RegisterArgs
	(*NgoloFuzzOne)(nil),           // 6: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),           // 7: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),          // 8: ngolofuzz.NgoloFuzzList
}

// file_ngolofuzz_proto_depIdxs encodes field -> type dependencies as indexes
// into file_ngolofuzz_proto_goTypes, followed by the sub-list boundaries.
var file_ngolofuzz_proto_depIdxs = []int32{
	7,  // 0: ngolofuzz.DecoderNgdotDecodeArgs.e:type_name -> ngolofuzz.NgoloFuzzAny
	7,  // 1: ngolofuzz.EncoderNgdotEncodeArgs.e:type_name -> ngolofuzz.NgoloFuzzAny
	7,  // 2: ngolofuzz.RegisterNameArgs.value:type_name -> ngolofuzz.NgoloFuzzAny
	7,  // 3: ngolofuzz.RegisterArgs.value:type_name -> ngolofuzz.NgoloFuzzAny
	0,  // 4: ngolofuzz.NgoloFuzzOne.NewDecoder:type_name -> ngolofuzz.NewDecoderArgs
	1,  // 5: ngolofuzz.NgoloFuzzOne.DecoderNgdotDecode:type_name -> ngolofuzz.DecoderNgdotDecodeArgs
	2,  // 6: ngolofuzz.NgoloFuzzOne.NewEncoder:type_name -> ngolofuzz.NewEncoderArgs
	3,  // 7: ngolofuzz.NgoloFuzzOne.EncoderNgdotEncode:type_name -> ngolofuzz.EncoderNgdotEncodeArgs
	4,  // 8: ngolofuzz.NgoloFuzzOne.RegisterName:type_name -> ngolofuzz.RegisterNameArgs
	5,  // 9: ngolofuzz.NgoloFuzzOne.Register:type_name -> ngolofuzz.RegisterArgs
	6,  // 10: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	11, // [11:11] is the sub-list for method output_type
	11, // [11:11] is the sub-list for method input_type
	11, // [11:11] is the sub-list for extension type_name
	11, // [11:11] is the sub-list for extension extendee
	0,  // [0:11] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the oneof wrapper types and builds the
// file descriptor with the protobuf runtime. It is idempotent: a second call
// returns immediately once File_ngolofuzz_proto is set.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// msgTypes[6] is NgoloFuzzOne; declare its oneof branch wrappers.
	file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
		(*NgoloFuzzOne_NewDecoder)(nil),
		(*NgoloFuzzOne_DecoderNgdotDecode)(nil),
		(*NgoloFuzzOne_NewEncoder)(nil),
		(*NgoloFuzzOne_EncoderNgdotEncode)(nil),
		(*NgoloFuzzOne_RegisterName)(nil),
		(*NgoloFuzzOne_Register)(nil),
	}
	// msgTypes[7] is NgoloFuzzAny; declare its oneof branch wrappers.
	file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   9,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release construction-only tables so they can be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_hex
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/hex"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn-style stub backed by a fixed byte
// buffer: Read serves slices of buf, Write discards its input, and the
// address/deadline methods are no-ops. It lets fuzzed code exercise
// connection-based APIs without real network I/O.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next chunk of the buffer into b. It returns io.EOF once
// the buffer is exhausted; otherwise it returns the number of bytes copied.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	// BUG FIX: the original condition was `len(b) < len(c.buf)+c.offset`,
	// which with a non-zero offset took this branch even when b could hold
	// all remaining data, returning n=len(b) while copying fewer bytes and
	// advancing offset past the end of buf.
	if len(b) < remaining {
		// b is smaller than what is left: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b can hold everything that is left.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write pretends the whole of b was written and discards it.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is the placeholder net.Addr reported by FuzzingConn.
type FuzzingAddr struct{}

func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// Deadlines are meaningless for an in-memory buffer; all three are no-ops.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn returns a FuzzingConn that will serve a via Read.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
//TODO only add these functions if needed

// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the bytes in a with a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to int, preserving order.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16, preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or '\x00' when s is empty.
func GetRune(s string) rune {
	r := rune('\x00')
	for _, first := range s {
		r = first
		break
	}
	return r
}
// FuzzNG_valid is the entry point for inputs produced by the protobuf
// mutator: data is expected to be a valid NgoloFuzzList, so a failed
// unmarshal panics instead of being skipped.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the code under test; any other panic
	// value (runtime errors, error values, ...) is re-raised so the fuzzer
	// reports it.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is like FuzzNG_valid, but we are unsure the input is a
// valid protobuf: undecodable inputs are silently rejected (return 0)
// instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Same panic policy as FuzzNG_valid: string panics are swallowed,
	// everything else is re-raised for the fuzzer to report.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays each call described in gen.List against encoding/hex.
// On first invocation it optionally writes a Go-source reproducer to the file
// named by the FUZZ_NG_REPRODUCER environment variable. Returns 1 when the
// whole list executed, 0 when it bailed out early (length cap or hex error).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the number of replayed calls per input.
		if l > 4096 {
			return 0
		}
		// Dispatch on the oneof branch chosen by the fuzzer.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_EncodedLen:
			arg0 := int(a.EncodedLen.N)
			hex.EncodedLen(arg0)
		case *NgoloFuzzOne_Encode:
			// dst is sized 2*len(src), which matches hex's 2-chars-per-byte
			// encoding, so Encode cannot overflow it.
			a.Encode.Dst = make([]byte, 2*len(a.Encode.Src))
			hex.Encode(a.Encode.Dst, a.Encode.Src)
		case *NgoloFuzzOne_AppendEncode:
			a.AppendEncode.Dst = make([]byte, 2*len(a.AppendEncode.Src))
			hex.AppendEncode(a.AppendEncode.Dst, a.AppendEncode.Src)
		case *NgoloFuzzOne_DecodedLen:
			arg0 := int(a.DecodedLen.X)
			hex.DecodedLen(arg0)
		case *NgoloFuzzOne_Decode:
			// 2*len(src) is larger than decoding can ever need; oversizing
			// dst keeps Decode in bounds for any src.
			a.Decode.Dst = make([]byte, 2*len(a.Decode.Src))
			_, r1 := hex.Decode(a.Decode.Dst, a.Decode.Src)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_AppendDecode:
			a.AppendDecode.Dst = make([]byte, 2*len(a.AppendDecode.Src))
			_, r1 := hex.AppendDecode(a.AppendDecode.Dst, a.AppendDecode.Src)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_EncodeToString:
			hex.EncodeToString(a.EncodeToString.Src)
		case *NgoloFuzzOne_DecodeString:
			_, r1 := hex.DecodeString(a.DecodeString.S)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Dump:
			hex.Dump(a.Dump.Data)
		case *NgoloFuzzOne_NewEncoder:
			arg0 := bytes.NewBuffer(a.NewEncoder.W)
			hex.NewEncoder(arg0)
		case *NgoloFuzzOne_NewDecoder:
			arg0 := bytes.NewReader(a.NewDecoder.R)
			hex.NewDecoder(arg0)
		case *NgoloFuzzOne_Dumper:
			arg0 := bytes.NewBuffer(a.Dumper.W)
			hex.Dumper(arg0)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source-like reproducer to w: one line per call in
// gen.List, mirroring exactly what FuzzNG_List would execute. Write errors
// are ignored (best-effort debugging aid).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_EncodedLen:
			w.WriteString(fmt.Sprintf("hex.EncodedLen(int(%#+v))\n", a.EncodedLen.N))
		case *NgoloFuzzOne_Encode:
			w.WriteString(fmt.Sprintf("hex.Encode(%#+v, %#+v)\n", a.Encode.Dst, a.Encode.Src))
		case *NgoloFuzzOne_AppendEncode:
			w.WriteString(fmt.Sprintf("hex.AppendEncode(%#+v, %#+v)\n", a.AppendEncode.Dst, a.AppendEncode.Src))
		case *NgoloFuzzOne_DecodedLen:
			w.WriteString(fmt.Sprintf("hex.DecodedLen(int(%#+v))\n", a.DecodedLen.X))
		case *NgoloFuzzOne_Decode:
			w.WriteString(fmt.Sprintf("hex.Decode(%#+v, %#+v)\n", a.Decode.Dst, a.Decode.Src))
		case *NgoloFuzzOne_AppendDecode:
			w.WriteString(fmt.Sprintf("hex.AppendDecode(%#+v, %#+v)\n", a.AppendDecode.Dst, a.AppendDecode.Src))
		case *NgoloFuzzOne_EncodeToString:
			w.WriteString(fmt.Sprintf("hex.EncodeToString(%#+v)\n", a.EncodeToString.Src))
		case *NgoloFuzzOne_DecodeString:
			w.WriteString(fmt.Sprintf("hex.DecodeString(%#+v)\n", a.DecodeString.S))
		case *NgoloFuzzOne_Dump:
			w.WriteString(fmt.Sprintf("hex.Dump(%#+v)\n", a.Dump.Data))
		case *NgoloFuzzOne_NewEncoder:
			w.WriteString(fmt.Sprintf("hex.NewEncoder(bytes.NewBuffer(%#+v))\n", a.NewEncoder.W))
		case *NgoloFuzzOne_NewDecoder:
			w.WriteString(fmt.Sprintf("hex.NewDecoder(bytes.NewReader(%#+v))\n", a.NewDecoder.R))
		case *NgoloFuzzOne_Dumper:
			w.WriteString(fmt.Sprintf("hex.Dumper(bytes.NewBuffer(%#+v))\n", a.Dumper.W))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_hex
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: building fails if the linked protoimpl runtime is
// outside the [MinVersion, MaxVersion] window for generation version 20.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// EncodedLenArgs carries the argument for a fuzzed hex.EncodedLen call
// (generated from ngolofuzz.proto; do not edit by hand).
type EncodedLenArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	N             int64                  `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *EncodedLenArgs) Reset() {
	*x = EncodedLenArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *EncodedLenArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*EncodedLenArgs) ProtoMessage() {}

func (x *EncodedLenArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodedLenArgs.ProtoReflect.Descriptor instead.
func (*EncodedLenArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

func (x *EncodedLenArgs) GetN() int64 {
	if x != nil {
		return x.N
	}
	return 0
}
// EncodeArgs carries the dst/src arguments for a fuzzed hex.Encode call
// (generated from ngolofuzz.proto; do not edit by hand).
type EncodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Dst           []byte                 `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	Src           []byte                 `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *EncodeArgs) Reset() {
	*x = EncodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *EncodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*EncodeArgs) ProtoMessage() {}

func (x *EncodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodeArgs.ProtoReflect.Descriptor instead.
func (*EncodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

func (x *EncodeArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

func (x *EncodeArgs) GetSrc() []byte {
	if x != nil {
		return x.Src
	}
	return nil
}
// AppendEncodeArgs carries the dst/src arguments for a fuzzed
// hex.AppendEncode call (generated from ngolofuzz.proto; do not edit).
type AppendEncodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Dst           []byte                 `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	Src           []byte                 `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *AppendEncodeArgs) Reset() {
	*x = AppendEncodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *AppendEncodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*AppendEncodeArgs) ProtoMessage() {}

func (x *AppendEncodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendEncodeArgs.ProtoReflect.Descriptor instead.
func (*AppendEncodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

func (x *AppendEncodeArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

func (x *AppendEncodeArgs) GetSrc() []byte {
	if x != nil {
		return x.Src
	}
	return nil
}
// DecodedLenArgs carries the argument for a fuzzed hex.DecodedLen call
// (generated from ngolofuzz.proto; do not edit by hand).
type DecodedLenArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	X             int64                  `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DecodedLenArgs) Reset() {
	*x = DecodedLenArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DecodedLenArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DecodedLenArgs) ProtoMessage() {}

func (x *DecodedLenArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecodedLenArgs.ProtoReflect.Descriptor instead.
func (*DecodedLenArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

func (x *DecodedLenArgs) GetX() int64 {
	if x != nil {
		return x.X
	}
	return 0
}
// DecodeArgs carries the dst/src arguments for a fuzzed hex.Decode call
// (generated from ngolofuzz.proto; do not edit by hand).
type DecodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Dst           []byte                 `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	Src           []byte                 `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DecodeArgs) Reset() {
	*x = DecodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DecodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DecodeArgs) ProtoMessage() {}

func (x *DecodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecodeArgs.ProtoReflect.Descriptor instead.
func (*DecodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

func (x *DecodeArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

func (x *DecodeArgs) GetSrc() []byte {
	if x != nil {
		return x.Src
	}
	return nil
}
// AppendDecodeArgs carries the dst/src arguments for a fuzzed
// hex.AppendDecode call (generated from ngolofuzz.proto; do not edit).
type AppendDecodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Dst           []byte                 `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	Src           []byte                 `protobuf:"bytes,2,opt,name=src,proto3" json:"src,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *AppendDecodeArgs) Reset() {
	*x = AppendDecodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *AppendDecodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*AppendDecodeArgs) ProtoMessage() {}

func (x *AppendDecodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendDecodeArgs.ProtoReflect.Descriptor instead.
func (*AppendDecodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

func (x *AppendDecodeArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

func (x *AppendDecodeArgs) GetSrc() []byte {
	if x != nil {
		return x.Src
	}
	return nil
}
// EncodeToStringArgs carries the argument for a fuzzed hex.EncodeToString
// call (generated from ngolofuzz.proto; do not edit by hand).
type EncodeToStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Src           []byte                 `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *EncodeToStringArgs) Reset() {
	*x = EncodeToStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *EncodeToStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*EncodeToStringArgs) ProtoMessage() {}

func (x *EncodeToStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodeToStringArgs.ProtoReflect.Descriptor instead.
func (*EncodeToStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

func (x *EncodeToStringArgs) GetSrc() []byte {
	if x != nil {
		return x.Src
	}
	return nil
}
// DecodeStringArgs carries the argument for a fuzzed hex.DecodeString call
// (generated from ngolofuzz.proto; do not edit by hand).
type DecodeStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DecodeStringArgs) Reset() {
	*x = DecodeStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DecodeStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DecodeStringArgs) ProtoMessage() {}

func (x *DecodeStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecodeStringArgs.ProtoReflect.Descriptor instead.
func (*DecodeStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

func (x *DecodeStringArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// DumpArgs carries the argument for a fuzzed hex.Dump call
// (generated from ngolofuzz.proto; do not edit by hand).
type DumpArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Data          []byte                 `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DumpArgs) Reset() {
	*x = DumpArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DumpArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DumpArgs) ProtoMessage() {}

func (x *DumpArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DumpArgs.ProtoReflect.Descriptor instead.
func (*DumpArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

func (x *DumpArgs) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// NewEncoderArgs carries the writer seed bytes for a fuzzed hex.NewEncoder
// call (the driver wraps W in a bytes.Buffer). Generated; do not edit.
type NewEncoderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NewEncoderArgs) Reset() {
	*x = NewEncoderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NewEncoderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewEncoderArgs) ProtoMessage() {}

func (x *NewEncoderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewEncoderArgs.ProtoReflect.Descriptor instead.
func (*NewEncoderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

func (x *NewEncoderArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}
// NewDecoderArgs carries the reader bytes for a fuzzed hex.NewDecoder call
// (the driver wraps R in a bytes.Reader). Generated; do not edit.
type NewDecoderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NewDecoderArgs) Reset() {
	*x = NewDecoderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NewDecoderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewDecoderArgs) ProtoMessage() {}

func (x *NewDecoderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewDecoderArgs.ProtoReflect.Descriptor instead.
func (*NewDecoderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

func (x *NewDecoderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// DumperArgs carries the writer seed bytes for a fuzzed hex.Dumper call
// (the driver wraps W in a bytes.Buffer). Generated; do not edit.
type DumperArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DumperArgs) Reset() {
	*x = DumperArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DumperArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DumperArgs) ProtoMessage() {}

func (x *DumperArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DumperArgs.ProtoReflect.Descriptor instead.
func (*DumperArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

func (x *DumperArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}
// NgoloFuzzOne describes a single encoding/hex call to replay; exactly one
// oneof branch of Item is set. Generated from ngolofuzz.proto; do not edit.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_EncodedLen
	//	*NgoloFuzzOne_Encode
	//	*NgoloFuzzOne_AppendEncode
	//	*NgoloFuzzOne_DecodedLen
	//	*NgoloFuzzOne_Decode
	//	*NgoloFuzzOne_AppendDecode
	//	*NgoloFuzzOne_EncodeToString
	//	*NgoloFuzzOne_DecodeString
	//	*NgoloFuzzOne_Dump
	//	*NgoloFuzzOne_NewEncoder
	//	*NgoloFuzzOne_NewDecoder
	//	*NgoloFuzzOne_Dumper
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below each return the branch payload when that branch
// is the one set, and nil otherwise.
func (x *NgoloFuzzOne) GetEncodedLen() *EncodedLenArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodedLen); ok {
			return x.EncodedLen
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncode() *EncodeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Encode); ok {
			return x.Encode
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetAppendEncode() *AppendEncodeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendEncode); ok {
			return x.AppendEncode
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetDecodedLen() *DecodedLenArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecodedLen); ok {
			return x.DecodedLen
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetDecode() *DecodeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Decode); ok {
			return x.Decode
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetAppendDecode() *AppendDecodeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendDecode); ok {
			return x.AppendDecode
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEncodeToString() *EncodeToStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodeToString); ok {
			return x.EncodeToString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetDecodeString() *DecodeStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecodeString); ok {
			return x.DecodeString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetDump() *DumpArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Dump); ok {
			return x.Dump
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewEncoder() *NewEncoderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewEncoder); ok {
			return x.NewEncoder
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewDecoder() *NewDecoderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewDecoder); ok {
			return x.NewDecoder
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetDumper() *DumperArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Dumper); ok {
			return x.Dumper
		}
	}
	return nil
}
// isNgoloFuzzOne_Item is the oneof discriminator interface for
// NgoloFuzzOne.Item; only the generated wrapper types below implement it.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// One wrapper struct per oneof branch of NgoloFuzzOne (generated code).
type NgoloFuzzOne_EncodedLen struct {
	EncodedLen *EncodedLenArgs `protobuf:"bytes,1,opt,name=EncodedLen,proto3,oneof"`
}

type NgoloFuzzOne_Encode struct {
	Encode *EncodeArgs `protobuf:"bytes,2,opt,name=Encode,proto3,oneof"`
}

type NgoloFuzzOne_AppendEncode struct {
	AppendEncode *AppendEncodeArgs `protobuf:"bytes,3,opt,name=AppendEncode,proto3,oneof"`
}

type NgoloFuzzOne_DecodedLen struct {
	DecodedLen *DecodedLenArgs `protobuf:"bytes,4,opt,name=DecodedLen,proto3,oneof"`
}

type NgoloFuzzOne_Decode struct {
	Decode *DecodeArgs `protobuf:"bytes,5,opt,name=Decode,proto3,oneof"`
}

type NgoloFuzzOne_AppendDecode struct {
	AppendDecode *AppendDecodeArgs `protobuf:"bytes,6,opt,name=AppendDecode,proto3,oneof"`
}

type NgoloFuzzOne_EncodeToString struct {
	EncodeToString *EncodeToStringArgs `protobuf:"bytes,7,opt,name=EncodeToString,proto3,oneof"`
}

type NgoloFuzzOne_DecodeString struct {
	DecodeString *DecodeStringArgs `protobuf:"bytes,8,opt,name=DecodeString,proto3,oneof"`
}

type NgoloFuzzOne_Dump struct {
	Dump *DumpArgs `protobuf:"bytes,9,opt,name=Dump,proto3,oneof"`
}

type NgoloFuzzOne_NewEncoder struct {
	NewEncoder *NewEncoderArgs `protobuf:"bytes,10,opt,name=NewEncoder,proto3,oneof"`
}

type NgoloFuzzOne_NewDecoder struct {
	NewDecoder *NewDecoderArgs `protobuf:"bytes,11,opt,name=NewDecoder,proto3,oneof"`
}

type NgoloFuzzOne_Dumper struct {
	Dumper *DumperArgs `protobuf:"bytes,12,opt,name=Dumper,proto3,oneof"`
}

// Marker methods tying each wrapper type to the oneof interface.
func (*NgoloFuzzOne_EncodedLen) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Encode) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_AppendEncode) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_DecodedLen) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Decode) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_AppendDecode) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_EncodeToString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_DecodeString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Dump) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewEncoder) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewDecoder) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Dumper) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated variant message holding one scalar value of
// an arbitrary basic type; exactly one oneof branch of Item is set.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each accessor returns the branch value when that branch is set, else the
// zero value of its type.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}
// isNgoloFuzzAny_Item is the oneof discriminator interface for
// NgoloFuzzAny.Item; only the generated wrapper types below implement it.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// One wrapper struct per oneof branch of NgoloFuzzAny (generated code).
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

// Marker methods tying each wrapper type to the oneof interface.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level generated message: an ordered list of
// NgoloFuzzOne operations that the harness replays in sequence.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-attaches its
// reflection metadata.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect exposes the message through the protoreflect API.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetList returns the operation list, or nil on a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto.
// The escape bytes are generated — do not edit them by hand.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1e\n" +
	"\x0eEncodedLenArgs\x12\f\n" +
	"\x01n\x18\x01 \x01(\x03R\x01n\"0\n" +
	"\n" +
	"EncodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"6\n" +
	"\x10AppendEncodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"\x1e\n" +
	"\x0eDecodedLenArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\x03R\x01x\"0\n" +
	"\n" +
	"DecodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"6\n" +
	"\x10AppendDecodeArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\x10\n" +
	"\x03src\x18\x02 \x01(\fR\x03src\"&\n" +
	"\x12EncodeToStringArgs\x12\x10\n" +
	"\x03src\x18\x01 \x01(\fR\x03src\" \n" +
	"\x10DecodeStringArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\x1e\n" +
	"\bDumpArgs\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\"\x1e\n" +
	"\x0eNewEncoderArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\"\x1e\n" +
	"\x0eNewDecoderArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\"\x1a\n" +
	"\n" +
	"DumperArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\"\xda\x05\n" +
	"\fNgoloFuzzOne\x12;\n" +
	"\n" +
	"EncodedLen\x18\x01 \x01(\v2\x19.ngolofuzz.EncodedLenArgsH\x00R\n" +
	"EncodedLen\x12/\n" +
	"\x06Encode\x18\x02 \x01(\v2\x15.ngolofuzz.EncodeArgsH\x00R\x06Encode\x12A\n" +
	"\fAppendEncode\x18\x03 \x01(\v2\x1b.ngolofuzz.AppendEncodeArgsH\x00R\fAppendEncode\x12;\n" +
	"\n" +
	"DecodedLen\x18\x04 \x01(\v2\x19.ngolofuzz.DecodedLenArgsH\x00R\n" +
	"DecodedLen\x12/\n" +
	"\x06Decode\x18\x05 \x01(\v2\x15.ngolofuzz.DecodeArgsH\x00R\x06Decode\x12A\n" +
	"\fAppendDecode\x18\x06 \x01(\v2\x1b.ngolofuzz.AppendDecodeArgsH\x00R\fAppendDecode\x12G\n" +
	"\x0eEncodeToString\x18\a \x01(\v2\x1d.ngolofuzz.EncodeToStringArgsH\x00R\x0eEncodeToString\x12A\n" +
	"\fDecodeString\x18\b \x01(\v2\x1b.ngolofuzz.DecodeStringArgsH\x00R\fDecodeString\x12)\n" +
	"\x04Dump\x18\t \x01(\v2\x13.ngolofuzz.DumpArgsH\x00R\x04Dump\x12;\n" +
	"\n" +
	"NewEncoder\x18\n" +
	" \x01(\v2\x19.ngolofuzz.NewEncoderArgsH\x00R\n" +
	"NewEncoder\x12;\n" +
	"\n" +
	"NewDecoder\x18\v \x01(\v2\x19.ngolofuzz.NewDecoderArgsH\x00R\n" +
	"NewDecoder\x12/\n" +
	"\x06Dumper\x18\f \x01(\v2\x15.ngolofuzz.DumperArgsH\x00R\x06DumperB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x19Z\x17./;fuzz_ng_encoding_hexb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw
// descriptor once and caches the result for Descriptor() calls.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}

// file_ngolofuzz_proto_msgTypes holds runtime type info for the 15
// messages declared in this file.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
// file_ngolofuzz_proto_goTypes maps descriptor message indexes to their
// generated Go types.
var file_ngolofuzz_proto_goTypes = []any{
	(*EncodedLenArgs)(nil),     // 0: ngolofuzz.EncodedLenArgs
	(*EncodeArgs)(nil),         // 1: ngolofuzz.EncodeArgs
	(*AppendEncodeArgs)(nil),   // 2: ngolofuzz.AppendEncodeArgs
	(*DecodedLenArgs)(nil),     // 3: ngolofuzz.DecodedLenArgs
	(*DecodeArgs)(nil),         // 4: ngolofuzz.DecodeArgs
	(*AppendDecodeArgs)(nil),   // 5: ngolofuzz.AppendDecodeArgs
	(*EncodeToStringArgs)(nil), // 6: ngolofuzz.EncodeToStringArgs
	(*DecodeStringArgs)(nil),   // 7: ngolofuzz.DecodeStringArgs
	(*DumpArgs)(nil),           // 8: ngolofuzz.DumpArgs
	(*NewEncoderArgs)(nil),     // 9: ngolofuzz.NewEncoderArgs
	(*NewDecoderArgs)(nil),     // 10: ngolofuzz.NewDecoderArgs
	(*DumperArgs)(nil),         // 11: ngolofuzz.DumperArgs
	(*NgoloFuzzOne)(nil),       // 12: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),       // 13: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),      // 14: ngolofuzz.NgoloFuzzList
}

// file_ngolofuzz_proto_depIdxs encodes field -> type dependencies; the
// trailing entries delimit the method/extension sub-lists (all empty here).
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.EncodedLen:type_name -> ngolofuzz.EncodedLenArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.Encode:type_name -> ngolofuzz.EncodeArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.AppendEncode:type_name -> ngolofuzz.AppendEncodeArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.DecodedLen:type_name -> ngolofuzz.DecodedLenArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.Decode:type_name -> ngolofuzz.DecodeArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.AppendDecode:type_name -> ngolofuzz.AppendDecodeArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.EncodeToString:type_name -> ngolofuzz.EncodeToStringArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.DecodeString:type_name -> ngolofuzz.DecodeStringArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.Dump:type_name -> ngolofuzz.DumpArgs
	9,  // 9: ngolofuzz.NgoloFuzzOne.NewEncoder:type_name -> ngolofuzz.NewEncoderArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.NewDecoder:type_name -> ngolofuzz.NewDecoderArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.Dumper:type_name -> ngolofuzz.DumperArgs
	12, // 12: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	13, // [13:13] is the sub-list for method output_type
	13, // [13:13] is the sub-list for method input_type
	13, // [13:13] is the sub-list for extension type_name
	13, // [13:13] is the sub-list for extension extendee
	0,  // [0:13] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the oneof wrapper types and builds
// the file descriptor exactly once; later calls return immediately.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Message 12 (NgoloFuzzOne) oneof wrappers.
	file_ngolofuzz_proto_msgTypes[12].OneofWrappers = []any{
		(*NgoloFuzzOne_EncodedLen)(nil),
		(*NgoloFuzzOne_Encode)(nil),
		(*NgoloFuzzOne_AppendEncode)(nil),
		(*NgoloFuzzOne_DecodedLen)(nil),
		(*NgoloFuzzOne_Decode)(nil),
		(*NgoloFuzzOne_AppendDecode)(nil),
		(*NgoloFuzzOne_EncodeToString)(nil),
		(*NgoloFuzzOne_DecodeString)(nil),
		(*NgoloFuzzOne_Dump)(nil),
		(*NgoloFuzzOne_NewEncoder)(nil),
		(*NgoloFuzzOne_NewDecoder)(nil),
		(*NgoloFuzzOne_Dumper)(nil),
	}
	// Message 13 (NgoloFuzzAny) oneof wrappers.
	file_ngolofuzz_proto_msgTypes[13].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   15,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Allow the builder inputs to be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_json
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stand-in for a net.Conn: Read serves
// bytes from a fixed buffer and Write discards its input.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // current read position within buf
}

// Read copies up to len(b) bytes of the remaining buffer into b,
// honoring the io.Reader contract: n reports exactly how many bytes
// were copied, and io.EOF is returned once the buffer is exhausted.
//
// Bug fixed: the short-buffer test previously compared len(b) against
// len(c.buf)+c.offset instead of the remaining byte count
// len(c.buf)-c.offset. With a nonzero offset that branch reported more
// bytes read than were actually copied and advanced offset past the
// end of the buffer.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything that is left: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b can hold the whole tail of the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write pretends the whole of b was sent and discards the data.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}
// Close marks the buffer as fully consumed so any later Read hits EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr with constant values, used by
// FuzzingConn's LocalAddr and RemoteAddr.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed fake address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr reports a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return new(FuzzingAddr)
}

// RemoteAddr reports the same fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return new(FuzzingAddr)
}
// SetDeadline is a no-op: the in-memory connection never times out.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op for the in-memory connection.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op for the in-memory connection.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn returns a FuzzingConn that serves a via Read.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows every element of a from int64 to int.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates every element of a from int64 to uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL for the empty string.
func GetRune(s string) rune {
	runes := []rune(s)
	if len(runes) == 0 {
		return '\x00'
	}
	return runes[0]
}
// FuzzNG_valid replays data that is known to be a valid protobuf
// encoding of NgoloFuzzList; it panics when unmarshalling fails.
// String panics raised by the target are swallowed, anything else is
// re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		rec := recover()
		if rec == nil {
			return
		}
		if _, isString := rec.(string); !isString {
			// Non-string panics are real bugs: propagate them.
			panic(rec)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf

// FuzzNG_unsure behaves like FuzzNG_valid but tolerates invalid input:
// data that fails to unmarshal is simply skipped.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		rec := recover()
		if rec == nil {
			return
		}
		if _, isString := rec.(string); !isString {
			// Non-string panics are real bugs: propagate them.
			panic(rec)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets the generated operation list against
// encoding/json. Created Decoders/Encoders are kept in slices and later
// method calls pick a receiver round-robin via the *ResultsIndex
// counters. Returns 1 when the whole list was replayed, 0 when an
// error (or the 4096-operation cap) cut the run short.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On first call only: optionally write a Go reproducer of this
		// input to the path named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var EncoderResults []*json.Encoder
	EncoderResultsIndex := 0
	var DecoderResults []*json.Decoder
	DecoderResultsIndex := 0
	for l := range gen.List {
		// Cap the number of replayed operations per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Unmarshal_:
			r0 := json.Unmarshal(a.Unmarshal_.Data, a.Unmarshal_.V)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_Marshal_:
			_, r1 := json.Marshal(a.Marshal_.V)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_MarshalIndent:
			_, r1 := json.MarshalIndent(a.MarshalIndent.V, a.MarshalIndent.Prefix, a.MarshalIndent.Indent)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Valid:
			json.Valid(a.Valid.Data)
		case *NgoloFuzzOne_NewDecoder:
			// New decoders read from the fuzzer-supplied bytes.
			arg0 := bytes.NewReader(a.NewDecoder.R)
			r0 := json.NewDecoder(arg0)
			if r0 != nil {
				DecoderResults = append(DecoderResults, r0)
			}
		case *NgoloFuzzOne_DecoderNgdotUseNumber:
			if len(DecoderResults) == 0 {
				continue
			}
			arg0 := DecoderResults[DecoderResultsIndex]
			DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
			arg0.UseNumber()
		case *NgoloFuzzOne_DecoderNgdotDisallowUnknownFields:
			if len(DecoderResults) == 0 {
				continue
			}
			arg0 := DecoderResults[DecoderResultsIndex]
			DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
			arg0.DisallowUnknownFields()
		case *NgoloFuzzOne_DecoderNgdotDecode:
			if len(DecoderResults) == 0 {
				continue
			}
			arg0 := DecoderResults[DecoderResultsIndex]
			DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
			r0 := arg0.Decode(a.DecoderNgdotDecode.V)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_DecoderNgdotBuffered:
			if len(DecoderResults) == 0 {
				continue
			}
			arg0 := DecoderResults[DecoderResultsIndex]
			DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
			arg0.Buffered()
		case *NgoloFuzzOne_NewEncoder:
			// New encoders write into a throwaway buffer seeded with W.
			arg0 := bytes.NewBuffer(a.NewEncoder.W)
			r0 := json.NewEncoder(arg0)
			if r0 != nil {
				EncoderResults = append(EncoderResults, r0)
			}
		case *NgoloFuzzOne_EncoderNgdotEncode:
			if len(EncoderResults) == 0 {
				continue
			}
			arg0 := EncoderResults[EncoderResultsIndex]
			EncoderResultsIndex = (EncoderResultsIndex + 1) % len(EncoderResults)
			r0 := arg0.Encode(a.EncoderNgdotEncode.V)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_EncoderNgdotSetIndent:
			if len(EncoderResults) == 0 {
				continue
			}
			arg0 := EncoderResults[EncoderResultsIndex]
			EncoderResultsIndex = (EncoderResultsIndex + 1) % len(EncoderResults)
			arg0.SetIndent(a.EncoderNgdotSetIndent.Prefix, a.EncoderNgdotSetIndent.Indent)
		case *NgoloFuzzOne_EncoderNgdotSetEscapeHTML:
			if len(EncoderResults) == 0 {
				continue
			}
			arg0 := EncoderResults[EncoderResultsIndex]
			EncoderResultsIndex = (EncoderResultsIndex + 1) % len(EncoderResults)
			arg0.SetEscapeHTML(a.EncoderNgdotSetEscapeHTML.On)
		case *NgoloFuzzOne_DecoderNgdotToken:
			if len(DecoderResults) == 0 {
				continue
			}
			arg0 := DecoderResults[DecoderResultsIndex]
			DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
			_, r1 := arg0.Token()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_DecoderNgdotMore:
			if len(DecoderResults) == 0 {
				continue
			}
			arg0 := DecoderResults[DecoderResultsIndex]
			DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
			arg0.More()
		case *NgoloFuzzOne_DecoderNgdotInputOffset:
			if len(DecoderResults) == 0 {
				continue
			}
			arg0 := DecoderResults[DecoderResultsIndex]
			DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
			arg0.InputOffset()
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go reproducer for gen to w,
// mirroring FuzzNG_List's replay logic: Decoder%d/Encoder%d variables
// are numbered in creation order and method receivers are chosen with
// the same round-robin counters.
//
// NOTE(review): unlike FuzzNG_List, the counters here are printed
// before being advanced, and errors from WriteString are ignored —
// best-effort output only.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	EncoderNb := 0
	EncoderResultsIndex := 0
	DecoderNb := 0
	DecoderResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Unmarshal_:
			w.WriteString(fmt.Sprintf("json.Unmarshal(%#+v, %#+v)\n", a.Unmarshal_.Data, a.Unmarshal_.V))
		case *NgoloFuzzOne_Marshal_:
			w.WriteString(fmt.Sprintf("json.Marshal(%#+v)\n", a.Marshal_.V))
		case *NgoloFuzzOne_MarshalIndent:
			w.WriteString(fmt.Sprintf("json.MarshalIndent(%#+v, %#+v, %#+v)\n", a.MarshalIndent.V, a.MarshalIndent.Prefix, a.MarshalIndent.Indent))
		case *NgoloFuzzOne_Valid:
			w.WriteString(fmt.Sprintf("json.Valid(%#+v)\n", a.Valid.Data))
		case *NgoloFuzzOne_NewDecoder:
			w.WriteString(fmt.Sprintf("Decoder%d := json.NewDecoder(bytes.NewReader(%#+v))\n", DecoderNb, a.NewDecoder.R))
			DecoderNb = DecoderNb + 1
		case *NgoloFuzzOne_DecoderNgdotUseNumber:
			if DecoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Decoder%d.UseNumber()\n", DecoderResultsIndex))
			DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
		case *NgoloFuzzOne_DecoderNgdotDisallowUnknownFields:
			if DecoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Decoder%d.DisallowUnknownFields()\n", DecoderResultsIndex))
			DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
		case *NgoloFuzzOne_DecoderNgdotDecode:
			if DecoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Decoder%d.Decode(%#+v)\n", DecoderResultsIndex, a.DecoderNgdotDecode.V))
			DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
		case *NgoloFuzzOne_DecoderNgdotBuffered:
			if DecoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Decoder%d.Buffered()\n", DecoderResultsIndex))
			DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
		case *NgoloFuzzOne_NewEncoder:
			w.WriteString(fmt.Sprintf("Encoder%d := json.NewEncoder(bytes.NewBuffer(%#+v))\n", EncoderNb, a.NewEncoder.W))
			EncoderNb = EncoderNb + 1
		case *NgoloFuzzOne_EncoderNgdotEncode:
			if EncoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoder%d.Encode(%#+v)\n", EncoderResultsIndex, a.EncoderNgdotEncode.V))
			EncoderResultsIndex = (EncoderResultsIndex + 1) % EncoderNb
		case *NgoloFuzzOne_EncoderNgdotSetIndent:
			if EncoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoder%d.SetIndent(%#+v, %#+v)\n", EncoderResultsIndex, a.EncoderNgdotSetIndent.Prefix, a.EncoderNgdotSetIndent.Indent))
			EncoderResultsIndex = (EncoderResultsIndex + 1) % EncoderNb
		case *NgoloFuzzOne_EncoderNgdotSetEscapeHTML:
			if EncoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Encoder%d.SetEscapeHTML(%#+v)\n", EncoderResultsIndex, a.EncoderNgdotSetEscapeHTML.On))
			EncoderResultsIndex = (EncoderResultsIndex + 1) % EncoderNb
		case *NgoloFuzzOne_DecoderNgdotToken:
			if DecoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Decoder%d.Token()\n", DecoderResultsIndex))
			DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
		case *NgoloFuzzOne_DecoderNgdotMore:
			if DecoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Decoder%d.More()\n", DecoderResultsIndex))
			DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
		case *NgoloFuzzOne_DecoderNgdotInputOffset:
			if DecoderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Decoder%d.InputOffset()\n", DecoderResultsIndex))
			DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_json
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// UnmarshalArgs carries the arguments for a json.Unmarshal call.
type UnmarshalArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Data          []byte                 `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	V             *NgoloFuzzAny          `protobuf:"bytes,2,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-attaches its reflection metadata.
func (x *UnmarshalArgs) Reset() {
	*x = UnmarshalArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *UnmarshalArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UnmarshalArgs) ProtoMessage() {}

func (x *UnmarshalArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UnmarshalArgs.ProtoReflect.Descriptor instead.
func (*UnmarshalArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

func (x *UnmarshalArgs) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}

func (x *UnmarshalArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}

// MarshalArgs carries the argument for a json.Marshal call.
type MarshalArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	V             *NgoloFuzzAny          `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *MarshalArgs) Reset() {
	*x = MarshalArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *MarshalArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*MarshalArgs) ProtoMessage() {}

func (x *MarshalArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MarshalArgs.ProtoReflect.Descriptor instead.
func (*MarshalArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

func (x *MarshalArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}

// MarshalIndentArgs carries the arguments for json.MarshalIndent.
type MarshalIndentArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	V             *NgoloFuzzAny          `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	Prefix        string                 `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
	Indent        string                 `protobuf:"bytes,3,opt,name=indent,proto3" json:"indent,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *MarshalIndentArgs) Reset() {
	*x = MarshalIndentArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *MarshalIndentArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*MarshalIndentArgs) ProtoMessage() {}

func (x *MarshalIndentArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MarshalIndentArgs.ProtoReflect.Descriptor instead.
func (*MarshalIndentArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

func (x *MarshalIndentArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}

func (x *MarshalIndentArgs) GetPrefix() string {
	if x != nil {
		return x.Prefix
	}
	return ""
}

func (x *MarshalIndentArgs) GetIndent() string {
	if x != nil {
		return x.Indent
	}
	return ""
}
// ValidArgs carries the argument for a json.Valid call.
type ValidArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Data          []byte                 `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ValidArgs) Reset() {
	*x = ValidArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ValidArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ValidArgs) ProtoMessage() {}

func (x *ValidArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ValidArgs.ProtoReflect.Descriptor instead.
func (*ValidArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

func (x *ValidArgs) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}

// NewDecoderArgs carries the raw bytes fed to json.NewDecoder.
type NewDecoderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NewDecoderArgs) Reset() {
	*x = NewDecoderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NewDecoderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewDecoderArgs) ProtoMessage() {}

func (x *NewDecoderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewDecoderArgs.ProtoReflect.Descriptor instead.
func (*NewDecoderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

func (x *NewDecoderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}

// DecoderNgdotUseNumberArgs is an empty marker for Decoder.UseNumber.
type DecoderNgdotUseNumberArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DecoderNgdotUseNumberArgs) Reset() {
	*x = DecoderNgdotUseNumberArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DecoderNgdotUseNumberArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DecoderNgdotUseNumberArgs) ProtoMessage() {}

func (x *DecoderNgdotUseNumberArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecoderNgdotUseNumberArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotUseNumberArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// DecoderNgdotDisallowUnknownFieldsArgs is an empty marker for
// Decoder.DisallowUnknownFields.
type DecoderNgdotDisallowUnknownFieldsArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DecoderNgdotDisallowUnknownFieldsArgs) Reset() {
	*x = DecoderNgdotDisallowUnknownFieldsArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DecoderNgdotDisallowUnknownFieldsArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DecoderNgdotDisallowUnknownFieldsArgs) ProtoMessage() {}

func (x *DecoderNgdotDisallowUnknownFieldsArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecoderNgdotDisallowUnknownFieldsArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotDisallowUnknownFieldsArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// DecoderNgdotDecodeArgs carries the target value for Decoder.Decode.
type DecoderNgdotDecodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	V             *NgoloFuzzAny          `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DecoderNgdotDecodeArgs) Reset() {
	*x = DecoderNgdotDecodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DecoderNgdotDecodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DecoderNgdotDecodeArgs) ProtoMessage() {}

func (x *DecoderNgdotDecodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecoderNgdotDecodeArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotDecodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

func (x *DecoderNgdotDecodeArgs) GetV() *NgoloFuzzAny {
	if x != nil {
		return x.V
	}
	return nil
}

// DecoderNgdotBufferedArgs is an empty marker for Decoder.Buffered.
type DecoderNgdotBufferedArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DecoderNgdotBufferedArgs) Reset() {
	*x = DecoderNgdotBufferedArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DecoderNgdotBufferedArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DecoderNgdotBufferedArgs) ProtoMessage() {}

func (x *DecoderNgdotBufferedArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecoderNgdotBufferedArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotBufferedArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// NewEncoderArgs carries the initial buffer handed to json.NewEncoder.
type NewEncoderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NewEncoderArgs) Reset() {
	*x = NewEncoderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NewEncoderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewEncoderArgs) ProtoMessage() {}

func (x *NewEncoderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewEncoderArgs.ProtoReflect.Descriptor instead.
func (*NewEncoderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

func (x *NewEncoderArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}
type EncoderNgdotEncodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
V *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncoderNgdotEncodeArgs) Reset() {
*x = EncoderNgdotEncodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncoderNgdotEncodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncoderNgdotEncodeArgs) ProtoMessage() {}
func (x *EncoderNgdotEncodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncoderNgdotEncodeArgs.ProtoReflect.Descriptor instead.
func (*EncoderNgdotEncodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *EncoderNgdotEncodeArgs) GetV() *NgoloFuzzAny {
if x != nil {
return x.V
}
return nil
}
// EncoderNgdotSetIndentArgs carries the (prefix, indent) arguments for a
// fuzzed call to Encoder.SetIndent.
type EncoderNgdotSetIndentArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Prefix        string                 `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
	Indent        string                 `protobuf:"bytes,2,opt,name=indent,proto3" json:"indent,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its type info.
func (x *EncoderNgdotSetIndentArgs) Reset() {
	*x = EncoderNgdotSetIndentArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf runtime's text formatter.
func (x *EncoderNgdotSetIndentArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*EncoderNgdotSetIndentArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *EncoderNgdotSetIndentArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncoderNgdotSetIndentArgs.ProtoReflect.Descriptor instead.
func (*EncoderNgdotSetIndentArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetPrefix returns the prefix argument ("" if unset).
func (x *EncoderNgdotSetIndentArgs) GetPrefix() string {
	if x != nil {
		return x.Prefix
	}
	return ""
}

// GetIndent returns the indent argument ("" if unset).
func (x *EncoderNgdotSetIndentArgs) GetIndent() string {
	if x != nil {
		return x.Indent
	}
	return ""
}
// EncoderNgdotSetEscapeHTMLArgs carries the boolean argument for a fuzzed
// call to Encoder.SetEscapeHTML.
type EncoderNgdotSetEscapeHTMLArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	On            bool                   `protobuf:"varint,1,opt,name=on,proto3" json:"on,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its type info.
func (x *EncoderNgdotSetEscapeHTMLArgs) Reset() {
	*x = EncoderNgdotSetEscapeHTMLArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf runtime's text formatter.
func (x *EncoderNgdotSetEscapeHTMLArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*EncoderNgdotSetEscapeHTMLArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *EncoderNgdotSetEscapeHTMLArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncoderNgdotSetEscapeHTMLArgs.ProtoReflect.Descriptor instead.
func (*EncoderNgdotSetEscapeHTMLArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// GetOn returns the on flag (false if unset).
func (x *EncoderNgdotSetEscapeHTMLArgs) GetOn() bool {
	if x != nil {
		return x.On
	}
	return false
}
// DecoderNgdotTokenArgs is the (empty) argument message for a fuzzed call
// to Decoder.Token.
type DecoderNgdotTokenArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its type info.
func (x *DecoderNgdotTokenArgs) Reset() {
	*x = DecoderNgdotTokenArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf runtime's text formatter.
func (x *DecoderNgdotTokenArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*DecoderNgdotTokenArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *DecoderNgdotTokenArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecoderNgdotTokenArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotTokenArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// DecoderNgdotMoreArgs is the (empty) argument message for a fuzzed call
// to Decoder.More.
type DecoderNgdotMoreArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its type info.
func (x *DecoderNgdotMoreArgs) Reset() {
	*x = DecoderNgdotMoreArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf runtime's text formatter.
func (x *DecoderNgdotMoreArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*DecoderNgdotMoreArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *DecoderNgdotMoreArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecoderNgdotMoreArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotMoreArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// DecoderNgdotInputOffsetArgs is the (empty) argument message for a fuzzed
// call to Decoder.InputOffset.
type DecoderNgdotInputOffsetArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its type info.
func (x *DecoderNgdotInputOffsetArgs) Reset() {
	*x = DecoderNgdotInputOffsetArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf runtime's text formatter.
func (x *DecoderNgdotInputOffsetArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*DecoderNgdotInputOffsetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *DecoderNgdotInputOffsetArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecoderNgdotInputOffsetArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotInputOffsetArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// NgoloFuzzOne is a oneof-dispatch message: each instance selects exactly one
// encoding/json API call (plus its arguments) for the fuzz driver to perform.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_Unmarshal_
	//	*NgoloFuzzOne_Marshal_
	//	*NgoloFuzzOne_MarshalIndent
	//	*NgoloFuzzOne_Valid
	//	*NgoloFuzzOne_NewDecoder
	//	*NgoloFuzzOne_DecoderNgdotUseNumber
	//	*NgoloFuzzOne_DecoderNgdotDisallowUnknownFields
	//	*NgoloFuzzOne_DecoderNgdotDecode
	//	*NgoloFuzzOne_DecoderNgdotBuffered
	//	*NgoloFuzzOne_NewEncoder
	//	*NgoloFuzzOne_EncoderNgdotEncode
	//	*NgoloFuzzOne_EncoderNgdotSetIndent
	//	*NgoloFuzzOne_EncoderNgdotSetEscapeHTML
	//	*NgoloFuzzOne_DecoderNgdotToken
	//	*NgoloFuzzOne_DecoderNgdotMore
	//	*NgoloFuzzOne_DecoderNgdotInputOffset
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its type info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf runtime's text formatter.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// GetItem returns the oneof wrapper currently stored, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below return the corresponding oneof payload, or nil
// when a different variant (or none) is set.
func (x *NgoloFuzzOne) GetUnmarshal_() *UnmarshalArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Unmarshal_); ok {
			return x.Unmarshal_
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetMarshal_() *MarshalArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Marshal_); ok {
			return x.Marshal_
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetMarshalIndent() *MarshalIndentArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_MarshalIndent); ok {
			return x.MarshalIndent
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetValid() *ValidArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Valid); ok {
			return x.Valid
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetNewDecoder() *NewDecoderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewDecoder); ok {
			return x.NewDecoder
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotUseNumber() *DecoderNgdotUseNumberArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotUseNumber); ok {
			return x.DecoderNgdotUseNumber
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotDisallowUnknownFields() *DecoderNgdotDisallowUnknownFieldsArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotDisallowUnknownFields); ok {
			return x.DecoderNgdotDisallowUnknownFields
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotDecode() *DecoderNgdotDecodeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotDecode); ok {
			return x.DecoderNgdotDecode
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotBuffered() *DecoderNgdotBufferedArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotBuffered); ok {
			return x.DecoderNgdotBuffered
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetNewEncoder() *NewEncoderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewEncoder); ok {
			return x.NewEncoder
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetEncoderNgdotEncode() *EncoderNgdotEncodeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncoderNgdotEncode); ok {
			return x.EncoderNgdotEncode
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetEncoderNgdotSetIndent() *EncoderNgdotSetIndentArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncoderNgdotSetIndent); ok {
			return x.EncoderNgdotSetIndent
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetEncoderNgdotSetEscapeHTML() *EncoderNgdotSetEscapeHTMLArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncoderNgdotSetEscapeHTML); ok {
			return x.EncoderNgdotSetEscapeHTML
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotToken() *DecoderNgdotTokenArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotToken); ok {
			return x.DecoderNgdotToken
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotMore() *DecoderNgdotMoreArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotMore); ok {
			return x.DecoderNgdotMore
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotInputOffset() *DecoderNgdotInputOffsetArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotInputOffset); ok {
			return x.DecoderNgdotInputOffset
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// One wrapper struct per oneof variant; the field number in the protobuf
// tag matches the field in ngolofuzz.proto.
type NgoloFuzzOne_Unmarshal_ struct {
	Unmarshal_ *UnmarshalArgs `protobuf:"bytes,1,opt,name=Unmarshal,proto3,oneof"`
}
type NgoloFuzzOne_Marshal_ struct {
	Marshal_ *MarshalArgs `protobuf:"bytes,2,opt,name=Marshal,proto3,oneof"`
}
type NgoloFuzzOne_MarshalIndent struct {
	MarshalIndent *MarshalIndentArgs `protobuf:"bytes,3,opt,name=MarshalIndent,proto3,oneof"`
}
type NgoloFuzzOne_Valid struct {
	Valid *ValidArgs `protobuf:"bytes,4,opt,name=Valid,proto3,oneof"`
}
type NgoloFuzzOne_NewDecoder struct {
	NewDecoder *NewDecoderArgs `protobuf:"bytes,5,opt,name=NewDecoder,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotUseNumber struct {
	DecoderNgdotUseNumber *DecoderNgdotUseNumberArgs `protobuf:"bytes,6,opt,name=DecoderNgdotUseNumber,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotDisallowUnknownFields struct {
	DecoderNgdotDisallowUnknownFields *DecoderNgdotDisallowUnknownFieldsArgs `protobuf:"bytes,7,opt,name=DecoderNgdotDisallowUnknownFields,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotDecode struct {
	DecoderNgdotDecode *DecoderNgdotDecodeArgs `protobuf:"bytes,8,opt,name=DecoderNgdotDecode,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotBuffered struct {
	DecoderNgdotBuffered *DecoderNgdotBufferedArgs `protobuf:"bytes,9,opt,name=DecoderNgdotBuffered,proto3,oneof"`
}
type NgoloFuzzOne_NewEncoder struct {
	NewEncoder *NewEncoderArgs `protobuf:"bytes,10,opt,name=NewEncoder,proto3,oneof"`
}
type NgoloFuzzOne_EncoderNgdotEncode struct {
	EncoderNgdotEncode *EncoderNgdotEncodeArgs `protobuf:"bytes,11,opt,name=EncoderNgdotEncode,proto3,oneof"`
}
type NgoloFuzzOne_EncoderNgdotSetIndent struct {
	EncoderNgdotSetIndent *EncoderNgdotSetIndentArgs `protobuf:"bytes,12,opt,name=EncoderNgdotSetIndent,proto3,oneof"`
}
type NgoloFuzzOne_EncoderNgdotSetEscapeHTML struct {
	EncoderNgdotSetEscapeHTML *EncoderNgdotSetEscapeHTMLArgs `protobuf:"bytes,13,opt,name=EncoderNgdotSetEscapeHTML,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotToken struct {
	DecoderNgdotToken *DecoderNgdotTokenArgs `protobuf:"bytes,14,opt,name=DecoderNgdotToken,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotMore struct {
	DecoderNgdotMore *DecoderNgdotMoreArgs `protobuf:"bytes,15,opt,name=DecoderNgdotMore,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotInputOffset struct {
	DecoderNgdotInputOffset *DecoderNgdotInputOffsetArgs `protobuf:"bytes,16,opt,name=DecoderNgdotInputOffset,proto3,oneof"`
}

// Marker methods that seal the wrapper types into the oneof interface.
func (*NgoloFuzzOne_Unmarshal_) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Marshal_) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MarshalIndent) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Valid) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewDecoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotUseNumber) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotDisallowUnknownFields) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotDecode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotBuffered) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewEncoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncoderNgdotEncode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncoderNgdotSetIndent) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncoderNgdotSetEscapeHTML) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotToken) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotMore) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotInputOffset) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the fuzzer's stand-in for an arbitrary Go value: a oneof
// over the scalar kinds the generator can produce.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its type info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf runtime's text formatter.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}

// GetItem returns the oneof wrapper currently stored, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below return the selected scalar, or that scalar's
// zero value when a different variant (or none) is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by the wrapper
// types below.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// One wrapper struct per scalar variant.
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

// Marker methods sealing the wrappers into the oneof interface.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of API
// calls for the driver to replay.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its type info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf runtime's text formatter.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}

// GetList returns the call sequence, or nil if unset.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled FileDescriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto. Do not edit: the bytes must match the .proto exactly.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"J\n" +
	"\rUnmarshalArgs\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\x12%\n" +
	"\x01v\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"4\n" +
	"\vMarshalArgs\x12%\n" +
	"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"j\n" +
	"\x11MarshalIndentArgs\x12%\n" +
	"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\x12\x16\n" +
	"\x06prefix\x18\x02 \x01(\tR\x06prefix\x12\x16\n" +
	"\x06indent\x18\x03 \x01(\tR\x06indent\"\x1f\n" +
	"\tValidArgs\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\"\x1e\n" +
	"\x0eNewDecoderArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\"\x1b\n" +
	"\x19DecoderNgdotUseNumberArgs\"'\n" +
	"%DecoderNgdotDisallowUnknownFieldsArgs\"?\n" +
	"\x16DecoderNgdotDecodeArgs\x12%\n" +
	"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"\x1a\n" +
	"\x18DecoderNgdotBufferedArgs\"\x1e\n" +
	"\x0eNewEncoderArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\"?\n" +
	"\x16EncoderNgdotEncodeArgs\x12%\n" +
	"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"K\n" +
	"\x19EncoderNgdotSetIndentArgs\x12\x16\n" +
	"\x06prefix\x18\x01 \x01(\tR\x06prefix\x12\x16\n" +
	"\x06indent\x18\x02 \x01(\tR\x06indent\"/\n" +
	"\x1dEncoderNgdotSetEscapeHTMLArgs\x12\x0e\n" +
	"\x02on\x18\x01 \x01(\bR\x02on\"\x17\n" +
	"\x15DecoderNgdotTokenArgs\"\x16\n" +
	"\x14DecoderNgdotMoreArgs\"\x1d\n" +
	"\x1bDecoderNgdotInputOffsetArgs\"\xa5\n" +
	"\n" +
	"\fNgoloFuzzOne\x128\n" +
	"\tUnmarshal\x18\x01 \x01(\v2\x18.ngolofuzz.UnmarshalArgsH\x00R\tUnmarshal\x122\n" +
	"\aMarshal\x18\x02 \x01(\v2\x16.ngolofuzz.MarshalArgsH\x00R\aMarshal\x12D\n" +
	"\rMarshalIndent\x18\x03 \x01(\v2\x1c.ngolofuzz.MarshalIndentArgsH\x00R\rMarshalIndent\x12,\n" +
	"\x05Valid\x18\x04 \x01(\v2\x14.ngolofuzz.ValidArgsH\x00R\x05Valid\x12;\n" +
	"\n" +
	"NewDecoder\x18\x05 \x01(\v2\x19.ngolofuzz.NewDecoderArgsH\x00R\n" +
	"NewDecoder\x12\\\n" +
	"\x15DecoderNgdotUseNumber\x18\x06 \x01(\v2$.ngolofuzz.DecoderNgdotUseNumberArgsH\x00R\x15DecoderNgdotUseNumber\x12\x80\x01\n" +
	"!DecoderNgdotDisallowUnknownFields\x18\a \x01(\v20.ngolofuzz.DecoderNgdotDisallowUnknownFieldsArgsH\x00R!DecoderNgdotDisallowUnknownFields\x12S\n" +
	"\x12DecoderNgdotDecode\x18\b \x01(\v2!.ngolofuzz.DecoderNgdotDecodeArgsH\x00R\x12DecoderNgdotDecode\x12Y\n" +
	"\x14DecoderNgdotBuffered\x18\t \x01(\v2#.ngolofuzz.DecoderNgdotBufferedArgsH\x00R\x14DecoderNgdotBuffered\x12;\n" +
	"\n" +
	"NewEncoder\x18\n" +
	" \x01(\v2\x19.ngolofuzz.NewEncoderArgsH\x00R\n" +
	"NewEncoder\x12S\n" +
	"\x12EncoderNgdotEncode\x18\v \x01(\v2!.ngolofuzz.EncoderNgdotEncodeArgsH\x00R\x12EncoderNgdotEncode\x12\\\n" +
	"\x15EncoderNgdotSetIndent\x18\f \x01(\v2$.ngolofuzz.EncoderNgdotSetIndentArgsH\x00R\x15EncoderNgdotSetIndent\x12h\n" +
	"\x19EncoderNgdotSetEscapeHTML\x18\r \x01(\v2(.ngolofuzz.EncoderNgdotSetEscapeHTMLArgsH\x00R\x19EncoderNgdotSetEscapeHTML\x12P\n" +
	"\x11DecoderNgdotToken\x18\x0e \x01(\v2 .ngolofuzz.DecoderNgdotTokenArgsH\x00R\x11DecoderNgdotToken\x12M\n" +
	"\x10DecoderNgdotMore\x18\x0f \x01(\v2\x1f.ngolofuzz.DecoderNgdotMoreArgsH\x00R\x10DecoderNgdotMore\x12b\n" +
	"\x17DecoderNgdotInputOffset\x18\x10 \x01(\v2&.ngolofuzz.DecoderNgdotInputOffsetArgsH\x00R\x17DecoderNgdotInputOffsetB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_encoding_jsonb\x06proto3"

// Lazily computed gzipped form of the raw descriptor, shared by all
// Descriptor() methods in this file.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzips the raw descriptor exactly once
// and returns the cached bytes.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime type tables: one MessageInfo slot per message, the Go type for
// each message index, and the flattened dependency index table used by the
// TypeBuilder below.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
var file_ngolofuzz_proto_goTypes = []any{
	(*UnmarshalArgs)(nil),                         // 0: ngolofuzz.UnmarshalArgs
	(*MarshalArgs)(nil),                           // 1: ngolofuzz.MarshalArgs
	(*MarshalIndentArgs)(nil),                     // 2: ngolofuzz.MarshalIndentArgs
	(*ValidArgs)(nil),                             // 3: ngolofuzz.ValidArgs
	(*NewDecoderArgs)(nil),                        // 4: ngolofuzz.NewDecoderArgs
	(*DecoderNgdotUseNumberArgs)(nil),             // 5: ngolofuzz.DecoderNgdotUseNumberArgs
	(*DecoderNgdotDisallowUnknownFieldsArgs)(nil), // 6: ngolofuzz.DecoderNgdotDisallowUnknownFieldsArgs
	(*DecoderNgdotDecodeArgs)(nil),                // 7: ngolofuzz.DecoderNgdotDecodeArgs
	(*DecoderNgdotBufferedArgs)(nil),              // 8: ngolofuzz.DecoderNgdotBufferedArgs
	(*NewEncoderArgs)(nil),                        // 9: ngolofuzz.NewEncoderArgs
	(*EncoderNgdotEncodeArgs)(nil),                // 10: ngolofuzz.EncoderNgdotEncodeArgs
	(*EncoderNgdotSetIndentArgs)(nil),             // 11: ngolofuzz.EncoderNgdotSetIndentArgs
	(*EncoderNgdotSetEscapeHTMLArgs)(nil),         // 12: ngolofuzz.EncoderNgdotSetEscapeHTMLArgs
	(*DecoderNgdotTokenArgs)(nil),                 // 13: ngolofuzz.DecoderNgdotTokenArgs
	(*DecoderNgdotMoreArgs)(nil),                  // 14: ngolofuzz.DecoderNgdotMoreArgs
	(*DecoderNgdotInputOffsetArgs)(nil),           // 15: ngolofuzz.DecoderNgdotInputOffsetArgs
	(*NgoloFuzzOne)(nil),                          // 16: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                          // 17: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                         // 18: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
	17, // 0: ngolofuzz.UnmarshalArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	17, // 1: ngolofuzz.MarshalArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	17, // 2: ngolofuzz.MarshalIndentArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	17, // 3: ngolofuzz.DecoderNgdotDecodeArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	17, // 4: ngolofuzz.EncoderNgdotEncodeArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	0,  // 5: ngolofuzz.NgoloFuzzOne.Unmarshal:type_name -> ngolofuzz.UnmarshalArgs
	1,  // 6: ngolofuzz.NgoloFuzzOne.Marshal:type_name -> ngolofuzz.MarshalArgs
	2,  // 7: ngolofuzz.NgoloFuzzOne.MarshalIndent:type_name -> ngolofuzz.MarshalIndentArgs
	3,  // 8: ngolofuzz.NgoloFuzzOne.Valid:type_name -> ngolofuzz.ValidArgs
	4,  // 9: ngolofuzz.NgoloFuzzOne.NewDecoder:type_name -> ngolofuzz.NewDecoderArgs
	5,  // 10: ngolofuzz.NgoloFuzzOne.DecoderNgdotUseNumber:type_name -> ngolofuzz.DecoderNgdotUseNumberArgs
	6,  // 11: ngolofuzz.NgoloFuzzOne.DecoderNgdotDisallowUnknownFields:type_name -> ngolofuzz.DecoderNgdotDisallowUnknownFieldsArgs
	7,  // 12: ngolofuzz.NgoloFuzzOne.DecoderNgdotDecode:type_name -> ngolofuzz.DecoderNgdotDecodeArgs
	8,  // 13: ngolofuzz.NgoloFuzzOne.DecoderNgdotBuffered:type_name -> ngolofuzz.DecoderNgdotBufferedArgs
	9,  // 14: ngolofuzz.NgoloFuzzOne.NewEncoder:type_name -> ngolofuzz.NewEncoderArgs
	10, // 15: ngolofuzz.NgoloFuzzOne.EncoderNgdotEncode:type_name -> ngolofuzz.EncoderNgdotEncodeArgs
	11, // 16: ngolofuzz.NgoloFuzzOne.EncoderNgdotSetIndent:type_name -> ngolofuzz.EncoderNgdotSetIndentArgs
	12, // 17: ngolofuzz.NgoloFuzzOne.EncoderNgdotSetEscapeHTML:type_name -> ngolofuzz.EncoderNgdotSetEscapeHTMLArgs
	13, // 18: ngolofuzz.NgoloFuzzOne.DecoderNgdotToken:type_name -> ngolofuzz.DecoderNgdotTokenArgs
	14, // 19: ngolofuzz.NgoloFuzzOne.DecoderNgdotMore:type_name -> ngolofuzz.DecoderNgdotMoreArgs
	15, // 20: ngolofuzz.NgoloFuzzOne.DecoderNgdotInputOffset:type_name -> ngolofuzz.DecoderNgdotInputOffsetArgs
	16, // 21: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	22, // [22:22] is the sub-list for method output_type
	22, // [22:22] is the sub-list for method input_type
	22, // [22:22] is the sub-list for extension type_name
	22, // [22:22] is the sub-list for extension extendee
	0,  // [0:22] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the oneof wrapper types and builds the
// file descriptor exactly once; subsequent calls are no-ops.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Register the oneof wrapper types for NgoloFuzzOne (index 16).
	file_ngolofuzz_proto_msgTypes[16].OneofWrappers = []any{
		(*NgoloFuzzOne_Unmarshal_)(nil),
		(*NgoloFuzzOne_Marshal_)(nil),
		(*NgoloFuzzOne_MarshalIndent)(nil),
		(*NgoloFuzzOne_Valid)(nil),
		(*NgoloFuzzOne_NewDecoder)(nil),
		(*NgoloFuzzOne_DecoderNgdotUseNumber)(nil),
		(*NgoloFuzzOne_DecoderNgdotDisallowUnknownFields)(nil),
		(*NgoloFuzzOne_DecoderNgdotDecode)(nil),
		(*NgoloFuzzOne_DecoderNgdotBuffered)(nil),
		(*NgoloFuzzOne_NewEncoder)(nil),
		(*NgoloFuzzOne_EncoderNgdotEncode)(nil),
		(*NgoloFuzzOne_EncoderNgdotSetIndent)(nil),
		(*NgoloFuzzOne_EncoderNgdotSetEscapeHTML)(nil),
		(*NgoloFuzzOne_DecoderNgdotToken)(nil),
		(*NgoloFuzzOne_DecoderNgdotMore)(nil),
		(*NgoloFuzzOne_DecoderNgdotInputOffset)(nil),
	}
	// Register the oneof wrapper types for NgoloFuzzAny (index 17).
	file_ngolofuzz_proto_msgTypes[17].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   19,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Drop build-only tables so they can be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_pem
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/pem"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn-like stub that replays a fixed byte
// slice to Read calls and discards all writes, letting fuzzed code exercise
// connection-oriented APIs without real network I/O.
type FuzzingConn struct {
	buf    []byte // canned data served by Read
	offset int    // current read position within buf
}

// Read copies the next chunk of c.buf into b, advances the read offset, and
// returns io.EOF once the canned data is exhausted.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset instead
// of the remaining byte count len(c.buf)-c.offset. On a partially consumed
// buffer it could return n == len(b) while copying fewer bytes, handing the
// caller an uninitialized tail and advancing offset past the end of buf.
// Using copy's return value makes the short-read accounting correct by
// construction.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends to transmit b: every byte is reported as written and then
// discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}

// Close marks the connection as drained by moving the read offset to the end
// of the canned buffer, so later Reads report EOF. It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is the placeholder net.Addr reported by FuzzingConn for both
// endpoints of the fake connection.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	const fakeNetwork = "fuzz_addr_net"
	return fakeNetwork
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	const fakeAddress = "fuzz_addr_string"
	return fakeAddress
}
// LocalAddr reports a fixed placeholder address for the local endpoint.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return new(FuzzingAddr)
}

// RemoteAddr reports a fixed placeholder address for the remote endpoint.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return new(FuzzingAddr)
}

// SetDeadline is a no-op: the in-memory connection never times out.
func (c *FuzzingConn) SetDeadline(deadline time.Time) error {
	return nil
}

// SetReadDeadline is a no-op: reads never time out.
func (c *FuzzingConn) SetReadDeadline(deadline time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op: writes never time out.
func (c *FuzzingConn) SetWriteDeadline(deadline time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn whose
// Read calls replay them from the start.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helper constructors when the fuzz target needs them.

// CreateBigInt interprets a as a big-endian unsigned integer and returns it
// as a *big.Int; an empty or nil slice yields zero.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the fuzzer-provided bytes a through a buffered
// reader, for APIs under test that consume a *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a protobuf-supplied []int64 into the []int shape
// an API under test expects; out-of-range values wrap per Go's int
// conversion rules.
func ConvertIntArray(a []int64) []int {
	r := make([]int, 0, len(a))
	for _, v := range a {
		r = append(r, int(v))
	}
	return r
}
// ConvertUint16Array narrows a protobuf-supplied []int64 into []uint16;
// values outside [0, 65535] wrap per Go's unsigned conversion rules.
func ConvertUint16Array(a []int64) []uint16 {
	r := make([]uint16, 0, len(a))
	for _, v := range a {
		r = append(r, uint16(v))
	}
	return r
}
// GetRune returns the first rune of s (U+FFFD when the leading bytes are not
// valid UTF-8), or NUL when s is empty.
func GetRune(s string) rune {
	if runes := []rune(s); len(runes) > 0 {
		return runes[0]
	}
	return '\x00'
}
// FuzzNG_valid drives FuzzNG_List with data that is expected to already be
// valid protobuf (as produced by libprotobuf-mutator); failing to unmarshal
// is itself a harness bug and panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the target; anything else is a real
	// crash and is re-raised for the fuzzer to report.
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure drives FuzzNG_List with arbitrary bytes; inputs that are not
// valid protobuf are simply rejected rather than treated as errors.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if proto.Unmarshal(data, gen) != nil {
		return 0
	}
	// Swallow string panics raised by the target; anything else is a real
	// crash and is re-raised for the fuzzer to report.
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time FUZZ_NG_REPRODUCER dump below.
var initialized bool

// FuzzNG_List replays the fuzzer-chosen call sequence gen against the
// encoding/pem API. On first invocation it optionally writes a Go reproducer
// to the file named by the FUZZ_NG_REPRODUCER environment variable. It
// returns 1 when the full sequence ran, 0 when it was cut short.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		if repro := os.Getenv("FUZZ_NG_REPRODUCER"); len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Blocks produced by earlier calls, reused round-robin as Encode inputs.
	// NOTE(review): nothing in this generated harness ever appends to
	// BlockResults, so the Encode branches below currently always skip —
	// kept as-is to preserve the generated behavior.
	var BlockResults []*pem.Block
	BlockResultsIndex := 0
	for idx, one := range gen.List {
		if idx > 4096 {
			// Cap the sequence length to keep individual runs bounded.
			return 0
		}
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_Decode:
			pem.Decode(a.Decode.Data)
		case *NgoloFuzzOne_Encode:
			out := bytes.NewBuffer(a.Encode.Out)
			if len(BlockResults) == 0 {
				continue
			}
			block := BlockResults[BlockResultsIndex]
			BlockResultsIndex = (BlockResultsIndex + 1) % len(BlockResults)
			if err := pem.Encode(out, block); err != nil {
				err.Error()
				return 0
			}
		case *NgoloFuzzOne_EncodeToMemory:
			if len(BlockResults) == 0 {
				continue
			}
			block := BlockResults[BlockResultsIndex]
			BlockResultsIndex = (BlockResultsIndex + 1) % len(BlockResults)
			pem.EncodeToMemory(block)
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go reproducer for the call sequence
// gen to w, mirroring the dispatch performed by FuzzNG_List.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// NOTE(review): BlockNb is never incremented in this generated printer,
	// so the Encode branches below never emit anything — kept as-is to
	// mirror FuzzNG_List exactly.
	BlockNb := 0
	BlockResultsIndex := 0
	for _, one := range gen.List {
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_Decode:
			w.WriteString(fmt.Sprintf("pem.Decode(%#+v)\n", a.Decode.Data))
		case *NgoloFuzzOne_Encode:
			if BlockNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("pem.Encode(bytes.NewBuffer(%#+v), Block%d)\n", a.Encode.Out, BlockResultsIndex%BlockNb))
			BlockResultsIndex = (BlockResultsIndex + 1) % BlockNb
		case *NgoloFuzzOne_EncodeToMemory:
			if BlockNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("pem.EncodeToMemory(Block%d)\n", BlockResultsIndex%BlockNb))
			BlockResultsIndex = (BlockResultsIndex + 1) % BlockNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_pem
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the build fails if the linked protoimpl runtime falls
// outside the [MinVersion, MaxVersion] window this file was generated for.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// DecodeArgs carries the input bytes for a fuzzed call to pem.Decode.
type DecodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Data          []byte                 `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its type info.
func (x *DecodeArgs) Reset() {
	*x = DecodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf runtime's text formatter.
func (x *DecodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*DecodeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *DecodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DecodeArgs.ProtoReflect.Descriptor instead.
func (*DecodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetData returns the PEM input bytes (nil if unset).
func (x *DecodeArgs) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// EncodeArgs carries the initial writer contents for a fuzzed call to
// pem.Encode.
type EncodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Out           []byte                 `protobuf:"bytes,1,opt,name=out,proto3" json:"out,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and re-binds its type info.
func (x *EncodeArgs) Reset() {
	*x = EncodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf runtime's text formatter.
func (x *EncodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*EncodeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *EncodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EncodeArgs.ProtoReflect.Descriptor instead.
func (*EncodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetOut returns the writer seed bytes (nil if unset).
func (x *EncodeArgs) GetOut() []byte {
	if x != nil {
		return x.Out
	}
	return nil
}
// EncodeToMemoryArgs is the generated (field-less) protobuf message marking
// one fuzzed EncodeToMemory call; the call takes no fuzzed arguments.
type EncodeToMemoryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state and re-binds its type info.
func (x *EncodeToMemoryArgs) Reset() {
*x = EncodeToMemoryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodeToMemoryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodeToMemoryArgs) ProtoMessage() {}
// ProtoReflect lazily attaches the message type info on first use.
func (x *EncodeToMemoryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodeToMemoryArgs.ProtoReflect.Descriptor instead.
func (*EncodeToMemoryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// NgoloFuzzOne is the generated oneof wrapper selecting which single API call
// (Decode, Encode, or EncodeToMemory) a fuzz step performs.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Decode
// *NgoloFuzzOne_Encode
// *NgoloFuzzOne_EncodeToMemory
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state and re-binds its type info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect lazily attaches the message type info on first use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetItem returns whichever oneof variant is set, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// GetDecode returns the Decode variant if set, otherwise nil.
func (x *NgoloFuzzOne) GetDecode() *DecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Decode); ok {
return x.Decode
}
}
return nil
}
// GetEncode returns the Encode variant if set, otherwise nil.
func (x *NgoloFuzzOne) GetEncode() *EncodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Encode); ok {
return x.Encode
}
}
return nil
}
// GetEncodeToMemory returns the EncodeToMemory variant if set, otherwise nil.
func (x *NgoloFuzzOne) GetEncodeToMemory() *EncodeToMemoryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodeToMemory); ok {
return x.EncodeToMemory
}
}
return nil
}
// isNgoloFuzzOne_Item is the closed interface implemented by all oneof variants.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Decode struct {
Decode *DecodeArgs `protobuf:"bytes,1,opt,name=Decode,proto3,oneof"`
}
type NgoloFuzzOne_Encode struct {
Encode *EncodeArgs `protobuf:"bytes,2,opt,name=Encode,proto3,oneof"`
}
type NgoloFuzzOne_EncodeToMemory struct {
EncodeToMemory *EncodeToMemoryArgs `protobuf:"bytes,3,opt,name=EncodeToMemory,proto3,oneof"`
}
func (*NgoloFuzzOne_Decode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Encode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodeToMemory) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the generated oneof wrapper for an arbitrary scalar value
// (double, int64, bool, string, or bytes) used as a fuzzed argument.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state and re-binds its type info.
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect lazily attaches the message type info on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// GetItem returns whichever oneof variant is set, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Each getter below returns the variant's value when set, else the zero value.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the closed interface implemented by all oneof variants.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated top-level message: an ordered sequence of
// NgoloFuzzOne steps that the harness replays as API calls.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state and re-binds its type info.
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect lazily attaches the message type info on first use.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetList returns the step list, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto,
// embedded as a string literal by protoc-gen-go.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\" \n" +
"\n" +
"DecodeArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"\x1e\n" +
"\n" +
"EncodeArgs\x12\x10\n" +
"\x03out\x18\x01 \x01(\fR\x03out\"\x14\n" +
"\x12EncodeToMemoryArgs\"\xc1\x01\n" +
"\fNgoloFuzzOne\x12/\n" +
"\x06Decode\x18\x01 \x01(\v2\x15.ngolofuzz.DecodeArgsH\x00R\x06Decode\x12/\n" +
"\x06Encode\x18\x02 \x01(\v2\x15.ngolofuzz.EncodeArgsH\x00R\x06Encode\x12G\n" +
"\x0eEncodeToMemory\x18\x03 \x01(\v2\x1d.ngolofuzz.EncodeToMemoryArgsH\x00R\x0eEncodeToMemoryB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x19Z\x17./;fuzz_ng_encoding_pemb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw descriptor
// once and caches the result for Descriptor() calls.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Per-message runtime type info; indices match the goTypes table below.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_ngolofuzz_proto_goTypes = []any{
(*DecodeArgs)(nil), // 0: ngolofuzz.DecodeArgs
(*EncodeArgs)(nil), // 1: ngolofuzz.EncodeArgs
(*EncodeToMemoryArgs)(nil), // 2: ngolofuzz.EncodeToMemoryArgs
(*NgoloFuzzOne)(nil), // 3: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 4: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 5: ngolofuzz.NgoloFuzzList
}
// Dependency index table linking fields to their message types.
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.Decode:type_name -> ngolofuzz.DecodeArgs
1, // 1: ngolofuzz.NgoloFuzzOne.Encode:type_name -> ngolofuzz.EncodeArgs
2, // 2: ngolofuzz.NgoloFuzzOne.EncodeToMemory:type_name -> ngolofuzz.EncodeToMemoryArgs
3, // 3: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers oneof wrapper types and builds the file
// descriptor; it is idempotent and runs at package init.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzOne_Decode)(nil),
(*NgoloFuzzOne_Encode)(nil),
(*NgoloFuzzOne_EncodeToMemory)(nil),
}
file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release tables no longer needed after Build.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_encoding_xml
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"encoding/xml"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub for fuzzing: reads consume buf
// from offset, writes are discarded.
type FuzzingConn struct {
	buf    []byte
	offset int
}

// Read copies up to len(b) of the remaining buffered bytes into b, advances
// the read offset by the number of bytes actually copied, and returns that
// count. It returns io.EOF once the buffer is exhausted.
//
// Fix: the original compared len(b) against len(c.buf)+c.offset (instead of
// the remaining byte count), so when fewer than len(b) bytes remained it
// still reported n = len(b) and over-advanced the offset past the data it
// had really copied — violating the io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy returns min(len(b), remaining), which is exactly the count read.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written (writes are irrelevant
// when replaying fuzzed input).
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the buffer as fully consumed so subsequent Reads return EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a placeholder net.Addr with fixed network/string values.
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr/RemoteAddr return placeholder addresses; deadline setters are
// no-ops. Together with Read/Write/Close these satisfy net.Conn.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn returns a FuzzingConn whose reads are served from a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helpers when the generated harness needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to int, preserving order.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for idx, v := range a {
		out[idx] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16, preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for idx, v := range a {
		out[idx] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s (U+FFFD for a leading invalid UTF-8
// sequence), or NUL for an empty string.
func GetRune(s string) rune {
	if len(s) == 0 {
		return '\x00'
	}
	return []rune(s)[0]
}
// FuzzNG_valid replays data, which MUST be a valid protobuf-encoded
// NgoloFuzzList (e.g. produced by libprotobuf-mutator). Returns 1 if the
// whole list was replayed, 0 otherwise.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
// Deliberately panics BEFORE the recover below is installed: a corpus
// entry that fails to unmarshal indicates a harness/generator bug.
panic("Failed to unmarshal LPM generated variables")
}
// Swallow string panics raised by the target API; re-raise anything else
// (real bugs panic with error/runtime values).
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// FuzzNG_unsure is the variant of FuzzNG_valid for raw fuzzers: the input is
// not guaranteed to be a valid protobuf, so decode failures are simply
// rejected (return 0) instead of panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics raised by the target API; re-raise anything else.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards one-time reproducer dumping in FuzzNG_List.
var initialized bool
// FuzzNG_List interprets gen.List as a program: each NgoloFuzzOne step maps
// to one encoding/xml API call. Values returned by constructors (Decoder,
// Encoder, tokens, ...) are pooled in per-type result slices and later steps
// consume them round-robin via the matching index variable. Returns 1 when
// the list was fully replayed, 0 on early exit (error result or oversized
// list).
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// On first call, optionally dump a Go reproducer of this input to the
// file named by FUZZ_NG_REPRODUCER.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Result pools and their round-robin cursors.
var ProcInstResults []*xml.ProcInst
ProcInstResultsIndex := 0
var DirectiveResults []*xml.Directive
DirectiveResultsIndex := 0
var DecoderResults []*xml.Decoder
DecoderResultsIndex := 0
var StartElementResults []*xml.StartElement
StartElementResultsIndex := 0
var CharDataResults []*xml.CharData
CharDataResultsIndex := 0
var EncoderResults []*xml.Encoder
EncoderResultsIndex := 0
var CommentResults []*xml.Comment
CommentResultsIndex := 0
var TokenResults []*xml.Token
TokenResultsIndex := 0
for l := range gen.List {
// Cap the number of replayed steps to bound runtime.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Marshal_:
_, r1 := xml.Marshal(a.Marshal_.V)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_MarshalIndent:
_, r1 := xml.MarshalIndent(a.MarshalIndent.V, a.MarshalIndent.Prefix, a.MarshalIndent.Indent)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NewEncoder:
arg0 := bytes.NewBuffer(a.NewEncoder.W)
r0 := xml.NewEncoder(arg0)
if r0 != nil{
EncoderResults = append(EncoderResults, r0)
}
case *NgoloFuzzOne_EncoderNgdotIndent:
if len(EncoderResults) == 0 {
continue
}
arg0 := EncoderResults[EncoderResultsIndex]
EncoderResultsIndex = (EncoderResultsIndex + 1) % len(EncoderResults)
arg0.Indent(a.EncoderNgdotIndent.Prefix, a.EncoderNgdotIndent.Indent)
case *NgoloFuzzOne_EncoderNgdotEncode:
if len(EncoderResults) == 0 {
continue
}
arg0 := EncoderResults[EncoderResultsIndex]
EncoderResultsIndex = (EncoderResultsIndex + 1) % len(EncoderResults)
r0 := arg0.Encode(a.EncoderNgdotEncode.V)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_EncoderNgdotEncodeElement:
if len(EncoderResults) == 0 {
continue
}
arg0 := EncoderResults[EncoderResultsIndex]
EncoderResultsIndex = (EncoderResultsIndex + 1) % len(EncoderResults)
if len(StartElementResults) == 0 {
continue
}
arg2 := *StartElementResults[StartElementResultsIndex]
StartElementResultsIndex = (StartElementResultsIndex + 1) % len(StartElementResults)
r0 := arg0.EncodeElement(a.EncoderNgdotEncodeElement.V, arg2)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_EncoderNgdotEncodeToken:
if len(EncoderResults) == 0 {
continue
}
arg0 := EncoderResults[EncoderResultsIndex]
EncoderResultsIndex = (EncoderResultsIndex + 1) % len(EncoderResults)
if len(TokenResults) == 0 {
continue
}
arg1 := *TokenResults[TokenResultsIndex]
TokenResultsIndex = (TokenResultsIndex + 1) % len(TokenResults)
r0 := arg0.EncodeToken(arg1)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_EncoderNgdotFlush:
if len(EncoderResults) == 0 {
continue
}
arg0 := EncoderResults[EncoderResultsIndex]
EncoderResultsIndex = (EncoderResultsIndex + 1) % len(EncoderResults)
r0 := arg0.Flush()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_EncoderNgdotClose:
if len(EncoderResults) == 0 {
continue
}
arg0 := EncoderResults[EncoderResultsIndex]
EncoderResultsIndex = (EncoderResultsIndex + 1) % len(EncoderResults)
r0 := arg0.Close()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_Unmarshal_:
r0 := xml.Unmarshal(a.Unmarshal_.Data, a.Unmarshal_.V)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_DecoderNgdotDecode:
if len(DecoderResults) == 0 {
continue
}
arg0 := DecoderResults[DecoderResultsIndex]
DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
r0 := arg0.Decode(a.DecoderNgdotDecode.V)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_DecoderNgdotDecodeElement:
if len(DecoderResults) == 0 {
continue
}
arg0 := DecoderResults[DecoderResultsIndex]
DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
if len(StartElementResults) == 0 {
continue
}
arg2 := StartElementResults[StartElementResultsIndex]
StartElementResultsIndex = (StartElementResultsIndex + 1) % len(StartElementResults)
r0 := arg0.DecodeElement(a.DecoderNgdotDecodeElement.V, arg2)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_DecoderNgdotSkip:
if len(DecoderResults) == 0 {
continue
}
arg0 := DecoderResults[DecoderResultsIndex]
DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
r0 := arg0.Skip()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_StartElementNgdotCopy:
if len(StartElementResults) == 0 {
continue
}
arg0 := StartElementResults[StartElementResultsIndex]
StartElementResultsIndex = (StartElementResultsIndex + 1) % len(StartElementResults)
arg0.Copy()
case *NgoloFuzzOne_StartElementNgdotEnd:
if len(StartElementResults) == 0 {
continue
}
arg0 := StartElementResults[StartElementResultsIndex]
StartElementResultsIndex = (StartElementResultsIndex + 1) % len(StartElementResults)
arg0.End()
case *NgoloFuzzOne_CharDataNgdotCopy:
if len(CharDataResults) == 0 {
continue
}
arg0 := CharDataResults[CharDataResultsIndex]
CharDataResultsIndex = (CharDataResultsIndex + 1) % len(CharDataResults)
r0 := arg0.Copy()
// Copies are fed back into the pool for later steps to consume.
CharDataResults = append(CharDataResults, &r0)
case *NgoloFuzzOne_CommentNgdotCopy:
if len(CommentResults) == 0 {
continue
}
arg0 := CommentResults[CommentResultsIndex]
CommentResultsIndex = (CommentResultsIndex + 1) % len(CommentResults)
r0 := arg0.Copy()
CommentResults = append(CommentResults, &r0)
case *NgoloFuzzOne_ProcInstNgdotCopy:
if len(ProcInstResults) == 0 {
continue
}
arg0 := ProcInstResults[ProcInstResultsIndex]
ProcInstResultsIndex = (ProcInstResultsIndex + 1) % len(ProcInstResults)
arg0.Copy()
case *NgoloFuzzOne_DirectiveNgdotCopy:
if len(DirectiveResults) == 0 {
continue
}
arg0 := DirectiveResults[DirectiveResultsIndex]
DirectiveResultsIndex = (DirectiveResultsIndex + 1) % len(DirectiveResults)
r0 := arg0.Copy()
DirectiveResults = append(DirectiveResults, &r0)
case *NgoloFuzzOne_CopyToken:
if len(TokenResults) == 0 {
continue
}
arg0 := *TokenResults[TokenResultsIndex]
TokenResultsIndex = (TokenResultsIndex + 1) % len(TokenResults)
r0 := xml.CopyToken(arg0)
TokenResults = append(TokenResults, &r0)
case *NgoloFuzzOne_NewDecoder:
arg0 := bytes.NewReader(a.NewDecoder.R)
r0 := xml.NewDecoder(arg0)
if r0 != nil{
DecoderResults = append(DecoderResults, r0)
}
case *NgoloFuzzOne_DecoderNgdotToken:
if len(DecoderResults) == 0 {
continue
}
arg0 := DecoderResults[DecoderResultsIndex]
DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
r0, r1 := arg0.Token()
TokenResults = append(TokenResults, &r0)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_DecoderNgdotRawToken:
if len(DecoderResults) == 0 {
continue
}
arg0 := DecoderResults[DecoderResultsIndex]
DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
r0, r1 := arg0.RawToken()
TokenResults = append(TokenResults, &r0)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_DecoderNgdotInputOffset:
if len(DecoderResults) == 0 {
continue
}
arg0 := DecoderResults[DecoderResultsIndex]
DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
arg0.InputOffset()
case *NgoloFuzzOne_DecoderNgdotInputPos:
if len(DecoderResults) == 0 {
continue
}
arg0 := DecoderResults[DecoderResultsIndex]
DecoderResultsIndex = (DecoderResultsIndex + 1) % len(DecoderResults)
arg0.InputPos()
case *NgoloFuzzOne_EscapeText:
arg0 := bytes.NewBuffer(a.EscapeText.W)
r0 := xml.EscapeText(arg0, a.EscapeText.S)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_Escape:
arg0 := bytes.NewBuffer(a.Escape.W)
xml.Escape(arg0, a.Escape.S)
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer of gen to w, mirroring the
// interpretation performed by FuzzNG_List: XxxNb counts constructed values,
// XxxResultsIndex tracks the round-robin consumer cursor.
//
// NOTE(review): in the Copy/CopyToken cases below the cursor is reduced
// modulo the count AFTER it has been incremented (Nb+1), whereas FuzzNG_List
// reduces modulo the pool length BEFORE appending the copy — the printed
// indices can diverge from the runtime replay once copies occur; verify
// against the generator before relying on reproducers with Copy steps.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
ProcInstNb := 0
ProcInstResultsIndex := 0
DirectiveNb := 0
DirectiveResultsIndex := 0
DecoderNb := 0
DecoderResultsIndex := 0
StartElementNb := 0
StartElementResultsIndex := 0
CharDataNb := 0
CharDataResultsIndex := 0
EncoderNb := 0
EncoderResultsIndex := 0
CommentNb := 0
CommentResultsIndex := 0
TokenNb := 0
TokenResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Marshal_:
w.WriteString(fmt.Sprintf("xml.Marshal(%#+v)\n", a.Marshal_.V))
case *NgoloFuzzOne_MarshalIndent:
w.WriteString(fmt.Sprintf("xml.MarshalIndent(%#+v, %#+v, %#+v)\n", a.MarshalIndent.V, a.MarshalIndent.Prefix, a.MarshalIndent.Indent))
case *NgoloFuzzOne_NewEncoder:
w.WriteString(fmt.Sprintf("Encoder%d := xml.NewEncoder(bytes.NewBuffer(%#+v))\n", EncoderNb, a.NewEncoder.W))
EncoderNb = EncoderNb + 1
case *NgoloFuzzOne_EncoderNgdotIndent:
if EncoderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Encoder%d.Indent(%#+v, %#+v)\n", EncoderResultsIndex, a.EncoderNgdotIndent.Prefix, a.EncoderNgdotIndent.Indent))
EncoderResultsIndex = (EncoderResultsIndex + 1) % EncoderNb
case *NgoloFuzzOne_EncoderNgdotEncode:
if EncoderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Encoder%d.Encode(%#+v)\n", EncoderResultsIndex, a.EncoderNgdotEncode.V))
EncoderResultsIndex = (EncoderResultsIndex + 1) % EncoderNb
case *NgoloFuzzOne_EncoderNgdotEncodeElement:
if EncoderNb == 0 {
continue
}
if StartElementNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Encoder%d.EncodeElement(%#+v, StartElement%d)\n", EncoderResultsIndex, a.EncoderNgdotEncodeElement.V, (StartElementResultsIndex + 0) % StartElementNb))
EncoderResultsIndex = (EncoderResultsIndex + 1) % EncoderNb
StartElementResultsIndex = (StartElementResultsIndex + 1) % StartElementNb
case *NgoloFuzzOne_EncoderNgdotEncodeToken:
if EncoderNb == 0 {
continue
}
if TokenNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Encoder%d.EncodeToken(Token%d)\n", EncoderResultsIndex, (TokenResultsIndex + 0) % TokenNb))
EncoderResultsIndex = (EncoderResultsIndex + 1) % EncoderNb
TokenResultsIndex = (TokenResultsIndex + 1) % TokenNb
case *NgoloFuzzOne_EncoderNgdotFlush:
if EncoderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Encoder%d.Flush()\n", EncoderResultsIndex))
EncoderResultsIndex = (EncoderResultsIndex + 1) % EncoderNb
case *NgoloFuzzOne_EncoderNgdotClose:
if EncoderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Encoder%d.Close()\n", EncoderResultsIndex))
EncoderResultsIndex = (EncoderResultsIndex + 1) % EncoderNb
case *NgoloFuzzOne_Unmarshal_:
w.WriteString(fmt.Sprintf("xml.Unmarshal(%#+v, %#+v)\n", a.Unmarshal_.Data, a.Unmarshal_.V))
case *NgoloFuzzOne_DecoderNgdotDecode:
if DecoderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Decoder%d.Decode(%#+v)\n", DecoderResultsIndex, a.DecoderNgdotDecode.V))
DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
case *NgoloFuzzOne_DecoderNgdotDecodeElement:
if DecoderNb == 0 {
continue
}
if StartElementNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Decoder%d.DecodeElement(%#+v, StartElement%d)\n", DecoderResultsIndex, a.DecoderNgdotDecodeElement.V, (StartElementResultsIndex + 0) % StartElementNb))
DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
StartElementResultsIndex = (StartElementResultsIndex + 1) % StartElementNb
case *NgoloFuzzOne_DecoderNgdotSkip:
if DecoderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Decoder%d.Skip()\n", DecoderResultsIndex))
DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
case *NgoloFuzzOne_StartElementNgdotCopy:
if StartElementNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("StartElement%d.Copy()\n", StartElementResultsIndex))
StartElementResultsIndex = (StartElementResultsIndex + 1) % StartElementNb
case *NgoloFuzzOne_StartElementNgdotEnd:
if StartElementNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("StartElement%d.End()\n", StartElementResultsIndex))
StartElementResultsIndex = (StartElementResultsIndex + 1) % StartElementNb
case *NgoloFuzzOne_CharDataNgdotCopy:
if CharDataNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CharData%d := CharData%d.Copy()\n", CharDataNb, CharDataResultsIndex))
CharDataNb = CharDataNb + 1
CharDataResultsIndex = (CharDataResultsIndex + 1) % CharDataNb
case *NgoloFuzzOne_CommentNgdotCopy:
if CommentNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Comment%d := Comment%d.Copy()\n", CommentNb, CommentResultsIndex))
CommentNb = CommentNb + 1
CommentResultsIndex = (CommentResultsIndex + 1) % CommentNb
case *NgoloFuzzOne_ProcInstNgdotCopy:
if ProcInstNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("ProcInst%d.Copy()\n", ProcInstResultsIndex))
ProcInstResultsIndex = (ProcInstResultsIndex + 1) % ProcInstNb
case *NgoloFuzzOne_DirectiveNgdotCopy:
if DirectiveNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Directive%d := Directive%d.Copy()\n", DirectiveNb, DirectiveResultsIndex))
DirectiveNb = DirectiveNb + 1
DirectiveResultsIndex = (DirectiveResultsIndex + 1) % DirectiveNb
case *NgoloFuzzOne_CopyToken:
if TokenNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Token%d := xml.CopyToken(Token%d)\n", TokenNb, (TokenResultsIndex + 0) % TokenNb))
TokenNb = TokenNb + 1
TokenResultsIndex = (TokenResultsIndex + 1) % TokenNb
case *NgoloFuzzOne_NewDecoder:
w.WriteString(fmt.Sprintf("Decoder%d := xml.NewDecoder(bytes.NewReader(%#+v))\n", DecoderNb, a.NewDecoder.R))
DecoderNb = DecoderNb + 1
case *NgoloFuzzOne_DecoderNgdotToken:
if DecoderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Token%d, _ := Decoder%d.Token()\n", TokenNb, DecoderResultsIndex))
TokenNb = TokenNb + 1
DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
case *NgoloFuzzOne_DecoderNgdotRawToken:
if DecoderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Token%d, _ := Decoder%d.RawToken()\n", TokenNb, DecoderResultsIndex))
TokenNb = TokenNb + 1
DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
case *NgoloFuzzOne_DecoderNgdotInputOffset:
if DecoderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Decoder%d.InputOffset()\n", DecoderResultsIndex))
DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
case *NgoloFuzzOne_DecoderNgdotInputPos:
if DecoderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Decoder%d.InputPos()\n", DecoderResultsIndex))
DecoderResultsIndex = (DecoderResultsIndex + 1) % DecoderNb
case *NgoloFuzzOne_EscapeText:
w.WriteString(fmt.Sprintf("xml.EscapeText(bytes.NewBuffer(%#+v), %#+v)\n", a.EscapeText.W, a.EscapeText.S))
case *NgoloFuzzOne_Escape:
w.WriteString(fmt.Sprintf("xml.Escape(bytes.NewBuffer(%#+v), %#+v)\n", a.Escape.W, a.Escape.S))
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_encoding_xml
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// MarshalArgs is the generated message carrying the fuzzed value for one
// xml.Marshal call.
type MarshalArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
V *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state and re-binds its type info.
func (x *MarshalArgs) Reset() {
*x = MarshalArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MarshalArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MarshalArgs) ProtoMessage() {}
// ProtoReflect lazily attaches the message type info on first use.
func (x *MarshalArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MarshalArgs.ProtoReflect.Descriptor instead.
func (*MarshalArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetV returns the v field, or nil for a nil receiver.
func (x *MarshalArgs) GetV() *NgoloFuzzAny {
if x != nil {
return x.V
}
return nil
}
// MarshalIndentArgs is the generated message carrying the fuzzed value,
// prefix, and indent for one xml.MarshalIndent call.
type MarshalIndentArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
V *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
Indent string `protobuf:"bytes,3,opt,name=indent,proto3" json:"indent,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state and re-binds its type info.
func (x *MarshalIndentArgs) Reset() {
*x = MarshalIndentArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MarshalIndentArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MarshalIndentArgs) ProtoMessage() {}
// ProtoReflect lazily attaches the message type info on first use.
func (x *MarshalIndentArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MarshalIndentArgs.ProtoReflect.Descriptor instead.
func (*MarshalIndentArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// Getters return the field value when the receiver is non-nil, else the zero value.
func (x *MarshalIndentArgs) GetV() *NgoloFuzzAny {
if x != nil {
return x.V
}
return nil
}
func (x *MarshalIndentArgs) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
func (x *MarshalIndentArgs) GetIndent() string {
if x != nil {
return x.Indent
}
return ""
}
// NewEncoderArgs is the generated message carrying the initial writer bytes
// for one xml.NewEncoder call.
type NewEncoderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state and re-binds its type info.
func (x *NewEncoderArgs) Reset() {
*x = NewEncoderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewEncoderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewEncoderArgs) ProtoMessage() {}
// ProtoReflect lazily attaches the message type info on first use.
func (x *NewEncoderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewEncoderArgs.ProtoReflect.Descriptor instead.
func (*NewEncoderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// GetW returns the w field, or nil for a nil receiver.
func (x *NewEncoderArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
// EncoderNgdotIndentArgs is the generated message carrying prefix/indent for
// one Encoder.Indent call ("Ngdot" encodes the method-call dot).
type EncoderNgdotIndentArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
Indent string `protobuf:"bytes,2,opt,name=indent,proto3" json:"indent,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state and re-binds its type info.
func (x *EncoderNgdotIndentArgs) Reset() {
*x = EncoderNgdotIndentArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncoderNgdotIndentArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncoderNgdotIndentArgs) ProtoMessage() {}
// ProtoReflect lazily attaches the message type info on first use.
func (x *EncoderNgdotIndentArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncoderNgdotIndentArgs.ProtoReflect.Descriptor instead.
func (*EncoderNgdotIndentArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// Getters return the field value when the receiver is non-nil, else "".
func (x *EncoderNgdotIndentArgs) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
func (x *EncoderNgdotIndentArgs) GetIndent() string {
if x != nil {
return x.Indent
}
return ""
}
// EncoderNgdotEncodeArgs carries the single value argument for a fuzzed
// Encoder.Encode call. Code generated by protoc-gen-go; do not edit by hand.
type EncoderNgdotEncodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
V *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[4].
func (x *EncoderNgdotEncodeArgs) Reset() {
*x = EncoderNgdotEncodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncoderNgdotEncodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncoderNgdotEncodeArgs) ProtoMessage() {}
func (x *EncoderNgdotEncodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncoderNgdotEncodeArgs.ProtoReflect.Descriptor instead.
func (*EncoderNgdotEncodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// GetV is a nil-safe accessor for the V field.
func (x *EncoderNgdotEncodeArgs) GetV() *NgoloFuzzAny {
if x != nil {
return x.V
}
return nil
}
// EncoderNgdotEncodeElementArgs carries the value argument for a fuzzed
// Encoder.EncodeElement call. Code generated by protoc-gen-go; do not edit.
type EncoderNgdotEncodeElementArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
V *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[5].
func (x *EncoderNgdotEncodeElementArgs) Reset() {
*x = EncoderNgdotEncodeElementArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncoderNgdotEncodeElementArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncoderNgdotEncodeElementArgs) ProtoMessage() {}
func (x *EncoderNgdotEncodeElementArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncoderNgdotEncodeElementArgs.ProtoReflect.Descriptor instead.
func (*EncoderNgdotEncodeElementArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetV is a nil-safe accessor for the V field.
func (x *EncoderNgdotEncodeElementArgs) GetV() *NgoloFuzzAny {
if x != nil {
return x.V
}
return nil
}
// EncoderNgdotEncodeTokenArgs is an argument-less marker message for a fuzzed
// Encoder.EncodeToken call. Code generated by protoc-gen-go; do not edit.
type EncoderNgdotEncodeTokenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[6].
func (x *EncoderNgdotEncodeTokenArgs) Reset() {
*x = EncoderNgdotEncodeTokenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncoderNgdotEncodeTokenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncoderNgdotEncodeTokenArgs) ProtoMessage() {}
func (x *EncoderNgdotEncodeTokenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncoderNgdotEncodeTokenArgs.ProtoReflect.Descriptor instead.
func (*EncoderNgdotEncodeTokenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// EncoderNgdotFlushArgs is an argument-less marker message for a fuzzed
// Encoder.Flush call. Code generated by protoc-gen-go; do not edit by hand.
type EncoderNgdotFlushArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[7].
func (x *EncoderNgdotFlushArgs) Reset() {
*x = EncoderNgdotFlushArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncoderNgdotFlushArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncoderNgdotFlushArgs) ProtoMessage() {}
func (x *EncoderNgdotFlushArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncoderNgdotFlushArgs.ProtoReflect.Descriptor instead.
func (*EncoderNgdotFlushArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// EncoderNgdotCloseArgs is an argument-less marker message for a fuzzed
// Encoder.Close call. Code generated by protoc-gen-go; do not edit by hand.
type EncoderNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[8].
func (x *EncoderNgdotCloseArgs) Reset() {
*x = EncoderNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncoderNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncoderNgdotCloseArgs) ProtoMessage() {}
func (x *EncoderNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncoderNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*EncoderNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// UnmarshalArgs carries the (data, v) arguments for a fuzzed Unmarshal call.
// Code generated by protoc-gen-go; do not edit by hand.
type UnmarshalArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
V *NgoloFuzzAny `protobuf:"bytes,2,opt,name=v,proto3" json:"v,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[9].
func (x *UnmarshalArgs) Reset() {
*x = UnmarshalArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UnmarshalArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnmarshalArgs) ProtoMessage() {}
func (x *UnmarshalArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnmarshalArgs.ProtoReflect.Descriptor instead.
func (*UnmarshalArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// Nil-safe accessors: zero value is returned when x is nil.
func (x *UnmarshalArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
func (x *UnmarshalArgs) GetV() *NgoloFuzzAny {
if x != nil {
return x.V
}
return nil
}
// DecoderNgdotDecodeArgs carries the value argument for a fuzzed
// Decoder.Decode call. Code generated by protoc-gen-go; do not edit by hand.
type DecoderNgdotDecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
V *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[10].
func (x *DecoderNgdotDecodeArgs) Reset() {
*x = DecoderNgdotDecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecoderNgdotDecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecoderNgdotDecodeArgs) ProtoMessage() {}
func (x *DecoderNgdotDecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecoderNgdotDecodeArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotDecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetV is a nil-safe accessor for the V field.
func (x *DecoderNgdotDecodeArgs) GetV() *NgoloFuzzAny {
if x != nil {
return x.V
}
return nil
}
// DecoderNgdotDecodeElementArgs carries the value argument for a fuzzed
// Decoder.DecodeElement call. Code generated by protoc-gen-go; do not edit.
type DecoderNgdotDecodeElementArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
V *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[11].
func (x *DecoderNgdotDecodeElementArgs) Reset() {
*x = DecoderNgdotDecodeElementArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecoderNgdotDecodeElementArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecoderNgdotDecodeElementArgs) ProtoMessage() {}
func (x *DecoderNgdotDecodeElementArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecoderNgdotDecodeElementArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotDecodeElementArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// GetV is a nil-safe accessor for the V field.
func (x *DecoderNgdotDecodeElementArgs) GetV() *NgoloFuzzAny {
if x != nil {
return x.V
}
return nil
}
// DecoderNgdotSkipArgs is an argument-less marker message for a fuzzed
// Decoder.Skip call. Code generated by protoc-gen-go; do not edit by hand.
type DecoderNgdotSkipArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[12].
func (x *DecoderNgdotSkipArgs) Reset() {
*x = DecoderNgdotSkipArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecoderNgdotSkipArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecoderNgdotSkipArgs) ProtoMessage() {}
func (x *DecoderNgdotSkipArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecoderNgdotSkipArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotSkipArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// StartElementNgdotCopyArgs is an argument-less marker message for a fuzzed
// StartElement.Copy call. Code generated by protoc-gen-go; do not edit.
type StartElementNgdotCopyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[13].
func (x *StartElementNgdotCopyArgs) Reset() {
*x = StartElementNgdotCopyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *StartElementNgdotCopyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StartElementNgdotCopyArgs) ProtoMessage() {}
func (x *StartElementNgdotCopyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StartElementNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*StartElementNgdotCopyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// StartElementNgdotEndArgs is an argument-less marker message for a fuzzed
// StartElement.End call. Code generated by protoc-gen-go; do not edit.
type StartElementNgdotEndArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[14].
func (x *StartElementNgdotEndArgs) Reset() {
*x = StartElementNgdotEndArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *StartElementNgdotEndArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StartElementNgdotEndArgs) ProtoMessage() {}
func (x *StartElementNgdotEndArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StartElementNgdotEndArgs.ProtoReflect.Descriptor instead.
func (*StartElementNgdotEndArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// CharDataNgdotCopyArgs is an argument-less marker message for a fuzzed
// CharData.Copy call. Code generated by protoc-gen-go; do not edit by hand.
type CharDataNgdotCopyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[15].
func (x *CharDataNgdotCopyArgs) Reset() {
*x = CharDataNgdotCopyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CharDataNgdotCopyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CharDataNgdotCopyArgs) ProtoMessage() {}
func (x *CharDataNgdotCopyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CharDataNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*CharDataNgdotCopyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// CommentNgdotCopyArgs is an argument-less marker message for a fuzzed
// Comment.Copy call. Code generated by protoc-gen-go; do not edit by hand.
type CommentNgdotCopyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[16].
func (x *CommentNgdotCopyArgs) Reset() {
*x = CommentNgdotCopyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CommentNgdotCopyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CommentNgdotCopyArgs) ProtoMessage() {}
func (x *CommentNgdotCopyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CommentNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*CommentNgdotCopyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// ProcInstNgdotCopyArgs is an argument-less marker message for a fuzzed
// ProcInst.Copy call. Code generated by protoc-gen-go; do not edit by hand.
type ProcInstNgdotCopyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[17].
func (x *ProcInstNgdotCopyArgs) Reset() {
*x = ProcInstNgdotCopyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ProcInstNgdotCopyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ProcInstNgdotCopyArgs) ProtoMessage() {}
func (x *ProcInstNgdotCopyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ProcInstNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*ProcInstNgdotCopyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// DirectiveNgdotCopyArgs is an argument-less marker message for a fuzzed
// Directive.Copy call. Code generated by protoc-gen-go; do not edit by hand.
type DirectiveNgdotCopyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[18].
func (x *DirectiveNgdotCopyArgs) Reset() {
*x = DirectiveNgdotCopyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DirectiveNgdotCopyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DirectiveNgdotCopyArgs) ProtoMessage() {}
func (x *DirectiveNgdotCopyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DirectiveNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*DirectiveNgdotCopyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// CopyTokenArgs is an argument-less marker message for a fuzzed CopyToken
// call. Code generated by protoc-gen-go; do not edit by hand.
type CopyTokenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[19].
func (x *CopyTokenArgs) Reset() {
*x = CopyTokenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CopyTokenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CopyTokenArgs) ProtoMessage() {}
func (x *CopyTokenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CopyTokenArgs.ProtoReflect.Descriptor instead.
func (*CopyTokenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// NewDecoderArgs is the generated message carrying the arguments for a fuzzed
// NewDecoder call (R presumably seeds the decoder's reader — confirm against
// the ngolofuzz harness). Code generated by protoc-gen-go; do not edit.
type NewDecoderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[20].
func (x *NewDecoderArgs) Reset() {
*x = NewDecoderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewDecoderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewDecoderArgs) ProtoMessage() {}
func (x *NewDecoderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewDecoderArgs.ProtoReflect.Descriptor instead.
func (*NewDecoderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// GetR is a nil-safe accessor for the R field.
func (x *NewDecoderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
// DecoderNgdotTokenArgs is an argument-less marker message for a fuzzed
// Decoder.Token call. Code generated by protoc-gen-go; do not edit by hand.
type DecoderNgdotTokenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[21].
func (x *DecoderNgdotTokenArgs) Reset() {
*x = DecoderNgdotTokenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecoderNgdotTokenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecoderNgdotTokenArgs) ProtoMessage() {}
func (x *DecoderNgdotTokenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecoderNgdotTokenArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotTokenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// DecoderNgdotRawTokenArgs is an argument-less marker message for a fuzzed
// Decoder.RawToken call. Code generated by protoc-gen-go; do not edit.
type DecoderNgdotRawTokenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[22].
func (x *DecoderNgdotRawTokenArgs) Reset() {
*x = DecoderNgdotRawTokenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecoderNgdotRawTokenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecoderNgdotRawTokenArgs) ProtoMessage() {}
func (x *DecoderNgdotRawTokenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecoderNgdotRawTokenArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotRawTokenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// DecoderNgdotInputOffsetArgs is an argument-less marker message for a fuzzed
// Decoder.InputOffset call. Code generated by protoc-gen-go; do not edit.
type DecoderNgdotInputOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[23].
func (x *DecoderNgdotInputOffsetArgs) Reset() {
*x = DecoderNgdotInputOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecoderNgdotInputOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecoderNgdotInputOffsetArgs) ProtoMessage() {}
func (x *DecoderNgdotInputOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecoderNgdotInputOffsetArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotInputOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// DecoderNgdotInputPosArgs is an argument-less marker message for a fuzzed
// Decoder.InputPos call. Code generated by protoc-gen-go; do not edit.
type DecoderNgdotInputPosArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[24].
func (x *DecoderNgdotInputPosArgs) Reset() {
*x = DecoderNgdotInputPosArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecoderNgdotInputPosArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecoderNgdotInputPosArgs) ProtoMessage() {}
func (x *DecoderNgdotInputPosArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecoderNgdotInputPosArgs.ProtoReflect.Descriptor instead.
func (*DecoderNgdotInputPosArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// EscapeTextArgs carries the (w, s) arguments for a fuzzed EscapeText call.
// Code generated by protoc-gen-go; do not edit by hand.
type EscapeTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
S []byte `protobuf:"bytes,2,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[25].
func (x *EscapeTextArgs) Reset() {
*x = EscapeTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EscapeTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EscapeTextArgs) ProtoMessage() {}
func (x *EscapeTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EscapeTextArgs.ProtoReflect.Descriptor instead.
func (*EscapeTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// Nil-safe accessors: nil is returned when x is nil.
func (x *EscapeTextArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
func (x *EscapeTextArgs) GetS() []byte {
if x != nil {
return x.S
}
return nil
}
// EscapeArgs carries the (w, s) arguments for a fuzzed Escape call.
// Code generated by protoc-gen-go; do not edit by hand.
type EscapeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
S []byte `protobuf:"bytes,2,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[26].
func (x *EscapeArgs) Reset() {
*x = EscapeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EscapeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EscapeArgs) ProtoMessage() {}
func (x *EscapeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EscapeArgs.ProtoReflect.Descriptor instead.
func (*EscapeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// Nil-safe accessors: nil is returned when x is nil.
func (x *EscapeArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
func (x *EscapeArgs) GetS() []byte {
if x != nil {
return x.S
}
return nil
}
// NgoloFuzzOne wraps exactly one fuzz operation in its Item oneof; the fuzzer
// picks a variant per step. Code generated by protoc-gen-go; do not edit.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Marshal_
// *NgoloFuzzOne_MarshalIndent
// *NgoloFuzzOne_NewEncoder
// *NgoloFuzzOne_EncoderNgdotIndent
// *NgoloFuzzOne_EncoderNgdotEncode
// *NgoloFuzzOne_EncoderNgdotEncodeElement
// *NgoloFuzzOne_EncoderNgdotEncodeToken
// *NgoloFuzzOne_EncoderNgdotFlush
// *NgoloFuzzOne_EncoderNgdotClose
// *NgoloFuzzOne_Unmarshal_
// *NgoloFuzzOne_DecoderNgdotDecode
// *NgoloFuzzOne_DecoderNgdotDecodeElement
// *NgoloFuzzOne_DecoderNgdotSkip
// *NgoloFuzzOne_StartElementNgdotCopy
// *NgoloFuzzOne_StartElementNgdotEnd
// *NgoloFuzzOne_CharDataNgdotCopy
// *NgoloFuzzOne_CommentNgdotCopy
// *NgoloFuzzOne_ProcInstNgdotCopy
// *NgoloFuzzOne_DirectiveNgdotCopy
// *NgoloFuzzOne_CopyToken
// *NgoloFuzzOne_NewDecoder
// *NgoloFuzzOne_DecoderNgdotToken
// *NgoloFuzzOne_DecoderNgdotRawToken
// *NgoloFuzzOne_DecoderNgdotInputOffset
// *NgoloFuzzOne_DecoderNgdotInputPos
// *NgoloFuzzOne_EscapeText
// *NgoloFuzzOne_Escape
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Standard protoc-gen-go plumbing backed by file_ngolofuzz_proto_msgTypes[27].
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// GetItem returns the currently-set oneof wrapper, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Oneof accessors: each returns its variant's message when Item currently
// holds that variant, and nil otherwise (including when x itself is nil).
func (x *NgoloFuzzOne) GetMarshal_() *MarshalArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Marshal_); ok {
return x.Marshal_
}
}
return nil
}
func (x *NgoloFuzzOne) GetMarshalIndent() *MarshalIndentArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MarshalIndent); ok {
return x.MarshalIndent
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewEncoder() *NewEncoderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewEncoder); ok {
return x.NewEncoder
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncoderNgdotIndent() *EncoderNgdotIndentArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncoderNgdotIndent); ok {
return x.EncoderNgdotIndent
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncoderNgdotEncode() *EncoderNgdotEncodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncoderNgdotEncode); ok {
return x.EncoderNgdotEncode
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncoderNgdotEncodeElement() *EncoderNgdotEncodeElementArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncoderNgdotEncodeElement); ok {
return x.EncoderNgdotEncodeElement
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncoderNgdotEncodeToken() *EncoderNgdotEncodeTokenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncoderNgdotEncodeToken); ok {
return x.EncoderNgdotEncodeToken
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncoderNgdotFlush() *EncoderNgdotFlushArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncoderNgdotFlush); ok {
return x.EncoderNgdotFlush
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncoderNgdotClose() *EncoderNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncoderNgdotClose); ok {
return x.EncoderNgdotClose
}
}
return nil
}
func (x *NgoloFuzzOne) GetUnmarshal_() *UnmarshalArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Unmarshal_); ok {
return x.Unmarshal_
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotDecode() *DecoderNgdotDecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotDecode); ok {
return x.DecoderNgdotDecode
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotDecodeElement() *DecoderNgdotDecodeElementArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotDecodeElement); ok {
return x.DecoderNgdotDecodeElement
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotSkip() *DecoderNgdotSkipArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotSkip); ok {
return x.DecoderNgdotSkip
}
}
return nil
}
func (x *NgoloFuzzOne) GetStartElementNgdotCopy() *StartElementNgdotCopyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_StartElementNgdotCopy); ok {
return x.StartElementNgdotCopy
}
}
return nil
}
func (x *NgoloFuzzOne) GetStartElementNgdotEnd() *StartElementNgdotEndArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_StartElementNgdotEnd); ok {
return x.StartElementNgdotEnd
}
}
return nil
}
func (x *NgoloFuzzOne) GetCharDataNgdotCopy() *CharDataNgdotCopyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CharDataNgdotCopy); ok {
return x.CharDataNgdotCopy
}
}
return nil
}
func (x *NgoloFuzzOne) GetCommentNgdotCopy() *CommentNgdotCopyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CommentNgdotCopy); ok {
return x.CommentNgdotCopy
}
}
return nil
}
func (x *NgoloFuzzOne) GetProcInstNgdotCopy() *ProcInstNgdotCopyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ProcInstNgdotCopy); ok {
return x.ProcInstNgdotCopy
}
}
return nil
}
func (x *NgoloFuzzOne) GetDirectiveNgdotCopy() *DirectiveNgdotCopyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DirectiveNgdotCopy); ok {
return x.DirectiveNgdotCopy
}
}
return nil
}
func (x *NgoloFuzzOne) GetCopyToken() *CopyTokenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CopyToken); ok {
return x.CopyToken
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewDecoder() *NewDecoderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewDecoder); ok {
return x.NewDecoder
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotToken() *DecoderNgdotTokenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotToken); ok {
return x.DecoderNgdotToken
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotRawToken() *DecoderNgdotRawTokenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotRawToken); ok {
return x.DecoderNgdotRawToken
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotInputOffset() *DecoderNgdotInputOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotInputOffset); ok {
return x.DecoderNgdotInputOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecoderNgdotInputPos() *DecoderNgdotInputPosArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecoderNgdotInputPos); ok {
return x.DecoderNgdotInputPos
}
}
return nil
}
func (x *NgoloFuzzOne) GetEscapeText() *EscapeTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EscapeText); ok {
return x.EscapeText
}
}
return nil
}
func (x *NgoloFuzzOne) GetEscape() *EscapeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Escape); ok {
return x.Escape
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed marker interface implemented by every
// NgoloFuzzOne oneof wrapper type (protoc-gen-go oneof convention).
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// Oneof wrapper types for NgoloFuzzOne.Item: each holds exactly one variant
// and carries the field number in its protobuf tag. Generated by
// protoc-gen-go; do not edit by hand.
type NgoloFuzzOne_Marshal_ struct {
Marshal_ *MarshalArgs `protobuf:"bytes,1,opt,name=Marshal,proto3,oneof"`
}
type NgoloFuzzOne_MarshalIndent struct {
MarshalIndent *MarshalIndentArgs `protobuf:"bytes,2,opt,name=MarshalIndent,proto3,oneof"`
}
type NgoloFuzzOne_NewEncoder struct {
NewEncoder *NewEncoderArgs `protobuf:"bytes,3,opt,name=NewEncoder,proto3,oneof"`
}
type NgoloFuzzOne_EncoderNgdotIndent struct {
EncoderNgdotIndent *EncoderNgdotIndentArgs `protobuf:"bytes,4,opt,name=EncoderNgdotIndent,proto3,oneof"`
}
type NgoloFuzzOne_EncoderNgdotEncode struct {
EncoderNgdotEncode *EncoderNgdotEncodeArgs `protobuf:"bytes,5,opt,name=EncoderNgdotEncode,proto3,oneof"`
}
type NgoloFuzzOne_EncoderNgdotEncodeElement struct {
EncoderNgdotEncodeElement *EncoderNgdotEncodeElementArgs `protobuf:"bytes,6,opt,name=EncoderNgdotEncodeElement,proto3,oneof"`
}
type NgoloFuzzOne_EncoderNgdotEncodeToken struct {
EncoderNgdotEncodeToken *EncoderNgdotEncodeTokenArgs `protobuf:"bytes,7,opt,name=EncoderNgdotEncodeToken,proto3,oneof"`
}
type NgoloFuzzOne_EncoderNgdotFlush struct {
EncoderNgdotFlush *EncoderNgdotFlushArgs `protobuf:"bytes,8,opt,name=EncoderNgdotFlush,proto3,oneof"`
}
type NgoloFuzzOne_EncoderNgdotClose struct {
EncoderNgdotClose *EncoderNgdotCloseArgs `protobuf:"bytes,9,opt,name=EncoderNgdotClose,proto3,oneof"`
}
type NgoloFuzzOne_Unmarshal_ struct {
Unmarshal_ *UnmarshalArgs `protobuf:"bytes,10,opt,name=Unmarshal,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotDecode struct {
DecoderNgdotDecode *DecoderNgdotDecodeArgs `protobuf:"bytes,11,opt,name=DecoderNgdotDecode,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotDecodeElement struct {
DecoderNgdotDecodeElement *DecoderNgdotDecodeElementArgs `protobuf:"bytes,12,opt,name=DecoderNgdotDecodeElement,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotSkip struct {
DecoderNgdotSkip *DecoderNgdotSkipArgs `protobuf:"bytes,13,opt,name=DecoderNgdotSkip,proto3,oneof"`
}
type NgoloFuzzOne_StartElementNgdotCopy struct {
StartElementNgdotCopy *StartElementNgdotCopyArgs `protobuf:"bytes,14,opt,name=StartElementNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_StartElementNgdotEnd struct {
StartElementNgdotEnd *StartElementNgdotEndArgs `protobuf:"bytes,15,opt,name=StartElementNgdotEnd,proto3,oneof"`
}
type NgoloFuzzOne_CharDataNgdotCopy struct {
CharDataNgdotCopy *CharDataNgdotCopyArgs `protobuf:"bytes,16,opt,name=CharDataNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_CommentNgdotCopy struct {
CommentNgdotCopy *CommentNgdotCopyArgs `protobuf:"bytes,17,opt,name=CommentNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_ProcInstNgdotCopy struct {
ProcInstNgdotCopy *ProcInstNgdotCopyArgs `protobuf:"bytes,18,opt,name=ProcInstNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_DirectiveNgdotCopy struct {
DirectiveNgdotCopy *DirectiveNgdotCopyArgs `protobuf:"bytes,19,opt,name=DirectiveNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_CopyToken struct {
CopyToken *CopyTokenArgs `protobuf:"bytes,20,opt,name=CopyToken,proto3,oneof"`
}
type NgoloFuzzOne_NewDecoder struct {
NewDecoder *NewDecoderArgs `protobuf:"bytes,21,opt,name=NewDecoder,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotToken struct {
DecoderNgdotToken *DecoderNgdotTokenArgs `protobuf:"bytes,22,opt,name=DecoderNgdotToken,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotRawToken struct {
DecoderNgdotRawToken *DecoderNgdotRawTokenArgs `protobuf:"bytes,23,opt,name=DecoderNgdotRawToken,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotInputOffset struct {
DecoderNgdotInputOffset *DecoderNgdotInputOffsetArgs `protobuf:"bytes,24,opt,name=DecoderNgdotInputOffset,proto3,oneof"`
}
type NgoloFuzzOne_DecoderNgdotInputPos struct {
DecoderNgdotInputPos *DecoderNgdotInputPosArgs `protobuf:"bytes,25,opt,name=DecoderNgdotInputPos,proto3,oneof"`
}
type NgoloFuzzOne_EscapeText struct {
EscapeText *EscapeTextArgs `protobuf:"bytes,26,opt,name=EscapeText,proto3,oneof"`
}
type NgoloFuzzOne_Escape struct {
Escape *EscapeArgs `protobuf:"bytes,27,opt,name=Escape,proto3,oneof"`
}
func (*NgoloFuzzOne_Marshal_) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MarshalIndent) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewEncoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncoderNgdotIndent) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncoderNgdotEncode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncoderNgdotEncodeElement) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncoderNgdotEncodeToken) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncoderNgdotFlush) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncoderNgdotClose) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Unmarshal_) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotDecode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotDecodeElement) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotSkip) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_StartElementNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_StartElementNgdotEnd) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CharDataNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CommentNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ProcInstNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DirectiveNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CopyToken) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewDecoder) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotToken) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotRawToken) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotInputOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecoderNgdotInputPos) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EscapeText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Escape) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny encodes one arbitrary fuzzer-chosen value (double, int64,
// bool, string, or bytes); exactly one Item variant is set per message.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzAny_DoubleArgs
	// *NgoloFuzzAny_Int64Args
	// *NgoloFuzzAny_BoolArgs
	// *NgoloFuzzAny_StringArgs
	// *NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message info (index 28).
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[28]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect lazily binds the cached message info before returning the
// reflective view of x.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[28]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}

// GetItem returns the currently-set oneof wrapper, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Scalar getters: each returns the zero value of its type when Item holds
// a different variant (or the receiver is nil).
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed marker interface for the Item oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of API
// calls (NgoloFuzzOne) that the harness replays against the target package.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message info (index 29).
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[29]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect lazily binds the cached message info before returning the
// reflective view of x.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[29]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}

// GetList returns the call sequence, or nil on a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the runtime descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto embedded as a string constant. The bytes are wire-format
// data; they must never be edited by hand.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"4\n" +
	"\vMarshalArgs\x12%\n" +
	"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"j\n" +
	"\x11MarshalIndentArgs\x12%\n" +
	"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\x12\x16\n" +
	"\x06prefix\x18\x02 \x01(\tR\x06prefix\x12\x16\n" +
	"\x06indent\x18\x03 \x01(\tR\x06indent\"\x1e\n" +
	"\x0eNewEncoderArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\"H\n" +
	"\x16EncoderNgdotIndentArgs\x12\x16\n" +
	"\x06prefix\x18\x01 \x01(\tR\x06prefix\x12\x16\n" +
	"\x06indent\x18\x02 \x01(\tR\x06indent\"?\n" +
	"\x16EncoderNgdotEncodeArgs\x12%\n" +
	"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"F\n" +
	"\x1dEncoderNgdotEncodeElementArgs\x12%\n" +
	"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"\x1d\n" +
	"\x1bEncoderNgdotEncodeTokenArgs\"\x17\n" +
	"\x15EncoderNgdotFlushArgs\"\x17\n" +
	"\x15EncoderNgdotCloseArgs\"J\n" +
	"\rUnmarshalArgs\x12\x12\n" +
	"\x04data\x18\x01 \x01(\fR\x04data\x12%\n" +
	"\x01v\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"?\n" +
	"\x16DecoderNgdotDecodeArgs\x12%\n" +
	"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"F\n" +
	"\x1dDecoderNgdotDecodeElementArgs\x12%\n" +
	"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"\x16\n" +
	"\x14DecoderNgdotSkipArgs\"\x1b\n" +
	"\x19StartElementNgdotCopyArgs\"\x1a\n" +
	"\x18StartElementNgdotEndArgs\"\x17\n" +
	"\x15CharDataNgdotCopyArgs\"\x16\n" +
	"\x14CommentNgdotCopyArgs\"\x17\n" +
	"\x15ProcInstNgdotCopyArgs\"\x18\n" +
	"\x16DirectiveNgdotCopyArgs\"\x0f\n" +
	"\rCopyTokenArgs\"\x1e\n" +
	"\x0eNewDecoderArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\"\x17\n" +
	"\x15DecoderNgdotTokenArgs\"\x1a\n" +
	"\x18DecoderNgdotRawTokenArgs\"\x1d\n" +
	"\x1bDecoderNgdotInputOffsetArgs\"\x1a\n" +
	"\x18DecoderNgdotInputPosArgs\",\n" +
	"\x0eEscapeTextArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\x12\f\n" +
	"\x01s\x18\x02 \x01(\fR\x01s\"(\n" +
	"\n" +
	"EscapeArgs\x12\f\n" +
	"\x01w\x18\x01 \x01(\fR\x01w\x12\f\n" +
	"\x01s\x18\x02 \x01(\fR\x01s\"\x83\x11\n" +
	"\fNgoloFuzzOne\x122\n" +
	"\aMarshal\x18\x01 \x01(\v2\x16.ngolofuzz.MarshalArgsH\x00R\aMarshal\x12D\n" +
	"\rMarshalIndent\x18\x02 \x01(\v2\x1c.ngolofuzz.MarshalIndentArgsH\x00R\rMarshalIndent\x12;\n" +
	"\n" +
	"NewEncoder\x18\x03 \x01(\v2\x19.ngolofuzz.NewEncoderArgsH\x00R\n" +
	"NewEncoder\x12S\n" +
	"\x12EncoderNgdotIndent\x18\x04 \x01(\v2!.ngolofuzz.EncoderNgdotIndentArgsH\x00R\x12EncoderNgdotIndent\x12S\n" +
	"\x12EncoderNgdotEncode\x18\x05 \x01(\v2!.ngolofuzz.EncoderNgdotEncodeArgsH\x00R\x12EncoderNgdotEncode\x12h\n" +
	"\x19EncoderNgdotEncodeElement\x18\x06 \x01(\v2(.ngolofuzz.EncoderNgdotEncodeElementArgsH\x00R\x19EncoderNgdotEncodeElement\x12b\n" +
	"\x17EncoderNgdotEncodeToken\x18\a \x01(\v2&.ngolofuzz.EncoderNgdotEncodeTokenArgsH\x00R\x17EncoderNgdotEncodeToken\x12P\n" +
	"\x11EncoderNgdotFlush\x18\b \x01(\v2 .ngolofuzz.EncoderNgdotFlushArgsH\x00R\x11EncoderNgdotFlush\x12P\n" +
	"\x11EncoderNgdotClose\x18\t \x01(\v2 .ngolofuzz.EncoderNgdotCloseArgsH\x00R\x11EncoderNgdotClose\x128\n" +
	"\tUnmarshal\x18\n" +
	" \x01(\v2\x18.ngolofuzz.UnmarshalArgsH\x00R\tUnmarshal\x12S\n" +
	"\x12DecoderNgdotDecode\x18\v \x01(\v2!.ngolofuzz.DecoderNgdotDecodeArgsH\x00R\x12DecoderNgdotDecode\x12h\n" +
	"\x19DecoderNgdotDecodeElement\x18\f \x01(\v2(.ngolofuzz.DecoderNgdotDecodeElementArgsH\x00R\x19DecoderNgdotDecodeElement\x12M\n" +
	"\x10DecoderNgdotSkip\x18\r \x01(\v2\x1f.ngolofuzz.DecoderNgdotSkipArgsH\x00R\x10DecoderNgdotSkip\x12\\\n" +
	"\x15StartElementNgdotCopy\x18\x0e \x01(\v2$.ngolofuzz.StartElementNgdotCopyArgsH\x00R\x15StartElementNgdotCopy\x12Y\n" +
	"\x14StartElementNgdotEnd\x18\x0f \x01(\v2#.ngolofuzz.StartElementNgdotEndArgsH\x00R\x14StartElementNgdotEnd\x12P\n" +
	"\x11CharDataNgdotCopy\x18\x10 \x01(\v2 .ngolofuzz.CharDataNgdotCopyArgsH\x00R\x11CharDataNgdotCopy\x12M\n" +
	"\x10CommentNgdotCopy\x18\x11 \x01(\v2\x1f.ngolofuzz.CommentNgdotCopyArgsH\x00R\x10CommentNgdotCopy\x12P\n" +
	"\x11ProcInstNgdotCopy\x18\x12 \x01(\v2 .ngolofuzz.ProcInstNgdotCopyArgsH\x00R\x11ProcInstNgdotCopy\x12S\n" +
	"\x12DirectiveNgdotCopy\x18\x13 \x01(\v2!.ngolofuzz.DirectiveNgdotCopyArgsH\x00R\x12DirectiveNgdotCopy\x128\n" +
	"\tCopyToken\x18\x14 \x01(\v2\x18.ngolofuzz.CopyTokenArgsH\x00R\tCopyToken\x12;\n" +
	"\n" +
	"NewDecoder\x18\x15 \x01(\v2\x19.ngolofuzz.NewDecoderArgsH\x00R\n" +
	"NewDecoder\x12P\n" +
	"\x11DecoderNgdotToken\x18\x16 \x01(\v2 .ngolofuzz.DecoderNgdotTokenArgsH\x00R\x11DecoderNgdotToken\x12Y\n" +
	"\x14DecoderNgdotRawToken\x18\x17 \x01(\v2#.ngolofuzz.DecoderNgdotRawTokenArgsH\x00R\x14DecoderNgdotRawToken\x12b\n" +
	"\x17DecoderNgdotInputOffset\x18\x18 \x01(\v2&.ngolofuzz.DecoderNgdotInputOffsetArgsH\x00R\x17DecoderNgdotInputOffset\x12Y\n" +
	"\x14DecoderNgdotInputPos\x18\x19 \x01(\v2#.ngolofuzz.DecoderNgdotInputPosArgsH\x00R\x14DecoderNgdotInputPos\x12;\n" +
	"\n" +
	"EscapeText\x18\x1a \x01(\v2\x19.ngolofuzz.EscapeTextArgsH\x00R\n" +
	"EscapeText\x12/\n" +
	"\x06Escape\x18\x1b \x01(\v2\x15.ngolofuzz.EscapeArgsH\x00R\x06EscapeB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x19Z\x17./;fuzz_ng_encoding_xmlb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor
// exactly once and returns the cached bytes; the deprecated Descriptor
// methods above serve it.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds one MessageInfo slot per message
// declared in ngolofuzz.proto (30 messages, indexed as listed below).
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 30)

// file_ngolofuzz_proto_goTypes maps descriptor message indices to Go types;
// the order must match the declaration order in the .proto file.
var file_ngolofuzz_proto_goTypes = []any{
	(*MarshalArgs)(nil),                   // 0: ngolofuzz.MarshalArgs
	(*MarshalIndentArgs)(nil),             // 1: ngolofuzz.MarshalIndentArgs
	(*NewEncoderArgs)(nil),                // 2: ngolofuzz.NewEncoderArgs
	(*EncoderNgdotIndentArgs)(nil),        // 3: ngolofuzz.EncoderNgdotIndentArgs
	(*EncoderNgdotEncodeArgs)(nil),        // 4: ngolofuzz.EncoderNgdotEncodeArgs
	(*EncoderNgdotEncodeElementArgs)(nil), // 5: ngolofuzz.EncoderNgdotEncodeElementArgs
	(*EncoderNgdotEncodeTokenArgs)(nil),   // 6: ngolofuzz.EncoderNgdotEncodeTokenArgs
	(*EncoderNgdotFlushArgs)(nil),         // 7: ngolofuzz.EncoderNgdotFlushArgs
	(*EncoderNgdotCloseArgs)(nil),         // 8: ngolofuzz.EncoderNgdotCloseArgs
	(*UnmarshalArgs)(nil),                 // 9: ngolofuzz.UnmarshalArgs
	(*DecoderNgdotDecodeArgs)(nil),        // 10: ngolofuzz.DecoderNgdotDecodeArgs
	(*DecoderNgdotDecodeElementArgs)(nil), // 11: ngolofuzz.DecoderNgdotDecodeElementArgs
	(*DecoderNgdotSkipArgs)(nil),          // 12: ngolofuzz.DecoderNgdotSkipArgs
	(*StartElementNgdotCopyArgs)(nil),     // 13: ngolofuzz.StartElementNgdotCopyArgs
	(*StartElementNgdotEndArgs)(nil),      // 14: ngolofuzz.StartElementNgdotEndArgs
	(*CharDataNgdotCopyArgs)(nil),         // 15: ngolofuzz.CharDataNgdotCopyArgs
	(*CommentNgdotCopyArgs)(nil),          // 16: ngolofuzz.CommentNgdotCopyArgs
	(*ProcInstNgdotCopyArgs)(nil),         // 17: ngolofuzz.ProcInstNgdotCopyArgs
	(*DirectiveNgdotCopyArgs)(nil),        // 18: ngolofuzz.DirectiveNgdotCopyArgs
	(*CopyTokenArgs)(nil),                 // 19: ngolofuzz.CopyTokenArgs
	(*NewDecoderArgs)(nil),                // 20: ngolofuzz.NewDecoderArgs
	(*DecoderNgdotTokenArgs)(nil),         // 21: ngolofuzz.DecoderNgdotTokenArgs
	(*DecoderNgdotRawTokenArgs)(nil),      // 22: ngolofuzz.DecoderNgdotRawTokenArgs
	(*DecoderNgdotInputOffsetArgs)(nil),   // 23: ngolofuzz.DecoderNgdotInputOffsetArgs
	(*DecoderNgdotInputPosArgs)(nil),      // 24: ngolofuzz.DecoderNgdotInputPosArgs
	(*EscapeTextArgs)(nil),                // 25: ngolofuzz.EscapeTextArgs
	(*EscapeArgs)(nil),                    // 26: ngolofuzz.EscapeArgs
	(*NgoloFuzzOne)(nil),                  // 27: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                  // 28: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                 // 29: ngolofuzz.NgoloFuzzList
}

// file_ngolofuzz_proto_depIdxs records, for every message-typed field, the
// goTypes index of its type; the trailing bookkeeping entries delimit the
// method/extension sub-lists (all empty here).
var file_ngolofuzz_proto_depIdxs = []int32{
	28, // 0: ngolofuzz.MarshalArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	28, // 1: ngolofuzz.MarshalIndentArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	28, // 2: ngolofuzz.EncoderNgdotEncodeArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	28, // 3: ngolofuzz.EncoderNgdotEncodeElementArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	28, // 4: ngolofuzz.UnmarshalArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	28, // 5: ngolofuzz.DecoderNgdotDecodeArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	28, // 6: ngolofuzz.DecoderNgdotDecodeElementArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
	0,  // 7: ngolofuzz.NgoloFuzzOne.Marshal:type_name -> ngolofuzz.MarshalArgs
	1,  // 8: ngolofuzz.NgoloFuzzOne.MarshalIndent:type_name -> ngolofuzz.MarshalIndentArgs
	2,  // 9: ngolofuzz.NgoloFuzzOne.NewEncoder:type_name -> ngolofuzz.NewEncoderArgs
	3,  // 10: ngolofuzz.NgoloFuzzOne.EncoderNgdotIndent:type_name -> ngolofuzz.EncoderNgdotIndentArgs
	4,  // 11: ngolofuzz.NgoloFuzzOne.EncoderNgdotEncode:type_name -> ngolofuzz.EncoderNgdotEncodeArgs
	5,  // 12: ngolofuzz.NgoloFuzzOne.EncoderNgdotEncodeElement:type_name -> ngolofuzz.EncoderNgdotEncodeElementArgs
	6,  // 13: ngolofuzz.NgoloFuzzOne.EncoderNgdotEncodeToken:type_name -> ngolofuzz.EncoderNgdotEncodeTokenArgs
	7,  // 14: ngolofuzz.NgoloFuzzOne.EncoderNgdotFlush:type_name -> ngolofuzz.EncoderNgdotFlushArgs
	8,  // 15: ngolofuzz.NgoloFuzzOne.EncoderNgdotClose:type_name -> ngolofuzz.EncoderNgdotCloseArgs
	9,  // 16: ngolofuzz.NgoloFuzzOne.Unmarshal:type_name -> ngolofuzz.UnmarshalArgs
	10, // 17: ngolofuzz.NgoloFuzzOne.DecoderNgdotDecode:type_name -> ngolofuzz.DecoderNgdotDecodeArgs
	11, // 18: ngolofuzz.NgoloFuzzOne.DecoderNgdotDecodeElement:type_name -> ngolofuzz.DecoderNgdotDecodeElementArgs
	12, // 19: ngolofuzz.NgoloFuzzOne.DecoderNgdotSkip:type_name -> ngolofuzz.DecoderNgdotSkipArgs
	13, // 20: ngolofuzz.NgoloFuzzOne.StartElementNgdotCopy:type_name -> ngolofuzz.StartElementNgdotCopyArgs
	14, // 21: ngolofuzz.NgoloFuzzOne.StartElementNgdotEnd:type_name -> ngolofuzz.StartElementNgdotEndArgs
	15, // 22: ngolofuzz.NgoloFuzzOne.CharDataNgdotCopy:type_name -> ngolofuzz.CharDataNgdotCopyArgs
	16, // 23: ngolofuzz.NgoloFuzzOne.CommentNgdotCopy:type_name -> ngolofuzz.CommentNgdotCopyArgs
	17, // 24: ngolofuzz.NgoloFuzzOne.ProcInstNgdotCopy:type_name -> ngolofuzz.ProcInstNgdotCopyArgs
	18, // 25: ngolofuzz.NgoloFuzzOne.DirectiveNgdotCopy:type_name -> ngolofuzz.DirectiveNgdotCopyArgs
	19, // 26: ngolofuzz.NgoloFuzzOne.CopyToken:type_name -> ngolofuzz.CopyTokenArgs
	20, // 27: ngolofuzz.NgoloFuzzOne.NewDecoder:type_name -> ngolofuzz.NewDecoderArgs
	21, // 28: ngolofuzz.NgoloFuzzOne.DecoderNgdotToken:type_name -> ngolofuzz.DecoderNgdotTokenArgs
	22, // 29: ngolofuzz.NgoloFuzzOne.DecoderNgdotRawToken:type_name -> ngolofuzz.DecoderNgdotRawTokenArgs
	23, // 30: ngolofuzz.NgoloFuzzOne.DecoderNgdotInputOffset:type_name -> ngolofuzz.DecoderNgdotInputOffsetArgs
	24, // 31: ngolofuzz.NgoloFuzzOne.DecoderNgdotInputPos:type_name -> ngolofuzz.DecoderNgdotInputPosArgs
	25, // 32: ngolofuzz.NgoloFuzzOne.EscapeText:type_name -> ngolofuzz.EscapeTextArgs
	26, // 33: ngolofuzz.NgoloFuzzOne.Escape:type_name -> ngolofuzz.EscapeArgs
	27, // 34: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	35, // [35:35] is the sub-list for method output_type
	35, // [35:35] is the sub-list for method input_type
	35, // [35:35] is the sub-list for extension type_name
	35, // [35:35] is the sub-list for extension extendee
	0,  // [0:35] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the oneof wrapper types and builds the
// runtime file descriptor. Safe to call more than once: it returns
// immediately after the first successful build.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Message index 27 is NgoloFuzzOne; list its 27 oneof wrappers.
	file_ngolofuzz_proto_msgTypes[27].OneofWrappers = []any{
		(*NgoloFuzzOne_Marshal_)(nil),
		(*NgoloFuzzOne_MarshalIndent)(nil),
		(*NgoloFuzzOne_NewEncoder)(nil),
		(*NgoloFuzzOne_EncoderNgdotIndent)(nil),
		(*NgoloFuzzOne_EncoderNgdotEncode)(nil),
		(*NgoloFuzzOne_EncoderNgdotEncodeElement)(nil),
		(*NgoloFuzzOne_EncoderNgdotEncodeToken)(nil),
		(*NgoloFuzzOne_EncoderNgdotFlush)(nil),
		(*NgoloFuzzOne_EncoderNgdotClose)(nil),
		(*NgoloFuzzOne_Unmarshal_)(nil),
		(*NgoloFuzzOne_DecoderNgdotDecode)(nil),
		(*NgoloFuzzOne_DecoderNgdotDecodeElement)(nil),
		(*NgoloFuzzOne_DecoderNgdotSkip)(nil),
		(*NgoloFuzzOne_StartElementNgdotCopy)(nil),
		(*NgoloFuzzOne_StartElementNgdotEnd)(nil),
		(*NgoloFuzzOne_CharDataNgdotCopy)(nil),
		(*NgoloFuzzOne_CommentNgdotCopy)(nil),
		(*NgoloFuzzOne_ProcInstNgdotCopy)(nil),
		(*NgoloFuzzOne_DirectiveNgdotCopy)(nil),
		(*NgoloFuzzOne_CopyToken)(nil),
		(*NgoloFuzzOne_NewDecoder)(nil),
		(*NgoloFuzzOne_DecoderNgdotToken)(nil),
		(*NgoloFuzzOne_DecoderNgdotRawToken)(nil),
		(*NgoloFuzzOne_DecoderNgdotInputOffset)(nil),
		(*NgoloFuzzOne_DecoderNgdotInputPos)(nil),
		(*NgoloFuzzOne_EscapeText)(nil),
		(*NgoloFuzzOne_Escape)(nil),
	}
	// Message index 28 is NgoloFuzzAny; list its five scalar wrappers.
	file_ngolofuzz_proto_msgTypes[28].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	// Anchor type used only to derive this package's import path.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   30,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the build-time tables; the runtime descriptor now owns them.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_errors
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"errors"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in fed from a fuzzer-provided
// byte slice: reads drain buf, writes are discarded.
type FuzzingConn struct {
	buf    []byte // fuzzer-supplied bytes served to readers
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of the fuzz buffer into b, honoring the
// io.Reader contract: n is the number of bytes actually copied, and io.EOF
// is returned once the buffer is exhausted.
//
// Fix: the original guard compared len(b) against len(c.buf)+c.offset (the
// sum rather than the remaining length). When the remaining bytes were fewer
// than len(b) but len(b) was still below that sum, it returned n = len(b)
// even though copy had transferred fewer bytes, handing the caller an
// uninitialized tail and advancing offset past the end of buf. Using copy's
// return value handles short and full reads uniformly.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write reports the whole of b as written; a fuzzing connection silently
// discards all output.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}

// Close marks the buffer as fully consumed so every later Read reports EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is the stateless placeholder net.Addr reported by FuzzingConn;
// it always describes the same fake endpoint.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	const name = "fuzz_addr_net"
	return name
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	const addr = "fuzz_addr_string"
	return addr
}
// LocalAddr reports a placeholder address; a fuzzing connection has no real
// local endpoint.
func (c *FuzzingConn) LocalAddr() net.Addr {
	var a FuzzingAddr
	return &a
}

// RemoteAddr reports the same placeholder address as LocalAddr.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	var a FuzzingAddr
	return &a
}

// SetDeadline is a no-op; in-memory reads and writes never block.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a fuzzer-provided byte slice in a FuzzingConn
// whose reads serve exactly those bytes.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO only add these helper constructors when the generated harness needs them.

// CreateBigInt builds a non-negative big.Int from big-endian bytes
// (an empty or nil slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes fuzzer-provided bytes through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a protobuf-supplied []int64 into the []int the
// fuzzed API expects (values wider than int are truncated on 32-bit
// platforms).
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows a protobuf-supplied []int64 into []uint16,
// keeping only the low 16 bits of each element.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty. The []rune
// conversion decodes UTF-8 the same way ranging does: invalid bytes become
// utf8.RuneError.
func GetRune(s string) rune {
	rs := []rune(s)
	if len(rs) == 0 {
		return '\x00'
	}
	return rs[0]
}
// FuzzNG_valid is the entry point for protobuf-aware fuzzers: data is
// expected to already be a valid NgoloFuzzList encoding, so a decode
// failure panics (it indicates a broken mutator, not an interesting input).
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the target while replaying the call
	// list; any other panic value is treated as a genuine crash and
	// re-raised for the fuzzer to report.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the entry point for byte-level fuzzers: inputs that fail
// to decode as NgoloFuzzList are simply rejected (return 0) rather than
// treated as harness bugs.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics raised by the target during replay; re-raise
	// anything else as a genuine crash.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-dump setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the decoded call list against the target package
// (here, errors). It returns 1 when the whole list was processed and 0 when
// replay stopped early (oversized list or a call that ends the sequence).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// When FUZZ_NG_REPRODUCER names a file, write a Go-source rendition
		// of this input there so a crash can be replayed by hand.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the work done per input: lists longer than 4096 calls are
		// abandoned.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			r0 := errors.New(a.New.Text)
			// Generated nil-check artifact: errors.New never returns nil,
			// so this always exercises Error() and then stops the replay.
			if r0 != nil {
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source rendition of the call list to w, one
// statement per list entry, for use as a standalone crash reproducer.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			w.WriteString(fmt.Sprintf("errors.New(%#+v)\n", a.New.Text))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_errors
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// NewArgs carries the single string argument for the fuzzed errors.New call.
type NewArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Text          string                 `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message info (index 0).
func (x *NewArgs) Reset() {
	*x = NewArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NewArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewArgs) ProtoMessage() {}

// ProtoReflect lazily binds the cached message info before returning the
// reflective view of x.
func (x *NewArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetText returns the error text, or "" on a nil receiver.
func (x *NewArgs) GetText() string {
	if x != nil {
		return x.Text
	}
	return ""
}
// NgoloFuzzOne encodes one fuzzed API call; for the errors package the only
// variant is errors.New.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzOne_New
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message info (index 1).
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect lazily binds the cached message info before returning the
// reflective view of x.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetItem returns the currently-set oneof wrapper, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetNew returns the errors.New arguments when Item holds that variant,
// or nil otherwise.
func (x *NgoloFuzzOne) GetNew() *NewArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
			return x.New
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed marker interface for the Item oneof.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_New struct {
	New *NewArgs `protobuf:"bytes,1,opt,name=New,proto3,oneof"`
}

func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny encodes one arbitrary fuzzer-chosen value (double, int64,
// bool, string, or bytes); exactly one Item variant is set per message.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzAny_DoubleArgs
	// *NgoloFuzzAny_Int64Args
	// *NgoloFuzzAny_BoolArgs
	// *NgoloFuzzAny_StringArgs
	// *NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its message info (index 2).
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect lazily binds the cached message info before returning the
// reflective view of x.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetItem returns the currently-set oneof wrapper, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Scalar getters: each returns the zero value of its type when Item holds
// a different variant (or the receiver is nil).
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed marker interface for the Item oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1d\n" +
"\aNewArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\tR\x04text\">\n" +
"\fNgoloFuzzOne\x12&\n" +
"\x03New\x18\x01 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03NewB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x13Z\x11./;fuzz_ng_errorsb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_ngolofuzz_proto_goTypes = []any{
(*NewArgs)(nil), // 0: ngolofuzz.NewArgs
(*NgoloFuzzOne)(nil), // 1: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 2: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 3: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
1, // 1: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[1].OneofWrappers = []any{
(*NgoloFuzzOne_New)(nil),
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_expvar
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"expvar"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stand-in for a network connection: Read
// serves a fixed byte buffer supplied by the fuzzer and Write discards
// everything, so fuzz inputs can be fed to code that expects a net.Conn.
type FuzzingConn struct {
	buf    []byte // bytes handed back by Read
	offset int    // current read position within buf
}

// Read copies the next chunk of the internal buffer into b, advancing
// the offset, and returns io.EOF once the buffer has been consumed.
//
// Bug fix: the short-buffer test previously compared len(b) against
// len(c.buf)+c.offset instead of the remaining byte count; once offset
// was non-zero, Read could report len(b) bytes read while copying
// fewer, and push offset past the end of the buffer.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything that is left: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b is large enough: drain the rest of the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write pretends to send b, reporting full success; the data itself is
// discarded because the fuzz target never inspects what was written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection as drained so any further Read returns io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a fixed dummy address returned by FuzzingConn's
// LocalAddr/RemoteAddr; it satisfies net.Addr with constant strings.
type FuzzingAddr struct{}

// Network returns a constant fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a constant textual form of the fake address.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: the in-memory connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start of
// the buffer, ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only generate these helpers when the target actually needs them.

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	// SetBytes returns its receiver, so the new value can be returned directly.
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each element of a from int64 to int,
// preserving order and length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array narrows each element of a from int64 to uint16,
// truncating to the low 16 bits as the Go conversion rules dictate.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the start of s yields utf8.RuneError, exactly as a
// range loop over the string would.
func GetRune(s string) rune {
	runes := []rune(s)
	if len(runes) == 0 {
		return '\x00'
	}
	return runes[0]
}
// FuzzNG_valid is the fuzz entry point for inputs the driver itself
// produced: a protobuf decode failure is a harness bug and panics.
// While replaying the call list, panics whose value is a string are
// swallowed; any other panic value is re-raised so genuine crashes
// still reach the fuzzer.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Inputs are expected to be valid LPM-generated protobuf.
		panic("Failed to unmarshal LPM generated variables")
	}
	// Installed only after the unmarshal check on purpose: the recover
	// guards the replay in FuzzNG_List, not the harness panic above.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// string panics are swallowed; see function comment
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
//
// FuzzNG_unsure is the fuzz entry point for arbitrary bytes: inputs
// that fail to decode as a NgoloFuzzList are skipped (return 0) rather
// than treated as harness errors. Panic handling matches FuzzNG_valid:
// string panic values are swallowed, anything else is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Not a decodable call list — uninteresting input.
		return 0
	}
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// string panics are swallowed; see function comment
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the decoded call list against the expvar package.
// Each NgoloFuzzOne item selects one expvar API call. Values produced
// by constructor-like calls (NewInt, NewFloat, NewMap, NewString, Get,
// Map.Init, Map.Get) are retained in per-type slices, and later items
// pick their receiver/argument from those slices round-robin via the
// matching *ResultsIndex cursor. Returns 1 after a full replay, 0 when
// the list is truncated by the length cap.
func FuzzNG_List(gen *NgoloFuzzList) int {
	// First call only: if FUZZ_NG_REPRODUCER names a file, write a
	// Go-source transcript of this input there for later debugging.
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Result pools: constructors append here; method calls draw from here.
	var StringResults []*expvar.String
	StringResultsIndex := 0
	var VarResults []*expvar.Var
	VarResultsIndex := 0
	var IntResults []*expvar.Int
	IntResultsIndex := 0
	var FloatResults []*expvar.Float
	FloatResultsIndex := 0
	var MapResults []*expvar.Map
	MapResultsIndex := 0
	for l := range gen.List {
		// Cap the number of replayed calls per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_IntNgdotValue:
			// A method call is skipped when its receiver pool is empty.
			if len(IntResults) == 0 {
				continue
			}
			arg0 := IntResults[IntResultsIndex]
			IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
			arg0.Value()
		case *NgoloFuzzOne_IntNgdotString:
			if len(IntResults) == 0 {
				continue
			}
			arg0 := IntResults[IntResultsIndex]
			IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
			arg0.String()
		case *NgoloFuzzOne_IntNgdotAdd:
			if len(IntResults) == 0 {
				continue
			}
			arg0 := IntResults[IntResultsIndex]
			IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
			arg0.Add(a.IntNgdotAdd.Delta)
		case *NgoloFuzzOne_IntNgdotSet:
			if len(IntResults) == 0 {
				continue
			}
			arg0 := IntResults[IntResultsIndex]
			IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
			arg0.Set(a.IntNgdotSet.Value)
		case *NgoloFuzzOne_FloatNgdotValue:
			if len(FloatResults) == 0 {
				continue
			}
			arg0 := FloatResults[FloatResultsIndex]
			FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
			arg0.Value()
		case *NgoloFuzzOne_FloatNgdotString:
			if len(FloatResults) == 0 {
				continue
			}
			arg0 := FloatResults[FloatResultsIndex]
			FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
			arg0.String()
		case *NgoloFuzzOne_FloatNgdotAdd:
			if len(FloatResults) == 0 {
				continue
			}
			arg0 := FloatResults[FloatResultsIndex]
			FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
			arg0.Add(a.FloatNgdotAdd.Delta)
		case *NgoloFuzzOne_FloatNgdotSet:
			if len(FloatResults) == 0 {
				continue
			}
			arg0 := FloatResults[FloatResultsIndex]
			FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
			arg0.Set(a.FloatNgdotSet.Value)
		case *NgoloFuzzOne_MapNgdotString:
			if len(MapResults) == 0 {
				continue
			}
			arg0 := MapResults[MapResultsIndex]
			MapResultsIndex = (MapResultsIndex + 1) % len(MapResults)
			arg0.String()
		case *NgoloFuzzOne_MapNgdotInit:
			if len(MapResults) == 0 {
				continue
			}
			arg0 := MapResults[MapResultsIndex]
			MapResultsIndex = (MapResultsIndex + 1) % len(MapResults)
			// Init returns a *Map; non-nil results join the pool.
			r0 := arg0.Init()
			if r0 != nil {
				MapResults = append(MapResults, r0)
			}
		case *NgoloFuzzOne_MapNgdotGet:
			if len(MapResults) == 0 {
				continue
			}
			arg0 := MapResults[MapResultsIndex]
			MapResultsIndex = (MapResultsIndex + 1) % len(MapResults)
			r0 := arg0.Get(a.MapNgdotGet.Key)
			VarResults = append(VarResults, &r0)
		case *NgoloFuzzOne_MapNgdotSet:
			if len(MapResults) == 0 {
				continue
			}
			arg0 := MapResults[MapResultsIndex]
			MapResultsIndex = (MapResultsIndex + 1) % len(MapResults)
			// Set additionally needs a previously produced Var value.
			if len(VarResults) == 0 {
				continue
			}
			arg2 := *VarResults[VarResultsIndex]
			VarResultsIndex = (VarResultsIndex + 1) % len(VarResults)
			arg0.Set(a.MapNgdotSet.Key, arg2)
		case *NgoloFuzzOne_MapNgdotAdd:
			if len(MapResults) == 0 {
				continue
			}
			arg0 := MapResults[MapResultsIndex]
			MapResultsIndex = (MapResultsIndex + 1) % len(MapResults)
			arg0.Add(a.MapNgdotAdd.Key, a.MapNgdotAdd.Delta)
		case *NgoloFuzzOne_MapNgdotAddFloat:
			if len(MapResults) == 0 {
				continue
			}
			arg0 := MapResults[MapResultsIndex]
			MapResultsIndex = (MapResultsIndex + 1) % len(MapResults)
			arg0.AddFloat(a.MapNgdotAddFloat.Key, a.MapNgdotAddFloat.Delta)
		case *NgoloFuzzOne_MapNgdotDelete:
			if len(MapResults) == 0 {
				continue
			}
			arg0 := MapResults[MapResultsIndex]
			MapResultsIndex = (MapResultsIndex + 1) % len(MapResults)
			arg0.Delete(a.MapNgdotDelete.Key)
		case *NgoloFuzzOne_StringNgdotValue:
			if len(StringResults) == 0 {
				continue
			}
			arg0 := StringResults[StringResultsIndex]
			StringResultsIndex = (StringResultsIndex + 1) % len(StringResults)
			arg0.Value()
		case *NgoloFuzzOne_StringNgdotString:
			if len(StringResults) == 0 {
				continue
			}
			arg0 := StringResults[StringResultsIndex]
			StringResultsIndex = (StringResultsIndex + 1) % len(StringResults)
			arg0.String()
		case *NgoloFuzzOne_StringNgdotSet:
			if len(StringResults) == 0 {
				continue
			}
			arg0 := StringResults[StringResultsIndex]
			StringResultsIndex = (StringResultsIndex + 1) % len(StringResults)
			arg0.Set(a.StringNgdotSet.Value)
		case *NgoloFuzzOne_Publish:
			if len(VarResults) == 0 {
				continue
			}
			arg1 := *VarResults[VarResultsIndex]
			VarResultsIndex = (VarResultsIndex + 1) % len(VarResults)
			expvar.Publish(a.Publish.Name, arg1)
		case *NgoloFuzzOne_Get:
			r0 := expvar.Get(a.Get.Name)
			VarResults = append(VarResults, &r0)
		case *NgoloFuzzOne_NewInt:
			r0 := expvar.NewInt(a.NewInt.Name)
			if r0 != nil {
				IntResults = append(IntResults, r0)
			}
		case *NgoloFuzzOne_NewFloat:
			r0 := expvar.NewFloat(a.NewFloat.Name)
			if r0 != nil {
				FloatResults = append(FloatResults, r0)
			}
		case *NgoloFuzzOne_NewMap:
			r0 := expvar.NewMap(a.NewMap.Name)
			if r0 != nil {
				MapResults = append(MapResults, r0)
			}
		case *NgoloFuzzOne_NewString:
			r0 := expvar.NewString(a.NewString.Name)
			if r0 != nil {
				StringResults = append(StringResults, r0)
			}
		case *NgoloFuzzOne_Handler:
			expvar.Handler()
		}
	}
	return 1
}
// PrintNG_List writes a Go-source transcript of the call list to w,
// mirroring the replay performed by FuzzNG_List: the *Nb counters track
// how many results of each type exist so far, and the *ResultsIndex
// cursors advance round-robin exactly as the runtime pools do, so the
// printed variable names (Int0, Map1, Var2, ...) line up with the calls
// FuzzNG_List actually makes.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	StringNb := 0
	StringResultsIndex := 0
	VarNb := 0
	VarResultsIndex := 0
	IntNb := 0
	IntResultsIndex := 0
	FloatNb := 0
	FloatResultsIndex := 0
	MapNb := 0
	MapResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_IntNgdotValue:
			if IntNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Int%d.Value()\n", IntResultsIndex))
			IntResultsIndex = (IntResultsIndex + 1) % IntNb
		case *NgoloFuzzOne_IntNgdotString:
			if IntNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Int%d.String()\n", IntResultsIndex))
			IntResultsIndex = (IntResultsIndex + 1) % IntNb
		case *NgoloFuzzOne_IntNgdotAdd:
			if IntNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Int%d.Add(%#+v)\n", IntResultsIndex, a.IntNgdotAdd.Delta))
			IntResultsIndex = (IntResultsIndex + 1) % IntNb
		case *NgoloFuzzOne_IntNgdotSet:
			if IntNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Int%d.Set(%#+v)\n", IntResultsIndex, a.IntNgdotSet.Value))
			IntResultsIndex = (IntResultsIndex + 1) % IntNb
		case *NgoloFuzzOne_FloatNgdotValue:
			if FloatNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Float%d.Value()\n", FloatResultsIndex))
			FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
		case *NgoloFuzzOne_FloatNgdotString:
			if FloatNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Float%d.String()\n", FloatResultsIndex))
			FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
		case *NgoloFuzzOne_FloatNgdotAdd:
			if FloatNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Float%d.Add(%#+v)\n", FloatResultsIndex, a.FloatNgdotAdd.Delta))
			FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
		case *NgoloFuzzOne_FloatNgdotSet:
			if FloatNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Float%d.Set(%#+v)\n", FloatResultsIndex, a.FloatNgdotSet.Value))
			FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
		case *NgoloFuzzOne_MapNgdotString:
			if MapNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Map%d.String()\n", MapResultsIndex))
			MapResultsIndex = (MapResultsIndex + 1) % MapNb
		case *NgoloFuzzOne_MapNgdotInit:
			if MapNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Map%d := Map%d.Init()\n", MapNb, MapResultsIndex))
			MapNb = MapNb + 1
			// NOTE(review): the cursor here advances modulo the
			// already-incremented MapNb, whereas FuzzNG_List advances
			// modulo the pre-append pool length; the printed reproducer
			// may drift from the executed sequence after a Map.Init —
			// confirm against the generator.
			MapResultsIndex = (MapResultsIndex + 1) % MapNb
		case *NgoloFuzzOne_MapNgdotGet:
			if MapNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Var%d := Map%d.Get(%#+v)\n", VarNb, MapResultsIndex, a.MapNgdotGet.Key))
			VarNb = VarNb + 1
			MapResultsIndex = (MapResultsIndex + 1) % MapNb
		case *NgoloFuzzOne_MapNgdotSet:
			if MapNb == 0 {
				continue
			}
			if VarNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Map%d.Set(%#+v, Var%d)\n", MapResultsIndex, a.MapNgdotSet.Key, (VarResultsIndex+0)%VarNb))
			MapResultsIndex = (MapResultsIndex + 1) % MapNb
			VarResultsIndex = (VarResultsIndex + 1) % VarNb
		case *NgoloFuzzOne_MapNgdotAdd:
			if MapNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Map%d.Add(%#+v, %#+v)\n", MapResultsIndex, a.MapNgdotAdd.Key, a.MapNgdotAdd.Delta))
			MapResultsIndex = (MapResultsIndex + 1) % MapNb
		case *NgoloFuzzOne_MapNgdotAddFloat:
			if MapNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Map%d.AddFloat(%#+v, %#+v)\n", MapResultsIndex, a.MapNgdotAddFloat.Key, a.MapNgdotAddFloat.Delta))
			MapResultsIndex = (MapResultsIndex + 1) % MapNb
		case *NgoloFuzzOne_MapNgdotDelete:
			if MapNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Map%d.Delete(%#+v)\n", MapResultsIndex, a.MapNgdotDelete.Key))
			MapResultsIndex = (MapResultsIndex + 1) % MapNb
		case *NgoloFuzzOne_StringNgdotValue:
			if StringNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("String%d.Value()\n", StringResultsIndex))
			StringResultsIndex = (StringResultsIndex + 1) % StringNb
		case *NgoloFuzzOne_StringNgdotString:
			if StringNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("String%d.String()\n", StringResultsIndex))
			StringResultsIndex = (StringResultsIndex + 1) % StringNb
		case *NgoloFuzzOne_StringNgdotSet:
			if StringNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("String%d.Set(%#+v)\n", StringResultsIndex, a.StringNgdotSet.Value))
			StringResultsIndex = (StringResultsIndex + 1) % StringNb
		case *NgoloFuzzOne_Publish:
			if VarNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("expvar.Publish(%#+v, Var%d)\n", a.Publish.Name, (VarResultsIndex+0)%VarNb))
			VarResultsIndex = (VarResultsIndex + 1) % VarNb
		case *NgoloFuzzOne_Get:
			w.WriteString(fmt.Sprintf("Var%d := expvar.Get(%#+v)\n", VarNb, a.Get.Name))
			VarNb = VarNb + 1
		case *NgoloFuzzOne_NewInt:
			w.WriteString(fmt.Sprintf("Int%d := expvar.NewInt(%#+v)\n", IntNb, a.NewInt.Name))
			IntNb = IntNb + 1
		case *NgoloFuzzOne_NewFloat:
			w.WriteString(fmt.Sprintf("Float%d := expvar.NewFloat(%#+v)\n", FloatNb, a.NewFloat.Name))
			FloatNb = FloatNb + 1
		case *NgoloFuzzOne_NewMap:
			w.WriteString(fmt.Sprintf("Map%d := expvar.NewMap(%#+v)\n", MapNb, a.NewMap.Name))
			MapNb = MapNb + 1
		case *NgoloFuzzOne_NewString:
			w.WriteString(fmt.Sprintf("String%d := expvar.NewString(%#+v)\n", StringNb, a.NewString.Name))
			StringNb = StringNb + 1
		case *NgoloFuzzOne_Handler:
			w.WriteString(fmt.Sprintf("expvar.Handler()\n"))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_expvar
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type IntNgdotValueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotValueArgs) Reset() {
*x = IntNgdotValueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotValueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotValueArgs) ProtoMessage() {}
func (x *IntNgdotValueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotValueArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotValueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type IntNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotStringArgs) Reset() {
*x = IntNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotStringArgs) ProtoMessage() {}
func (x *IntNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type IntNgdotAddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Delta int64 `protobuf:"varint,1,opt,name=delta,proto3" json:"delta,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotAddArgs) Reset() {
*x = IntNgdotAddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotAddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotAddArgs) ProtoMessage() {}
func (x *IntNgdotAddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotAddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *IntNgdotAddArgs) GetDelta() int64 {
if x != nil {
return x.Delta
}
return 0
}
type IntNgdotSetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotSetArgs) Reset() {
*x = IntNgdotSetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotSetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotSetArgs) ProtoMessage() {}
func (x *IntNgdotSetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotSetArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotSetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *IntNgdotSetArgs) GetValue() int64 {
if x != nil {
return x.Value
}
return 0
}
type FloatNgdotValueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FloatNgdotValueArgs) Reset() {
*x = FloatNgdotValueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FloatNgdotValueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FloatNgdotValueArgs) ProtoMessage() {}
func (x *FloatNgdotValueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotValueArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotValueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type FloatNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FloatNgdotStringArgs) Reset() {
*x = FloatNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FloatNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FloatNgdotStringArgs) ProtoMessage() {}
func (x *FloatNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
type FloatNgdotAddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Delta float64 `protobuf:"fixed64,1,opt,name=delta,proto3" json:"delta,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FloatNgdotAddArgs) Reset() {
*x = FloatNgdotAddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FloatNgdotAddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FloatNgdotAddArgs) ProtoMessage() {}
func (x *FloatNgdotAddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotAddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *FloatNgdotAddArgs) GetDelta() float64 {
if x != nil {
return x.Delta
}
return 0
}
type FloatNgdotSetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FloatNgdotSetArgs) Reset() {
*x = FloatNgdotSetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FloatNgdotSetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FloatNgdotSetArgs) ProtoMessage() {}
func (x *FloatNgdotSetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *FloatNgdotSetArgs) GetValue() float64 {
if x != nil {
return x.Value
}
return 0
}
type MapNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MapNgdotStringArgs) Reset() {
*x = MapNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MapNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MapNgdotStringArgs) ProtoMessage() {}
func (x *MapNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MapNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*MapNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
type MapNgdotInitArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MapNgdotInitArgs) Reset() {
*x = MapNgdotInitArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MapNgdotInitArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MapNgdotInitArgs) ProtoMessage() {}
func (x *MapNgdotInitArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MapNgdotInitArgs.ProtoReflect.Descriptor instead.
func (*MapNgdotInitArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
type MapNgdotGetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MapNgdotGetArgs) Reset() {
*x = MapNgdotGetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MapNgdotGetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MapNgdotGetArgs) ProtoMessage() {}
func (x *MapNgdotGetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MapNgdotGetArgs.ProtoReflect.Descriptor instead.
func (*MapNgdotGetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *MapNgdotGetArgs) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
type MapNgdotSetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MapNgdotSetArgs) Reset() {
*x = MapNgdotSetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MapNgdotSetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MapNgdotSetArgs) ProtoMessage() {}
func (x *MapNgdotSetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MapNgdotSetArgs.ProtoReflect.Descriptor instead.
func (*MapNgdotSetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *MapNgdotSetArgs) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
type MapNgdotAddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Delta int64 `protobuf:"varint,2,opt,name=delta,proto3" json:"delta,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MapNgdotAddArgs) Reset() {
*x = MapNgdotAddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MapNgdotAddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MapNgdotAddArgs) ProtoMessage() {}
func (x *MapNgdotAddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MapNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*MapNgdotAddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
func (x *MapNgdotAddArgs) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
func (x *MapNgdotAddArgs) GetDelta() int64 {
if x != nil {
return x.Delta
}
return 0
}
type MapNgdotAddFloatArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Delta float64 `protobuf:"fixed64,2,opt,name=delta,proto3" json:"delta,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MapNgdotAddFloatArgs) Reset() {
*x = MapNgdotAddFloatArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MapNgdotAddFloatArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MapNgdotAddFloatArgs) ProtoMessage() {}
func (x *MapNgdotAddFloatArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MapNgdotAddFloatArgs.ProtoReflect.Descriptor instead.
func (*MapNgdotAddFloatArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
func (x *MapNgdotAddFloatArgs) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
func (x *MapNgdotAddFloatArgs) GetDelta() float64 {
if x != nil {
return x.Delta
}
return 0
}
// MapNgdotDeleteArgs is the generated type for the
// ngolofuzz.MapNgdotDeleteArgs message: a single string key.
type MapNgdotDeleteArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Key           string                 `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *MapNgdotDeleteArgs) Reset() {
	*x = MapNgdotDeleteArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[14])
}

// String renders the message with the protobuf text formatter.
func (x *MapNgdotDeleteArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*MapNgdotDeleteArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *MapNgdotDeleteArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use MapNgdotDeleteArgs.ProtoReflect.Descriptor instead.
func (*MapNgdotDeleteArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetKey returns the key field, or "" for a nil receiver.
func (x *MapNgdotDeleteArgs) GetKey() string {
	if x == nil {
		return ""
	}
	return x.Key
}
// StringNgdotValueArgs is the generated type for the
// ngolofuzz.StringNgdotValueArgs message; it has no fields.
type StringNgdotValueArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *StringNgdotValueArgs) Reset() {
	*x = StringNgdotValueArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[15])
}

// String renders the message with the protobuf text formatter.
func (x *StringNgdotValueArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*StringNgdotValueArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *StringNgdotValueArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use StringNgdotValueArgs.ProtoReflect.Descriptor instead.
func (*StringNgdotValueArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// StringNgdotStringArgs is the generated type for the
// ngolofuzz.StringNgdotStringArgs message; it has no fields.
type StringNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *StringNgdotStringArgs) Reset() {
	*x = StringNgdotStringArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[16])
}

// String renders the message with the protobuf text formatter.
func (x *StringNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*StringNgdotStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *StringNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use StringNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*StringNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// StringNgdotSetArgs is the generated type for the
// ngolofuzz.StringNgdotSetArgs message: a single string value.
type StringNgdotSetArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Value         string                 `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *StringNgdotSetArgs) Reset() {
	*x = StringNgdotSetArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[17])
}

// String renders the message with the protobuf text formatter.
func (x *StringNgdotSetArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*StringNgdotSetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *StringNgdotSetArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use StringNgdotSetArgs.ProtoReflect.Descriptor instead.
func (*StringNgdotSetArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}

// GetValue returns the value field, or "" for a nil receiver.
func (x *StringNgdotSetArgs) GetValue() string {
	if x == nil {
		return ""
	}
	return x.Value
}
// PublishArgs is the generated type for the ngolofuzz.PublishArgs message:
// a single string name.
type PublishArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *PublishArgs) Reset() {
	*x = PublishArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[18])
}

// String renders the message with the protobuf text formatter.
func (x *PublishArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PublishArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *PublishArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use PublishArgs.ProtoReflect.Descriptor instead.
func (*PublishArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}

// GetName returns the name field, or "" for a nil receiver.
func (x *PublishArgs) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}
// GetArgs is the generated type for the ngolofuzz.GetArgs message:
// a single string name.
type GetArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *GetArgs) Reset() {
	*x = GetArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[19])
}

// String renders the message with the protobuf text formatter.
func (x *GetArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *GetArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[19]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use GetArgs.ProtoReflect.Descriptor instead.
func (*GetArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}

// GetName returns the name field, or "" for a nil receiver.
func (x *GetArgs) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}
// NewIntArgs is the generated type for the ngolofuzz.NewIntArgs message:
// a single string name.
type NewIntArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *NewIntArgs) Reset() {
	*x = NewIntArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[20])
}

// String renders the message with the protobuf text formatter.
func (x *NewIntArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewIntArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *NewIntArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[20]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use NewIntArgs.ProtoReflect.Descriptor instead.
func (*NewIntArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}

// GetName returns the name field, or "" for a nil receiver.
func (x *NewIntArgs) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}
// NewFloatArgs is the generated type for the ngolofuzz.NewFloatArgs message:
// a single string name.
type NewFloatArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *NewFloatArgs) Reset() {
	*x = NewFloatArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[21])
}

// String renders the message with the protobuf text formatter.
func (x *NewFloatArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewFloatArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *NewFloatArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[21]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use NewFloatArgs.ProtoReflect.Descriptor instead.
func (*NewFloatArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}

// GetName returns the name field, or "" for a nil receiver.
func (x *NewFloatArgs) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}
// NewMapArgs is the generated type for the ngolofuzz.NewMapArgs message:
// a single string name.
type NewMapArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *NewMapArgs) Reset() {
	*x = NewMapArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[22])
}

// String renders the message with the protobuf text formatter.
func (x *NewMapArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewMapArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *NewMapArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[22]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use NewMapArgs.ProtoReflect.Descriptor instead.
func (*NewMapArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}

// GetName returns the name field, or "" for a nil receiver.
func (x *NewMapArgs) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}
// NewStringArgs is the generated type for the ngolofuzz.NewStringArgs message:
// a single string name.
type NewStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *NewStringArgs) Reset() {
	*x = NewStringArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[23])
}

// String renders the message with the protobuf text formatter.
func (x *NewStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *NewStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[23]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use NewStringArgs.ProtoReflect.Descriptor instead.
func (*NewStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}

// GetName returns the name field, or "" for a nil receiver.
func (x *NewStringArgs) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}
// HandlerArgs is the generated type for the ngolofuzz.HandlerArgs message;
// it has no fields.
type HandlerArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *HandlerArgs) Reset() {
	*x = HandlerArgs{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[24])
}

// String renders the message with the protobuf text formatter.
func (x *HandlerArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*HandlerArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *HandlerArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[24]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use HandlerArgs.ProtoReflect.Descriptor instead.
func (*HandlerArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// NgoloFuzzOne is the generated type for the ngolofuzz.NgoloFuzzOne message.
// It wraps exactly one of the per-call argument messages in the "item" oneof.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_IntNgdotValue
	//	*NgoloFuzzOne_IntNgdotString
	//	*NgoloFuzzOne_IntNgdotAdd
	//	*NgoloFuzzOne_IntNgdotSet
	//	*NgoloFuzzOne_FloatNgdotValue
	//	*NgoloFuzzOne_FloatNgdotString
	//	*NgoloFuzzOne_FloatNgdotAdd
	//	*NgoloFuzzOne_FloatNgdotSet
	//	*NgoloFuzzOne_MapNgdotString
	//	*NgoloFuzzOne_MapNgdotInit
	//	*NgoloFuzzOne_MapNgdotGet
	//	*NgoloFuzzOne_MapNgdotSet
	//	*NgoloFuzzOne_MapNgdotAdd
	//	*NgoloFuzzOne_MapNgdotAddFloat
	//	*NgoloFuzzOne_MapNgdotDelete
	//	*NgoloFuzzOne_StringNgdotValue
	//	*NgoloFuzzOne_StringNgdotString
	//	*NgoloFuzzOne_StringNgdotSet
	//	*NgoloFuzzOne_Publish
	//	*NgoloFuzzOne_Get
	//	*NgoloFuzzOne_NewInt
	//	*NgoloFuzzOne_NewFloat
	//	*NgoloFuzzOne_NewMap
	//	*NgoloFuzzOne_NewString
	//	*NgoloFuzzOne_Handler
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[25])
}

// String renders the message with the protobuf text formatter.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[25]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}

// GetItem returns the populated oneof wrapper, or nil for a nil receiver or
// an unset oneof.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x == nil {
		return nil
	}
	return x.Item
}

// Each GetXxx below returns the corresponding oneof member when it is the
// one currently set, and nil otherwise (including on a nil receiver).

func (x *NgoloFuzzOne) GetIntNgdotValue() *IntNgdotValueArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_IntNgdotValue); ok {
		return v.IntNgdotValue
	}
	return nil
}

func (x *NgoloFuzzOne) GetIntNgdotString() *IntNgdotStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_IntNgdotString); ok {
		return v.IntNgdotString
	}
	return nil
}

func (x *NgoloFuzzOne) GetIntNgdotAdd() *IntNgdotAddArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_IntNgdotAdd); ok {
		return v.IntNgdotAdd
	}
	return nil
}

func (x *NgoloFuzzOne) GetIntNgdotSet() *IntNgdotSetArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_IntNgdotSet); ok {
		return v.IntNgdotSet
	}
	return nil
}

func (x *NgoloFuzzOne) GetFloatNgdotValue() *FloatNgdotValueArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_FloatNgdotValue); ok {
		return v.FloatNgdotValue
	}
	return nil
}

func (x *NgoloFuzzOne) GetFloatNgdotString() *FloatNgdotStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_FloatNgdotString); ok {
		return v.FloatNgdotString
	}
	return nil
}

func (x *NgoloFuzzOne) GetFloatNgdotAdd() *FloatNgdotAddArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_FloatNgdotAdd); ok {
		return v.FloatNgdotAdd
	}
	return nil
}

func (x *NgoloFuzzOne) GetFloatNgdotSet() *FloatNgdotSetArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_FloatNgdotSet); ok {
		return v.FloatNgdotSet
	}
	return nil
}

func (x *NgoloFuzzOne) GetMapNgdotString() *MapNgdotStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_MapNgdotString); ok {
		return v.MapNgdotString
	}
	return nil
}

func (x *NgoloFuzzOne) GetMapNgdotInit() *MapNgdotInitArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_MapNgdotInit); ok {
		return v.MapNgdotInit
	}
	return nil
}

func (x *NgoloFuzzOne) GetMapNgdotGet() *MapNgdotGetArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_MapNgdotGet); ok {
		return v.MapNgdotGet
	}
	return nil
}

func (x *NgoloFuzzOne) GetMapNgdotSet() *MapNgdotSetArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_MapNgdotSet); ok {
		return v.MapNgdotSet
	}
	return nil
}

func (x *NgoloFuzzOne) GetMapNgdotAdd() *MapNgdotAddArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_MapNgdotAdd); ok {
		return v.MapNgdotAdd
	}
	return nil
}

func (x *NgoloFuzzOne) GetMapNgdotAddFloat() *MapNgdotAddFloatArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_MapNgdotAddFloat); ok {
		return v.MapNgdotAddFloat
	}
	return nil
}

func (x *NgoloFuzzOne) GetMapNgdotDelete() *MapNgdotDeleteArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_MapNgdotDelete); ok {
		return v.MapNgdotDelete
	}
	return nil
}

func (x *NgoloFuzzOne) GetStringNgdotValue() *StringNgdotValueArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_StringNgdotValue); ok {
		return v.StringNgdotValue
	}
	return nil
}

func (x *NgoloFuzzOne) GetStringNgdotString() *StringNgdotStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_StringNgdotString); ok {
		return v.StringNgdotString
	}
	return nil
}

func (x *NgoloFuzzOne) GetStringNgdotSet() *StringNgdotSetArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_StringNgdotSet); ok {
		return v.StringNgdotSet
	}
	return nil
}

func (x *NgoloFuzzOne) GetPublish() *PublishArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_Publish); ok {
		return v.Publish
	}
	return nil
}

func (x *NgoloFuzzOne) GetGet() *GetArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_Get); ok {
		return v.Get
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewInt() *NewIntArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_NewInt); ok {
		return v.NewInt
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewFloat() *NewFloatArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_NewFloat); ok {
		return v.NewFloat
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewMap() *NewMapArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_NewMap); ok {
		return v.NewMap
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewString() *NewStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_NewString); ok {
		return v.NewString
	}
	return nil
}

func (x *NgoloFuzzOne) GetHandler() *HandlerArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_Handler); ok {
		return v.Handler
	}
	return nil
}

// isNgoloFuzzOne_Item is the marker interface implemented by every wrapper
// type that may occupy the "item" oneof.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// One wrapper struct per oneof member; the protobuf tag fixes the field
// number and wire type of each alternative.

type NgoloFuzzOne_IntNgdotValue struct {
	IntNgdotValue *IntNgdotValueArgs `protobuf:"bytes,1,opt,name=IntNgdotValue,proto3,oneof"`
}

type NgoloFuzzOne_IntNgdotString struct {
	IntNgdotString *IntNgdotStringArgs `protobuf:"bytes,2,opt,name=IntNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_IntNgdotAdd struct {
	IntNgdotAdd *IntNgdotAddArgs `protobuf:"bytes,3,opt,name=IntNgdotAdd,proto3,oneof"`
}

type NgoloFuzzOne_IntNgdotSet struct {
	IntNgdotSet *IntNgdotSetArgs `protobuf:"bytes,4,opt,name=IntNgdotSet,proto3,oneof"`
}

type NgoloFuzzOne_FloatNgdotValue struct {
	FloatNgdotValue *FloatNgdotValueArgs `protobuf:"bytes,5,opt,name=FloatNgdotValue,proto3,oneof"`
}

type NgoloFuzzOne_FloatNgdotString struct {
	FloatNgdotString *FloatNgdotStringArgs `protobuf:"bytes,6,opt,name=FloatNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_FloatNgdotAdd struct {
	FloatNgdotAdd *FloatNgdotAddArgs `protobuf:"bytes,7,opt,name=FloatNgdotAdd,proto3,oneof"`
}

type NgoloFuzzOne_FloatNgdotSet struct {
	FloatNgdotSet *FloatNgdotSetArgs `protobuf:"bytes,8,opt,name=FloatNgdotSet,proto3,oneof"`
}

type NgoloFuzzOne_MapNgdotString struct {
	MapNgdotString *MapNgdotStringArgs `protobuf:"bytes,9,opt,name=MapNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_MapNgdotInit struct {
	MapNgdotInit *MapNgdotInitArgs `protobuf:"bytes,10,opt,name=MapNgdotInit,proto3,oneof"`
}

type NgoloFuzzOne_MapNgdotGet struct {
	MapNgdotGet *MapNgdotGetArgs `protobuf:"bytes,11,opt,name=MapNgdotGet,proto3,oneof"`
}

type NgoloFuzzOne_MapNgdotSet struct {
	MapNgdotSet *MapNgdotSetArgs `protobuf:"bytes,12,opt,name=MapNgdotSet,proto3,oneof"`
}

type NgoloFuzzOne_MapNgdotAdd struct {
	MapNgdotAdd *MapNgdotAddArgs `protobuf:"bytes,13,opt,name=MapNgdotAdd,proto3,oneof"`
}

type NgoloFuzzOne_MapNgdotAddFloat struct {
	MapNgdotAddFloat *MapNgdotAddFloatArgs `protobuf:"bytes,14,opt,name=MapNgdotAddFloat,proto3,oneof"`
}

type NgoloFuzzOne_MapNgdotDelete struct {
	MapNgdotDelete *MapNgdotDeleteArgs `protobuf:"bytes,15,opt,name=MapNgdotDelete,proto3,oneof"`
}

type NgoloFuzzOne_StringNgdotValue struct {
	StringNgdotValue *StringNgdotValueArgs `protobuf:"bytes,16,opt,name=StringNgdotValue,proto3,oneof"`
}

type NgoloFuzzOne_StringNgdotString struct {
	StringNgdotString *StringNgdotStringArgs `protobuf:"bytes,17,opt,name=StringNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_StringNgdotSet struct {
	StringNgdotSet *StringNgdotSetArgs `protobuf:"bytes,18,opt,name=StringNgdotSet,proto3,oneof"`
}

type NgoloFuzzOne_Publish struct {
	Publish *PublishArgs `protobuf:"bytes,19,opt,name=Publish,proto3,oneof"`
}

type NgoloFuzzOne_Get struct {
	Get *GetArgs `protobuf:"bytes,20,opt,name=Get,proto3,oneof"`
}

type NgoloFuzzOne_NewInt struct {
	NewInt *NewIntArgs `protobuf:"bytes,21,opt,name=NewInt,proto3,oneof"`
}

type NgoloFuzzOne_NewFloat struct {
	NewFloat *NewFloatArgs `protobuf:"bytes,22,opt,name=NewFloat,proto3,oneof"`
}

type NgoloFuzzOne_NewMap struct {
	NewMap *NewMapArgs `protobuf:"bytes,23,opt,name=NewMap,proto3,oneof"`
}

type NgoloFuzzOne_NewString struct {
	NewString *NewStringArgs `protobuf:"bytes,24,opt,name=NewString,proto3,oneof"`
}

type NgoloFuzzOne_Handler struct {
	Handler *HandlerArgs `protobuf:"bytes,25,opt,name=Handler,proto3,oneof"`
}

func (*NgoloFuzzOne_IntNgdotValue) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_IntNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_IntNgdotAdd) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_IntNgdotSet) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_FloatNgdotValue) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_FloatNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_FloatNgdotAdd) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_FloatNgdotSet) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MapNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MapNgdotInit) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MapNgdotGet) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MapNgdotSet) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MapNgdotAdd) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MapNgdotAddFloat) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MapNgdotDelete) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_StringNgdotValue) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_StringNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_StringNgdotSet) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Publish) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Get) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewInt) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewFloat) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewMap) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Handler) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the generated type for the ngolofuzz.NgoloFuzzAny message.
// Its "item" oneof carries exactly one scalar value.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[26])
}

// String renders the message with the protobuf text formatter.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[26]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}

// GetItem returns the populated oneof wrapper, or nil for a nil receiver or
// an unset oneof.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x == nil {
		return nil
	}
	return x.Item
}

// Each GetXxx below returns the corresponding oneof member when it is the
// one currently set, and the zero value otherwise (including on nil).

func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if v, ok := x.GetItem().(*NgoloFuzzAny_DoubleArgs); ok {
		return v.DoubleArgs
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if v, ok := x.GetItem().(*NgoloFuzzAny_Int64Args); ok {
		return v.Int64Args
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if v, ok := x.GetItem().(*NgoloFuzzAny_BoolArgs); ok {
		return v.BoolArgs
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if v, ok := x.GetItem().(*NgoloFuzzAny_StringArgs); ok {
		return v.StringArgs
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if v, ok := x.GetItem().(*NgoloFuzzAny_BytesArgs); ok {
		return v.BytesArgs
	}
	return nil
}

// isNgoloFuzzAny_Item is the marker interface implemented by every wrapper
// type that may occupy the "item" oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated type for the ngolofuzz.NgoloFuzzList
// message: a repeated sequence of NgoloFuzzOne items.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message and re-binds its message info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[27])
}

// String renders the message with the protobuf text formatter.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// initializing its message info on first use.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[27]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}

// GetList returns the list field, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x == nil {
		return nil
	}
	return x.List
}
// File_ngolofuzz_proto is the compiled FileDescriptor for ngolofuzz.proto
// (populated elsewhere in this generated file).
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto. The bytes are wire-format protobuf data and must not be
// edited by hand.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x13\n" +
	"\x11IntNgdotValueArgs\"\x14\n" +
	"\x12IntNgdotStringArgs\"'\n" +
	"\x0fIntNgdotAddArgs\x12\x14\n" +
	"\x05delta\x18\x01 \x01(\x03R\x05delta\"'\n" +
	"\x0fIntNgdotSetArgs\x12\x14\n" +
	"\x05value\x18\x01 \x01(\x03R\x05value\"\x15\n" +
	"\x13FloatNgdotValueArgs\"\x16\n" +
	"\x14FloatNgdotStringArgs\")\n" +
	"\x11FloatNgdotAddArgs\x12\x14\n" +
	"\x05delta\x18\x01 \x01(\x01R\x05delta\")\n" +
	"\x11FloatNgdotSetArgs\x12\x14\n" +
	"\x05value\x18\x01 \x01(\x01R\x05value\"\x14\n" +
	"\x12MapNgdotStringArgs\"\x12\n" +
	"\x10MapNgdotInitArgs\"#\n" +
	"\x0fMapNgdotGetArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\"#\n" +
	"\x0fMapNgdotSetArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\"9\n" +
	"\x0fMapNgdotAddArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05delta\x18\x02 \x01(\x03R\x05delta\">\n" +
	"\x14MapNgdotAddFloatArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05delta\x18\x02 \x01(\x01R\x05delta\"&\n" +
	"\x12MapNgdotDeleteArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\"\x16\n" +
	"\x14StringNgdotValueArgs\"\x17\n" +
	"\x15StringNgdotStringArgs\"*\n" +
	"\x12StringNgdotSetArgs\x12\x14\n" +
	"\x05value\x18\x01 \x01(\tR\x05value\"!\n" +
	"\vPublishArgs\x12\x12\n" +
	"\x04name\x18\x01 \x01(\tR\x04name\"\x1d\n" +
	"\aGetArgs\x12\x12\n" +
	"\x04name\x18\x01 \x01(\tR\x04name\" \n" +
	"\n" +
	"NewIntArgs\x12\x12\n" +
	"\x04name\x18\x01 \x01(\tR\x04name\"\"\n" +
	"\fNewFloatArgs\x12\x12\n" +
	"\x04name\x18\x01 \x01(\tR\x04name\" \n" +
	"\n" +
	"NewMapArgs\x12\x12\n" +
	"\x04name\x18\x01 \x01(\tR\x04name\"#\n" +
	"\rNewStringArgs\x12\x12\n" +
	"\x04name\x18\x01 \x01(\tR\x04name\"\r\n" +
	"\vHandlerArgs\"\xfd\f\n" +
	"\fNgoloFuzzOne\x12D\n" +
	"\rIntNgdotValue\x18\x01 \x01(\v2\x1c.ngolofuzz.IntNgdotValueArgsH\x00R\rIntNgdotValue\x12G\n" +
	"\x0eIntNgdotString\x18\x02 \x01(\v2\x1d.ngolofuzz.IntNgdotStringArgsH\x00R\x0eIntNgdotString\x12>\n" +
	"\vIntNgdotAdd\x18\x03 \x01(\v2\x1a.ngolofuzz.IntNgdotAddArgsH\x00R\vIntNgdotAdd\x12>\n" +
	"\vIntNgdotSet\x18\x04 \x01(\v2\x1a.ngolofuzz.IntNgdotSetArgsH\x00R\vIntNgdotSet\x12J\n" +
	"\x0fFloatNgdotValue\x18\x05 \x01(\v2\x1e.ngolofuzz.FloatNgdotValueArgsH\x00R\x0fFloatNgdotValue\x12M\n" +
	"\x10FloatNgdotString\x18\x06 \x01(\v2\x1f.ngolofuzz.FloatNgdotStringArgsH\x00R\x10FloatNgdotString\x12D\n" +
	"\rFloatNgdotAdd\x18\a \x01(\v2\x1c.ngolofuzz.FloatNgdotAddArgsH\x00R\rFloatNgdotAdd\x12D\n" +
	"\rFloatNgdotSet\x18\b \x01(\v2\x1c.ngolofuzz.FloatNgdotSetArgsH\x00R\rFloatNgdotSet\x12G\n" +
	"\x0eMapNgdotString\x18\t \x01(\v2\x1d.ngolofuzz.MapNgdotStringArgsH\x00R\x0eMapNgdotString\x12A\n" +
	"\fMapNgdotInit\x18\n" +
	" \x01(\v2\x1b.ngolofuzz.MapNgdotInitArgsH\x00R\fMapNgdotInit\x12>\n" +
	"\vMapNgdotGet\x18\v \x01(\v2\x1a.ngolofuzz.MapNgdotGetArgsH\x00R\vMapNgdotGet\x12>\n" +
	"\vMapNgdotSet\x18\f \x01(\v2\x1a.ngolofuzz.MapNgdotSetArgsH\x00R\vMapNgdotSet\x12>\n" +
	"\vMapNgdotAdd\x18\r \x01(\v2\x1a.ngolofuzz.MapNgdotAddArgsH\x00R\vMapNgdotAdd\x12M\n" +
	"\x10MapNgdotAddFloat\x18\x0e \x01(\v2\x1f.ngolofuzz.MapNgdotAddFloatArgsH\x00R\x10MapNgdotAddFloat\x12G\n" +
	"\x0eMapNgdotDelete\x18\x0f \x01(\v2\x1d.ngolofuzz.MapNgdotDeleteArgsH\x00R\x0eMapNgdotDelete\x12M\n" +
	"\x10StringNgdotValue\x18\x10 \x01(\v2\x1f.ngolofuzz.StringNgdotValueArgsH\x00R\x10StringNgdotValue\x12P\n" +
	"\x11StringNgdotString\x18\x11 \x01(\v2 .ngolofuzz.StringNgdotStringArgsH\x00R\x11StringNgdotString\x12G\n" +
	"\x0eStringNgdotSet\x18\x12 \x01(\v2\x1d.ngolofuzz.StringNgdotSetArgsH\x00R\x0eStringNgdotSet\x122\n" +
	"\aPublish\x18\x13 \x01(\v2\x16.ngolofuzz.PublishArgsH\x00R\aPublish\x12&\n" +
	"\x03Get\x18\x14 \x01(\v2\x12.ngolofuzz.GetArgsH\x00R\x03Get\x12/\n" +
	"\x06NewInt\x18\x15 \x01(\v2\x15.ngolofuzz.NewIntArgsH\x00R\x06NewInt\x125\n" +
	"\bNewFloat\x18\x16 \x01(\v2\x17.ngolofuzz.NewFloatArgsH\x00R\bNewFloat\x12/\n" +
	"\x06NewMap\x18\x17 \x01(\v2\x15.ngolofuzz.NewMapArgsH\x00R\x06NewMap\x128\n" +
	"\tNewString\x18\x18 \x01(\v2\x18.ngolofuzz.NewStringArgsH\x00R\tNewString\x122\n" +
	"\aHandler\x18\x19 \x01(\v2\x16.ngolofuzz.HandlerArgsH\x00R\aHandlerB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x13Z\x11./;fuzz_ng_expvarb\x06proto3"
// Lazily-computed gzip form of the raw descriptor, built at most once by
// file_ngolofuzz_proto_rawDescGZIP.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP returns the gzip-compressed raw file
// descriptor, compressing it exactly once on first call.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		// View the descriptor string as a byte slice without copying.
		raw := unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc))
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(raw)
	})
	return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds one MessageInfo slot per message
// declared in ngolofuzz.proto (28 in total).
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 28)
// file_ngolofuzz_proto_goTypes maps each descriptor message index to its
// generated Go type; file_ngolofuzz_proto_depIdxs refers to these indices.
var file_ngolofuzz_proto_goTypes = []any{
	(*IntNgdotValueArgs)(nil),     // 0: ngolofuzz.IntNgdotValueArgs
	(*IntNgdotStringArgs)(nil),    // 1: ngolofuzz.IntNgdotStringArgs
	(*IntNgdotAddArgs)(nil),       // 2: ngolofuzz.IntNgdotAddArgs
	(*IntNgdotSetArgs)(nil),       // 3: ngolofuzz.IntNgdotSetArgs
	(*FloatNgdotValueArgs)(nil),   // 4: ngolofuzz.FloatNgdotValueArgs
	(*FloatNgdotStringArgs)(nil),  // 5: ngolofuzz.FloatNgdotStringArgs
	(*FloatNgdotAddArgs)(nil),     // 6: ngolofuzz.FloatNgdotAddArgs
	(*FloatNgdotSetArgs)(nil),     // 7: ngolofuzz.FloatNgdotSetArgs
	(*MapNgdotStringArgs)(nil),    // 8: ngolofuzz.MapNgdotStringArgs
	(*MapNgdotInitArgs)(nil),      // 9: ngolofuzz.MapNgdotInitArgs
	(*MapNgdotGetArgs)(nil),       // 10: ngolofuzz.MapNgdotGetArgs
	(*MapNgdotSetArgs)(nil),       // 11: ngolofuzz.MapNgdotSetArgs
	(*MapNgdotAddArgs)(nil),       // 12: ngolofuzz.MapNgdotAddArgs
	(*MapNgdotAddFloatArgs)(nil),  // 13: ngolofuzz.MapNgdotAddFloatArgs
	(*MapNgdotDeleteArgs)(nil),    // 14: ngolofuzz.MapNgdotDeleteArgs
	(*StringNgdotValueArgs)(nil),  // 15: ngolofuzz.StringNgdotValueArgs
	(*StringNgdotStringArgs)(nil), // 16: ngolofuzz.StringNgdotStringArgs
	(*StringNgdotSetArgs)(nil),    // 17: ngolofuzz.StringNgdotSetArgs
	(*PublishArgs)(nil),           // 18: ngolofuzz.PublishArgs
	(*GetArgs)(nil),               // 19: ngolofuzz.GetArgs
	(*NewIntArgs)(nil),            // 20: ngolofuzz.NewIntArgs
	(*NewFloatArgs)(nil),          // 21: ngolofuzz.NewFloatArgs
	(*NewMapArgs)(nil),            // 22: ngolofuzz.NewMapArgs
	(*NewStringArgs)(nil),         // 23: ngolofuzz.NewStringArgs
	(*HandlerArgs)(nil),           // 24: ngolofuzz.HandlerArgs
	(*NgoloFuzzOne)(nil),          // 25: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),          // 26: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),         // 27: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.IntNgdotValue:type_name -> ngolofuzz.IntNgdotValueArgs
1, // 1: ngolofuzz.NgoloFuzzOne.IntNgdotString:type_name -> ngolofuzz.IntNgdotStringArgs
2, // 2: ngolofuzz.NgoloFuzzOne.IntNgdotAdd:type_name -> ngolofuzz.IntNgdotAddArgs
3, // 3: ngolofuzz.NgoloFuzzOne.IntNgdotSet:type_name -> ngolofuzz.IntNgdotSetArgs
4, // 4: ngolofuzz.NgoloFuzzOne.FloatNgdotValue:type_name -> ngolofuzz.FloatNgdotValueArgs
5, // 5: ngolofuzz.NgoloFuzzOne.FloatNgdotString:type_name -> ngolofuzz.FloatNgdotStringArgs
6, // 6: ngolofuzz.NgoloFuzzOne.FloatNgdotAdd:type_name -> ngolofuzz.FloatNgdotAddArgs
7, // 7: ngolofuzz.NgoloFuzzOne.FloatNgdotSet:type_name -> ngolofuzz.FloatNgdotSetArgs
8, // 8: ngolofuzz.NgoloFuzzOne.MapNgdotString:type_name -> ngolofuzz.MapNgdotStringArgs
9, // 9: ngolofuzz.NgoloFuzzOne.MapNgdotInit:type_name -> ngolofuzz.MapNgdotInitArgs
10, // 10: ngolofuzz.NgoloFuzzOne.MapNgdotGet:type_name -> ngolofuzz.MapNgdotGetArgs
11, // 11: ngolofuzz.NgoloFuzzOne.MapNgdotSet:type_name -> ngolofuzz.MapNgdotSetArgs
12, // 12: ngolofuzz.NgoloFuzzOne.MapNgdotAdd:type_name -> ngolofuzz.MapNgdotAddArgs
13, // 13: ngolofuzz.NgoloFuzzOne.MapNgdotAddFloat:type_name -> ngolofuzz.MapNgdotAddFloatArgs
14, // 14: ngolofuzz.NgoloFuzzOne.MapNgdotDelete:type_name -> ngolofuzz.MapNgdotDeleteArgs
15, // 15: ngolofuzz.NgoloFuzzOne.StringNgdotValue:type_name -> ngolofuzz.StringNgdotValueArgs
16, // 16: ngolofuzz.NgoloFuzzOne.StringNgdotString:type_name -> ngolofuzz.StringNgdotStringArgs
17, // 17: ngolofuzz.NgoloFuzzOne.StringNgdotSet:type_name -> ngolofuzz.StringNgdotSetArgs
18, // 18: ngolofuzz.NgoloFuzzOne.Publish:type_name -> ngolofuzz.PublishArgs
19, // 19: ngolofuzz.NgoloFuzzOne.Get:type_name -> ngolofuzz.GetArgs
20, // 20: ngolofuzz.NgoloFuzzOne.NewInt:type_name -> ngolofuzz.NewIntArgs
21, // 21: ngolofuzz.NgoloFuzzOne.NewFloat:type_name -> ngolofuzz.NewFloatArgs
22, // 22: ngolofuzz.NgoloFuzzOne.NewMap:type_name -> ngolofuzz.NewMapArgs
23, // 23: ngolofuzz.NgoloFuzzOne.NewString:type_name -> ngolofuzz.NewStringArgs
24, // 24: ngolofuzz.NgoloFuzzOne.Handler:type_name -> ngolofuzz.HandlerArgs
25, // 25: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
26, // [26:26] is the sub-list for method output_type
26, // [26:26] is the sub-list for method input_type
26, // [26:26] is the sub-list for extension type_name
26, // [26:26] is the sub-list for extension extendee
0, // [0:26] is the sub-list for field type_name
}
// Register the ngolofuzz proto file with the protobuf runtime at package
// load time.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init builds and registers the file descriptor.
// It is idempotent: once File_ngolofuzz_proto is set it returns immediately.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Message index 25 (NgoloFuzzOne): declare its oneof wrapper types.
file_ngolofuzz_proto_msgTypes[25].OneofWrappers = []any{
(*NgoloFuzzOne_IntNgdotValue)(nil),
(*NgoloFuzzOne_IntNgdotString)(nil),
(*NgoloFuzzOne_IntNgdotAdd)(nil),
(*NgoloFuzzOne_IntNgdotSet)(nil),
(*NgoloFuzzOne_FloatNgdotValue)(nil),
(*NgoloFuzzOne_FloatNgdotString)(nil),
(*NgoloFuzzOne_FloatNgdotAdd)(nil),
(*NgoloFuzzOne_FloatNgdotSet)(nil),
(*NgoloFuzzOne_MapNgdotString)(nil),
(*NgoloFuzzOne_MapNgdotInit)(nil),
(*NgoloFuzzOne_MapNgdotGet)(nil),
(*NgoloFuzzOne_MapNgdotSet)(nil),
(*NgoloFuzzOne_MapNgdotAdd)(nil),
(*NgoloFuzzOne_MapNgdotAddFloat)(nil),
(*NgoloFuzzOne_MapNgdotDelete)(nil),
(*NgoloFuzzOne_StringNgdotValue)(nil),
(*NgoloFuzzOne_StringNgdotString)(nil),
(*NgoloFuzzOne_StringNgdotSet)(nil),
(*NgoloFuzzOne_Publish)(nil),
(*NgoloFuzzOne_Get)(nil),
(*NgoloFuzzOne_NewInt)(nil),
(*NgoloFuzzOne_NewFloat)(nil),
(*NgoloFuzzOne_NewMap)(nil),
(*NgoloFuzzOne_NewString)(nil),
(*NgoloFuzzOne_Handler)(nil),
}
// Message index 26 (NgoloFuzzAny): declare its oneof wrapper types.
file_ngolofuzz_proto_msgTypes[26].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 28,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// The construction-only tables are no longer needed; let the GC
// reclaim them.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_go_build
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"go/build"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is a net.Conn-style stub whose Read serves fuzzer-provided
// bytes and whose Write discards everything while claiming success.
type FuzzingConn struct {
	buf    []byte // bytes served to Read
	offset int    // current read position into buf
}

// Read copies the next bytes of c.buf into b. It returns io.EOF once the
// buffer has been fully consumed.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// BUG FIX: compare len(b) against the bytes actually remaining
	// (len(c.buf)-c.offset), not len(c.buf)+c.offset. The old condition let
	// a partial read report n == len(b) while copying fewer bytes and
	// advanced offset past the end of buf.
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write discards b and reports it as fully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr used for both endpoints of a
// FuzzingConn.
type FuzzingAddr struct{}

// Network reports a fixed, fake network name.
func (a *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String reports a fixed, fake address string.
func (a *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a placeholder address for the fuzzing connection.
func (c *FuzzingConn) LocalAddr() net.Addr { return &FuzzingAddr{} }

// RemoteAddr returns a placeholder address for the fuzzing connection.
func (c *FuzzingConn) RemoteAddr() net.Addr { return &FuzzingAddr{} }

// SetDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn wraps a as the readable contents of a new FuzzingConn.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit the helper functions below only when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or '\x00' when s is empty.
func GetRune(s string) rune {
	var first rune // zero value doubles as the empty-string result
	for _, c := range s {
		first = c
		break
	}
	return first
}
// ImportModeNewFromFuzz maps a fuzzer-chosen enum value onto the
// corresponding build.ImportMode, defaulting to build.FindOnly.
func ImportModeNewFromFuzz(p ImportModeEnum) build.ImportMode {
	switch p {
	case 1:
		return build.AllowBinary
	case 2:
		return build.ImportComment
	case 3:
		return build.IgnoreVendor
	default:
		return build.FindOnly
	}
}
// ConvertImportModeNewFromFuzz converts a slice of fuzzer enum values into
// build.ImportMode values, element by element.
func ConvertImportModeNewFromFuzz(a []ImportModeEnum) []build.ImportMode {
	out := make([]build.ImportMode, 0, len(a))
	for _, m := range a {
		out = append(out, ImportModeNewFromFuzz(m))
	}
	return out
}
// ContextNewFromFuzz builds a build.Context from the fuzzer-provided
// ContextStruct. It returns nil when p is nil so callers can skip the call.
func ContextNewFromFuzz(p *ContextStruct) *build.Context {
	if p == nil {
		return nil
	}
	ctxt := &build.Context{
		GOARCH:        p.GOARCH,
		GOOS:          p.GOOS,
		GOROOT:        p.GOROOT,
		GOPATH:        p.GOPATH,
		Dir:           p.Dir,
		CgoEnabled:    p.CgoEnabled,
		UseAllFiles:   p.UseAllFiles,
		Compiler:      p.Compiler,
		BuildTags:     p.BuildTags,
		ToolTags:      p.ToolTags,
		ReleaseTags:   p.ReleaseTags,
		InstallSuffix: p.InstallSuffix,
	}
	return ctxt
}
// FuzzNG_valid decodes data — assumed to be a valid protobuf-encoded call
// list — and replays it. A decode failure is a harness bug and panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		// Swallow string panics raised by the replayed calls; anything
		// else is unexpected and is re-raised.
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is like FuzzNG_valid but tolerates arbitrary input: if data
// is not a valid protobuf message it is simply rejected.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		// Swallow string panics raised by the replayed calls; anything
		// else is unexpected and is re-raised.
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool
// FuzzNG_List replays the decoded call list against the go/build API.
// Each list item selects one API call; ctxt/mode arguments are rebuilt from
// the fuzzer message. Returns 1 when the whole list ran, 0 on early exit.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// On first use, optionally dump a human-readable reproducer of this
// input to the file named by FUZZ_NG_REPRODUCER.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// NOTE(review): PackageResults is never appended to in this function, so
// the PackageNgdotIsCommand case below can never run — presumably a
// generator artifact; confirm against the harness generator.
var PackageResults []*build.Package
PackageResultsIndex := 0
for l := range gen.List {
// Cap the number of replayed calls to keep individual runs short.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_ContextNgdotSrcDirs:
arg0 := ContextNewFromFuzz(a.ContextNgdotSrcDirs.Ctxt)
if arg0 == nil {
continue
}
arg0.SrcDirs()
case *NgoloFuzzOne_PackageNgdotIsCommand:
if len(PackageResults) == 0 {
continue
}
// Round-robin over previously produced packages.
arg0 := PackageResults[PackageResultsIndex]
PackageResultsIndex = (PackageResultsIndex + 1) % len(PackageResults)
arg0.IsCommand()
case *NgoloFuzzOne_ContextNgdotImportDir:
arg0 := ContextNewFromFuzz(a.ContextNgdotImportDir.Ctxt)
if arg0 == nil {
continue
}
arg2 := ImportModeNewFromFuzz(a.ContextNgdotImportDir.Mode)
_, r1 := arg0.ImportDir(a.ContextNgdotImportDir.Dir, arg2)
if r1 != nil{
// Exercise the error's formatting, then stop this run.
r1.Error()
return 0
}
case *NgoloFuzzOne_ContextNgdotImport:
arg0 := ContextNewFromFuzz(a.ContextNgdotImport.Ctxt)
if arg0 == nil {
continue
}
arg3 := ImportModeNewFromFuzz(a.ContextNgdotImport.Mode)
_, r1 := arg0.Import(a.ContextNgdotImport.Path, a.ContextNgdotImport.SrcDir, arg3)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ContextNgdotMatchFile:
arg0 := ContextNewFromFuzz(a.ContextNgdotMatchFile.Ctxt)
if arg0 == nil {
continue
}
_, r1 := arg0.MatchFile(a.ContextNgdotMatchFile.Dir, a.ContextNgdotMatchFile.Name)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_Import:
arg2 := ImportModeNewFromFuzz(a.Import.Mode)
_, r1 := build.Import(a.Import.Path, a.Import.SrcDir, arg2)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ImportDir:
arg1 := ImportModeNewFromFuzz(a.ImportDir.Mode)
_, r1 := build.ImportDir(a.ImportDir.Dir, arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_IsLocalImport:
build.IsLocalImport(a.IsLocalImport.Path)
case *NgoloFuzzOne_ArchChar:
_, r1 := build.ArchChar(a.ArchChar.Goarch)
if r1 != nil{
r1.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a Go-like, human-readable transcript of the call list
// to w, mirroring the dispatch in FuzzNG_List. Write errors are deliberately
// ignored: the reproducer dump is best-effort.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
PackageNb := 0
PackageResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_ContextNgdotSrcDirs:
w.WriteString(fmt.Sprintf("ContextNewFromFuzz(%#+v).SrcDirs()\n", a.ContextNgdotSrcDirs.Ctxt))
case *NgoloFuzzOne_PackageNgdotIsCommand:
// Mirrors the runtime round-robin over produced packages.
if PackageNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Package%d.IsCommand()\n", PackageResultsIndex))
PackageResultsIndex = (PackageResultsIndex + 1) % PackageNb
case *NgoloFuzzOne_ContextNgdotImportDir:
w.WriteString(fmt.Sprintf("ContextNewFromFuzz(%#+v).ImportDir(%#+v, ImportModeNewFromFuzz(%#+v))\n", a.ContextNgdotImportDir.Ctxt, a.ContextNgdotImportDir.Dir, a.ContextNgdotImportDir.Mode))
case *NgoloFuzzOne_ContextNgdotImport:
w.WriteString(fmt.Sprintf("ContextNewFromFuzz(%#+v).Import(%#+v, %#+v, ImportModeNewFromFuzz(%#+v))\n", a.ContextNgdotImport.Ctxt, a.ContextNgdotImport.Path, a.ContextNgdotImport.SrcDir, a.ContextNgdotImport.Mode))
case *NgoloFuzzOne_ContextNgdotMatchFile:
w.WriteString(fmt.Sprintf("ContextNewFromFuzz(%#+v).MatchFile(%#+v, %#+v)\n", a.ContextNgdotMatchFile.Ctxt, a.ContextNgdotMatchFile.Dir, a.ContextNgdotMatchFile.Name))
case *NgoloFuzzOne_Import:
w.WriteString(fmt.Sprintf("build.Import(%#+v, %#+v, ImportModeNewFromFuzz(%#+v))\n", a.Import.Path, a.Import.SrcDir, a.Import.Mode))
case *NgoloFuzzOne_ImportDir:
w.WriteString(fmt.Sprintf("build.ImportDir(%#+v, ImportModeNewFromFuzz(%#+v))\n", a.ImportDir.Dir, a.ImportDir.Mode))
case *NgoloFuzzOne_IsLocalImport:
w.WriteString(fmt.Sprintf("build.IsLocalImport(%#+v)\n", a.IsLocalImport.Path))
case *NgoloFuzzOne_ArchChar:
w.WriteString(fmt.Sprintf("build.ArchChar(%#+v)\n", a.ArchChar.Goarch))
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_go_build
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// ImportModeEnum is the proto enum mirroring go/build.ImportMode choices.
type ImportModeEnum int32
const (
ImportModeEnum_FindOnly ImportModeEnum = 0
ImportModeEnum_AllowBinary ImportModeEnum = 1
ImportModeEnum_ImportComment ImportModeEnum = 2
ImportModeEnum_IgnoreVendor ImportModeEnum = 3
)
// Enum value maps for ImportModeEnum.
var (
ImportModeEnum_name = map[int32]string{
0: "FindOnly",
1: "AllowBinary",
2: "ImportComment",
3: "IgnoreVendor",
}
ImportModeEnum_value = map[string]int32{
"FindOnly": 0,
"AllowBinary": 1,
"ImportComment": 2,
"IgnoreVendor": 3,
}
)
// Enum returns a pointer to a copy of x (proto2-style accessor).
func (x ImportModeEnum) Enum() *ImportModeEnum {
p := new(ImportModeEnum)
*p = x
return p
}
// String returns the proto name of the enum value.
func (x ImportModeEnum) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (ImportModeEnum) Descriptor() protoreflect.EnumDescriptor {
return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}
func (ImportModeEnum) Type() protoreflect.EnumType {
return &file_ngolofuzz_proto_enumTypes[0]
}
func (x ImportModeEnum) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use ImportModeEnum.Descriptor instead.
func (ImportModeEnum) EnumDescriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// ContextStruct is the proto message mirroring the go/build.Context fields
// the fuzzer is allowed to control (see ContextNewFromFuzz).
type ContextStruct struct {
state protoimpl.MessageState `protogen:"open.v1"`
GOARCH string `protobuf:"bytes,1,opt,name=GOARCH,proto3" json:"GOARCH,omitempty"`
GOOS string `protobuf:"bytes,2,opt,name=GOOS,proto3" json:"GOOS,omitempty"`
GOROOT string `protobuf:"bytes,3,opt,name=GOROOT,proto3" json:"GOROOT,omitempty"`
GOPATH string `protobuf:"bytes,4,opt,name=GOPATH,proto3" json:"GOPATH,omitempty"`
Dir string `protobuf:"bytes,5,opt,name=Dir,proto3" json:"Dir,omitempty"`
CgoEnabled bool `protobuf:"varint,6,opt,name=CgoEnabled,proto3" json:"CgoEnabled,omitempty"`
UseAllFiles bool `protobuf:"varint,7,opt,name=UseAllFiles,proto3" json:"UseAllFiles,omitempty"`
Compiler string `protobuf:"bytes,8,opt,name=Compiler,proto3" json:"Compiler,omitempty"`
BuildTags []string `protobuf:"bytes,9,rep,name=BuildTags,proto3" json:"BuildTags,omitempty"`
ToolTags []string `protobuf:"bytes,10,rep,name=ToolTags,proto3" json:"ToolTags,omitempty"`
ReleaseTags []string `protobuf:"bytes,11,rep,name=ReleaseTags,proto3" json:"ReleaseTags,omitempty"`
InstallSuffix string `protobuf:"bytes,12,opt,name=InstallSuffix,proto3" json:"InstallSuffix,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *ContextStruct) Reset() {
*x = ContextStruct{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ContextStruct) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ContextStruct) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use.
func (x *ContextStruct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ContextStruct.ProtoReflect.Descriptor instead.
func (*ContextStruct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// The getters below are generated nil-safe accessors, one per field.
func (x *ContextStruct) GetGOARCH() string {
if x != nil {
return x.GOARCH
}
return ""
}
func (x *ContextStruct) GetGOOS() string {
if x != nil {
return x.GOOS
}
return ""
}
func (x *ContextStruct) GetGOROOT() string {
if x != nil {
return x.GOROOT
}
return ""
}
func (x *ContextStruct) GetGOPATH() string {
if x != nil {
return x.GOPATH
}
return ""
}
func (x *ContextStruct) GetDir() string {
if x != nil {
return x.Dir
}
return ""
}
func (x *ContextStruct) GetCgoEnabled() bool {
if x != nil {
return x.CgoEnabled
}
return false
}
func (x *ContextStruct) GetUseAllFiles() bool {
if x != nil {
return x.UseAllFiles
}
return false
}
func (x *ContextStruct) GetCompiler() string {
if x != nil {
return x.Compiler
}
return ""
}
func (x *ContextStruct) GetBuildTags() []string {
if x != nil {
return x.BuildTags
}
return nil
}
func (x *ContextStruct) GetToolTags() []string {
if x != nil {
return x.ToolTags
}
return nil
}
func (x *ContextStruct) GetReleaseTags() []string {
if x != nil {
return x.ReleaseTags
}
return nil
}
func (x *ContextStruct) GetInstallSuffix() string {
if x != nil {
return x.InstallSuffix
}
return ""
}
// ContextNgdotSrcDirsArgs carries the arguments for a Context.SrcDirs call.
type ContextNgdotSrcDirsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Ctxt *ContextStruct `protobuf:"bytes,1,opt,name=ctxt,proto3" json:"ctxt,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *ContextNgdotSrcDirsArgs) Reset() {
*x = ContextNgdotSrcDirsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ContextNgdotSrcDirsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ContextNgdotSrcDirsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message.
func (x *ContextNgdotSrcDirsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ContextNgdotSrcDirsArgs.ProtoReflect.Descriptor instead.
func (*ContextNgdotSrcDirsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetCtxt is the nil-safe accessor for the ctxt field.
func (x *ContextNgdotSrcDirsArgs) GetCtxt() *ContextStruct {
if x != nil {
return x.Ctxt
}
return nil
}
// PackageNgdotIsCommandArgs marks a Package.IsCommand call; it has no
// arguments of its own.
type PackageNgdotIsCommandArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *PackageNgdotIsCommandArgs) Reset() {
*x = PackageNgdotIsCommandArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PackageNgdotIsCommandArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PackageNgdotIsCommandArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message.
func (x *PackageNgdotIsCommandArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PackageNgdotIsCommandArgs.ProtoReflect.Descriptor instead.
func (*PackageNgdotIsCommandArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// ContextNgdotImportDirArgs carries the arguments for a Context.ImportDir
// call (ctxt, dir, mode).
type ContextNgdotImportDirArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Ctxt *ContextStruct `protobuf:"bytes,1,opt,name=ctxt,proto3" json:"ctxt,omitempty"`
Dir string `protobuf:"bytes,2,opt,name=dir,proto3" json:"dir,omitempty"`
Mode ImportModeEnum `protobuf:"varint,3,opt,name=mode,proto3,enum=ngolofuzz.ImportModeEnum" json:"mode,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *ContextNgdotImportDirArgs) Reset() {
*x = ContextNgdotImportDirArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ContextNgdotImportDirArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ContextNgdotImportDirArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message.
func (x *ContextNgdotImportDirArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ContextNgdotImportDirArgs.ProtoReflect.Descriptor instead.
func (*ContextNgdotImportDirArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// Nil-safe accessors for each field.
func (x *ContextNgdotImportDirArgs) GetCtxt() *ContextStruct {
if x != nil {
return x.Ctxt
}
return nil
}
func (x *ContextNgdotImportDirArgs) GetDir() string {
if x != nil {
return x.Dir
}
return ""
}
func (x *ContextNgdotImportDirArgs) GetMode() ImportModeEnum {
if x != nil {
return x.Mode
}
return ImportModeEnum_FindOnly
}
// ContextNgdotImportArgs carries the arguments for a Context.Import call
// (ctxt, path, srcDir, mode).
type ContextNgdotImportArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Ctxt *ContextStruct `protobuf:"bytes,1,opt,name=ctxt,proto3" json:"ctxt,omitempty"`
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
SrcDir string `protobuf:"bytes,3,opt,name=srcDir,proto3" json:"srcDir,omitempty"`
Mode ImportModeEnum `protobuf:"varint,4,opt,name=mode,proto3,enum=ngolofuzz.ImportModeEnum" json:"mode,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *ContextNgdotImportArgs) Reset() {
*x = ContextNgdotImportArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ContextNgdotImportArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ContextNgdotImportArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message.
func (x *ContextNgdotImportArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ContextNgdotImportArgs.ProtoReflect.Descriptor instead.
func (*ContextNgdotImportArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// Nil-safe accessors for each field.
func (x *ContextNgdotImportArgs) GetCtxt() *ContextStruct {
if x != nil {
return x.Ctxt
}
return nil
}
func (x *ContextNgdotImportArgs) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
func (x *ContextNgdotImportArgs) GetSrcDir() string {
if x != nil {
return x.SrcDir
}
return ""
}
func (x *ContextNgdotImportArgs) GetMode() ImportModeEnum {
if x != nil {
return x.Mode
}
return ImportModeEnum_FindOnly
}
// ContextNgdotMatchFileArgs carries the arguments for a Context.MatchFile
// call (ctxt, dir, name).
type ContextNgdotMatchFileArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Ctxt *ContextStruct `protobuf:"bytes,1,opt,name=ctxt,proto3" json:"ctxt,omitempty"`
Dir string `protobuf:"bytes,2,opt,name=dir,proto3" json:"dir,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *ContextNgdotMatchFileArgs) Reset() {
*x = ContextNgdotMatchFileArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ContextNgdotMatchFileArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ContextNgdotMatchFileArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message.
func (x *ContextNgdotMatchFileArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ContextNgdotMatchFileArgs.ProtoReflect.Descriptor instead.
func (*ContextNgdotMatchFileArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// Nil-safe accessors for each field.
func (x *ContextNgdotMatchFileArgs) GetCtxt() *ContextStruct {
if x != nil {
return x.Ctxt
}
return nil
}
func (x *ContextNgdotMatchFileArgs) GetDir() string {
if x != nil {
return x.Dir
}
return ""
}
func (x *ContextNgdotMatchFileArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
// ImportArgs carries the arguments for a top-level build.Import call
// (path, srcDir, mode).
type ImportArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
SrcDir string `protobuf:"bytes,2,opt,name=srcDir,proto3" json:"srcDir,omitempty"`
Mode ImportModeEnum `protobuf:"varint,3,opt,name=mode,proto3,enum=ngolofuzz.ImportModeEnum" json:"mode,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *ImportArgs) Reset() {
*x = ImportArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ImportArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ImportArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message.
func (x *ImportArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ImportArgs.ProtoReflect.Descriptor instead.
func (*ImportArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// Nil-safe accessors for each field.
func (x *ImportArgs) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
func (x *ImportArgs) GetSrcDir() string {
if x != nil {
return x.SrcDir
}
return ""
}
func (x *ImportArgs) GetMode() ImportModeEnum {
if x != nil {
return x.Mode
}
return ImportModeEnum_FindOnly
}
// ImportDirArgs carries the arguments for a top-level build.ImportDir call
// (dir, mode).
type ImportDirArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"`
Mode ImportModeEnum `protobuf:"varint,2,opt,name=mode,proto3,enum=ngolofuzz.ImportModeEnum" json:"mode,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *ImportDirArgs) Reset() {
*x = ImportDirArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ImportDirArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ImportDirArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message.
func (x *ImportDirArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ImportDirArgs.ProtoReflect.Descriptor instead.
func (*ImportDirArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// Nil-safe accessors for each field.
func (x *ImportDirArgs) GetDir() string {
if x != nil {
return x.Dir
}
return ""
}
func (x *ImportDirArgs) GetMode() ImportModeEnum {
if x != nil {
return x.Mode
}
return ImportModeEnum_FindOnly
}
// IsLocalImportArgs carries the path argument for build.IsLocalImport.
type IsLocalImportArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *IsLocalImportArgs) Reset() {
*x = IsLocalImportArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IsLocalImportArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IsLocalImportArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message.
func (x *IsLocalImportArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IsLocalImportArgs.ProtoReflect.Descriptor instead.
func (*IsLocalImportArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// GetPath is the nil-safe accessor for the path field.
func (x *IsLocalImportArgs) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
// ArchCharArgs carries the goarch argument for build.ArchChar.
type ArchCharArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Goarch string `protobuf:"bytes,1,opt,name=goarch,proto3" json:"goarch,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *ArchCharArgs) Reset() {
*x = ArchCharArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ArchCharArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ArchCharArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message.
func (x *ArchCharArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ArchCharArgs.ProtoReflect.Descriptor instead.
func (*ArchCharArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetGoarch is the nil-safe accessor for the goarch field.
func (x *ArchCharArgs) GetGoarch() string {
if x != nil {
return x.Goarch
}
return ""
}
// NgoloFuzzOne describes a single fuzzed API call: exactly one of the
// wrapper types below is set in Item, selecting which call to replay.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_ContextNgdotSrcDirs
// *NgoloFuzzOne_PackageNgdotIsCommand
// *NgoloFuzzOne_ContextNgdotImportDir
// *NgoloFuzzOne_ContextNgdotImport
// *NgoloFuzzOne_ContextNgdotMatchFile
// *NgoloFuzzOne_Import
// *NgoloFuzzOne_ImportDir
// *NgoloFuzzOne_IsLocalImport
// *NgoloFuzzOne_ArchChar
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-binds its runtime message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetItem returns the populated oneof wrapper, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// The getters below return the corresponding oneof field when it is the one
// set in Item, and nil otherwise.
func (x *NgoloFuzzOne) GetContextNgdotSrcDirs() *ContextNgdotSrcDirsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ContextNgdotSrcDirs); ok {
return x.ContextNgdotSrcDirs
}
}
return nil
}
func (x *NgoloFuzzOne) GetPackageNgdotIsCommand() *PackageNgdotIsCommandArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PackageNgdotIsCommand); ok {
return x.PackageNgdotIsCommand
}
}
return nil
}
func (x *NgoloFuzzOne) GetContextNgdotImportDir() *ContextNgdotImportDirArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ContextNgdotImportDir); ok {
return x.ContextNgdotImportDir
}
}
return nil
}
func (x *NgoloFuzzOne) GetContextNgdotImport() *ContextNgdotImportArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ContextNgdotImport); ok {
return x.ContextNgdotImport
}
}
return nil
}
func (x *NgoloFuzzOne) GetContextNgdotMatchFile() *ContextNgdotMatchFileArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ContextNgdotMatchFile); ok {
return x.ContextNgdotMatchFile
}
}
return nil
}
func (x *NgoloFuzzOne) GetImport() *ImportArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Import); ok {
return x.Import
}
}
return nil
}
func (x *NgoloFuzzOne) GetImportDir() *ImportDirArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ImportDir); ok {
return x.ImportDir
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsLocalImport() *IsLocalImportArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsLocalImport); ok {
return x.IsLocalImport
}
}
return nil
}
func (x *NgoloFuzzOne) GetArchChar() *ArchCharArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ArchChar); ok {
return x.ArchChar
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper of the item oneof.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// One wrapper struct per oneof case; the field number in the tag matches
// the proto declaration.
type NgoloFuzzOne_ContextNgdotSrcDirs struct {
ContextNgdotSrcDirs *ContextNgdotSrcDirsArgs `protobuf:"bytes,1,opt,name=ContextNgdotSrcDirs,proto3,oneof"`
}
type NgoloFuzzOne_PackageNgdotIsCommand struct {
PackageNgdotIsCommand *PackageNgdotIsCommandArgs `protobuf:"bytes,2,opt,name=PackageNgdotIsCommand,proto3,oneof"`
}
type NgoloFuzzOne_ContextNgdotImportDir struct {
ContextNgdotImportDir *ContextNgdotImportDirArgs `protobuf:"bytes,3,opt,name=ContextNgdotImportDir,proto3,oneof"`
}
type NgoloFuzzOne_ContextNgdotImport struct {
ContextNgdotImport *ContextNgdotImportArgs `protobuf:"bytes,4,opt,name=ContextNgdotImport,proto3,oneof"`
}
type NgoloFuzzOne_ContextNgdotMatchFile struct {
ContextNgdotMatchFile *ContextNgdotMatchFileArgs `protobuf:"bytes,5,opt,name=ContextNgdotMatchFile,proto3,oneof"`
}
type NgoloFuzzOne_Import struct {
Import *ImportArgs `protobuf:"bytes,6,opt,name=Import,proto3,oneof"`
}
type NgoloFuzzOne_ImportDir struct {
ImportDir *ImportDirArgs `protobuf:"bytes,7,opt,name=ImportDir,proto3,oneof"`
}
type NgoloFuzzOne_IsLocalImport struct {
IsLocalImport *IsLocalImportArgs `protobuf:"bytes,8,opt,name=IsLocalImport,proto3,oneof"`
}
type NgoloFuzzOne_ArchChar struct {
ArchChar *ArchCharArgs `protobuf:"bytes,9,opt,name=ArchChar,proto3,oneof"`
}
// Marker methods tying each wrapper to the item oneof.
func (*NgoloFuzzOne_ContextNgdotSrcDirs) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PackageNgdotIsCommand) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ContextNgdotImportDir) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ContextNgdotImport) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ContextNgdotMatchFile) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Import) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ImportDir) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsLocalImport) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ArchChar) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\xdd\x02\n" +
"\rContextStruct\x12\x16\n" +
"\x06GOARCH\x18\x01 \x01(\tR\x06GOARCH\x12\x12\n" +
"\x04GOOS\x18\x02 \x01(\tR\x04GOOS\x12\x16\n" +
"\x06GOROOT\x18\x03 \x01(\tR\x06GOROOT\x12\x16\n" +
"\x06GOPATH\x18\x04 \x01(\tR\x06GOPATH\x12\x10\n" +
"\x03Dir\x18\x05 \x01(\tR\x03Dir\x12\x1e\n" +
"\n" +
"CgoEnabled\x18\x06 \x01(\bR\n" +
"CgoEnabled\x12 \n" +
"\vUseAllFiles\x18\a \x01(\bR\vUseAllFiles\x12\x1a\n" +
"\bCompiler\x18\b \x01(\tR\bCompiler\x12\x1c\n" +
"\tBuildTags\x18\t \x03(\tR\tBuildTags\x12\x1a\n" +
"\bToolTags\x18\n" +
" \x03(\tR\bToolTags\x12 \n" +
"\vReleaseTags\x18\v \x03(\tR\vReleaseTags\x12$\n" +
"\rInstallSuffix\x18\f \x01(\tR\rInstallSuffix\"G\n" +
"\x17ContextNgdotSrcDirsArgs\x12,\n" +
"\x04ctxt\x18\x01 \x01(\v2\x18.ngolofuzz.ContextStructR\x04ctxt\"\x1b\n" +
"\x19PackageNgdotIsCommandArgs\"\x8a\x01\n" +
"\x19ContextNgdotImportDirArgs\x12,\n" +
"\x04ctxt\x18\x01 \x01(\v2\x18.ngolofuzz.ContextStructR\x04ctxt\x12\x10\n" +
"\x03dir\x18\x02 \x01(\tR\x03dir\x12-\n" +
"\x04mode\x18\x03 \x01(\x0e2\x19.ngolofuzz.ImportModeEnumR\x04mode\"\xa1\x01\n" +
"\x16ContextNgdotImportArgs\x12,\n" +
"\x04ctxt\x18\x01 \x01(\v2\x18.ngolofuzz.ContextStructR\x04ctxt\x12\x12\n" +
"\x04path\x18\x02 \x01(\tR\x04path\x12\x16\n" +
"\x06srcDir\x18\x03 \x01(\tR\x06srcDir\x12-\n" +
"\x04mode\x18\x04 \x01(\x0e2\x19.ngolofuzz.ImportModeEnumR\x04mode\"o\n" +
"\x19ContextNgdotMatchFileArgs\x12,\n" +
"\x04ctxt\x18\x01 \x01(\v2\x18.ngolofuzz.ContextStructR\x04ctxt\x12\x10\n" +
"\x03dir\x18\x02 \x01(\tR\x03dir\x12\x12\n" +
"\x04name\x18\x03 \x01(\tR\x04name\"g\n" +
"\n" +
"ImportArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\x12\x16\n" +
"\x06srcDir\x18\x02 \x01(\tR\x06srcDir\x12-\n" +
"\x04mode\x18\x03 \x01(\x0e2\x19.ngolofuzz.ImportModeEnumR\x04mode\"P\n" +
"\rImportDirArgs\x12\x10\n" +
"\x03dir\x18\x01 \x01(\tR\x03dir\x12-\n" +
"\x04mode\x18\x02 \x01(\x0e2\x19.ngolofuzz.ImportModeEnumR\x04mode\"'\n" +
"\x11IsLocalImportArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"&\n" +
"\fArchCharArgs\x12\x16\n" +
"\x06goarch\x18\x01 \x01(\tR\x06goarch\"\xc5\x05\n" +
"\fNgoloFuzzOne\x12V\n" +
"\x13ContextNgdotSrcDirs\x18\x01 \x01(\v2\".ngolofuzz.ContextNgdotSrcDirsArgsH\x00R\x13ContextNgdotSrcDirs\x12\\\n" +
"\x15PackageNgdotIsCommand\x18\x02 \x01(\v2$.ngolofuzz.PackageNgdotIsCommandArgsH\x00R\x15PackageNgdotIsCommand\x12\\\n" +
"\x15ContextNgdotImportDir\x18\x03 \x01(\v2$.ngolofuzz.ContextNgdotImportDirArgsH\x00R\x15ContextNgdotImportDir\x12S\n" +
"\x12ContextNgdotImport\x18\x04 \x01(\v2!.ngolofuzz.ContextNgdotImportArgsH\x00R\x12ContextNgdotImport\x12\\\n" +
"\x15ContextNgdotMatchFile\x18\x05 \x01(\v2$.ngolofuzz.ContextNgdotMatchFileArgsH\x00R\x15ContextNgdotMatchFile\x12/\n" +
"\x06Import\x18\x06 \x01(\v2\x15.ngolofuzz.ImportArgsH\x00R\x06Import\x128\n" +
"\tImportDir\x18\a \x01(\v2\x18.ngolofuzz.ImportDirArgsH\x00R\tImportDir\x12D\n" +
"\rIsLocalImport\x18\b \x01(\v2\x1c.ngolofuzz.IsLocalImportArgsH\x00R\rIsLocalImport\x125\n" +
"\bArchChar\x18\t \x01(\v2\x17.ngolofuzz.ArchCharArgsH\x00R\bArchCharB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*T\n" +
"\x0eImportModeEnum\x12\f\n" +
"\bFindOnly\x10\x00\x12\x0f\n" +
"\vAllowBinary\x10\x01\x12\x11\n" +
"\rImportComment\x10\x02\x12\x10\n" +
"\fIgnoreVendor\x10\x03B\x15Z\x13./;fuzz_ng_go_buildb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
var file_ngolofuzz_proto_goTypes = []any{
(ImportModeEnum)(0), // 0: ngolofuzz.ImportModeEnum
(*ContextStruct)(nil), // 1: ngolofuzz.ContextStruct
(*ContextNgdotSrcDirsArgs)(nil), // 2: ngolofuzz.ContextNgdotSrcDirsArgs
(*PackageNgdotIsCommandArgs)(nil), // 3: ngolofuzz.PackageNgdotIsCommandArgs
(*ContextNgdotImportDirArgs)(nil), // 4: ngolofuzz.ContextNgdotImportDirArgs
(*ContextNgdotImportArgs)(nil), // 5: ngolofuzz.ContextNgdotImportArgs
(*ContextNgdotMatchFileArgs)(nil), // 6: ngolofuzz.ContextNgdotMatchFileArgs
(*ImportArgs)(nil), // 7: ngolofuzz.ImportArgs
(*ImportDirArgs)(nil), // 8: ngolofuzz.ImportDirArgs
(*IsLocalImportArgs)(nil), // 9: ngolofuzz.IsLocalImportArgs
(*ArchCharArgs)(nil), // 10: ngolofuzz.ArchCharArgs
(*NgoloFuzzOne)(nil), // 11: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 12: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 13: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
1, // 0: ngolofuzz.ContextNgdotSrcDirsArgs.ctxt:type_name -> ngolofuzz.ContextStruct
1, // 1: ngolofuzz.ContextNgdotImportDirArgs.ctxt:type_name -> ngolofuzz.ContextStruct
0, // 2: ngolofuzz.ContextNgdotImportDirArgs.mode:type_name -> ngolofuzz.ImportModeEnum
1, // 3: ngolofuzz.ContextNgdotImportArgs.ctxt:type_name -> ngolofuzz.ContextStruct
0, // 4: ngolofuzz.ContextNgdotImportArgs.mode:type_name -> ngolofuzz.ImportModeEnum
1, // 5: ngolofuzz.ContextNgdotMatchFileArgs.ctxt:type_name -> ngolofuzz.ContextStruct
0, // 6: ngolofuzz.ImportArgs.mode:type_name -> ngolofuzz.ImportModeEnum
0, // 7: ngolofuzz.ImportDirArgs.mode:type_name -> ngolofuzz.ImportModeEnum
2, // 8: ngolofuzz.NgoloFuzzOne.ContextNgdotSrcDirs:type_name -> ngolofuzz.ContextNgdotSrcDirsArgs
3, // 9: ngolofuzz.NgoloFuzzOne.PackageNgdotIsCommand:type_name -> ngolofuzz.PackageNgdotIsCommandArgs
4, // 10: ngolofuzz.NgoloFuzzOne.ContextNgdotImportDir:type_name -> ngolofuzz.ContextNgdotImportDirArgs
5, // 11: ngolofuzz.NgoloFuzzOne.ContextNgdotImport:type_name -> ngolofuzz.ContextNgdotImportArgs
6, // 12: ngolofuzz.NgoloFuzzOne.ContextNgdotMatchFile:type_name -> ngolofuzz.ContextNgdotMatchFileArgs
7, // 13: ngolofuzz.NgoloFuzzOne.Import:type_name -> ngolofuzz.ImportArgs
8, // 14: ngolofuzz.NgoloFuzzOne.ImportDir:type_name -> ngolofuzz.ImportDirArgs
9, // 15: ngolofuzz.NgoloFuzzOne.IsLocalImport:type_name -> ngolofuzz.IsLocalImportArgs
10, // 16: ngolofuzz.NgoloFuzzOne.ArchChar:type_name -> ngolofuzz.ArchCharArgs
11, // 17: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
18, // [18:18] is the sub-list for method output_type
18, // [18:18] is the sub-list for method input_type
18, // [18:18] is the sub-list for extension type_name
18, // [18:18] is the sub-list for extension extendee
0, // [0:18] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
(*NgoloFuzzOne_ContextNgdotSrcDirs)(nil),
(*NgoloFuzzOne_PackageNgdotIsCommand)(nil),
(*NgoloFuzzOne_ContextNgdotImportDir)(nil),
(*NgoloFuzzOne_ContextNgdotImport)(nil),
(*NgoloFuzzOne_ContextNgdotMatchFile)(nil),
(*NgoloFuzzOne_Import)(nil),
(*NgoloFuzzOne_ImportDir)(nil),
(*NgoloFuzzOne_IsLocalImport)(nil),
(*NgoloFuzzOne_ArchChar)(nil),
}
file_ngolofuzz_proto_msgTypes[11].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 1,
NumMessages: 13,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
EnumInfos: file_ngolofuzz_proto_enumTypes,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_go_build_constraint
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"go/build/constraint"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stand-in for a net.Conn whose read side is
// served from a fixed byte slice, so fuzzer-generated bytes can replace
// real network input.
type FuzzingConn struct {
	buf    []byte // data handed out by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next chunk of c.buf into b, advancing the internal
// offset, and returns io.EOF once the buffer is exhausted.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset instead
// of the remaining byte count (len(c.buf)-c.offset). When b was small but
// that mis-comparison held, Read reported n = len(b) even though fewer
// bytes were copied, and could advance offset past len(buf). Relying on
// copy's own truncation gives the correct n in every case.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends to send b over the connection; the data is discarded and
// reported as fully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}

// Close marks the connection as exhausted so any further Read hits EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder address reported by FuzzingConn.
type FuzzingAddr struct{}

// Network names the fake network this address belongs to.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String renders the fake address.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr reports a fixed placeholder address for the fake connection.
func (c *FuzzingConn) LocalAddr() net.Addr {
	var a FuzzingAddr
	return &a
}

// RemoteAddr reports a fixed placeholder address for the fake connection.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	var a FuzzingAddr
	return &a
}

// SetDeadline is a no-op: the in-memory connection never times out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op: the in-memory connection never times out.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op: the in-memory connection never times out.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn returns a FuzzingConn whose Read side serves a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit these helpers when the fuzz target actually needs them.

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the fuzzer-provided bytes in a *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a slice of int64 values into a slice of int.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows a slice of int64 values into a slice of
// uint16, truncating each element modulo 2^16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// A leading invalid UTF-8 sequence decodes to the replacement character,
// per range-over-string semantics.
func GetRune(s string) rune {
	first := rune('\x00')
	for _, r := range s {
		first = r
		break
	}
	return first
}
// TagExprNewFromFuzz converts a fuzzer-generated TagExprStruct message into
// a *constraint.TagExpr. It returns nil when p is nil so callers can skip
// the item instead of dereferencing a nil message.
//
// Fix: the original was not gofmt-clean (missing space before '{' in the
// signature); formatting normalized, behavior unchanged.
func TagExprNewFromFuzz(p *TagExprStruct) *constraint.TagExpr {
	if p == nil {
		return nil
	}
	return &constraint.TagExpr{
		Tag: p.Tag,
	}
}
// FuzzNG_valid is the fuzz entry point for inputs produced by the protobuf
// mutator and therefore expected to decode cleanly; a failed unmarshal is
// treated as a harness problem and panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Suppress panics whose value is a string; any other panic value is
	// re-raised so genuine crashes still surface to the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzz entry point for raw inputs that may not be
// valid protobuf: inputs that fail to unmarshal are simply rejected
// (returning 0) instead of panicking as FuzzNG_valid does.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Suppress panics whose value is a string; any other panic value is
	// re-raised so genuine crashes still surface to the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump at the top of FuzzNG_List.
var initialized bool

// FuzzNG_List replays the fuzzer-generated call sequence in gen against
// go/build/constraint. Results of constraint.Parse are accumulated in
// ExprResults and consumed round-robin by the PlusBuildLines and GoVersion
// cases. It returns 1 when the whole list was processed and 0 on an early
// exit (length cap hit or an error value observed).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// If FUZZ_NG_REPRODUCER names a file, write a Go-source
		// reproducer for this first input, then never again.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var ExprResults []*constraint.Expr
	ExprResultsIndex := 0
	for l := range gen.List {
		// Cap the work done per input.
		// NOTE(review): this tests the index, so up to 4097 items run
		// before bailing — presumably intended as "at most 4096".
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_TagExprNgdotString:
			arg0 := TagExprNewFromFuzz(a.TagExprNgdotString.X)
			if arg0 == nil {
				continue
			}
			arg0.String()
		case *NgoloFuzzOne_Parse:
			// The parse result is stored even on error; the error
			// path returns immediately afterwards anyway.
			r0, r1 := constraint.Parse(a.Parse.Line)
			ExprResults = append(ExprResults, &r0)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_IsGoBuild:
			constraint.IsGoBuild(a.IsGoBuild.Line)
		case *NgoloFuzzOne_IsPlusBuild:
			constraint.IsPlusBuild(a.IsPlusBuild.Line)
		case *NgoloFuzzOne_PlusBuildLines:
			// Needs at least one previously parsed expression.
			if len(ExprResults) == 0 {
				continue
			}
			arg0 := *ExprResults[ExprResultsIndex]
			ExprResultsIndex = (ExprResultsIndex + 1) % len(ExprResults)
			_, r1 := constraint.PlusBuildLines(arg0)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_GoVersion:
			if len(ExprResults) == 0 {
				continue
			}
			arg0 := *ExprResults[ExprResultsIndex]
			ExprResultsIndex = (ExprResultsIndex + 1) % len(ExprResults)
			constraint.GoVersion(arg0)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the call sequence encoded
// in gen to w, mirroring the dispatch performed by FuzzNG_List. WriteString
// errors are deliberately ignored: this is best-effort debug output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// ExprNb counts constraint.Parse results emitted so far;
	// ExprResultsIndex cycles over them, matching FuzzNG_List.
	ExprNb := 0
	ExprResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_TagExprNgdotString:
			w.WriteString(fmt.Sprintf("TagExprNewFromFuzz(%#+v).String()\n", a.TagExprNgdotString.X))
		case *NgoloFuzzOne_Parse:
			w.WriteString(fmt.Sprintf("Expr%d, _ := constraint.Parse(%#+v)\n", ExprNb, a.Parse.Line))
			ExprNb = ExprNb + 1
		case *NgoloFuzzOne_IsGoBuild:
			w.WriteString(fmt.Sprintf("constraint.IsGoBuild(%#+v)\n", a.IsGoBuild.Line))
		case *NgoloFuzzOne_IsPlusBuild:
			w.WriteString(fmt.Sprintf("constraint.IsPlusBuild(%#+v)\n", a.IsPlusBuild.Line))
		case *NgoloFuzzOne_PlusBuildLines:
			// No expression parsed yet: nothing to replay against.
			if ExprNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constraint.PlusBuildLines(Expr%d)\n", (ExprResultsIndex + 0) % ExprNb))
			ExprResultsIndex = (ExprResultsIndex + 1) % ExprNb
		case *NgoloFuzzOne_GoVersion:
			if ExprNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constraint.GoVersion(Expr%d)\n", (ExprResultsIndex + 0) % ExprNb))
			ExprResultsIndex = (ExprResultsIndex + 1) % ExprNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_go_build_constraint
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type TagExprStruct struct {
state protoimpl.MessageState `protogen:"open.v1"`
Tag string `protobuf:"bytes,1,opt,name=Tag,proto3" json:"Tag,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TagExprStruct) Reset() {
*x = TagExprStruct{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TagExprStruct) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TagExprStruct) ProtoMessage() {}
func (x *TagExprStruct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TagExprStruct.ProtoReflect.Descriptor instead.
func (*TagExprStruct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *TagExprStruct) GetTag() string {
if x != nil {
return x.Tag
}
return ""
}
type TagExprNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X *TagExprStruct `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TagExprNgdotStringArgs) Reset() {
*x = TagExprNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TagExprNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TagExprNgdotStringArgs) ProtoMessage() {}
func (x *TagExprNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TagExprNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*TagExprNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *TagExprNgdotStringArgs) GetX() *TagExprStruct {
if x != nil {
return x.X
}
return nil
}
type ParseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Line string `protobuf:"bytes,1,opt,name=line,proto3" json:"line,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ParseArgs) Reset() {
*x = ParseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ParseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParseArgs) ProtoMessage() {}
func (x *ParseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseArgs.ProtoReflect.Descriptor instead.
func (*ParseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *ParseArgs) GetLine() string {
if x != nil {
return x.Line
}
return ""
}
type IsGoBuildArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Line string `protobuf:"bytes,1,opt,name=line,proto3" json:"line,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IsGoBuildArgs) Reset() {
*x = IsGoBuildArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IsGoBuildArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IsGoBuildArgs) ProtoMessage() {}
func (x *IsGoBuildArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IsGoBuildArgs.ProtoReflect.Descriptor instead.
func (*IsGoBuildArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *IsGoBuildArgs) GetLine() string {
if x != nil {
return x.Line
}
return ""
}
type IsPlusBuildArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Line string `protobuf:"bytes,1,opt,name=line,proto3" json:"line,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IsPlusBuildArgs) Reset() {
*x = IsPlusBuildArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IsPlusBuildArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IsPlusBuildArgs) ProtoMessage() {}
func (x *IsPlusBuildArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IsPlusBuildArgs.ProtoReflect.Descriptor instead.
func (*IsPlusBuildArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *IsPlusBuildArgs) GetLine() string {
if x != nil {
return x.Line
}
return ""
}
type PlusBuildLinesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PlusBuildLinesArgs) Reset() {
*x = PlusBuildLinesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PlusBuildLinesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PlusBuildLinesArgs) ProtoMessage() {}
func (x *PlusBuildLinesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PlusBuildLinesArgs.ProtoReflect.Descriptor instead.
func (*PlusBuildLinesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
type GoVersionArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GoVersionArgs) Reset() {
*x = GoVersionArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GoVersionArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GoVersionArgs) ProtoMessage() {}
func (x *GoVersionArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GoVersionArgs.ProtoReflect.Descriptor instead.
func (*GoVersionArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_TagExprNgdotString
// *NgoloFuzzOne_Parse
// *NgoloFuzzOne_IsGoBuild
// *NgoloFuzzOne_IsPlusBuild
// *NgoloFuzzOne_PlusBuildLines
// *NgoloFuzzOne_GoVersion
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetTagExprNgdotString() *TagExprNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TagExprNgdotString); ok {
return x.TagExprNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetParse() *ParseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Parse); ok {
return x.Parse
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsGoBuild() *IsGoBuildArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsGoBuild); ok {
return x.IsGoBuild
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsPlusBuild() *IsPlusBuildArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsPlusBuild); ok {
return x.IsPlusBuild
}
}
return nil
}
func (x *NgoloFuzzOne) GetPlusBuildLines() *PlusBuildLinesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PlusBuildLines); ok {
return x.PlusBuildLines
}
}
return nil
}
func (x *NgoloFuzzOne) GetGoVersion() *GoVersionArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_GoVersion); ok {
return x.GoVersion
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_TagExprNgdotString struct {
TagExprNgdotString *TagExprNgdotStringArgs `protobuf:"bytes,1,opt,name=TagExprNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_Parse struct {
Parse *ParseArgs `protobuf:"bytes,2,opt,name=Parse,proto3,oneof"`
}
type NgoloFuzzOne_IsGoBuild struct {
IsGoBuild *IsGoBuildArgs `protobuf:"bytes,3,opt,name=IsGoBuild,proto3,oneof"`
}
type NgoloFuzzOne_IsPlusBuild struct {
IsPlusBuild *IsPlusBuildArgs `protobuf:"bytes,4,opt,name=IsPlusBuild,proto3,oneof"`
}
type NgoloFuzzOne_PlusBuildLines struct {
PlusBuildLines *PlusBuildLinesArgs `protobuf:"bytes,5,opt,name=PlusBuildLines,proto3,oneof"`
}
type NgoloFuzzOne_GoVersion struct {
GoVersion *GoVersionArgs `protobuf:"bytes,6,opt,name=GoVersion,proto3,oneof"`
}
func (*NgoloFuzzOne_TagExprNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Parse) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsGoBuild) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsPlusBuild) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PlusBuildLines) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GoVersion) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks NgoloFuzzAny as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily binding
// the message-info for slot 8 of this file's message-type table.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetItem returns whichever oneof wrapper is populated, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetDoubleArgs returns DoubleArgs when that oneof case is set, else 0.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

// GetInt64Args returns Int64Args when that oneof case is set, else 0.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

// GetBoolArgs returns BoolArgs when that oneof case is set, else false.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

// GetStringArgs returns StringArgs when that oneof case is set, else "".
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

// GetBytesArgs returns BytesArgs when that oneof case is set, else nil.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every
// wrapper type of the NgoloFuzzAny.item oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// Oneof wrapper types, one per case of NgoloFuzzAny.item.
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzzer input: an ordered list of
// NgoloFuzzOne operations to replay.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its message-info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NgoloFuzzList as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (slot 9).
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetList returns the operation list, or nil on a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled file descriptor, set by init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-encoded FileDescriptorProto
// for ngolofuzz.proto. Do not edit: the bytes must match the .proto exactly.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"!\n" +
	"\rTagExprStruct\x12\x10\n" +
	"\x03Tag\x18\x01 \x01(\tR\x03Tag\"@\n" +
	"\x16TagExprNgdotStringArgs\x12&\n" +
	"\x01x\x18\x01 \x01(\v2\x18.ngolofuzz.TagExprStructR\x01x\"\x1f\n" +
	"\tParseArgs\x12\x12\n" +
	"\x04line\x18\x01 \x01(\tR\x04line\"#\n" +
	"\rIsGoBuildArgs\x12\x12\n" +
	"\x04line\x18\x01 \x01(\tR\x04line\"%\n" +
	"\x0fIsPlusBuildArgs\x12\x12\n" +
	"\x04line\x18\x01 \x01(\tR\x04line\"\x14\n" +
	"\x12PlusBuildLinesArgs\"\x0f\n" +
	"\rGoVersionArgs\"\x96\x03\n" +
	"\fNgoloFuzzOne\x12S\n" +
	"\x12TagExprNgdotString\x18\x01 \x01(\v2!.ngolofuzz.TagExprNgdotStringArgsH\x00R\x12TagExprNgdotString\x12,\n" +
	"\x05Parse\x18\x02 \x01(\v2\x14.ngolofuzz.ParseArgsH\x00R\x05Parse\x128\n" +
	"\tIsGoBuild\x18\x03 \x01(\v2\x18.ngolofuzz.IsGoBuildArgsH\x00R\tIsGoBuild\x12>\n" +
	"\vIsPlusBuild\x18\x04 \x01(\v2\x1a.ngolofuzz.IsPlusBuildArgsH\x00R\vIsPlusBuild\x12G\n" +
	"\x0ePlusBuildLines\x18\x05 \x01(\v2\x1d.ngolofuzz.PlusBuildLinesArgsH\x00R\x0ePlusBuildLines\x128\n" +
	"\tGoVersion\x18\x06 \x01(\v2\x18.ngolofuzz.GoVersionArgsH\x00R\tGoVersionB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB Z\x1e./;fuzz_ng_go_build_constraintb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw
// descriptor exactly once and returns the cached bytes.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime type tables wired together by file_ngolofuzz_proto_init.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 10)

// file_ngolofuzz_proto_goTypes maps descriptor indices to Go types.
var file_ngolofuzz_proto_goTypes = []any{
	(*TagExprStruct)(nil),          // 0: ngolofuzz.TagExprStruct
	(*TagExprNgdotStringArgs)(nil), // 1: ngolofuzz.TagExprNgdotStringArgs
	(*ParseArgs)(nil),              // 2: ngolofuzz.ParseArgs
	(*IsGoBuildArgs)(nil),          // 3: ngolofuzz.IsGoBuildArgs
	(*IsPlusBuildArgs)(nil),        // 4: ngolofuzz.IsPlusBuildArgs
	(*PlusBuildLinesArgs)(nil),     // 5: ngolofuzz.PlusBuildLinesArgs
	(*GoVersionArgs)(nil),          // 6: ngolofuzz.GoVersionArgs
	(*NgoloFuzzOne)(nil),           // 7: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),           // 8: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),          // 9: ngolofuzz.NgoloFuzzList
}

// file_ngolofuzz_proto_depIdxs records field -> type dependencies as
// indices into goTypes, followed by the standard sub-list boundaries.
var file_ngolofuzz_proto_depIdxs = []int32{
	0, // 0: ngolofuzz.TagExprNgdotStringArgs.x:type_name -> ngolofuzz.TagExprStruct
	1, // 1: ngolofuzz.NgoloFuzzOne.TagExprNgdotString:type_name -> ngolofuzz.TagExprNgdotStringArgs
	2, // 2: ngolofuzz.NgoloFuzzOne.Parse:type_name -> ngolofuzz.ParseArgs
	3, // 3: ngolofuzz.NgoloFuzzOne.IsGoBuild:type_name -> ngolofuzz.IsGoBuildArgs
	4, // 4: ngolofuzz.NgoloFuzzOne.IsPlusBuild:type_name -> ngolofuzz.IsPlusBuildArgs
	5, // 5: ngolofuzz.NgoloFuzzOne.PlusBuildLines:type_name -> ngolofuzz.PlusBuildLinesArgs
	6, // 6: ngolofuzz.NgoloFuzzOne.GoVersion:type_name -> ngolofuzz.GoVersionArgs
	7, // 7: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	8, // [8:8] is the sub-list for method output_type
	8, // [8:8] is the sub-list for method input_type
	8, // [8:8] is the sub-list for extension type_name
	8, // [8:8] is the sub-list for extension extendee
	0, // [0:8] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init builds the file descriptor and message-info
// table once; subsequent calls are no-ops.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Register the oneof wrapper types for NgoloFuzzOne (slot 7).
	file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
		(*NgoloFuzzOne_TagExprNgdotString)(nil),
		(*NgoloFuzzOne_Parse)(nil),
		(*NgoloFuzzOne_IsGoBuild)(nil),
		(*NgoloFuzzOne_IsPlusBuild)(nil),
		(*NgoloFuzzOne_PlusBuildLines)(nil),
		(*NgoloFuzzOne_GoVersion)(nil),
	}
	// Register the oneof wrapper types for NgoloFuzzAny (slot 8).
	file_ngolofuzz_proto_msgTypes[8].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   10,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Tables are no longer needed after Build; release them for the GC.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_go_constant
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"go/constant"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in fed from a fixed byte
// slice: reads drain buf, writes are discarded, deadlines are ignored.
type FuzzingConn struct {
	buf    []byte // bytes served by Read
	offset int    // next unread position in buf
}

// Read copies up to len(b) unread bytes from the buffer into b and
// advances the offset; it returns io.EOF once the buffer is exhausted.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset
// instead of the remaining byte count, so a read near the end of the
// buffer returned n == len(b) even though fewer bytes had been copied,
// leaving stale bytes in b and pushing offset past len(buf).
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write discards b and reports it as fully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is the fixed dummy address reported by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a constant network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a constant address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns the dummy address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns the dummy address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn returns a FuzzingConn whose reads serve a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}

// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}

// ConvertIntArray narrows each int64 to int.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}

// ConvertUint16Array truncates each int64 to uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}

// GetRune returns the first rune of s, or NUL for the empty string.
func GetRune(s string) rune {
	if s == "" {
		return '\x00'
	}
	return []rune(s)[0]
}
// KindNewFromFuzz maps a fuzzer-chosen KindEnum (1..5) onto the
// corresponding go/constant Kind; anything else is constant.Unknown.
func KindNewFromFuzz(p KindEnum) constant.Kind {
	kinds := [...]constant.Kind{
		constant.Bool,
		constant.String,
		constant.Int,
		constant.Float,
		constant.Complex,
	}
	if p >= 1 && p <= 5 {
		return kinds[p-1]
	}
	return constant.Unknown
}

// ConvertKindNewFromFuzz maps KindNewFromFuzz over a slice.
func ConvertKindNewFromFuzz(a []KindEnum) []constant.Kind {
	out := make([]constant.Kind, len(a))
	for i, k := range a {
		out[i] = KindNewFromFuzz(k)
	}
	return out
}
// FuzzNG_valid decodes data as an NgoloFuzzList and replays it,
// panicking if the harness handed us bytes that are not valid protobuf.
// String panics raised while replaying are swallowed; any other panic
// value is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			// Non-string panics are genuine failures: propagate them.
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is like FuzzNG_valid but tolerates undecodable input,
// returning 0 (uninteresting) instead of panicking on unmarshal failure.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if proto.Unmarshal(data, gen) != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			// Non-string panics are genuine failures: propagate them.
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup below.
var initialized bool

// FuzzNG_List replays gen as a sequence of go/constant API calls.
// Producer cases append their result to ValueResults; consumer cases
// round-robin over the accumulated results via ValueResultsIndex.
// If FUZZ_NG_REPRODUCER is set, the equivalent Go source is written
// there once (first call) via PrintNG_List.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var ValueResults []*constant.Value
	ValueResultsIndex := 0
	for l := range gen.List {
		// Cap the number of replayed operations.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_KindNgdotString:
			arg0 := KindNewFromFuzz(a.KindNgdotString.I)
			arg0.String()
		case *NgoloFuzzOne_MakeUnknown:
			r0 := constant.MakeUnknown()
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_MakeBool:
			r0 := constant.MakeBool(a.MakeBool.B)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_MakeString:
			r0 := constant.MakeString(a.MakeString.S)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_MakeInt64:
			r0 := constant.MakeInt64(a.MakeInt64.X)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_MakeUint64:
			r0 := constant.MakeUint64(a.MakeUint64.X)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_MakeFloat64:
			r0 := constant.MakeFloat64(a.MakeFloat64.X)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_BoolVal:
			// Consumer: needs at least one previously produced Value.
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			constant.BoolVal(arg0)
		case *NgoloFuzzOne_StringVal:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			constant.StringVal(arg0)
		case *NgoloFuzzOne_Int64Val:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			constant.Int64Val(arg0)
		case *NgoloFuzzOne_Uint64Val:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			constant.Uint64Val(arg0)
		case *NgoloFuzzOne_Float32Val:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			constant.Float32Val(arg0)
		case *NgoloFuzzOne_Float64Val:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			constant.Float64Val(arg0)
		case *NgoloFuzzOne_Val:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			constant.Val(arg0)
		case *NgoloFuzzOne_Make:
			r0 := constant.Make(a.Make.X)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_BitLen:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			constant.BitLen(arg0)
		case *NgoloFuzzOne_Sign:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			constant.Sign(arg0)
		case *NgoloFuzzOne_Bytes:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			constant.Bytes(arg0)
		case *NgoloFuzzOne_MakeFromBytes:
			r0 := constant.MakeFromBytes(a.MakeFromBytes.Bytes)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_Num:
			// Consumer and producer: takes one Value, appends the result.
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			r0 := constant.Num(arg0)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_Denom:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			r0 := constant.Denom(arg0)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_MakeImag:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			r0 := constant.MakeImag(arg0)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_Real:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			r0 := constant.Real(arg0)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_Imag:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			r0 := constant.Imag(arg0)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_ToInt:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			r0 := constant.ToInt(arg0)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_ToFloat:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			r0 := constant.ToFloat(arg0)
			ValueResults = append(ValueResults, &r0)
		case *NgoloFuzzOne_ToComplex:
			if len(ValueResults) == 0 {
				continue
			}
			arg0 := *ValueResults[ValueResultsIndex]
			ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
			r0 := constant.ToComplex(arg0)
			ValueResults = append(ValueResults, &r0)
		}
	}
	return 1
}
// PrintNG_List writes Go source to w that reproduces the call sequence
// FuzzNG_List would execute for gen. ValueNb numbers the produced
// values; ValueResultsIndex mirrors the round-robin consumer index.
//
// NOTE(review): in the producer-consumer cases (Num, Denom, MakeImag,
// Real, Imag, ToInt, ToFloat, ToComplex) the index update here uses the
// already-incremented ValueNb, whereas FuzzNG_List updates its index
// before appending — the printed indices may drift from the executed
// ones; presumably a generator quirk, confirm against ngolo-fuzzing.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	ValueNb := 0
	ValueResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_KindNgdotString:
			w.WriteString(fmt.Sprintf("KindNewFromFuzz(%#+v).String()\n", a.KindNgdotString.I))
		case *NgoloFuzzOne_MakeUnknown:
			w.WriteString(fmt.Sprintf("Value%d := constant.MakeUnknown()\n", ValueNb))
			ValueNb = ValueNb + 1
		case *NgoloFuzzOne_MakeBool:
			w.WriteString(fmt.Sprintf("Value%d := constant.MakeBool(%#+v)\n", ValueNb, a.MakeBool.B))
			ValueNb = ValueNb + 1
		case *NgoloFuzzOne_MakeString:
			w.WriteString(fmt.Sprintf("Value%d := constant.MakeString(%#+v)\n", ValueNb, a.MakeString.S))
			ValueNb = ValueNb + 1
		case *NgoloFuzzOne_MakeInt64:
			w.WriteString(fmt.Sprintf("Value%d := constant.MakeInt64(%#+v)\n", ValueNb, a.MakeInt64.X))
			ValueNb = ValueNb + 1
		case *NgoloFuzzOne_MakeUint64:
			w.WriteString(fmt.Sprintf("Value%d := constant.MakeUint64(%#+v)\n", ValueNb, a.MakeUint64.X))
			ValueNb = ValueNb + 1
		case *NgoloFuzzOne_MakeFloat64:
			w.WriteString(fmt.Sprintf("Value%d := constant.MakeFloat64(%#+v)\n", ValueNb, a.MakeFloat64.X))
			ValueNb = ValueNb + 1
		case *NgoloFuzzOne_BoolVal:
			// Consumer: only printable once a value exists.
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constant.BoolVal(Value%d)\n", (ValueResultsIndex + 0) % ValueNb))
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_StringVal:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constant.StringVal(Value%d)\n", (ValueResultsIndex + 0) % ValueNb))
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Int64Val:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constant.Int64Val(Value%d)\n", (ValueResultsIndex + 0) % ValueNb))
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Uint64Val:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constant.Uint64Val(Value%d)\n", (ValueResultsIndex + 0) % ValueNb))
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Float32Val:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constant.Float32Val(Value%d)\n", (ValueResultsIndex + 0) % ValueNb))
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Float64Val:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constant.Float64Val(Value%d)\n", (ValueResultsIndex + 0) % ValueNb))
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Val:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constant.Val(Value%d)\n", (ValueResultsIndex + 0) % ValueNb))
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Make:
			w.WriteString(fmt.Sprintf("Value%d := constant.Make(%#+v)\n", ValueNb, a.Make.X))
			ValueNb = ValueNb + 1
		case *NgoloFuzzOne_BitLen:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constant.BitLen(Value%d)\n", (ValueResultsIndex + 0) % ValueNb))
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Sign:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constant.Sign(Value%d)\n", (ValueResultsIndex + 0) % ValueNb))
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Bytes:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("constant.Bytes(Value%d)\n", (ValueResultsIndex + 0) % ValueNb))
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_MakeFromBytes:
			w.WriteString(fmt.Sprintf("Value%d := constant.MakeFromBytes(%#+v)\n", ValueNb, a.MakeFromBytes.Bytes))
			ValueNb = ValueNb + 1
		case *NgoloFuzzOne_Num:
			// Consumer and producer: prints a new Value assignment.
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Value%d := constant.Num(Value%d)\n", ValueNb, (ValueResultsIndex + 0) % ValueNb))
			ValueNb = ValueNb + 1
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Denom:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Value%d := constant.Denom(Value%d)\n", ValueNb, (ValueResultsIndex + 0) % ValueNb))
			ValueNb = ValueNb + 1
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_MakeImag:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Value%d := constant.MakeImag(Value%d)\n", ValueNb, (ValueResultsIndex + 0) % ValueNb))
			ValueNb = ValueNb + 1
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Real:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Value%d := constant.Real(Value%d)\n", ValueNb, (ValueResultsIndex + 0) % ValueNb))
			ValueNb = ValueNb + 1
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_Imag:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Value%d := constant.Imag(Value%d)\n", ValueNb, (ValueResultsIndex + 0) % ValueNb))
			ValueNb = ValueNb + 1
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_ToInt:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Value%d := constant.ToInt(Value%d)\n", ValueNb, (ValueResultsIndex + 0) % ValueNb))
			ValueNb = ValueNb + 1
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_ToFloat:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Value%d := constant.ToFloat(Value%d)\n", ValueNb, (ValueResultsIndex + 0) % ValueNb))
			ValueNb = ValueNb + 1
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		case *NgoloFuzzOne_ToComplex:
			if ValueNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Value%d := constant.ToComplex(Value%d)\n", ValueNb, (ValueResultsIndex + 0) % ValueNb))
			ValueNb = ValueNb + 1
			ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_go_constant
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// KindEnum is the protobuf enum selecting a go/constant.Kind
// (see KindNewFromFuzz for the mapping).
type KindEnum int32

const (
	KindEnum_Unknown KindEnum = 0
	KindEnum_Bool    KindEnum = 1
	KindEnum_String  KindEnum = 2
	KindEnum_Int     KindEnum = 3
	KindEnum_Float   KindEnum = 4
	KindEnum_Complex KindEnum = 5
)

// Enum value maps for KindEnum.
var (
	KindEnum_name = map[int32]string{
		0: "Unknown",
		1: "Bool",
		2: "String",
		3: "Int",
		4: "Float",
		5: "Complex",
	}
	KindEnum_value = map[string]int32{
		"Unknown": 0,
		"Bool":    1,
		"String":  2,
		"Int":     3,
		"Float":   4,
		"Complex": 5,
	}
)

// Enum returns a pointer to a copy of x.
func (x KindEnum) Enum() *KindEnum {
	p := new(KindEnum)
	*p = x
	return p
}

// String returns the enum value's declared name.
func (x KindEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor for KindEnum.
func (KindEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}

// Type returns the runtime enum type for KindEnum.
func (KindEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[0]
}

// Number returns x as a protoreflect.EnumNumber.
func (x KindEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use KindEnum.Descriptor instead.
func (KindEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// KindNgdotStringArgs carries the argument for a Kind.String() call.
type KindNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	I             KindEnum               `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.KindEnum" json:"i,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its message-info (slot 0).
func (x *KindNgdotStringArgs) Reset() {
	*x = KindNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *KindNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks KindNgdotStringArgs as a protobuf message.
func (*KindNgdotStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (slot 0).
func (x *KindNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use KindNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*KindNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetI returns field i, or KindEnum_Unknown on a nil receiver.
func (x *KindNgdotStringArgs) GetI() KindEnum {
	if x != nil {
		return x.I
	}
	return KindEnum_Unknown
}
// MakeUnknownArgs is the (empty) argument message for constant.MakeUnknown.
type MakeUnknownArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its message-info (slot 1).
func (x *MakeUnknownArgs) Reset() {
	*x = MakeUnknownArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *MakeUnknownArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks MakeUnknownArgs as a protobuf message.
func (*MakeUnknownArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (slot 1).
func (x *MakeUnknownArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MakeUnknownArgs.ProtoReflect.Descriptor instead.
func (*MakeUnknownArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// MakeBoolArgs carries the argument for constant.MakeBool.
type MakeBoolArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	B             bool                   `protobuf:"varint,1,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its message-info (slot 2).
func (x *MakeBoolArgs) Reset() {
	*x = MakeBoolArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *MakeBoolArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks MakeBoolArgs as a protobuf message.
func (*MakeBoolArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (slot 2).
func (x *MakeBoolArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MakeBoolArgs.ProtoReflect.Descriptor instead.
func (*MakeBoolArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetB returns field b, or false on a nil receiver.
func (x *MakeBoolArgs) GetB() bool {
	if x != nil {
		return x.B
	}
	return false
}

// MakeStringArgs carries the argument for constant.MakeString.
type MakeStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its message-info (slot 3).
func (x *MakeStringArgs) Reset() {
	*x = MakeStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *MakeStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks MakeStringArgs as a protobuf message.
func (*MakeStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (slot 3).
func (x *MakeStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MakeStringArgs.ProtoReflect.Descriptor instead.
func (*MakeStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetS returns field s, or "" on a nil receiver.
func (x *MakeStringArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// MakeInt64Args carries the argument for constant.MakeInt64.
type MakeInt64Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	X             int64                  `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its message-info (slot 4).
func (x *MakeInt64Args) Reset() {
	*x = MakeInt64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *MakeInt64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks MakeInt64Args as a protobuf message.
func (*MakeInt64Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (slot 4).
func (x *MakeInt64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MakeInt64Args.ProtoReflect.Descriptor instead.
func (*MakeInt64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetX returns field x, or 0 on a nil receiver.
func (x *MakeInt64Args) GetX() int64 {
	if x != nil {
		return x.X
	}
	return 0
}

// MakeUint64Args carries the argument for constant.MakeUint64.
type MakeUint64Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	X             uint64                 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its message-info (slot 5).
func (x *MakeUint64Args) Reset() {
	*x = MakeUint64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *MakeUint64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks MakeUint64Args as a protobuf message.
func (*MakeUint64Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (slot 5).
func (x *MakeUint64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MakeUint64Args.ProtoReflect.Descriptor instead.
func (*MakeUint64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetX returns field x, or 0 on a nil receiver.
func (x *MakeUint64Args) GetX() uint64 {
	if x != nil {
		return x.X
	}
	return 0
}

// MakeFloat64Args carries the argument for constant.MakeFloat64.
type MakeFloat64Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	X             float64                `protobuf:"fixed64,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-binds its message-info (slot 6).
func (x *MakeFloat64Args) Reset() {
	*x = MakeFloat64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *MakeFloat64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks MakeFloat64Args as a protobuf message.
func (*MakeFloat64Args) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (slot 6).
func (x *MakeFloat64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MakeFloat64Args.ProtoReflect.Descriptor instead.
func (*MakeFloat64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetX returns field x, or 0 on a nil receiver.
func (x *MakeFloat64Args) GetX() float64 {
	if x != nil {
		return x.X
	}
	return 0
}
type BoolValArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *BoolValArgs) Reset() {
*x = BoolValArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *BoolValArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BoolValArgs) ProtoMessage() {}
func (x *BoolValArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BoolValArgs.ProtoReflect.Descriptor instead.
func (*BoolValArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
type StringValArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *StringValArgs) Reset() {
*x = StringValArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *StringValArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StringValArgs) ProtoMessage() {}
func (x *StringValArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StringValArgs.ProtoReflect.Descriptor instead.
func (*StringValArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
type Int64ValArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Int64ValArgs) Reset() {
*x = Int64ValArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Int64ValArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Int64ValArgs) ProtoMessage() {}
func (x *Int64ValArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Int64ValArgs.ProtoReflect.Descriptor instead.
func (*Int64ValArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// Uint64ValArgs is a generated message with no fields (message index 10 in
// ngolofuzz.proto).
type Uint64ValArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *Uint64ValArgs) Reset() {
*x = Uint64ValArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *Uint64ValArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Uint64ValArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *Uint64ValArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Uint64ValArgs.ProtoReflect.Descriptor instead.
func (*Uint64ValArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// Float32ValArgs is a generated message with no fields (message index 11 in
// ngolofuzz.proto).
type Float32ValArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *Float32ValArgs) Reset() {
*x = Float32ValArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *Float32ValArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Float32ValArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *Float32ValArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Float32ValArgs.ProtoReflect.Descriptor instead.
func (*Float32ValArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// Float64ValArgs is a generated message with no fields (message index 12 in
// ngolofuzz.proto).
type Float64ValArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *Float64ValArgs) Reset() {
*x = Float64ValArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *Float64ValArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Float64ValArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *Float64ValArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Float64ValArgs.ProtoReflect.Descriptor instead.
func (*Float64ValArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// ValArgs is a generated message with no fields (message index 13 in
// ngolofuzz.proto).
type ValArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *ValArgs) Reset() {
*x = ValArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *ValArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *ValArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValArgs.ProtoReflect.Descriptor instead.
func (*ValArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// MakeArgs is a generated message (index 14 in ngolofuzz.proto) carrying a
// single NgoloFuzzAny field x.
type MakeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X *NgoloFuzzAny `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *MakeArgs) Reset() {
*x = MakeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *MakeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MakeArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *MakeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MakeArgs.ProtoReflect.Descriptor instead.
func (*MakeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// GetX returns the x field, or nil when the receiver is nil (nil-safe accessor).
func (x *MakeArgs) GetX() *NgoloFuzzAny {
if x != nil {
return x.X
}
return nil
}
// BitLenArgs is a generated message with no fields (message index 15 in
// ngolofuzz.proto).
type BitLenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *BitLenArgs) Reset() {
*x = BitLenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *BitLenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BitLenArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *BitLenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BitLenArgs.ProtoReflect.Descriptor instead.
func (*BitLenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// SignArgs is a generated message with no fields (message index 16 in
// ngolofuzz.proto).
type SignArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *SignArgs) Reset() {
*x = SignArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *SignArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SignArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *SignArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SignArgs.ProtoReflect.Descriptor instead.
func (*SignArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// BytesArgs is a generated message with no fields (message index 17 in
// ngolofuzz.proto).
type BytesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *BytesArgs) Reset() {
*x = BytesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *BytesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BytesArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *BytesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BytesArgs.ProtoReflect.Descriptor instead.
func (*BytesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// MakeFromBytesArgs is a generated message (index 18 in ngolofuzz.proto)
// carrying a single bytes field.
type MakeFromBytesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *MakeFromBytesArgs) Reset() {
*x = MakeFromBytesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *MakeFromBytesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MakeFromBytesArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *MakeFromBytesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MakeFromBytesArgs.ProtoReflect.Descriptor instead.
func (*MakeFromBytesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// GetBytes returns the bytes field, or nil when the receiver is nil (nil-safe accessor).
func (x *MakeFromBytesArgs) GetBytes() []byte {
if x != nil {
return x.Bytes
}
return nil
}
// NumArgs is a generated message with no fields (message index 19 in
// ngolofuzz.proto).
type NumArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *NumArgs) Reset() {
*x = NumArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *NumArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NumArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *NumArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NumArgs.ProtoReflect.Descriptor instead.
func (*NumArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// DenomArgs is a generated message with no fields (message index 20 in
// ngolofuzz.proto).
type DenomArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *DenomArgs) Reset() {
*x = DenomArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *DenomArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DenomArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *DenomArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DenomArgs.ProtoReflect.Descriptor instead.
func (*DenomArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// MakeImagArgs is a generated message with no fields (message index 21 in
// ngolofuzz.proto).
type MakeImagArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *MakeImagArgs) Reset() {
*x = MakeImagArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *MakeImagArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MakeImagArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *MakeImagArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MakeImagArgs.ProtoReflect.Descriptor instead.
func (*MakeImagArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// RealArgs is a generated message with no fields (message index 22 in
// ngolofuzz.proto).
type RealArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *RealArgs) Reset() {
*x = RealArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *RealArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RealArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *RealArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RealArgs.ProtoReflect.Descriptor instead.
func (*RealArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// ImagArgs is a generated message with no fields (message index 23 in
// ngolofuzz.proto).
type ImagArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *ImagArgs) Reset() {
*x = ImagArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *ImagArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ImagArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *ImagArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ImagArgs.ProtoReflect.Descriptor instead.
func (*ImagArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// ToIntArgs is a generated message with no fields (message index 24 in
// ngolofuzz.proto).
type ToIntArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *ToIntArgs) Reset() {
*x = ToIntArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *ToIntArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ToIntArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *ToIntArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ToIntArgs.ProtoReflect.Descriptor instead.
func (*ToIntArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// ToFloatArgs is a generated message with no fields (message index 25 in
// ngolofuzz.proto).
type ToFloatArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *ToFloatArgs) Reset() {
*x = ToFloatArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *ToFloatArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ToFloatArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *ToFloatArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ToFloatArgs.ProtoReflect.Descriptor instead.
func (*ToFloatArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// ToComplexArgs is a generated message with no fields (message index 26 in
// ngolofuzz.proto).
type ToComplexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *ToComplexArgs) Reset() {
*x = ToComplexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *ToComplexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ToComplexArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *ToComplexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ToComplexArgs.ProtoReflect.Descriptor instead.
func (*ToComplexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// NgoloFuzzOne is a generated message (index 27 in ngolofuzz.proto) holding
// exactly one of 27 possible call-argument messages in its Item oneof.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_KindNgdotString
// *NgoloFuzzOne_MakeUnknown
// *NgoloFuzzOne_MakeBool
// *NgoloFuzzOne_MakeString
// *NgoloFuzzOne_MakeInt64
// *NgoloFuzzOne_MakeUint64
// *NgoloFuzzOne_MakeFloat64
// *NgoloFuzzOne_BoolVal
// *NgoloFuzzOne_StringVal
// *NgoloFuzzOne_Int64Val
// *NgoloFuzzOne_Uint64Val
// *NgoloFuzzOne_Float32Val
// *NgoloFuzzOne_Float64Val
// *NgoloFuzzOne_Val
// *NgoloFuzzOne_Make
// *NgoloFuzzOne_BitLen
// *NgoloFuzzOne_Sign
// *NgoloFuzzOne_Bytes
// *NgoloFuzzOne_MakeFromBytes
// *NgoloFuzzOne_Num
// *NgoloFuzzOne_Denom
// *NgoloFuzzOne_MakeImag
// *NgoloFuzzOne_Real
// *NgoloFuzzOne_Imag
// *NgoloFuzzOne_ToInt
// *NgoloFuzzOne_ToFloat
// *NgoloFuzzOne_ToComplex
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// GetItem returns the populated oneof wrapper, or nil when the receiver is
// nil or no variant is set.
//
// Each Get* accessor below is nil-safe: it returns the corresponding
// variant's payload only when that variant is the one stored in Item, and
// nil otherwise.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetKindNgdotString() *KindNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_KindNgdotString); ok {
return x.KindNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetMakeUnknown() *MakeUnknownArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MakeUnknown); ok {
return x.MakeUnknown
}
}
return nil
}
func (x *NgoloFuzzOne) GetMakeBool() *MakeBoolArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MakeBool); ok {
return x.MakeBool
}
}
return nil
}
func (x *NgoloFuzzOne) GetMakeString() *MakeStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MakeString); ok {
return x.MakeString
}
}
return nil
}
func (x *NgoloFuzzOne) GetMakeInt64() *MakeInt64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MakeInt64); ok {
return x.MakeInt64
}
}
return nil
}
func (x *NgoloFuzzOne) GetMakeUint64() *MakeUint64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MakeUint64); ok {
return x.MakeUint64
}
}
return nil
}
func (x *NgoloFuzzOne) GetMakeFloat64() *MakeFloat64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MakeFloat64); ok {
return x.MakeFloat64
}
}
return nil
}
func (x *NgoloFuzzOne) GetBoolVal() *BoolValArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_BoolVal); ok {
return x.BoolVal
}
}
return nil
}
func (x *NgoloFuzzOne) GetStringVal() *StringValArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_StringVal); ok {
return x.StringVal
}
}
return nil
}
func (x *NgoloFuzzOne) GetInt64Val() *Int64ValArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Int64Val); ok {
return x.Int64Val
}
}
return nil
}
func (x *NgoloFuzzOne) GetUint64Val() *Uint64ValArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Uint64Val); ok {
return x.Uint64Val
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloat32Val() *Float32ValArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Float32Val); ok {
return x.Float32Val
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloat64Val() *Float64ValArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Float64Val); ok {
return x.Float64Val
}
}
return nil
}
func (x *NgoloFuzzOne) GetVal() *ValArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Val); ok {
return x.Val
}
}
return nil
}
func (x *NgoloFuzzOne) GetMake() *MakeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Make); ok {
return x.Make
}
}
return nil
}
func (x *NgoloFuzzOne) GetBitLen() *BitLenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_BitLen); ok {
return x.BitLen
}
}
return nil
}
func (x *NgoloFuzzOne) GetSign() *SignArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sign); ok {
return x.Sign
}
}
return nil
}
func (x *NgoloFuzzOne) GetBytes() *BytesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Bytes); ok {
return x.Bytes
}
}
return nil
}
func (x *NgoloFuzzOne) GetMakeFromBytes() *MakeFromBytesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MakeFromBytes); ok {
return x.MakeFromBytes
}
}
return nil
}
func (x *NgoloFuzzOne) GetNum() *NumArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Num); ok {
return x.Num
}
}
return nil
}
func (x *NgoloFuzzOne) GetDenom() *DenomArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Denom); ok {
return x.Denom
}
}
return nil
}
func (x *NgoloFuzzOne) GetMakeImag() *MakeImagArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MakeImag); ok {
return x.MakeImag
}
}
return nil
}
func (x *NgoloFuzzOne) GetReal() *RealArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Real); ok {
return x.Real
}
}
return nil
}
func (x *NgoloFuzzOne) GetImag() *ImagArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Imag); ok {
return x.Imag
}
}
return nil
}
func (x *NgoloFuzzOne) GetToInt() *ToIntArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToInt); ok {
return x.ToInt
}
}
return nil
}
func (x *NgoloFuzzOne) GetToFloat() *ToFloatArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToFloat); ok {
return x.ToFloat
}
}
return nil
}
func (x *NgoloFuzzOne) GetToComplex() *ToComplexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToComplex); ok {
return x.ToComplex
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type for NgoloFuzzOne.Item; the wrapper types below each hold one
// variant's payload, and the empty marker methods at the end seal the set.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_KindNgdotString struct {
KindNgdotString *KindNgdotStringArgs `protobuf:"bytes,1,opt,name=KindNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_MakeUnknown struct {
MakeUnknown *MakeUnknownArgs `protobuf:"bytes,2,opt,name=MakeUnknown,proto3,oneof"`
}
type NgoloFuzzOne_MakeBool struct {
MakeBool *MakeBoolArgs `protobuf:"bytes,3,opt,name=MakeBool,proto3,oneof"`
}
type NgoloFuzzOne_MakeString struct {
MakeString *MakeStringArgs `protobuf:"bytes,4,opt,name=MakeString,proto3,oneof"`
}
type NgoloFuzzOne_MakeInt64 struct {
MakeInt64 *MakeInt64Args `protobuf:"bytes,5,opt,name=MakeInt64,proto3,oneof"`
}
type NgoloFuzzOne_MakeUint64 struct {
MakeUint64 *MakeUint64Args `protobuf:"bytes,6,opt,name=MakeUint64,proto3,oneof"`
}
type NgoloFuzzOne_MakeFloat64 struct {
MakeFloat64 *MakeFloat64Args `protobuf:"bytes,7,opt,name=MakeFloat64,proto3,oneof"`
}
type NgoloFuzzOne_BoolVal struct {
BoolVal *BoolValArgs `protobuf:"bytes,8,opt,name=BoolVal,proto3,oneof"`
}
type NgoloFuzzOne_StringVal struct {
StringVal *StringValArgs `protobuf:"bytes,9,opt,name=StringVal,proto3,oneof"`
}
type NgoloFuzzOne_Int64Val struct {
Int64Val *Int64ValArgs `protobuf:"bytes,10,opt,name=Int64Val,proto3,oneof"`
}
type NgoloFuzzOne_Uint64Val struct {
Uint64Val *Uint64ValArgs `protobuf:"bytes,11,opt,name=Uint64Val,proto3,oneof"`
}
type NgoloFuzzOne_Float32Val struct {
Float32Val *Float32ValArgs `protobuf:"bytes,12,opt,name=Float32Val,proto3,oneof"`
}
type NgoloFuzzOne_Float64Val struct {
Float64Val *Float64ValArgs `protobuf:"bytes,13,opt,name=Float64Val,proto3,oneof"`
}
type NgoloFuzzOne_Val struct {
Val *ValArgs `protobuf:"bytes,14,opt,name=Val,proto3,oneof"`
}
type NgoloFuzzOne_Make struct {
Make *MakeArgs `protobuf:"bytes,15,opt,name=Make,proto3,oneof"`
}
type NgoloFuzzOne_BitLen struct {
BitLen *BitLenArgs `protobuf:"bytes,16,opt,name=BitLen,proto3,oneof"`
}
type NgoloFuzzOne_Sign struct {
Sign *SignArgs `protobuf:"bytes,17,opt,name=Sign,proto3,oneof"`
}
type NgoloFuzzOne_Bytes struct {
Bytes *BytesArgs `protobuf:"bytes,18,opt,name=Bytes,proto3,oneof"`
}
type NgoloFuzzOne_MakeFromBytes struct {
MakeFromBytes *MakeFromBytesArgs `protobuf:"bytes,19,opt,name=MakeFromBytes,proto3,oneof"`
}
type NgoloFuzzOne_Num struct {
Num *NumArgs `protobuf:"bytes,20,opt,name=Num,proto3,oneof"`
}
type NgoloFuzzOne_Denom struct {
Denom *DenomArgs `protobuf:"bytes,21,opt,name=Denom,proto3,oneof"`
}
type NgoloFuzzOne_MakeImag struct {
MakeImag *MakeImagArgs `protobuf:"bytes,22,opt,name=MakeImag,proto3,oneof"`
}
type NgoloFuzzOne_Real struct {
Real *RealArgs `protobuf:"bytes,23,opt,name=Real,proto3,oneof"`
}
type NgoloFuzzOne_Imag struct {
Imag *ImagArgs `protobuf:"bytes,24,opt,name=Imag,proto3,oneof"`
}
type NgoloFuzzOne_ToInt struct {
ToInt *ToIntArgs `protobuf:"bytes,25,opt,name=ToInt,proto3,oneof"`
}
type NgoloFuzzOne_ToFloat struct {
ToFloat *ToFloatArgs `protobuf:"bytes,26,opt,name=ToFloat,proto3,oneof"`
}
type NgoloFuzzOne_ToComplex struct {
ToComplex *ToComplexArgs `protobuf:"bytes,27,opt,name=ToComplex,proto3,oneof"`
}
func (*NgoloFuzzOne_KindNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MakeUnknown) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MakeBool) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MakeString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MakeInt64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MakeUint64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MakeFloat64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_BoolVal) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_StringVal) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Int64Val) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Uint64Val) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Float32Val) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Float64Val) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Val) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Make) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_BitLen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sign) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Bytes) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MakeFromBytes) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Num) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Denom) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MakeImag) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Real) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Imag) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToInt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToFloat) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToComplex) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated message (index 28 in ngolofuzz.proto) holding
// one scalar value (double, int64, bool, string, or bytes) in its Item oneof.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
// GetItem returns the populated oneof wrapper, or nil when the receiver is
// nil or no variant is set. Each Get* accessor below is nil-safe and returns
// the zero value when its variant is not the one stored in Item.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented by the oneof
// wrapper types for NgoloFuzzAny.Item.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is a generated message (index 29 in ngolofuzz.proto) holding
// a repeated sequence of NgoloFuzzOne items.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and rebinds its cached message info.
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the protobuf text form of x.
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect returns a reflective view of x, caching message info on first use.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
// GetList returns the list field, or nil when the receiver is nil (nil-safe accessor).
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the compiled FileDescriptor for ngolofuzz.proto
// (populated by the generated file-init machinery).
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto, embedded as escaped bytes. Do not edit: the runtime
// parses these exact bytes.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"8\n" +
"\x13KindNgdotStringArgs\x12!\n" +
"\x01i\x18\x01 \x01(\x0e2\x13.ngolofuzz.KindEnumR\x01i\"\x11\n" +
"\x0fMakeUnknownArgs\"\x1c\n" +
"\fMakeBoolArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\bR\x01b\"\x1e\n" +
"\x0eMakeStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x1d\n" +
"\rMakeInt64Args\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\"\x1e\n" +
"\x0eMakeUint64Args\x12\f\n" +
"\x01x\x18\x01 \x01(\x04R\x01x\"\x1f\n" +
"\x0fMakeFloat64Args\x12\f\n" +
"\x01x\x18\x01 \x01(\x01R\x01x\"\r\n" +
"\vBoolValArgs\"\x0f\n" +
"\rStringValArgs\"\x0e\n" +
"\fInt64ValArgs\"\x0f\n" +
"\rUint64ValArgs\"\x10\n" +
"\x0eFloat32ValArgs\"\x10\n" +
"\x0eFloat64ValArgs\"\t\n" +
"\aValArgs\"1\n" +
"\bMakeArgs\x12%\n" +
"\x01x\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01x\"\f\n" +
"\n" +
"BitLenArgs\"\n" +
"\n" +
"\bSignArgs\"\v\n" +
"\tBytesArgs\")\n" +
"\x11MakeFromBytesArgs\x12\x14\n" +
"\x05bytes\x18\x01 \x01(\fR\x05bytes\"\t\n" +
"\aNumArgs\"\v\n" +
"\tDenomArgs\"\x0e\n" +
"\fMakeImagArgs\"\n" +
"\n" +
"\bRealArgs\"\n" +
"\n" +
"\bImagArgs\"\v\n" +
"\tToIntArgs\"\r\n" +
"\vToFloatArgs\"\x0f\n" +
"\rToComplexArgs\"\xc8\v\n" +
"\fNgoloFuzzOne\x12J\n" +
"\x0fKindNgdotString\x18\x01 \x01(\v2\x1e.ngolofuzz.KindNgdotStringArgsH\x00R\x0fKindNgdotString\x12>\n" +
"\vMakeUnknown\x18\x02 \x01(\v2\x1a.ngolofuzz.MakeUnknownArgsH\x00R\vMakeUnknown\x125\n" +
"\bMakeBool\x18\x03 \x01(\v2\x17.ngolofuzz.MakeBoolArgsH\x00R\bMakeBool\x12;\n" +
"\n" +
"MakeString\x18\x04 \x01(\v2\x19.ngolofuzz.MakeStringArgsH\x00R\n" +
"MakeString\x128\n" +
"\tMakeInt64\x18\x05 \x01(\v2\x18.ngolofuzz.MakeInt64ArgsH\x00R\tMakeInt64\x12;\n" +
"\n" +
"MakeUint64\x18\x06 \x01(\v2\x19.ngolofuzz.MakeUint64ArgsH\x00R\n" +
"MakeUint64\x12>\n" +
"\vMakeFloat64\x18\a \x01(\v2\x1a.ngolofuzz.MakeFloat64ArgsH\x00R\vMakeFloat64\x122\n" +
"\aBoolVal\x18\b \x01(\v2\x16.ngolofuzz.BoolValArgsH\x00R\aBoolVal\x128\n" +
"\tStringVal\x18\t \x01(\v2\x18.ngolofuzz.StringValArgsH\x00R\tStringVal\x125\n" +
"\bInt64Val\x18\n" +
" \x01(\v2\x17.ngolofuzz.Int64ValArgsH\x00R\bInt64Val\x128\n" +
"\tUint64Val\x18\v \x01(\v2\x18.ngolofuzz.Uint64ValArgsH\x00R\tUint64Val\x12;\n" +
"\n" +
"Float32Val\x18\f \x01(\v2\x19.ngolofuzz.Float32ValArgsH\x00R\n" +
"Float32Val\x12;\n" +
"\n" +
"Float64Val\x18\r \x01(\v2\x19.ngolofuzz.Float64ValArgsH\x00R\n" +
"Float64Val\x12&\n" +
"\x03Val\x18\x0e \x01(\v2\x12.ngolofuzz.ValArgsH\x00R\x03Val\x12)\n" +
"\x04Make\x18\x0f \x01(\v2\x13.ngolofuzz.MakeArgsH\x00R\x04Make\x12/\n" +
"\x06BitLen\x18\x10 \x01(\v2\x15.ngolofuzz.BitLenArgsH\x00R\x06BitLen\x12)\n" +
"\x04Sign\x18\x11 \x01(\v2\x13.ngolofuzz.SignArgsH\x00R\x04Sign\x12,\n" +
"\x05Bytes\x18\x12 \x01(\v2\x14.ngolofuzz.BytesArgsH\x00R\x05Bytes\x12D\n" +
"\rMakeFromBytes\x18\x13 \x01(\v2\x1c.ngolofuzz.MakeFromBytesArgsH\x00R\rMakeFromBytes\x12&\n" +
"\x03Num\x18\x14 \x01(\v2\x12.ngolofuzz.NumArgsH\x00R\x03Num\x12,\n" +
"\x05Denom\x18\x15 \x01(\v2\x14.ngolofuzz.DenomArgsH\x00R\x05Denom\x125\n" +
"\bMakeImag\x18\x16 \x01(\v2\x17.ngolofuzz.MakeImagArgsH\x00R\bMakeImag\x12)\n" +
"\x04Real\x18\x17 \x01(\v2\x13.ngolofuzz.RealArgsH\x00R\x04Real\x12)\n" +
"\x04Imag\x18\x18 \x01(\v2\x13.ngolofuzz.ImagArgsH\x00R\x04Imag\x12,\n" +
"\x05ToInt\x18\x19 \x01(\v2\x14.ngolofuzz.ToIntArgsH\x00R\x05ToInt\x122\n" +
"\aToFloat\x18\x1a \x01(\v2\x16.ngolofuzz.ToFloatArgsH\x00R\aToFloat\x128\n" +
"\tToComplex\x18\x1b \x01(\v2\x18.ngolofuzz.ToComplexArgsH\x00R\tToComplexB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*N\n" +
"\bKindEnum\x12\v\n" +
"\aUnknown\x10\x00\x12\b\n" +
"\x04Bool\x10\x01\x12\n" +
"\n" +
"\x06String\x10\x02\x12\a\n" +
"\x03Int\x10\x03\x12\t\n" +
"\x05Float\x10\x04\x12\v\n" +
"\aComplex\x10\x05B\x18Z\x16./;fuzz_ng_go_constantb\x06proto3"
// Lazily-computed, cached gzip form of the raw descriptor for ngolofuzz.proto.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP returns the gzip-compressed raw proto
// descriptor, compressing it exactly once and caching the result.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		// Reinterpret the descriptor string as a byte slice without copying.
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime type metadata: one enum and 30 messages are declared in ngolofuzz.proto.
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 30)
// file_ngolofuzz_proto_goTypes lists the Go types generated for this file;
// entries are referenced by index from file_ngolofuzz_proto_depIdxs.
var file_ngolofuzz_proto_goTypes = []any{
	(KindEnum)(0),                // 0: ngolofuzz.KindEnum
	(*KindNgdotStringArgs)(nil),  // 1: ngolofuzz.KindNgdotStringArgs
	(*MakeUnknownArgs)(nil),      // 2: ngolofuzz.MakeUnknownArgs
	(*MakeBoolArgs)(nil),         // 3: ngolofuzz.MakeBoolArgs
	(*MakeStringArgs)(nil),       // 4: ngolofuzz.MakeStringArgs
	(*MakeInt64Args)(nil),        // 5: ngolofuzz.MakeInt64Args
	(*MakeUint64Args)(nil),       // 6: ngolofuzz.MakeUint64Args
	(*MakeFloat64Args)(nil),      // 7: ngolofuzz.MakeFloat64Args
	(*BoolValArgs)(nil),          // 8: ngolofuzz.BoolValArgs
	(*StringValArgs)(nil),        // 9: ngolofuzz.StringValArgs
	(*Int64ValArgs)(nil),         // 10: ngolofuzz.Int64ValArgs
	(*Uint64ValArgs)(nil),        // 11: ngolofuzz.Uint64ValArgs
	(*Float32ValArgs)(nil),       // 12: ngolofuzz.Float32ValArgs
	(*Float64ValArgs)(nil),       // 13: ngolofuzz.Float64ValArgs
	(*ValArgs)(nil),              // 14: ngolofuzz.ValArgs
	(*MakeArgs)(nil),             // 15: ngolofuzz.MakeArgs
	(*BitLenArgs)(nil),           // 16: ngolofuzz.BitLenArgs
	(*SignArgs)(nil),             // 17: ngolofuzz.SignArgs
	(*BytesArgs)(nil),            // 18: ngolofuzz.BytesArgs
	(*MakeFromBytesArgs)(nil),    // 19: ngolofuzz.MakeFromBytesArgs
	(*NumArgs)(nil),              // 20: ngolofuzz.NumArgs
	(*DenomArgs)(nil),            // 21: ngolofuzz.DenomArgs
	(*MakeImagArgs)(nil),         // 22: ngolofuzz.MakeImagArgs
	(*RealArgs)(nil),             // 23: ngolofuzz.RealArgs
	(*ImagArgs)(nil),             // 24: ngolofuzz.ImagArgs
	(*ToIntArgs)(nil),            // 25: ngolofuzz.ToIntArgs
	(*ToFloatArgs)(nil),          // 26: ngolofuzz.ToFloatArgs
	(*ToComplexArgs)(nil),        // 27: ngolofuzz.ToComplexArgs
	(*NgoloFuzzOne)(nil),         // 28: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),         // 29: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),        // 30: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs resolves every cross-reference in the
// descriptor to an index into file_ngolofuzz_proto_goTypes; the trailing
// bracketed entries delimit the method/extension/field sub-lists.
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.KindNgdotStringArgs.i:type_name -> ngolofuzz.KindEnum
	29, // 1: ngolofuzz.MakeArgs.x:type_name -> ngolofuzz.NgoloFuzzAny
	1,  // 2: ngolofuzz.NgoloFuzzOne.KindNgdotString:type_name -> ngolofuzz.KindNgdotStringArgs
	2,  // 3: ngolofuzz.NgoloFuzzOne.MakeUnknown:type_name -> ngolofuzz.MakeUnknownArgs
	3,  // 4: ngolofuzz.NgoloFuzzOne.MakeBool:type_name -> ngolofuzz.MakeBoolArgs
	4,  // 5: ngolofuzz.NgoloFuzzOne.MakeString:type_name -> ngolofuzz.MakeStringArgs
	5,  // 6: ngolofuzz.NgoloFuzzOne.MakeInt64:type_name -> ngolofuzz.MakeInt64Args
	6,  // 7: ngolofuzz.NgoloFuzzOne.MakeUint64:type_name -> ngolofuzz.MakeUint64Args
	7,  // 8: ngolofuzz.NgoloFuzzOne.MakeFloat64:type_name -> ngolofuzz.MakeFloat64Args
	8,  // 9: ngolofuzz.NgoloFuzzOne.BoolVal:type_name -> ngolofuzz.BoolValArgs
	9,  // 10: ngolofuzz.NgoloFuzzOne.StringVal:type_name -> ngolofuzz.StringValArgs
	10, // 11: ngolofuzz.NgoloFuzzOne.Int64Val:type_name -> ngolofuzz.Int64ValArgs
	11, // 12: ngolofuzz.NgoloFuzzOne.Uint64Val:type_name -> ngolofuzz.Uint64ValArgs
	12, // 13: ngolofuzz.NgoloFuzzOne.Float32Val:type_name -> ngolofuzz.Float32ValArgs
	13, // 14: ngolofuzz.NgoloFuzzOne.Float64Val:type_name -> ngolofuzz.Float64ValArgs
	14, // 15: ngolofuzz.NgoloFuzzOne.Val:type_name -> ngolofuzz.ValArgs
	15, // 16: ngolofuzz.NgoloFuzzOne.Make:type_name -> ngolofuzz.MakeArgs
	16, // 17: ngolofuzz.NgoloFuzzOne.BitLen:type_name -> ngolofuzz.BitLenArgs
	17, // 18: ngolofuzz.NgoloFuzzOne.Sign:type_name -> ngolofuzz.SignArgs
	18, // 19: ngolofuzz.NgoloFuzzOne.Bytes:type_name -> ngolofuzz.BytesArgs
	19, // 20: ngolofuzz.NgoloFuzzOne.MakeFromBytes:type_name -> ngolofuzz.MakeFromBytesArgs
	20, // 21: ngolofuzz.NgoloFuzzOne.Num:type_name -> ngolofuzz.NumArgs
	21, // 22: ngolofuzz.NgoloFuzzOne.Denom:type_name -> ngolofuzz.DenomArgs
	22, // 23: ngolofuzz.NgoloFuzzOne.MakeImag:type_name -> ngolofuzz.MakeImagArgs
	23, // 24: ngolofuzz.NgoloFuzzOne.Real:type_name -> ngolofuzz.RealArgs
	24, // 25: ngolofuzz.NgoloFuzzOne.Imag:type_name -> ngolofuzz.ImagArgs
	25, // 26: ngolofuzz.NgoloFuzzOne.ToInt:type_name -> ngolofuzz.ToIntArgs
	26, // 27: ngolofuzz.NgoloFuzzOne.ToFloat:type_name -> ngolofuzz.ToFloatArgs
	27, // 28: ngolofuzz.NgoloFuzzOne.ToComplex:type_name -> ngolofuzz.ToComplexArgs
	28, // 29: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	30, // [30:30] is the sub-list for method output_type
	30, // [30:30] is the sub-list for method input_type
	30, // [30:30] is the sub-list for extension type_name
	30, // [30:30] is the sub-list for extension extendee
	0,  // [0:30] is the sub-list for field type_name
}
// Register the file descriptor with the protobuf runtime at package load time.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init wires oneof wrapper types into the message
// metadata and builds the file descriptor exactly once; it is idempotent.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Oneof wrappers for the NgoloFuzzOne.item oneof (message index 27).
	file_ngolofuzz_proto_msgTypes[27].OneofWrappers = []any{
		(*NgoloFuzzOne_KindNgdotString)(nil),
		(*NgoloFuzzOne_MakeUnknown)(nil),
		(*NgoloFuzzOne_MakeBool)(nil),
		(*NgoloFuzzOne_MakeString)(nil),
		(*NgoloFuzzOne_MakeInt64)(nil),
		(*NgoloFuzzOne_MakeUint64)(nil),
		(*NgoloFuzzOne_MakeFloat64)(nil),
		(*NgoloFuzzOne_BoolVal)(nil),
		(*NgoloFuzzOne_StringVal)(nil),
		(*NgoloFuzzOne_Int64Val)(nil),
		(*NgoloFuzzOne_Uint64Val)(nil),
		(*NgoloFuzzOne_Float32Val)(nil),
		(*NgoloFuzzOne_Float64Val)(nil),
		(*NgoloFuzzOne_Val)(nil),
		(*NgoloFuzzOne_Make)(nil),
		(*NgoloFuzzOne_BitLen)(nil),
		(*NgoloFuzzOne_Sign)(nil),
		(*NgoloFuzzOne_Bytes)(nil),
		(*NgoloFuzzOne_MakeFromBytes)(nil),
		(*NgoloFuzzOne_Num)(nil),
		(*NgoloFuzzOne_Denom)(nil),
		(*NgoloFuzzOne_MakeImag)(nil),
		(*NgoloFuzzOne_Real)(nil),
		(*NgoloFuzzOne_Imag)(nil),
		(*NgoloFuzzOne_ToInt)(nil),
		(*NgoloFuzzOne_ToFloat)(nil),
		(*NgoloFuzzOne_ToComplex)(nil),
	}
	// Oneof wrappers for the NgoloFuzzAny.item oneof (message index 28).
	file_ngolofuzz_proto_msgTypes[28].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      1,
			NumMessages:   30,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		EnumInfos:         file_ngolofuzz_proto_enumTypes,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release construction-only tables so the GC can reclaim them.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_go_doc
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"go/doc"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory, read-only net.Conn stand-in that serves the
// fuzz-provided buffer and discards all writes.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // number of bytes already consumed
}

// Read implements io.Reader over the remaining bytes of c.buf.
// It returns io.EOF once the buffer is exhausted and never reports more
// bytes than were actually copied into b.
//
// Bug fixed: the original short-buffer test compared len(b) against
// len(c.buf)+c.offset instead of the remaining byte count
// len(c.buf)-c.offset, so a destination larger than the remainder (but
// smaller than len+offset) caused Read to report len(b) bytes read while
// only the remainder had been copied, and pushed offset past the end.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything that is left: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b can hold the rest of the buffer: drain it.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write implements io.Writer by discarding b while reporting full success.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}
// Close marks the connection as fully consumed so that any later Read
// immediately reports io.EOF. It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr with fixed, recognizable values.
type FuzzingAddr struct{}

// Network reports the synthetic network name used by the fuzzing harness.
func (a *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String reports the synthetic address string used by the fuzzing harness.
func (a *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	var a FuzzingAddr
	return &a
}

// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	var a FuzzingAddr
	return &a
}
// SetDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn wraps a as the readable side of a FuzzingConn.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit the helper functions below only when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer
// (big.Int.SetBytes semantics); an empty slice yields zero.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows a []int64 into a []int, element by element.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each element of a to its low 16 bits.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
	first := rune('\x00')
	for _, c := range s {
		first = c
		break
	}
	return first
}
// ModeNewFromFuzz maps a fuzz-provided ModeEnum onto a go/doc.Mode,
// defaulting to doc.AllDecls for any unrecognized value.
func ModeNewFromFuzz(p ModeEnum) doc.Mode {
	switch p {
	case 2:
		return doc.PreserveAST
	case 1:
		return doc.AllMethods
	default:
		return doc.AllDecls
	}
}
// ConvertModeNewFromFuzz maps each element of a through ModeNewFromFuzz.
func ConvertModeNewFromFuzz(a []ModeEnum) []doc.Mode {
	out := make([]doc.Mode, 0, len(a))
	for _, m := range a {
		out = append(out, ModeNewFromFuzz(m))
	}
	return out
}
// FuzzNG_valid runs one fuzzing iteration on data that is expected to be a
// valid protobuf encoding of NgoloFuzzList; it panics if unmarshaling fails.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// String panics raised by the exercised API are swallowed on purpose;
	// anything else is treated as a real bug and re-raised.
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(string); !ok {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure runs one fuzzing iteration on data that may or may not be a
// valid protobuf encoding of NgoloFuzzList; invalid inputs are ignored.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	// String panics raised by the exercised API are swallowed on purpose;
	// anything else is treated as a real bug and re-raised.
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(string); !ok {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time FUZZ_NG_REPRODUCER setup in FuzzNG_List.
var initialized bool
// FuzzNG_List interprets gen.List as a sequence of go/doc API calls and
// executes them in order. It returns 1 when the whole sequence ran and 0
// when the sequence was truncated for exceeding the operation cap.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// First call only: if FUZZ_NG_REPRODUCER names a file, dump a
		// Go-source reproducer of this input there before executing it.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Round-robin pool of *doc.Package values consumed by the Package* cases.
	// NOTE(review): no case in this switch ever appends to PackageResults, so
	// every Package* case currently takes the empty-pool `continue` — confirm
	// whether the generator was expected to emit a constructor case.
	var PackageResults []*doc.Package
	PackageResultsIndex := 0
	for l := range gen.List {
		// Hard cap on the number of operations executed per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ToHTML:
			arg0 := bytes.NewBuffer(a.ToHTML.W)
			doc.ToHTML(arg0, a.ToHTML.Text, a.ToHTML.Words)
		case *NgoloFuzzOne_ToText:
			arg0 := bytes.NewBuffer(a.ToText.W)
			arg4 := int(a.ToText.Width)
			doc.ToText(arg0, a.ToText.Text, a.ToText.Prefix, a.ToText.CodePrefix, arg4)
		case *NgoloFuzzOne_PackageNgdotParser:
			if len(PackageResults) == 0 {
				continue
			}
			arg0 := PackageResults[PackageResultsIndex]
			PackageResultsIndex = (PackageResultsIndex + 1) % len(PackageResults)
			arg0.Parser()
		case *NgoloFuzzOne_PackageNgdotPrinter:
			if len(PackageResults) == 0 {
				continue
			}
			arg0 := PackageResults[PackageResultsIndex]
			PackageResultsIndex = (PackageResultsIndex + 1) % len(PackageResults)
			arg0.Printer()
		case *NgoloFuzzOne_PackageNgdotHTML:
			if len(PackageResults) == 0 {
				continue
			}
			arg0 := PackageResults[PackageResultsIndex]
			PackageResultsIndex = (PackageResultsIndex + 1) % len(PackageResults)
			arg0.HTML(a.PackageNgdotHTML.Text)
		case *NgoloFuzzOne_PackageNgdotMarkdown:
			if len(PackageResults) == 0 {
				continue
			}
			arg0 := PackageResults[PackageResultsIndex]
			PackageResultsIndex = (PackageResultsIndex + 1) % len(PackageResults)
			arg0.Markdown(a.PackageNgdotMarkdown.Text)
		case *NgoloFuzzOne_PackageNgdotText:
			if len(PackageResults) == 0 {
				continue
			}
			arg0 := PackageResults[PackageResultsIndex]
			PackageResultsIndex = (PackageResultsIndex + 1) % len(PackageResults)
			arg0.Text(a.PackageNgdotText.Text)
		case *NgoloFuzzOne_IsPredeclared:
			doc.IsPredeclared(a.IsPredeclared.S)
		case *NgoloFuzzOne_Synopsis:
			doc.Synopsis(a.Synopsis.Text)
		case *NgoloFuzzOne_PackageNgdotSynopsis:
			if len(PackageResults) == 0 {
				continue
			}
			arg0 := PackageResults[PackageResultsIndex]
			PackageResultsIndex = (PackageResultsIndex + 1) % len(PackageResults)
			arg0.Synopsis(a.PackageNgdotSynopsis.Text)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source-like reproducer of gen to w, mirroring the
// call sequence FuzzNG_List would execute. Package* lines are only printed
// when a package result would exist (PackageNb > 0); the index bookkeeping
// mirrors the round-robin in FuzzNG_List.
// NOTE(review): nothing here increments PackageNb, matching the fact that
// FuzzNG_List never populates PackageResults — confirm against the generator.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	PackageNb := 0
	PackageResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ToHTML:
			w.WriteString(fmt.Sprintf("doc.ToHTML(bytes.NewBuffer(%#+v), %#+v, %#+v)\n", a.ToHTML.W, a.ToHTML.Text, a.ToHTML.Words))
		case *NgoloFuzzOne_ToText:
			w.WriteString(fmt.Sprintf("doc.ToText(bytes.NewBuffer(%#+v), %#+v, %#+v, %#+v, int(%#+v))\n", a.ToText.W, a.ToText.Text, a.ToText.Prefix, a.ToText.CodePrefix, a.ToText.Width))
		case *NgoloFuzzOne_PackageNgdotParser:
			if PackageNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Package%d.Parser()\n", PackageResultsIndex))
			PackageResultsIndex = (PackageResultsIndex + 1) % PackageNb
		case *NgoloFuzzOne_PackageNgdotPrinter:
			if PackageNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Package%d.Printer()\n", PackageResultsIndex))
			PackageResultsIndex = (PackageResultsIndex + 1) % PackageNb
		case *NgoloFuzzOne_PackageNgdotHTML:
			if PackageNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Package%d.HTML(%#+v)\n", PackageResultsIndex, a.PackageNgdotHTML.Text))
			PackageResultsIndex = (PackageResultsIndex + 1) % PackageNb
		case *NgoloFuzzOne_PackageNgdotMarkdown:
			if PackageNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Package%d.Markdown(%#+v)\n", PackageResultsIndex, a.PackageNgdotMarkdown.Text))
			PackageResultsIndex = (PackageResultsIndex + 1) % PackageNb
		case *NgoloFuzzOne_PackageNgdotText:
			if PackageNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Package%d.Text(%#+v)\n", PackageResultsIndex, a.PackageNgdotText.Text))
			PackageResultsIndex = (PackageResultsIndex + 1) % PackageNb
		case *NgoloFuzzOne_IsPredeclared:
			w.WriteString(fmt.Sprintf("doc.IsPredeclared(%#+v)\n", a.IsPredeclared.S))
		case *NgoloFuzzOne_Synopsis:
			w.WriteString(fmt.Sprintf("doc.Synopsis(%#+v)\n", a.Synopsis.Text))
		case *NgoloFuzzOne_PackageNgdotSynopsis:
			if PackageNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Package%d.Synopsis(%#+v)\n", PackageResultsIndex, a.PackageNgdotSynopsis.Text))
			PackageResultsIndex = (PackageResultsIndex + 1) % PackageNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_go_doc
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: fail the build if the linked protobuf runtime is
// older or newer than what this generated code supports.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// ModeEnum is the wire representation used to select a go/doc.Mode value
// (see ModeNewFromFuzz for the mapping).
type ModeEnum int32

const (
	ModeEnum_AllDecls    ModeEnum = 0
	ModeEnum_AllMethods  ModeEnum = 1
	ModeEnum_PreserveAST ModeEnum = 2
)

// Enum value maps for ModeEnum.
var (
	ModeEnum_name = map[int32]string{
		0: "AllDecls",
		1: "AllMethods",
		2: "PreserveAST",
	}
	ModeEnum_value = map[string]int32{
		"AllDecls":    0,
		"AllMethods":  1,
		"PreserveAST": 2,
	}
)
// Enum returns a pointer to a copy of x.
func (x ModeEnum) Enum() *ModeEnum {
	p := new(ModeEnum)
	*p = x
	return p
}

// String returns the declared name of the enum value.
func (x ModeEnum) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the protobuf descriptor for ModeEnum.
func (ModeEnum) Descriptor() protoreflect.EnumDescriptor {
	return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}

// Type returns the protobuf reflection type for ModeEnum.
func (ModeEnum) Type() protoreflect.EnumType {
	return &file_ngolofuzz_proto_enumTypes[0]
}

// Number returns x as a protobuf enum number.
func (x ModeEnum) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use ModeEnum.Descriptor instead.
func (ModeEnum) EnumDescriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// ToHTMLArgs carries the fuzz-generated arguments for a doc.ToHTML call.
type ToHTMLArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	Text          string                 `protobuf:"bytes,2,opt,name=text,proto3" json:"text,omitempty"`
	Words         map[string]string      `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message bound to its type metadata.
func (x *ToHTMLArgs) Reset() {
	*x = ToHTMLArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ToHTMLArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ToHTMLArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *ToHTMLArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ToHTMLArgs.ProtoReflect.Descriptor instead.
func (*ToHTMLArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// Nil-tolerant field accessors.
func (x *ToHTMLArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}

func (x *ToHTMLArgs) GetText() string {
	if x != nil {
		return x.Text
	}
	return ""
}

func (x *ToHTMLArgs) GetWords() map[string]string {
	if x != nil {
		return x.Words
	}
	return nil
}
// ToTextArgs carries the fuzz-generated arguments for a doc.ToText call.
type ToTextArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	Text          string                 `protobuf:"bytes,2,opt,name=text,proto3" json:"text,omitempty"`
	Prefix        string                 `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"`
	CodePrefix    string                 `protobuf:"bytes,4,opt,name=codePrefix,proto3" json:"codePrefix,omitempty"`
	Width         int64                  `protobuf:"varint,5,opt,name=width,proto3" json:"width,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message bound to its type metadata.
func (x *ToTextArgs) Reset() {
	*x = ToTextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ToTextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ToTextArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *ToTextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ToTextArgs.ProtoReflect.Descriptor instead.
func (*ToTextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// Nil-tolerant field accessors.
func (x *ToTextArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}

func (x *ToTextArgs) GetText() string {
	if x != nil {
		return x.Text
	}
	return ""
}

func (x *ToTextArgs) GetPrefix() string {
	if x != nil {
		return x.Prefix
	}
	return ""
}

func (x *ToTextArgs) GetCodePrefix() string {
	if x != nil {
		return x.CodePrefix
	}
	return ""
}

func (x *ToTextArgs) GetWidth() int64 {
	if x != nil {
		return x.Width
	}
	return 0
}
// PackageNgdotParserArgs is the (field-less) argument message for a
// Package.Parser call.
type PackageNgdotParserArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PackageNgdotParserArgs) Reset() {
	*x = PackageNgdotParserArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PackageNgdotParserArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PackageNgdotParserArgs) ProtoMessage() {}

func (x *PackageNgdotParserArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PackageNgdotParserArgs.ProtoReflect.Descriptor instead.
func (*PackageNgdotParserArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// PackageNgdotPrinterArgs is the (field-less) argument message for a
// Package.Printer call.
type PackageNgdotPrinterArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PackageNgdotPrinterArgs) Reset() {
	*x = PackageNgdotPrinterArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PackageNgdotPrinterArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PackageNgdotPrinterArgs) ProtoMessage() {}

func (x *PackageNgdotPrinterArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PackageNgdotPrinterArgs.ProtoReflect.Descriptor instead.
func (*PackageNgdotPrinterArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// PackageNgdotHTMLArgs carries the text argument for a Package.HTML call.
type PackageNgdotHTMLArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Text          string                 `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PackageNgdotHTMLArgs) Reset() {
	*x = PackageNgdotHTMLArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PackageNgdotHTMLArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PackageNgdotHTMLArgs) ProtoMessage() {}

func (x *PackageNgdotHTMLArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PackageNgdotHTMLArgs.ProtoReflect.Descriptor instead.
func (*PackageNgdotHTMLArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

func (x *PackageNgdotHTMLArgs) GetText() string {
	if x != nil {
		return x.Text
	}
	return ""
}

// PackageNgdotMarkdownArgs carries the text argument for a Package.Markdown call.
type PackageNgdotMarkdownArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Text          string                 `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PackageNgdotMarkdownArgs) Reset() {
	*x = PackageNgdotMarkdownArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PackageNgdotMarkdownArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PackageNgdotMarkdownArgs) ProtoMessage() {}

func (x *PackageNgdotMarkdownArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PackageNgdotMarkdownArgs.ProtoReflect.Descriptor instead.
func (*PackageNgdotMarkdownArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

func (x *PackageNgdotMarkdownArgs) GetText() string {
	if x != nil {
		return x.Text
	}
	return ""
}
// PackageNgdotTextArgs carries the text argument for a Package.Text call.
type PackageNgdotTextArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Text          string                 `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PackageNgdotTextArgs) Reset() {
	*x = PackageNgdotTextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PackageNgdotTextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PackageNgdotTextArgs) ProtoMessage() {}

func (x *PackageNgdotTextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PackageNgdotTextArgs.ProtoReflect.Descriptor instead.
func (*PackageNgdotTextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

func (x *PackageNgdotTextArgs) GetText() string {
	if x != nil {
		return x.Text
	}
	return ""
}

// IsPredeclaredArgs carries the string argument for a doc.IsPredeclared call.
type IsPredeclaredArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *IsPredeclaredArgs) Reset() {
	*x = IsPredeclaredArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *IsPredeclaredArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*IsPredeclaredArgs) ProtoMessage() {}

func (x *IsPredeclaredArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsPredeclaredArgs.ProtoReflect.Descriptor instead.
func (*IsPredeclaredArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

func (x *IsPredeclaredArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// SynopsisArgs carries the text argument for a doc.Synopsis call.
type SynopsisArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Text          string                 `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *SynopsisArgs) Reset() {
	*x = SynopsisArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *SynopsisArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SynopsisArgs) ProtoMessage() {}

func (x *SynopsisArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SynopsisArgs.ProtoReflect.Descriptor instead.
func (*SynopsisArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

func (x *SynopsisArgs) GetText() string {
	if x != nil {
		return x.Text
	}
	return ""
}

// PackageNgdotSynopsisArgs carries the text argument for a Package.Synopsis call.
type PackageNgdotSynopsisArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Text          string                 `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PackageNgdotSynopsisArgs) Reset() {
	*x = PackageNgdotSynopsisArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PackageNgdotSynopsisArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PackageNgdotSynopsisArgs) ProtoMessage() {}

func (x *PackageNgdotSynopsisArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PackageNgdotSynopsisArgs.ProtoReflect.Descriptor instead.
func (*PackageNgdotSynopsisArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

func (x *PackageNgdotSynopsisArgs) GetText() string {
	if x != nil {
		return x.Text
	}
	return ""
}
// NgoloFuzzOne is one operation in a fuzz sequence: exactly one of the
// wrapped Args messages is set, selecting which go/doc call to perform.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_ToHTML
	//	*NgoloFuzzOne_ToText
	//	*NgoloFuzzOne_PackageNgdotParser
	//	*NgoloFuzzOne_PackageNgdotPrinter
	//	*NgoloFuzzOne_PackageNgdotHTML
	//	*NgoloFuzzOne_PackageNgdotMarkdown
	//	*NgoloFuzzOne_PackageNgdotText
	//	*NgoloFuzzOne_IsPredeclared
	//	*NgoloFuzzOne_Synopsis
	//	*NgoloFuzzOne_PackageNgdotSynopsis
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetItem returns the populated oneof wrapper, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Oneof accessors: each returns the corresponding Args message when it is
// the populated case, and nil otherwise.
func (x *NgoloFuzzOne) GetToHTML() *ToHTMLArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ToHTML); ok {
			return x.ToHTML
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetToText() *ToTextArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ToText); ok {
			return x.ToText
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPackageNgdotParser() *PackageNgdotParserArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PackageNgdotParser); ok {
			return x.PackageNgdotParser
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPackageNgdotPrinter() *PackageNgdotPrinterArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PackageNgdotPrinter); ok {
			return x.PackageNgdotPrinter
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPackageNgdotHTML() *PackageNgdotHTMLArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PackageNgdotHTML); ok {
			return x.PackageNgdotHTML
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPackageNgdotMarkdown() *PackageNgdotMarkdownArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PackageNgdotMarkdown); ok {
			return x.PackageNgdotMarkdown
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPackageNgdotText() *PackageNgdotTextArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PackageNgdotText); ok {
			return x.PackageNgdotText
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetIsPredeclared() *IsPredeclaredArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IsPredeclared); ok {
			return x.IsPredeclared
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetSynopsis() *SynopsisArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Synopsis); ok {
			return x.Synopsis
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPackageNgdotSynopsis() *PackageNgdotSynopsisArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PackageNgdotSynopsis); ok {
			return x.PackageNgdotSynopsis
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// Oneof wrapper types: each holds one Args message for the item oneof.
type NgoloFuzzOne_ToHTML struct {
	ToHTML *ToHTMLArgs `protobuf:"bytes,1,opt,name=ToHTML,proto3,oneof"`
}

type NgoloFuzzOne_ToText struct {
	ToText *ToTextArgs `protobuf:"bytes,2,opt,name=ToText,proto3,oneof"`
}

type NgoloFuzzOne_PackageNgdotParser struct {
	PackageNgdotParser *PackageNgdotParserArgs `protobuf:"bytes,3,opt,name=PackageNgdotParser,proto3,oneof"`
}

type NgoloFuzzOne_PackageNgdotPrinter struct {
	PackageNgdotPrinter *PackageNgdotPrinterArgs `protobuf:"bytes,4,opt,name=PackageNgdotPrinter,proto3,oneof"`
}

type NgoloFuzzOne_PackageNgdotHTML struct {
	PackageNgdotHTML *PackageNgdotHTMLArgs `protobuf:"bytes,5,opt,name=PackageNgdotHTML,proto3,oneof"`
}

type NgoloFuzzOne_PackageNgdotMarkdown struct {
	PackageNgdotMarkdown *PackageNgdotMarkdownArgs `protobuf:"bytes,6,opt,name=PackageNgdotMarkdown,proto3,oneof"`
}

type NgoloFuzzOne_PackageNgdotText struct {
	PackageNgdotText *PackageNgdotTextArgs `protobuf:"bytes,7,opt,name=PackageNgdotText,proto3,oneof"`
}

type NgoloFuzzOne_IsPredeclared struct {
	IsPredeclared *IsPredeclaredArgs `protobuf:"bytes,8,opt,name=IsPredeclared,proto3,oneof"`
}

type NgoloFuzzOne_Synopsis struct {
	Synopsis *SynopsisArgs `protobuf:"bytes,9,opt,name=Synopsis,proto3,oneof"`
}

type NgoloFuzzOne_PackageNgdotSynopsis struct {
	PackageNgdotSynopsis *PackageNgdotSynopsisArgs `protobuf:"bytes,10,opt,name=PackageNgdotSynopsis,proto3,oneof"`
}

func (*NgoloFuzzOne_ToHTML) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ToText) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PackageNgdotParser) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PackageNgdotPrinter) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PackageNgdotHTML) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PackageNgdotMarkdown) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PackageNgdotText) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_IsPredeclared) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Synopsis) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PackageNgdotSynopsis) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\xa0\x01\n" +
"\n" +
"ToHTMLArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\x12\n" +
"\x04text\x18\x02 \x01(\tR\x04text\x126\n" +
"\x05words\x18\x03 \x03(\v2 .ngolofuzz.ToHTMLArgs.WordsEntryR\x05words\x1a8\n" +
"\n" +
"WordsEntry\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"|\n" +
"\n" +
"ToTextArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\x12\n" +
"\x04text\x18\x02 \x01(\tR\x04text\x12\x16\n" +
"\x06prefix\x18\x03 \x01(\tR\x06prefix\x12\x1e\n" +
"\n" +
"codePrefix\x18\x04 \x01(\tR\n" +
"codePrefix\x12\x14\n" +
"\x05width\x18\x05 \x01(\x03R\x05width\"\x18\n" +
"\x16PackageNgdotParserArgs\"\x19\n" +
"\x17PackageNgdotPrinterArgs\"*\n" +
"\x14PackageNgdotHTMLArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\tR\x04text\".\n" +
"\x18PackageNgdotMarkdownArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\tR\x04text\"*\n" +
"\x14PackageNgdotTextArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\tR\x04text\"!\n" +
"\x11IsPredeclaredArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\"\n" +
"\fSynopsisArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\tR\x04text\".\n" +
"\x18PackageNgdotSynopsisArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\tR\x04text\"\xf6\x05\n" +
"\fNgoloFuzzOne\x12/\n" +
"\x06ToHTML\x18\x01 \x01(\v2\x15.ngolofuzz.ToHTMLArgsH\x00R\x06ToHTML\x12/\n" +
"\x06ToText\x18\x02 \x01(\v2\x15.ngolofuzz.ToTextArgsH\x00R\x06ToText\x12S\n" +
"\x12PackageNgdotParser\x18\x03 \x01(\v2!.ngolofuzz.PackageNgdotParserArgsH\x00R\x12PackageNgdotParser\x12V\n" +
"\x13PackageNgdotPrinter\x18\x04 \x01(\v2\".ngolofuzz.PackageNgdotPrinterArgsH\x00R\x13PackageNgdotPrinter\x12M\n" +
"\x10PackageNgdotHTML\x18\x05 \x01(\v2\x1f.ngolofuzz.PackageNgdotHTMLArgsH\x00R\x10PackageNgdotHTML\x12Y\n" +
"\x14PackageNgdotMarkdown\x18\x06 \x01(\v2#.ngolofuzz.PackageNgdotMarkdownArgsH\x00R\x14PackageNgdotMarkdown\x12M\n" +
"\x10PackageNgdotText\x18\a \x01(\v2\x1f.ngolofuzz.PackageNgdotTextArgsH\x00R\x10PackageNgdotText\x12D\n" +
"\rIsPredeclared\x18\b \x01(\v2\x1c.ngolofuzz.IsPredeclaredArgsH\x00R\rIsPredeclared\x125\n" +
"\bSynopsis\x18\t \x01(\v2\x17.ngolofuzz.SynopsisArgsH\x00R\bSynopsis\x12Y\n" +
"\x14PackageNgdotSynopsis\x18\n" +
" \x01(\v2#.ngolofuzz.PackageNgdotSynopsisArgsH\x00R\x14PackageNgdotSynopsisB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*9\n" +
"\bModeEnum\x12\f\n" +
"\bAllDecls\x10\x00\x12\x0e\n" +
"\n" +
"AllMethods\x10\x01\x12\x0f\n" +
"\vPreserveAST\x10\x02B\x13Z\x11./;fuzz_ng_go_docb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
var file_ngolofuzz_proto_goTypes = []any{
(ModeEnum)(0), // 0: ngolofuzz.ModeEnum
(*ToHTMLArgs)(nil), // 1: ngolofuzz.ToHTMLArgs
(*ToTextArgs)(nil), // 2: ngolofuzz.ToTextArgs
(*PackageNgdotParserArgs)(nil), // 3: ngolofuzz.PackageNgdotParserArgs
(*PackageNgdotPrinterArgs)(nil), // 4: ngolofuzz.PackageNgdotPrinterArgs
(*PackageNgdotHTMLArgs)(nil), // 5: ngolofuzz.PackageNgdotHTMLArgs
(*PackageNgdotMarkdownArgs)(nil), // 6: ngolofuzz.PackageNgdotMarkdownArgs
(*PackageNgdotTextArgs)(nil), // 7: ngolofuzz.PackageNgdotTextArgs
(*IsPredeclaredArgs)(nil), // 8: ngolofuzz.IsPredeclaredArgs
(*SynopsisArgs)(nil), // 9: ngolofuzz.SynopsisArgs
(*PackageNgdotSynopsisArgs)(nil), // 10: ngolofuzz.PackageNgdotSynopsisArgs
(*NgoloFuzzOne)(nil), // 11: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 12: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 13: ngolofuzz.NgoloFuzzList
nil, // 14: ngolofuzz.ToHTMLArgs.WordsEntry
}
var file_ngolofuzz_proto_depIdxs = []int32{
14, // 0: ngolofuzz.ToHTMLArgs.words:type_name -> ngolofuzz.ToHTMLArgs.WordsEntry
1, // 1: ngolofuzz.NgoloFuzzOne.ToHTML:type_name -> ngolofuzz.ToHTMLArgs
2, // 2: ngolofuzz.NgoloFuzzOne.ToText:type_name -> ngolofuzz.ToTextArgs
3, // 3: ngolofuzz.NgoloFuzzOne.PackageNgdotParser:type_name -> ngolofuzz.PackageNgdotParserArgs
4, // 4: ngolofuzz.NgoloFuzzOne.PackageNgdotPrinter:type_name -> ngolofuzz.PackageNgdotPrinterArgs
5, // 5: ngolofuzz.NgoloFuzzOne.PackageNgdotHTML:type_name -> ngolofuzz.PackageNgdotHTMLArgs
6, // 6: ngolofuzz.NgoloFuzzOne.PackageNgdotMarkdown:type_name -> ngolofuzz.PackageNgdotMarkdownArgs
7, // 7: ngolofuzz.NgoloFuzzOne.PackageNgdotText:type_name -> ngolofuzz.PackageNgdotTextArgs
8, // 8: ngolofuzz.NgoloFuzzOne.IsPredeclared:type_name -> ngolofuzz.IsPredeclaredArgs
9, // 9: ngolofuzz.NgoloFuzzOne.Synopsis:type_name -> ngolofuzz.SynopsisArgs
10, // 10: ngolofuzz.NgoloFuzzOne.PackageNgdotSynopsis:type_name -> ngolofuzz.PackageNgdotSynopsisArgs
11, // 11: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
12, // [12:12] is the sub-list for method output_type
12, // [12:12] is the sub-list for method input_type
12, // [12:12] is the sub-list for extension type_name
12, // [12:12] is the sub-list for extension extendee
0, // [0:12] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
(*NgoloFuzzOne_ToHTML)(nil),
(*NgoloFuzzOne_ToText)(nil),
(*NgoloFuzzOne_PackageNgdotParser)(nil),
(*NgoloFuzzOne_PackageNgdotPrinter)(nil),
(*NgoloFuzzOne_PackageNgdotHTML)(nil),
(*NgoloFuzzOne_PackageNgdotMarkdown)(nil),
(*NgoloFuzzOne_PackageNgdotText)(nil),
(*NgoloFuzzOne_IsPredeclared)(nil),
(*NgoloFuzzOne_Synopsis)(nil),
(*NgoloFuzzOne_PackageNgdotSynopsis)(nil),
}
file_ngolofuzz_proto_msgTypes[11].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 1,
NumMessages: 14,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
EnumInfos: file_ngolofuzz_proto_enumTypes,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_go_doc_comment
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"go/doc/comment"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub fed by a fixed byte slice:
// Reads drain buf starting at offset, Writes are discarded.
type FuzzingConn struct {
	buf    []byte // the fuzzer-supplied bytes served by Read
	offset int    // read position within buf; == len(buf) once drained/closed
}
// Read copies the next chunk of the fuzz buffer into b and advances the
// read offset. It returns io.EOF once the buffer is exhausted.
//
// Bug fixed: the original guard compared len(b) against len(c.buf)+c.offset
// instead of the remaining length len(c.buf)-c.offset. When that branch was
// wrongly taken it returned n = len(b) even though copy had written fewer
// bytes (over-reporting the read, violating the io.Reader contract) and
// advanced offset past len(c.buf). Using copy's return value bounds the
// transfer correctly in all cases.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b, reporting it as fully written; the connection is a
// read-only fuzzing stub and output is irrelevant to the fuzz target.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}
// Close marks the connection as drained so subsequent Reads return io.EOF.
// It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a fixed placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a constant fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a constant fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address (required by net.Conn).
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address (required by net.Conn).
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for an in-memory stub.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op (see SetDeadline).
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op (see SetDeadline).
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the byte slice a in a FuzzingConn whose reads
// drain a and whose writes are discarded.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helper constructors if needed.

// CreateBigInt builds a big.Int from the big-endian bytes a
// (an empty or nil slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows a slice of int64 to a slice of int,
// element by element (values may truncate on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for idx, v := range a {
		out[idx] = int(v)
	}
	return out
}
// ConvertUint16Array narrows a slice of int64 to a slice of uint16,
// element by element (values wrap modulo 2^16).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for idx, v := range a {
		out[idx] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the start decodes to utf8.RuneError, as with range.
func GetRune(s string) rune {
	r := '\x00'
	for _, first := range s {
		r = first
		break
	}
	return r
}
// ParserNewFromFuzz converts the fuzz-generated ParserStruct into a
// *comment.Parser, or nil when no struct was generated.
func ParserNewFromFuzz(p *ParserStruct) *comment.Parser {
	if p == nil {
		return nil
	}
	parser := new(comment.Parser)
	parser.Words = p.Words
	return parser
}
// PrinterNewFromFuzz converts the fuzz-generated PrinterStruct into a
// *comment.Printer, or nil when no struct was generated. The int64 proto
// fields are narrowed to int as required by comment.Printer.
func PrinterNewFromFuzz(p *PrinterStruct) *comment.Printer {
	if p == nil {
		return nil
	}
	pr := new(comment.Printer)
	pr.HeadingLevel = int(p.HeadingLevel)
	pr.DocLinkBaseURL = p.DocLinkBaseURL
	pr.TextPrefix = p.TextPrefix
	pr.TextCodePrefix = p.TextCodePrefix
	pr.TextWidth = int(p.TextWidth)
	return pr
}
// FuzzNG_valid is the fuzz entry point for inputs that are expected to
// already be valid protobuf (e.g. produced by a structure-aware mutator):
// a failure to unmarshal is itself a harness bug and panics rather than
// being silently skipped. Returns the value of FuzzNG_List.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Installed after the unmarshal check on purpose: swallow only string
	// panics (used by the target/harness to abort a run); any other panic
	// value is treated as a real crash and re-raised for the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	// Force a collection between runs to surface memory bugs earlier.
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzz entry point for raw inputs that may not be valid
// protobuf: undecodable inputs are rejected (return 0) instead of panicking.
// Otherwise it behaves exactly like FuzzNG_valid.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow only string panics (harness-level aborts); re-raise anything
	// else so the fuzzer records it as a crash.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	// Force a collection between runs to surface memory bugs earlier.
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the sequence of go/doc/comment API calls encoded in
// gen. On first use it optionally writes a Go-source reproducer to the file
// named by the FUZZ_NG_REPRODUCER environment variable. Returns 1 when the
// list was executed, 0 when the run was cut short by the operation cap.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// DocResults holds parsed *comment.Doc values consumed round-robin by
	// the Printer cases. NOTE(review): no case visible here ever appends to
	// DocResults, so every Printer branch currently hits the
	// len(DocResults) == 0 guard and continues — presumably a generator
	// variant with a Parse-result case populates it; verify against the
	// ngolo-fuzz generator.
	var DocResults []*comment.Doc
	DocResultsIndex := 0
	for l := range gen.List {
		// Cap the work per input at 4096 operations.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_PrinterNgdotHTML:
			arg0 := PrinterNewFromFuzz(a.PrinterNgdotHTML.P)
			if arg0 == nil {
				continue
			}
			if len(DocResults) == 0 {
				continue
			}
			arg1 := DocResults[DocResultsIndex]
			DocResultsIndex = (DocResultsIndex + 1) % len(DocResults)
			arg0.HTML(arg1)
		case *NgoloFuzzOne_PrinterNgdotMarkdown:
			arg0 := PrinterNewFromFuzz(a.PrinterNgdotMarkdown.P)
			if arg0 == nil {
				continue
			}
			if len(DocResults) == 0 {
				continue
			}
			arg1 := DocResults[DocResultsIndex]
			DocResultsIndex = (DocResultsIndex + 1) % len(DocResults)
			arg0.Markdown(arg1)
		case *NgoloFuzzOne_DefaultLookupPackage:
			comment.DefaultLookupPackage(a.DefaultLookupPackage.Name)
		case *NgoloFuzzOne_ParserNgdotParse:
			arg0 := ParserNewFromFuzz(a.ParserNgdotParse.P)
			if arg0 == nil {
				continue
			}
			// NOTE(review): the parse result is dropped rather than appended
			// to DocResults — see the note above.
			arg0.Parse(a.ParserNgdotParse.Text)
		case *NgoloFuzzOne_PrinterNgdotComment:
			arg0 := PrinterNewFromFuzz(a.PrinterNgdotComment.P)
			if arg0 == nil {
				continue
			}
			if len(DocResults) == 0 {
				continue
			}
			arg1 := DocResults[DocResultsIndex]
			DocResultsIndex = (DocResultsIndex + 1) % len(DocResults)
			arg0.Comment(arg1)
		case *NgoloFuzzOne_PrinterNgdotText:
			arg0 := PrinterNewFromFuzz(a.PrinterNgdotText.P)
			if arg0 == nil {
				continue
			}
			if len(DocResults) == 0 {
				continue
			}
			arg1 := DocResults[DocResultsIndex]
			DocResultsIndex = (DocResultsIndex + 1) % len(DocResults)
			arg0.Text(arg1)
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go-like reproducer of the call
// sequence in gen to w, mirroring the dispatch in FuzzNG_List.
// NOTE(review): DocNb is never incremented anywhere in this function, so
// every Printer case always continues and only DefaultLookupPackage/Parse
// lines are ever printed; the `(DocResultsIndex + 0) % DocNb` expressions
// look like generator artifacts — verify against the ngolo-fuzz generator.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	DocNb := 0
	DocResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_PrinterNgdotHTML:
			if DocNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PrinterNewFromFuzz(%#+v).HTML(Doc%d)\n", a.PrinterNgdotHTML.P, (DocResultsIndex + 0) % DocNb))
			DocResultsIndex = (DocResultsIndex + 1) % DocNb
		case *NgoloFuzzOne_PrinterNgdotMarkdown:
			if DocNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PrinterNewFromFuzz(%#+v).Markdown(Doc%d)\n", a.PrinterNgdotMarkdown.P, (DocResultsIndex + 0) % DocNb))
			DocResultsIndex = (DocResultsIndex + 1) % DocNb
		case *NgoloFuzzOne_DefaultLookupPackage:
			w.WriteString(fmt.Sprintf("comment.DefaultLookupPackage(%#+v)\n", a.DefaultLookupPackage.Name))
		case *NgoloFuzzOne_ParserNgdotParse:
			w.WriteString(fmt.Sprintf("ParserNewFromFuzz(%#+v).Parse(%#+v)\n", a.ParserNgdotParse.P, a.ParserNgdotParse.Text))
		case *NgoloFuzzOne_PrinterNgdotComment:
			if DocNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PrinterNewFromFuzz(%#+v).Comment(Doc%d)\n", a.PrinterNgdotComment.P, (DocResultsIndex + 0) % DocNb))
			DocResultsIndex = (DocResultsIndex + 1) % DocNb
		case *NgoloFuzzOne_PrinterNgdotText:
			if DocNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("PrinterNewFromFuzz(%#+v).Text(Doc%d)\n", a.PrinterNgdotText.P, (DocResultsIndex + 0) % DocNb))
			DocResultsIndex = (DocResultsIndex + 1) % DocNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_go_doc_comment
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ParserStruct struct {
state protoimpl.MessageState `protogen:"open.v1"`
Words map[string]string `protobuf:"bytes,1,rep,name=Words,proto3" json:"Words,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ParserStruct) Reset() {
*x = ParserStruct{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ParserStruct) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParserStruct) ProtoMessage() {}
func (x *ParserStruct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParserStruct.ProtoReflect.Descriptor instead.
func (*ParserStruct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *ParserStruct) GetWords() map[string]string {
if x != nil {
return x.Words
}
return nil
}
type PrinterStruct struct {
state protoimpl.MessageState `protogen:"open.v1"`
HeadingLevel int64 `protobuf:"varint,1,opt,name=HeadingLevel,proto3" json:"HeadingLevel,omitempty"`
DocLinkBaseURL string `protobuf:"bytes,2,opt,name=DocLinkBaseURL,proto3" json:"DocLinkBaseURL,omitempty"`
TextPrefix string `protobuf:"bytes,3,opt,name=TextPrefix,proto3" json:"TextPrefix,omitempty"`
TextCodePrefix string `protobuf:"bytes,4,opt,name=TextCodePrefix,proto3" json:"TextCodePrefix,omitempty"`
TextWidth int64 `protobuf:"varint,5,opt,name=TextWidth,proto3" json:"TextWidth,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PrinterStruct) Reset() {
*x = PrinterStruct{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PrinterStruct) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PrinterStruct) ProtoMessage() {}
func (x *PrinterStruct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PrinterStruct.ProtoReflect.Descriptor instead.
func (*PrinterStruct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *PrinterStruct) GetHeadingLevel() int64 {
if x != nil {
return x.HeadingLevel
}
return 0
}
func (x *PrinterStruct) GetDocLinkBaseURL() string {
if x != nil {
return x.DocLinkBaseURL
}
return ""
}
func (x *PrinterStruct) GetTextPrefix() string {
if x != nil {
return x.TextPrefix
}
return ""
}
func (x *PrinterStruct) GetTextCodePrefix() string {
if x != nil {
return x.TextCodePrefix
}
return ""
}
func (x *PrinterStruct) GetTextWidth() int64 {
if x != nil {
return x.TextWidth
}
return 0
}
type PrinterNgdotHTMLArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P *PrinterStruct `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PrinterNgdotHTMLArgs) Reset() {
*x = PrinterNgdotHTMLArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PrinterNgdotHTMLArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PrinterNgdotHTMLArgs) ProtoMessage() {}
func (x *PrinterNgdotHTMLArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PrinterNgdotHTMLArgs.ProtoReflect.Descriptor instead.
func (*PrinterNgdotHTMLArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *PrinterNgdotHTMLArgs) GetP() *PrinterStruct {
if x != nil {
return x.P
}
return nil
}
type PrinterNgdotMarkdownArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P *PrinterStruct `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PrinterNgdotMarkdownArgs) Reset() {
*x = PrinterNgdotMarkdownArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PrinterNgdotMarkdownArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PrinterNgdotMarkdownArgs) ProtoMessage() {}
func (x *PrinterNgdotMarkdownArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PrinterNgdotMarkdownArgs.ProtoReflect.Descriptor instead.
func (*PrinterNgdotMarkdownArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *PrinterNgdotMarkdownArgs) GetP() *PrinterStruct {
if x != nil {
return x.P
}
return nil
}
type DefaultLookupPackageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DefaultLookupPackageArgs) Reset() {
*x = DefaultLookupPackageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DefaultLookupPackageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DefaultLookupPackageArgs) ProtoMessage() {}
func (x *DefaultLookupPackageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DefaultLookupPackageArgs.ProtoReflect.Descriptor instead.
func (*DefaultLookupPackageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *DefaultLookupPackageArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type ParserNgdotParseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P *ParserStruct `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
Text string `protobuf:"bytes,2,opt,name=text,proto3" json:"text,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ParserNgdotParseArgs) Reset() {
*x = ParserNgdotParseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ParserNgdotParseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParserNgdotParseArgs) ProtoMessage() {}
func (x *ParserNgdotParseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParserNgdotParseArgs.ProtoReflect.Descriptor instead.
func (*ParserNgdotParseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *ParserNgdotParseArgs) GetP() *ParserStruct {
if x != nil {
return x.P
}
return nil
}
func (x *ParserNgdotParseArgs) GetText() string {
if x != nil {
return x.Text
}
return ""
}
type PrinterNgdotCommentArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P *PrinterStruct `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PrinterNgdotCommentArgs) Reset() {
*x = PrinterNgdotCommentArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PrinterNgdotCommentArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PrinterNgdotCommentArgs) ProtoMessage() {}
func (x *PrinterNgdotCommentArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PrinterNgdotCommentArgs.ProtoReflect.Descriptor instead.
func (*PrinterNgdotCommentArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *PrinterNgdotCommentArgs) GetP() *PrinterStruct {
if x != nil {
return x.P
}
return nil
}
type PrinterNgdotTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P *PrinterStruct `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PrinterNgdotTextArgs) Reset() {
*x = PrinterNgdotTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PrinterNgdotTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PrinterNgdotTextArgs) ProtoMessage() {}
func (x *PrinterNgdotTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PrinterNgdotTextArgs.ProtoReflect.Descriptor instead.
func (*PrinterNgdotTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *PrinterNgdotTextArgs) GetP() *PrinterStruct {
if x != nil {
return x.P
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_PrinterNgdotHTML
// *NgoloFuzzOne_PrinterNgdotMarkdown
// *NgoloFuzzOne_DefaultLookupPackage
// *NgoloFuzzOne_ParserNgdotParse
// *NgoloFuzzOne_PrinterNgdotComment
// *NgoloFuzzOne_PrinterNgdotText
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetPrinterNgdotHTML() *PrinterNgdotHTMLArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PrinterNgdotHTML); ok {
return x.PrinterNgdotHTML
}
}
return nil
}
func (x *NgoloFuzzOne) GetPrinterNgdotMarkdown() *PrinterNgdotMarkdownArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PrinterNgdotMarkdown); ok {
return x.PrinterNgdotMarkdown
}
}
return nil
}
func (x *NgoloFuzzOne) GetDefaultLookupPackage() *DefaultLookupPackageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DefaultLookupPackage); ok {
return x.DefaultLookupPackage
}
}
return nil
}
func (x *NgoloFuzzOne) GetParserNgdotParse() *ParserNgdotParseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ParserNgdotParse); ok {
return x.ParserNgdotParse
}
}
return nil
}
func (x *NgoloFuzzOne) GetPrinterNgdotComment() *PrinterNgdotCommentArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PrinterNgdotComment); ok {
return x.PrinterNgdotComment
}
}
return nil
}
func (x *NgoloFuzzOne) GetPrinterNgdotText() *PrinterNgdotTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PrinterNgdotText); ok {
return x.PrinterNgdotText
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_PrinterNgdotHTML struct {
PrinterNgdotHTML *PrinterNgdotHTMLArgs `protobuf:"bytes,1,opt,name=PrinterNgdotHTML,proto3,oneof"`
}
type NgoloFuzzOne_PrinterNgdotMarkdown struct {
PrinterNgdotMarkdown *PrinterNgdotMarkdownArgs `protobuf:"bytes,2,opt,name=PrinterNgdotMarkdown,proto3,oneof"`
}
type NgoloFuzzOne_DefaultLookupPackage struct {
DefaultLookupPackage *DefaultLookupPackageArgs `protobuf:"bytes,3,opt,name=DefaultLookupPackage,proto3,oneof"`
}
type NgoloFuzzOne_ParserNgdotParse struct {
ParserNgdotParse *ParserNgdotParseArgs `protobuf:"bytes,4,opt,name=ParserNgdotParse,proto3,oneof"`
}
type NgoloFuzzOne_PrinterNgdotComment struct {
PrinterNgdotComment *PrinterNgdotCommentArgs `protobuf:"bytes,5,opt,name=PrinterNgdotComment,proto3,oneof"`
}
type NgoloFuzzOne_PrinterNgdotText struct {
PrinterNgdotText *PrinterNgdotTextArgs `protobuf:"bytes,6,opt,name=PrinterNgdotText,proto3,oneof"`
}
func (*NgoloFuzzOne_PrinterNgdotHTML) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PrinterNgdotMarkdown) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DefaultLookupPackage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ParserNgdotParse) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PrinterNgdotComment) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PrinterNgdotText) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x82\x01\n" +
"\fParserStruct\x128\n" +
"\x05Words\x18\x01 \x03(\v2\".ngolofuzz.ParserStruct.WordsEntryR\x05Words\x1a8\n" +
"\n" +
"WordsEntry\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xc1\x01\n" +
"\rPrinterStruct\x12\"\n" +
"\fHeadingLevel\x18\x01 \x01(\x03R\fHeadingLevel\x12&\n" +
"\x0eDocLinkBaseURL\x18\x02 \x01(\tR\x0eDocLinkBaseURL\x12\x1e\n" +
"\n" +
"TextPrefix\x18\x03 \x01(\tR\n" +
"TextPrefix\x12&\n" +
"\x0eTextCodePrefix\x18\x04 \x01(\tR\x0eTextCodePrefix\x12\x1c\n" +
"\tTextWidth\x18\x05 \x01(\x03R\tTextWidth\">\n" +
"\x14PrinterNgdotHTMLArgs\x12&\n" +
"\x01p\x18\x01 \x01(\v2\x18.ngolofuzz.PrinterStructR\x01p\"B\n" +
"\x18PrinterNgdotMarkdownArgs\x12&\n" +
"\x01p\x18\x01 \x01(\v2\x18.ngolofuzz.PrinterStructR\x01p\".\n" +
"\x18DefaultLookupPackageArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"Q\n" +
"\x14ParserNgdotParseArgs\x12%\n" +
"\x01p\x18\x01 \x01(\v2\x17.ngolofuzz.ParserStructR\x01p\x12\x12\n" +
"\x04text\x18\x02 \x01(\tR\x04text\"A\n" +
"\x17PrinterNgdotCommentArgs\x12&\n" +
"\x01p\x18\x01 \x01(\v2\x18.ngolofuzz.PrinterStructR\x01p\">\n" +
"\x14PrinterNgdotTextArgs\x12&\n" +
"\x01p\x18\x01 \x01(\v2\x18.ngolofuzz.PrinterStructR\x01p\"\x91\x04\n" +
"\fNgoloFuzzOne\x12M\n" +
"\x10PrinterNgdotHTML\x18\x01 \x01(\v2\x1f.ngolofuzz.PrinterNgdotHTMLArgsH\x00R\x10PrinterNgdotHTML\x12Y\n" +
"\x14PrinterNgdotMarkdown\x18\x02 \x01(\v2#.ngolofuzz.PrinterNgdotMarkdownArgsH\x00R\x14PrinterNgdotMarkdown\x12Y\n" +
"\x14DefaultLookupPackage\x18\x03 \x01(\v2#.ngolofuzz.DefaultLookupPackageArgsH\x00R\x14DefaultLookupPackage\x12M\n" +
"\x10ParserNgdotParse\x18\x04 \x01(\v2\x1f.ngolofuzz.ParserNgdotParseArgsH\x00R\x10ParserNgdotParse\x12V\n" +
"\x13PrinterNgdotComment\x18\x05 \x01(\v2\".ngolofuzz.PrinterNgdotCommentArgsH\x00R\x13PrinterNgdotComment\x12M\n" +
"\x10PrinterNgdotText\x18\x06 \x01(\v2\x1f.ngolofuzz.PrinterNgdotTextArgsH\x00R\x10PrinterNgdotTextB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_go_doc_commentb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
var file_ngolofuzz_proto_goTypes = []any{
(*ParserStruct)(nil), // 0: ngolofuzz.ParserStruct
(*PrinterStruct)(nil), // 1: ngolofuzz.PrinterStruct
(*PrinterNgdotHTMLArgs)(nil), // 2: ngolofuzz.PrinterNgdotHTMLArgs
(*PrinterNgdotMarkdownArgs)(nil), // 3: ngolofuzz.PrinterNgdotMarkdownArgs
(*DefaultLookupPackageArgs)(nil), // 4: ngolofuzz.DefaultLookupPackageArgs
(*ParserNgdotParseArgs)(nil), // 5: ngolofuzz.ParserNgdotParseArgs
(*PrinterNgdotCommentArgs)(nil), // 6: ngolofuzz.PrinterNgdotCommentArgs
(*PrinterNgdotTextArgs)(nil), // 7: ngolofuzz.PrinterNgdotTextArgs
(*NgoloFuzzOne)(nil), // 8: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 9: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 10: ngolofuzz.NgoloFuzzList
nil, // 11: ngolofuzz.ParserStruct.WordsEntry
}
var file_ngolofuzz_proto_depIdxs = []int32{
11, // 0: ngolofuzz.ParserStruct.Words:type_name -> ngolofuzz.ParserStruct.WordsEntry
1, // 1: ngolofuzz.PrinterNgdotHTMLArgs.p:type_name -> ngolofuzz.PrinterStruct
1, // 2: ngolofuzz.PrinterNgdotMarkdownArgs.p:type_name -> ngolofuzz.PrinterStruct
0, // 3: ngolofuzz.ParserNgdotParseArgs.p:type_name -> ngolofuzz.ParserStruct
1, // 4: ngolofuzz.PrinterNgdotCommentArgs.p:type_name -> ngolofuzz.PrinterStruct
1, // 5: ngolofuzz.PrinterNgdotTextArgs.p:type_name -> ngolofuzz.PrinterStruct
2, // 6: ngolofuzz.NgoloFuzzOne.PrinterNgdotHTML:type_name -> ngolofuzz.PrinterNgdotHTMLArgs
3, // 7: ngolofuzz.NgoloFuzzOne.PrinterNgdotMarkdown:type_name -> ngolofuzz.PrinterNgdotMarkdownArgs
4, // 8: ngolofuzz.NgoloFuzzOne.DefaultLookupPackage:type_name -> ngolofuzz.DefaultLookupPackageArgs
5, // 9: ngolofuzz.NgoloFuzzOne.ParserNgdotParse:type_name -> ngolofuzz.ParserNgdotParseArgs
6, // 10: ngolofuzz.NgoloFuzzOne.PrinterNgdotComment:type_name -> ngolofuzz.PrinterNgdotCommentArgs
7, // 11: ngolofuzz.NgoloFuzzOne.PrinterNgdotText:type_name -> ngolofuzz.PrinterNgdotTextArgs
8, // 12: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
13, // [13:13] is the sub-list for method output_type
13, // [13:13] is the sub-list for method input_type
13, // [13:13] is the sub-list for extension type_name
13, // [13:13] is the sub-list for extension extendee
0, // [0:13] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[8].OneofWrappers = []any{
(*NgoloFuzzOne_PrinterNgdotHTML)(nil),
(*NgoloFuzzOne_PrinterNgdotMarkdown)(nil),
(*NgoloFuzzOne_DefaultLookupPackage)(nil),
(*NgoloFuzzOne_ParserNgdotParse)(nil),
(*NgoloFuzzOne_PrinterNgdotComment)(nil),
(*NgoloFuzzOne_PrinterNgdotText)(nil),
}
file_ngolofuzz_proto_msgTypes[9].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 12,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_go_format
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"go/format"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write reports the whole of b as written; the harness silently discards
// anything the code under test sends over the connection.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}
// Close marks the payload as fully consumed so any later Read returns
// io.EOF. It always succeeds.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a placeholder net.Addr reported by FuzzingConn; the
// harness has no real network endpoint.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (a *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String returns a fixed textual form of the fake address.
func (a *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a fixed placeholder address; the in-memory connection
// has no real endpoint.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns the same fixed placeholder address as LocalAddr.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op: reads never block and writes are discarded.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps the fuzzer payload a in a FuzzingConn that
// serves exactly those bytes from its Read stream.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit these helpers when the generated harness needs them.

// CreateBigInt builds a big.Int from the big-endian bytes in a; an empty
// or nil slice yields zero.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
// ConvertIntArray narrows a slice of int64 values to a slice of int,
// preserving order (values wider than int are truncated by the cast).
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows a slice of int64 values to uint16, preserving
// order; each value keeps only its low 16 bits.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') for an empty
// string. Invalid UTF-8 decodes to the replacement rune, exactly as a
// range loop over the string would produce.
func GetRune(s string) rune {
	if rs := []rune(s); len(rs) > 0 {
		return rs[0]
	}
	return '\x00'
}
// FuzzNG_valid is the fuzz entry point for inputs guaranteed to be valid
// protobuf (e.g. produced by libprotobuf-mutator): failure to unmarshal is
// treated as a harness bug and panics.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Swallow string panics raised while replaying the call list; any other
// panic value (runtime errors etc.) is re-raised so the fuzzer reports it.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the fuzz entry point for raw fuzzer bytes: inputs that
// fail to unmarshal are simply rejected (return 0) instead of panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics raised while replaying the call list; any other
// panic value (runtime errors etc.) is re-raised so the fuzzer reports it.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
var initialized bool
// FuzzNG_List replays the generated call list against go/format.
// On the first invocation it optionally writes a human-readable reproducer
// of the call sequence to the file named by $FUZZ_NG_REPRODUCER.
// It returns 1 when every replayed call succeeded, 0 otherwise.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// One-time setup: dump the call list so a crash can be replayed by hand.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
for l := range gen.List {
// Cap the work done per input so a single case stays fast.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Source:
// Only the error result matters here; the formatted output is dropped.
_, r1 := format.Source(a.Source.Src)
if r1 != nil{
r1.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a Go-like trace of the calls encoded in gen to w,
// one line per call, producing the human-readable reproducer used by
// FuzzNG_List.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Source:
w.WriteString(fmt.Sprintf("format.Source(%#+v)\n", a.Source.Src))
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_go_format
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type SourceArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Src []byte `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SourceArgs) Reset() {
*x = SourceArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SourceArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SourceArgs) ProtoMessage() {}
func (x *SourceArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SourceArgs.ProtoReflect.Descriptor instead.
func (*SourceArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetSrc returns the source bytes that the harness feeds to format.Source,
// or nil when the receiver is nil (standard nil-safe generated getter).
func (x *SourceArgs) GetSrc() []byte {
if x != nil {
return x.Src
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Source
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetSource() *SourceArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Source); ok {
return x.Source
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Source struct {
Source *SourceArgs `protobuf:"bytes,1,opt,name=Source,proto3,oneof"`
}
func (*NgoloFuzzOne_Source) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1e\n" +
"\n" +
"SourceArgs\x12\x10\n" +
"\x03src\x18\x01 \x01(\fR\x03src\"G\n" +
"\fNgoloFuzzOne\x12/\n" +
"\x06Source\x18\x01 \x01(\v2\x15.ngolofuzz.SourceArgsH\x00R\x06SourceB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x16Z\x14./;fuzz_ng_go_formatb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_ngolofuzz_proto_goTypes = []any{
(*SourceArgs)(nil), // 0: ngolofuzz.SourceArgs
(*NgoloFuzzOne)(nil), // 1: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 2: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 3: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.Source:type_name -> ngolofuzz.SourceArgs
1, // 1: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[1].OneofWrappers = []any{
(*NgoloFuzzOne_Source)(nil),
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_go_parser
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"go/parser"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
//TODO only add these functions if needed
func CreateBigInt(a []byte) *big.Int {
r := new(big.Int)
r.SetBytes(a)
return r
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
func ConvertIntArray(a []int64) []int {
r := make([]int, len(a))
for i := range a {
r[i] = int(a[i])
}
return r
}
func ConvertUint16Array(a []int64) []uint16 {
r := make([]uint16, len(a))
for i := range a {
r[i] = uint16(a[i])
}
return r
}
func GetRune(s string) rune {
for _, c := range s {
return c
}
return '\x00'
}
// ModeNewFromFuzz maps a fuzz-generated ModeEnum value onto the matching
// go/parser.Mode flag. Any value outside 1..7 falls back to
// parser.PackageClauseOnly, which corresponds to enum value 0.
func ModeNewFromFuzz(p ModeEnum) parser.Mode {
	modes := map[ModeEnum]parser.Mode{
		1: parser.ImportsOnly,
		2: parser.ParseComments,
		3: parser.Trace,
		4: parser.DeclarationErrors,
		5: parser.SpuriousErrors,
		6: parser.SkipObjectResolution,
		7: parser.AllErrors,
	}
	if m, ok := modes[p]; ok {
		return m
	}
	return parser.PackageClauseOnly
}
// ConvertModeNewFromFuzz converts each fuzz enum value in a to its
// parser.Mode via ModeNewFromFuzz, preserving order.
func ConvertModeNewFromFuzz(a []ModeEnum) []parser.Mode {
	out := make([]parser.Mode, 0, len(a))
	for _, m := range a {
		out = append(out, ModeNewFromFuzz(m))
	}
	return out
}
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
var initialized bool
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
for l := range gen.List {
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_ParseExpr:
_, r1 := parser.ParseExpr(a.ParseExpr.X)
if r1 != nil{
r1.Error()
return 0
}
}
}
return 1
}
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_ParseExpr:
w.WriteString(fmt.Sprintf("parser.ParseExpr(%#+v)\n", a.ParseExpr.X))
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_go_parser
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ModeEnum int32
const (
ModeEnum_PackageClauseOnly ModeEnum = 0
ModeEnum_ImportsOnly ModeEnum = 1
ModeEnum_ParseComments ModeEnum = 2
ModeEnum_Trace ModeEnum = 3
ModeEnum_DeclarationErrors ModeEnum = 4
ModeEnum_SpuriousErrors ModeEnum = 5
ModeEnum_SkipObjectResolution ModeEnum = 6
ModeEnum_AllErrors ModeEnum = 7
)
// Enum value maps for ModeEnum.
var (
ModeEnum_name = map[int32]string{
0: "PackageClauseOnly",
1: "ImportsOnly",
2: "ParseComments",
3: "Trace",
4: "DeclarationErrors",
5: "SpuriousErrors",
6: "SkipObjectResolution",
7: "AllErrors",
}
ModeEnum_value = map[string]int32{
"PackageClauseOnly": 0,
"ImportsOnly": 1,
"ParseComments": 2,
"Trace": 3,
"DeclarationErrors": 4,
"SpuriousErrors": 5,
"SkipObjectResolution": 6,
"AllErrors": 7,
}
)
func (x ModeEnum) Enum() *ModeEnum {
p := new(ModeEnum)
*p = x
return p
}
func (x ModeEnum) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (ModeEnum) Descriptor() protoreflect.EnumDescriptor {
return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}
func (ModeEnum) Type() protoreflect.EnumType {
return &file_ngolofuzz_proto_enumTypes[0]
}
func (x ModeEnum) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use ModeEnum.Descriptor instead.
func (ModeEnum) EnumDescriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type ParseExprArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X string `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ParseExprArgs) Reset() {
*x = ParseExprArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ParseExprArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParseExprArgs) ProtoMessage() {}
func (x *ParseExprArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseExprArgs.ProtoReflect.Descriptor instead.
func (*ParseExprArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetX returns the expression string that the harness hands to
// parser.ParseExpr, or "" when the receiver is nil (standard nil-safe
// generated getter).
func (x *ParseExprArgs) GetX() string {
if x != nil {
return x.X
}
return ""
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_ParseExpr
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetParseExpr() *ParseExprArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ParseExpr); ok {
return x.ParseExpr
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_ParseExpr struct {
ParseExpr *ParseExprArgs `protobuf:"bytes,1,opt,name=ParseExpr,proto3,oneof"`
}
func (*NgoloFuzzOne_ParseExpr) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny carries exactly one arbitrary scalar value for the
// fuzzer, selected through the "item" oneof (generated code).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-attaches its
// generated message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetItem returns the populated "item" oneof wrapper, or nil if unset.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each getter below returns the corresponding oneof arm's value, or
// that type's zero value when a different arm (or none) is set.

func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the marker interface implemented by every
// wrapper type of the "item" oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzzer message: an ordered sequence of
// NgoloFuzzOne calls to replay (generated code).
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-attaches its
// generated message info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetList returns the call sequence; nil receiver yields nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto
// for ngolofuzz.proto (emitted verbatim by protoc; do not edit).
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1d\n" +
	"\rParseExprArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\tR\x01x\"P\n" +
	"\fNgoloFuzzOne\x128\n" +
	"\tParseExpr\x18\x01 \x01(\v2\x18.ngolofuzz.ParseExprArgsH\x00R\tParseExprB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*\xa4\x01\n" +
	"\bModeEnum\x12\x15\n" +
	"\x11PackageClauseOnly\x10\x00\x12\x0f\n" +
	"\vImportsOnly\x10\x01\x12\x11\n" +
	"\rParseComments\x10\x02\x12\t\n" +
	"\x05Trace\x10\x03\x12\x15\n" +
	"\x11DeclarationErrors\x10\x04\x12\x12\n" +
	"\x0eSpuriousErrors\x10\x05\x12\x18\n" +
	"\x14SkipObjectResolution\x10\x06\x12\r\n" +
	"\tAllErrors\x10\aB\x16Z\x14./;fuzz_ng_go_parserb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor
// exactly once and returns the cached bytes (used by Descriptor()).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime type tables for ngolofuzz.proto: one enum (ModeEnum) and four
// messages, plus the Go types and dependency index list the builder uses.
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_ngolofuzz_proto_goTypes = []any{
	(ModeEnum)(0),          // 0: ngolofuzz.ModeEnum
	(*ParseExprArgs)(nil),  // 1: ngolofuzz.ParseExprArgs
	(*NgoloFuzzOne)(nil),   // 2: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),   // 3: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),  // 4: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
	1, // 0: ngolofuzz.NgoloFuzzOne.ParseExpr:type_name -> ngolofuzz.ParseExprArgs
	2, // 1: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	2, // [2:2] is the sub-list for method output_type
	2, // [2:2] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}

func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the file descriptor and message
// types with the protobuf runtime; it is idempotent.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Declare the oneof wrapper types so the runtime can decode them.
	file_ngolofuzz_proto_msgTypes[1].OneofWrappers = []any{
		(*NgoloFuzzOne_ParseExpr)(nil),
	}
	file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      1,
			NumMessages:   4,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		EnumInfos:         file_ngolofuzz_proto_enumTypes,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release build-time tables for the GC.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_go_version
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"go/version"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write discards b and reports the full slice as sent.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection drained so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a fixed placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}

func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// Deadlines are accepted and ignored: the fake conn never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps a in a FuzzingConn ready to be read.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO: only generate these helpers when the harness actually needs them.

// CreateBigInt interprets a as a big-endian unsigned integer and
// returns it as a *big.Int (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray converts each int64 in a to the platform int type,
// preserving order (values may truncate on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (modulo 2^16),
// preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the front decodes to utf8.RuneError, matching a
// range loop over the string.
func GetRune(s string) rune {
	decoded := []rune(s)
	if len(decoded) == 0 {
		return '\x00'
	}
	return decoded[0]
}
// FuzzNG_valid replays fuzzer input that is expected to be a valid
// NgoloFuzzList encoding; it panics if the bytes fail to unmarshal.
// String panics raised while replaying are swallowed (treated as
// handled library errors); any other panic value is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Recover only covers the replay below, not the unmarshal above.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: string panics are expected library errors
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the variant used when the input may not be a valid
// protobuf: undecodable input is silently ignored instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics from the replayed API; re-raise the rest.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays each generated item as a call into go/version.
// On the first invocation, if FUZZ_NG_REPRODUCER names a file, the call
// sequence is written there as Go source before execution. Lists are
// capped: once the index exceeds 4096 the run aborts with 0.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		if l > 4096 {
			return 0
		}
		// Dispatch on which oneof arm the fuzzer populated.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Lang:
			version.Lang(a.Lang.X)
		case *NgoloFuzzOne_Compare:
			version.Compare(a.Compare.X, a.Compare.Y)
		case *NgoloFuzzOne_IsValid:
			version.IsValid(a.IsValid.X)
		}
	}
	return 1
}
// PrintNG_List writes the Go-source equivalent of the generated call
// sequence to w, producing a standalone reproducer.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Lang:
			w.WriteString(fmt.Sprintf("version.Lang(%#+v)\n", a.Lang.X))
		case *NgoloFuzzOne_Compare:
			w.WriteString(fmt.Sprintf("version.Compare(%#+v, %#+v)\n", a.Compare.X, a.Compare.Y))
		case *NgoloFuzzOne_IsValid:
			w.WriteString(fmt.Sprintf("version.IsValid(%#+v)\n", a.IsValid.X))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_go_version
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// LangArgs carries the single string argument for version.Lang
// (generated code).
type LangArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	X             string                 `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *LangArgs) Reset() {
	*x = LangArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *LangArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LangArgs) ProtoMessage() {}

func (x *LangArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LangArgs.ProtoReflect.Descriptor instead.
func (*LangArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetX returns the x field, or "" on a nil receiver.
func (x *LangArgs) GetX() string {
	if x != nil {
		return x.X
	}
	return ""
}
// CompareArgs carries the two string arguments for version.Compare
// (generated code).
type CompareArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	X             string                 `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
	Y             string                 `protobuf:"bytes,2,opt,name=y,proto3" json:"y,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CompareArgs) Reset() {
	*x = CompareArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CompareArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CompareArgs) ProtoMessage() {}

func (x *CompareArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CompareArgs.ProtoReflect.Descriptor instead.
func (*CompareArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetX returns the x field, or "" on a nil receiver.
func (x *CompareArgs) GetX() string {
	if x != nil {
		return x.X
	}
	return ""
}

// GetY returns the y field, or "" on a nil receiver.
func (x *CompareArgs) GetY() string {
	if x != nil {
		return x.Y
	}
	return ""
}

// IsValidArgs carries the single string argument for version.IsValid
// (generated code).
type IsValidArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	X             string                 `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *IsValidArgs) Reset() {
	*x = IsValidArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *IsValidArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*IsValidArgs) ProtoMessage() {}

func (x *IsValidArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsValidArgs.ProtoReflect.Descriptor instead.
func (*IsValidArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetX returns the x field, or "" on a nil receiver.
func (x *IsValidArgs) GetX() string {
	if x != nil {
		return x.X
	}
	return ""
}
// NgoloFuzzOne represents one fuzzer-chosen call into go/version,
// selected through the "item" oneof (generated code).
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_Lang
	//	*NgoloFuzzOne_Compare
	//	*NgoloFuzzOne_IsValid
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetItem returns the populated "item" oneof wrapper, or nil if unset.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each getter below returns its oneof arm's value, or nil when a
// different arm (or none) is set.

func (x *NgoloFuzzOne) GetLang() *LangArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Lang); ok {
			return x.Lang
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCompare() *CompareArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Compare); ok {
			return x.Compare
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetIsValid() *IsValidArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IsValid); ok {
			return x.IsValid
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the marker interface implemented by every
// wrapper type of the "item" oneof.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_Lang struct {
	Lang *LangArgs `protobuf:"bytes,1,opt,name=Lang,proto3,oneof"`
}

type NgoloFuzzOne_Compare struct {
	Compare *CompareArgs `protobuf:"bytes,2,opt,name=Compare,proto3,oneof"`
}

type NgoloFuzzOne_IsValid struct {
	IsValid *IsValidArgs `protobuf:"bytes,3,opt,name=IsValid,proto3,oneof"`
}

func (*NgoloFuzzOne_Lang) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Compare) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_IsValid) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny carries exactly one arbitrary scalar value for the
// fuzzer, selected through the "item" oneof (generated code).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetItem returns the populated "item" oneof wrapper, or nil if unset.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each getter below returns its oneof arm's value, or that type's zero
// value when a different arm (or none) is set.

func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the marker interface implemented by every
// wrapper type of the "item" oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzzer message: an ordered sequence of
// NgoloFuzzOne calls to replay (generated code).
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetList returns the call sequence; nil receiver yields nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto
// for ngolofuzz.proto (emitted verbatim by protoc; do not edit).
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x18\n" +
	"\bLangArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\tR\x01x\")\n" +
	"\vCompareArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\tR\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\tR\x01y\"\x1b\n" +
	"\vIsValidArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\tR\x01x\"\xa9\x01\n" +
	"\fNgoloFuzzOne\x12)\n" +
	"\x04Lang\x18\x01 \x01(\v2\x13.ngolofuzz.LangArgsH\x00R\x04Lang\x122\n" +
	"\aCompare\x18\x02 \x01(\v2\x16.ngolofuzz.CompareArgsH\x00R\aCompare\x122\n" +
	"\aIsValid\x18\x03 \x01(\v2\x16.ngolofuzz.IsValidArgsH\x00R\aIsValidB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x17Z\x15./;fuzz_ng_go_versionb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor
// exactly once and returns the cached bytes (used by Descriptor()).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime type tables for ngolofuzz.proto: six messages, no enums.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_ngolofuzz_proto_goTypes = []any{
	(*LangArgs)(nil),      // 0: ngolofuzz.LangArgs
	(*CompareArgs)(nil),   // 1: ngolofuzz.CompareArgs
	(*IsValidArgs)(nil),   // 2: ngolofuzz.IsValidArgs
	(*NgoloFuzzOne)(nil),  // 3: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),  // 4: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil), // 5: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
	0, // 0: ngolofuzz.NgoloFuzzOne.Lang:type_name -> ngolofuzz.LangArgs
	1, // 1: ngolofuzz.NgoloFuzzOne.Compare:type_name -> ngolofuzz.CompareArgs
	2, // 2: ngolofuzz.NgoloFuzzOne.IsValid:type_name -> ngolofuzz.IsValidArgs
	3, // 3: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	4, // [4:4] is the sub-list for method output_type
	4, // [4:4] is the sub-list for method input_type
	4, // [4:4] is the sub-list for extension type_name
	4, // [4:4] is the sub-list for extension extendee
	0, // [0:4] is the sub-list for field type_name
}

func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the file descriptor and message
// types with the protobuf runtime; it is idempotent.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Declare the oneof wrapper types so the runtime can decode them.
	file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
		(*NgoloFuzzOne_Lang)(nil),
		(*NgoloFuzzOne_Compare)(nil),
		(*NgoloFuzzOne_IsValid)(nil),
	}
	file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   6,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release build-time tables for the GC.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_hash_adler32
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"hash/adler32"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write discards b and reports the full slice as sent.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection drained so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a fixed placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}

func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// Deadlines are accepted and ignored: the fake conn never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps a in a FuzzingConn ready to be read.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO: only generate these helpers when the harness actually needs them.

// CreateBigInt interprets a as a big-endian unsigned integer
// (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	r := new(big.Int)
	r.SetBytes(a)
	return r
}

// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewBuffer(a))
}
// ConvertIntArray converts each int64 in a to the platform int type,
// preserving order (values may truncate on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	r := make([]int, len(a))
	for i := range a {
		r[i] = int(a[i])
	}
	return r
}

// ConvertUint16Array truncates each int64 in a to uint16 (modulo 2^16),
// preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	r := make([]uint16, len(a))
	for i := range a {
		r[i] = uint16(a[i])
	}
	return r
}

// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
func GetRune(s string) rune {
	for _, c := range s {
		return c
	}
	return '\x00'
}
// FuzzNG_valid replays fuzzer input that is expected to be a valid
// NgoloFuzzList encoding; it panics if the bytes fail to unmarshal.
// String panics raised while replaying are swallowed (treated as
// handled library errors); any other panic value is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Recover only covers the replay below, not the unmarshal above.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: string panics are expected library errors
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}

// FuzzNG_unsure is the variant used when the input may not be a valid
// protobuf: undecodable input is silently ignored instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics from the replayed API; re-raise the rest.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays each generated item as a call into hash/adler32.
// On the first invocation, if FUZZ_NG_REPRODUCER names a file, the call
// sequence is written there as Go source before execution. Lists are
// capped: once the index exceeds 4096 the run aborts with 0.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		if l > 4096 {
			return 0
		}
		// Dispatch on which oneof arm the fuzzer populated; results are
		// intentionally discarded — only crashes/panics matter here.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			adler32.New()
		case *NgoloFuzzOne_Checksum:
			adler32.Checksum(a.Checksum.Data)
		}
	}
	return 1
}
// PrintNG_List writes the Go-source equivalent of the generated call
// sequence to w, producing a standalone reproducer.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			w.WriteString(fmt.Sprintf("adler32.New()\n"))
		case *NgoloFuzzOne_Checksum:
			w.WriteString(fmt.Sprintf("adler32.Checksum(%#+v)\n", a.Checksum.Data))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_hash_adler32
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// NewArgs is the (empty) argument message for adler32.New
// (generated code).
type NewArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NewArgs) Reset() {
	*x = NewArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NewArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewArgs) ProtoMessage() {}

func (x *NewArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// ChecksumArgs carries the byte-slice argument for adler32.Checksum
// (generated code).
type ChecksumArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Data          []byte                 `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ChecksumArgs) Reset() {
	*x = ChecksumArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ChecksumArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ChecksumArgs) ProtoMessage() {}

func (x *ChecksumArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ChecksumArgs.ProtoReflect.Descriptor instead.
func (*ChecksumArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetData returns the data field, or nil on a nil receiver.
func (x *ChecksumArgs) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// NgoloFuzzOne represents one fuzzer-chosen call into hash/adler32,
// selected through the "item" oneof (generated code).
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_New
	//	*NgoloFuzzOne_Checksum
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetItem returns the populated "item" oneof wrapper, or nil if unset.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each getter below returns its oneof arm's value, or nil when a
// different arm (or none) is set.

func (x *NgoloFuzzOne) GetNew() *NewArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
			return x.New
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetChecksum() *ChecksumArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Checksum); ok {
			return x.Checksum
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the marker interface implemented by every
// wrapper type of the "item" oneof.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_New struct {
	New *NewArgs `protobuf:"bytes,1,opt,name=New,proto3,oneof"`
}

type NgoloFuzzOne_Checksum struct {
	Checksum *ChecksumArgs `protobuf:"bytes,2,opt,name=Checksum,proto3,oneof"`
}

func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Checksum) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny carries exactly one arbitrary scalar value for the
// fuzzer, selected through the "item" oneof (generated code).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetItem returns the populated "item" oneof wrapper, or nil if unset.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each getter below returns its oneof arm's value, or that type's zero
// value when a different arm (or none) is set.

func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the marker interface implemented by every
// wrapper type of the "item" oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\t\n" +
"\aNewArgs\"\"\n" +
"\fChecksumArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"u\n" +
"\fNgoloFuzzOne\x12&\n" +
"\x03New\x18\x01 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x125\n" +
"\bChecksum\x18\x02 \x01(\v2\x17.ngolofuzz.ChecksumArgsH\x00R\bChecksumB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x19Z\x17./;fuzz_ng_hash_adler32b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_ngolofuzz_proto_goTypes = []any{
(*NewArgs)(nil), // 0: ngolofuzz.NewArgs
(*ChecksumArgs)(nil), // 1: ngolofuzz.ChecksumArgs
(*NgoloFuzzOne)(nil), // 2: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 3: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 4: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
1, // 1: ngolofuzz.NgoloFuzzOne.Checksum:type_name -> ngolofuzz.ChecksumArgs
2, // 2: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_Checksum)(nil),
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_hash_crc32
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"hash/crc32"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in: it serves a fixed byte
// slice to Read and discards Write, so network-using code can be exercised
// deterministically during fuzzing without real sockets.
type FuzzingConn struct {
	buf    []byte // data served to Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of the buffer into b and returns the
// number of bytes actually copied. It returns io.EOF once the buffer is
// exhausted.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset instead
// of the remaining byte count, so a read near the end of the buffer advanced
// offset by len(b) and reported n = len(b) even though only the short tail
// had been copied — violating the io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy returns min(len(b), remaining), which is exactly the number of
	// bytes delivered; advancing offset by n keeps the cursor consistent
	// whether b is larger or smaller than the remaining data.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write discards the data and pretends the full write succeeded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for an in-memory buffer.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn returns a FuzzingConn that serves a as its read data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO: emit these helper functions only when the generated fuzz target actually needs them.
// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the bytes in a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray converts a slice of int64 to a slice of int,
// truncating each element with a plain int conversion.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array converts a slice of int64 to a slice of uint16,
// truncating each element modulo 2^16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the start of s yields utf8.RuneError, exactly as a
// range loop over the string would.
func GetRune(s string) rune {
	rs := []rune(s)
	if len(rs) == 0 {
		return '\x00'
	}
	return rs[0]
}
// FuzzNG_valid is a fuzzing entry point for inputs that are expected to be
// well-formed protobuf (e.g. produced by a libprotobuf-mutator-style
// generator): a decode failure is treated as a harness bug and panics.
// On success it replays the decoded call list via FuzzNG_List.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Registered after the unmarshal check on purpose: the panic above must
	// NOT be swallowed. String panics are suppressed; any other panic value
	// is re-raised so genuine crashes in the target still surface.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	// Force a GC cycle before replay, presumably to shake out lifetime
	// bugs between iterations — TODO confirm intent with harness author.
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point for raw fuzzer input: since we are unsure
// the input is a valid protobuf, a decode failure simply rejects the input
// (returns 0) instead of panicking as FuzzNG_valid does.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Suppress string panics raised by the harness; re-raise everything else
	// so real crashes in the target package still surface.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets gen.List as a sequence of hash/crc32 API calls.
// Tables produced by MakeTable are collected and handed out round-robin to
// the calls that consume a *crc32.Table; table-consuming calls are skipped
// while no table exists yet. Returns 1 when the list was executed, 0 when
// it was rejected as too long.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// First call only: if FUZZ_NG_REPRODUCER names a file, write a
		// Go-source reproducer of this call list to it.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Tables created so far; TableResultsIndex cycles through them.
	var TableResults []*crc32.Table
	TableResultsIndex := 0
	for l := range gen.List {
		// Cap the number of interpreted calls per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_MakeTable:
			r0 := crc32.MakeTable(a.MakeTable.Poly)
			if r0 != nil{
				TableResults = append(TableResults, r0)
			}
		case *NgoloFuzzOne_New:
			// Needs a table; skip until MakeTable has produced one.
			if len(TableResults) == 0 {
				continue
			}
			arg0 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			crc32.New(arg0)
		case *NgoloFuzzOne_NewIEEE:
			crc32.NewIEEE()
		case *NgoloFuzzOne_Update:
			if len(TableResults) == 0 {
				continue
			}
			arg1 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			crc32.Update(a.Update.Crc, arg1, a.Update.P)
		case *NgoloFuzzOne_Checksum:
			if len(TableResults) == 0 {
				continue
			}
			arg1 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			crc32.Checksum(a.Checksum.Data, arg1)
		case *NgoloFuzzOne_ChecksumIEEE:
			crc32.ChecksumIEEE(a.ChecksumIEEE.Data)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w: one line of
// hash/crc32 API calls per list entry, mirroring FuzzNG_List's interpretation
// (round-robin table selection; table-consuming calls skipped while no table
// exists). Note it does not replicate FuzzNG_List's 4096-entry cap.
// WriteString errors are deliberately ignored: this is best-effort debug
// output written once at startup.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	TableNb := 0
	TableResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_MakeTable:
			w.WriteString(fmt.Sprintf("Table%d := crc32.MakeTable(%#+v)\n", TableNb, a.MakeTable.Poly))
			TableNb = TableNb + 1
		case *NgoloFuzzOne_New:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("crc32.New(Table%d)\n", TableResultsIndex%TableNb))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		case *NgoloFuzzOne_NewIEEE:
			// Fixed: was fmt.Sprintf with no arguments (staticcheck S1039).
			w.WriteString("crc32.NewIEEE()\n")
		case *NgoloFuzzOne_Update:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("crc32.Update(%#+v, Table%d, %#+v)\n", a.Update.Crc, TableResultsIndex%TableNb, a.Update.P))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		case *NgoloFuzzOne_Checksum:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("crc32.Checksum(%#+v, Table%d)\n", a.Checksum.Data, TableResultsIndex%TableNb))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		case *NgoloFuzzOne_ChecksumIEEE:
			w.WriteString(fmt.Sprintf("crc32.ChecksumIEEE(%#+v)\n", a.ChecksumIEEE.Data))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_hash_crc32
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type MakeTableArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Poly uint32 `protobuf:"varint,1,opt,name=poly,proto3" json:"poly,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MakeTableArgs) Reset() {
*x = MakeTableArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MakeTableArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MakeTableArgs) ProtoMessage() {}
func (x *MakeTableArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MakeTableArgs.ProtoReflect.Descriptor instead.
func (*MakeTableArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *MakeTableArgs) GetPoly() uint32 {
if x != nil {
return x.Poly
}
return 0
}
type NewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
*x = NewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type NewIEEEArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewIEEEArgs) Reset() {
*x = NewIEEEArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewIEEEArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewIEEEArgs) ProtoMessage() {}
func (x *NewIEEEArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewIEEEArgs.ProtoReflect.Descriptor instead.
func (*NewIEEEArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
type UpdateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Crc uint32 `protobuf:"varint,1,opt,name=crc,proto3" json:"crc,omitempty"`
P []byte `protobuf:"bytes,2,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UpdateArgs) Reset() {
*x = UpdateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UpdateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UpdateArgs) ProtoMessage() {}
func (x *UpdateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UpdateArgs.ProtoReflect.Descriptor instead.
func (*UpdateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *UpdateArgs) GetCrc() uint32 {
if x != nil {
return x.Crc
}
return 0
}
func (x *UpdateArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
type ChecksumArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ChecksumArgs) Reset() {
*x = ChecksumArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ChecksumArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ChecksumArgs) ProtoMessage() {}
func (x *ChecksumArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ChecksumArgs.ProtoReflect.Descriptor instead.
func (*ChecksumArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *ChecksumArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type ChecksumIEEEArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ChecksumIEEEArgs) Reset() {
*x = ChecksumIEEEArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ChecksumIEEEArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ChecksumIEEEArgs) ProtoMessage() {}
func (x *ChecksumIEEEArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ChecksumIEEEArgs.ProtoReflect.Descriptor instead.
func (*ChecksumIEEEArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *ChecksumIEEEArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_MakeTable
// *NgoloFuzzOne_New
// *NgoloFuzzOne_NewIEEE
// *NgoloFuzzOne_Update
// *NgoloFuzzOne_Checksum
// *NgoloFuzzOne_ChecksumIEEE
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetMakeTable() *MakeTableArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MakeTable); ok {
return x.MakeTable
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew() *NewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
return x.New
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewIEEE() *NewIEEEArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewIEEE); ok {
return x.NewIEEE
}
}
return nil
}
func (x *NgoloFuzzOne) GetUpdate() *UpdateArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Update); ok {
return x.Update
}
}
return nil
}
func (x *NgoloFuzzOne) GetChecksum() *ChecksumArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Checksum); ok {
return x.Checksum
}
}
return nil
}
func (x *NgoloFuzzOne) GetChecksumIEEE() *ChecksumIEEEArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ChecksumIEEE); ok {
return x.ChecksumIEEE
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_MakeTable struct {
MakeTable *MakeTableArgs `protobuf:"bytes,1,opt,name=MakeTable,proto3,oneof"`
}
type NgoloFuzzOne_New struct {
New *NewArgs `protobuf:"bytes,2,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_NewIEEE struct {
NewIEEE *NewIEEEArgs `protobuf:"bytes,3,opt,name=NewIEEE,proto3,oneof"`
}
type NgoloFuzzOne_Update struct {
Update *UpdateArgs `protobuf:"bytes,4,opt,name=Update,proto3,oneof"`
}
type NgoloFuzzOne_Checksum struct {
Checksum *ChecksumArgs `protobuf:"bytes,5,opt,name=Checksum,proto3,oneof"`
}
type NgoloFuzzOne_ChecksumIEEE struct {
ChecksumIEEE *ChecksumIEEEArgs `protobuf:"bytes,6,opt,name=ChecksumIEEE,proto3,oneof"`
}
func (*NgoloFuzzOne_MakeTable) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewIEEE) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Update) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Checksum) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ChecksumIEEE) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"#\n" +
"\rMakeTableArgs\x12\x12\n" +
"\x04poly\x18\x01 \x01(\rR\x04poly\"\t\n" +
"\aNewArgs\"\r\n" +
"\vNewIEEEArgs\",\n" +
"\n" +
"UpdateArgs\x12\x10\n" +
"\x03crc\x18\x01 \x01(\rR\x03crc\x12\f\n" +
"\x01p\x18\x02 \x01(\fR\x01p\"\"\n" +
"\fChecksumArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"&\n" +
"\x10ChecksumIEEEArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"\xd7\x02\n" +
"\fNgoloFuzzOne\x128\n" +
"\tMakeTable\x18\x01 \x01(\v2\x18.ngolofuzz.MakeTableArgsH\x00R\tMakeTable\x12&\n" +
"\x03New\x18\x02 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x122\n" +
"\aNewIEEE\x18\x03 \x01(\v2\x16.ngolofuzz.NewIEEEArgsH\x00R\aNewIEEE\x12/\n" +
"\x06Update\x18\x04 \x01(\v2\x15.ngolofuzz.UpdateArgsH\x00R\x06Update\x125\n" +
"\bChecksum\x18\x05 \x01(\v2\x17.ngolofuzz.ChecksumArgsH\x00R\bChecksum\x12A\n" +
"\fChecksumIEEE\x18\x06 \x01(\v2\x1b.ngolofuzz.ChecksumIEEEArgsH\x00R\fChecksumIEEEB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x17Z\x15./;fuzz_ng_hash_crc32b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_ngolofuzz_proto_goTypes = []any{
(*MakeTableArgs)(nil), // 0: ngolofuzz.MakeTableArgs
(*NewArgs)(nil), // 1: ngolofuzz.NewArgs
(*NewIEEEArgs)(nil), // 2: ngolofuzz.NewIEEEArgs
(*UpdateArgs)(nil), // 3: ngolofuzz.UpdateArgs
(*ChecksumArgs)(nil), // 4: ngolofuzz.ChecksumArgs
(*ChecksumIEEEArgs)(nil), // 5: ngolofuzz.ChecksumIEEEArgs
(*NgoloFuzzOne)(nil), // 6: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 7: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 8: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.MakeTable:type_name -> ngolofuzz.MakeTableArgs
1, // 1: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
2, // 2: ngolofuzz.NgoloFuzzOne.NewIEEE:type_name -> ngolofuzz.NewIEEEArgs
3, // 3: ngolofuzz.NgoloFuzzOne.Update:type_name -> ngolofuzz.UpdateArgs
4, // 4: ngolofuzz.NgoloFuzzOne.Checksum:type_name -> ngolofuzz.ChecksumArgs
5, // 5: ngolofuzz.NgoloFuzzOne.ChecksumIEEE:type_name -> ngolofuzz.ChecksumIEEEArgs
6, // 6: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
7, // [7:7] is the sub-list for method output_type
7, // [7:7] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
(*NgoloFuzzOne_MakeTable)(nil),
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_NewIEEE)(nil),
(*NgoloFuzzOne_Update)(nil),
(*NgoloFuzzOne_Checksum)(nil),
(*NgoloFuzzOne_ChecksumIEEE)(nil),
}
file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_hash_crc64
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"hash/crc64"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in: it serves a fixed byte
// slice to Read and discards Write, so network-using code can be exercised
// deterministically during fuzzing without real sockets.
type FuzzingConn struct {
	buf    []byte // data served to Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of the buffer into b and returns the
// number of bytes actually copied. It returns io.EOF once the buffer is
// exhausted.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset instead
// of the remaining byte count, so a read near the end of the buffer advanced
// offset by len(b) and reported n = len(b) even though only the short tail
// had been copied — violating the io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy returns min(len(b), remaining), which is exactly the number of
	// bytes delivered; advancing offset by n keeps the cursor consistent
	// whether b is larger or smaller than the remaining data.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write discards the data and pretends the full write succeeded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for an in-memory buffer.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn returns a FuzzingConn that serves a as its read data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO: emit these helper functions only when the generated fuzz target actually needs them.
// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the bytes in a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray converts a slice of int64 to a slice of int,
// truncating each element with a plain int conversion.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array converts a slice of int64 to a slice of uint16,
// truncating each element modulo 2^16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the start of s yields utf8.RuneError, exactly as a
// range loop over the string would.
func GetRune(s string) rune {
	rs := []rune(s)
	if len(rs) == 0 {
		return '\x00'
	}
	return rs[0]
}
// FuzzNG_valid is a fuzzing entry point for inputs that are expected to be
// well-formed protobuf (e.g. produced by a libprotobuf-mutator-style
// generator): a decode failure is treated as a harness bug and panics.
// On success it replays the decoded call list via FuzzNG_List.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Registered after the unmarshal check on purpose: the panic above must
	// NOT be swallowed. String panics are suppressed; any other panic value
	// is re-raised so genuine crashes in the target still surface.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	// Force a GC cycle before replay, presumably to shake out lifetime
	// bugs between iterations — TODO confirm intent with harness author.
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point for raw fuzzer input: since we are unsure
// the input is a valid protobuf, a decode failure simply rejects the input
// (returns 0) instead of panicking as FuzzNG_valid does.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Suppress string panics raised by the harness; re-raise everything else
	// so real crashes in the target package still surface.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets gen.List as a sequence of hash/crc64 API calls.
// Tables produced by MakeTable are collected and handed out round-robin to
// the calls that consume a *crc64.Table; table-consuming calls are skipped
// while no table exists yet. Returns 1 when the list was executed, 0 when
// it was rejected as too long.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// First call only: if FUZZ_NG_REPRODUCER names a file, write a
		// Go-source reproducer of this call list to it.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Tables created so far; TableResultsIndex cycles through them.
	var TableResults []*crc64.Table
	TableResultsIndex := 0
	for l := range gen.List {
		// Cap the number of interpreted calls per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_MakeTable:
			r0 := crc64.MakeTable(a.MakeTable.Poly)
			if r0 != nil{
				TableResults = append(TableResults, r0)
			}
		case *NgoloFuzzOne_New:
			// Needs a table; skip until MakeTable has produced one.
			if len(TableResults) == 0 {
				continue
			}
			arg0 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			crc64.New(arg0)
		case *NgoloFuzzOne_Update:
			if len(TableResults) == 0 {
				continue
			}
			arg1 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			crc64.Update(a.Update.Crc, arg1, a.Update.P)
		case *NgoloFuzzOne_Checksum:
			if len(TableResults) == 0 {
				continue
			}
			arg1 := TableResults[TableResultsIndex]
			TableResultsIndex = (TableResultsIndex + 1) % len(TableResults)
			crc64.Checksum(a.Checksum.Data, arg1)
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go reproducer for gen to w,
// mirroring the crc64 calls that FuzzNG_List would execute.
// WriteString errors are deliberately ignored: this is best-effort
// diagnostic output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// TableNb counts tables created so far; TableResultsIndex cycles
	// through them exactly as FuzzNG_List does.
	TableNb := 0
	TableResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_MakeTable:
			w.WriteString(fmt.Sprintf("Table%d := crc64.MakeTable(%#+v)\n", TableNb, a.MakeTable.Poly))
			TableNb = TableNb + 1
		case *NgoloFuzzOne_New:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("crc64.New(Table%d)\n", TableResultsIndex%TableNb))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		case *NgoloFuzzOne_Update:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("crc64.Update(%#+v, Table%d, %#+v)\n", a.Update.Crc, TableResultsIndex%TableNb, a.Update.P))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		case *NgoloFuzzOne_Checksum:
			if TableNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("crc64.Checksum(%#+v, Table%d)\n", a.Checksum.Data, TableResultsIndex%TableNb))
			TableResultsIndex = (TableResultsIndex + 1) % TableNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_hash_crc64
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// MakeTableArgs carries the fuzzer-chosen polynomial passed to
// crc64.MakeTable. Generated by protoc-gen-go from ngolofuzz.proto.
type MakeTableArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Poly uint64 `protobuf:"varint,1,opt,name=poly,proto3" json:"poly,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the message to its zero state while re-attaching its
// runtime type information.
func (x *MakeTableArgs) Reset() {
*x = MakeTableArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message with the protobuf text formatter.
func (x *MakeTableArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MakeTableArgs) ProtoMessage() {}
// ProtoReflect exposes the reflective view used by the protobuf runtime.
func (x *MakeTableArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MakeTableArgs.ProtoReflect.Descriptor instead.
func (*MakeTableArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetPoly returns the polynomial field, or 0 for a nil receiver.
func (x *MakeTableArgs) GetPoly() uint64 {
if x != nil {
return x.Poly
}
return 0
}
type NewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
*x = NewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type UpdateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Crc uint64 `protobuf:"varint,1,opt,name=crc,proto3" json:"crc,omitempty"`
P []byte `protobuf:"bytes,2,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UpdateArgs) Reset() {
*x = UpdateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UpdateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UpdateArgs) ProtoMessage() {}
func (x *UpdateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UpdateArgs.ProtoReflect.Descriptor instead.
func (*UpdateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *UpdateArgs) GetCrc() uint64 {
if x != nil {
return x.Crc
}
return 0
}
func (x *UpdateArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
type ChecksumArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ChecksumArgs) Reset() {
*x = ChecksumArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ChecksumArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ChecksumArgs) ProtoMessage() {}
func (x *ChecksumArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ChecksumArgs.ProtoReflect.Descriptor instead.
func (*ChecksumArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *ChecksumArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_MakeTable
// *NgoloFuzzOne_New
// *NgoloFuzzOne_Update
// *NgoloFuzzOne_Checksum
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetMakeTable() *MakeTableArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MakeTable); ok {
return x.MakeTable
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew() *NewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
return x.New
}
}
return nil
}
func (x *NgoloFuzzOne) GetUpdate() *UpdateArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Update); ok {
return x.Update
}
}
return nil
}
func (x *NgoloFuzzOne) GetChecksum() *ChecksumArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Checksum); ok {
return x.Checksum
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_MakeTable struct {
MakeTable *MakeTableArgs `protobuf:"bytes,1,opt,name=MakeTable,proto3,oneof"`
}
type NgoloFuzzOne_New struct {
New *NewArgs `protobuf:"bytes,2,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_Update struct {
Update *UpdateArgs `protobuf:"bytes,3,opt,name=Update,proto3,oneof"`
}
type NgoloFuzzOne_Checksum struct {
Checksum *ChecksumArgs `protobuf:"bytes,4,opt,name=Checksum,proto3,oneof"`
}
func (*NgoloFuzzOne_MakeTable) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Update) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Checksum) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"#\n" +
"\rMakeTableArgs\x12\x12\n" +
"\x04poly\x18\x01 \x01(\x04R\x04poly\"\t\n" +
"\aNewArgs\",\n" +
"\n" +
"UpdateArgs\x12\x10\n" +
"\x03crc\x18\x01 \x01(\x04R\x03crc\x12\f\n" +
"\x01p\x18\x02 \x01(\fR\x01p\"\"\n" +
"\fChecksumArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"\xe0\x01\n" +
"\fNgoloFuzzOne\x128\n" +
"\tMakeTable\x18\x01 \x01(\v2\x18.ngolofuzz.MakeTableArgsH\x00R\tMakeTable\x12&\n" +
"\x03New\x18\x02 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12/\n" +
"\x06Update\x18\x03 \x01(\v2\x15.ngolofuzz.UpdateArgsH\x00R\x06Update\x125\n" +
"\bChecksum\x18\x04 \x01(\v2\x17.ngolofuzz.ChecksumArgsH\x00R\bChecksumB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x17Z\x15./;fuzz_ng_hash_crc64b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file descriptor
// exactly once (guarded by a sync.Once) and returns the cached bytes.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_ngolofuzz_proto_goTypes = []any{
(*MakeTableArgs)(nil), // 0: ngolofuzz.MakeTableArgs
(*NewArgs)(nil), // 1: ngolofuzz.NewArgs
(*UpdateArgs)(nil), // 2: ngolofuzz.UpdateArgs
(*ChecksumArgs)(nil), // 3: ngolofuzz.ChecksumArgs
(*NgoloFuzzOne)(nil), // 4: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 5: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 6: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.MakeTable:type_name -> ngolofuzz.MakeTableArgs
1, // 1: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
2, // 2: ngolofuzz.NgoloFuzzOne.Update:type_name -> ngolofuzz.UpdateArgs
3, // 3: ngolofuzz.NgoloFuzzOne.Checksum:type_name -> ngolofuzz.ChecksumArgs
4, // 4: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
5, // [5:5] is the sub-list for method output_type
5, // [5:5] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
(*NgoloFuzzOne_MakeTable)(nil),
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_Update)(nil),
(*NgoloFuzzOne_Checksum)(nil),
}
file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_html
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"html"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in: reads are served from a
// fixed byte slice supplied by the fuzzer and writes are discarded.
type FuzzingConn struct {
	buf    []byte // fuzz data served to readers
	offset int    // number of bytes of buf already consumed
}

// Read copies pending bytes from the internal buffer into b, simulating a
// network read. It returns io.EOF once all data has been consumed.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything that is left: fill it completely.
		// BUG FIX: the original compared len(b) against len(c.buf)+c.offset,
		// which made this branch fire even when b could hold the remainder,
		// reporting more bytes read than were actually copied into b.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b is large enough: hand over the rest of the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write discards b and reports complete success.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the stream as exhausted so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr with fixed values.
type FuzzingAddr struct{}

// Network implements net.Addr with a fixed placeholder network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String implements net.Addr with a fixed placeholder address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr implements net.Conn with a placeholder fuzzing address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr implements net.Conn with a placeholder fuzzing address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op (in-memory connection).
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op (writes are discarded).
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn returns a FuzzingConn whose reads are served from a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt builds a non-negative big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewReader(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to a uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL for the empty string.
func GetRune(s string) rune {
	r := rune('\x00')
	for _, c := range s {
		r = c
		break
	}
	return r
}
// FuzzNG_valid fuzzes a protobuf-encoded call list that is expected to be a
// valid NgoloFuzzList; failure to unmarshal is itself a harness bug and panics.
// NOTE(review): that panic is not caught here, since the recover below is
// installed only after Unmarshal succeeds.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Swallow panics whose value is a string — presumably controlled aborts
// from callees (TODO confirm); any other panic value is re-raised as a bug.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure fuzzes input that may not be a valid protobuf encoding;
// inputs that fail to unmarshal are simply rejected with 0.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow panics whose value is a string — presumably controlled aborts
// from callees (TODO confirm); any other panic value is re-raised as a bug.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
var initialized bool
// FuzzNG_List executes the html package calls described by gen. On first use
// it optionally writes a Go reproducer to the file named by the
// FUZZ_NG_REPRODUCER environment variable.
// It returns 1 when the whole list was executed, 0 when it was truncated.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for idx, one := range gen.List {
		// Bound the number of executed operations to keep a run fast.
		if idx > 4096 {
			return 0
		}
		switch item := one.Item.(type) {
		case *NgoloFuzzOne_EscapeString:
			html.EscapeString(item.EscapeString.S)
		case *NgoloFuzzOne_UnescapeString:
			html.UnescapeString(item.UnescapeString.S)
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go reproducer for gen to w,
// mirroring the html calls that FuzzNG_List would execute.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for _, one := range gen.List {
		switch item := one.Item.(type) {
		case *NgoloFuzzOne_EscapeString:
			w.WriteString(fmt.Sprintf("html.EscapeString(%#+v)\n", item.EscapeString.S))
		case *NgoloFuzzOne_UnescapeString:
			w.WriteString(fmt.Sprintf("html.UnescapeString(%#+v)\n", item.UnescapeString.S))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_html
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type EscapeStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EscapeStringArgs) Reset() {
*x = EscapeStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EscapeStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EscapeStringArgs) ProtoMessage() {}
func (x *EscapeStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EscapeStringArgs.ProtoReflect.Descriptor instead.
func (*EscapeStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *EscapeStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
type UnescapeStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UnescapeStringArgs) Reset() {
*x = UnescapeStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UnescapeStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnescapeStringArgs) ProtoMessage() {}
func (x *UnescapeStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnescapeStringArgs.ProtoReflect.Descriptor instead.
func (*UnescapeStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *UnescapeStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_EscapeString
// *NgoloFuzzOne_UnescapeString
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetEscapeString() *EscapeStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EscapeString); ok {
return x.EscapeString
}
}
return nil
}
func (x *NgoloFuzzOne) GetUnescapeString() *UnescapeStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UnescapeString); ok {
return x.UnescapeString
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_EscapeString struct {
EscapeString *EscapeStringArgs `protobuf:"bytes,1,opt,name=EscapeString,proto3,oneof"`
}
type NgoloFuzzOne_UnescapeString struct {
UnescapeString *UnescapeStringArgs `protobuf:"bytes,2,opt,name=UnescapeString,proto3,oneof"`
}
func (*NgoloFuzzOne_EscapeString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UnescapeString) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\" \n" +
"\x10EscapeStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\"\n" +
"\x12UnescapeStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\xa2\x01\n" +
"\fNgoloFuzzOne\x12A\n" +
"\fEscapeString\x18\x01 \x01(\v2\x1b.ngolofuzz.EscapeStringArgsH\x00R\fEscapeString\x12G\n" +
"\x0eUnescapeString\x18\x02 \x01(\v2\x1d.ngolofuzz.UnescapeStringArgsH\x00R\x0eUnescapeStringB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x11Z\x0f./;fuzz_ng_htmlb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_ngolofuzz_proto_goTypes = []any{
(*EscapeStringArgs)(nil), // 0: ngolofuzz.EscapeStringArgs
(*UnescapeStringArgs)(nil), // 1: ngolofuzz.UnescapeStringArgs
(*NgoloFuzzOne)(nil), // 2: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 3: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 4: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.EscapeString:type_name -> ngolofuzz.EscapeStringArgs
1, // 1: ngolofuzz.NgoloFuzzOne.UnescapeString:type_name -> ngolofuzz.UnescapeStringArgs
2, // 2: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzOne_EscapeString)(nil),
(*NgoloFuzzOne_UnescapeString)(nil),
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_html_template
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"html/template"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn-style stub: Read serves a fixed
// byte slice, Write discards its input. It lets fuzzed code that
// expects a network connection run deterministically on fuzzer input.
type FuzzingConn struct {
	buf    []byte // bytes served to readers
	offset int    // current read position within buf
}

// Read copies the next chunk of c.buf into b, advances the read
// offset by the number of bytes actually copied, and returns io.EOF
// once the buffer is exhausted.
//
// Bug fix: the original compared len(b) against len(c.buf)+c.offset
// instead of the remaining length len(c.buf)-c.offset. For a partially
// consumed buffer that could report more bytes read than were copied
// and advance offset past len(c.buf). Using copy's return value is
// both correct and simpler.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends to send b successfully; the data is discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection as exhausted so subsequent Reads hit EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr for FuzzingConn endpoints.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for an in-memory buffer.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn whose reads serve a from
// the beginning (offset starts at its zero value).
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit these helper constructors only when the generated harness actually needs them.
// CreateBigInt builds a non-negative big.Int from the big-endian
// bytes in a (an empty or nil slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a as a buffered reader positioned at the
// first byte.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to int, preserving order.
// The result always has the same length as a.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for idx, v := range a {
		out[idx] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (modulo
// 2^16), preserving order. The result has the same length as a.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for idx, v := range a {
		out[idx] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, decoding UTF-8 the same way a
// range loop does (U+FFFD for an invalid leading sequence). The empty
// string yields NUL.
func GetRune(s string) rune {
	if s == "" {
		return '\x00'
	}
	return []rune(s)[0]
}
// FuzzNG_valid is the fuzz entry point for corpora known to hold valid
// protobuf input: a failure to unmarshal is itself a harness bug, so
// it panics rather than rejecting the input. Panics of type string
// raised while executing the call list are swallowed (presumably the
// fuzzed API's expected failure mode — TODO confirm); all other panics
// are re-raised for the fuzzer to report.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Installed only after the unmarshal check on purpose: an
	// unmarshal failure above must abort loudly, while panics from
	// FuzzNG_List below are filtered here.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzz entry point for arbitrary input. Since we
// are unsure the input is a valid protobuf, an unmarshal failure
// simply rejects the input (return 0) instead of panicking. The panic
// filter mirrors the one in FuzzNG_valid: string panics are swallowed,
// anything else is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets gen as a sequence of html/template API calls
// and executes them in order. *template.Template values produced along
// the way are collected in TemplateResults and handed to later calls
// in round-robin order via TemplateResultsIndex. Returns 1 when the
// whole list was executed, 0 when execution stopped early (a template
// call returned an error, or the list exceeded the size cap).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On first use, optionally dump a Go reproducer of this input
		// to the file named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var TemplateResults []*template.Template
	TemplateResultsIndex := 0
	for l := range gen.List {
		// Cap the number of interpreted calls to bound runtime.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_HTMLEscape:
			arg0 := bytes.NewBuffer(a.HTMLEscape.W)
			template.HTMLEscape(arg0, a.HTMLEscape.B)
		case *NgoloFuzzOne_HTMLEscapeString:
			template.HTMLEscapeString(a.HTMLEscapeString.S)
		case *NgoloFuzzOne_JSEscape:
			arg0 := bytes.NewBuffer(a.JSEscape.W)
			template.JSEscape(arg0, a.JSEscape.B)
		case *NgoloFuzzOne_JSEscapeString:
			template.JSEscapeString(a.JSEscapeString.S)
		case *NgoloFuzzOne_TemplateNgdotTemplates:
			// Method calls need a receiver: skip until some template
			// has been created (same guard pattern for every Ngdot case).
			if len(TemplateResults) == 0 {
				continue
			}
			arg0 := TemplateResults[TemplateResultsIndex]
			TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
			r0 := arg0.Templates()
			TemplateResults = append(TemplateResults, r0...)
		case *NgoloFuzzOne_TemplateNgdotExecute:
			if len(TemplateResults) == 0 {
				continue
			}
			arg0 := TemplateResults[TemplateResultsIndex]
			TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
			arg1 := bytes.NewBuffer(a.TemplateNgdotExecute.Wr)
			r0 := arg0.Execute(arg1, a.TemplateNgdotExecute.Data)
			if r0 != nil {
				// Exercise the error's String path, then stop this run.
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_TemplateNgdotExecuteTemplate:
			if len(TemplateResults) == 0 {
				continue
			}
			arg0 := TemplateResults[TemplateResultsIndex]
			TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
			arg1 := bytes.NewBuffer(a.TemplateNgdotExecuteTemplate.Wr)
			r0 := arg0.ExecuteTemplate(arg1, a.TemplateNgdotExecuteTemplate.Name, a.TemplateNgdotExecuteTemplate.Data)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_TemplateNgdotDefinedTemplates:
			if len(TemplateResults) == 0 {
				continue
			}
			arg0 := TemplateResults[TemplateResultsIndex]
			TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
			arg0.DefinedTemplates()
		case *NgoloFuzzOne_TemplateNgdotParse:
			if len(TemplateResults) == 0 {
				continue
			}
			arg0 := TemplateResults[TemplateResultsIndex]
			TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
			r0, r1 := arg0.Parse(a.TemplateNgdotParse.Text)
			if r0 != nil {
				TemplateResults = append(TemplateResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_TemplateNgdotClone:
			if len(TemplateResults) == 0 {
				continue
			}
			arg0 := TemplateResults[TemplateResultsIndex]
			TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
			r0, r1 := arg0.Clone()
			if r0 != nil {
				TemplateResults = append(TemplateResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_New:
			r0 := template.New(a.New.Name)
			if r0 != nil {
				TemplateResults = append(TemplateResults, r0)
			}
		case *NgoloFuzzOne_TemplateNgdotNew:
			if len(TemplateResults) == 0 {
				continue
			}
			arg0 := TemplateResults[TemplateResultsIndex]
			TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
			r0 := arg0.New(a.TemplateNgdotNew.Name)
			if r0 != nil {
				TemplateResults = append(TemplateResults, r0)
			}
		case *NgoloFuzzOne_TemplateNgdotName:
			if len(TemplateResults) == 0 {
				continue
			}
			arg0 := TemplateResults[TemplateResultsIndex]
			TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
			arg0.Name()
		case *NgoloFuzzOne_TemplateNgdotDelims:
			if len(TemplateResults) == 0 {
				continue
			}
			arg0 := TemplateResults[TemplateResultsIndex]
			TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
			r0 := arg0.Delims(a.TemplateNgdotDelims.Left, a.TemplateNgdotDelims.Right)
			if r0 != nil {
				TemplateResults = append(TemplateResults, r0)
			}
		case *NgoloFuzzOne_TemplateNgdotLookup:
			if len(TemplateResults) == 0 {
				continue
			}
			arg0 := TemplateResults[TemplateResultsIndex]
			TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
			r0 := arg0.Lookup(a.TemplateNgdotLookup.Name)
			if r0 != nil {
				TemplateResults = append(TemplateResults, r0)
			}
		case *NgoloFuzzOne_IsTrue:
			template.IsTrue(a.IsTrue.Val)
		}
	}
	return 1
}
// PrintNG_List writes a best-effort Go reproducer for gen to w: one
// source line per interpreted call, mirroring the execution order of
// FuzzNG_List. TemplateNb numbers newly created templates; the
// receiver for method calls is chosen round-robin via
// TemplateResultsIndex, matching the interpreter's selection.
//
// NOTE(review): the emitted source is approximate and may not compile
// as-is — e.g. Templates() returns a slice but is printed as a single
// TemplateN assignment, and several cases increment TemplateNb before
// taking the modulo so the numbering can drift from the interpreter's
// actual receiver when a call fails at run time. Verify a reproducer
// before relying on it.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	TemplateNb := 0
	TemplateResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_HTMLEscape:
			w.WriteString(fmt.Sprintf("template.HTMLEscape(bytes.NewBuffer(%#+v), %#+v)\n", a.HTMLEscape.W, a.HTMLEscape.B))
		case *NgoloFuzzOne_HTMLEscapeString:
			w.WriteString(fmt.Sprintf("template.HTMLEscapeString(%#+v)\n", a.HTMLEscapeString.S))
		case *NgoloFuzzOne_JSEscape:
			w.WriteString(fmt.Sprintf("template.JSEscape(bytes.NewBuffer(%#+v), %#+v)\n", a.JSEscape.W, a.JSEscape.B))
		case *NgoloFuzzOne_JSEscapeString:
			w.WriteString(fmt.Sprintf("template.JSEscapeString(%#+v)\n", a.JSEscapeString.S))
		case *NgoloFuzzOne_TemplateNgdotTemplates:
			// Method cases are skipped until a template exists,
			// matching the interpreter's guard.
			if TemplateNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Template%d := Template%d.Templates()\n", TemplateNb, TemplateResultsIndex))
			TemplateNb = TemplateNb + 1
			TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
		case *NgoloFuzzOne_TemplateNgdotExecute:
			if TemplateNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Template%d.Execute(bytes.NewBuffer(%#+v), %#+v)\n", TemplateResultsIndex, a.TemplateNgdotExecute.Wr, a.TemplateNgdotExecute.Data))
			TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
		case *NgoloFuzzOne_TemplateNgdotExecuteTemplate:
			if TemplateNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Template%d.ExecuteTemplate(bytes.NewBuffer(%#+v), %#+v, %#+v)\n", TemplateResultsIndex, a.TemplateNgdotExecuteTemplate.Wr, a.TemplateNgdotExecuteTemplate.Name, a.TemplateNgdotExecuteTemplate.Data))
			TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
		case *NgoloFuzzOne_TemplateNgdotDefinedTemplates:
			if TemplateNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Template%d.DefinedTemplates()\n", TemplateResultsIndex))
			TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
		case *NgoloFuzzOne_TemplateNgdotParse:
			if TemplateNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Template%d, _ := Template%d.Parse(%#+v)\n", TemplateNb, TemplateResultsIndex, a.TemplateNgdotParse.Text))
			TemplateNb = TemplateNb + 1
			TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
		case *NgoloFuzzOne_TemplateNgdotClone:
			if TemplateNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Template%d, _ := Template%d.Clone()\n", TemplateNb, TemplateResultsIndex))
			TemplateNb = TemplateNb + 1
			TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
		case *NgoloFuzzOne_New:
			w.WriteString(fmt.Sprintf("Template%d := template.New(%#+v)\n", TemplateNb, a.New.Name))
			TemplateNb = TemplateNb + 1
		case *NgoloFuzzOne_TemplateNgdotNew:
			if TemplateNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Template%d := Template%d.New(%#+v)\n", TemplateNb, TemplateResultsIndex, a.TemplateNgdotNew.Name))
			TemplateNb = TemplateNb + 1
			TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
		case *NgoloFuzzOne_TemplateNgdotName:
			if TemplateNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Template%d.Name()\n", TemplateResultsIndex))
			TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
		case *NgoloFuzzOne_TemplateNgdotDelims:
			if TemplateNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Template%d := Template%d.Delims(%#+v, %#+v)\n", TemplateNb, TemplateResultsIndex, a.TemplateNgdotDelims.Left, a.TemplateNgdotDelims.Right))
			TemplateNb = TemplateNb + 1
			TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
		case *NgoloFuzzOne_TemplateNgdotLookup:
			if TemplateNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Template%d := Template%d.Lookup(%#+v)\n", TemplateNb, TemplateResultsIndex, a.TemplateNgdotLookup.Name))
			TemplateNb = TemplateNb + 1
			TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
		case *NgoloFuzzOne_IsTrue:
			w.WriteString(fmt.Sprintf("template.IsTrue(%#+v)\n", a.IsTrue.Val))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_html_template
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the build fails if this generated code and the
// protoimpl runtime disagree on the supported generator version range.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// HTMLEscapeArgs is the generated protobuf message carrying the two
// recorded arguments (w, b) of a fuzzed template.HTMLEscape call.
type HTMLEscapeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	B             []byte                 `protobuf:"bytes,2,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to the zero value and re-binds its generated
// message info.
func (x *HTMLEscapeArgs) Reset() {
	*x = HTMLEscapeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders x with the protobuf text formatter.
func (x *HTMLEscapeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags HTMLEscapeArgs as a protobuf message.
func (*HTMLEscapeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, caching the message
// info on first use; a nil receiver falls back to MessageOf.
func (x *HTMLEscapeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HTMLEscapeArgs.ProtoReflect.Descriptor instead.
func (*HTMLEscapeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetW returns the w field; safe to call on a nil receiver.
func (x *HTMLEscapeArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}

// GetB returns the b field; safe to call on a nil receiver.
func (x *HTMLEscapeArgs) GetB() []byte {
	if x != nil {
		return x.B
	}
	return nil
}
type HTMLEscapeStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HTMLEscapeStringArgs) Reset() {
*x = HTMLEscapeStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HTMLEscapeStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HTMLEscapeStringArgs) ProtoMessage() {}
func (x *HTMLEscapeStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HTMLEscapeStringArgs.ProtoReflect.Descriptor instead.
func (*HTMLEscapeStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *HTMLEscapeStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
type JSEscapeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
B []byte `protobuf:"bytes,2,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *JSEscapeArgs) Reset() {
*x = JSEscapeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *JSEscapeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*JSEscapeArgs) ProtoMessage() {}
func (x *JSEscapeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use JSEscapeArgs.ProtoReflect.Descriptor instead.
func (*JSEscapeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *JSEscapeArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
func (x *JSEscapeArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
type JSEscapeStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *JSEscapeStringArgs) Reset() {
*x = JSEscapeStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *JSEscapeStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*JSEscapeStringArgs) ProtoMessage() {}
func (x *JSEscapeStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use JSEscapeStringArgs.ProtoReflect.Descriptor instead.
func (*JSEscapeStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *JSEscapeStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
type TemplateNgdotTemplatesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotTemplatesArgs) Reset() {
*x = TemplateNgdotTemplatesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotTemplatesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotTemplatesArgs) ProtoMessage() {}
func (x *TemplateNgdotTemplatesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotTemplatesArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotTemplatesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type TemplateNgdotExecuteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Wr []byte `protobuf:"bytes,1,opt,name=wr,proto3" json:"wr,omitempty"`
Data *NgoloFuzzAny `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotExecuteArgs) Reset() {
*x = TemplateNgdotExecuteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotExecuteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotExecuteArgs) ProtoMessage() {}
func (x *TemplateNgdotExecuteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotExecuteArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotExecuteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *TemplateNgdotExecuteArgs) GetWr() []byte {
if x != nil {
return x.Wr
}
return nil
}
func (x *TemplateNgdotExecuteArgs) GetData() *NgoloFuzzAny {
if x != nil {
return x.Data
}
return nil
}
type TemplateNgdotExecuteTemplateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Wr []byte `protobuf:"bytes,1,opt,name=wr,proto3" json:"wr,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Data *NgoloFuzzAny `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotExecuteTemplateArgs) Reset() {
*x = TemplateNgdotExecuteTemplateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotExecuteTemplateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotExecuteTemplateArgs) ProtoMessage() {}
func (x *TemplateNgdotExecuteTemplateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotExecuteTemplateArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotExecuteTemplateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *TemplateNgdotExecuteTemplateArgs) GetWr() []byte {
if x != nil {
return x.Wr
}
return nil
}
func (x *TemplateNgdotExecuteTemplateArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *TemplateNgdotExecuteTemplateArgs) GetData() *NgoloFuzzAny {
if x != nil {
return x.Data
}
return nil
}
type TemplateNgdotDefinedTemplatesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotDefinedTemplatesArgs) Reset() {
*x = TemplateNgdotDefinedTemplatesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotDefinedTemplatesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotDefinedTemplatesArgs) ProtoMessage() {}
func (x *TemplateNgdotDefinedTemplatesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotDefinedTemplatesArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotDefinedTemplatesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
type TemplateNgdotParseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotParseArgs) Reset() {
*x = TemplateNgdotParseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotParseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotParseArgs) ProtoMessage() {}
func (x *TemplateNgdotParseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotParseArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotParseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *TemplateNgdotParseArgs) GetText() string {
if x != nil {
return x.Text
}
return ""
}
type TemplateNgdotCloneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotCloneArgs) Reset() {
*x = TemplateNgdotCloneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotCloneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotCloneArgs) ProtoMessage() {}
func (x *TemplateNgdotCloneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotCloneArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotCloneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
type NewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
*x = NewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *NewArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type TemplateNgdotNewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotNewArgs) Reset() {
*x = TemplateNgdotNewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotNewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotNewArgs) ProtoMessage() {}
func (x *TemplateNgdotNewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotNewArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotNewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *TemplateNgdotNewArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type TemplateNgdotNameArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotNameArgs) Reset() {
*x = TemplateNgdotNameArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotNameArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotNameArgs) ProtoMessage() {}
func (x *TemplateNgdotNameArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotNameArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotNameArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
type TemplateNgdotDelimsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Left string `protobuf:"bytes,1,opt,name=left,proto3" json:"left,omitempty"`
Right string `protobuf:"bytes,2,opt,name=right,proto3" json:"right,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotDelimsArgs) Reset() {
*x = TemplateNgdotDelimsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotDelimsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotDelimsArgs) ProtoMessage() {}
func (x *TemplateNgdotDelimsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotDelimsArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotDelimsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
func (x *TemplateNgdotDelimsArgs) GetLeft() string {
if x != nil {
return x.Left
}
return ""
}
func (x *TemplateNgdotDelimsArgs) GetRight() string {
if x != nil {
return x.Right
}
return ""
}
type TemplateNgdotLookupArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotLookupArgs) Reset() {
*x = TemplateNgdotLookupArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotLookupArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotLookupArgs) ProtoMessage() {}
func (x *TemplateNgdotLookupArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotLookupArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotLookupArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
func (x *TemplateNgdotLookupArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type IsTrueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Val *NgoloFuzzAny `protobuf:"bytes,1,opt,name=val,proto3" json:"val,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IsTrueArgs) Reset() {
*x = IsTrueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IsTrueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IsTrueArgs) ProtoMessage() {}
func (x *IsTrueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IsTrueArgs.ProtoReflect.Descriptor instead.
func (*IsTrueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
func (x *IsTrueArgs) GetVal() *NgoloFuzzAny {
if x != nil {
return x.Val
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_HTMLEscape
// *NgoloFuzzOne_HTMLEscapeString
// *NgoloFuzzOne_JSEscape
// *NgoloFuzzOne_JSEscapeString
// *NgoloFuzzOne_TemplateNgdotTemplates
// *NgoloFuzzOne_TemplateNgdotExecute
// *NgoloFuzzOne_TemplateNgdotExecuteTemplate
// *NgoloFuzzOne_TemplateNgdotDefinedTemplates
// *NgoloFuzzOne_TemplateNgdotParse
// *NgoloFuzzOne_TemplateNgdotClone
// *NgoloFuzzOne_New
// *NgoloFuzzOne_TemplateNgdotNew
// *NgoloFuzzOne_TemplateNgdotName
// *NgoloFuzzOne_TemplateNgdotDelims
// *NgoloFuzzOne_TemplateNgdotLookup
// *NgoloFuzzOne_IsTrue
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// NgoloFuzzOne methods and accessors below are protoc-gen-go generated
// bindings; the message is a oneof selecting one fuzzed API call.
// NOTE(review): generated code — regenerate from ngolofuzz.proto rather
// than editing by hand.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetItem returns whichever oneof member is currently set, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Each getter below returns the corresponding oneof member if it is the
// one set, and nil otherwise.
func (x *NgoloFuzzOne) GetHTMLEscape() *HTMLEscapeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_HTMLEscape); ok {
return x.HTMLEscape
}
}
return nil
}
func (x *NgoloFuzzOne) GetHTMLEscapeString() *HTMLEscapeStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_HTMLEscapeString); ok {
return x.HTMLEscapeString
}
}
return nil
}
func (x *NgoloFuzzOne) GetJSEscape() *JSEscapeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_JSEscape); ok {
return x.JSEscape
}
}
return nil
}
func (x *NgoloFuzzOne) GetJSEscapeString() *JSEscapeStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_JSEscapeString); ok {
return x.JSEscapeString
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotTemplates() *TemplateNgdotTemplatesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotTemplates); ok {
return x.TemplateNgdotTemplates
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotExecute() *TemplateNgdotExecuteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotExecute); ok {
return x.TemplateNgdotExecute
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotExecuteTemplate() *TemplateNgdotExecuteTemplateArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotExecuteTemplate); ok {
return x.TemplateNgdotExecuteTemplate
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotDefinedTemplates() *TemplateNgdotDefinedTemplatesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotDefinedTemplates); ok {
return x.TemplateNgdotDefinedTemplates
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotParse() *TemplateNgdotParseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotParse); ok {
return x.TemplateNgdotParse
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotClone() *TemplateNgdotCloneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotClone); ok {
return x.TemplateNgdotClone
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew() *NewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
return x.New
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotNew() *TemplateNgdotNewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotNew); ok {
return x.TemplateNgdotNew
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotName() *TemplateNgdotNameArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotName); ok {
return x.TemplateNgdotName
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotDelims() *TemplateNgdotDelimsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotDelims); ok {
return x.TemplateNgdotDelims
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotLookup() *TemplateNgdotLookupArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotLookup); ok {
return x.TemplateNgdotLookup
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsTrue() *IsTrueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsTrue); ok {
return x.IsTrue
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every
// NgoloFuzzOne oneof wrapper type (protoc-gen-go generated).
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// One wrapper struct per oneof field; the protobuf tag carries the field
// number used on the wire.
type NgoloFuzzOne_HTMLEscape struct {
HTMLEscape *HTMLEscapeArgs `protobuf:"bytes,1,opt,name=HTMLEscape,proto3,oneof"`
}
type NgoloFuzzOne_HTMLEscapeString struct {
HTMLEscapeString *HTMLEscapeStringArgs `protobuf:"bytes,2,opt,name=HTMLEscapeString,proto3,oneof"`
}
type NgoloFuzzOne_JSEscape struct {
JSEscape *JSEscapeArgs `protobuf:"bytes,3,opt,name=JSEscape,proto3,oneof"`
}
type NgoloFuzzOne_JSEscapeString struct {
JSEscapeString *JSEscapeStringArgs `protobuf:"bytes,4,opt,name=JSEscapeString,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotTemplates struct {
TemplateNgdotTemplates *TemplateNgdotTemplatesArgs `protobuf:"bytes,5,opt,name=TemplateNgdotTemplates,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotExecute struct {
TemplateNgdotExecute *TemplateNgdotExecuteArgs `protobuf:"bytes,6,opt,name=TemplateNgdotExecute,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotExecuteTemplate struct {
TemplateNgdotExecuteTemplate *TemplateNgdotExecuteTemplateArgs `protobuf:"bytes,7,opt,name=TemplateNgdotExecuteTemplate,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotDefinedTemplates struct {
TemplateNgdotDefinedTemplates *TemplateNgdotDefinedTemplatesArgs `protobuf:"bytes,8,opt,name=TemplateNgdotDefinedTemplates,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotParse struct {
TemplateNgdotParse *TemplateNgdotParseArgs `protobuf:"bytes,9,opt,name=TemplateNgdotParse,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotClone struct {
TemplateNgdotClone *TemplateNgdotCloneArgs `protobuf:"bytes,10,opt,name=TemplateNgdotClone,proto3,oneof"`
}
type NgoloFuzzOne_New struct {
New *NewArgs `protobuf:"bytes,11,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotNew struct {
TemplateNgdotNew *TemplateNgdotNewArgs `protobuf:"bytes,12,opt,name=TemplateNgdotNew,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotName struct {
TemplateNgdotName *TemplateNgdotNameArgs `protobuf:"bytes,13,opt,name=TemplateNgdotName,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotDelims struct {
TemplateNgdotDelims *TemplateNgdotDelimsArgs `protobuf:"bytes,14,opt,name=TemplateNgdotDelims,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotLookup struct {
TemplateNgdotLookup *TemplateNgdotLookupArgs `protobuf:"bytes,15,opt,name=TemplateNgdotLookup,proto3,oneof"`
}
type NgoloFuzzOne_IsTrue struct {
IsTrue *IsTrueArgs `protobuf:"bytes,16,opt,name=IsTrue,proto3,oneof"`
}
// Marker methods sealing the oneof membership.
func (*NgoloFuzzOne_HTMLEscape) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_HTMLEscapeString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_JSEscape) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_JSEscapeString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotTemplates) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotExecute) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotExecuteTemplate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotDefinedTemplates) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotParse) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotClone) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotNew) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotName) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotDelims) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotLookup) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsTrue) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated oneof message carrying one scalar value
// (double, int64, bool, string, or bytes) used as a dynamic argument.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Scalar getters return the member value when that member is set,
// otherwise the scalar's zero value.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level generated message: the ordered sequence
// of fuzzed calls consumed by FuzzNG_List.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the runtime descriptor for ngolofuzz.proto,
// built lazily by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto, embedded as an escaped string. Generated data — do not
// edit the escape sequences by hand.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\",\n" +
"\x0eHTMLEscapeArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\f\n" +
"\x01b\x18\x02 \x01(\fR\x01b\"$\n" +
"\x14HTMLEscapeStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"*\n" +
"\fJSEscapeArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\f\n" +
"\x01b\x18\x02 \x01(\fR\x01b\"\"\n" +
"\x12JSEscapeStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x1c\n" +
"\x1aTemplateNgdotTemplatesArgs\"W\n" +
"\x18TemplateNgdotExecuteArgs\x12\x0e\n" +
"\x02wr\x18\x01 \x01(\fR\x02wr\x12+\n" +
"\x04data\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x04data\"s\n" +
" TemplateNgdotExecuteTemplateArgs\x12\x0e\n" +
"\x02wr\x18\x01 \x01(\fR\x02wr\x12\x12\n" +
"\x04name\x18\x02 \x01(\tR\x04name\x12+\n" +
"\x04data\x18\x03 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x04data\"#\n" +
"!TemplateNgdotDefinedTemplatesArgs\",\n" +
"\x16TemplateNgdotParseArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\tR\x04text\"\x18\n" +
"\x16TemplateNgdotCloneArgs\"\x1d\n" +
"\aNewArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"*\n" +
"\x14TemplateNgdotNewArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x17\n" +
"\x15TemplateNgdotNameArgs\"C\n" +
"\x17TemplateNgdotDelimsArgs\x12\x12\n" +
"\x04left\x18\x01 \x01(\tR\x04left\x12\x14\n" +
"\x05right\x18\x02 \x01(\tR\x05right\"-\n" +
"\x17TemplateNgdotLookupArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"7\n" +
"\n" +
"IsTrueArgs\x12)\n" +
"\x03val\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x03val\"\x9b\n" +
"\n" +
"\fNgoloFuzzOne\x12;\n" +
"\n" +
"HTMLEscape\x18\x01 \x01(\v2\x19.ngolofuzz.HTMLEscapeArgsH\x00R\n" +
"HTMLEscape\x12M\n" +
"\x10HTMLEscapeString\x18\x02 \x01(\v2\x1f.ngolofuzz.HTMLEscapeStringArgsH\x00R\x10HTMLEscapeString\x125\n" +
"\bJSEscape\x18\x03 \x01(\v2\x17.ngolofuzz.JSEscapeArgsH\x00R\bJSEscape\x12G\n" +
"\x0eJSEscapeString\x18\x04 \x01(\v2\x1d.ngolofuzz.JSEscapeStringArgsH\x00R\x0eJSEscapeString\x12_\n" +
"\x16TemplateNgdotTemplates\x18\x05 \x01(\v2%.ngolofuzz.TemplateNgdotTemplatesArgsH\x00R\x16TemplateNgdotTemplates\x12Y\n" +
"\x14TemplateNgdotExecute\x18\x06 \x01(\v2#.ngolofuzz.TemplateNgdotExecuteArgsH\x00R\x14TemplateNgdotExecute\x12q\n" +
"\x1cTemplateNgdotExecuteTemplate\x18\a \x01(\v2+.ngolofuzz.TemplateNgdotExecuteTemplateArgsH\x00R\x1cTemplateNgdotExecuteTemplate\x12t\n" +
"\x1dTemplateNgdotDefinedTemplates\x18\b \x01(\v2,.ngolofuzz.TemplateNgdotDefinedTemplatesArgsH\x00R\x1dTemplateNgdotDefinedTemplates\x12S\n" +
"\x12TemplateNgdotParse\x18\t \x01(\v2!.ngolofuzz.TemplateNgdotParseArgsH\x00R\x12TemplateNgdotParse\x12S\n" +
"\x12TemplateNgdotClone\x18\n" +
" \x01(\v2!.ngolofuzz.TemplateNgdotCloneArgsH\x00R\x12TemplateNgdotClone\x12&\n" +
"\x03New\x18\v \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12M\n" +
"\x10TemplateNgdotNew\x18\f \x01(\v2\x1f.ngolofuzz.TemplateNgdotNewArgsH\x00R\x10TemplateNgdotNew\x12P\n" +
"\x11TemplateNgdotName\x18\r \x01(\v2 .ngolofuzz.TemplateNgdotNameArgsH\x00R\x11TemplateNgdotName\x12V\n" +
"\x13TemplateNgdotDelims\x18\x0e \x01(\v2\".ngolofuzz.TemplateNgdotDelimsArgsH\x00R\x13TemplateNgdotDelims\x12V\n" +
"\x13TemplateNgdotLookup\x18\x0f \x01(\v2\".ngolofuzz.TemplateNgdotLookupArgsH\x00R\x13TemplateNgdotLookup\x12/\n" +
"\x06IsTrue\x18\x10 \x01(\v2\x15.ngolofuzz.IsTrueArgsH\x00R\x06IsTrueB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_html_templateb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor once
// (guarded by rawDescOnce) and returns the cached bytes.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Generated type tables: one MessageInfo slot per message, the Go type for
// each descriptor index, and the flattened dependency index list.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
var file_ngolofuzz_proto_goTypes = []any{
(*HTMLEscapeArgs)(nil), // 0: ngolofuzz.HTMLEscapeArgs
(*HTMLEscapeStringArgs)(nil), // 1: ngolofuzz.HTMLEscapeStringArgs
(*JSEscapeArgs)(nil), // 2: ngolofuzz.JSEscapeArgs
(*JSEscapeStringArgs)(nil), // 3: ngolofuzz.JSEscapeStringArgs
(*TemplateNgdotTemplatesArgs)(nil), // 4: ngolofuzz.TemplateNgdotTemplatesArgs
(*TemplateNgdotExecuteArgs)(nil), // 5: ngolofuzz.TemplateNgdotExecuteArgs
(*TemplateNgdotExecuteTemplateArgs)(nil), // 6: ngolofuzz.TemplateNgdotExecuteTemplateArgs
(*TemplateNgdotDefinedTemplatesArgs)(nil), // 7: ngolofuzz.TemplateNgdotDefinedTemplatesArgs
(*TemplateNgdotParseArgs)(nil), // 8: ngolofuzz.TemplateNgdotParseArgs
(*TemplateNgdotCloneArgs)(nil), // 9: ngolofuzz.TemplateNgdotCloneArgs
(*NewArgs)(nil), // 10: ngolofuzz.NewArgs
(*TemplateNgdotNewArgs)(nil), // 11: ngolofuzz.TemplateNgdotNewArgs
(*TemplateNgdotNameArgs)(nil), // 12: ngolofuzz.TemplateNgdotNameArgs
(*TemplateNgdotDelimsArgs)(nil), // 13: ngolofuzz.TemplateNgdotDelimsArgs
(*TemplateNgdotLookupArgs)(nil), // 14: ngolofuzz.TemplateNgdotLookupArgs
(*IsTrueArgs)(nil), // 15: ngolofuzz.IsTrueArgs
(*NgoloFuzzOne)(nil), // 16: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 17: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 18: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
17, // 0: ngolofuzz.TemplateNgdotExecuteArgs.data:type_name -> ngolofuzz.NgoloFuzzAny
17, // 1: ngolofuzz.TemplateNgdotExecuteTemplateArgs.data:type_name -> ngolofuzz.NgoloFuzzAny
17, // 2: ngolofuzz.IsTrueArgs.val:type_name -> ngolofuzz.NgoloFuzzAny
0, // 3: ngolofuzz.NgoloFuzzOne.HTMLEscape:type_name -> ngolofuzz.HTMLEscapeArgs
1, // 4: ngolofuzz.NgoloFuzzOne.HTMLEscapeString:type_name -> ngolofuzz.HTMLEscapeStringArgs
2, // 5: ngolofuzz.NgoloFuzzOne.JSEscape:type_name -> ngolofuzz.JSEscapeArgs
3, // 6: ngolofuzz.NgoloFuzzOne.JSEscapeString:type_name -> ngolofuzz.JSEscapeStringArgs
4, // 7: ngolofuzz.NgoloFuzzOne.TemplateNgdotTemplates:type_name -> ngolofuzz.TemplateNgdotTemplatesArgs
5, // 8: ngolofuzz.NgoloFuzzOne.TemplateNgdotExecute:type_name -> ngolofuzz.TemplateNgdotExecuteArgs
6, // 9: ngolofuzz.NgoloFuzzOne.TemplateNgdotExecuteTemplate:type_name -> ngolofuzz.TemplateNgdotExecuteTemplateArgs
7, // 10: ngolofuzz.NgoloFuzzOne.TemplateNgdotDefinedTemplates:type_name -> ngolofuzz.TemplateNgdotDefinedTemplatesArgs
8, // 11: ngolofuzz.NgoloFuzzOne.TemplateNgdotParse:type_name -> ngolofuzz.TemplateNgdotParseArgs
9, // 12: ngolofuzz.NgoloFuzzOne.TemplateNgdotClone:type_name -> ngolofuzz.TemplateNgdotCloneArgs
10, // 13: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
11, // 14: ngolofuzz.NgoloFuzzOne.TemplateNgdotNew:type_name -> ngolofuzz.TemplateNgdotNewArgs
12, // 15: ngolofuzz.NgoloFuzzOne.TemplateNgdotName:type_name -> ngolofuzz.TemplateNgdotNameArgs
13, // 16: ngolofuzz.NgoloFuzzOne.TemplateNgdotDelims:type_name -> ngolofuzz.TemplateNgdotDelimsArgs
14, // 17: ngolofuzz.NgoloFuzzOne.TemplateNgdotLookup:type_name -> ngolofuzz.TemplateNgdotLookupArgs
15, // 18: ngolofuzz.NgoloFuzzOne.IsTrue:type_name -> ngolofuzz.IsTrueArgs
16, // 19: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
20, // [20:20] is the sub-list for method output_type
20, // [20:20] is the sub-list for method input_type
20, // [20:20] is the sub-list for extension type_name
20, // [20:20] is the sub-list for extension extendee
0, // [0:20] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the oneof wrapper types and builds
// the file descriptor exactly once; safe to call repeatedly.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[16].OneofWrappers = []any{
(*NgoloFuzzOne_HTMLEscape)(nil),
(*NgoloFuzzOne_HTMLEscapeString)(nil),
(*NgoloFuzzOne_JSEscape)(nil),
(*NgoloFuzzOne_JSEscapeString)(nil),
(*NgoloFuzzOne_TemplateNgdotTemplates)(nil),
(*NgoloFuzzOne_TemplateNgdotExecute)(nil),
(*NgoloFuzzOne_TemplateNgdotExecuteTemplate)(nil),
(*NgoloFuzzOne_TemplateNgdotDefinedTemplates)(nil),
(*NgoloFuzzOne_TemplateNgdotParse)(nil),
(*NgoloFuzzOne_TemplateNgdotClone)(nil),
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_TemplateNgdotNew)(nil),
(*NgoloFuzzOne_TemplateNgdotName)(nil),
(*NgoloFuzzOne_TemplateNgdotDelims)(nil),
(*NgoloFuzzOne_TemplateNgdotLookup)(nil),
(*NgoloFuzzOne_IsTrue)(nil),
}
file_ngolofuzz_proto_msgTypes[17].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 19,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release the construction-time tables so they can be collected.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_image
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"image"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn-like stub: reads drain buf,
// writes are discarded. It lets fuzzed code exercise network paths
// deterministically.
type FuzzingConn struct {
	buf    []byte // fuzzer-supplied readable contents
	offset int    // number of bytes already consumed from buf
}

// Read copies unread bytes from buf into b and advances the offset.
// It returns io.EOF once the buffer is exhausted.
//
// Bug fix: the old implementation compared len(b) against
// len(buf)+offset (instead of the remaining byte count) and then
// advanced offset by len(b), so a read near the end of the buffer could
// report more bytes than were actually copied, handing the caller
// uninitialized tail bytes. copy() already truncates to
// min(len(b), remaining), which makes both branches unnecessary.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written; the fuzzing
// connection never fails a write.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a fixed dummy net.Addr used by FuzzingConn.
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr and RemoteAddr return fixed dummy addresses.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// Deadlines are accepted and ignored: the in-memory connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps a as the readable contents of a new in-memory
// FuzzingConn (offset starts at zero).
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit these generic helpers when the fuzzed API needs them.

// CreateBigInt interprets a as big-endian unsigned bytes and returns the
// corresponding non-negative big.Int.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a through a buffered reader, as many stdlib
// APIs under fuzz expect an io.Reader rather than a byte slice.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 element to the platform int size,
// returning a new slice of the same length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for idx, v := range a {
		out[idx] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 element to its low 16 bits,
// returning a new slice of the same length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for idx, v := range a {
		out[idx] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s decoded as UTF-8 (utf8.RuneError
// for a malformed leading sequence), or NUL when s is empty.
func GetRune(s string) rune {
	if rs := []rune(s); len(rs) > 0 {
		return rs[0]
	}
	return '\x00'
}
// YCbCrSubsampleRatioNewFromFuzz maps the fuzzer-provided enum value onto
// the image package's chroma subsample ratios; every unrecognized value
// (including 0) falls back to 4:4:4.
func YCbCrSubsampleRatioNewFromFuzz(p YCbCrSubsampleRatioEnum) image.YCbCrSubsampleRatio {
	switch p {
	case 1:
		return image.YCbCrSubsampleRatio422
	case 2:
		return image.YCbCrSubsampleRatio420
	case 3:
		return image.YCbCrSubsampleRatio440
	case 4:
		return image.YCbCrSubsampleRatio411
	case 5:
		return image.YCbCrSubsampleRatio410
	default:
		return image.YCbCrSubsampleRatio444
	}
}
// ConvertYCbCrSubsampleRatioNewFromFuzz maps each fuzzer enum in a
// element-wise through YCbCrSubsampleRatioNewFromFuzz.
func ConvertYCbCrSubsampleRatioNewFromFuzz(a []YCbCrSubsampleRatioEnum) []image.YCbCrSubsampleRatio {
	out := make([]image.YCbCrSubsampleRatio, len(a))
	for idx, p := range a {
		out[idx] = YCbCrSubsampleRatioNewFromFuzz(p)
	}
	return out
}
// FuzzNG_valid runs the harness on data that is expected to be a valid
// protobuf-encoded NgoloFuzzList (e.g. produced by libprotobuf-mutator);
// it panics if decoding fails, since that indicates a harness bug rather
// than an interesting input.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
// Deliberate: unmarshal failure is registered before the recover
// below, so this panic is NOT swallowed.
panic("Failed to unmarshal LPM generated variables")
}
// Swallow string panics only (the harness uses them to abort a run);
// any other panic value is a real finding and is re-raised.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// FuzzNG_unsure is the variant for raw fuzzer input: we are unsure the
// input is a valid protobuf, so decoding failures simply reject the input
// instead of panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics only (harness-internal aborts); anything else
// is a real finding and is re-raised.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time FUZZ_NG_REPRODUCER setup in FuzzNG_List.
var initialized bool
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var PalettedResults []*image.Paletted
PalettedResultsIndex := 0
var NRGBAResults []*image.NRGBA
NRGBAResultsIndex := 0
var NYCbCrAResults []*image.NYCbCrA
NYCbCrAResultsIndex := 0
var RGBAResults []*image.RGBA
RGBAResultsIndex := 0
var RGBA64Results []*image.RGBA64
RGBA64ResultsIndex := 0
var GrayResults []*image.Gray
GrayResultsIndex := 0
var PointResults []*image.Point
PointResultsIndex := 0
var RectangleResults []*image.Rectangle
RectangleResultsIndex := 0
var NRGBA64Results []*image.NRGBA64
NRGBA64ResultsIndex := 0
var AlphaResults []*image.Alpha
AlphaResultsIndex := 0
var CMYKResults []*image.CMYK
CMYKResultsIndex := 0
var UniformResults []*image.Uniform
UniformResultsIndex := 0
var YCbCrResults []*image.YCbCr
YCbCrResultsIndex := 0
var Alpha16Results []*image.Alpha16
Alpha16ResultsIndex := 0
var Gray16Results []*image.Gray16
Gray16ResultsIndex := 0
for l := range gen.List {
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Decode:
arg0 := bytes.NewReader(a.Decode.R)
_, _, r2 := image.Decode(arg0)
if r2 != nil{
r2.Error()
return 0
}
case *NgoloFuzzOne_DecodeConfig:
arg0 := bytes.NewReader(a.DecodeConfig.R)
_, _, r2 := image.DecodeConfig(arg0)
if r2 != nil{
r2.Error()
return 0
}
case *NgoloFuzzOne_PointNgdotString:
if len(PointResults) == 0 {
continue
}
arg0 := PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
arg0.String()
case *NgoloFuzzOne_PointNgdotAdd:
if len(PointResults) == 0 {
continue
}
arg0 := PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
if len(PointResults) == 0 {
continue
}
arg1 := *PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
arg0.Add(arg1)
case *NgoloFuzzOne_PointNgdotSub:
if len(PointResults) == 0 {
continue
}
arg0 := PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
if len(PointResults) == 0 {
continue
}
arg1 := *PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
arg0.Sub(arg1)
case *NgoloFuzzOne_PointNgdotMul:
if len(PointResults) == 0 {
continue
}
arg0 := PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
arg1 := int(a.PointNgdotMul.K)
arg0.Mul(arg1)
case *NgoloFuzzOne_PointNgdotIn:
if len(PointResults) == 0 {
continue
}
arg0 := PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.In(arg1)
case *NgoloFuzzOne_PointNgdotMod:
if len(PointResults) == 0 {
continue
}
arg0 := PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Mod(arg1)
case *NgoloFuzzOne_PointNgdotEq:
if len(PointResults) == 0 {
continue
}
arg0 := PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
if len(PointResults) == 0 {
continue
}
arg1 := *PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
arg0.Eq(arg1)
case *NgoloFuzzOne_Pt:
arg0 := int(a.Pt.X)
arg1 := int(a.Pt.Y)
image.Pt(arg0, arg1)
case *NgoloFuzzOne_RectangleNgdotString:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.String()
case *NgoloFuzzOne_RectangleNgdotDx:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Dx()
case *NgoloFuzzOne_RectangleNgdotDy:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Dy()
case *NgoloFuzzOne_RectangleNgdotSize:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Size()
case *NgoloFuzzOne_RectangleNgdotAdd:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
if len(PointResults) == 0 {
continue
}
arg1 := *PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
arg0.Add(arg1)
case *NgoloFuzzOne_RectangleNgdotSub:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
if len(PointResults) == 0 {
continue
}
arg1 := *PointResults[PointResultsIndex]
PointResultsIndex = (PointResultsIndex + 1) % len(PointResults)
arg0.Sub(arg1)
case *NgoloFuzzOne_RectangleNgdotInset:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg1 := int(a.RectangleNgdotInset.N)
arg0.Inset(arg1)
case *NgoloFuzzOne_RectangleNgdotIntersect:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Intersect(arg1)
case *NgoloFuzzOne_RectangleNgdotUnion:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Union(arg1)
case *NgoloFuzzOne_RectangleNgdotEmpty:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Empty()
case *NgoloFuzzOne_RectangleNgdotEq:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Eq(arg1)
case *NgoloFuzzOne_RectangleNgdotOverlaps:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Overlaps(arg1)
case *NgoloFuzzOne_RectangleNgdotIn:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.In(arg1)
case *NgoloFuzzOne_RectangleNgdotCanon:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Canon()
case *NgoloFuzzOne_RectangleNgdotAt:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg1 := int(a.RectangleNgdotAt.X)
arg2 := int(a.RectangleNgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_RectangleNgdotRGBA64At:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg1 := int(a.RectangleNgdotRGBA64At.X)
arg2 := int(a.RectangleNgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_RectangleNgdotBounds:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.Bounds()
case *NgoloFuzzOne_RectangleNgdotColorModel:
if len(RectangleResults) == 0 {
continue
}
arg0 := RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.ColorModel()
case *NgoloFuzzOne_Rect:
arg0 := int(a.Rect.X0)
arg1 := int(a.Rect.Y0)
arg2 := int(a.Rect.X1)
arg3 := int(a.Rect.Y1)
image.Rect(arg0, arg1, arg2 % 0x10001, arg3 % 0x10001)
case *NgoloFuzzOne_RGBANgdotColorModel:
if len(RGBAResults) == 0 {
continue
}
arg0 := RGBAResults[RGBAResultsIndex]
RGBAResultsIndex = (RGBAResultsIndex + 1) % len(RGBAResults)
arg0.ColorModel()
case *NgoloFuzzOne_RGBANgdotBounds:
if len(RGBAResults) == 0 {
continue
}
arg0 := RGBAResults[RGBAResultsIndex]
RGBAResultsIndex = (RGBAResultsIndex + 1) % len(RGBAResults)
arg0.Bounds()
case *NgoloFuzzOne_RGBANgdotAt:
if len(RGBAResults) == 0 {
continue
}
arg0 := RGBAResults[RGBAResultsIndex]
RGBAResultsIndex = (RGBAResultsIndex + 1) % len(RGBAResults)
arg1 := int(a.RGBANgdotAt.X)
arg2 := int(a.RGBANgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_RGBANgdotRGBA64At:
if len(RGBAResults) == 0 {
continue
}
arg0 := RGBAResults[RGBAResultsIndex]
RGBAResultsIndex = (RGBAResultsIndex + 1) % len(RGBAResults)
arg1 := int(a.RGBANgdotRGBA64At.X)
arg2 := int(a.RGBANgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_RGBANgdotRGBAAt:
if len(RGBAResults) == 0 {
continue
}
arg0 := RGBAResults[RGBAResultsIndex]
RGBAResultsIndex = (RGBAResultsIndex + 1) % len(RGBAResults)
arg1 := int(a.RGBANgdotRGBAAt.X)
arg2 := int(a.RGBANgdotRGBAAt.Y)
arg0.RGBAAt(arg1, arg2)
case *NgoloFuzzOne_RGBANgdotPixOffset:
if len(RGBAResults) == 0 {
continue
}
arg0 := RGBAResults[RGBAResultsIndex]
RGBAResultsIndex = (RGBAResultsIndex + 1) % len(RGBAResults)
arg1 := int(a.RGBANgdotPixOffset.X)
arg2 := int(a.RGBANgdotPixOffset.Y)
arg0.PixOffset(arg1, arg2)
case *NgoloFuzzOne_RGBANgdotSubImage:
if len(RGBAResults) == 0 {
continue
}
arg0 := RGBAResults[RGBAResultsIndex]
RGBAResultsIndex = (RGBAResultsIndex + 1) % len(RGBAResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_RGBANgdotOpaque:
if len(RGBAResults) == 0 {
continue
}
arg0 := RGBAResults[RGBAResultsIndex]
RGBAResultsIndex = (RGBAResultsIndex + 1) % len(RGBAResults)
arg0.Opaque()
case *NgoloFuzzOne_NewRGBA:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
image.NewRGBA(arg0)
case *NgoloFuzzOne_RGBA64NgdotColorModel:
if len(RGBA64Results) == 0 {
continue
}
arg0 := RGBA64Results[RGBA64ResultsIndex]
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % len(RGBA64Results)
arg0.ColorModel()
case *NgoloFuzzOne_RGBA64NgdotBounds:
if len(RGBA64Results) == 0 {
continue
}
arg0 := RGBA64Results[RGBA64ResultsIndex]
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % len(RGBA64Results)
arg0.Bounds()
case *NgoloFuzzOne_RGBA64NgdotAt:
if len(RGBA64Results) == 0 {
continue
}
arg0 := RGBA64Results[RGBA64ResultsIndex]
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % len(RGBA64Results)
arg1 := int(a.RGBA64NgdotAt.X)
arg2 := int(a.RGBA64NgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_RGBA64NgdotRGBA64At:
if len(RGBA64Results) == 0 {
continue
}
arg0 := RGBA64Results[RGBA64ResultsIndex]
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % len(RGBA64Results)
arg1 := int(a.RGBA64NgdotRGBA64At.X)
arg2 := int(a.RGBA64NgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_RGBA64NgdotPixOffset:
if len(RGBA64Results) == 0 {
continue
}
arg0 := RGBA64Results[RGBA64ResultsIndex]
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % len(RGBA64Results)
arg1 := int(a.RGBA64NgdotPixOffset.X)
arg2 := int(a.RGBA64NgdotPixOffset.Y)
arg0.PixOffset(arg1, arg2)
case *NgoloFuzzOne_RGBA64NgdotSubImage:
if len(RGBA64Results) == 0 {
continue
}
arg0 := RGBA64Results[RGBA64ResultsIndex]
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % len(RGBA64Results)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_RGBA64NgdotOpaque:
if len(RGBA64Results) == 0 {
continue
}
arg0 := RGBA64Results[RGBA64ResultsIndex]
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % len(RGBA64Results)
arg0.Opaque()
case *NgoloFuzzOne_NewRGBA64:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
image.NewRGBA64(arg0)
case *NgoloFuzzOne_NRGBANgdotColorModel:
if len(NRGBAResults) == 0 {
continue
}
arg0 := NRGBAResults[NRGBAResultsIndex]
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % len(NRGBAResults)
arg0.ColorModel()
case *NgoloFuzzOne_NRGBANgdotBounds:
if len(NRGBAResults) == 0 {
continue
}
arg0 := NRGBAResults[NRGBAResultsIndex]
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % len(NRGBAResults)
arg0.Bounds()
case *NgoloFuzzOne_NRGBANgdotAt:
if len(NRGBAResults) == 0 {
continue
}
arg0 := NRGBAResults[NRGBAResultsIndex]
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % len(NRGBAResults)
arg1 := int(a.NRGBANgdotAt.X)
arg2 := int(a.NRGBANgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_NRGBANgdotRGBA64At:
if len(NRGBAResults) == 0 {
continue
}
arg0 := NRGBAResults[NRGBAResultsIndex]
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % len(NRGBAResults)
arg1 := int(a.NRGBANgdotRGBA64At.X)
arg2 := int(a.NRGBANgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_NRGBANgdotNRGBAAt:
if len(NRGBAResults) == 0 {
continue
}
arg0 := NRGBAResults[NRGBAResultsIndex]
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % len(NRGBAResults)
arg1 := int(a.NRGBANgdotNRGBAAt.X)
arg2 := int(a.NRGBANgdotNRGBAAt.Y)
arg0.NRGBAAt(arg1, arg2)
case *NgoloFuzzOne_NRGBANgdotPixOffset:
if len(NRGBAResults) == 0 {
continue
}
arg0 := NRGBAResults[NRGBAResultsIndex]
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % len(NRGBAResults)
arg1 := int(a.NRGBANgdotPixOffset.X)
arg2 := int(a.NRGBANgdotPixOffset.Y)
arg0.PixOffset(arg1, arg2)
case *NgoloFuzzOne_NRGBANgdotSubImage:
if len(NRGBAResults) == 0 {
continue
}
arg0 := NRGBAResults[NRGBAResultsIndex]
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % len(NRGBAResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_NRGBANgdotOpaque:
if len(NRGBAResults) == 0 {
continue
}
arg0 := NRGBAResults[NRGBAResultsIndex]
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % len(NRGBAResults)
arg0.Opaque()
case *NgoloFuzzOne_NewNRGBA:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
image.NewNRGBA(arg0)
case *NgoloFuzzOne_NRGBA64NgdotColorModel:
if len(NRGBA64Results) == 0 {
continue
}
arg0 := NRGBA64Results[NRGBA64ResultsIndex]
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % len(NRGBA64Results)
arg0.ColorModel()
case *NgoloFuzzOne_NRGBA64NgdotBounds:
if len(NRGBA64Results) == 0 {
continue
}
arg0 := NRGBA64Results[NRGBA64ResultsIndex]
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % len(NRGBA64Results)
arg0.Bounds()
case *NgoloFuzzOne_NRGBA64NgdotAt:
if len(NRGBA64Results) == 0 {
continue
}
arg0 := NRGBA64Results[NRGBA64ResultsIndex]
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % len(NRGBA64Results)
arg1 := int(a.NRGBA64NgdotAt.X)
arg2 := int(a.NRGBA64NgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_NRGBA64NgdotRGBA64At:
if len(NRGBA64Results) == 0 {
continue
}
arg0 := NRGBA64Results[NRGBA64ResultsIndex]
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % len(NRGBA64Results)
arg1 := int(a.NRGBA64NgdotRGBA64At.X)
arg2 := int(a.NRGBA64NgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_NRGBA64NgdotNRGBA64At:
if len(NRGBA64Results) == 0 {
continue
}
arg0 := NRGBA64Results[NRGBA64ResultsIndex]
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % len(NRGBA64Results)
arg1 := int(a.NRGBA64NgdotNRGBA64At.X)
arg2 := int(a.NRGBA64NgdotNRGBA64At.Y)
arg0.NRGBA64At(arg1, arg2)
case *NgoloFuzzOne_NRGBA64NgdotPixOffset:
if len(NRGBA64Results) == 0 {
continue
}
arg0 := NRGBA64Results[NRGBA64ResultsIndex]
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % len(NRGBA64Results)
arg1 := int(a.NRGBA64NgdotPixOffset.X)
arg2 := int(a.NRGBA64NgdotPixOffset.Y)
arg0.PixOffset(arg1, arg2)
case *NgoloFuzzOne_NRGBA64NgdotSubImage:
if len(NRGBA64Results) == 0 {
continue
}
arg0 := NRGBA64Results[NRGBA64ResultsIndex]
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % len(NRGBA64Results)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_NRGBA64NgdotOpaque:
if len(NRGBA64Results) == 0 {
continue
}
arg0 := NRGBA64Results[NRGBA64ResultsIndex]
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % len(NRGBA64Results)
arg0.Opaque()
case *NgoloFuzzOne_NewNRGBA64:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
image.NewNRGBA64(arg0)
case *NgoloFuzzOne_AlphaNgdotColorModel:
if len(AlphaResults) == 0 {
continue
}
arg0 := AlphaResults[AlphaResultsIndex]
AlphaResultsIndex = (AlphaResultsIndex + 1) % len(AlphaResults)
arg0.ColorModel()
case *NgoloFuzzOne_AlphaNgdotBounds:
if len(AlphaResults) == 0 {
continue
}
arg0 := AlphaResults[AlphaResultsIndex]
AlphaResultsIndex = (AlphaResultsIndex + 1) % len(AlphaResults)
arg0.Bounds()
case *NgoloFuzzOne_AlphaNgdotAt:
if len(AlphaResults) == 0 {
continue
}
arg0 := AlphaResults[AlphaResultsIndex]
AlphaResultsIndex = (AlphaResultsIndex + 1) % len(AlphaResults)
arg1 := int(a.AlphaNgdotAt.X)
arg2 := int(a.AlphaNgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_AlphaNgdotRGBA64At:
if len(AlphaResults) == 0 {
continue
}
arg0 := AlphaResults[AlphaResultsIndex]
AlphaResultsIndex = (AlphaResultsIndex + 1) % len(AlphaResults)
arg1 := int(a.AlphaNgdotRGBA64At.X)
arg2 := int(a.AlphaNgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_AlphaNgdotAlphaAt:
if len(AlphaResults) == 0 {
continue
}
arg0 := AlphaResults[AlphaResultsIndex]
AlphaResultsIndex = (AlphaResultsIndex + 1) % len(AlphaResults)
arg1 := int(a.AlphaNgdotAlphaAt.X)
arg2 := int(a.AlphaNgdotAlphaAt.Y)
arg0.AlphaAt(arg1, arg2)
case *NgoloFuzzOne_AlphaNgdotPixOffset:
if len(AlphaResults) == 0 {
continue
}
arg0 := AlphaResults[AlphaResultsIndex]
AlphaResultsIndex = (AlphaResultsIndex + 1) % len(AlphaResults)
arg1 := int(a.AlphaNgdotPixOffset.X)
arg2 := int(a.AlphaNgdotPixOffset.Y)
arg0.PixOffset(arg1, arg2)
case *NgoloFuzzOne_AlphaNgdotSubImage:
if len(AlphaResults) == 0 {
continue
}
arg0 := AlphaResults[AlphaResultsIndex]
AlphaResultsIndex = (AlphaResultsIndex + 1) % len(AlphaResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_AlphaNgdotOpaque:
if len(AlphaResults) == 0 {
continue
}
arg0 := AlphaResults[AlphaResultsIndex]
AlphaResultsIndex = (AlphaResultsIndex + 1) % len(AlphaResults)
arg0.Opaque()
case *NgoloFuzzOne_NewAlpha:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
image.NewAlpha(arg0)
case *NgoloFuzzOne_Alpha16NgdotColorModel:
if len(Alpha16Results) == 0 {
continue
}
arg0 := Alpha16Results[Alpha16ResultsIndex]
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % len(Alpha16Results)
arg0.ColorModel()
case *NgoloFuzzOne_Alpha16NgdotBounds:
if len(Alpha16Results) == 0 {
continue
}
arg0 := Alpha16Results[Alpha16ResultsIndex]
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % len(Alpha16Results)
arg0.Bounds()
case *NgoloFuzzOne_Alpha16NgdotAt:
if len(Alpha16Results) == 0 {
continue
}
arg0 := Alpha16Results[Alpha16ResultsIndex]
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % len(Alpha16Results)
arg1 := int(a.Alpha16NgdotAt.X)
arg2 := int(a.Alpha16NgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_Alpha16NgdotRGBA64At:
if len(Alpha16Results) == 0 {
continue
}
arg0 := Alpha16Results[Alpha16ResultsIndex]
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % len(Alpha16Results)
arg1 := int(a.Alpha16NgdotRGBA64At.X)
arg2 := int(a.Alpha16NgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_Alpha16NgdotAlpha16At:
if len(Alpha16Results) == 0 {
continue
}
arg0 := Alpha16Results[Alpha16ResultsIndex]
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % len(Alpha16Results)
arg1 := int(a.Alpha16NgdotAlpha16At.X)
arg2 := int(a.Alpha16NgdotAlpha16At.Y)
arg0.Alpha16At(arg1, arg2)
case *NgoloFuzzOne_Alpha16NgdotPixOffset:
if len(Alpha16Results) == 0 {
continue
}
arg0 := Alpha16Results[Alpha16ResultsIndex]
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % len(Alpha16Results)
arg1 := int(a.Alpha16NgdotPixOffset.X)
arg2 := int(a.Alpha16NgdotPixOffset.Y)
arg0.PixOffset(arg1, arg2)
case *NgoloFuzzOne_Alpha16NgdotSubImage:
if len(Alpha16Results) == 0 {
continue
}
arg0 := Alpha16Results[Alpha16ResultsIndex]
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % len(Alpha16Results)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_Alpha16NgdotOpaque:
if len(Alpha16Results) == 0 {
continue
}
arg0 := Alpha16Results[Alpha16ResultsIndex]
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % len(Alpha16Results)
arg0.Opaque()
case *NgoloFuzzOne_NewAlpha16:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
image.NewAlpha16(arg0)
case *NgoloFuzzOne_GrayNgdotColorModel:
if len(GrayResults) == 0 {
continue
}
arg0 := GrayResults[GrayResultsIndex]
GrayResultsIndex = (GrayResultsIndex + 1) % len(GrayResults)
arg0.ColorModel()
case *NgoloFuzzOne_GrayNgdotBounds:
if len(GrayResults) == 0 {
continue
}
arg0 := GrayResults[GrayResultsIndex]
GrayResultsIndex = (GrayResultsIndex + 1) % len(GrayResults)
arg0.Bounds()
case *NgoloFuzzOne_GrayNgdotAt:
if len(GrayResults) == 0 {
continue
}
arg0 := GrayResults[GrayResultsIndex]
GrayResultsIndex = (GrayResultsIndex + 1) % len(GrayResults)
arg1 := int(a.GrayNgdotAt.X)
arg2 := int(a.GrayNgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_GrayNgdotRGBA64At:
if len(GrayResults) == 0 {
continue
}
arg0 := GrayResults[GrayResultsIndex]
GrayResultsIndex = (GrayResultsIndex + 1) % len(GrayResults)
arg1 := int(a.GrayNgdotRGBA64At.X)
arg2 := int(a.GrayNgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_GrayNgdotGrayAt:
if len(GrayResults) == 0 {
continue
}
arg0 := GrayResults[GrayResultsIndex]
GrayResultsIndex = (GrayResultsIndex + 1) % len(GrayResults)
arg1 := int(a.GrayNgdotGrayAt.X)
arg2 := int(a.GrayNgdotGrayAt.Y)
arg0.GrayAt(arg1, arg2)
case *NgoloFuzzOne_GrayNgdotPixOffset:
if len(GrayResults) == 0 {
continue
}
arg0 := GrayResults[GrayResultsIndex]
GrayResultsIndex = (GrayResultsIndex + 1) % len(GrayResults)
arg1 := int(a.GrayNgdotPixOffset.X)
arg2 := int(a.GrayNgdotPixOffset.Y)
arg0.PixOffset(arg1, arg2)
case *NgoloFuzzOne_GrayNgdotSubImage:
if len(GrayResults) == 0 {
continue
}
arg0 := GrayResults[GrayResultsIndex]
GrayResultsIndex = (GrayResultsIndex + 1) % len(GrayResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_GrayNgdotOpaque:
if len(GrayResults) == 0 {
continue
}
arg0 := GrayResults[GrayResultsIndex]
GrayResultsIndex = (GrayResultsIndex + 1) % len(GrayResults)
arg0.Opaque()
case *NgoloFuzzOne_NewGray:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
image.NewGray(arg0)
case *NgoloFuzzOne_Gray16NgdotColorModel:
if len(Gray16Results) == 0 {
continue
}
arg0 := Gray16Results[Gray16ResultsIndex]
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % len(Gray16Results)
arg0.ColorModel()
case *NgoloFuzzOne_Gray16NgdotBounds:
if len(Gray16Results) == 0 {
continue
}
arg0 := Gray16Results[Gray16ResultsIndex]
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % len(Gray16Results)
arg0.Bounds()
case *NgoloFuzzOne_Gray16NgdotAt:
if len(Gray16Results) == 0 {
continue
}
arg0 := Gray16Results[Gray16ResultsIndex]
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % len(Gray16Results)
arg1 := int(a.Gray16NgdotAt.X)
arg2 := int(a.Gray16NgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_Gray16NgdotRGBA64At:
if len(Gray16Results) == 0 {
continue
}
arg0 := Gray16Results[Gray16ResultsIndex]
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % len(Gray16Results)
arg1 := int(a.Gray16NgdotRGBA64At.X)
arg2 := int(a.Gray16NgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_Gray16NgdotGray16At:
if len(Gray16Results) == 0 {
continue
}
arg0 := Gray16Results[Gray16ResultsIndex]
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % len(Gray16Results)
arg1 := int(a.Gray16NgdotGray16At.X)
arg2 := int(a.Gray16NgdotGray16At.Y)
arg0.Gray16At(arg1, arg2)
case *NgoloFuzzOne_Gray16NgdotPixOffset:
if len(Gray16Results) == 0 {
continue
}
arg0 := Gray16Results[Gray16ResultsIndex]
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % len(Gray16Results)
arg1 := int(a.Gray16NgdotPixOffset.X)
arg2 := int(a.Gray16NgdotPixOffset.Y)
arg0.PixOffset(arg1, arg2)
case *NgoloFuzzOne_Gray16NgdotSubImage:
if len(Gray16Results) == 0 {
continue
}
arg0 := Gray16Results[Gray16ResultsIndex]
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % len(Gray16Results)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_Gray16NgdotOpaque:
if len(Gray16Results) == 0 {
continue
}
arg0 := Gray16Results[Gray16ResultsIndex]
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % len(Gray16Results)
arg0.Opaque()
case *NgoloFuzzOne_NewGray16:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
image.NewGray16(arg0)
case *NgoloFuzzOne_CMYKNgdotColorModel:
if len(CMYKResults) == 0 {
continue
}
arg0 := CMYKResults[CMYKResultsIndex]
CMYKResultsIndex = (CMYKResultsIndex + 1) % len(CMYKResults)
arg0.ColorModel()
case *NgoloFuzzOne_CMYKNgdotBounds:
if len(CMYKResults) == 0 {
continue
}
arg0 := CMYKResults[CMYKResultsIndex]
CMYKResultsIndex = (CMYKResultsIndex + 1) % len(CMYKResults)
arg0.Bounds()
case *NgoloFuzzOne_CMYKNgdotAt:
if len(CMYKResults) == 0 {
continue
}
arg0 := CMYKResults[CMYKResultsIndex]
CMYKResultsIndex = (CMYKResultsIndex + 1) % len(CMYKResults)
arg1 := int(a.CMYKNgdotAt.X)
arg2 := int(a.CMYKNgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_CMYKNgdotRGBA64At:
if len(CMYKResults) == 0 {
continue
}
arg0 := CMYKResults[CMYKResultsIndex]
CMYKResultsIndex = (CMYKResultsIndex + 1) % len(CMYKResults)
arg1 := int(a.CMYKNgdotRGBA64At.X)
arg2 := int(a.CMYKNgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_CMYKNgdotCMYKAt:
if len(CMYKResults) == 0 {
continue
}
arg0 := CMYKResults[CMYKResultsIndex]
CMYKResultsIndex = (CMYKResultsIndex + 1) % len(CMYKResults)
arg1 := int(a.CMYKNgdotCMYKAt.X)
arg2 := int(a.CMYKNgdotCMYKAt.Y)
arg0.CMYKAt(arg1, arg2)
case *NgoloFuzzOne_CMYKNgdotPixOffset:
if len(CMYKResults) == 0 {
continue
}
arg0 := CMYKResults[CMYKResultsIndex]
CMYKResultsIndex = (CMYKResultsIndex + 1) % len(CMYKResults)
arg1 := int(a.CMYKNgdotPixOffset.X)
arg2 := int(a.CMYKNgdotPixOffset.Y)
arg0.PixOffset(arg1, arg2)
case *NgoloFuzzOne_CMYKNgdotSubImage:
if len(CMYKResults) == 0 {
continue
}
arg0 := CMYKResults[CMYKResultsIndex]
CMYKResultsIndex = (CMYKResultsIndex + 1) % len(CMYKResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_CMYKNgdotOpaque:
if len(CMYKResults) == 0 {
continue
}
arg0 := CMYKResults[CMYKResultsIndex]
CMYKResultsIndex = (CMYKResultsIndex + 1) % len(CMYKResults)
arg0.Opaque()
case *NgoloFuzzOne_NewCMYK:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
image.NewCMYK(arg0)
case *NgoloFuzzOne_PalettedNgdotColorModel:
if len(PalettedResults) == 0 {
continue
}
arg0 := PalettedResults[PalettedResultsIndex]
PalettedResultsIndex = (PalettedResultsIndex + 1) % len(PalettedResults)
arg0.ColorModel()
case *NgoloFuzzOne_PalettedNgdotBounds:
if len(PalettedResults) == 0 {
continue
}
arg0 := PalettedResults[PalettedResultsIndex]
PalettedResultsIndex = (PalettedResultsIndex + 1) % len(PalettedResults)
arg0.Bounds()
case *NgoloFuzzOne_PalettedNgdotAt:
if len(PalettedResults) == 0 {
continue
}
arg0 := PalettedResults[PalettedResultsIndex]
PalettedResultsIndex = (PalettedResultsIndex + 1) % len(PalettedResults)
arg1 := int(a.PalettedNgdotAt.X)
arg2 := int(a.PalettedNgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_PalettedNgdotRGBA64At:
if len(PalettedResults) == 0 {
continue
}
arg0 := PalettedResults[PalettedResultsIndex]
PalettedResultsIndex = (PalettedResultsIndex + 1) % len(PalettedResults)
arg1 := int(a.PalettedNgdotRGBA64At.X)
arg2 := int(a.PalettedNgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_PalettedNgdotPixOffset:
if len(PalettedResults) == 0 {
continue
}
arg0 := PalettedResults[PalettedResultsIndex]
PalettedResultsIndex = (PalettedResultsIndex + 1) % len(PalettedResults)
arg1 := int(a.PalettedNgdotPixOffset.X)
arg2 := int(a.PalettedNgdotPixOffset.Y)
arg0.PixOffset(arg1, arg2)
case *NgoloFuzzOne_PalettedNgdotColorIndexAt:
if len(PalettedResults) == 0 {
continue
}
arg0 := PalettedResults[PalettedResultsIndex]
PalettedResultsIndex = (PalettedResultsIndex + 1) % len(PalettedResults)
arg1 := int(a.PalettedNgdotColorIndexAt.X)
arg2 := int(a.PalettedNgdotColorIndexAt.Y)
arg0.ColorIndexAt(arg1, arg2)
case *NgoloFuzzOne_PalettedNgdotSetColorIndex:
if len(PalettedResults) == 0 {
continue
}
arg0 := PalettedResults[PalettedResultsIndex]
PalettedResultsIndex = (PalettedResultsIndex + 1) % len(PalettedResults)
arg1 := int(a.PalettedNgdotSetColorIndex.X)
arg2 := int(a.PalettedNgdotSetColorIndex.Y)
arg3 := uint8(a.PalettedNgdotSetColorIndex.Index)
arg0.SetColorIndex(arg1, arg2, arg3)
case *NgoloFuzzOne_PalettedNgdotSubImage:
if len(PalettedResults) == 0 {
continue
}
arg0 := PalettedResults[PalettedResultsIndex]
PalettedResultsIndex = (PalettedResultsIndex + 1) % len(PalettedResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_PalettedNgdotOpaque:
if len(PalettedResults) == 0 {
continue
}
arg0 := PalettedResults[PalettedResultsIndex]
PalettedResultsIndex = (PalettedResultsIndex + 1) % len(PalettedResults)
arg0.Opaque()
case *NgoloFuzzOne_UniformNgdotRGBA:
if len(UniformResults) == 0 {
continue
}
arg0 := UniformResults[UniformResultsIndex]
UniformResultsIndex = (UniformResultsIndex + 1) % len(UniformResults)
arg0.RGBA()
case *NgoloFuzzOne_UniformNgdotColorModel:
if len(UniformResults) == 0 {
continue
}
arg0 := UniformResults[UniformResultsIndex]
UniformResultsIndex = (UniformResultsIndex + 1) % len(UniformResults)
arg0.ColorModel()
case *NgoloFuzzOne_UniformNgdotBounds:
if len(UniformResults) == 0 {
continue
}
arg0 := UniformResults[UniformResultsIndex]
UniformResultsIndex = (UniformResultsIndex + 1) % len(UniformResults)
arg0.Bounds()
case *NgoloFuzzOne_UniformNgdotAt:
if len(UniformResults) == 0 {
continue
}
arg0 := UniformResults[UniformResultsIndex]
UniformResultsIndex = (UniformResultsIndex + 1) % len(UniformResults)
arg1 := int(a.UniformNgdotAt.X)
arg2 := int(a.UniformNgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_UniformNgdotRGBA64At:
if len(UniformResults) == 0 {
continue
}
arg0 := UniformResults[UniformResultsIndex]
UniformResultsIndex = (UniformResultsIndex + 1) % len(UniformResults)
arg1 := int(a.UniformNgdotRGBA64At.X)
arg2 := int(a.UniformNgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_UniformNgdotOpaque:
if len(UniformResults) == 0 {
continue
}
arg0 := UniformResults[UniformResultsIndex]
UniformResultsIndex = (UniformResultsIndex + 1) % len(UniformResults)
arg0.Opaque()
case *NgoloFuzzOne_YCbCrSubsampleRatioNgdotString:
arg0 := YCbCrSubsampleRatioNewFromFuzz(a.YCbCrSubsampleRatioNgdotString.S)
arg0.String()
case *NgoloFuzzOne_YCbCrNgdotColorModel:
if len(YCbCrResults) == 0 {
continue
}
arg0 := YCbCrResults[YCbCrResultsIndex]
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % len(YCbCrResults)
arg0.ColorModel()
case *NgoloFuzzOne_YCbCrNgdotBounds:
if len(YCbCrResults) == 0 {
continue
}
arg0 := YCbCrResults[YCbCrResultsIndex]
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % len(YCbCrResults)
arg0.Bounds()
case *NgoloFuzzOne_YCbCrNgdotAt:
if len(YCbCrResults) == 0 {
continue
}
arg0 := YCbCrResults[YCbCrResultsIndex]
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % len(YCbCrResults)
arg1 := int(a.YCbCrNgdotAt.X)
arg2 := int(a.YCbCrNgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_YCbCrNgdotRGBA64At:
if len(YCbCrResults) == 0 {
continue
}
arg0 := YCbCrResults[YCbCrResultsIndex]
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % len(YCbCrResults)
arg1 := int(a.YCbCrNgdotRGBA64At.X)
arg2 := int(a.YCbCrNgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_YCbCrNgdotYCbCrAt:
if len(YCbCrResults) == 0 {
continue
}
arg0 := YCbCrResults[YCbCrResultsIndex]
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % len(YCbCrResults)
arg1 := int(a.YCbCrNgdotYCbCrAt.X)
arg2 := int(a.YCbCrNgdotYCbCrAt.Y)
arg0.YCbCrAt(arg1, arg2)
case *NgoloFuzzOne_YCbCrNgdotYOffset:
if len(YCbCrResults) == 0 {
continue
}
arg0 := YCbCrResults[YCbCrResultsIndex]
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % len(YCbCrResults)
arg1 := int(a.YCbCrNgdotYOffset.X)
arg2 := int(a.YCbCrNgdotYOffset.Y)
arg0.YOffset(arg1, arg2)
case *NgoloFuzzOne_YCbCrNgdotCOffset:
if len(YCbCrResults) == 0 {
continue
}
arg0 := YCbCrResults[YCbCrResultsIndex]
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % len(YCbCrResults)
arg1 := int(a.YCbCrNgdotCOffset.X)
arg2 := int(a.YCbCrNgdotCOffset.Y)
arg0.COffset(arg1, arg2)
case *NgoloFuzzOne_YCbCrNgdotSubImage:
if len(YCbCrResults) == 0 {
continue
}
arg0 := YCbCrResults[YCbCrResultsIndex]
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % len(YCbCrResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_YCbCrNgdotOpaque:
if len(YCbCrResults) == 0 {
continue
}
arg0 := YCbCrResults[YCbCrResultsIndex]
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % len(YCbCrResults)
arg0.Opaque()
case *NgoloFuzzOne_NewYCbCr:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg1 := YCbCrSubsampleRatioNewFromFuzz(a.NewYCbCr.SubsampleRatio)
image.NewYCbCr(arg0, arg1)
case *NgoloFuzzOne_NYCbCrANgdotColorModel:
if len(NYCbCrAResults) == 0 {
continue
}
arg0 := NYCbCrAResults[NYCbCrAResultsIndex]
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % len(NYCbCrAResults)
arg0.ColorModel()
case *NgoloFuzzOne_NYCbCrANgdotAt:
if len(NYCbCrAResults) == 0 {
continue
}
arg0 := NYCbCrAResults[NYCbCrAResultsIndex]
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % len(NYCbCrAResults)
arg1 := int(a.NYCbCrANgdotAt.X)
arg2 := int(a.NYCbCrANgdotAt.Y)
arg0.At(arg1, arg2)
case *NgoloFuzzOne_NYCbCrANgdotRGBA64At:
if len(NYCbCrAResults) == 0 {
continue
}
arg0 := NYCbCrAResults[NYCbCrAResultsIndex]
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % len(NYCbCrAResults)
arg1 := int(a.NYCbCrANgdotRGBA64At.X)
arg2 := int(a.NYCbCrANgdotRGBA64At.Y)
arg0.RGBA64At(arg1, arg2)
case *NgoloFuzzOne_NYCbCrANgdotNYCbCrAAt:
if len(NYCbCrAResults) == 0 {
continue
}
arg0 := NYCbCrAResults[NYCbCrAResultsIndex]
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % len(NYCbCrAResults)
arg1 := int(a.NYCbCrANgdotNYCbCrAAt.X)
arg2 := int(a.NYCbCrANgdotNYCbCrAAt.Y)
arg0.NYCbCrAAt(arg1, arg2)
case *NgoloFuzzOne_NYCbCrANgdotAOffset:
if len(NYCbCrAResults) == 0 {
continue
}
arg0 := NYCbCrAResults[NYCbCrAResultsIndex]
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % len(NYCbCrAResults)
arg1 := int(a.NYCbCrANgdotAOffset.X)
arg2 := int(a.NYCbCrANgdotAOffset.Y)
arg0.AOffset(arg1, arg2)
case *NgoloFuzzOne_NYCbCrANgdotSubImage:
if len(NYCbCrAResults) == 0 {
continue
}
arg0 := NYCbCrAResults[NYCbCrAResultsIndex]
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % len(NYCbCrAResults)
if len(RectangleResults) == 0 {
continue
}
arg1 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg0.SubImage(arg1)
case *NgoloFuzzOne_NYCbCrANgdotOpaque:
if len(NYCbCrAResults) == 0 {
continue
}
arg0 := NYCbCrAResults[NYCbCrAResultsIndex]
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % len(NYCbCrAResults)
arg0.Opaque()
case *NgoloFuzzOne_NewNYCbCrA:
if len(RectangleResults) == 0 {
continue
}
arg0 := *RectangleResults[RectangleResultsIndex]
RectangleResultsIndex = (RectangleResultsIndex + 1) % len(RectangleResults)
arg1 := YCbCrSubsampleRatioNewFromFuzz(a.NewNYCbCrA.SubsampleRatio)
image.NewNYCbCrA(arg0, arg1)
}
}
return 1
}
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
PalettedNb := 0
PalettedResultsIndex := 0
NRGBANb := 0
NRGBAResultsIndex := 0
NYCbCrANb := 0
NYCbCrAResultsIndex := 0
RGBANb := 0
RGBAResultsIndex := 0
RGBA64Nb := 0
RGBA64ResultsIndex := 0
GrayNb := 0
GrayResultsIndex := 0
PointNb := 0
PointResultsIndex := 0
RectangleNb := 0
RectangleResultsIndex := 0
NRGBA64Nb := 0
NRGBA64ResultsIndex := 0
AlphaNb := 0
AlphaResultsIndex := 0
CMYKNb := 0
CMYKResultsIndex := 0
UniformNb := 0
UniformResultsIndex := 0
YCbCrNb := 0
YCbCrResultsIndex := 0
Alpha16Nb := 0
Alpha16ResultsIndex := 0
Gray16Nb := 0
Gray16ResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Decode:
w.WriteString(fmt.Sprintf("image.Decode(bytes.NewReader(%#+v))\n", a.Decode.R))
case *NgoloFuzzOne_DecodeConfig:
w.WriteString(fmt.Sprintf("image.DecodeConfig(bytes.NewReader(%#+v))\n", a.DecodeConfig.R))
case *NgoloFuzzOne_PointNgdotString:
if PointNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Point%d.String()\n", PointResultsIndex))
PointResultsIndex = (PointResultsIndex + 1) % PointNb
case *NgoloFuzzOne_PointNgdotAdd:
if PointNb == 0 {
continue
}
if PointNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Point%d.Add(Point%d)\n", PointResultsIndex, (PointResultsIndex + 1) % PointNb))
PointResultsIndex = (PointResultsIndex + 1) % PointNb
PointResultsIndex = (PointResultsIndex + 1) % PointNb
case *NgoloFuzzOne_PointNgdotSub:
if PointNb == 0 {
continue
}
if PointNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Point%d.Sub(Point%d)\n", PointResultsIndex, (PointResultsIndex + 1) % PointNb))
PointResultsIndex = (PointResultsIndex + 1) % PointNb
PointResultsIndex = (PointResultsIndex + 1) % PointNb
case *NgoloFuzzOne_PointNgdotMul:
if PointNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Point%d.Mul(int(%#+v))\n", PointResultsIndex, a.PointNgdotMul.K))
PointResultsIndex = (PointResultsIndex + 1) % PointNb
case *NgoloFuzzOne_PointNgdotIn:
if PointNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Point%d.In(Rectangle%d)\n", PointResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
PointResultsIndex = (PointResultsIndex + 1) % PointNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_PointNgdotMod:
if PointNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Point%d.Mod(Rectangle%d)\n", PointResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
PointResultsIndex = (PointResultsIndex + 1) % PointNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_PointNgdotEq:
if PointNb == 0 {
continue
}
if PointNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Point%d.Eq(Point%d)\n", PointResultsIndex, (PointResultsIndex + 1) % PointNb))
PointResultsIndex = (PointResultsIndex + 1) % PointNb
PointResultsIndex = (PointResultsIndex + 1) % PointNb
case *NgoloFuzzOne_Pt:
w.WriteString(fmt.Sprintf("image.Pt(int(%#+v), int(%#+v))\n", a.Pt.X, a.Pt.Y))
case *NgoloFuzzOne_RectangleNgdotString:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.String()\n", RectangleResultsIndex))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotDx:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Dx()\n", RectangleResultsIndex))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotDy:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Dy()\n", RectangleResultsIndex))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotSize:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Size()\n", RectangleResultsIndex))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotAdd:
if RectangleNb == 0 {
continue
}
if PointNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Add(Point%d)\n", RectangleResultsIndex, (PointResultsIndex + 0) % PointNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
PointResultsIndex = (PointResultsIndex + 1) % PointNb
case *NgoloFuzzOne_RectangleNgdotSub:
if RectangleNb == 0 {
continue
}
if PointNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Sub(Point%d)\n", RectangleResultsIndex, (PointResultsIndex + 0) % PointNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
PointResultsIndex = (PointResultsIndex + 1) % PointNb
case *NgoloFuzzOne_RectangleNgdotInset:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Inset(int(%#+v))\n", RectangleResultsIndex, a.RectangleNgdotInset.N))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotIntersect:
if RectangleNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Intersect(Rectangle%d)\n", RectangleResultsIndex, (RectangleResultsIndex + 1) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotUnion:
if RectangleNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Union(Rectangle%d)\n", RectangleResultsIndex, (RectangleResultsIndex + 1) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotEmpty:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Empty()\n", RectangleResultsIndex))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotEq:
if RectangleNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Eq(Rectangle%d)\n", RectangleResultsIndex, (RectangleResultsIndex + 1) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotOverlaps:
if RectangleNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Overlaps(Rectangle%d)\n", RectangleResultsIndex, (RectangleResultsIndex + 1) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotIn:
if RectangleNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.In(Rectangle%d)\n", RectangleResultsIndex, (RectangleResultsIndex + 1) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotCanon:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Canon()\n", RectangleResultsIndex))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotAt:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.At(int(%#+v), int(%#+v))\n", RectangleResultsIndex, a.RectangleNgdotAt.X, a.RectangleNgdotAt.Y))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotRGBA64At:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.RGBA64At(int(%#+v), int(%#+v))\n", RectangleResultsIndex, a.RectangleNgdotRGBA64At.X, a.RectangleNgdotRGBA64At.Y))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotBounds:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.Bounds()\n", RectangleResultsIndex))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RectangleNgdotColorModel:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rectangle%d.ColorModel()\n", RectangleResultsIndex))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_Rect:
w.WriteString(fmt.Sprintf("image.Rect(int(%#+v), int(%#+v), int(%#+v) %% 0x10001, int(%#+v) %% 0x10001)\n", a.Rect.X0, a.Rect.Y0, a.Rect.X1, a.Rect.Y1))
case *NgoloFuzzOne_RGBANgdotColorModel:
if RGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA%d.ColorModel()\n", RGBAResultsIndex))
RGBAResultsIndex = (RGBAResultsIndex + 1) % RGBANb
case *NgoloFuzzOne_RGBANgdotBounds:
if RGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA%d.Bounds()\n", RGBAResultsIndex))
RGBAResultsIndex = (RGBAResultsIndex + 1) % RGBANb
case *NgoloFuzzOne_RGBANgdotAt:
if RGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA%d.At(int(%#+v), int(%#+v))\n", RGBAResultsIndex, a.RGBANgdotAt.X, a.RGBANgdotAt.Y))
RGBAResultsIndex = (RGBAResultsIndex + 1) % RGBANb
case *NgoloFuzzOne_RGBANgdotRGBA64At:
if RGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA%d.RGBA64At(int(%#+v), int(%#+v))\n", RGBAResultsIndex, a.RGBANgdotRGBA64At.X, a.RGBANgdotRGBA64At.Y))
RGBAResultsIndex = (RGBAResultsIndex + 1) % RGBANb
case *NgoloFuzzOne_RGBANgdotRGBAAt:
if RGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA%d.RGBAAt(int(%#+v), int(%#+v))\n", RGBAResultsIndex, a.RGBANgdotRGBAAt.X, a.RGBANgdotRGBAAt.Y))
RGBAResultsIndex = (RGBAResultsIndex + 1) % RGBANb
case *NgoloFuzzOne_RGBANgdotPixOffset:
if RGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA%d.PixOffset(int(%#+v), int(%#+v))\n", RGBAResultsIndex, a.RGBANgdotPixOffset.X, a.RGBANgdotPixOffset.Y))
RGBAResultsIndex = (RGBAResultsIndex + 1) % RGBANb
case *NgoloFuzzOne_RGBANgdotSubImage:
if RGBANb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA%d.SubImage(Rectangle%d)\n", RGBAResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
RGBAResultsIndex = (RGBAResultsIndex + 1) % RGBANb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RGBANgdotOpaque:
if RGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA%d.Opaque()\n", RGBAResultsIndex))
RGBAResultsIndex = (RGBAResultsIndex + 1) % RGBANb
case *NgoloFuzzOne_NewRGBA:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewRGBA(Rectangle%d)\n", (RectangleResultsIndex + 0) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RGBA64NgdotColorModel:
if RGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA64%d.ColorModel()\n", RGBA64ResultsIndex))
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % RGBA64Nb
case *NgoloFuzzOne_RGBA64NgdotBounds:
if RGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA64%d.Bounds()\n", RGBA64ResultsIndex))
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % RGBA64Nb
case *NgoloFuzzOne_RGBA64NgdotAt:
if RGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA64%d.At(int(%#+v), int(%#+v))\n", RGBA64ResultsIndex, a.RGBA64NgdotAt.X, a.RGBA64NgdotAt.Y))
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % RGBA64Nb
case *NgoloFuzzOne_RGBA64NgdotRGBA64At:
if RGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA64%d.RGBA64At(int(%#+v), int(%#+v))\n", RGBA64ResultsIndex, a.RGBA64NgdotRGBA64At.X, a.RGBA64NgdotRGBA64At.Y))
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % RGBA64Nb
case *NgoloFuzzOne_RGBA64NgdotPixOffset:
if RGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA64%d.PixOffset(int(%#+v), int(%#+v))\n", RGBA64ResultsIndex, a.RGBA64NgdotPixOffset.X, a.RGBA64NgdotPixOffset.Y))
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % RGBA64Nb
case *NgoloFuzzOne_RGBA64NgdotSubImage:
if RGBA64Nb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA64%d.SubImage(Rectangle%d)\n", RGBA64ResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % RGBA64Nb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_RGBA64NgdotOpaque:
if RGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RGBA64%d.Opaque()\n", RGBA64ResultsIndex))
RGBA64ResultsIndex = (RGBA64ResultsIndex + 1) % RGBA64Nb
case *NgoloFuzzOne_NewRGBA64:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewRGBA64(Rectangle%d)\n", (RectangleResultsIndex + 0) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_NRGBANgdotColorModel:
if NRGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA%d.ColorModel()\n", NRGBAResultsIndex))
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % NRGBANb
case *NgoloFuzzOne_NRGBANgdotBounds:
if NRGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA%d.Bounds()\n", NRGBAResultsIndex))
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % NRGBANb
case *NgoloFuzzOne_NRGBANgdotAt:
if NRGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA%d.At(int(%#+v), int(%#+v))\n", NRGBAResultsIndex, a.NRGBANgdotAt.X, a.NRGBANgdotAt.Y))
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % NRGBANb
case *NgoloFuzzOne_NRGBANgdotRGBA64At:
if NRGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA%d.RGBA64At(int(%#+v), int(%#+v))\n", NRGBAResultsIndex, a.NRGBANgdotRGBA64At.X, a.NRGBANgdotRGBA64At.Y))
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % NRGBANb
case *NgoloFuzzOne_NRGBANgdotNRGBAAt:
if NRGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA%d.NRGBAAt(int(%#+v), int(%#+v))\n", NRGBAResultsIndex, a.NRGBANgdotNRGBAAt.X, a.NRGBANgdotNRGBAAt.Y))
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % NRGBANb
case *NgoloFuzzOne_NRGBANgdotPixOffset:
if NRGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA%d.PixOffset(int(%#+v), int(%#+v))\n", NRGBAResultsIndex, a.NRGBANgdotPixOffset.X, a.NRGBANgdotPixOffset.Y))
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % NRGBANb
case *NgoloFuzzOne_NRGBANgdotSubImage:
if NRGBANb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA%d.SubImage(Rectangle%d)\n", NRGBAResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % NRGBANb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_NRGBANgdotOpaque:
if NRGBANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA%d.Opaque()\n", NRGBAResultsIndex))
NRGBAResultsIndex = (NRGBAResultsIndex + 1) % NRGBANb
case *NgoloFuzzOne_NewNRGBA:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewNRGBA(Rectangle%d)\n", (RectangleResultsIndex + 0) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_NRGBA64NgdotColorModel:
if NRGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA64%d.ColorModel()\n", NRGBA64ResultsIndex))
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % NRGBA64Nb
case *NgoloFuzzOne_NRGBA64NgdotBounds:
if NRGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA64%d.Bounds()\n", NRGBA64ResultsIndex))
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % NRGBA64Nb
case *NgoloFuzzOne_NRGBA64NgdotAt:
if NRGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA64%d.At(int(%#+v), int(%#+v))\n", NRGBA64ResultsIndex, a.NRGBA64NgdotAt.X, a.NRGBA64NgdotAt.Y))
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % NRGBA64Nb
case *NgoloFuzzOne_NRGBA64NgdotRGBA64At:
if NRGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA64%d.RGBA64At(int(%#+v), int(%#+v))\n", NRGBA64ResultsIndex, a.NRGBA64NgdotRGBA64At.X, a.NRGBA64NgdotRGBA64At.Y))
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % NRGBA64Nb
case *NgoloFuzzOne_NRGBA64NgdotNRGBA64At:
if NRGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA64%d.NRGBA64At(int(%#+v), int(%#+v))\n", NRGBA64ResultsIndex, a.NRGBA64NgdotNRGBA64At.X, a.NRGBA64NgdotNRGBA64At.Y))
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % NRGBA64Nb
case *NgoloFuzzOne_NRGBA64NgdotPixOffset:
if NRGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA64%d.PixOffset(int(%#+v), int(%#+v))\n", NRGBA64ResultsIndex, a.NRGBA64NgdotPixOffset.X, a.NRGBA64NgdotPixOffset.Y))
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % NRGBA64Nb
case *NgoloFuzzOne_NRGBA64NgdotSubImage:
if NRGBA64Nb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA64%d.SubImage(Rectangle%d)\n", NRGBA64ResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % NRGBA64Nb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_NRGBA64NgdotOpaque:
if NRGBA64Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NRGBA64%d.Opaque()\n", NRGBA64ResultsIndex))
NRGBA64ResultsIndex = (NRGBA64ResultsIndex + 1) % NRGBA64Nb
case *NgoloFuzzOne_NewNRGBA64:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewNRGBA64(Rectangle%d)\n", (RectangleResultsIndex + 0) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_AlphaNgdotColorModel:
if AlphaNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha%d.ColorModel()\n", AlphaResultsIndex))
AlphaResultsIndex = (AlphaResultsIndex + 1) % AlphaNb
case *NgoloFuzzOne_AlphaNgdotBounds:
if AlphaNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha%d.Bounds()\n", AlphaResultsIndex))
AlphaResultsIndex = (AlphaResultsIndex + 1) % AlphaNb
case *NgoloFuzzOne_AlphaNgdotAt:
if AlphaNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha%d.At(int(%#+v), int(%#+v))\n", AlphaResultsIndex, a.AlphaNgdotAt.X, a.AlphaNgdotAt.Y))
AlphaResultsIndex = (AlphaResultsIndex + 1) % AlphaNb
case *NgoloFuzzOne_AlphaNgdotRGBA64At:
if AlphaNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha%d.RGBA64At(int(%#+v), int(%#+v))\n", AlphaResultsIndex, a.AlphaNgdotRGBA64At.X, a.AlphaNgdotRGBA64At.Y))
AlphaResultsIndex = (AlphaResultsIndex + 1) % AlphaNb
case *NgoloFuzzOne_AlphaNgdotAlphaAt:
if AlphaNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha%d.AlphaAt(int(%#+v), int(%#+v))\n", AlphaResultsIndex, a.AlphaNgdotAlphaAt.X, a.AlphaNgdotAlphaAt.Y))
AlphaResultsIndex = (AlphaResultsIndex + 1) % AlphaNb
case *NgoloFuzzOne_AlphaNgdotPixOffset:
if AlphaNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha%d.PixOffset(int(%#+v), int(%#+v))\n", AlphaResultsIndex, a.AlphaNgdotPixOffset.X, a.AlphaNgdotPixOffset.Y))
AlphaResultsIndex = (AlphaResultsIndex + 1) % AlphaNb
case *NgoloFuzzOne_AlphaNgdotSubImage:
if AlphaNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha%d.SubImage(Rectangle%d)\n", AlphaResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
AlphaResultsIndex = (AlphaResultsIndex + 1) % AlphaNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_AlphaNgdotOpaque:
if AlphaNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha%d.Opaque()\n", AlphaResultsIndex))
AlphaResultsIndex = (AlphaResultsIndex + 1) % AlphaNb
case *NgoloFuzzOne_NewAlpha:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewAlpha(Rectangle%d)\n", (RectangleResultsIndex + 0) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_Alpha16NgdotColorModel:
if Alpha16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha16%d.ColorModel()\n", Alpha16ResultsIndex))
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % Alpha16Nb
case *NgoloFuzzOne_Alpha16NgdotBounds:
if Alpha16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha16%d.Bounds()\n", Alpha16ResultsIndex))
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % Alpha16Nb
case *NgoloFuzzOne_Alpha16NgdotAt:
if Alpha16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha16%d.At(int(%#+v), int(%#+v))\n", Alpha16ResultsIndex, a.Alpha16NgdotAt.X, a.Alpha16NgdotAt.Y))
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % Alpha16Nb
case *NgoloFuzzOne_Alpha16NgdotRGBA64At:
if Alpha16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha16%d.RGBA64At(int(%#+v), int(%#+v))\n", Alpha16ResultsIndex, a.Alpha16NgdotRGBA64At.X, a.Alpha16NgdotRGBA64At.Y))
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % Alpha16Nb
case *NgoloFuzzOne_Alpha16NgdotAlpha16At:
if Alpha16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha16%d.Alpha16At(int(%#+v), int(%#+v))\n", Alpha16ResultsIndex, a.Alpha16NgdotAlpha16At.X, a.Alpha16NgdotAlpha16At.Y))
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % Alpha16Nb
case *NgoloFuzzOne_Alpha16NgdotPixOffset:
if Alpha16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha16%d.PixOffset(int(%#+v), int(%#+v))\n", Alpha16ResultsIndex, a.Alpha16NgdotPixOffset.X, a.Alpha16NgdotPixOffset.Y))
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % Alpha16Nb
case *NgoloFuzzOne_Alpha16NgdotSubImage:
if Alpha16Nb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha16%d.SubImage(Rectangle%d)\n", Alpha16ResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % Alpha16Nb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_Alpha16NgdotOpaque:
if Alpha16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Alpha16%d.Opaque()\n", Alpha16ResultsIndex))
Alpha16ResultsIndex = (Alpha16ResultsIndex + 1) % Alpha16Nb
case *NgoloFuzzOne_NewAlpha16:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewAlpha16(Rectangle%d)\n", (RectangleResultsIndex + 0) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_GrayNgdotColorModel:
if GrayNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray%d.ColorModel()\n", GrayResultsIndex))
GrayResultsIndex = (GrayResultsIndex + 1) % GrayNb
case *NgoloFuzzOne_GrayNgdotBounds:
if GrayNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray%d.Bounds()\n", GrayResultsIndex))
GrayResultsIndex = (GrayResultsIndex + 1) % GrayNb
case *NgoloFuzzOne_GrayNgdotAt:
if GrayNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray%d.At(int(%#+v), int(%#+v))\n", GrayResultsIndex, a.GrayNgdotAt.X, a.GrayNgdotAt.Y))
GrayResultsIndex = (GrayResultsIndex + 1) % GrayNb
case *NgoloFuzzOne_GrayNgdotRGBA64At:
if GrayNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray%d.RGBA64At(int(%#+v), int(%#+v))\n", GrayResultsIndex, a.GrayNgdotRGBA64At.X, a.GrayNgdotRGBA64At.Y))
GrayResultsIndex = (GrayResultsIndex + 1) % GrayNb
case *NgoloFuzzOne_GrayNgdotGrayAt:
if GrayNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray%d.GrayAt(int(%#+v), int(%#+v))\n", GrayResultsIndex, a.GrayNgdotGrayAt.X, a.GrayNgdotGrayAt.Y))
GrayResultsIndex = (GrayResultsIndex + 1) % GrayNb
case *NgoloFuzzOne_GrayNgdotPixOffset:
if GrayNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray%d.PixOffset(int(%#+v), int(%#+v))\n", GrayResultsIndex, a.GrayNgdotPixOffset.X, a.GrayNgdotPixOffset.Y))
GrayResultsIndex = (GrayResultsIndex + 1) % GrayNb
case *NgoloFuzzOne_GrayNgdotSubImage:
if GrayNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray%d.SubImage(Rectangle%d)\n", GrayResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
GrayResultsIndex = (GrayResultsIndex + 1) % GrayNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_GrayNgdotOpaque:
if GrayNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray%d.Opaque()\n", GrayResultsIndex))
GrayResultsIndex = (GrayResultsIndex + 1) % GrayNb
case *NgoloFuzzOne_NewGray:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewGray(Rectangle%d)\n", (RectangleResultsIndex + 0) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_Gray16NgdotColorModel:
if Gray16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray16%d.ColorModel()\n", Gray16ResultsIndex))
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % Gray16Nb
case *NgoloFuzzOne_Gray16NgdotBounds:
if Gray16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray16%d.Bounds()\n", Gray16ResultsIndex))
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % Gray16Nb
case *NgoloFuzzOne_Gray16NgdotAt:
if Gray16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray16%d.At(int(%#+v), int(%#+v))\n", Gray16ResultsIndex, a.Gray16NgdotAt.X, a.Gray16NgdotAt.Y))
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % Gray16Nb
case *NgoloFuzzOne_Gray16NgdotRGBA64At:
if Gray16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray16%d.RGBA64At(int(%#+v), int(%#+v))\n", Gray16ResultsIndex, a.Gray16NgdotRGBA64At.X, a.Gray16NgdotRGBA64At.Y))
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % Gray16Nb
case *NgoloFuzzOne_Gray16NgdotGray16At:
if Gray16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray16%d.Gray16At(int(%#+v), int(%#+v))\n", Gray16ResultsIndex, a.Gray16NgdotGray16At.X, a.Gray16NgdotGray16At.Y))
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % Gray16Nb
case *NgoloFuzzOne_Gray16NgdotPixOffset:
if Gray16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray16%d.PixOffset(int(%#+v), int(%#+v))\n", Gray16ResultsIndex, a.Gray16NgdotPixOffset.X, a.Gray16NgdotPixOffset.Y))
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % Gray16Nb
case *NgoloFuzzOne_Gray16NgdotSubImage:
if Gray16Nb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray16%d.SubImage(Rectangle%d)\n", Gray16ResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % Gray16Nb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_Gray16NgdotOpaque:
if Gray16Nb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Gray16%d.Opaque()\n", Gray16ResultsIndex))
Gray16ResultsIndex = (Gray16ResultsIndex + 1) % Gray16Nb
case *NgoloFuzzOne_NewGray16:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewGray16(Rectangle%d)\n", (RectangleResultsIndex + 0) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_CMYKNgdotColorModel:
if CMYKNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CMYK%d.ColorModel()\n", CMYKResultsIndex))
CMYKResultsIndex = (CMYKResultsIndex + 1) % CMYKNb
case *NgoloFuzzOne_CMYKNgdotBounds:
if CMYKNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CMYK%d.Bounds()\n", CMYKResultsIndex))
CMYKResultsIndex = (CMYKResultsIndex + 1) % CMYKNb
case *NgoloFuzzOne_CMYKNgdotAt:
if CMYKNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CMYK%d.At(int(%#+v), int(%#+v))\n", CMYKResultsIndex, a.CMYKNgdotAt.X, a.CMYKNgdotAt.Y))
CMYKResultsIndex = (CMYKResultsIndex + 1) % CMYKNb
case *NgoloFuzzOne_CMYKNgdotRGBA64At:
if CMYKNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CMYK%d.RGBA64At(int(%#+v), int(%#+v))\n", CMYKResultsIndex, a.CMYKNgdotRGBA64At.X, a.CMYKNgdotRGBA64At.Y))
CMYKResultsIndex = (CMYKResultsIndex + 1) % CMYKNb
case *NgoloFuzzOne_CMYKNgdotCMYKAt:
if CMYKNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CMYK%d.CMYKAt(int(%#+v), int(%#+v))\n", CMYKResultsIndex, a.CMYKNgdotCMYKAt.X, a.CMYKNgdotCMYKAt.Y))
CMYKResultsIndex = (CMYKResultsIndex + 1) % CMYKNb
case *NgoloFuzzOne_CMYKNgdotPixOffset:
if CMYKNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CMYK%d.PixOffset(int(%#+v), int(%#+v))\n", CMYKResultsIndex, a.CMYKNgdotPixOffset.X, a.CMYKNgdotPixOffset.Y))
CMYKResultsIndex = (CMYKResultsIndex + 1) % CMYKNb
case *NgoloFuzzOne_CMYKNgdotSubImage:
if CMYKNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CMYK%d.SubImage(Rectangle%d)\n", CMYKResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
CMYKResultsIndex = (CMYKResultsIndex + 1) % CMYKNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_CMYKNgdotOpaque:
if CMYKNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("CMYK%d.Opaque()\n", CMYKResultsIndex))
CMYKResultsIndex = (CMYKResultsIndex + 1) % CMYKNb
case *NgoloFuzzOne_NewCMYK:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewCMYK(Rectangle%d)\n", (RectangleResultsIndex + 0) % RectangleNb))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_PalettedNgdotColorModel:
if PalettedNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Paletted%d.ColorModel()\n", PalettedResultsIndex))
PalettedResultsIndex = (PalettedResultsIndex + 1) % PalettedNb
case *NgoloFuzzOne_PalettedNgdotBounds:
if PalettedNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Paletted%d.Bounds()\n", PalettedResultsIndex))
PalettedResultsIndex = (PalettedResultsIndex + 1) % PalettedNb
case *NgoloFuzzOne_PalettedNgdotAt:
if PalettedNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Paletted%d.At(int(%#+v), int(%#+v))\n", PalettedResultsIndex, a.PalettedNgdotAt.X, a.PalettedNgdotAt.Y))
PalettedResultsIndex = (PalettedResultsIndex + 1) % PalettedNb
case *NgoloFuzzOne_PalettedNgdotRGBA64At:
if PalettedNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Paletted%d.RGBA64At(int(%#+v), int(%#+v))\n", PalettedResultsIndex, a.PalettedNgdotRGBA64At.X, a.PalettedNgdotRGBA64At.Y))
PalettedResultsIndex = (PalettedResultsIndex + 1) % PalettedNb
case *NgoloFuzzOne_PalettedNgdotPixOffset:
if PalettedNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Paletted%d.PixOffset(int(%#+v), int(%#+v))\n", PalettedResultsIndex, a.PalettedNgdotPixOffset.X, a.PalettedNgdotPixOffset.Y))
PalettedResultsIndex = (PalettedResultsIndex + 1) % PalettedNb
case *NgoloFuzzOne_PalettedNgdotColorIndexAt:
if PalettedNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Paletted%d.ColorIndexAt(int(%#+v), int(%#+v))\n", PalettedResultsIndex, a.PalettedNgdotColorIndexAt.X, a.PalettedNgdotColorIndexAt.Y))
PalettedResultsIndex = (PalettedResultsIndex + 1) % PalettedNb
case *NgoloFuzzOne_PalettedNgdotSetColorIndex:
if PalettedNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Paletted%d.SetColorIndex(int(%#+v), int(%#+v), uint8(%#+v))\n", PalettedResultsIndex, a.PalettedNgdotSetColorIndex.X, a.PalettedNgdotSetColorIndex.Y, a.PalettedNgdotSetColorIndex.Index))
PalettedResultsIndex = (PalettedResultsIndex + 1) % PalettedNb
case *NgoloFuzzOne_PalettedNgdotSubImage:
if PalettedNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Paletted%d.SubImage(Rectangle%d)\n", PalettedResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
PalettedResultsIndex = (PalettedResultsIndex + 1) % PalettedNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_PalettedNgdotOpaque:
if PalettedNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Paletted%d.Opaque()\n", PalettedResultsIndex))
PalettedResultsIndex = (PalettedResultsIndex + 1) % PalettedNb
case *NgoloFuzzOne_UniformNgdotRGBA:
if UniformNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Uniform%d.RGBA()\n", UniformResultsIndex))
UniformResultsIndex = (UniformResultsIndex + 1) % UniformNb
case *NgoloFuzzOne_UniformNgdotColorModel:
if UniformNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Uniform%d.ColorModel()\n", UniformResultsIndex))
UniformResultsIndex = (UniformResultsIndex + 1) % UniformNb
case *NgoloFuzzOne_UniformNgdotBounds:
if UniformNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Uniform%d.Bounds()\n", UniformResultsIndex))
UniformResultsIndex = (UniformResultsIndex + 1) % UniformNb
case *NgoloFuzzOne_UniformNgdotAt:
if UniformNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Uniform%d.At(int(%#+v), int(%#+v))\n", UniformResultsIndex, a.UniformNgdotAt.X, a.UniformNgdotAt.Y))
UniformResultsIndex = (UniformResultsIndex + 1) % UniformNb
case *NgoloFuzzOne_UniformNgdotRGBA64At:
if UniformNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Uniform%d.RGBA64At(int(%#+v), int(%#+v))\n", UniformResultsIndex, a.UniformNgdotRGBA64At.X, a.UniformNgdotRGBA64At.Y))
UniformResultsIndex = (UniformResultsIndex + 1) % UniformNb
case *NgoloFuzzOne_UniformNgdotOpaque:
if UniformNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Uniform%d.Opaque()\n", UniformResultsIndex))
UniformResultsIndex = (UniformResultsIndex + 1) % UniformNb
case *NgoloFuzzOne_YCbCrSubsampleRatioNgdotString:
w.WriteString(fmt.Sprintf("YCbCrSubsampleRatioNewFromFuzz(%#+v).String()\n", a.YCbCrSubsampleRatioNgdotString.S))
case *NgoloFuzzOne_YCbCrNgdotColorModel:
if YCbCrNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("YCbCr%d.ColorModel()\n", YCbCrResultsIndex))
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % YCbCrNb
case *NgoloFuzzOne_YCbCrNgdotBounds:
if YCbCrNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("YCbCr%d.Bounds()\n", YCbCrResultsIndex))
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % YCbCrNb
case *NgoloFuzzOne_YCbCrNgdotAt:
if YCbCrNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("YCbCr%d.At(int(%#+v), int(%#+v))\n", YCbCrResultsIndex, a.YCbCrNgdotAt.X, a.YCbCrNgdotAt.Y))
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % YCbCrNb
case *NgoloFuzzOne_YCbCrNgdotRGBA64At:
if YCbCrNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("YCbCr%d.RGBA64At(int(%#+v), int(%#+v))\n", YCbCrResultsIndex, a.YCbCrNgdotRGBA64At.X, a.YCbCrNgdotRGBA64At.Y))
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % YCbCrNb
case *NgoloFuzzOne_YCbCrNgdotYCbCrAt:
if YCbCrNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("YCbCr%d.YCbCrAt(int(%#+v), int(%#+v))\n", YCbCrResultsIndex, a.YCbCrNgdotYCbCrAt.X, a.YCbCrNgdotYCbCrAt.Y))
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % YCbCrNb
case *NgoloFuzzOne_YCbCrNgdotYOffset:
if YCbCrNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("YCbCr%d.YOffset(int(%#+v), int(%#+v))\n", YCbCrResultsIndex, a.YCbCrNgdotYOffset.X, a.YCbCrNgdotYOffset.Y))
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % YCbCrNb
case *NgoloFuzzOne_YCbCrNgdotCOffset:
if YCbCrNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("YCbCr%d.COffset(int(%#+v), int(%#+v))\n", YCbCrResultsIndex, a.YCbCrNgdotCOffset.X, a.YCbCrNgdotCOffset.Y))
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % YCbCrNb
case *NgoloFuzzOne_YCbCrNgdotSubImage:
if YCbCrNb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("YCbCr%d.SubImage(Rectangle%d)\n", YCbCrResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % YCbCrNb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_YCbCrNgdotOpaque:
if YCbCrNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("YCbCr%d.Opaque()\n", YCbCrResultsIndex))
YCbCrResultsIndex = (YCbCrResultsIndex + 1) % YCbCrNb
case *NgoloFuzzOne_NewYCbCr:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewYCbCr(Rectangle%d, YCbCrSubsampleRatioNewFromFuzz(%#+v))\n", (RectangleResultsIndex + 0) % RectangleNb, a.NewYCbCr.SubsampleRatio))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_NYCbCrANgdotColorModel:
if NYCbCrANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NYCbCrA%d.ColorModel()\n", NYCbCrAResultsIndex))
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % NYCbCrANb
case *NgoloFuzzOne_NYCbCrANgdotAt:
if NYCbCrANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NYCbCrA%d.At(int(%#+v), int(%#+v))\n", NYCbCrAResultsIndex, a.NYCbCrANgdotAt.X, a.NYCbCrANgdotAt.Y))
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % NYCbCrANb
case *NgoloFuzzOne_NYCbCrANgdotRGBA64At:
if NYCbCrANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NYCbCrA%d.RGBA64At(int(%#+v), int(%#+v))\n", NYCbCrAResultsIndex, a.NYCbCrANgdotRGBA64At.X, a.NYCbCrANgdotRGBA64At.Y))
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % NYCbCrANb
case *NgoloFuzzOne_NYCbCrANgdotNYCbCrAAt:
if NYCbCrANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NYCbCrA%d.NYCbCrAAt(int(%#+v), int(%#+v))\n", NYCbCrAResultsIndex, a.NYCbCrANgdotNYCbCrAAt.X, a.NYCbCrANgdotNYCbCrAAt.Y))
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % NYCbCrANb
case *NgoloFuzzOne_NYCbCrANgdotAOffset:
if NYCbCrANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NYCbCrA%d.AOffset(int(%#+v), int(%#+v))\n", NYCbCrAResultsIndex, a.NYCbCrANgdotAOffset.X, a.NYCbCrANgdotAOffset.Y))
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % NYCbCrANb
case *NgoloFuzzOne_NYCbCrANgdotSubImage:
if NYCbCrANb == 0 {
continue
}
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NYCbCrA%d.SubImage(Rectangle%d)\n", NYCbCrAResultsIndex, (RectangleResultsIndex + 0) % RectangleNb))
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % NYCbCrANb
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
case *NgoloFuzzOne_NYCbCrANgdotOpaque:
if NYCbCrANb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NYCbCrA%d.Opaque()\n", NYCbCrAResultsIndex))
NYCbCrAResultsIndex = (NYCbCrAResultsIndex + 1) % NYCbCrANb
case *NgoloFuzzOne_NewNYCbCrA:
if RectangleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("image.NewNYCbCrA(Rectangle%d, YCbCrSubsampleRatioNewFromFuzz(%#+v))\n", (RectangleResultsIndex + 0) % RectangleNb, a.NewNYCbCrA.SubsampleRatio))
RectangleResultsIndex = (RectangleResultsIndex + 1) % RectangleNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_image
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Generated version guards: compilation fails here if the linked
// protoimpl runtime is older or newer than what this generated code supports.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// YCbCrSubsampleRatioEnum is the generated protobuf enum whose values are
// named after the image package's YCbCr subsample ratios (444..410).
// NOTE(review): protoc-gen-go output — do not hand-edit; regenerate instead.
type YCbCrSubsampleRatioEnum int32
const (
YCbCrSubsampleRatioEnum_YCbCrSubsampleRatio444 YCbCrSubsampleRatioEnum = 0
YCbCrSubsampleRatioEnum_YCbCrSubsampleRatio422 YCbCrSubsampleRatioEnum = 1
YCbCrSubsampleRatioEnum_YCbCrSubsampleRatio420 YCbCrSubsampleRatioEnum = 2
YCbCrSubsampleRatioEnum_YCbCrSubsampleRatio440 YCbCrSubsampleRatioEnum = 3
YCbCrSubsampleRatioEnum_YCbCrSubsampleRatio411 YCbCrSubsampleRatioEnum = 4
YCbCrSubsampleRatioEnum_YCbCrSubsampleRatio410 YCbCrSubsampleRatioEnum = 5
)
// Enum value maps for YCbCrSubsampleRatioEnum.
var (
YCbCrSubsampleRatioEnum_name = map[int32]string{
0: "YCbCrSubsampleRatio444",
1: "YCbCrSubsampleRatio422",
2: "YCbCrSubsampleRatio420",
3: "YCbCrSubsampleRatio440",
4: "YCbCrSubsampleRatio411",
5: "YCbCrSubsampleRatio410",
}
YCbCrSubsampleRatioEnum_value = map[string]int32{
"YCbCrSubsampleRatio444": 0,
"YCbCrSubsampleRatio422": 1,
"YCbCrSubsampleRatio420": 2,
"YCbCrSubsampleRatio440": 3,
"YCbCrSubsampleRatio411": 4,
"YCbCrSubsampleRatio410": 5,
}
)
// Enum returns a freshly allocated pointer holding a copy of x.
func (x YCbCrSubsampleRatioEnum) Enum() *YCbCrSubsampleRatioEnum {
p := new(YCbCrSubsampleRatioEnum)
*p = x
return p
}
// String returns the protobuf name of the enum value via the runtime.
func (x YCbCrSubsampleRatioEnum) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the reflective descriptor from the generated enum table.
func (YCbCrSubsampleRatioEnum) Descriptor() protoreflect.EnumDescriptor {
return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}
// Type returns the reflective enum type from the generated enum table.
func (YCbCrSubsampleRatioEnum) Type() protoreflect.EnumType {
return &file_ngolofuzz_proto_enumTypes[0]
}
// Number returns x as a protoreflect.EnumNumber.
func (x YCbCrSubsampleRatioEnum) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use YCbCrSubsampleRatioEnum.Descriptor instead.
func (YCbCrSubsampleRatioEnum) EnumDescriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// DecodeArgs is a generated protobuf message carrying a single bytes
// field R (wire field 1). NOTE(review): presumably the raw input handed
// to an image Decode call by the fuzz harness — confirm against the .proto.
type DecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-attaches the generated message info.
func (x *DecodeArgs) Reset() {
*x = DecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the runtime's text format.
func (x *DecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use; a nil receiver falls back to the
// type-only view.
func (x *DecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeArgs.ProtoReflect.Descriptor instead.
func (*DecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetR returns the R field, or nil for a nil receiver.
func (x *DecodeArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
// DecodeConfigArgs is a generated protobuf message carrying a single bytes
// field R (wire field 1); structurally identical to DecodeArgs.
type DecodeConfigArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-attaches the generated message info.
func (x *DecodeConfigArgs) Reset() {
*x = DecodeConfigArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the runtime's text format.
func (x *DecodeConfigArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeConfigArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x (see DecodeArgs.ProtoReflect).
func (x *DecodeConfigArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeConfigArgs.ProtoReflect.Descriptor instead.
func (*DecodeConfigArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetR returns the R field, or nil for a nil receiver.
func (x *DecodeConfigArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
// PointNgdotStringArgs is a generated protobuf message with no settable
// fields (only runtime bookkeeping). The Point*Args family below differs
// only in message-table index and raw-descriptor path.
type PointNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-attaches the generated message info.
func (x *PointNgdotStringArgs) Reset() {
*x = PointNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the runtime's text format.
func (x *PointNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PointNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info; a nil receiver falls back to the type-only view.
func (x *PointNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PointNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*PointNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// PointNgdotAddArgs is a generated field-less protobuf message (msgTypes[3]).
type PointNgdotAddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PointNgdotAddArgs) Reset() {
*x = PointNgdotAddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PointNgdotAddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PointNgdotAddArgs) ProtoMessage() {}
func (x *PointNgdotAddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PointNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*PointNgdotAddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// PointNgdotSubArgs is a generated field-less protobuf message (msgTypes[4]).
type PointNgdotSubArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PointNgdotSubArgs) Reset() {
*x = PointNgdotSubArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PointNgdotSubArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PointNgdotSubArgs) ProtoMessage() {}
func (x *PointNgdotSubArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PointNgdotSubArgs.ProtoReflect.Descriptor instead.
func (*PointNgdotSubArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// PointNgdotMulArgs is a generated protobuf message carrying one int64
// field K (wire field 1), msgTypes[5].
type PointNgdotMulArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
K int64 `protobuf:"varint,1,opt,name=k,proto3" json:"k,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PointNgdotMulArgs) Reset() {
*x = PointNgdotMulArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PointNgdotMulArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PointNgdotMulArgs) ProtoMessage() {}
func (x *PointNgdotMulArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PointNgdotMulArgs.ProtoReflect.Descriptor instead.
func (*PointNgdotMulArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetK returns the K field, or 0 for a nil receiver.
func (x *PointNgdotMulArgs) GetK() int64 {
if x != nil {
return x.K
}
return 0
}
// PointNgdotInArgs is a generated field-less protobuf message (msgTypes[6]).
type PointNgdotInArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PointNgdotInArgs) Reset() {
*x = PointNgdotInArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PointNgdotInArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PointNgdotInArgs) ProtoMessage() {}
func (x *PointNgdotInArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PointNgdotInArgs.ProtoReflect.Descriptor instead.
func (*PointNgdotInArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// PointNgdotModArgs is a generated field-less protobuf message (msgTypes[7]).
type PointNgdotModArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PointNgdotModArgs) Reset() {
*x = PointNgdotModArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PointNgdotModArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PointNgdotModArgs) ProtoMessage() {}
func (x *PointNgdotModArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PointNgdotModArgs.ProtoReflect.Descriptor instead.
func (*PointNgdotModArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// PointNgdotEqArgs is a generated field-less protobuf message (msgTypes[8]).
type PointNgdotEqArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PointNgdotEqArgs) Reset() {
*x = PointNgdotEqArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PointNgdotEqArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PointNgdotEqArgs) ProtoMessage() {}
func (x *PointNgdotEqArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PointNgdotEqArgs.ProtoReflect.Descriptor instead.
func (*PointNgdotEqArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// PtArgs is a generated protobuf message carrying two int64 fields
// X and Y (wire fields 1 and 2), msgTypes[9].
type PtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=X,proto3" json:"X,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=Y,proto3" json:"Y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-attaches the generated message info.
func (x *PtArgs) Reset() {
*x = PtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the runtime's text format.
func (x *PtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info; a nil receiver falls back to the type-only view.
func (x *PtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PtArgs.ProtoReflect.Descriptor instead.
func (*PtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *PtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *PtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// RectangleNgdotStringArgs is a generated protobuf message with no settable
// fields. The Rectangle*Args family below differs only in message-table
// index and raw-descriptor path.
type RectangleNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-attaches the generated message info.
func (x *RectangleNgdotStringArgs) Reset() {
*x = RectangleNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the runtime's text format.
func (x *RectangleNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info; a nil receiver falls back to the type-only view.
func (x *RectangleNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// RectangleNgdotDxArgs is a generated field-less protobuf message (msgTypes[11]).
type RectangleNgdotDxArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotDxArgs) Reset() {
*x = RectangleNgdotDxArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotDxArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotDxArgs) ProtoMessage() {}
func (x *RectangleNgdotDxArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotDxArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotDxArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// RectangleNgdotDyArgs is a generated field-less protobuf message (msgTypes[12]).
type RectangleNgdotDyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotDyArgs) Reset() {
*x = RectangleNgdotDyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotDyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotDyArgs) ProtoMessage() {}
func (x *RectangleNgdotDyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotDyArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotDyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// RectangleNgdotSizeArgs is a generated field-less protobuf message (msgTypes[13]).
type RectangleNgdotSizeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotSizeArgs) Reset() {
*x = RectangleNgdotSizeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotSizeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotSizeArgs) ProtoMessage() {}
func (x *RectangleNgdotSizeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotSizeArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotSizeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// RectangleNgdotAddArgs is a generated field-less protobuf message (msgTypes[14]).
type RectangleNgdotAddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotAddArgs) Reset() {
*x = RectangleNgdotAddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotAddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotAddArgs) ProtoMessage() {}
func (x *RectangleNgdotAddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotAddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// RectangleNgdotSubArgs is a generated field-less protobuf message (msgTypes[15]).
type RectangleNgdotSubArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotSubArgs) Reset() {
*x = RectangleNgdotSubArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotSubArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotSubArgs) ProtoMessage() {}
func (x *RectangleNgdotSubArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotSubArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotSubArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// RectangleNgdotInsetArgs is a generated protobuf message carrying one
// int64 field N (wire field 1), msgTypes[16].
type RectangleNgdotInsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-attaches the generated message info.
func (x *RectangleNgdotInsetArgs) Reset() {
*x = RectangleNgdotInsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the runtime's text format.
func (x *RectangleNgdotInsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotInsetArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info; a nil receiver falls back to the type-only view.
func (x *RectangleNgdotInsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotInsetArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotInsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetN returns the N field, or 0 for a nil receiver.
func (x *RectangleNgdotInsetArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// RectangleNgdotIntersectArgs is a generated protobuf message with no
// settable fields. The messages below (Intersect/Union/Empty/Eq/Overlaps/
// In/Canon) differ only in message-table index and raw-descriptor path.
type RectangleNgdotIntersectArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-attaches the generated message info.
func (x *RectangleNgdotIntersectArgs) Reset() {
*x = RectangleNgdotIntersectArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the runtime's text format.
func (x *RectangleNgdotIntersectArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotIntersectArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info; a nil receiver falls back to the type-only view.
func (x *RectangleNgdotIntersectArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotIntersectArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotIntersectArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// RectangleNgdotUnionArgs is a generated field-less protobuf message (msgTypes[18]).
type RectangleNgdotUnionArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotUnionArgs) Reset() {
*x = RectangleNgdotUnionArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotUnionArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotUnionArgs) ProtoMessage() {}
func (x *RectangleNgdotUnionArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotUnionArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotUnionArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// RectangleNgdotEmptyArgs is a generated field-less protobuf message (msgTypes[19]).
type RectangleNgdotEmptyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotEmptyArgs) Reset() {
*x = RectangleNgdotEmptyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotEmptyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotEmptyArgs) ProtoMessage() {}
func (x *RectangleNgdotEmptyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotEmptyArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotEmptyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// RectangleNgdotEqArgs is a generated field-less protobuf message (msgTypes[20]).
type RectangleNgdotEqArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotEqArgs) Reset() {
*x = RectangleNgdotEqArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotEqArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotEqArgs) ProtoMessage() {}
func (x *RectangleNgdotEqArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotEqArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotEqArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// RectangleNgdotOverlapsArgs is a generated field-less protobuf message (msgTypes[21]).
type RectangleNgdotOverlapsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotOverlapsArgs) Reset() {
*x = RectangleNgdotOverlapsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotOverlapsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotOverlapsArgs) ProtoMessage() {}
func (x *RectangleNgdotOverlapsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotOverlapsArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotOverlapsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// RectangleNgdotInArgs is a generated field-less protobuf message (msgTypes[22]).
type RectangleNgdotInArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotInArgs) Reset() {
*x = RectangleNgdotInArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotInArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotInArgs) ProtoMessage() {}
func (x *RectangleNgdotInArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotInArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotInArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// RectangleNgdotCanonArgs is a generated field-less protobuf message (msgTypes[23]).
type RectangleNgdotCanonArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotCanonArgs) Reset() {
*x = RectangleNgdotCanonArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotCanonArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotCanonArgs) ProtoMessage() {}
func (x *RectangleNgdotCanonArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotCanonArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotCanonArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// RectangleNgdotAtArgs is a generated protobuf message carrying two int64
// fields X and Y (wire fields 1 and 2), msgTypes[24].
type RectangleNgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to its zero value and re-attaches the generated message info.
func (x *RectangleNgdotAtArgs) Reset() {
*x = RectangleNgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the runtime's text format.
func (x *RectangleNgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotAtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info; a nil receiver falls back to the type-only view.
func (x *RectangleNgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotAtArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *RectangleNgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *RectangleNgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// RectangleNgdotRGBA64AtArgs is a generated protobuf message carrying two
// int64 fields X and Y (wire fields 1 and 2), msgTypes[25]; structurally
// identical to RectangleNgdotAtArgs.
type RectangleNgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RectangleNgdotRGBA64AtArgs) Reset() {
*x = RectangleNgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RectangleNgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RectangleNgdotRGBA64AtArgs) ProtoMessage() {}
func (x *RectangleNgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *RectangleNgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *RectangleNgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// RectangleNgdotBoundsArgs is a generated protobuf message (index 26
// in ngolofuzz.proto) carrying no fields.
type RectangleNgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RectangleNgdotBoundsArgs) Reset() {
*x = RectangleNgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RectangleNgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RectangleNgdotBoundsArgs as a proto.Message.
func (*RectangleNgdotBoundsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RectangleNgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// RectangleNgdotColorModelArgs is a generated protobuf message (index
// 27 in ngolofuzz.proto) carrying no fields.
type RectangleNgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RectangleNgdotColorModelArgs) Reset() {
*x = RectangleNgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RectangleNgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RectangleNgdotColorModelArgs as a proto.Message.
func (*RectangleNgdotColorModelArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RectangleNgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectangleNgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*RectangleNgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// RectArgs is a generated protobuf message (index 28 in
// ngolofuzz.proto) carrying four int64 fields: X0, Y0, X1, Y1.
// NOTE(review): presumably the corner coordinates for a fuzzed
// image.Rect call — confirm against ngolofuzz.proto.
type RectArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X0 int64 `protobuf:"varint,1,opt,name=x0,proto3" json:"x0,omitempty"`
Y0 int64 `protobuf:"varint,2,opt,name=y0,proto3" json:"y0,omitempty"`
X1 int64 `protobuf:"varint,3,opt,name=x1,proto3" json:"x1,omitempty"`
Y1 int64 `protobuf:"varint,4,opt,name=y1,proto3" json:"y1,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RectArgs) Reset() {
*x = RectArgs{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RectArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RectArgs as a proto.Message.
func (*RectArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RectArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RectArgs.ProtoReflect.Descriptor instead.
func (*RectArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
// GetX0 returns the X0 field, or 0 for a nil receiver.
func (x *RectArgs) GetX0() int64 {
if x != nil {
return x.X0
}
return 0
}
// GetY0 returns the Y0 field, or 0 for a nil receiver.
func (x *RectArgs) GetY0() int64 {
if x != nil {
return x.Y0
}
return 0
}
// GetX1 returns the X1 field, or 0 for a nil receiver.
func (x *RectArgs) GetX1() int64 {
if x != nil {
return x.X1
}
return 0
}
// GetY1 returns the Y1 field, or 0 for a nil receiver.
func (x *RectArgs) GetY1() int64 {
if x != nil {
return x.Y1
}
return 0
}
// RGBANgdotColorModelArgs is a generated protobuf message (index 29 in
// ngolofuzz.proto) carrying no fields.
type RGBANgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBANgdotColorModelArgs) Reset() {
*x = RGBANgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBANgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBANgdotColorModelArgs as a proto.Message.
func (*RGBANgdotColorModelArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBANgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBANgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*RGBANgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
// RGBANgdotBoundsArgs is a generated protobuf message (index 30 in
// ngolofuzz.proto) carrying no fields.
type RGBANgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBANgdotBoundsArgs) Reset() {
*x = RGBANgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBANgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBANgdotBoundsArgs as a proto.Message.
func (*RGBANgdotBoundsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBANgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBANgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*RGBANgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}
// RGBANgdotAtArgs is a generated protobuf message (index 31 in
// ngolofuzz.proto) carrying two int64 fields, X and Y.
type RGBANgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBANgdotAtArgs) Reset() {
*x = RGBANgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBANgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBANgdotAtArgs as a proto.Message.
func (*RGBANgdotAtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBANgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBANgdotAtArgs.ProtoReflect.Descriptor instead.
func (*RGBANgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *RGBANgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *RGBANgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// RGBANgdotRGBA64AtArgs is a generated protobuf message (index 32 in
// ngolofuzz.proto) carrying two int64 fields, X and Y.
type RGBANgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBANgdotRGBA64AtArgs) Reset() {
*x = RGBANgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBANgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBANgdotRGBA64AtArgs as a proto.Message.
func (*RGBANgdotRGBA64AtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBANgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBANgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*RGBANgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *RGBANgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *RGBANgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// RGBANgdotRGBAAtArgs is a generated protobuf message (index 33 in
// ngolofuzz.proto) carrying two int64 fields, X and Y.
type RGBANgdotRGBAAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBANgdotRGBAAtArgs) Reset() {
*x = RGBANgdotRGBAAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBANgdotRGBAAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBANgdotRGBAAtArgs as a proto.Message.
func (*RGBANgdotRGBAAtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBANgdotRGBAAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBANgdotRGBAAtArgs.ProtoReflect.Descriptor instead.
func (*RGBANgdotRGBAAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{33}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *RGBANgdotRGBAAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *RGBANgdotRGBAAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// RGBANgdotPixOffsetArgs is a generated protobuf message (index 34 in
// ngolofuzz.proto) carrying two int64 fields, X and Y.
type RGBANgdotPixOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBANgdotPixOffsetArgs) Reset() {
*x = RGBANgdotPixOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBANgdotPixOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBANgdotPixOffsetArgs as a proto.Message.
func (*RGBANgdotPixOffsetArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBANgdotPixOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[34]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBANgdotPixOffsetArgs.ProtoReflect.Descriptor instead.
func (*RGBANgdotPixOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{34}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *RGBANgdotPixOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *RGBANgdotPixOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// RGBANgdotSubImageArgs is a generated protobuf message (index 35 in
// ngolofuzz.proto) carrying no fields.
type RGBANgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBANgdotSubImageArgs) Reset() {
*x = RGBANgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBANgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBANgdotSubImageArgs as a proto.Message.
func (*RGBANgdotSubImageArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBANgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[35]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBANgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*RGBANgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{35}
}
// RGBANgdotOpaqueArgs is a generated protobuf message (index 36 in
// ngolofuzz.proto) carrying no fields.
type RGBANgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBANgdotOpaqueArgs) Reset() {
*x = RGBANgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBANgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBANgdotOpaqueArgs as a proto.Message.
func (*RGBANgdotOpaqueArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBANgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[36]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBANgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*RGBANgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{36}
}
// NewRGBAArgs is a generated protobuf message (index 37 in
// ngolofuzz.proto) carrying no fields.
type NewRGBAArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NewRGBAArgs) Reset() {
*x = NewRGBAArgs{}
mi := &file_ngolofuzz_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NewRGBAArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NewRGBAArgs as a proto.Message.
func (*NewRGBAArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NewRGBAArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[37]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewRGBAArgs.ProtoReflect.Descriptor instead.
func (*NewRGBAArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{37}
}
// RGBA64NgdotColorModelArgs is a generated protobuf message (index 38
// in ngolofuzz.proto) carrying no fields.
type RGBA64NgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBA64NgdotColorModelArgs) Reset() {
*x = RGBA64NgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBA64NgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBA64NgdotColorModelArgs as a proto.Message.
func (*RGBA64NgdotColorModelArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBA64NgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[38]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBA64NgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*RGBA64NgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{38}
}
// RGBA64NgdotBoundsArgs is a generated protobuf message (index 39 in
// ngolofuzz.proto) carrying no fields.
type RGBA64NgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBA64NgdotBoundsArgs) Reset() {
*x = RGBA64NgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBA64NgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBA64NgdotBoundsArgs as a proto.Message.
func (*RGBA64NgdotBoundsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBA64NgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[39]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBA64NgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*RGBA64NgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{39}
}
// RGBA64NgdotAtArgs is a generated protobuf message (index 40 in
// ngolofuzz.proto) carrying two int64 fields, X and Y.
type RGBA64NgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBA64NgdotAtArgs) Reset() {
*x = RGBA64NgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBA64NgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBA64NgdotAtArgs as a proto.Message.
func (*RGBA64NgdotAtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBA64NgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[40]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBA64NgdotAtArgs.ProtoReflect.Descriptor instead.
func (*RGBA64NgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{40}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *RGBA64NgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *RGBA64NgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// RGBA64NgdotRGBA64AtArgs is a generated protobuf message (index 41 in
// ngolofuzz.proto) carrying two int64 fields, X and Y.
type RGBA64NgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBA64NgdotRGBA64AtArgs) Reset() {
*x = RGBA64NgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBA64NgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBA64NgdotRGBA64AtArgs as a proto.Message.
func (*RGBA64NgdotRGBA64AtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBA64NgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[41]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBA64NgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*RGBA64NgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{41}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *RGBA64NgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *RGBA64NgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// RGBA64NgdotPixOffsetArgs is a generated protobuf message (index 42
// in ngolofuzz.proto) carrying two int64 fields, X and Y.
type RGBA64NgdotPixOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBA64NgdotPixOffsetArgs) Reset() {
*x = RGBA64NgdotPixOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBA64NgdotPixOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBA64NgdotPixOffsetArgs as a proto.Message.
func (*RGBA64NgdotPixOffsetArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBA64NgdotPixOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[42]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBA64NgdotPixOffsetArgs.ProtoReflect.Descriptor instead.
func (*RGBA64NgdotPixOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{42}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *RGBA64NgdotPixOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *RGBA64NgdotPixOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// RGBA64NgdotSubImageArgs is a generated protobuf message (index 43 in
// ngolofuzz.proto) carrying no fields.
type RGBA64NgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBA64NgdotSubImageArgs) Reset() {
*x = RGBA64NgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBA64NgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBA64NgdotSubImageArgs as a proto.Message.
func (*RGBA64NgdotSubImageArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBA64NgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[43]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBA64NgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*RGBA64NgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{43}
}
// RGBA64NgdotOpaqueArgs is a generated protobuf message (index 44 in
// ngolofuzz.proto) carrying no fields.
type RGBA64NgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *RGBA64NgdotOpaqueArgs) Reset() {
*x = RGBA64NgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *RGBA64NgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags RGBA64NgdotOpaqueArgs as a proto.Message.
func (*RGBA64NgdotOpaqueArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *RGBA64NgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[44]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RGBA64NgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*RGBA64NgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{44}
}
// NewRGBA64Args is a generated protobuf message (index 45 in
// ngolofuzz.proto) carrying no fields.
type NewRGBA64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NewRGBA64Args) Reset() {
*x = NewRGBA64Args{}
mi := &file_ngolofuzz_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NewRGBA64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NewRGBA64Args as a proto.Message.
func (*NewRGBA64Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NewRGBA64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[45]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewRGBA64Args.ProtoReflect.Descriptor instead.
func (*NewRGBA64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{45}
}
// NRGBANgdotColorModelArgs is a generated protobuf message (index 46
// in ngolofuzz.proto) carrying no fields.
type NRGBANgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NRGBANgdotColorModelArgs) Reset() {
*x = NRGBANgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NRGBANgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NRGBANgdotColorModelArgs as a proto.Message.
func (*NRGBANgdotColorModelArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NRGBANgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[46]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBANgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*NRGBANgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{46}
}
// NRGBANgdotBoundsArgs is a generated protobuf message (index 47 in
// ngolofuzz.proto) carrying no fields.
type NRGBANgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NRGBANgdotBoundsArgs) Reset() {
*x = NRGBANgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NRGBANgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NRGBANgdotBoundsArgs as a proto.Message.
func (*NRGBANgdotBoundsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NRGBANgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[47]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBANgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*NRGBANgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{47}
}
// NRGBANgdotAtArgs is a generated protobuf message (index 48 in
// ngolofuzz.proto) carrying two int64 fields, X and Y.
type NRGBANgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NRGBANgdotAtArgs) Reset() {
*x = NRGBANgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NRGBANgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NRGBANgdotAtArgs as a proto.Message.
func (*NRGBANgdotAtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NRGBANgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[48]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBANgdotAtArgs.ProtoReflect.Descriptor instead.
func (*NRGBANgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{48}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *NRGBANgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *NRGBANgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NRGBANgdotRGBA64AtArgs is a generated protobuf message (index 49 in
// ngolofuzz.proto) carrying two int64 fields, X and Y.
type NRGBANgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NRGBANgdotRGBA64AtArgs) Reset() {
*x = NRGBANgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NRGBANgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NRGBANgdotRGBA64AtArgs as a proto.Message.
func (*NRGBANgdotRGBA64AtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NRGBANgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[49]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBANgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*NRGBANgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{49}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *NRGBANgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *NRGBANgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NRGBANgdotNRGBAAtArgs is a generated protobuf message (index 50 in
// ngolofuzz.proto) carrying two int64 fields, X and Y.
type NRGBANgdotNRGBAAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NRGBANgdotNRGBAAtArgs) Reset() {
*x = NRGBANgdotNRGBAAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NRGBANgdotNRGBAAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NRGBANgdotNRGBAAtArgs as a proto.Message.
func (*NRGBANgdotNRGBAAtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NRGBANgdotNRGBAAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[50]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBANgdotNRGBAAtArgs.ProtoReflect.Descriptor instead.
func (*NRGBANgdotNRGBAAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{50}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *NRGBANgdotNRGBAAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *NRGBANgdotNRGBAAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NRGBANgdotPixOffsetArgs is a generated protobuf message (index 51 in
// ngolofuzz.proto) carrying two int64 fields, X and Y.
type NRGBANgdotPixOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NRGBANgdotPixOffsetArgs) Reset() {
*x = NRGBANgdotPixOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[51]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NRGBANgdotPixOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NRGBANgdotPixOffsetArgs as a proto.Message.
func (*NRGBANgdotPixOffsetArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NRGBANgdotPixOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[51]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBANgdotPixOffsetArgs.ProtoReflect.Descriptor instead.
func (*NRGBANgdotPixOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{51}
}
// GetX returns the X field, or 0 for a nil receiver.
func (x *NRGBANgdotPixOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns the Y field, or 0 for a nil receiver.
func (x *NRGBANgdotPixOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NRGBANgdotSubImageArgs is a generated protobuf message (index 52 in
// ngolofuzz.proto) carrying no fields.
type NRGBANgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NRGBANgdotSubImageArgs) Reset() {
*x = NRGBANgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[52]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NRGBANgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NRGBANgdotSubImageArgs as a proto.Message.
func (*NRGBANgdotSubImageArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NRGBANgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[52]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBANgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*NRGBANgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{52}
}
// NRGBANgdotOpaqueArgs is a generated protobuf message (index 53 in
// ngolofuzz.proto) carrying no fields.
type NRGBANgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NRGBANgdotOpaqueArgs) Reset() {
*x = NRGBANgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[53]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NRGBANgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NRGBANgdotOpaqueArgs as a proto.Message.
func (*NRGBANgdotOpaqueArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NRGBANgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[53]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBANgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*NRGBANgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{53}
}
// NewNRGBAArgs is a generated protobuf message (index 54 in
// ngolofuzz.proto) carrying no fields.
type NewNRGBAArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to an empty message and reattaches its type info.
func (x *NewNRGBAArgs) Reset() {
*x = NewNRGBAArgs{}
mi := &file_ngolofuzz_proto_msgTypes[54]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NewNRGBAArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags NewNRGBAArgs as a proto.Message.
func (*NewNRGBAArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching its
// type info; a nil receiver falls back to the type-only view.
func (x *NewNRGBAArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[54]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewNRGBAArgs.ProtoReflect.Descriptor instead.
func (*NewNRGBAArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{54}
}
// NRGBA64NgdotColorModelArgs is a generated protobuf message with no fields.
type NRGBA64NgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NRGBA64NgdotColorModelArgs) Reset() {
*x = NRGBA64NgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[55]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NRGBA64NgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NRGBA64NgdotColorModelArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NRGBA64NgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[55]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBA64NgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*NRGBA64NgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{55}
}
// NRGBA64NgdotBoundsArgs is a generated protobuf message with no fields.
type NRGBA64NgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NRGBA64NgdotBoundsArgs) Reset() {
*x = NRGBA64NgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[56]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NRGBA64NgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NRGBA64NgdotBoundsArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NRGBA64NgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[56]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBA64NgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*NRGBA64NgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{56}
}
// NRGBA64NgdotAtArgs is a generated protobuf message carrying two int64
// fields, X and Y.
type NRGBA64NgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NRGBA64NgdotAtArgs) Reset() {
*x = NRGBA64NgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[57]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NRGBA64NgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NRGBA64NgdotAtArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NRGBA64NgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[57]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBA64NgdotAtArgs.ProtoReflect.Descriptor instead.
func (*NRGBA64NgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{57}
}
// GetX returns X, or 0 for a nil receiver.
func (x *NRGBA64NgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *NRGBA64NgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NRGBA64NgdotRGBA64AtArgs is a generated protobuf message carrying two
// int64 fields, X and Y.
type NRGBA64NgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NRGBA64NgdotRGBA64AtArgs) Reset() {
*x = NRGBA64NgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[58]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NRGBA64NgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NRGBA64NgdotRGBA64AtArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NRGBA64NgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[58]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBA64NgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*NRGBA64NgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{58}
}
// GetX returns X, or 0 for a nil receiver.
func (x *NRGBA64NgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *NRGBA64NgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NRGBA64NgdotNRGBA64AtArgs is a generated protobuf message carrying two
// int64 fields, X and Y.
type NRGBA64NgdotNRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NRGBA64NgdotNRGBA64AtArgs) Reset() {
*x = NRGBA64NgdotNRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NRGBA64NgdotNRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NRGBA64NgdotNRGBA64AtArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NRGBA64NgdotNRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[59]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBA64NgdotNRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*NRGBA64NgdotNRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{59}
}
// GetX returns X, or 0 for a nil receiver.
func (x *NRGBA64NgdotNRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *NRGBA64NgdotNRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NRGBA64NgdotPixOffsetArgs is a generated protobuf message carrying two
// int64 fields, X and Y.
type NRGBA64NgdotPixOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NRGBA64NgdotPixOffsetArgs) Reset() {
*x = NRGBA64NgdotPixOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[60]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NRGBA64NgdotPixOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NRGBA64NgdotPixOffsetArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NRGBA64NgdotPixOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[60]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBA64NgdotPixOffsetArgs.ProtoReflect.Descriptor instead.
func (*NRGBA64NgdotPixOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{60}
}
// GetX returns X, or 0 for a nil receiver.
func (x *NRGBA64NgdotPixOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *NRGBA64NgdotPixOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NRGBA64NgdotSubImageArgs is a generated protobuf message with no fields.
type NRGBA64NgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NRGBA64NgdotSubImageArgs) Reset() {
*x = NRGBA64NgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[61]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NRGBA64NgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NRGBA64NgdotSubImageArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NRGBA64NgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[61]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBA64NgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*NRGBA64NgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{61}
}
// NRGBA64NgdotOpaqueArgs is a generated protobuf message with no fields.
type NRGBA64NgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NRGBA64NgdotOpaqueArgs) Reset() {
*x = NRGBA64NgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[62]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NRGBA64NgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NRGBA64NgdotOpaqueArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NRGBA64NgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[62]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NRGBA64NgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*NRGBA64NgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{62}
}
// NewNRGBA64Args is a generated protobuf message with no fields.
type NewNRGBA64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NewNRGBA64Args) Reset() {
*x = NewNRGBA64Args{}
mi := &file_ngolofuzz_proto_msgTypes[63]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NewNRGBA64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NewNRGBA64Args) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NewNRGBA64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[63]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewNRGBA64Args.ProtoReflect.Descriptor instead.
func (*NewNRGBA64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{63}
}
// AlphaNgdotColorModelArgs is a generated protobuf message with no fields.
type AlphaNgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *AlphaNgdotColorModelArgs) Reset() {
*x = AlphaNgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[64]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *AlphaNgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*AlphaNgdotColorModelArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *AlphaNgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[64]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AlphaNgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*AlphaNgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{64}
}
// AlphaNgdotBoundsArgs is a generated protobuf message with no fields.
type AlphaNgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *AlphaNgdotBoundsArgs) Reset() {
*x = AlphaNgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *AlphaNgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*AlphaNgdotBoundsArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *AlphaNgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[65]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AlphaNgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*AlphaNgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{65}
}
// AlphaNgdotAtArgs is a generated protobuf message carrying two int64
// fields, X and Y.
type AlphaNgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *AlphaNgdotAtArgs) Reset() {
*x = AlphaNgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[66]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *AlphaNgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*AlphaNgdotAtArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *AlphaNgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[66]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AlphaNgdotAtArgs.ProtoReflect.Descriptor instead.
func (*AlphaNgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{66}
}
// GetX returns X, or 0 for a nil receiver.
func (x *AlphaNgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *AlphaNgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// AlphaNgdotRGBA64AtArgs is a generated protobuf message carrying two
// int64 fields, X and Y.
type AlphaNgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *AlphaNgdotRGBA64AtArgs) Reset() {
*x = AlphaNgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[67]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *AlphaNgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*AlphaNgdotRGBA64AtArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *AlphaNgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[67]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AlphaNgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*AlphaNgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{67}
}
// GetX returns X, or 0 for a nil receiver.
func (x *AlphaNgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *AlphaNgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// AlphaNgdotAlphaAtArgs is a generated protobuf message carrying two
// int64 fields, X and Y.
type AlphaNgdotAlphaAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *AlphaNgdotAlphaAtArgs) Reset() {
*x = AlphaNgdotAlphaAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *AlphaNgdotAlphaAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*AlphaNgdotAlphaAtArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *AlphaNgdotAlphaAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[68]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AlphaNgdotAlphaAtArgs.ProtoReflect.Descriptor instead.
func (*AlphaNgdotAlphaAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{68}
}
// GetX returns X, or 0 for a nil receiver.
func (x *AlphaNgdotAlphaAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *AlphaNgdotAlphaAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// AlphaNgdotPixOffsetArgs is a generated protobuf message carrying two
// int64 fields, X and Y.
type AlphaNgdotPixOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *AlphaNgdotPixOffsetArgs) Reset() {
*x = AlphaNgdotPixOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *AlphaNgdotPixOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*AlphaNgdotPixOffsetArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *AlphaNgdotPixOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[69]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AlphaNgdotPixOffsetArgs.ProtoReflect.Descriptor instead.
func (*AlphaNgdotPixOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{69}
}
// GetX returns X, or 0 for a nil receiver.
func (x *AlphaNgdotPixOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *AlphaNgdotPixOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// AlphaNgdotSubImageArgs is a generated protobuf message with no fields.
type AlphaNgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *AlphaNgdotSubImageArgs) Reset() {
*x = AlphaNgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[70]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *AlphaNgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*AlphaNgdotSubImageArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *AlphaNgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[70]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AlphaNgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*AlphaNgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{70}
}
// AlphaNgdotOpaqueArgs is a generated protobuf message with no fields.
type AlphaNgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *AlphaNgdotOpaqueArgs) Reset() {
*x = AlphaNgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[71]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *AlphaNgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*AlphaNgdotOpaqueArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *AlphaNgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[71]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AlphaNgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*AlphaNgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{71}
}
// NewAlphaArgs is a generated protobuf message with no fields.
type NewAlphaArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NewAlphaArgs) Reset() {
*x = NewAlphaArgs{}
mi := &file_ngolofuzz_proto_msgTypes[72]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NewAlphaArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NewAlphaArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NewAlphaArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[72]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewAlphaArgs.ProtoReflect.Descriptor instead.
func (*NewAlphaArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{72}
}
// Alpha16NgdotColorModelArgs is a generated protobuf message with no fields.
type Alpha16NgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *Alpha16NgdotColorModelArgs) Reset() {
*x = Alpha16NgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[73]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *Alpha16NgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*Alpha16NgdotColorModelArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *Alpha16NgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[73]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Alpha16NgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*Alpha16NgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{73}
}
// Alpha16NgdotBoundsArgs is a generated protobuf message with no fields.
type Alpha16NgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *Alpha16NgdotBoundsArgs) Reset() {
*x = Alpha16NgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[74]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *Alpha16NgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*Alpha16NgdotBoundsArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *Alpha16NgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[74]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Alpha16NgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*Alpha16NgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{74}
}
// Alpha16NgdotAtArgs is a generated protobuf message carrying two int64
// fields, X and Y.
type Alpha16NgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *Alpha16NgdotAtArgs) Reset() {
*x = Alpha16NgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[75]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *Alpha16NgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*Alpha16NgdotAtArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *Alpha16NgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[75]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Alpha16NgdotAtArgs.ProtoReflect.Descriptor instead.
func (*Alpha16NgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{75}
}
// GetX returns X, or 0 for a nil receiver.
func (x *Alpha16NgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *Alpha16NgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// Alpha16NgdotRGBA64AtArgs is a generated protobuf message carrying two
// int64 fields, X and Y.
type Alpha16NgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *Alpha16NgdotRGBA64AtArgs) Reset() {
*x = Alpha16NgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[76]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *Alpha16NgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*Alpha16NgdotRGBA64AtArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *Alpha16NgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[76]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Alpha16NgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*Alpha16NgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{76}
}
// GetX returns X, or 0 for a nil receiver.
func (x *Alpha16NgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *Alpha16NgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// Alpha16NgdotAlpha16AtArgs is a generated protobuf message carrying two
// int64 fields, X and Y.
type Alpha16NgdotAlpha16AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *Alpha16NgdotAlpha16AtArgs) Reset() {
*x = Alpha16NgdotAlpha16AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[77]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *Alpha16NgdotAlpha16AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*Alpha16NgdotAlpha16AtArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *Alpha16NgdotAlpha16AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[77]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Alpha16NgdotAlpha16AtArgs.ProtoReflect.Descriptor instead.
func (*Alpha16NgdotAlpha16AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{77}
}
// GetX returns X, or 0 for a nil receiver.
func (x *Alpha16NgdotAlpha16AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *Alpha16NgdotAlpha16AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// Alpha16NgdotPixOffsetArgs is a generated protobuf message carrying two
// int64 fields, X and Y.
type Alpha16NgdotPixOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *Alpha16NgdotPixOffsetArgs) Reset() {
*x = Alpha16NgdotPixOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[78]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *Alpha16NgdotPixOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*Alpha16NgdotPixOffsetArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *Alpha16NgdotPixOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[78]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Alpha16NgdotPixOffsetArgs.ProtoReflect.Descriptor instead.
func (*Alpha16NgdotPixOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{78}
}
// GetX returns X, or 0 for a nil receiver.
func (x *Alpha16NgdotPixOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *Alpha16NgdotPixOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// Alpha16NgdotSubImageArgs is a generated protobuf message with no fields.
type Alpha16NgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *Alpha16NgdotSubImageArgs) Reset() {
*x = Alpha16NgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[79]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *Alpha16NgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*Alpha16NgdotSubImageArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *Alpha16NgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[79]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Alpha16NgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*Alpha16NgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{79}
}
// Alpha16NgdotOpaqueArgs is a generated protobuf message with no fields.
type Alpha16NgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *Alpha16NgdotOpaqueArgs) Reset() {
*x = Alpha16NgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[80]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *Alpha16NgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*Alpha16NgdotOpaqueArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *Alpha16NgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[80]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Alpha16NgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*Alpha16NgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{80}
}
// NewAlpha16Args is a generated protobuf message with no fields.
type NewAlpha16Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *NewAlpha16Args) Reset() {
*x = NewAlpha16Args{}
mi := &file_ngolofuzz_proto_msgTypes[81]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *NewAlpha16Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NewAlpha16Args) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *NewAlpha16Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[81]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewAlpha16Args.ProtoReflect.Descriptor instead.
func (*NewAlpha16Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{81}
}
// GrayNgdotColorModelArgs is a generated protobuf message with no fields.
type GrayNgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *GrayNgdotColorModelArgs) Reset() {
*x = GrayNgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[82]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *GrayNgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*GrayNgdotColorModelArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *GrayNgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[82]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GrayNgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*GrayNgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{82}
}
// GrayNgdotBoundsArgs is a generated protobuf message with no fields.
type GrayNgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *GrayNgdotBoundsArgs) Reset() {
*x = GrayNgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[83]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *GrayNgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*GrayNgdotBoundsArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *GrayNgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[83]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GrayNgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*GrayNgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{83}
}
// GrayNgdotAtArgs is a generated protobuf message carrying two int64
// fields, X and Y.
type GrayNgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached type info.
func (x *GrayNgdotAtArgs) Reset() {
*x = GrayNgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[84]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message in protobuf text form.
func (x *GrayNgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*GrayNgdotAtArgs) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily storing
// the message type info on first use; a nil receiver is handled.
func (x *GrayNgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[84]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GrayNgdotAtArgs.ProtoReflect.Descriptor instead.
func (*GrayNgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{84}
}
// GetX returns X, or 0 for a nil receiver.
func (x *GrayNgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *GrayNgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// GrayNgdotRGBA64AtArgs is a generated message (msgTypes[85]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type GrayNgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GrayNgdotRGBA64AtArgs) Reset() {
*x = GrayNgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[85]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GrayNgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GrayNgdotRGBA64AtArgs) ProtoMessage() {}
func (x *GrayNgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[85]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GrayNgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*GrayNgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{85}
}
// GetX returns X, or 0 for a nil receiver.
func (x *GrayNgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *GrayNgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// GrayNgdotGrayAtArgs is a generated message (msgTypes[86]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type GrayNgdotGrayAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GrayNgdotGrayAtArgs) Reset() {
*x = GrayNgdotGrayAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[86]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GrayNgdotGrayAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GrayNgdotGrayAtArgs) ProtoMessage() {}
func (x *GrayNgdotGrayAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[86]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GrayNgdotGrayAtArgs.ProtoReflect.Descriptor instead.
func (*GrayNgdotGrayAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{86}
}
// GetX returns X, or 0 for a nil receiver.
func (x *GrayNgdotGrayAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *GrayNgdotGrayAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// GrayNgdotPixOffsetArgs is a generated message (msgTypes[87]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type GrayNgdotPixOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GrayNgdotPixOffsetArgs) Reset() {
*x = GrayNgdotPixOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[87]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GrayNgdotPixOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GrayNgdotPixOffsetArgs) ProtoMessage() {}
func (x *GrayNgdotPixOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[87]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GrayNgdotPixOffsetArgs.ProtoReflect.Descriptor instead.
func (*GrayNgdotPixOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{87}
}
// GetX returns X, or 0 for a nil receiver.
func (x *GrayNgdotPixOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *GrayNgdotPixOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// GrayNgdotSubImageArgs is a generated, field-less message (msgTypes[88]).
type GrayNgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GrayNgdotSubImageArgs) Reset() {
*x = GrayNgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[88]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GrayNgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GrayNgdotSubImageArgs) ProtoMessage() {}
func (x *GrayNgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[88]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GrayNgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*GrayNgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{88}
}
// GrayNgdotOpaqueArgs is a generated, field-less message (msgTypes[89]).
type GrayNgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GrayNgdotOpaqueArgs) Reset() {
*x = GrayNgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[89]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GrayNgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GrayNgdotOpaqueArgs) ProtoMessage() {}
func (x *GrayNgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[89]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GrayNgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*GrayNgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{89}
}
// NewGrayArgs is a generated, field-less message (msgTypes[90]).
type NewGrayArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewGrayArgs) Reset() {
*x = NewGrayArgs{}
mi := &file_ngolofuzz_proto_msgTypes[90]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewGrayArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewGrayArgs) ProtoMessage() {}
func (x *NewGrayArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[90]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewGrayArgs.ProtoReflect.Descriptor instead.
func (*NewGrayArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{90}
}
// Gray16NgdotColorModelArgs is a generated, field-less message (msgTypes[91]).
type Gray16NgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Gray16NgdotColorModelArgs) Reset() {
*x = Gray16NgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[91]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Gray16NgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Gray16NgdotColorModelArgs) ProtoMessage() {}
func (x *Gray16NgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[91]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Gray16NgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*Gray16NgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{91}
}
// Gray16NgdotBoundsArgs is a generated, field-less message (msgTypes[92]).
type Gray16NgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Gray16NgdotBoundsArgs) Reset() {
*x = Gray16NgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[92]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Gray16NgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Gray16NgdotBoundsArgs) ProtoMessage() {}
func (x *Gray16NgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[92]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Gray16NgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*Gray16NgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{92}
}
// Gray16NgdotAtArgs is a generated message (msgTypes[93]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type Gray16NgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Gray16NgdotAtArgs) Reset() {
*x = Gray16NgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[93]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Gray16NgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Gray16NgdotAtArgs) ProtoMessage() {}
func (x *Gray16NgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[93]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Gray16NgdotAtArgs.ProtoReflect.Descriptor instead.
func (*Gray16NgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{93}
}
// GetX returns X, or 0 for a nil receiver.
func (x *Gray16NgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *Gray16NgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// Gray16NgdotRGBA64AtArgs is a generated message (msgTypes[94]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type Gray16NgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Gray16NgdotRGBA64AtArgs) Reset() {
*x = Gray16NgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[94]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Gray16NgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Gray16NgdotRGBA64AtArgs) ProtoMessage() {}
func (x *Gray16NgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[94]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Gray16NgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*Gray16NgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{94}
}
// GetX returns X, or 0 for a nil receiver.
func (x *Gray16NgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *Gray16NgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// Gray16NgdotGray16AtArgs is a generated message (msgTypes[95]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type Gray16NgdotGray16AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Gray16NgdotGray16AtArgs) Reset() {
*x = Gray16NgdotGray16AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[95]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Gray16NgdotGray16AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Gray16NgdotGray16AtArgs) ProtoMessage() {}
func (x *Gray16NgdotGray16AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[95]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Gray16NgdotGray16AtArgs.ProtoReflect.Descriptor instead.
func (*Gray16NgdotGray16AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{95}
}
// GetX returns X, or 0 for a nil receiver.
func (x *Gray16NgdotGray16AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *Gray16NgdotGray16AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// Gray16NgdotPixOffsetArgs is a generated message (msgTypes[96]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type Gray16NgdotPixOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Gray16NgdotPixOffsetArgs) Reset() {
*x = Gray16NgdotPixOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[96]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Gray16NgdotPixOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Gray16NgdotPixOffsetArgs) ProtoMessage() {}
func (x *Gray16NgdotPixOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[96]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Gray16NgdotPixOffsetArgs.ProtoReflect.Descriptor instead.
func (*Gray16NgdotPixOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{96}
}
// GetX returns X, or 0 for a nil receiver.
func (x *Gray16NgdotPixOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *Gray16NgdotPixOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// Gray16NgdotSubImageArgs is a generated, field-less message (msgTypes[97]).
type Gray16NgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Gray16NgdotSubImageArgs) Reset() {
*x = Gray16NgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[97]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Gray16NgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Gray16NgdotSubImageArgs) ProtoMessage() {}
func (x *Gray16NgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[97]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Gray16NgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*Gray16NgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{97}
}
// Gray16NgdotOpaqueArgs is a generated, field-less message (msgTypes[98]).
type Gray16NgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *Gray16NgdotOpaqueArgs) Reset() {
*x = Gray16NgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[98]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Gray16NgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Gray16NgdotOpaqueArgs) ProtoMessage() {}
func (x *Gray16NgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[98]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Gray16NgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*Gray16NgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{98}
}
// NewGray16Args is a generated, field-less message (msgTypes[99]).
type NewGray16Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewGray16Args) Reset() {
*x = NewGray16Args{}
mi := &file_ngolofuzz_proto_msgTypes[99]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewGray16Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewGray16Args) ProtoMessage() {}
func (x *NewGray16Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[99]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewGray16Args.ProtoReflect.Descriptor instead.
func (*NewGray16Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{99}
}
// CMYKNgdotColorModelArgs is a generated, field-less message (msgTypes[100]).
type CMYKNgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CMYKNgdotColorModelArgs) Reset() {
*x = CMYKNgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[100]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CMYKNgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CMYKNgdotColorModelArgs) ProtoMessage() {}
func (x *CMYKNgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[100]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CMYKNgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*CMYKNgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{100}
}
// CMYKNgdotBoundsArgs is a generated, field-less message (msgTypes[101]).
type CMYKNgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CMYKNgdotBoundsArgs) Reset() {
*x = CMYKNgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[101]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CMYKNgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CMYKNgdotBoundsArgs) ProtoMessage() {}
func (x *CMYKNgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[101]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CMYKNgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*CMYKNgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{101}
}
// CMYKNgdotAtArgs is a generated message (msgTypes[102]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type CMYKNgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CMYKNgdotAtArgs) Reset() {
*x = CMYKNgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[102]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CMYKNgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CMYKNgdotAtArgs) ProtoMessage() {}
func (x *CMYKNgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[102]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CMYKNgdotAtArgs.ProtoReflect.Descriptor instead.
func (*CMYKNgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{102}
}
// GetX returns X, or 0 for a nil receiver.
func (x *CMYKNgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *CMYKNgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// CMYKNgdotRGBA64AtArgs is a generated message (msgTypes[103]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type CMYKNgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CMYKNgdotRGBA64AtArgs) Reset() {
*x = CMYKNgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[103]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CMYKNgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CMYKNgdotRGBA64AtArgs) ProtoMessage() {}
func (x *CMYKNgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[103]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CMYKNgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*CMYKNgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{103}
}
// GetX returns X, or 0 for a nil receiver.
func (x *CMYKNgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *CMYKNgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// CMYKNgdotCMYKAtArgs is a generated message (msgTypes[104]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type CMYKNgdotCMYKAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CMYKNgdotCMYKAtArgs) Reset() {
*x = CMYKNgdotCMYKAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[104]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CMYKNgdotCMYKAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CMYKNgdotCMYKAtArgs) ProtoMessage() {}
func (x *CMYKNgdotCMYKAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[104]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CMYKNgdotCMYKAtArgs.ProtoReflect.Descriptor instead.
func (*CMYKNgdotCMYKAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{104}
}
// GetX returns X, or 0 for a nil receiver.
func (x *CMYKNgdotCMYKAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *CMYKNgdotCMYKAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// CMYKNgdotPixOffsetArgs is a generated message (msgTypes[105]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type CMYKNgdotPixOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CMYKNgdotPixOffsetArgs) Reset() {
*x = CMYKNgdotPixOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[105]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CMYKNgdotPixOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CMYKNgdotPixOffsetArgs) ProtoMessage() {}
func (x *CMYKNgdotPixOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[105]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CMYKNgdotPixOffsetArgs.ProtoReflect.Descriptor instead.
func (*CMYKNgdotPixOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{105}
}
// GetX returns X, or 0 for a nil receiver.
func (x *CMYKNgdotPixOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *CMYKNgdotPixOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// CMYKNgdotSubImageArgs is a generated, field-less message (msgTypes[106]).
type CMYKNgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CMYKNgdotSubImageArgs) Reset() {
*x = CMYKNgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[106]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CMYKNgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CMYKNgdotSubImageArgs) ProtoMessage() {}
func (x *CMYKNgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[106]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CMYKNgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*CMYKNgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{106}
}
// CMYKNgdotOpaqueArgs is a generated, field-less message (msgTypes[107]).
type CMYKNgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CMYKNgdotOpaqueArgs) Reset() {
*x = CMYKNgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[107]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CMYKNgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CMYKNgdotOpaqueArgs) ProtoMessage() {}
func (x *CMYKNgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[107]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CMYKNgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*CMYKNgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{107}
}
// NewCMYKArgs is a generated, field-less message (msgTypes[108]).
type NewCMYKArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewCMYKArgs) Reset() {
*x = NewCMYKArgs{}
mi := &file_ngolofuzz_proto_msgTypes[108]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewCMYKArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewCMYKArgs) ProtoMessage() {}
func (x *NewCMYKArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[108]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewCMYKArgs.ProtoReflect.Descriptor instead.
func (*NewCMYKArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{108}
}
// PalettedNgdotColorModelArgs is a generated, field-less message (msgTypes[109]).
type PalettedNgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PalettedNgdotColorModelArgs) Reset() {
*x = PalettedNgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[109]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PalettedNgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PalettedNgdotColorModelArgs) ProtoMessage() {}
func (x *PalettedNgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[109]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PalettedNgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*PalettedNgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{109}
}
// PalettedNgdotBoundsArgs is a generated, field-less message (msgTypes[110]).
type PalettedNgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PalettedNgdotBoundsArgs) Reset() {
*x = PalettedNgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[110]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PalettedNgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PalettedNgdotBoundsArgs) ProtoMessage() {}
func (x *PalettedNgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[110]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PalettedNgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*PalettedNgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{110}
}
// PalettedNgdotAtArgs is a generated message (msgTypes[111]) carrying int64
// X and Y coordinates (protobuf fields 1 and 2).
type PalettedNgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PalettedNgdotAtArgs) Reset() {
*x = PalettedNgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[111]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PalettedNgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PalettedNgdotAtArgs) ProtoMessage() {}
func (x *PalettedNgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[111]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PalettedNgdotAtArgs.ProtoReflect.Descriptor instead.
func (*PalettedNgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{111}
}
// GetX returns X, or 0 for a nil receiver.
func (x *PalettedNgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *PalettedNgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// PalettedNgdotRGBA64AtArgs is a generated message (msgTypes[112]) carrying
// int64 X and Y coordinates (protobuf fields 1 and 2).
type PalettedNgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PalettedNgdotRGBA64AtArgs) Reset() {
*x = PalettedNgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[112]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PalettedNgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PalettedNgdotRGBA64AtArgs) ProtoMessage() {}
func (x *PalettedNgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[112]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PalettedNgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*PalettedNgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{112}
}
// GetX returns X, or 0 for a nil receiver.
func (x *PalettedNgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *PalettedNgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// PalettedNgdotPixOffsetArgs is a generated message (msgTypes[113]) carrying
// int64 X and Y coordinates (protobuf fields 1 and 2).
type PalettedNgdotPixOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PalettedNgdotPixOffsetArgs) Reset() {
*x = PalettedNgdotPixOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[113]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PalettedNgdotPixOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PalettedNgdotPixOffsetArgs) ProtoMessage() {}
func (x *PalettedNgdotPixOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[113]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PalettedNgdotPixOffsetArgs.ProtoReflect.Descriptor instead.
func (*PalettedNgdotPixOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{113}
}
// GetX returns X, or 0 for a nil receiver.
func (x *PalettedNgdotPixOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// GetY returns Y, or 0 for a nil receiver.
func (x *PalettedNgdotPixOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
type PalettedNgdotColorIndexAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PalettedNgdotColorIndexAtArgs) Reset() {
*x = PalettedNgdotColorIndexAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[114]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PalettedNgdotColorIndexAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PalettedNgdotColorIndexAtArgs) ProtoMessage() {}
func (x *PalettedNgdotColorIndexAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[114]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PalettedNgdotColorIndexAtArgs.ProtoReflect.Descriptor instead.
func (*PalettedNgdotColorIndexAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{114}
}
func (x *PalettedNgdotColorIndexAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *PalettedNgdotColorIndexAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
type PalettedNgdotSetColorIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PalettedNgdotSetColorIndexArgs) Reset() {
*x = PalettedNgdotSetColorIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[115]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PalettedNgdotSetColorIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PalettedNgdotSetColorIndexArgs) ProtoMessage() {}
func (x *PalettedNgdotSetColorIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[115]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PalettedNgdotSetColorIndexArgs.ProtoReflect.Descriptor instead.
func (*PalettedNgdotSetColorIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{115}
}
func (x *PalettedNgdotSetColorIndexArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *PalettedNgdotSetColorIndexArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
func (x *PalettedNgdotSetColorIndexArgs) GetIndex() uint32 {
if x != nil {
return x.Index
}
return 0
}
// NOTE(review): protoc-gen-go generated boilerplate (standard Reset/String/
// ProtoMessage/ProtoReflect/Descriptor set; ProtoReflect lazily binds the
// message info from file_ngolofuzz_proto_msgTypes[i]). Do not edit by hand;
// regenerate with protoc.

// PalettedNgdotSubImageArgs is the generated message at index 116; it has no
// proto fields, only the generated state/unknownFields/sizeCache bookkeeping.
type PalettedNgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PalettedNgdotSubImageArgs) Reset() {
*x = PalettedNgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[116]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PalettedNgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PalettedNgdotSubImageArgs) ProtoMessage() {}
func (x *PalettedNgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[116]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PalettedNgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*PalettedNgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{116}
}
// PalettedNgdotOpaqueArgs is the generated message at index 117; it has no
// proto fields.
type PalettedNgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PalettedNgdotOpaqueArgs) Reset() {
*x = PalettedNgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[117]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PalettedNgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PalettedNgdotOpaqueArgs) ProtoMessage() {}
func (x *PalettedNgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[117]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PalettedNgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*PalettedNgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{117}
}
// NOTE(review): protoc-gen-go generated boilerplate for the UniformNgdot*
// argument messages (indices 118-123). Each type carries the generated
// state/unknownFields/sizeCache bookkeeping plus any proto fields, and
// implements the standard Reset/String/ProtoMessage/ProtoReflect/Descriptor
// set with nil-safe getters. Regenerate with protoc; do not edit by hand.

// UniformNgdotRGBAArgs is the generated message at index 118 (no proto fields).
type UniformNgdotRGBAArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UniformNgdotRGBAArgs) Reset() {
*x = UniformNgdotRGBAArgs{}
mi := &file_ngolofuzz_proto_msgTypes[118]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UniformNgdotRGBAArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UniformNgdotRGBAArgs) ProtoMessage() {}
func (x *UniformNgdotRGBAArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[118]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UniformNgdotRGBAArgs.ProtoReflect.Descriptor instead.
func (*UniformNgdotRGBAArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{118}
}
// UniformNgdotColorModelArgs is the generated message at index 119 (no proto fields).
type UniformNgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UniformNgdotColorModelArgs) Reset() {
*x = UniformNgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[119]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UniformNgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UniformNgdotColorModelArgs) ProtoMessage() {}
func (x *UniformNgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[119]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UniformNgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*UniformNgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{119}
}
// UniformNgdotBoundsArgs is the generated message at index 120 (no proto fields).
type UniformNgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UniformNgdotBoundsArgs) Reset() {
*x = UniformNgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[120]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UniformNgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UniformNgdotBoundsArgs) ProtoMessage() {}
func (x *UniformNgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[120]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UniformNgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*UniformNgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{120}
}
// UniformNgdotAtArgs is the generated message at index 121, with int64 fields
// X (1) and Y (2).
type UniformNgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UniformNgdotAtArgs) Reset() {
*x = UniformNgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[121]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UniformNgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UniformNgdotAtArgs) ProtoMessage() {}
func (x *UniformNgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[121]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UniformNgdotAtArgs.ProtoReflect.Descriptor instead.
func (*UniformNgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{121}
}
func (x *UniformNgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *UniformNgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// UniformNgdotRGBA64AtArgs is the generated message at index 122, with int64
// fields X (1) and Y (2).
type UniformNgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UniformNgdotRGBA64AtArgs) Reset() {
*x = UniformNgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[122]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UniformNgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UniformNgdotRGBA64AtArgs) ProtoMessage() {}
func (x *UniformNgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[122]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UniformNgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*UniformNgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{122}
}
func (x *UniformNgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *UniformNgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// UniformNgdotOpaqueArgs is the generated message at index 123 (no proto fields).
type UniformNgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *UniformNgdotOpaqueArgs) Reset() {
*x = UniformNgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[123]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *UniformNgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UniformNgdotOpaqueArgs) ProtoMessage() {}
func (x *UniformNgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[123]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UniformNgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*UniformNgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{123}
}
// NOTE(review): protoc-gen-go generated boilerplate for the YCbCr* argument
// messages (indices 124-133). Each type implements the standard generated
// Reset/String/ProtoMessage/ProtoReflect/Descriptor set with nil-safe
// getters; the msgTypes/rawDescGZIP index must match the generated tables.
// Regenerate with protoc; do not edit by hand.

// YCbCrSubsampleRatioNgdotStringArgs is the generated message at index 124,
// with enum field S (proto field 1, YCbCrSubsampleRatioEnum).
type YCbCrSubsampleRatioNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S YCbCrSubsampleRatioEnum `protobuf:"varint,1,opt,name=s,proto3,enum=ngolofuzz.YCbCrSubsampleRatioEnum" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *YCbCrSubsampleRatioNgdotStringArgs) Reset() {
*x = YCbCrSubsampleRatioNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[124]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *YCbCrSubsampleRatioNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*YCbCrSubsampleRatioNgdotStringArgs) ProtoMessage() {}
func (x *YCbCrSubsampleRatioNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[124]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use YCbCrSubsampleRatioNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*YCbCrSubsampleRatioNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{124}
}
// GetS returns S, or the enum zero value (ratio 4:4:4) on a nil receiver.
func (x *YCbCrSubsampleRatioNgdotStringArgs) GetS() YCbCrSubsampleRatioEnum {
if x != nil {
return x.S
}
return YCbCrSubsampleRatioEnum_YCbCrSubsampleRatio444
}
// YCbCrNgdotColorModelArgs is the generated message at index 125 (no proto fields).
type YCbCrNgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *YCbCrNgdotColorModelArgs) Reset() {
*x = YCbCrNgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[125]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *YCbCrNgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*YCbCrNgdotColorModelArgs) ProtoMessage() {}
func (x *YCbCrNgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[125]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use YCbCrNgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*YCbCrNgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{125}
}
// YCbCrNgdotBoundsArgs is the generated message at index 126 (no proto fields).
type YCbCrNgdotBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *YCbCrNgdotBoundsArgs) Reset() {
*x = YCbCrNgdotBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[126]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *YCbCrNgdotBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*YCbCrNgdotBoundsArgs) ProtoMessage() {}
func (x *YCbCrNgdotBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[126]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use YCbCrNgdotBoundsArgs.ProtoReflect.Descriptor instead.
func (*YCbCrNgdotBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{126}
}
// YCbCrNgdotAtArgs is the generated message at index 127, with int64 fields
// X (1) and Y (2).
type YCbCrNgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *YCbCrNgdotAtArgs) Reset() {
*x = YCbCrNgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[127]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *YCbCrNgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*YCbCrNgdotAtArgs) ProtoMessage() {}
func (x *YCbCrNgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[127]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use YCbCrNgdotAtArgs.ProtoReflect.Descriptor instead.
func (*YCbCrNgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{127}
}
func (x *YCbCrNgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *YCbCrNgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// YCbCrNgdotRGBA64AtArgs is the generated message at index 128, with int64
// fields X (1) and Y (2).
type YCbCrNgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *YCbCrNgdotRGBA64AtArgs) Reset() {
*x = YCbCrNgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[128]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *YCbCrNgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*YCbCrNgdotRGBA64AtArgs) ProtoMessage() {}
func (x *YCbCrNgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[128]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use YCbCrNgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*YCbCrNgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{128}
}
func (x *YCbCrNgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *YCbCrNgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// YCbCrNgdotYCbCrAtArgs is the generated message at index 129, with int64
// fields X (1) and Y (2).
type YCbCrNgdotYCbCrAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *YCbCrNgdotYCbCrAtArgs) Reset() {
*x = YCbCrNgdotYCbCrAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[129]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *YCbCrNgdotYCbCrAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*YCbCrNgdotYCbCrAtArgs) ProtoMessage() {}
func (x *YCbCrNgdotYCbCrAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[129]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use YCbCrNgdotYCbCrAtArgs.ProtoReflect.Descriptor instead.
func (*YCbCrNgdotYCbCrAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{129}
}
func (x *YCbCrNgdotYCbCrAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *YCbCrNgdotYCbCrAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// YCbCrNgdotYOffsetArgs is the generated message at index 130, with int64
// fields X (1) and Y (2).
type YCbCrNgdotYOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *YCbCrNgdotYOffsetArgs) Reset() {
*x = YCbCrNgdotYOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[130]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *YCbCrNgdotYOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*YCbCrNgdotYOffsetArgs) ProtoMessage() {}
func (x *YCbCrNgdotYOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[130]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use YCbCrNgdotYOffsetArgs.ProtoReflect.Descriptor instead.
func (*YCbCrNgdotYOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{130}
}
func (x *YCbCrNgdotYOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *YCbCrNgdotYOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// YCbCrNgdotCOffsetArgs is the generated message at index 131, with int64
// fields X (1) and Y (2).
type YCbCrNgdotCOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *YCbCrNgdotCOffsetArgs) Reset() {
*x = YCbCrNgdotCOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[131]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *YCbCrNgdotCOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*YCbCrNgdotCOffsetArgs) ProtoMessage() {}
func (x *YCbCrNgdotCOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[131]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use YCbCrNgdotCOffsetArgs.ProtoReflect.Descriptor instead.
func (*YCbCrNgdotCOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{131}
}
func (x *YCbCrNgdotCOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *YCbCrNgdotCOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// YCbCrNgdotSubImageArgs is the generated message at index 132 (no proto fields).
type YCbCrNgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *YCbCrNgdotSubImageArgs) Reset() {
*x = YCbCrNgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[132]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *YCbCrNgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*YCbCrNgdotSubImageArgs) ProtoMessage() {}
func (x *YCbCrNgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[132]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use YCbCrNgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*YCbCrNgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{132}
}
// YCbCrNgdotOpaqueArgs is the generated message at index 133 (no proto fields).
type YCbCrNgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *YCbCrNgdotOpaqueArgs) Reset() {
*x = YCbCrNgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[133]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *YCbCrNgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*YCbCrNgdotOpaqueArgs) ProtoMessage() {}
func (x *YCbCrNgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[133]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use YCbCrNgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*YCbCrNgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{133}
}
// NewYCbCrArgs is the protoc-gen-go generated message at index 134, carrying
// the SubsampleRatio enum (proto field 1). It implements the standard
// generated Reset/String/ProtoMessage/ProtoReflect/Descriptor set with a
// nil-safe getter; do not edit by hand — regenerate with protoc.
type NewYCbCrArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
SubsampleRatio YCbCrSubsampleRatioEnum `protobuf:"varint,1,opt,name=subsampleRatio,proto3,enum=ngolofuzz.YCbCrSubsampleRatioEnum" json:"subsampleRatio,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its generated message info.
func (x *NewYCbCrArgs) Reset() {
*x = NewYCbCrArgs{}
mi := &file_ngolofuzz_proto_msgTypes[134]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewYCbCrArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewYCbCrArgs) ProtoMessage() {}
// ProtoReflect lazily binds the message info from the generated type table.
func (x *NewYCbCrArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[134]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewYCbCrArgs.ProtoReflect.Descriptor instead.
func (*NewYCbCrArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{134}
}
// GetSubsampleRatio returns SubsampleRatio, or the enum zero value
// (ratio 4:4:4) on a nil receiver.
func (x *NewYCbCrArgs) GetSubsampleRatio() YCbCrSubsampleRatioEnum {
if x != nil {
return x.SubsampleRatio
}
return YCbCrSubsampleRatioEnum_YCbCrSubsampleRatio444
}
// NOTE(review): protoc-gen-go generated boilerplate for the NYCbCrA* argument
// messages (indices 135-141). Each type implements the standard generated
// Reset/String/ProtoMessage/ProtoReflect/Descriptor set with nil-safe
// getters; the msgTypes/rawDescGZIP index must match the generated tables.
// Regenerate with protoc; do not edit by hand.

// NYCbCrANgdotColorModelArgs is the generated message at index 135 (no proto fields).
type NYCbCrANgdotColorModelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NYCbCrANgdotColorModelArgs) Reset() {
*x = NYCbCrANgdotColorModelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[135]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NYCbCrANgdotColorModelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NYCbCrANgdotColorModelArgs) ProtoMessage() {}
func (x *NYCbCrANgdotColorModelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[135]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NYCbCrANgdotColorModelArgs.ProtoReflect.Descriptor instead.
func (*NYCbCrANgdotColorModelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{135}
}
// NYCbCrANgdotAtArgs is the generated message at index 136, with int64 fields
// X (1) and Y (2).
type NYCbCrANgdotAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NYCbCrANgdotAtArgs) Reset() {
*x = NYCbCrANgdotAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[136]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NYCbCrANgdotAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NYCbCrANgdotAtArgs) ProtoMessage() {}
func (x *NYCbCrANgdotAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[136]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NYCbCrANgdotAtArgs.ProtoReflect.Descriptor instead.
func (*NYCbCrANgdotAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{136}
}
func (x *NYCbCrANgdotAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *NYCbCrANgdotAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NYCbCrANgdotRGBA64AtArgs is the generated message at index 137, with int64
// fields X (1) and Y (2).
type NYCbCrANgdotRGBA64AtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NYCbCrANgdotRGBA64AtArgs) Reset() {
*x = NYCbCrANgdotRGBA64AtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[137]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NYCbCrANgdotRGBA64AtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NYCbCrANgdotRGBA64AtArgs) ProtoMessage() {}
func (x *NYCbCrANgdotRGBA64AtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[137]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NYCbCrANgdotRGBA64AtArgs.ProtoReflect.Descriptor instead.
func (*NYCbCrANgdotRGBA64AtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{137}
}
func (x *NYCbCrANgdotRGBA64AtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *NYCbCrANgdotRGBA64AtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NYCbCrANgdotNYCbCrAAtArgs is the generated message at index 138, with int64
// fields X (1) and Y (2).
type NYCbCrANgdotNYCbCrAAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NYCbCrANgdotNYCbCrAAtArgs) Reset() {
*x = NYCbCrANgdotNYCbCrAAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[138]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NYCbCrANgdotNYCbCrAAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NYCbCrANgdotNYCbCrAAtArgs) ProtoMessage() {}
func (x *NYCbCrANgdotNYCbCrAAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[138]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NYCbCrANgdotNYCbCrAAtArgs.ProtoReflect.Descriptor instead.
func (*NYCbCrANgdotNYCbCrAAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{138}
}
func (x *NYCbCrANgdotNYCbCrAAtArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *NYCbCrANgdotNYCbCrAAtArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NYCbCrANgdotAOffsetArgs is the generated message at index 139, with int64
// fields X (1) and Y (2).
type NYCbCrANgdotAOffsetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y int64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NYCbCrANgdotAOffsetArgs) Reset() {
*x = NYCbCrANgdotAOffsetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[139]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NYCbCrANgdotAOffsetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NYCbCrANgdotAOffsetArgs) ProtoMessage() {}
func (x *NYCbCrANgdotAOffsetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[139]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NYCbCrANgdotAOffsetArgs.ProtoReflect.Descriptor instead.
func (*NYCbCrANgdotAOffsetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{139}
}
func (x *NYCbCrANgdotAOffsetArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
func (x *NYCbCrANgdotAOffsetArgs) GetY() int64 {
if x != nil {
return x.Y
}
return 0
}
// NYCbCrANgdotSubImageArgs is the generated message at index 140 (no proto fields).
type NYCbCrANgdotSubImageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NYCbCrANgdotSubImageArgs) Reset() {
*x = NYCbCrANgdotSubImageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[140]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NYCbCrANgdotSubImageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NYCbCrANgdotSubImageArgs) ProtoMessage() {}
func (x *NYCbCrANgdotSubImageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[140]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NYCbCrANgdotSubImageArgs.ProtoReflect.Descriptor instead.
func (*NYCbCrANgdotSubImageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{140}
}
// NYCbCrANgdotOpaqueArgs is the generated message at index 141 (no proto fields).
type NYCbCrANgdotOpaqueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NYCbCrANgdotOpaqueArgs) Reset() {
*x = NYCbCrANgdotOpaqueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[141]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NYCbCrANgdotOpaqueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NYCbCrANgdotOpaqueArgs) ProtoMessage() {}
func (x *NYCbCrANgdotOpaqueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[141]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NYCbCrANgdotOpaqueArgs.ProtoReflect.Descriptor instead.
func (*NYCbCrANgdotOpaqueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{141}
}
type NewNYCbCrAArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
SubsampleRatio YCbCrSubsampleRatioEnum `protobuf:"varint,1,opt,name=subsampleRatio,proto3,enum=ngolofuzz.YCbCrSubsampleRatioEnum" json:"subsampleRatio,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewNYCbCrAArgs) Reset() {
*x = NewNYCbCrAArgs{}
mi := &file_ngolofuzz_proto_msgTypes[142]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewNYCbCrAArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewNYCbCrAArgs) ProtoMessage() {}
func (x *NewNYCbCrAArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[142]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewNYCbCrAArgs.ProtoReflect.Descriptor instead.
func (*NewNYCbCrAArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{142}
}
func (x *NewNYCbCrAArgs) GetSubsampleRatio() YCbCrSubsampleRatioEnum {
if x != nil {
return x.SubsampleRatio
}
return YCbCrSubsampleRatioEnum_YCbCrSubsampleRatio444
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Decode
// *NgoloFuzzOne_DecodeConfig
// *NgoloFuzzOne_PointNgdotString
// *NgoloFuzzOne_PointNgdotAdd
// *NgoloFuzzOne_PointNgdotSub
// *NgoloFuzzOne_PointNgdotMul
// *NgoloFuzzOne_PointNgdotIn
// *NgoloFuzzOne_PointNgdotMod
// *NgoloFuzzOne_PointNgdotEq
// *NgoloFuzzOne_Pt
// *NgoloFuzzOne_RectangleNgdotString
// *NgoloFuzzOne_RectangleNgdotDx
// *NgoloFuzzOne_RectangleNgdotDy
// *NgoloFuzzOne_RectangleNgdotSize
// *NgoloFuzzOne_RectangleNgdotAdd
// *NgoloFuzzOne_RectangleNgdotSub
// *NgoloFuzzOne_RectangleNgdotInset
// *NgoloFuzzOne_RectangleNgdotIntersect
// *NgoloFuzzOne_RectangleNgdotUnion
// *NgoloFuzzOne_RectangleNgdotEmpty
// *NgoloFuzzOne_RectangleNgdotEq
// *NgoloFuzzOne_RectangleNgdotOverlaps
// *NgoloFuzzOne_RectangleNgdotIn
// *NgoloFuzzOne_RectangleNgdotCanon
// *NgoloFuzzOne_RectangleNgdotAt
// *NgoloFuzzOne_RectangleNgdotRGBA64At
// *NgoloFuzzOne_RectangleNgdotBounds
// *NgoloFuzzOne_RectangleNgdotColorModel
// *NgoloFuzzOne_Rect
// *NgoloFuzzOne_RGBANgdotColorModel
// *NgoloFuzzOne_RGBANgdotBounds
// *NgoloFuzzOne_RGBANgdotAt
// *NgoloFuzzOne_RGBANgdotRGBA64At
// *NgoloFuzzOne_RGBANgdotRGBAAt
// *NgoloFuzzOne_RGBANgdotPixOffset
// *NgoloFuzzOne_RGBANgdotSubImage
// *NgoloFuzzOne_RGBANgdotOpaque
// *NgoloFuzzOne_NewRGBA
// *NgoloFuzzOne_RGBA64NgdotColorModel
// *NgoloFuzzOne_RGBA64NgdotBounds
// *NgoloFuzzOne_RGBA64NgdotAt
// *NgoloFuzzOne_RGBA64NgdotRGBA64At
// *NgoloFuzzOne_RGBA64NgdotPixOffset
// *NgoloFuzzOne_RGBA64NgdotSubImage
// *NgoloFuzzOne_RGBA64NgdotOpaque
// *NgoloFuzzOne_NewRGBA64
// *NgoloFuzzOne_NRGBANgdotColorModel
// *NgoloFuzzOne_NRGBANgdotBounds
// *NgoloFuzzOne_NRGBANgdotAt
// *NgoloFuzzOne_NRGBANgdotRGBA64At
// *NgoloFuzzOne_NRGBANgdotNRGBAAt
// *NgoloFuzzOne_NRGBANgdotPixOffset
// *NgoloFuzzOne_NRGBANgdotSubImage
// *NgoloFuzzOne_NRGBANgdotOpaque
// *NgoloFuzzOne_NewNRGBA
// *NgoloFuzzOne_NRGBA64NgdotColorModel
// *NgoloFuzzOne_NRGBA64NgdotBounds
// *NgoloFuzzOne_NRGBA64NgdotAt
// *NgoloFuzzOne_NRGBA64NgdotRGBA64At
// *NgoloFuzzOne_NRGBA64NgdotNRGBA64At
// *NgoloFuzzOne_NRGBA64NgdotPixOffset
// *NgoloFuzzOne_NRGBA64NgdotSubImage
// *NgoloFuzzOne_NRGBA64NgdotOpaque
// *NgoloFuzzOne_NewNRGBA64
// *NgoloFuzzOne_AlphaNgdotColorModel
// *NgoloFuzzOne_AlphaNgdotBounds
// *NgoloFuzzOne_AlphaNgdotAt
// *NgoloFuzzOne_AlphaNgdotRGBA64At
// *NgoloFuzzOne_AlphaNgdotAlphaAt
// *NgoloFuzzOne_AlphaNgdotPixOffset
// *NgoloFuzzOne_AlphaNgdotSubImage
// *NgoloFuzzOne_AlphaNgdotOpaque
// *NgoloFuzzOne_NewAlpha
// *NgoloFuzzOne_Alpha16NgdotColorModel
// *NgoloFuzzOne_Alpha16NgdotBounds
// *NgoloFuzzOne_Alpha16NgdotAt
// *NgoloFuzzOne_Alpha16NgdotRGBA64At
// *NgoloFuzzOne_Alpha16NgdotAlpha16At
// *NgoloFuzzOne_Alpha16NgdotPixOffset
// *NgoloFuzzOne_Alpha16NgdotSubImage
// *NgoloFuzzOne_Alpha16NgdotOpaque
// *NgoloFuzzOne_NewAlpha16
// *NgoloFuzzOne_GrayNgdotColorModel
// *NgoloFuzzOne_GrayNgdotBounds
// *NgoloFuzzOne_GrayNgdotAt
// *NgoloFuzzOne_GrayNgdotRGBA64At
// *NgoloFuzzOne_GrayNgdotGrayAt
// *NgoloFuzzOne_GrayNgdotPixOffset
// *NgoloFuzzOne_GrayNgdotSubImage
// *NgoloFuzzOne_GrayNgdotOpaque
// *NgoloFuzzOne_NewGray
// *NgoloFuzzOne_Gray16NgdotColorModel
// *NgoloFuzzOne_Gray16NgdotBounds
// *NgoloFuzzOne_Gray16NgdotAt
// *NgoloFuzzOne_Gray16NgdotRGBA64At
// *NgoloFuzzOne_Gray16NgdotGray16At
// *NgoloFuzzOne_Gray16NgdotPixOffset
// *NgoloFuzzOne_Gray16NgdotSubImage
// *NgoloFuzzOne_Gray16NgdotOpaque
// *NgoloFuzzOne_NewGray16
// *NgoloFuzzOne_CMYKNgdotColorModel
// *NgoloFuzzOne_CMYKNgdotBounds
// *NgoloFuzzOne_CMYKNgdotAt
// *NgoloFuzzOne_CMYKNgdotRGBA64At
// *NgoloFuzzOne_CMYKNgdotCMYKAt
// *NgoloFuzzOne_CMYKNgdotPixOffset
// *NgoloFuzzOne_CMYKNgdotSubImage
// *NgoloFuzzOne_CMYKNgdotOpaque
// *NgoloFuzzOne_NewCMYK
// *NgoloFuzzOne_PalettedNgdotColorModel
// *NgoloFuzzOne_PalettedNgdotBounds
// *NgoloFuzzOne_PalettedNgdotAt
// *NgoloFuzzOne_PalettedNgdotRGBA64At
// *NgoloFuzzOne_PalettedNgdotPixOffset
// *NgoloFuzzOne_PalettedNgdotColorIndexAt
// *NgoloFuzzOne_PalettedNgdotSetColorIndex
// *NgoloFuzzOne_PalettedNgdotSubImage
// *NgoloFuzzOne_PalettedNgdotOpaque
// *NgoloFuzzOne_UniformNgdotRGBA
// *NgoloFuzzOne_UniformNgdotColorModel
// *NgoloFuzzOne_UniformNgdotBounds
// *NgoloFuzzOne_UniformNgdotAt
// *NgoloFuzzOne_UniformNgdotRGBA64At
// *NgoloFuzzOne_UniformNgdotOpaque
// *NgoloFuzzOne_YCbCrSubsampleRatioNgdotString
// *NgoloFuzzOne_YCbCrNgdotColorModel
// *NgoloFuzzOne_YCbCrNgdotBounds
// *NgoloFuzzOne_YCbCrNgdotAt
// *NgoloFuzzOne_YCbCrNgdotRGBA64At
// *NgoloFuzzOne_YCbCrNgdotYCbCrAt
// *NgoloFuzzOne_YCbCrNgdotYOffset
// *NgoloFuzzOne_YCbCrNgdotCOffset
// *NgoloFuzzOne_YCbCrNgdotSubImage
// *NgoloFuzzOne_YCbCrNgdotOpaque
// *NgoloFuzzOne_NewYCbCr
// *NgoloFuzzOne_NYCbCrANgdotColorModel
// *NgoloFuzzOne_NYCbCrANgdotAt
// *NgoloFuzzOne_NYCbCrANgdotRGBA64At
// *NgoloFuzzOne_NYCbCrANgdotNYCbCrAAt
// *NgoloFuzzOne_NYCbCrANgdotAOffset
// *NgoloFuzzOne_NYCbCrANgdotSubImage
// *NgoloFuzzOne_NYCbCrANgdotOpaque
// *NgoloFuzzOne_NewNYCbCrA
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[143]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[143]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{143}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetDecode() *DecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Decode); ok {
return x.Decode
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecodeConfig() *DecodeConfigArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecodeConfig); ok {
return x.DecodeConfig
}
}
return nil
}
func (x *NgoloFuzzOne) GetPointNgdotString() *PointNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PointNgdotString); ok {
return x.PointNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetPointNgdotAdd() *PointNgdotAddArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PointNgdotAdd); ok {
return x.PointNgdotAdd
}
}
return nil
}
func (x *NgoloFuzzOne) GetPointNgdotSub() *PointNgdotSubArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PointNgdotSub); ok {
return x.PointNgdotSub
}
}
return nil
}
func (x *NgoloFuzzOne) GetPointNgdotMul() *PointNgdotMulArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PointNgdotMul); ok {
return x.PointNgdotMul
}
}
return nil
}
func (x *NgoloFuzzOne) GetPointNgdotIn() *PointNgdotInArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PointNgdotIn); ok {
return x.PointNgdotIn
}
}
return nil
}
func (x *NgoloFuzzOne) GetPointNgdotMod() *PointNgdotModArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PointNgdotMod); ok {
return x.PointNgdotMod
}
}
return nil
}
func (x *NgoloFuzzOne) GetPointNgdotEq() *PointNgdotEqArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PointNgdotEq); ok {
return x.PointNgdotEq
}
}
return nil
}
func (x *NgoloFuzzOne) GetPt() *PtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Pt); ok {
return x.Pt
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotString() *RectangleNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotString); ok {
return x.RectangleNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotDx() *RectangleNgdotDxArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotDx); ok {
return x.RectangleNgdotDx
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotDy() *RectangleNgdotDyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotDy); ok {
return x.RectangleNgdotDy
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotSize() *RectangleNgdotSizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotSize); ok {
return x.RectangleNgdotSize
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotAdd() *RectangleNgdotAddArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotAdd); ok {
return x.RectangleNgdotAdd
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotSub() *RectangleNgdotSubArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotSub); ok {
return x.RectangleNgdotSub
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotInset() *RectangleNgdotInsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotInset); ok {
return x.RectangleNgdotInset
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotIntersect() *RectangleNgdotIntersectArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotIntersect); ok {
return x.RectangleNgdotIntersect
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotUnion() *RectangleNgdotUnionArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotUnion); ok {
return x.RectangleNgdotUnion
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotEmpty() *RectangleNgdotEmptyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotEmpty); ok {
return x.RectangleNgdotEmpty
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotEq() *RectangleNgdotEqArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotEq); ok {
return x.RectangleNgdotEq
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotOverlaps() *RectangleNgdotOverlapsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotOverlaps); ok {
return x.RectangleNgdotOverlaps
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotIn() *RectangleNgdotInArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotIn); ok {
return x.RectangleNgdotIn
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotCanon() *RectangleNgdotCanonArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotCanon); ok {
return x.RectangleNgdotCanon
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotAt() *RectangleNgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotAt); ok {
return x.RectangleNgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotRGBA64At() *RectangleNgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotRGBA64At); ok {
return x.RectangleNgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotBounds() *RectangleNgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotBounds); ok {
return x.RectangleNgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetRectangleNgdotColorModel() *RectangleNgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RectangleNgdotColorModel); ok {
return x.RectangleNgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetRect() *RectArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Rect); ok {
return x.Rect
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBANgdotColorModel() *RGBANgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBANgdotColorModel); ok {
return x.RGBANgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBANgdotBounds() *RGBANgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBANgdotBounds); ok {
return x.RGBANgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBANgdotAt() *RGBANgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBANgdotAt); ok {
return x.RGBANgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBANgdotRGBA64At() *RGBANgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBANgdotRGBA64At); ok {
return x.RGBANgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBANgdotRGBAAt() *RGBANgdotRGBAAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBANgdotRGBAAt); ok {
return x.RGBANgdotRGBAAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBANgdotPixOffset() *RGBANgdotPixOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBANgdotPixOffset); ok {
return x.RGBANgdotPixOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBANgdotSubImage() *RGBANgdotSubImageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBANgdotSubImage); ok {
return x.RGBANgdotSubImage
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBANgdotOpaque() *RGBANgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBANgdotOpaque); ok {
return x.RGBANgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewRGBA() *NewRGBAArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewRGBA); ok {
return x.NewRGBA
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBA64NgdotColorModel() *RGBA64NgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBA64NgdotColorModel); ok {
return x.RGBA64NgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBA64NgdotBounds() *RGBA64NgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBA64NgdotBounds); ok {
return x.RGBA64NgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBA64NgdotAt() *RGBA64NgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBA64NgdotAt); ok {
return x.RGBA64NgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBA64NgdotRGBA64At() *RGBA64NgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBA64NgdotRGBA64At); ok {
return x.RGBA64NgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBA64NgdotPixOffset() *RGBA64NgdotPixOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBA64NgdotPixOffset); ok {
return x.RGBA64NgdotPixOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBA64NgdotSubImage() *RGBA64NgdotSubImageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBA64NgdotSubImage); ok {
return x.RGBA64NgdotSubImage
}
}
return nil
}
func (x *NgoloFuzzOne) GetRGBA64NgdotOpaque() *RGBA64NgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RGBA64NgdotOpaque); ok {
return x.RGBA64NgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewRGBA64() *NewRGBA64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewRGBA64); ok {
return x.NewRGBA64
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBANgdotColorModel() *NRGBANgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBANgdotColorModel); ok {
return x.NRGBANgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBANgdotBounds() *NRGBANgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBANgdotBounds); ok {
return x.NRGBANgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBANgdotAt() *NRGBANgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBANgdotAt); ok {
return x.NRGBANgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBANgdotRGBA64At() *NRGBANgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBANgdotRGBA64At); ok {
return x.NRGBANgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBANgdotNRGBAAt() *NRGBANgdotNRGBAAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBANgdotNRGBAAt); ok {
return x.NRGBANgdotNRGBAAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBANgdotPixOffset() *NRGBANgdotPixOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBANgdotPixOffset); ok {
return x.NRGBANgdotPixOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBANgdotSubImage() *NRGBANgdotSubImageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBANgdotSubImage); ok {
return x.NRGBANgdotSubImage
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBANgdotOpaque() *NRGBANgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBANgdotOpaque); ok {
return x.NRGBANgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewNRGBA() *NewNRGBAArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewNRGBA); ok {
return x.NewNRGBA
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBA64NgdotColorModel() *NRGBA64NgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBA64NgdotColorModel); ok {
return x.NRGBA64NgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBA64NgdotBounds() *NRGBA64NgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBA64NgdotBounds); ok {
return x.NRGBA64NgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBA64NgdotAt() *NRGBA64NgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBA64NgdotAt); ok {
return x.NRGBA64NgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBA64NgdotRGBA64At() *NRGBA64NgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBA64NgdotRGBA64At); ok {
return x.NRGBA64NgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBA64NgdotNRGBA64At() *NRGBA64NgdotNRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBA64NgdotNRGBA64At); ok {
return x.NRGBA64NgdotNRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBA64NgdotPixOffset() *NRGBA64NgdotPixOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBA64NgdotPixOffset); ok {
return x.NRGBA64NgdotPixOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBA64NgdotSubImage() *NRGBA64NgdotSubImageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBA64NgdotSubImage); ok {
return x.NRGBA64NgdotSubImage
}
}
return nil
}
func (x *NgoloFuzzOne) GetNRGBA64NgdotOpaque() *NRGBA64NgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NRGBA64NgdotOpaque); ok {
return x.NRGBA64NgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewNRGBA64() *NewNRGBA64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewNRGBA64); ok {
return x.NewNRGBA64
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlphaNgdotColorModel() *AlphaNgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AlphaNgdotColorModel); ok {
return x.AlphaNgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlphaNgdotBounds() *AlphaNgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AlphaNgdotBounds); ok {
return x.AlphaNgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlphaNgdotAt() *AlphaNgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AlphaNgdotAt); ok {
return x.AlphaNgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlphaNgdotRGBA64At() *AlphaNgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AlphaNgdotRGBA64At); ok {
return x.AlphaNgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlphaNgdotAlphaAt() *AlphaNgdotAlphaAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AlphaNgdotAlphaAt); ok {
return x.AlphaNgdotAlphaAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlphaNgdotPixOffset() *AlphaNgdotPixOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AlphaNgdotPixOffset); ok {
return x.AlphaNgdotPixOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlphaNgdotSubImage() *AlphaNgdotSubImageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AlphaNgdotSubImage); ok {
return x.AlphaNgdotSubImage
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlphaNgdotOpaque() *AlphaNgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AlphaNgdotOpaque); ok {
return x.AlphaNgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewAlpha() *NewAlphaArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewAlpha); ok {
return x.NewAlpha
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlpha16NgdotColorModel() *Alpha16NgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Alpha16NgdotColorModel); ok {
return x.Alpha16NgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlpha16NgdotBounds() *Alpha16NgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Alpha16NgdotBounds); ok {
return x.Alpha16NgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlpha16NgdotAt() *Alpha16NgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Alpha16NgdotAt); ok {
return x.Alpha16NgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlpha16NgdotRGBA64At() *Alpha16NgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Alpha16NgdotRGBA64At); ok {
return x.Alpha16NgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlpha16NgdotAlpha16At() *Alpha16NgdotAlpha16AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Alpha16NgdotAlpha16At); ok {
return x.Alpha16NgdotAlpha16At
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlpha16NgdotPixOffset() *Alpha16NgdotPixOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Alpha16NgdotPixOffset); ok {
return x.Alpha16NgdotPixOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlpha16NgdotSubImage() *Alpha16NgdotSubImageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Alpha16NgdotSubImage); ok {
return x.Alpha16NgdotSubImage
}
}
return nil
}
func (x *NgoloFuzzOne) GetAlpha16NgdotOpaque() *Alpha16NgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Alpha16NgdotOpaque); ok {
return x.Alpha16NgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewAlpha16() *NewAlpha16Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewAlpha16); ok {
return x.NewAlpha16
}
}
return nil
}
func (x *NgoloFuzzOne) GetGrayNgdotColorModel() *GrayNgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_GrayNgdotColorModel); ok {
return x.GrayNgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetGrayNgdotBounds() *GrayNgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_GrayNgdotBounds); ok {
return x.GrayNgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetGrayNgdotAt() *GrayNgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_GrayNgdotAt); ok {
return x.GrayNgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetGrayNgdotRGBA64At() *GrayNgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_GrayNgdotRGBA64At); ok {
return x.GrayNgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetGrayNgdotGrayAt() *GrayNgdotGrayAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_GrayNgdotGrayAt); ok {
return x.GrayNgdotGrayAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetGrayNgdotPixOffset() *GrayNgdotPixOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_GrayNgdotPixOffset); ok {
return x.GrayNgdotPixOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetGrayNgdotSubImage() *GrayNgdotSubImageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_GrayNgdotSubImage); ok {
return x.GrayNgdotSubImage
}
}
return nil
}
func (x *NgoloFuzzOne) GetGrayNgdotOpaque() *GrayNgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_GrayNgdotOpaque); ok {
return x.GrayNgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewGray() *NewGrayArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewGray); ok {
return x.NewGray
}
}
return nil
}
func (x *NgoloFuzzOne) GetGray16NgdotColorModel() *Gray16NgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Gray16NgdotColorModel); ok {
return x.Gray16NgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetGray16NgdotBounds() *Gray16NgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Gray16NgdotBounds); ok {
return x.Gray16NgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetGray16NgdotAt() *Gray16NgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Gray16NgdotAt); ok {
return x.Gray16NgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetGray16NgdotRGBA64At() *Gray16NgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Gray16NgdotRGBA64At); ok {
return x.Gray16NgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetGray16NgdotGray16At() *Gray16NgdotGray16AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Gray16NgdotGray16At); ok {
return x.Gray16NgdotGray16At
}
}
return nil
}
func (x *NgoloFuzzOne) GetGray16NgdotPixOffset() *Gray16NgdotPixOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Gray16NgdotPixOffset); ok {
return x.Gray16NgdotPixOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetGray16NgdotSubImage() *Gray16NgdotSubImageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Gray16NgdotSubImage); ok {
return x.Gray16NgdotSubImage
}
}
return nil
}
func (x *NgoloFuzzOne) GetGray16NgdotOpaque() *Gray16NgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Gray16NgdotOpaque); ok {
return x.Gray16NgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewGray16() *NewGray16Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewGray16); ok {
return x.NewGray16
}
}
return nil
}
func (x *NgoloFuzzOne) GetCMYKNgdotColorModel() *CMYKNgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CMYKNgdotColorModel); ok {
return x.CMYKNgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetCMYKNgdotBounds() *CMYKNgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CMYKNgdotBounds); ok {
return x.CMYKNgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetCMYKNgdotAt() *CMYKNgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CMYKNgdotAt); ok {
return x.CMYKNgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetCMYKNgdotRGBA64At() *CMYKNgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CMYKNgdotRGBA64At); ok {
return x.CMYKNgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetCMYKNgdotCMYKAt() *CMYKNgdotCMYKAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CMYKNgdotCMYKAt); ok {
return x.CMYKNgdotCMYKAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetCMYKNgdotPixOffset() *CMYKNgdotPixOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CMYKNgdotPixOffset); ok {
return x.CMYKNgdotPixOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetCMYKNgdotSubImage() *CMYKNgdotSubImageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CMYKNgdotSubImage); ok {
return x.CMYKNgdotSubImage
}
}
return nil
}
func (x *NgoloFuzzOne) GetCMYKNgdotOpaque() *CMYKNgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CMYKNgdotOpaque); ok {
return x.CMYKNgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewCMYK() *NewCMYKArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewCMYK); ok {
return x.NewCMYK
}
}
return nil
}
func (x *NgoloFuzzOne) GetPalettedNgdotColorModel() *PalettedNgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PalettedNgdotColorModel); ok {
return x.PalettedNgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetPalettedNgdotBounds() *PalettedNgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PalettedNgdotBounds); ok {
return x.PalettedNgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetPalettedNgdotAt() *PalettedNgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PalettedNgdotAt); ok {
return x.PalettedNgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetPalettedNgdotRGBA64At() *PalettedNgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PalettedNgdotRGBA64At); ok {
return x.PalettedNgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetPalettedNgdotPixOffset() *PalettedNgdotPixOffsetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PalettedNgdotPixOffset); ok {
return x.PalettedNgdotPixOffset
}
}
return nil
}
func (x *NgoloFuzzOne) GetPalettedNgdotColorIndexAt() *PalettedNgdotColorIndexAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PalettedNgdotColorIndexAt); ok {
return x.PalettedNgdotColorIndexAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetPalettedNgdotSetColorIndex() *PalettedNgdotSetColorIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PalettedNgdotSetColorIndex); ok {
return x.PalettedNgdotSetColorIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetPalettedNgdotSubImage() *PalettedNgdotSubImageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PalettedNgdotSubImage); ok {
return x.PalettedNgdotSubImage
}
}
return nil
}
func (x *NgoloFuzzOne) GetPalettedNgdotOpaque() *PalettedNgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PalettedNgdotOpaque); ok {
return x.PalettedNgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetUniformNgdotRGBA() *UniformNgdotRGBAArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UniformNgdotRGBA); ok {
return x.UniformNgdotRGBA
}
}
return nil
}
func (x *NgoloFuzzOne) GetUniformNgdotColorModel() *UniformNgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UniformNgdotColorModel); ok {
return x.UniformNgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetUniformNgdotBounds() *UniformNgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UniformNgdotBounds); ok {
return x.UniformNgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetUniformNgdotAt() *UniformNgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UniformNgdotAt); ok {
return x.UniformNgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetUniformNgdotRGBA64At() *UniformNgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UniformNgdotRGBA64At); ok {
return x.UniformNgdotRGBA64At
}
}
return nil
}
func (x *NgoloFuzzOne) GetUniformNgdotOpaque() *UniformNgdotOpaqueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UniformNgdotOpaque); ok {
return x.UniformNgdotOpaque
}
}
return nil
}
func (x *NgoloFuzzOne) GetYCbCrSubsampleRatioNgdotString() *YCbCrSubsampleRatioNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_YCbCrSubsampleRatioNgdotString); ok {
return x.YCbCrSubsampleRatioNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetYCbCrNgdotColorModel() *YCbCrNgdotColorModelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_YCbCrNgdotColorModel); ok {
return x.YCbCrNgdotColorModel
}
}
return nil
}
func (x *NgoloFuzzOne) GetYCbCrNgdotBounds() *YCbCrNgdotBoundsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_YCbCrNgdotBounds); ok {
return x.YCbCrNgdotBounds
}
}
return nil
}
func (x *NgoloFuzzOne) GetYCbCrNgdotAt() *YCbCrNgdotAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_YCbCrNgdotAt); ok {
return x.YCbCrNgdotAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetYCbCrNgdotRGBA64At() *YCbCrNgdotRGBA64AtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_YCbCrNgdotRGBA64At); ok {
return x.YCbCrNgdotRGBA64At
}
}
return nil
}
// GetYCbCrNgdotYCbCrAt returns the YCbCrNgdotYCbCrAt variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetYCbCrNgdotYCbCrAt() *YCbCrNgdotYCbCrAtArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_YCbCrNgdotYCbCrAt)
	if !ok {
		return nil
	}
	return v.YCbCrNgdotYCbCrAt
}
// GetYCbCrNgdotYOffset returns the YCbCrNgdotYOffset variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetYCbCrNgdotYOffset() *YCbCrNgdotYOffsetArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_YCbCrNgdotYOffset)
	if !ok {
		return nil
	}
	return v.YCbCrNgdotYOffset
}
// GetYCbCrNgdotCOffset returns the YCbCrNgdotCOffset variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetYCbCrNgdotCOffset() *YCbCrNgdotCOffsetArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_YCbCrNgdotCOffset)
	if !ok {
		return nil
	}
	return v.YCbCrNgdotCOffset
}
// GetYCbCrNgdotSubImage returns the YCbCrNgdotSubImage variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetYCbCrNgdotSubImage() *YCbCrNgdotSubImageArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_YCbCrNgdotSubImage)
	if !ok {
		return nil
	}
	return v.YCbCrNgdotSubImage
}
// GetYCbCrNgdotOpaque returns the YCbCrNgdotOpaque variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetYCbCrNgdotOpaque() *YCbCrNgdotOpaqueArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_YCbCrNgdotOpaque)
	if !ok {
		return nil
	}
	return v.YCbCrNgdotOpaque
}
// GetNewYCbCr returns the NewYCbCr variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetNewYCbCr() *NewYCbCrArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_NewYCbCr)
	if !ok {
		return nil
	}
	return v.NewYCbCr
}
// GetNYCbCrANgdotColorModel returns the NYCbCrANgdotColorModel variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetNYCbCrANgdotColorModel() *NYCbCrANgdotColorModelArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_NYCbCrANgdotColorModel)
	if !ok {
		return nil
	}
	return v.NYCbCrANgdotColorModel
}
// GetNYCbCrANgdotAt returns the NYCbCrANgdotAt variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetNYCbCrANgdotAt() *NYCbCrANgdotAtArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_NYCbCrANgdotAt)
	if !ok {
		return nil
	}
	return v.NYCbCrANgdotAt
}
// GetNYCbCrANgdotRGBA64At returns the NYCbCrANgdotRGBA64At variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetNYCbCrANgdotRGBA64At() *NYCbCrANgdotRGBA64AtArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_NYCbCrANgdotRGBA64At)
	if !ok {
		return nil
	}
	return v.NYCbCrANgdotRGBA64At
}
// GetNYCbCrANgdotNYCbCrAAt returns the NYCbCrANgdotNYCbCrAAt variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetNYCbCrANgdotNYCbCrAAt() *NYCbCrANgdotNYCbCrAAtArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_NYCbCrANgdotNYCbCrAAt)
	if !ok {
		return nil
	}
	return v.NYCbCrANgdotNYCbCrAAt
}
// GetNYCbCrANgdotAOffset returns the NYCbCrANgdotAOffset variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetNYCbCrANgdotAOffset() *NYCbCrANgdotAOffsetArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_NYCbCrANgdotAOffset)
	if !ok {
		return nil
	}
	return v.NYCbCrANgdotAOffset
}
// GetNYCbCrANgdotSubImage returns the NYCbCrANgdotSubImage variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetNYCbCrANgdotSubImage() *NYCbCrANgdotSubImageArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_NYCbCrANgdotSubImage)
	if !ok {
		return nil
	}
	return v.NYCbCrANgdotSubImage
}
// GetNYCbCrANgdotOpaque returns the NYCbCrANgdotOpaque variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetNYCbCrANgdotOpaque() *NYCbCrANgdotOpaqueArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_NYCbCrANgdotOpaque)
	if !ok {
		return nil
	}
	return v.NYCbCrANgdotOpaque
}
// GetNewNYCbCrA returns the NewNYCbCrA variant of the
// Item oneof, or nil if x is nil or Item holds a different variant.
func (x *NgoloFuzzOne) GetNewNYCbCrA() *NewNYCbCrAArgs {
	if x == nil {
		return nil
	}
	v, ok := x.Item.(*NgoloFuzzOne_NewNYCbCrA)
	if !ok {
		return nil
	}
	return v.NewNYCbCrA
}
// isNgoloFuzzOne_Item is the sealed marker interface for the NgoloFuzzOne.Item
// protobuf oneof: only the generated wrapper types below implement it, so Item
// can hold exactly one of these variants at a time.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// Each wrapper type below carries exactly one oneof variant. The struct tag's
// field number (1..143, per the `protobuf:"bytes,N,..."` tag) identifies the
// variant on the wire; the single exported field holds the variant's argument
// message. Generated code — do not edit by hand.
type NgoloFuzzOne_Decode struct {
Decode *DecodeArgs `protobuf:"bytes,1,opt,name=Decode,proto3,oneof"`
}
type NgoloFuzzOne_DecodeConfig struct {
DecodeConfig *DecodeConfigArgs `protobuf:"bytes,2,opt,name=DecodeConfig,proto3,oneof"`
}
type NgoloFuzzOne_PointNgdotString struct {
PointNgdotString *PointNgdotStringArgs `protobuf:"bytes,3,opt,name=PointNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_PointNgdotAdd struct {
PointNgdotAdd *PointNgdotAddArgs `protobuf:"bytes,4,opt,name=PointNgdotAdd,proto3,oneof"`
}
type NgoloFuzzOne_PointNgdotSub struct {
PointNgdotSub *PointNgdotSubArgs `protobuf:"bytes,5,opt,name=PointNgdotSub,proto3,oneof"`
}
type NgoloFuzzOne_PointNgdotMul struct {
PointNgdotMul *PointNgdotMulArgs `protobuf:"bytes,6,opt,name=PointNgdotMul,proto3,oneof"`
}
type NgoloFuzzOne_PointNgdotIn struct {
PointNgdotIn *PointNgdotInArgs `protobuf:"bytes,7,opt,name=PointNgdotIn,proto3,oneof"`
}
type NgoloFuzzOne_PointNgdotMod struct {
PointNgdotMod *PointNgdotModArgs `protobuf:"bytes,8,opt,name=PointNgdotMod,proto3,oneof"`
}
type NgoloFuzzOne_PointNgdotEq struct {
PointNgdotEq *PointNgdotEqArgs `protobuf:"bytes,9,opt,name=PointNgdotEq,proto3,oneof"`
}
type NgoloFuzzOne_Pt struct {
Pt *PtArgs `protobuf:"bytes,10,opt,name=Pt,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotString struct {
RectangleNgdotString *RectangleNgdotStringArgs `protobuf:"bytes,11,opt,name=RectangleNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotDx struct {
RectangleNgdotDx *RectangleNgdotDxArgs `protobuf:"bytes,12,opt,name=RectangleNgdotDx,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotDy struct {
RectangleNgdotDy *RectangleNgdotDyArgs `protobuf:"bytes,13,opt,name=RectangleNgdotDy,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotSize struct {
RectangleNgdotSize *RectangleNgdotSizeArgs `protobuf:"bytes,14,opt,name=RectangleNgdotSize,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotAdd struct {
RectangleNgdotAdd *RectangleNgdotAddArgs `protobuf:"bytes,15,opt,name=RectangleNgdotAdd,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotSub struct {
RectangleNgdotSub *RectangleNgdotSubArgs `protobuf:"bytes,16,opt,name=RectangleNgdotSub,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotInset struct {
RectangleNgdotInset *RectangleNgdotInsetArgs `protobuf:"bytes,17,opt,name=RectangleNgdotInset,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotIntersect struct {
RectangleNgdotIntersect *RectangleNgdotIntersectArgs `protobuf:"bytes,18,opt,name=RectangleNgdotIntersect,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotUnion struct {
RectangleNgdotUnion *RectangleNgdotUnionArgs `protobuf:"bytes,19,opt,name=RectangleNgdotUnion,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotEmpty struct {
RectangleNgdotEmpty *RectangleNgdotEmptyArgs `protobuf:"bytes,20,opt,name=RectangleNgdotEmpty,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotEq struct {
RectangleNgdotEq *RectangleNgdotEqArgs `protobuf:"bytes,21,opt,name=RectangleNgdotEq,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotOverlaps struct {
RectangleNgdotOverlaps *RectangleNgdotOverlapsArgs `protobuf:"bytes,22,opt,name=RectangleNgdotOverlaps,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotIn struct {
RectangleNgdotIn *RectangleNgdotInArgs `protobuf:"bytes,23,opt,name=RectangleNgdotIn,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotCanon struct {
RectangleNgdotCanon *RectangleNgdotCanonArgs `protobuf:"bytes,24,opt,name=RectangleNgdotCanon,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotAt struct {
RectangleNgdotAt *RectangleNgdotAtArgs `protobuf:"bytes,25,opt,name=RectangleNgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotRGBA64At struct {
RectangleNgdotRGBA64At *RectangleNgdotRGBA64AtArgs `protobuf:"bytes,26,opt,name=RectangleNgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotBounds struct {
RectangleNgdotBounds *RectangleNgdotBoundsArgs `protobuf:"bytes,27,opt,name=RectangleNgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_RectangleNgdotColorModel struct {
RectangleNgdotColorModel *RectangleNgdotColorModelArgs `protobuf:"bytes,28,opt,name=RectangleNgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_Rect struct {
Rect *RectArgs `protobuf:"bytes,29,opt,name=Rect,proto3,oneof"`
}
type NgoloFuzzOne_RGBANgdotColorModel struct {
RGBANgdotColorModel *RGBANgdotColorModelArgs `protobuf:"bytes,30,opt,name=RGBANgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_RGBANgdotBounds struct {
RGBANgdotBounds *RGBANgdotBoundsArgs `protobuf:"bytes,31,opt,name=RGBANgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_RGBANgdotAt struct {
RGBANgdotAt *RGBANgdotAtArgs `protobuf:"bytes,32,opt,name=RGBANgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_RGBANgdotRGBA64At struct {
RGBANgdotRGBA64At *RGBANgdotRGBA64AtArgs `protobuf:"bytes,33,opt,name=RGBANgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_RGBANgdotRGBAAt struct {
RGBANgdotRGBAAt *RGBANgdotRGBAAtArgs `protobuf:"bytes,34,opt,name=RGBANgdotRGBAAt,proto3,oneof"`
}
type NgoloFuzzOne_RGBANgdotPixOffset struct {
RGBANgdotPixOffset *RGBANgdotPixOffsetArgs `protobuf:"bytes,35,opt,name=RGBANgdotPixOffset,proto3,oneof"`
}
type NgoloFuzzOne_RGBANgdotSubImage struct {
RGBANgdotSubImage *RGBANgdotSubImageArgs `protobuf:"bytes,36,opt,name=RGBANgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_RGBANgdotOpaque struct {
RGBANgdotOpaque *RGBANgdotOpaqueArgs `protobuf:"bytes,37,opt,name=RGBANgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewRGBA struct {
NewRGBA *NewRGBAArgs `protobuf:"bytes,38,opt,name=NewRGBA,proto3,oneof"`
}
type NgoloFuzzOne_RGBA64NgdotColorModel struct {
RGBA64NgdotColorModel *RGBA64NgdotColorModelArgs `protobuf:"bytes,39,opt,name=RGBA64NgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_RGBA64NgdotBounds struct {
RGBA64NgdotBounds *RGBA64NgdotBoundsArgs `protobuf:"bytes,40,opt,name=RGBA64NgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_RGBA64NgdotAt struct {
RGBA64NgdotAt *RGBA64NgdotAtArgs `protobuf:"bytes,41,opt,name=RGBA64NgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_RGBA64NgdotRGBA64At struct {
RGBA64NgdotRGBA64At *RGBA64NgdotRGBA64AtArgs `protobuf:"bytes,42,opt,name=RGBA64NgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_RGBA64NgdotPixOffset struct {
RGBA64NgdotPixOffset *RGBA64NgdotPixOffsetArgs `protobuf:"bytes,43,opt,name=RGBA64NgdotPixOffset,proto3,oneof"`
}
type NgoloFuzzOne_RGBA64NgdotSubImage struct {
RGBA64NgdotSubImage *RGBA64NgdotSubImageArgs `protobuf:"bytes,44,opt,name=RGBA64NgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_RGBA64NgdotOpaque struct {
RGBA64NgdotOpaque *RGBA64NgdotOpaqueArgs `protobuf:"bytes,45,opt,name=RGBA64NgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewRGBA64 struct {
NewRGBA64 *NewRGBA64Args `protobuf:"bytes,46,opt,name=NewRGBA64,proto3,oneof"`
}
type NgoloFuzzOne_NRGBANgdotColorModel struct {
NRGBANgdotColorModel *NRGBANgdotColorModelArgs `protobuf:"bytes,47,opt,name=NRGBANgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_NRGBANgdotBounds struct {
NRGBANgdotBounds *NRGBANgdotBoundsArgs `protobuf:"bytes,48,opt,name=NRGBANgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_NRGBANgdotAt struct {
NRGBANgdotAt *NRGBANgdotAtArgs `protobuf:"bytes,49,opt,name=NRGBANgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_NRGBANgdotRGBA64At struct {
NRGBANgdotRGBA64At *NRGBANgdotRGBA64AtArgs `protobuf:"bytes,50,opt,name=NRGBANgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_NRGBANgdotNRGBAAt struct {
NRGBANgdotNRGBAAt *NRGBANgdotNRGBAAtArgs `protobuf:"bytes,51,opt,name=NRGBANgdotNRGBAAt,proto3,oneof"`
}
type NgoloFuzzOne_NRGBANgdotPixOffset struct {
NRGBANgdotPixOffset *NRGBANgdotPixOffsetArgs `protobuf:"bytes,52,opt,name=NRGBANgdotPixOffset,proto3,oneof"`
}
type NgoloFuzzOne_NRGBANgdotSubImage struct {
NRGBANgdotSubImage *NRGBANgdotSubImageArgs `protobuf:"bytes,53,opt,name=NRGBANgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_NRGBANgdotOpaque struct {
NRGBANgdotOpaque *NRGBANgdotOpaqueArgs `protobuf:"bytes,54,opt,name=NRGBANgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewNRGBA struct {
NewNRGBA *NewNRGBAArgs `protobuf:"bytes,55,opt,name=NewNRGBA,proto3,oneof"`
}
type NgoloFuzzOne_NRGBA64NgdotColorModel struct {
NRGBA64NgdotColorModel *NRGBA64NgdotColorModelArgs `protobuf:"bytes,56,opt,name=NRGBA64NgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_NRGBA64NgdotBounds struct {
NRGBA64NgdotBounds *NRGBA64NgdotBoundsArgs `protobuf:"bytes,57,opt,name=NRGBA64NgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_NRGBA64NgdotAt struct {
NRGBA64NgdotAt *NRGBA64NgdotAtArgs `protobuf:"bytes,58,opt,name=NRGBA64NgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_NRGBA64NgdotRGBA64At struct {
NRGBA64NgdotRGBA64At *NRGBA64NgdotRGBA64AtArgs `protobuf:"bytes,59,opt,name=NRGBA64NgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_NRGBA64NgdotNRGBA64At struct {
NRGBA64NgdotNRGBA64At *NRGBA64NgdotNRGBA64AtArgs `protobuf:"bytes,60,opt,name=NRGBA64NgdotNRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_NRGBA64NgdotPixOffset struct {
NRGBA64NgdotPixOffset *NRGBA64NgdotPixOffsetArgs `protobuf:"bytes,61,opt,name=NRGBA64NgdotPixOffset,proto3,oneof"`
}
type NgoloFuzzOne_NRGBA64NgdotSubImage struct {
NRGBA64NgdotSubImage *NRGBA64NgdotSubImageArgs `protobuf:"bytes,62,opt,name=NRGBA64NgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_NRGBA64NgdotOpaque struct {
NRGBA64NgdotOpaque *NRGBA64NgdotOpaqueArgs `protobuf:"bytes,63,opt,name=NRGBA64NgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewNRGBA64 struct {
NewNRGBA64 *NewNRGBA64Args `protobuf:"bytes,64,opt,name=NewNRGBA64,proto3,oneof"`
}
type NgoloFuzzOne_AlphaNgdotColorModel struct {
AlphaNgdotColorModel *AlphaNgdotColorModelArgs `protobuf:"bytes,65,opt,name=AlphaNgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_AlphaNgdotBounds struct {
AlphaNgdotBounds *AlphaNgdotBoundsArgs `protobuf:"bytes,66,opt,name=AlphaNgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_AlphaNgdotAt struct {
AlphaNgdotAt *AlphaNgdotAtArgs `protobuf:"bytes,67,opt,name=AlphaNgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_AlphaNgdotRGBA64At struct {
AlphaNgdotRGBA64At *AlphaNgdotRGBA64AtArgs `protobuf:"bytes,68,opt,name=AlphaNgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_AlphaNgdotAlphaAt struct {
AlphaNgdotAlphaAt *AlphaNgdotAlphaAtArgs `protobuf:"bytes,69,opt,name=AlphaNgdotAlphaAt,proto3,oneof"`
}
type NgoloFuzzOne_AlphaNgdotPixOffset struct {
AlphaNgdotPixOffset *AlphaNgdotPixOffsetArgs `protobuf:"bytes,70,opt,name=AlphaNgdotPixOffset,proto3,oneof"`
}
type NgoloFuzzOne_AlphaNgdotSubImage struct {
AlphaNgdotSubImage *AlphaNgdotSubImageArgs `protobuf:"bytes,71,opt,name=AlphaNgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_AlphaNgdotOpaque struct {
AlphaNgdotOpaque *AlphaNgdotOpaqueArgs `protobuf:"bytes,72,opt,name=AlphaNgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewAlpha struct {
NewAlpha *NewAlphaArgs `protobuf:"bytes,73,opt,name=NewAlpha,proto3,oneof"`
}
type NgoloFuzzOne_Alpha16NgdotColorModel struct {
Alpha16NgdotColorModel *Alpha16NgdotColorModelArgs `protobuf:"bytes,74,opt,name=Alpha16NgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_Alpha16NgdotBounds struct {
Alpha16NgdotBounds *Alpha16NgdotBoundsArgs `protobuf:"bytes,75,opt,name=Alpha16NgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_Alpha16NgdotAt struct {
Alpha16NgdotAt *Alpha16NgdotAtArgs `protobuf:"bytes,76,opt,name=Alpha16NgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_Alpha16NgdotRGBA64At struct {
Alpha16NgdotRGBA64At *Alpha16NgdotRGBA64AtArgs `protobuf:"bytes,77,opt,name=Alpha16NgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_Alpha16NgdotAlpha16At struct {
Alpha16NgdotAlpha16At *Alpha16NgdotAlpha16AtArgs `protobuf:"bytes,78,opt,name=Alpha16NgdotAlpha16At,proto3,oneof"`
}
type NgoloFuzzOne_Alpha16NgdotPixOffset struct {
Alpha16NgdotPixOffset *Alpha16NgdotPixOffsetArgs `protobuf:"bytes,79,opt,name=Alpha16NgdotPixOffset,proto3,oneof"`
}
type NgoloFuzzOne_Alpha16NgdotSubImage struct {
Alpha16NgdotSubImage *Alpha16NgdotSubImageArgs `protobuf:"bytes,80,opt,name=Alpha16NgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_Alpha16NgdotOpaque struct {
Alpha16NgdotOpaque *Alpha16NgdotOpaqueArgs `protobuf:"bytes,81,opt,name=Alpha16NgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewAlpha16 struct {
NewAlpha16 *NewAlpha16Args `protobuf:"bytes,82,opt,name=NewAlpha16,proto3,oneof"`
}
type NgoloFuzzOne_GrayNgdotColorModel struct {
GrayNgdotColorModel *GrayNgdotColorModelArgs `protobuf:"bytes,83,opt,name=GrayNgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_GrayNgdotBounds struct {
GrayNgdotBounds *GrayNgdotBoundsArgs `protobuf:"bytes,84,opt,name=GrayNgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_GrayNgdotAt struct {
GrayNgdotAt *GrayNgdotAtArgs `protobuf:"bytes,85,opt,name=GrayNgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_GrayNgdotRGBA64At struct {
GrayNgdotRGBA64At *GrayNgdotRGBA64AtArgs `protobuf:"bytes,86,opt,name=GrayNgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_GrayNgdotGrayAt struct {
GrayNgdotGrayAt *GrayNgdotGrayAtArgs `protobuf:"bytes,87,opt,name=GrayNgdotGrayAt,proto3,oneof"`
}
type NgoloFuzzOne_GrayNgdotPixOffset struct {
GrayNgdotPixOffset *GrayNgdotPixOffsetArgs `protobuf:"bytes,88,opt,name=GrayNgdotPixOffset,proto3,oneof"`
}
type NgoloFuzzOne_GrayNgdotSubImage struct {
GrayNgdotSubImage *GrayNgdotSubImageArgs `protobuf:"bytes,89,opt,name=GrayNgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_GrayNgdotOpaque struct {
GrayNgdotOpaque *GrayNgdotOpaqueArgs `protobuf:"bytes,90,opt,name=GrayNgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewGray struct {
NewGray *NewGrayArgs `protobuf:"bytes,91,opt,name=NewGray,proto3,oneof"`
}
type NgoloFuzzOne_Gray16NgdotColorModel struct {
Gray16NgdotColorModel *Gray16NgdotColorModelArgs `protobuf:"bytes,92,opt,name=Gray16NgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_Gray16NgdotBounds struct {
Gray16NgdotBounds *Gray16NgdotBoundsArgs `protobuf:"bytes,93,opt,name=Gray16NgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_Gray16NgdotAt struct {
Gray16NgdotAt *Gray16NgdotAtArgs `protobuf:"bytes,94,opt,name=Gray16NgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_Gray16NgdotRGBA64At struct {
Gray16NgdotRGBA64At *Gray16NgdotRGBA64AtArgs `protobuf:"bytes,95,opt,name=Gray16NgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_Gray16NgdotGray16At struct {
Gray16NgdotGray16At *Gray16NgdotGray16AtArgs `protobuf:"bytes,96,opt,name=Gray16NgdotGray16At,proto3,oneof"`
}
type NgoloFuzzOne_Gray16NgdotPixOffset struct {
Gray16NgdotPixOffset *Gray16NgdotPixOffsetArgs `protobuf:"bytes,97,opt,name=Gray16NgdotPixOffset,proto3,oneof"`
}
type NgoloFuzzOne_Gray16NgdotSubImage struct {
Gray16NgdotSubImage *Gray16NgdotSubImageArgs `protobuf:"bytes,98,opt,name=Gray16NgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_Gray16NgdotOpaque struct {
Gray16NgdotOpaque *Gray16NgdotOpaqueArgs `protobuf:"bytes,99,opt,name=Gray16NgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewGray16 struct {
NewGray16 *NewGray16Args `protobuf:"bytes,100,opt,name=NewGray16,proto3,oneof"`
}
type NgoloFuzzOne_CMYKNgdotColorModel struct {
CMYKNgdotColorModel *CMYKNgdotColorModelArgs `protobuf:"bytes,101,opt,name=CMYKNgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_CMYKNgdotBounds struct {
CMYKNgdotBounds *CMYKNgdotBoundsArgs `protobuf:"bytes,102,opt,name=CMYKNgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_CMYKNgdotAt struct {
CMYKNgdotAt *CMYKNgdotAtArgs `protobuf:"bytes,103,opt,name=CMYKNgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_CMYKNgdotRGBA64At struct {
CMYKNgdotRGBA64At *CMYKNgdotRGBA64AtArgs `protobuf:"bytes,104,opt,name=CMYKNgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_CMYKNgdotCMYKAt struct {
CMYKNgdotCMYKAt *CMYKNgdotCMYKAtArgs `protobuf:"bytes,105,opt,name=CMYKNgdotCMYKAt,proto3,oneof"`
}
type NgoloFuzzOne_CMYKNgdotPixOffset struct {
CMYKNgdotPixOffset *CMYKNgdotPixOffsetArgs `protobuf:"bytes,106,opt,name=CMYKNgdotPixOffset,proto3,oneof"`
}
type NgoloFuzzOne_CMYKNgdotSubImage struct {
CMYKNgdotSubImage *CMYKNgdotSubImageArgs `protobuf:"bytes,107,opt,name=CMYKNgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_CMYKNgdotOpaque struct {
CMYKNgdotOpaque *CMYKNgdotOpaqueArgs `protobuf:"bytes,108,opt,name=CMYKNgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewCMYK struct {
NewCMYK *NewCMYKArgs `protobuf:"bytes,109,opt,name=NewCMYK,proto3,oneof"`
}
type NgoloFuzzOne_PalettedNgdotColorModel struct {
PalettedNgdotColorModel *PalettedNgdotColorModelArgs `protobuf:"bytes,110,opt,name=PalettedNgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_PalettedNgdotBounds struct {
PalettedNgdotBounds *PalettedNgdotBoundsArgs `protobuf:"bytes,111,opt,name=PalettedNgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_PalettedNgdotAt struct {
PalettedNgdotAt *PalettedNgdotAtArgs `protobuf:"bytes,112,opt,name=PalettedNgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_PalettedNgdotRGBA64At struct {
PalettedNgdotRGBA64At *PalettedNgdotRGBA64AtArgs `protobuf:"bytes,113,opt,name=PalettedNgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_PalettedNgdotPixOffset struct {
PalettedNgdotPixOffset *PalettedNgdotPixOffsetArgs `protobuf:"bytes,114,opt,name=PalettedNgdotPixOffset,proto3,oneof"`
}
type NgoloFuzzOne_PalettedNgdotColorIndexAt struct {
PalettedNgdotColorIndexAt *PalettedNgdotColorIndexAtArgs `protobuf:"bytes,115,opt,name=PalettedNgdotColorIndexAt,proto3,oneof"`
}
type NgoloFuzzOne_PalettedNgdotSetColorIndex struct {
PalettedNgdotSetColorIndex *PalettedNgdotSetColorIndexArgs `protobuf:"bytes,116,opt,name=PalettedNgdotSetColorIndex,proto3,oneof"`
}
type NgoloFuzzOne_PalettedNgdotSubImage struct {
PalettedNgdotSubImage *PalettedNgdotSubImageArgs `protobuf:"bytes,117,opt,name=PalettedNgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_PalettedNgdotOpaque struct {
PalettedNgdotOpaque *PalettedNgdotOpaqueArgs `protobuf:"bytes,118,opt,name=PalettedNgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_UniformNgdotRGBA struct {
UniformNgdotRGBA *UniformNgdotRGBAArgs `protobuf:"bytes,119,opt,name=UniformNgdotRGBA,proto3,oneof"`
}
type NgoloFuzzOne_UniformNgdotColorModel struct {
UniformNgdotColorModel *UniformNgdotColorModelArgs `protobuf:"bytes,120,opt,name=UniformNgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_UniformNgdotBounds struct {
UniformNgdotBounds *UniformNgdotBoundsArgs `protobuf:"bytes,121,opt,name=UniformNgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_UniformNgdotAt struct {
UniformNgdotAt *UniformNgdotAtArgs `protobuf:"bytes,122,opt,name=UniformNgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_UniformNgdotRGBA64At struct {
UniformNgdotRGBA64At *UniformNgdotRGBA64AtArgs `protobuf:"bytes,123,opt,name=UniformNgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_UniformNgdotOpaque struct {
UniformNgdotOpaque *UniformNgdotOpaqueArgs `protobuf:"bytes,124,opt,name=UniformNgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_YCbCrSubsampleRatioNgdotString struct {
YCbCrSubsampleRatioNgdotString *YCbCrSubsampleRatioNgdotStringArgs `protobuf:"bytes,125,opt,name=YCbCrSubsampleRatioNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_YCbCrNgdotColorModel struct {
YCbCrNgdotColorModel *YCbCrNgdotColorModelArgs `protobuf:"bytes,126,opt,name=YCbCrNgdotColorModel,proto3,oneof"`
}
type NgoloFuzzOne_YCbCrNgdotBounds struct {
YCbCrNgdotBounds *YCbCrNgdotBoundsArgs `protobuf:"bytes,127,opt,name=YCbCrNgdotBounds,proto3,oneof"`
}
type NgoloFuzzOne_YCbCrNgdotAt struct {
YCbCrNgdotAt *YCbCrNgdotAtArgs `protobuf:"bytes,128,opt,name=YCbCrNgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_YCbCrNgdotRGBA64At struct {
YCbCrNgdotRGBA64At *YCbCrNgdotRGBA64AtArgs `protobuf:"bytes,129,opt,name=YCbCrNgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_YCbCrNgdotYCbCrAt struct {
YCbCrNgdotYCbCrAt *YCbCrNgdotYCbCrAtArgs `protobuf:"bytes,130,opt,name=YCbCrNgdotYCbCrAt,proto3,oneof"`
}
type NgoloFuzzOne_YCbCrNgdotYOffset struct {
YCbCrNgdotYOffset *YCbCrNgdotYOffsetArgs `protobuf:"bytes,131,opt,name=YCbCrNgdotYOffset,proto3,oneof"`
}
type NgoloFuzzOne_YCbCrNgdotCOffset struct {
YCbCrNgdotCOffset *YCbCrNgdotCOffsetArgs `protobuf:"bytes,132,opt,name=YCbCrNgdotCOffset,proto3,oneof"`
}
type NgoloFuzzOne_YCbCrNgdotSubImage struct {
YCbCrNgdotSubImage *YCbCrNgdotSubImageArgs `protobuf:"bytes,133,opt,name=YCbCrNgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_YCbCrNgdotOpaque struct {
YCbCrNgdotOpaque *YCbCrNgdotOpaqueArgs `protobuf:"bytes,134,opt,name=YCbCrNgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewYCbCr struct {
NewYCbCr *NewYCbCrArgs `protobuf:"bytes,135,opt,name=NewYCbCr,proto3,oneof"`
}
type NgoloFuzzOne_NYCbCrANgdotColorModel struct {
NYCbCrANgdotColorModel *NYCbCrANgdotColorModelArgs `protobuf:"bytes,136,opt,name=NYCbCrANgdotColorModel,proto3,oneof"`
}
// NOTE(review): field numbers jump from 136 to 137 with no NYCbCrANgdotBounds
// variant; presumably intentional in the .proto — confirm against the schema.
type NgoloFuzzOne_NYCbCrANgdotAt struct {
NYCbCrANgdotAt *NYCbCrANgdotAtArgs `protobuf:"bytes,137,opt,name=NYCbCrANgdotAt,proto3,oneof"`
}
type NgoloFuzzOne_NYCbCrANgdotRGBA64At struct {
NYCbCrANgdotRGBA64At *NYCbCrANgdotRGBA64AtArgs `protobuf:"bytes,138,opt,name=NYCbCrANgdotRGBA64At,proto3,oneof"`
}
type NgoloFuzzOne_NYCbCrANgdotNYCbCrAAt struct {
NYCbCrANgdotNYCbCrAAt *NYCbCrANgdotNYCbCrAAtArgs `protobuf:"bytes,139,opt,name=NYCbCrANgdotNYCbCrAAt,proto3,oneof"`
}
type NgoloFuzzOne_NYCbCrANgdotAOffset struct {
NYCbCrANgdotAOffset *NYCbCrANgdotAOffsetArgs `protobuf:"bytes,140,opt,name=NYCbCrANgdotAOffset,proto3,oneof"`
}
type NgoloFuzzOne_NYCbCrANgdotSubImage struct {
NYCbCrANgdotSubImage *NYCbCrANgdotSubImageArgs `protobuf:"bytes,141,opt,name=NYCbCrANgdotSubImage,proto3,oneof"`
}
type NgoloFuzzOne_NYCbCrANgdotOpaque struct {
NYCbCrANgdotOpaque *NYCbCrANgdotOpaqueArgs `protobuf:"bytes,142,opt,name=NYCbCrANgdotOpaque,proto3,oneof"`
}
type NgoloFuzzOne_NewNYCbCrA struct {
NewNYCbCrA *NewNYCbCrAArgs `protobuf:"bytes,143,opt,name=NewNYCbCrA,proto3,oneof"`
}
// Empty marker-method implementations: each wrapper type satisfies
// isNgoloFuzzOne_Item so that it — and only it — may be stored in the
// NgoloFuzzOne.Item oneof field. Generated code — do not edit by hand.
func (*NgoloFuzzOne_Decode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecodeConfig) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PointNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PointNgdotAdd) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PointNgdotSub) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PointNgdotMul) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PointNgdotIn) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PointNgdotMod) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PointNgdotEq) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Pt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotDx) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotDy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotAdd) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotSub) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotInset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotIntersect) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotUnion) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotEmpty) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotEq) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotOverlaps) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotIn) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotCanon) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RectangleNgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Rect) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBANgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBANgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBANgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBANgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBANgdotRGBAAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBANgdotPixOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBANgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBANgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewRGBA) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBA64NgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBA64NgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBA64NgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBA64NgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBA64NgdotPixOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBA64NgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RGBA64NgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewRGBA64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBANgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBANgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBANgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBANgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBANgdotNRGBAAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBANgdotPixOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBANgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBANgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewNRGBA) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBA64NgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBA64NgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBA64NgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBA64NgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBA64NgdotNRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBA64NgdotPixOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBA64NgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NRGBA64NgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewNRGBA64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AlphaNgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AlphaNgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AlphaNgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AlphaNgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AlphaNgdotAlphaAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AlphaNgdotPixOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AlphaNgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AlphaNgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewAlpha) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Alpha16NgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Alpha16NgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Alpha16NgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Alpha16NgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Alpha16NgdotAlpha16At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Alpha16NgdotPixOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Alpha16NgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Alpha16NgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewAlpha16) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GrayNgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GrayNgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GrayNgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GrayNgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GrayNgdotGrayAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GrayNgdotPixOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GrayNgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GrayNgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewGray) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Gray16NgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Gray16NgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Gray16NgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Gray16NgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Gray16NgdotGray16At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Gray16NgdotPixOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Gray16NgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Gray16NgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewGray16) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CMYKNgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CMYKNgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CMYKNgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CMYKNgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CMYKNgdotCMYKAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CMYKNgdotPixOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CMYKNgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CMYKNgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewCMYK) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PalettedNgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PalettedNgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PalettedNgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PalettedNgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PalettedNgdotPixOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PalettedNgdotColorIndexAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PalettedNgdotSetColorIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PalettedNgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PalettedNgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UniformNgdotRGBA) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UniformNgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UniformNgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UniformNgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UniformNgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UniformNgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_YCbCrSubsampleRatioNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_YCbCrNgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_YCbCrNgdotBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_YCbCrNgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_YCbCrNgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_YCbCrNgdotYCbCrAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_YCbCrNgdotYOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_YCbCrNgdotCOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_YCbCrNgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_YCbCrNgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewYCbCr) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NYCbCrANgdotColorModel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NYCbCrANgdotAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NYCbCrANgdotRGBA64At) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NYCbCrANgdotNYCbCrAAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NYCbCrANgdotAOffset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NYCbCrANgdotSubImage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NYCbCrANgdotOpaque) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewNYCbCrA) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated protobuf message that carries exactly one
// scalar fuzzing argument (double, int64, bool, string, or bytes) in a
// oneof. The struct tags are part of the wire contract — do not edit.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x back to its zero value, then re-registers the message
// type info (index 144 in this file's type table) so the protobuf
// runtime's lazy state remains valid after the wipe.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	state := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	state.StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[144])
}
// String returns a human-readable rendering of the message via the
// protobuf runtime's default formatter.
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *NgoloFuzzAny as a protobuf message (marker only).
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message for use by the
// protobuf runtime. For a nil receiver it falls back to the type-level
// view; otherwise it lazily stores the message info on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[144]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}
// Descriptor returns the gzipped raw file descriptor and this message's
// index path within it.
//
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{144}
}
// GetItem returns the populated oneof wrapper, or nil for a nil receiver
// or an unset oneof.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x == nil {
		return nil
	}
	return x.Item
}
// GetDoubleArgs returns the DoubleArgs value when that oneof case is
// set; otherwise (nil receiver or different case) it returns 0.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x == nil {
		return 0
	}
	w, ok := x.Item.(*NgoloFuzzAny_DoubleArgs)
	if !ok {
		return 0
	}
	return w.DoubleArgs
}
// GetInt64Args returns the Int64Args value when that oneof case is set;
// otherwise (nil receiver or different case) it returns 0.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x == nil {
		return 0
	}
	w, ok := x.Item.(*NgoloFuzzAny_Int64Args)
	if !ok {
		return 0
	}
	return w.Int64Args
}
// GetBoolArgs returns the BoolArgs value when that oneof case is set;
// otherwise (nil receiver or different case) it returns false.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x == nil {
		return false
	}
	w, ok := x.Item.(*NgoloFuzzAny_BoolArgs)
	if !ok {
		return false
	}
	return w.BoolArgs
}
// GetStringArgs returns the StringArgs value when that oneof case is
// set; otherwise (nil receiver or different case) it returns "".
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x == nil {
		return ""
	}
	w, ok := x.Item.(*NgoloFuzzAny_StringArgs)
	if !ok {
		return ""
	}
	return w.StringArgs
}
// GetBytesArgs returns the BytesArgs slice when that oneof case is set;
// otherwise (nil receiver or different case) it returns nil.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x == nil {
		return nil
	}
	w, ok := x.Item.(*NgoloFuzzAny_BytesArgs)
	if !ok {
		return nil
	}
	return w.BytesArgs
}
// isNgoloFuzzAny_Item is the sealed interface implemented by every
// generated oneof wrapper for NgoloFuzzAny.Item.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
// Oneof wrapper types, one per case; field numbers 1-5 per the tags.
// The struct tags are part of the wire contract — do not edit.
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
// Marker methods tying each wrapper to the sealed oneof interface.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is a generated protobuf message holding a repeated
// sequence of NgoloFuzzOne operations (a fuzzing "program").
// The struct tags are part of the wire contract — do not edit.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x back to its zero value, then re-registers the message
// type info (index 145 in this file's type table) so the protobuf
// runtime's lazy state remains valid after the wipe.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	state := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	state.StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[145])
}
// String returns a human-readable rendering of the message via the
// protobuf runtime's default formatter.
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *NgoloFuzzList as a protobuf message (marker only).
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message for use by the
// protobuf runtime. For a nil receiver it falls back to the type-level
// view; otherwise it lazily stores the message info on first use.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[145]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}
// Descriptor returns the gzipped raw file descriptor and this message's
// index path within it.
//
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{145}
}
// GetList returns the repeated operation list, or nil for a nil
// receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x == nil {
		return nil
	}
	return x.List
}
// File_ngolofuzz_proto is the compiled FileDescriptor handle for
// ngolofuzz.proto, populated at init time by the generated code.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc holds the wire-format serialization of
// the ngolofuzz.proto file descriptor. The bytes are generated and must
// match the .proto source exactly — do not edit by hand.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1a\n" +
"\n" +
"DecodeArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\" \n" +
"\x10DecodeConfigArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\x16\n" +
"\x14PointNgdotStringArgs\"\x13\n" +
"\x11PointNgdotAddArgs\"\x13\n" +
"\x11PointNgdotSubArgs\"!\n" +
"\x11PointNgdotMulArgs\x12\f\n" +
"\x01k\x18\x01 \x01(\x03R\x01k\"\x12\n" +
"\x10PointNgdotInArgs\"\x13\n" +
"\x11PointNgdotModArgs\"\x12\n" +
"\x10PointNgdotEqArgs\"$\n" +
"\x06PtArgs\x12\f\n" +
"\x01X\x18\x01 \x01(\x03R\x01X\x12\f\n" +
"\x01Y\x18\x02 \x01(\x03R\x01Y\"\x1a\n" +
"\x18RectangleNgdotStringArgs\"\x16\n" +
"\x14RectangleNgdotDxArgs\"\x16\n" +
"\x14RectangleNgdotDyArgs\"\x18\n" +
"\x16RectangleNgdotSizeArgs\"\x17\n" +
"\x15RectangleNgdotAddArgs\"\x17\n" +
"\x15RectangleNgdotSubArgs\"'\n" +
"\x17RectangleNgdotInsetArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"\x1d\n" +
"\x1bRectangleNgdotIntersectArgs\"\x19\n" +
"\x17RectangleNgdotUnionArgs\"\x19\n" +
"\x17RectangleNgdotEmptyArgs\"\x16\n" +
"\x14RectangleNgdotEqArgs\"\x1c\n" +
"\x1aRectangleNgdotOverlapsArgs\"\x16\n" +
"\x14RectangleNgdotInArgs\"\x19\n" +
"\x17RectangleNgdotCanonArgs\"2\n" +
"\x14RectangleNgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"8\n" +
"\x1aRectangleNgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x1a\n" +
"\x18RectangleNgdotBoundsArgs\"\x1e\n" +
"\x1cRectangleNgdotColorModelArgs\"J\n" +
"\bRectArgs\x12\x0e\n" +
"\x02x0\x18\x01 \x01(\x03R\x02x0\x12\x0e\n" +
"\x02y0\x18\x02 \x01(\x03R\x02y0\x12\x0e\n" +
"\x02x1\x18\x03 \x01(\x03R\x02x1\x12\x0e\n" +
"\x02y1\x18\x04 \x01(\x03R\x02y1\"\x19\n" +
"\x17RGBANgdotColorModelArgs\"\x15\n" +
"\x13RGBANgdotBoundsArgs\"-\n" +
"\x0fRGBANgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"3\n" +
"\x15RGBANgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"1\n" +
"\x13RGBANgdotRGBAAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"4\n" +
"\x16RGBANgdotPixOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x17\n" +
"\x15RGBANgdotSubImageArgs\"\x15\n" +
"\x13RGBANgdotOpaqueArgs\"\r\n" +
"\vNewRGBAArgs\"\x1b\n" +
"\x19RGBA64NgdotColorModelArgs\"\x17\n" +
"\x15RGBA64NgdotBoundsArgs\"/\n" +
"\x11RGBA64NgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"5\n" +
"\x17RGBA64NgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"6\n" +
"\x18RGBA64NgdotPixOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x19\n" +
"\x17RGBA64NgdotSubImageArgs\"\x17\n" +
"\x15RGBA64NgdotOpaqueArgs\"\x0f\n" +
"\rNewRGBA64Args\"\x1a\n" +
"\x18NRGBANgdotColorModelArgs\"\x16\n" +
"\x14NRGBANgdotBoundsArgs\".\n" +
"\x10NRGBANgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"4\n" +
"\x16NRGBANgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"3\n" +
"\x15NRGBANgdotNRGBAAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"5\n" +
"\x17NRGBANgdotPixOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x18\n" +
"\x16NRGBANgdotSubImageArgs\"\x16\n" +
"\x14NRGBANgdotOpaqueArgs\"\x0e\n" +
"\fNewNRGBAArgs\"\x1c\n" +
"\x1aNRGBA64NgdotColorModelArgs\"\x18\n" +
"\x16NRGBA64NgdotBoundsArgs\"0\n" +
"\x12NRGBA64NgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"6\n" +
"\x18NRGBA64NgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"7\n" +
"\x19NRGBA64NgdotNRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"7\n" +
"\x19NRGBA64NgdotPixOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x1a\n" +
"\x18NRGBA64NgdotSubImageArgs\"\x18\n" +
"\x16NRGBA64NgdotOpaqueArgs\"\x10\n" +
"\x0eNewNRGBA64Args\"\x1a\n" +
"\x18AlphaNgdotColorModelArgs\"\x16\n" +
"\x14AlphaNgdotBoundsArgs\".\n" +
"\x10AlphaNgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"4\n" +
"\x16AlphaNgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"3\n" +
"\x15AlphaNgdotAlphaAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"5\n" +
"\x17AlphaNgdotPixOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x18\n" +
"\x16AlphaNgdotSubImageArgs\"\x16\n" +
"\x14AlphaNgdotOpaqueArgs\"\x0e\n" +
"\fNewAlphaArgs\"\x1c\n" +
"\x1aAlpha16NgdotColorModelArgs\"\x18\n" +
"\x16Alpha16NgdotBoundsArgs\"0\n" +
"\x12Alpha16NgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"6\n" +
"\x18Alpha16NgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"7\n" +
"\x19Alpha16NgdotAlpha16AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"7\n" +
"\x19Alpha16NgdotPixOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x1a\n" +
"\x18Alpha16NgdotSubImageArgs\"\x18\n" +
"\x16Alpha16NgdotOpaqueArgs\"\x10\n" +
"\x0eNewAlpha16Args\"\x19\n" +
"\x17GrayNgdotColorModelArgs\"\x15\n" +
"\x13GrayNgdotBoundsArgs\"-\n" +
"\x0fGrayNgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"3\n" +
"\x15GrayNgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"1\n" +
"\x13GrayNgdotGrayAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"4\n" +
"\x16GrayNgdotPixOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x17\n" +
"\x15GrayNgdotSubImageArgs\"\x15\n" +
"\x13GrayNgdotOpaqueArgs\"\r\n" +
"\vNewGrayArgs\"\x1b\n" +
"\x19Gray16NgdotColorModelArgs\"\x17\n" +
"\x15Gray16NgdotBoundsArgs\"/\n" +
"\x11Gray16NgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"5\n" +
"\x17Gray16NgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"5\n" +
"\x17Gray16NgdotGray16AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"6\n" +
"\x18Gray16NgdotPixOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x19\n" +
"\x17Gray16NgdotSubImageArgs\"\x17\n" +
"\x15Gray16NgdotOpaqueArgs\"\x0f\n" +
"\rNewGray16Args\"\x19\n" +
"\x17CMYKNgdotColorModelArgs\"\x15\n" +
"\x13CMYKNgdotBoundsArgs\"-\n" +
"\x0fCMYKNgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"3\n" +
"\x15CMYKNgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"1\n" +
"\x13CMYKNgdotCMYKAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"4\n" +
"\x16CMYKNgdotPixOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x17\n" +
"\x15CMYKNgdotSubImageArgs\"\x15\n" +
"\x13CMYKNgdotOpaqueArgs\"\r\n" +
"\vNewCMYKArgs\"\x1d\n" +
"\x1bPalettedNgdotColorModelArgs\"\x19\n" +
"\x17PalettedNgdotBoundsArgs\"1\n" +
"\x13PalettedNgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"7\n" +
"\x19PalettedNgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"8\n" +
"\x1aPalettedNgdotPixOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\";\n" +
"\x1dPalettedNgdotColorIndexAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"R\n" +
"\x1ePalettedNgdotSetColorIndexArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\x12\x14\n" +
"\x05index\x18\x03 \x01(\rR\x05index\"\x1b\n" +
"\x19PalettedNgdotSubImageArgs\"\x19\n" +
"\x17PalettedNgdotOpaqueArgs\"\x16\n" +
"\x14UniformNgdotRGBAArgs\"\x1c\n" +
"\x1aUniformNgdotColorModelArgs\"\x18\n" +
"\x16UniformNgdotBoundsArgs\"0\n" +
"\x12UniformNgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"6\n" +
"\x18UniformNgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x18\n" +
"\x16UniformNgdotOpaqueArgs\"V\n" +
"\"YCbCrSubsampleRatioNgdotStringArgs\x120\n" +
"\x01s\x18\x01 \x01(\x0e2\".ngolofuzz.YCbCrSubsampleRatioEnumR\x01s\"\x1a\n" +
"\x18YCbCrNgdotColorModelArgs\"\x16\n" +
"\x14YCbCrNgdotBoundsArgs\".\n" +
"\x10YCbCrNgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"4\n" +
"\x16YCbCrNgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"3\n" +
"\x15YCbCrNgdotYCbCrAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"3\n" +
"\x15YCbCrNgdotYOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"3\n" +
"\x15YCbCrNgdotCOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x18\n" +
"\x16YCbCrNgdotSubImageArgs\"\x16\n" +
"\x14YCbCrNgdotOpaqueArgs\"Z\n" +
"\fNewYCbCrArgs\x12J\n" +
"\x0esubsampleRatio\x18\x01 \x01(\x0e2\".ngolofuzz.YCbCrSubsampleRatioEnumR\x0esubsampleRatio\"\x1c\n" +
"\x1aNYCbCrANgdotColorModelArgs\"0\n" +
"\x12NYCbCrANgdotAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"6\n" +
"\x18NYCbCrANgdotRGBA64AtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"7\n" +
"\x19NYCbCrANgdotNYCbCrAAtArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"5\n" +
"\x17NYCbCrANgdotAOffsetArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\x12\f\n" +
"\x01y\x18\x02 \x01(\x03R\x01y\"\x1a\n" +
"\x18NYCbCrANgdotSubImageArgs\"\x18\n" +
"\x16NYCbCrANgdotOpaqueArgs\"\\\n" +
"\x0eNewNYCbCrAArgs\x12J\n" +
"\x0esubsampleRatio\x18\x01 \x01(\x0e2\".ngolofuzz.YCbCrSubsampleRatioEnumR\x0esubsampleRatio\"\x85[\n" +
"\fNgoloFuzzOne\x12/\n" +
"\x06Decode\x18\x01 \x01(\v2\x15.ngolofuzz.DecodeArgsH\x00R\x06Decode\x12A\n" +
"\fDecodeConfig\x18\x02 \x01(\v2\x1b.ngolofuzz.DecodeConfigArgsH\x00R\fDecodeConfig\x12M\n" +
"\x10PointNgdotString\x18\x03 \x01(\v2\x1f.ngolofuzz.PointNgdotStringArgsH\x00R\x10PointNgdotString\x12D\n" +
"\rPointNgdotAdd\x18\x04 \x01(\v2\x1c.ngolofuzz.PointNgdotAddArgsH\x00R\rPointNgdotAdd\x12D\n" +
"\rPointNgdotSub\x18\x05 \x01(\v2\x1c.ngolofuzz.PointNgdotSubArgsH\x00R\rPointNgdotSub\x12D\n" +
"\rPointNgdotMul\x18\x06 \x01(\v2\x1c.ngolofuzz.PointNgdotMulArgsH\x00R\rPointNgdotMul\x12A\n" +
"\fPointNgdotIn\x18\a \x01(\v2\x1b.ngolofuzz.PointNgdotInArgsH\x00R\fPointNgdotIn\x12D\n" +
"\rPointNgdotMod\x18\b \x01(\v2\x1c.ngolofuzz.PointNgdotModArgsH\x00R\rPointNgdotMod\x12A\n" +
"\fPointNgdotEq\x18\t \x01(\v2\x1b.ngolofuzz.PointNgdotEqArgsH\x00R\fPointNgdotEq\x12#\n" +
"\x02Pt\x18\n" +
" \x01(\v2\x11.ngolofuzz.PtArgsH\x00R\x02Pt\x12Y\n" +
"\x14RectangleNgdotString\x18\v \x01(\v2#.ngolofuzz.RectangleNgdotStringArgsH\x00R\x14RectangleNgdotString\x12M\n" +
"\x10RectangleNgdotDx\x18\f \x01(\v2\x1f.ngolofuzz.RectangleNgdotDxArgsH\x00R\x10RectangleNgdotDx\x12M\n" +
"\x10RectangleNgdotDy\x18\r \x01(\v2\x1f.ngolofuzz.RectangleNgdotDyArgsH\x00R\x10RectangleNgdotDy\x12S\n" +
"\x12RectangleNgdotSize\x18\x0e \x01(\v2!.ngolofuzz.RectangleNgdotSizeArgsH\x00R\x12RectangleNgdotSize\x12P\n" +
"\x11RectangleNgdotAdd\x18\x0f \x01(\v2 .ngolofuzz.RectangleNgdotAddArgsH\x00R\x11RectangleNgdotAdd\x12P\n" +
"\x11RectangleNgdotSub\x18\x10 \x01(\v2 .ngolofuzz.RectangleNgdotSubArgsH\x00R\x11RectangleNgdotSub\x12V\n" +
"\x13RectangleNgdotInset\x18\x11 \x01(\v2\".ngolofuzz.RectangleNgdotInsetArgsH\x00R\x13RectangleNgdotInset\x12b\n" +
"\x17RectangleNgdotIntersect\x18\x12 \x01(\v2&.ngolofuzz.RectangleNgdotIntersectArgsH\x00R\x17RectangleNgdotIntersect\x12V\n" +
"\x13RectangleNgdotUnion\x18\x13 \x01(\v2\".ngolofuzz.RectangleNgdotUnionArgsH\x00R\x13RectangleNgdotUnion\x12V\n" +
"\x13RectangleNgdotEmpty\x18\x14 \x01(\v2\".ngolofuzz.RectangleNgdotEmptyArgsH\x00R\x13RectangleNgdotEmpty\x12M\n" +
"\x10RectangleNgdotEq\x18\x15 \x01(\v2\x1f.ngolofuzz.RectangleNgdotEqArgsH\x00R\x10RectangleNgdotEq\x12_\n" +
"\x16RectangleNgdotOverlaps\x18\x16 \x01(\v2%.ngolofuzz.RectangleNgdotOverlapsArgsH\x00R\x16RectangleNgdotOverlaps\x12M\n" +
"\x10RectangleNgdotIn\x18\x17 \x01(\v2\x1f.ngolofuzz.RectangleNgdotInArgsH\x00R\x10RectangleNgdotIn\x12V\n" +
"\x13RectangleNgdotCanon\x18\x18 \x01(\v2\".ngolofuzz.RectangleNgdotCanonArgsH\x00R\x13RectangleNgdotCanon\x12M\n" +
"\x10RectangleNgdotAt\x18\x19 \x01(\v2\x1f.ngolofuzz.RectangleNgdotAtArgsH\x00R\x10RectangleNgdotAt\x12_\n" +
"\x16RectangleNgdotRGBA64At\x18\x1a \x01(\v2%.ngolofuzz.RectangleNgdotRGBA64AtArgsH\x00R\x16RectangleNgdotRGBA64At\x12Y\n" +
"\x14RectangleNgdotBounds\x18\x1b \x01(\v2#.ngolofuzz.RectangleNgdotBoundsArgsH\x00R\x14RectangleNgdotBounds\x12e\n" +
"\x18RectangleNgdotColorModel\x18\x1c \x01(\v2'.ngolofuzz.RectangleNgdotColorModelArgsH\x00R\x18RectangleNgdotColorModel\x12)\n" +
"\x04Rect\x18\x1d \x01(\v2\x13.ngolofuzz.RectArgsH\x00R\x04Rect\x12V\n" +
"\x13RGBANgdotColorModel\x18\x1e \x01(\v2\".ngolofuzz.RGBANgdotColorModelArgsH\x00R\x13RGBANgdotColorModel\x12J\n" +
"\x0fRGBANgdotBounds\x18\x1f \x01(\v2\x1e.ngolofuzz.RGBANgdotBoundsArgsH\x00R\x0fRGBANgdotBounds\x12>\n" +
"\vRGBANgdotAt\x18 \x01(\v2\x1a.ngolofuzz.RGBANgdotAtArgsH\x00R\vRGBANgdotAt\x12P\n" +
"\x11RGBANgdotRGBA64At\x18! \x01(\v2 .ngolofuzz.RGBANgdotRGBA64AtArgsH\x00R\x11RGBANgdotRGBA64At\x12J\n" +
"\x0fRGBANgdotRGBAAt\x18\" \x01(\v2\x1e.ngolofuzz.RGBANgdotRGBAAtArgsH\x00R\x0fRGBANgdotRGBAAt\x12S\n" +
"\x12RGBANgdotPixOffset\x18# \x01(\v2!.ngolofuzz.RGBANgdotPixOffsetArgsH\x00R\x12RGBANgdotPixOffset\x12P\n" +
"\x11RGBANgdotSubImage\x18$ \x01(\v2 .ngolofuzz.RGBANgdotSubImageArgsH\x00R\x11RGBANgdotSubImage\x12J\n" +
"\x0fRGBANgdotOpaque\x18% \x01(\v2\x1e.ngolofuzz.RGBANgdotOpaqueArgsH\x00R\x0fRGBANgdotOpaque\x122\n" +
"\aNewRGBA\x18& \x01(\v2\x16.ngolofuzz.NewRGBAArgsH\x00R\aNewRGBA\x12\\\n" +
"\x15RGBA64NgdotColorModel\x18' \x01(\v2$.ngolofuzz.RGBA64NgdotColorModelArgsH\x00R\x15RGBA64NgdotColorModel\x12P\n" +
"\x11RGBA64NgdotBounds\x18( \x01(\v2 .ngolofuzz.RGBA64NgdotBoundsArgsH\x00R\x11RGBA64NgdotBounds\x12D\n" +
"\rRGBA64NgdotAt\x18) \x01(\v2\x1c.ngolofuzz.RGBA64NgdotAtArgsH\x00R\rRGBA64NgdotAt\x12V\n" +
"\x13RGBA64NgdotRGBA64At\x18* \x01(\v2\".ngolofuzz.RGBA64NgdotRGBA64AtArgsH\x00R\x13RGBA64NgdotRGBA64At\x12Y\n" +
"\x14RGBA64NgdotPixOffset\x18+ \x01(\v2#.ngolofuzz.RGBA64NgdotPixOffsetArgsH\x00R\x14RGBA64NgdotPixOffset\x12V\n" +
"\x13RGBA64NgdotSubImage\x18, \x01(\v2\".ngolofuzz.RGBA64NgdotSubImageArgsH\x00R\x13RGBA64NgdotSubImage\x12P\n" +
"\x11RGBA64NgdotOpaque\x18- \x01(\v2 .ngolofuzz.RGBA64NgdotOpaqueArgsH\x00R\x11RGBA64NgdotOpaque\x128\n" +
"\tNewRGBA64\x18. \x01(\v2\x18.ngolofuzz.NewRGBA64ArgsH\x00R\tNewRGBA64\x12Y\n" +
"\x14NRGBANgdotColorModel\x18/ \x01(\v2#.ngolofuzz.NRGBANgdotColorModelArgsH\x00R\x14NRGBANgdotColorModel\x12M\n" +
"\x10NRGBANgdotBounds\x180 \x01(\v2\x1f.ngolofuzz.NRGBANgdotBoundsArgsH\x00R\x10NRGBANgdotBounds\x12A\n" +
"\fNRGBANgdotAt\x181 \x01(\v2\x1b.ngolofuzz.NRGBANgdotAtArgsH\x00R\fNRGBANgdotAt\x12S\n" +
"\x12NRGBANgdotRGBA64At\x182 \x01(\v2!.ngolofuzz.NRGBANgdotRGBA64AtArgsH\x00R\x12NRGBANgdotRGBA64At\x12P\n" +
"\x11NRGBANgdotNRGBAAt\x183 \x01(\v2 .ngolofuzz.NRGBANgdotNRGBAAtArgsH\x00R\x11NRGBANgdotNRGBAAt\x12V\n" +
"\x13NRGBANgdotPixOffset\x184 \x01(\v2\".ngolofuzz.NRGBANgdotPixOffsetArgsH\x00R\x13NRGBANgdotPixOffset\x12S\n" +
"\x12NRGBANgdotSubImage\x185 \x01(\v2!.ngolofuzz.NRGBANgdotSubImageArgsH\x00R\x12NRGBANgdotSubImage\x12M\n" +
"\x10NRGBANgdotOpaque\x186 \x01(\v2\x1f.ngolofuzz.NRGBANgdotOpaqueArgsH\x00R\x10NRGBANgdotOpaque\x125\n" +
"\bNewNRGBA\x187 \x01(\v2\x17.ngolofuzz.NewNRGBAArgsH\x00R\bNewNRGBA\x12_\n" +
"\x16NRGBA64NgdotColorModel\x188 \x01(\v2%.ngolofuzz.NRGBA64NgdotColorModelArgsH\x00R\x16NRGBA64NgdotColorModel\x12S\n" +
"\x12NRGBA64NgdotBounds\x189 \x01(\v2!.ngolofuzz.NRGBA64NgdotBoundsArgsH\x00R\x12NRGBA64NgdotBounds\x12G\n" +
"\x0eNRGBA64NgdotAt\x18: \x01(\v2\x1d.ngolofuzz.NRGBA64NgdotAtArgsH\x00R\x0eNRGBA64NgdotAt\x12Y\n" +
"\x14NRGBA64NgdotRGBA64At\x18; \x01(\v2#.ngolofuzz.NRGBA64NgdotRGBA64AtArgsH\x00R\x14NRGBA64NgdotRGBA64At\x12\\\n" +
"\x15NRGBA64NgdotNRGBA64At\x18< \x01(\v2$.ngolofuzz.NRGBA64NgdotNRGBA64AtArgsH\x00R\x15NRGBA64NgdotNRGBA64At\x12\\\n" +
"\x15NRGBA64NgdotPixOffset\x18= \x01(\v2$.ngolofuzz.NRGBA64NgdotPixOffsetArgsH\x00R\x15NRGBA64NgdotPixOffset\x12Y\n" +
"\x14NRGBA64NgdotSubImage\x18> \x01(\v2#.ngolofuzz.NRGBA64NgdotSubImageArgsH\x00R\x14NRGBA64NgdotSubImage\x12S\n" +
"\x12NRGBA64NgdotOpaque\x18? \x01(\v2!.ngolofuzz.NRGBA64NgdotOpaqueArgsH\x00R\x12NRGBA64NgdotOpaque\x12;\n" +
"\n" +
"NewNRGBA64\x18@ \x01(\v2\x19.ngolofuzz.NewNRGBA64ArgsH\x00R\n" +
"NewNRGBA64\x12Y\n" +
"\x14AlphaNgdotColorModel\x18A \x01(\v2#.ngolofuzz.AlphaNgdotColorModelArgsH\x00R\x14AlphaNgdotColorModel\x12M\n" +
"\x10AlphaNgdotBounds\x18B \x01(\v2\x1f.ngolofuzz.AlphaNgdotBoundsArgsH\x00R\x10AlphaNgdotBounds\x12A\n" +
"\fAlphaNgdotAt\x18C \x01(\v2\x1b.ngolofuzz.AlphaNgdotAtArgsH\x00R\fAlphaNgdotAt\x12S\n" +
"\x12AlphaNgdotRGBA64At\x18D \x01(\v2!.ngolofuzz.AlphaNgdotRGBA64AtArgsH\x00R\x12AlphaNgdotRGBA64At\x12P\n" +
"\x11AlphaNgdotAlphaAt\x18E \x01(\v2 .ngolofuzz.AlphaNgdotAlphaAtArgsH\x00R\x11AlphaNgdotAlphaAt\x12V\n" +
"\x13AlphaNgdotPixOffset\x18F \x01(\v2\".ngolofuzz.AlphaNgdotPixOffsetArgsH\x00R\x13AlphaNgdotPixOffset\x12S\n" +
"\x12AlphaNgdotSubImage\x18G \x01(\v2!.ngolofuzz.AlphaNgdotSubImageArgsH\x00R\x12AlphaNgdotSubImage\x12M\n" +
"\x10AlphaNgdotOpaque\x18H \x01(\v2\x1f.ngolofuzz.AlphaNgdotOpaqueArgsH\x00R\x10AlphaNgdotOpaque\x125\n" +
"\bNewAlpha\x18I \x01(\v2\x17.ngolofuzz.NewAlphaArgsH\x00R\bNewAlpha\x12_\n" +
"\x16Alpha16NgdotColorModel\x18J \x01(\v2%.ngolofuzz.Alpha16NgdotColorModelArgsH\x00R\x16Alpha16NgdotColorModel\x12S\n" +
"\x12Alpha16NgdotBounds\x18K \x01(\v2!.ngolofuzz.Alpha16NgdotBoundsArgsH\x00R\x12Alpha16NgdotBounds\x12G\n" +
"\x0eAlpha16NgdotAt\x18L \x01(\v2\x1d.ngolofuzz.Alpha16NgdotAtArgsH\x00R\x0eAlpha16NgdotAt\x12Y\n" +
"\x14Alpha16NgdotRGBA64At\x18M \x01(\v2#.ngolofuzz.Alpha16NgdotRGBA64AtArgsH\x00R\x14Alpha16NgdotRGBA64At\x12\\\n" +
"\x15Alpha16NgdotAlpha16At\x18N \x01(\v2$.ngolofuzz.Alpha16NgdotAlpha16AtArgsH\x00R\x15Alpha16NgdotAlpha16At\x12\\\n" +
"\x15Alpha16NgdotPixOffset\x18O \x01(\v2$.ngolofuzz.Alpha16NgdotPixOffsetArgsH\x00R\x15Alpha16NgdotPixOffset\x12Y\n" +
"\x14Alpha16NgdotSubImage\x18P \x01(\v2#.ngolofuzz.Alpha16NgdotSubImageArgsH\x00R\x14Alpha16NgdotSubImage\x12S\n" +
"\x12Alpha16NgdotOpaque\x18Q \x01(\v2!.ngolofuzz.Alpha16NgdotOpaqueArgsH\x00R\x12Alpha16NgdotOpaque\x12;\n" +
"\n" +
"NewAlpha16\x18R \x01(\v2\x19.ngolofuzz.NewAlpha16ArgsH\x00R\n" +
"NewAlpha16\x12V\n" +
"\x13GrayNgdotColorModel\x18S \x01(\v2\".ngolofuzz.GrayNgdotColorModelArgsH\x00R\x13GrayNgdotColorModel\x12J\n" +
"\x0fGrayNgdotBounds\x18T \x01(\v2\x1e.ngolofuzz.GrayNgdotBoundsArgsH\x00R\x0fGrayNgdotBounds\x12>\n" +
"\vGrayNgdotAt\x18U \x01(\v2\x1a.ngolofuzz.GrayNgdotAtArgsH\x00R\vGrayNgdotAt\x12P\n" +
"\x11GrayNgdotRGBA64At\x18V \x01(\v2 .ngolofuzz.GrayNgdotRGBA64AtArgsH\x00R\x11GrayNgdotRGBA64At\x12J\n" +
"\x0fGrayNgdotGrayAt\x18W \x01(\v2\x1e.ngolofuzz.GrayNgdotGrayAtArgsH\x00R\x0fGrayNgdotGrayAt\x12S\n" +
"\x12GrayNgdotPixOffset\x18X \x01(\v2!.ngolofuzz.GrayNgdotPixOffsetArgsH\x00R\x12GrayNgdotPixOffset\x12P\n" +
"\x11GrayNgdotSubImage\x18Y \x01(\v2 .ngolofuzz.GrayNgdotSubImageArgsH\x00R\x11GrayNgdotSubImage\x12J\n" +
"\x0fGrayNgdotOpaque\x18Z \x01(\v2\x1e.ngolofuzz.GrayNgdotOpaqueArgsH\x00R\x0fGrayNgdotOpaque\x122\n" +
"\aNewGray\x18[ \x01(\v2\x16.ngolofuzz.NewGrayArgsH\x00R\aNewGray\x12\\\n" +
"\x15Gray16NgdotColorModel\x18\\ \x01(\v2$.ngolofuzz.Gray16NgdotColorModelArgsH\x00R\x15Gray16NgdotColorModel\x12P\n" +
"\x11Gray16NgdotBounds\x18] \x01(\v2 .ngolofuzz.Gray16NgdotBoundsArgsH\x00R\x11Gray16NgdotBounds\x12D\n" +
"\rGray16NgdotAt\x18^ \x01(\v2\x1c.ngolofuzz.Gray16NgdotAtArgsH\x00R\rGray16NgdotAt\x12V\n" +
"\x13Gray16NgdotRGBA64At\x18_ \x01(\v2\".ngolofuzz.Gray16NgdotRGBA64AtArgsH\x00R\x13Gray16NgdotRGBA64At\x12V\n" +
"\x13Gray16NgdotGray16At\x18` \x01(\v2\".ngolofuzz.Gray16NgdotGray16AtArgsH\x00R\x13Gray16NgdotGray16At\x12Y\n" +
"\x14Gray16NgdotPixOffset\x18a \x01(\v2#.ngolofuzz.Gray16NgdotPixOffsetArgsH\x00R\x14Gray16NgdotPixOffset\x12V\n" +
"\x13Gray16NgdotSubImage\x18b \x01(\v2\".ngolofuzz.Gray16NgdotSubImageArgsH\x00R\x13Gray16NgdotSubImage\x12P\n" +
"\x11Gray16NgdotOpaque\x18c \x01(\v2 .ngolofuzz.Gray16NgdotOpaqueArgsH\x00R\x11Gray16NgdotOpaque\x128\n" +
"\tNewGray16\x18d \x01(\v2\x18.ngolofuzz.NewGray16ArgsH\x00R\tNewGray16\x12V\n" +
"\x13CMYKNgdotColorModel\x18e \x01(\v2\".ngolofuzz.CMYKNgdotColorModelArgsH\x00R\x13CMYKNgdotColorModel\x12J\n" +
"\x0fCMYKNgdotBounds\x18f \x01(\v2\x1e.ngolofuzz.CMYKNgdotBoundsArgsH\x00R\x0fCMYKNgdotBounds\x12>\n" +
"\vCMYKNgdotAt\x18g \x01(\v2\x1a.ngolofuzz.CMYKNgdotAtArgsH\x00R\vCMYKNgdotAt\x12P\n" +
"\x11CMYKNgdotRGBA64At\x18h \x01(\v2 .ngolofuzz.CMYKNgdotRGBA64AtArgsH\x00R\x11CMYKNgdotRGBA64At\x12J\n" +
"\x0fCMYKNgdotCMYKAt\x18i \x01(\v2\x1e.ngolofuzz.CMYKNgdotCMYKAtArgsH\x00R\x0fCMYKNgdotCMYKAt\x12S\n" +
"\x12CMYKNgdotPixOffset\x18j \x01(\v2!.ngolofuzz.CMYKNgdotPixOffsetArgsH\x00R\x12CMYKNgdotPixOffset\x12P\n" +
"\x11CMYKNgdotSubImage\x18k \x01(\v2 .ngolofuzz.CMYKNgdotSubImageArgsH\x00R\x11CMYKNgdotSubImage\x12J\n" +
"\x0fCMYKNgdotOpaque\x18l \x01(\v2\x1e.ngolofuzz.CMYKNgdotOpaqueArgsH\x00R\x0fCMYKNgdotOpaque\x122\n" +
"\aNewCMYK\x18m \x01(\v2\x16.ngolofuzz.NewCMYKArgsH\x00R\aNewCMYK\x12b\n" +
"\x17PalettedNgdotColorModel\x18n \x01(\v2&.ngolofuzz.PalettedNgdotColorModelArgsH\x00R\x17PalettedNgdotColorModel\x12V\n" +
"\x13PalettedNgdotBounds\x18o \x01(\v2\".ngolofuzz.PalettedNgdotBoundsArgsH\x00R\x13PalettedNgdotBounds\x12J\n" +
"\x0fPalettedNgdotAt\x18p \x01(\v2\x1e.ngolofuzz.PalettedNgdotAtArgsH\x00R\x0fPalettedNgdotAt\x12\\\n" +
"\x15PalettedNgdotRGBA64At\x18q \x01(\v2$.ngolofuzz.PalettedNgdotRGBA64AtArgsH\x00R\x15PalettedNgdotRGBA64At\x12_\n" +
"\x16PalettedNgdotPixOffset\x18r \x01(\v2%.ngolofuzz.PalettedNgdotPixOffsetArgsH\x00R\x16PalettedNgdotPixOffset\x12h\n" +
"\x19PalettedNgdotColorIndexAt\x18s \x01(\v2(.ngolofuzz.PalettedNgdotColorIndexAtArgsH\x00R\x19PalettedNgdotColorIndexAt\x12k\n" +
"\x1aPalettedNgdotSetColorIndex\x18t \x01(\v2).ngolofuzz.PalettedNgdotSetColorIndexArgsH\x00R\x1aPalettedNgdotSetColorIndex\x12\\\n" +
"\x15PalettedNgdotSubImage\x18u \x01(\v2$.ngolofuzz.PalettedNgdotSubImageArgsH\x00R\x15PalettedNgdotSubImage\x12V\n" +
"\x13PalettedNgdotOpaque\x18v \x01(\v2\".ngolofuzz.PalettedNgdotOpaqueArgsH\x00R\x13PalettedNgdotOpaque\x12M\n" +
"\x10UniformNgdotRGBA\x18w \x01(\v2\x1f.ngolofuzz.UniformNgdotRGBAArgsH\x00R\x10UniformNgdotRGBA\x12_\n" +
"\x16UniformNgdotColorModel\x18x \x01(\v2%.ngolofuzz.UniformNgdotColorModelArgsH\x00R\x16UniformNgdotColorModel\x12S\n" +
"\x12UniformNgdotBounds\x18y \x01(\v2!.ngolofuzz.UniformNgdotBoundsArgsH\x00R\x12UniformNgdotBounds\x12G\n" +
"\x0eUniformNgdotAt\x18z \x01(\v2\x1d.ngolofuzz.UniformNgdotAtArgsH\x00R\x0eUniformNgdotAt\x12Y\n" +
"\x14UniformNgdotRGBA64At\x18{ \x01(\v2#.ngolofuzz.UniformNgdotRGBA64AtArgsH\x00R\x14UniformNgdotRGBA64At\x12S\n" +
"\x12UniformNgdotOpaque\x18| \x01(\v2!.ngolofuzz.UniformNgdotOpaqueArgsH\x00R\x12UniformNgdotOpaque\x12w\n" +
"\x1eYCbCrSubsampleRatioNgdotString\x18} \x01(\v2-.ngolofuzz.YCbCrSubsampleRatioNgdotStringArgsH\x00R\x1eYCbCrSubsampleRatioNgdotString\x12Y\n" +
"\x14YCbCrNgdotColorModel\x18~ \x01(\v2#.ngolofuzz.YCbCrNgdotColorModelArgsH\x00R\x14YCbCrNgdotColorModel\x12M\n" +
"\x10YCbCrNgdotBounds\x18\x7f \x01(\v2\x1f.ngolofuzz.YCbCrNgdotBoundsArgsH\x00R\x10YCbCrNgdotBounds\x12B\n" +
"\fYCbCrNgdotAt\x18\x80\x01 \x01(\v2\x1b.ngolofuzz.YCbCrNgdotAtArgsH\x00R\fYCbCrNgdotAt\x12T\n" +
"\x12YCbCrNgdotRGBA64At\x18\x81\x01 \x01(\v2!.ngolofuzz.YCbCrNgdotRGBA64AtArgsH\x00R\x12YCbCrNgdotRGBA64At\x12Q\n" +
"\x11YCbCrNgdotYCbCrAt\x18\x82\x01 \x01(\v2 .ngolofuzz.YCbCrNgdotYCbCrAtArgsH\x00R\x11YCbCrNgdotYCbCrAt\x12Q\n" +
"\x11YCbCrNgdotYOffset\x18\x83\x01 \x01(\v2 .ngolofuzz.YCbCrNgdotYOffsetArgsH\x00R\x11YCbCrNgdotYOffset\x12Q\n" +
"\x11YCbCrNgdotCOffset\x18\x84\x01 \x01(\v2 .ngolofuzz.YCbCrNgdotCOffsetArgsH\x00R\x11YCbCrNgdotCOffset\x12T\n" +
"\x12YCbCrNgdotSubImage\x18\x85\x01 \x01(\v2!.ngolofuzz.YCbCrNgdotSubImageArgsH\x00R\x12YCbCrNgdotSubImage\x12N\n" +
"\x10YCbCrNgdotOpaque\x18\x86\x01 \x01(\v2\x1f.ngolofuzz.YCbCrNgdotOpaqueArgsH\x00R\x10YCbCrNgdotOpaque\x126\n" +
"\bNewYCbCr\x18\x87\x01 \x01(\v2\x17.ngolofuzz.NewYCbCrArgsH\x00R\bNewYCbCr\x12`\n" +
"\x16NYCbCrANgdotColorModel\x18\x88\x01 \x01(\v2%.ngolofuzz.NYCbCrANgdotColorModelArgsH\x00R\x16NYCbCrANgdotColorModel\x12H\n" +
"\x0eNYCbCrANgdotAt\x18\x89\x01 \x01(\v2\x1d.ngolofuzz.NYCbCrANgdotAtArgsH\x00R\x0eNYCbCrANgdotAt\x12Z\n" +
"\x14NYCbCrANgdotRGBA64At\x18\x8a\x01 \x01(\v2#.ngolofuzz.NYCbCrANgdotRGBA64AtArgsH\x00R\x14NYCbCrANgdotRGBA64At\x12]\n" +
"\x15NYCbCrANgdotNYCbCrAAt\x18\x8b\x01 \x01(\v2$.ngolofuzz.NYCbCrANgdotNYCbCrAAtArgsH\x00R\x15NYCbCrANgdotNYCbCrAAt\x12W\n" +
"\x13NYCbCrANgdotAOffset\x18\x8c\x01 \x01(\v2\".ngolofuzz.NYCbCrANgdotAOffsetArgsH\x00R\x13NYCbCrANgdotAOffset\x12Z\n" +
"\x14NYCbCrANgdotSubImage\x18\x8d\x01 \x01(\v2#.ngolofuzz.NYCbCrANgdotSubImageArgsH\x00R\x14NYCbCrANgdotSubImage\x12T\n" +
"\x12NYCbCrANgdotOpaque\x18\x8e\x01 \x01(\v2!.ngolofuzz.NYCbCrANgdotOpaqueArgsH\x00R\x12NYCbCrANgdotOpaque\x12<\n" +
"\n" +
"NewNYCbCrA\x18\x8f\x01 \x01(\v2\x19.ngolofuzz.NewNYCbCrAArgsH\x00R\n" +
"NewNYCbCrAB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*\xc1\x01\n" +
"\x17YCbCrSubsampleRatioEnum\x12\x1a\n" +
"\x16YCbCrSubsampleRatio444\x10\x00\x12\x1a\n" +
"\x16YCbCrSubsampleRatio422\x10\x01\x12\x1a\n" +
"\x16YCbCrSubsampleRatio420\x10\x02\x12\x1a\n" +
"\x16YCbCrSubsampleRatio440\x10\x03\x12\x1a\n" +
"\x16YCbCrSubsampleRatio411\x10\x04\x12\x1a\n" +
"\x16YCbCrSubsampleRatio410\x10\x05B\x12Z\x10./;fuzz_ng_imageb\x06proto3"
// Lazily-initialized cache for the gzip-compressed raw descriptor,
// guarded by a sync.Once so compression happens at most once.
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP returns the GZIP-compressed form of the
// raw descriptor, computing the compression exactly once on first use and
// serving the cached bytes afterwards.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime type metadata slots: one enum and 146 message types in this file.
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 146)
// file_ngolofuzz_proto_goTypes lists every Go type generated for this file
// in descriptor order — the enum first, then the 146 message types — so
// that file_ngolofuzz_proto_depIdxs can refer to them by index.
var file_ngolofuzz_proto_goTypes = []any{
	(YCbCrSubsampleRatioEnum)(0),         // 0: ngolofuzz.YCbCrSubsampleRatioEnum
	(*DecodeArgs)(nil),                   // 1: ngolofuzz.DecodeArgs
	(*DecodeConfigArgs)(nil),             // 2: ngolofuzz.DecodeConfigArgs
	(*PointNgdotStringArgs)(nil),         // 3: ngolofuzz.PointNgdotStringArgs
	(*PointNgdotAddArgs)(nil),            // 4: ngolofuzz.PointNgdotAddArgs
	(*PointNgdotSubArgs)(nil),            // 5: ngolofuzz.PointNgdotSubArgs
	(*PointNgdotMulArgs)(nil),            // 6: ngolofuzz.PointNgdotMulArgs
	(*PointNgdotInArgs)(nil),             // 7: ngolofuzz.PointNgdotInArgs
	(*PointNgdotModArgs)(nil),            // 8: ngolofuzz.PointNgdotModArgs
	(*PointNgdotEqArgs)(nil),             // 9: ngolofuzz.PointNgdotEqArgs
	(*PtArgs)(nil),                       // 10: ngolofuzz.PtArgs
	(*RectangleNgdotStringArgs)(nil),     // 11: ngolofuzz.RectangleNgdotStringArgs
	(*RectangleNgdotDxArgs)(nil),         // 12: ngolofuzz.RectangleNgdotDxArgs
	(*RectangleNgdotDyArgs)(nil),         // 13: ngolofuzz.RectangleNgdotDyArgs
	(*RectangleNgdotSizeArgs)(nil),       // 14: ngolofuzz.RectangleNgdotSizeArgs
	(*RectangleNgdotAddArgs)(nil),        // 15: ngolofuzz.RectangleNgdotAddArgs
	(*RectangleNgdotSubArgs)(nil),        // 16: ngolofuzz.RectangleNgdotSubArgs
	(*RectangleNgdotInsetArgs)(nil),      // 17: ngolofuzz.RectangleNgdotInsetArgs
	(*RectangleNgdotIntersectArgs)(nil),  // 18: ngolofuzz.RectangleNgdotIntersectArgs
	(*RectangleNgdotUnionArgs)(nil),      // 19: ngolofuzz.RectangleNgdotUnionArgs
	(*RectangleNgdotEmptyArgs)(nil),      // 20: ngolofuzz.RectangleNgdotEmptyArgs
	(*RectangleNgdotEqArgs)(nil),         // 21: ngolofuzz.RectangleNgdotEqArgs
	(*RectangleNgdotOverlapsArgs)(nil),   // 22: ngolofuzz.RectangleNgdotOverlapsArgs
	(*RectangleNgdotInArgs)(nil),         // 23: ngolofuzz.RectangleNgdotInArgs
	(*RectangleNgdotCanonArgs)(nil),      // 24: ngolofuzz.RectangleNgdotCanonArgs
	(*RectangleNgdotAtArgs)(nil),         // 25: ngolofuzz.RectangleNgdotAtArgs
	(*RectangleNgdotRGBA64AtArgs)(nil),   // 26: ngolofuzz.RectangleNgdotRGBA64AtArgs
	(*RectangleNgdotBoundsArgs)(nil),     // 27: ngolofuzz.RectangleNgdotBoundsArgs
	(*RectangleNgdotColorModelArgs)(nil), // 28: ngolofuzz.RectangleNgdotColorModelArgs
	(*RectArgs)(nil),                     // 29: ngolofuzz.RectArgs
	(*RGBANgdotColorModelArgs)(nil),      // 30: ngolofuzz.RGBANgdotColorModelArgs
	(*RGBANgdotBoundsArgs)(nil),          // 31: ngolofuzz.RGBANgdotBoundsArgs
	(*RGBANgdotAtArgs)(nil),              // 32: ngolofuzz.RGBANgdotAtArgs
	(*RGBANgdotRGBA64AtArgs)(nil),        // 33: ngolofuzz.RGBANgdotRGBA64AtArgs
	(*RGBANgdotRGBAAtArgs)(nil),          // 34: ngolofuzz.RGBANgdotRGBAAtArgs
	(*RGBANgdotPixOffsetArgs)(nil),       // 35: ngolofuzz.RGBANgdotPixOffsetArgs
	(*RGBANgdotSubImageArgs)(nil),        // 36: ngolofuzz.RGBANgdotSubImageArgs
	(*RGBANgdotOpaqueArgs)(nil),          // 37: ngolofuzz.RGBANgdotOpaqueArgs
	(*NewRGBAArgs)(nil),                  // 38: ngolofuzz.NewRGBAArgs
	(*RGBA64NgdotColorModelArgs)(nil),    // 39: ngolofuzz.RGBA64NgdotColorModelArgs
	(*RGBA64NgdotBoundsArgs)(nil),        // 40: ngolofuzz.RGBA64NgdotBoundsArgs
	(*RGBA64NgdotAtArgs)(nil),            // 41: ngolofuzz.RGBA64NgdotAtArgs
	(*RGBA64NgdotRGBA64AtArgs)(nil),      // 42: ngolofuzz.RGBA64NgdotRGBA64AtArgs
	(*RGBA64NgdotPixOffsetArgs)(nil),     // 43: ngolofuzz.RGBA64NgdotPixOffsetArgs
	(*RGBA64NgdotSubImageArgs)(nil),      // 44: ngolofuzz.RGBA64NgdotSubImageArgs
	(*RGBA64NgdotOpaqueArgs)(nil),        // 45: ngolofuzz.RGBA64NgdotOpaqueArgs
	(*NewRGBA64Args)(nil),                // 46: ngolofuzz.NewRGBA64Args
	(*NRGBANgdotColorModelArgs)(nil),     // 47: ngolofuzz.NRGBANgdotColorModelArgs
	(*NRGBANgdotBoundsArgs)(nil),         // 48: ngolofuzz.NRGBANgdotBoundsArgs
	(*NRGBANgdotAtArgs)(nil),             // 49: ngolofuzz.NRGBANgdotAtArgs
	(*NRGBANgdotRGBA64AtArgs)(nil),       // 50: ngolofuzz.NRGBANgdotRGBA64AtArgs
	(*NRGBANgdotNRGBAAtArgs)(nil),        // 51: ngolofuzz.NRGBANgdotNRGBAAtArgs
	(*NRGBANgdotPixOffsetArgs)(nil),      // 52: ngolofuzz.NRGBANgdotPixOffsetArgs
	(*NRGBANgdotSubImageArgs)(nil),       // 53: ngolofuzz.NRGBANgdotSubImageArgs
	(*NRGBANgdotOpaqueArgs)(nil),         // 54: ngolofuzz.NRGBANgdotOpaqueArgs
	(*NewNRGBAArgs)(nil),                 // 55: ngolofuzz.NewNRGBAArgs
	(*NRGBA64NgdotColorModelArgs)(nil),   // 56: ngolofuzz.NRGBA64NgdotColorModelArgs
	(*NRGBA64NgdotBoundsArgs)(nil),       // 57: ngolofuzz.NRGBA64NgdotBoundsArgs
	(*NRGBA64NgdotAtArgs)(nil),           // 58: ngolofuzz.NRGBA64NgdotAtArgs
	(*NRGBA64NgdotRGBA64AtArgs)(nil),     // 59: ngolofuzz.NRGBA64NgdotRGBA64AtArgs
	(*NRGBA64NgdotNRGBA64AtArgs)(nil),    // 60: ngolofuzz.NRGBA64NgdotNRGBA64AtArgs
	(*NRGBA64NgdotPixOffsetArgs)(nil),    // 61: ngolofuzz.NRGBA64NgdotPixOffsetArgs
	(*NRGBA64NgdotSubImageArgs)(nil),     // 62: ngolofuzz.NRGBA64NgdotSubImageArgs
	(*NRGBA64NgdotOpaqueArgs)(nil),       // 63: ngolofuzz.NRGBA64NgdotOpaqueArgs
	(*NewNRGBA64Args)(nil),               // 64: ngolofuzz.NewNRGBA64Args
	(*AlphaNgdotColorModelArgs)(nil),     // 65: ngolofuzz.AlphaNgdotColorModelArgs
	(*AlphaNgdotBoundsArgs)(nil),         // 66: ngolofuzz.AlphaNgdotBoundsArgs
	(*AlphaNgdotAtArgs)(nil),             // 67: ngolofuzz.AlphaNgdotAtArgs
	(*AlphaNgdotRGBA64AtArgs)(nil),       // 68: ngolofuzz.AlphaNgdotRGBA64AtArgs
	(*AlphaNgdotAlphaAtArgs)(nil),        // 69: ngolofuzz.AlphaNgdotAlphaAtArgs
	(*AlphaNgdotPixOffsetArgs)(nil),      // 70: ngolofuzz.AlphaNgdotPixOffsetArgs
	(*AlphaNgdotSubImageArgs)(nil),       // 71: ngolofuzz.AlphaNgdotSubImageArgs
	(*AlphaNgdotOpaqueArgs)(nil),         // 72: ngolofuzz.AlphaNgdotOpaqueArgs
	(*NewAlphaArgs)(nil),                 // 73: ngolofuzz.NewAlphaArgs
	(*Alpha16NgdotColorModelArgs)(nil),   // 74: ngolofuzz.Alpha16NgdotColorModelArgs
	(*Alpha16NgdotBoundsArgs)(nil),       // 75: ngolofuzz.Alpha16NgdotBoundsArgs
	(*Alpha16NgdotAtArgs)(nil),           // 76: ngolofuzz.Alpha16NgdotAtArgs
	(*Alpha16NgdotRGBA64AtArgs)(nil),     // 77: ngolofuzz.Alpha16NgdotRGBA64AtArgs
	(*Alpha16NgdotAlpha16AtArgs)(nil),    // 78: ngolofuzz.Alpha16NgdotAlpha16AtArgs
	(*Alpha16NgdotPixOffsetArgs)(nil),    // 79: ngolofuzz.Alpha16NgdotPixOffsetArgs
	(*Alpha16NgdotSubImageArgs)(nil),     // 80: ngolofuzz.Alpha16NgdotSubImageArgs
	(*Alpha16NgdotOpaqueArgs)(nil),       // 81: ngolofuzz.Alpha16NgdotOpaqueArgs
	(*NewAlpha16Args)(nil),               // 82: ngolofuzz.NewAlpha16Args
	(*GrayNgdotColorModelArgs)(nil),      // 83: ngolofuzz.GrayNgdotColorModelArgs
	(*GrayNgdotBoundsArgs)(nil),          // 84: ngolofuzz.GrayNgdotBoundsArgs
	(*GrayNgdotAtArgs)(nil),              // 85: ngolofuzz.GrayNgdotAtArgs
	(*GrayNgdotRGBA64AtArgs)(nil),        // 86: ngolofuzz.GrayNgdotRGBA64AtArgs
	(*GrayNgdotGrayAtArgs)(nil),          // 87: ngolofuzz.GrayNgdotGrayAtArgs
	(*GrayNgdotPixOffsetArgs)(nil),       // 88: ngolofuzz.GrayNgdotPixOffsetArgs
	(*GrayNgdotSubImageArgs)(nil),        // 89: ngolofuzz.GrayNgdotSubImageArgs
	(*GrayNgdotOpaqueArgs)(nil),          // 90: ngolofuzz.GrayNgdotOpaqueArgs
	(*NewGrayArgs)(nil),                  // 91: ngolofuzz.NewGrayArgs
	(*Gray16NgdotColorModelArgs)(nil),    // 92: ngolofuzz.Gray16NgdotColorModelArgs
	(*Gray16NgdotBoundsArgs)(nil),        // 93: ngolofuzz.Gray16NgdotBoundsArgs
	(*Gray16NgdotAtArgs)(nil),            // 94: ngolofuzz.Gray16NgdotAtArgs
	(*Gray16NgdotRGBA64AtArgs)(nil),      // 95: ngolofuzz.Gray16NgdotRGBA64AtArgs
	(*Gray16NgdotGray16AtArgs)(nil),      // 96: ngolofuzz.Gray16NgdotGray16AtArgs
	(*Gray16NgdotPixOffsetArgs)(nil),     // 97: ngolofuzz.Gray16NgdotPixOffsetArgs
	(*Gray16NgdotSubImageArgs)(nil),      // 98: ngolofuzz.Gray16NgdotSubImageArgs
	(*Gray16NgdotOpaqueArgs)(nil),        // 99: ngolofuzz.Gray16NgdotOpaqueArgs
	(*NewGray16Args)(nil),                // 100: ngolofuzz.NewGray16Args
	(*CMYKNgdotColorModelArgs)(nil),      // 101: ngolofuzz.CMYKNgdotColorModelArgs
	(*CMYKNgdotBoundsArgs)(nil),          // 102: ngolofuzz.CMYKNgdotBoundsArgs
	(*CMYKNgdotAtArgs)(nil),              // 103: ngolofuzz.CMYKNgdotAtArgs
	(*CMYKNgdotRGBA64AtArgs)(nil),        // 104: ngolofuzz.CMYKNgdotRGBA64AtArgs
	(*CMYKNgdotCMYKAtArgs)(nil),          // 105: ngolofuzz.CMYKNgdotCMYKAtArgs
	(*CMYKNgdotPixOffsetArgs)(nil),       // 106: ngolofuzz.CMYKNgdotPixOffsetArgs
	(*CMYKNgdotSubImageArgs)(nil),        // 107: ngolofuzz.CMYKNgdotSubImageArgs
	(*CMYKNgdotOpaqueArgs)(nil),          // 108: ngolofuzz.CMYKNgdotOpaqueArgs
	(*NewCMYKArgs)(nil),                  // 109: ngolofuzz.NewCMYKArgs
	(*PalettedNgdotColorModelArgs)(nil),  // 110: ngolofuzz.PalettedNgdotColorModelArgs
	(*PalettedNgdotBoundsArgs)(nil),      // 111: ngolofuzz.PalettedNgdotBoundsArgs
	(*PalettedNgdotAtArgs)(nil),          // 112: ngolofuzz.PalettedNgdotAtArgs
	(*PalettedNgdotRGBA64AtArgs)(nil),    // 113: ngolofuzz.PalettedNgdotRGBA64AtArgs
	(*PalettedNgdotPixOffsetArgs)(nil),   // 114: ngolofuzz.PalettedNgdotPixOffsetArgs
	(*PalettedNgdotColorIndexAtArgs)(nil),  // 115: ngolofuzz.PalettedNgdotColorIndexAtArgs
	(*PalettedNgdotSetColorIndexArgs)(nil), // 116: ngolofuzz.PalettedNgdotSetColorIndexArgs
	(*PalettedNgdotSubImageArgs)(nil),    // 117: ngolofuzz.PalettedNgdotSubImageArgs
	(*PalettedNgdotOpaqueArgs)(nil),      // 118: ngolofuzz.PalettedNgdotOpaqueArgs
	(*UniformNgdotRGBAArgs)(nil),         // 119: ngolofuzz.UniformNgdotRGBAArgs
	(*UniformNgdotColorModelArgs)(nil),   // 120: ngolofuzz.UniformNgdotColorModelArgs
	(*UniformNgdotBoundsArgs)(nil),       // 121: ngolofuzz.UniformNgdotBoundsArgs
	(*UniformNgdotAtArgs)(nil),           // 122: ngolofuzz.UniformNgdotAtArgs
	(*UniformNgdotRGBA64AtArgs)(nil),     // 123: ngolofuzz.UniformNgdotRGBA64AtArgs
	(*UniformNgdotOpaqueArgs)(nil),       // 124: ngolofuzz.UniformNgdotOpaqueArgs
	(*YCbCrSubsampleRatioNgdotStringArgs)(nil), // 125: ngolofuzz.YCbCrSubsampleRatioNgdotStringArgs
	(*YCbCrNgdotColorModelArgs)(nil),     // 126: ngolofuzz.YCbCrNgdotColorModelArgs
	(*YCbCrNgdotBoundsArgs)(nil),         // 127: ngolofuzz.YCbCrNgdotBoundsArgs
	(*YCbCrNgdotAtArgs)(nil),             // 128: ngolofuzz.YCbCrNgdotAtArgs
	(*YCbCrNgdotRGBA64AtArgs)(nil),       // 129: ngolofuzz.YCbCrNgdotRGBA64AtArgs
	(*YCbCrNgdotYCbCrAtArgs)(nil),        // 130: ngolofuzz.YCbCrNgdotYCbCrAtArgs
	(*YCbCrNgdotYOffsetArgs)(nil),        // 131: ngolofuzz.YCbCrNgdotYOffsetArgs
	(*YCbCrNgdotCOffsetArgs)(nil),        // 132: ngolofuzz.YCbCrNgdotCOffsetArgs
	(*YCbCrNgdotSubImageArgs)(nil),       // 133: ngolofuzz.YCbCrNgdotSubImageArgs
	(*YCbCrNgdotOpaqueArgs)(nil),         // 134: ngolofuzz.YCbCrNgdotOpaqueArgs
	(*NewYCbCrArgs)(nil),                 // 135: ngolofuzz.NewYCbCrArgs
	(*NYCbCrANgdotColorModelArgs)(nil),   // 136: ngolofuzz.NYCbCrANgdotColorModelArgs
	(*NYCbCrANgdotAtArgs)(nil),           // 137: ngolofuzz.NYCbCrANgdotAtArgs
	(*NYCbCrANgdotRGBA64AtArgs)(nil),     // 138: ngolofuzz.NYCbCrANgdotRGBA64AtArgs
	(*NYCbCrANgdotNYCbCrAAtArgs)(nil),    // 139: ngolofuzz.NYCbCrANgdotNYCbCrAAtArgs
	(*NYCbCrANgdotAOffsetArgs)(nil),      // 140: ngolofuzz.NYCbCrANgdotAOffsetArgs
	(*NYCbCrANgdotSubImageArgs)(nil),     // 141: ngolofuzz.NYCbCrANgdotSubImageArgs
	(*NYCbCrANgdotOpaqueArgs)(nil),       // 142: ngolofuzz.NYCbCrANgdotOpaqueArgs
	(*NewNYCbCrAArgs)(nil),               // 143: ngolofuzz.NewNYCbCrAArgs
	(*NgoloFuzzOne)(nil),                 // 144: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                 // 145: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                // 146: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs maps each type reference in the descriptor to
// an index in file_ngolofuzz_proto_goTypes. The trailing five entries
// delimit the sub-lists (method output/input types, extension type names,
// extendees, field type names) within this flat index table.
var file_ngolofuzz_proto_depIdxs = []int32{
	0,   // 0: ngolofuzz.YCbCrSubsampleRatioNgdotStringArgs.s:type_name -> ngolofuzz.YCbCrSubsampleRatioEnum
	0,   // 1: ngolofuzz.NewYCbCrArgs.subsampleRatio:type_name -> ngolofuzz.YCbCrSubsampleRatioEnum
	0,   // 2: ngolofuzz.NewNYCbCrAArgs.subsampleRatio:type_name -> ngolofuzz.YCbCrSubsampleRatioEnum
	1,   // 3: ngolofuzz.NgoloFuzzOne.Decode:type_name -> ngolofuzz.DecodeArgs
	2,   // 4: ngolofuzz.NgoloFuzzOne.DecodeConfig:type_name -> ngolofuzz.DecodeConfigArgs
	3,   // 5: ngolofuzz.NgoloFuzzOne.PointNgdotString:type_name -> ngolofuzz.PointNgdotStringArgs
	4,   // 6: ngolofuzz.NgoloFuzzOne.PointNgdotAdd:type_name -> ngolofuzz.PointNgdotAddArgs
	5,   // 7: ngolofuzz.NgoloFuzzOne.PointNgdotSub:type_name -> ngolofuzz.PointNgdotSubArgs
	6,   // 8: ngolofuzz.NgoloFuzzOne.PointNgdotMul:type_name -> ngolofuzz.PointNgdotMulArgs
	7,   // 9: ngolofuzz.NgoloFuzzOne.PointNgdotIn:type_name -> ngolofuzz.PointNgdotInArgs
	8,   // 10: ngolofuzz.NgoloFuzzOne.PointNgdotMod:type_name -> ngolofuzz.PointNgdotModArgs
	9,   // 11: ngolofuzz.NgoloFuzzOne.PointNgdotEq:type_name -> ngolofuzz.PointNgdotEqArgs
	10,  // 12: ngolofuzz.NgoloFuzzOne.Pt:type_name -> ngolofuzz.PtArgs
	11,  // 13: ngolofuzz.NgoloFuzzOne.RectangleNgdotString:type_name -> ngolofuzz.RectangleNgdotStringArgs
	12,  // 14: ngolofuzz.NgoloFuzzOne.RectangleNgdotDx:type_name -> ngolofuzz.RectangleNgdotDxArgs
	13,  // 15: ngolofuzz.NgoloFuzzOne.RectangleNgdotDy:type_name -> ngolofuzz.RectangleNgdotDyArgs
	14,  // 16: ngolofuzz.NgoloFuzzOne.RectangleNgdotSize:type_name -> ngolofuzz.RectangleNgdotSizeArgs
	15,  // 17: ngolofuzz.NgoloFuzzOne.RectangleNgdotAdd:type_name -> ngolofuzz.RectangleNgdotAddArgs
	16,  // 18: ngolofuzz.NgoloFuzzOne.RectangleNgdotSub:type_name -> ngolofuzz.RectangleNgdotSubArgs
	17,  // 19: ngolofuzz.NgoloFuzzOne.RectangleNgdotInset:type_name -> ngolofuzz.RectangleNgdotInsetArgs
	18,  // 20: ngolofuzz.NgoloFuzzOne.RectangleNgdotIntersect:type_name -> ngolofuzz.RectangleNgdotIntersectArgs
	19,  // 21: ngolofuzz.NgoloFuzzOne.RectangleNgdotUnion:type_name -> ngolofuzz.RectangleNgdotUnionArgs
	20,  // 22: ngolofuzz.NgoloFuzzOne.RectangleNgdotEmpty:type_name -> ngolofuzz.RectangleNgdotEmptyArgs
	21,  // 23: ngolofuzz.NgoloFuzzOne.RectangleNgdotEq:type_name -> ngolofuzz.RectangleNgdotEqArgs
	22,  // 24: ngolofuzz.NgoloFuzzOne.RectangleNgdotOverlaps:type_name -> ngolofuzz.RectangleNgdotOverlapsArgs
	23,  // 25: ngolofuzz.NgoloFuzzOne.RectangleNgdotIn:type_name -> ngolofuzz.RectangleNgdotInArgs
	24,  // 26: ngolofuzz.NgoloFuzzOne.RectangleNgdotCanon:type_name -> ngolofuzz.RectangleNgdotCanonArgs
	25,  // 27: ngolofuzz.NgoloFuzzOne.RectangleNgdotAt:type_name -> ngolofuzz.RectangleNgdotAtArgs
	26,  // 28: ngolofuzz.NgoloFuzzOne.RectangleNgdotRGBA64At:type_name -> ngolofuzz.RectangleNgdotRGBA64AtArgs
	27,  // 29: ngolofuzz.NgoloFuzzOne.RectangleNgdotBounds:type_name -> ngolofuzz.RectangleNgdotBoundsArgs
	28,  // 30: ngolofuzz.NgoloFuzzOne.RectangleNgdotColorModel:type_name -> ngolofuzz.RectangleNgdotColorModelArgs
	29,  // 31: ngolofuzz.NgoloFuzzOne.Rect:type_name -> ngolofuzz.RectArgs
	30,  // 32: ngolofuzz.NgoloFuzzOne.RGBANgdotColorModel:type_name -> ngolofuzz.RGBANgdotColorModelArgs
	31,  // 33: ngolofuzz.NgoloFuzzOne.RGBANgdotBounds:type_name -> ngolofuzz.RGBANgdotBoundsArgs
	32,  // 34: ngolofuzz.NgoloFuzzOne.RGBANgdotAt:type_name -> ngolofuzz.RGBANgdotAtArgs
	33,  // 35: ngolofuzz.NgoloFuzzOne.RGBANgdotRGBA64At:type_name -> ngolofuzz.RGBANgdotRGBA64AtArgs
	34,  // 36: ngolofuzz.NgoloFuzzOne.RGBANgdotRGBAAt:type_name -> ngolofuzz.RGBANgdotRGBAAtArgs
	35,  // 37: ngolofuzz.NgoloFuzzOne.RGBANgdotPixOffset:type_name -> ngolofuzz.RGBANgdotPixOffsetArgs
	36,  // 38: ngolofuzz.NgoloFuzzOne.RGBANgdotSubImage:type_name -> ngolofuzz.RGBANgdotSubImageArgs
	37,  // 39: ngolofuzz.NgoloFuzzOne.RGBANgdotOpaque:type_name -> ngolofuzz.RGBANgdotOpaqueArgs
	38,  // 40: ngolofuzz.NgoloFuzzOne.NewRGBA:type_name -> ngolofuzz.NewRGBAArgs
	39,  // 41: ngolofuzz.NgoloFuzzOne.RGBA64NgdotColorModel:type_name -> ngolofuzz.RGBA64NgdotColorModelArgs
	40,  // 42: ngolofuzz.NgoloFuzzOne.RGBA64NgdotBounds:type_name -> ngolofuzz.RGBA64NgdotBoundsArgs
	41,  // 43: ngolofuzz.NgoloFuzzOne.RGBA64NgdotAt:type_name -> ngolofuzz.RGBA64NgdotAtArgs
	42,  // 44: ngolofuzz.NgoloFuzzOne.RGBA64NgdotRGBA64At:type_name -> ngolofuzz.RGBA64NgdotRGBA64AtArgs
	43,  // 45: ngolofuzz.NgoloFuzzOne.RGBA64NgdotPixOffset:type_name -> ngolofuzz.RGBA64NgdotPixOffsetArgs
	44,  // 46: ngolofuzz.NgoloFuzzOne.RGBA64NgdotSubImage:type_name -> ngolofuzz.RGBA64NgdotSubImageArgs
	45,  // 47: ngolofuzz.NgoloFuzzOne.RGBA64NgdotOpaque:type_name -> ngolofuzz.RGBA64NgdotOpaqueArgs
	46,  // 48: ngolofuzz.NgoloFuzzOne.NewRGBA64:type_name -> ngolofuzz.NewRGBA64Args
	47,  // 49: ngolofuzz.NgoloFuzzOne.NRGBANgdotColorModel:type_name -> ngolofuzz.NRGBANgdotColorModelArgs
	48,  // 50: ngolofuzz.NgoloFuzzOne.NRGBANgdotBounds:type_name -> ngolofuzz.NRGBANgdotBoundsArgs
	49,  // 51: ngolofuzz.NgoloFuzzOne.NRGBANgdotAt:type_name -> ngolofuzz.NRGBANgdotAtArgs
	50,  // 52: ngolofuzz.NgoloFuzzOne.NRGBANgdotRGBA64At:type_name -> ngolofuzz.NRGBANgdotRGBA64AtArgs
	51,  // 53: ngolofuzz.NgoloFuzzOne.NRGBANgdotNRGBAAt:type_name -> ngolofuzz.NRGBANgdotNRGBAAtArgs
	52,  // 54: ngolofuzz.NgoloFuzzOne.NRGBANgdotPixOffset:type_name -> ngolofuzz.NRGBANgdotPixOffsetArgs
	53,  // 55: ngolofuzz.NgoloFuzzOne.NRGBANgdotSubImage:type_name -> ngolofuzz.NRGBANgdotSubImageArgs
	54,  // 56: ngolofuzz.NgoloFuzzOne.NRGBANgdotOpaque:type_name -> ngolofuzz.NRGBANgdotOpaqueArgs
	55,  // 57: ngolofuzz.NgoloFuzzOne.NewNRGBA:type_name -> ngolofuzz.NewNRGBAArgs
	56,  // 58: ngolofuzz.NgoloFuzzOne.NRGBA64NgdotColorModel:type_name -> ngolofuzz.NRGBA64NgdotColorModelArgs
	57,  // 59: ngolofuzz.NgoloFuzzOne.NRGBA64NgdotBounds:type_name -> ngolofuzz.NRGBA64NgdotBoundsArgs
	58,  // 60: ngolofuzz.NgoloFuzzOne.NRGBA64NgdotAt:type_name -> ngolofuzz.NRGBA64NgdotAtArgs
	59,  // 61: ngolofuzz.NgoloFuzzOne.NRGBA64NgdotRGBA64At:type_name -> ngolofuzz.NRGBA64NgdotRGBA64AtArgs
	60,  // 62: ngolofuzz.NgoloFuzzOne.NRGBA64NgdotNRGBA64At:type_name -> ngolofuzz.NRGBA64NgdotNRGBA64AtArgs
	61,  // 63: ngolofuzz.NgoloFuzzOne.NRGBA64NgdotPixOffset:type_name -> ngolofuzz.NRGBA64NgdotPixOffsetArgs
	62,  // 64: ngolofuzz.NgoloFuzzOne.NRGBA64NgdotSubImage:type_name -> ngolofuzz.NRGBA64NgdotSubImageArgs
	63,  // 65: ngolofuzz.NgoloFuzzOne.NRGBA64NgdotOpaque:type_name -> ngolofuzz.NRGBA64NgdotOpaqueArgs
	64,  // 66: ngolofuzz.NgoloFuzzOne.NewNRGBA64:type_name -> ngolofuzz.NewNRGBA64Args
	65,  // 67: ngolofuzz.NgoloFuzzOne.AlphaNgdotColorModel:type_name -> ngolofuzz.AlphaNgdotColorModelArgs
	66,  // 68: ngolofuzz.NgoloFuzzOne.AlphaNgdotBounds:type_name -> ngolofuzz.AlphaNgdotBoundsArgs
	67,  // 69: ngolofuzz.NgoloFuzzOne.AlphaNgdotAt:type_name -> ngolofuzz.AlphaNgdotAtArgs
	68,  // 70: ngolofuzz.NgoloFuzzOne.AlphaNgdotRGBA64At:type_name -> ngolofuzz.AlphaNgdotRGBA64AtArgs
	69,  // 71: ngolofuzz.NgoloFuzzOne.AlphaNgdotAlphaAt:type_name -> ngolofuzz.AlphaNgdotAlphaAtArgs
	70,  // 72: ngolofuzz.NgoloFuzzOne.AlphaNgdotPixOffset:type_name -> ngolofuzz.AlphaNgdotPixOffsetArgs
	71,  // 73: ngolofuzz.NgoloFuzzOne.AlphaNgdotSubImage:type_name -> ngolofuzz.AlphaNgdotSubImageArgs
	72,  // 74: ngolofuzz.NgoloFuzzOne.AlphaNgdotOpaque:type_name -> ngolofuzz.AlphaNgdotOpaqueArgs
	73,  // 75: ngolofuzz.NgoloFuzzOne.NewAlpha:type_name -> ngolofuzz.NewAlphaArgs
	74,  // 76: ngolofuzz.NgoloFuzzOne.Alpha16NgdotColorModel:type_name -> ngolofuzz.Alpha16NgdotColorModelArgs
	75,  // 77: ngolofuzz.NgoloFuzzOne.Alpha16NgdotBounds:type_name -> ngolofuzz.Alpha16NgdotBoundsArgs
	76,  // 78: ngolofuzz.NgoloFuzzOne.Alpha16NgdotAt:type_name -> ngolofuzz.Alpha16NgdotAtArgs
	77,  // 79: ngolofuzz.NgoloFuzzOne.Alpha16NgdotRGBA64At:type_name -> ngolofuzz.Alpha16NgdotRGBA64AtArgs
	78,  // 80: ngolofuzz.NgoloFuzzOne.Alpha16NgdotAlpha16At:type_name -> ngolofuzz.Alpha16NgdotAlpha16AtArgs
	79,  // 81: ngolofuzz.NgoloFuzzOne.Alpha16NgdotPixOffset:type_name -> ngolofuzz.Alpha16NgdotPixOffsetArgs
	80,  // 82: ngolofuzz.NgoloFuzzOne.Alpha16NgdotSubImage:type_name -> ngolofuzz.Alpha16NgdotSubImageArgs
	81,  // 83: ngolofuzz.NgoloFuzzOne.Alpha16NgdotOpaque:type_name -> ngolofuzz.Alpha16NgdotOpaqueArgs
	82,  // 84: ngolofuzz.NgoloFuzzOne.NewAlpha16:type_name -> ngolofuzz.NewAlpha16Args
	83,  // 85: ngolofuzz.NgoloFuzzOne.GrayNgdotColorModel:type_name -> ngolofuzz.GrayNgdotColorModelArgs
	84,  // 86: ngolofuzz.NgoloFuzzOne.GrayNgdotBounds:type_name -> ngolofuzz.GrayNgdotBoundsArgs
	85,  // 87: ngolofuzz.NgoloFuzzOne.GrayNgdotAt:type_name -> ngolofuzz.GrayNgdotAtArgs
	86,  // 88: ngolofuzz.NgoloFuzzOne.GrayNgdotRGBA64At:type_name -> ngolofuzz.GrayNgdotRGBA64AtArgs
	87,  // 89: ngolofuzz.NgoloFuzzOne.GrayNgdotGrayAt:type_name -> ngolofuzz.GrayNgdotGrayAtArgs
	88,  // 90: ngolofuzz.NgoloFuzzOne.GrayNgdotPixOffset:type_name -> ngolofuzz.GrayNgdotPixOffsetArgs
	89,  // 91: ngolofuzz.NgoloFuzzOne.GrayNgdotSubImage:type_name -> ngolofuzz.GrayNgdotSubImageArgs
	90,  // 92: ngolofuzz.NgoloFuzzOne.GrayNgdotOpaque:type_name -> ngolofuzz.GrayNgdotOpaqueArgs
	91,  // 93: ngolofuzz.NgoloFuzzOne.NewGray:type_name -> ngolofuzz.NewGrayArgs
	92,  // 94: ngolofuzz.NgoloFuzzOne.Gray16NgdotColorModel:type_name -> ngolofuzz.Gray16NgdotColorModelArgs
	93,  // 95: ngolofuzz.NgoloFuzzOne.Gray16NgdotBounds:type_name -> ngolofuzz.Gray16NgdotBoundsArgs
	94,  // 96: ngolofuzz.NgoloFuzzOne.Gray16NgdotAt:type_name -> ngolofuzz.Gray16NgdotAtArgs
	95,  // 97: ngolofuzz.NgoloFuzzOne.Gray16NgdotRGBA64At:type_name -> ngolofuzz.Gray16NgdotRGBA64AtArgs
	96,  // 98: ngolofuzz.NgoloFuzzOne.Gray16NgdotGray16At:type_name -> ngolofuzz.Gray16NgdotGray16AtArgs
	97,  // 99: ngolofuzz.NgoloFuzzOne.Gray16NgdotPixOffset:type_name -> ngolofuzz.Gray16NgdotPixOffsetArgs
	98,  // 100: ngolofuzz.NgoloFuzzOne.Gray16NgdotSubImage:type_name -> ngolofuzz.Gray16NgdotSubImageArgs
	99,  // 101: ngolofuzz.NgoloFuzzOne.Gray16NgdotOpaque:type_name -> ngolofuzz.Gray16NgdotOpaqueArgs
	100, // 102: ngolofuzz.NgoloFuzzOne.NewGray16:type_name -> ngolofuzz.NewGray16Args
	101, // 103: ngolofuzz.NgoloFuzzOne.CMYKNgdotColorModel:type_name -> ngolofuzz.CMYKNgdotColorModelArgs
	102, // 104: ngolofuzz.NgoloFuzzOne.CMYKNgdotBounds:type_name -> ngolofuzz.CMYKNgdotBoundsArgs
	103, // 105: ngolofuzz.NgoloFuzzOne.CMYKNgdotAt:type_name -> ngolofuzz.CMYKNgdotAtArgs
	104, // 106: ngolofuzz.NgoloFuzzOne.CMYKNgdotRGBA64At:type_name -> ngolofuzz.CMYKNgdotRGBA64AtArgs
	105, // 107: ngolofuzz.NgoloFuzzOne.CMYKNgdotCMYKAt:type_name -> ngolofuzz.CMYKNgdotCMYKAtArgs
	106, // 108: ngolofuzz.NgoloFuzzOne.CMYKNgdotPixOffset:type_name -> ngolofuzz.CMYKNgdotPixOffsetArgs
	107, // 109: ngolofuzz.NgoloFuzzOne.CMYKNgdotSubImage:type_name -> ngolofuzz.CMYKNgdotSubImageArgs
	108, // 110: ngolofuzz.NgoloFuzzOne.CMYKNgdotOpaque:type_name -> ngolofuzz.CMYKNgdotOpaqueArgs
	109, // 111: ngolofuzz.NgoloFuzzOne.NewCMYK:type_name -> ngolofuzz.NewCMYKArgs
	110, // 112: ngolofuzz.NgoloFuzzOne.PalettedNgdotColorModel:type_name -> ngolofuzz.PalettedNgdotColorModelArgs
	111, // 113: ngolofuzz.NgoloFuzzOne.PalettedNgdotBounds:type_name -> ngolofuzz.PalettedNgdotBoundsArgs
	112, // 114: ngolofuzz.NgoloFuzzOne.PalettedNgdotAt:type_name -> ngolofuzz.PalettedNgdotAtArgs
	113, // 115: ngolofuzz.NgoloFuzzOne.PalettedNgdotRGBA64At:type_name -> ngolofuzz.PalettedNgdotRGBA64AtArgs
	114, // 116: ngolofuzz.NgoloFuzzOne.PalettedNgdotPixOffset:type_name -> ngolofuzz.PalettedNgdotPixOffsetArgs
	115, // 117: ngolofuzz.NgoloFuzzOne.PalettedNgdotColorIndexAt:type_name -> ngolofuzz.PalettedNgdotColorIndexAtArgs
	116, // 118: ngolofuzz.NgoloFuzzOne.PalettedNgdotSetColorIndex:type_name -> ngolofuzz.PalettedNgdotSetColorIndexArgs
	117, // 119: ngolofuzz.NgoloFuzzOne.PalettedNgdotSubImage:type_name -> ngolofuzz.PalettedNgdotSubImageArgs
	118, // 120: ngolofuzz.NgoloFuzzOne.PalettedNgdotOpaque:type_name -> ngolofuzz.PalettedNgdotOpaqueArgs
	119, // 121: ngolofuzz.NgoloFuzzOne.UniformNgdotRGBA:type_name -> ngolofuzz.UniformNgdotRGBAArgs
	120, // 122: ngolofuzz.NgoloFuzzOne.UniformNgdotColorModel:type_name -> ngolofuzz.UniformNgdotColorModelArgs
	121, // 123: ngolofuzz.NgoloFuzzOne.UniformNgdotBounds:type_name -> ngolofuzz.UniformNgdotBoundsArgs
	122, // 124: ngolofuzz.NgoloFuzzOne.UniformNgdotAt:type_name -> ngolofuzz.UniformNgdotAtArgs
	123, // 125: ngolofuzz.NgoloFuzzOne.UniformNgdotRGBA64At:type_name -> ngolofuzz.UniformNgdotRGBA64AtArgs
	124, // 126: ngolofuzz.NgoloFuzzOne.UniformNgdotOpaque:type_name -> ngolofuzz.UniformNgdotOpaqueArgs
	125, // 127: ngolofuzz.NgoloFuzzOne.YCbCrSubsampleRatioNgdotString:type_name -> ngolofuzz.YCbCrSubsampleRatioNgdotStringArgs
	126, // 128: ngolofuzz.NgoloFuzzOne.YCbCrNgdotColorModel:type_name -> ngolofuzz.YCbCrNgdotColorModelArgs
	127, // 129: ngolofuzz.NgoloFuzzOne.YCbCrNgdotBounds:type_name -> ngolofuzz.YCbCrNgdotBoundsArgs
	128, // 130: ngolofuzz.NgoloFuzzOne.YCbCrNgdotAt:type_name -> ngolofuzz.YCbCrNgdotAtArgs
	129, // 131: ngolofuzz.NgoloFuzzOne.YCbCrNgdotRGBA64At:type_name -> ngolofuzz.YCbCrNgdotRGBA64AtArgs
	130, // 132: ngolofuzz.NgoloFuzzOne.YCbCrNgdotYCbCrAt:type_name -> ngolofuzz.YCbCrNgdotYCbCrAtArgs
	131, // 133: ngolofuzz.NgoloFuzzOne.YCbCrNgdotYOffset:type_name -> ngolofuzz.YCbCrNgdotYOffsetArgs
	132, // 134: ngolofuzz.NgoloFuzzOne.YCbCrNgdotCOffset:type_name -> ngolofuzz.YCbCrNgdotCOffsetArgs
	133, // 135: ngolofuzz.NgoloFuzzOne.YCbCrNgdotSubImage:type_name -> ngolofuzz.YCbCrNgdotSubImageArgs
	134, // 136: ngolofuzz.NgoloFuzzOne.YCbCrNgdotOpaque:type_name -> ngolofuzz.YCbCrNgdotOpaqueArgs
	135, // 137: ngolofuzz.NgoloFuzzOne.NewYCbCr:type_name -> ngolofuzz.NewYCbCrArgs
	136, // 138: ngolofuzz.NgoloFuzzOne.NYCbCrANgdotColorModel:type_name -> ngolofuzz.NYCbCrANgdotColorModelArgs
	137, // 139: ngolofuzz.NgoloFuzzOne.NYCbCrANgdotAt:type_name -> ngolofuzz.NYCbCrANgdotAtArgs
	138, // 140: ngolofuzz.NgoloFuzzOne.NYCbCrANgdotRGBA64At:type_name -> ngolofuzz.NYCbCrANgdotRGBA64AtArgs
	139, // 141: ngolofuzz.NgoloFuzzOne.NYCbCrANgdotNYCbCrAAt:type_name -> ngolofuzz.NYCbCrANgdotNYCbCrAAtArgs
	140, // 142: ngolofuzz.NgoloFuzzOne.NYCbCrANgdotAOffset:type_name -> ngolofuzz.NYCbCrANgdotAOffsetArgs
	141, // 143: ngolofuzz.NgoloFuzzOne.NYCbCrANgdotSubImage:type_name -> ngolofuzz.NYCbCrANgdotSubImageArgs
	142, // 144: ngolofuzz.NgoloFuzzOne.NYCbCrANgdotOpaque:type_name -> ngolofuzz.NYCbCrANgdotOpaqueArgs
	143, // 145: ngolofuzz.NgoloFuzzOne.NewNYCbCrA:type_name -> ngolofuzz.NewNYCbCrAArgs
	144, // 146: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	147, // [147:147] is the sub-list for method output_type
	147, // [147:147] is the sub-list for method input_type
	147, // [147:147] is the sub-list for extension type_name
	147, // [147:147] is the sub-list for extension extendee
	0,   // [0:147] is the sub-list for field type_name
}
// init forces registration of the ngolofuzz descriptor at package load time.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init builds and registers the ngolofuzz file
// descriptor and its generated Go types with the protobuf runtime. It is
// idempotent: once File_ngolofuzz_proto is set, further calls return
// immediately.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Message index 143 (NgoloFuzzOne): declare the wrapper types that can
	// populate its "item" oneof, one per fuzzable API call.
	file_ngolofuzz_proto_msgTypes[143].OneofWrappers = []any{
		(*NgoloFuzzOne_Decode)(nil),
		(*NgoloFuzzOne_DecodeConfig)(nil),
		(*NgoloFuzzOne_PointNgdotString)(nil),
		(*NgoloFuzzOne_PointNgdotAdd)(nil),
		(*NgoloFuzzOne_PointNgdotSub)(nil),
		(*NgoloFuzzOne_PointNgdotMul)(nil),
		(*NgoloFuzzOne_PointNgdotIn)(nil),
		(*NgoloFuzzOne_PointNgdotMod)(nil),
		(*NgoloFuzzOne_PointNgdotEq)(nil),
		(*NgoloFuzzOne_Pt)(nil),
		(*NgoloFuzzOne_RectangleNgdotString)(nil),
		(*NgoloFuzzOne_RectangleNgdotDx)(nil),
		(*NgoloFuzzOne_RectangleNgdotDy)(nil),
		(*NgoloFuzzOne_RectangleNgdotSize)(nil),
		(*NgoloFuzzOne_RectangleNgdotAdd)(nil),
		(*NgoloFuzzOne_RectangleNgdotSub)(nil),
		(*NgoloFuzzOne_RectangleNgdotInset)(nil),
		(*NgoloFuzzOne_RectangleNgdotIntersect)(nil),
		(*NgoloFuzzOne_RectangleNgdotUnion)(nil),
		(*NgoloFuzzOne_RectangleNgdotEmpty)(nil),
		(*NgoloFuzzOne_RectangleNgdotEq)(nil),
		(*NgoloFuzzOne_RectangleNgdotOverlaps)(nil),
		(*NgoloFuzzOne_RectangleNgdotIn)(nil),
		(*NgoloFuzzOne_RectangleNgdotCanon)(nil),
		(*NgoloFuzzOne_RectangleNgdotAt)(nil),
		(*NgoloFuzzOne_RectangleNgdotRGBA64At)(nil),
		(*NgoloFuzzOne_RectangleNgdotBounds)(nil),
		(*NgoloFuzzOne_RectangleNgdotColorModel)(nil),
		(*NgoloFuzzOne_Rect)(nil),
		(*NgoloFuzzOne_RGBANgdotColorModel)(nil),
		(*NgoloFuzzOne_RGBANgdotBounds)(nil),
		(*NgoloFuzzOne_RGBANgdotAt)(nil),
		(*NgoloFuzzOne_RGBANgdotRGBA64At)(nil),
		(*NgoloFuzzOne_RGBANgdotRGBAAt)(nil),
		(*NgoloFuzzOne_RGBANgdotPixOffset)(nil),
		(*NgoloFuzzOne_RGBANgdotSubImage)(nil),
		(*NgoloFuzzOne_RGBANgdotOpaque)(nil),
		(*NgoloFuzzOne_NewRGBA)(nil),
		(*NgoloFuzzOne_RGBA64NgdotColorModel)(nil),
		(*NgoloFuzzOne_RGBA64NgdotBounds)(nil),
		(*NgoloFuzzOne_RGBA64NgdotAt)(nil),
		(*NgoloFuzzOne_RGBA64NgdotRGBA64At)(nil),
		(*NgoloFuzzOne_RGBA64NgdotPixOffset)(nil),
		(*NgoloFuzzOne_RGBA64NgdotSubImage)(nil),
		(*NgoloFuzzOne_RGBA64NgdotOpaque)(nil),
		(*NgoloFuzzOne_NewRGBA64)(nil),
		(*NgoloFuzzOne_NRGBANgdotColorModel)(nil),
		(*NgoloFuzzOne_NRGBANgdotBounds)(nil),
		(*NgoloFuzzOne_NRGBANgdotAt)(nil),
		(*NgoloFuzzOne_NRGBANgdotRGBA64At)(nil),
		(*NgoloFuzzOne_NRGBANgdotNRGBAAt)(nil),
		(*NgoloFuzzOne_NRGBANgdotPixOffset)(nil),
		(*NgoloFuzzOne_NRGBANgdotSubImage)(nil),
		(*NgoloFuzzOne_NRGBANgdotOpaque)(nil),
		(*NgoloFuzzOne_NewNRGBA)(nil),
		(*NgoloFuzzOne_NRGBA64NgdotColorModel)(nil),
		(*NgoloFuzzOne_NRGBA64NgdotBounds)(nil),
		(*NgoloFuzzOne_NRGBA64NgdotAt)(nil),
		(*NgoloFuzzOne_NRGBA64NgdotRGBA64At)(nil),
		(*NgoloFuzzOne_NRGBA64NgdotNRGBA64At)(nil),
		(*NgoloFuzzOne_NRGBA64NgdotPixOffset)(nil),
		(*NgoloFuzzOne_NRGBA64NgdotSubImage)(nil),
		(*NgoloFuzzOne_NRGBA64NgdotOpaque)(nil),
		(*NgoloFuzzOne_NewNRGBA64)(nil),
		(*NgoloFuzzOne_AlphaNgdotColorModel)(nil),
		(*NgoloFuzzOne_AlphaNgdotBounds)(nil),
		(*NgoloFuzzOne_AlphaNgdotAt)(nil),
		(*NgoloFuzzOne_AlphaNgdotRGBA64At)(nil),
		(*NgoloFuzzOne_AlphaNgdotAlphaAt)(nil),
		(*NgoloFuzzOne_AlphaNgdotPixOffset)(nil),
		(*NgoloFuzzOne_AlphaNgdotSubImage)(nil),
		(*NgoloFuzzOne_AlphaNgdotOpaque)(nil),
		(*NgoloFuzzOne_NewAlpha)(nil),
		(*NgoloFuzzOne_Alpha16NgdotColorModel)(nil),
		(*NgoloFuzzOne_Alpha16NgdotBounds)(nil),
		(*NgoloFuzzOne_Alpha16NgdotAt)(nil),
		(*NgoloFuzzOne_Alpha16NgdotRGBA64At)(nil),
		(*NgoloFuzzOne_Alpha16NgdotAlpha16At)(nil),
		(*NgoloFuzzOne_Alpha16NgdotPixOffset)(nil),
		(*NgoloFuzzOne_Alpha16NgdotSubImage)(nil),
		(*NgoloFuzzOne_Alpha16NgdotOpaque)(nil),
		(*NgoloFuzzOne_NewAlpha16)(nil),
		(*NgoloFuzzOne_GrayNgdotColorModel)(nil),
		(*NgoloFuzzOne_GrayNgdotBounds)(nil),
		(*NgoloFuzzOne_GrayNgdotAt)(nil),
		(*NgoloFuzzOne_GrayNgdotRGBA64At)(nil),
		(*NgoloFuzzOne_GrayNgdotGrayAt)(nil),
		(*NgoloFuzzOne_GrayNgdotPixOffset)(nil),
		(*NgoloFuzzOne_GrayNgdotSubImage)(nil),
		(*NgoloFuzzOne_GrayNgdotOpaque)(nil),
		(*NgoloFuzzOne_NewGray)(nil),
		(*NgoloFuzzOne_Gray16NgdotColorModel)(nil),
		(*NgoloFuzzOne_Gray16NgdotBounds)(nil),
		(*NgoloFuzzOne_Gray16NgdotAt)(nil),
		(*NgoloFuzzOne_Gray16NgdotRGBA64At)(nil),
		(*NgoloFuzzOne_Gray16NgdotGray16At)(nil),
		(*NgoloFuzzOne_Gray16NgdotPixOffset)(nil),
		(*NgoloFuzzOne_Gray16NgdotSubImage)(nil),
		(*NgoloFuzzOne_Gray16NgdotOpaque)(nil),
		(*NgoloFuzzOne_NewGray16)(nil),
		(*NgoloFuzzOne_CMYKNgdotColorModel)(nil),
		(*NgoloFuzzOne_CMYKNgdotBounds)(nil),
		(*NgoloFuzzOne_CMYKNgdotAt)(nil),
		(*NgoloFuzzOne_CMYKNgdotRGBA64At)(nil),
		(*NgoloFuzzOne_CMYKNgdotCMYKAt)(nil),
		(*NgoloFuzzOne_CMYKNgdotPixOffset)(nil),
		(*NgoloFuzzOne_CMYKNgdotSubImage)(nil),
		(*NgoloFuzzOne_CMYKNgdotOpaque)(nil),
		(*NgoloFuzzOne_NewCMYK)(nil),
		(*NgoloFuzzOne_PalettedNgdotColorModel)(nil),
		(*NgoloFuzzOne_PalettedNgdotBounds)(nil),
		(*NgoloFuzzOne_PalettedNgdotAt)(nil),
		(*NgoloFuzzOne_PalettedNgdotRGBA64At)(nil),
		(*NgoloFuzzOne_PalettedNgdotPixOffset)(nil),
		(*NgoloFuzzOne_PalettedNgdotColorIndexAt)(nil),
		(*NgoloFuzzOne_PalettedNgdotSetColorIndex)(nil),
		(*NgoloFuzzOne_PalettedNgdotSubImage)(nil),
		(*NgoloFuzzOne_PalettedNgdotOpaque)(nil),
		(*NgoloFuzzOne_UniformNgdotRGBA)(nil),
		(*NgoloFuzzOne_UniformNgdotColorModel)(nil),
		(*NgoloFuzzOne_UniformNgdotBounds)(nil),
		(*NgoloFuzzOne_UniformNgdotAt)(nil),
		(*NgoloFuzzOne_UniformNgdotRGBA64At)(nil),
		(*NgoloFuzzOne_UniformNgdotOpaque)(nil),
		(*NgoloFuzzOne_YCbCrSubsampleRatioNgdotString)(nil),
		(*NgoloFuzzOne_YCbCrNgdotColorModel)(nil),
		(*NgoloFuzzOne_YCbCrNgdotBounds)(nil),
		(*NgoloFuzzOne_YCbCrNgdotAt)(nil),
		(*NgoloFuzzOne_YCbCrNgdotRGBA64At)(nil),
		(*NgoloFuzzOne_YCbCrNgdotYCbCrAt)(nil),
		(*NgoloFuzzOne_YCbCrNgdotYOffset)(nil),
		(*NgoloFuzzOne_YCbCrNgdotCOffset)(nil),
		(*NgoloFuzzOne_YCbCrNgdotSubImage)(nil),
		(*NgoloFuzzOne_YCbCrNgdotOpaque)(nil),
		(*NgoloFuzzOne_NewYCbCr)(nil),
		(*NgoloFuzzOne_NYCbCrANgdotColorModel)(nil),
		(*NgoloFuzzOne_NYCbCrANgdotAt)(nil),
		(*NgoloFuzzOne_NYCbCrANgdotRGBA64At)(nil),
		(*NgoloFuzzOne_NYCbCrANgdotNYCbCrAAt)(nil),
		(*NgoloFuzzOne_NYCbCrANgdotAOffset)(nil),
		(*NgoloFuzzOne_NYCbCrANgdotSubImage)(nil),
		(*NgoloFuzzOne_NYCbCrANgdotOpaque)(nil),
		(*NgoloFuzzOne_NewNYCbCrA)(nil),
	}
	// Message index 144 (NgoloFuzzAny): wrapper types of its "item" oneof.
	file_ngolofuzz_proto_msgTypes[144].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	// x anchors reflect.TypeOf so the builder can recover this package's path.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      1,
			NumMessages:   146,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		EnumInfos:         file_ngolofuzz_proto_enumTypes,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release table references that are no longer needed after Build.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_image_gif
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"image/gif"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is a fake in-memory connection backed by a byte slice,
// used to feed fuzzer-generated data to code expecting a net.Conn.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // current read position within buf
}

// Read copies the next chunk of buf into b, advancing the read offset,
// and returns io.EOF once all of buf has been consumed.
//
// Fix: the original compared len(b) against len(c.buf)+c.offset instead
// of the remaining byte count (len(c.buf)-c.offset) and then advanced
// the offset by len(b) even when fewer bytes were copied, so a read near
// the end of the buffer reported more bytes than it actually wrote into
// b. Using copy's return value is both simpler and correct.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends the whole of b was sent and discards it.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}

// Close marks the buffer as fully consumed so later Reads hit EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder address for FuzzingConn endpoints.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed fake address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	var a net.Addr = &FuzzingAddr{}
	return a
}

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	var a net.Addr = &FuzzingAddr{}
	return a
}
// SetDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetDeadline(_ time.Time) error { return nil }

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(_ time.Time) error { return nil }

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(_ time.Time) error { return nil }
// CreateFuzzingConn wraps a in a FuzzingConn ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL for an empty string.
func GetRune(s string) rune {
	r := rune('\x00')
	for _, c := range s {
		r = c
		break
	}
	return r
}
// FuzzNG_valid is the fuzz entry point for inputs that are expected to
// be a valid protobuf encoding of NgoloFuzzList; it panics when
// unmarshaling fails, since the generator should only emit valid data.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Swallow string panics raised while executing the op list; any other
// panic value is treated as a real bug and re-raised.
// NOTE(review): this deferred recover is installed after Unmarshal, so
// the unmarshal panic above is deliberately NOT filtered.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
// Trigger GC between inputs to surface lifetime/finalizer issues.
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the fuzz entry point for raw inputs: invalid
// protobuf is rejected with 0 instead of panicking (contrast
// FuzzNG_valid).
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics raised while executing the op list; any other
// panic value is treated as a real bug and re-raised.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
// Trigger GC between inputs to surface lifetime/finalizer issues.
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool
// FuzzNG_List executes a generated list of operations against the
// image/gif API (Decode, DecodeAll, DecodeConfig, EncodeAll). It
// returns 1 when every operation ran and 0 on early exit.
//
// Fix: the original discarded the *gif.GIF returned by gif.DecodeAll,
// so GIFResults stayed empty forever and the EncodeAll case was dead
// code; successfully decoded GIFs are now appended to GIFResults so
// EncodeAll can round-robin over them as the surrounding index
// machinery intends.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first call, optionally dump a Go-source reproducer.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var GIFResults []*gif.GIF
	GIFResultsIndex := 0
	for l := range gen.List {
		// Cap the number of executed operations per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Decode:
			// Pre-check dimensions to skip pathologically large images.
			arg1 := bytes.NewReader(a.Decode.R)
			cfg, err := gif.DecodeConfig(arg1)
			if err != nil {
				return 0
			}
			if cfg.Width*cfg.Height > 1024*1024 {
				continue
			}
			arg0 := bytes.NewReader(a.Decode.R)
			_, r1 := gif.Decode(arg0)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_DecodeAll:
			// Pre-check dimensions to skip pathologically large images.
			arg1 := bytes.NewReader(a.DecodeAll.R)
			cfg, err := gif.DecodeConfig(arg1)
			if err != nil {
				return 0
			}
			if cfg.Width*cfg.Height > 1024*1024 {
				continue
			}
			arg0 := bytes.NewReader(a.DecodeAll.R)
			r0, r1 := gif.DecodeAll(arg0)
			if r1 != nil {
				r1.Error()
				return 0
			}
			// Keep the decoded GIF so later EncodeAll ops can re-encode it.
			GIFResults = append(GIFResults, r0)
		case *NgoloFuzzOne_DecodeConfig:
			arg0 := bytes.NewReader(a.DecodeConfig.R)
			_, r1 := gif.DecodeConfig(arg0)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_EncodeAll:
			arg0 := bytes.NewBuffer(a.EncodeAll.W)
			if len(GIFResults) == 0 {
				continue
			}
			// Round-robin over previously decoded GIFs.
			arg1 := GIFResults[GIFResultsIndex]
			GIFResultsIndex = (GIFResultsIndex + 1) % len(GIFResults)
			r0 := gif.EncodeAll(arg0, arg1)
			if r0 != nil {
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer of the generated op list
// to w, mirroring the operations FuzzNG_List would execute.
// NOTE(review): GIFNb is never incremented anywhere in this function,
// so the EncodeAll case always hits the GIFNb == 0 guard and no
// gif.EncodeAll line is ever emitted, and no "GIF%d" variable is ever
// defined; the reproducer under-reports EncodeAll operations — verify
// against the generator's intended output format.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
GIFNb := 0
GIFResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Decode:
w.WriteString(fmt.Sprintf("gif.Decode(bytes.NewReader(%#+v))\n", a.Decode.R))
case *NgoloFuzzOne_DecodeAll:
w.WriteString(fmt.Sprintf("gif.DecodeAll(bytes.NewReader(%#+v))\n", a.DecodeAll.R))
case *NgoloFuzzOne_DecodeConfig:
w.WriteString(fmt.Sprintf("gif.DecodeConfig(bytes.NewReader(%#+v))\n", a.DecodeConfig.R))
case *NgoloFuzzOne_EncodeAll:
// Guard against a modulo-by-zero when no GIFs have been produced.
if GIFNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("gif.EncodeAll(bytes.NewBuffer(%#+v), GIF%d)\n", a.EncodeAll.W, (GIFResultsIndex + 0) % GIFNb))
GIFResultsIndex = (GIFResultsIndex + 1) % GIFNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_image_gif
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type DecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeArgs) Reset() {
*x = DecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeArgs) ProtoMessage() {}
func (x *DecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeArgs.ProtoReflect.Descriptor instead.
func (*DecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *DecodeArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type DecodeAllArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeAllArgs) Reset() {
*x = DecodeAllArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeAllArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeAllArgs) ProtoMessage() {}
func (x *DecodeAllArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeAllArgs.ProtoReflect.Descriptor instead.
func (*DecodeAllArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *DecodeAllArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type DecodeConfigArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeConfigArgs) Reset() {
*x = DecodeConfigArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeConfigArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeConfigArgs) ProtoMessage() {}
func (x *DecodeConfigArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeConfigArgs.ProtoReflect.Descriptor instead.
func (*DecodeConfigArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *DecodeConfigArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type EncodeAllArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodeAllArgs) Reset() {
*x = EncodeAllArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodeAllArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodeAllArgs) ProtoMessage() {}
func (x *EncodeAllArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodeAllArgs.ProtoReflect.Descriptor instead.
func (*EncodeAllArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *EncodeAllArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Decode
// *NgoloFuzzOne_DecodeAll
// *NgoloFuzzOne_DecodeConfig
// *NgoloFuzzOne_EncodeAll
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetDecode() *DecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Decode); ok {
return x.Decode
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecodeAll() *DecodeAllArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecodeAll); ok {
return x.DecodeAll
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecodeConfig() *DecodeConfigArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecodeConfig); ok {
return x.DecodeConfig
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodeAll() *EncodeAllArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodeAll); ok {
return x.EncodeAll
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Decode struct {
Decode *DecodeArgs `protobuf:"bytes,1,opt,name=Decode,proto3,oneof"`
}
type NgoloFuzzOne_DecodeAll struct {
DecodeAll *DecodeAllArgs `protobuf:"bytes,2,opt,name=DecodeAll,proto3,oneof"`
}
type NgoloFuzzOne_DecodeConfig struct {
DecodeConfig *DecodeConfigArgs `protobuf:"bytes,3,opt,name=DecodeConfig,proto3,oneof"`
}
type NgoloFuzzOne_EncodeAll struct {
EncodeAll *EncodeAllArgs `protobuf:"bytes,4,opt,name=EncodeAll,proto3,oneof"`
}
func (*NgoloFuzzOne_Decode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecodeAll) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecodeConfig) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodeAll) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1a\n" +
"\n" +
"DecodeArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\x1d\n" +
"\rDecodeAllArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\" \n" +
"\x10DecodeConfigArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\x1d\n" +
"\rEncodeAllArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"\xfe\x01\n" +
"\fNgoloFuzzOne\x12/\n" +
"\x06Decode\x18\x01 \x01(\v2\x15.ngolofuzz.DecodeArgsH\x00R\x06Decode\x128\n" +
"\tDecodeAll\x18\x02 \x01(\v2\x18.ngolofuzz.DecodeAllArgsH\x00R\tDecodeAll\x12A\n" +
"\fDecodeConfig\x18\x03 \x01(\v2\x1b.ngolofuzz.DecodeConfigArgsH\x00R\fDecodeConfig\x128\n" +
"\tEncodeAll\x18\x04 \x01(\v2\x18.ngolofuzz.EncodeAllArgsH\x00R\tEncodeAllB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x16Z\x14./;fuzz_ng_image_gifb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_ngolofuzz_proto_goTypes = []any{
(*DecodeArgs)(nil), // 0: ngolofuzz.DecodeArgs
(*DecodeAllArgs)(nil), // 1: ngolofuzz.DecodeAllArgs
(*DecodeConfigArgs)(nil), // 2: ngolofuzz.DecodeConfigArgs
(*EncodeAllArgs)(nil), // 3: ngolofuzz.EncodeAllArgs
(*NgoloFuzzOne)(nil), // 4: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 5: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 6: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.Decode:type_name -> ngolofuzz.DecodeArgs
1, // 1: ngolofuzz.NgoloFuzzOne.DecodeAll:type_name -> ngolofuzz.DecodeAllArgs
2, // 2: ngolofuzz.NgoloFuzzOne.DecodeConfig:type_name -> ngolofuzz.DecodeConfigArgs
3, // 3: ngolofuzz.NgoloFuzzOne.EncodeAll:type_name -> ngolofuzz.EncodeAllArgs
4, // 4: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
5, // [5:5] is the sub-list for method output_type
5, // [5:5] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
(*NgoloFuzzOne_Decode)(nil),
(*NgoloFuzzOne_DecodeAll)(nil),
(*NgoloFuzzOne_DecodeConfig)(nil),
(*NgoloFuzzOne_EncodeAll)(nil),
}
file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_image_jpeg
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"image/jpeg"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is a fake in-memory connection backed by a byte slice,
// used to feed fuzzer-generated data to code expecting a net.Conn.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // current read position within buf
}

// Read copies the next chunk of buf into b, advancing the read offset,
// and returns io.EOF once all of buf has been consumed.
//
// Fix: the original compared len(b) against len(c.buf)+c.offset instead
// of the remaining byte count (len(c.buf)-c.offset) and then advanced
// the offset by len(b) even when fewer bytes were copied, so a read near
// the end of the buffer reported more bytes than it actually wrote into
// b. Using copy's return value is both simpler and correct.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends the whole of b was sent and discards it.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}

// Close marks the buffer as fully consumed so later Reads hit EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder address for FuzzingConn endpoints.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed fake address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	var a net.Addr = &FuzzingAddr{}
	return a
}

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	var a net.Addr = &FuzzingAddr{}
	return a
}
// SetDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetDeadline(_ time.Time) error { return nil }

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(_ time.Time) error { return nil }

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(_ time.Time) error { return nil }
// CreateFuzzingConn wraps a in a FuzzingConn ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL for an empty string.
func GetRune(s string) rune {
	r := rune('\x00')
	for _, c := range s {
		r = c
		break
	}
	return r
}
// OptionsNewFromFuzz converts a fuzzer-generated OptionsStruct into
// jpeg.Options; a nil input yields nil (jpeg's default options).
func OptionsNewFromFuzz(p *OptionsStruct) *jpeg.Options {
	if p == nil {
		return nil
	}
	opts := &jpeg.Options{Quality: int(p.Quality)}
	return opts
}
// FuzzNG_valid is the fuzz entry point for inputs that are expected to
// be a valid protobuf encoding of NgoloFuzzList; it panics when
// unmarshaling fails, since the generator should only emit valid data.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Swallow string panics raised while executing the op list; any other
// panic value is treated as a real bug and re-raised.
// NOTE(review): this deferred recover is installed after Unmarshal, so
// the unmarshal panic above is deliberately NOT filtered.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
// Trigger GC between inputs to surface lifetime/finalizer issues.
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the fuzz entry point for raw inputs: invalid
// protobuf is rejected with 0 instead of panicking (contrast
// FuzzNG_valid).
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics raised while executing the op list; any other
// panic value is treated as a real bug and re-raised.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
// Trigger GC between inputs to surface lifetime/finalizer issues.
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool
// FuzzNG_List executes a generated list of operations against the
// image/jpeg API (Decode and DecodeConfig). It returns 1 when every
// operation ran and 0 on early exit.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first call, optionally dump a Go-source reproducer.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if repro != "" {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for idx, one := range gen.List {
		// Cap the number of executed operations per input.
		if idx > 4096 {
			return 0
		}
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_Decode:
			// Pre-check dimensions to skip pathologically large images.
			cfg, err := jpeg.DecodeConfig(bytes.NewReader(a.Decode.R))
			if err != nil {
				return 0
			}
			if cfg.Width*cfg.Height > 1024*1024 {
				continue
			}
			if _, derr := jpeg.Decode(bytes.NewReader(a.Decode.R)); derr != nil {
				derr.Error()
				return 0
			}
		case *NgoloFuzzOne_DecodeConfig:
			if _, cerr := jpeg.DecodeConfig(bytes.NewReader(a.DecodeConfig.R)); cerr != nil {
				cerr.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer of the generated op list
// to w, mirroring the operations FuzzNG_List would execute.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for _, one := range gen.List {
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_Decode:
			w.WriteString(fmt.Sprintf("jpeg.Decode(bytes.NewReader(%#+v))\n", a.Decode.R))
		case *NgoloFuzzOne_DecodeConfig:
			w.WriteString(fmt.Sprintf("jpeg.DecodeConfig(bytes.NewReader(%#+v))\n", a.DecodeConfig.R))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_image_jpeg
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type OptionsStruct struct {
state protoimpl.MessageState `protogen:"open.v1"`
Quality int64 `protobuf:"varint,1,opt,name=Quality,proto3" json:"Quality,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *OptionsStruct) Reset() {
*x = OptionsStruct{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *OptionsStruct) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OptionsStruct) ProtoMessage() {}
func (x *OptionsStruct) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OptionsStruct.ProtoReflect.Descriptor instead.
func (*OptionsStruct) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *OptionsStruct) GetQuality() int64 {
if x != nil {
return x.Quality
}
return 0
}
type DecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeArgs) Reset() {
*x = DecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeArgs) ProtoMessage() {}
func (x *DecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeArgs.ProtoReflect.Descriptor instead.
func (*DecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *DecodeArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type DecodeConfigArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeConfigArgs) Reset() {
*x = DecodeConfigArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeConfigArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeConfigArgs) ProtoMessage() {}
func (x *DecodeConfigArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeConfigArgs.ProtoReflect.Descriptor instead.
func (*DecodeConfigArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *DecodeConfigArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Decode
// *NgoloFuzzOne_DecodeConfig
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetDecode() *DecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Decode); ok {
return x.Decode
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecodeConfig() *DecodeConfigArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecodeConfig); ok {
return x.DecodeConfig
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Decode struct {
Decode *DecodeArgs `protobuf:"bytes,1,opt,name=Decode,proto3,oneof"`
}
type NgoloFuzzOne_DecodeConfig struct {
DecodeConfig *DecodeConfigArgs `protobuf:"bytes,2,opt,name=DecodeConfig,proto3,oneof"`
}
func (*NgoloFuzzOne_Decode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecodeConfig) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\")\n" +
"\rOptionsStruct\x12\x18\n" +
"\aQuality\x18\x01 \x01(\x03R\aQuality\"\x1a\n" +
"\n" +
"DecodeArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\" \n" +
"\x10DecodeConfigArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\x8a\x01\n" +
"\fNgoloFuzzOne\x12/\n" +
"\x06Decode\x18\x01 \x01(\v2\x15.ngolofuzz.DecodeArgsH\x00R\x06Decode\x12A\n" +
"\fDecodeConfig\x18\x02 \x01(\v2\x1b.ngolofuzz.DecodeConfigArgsH\x00R\fDecodeConfigB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x17Z\x15./;fuzz_ng_image_jpegb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_ngolofuzz_proto_goTypes = []any{
(*OptionsStruct)(nil), // 0: ngolofuzz.OptionsStruct
(*DecodeArgs)(nil), // 1: ngolofuzz.DecodeArgs
(*DecodeConfigArgs)(nil), // 2: ngolofuzz.DecodeConfigArgs
(*NgoloFuzzOne)(nil), // 3: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 4: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 5: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
1, // 0: ngolofuzz.NgoloFuzzOne.Decode:type_name -> ngolofuzz.DecodeArgs
2, // 1: ngolofuzz.NgoloFuzzOne.DecodeConfig:type_name -> ngolofuzz.DecodeConfigArgs
3, // 2: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzOne_Decode)(nil),
(*NgoloFuzzOne_DecodeConfig)(nil),
}
file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_image_png
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"image/png"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write discards b entirely while reporting that all of it was written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}

// Close marks the buffer as fully drained so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr implementation for FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (*FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String returns a fixed fake address string.
func (*FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op: the in-memory connection cannot time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op (see SetDeadline).
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op (see SetDeadline).
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader serving the bytes of a.
func CreateBufioReader(a []byte) *bufio.Reader {
	buf := bytes.NewBuffer(a)
	return bufio.NewReader(buf)
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to a uint16 (mod 2^16).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or the NUL rune when s is empty.
// Invalid UTF-8 decodes to U+FFFD, as with a range loop.
func GetRune(s string) rune {
	if len(s) == 0 {
		return '\x00'
	}
	return []rune(s)[0]
}
// FuzzNG_valid decodes data as a NgoloFuzzList protobuf and replays it.
// The input is assumed to be LPM-generated, so an unmarshal failure is
// fatal. String panics raised by the replay are swallowed; any other
// panic value is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure decodes data that may or may not be a valid protobuf;
// inputs that fail to unmarshal are simply skipped. String panics raised
// by the replay are swallowed; any other panic value is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump below.
var initialized bool

// FuzzNG_List replays each generated call in gen against image/png.
// On first use, if FUZZ_NG_REPRODUCER names a file, a Go-source
// reproducer for gen is written there to help debug crashes.
// Returns 1 when the whole list was replayed, 0 on early exit.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Bound the number of replayed operations per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Decode:
			arg1 := bytes.NewReader(a.Decode.R)
			cfg, err := png.DecodeConfig(arg1)
			if err != nil {
				return 0
			}
			// BUG FIX: compute the pixel count in 64-bit so a huge
			// Width*Height cannot overflow int on 32-bit platforms and
			// slip past this memory guard.
			if int64(cfg.Width)*int64(cfg.Height) > 1024*1024 {
				continue
			}
			arg0 := bytes.NewReader(a.Decode.R)
			_, r1 := png.Decode(arg0)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_DecodeConfig:
			arg0 := bytes.NewReader(a.DecodeConfig.R)
			_, r1 := png.DecodeConfig(arg0)
			if r1 != nil {
				r1.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w, one line per
// replayed png call.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for _, one := range gen.List {
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_Decode:
			w.WriteString(fmt.Sprintf("png.Decode(bytes.NewReader(%#+v))\n", a.Decode.R))
		case *NgoloFuzzOne_DecodeConfig:
			w.WriteString(fmt.Sprintf("png.DecodeConfig(bytes.NewReader(%#+v))\n", a.DecodeConfig.R))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_image_png
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type DecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeArgs) Reset() {
*x = DecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeArgs) ProtoMessage() {}
func (x *DecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeArgs.ProtoReflect.Descriptor instead.
func (*DecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *DecodeArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type DecodeConfigArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeConfigArgs) Reset() {
*x = DecodeConfigArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeConfigArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeConfigArgs) ProtoMessage() {}
func (x *DecodeConfigArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeConfigArgs.ProtoReflect.Descriptor instead.
func (*DecodeConfigArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *DecodeConfigArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Decode
// *NgoloFuzzOne_DecodeConfig
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetDecode() *DecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Decode); ok {
return x.Decode
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecodeConfig() *DecodeConfigArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecodeConfig); ok {
return x.DecodeConfig
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Decode struct {
Decode *DecodeArgs `protobuf:"bytes,1,opt,name=Decode,proto3,oneof"`
}
type NgoloFuzzOne_DecodeConfig struct {
DecodeConfig *DecodeConfigArgs `protobuf:"bytes,2,opt,name=DecodeConfig,proto3,oneof"`
}
func (*NgoloFuzzOne_Decode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecodeConfig) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1a\n" +
"\n" +
"DecodeArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\" \n" +
"\x10DecodeConfigArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\x8a\x01\n" +
"\fNgoloFuzzOne\x12/\n" +
"\x06Decode\x18\x01 \x01(\v2\x15.ngolofuzz.DecodeArgsH\x00R\x06Decode\x12A\n" +
"\fDecodeConfig\x18\x02 \x01(\v2\x1b.ngolofuzz.DecodeConfigArgsH\x00R\fDecodeConfigB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x16Z\x14./;fuzz_ng_image_pngb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_ngolofuzz_proto_goTypes = []any{
(*DecodeArgs)(nil), // 0: ngolofuzz.DecodeArgs
(*DecodeConfigArgs)(nil), // 1: ngolofuzz.DecodeConfigArgs
(*NgoloFuzzOne)(nil), // 2: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 3: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 4: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.Decode:type_name -> ngolofuzz.DecodeArgs
1, // 1: ngolofuzz.NgoloFuzzOne.DecodeConfig:type_name -> ngolofuzz.DecodeConfigArgs
2, // 2: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzOne_Decode)(nil),
(*NgoloFuzzOne_DecodeConfig)(nil),
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_index_suffixarray
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"index/suffixarray"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write discards b entirely while reporting that all of it was written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}

// Close marks the buffer as fully drained so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr implementation for FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (*FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String returns a fixed fake address string.
func (*FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op: the in-memory connection cannot time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op (see SetDeadline).
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op (see SetDeadline).
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader serving the bytes of a.
func CreateBufioReader(a []byte) *bufio.Reader {
	buf := bytes.NewBuffer(a)
	return bufio.NewReader(buf)
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to a uint16 (mod 2^16).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or the NUL rune when s is empty.
// Invalid UTF-8 decodes to U+FFFD, as with a range loop.
func GetRune(s string) rune {
	if len(s) == 0 {
		return '\x00'
	}
	return []rune(s)[0]
}
// FuzzNG_valid decodes data as a NgoloFuzzList protobuf and replays it.
// The input is assumed to be LPM-generated, so an unmarshal failure is
// fatal. String panics raised by the replay are swallowed; any other
// panic value is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure decodes data that may or may not be a valid protobuf;
// inputs that fail to unmarshal are simply skipped. String panics raised
// by the replay are swallowed; any other panic value is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump below.
var initialized bool
// FuzzNG_List replays each generated call in gen against
// index/suffixarray. Indexes built by New are accumulated in
// IndexResults and subsequent method calls cycle through them
// round-robin. On first use, if FUZZ_NG_REPRODUCER names a file, a
// Go-source reproducer is written there. Returns 1 when the whole list
// was replayed, 0 on early exit.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Accumulated receivers and the round-robin cursor over them.
var IndexResults []*suffixarray.Index
IndexResultsIndex := 0
for l := range gen.List {
// Bound the number of replayed operations per input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_New:
r0 := suffixarray.New(a.New.Data)
if r0 != nil{
IndexResults = append(IndexResults, r0)
}
case *NgoloFuzzOne_IndexNgdotRead:
// Method calls are skipped until at least one Index exists.
if len(IndexResults) == 0 {
continue
}
arg0 := IndexResults[IndexResultsIndex]
IndexResultsIndex = (IndexResultsIndex + 1) % len(IndexResults)
arg1 := bytes.NewReader(a.IndexNgdotRead.R)
r0 := arg0.Read(arg1)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_IndexNgdotWrite:
if len(IndexResults) == 0 {
continue
}
arg0 := IndexResults[IndexResultsIndex]
IndexResultsIndex = (IndexResultsIndex + 1) % len(IndexResults)
// W seeds the destination buffer; the written index data is discarded.
arg1 := bytes.NewBuffer(a.IndexNgdotWrite.W)
r0 := arg0.Write(arg1)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_IndexNgdotBytes:
if len(IndexResults) == 0 {
continue
}
arg0 := IndexResults[IndexResultsIndex]
IndexResultsIndex = (IndexResultsIndex + 1) % len(IndexResults)
arg0.Bytes()
case *NgoloFuzzOne_IndexNgdotLookup:
if len(IndexResults) == 0 {
continue
}
arg0 := IndexResults[IndexResultsIndex]
IndexResultsIndex = (IndexResultsIndex + 1) % len(IndexResults)
arg2 := int(a.IndexNgdotLookup.N)
arg0.Lookup(a.IndexNgdotLookup.S, arg2)
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w, mirroring the
// round-robin receiver selection of FuzzNG_List. IndexNb counts created
// indexes; IndexResultsIndex is the cursor used in printed receiver names.
// NOTE(review): FuzzNG_List can return early (e.g. on a Read error),
// so the printed sequence may extend past what the run executed —
// presumably acceptable for a best-effort reproducer; confirm.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
IndexNb := 0
IndexResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_New:
w.WriteString(fmt.Sprintf("Index%d := suffixarray.New(%#+v)\n", IndexNb, a.New.Data))
IndexNb = IndexNb + 1
case *NgoloFuzzOne_IndexNgdotRead:
// Method calls before any index exists are skipped, as at runtime.
if IndexNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Index%d.Read(bytes.NewReader(%#+v))\n", IndexResultsIndex, a.IndexNgdotRead.R))
IndexResultsIndex = (IndexResultsIndex + 1) % IndexNb
case *NgoloFuzzOne_IndexNgdotWrite:
if IndexNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Index%d.Write(bytes.NewBuffer(%#+v))\n", IndexResultsIndex, a.IndexNgdotWrite.W))
IndexResultsIndex = (IndexResultsIndex + 1) % IndexNb
case *NgoloFuzzOne_IndexNgdotBytes:
if IndexNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Index%d.Bytes()\n", IndexResultsIndex))
IndexResultsIndex = (IndexResultsIndex + 1) % IndexNb
case *NgoloFuzzOne_IndexNgdotLookup:
if IndexNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Index%d.Lookup(%#+v, int(%#+v))\n", IndexResultsIndex, a.IndexNgdotLookup.S, a.IndexNgdotLookup.N))
IndexResultsIndex = (IndexResultsIndex + 1) % IndexNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_index_suffixarray
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
*x = NewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
type IndexNgdotReadArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IndexNgdotReadArgs) Reset() {
*x = IndexNgdotReadArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IndexNgdotReadArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IndexNgdotReadArgs) ProtoMessage() {}
func (x *IndexNgdotReadArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IndexNgdotReadArgs.ProtoReflect.Descriptor instead.
func (*IndexNgdotReadArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *IndexNgdotReadArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type IndexNgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IndexNgdotWriteArgs) Reset() {
*x = IndexNgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IndexNgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IndexNgdotWriteArgs) ProtoMessage() {}
func (x *IndexNgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IndexNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*IndexNgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *IndexNgdotWriteArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type IndexNgdotBytesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IndexNgdotBytesArgs) Reset() {
*x = IndexNgdotBytesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IndexNgdotBytesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IndexNgdotBytesArgs) ProtoMessage() {}
func (x *IndexNgdotBytesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IndexNgdotBytesArgs.ProtoReflect.Descriptor instead.
func (*IndexNgdotBytesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
type IndexNgdotLookupArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S []byte `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
N int64 `protobuf:"varint,2,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IndexNgdotLookupArgs) Reset() {
*x = IndexNgdotLookupArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IndexNgdotLookupArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IndexNgdotLookupArgs) ProtoMessage() {}
func (x *IndexNgdotLookupArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IndexNgdotLookupArgs.ProtoReflect.Descriptor instead.
func (*IndexNgdotLookupArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *IndexNgdotLookupArgs) GetS() []byte {
if x != nil {
return x.S
}
return nil
}
func (x *IndexNgdotLookupArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// NgoloFuzzOne is one fuzzer-chosen operation, expressed as a protobuf
// oneof over the per-call argument messages. Machine-generated by
// protoc-gen-go — regenerate instead of hand-editing.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_New
// *NgoloFuzzOne_IndexNgdotRead
// *NgoloFuzzOne_IndexNgdotWrite
// *NgoloFuzzOne_IndexNgdotBytes
// *NgoloFuzzOne_IndexNgdotLookup
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// The Get* accessors below return the oneof payload when Item holds the
// corresponding wrapper, and the type's zero value otherwise.
func (x *NgoloFuzzOne) GetNew() *NewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
return x.New
}
}
return nil
}
func (x *NgoloFuzzOne) GetIndexNgdotRead() *IndexNgdotReadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IndexNgdotRead); ok {
return x.IndexNgdotRead
}
}
return nil
}
func (x *NgoloFuzzOne) GetIndexNgdotWrite() *IndexNgdotWriteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IndexNgdotWrite); ok {
return x.IndexNgdotWrite
}
}
return nil
}
func (x *NgoloFuzzOne) GetIndexNgdotBytes() *IndexNgdotBytesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IndexNgdotBytes); ok {
return x.IndexNgdotBytes
}
}
return nil
}
func (x *NgoloFuzzOne) GetIndexNgdotLookup() *IndexNgdotLookupArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IndexNgdotLookup); ok {
return x.IndexNgdotLookup
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_New struct {
New *NewArgs `protobuf:"bytes,1,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_IndexNgdotRead struct {
IndexNgdotRead *IndexNgdotReadArgs `protobuf:"bytes,2,opt,name=IndexNgdotRead,proto3,oneof"`
}
type NgoloFuzzOne_IndexNgdotWrite struct {
IndexNgdotWrite *IndexNgdotWriteArgs `protobuf:"bytes,3,opt,name=IndexNgdotWrite,proto3,oneof"`
}
type NgoloFuzzOne_IndexNgdotBytes struct {
IndexNgdotBytes *IndexNgdotBytesArgs `protobuf:"bytes,4,opt,name=IndexNgdotBytes,proto3,oneof"`
}
type NgoloFuzzOne_IndexNgdotLookup struct {
IndexNgdotLookup *IndexNgdotLookupArgs `protobuf:"bytes,5,opt,name=IndexNgdotLookup,proto3,oneof"`
}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IndexNgdotRead) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IndexNgdotWrite) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IndexNgdotBytes) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IndexNgdotLookup) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny carries one fuzzer-chosen scalar value as a protobuf oneof
// (double/int64/bool/string/bytes). Machine-generated by protoc-gen-go —
// regenerate instead of hand-editing.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// The Get* accessors below return the oneof payload when Item holds the
// corresponding wrapper, and the type's zero value otherwise.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzzer input: an ordered sequence of
// NgoloFuzzOne operations to replay. Machine-generated by protoc-gen-go —
// regenerate instead of hand-editing.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// GetList returns the operation sequence, or nil when the receiver is nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-encoded FileDescriptorProto for
// ngolofuzz.proto. Machine-generated opaque data — never edit the bytes.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1d\n" +
"\aNewArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"\"\n" +
"\x12IndexNgdotReadArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"#\n" +
"\x13IndexNgdotWriteArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"\x15\n" +
"\x13IndexNgdotBytesArgs\"2\n" +
"\x14IndexNgdotLookupArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\fR\x01s\x12\f\n" +
"\x01n\x18\x02 \x01(\x03R\x01n\"\xee\x02\n" +
"\fNgoloFuzzOne\x12&\n" +
"\x03New\x18\x01 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12G\n" +
"\x0eIndexNgdotRead\x18\x02 \x01(\v2\x1d.ngolofuzz.IndexNgdotReadArgsH\x00R\x0eIndexNgdotRead\x12J\n" +
"\x0fIndexNgdotWrite\x18\x03 \x01(\v2\x1e.ngolofuzz.IndexNgdotWriteArgsH\x00R\x0fIndexNgdotWrite\x12J\n" +
"\x0fIndexNgdotBytes\x18\x04 \x01(\v2\x1e.ngolofuzz.IndexNgdotBytesArgsH\x00R\x0fIndexNgdotBytes\x12M\n" +
"\x10IndexNgdotLookup\x18\x05 \x01(\v2\x1f.ngolofuzz.IndexNgdotLookupArgsH\x00R\x10IndexNgdotLookupB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1eZ\x1c./;fuzz_ng_index_suffixarrayb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily compresses the raw descriptor once
// and returns the cached gzip bytes (used by deprecated Descriptor methods).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Runtime type tables and one-time registration for ngolofuzz.proto.
// Machine-generated by protoc-gen-go — regenerate instead of hand-editing.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_ngolofuzz_proto_goTypes = []any{
(*NewArgs)(nil), // 0: ngolofuzz.NewArgs
(*IndexNgdotReadArgs)(nil), // 1: ngolofuzz.IndexNgdotReadArgs
(*IndexNgdotWriteArgs)(nil), // 2: ngolofuzz.IndexNgdotWriteArgs
(*IndexNgdotBytesArgs)(nil), // 3: ngolofuzz.IndexNgdotBytesArgs
(*IndexNgdotLookupArgs)(nil), // 4: ngolofuzz.IndexNgdotLookupArgs
(*NgoloFuzzOne)(nil), // 5: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 6: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 7: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
1, // 1: ngolofuzz.NgoloFuzzOne.IndexNgdotRead:type_name -> ngolofuzz.IndexNgdotReadArgs
2, // 2: ngolofuzz.NgoloFuzzOne.IndexNgdotWrite:type_name -> ngolofuzz.IndexNgdotWriteArgs
3, // 3: ngolofuzz.NgoloFuzzOne.IndexNgdotBytes:type_name -> ngolofuzz.IndexNgdotBytesArgs
4, // 4: ngolofuzz.NgoloFuzzOne.IndexNgdotLookup:type_name -> ngolofuzz.IndexNgdotLookupArgs
5, // 5: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
6, // [6:6] is the sub-list for method output_type
6, // [6:6] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init builds and registers the file descriptor exactly
// once; subsequent calls are no-ops guarded by the File_ngolofuzz_proto check.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_IndexNgdotRead)(nil),
(*NgoloFuzzOne_IndexNgdotWrite)(nil),
(*NgoloFuzzOne_IndexNgdotBytes)(nil),
(*NgoloFuzzOne_IndexNgdotLookup)(nil),
}
file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 8,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release build-time tables so they can be garbage collected.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_io
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub used to feed fuzzer-generated
// bytes to code that expects a network connection: Read serves from buf,
// Write discards its input and reports success.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // current read position within buf
}

// Read copies the next bytes of the buffer into b and advances the read
// offset, returning the number of bytes copied. Once the buffer is
// exhausted it returns (0, io.EOF).
//
// Fix: the original guard compared len(b) against len(c.buf)+c.offset
// instead of the remaining byte count (len(c.buf)-c.offset), then
// unconditionally advanced the offset by len(b). With a destination larger
// than the remaining data, Read over-reported n as len(b) and pushed offset
// past len(buf). copy() yields the correct count directly.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends to transmit b, discarding the data and reporting that
// every byte was written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}
// Close marks the connection as drained by jumping the read offset to the
// end of the buffer, so subsequent Reads report EOF. It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr whose network and address strings
// are fixed constants.
type FuzzingAddr struct{}

// Network reports the fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports the fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a fresh placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	var a FuzzingAddr
	return &a
}
// RemoteAddr returns a fresh placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	var a FuzzingAddr
	return &a
}
// SetDeadline is a no-op; a FuzzingConn never times out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}
// SetReadDeadline is a no-op; reads never time out.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}
// SetWriteDeadline is a no-op; writes never time out.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start of
// the data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit these helper constructors when the harness needs them.

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader backed by an in-memory
// byte source.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray converts each int64 in a to int, preserving order.
// Values outside the int range are truncated by the conversion.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array converts each int64 in a to uint16, preserving order.
// Values outside the uint16 range wrap via the usual Go truncation.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 yields utf8.RuneError, matching range-over-string semantics.
func GetRune(s string) rune {
	var first rune
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid replays a protobuf-encoded operation list that is assumed to
// be valid (e.g. produced by a structure-aware mutator). A decode failure
// panics loudly on purpose — it is installed before the recover below, so a
// corrupt corpus entry is surfaced rather than swallowed.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Suppress only string panics (presumably raised deliberately by the
// driven code as a soft abort); any other panic type is a real crash and
// is re-raised for the fuzzer to report.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point for raw fuzzer input: unlike
// FuzzNG_valid it tolerates undecodable data by returning 0 (uninteresting)
// instead of panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Suppress only string panics (soft aborts); re-raise everything else so
// genuine crashes are reported.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump below (single-threaded
// fuzzer assumption; not safe for concurrent callers).
var initialized bool
// FuzzNG_List interprets the decoded operation list against the io package.
// Each produced Reader/Writer/ReaderAt/SectionReader/OffsetWriter is stored
// in a per-type result slice and consumed round-robin via its *Index
// counter; operations whose inputs are not yet available are skipped.
// Returns 1 when the whole list executed, 0 on an early error exit.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// On first call, optionally write a standalone Go reproducer of this
// input to the file named by FUZZ_NG_REPRODUCER.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var OffsetWriterResults []*io.OffsetWriter
OffsetWriterResultsIndex := 0
var SectionReaderResults []*io.SectionReader
SectionReaderResultsIndex := 0
var WriterResults []*io.Writer
WriterResultsIndex := 0
var ReaderResults []*io.Reader
ReaderResultsIndex := 0
var ReaderAtResults []*io.ReaderAt
ReaderAtResultsIndex := 0
for l := range gen.List {
// Cap the number of executed operations to keep runs bounded.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_WriteString:
if len(WriterResults) == 0 {
continue
}
arg0 := *WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
_, r1 := io.WriteString(arg0, a.WriteString.S)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ReadAtLeast:
if len(ReaderResults) == 0 {
continue
}
arg0 := *ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
arg2 := int(a.ReadAtLeast.Min)
_, r1 := io.ReadAtLeast(arg0, a.ReadAtLeast.Buf, arg2)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ReadFull:
if len(ReaderResults) == 0 {
continue
}
arg0 := *ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, r1 := io.ReadFull(arg0, a.ReadFull.Buf)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_CopyN:
if len(WriterResults) == 0 {
continue
}
arg0 := *WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
if len(ReaderResults) == 0 {
continue
}
arg1 := *ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, r1 := io.CopyN(arg0, arg1, a.CopyN.N)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_Copy:
if len(WriterResults) == 0 {
continue
}
arg0 := *WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
if len(ReaderResults) == 0 {
continue
}
arg1 := *ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, r1 := io.Copy(arg0, arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_CopyBuffer:
if len(WriterResults) == 0 {
continue
}
arg0 := *WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
if len(ReaderResults) == 0 {
continue
}
arg1 := *ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, r1 := io.CopyBuffer(arg0, arg1, a.CopyBuffer.Buf)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_LimitReader:
if len(ReaderResults) == 0 {
continue
}
arg0 := *ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
r0 := io.LimitReader(arg0, a.LimitReader.N)
ReaderResults = append(ReaderResults, &r0)
case *NgoloFuzzOne_NewSectionReader:
if len(ReaderAtResults) == 0 {
continue
}
arg0 := *ReaderAtResults[ReaderAtResultsIndex]
ReaderAtResultsIndex = (ReaderAtResultsIndex + 1) % len(ReaderAtResults)
r0 := io.NewSectionReader(arg0, a.NewSectionReader.Off, a.NewSectionReader.N)
if r0 != nil{
SectionReaderResults = append(SectionReaderResults, r0)
}
case *NgoloFuzzOne_SectionReaderNgdotRead:
if len(SectionReaderResults) == 0 {
continue
}
arg0 := SectionReaderResults[SectionReaderResultsIndex]
SectionReaderResultsIndex = (SectionReaderResultsIndex + 1) % len(SectionReaderResults)
_, r1 := arg0.Read(a.SectionReaderNgdotRead.P)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_SectionReaderNgdotSeek:
if len(SectionReaderResults) == 0 {
continue
}
arg0 := SectionReaderResults[SectionReaderResultsIndex]
SectionReaderResultsIndex = (SectionReaderResultsIndex + 1) % len(SectionReaderResults)
arg2 := int(a.SectionReaderNgdotSeek.Whence)
_, r1 := arg0.Seek(a.SectionReaderNgdotSeek.Offset, arg2)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_SectionReaderNgdotReadAt:
if len(SectionReaderResults) == 0 {
continue
}
arg0 := SectionReaderResults[SectionReaderResultsIndex]
SectionReaderResultsIndex = (SectionReaderResultsIndex + 1) % len(SectionReaderResults)
_, r1 := arg0.ReadAt(a.SectionReaderNgdotReadAt.P, a.SectionReaderNgdotReadAt.Off)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_SectionReaderNgdotSize:
if len(SectionReaderResults) == 0 {
continue
}
arg0 := SectionReaderResults[SectionReaderResultsIndex]
SectionReaderResultsIndex = (SectionReaderResultsIndex + 1) % len(SectionReaderResults)
arg0.Size()
case *NgoloFuzzOne_SectionReaderNgdotOuter:
if len(SectionReaderResults) == 0 {
continue
}
arg0 := SectionReaderResults[SectionReaderResultsIndex]
SectionReaderResultsIndex = (SectionReaderResultsIndex + 1) % len(SectionReaderResults)
r0, _, _ := arg0.Outer()
ReaderAtResults = append(ReaderAtResults, &r0)
case *NgoloFuzzOne_OffsetWriterNgdotWrite:
if len(OffsetWriterResults) == 0 {
continue
}
arg0 := OffsetWriterResults[OffsetWriterResultsIndex]
OffsetWriterResultsIndex = (OffsetWriterResultsIndex + 1) % len(OffsetWriterResults)
_, r1 := arg0.Write(a.OffsetWriterNgdotWrite.P)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_OffsetWriterNgdotWriteAt:
if len(OffsetWriterResults) == 0 {
continue
}
arg0 := OffsetWriterResults[OffsetWriterResultsIndex]
OffsetWriterResultsIndex = (OffsetWriterResultsIndex + 1) % len(OffsetWriterResults)
_, r1 := arg0.WriteAt(a.OffsetWriterNgdotWriteAt.P, a.OffsetWriterNgdotWriteAt.Off)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_OffsetWriterNgdotSeek:
if len(OffsetWriterResults) == 0 {
continue
}
arg0 := OffsetWriterResults[OffsetWriterResultsIndex]
OffsetWriterResultsIndex = (OffsetWriterResultsIndex + 1) % len(OffsetWriterResults)
arg2 := int(a.OffsetWriterNgdotSeek.Whence)
_, r1 := arg0.Seek(a.OffsetWriterNgdotSeek.Offset, arg2)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TeeReader:
if len(ReaderResults) == 0 {
continue
}
arg0 := *ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
if len(WriterResults) == 0 {
continue
}
arg1 := *WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
r0 := io.TeeReader(arg0, arg1)
ReaderResults = append(ReaderResults, &r0)
case *NgoloFuzzOne_NopCloser:
if len(ReaderResults) == 0 {
continue
}
arg0 := *ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
io.NopCloser(arg0)
case *NgoloFuzzOne_ReadAll:
if len(ReaderResults) == 0 {
continue
}
arg0 := *ReaderResults[ReaderResultsIndex]
ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
_, r1 := io.ReadAll(arg0)
if r1 != nil{
r1.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer of the operation list to w.
// It mirrors FuzzNG_List's round-robin indexing and skip conditions exactly
// (using Nb counters in place of result-slice lengths) so the printed calls
// match what FuzzNG_List would execute; keep the two functions in lock-step.
// WriteString errors are intentionally ignored (best-effort dump).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
OffsetWriterNb := 0
OffsetWriterResultsIndex := 0
SectionReaderNb := 0
SectionReaderResultsIndex := 0
WriterNb := 0
WriterResultsIndex := 0
ReaderNb := 0
ReaderResultsIndex := 0
ReaderAtNb := 0
ReaderAtResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_WriteString:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("io.WriteString(Writer%d, %#+v)\n", (WriterResultsIndex + 0) % WriterNb, a.WriteString.S))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_ReadAtLeast:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("io.ReadAtLeast(Reader%d, %#+v, int(%#+v))\n", (ReaderResultsIndex + 0) % ReaderNb, a.ReadAtLeast.Buf, a.ReadAtLeast.Min))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReadFull:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("io.ReadFull(Reader%d, %#+v)\n", (ReaderResultsIndex + 0) % ReaderNb, a.ReadFull.Buf))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_CopyN:
if WriterNb == 0 {
continue
}
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("io.CopyN(Writer%d, Reader%d, %#+v)\n", (WriterResultsIndex + 0) % WriterNb, (ReaderResultsIndex + 0) % ReaderNb, a.CopyN.N))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_Copy:
if WriterNb == 0 {
continue
}
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("io.Copy(Writer%d, Reader%d)\n", (WriterResultsIndex + 0) % WriterNb, (ReaderResultsIndex + 0) % ReaderNb))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_CopyBuffer:
if WriterNb == 0 {
continue
}
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("io.CopyBuffer(Writer%d, Reader%d, %#+v)\n", (WriterResultsIndex + 0) % WriterNb, (ReaderResultsIndex + 0) % ReaderNb, a.CopyBuffer.Buf))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_LimitReader:
if ReaderNb == 0 {
continue
}
// Constructors print a new numbered variable and bump the Nb counter,
// mirroring the append in FuzzNG_List.
w.WriteString(fmt.Sprintf("Reader%d := io.LimitReader(Reader%d, %#+v)\n", ReaderNb, (ReaderResultsIndex + 0) % ReaderNb, a.LimitReader.N))
ReaderNb = ReaderNb + 1
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_NewSectionReader:
if ReaderAtNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("SectionReader%d := io.NewSectionReader(ReaderAt%d, %#+v, %#+v)\n", SectionReaderNb, (ReaderAtResultsIndex + 0) % ReaderAtNb, a.NewSectionReader.Off, a.NewSectionReader.N))
SectionReaderNb = SectionReaderNb + 1
ReaderAtResultsIndex = (ReaderAtResultsIndex + 1) % ReaderAtNb
case *NgoloFuzzOne_SectionReaderNgdotRead:
if SectionReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("SectionReader%d.Read(%#+v)\n", SectionReaderResultsIndex, a.SectionReaderNgdotRead.P))
SectionReaderResultsIndex = (SectionReaderResultsIndex + 1) % SectionReaderNb
case *NgoloFuzzOne_SectionReaderNgdotSeek:
if SectionReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("SectionReader%d.Seek(%#+v, int(%#+v))\n", SectionReaderResultsIndex, a.SectionReaderNgdotSeek.Offset, a.SectionReaderNgdotSeek.Whence))
SectionReaderResultsIndex = (SectionReaderResultsIndex + 1) % SectionReaderNb
case *NgoloFuzzOne_SectionReaderNgdotReadAt:
if SectionReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("SectionReader%d.ReadAt(%#+v, %#+v)\n", SectionReaderResultsIndex, a.SectionReaderNgdotReadAt.P, a.SectionReaderNgdotReadAt.Off))
SectionReaderResultsIndex = (SectionReaderResultsIndex + 1) % SectionReaderNb
case *NgoloFuzzOne_SectionReaderNgdotSize:
if SectionReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("SectionReader%d.Size()\n", SectionReaderResultsIndex))
SectionReaderResultsIndex = (SectionReaderResultsIndex + 1) % SectionReaderNb
case *NgoloFuzzOne_SectionReaderNgdotOuter:
if SectionReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("ReaderAt%d, _, _ := SectionReader%d.Outer()\n", ReaderAtNb, SectionReaderResultsIndex))
ReaderAtNb = ReaderAtNb + 1
SectionReaderResultsIndex = (SectionReaderResultsIndex + 1) % SectionReaderNb
case *NgoloFuzzOne_OffsetWriterNgdotWrite:
if OffsetWriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("OffsetWriter%d.Write(%#+v)\n", OffsetWriterResultsIndex, a.OffsetWriterNgdotWrite.P))
OffsetWriterResultsIndex = (OffsetWriterResultsIndex + 1) % OffsetWriterNb
case *NgoloFuzzOne_OffsetWriterNgdotWriteAt:
if OffsetWriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("OffsetWriter%d.WriteAt(%#+v, %#+v)\n", OffsetWriterResultsIndex, a.OffsetWriterNgdotWriteAt.P, a.OffsetWriterNgdotWriteAt.Off))
OffsetWriterResultsIndex = (OffsetWriterResultsIndex + 1) % OffsetWriterNb
case *NgoloFuzzOne_OffsetWriterNgdotSeek:
if OffsetWriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("OffsetWriter%d.Seek(%#+v, int(%#+v))\n", OffsetWriterResultsIndex, a.OffsetWriterNgdotSeek.Offset, a.OffsetWriterNgdotSeek.Whence))
OffsetWriterResultsIndex = (OffsetWriterResultsIndex + 1) % OffsetWriterNb
case *NgoloFuzzOne_TeeReader:
if ReaderNb == 0 {
continue
}
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Reader%d := io.TeeReader(Reader%d, Writer%d)\n", ReaderNb, (ReaderResultsIndex + 0) % ReaderNb, (WriterResultsIndex + 0) % WriterNb))
ReaderNb = ReaderNb + 1
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_NopCloser:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("io.NopCloser(Reader%d)\n", (ReaderResultsIndex + 0) % ReaderNb))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
case *NgoloFuzzOne_ReadAll:
if ReaderNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("io.ReadAll(Reader%d)\n", (ReaderResultsIndex + 0) % ReaderNb))
ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_io
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// WriteStringArgs holds the string argument for an io.WriteString call.
// Machine-generated by protoc-gen-go — regenerate instead of hand-editing.
type WriteStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriteStringArgs) Reset() {
*x = WriteStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriteStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriteStringArgs) ProtoMessage() {}
func (x *WriteStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriteStringArgs.ProtoReflect.Descriptor instead.
func (*WriteStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetS returns the s field, or "" when the receiver is nil.
func (x *WriteStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// ReadAtLeastArgs holds the buffer and minimum count for io.ReadAtLeast.
// Machine-generated by protoc-gen-go — regenerate instead of hand-editing.
type ReadAtLeastArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
Min int64 `protobuf:"varint,2,opt,name=min,proto3" json:"min,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReadAtLeastArgs) Reset() {
*x = ReadAtLeastArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReadAtLeastArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadAtLeastArgs) ProtoMessage() {}
func (x *ReadAtLeastArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadAtLeastArgs.ProtoReflect.Descriptor instead.
func (*ReadAtLeastArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetBuf returns the buf field, or nil when the receiver is nil.
func (x *ReadAtLeastArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
// GetMin returns the min field, or 0 when the receiver is nil.
func (x *ReadAtLeastArgs) GetMin() int64 {
if x != nil {
return x.Min
}
return 0
}
// ReadFullArgs holds the destination buffer for io.ReadFull.
// Machine-generated by protoc-gen-go — regenerate instead of hand-editing.
type ReadFullArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReadFullArgs) Reset() {
*x = ReadFullArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReadFullArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadFullArgs) ProtoMessage() {}
func (x *ReadFullArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadFullArgs.ProtoReflect.Descriptor instead.
func (*ReadFullArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// GetBuf returns the buf field, or nil when the receiver is nil.
func (x *ReadFullArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
// CopyNArgs holds the byte count for io.CopyN.
// Machine-generated by protoc-gen-go — regenerate instead of hand-editing.
type CopyNArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CopyNArgs) Reset() {
*x = CopyNArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CopyNArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CopyNArgs) ProtoMessage() {}
func (x *CopyNArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CopyNArgs.ProtoReflect.Descriptor instead.
func (*CopyNArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetN returns the n field, or 0 when the receiver is nil.
func (x *CopyNArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// CopyArgs is the (field-less) argument message for io.Copy.
// Machine-generated by protoc-gen-go — regenerate instead of hand-editing.
type CopyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CopyArgs) Reset() {
*x = CopyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CopyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CopyArgs) ProtoMessage() {}
func (x *CopyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CopyArgs.ProtoReflect.Descriptor instead.
func (*CopyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// CopyBufferArgs holds the scratch buffer for io.CopyBuffer.
// Machine-generated by protoc-gen-go — regenerate instead of hand-editing.
type CopyBufferArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CopyBufferArgs) Reset() {
*x = CopyBufferArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CopyBufferArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CopyBufferArgs) ProtoMessage() {}
func (x *CopyBufferArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CopyBufferArgs.ProtoReflect.Descriptor instead.
func (*CopyBufferArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetBuf returns the buf field, or nil when the receiver is nil.
func (x *CopyBufferArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
// LimitReaderArgs is the generated message for ngolofuzz.LimitReaderArgs;
// N is the fuzzer-chosen byte limit (field 1, varint).
type LimitReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *LimitReaderArgs) Reset() {
*x = LimitReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *LimitReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *LimitReaderArgs as a protobuf message.
func (*LimitReaderArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[6].
func (x *LimitReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LimitReaderArgs.ProtoReflect.Descriptor instead.
func (*LimitReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// GetN returns the `n` field, or 0 when the receiver is nil.
func (x *LimitReaderArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// NewSectionReaderArgs is the generated message for
// ngolofuzz.NewSectionReaderArgs: Off (field 1) and N (field 2) are the
// fuzzer-chosen offset and length.
type NewSectionReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Off int64 `protobuf:"varint,1,opt,name=off,proto3" json:"off,omitempty"`
N int64 `protobuf:"varint,2,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *NewSectionReaderArgs) Reset() {
*x = NewSectionReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *NewSectionReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *NewSectionReaderArgs as a protobuf message.
func (*NewSectionReaderArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[7].
func (x *NewSectionReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewSectionReaderArgs.ProtoReflect.Descriptor instead.
func (*NewSectionReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// GetOff returns the `off` field, or 0 when the receiver is nil.
func (x *NewSectionReaderArgs) GetOff() int64 {
if x != nil {
return x.Off
}
return 0
}
// GetN returns the `n` field, or 0 when the receiver is nil.
func (x *NewSectionReaderArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// SectionReaderNgdotReadArgs is the generated message for
// ngolofuzz.SectionReaderNgdotReadArgs; P is the read destination bytes.
// ("Ngdot" in these names appears to encode a '.' in "SectionReader.Read";
// confirm against the generator.)
type SectionReaderNgdotReadArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *SectionReaderNgdotReadArgs) Reset() {
*x = SectionReaderNgdotReadArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *SectionReaderNgdotReadArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*SectionReaderNgdotReadArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[8].
func (x *SectionReaderNgdotReadArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SectionReaderNgdotReadArgs.ProtoReflect.Descriptor instead.
func (*SectionReaderNgdotReadArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// GetP returns the `p` field, or nil when the receiver is nil.
func (x *SectionReaderNgdotReadArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// SectionReaderNgdotSeekArgs is the generated message for
// ngolofuzz.SectionReaderNgdotSeekArgs: Offset (field 1) and Whence (field 2)
// are the fuzzer-chosen seek arguments. Note Whence is int64 in the schema
// even though Go's Seek takes an int whence.
type SectionReaderNgdotSeekArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
Whence int64 `protobuf:"varint,2,opt,name=whence,proto3" json:"whence,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *SectionReaderNgdotSeekArgs) Reset() {
*x = SectionReaderNgdotSeekArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *SectionReaderNgdotSeekArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*SectionReaderNgdotSeekArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[9].
func (x *SectionReaderNgdotSeekArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SectionReaderNgdotSeekArgs.ProtoReflect.Descriptor instead.
func (*SectionReaderNgdotSeekArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetOffset returns the `offset` field, or 0 when the receiver is nil.
func (x *SectionReaderNgdotSeekArgs) GetOffset() int64 {
if x != nil {
return x.Offset
}
return 0
}
// GetWhence returns the `whence` field, or 0 when the receiver is nil.
func (x *SectionReaderNgdotSeekArgs) GetWhence() int64 {
if x != nil {
return x.Whence
}
return 0
}
// SectionReaderNgdotReadAtArgs is the generated message for
// ngolofuzz.SectionReaderNgdotReadAtArgs: P (field 1) is the read
// destination bytes and Off (field 2) the read offset.
type SectionReaderNgdotReadAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
Off int64 `protobuf:"varint,2,opt,name=off,proto3" json:"off,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *SectionReaderNgdotReadAtArgs) Reset() {
*x = SectionReaderNgdotReadAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *SectionReaderNgdotReadAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*SectionReaderNgdotReadAtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[10].
func (x *SectionReaderNgdotReadAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SectionReaderNgdotReadAtArgs.ProtoReflect.Descriptor instead.
func (*SectionReaderNgdotReadAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetP returns the `p` field, or nil when the receiver is nil.
func (x *SectionReaderNgdotReadAtArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// GetOff returns the `off` field, or 0 when the receiver is nil.
func (x *SectionReaderNgdotReadAtArgs) GetOff() int64 {
if x != nil {
return x.Off
}
return 0
}
// SectionReaderNgdotSizeArgs is the generated message for
// ngolofuzz.SectionReaderNgdotSizeArgs; it declares no fields.
type SectionReaderNgdotSizeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *SectionReaderNgdotSizeArgs) Reset() {
*x = SectionReaderNgdotSizeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *SectionReaderNgdotSizeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*SectionReaderNgdotSizeArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[11].
func (x *SectionReaderNgdotSizeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SectionReaderNgdotSizeArgs.ProtoReflect.Descriptor instead.
func (*SectionReaderNgdotSizeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// SectionReaderNgdotOuterArgs is the generated message for
// ngolofuzz.SectionReaderNgdotOuterArgs; it declares no fields.
type SectionReaderNgdotOuterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *SectionReaderNgdotOuterArgs) Reset() {
*x = SectionReaderNgdotOuterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *SectionReaderNgdotOuterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*SectionReaderNgdotOuterArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[12].
func (x *SectionReaderNgdotOuterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SectionReaderNgdotOuterArgs.ProtoReflect.Descriptor instead.
func (*SectionReaderNgdotOuterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// OffsetWriterNgdotWriteArgs is the generated message for
// ngolofuzz.OffsetWriterNgdotWriteArgs; P is the bytes to write.
type OffsetWriterNgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *OffsetWriterNgdotWriteArgs) Reset() {
*x = OffsetWriterNgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *OffsetWriterNgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*OffsetWriterNgdotWriteArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[13].
func (x *OffsetWriterNgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OffsetWriterNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*OffsetWriterNgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// GetP returns the `p` field, or nil when the receiver is nil.
func (x *OffsetWriterNgdotWriteArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// OffsetWriterNgdotWriteAtArgs is the generated message for
// ngolofuzz.OffsetWriterNgdotWriteAtArgs: P (field 1) is the bytes to write
// and Off (field 2) the write offset.
type OffsetWriterNgdotWriteAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
Off int64 `protobuf:"varint,2,opt,name=off,proto3" json:"off,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *OffsetWriterNgdotWriteAtArgs) Reset() {
*x = OffsetWriterNgdotWriteAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *OffsetWriterNgdotWriteAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*OffsetWriterNgdotWriteAtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[14].
func (x *OffsetWriterNgdotWriteAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OffsetWriterNgdotWriteAtArgs.ProtoReflect.Descriptor instead.
func (*OffsetWriterNgdotWriteAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// GetP returns the `p` field, or nil when the receiver is nil.
func (x *OffsetWriterNgdotWriteAtArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
// GetOff returns the `off` field, or 0 when the receiver is nil.
func (x *OffsetWriterNgdotWriteAtArgs) GetOff() int64 {
if x != nil {
return x.Off
}
return 0
}
// OffsetWriterNgdotSeekArgs is the generated message for
// ngolofuzz.OffsetWriterNgdotSeekArgs: Offset (field 1) and Whence (field 2)
// are the fuzzer-chosen seek arguments.
type OffsetWriterNgdotSeekArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
Whence int64 `protobuf:"varint,2,opt,name=whence,proto3" json:"whence,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *OffsetWriterNgdotSeekArgs) Reset() {
*x = OffsetWriterNgdotSeekArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *OffsetWriterNgdotSeekArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*OffsetWriterNgdotSeekArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[15].
func (x *OffsetWriterNgdotSeekArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OffsetWriterNgdotSeekArgs.ProtoReflect.Descriptor instead.
func (*OffsetWriterNgdotSeekArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetOffset returns the `offset` field, or 0 when the receiver is nil.
func (x *OffsetWriterNgdotSeekArgs) GetOffset() int64 {
if x != nil {
return x.Offset
}
return 0
}
// GetWhence returns the `whence` field, or 0 when the receiver is nil.
func (x *OffsetWriterNgdotSeekArgs) GetWhence() int64 {
if x != nil {
return x.Whence
}
return 0
}
// TeeReaderArgs is the generated message for ngolofuzz.TeeReaderArgs;
// it declares no fields.
type TeeReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *TeeReaderArgs) Reset() {
*x = TeeReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *TeeReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*TeeReaderArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[16].
func (x *TeeReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TeeReaderArgs.ProtoReflect.Descriptor instead.
func (*TeeReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// NopCloserArgs is the generated message for ngolofuzz.NopCloserArgs;
// it declares no fields.
type NopCloserArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *NopCloserArgs) Reset() {
*x = NopCloserArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *NopCloserArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NopCloserArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[17].
func (x *NopCloserArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NopCloserArgs.ProtoReflect.Descriptor instead.
func (*NopCloserArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// ReadAllArgs is the generated message for ngolofuzz.ReadAllArgs;
// it declares no fields.
type ReadAllArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *ReadAllArgs) Reset() {
*x = ReadAllArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *ReadAllArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ReadAllArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[18].
func (x *ReadAllArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadAllArgs.ProtoReflect.Descriptor instead.
func (*ReadAllArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// NgoloFuzzOne is the generated oneof-carrying message that selects a single
// fuzz operation per step; exactly one of the wrapper types below may be
// stored in Item at a time.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_WriteString
// *NgoloFuzzOne_ReadAtLeast
// *NgoloFuzzOne_ReadFull
// *NgoloFuzzOne_CopyN
// *NgoloFuzzOne_Copy
// *NgoloFuzzOne_CopyBuffer
// *NgoloFuzzOne_LimitReader
// *NgoloFuzzOne_NewSectionReader
// *NgoloFuzzOne_SectionReaderNgdotRead
// *NgoloFuzzOne_SectionReaderNgdotSeek
// *NgoloFuzzOne_SectionReaderNgdotReadAt
// *NgoloFuzzOne_SectionReaderNgdotSize
// *NgoloFuzzOne_SectionReaderNgdotOuter
// *NgoloFuzzOne_OffsetWriterNgdotWrite
// *NgoloFuzzOne_OffsetWriterNgdotWriteAt
// *NgoloFuzzOne_OffsetWriterNgdotSeek
// *NgoloFuzzOne_TeeReader
// *NgoloFuzzOne_NopCloser
// *NgoloFuzzOne_ReadAll
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[19].
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// GetItem returns the raw oneof wrapper, or nil for a nil receiver or
// unset oneof.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// The GetXxx accessors below each return the payload of the corresponding
// oneof case, or nil when the receiver is nil or a different case is set.
// Note the inner `x` in each type assertion intentionally shadows the
// receiver.
func (x *NgoloFuzzOne) GetWriteString() *WriteStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriteString); ok {
return x.WriteString
}
}
return nil
}
func (x *NgoloFuzzOne) GetReadAtLeast() *ReadAtLeastArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReadAtLeast); ok {
return x.ReadAtLeast
}
}
return nil
}
func (x *NgoloFuzzOne) GetReadFull() *ReadFullArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReadFull); ok {
return x.ReadFull
}
}
return nil
}
func (x *NgoloFuzzOne) GetCopyN() *CopyNArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CopyN); ok {
return x.CopyN
}
}
return nil
}
func (x *NgoloFuzzOne) GetCopy() *CopyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Copy); ok {
return x.Copy
}
}
return nil
}
func (x *NgoloFuzzOne) GetCopyBuffer() *CopyBufferArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CopyBuffer); ok {
return x.CopyBuffer
}
}
return nil
}
func (x *NgoloFuzzOne) GetLimitReader() *LimitReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LimitReader); ok {
return x.LimitReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewSectionReader() *NewSectionReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewSectionReader); ok {
return x.NewSectionReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetSectionReaderNgdotRead() *SectionReaderNgdotReadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionReaderNgdotRead); ok {
return x.SectionReaderNgdotRead
}
}
return nil
}
func (x *NgoloFuzzOne) GetSectionReaderNgdotSeek() *SectionReaderNgdotSeekArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionReaderNgdotSeek); ok {
return x.SectionReaderNgdotSeek
}
}
return nil
}
func (x *NgoloFuzzOne) GetSectionReaderNgdotReadAt() *SectionReaderNgdotReadAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionReaderNgdotReadAt); ok {
return x.SectionReaderNgdotReadAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetSectionReaderNgdotSize() *SectionReaderNgdotSizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionReaderNgdotSize); ok {
return x.SectionReaderNgdotSize
}
}
return nil
}
func (x *NgoloFuzzOne) GetSectionReaderNgdotOuter() *SectionReaderNgdotOuterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SectionReaderNgdotOuter); ok {
return x.SectionReaderNgdotOuter
}
}
return nil
}
func (x *NgoloFuzzOne) GetOffsetWriterNgdotWrite() *OffsetWriterNgdotWriteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_OffsetWriterNgdotWrite); ok {
return x.OffsetWriterNgdotWrite
}
}
return nil
}
func (x *NgoloFuzzOne) GetOffsetWriterNgdotWriteAt() *OffsetWriterNgdotWriteAtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_OffsetWriterNgdotWriteAt); ok {
return x.OffsetWriterNgdotWriteAt
}
}
return nil
}
func (x *NgoloFuzzOne) GetOffsetWriterNgdotSeek() *OffsetWriterNgdotSeekArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_OffsetWriterNgdotSeek); ok {
return x.OffsetWriterNgdotSeek
}
}
return nil
}
func (x *NgoloFuzzOne) GetTeeReader() *TeeReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TeeReader); ok {
return x.TeeReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetNopCloser() *NopCloserArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NopCloser); ok {
return x.NopCloser
}
}
return nil
}
func (x *NgoloFuzzOne) GetReadAll() *ReadAllArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReadAll); ok {
return x.ReadAll
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented only by the
// generated oneof wrapper types below.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// One single-field wrapper struct per oneof case; the struct tag records the
// proto field number of each case.
type NgoloFuzzOne_WriteString struct {
WriteString *WriteStringArgs `protobuf:"bytes,1,opt,name=WriteString,proto3,oneof"`
}
type NgoloFuzzOne_ReadAtLeast struct {
ReadAtLeast *ReadAtLeastArgs `protobuf:"bytes,2,opt,name=ReadAtLeast,proto3,oneof"`
}
type NgoloFuzzOne_ReadFull struct {
ReadFull *ReadFullArgs `protobuf:"bytes,3,opt,name=ReadFull,proto3,oneof"`
}
type NgoloFuzzOne_CopyN struct {
CopyN *CopyNArgs `protobuf:"bytes,4,opt,name=CopyN,proto3,oneof"`
}
type NgoloFuzzOne_Copy struct {
Copy *CopyArgs `protobuf:"bytes,5,opt,name=Copy,proto3,oneof"`
}
type NgoloFuzzOne_CopyBuffer struct {
CopyBuffer *CopyBufferArgs `protobuf:"bytes,6,opt,name=CopyBuffer,proto3,oneof"`
}
type NgoloFuzzOne_LimitReader struct {
LimitReader *LimitReaderArgs `protobuf:"bytes,7,opt,name=LimitReader,proto3,oneof"`
}
type NgoloFuzzOne_NewSectionReader struct {
NewSectionReader *NewSectionReaderArgs `protobuf:"bytes,8,opt,name=NewSectionReader,proto3,oneof"`
}
type NgoloFuzzOne_SectionReaderNgdotRead struct {
SectionReaderNgdotRead *SectionReaderNgdotReadArgs `protobuf:"bytes,9,opt,name=SectionReaderNgdotRead,proto3,oneof"`
}
type NgoloFuzzOne_SectionReaderNgdotSeek struct {
SectionReaderNgdotSeek *SectionReaderNgdotSeekArgs `protobuf:"bytes,10,opt,name=SectionReaderNgdotSeek,proto3,oneof"`
}
type NgoloFuzzOne_SectionReaderNgdotReadAt struct {
SectionReaderNgdotReadAt *SectionReaderNgdotReadAtArgs `protobuf:"bytes,11,opt,name=SectionReaderNgdotReadAt,proto3,oneof"`
}
type NgoloFuzzOne_SectionReaderNgdotSize struct {
SectionReaderNgdotSize *SectionReaderNgdotSizeArgs `protobuf:"bytes,12,opt,name=SectionReaderNgdotSize,proto3,oneof"`
}
type NgoloFuzzOne_SectionReaderNgdotOuter struct {
SectionReaderNgdotOuter *SectionReaderNgdotOuterArgs `protobuf:"bytes,13,opt,name=SectionReaderNgdotOuter,proto3,oneof"`
}
type NgoloFuzzOne_OffsetWriterNgdotWrite struct {
OffsetWriterNgdotWrite *OffsetWriterNgdotWriteArgs `protobuf:"bytes,14,opt,name=OffsetWriterNgdotWrite,proto3,oneof"`
}
type NgoloFuzzOne_OffsetWriterNgdotWriteAt struct {
OffsetWriterNgdotWriteAt *OffsetWriterNgdotWriteAtArgs `protobuf:"bytes,15,opt,name=OffsetWriterNgdotWriteAt,proto3,oneof"`
}
type NgoloFuzzOne_OffsetWriterNgdotSeek struct {
OffsetWriterNgdotSeek *OffsetWriterNgdotSeekArgs `protobuf:"bytes,16,opt,name=OffsetWriterNgdotSeek,proto3,oneof"`
}
type NgoloFuzzOne_TeeReader struct {
TeeReader *TeeReaderArgs `protobuf:"bytes,17,opt,name=TeeReader,proto3,oneof"`
}
type NgoloFuzzOne_NopCloser struct {
NopCloser *NopCloserArgs `protobuf:"bytes,18,opt,name=NopCloser,proto3,oneof"`
}
type NgoloFuzzOne_ReadAll struct {
ReadAll *ReadAllArgs `protobuf:"bytes,19,opt,name=ReadAll,proto3,oneof"`
}
// Marker methods sealing the oneof wrapper set to this package.
func (*NgoloFuzzOne_WriteString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReadAtLeast) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReadFull) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CopyN) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Copy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CopyBuffer) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LimitReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewSectionReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SectionReaderNgdotRead) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SectionReaderNgdotSeek) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SectionReaderNgdotReadAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SectionReaderNgdotSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SectionReaderNgdotOuter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_OffsetWriterNgdotWrite) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_OffsetWriterNgdotWriteAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_OffsetWriterNgdotSeek) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TeeReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NopCloser) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReadAll) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the generated oneof-carrying message holding one
// fuzzer-chosen scalar value (double, int64, bool, string, or bytes).
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[20].
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// GetItem returns the raw oneof wrapper, or nil if unset/nil receiver.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Each GetXxx below returns the value of its oneof case, or the type's zero
// value when the receiver is nil or a different case is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the sealed interface for the oneof wrappers below.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
// Marker methods sealing the oneof wrapper set to this package.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated top-level message: a repeated sequence of
// NgoloFuzzOne operations (field 1) that make up one fuzz input.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores the zero value and re-registers the message info.
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering via the protobuf runtime.
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding msgTypes[21].
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// GetList returns the `list` field, or nil when the receiver is nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto,
// populated by the generated registration code elsewhere in this file.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-format serialized
// FileDescriptorProto for ngolofuzz.proto (go_package "./;fuzz_ng_io").
// The bytes are emitted by protoc; never edit them by hand — any change
// must come from regenerating this file.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1f\n" +
"\x0fWriteStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"5\n" +
"\x0fReadAtLeastArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\x12\x10\n" +
"\x03min\x18\x02 \x01(\x03R\x03min\" \n" +
"\fReadFullArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\"\x19\n" +
"\tCopyNArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"\n" +
"\n" +
"\bCopyArgs\"\"\n" +
"\x0eCopyBufferArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\"\x1f\n" +
"\x0fLimitReaderArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"6\n" +
"\x14NewSectionReaderArgs\x12\x10\n" +
"\x03off\x18\x01 \x01(\x03R\x03off\x12\f\n" +
"\x01n\x18\x02 \x01(\x03R\x01n\"*\n" +
"\x1aSectionReaderNgdotReadArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\fR\x01p\"L\n" +
"\x1aSectionReaderNgdotSeekArgs\x12\x16\n" +
"\x06offset\x18\x01 \x01(\x03R\x06offset\x12\x16\n" +
"\x06whence\x18\x02 \x01(\x03R\x06whence\">\n" +
"\x1cSectionReaderNgdotReadAtArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\fR\x01p\x12\x10\n" +
"\x03off\x18\x02 \x01(\x03R\x03off\"\x1c\n" +
"\x1aSectionReaderNgdotSizeArgs\"\x1d\n" +
"\x1bSectionReaderNgdotOuterArgs\"*\n" +
"\x1aOffsetWriterNgdotWriteArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\fR\x01p\">\n" +
"\x1cOffsetWriterNgdotWriteAtArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\fR\x01p\x12\x10\n" +
"\x03off\x18\x02 \x01(\x03R\x03off\"K\n" +
"\x19OffsetWriterNgdotSeekArgs\x12\x16\n" +
"\x06offset\x18\x01 \x01(\x03R\x06offset\x12\x16\n" +
"\x06whence\x18\x02 \x01(\x03R\x06whence\"\x0f\n" +
"\rTeeReaderArgs\"\x0f\n" +
"\rNopCloserArgs\"\r\n" +
"\vReadAllArgs\"\xae\v\n" +
"\fNgoloFuzzOne\x12>\n" +
"\vWriteString\x18\x01 \x01(\v2\x1a.ngolofuzz.WriteStringArgsH\x00R\vWriteString\x12>\n" +
"\vReadAtLeast\x18\x02 \x01(\v2\x1a.ngolofuzz.ReadAtLeastArgsH\x00R\vReadAtLeast\x125\n" +
"\bReadFull\x18\x03 \x01(\v2\x17.ngolofuzz.ReadFullArgsH\x00R\bReadFull\x12,\n" +
"\x05CopyN\x18\x04 \x01(\v2\x14.ngolofuzz.CopyNArgsH\x00R\x05CopyN\x12)\n" +
"\x04Copy\x18\x05 \x01(\v2\x13.ngolofuzz.CopyArgsH\x00R\x04Copy\x12;\n" +
"\n" +
"CopyBuffer\x18\x06 \x01(\v2\x19.ngolofuzz.CopyBufferArgsH\x00R\n" +
"CopyBuffer\x12>\n" +
"\vLimitReader\x18\a \x01(\v2\x1a.ngolofuzz.LimitReaderArgsH\x00R\vLimitReader\x12M\n" +
"\x10NewSectionReader\x18\b \x01(\v2\x1f.ngolofuzz.NewSectionReaderArgsH\x00R\x10NewSectionReader\x12_\n" +
"\x16SectionReaderNgdotRead\x18\t \x01(\v2%.ngolofuzz.SectionReaderNgdotReadArgsH\x00R\x16SectionReaderNgdotRead\x12_\n" +
"\x16SectionReaderNgdotSeek\x18\n" +
" \x01(\v2%.ngolofuzz.SectionReaderNgdotSeekArgsH\x00R\x16SectionReaderNgdotSeek\x12e\n" +
"\x18SectionReaderNgdotReadAt\x18\v \x01(\v2'.ngolofuzz.SectionReaderNgdotReadAtArgsH\x00R\x18SectionReaderNgdotReadAt\x12_\n" +
"\x16SectionReaderNgdotSize\x18\f \x01(\v2%.ngolofuzz.SectionReaderNgdotSizeArgsH\x00R\x16SectionReaderNgdotSize\x12b\n" +
"\x17SectionReaderNgdotOuter\x18\r \x01(\v2&.ngolofuzz.SectionReaderNgdotOuterArgsH\x00R\x17SectionReaderNgdotOuter\x12_\n" +
"\x16OffsetWriterNgdotWrite\x18\x0e \x01(\v2%.ngolofuzz.OffsetWriterNgdotWriteArgsH\x00R\x16OffsetWriterNgdotWrite\x12e\n" +
"\x18OffsetWriterNgdotWriteAt\x18\x0f \x01(\v2'.ngolofuzz.OffsetWriterNgdotWriteAtArgsH\x00R\x18OffsetWriterNgdotWriteAt\x12\\\n" +
"\x15OffsetWriterNgdotSeek\x18\x10 \x01(\v2$.ngolofuzz.OffsetWriterNgdotSeekArgsH\x00R\x15OffsetWriterNgdotSeek\x128\n" +
"\tTeeReader\x18\x11 \x01(\v2\x18.ngolofuzz.TeeReaderArgsH\x00R\tTeeReader\x128\n" +
"\tNopCloser\x18\x12 \x01(\v2\x18.ngolofuzz.NopCloserArgsH\x00R\tNopCloser\x122\n" +
"\aReadAll\x18\x13 \x01(\v2\x16.ngolofuzz.ReadAllArgsH\x00R\aReadAllB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x0fZ\r./;fuzz_ng_iob\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 22)
var file_ngolofuzz_proto_goTypes = []any{
(*WriteStringArgs)(nil), // 0: ngolofuzz.WriteStringArgs
(*ReadAtLeastArgs)(nil), // 1: ngolofuzz.ReadAtLeastArgs
(*ReadFullArgs)(nil), // 2: ngolofuzz.ReadFullArgs
(*CopyNArgs)(nil), // 3: ngolofuzz.CopyNArgs
(*CopyArgs)(nil), // 4: ngolofuzz.CopyArgs
(*CopyBufferArgs)(nil), // 5: ngolofuzz.CopyBufferArgs
(*LimitReaderArgs)(nil), // 6: ngolofuzz.LimitReaderArgs
(*NewSectionReaderArgs)(nil), // 7: ngolofuzz.NewSectionReaderArgs
(*SectionReaderNgdotReadArgs)(nil), // 8: ngolofuzz.SectionReaderNgdotReadArgs
(*SectionReaderNgdotSeekArgs)(nil), // 9: ngolofuzz.SectionReaderNgdotSeekArgs
(*SectionReaderNgdotReadAtArgs)(nil), // 10: ngolofuzz.SectionReaderNgdotReadAtArgs
(*SectionReaderNgdotSizeArgs)(nil), // 11: ngolofuzz.SectionReaderNgdotSizeArgs
(*SectionReaderNgdotOuterArgs)(nil), // 12: ngolofuzz.SectionReaderNgdotOuterArgs
(*OffsetWriterNgdotWriteArgs)(nil), // 13: ngolofuzz.OffsetWriterNgdotWriteArgs
(*OffsetWriterNgdotWriteAtArgs)(nil), // 14: ngolofuzz.OffsetWriterNgdotWriteAtArgs
(*OffsetWriterNgdotSeekArgs)(nil), // 15: ngolofuzz.OffsetWriterNgdotSeekArgs
(*TeeReaderArgs)(nil), // 16: ngolofuzz.TeeReaderArgs
(*NopCloserArgs)(nil), // 17: ngolofuzz.NopCloserArgs
(*ReadAllArgs)(nil), // 18: ngolofuzz.ReadAllArgs
(*NgoloFuzzOne)(nil), // 19: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 20: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 21: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.WriteString:type_name -> ngolofuzz.WriteStringArgs
1, // 1: ngolofuzz.NgoloFuzzOne.ReadAtLeast:type_name -> ngolofuzz.ReadAtLeastArgs
2, // 2: ngolofuzz.NgoloFuzzOne.ReadFull:type_name -> ngolofuzz.ReadFullArgs
3, // 3: ngolofuzz.NgoloFuzzOne.CopyN:type_name -> ngolofuzz.CopyNArgs
4, // 4: ngolofuzz.NgoloFuzzOne.Copy:type_name -> ngolofuzz.CopyArgs
5, // 5: ngolofuzz.NgoloFuzzOne.CopyBuffer:type_name -> ngolofuzz.CopyBufferArgs
6, // 6: ngolofuzz.NgoloFuzzOne.LimitReader:type_name -> ngolofuzz.LimitReaderArgs
7, // 7: ngolofuzz.NgoloFuzzOne.NewSectionReader:type_name -> ngolofuzz.NewSectionReaderArgs
8, // 8: ngolofuzz.NgoloFuzzOne.SectionReaderNgdotRead:type_name -> ngolofuzz.SectionReaderNgdotReadArgs
9, // 9: ngolofuzz.NgoloFuzzOne.SectionReaderNgdotSeek:type_name -> ngolofuzz.SectionReaderNgdotSeekArgs
10, // 10: ngolofuzz.NgoloFuzzOne.SectionReaderNgdotReadAt:type_name -> ngolofuzz.SectionReaderNgdotReadAtArgs
11, // 11: ngolofuzz.NgoloFuzzOne.SectionReaderNgdotSize:type_name -> ngolofuzz.SectionReaderNgdotSizeArgs
12, // 12: ngolofuzz.NgoloFuzzOne.SectionReaderNgdotOuter:type_name -> ngolofuzz.SectionReaderNgdotOuterArgs
13, // 13: ngolofuzz.NgoloFuzzOne.OffsetWriterNgdotWrite:type_name -> ngolofuzz.OffsetWriterNgdotWriteArgs
14, // 14: ngolofuzz.NgoloFuzzOne.OffsetWriterNgdotWriteAt:type_name -> ngolofuzz.OffsetWriterNgdotWriteAtArgs
15, // 15: ngolofuzz.NgoloFuzzOne.OffsetWriterNgdotSeek:type_name -> ngolofuzz.OffsetWriterNgdotSeekArgs
16, // 16: ngolofuzz.NgoloFuzzOne.TeeReader:type_name -> ngolofuzz.TeeReaderArgs
17, // 17: ngolofuzz.NgoloFuzzOne.NopCloser:type_name -> ngolofuzz.NopCloserArgs
18, // 18: ngolofuzz.NgoloFuzzOne.ReadAll:type_name -> ngolofuzz.ReadAllArgs
19, // 19: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
20, // [20:20] is the sub-list for method output_type
20, // [20:20] is the sub-list for method input_type
20, // [20:20] is the sub-list for extension type_name
20, // [20:20] is the sub-list for extension extendee
0, // [0:20] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[19].OneofWrappers = []any{
(*NgoloFuzzOne_WriteString)(nil),
(*NgoloFuzzOne_ReadAtLeast)(nil),
(*NgoloFuzzOne_ReadFull)(nil),
(*NgoloFuzzOne_CopyN)(nil),
(*NgoloFuzzOne_Copy)(nil),
(*NgoloFuzzOne_CopyBuffer)(nil),
(*NgoloFuzzOne_LimitReader)(nil),
(*NgoloFuzzOne_NewSectionReader)(nil),
(*NgoloFuzzOne_SectionReaderNgdotRead)(nil),
(*NgoloFuzzOne_SectionReaderNgdotSeek)(nil),
(*NgoloFuzzOne_SectionReaderNgdotReadAt)(nil),
(*NgoloFuzzOne_SectionReaderNgdotSize)(nil),
(*NgoloFuzzOne_SectionReaderNgdotOuter)(nil),
(*NgoloFuzzOne_OffsetWriterNgdotWrite)(nil),
(*NgoloFuzzOne_OffsetWriterNgdotWriteAt)(nil),
(*NgoloFuzzOne_OffsetWriterNgdotSeek)(nil),
(*NgoloFuzzOne_TeeReader)(nil),
(*NgoloFuzzOne_NopCloser)(nil),
(*NgoloFuzzOne_ReadAll)(nil),
}
file_ngolofuzz_proto_msgTypes[20].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 22,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_io_ioutil
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub: Read replays a fixed byte
// buffer and the companion Write/Close methods are no-ops, letting the
// fuzzer drive connection-oriented APIs deterministically.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // current read position within buf
}

// Read copies the next chunk of buffered data into b and advances the
// offset. It returns io.EOF once the buffer is exhausted.
//
// BUG FIX: the original hand-rolled the length bookkeeping and compared
// len(b) against len(c.buf)+c.offset instead of len(c.buf)-c.offset, so
// near the end of the buffer it returned n=len(b) and advanced offset by
// len(b) even though fewer bytes were actually copied, over-reporting the
// read. copy's return value is min(len(b), len(c.buf)-c.offset), which is
// the correct count in every case.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends to consume b fully and always succeeds; the data itself
// is discarded, which is all a fuzzing stub needs.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}
// Close marks the connection drained so later Reads report io.EOF.
// It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn builds a FuzzingConn that serves a on Read.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a slice of int64 values to a []int of equal
// length, converting element-wise.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to a uint16, preserving
// order and length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL for an empty string.
func GetRune(s string) rune {
	r := '\x00'
	for _, c := range s {
		r = c
		break
	}
	return r
}
// FuzzNG_valid is the fuzzing entry point for inputs that are guaranteed
// to be valid protobuf (e.g. produced by libprotobuf-mutator); a decode
// failure here is a harness bug and crashes deliberately.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Registered after the unmarshal check on purpose: the panic above must
	// crash, while string panics raised by the exercised API are expected
	// and swallowed so fuzzing can continue; all other panics re-raise.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf

// FuzzNG_unsure is the fuzzing entry point for raw fuzzer input: a
// protobuf decode failure simply rejects the input (returns 0) instead of
// crashing. Otherwise identical to FuzzNG_valid.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// String panics from the exercised API are expected and swallowed so
	// fuzzing can continue; any other panic re-raises.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays every call described in gen against the io/ioutil
// API. On first use it optionally writes a human-readable reproducer to
// the file named by $FUZZ_NG_REPRODUCER. Returns 1 when the whole list was
// executed, 0 as soon as one call errors (or the list is oversized).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the amount of work a single input can trigger.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ReadAll:
			arg0 := bytes.NewReader(a.ReadAll.R)
			_, r1 := ioutil.ReadAll(arg0)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReadFile:
			_, r1 := ioutil.ReadFile(a.ReadFile.Filename)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReadDir:
			_, r1 := ioutil.ReadDir(a.ReadDir.Dirname)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_NopCloser:
			arg0 := bytes.NewReader(a.NopCloser.R)
			ioutil.NopCloser(arg0)
		case *NgoloFuzzOne_TempFile:
			_, r1 := ioutil.TempFile(a.TempFile.Dir, a.TempFile.Pattern)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_TempDir:
			_, r1 := ioutil.TempDir(a.TempDir.Dir, a.TempDir.Pattern)
			if r1 != nil{
				r1.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source-style reproducer for gen to w, one
// ioutil call per line. Write errors are deliberately ignored: this is
// best-effort debug output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ReadAll:
			w.WriteString(fmt.Sprintf("ioutil.ReadAll(bytes.NewReader(%#+v))\n", a.ReadAll.R))
		case *NgoloFuzzOne_ReadFile:
			w.WriteString(fmt.Sprintf("ioutil.ReadFile(%#+v)\n", a.ReadFile.Filename))
		case *NgoloFuzzOne_ReadDir:
			w.WriteString(fmt.Sprintf("ioutil.ReadDir(%#+v)\n", a.ReadDir.Dirname))
		case *NgoloFuzzOne_NopCloser:
			w.WriteString(fmt.Sprintf("ioutil.NopCloser(bytes.NewReader(%#+v))\n", a.NopCloser.R))
		case *NgoloFuzzOne_TempFile:
			w.WriteString(fmt.Sprintf("ioutil.TempFile(%#+v, %#+v)\n", a.TempFile.Dir, a.TempFile.Pattern))
		case *NgoloFuzzOne_TempDir:
			w.WriteString(fmt.Sprintf("ioutil.TempDir(%#+v, %#+v)\n", a.TempDir.Dir, a.TempDir.Pattern))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_io_ioutil
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ReadAllArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReadAllArgs) Reset() {
*x = ReadAllArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReadAllArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadAllArgs) ProtoMessage() {}
func (x *ReadAllArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadAllArgs.ProtoReflect.Descriptor instead.
func (*ReadAllArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *ReadAllArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type ReadFileArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReadFileArgs) Reset() {
*x = ReadFileArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReadFileArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadFileArgs) ProtoMessage() {}
func (x *ReadFileArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadFileArgs.ProtoReflect.Descriptor instead.
func (*ReadFileArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *ReadFileArgs) GetFilename() string {
if x != nil {
return x.Filename
}
return ""
}
type ReadDirArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dirname string `protobuf:"bytes,1,opt,name=dirname,proto3" json:"dirname,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReadDirArgs) Reset() {
*x = ReadDirArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReadDirArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadDirArgs) ProtoMessage() {}
func (x *ReadDirArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadDirArgs.ProtoReflect.Descriptor instead.
func (*ReadDirArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *ReadDirArgs) GetDirname() string {
if x != nil {
return x.Dirname
}
return ""
}
type NopCloserArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NopCloserArgs) Reset() {
*x = NopCloserArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NopCloserArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NopCloserArgs) ProtoMessage() {}
func (x *NopCloserArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NopCloserArgs.ProtoReflect.Descriptor instead.
func (*NopCloserArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NopCloserArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type TempFileArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"`
Pattern string `protobuf:"bytes,2,opt,name=pattern,proto3" json:"pattern,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TempFileArgs) Reset() {
*x = TempFileArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TempFileArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TempFileArgs) ProtoMessage() {}
func (x *TempFileArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TempFileArgs.ProtoReflect.Descriptor instead.
func (*TempFileArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *TempFileArgs) GetDir() string {
if x != nil {
return x.Dir
}
return ""
}
func (x *TempFileArgs) GetPattern() string {
if x != nil {
return x.Pattern
}
return ""
}
type TempDirArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"`
Pattern string `protobuf:"bytes,2,opt,name=pattern,proto3" json:"pattern,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TempDirArgs) Reset() {
*x = TempDirArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TempDirArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TempDirArgs) ProtoMessage() {}
func (x *TempDirArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TempDirArgs.ProtoReflect.Descriptor instead.
func (*TempDirArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *TempDirArgs) GetDir() string {
if x != nil {
return x.Dir
}
return ""
}
func (x *TempDirArgs) GetPattern() string {
if x != nil {
return x.Pattern
}
return ""
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_ReadAll
// *NgoloFuzzOne_ReadFile
// *NgoloFuzzOne_ReadDir
// *NgoloFuzzOne_NopCloser
// *NgoloFuzzOne_TempFile
// *NgoloFuzzOne_TempDir
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetReadAll() *ReadAllArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReadAll); ok {
return x.ReadAll
}
}
return nil
}
func (x *NgoloFuzzOne) GetReadFile() *ReadFileArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReadFile); ok {
return x.ReadFile
}
}
return nil
}
func (x *NgoloFuzzOne) GetReadDir() *ReadDirArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReadDir); ok {
return x.ReadDir
}
}
return nil
}
func (x *NgoloFuzzOne) GetNopCloser() *NopCloserArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NopCloser); ok {
return x.NopCloser
}
}
return nil
}
func (x *NgoloFuzzOne) GetTempFile() *TempFileArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TempFile); ok {
return x.TempFile
}
}
return nil
}
func (x *NgoloFuzzOne) GetTempDir() *TempDirArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TempDir); ok {
return x.TempDir
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_ReadAll struct {
ReadAll *ReadAllArgs `protobuf:"bytes,1,opt,name=ReadAll,proto3,oneof"`
}
type NgoloFuzzOne_ReadFile struct {
ReadFile *ReadFileArgs `protobuf:"bytes,2,opt,name=ReadFile,proto3,oneof"`
}
type NgoloFuzzOne_ReadDir struct {
ReadDir *ReadDirArgs `protobuf:"bytes,3,opt,name=ReadDir,proto3,oneof"`
}
type NgoloFuzzOne_NopCloser struct {
NopCloser *NopCloserArgs `protobuf:"bytes,4,opt,name=NopCloser,proto3,oneof"`
}
type NgoloFuzzOne_TempFile struct {
TempFile *TempFileArgs `protobuf:"bytes,5,opt,name=TempFile,proto3,oneof"`
}
type NgoloFuzzOne_TempDir struct {
TempDir *TempDirArgs `protobuf:"bytes,6,opt,name=TempDir,proto3,oneof"`
}
func (*NgoloFuzzOne_ReadAll) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReadFile) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReadDir) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NopCloser) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TempFile) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TempDir) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1b\n" +
"\vReadAllArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"*\n" +
"\fReadFileArgs\x12\x1a\n" +
"\bfilename\x18\x01 \x01(\tR\bfilename\"'\n" +
"\vReadDirArgs\x12\x18\n" +
"\adirname\x18\x01 \x01(\tR\adirname\"\x1d\n" +
"\rNopCloserArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\":\n" +
"\fTempFileArgs\x12\x10\n" +
"\x03dir\x18\x01 \x01(\tR\x03dir\x12\x18\n" +
"\apattern\x18\x02 \x01(\tR\apattern\"9\n" +
"\vTempDirArgs\x12\x10\n" +
"\x03dir\x18\x01 \x01(\tR\x03dir\x12\x18\n" +
"\apattern\x18\x02 \x01(\tR\apattern\"\xda\x02\n" +
"\fNgoloFuzzOne\x122\n" +
"\aReadAll\x18\x01 \x01(\v2\x16.ngolofuzz.ReadAllArgsH\x00R\aReadAll\x125\n" +
"\bReadFile\x18\x02 \x01(\v2\x17.ngolofuzz.ReadFileArgsH\x00R\bReadFile\x122\n" +
"\aReadDir\x18\x03 \x01(\v2\x16.ngolofuzz.ReadDirArgsH\x00R\aReadDir\x128\n" +
"\tNopCloser\x18\x04 \x01(\v2\x18.ngolofuzz.NopCloserArgsH\x00R\tNopCloser\x125\n" +
"\bTempFile\x18\x05 \x01(\v2\x17.ngolofuzz.TempFileArgsH\x00R\bTempFile\x122\n" +
"\aTempDir\x18\x06 \x01(\v2\x16.ngolofuzz.TempDirArgsH\x00R\aTempDirB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x16Z\x14./;fuzz_ng_io_ioutilb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_ngolofuzz_proto_goTypes = []any{
(*ReadAllArgs)(nil), // 0: ngolofuzz.ReadAllArgs
(*ReadFileArgs)(nil), // 1: ngolofuzz.ReadFileArgs
(*ReadDirArgs)(nil), // 2: ngolofuzz.ReadDirArgs
(*NopCloserArgs)(nil), // 3: ngolofuzz.NopCloserArgs
(*TempFileArgs)(nil), // 4: ngolofuzz.TempFileArgs
(*TempDirArgs)(nil), // 5: ngolofuzz.TempDirArgs
(*NgoloFuzzOne)(nil), // 6: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 7: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 8: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.ReadAll:type_name -> ngolofuzz.ReadAllArgs
1, // 1: ngolofuzz.NgoloFuzzOne.ReadFile:type_name -> ngolofuzz.ReadFileArgs
2, // 2: ngolofuzz.NgoloFuzzOne.ReadDir:type_name -> ngolofuzz.ReadDirArgs
3, // 3: ngolofuzz.NgoloFuzzOne.NopCloser:type_name -> ngolofuzz.NopCloserArgs
4, // 4: ngolofuzz.NgoloFuzzOne.TempFile:type_name -> ngolofuzz.TempFileArgs
5, // 5: ngolofuzz.NgoloFuzzOne.TempDir:type_name -> ngolofuzz.TempDirArgs
6, // 6: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
7, // [7:7] is the sub-list for method output_type
7, // [7:7] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
(*NgoloFuzzOne_ReadAll)(nil),
(*NgoloFuzzOne_ReadFile)(nil),
(*NgoloFuzzOne_ReadDir)(nil),
(*NgoloFuzzOne_NopCloser)(nil),
(*NgoloFuzzOne_TempFile)(nil),
(*NgoloFuzzOne_TempDir)(nil),
}
file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_log
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub: Read replays a fixed byte
// buffer and the companion Write/Close methods are no-ops, letting the
// fuzzer drive connection-oriented APIs deterministically.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // current read position within buf
}

// Read copies the next chunk of buffered data into b and advances the
// offset. It returns io.EOF once the buffer is exhausted.
//
// BUG FIX: the original compared len(b) against len(c.buf)+c.offset
// instead of len(c.buf)-c.offset, so near the end of the buffer it
// returned n=len(b) and advanced offset by len(b) even though fewer bytes
// were actually copied. copy's return value is the correct count.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends to consume b fully and always succeeds; the data itself
// is discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection drained so later Reads report io.EOF.
// It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn builds a FuzzingConn that serves a on Read.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a slice of int64 values to a []int of equal
// length, converting element-wise.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to a uint16, preserving
// order and length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL for an empty string.
func GetRune(s string) rune {
	r := '\x00'
	for _, c := range s {
		r = c
		break
	}
	return r
}
// FuzzNG_valid is the fuzzing entry point for inputs that are guaranteed
// to be valid protobuf (e.g. produced by libprotobuf-mutator); a decode
// failure here is a harness bug and crashes deliberately.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Registered after the unmarshal check on purpose: the panic above must
	// crash, while string panics raised by the exercised API are expected
	// and swallowed so fuzzing can continue; all other panics re-raise.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf

// FuzzNG_unsure is the fuzzing entry point for raw fuzzer input: a
// protobuf decode failure simply rejects the input (returns 0) instead of
// crashing. Otherwise identical to FuzzNG_valid.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// String panics from the exercised API are expected and swallowed so
	// fuzzing can continue; any other panic re-raises.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}

// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var LoggerResults []*log.Logger
LoggerResultsIndex := 0
for l := range gen.List {
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_New:
arg0 := bytes.NewBuffer(a.New.Out)
arg2 := int(a.New.Flag)
r0 := log.New(arg0, a.New.Prefix, arg2)
if r0 != nil{
LoggerResults = append(LoggerResults, r0)
}
case *NgoloFuzzOne_LoggerNgdotSetOutput:
if len(LoggerResults) == 0 {
continue
}
arg0 := LoggerResults[LoggerResultsIndex]
LoggerResultsIndex = (LoggerResultsIndex + 1) % len(LoggerResults)
arg1 := bytes.NewBuffer(a.LoggerNgdotSetOutput.W)
arg0.SetOutput(arg1)
case *NgoloFuzzOne_Default:
r0 := log.Default()
if r0 != nil{
LoggerResults = append(LoggerResults, r0)
}
case *NgoloFuzzOne_LoggerNgdotOutput:
if len(LoggerResults) == 0 {
continue
}
arg0 := LoggerResults[LoggerResultsIndex]
LoggerResultsIndex = (LoggerResultsIndex + 1) % len(LoggerResults)
arg1 := int(a.LoggerNgdotOutput.Calldepth)
r0 := arg0.Output(arg1, a.LoggerNgdotOutput.S)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_LoggerNgdotFlags:
if len(LoggerResults) == 0 {
continue
}
arg0 := LoggerResults[LoggerResultsIndex]
LoggerResultsIndex = (LoggerResultsIndex + 1) % len(LoggerResults)
arg0.Flags()
case *NgoloFuzzOne_LoggerNgdotSetFlags:
if len(LoggerResults) == 0 {
continue
}
arg0 := LoggerResults[LoggerResultsIndex]
LoggerResultsIndex = (LoggerResultsIndex + 1) % len(LoggerResults)
arg1 := int(a.LoggerNgdotSetFlags.Flag)
arg0.SetFlags(arg1)
case *NgoloFuzzOne_LoggerNgdotPrefix:
if len(LoggerResults) == 0 {
continue
}
arg0 := LoggerResults[LoggerResultsIndex]
LoggerResultsIndex = (LoggerResultsIndex + 1) % len(LoggerResults)
arg0.Prefix()
case *NgoloFuzzOne_LoggerNgdotSetPrefix:
if len(LoggerResults) == 0 {
continue
}
arg0 := LoggerResults[LoggerResultsIndex]
LoggerResultsIndex = (LoggerResultsIndex + 1) % len(LoggerResults)
arg0.SetPrefix(a.LoggerNgdotSetPrefix.Prefix)
case *NgoloFuzzOne_LoggerNgdotWriter:
if len(LoggerResults) == 0 {
continue
}
arg0 := LoggerResults[LoggerResultsIndex]
LoggerResultsIndex = (LoggerResultsIndex + 1) % len(LoggerResults)
arg0.Writer()
case *NgoloFuzzOne_SetOutput:
arg0 := bytes.NewBuffer(a.SetOutput.W)
log.SetOutput(arg0)
case *NgoloFuzzOne_Flags:
log.Flags()
case *NgoloFuzzOne_SetFlags:
arg0 := int(a.SetFlags.Flag)
log.SetFlags(arg0)
case *NgoloFuzzOne_Prefix:
log.Prefix()
case *NgoloFuzzOne_SetPrefix:
log.SetPrefix(a.SetPrefix.Prefix)
case *NgoloFuzzOne_Writer:
log.Writer()
case *NgoloFuzzOne_Output:
arg0 := int(a.Output.Calldepth)
r0 := log.Output(arg0, a.Output.S)
if r0 != nil{
r0.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes to w a Go-source reproduction of the call sequence
// that FuzzNG_List would execute for gen, so that a crashing input can be
// replayed as a standalone program. It mirrors FuzzNG_List's round-robin
// logger selection using plain counters: LoggerNb is the number of
// loggers created so far, LoggerResultsIndex is the next one to use.
// WriteString errors are deliberately ignored (best-effort dump).
//
// Fix vs. original: constant strings were wrapped in no-verb
// fmt.Sprintf calls (staticcheck S1039 / go vet "no formatting
// directives"); they are now written directly.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	LoggerNb := 0
	LoggerResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			w.WriteString(fmt.Sprintf("Logger%d := log.New(bytes.NewBuffer(%#+v), %#+v, int(%#+v))\n", LoggerNb, a.New.Out, a.New.Prefix, a.New.Flag))
			LoggerNb = LoggerNb + 1
		case *NgoloFuzzOne_LoggerNgdotSetOutput:
			// Method calls need at least one logger to have been created.
			if LoggerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Logger%d.SetOutput(bytes.NewBuffer(%#+v))\n", LoggerResultsIndex, a.LoggerNgdotSetOutput.W))
			LoggerResultsIndex = (LoggerResultsIndex + 1) % LoggerNb
		case *NgoloFuzzOne_Default:
			w.WriteString(fmt.Sprintf("Logger%d := log.Default()\n", LoggerNb))
			LoggerNb = LoggerNb + 1
		case *NgoloFuzzOne_LoggerNgdotOutput:
			if LoggerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Logger%d.Output(int(%#+v), %#+v)\n", LoggerResultsIndex, a.LoggerNgdotOutput.Calldepth, a.LoggerNgdotOutput.S))
			LoggerResultsIndex = (LoggerResultsIndex + 1) % LoggerNb
		case *NgoloFuzzOne_LoggerNgdotFlags:
			if LoggerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Logger%d.Flags()\n", LoggerResultsIndex))
			LoggerResultsIndex = (LoggerResultsIndex + 1) % LoggerNb
		case *NgoloFuzzOne_LoggerNgdotSetFlags:
			if LoggerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Logger%d.SetFlags(int(%#+v))\n", LoggerResultsIndex, a.LoggerNgdotSetFlags.Flag))
			LoggerResultsIndex = (LoggerResultsIndex + 1) % LoggerNb
		case *NgoloFuzzOne_LoggerNgdotPrefix:
			if LoggerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Logger%d.Prefix()\n", LoggerResultsIndex))
			LoggerResultsIndex = (LoggerResultsIndex + 1) % LoggerNb
		case *NgoloFuzzOne_LoggerNgdotSetPrefix:
			if LoggerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Logger%d.SetPrefix(%#+v)\n", LoggerResultsIndex, a.LoggerNgdotSetPrefix.Prefix))
			LoggerResultsIndex = (LoggerResultsIndex + 1) % LoggerNb
		case *NgoloFuzzOne_LoggerNgdotWriter:
			if LoggerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Logger%d.Writer()\n", LoggerResultsIndex))
			LoggerResultsIndex = (LoggerResultsIndex + 1) % LoggerNb
		case *NgoloFuzzOne_SetOutput:
			w.WriteString(fmt.Sprintf("log.SetOutput(bytes.NewBuffer(%#+v))\n", a.SetOutput.W))
		case *NgoloFuzzOne_Flags:
			w.WriteString("log.Flags()\n")
		case *NgoloFuzzOne_SetFlags:
			w.WriteString(fmt.Sprintf("log.SetFlags(int(%#+v))\n", a.SetFlags.Flag))
		case *NgoloFuzzOne_Prefix:
			w.WriteString("log.Prefix()\n")
		case *NgoloFuzzOne_SetPrefix:
			w.WriteString(fmt.Sprintf("log.SetPrefix(%#+v)\n", a.SetPrefix.Prefix))
		case *NgoloFuzzOne_Writer:
			w.WriteString("log.Writer()\n")
		case *NgoloFuzzOne_Output:
			w.WriteString(fmt.Sprintf("log.Output(int(%#+v), %#+v)\n", a.Output.Calldepth, a.Output.S))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_log
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Out []byte `protobuf:"bytes,1,opt,name=out,proto3" json:"out,omitempty"`
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
Flag int64 `protobuf:"varint,3,opt,name=flag,proto3" json:"flag,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
*x = NewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewArgs) GetOut() []byte {
if x != nil {
return x.Out
}
return nil
}
func (x *NewArgs) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
func (x *NewArgs) GetFlag() int64 {
if x != nil {
return x.Flag
}
return 0
}
type LoggerNgdotSetOutputArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *LoggerNgdotSetOutputArgs) Reset() {
*x = LoggerNgdotSetOutputArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *LoggerNgdotSetOutputArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LoggerNgdotSetOutputArgs) ProtoMessage() {}
func (x *LoggerNgdotSetOutputArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LoggerNgdotSetOutputArgs.ProtoReflect.Descriptor instead.
func (*LoggerNgdotSetOutputArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *LoggerNgdotSetOutputArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type DefaultArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DefaultArgs) Reset() {
*x = DefaultArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DefaultArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DefaultArgs) ProtoMessage() {}
func (x *DefaultArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DefaultArgs.ProtoReflect.Descriptor instead.
func (*DefaultArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
type LoggerNgdotOutputArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Calldepth int64 `protobuf:"varint,1,opt,name=calldepth,proto3" json:"calldepth,omitempty"`
S string `protobuf:"bytes,2,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *LoggerNgdotOutputArgs) Reset() {
*x = LoggerNgdotOutputArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *LoggerNgdotOutputArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LoggerNgdotOutputArgs) ProtoMessage() {}
func (x *LoggerNgdotOutputArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LoggerNgdotOutputArgs.ProtoReflect.Descriptor instead.
func (*LoggerNgdotOutputArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *LoggerNgdotOutputArgs) GetCalldepth() int64 {
if x != nil {
return x.Calldepth
}
return 0
}
func (x *LoggerNgdotOutputArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
type LoggerNgdotFlagsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *LoggerNgdotFlagsArgs) Reset() {
*x = LoggerNgdotFlagsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *LoggerNgdotFlagsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LoggerNgdotFlagsArgs) ProtoMessage() {}
func (x *LoggerNgdotFlagsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LoggerNgdotFlagsArgs.ProtoReflect.Descriptor instead.
func (*LoggerNgdotFlagsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type LoggerNgdotSetFlagsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Flag int64 `protobuf:"varint,1,opt,name=flag,proto3" json:"flag,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *LoggerNgdotSetFlagsArgs) Reset() {
*x = LoggerNgdotSetFlagsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *LoggerNgdotSetFlagsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LoggerNgdotSetFlagsArgs) ProtoMessage() {}
func (x *LoggerNgdotSetFlagsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LoggerNgdotSetFlagsArgs.ProtoReflect.Descriptor instead.
func (*LoggerNgdotSetFlagsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *LoggerNgdotSetFlagsArgs) GetFlag() int64 {
if x != nil {
return x.Flag
}
return 0
}
type LoggerNgdotPrefixArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *LoggerNgdotPrefixArgs) Reset() {
*x = LoggerNgdotPrefixArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *LoggerNgdotPrefixArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LoggerNgdotPrefixArgs) ProtoMessage() {}
func (x *LoggerNgdotPrefixArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LoggerNgdotPrefixArgs.ProtoReflect.Descriptor instead.
func (*LoggerNgdotPrefixArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
type LoggerNgdotSetPrefixArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *LoggerNgdotSetPrefixArgs) Reset() {
*x = LoggerNgdotSetPrefixArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *LoggerNgdotSetPrefixArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LoggerNgdotSetPrefixArgs) ProtoMessage() {}
func (x *LoggerNgdotSetPrefixArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LoggerNgdotSetPrefixArgs.ProtoReflect.Descriptor instead.
func (*LoggerNgdotSetPrefixArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *LoggerNgdotSetPrefixArgs) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
type LoggerNgdotWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *LoggerNgdotWriterArgs) Reset() {
*x = LoggerNgdotWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *LoggerNgdotWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LoggerNgdotWriterArgs) ProtoMessage() {}
func (x *LoggerNgdotWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LoggerNgdotWriterArgs.ProtoReflect.Descriptor instead.
func (*LoggerNgdotWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
type SetOutputArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetOutputArgs) Reset() {
*x = SetOutputArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetOutputArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetOutputArgs) ProtoMessage() {}
func (x *SetOutputArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetOutputArgs.ProtoReflect.Descriptor instead.
func (*SetOutputArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
func (x *SetOutputArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type FlagsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FlagsArgs) Reset() {
*x = FlagsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FlagsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FlagsArgs) ProtoMessage() {}
func (x *FlagsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FlagsArgs.ProtoReflect.Descriptor instead.
func (*FlagsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
type SetFlagsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Flag int64 `protobuf:"varint,1,opt,name=flag,proto3" json:"flag,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetFlagsArgs) Reset() {
*x = SetFlagsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetFlagsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetFlagsArgs) ProtoMessage() {}
func (x *SetFlagsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetFlagsArgs.ProtoReflect.Descriptor instead.
func (*SetFlagsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *SetFlagsArgs) GetFlag() int64 {
if x != nil {
return x.Flag
}
return 0
}
type PrefixArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PrefixArgs) Reset() {
*x = PrefixArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PrefixArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PrefixArgs) ProtoMessage() {}
func (x *PrefixArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PrefixArgs.ProtoReflect.Descriptor instead.
func (*PrefixArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
type SetPrefixArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetPrefixArgs) Reset() {
*x = SetPrefixArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetPrefixArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetPrefixArgs) ProtoMessage() {}
func (x *SetPrefixArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetPrefixArgs.ProtoReflect.Descriptor instead.
func (*SetPrefixArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
func (x *SetPrefixArgs) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
type WriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterArgs) Reset() {
*x = WriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterArgs) ProtoMessage() {}
func (x *WriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterArgs.ProtoReflect.Descriptor instead.
func (*WriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
type OutputArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Calldepth int64 `protobuf:"varint,1,opt,name=calldepth,proto3" json:"calldepth,omitempty"`
S string `protobuf:"bytes,2,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *OutputArgs) Reset() {
*x = OutputArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *OutputArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OutputArgs) ProtoMessage() {}
func (x *OutputArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OutputArgs.ProtoReflect.Descriptor instead.
func (*OutputArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
func (x *OutputArgs) GetCalldepth() int64 {
if x != nil {
return x.Calldepth
}
return 0
}
func (x *OutputArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_New
// *NgoloFuzzOne_LoggerNgdotSetOutput
// *NgoloFuzzOne_Default
// *NgoloFuzzOne_LoggerNgdotOutput
// *NgoloFuzzOne_LoggerNgdotFlags
// *NgoloFuzzOne_LoggerNgdotSetFlags
// *NgoloFuzzOne_LoggerNgdotPrefix
// *NgoloFuzzOne_LoggerNgdotSetPrefix
// *NgoloFuzzOne_LoggerNgdotWriter
// *NgoloFuzzOne_SetOutput
// *NgoloFuzzOne_Flags
// *NgoloFuzzOne_SetFlags
// *NgoloFuzzOne_Prefix
// *NgoloFuzzOne_SetPrefix
// *NgoloFuzzOne_Writer
// *NgoloFuzzOne_Output
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNew() *NewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
return x.New
}
}
return nil
}
func (x *NgoloFuzzOne) GetLoggerNgdotSetOutput() *LoggerNgdotSetOutputArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LoggerNgdotSetOutput); ok {
return x.LoggerNgdotSetOutput
}
}
return nil
}
func (x *NgoloFuzzOne) GetDefault() *DefaultArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Default); ok {
return x.Default
}
}
return nil
}
func (x *NgoloFuzzOne) GetLoggerNgdotOutput() *LoggerNgdotOutputArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LoggerNgdotOutput); ok {
return x.LoggerNgdotOutput
}
}
return nil
}
func (x *NgoloFuzzOne) GetLoggerNgdotFlags() *LoggerNgdotFlagsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LoggerNgdotFlags); ok {
return x.LoggerNgdotFlags
}
}
return nil
}
func (x *NgoloFuzzOne) GetLoggerNgdotSetFlags() *LoggerNgdotSetFlagsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LoggerNgdotSetFlags); ok {
return x.LoggerNgdotSetFlags
}
}
return nil
}
func (x *NgoloFuzzOne) GetLoggerNgdotPrefix() *LoggerNgdotPrefixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LoggerNgdotPrefix); ok {
return x.LoggerNgdotPrefix
}
}
return nil
}
func (x *NgoloFuzzOne) GetLoggerNgdotSetPrefix() *LoggerNgdotSetPrefixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LoggerNgdotSetPrefix); ok {
return x.LoggerNgdotSetPrefix
}
}
return nil
}
func (x *NgoloFuzzOne) GetLoggerNgdotWriter() *LoggerNgdotWriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LoggerNgdotWriter); ok {
return x.LoggerNgdotWriter
}
}
return nil
}
func (x *NgoloFuzzOne) GetSetOutput() *SetOutputArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SetOutput); ok {
return x.SetOutput
}
}
return nil
}
func (x *NgoloFuzzOne) GetFlags() *FlagsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Flags); ok {
return x.Flags
}
}
return nil
}
func (x *NgoloFuzzOne) GetSetFlags() *SetFlagsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SetFlags); ok {
return x.SetFlags
}
}
return nil
}
func (x *NgoloFuzzOne) GetPrefix() *PrefixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Prefix); ok {
return x.Prefix
}
}
return nil
}
func (x *NgoloFuzzOne) GetSetPrefix() *SetPrefixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SetPrefix); ok {
return x.SetPrefix
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriter() *WriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Writer); ok {
return x.Writer
}
}
return nil
}
func (x *NgoloFuzzOne) GetOutput() *OutputArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Output); ok {
return x.Output
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_New struct {
New *NewArgs `protobuf:"bytes,1,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_LoggerNgdotSetOutput struct {
LoggerNgdotSetOutput *LoggerNgdotSetOutputArgs `protobuf:"bytes,2,opt,name=LoggerNgdotSetOutput,proto3,oneof"`
}
type NgoloFuzzOne_Default struct {
Default *DefaultArgs `protobuf:"bytes,3,opt,name=Default,proto3,oneof"`
}
type NgoloFuzzOne_LoggerNgdotOutput struct {
LoggerNgdotOutput *LoggerNgdotOutputArgs `protobuf:"bytes,4,opt,name=LoggerNgdotOutput,proto3,oneof"`
}
type NgoloFuzzOne_LoggerNgdotFlags struct {
LoggerNgdotFlags *LoggerNgdotFlagsArgs `protobuf:"bytes,5,opt,name=LoggerNgdotFlags,proto3,oneof"`
}
type NgoloFuzzOne_LoggerNgdotSetFlags struct {
LoggerNgdotSetFlags *LoggerNgdotSetFlagsArgs `protobuf:"bytes,6,opt,name=LoggerNgdotSetFlags,proto3,oneof"`
}
type NgoloFuzzOne_LoggerNgdotPrefix struct {
LoggerNgdotPrefix *LoggerNgdotPrefixArgs `protobuf:"bytes,7,opt,name=LoggerNgdotPrefix,proto3,oneof"`
}
type NgoloFuzzOne_LoggerNgdotSetPrefix struct {
LoggerNgdotSetPrefix *LoggerNgdotSetPrefixArgs `protobuf:"bytes,8,opt,name=LoggerNgdotSetPrefix,proto3,oneof"`
}
type NgoloFuzzOne_LoggerNgdotWriter struct {
LoggerNgdotWriter *LoggerNgdotWriterArgs `protobuf:"bytes,9,opt,name=LoggerNgdotWriter,proto3,oneof"`
}
type NgoloFuzzOne_SetOutput struct {
SetOutput *SetOutputArgs `protobuf:"bytes,10,opt,name=SetOutput,proto3,oneof"`
}
type NgoloFuzzOne_Flags struct {
Flags *FlagsArgs `protobuf:"bytes,11,opt,name=Flags,proto3,oneof"`
}
type NgoloFuzzOne_SetFlags struct {
SetFlags *SetFlagsArgs `protobuf:"bytes,12,opt,name=SetFlags,proto3,oneof"`
}
type NgoloFuzzOne_Prefix struct {
Prefix *PrefixArgs `protobuf:"bytes,13,opt,name=Prefix,proto3,oneof"`
}
type NgoloFuzzOne_SetPrefix struct {
SetPrefix *SetPrefixArgs `protobuf:"bytes,14,opt,name=SetPrefix,proto3,oneof"`
}
type NgoloFuzzOne_Writer struct {
Writer *WriterArgs `protobuf:"bytes,15,opt,name=Writer,proto3,oneof"`
}
type NgoloFuzzOne_Output struct {
Output *OutputArgs `protobuf:"bytes,16,opt,name=Output,proto3,oneof"`
}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoggerNgdotSetOutput) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Default) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoggerNgdotOutput) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoggerNgdotFlags) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoggerNgdotSetFlags) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoggerNgdotPrefix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoggerNgdotSetPrefix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoggerNgdotWriter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SetOutput) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Flags) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SetFlags) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Prefix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SetPrefix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Writer) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Output) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"G\n" +
"\aNewArgs\x12\x10\n" +
"\x03out\x18\x01 \x01(\fR\x03out\x12\x16\n" +
"\x06prefix\x18\x02 \x01(\tR\x06prefix\x12\x12\n" +
"\x04flag\x18\x03 \x01(\x03R\x04flag\"(\n" +
"\x18LoggerNgdotSetOutputArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"\r\n" +
"\vDefaultArgs\"C\n" +
"\x15LoggerNgdotOutputArgs\x12\x1c\n" +
"\tcalldepth\x18\x01 \x01(\x03R\tcalldepth\x12\f\n" +
"\x01s\x18\x02 \x01(\tR\x01s\"\x16\n" +
"\x14LoggerNgdotFlagsArgs\"-\n" +
"\x17LoggerNgdotSetFlagsArgs\x12\x12\n" +
"\x04flag\x18\x01 \x01(\x03R\x04flag\"\x17\n" +
"\x15LoggerNgdotPrefixArgs\"2\n" +
"\x18LoggerNgdotSetPrefixArgs\x12\x16\n" +
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x17\n" +
"\x15LoggerNgdotWriterArgs\"\x1d\n" +
"\rSetOutputArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"\v\n" +
"\tFlagsArgs\"\"\n" +
"\fSetFlagsArgs\x12\x12\n" +
"\x04flag\x18\x01 \x01(\x03R\x04flag\"\f\n" +
"\n" +
"PrefixArgs\"'\n" +
"\rSetPrefixArgs\x12\x16\n" +
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\f\n" +
"\n" +
"WriterArgs\"8\n" +
"\n" +
"OutputArgs\x12\x1c\n" +
"\tcalldepth\x18\x01 \x01(\x03R\tcalldepth\x12\f\n" +
"\x01s\x18\x02 \x01(\tR\x01s\"\xb1\b\n" +
"\fNgoloFuzzOne\x12&\n" +
"\x03New\x18\x01 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12Y\n" +
"\x14LoggerNgdotSetOutput\x18\x02 \x01(\v2#.ngolofuzz.LoggerNgdotSetOutputArgsH\x00R\x14LoggerNgdotSetOutput\x122\n" +
"\aDefault\x18\x03 \x01(\v2\x16.ngolofuzz.DefaultArgsH\x00R\aDefault\x12P\n" +
"\x11LoggerNgdotOutput\x18\x04 \x01(\v2 .ngolofuzz.LoggerNgdotOutputArgsH\x00R\x11LoggerNgdotOutput\x12M\n" +
"\x10LoggerNgdotFlags\x18\x05 \x01(\v2\x1f.ngolofuzz.LoggerNgdotFlagsArgsH\x00R\x10LoggerNgdotFlags\x12V\n" +
"\x13LoggerNgdotSetFlags\x18\x06 \x01(\v2\".ngolofuzz.LoggerNgdotSetFlagsArgsH\x00R\x13LoggerNgdotSetFlags\x12P\n" +
"\x11LoggerNgdotPrefix\x18\a \x01(\v2 .ngolofuzz.LoggerNgdotPrefixArgsH\x00R\x11LoggerNgdotPrefix\x12Y\n" +
"\x14LoggerNgdotSetPrefix\x18\b \x01(\v2#.ngolofuzz.LoggerNgdotSetPrefixArgsH\x00R\x14LoggerNgdotSetPrefix\x12P\n" +
"\x11LoggerNgdotWriter\x18\t \x01(\v2 .ngolofuzz.LoggerNgdotWriterArgsH\x00R\x11LoggerNgdotWriter\x128\n" +
"\tSetOutput\x18\n" +
" \x01(\v2\x18.ngolofuzz.SetOutputArgsH\x00R\tSetOutput\x12,\n" +
"\x05Flags\x18\v \x01(\v2\x14.ngolofuzz.FlagsArgsH\x00R\x05Flags\x125\n" +
"\bSetFlags\x18\f \x01(\v2\x17.ngolofuzz.SetFlagsArgsH\x00R\bSetFlags\x12/\n" +
"\x06Prefix\x18\r \x01(\v2\x15.ngolofuzz.PrefixArgsH\x00R\x06Prefix\x128\n" +
"\tSetPrefix\x18\x0e \x01(\v2\x18.ngolofuzz.SetPrefixArgsH\x00R\tSetPrefix\x12/\n" +
"\x06Writer\x18\x0f \x01(\v2\x15.ngolofuzz.WriterArgsH\x00R\x06Writer\x12/\n" +
"\x06Output\x18\x10 \x01(\v2\x15.ngolofuzz.OutputArgsH\x00R\x06OutputB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x10Z\x0e./;fuzz_ng_logb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file descriptor
// exactly once (guarded by file_ngolofuzz_proto_rawDescOnce) and returns the
// cached bytes. Only the deprecated Descriptor()/EnumDescriptor() accessors
// call it.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		// unsafe.Slice over the string data avoids copying the descriptor
		// before compression.
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
var file_ngolofuzz_proto_goTypes = []any{
(*NewArgs)(nil), // 0: ngolofuzz.NewArgs
(*LoggerNgdotSetOutputArgs)(nil), // 1: ngolofuzz.LoggerNgdotSetOutputArgs
(*DefaultArgs)(nil), // 2: ngolofuzz.DefaultArgs
(*LoggerNgdotOutputArgs)(nil), // 3: ngolofuzz.LoggerNgdotOutputArgs
(*LoggerNgdotFlagsArgs)(nil), // 4: ngolofuzz.LoggerNgdotFlagsArgs
(*LoggerNgdotSetFlagsArgs)(nil), // 5: ngolofuzz.LoggerNgdotSetFlagsArgs
(*LoggerNgdotPrefixArgs)(nil), // 6: ngolofuzz.LoggerNgdotPrefixArgs
(*LoggerNgdotSetPrefixArgs)(nil), // 7: ngolofuzz.LoggerNgdotSetPrefixArgs
(*LoggerNgdotWriterArgs)(nil), // 8: ngolofuzz.LoggerNgdotWriterArgs
(*SetOutputArgs)(nil), // 9: ngolofuzz.SetOutputArgs
(*FlagsArgs)(nil), // 10: ngolofuzz.FlagsArgs
(*SetFlagsArgs)(nil), // 11: ngolofuzz.SetFlagsArgs
(*PrefixArgs)(nil), // 12: ngolofuzz.PrefixArgs
(*SetPrefixArgs)(nil), // 13: ngolofuzz.SetPrefixArgs
(*WriterArgs)(nil), // 14: ngolofuzz.WriterArgs
(*OutputArgs)(nil), // 15: ngolofuzz.OutputArgs
(*NgoloFuzzOne)(nil), // 16: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 17: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 18: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
1, // 1: ngolofuzz.NgoloFuzzOne.LoggerNgdotSetOutput:type_name -> ngolofuzz.LoggerNgdotSetOutputArgs
2, // 2: ngolofuzz.NgoloFuzzOne.Default:type_name -> ngolofuzz.DefaultArgs
3, // 3: ngolofuzz.NgoloFuzzOne.LoggerNgdotOutput:type_name -> ngolofuzz.LoggerNgdotOutputArgs
4, // 4: ngolofuzz.NgoloFuzzOne.LoggerNgdotFlags:type_name -> ngolofuzz.LoggerNgdotFlagsArgs
5, // 5: ngolofuzz.NgoloFuzzOne.LoggerNgdotSetFlags:type_name -> ngolofuzz.LoggerNgdotSetFlagsArgs
6, // 6: ngolofuzz.NgoloFuzzOne.LoggerNgdotPrefix:type_name -> ngolofuzz.LoggerNgdotPrefixArgs
7, // 7: ngolofuzz.NgoloFuzzOne.LoggerNgdotSetPrefix:type_name -> ngolofuzz.LoggerNgdotSetPrefixArgs
8, // 8: ngolofuzz.NgoloFuzzOne.LoggerNgdotWriter:type_name -> ngolofuzz.LoggerNgdotWriterArgs
9, // 9: ngolofuzz.NgoloFuzzOne.SetOutput:type_name -> ngolofuzz.SetOutputArgs
10, // 10: ngolofuzz.NgoloFuzzOne.Flags:type_name -> ngolofuzz.FlagsArgs
11, // 11: ngolofuzz.NgoloFuzzOne.SetFlags:type_name -> ngolofuzz.SetFlagsArgs
12, // 12: ngolofuzz.NgoloFuzzOne.Prefix:type_name -> ngolofuzz.PrefixArgs
13, // 13: ngolofuzz.NgoloFuzzOne.SetPrefix:type_name -> ngolofuzz.SetPrefixArgs
14, // 14: ngolofuzz.NgoloFuzzOne.Writer:type_name -> ngolofuzz.WriterArgs
15, // 15: ngolofuzz.NgoloFuzzOne.Output:type_name -> ngolofuzz.OutputArgs
16, // 16: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
17, // [17:17] is the sub-list for method output_type
17, // [17:17] is the sub-list for method input_type
17, // [17:17] is the sub-list for extension type_name
17, // [17:17] is the sub-list for extension extendee
0, // [0:17] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers the generated types with the protobuf
// runtime. It is idempotent: the File_ngolofuzz_proto guard makes repeated
// calls (from init or rawDescGZIP paths) no-ops.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Oneof wrapper types for NgoloFuzzOne (msgTypes[16]).
	file_ngolofuzz_proto_msgTypes[16].OneofWrappers = []any{
		(*NgoloFuzzOne_New)(nil),
		(*NgoloFuzzOne_LoggerNgdotSetOutput)(nil),
		(*NgoloFuzzOne_Default)(nil),
		(*NgoloFuzzOne_LoggerNgdotOutput)(nil),
		(*NgoloFuzzOne_LoggerNgdotFlags)(nil),
		(*NgoloFuzzOne_LoggerNgdotSetFlags)(nil),
		(*NgoloFuzzOne_LoggerNgdotPrefix)(nil),
		(*NgoloFuzzOne_LoggerNgdotSetPrefix)(nil),
		(*NgoloFuzzOne_LoggerNgdotWriter)(nil),
		(*NgoloFuzzOne_SetOutput)(nil),
		(*NgoloFuzzOne_Flags)(nil),
		(*NgoloFuzzOne_SetFlags)(nil),
		(*NgoloFuzzOne_Prefix)(nil),
		(*NgoloFuzzOne_SetPrefix)(nil),
		(*NgoloFuzzOne_Writer)(nil),
		(*NgoloFuzzOne_Output)(nil),
	}
	// Oneof wrapper types for NgoloFuzzAny (msgTypes[17]).
	file_ngolofuzz_proto_msgTypes[17].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   19,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the bootstrap tables so they can be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_log_syslog
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"log/syslog"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub handed to the code under test:
// reads drain a fuzzer-supplied buffer and writes are discarded.
type FuzzingConn struct {
	buf    []byte // fuzzer-provided bytes served by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of c.buf into b and returns the number
// of bytes actually copied, per the io.Reader contract; once the buffer is
// exhausted it returns io.EOF.
//
// Bug fix: the number of unread bytes is len(c.buf)-c.offset. The previous
// condition compared len(b) against len(c.buf)+c.offset, so a Read with a
// small destination near the end of the buffer reported more bytes than were
// copied and advanced offset past len(buf).
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// Destination too small for the rest of the buffer: fill it.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// Destination can hold everything left: drain the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write pretends the whole payload was sent; the data is discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the remaining buffered bytes as consumed so subsequent reads
// return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a stub net.Addr describing the fake fuzzing connection.
type FuzzingAddr struct{}

// Network reports a fixed placeholder network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed placeholder address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr implements net.Conn with a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr implements net.Conn with a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; the in-memory connection cannot time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn whose reads serve a from the
// start (offset starts at its zero value).
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only generate the helper functions below when they are actually needed.
// CreateBigInt interprets a as a big-endian unsigned integer, as
// big.Int.SetBytes does; an empty slice yields zero.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader that yields exactly the bytes
// of a.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to the platform int type,
// preserving order and length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (low 16 bits),
// preserving order and length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or the NUL rune when s is empty.
// Invalid UTF-8 at the start of s yields utf8.RuneError, matching a
// for-range over the string.
func GetRune(s string) rune {
	if runes := []rune(s); len(runes) > 0 {
		return runes[0]
	}
	return '\x00'
}
// PriorityNewFromFuzz maps a fuzzer-chosen PriorityEnum onto the
// corresponding syslog severity; any out-of-range value falls back to
// LOG_EMERG (which is also the mapping for 0).
func PriorityNewFromFuzz(p PriorityEnum) syslog.Priority {
	severities := [...]syslog.Priority{
		syslog.LOG_EMERG,
		syslog.LOG_ALERT,
		syslog.LOG_CRIT,
		syslog.LOG_ERR,
		syslog.LOG_WARNING,
		syslog.LOG_NOTICE,
		syslog.LOG_INFO,
		syslog.LOG_DEBUG,
	}
	if p >= 0 && p < PriorityEnum(len(severities)) {
		return severities[p]
	}
	return syslog.LOG_EMERG
}
// ConvertPriorityNewFromFuzz maps each element of a through
// PriorityNewFromFuzz, preserving order and length.
func ConvertPriorityNewFromFuzz(a []PriorityEnum) []syslog.Priority {
	out := make([]syslog.Priority, 0, len(a))
	for _, p := range a {
		out = append(out, PriorityNewFromFuzz(p))
	}
	return out
}
// FuzzNG_valid is the fuzz entry point for inputs known to be a valid
// protobuf encoding of NgoloFuzzList (e.g. produced by a protobuf-aware
// mutator). Unlike FuzzNG_unsure, an undecodable input is treated as a
// harness bug and crashes the fuzzer.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// This panic fires before the recover below is installed, so it
		// is NOT swallowed: invalid corpus entries must crash.
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the exercised API (treated as
	// tolerated failures); re-raise anything else as a real crash.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: string panics are tolerated
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzz entry point for raw, untrusted input: bytes that
// fail to unmarshal as an NgoloFuzzList are silently ignored instead of
// panicking (contrast with FuzzNG_valid).
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Not a valid protobuf message: uninteresting input.
		return 0
	}
	// Swallow string panics raised by the exercised API (treated as
	// tolerated failures); re-raise anything else as a real crash.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: string panics are tolerated
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets gen.List as a sequence of log/syslog API calls and
// executes them in order. Writers produced by the New/Dial constructors are
// pooled, and method-call actions pick a receiver from the pool round-robin.
// Returns 1 when the whole list was executed, 0 when execution stopped early
// (API error, empty list cap, or over-long list).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On first use, optionally dump a Go-source reproducer of this
		// input to the file named by the FUZZ_NG_REPRODUCER env var.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Pool of successfully created writers; method calls cycle through it.
	var WriterResults []*syslog.Writer
	WriterResultsIndex := 0
	for l := range gen.List {
		// Cap the number of executed actions to keep individual runs fast.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			arg0 := PriorityNewFromFuzz(a.New.Priority)
			r0, r1 := syslog.New(arg0, a.New.Tag)
			if r0 != nil {
				WriterResults = append(WriterResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Dial:
			arg2 := PriorityNewFromFuzz(a.Dial.Priority)
			r0, r1 := syslog.Dial(a.Dial.Network, a.Dial.Raddr, arg2, a.Dial.Tag)
			if r0 != nil {
				WriterResults = append(WriterResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotWrite:
			// Method actions are skipped until a constructor succeeded.
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			_, r1 := arg0.Write(a.WriterNgdotWrite.B)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotClose:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Close()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotEmerg:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Emerg(a.WriterNgdotEmerg.M)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotAlert:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Alert(a.WriterNgdotAlert.M)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotCrit:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Crit(a.WriterNgdotCrit.M)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotErr:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Err(a.WriterNgdotErr.M)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotWarning:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Warning(a.WriterNgdotWarning.M)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotNotice:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Notice(a.WriterNgdotNotice.M)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotInfo:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Info(a.WriterNgdotInfo.M)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotDebug:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Debug(a.WriterNgdotDebug.M)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_NewLogger:
			arg0 := PriorityNewFromFuzz(a.NewLogger.P)
			arg1 := int(a.NewLogger.LogFlag)
			_, r1 := syslog.NewLogger(arg0, arg1)
			if r1 != nil {
				r1.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w: one source line
// per fuzzer action, mirroring the dispatch order of FuzzNG_List.
// NOTE(review): WriterNb counts every constructor action, whereas FuzzNG_List
// only pools writers whose constructor succeeded, so the reproducer's writer
// numbering can diverge from the executed sequence — confirm upstream.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	WriterNb := 0
	WriterResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_New:
			w.WriteString(fmt.Sprintf("Writer%d, _ := syslog.New(PriorityNewFromFuzz(%#+v), %#+v)\n", WriterNb, a.New.Priority, a.New.Tag))
			WriterNb = WriterNb + 1
		case *NgoloFuzzOne_Dial:
			w.WriteString(fmt.Sprintf("Writer%d, _ := syslog.Dial(%#+v, %#+v, PriorityNewFromFuzz(%#+v), %#+v)\n", WriterNb, a.Dial.Network, a.Dial.Raddr, a.Dial.Priority, a.Dial.Tag))
			WriterNb = WriterNb + 1
		case *NgoloFuzzOne_WriterNgdotWrite:
			// Method actions are emitted only once a writer exists.
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Write(%#+v)\n", WriterResultsIndex, a.WriterNgdotWrite.B))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotClose:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Close()\n", WriterResultsIndex))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotEmerg:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Emerg(%#+v)\n", WriterResultsIndex, a.WriterNgdotEmerg.M))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotAlert:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Alert(%#+v)\n", WriterResultsIndex, a.WriterNgdotAlert.M))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotCrit:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Crit(%#+v)\n", WriterResultsIndex, a.WriterNgdotCrit.M))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotErr:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Err(%#+v)\n", WriterResultsIndex, a.WriterNgdotErr.M))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotWarning:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Warning(%#+v)\n", WriterResultsIndex, a.WriterNgdotWarning.M))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotNotice:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Notice(%#+v)\n", WriterResultsIndex, a.WriterNgdotNotice.M))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotInfo:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Info(%#+v)\n", WriterResultsIndex, a.WriterNgdotInfo.M))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotDebug:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Debug(%#+v)\n", WriterResultsIndex, a.WriterNgdotDebug.M))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_NewLogger:
			w.WriteString(fmt.Sprintf("syslog.NewLogger(PriorityNewFromFuzz(%#+v), int(%#+v))\n", a.NewLogger.P, a.NewLogger.LogFlag))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_log_syslog
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type PriorityEnum int32
const (
PriorityEnum_LOG_EMERG PriorityEnum = 0
PriorityEnum_LOG_ALERT PriorityEnum = 1
PriorityEnum_LOG_CRIT PriorityEnum = 2
PriorityEnum_LOG_ERR PriorityEnum = 3
PriorityEnum_LOG_WARNING PriorityEnum = 4
PriorityEnum_LOG_NOTICE PriorityEnum = 5
PriorityEnum_LOG_INFO PriorityEnum = 6
PriorityEnum_LOG_DEBUG PriorityEnum = 7
)
// Enum value maps for PriorityEnum.
var (
PriorityEnum_name = map[int32]string{
0: "LOG_EMERG",
1: "LOG_ALERT",
2: "LOG_CRIT",
3: "LOG_ERR",
4: "LOG_WARNING",
5: "LOG_NOTICE",
6: "LOG_INFO",
7: "LOG_DEBUG",
}
PriorityEnum_value = map[string]int32{
"LOG_EMERG": 0,
"LOG_ALERT": 1,
"LOG_CRIT": 2,
"LOG_ERR": 3,
"LOG_WARNING": 4,
"LOG_NOTICE": 5,
"LOG_INFO": 6,
"LOG_DEBUG": 7,
}
)
func (x PriorityEnum) Enum() *PriorityEnum {
p := new(PriorityEnum)
*p = x
return p
}
func (x PriorityEnum) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (PriorityEnum) Descriptor() protoreflect.EnumDescriptor {
return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}
func (PriorityEnum) Type() protoreflect.EnumType {
return &file_ngolofuzz_proto_enumTypes[0]
}
func (x PriorityEnum) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use PriorityEnum.Descriptor instead.
func (PriorityEnum) EnumDescriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type NewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Priority PriorityEnum `protobuf:"varint,1,opt,name=priority,proto3,enum=ngolofuzz.PriorityEnum" json:"priority,omitempty"`
Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
*x = NewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewArgs) GetPriority() PriorityEnum {
if x != nil {
return x.Priority
}
return PriorityEnum_LOG_EMERG
}
func (x *NewArgs) GetTag() string {
if x != nil {
return x.Tag
}
return ""
}
type DialArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Network string `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"`
Raddr string `protobuf:"bytes,2,opt,name=raddr,proto3" json:"raddr,omitempty"`
Priority PriorityEnum `protobuf:"varint,3,opt,name=priority,proto3,enum=ngolofuzz.PriorityEnum" json:"priority,omitempty"`
Tag string `protobuf:"bytes,4,opt,name=tag,proto3" json:"tag,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DialArgs) Reset() {
*x = DialArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DialArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DialArgs) ProtoMessage() {}
func (x *DialArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DialArgs.ProtoReflect.Descriptor instead.
func (*DialArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *DialArgs) GetNetwork() string {
if x != nil {
return x.Network
}
return ""
}
func (x *DialArgs) GetRaddr() string {
if x != nil {
return x.Raddr
}
return ""
}
func (x *DialArgs) GetPriority() PriorityEnum {
if x != nil {
return x.Priority
}
return PriorityEnum_LOG_EMERG
}
func (x *DialArgs) GetTag() string {
if x != nil {
return x.Tag
}
return ""
}
type WriterNgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWriteArgs) Reset() {
*x = WriterNgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWriteArgs) ProtoMessage() {}
func (x *WriterNgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *WriterNgdotWriteArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
type WriterNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotCloseArgs) Reset() {
*x = WriterNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotCloseArgs) ProtoMessage() {}
func (x *WriterNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
type WriterNgdotEmergArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
M string `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotEmergArgs) Reset() {
*x = WriterNgdotEmergArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotEmergArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotEmergArgs) ProtoMessage() {}
func (x *WriterNgdotEmergArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotEmergArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotEmergArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *WriterNgdotEmergArgs) GetM() string {
if x != nil {
return x.M
}
return ""
}
type WriterNgdotAlertArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
M string `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotAlertArgs) Reset() {
*x = WriterNgdotAlertArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotAlertArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotAlertArgs) ProtoMessage() {}
func (x *WriterNgdotAlertArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotAlertArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotAlertArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *WriterNgdotAlertArgs) GetM() string {
if x != nil {
return x.M
}
return ""
}
type WriterNgdotCritArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
M string `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotCritArgs) Reset() {
*x = WriterNgdotCritArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotCritArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotCritArgs) ProtoMessage() {}
func (x *WriterNgdotCritArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotCritArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCritArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *WriterNgdotCritArgs) GetM() string {
if x != nil {
return x.M
}
return ""
}
type WriterNgdotErrArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
M string `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotErrArgs) Reset() {
*x = WriterNgdotErrArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotErrArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotErrArgs) ProtoMessage() {}
func (x *WriterNgdotErrArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotErrArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotErrArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *WriterNgdotErrArgs) GetM() string {
if x != nil {
return x.M
}
return ""
}
type WriterNgdotWarningArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
M string `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWarningArgs) Reset() {
*x = WriterNgdotWarningArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWarningArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWarningArgs) ProtoMessage() {}
func (x *WriterNgdotWarningArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWarningArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWarningArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *WriterNgdotWarningArgs) GetM() string {
if x != nil {
return x.M
}
return ""
}
// WriterNgdotNoticeArgs carries the single string argument "m" for the
// fuzz-replayed Writer.Notice call. Generated by protoc-gen-go; do not edit.
type WriterNgdotNoticeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	M             string                 `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset wipes the message to its zero value and re-binds its message info.
func (x *WriterNgdotNoticeArgs) Reset() {
	*x = WriterNgdotNoticeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *WriterNgdotNoticeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotNoticeArgs) ProtoMessage() {}

// ProtoReflect implements protoreflect.Message, lazily caching message info.
func (x *WriterNgdotNoticeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotNoticeArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotNoticeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetM returns field m, or "" when the receiver is nil.
func (x *WriterNgdotNoticeArgs) GetM() string {
	if x != nil {
		return x.M
	}
	return ""
}
// WriterNgdotInfoArgs carries the single string argument "m" for the
// fuzz-replayed Writer.Info call. Generated by protoc-gen-go; do not edit.
type WriterNgdotInfoArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	M             string                 `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset wipes the message to its zero value and re-binds its message info.
func (x *WriterNgdotInfoArgs) Reset() {
	*x = WriterNgdotInfoArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *WriterNgdotInfoArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotInfoArgs) ProtoMessage() {}

// ProtoReflect implements protoreflect.Message, lazily caching message info.
func (x *WriterNgdotInfoArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotInfoArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotInfoArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetM returns field m, or "" when the receiver is nil.
func (x *WriterNgdotInfoArgs) GetM() string {
	if x != nil {
		return x.M
	}
	return ""
}
// WriterNgdotDebugArgs carries the single string argument "m" for the
// fuzz-replayed Writer.Debug call. Generated by protoc-gen-go; do not edit.
type WriterNgdotDebugArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	M             string                 `protobuf:"bytes,1,opt,name=m,proto3" json:"m,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset wipes the message to its zero value and re-binds its message info.
func (x *WriterNgdotDebugArgs) Reset() {
	*x = WriterNgdotDebugArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *WriterNgdotDebugArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WriterNgdotDebugArgs) ProtoMessage() {}

// ProtoReflect implements protoreflect.Message, lazily caching message info.
func (x *WriterNgdotDebugArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotDebugArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotDebugArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetM returns field m, or "" when the receiver is nil.
func (x *WriterNgdotDebugArgs) GetM() string {
	if x != nil {
		return x.M
	}
	return ""
}
// NewLoggerArgs carries the priority enum and log flag for the fuzz-replayed
// NewLogger call. Generated by protoc-gen-go; do not edit by hand.
type NewLoggerArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	P             PriorityEnum           `protobuf:"varint,1,opt,name=p,proto3,enum=ngolofuzz.PriorityEnum" json:"p,omitempty"`
	LogFlag       int64                  `protobuf:"varint,2,opt,name=logFlag,proto3" json:"logFlag,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset wipes the message to its zero value and re-binds its message info.
func (x *NewLoggerArgs) Reset() {
	*x = NewLoggerArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NewLoggerArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewLoggerArgs) ProtoMessage() {}

// ProtoReflect implements protoreflect.Message, lazily caching message info.
func (x *NewLoggerArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewLoggerArgs.ProtoReflect.Descriptor instead.
func (*NewLoggerArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// GetP returns field p, or the enum zero value when the receiver is nil.
func (x *NewLoggerArgs) GetP() PriorityEnum {
	if x != nil {
		return x.P
	}
	return PriorityEnum_LOG_EMERG
}

// GetLogFlag returns field logFlag, or 0 when the receiver is nil.
func (x *NewLoggerArgs) GetLogFlag() int64 {
	if x != nil {
		return x.LogFlag
	}
	return 0
}
// NgoloFuzzOne is one step of a fuzzing scenario: a oneof selecting which
// API call to replay and its arguments. Generated by protoc-gen-go; do not
// edit by hand.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_New
	//	*NgoloFuzzOne_Dial
	//	*NgoloFuzzOne_WriterNgdotWrite
	//	*NgoloFuzzOne_WriterNgdotClose
	//	*NgoloFuzzOne_WriterNgdotEmerg
	//	*NgoloFuzzOne_WriterNgdotAlert
	//	*NgoloFuzzOne_WriterNgdotCrit
	//	*NgoloFuzzOne_WriterNgdotErr
	//	*NgoloFuzzOne_WriterNgdotWarning
	//	*NgoloFuzzOne_WriterNgdotNotice
	//	*NgoloFuzzOne_WriterNgdotInfo
	//	*NgoloFuzzOne_WriterNgdotDebug
	//	*NgoloFuzzOne_NewLogger
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset wipes the message to its zero value and re-binds its message info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect implements protoreflect.Message, lazily caching message info.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// GetItem returns the raw oneof value, or nil when the receiver is nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below return the corresponding oneof payload when it
// is the selected case, and nil otherwise (nil-safe on the receiver).
func (x *NgoloFuzzOne) GetNew() *NewArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
			return x.New
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetDial() *DialArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Dial); ok {
			return x.Dial
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotWrite() *WriterNgdotWriteArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWrite); ok {
			return x.WriterNgdotWrite
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotClose() *WriterNgdotCloseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotClose); ok {
			return x.WriterNgdotClose
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotEmerg() *WriterNgdotEmergArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotEmerg); ok {
			return x.WriterNgdotEmerg
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotAlert() *WriterNgdotAlertArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotAlert); ok {
			return x.WriterNgdotAlert
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotCrit() *WriterNgdotCritArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotCrit); ok {
			return x.WriterNgdotCrit
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotErr() *WriterNgdotErrArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotErr); ok {
			return x.WriterNgdotErr
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotWarning() *WriterNgdotWarningArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWarning); ok {
			return x.WriterNgdotWarning
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotNotice() *WriterNgdotNoticeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotNotice); ok {
			return x.WriterNgdotNotice
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotInfo() *WriterNgdotInfoArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotInfo); ok {
			return x.WriterNgdotInfo
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotDebug() *WriterNgdotDebugArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotDebug); ok {
			return x.WriterNgdotDebug
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewLogger() *NewLoggerArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewLogger); ok {
			return x.NewLogger
		}
	}
	return nil
}
// isNgoloFuzzOne_Item is the sealed marker interface for the NgoloFuzzOne
// "item" oneof; exactly one wrapper type below is assigned at a time.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// One wrapper struct per oneof case; the field number in the protobuf tag
// identifies the case on the wire.
type NgoloFuzzOne_New struct {
	New *NewArgs `protobuf:"bytes,1,opt,name=New,proto3,oneof"`
}

type NgoloFuzzOne_Dial struct {
	Dial *DialArgs `protobuf:"bytes,2,opt,name=Dial,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotWrite struct {
	WriterNgdotWrite *WriterNgdotWriteArgs `protobuf:"bytes,3,opt,name=WriterNgdotWrite,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotClose struct {
	WriterNgdotClose *WriterNgdotCloseArgs `protobuf:"bytes,4,opt,name=WriterNgdotClose,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotEmerg struct {
	WriterNgdotEmerg *WriterNgdotEmergArgs `protobuf:"bytes,5,opt,name=WriterNgdotEmerg,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotAlert struct {
	WriterNgdotAlert *WriterNgdotAlertArgs `protobuf:"bytes,6,opt,name=WriterNgdotAlert,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotCrit struct {
	WriterNgdotCrit *WriterNgdotCritArgs `protobuf:"bytes,7,opt,name=WriterNgdotCrit,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotErr struct {
	WriterNgdotErr *WriterNgdotErrArgs `protobuf:"bytes,8,opt,name=WriterNgdotErr,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotWarning struct {
	WriterNgdotWarning *WriterNgdotWarningArgs `protobuf:"bytes,9,opt,name=WriterNgdotWarning,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotNotice struct {
	WriterNgdotNotice *WriterNgdotNoticeArgs `protobuf:"bytes,10,opt,name=WriterNgdotNotice,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotInfo struct {
	WriterNgdotInfo *WriterNgdotInfoArgs `protobuf:"bytes,11,opt,name=WriterNgdotInfo,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotDebug struct {
	WriterNgdotDebug *WriterNgdotDebugArgs `protobuf:"bytes,12,opt,name=WriterNgdotDebug,proto3,oneof"`
}

type NgoloFuzzOne_NewLogger struct {
	NewLogger *NewLoggerArgs `protobuf:"bytes,13,opt,name=NewLogger,proto3,oneof"`
}

func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Dial) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotWrite) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotClose) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotEmerg) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotAlert) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotCrit) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotErr) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotWarning) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotNotice) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotInfo) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotDebug) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewLogger) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a oneof of primitive scalar values (double/int64/bool/
// string/bytes) used by the fuzzer for type-agnostic arguments. Generated by
// protoc-gen-go; do not edit by hand.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset wipes the message to its zero value and re-binds its message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect implements protoreflect.Message, lazily caching message info.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetItem returns the raw oneof value, or nil when the receiver is nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors return the payload when that case is selected,
// otherwise the scalar zero value.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}
// isNgoloFuzzAny_Item is the sealed marker interface for the NgoloFuzzAny
// "item" oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// One wrapper struct per scalar oneof case.
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne calls replayed by the harness. Generated by protoc-gen-go;
// do not edit by hand.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset wipes the message to its zero value and re-binds its message info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect implements protoreflect.Message, lazily caching message info.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// GetList returns the call sequence, or nil when the receiver is nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the runtime file descriptor, populated by
// file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto, embedded verbatim by protoc-gen-go. Opaque byte data —
// never edit by hand; regenerate from the .proto instead.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"P\n" +
	"\aNewArgs\x123\n" +
	"\bpriority\x18\x01 \x01(\x0e2\x17.ngolofuzz.PriorityEnumR\bpriority\x12\x10\n" +
	"\x03tag\x18\x02 \x01(\tR\x03tag\"\x81\x01\n" +
	"\bDialArgs\x12\x18\n" +
	"\anetwork\x18\x01 \x01(\tR\anetwork\x12\x14\n" +
	"\x05raddr\x18\x02 \x01(\tR\x05raddr\x123\n" +
	"\bpriority\x18\x03 \x01(\x0e2\x17.ngolofuzz.PriorityEnumR\bpriority\x12\x10\n" +
	"\x03tag\x18\x04 \x01(\tR\x03tag\"$\n" +
	"\x14WriterNgdotWriteArgs\x12\f\n" +
	"\x01b\x18\x01 \x01(\fR\x01b\"\x16\n" +
	"\x14WriterNgdotCloseArgs\"$\n" +
	"\x14WriterNgdotEmergArgs\x12\f\n" +
	"\x01m\x18\x01 \x01(\tR\x01m\"$\n" +
	"\x14WriterNgdotAlertArgs\x12\f\n" +
	"\x01m\x18\x01 \x01(\tR\x01m\"#\n" +
	"\x13WriterNgdotCritArgs\x12\f\n" +
	"\x01m\x18\x01 \x01(\tR\x01m\"\"\n" +
	"\x12WriterNgdotErrArgs\x12\f\n" +
	"\x01m\x18\x01 \x01(\tR\x01m\"&\n" +
	"\x16WriterNgdotWarningArgs\x12\f\n" +
	"\x01m\x18\x01 \x01(\tR\x01m\"%\n" +
	"\x15WriterNgdotNoticeArgs\x12\f\n" +
	"\x01m\x18\x01 \x01(\tR\x01m\"#\n" +
	"\x13WriterNgdotInfoArgs\x12\f\n" +
	"\x01m\x18\x01 \x01(\tR\x01m\"$\n" +
	"\x14WriterNgdotDebugArgs\x12\f\n" +
	"\x01m\x18\x01 \x01(\tR\x01m\"P\n" +
	"\rNewLoggerArgs\x12%\n" +
	"\x01p\x18\x01 \x01(\x0e2\x17.ngolofuzz.PriorityEnumR\x01p\x12\x18\n" +
	"\alogFlag\x18\x02 \x01(\x03R\alogFlag\"\xb6\a\n" +
	"\fNgoloFuzzOne\x12&\n" +
	"\x03New\x18\x01 \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12)\n" +
	"\x04Dial\x18\x02 \x01(\v2\x13.ngolofuzz.DialArgsH\x00R\x04Dial\x12M\n" +
	"\x10WriterNgdotWrite\x18\x03 \x01(\v2\x1f.ngolofuzz.WriterNgdotWriteArgsH\x00R\x10WriterNgdotWrite\x12M\n" +
	"\x10WriterNgdotClose\x18\x04 \x01(\v2\x1f.ngolofuzz.WriterNgdotCloseArgsH\x00R\x10WriterNgdotClose\x12M\n" +
	"\x10WriterNgdotEmerg\x18\x05 \x01(\v2\x1f.ngolofuzz.WriterNgdotEmergArgsH\x00R\x10WriterNgdotEmerg\x12M\n" +
	"\x10WriterNgdotAlert\x18\x06 \x01(\v2\x1f.ngolofuzz.WriterNgdotAlertArgsH\x00R\x10WriterNgdotAlert\x12J\n" +
	"\x0fWriterNgdotCrit\x18\a \x01(\v2\x1e.ngolofuzz.WriterNgdotCritArgsH\x00R\x0fWriterNgdotCrit\x12G\n" +
	"\x0eWriterNgdotErr\x18\b \x01(\v2\x1d.ngolofuzz.WriterNgdotErrArgsH\x00R\x0eWriterNgdotErr\x12S\n" +
	"\x12WriterNgdotWarning\x18\t \x01(\v2!.ngolofuzz.WriterNgdotWarningArgsH\x00R\x12WriterNgdotWarning\x12P\n" +
	"\x11WriterNgdotNotice\x18\n" +
	" \x01(\v2 .ngolofuzz.WriterNgdotNoticeArgsH\x00R\x11WriterNgdotNotice\x12J\n" +
	"\x0fWriterNgdotInfo\x18\v \x01(\v2\x1e.ngolofuzz.WriterNgdotInfoArgsH\x00R\x0fWriterNgdotInfo\x12M\n" +
	"\x10WriterNgdotDebug\x18\f \x01(\v2\x1f.ngolofuzz.WriterNgdotDebugArgsH\x00R\x10WriterNgdotDebug\x128\n" +
	"\tNewLogger\x18\r \x01(\v2\x18.ngolofuzz.NewLoggerArgsH\x00R\tNewLoggerB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*\x85\x01\n" +
	"\fPriorityEnum\x12\r\n" +
	"\tLOG_EMERG\x10\x00\x12\r\n" +
	"\tLOG_ALERT\x10\x01\x12\f\n" +
	"\bLOG_CRIT\x10\x02\x12\v\n" +
	"\aLOG_ERR\x10\x03\x12\x0f\n" +
	"\vLOG_WARNING\x10\x04\x12\x0e\n" +
	"\n" +
	"LOG_NOTICE\x10\x05\x12\f\n" +
	"\bLOG_INFO\x10\x06\x12\r\n" +
	"\tLOG_DEBUG\x10\aB\x17Z\x15./;fuzz_ng_log_syslogb\x06proto3"
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor
// exactly once and returns the cached bytes (used by the deprecated
// Descriptor methods).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime type-info tables for the one enum and sixteen messages declared in
// ngolofuzz.proto; filled in by the TypeBuilder in file_ngolofuzz_proto_init.
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
// file_ngolofuzz_proto_goTypes maps descriptor type indexes (enum first,
// then messages in declaration order) to their Go types.
var file_ngolofuzz_proto_goTypes = []any{
	(PriorityEnum)(0),              // 0: ngolofuzz.PriorityEnum
	(*NewArgs)(nil),                // 1: ngolofuzz.NewArgs
	(*DialArgs)(nil),               // 2: ngolofuzz.DialArgs
	(*WriterNgdotWriteArgs)(nil),   // 3: ngolofuzz.WriterNgdotWriteArgs
	(*WriterNgdotCloseArgs)(nil),   // 4: ngolofuzz.WriterNgdotCloseArgs
	(*WriterNgdotEmergArgs)(nil),   // 5: ngolofuzz.WriterNgdotEmergArgs
	(*WriterNgdotAlertArgs)(nil),   // 6: ngolofuzz.WriterNgdotAlertArgs
	(*WriterNgdotCritArgs)(nil),    // 7: ngolofuzz.WriterNgdotCritArgs
	(*WriterNgdotErrArgs)(nil),     // 8: ngolofuzz.WriterNgdotErrArgs
	(*WriterNgdotWarningArgs)(nil), // 9: ngolofuzz.WriterNgdotWarningArgs
	(*WriterNgdotNoticeArgs)(nil),  // 10: ngolofuzz.WriterNgdotNoticeArgs
	(*WriterNgdotInfoArgs)(nil),    // 11: ngolofuzz.WriterNgdotInfoArgs
	(*WriterNgdotDebugArgs)(nil),   // 12: ngolofuzz.WriterNgdotDebugArgs
	(*NewLoggerArgs)(nil),          // 13: ngolofuzz.NewLoggerArgs
	(*NgoloFuzzOne)(nil),           // 14: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),           // 15: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),          // 16: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs encodes field-to-type dependency edges as
// indexes into file_ngolofuzz_proto_goTypes; the trailing entries delimit
// the method/extension sub-lists (all empty here).
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NewArgs.priority:type_name -> ngolofuzz.PriorityEnum
	0,  // 1: ngolofuzz.DialArgs.priority:type_name -> ngolofuzz.PriorityEnum
	0,  // 2: ngolofuzz.NewLoggerArgs.p:type_name -> ngolofuzz.PriorityEnum
	1,  // 3: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
	2,  // 4: ngolofuzz.NgoloFuzzOne.Dial:type_name -> ngolofuzz.DialArgs
	3,  // 5: ngolofuzz.NgoloFuzzOne.WriterNgdotWrite:type_name -> ngolofuzz.WriterNgdotWriteArgs
	4,  // 6: ngolofuzz.NgoloFuzzOne.WriterNgdotClose:type_name -> ngolofuzz.WriterNgdotCloseArgs
	5,  // 7: ngolofuzz.NgoloFuzzOne.WriterNgdotEmerg:type_name -> ngolofuzz.WriterNgdotEmergArgs
	6,  // 8: ngolofuzz.NgoloFuzzOne.WriterNgdotAlert:type_name -> ngolofuzz.WriterNgdotAlertArgs
	7,  // 9: ngolofuzz.NgoloFuzzOne.WriterNgdotCrit:type_name -> ngolofuzz.WriterNgdotCritArgs
	8,  // 10: ngolofuzz.NgoloFuzzOne.WriterNgdotErr:type_name -> ngolofuzz.WriterNgdotErrArgs
	9,  // 11: ngolofuzz.NgoloFuzzOne.WriterNgdotWarning:type_name -> ngolofuzz.WriterNgdotWarningArgs
	10, // 12: ngolofuzz.NgoloFuzzOne.WriterNgdotNotice:type_name -> ngolofuzz.WriterNgdotNoticeArgs
	11, // 13: ngolofuzz.NgoloFuzzOne.WriterNgdotInfo:type_name -> ngolofuzz.WriterNgdotInfoArgs
	12, // 14: ngolofuzz.NgoloFuzzOne.WriterNgdotDebug:type_name -> ngolofuzz.WriterNgdotDebugArgs
	13, // 15: ngolofuzz.NgoloFuzzOne.NewLogger:type_name -> ngolofuzz.NewLoggerArgs
	14, // 16: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	17, // [17:17] is the sub-list for method output_type
	17, // [17:17] is the sub-list for method input_type
	17, // [17:17] is the sub-list for extension type_name
	17, // [17:17] is the sub-list for extension extendee
	0,  // [0:17] is the sub-list for field type_name
}
// init registers the ngolofuzz.proto file descriptor at program start.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init builds and registers the file's runtime type
// information. It is idempotent: a non-nil File_ngolofuzz_proto means the
// work is already done.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Declare the oneof wrapper types so reflection can enumerate the cases
	// of NgoloFuzzOne.item and NgoloFuzzAny.item.
	file_ngolofuzz_proto_msgTypes[13].OneofWrappers = []any{
		(*NgoloFuzzOne_New)(nil),
		(*NgoloFuzzOne_Dial)(nil),
		(*NgoloFuzzOne_WriterNgdotWrite)(nil),
		(*NgoloFuzzOne_WriterNgdotClose)(nil),
		(*NgoloFuzzOne_WriterNgdotEmerg)(nil),
		(*NgoloFuzzOne_WriterNgdotAlert)(nil),
		(*NgoloFuzzOne_WriterNgdotCrit)(nil),
		(*NgoloFuzzOne_WriterNgdotErr)(nil),
		(*NgoloFuzzOne_WriterNgdotWarning)(nil),
		(*NgoloFuzzOne_WriterNgdotNotice)(nil),
		(*NgoloFuzzOne_WriterNgdotInfo)(nil),
		(*NgoloFuzzOne_WriterNgdotDebug)(nil),
		(*NgoloFuzzOne_NewLogger)(nil),
	}
	file_ngolofuzz_proto_msgTypes[14].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      1,
			NumMessages:   16,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		EnumInfos:         file_ngolofuzz_proto_enumTypes,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Allow the construction-only tables to be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_math_big
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn whose read side replays a fixed byte
// slice supplied by the fuzzer; writes are discarded. It lets fuzzed code
// "dial" without touching a real network.
type FuzzingConn struct {
	buf    []byte // bytes served to Read
	offset int    // index of the next unread byte in buf
}

// Read copies the next unread bytes of c.buf into b and returns the number
// of bytes actually copied; once the buffer is exhausted it returns io.EOF.
//
// Fixes the original short-read accounting: the old code compared
// len(b) < len(c.buf)+c.offset instead of the remaining count
// len(c.buf)-c.offset, so a read near the end of the buffer reported len(b)
// bytes while copying fewer, and advanced offset past len(buf).
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends the whole buffer was sent; the data is discarded so the
// fuzzed code never blocks on the write side.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}
// Close marks the connection as drained so subsequent Reads report EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a fixed placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a constant fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a constant fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// Deadlines are ignored: the in-memory connection can never block.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn returns a connection whose reads replay a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit the helper functions below only when the generated harness actually needs them.
// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader so fuzzed APIs taking a
// *bufio.Reader can be fed raw fuzz bytes.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a protobuf []int64 into the []int the fuzzed APIs
// expect (values outside the platform int range are truncated by the
// conversion).
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows a protobuf []int64 into []uint16; each value
// wraps modulo 2^16 per Go's integer conversion rules.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// As with any string range loop, invalid UTF-8 at the start of s yields
// the replacement rune.
func GetRune(s string) rune {
	var first rune // zero value is '\x00', the empty-string result
	for _, c := range s {
		first = c
		break
	}
	return first
}
// FuzzNG_valid is the fuzz entry point for inputs already known to be valid
// protobuf: failure to unmarshal is treated as a harness bug and panics.
// Panics of type string or big.ErrNaN raised while replaying the call list
// are swallowed (math/big signals expected error conditions that way);
// anything else is re-raised as a genuine crash.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Recover must be installed before replaying; it filters expected panic
	// kinds and propagates everything else.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string, big.ErrNaN:
				// expected panic kinds from the library under fuzz: ignore
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzz entry point for arbitrary bytes: input that
// fails to unmarshal as a NgoloFuzzList is rejected (returns 0) rather than
// panicking. Otherwise it behaves like FuzzNG_valid.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string, big.ErrNaN:
				// expected panic kinds from the library under fuzz: ignore
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time FUZZ_NG_REPRODUCER dump setup in FuzzNG_List.
var initialized bool
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var RoundingModeResults []*big.RoundingMode
RoundingModeResultsIndex := 0
var AccuracyResults []*big.Accuracy
AccuracyResultsIndex := 0
var IntResults []*big.Int
IntResultsIndex := 0
var RatResults []*big.Rat
RatResultsIndex := 0
var FloatResults []*big.Float
FloatResultsIndex := 0
for l := range gen.List {
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_AccuracyNgdotString:
if len(AccuracyResults) == 0 {
continue
}
arg0 := AccuracyResults[AccuracyResultsIndex]
AccuracyResultsIndex = (AccuracyResultsIndex + 1) % len(AccuracyResults)
arg0.String()
case *NgoloFuzzOne_NewFloat:
r0 := big.NewFloat(a.NewFloat.X)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotSetPrec:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg1 := uint(a.FloatNgdotSetPrec.Prec)
r0 := arg0.SetPrec(arg1 % 0x10001)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotSetMode:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(RoundingModeResults) == 0 {
continue
}
arg1 := *RoundingModeResults[RoundingModeResultsIndex]
RoundingModeResultsIndex = (RoundingModeResultsIndex + 1) % len(RoundingModeResults)
r0 := arg0.SetMode(arg1)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotPrec:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg0.Prec()
case *NgoloFuzzOne_FloatNgdotMinPrec:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg0.MinPrec()
case *NgoloFuzzOne_FloatNgdotMode:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.Mode()
RoundingModeResults = append(RoundingModeResults, &r0)
case *NgoloFuzzOne_FloatNgdotAcc:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.Acc()
AccuracyResults = append(AccuracyResults, &r0)
case *NgoloFuzzOne_FloatNgdotSign:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg0.Sign()
case *NgoloFuzzOne_FloatNgdotMantExp:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg0.MantExp(arg1)
case *NgoloFuzzOne_FloatNgdotSetMantExp:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg2 := int(a.FloatNgdotSetMantExp.Exp)
r0 := arg0.SetMantExp(arg1, arg2 % 0x10001)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotSignbit:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg0.Signbit()
case *NgoloFuzzOne_FloatNgdotIsInf:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg0.IsInf()
case *NgoloFuzzOne_FloatNgdotIsInt:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg0.IsInt()
case *NgoloFuzzOne_FloatNgdotSetUint64:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.SetUint64(a.FloatNgdotSetUint64.X)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotSetInt64:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.SetInt64(a.FloatNgdotSetInt64.X)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotSetFloat64:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.SetFloat64(a.FloatNgdotSetFloat64.X)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotSetInt:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.SetInt(arg1)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotSetRat:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(RatResults) == 0 {
continue
}
arg1 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.SetRat(arg1)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotSetInf:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.SetInf(a.FloatNgdotSetInf.Signbit)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotSet:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.Set(arg1)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotCopy:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.Copy(arg1)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotUint64:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
_, r1 := arg0.Uint64()
AccuracyResults = append(AccuracyResults, &r1)
case *NgoloFuzzOne_FloatNgdotInt64:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
_, r1 := arg0.Int64()
AccuracyResults = append(AccuracyResults, &r1)
case *NgoloFuzzOne_FloatNgdotFloat32:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
_, r1 := arg0.Float32()
AccuracyResults = append(AccuracyResults, &r1)
case *NgoloFuzzOne_FloatNgdotFloat64:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
_, r1 := arg0.Float64()
AccuracyResults = append(AccuracyResults, &r1)
case *NgoloFuzzOne_FloatNgdotInt:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0, r1 := arg0.Int(arg1)
if r0 != nil{
IntResults = append(IntResults, r0)
}
AccuracyResults = append(AccuracyResults, &r1)
case *NgoloFuzzOne_FloatNgdotRat:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(RatResults) == 0 {
continue
}
arg1 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0, r1 := arg0.Rat(arg1)
if r0 != nil{
RatResults = append(RatResults, r0)
}
AccuracyResults = append(AccuracyResults, &r1)
case *NgoloFuzzOne_FloatNgdotAbs:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.Abs(arg1)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotNeg:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.Neg(arg1)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotAdd:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg2 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.Add(arg1, arg2)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotSub:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg2 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.Sub(arg1, arg2)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotMul:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg2 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.Mul(arg1, arg2)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotCmp:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg0.Cmp(arg1)
case *NgoloFuzzOne_FloatNgdotSetString:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0, _ := arg0.SetString(a.FloatNgdotSetString.S)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
case *NgoloFuzzOne_FloatNgdotParse:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg2 := int(a.FloatNgdotParse.Base)
r0, _, r2 := arg0.Parse(a.FloatNgdotParse.S, arg2)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
if r2 != nil{
r2.Error()
return 0
}
case *NgoloFuzzOne_ParseFloat:
arg1 := int(a.ParseFloat.Base)
arg2 := uint(a.ParseFloat.Prec)
if len(RoundingModeResults) == 0 {
continue
}
arg3 := *RoundingModeResults[RoundingModeResultsIndex]
RoundingModeResultsIndex = (RoundingModeResultsIndex + 1) % len(RoundingModeResults)
r0, _, r2 := big.ParseFloat(a.ParseFloat.S, arg1, arg2, arg3)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
if r2 != nil{
r2.Error()
return 0
}
case *NgoloFuzzOne_FloatNgdotGobEncode:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
_, r1 := arg0.GobEncode()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FloatNgdotGobDecode:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.GobDecode(a.FloatNgdotGobDecode.Buf)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_FloatNgdotAppendText:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
_, r1 := arg0.AppendText(a.FloatNgdotAppendText.B)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FloatNgdotMarshalText:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
_, r1 := arg0.MarshalText()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FloatNgdotUnmarshalText:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.UnmarshalText(a.FloatNgdotUnmarshalText.Text)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_FloatNgdotText:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg1 := byte(a.FloatNgdotText.Format)
arg2 := int(a.FloatNgdotText.Prec)
arg0.Text(arg1, arg2 % 0x10001)
case *NgoloFuzzOne_FloatNgdotString:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg0.String()
case *NgoloFuzzOne_FloatNgdotAppend:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
arg2 := byte(a.FloatNgdotAppend.Fmt)
arg3 := int(a.FloatNgdotAppend.Prec)
arg0.Append(a.FloatNgdotAppend.Buf, arg2, arg3 % 0x10001)
case *NgoloFuzzOne_IntNgdotSign:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.Sign()
case *NgoloFuzzOne_IntNgdotSetInt64:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.SetInt64(a.IntNgdotSetInt64.X)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotSetUint64:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.SetUint64(a.IntNgdotSetUint64.X)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_NewInt:
r0 := big.NewInt(a.NewInt.X)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotSet:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Set(arg1)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotBits:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.Bits()
case *NgoloFuzzOne_IntNgdotAbs:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Abs(arg1)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotNeg:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Neg(arg1)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotAdd:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Add(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotSub:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Sub(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotMul:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Mul(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotMulRange:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.MulRange(a.IntNgdotMulRange.A, a.IntNgdotMulRange.B)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotBinomial:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Binomial(a.IntNgdotBinomial.N % 0x10001, a.IntNgdotBinomial.K % 0x10001)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotRem:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Rem(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotDiv:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Div(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotMod:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Mod(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotDivMod:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg3 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0, r1 := arg0.DivMod(arg1, arg2, arg3)
if r0 != nil{
IntResults = append(IntResults, r0)
}
if r1 != nil{
IntResults = append(IntResults, r1)
}
case *NgoloFuzzOne_IntNgdotCmp:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.Cmp(arg1)
case *NgoloFuzzOne_IntNgdotCmpAbs:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.CmpAbs(arg1)
case *NgoloFuzzOne_IntNgdotInt64:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.Int64()
case *NgoloFuzzOne_IntNgdotUint64:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.Uint64()
case *NgoloFuzzOne_IntNgdotIsInt64:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.IsInt64()
case *NgoloFuzzOne_IntNgdotIsUint64:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.IsUint64()
case *NgoloFuzzOne_IntNgdotFloat64:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
_, r1 := arg0.Float64()
AccuracyResults = append(AccuracyResults, &r1)
case *NgoloFuzzOne_IntNgdotSetString:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg2 := int(a.IntNgdotSetString.Base)
r0, _ := arg0.SetString(a.IntNgdotSetString.S, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotSetBytes:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.SetBytes(a.IntNgdotSetBytes.Buf)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotBytes:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.Bytes()
case *NgoloFuzzOne_IntNgdotFillBytes:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.FillBytes(a.IntNgdotFillBytes.Buf)
case *NgoloFuzzOne_IntNgdotBitLen:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.BitLen()
case *NgoloFuzzOne_IntNgdotTrailingZeroBits:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.TrailingZeroBits()
case *NgoloFuzzOne_IntNgdotExp:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg3 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if arg1.BitLen() + arg2.BitLen() > 1024 && arg3.BitLen() <= 1 {
continue
}
r0 := arg0.Exp(arg1, arg2, arg3)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotGCD:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg3 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg4 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.GCD(arg1, arg2, arg3, arg4)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotModInverse:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.ModInverse(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_Jacobi:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
big.Jacobi(arg0, arg1)
case *NgoloFuzzOne_IntNgdotModSqrt:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if !arg2.ProbablyPrime(20) {
continue
}
r0 := arg0.ModSqrt(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotLsh:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg2 := uint(a.IntNgdotLsh.N)
r0 := arg0.Lsh(arg1, arg2 % 0x10001)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotRsh:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg2 := uint(a.IntNgdotRsh.N)
r0 := arg0.Rsh(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotBit:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg1 := int(a.IntNgdotBit.I)
arg0.Bit(arg1)
case *NgoloFuzzOne_IntNgdotSetBit:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg2 := int(a.IntNgdotSetBit.I)
arg3 := uint(a.IntNgdotSetBit.B)
r0 := arg0.SetBit(arg1, arg2 % 0x10001, arg3)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotAnd:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.And(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotAndNot:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.AndNot(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotOr:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Or(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotXor:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Xor(arg1, arg2)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotNot:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Not(arg1)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotSqrt:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.Sqrt(arg1)
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_IntNgdotText:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg1 := int(a.IntNgdotText.Base)
arg0.Text(arg1)
case *NgoloFuzzOne_IntNgdotAppend:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg2 := int(a.IntNgdotAppend.Base)
arg0.Append(a.IntNgdotAppend.Buf, arg2)
case *NgoloFuzzOne_IntNgdotString:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg0.String()
case *NgoloFuzzOne_IntNgdotGobEncode:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
_, r1 := arg0.GobEncode()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_IntNgdotGobDecode:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.GobDecode(a.IntNgdotGobDecode.Buf)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_IntNgdotAppendText:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
_, r1 := arg0.AppendText(a.IntNgdotAppendText.B)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_IntNgdotMarshalText:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
_, r1 := arg0.MarshalText()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_IntNgdotUnmarshalText:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.UnmarshalText(a.IntNgdotUnmarshalText.Text)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_IntNgdotMarshalJSON:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
_, r1 := arg0.MarshalJSON()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_IntNgdotUnmarshalJSON:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.UnmarshalJSON(a.IntNgdotUnmarshalJSON.Text)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_IntNgdotProbablyPrime:
if len(IntResults) == 0 {
continue
}
arg0 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
arg1 := int(a.IntNgdotProbablyPrime.N)
arg0.ProbablyPrime(arg1 % 0x10001)
case *NgoloFuzzOne_NewRat:
r0 := big.NewRat(a.NewRat.A, a.NewRat.B)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotSetFloat64:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.SetFloat64(a.RatNgdotSetFloat64.F)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotFloat32:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
arg0.Float32()
case *NgoloFuzzOne_RatNgdotFloat64:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
arg0.Float64()
case *NgoloFuzzOne_RatNgdotSetFrac:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
if len(IntResults) == 0 {
continue
}
arg2 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.SetFrac(arg1, arg2)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotSetFrac64:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.SetFrac64(a.RatNgdotSetFrac64.A, a.RatNgdotSetFrac64.B)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotSetInt:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(IntResults) == 0 {
continue
}
arg1 := IntResults[IntResultsIndex]
IntResultsIndex = (IntResultsIndex + 1) % len(IntResults)
r0 := arg0.SetInt(arg1)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotSetInt64:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.SetInt64(a.RatNgdotSetInt64.X)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotSetUint64:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.SetUint64(a.RatNgdotSetUint64.X)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotSet:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg1 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.Set(arg1)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotAbs:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg1 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.Abs(arg1)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotNeg:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg1 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.Neg(arg1)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotInv:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg1 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.Inv(arg1)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotSign:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
arg0.Sign()
case *NgoloFuzzOne_RatNgdotIsInt:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
arg0.IsInt()
case *NgoloFuzzOne_RatNgdotNum:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.Num()
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_RatNgdotDenom:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.Denom()
if r0 != nil{
IntResults = append(IntResults, r0)
}
case *NgoloFuzzOne_RatNgdotCmp:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg1 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
arg0.Cmp(arg1)
case *NgoloFuzzOne_RatNgdotAdd:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg1 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg2 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.Add(arg1, arg2)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotSub:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg1 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg2 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.Sub(arg1, arg2)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotMul:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg1 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
if len(RatResults) == 0 {
continue
}
arg2 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.Mul(arg1, arg2)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotSetString:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0, _ := arg0.SetString(a.RatNgdotSetString.S)
if r0 != nil{
RatResults = append(RatResults, r0)
}
case *NgoloFuzzOne_RatNgdotString:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
arg0.String()
case *NgoloFuzzOne_RatNgdotRatString:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
arg0.RatString()
case *NgoloFuzzOne_RatNgdotFloatString:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
arg1 := int(a.RatNgdotFloatString.Prec)
arg0.FloatString(arg1 % 0x10001)
case *NgoloFuzzOne_RatNgdotFloatPrec:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
arg0.FloatPrec()
case *NgoloFuzzOne_RatNgdotGobEncode:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
_, r1 := arg0.GobEncode()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_RatNgdotGobDecode:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.GobDecode(a.RatNgdotGobDecode.Buf)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_RatNgdotAppendText:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
_, r1 := arg0.AppendText(a.RatNgdotAppendText.B)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_RatNgdotMarshalText:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
_, r1 := arg0.MarshalText()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_RatNgdotUnmarshalText:
if len(RatResults) == 0 {
continue
}
arg0 := RatResults[RatResultsIndex]
RatResultsIndex = (RatResultsIndex + 1) % len(RatResults)
r0 := arg0.UnmarshalText(a.RatNgdotUnmarshalText.Text)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_RoundingModeNgdotString:
if len(RoundingModeResults) == 0 {
continue
}
arg0 := RoundingModeResults[RoundingModeResultsIndex]
RoundingModeResultsIndex = (RoundingModeResultsIndex + 1) % len(RoundingModeResults)
arg0.String()
case *NgoloFuzzOne_FloatNgdotSqrt:
if len(FloatResults) == 0 {
continue
}
arg0 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
if len(FloatResults) == 0 {
continue
}
arg1 := FloatResults[FloatResultsIndex]
FloatResultsIndex = (FloatResultsIndex + 1) % len(FloatResults)
r0 := arg0.Sqrt(arg1)
if r0 != nil{
FloatResults = append(FloatResults, r0)
}
}
}
return 1
}
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
RoundingModeNb := 0
RoundingModeResultsIndex := 0
AccuracyNb := 0
AccuracyResultsIndex := 0
IntNb := 0
IntResultsIndex := 0
RatNb := 0
RatResultsIndex := 0
FloatNb := 0
FloatResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_AccuracyNgdotString:
if AccuracyNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Accuracy%d.String()\n", AccuracyResultsIndex))
AccuracyResultsIndex = (AccuracyResultsIndex + 1) % AccuracyNb
case *NgoloFuzzOne_NewFloat:
w.WriteString(fmt.Sprintf("Float%d := big.NewFloat(%#+v)\n", FloatNb, a.NewFloat.X))
FloatNb = FloatNb + 1
case *NgoloFuzzOne_FloatNgdotSetPrec:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.SetPrec(uint(%#+v) %% 0x10001)\n", FloatNb, FloatResultsIndex, a.FloatNgdotSetPrec.Prec))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSetMode:
if FloatNb == 0 {
continue
}
if RoundingModeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.SetMode(RoundingMode%d)\n", FloatNb, FloatResultsIndex, (RoundingModeResultsIndex + 0) % RoundingModeNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
RoundingModeResultsIndex = (RoundingModeResultsIndex + 1) % RoundingModeNb
case *NgoloFuzzOne_FloatNgdotPrec:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.Prec()\n", FloatResultsIndex))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotMinPrec:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.MinPrec()\n", FloatResultsIndex))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotMode:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RoundingMode%d := Float%d.Mode()\n", RoundingModeNb, FloatResultsIndex))
RoundingModeNb = RoundingModeNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotAcc:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Accuracy%d := Float%d.Acc()\n", AccuracyNb, FloatResultsIndex))
AccuracyNb = AccuracyNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSign:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.Sign()\n", FloatResultsIndex))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotMantExp:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.MantExp(Float%d)\n", FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSetMantExp:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.SetMantExp(Float%d, int(%#+v) %% 0x10001)\n", FloatNb, FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb, a.FloatNgdotSetMantExp.Exp))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSignbit:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.Signbit()\n", FloatResultsIndex))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotIsInf:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.IsInf()\n", FloatResultsIndex))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotIsInt:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.IsInt()\n", FloatResultsIndex))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSetUint64:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.SetUint64(%#+v)\n", FloatNb, FloatResultsIndex, a.FloatNgdotSetUint64.X))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSetInt64:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.SetInt64(%#+v)\n", FloatNb, FloatResultsIndex, a.FloatNgdotSetInt64.X))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSetFloat64:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.SetFloat64(%#+v)\n", FloatNb, FloatResultsIndex, a.FloatNgdotSetFloat64.X))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSetInt:
if FloatNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.SetInt(Int%d)\n", FloatNb, FloatResultsIndex, (IntResultsIndex + 0) % IntNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_FloatNgdotSetRat:
if FloatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.SetRat(Rat%d)\n", FloatNb, FloatResultsIndex, (RatResultsIndex + 0) % RatNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_FloatNgdotSetInf:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.SetInf(%#+v)\n", FloatNb, FloatResultsIndex, a.FloatNgdotSetInf.Signbit))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSet:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.Set(Float%d)\n", FloatNb, FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotCopy:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.Copy(Float%d)\n", FloatNb, FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotUint64:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("_, Accuracy%d := Float%d.Uint64()\n", AccuracyNb, FloatResultsIndex))
AccuracyNb = AccuracyNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotInt64:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("_, Accuracy%d := Float%d.Int64()\n", AccuracyNb, FloatResultsIndex))
AccuracyNb = AccuracyNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotFloat32:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("_, Accuracy%d := Float%d.Float32()\n", AccuracyNb, FloatResultsIndex))
AccuracyNb = AccuracyNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotFloat64:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("_, Accuracy%d := Float%d.Float64()\n", AccuracyNb, FloatResultsIndex))
AccuracyNb = AccuracyNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotInt:
if FloatNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d, Accuracy%d := Float%d.Int(Int%d)\n", IntNb, AccuracyNb, FloatResultsIndex, (IntResultsIndex + 0) % IntNb))
IntNb = IntNb + 1
AccuracyNb = AccuracyNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_FloatNgdotRat:
if FloatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d, Accuracy%d := Float%d.Rat(Rat%d)\n", RatNb, AccuracyNb, FloatResultsIndex, (RatResultsIndex + 0) % RatNb))
RatNb = RatNb + 1
AccuracyNb = AccuracyNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_FloatNgdotAbs:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.Abs(Float%d)\n", FloatNb, FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotNeg:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.Neg(Float%d)\n", FloatNb, FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotAdd:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.Add(Float%d, Float%d)\n", FloatNb, FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb, (FloatResultsIndex + 2) % FloatNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSub:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.Sub(Float%d, Float%d)\n", FloatNb, FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb, (FloatResultsIndex + 2) % FloatNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotMul:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.Mul(Float%d, Float%d)\n", FloatNb, FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb, (FloatResultsIndex + 2) % FloatNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotCmp:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.Cmp(Float%d)\n", FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotSetString:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d, _ := Float%d.SetString(%#+v)\n", FloatNb, FloatResultsIndex, a.FloatNgdotSetString.S))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotParse:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d, _, _ := Float%d.Parse(%#+v, int(%#+v))\n", FloatNb, FloatResultsIndex, a.FloatNgdotParse.S, a.FloatNgdotParse.Base))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_ParseFloat:
if RoundingModeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d, _, _ := big.ParseFloat(%#+v, int(%#+v), uint(%#+v), RoundingMode%d)\n", FloatNb, a.ParseFloat.S, a.ParseFloat.Base, a.ParseFloat.Prec, (RoundingModeResultsIndex + 0) % RoundingModeNb))
FloatNb = FloatNb + 1
RoundingModeResultsIndex = (RoundingModeResultsIndex + 1) % RoundingModeNb
case *NgoloFuzzOne_FloatNgdotGobEncode:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.GobEncode()\n", FloatResultsIndex))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotGobDecode:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.GobDecode(%#+v)\n", FloatResultsIndex, a.FloatNgdotGobDecode.Buf))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotAppendText:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.AppendText(%#+v)\n", FloatResultsIndex, a.FloatNgdotAppendText.B))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotMarshalText:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.MarshalText()\n", FloatResultsIndex))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotUnmarshalText:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.UnmarshalText(%#+v)\n", FloatResultsIndex, a.FloatNgdotUnmarshalText.Text))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotText:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.Text(byte(%#+v), int(%#+v) %% 0x10001)\n", FloatResultsIndex, a.FloatNgdotText.Format, a.FloatNgdotText.Prec))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotString:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.String()\n", FloatResultsIndex))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_FloatNgdotAppend:
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d.Append(%#+v, byte(%#+v), int(%#+v) %% 0x10001)\n", FloatResultsIndex, a.FloatNgdotAppend.Buf, a.FloatNgdotAppend.Fmt, a.FloatNgdotAppend.Prec))
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
case *NgoloFuzzOne_IntNgdotSign:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.Sign()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotSetInt64:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.SetInt64(%#+v)\n", IntNb, IntResultsIndex, a.IntNgdotSetInt64.X))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotSetUint64:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.SetUint64(%#+v)\n", IntNb, IntResultsIndex, a.IntNgdotSetUint64.X))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_NewInt:
w.WriteString(fmt.Sprintf("Int%d := big.NewInt(%#+v)\n", IntNb, a.NewInt.X))
IntNb = IntNb + 1
case *NgoloFuzzOne_IntNgdotSet:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Set(Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotBits:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.Bits()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotAbs:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Abs(Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotNeg:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Neg(Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotAdd:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Add(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotSub:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Sub(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotMul:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Mul(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotMulRange:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.MulRange(%#+v, %#+v)\n", IntNb, IntResultsIndex, a.IntNgdotMulRange.A, a.IntNgdotMulRange.B))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotBinomial:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Binomial(%#+v %% 0x10001, %#+v %% 0x10001)\n", IntNb, IntResultsIndex, a.IntNgdotBinomial.N, a.IntNgdotBinomial.K))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotRem:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Rem(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotDiv:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Div(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotMod:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Mod(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotDivMod:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d, Int%d := Int%d.DivMod(Int%d, Int%d, Int%d)\n", IntNb, IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb, (IntResultsIndex + 3) % IntNb))
IntNb = IntNb + 1
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotCmp:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.Cmp(Int%d)\n", IntResultsIndex, (IntResultsIndex + 1) % IntNb))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotCmpAbs:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.CmpAbs(Int%d)\n", IntResultsIndex, (IntResultsIndex + 1) % IntNb))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotInt64:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.Int64()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotUint64:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.Uint64()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotIsInt64:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.IsInt64()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotIsUint64:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.IsUint64()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotFloat64:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("_, Accuracy%d := Int%d.Float64()\n", AccuracyNb, IntResultsIndex))
AccuracyNb = AccuracyNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotSetString:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d, _ := Int%d.SetString(%#+v, int(%#+v))\n", IntNb, IntResultsIndex, a.IntNgdotSetString.S, a.IntNgdotSetString.Base))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotSetBytes:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.SetBytes(%#+v)\n", IntNb, IntResultsIndex, a.IntNgdotSetBytes.Buf))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotBytes:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.Bytes()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotFillBytes:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.FillBytes(%#+v)\n", IntResultsIndex, a.IntNgdotFillBytes.Buf))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotBitLen:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.BitLen()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotTrailingZeroBits:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.TrailingZeroBits()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotExp:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Exp(Int%d, Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb, (IntResultsIndex + 3) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotGCD:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.GCD(Int%d, Int%d, Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb, (IntResultsIndex + 3) % IntNb, (IntResultsIndex + 4) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotModInverse:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.ModInverse(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_Jacobi:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("big.Jacobi(Int%d, Int%d)\n", (IntResultsIndex + 0) % IntNb, (IntResultsIndex + 1) % IntNb))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotModSqrt:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.ModSqrt(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotLsh:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Lsh(Int%d, uint(%#+v) %% 0x10001)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, a.IntNgdotLsh.N))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotRsh:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Rsh(Int%d, uint(%#+v))\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, a.IntNgdotRsh.N))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotBit:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.Bit(int(%#+v))\n", IntResultsIndex, a.IntNgdotBit.I))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotSetBit:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.SetBit(Int%d, int(%#+v) %% 0x10001, uint(%#+v))\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, a.IntNgdotSetBit.I, a.IntNgdotSetBit.B))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotAnd:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.And(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotAndNot:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.AndNot(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotOr:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Or(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotXor:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Xor(Int%d, Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb, (IntResultsIndex + 2) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotNot:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Not(Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotSqrt:
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Int%d.Sqrt(Int%d)\n", IntNb, IntResultsIndex, (IntResultsIndex + 1) % IntNb))
IntNb = IntNb + 1
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotText:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.Text(int(%#+v))\n", IntResultsIndex, a.IntNgdotText.Base))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotAppend:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.Append(%#+v, int(%#+v))\n", IntResultsIndex, a.IntNgdotAppend.Buf, a.IntNgdotAppend.Base))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotString:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.String()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotGobEncode:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.GobEncode()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotGobDecode:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.GobDecode(%#+v)\n", IntResultsIndex, a.IntNgdotGobDecode.Buf))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotAppendText:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.AppendText(%#+v)\n", IntResultsIndex, a.IntNgdotAppendText.B))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotMarshalText:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.MarshalText()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotUnmarshalText:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.UnmarshalText(%#+v)\n", IntResultsIndex, a.IntNgdotUnmarshalText.Text))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotMarshalJSON:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.MarshalJSON()\n", IntResultsIndex))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotUnmarshalJSON:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.UnmarshalJSON(%#+v)\n", IntResultsIndex, a.IntNgdotUnmarshalJSON.Text))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_IntNgdotProbablyPrime:
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d.ProbablyPrime(int(%#+v) %% 0x10001)\n", IntResultsIndex, a.IntNgdotProbablyPrime.N))
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_NewRat:
w.WriteString(fmt.Sprintf("Rat%d := big.NewRat(%#+v, %#+v)\n", RatNb, a.NewRat.A, a.NewRat.B))
RatNb = RatNb + 1
case *NgoloFuzzOne_RatNgdotSetFloat64:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.SetFloat64(%#+v)\n", RatNb, RatResultsIndex, a.RatNgdotSetFloat64.F))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotFloat32:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.Float32()\n", RatResultsIndex))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotFloat64:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.Float64()\n", RatResultsIndex))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotSetFrac:
if RatNb == 0 {
continue
}
if IntNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.SetFrac(Int%d, Int%d)\n", RatNb, RatResultsIndex, (IntResultsIndex + 0) % IntNb, (IntResultsIndex + 1) % IntNb))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_RatNgdotSetFrac64:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.SetFrac64(%#+v, %#+v)\n", RatNb, RatResultsIndex, a.RatNgdotSetFrac64.A, a.RatNgdotSetFrac64.B))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotSetInt:
if RatNb == 0 {
continue
}
if IntNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.SetInt(Int%d)\n", RatNb, RatResultsIndex, (IntResultsIndex + 0) % IntNb))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
IntResultsIndex = (IntResultsIndex + 1) % IntNb
case *NgoloFuzzOne_RatNgdotSetInt64:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.SetInt64(%#+v)\n", RatNb, RatResultsIndex, a.RatNgdotSetInt64.X))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotSetUint64:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.SetUint64(%#+v)\n", RatNb, RatResultsIndex, a.RatNgdotSetUint64.X))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotSet:
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.Set(Rat%d)\n", RatNb, RatResultsIndex, (RatResultsIndex + 1) % RatNb))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotAbs:
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.Abs(Rat%d)\n", RatNb, RatResultsIndex, (RatResultsIndex + 1) % RatNb))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotNeg:
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.Neg(Rat%d)\n", RatNb, RatResultsIndex, (RatResultsIndex + 1) % RatNb))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotInv:
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.Inv(Rat%d)\n", RatNb, RatResultsIndex, (RatResultsIndex + 1) % RatNb))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotSign:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.Sign()\n", RatResultsIndex))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotIsInt:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.IsInt()\n", RatResultsIndex))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotNum:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Rat%d.Num()\n", IntNb, RatResultsIndex))
IntNb = IntNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotDenom:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Int%d := Rat%d.Denom()\n", IntNb, RatResultsIndex))
IntNb = IntNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotCmp:
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.Cmp(Rat%d)\n", RatResultsIndex, (RatResultsIndex + 1) % RatNb))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotAdd:
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.Add(Rat%d, Rat%d)\n", RatNb, RatResultsIndex, (RatResultsIndex + 1) % RatNb, (RatResultsIndex + 2) % RatNb))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotSub:
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.Sub(Rat%d, Rat%d)\n", RatNb, RatResultsIndex, (RatResultsIndex + 1) % RatNb, (RatResultsIndex + 2) % RatNb))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotMul:
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d := Rat%d.Mul(Rat%d, Rat%d)\n", RatNb, RatResultsIndex, (RatResultsIndex + 1) % RatNb, (RatResultsIndex + 2) % RatNb))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotSetString:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d, _ := Rat%d.SetString(%#+v)\n", RatNb, RatResultsIndex, a.RatNgdotSetString.S))
RatNb = RatNb + 1
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotString:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.String()\n", RatResultsIndex))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotRatString:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.RatString()\n", RatResultsIndex))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotFloatString:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.FloatString(int(%#+v) %% 0x10001)\n", RatResultsIndex, a.RatNgdotFloatString.Prec))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotFloatPrec:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.FloatPrec()\n", RatResultsIndex))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotGobEncode:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.GobEncode()\n", RatResultsIndex))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotGobDecode:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.GobDecode(%#+v)\n", RatResultsIndex, a.RatNgdotGobDecode.Buf))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotAppendText:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.AppendText(%#+v)\n", RatResultsIndex, a.RatNgdotAppendText.B))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotMarshalText:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.MarshalText()\n", RatResultsIndex))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RatNgdotUnmarshalText:
if RatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Rat%d.UnmarshalText(%#+v)\n", RatResultsIndex, a.RatNgdotUnmarshalText.Text))
RatResultsIndex = (RatResultsIndex + 1) % RatNb
case *NgoloFuzzOne_RoundingModeNgdotString:
if RoundingModeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("RoundingMode%d.String()\n", RoundingModeResultsIndex))
RoundingModeResultsIndex = (RoundingModeResultsIndex + 1) % RoundingModeNb
case *NgoloFuzzOne_FloatNgdotSqrt:
if FloatNb == 0 {
continue
}
if FloatNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Float%d := Float%d.Sqrt(Float%d)\n", FloatNb, FloatResultsIndex, (FloatResultsIndex + 1) % FloatNb))
FloatNb = FloatNb + 1
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
FloatResultsIndex = (FloatResultsIndex + 1) % FloatNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_math_big
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards that fail the build if this generated file and the
// linked protoimpl runtime are incompatible (generated at API level 20).
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// AccuracyNgdotStringArgs is the generated protobuf message for the
// Accuracy.String fuzz call; it carries no payload fields.
type AccuracyNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *AccuracyNgdotStringArgs) Reset() {
*x = AccuracyNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *AccuracyNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*AccuracyNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *AccuracyNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AccuracyNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*AccuracyNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// NewFloatArgs is the generated protobuf message carrying the argument
// for the big.NewFloat fuzz call.
type NewFloatArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// X is the float64 value handed to big.NewFloat.
X float64 `protobuf:"fixed64,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *NewFloatArgs) Reset() {
*x = NewFloatArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *NewFloatArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NewFloatArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *NewFloatArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewFloatArgs.ProtoReflect.Descriptor instead.
func (*NewFloatArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetX returns the x field, or 0 when the receiver is nil.
func (x *NewFloatArgs) GetX() float64 {
if x != nil {
return x.X
}
return 0
}
// FloatNgdotSetPrecArgs is the generated protobuf message carrying the
// argument for the Float.SetPrec fuzz call.
type FloatNgdotSetPrecArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Prec is the precision value handed to Float.SetPrec.
Prec uint32 `protobuf:"varint,1,opt,name=prec,proto3" json:"prec,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSetPrecArgs) Reset() {
*x = FloatNgdotSetPrecArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSetPrecArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetPrecArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSetPrecArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetPrecArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetPrecArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// GetPrec returns the prec field, or 0 when the receiver is nil.
func (x *FloatNgdotSetPrecArgs) GetPrec() uint32 {
if x != nil {
return x.Prec
}
return 0
}
// FloatNgdotSetModeArgs is the generated protobuf message for the
// Float.SetMode fuzz call; it carries no payload fields.
type FloatNgdotSetModeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSetModeArgs) Reset() {
*x = FloatNgdotSetModeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSetModeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetModeArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSetModeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetModeArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetModeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// FloatNgdotPrecArgs is the generated protobuf message for the
// Float.Prec fuzz call; it carries no payload fields.
type FloatNgdotPrecArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotPrecArgs) Reset() {
*x = FloatNgdotPrecArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotPrecArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotPrecArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotPrecArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotPrecArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotPrecArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// FloatNgdotMinPrecArgs is the generated protobuf message for the
// Float.MinPrec fuzz call; it carries no payload fields.
type FloatNgdotMinPrecArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotMinPrecArgs) Reset() {
*x = FloatNgdotMinPrecArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotMinPrecArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotMinPrecArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotMinPrecArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotMinPrecArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotMinPrecArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// FloatNgdotModeArgs is the generated protobuf message for the
// Float.Mode fuzz call; it carries no payload fields.
type FloatNgdotModeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotModeArgs) Reset() {
*x = FloatNgdotModeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotModeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotModeArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotModeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotModeArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotModeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// FloatNgdotAccArgs is the generated protobuf message for the
// Float.Acc fuzz call; it carries no payload fields.
type FloatNgdotAccArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotAccArgs) Reset() {
*x = FloatNgdotAccArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotAccArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotAccArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotAccArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotAccArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotAccArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// FloatNgdotSignArgs is the generated protobuf message for the
// Float.Sign fuzz call; it carries no payload fields.
type FloatNgdotSignArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSignArgs) Reset() {
*x = FloatNgdotSignArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSignArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSignArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSignArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSignArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSignArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// FloatNgdotMantExpArgs is the generated protobuf message for the
// Float.MantExp fuzz call; it carries no payload fields.
type FloatNgdotMantExpArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotMantExpArgs) Reset() {
*x = FloatNgdotMantExpArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotMantExpArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotMantExpArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotMantExpArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotMantExpArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotMantExpArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// FloatNgdotSetMantExpArgs is the generated protobuf message carrying the
// exponent argument for the Float.SetMantExp fuzz call.
type FloatNgdotSetMantExpArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Exp is the exponent handed to Float.SetMantExp.
Exp int64 `protobuf:"varint,1,opt,name=exp,proto3" json:"exp,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSetMantExpArgs) Reset() {
*x = FloatNgdotSetMantExpArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSetMantExpArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetMantExpArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSetMantExpArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetMantExpArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetMantExpArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetExp returns the exp field, or 0 when the receiver is nil.
func (x *FloatNgdotSetMantExpArgs) GetExp() int64 {
if x != nil {
return x.Exp
}
return 0
}
// FloatNgdotSignbitArgs is the generated protobuf message for the
// Float.Signbit fuzz call; it carries no payload fields.
type FloatNgdotSignbitArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSignbitArgs) Reset() {
*x = FloatNgdotSignbitArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSignbitArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSignbitArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSignbitArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSignbitArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSignbitArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// FloatNgdotIsInfArgs is the generated protobuf message for the
// Float.IsInf fuzz call; it carries no payload fields.
type FloatNgdotIsInfArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotIsInfArgs) Reset() {
*x = FloatNgdotIsInfArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotIsInfArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotIsInfArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotIsInfArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotIsInfArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotIsInfArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// FloatNgdotIsIntArgs is the generated protobuf message for the
// Float.IsInt fuzz call; it carries no payload fields.
type FloatNgdotIsIntArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotIsIntArgs) Reset() {
*x = FloatNgdotIsIntArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotIsIntArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotIsIntArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotIsIntArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotIsIntArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotIsIntArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// FloatNgdotSetUint64Args is the generated protobuf message carrying the
// argument for the Float.SetUint64 fuzz call.
type FloatNgdotSetUint64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
// X is the uint64 value handed to Float.SetUint64.
X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSetUint64Args) Reset() {
*x = FloatNgdotSetUint64Args{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSetUint64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetUint64Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSetUint64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetUint64Args.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetUint64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// GetX returns the x field, or 0 when the receiver is nil.
func (x *FloatNgdotSetUint64Args) GetX() uint64 {
if x != nil {
return x.X
}
return 0
}
// FloatNgdotSetInt64Args is the generated protobuf message carrying the
// argument for the Float.SetInt64 fuzz call.
type FloatNgdotSetInt64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
// X is the int64 value handed to Float.SetInt64.
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSetInt64Args) Reset() {
*x = FloatNgdotSetInt64Args{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSetInt64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetInt64Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSetInt64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetInt64Args.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetInt64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetX returns the x field, or 0 when the receiver is nil.
func (x *FloatNgdotSetInt64Args) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// FloatNgdotSetFloat64Args is the generated protobuf message carrying the
// argument for the Float.SetFloat64 fuzz call.
type FloatNgdotSetFloat64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
// X is the float64 value handed to Float.SetFloat64.
X float64 `protobuf:"fixed64,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSetFloat64Args) Reset() {
*x = FloatNgdotSetFloat64Args{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSetFloat64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetFloat64Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSetFloat64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetFloat64Args.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetFloat64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetX returns the x field, or 0 when the receiver is nil.
func (x *FloatNgdotSetFloat64Args) GetX() float64 {
if x != nil {
return x.X
}
return 0
}
// FloatNgdotSetIntArgs is the generated protobuf message for the
// Float.SetInt fuzz call; the Int operand is tracked by the generator
// state, so the message carries no payload fields.
type FloatNgdotSetIntArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSetIntArgs) Reset() {
*x = FloatNgdotSetIntArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSetIntArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetIntArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSetIntArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetIntArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetIntArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// FloatNgdotSetRatArgs is the generated protobuf message for the
// Float.SetRat fuzz call; the Rat operand is tracked by the generator
// state, so the message carries no payload fields.
type FloatNgdotSetRatArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSetRatArgs) Reset() {
*x = FloatNgdotSetRatArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSetRatArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetRatArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSetRatArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetRatArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetRatArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// FloatNgdotSetInfArgs is the generated protobuf message carrying the
// argument for the Float.SetInf fuzz call.
type FloatNgdotSetInfArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Signbit selects the sign of the infinity handed to Float.SetInf.
Signbit bool `protobuf:"varint,1,opt,name=signbit,proto3" json:"signbit,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSetInfArgs) Reset() {
*x = FloatNgdotSetInfArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSetInfArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetInfArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSetInfArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetInfArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetInfArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// GetSignbit returns the signbit field, or false when the receiver is nil.
func (x *FloatNgdotSetInfArgs) GetSignbit() bool {
if x != nil {
return x.Signbit
}
return false
}
// FloatNgdotSetArgs is the generated protobuf message for the
// Float.Set fuzz call; it carries no payload fields.
type FloatNgdotSetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message and re-attaches its generated type info.
func (x *FloatNgdotSetArgs) Reset() {
*x = FloatNgdotSetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message using the protobuf text formatter.
func (x *FloatNgdotSetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated type info on first use; a nil receiver is handled.
func (x *FloatNgdotSetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// FloatNgdotCopyArgs is a generated protobuf message with no payload
// fields (entry 21 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotCopyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotCopyArgs) Reset() {
*x = FloatNgdotCopyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotCopyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotCopyArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotCopyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotCopyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// FloatNgdotUint64Args is a generated protobuf message with no
// payload fields (entry 22 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotUint64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotUint64Args) Reset() {
*x = FloatNgdotUint64Args{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotUint64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotUint64Args) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotUint64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotUint64Args.ProtoReflect.Descriptor instead.
func (*FloatNgdotUint64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// FloatNgdotInt64Args is a generated protobuf message with no payload
// fields (entry 23 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotInt64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotInt64Args) Reset() {
*x = FloatNgdotInt64Args{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotInt64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotInt64Args) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotInt64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotInt64Args.ProtoReflect.Descriptor instead.
func (*FloatNgdotInt64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// FloatNgdotFloat32Args is a generated protobuf message with no
// payload fields (entry 24 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotFloat32Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotFloat32Args) Reset() {
*x = FloatNgdotFloat32Args{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotFloat32Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotFloat32Args) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotFloat32Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotFloat32Args.ProtoReflect.Descriptor instead.
func (*FloatNgdotFloat32Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// FloatNgdotFloat64Args is a generated protobuf message with no
// payload fields (entry 25 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotFloat64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotFloat64Args) Reset() {
*x = FloatNgdotFloat64Args{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotFloat64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotFloat64Args) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotFloat64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotFloat64Args.ProtoReflect.Descriptor instead.
func (*FloatNgdotFloat64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// FloatNgdotIntArgs is a generated protobuf message with no payload
// fields (entry 26 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotIntArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotIntArgs) Reset() {
*x = FloatNgdotIntArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotIntArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotIntArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotIntArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotIntArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotIntArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// FloatNgdotRatArgs is a generated protobuf message with no payload
// fields (entry 27 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotRatArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotRatArgs) Reset() {
*x = FloatNgdotRatArgs{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotRatArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotRatArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotRatArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotRatArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotRatArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// FloatNgdotAbsArgs is a generated protobuf message with no payload
// fields (entry 28 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotAbsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotAbsArgs) Reset() {
*x = FloatNgdotAbsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotAbsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotAbsArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotAbsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotAbsArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotAbsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
// FloatNgdotNegArgs is a generated protobuf message with no payload
// fields (entry 29 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotNegArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotNegArgs) Reset() {
*x = FloatNgdotNegArgs{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotNegArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotNegArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotNegArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotNegArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotNegArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
// FloatNgdotAddArgs is a generated protobuf message with no payload
// fields (entry 30 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotAddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotAddArgs) Reset() {
*x = FloatNgdotAddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotAddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotAddArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotAddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotAddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}
// FloatNgdotSubArgs is a generated protobuf message with no payload
// fields (entry 31 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotSubArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotSubArgs) Reset() {
*x = FloatNgdotSubArgs{}
mi := &file_ngolofuzz_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotSubArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSubArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotSubArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSubArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSubArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
// FloatNgdotMulArgs is a generated protobuf message with no payload
// fields (entry 32 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotMulArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotMulArgs) Reset() {
*x = FloatNgdotMulArgs{}
mi := &file_ngolofuzz_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotMulArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotMulArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotMulArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotMulArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotMulArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}
// FloatNgdotCmpArgs is a generated protobuf message with no payload
// fields (entry 33 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotCmpArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotCmpArgs) Reset() {
*x = FloatNgdotCmpArgs{}
mi := &file_ngolofuzz_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotCmpArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotCmpArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotCmpArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotCmpArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotCmpArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{33}
}
// FloatNgdotSetStringArgs is a generated protobuf message carrying a
// single string field S (entry 34 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotSetStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// S is proto field 1 ("s").
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotSetStringArgs) Reset() {
*x = FloatNgdotSetStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotSetStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotSetStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotSetStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[34]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSetStringArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSetStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{34}
}
// GetS returns the S field; safe on a nil receiver (returns "").
func (x *FloatNgdotSetStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// FloatNgdotParseArgs is a generated protobuf message carrying a
// string S and an int64 Base (entry 35 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotParseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// S is proto field 1 ("s").
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
// Base is proto field 2 ("base").
Base int64 `protobuf:"varint,2,opt,name=base,proto3" json:"base,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotParseArgs) Reset() {
*x = FloatNgdotParseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotParseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotParseArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotParseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[35]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotParseArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotParseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{35}
}
// GetS returns the S field; safe on a nil receiver (returns "").
func (x *FloatNgdotParseArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetBase returns the Base field; safe on a nil receiver (returns 0).
func (x *FloatNgdotParseArgs) GetBase() int64 {
if x != nil {
return x.Base
}
return 0
}
// ParseFloatArgs is a generated protobuf message carrying a string S,
// an int64 Base, and a uint32 Prec (entry 36 of
// file_ngolofuzz_proto_msgTypes).
type ParseFloatArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// S is proto field 1 ("s").
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
// Base is proto field 2 ("base").
Base int64 `protobuf:"varint,2,opt,name=base,proto3" json:"base,omitempty"`
// Prec is proto field 3 ("prec").
Prec uint32 `protobuf:"varint,3,opt,name=prec,proto3" json:"prec,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *ParseFloatArgs) Reset() {
*x = ParseFloatArgs{}
mi := &file_ngolofuzz_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *ParseFloatArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*ParseFloatArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *ParseFloatArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[36]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseFloatArgs.ProtoReflect.Descriptor instead.
func (*ParseFloatArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{36}
}
// GetS returns the S field; safe on a nil receiver (returns "").
func (x *ParseFloatArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetBase returns the Base field; safe on a nil receiver (returns 0).
func (x *ParseFloatArgs) GetBase() int64 {
if x != nil {
return x.Base
}
return 0
}
// GetPrec returns the Prec field; safe on a nil receiver (returns 0).
func (x *ParseFloatArgs) GetPrec() uint32 {
if x != nil {
return x.Prec
}
return 0
}
// FloatNgdotGobEncodeArgs is a generated protobuf message with no
// payload fields (entry 37 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotGobEncodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotGobEncodeArgs) Reset() {
*x = FloatNgdotGobEncodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotGobEncodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotGobEncodeArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotGobEncodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[37]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotGobEncodeArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotGobEncodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{37}
}
// FloatNgdotGobDecodeArgs is a generated protobuf message carrying a
// byte-slice Buf (entry 38 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotGobDecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Buf is proto field 1 ("buf").
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotGobDecodeArgs) Reset() {
*x = FloatNgdotGobDecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotGobDecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotGobDecodeArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotGobDecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[38]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotGobDecodeArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotGobDecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{38}
}
// GetBuf returns the Buf field; safe on a nil receiver (returns nil).
func (x *FloatNgdotGobDecodeArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
// FloatNgdotAppendTextArgs is a generated protobuf message carrying a
// byte-slice B (entry 39 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotAppendTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotAppendTextArgs) Reset() {
*x = FloatNgdotAppendTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotAppendTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotAppendTextArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotAppendTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[39]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotAppendTextArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotAppendTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{39}
}
// GetB returns the B field; safe on a nil receiver (returns nil).
func (x *FloatNgdotAppendTextArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// FloatNgdotMarshalTextArgs is a generated protobuf message with no
// payload fields (entry 40 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotMarshalTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotMarshalTextArgs) Reset() {
*x = FloatNgdotMarshalTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotMarshalTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotMarshalTextArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotMarshalTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[40]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotMarshalTextArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotMarshalTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{40}
}
// FloatNgdotUnmarshalTextArgs is a generated protobuf message carrying
// a byte-slice Text (entry 41 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotUnmarshalTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Text is proto field 1 ("text").
Text []byte `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotUnmarshalTextArgs) Reset() {
*x = FloatNgdotUnmarshalTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotUnmarshalTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotUnmarshalTextArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotUnmarshalTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[41]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotUnmarshalTextArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotUnmarshalTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{41}
}
// GetText returns the Text field; safe on a nil receiver (returns nil).
func (x *FloatNgdotUnmarshalTextArgs) GetText() []byte {
if x != nil {
return x.Text
}
return nil
}
// FloatNgdotTextArgs is a generated protobuf message carrying a uint32
// Format and an int64 Prec (entry 42 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Format is proto field 1 ("format").
Format uint32 `protobuf:"varint,1,opt,name=format,proto3" json:"format,omitempty"`
// Prec is proto field 2 ("prec").
Prec int64 `protobuf:"varint,2,opt,name=prec,proto3" json:"prec,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotTextArgs) Reset() {
*x = FloatNgdotTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotTextArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[42]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotTextArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{42}
}
// GetFormat returns the Format field; safe on a nil receiver (returns 0).
func (x *FloatNgdotTextArgs) GetFormat() uint32 {
if x != nil {
return x.Format
}
return 0
}
// GetPrec returns the Prec field; safe on a nil receiver (returns 0).
func (x *FloatNgdotTextArgs) GetPrec() int64 {
if x != nil {
return x.Prec
}
return 0
}
// FloatNgdotStringArgs is a generated protobuf message with no payload
// fields (entry 43 of file_ngolofuzz_proto_msgTypes).
type FloatNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotStringArgs) Reset() {
*x = FloatNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[43]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{43}
}
// FloatNgdotAppendArgs is a generated protobuf message carrying a
// byte-slice Buf, a uint32 Fmt, and an int64 Prec (entry 44 of
// file_ngolofuzz_proto_msgTypes).
type FloatNgdotAppendArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Buf is proto field 1 ("buf").
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
// Fmt is proto field 2 ("fmt").
Fmt uint32 `protobuf:"varint,2,opt,name=fmt,proto3" json:"fmt,omitempty"`
// Prec is proto field 3 ("prec").
Prec int64 `protobuf:"varint,3,opt,name=prec,proto3" json:"prec,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *FloatNgdotAppendArgs) Reset() {
*x = FloatNgdotAppendArgs{}
mi := &file_ngolofuzz_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *FloatNgdotAppendArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*FloatNgdotAppendArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *FloatNgdotAppendArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[44]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotAppendArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotAppendArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{44}
}
// GetBuf returns the Buf field; safe on a nil receiver (returns nil).
func (x *FloatNgdotAppendArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
// GetFmt returns the Fmt field; safe on a nil receiver (returns 0).
func (x *FloatNgdotAppendArgs) GetFmt() uint32 {
if x != nil {
return x.Fmt
}
return 0
}
// GetPrec returns the Prec field; safe on a nil receiver (returns 0).
func (x *FloatNgdotAppendArgs) GetPrec() int64 {
if x != nil {
return x.Prec
}
return 0
}
// IntNgdotSignArgs is a generated protobuf message with no payload
// fields (entry 45 of file_ngolofuzz_proto_msgTypes).
type IntNgdotSignArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *IntNgdotSignArgs) Reset() {
*x = IntNgdotSignArgs{}
mi := &file_ngolofuzz_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *IntNgdotSignArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*IntNgdotSignArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *IntNgdotSignArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[45]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotSignArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotSignArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{45}
}
// IntNgdotSetInt64Args is a generated protobuf message carrying an
// int64 X (entry 46 of file_ngolofuzz_proto_msgTypes).
type IntNgdotSetInt64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
// X is proto field 1 ("x").
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *IntNgdotSetInt64Args) Reset() {
*x = IntNgdotSetInt64Args{}
mi := &file_ngolofuzz_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *IntNgdotSetInt64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*IntNgdotSetInt64Args) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *IntNgdotSetInt64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[46]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotSetInt64Args.ProtoReflect.Descriptor instead.
func (*IntNgdotSetInt64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{46}
}
// GetX returns the X field; safe on a nil receiver (returns 0).
func (x *IntNgdotSetInt64Args) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// IntNgdotSetUint64Args is a generated protobuf message carrying a
// uint64 X (entry 47 of file_ngolofuzz_proto_msgTypes).
type IntNgdotSetUint64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
// X is proto field 1 ("x").
X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *IntNgdotSetUint64Args) Reset() {
*x = IntNgdotSetUint64Args{}
mi := &file_ngolofuzz_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *IntNgdotSetUint64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*IntNgdotSetUint64Args) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *IntNgdotSetUint64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[47]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotSetUint64Args.ProtoReflect.Descriptor instead.
func (*IntNgdotSetUint64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{47}
}
// GetX returns the X field; safe on a nil receiver (returns 0).
func (x *IntNgdotSetUint64Args) GetX() uint64 {
if x != nil {
return x.X
}
return 0
}
// NewIntArgs is a generated protobuf message carrying an int64 X
// (entry 48 of file_ngolofuzz_proto_msgTypes).
type NewIntArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// X is proto field 1 ("x").
X int64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *NewIntArgs) Reset() {
*x = NewIntArgs{}
mi := &file_ngolofuzz_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *NewIntArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*NewIntArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *NewIntArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[48]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewIntArgs.ProtoReflect.Descriptor instead.
func (*NewIntArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{48}
}
// GetX returns the X field; safe on a nil receiver (returns 0).
func (x *NewIntArgs) GetX() int64 {
if x != nil {
return x.X
}
return 0
}
// IntNgdotSetArgs is a generated protobuf message with no payload
// fields (entry 49 of file_ngolofuzz_proto_msgTypes).
type IntNgdotSetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *IntNgdotSetArgs) Reset() {
*x = IntNgdotSetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *IntNgdotSetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*IntNgdotSetArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *IntNgdotSetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[49]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotSetArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotSetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{49}
}
// IntNgdotBitsArgs is a generated protobuf message with no payload
// fields (entry 50 of file_ngolofuzz_proto_msgTypes).
type IntNgdotBitsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *IntNgdotBitsArgs) Reset() {
*x = IntNgdotBitsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *IntNgdotBitsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*IntNgdotBitsArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *IntNgdotBitsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[50]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotBitsArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotBitsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{50}
}
// IntNgdotAbsArgs is a generated protobuf message with no payload
// fields (entry 51 of file_ngolofuzz_proto_msgTypes).
type IntNgdotAbsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *IntNgdotAbsArgs) Reset() {
*x = IntNgdotAbsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[51]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *IntNgdotAbsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*IntNgdotAbsArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *IntNgdotAbsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[51]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotAbsArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotAbsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{51}
}
// IntNgdotNegArgs is a generated protobuf message with no payload
// fields (entry 52 of file_ngolofuzz_proto_msgTypes).
type IntNgdotNegArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the empty state and re-stores its type info.
func (x *IntNgdotNegArgs) Reset() {
*x = IntNgdotNegArgs{}
mi := &file_ngolofuzz_proto_msgTypes[52]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message via the protoimpl runtime.
func (x *IntNgdotNegArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks this type as a protobuf message.
func (*IntNgdotNegArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, caching the type
// info on first use; nil receivers use the typed-nil wrapper.
func (x *IntNgdotNegArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[52]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotNegArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotNegArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{52}
}
type IntNgdotAddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotAddArgs) Reset() {
*x = IntNgdotAddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[53]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotAddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotAddArgs) ProtoMessage() {}
func (x *IntNgdotAddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[53]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotAddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{53}
}
type IntNgdotSubArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotSubArgs) Reset() {
*x = IntNgdotSubArgs{}
mi := &file_ngolofuzz_proto_msgTypes[54]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotSubArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotSubArgs) ProtoMessage() {}
func (x *IntNgdotSubArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[54]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotSubArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotSubArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{54}
}
type IntNgdotMulArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotMulArgs) Reset() {
*x = IntNgdotMulArgs{}
mi := &file_ngolofuzz_proto_msgTypes[55]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotMulArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotMulArgs) ProtoMessage() {}
func (x *IntNgdotMulArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[55]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotMulArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotMulArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{55}
}
// NOTE(review): generated protobuf bindings (protoc-gen-go style); comments
// added for review only — regenerate from the .proto rather than hand-editing.

// IntNgdotMulRangeArgs carries the [a, b] bounds for the Int.MulRange fuzz
// call. Standard generated method set; message-type index 56.
type IntNgdotMulRangeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
A int64 `protobuf:"varint,1,opt,name=a,proto3" json:"a,omitempty"`
B int64 `protobuf:"varint,2,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotMulRangeArgs) Reset() {
*x = IntNgdotMulRangeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[56]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotMulRangeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotMulRangeArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching the message-type
// info; a nil receiver gets the type-only view.
func (x *IntNgdotMulRangeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[56]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotMulRangeArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotMulRangeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{56}
}
// GetA returns field a; safe on a nil receiver (returns 0).
func (x *IntNgdotMulRangeArgs) GetA() int64 {
if x != nil {
return x.A
}
return 0
}
// GetB returns field b; safe on a nil receiver (returns 0).
func (x *IntNgdotMulRangeArgs) GetB() int64 {
if x != nil {
return x.B
}
return 0
}
// IntNgdotBinomialArgs carries n and k for the Int.Binomial fuzz call.
// Standard generated method set; message-type index 57.
type IntNgdotBinomialArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N int64 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
K int64 `protobuf:"varint,2,opt,name=k,proto3" json:"k,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotBinomialArgs) Reset() {
*x = IntNgdotBinomialArgs{}
mi := &file_ngolofuzz_proto_msgTypes[57]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotBinomialArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotBinomialArgs) ProtoMessage() {}
func (x *IntNgdotBinomialArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[57]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotBinomialArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotBinomialArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{57}
}
// GetN returns field n; safe on a nil receiver (returns 0).
func (x *IntNgdotBinomialArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// GetK returns field k; safe on a nil receiver (returns 0).
func (x *IntNgdotBinomialArgs) GetK() int64 {
if x != nil {
return x.K
}
return 0
}
// NOTE(review): generated protobuf bindings (protoc-gen-go style); comments
// added for review only — regenerate from the .proto rather than hand-editing.
// All six types below are empty (field-less) argument messages with the
// standard generated method set: Reset wipes the value and re-attaches the
// cached type info, String uses the protobuf text formatter, ProtoMessage is
// the marker method, ProtoReflect lazily caches the message-type info
// (type-only view for nil), and Descriptor is the deprecated raw accessor.

// IntNgdotRemArgs is the (empty) argument message for Int.Rem; index 58.
type IntNgdotRemArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotRemArgs) Reset() {
*x = IntNgdotRemArgs{}
mi := &file_ngolofuzz_proto_msgTypes[58]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotRemArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotRemArgs) ProtoMessage() {}
func (x *IntNgdotRemArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[58]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotRemArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotRemArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{58}
}
// IntNgdotDivArgs is the (empty) argument message for Int.Div; index 59.
type IntNgdotDivArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotDivArgs) Reset() {
*x = IntNgdotDivArgs{}
mi := &file_ngolofuzz_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotDivArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotDivArgs) ProtoMessage() {}
func (x *IntNgdotDivArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[59]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotDivArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotDivArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{59}
}
// IntNgdotModArgs is the (empty) argument message for Int.Mod; index 60.
type IntNgdotModArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotModArgs) Reset() {
*x = IntNgdotModArgs{}
mi := &file_ngolofuzz_proto_msgTypes[60]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotModArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotModArgs) ProtoMessage() {}
func (x *IntNgdotModArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[60]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotModArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotModArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{60}
}
// IntNgdotDivModArgs is the (empty) argument message for Int.DivMod; index 61.
type IntNgdotDivModArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotDivModArgs) Reset() {
*x = IntNgdotDivModArgs{}
mi := &file_ngolofuzz_proto_msgTypes[61]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotDivModArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotDivModArgs) ProtoMessage() {}
func (x *IntNgdotDivModArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[61]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotDivModArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotDivModArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{61}
}
// IntNgdotCmpArgs is the (empty) argument message for Int.Cmp; index 62.
type IntNgdotCmpArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotCmpArgs) Reset() {
*x = IntNgdotCmpArgs{}
mi := &file_ngolofuzz_proto_msgTypes[62]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotCmpArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotCmpArgs) ProtoMessage() {}
func (x *IntNgdotCmpArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[62]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotCmpArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotCmpArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{62}
}
// IntNgdotCmpAbsArgs is the (empty) argument message for Int.CmpAbs; index 63.
type IntNgdotCmpAbsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotCmpAbsArgs) Reset() {
*x = IntNgdotCmpAbsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[63]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotCmpAbsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotCmpAbsArgs) ProtoMessage() {}
func (x *IntNgdotCmpAbsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[63]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotCmpAbsArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotCmpAbsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{63}
}
// NOTE(review): generated protobuf bindings (protoc-gen-go style); comments
// added for review only — regenerate from the .proto rather than hand-editing.
// All five types below are empty (field-less) argument messages with the
// standard generated method set (Reset / String / ProtoMessage / lazy
// ProtoReflect / deprecated Descriptor).

// IntNgdotInt64Args is the (empty) argument message for Int.Int64; index 64.
type IntNgdotInt64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotInt64Args) Reset() {
*x = IntNgdotInt64Args{}
mi := &file_ngolofuzz_proto_msgTypes[64]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotInt64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotInt64Args) ProtoMessage() {}
func (x *IntNgdotInt64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[64]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotInt64Args.ProtoReflect.Descriptor instead.
func (*IntNgdotInt64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{64}
}
// IntNgdotUint64Args is the (empty) argument message for Int.Uint64; index 65.
type IntNgdotUint64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotUint64Args) Reset() {
*x = IntNgdotUint64Args{}
mi := &file_ngolofuzz_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotUint64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotUint64Args) ProtoMessage() {}
func (x *IntNgdotUint64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[65]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotUint64Args.ProtoReflect.Descriptor instead.
func (*IntNgdotUint64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{65}
}
// IntNgdotIsInt64Args is the (empty) argument message for Int.IsInt64; index 66.
type IntNgdotIsInt64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotIsInt64Args) Reset() {
*x = IntNgdotIsInt64Args{}
mi := &file_ngolofuzz_proto_msgTypes[66]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotIsInt64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotIsInt64Args) ProtoMessage() {}
func (x *IntNgdotIsInt64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[66]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotIsInt64Args.ProtoReflect.Descriptor instead.
func (*IntNgdotIsInt64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{66}
}
// IntNgdotIsUint64Args is the (empty) argument message for Int.IsUint64; index 67.
type IntNgdotIsUint64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotIsUint64Args) Reset() {
*x = IntNgdotIsUint64Args{}
mi := &file_ngolofuzz_proto_msgTypes[67]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotIsUint64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotIsUint64Args) ProtoMessage() {}
func (x *IntNgdotIsUint64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[67]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotIsUint64Args.ProtoReflect.Descriptor instead.
func (*IntNgdotIsUint64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{67}
}
// IntNgdotFloat64Args is the (empty) argument message for Int.Float64; index 68.
type IntNgdotFloat64Args struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotFloat64Args) Reset() {
*x = IntNgdotFloat64Args{}
mi := &file_ngolofuzz_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotFloat64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotFloat64Args) ProtoMessage() {}
func (x *IntNgdotFloat64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[68]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotFloat64Args.ProtoReflect.Descriptor instead.
func (*IntNgdotFloat64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{68}
}
// NOTE(review): generated protobuf bindings (protoc-gen-go style); comments
// added for review only — regenerate from the .proto rather than hand-editing.

// IntNgdotSetStringArgs carries the string and base for the Int.SetString
// fuzz call. Standard generated method set; message-type index 69.
type IntNgdotSetStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Base int64 `protobuf:"varint,2,opt,name=base,proto3" json:"base,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotSetStringArgs) Reset() {
*x = IntNgdotSetStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotSetStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotSetStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching the message-type
// info; a nil receiver gets the type-only view.
func (x *IntNgdotSetStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[69]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotSetStringArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotSetStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{69}
}
// GetS returns field s; safe on a nil receiver (returns "").
func (x *IntNgdotSetStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetBase returns field base; safe on a nil receiver (returns 0).
func (x *IntNgdotSetStringArgs) GetBase() int64 {
if x != nil {
return x.Base
}
return 0
}
// IntNgdotSetBytesArgs carries the byte buffer for the Int.SetBytes fuzz
// call. Standard generated method set; message-type index 70.
type IntNgdotSetBytesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotSetBytesArgs) Reset() {
*x = IntNgdotSetBytesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[70]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotSetBytesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotSetBytesArgs) ProtoMessage() {}
func (x *IntNgdotSetBytesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[70]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotSetBytesArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotSetBytesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{70}
}
// GetBuf returns field buf; safe on a nil receiver (returns nil).
func (x *IntNgdotSetBytesArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
// IntNgdotBytesArgs is the (empty) argument message for Int.Bytes; index 71.
type IntNgdotBytesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotBytesArgs) Reset() {
*x = IntNgdotBytesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[71]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotBytesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotBytesArgs) ProtoMessage() {}
func (x *IntNgdotBytesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[71]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotBytesArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotBytesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{71}
}
// IntNgdotFillBytesArgs carries the destination buffer for the Int.FillBytes
// fuzz call. Standard generated method set; message-type index 72.
type IntNgdotFillBytesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotFillBytesArgs) Reset() {
*x = IntNgdotFillBytesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[72]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotFillBytesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotFillBytesArgs) ProtoMessage() {}
func (x *IntNgdotFillBytesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[72]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotFillBytesArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotFillBytesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{72}
}
// GetBuf returns field buf; safe on a nil receiver (returns nil).
func (x *IntNgdotFillBytesArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
// NOTE(review): generated protobuf bindings (protoc-gen-go style); comments
// added for review only — regenerate from the .proto rather than hand-editing.
// All six types below are empty (field-less) argument messages with the
// standard generated method set (Reset / String / ProtoMessage / lazy
// ProtoReflect / deprecated Descriptor).

// IntNgdotBitLenArgs is the (empty) argument message for Int.BitLen; index 73.
type IntNgdotBitLenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotBitLenArgs) Reset() {
*x = IntNgdotBitLenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[73]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotBitLenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotBitLenArgs) ProtoMessage() {}
func (x *IntNgdotBitLenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[73]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotBitLenArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotBitLenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{73}
}
// IntNgdotTrailingZeroBitsArgs is the (empty) argument message for
// Int.TrailingZeroBits; index 74.
type IntNgdotTrailingZeroBitsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotTrailingZeroBitsArgs) Reset() {
*x = IntNgdotTrailingZeroBitsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[74]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotTrailingZeroBitsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotTrailingZeroBitsArgs) ProtoMessage() {}
func (x *IntNgdotTrailingZeroBitsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[74]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotTrailingZeroBitsArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotTrailingZeroBitsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{74}
}
// IntNgdotExpArgs is the (empty) argument message for Int.Exp; index 75.
type IntNgdotExpArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotExpArgs) Reset() {
*x = IntNgdotExpArgs{}
mi := &file_ngolofuzz_proto_msgTypes[75]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotExpArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotExpArgs) ProtoMessage() {}
func (x *IntNgdotExpArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[75]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotExpArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotExpArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{75}
}
// IntNgdotGCDArgs is the (empty) argument message for Int.GCD; index 76.
type IntNgdotGCDArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotGCDArgs) Reset() {
*x = IntNgdotGCDArgs{}
mi := &file_ngolofuzz_proto_msgTypes[76]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotGCDArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotGCDArgs) ProtoMessage() {}
func (x *IntNgdotGCDArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[76]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotGCDArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotGCDArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{76}
}
// IntNgdotModInverseArgs is the (empty) argument message for Int.ModInverse;
// index 77.
type IntNgdotModInverseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotModInverseArgs) Reset() {
*x = IntNgdotModInverseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[77]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotModInverseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotModInverseArgs) ProtoMessage() {}
func (x *IntNgdotModInverseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[77]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotModInverseArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotModInverseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{77}
}
// JacobiArgs is the (empty) argument message for the package-level Jacobi
// fuzz call; index 78.
type JacobiArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *JacobiArgs) Reset() {
*x = JacobiArgs{}
mi := &file_ngolofuzz_proto_msgTypes[78]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *JacobiArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*JacobiArgs) ProtoMessage() {}
func (x *JacobiArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[78]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use JacobiArgs.ProtoReflect.Descriptor instead.
func (*JacobiArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{78}
}
// NOTE(review): generated protobuf bindings (protoc-gen-go style); comments
// added for review only — regenerate from the .proto rather than hand-editing.

// IntNgdotModSqrtArgs is the (empty) argument message for Int.ModSqrt;
// index 79. Standard generated method set (Reset / String / ProtoMessage /
// lazy ProtoReflect / deprecated Descriptor).
type IntNgdotModSqrtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotModSqrtArgs) Reset() {
*x = IntNgdotModSqrtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[79]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotModSqrtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotModSqrtArgs) ProtoMessage() {}
func (x *IntNgdotModSqrtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[79]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotModSqrtArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotModSqrtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{79}
}
// IntNgdotLshArgs carries the shift amount n for the Int.Lsh fuzz call.
// Standard generated method set; index 80.
type IntNgdotLshArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N uint32 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotLshArgs) Reset() {
*x = IntNgdotLshArgs{}
mi := &file_ngolofuzz_proto_msgTypes[80]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotLshArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotLshArgs) ProtoMessage() {}
func (x *IntNgdotLshArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[80]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotLshArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotLshArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{80}
}
// GetN returns field n; safe on a nil receiver (returns 0).
func (x *IntNgdotLshArgs) GetN() uint32 {
if x != nil {
return x.N
}
return 0
}
// IntNgdotRshArgs carries the shift amount n for the Int.Rsh fuzz call.
// Standard generated method set; index 81.
type IntNgdotRshArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
N uint32 `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotRshArgs) Reset() {
*x = IntNgdotRshArgs{}
mi := &file_ngolofuzz_proto_msgTypes[81]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotRshArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotRshArgs) ProtoMessage() {}
func (x *IntNgdotRshArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[81]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotRshArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotRshArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{81}
}
// GetN returns field n; safe on a nil receiver (returns 0).
func (x *IntNgdotRshArgs) GetN() uint32 {
if x != nil {
return x.N
}
return 0
}
// IntNgdotBitArgs carries the bit index i for the Int.Bit fuzz call.
// Standard generated method set; index 82.
type IntNgdotBitArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I int64 `protobuf:"varint,1,opt,name=i,proto3" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotBitArgs) Reset() {
*x = IntNgdotBitArgs{}
mi := &file_ngolofuzz_proto_msgTypes[82]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotBitArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotBitArgs) ProtoMessage() {}
func (x *IntNgdotBitArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[82]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotBitArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotBitArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{82}
}
// GetI returns field i; safe on a nil receiver (returns 0).
func (x *IntNgdotBitArgs) GetI() int64 {
if x != nil {
return x.I
}
return 0
}
// IntNgdotSetBitArgs carries the bit index i and bit value b for the
// Int.SetBit fuzz call. Standard generated method set; index 83.
type IntNgdotSetBitArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I int64 `protobuf:"varint,1,opt,name=i,proto3" json:"i,omitempty"`
B uint32 `protobuf:"varint,2,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IntNgdotSetBitArgs) Reset() {
*x = IntNgdotSetBitArgs{}
mi := &file_ngolofuzz_proto_msgTypes[83]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IntNgdotSetBitArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntNgdotSetBitArgs) ProtoMessage() {}
func (x *IntNgdotSetBitArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[83]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntNgdotSetBitArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotSetBitArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{83}
}
// GetI returns field i; safe on a nil receiver (returns 0).
func (x *IntNgdotSetBitArgs) GetI() int64 {
if x != nil {
return x.I
}
return 0
}
// GetB returns field b; safe on a nil receiver (returns 0).
func (x *IntNgdotSetBitArgs) GetB() uint32 {
if x != nil {
return x.B
}
return 0
}
// IntNgdotAndArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type IntNgdotAndArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotAndArgs) Reset() {
	*x = IntNgdotAndArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[84]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotAndArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotAndArgs as a protobuf message.
func (*IntNgdotAndArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotAndArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[84]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotAndArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotAndArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{84}
}
// IntNgdotAndNotArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type IntNgdotAndNotArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotAndNotArgs) Reset() {
	*x = IntNgdotAndNotArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[85]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotAndNotArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotAndNotArgs as a protobuf message.
func (*IntNgdotAndNotArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotAndNotArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[85]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotAndNotArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotAndNotArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{85}
}
// IntNgdotOrArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type IntNgdotOrArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotOrArgs) Reset() {
	*x = IntNgdotOrArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[86]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotOrArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotOrArgs as a protobuf message.
func (*IntNgdotOrArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotOrArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[86]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotOrArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotOrArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{86}
}
// IntNgdotXorArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type IntNgdotXorArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotXorArgs) Reset() {
	*x = IntNgdotXorArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[87]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotXorArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotXorArgs as a protobuf message.
func (*IntNgdotXorArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotXorArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[87]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotXorArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotXorArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{87}
}
// IntNgdotNotArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type IntNgdotNotArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotNotArgs) Reset() {
	*x = IntNgdotNotArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[88]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotNotArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotNotArgs as a protobuf message.
func (*IntNgdotNotArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotNotArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[88]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotNotArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotNotArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{88}
}
// IntNgdotSqrtArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type IntNgdotSqrtArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotSqrtArgs) Reset() {
	*x = IntNgdotSqrtArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[89]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotSqrtArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotSqrtArgs as a protobuf message.
func (*IntNgdotSqrtArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotSqrtArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[89]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotSqrtArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotSqrtArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{89}
}
// IntNgdotTextArgs is a generated protobuf message with a single field:
// base (int64). protoc-gen-go output — do not hand edit.
type IntNgdotTextArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Base          int64                  `protobuf:"varint,1,opt,name=base,proto3" json:"base,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotTextArgs) Reset() {
	*x = IntNgdotTextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[90]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotTextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotTextArgs as a protobuf message.
func (*IntNgdotTextArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotTextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[90]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotTextArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotTextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{90}
}

// GetBase returns the Base field, or 0 if x is nil (nil-receiver safe).
func (x *IntNgdotTextArgs) GetBase() int64 {
	if x != nil {
		return x.Base
	}
	return 0
}
// IntNgdotAppendArgs is a generated protobuf message with two fields:
// buf ([]byte) and base (int64). protoc-gen-go output — do not hand edit.
type IntNgdotAppendArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Buf           []byte                 `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
	Base          int64                  `protobuf:"varint,2,opt,name=base,proto3" json:"base,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotAppendArgs) Reset() {
	*x = IntNgdotAppendArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[91]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotAppendArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotAppendArgs as a protobuf message.
func (*IntNgdotAppendArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotAppendArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[91]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotAppendArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotAppendArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{91}
}

// GetBuf returns the Buf field, or nil if x is nil (nil-receiver safe).
func (x *IntNgdotAppendArgs) GetBuf() []byte {
	if x != nil {
		return x.Buf
	}
	return nil
}

// GetBase returns the Base field, or 0 if x is nil (nil-receiver safe).
func (x *IntNgdotAppendArgs) GetBase() int64 {
	if x != nil {
		return x.Base
	}
	return 0
}
// IntNgdotStringArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type IntNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotStringArgs) Reset() {
	*x = IntNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[92]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotStringArgs as a protobuf message.
func (*IntNgdotStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[92]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{92}
}
// IntNgdotGobEncodeArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type IntNgdotGobEncodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotGobEncodeArgs) Reset() {
	*x = IntNgdotGobEncodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[93]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotGobEncodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotGobEncodeArgs as a protobuf message.
func (*IntNgdotGobEncodeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotGobEncodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[93]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotGobEncodeArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotGobEncodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{93}
}
// IntNgdotGobDecodeArgs is a generated protobuf message with a single field:
// buf ([]byte). protoc-gen-go output — do not hand edit.
type IntNgdotGobDecodeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Buf           []byte                 `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotGobDecodeArgs) Reset() {
	*x = IntNgdotGobDecodeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[94]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotGobDecodeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotGobDecodeArgs as a protobuf message.
func (*IntNgdotGobDecodeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotGobDecodeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[94]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotGobDecodeArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotGobDecodeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{94}
}

// GetBuf returns the Buf field, or nil if x is nil (nil-receiver safe).
func (x *IntNgdotGobDecodeArgs) GetBuf() []byte {
	if x != nil {
		return x.Buf
	}
	return nil
}
// IntNgdotAppendTextArgs is a generated protobuf message with a single field:
// b ([]byte). protoc-gen-go output — do not hand edit.
type IntNgdotAppendTextArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	B             []byte                 `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotAppendTextArgs) Reset() {
	*x = IntNgdotAppendTextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[95]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotAppendTextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotAppendTextArgs as a protobuf message.
func (*IntNgdotAppendTextArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotAppendTextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[95]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotAppendTextArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotAppendTextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{95}
}

// GetB returns the B field, or nil if x is nil (nil-receiver safe).
func (x *IntNgdotAppendTextArgs) GetB() []byte {
	if x != nil {
		return x.B
	}
	return nil
}
// IntNgdotMarshalTextArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type IntNgdotMarshalTextArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotMarshalTextArgs) Reset() {
	*x = IntNgdotMarshalTextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[96]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotMarshalTextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotMarshalTextArgs as a protobuf message.
func (*IntNgdotMarshalTextArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotMarshalTextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[96]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotMarshalTextArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotMarshalTextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{96}
}
// IntNgdotUnmarshalTextArgs is a generated protobuf message with a single
// field: text ([]byte). protoc-gen-go output — do not hand edit.
type IntNgdotUnmarshalTextArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Text          []byte                 `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotUnmarshalTextArgs) Reset() {
	*x = IntNgdotUnmarshalTextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[97]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotUnmarshalTextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotUnmarshalTextArgs as a protobuf message.
func (*IntNgdotUnmarshalTextArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotUnmarshalTextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[97]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotUnmarshalTextArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotUnmarshalTextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{97}
}

// GetText returns the Text field, or nil if x is nil (nil-receiver safe).
func (x *IntNgdotUnmarshalTextArgs) GetText() []byte {
	if x != nil {
		return x.Text
	}
	return nil
}
// IntNgdotMarshalJSONArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type IntNgdotMarshalJSONArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotMarshalJSONArgs) Reset() {
	*x = IntNgdotMarshalJSONArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[98]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotMarshalJSONArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotMarshalJSONArgs as a protobuf message.
func (*IntNgdotMarshalJSONArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotMarshalJSONArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[98]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotMarshalJSONArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotMarshalJSONArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{98}
}
// IntNgdotUnmarshalJSONArgs is a generated protobuf message with a single
// field: text ([]byte). protoc-gen-go output — do not hand edit.
type IntNgdotUnmarshalJSONArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Text          []byte                 `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotUnmarshalJSONArgs) Reset() {
	*x = IntNgdotUnmarshalJSONArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[99]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotUnmarshalJSONArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotUnmarshalJSONArgs as a protobuf message.
func (*IntNgdotUnmarshalJSONArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotUnmarshalJSONArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[99]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotUnmarshalJSONArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotUnmarshalJSONArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{99}
}

// GetText returns the Text field, or nil if x is nil (nil-receiver safe).
func (x *IntNgdotUnmarshalJSONArgs) GetText() []byte {
	if x != nil {
		return x.Text
	}
	return nil
}
// IntNgdotProbablyPrimeArgs is a generated protobuf message with a single
// field: n (int64). protoc-gen-go output — do not hand edit.
type IntNgdotProbablyPrimeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	N             int64                  `protobuf:"varint,1,opt,name=n,proto3" json:"n,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *IntNgdotProbablyPrimeArgs) Reset() {
	*x = IntNgdotProbablyPrimeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[100]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *IntNgdotProbablyPrimeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IntNgdotProbablyPrimeArgs as a protobuf message.
func (*IntNgdotProbablyPrimeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *IntNgdotProbablyPrimeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[100]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntNgdotProbablyPrimeArgs.ProtoReflect.Descriptor instead.
func (*IntNgdotProbablyPrimeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{100}
}

// GetN returns the N field, or 0 if x is nil (nil-receiver safe).
func (x *IntNgdotProbablyPrimeArgs) GetN() int64 {
	if x != nil {
		return x.N
	}
	return 0
}
// NewRatArgs is a generated protobuf message with two fields: a (int64)
// and b (int64). protoc-gen-go output — do not hand edit.
type NewRatArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	A             int64                  `protobuf:"varint,1,opt,name=a,proto3" json:"a,omitempty"`
	B             int64                  `protobuf:"varint,2,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *NewRatArgs) Reset() {
	*x = NewRatArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[101]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *NewRatArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NewRatArgs as a protobuf message.
func (*NewRatArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *NewRatArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[101]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewRatArgs.ProtoReflect.Descriptor instead.
func (*NewRatArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{101}
}

// GetA returns the A field, or 0 if x is nil (nil-receiver safe).
func (x *NewRatArgs) GetA() int64 {
	if x != nil {
		return x.A
	}
	return 0
}

// GetB returns the B field, or 0 if x is nil (nil-receiver safe).
func (x *NewRatArgs) GetB() int64 {
	if x != nil {
		return x.B
	}
	return 0
}
// RatNgdotSetFloat64Args is a generated protobuf message with a single
// field: f (float64). protoc-gen-go output — do not hand edit.
type RatNgdotSetFloat64Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	F             float64                `protobuf:"fixed64,1,opt,name=f,proto3" json:"f,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotSetFloat64Args) Reset() {
	*x = RatNgdotSetFloat64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[102]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotSetFloat64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotSetFloat64Args as a protobuf message.
func (*RatNgdotSetFloat64Args) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotSetFloat64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[102]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotSetFloat64Args.ProtoReflect.Descriptor instead.
func (*RatNgdotSetFloat64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{102}
}

// GetF returns the F field, or 0 if x is nil (nil-receiver safe).
func (x *RatNgdotSetFloat64Args) GetF() float64 {
	if x != nil {
		return x.F
	}
	return 0
}
// RatNgdotFloat32Args is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotFloat32Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotFloat32Args) Reset() {
	*x = RatNgdotFloat32Args{}
	mi := &file_ngolofuzz_proto_msgTypes[103]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotFloat32Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotFloat32Args as a protobuf message.
func (*RatNgdotFloat32Args) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotFloat32Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[103]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotFloat32Args.ProtoReflect.Descriptor instead.
func (*RatNgdotFloat32Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{103}
}
// RatNgdotFloat64Args is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotFloat64Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotFloat64Args) Reset() {
	*x = RatNgdotFloat64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[104]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotFloat64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotFloat64Args as a protobuf message.
func (*RatNgdotFloat64Args) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotFloat64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[104]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotFloat64Args.ProtoReflect.Descriptor instead.
func (*RatNgdotFloat64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{104}
}
// RatNgdotSetFracArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotSetFracArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotSetFracArgs) Reset() {
	*x = RatNgdotSetFracArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[105]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotSetFracArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotSetFracArgs as a protobuf message.
func (*RatNgdotSetFracArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotSetFracArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[105]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotSetFracArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotSetFracArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{105}
}
// RatNgdotSetFrac64Args is a generated protobuf message with two fields:
// a (int64) and b (int64). protoc-gen-go output — do not hand edit.
type RatNgdotSetFrac64Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	A             int64                  `protobuf:"varint,1,opt,name=a,proto3" json:"a,omitempty"`
	B             int64                  `protobuf:"varint,2,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotSetFrac64Args) Reset() {
	*x = RatNgdotSetFrac64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[106]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotSetFrac64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotSetFrac64Args as a protobuf message.
func (*RatNgdotSetFrac64Args) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotSetFrac64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[106]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotSetFrac64Args.ProtoReflect.Descriptor instead.
func (*RatNgdotSetFrac64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{106}
}

// GetA returns the A field, or 0 if x is nil (nil-receiver safe).
func (x *RatNgdotSetFrac64Args) GetA() int64 {
	if x != nil {
		return x.A
	}
	return 0
}

// GetB returns the B field, or 0 if x is nil (nil-receiver safe).
func (x *RatNgdotSetFrac64Args) GetB() int64 {
	if x != nil {
		return x.B
	}
	return 0
}
// RatNgdotSetIntArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotSetIntArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotSetIntArgs) Reset() {
	*x = RatNgdotSetIntArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[107]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotSetIntArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotSetIntArgs as a protobuf message.
func (*RatNgdotSetIntArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotSetIntArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[107]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotSetIntArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotSetIntArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{107}
}
// RatNgdotSetInt64Args is a generated protobuf message with a single field:
// x (int64). protoc-gen-go output — do not hand edit.
type RatNgdotSetInt64Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	X             int64                  `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotSetInt64Args) Reset() {
	*x = RatNgdotSetInt64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[108]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotSetInt64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotSetInt64Args as a protobuf message.
func (*RatNgdotSetInt64Args) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotSetInt64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[108]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotSetInt64Args.ProtoReflect.Descriptor instead.
func (*RatNgdotSetInt64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{108}
}

// GetX returns the X field, or 0 if x is nil (nil-receiver safe).
func (x *RatNgdotSetInt64Args) GetX() int64 {
	if x != nil {
		return x.X
	}
	return 0
}
// RatNgdotSetUint64Args is a generated protobuf message with a single field:
// x (uint64). protoc-gen-go output — do not hand edit.
type RatNgdotSetUint64Args struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	X             uint64                 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotSetUint64Args) Reset() {
	*x = RatNgdotSetUint64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[109]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotSetUint64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotSetUint64Args as a protobuf message.
func (*RatNgdotSetUint64Args) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotSetUint64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[109]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotSetUint64Args.ProtoReflect.Descriptor instead.
func (*RatNgdotSetUint64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{109}
}

// GetX returns the X field, or 0 if x is nil (nil-receiver safe).
func (x *RatNgdotSetUint64Args) GetX() uint64 {
	if x != nil {
		return x.X
	}
	return 0
}
// RatNgdotSetArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotSetArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotSetArgs) Reset() {
	*x = RatNgdotSetArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[110]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotSetArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotSetArgs as a protobuf message.
func (*RatNgdotSetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotSetArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[110]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotSetArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotSetArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{110}
}
// RatNgdotAbsArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotAbsArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotAbsArgs) Reset() {
	*x = RatNgdotAbsArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[111]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotAbsArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotAbsArgs as a protobuf message.
func (*RatNgdotAbsArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotAbsArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[111]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotAbsArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotAbsArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{111}
}
// RatNgdotNegArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotNegArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotNegArgs) Reset() {
	*x = RatNgdotNegArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[112]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotNegArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotNegArgs as a protobuf message.
func (*RatNgdotNegArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotNegArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[112]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotNegArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotNegArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{112}
}
// RatNgdotInvArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotInvArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotInvArgs) Reset() {
	*x = RatNgdotInvArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[113]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotInvArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotInvArgs as a protobuf message.
func (*RatNgdotInvArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotInvArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[113]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotInvArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotInvArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{113}
}
// RatNgdotSignArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotSignArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotSignArgs) Reset() {
	*x = RatNgdotSignArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[114]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotSignArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotSignArgs as a protobuf message.
func (*RatNgdotSignArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotSignArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[114]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotSignArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotSignArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{114}
}
// RatNgdotIsIntArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotIsIntArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotIsIntArgs) Reset() {
	*x = RatNgdotIsIntArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[115]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotIsIntArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotIsIntArgs as a protobuf message.
func (*RatNgdotIsIntArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotIsIntArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[115]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotIsIntArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotIsIntArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{115}
}
// RatNgdotNumArgs is a generated, field-less protobuf message (only
// protobuf-internal bookkeeping). protoc-gen-go output — do not hand edit.
type RatNgdotNumArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-attaches the generated message info.
func (x *RatNgdotNumArgs) Reset() {
	*x = RatNgdotNumArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[116]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String implements fmt.Stringer via the protobuf text formatter.
func (x *RatNgdotNumArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RatNgdotNumArgs as a protobuf message.
func (*RatNgdotNumArgs) ProtoMessage() {}

// ProtoReflect returns the reflective protobuf view of x, lazily binding
// the message info on first use; a nil x falls back to mi.MessageOf.
func (x *RatNgdotNumArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[116]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RatNgdotNumArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotNumArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{116}
}
type RatNgdotDenomArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotDenomArgs) Reset() {
*x = RatNgdotDenomArgs{}
mi := &file_ngolofuzz_proto_msgTypes[117]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotDenomArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotDenomArgs) ProtoMessage() {}
func (x *RatNgdotDenomArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[117]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotDenomArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotDenomArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{117}
}
type RatNgdotCmpArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotCmpArgs) Reset() {
*x = RatNgdotCmpArgs{}
mi := &file_ngolofuzz_proto_msgTypes[118]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotCmpArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotCmpArgs) ProtoMessage() {}
func (x *RatNgdotCmpArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[118]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotCmpArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotCmpArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{118}
}
type RatNgdotAddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotAddArgs) Reset() {
*x = RatNgdotAddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[119]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotAddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotAddArgs) ProtoMessage() {}
func (x *RatNgdotAddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[119]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotAddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{119}
}
type RatNgdotSubArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotSubArgs) Reset() {
*x = RatNgdotSubArgs{}
mi := &file_ngolofuzz_proto_msgTypes[120]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotSubArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotSubArgs) ProtoMessage() {}
func (x *RatNgdotSubArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[120]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotSubArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotSubArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{120}
}
type RatNgdotMulArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotMulArgs) Reset() {
*x = RatNgdotMulArgs{}
mi := &file_ngolofuzz_proto_msgTypes[121]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotMulArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotMulArgs) ProtoMessage() {}
func (x *RatNgdotMulArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[121]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotMulArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotMulArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{121}
}
// RatNgdotSetStringArgs (msgTypes[122]) carries the single string operand
// for the Rat.SetString fuzz call. Generated by protoc-gen-go.
type RatNgdotSetStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotSetStringArgs) Reset() {
*x = RatNgdotSetStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[122]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotSetStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotSetStringArgs) ProtoMessage() {}
func (x *RatNgdotSetStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[122]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotSetStringArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotSetStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{122}
}
// GetS returns field S, tolerating a nil receiver (standard generated getter).
func (x *RatNgdotSetStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// RatNgdotStringArgs (msgTypes[123]) and RatNgdotRatStringArgs (msgTypes[124])
// are empty args messages for the Rat.String and Rat.RatString fuzz calls.
// Generated by protoc-gen-go.
type RatNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotStringArgs) Reset() {
*x = RatNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[123]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotStringArgs) ProtoMessage() {}
func (x *RatNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[123]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{123}
}
type RatNgdotRatStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotRatStringArgs) Reset() {
*x = RatNgdotRatStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[124]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotRatStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotRatStringArgs) ProtoMessage() {}
func (x *RatNgdotRatStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[124]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotRatStringArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotRatStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{124}
}
// RatNgdotFloatStringArgs (msgTypes[125]) carries the precision operand for
// the Rat.FloatString fuzz call. Generated by protoc-gen-go.
type RatNgdotFloatStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Prec int64 `protobuf:"varint,1,opt,name=prec,proto3" json:"prec,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotFloatStringArgs) Reset() {
*x = RatNgdotFloatStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[125]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotFloatStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotFloatStringArgs) ProtoMessage() {}
func (x *RatNgdotFloatStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[125]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotFloatStringArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotFloatStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{125}
}
// GetPrec returns field Prec, tolerating a nil receiver.
func (x *RatNgdotFloatStringArgs) GetPrec() int64 {
if x != nil {
return x.Prec
}
return 0
}
// RatNgdotFloatPrecArgs (msgTypes[126]) and RatNgdotGobEncodeArgs
// (msgTypes[127]) are empty args messages for the Rat.FloatPrec and
// Rat.GobEncode fuzz calls. Generated by protoc-gen-go.
type RatNgdotFloatPrecArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotFloatPrecArgs) Reset() {
*x = RatNgdotFloatPrecArgs{}
mi := &file_ngolofuzz_proto_msgTypes[126]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotFloatPrecArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotFloatPrecArgs) ProtoMessage() {}
func (x *RatNgdotFloatPrecArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[126]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotFloatPrecArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotFloatPrecArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{126}
}
type RatNgdotGobEncodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotGobEncodeArgs) Reset() {
*x = RatNgdotGobEncodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[127]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotGobEncodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotGobEncodeArgs) ProtoMessage() {}
func (x *RatNgdotGobEncodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[127]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotGobEncodeArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotGobEncodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{127}
}
// RatNgdotGobDecodeArgs (msgTypes[128]) carries the raw byte buffer operand
// for the Rat.GobDecode fuzz call. Generated by protoc-gen-go.
type RatNgdotGobDecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotGobDecodeArgs) Reset() {
*x = RatNgdotGobDecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[128]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotGobDecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotGobDecodeArgs) ProtoMessage() {}
func (x *RatNgdotGobDecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[128]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotGobDecodeArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotGobDecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{128}
}
// GetBuf returns field Buf, tolerating a nil receiver.
func (x *RatNgdotGobDecodeArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
// RatNgdotAppendTextArgs (msgTypes[129]) carries the destination byte slice
// operand for the Rat.AppendText fuzz call. Generated by protoc-gen-go.
type RatNgdotAppendTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotAppendTextArgs) Reset() {
*x = RatNgdotAppendTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[129]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotAppendTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotAppendTextArgs) ProtoMessage() {}
func (x *RatNgdotAppendTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[129]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotAppendTextArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotAppendTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{129}
}
// GetB returns field B, tolerating a nil receiver.
func (x *RatNgdotAppendTextArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// RatNgdotMarshalTextArgs (msgTypes[130]) is the empty args message for the
// Rat.MarshalText fuzz call. Generated by protoc-gen-go.
type RatNgdotMarshalTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotMarshalTextArgs) Reset() {
*x = RatNgdotMarshalTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[130]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotMarshalTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotMarshalTextArgs) ProtoMessage() {}
func (x *RatNgdotMarshalTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[130]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotMarshalTextArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotMarshalTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{130}
}
// RatNgdotUnmarshalTextArgs (msgTypes[131]) carries the text operand for the
// Rat.UnmarshalText fuzz call. Generated by protoc-gen-go.
type RatNgdotUnmarshalTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Text []byte `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RatNgdotUnmarshalTextArgs) Reset() {
*x = RatNgdotUnmarshalTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[131]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RatNgdotUnmarshalTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RatNgdotUnmarshalTextArgs) ProtoMessage() {}
func (x *RatNgdotUnmarshalTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[131]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RatNgdotUnmarshalTextArgs.ProtoReflect.Descriptor instead.
func (*RatNgdotUnmarshalTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{131}
}
// GetText returns field Text, tolerating a nil receiver.
func (x *RatNgdotUnmarshalTextArgs) GetText() []byte {
if x != nil {
return x.Text
}
return nil
}
// RoundingModeNgdotStringArgs (msgTypes[132]) and FloatNgdotSqrtArgs
// (msgTypes[133]) are empty args messages for the RoundingMode.String and
// Float.Sqrt fuzz calls. Generated by protoc-gen-go.
type RoundingModeNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RoundingModeNgdotStringArgs) Reset() {
*x = RoundingModeNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[132]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RoundingModeNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RoundingModeNgdotStringArgs) ProtoMessage() {}
func (x *RoundingModeNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[132]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RoundingModeNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*RoundingModeNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{132}
}
type FloatNgdotSqrtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FloatNgdotSqrtArgs) Reset() {
*x = FloatNgdotSqrtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[133]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FloatNgdotSqrtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FloatNgdotSqrtArgs) ProtoMessage() {}
func (x *FloatNgdotSqrtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[133]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FloatNgdotSqrtArgs.ProtoReflect.Descriptor instead.
func (*FloatNgdotSqrtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{133}
}
// NgoloFuzzOne (msgTypes[134]) is the oneof container selecting exactly one
// fuzzed operation per step; Item holds the chosen operation's args message.
// Generated by protoc-gen-go — regenerate from ngolofuzz.proto rather than
// hand-editing.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_AccuracyNgdotString
// *NgoloFuzzOne_NewFloat
// *NgoloFuzzOne_FloatNgdotSetPrec
// *NgoloFuzzOne_FloatNgdotSetMode
// *NgoloFuzzOne_FloatNgdotPrec
// *NgoloFuzzOne_FloatNgdotMinPrec
// *NgoloFuzzOne_FloatNgdotMode
// *NgoloFuzzOne_FloatNgdotAcc
// *NgoloFuzzOne_FloatNgdotSign
// *NgoloFuzzOne_FloatNgdotMantExp
// *NgoloFuzzOne_FloatNgdotSetMantExp
// *NgoloFuzzOne_FloatNgdotSignbit
// *NgoloFuzzOne_FloatNgdotIsInf
// *NgoloFuzzOne_FloatNgdotIsInt
// *NgoloFuzzOne_FloatNgdotSetUint64
// *NgoloFuzzOne_FloatNgdotSetInt64
// *NgoloFuzzOne_FloatNgdotSetFloat64
// *NgoloFuzzOne_FloatNgdotSetInt
// *NgoloFuzzOne_FloatNgdotSetRat
// *NgoloFuzzOne_FloatNgdotSetInf
// *NgoloFuzzOne_FloatNgdotSet
// *NgoloFuzzOne_FloatNgdotCopy
// *NgoloFuzzOne_FloatNgdotUint64
// *NgoloFuzzOne_FloatNgdotInt64
// *NgoloFuzzOne_FloatNgdotFloat32
// *NgoloFuzzOne_FloatNgdotFloat64
// *NgoloFuzzOne_FloatNgdotInt
// *NgoloFuzzOne_FloatNgdotRat
// *NgoloFuzzOne_FloatNgdotAbs
// *NgoloFuzzOne_FloatNgdotNeg
// *NgoloFuzzOne_FloatNgdotAdd
// *NgoloFuzzOne_FloatNgdotSub
// *NgoloFuzzOne_FloatNgdotMul
// *NgoloFuzzOne_FloatNgdotCmp
// *NgoloFuzzOne_FloatNgdotSetString
// *NgoloFuzzOne_FloatNgdotParse
// *NgoloFuzzOne_ParseFloat
// *NgoloFuzzOne_FloatNgdotGobEncode
// *NgoloFuzzOne_FloatNgdotGobDecode
// *NgoloFuzzOne_FloatNgdotAppendText
// *NgoloFuzzOne_FloatNgdotMarshalText
// *NgoloFuzzOne_FloatNgdotUnmarshalText
// *NgoloFuzzOne_FloatNgdotText
// *NgoloFuzzOne_FloatNgdotString
// *NgoloFuzzOne_FloatNgdotAppend
// *NgoloFuzzOne_IntNgdotSign
// *NgoloFuzzOne_IntNgdotSetInt64
// *NgoloFuzzOne_IntNgdotSetUint64
// *NgoloFuzzOne_NewInt
// *NgoloFuzzOne_IntNgdotSet
// *NgoloFuzzOne_IntNgdotBits
// *NgoloFuzzOne_IntNgdotAbs
// *NgoloFuzzOne_IntNgdotNeg
// *NgoloFuzzOne_IntNgdotAdd
// *NgoloFuzzOne_IntNgdotSub
// *NgoloFuzzOne_IntNgdotMul
// *NgoloFuzzOne_IntNgdotMulRange
// *NgoloFuzzOne_IntNgdotBinomial
// *NgoloFuzzOne_IntNgdotRem
// *NgoloFuzzOne_IntNgdotDiv
// *NgoloFuzzOne_IntNgdotMod
// *NgoloFuzzOne_IntNgdotDivMod
// *NgoloFuzzOne_IntNgdotCmp
// *NgoloFuzzOne_IntNgdotCmpAbs
// *NgoloFuzzOne_IntNgdotInt64
// *NgoloFuzzOne_IntNgdotUint64
// *NgoloFuzzOne_IntNgdotIsInt64
// *NgoloFuzzOne_IntNgdotIsUint64
// *NgoloFuzzOne_IntNgdotFloat64
// *NgoloFuzzOne_IntNgdotSetString
// *NgoloFuzzOne_IntNgdotSetBytes
// *NgoloFuzzOne_IntNgdotBytes
// *NgoloFuzzOne_IntNgdotFillBytes
// *NgoloFuzzOne_IntNgdotBitLen
// *NgoloFuzzOne_IntNgdotTrailingZeroBits
// *NgoloFuzzOne_IntNgdotExp
// *NgoloFuzzOne_IntNgdotGCD
// *NgoloFuzzOne_IntNgdotModInverse
// *NgoloFuzzOne_Jacobi
// *NgoloFuzzOne_IntNgdotModSqrt
// *NgoloFuzzOne_IntNgdotLsh
// *NgoloFuzzOne_IntNgdotRsh
// *NgoloFuzzOne_IntNgdotBit
// *NgoloFuzzOne_IntNgdotSetBit
// *NgoloFuzzOne_IntNgdotAnd
// *NgoloFuzzOne_IntNgdotAndNot
// *NgoloFuzzOne_IntNgdotOr
// *NgoloFuzzOne_IntNgdotXor
// *NgoloFuzzOne_IntNgdotNot
// *NgoloFuzzOne_IntNgdotSqrt
// *NgoloFuzzOne_IntNgdotText
// *NgoloFuzzOne_IntNgdotAppend
// *NgoloFuzzOne_IntNgdotString
// *NgoloFuzzOne_IntNgdotGobEncode
// *NgoloFuzzOne_IntNgdotGobDecode
// *NgoloFuzzOne_IntNgdotAppendText
// *NgoloFuzzOne_IntNgdotMarshalText
// *NgoloFuzzOne_IntNgdotUnmarshalText
// *NgoloFuzzOne_IntNgdotMarshalJSON
// *NgoloFuzzOne_IntNgdotUnmarshalJSON
// *NgoloFuzzOne_IntNgdotProbablyPrime
// *NgoloFuzzOne_NewRat
// *NgoloFuzzOne_RatNgdotSetFloat64
// *NgoloFuzzOne_RatNgdotFloat32
// *NgoloFuzzOne_RatNgdotFloat64
// *NgoloFuzzOne_RatNgdotSetFrac
// *NgoloFuzzOne_RatNgdotSetFrac64
// *NgoloFuzzOne_RatNgdotSetInt
// *NgoloFuzzOne_RatNgdotSetInt64
// *NgoloFuzzOne_RatNgdotSetUint64
// *NgoloFuzzOne_RatNgdotSet
// *NgoloFuzzOne_RatNgdotAbs
// *NgoloFuzzOne_RatNgdotNeg
// *NgoloFuzzOne_RatNgdotInv
// *NgoloFuzzOne_RatNgdotSign
// *NgoloFuzzOne_RatNgdotIsInt
// *NgoloFuzzOne_RatNgdotNum
// *NgoloFuzzOne_RatNgdotDenom
// *NgoloFuzzOne_RatNgdotCmp
// *NgoloFuzzOne_RatNgdotAdd
// *NgoloFuzzOne_RatNgdotSub
// *NgoloFuzzOne_RatNgdotMul
// *NgoloFuzzOne_RatNgdotSetString
// *NgoloFuzzOne_RatNgdotString
// *NgoloFuzzOne_RatNgdotRatString
// *NgoloFuzzOne_RatNgdotFloatString
// *NgoloFuzzOne_RatNgdotFloatPrec
// *NgoloFuzzOne_RatNgdotGobEncode
// *NgoloFuzzOne_RatNgdotGobDecode
// *NgoloFuzzOne_RatNgdotAppendText
// *NgoloFuzzOne_RatNgdotMarshalText
// *NgoloFuzzOne_RatNgdotUnmarshalText
// *NgoloFuzzOne_RoundingModeNgdotString
// *NgoloFuzzOne_FloatNgdotSqrt
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[134]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[134]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{134}
}
// GetItem returns the raw oneof value (nil if unset or nil receiver).
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Generated oneof accessors for NgoloFuzzOne.Item: each getter returns the
// wrapped args message when Item currently holds that variant, and nil
// otherwise (including on a nil receiver). The inner `x` deliberately
// shadows the receiver with the type-asserted wrapper.
func (x *NgoloFuzzOne) GetAccuracyNgdotString() *AccuracyNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AccuracyNgdotString); ok {
return x.AccuracyNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewFloat() *NewFloatArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewFloat); ok {
return x.NewFloat
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSetPrec() *FloatNgdotSetPrecArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSetPrec); ok {
return x.FloatNgdotSetPrec
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSetMode() *FloatNgdotSetModeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSetMode); ok {
return x.FloatNgdotSetMode
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotPrec() *FloatNgdotPrecArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotPrec); ok {
return x.FloatNgdotPrec
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotMinPrec() *FloatNgdotMinPrecArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotMinPrec); ok {
return x.FloatNgdotMinPrec
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotMode() *FloatNgdotModeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotMode); ok {
return x.FloatNgdotMode
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotAcc() *FloatNgdotAccArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotAcc); ok {
return x.FloatNgdotAcc
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSign() *FloatNgdotSignArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSign); ok {
return x.FloatNgdotSign
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotMantExp() *FloatNgdotMantExpArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotMantExp); ok {
return x.FloatNgdotMantExp
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSetMantExp() *FloatNgdotSetMantExpArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSetMantExp); ok {
return x.FloatNgdotSetMantExp
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSignbit() *FloatNgdotSignbitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSignbit); ok {
return x.FloatNgdotSignbit
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotIsInf() *FloatNgdotIsInfArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotIsInf); ok {
return x.FloatNgdotIsInf
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotIsInt() *FloatNgdotIsIntArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotIsInt); ok {
return x.FloatNgdotIsInt
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSetUint64() *FloatNgdotSetUint64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSetUint64); ok {
return x.FloatNgdotSetUint64
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSetInt64() *FloatNgdotSetInt64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSetInt64); ok {
return x.FloatNgdotSetInt64
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSetFloat64() *FloatNgdotSetFloat64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSetFloat64); ok {
return x.FloatNgdotSetFloat64
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSetInt() *FloatNgdotSetIntArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSetInt); ok {
return x.FloatNgdotSetInt
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSetRat() *FloatNgdotSetRatArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSetRat); ok {
return x.FloatNgdotSetRat
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSetInf() *FloatNgdotSetInfArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSetInf); ok {
return x.FloatNgdotSetInf
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSet() *FloatNgdotSetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSet); ok {
return x.FloatNgdotSet
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotCopy() *FloatNgdotCopyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotCopy); ok {
return x.FloatNgdotCopy
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotUint64() *FloatNgdotUint64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotUint64); ok {
return x.FloatNgdotUint64
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotInt64() *FloatNgdotInt64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotInt64); ok {
return x.FloatNgdotInt64
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotFloat32() *FloatNgdotFloat32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotFloat32); ok {
return x.FloatNgdotFloat32
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotFloat64() *FloatNgdotFloat64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotFloat64); ok {
return x.FloatNgdotFloat64
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotInt() *FloatNgdotIntArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotInt); ok {
return x.FloatNgdotInt
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotRat() *FloatNgdotRatArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotRat); ok {
return x.FloatNgdotRat
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotAbs() *FloatNgdotAbsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotAbs); ok {
return x.FloatNgdotAbs
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotNeg() *FloatNgdotNegArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotNeg); ok {
return x.FloatNgdotNeg
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotAdd() *FloatNgdotAddArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotAdd); ok {
return x.FloatNgdotAdd
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSub() *FloatNgdotSubArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSub); ok {
return x.FloatNgdotSub
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotMul() *FloatNgdotMulArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotMul); ok {
return x.FloatNgdotMul
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotCmp() *FloatNgdotCmpArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotCmp); ok {
return x.FloatNgdotCmp
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSetString() *FloatNgdotSetStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSetString); ok {
return x.FloatNgdotSetString
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotParse() *FloatNgdotParseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotParse); ok {
return x.FloatNgdotParse
}
}
return nil
}
func (x *NgoloFuzzOne) GetParseFloat() *ParseFloatArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ParseFloat); ok {
return x.ParseFloat
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotGobEncode() *FloatNgdotGobEncodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotGobEncode); ok {
return x.FloatNgdotGobEncode
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotGobDecode() *FloatNgdotGobDecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotGobDecode); ok {
return x.FloatNgdotGobDecode
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotAppendText() *FloatNgdotAppendTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotAppendText); ok {
return x.FloatNgdotAppendText
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotMarshalText() *FloatNgdotMarshalTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotMarshalText); ok {
return x.FloatNgdotMarshalText
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotUnmarshalText() *FloatNgdotUnmarshalTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotUnmarshalText); ok {
return x.FloatNgdotUnmarshalText
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotText() *FloatNgdotTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotText); ok {
return x.FloatNgdotText
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotString() *FloatNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotString); ok {
return x.FloatNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotAppend() *FloatNgdotAppendArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotAppend); ok {
return x.FloatNgdotAppend
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotSign() *IntNgdotSignArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotSign); ok {
return x.IntNgdotSign
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotSetInt64() *IntNgdotSetInt64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotSetInt64); ok {
return x.IntNgdotSetInt64
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotSetUint64() *IntNgdotSetUint64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotSetUint64); ok {
return x.IntNgdotSetUint64
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewInt() *NewIntArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewInt); ok {
return x.NewInt
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotSet() *IntNgdotSetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotSet); ok {
return x.IntNgdotSet
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotBits() *IntNgdotBitsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotBits); ok {
return x.IntNgdotBits
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotAbs() *IntNgdotAbsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotAbs); ok {
return x.IntNgdotAbs
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotNeg() *IntNgdotNegArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotNeg); ok {
return x.IntNgdotNeg
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotAdd() *IntNgdotAddArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotAdd); ok {
return x.IntNgdotAdd
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotSub() *IntNgdotSubArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotSub); ok {
return x.IntNgdotSub
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotMul() *IntNgdotMulArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotMul); ok {
return x.IntNgdotMul
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotMulRange() *IntNgdotMulRangeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotMulRange); ok {
return x.IntNgdotMulRange
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotBinomial() *IntNgdotBinomialArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotBinomial); ok {
return x.IntNgdotBinomial
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotRem() *IntNgdotRemArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotRem); ok {
return x.IntNgdotRem
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotDiv() *IntNgdotDivArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotDiv); ok {
return x.IntNgdotDiv
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotMod() *IntNgdotModArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotMod); ok {
return x.IntNgdotMod
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotDivMod() *IntNgdotDivModArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotDivMod); ok {
return x.IntNgdotDivMod
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotCmp() *IntNgdotCmpArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotCmp); ok {
return x.IntNgdotCmp
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotCmpAbs() *IntNgdotCmpAbsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotCmpAbs); ok {
return x.IntNgdotCmpAbs
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotInt64() *IntNgdotInt64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotInt64); ok {
return x.IntNgdotInt64
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotUint64() *IntNgdotUint64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotUint64); ok {
return x.IntNgdotUint64
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotIsInt64() *IntNgdotIsInt64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotIsInt64); ok {
return x.IntNgdotIsInt64
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotIsUint64() *IntNgdotIsUint64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotIsUint64); ok {
return x.IntNgdotIsUint64
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotFloat64() *IntNgdotFloat64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotFloat64); ok {
return x.IntNgdotFloat64
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotSetString() *IntNgdotSetStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotSetString); ok {
return x.IntNgdotSetString
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotSetBytes() *IntNgdotSetBytesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotSetBytes); ok {
return x.IntNgdotSetBytes
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotBytes() *IntNgdotBytesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotBytes); ok {
return x.IntNgdotBytes
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotFillBytes() *IntNgdotFillBytesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotFillBytes); ok {
return x.IntNgdotFillBytes
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotBitLen() *IntNgdotBitLenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotBitLen); ok {
return x.IntNgdotBitLen
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotTrailingZeroBits() *IntNgdotTrailingZeroBitsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotTrailingZeroBits); ok {
return x.IntNgdotTrailingZeroBits
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotExp() *IntNgdotExpArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotExp); ok {
return x.IntNgdotExp
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotGCD() *IntNgdotGCDArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotGCD); ok {
return x.IntNgdotGCD
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotModInverse() *IntNgdotModInverseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotModInverse); ok {
return x.IntNgdotModInverse
}
}
return nil
}
func (x *NgoloFuzzOne) GetJacobi() *JacobiArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Jacobi); ok {
return x.Jacobi
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotModSqrt() *IntNgdotModSqrtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotModSqrt); ok {
return x.IntNgdotModSqrt
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotLsh() *IntNgdotLshArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotLsh); ok {
return x.IntNgdotLsh
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotRsh() *IntNgdotRshArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotRsh); ok {
return x.IntNgdotRsh
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotBit() *IntNgdotBitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotBit); ok {
return x.IntNgdotBit
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotSetBit() *IntNgdotSetBitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotSetBit); ok {
return x.IntNgdotSetBit
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotAnd() *IntNgdotAndArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotAnd); ok {
return x.IntNgdotAnd
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotAndNot() *IntNgdotAndNotArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotAndNot); ok {
return x.IntNgdotAndNot
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotOr() *IntNgdotOrArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotOr); ok {
return x.IntNgdotOr
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotXor() *IntNgdotXorArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotXor); ok {
return x.IntNgdotXor
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotNot() *IntNgdotNotArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotNot); ok {
return x.IntNgdotNot
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotSqrt() *IntNgdotSqrtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotSqrt); ok {
return x.IntNgdotSqrt
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotText() *IntNgdotTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotText); ok {
return x.IntNgdotText
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotAppend() *IntNgdotAppendArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotAppend); ok {
return x.IntNgdotAppend
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotString() *IntNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotString); ok {
return x.IntNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotGobEncode() *IntNgdotGobEncodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotGobEncode); ok {
return x.IntNgdotGobEncode
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotGobDecode() *IntNgdotGobDecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotGobDecode); ok {
return x.IntNgdotGobDecode
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotAppendText() *IntNgdotAppendTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotAppendText); ok {
return x.IntNgdotAppendText
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotMarshalText() *IntNgdotMarshalTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotMarshalText); ok {
return x.IntNgdotMarshalText
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotUnmarshalText() *IntNgdotUnmarshalTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotUnmarshalText); ok {
return x.IntNgdotUnmarshalText
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotMarshalJSON() *IntNgdotMarshalJSONArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotMarshalJSON); ok {
return x.IntNgdotMarshalJSON
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotUnmarshalJSON() *IntNgdotUnmarshalJSONArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotUnmarshalJSON); ok {
return x.IntNgdotUnmarshalJSON
}
}
return nil
}
func (x *NgoloFuzzOne) GetIntNgdotProbablyPrime() *IntNgdotProbablyPrimeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IntNgdotProbablyPrime); ok {
return x.IntNgdotProbablyPrime
}
}
return nil
}
// Machine-generated oneof accessors for the Rat-related variants (plus
// RoundingMode.String and Float.Sqrt) of the NgoloFuzzOne.Item oneof
// (protoc-gen-go style). Each GetXxx returns the *XxxArgs payload when
// x.Item currently holds the NgoloFuzzOne_Xxx wrapper variant, and nil
// otherwise. Every accessor is nil-receiver-safe; the inner `x`
// deliberately shadows the receiver with the type-asserted wrapper value.
func (x *NgoloFuzzOne) GetNewRat() *NewRatArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewRat); ok {
return x.NewRat
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotSetFloat64() *RatNgdotSetFloat64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotSetFloat64); ok {
return x.RatNgdotSetFloat64
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotFloat32() *RatNgdotFloat32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotFloat32); ok {
return x.RatNgdotFloat32
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotFloat64() *RatNgdotFloat64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotFloat64); ok {
return x.RatNgdotFloat64
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotSetFrac() *RatNgdotSetFracArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotSetFrac); ok {
return x.RatNgdotSetFrac
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotSetFrac64() *RatNgdotSetFrac64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotSetFrac64); ok {
return x.RatNgdotSetFrac64
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotSetInt() *RatNgdotSetIntArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotSetInt); ok {
return x.RatNgdotSetInt
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotSetInt64() *RatNgdotSetInt64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotSetInt64); ok {
return x.RatNgdotSetInt64
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotSetUint64() *RatNgdotSetUint64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotSetUint64); ok {
return x.RatNgdotSetUint64
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotSet() *RatNgdotSetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotSet); ok {
return x.RatNgdotSet
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotAbs() *RatNgdotAbsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotAbs); ok {
return x.RatNgdotAbs
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotNeg() *RatNgdotNegArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotNeg); ok {
return x.RatNgdotNeg
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotInv() *RatNgdotInvArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotInv); ok {
return x.RatNgdotInv
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotSign() *RatNgdotSignArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotSign); ok {
return x.RatNgdotSign
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotIsInt() *RatNgdotIsIntArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotIsInt); ok {
return x.RatNgdotIsInt
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotNum() *RatNgdotNumArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotNum); ok {
return x.RatNgdotNum
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotDenom() *RatNgdotDenomArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotDenom); ok {
return x.RatNgdotDenom
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotCmp() *RatNgdotCmpArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotCmp); ok {
return x.RatNgdotCmp
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotAdd() *RatNgdotAddArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotAdd); ok {
return x.RatNgdotAdd
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotSub() *RatNgdotSubArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotSub); ok {
return x.RatNgdotSub
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotMul() *RatNgdotMulArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotMul); ok {
return x.RatNgdotMul
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotSetString() *RatNgdotSetStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotSetString); ok {
return x.RatNgdotSetString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotString() *RatNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotString); ok {
return x.RatNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotRatString() *RatNgdotRatStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotRatString); ok {
return x.RatNgdotRatString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotFloatString() *RatNgdotFloatStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotFloatString); ok {
return x.RatNgdotFloatString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotFloatPrec() *RatNgdotFloatPrecArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotFloatPrec); ok {
return x.RatNgdotFloatPrec
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotGobEncode() *RatNgdotGobEncodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotGobEncode); ok {
return x.RatNgdotGobEncode
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotGobDecode() *RatNgdotGobDecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotGobDecode); ok {
return x.RatNgdotGobDecode
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotAppendText() *RatNgdotAppendTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotAppendText); ok {
return x.RatNgdotAppendText
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotMarshalText() *RatNgdotMarshalTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotMarshalText); ok {
return x.RatNgdotMarshalText
}
}
return nil
}
func (x *NgoloFuzzOne) GetRatNgdotUnmarshalText() *RatNgdotUnmarshalTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RatNgdotUnmarshalText); ok {
return x.RatNgdotUnmarshalText
}
}
return nil
}
func (x *NgoloFuzzOne) GetRoundingModeNgdotString() *RoundingModeNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RoundingModeNgdotString); ok {
return x.RoundingModeNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetFloatNgdotSqrt() *FloatNgdotSqrtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FloatNgdotSqrt); ok {
return x.FloatNgdotSqrt
}
}
return nil
}
// isNgoloFuzzOne_Item is the marker interface for the NgoloFuzzOne.Item
// oneof. Because its single method is unexported, only types declared in
// this package can satisfy it, which keeps the set of oneof variants
// closed (the standard protoc-gen-go sealed-oneof pattern).
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// Machine-generated wrapper types for the NgoloFuzzOne.Item oneof. Each
// NgoloFuzzOne_Xxx struct carries exactly one *XxxArgs payload; the
// protobuf struct tag assigns each variant its wire field number (1-134)
// within the oneof. These wrappers are the concrete values stored in
// NgoloFuzzOne.Item. The tags are load-bearing for (un)marshaling and
// must stay in sync with the .proto definition — do not edit by hand.
type NgoloFuzzOne_AccuracyNgdotString struct {
AccuracyNgdotString *AccuracyNgdotStringArgs `protobuf:"bytes,1,opt,name=AccuracyNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_NewFloat struct {
NewFloat *NewFloatArgs `protobuf:"bytes,2,opt,name=NewFloat,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSetPrec struct {
FloatNgdotSetPrec *FloatNgdotSetPrecArgs `protobuf:"bytes,3,opt,name=FloatNgdotSetPrec,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSetMode struct {
FloatNgdotSetMode *FloatNgdotSetModeArgs `protobuf:"bytes,4,opt,name=FloatNgdotSetMode,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotPrec struct {
FloatNgdotPrec *FloatNgdotPrecArgs `protobuf:"bytes,5,opt,name=FloatNgdotPrec,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotMinPrec struct {
FloatNgdotMinPrec *FloatNgdotMinPrecArgs `protobuf:"bytes,6,opt,name=FloatNgdotMinPrec,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotMode struct {
FloatNgdotMode *FloatNgdotModeArgs `protobuf:"bytes,7,opt,name=FloatNgdotMode,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotAcc struct {
FloatNgdotAcc *FloatNgdotAccArgs `protobuf:"bytes,8,opt,name=FloatNgdotAcc,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSign struct {
FloatNgdotSign *FloatNgdotSignArgs `protobuf:"bytes,9,opt,name=FloatNgdotSign,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotMantExp struct {
FloatNgdotMantExp *FloatNgdotMantExpArgs `protobuf:"bytes,10,opt,name=FloatNgdotMantExp,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSetMantExp struct {
FloatNgdotSetMantExp *FloatNgdotSetMantExpArgs `protobuf:"bytes,11,opt,name=FloatNgdotSetMantExp,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSignbit struct {
FloatNgdotSignbit *FloatNgdotSignbitArgs `protobuf:"bytes,12,opt,name=FloatNgdotSignbit,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotIsInf struct {
FloatNgdotIsInf *FloatNgdotIsInfArgs `protobuf:"bytes,13,opt,name=FloatNgdotIsInf,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotIsInt struct {
FloatNgdotIsInt *FloatNgdotIsIntArgs `protobuf:"bytes,14,opt,name=FloatNgdotIsInt,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSetUint64 struct {
FloatNgdotSetUint64 *FloatNgdotSetUint64Args `protobuf:"bytes,15,opt,name=FloatNgdotSetUint64,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSetInt64 struct {
FloatNgdotSetInt64 *FloatNgdotSetInt64Args `protobuf:"bytes,16,opt,name=FloatNgdotSetInt64,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSetFloat64 struct {
FloatNgdotSetFloat64 *FloatNgdotSetFloat64Args `protobuf:"bytes,17,opt,name=FloatNgdotSetFloat64,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSetInt struct {
FloatNgdotSetInt *FloatNgdotSetIntArgs `protobuf:"bytes,18,opt,name=FloatNgdotSetInt,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSetRat struct {
FloatNgdotSetRat *FloatNgdotSetRatArgs `protobuf:"bytes,19,opt,name=FloatNgdotSetRat,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSetInf struct {
FloatNgdotSetInf *FloatNgdotSetInfArgs `protobuf:"bytes,20,opt,name=FloatNgdotSetInf,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSet struct {
FloatNgdotSet *FloatNgdotSetArgs `protobuf:"bytes,21,opt,name=FloatNgdotSet,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotCopy struct {
FloatNgdotCopy *FloatNgdotCopyArgs `protobuf:"bytes,22,opt,name=FloatNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotUint64 struct {
FloatNgdotUint64 *FloatNgdotUint64Args `protobuf:"bytes,23,opt,name=FloatNgdotUint64,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotInt64 struct {
FloatNgdotInt64 *FloatNgdotInt64Args `protobuf:"bytes,24,opt,name=FloatNgdotInt64,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotFloat32 struct {
FloatNgdotFloat32 *FloatNgdotFloat32Args `protobuf:"bytes,25,opt,name=FloatNgdotFloat32,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotFloat64 struct {
FloatNgdotFloat64 *FloatNgdotFloat64Args `protobuf:"bytes,26,opt,name=FloatNgdotFloat64,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotInt struct {
FloatNgdotInt *FloatNgdotIntArgs `protobuf:"bytes,27,opt,name=FloatNgdotInt,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotRat struct {
FloatNgdotRat *FloatNgdotRatArgs `protobuf:"bytes,28,opt,name=FloatNgdotRat,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotAbs struct {
FloatNgdotAbs *FloatNgdotAbsArgs `protobuf:"bytes,29,opt,name=FloatNgdotAbs,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotNeg struct {
FloatNgdotNeg *FloatNgdotNegArgs `protobuf:"bytes,30,opt,name=FloatNgdotNeg,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotAdd struct {
FloatNgdotAdd *FloatNgdotAddArgs `protobuf:"bytes,31,opt,name=FloatNgdotAdd,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSub struct {
FloatNgdotSub *FloatNgdotSubArgs `protobuf:"bytes,32,opt,name=FloatNgdotSub,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotMul struct {
FloatNgdotMul *FloatNgdotMulArgs `protobuf:"bytes,33,opt,name=FloatNgdotMul,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotCmp struct {
FloatNgdotCmp *FloatNgdotCmpArgs `protobuf:"bytes,34,opt,name=FloatNgdotCmp,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSetString struct {
FloatNgdotSetString *FloatNgdotSetStringArgs `protobuf:"bytes,35,opt,name=FloatNgdotSetString,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotParse struct {
FloatNgdotParse *FloatNgdotParseArgs `protobuf:"bytes,36,opt,name=FloatNgdotParse,proto3,oneof"`
}
type NgoloFuzzOne_ParseFloat struct {
ParseFloat *ParseFloatArgs `protobuf:"bytes,37,opt,name=ParseFloat,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotGobEncode struct {
FloatNgdotGobEncode *FloatNgdotGobEncodeArgs `protobuf:"bytes,38,opt,name=FloatNgdotGobEncode,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotGobDecode struct {
FloatNgdotGobDecode *FloatNgdotGobDecodeArgs `protobuf:"bytes,39,opt,name=FloatNgdotGobDecode,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotAppendText struct {
FloatNgdotAppendText *FloatNgdotAppendTextArgs `protobuf:"bytes,40,opt,name=FloatNgdotAppendText,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotMarshalText struct {
FloatNgdotMarshalText *FloatNgdotMarshalTextArgs `protobuf:"bytes,41,opt,name=FloatNgdotMarshalText,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotUnmarshalText struct {
FloatNgdotUnmarshalText *FloatNgdotUnmarshalTextArgs `protobuf:"bytes,42,opt,name=FloatNgdotUnmarshalText,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotText struct {
FloatNgdotText *FloatNgdotTextArgs `protobuf:"bytes,43,opt,name=FloatNgdotText,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotString struct {
FloatNgdotString *FloatNgdotStringArgs `protobuf:"bytes,44,opt,name=FloatNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotAppend struct {
FloatNgdotAppend *FloatNgdotAppendArgs `protobuf:"bytes,45,opt,name=FloatNgdotAppend,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotSign struct {
IntNgdotSign *IntNgdotSignArgs `protobuf:"bytes,46,opt,name=IntNgdotSign,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotSetInt64 struct {
IntNgdotSetInt64 *IntNgdotSetInt64Args `protobuf:"bytes,47,opt,name=IntNgdotSetInt64,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotSetUint64 struct {
IntNgdotSetUint64 *IntNgdotSetUint64Args `protobuf:"bytes,48,opt,name=IntNgdotSetUint64,proto3,oneof"`
}
type NgoloFuzzOne_NewInt struct {
NewInt *NewIntArgs `protobuf:"bytes,49,opt,name=NewInt,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotSet struct {
IntNgdotSet *IntNgdotSetArgs `protobuf:"bytes,50,opt,name=IntNgdotSet,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotBits struct {
IntNgdotBits *IntNgdotBitsArgs `protobuf:"bytes,51,opt,name=IntNgdotBits,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotAbs struct {
IntNgdotAbs *IntNgdotAbsArgs `protobuf:"bytes,52,opt,name=IntNgdotAbs,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotNeg struct {
IntNgdotNeg *IntNgdotNegArgs `protobuf:"bytes,53,opt,name=IntNgdotNeg,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotAdd struct {
IntNgdotAdd *IntNgdotAddArgs `protobuf:"bytes,54,opt,name=IntNgdotAdd,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotSub struct {
IntNgdotSub *IntNgdotSubArgs `protobuf:"bytes,55,opt,name=IntNgdotSub,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotMul struct {
IntNgdotMul *IntNgdotMulArgs `protobuf:"bytes,56,opt,name=IntNgdotMul,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotMulRange struct {
IntNgdotMulRange *IntNgdotMulRangeArgs `protobuf:"bytes,57,opt,name=IntNgdotMulRange,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotBinomial struct {
IntNgdotBinomial *IntNgdotBinomialArgs `protobuf:"bytes,58,opt,name=IntNgdotBinomial,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotRem struct {
IntNgdotRem *IntNgdotRemArgs `protobuf:"bytes,59,opt,name=IntNgdotRem,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotDiv struct {
IntNgdotDiv *IntNgdotDivArgs `protobuf:"bytes,60,opt,name=IntNgdotDiv,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotMod struct {
IntNgdotMod *IntNgdotModArgs `protobuf:"bytes,61,opt,name=IntNgdotMod,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotDivMod struct {
IntNgdotDivMod *IntNgdotDivModArgs `protobuf:"bytes,62,opt,name=IntNgdotDivMod,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotCmp struct {
IntNgdotCmp *IntNgdotCmpArgs `protobuf:"bytes,63,opt,name=IntNgdotCmp,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotCmpAbs struct {
IntNgdotCmpAbs *IntNgdotCmpAbsArgs `protobuf:"bytes,64,opt,name=IntNgdotCmpAbs,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotInt64 struct {
IntNgdotInt64 *IntNgdotInt64Args `protobuf:"bytes,65,opt,name=IntNgdotInt64,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotUint64 struct {
IntNgdotUint64 *IntNgdotUint64Args `protobuf:"bytes,66,opt,name=IntNgdotUint64,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotIsInt64 struct {
IntNgdotIsInt64 *IntNgdotIsInt64Args `protobuf:"bytes,67,opt,name=IntNgdotIsInt64,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotIsUint64 struct {
IntNgdotIsUint64 *IntNgdotIsUint64Args `protobuf:"bytes,68,opt,name=IntNgdotIsUint64,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotFloat64 struct {
IntNgdotFloat64 *IntNgdotFloat64Args `protobuf:"bytes,69,opt,name=IntNgdotFloat64,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotSetString struct {
IntNgdotSetString *IntNgdotSetStringArgs `protobuf:"bytes,70,opt,name=IntNgdotSetString,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotSetBytes struct {
IntNgdotSetBytes *IntNgdotSetBytesArgs `protobuf:"bytes,71,opt,name=IntNgdotSetBytes,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotBytes struct {
IntNgdotBytes *IntNgdotBytesArgs `protobuf:"bytes,72,opt,name=IntNgdotBytes,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotFillBytes struct {
IntNgdotFillBytes *IntNgdotFillBytesArgs `protobuf:"bytes,73,opt,name=IntNgdotFillBytes,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotBitLen struct {
IntNgdotBitLen *IntNgdotBitLenArgs `protobuf:"bytes,74,opt,name=IntNgdotBitLen,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotTrailingZeroBits struct {
IntNgdotTrailingZeroBits *IntNgdotTrailingZeroBitsArgs `protobuf:"bytes,75,opt,name=IntNgdotTrailingZeroBits,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotExp struct {
IntNgdotExp *IntNgdotExpArgs `protobuf:"bytes,76,opt,name=IntNgdotExp,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotGCD struct {
IntNgdotGCD *IntNgdotGCDArgs `protobuf:"bytes,77,opt,name=IntNgdotGCD,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotModInverse struct {
IntNgdotModInverse *IntNgdotModInverseArgs `protobuf:"bytes,78,opt,name=IntNgdotModInverse,proto3,oneof"`
}
type NgoloFuzzOne_Jacobi struct {
Jacobi *JacobiArgs `protobuf:"bytes,79,opt,name=Jacobi,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotModSqrt struct {
IntNgdotModSqrt *IntNgdotModSqrtArgs `protobuf:"bytes,80,opt,name=IntNgdotModSqrt,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotLsh struct {
IntNgdotLsh *IntNgdotLshArgs `protobuf:"bytes,81,opt,name=IntNgdotLsh,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotRsh struct {
IntNgdotRsh *IntNgdotRshArgs `protobuf:"bytes,82,opt,name=IntNgdotRsh,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotBit struct {
IntNgdotBit *IntNgdotBitArgs `protobuf:"bytes,83,opt,name=IntNgdotBit,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotSetBit struct {
IntNgdotSetBit *IntNgdotSetBitArgs `protobuf:"bytes,84,opt,name=IntNgdotSetBit,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotAnd struct {
IntNgdotAnd *IntNgdotAndArgs `protobuf:"bytes,85,opt,name=IntNgdotAnd,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotAndNot struct {
IntNgdotAndNot *IntNgdotAndNotArgs `protobuf:"bytes,86,opt,name=IntNgdotAndNot,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotOr struct {
IntNgdotOr *IntNgdotOrArgs `protobuf:"bytes,87,opt,name=IntNgdotOr,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotXor struct {
IntNgdotXor *IntNgdotXorArgs `protobuf:"bytes,88,opt,name=IntNgdotXor,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotNot struct {
IntNgdotNot *IntNgdotNotArgs `protobuf:"bytes,89,opt,name=IntNgdotNot,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotSqrt struct {
IntNgdotSqrt *IntNgdotSqrtArgs `protobuf:"bytes,90,opt,name=IntNgdotSqrt,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotText struct {
IntNgdotText *IntNgdotTextArgs `protobuf:"bytes,91,opt,name=IntNgdotText,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotAppend struct {
IntNgdotAppend *IntNgdotAppendArgs `protobuf:"bytes,92,opt,name=IntNgdotAppend,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotString struct {
IntNgdotString *IntNgdotStringArgs `protobuf:"bytes,93,opt,name=IntNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotGobEncode struct {
IntNgdotGobEncode *IntNgdotGobEncodeArgs `protobuf:"bytes,94,opt,name=IntNgdotGobEncode,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotGobDecode struct {
IntNgdotGobDecode *IntNgdotGobDecodeArgs `protobuf:"bytes,95,opt,name=IntNgdotGobDecode,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotAppendText struct {
IntNgdotAppendText *IntNgdotAppendTextArgs `protobuf:"bytes,96,opt,name=IntNgdotAppendText,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotMarshalText struct {
IntNgdotMarshalText *IntNgdotMarshalTextArgs `protobuf:"bytes,97,opt,name=IntNgdotMarshalText,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotUnmarshalText struct {
IntNgdotUnmarshalText *IntNgdotUnmarshalTextArgs `protobuf:"bytes,98,opt,name=IntNgdotUnmarshalText,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotMarshalJSON struct {
IntNgdotMarshalJSON *IntNgdotMarshalJSONArgs `protobuf:"bytes,99,opt,name=IntNgdotMarshalJSON,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotUnmarshalJSON struct {
IntNgdotUnmarshalJSON *IntNgdotUnmarshalJSONArgs `protobuf:"bytes,100,opt,name=IntNgdotUnmarshalJSON,proto3,oneof"`
}
type NgoloFuzzOne_IntNgdotProbablyPrime struct {
IntNgdotProbablyPrime *IntNgdotProbablyPrimeArgs `protobuf:"bytes,101,opt,name=IntNgdotProbablyPrime,proto3,oneof"`
}
type NgoloFuzzOne_NewRat struct {
NewRat *NewRatArgs `protobuf:"bytes,102,opt,name=NewRat,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotSetFloat64 struct {
RatNgdotSetFloat64 *RatNgdotSetFloat64Args `protobuf:"bytes,103,opt,name=RatNgdotSetFloat64,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotFloat32 struct {
RatNgdotFloat32 *RatNgdotFloat32Args `protobuf:"bytes,104,opt,name=RatNgdotFloat32,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotFloat64 struct {
RatNgdotFloat64 *RatNgdotFloat64Args `protobuf:"bytes,105,opt,name=RatNgdotFloat64,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotSetFrac struct {
RatNgdotSetFrac *RatNgdotSetFracArgs `protobuf:"bytes,106,opt,name=RatNgdotSetFrac,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotSetFrac64 struct {
RatNgdotSetFrac64 *RatNgdotSetFrac64Args `protobuf:"bytes,107,opt,name=RatNgdotSetFrac64,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotSetInt struct {
RatNgdotSetInt *RatNgdotSetIntArgs `protobuf:"bytes,108,opt,name=RatNgdotSetInt,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotSetInt64 struct {
RatNgdotSetInt64 *RatNgdotSetInt64Args `protobuf:"bytes,109,opt,name=RatNgdotSetInt64,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotSetUint64 struct {
RatNgdotSetUint64 *RatNgdotSetUint64Args `protobuf:"bytes,110,opt,name=RatNgdotSetUint64,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotSet struct {
RatNgdotSet *RatNgdotSetArgs `protobuf:"bytes,111,opt,name=RatNgdotSet,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotAbs struct {
RatNgdotAbs *RatNgdotAbsArgs `protobuf:"bytes,112,opt,name=RatNgdotAbs,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotNeg struct {
RatNgdotNeg *RatNgdotNegArgs `protobuf:"bytes,113,opt,name=RatNgdotNeg,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotInv struct {
RatNgdotInv *RatNgdotInvArgs `protobuf:"bytes,114,opt,name=RatNgdotInv,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotSign struct {
RatNgdotSign *RatNgdotSignArgs `protobuf:"bytes,115,opt,name=RatNgdotSign,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotIsInt struct {
RatNgdotIsInt *RatNgdotIsIntArgs `protobuf:"bytes,116,opt,name=RatNgdotIsInt,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotNum struct {
RatNgdotNum *RatNgdotNumArgs `protobuf:"bytes,117,opt,name=RatNgdotNum,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotDenom struct {
RatNgdotDenom *RatNgdotDenomArgs `protobuf:"bytes,118,opt,name=RatNgdotDenom,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotCmp struct {
RatNgdotCmp *RatNgdotCmpArgs `protobuf:"bytes,119,opt,name=RatNgdotCmp,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotAdd struct {
RatNgdotAdd *RatNgdotAddArgs `protobuf:"bytes,120,opt,name=RatNgdotAdd,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotSub struct {
RatNgdotSub *RatNgdotSubArgs `protobuf:"bytes,121,opt,name=RatNgdotSub,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotMul struct {
RatNgdotMul *RatNgdotMulArgs `protobuf:"bytes,122,opt,name=RatNgdotMul,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotSetString struct {
RatNgdotSetString *RatNgdotSetStringArgs `protobuf:"bytes,123,opt,name=RatNgdotSetString,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotString struct {
RatNgdotString *RatNgdotStringArgs `protobuf:"bytes,124,opt,name=RatNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotRatString struct {
RatNgdotRatString *RatNgdotRatStringArgs `protobuf:"bytes,125,opt,name=RatNgdotRatString,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotFloatString struct {
RatNgdotFloatString *RatNgdotFloatStringArgs `protobuf:"bytes,126,opt,name=RatNgdotFloatString,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotFloatPrec struct {
RatNgdotFloatPrec *RatNgdotFloatPrecArgs `protobuf:"bytes,127,opt,name=RatNgdotFloatPrec,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotGobEncode struct {
RatNgdotGobEncode *RatNgdotGobEncodeArgs `protobuf:"bytes,128,opt,name=RatNgdotGobEncode,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotGobDecode struct {
RatNgdotGobDecode *RatNgdotGobDecodeArgs `protobuf:"bytes,129,opt,name=RatNgdotGobDecode,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotAppendText struct {
RatNgdotAppendText *RatNgdotAppendTextArgs `protobuf:"bytes,130,opt,name=RatNgdotAppendText,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotMarshalText struct {
RatNgdotMarshalText *RatNgdotMarshalTextArgs `protobuf:"bytes,131,opt,name=RatNgdotMarshalText,proto3,oneof"`
}
type NgoloFuzzOne_RatNgdotUnmarshalText struct {
RatNgdotUnmarshalText *RatNgdotUnmarshalTextArgs `protobuf:"bytes,132,opt,name=RatNgdotUnmarshalText,proto3,oneof"`
}
type NgoloFuzzOne_RoundingModeNgdotString struct {
RoundingModeNgdotString *RoundingModeNgdotStringArgs `protobuf:"bytes,133,opt,name=RoundingModeNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_FloatNgdotSqrt struct {
FloatNgdotSqrt *FloatNgdotSqrtArgs `protobuf:"bytes,134,opt,name=FloatNgdotSqrt,proto3,oneof"`
}
// Marker methods: every wrapper type implements the unexported
// isNgoloFuzzOne_Item interface, sealing the NgoloFuzzOne oneof so that
// only generated wrapper types can be assigned to Item.
func (*NgoloFuzzOne_AccuracyNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewFloat) isNgoloFuzzOne_Item()            {}
func (*NgoloFuzzOne_FloatNgdotSetPrec) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_FloatNgdotSetMode) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_FloatNgdotPrec) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_FloatNgdotMinPrec) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_FloatNgdotMode) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_FloatNgdotAcc) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_FloatNgdotSign) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_FloatNgdotMantExp) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_FloatNgdotSetMantExp) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FloatNgdotSignbit) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_FloatNgdotIsInf) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_FloatNgdotIsInt) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_FloatNgdotSetUint64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FloatNgdotSetInt64) isNgoloFuzzOne_Item()  {}
func (*NgoloFuzzOne_FloatNgdotSetFloat64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FloatNgdotSetInt) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_FloatNgdotSetRat) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_FloatNgdotSetInf) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_FloatNgdotSet) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_FloatNgdotCopy) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_FloatNgdotUint64) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_FloatNgdotInt64) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_FloatNgdotFloat32) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_FloatNgdotFloat64) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_FloatNgdotInt) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_FloatNgdotRat) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_FloatNgdotAbs) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_FloatNgdotNeg) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_FloatNgdotAdd) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_FloatNgdotSub) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_FloatNgdotMul) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_FloatNgdotCmp) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_FloatNgdotSetString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FloatNgdotParse) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_ParseFloat) isNgoloFuzzOne_Item()          {}
func (*NgoloFuzzOne_FloatNgdotGobEncode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FloatNgdotGobDecode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FloatNgdotAppendText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FloatNgdotMarshalText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FloatNgdotUnmarshalText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FloatNgdotText) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_FloatNgdotString) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_FloatNgdotAppend) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_IntNgdotSign) isNgoloFuzzOne_Item()        {}
func (*NgoloFuzzOne_IntNgdotSetInt64) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_IntNgdotSetUint64) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_NewInt) isNgoloFuzzOne_Item()              {}
func (*NgoloFuzzOne_IntNgdotSet) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotBits) isNgoloFuzzOne_Item()        {}
func (*NgoloFuzzOne_IntNgdotAbs) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotNeg) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotAdd) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotSub) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotMul) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotMulRange) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_IntNgdotBinomial) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_IntNgdotRem) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotDiv) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotMod) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotDivMod) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_IntNgdotCmp) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotCmpAbs) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_IntNgdotInt64) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_IntNgdotUint64) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_IntNgdotIsInt64) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_IntNgdotIsUint64) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_IntNgdotFloat64) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_IntNgdotSetString) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_IntNgdotSetBytes) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_IntNgdotBytes) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_IntNgdotFillBytes) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_IntNgdotBitLen) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_IntNgdotTrailingZeroBits) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IntNgdotExp) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotGCD) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotModInverse) isNgoloFuzzOne_Item()  {}
func (*NgoloFuzzOne_Jacobi) isNgoloFuzzOne_Item()              {}
func (*NgoloFuzzOne_IntNgdotModSqrt) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_IntNgdotLsh) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotRsh) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotBit) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotSetBit) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_IntNgdotAnd) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotAndNot) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_IntNgdotOr) isNgoloFuzzOne_Item()          {}
func (*NgoloFuzzOne_IntNgdotXor) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotNot) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_IntNgdotSqrt) isNgoloFuzzOne_Item()        {}
func (*NgoloFuzzOne_IntNgdotText) isNgoloFuzzOne_Item()        {}
func (*NgoloFuzzOne_IntNgdotAppend) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_IntNgdotString) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_IntNgdotGobEncode) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_IntNgdotGobDecode) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_IntNgdotAppendText) isNgoloFuzzOne_Item()  {}
func (*NgoloFuzzOne_IntNgdotMarshalText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IntNgdotUnmarshalText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IntNgdotMarshalJSON) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IntNgdotUnmarshalJSON) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IntNgdotProbablyPrime) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewRat) isNgoloFuzzOne_Item()              {}
func (*NgoloFuzzOne_RatNgdotSetFloat64) isNgoloFuzzOne_Item()  {}
func (*NgoloFuzzOne_RatNgdotFloat32) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_RatNgdotFloat64) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_RatNgdotSetFrac) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_RatNgdotSetFrac64) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_RatNgdotSetInt) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_RatNgdotSetInt64) isNgoloFuzzOne_Item()    {}
func (*NgoloFuzzOne_RatNgdotSetUint64) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_RatNgdotSet) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_RatNgdotAbs) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_RatNgdotNeg) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_RatNgdotInv) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_RatNgdotSign) isNgoloFuzzOne_Item()        {}
func (*NgoloFuzzOne_RatNgdotIsInt) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_RatNgdotNum) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_RatNgdotDenom) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_RatNgdotCmp) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_RatNgdotAdd) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_RatNgdotSub) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_RatNgdotMul) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_RatNgdotSetString) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_RatNgdotString) isNgoloFuzzOne_Item()      {}
func (*NgoloFuzzOne_RatNgdotRatString) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_RatNgdotFloatString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RatNgdotFloatPrec) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_RatNgdotGobEncode) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_RatNgdotGobDecode) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_RatNgdotAppendText) isNgoloFuzzOne_Item()  {}
func (*NgoloFuzzOne_RatNgdotMarshalText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RatNgdotUnmarshalText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RoundingModeNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FloatNgdotSqrt) isNgoloFuzzOne_Item()      {}
// NgoloFuzzAny is a protobuf message carrying exactly one scalar value —
// double, int64, bool, string, or bytes — via the "item" oneof. The
// unexported state/sizeCache/unknownFields fields are protoimpl runtime
// bookkeeping and must not be touched by hand.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the empty message state and reattaches the
// generated message metadata (msgTypes slot 135).
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[135])
}
// String renders the message with the protobuf text formatter.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NgoloFuzzAny as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily binding
// the message metadata on first use of a live receiver.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[135]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}
// Descriptor returns the gzip-compressed raw file descriptor and this
// message's index path (135) within it.
//
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{135}
}
// GetItem returns the populated oneof wrapper, or nil for a nil receiver.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x == nil {
		return nil
	}
	return x.Item
}
// GetDoubleArgs returns the double variant of the oneof, or 0 when the
// receiver is nil or another variant is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x == nil {
		return 0
	}
	if v, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
		return v.DoubleArgs
	}
	return 0
}
// GetInt64Args returns the int64 variant of the oneof, or 0 when the
// receiver is nil or another variant is set.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x == nil {
		return 0
	}
	if v, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
		return v.Int64Args
	}
	return 0
}
// GetBoolArgs returns the bool variant of the oneof, or false when the
// receiver is nil or another variant is set.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x == nil {
		return false
	}
	if v, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
		return v.BoolArgs
	}
	return false
}
// GetStringArgs returns the string variant of the oneof, or "" when the
// receiver is nil or another variant is set.
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x == nil {
		return ""
	}
	if v, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
		return v.StringArgs
	}
	return ""
}
// GetBytesArgs returns the bytes variant of the oneof, or nil when the
// receiver is nil or another variant is set.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x == nil {
		return nil
	}
	if v, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
		return v.BytesArgs
	}
	return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented by every
// NgoloFuzzAny oneof wrapper below; being unexported, no type outside
// this package can satisfy it.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// Wrapper types, one per "item" oneof variant (field numbers 1-5).
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

// Marker methods sealing the oneof to the generated wrapper types.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item()  {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item()   {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item()  {}
// NgoloFuzzList is the top-level fuzz input: a repeated sequence of
// NgoloFuzzOne items, each describing one operation to perform.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset restores x to the empty message state and reattaches the
// generated message metadata (msgTypes slot 136).
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[136])
}
// String renders the message with the protobuf text formatter.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NgoloFuzzList as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily binding
// the message metadata on first use of a live receiver.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[136]
	if x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}
// Descriptor returns the gzip-compressed raw file descriptor and this
// message's index path (136) within it.
//
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{136}
}
// GetList returns the sequence of fuzz operations, or nil for a nil
// receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x == nil {
		return nil
	}
	return x.List
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto;
// presumably assigned by the generated file initializer (not visible in
// this chunk) from the raw descriptor bytes below.
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x19\n" +
"\x17AccuracyNgdotStringArgs\"\x1c\n" +
"\fNewFloatArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x01R\x01x\"+\n" +
"\x15FloatNgdotSetPrecArgs\x12\x12\n" +
"\x04prec\x18\x01 \x01(\rR\x04prec\"\x17\n" +
"\x15FloatNgdotSetModeArgs\"\x14\n" +
"\x12FloatNgdotPrecArgs\"\x17\n" +
"\x15FloatNgdotMinPrecArgs\"\x14\n" +
"\x12FloatNgdotModeArgs\"\x13\n" +
"\x11FloatNgdotAccArgs\"\x14\n" +
"\x12FloatNgdotSignArgs\"\x17\n" +
"\x15FloatNgdotMantExpArgs\",\n" +
"\x18FloatNgdotSetMantExpArgs\x12\x10\n" +
"\x03exp\x18\x01 \x01(\x03R\x03exp\"\x17\n" +
"\x15FloatNgdotSignbitArgs\"\x15\n" +
"\x13FloatNgdotIsInfArgs\"\x15\n" +
"\x13FloatNgdotIsIntArgs\"'\n" +
"\x17FloatNgdotSetUint64Args\x12\f\n" +
"\x01x\x18\x01 \x01(\x04R\x01x\"&\n" +
"\x16FloatNgdotSetInt64Args\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\"(\n" +
"\x18FloatNgdotSetFloat64Args\x12\f\n" +
"\x01x\x18\x01 \x01(\x01R\x01x\"\x16\n" +
"\x14FloatNgdotSetIntArgs\"\x16\n" +
"\x14FloatNgdotSetRatArgs\"0\n" +
"\x14FloatNgdotSetInfArgs\x12\x18\n" +
"\asignbit\x18\x01 \x01(\bR\asignbit\"\x13\n" +
"\x11FloatNgdotSetArgs\"\x14\n" +
"\x12FloatNgdotCopyArgs\"\x16\n" +
"\x14FloatNgdotUint64Args\"\x15\n" +
"\x13FloatNgdotInt64Args\"\x17\n" +
"\x15FloatNgdotFloat32Args\"\x17\n" +
"\x15FloatNgdotFloat64Args\"\x13\n" +
"\x11FloatNgdotIntArgs\"\x13\n" +
"\x11FloatNgdotRatArgs\"\x13\n" +
"\x11FloatNgdotAbsArgs\"\x13\n" +
"\x11FloatNgdotNegArgs\"\x13\n" +
"\x11FloatNgdotAddArgs\"\x13\n" +
"\x11FloatNgdotSubArgs\"\x13\n" +
"\x11FloatNgdotMulArgs\"\x13\n" +
"\x11FloatNgdotCmpArgs\"'\n" +
"\x17FloatNgdotSetStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"7\n" +
"\x13FloatNgdotParseArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x12\n" +
"\x04base\x18\x02 \x01(\x03R\x04base\"F\n" +
"\x0eParseFloatArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x12\n" +
"\x04base\x18\x02 \x01(\x03R\x04base\x12\x12\n" +
"\x04prec\x18\x03 \x01(\rR\x04prec\"\x19\n" +
"\x17FloatNgdotGobEncodeArgs\"+\n" +
"\x17FloatNgdotGobDecodeArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\"(\n" +
"\x18FloatNgdotAppendTextArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"\x1b\n" +
"\x19FloatNgdotMarshalTextArgs\"1\n" +
"\x1bFloatNgdotUnmarshalTextArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\fR\x04text\"@\n" +
"\x12FloatNgdotTextArgs\x12\x16\n" +
"\x06format\x18\x01 \x01(\rR\x06format\x12\x12\n" +
"\x04prec\x18\x02 \x01(\x03R\x04prec\"\x16\n" +
"\x14FloatNgdotStringArgs\"N\n" +
"\x14FloatNgdotAppendArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\x12\x10\n" +
"\x03fmt\x18\x02 \x01(\rR\x03fmt\x12\x12\n" +
"\x04prec\x18\x03 \x01(\x03R\x04prec\"\x12\n" +
"\x10IntNgdotSignArgs\"$\n" +
"\x14IntNgdotSetInt64Args\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\"%\n" +
"\x15IntNgdotSetUint64Args\x12\f\n" +
"\x01x\x18\x01 \x01(\x04R\x01x\"\x1a\n" +
"\n" +
"NewIntArgs\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\"\x11\n" +
"\x0fIntNgdotSetArgs\"\x12\n" +
"\x10IntNgdotBitsArgs\"\x11\n" +
"\x0fIntNgdotAbsArgs\"\x11\n" +
"\x0fIntNgdotNegArgs\"\x11\n" +
"\x0fIntNgdotAddArgs\"\x11\n" +
"\x0fIntNgdotSubArgs\"\x11\n" +
"\x0fIntNgdotMulArgs\"2\n" +
"\x14IntNgdotMulRangeArgs\x12\f\n" +
"\x01a\x18\x01 \x01(\x03R\x01a\x12\f\n" +
"\x01b\x18\x02 \x01(\x03R\x01b\"2\n" +
"\x14IntNgdotBinomialArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\x12\f\n" +
"\x01k\x18\x02 \x01(\x03R\x01k\"\x11\n" +
"\x0fIntNgdotRemArgs\"\x11\n" +
"\x0fIntNgdotDivArgs\"\x11\n" +
"\x0fIntNgdotModArgs\"\x14\n" +
"\x12IntNgdotDivModArgs\"\x11\n" +
"\x0fIntNgdotCmpArgs\"\x14\n" +
"\x12IntNgdotCmpAbsArgs\"\x13\n" +
"\x11IntNgdotInt64Args\"\x14\n" +
"\x12IntNgdotUint64Args\"\x15\n" +
"\x13IntNgdotIsInt64Args\"\x16\n" +
"\x14IntNgdotIsUint64Args\"\x15\n" +
"\x13IntNgdotFloat64Args\"9\n" +
"\x15IntNgdotSetStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x12\n" +
"\x04base\x18\x02 \x01(\x03R\x04base\"(\n" +
"\x14IntNgdotSetBytesArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\"\x13\n" +
"\x11IntNgdotBytesArgs\")\n" +
"\x15IntNgdotFillBytesArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\"\x14\n" +
"\x12IntNgdotBitLenArgs\"\x1e\n" +
"\x1cIntNgdotTrailingZeroBitsArgs\"\x11\n" +
"\x0fIntNgdotExpArgs\"\x11\n" +
"\x0fIntNgdotGCDArgs\"\x18\n" +
"\x16IntNgdotModInverseArgs\"\f\n" +
"\n" +
"JacobiArgs\"\x15\n" +
"\x13IntNgdotModSqrtArgs\"\x1f\n" +
"\x0fIntNgdotLshArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\rR\x01n\"\x1f\n" +
"\x0fIntNgdotRshArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\rR\x01n\"\x1f\n" +
"\x0fIntNgdotBitArgs\x12\f\n" +
"\x01i\x18\x01 \x01(\x03R\x01i\"0\n" +
"\x12IntNgdotSetBitArgs\x12\f\n" +
"\x01i\x18\x01 \x01(\x03R\x01i\x12\f\n" +
"\x01b\x18\x02 \x01(\rR\x01b\"\x11\n" +
"\x0fIntNgdotAndArgs\"\x14\n" +
"\x12IntNgdotAndNotArgs\"\x10\n" +
"\x0eIntNgdotOrArgs\"\x11\n" +
"\x0fIntNgdotXorArgs\"\x11\n" +
"\x0fIntNgdotNotArgs\"\x12\n" +
"\x10IntNgdotSqrtArgs\"&\n" +
"\x10IntNgdotTextArgs\x12\x12\n" +
"\x04base\x18\x01 \x01(\x03R\x04base\":\n" +
"\x12IntNgdotAppendArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\x12\x12\n" +
"\x04base\x18\x02 \x01(\x03R\x04base\"\x14\n" +
"\x12IntNgdotStringArgs\"\x17\n" +
"\x15IntNgdotGobEncodeArgs\")\n" +
"\x15IntNgdotGobDecodeArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\"&\n" +
"\x16IntNgdotAppendTextArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"\x19\n" +
"\x17IntNgdotMarshalTextArgs\"/\n" +
"\x19IntNgdotUnmarshalTextArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\fR\x04text\"\x19\n" +
"\x17IntNgdotMarshalJSONArgs\"/\n" +
"\x19IntNgdotUnmarshalJSONArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\fR\x04text\")\n" +
"\x19IntNgdotProbablyPrimeArgs\x12\f\n" +
"\x01n\x18\x01 \x01(\x03R\x01n\"(\n" +
"\n" +
"NewRatArgs\x12\f\n" +
"\x01a\x18\x01 \x01(\x03R\x01a\x12\f\n" +
"\x01b\x18\x02 \x01(\x03R\x01b\"&\n" +
"\x16RatNgdotSetFloat64Args\x12\f\n" +
"\x01f\x18\x01 \x01(\x01R\x01f\"\x15\n" +
"\x13RatNgdotFloat32Args\"\x15\n" +
"\x13RatNgdotFloat64Args\"\x15\n" +
"\x13RatNgdotSetFracArgs\"3\n" +
"\x15RatNgdotSetFrac64Args\x12\f\n" +
"\x01a\x18\x01 \x01(\x03R\x01a\x12\f\n" +
"\x01b\x18\x02 \x01(\x03R\x01b\"\x14\n" +
"\x12RatNgdotSetIntArgs\"$\n" +
"\x14RatNgdotSetInt64Args\x12\f\n" +
"\x01x\x18\x01 \x01(\x03R\x01x\"%\n" +
"\x15RatNgdotSetUint64Args\x12\f\n" +
"\x01x\x18\x01 \x01(\x04R\x01x\"\x11\n" +
"\x0fRatNgdotSetArgs\"\x11\n" +
"\x0fRatNgdotAbsArgs\"\x11\n" +
"\x0fRatNgdotNegArgs\"\x11\n" +
"\x0fRatNgdotInvArgs\"\x12\n" +
"\x10RatNgdotSignArgs\"\x13\n" +
"\x11RatNgdotIsIntArgs\"\x11\n" +
"\x0fRatNgdotNumArgs\"\x13\n" +
"\x11RatNgdotDenomArgs\"\x11\n" +
"\x0fRatNgdotCmpArgs\"\x11\n" +
"\x0fRatNgdotAddArgs\"\x11\n" +
"\x0fRatNgdotSubArgs\"\x11\n" +
"\x0fRatNgdotMulArgs\"%\n" +
"\x15RatNgdotSetStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x14\n" +
"\x12RatNgdotStringArgs\"\x17\n" +
"\x15RatNgdotRatStringArgs\"-\n" +
"\x17RatNgdotFloatStringArgs\x12\x12\n" +
"\x04prec\x18\x01 \x01(\x03R\x04prec\"\x17\n" +
"\x15RatNgdotFloatPrecArgs\"\x17\n" +
"\x15RatNgdotGobEncodeArgs\")\n" +
"\x15RatNgdotGobDecodeArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\"&\n" +
"\x16RatNgdotAppendTextArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"\x19\n" +
"\x17RatNgdotMarshalTextArgs\"/\n" +
"\x19RatNgdotUnmarshalTextArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\fR\x04text\"\x1d\n" +
"\x1bRoundingModeNgdotStringArgs\"\x14\n" +
"\x12FloatNgdotSqrtArgs\"\xedN\n" +
"\fNgoloFuzzOne\x12V\n" +
"\x13AccuracyNgdotString\x18\x01 \x01(\v2\".ngolofuzz.AccuracyNgdotStringArgsH\x00R\x13AccuracyNgdotString\x125\n" +
"\bNewFloat\x18\x02 \x01(\v2\x17.ngolofuzz.NewFloatArgsH\x00R\bNewFloat\x12P\n" +
"\x11FloatNgdotSetPrec\x18\x03 \x01(\v2 .ngolofuzz.FloatNgdotSetPrecArgsH\x00R\x11FloatNgdotSetPrec\x12P\n" +
"\x11FloatNgdotSetMode\x18\x04 \x01(\v2 .ngolofuzz.FloatNgdotSetModeArgsH\x00R\x11FloatNgdotSetMode\x12G\n" +
"\x0eFloatNgdotPrec\x18\x05 \x01(\v2\x1d.ngolofuzz.FloatNgdotPrecArgsH\x00R\x0eFloatNgdotPrec\x12P\n" +
"\x11FloatNgdotMinPrec\x18\x06 \x01(\v2 .ngolofuzz.FloatNgdotMinPrecArgsH\x00R\x11FloatNgdotMinPrec\x12G\n" +
"\x0eFloatNgdotMode\x18\a \x01(\v2\x1d.ngolofuzz.FloatNgdotModeArgsH\x00R\x0eFloatNgdotMode\x12D\n" +
"\rFloatNgdotAcc\x18\b \x01(\v2\x1c.ngolofuzz.FloatNgdotAccArgsH\x00R\rFloatNgdotAcc\x12G\n" +
"\x0eFloatNgdotSign\x18\t \x01(\v2\x1d.ngolofuzz.FloatNgdotSignArgsH\x00R\x0eFloatNgdotSign\x12P\n" +
"\x11FloatNgdotMantExp\x18\n" +
" \x01(\v2 .ngolofuzz.FloatNgdotMantExpArgsH\x00R\x11FloatNgdotMantExp\x12Y\n" +
"\x14FloatNgdotSetMantExp\x18\v \x01(\v2#.ngolofuzz.FloatNgdotSetMantExpArgsH\x00R\x14FloatNgdotSetMantExp\x12P\n" +
"\x11FloatNgdotSignbit\x18\f \x01(\v2 .ngolofuzz.FloatNgdotSignbitArgsH\x00R\x11FloatNgdotSignbit\x12J\n" +
"\x0fFloatNgdotIsInf\x18\r \x01(\v2\x1e.ngolofuzz.FloatNgdotIsInfArgsH\x00R\x0fFloatNgdotIsInf\x12J\n" +
"\x0fFloatNgdotIsInt\x18\x0e \x01(\v2\x1e.ngolofuzz.FloatNgdotIsIntArgsH\x00R\x0fFloatNgdotIsInt\x12V\n" +
"\x13FloatNgdotSetUint64\x18\x0f \x01(\v2\".ngolofuzz.FloatNgdotSetUint64ArgsH\x00R\x13FloatNgdotSetUint64\x12S\n" +
"\x12FloatNgdotSetInt64\x18\x10 \x01(\v2!.ngolofuzz.FloatNgdotSetInt64ArgsH\x00R\x12FloatNgdotSetInt64\x12Y\n" +
"\x14FloatNgdotSetFloat64\x18\x11 \x01(\v2#.ngolofuzz.FloatNgdotSetFloat64ArgsH\x00R\x14FloatNgdotSetFloat64\x12M\n" +
"\x10FloatNgdotSetInt\x18\x12 \x01(\v2\x1f.ngolofuzz.FloatNgdotSetIntArgsH\x00R\x10FloatNgdotSetInt\x12M\n" +
"\x10FloatNgdotSetRat\x18\x13 \x01(\v2\x1f.ngolofuzz.FloatNgdotSetRatArgsH\x00R\x10FloatNgdotSetRat\x12M\n" +
"\x10FloatNgdotSetInf\x18\x14 \x01(\v2\x1f.ngolofuzz.FloatNgdotSetInfArgsH\x00R\x10FloatNgdotSetInf\x12D\n" +
"\rFloatNgdotSet\x18\x15 \x01(\v2\x1c.ngolofuzz.FloatNgdotSetArgsH\x00R\rFloatNgdotSet\x12G\n" +
"\x0eFloatNgdotCopy\x18\x16 \x01(\v2\x1d.ngolofuzz.FloatNgdotCopyArgsH\x00R\x0eFloatNgdotCopy\x12M\n" +
"\x10FloatNgdotUint64\x18\x17 \x01(\v2\x1f.ngolofuzz.FloatNgdotUint64ArgsH\x00R\x10FloatNgdotUint64\x12J\n" +
"\x0fFloatNgdotInt64\x18\x18 \x01(\v2\x1e.ngolofuzz.FloatNgdotInt64ArgsH\x00R\x0fFloatNgdotInt64\x12P\n" +
"\x11FloatNgdotFloat32\x18\x19 \x01(\v2 .ngolofuzz.FloatNgdotFloat32ArgsH\x00R\x11FloatNgdotFloat32\x12P\n" +
"\x11FloatNgdotFloat64\x18\x1a \x01(\v2 .ngolofuzz.FloatNgdotFloat64ArgsH\x00R\x11FloatNgdotFloat64\x12D\n" +
"\rFloatNgdotInt\x18\x1b \x01(\v2\x1c.ngolofuzz.FloatNgdotIntArgsH\x00R\rFloatNgdotInt\x12D\n" +
"\rFloatNgdotRat\x18\x1c \x01(\v2\x1c.ngolofuzz.FloatNgdotRatArgsH\x00R\rFloatNgdotRat\x12D\n" +
"\rFloatNgdotAbs\x18\x1d \x01(\v2\x1c.ngolofuzz.FloatNgdotAbsArgsH\x00R\rFloatNgdotAbs\x12D\n" +
"\rFloatNgdotNeg\x18\x1e \x01(\v2\x1c.ngolofuzz.FloatNgdotNegArgsH\x00R\rFloatNgdotNeg\x12D\n" +
"\rFloatNgdotAdd\x18\x1f \x01(\v2\x1c.ngolofuzz.FloatNgdotAddArgsH\x00R\rFloatNgdotAdd\x12D\n" +
"\rFloatNgdotSub\x18 \x01(\v2\x1c.ngolofuzz.FloatNgdotSubArgsH\x00R\rFloatNgdotSub\x12D\n" +
"\rFloatNgdotMul\x18! \x01(\v2\x1c.ngolofuzz.FloatNgdotMulArgsH\x00R\rFloatNgdotMul\x12D\n" +
"\rFloatNgdotCmp\x18\" \x01(\v2\x1c.ngolofuzz.FloatNgdotCmpArgsH\x00R\rFloatNgdotCmp\x12V\n" +
"\x13FloatNgdotSetString\x18# \x01(\v2\".ngolofuzz.FloatNgdotSetStringArgsH\x00R\x13FloatNgdotSetString\x12J\n" +
"\x0fFloatNgdotParse\x18$ \x01(\v2\x1e.ngolofuzz.FloatNgdotParseArgsH\x00R\x0fFloatNgdotParse\x12;\n" +
"\n" +
"ParseFloat\x18% \x01(\v2\x19.ngolofuzz.ParseFloatArgsH\x00R\n" +
"ParseFloat\x12V\n" +
"\x13FloatNgdotGobEncode\x18& \x01(\v2\".ngolofuzz.FloatNgdotGobEncodeArgsH\x00R\x13FloatNgdotGobEncode\x12V\n" +
"\x13FloatNgdotGobDecode\x18' \x01(\v2\".ngolofuzz.FloatNgdotGobDecodeArgsH\x00R\x13FloatNgdotGobDecode\x12Y\n" +
"\x14FloatNgdotAppendText\x18( \x01(\v2#.ngolofuzz.FloatNgdotAppendTextArgsH\x00R\x14FloatNgdotAppendText\x12\\\n" +
"\x15FloatNgdotMarshalText\x18) \x01(\v2$.ngolofuzz.FloatNgdotMarshalTextArgsH\x00R\x15FloatNgdotMarshalText\x12b\n" +
"\x17FloatNgdotUnmarshalText\x18* \x01(\v2&.ngolofuzz.FloatNgdotUnmarshalTextArgsH\x00R\x17FloatNgdotUnmarshalText\x12G\n" +
"\x0eFloatNgdotText\x18+ \x01(\v2\x1d.ngolofuzz.FloatNgdotTextArgsH\x00R\x0eFloatNgdotText\x12M\n" +
"\x10FloatNgdotString\x18, \x01(\v2\x1f.ngolofuzz.FloatNgdotStringArgsH\x00R\x10FloatNgdotString\x12M\n" +
"\x10FloatNgdotAppend\x18- \x01(\v2\x1f.ngolofuzz.FloatNgdotAppendArgsH\x00R\x10FloatNgdotAppend\x12A\n" +
"\fIntNgdotSign\x18. \x01(\v2\x1b.ngolofuzz.IntNgdotSignArgsH\x00R\fIntNgdotSign\x12M\n" +
"\x10IntNgdotSetInt64\x18/ \x01(\v2\x1f.ngolofuzz.IntNgdotSetInt64ArgsH\x00R\x10IntNgdotSetInt64\x12P\n" +
"\x11IntNgdotSetUint64\x180 \x01(\v2 .ngolofuzz.IntNgdotSetUint64ArgsH\x00R\x11IntNgdotSetUint64\x12/\n" +
"\x06NewInt\x181 \x01(\v2\x15.ngolofuzz.NewIntArgsH\x00R\x06NewInt\x12>\n" +
"\vIntNgdotSet\x182 \x01(\v2\x1a.ngolofuzz.IntNgdotSetArgsH\x00R\vIntNgdotSet\x12A\n" +
"\fIntNgdotBits\x183 \x01(\v2\x1b.ngolofuzz.IntNgdotBitsArgsH\x00R\fIntNgdotBits\x12>\n" +
"\vIntNgdotAbs\x184 \x01(\v2\x1a.ngolofuzz.IntNgdotAbsArgsH\x00R\vIntNgdotAbs\x12>\n" +
"\vIntNgdotNeg\x185 \x01(\v2\x1a.ngolofuzz.IntNgdotNegArgsH\x00R\vIntNgdotNeg\x12>\n" +
"\vIntNgdotAdd\x186 \x01(\v2\x1a.ngolofuzz.IntNgdotAddArgsH\x00R\vIntNgdotAdd\x12>\n" +
"\vIntNgdotSub\x187 \x01(\v2\x1a.ngolofuzz.IntNgdotSubArgsH\x00R\vIntNgdotSub\x12>\n" +
"\vIntNgdotMul\x188 \x01(\v2\x1a.ngolofuzz.IntNgdotMulArgsH\x00R\vIntNgdotMul\x12M\n" +
"\x10IntNgdotMulRange\x189 \x01(\v2\x1f.ngolofuzz.IntNgdotMulRangeArgsH\x00R\x10IntNgdotMulRange\x12M\n" +
"\x10IntNgdotBinomial\x18: \x01(\v2\x1f.ngolofuzz.IntNgdotBinomialArgsH\x00R\x10IntNgdotBinomial\x12>\n" +
"\vIntNgdotRem\x18; \x01(\v2\x1a.ngolofuzz.IntNgdotRemArgsH\x00R\vIntNgdotRem\x12>\n" +
"\vIntNgdotDiv\x18< \x01(\v2\x1a.ngolofuzz.IntNgdotDivArgsH\x00R\vIntNgdotDiv\x12>\n" +
"\vIntNgdotMod\x18= \x01(\v2\x1a.ngolofuzz.IntNgdotModArgsH\x00R\vIntNgdotMod\x12G\n" +
"\x0eIntNgdotDivMod\x18> \x01(\v2\x1d.ngolofuzz.IntNgdotDivModArgsH\x00R\x0eIntNgdotDivMod\x12>\n" +
"\vIntNgdotCmp\x18? \x01(\v2\x1a.ngolofuzz.IntNgdotCmpArgsH\x00R\vIntNgdotCmp\x12G\n" +
"\x0eIntNgdotCmpAbs\x18@ \x01(\v2\x1d.ngolofuzz.IntNgdotCmpAbsArgsH\x00R\x0eIntNgdotCmpAbs\x12D\n" +
"\rIntNgdotInt64\x18A \x01(\v2\x1c.ngolofuzz.IntNgdotInt64ArgsH\x00R\rIntNgdotInt64\x12G\n" +
"\x0eIntNgdotUint64\x18B \x01(\v2\x1d.ngolofuzz.IntNgdotUint64ArgsH\x00R\x0eIntNgdotUint64\x12J\n" +
"\x0fIntNgdotIsInt64\x18C \x01(\v2\x1e.ngolofuzz.IntNgdotIsInt64ArgsH\x00R\x0fIntNgdotIsInt64\x12M\n" +
"\x10IntNgdotIsUint64\x18D \x01(\v2\x1f.ngolofuzz.IntNgdotIsUint64ArgsH\x00R\x10IntNgdotIsUint64\x12J\n" +
"\x0fIntNgdotFloat64\x18E \x01(\v2\x1e.ngolofuzz.IntNgdotFloat64ArgsH\x00R\x0fIntNgdotFloat64\x12P\n" +
"\x11IntNgdotSetString\x18F \x01(\v2 .ngolofuzz.IntNgdotSetStringArgsH\x00R\x11IntNgdotSetString\x12M\n" +
"\x10IntNgdotSetBytes\x18G \x01(\v2\x1f.ngolofuzz.IntNgdotSetBytesArgsH\x00R\x10IntNgdotSetBytes\x12D\n" +
"\rIntNgdotBytes\x18H \x01(\v2\x1c.ngolofuzz.IntNgdotBytesArgsH\x00R\rIntNgdotBytes\x12P\n" +
"\x11IntNgdotFillBytes\x18I \x01(\v2 .ngolofuzz.IntNgdotFillBytesArgsH\x00R\x11IntNgdotFillBytes\x12G\n" +
"\x0eIntNgdotBitLen\x18J \x01(\v2\x1d.ngolofuzz.IntNgdotBitLenArgsH\x00R\x0eIntNgdotBitLen\x12e\n" +
"\x18IntNgdotTrailingZeroBits\x18K \x01(\v2'.ngolofuzz.IntNgdotTrailingZeroBitsArgsH\x00R\x18IntNgdotTrailingZeroBits\x12>\n" +
"\vIntNgdotExp\x18L \x01(\v2\x1a.ngolofuzz.IntNgdotExpArgsH\x00R\vIntNgdotExp\x12>\n" +
"\vIntNgdotGCD\x18M \x01(\v2\x1a.ngolofuzz.IntNgdotGCDArgsH\x00R\vIntNgdotGCD\x12S\n" +
"\x12IntNgdotModInverse\x18N \x01(\v2!.ngolofuzz.IntNgdotModInverseArgsH\x00R\x12IntNgdotModInverse\x12/\n" +
"\x06Jacobi\x18O \x01(\v2\x15.ngolofuzz.JacobiArgsH\x00R\x06Jacobi\x12J\n" +
"\x0fIntNgdotModSqrt\x18P \x01(\v2\x1e.ngolofuzz.IntNgdotModSqrtArgsH\x00R\x0fIntNgdotModSqrt\x12>\n" +
"\vIntNgdotLsh\x18Q \x01(\v2\x1a.ngolofuzz.IntNgdotLshArgsH\x00R\vIntNgdotLsh\x12>\n" +
"\vIntNgdotRsh\x18R \x01(\v2\x1a.ngolofuzz.IntNgdotRshArgsH\x00R\vIntNgdotRsh\x12>\n" +
"\vIntNgdotBit\x18S \x01(\v2\x1a.ngolofuzz.IntNgdotBitArgsH\x00R\vIntNgdotBit\x12G\n" +
"\x0eIntNgdotSetBit\x18T \x01(\v2\x1d.ngolofuzz.IntNgdotSetBitArgsH\x00R\x0eIntNgdotSetBit\x12>\n" +
"\vIntNgdotAnd\x18U \x01(\v2\x1a.ngolofuzz.IntNgdotAndArgsH\x00R\vIntNgdotAnd\x12G\n" +
"\x0eIntNgdotAndNot\x18V \x01(\v2\x1d.ngolofuzz.IntNgdotAndNotArgsH\x00R\x0eIntNgdotAndNot\x12;\n" +
"\n" +
"IntNgdotOr\x18W \x01(\v2\x19.ngolofuzz.IntNgdotOrArgsH\x00R\n" +
"IntNgdotOr\x12>\n" +
"\vIntNgdotXor\x18X \x01(\v2\x1a.ngolofuzz.IntNgdotXorArgsH\x00R\vIntNgdotXor\x12>\n" +
"\vIntNgdotNot\x18Y \x01(\v2\x1a.ngolofuzz.IntNgdotNotArgsH\x00R\vIntNgdotNot\x12A\n" +
"\fIntNgdotSqrt\x18Z \x01(\v2\x1b.ngolofuzz.IntNgdotSqrtArgsH\x00R\fIntNgdotSqrt\x12A\n" +
"\fIntNgdotText\x18[ \x01(\v2\x1b.ngolofuzz.IntNgdotTextArgsH\x00R\fIntNgdotText\x12G\n" +
"\x0eIntNgdotAppend\x18\\ \x01(\v2\x1d.ngolofuzz.IntNgdotAppendArgsH\x00R\x0eIntNgdotAppend\x12G\n" +
"\x0eIntNgdotString\x18] \x01(\v2\x1d.ngolofuzz.IntNgdotStringArgsH\x00R\x0eIntNgdotString\x12P\n" +
"\x11IntNgdotGobEncode\x18^ \x01(\v2 .ngolofuzz.IntNgdotGobEncodeArgsH\x00R\x11IntNgdotGobEncode\x12P\n" +
"\x11IntNgdotGobDecode\x18_ \x01(\v2 .ngolofuzz.IntNgdotGobDecodeArgsH\x00R\x11IntNgdotGobDecode\x12S\n" +
"\x12IntNgdotAppendText\x18` \x01(\v2!.ngolofuzz.IntNgdotAppendTextArgsH\x00R\x12IntNgdotAppendText\x12V\n" +
"\x13IntNgdotMarshalText\x18a \x01(\v2\".ngolofuzz.IntNgdotMarshalTextArgsH\x00R\x13IntNgdotMarshalText\x12\\\n" +
"\x15IntNgdotUnmarshalText\x18b \x01(\v2$.ngolofuzz.IntNgdotUnmarshalTextArgsH\x00R\x15IntNgdotUnmarshalText\x12V\n" +
"\x13IntNgdotMarshalJSON\x18c \x01(\v2\".ngolofuzz.IntNgdotMarshalJSONArgsH\x00R\x13IntNgdotMarshalJSON\x12\\\n" +
"\x15IntNgdotUnmarshalJSON\x18d \x01(\v2$.ngolofuzz.IntNgdotUnmarshalJSONArgsH\x00R\x15IntNgdotUnmarshalJSON\x12\\\n" +
"\x15IntNgdotProbablyPrime\x18e \x01(\v2$.ngolofuzz.IntNgdotProbablyPrimeArgsH\x00R\x15IntNgdotProbablyPrime\x12/\n" +
"\x06NewRat\x18f \x01(\v2\x15.ngolofuzz.NewRatArgsH\x00R\x06NewRat\x12S\n" +
"\x12RatNgdotSetFloat64\x18g \x01(\v2!.ngolofuzz.RatNgdotSetFloat64ArgsH\x00R\x12RatNgdotSetFloat64\x12J\n" +
"\x0fRatNgdotFloat32\x18h \x01(\v2\x1e.ngolofuzz.RatNgdotFloat32ArgsH\x00R\x0fRatNgdotFloat32\x12J\n" +
"\x0fRatNgdotFloat64\x18i \x01(\v2\x1e.ngolofuzz.RatNgdotFloat64ArgsH\x00R\x0fRatNgdotFloat64\x12J\n" +
"\x0fRatNgdotSetFrac\x18j \x01(\v2\x1e.ngolofuzz.RatNgdotSetFracArgsH\x00R\x0fRatNgdotSetFrac\x12P\n" +
"\x11RatNgdotSetFrac64\x18k \x01(\v2 .ngolofuzz.RatNgdotSetFrac64ArgsH\x00R\x11RatNgdotSetFrac64\x12G\n" +
"\x0eRatNgdotSetInt\x18l \x01(\v2\x1d.ngolofuzz.RatNgdotSetIntArgsH\x00R\x0eRatNgdotSetInt\x12M\n" +
"\x10RatNgdotSetInt64\x18m \x01(\v2\x1f.ngolofuzz.RatNgdotSetInt64ArgsH\x00R\x10RatNgdotSetInt64\x12P\n" +
"\x11RatNgdotSetUint64\x18n \x01(\v2 .ngolofuzz.RatNgdotSetUint64ArgsH\x00R\x11RatNgdotSetUint64\x12>\n" +
"\vRatNgdotSet\x18o \x01(\v2\x1a.ngolofuzz.RatNgdotSetArgsH\x00R\vRatNgdotSet\x12>\n" +
"\vRatNgdotAbs\x18p \x01(\v2\x1a.ngolofuzz.RatNgdotAbsArgsH\x00R\vRatNgdotAbs\x12>\n" +
"\vRatNgdotNeg\x18q \x01(\v2\x1a.ngolofuzz.RatNgdotNegArgsH\x00R\vRatNgdotNeg\x12>\n" +
"\vRatNgdotInv\x18r \x01(\v2\x1a.ngolofuzz.RatNgdotInvArgsH\x00R\vRatNgdotInv\x12A\n" +
"\fRatNgdotSign\x18s \x01(\v2\x1b.ngolofuzz.RatNgdotSignArgsH\x00R\fRatNgdotSign\x12D\n" +
"\rRatNgdotIsInt\x18t \x01(\v2\x1c.ngolofuzz.RatNgdotIsIntArgsH\x00R\rRatNgdotIsInt\x12>\n" +
"\vRatNgdotNum\x18u \x01(\v2\x1a.ngolofuzz.RatNgdotNumArgsH\x00R\vRatNgdotNum\x12D\n" +
"\rRatNgdotDenom\x18v \x01(\v2\x1c.ngolofuzz.RatNgdotDenomArgsH\x00R\rRatNgdotDenom\x12>\n" +
"\vRatNgdotCmp\x18w \x01(\v2\x1a.ngolofuzz.RatNgdotCmpArgsH\x00R\vRatNgdotCmp\x12>\n" +
"\vRatNgdotAdd\x18x \x01(\v2\x1a.ngolofuzz.RatNgdotAddArgsH\x00R\vRatNgdotAdd\x12>\n" +
"\vRatNgdotSub\x18y \x01(\v2\x1a.ngolofuzz.RatNgdotSubArgsH\x00R\vRatNgdotSub\x12>\n" +
"\vRatNgdotMul\x18z \x01(\v2\x1a.ngolofuzz.RatNgdotMulArgsH\x00R\vRatNgdotMul\x12P\n" +
"\x11RatNgdotSetString\x18{ \x01(\v2 .ngolofuzz.RatNgdotSetStringArgsH\x00R\x11RatNgdotSetString\x12G\n" +
"\x0eRatNgdotString\x18| \x01(\v2\x1d.ngolofuzz.RatNgdotStringArgsH\x00R\x0eRatNgdotString\x12P\n" +
"\x11RatNgdotRatString\x18} \x01(\v2 .ngolofuzz.RatNgdotRatStringArgsH\x00R\x11RatNgdotRatString\x12V\n" +
"\x13RatNgdotFloatString\x18~ \x01(\v2\".ngolofuzz.RatNgdotFloatStringArgsH\x00R\x13RatNgdotFloatString\x12P\n" +
"\x11RatNgdotFloatPrec\x18\x7f \x01(\v2 .ngolofuzz.RatNgdotFloatPrecArgsH\x00R\x11RatNgdotFloatPrec\x12Q\n" +
"\x11RatNgdotGobEncode\x18\x80\x01 \x01(\v2 .ngolofuzz.RatNgdotGobEncodeArgsH\x00R\x11RatNgdotGobEncode\x12Q\n" +
"\x11RatNgdotGobDecode\x18\x81\x01 \x01(\v2 .ngolofuzz.RatNgdotGobDecodeArgsH\x00R\x11RatNgdotGobDecode\x12T\n" +
"\x12RatNgdotAppendText\x18\x82\x01 \x01(\v2!.ngolofuzz.RatNgdotAppendTextArgsH\x00R\x12RatNgdotAppendText\x12W\n" +
"\x13RatNgdotMarshalText\x18\x83\x01 \x01(\v2\".ngolofuzz.RatNgdotMarshalTextArgsH\x00R\x13RatNgdotMarshalText\x12]\n" +
"\x15RatNgdotUnmarshalText\x18\x84\x01 \x01(\v2$.ngolofuzz.RatNgdotUnmarshalTextArgsH\x00R\x15RatNgdotUnmarshalText\x12c\n" +
"\x17RoundingModeNgdotString\x18\x85\x01 \x01(\v2&.ngolofuzz.RoundingModeNgdotStringArgsH\x00R\x17RoundingModeNgdotString\x12H\n" +
"\x0eFloatNgdotSqrt\x18\x86\x01 \x01(\v2\x1d.ngolofuzz.FloatNgdotSqrtArgsH\x00R\x0eFloatNgdotSqrtB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x15Z\x13./;fuzz_ng_math_bigb\x06proto3"
// Lazily-computed gzip form of the raw file descriptor, shared by
// file_ngolofuzz_proto_rawDescGZIP below.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP returns the gzip-compressed wire form of
// the file descriptor, compressing it exactly once on first use.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		// unsafe.Slice over unsafe.StringData gives a zero-copy []byte view
		// of the rawDesc string for compression.
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// One MessageInfo slot per generated message type in this file (137 total).
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 137)
// file_ngolofuzz_proto_goTypes maps descriptor type indexes to the generated
// Go message types; the index comments are emitted by protoc-gen-go and must
// stay in sync with file_ngolofuzz_proto_depIdxs.
var file_ngolofuzz_proto_goTypes = []any{
	(*AccuracyNgdotStringArgs)(nil),     // 0: ngolofuzz.AccuracyNgdotStringArgs
	(*NewFloatArgs)(nil),                // 1: ngolofuzz.NewFloatArgs
	(*FloatNgdotSetPrecArgs)(nil),       // 2: ngolofuzz.FloatNgdotSetPrecArgs
	(*FloatNgdotSetModeArgs)(nil),       // 3: ngolofuzz.FloatNgdotSetModeArgs
	(*FloatNgdotPrecArgs)(nil),          // 4: ngolofuzz.FloatNgdotPrecArgs
	(*FloatNgdotMinPrecArgs)(nil),       // 5: ngolofuzz.FloatNgdotMinPrecArgs
	(*FloatNgdotModeArgs)(nil),          // 6: ngolofuzz.FloatNgdotModeArgs
	(*FloatNgdotAccArgs)(nil),           // 7: ngolofuzz.FloatNgdotAccArgs
	(*FloatNgdotSignArgs)(nil),          // 8: ngolofuzz.FloatNgdotSignArgs
	(*FloatNgdotMantExpArgs)(nil),       // 9: ngolofuzz.FloatNgdotMantExpArgs
	(*FloatNgdotSetMantExpArgs)(nil),    // 10: ngolofuzz.FloatNgdotSetMantExpArgs
	(*FloatNgdotSignbitArgs)(nil),       // 11: ngolofuzz.FloatNgdotSignbitArgs
	(*FloatNgdotIsInfArgs)(nil),         // 12: ngolofuzz.FloatNgdotIsInfArgs
	(*FloatNgdotIsIntArgs)(nil),         // 13: ngolofuzz.FloatNgdotIsIntArgs
	(*FloatNgdotSetUint64Args)(nil),     // 14: ngolofuzz.FloatNgdotSetUint64Args
	(*FloatNgdotSetInt64Args)(nil),      // 15: ngolofuzz.FloatNgdotSetInt64Args
	(*FloatNgdotSetFloat64Args)(nil),    // 16: ngolofuzz.FloatNgdotSetFloat64Args
	(*FloatNgdotSetIntArgs)(nil),        // 17: ngolofuzz.FloatNgdotSetIntArgs
	(*FloatNgdotSetRatArgs)(nil),        // 18: ngolofuzz.FloatNgdotSetRatArgs
	(*FloatNgdotSetInfArgs)(nil),        // 19: ngolofuzz.FloatNgdotSetInfArgs
	(*FloatNgdotSetArgs)(nil),           // 20: ngolofuzz.FloatNgdotSetArgs
	(*FloatNgdotCopyArgs)(nil),          // 21: ngolofuzz.FloatNgdotCopyArgs
	(*FloatNgdotUint64Args)(nil),        // 22: ngolofuzz.FloatNgdotUint64Args
	(*FloatNgdotInt64Args)(nil),         // 23: ngolofuzz.FloatNgdotInt64Args
	(*FloatNgdotFloat32Args)(nil),       // 24: ngolofuzz.FloatNgdotFloat32Args
	(*FloatNgdotFloat64Args)(nil),       // 25: ngolofuzz.FloatNgdotFloat64Args
	(*FloatNgdotIntArgs)(nil),           // 26: ngolofuzz.FloatNgdotIntArgs
	(*FloatNgdotRatArgs)(nil),           // 27: ngolofuzz.FloatNgdotRatArgs
	(*FloatNgdotAbsArgs)(nil),           // 28: ngolofuzz.FloatNgdotAbsArgs
	(*FloatNgdotNegArgs)(nil),           // 29: ngolofuzz.FloatNgdotNegArgs
	(*FloatNgdotAddArgs)(nil),           // 30: ngolofuzz.FloatNgdotAddArgs
	(*FloatNgdotSubArgs)(nil),           // 31: ngolofuzz.FloatNgdotSubArgs
	(*FloatNgdotMulArgs)(nil),           // 32: ngolofuzz.FloatNgdotMulArgs
	(*FloatNgdotCmpArgs)(nil),           // 33: ngolofuzz.FloatNgdotCmpArgs
	(*FloatNgdotSetStringArgs)(nil),     // 34: ngolofuzz.FloatNgdotSetStringArgs
	(*FloatNgdotParseArgs)(nil),         // 35: ngolofuzz.FloatNgdotParseArgs
	(*ParseFloatArgs)(nil),              // 36: ngolofuzz.ParseFloatArgs
	(*FloatNgdotGobEncodeArgs)(nil),     // 37: ngolofuzz.FloatNgdotGobEncodeArgs
	(*FloatNgdotGobDecodeArgs)(nil),     // 38: ngolofuzz.FloatNgdotGobDecodeArgs
	(*FloatNgdotAppendTextArgs)(nil),    // 39: ngolofuzz.FloatNgdotAppendTextArgs
	(*FloatNgdotMarshalTextArgs)(nil),   // 40: ngolofuzz.FloatNgdotMarshalTextArgs
	(*FloatNgdotUnmarshalTextArgs)(nil), // 41: ngolofuzz.FloatNgdotUnmarshalTextArgs
	(*FloatNgdotTextArgs)(nil),          // 42: ngolofuzz.FloatNgdotTextArgs
	(*FloatNgdotStringArgs)(nil),        // 43: ngolofuzz.FloatNgdotStringArgs
	(*FloatNgdotAppendArgs)(nil),        // 44: ngolofuzz.FloatNgdotAppendArgs
	(*IntNgdotSignArgs)(nil),            // 45: ngolofuzz.IntNgdotSignArgs
	(*IntNgdotSetInt64Args)(nil),        // 46: ngolofuzz.IntNgdotSetInt64Args
	(*IntNgdotSetUint64Args)(nil),       // 47: ngolofuzz.IntNgdotSetUint64Args
	(*NewIntArgs)(nil),                  // 48: ngolofuzz.NewIntArgs
	(*IntNgdotSetArgs)(nil),             // 49: ngolofuzz.IntNgdotSetArgs
	(*IntNgdotBitsArgs)(nil),            // 50: ngolofuzz.IntNgdotBitsArgs
	(*IntNgdotAbsArgs)(nil),             // 51: ngolofuzz.IntNgdotAbsArgs
	(*IntNgdotNegArgs)(nil),             // 52: ngolofuzz.IntNgdotNegArgs
	(*IntNgdotAddArgs)(nil),             // 53: ngolofuzz.IntNgdotAddArgs
	(*IntNgdotSubArgs)(nil),             // 54: ngolofuzz.IntNgdotSubArgs
	(*IntNgdotMulArgs)(nil),             // 55: ngolofuzz.IntNgdotMulArgs
	(*IntNgdotMulRangeArgs)(nil),        // 56: ngolofuzz.IntNgdotMulRangeArgs
	(*IntNgdotBinomialArgs)(nil),        // 57: ngolofuzz.IntNgdotBinomialArgs
	(*IntNgdotRemArgs)(nil),             // 58: ngolofuzz.IntNgdotRemArgs
	(*IntNgdotDivArgs)(nil),             // 59: ngolofuzz.IntNgdotDivArgs
	(*IntNgdotModArgs)(nil),             // 60: ngolofuzz.IntNgdotModArgs
	(*IntNgdotDivModArgs)(nil),          // 61: ngolofuzz.IntNgdotDivModArgs
	(*IntNgdotCmpArgs)(nil),             // 62: ngolofuzz.IntNgdotCmpArgs
	(*IntNgdotCmpAbsArgs)(nil),          // 63: ngolofuzz.IntNgdotCmpAbsArgs
	(*IntNgdotInt64Args)(nil),           // 64: ngolofuzz.IntNgdotInt64Args
	(*IntNgdotUint64Args)(nil),          // 65: ngolofuzz.IntNgdotUint64Args
	(*IntNgdotIsInt64Args)(nil),         // 66: ngolofuzz.IntNgdotIsInt64Args
	(*IntNgdotIsUint64Args)(nil),        // 67: ngolofuzz.IntNgdotIsUint64Args
	(*IntNgdotFloat64Args)(nil),         // 68: ngolofuzz.IntNgdotFloat64Args
	(*IntNgdotSetStringArgs)(nil),       // 69: ngolofuzz.IntNgdotSetStringArgs
	(*IntNgdotSetBytesArgs)(nil),        // 70: ngolofuzz.IntNgdotSetBytesArgs
	(*IntNgdotBytesArgs)(nil),           // 71: ngolofuzz.IntNgdotBytesArgs
	(*IntNgdotFillBytesArgs)(nil),       // 72: ngolofuzz.IntNgdotFillBytesArgs
	(*IntNgdotBitLenArgs)(nil),          // 73: ngolofuzz.IntNgdotBitLenArgs
	(*IntNgdotTrailingZeroBitsArgs)(nil), // 74: ngolofuzz.IntNgdotTrailingZeroBitsArgs
	(*IntNgdotExpArgs)(nil),             // 75: ngolofuzz.IntNgdotExpArgs
	(*IntNgdotGCDArgs)(nil),             // 76: ngolofuzz.IntNgdotGCDArgs
	(*IntNgdotModInverseArgs)(nil),      // 77: ngolofuzz.IntNgdotModInverseArgs
	(*JacobiArgs)(nil),                  // 78: ngolofuzz.JacobiArgs
	(*IntNgdotModSqrtArgs)(nil),         // 79: ngolofuzz.IntNgdotModSqrtArgs
	(*IntNgdotLshArgs)(nil),             // 80: ngolofuzz.IntNgdotLshArgs
	(*IntNgdotRshArgs)(nil),             // 81: ngolofuzz.IntNgdotRshArgs
	(*IntNgdotBitArgs)(nil),             // 82: ngolofuzz.IntNgdotBitArgs
	(*IntNgdotSetBitArgs)(nil),          // 83: ngolofuzz.IntNgdotSetBitArgs
	(*IntNgdotAndArgs)(nil),             // 84: ngolofuzz.IntNgdotAndArgs
	(*IntNgdotAndNotArgs)(nil),          // 85: ngolofuzz.IntNgdotAndNotArgs
	(*IntNgdotOrArgs)(nil),              // 86: ngolofuzz.IntNgdotOrArgs
	(*IntNgdotXorArgs)(nil),             // 87: ngolofuzz.IntNgdotXorArgs
	(*IntNgdotNotArgs)(nil),             // 88: ngolofuzz.IntNgdotNotArgs
	(*IntNgdotSqrtArgs)(nil),            // 89: ngolofuzz.IntNgdotSqrtArgs
	(*IntNgdotTextArgs)(nil),            // 90: ngolofuzz.IntNgdotTextArgs
	(*IntNgdotAppendArgs)(nil),          // 91: ngolofuzz.IntNgdotAppendArgs
	(*IntNgdotStringArgs)(nil),          // 92: ngolofuzz.IntNgdotStringArgs
	(*IntNgdotGobEncodeArgs)(nil),       // 93: ngolofuzz.IntNgdotGobEncodeArgs
	(*IntNgdotGobDecodeArgs)(nil),       // 94: ngolofuzz.IntNgdotGobDecodeArgs
	(*IntNgdotAppendTextArgs)(nil),      // 95: ngolofuzz.IntNgdotAppendTextArgs
	(*IntNgdotMarshalTextArgs)(nil),     // 96: ngolofuzz.IntNgdotMarshalTextArgs
	(*IntNgdotUnmarshalTextArgs)(nil),   // 97: ngolofuzz.IntNgdotUnmarshalTextArgs
	(*IntNgdotMarshalJSONArgs)(nil),     // 98: ngolofuzz.IntNgdotMarshalJSONArgs
	(*IntNgdotUnmarshalJSONArgs)(nil),   // 99: ngolofuzz.IntNgdotUnmarshalJSONArgs
	(*IntNgdotProbablyPrimeArgs)(nil),   // 100: ngolofuzz.IntNgdotProbablyPrimeArgs
	(*NewRatArgs)(nil),                  // 101: ngolofuzz.NewRatArgs
	(*RatNgdotSetFloat64Args)(nil),      // 102: ngolofuzz.RatNgdotSetFloat64Args
	(*RatNgdotFloat32Args)(nil),         // 103: ngolofuzz.RatNgdotFloat32Args
	(*RatNgdotFloat64Args)(nil),         // 104: ngolofuzz.RatNgdotFloat64Args
	(*RatNgdotSetFracArgs)(nil),         // 105: ngolofuzz.RatNgdotSetFracArgs
	(*RatNgdotSetFrac64Args)(nil),       // 106: ngolofuzz.RatNgdotSetFrac64Args
	(*RatNgdotSetIntArgs)(nil),          // 107: ngolofuzz.RatNgdotSetIntArgs
	(*RatNgdotSetInt64Args)(nil),        // 108: ngolofuzz.RatNgdotSetInt64Args
	(*RatNgdotSetUint64Args)(nil),       // 109: ngolofuzz.RatNgdotSetUint64Args
	(*RatNgdotSetArgs)(nil),             // 110: ngolofuzz.RatNgdotSetArgs
	(*RatNgdotAbsArgs)(nil),             // 111: ngolofuzz.RatNgdotAbsArgs
	(*RatNgdotNegArgs)(nil),             // 112: ngolofuzz.RatNgdotNegArgs
	(*RatNgdotInvArgs)(nil),             // 113: ngolofuzz.RatNgdotInvArgs
	(*RatNgdotSignArgs)(nil),            // 114: ngolofuzz.RatNgdotSignArgs
	(*RatNgdotIsIntArgs)(nil),           // 115: ngolofuzz.RatNgdotIsIntArgs
	(*RatNgdotNumArgs)(nil),             // 116: ngolofuzz.RatNgdotNumArgs
	(*RatNgdotDenomArgs)(nil),           // 117: ngolofuzz.RatNgdotDenomArgs
	(*RatNgdotCmpArgs)(nil),             // 118: ngolofuzz.RatNgdotCmpArgs
	(*RatNgdotAddArgs)(nil),             // 119: ngolofuzz.RatNgdotAddArgs
	(*RatNgdotSubArgs)(nil),             // 120: ngolofuzz.RatNgdotSubArgs
	(*RatNgdotMulArgs)(nil),             // 121: ngolofuzz.RatNgdotMulArgs
	(*RatNgdotSetStringArgs)(nil),       // 122: ngolofuzz.RatNgdotSetStringArgs
	(*RatNgdotStringArgs)(nil),          // 123: ngolofuzz.RatNgdotStringArgs
	(*RatNgdotRatStringArgs)(nil),       // 124: ngolofuzz.RatNgdotRatStringArgs
	(*RatNgdotFloatStringArgs)(nil),     // 125: ngolofuzz.RatNgdotFloatStringArgs
	(*RatNgdotFloatPrecArgs)(nil),       // 126: ngolofuzz.RatNgdotFloatPrecArgs
	(*RatNgdotGobEncodeArgs)(nil),       // 127: ngolofuzz.RatNgdotGobEncodeArgs
	(*RatNgdotGobDecodeArgs)(nil),       // 128: ngolofuzz.RatNgdotGobDecodeArgs
	(*RatNgdotAppendTextArgs)(nil),      // 129: ngolofuzz.RatNgdotAppendTextArgs
	(*RatNgdotMarshalTextArgs)(nil),     // 130: ngolofuzz.RatNgdotMarshalTextArgs
	(*RatNgdotUnmarshalTextArgs)(nil),   // 131: ngolofuzz.RatNgdotUnmarshalTextArgs
	(*RoundingModeNgdotStringArgs)(nil), // 132: ngolofuzz.RoundingModeNgdotStringArgs
	(*FloatNgdotSqrtArgs)(nil),          // 133: ngolofuzz.FloatNgdotSqrtArgs
	(*NgoloFuzzOne)(nil),                // 134: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                // 135: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),               // 136: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs records, for every field whose type is a
// message, the index of that type in file_ngolofuzz_proto_goTypes; the
// trailing entries delimit the sub-lists the protobuf runtime slices out
// (methods, extensions, field type_names).
var file_ngolofuzz_proto_depIdxs = []int32{
	0,   // 0: ngolofuzz.NgoloFuzzOne.AccuracyNgdotString:type_name -> ngolofuzz.AccuracyNgdotStringArgs
	1,   // 1: ngolofuzz.NgoloFuzzOne.NewFloat:type_name -> ngolofuzz.NewFloatArgs
	2,   // 2: ngolofuzz.NgoloFuzzOne.FloatNgdotSetPrec:type_name -> ngolofuzz.FloatNgdotSetPrecArgs
	3,   // 3: ngolofuzz.NgoloFuzzOne.FloatNgdotSetMode:type_name -> ngolofuzz.FloatNgdotSetModeArgs
	4,   // 4: ngolofuzz.NgoloFuzzOne.FloatNgdotPrec:type_name -> ngolofuzz.FloatNgdotPrecArgs
	5,   // 5: ngolofuzz.NgoloFuzzOne.FloatNgdotMinPrec:type_name -> ngolofuzz.FloatNgdotMinPrecArgs
	6,   // 6: ngolofuzz.NgoloFuzzOne.FloatNgdotMode:type_name -> ngolofuzz.FloatNgdotModeArgs
	7,   // 7: ngolofuzz.NgoloFuzzOne.FloatNgdotAcc:type_name -> ngolofuzz.FloatNgdotAccArgs
	8,   // 8: ngolofuzz.NgoloFuzzOne.FloatNgdotSign:type_name -> ngolofuzz.FloatNgdotSignArgs
	9,   // 9: ngolofuzz.NgoloFuzzOne.FloatNgdotMantExp:type_name -> ngolofuzz.FloatNgdotMantExpArgs
	10,  // 10: ngolofuzz.NgoloFuzzOne.FloatNgdotSetMantExp:type_name -> ngolofuzz.FloatNgdotSetMantExpArgs
	11,  // 11: ngolofuzz.NgoloFuzzOne.FloatNgdotSignbit:type_name -> ngolofuzz.FloatNgdotSignbitArgs
	12,  // 12: ngolofuzz.NgoloFuzzOne.FloatNgdotIsInf:type_name -> ngolofuzz.FloatNgdotIsInfArgs
	13,  // 13: ngolofuzz.NgoloFuzzOne.FloatNgdotIsInt:type_name -> ngolofuzz.FloatNgdotIsIntArgs
	14,  // 14: ngolofuzz.NgoloFuzzOne.FloatNgdotSetUint64:type_name -> ngolofuzz.FloatNgdotSetUint64Args
	15,  // 15: ngolofuzz.NgoloFuzzOne.FloatNgdotSetInt64:type_name -> ngolofuzz.FloatNgdotSetInt64Args
	16,  // 16: ngolofuzz.NgoloFuzzOne.FloatNgdotSetFloat64:type_name -> ngolofuzz.FloatNgdotSetFloat64Args
	17,  // 17: ngolofuzz.NgoloFuzzOne.FloatNgdotSetInt:type_name -> ngolofuzz.FloatNgdotSetIntArgs
	18,  // 18: ngolofuzz.NgoloFuzzOne.FloatNgdotSetRat:type_name -> ngolofuzz.FloatNgdotSetRatArgs
	19,  // 19: ngolofuzz.NgoloFuzzOne.FloatNgdotSetInf:type_name -> ngolofuzz.FloatNgdotSetInfArgs
	20,  // 20: ngolofuzz.NgoloFuzzOne.FloatNgdotSet:type_name -> ngolofuzz.FloatNgdotSetArgs
	21,  // 21: ngolofuzz.NgoloFuzzOne.FloatNgdotCopy:type_name -> ngolofuzz.FloatNgdotCopyArgs
	22,  // 22: ngolofuzz.NgoloFuzzOne.FloatNgdotUint64:type_name -> ngolofuzz.FloatNgdotUint64Args
	23,  // 23: ngolofuzz.NgoloFuzzOne.FloatNgdotInt64:type_name -> ngolofuzz.FloatNgdotInt64Args
	24,  // 24: ngolofuzz.NgoloFuzzOne.FloatNgdotFloat32:type_name -> ngolofuzz.FloatNgdotFloat32Args
	25,  // 25: ngolofuzz.NgoloFuzzOne.FloatNgdotFloat64:type_name -> ngolofuzz.FloatNgdotFloat64Args
	26,  // 26: ngolofuzz.NgoloFuzzOne.FloatNgdotInt:type_name -> ngolofuzz.FloatNgdotIntArgs
	27,  // 27: ngolofuzz.NgoloFuzzOne.FloatNgdotRat:type_name -> ngolofuzz.FloatNgdotRatArgs
	28,  // 28: ngolofuzz.NgoloFuzzOne.FloatNgdotAbs:type_name -> ngolofuzz.FloatNgdotAbsArgs
	29,  // 29: ngolofuzz.NgoloFuzzOne.FloatNgdotNeg:type_name -> ngolofuzz.FloatNgdotNegArgs
	30,  // 30: ngolofuzz.NgoloFuzzOne.FloatNgdotAdd:type_name -> ngolofuzz.FloatNgdotAddArgs
	31,  // 31: ngolofuzz.NgoloFuzzOne.FloatNgdotSub:type_name -> ngolofuzz.FloatNgdotSubArgs
	32,  // 32: ngolofuzz.NgoloFuzzOne.FloatNgdotMul:type_name -> ngolofuzz.FloatNgdotMulArgs
	33,  // 33: ngolofuzz.NgoloFuzzOne.FloatNgdotCmp:type_name -> ngolofuzz.FloatNgdotCmpArgs
	34,  // 34: ngolofuzz.NgoloFuzzOne.FloatNgdotSetString:type_name -> ngolofuzz.FloatNgdotSetStringArgs
	35,  // 35: ngolofuzz.NgoloFuzzOne.FloatNgdotParse:type_name -> ngolofuzz.FloatNgdotParseArgs
	36,  // 36: ngolofuzz.NgoloFuzzOne.ParseFloat:type_name -> ngolofuzz.ParseFloatArgs
	37,  // 37: ngolofuzz.NgoloFuzzOne.FloatNgdotGobEncode:type_name -> ngolofuzz.FloatNgdotGobEncodeArgs
	38,  // 38: ngolofuzz.NgoloFuzzOne.FloatNgdotGobDecode:type_name -> ngolofuzz.FloatNgdotGobDecodeArgs
	39,  // 39: ngolofuzz.NgoloFuzzOne.FloatNgdotAppendText:type_name -> ngolofuzz.FloatNgdotAppendTextArgs
	40,  // 40: ngolofuzz.NgoloFuzzOne.FloatNgdotMarshalText:type_name -> ngolofuzz.FloatNgdotMarshalTextArgs
	41,  // 41: ngolofuzz.NgoloFuzzOne.FloatNgdotUnmarshalText:type_name -> ngolofuzz.FloatNgdotUnmarshalTextArgs
	42,  // 42: ngolofuzz.NgoloFuzzOne.FloatNgdotText:type_name -> ngolofuzz.FloatNgdotTextArgs
	43,  // 43: ngolofuzz.NgoloFuzzOne.FloatNgdotString:type_name -> ngolofuzz.FloatNgdotStringArgs
	44,  // 44: ngolofuzz.NgoloFuzzOne.FloatNgdotAppend:type_name -> ngolofuzz.FloatNgdotAppendArgs
	45,  // 45: ngolofuzz.NgoloFuzzOne.IntNgdotSign:type_name -> ngolofuzz.IntNgdotSignArgs
	46,  // 46: ngolofuzz.NgoloFuzzOne.IntNgdotSetInt64:type_name -> ngolofuzz.IntNgdotSetInt64Args
	47,  // 47: ngolofuzz.NgoloFuzzOne.IntNgdotSetUint64:type_name -> ngolofuzz.IntNgdotSetUint64Args
	48,  // 48: ngolofuzz.NgoloFuzzOne.NewInt:type_name -> ngolofuzz.NewIntArgs
	49,  // 49: ngolofuzz.NgoloFuzzOne.IntNgdotSet:type_name -> ngolofuzz.IntNgdotSetArgs
	50,  // 50: ngolofuzz.NgoloFuzzOne.IntNgdotBits:type_name -> ngolofuzz.IntNgdotBitsArgs
	51,  // 51: ngolofuzz.NgoloFuzzOne.IntNgdotAbs:type_name -> ngolofuzz.IntNgdotAbsArgs
	52,  // 52: ngolofuzz.NgoloFuzzOne.IntNgdotNeg:type_name -> ngolofuzz.IntNgdotNegArgs
	53,  // 53: ngolofuzz.NgoloFuzzOne.IntNgdotAdd:type_name -> ngolofuzz.IntNgdotAddArgs
	54,  // 54: ngolofuzz.NgoloFuzzOne.IntNgdotSub:type_name -> ngolofuzz.IntNgdotSubArgs
	55,  // 55: ngolofuzz.NgoloFuzzOne.IntNgdotMul:type_name -> ngolofuzz.IntNgdotMulArgs
	56,  // 56: ngolofuzz.NgoloFuzzOne.IntNgdotMulRange:type_name -> ngolofuzz.IntNgdotMulRangeArgs
	57,  // 57: ngolofuzz.NgoloFuzzOne.IntNgdotBinomial:type_name -> ngolofuzz.IntNgdotBinomialArgs
	58,  // 58: ngolofuzz.NgoloFuzzOne.IntNgdotRem:type_name -> ngolofuzz.IntNgdotRemArgs
	59,  // 59: ngolofuzz.NgoloFuzzOne.IntNgdotDiv:type_name -> ngolofuzz.IntNgdotDivArgs
	60,  // 60: ngolofuzz.NgoloFuzzOne.IntNgdotMod:type_name -> ngolofuzz.IntNgdotModArgs
	61,  // 61: ngolofuzz.NgoloFuzzOne.IntNgdotDivMod:type_name -> ngolofuzz.IntNgdotDivModArgs
	62,  // 62: ngolofuzz.NgoloFuzzOne.IntNgdotCmp:type_name -> ngolofuzz.IntNgdotCmpArgs
	63,  // 63: ngolofuzz.NgoloFuzzOne.IntNgdotCmpAbs:type_name -> ngolofuzz.IntNgdotCmpAbsArgs
	64,  // 64: ngolofuzz.NgoloFuzzOne.IntNgdotInt64:type_name -> ngolofuzz.IntNgdotInt64Args
	65,  // 65: ngolofuzz.NgoloFuzzOne.IntNgdotUint64:type_name -> ngolofuzz.IntNgdotUint64Args
	66,  // 66: ngolofuzz.NgoloFuzzOne.IntNgdotIsInt64:type_name -> ngolofuzz.IntNgdotIsInt64Args
	67,  // 67: ngolofuzz.NgoloFuzzOne.IntNgdotIsUint64:type_name -> ngolofuzz.IntNgdotIsUint64Args
	68,  // 68: ngolofuzz.NgoloFuzzOne.IntNgdotFloat64:type_name -> ngolofuzz.IntNgdotFloat64Args
	69,  // 69: ngolofuzz.NgoloFuzzOne.IntNgdotSetString:type_name -> ngolofuzz.IntNgdotSetStringArgs
	70,  // 70: ngolofuzz.NgoloFuzzOne.IntNgdotSetBytes:type_name -> ngolofuzz.IntNgdotSetBytesArgs
	71,  // 71: ngolofuzz.NgoloFuzzOne.IntNgdotBytes:type_name -> ngolofuzz.IntNgdotBytesArgs
	72,  // 72: ngolofuzz.NgoloFuzzOne.IntNgdotFillBytes:type_name -> ngolofuzz.IntNgdotFillBytesArgs
	73,  // 73: ngolofuzz.NgoloFuzzOne.IntNgdotBitLen:type_name -> ngolofuzz.IntNgdotBitLenArgs
	74,  // 74: ngolofuzz.NgoloFuzzOne.IntNgdotTrailingZeroBits:type_name -> ngolofuzz.IntNgdotTrailingZeroBitsArgs
	75,  // 75: ngolofuzz.NgoloFuzzOne.IntNgdotExp:type_name -> ngolofuzz.IntNgdotExpArgs
	76,  // 76: ngolofuzz.NgoloFuzzOne.IntNgdotGCD:type_name -> ngolofuzz.IntNgdotGCDArgs
	77,  // 77: ngolofuzz.NgoloFuzzOne.IntNgdotModInverse:type_name -> ngolofuzz.IntNgdotModInverseArgs
	78,  // 78: ngolofuzz.NgoloFuzzOne.Jacobi:type_name -> ngolofuzz.JacobiArgs
	79,  // 79: ngolofuzz.NgoloFuzzOne.IntNgdotModSqrt:type_name -> ngolofuzz.IntNgdotModSqrtArgs
	80,  // 80: ngolofuzz.NgoloFuzzOne.IntNgdotLsh:type_name -> ngolofuzz.IntNgdotLshArgs
	81,  // 81: ngolofuzz.NgoloFuzzOne.IntNgdotRsh:type_name -> ngolofuzz.IntNgdotRshArgs
	82,  // 82: ngolofuzz.NgoloFuzzOne.IntNgdotBit:type_name -> ngolofuzz.IntNgdotBitArgs
	83,  // 83: ngolofuzz.NgoloFuzzOne.IntNgdotSetBit:type_name -> ngolofuzz.IntNgdotSetBitArgs
	84,  // 84: ngolofuzz.NgoloFuzzOne.IntNgdotAnd:type_name -> ngolofuzz.IntNgdotAndArgs
	85,  // 85: ngolofuzz.NgoloFuzzOne.IntNgdotAndNot:type_name -> ngolofuzz.IntNgdotAndNotArgs
	86,  // 86: ngolofuzz.NgoloFuzzOne.IntNgdotOr:type_name -> ngolofuzz.IntNgdotOrArgs
	87,  // 87: ngolofuzz.NgoloFuzzOne.IntNgdotXor:type_name -> ngolofuzz.IntNgdotXorArgs
	88,  // 88: ngolofuzz.NgoloFuzzOne.IntNgdotNot:type_name -> ngolofuzz.IntNgdotNotArgs
	89,  // 89: ngolofuzz.NgoloFuzzOne.IntNgdotSqrt:type_name -> ngolofuzz.IntNgdotSqrtArgs
	90,  // 90: ngolofuzz.NgoloFuzzOne.IntNgdotText:type_name -> ngolofuzz.IntNgdotTextArgs
	91,  // 91: ngolofuzz.NgoloFuzzOne.IntNgdotAppend:type_name -> ngolofuzz.IntNgdotAppendArgs
	92,  // 92: ngolofuzz.NgoloFuzzOne.IntNgdotString:type_name -> ngolofuzz.IntNgdotStringArgs
	93,  // 93: ngolofuzz.NgoloFuzzOne.IntNgdotGobEncode:type_name -> ngolofuzz.IntNgdotGobEncodeArgs
	94,  // 94: ngolofuzz.NgoloFuzzOne.IntNgdotGobDecode:type_name -> ngolofuzz.IntNgdotGobDecodeArgs
	95,  // 95: ngolofuzz.NgoloFuzzOne.IntNgdotAppendText:type_name -> ngolofuzz.IntNgdotAppendTextArgs
	96,  // 96: ngolofuzz.NgoloFuzzOne.IntNgdotMarshalText:type_name -> ngolofuzz.IntNgdotMarshalTextArgs
	97,  // 97: ngolofuzz.NgoloFuzzOne.IntNgdotUnmarshalText:type_name -> ngolofuzz.IntNgdotUnmarshalTextArgs
	98,  // 98: ngolofuzz.NgoloFuzzOne.IntNgdotMarshalJSON:type_name -> ngolofuzz.IntNgdotMarshalJSONArgs
	99,  // 99: ngolofuzz.NgoloFuzzOne.IntNgdotUnmarshalJSON:type_name -> ngolofuzz.IntNgdotUnmarshalJSONArgs
	100, // 100: ngolofuzz.NgoloFuzzOne.IntNgdotProbablyPrime:type_name -> ngolofuzz.IntNgdotProbablyPrimeArgs
	101, // 101: ngolofuzz.NgoloFuzzOne.NewRat:type_name -> ngolofuzz.NewRatArgs
	102, // 102: ngolofuzz.NgoloFuzzOne.RatNgdotSetFloat64:type_name -> ngolofuzz.RatNgdotSetFloat64Args
	103, // 103: ngolofuzz.NgoloFuzzOne.RatNgdotFloat32:type_name -> ngolofuzz.RatNgdotFloat32Args
	104, // 104: ngolofuzz.NgoloFuzzOne.RatNgdotFloat64:type_name -> ngolofuzz.RatNgdotFloat64Args
	105, // 105: ngolofuzz.NgoloFuzzOne.RatNgdotSetFrac:type_name -> ngolofuzz.RatNgdotSetFracArgs
	106, // 106: ngolofuzz.NgoloFuzzOne.RatNgdotSetFrac64:type_name -> ngolofuzz.RatNgdotSetFrac64Args
	107, // 107: ngolofuzz.NgoloFuzzOne.RatNgdotSetInt:type_name -> ngolofuzz.RatNgdotSetIntArgs
	108, // 108: ngolofuzz.NgoloFuzzOne.RatNgdotSetInt64:type_name -> ngolofuzz.RatNgdotSetInt64Args
	109, // 109: ngolofuzz.NgoloFuzzOne.RatNgdotSetUint64:type_name -> ngolofuzz.RatNgdotSetUint64Args
	110, // 110: ngolofuzz.NgoloFuzzOne.RatNgdotSet:type_name -> ngolofuzz.RatNgdotSetArgs
	111, // 111: ngolofuzz.NgoloFuzzOne.RatNgdotAbs:type_name -> ngolofuzz.RatNgdotAbsArgs
	112, // 112: ngolofuzz.NgoloFuzzOne.RatNgdotNeg:type_name -> ngolofuzz.RatNgdotNegArgs
	113, // 113: ngolofuzz.NgoloFuzzOne.RatNgdotInv:type_name -> ngolofuzz.RatNgdotInvArgs
	114, // 114: ngolofuzz.NgoloFuzzOne.RatNgdotSign:type_name -> ngolofuzz.RatNgdotSignArgs
	115, // 115: ngolofuzz.NgoloFuzzOne.RatNgdotIsInt:type_name -> ngolofuzz.RatNgdotIsIntArgs
	116, // 116: ngolofuzz.NgoloFuzzOne.RatNgdotNum:type_name -> ngolofuzz.RatNgdotNumArgs
	117, // 117: ngolofuzz.NgoloFuzzOne.RatNgdotDenom:type_name -> ngolofuzz.RatNgdotDenomArgs
	118, // 118: ngolofuzz.NgoloFuzzOne.RatNgdotCmp:type_name -> ngolofuzz.RatNgdotCmpArgs
	119, // 119: ngolofuzz.NgoloFuzzOne.RatNgdotAdd:type_name -> ngolofuzz.RatNgdotAddArgs
	120, // 120: ngolofuzz.NgoloFuzzOne.RatNgdotSub:type_name -> ngolofuzz.RatNgdotSubArgs
	121, // 121: ngolofuzz.NgoloFuzzOne.RatNgdotMul:type_name -> ngolofuzz.RatNgdotMulArgs
	122, // 122: ngolofuzz.NgoloFuzzOne.RatNgdotSetString:type_name -> ngolofuzz.RatNgdotSetStringArgs
	123, // 123: ngolofuzz.NgoloFuzzOne.RatNgdotString:type_name -> ngolofuzz.RatNgdotStringArgs
	124, // 124: ngolofuzz.NgoloFuzzOne.RatNgdotRatString:type_name -> ngolofuzz.RatNgdotRatStringArgs
	125, // 125: ngolofuzz.NgoloFuzzOne.RatNgdotFloatString:type_name -> ngolofuzz.RatNgdotFloatStringArgs
	126, // 126: ngolofuzz.NgoloFuzzOne.RatNgdotFloatPrec:type_name -> ngolofuzz.RatNgdotFloatPrecArgs
	127, // 127: ngolofuzz.NgoloFuzzOne.RatNgdotGobEncode:type_name -> ngolofuzz.RatNgdotGobEncodeArgs
	128, // 128: ngolofuzz.NgoloFuzzOne.RatNgdotGobDecode:type_name -> ngolofuzz.RatNgdotGobDecodeArgs
	129, // 129: ngolofuzz.NgoloFuzzOne.RatNgdotAppendText:type_name -> ngolofuzz.RatNgdotAppendTextArgs
	130, // 130: ngolofuzz.NgoloFuzzOne.RatNgdotMarshalText:type_name -> ngolofuzz.RatNgdotMarshalTextArgs
	131, // 131: ngolofuzz.NgoloFuzzOne.RatNgdotUnmarshalText:type_name -> ngolofuzz.RatNgdotUnmarshalTextArgs
	132, // 132: ngolofuzz.NgoloFuzzOne.RoundingModeNgdotString:type_name -> ngolofuzz.RoundingModeNgdotStringArgs
	133, // 133: ngolofuzz.NgoloFuzzOne.FloatNgdotSqrt:type_name -> ngolofuzz.FloatNgdotSqrtArgs
	134, // 134: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	135, // [135:135] is the sub-list for method output_type
	135, // [135:135] is the sub-list for method input_type
	135, // [135:135] is the sub-list for extension type_name
	135, // [135:135] is the sub-list for extension extendee
	0,   // [0:135] is the sub-list for field type_name
}
// init registers this file's descriptor with the protobuf runtime at load time.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init wires the raw descriptor, Go types, dependency
// indexes and oneof wrapper types into the protobuf runtime. It is
// idempotent: the first call (from init) does the work, later calls return
// immediately once File_ngolofuzz_proto is set.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Message index 134 is NgoloFuzzOne: list every wrapper type of its
	// "item" oneof so the runtime can decode each case.
	file_ngolofuzz_proto_msgTypes[134].OneofWrappers = []any{
		(*NgoloFuzzOne_AccuracyNgdotString)(nil),
		(*NgoloFuzzOne_NewFloat)(nil),
		(*NgoloFuzzOne_FloatNgdotSetPrec)(nil),
		(*NgoloFuzzOne_FloatNgdotSetMode)(nil),
		(*NgoloFuzzOne_FloatNgdotPrec)(nil),
		(*NgoloFuzzOne_FloatNgdotMinPrec)(nil),
		(*NgoloFuzzOne_FloatNgdotMode)(nil),
		(*NgoloFuzzOne_FloatNgdotAcc)(nil),
		(*NgoloFuzzOne_FloatNgdotSign)(nil),
		(*NgoloFuzzOne_FloatNgdotMantExp)(nil),
		(*NgoloFuzzOne_FloatNgdotSetMantExp)(nil),
		(*NgoloFuzzOne_FloatNgdotSignbit)(nil),
		(*NgoloFuzzOne_FloatNgdotIsInf)(nil),
		(*NgoloFuzzOne_FloatNgdotIsInt)(nil),
		(*NgoloFuzzOne_FloatNgdotSetUint64)(nil),
		(*NgoloFuzzOne_FloatNgdotSetInt64)(nil),
		(*NgoloFuzzOne_FloatNgdotSetFloat64)(nil),
		(*NgoloFuzzOne_FloatNgdotSetInt)(nil),
		(*NgoloFuzzOne_FloatNgdotSetRat)(nil),
		(*NgoloFuzzOne_FloatNgdotSetInf)(nil),
		(*NgoloFuzzOne_FloatNgdotSet)(nil),
		(*NgoloFuzzOne_FloatNgdotCopy)(nil),
		(*NgoloFuzzOne_FloatNgdotUint64)(nil),
		(*NgoloFuzzOne_FloatNgdotInt64)(nil),
		(*NgoloFuzzOne_FloatNgdotFloat32)(nil),
		(*NgoloFuzzOne_FloatNgdotFloat64)(nil),
		(*NgoloFuzzOne_FloatNgdotInt)(nil),
		(*NgoloFuzzOne_FloatNgdotRat)(nil),
		(*NgoloFuzzOne_FloatNgdotAbs)(nil),
		(*NgoloFuzzOne_FloatNgdotNeg)(nil),
		(*NgoloFuzzOne_FloatNgdotAdd)(nil),
		(*NgoloFuzzOne_FloatNgdotSub)(nil),
		(*NgoloFuzzOne_FloatNgdotMul)(nil),
		(*NgoloFuzzOne_FloatNgdotCmp)(nil),
		(*NgoloFuzzOne_FloatNgdotSetString)(nil),
		(*NgoloFuzzOne_FloatNgdotParse)(nil),
		(*NgoloFuzzOne_ParseFloat)(nil),
		(*NgoloFuzzOne_FloatNgdotGobEncode)(nil),
		(*NgoloFuzzOne_FloatNgdotGobDecode)(nil),
		(*NgoloFuzzOne_FloatNgdotAppendText)(nil),
		(*NgoloFuzzOne_FloatNgdotMarshalText)(nil),
		(*NgoloFuzzOne_FloatNgdotUnmarshalText)(nil),
		(*NgoloFuzzOne_FloatNgdotText)(nil),
		(*NgoloFuzzOne_FloatNgdotString)(nil),
		(*NgoloFuzzOne_FloatNgdotAppend)(nil),
		(*NgoloFuzzOne_IntNgdotSign)(nil),
		(*NgoloFuzzOne_IntNgdotSetInt64)(nil),
		(*NgoloFuzzOne_IntNgdotSetUint64)(nil),
		(*NgoloFuzzOne_NewInt)(nil),
		(*NgoloFuzzOne_IntNgdotSet)(nil),
		(*NgoloFuzzOne_IntNgdotBits)(nil),
		(*NgoloFuzzOne_IntNgdotAbs)(nil),
		(*NgoloFuzzOne_IntNgdotNeg)(nil),
		(*NgoloFuzzOne_IntNgdotAdd)(nil),
		(*NgoloFuzzOne_IntNgdotSub)(nil),
		(*NgoloFuzzOne_IntNgdotMul)(nil),
		(*NgoloFuzzOne_IntNgdotMulRange)(nil),
		(*NgoloFuzzOne_IntNgdotBinomial)(nil),
		(*NgoloFuzzOne_IntNgdotRem)(nil),
		(*NgoloFuzzOne_IntNgdotDiv)(nil),
		(*NgoloFuzzOne_IntNgdotMod)(nil),
		(*NgoloFuzzOne_IntNgdotDivMod)(nil),
		(*NgoloFuzzOne_IntNgdotCmp)(nil),
		(*NgoloFuzzOne_IntNgdotCmpAbs)(nil),
		(*NgoloFuzzOne_IntNgdotInt64)(nil),
		(*NgoloFuzzOne_IntNgdotUint64)(nil),
		(*NgoloFuzzOne_IntNgdotIsInt64)(nil),
		(*NgoloFuzzOne_IntNgdotIsUint64)(nil),
		(*NgoloFuzzOne_IntNgdotFloat64)(nil),
		(*NgoloFuzzOne_IntNgdotSetString)(nil),
		(*NgoloFuzzOne_IntNgdotSetBytes)(nil),
		(*NgoloFuzzOne_IntNgdotBytes)(nil),
		(*NgoloFuzzOne_IntNgdotFillBytes)(nil),
		(*NgoloFuzzOne_IntNgdotBitLen)(nil),
		(*NgoloFuzzOne_IntNgdotTrailingZeroBits)(nil),
		(*NgoloFuzzOne_IntNgdotExp)(nil),
		(*NgoloFuzzOne_IntNgdotGCD)(nil),
		(*NgoloFuzzOne_IntNgdotModInverse)(nil),
		(*NgoloFuzzOne_Jacobi)(nil),
		(*NgoloFuzzOne_IntNgdotModSqrt)(nil),
		(*NgoloFuzzOne_IntNgdotLsh)(nil),
		(*NgoloFuzzOne_IntNgdotRsh)(nil),
		(*NgoloFuzzOne_IntNgdotBit)(nil),
		(*NgoloFuzzOne_IntNgdotSetBit)(nil),
		(*NgoloFuzzOne_IntNgdotAnd)(nil),
		(*NgoloFuzzOne_IntNgdotAndNot)(nil),
		(*NgoloFuzzOne_IntNgdotOr)(nil),
		(*NgoloFuzzOne_IntNgdotXor)(nil),
		(*NgoloFuzzOne_IntNgdotNot)(nil),
		(*NgoloFuzzOne_IntNgdotSqrt)(nil),
		(*NgoloFuzzOne_IntNgdotText)(nil),
		(*NgoloFuzzOne_IntNgdotAppend)(nil),
		(*NgoloFuzzOne_IntNgdotString)(nil),
		(*NgoloFuzzOne_IntNgdotGobEncode)(nil),
		(*NgoloFuzzOne_IntNgdotGobDecode)(nil),
		(*NgoloFuzzOne_IntNgdotAppendText)(nil),
		(*NgoloFuzzOne_IntNgdotMarshalText)(nil),
		(*NgoloFuzzOne_IntNgdotUnmarshalText)(nil),
		(*NgoloFuzzOne_IntNgdotMarshalJSON)(nil),
		(*NgoloFuzzOne_IntNgdotUnmarshalJSON)(nil),
		(*NgoloFuzzOne_IntNgdotProbablyPrime)(nil),
		(*NgoloFuzzOne_NewRat)(nil),
		(*NgoloFuzzOne_RatNgdotSetFloat64)(nil),
		(*NgoloFuzzOne_RatNgdotFloat32)(nil),
		(*NgoloFuzzOne_RatNgdotFloat64)(nil),
		(*NgoloFuzzOne_RatNgdotSetFrac)(nil),
		(*NgoloFuzzOne_RatNgdotSetFrac64)(nil),
		(*NgoloFuzzOne_RatNgdotSetInt)(nil),
		(*NgoloFuzzOne_RatNgdotSetInt64)(nil),
		(*NgoloFuzzOne_RatNgdotSetUint64)(nil),
		(*NgoloFuzzOne_RatNgdotSet)(nil),
		(*NgoloFuzzOne_RatNgdotAbs)(nil),
		(*NgoloFuzzOne_RatNgdotNeg)(nil),
		(*NgoloFuzzOne_RatNgdotInv)(nil),
		(*NgoloFuzzOne_RatNgdotSign)(nil),
		(*NgoloFuzzOne_RatNgdotIsInt)(nil),
		(*NgoloFuzzOne_RatNgdotNum)(nil),
		(*NgoloFuzzOne_RatNgdotDenom)(nil),
		(*NgoloFuzzOne_RatNgdotCmp)(nil),
		(*NgoloFuzzOne_RatNgdotAdd)(nil),
		(*NgoloFuzzOne_RatNgdotSub)(nil),
		(*NgoloFuzzOne_RatNgdotMul)(nil),
		(*NgoloFuzzOne_RatNgdotSetString)(nil),
		(*NgoloFuzzOne_RatNgdotString)(nil),
		(*NgoloFuzzOne_RatNgdotRatString)(nil),
		(*NgoloFuzzOne_RatNgdotFloatString)(nil),
		(*NgoloFuzzOne_RatNgdotFloatPrec)(nil),
		(*NgoloFuzzOne_RatNgdotGobEncode)(nil),
		(*NgoloFuzzOne_RatNgdotGobDecode)(nil),
		(*NgoloFuzzOne_RatNgdotAppendText)(nil),
		(*NgoloFuzzOne_RatNgdotMarshalText)(nil),
		(*NgoloFuzzOne_RatNgdotUnmarshalText)(nil),
		(*NgoloFuzzOne_RoundingModeNgdotString)(nil),
		(*NgoloFuzzOne_FloatNgdotSqrt)(nil),
	}
	// Message index 135 is NgoloFuzzAny with its scalar "item" oneof.
	file_ngolofuzz_proto_msgTypes[135].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	// Local type whose PkgPath identifies this package to the builder.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			// Zero-copy []byte view of the raw descriptor string.
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   137,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Allow the temporary construction tables to be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_math_bits
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"math/bits"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn-like stub that serves reads from a
// fixed byte slice and discards writes, letting fuzz targets exercise
// network-consuming code without real I/O.
type FuzzingConn struct {
	buf    []byte // data served to Read calls
	offset int    // number of bytes of buf already consumed
}

// Read copies the next bytes of the buffer into b and advances the read
// offset; it returns io.EOF once the buffer is exhausted.
//
// Bug fix: the original branch condition compared len(b) against
// len(c.buf)+c.offset instead of the remaining byte count, so when fewer
// bytes remained than len(b) it copied only the remainder yet reported
// n = len(b), handing callers uninitialized tail bytes. copy already
// returns the exact number of bytes transferred, so use that.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write pretends to accept all of b and discards it.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so further Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a stub net.Addr used by FuzzingConn's address methods.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed fake address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a fixed stub address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed stub address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; the in-memory connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the byte slice a in a FuzzingConn whose reads
// serve exactly those bytes.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit these helper constructors when the harness needs them.

// CreateBigInt builds a non-negative big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray converts a slice of protobuf int64 values to native ints.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array narrows a slice of protobuf int64 values to uint16
// (values are truncated modulo 2^16, matching Go's integer conversion).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
	var first rune = '\x00'
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid runs the fuzz case encoded in data, which must be a valid
// protobuf NgoloFuzzList; it panics if unmarshalling fails.
func FuzzNG_valid(data []byte) int {
	list := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, list); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics are expected control flow from the generated
		// harness; re-panic on anything else so real crashes surface.
		if _, ok := r.(string); !ok {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// FuzzNG_unsure runs the fuzz case in data when it decodes as a
// NgoloFuzzList; inputs that are not valid protobuf are silently ignored.
func FuzzNG_unsure(data []byte) int {
	list := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, list); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics are expected control flow from the generated
		// harness; re-panic on anything else so real crashes surface.
		if _, ok := r.(string); !ok {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool
// FuzzNG_List interprets gen as a sequence of math/bits calls and executes
// each one in order. On first use it optionally dumps a Go reproducer of
// the call sequence to the file named by the FUZZ_NG_REPRODUCER environment
// variable. Returns 1 when the whole list was executed, 0 when the list was
// truncated by the length bound.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Bound the amount of work done for a single input.
		if l > 4096 {
			return 0
		}
		// Dispatch on the oneof wrapper type; each case unpacks the
		// decoded arguments and calls the corresponding bits function.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_LeadingZeros:
			arg0 := uint(a.LeadingZeros.X)
			bits.LeadingZeros(arg0)
		case *NgoloFuzzOne_LeadingZeros8:
			arg0 := uint8(a.LeadingZeros8.X)
			bits.LeadingZeros8(arg0)
		case *NgoloFuzzOne_LeadingZeros16:
			arg0 := uint16(a.LeadingZeros16.X)
			bits.LeadingZeros16(arg0)
		case *NgoloFuzzOne_LeadingZeros32:
			bits.LeadingZeros32(a.LeadingZeros32.X)
		case *NgoloFuzzOne_LeadingZeros64:
			bits.LeadingZeros64(a.LeadingZeros64.X)
		case *NgoloFuzzOne_TrailingZeros:
			arg0 := uint(a.TrailingZeros.X)
			bits.TrailingZeros(arg0)
		case *NgoloFuzzOne_TrailingZeros8:
			arg0 := uint8(a.TrailingZeros8.X)
			bits.TrailingZeros8(arg0)
		case *NgoloFuzzOne_TrailingZeros16:
			arg0 := uint16(a.TrailingZeros16.X)
			bits.TrailingZeros16(arg0)
		case *NgoloFuzzOne_TrailingZeros32:
			bits.TrailingZeros32(a.TrailingZeros32.X)
		case *NgoloFuzzOne_TrailingZeros64:
			bits.TrailingZeros64(a.TrailingZeros64.X)
		case *NgoloFuzzOne_OnesCount:
			arg0 := uint(a.OnesCount.X)
			bits.OnesCount(arg0)
		case *NgoloFuzzOne_OnesCount8:
			arg0 := uint8(a.OnesCount8.X)
			bits.OnesCount8(arg0)
		case *NgoloFuzzOne_OnesCount16:
			arg0 := uint16(a.OnesCount16.X)
			bits.OnesCount16(arg0)
		case *NgoloFuzzOne_OnesCount32:
			bits.OnesCount32(a.OnesCount32.X)
		case *NgoloFuzzOne_OnesCount64:
			bits.OnesCount64(a.OnesCount64.X)
		case *NgoloFuzzOne_RotateLeft:
			arg0 := uint(a.RotateLeft.X)
			arg1 := int(a.RotateLeft.K)
			bits.RotateLeft(arg0, arg1)
		case *NgoloFuzzOne_RotateLeft8:
			arg0 := uint8(a.RotateLeft8.X)
			arg1 := int(a.RotateLeft8.K)
			bits.RotateLeft8(arg0, arg1)
		case *NgoloFuzzOne_RotateLeft16:
			arg0 := uint16(a.RotateLeft16.X)
			arg1 := int(a.RotateLeft16.K)
			bits.RotateLeft16(arg0, arg1)
		case *NgoloFuzzOne_RotateLeft32:
			arg1 := int(a.RotateLeft32.K)
			bits.RotateLeft32(a.RotateLeft32.X, arg1)
		case *NgoloFuzzOne_RotateLeft64:
			arg1 := int(a.RotateLeft64.K)
			bits.RotateLeft64(a.RotateLeft64.X, arg1)
		case *NgoloFuzzOne_Reverse:
			arg0 := uint(a.Reverse.X)
			bits.Reverse(arg0)
		case *NgoloFuzzOne_Reverse8:
			arg0 := uint8(a.Reverse8.X)
			bits.Reverse8(arg0)
		case *NgoloFuzzOne_Reverse16:
			arg0 := uint16(a.Reverse16.X)
			bits.Reverse16(arg0)
		case *NgoloFuzzOne_Reverse32:
			bits.Reverse32(a.Reverse32.X)
		case *NgoloFuzzOne_Reverse64:
			bits.Reverse64(a.Reverse64.X)
		case *NgoloFuzzOne_ReverseBytes:
			arg0 := uint(a.ReverseBytes.X)
			bits.ReverseBytes(arg0)
		case *NgoloFuzzOne_ReverseBytes16:
			arg0 := uint16(a.ReverseBytes16.X)
			bits.ReverseBytes16(arg0)
		case *NgoloFuzzOne_ReverseBytes32:
			bits.ReverseBytes32(a.ReverseBytes32.X)
		case *NgoloFuzzOne_ReverseBytes64:
			bits.ReverseBytes64(a.ReverseBytes64.X)
		case *NgoloFuzzOne_Len:
			arg0 := uint(a.Len.X)
			bits.Len(arg0)
		case *NgoloFuzzOne_Len8:
			arg0 := uint8(a.Len8.X)
			bits.Len8(arg0)
		case *NgoloFuzzOne_Len16:
			arg0 := uint16(a.Len16.X)
			bits.Len16(arg0)
		case *NgoloFuzzOne_Len32:
			bits.Len32(a.Len32.X)
		case *NgoloFuzzOne_Len64:
			bits.Len64(a.Len64.X)
		case *NgoloFuzzOne_Add:
			arg0 := uint(a.Add.X)
			arg1 := uint(a.Add.Y)
			arg2 := uint(a.Add.Carry)
			bits.Add(arg0, arg1, arg2)
		case *NgoloFuzzOne_Add32:
			bits.Add32(a.Add32.X, a.Add32.Y, a.Add32.Carry)
		case *NgoloFuzzOne_Add64:
			bits.Add64(a.Add64.X, a.Add64.Y, a.Add64.Carry)
		case *NgoloFuzzOne_Sub:
			arg0 := uint(a.Sub.X)
			arg1 := uint(a.Sub.Y)
			arg2 := uint(a.Sub.Borrow)
			bits.Sub(arg0, arg1, arg2)
		case *NgoloFuzzOne_Sub32:
			bits.Sub32(a.Sub32.X, a.Sub32.Y, a.Sub32.Borrow)
		case *NgoloFuzzOne_Sub64:
			bits.Sub64(a.Sub64.X, a.Sub64.Y, a.Sub64.Borrow)
		case *NgoloFuzzOne_Mul:
			arg0 := uint(a.Mul.X)
			arg1 := uint(a.Mul.Y)
			bits.Mul(arg0, arg1)
		case *NgoloFuzzOne_Mul32:
			bits.Mul32(a.Mul32.X, a.Mul32.Y)
		case *NgoloFuzzOne_Mul64:
			bits.Mul64(a.Mul64.X, a.Mul64.Y)
		}
	}
	return 1
}
// PrintNG_List writes to w a Go-source reproducer: one line per item in gen,
// spelling out the exact math/bits call that FuzzNG_List would execute.
// WriteString errors are ignored (best-effort debugging output).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_LeadingZeros:
			w.WriteString(fmt.Sprintf("bits.LeadingZeros(uint(%#+v))\n", a.LeadingZeros.X))
		case *NgoloFuzzOne_LeadingZeros8:
			w.WriteString(fmt.Sprintf("bits.LeadingZeros8(uint8(%#+v))\n", a.LeadingZeros8.X))
		case *NgoloFuzzOne_LeadingZeros16:
			w.WriteString(fmt.Sprintf("bits.LeadingZeros16(uint16(%#+v))\n", a.LeadingZeros16.X))
		case *NgoloFuzzOne_LeadingZeros32:
			w.WriteString(fmt.Sprintf("bits.LeadingZeros32(%#+v)\n", a.LeadingZeros32.X))
		case *NgoloFuzzOne_LeadingZeros64:
			w.WriteString(fmt.Sprintf("bits.LeadingZeros64(%#+v)\n", a.LeadingZeros64.X))
		case *NgoloFuzzOne_TrailingZeros:
			w.WriteString(fmt.Sprintf("bits.TrailingZeros(uint(%#+v))\n", a.TrailingZeros.X))
		case *NgoloFuzzOne_TrailingZeros8:
			w.WriteString(fmt.Sprintf("bits.TrailingZeros8(uint8(%#+v))\n", a.TrailingZeros8.X))
		case *NgoloFuzzOne_TrailingZeros16:
			w.WriteString(fmt.Sprintf("bits.TrailingZeros16(uint16(%#+v))\n", a.TrailingZeros16.X))
		case *NgoloFuzzOne_TrailingZeros32:
			w.WriteString(fmt.Sprintf("bits.TrailingZeros32(%#+v)\n", a.TrailingZeros32.X))
		case *NgoloFuzzOne_TrailingZeros64:
			w.WriteString(fmt.Sprintf("bits.TrailingZeros64(%#+v)\n", a.TrailingZeros64.X))
		case *NgoloFuzzOne_OnesCount:
			w.WriteString(fmt.Sprintf("bits.OnesCount(uint(%#+v))\n", a.OnesCount.X))
		case *NgoloFuzzOne_OnesCount8:
			w.WriteString(fmt.Sprintf("bits.OnesCount8(uint8(%#+v))\n", a.OnesCount8.X))
		case *NgoloFuzzOne_OnesCount16:
			w.WriteString(fmt.Sprintf("bits.OnesCount16(uint16(%#+v))\n", a.OnesCount16.X))
		case *NgoloFuzzOne_OnesCount32:
			w.WriteString(fmt.Sprintf("bits.OnesCount32(%#+v)\n", a.OnesCount32.X))
		case *NgoloFuzzOne_OnesCount64:
			w.WriteString(fmt.Sprintf("bits.OnesCount64(%#+v)\n", a.OnesCount64.X))
		case *NgoloFuzzOne_RotateLeft:
			w.WriteString(fmt.Sprintf("bits.RotateLeft(uint(%#+v), int(%#+v))\n", a.RotateLeft.X, a.RotateLeft.K))
		case *NgoloFuzzOne_RotateLeft8:
			w.WriteString(fmt.Sprintf("bits.RotateLeft8(uint8(%#+v), int(%#+v))\n", a.RotateLeft8.X, a.RotateLeft8.K))
		case *NgoloFuzzOne_RotateLeft16:
			w.WriteString(fmt.Sprintf("bits.RotateLeft16(uint16(%#+v), int(%#+v))\n", a.RotateLeft16.X, a.RotateLeft16.K))
		case *NgoloFuzzOne_RotateLeft32:
			w.WriteString(fmt.Sprintf("bits.RotateLeft32(%#+v, int(%#+v))\n", a.RotateLeft32.X, a.RotateLeft32.K))
		case *NgoloFuzzOne_RotateLeft64:
			w.WriteString(fmt.Sprintf("bits.RotateLeft64(%#+v, int(%#+v))\n", a.RotateLeft64.X, a.RotateLeft64.K))
		case *NgoloFuzzOne_Reverse:
			w.WriteString(fmt.Sprintf("bits.Reverse(uint(%#+v))\n", a.Reverse.X))
		case *NgoloFuzzOne_Reverse8:
			w.WriteString(fmt.Sprintf("bits.Reverse8(uint8(%#+v))\n", a.Reverse8.X))
		case *NgoloFuzzOne_Reverse16:
			w.WriteString(fmt.Sprintf("bits.Reverse16(uint16(%#+v))\n", a.Reverse16.X))
		case *NgoloFuzzOne_Reverse32:
			w.WriteString(fmt.Sprintf("bits.Reverse32(%#+v)\n", a.Reverse32.X))
		case *NgoloFuzzOne_Reverse64:
			w.WriteString(fmt.Sprintf("bits.Reverse64(%#+v)\n", a.Reverse64.X))
		case *NgoloFuzzOne_ReverseBytes:
			w.WriteString(fmt.Sprintf("bits.ReverseBytes(uint(%#+v))\n", a.ReverseBytes.X))
		case *NgoloFuzzOne_ReverseBytes16:
			w.WriteString(fmt.Sprintf("bits.ReverseBytes16(uint16(%#+v))\n", a.ReverseBytes16.X))
		case *NgoloFuzzOne_ReverseBytes32:
			w.WriteString(fmt.Sprintf("bits.ReverseBytes32(%#+v)\n", a.ReverseBytes32.X))
		case *NgoloFuzzOne_ReverseBytes64:
			w.WriteString(fmt.Sprintf("bits.ReverseBytes64(%#+v)\n", a.ReverseBytes64.X))
		case *NgoloFuzzOne_Len:
			w.WriteString(fmt.Sprintf("bits.Len(uint(%#+v))\n", a.Len.X))
		case *NgoloFuzzOne_Len8:
			w.WriteString(fmt.Sprintf("bits.Len8(uint8(%#+v))\n", a.Len8.X))
		case *NgoloFuzzOne_Len16:
			w.WriteString(fmt.Sprintf("bits.Len16(uint16(%#+v))\n", a.Len16.X))
		case *NgoloFuzzOne_Len32:
			w.WriteString(fmt.Sprintf("bits.Len32(%#+v)\n", a.Len32.X))
		case *NgoloFuzzOne_Len64:
			w.WriteString(fmt.Sprintf("bits.Len64(%#+v)\n", a.Len64.X))
		case *NgoloFuzzOne_Add:
			w.WriteString(fmt.Sprintf("bits.Add(uint(%#+v), uint(%#+v), uint(%#+v))\n", a.Add.X, a.Add.Y, a.Add.Carry))
		case *NgoloFuzzOne_Add32:
			w.WriteString(fmt.Sprintf("bits.Add32(%#+v, %#+v, %#+v)\n", a.Add32.X, a.Add32.Y, a.Add32.Carry))
		case *NgoloFuzzOne_Add64:
			w.WriteString(fmt.Sprintf("bits.Add64(%#+v, %#+v, %#+v)\n", a.Add64.X, a.Add64.Y, a.Add64.Carry))
		case *NgoloFuzzOne_Sub:
			w.WriteString(fmt.Sprintf("bits.Sub(uint(%#+v), uint(%#+v), uint(%#+v))\n", a.Sub.X, a.Sub.Y, a.Sub.Borrow))
		case *NgoloFuzzOne_Sub32:
			w.WriteString(fmt.Sprintf("bits.Sub32(%#+v, %#+v, %#+v)\n", a.Sub32.X, a.Sub32.Y, a.Sub32.Borrow))
		case *NgoloFuzzOne_Sub64:
			w.WriteString(fmt.Sprintf("bits.Sub64(%#+v, %#+v, %#+v)\n", a.Sub64.X, a.Sub64.Y, a.Sub64.Borrow))
		case *NgoloFuzzOne_Mul:
			w.WriteString(fmt.Sprintf("bits.Mul(uint(%#+v), uint(%#+v))\n", a.Mul.X, a.Mul.Y))
		case *NgoloFuzzOne_Mul32:
			w.WriteString(fmt.Sprintf("bits.Mul32(%#+v, %#+v)\n", a.Mul32.X, a.Mul32.Y))
		case *NgoloFuzzOne_Mul64:
			w.WriteString(fmt.Sprintf("bits.Mul64(%#+v, %#+v)\n", a.Mul64.X, a.Mul64.Y))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_math_bits
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guard against version skew between this generated code and
// the linked protoimpl runtime; either expression fails to compile when the
// versions are incompatible.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// LeadingZerosArgs is the protoc-generated message carrying the x argument
// for bits.LeadingZeros (source: ngolofuzz.proto; do not edit by hand).
type LeadingZerosArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *LeadingZerosArgs) Reset() {
	*x = LeadingZerosArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *LeadingZerosArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*LeadingZerosArgs) ProtoMessage() {}
func (x *LeadingZerosArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use LeadingZerosArgs.ProtoReflect.Descriptor instead.
func (*LeadingZerosArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *LeadingZerosArgs) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// LeadingZeros8Args is the protoc-generated message carrying the x argument
// for bits.LeadingZeros8 (x is widened to uint32 on the wire).
type LeadingZeros8Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *LeadingZeros8Args) Reset() {
	*x = LeadingZeros8Args{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *LeadingZeros8Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*LeadingZeros8Args) ProtoMessage() {}
func (x *LeadingZeros8Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use LeadingZeros8Args.ProtoReflect.Descriptor instead.
func (*LeadingZeros8Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *LeadingZeros8Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// LeadingZeros16Args is the protoc-generated message carrying the x argument
// for bits.LeadingZeros16 (x is widened to uint32 on the wire).
type LeadingZeros16Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *LeadingZeros16Args) Reset() {
	*x = LeadingZeros16Args{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *LeadingZeros16Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*LeadingZeros16Args) ProtoMessage() {}
func (x *LeadingZeros16Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use LeadingZeros16Args.ProtoReflect.Descriptor instead.
func (*LeadingZeros16Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *LeadingZeros16Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// LeadingZeros32Args is the protoc-generated message carrying the x argument
// for bits.LeadingZeros32.
type LeadingZeros32Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *LeadingZeros32Args) Reset() {
	*x = LeadingZeros32Args{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *LeadingZeros32Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*LeadingZeros32Args) ProtoMessage() {}
func (x *LeadingZeros32Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use LeadingZeros32Args.ProtoReflect.Descriptor instead.
func (*LeadingZeros32Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *LeadingZeros32Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// LeadingZeros64Args is the protoc-generated message carrying the x argument
// for bits.LeadingZeros64.
type LeadingZeros64Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *LeadingZeros64Args) Reset() {
	*x = LeadingZeros64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *LeadingZeros64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*LeadingZeros64Args) ProtoMessage() {}
func (x *LeadingZeros64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use LeadingZeros64Args.ProtoReflect.Descriptor instead.
func (*LeadingZeros64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *LeadingZeros64Args) GetX() uint64 {
	if x != nil {
		return x.X
	}
	return 0
}
// TrailingZerosArgs is the protoc-generated message carrying the x argument
// for bits.TrailingZeros.
type TrailingZerosArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *TrailingZerosArgs) Reset() {
	*x = TrailingZerosArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *TrailingZerosArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*TrailingZerosArgs) ProtoMessage() {}
func (x *TrailingZerosArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use TrailingZerosArgs.ProtoReflect.Descriptor instead.
func (*TrailingZerosArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *TrailingZerosArgs) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// TrailingZeros8Args is the protoc-generated message carrying the x argument
// for bits.TrailingZeros8 (x is widened to uint32 on the wire).
type TrailingZeros8Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *TrailingZeros8Args) Reset() {
	*x = TrailingZeros8Args{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *TrailingZeros8Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*TrailingZeros8Args) ProtoMessage() {}
func (x *TrailingZeros8Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use TrailingZeros8Args.ProtoReflect.Descriptor instead.
func (*TrailingZeros8Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *TrailingZeros8Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// TrailingZeros16Args is the protoc-generated message carrying the x argument
// for bits.TrailingZeros16 (x is widened to uint32 on the wire).
type TrailingZeros16Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *TrailingZeros16Args) Reset() {
	*x = TrailingZeros16Args{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *TrailingZeros16Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*TrailingZeros16Args) ProtoMessage() {}
func (x *TrailingZeros16Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use TrailingZeros16Args.ProtoReflect.Descriptor instead.
func (*TrailingZeros16Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *TrailingZeros16Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// TrailingZeros32Args is the protoc-generated message carrying the x argument
// for bits.TrailingZeros32.
type TrailingZeros32Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *TrailingZeros32Args) Reset() {
	*x = TrailingZeros32Args{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *TrailingZeros32Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*TrailingZeros32Args) ProtoMessage() {}
func (x *TrailingZeros32Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use TrailingZeros32Args.ProtoReflect.Descriptor instead.
func (*TrailingZeros32Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *TrailingZeros32Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// TrailingZeros64Args is the protoc-generated message carrying the x argument
// for bits.TrailingZeros64.
type TrailingZeros64Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *TrailingZeros64Args) Reset() {
	*x = TrailingZeros64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *TrailingZeros64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*TrailingZeros64Args) ProtoMessage() {}
func (x *TrailingZeros64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use TrailingZeros64Args.ProtoReflect.Descriptor instead.
func (*TrailingZeros64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *TrailingZeros64Args) GetX() uint64 {
	if x != nil {
		return x.X
	}
	return 0
}
// OnesCountArgs is the protoc-generated message carrying the x argument
// for bits.OnesCount.
type OnesCountArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *OnesCountArgs) Reset() {
	*x = OnesCountArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *OnesCountArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*OnesCountArgs) ProtoMessage() {}
func (x *OnesCountArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use OnesCountArgs.ProtoReflect.Descriptor instead.
func (*OnesCountArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *OnesCountArgs) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// OnesCount8Args is the protoc-generated message carrying the x argument
// for bits.OnesCount8 (x is widened to uint32 on the wire).
type OnesCount8Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *OnesCount8Args) Reset() {
	*x = OnesCount8Args{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *OnesCount8Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*OnesCount8Args) ProtoMessage() {}
func (x *OnesCount8Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use OnesCount8Args.ProtoReflect.Descriptor instead.
func (*OnesCount8Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *OnesCount8Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// OnesCount16Args is the protoc-generated message carrying the x argument
// for bits.OnesCount16 (x is widened to uint32 on the wire).
type OnesCount16Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *OnesCount16Args) Reset() {
	*x = OnesCount16Args{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *OnesCount16Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*OnesCount16Args) ProtoMessage() {}
func (x *OnesCount16Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use OnesCount16Args.ProtoReflect.Descriptor instead.
func (*OnesCount16Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *OnesCount16Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// OnesCount32Args is the protoc-generated message carrying the x argument
// for bits.OnesCount32.
type OnesCount32Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *OnesCount32Args) Reset() {
	*x = OnesCount32Args{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *OnesCount32Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*OnesCount32Args) ProtoMessage() {}
func (x *OnesCount32Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use OnesCount32Args.ProtoReflect.Descriptor instead.
func (*OnesCount32Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *OnesCount32Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// OnesCount64Args is the protoc-generated message carrying the x argument
// for bits.OnesCount64.
type OnesCount64Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *OnesCount64Args) Reset() {
	*x = OnesCount64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *OnesCount64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*OnesCount64Args) ProtoMessage() {}
func (x *OnesCount64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use OnesCount64Args.ProtoReflect.Descriptor instead.
func (*OnesCount64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *OnesCount64Args) GetX() uint64 {
	if x != nil {
		return x.X
	}
	return 0
}
// RotateLeftArgs is the protoc-generated message carrying the x and k
// arguments for bits.RotateLeft.
type RotateLeftArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	K int64 `protobuf:"varint,2,opt,name=k,proto3" json:"k,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *RotateLeftArgs) Reset() {
	*x = RotateLeftArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *RotateLeftArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*RotateLeftArgs) ProtoMessage() {}
func (x *RotateLeftArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use RotateLeftArgs.ProtoReflect.Descriptor instead.
func (*RotateLeftArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *RotateLeftArgs) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// GetK returns k, or 0 when the receiver is nil.
func (x *RotateLeftArgs) GetK() int64 {
	if x != nil {
		return x.K
	}
	return 0
}
// RotateLeft8Args is the protoc-generated message carrying the x and k
// arguments for bits.RotateLeft8 (x is widened to uint32 on the wire).
type RotateLeft8Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	K int64 `protobuf:"varint,2,opt,name=k,proto3" json:"k,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *RotateLeft8Args) Reset() {
	*x = RotateLeft8Args{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *RotateLeft8Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*RotateLeft8Args) ProtoMessage() {}
func (x *RotateLeft8Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use RotateLeft8Args.ProtoReflect.Descriptor instead.
func (*RotateLeft8Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *RotateLeft8Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// GetK returns k, or 0 when the receiver is nil.
func (x *RotateLeft8Args) GetK() int64 {
	if x != nil {
		return x.K
	}
	return 0
}
// RotateLeft16Args is the protoc-generated message carrying the x and k
// arguments for bits.RotateLeft16 (x is widened to uint32 on the wire).
type RotateLeft16Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	K int64 `protobuf:"varint,2,opt,name=k,proto3" json:"k,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *RotateLeft16Args) Reset() {
	*x = RotateLeft16Args{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *RotateLeft16Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*RotateLeft16Args) ProtoMessage() {}
func (x *RotateLeft16Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use RotateLeft16Args.ProtoReflect.Descriptor instead.
func (*RotateLeft16Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *RotateLeft16Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// GetK returns k, or 0 when the receiver is nil.
func (x *RotateLeft16Args) GetK() int64 {
	if x != nil {
		return x.K
	}
	return 0
}
// RotateLeft32Args is the protoc-generated message carrying the x and k
// arguments for bits.RotateLeft32.
type RotateLeft32Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	K int64 `protobuf:"varint,2,opt,name=k,proto3" json:"k,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *RotateLeft32Args) Reset() {
	*x = RotateLeft32Args{}
	mi := &file_ngolofuzz_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *RotateLeft32Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*RotateLeft32Args) ProtoMessage() {}
func (x *RotateLeft32Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use RotateLeft32Args.ProtoReflect.Descriptor instead.
func (*RotateLeft32Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *RotateLeft32Args) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// GetK returns k, or 0 when the receiver is nil.
func (x *RotateLeft32Args) GetK() int64 {
	if x != nil {
		return x.K
	}
	return 0
}
// RotateLeft64Args is the protoc-generated message carrying the x and k
// arguments for bits.RotateLeft64.
type RotateLeft64Args struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	K int64 `protobuf:"varint,2,opt,name=k,proto3" json:"k,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *RotateLeft64Args) Reset() {
	*x = RotateLeft64Args{}
	mi := &file_ngolofuzz_proto_msgTypes[19]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *RotateLeft64Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*RotateLeft64Args) ProtoMessage() {}
func (x *RotateLeft64Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[19]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use RotateLeft64Args.ProtoReflect.Descriptor instead.
func (*RotateLeft64Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *RotateLeft64Args) GetX() uint64 {
	if x != nil {
		return x.X
	}
	return 0
}
// GetK returns k, or 0 when the receiver is nil.
func (x *RotateLeft64Args) GetK() int64 {
	if x != nil {
		return x.K
	}
	return 0
}
// ReverseArgs is the protoc-generated message carrying the x argument
// for bits.Reverse.
type ReverseArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *ReverseArgs) Reset() {
	*x = ReverseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[20]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *ReverseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*ReverseArgs) ProtoMessage() {}
func (x *ReverseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[20]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ReverseArgs.ProtoReflect.Descriptor instead.
func (*ReverseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// GetX returns x, or 0 when the receiver is nil.
func (x *ReverseArgs) GetX() uint32 {
	if x != nil {
		return x.X
	}
	return 0
}
// Reverse8Args is a generated protobuf message carrying the single operand X
// for the Reverse8 fuzz call.
type Reverse8Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Reverse8Args) Reset() {
*x = Reverse8Args{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Reverse8Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Reverse8Args as a protobuf message.
func (*Reverse8Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Reverse8Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Reverse8Args.ProtoReflect.Descriptor instead.
func (*Reverse8Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *Reverse8Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
// Reverse16Args is a generated protobuf message carrying the single operand X
// for the Reverse16 fuzz call.
type Reverse16Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Reverse16Args) Reset() {
*x = Reverse16Args{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Reverse16Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Reverse16Args as a protobuf message.
func (*Reverse16Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Reverse16Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Reverse16Args.ProtoReflect.Descriptor instead.
func (*Reverse16Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *Reverse16Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
// Reverse32Args is a generated protobuf message carrying the single operand X
// for the Reverse32 fuzz call.
type Reverse32Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Reverse32Args) Reset() {
*x = Reverse32Args{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Reverse32Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Reverse32Args as a protobuf message.
func (*Reverse32Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Reverse32Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Reverse32Args.ProtoReflect.Descriptor instead.
func (*Reverse32Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *Reverse32Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
// Reverse64Args is a generated protobuf message carrying the single operand X
// for the Reverse64 fuzz call.
type Reverse64Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Reverse64Args) Reset() {
*x = Reverse64Args{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Reverse64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Reverse64Args as a protobuf message.
func (*Reverse64Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Reverse64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Reverse64Args.ProtoReflect.Descriptor instead.
func (*Reverse64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *Reverse64Args) GetX() uint64 {
if x != nil {
return x.X
}
return 0
}
// ReverseBytesArgs is a generated protobuf message carrying the single
// operand X for the ReverseBytes fuzz call.
type ReverseBytesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *ReverseBytesArgs) Reset() {
*x = ReverseBytesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *ReverseBytesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ReverseBytesArgs as a protobuf message.
func (*ReverseBytesArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *ReverseBytesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReverseBytesArgs.ProtoReflect.Descriptor instead.
func (*ReverseBytesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *ReverseBytesArgs) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
// ReverseBytes16Args is a generated protobuf message carrying the single
// operand X for the ReverseBytes16 fuzz call.
type ReverseBytes16Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *ReverseBytes16Args) Reset() {
*x = ReverseBytes16Args{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *ReverseBytes16Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ReverseBytes16Args as a protobuf message.
func (*ReverseBytes16Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *ReverseBytes16Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReverseBytes16Args.ProtoReflect.Descriptor instead.
func (*ReverseBytes16Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *ReverseBytes16Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
// ReverseBytes32Args is a generated protobuf message carrying the single
// operand X for the ReverseBytes32 fuzz call.
type ReverseBytes32Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *ReverseBytes32Args) Reset() {
*x = ReverseBytes32Args{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *ReverseBytes32Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ReverseBytes32Args as a protobuf message.
func (*ReverseBytes32Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *ReverseBytes32Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReverseBytes32Args.ProtoReflect.Descriptor instead.
func (*ReverseBytes32Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *ReverseBytes32Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
// ReverseBytes64Args is a generated protobuf message carrying the single
// operand X for the ReverseBytes64 fuzz call.
type ReverseBytes64Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *ReverseBytes64Args) Reset() {
*x = ReverseBytes64Args{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *ReverseBytes64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ReverseBytes64Args as a protobuf message.
func (*ReverseBytes64Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *ReverseBytes64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReverseBytes64Args.ProtoReflect.Descriptor instead.
func (*ReverseBytes64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *ReverseBytes64Args) GetX() uint64 {
if x != nil {
return x.X
}
return 0
}
// LenArgs is a generated protobuf message carrying the single operand X
// for the Len fuzz call.
type LenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *LenArgs) Reset() {
*x = LenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *LenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks LenArgs as a protobuf message.
func (*LenArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *LenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LenArgs.ProtoReflect.Descriptor instead.
func (*LenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *LenArgs) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
// Len8Args is a generated protobuf message carrying the single operand X
// for the Len8 fuzz call.
type Len8Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Len8Args) Reset() {
*x = Len8Args{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Len8Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Len8Args as a protobuf message.
func (*Len8Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Len8Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Len8Args.ProtoReflect.Descriptor instead.
func (*Len8Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *Len8Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
// Len16Args is a generated protobuf message carrying the single operand X
// for the Len16 fuzz call.
type Len16Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Len16Args) Reset() {
*x = Len16Args{}
mi := &file_ngolofuzz_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Len16Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Len16Args as a protobuf message.
func (*Len16Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Len16Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Len16Args.ProtoReflect.Descriptor instead.
func (*Len16Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *Len16Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
// Len32Args is a generated protobuf message carrying the single operand X
// for the Len32 fuzz call.
type Len32Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Len32Args) Reset() {
*x = Len32Args{}
mi := &file_ngolofuzz_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Len32Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Len32Args as a protobuf message.
func (*Len32Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Len32Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Len32Args.ProtoReflect.Descriptor instead.
func (*Len32Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *Len32Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
// Len64Args is a generated protobuf message carrying the single operand X
// for the Len64 fuzz call.
type Len64Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Len64Args) Reset() {
*x = Len64Args{}
mi := &file_ngolofuzz_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Len64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Len64Args as a protobuf message.
func (*Len64Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Len64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Len64Args.ProtoReflect.Descriptor instead.
func (*Len64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{33}
}
// GetX returns the X field, or 0 when the receiver is nil.
func (x *Len64Args) GetX() uint64 {
if x != nil {
return x.X
}
return 0
}
// AddArgs is a generated protobuf message carrying the operands X, Y and
// Carry for the Add fuzz call.
type AddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y uint32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
Carry uint32 `protobuf:"varint,3,opt,name=carry,proto3" json:"carry,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *AddArgs) Reset() {
*x = AddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *AddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks AddArgs as a protobuf message.
func (*AddArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *AddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[34]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AddArgs.ProtoReflect.Descriptor instead.
func (*AddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{34}
}
// Getters return the corresponding field, or 0 when the receiver is nil.
func (x *AddArgs) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
func (x *AddArgs) GetY() uint32 {
if x != nil {
return x.Y
}
return 0
}
func (x *AddArgs) GetCarry() uint32 {
if x != nil {
return x.Carry
}
return 0
}
// Add32Args is a generated protobuf message carrying the operands X, Y and
// Carry for the Add32 fuzz call.
type Add32Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y uint32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
Carry uint32 `protobuf:"varint,3,opt,name=carry,proto3" json:"carry,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Add32Args) Reset() {
*x = Add32Args{}
mi := &file_ngolofuzz_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Add32Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Add32Args as a protobuf message.
func (*Add32Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Add32Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[35]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Add32Args.ProtoReflect.Descriptor instead.
func (*Add32Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{35}
}
// Getters return the corresponding field, or 0 when the receiver is nil.
func (x *Add32Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
func (x *Add32Args) GetY() uint32 {
if x != nil {
return x.Y
}
return 0
}
func (x *Add32Args) GetCarry() uint32 {
if x != nil {
return x.Carry
}
return 0
}
// Add64Args is a generated protobuf message carrying the operands X, Y and
// Carry for the Add64 fuzz call.
type Add64Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y uint64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
Carry uint64 `protobuf:"varint,3,opt,name=carry,proto3" json:"carry,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Add64Args) Reset() {
*x = Add64Args{}
mi := &file_ngolofuzz_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Add64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Add64Args as a protobuf message.
func (*Add64Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Add64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[36]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Add64Args.ProtoReflect.Descriptor instead.
func (*Add64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{36}
}
// Getters return the corresponding field, or 0 when the receiver is nil.
func (x *Add64Args) GetX() uint64 {
if x != nil {
return x.X
}
return 0
}
func (x *Add64Args) GetY() uint64 {
if x != nil {
return x.Y
}
return 0
}
func (x *Add64Args) GetCarry() uint64 {
if x != nil {
return x.Carry
}
return 0
}
// SubArgs is a generated protobuf message carrying the operands X, Y and
// Borrow for the Sub fuzz call.
type SubArgs struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y uint32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
Borrow uint32 `protobuf:"varint,3,opt,name=borrow,proto3" json:"borrow,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *SubArgs) Reset() {
*x = SubArgs{}
mi := &file_ngolofuzz_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *SubArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks SubArgs as a protobuf message.
func (*SubArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *SubArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[37]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SubArgs.ProtoReflect.Descriptor instead.
func (*SubArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{37}
}
// Getters return the corresponding field, or 0 when the receiver is nil.
func (x *SubArgs) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
func (x *SubArgs) GetY() uint32 {
if x != nil {
return x.Y
}
return 0
}
func (x *SubArgs) GetBorrow() uint32 {
if x != nil {
return x.Borrow
}
return 0
}
// Sub32Args is a generated protobuf message carrying the operands X, Y and
// Borrow for the Sub32 fuzz call.
type Sub32Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y uint32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
Borrow uint32 `protobuf:"varint,3,opt,name=borrow,proto3" json:"borrow,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Sub32Args) Reset() {
*x = Sub32Args{}
mi := &file_ngolofuzz_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Sub32Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Sub32Args as a protobuf message.
func (*Sub32Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Sub32Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[38]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Sub32Args.ProtoReflect.Descriptor instead.
func (*Sub32Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{38}
}
// Getters return the corresponding field, or 0 when the receiver is nil.
func (x *Sub32Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
func (x *Sub32Args) GetY() uint32 {
if x != nil {
return x.Y
}
return 0
}
func (x *Sub32Args) GetBorrow() uint32 {
if x != nil {
return x.Borrow
}
return 0
}
// Sub64Args is a generated protobuf message carrying the operands X, Y and
// Borrow for the Sub64 fuzz call.
type Sub64Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y uint64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
Borrow uint64 `protobuf:"varint,3,opt,name=borrow,proto3" json:"borrow,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Sub64Args) Reset() {
*x = Sub64Args{}
mi := &file_ngolofuzz_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Sub64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Sub64Args as a protobuf message.
func (*Sub64Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Sub64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[39]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Sub64Args.ProtoReflect.Descriptor instead.
func (*Sub64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{39}
}
// Getters return the corresponding field, or 0 when the receiver is nil.
func (x *Sub64Args) GetX() uint64 {
if x != nil {
return x.X
}
return 0
}
func (x *Sub64Args) GetY() uint64 {
if x != nil {
return x.Y
}
return 0
}
func (x *Sub64Args) GetBorrow() uint64 {
if x != nil {
return x.Borrow
}
return 0
}
// MulArgs is a generated protobuf message carrying the operands X and Y
// for the Mul fuzz call.
type MulArgs struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y uint32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *MulArgs) Reset() {
*x = MulArgs{}
mi := &file_ngolofuzz_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *MulArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks MulArgs as a protobuf message.
func (*MulArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *MulArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[40]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MulArgs.ProtoReflect.Descriptor instead.
func (*MulArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{40}
}
// Getters return the corresponding field, or 0 when the receiver is nil.
func (x *MulArgs) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
func (x *MulArgs) GetY() uint32 {
if x != nil {
return x.Y
}
return 0
}
// Mul32Args is a generated protobuf message carrying the operands X and Y
// for the Mul32 fuzz call.
type Mul32Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y uint32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Mul32Args) Reset() {
*x = Mul32Args{}
mi := &file_ngolofuzz_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Mul32Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Mul32Args as a protobuf message.
func (*Mul32Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Mul32Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[41]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Mul32Args.ProtoReflect.Descriptor instead.
func (*Mul32Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{41}
}
// Getters return the corresponding field, or 0 when the receiver is nil.
func (x *Mul32Args) GetX() uint32 {
if x != nil {
return x.X
}
return 0
}
func (x *Mul32Args) GetY() uint32 {
if x != nil {
return x.Y
}
return 0
}
// Mul64Args is a generated protobuf message carrying the operands X and Y
// for the Mul64 fuzz call.
type Mul64Args struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
X uint64 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"`
Y uint64 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *Mul64Args) Reset() {
*x = Mul64Args{}
mi := &file_ngolofuzz_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *Mul64Args) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks Mul64Args as a protobuf message.
func (*Mul64Args) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *Mul64Args) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[42]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Mul64Args.ProtoReflect.Descriptor instead.
func (*Mul64Args) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{42}
}
// Getters return the corresponding field, or 0 when the receiver is nil.
func (x *Mul64Args) GetX() uint64 {
if x != nil {
return x.X
}
return 0
}
func (x *Mul64Args) GetY() uint64 {
if x != nil {
return x.Y
}
return 0
}
// NgoloFuzzOne is a generated protobuf message wrapping exactly one fuzz
// call as a oneof: the Item field holds the argument message for whichever
// operation this step exercises.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"` // protobuf runtime bookkeeping
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_LeadingZeros
// *NgoloFuzzOne_LeadingZeros8
// *NgoloFuzzOne_LeadingZeros16
// *NgoloFuzzOne_LeadingZeros32
// *NgoloFuzzOne_LeadingZeros64
// *NgoloFuzzOne_TrailingZeros
// *NgoloFuzzOne_TrailingZeros8
// *NgoloFuzzOne_TrailingZeros16
// *NgoloFuzzOne_TrailingZeros32
// *NgoloFuzzOne_TrailingZeros64
// *NgoloFuzzOne_OnesCount
// *NgoloFuzzOne_OnesCount8
// *NgoloFuzzOne_OnesCount16
// *NgoloFuzzOne_OnesCount32
// *NgoloFuzzOne_OnesCount64
// *NgoloFuzzOne_RotateLeft
// *NgoloFuzzOne_RotateLeft8
// *NgoloFuzzOne_RotateLeft16
// *NgoloFuzzOne_RotateLeft32
// *NgoloFuzzOne_RotateLeft64
// *NgoloFuzzOne_Reverse
// *NgoloFuzzOne_Reverse8
// *NgoloFuzzOne_Reverse16
// *NgoloFuzzOne_Reverse32
// *NgoloFuzzOne_Reverse64
// *NgoloFuzzOne_ReverseBytes
// *NgoloFuzzOne_ReverseBytes16
// *NgoloFuzzOne_ReverseBytes32
// *NgoloFuzzOne_ReverseBytes64
// *NgoloFuzzOne_Len
// *NgoloFuzzOne_Len8
// *NgoloFuzzOne_Len16
// *NgoloFuzzOne_Len32
// *NgoloFuzzOne_Len64
// *NgoloFuzzOne_Add
// *NgoloFuzzOne_Add32
// *NgoloFuzzOne_Add64
// *NgoloFuzzOne_Sub
// *NgoloFuzzOne_Sub32
// *NgoloFuzzOne_Sub64
// *NgoloFuzzOne_Mul
// *NgoloFuzzOne_Mul32
// *NgoloFuzzOne_Mul64
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-binds its message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message in the protobuf text format.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks NgoloFuzzOne as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info on first use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[43]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{43}
}
// GetItem returns whichever oneof wrapper is currently assigned to Item,
// or nil when unset or when the receiver is nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// The per-variant getters below return the wrapped argument message when
// Item holds that variant, and nil otherwise (including a nil receiver).
func (x *NgoloFuzzOne) GetLeadingZeros() *LeadingZerosArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LeadingZeros); ok {
return x.LeadingZeros
}
}
return nil
}
func (x *NgoloFuzzOne) GetLeadingZeros8() *LeadingZeros8Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LeadingZeros8); ok {
return x.LeadingZeros8
}
}
return nil
}
func (x *NgoloFuzzOne) GetLeadingZeros16() *LeadingZeros16Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LeadingZeros16); ok {
return x.LeadingZeros16
}
}
return nil
}
func (x *NgoloFuzzOne) GetLeadingZeros32() *LeadingZeros32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LeadingZeros32); ok {
return x.LeadingZeros32
}
}
return nil
}
func (x *NgoloFuzzOne) GetLeadingZeros64() *LeadingZeros64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LeadingZeros64); ok {
return x.LeadingZeros64
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrailingZeros() *TrailingZerosArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TrailingZeros); ok {
return x.TrailingZeros
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrailingZeros8() *TrailingZeros8Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TrailingZeros8); ok {
return x.TrailingZeros8
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrailingZeros16() *TrailingZeros16Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TrailingZeros16); ok {
return x.TrailingZeros16
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrailingZeros32() *TrailingZeros32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TrailingZeros32); ok {
return x.TrailingZeros32
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrailingZeros64() *TrailingZeros64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TrailingZeros64); ok {
return x.TrailingZeros64
}
}
return nil
}
func (x *NgoloFuzzOne) GetOnesCount() *OnesCountArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_OnesCount); ok {
return x.OnesCount
}
}
return nil
}
func (x *NgoloFuzzOne) GetOnesCount8() *OnesCount8Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_OnesCount8); ok {
return x.OnesCount8
}
}
return nil
}
func (x *NgoloFuzzOne) GetOnesCount16() *OnesCount16Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_OnesCount16); ok {
return x.OnesCount16
}
}
return nil
}
func (x *NgoloFuzzOne) GetOnesCount32() *OnesCount32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_OnesCount32); ok {
return x.OnesCount32
}
}
return nil
}
func (x *NgoloFuzzOne) GetOnesCount64() *OnesCount64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_OnesCount64); ok {
return x.OnesCount64
}
}
return nil
}
func (x *NgoloFuzzOne) GetRotateLeft() *RotateLeftArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RotateLeft); ok {
return x.RotateLeft
}
}
return nil
}
func (x *NgoloFuzzOne) GetRotateLeft8() *RotateLeft8Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RotateLeft8); ok {
return x.RotateLeft8
}
}
return nil
}
func (x *NgoloFuzzOne) GetRotateLeft16() *RotateLeft16Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RotateLeft16); ok {
return x.RotateLeft16
}
}
return nil
}
func (x *NgoloFuzzOne) GetRotateLeft32() *RotateLeft32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RotateLeft32); ok {
return x.RotateLeft32
}
}
return nil
}
func (x *NgoloFuzzOne) GetRotateLeft64() *RotateLeft64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RotateLeft64); ok {
return x.RotateLeft64
}
}
return nil
}
func (x *NgoloFuzzOne) GetReverse() *ReverseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Reverse); ok {
return x.Reverse
}
}
return nil
}
func (x *NgoloFuzzOne) GetReverse8() *Reverse8Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Reverse8); ok {
return x.Reverse8
}
}
return nil
}
func (x *NgoloFuzzOne) GetReverse16() *Reverse16Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Reverse16); ok {
return x.Reverse16
}
}
return nil
}
func (x *NgoloFuzzOne) GetReverse32() *Reverse32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Reverse32); ok {
return x.Reverse32
}
}
return nil
}
func (x *NgoloFuzzOne) GetReverse64() *Reverse64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Reverse64); ok {
return x.Reverse64
}
}
return nil
}
func (x *NgoloFuzzOne) GetReverseBytes() *ReverseBytesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReverseBytes); ok {
return x.ReverseBytes
}
}
return nil
}
func (x *NgoloFuzzOne) GetReverseBytes16() *ReverseBytes16Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReverseBytes16); ok {
return x.ReverseBytes16
}
}
return nil
}
func (x *NgoloFuzzOne) GetReverseBytes32() *ReverseBytes32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReverseBytes32); ok {
return x.ReverseBytes32
}
}
return nil
}
func (x *NgoloFuzzOne) GetReverseBytes64() *ReverseBytes64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReverseBytes64); ok {
return x.ReverseBytes64
}
}
return nil
}
func (x *NgoloFuzzOne) GetLen() *LenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Len); ok {
return x.Len
}
}
return nil
}
func (x *NgoloFuzzOne) GetLen8() *Len8Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Len8); ok {
return x.Len8
}
}
return nil
}
func (x *NgoloFuzzOne) GetLen16() *Len16Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Len16); ok {
return x.Len16
}
}
return nil
}
func (x *NgoloFuzzOne) GetLen32() *Len32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Len32); ok {
return x.Len32
}
}
return nil
}
func (x *NgoloFuzzOne) GetLen64() *Len64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Len64); ok {
return x.Len64
}
}
return nil
}
func (x *NgoloFuzzOne) GetAdd() *AddArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Add); ok {
return x.Add
}
}
return nil
}
func (x *NgoloFuzzOne) GetAdd32() *Add32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Add32); ok {
return x.Add32
}
}
return nil
}
func (x *NgoloFuzzOne) GetAdd64() *Add64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Add64); ok {
return x.Add64
}
}
return nil
}
func (x *NgoloFuzzOne) GetSub() *SubArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sub); ok {
return x.Sub
}
}
return nil
}
func (x *NgoloFuzzOne) GetSub32() *Sub32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sub32); ok {
return x.Sub32
}
}
return nil
}
func (x *NgoloFuzzOne) GetSub64() *Sub64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Sub64); ok {
return x.Sub64
}
}
return nil
}
func (x *NgoloFuzzOne) GetMul() *MulArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Mul); ok {
return x.Mul
}
}
return nil
}
func (x *NgoloFuzzOne) GetMul32() *Mul32Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Mul32); ok {
return x.Mul32
}
}
return nil
}
func (x *NgoloFuzzOne) GetMul64() *Mul64Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Mul64); ok {
return x.Mul64
}
}
return nil
}
// isNgoloFuzzOne_Item is the closed marker interface implemented by every
// oneof wrapper for NgoloFuzzOne.Item.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// NOTE(review): machine-generated oneof wrapper types. The protobuf struct
// tags encode wire type and field number (1..43) and must stay in sync with
// ngolofuzz.proto; regenerate rather than hand-edit.
type NgoloFuzzOne_LeadingZeros struct {
	LeadingZeros *LeadingZerosArgs `protobuf:"bytes,1,opt,name=LeadingZeros,proto3,oneof"`
}
type NgoloFuzzOne_LeadingZeros8 struct {
	LeadingZeros8 *LeadingZeros8Args `protobuf:"bytes,2,opt,name=LeadingZeros8,proto3,oneof"`
}
type NgoloFuzzOne_LeadingZeros16 struct {
	LeadingZeros16 *LeadingZeros16Args `protobuf:"bytes,3,opt,name=LeadingZeros16,proto3,oneof"`
}
type NgoloFuzzOne_LeadingZeros32 struct {
	LeadingZeros32 *LeadingZeros32Args `protobuf:"bytes,4,opt,name=LeadingZeros32,proto3,oneof"`
}
type NgoloFuzzOne_LeadingZeros64 struct {
	LeadingZeros64 *LeadingZeros64Args `protobuf:"bytes,5,opt,name=LeadingZeros64,proto3,oneof"`
}
type NgoloFuzzOne_TrailingZeros struct {
	TrailingZeros *TrailingZerosArgs `protobuf:"bytes,6,opt,name=TrailingZeros,proto3,oneof"`
}
type NgoloFuzzOne_TrailingZeros8 struct {
	TrailingZeros8 *TrailingZeros8Args `protobuf:"bytes,7,opt,name=TrailingZeros8,proto3,oneof"`
}
type NgoloFuzzOne_TrailingZeros16 struct {
	TrailingZeros16 *TrailingZeros16Args `protobuf:"bytes,8,opt,name=TrailingZeros16,proto3,oneof"`
}
type NgoloFuzzOne_TrailingZeros32 struct {
	TrailingZeros32 *TrailingZeros32Args `protobuf:"bytes,9,opt,name=TrailingZeros32,proto3,oneof"`
}
type NgoloFuzzOne_TrailingZeros64 struct {
	TrailingZeros64 *TrailingZeros64Args `protobuf:"bytes,10,opt,name=TrailingZeros64,proto3,oneof"`
}
type NgoloFuzzOne_OnesCount struct {
	OnesCount *OnesCountArgs `protobuf:"bytes,11,opt,name=OnesCount,proto3,oneof"`
}
type NgoloFuzzOne_OnesCount8 struct {
	OnesCount8 *OnesCount8Args `protobuf:"bytes,12,opt,name=OnesCount8,proto3,oneof"`
}
type NgoloFuzzOne_OnesCount16 struct {
	OnesCount16 *OnesCount16Args `protobuf:"bytes,13,opt,name=OnesCount16,proto3,oneof"`
}
type NgoloFuzzOne_OnesCount32 struct {
	OnesCount32 *OnesCount32Args `protobuf:"bytes,14,opt,name=OnesCount32,proto3,oneof"`
}
type NgoloFuzzOne_OnesCount64 struct {
	OnesCount64 *OnesCount64Args `protobuf:"bytes,15,opt,name=OnesCount64,proto3,oneof"`
}
type NgoloFuzzOne_RotateLeft struct {
	RotateLeft *RotateLeftArgs `protobuf:"bytes,16,opt,name=RotateLeft,proto3,oneof"`
}
type NgoloFuzzOne_RotateLeft8 struct {
	RotateLeft8 *RotateLeft8Args `protobuf:"bytes,17,opt,name=RotateLeft8,proto3,oneof"`
}
type NgoloFuzzOne_RotateLeft16 struct {
	RotateLeft16 *RotateLeft16Args `protobuf:"bytes,18,opt,name=RotateLeft16,proto3,oneof"`
}
type NgoloFuzzOne_RotateLeft32 struct {
	RotateLeft32 *RotateLeft32Args `protobuf:"bytes,19,opt,name=RotateLeft32,proto3,oneof"`
}
type NgoloFuzzOne_RotateLeft64 struct {
	RotateLeft64 *RotateLeft64Args `protobuf:"bytes,20,opt,name=RotateLeft64,proto3,oneof"`
}
type NgoloFuzzOne_Reverse struct {
	Reverse *ReverseArgs `protobuf:"bytes,21,opt,name=Reverse,proto3,oneof"`
}
type NgoloFuzzOne_Reverse8 struct {
	Reverse8 *Reverse8Args `protobuf:"bytes,22,opt,name=Reverse8,proto3,oneof"`
}
type NgoloFuzzOne_Reverse16 struct {
	Reverse16 *Reverse16Args `protobuf:"bytes,23,opt,name=Reverse16,proto3,oneof"`
}
type NgoloFuzzOne_Reverse32 struct {
	Reverse32 *Reverse32Args `protobuf:"bytes,24,opt,name=Reverse32,proto3,oneof"`
}
type NgoloFuzzOne_Reverse64 struct {
	Reverse64 *Reverse64Args `protobuf:"bytes,25,opt,name=Reverse64,proto3,oneof"`
}
type NgoloFuzzOne_ReverseBytes struct {
	ReverseBytes *ReverseBytesArgs `protobuf:"bytes,26,opt,name=ReverseBytes,proto3,oneof"`
}
type NgoloFuzzOne_ReverseBytes16 struct {
	ReverseBytes16 *ReverseBytes16Args `protobuf:"bytes,27,opt,name=ReverseBytes16,proto3,oneof"`
}
type NgoloFuzzOne_ReverseBytes32 struct {
	ReverseBytes32 *ReverseBytes32Args `protobuf:"bytes,28,opt,name=ReverseBytes32,proto3,oneof"`
}
type NgoloFuzzOne_ReverseBytes64 struct {
	ReverseBytes64 *ReverseBytes64Args `protobuf:"bytes,29,opt,name=ReverseBytes64,proto3,oneof"`
}
type NgoloFuzzOne_Len struct {
	Len *LenArgs `protobuf:"bytes,30,opt,name=Len,proto3,oneof"`
}
type NgoloFuzzOne_Len8 struct {
	Len8 *Len8Args `protobuf:"bytes,31,opt,name=Len8,proto3,oneof"`
}
type NgoloFuzzOne_Len16 struct {
	Len16 *Len16Args `protobuf:"bytes,32,opt,name=Len16,proto3,oneof"`
}
type NgoloFuzzOne_Len32 struct {
	Len32 *Len32Args `protobuf:"bytes,33,opt,name=Len32,proto3,oneof"`
}
type NgoloFuzzOne_Len64 struct {
	Len64 *Len64Args `protobuf:"bytes,34,opt,name=Len64,proto3,oneof"`
}
type NgoloFuzzOne_Add struct {
	Add *AddArgs `protobuf:"bytes,35,opt,name=Add,proto3,oneof"`
}
type NgoloFuzzOne_Add32 struct {
	Add32 *Add32Args `protobuf:"bytes,36,opt,name=Add32,proto3,oneof"`
}
type NgoloFuzzOne_Add64 struct {
	Add64 *Add64Args `protobuf:"bytes,37,opt,name=Add64,proto3,oneof"`
}
type NgoloFuzzOne_Sub struct {
	Sub *SubArgs `protobuf:"bytes,38,opt,name=Sub,proto3,oneof"`
}
type NgoloFuzzOne_Sub32 struct {
	Sub32 *Sub32Args `protobuf:"bytes,39,opt,name=Sub32,proto3,oneof"`
}
type NgoloFuzzOne_Sub64 struct {
	Sub64 *Sub64Args `protobuf:"bytes,40,opt,name=Sub64,proto3,oneof"`
}
type NgoloFuzzOne_Mul struct {
	Mul *MulArgs `protobuf:"bytes,41,opt,name=Mul,proto3,oneof"`
}
type NgoloFuzzOne_Mul32 struct {
	Mul32 *Mul32Args `protobuf:"bytes,42,opt,name=Mul32,proto3,oneof"`
}
type NgoloFuzzOne_Mul64 struct {
	Mul64 *Mul64Args `protobuf:"bytes,43,opt,name=Mul64,proto3,oneof"`
}
// Generated marker-method implementations tying each wrapper type to the
// isNgoloFuzzOne_Item oneof interface (no behavior).
func (*NgoloFuzzOne_LeadingZeros) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LeadingZeros8) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LeadingZeros16) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LeadingZeros32) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LeadingZeros64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TrailingZeros) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TrailingZeros8) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TrailingZeros16) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TrailingZeros32) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TrailingZeros64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_OnesCount) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_OnesCount8) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_OnesCount16) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_OnesCount32) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_OnesCount64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RotateLeft) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RotateLeft8) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RotateLeft16) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RotateLeft32) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RotateLeft64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Reverse) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Reverse8) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Reverse16) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Reverse32) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Reverse64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReverseBytes) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReverseBytes16) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReverseBytes32) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReverseBytes64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Len) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Len8) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Len16) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Len32) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Len64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Add) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Add32) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Add64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sub) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sub32) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Sub64) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Mul) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Mul32) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Mul64) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated message carrying one scalar fuzz value
// (double, int64, bool, string, or bytes) in a oneof.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset clears the message to its zero value and re-attaches the generated
// message info (index 44 in this file's type table).
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[44]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String returns the message rendered by the protobuf text formatter.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NgoloFuzzAny as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily binding
// the generated message info on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[44]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{44}
}

// GetItem returns the populated oneof wrapper, or nil for a nil receiver.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}
// Generated oneof accessors: each returns the scalar when its case is set,
// and the type's zero value otherwise (including a nil receiver).
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}
// isNgoloFuzzAny_Item is the closed marker interface for NgoloFuzzAny.Item.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// Generated oneof wrapper types (field numbers 1..5); tags must match
// ngolofuzz.proto.
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated top-level message: an ordered sequence of
// NgoloFuzzOne operations that the fuzz driver replays.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message and re-attaches message info (table index 45).
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[45]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the protobuf text form of the message.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks NgoloFuzzList as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily binding message info.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[45]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{45}
}

// GetList returns the operation list, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the runtime descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-encoded FileDescriptorProto for
// ngolofuzz.proto. NOTE(review): opaque generated data — every byte is
// significant; never hand-edit, regenerate with protoc instead.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\" \n" +
	"\x10LeadingZerosArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"!\n" +
	"\x11LeadingZeros8Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\"\n" +
	"\x12LeadingZeros16Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\"\n" +
	"\x12LeadingZeros32Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\"\n" +
	"\x12LeadingZeros64Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\x04R\x01x\"!\n" +
	"\x11TrailingZerosArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\"\n" +
	"\x12TrailingZeros8Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"#\n" +
	"\x13TrailingZeros16Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"#\n" +
	"\x13TrailingZeros32Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"#\n" +
	"\x13TrailingZeros64Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\x04R\x01x\"\x1d\n" +
	"\rOnesCountArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x1e\n" +
	"\x0eOnesCount8Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x1f\n" +
	"\x0fOnesCount16Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x1f\n" +
	"\x0fOnesCount32Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x1f\n" +
	"\x0fOnesCount64Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\x04R\x01x\",\n" +
	"\x0eRotateLeftArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
	"\x01k\x18\x02 \x01(\x03R\x01k\"-\n" +
	"\x0fRotateLeft8Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
	"\x01k\x18\x02 \x01(\x03R\x01k\".\n" +
	"\x10RotateLeft16Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
	"\x01k\x18\x02 \x01(\x03R\x01k\".\n" +
	"\x10RotateLeft32Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
	"\x01k\x18\x02 \x01(\x03R\x01k\".\n" +
	"\x10RotateLeft64Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\x04R\x01x\x12\f\n" +
	"\x01k\x18\x02 \x01(\x03R\x01k\"\x1b\n" +
	"\vReverseArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x1c\n" +
	"\fReverse8Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x1d\n" +
	"\rReverse16Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x1d\n" +
	"\rReverse32Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x1d\n" +
	"\rReverse64Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\x04R\x01x\" \n" +
	"\x10ReverseBytesArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\"\n" +
	"\x12ReverseBytes16Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\"\n" +
	"\x12ReverseBytes32Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\"\n" +
	"\x12ReverseBytes64Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\x04R\x01x\"\x17\n" +
	"\aLenArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x18\n" +
	"\bLen8Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x19\n" +
	"\tLen16Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x19\n" +
	"\tLen32Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\"\x19\n" +
	"\tLen64Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\x04R\x01x\";\n" +
	"\aAddArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\rR\x01y\x12\x14\n" +
	"\x05carry\x18\x03 \x01(\rR\x05carry\"=\n" +
	"\tAdd32Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\rR\x01y\x12\x14\n" +
	"\x05carry\x18\x03 \x01(\rR\x05carry\"=\n" +
	"\tAdd64Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\x04R\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\x04R\x01y\x12\x14\n" +
	"\x05carry\x18\x03 \x01(\x04R\x05carry\"=\n" +
	"\aSubArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\rR\x01y\x12\x16\n" +
	"\x06borrow\x18\x03 \x01(\rR\x06borrow\"?\n" +
	"\tSub32Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\rR\x01y\x12\x16\n" +
	"\x06borrow\x18\x03 \x01(\rR\x06borrow\"?\n" +
	"\tSub64Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\x04R\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\x04R\x01y\x12\x16\n" +
	"\x06borrow\x18\x03 \x01(\x04R\x06borrow\"%\n" +
	"\aMulArgs\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\rR\x01y\"'\n" +
	"\tMul32Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\rR\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\rR\x01y\"'\n" +
	"\tMul64Args\x12\f\n" +
	"\x01x\x18\x01 \x01(\x04R\x01x\x12\f\n" +
	"\x01y\x18\x02 \x01(\x04R\x01y\"\x8a\x14\n" +
	"\fNgoloFuzzOne\x12A\n" +
	"\fLeadingZeros\x18\x01 \x01(\v2\x1b.ngolofuzz.LeadingZerosArgsH\x00R\fLeadingZeros\x12D\n" +
	"\rLeadingZeros8\x18\x02 \x01(\v2\x1c.ngolofuzz.LeadingZeros8ArgsH\x00R\rLeadingZeros8\x12G\n" +
	"\x0eLeadingZeros16\x18\x03 \x01(\v2\x1d.ngolofuzz.LeadingZeros16ArgsH\x00R\x0eLeadingZeros16\x12G\n" +
	"\x0eLeadingZeros32\x18\x04 \x01(\v2\x1d.ngolofuzz.LeadingZeros32ArgsH\x00R\x0eLeadingZeros32\x12G\n" +
	"\x0eLeadingZeros64\x18\x05 \x01(\v2\x1d.ngolofuzz.LeadingZeros64ArgsH\x00R\x0eLeadingZeros64\x12D\n" +
	"\rTrailingZeros\x18\x06 \x01(\v2\x1c.ngolofuzz.TrailingZerosArgsH\x00R\rTrailingZeros\x12G\n" +
	"\x0eTrailingZeros8\x18\a \x01(\v2\x1d.ngolofuzz.TrailingZeros8ArgsH\x00R\x0eTrailingZeros8\x12J\n" +
	"\x0fTrailingZeros16\x18\b \x01(\v2\x1e.ngolofuzz.TrailingZeros16ArgsH\x00R\x0fTrailingZeros16\x12J\n" +
	"\x0fTrailingZeros32\x18\t \x01(\v2\x1e.ngolofuzz.TrailingZeros32ArgsH\x00R\x0fTrailingZeros32\x12J\n" +
	"\x0fTrailingZeros64\x18\n" +
	" \x01(\v2\x1e.ngolofuzz.TrailingZeros64ArgsH\x00R\x0fTrailingZeros64\x128\n" +
	"\tOnesCount\x18\v \x01(\v2\x18.ngolofuzz.OnesCountArgsH\x00R\tOnesCount\x12;\n" +
	"\n" +
	"OnesCount8\x18\f \x01(\v2\x19.ngolofuzz.OnesCount8ArgsH\x00R\n" +
	"OnesCount8\x12>\n" +
	"\vOnesCount16\x18\r \x01(\v2\x1a.ngolofuzz.OnesCount16ArgsH\x00R\vOnesCount16\x12>\n" +
	"\vOnesCount32\x18\x0e \x01(\v2\x1a.ngolofuzz.OnesCount32ArgsH\x00R\vOnesCount32\x12>\n" +
	"\vOnesCount64\x18\x0f \x01(\v2\x1a.ngolofuzz.OnesCount64ArgsH\x00R\vOnesCount64\x12;\n" +
	"\n" +
	"RotateLeft\x18\x10 \x01(\v2\x19.ngolofuzz.RotateLeftArgsH\x00R\n" +
	"RotateLeft\x12>\n" +
	"\vRotateLeft8\x18\x11 \x01(\v2\x1a.ngolofuzz.RotateLeft8ArgsH\x00R\vRotateLeft8\x12A\n" +
	"\fRotateLeft16\x18\x12 \x01(\v2\x1b.ngolofuzz.RotateLeft16ArgsH\x00R\fRotateLeft16\x12A\n" +
	"\fRotateLeft32\x18\x13 \x01(\v2\x1b.ngolofuzz.RotateLeft32ArgsH\x00R\fRotateLeft32\x12A\n" +
	"\fRotateLeft64\x18\x14 \x01(\v2\x1b.ngolofuzz.RotateLeft64ArgsH\x00R\fRotateLeft64\x122\n" +
	"\aReverse\x18\x15 \x01(\v2\x16.ngolofuzz.ReverseArgsH\x00R\aReverse\x125\n" +
	"\bReverse8\x18\x16 \x01(\v2\x17.ngolofuzz.Reverse8ArgsH\x00R\bReverse8\x128\n" +
	"\tReverse16\x18\x17 \x01(\v2\x18.ngolofuzz.Reverse16ArgsH\x00R\tReverse16\x128\n" +
	"\tReverse32\x18\x18 \x01(\v2\x18.ngolofuzz.Reverse32ArgsH\x00R\tReverse32\x128\n" +
	"\tReverse64\x18\x19 \x01(\v2\x18.ngolofuzz.Reverse64ArgsH\x00R\tReverse64\x12A\n" +
	"\fReverseBytes\x18\x1a \x01(\v2\x1b.ngolofuzz.ReverseBytesArgsH\x00R\fReverseBytes\x12G\n" +
	"\x0eReverseBytes16\x18\x1b \x01(\v2\x1d.ngolofuzz.ReverseBytes16ArgsH\x00R\x0eReverseBytes16\x12G\n" +
	"\x0eReverseBytes32\x18\x1c \x01(\v2\x1d.ngolofuzz.ReverseBytes32ArgsH\x00R\x0eReverseBytes32\x12G\n" +
	"\x0eReverseBytes64\x18\x1d \x01(\v2\x1d.ngolofuzz.ReverseBytes64ArgsH\x00R\x0eReverseBytes64\x12&\n" +
	"\x03Len\x18\x1e \x01(\v2\x12.ngolofuzz.LenArgsH\x00R\x03Len\x12)\n" +
	"\x04Len8\x18\x1f \x01(\v2\x13.ngolofuzz.Len8ArgsH\x00R\x04Len8\x12,\n" +
	"\x05Len16\x18 \x01(\v2\x14.ngolofuzz.Len16ArgsH\x00R\x05Len16\x12,\n" +
	"\x05Len32\x18! \x01(\v2\x14.ngolofuzz.Len32ArgsH\x00R\x05Len32\x12,\n" +
	"\x05Len64\x18\" \x01(\v2\x14.ngolofuzz.Len64ArgsH\x00R\x05Len64\x12&\n" +
	"\x03Add\x18# \x01(\v2\x12.ngolofuzz.AddArgsH\x00R\x03Add\x12,\n" +
	"\x05Add32\x18$ \x01(\v2\x14.ngolofuzz.Add32ArgsH\x00R\x05Add32\x12,\n" +
	"\x05Add64\x18% \x01(\v2\x14.ngolofuzz.Add64ArgsH\x00R\x05Add64\x12&\n" +
	"\x03Sub\x18& \x01(\v2\x12.ngolofuzz.SubArgsH\x00R\x03Sub\x12,\n" +
	"\x05Sub32\x18' \x01(\v2\x14.ngolofuzz.Sub32ArgsH\x00R\x05Sub32\x12,\n" +
	"\x05Sub64\x18( \x01(\v2\x14.ngolofuzz.Sub64ArgsH\x00R\x05Sub64\x12&\n" +
	"\x03Mul\x18) \x01(\v2\x12.ngolofuzz.MulArgsH\x00R\x03Mul\x12,\n" +
	"\x05Mul32\x18* \x01(\v2\x14.ngolofuzz.Mul32ArgsH\x00R\x05Mul32\x12,\n" +
	"\x05Mul64\x18+ \x01(\v2\x14.ngolofuzz.Mul64ArgsH\x00R\x05Mul64B\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x16Z\x14./;fuzz_ng_math_bitsb\x06proto3"
var (
	// file_ngolofuzz_proto_rawDescOnce guards the one-time compression below.
	file_ngolofuzz_proto_rawDescOnce sync.Once
	// file_ngolofuzz_proto_rawDescData caches the gzipped descriptor bytes.
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzips the raw descriptor once and
// returns the cached bytes (used by the deprecated Descriptor methods).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds runtime info for the 46 messages
// declared by this file (indices match the goTypes table below).
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 46)

// file_ngolofuzz_proto_goTypes maps descriptor indices to Go types.
// NOTE(review): generated table; order is significant and must not change.
var file_ngolofuzz_proto_goTypes = []any{
	(*LeadingZerosArgs)(nil),    // 0: ngolofuzz.LeadingZerosArgs
	(*LeadingZeros8Args)(nil),   // 1: ngolofuzz.LeadingZeros8Args
	(*LeadingZeros16Args)(nil),  // 2: ngolofuzz.LeadingZeros16Args
	(*LeadingZeros32Args)(nil),  // 3: ngolofuzz.LeadingZeros32Args
	(*LeadingZeros64Args)(nil),  // 4: ngolofuzz.LeadingZeros64Args
	(*TrailingZerosArgs)(nil),   // 5: ngolofuzz.TrailingZerosArgs
	(*TrailingZeros8Args)(nil),  // 6: ngolofuzz.TrailingZeros8Args
	(*TrailingZeros16Args)(nil), // 7: ngolofuzz.TrailingZeros16Args
	(*TrailingZeros32Args)(nil), // 8: ngolofuzz.TrailingZeros32Args
	(*TrailingZeros64Args)(nil), // 9: ngolofuzz.TrailingZeros64Args
	(*OnesCountArgs)(nil),       // 10: ngolofuzz.OnesCountArgs
	(*OnesCount8Args)(nil),      // 11: ngolofuzz.OnesCount8Args
	(*OnesCount16Args)(nil),     // 12: ngolofuzz.OnesCount16Args
	(*OnesCount32Args)(nil),     // 13: ngolofuzz.OnesCount32Args
	(*OnesCount64Args)(nil),     // 14: ngolofuzz.OnesCount64Args
	(*RotateLeftArgs)(nil),      // 15: ngolofuzz.RotateLeftArgs
	(*RotateLeft8Args)(nil),     // 16: ngolofuzz.RotateLeft8Args
	(*RotateLeft16Args)(nil),    // 17: ngolofuzz.RotateLeft16Args
	(*RotateLeft32Args)(nil),    // 18: ngolofuzz.RotateLeft32Args
	(*RotateLeft64Args)(nil),    // 19: ngolofuzz.RotateLeft64Args
	(*ReverseArgs)(nil),         // 20: ngolofuzz.ReverseArgs
	(*Reverse8Args)(nil),        // 21: ngolofuzz.Reverse8Args
	(*Reverse16Args)(nil),       // 22: ngolofuzz.Reverse16Args
	(*Reverse32Args)(nil),       // 23: ngolofuzz.Reverse32Args
	(*Reverse64Args)(nil),       // 24: ngolofuzz.Reverse64Args
	(*ReverseBytesArgs)(nil),    // 25: ngolofuzz.ReverseBytesArgs
	(*ReverseBytes16Args)(nil),  // 26: ngolofuzz.ReverseBytes16Args
	(*ReverseBytes32Args)(nil),  // 27: ngolofuzz.ReverseBytes32Args
	(*ReverseBytes64Args)(nil),  // 28: ngolofuzz.ReverseBytes64Args
	(*LenArgs)(nil),             // 29: ngolofuzz.LenArgs
	(*Len8Args)(nil),            // 30: ngolofuzz.Len8Args
	(*Len16Args)(nil),           // 31: ngolofuzz.Len16Args
	(*Len32Args)(nil),           // 32: ngolofuzz.Len32Args
	(*Len64Args)(nil),           // 33: ngolofuzz.Len64Args
	(*AddArgs)(nil),             // 34: ngolofuzz.AddArgs
	(*Add32Args)(nil),           // 35: ngolofuzz.Add32Args
	(*Add64Args)(nil),           // 36: ngolofuzz.Add64Args
	(*SubArgs)(nil),             // 37: ngolofuzz.SubArgs
	(*Sub32Args)(nil),           // 38: ngolofuzz.Sub32Args
	(*Sub64Args)(nil),           // 39: ngolofuzz.Sub64Args
	(*MulArgs)(nil),             // 40: ngolofuzz.MulArgs
	(*Mul32Args)(nil),           // 41: ngolofuzz.Mul32Args
	(*Mul64Args)(nil),           // 42: ngolofuzz.Mul64Args
	(*NgoloFuzzOne)(nil),        // 43: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),        // 44: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),       // 45: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs is the generated dependency index table:
// entries [0:44] map each message-typed field to its goTypes index; the
// trailing sub-list bounds are empty because this file declares no services
// or extensions. NOTE(review): generated; do not hand-edit.
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.LeadingZeros:type_name -> ngolofuzz.LeadingZerosArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.LeadingZeros8:type_name -> ngolofuzz.LeadingZeros8Args
	2,  // 2: ngolofuzz.NgoloFuzzOne.LeadingZeros16:type_name -> ngolofuzz.LeadingZeros16Args
	3,  // 3: ngolofuzz.NgoloFuzzOne.LeadingZeros32:type_name -> ngolofuzz.LeadingZeros32Args
	4,  // 4: ngolofuzz.NgoloFuzzOne.LeadingZeros64:type_name -> ngolofuzz.LeadingZeros64Args
	5,  // 5: ngolofuzz.NgoloFuzzOne.TrailingZeros:type_name -> ngolofuzz.TrailingZerosArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.TrailingZeros8:type_name -> ngolofuzz.TrailingZeros8Args
	7,  // 7: ngolofuzz.NgoloFuzzOne.TrailingZeros16:type_name -> ngolofuzz.TrailingZeros16Args
	8,  // 8: ngolofuzz.NgoloFuzzOne.TrailingZeros32:type_name -> ngolofuzz.TrailingZeros32Args
	9,  // 9: ngolofuzz.NgoloFuzzOne.TrailingZeros64:type_name -> ngolofuzz.TrailingZeros64Args
	10, // 10: ngolofuzz.NgoloFuzzOne.OnesCount:type_name -> ngolofuzz.OnesCountArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.OnesCount8:type_name -> ngolofuzz.OnesCount8Args
	12, // 12: ngolofuzz.NgoloFuzzOne.OnesCount16:type_name -> ngolofuzz.OnesCount16Args
	13, // 13: ngolofuzz.NgoloFuzzOne.OnesCount32:type_name -> ngolofuzz.OnesCount32Args
	14, // 14: ngolofuzz.NgoloFuzzOne.OnesCount64:type_name -> ngolofuzz.OnesCount64Args
	15, // 15: ngolofuzz.NgoloFuzzOne.RotateLeft:type_name -> ngolofuzz.RotateLeftArgs
	16, // 16: ngolofuzz.NgoloFuzzOne.RotateLeft8:type_name -> ngolofuzz.RotateLeft8Args
	17, // 17: ngolofuzz.NgoloFuzzOne.RotateLeft16:type_name -> ngolofuzz.RotateLeft16Args
	18, // 18: ngolofuzz.NgoloFuzzOne.RotateLeft32:type_name -> ngolofuzz.RotateLeft32Args
	19, // 19: ngolofuzz.NgoloFuzzOne.RotateLeft64:type_name -> ngolofuzz.RotateLeft64Args
	20, // 20: ngolofuzz.NgoloFuzzOne.Reverse:type_name -> ngolofuzz.ReverseArgs
	21, // 21: ngolofuzz.NgoloFuzzOne.Reverse8:type_name -> ngolofuzz.Reverse8Args
	22, // 22: ngolofuzz.NgoloFuzzOne.Reverse16:type_name -> ngolofuzz.Reverse16Args
	23, // 23: ngolofuzz.NgoloFuzzOne.Reverse32:type_name -> ngolofuzz.Reverse32Args
	24, // 24: ngolofuzz.NgoloFuzzOne.Reverse64:type_name -> ngolofuzz.Reverse64Args
	25, // 25: ngolofuzz.NgoloFuzzOne.ReverseBytes:type_name -> ngolofuzz.ReverseBytesArgs
	26, // 26: ngolofuzz.NgoloFuzzOne.ReverseBytes16:type_name -> ngolofuzz.ReverseBytes16Args
	27, // 27: ngolofuzz.NgoloFuzzOne.ReverseBytes32:type_name -> ngolofuzz.ReverseBytes32Args
	28, // 28: ngolofuzz.NgoloFuzzOne.ReverseBytes64:type_name -> ngolofuzz.ReverseBytes64Args
	29, // 29: ngolofuzz.NgoloFuzzOne.Len:type_name -> ngolofuzz.LenArgs
	30, // 30: ngolofuzz.NgoloFuzzOne.Len8:type_name -> ngolofuzz.Len8Args
	31, // 31: ngolofuzz.NgoloFuzzOne.Len16:type_name -> ngolofuzz.Len16Args
	32, // 32: ngolofuzz.NgoloFuzzOne.Len32:type_name -> ngolofuzz.Len32Args
	33, // 33: ngolofuzz.NgoloFuzzOne.Len64:type_name -> ngolofuzz.Len64Args
	34, // 34: ngolofuzz.NgoloFuzzOne.Add:type_name -> ngolofuzz.AddArgs
	35, // 35: ngolofuzz.NgoloFuzzOne.Add32:type_name -> ngolofuzz.Add32Args
	36, // 36: ngolofuzz.NgoloFuzzOne.Add64:type_name -> ngolofuzz.Add64Args
	37, // 37: ngolofuzz.NgoloFuzzOne.Sub:type_name -> ngolofuzz.SubArgs
	38, // 38: ngolofuzz.NgoloFuzzOne.Sub32:type_name -> ngolofuzz.Sub32Args
	39, // 39: ngolofuzz.NgoloFuzzOne.Sub64:type_name -> ngolofuzz.Sub64Args
	40, // 40: ngolofuzz.NgoloFuzzOne.Mul:type_name -> ngolofuzz.MulArgs
	41, // 41: ngolofuzz.NgoloFuzzOne.Mul32:type_name -> ngolofuzz.Mul32Args
	42, // 42: ngolofuzz.NgoloFuzzOne.Mul64:type_name -> ngolofuzz.Mul64Args
	43, // 43: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	44, // [44:44] is the sub-list for method output_type
	44, // [44:44] is the sub-list for method input_type
	44, // [44:44] is the sub-list for extension type_name
	44, // [44:44] is the sub-list for extension extendee
	0,  // [0:44] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the file's types with the protobuf
// runtime. It is idempotent: a non-nil File_ngolofuzz_proto means the file
// is already built. NOTE(review): generated; do not hand-edit.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Register the oneof wrapper types for NgoloFuzzOne (msgTypes[43]).
	file_ngolofuzz_proto_msgTypes[43].OneofWrappers = []any{
		(*NgoloFuzzOne_LeadingZeros)(nil),
		(*NgoloFuzzOne_LeadingZeros8)(nil),
		(*NgoloFuzzOne_LeadingZeros16)(nil),
		(*NgoloFuzzOne_LeadingZeros32)(nil),
		(*NgoloFuzzOne_LeadingZeros64)(nil),
		(*NgoloFuzzOne_TrailingZeros)(nil),
		(*NgoloFuzzOne_TrailingZeros8)(nil),
		(*NgoloFuzzOne_TrailingZeros16)(nil),
		(*NgoloFuzzOne_TrailingZeros32)(nil),
		(*NgoloFuzzOne_TrailingZeros64)(nil),
		(*NgoloFuzzOne_OnesCount)(nil),
		(*NgoloFuzzOne_OnesCount8)(nil),
		(*NgoloFuzzOne_OnesCount16)(nil),
		(*NgoloFuzzOne_OnesCount32)(nil),
		(*NgoloFuzzOne_OnesCount64)(nil),
		(*NgoloFuzzOne_RotateLeft)(nil),
		(*NgoloFuzzOne_RotateLeft8)(nil),
		(*NgoloFuzzOne_RotateLeft16)(nil),
		(*NgoloFuzzOne_RotateLeft32)(nil),
		(*NgoloFuzzOne_RotateLeft64)(nil),
		(*NgoloFuzzOne_Reverse)(nil),
		(*NgoloFuzzOne_Reverse8)(nil),
		(*NgoloFuzzOne_Reverse16)(nil),
		(*NgoloFuzzOne_Reverse32)(nil),
		(*NgoloFuzzOne_Reverse64)(nil),
		(*NgoloFuzzOne_ReverseBytes)(nil),
		(*NgoloFuzzOne_ReverseBytes16)(nil),
		(*NgoloFuzzOne_ReverseBytes32)(nil),
		(*NgoloFuzzOne_ReverseBytes64)(nil),
		(*NgoloFuzzOne_Len)(nil),
		(*NgoloFuzzOne_Len8)(nil),
		(*NgoloFuzzOne_Len16)(nil),
		(*NgoloFuzzOne_Len32)(nil),
		(*NgoloFuzzOne_Len64)(nil),
		(*NgoloFuzzOne_Add)(nil),
		(*NgoloFuzzOne_Add32)(nil),
		(*NgoloFuzzOne_Add64)(nil),
		(*NgoloFuzzOne_Sub)(nil),
		(*NgoloFuzzOne_Sub32)(nil),
		(*NgoloFuzzOne_Sub64)(nil),
		(*NgoloFuzzOne_Mul)(nil),
		(*NgoloFuzzOne_Mul32)(nil),
		(*NgoloFuzzOne_Mul64)(nil),
	}
	// Register the oneof wrapper types for NgoloFuzzAny (msgTypes[44]).
	file_ngolofuzz_proto_msgTypes[44].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   46,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the construction-only tables so they can be collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_mime
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"mime"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn-like stub for fuzzing: Read serves
// bytes from a fixed buffer and Write discards its input.
type FuzzingConn struct {
	buf    []byte // fuzz-provided data served by Read
	offset int    // current read position within buf
}

// Read copies buffered data into b and reports how many bytes were written,
// returning io.EOF once the buffer is exhausted.
//
// BUG FIX: the original guard compared len(b) against len(c.buf)+c.offset
// (plus instead of minus). When the remaining data was smaller than b but
// len(b) < len(buf)+offset, it copied only the remaining bytes yet reported
// n = len(b) and advanced offset past len(buf) — violating the io.Reader
// contract by claiming bytes it never wrote.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything that is left: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b is large enough for all remaining data.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write discards b and reports a successful full write (sink side of the
// fuzzing connection).
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so subsequent Reads return EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer and returns
// the corresponding *big.Int (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type,
// returning a new slice of the same length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (modulo 2^16),
// returning a new slice of the same length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return 0
}
// FuzzNG_valid assumes data is a valid protobuf encoding of NgoloFuzzList,
// panicking if it is not, then replays the decoded call list.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics (used by the harness to abort a run); anything
	// else is a genuine bug and is re-raised. Registered only after the
	// unmarshal check so the panic above still propagates.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure decodes data as a NgoloFuzzList, silently ignoring inputs
// that fail to unmarshal, then replays the decoded call list.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	// Swallow string panics raised by the harness; re-raise anything else.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the fuzzer-generated sequence of mime package calls.
// It returns 1 when the whole list ran and 0 when a call errored out early.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// Optionally write a Go-source reproducer of this run to the file
		// named by FUZZ_NG_REPRODUCER (first call only).
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the number of replayed calls per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_FormatMediaType:
			mime.FormatMediaType(a.FormatMediaType.T, a.FormatMediaType.Param)
		case *NgoloFuzzOne_ParseMediaType:
			_, _, r2 := mime.ParseMediaType(a.ParseMediaType.V)
			if r2 != nil {
				// Touch the error message, then stop this run.
				r2.Error()
				return 0
			}
		case *NgoloFuzzOne_TypeByExtension:
			mime.TypeByExtension(a.TypeByExtension.Ext)
		case *NgoloFuzzOne_ExtensionsByType:
			_, r1 := mime.ExtensionsByType(a.ExtensionsByType.Typ)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_AddExtensionType:
			r0 := mime.AddExtensionType(a.AddExtensionType.Ext, a.AddExtensionType.Typ)
			if r0 != nil {
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source transcript of the calls FuzzNG_List
// would perform for gen, one statement per line, to w. Used to emit
// human-readable reproducers.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_FormatMediaType:
			w.WriteString(fmt.Sprintf("mime.FormatMediaType(%#+v, %#+v)\n", a.FormatMediaType.T, a.FormatMediaType.Param))
		case *NgoloFuzzOne_ParseMediaType:
			w.WriteString(fmt.Sprintf("mime.ParseMediaType(%#+v)\n", a.ParseMediaType.V))
		case *NgoloFuzzOne_TypeByExtension:
			w.WriteString(fmt.Sprintf("mime.TypeByExtension(%#+v)\n", a.TypeByExtension.Ext))
		case *NgoloFuzzOne_ExtensionsByType:
			w.WriteString(fmt.Sprintf("mime.ExtensionsByType(%#+v)\n", a.ExtensionsByType.Typ))
		case *NgoloFuzzOne_AddExtensionType:
			w.WriteString(fmt.Sprintf("mime.AddExtensionType(%#+v, %#+v)\n", a.AddExtensionType.Ext, a.AddExtensionType.Typ))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_mime
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// NOTE(review): generated by protoc-gen-go — do not hand-edit; regenerate
// from ngolofuzz.proto instead.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// FormatMediaTypeArgs carries the arguments for mime.FormatMediaType.
// NOTE(review): generated by protoc-gen-go; do not hand-edit.
type FormatMediaTypeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	T             string                 `protobuf:"bytes,1,opt,name=t,proto3" json:"t,omitempty"`
	Param         map[string]string      `protobuf:"bytes,2,rep,name=param,proto3" json:"param,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *FormatMediaTypeArgs) Reset() {
	*x = FormatMediaTypeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *FormatMediaTypeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FormatMediaTypeArgs) ProtoMessage() {}

func (x *FormatMediaTypeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FormatMediaTypeArgs.ProtoReflect.Descriptor instead.
func (*FormatMediaTypeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

func (x *FormatMediaTypeArgs) GetT() string {
	if x != nil {
		return x.T
	}
	return ""
}

func (x *FormatMediaTypeArgs) GetParam() map[string]string {
	if x != nil {
		return x.Param
	}
	return nil
}
// ParseMediaTypeArgs carries the argument for mime.ParseMediaType.
// NOTE(review): generated by protoc-gen-go; do not hand-edit.
type ParseMediaTypeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	V             string                 `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ParseMediaTypeArgs) Reset() {
	*x = ParseMediaTypeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ParseMediaTypeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ParseMediaTypeArgs) ProtoMessage() {}

func (x *ParseMediaTypeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ParseMediaTypeArgs.ProtoReflect.Descriptor instead.
func (*ParseMediaTypeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

func (x *ParseMediaTypeArgs) GetV() string {
	if x != nil {
		return x.V
	}
	return ""
}
// TypeByExtensionArgs carries the argument for mime.TypeByExtension.
// NOTE(review): generated by protoc-gen-go; do not hand-edit.
type TypeByExtensionArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Ext           string                 `protobuf:"bytes,1,opt,name=ext,proto3" json:"ext,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *TypeByExtensionArgs) Reset() {
	*x = TypeByExtensionArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *TypeByExtensionArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TypeByExtensionArgs) ProtoMessage() {}

func (x *TypeByExtensionArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TypeByExtensionArgs.ProtoReflect.Descriptor instead.
func (*TypeByExtensionArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

func (x *TypeByExtensionArgs) GetExt() string {
	if x != nil {
		return x.Ext
	}
	return ""
}
// ExtensionsByTypeArgs carries the argument for mime.ExtensionsByType.
// NOTE(review): generated by protoc-gen-go; do not hand-edit.
type ExtensionsByTypeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Typ           string                 `protobuf:"bytes,1,opt,name=typ,proto3" json:"typ,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ExtensionsByTypeArgs) Reset() {
	*x = ExtensionsByTypeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ExtensionsByTypeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ExtensionsByTypeArgs) ProtoMessage() {}

func (x *ExtensionsByTypeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ExtensionsByTypeArgs.ProtoReflect.Descriptor instead.
func (*ExtensionsByTypeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

func (x *ExtensionsByTypeArgs) GetTyp() string {
	if x != nil {
		return x.Typ
	}
	return ""
}
// AddExtensionTypeArgs carries the arguments for mime.AddExtensionType.
// NOTE(review): generated by protoc-gen-go; do not hand-edit.
type AddExtensionTypeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Ext           string                 `protobuf:"bytes,1,opt,name=ext,proto3" json:"ext,omitempty"`
	Typ           string                 `protobuf:"bytes,2,opt,name=typ,proto3" json:"typ,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *AddExtensionTypeArgs) Reset() {
	*x = AddExtensionTypeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *AddExtensionTypeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*AddExtensionTypeArgs) ProtoMessage() {}

func (x *AddExtensionTypeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AddExtensionTypeArgs.ProtoReflect.Descriptor instead.
func (*AddExtensionTypeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

func (x *AddExtensionTypeArgs) GetExt() string {
	if x != nil {
		return x.Ext
	}
	return ""
}

func (x *AddExtensionTypeArgs) GetTyp() string {
	if x != nil {
		return x.Typ
	}
	return ""
}
// NgoloFuzzOne is one fuzzer action: a oneof selecting which mime API call
// to perform, with its arguments.
// NOTE(review): generated by protoc-gen-go; do not hand-edit.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_FormatMediaType
	//	*NgoloFuzzOne_ParseMediaType
	//	*NgoloFuzzOne_TypeByExtension
	//	*NgoloFuzzOne_ExtensionsByType
	//	*NgoloFuzzOne_AddExtensionType
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

func (x *NgoloFuzzOne) GetFormatMediaType() *FormatMediaTypeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FormatMediaType); ok {
			return x.FormatMediaType
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetParseMediaType() *ParseMediaTypeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ParseMediaType); ok {
			return x.ParseMediaType
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetTypeByExtension() *TypeByExtensionArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_TypeByExtension); ok {
			return x.TypeByExtension
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetExtensionsByType() *ExtensionsByTypeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ExtensionsByType); ok {
			return x.ExtensionsByType
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetAddExtensionType() *AddExtensionTypeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AddExtensionType); ok {
			return x.AddExtensionType
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the oneof discriminator interface.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_FormatMediaType struct {
	FormatMediaType *FormatMediaTypeArgs `protobuf:"bytes,1,opt,name=FormatMediaType,proto3,oneof"`
}

type NgoloFuzzOne_ParseMediaType struct {
	ParseMediaType *ParseMediaTypeArgs `protobuf:"bytes,2,opt,name=ParseMediaType,proto3,oneof"`
}

type NgoloFuzzOne_TypeByExtension struct {
	TypeByExtension *TypeByExtensionArgs `protobuf:"bytes,3,opt,name=TypeByExtension,proto3,oneof"`
}

type NgoloFuzzOne_ExtensionsByType struct {
	ExtensionsByType *ExtensionsByTypeArgs `protobuf:"bytes,4,opt,name=ExtensionsByType,proto3,oneof"`
}

type NgoloFuzzOne_AddExtensionType struct {
	AddExtensionType *AddExtensionTypeArgs `protobuf:"bytes,5,opt,name=AddExtensionType,proto3,oneof"`
}

func (*NgoloFuzzOne_FormatMediaType) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ParseMediaType) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_TypeByExtension) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ExtensionsByType) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_AddExtensionType) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generic scalar value (oneof of the protobuf scalar
// kinds) available to the fuzzer.
// NOTE(review): generated by protoc-gen-go; do not hand-edit.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the oneof discriminator interface.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzzer input: an ordered list of API
// calls to replay.
// NOTE(review): generated by protoc-gen-go; do not hand-edit.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto.
// NOTE(review): generated by protoc-gen-go; the rawDesc string below is a
// wire-format FileDescriptorProto — do not hand-edit.
var File_ngolofuzz_proto protoreflect.FileDescriptor

const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x9e\x01\n" +
	"\x13FormatMediaTypeArgs\x12\f\n" +
	"\x01t\x18\x01 \x01(\tR\x01t\x12?\n" +
	"\x05param\x18\x02 \x03(\v2).ngolofuzz.FormatMediaTypeArgs.ParamEntryR\x05param\x1a8\n" +
	"\n" +
	"ParamEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\"\n" +
	"\x12ParseMediaTypeArgs\x12\f\n" +
	"\x01v\x18\x01 \x01(\tR\x01v\"'\n" +
	"\x13TypeByExtensionArgs\x12\x10\n" +
	"\x03ext\x18\x01 \x01(\tR\x03ext\"(\n" +
	"\x14ExtensionsByTypeArgs\x12\x10\n" +
	"\x03typ\x18\x01 \x01(\tR\x03typ\":\n" +
	"\x14AddExtensionTypeArgs\x12\x10\n" +
	"\x03ext\x18\x01 \x01(\tR\x03ext\x12\x10\n" +
	"\x03typ\x18\x02 \x01(\tR\x03typ\"\x95\x03\n" +
	"\fNgoloFuzzOne\x12J\n" +
	"\x0fFormatMediaType\x18\x01 \x01(\v2\x1e.ngolofuzz.FormatMediaTypeArgsH\x00R\x0fFormatMediaType\x12G\n" +
	"\x0eParseMediaType\x18\x02 \x01(\v2\x1d.ngolofuzz.ParseMediaTypeArgsH\x00R\x0eParseMediaType\x12J\n" +
	"\x0fTypeByExtension\x18\x03 \x01(\v2\x1e.ngolofuzz.TypeByExtensionArgsH\x00R\x0fTypeByExtension\x12M\n" +
	"\x10ExtensionsByType\x18\x04 \x01(\v2\x1f.ngolofuzz.ExtensionsByTypeArgsH\x00R\x10ExtensionsByType\x12M\n" +
	"\x10AddExtensionType\x18\x05 \x01(\v2\x1f.ngolofuzz.AddExtensionTypeArgsH\x00R\x10AddExtensionTypeB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x11Z\x0f./;fuzz_ng_mimeb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzips the raw descriptor once
// and returns the cached compressed bytes (used by Descriptor methods).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// NOTE(review): generated lookup tables (message infos, Go types, and
// dependency indexes) for the TypeBuilder below; do not hand-edit.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 9)

var file_ngolofuzz_proto_goTypes = []any{
	(*FormatMediaTypeArgs)(nil),  // 0: ngolofuzz.FormatMediaTypeArgs
	(*ParseMediaTypeArgs)(nil),   // 1: ngolofuzz.ParseMediaTypeArgs
	(*TypeByExtensionArgs)(nil),  // 2: ngolofuzz.TypeByExtensionArgs
	(*ExtensionsByTypeArgs)(nil), // 3: ngolofuzz.ExtensionsByTypeArgs
	(*AddExtensionTypeArgs)(nil), // 4: ngolofuzz.AddExtensionTypeArgs
	(*NgoloFuzzOne)(nil),         // 5: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),         // 6: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),        // 7: ngolofuzz.NgoloFuzzList
	nil,                          // 8: ngolofuzz.FormatMediaTypeArgs.ParamEntry
}

var file_ngolofuzz_proto_depIdxs = []int32{
	8, // 0: ngolofuzz.FormatMediaTypeArgs.param:type_name -> ngolofuzz.FormatMediaTypeArgs.ParamEntry
	0, // 1: ngolofuzz.NgoloFuzzOne.FormatMediaType:type_name -> ngolofuzz.FormatMediaTypeArgs
	1, // 2: ngolofuzz.NgoloFuzzOne.ParseMediaType:type_name -> ngolofuzz.ParseMediaTypeArgs
	2, // 3: ngolofuzz.NgoloFuzzOne.TypeByExtension:type_name -> ngolofuzz.TypeByExtensionArgs
	3, // 4: ngolofuzz.NgoloFuzzOne.ExtensionsByType:type_name -> ngolofuzz.ExtensionsByTypeArgs
	4, // 5: ngolofuzz.NgoloFuzzOne.AddExtensionType:type_name -> ngolofuzz.AddExtensionTypeArgs
	5, // 6: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	7, // [7:7] is the sub-list for method output_type
	7, // [7:7] is the sub-list for method input_type
	7, // [7:7] is the sub-list for extension type_name
	7, // [7:7] is the sub-list for extension extendee
	0, // [0:7] is the sub-list for field type_name
}
// NOTE(review): generated registration; builds the file descriptor and
// message infos exactly once at package init. Do not hand-edit.
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Register the oneof wrapper types for NgoloFuzzOne and NgoloFuzzAny.
	file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
		(*NgoloFuzzOne_FormatMediaType)(nil),
		(*NgoloFuzzOne_ParseMediaType)(nil),
		(*NgoloFuzzOne_TypeByExtension)(nil),
		(*NgoloFuzzOne_ExtensionsByType)(nil),
		(*NgoloFuzzOne_AddExtensionType)(nil),
	}
	file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   9,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_mime_multipart
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"mime/multipart"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub: Read serves a fixed byte
// buffer to the caller and Write discards everything. It exists to feed
// fuzzer-generated bytes to APIs that expect a net.Conn.
type FuzzingConn struct {
	buf    []byte // data handed out by successive Read calls
	offset int    // number of bytes of buf already consumed
}

// Read copies the next chunk of c.buf into b, advancing the internal
// offset, and returns io.EOF once the buffer is exhausted.
//
// Bug fix: the original guard compared len(b) against len(c.buf)+c.offset
// instead of the number of remaining bytes, so a Read issued near the end
// of the buffer could return n larger than the bytes actually copied and
// advance offset past len(c.buf). Compare against the remainder instead.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	remaining := len(c.buf) - c.offset
	if remaining <= 0 {
		return 0, io.EOF
	}
	if len(b) < remaining {
		// Destination is too small for the rest of the buffer: fill it.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// Destination can hold everything that is left.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write discards b and reports it as fully written (data sink).
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for an in-memory conn.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps a in a FuzzingConn ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer and returns
// the corresponding *big.Int (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type,
// returning a new slice of the same length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (modulo 2^16),
// returning a new slice of the same length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return 0
}
// FuzzNG_valid assumes data is a valid protobuf encoding of NgoloFuzzList,
// panicking if it is not, then replays the decoded call list.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics (used by the harness to abort a run); anything
	// else is a genuine bug and is re-raised. Registered only after the
	// unmarshal check so the panic above still propagates.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure decodes data as a NgoloFuzzList, silently ignoring inputs
// that fail to unmarshal, then replays the decoded call list.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	// Swallow string panics raised by the harness; re-raise anything else.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the fuzzer-generated sequence of mime/multipart
// calls. Results of constructors (NewReader/NewWriter/NextPart/...) are
// accumulated in per-type pools; later method calls pick their receiver
// from the pool round-robin via the *ResultsIndex counters. Returns 1
// when the whole list ran, 0 when a call errored out early.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// Optionally write a Go-source reproducer of this run to the file
		// named by FUZZ_NG_REPRODUCER (first call only).
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Pools of receivers produced so far, plus round-robin cursors.
	var ReaderResults []*multipart.Reader
	ReaderResultsIndex := 0
	var WriterResults []*multipart.Writer
	WriterResultsIndex := 0
	var FormResults []*multipart.Form
	FormResultsIndex := 0
	var PartResults []*multipart.Part
	PartResultsIndex := 0
	for l := range gen.List {
		// Cap the number of replayed calls per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ReaderNgdotReadForm:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.ReadForm(a.ReaderNgdotReadForm.MaxMemory)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_FormNgdotRemoveAll:
			if len(FormResults) == 0 {
				continue
			}
			arg0 := FormResults[FormResultsIndex]
			FormResultsIndex = (FormResultsIndex + 1) % len(FormResults)
			r0 := arg0.RemoveAll()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_PartNgdotFormName:
			if len(PartResults) == 0 {
				continue
			}
			arg0 := PartResults[PartResultsIndex]
			PartResultsIndex = (PartResultsIndex + 1) % len(PartResults)
			arg0.FormName()
		case *NgoloFuzzOne_PartNgdotFileName:
			if len(PartResults) == 0 {
				continue
			}
			arg0 := PartResults[PartResultsIndex]
			PartResultsIndex = (PartResultsIndex + 1) % len(PartResults)
			arg0.FileName()
		case *NgoloFuzzOne_NewReader:
			arg0 := bytes.NewReader(a.NewReader.R)
			r0 := multipart.NewReader(arg0, a.NewReader.Boundary)
			if r0 != nil {
				ReaderResults = append(ReaderResults, r0)
			}
		case *NgoloFuzzOne_PartNgdotRead:
			if len(PartResults) == 0 {
				continue
			}
			arg0 := PartResults[PartResultsIndex]
			PartResultsIndex = (PartResultsIndex + 1) % len(PartResults)
			_, r1 := arg0.Read(a.PartNgdotRead.D)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_PartNgdotClose:
			if len(PartResults) == 0 {
				continue
			}
			arg0 := PartResults[PartResultsIndex]
			PartResultsIndex = (PartResultsIndex + 1) % len(PartResults)
			r0 := arg0.Close()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotNextPart:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			r0, r1 := arg0.NextPart()
			if r0 != nil {
				PartResults = append(PartResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotNextRawPart:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			r0, r1 := arg0.NextRawPart()
			if r0 != nil {
				PartResults = append(PartResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_NewWriter:
			arg0 := bytes.NewBuffer(a.NewWriter.W)
			r0 := multipart.NewWriter(arg0)
			if r0 != nil {
				WriterResults = append(WriterResults, r0)
			}
		case *NgoloFuzzOne_WriterNgdotBoundary:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			arg0.Boundary()
		case *NgoloFuzzOne_WriterNgdotSetBoundary:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.SetBoundary(a.WriterNgdotSetBoundary.Boundary)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotFormDataContentType:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			arg0.FormDataContentType()
		case *NgoloFuzzOne_WriterNgdotCreateFormFile:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			_, r1 := arg0.CreateFormFile(a.WriterNgdotCreateFormFile.Fieldname, a.WriterNgdotCreateFormFile.Filename)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotCreateFormField:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			_, r1 := arg0.CreateFormField(a.WriterNgdotCreateFormField.Fieldname)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_FileContentDisposition:
			multipart.FileContentDisposition(a.FileContentDisposition.Fieldname, a.FileContentDisposition.Filename)
		case *NgoloFuzzOne_WriterNgdotWriteField:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.WriteField(a.WriterNgdotWriteField.Fieldname, a.WriterNgdotWriteField.Value)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotClose:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Close()
			if r0 != nil {
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source transcript of the calls FuzzNG_List
// would perform for gen to w, mirroring FuzzNG_List's round-robin
// receiver selection with *Nb (pool size) and *ResultsIndex (cursor)
// counters so the printed receiver names match the replayed ones.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	ReaderNb := 0
	ReaderResultsIndex := 0
	WriterNb := 0
	WriterResultsIndex := 0
	FormNb := 0
	FormResultsIndex := 0
	PartNb := 0
	PartResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ReaderNgdotReadForm:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadForm(%#+v)\n", ReaderResultsIndex, a.ReaderNgdotReadForm.MaxMemory))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_FormNgdotRemoveAll:
			if FormNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Form%d.RemoveAll()\n", FormResultsIndex))
			FormResultsIndex = (FormResultsIndex + 1) % FormNb
		case *NgoloFuzzOne_PartNgdotFormName:
			if PartNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Part%d.FormName()\n", PartResultsIndex))
			PartResultsIndex = (PartResultsIndex + 1) % PartNb
		case *NgoloFuzzOne_PartNgdotFileName:
			if PartNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Part%d.FileName()\n", PartResultsIndex))
			PartResultsIndex = (PartResultsIndex + 1) % PartNb
		case *NgoloFuzzOne_NewReader:
			w.WriteString(fmt.Sprintf("Reader%d := multipart.NewReader(bytes.NewReader(%#+v), %#+v)\n", ReaderNb, a.NewReader.R, a.NewReader.Boundary))
			ReaderNb = ReaderNb + 1
		case *NgoloFuzzOne_PartNgdotRead:
			if PartNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Part%d.Read(%#+v)\n", PartResultsIndex, a.PartNgdotRead.D))
			PartResultsIndex = (PartResultsIndex + 1) % PartNb
		case *NgoloFuzzOne_PartNgdotClose:
			if PartNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Part%d.Close()\n", PartResultsIndex))
			PartResultsIndex = (PartResultsIndex + 1) % PartNb
		case *NgoloFuzzOne_ReaderNgdotNextPart:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Part%d, _ := Reader%d.NextPart()\n", PartNb, ReaderResultsIndex))
			PartNb = PartNb + 1
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotNextRawPart:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Part%d, _ := Reader%d.NextRawPart()\n", PartNb, ReaderResultsIndex))
			PartNb = PartNb + 1
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_NewWriter:
			w.WriteString(fmt.Sprintf("Writer%d := multipart.NewWriter(bytes.NewBuffer(%#+v))\n", WriterNb, a.NewWriter.W))
			WriterNb = WriterNb + 1
		case *NgoloFuzzOne_WriterNgdotBoundary:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Boundary()\n", WriterResultsIndex))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotSetBoundary:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.SetBoundary(%#+v)\n", WriterResultsIndex, a.WriterNgdotSetBoundary.Boundary))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotFormDataContentType:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.FormDataContentType()\n", WriterResultsIndex))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotCreateFormFile:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.CreateFormFile(%#+v, %#+v)\n", WriterResultsIndex, a.WriterNgdotCreateFormFile.Fieldname, a.WriterNgdotCreateFormFile.Filename))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotCreateFormField:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.CreateFormField(%#+v)\n", WriterResultsIndex, a.WriterNgdotCreateFormField.Fieldname))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_FileContentDisposition:
			w.WriteString(fmt.Sprintf("multipart.FileContentDisposition(%#+v, %#+v)\n", a.FileContentDisposition.Fieldname, a.FileContentDisposition.Filename))
		case *NgoloFuzzOne_WriterNgdotWriteField:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.WriteField(%#+v, %#+v)\n", WriterResultsIndex, a.WriterNgdotWriteField.Fieldname, a.WriterNgdotWriteField.Value))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotClose:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Close()\n", WriterResultsIndex))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_mime_multipart
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards pairing this generated code with a compatible
// protoimpl runtime: either expression fails to build if the runtime
// is too old or too new for code generated at version 20.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// ReaderNgdotReadFormArgs is the generated protobuf message carrying the
// single argument (maxMemory) for a fuzzed multipart Reader.ReadForm call.
type ReaderNgdotReadFormArgs struct {
	// state, unknownFields, sizeCache are protoimpl bookkeeping fields
	// managed by the protobuf runtime; user code never touches them.
	state protoimpl.MessageState `protogen:"open.v1"`
	// MaxMemory is proto field 1 (varint); presumably forwarded as the
	// maxMemory argument of ReadForm by the fuzz driver — confirm there.
	MaxMemory int64 `protobuf:"varint,1,opt,name=maxMemory,proto3" json:"maxMemory,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *ReaderNgdotReadFormArgs) Reset() {
	*x = ReaderNgdotReadFormArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *ReaderNgdotReadFormArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotReadFormArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the message info on first use (nil receivers fall back to the type info).
func (x *ReaderNgdotReadFormArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadFormArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadFormArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetMaxMemory returns MaxMemory, or 0 for a nil receiver.
func (x *ReaderNgdotReadFormArgs) GetMaxMemory() int64 {
	if x != nil {
		return x.MaxMemory
	}
	return 0
}
// FormNgdotRemoveAllArgs is the field-less generated message selecting a
// fuzzed Form.RemoveAll call (no arguments beyond the receiver).
type FormNgdotRemoveAllArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *FormNgdotRemoveAllArgs) Reset() {
	*x = FormNgdotRemoveAllArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *FormNgdotRemoveAllArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*FormNgdotRemoveAllArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *FormNgdotRemoveAllArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FormNgdotRemoveAllArgs.ProtoReflect.Descriptor instead.
func (*FormNgdotRemoveAllArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// PartNgdotFormNameArgs is the field-less generated message selecting a
// fuzzed Part.FormName call.
type PartNgdotFormNameArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *PartNgdotFormNameArgs) Reset() {
	*x = PartNgdotFormNameArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *PartNgdotFormNameArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*PartNgdotFormNameArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *PartNgdotFormNameArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PartNgdotFormNameArgs.ProtoReflect.Descriptor instead.
func (*PartNgdotFormNameArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// PartNgdotFileNameArgs is the field-less generated message selecting a
// fuzzed Part.FileName call.
type PartNgdotFileNameArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *PartNgdotFileNameArgs) Reset() {
	*x = PartNgdotFileNameArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *PartNgdotFileNameArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*PartNgdotFileNameArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *PartNgdotFileNameArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PartNgdotFileNameArgs.ProtoReflect.Descriptor instead.
func (*PartNgdotFileNameArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// NewReaderArgs is the generated message carrying the arguments for a fuzzed
// multipart.NewReader call: the raw body bytes and the boundary string.
type NewReaderArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// R is proto field 1 (bytes): the reader payload.
	R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	// Boundary is proto field 2 (string): the multipart boundary.
	Boundary string `protobuf:"bytes,2,opt,name=boundary,proto3" json:"boundary,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *NewReaderArgs) Reset() {
	*x = NewReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *NewReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NewReaderArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetR returns R, or nil for a nil receiver.
func (x *NewReaderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}

// GetBoundary returns Boundary, or "" for a nil receiver.
func (x *NewReaderArgs) GetBoundary() string {
	if x != nil {
		return x.Boundary
	}
	return ""
}
// PartNgdotReadArgs is the generated message carrying the byte-slice
// argument for a fuzzed Part.Read call.
type PartNgdotReadArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// D is proto field 1 (bytes): the destination buffer contents/size.
	D []byte `protobuf:"bytes,1,opt,name=d,proto3" json:"d,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *PartNgdotReadArgs) Reset() {
	*x = PartNgdotReadArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *PartNgdotReadArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*PartNgdotReadArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *PartNgdotReadArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PartNgdotReadArgs.ProtoReflect.Descriptor instead.
func (*PartNgdotReadArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetD returns D, or nil for a nil receiver.
func (x *PartNgdotReadArgs) GetD() []byte {
	if x != nil {
		return x.D
	}
	return nil
}
// PartNgdotCloseArgs is the field-less generated message selecting a
// fuzzed Part.Close call.
type PartNgdotCloseArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *PartNgdotCloseArgs) Reset() {
	*x = PartNgdotCloseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *PartNgdotCloseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*PartNgdotCloseArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *PartNgdotCloseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PartNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*PartNgdotCloseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// ReaderNgdotNextPartArgs is the field-less generated message selecting a
// fuzzed Reader.NextPart call.
type ReaderNgdotNextPartArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *ReaderNgdotNextPartArgs) Reset() {
	*x = ReaderNgdotNextPartArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *ReaderNgdotNextPartArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotNextPartArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *ReaderNgdotNextPartArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotNextPartArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotNextPartArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// ReaderNgdotNextRawPartArgs is the field-less generated message selecting a
// fuzzed Reader.NextRawPart call.
type ReaderNgdotNextRawPartArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *ReaderNgdotNextRawPartArgs) Reset() {
	*x = ReaderNgdotNextRawPartArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *ReaderNgdotNextRawPartArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotNextRawPartArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *ReaderNgdotNextRawPartArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotNextRawPartArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotNextRawPartArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// NewWriterArgs is the generated message carrying the seed bytes used to
// construct the destination buffer for a fuzzed multipart.NewWriter call.
type NewWriterArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// W is proto field 1 (bytes): initial contents of the writer's buffer.
	W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *NewWriterArgs) Reset() {
	*x = NewWriterArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *NewWriterArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NewWriterArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetW returns W, or nil for a nil receiver.
func (x *NewWriterArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}
// WriterNgdotBoundaryArgs is the field-less generated message selecting a
// fuzzed Writer.Boundary call.
type WriterNgdotBoundaryArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *WriterNgdotBoundaryArgs) Reset() {
	*x = WriterNgdotBoundaryArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *WriterNgdotBoundaryArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*WriterNgdotBoundaryArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *WriterNgdotBoundaryArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotBoundaryArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotBoundaryArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// WriterNgdotSetBoundaryArgs is the generated message carrying the boundary
// string argument for a fuzzed Writer.SetBoundary call.
type WriterNgdotSetBoundaryArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Boundary is proto field 1 (string): the boundary to set.
	Boundary string `protobuf:"bytes,1,opt,name=boundary,proto3" json:"boundary,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *WriterNgdotSetBoundaryArgs) Reset() {
	*x = WriterNgdotSetBoundaryArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *WriterNgdotSetBoundaryArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*WriterNgdotSetBoundaryArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *WriterNgdotSetBoundaryArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotSetBoundaryArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotSetBoundaryArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetBoundary returns Boundary, or "" for a nil receiver.
func (x *WriterNgdotSetBoundaryArgs) GetBoundary() string {
	if x != nil {
		return x.Boundary
	}
	return ""
}
// WriterNgdotFormDataContentTypeArgs is the field-less generated message
// selecting a fuzzed Writer.FormDataContentType call.
type WriterNgdotFormDataContentTypeArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *WriterNgdotFormDataContentTypeArgs) Reset() {
	*x = WriterNgdotFormDataContentTypeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *WriterNgdotFormDataContentTypeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*WriterNgdotFormDataContentTypeArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *WriterNgdotFormDataContentTypeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotFormDataContentTypeArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotFormDataContentTypeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// WriterNgdotCreateFormFileArgs is the generated message carrying the
// fieldname and filename arguments for a fuzzed Writer.CreateFormFile call.
type WriterNgdotCreateFormFileArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Fieldname is proto field 1 (string).
	Fieldname string `protobuf:"bytes,1,opt,name=fieldname,proto3" json:"fieldname,omitempty"`
	// Filename is proto field 2 (string).
	Filename string `protobuf:"bytes,2,opt,name=filename,proto3" json:"filename,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *WriterNgdotCreateFormFileArgs) Reset() {
	*x = WriterNgdotCreateFormFileArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *WriterNgdotCreateFormFileArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*WriterNgdotCreateFormFileArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *WriterNgdotCreateFormFileArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotCreateFormFileArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCreateFormFileArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// GetFieldname returns Fieldname, or "" for a nil receiver.
func (x *WriterNgdotCreateFormFileArgs) GetFieldname() string {
	if x != nil {
		return x.Fieldname
	}
	return ""
}

// GetFilename returns Filename, or "" for a nil receiver.
func (x *WriterNgdotCreateFormFileArgs) GetFilename() string {
	if x != nil {
		return x.Filename
	}
	return ""
}
// WriterNgdotCreateFormFieldArgs is the generated message carrying the
// fieldname argument for a fuzzed Writer.CreateFormField call.
type WriterNgdotCreateFormFieldArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Fieldname is proto field 1 (string).
	Fieldname string `protobuf:"bytes,1,opt,name=fieldname,proto3" json:"fieldname,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *WriterNgdotCreateFormFieldArgs) Reset() {
	*x = WriterNgdotCreateFormFieldArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *WriterNgdotCreateFormFieldArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*WriterNgdotCreateFormFieldArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *WriterNgdotCreateFormFieldArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotCreateFormFieldArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCreateFormFieldArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetFieldname returns Fieldname, or "" for a nil receiver.
func (x *WriterNgdotCreateFormFieldArgs) GetFieldname() string {
	if x != nil {
		return x.Fieldname
	}
	return ""
}
// FileContentDispositionArgs is the generated message carrying the
// fieldname and filename arguments for a fuzzed
// multipart.FileContentDisposition call.
type FileContentDispositionArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Fieldname is proto field 1 (string).
	Fieldname string `protobuf:"bytes,1,opt,name=fieldname,proto3" json:"fieldname,omitempty"`
	// Filename is proto field 2 (string).
	Filename string `protobuf:"bytes,2,opt,name=filename,proto3" json:"filename,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *FileContentDispositionArgs) Reset() {
	*x = FileContentDispositionArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *FileContentDispositionArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*FileContentDispositionArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *FileContentDispositionArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FileContentDispositionArgs.ProtoReflect.Descriptor instead.
func (*FileContentDispositionArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// GetFieldname returns Fieldname, or "" for a nil receiver.
func (x *FileContentDispositionArgs) GetFieldname() string {
	if x != nil {
		return x.Fieldname
	}
	return ""
}

// GetFilename returns Filename, or "" for a nil receiver.
func (x *FileContentDispositionArgs) GetFilename() string {
	if x != nil {
		return x.Filename
	}
	return ""
}
// WriterNgdotWriteFieldArgs is the generated message carrying the fieldname
// and value arguments for a fuzzed Writer.WriteField call.
type WriterNgdotWriteFieldArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Fieldname is proto field 1 (string).
	Fieldname string `protobuf:"bytes,1,opt,name=fieldname,proto3" json:"fieldname,omitempty"`
	// Value is proto field 2 (string).
	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *WriterNgdotWriteFieldArgs) Reset() {
	*x = WriterNgdotWriteFieldArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *WriterNgdotWriteFieldArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*WriterNgdotWriteFieldArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *WriterNgdotWriteFieldArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotWriteFieldArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteFieldArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// GetFieldname returns Fieldname, or "" for a nil receiver.
func (x *WriterNgdotWriteFieldArgs) GetFieldname() string {
	if x != nil {
		return x.Fieldname
	}
	return ""
}

// GetValue returns Value, or "" for a nil receiver.
func (x *WriterNgdotWriteFieldArgs) GetValue() string {
	if x != nil {
		return x.Value
	}
	return ""
}
// WriterNgdotCloseArgs is the field-less generated message selecting a
// fuzzed Writer.Close call.
type WriterNgdotCloseArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *WriterNgdotCloseArgs) Reset() {
	*x = WriterNgdotCloseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *WriterNgdotCloseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*WriterNgdotCloseArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *WriterNgdotCloseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCloseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// NgoloFuzzOne is the generated oneof message selecting exactly one fuzzed
// mime/multipart API action per step of a fuzz sequence.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_ReaderNgdotReadForm
	//	*NgoloFuzzOne_FormNgdotRemoveAll
	//	*NgoloFuzzOne_PartNgdotFormName
	//	*NgoloFuzzOne_PartNgdotFileName
	//	*NgoloFuzzOne_NewReader
	//	*NgoloFuzzOne_PartNgdotRead
	//	*NgoloFuzzOne_PartNgdotClose
	//	*NgoloFuzzOne_ReaderNgdotNextPart
	//	*NgoloFuzzOne_ReaderNgdotNextRawPart
	//	*NgoloFuzzOne_NewWriter
	//	*NgoloFuzzOne_WriterNgdotBoundary
	//	*NgoloFuzzOne_WriterNgdotSetBoundary
	//	*NgoloFuzzOne_WriterNgdotFormDataContentType
	//	*NgoloFuzzOne_WriterNgdotCreateFormFile
	//	*NgoloFuzzOne_WriterNgdotCreateFormField
	//	*NgoloFuzzOne_FileContentDisposition
	//	*NgoloFuzzOne_WriterNgdotWriteField
	//	*NgoloFuzzOne_WriterNgdotClose
	Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}

// GetItem returns the populated oneof variant, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Oneof accessors: each Get* returns its Args message when Item currently
// holds the corresponding wrapper variant, and nil otherwise (including on
// a nil receiver).
func (x *NgoloFuzzOne) GetReaderNgdotReadForm() *ReaderNgdotReadFormArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadForm); ok {
			return x.ReaderNgdotReadForm
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetFormNgdotRemoveAll() *FormNgdotRemoveAllArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FormNgdotRemoveAll); ok {
			return x.FormNgdotRemoveAll
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPartNgdotFormName() *PartNgdotFormNameArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PartNgdotFormName); ok {
			return x.PartNgdotFormName
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPartNgdotFileName() *PartNgdotFileNameArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PartNgdotFileName); ok {
			return x.PartNgdotFileName
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
			return x.NewReader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPartNgdotRead() *PartNgdotReadArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PartNgdotRead); ok {
			return x.PartNgdotRead
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetPartNgdotClose() *PartNgdotCloseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PartNgdotClose); ok {
			return x.PartNgdotClose
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotNextPart() *ReaderNgdotNextPartArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotNextPart); ok {
			return x.ReaderNgdotNextPart
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotNextRawPart() *ReaderNgdotNextRawPartArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotNextRawPart); ok {
			return x.ReaderNgdotNextRawPart
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
			return x.NewWriter
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotBoundary() *WriterNgdotBoundaryArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotBoundary); ok {
			return x.WriterNgdotBoundary
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotSetBoundary() *WriterNgdotSetBoundaryArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotSetBoundary); ok {
			return x.WriterNgdotSetBoundary
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotFormDataContentType() *WriterNgdotFormDataContentTypeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotFormDataContentType); ok {
			return x.WriterNgdotFormDataContentType
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotCreateFormFile() *WriterNgdotCreateFormFileArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotCreateFormFile); ok {
			return x.WriterNgdotCreateFormFile
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotCreateFormField() *WriterNgdotCreateFormFieldArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotCreateFormField); ok {
			return x.WriterNgdotCreateFormField
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetFileContentDisposition() *FileContentDispositionArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FileContentDisposition); ok {
			return x.FileContentDisposition
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotWriteField() *WriterNgdotWriteFieldArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWriteField); ok {
			return x.WriterNgdotWriteField
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotClose() *WriterNgdotCloseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotClose); ok {
			return x.WriterNgdotClose
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// Wrapper types for the Item oneof; the protobuf field number of each
// variant is encoded in its struct tag.
type NgoloFuzzOne_ReaderNgdotReadForm struct {
	ReaderNgdotReadForm *ReaderNgdotReadFormArgs `protobuf:"bytes,1,opt,name=ReaderNgdotReadForm,proto3,oneof"`
}

type NgoloFuzzOne_FormNgdotRemoveAll struct {
	FormNgdotRemoveAll *FormNgdotRemoveAllArgs `protobuf:"bytes,2,opt,name=FormNgdotRemoveAll,proto3,oneof"`
}

type NgoloFuzzOne_PartNgdotFormName struct {
	PartNgdotFormName *PartNgdotFormNameArgs `protobuf:"bytes,3,opt,name=PartNgdotFormName,proto3,oneof"`
}

type NgoloFuzzOne_PartNgdotFileName struct {
	PartNgdotFileName *PartNgdotFileNameArgs `protobuf:"bytes,4,opt,name=PartNgdotFileName,proto3,oneof"`
}

type NgoloFuzzOne_NewReader struct {
	NewReader *NewReaderArgs `protobuf:"bytes,5,opt,name=NewReader,proto3,oneof"`
}

type NgoloFuzzOne_PartNgdotRead struct {
	PartNgdotRead *PartNgdotReadArgs `protobuf:"bytes,6,opt,name=PartNgdotRead,proto3,oneof"`
}

type NgoloFuzzOne_PartNgdotClose struct {
	PartNgdotClose *PartNgdotCloseArgs `protobuf:"bytes,7,opt,name=PartNgdotClose,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotNextPart struct {
	ReaderNgdotNextPart *ReaderNgdotNextPartArgs `protobuf:"bytes,8,opt,name=ReaderNgdotNextPart,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotNextRawPart struct {
	ReaderNgdotNextRawPart *ReaderNgdotNextRawPartArgs `protobuf:"bytes,9,opt,name=ReaderNgdotNextRawPart,proto3,oneof"`
}

type NgoloFuzzOne_NewWriter struct {
	NewWriter *NewWriterArgs `protobuf:"bytes,10,opt,name=NewWriter,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotBoundary struct {
	WriterNgdotBoundary *WriterNgdotBoundaryArgs `protobuf:"bytes,11,opt,name=WriterNgdotBoundary,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotSetBoundary struct {
	WriterNgdotSetBoundary *WriterNgdotSetBoundaryArgs `protobuf:"bytes,12,opt,name=WriterNgdotSetBoundary,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotFormDataContentType struct {
	WriterNgdotFormDataContentType *WriterNgdotFormDataContentTypeArgs `protobuf:"bytes,13,opt,name=WriterNgdotFormDataContentType,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotCreateFormFile struct {
	WriterNgdotCreateFormFile *WriterNgdotCreateFormFileArgs `protobuf:"bytes,14,opt,name=WriterNgdotCreateFormFile,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotCreateFormField struct {
	WriterNgdotCreateFormField *WriterNgdotCreateFormFieldArgs `protobuf:"bytes,15,opt,name=WriterNgdotCreateFormField,proto3,oneof"`
}

type NgoloFuzzOne_FileContentDisposition struct {
	FileContentDisposition *FileContentDispositionArgs `protobuf:"bytes,16,opt,name=FileContentDisposition,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotWriteField struct {
	WriterNgdotWriteField *WriterNgdotWriteFieldArgs `protobuf:"bytes,17,opt,name=WriterNgdotWriteField,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotClose struct {
	WriterNgdotClose *WriterNgdotCloseArgs `protobuf:"bytes,18,opt,name=WriterNgdotClose,proto3,oneof"`
}

// Marker methods binding each wrapper to the isNgoloFuzzOne_Item interface.
func (*NgoloFuzzOne_ReaderNgdotReadForm) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_FormNgdotRemoveAll) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PartNgdotFormName) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PartNgdotFileName) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PartNgdotRead) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PartNgdotClose) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotNextPart) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotNextRawPart) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotBoundary) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotSetBoundary) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotFormDataContentType) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotCreateFormFile) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotCreateFormField) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_FileContentDisposition) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotWriteField) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotClose) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the generated oneof message holding one primitive fuzz
// value (double, int64, bool, string, or bytes).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[19]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protoimpl stringer.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[19]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}

// GetItem returns the populated oneof variant, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Oneof accessors: each Get* returns the primitive when Item currently
// holds that variant, otherwise the type's zero value.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// Wrapper types for the Item oneof; field numbers are in the struct tags.
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

// Marker methods binding each wrapper to the isNgoloFuzzAny_Item interface.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"7\n" +
"\x17ReaderNgdotReadFormArgs\x12\x1c\n" +
"\tmaxMemory\x18\x01 \x01(\x03R\tmaxMemory\"\x18\n" +
"\x16FormNgdotRemoveAllArgs\"\x17\n" +
"\x15PartNgdotFormNameArgs\"\x17\n" +
"\x15PartNgdotFileNameArgs\"9\n" +
"\rNewReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\x12\x1a\n" +
"\bboundary\x18\x02 \x01(\tR\bboundary\"!\n" +
"\x11PartNgdotReadArgs\x12\f\n" +
"\x01d\x18\x01 \x01(\fR\x01d\"\x14\n" +
"\x12PartNgdotCloseArgs\"\x19\n" +
"\x17ReaderNgdotNextPartArgs\"\x1c\n" +
"\x1aReaderNgdotNextRawPartArgs\"\x1d\n" +
"\rNewWriterArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"\x19\n" +
"\x17WriterNgdotBoundaryArgs\"8\n" +
"\x1aWriterNgdotSetBoundaryArgs\x12\x1a\n" +
"\bboundary\x18\x01 \x01(\tR\bboundary\"$\n" +
"\"WriterNgdotFormDataContentTypeArgs\"Y\n" +
"\x1dWriterNgdotCreateFormFileArgs\x12\x1c\n" +
"\tfieldname\x18\x01 \x01(\tR\tfieldname\x12\x1a\n" +
"\bfilename\x18\x02 \x01(\tR\bfilename\">\n" +
"\x1eWriterNgdotCreateFormFieldArgs\x12\x1c\n" +
"\tfieldname\x18\x01 \x01(\tR\tfieldname\"V\n" +
"\x1aFileContentDispositionArgs\x12\x1c\n" +
"\tfieldname\x18\x01 \x01(\tR\tfieldname\x12\x1a\n" +
"\bfilename\x18\x02 \x01(\tR\bfilename\"O\n" +
"\x19WriterNgdotWriteFieldArgs\x12\x1c\n" +
"\tfieldname\x18\x01 \x01(\tR\tfieldname\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value\"\x16\n" +
"\x14WriterNgdotCloseArgs\"\xba\f\n" +
"\fNgoloFuzzOne\x12V\n" +
"\x13ReaderNgdotReadForm\x18\x01 \x01(\v2\".ngolofuzz.ReaderNgdotReadFormArgsH\x00R\x13ReaderNgdotReadForm\x12S\n" +
"\x12FormNgdotRemoveAll\x18\x02 \x01(\v2!.ngolofuzz.FormNgdotRemoveAllArgsH\x00R\x12FormNgdotRemoveAll\x12P\n" +
"\x11PartNgdotFormName\x18\x03 \x01(\v2 .ngolofuzz.PartNgdotFormNameArgsH\x00R\x11PartNgdotFormName\x12P\n" +
"\x11PartNgdotFileName\x18\x04 \x01(\v2 .ngolofuzz.PartNgdotFileNameArgsH\x00R\x11PartNgdotFileName\x128\n" +
"\tNewReader\x18\x05 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12D\n" +
"\rPartNgdotRead\x18\x06 \x01(\v2\x1c.ngolofuzz.PartNgdotReadArgsH\x00R\rPartNgdotRead\x12G\n" +
"\x0ePartNgdotClose\x18\a \x01(\v2\x1d.ngolofuzz.PartNgdotCloseArgsH\x00R\x0ePartNgdotClose\x12V\n" +
"\x13ReaderNgdotNextPart\x18\b \x01(\v2\".ngolofuzz.ReaderNgdotNextPartArgsH\x00R\x13ReaderNgdotNextPart\x12_\n" +
"\x16ReaderNgdotNextRawPart\x18\t \x01(\v2%.ngolofuzz.ReaderNgdotNextRawPartArgsH\x00R\x16ReaderNgdotNextRawPart\x128\n" +
"\tNewWriter\x18\n" +
" \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriter\x12V\n" +
"\x13WriterNgdotBoundary\x18\v \x01(\v2\".ngolofuzz.WriterNgdotBoundaryArgsH\x00R\x13WriterNgdotBoundary\x12_\n" +
"\x16WriterNgdotSetBoundary\x18\f \x01(\v2%.ngolofuzz.WriterNgdotSetBoundaryArgsH\x00R\x16WriterNgdotSetBoundary\x12w\n" +
"\x1eWriterNgdotFormDataContentType\x18\r \x01(\v2-.ngolofuzz.WriterNgdotFormDataContentTypeArgsH\x00R\x1eWriterNgdotFormDataContentType\x12h\n" +
"\x19WriterNgdotCreateFormFile\x18\x0e \x01(\v2(.ngolofuzz.WriterNgdotCreateFormFileArgsH\x00R\x19WriterNgdotCreateFormFile\x12k\n" +
"\x1aWriterNgdotCreateFormField\x18\x0f \x01(\v2).ngolofuzz.WriterNgdotCreateFormFieldArgsH\x00R\x1aWriterNgdotCreateFormField\x12_\n" +
"\x16FileContentDisposition\x18\x10 \x01(\v2%.ngolofuzz.FileContentDispositionArgsH\x00R\x16FileContentDisposition\x12\\\n" +
"\x15WriterNgdotWriteField\x18\x11 \x01(\v2$.ngolofuzz.WriterNgdotWriteFieldArgsH\x00R\x15WriterNgdotWriteField\x12M\n" +
"\x10WriterNgdotClose\x18\x12 \x01(\v2\x1f.ngolofuzz.WriterNgdotCloseArgsH\x00R\x10WriterNgdotCloseB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_mime_multipartb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
var file_ngolofuzz_proto_goTypes = []any{
(*ReaderNgdotReadFormArgs)(nil), // 0: ngolofuzz.ReaderNgdotReadFormArgs
(*FormNgdotRemoveAllArgs)(nil), // 1: ngolofuzz.FormNgdotRemoveAllArgs
(*PartNgdotFormNameArgs)(nil), // 2: ngolofuzz.PartNgdotFormNameArgs
(*PartNgdotFileNameArgs)(nil), // 3: ngolofuzz.PartNgdotFileNameArgs
(*NewReaderArgs)(nil), // 4: ngolofuzz.NewReaderArgs
(*PartNgdotReadArgs)(nil), // 5: ngolofuzz.PartNgdotReadArgs
(*PartNgdotCloseArgs)(nil), // 6: ngolofuzz.PartNgdotCloseArgs
(*ReaderNgdotNextPartArgs)(nil), // 7: ngolofuzz.ReaderNgdotNextPartArgs
(*ReaderNgdotNextRawPartArgs)(nil), // 8: ngolofuzz.ReaderNgdotNextRawPartArgs
(*NewWriterArgs)(nil), // 9: ngolofuzz.NewWriterArgs
(*WriterNgdotBoundaryArgs)(nil), // 10: ngolofuzz.WriterNgdotBoundaryArgs
(*WriterNgdotSetBoundaryArgs)(nil), // 11: ngolofuzz.WriterNgdotSetBoundaryArgs
(*WriterNgdotFormDataContentTypeArgs)(nil), // 12: ngolofuzz.WriterNgdotFormDataContentTypeArgs
(*WriterNgdotCreateFormFileArgs)(nil), // 13: ngolofuzz.WriterNgdotCreateFormFileArgs
(*WriterNgdotCreateFormFieldArgs)(nil), // 14: ngolofuzz.WriterNgdotCreateFormFieldArgs
(*FileContentDispositionArgs)(nil), // 15: ngolofuzz.FileContentDispositionArgs
(*WriterNgdotWriteFieldArgs)(nil), // 16: ngolofuzz.WriterNgdotWriteFieldArgs
(*WriterNgdotCloseArgs)(nil), // 17: ngolofuzz.WriterNgdotCloseArgs
(*NgoloFuzzOne)(nil), // 18: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 19: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 20: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadForm:type_name -> ngolofuzz.ReaderNgdotReadFormArgs
1, // 1: ngolofuzz.NgoloFuzzOne.FormNgdotRemoveAll:type_name -> ngolofuzz.FormNgdotRemoveAllArgs
2, // 2: ngolofuzz.NgoloFuzzOne.PartNgdotFormName:type_name -> ngolofuzz.PartNgdotFormNameArgs
3, // 3: ngolofuzz.NgoloFuzzOne.PartNgdotFileName:type_name -> ngolofuzz.PartNgdotFileNameArgs
4, // 4: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
5, // 5: ngolofuzz.NgoloFuzzOne.PartNgdotRead:type_name -> ngolofuzz.PartNgdotReadArgs
6, // 6: ngolofuzz.NgoloFuzzOne.PartNgdotClose:type_name -> ngolofuzz.PartNgdotCloseArgs
7, // 7: ngolofuzz.NgoloFuzzOne.ReaderNgdotNextPart:type_name -> ngolofuzz.ReaderNgdotNextPartArgs
8, // 8: ngolofuzz.NgoloFuzzOne.ReaderNgdotNextRawPart:type_name -> ngolofuzz.ReaderNgdotNextRawPartArgs
9, // 9: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
10, // 10: ngolofuzz.NgoloFuzzOne.WriterNgdotBoundary:type_name -> ngolofuzz.WriterNgdotBoundaryArgs
11, // 11: ngolofuzz.NgoloFuzzOne.WriterNgdotSetBoundary:type_name -> ngolofuzz.WriterNgdotSetBoundaryArgs
12, // 12: ngolofuzz.NgoloFuzzOne.WriterNgdotFormDataContentType:type_name -> ngolofuzz.WriterNgdotFormDataContentTypeArgs
13, // 13: ngolofuzz.NgoloFuzzOne.WriterNgdotCreateFormFile:type_name -> ngolofuzz.WriterNgdotCreateFormFileArgs
14, // 14: ngolofuzz.NgoloFuzzOne.WriterNgdotCreateFormField:type_name -> ngolofuzz.WriterNgdotCreateFormFieldArgs
15, // 15: ngolofuzz.NgoloFuzzOne.FileContentDisposition:type_name -> ngolofuzz.FileContentDispositionArgs
16, // 16: ngolofuzz.NgoloFuzzOne.WriterNgdotWriteField:type_name -> ngolofuzz.WriterNgdotWriteFieldArgs
17, // 17: ngolofuzz.NgoloFuzzOne.WriterNgdotClose:type_name -> ngolofuzz.WriterNgdotCloseArgs
18, // 18: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
19, // [19:19] is the sub-list for method output_type
19, // [19:19] is the sub-list for method input_type
19, // [19:19] is the sub-list for extension type_name
19, // [19:19] is the sub-list for extension extendee
0, // [0:19] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[18].OneofWrappers = []any{
(*NgoloFuzzOne_ReaderNgdotReadForm)(nil),
(*NgoloFuzzOne_FormNgdotRemoveAll)(nil),
(*NgoloFuzzOne_PartNgdotFormName)(nil),
(*NgoloFuzzOne_PartNgdotFileName)(nil),
(*NgoloFuzzOne_NewReader)(nil),
(*NgoloFuzzOne_PartNgdotRead)(nil),
(*NgoloFuzzOne_PartNgdotClose)(nil),
(*NgoloFuzzOne_ReaderNgdotNextPart)(nil),
(*NgoloFuzzOne_ReaderNgdotNextRawPart)(nil),
(*NgoloFuzzOne_NewWriter)(nil),
(*NgoloFuzzOne_WriterNgdotBoundary)(nil),
(*NgoloFuzzOne_WriterNgdotSetBoundary)(nil),
(*NgoloFuzzOne_WriterNgdotFormDataContentType)(nil),
(*NgoloFuzzOne_WriterNgdotCreateFormFile)(nil),
(*NgoloFuzzOne_WriterNgdotCreateFormField)(nil),
(*NgoloFuzzOne_FileContentDisposition)(nil),
(*NgoloFuzzOne_WriterNgdotWriteField)(nil),
(*NgoloFuzzOne_WriterNgdotClose)(nil),
}
file_ngolofuzz_proto_msgTypes[19].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 21,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_mime_quotedprintable
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"mime/quotedprintable"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub used by the fuzz harness:
// Read serves a fixed byte buffer supplied at construction time and
// Write discards everything while reporting success.
type FuzzingConn struct {
	buf    []byte // bytes served to Read callers
	offset int    // number of bytes already consumed from buf
}

// Read copies the next chunk of the buffer into b. It returns io.EOF
// once the buffer is exhausted, and otherwise reports exactly how many
// bytes were copied.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	// BUG FIX: this comparison previously read
	//     len(b) < len(c.buf)+c.offset
	// so when remaining < len(b) < len(c.buf)+c.offset the method copied
	// only the remaining bytes but returned n == len(b) and advanced
	// offset past the end of the buffer, over-reporting the read.
	if len(b) < remaining {
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write discards b and reports that all of it was written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed; later reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr with fixed identifiers.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns the placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns the placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; the in-memory connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit these generic helper constructors only when the fuzz target actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer and
// returns it as a *big.Int (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the fuzzer-provided bytes a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	underlying := bytes.NewBuffer(a)
	return bufio.NewReader(underlying)
}
// ConvertIntArray converts each int64 element to the platform int type,
// preserving order (values are truncated on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array converts each int64 element to uint16, preserving
// order; values outside the uint16 range wrap modulo 65536.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s (U+FFFD if s starts with invalid
// UTF-8), or NUL for an empty string.
func GetRune(s string) rune {
	if s == "" {
		return '\x00'
	}
	return []rune(s)[0]
}
// FuzzNG_valid decodes data as a protobuf call list and replays it.
// The caller guarantees the input is valid protobuf, so a decode
// failure is a harness bug and panics.
func FuzzNG_valid(data []byte) int {
	list := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, list); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Installed only after unmarshalling, so the decode panic above
	// still propagates. String panics are deliberate aborts from the
	// replay machinery and are swallowed; everything else re-panics.
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, ok := r.(string); !ok {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// FuzzNG_unsure decodes data as a protobuf call list and replays it.
// Unlike FuzzNG_valid, the input may be arbitrary bytes: a decode
// failure simply rejects the input instead of panicking.
func FuzzNG_unsure(data []byte) int {
	list := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, list); err != nil {
		return 0
	}
	// String panics are deliberate aborts from the replay machinery
	// and are swallowed; everything else re-panics.
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, ok := r.(string); !ok {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// initialized guards the one-time reproducer-dump setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the decoded sequence of mime/quotedprintable API
// calls in gen. Constructed Readers/Writers are kept in slices and
// reused round-robin by the Read/Write/Close cases. Returns 1 when the
// whole list was replayed, 0 when replay stopped early (call cap hit or
// an API call returned an error).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first call only: if FUZZ_NG_REPRODUCER names a file,
		// write a standalone Go reproducer of this input to it.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var ReaderResults []*quotedprintable.Reader
	ReaderResultsIndex := 0
	var WriterResults []*quotedprintable.Writer
	WriterResultsIndex := 0
	for l := range gen.List {
		// Cap the number of replayed calls to bound runtime per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewReader:
			arg0 := bytes.NewReader(a.NewReader.R)
			r0 := quotedprintable.NewReader(arg0)
			if r0 != nil{
				ReaderResults = append(ReaderResults, r0)
			}
		case *NgoloFuzzOne_ReaderNgdotRead:
			// Skip the call if no Reader has been constructed yet.
			if len(ReaderResults) == 0 {
				continue
			}
			// Pick the next Reader round-robin.
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.Read(a.ReaderNgdotRead.P)
			// Any read error (including io.EOF) ends the replay.
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_NewWriter:
			arg0 := bytes.NewBuffer(a.NewWriter.W)
			r0 := quotedprintable.NewWriter(arg0)
			if r0 != nil{
				WriterResults = append(WriterResults, r0)
			}
		case *NgoloFuzzOne_WriterNgdotWrite:
			// Skip the call if no Writer has been constructed yet.
			if len(WriterResults) == 0 {
				continue
			}
			// Pick the next Writer round-robin.
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			_, r1 := arg0.Write(a.WriterNgdotWrite.P)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_WriterNgdotClose:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			r0 := arg0.Close()
			if r0 != nil{
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer of the call list in gen to
// w. It mirrors FuzzNG_List's replay logic (same round-robin selection,
// same skip-when-empty rules — but note it has no 4096-call cap) so the
// emitted statements correspond to the calls the replay would make.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	ReaderNb := 0
	ReaderResultsIndex := 0
	WriterNb := 0
	WriterResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewReader:
			w.WriteString(fmt.Sprintf("Reader%d := quotedprintable.NewReader(bytes.NewReader(%#+v))\n", ReaderNb, a.NewReader.R))
			ReaderNb = ReaderNb + 1
		case *NgoloFuzzOne_ReaderNgdotRead:
			// No Reader constructed yet: the replay skips this call too.
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Read(%#+v)\n", ReaderResultsIndex, a.ReaderNgdotRead.P))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_NewWriter:
			w.WriteString(fmt.Sprintf("Writer%d := quotedprintable.NewWriter(bytes.NewBuffer(%#+v))\n", WriterNb, a.NewWriter.W))
			WriterNb = WriterNb + 1
		case *NgoloFuzzOne_WriterNgdotWrite:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Write(%#+v)\n", WriterResultsIndex, a.WriterNgdotWrite.P))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		case *NgoloFuzzOne_WriterNgdotClose:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.Close()\n", WriterResultsIndex))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_mime_quotedprintable
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewReaderArgs) Reset() {
*x = NewReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewReaderArgs) ProtoMessage() {}
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewReaderArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type ReaderNgdotReadArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReaderNgdotReadArgs) Reset() {
*x = ReaderNgdotReadArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReaderNgdotReadArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReaderNgdotReadArgs) ProtoMessage() {}
func (x *ReaderNgdotReadArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *ReaderNgdotReadArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
type NewWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterArgs) Reset() {
*x = NewWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterArgs) ProtoMessage() {}
func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NewWriterArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type WriterNgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWriteArgs) Reset() {
*x = WriterNgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWriteArgs) ProtoMessage() {}
func (x *WriterNgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *WriterNgdotWriteArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
type WriterNgdotCloseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotCloseArgs) Reset() {
*x = WriterNgdotCloseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotCloseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotCloseArgs) ProtoMessage() {}
func (x *WriterNgdotCloseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotCloseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewReader
// *NgoloFuzzOne_ReaderNgdotRead
// *NgoloFuzzOne_NewWriter
// *NgoloFuzzOne_WriterNgdotWrite
// *NgoloFuzzOne_WriterNgdotClose
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
return x.NewReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotRead() *ReaderNgdotReadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotRead); ok {
return x.ReaderNgdotRead
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
return x.NewWriter
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotWrite() *WriterNgdotWriteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWrite); ok {
return x.WriterNgdotWrite
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotClose() *WriterNgdotCloseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotClose); ok {
return x.WriterNgdotClose
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewReader struct {
NewReader *NewReaderArgs `protobuf:"bytes,1,opt,name=NewReader,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotRead struct {
ReaderNgdotRead *ReaderNgdotReadArgs `protobuf:"bytes,2,opt,name=ReaderNgdotRead,proto3,oneof"`
}
type NgoloFuzzOne_NewWriter struct {
NewWriter *NewWriterArgs `protobuf:"bytes,3,opt,name=NewWriter,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotWrite struct {
WriterNgdotWrite *WriterNgdotWriteArgs `protobuf:"bytes,4,opt,name=WriterNgdotWrite,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotClose struct {
WriterNgdotClose *WriterNgdotCloseArgs `protobuf:"bytes,5,opt,name=WriterNgdotClose,proto3,oneof"`
}
func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotRead) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotWrite) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotClose) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1d\n" +
"\rNewReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"#\n" +
"\x13ReaderNgdotReadArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\fR\x01p\"\x1d\n" +
"\rNewWriterArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"$\n" +
"\x14WriterNgdotWriteArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\fR\x01p\"\x16\n" +
"\x14WriterNgdotCloseArgs\"\xf4\x02\n" +
"\fNgoloFuzzOne\x128\n" +
"\tNewReader\x18\x01 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12J\n" +
"\x0fReaderNgdotRead\x18\x02 \x01(\v2\x1e.ngolofuzz.ReaderNgdotReadArgsH\x00R\x0fReaderNgdotRead\x128\n" +
"\tNewWriter\x18\x03 \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriter\x12M\n" +
"\x10WriterNgdotWrite\x18\x04 \x01(\v2\x1f.ngolofuzz.WriterNgdotWriteArgsH\x00R\x10WriterNgdotWrite\x12M\n" +
"\x10WriterNgdotClose\x18\x05 \x01(\v2\x1f.ngolofuzz.WriterNgdotCloseArgsH\x00R\x10WriterNgdotCloseB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB!Z\x1f./;fuzz_ng_mime_quotedprintableb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_ngolofuzz_proto_goTypes = []any{
(*NewReaderArgs)(nil), // 0: ngolofuzz.NewReaderArgs
(*ReaderNgdotReadArgs)(nil), // 1: ngolofuzz.ReaderNgdotReadArgs
(*NewWriterArgs)(nil), // 2: ngolofuzz.NewWriterArgs
(*WriterNgdotWriteArgs)(nil), // 3: ngolofuzz.WriterNgdotWriteArgs
(*WriterNgdotCloseArgs)(nil), // 4: ngolofuzz.WriterNgdotCloseArgs
(*NgoloFuzzOne)(nil), // 5: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 6: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 7: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
1, // 1: ngolofuzz.NgoloFuzzOne.ReaderNgdotRead:type_name -> ngolofuzz.ReaderNgdotReadArgs
2, // 2: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
3, // 3: ngolofuzz.NgoloFuzzOne.WriterNgdotWrite:type_name -> ngolofuzz.WriterNgdotWriteArgs
4, // 4: ngolofuzz.NgoloFuzzOne.WriterNgdotClose:type_name -> ngolofuzz.WriterNgdotCloseArgs
5, // 5: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
6, // [6:6] is the sub-list for method output_type
6, // [6:6] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
(*NgoloFuzzOne_NewReader)(nil),
(*NgoloFuzzOne_ReaderNgdotRead)(nil),
(*NgoloFuzzOne_NewWriter)(nil),
(*NgoloFuzzOne_WriterNgdotWrite)(nil),
(*NgoloFuzzOne_WriterNgdotClose)(nil),
}
file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 8,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_net_http_cgi
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"net/http/cgi"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in that serves the
// fuzzer-provided bytes on its read side and discards all writes.
type FuzzingConn struct {
    buf    []byte // bytes returned by Read
    offset int    // current read position within buf
}

// Read copies bytes from the internal buffer into b, returning io.EOF
// once the buffer is exhausted. It never reports more bytes than it
// actually copied.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
    if c.offset >= len(c.buf) {
        return 0, io.EOF
    }
    // Bug fix: compare against the REMAINING byte count (len-offset),
    // not len+offset. The old condition could report n == len(b) while
    // copying fewer bytes, and advance offset past len(c.buf).
    if len(b) < len(c.buf)-c.offset {
        copy(b, c.buf[c.offset:])
        c.offset += len(b)
        return len(b), nil
    }
    copy(b, c.buf[c.offset:])
    r := len(c.buf) - c.offset
    c.offset = len(c.buf)
    return r, nil
}

// Write pretends to consume b in full; output is discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
    return len(b), nil
}

// Close marks the buffer as fully consumed so further Reads hit EOF.
func (c *FuzzingConn) Close() error {
    c.offset = len(c.buf)
    return nil
}
// FuzzingAddr is a placeholder net.Addr for FuzzingConn endpoints.
type FuzzingAddr struct{}

// Network reports a fixed, fake network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}

// String reports a fixed, fake address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}

// LocalAddr returns the placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}

// RemoteAddr returns the placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}

// SetDeadline is a no-op: the in-memory conn never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps the fuzzer input a in a FuzzingConn ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
    return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
    return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
    return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
    out := make([]int, len(a))
    for i, v := range a {
        out[i] = int(v)
    }
    return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (mod 2^16).
func ConvertUint16Array(a []int64) []uint16 {
    out := make([]uint16, len(a))
    for i, v := range a {
        out[i] = uint16(v)
    }
    return out
}
// GetRune returns the first rune of s, or NUL for an empty string.
func GetRune(s string) rune {
    for _, first := range s {
        return first
    }
    return 0
}
// FuzzNG_valid assumes data is a valid protobuf-encoded NgoloFuzzList
// (the corpus is LPM-generated) and panics when it is not.
func FuzzNG_valid(data []byte) int {
    gen := &NgoloFuzzList{}
    if err := proto.Unmarshal(data, gen); err != nil {
        panic("Failed to unmarshal LPM generated variables")
    }
    // Installed after the unmarshal check on purpose: the panic above
    // must escape, while string panics raised by the target are swallowed.
    defer func() {
        if r := recover(); r != nil {
            if _, isString := r.(string); !isString {
                panic(r)
            }
        }
    }()
    runtime.GC()
    return FuzzNG_List(gen)
}
// FuzzNG_unsure handles input that may not be a valid protobuf:
// undecodable data is rejected by returning 0 instead of panicking.
func FuzzNG_unsure(data []byte) int {
    gen := &NgoloFuzzList{}
    if err := proto.Unmarshal(data, gen); err != nil {
        return 0
    }
    // Swallow string panics from the target; re-raise anything else.
    defer func() {
        if r := recover(); r != nil {
            if _, isString := r.(string); !isString {
                panic(r)
            }
        }
    }()
    runtime.GC()
    return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the LPM-decoded call list against net/http/cgi.
// On first use it optionally dumps a Go reproducer to the file named by
// the FUZZ_NG_REPRODUCER environment variable.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
for l := range gen.List {
// Cap the number of replayed calls to keep individual runs fast.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Request:
// cgi.Request builds the request from process environment variables.
_, r1 := cgi.Request()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_RequestFromMap:
// Exercise cgi.RequestFromMap with the fuzzer-supplied CGI param map.
_, r1 := cgi.RequestFromMap(a.RequestFromMap.Params)
if r1 != nil{
r1.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer of the call list to w so a
// crash found by the fuzzer can be replayed as plain code.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
    for l := range gen.List {
        switch a := gen.List[l].Item.(type) {
        case *NgoloFuzzOne_Request:
            // Constant line: Sprintf with no arguments was pointless
            // (flagged by go vet / staticcheck S1039).
            w.WriteString("cgi.Request()\n")
        case *NgoloFuzzOne_RequestFromMap:
            w.WriteString(fmt.Sprintf("cgi.RequestFromMap(%#+v)\n", a.RequestFromMap.Params))
        }
    }
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_net_http_cgi
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type RequestArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RequestArgs) Reset() {
*x = RequestArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RequestArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RequestArgs) ProtoMessage() {}
func (x *RequestArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RequestArgs.ProtoReflect.Descriptor instead.
func (*RequestArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type RequestFromMapArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Params map[string]string `protobuf:"bytes,1,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RequestFromMapArgs) Reset() {
*x = RequestFromMapArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RequestFromMapArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RequestFromMapArgs) ProtoMessage() {}
func (x *RequestFromMapArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RequestFromMapArgs.ProtoReflect.Descriptor instead.
func (*RequestFromMapArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *RequestFromMapArgs) GetParams() map[string]string {
if x != nil {
return x.Params
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Request
// *NgoloFuzzOne_RequestFromMap
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetRequest() *RequestArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Request); ok {
return x.Request
}
}
return nil
}
func (x *NgoloFuzzOne) GetRequestFromMap() *RequestFromMapArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RequestFromMap); ok {
return x.RequestFromMap
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Request struct {
Request *RequestArgs `protobuf:"bytes,1,opt,name=Request,proto3,oneof"`
}
type NgoloFuzzOne_RequestFromMap struct {
RequestFromMap *RequestFromMapArgs `protobuf:"bytes,2,opt,name=RequestFromMap,proto3,oneof"`
}
func (*NgoloFuzzOne_Request) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RequestFromMap) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\r\n" +
"\vRequestArgs\"\x92\x01\n" +
"\x12RequestFromMapArgs\x12A\n" +
"\x06params\x18\x01 \x03(\v2).ngolofuzz.RequestFromMapArgs.ParamsEntryR\x06params\x1a9\n" +
"\vParamsEntry\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x93\x01\n" +
"\fNgoloFuzzOne\x122\n" +
"\aRequest\x18\x01 \x01(\v2\x16.ngolofuzz.RequestArgsH\x00R\aRequest\x12G\n" +
"\x0eRequestFromMap\x18\x02 \x01(\v2\x1d.ngolofuzz.RequestFromMapArgsH\x00R\x0eRequestFromMapB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x19Z\x17./;fuzz_ng_net_http_cgib\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_ngolofuzz_proto_goTypes = []any{
(*RequestArgs)(nil), // 0: ngolofuzz.RequestArgs
(*RequestFromMapArgs)(nil), // 1: ngolofuzz.RequestFromMapArgs
(*NgoloFuzzOne)(nil), // 2: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 3: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 4: ngolofuzz.NgoloFuzzList
nil, // 5: ngolofuzz.RequestFromMapArgs.ParamsEntry
}
var file_ngolofuzz_proto_depIdxs = []int32{
5, // 0: ngolofuzz.RequestFromMapArgs.params:type_name -> ngolofuzz.RequestFromMapArgs.ParamsEntry
0, // 1: ngolofuzz.NgoloFuzzOne.Request:type_name -> ngolofuzz.RequestArgs
1, // 2: ngolofuzz.NgoloFuzzOne.RequestFromMap:type_name -> ngolofuzz.RequestFromMapArgs
2, // 3: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzOne_Request)(nil),
(*NgoloFuzzOne_RequestFromMap)(nil),
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_net_http_httptest
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"net/http/httptest"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in that serves the
// fuzzer-provided bytes on its read side and discards all writes.
type FuzzingConn struct {
    buf    []byte // bytes returned by Read
    offset int    // current read position within buf
}

// Read copies bytes from the internal buffer into b, returning io.EOF
// once the buffer is exhausted. It never reports more bytes than it
// actually copied.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
    if c.offset >= len(c.buf) {
        return 0, io.EOF
    }
    // Bug fix: compare against the REMAINING byte count (len-offset),
    // not len+offset. The old condition could report n == len(b) while
    // copying fewer bytes, and advance offset past len(c.buf).
    if len(b) < len(c.buf)-c.offset {
        copy(b, c.buf[c.offset:])
        c.offset += len(b)
        return len(b), nil
    }
    copy(b, c.buf[c.offset:])
    r := len(c.buf) - c.offset
    c.offset = len(c.buf)
    return r, nil
}

// Write pretends to consume b in full; output is discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
    return len(b), nil
}

// Close marks the buffer as fully consumed so further Reads hit EOF.
func (c *FuzzingConn) Close() error {
    c.offset = len(c.buf)
    return nil
}
// FuzzingAddr is a placeholder net.Addr for FuzzingConn endpoints.
type FuzzingAddr struct{}

// Network reports a fixed, fake network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}

// String reports a fixed, fake address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}

// LocalAddr returns the placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}

// RemoteAddr returns the placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}

// SetDeadline is a no-op: the in-memory conn never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps the fuzzer input a in a FuzzingConn ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
    return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
    return new(big.Int).SetBytes(a)
}

// CreateBufioReader exposes a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
    return bufio.NewReader(bytes.NewReader(a))
}

// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
    out := make([]int, len(a))
    for i, v := range a {
        out[i] = int(v)
    }
    return out
}

// ConvertUint16Array truncates each int64 in a to uint16 (mod 2^16).
func ConvertUint16Array(a []int64) []uint16 {
    out := make([]uint16, len(a))
    for i, v := range a {
        out[i] = uint16(v)
    }
    return out
}

// GetRune returns the first rune of s, or NUL for an empty string.
func GetRune(s string) rune {
    for _, first := range s {
        return first
    }
    return 0
}
// FuzzNG_valid assumes data is a valid protobuf-encoded NgoloFuzzList
// (the corpus is LPM-generated) and panics when it is not.
func FuzzNG_valid(data []byte) int {
    gen := &NgoloFuzzList{}
    if err := proto.Unmarshal(data, gen); err != nil {
        panic("Failed to unmarshal LPM generated variables")
    }
    // Installed after the unmarshal check on purpose: the panic above
    // must escape, while string panics raised by the target are swallowed.
    defer func() {
        if r := recover(); r != nil {
            if _, isString := r.(string); !isString {
                panic(r)
            }
        }
    }()
    runtime.GC()
    return FuzzNG_List(gen)
}
// FuzzNG_unsure handles input that may not be a valid protobuf:
// undecodable data is rejected by returning 0 instead of panicking.
func FuzzNG_unsure(data []byte) int {
    gen := &NgoloFuzzList{}
    if err := proto.Unmarshal(data, gen); err != nil {
        return 0
    }
    // Swallow string panics from the target; re-raise anything else.
    defer func() {
        if r := recover(); r != nil {
            if _, isString := r.(string); !isString {
                panic(r)
            }
        }
    }()
    runtime.GC()
    return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the LPM-decoded call list against net/http/httptest.
// Recorders built by the NewRecorder case are pooled and handed out
// round-robin to the ResponseRecorder method cases. Server cases share the
// same pooling pattern but no case here constructs a Server, so they stay
// dormant with an empty pool.
func FuzzNG_List(gen *NgoloFuzzList) int {
    if !initialized {
        // Optionally dump a Go reproducer on the first invocation.
        repro := os.Getenv("FUZZ_NG_REPRODUCER")
        if len(repro) > 0 {
            f, err := os.Create(repro)
            if err != nil {
                log.Fatalf("Failed to open %s : %s", repro, err)
            } else {
                PrintNG_List(gen, f)
            }
        }
        initialized = true
    }
    var ResponseRecorderResults []*httptest.ResponseRecorder
    ResponseRecorderResultsIndex := 0
    var ServerResults []*httptest.Server
    ServerResultsIndex := 0
    for l := range gen.List {
        // Cap the number of replayed calls to keep individual runs fast.
        if l > 4096 {
            return 0
        }
        switch a := gen.List[l].Item.(type) {
        case *NgoloFuzzOne_NewRequest:
            arg2 := bytes.NewReader(a.NewRequest.Body)
            httptest.NewRequest(a.NewRequest.Method, a.NewRequest.Target, arg2)
        case *NgoloFuzzOne_NewRecorder:
            // Bug fix: keep the new recorder. Previously the result was
            // discarded, so ResponseRecorderResults stayed empty forever and
            // every ResponseRecorder case below was unreachable dead code.
            ResponseRecorderResults = append(ResponseRecorderResults, httptest.NewRecorder())
        case *NgoloFuzzOne_ResponseRecorderNgdotHeader:
            if len(ResponseRecorderResults) == 0 {
                continue
            }
            arg0 := ResponseRecorderResults[ResponseRecorderResultsIndex]
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % len(ResponseRecorderResults)
            arg0.Header()
        case *NgoloFuzzOne_ResponseRecorderNgdotWrite:
            if len(ResponseRecorderResults) == 0 {
                continue
            }
            arg0 := ResponseRecorderResults[ResponseRecorderResultsIndex]
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % len(ResponseRecorderResults)
            _, r1 := arg0.Write(a.ResponseRecorderNgdotWrite.Buf)
            if r1 != nil {
                r1.Error()
                return 0
            }
        case *NgoloFuzzOne_ResponseRecorderNgdotWriteString:
            if len(ResponseRecorderResults) == 0 {
                continue
            }
            arg0 := ResponseRecorderResults[ResponseRecorderResultsIndex]
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % len(ResponseRecorderResults)
            _, r1 := arg0.WriteString(a.ResponseRecorderNgdotWriteString.Str)
            if r1 != nil {
                r1.Error()
                return 0
            }
        case *NgoloFuzzOne_ResponseRecorderNgdotWriteHeader:
            if len(ResponseRecorderResults) == 0 {
                continue
            }
            arg0 := ResponseRecorderResults[ResponseRecorderResultsIndex]
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % len(ResponseRecorderResults)
            arg1 := int(a.ResponseRecorderNgdotWriteHeader.Code)
            arg0.WriteHeader(arg1)
        case *NgoloFuzzOne_ResponseRecorderNgdotFlush:
            if len(ResponseRecorderResults) == 0 {
                continue
            }
            arg0 := ResponseRecorderResults[ResponseRecorderResultsIndex]
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % len(ResponseRecorderResults)
            arg0.Flush()
        case *NgoloFuzzOne_ResponseRecorderNgdotResult:
            if len(ResponseRecorderResults) == 0 {
                continue
            }
            arg0 := ResponseRecorderResults[ResponseRecorderResultsIndex]
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % len(ResponseRecorderResults)
            arg0.Result()
        case *NgoloFuzzOne_ServerNgdotStart:
            if len(ServerResults) == 0 {
                continue
            }
            arg0 := ServerResults[ServerResultsIndex]
            ServerResultsIndex = (ServerResultsIndex + 1) % len(ServerResults)
            arg0.Start()
        case *NgoloFuzzOne_ServerNgdotStartTLS:
            if len(ServerResults) == 0 {
                continue
            }
            arg0 := ServerResults[ServerResultsIndex]
            ServerResultsIndex = (ServerResultsIndex + 1) % len(ServerResults)
            arg0.StartTLS()
        case *NgoloFuzzOne_ServerNgdotClose:
            if len(ServerResults) == 0 {
                continue
            }
            arg0 := ServerResults[ServerResultsIndex]
            ServerResultsIndex = (ServerResultsIndex + 1) % len(ServerResults)
            arg0.Close()
        case *NgoloFuzzOne_ServerNgdotCloseClientConnections:
            if len(ServerResults) == 0 {
                continue
            }
            arg0 := ServerResults[ServerResultsIndex]
            ServerResultsIndex = (ServerResultsIndex + 1) % len(ServerResults)
            arg0.CloseClientConnections()
        case *NgoloFuzzOne_ServerNgdotCertificate:
            if len(ServerResults) == 0 {
                continue
            }
            arg0 := ServerResults[ServerResultsIndex]
            ServerResultsIndex = (ServerResultsIndex + 1) % len(ServerResults)
            arg0.Certificate()
        case *NgoloFuzzOne_ServerNgdotClient:
            if len(ServerResults) == 0 {
                continue
            }
            arg0 := ServerResults[ServerResultsIndex]
            ServerResultsIndex = (ServerResultsIndex + 1) % len(ServerResults)
            arg0.Client()
        }
    }
    return 1
}
// PrintNG_List writes a Go-source reproducer of the call list to w so a
// crash found by the fuzzer can be replayed as plain code. It mirrors the
// recorder/server pooling logic of FuzzNG_List.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
    ResponseRecorderNb := 0
    ResponseRecorderResultsIndex := 0
    ServerNb := 0
    ServerResultsIndex := 0
    for l := range gen.List {
        switch a := gen.List[l].Item.(type) {
        case *NgoloFuzzOne_NewRequest:
            w.WriteString(fmt.Sprintf("httptest.NewRequest(%#+v, %#+v, bytes.NewReader(%#+v))\n", a.NewRequest.Method, a.NewRequest.Target, a.NewRequest.Body))
        case *NgoloFuzzOne_NewRecorder:
            // Bug fix: bind the recorder to a numbered variable and count it.
            // ResponseRecorderNb was never incremented, so every
            // ResponseRecorder%d method line below was silently skipped and
            // the reproducer omitted those calls.
            w.WriteString(fmt.Sprintf("ResponseRecorder%d := httptest.NewRecorder()\n", ResponseRecorderNb))
            ResponseRecorderNb++
        case *NgoloFuzzOne_ResponseRecorderNgdotHeader:
            if ResponseRecorderNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("ResponseRecorder%d.Header()\n", ResponseRecorderResultsIndex))
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % ResponseRecorderNb
        case *NgoloFuzzOne_ResponseRecorderNgdotWrite:
            if ResponseRecorderNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("ResponseRecorder%d.Write(%#+v)\n", ResponseRecorderResultsIndex, a.ResponseRecorderNgdotWrite.Buf))
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % ResponseRecorderNb
        case *NgoloFuzzOne_ResponseRecorderNgdotWriteString:
            if ResponseRecorderNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("ResponseRecorder%d.WriteString(%#+v)\n", ResponseRecorderResultsIndex, a.ResponseRecorderNgdotWriteString.Str))
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % ResponseRecorderNb
        case *NgoloFuzzOne_ResponseRecorderNgdotWriteHeader:
            if ResponseRecorderNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("ResponseRecorder%d.WriteHeader(int(%#+v))\n", ResponseRecorderResultsIndex, a.ResponseRecorderNgdotWriteHeader.Code))
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % ResponseRecorderNb
        case *NgoloFuzzOne_ResponseRecorderNgdotFlush:
            if ResponseRecorderNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("ResponseRecorder%d.Flush()\n", ResponseRecorderResultsIndex))
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % ResponseRecorderNb
        case *NgoloFuzzOne_ResponseRecorderNgdotResult:
            if ResponseRecorderNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("ResponseRecorder%d.Result()\n", ResponseRecorderResultsIndex))
            ResponseRecorderResultsIndex = (ResponseRecorderResultsIndex + 1) % ResponseRecorderNb
        case *NgoloFuzzOne_ServerNgdotStart:
            if ServerNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("Server%d.Start()\n", ServerResultsIndex))
            ServerResultsIndex = (ServerResultsIndex + 1) % ServerNb
        case *NgoloFuzzOne_ServerNgdotStartTLS:
            if ServerNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("Server%d.StartTLS()\n", ServerResultsIndex))
            ServerResultsIndex = (ServerResultsIndex + 1) % ServerNb
        case *NgoloFuzzOne_ServerNgdotClose:
            if ServerNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("Server%d.Close()\n", ServerResultsIndex))
            ServerResultsIndex = (ServerResultsIndex + 1) % ServerNb
        case *NgoloFuzzOne_ServerNgdotCloseClientConnections:
            if ServerNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("Server%d.CloseClientConnections()\n", ServerResultsIndex))
            ServerResultsIndex = (ServerResultsIndex + 1) % ServerNb
        case *NgoloFuzzOne_ServerNgdotCertificate:
            if ServerNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("Server%d.Certificate()\n", ServerResultsIndex))
            ServerResultsIndex = (ServerResultsIndex + 1) % ServerNb
        case *NgoloFuzzOne_ServerNgdotClient:
            if ServerNb == 0 {
                continue
            }
            w.WriteString(fmt.Sprintf("Server%d.Client()\n", ServerResultsIndex))
            ServerResultsIndex = (ServerResultsIndex + 1) % ServerNb
        }
    }
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_net_http_httptest
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewRequestArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"`
Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"`
Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewRequestArgs) Reset() {
*x = NewRequestArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewRequestArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewRequestArgs) ProtoMessage() {}
func (x *NewRequestArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewRequestArgs.ProtoReflect.Descriptor instead.
func (*NewRequestArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewRequestArgs) GetMethod() string {
if x != nil {
return x.Method
}
return ""
}
func (x *NewRequestArgs) GetTarget() string {
if x != nil {
return x.Target
}
return ""
}
func (x *NewRequestArgs) GetBody() []byte {
if x != nil {
return x.Body
}
return nil
}
type NewRecorderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewRecorderArgs) Reset() {
*x = NewRecorderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewRecorderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewRecorderArgs) ProtoMessage() {}
func (x *NewRecorderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewRecorderArgs.ProtoReflect.Descriptor instead.
func (*NewRecorderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// Generated protobuf bindings (protoc-gen-go, open API v1 struct layout).
// Each *Args message models the argument list of one fuzzed method call on
// net/http/httptest.ResponseRecorder ("Ngdot" encodes the '.' of a method
// call in proto identifiers). All types follow the same generated pattern:
//   - Reset zeroes the value and re-attaches its MessageInfo,
//   - String renders through the protoimpl helpers,
//   - ProtoReflect lazily installs MessageInfo on first reflective use,
//   - Descriptor is the deprecated accessor kept for old generated callers.
// Do not hand-edit: regenerate from ngolofuzz.proto instead.

// ResponseRecorderNgdotHeaderArgs is empty: ResponseRecorder.Header()
// takes no arguments.
type ResponseRecorderNgdotHeaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the zero value and re-binds message type info (slot 2).
func (x *ResponseRecorderNgdotHeaderArgs) Reset() {
	*x = ResponseRecorderNgdotHeaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the proto text rendering of the message.
func (x *ResponseRecorderNgdotHeaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ResponseRecorderNgdotHeaderArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, installing type info on first use.
func (x *ResponseRecorderNgdotHeaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ResponseRecorderNgdotHeaderArgs.ProtoReflect.Descriptor instead.
func (*ResponseRecorderNgdotHeaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// ResponseRecorderNgdotWriteArgs carries the []byte argument of
// ResponseRecorder.Write.
type ResponseRecorderNgdotWriteArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Buf           []byte                 `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ResponseRecorderNgdotWriteArgs) Reset() {
	*x = ResponseRecorderNgdotWriteArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ResponseRecorderNgdotWriteArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ResponseRecorderNgdotWriteArgs) ProtoMessage() {}

func (x *ResponseRecorderNgdotWriteArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ResponseRecorderNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*ResponseRecorderNgdotWriteArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetBuf is the nil-safe accessor for the buf field.
func (x *ResponseRecorderNgdotWriteArgs) GetBuf() []byte {
	if x != nil {
		return x.Buf
	}
	return nil
}

// ResponseRecorderNgdotWriteStringArgs carries the string argument of
// ResponseRecorder.WriteString.
type ResponseRecorderNgdotWriteStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Str           string                 `protobuf:"bytes,1,opt,name=str,proto3" json:"str,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ResponseRecorderNgdotWriteStringArgs) Reset() {
	*x = ResponseRecorderNgdotWriteStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ResponseRecorderNgdotWriteStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ResponseRecorderNgdotWriteStringArgs) ProtoMessage() {}

func (x *ResponseRecorderNgdotWriteStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ResponseRecorderNgdotWriteStringArgs.ProtoReflect.Descriptor instead.
func (*ResponseRecorderNgdotWriteStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetStr is the nil-safe accessor for the str field.
func (x *ResponseRecorderNgdotWriteStringArgs) GetStr() string {
	if x != nil {
		return x.Str
	}
	return ""
}

// ResponseRecorderNgdotWriteHeaderArgs carries the status code argument of
// ResponseRecorder.WriteHeader (widened to int64 in the proto schema).
type ResponseRecorderNgdotWriteHeaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Code          int64                  `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ResponseRecorderNgdotWriteHeaderArgs) Reset() {
	*x = ResponseRecorderNgdotWriteHeaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ResponseRecorderNgdotWriteHeaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ResponseRecorderNgdotWriteHeaderArgs) ProtoMessage() {}

func (x *ResponseRecorderNgdotWriteHeaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ResponseRecorderNgdotWriteHeaderArgs.ProtoReflect.Descriptor instead.
func (*ResponseRecorderNgdotWriteHeaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetCode is the nil-safe accessor for the code field.
func (x *ResponseRecorderNgdotWriteHeaderArgs) GetCode() int64 {
	if x != nil {
		return x.Code
	}
	return 0
}

// ResponseRecorderNgdotFlushArgs is empty: Flush() takes no arguments.
type ResponseRecorderNgdotFlushArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ResponseRecorderNgdotFlushArgs) Reset() {
	*x = ResponseRecorderNgdotFlushArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ResponseRecorderNgdotFlushArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ResponseRecorderNgdotFlushArgs) ProtoMessage() {}

func (x *ResponseRecorderNgdotFlushArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ResponseRecorderNgdotFlushArgs.ProtoReflect.Descriptor instead.
func (*ResponseRecorderNgdotFlushArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// ResponseRecorderNgdotResultArgs is empty: Result() takes no arguments.
type ResponseRecorderNgdotResultArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ResponseRecorderNgdotResultArgs) Reset() {
	*x = ResponseRecorderNgdotResultArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ResponseRecorderNgdotResultArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ResponseRecorderNgdotResultArgs) ProtoMessage() {}

func (x *ResponseRecorderNgdotResultArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ResponseRecorderNgdotResultArgs.ProtoReflect.Descriptor instead.
func (*ResponseRecorderNgdotResultArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// Server*Args messages: generated argument carriers for fuzzed calls on
// net/http/httptest.Server. All of these methods take no arguments, so every
// message below is field-free; the types exist only so the NgoloFuzzOne
// oneof can select which method to invoke. Generated code — do not edit.

// ServerNgdotStartArgs selects a call to Server.Start.
type ServerNgdotStartArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ServerNgdotStartArgs) Reset() {
	*x = ServerNgdotStartArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ServerNgdotStartArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ServerNgdotStartArgs) ProtoMessage() {}

func (x *ServerNgdotStartArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ServerNgdotStartArgs.ProtoReflect.Descriptor instead.
func (*ServerNgdotStartArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// ServerNgdotStartTLSArgs selects a call to Server.StartTLS.
type ServerNgdotStartTLSArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ServerNgdotStartTLSArgs) Reset() {
	*x = ServerNgdotStartTLSArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ServerNgdotStartTLSArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ServerNgdotStartTLSArgs) ProtoMessage() {}

func (x *ServerNgdotStartTLSArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ServerNgdotStartTLSArgs.ProtoReflect.Descriptor instead.
func (*ServerNgdotStartTLSArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// ServerNgdotCloseArgs selects a call to Server.Close.
type ServerNgdotCloseArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ServerNgdotCloseArgs) Reset() {
	*x = ServerNgdotCloseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ServerNgdotCloseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ServerNgdotCloseArgs) ProtoMessage() {}

func (x *ServerNgdotCloseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ServerNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*ServerNgdotCloseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// ServerNgdotCloseClientConnectionsArgs selects a call to
// Server.CloseClientConnections.
type ServerNgdotCloseClientConnectionsArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ServerNgdotCloseClientConnectionsArgs) Reset() {
	*x = ServerNgdotCloseClientConnectionsArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ServerNgdotCloseClientConnectionsArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ServerNgdotCloseClientConnectionsArgs) ProtoMessage() {}

func (x *ServerNgdotCloseClientConnectionsArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ServerNgdotCloseClientConnectionsArgs.ProtoReflect.Descriptor instead.
func (*ServerNgdotCloseClientConnectionsArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// ServerNgdotCertificateArgs selects a call to Server.Certificate.
type ServerNgdotCertificateArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ServerNgdotCertificateArgs) Reset() {
	*x = ServerNgdotCertificateArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ServerNgdotCertificateArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ServerNgdotCertificateArgs) ProtoMessage() {}

func (x *ServerNgdotCertificateArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ServerNgdotCertificateArgs.ProtoReflect.Descriptor instead.
func (*ServerNgdotCertificateArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// ServerNgdotClientArgs selects a call to Server.Client.
type ServerNgdotClientArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ServerNgdotClientArgs) Reset() {
	*x = ServerNgdotClientArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ServerNgdotClientArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ServerNgdotClientArgs) ProtoMessage() {}

func (x *ServerNgdotClientArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ServerNgdotClientArgs.ProtoReflect.Descriptor instead.
func (*ServerNgdotClientArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// NgoloFuzzOne is one fuzzed operation: exactly one of the *Args messages
// below is set (a protobuf oneof), identifying which API call to perform
// and with what arguments. Generated code — do not edit.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_NewRequest
	//	*NgoloFuzzOne_NewRecorder
	//	*NgoloFuzzOne_ResponseRecorderNgdotHeader
	//	*NgoloFuzzOne_ResponseRecorderNgdotWrite
	//	*NgoloFuzzOne_ResponseRecorderNgdotWriteString
	//	*NgoloFuzzOne_ResponseRecorderNgdotWriteHeader
	//	*NgoloFuzzOne_ResponseRecorderNgdotFlush
	//	*NgoloFuzzOne_ResponseRecorderNgdotResult
	//	*NgoloFuzzOne_ServerNgdotStart
	//	*NgoloFuzzOne_ServerNgdotStartTLS
	//	*NgoloFuzzOne_ServerNgdotClose
	//	*NgoloFuzzOne_ServerNgdotCloseClientConnections
	//	*NgoloFuzzOne_ServerNgdotCertificate
	//	*NgoloFuzzOne_ServerNgdotClient
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetItem returns the populated oneof wrapper, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below each return the wrapped payload when the oneof
// holds the matching case, and nil otherwise. Note the inner x shadows the
// receiver after the type assertion (standard generated idiom).
func (x *NgoloFuzzOne) GetNewRequest() *NewRequestArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewRequest); ok {
			return x.NewRequest
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewRecorder() *NewRecorderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewRecorder); ok {
			return x.NewRecorder
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetResponseRecorderNgdotHeader() *ResponseRecorderNgdotHeaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ResponseRecorderNgdotHeader); ok {
			return x.ResponseRecorderNgdotHeader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetResponseRecorderNgdotWrite() *ResponseRecorderNgdotWriteArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ResponseRecorderNgdotWrite); ok {
			return x.ResponseRecorderNgdotWrite
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetResponseRecorderNgdotWriteString() *ResponseRecorderNgdotWriteStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ResponseRecorderNgdotWriteString); ok {
			return x.ResponseRecorderNgdotWriteString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetResponseRecorderNgdotWriteHeader() *ResponseRecorderNgdotWriteHeaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ResponseRecorderNgdotWriteHeader); ok {
			return x.ResponseRecorderNgdotWriteHeader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetResponseRecorderNgdotFlush() *ResponseRecorderNgdotFlushArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ResponseRecorderNgdotFlush); ok {
			return x.ResponseRecorderNgdotFlush
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetResponseRecorderNgdotResult() *ResponseRecorderNgdotResultArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ResponseRecorderNgdotResult); ok {
			return x.ResponseRecorderNgdotResult
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetServerNgdotStart() *ServerNgdotStartArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ServerNgdotStart); ok {
			return x.ServerNgdotStart
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetServerNgdotStartTLS() *ServerNgdotStartTLSArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ServerNgdotStartTLS); ok {
			return x.ServerNgdotStartTLS
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetServerNgdotClose() *ServerNgdotCloseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ServerNgdotClose); ok {
			return x.ServerNgdotClose
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetServerNgdotCloseClientConnections() *ServerNgdotCloseClientConnectionsArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ServerNgdotCloseClientConnections); ok {
			return x.ServerNgdotCloseClientConnections
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetServerNgdotCertificate() *ServerNgdotCertificateArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ServerNgdotCertificate); ok {
			return x.ServerNgdotCertificate
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetServerNgdotClient() *ServerNgdotClientArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ServerNgdotClient); ok {
			return x.ServerNgdotClient
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// One wrapper struct per oneof case; field numbers match ngolofuzz.proto.
type NgoloFuzzOne_NewRequest struct {
	NewRequest *NewRequestArgs `protobuf:"bytes,1,opt,name=NewRequest,proto3,oneof"`
}

type NgoloFuzzOne_NewRecorder struct {
	NewRecorder *NewRecorderArgs `protobuf:"bytes,2,opt,name=NewRecorder,proto3,oneof"`
}

type NgoloFuzzOne_ResponseRecorderNgdotHeader struct {
	ResponseRecorderNgdotHeader *ResponseRecorderNgdotHeaderArgs `protobuf:"bytes,3,opt,name=ResponseRecorderNgdotHeader,proto3,oneof"`
}

type NgoloFuzzOne_ResponseRecorderNgdotWrite struct {
	ResponseRecorderNgdotWrite *ResponseRecorderNgdotWriteArgs `protobuf:"bytes,4,opt,name=ResponseRecorderNgdotWrite,proto3,oneof"`
}

type NgoloFuzzOne_ResponseRecorderNgdotWriteString struct {
	ResponseRecorderNgdotWriteString *ResponseRecorderNgdotWriteStringArgs `protobuf:"bytes,5,opt,name=ResponseRecorderNgdotWriteString,proto3,oneof"`
}

type NgoloFuzzOne_ResponseRecorderNgdotWriteHeader struct {
	ResponseRecorderNgdotWriteHeader *ResponseRecorderNgdotWriteHeaderArgs `protobuf:"bytes,6,opt,name=ResponseRecorderNgdotWriteHeader,proto3,oneof"`
}

type NgoloFuzzOne_ResponseRecorderNgdotFlush struct {
	ResponseRecorderNgdotFlush *ResponseRecorderNgdotFlushArgs `protobuf:"bytes,7,opt,name=ResponseRecorderNgdotFlush,proto3,oneof"`
}

type NgoloFuzzOne_ResponseRecorderNgdotResult struct {
	ResponseRecorderNgdotResult *ResponseRecorderNgdotResultArgs `protobuf:"bytes,8,opt,name=ResponseRecorderNgdotResult,proto3,oneof"`
}

type NgoloFuzzOne_ServerNgdotStart struct {
	ServerNgdotStart *ServerNgdotStartArgs `protobuf:"bytes,9,opt,name=ServerNgdotStart,proto3,oneof"`
}

type NgoloFuzzOne_ServerNgdotStartTLS struct {
	ServerNgdotStartTLS *ServerNgdotStartTLSArgs `protobuf:"bytes,10,opt,name=ServerNgdotStartTLS,proto3,oneof"`
}

type NgoloFuzzOne_ServerNgdotClose struct {
	ServerNgdotClose *ServerNgdotCloseArgs `protobuf:"bytes,11,opt,name=ServerNgdotClose,proto3,oneof"`
}

type NgoloFuzzOne_ServerNgdotCloseClientConnections struct {
	ServerNgdotCloseClientConnections *ServerNgdotCloseClientConnectionsArgs `protobuf:"bytes,12,opt,name=ServerNgdotCloseClientConnections,proto3,oneof"`
}

type NgoloFuzzOne_ServerNgdotCertificate struct {
	ServerNgdotCertificate *ServerNgdotCertificateArgs `protobuf:"bytes,13,opt,name=ServerNgdotCertificate,proto3,oneof"`
}

type NgoloFuzzOne_ServerNgdotClient struct {
	ServerNgdotClient *ServerNgdotClientArgs `protobuf:"bytes,14,opt,name=ServerNgdotClient,proto3,oneof"`
}

// Marker methods making each wrapper satisfy isNgoloFuzzOne_Item.
func (*NgoloFuzzOne_NewRequest) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewRecorder) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ResponseRecorderNgdotHeader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ResponseRecorderNgdotWrite) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ResponseRecorderNgdotWriteString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ResponseRecorderNgdotWriteHeader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ResponseRecorderNgdotFlush) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ResponseRecorderNgdotResult) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ServerNgdotStart) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ServerNgdotStartTLS) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ServerNgdotClose) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ServerNgdotCloseClientConnections) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ServerNgdotCertificate) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ServerNgdotClient) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated variant holding exactly one scalar value
// (double, int64, bool, string, or bytes) via a protobuf oneof.
// Generated code — do not edit.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// GetItem returns the populated oneof wrapper, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each Get* accessor returns the scalar when that case is set, else the
// zero value of its type.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface for the oneof wrappers.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne operations replayed by FuzzNG_List. Generated code — do not edit.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// GetList is the nil-safe accessor for the operation sequence.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the runtime FileDescriptor, populated by
// file_ngolofuzz_proto_init below.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto, embedded as an escaped string. The bytes are generated
// and must not be edited by hand.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"T\n" +
	"\x0eNewRequestArgs\x12\x16\n" +
	"\x06method\x18\x01 \x01(\tR\x06method\x12\x16\n" +
	"\x06target\x18\x02 \x01(\tR\x06target\x12\x12\n" +
	"\x04body\x18\x03 \x01(\fR\x04body\"\x11\n" +
	"\x0fNewRecorderArgs\"!\n" +
	"\x1fResponseRecorderNgdotHeaderArgs\"2\n" +
	"\x1eResponseRecorderNgdotWriteArgs\x12\x10\n" +
	"\x03buf\x18\x01 \x01(\fR\x03buf\"8\n" +
	"$ResponseRecorderNgdotWriteStringArgs\x12\x10\n" +
	"\x03str\x18\x01 \x01(\tR\x03str\":\n" +
	"$ResponseRecorderNgdotWriteHeaderArgs\x12\x12\n" +
	"\x04code\x18\x01 \x01(\x03R\x04code\" \n" +
	"\x1eResponseRecorderNgdotFlushArgs\"!\n" +
	"\x1fResponseRecorderNgdotResultArgs\"\x16\n" +
	"\x14ServerNgdotStartArgs\"\x19\n" +
	"\x17ServerNgdotStartTLSArgs\"\x16\n" +
	"\x14ServerNgdotCloseArgs\"'\n" +
	"%ServerNgdotCloseClientConnectionsArgs\"\x1c\n" +
	"\x1aServerNgdotCertificateArgs\"\x17\n" +
	"\x15ServerNgdotClientArgs\"\xf7\n" +
	"\n" +
	"\fNgoloFuzzOne\x12;\n" +
	"\n" +
	"NewRequest\x18\x01 \x01(\v2\x19.ngolofuzz.NewRequestArgsH\x00R\n" +
	"NewRequest\x12>\n" +
	"\vNewRecorder\x18\x02 \x01(\v2\x1a.ngolofuzz.NewRecorderArgsH\x00R\vNewRecorder\x12n\n" +
	"\x1bResponseRecorderNgdotHeader\x18\x03 \x01(\v2*.ngolofuzz.ResponseRecorderNgdotHeaderArgsH\x00R\x1bResponseRecorderNgdotHeader\x12k\n" +
	"\x1aResponseRecorderNgdotWrite\x18\x04 \x01(\v2).ngolofuzz.ResponseRecorderNgdotWriteArgsH\x00R\x1aResponseRecorderNgdotWrite\x12}\n" +
	" ResponseRecorderNgdotWriteString\x18\x05 \x01(\v2/.ngolofuzz.ResponseRecorderNgdotWriteStringArgsH\x00R ResponseRecorderNgdotWriteString\x12}\n" +
	" ResponseRecorderNgdotWriteHeader\x18\x06 \x01(\v2/.ngolofuzz.ResponseRecorderNgdotWriteHeaderArgsH\x00R ResponseRecorderNgdotWriteHeader\x12k\n" +
	"\x1aResponseRecorderNgdotFlush\x18\a \x01(\v2).ngolofuzz.ResponseRecorderNgdotFlushArgsH\x00R\x1aResponseRecorderNgdotFlush\x12n\n" +
	"\x1bResponseRecorderNgdotResult\x18\b \x01(\v2*.ngolofuzz.ResponseRecorderNgdotResultArgsH\x00R\x1bResponseRecorderNgdotResult\x12M\n" +
	"\x10ServerNgdotStart\x18\t \x01(\v2\x1f.ngolofuzz.ServerNgdotStartArgsH\x00R\x10ServerNgdotStart\x12V\n" +
	"\x13ServerNgdotStartTLS\x18\n" +
	" \x01(\v2\".ngolofuzz.ServerNgdotStartTLSArgsH\x00R\x13ServerNgdotStartTLS\x12M\n" +
	"\x10ServerNgdotClose\x18\v \x01(\v2\x1f.ngolofuzz.ServerNgdotCloseArgsH\x00R\x10ServerNgdotClose\x12\x80\x01\n" +
	"!ServerNgdotCloseClientConnections\x18\f \x01(\v20.ngolofuzz.ServerNgdotCloseClientConnectionsArgsH\x00R!ServerNgdotCloseClientConnections\x12_\n" +
	"\x16ServerNgdotCertificate\x18\r \x01(\v2%.ngolofuzz.ServerNgdotCertificateArgsH\x00R\x16ServerNgdotCertificate\x12P\n" +
	"\x11ServerNgdotClient\x18\x0e \x01(\v2 .ngolofuzz.ServerNgdotClientArgsH\x00R\x11ServerNgdotClientB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1eZ\x1c./;fuzz_ng_net_http_httptestb\x06proto3"

var (
	// file_ngolofuzz_proto_rawDescOnce guards the one-time gzip compression
	// of the raw descriptor.
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzips the raw descriptor once and
// returns the cached bytes (used by the deprecated Descriptor methods).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds one MessageInfo slot per message; the
// indexes here must match the []int{N} used by the Descriptor methods above.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 17)

// file_ngolofuzz_proto_goTypes maps descriptor message indexes to Go types.
var file_ngolofuzz_proto_goTypes = []any{
	(*NewRequestArgs)(nil),                        // 0: ngolofuzz.NewRequestArgs
	(*NewRecorderArgs)(nil),                       // 1: ngolofuzz.NewRecorderArgs
	(*ResponseRecorderNgdotHeaderArgs)(nil),       // 2: ngolofuzz.ResponseRecorderNgdotHeaderArgs
	(*ResponseRecorderNgdotWriteArgs)(nil),        // 3: ngolofuzz.ResponseRecorderNgdotWriteArgs
	(*ResponseRecorderNgdotWriteStringArgs)(nil),  // 4: ngolofuzz.ResponseRecorderNgdotWriteStringArgs
	(*ResponseRecorderNgdotWriteHeaderArgs)(nil),  // 5: ngolofuzz.ResponseRecorderNgdotWriteHeaderArgs
	(*ResponseRecorderNgdotFlushArgs)(nil),        // 6: ngolofuzz.ResponseRecorderNgdotFlushArgs
	(*ResponseRecorderNgdotResultArgs)(nil),       // 7: ngolofuzz.ResponseRecorderNgdotResultArgs
	(*ServerNgdotStartArgs)(nil),                  // 8: ngolofuzz.ServerNgdotStartArgs
	(*ServerNgdotStartTLSArgs)(nil),               // 9: ngolofuzz.ServerNgdotStartTLSArgs
	(*ServerNgdotCloseArgs)(nil),                  // 10: ngolofuzz.ServerNgdotCloseArgs
	(*ServerNgdotCloseClientConnectionsArgs)(nil), // 11: ngolofuzz.ServerNgdotCloseClientConnectionsArgs
	(*ServerNgdotCertificateArgs)(nil),            // 12: ngolofuzz.ServerNgdotCertificateArgs
	(*ServerNgdotClientArgs)(nil),                 // 13: ngolofuzz.ServerNgdotClientArgs
	(*NgoloFuzzOne)(nil),                          // 14: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                          // 15: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                         // 16: ngolofuzz.NgoloFuzzList
}

// file_ngolofuzz_proto_depIdxs encodes field -> type dependencies plus the
// sub-list boundaries the protobuf runtime expects (see trailing comments).
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.NewRequest:type_name -> ngolofuzz.NewRequestArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.NewRecorder:type_name -> ngolofuzz.NewRecorderArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.ResponseRecorderNgdotHeader:type_name -> ngolofuzz.ResponseRecorderNgdotHeaderArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.ResponseRecorderNgdotWrite:type_name -> ngolofuzz.ResponseRecorderNgdotWriteArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.ResponseRecorderNgdotWriteString:type_name -> ngolofuzz.ResponseRecorderNgdotWriteStringArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.ResponseRecorderNgdotWriteHeader:type_name -> ngolofuzz.ResponseRecorderNgdotWriteHeaderArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.ResponseRecorderNgdotFlush:type_name -> ngolofuzz.ResponseRecorderNgdotFlushArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.ResponseRecorderNgdotResult:type_name -> ngolofuzz.ResponseRecorderNgdotResultArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.ServerNgdotStart:type_name -> ngolofuzz.ServerNgdotStartArgs
	9,  // 9: ngolofuzz.NgoloFuzzOne.ServerNgdotStartTLS:type_name -> ngolofuzz.ServerNgdotStartTLSArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.ServerNgdotClose:type_name -> ngolofuzz.ServerNgdotCloseArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.ServerNgdotCloseClientConnections:type_name -> ngolofuzz.ServerNgdotCloseClientConnectionsArgs
	12, // 12: ngolofuzz.NgoloFuzzOne.ServerNgdotCertificate:type_name -> ngolofuzz.ServerNgdotCertificateArgs
	13, // 13: ngolofuzz.NgoloFuzzOne.ServerNgdotClient:type_name -> ngolofuzz.ServerNgdotClientArgs
	14, // 14: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	15, // [15:15] is the sub-list for method output_type
	15, // [15:15] is the sub-list for method input_type
	15, // [15:15] is the sub-list for extension type_name
	15, // [15:15] is the sub-list for extension extendee
	0,  // [0:15] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the generated types with the protobuf
// runtime. It is idempotent: a second call returns immediately once
// File_ngolofuzz_proto is set.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Register the oneof wrapper types for the two oneof-bearing messages
	// (slots 14 and 15) so the runtime can unmarshal their cases.
	file_ngolofuzz_proto_msgTypes[14].OneofWrappers = []any{
		(*NgoloFuzzOne_NewRequest)(nil),
		(*NgoloFuzzOne_NewRecorder)(nil),
		(*NgoloFuzzOne_ResponseRecorderNgdotHeader)(nil),
		(*NgoloFuzzOne_ResponseRecorderNgdotWrite)(nil),
		(*NgoloFuzzOne_ResponseRecorderNgdotWriteString)(nil),
		(*NgoloFuzzOne_ResponseRecorderNgdotWriteHeader)(nil),
		(*NgoloFuzzOne_ResponseRecorderNgdotFlush)(nil),
		(*NgoloFuzzOne_ResponseRecorderNgdotResult)(nil),
		(*NgoloFuzzOne_ServerNgdotStart)(nil),
		(*NgoloFuzzOne_ServerNgdotStartTLS)(nil),
		(*NgoloFuzzOne_ServerNgdotClose)(nil),
		(*NgoloFuzzOne_ServerNgdotCloseClientConnections)(nil),
		(*NgoloFuzzOne_ServerNgdotCertificate)(nil),
		(*NgoloFuzzOne_ServerNgdotClient)(nil),
	}
	file_ngolofuzz_proto_msgTypes[15].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	// Anonymous marker type used only to resolve this package's import path.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   17,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release init-only tables so the GC can reclaim them.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_net_http_pprof
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"net/http/pprof"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub: Read serves bytes from a fixed
// buffer and Write discards everything, letting fuzzers exercise network
// code without real sockets.
type FuzzingConn struct {
	buf    []byte // data served to Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread chunk of c.buf into b. It returns io.EOF once
// the buffer is exhausted, per the io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// Bytes still unread in the buffer.
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything: fill it completely and advance.
		// BUG FIX: the original compared len(b) against len(c.buf)+c.offset,
		// which for offset > 0 took this branch even when b could hold all
		// remaining bytes, over-reporting n and advancing offset past the
		// data actually copied.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b is large enough for all remaining data.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
type FuzzingAddr struct{}
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address; the value is never meaningful.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address; the value is never meaningful.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: deadlines are irrelevant for in-memory reads.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzzer input a in a FuzzingConn ready to be
// read from offset zero.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the byte slice a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int width.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (mod 2^16).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune decodes and returns the first rune of s, or NUL for an empty
// string (range over a string yields decoded runes, U+FFFD on bad UTF-8).
func GetRune(s string) rune {
	var first rune // zero value '\x00' covers the empty-string case
	for _, c := range s {
		first = c
		break
	}
	return first
}
// FuzzNG_valid is the fuzzer entry point for inputs already known to be
// valid protobufs (e.g. produced by libprotobuf-mutator); a failed
// unmarshal is therefore a harness bug and panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the exercised API (treated as
	// expected rejections); re-raise anything else as a real crash.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: expected, non-crash panic
			default:
				panic(r)
			}
		}
	}()
	// Force a collection so memory bugs surface close to their cause.
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point when the input may not be a valid
// protobuf; undecodable inputs are simply rejected (return 0).
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics from the exercised API; re-raise anything else.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: expected, non-crash panic
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump below.
var initialized bool

// FuzzNG_List replays the decoded call list against net/http/pprof.
// Returns 1 when the whole list was executed, 0 when truncated.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On first run, optionally dump a Go reproducer of this input to
		// the file named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the number of replayed calls to keep iterations fast.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Handler:
			pprof.Handler(a.Handler.Name)
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go reproducer of the call list to w.
// Write errors are deliberately ignored: the dump is best-effort debugging
// output only.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Handler:
			w.WriteString(fmt.Sprintf("pprof.Handler(%#+v)\n", a.Handler.Name))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_net_http_pprof
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type HandlerArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HandlerArgs) Reset() {
*x = HandlerArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HandlerArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HandlerArgs) ProtoMessage() {}
func (x *HandlerArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HandlerArgs.ProtoReflect.Descriptor instead.
func (*HandlerArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *HandlerArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Handler
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetHandler() *HandlerArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Handler); ok {
return x.Handler
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Handler struct {
Handler *HandlerArgs `protobuf:"bytes,1,opt,name=Handler,proto3,oneof"`
}
func (*NgoloFuzzOne_Handler) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"!\n" +
"\vHandlerArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"J\n" +
"\fNgoloFuzzOne\x122\n" +
"\aHandler\x18\x01 \x01(\v2\x16.ngolofuzz.HandlerArgsH\x00R\aHandlerB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_net_http_pprofb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_ngolofuzz_proto_goTypes = []any{
(*HandlerArgs)(nil), // 0: ngolofuzz.HandlerArgs
(*NgoloFuzzOne)(nil), // 1: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 2: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 3: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.Handler:type_name -> ngolofuzz.HandlerArgs
1, // 1: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[1].OneofWrappers = []any{
(*NgoloFuzzOne_Handler)(nil),
}
file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_net_mail
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"net/mail"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stand-in for a net.Conn: Read serves the
// fuzzer-supplied bytes in buf, Write discards everything.
type FuzzingConn struct {
	buf    []byte // data served to Read
	offset int    // current read position within buf
}

// Read copies the next chunk of buf into b, returning io.EOF once the
// buffer is exhausted.
//
// BUG FIX: the partial-read test previously compared len(b) against
// len(c.buf)+c.offset (the remaining bytes are len(c.buf)-c.offset), so a
// Read with a buffer larger than the remainder reported n == len(b) even
// though only the remainder was copied, exposing uninitialized bytes to
// the caller and violating the io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	if len(b) < len(c.buf)-c.offset {
		// b is smaller than what remains: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b can hold everything that remains: drain the buffer.
	copy(b, c.buf[c.offset:])
	r := len(c.buf) - c.offset
	c.offset = len(c.buf)
	return r, nil
}
// Write discards b and pretends the whole slice was written; the harness
// never inspects outbound traffic.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection as drained so every later Read returns io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a fixed placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}

// Network reports a constant fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a constant textual form of the fake address.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address; the value is never meaningful.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address; the value is never meaningful.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: deadlines are irrelevant for in-memory reads.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzzer input a in a FuzzingConn ready to be
// read from offset zero.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the byte slice a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int width.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (mod 2^16).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune decodes and returns the first rune of s, or NUL for an empty
// string (range over a string yields decoded runes, U+FFFD on bad UTF-8).
func GetRune(s string) rune {
	var first rune // zero value '\x00' covers the empty-string case
	for _, c := range s {
		first = c
		break
	}
	return first
}
// FuzzNG_valid is the fuzzer entry point for inputs already known to be
// valid protobufs (e.g. produced by libprotobuf-mutator); a failed
// unmarshal is therefore a harness bug and panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the exercised API (treated as
	// expected rejections); re-raise anything else as a real crash.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: expected, non-crash panic
			default:
				panic(r)
			}
		}
	}()
	// Force a collection so memory bugs surface close to their cause.
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point when the input may not be a valid
// protobuf; undecodable inputs are simply rejected (return 0).
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics from the exercised API; re-raise anything else.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// do nothing: expected, non-crash panic
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump below.
var initialized bool

// FuzzNG_List replays the decoded call list against net/mail.
// Returns 1 when the whole list was executed, 0 on truncation or when any
// replayed call returns an error.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On first run, optionally dump a Go reproducer of this input to
		// the file named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// NOTE(review): AddressResults is declared but never appended to — the
	// *mail.Address results of ParseAddress/ParseAddressList are discarded
	// below — so the AddressNgdotString case always hits the empty check
	// and continues. Looks like a generator gap; confirm against the
	// ngolo-fuzzing templates before changing behavior.
	var AddressResults []*mail.Address
	AddressResultsIndex := 0
	for l := range gen.List {
		// Cap the number of replayed calls to keep iterations fast.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ReadMessage:
			arg0 := bytes.NewReader(a.ReadMessage.R)
			_, r1 := mail.ReadMessage(arg0)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ParseDate:
			_, r1 := mail.ParseDate(a.ParseDate.Date)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ParseAddress:
			_, r1 := mail.ParseAddress(a.ParseAddress.Address)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ParseAddressList:
			_, r1 := mail.ParseAddressList(a.ParseAddressList.List)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_AddressNgdotString:
			// Round-robin over previously produced addresses (currently
			// always empty; see NOTE above).
			if len(AddressResults) == 0 {
				continue
			}
			arg0 := AddressResults[AddressResultsIndex]
			AddressResultsIndex = (AddressResultsIndex + 1) % len(AddressResults)
			arg0.String()
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go reproducer of the call list to w.
// Write errors are deliberately ignored: the dump is best-effort debugging
// output only.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// AddressNb counts addresses available for round-robin reuse; it is
	// never incremented here, mirroring FuzzNG_List's unpopulated
	// AddressResults.
	AddressNb := 0
	AddressResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_ReadMessage:
			w.WriteString(fmt.Sprintf("mail.ReadMessage(bytes.NewReader(%#+v))\n", a.ReadMessage.R))
		case *NgoloFuzzOne_ParseDate:
			w.WriteString(fmt.Sprintf("mail.ParseDate(%#+v)\n", a.ParseDate.Date))
		case *NgoloFuzzOne_ParseAddress:
			w.WriteString(fmt.Sprintf("mail.ParseAddress(%#+v)\n", a.ParseAddress.Address))
		case *NgoloFuzzOne_ParseAddressList:
			w.WriteString(fmt.Sprintf("mail.ParseAddressList(%#+v)\n", a.ParseAddressList.List))
		case *NgoloFuzzOne_AddressNgdotString:
			if AddressNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Address%d.String()\n", AddressResultsIndex))
			AddressResultsIndex = (AddressResultsIndex + 1) % AddressNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_net_mail
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ReadMessageArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R []byte `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReadMessageArgs) Reset() {
*x = ReadMessageArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReadMessageArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadMessageArgs) ProtoMessage() {}
func (x *ReadMessageArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadMessageArgs.ProtoReflect.Descriptor instead.
func (*ReadMessageArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *ReadMessageArgs) GetR() []byte {
if x != nil {
return x.R
}
return nil
}
type ParseDateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Date string `protobuf:"bytes,1,opt,name=date,proto3" json:"date,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ParseDateArgs) Reset() {
*x = ParseDateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ParseDateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParseDateArgs) ProtoMessage() {}
func (x *ParseDateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseDateArgs.ProtoReflect.Descriptor instead.
func (*ParseDateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *ParseDateArgs) GetDate() string {
if x != nil {
return x.Date
}
return ""
}
type ParseAddressArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ParseAddressArgs) Reset() {
*x = ParseAddressArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ParseAddressArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParseAddressArgs) ProtoMessage() {}
func (x *ParseAddressArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseAddressArgs.ProtoReflect.Descriptor instead.
func (*ParseAddressArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *ParseAddressArgs) GetAddress() string {
if x != nil {
return x.Address
}
return ""
}
type ParseAddressListArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
List string `protobuf:"bytes,1,opt,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ParseAddressListArgs) Reset() {
*x = ParseAddressListArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ParseAddressListArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParseAddressListArgs) ProtoMessage() {}
func (x *ParseAddressListArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseAddressListArgs.ProtoReflect.Descriptor instead.
func (*ParseAddressListArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *ParseAddressListArgs) GetList() string {
if x != nil {
return x.List
}
return ""
}
type AddressNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *AddressNgdotStringArgs) Reset() {
*x = AddressNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *AddressNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AddressNgdotStringArgs) ProtoMessage() {}
func (x *AddressNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AddressNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*AddressNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_ReadMessage
// *NgoloFuzzOne_ParseDate
// *NgoloFuzzOne_ParseAddress
// *NgoloFuzzOne_ParseAddressList
// *NgoloFuzzOne_AddressNgdotString
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetReadMessage() *ReadMessageArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReadMessage); ok {
return x.ReadMessage
}
}
return nil
}
func (x *NgoloFuzzOne) GetParseDate() *ParseDateArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ParseDate); ok {
return x.ParseDate
}
}
return nil
}
func (x *NgoloFuzzOne) GetParseAddress() *ParseAddressArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ParseAddress); ok {
return x.ParseAddress
}
}
return nil
}
func (x *NgoloFuzzOne) GetParseAddressList() *ParseAddressListArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ParseAddressList); ok {
return x.ParseAddressList
}
}
return nil
}
func (x *NgoloFuzzOne) GetAddressNgdotString() *AddressNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AddressNgdotString); ok {
return x.AddressNgdotString
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_ReadMessage struct {
ReadMessage *ReadMessageArgs `protobuf:"bytes,1,opt,name=ReadMessage,proto3,oneof"`
}
type NgoloFuzzOne_ParseDate struct {
ParseDate *ParseDateArgs `protobuf:"bytes,2,opt,name=ParseDate,proto3,oneof"`
}
type NgoloFuzzOne_ParseAddress struct {
ParseAddress *ParseAddressArgs `protobuf:"bytes,3,opt,name=ParseAddress,proto3,oneof"`
}
type NgoloFuzzOne_ParseAddressList struct {
ParseAddressList *ParseAddressListArgs `protobuf:"bytes,4,opt,name=ParseAddressList,proto3,oneof"`
}
type NgoloFuzzOne_AddressNgdotString struct {
AddressNgdotString *AddressNgdotStringArgs `protobuf:"bytes,5,opt,name=AddressNgdotString,proto3,oneof"`
}
func (*NgoloFuzzOne_ReadMessage) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ParseDate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ParseAddress) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ParseAddressList) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AddressNgdotString) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1f\n" +
"\x0fReadMessageArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"#\n" +
"\rParseDateArgs\x12\x12\n" +
"\x04date\x18\x01 \x01(\tR\x04date\",\n" +
"\x10ParseAddressArgs\x12\x18\n" +
"\aaddress\x18\x01 \x01(\tR\aaddress\"*\n" +
"\x14ParseAddressListArgs\x12\x12\n" +
"\x04list\x18\x01 \x01(\tR\x04list\"\x18\n" +
"\x16AddressNgdotStringArgs\"\xf7\x02\n" +
"\fNgoloFuzzOne\x12>\n" +
"\vReadMessage\x18\x01 \x01(\v2\x1a.ngolofuzz.ReadMessageArgsH\x00R\vReadMessage\x128\n" +
"\tParseDate\x18\x02 \x01(\v2\x18.ngolofuzz.ParseDateArgsH\x00R\tParseDate\x12A\n" +
"\fParseAddress\x18\x03 \x01(\v2\x1b.ngolofuzz.ParseAddressArgsH\x00R\fParseAddress\x12M\n" +
"\x10ParseAddressList\x18\x04 \x01(\v2\x1f.ngolofuzz.ParseAddressListArgsH\x00R\x10ParseAddressList\x12S\n" +
"\x12AddressNgdotString\x18\x05 \x01(\v2!.ngolofuzz.AddressNgdotStringArgsH\x00R\x12AddressNgdotStringB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x15Z\x13./;fuzz_ng_net_mailb\x06proto3"
// Lazy, one-time state backing file_ngolofuzz_proto_rawDescGZIP.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file
// descriptor exactly once; it backs the deprecated Descriptor() methods.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}

// Runtime type tables: one MessageInfo slot per message in the file.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 8)

// Go types in file order; indices match the comments.
var file_ngolofuzz_proto_goTypes = []any{
	(*ReadMessageArgs)(nil),        // 0: ngolofuzz.ReadMessageArgs
	(*ParseDateArgs)(nil),          // 1: ngolofuzz.ParseDateArgs
	(*ParseAddressArgs)(nil),       // 2: ngolofuzz.ParseAddressArgs
	(*ParseAddressListArgs)(nil),   // 3: ngolofuzz.ParseAddressListArgs
	(*AddressNgdotStringArgs)(nil), // 4: ngolofuzz.AddressNgdotStringArgs
	(*NgoloFuzzOne)(nil),           // 5: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),           // 6: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),          // 7: ngolofuzz.NgoloFuzzList
}

// Dependency index sub-lists used by the protobuf runtime to resolve
// field/method type references into file_ngolofuzz_proto_goTypes.
var file_ngolofuzz_proto_depIdxs = []int32{
	0, // 0: ngolofuzz.NgoloFuzzOne.ReadMessage:type_name -> ngolofuzz.ReadMessageArgs
	1, // 1: ngolofuzz.NgoloFuzzOne.ParseDate:type_name -> ngolofuzz.ParseDateArgs
	2, // 2: ngolofuzz.NgoloFuzzOne.ParseAddress:type_name -> ngolofuzz.ParseAddressArgs
	3, // 3: ngolofuzz.NgoloFuzzOne.ParseAddressList:type_name -> ngolofuzz.ParseAddressListArgs
	4, // 4: ngolofuzz.NgoloFuzzOne.AddressNgdotString:type_name -> ngolofuzz.AddressNgdotStringArgs
	5, // 5: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	6, // [6:6] is the sub-list for method output_type
	6, // [6:6] is the sub-list for method input_type
	6, // [6:6] is the sub-list for extension type_name
	6, // [6:6] is the sub-list for extension extendee
	0, // [0:6] is the sub-list for field type_name
}

func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the file's messages and oneof
// wrapper types with the protobuf runtime. Idempotent.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Oneof wrapper types for NgoloFuzzOne.item (msgTypes[5]).
	file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
		(*NgoloFuzzOne_ReadMessage)(nil),
		(*NgoloFuzzOne_ParseDate)(nil),
		(*NgoloFuzzOne_ParseAddress)(nil),
		(*NgoloFuzzOne_ParseAddressList)(nil),
		(*NgoloFuzzOne_AddressNgdotString)(nil),
	}
	// Oneof wrapper types for NgoloFuzzAny.item (msgTypes[6]).
	file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   8,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Allow the temporary tables to be garbage-collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_net_rpc_jsonrpc
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"net/rpc/jsonrpc"
"os"
"runtime"
"time"
)
// FuzzingConn is a fake net.Conn whose read side is served from a fixed
// in-memory buffer (the fuzzer-generated bytes); writes are accepted and
// discarded. It lets the harness drive connection-oriented APIs.
type FuzzingConn struct {
	buf    []byte // data served to Read
	offset int    // current read position within buf
}

// Read copies the next chunk of buf into b and reports the number of
// bytes actually copied; it returns io.EOF once buf is exhausted.
//
// Fix: the previous implementation compared len(b) < len(c.buf)+c.offset
// instead of the remaining byte count; when b was larger than the
// remaining data but smaller than len(buf)+offset it returned n=len(b)
// even though fewer bytes had been copied, violating io.Reader.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy returns min(len(b), remaining), so n never over-reports.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write pretends the full write succeeded and discards the data.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr used by FuzzingConn endpoints.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (c *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String reports a fixed fake address string.
func (c *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr { return new(FuzzingAddr) }

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr { return new(FuzzingAddr) }

// SetDeadline is a no-op; deadlines are meaningless on a memory buffer.
func (c *FuzzingConn) SetDeadline(time.Time) error { return nil }

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(time.Time) error { return nil }

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(time.Time) error { return nil }
// CreateFuzzingConn wraps a as the readable contents of a new FuzzingConn.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
// ConvertIntArray narrows each int64 to int (may truncate on 32-bit).
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 to its low 16 bits.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty.
// Invalid UTF-8 at the start decodes to utf8.RuneError, matching range.
func GetRune(s string) rune {
	if len(s) == 0 {
		return '\x00'
	}
	return []rune(s)[0]
}
// FuzzNG_valid assumes data is a valid NgoloFuzzList protobuf (as
// produced by the LPM mutator) and panics if it is not.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Harness code deliberately panics with string payloads; swallow
	// those, but propagate anything else as a real bug.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure decodes data that may or may not be a valid
// NgoloFuzzList protobuf; undecodable inputs are skipped silently.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	// Harness code deliberately panics with string payloads; swallow
	// those, but propagate anything else as a real bug.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup below.
var initialized bool

// FuzzNG_List replays the generated call list against net/rpc/jsonrpc.
// It returns 1 when the whole list executed, 0 otherwise.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// Optionally dump a Go-source reproducer for this input once,
		// to the file named by FUZZ_NG_REPRODUCER.
		if repro := os.Getenv("FUZZ_NG_REPRODUCER"); len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for i, one := range gen.List {
		// Cap the number of replayed calls per input.
		if i > 4096 {
			return 0
		}
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_Dial:
			_, err := jsonrpc.Dial(a.Dial.Network, a.Dial.Address)
			if err != nil {
				err.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the call list to w.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for _, one := range gen.List {
		if a, ok := one.Item.(*NgoloFuzzOne_Dial); ok {
			line := fmt.Sprintf("jsonrpc.Dial(%#+v, %#+v)\n", a.Dial.Network, a.Dial.Address)
			w.WriteString(line)
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_net_rpc_jsonrpc
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Generated version guards: fail the build if the linked protobuf
// runtime is older or newer than this generated code supports.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// DialArgs carries the (network, address) arguments for jsonrpc.Dial.
type DialArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Network       string                 `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"`
	Address       string                 `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *DialArgs) Reset() {
	*x = DialArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *DialArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DialArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *DialArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DialArgs.ProtoReflect.Descriptor instead.
func (*DialArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

func (x *DialArgs) GetNetwork() string {
	if x != nil {
		return x.Network
	}
	return ""
}

func (x *DialArgs) GetAddress() string {
	if x != nil {
		return x.Address
	}
	return ""
}

// NgoloFuzzOne wraps a single API call choice in a oneof.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_Dial
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetItem returns the populated oneof variant, or nil if none is set.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetDial returns the Dial variant's payload, or nil if another
// variant (or none) is set.
func (x *NgoloFuzzOne) GetDial() *DialArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Dial); ok {
			return x.Dial
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface for NgoloFuzzOne.item.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// NgoloFuzzOne_Dial is the oneof wrapper for the Dial field.
type NgoloFuzzOne_Dial struct {
	Dial *DialArgs `protobuf:"bytes,1,opt,name=Dial,proto3,oneof"`
}

func (*NgoloFuzzOne_Dial) isNgoloFuzzOne_Item() {}

// NgoloFuzzAny is a oneof over the protobuf scalar kinds, used by the
// generator to feed arbitrary primitive values to APIs.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetItem returns the populated oneof variant, or nil if none is set.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Variant getters: each returns the payload when its variant is set,
// otherwise the zero value.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface for NgoloFuzzAny.item.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// Oneof wrapper types, one per scalar kind.
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}

// NgoloFuzzList is the top-level message: an ordered list of API calls
// the harness replays in sequence.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to its zero state and rebinds its message info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in protobuf text format.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\">\n" +
	"\bDialArgs\x12\x18\n" +
	"\anetwork\x18\x01 \x01(\tR\anetwork\x12\x18\n" +
	"\aaddress\x18\x02 \x01(\tR\aaddress\"A\n" +
	"\fNgoloFuzzOne\x12)\n" +
	"\x04Dial\x18\x01 \x01(\v2\x13.ngolofuzz.DialArgsH\x00R\x04DialB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1cZ\x1a./;fuzz_ng_net_rpc_jsonrpcb\x06proto3"

// Lazy, one-time state backing file_ngolofuzz_proto_rawDescGZIP.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor
// exactly once; it backs the deprecated Descriptor() methods.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}

// Runtime type tables: one MessageInfo slot per message in the file.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 4)

// Go types in file order; indices match the comments.
var file_ngolofuzz_proto_goTypes = []any{
	(*DialArgs)(nil),      // 0: ngolofuzz.DialArgs
	(*NgoloFuzzOne)(nil),  // 1: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),  // 2: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil), // 3: ngolofuzz.NgoloFuzzList
}

// Dependency index sub-lists used by the protobuf runtime to resolve
// type references into file_ngolofuzz_proto_goTypes.
var file_ngolofuzz_proto_depIdxs = []int32{
	0, // 0: ngolofuzz.NgoloFuzzOne.Dial:type_name -> ngolofuzz.DialArgs
	1, // 1: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	2, // [2:2] is the sub-list for method output_type
	2, // [2:2] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the file's messages and oneof
// wrapper types with the protobuf runtime. Idempotent.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Oneof wrapper types for NgoloFuzzOne.item (msgTypes[1]).
	file_ngolofuzz_proto_msgTypes[1].OneofWrappers = []any{
		(*NgoloFuzzOne_Dial)(nil),
	}
	// Oneof wrapper types for NgoloFuzzAny.item (msgTypes[2]).
	file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   4,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Allow the temporary tables to be garbage-collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_net_smtp
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"net/smtp"
"os"
"runtime"
"time"
)
// FuzzingConn is a fake net.Conn whose read side is served from a fixed
// in-memory buffer (the fuzzer-generated bytes); writes are accepted and
// discarded. It lets the harness drive connection-oriented APIs.
type FuzzingConn struct {
	buf    []byte // data served to Read
	offset int    // current read position within buf
}

// Read copies the next chunk of buf into b and reports the number of
// bytes actually copied; it returns io.EOF once buf is exhausted.
//
// Fix: the previous implementation compared len(b) < len(c.buf)+c.offset
// instead of the remaining byte count; when b was larger than the
// remaining data but smaller than len(buf)+offset it returned n=len(b)
// even though fewer bytes had been copied, violating io.Reader.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy returns min(len(b), remaining), so n never over-reports.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write pretends the full write succeeded and discards the data.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr used by FuzzingConn endpoints.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (c *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String reports a fixed fake address string.
func (c *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr { return new(FuzzingAddr) }

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr { return new(FuzzingAddr) }

// SetDeadline is a no-op; deadlines are meaningless on a memory buffer.
func (c *FuzzingConn) SetDeadline(time.Time) error { return nil }

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(time.Time) error { return nil }

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(time.Time) error { return nil }
// CreateFuzzingConn wraps a as the readable contents of a new FuzzingConn.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
// ConvertIntArray narrows each int64 to int (may truncate on 32-bit).
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 to its low 16 bits.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty.
// Invalid UTF-8 at the start decodes to utf8.RuneError, matching range.
func GetRune(s string) rune {
	if len(s) == 0 {
		return '\x00'
	}
	return []rune(s)[0]
}
// FuzzNG_valid assumes data is a valid NgoloFuzzList protobuf (as
// produced by the LPM mutator) and panics if it is not.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Harness code deliberately panics with string payloads; swallow
	// those, but propagate anything else as a real bug.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure decodes data that may or may not be a valid
// NgoloFuzzList protobuf; undecodable inputs are skipped silently.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	// Harness code deliberately panics with string payloads; swallow
	// those, but propagate anything else as a real bug.
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup below.
var initialized bool

// FuzzNG_List replays the protobuf-generated call list against the
// net/smtp API. smtp.Auth and *smtp.Client values produced by earlier
// calls are pooled and handed out round-robin to later calls that
// consume them. Returns 1 when the whole list executed, 0 when a call
// errored out or the list exceeded the cap.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// Optionally dump a Go-source reproducer of this input once,
		// to the file named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Result pools and their round-robin cursors.
	var AuthResults []*smtp.Auth
	AuthResultsIndex := 0
	var ClientResults []*smtp.Client
	ClientResultsIndex := 0
	for l := range gen.List {
		// Cap the number of replayed calls per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_PlainAuth:
			r0 := smtp.PlainAuth(a.PlainAuth.Identity, a.PlainAuth.Username, a.PlainAuth.Password, a.PlainAuth.Host)
			AuthResults = append(AuthResults, &r0)
		case *NgoloFuzzOne_CRAMMD5Auth:
			r0 := smtp.CRAMMD5Auth(a.CRAMMD5Auth.Username, a.CRAMMD5Auth.Secret)
			AuthResults = append(AuthResults, &r0)
		case *NgoloFuzzOne_Dial:
			r0, r1 := smtp.Dial(a.Dial.Addr)
			if r0 != nil{
				ClientResults = append(ClientResults, r0)
			}
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_NewClient:
			// Feed the generated bytes to the client as its connection.
			arg0 := CreateFuzzingConn(a.NewClient.Conn)
			r0, r1 := smtp.NewClient(arg0, a.NewClient.Host)
			if r0 != nil{
				ClientResults = append(ClientResults, r0)
			}
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ClientNgdotClose:
			// Each Client* case below: skip when no client exists yet,
			// otherwise take the next pooled client round-robin.
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			r0 := arg0.Close()
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ClientNgdotHello:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			r0 := arg0.Hello(a.ClientNgdotHello.LocalName)
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ClientNgdotTLSConnectionState:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			arg0.TLSConnectionState()
		case *NgoloFuzzOne_ClientNgdotVerify:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			r0 := arg0.Verify(a.ClientNgdotVerify.Addr)
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ClientNgdotAuth:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			if len(AuthResults) == 0 {
				continue
			}
			arg1 := *AuthResults[AuthResultsIndex]
			AuthResultsIndex = (AuthResultsIndex + 1) % len(AuthResults)
			r0 := arg0.Auth(arg1)
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ClientNgdotMail:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			r0 := arg0.Mail(a.ClientNgdotMail.From)
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ClientNgdotRcpt:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			r0 := arg0.Rcpt(a.ClientNgdotRcpt.To)
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ClientNgdotData:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			_, r1 := arg0.Data()
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_SendMail:
			if len(AuthResults) == 0 {
				continue
			}
			arg1 := *AuthResults[AuthResultsIndex]
			AuthResultsIndex = (AuthResultsIndex + 1) % len(AuthResults)
			r0 := smtp.SendMail(a.SendMail.Addr, arg1, a.SendMail.From, a.SendMail.To, a.SendMail.Msg)
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ClientNgdotExtension:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			arg0.Extension(a.ClientNgdotExtension.Ext)
		case *NgoloFuzzOne_ClientNgdotReset:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			r0 := arg0.Reset()
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ClientNgdotNoop:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			r0 := arg0.Noop()
			if r0 != nil{
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ClientNgdotQuit:
			if len(ClientResults) == 0 {
				continue
			}
			arg0 := ClientResults[ClientResultsIndex]
			ClientResultsIndex = (ClientResultsIndex + 1) % len(ClientResults)
			r0 := arg0.Quit()
			if r0 != nil{
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the call list to w,
// mirroring FuzzNG_List's dispatch and round-robin indexing so the
// printed program performs the same sequence of net/smtp calls.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// Counters mirror the pools in FuzzNG_List: *Nb counts values
	// created so far, *Index is the round-robin cursor.
	AuthNb := 0
	AuthResultsIndex := 0
	ClientNb := 0
	ClientResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_PlainAuth:
			w.WriteString(fmt.Sprintf("Auth%d := smtp.PlainAuth(%#+v, %#+v, %#+v, %#+v)\n", AuthNb, a.PlainAuth.Identity, a.PlainAuth.Username, a.PlainAuth.Password, a.PlainAuth.Host))
			AuthNb = AuthNb + 1
		case *NgoloFuzzOne_CRAMMD5Auth:
			w.WriteString(fmt.Sprintf("Auth%d := smtp.CRAMMD5Auth(%#+v, %#+v)\n", AuthNb, a.CRAMMD5Auth.Username, a.CRAMMD5Auth.Secret))
			AuthNb = AuthNb + 1
		case *NgoloFuzzOne_Dial:
			w.WriteString(fmt.Sprintf("Client%d, _ := smtp.Dial(%#+v)\n", ClientNb, a.Dial.Addr))
			ClientNb = ClientNb + 1
		case *NgoloFuzzOne_NewClient:
			w.WriteString(fmt.Sprintf("Client%d, _ := smtp.NewClient(CreateFuzzingConn(%#+v), %#+v)\n", ClientNb, a.NewClient.Conn, a.NewClient.Host))
			ClientNb = ClientNb + 1
		case *NgoloFuzzOne_ClientNgdotClose:
			// Method cases: skip when no receiver value exists yet,
			// otherwise print the call on the next pooled value.
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Close()\n", ClientResultsIndex))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		case *NgoloFuzzOne_ClientNgdotHello:
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Hello(%#+v)\n", ClientResultsIndex, a.ClientNgdotHello.LocalName))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		case *NgoloFuzzOne_ClientNgdotTLSConnectionState:
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.TLSConnectionState()\n", ClientResultsIndex))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		case *NgoloFuzzOne_ClientNgdotVerify:
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Verify(%#+v)\n", ClientResultsIndex, a.ClientNgdotVerify.Addr))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		case *NgoloFuzzOne_ClientNgdotAuth:
			if ClientNb == 0 {
				continue
			}
			if AuthNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Auth(Auth%d)\n", ClientResultsIndex, (AuthResultsIndex + 0) % AuthNb))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
			AuthResultsIndex = (AuthResultsIndex + 1) % AuthNb
		case *NgoloFuzzOne_ClientNgdotMail:
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Mail(%#+v)\n", ClientResultsIndex, a.ClientNgdotMail.From))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		case *NgoloFuzzOne_ClientNgdotRcpt:
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Rcpt(%#+v)\n", ClientResultsIndex, a.ClientNgdotRcpt.To))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		case *NgoloFuzzOne_ClientNgdotData:
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Data()\n", ClientResultsIndex))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		case *NgoloFuzzOne_SendMail:
			if AuthNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("smtp.SendMail(%#+v, Auth%d, %#+v, %#+v, %#+v)\n", a.SendMail.Addr, (AuthResultsIndex + 0) % AuthNb, a.SendMail.From, a.SendMail.To, a.SendMail.Msg))
			AuthResultsIndex = (AuthResultsIndex + 1) % AuthNb
		case *NgoloFuzzOne_ClientNgdotExtension:
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Extension(%#+v)\n", ClientResultsIndex, a.ClientNgdotExtension.Ext))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		case *NgoloFuzzOne_ClientNgdotReset:
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Reset()\n", ClientResultsIndex))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		case *NgoloFuzzOne_ClientNgdotNoop:
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Noop()\n", ClientResultsIndex))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		case *NgoloFuzzOne_ClientNgdotQuit:
			if ClientNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Client%d.Quit()\n", ClientResultsIndex))
			ClientResultsIndex = (ClientResultsIndex + 1) % ClientNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_net_smtp
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type PlainAuthArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"`
Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"`
Host string `protobuf:"bytes,4,opt,name=host,proto3" json:"host,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PlainAuthArgs) Reset() {
*x = PlainAuthArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PlainAuthArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PlainAuthArgs) ProtoMessage() {}
func (x *PlainAuthArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PlainAuthArgs.ProtoReflect.Descriptor instead.
func (*PlainAuthArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *PlainAuthArgs) GetIdentity() string {
if x != nil {
return x.Identity
}
return ""
}
func (x *PlainAuthArgs) GetUsername() string {
if x != nil {
return x.Username
}
return ""
}
func (x *PlainAuthArgs) GetPassword() string {
if x != nil {
return x.Password
}
return ""
}
func (x *PlainAuthArgs) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
type CRAMMD5AuthArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CRAMMD5AuthArgs) Reset() {
*x = CRAMMD5AuthArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CRAMMD5AuthArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CRAMMD5AuthArgs) ProtoMessage() {}
func (x *CRAMMD5AuthArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CRAMMD5AuthArgs.ProtoReflect.Descriptor instead.
func (*CRAMMD5AuthArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *CRAMMD5AuthArgs) GetUsername() string {
if x != nil {
return x.Username
}
return ""
}
func (x *CRAMMD5AuthArgs) GetSecret() string {
if x != nil {
return x.Secret
}
return ""
}
type DialArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DialArgs) Reset() {
*x = DialArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DialArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DialArgs) ProtoMessage() {}
func (x *DialArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DialArgs.ProtoReflect.Descriptor instead.
func (*DialArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *DialArgs) GetAddr() string {
if x != nil {
return x.Addr
}
return ""
}
type NewClientArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Conn []byte `protobuf:"bytes,1,opt,name=conn,proto3" json:"conn,omitempty"`
Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewClientArgs) Reset() {
*x = NewClientArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewClientArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewClientArgs) ProtoMessage() {}
func (x *NewClientArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewClientArgs.ProtoReflect.Descriptor instead.
func (*NewClientArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NewClientArgs) GetConn() []byte {
if x != nil {
return x.Conn
}
return nil
}
func (x *NewClientArgs) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
// ClientNgdotCloseArgs is the generated protobuf message
// ngolofuzz.ClientNgdotCloseArgs (msgTypes index 4); it carries no fields.
type ClientNgdotCloseArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotCloseArgs) Reset() {
	*x = ClientNgdotCloseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotCloseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotCloseArgs as a protobuf message.
func (*ClientNgdotCloseArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotCloseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotCloseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// ClientNgdotHelloArgs is the generated protobuf message
// ngolofuzz.ClientNgdotHelloArgs (msgTypes index 5): a single localName string.
type ClientNgdotHelloArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	LocalName     string                 `protobuf:"bytes,1,opt,name=localName,proto3" json:"localName,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotHelloArgs) Reset() {
	*x = ClientNgdotHelloArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotHelloArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotHelloArgs as a protobuf message.
func (*ClientNgdotHelloArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotHelloArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotHelloArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotHelloArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetLocalName returns the localName field, or "" when x is nil.
func (x *ClientNgdotHelloArgs) GetLocalName() string {
	if x != nil {
		return x.LocalName
	}
	return ""
}
// ClientNgdotTLSConnectionStateArgs is the generated protobuf message
// ngolofuzz.ClientNgdotTLSConnectionStateArgs (msgTypes index 6); no fields.
type ClientNgdotTLSConnectionStateArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotTLSConnectionStateArgs) Reset() {
	*x = ClientNgdotTLSConnectionStateArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotTLSConnectionStateArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotTLSConnectionStateArgs as a protobuf message.
func (*ClientNgdotTLSConnectionStateArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotTLSConnectionStateArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotTLSConnectionStateArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotTLSConnectionStateArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// ClientNgdotVerifyArgs is the generated protobuf message
// ngolofuzz.ClientNgdotVerifyArgs (msgTypes index 7): a single addr string.
type ClientNgdotVerifyArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Addr          string                 `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotVerifyArgs) Reset() {
	*x = ClientNgdotVerifyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotVerifyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotVerifyArgs as a protobuf message.
func (*ClientNgdotVerifyArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotVerifyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotVerifyArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotVerifyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// GetAddr returns the addr field, or "" when x is nil.
func (x *ClientNgdotVerifyArgs) GetAddr() string {
	if x != nil {
		return x.Addr
	}
	return ""
}
// ClientNgdotAuthArgs is the generated protobuf message
// ngolofuzz.ClientNgdotAuthArgs (msgTypes index 8); it carries no fields.
type ClientNgdotAuthArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotAuthArgs) Reset() {
	*x = ClientNgdotAuthArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotAuthArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotAuthArgs as a protobuf message.
func (*ClientNgdotAuthArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotAuthArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotAuthArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotAuthArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// ClientNgdotMailArgs is the generated protobuf message
// ngolofuzz.ClientNgdotMailArgs (msgTypes index 9): a single from string.
type ClientNgdotMailArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	From          string                 `protobuf:"bytes,1,opt,name=from,proto3" json:"from,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotMailArgs) Reset() {
	*x = ClientNgdotMailArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotMailArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotMailArgs as a protobuf message.
func (*ClientNgdotMailArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotMailArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotMailArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotMailArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetFrom returns the from field, or "" when x is nil.
func (x *ClientNgdotMailArgs) GetFrom() string {
	if x != nil {
		return x.From
	}
	return ""
}
// ClientNgdotRcptArgs is the generated protobuf message
// ngolofuzz.ClientNgdotRcptArgs (msgTypes index 10): a single to string.
type ClientNgdotRcptArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	To            string                 `protobuf:"bytes,1,opt,name=to,proto3" json:"to,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotRcptArgs) Reset() {
	*x = ClientNgdotRcptArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotRcptArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotRcptArgs as a protobuf message.
func (*ClientNgdotRcptArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotRcptArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotRcptArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotRcptArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetTo returns the to field, or "" when x is nil.
func (x *ClientNgdotRcptArgs) GetTo() string {
	if x != nil {
		return x.To
	}
	return ""
}
// ClientNgdotDataArgs is the generated protobuf message
// ngolofuzz.ClientNgdotDataArgs (msgTypes index 11); it carries no fields.
type ClientNgdotDataArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotDataArgs) Reset() {
	*x = ClientNgdotDataArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotDataArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotDataArgs as a protobuf message.
func (*ClientNgdotDataArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotDataArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotDataArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotDataArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// SendMailArgs is the generated protobuf message ngolofuzz.SendMailArgs
// (msgTypes index 12): server address, sender, recipient list, and raw body.
type SendMailArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Addr          string                 `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"`
	From          string                 `protobuf:"bytes,2,opt,name=from,proto3" json:"from,omitempty"`
	To            []string               `protobuf:"bytes,3,rep,name=to,proto3" json:"to,omitempty"`
	Msg           []byte                 `protobuf:"bytes,4,opt,name=msg,proto3" json:"msg,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *SendMailArgs) Reset() {
	*x = SendMailArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *SendMailArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *SendMailArgs as a protobuf message.
func (*SendMailArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *SendMailArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SendMailArgs.ProtoReflect.Descriptor instead.
func (*SendMailArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// GetAddr returns the addr field, or "" when x is nil.
func (x *SendMailArgs) GetAddr() string {
	if x != nil {
		return x.Addr
	}
	return ""
}

// GetFrom returns the from field, or "" when x is nil.
func (x *SendMailArgs) GetFrom() string {
	if x != nil {
		return x.From
	}
	return ""
}

// GetTo returns the to field, or nil when x is nil.
func (x *SendMailArgs) GetTo() []string {
	if x != nil {
		return x.To
	}
	return nil
}

// GetMsg returns the msg field, or nil when x is nil.
func (x *SendMailArgs) GetMsg() []byte {
	if x != nil {
		return x.Msg
	}
	return nil
}
// ClientNgdotExtensionArgs is the generated protobuf message
// ngolofuzz.ClientNgdotExtensionArgs (msgTypes index 13): a single ext string.
type ClientNgdotExtensionArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Ext           string                 `protobuf:"bytes,1,opt,name=ext,proto3" json:"ext,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotExtensionArgs) Reset() {
	*x = ClientNgdotExtensionArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotExtensionArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotExtensionArgs as a protobuf message.
func (*ClientNgdotExtensionArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotExtensionArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotExtensionArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotExtensionArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// GetExt returns the ext field, or "" when x is nil.
func (x *ClientNgdotExtensionArgs) GetExt() string {
	if x != nil {
		return x.Ext
	}
	return ""
}
// ClientNgdotResetArgs is the generated protobuf message
// ngolofuzz.ClientNgdotResetArgs (msgTypes index 14); it carries no fields.
type ClientNgdotResetArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotResetArgs) Reset() {
	*x = ClientNgdotResetArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotResetArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotResetArgs as a protobuf message.
func (*ClientNgdotResetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotResetArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotResetArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// ClientNgdotNoopArgs is the generated protobuf message
// ngolofuzz.ClientNgdotNoopArgs (msgTypes index 15); it carries no fields.
type ClientNgdotNoopArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotNoopArgs) Reset() {
	*x = ClientNgdotNoopArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotNoopArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotNoopArgs as a protobuf message.
func (*ClientNgdotNoopArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotNoopArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotNoopArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotNoopArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// ClientNgdotQuitArgs is the generated protobuf message
// ngolofuzz.ClientNgdotQuitArgs (msgTypes index 16); it carries no fields.
type ClientNgdotQuitArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *ClientNgdotQuitArgs) Reset() {
	*x = ClientNgdotQuitArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ClientNgdotQuitArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ClientNgdotQuitArgs as a protobuf message.
func (*ClientNgdotQuitArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *ClientNgdotQuitArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientNgdotQuitArgs.ProtoReflect.Descriptor instead.
func (*ClientNgdotQuitArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// NgoloFuzzOne is the generated protobuf message ngolofuzz.NgoloFuzzOne
// (msgTypes index 17). Its single oneof field Item selects exactly one of
// the per-call argument messages below.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_PlainAuth
	//	*NgoloFuzzOne_CRAMMD5Auth
	//	*NgoloFuzzOne_Dial
	//	*NgoloFuzzOne_NewClient
	//	*NgoloFuzzOne_ClientNgdotClose
	//	*NgoloFuzzOne_ClientNgdotHello
	//	*NgoloFuzzOne_ClientNgdotTLSConnectionState
	//	*NgoloFuzzOne_ClientNgdotVerify
	//	*NgoloFuzzOne_ClientNgdotAuth
	//	*NgoloFuzzOne_ClientNgdotMail
	//	*NgoloFuzzOne_ClientNgdotRcpt
	//	*NgoloFuzzOne_ClientNgdotData
	//	*NgoloFuzzOne_SendMail
	//	*NgoloFuzzOne_ClientNgdotExtension
	//	*NgoloFuzzOne_ClientNgdotReset
	//	*NgoloFuzzOne_ClientNgdotNoop
	//	*NgoloFuzzOne_ClientNgdotQuit
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NgoloFuzzOne as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// GetItem returns the raw oneof wrapper currently stored in Item, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each GetXxx below returns the corresponding oneof variant's payload when
// Item currently holds that variant, and nil otherwise (including nil x).

func (x *NgoloFuzzOne) GetPlainAuth() *PlainAuthArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PlainAuth); ok {
			return x.PlainAuth
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCRAMMD5Auth() *CRAMMD5AuthArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CRAMMD5Auth); ok {
			return x.CRAMMD5Auth
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetDial() *DialArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Dial); ok {
			return x.Dial
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewClient() *NewClientArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewClient); ok {
			return x.NewClient
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotClose() *ClientNgdotCloseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotClose); ok {
			return x.ClientNgdotClose
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotHello() *ClientNgdotHelloArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotHello); ok {
			return x.ClientNgdotHello
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotTLSConnectionState() *ClientNgdotTLSConnectionStateArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotTLSConnectionState); ok {
			return x.ClientNgdotTLSConnectionState
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotVerify() *ClientNgdotVerifyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotVerify); ok {
			return x.ClientNgdotVerify
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotAuth() *ClientNgdotAuthArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotAuth); ok {
			return x.ClientNgdotAuth
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotMail() *ClientNgdotMailArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotMail); ok {
			return x.ClientNgdotMail
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotRcpt() *ClientNgdotRcptArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotRcpt); ok {
			return x.ClientNgdotRcpt
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotData() *ClientNgdotDataArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotData); ok {
			return x.ClientNgdotData
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetSendMail() *SendMailArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_SendMail); ok {
			return x.SendMail
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotExtension() *ClientNgdotExtensionArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotExtension); ok {
			return x.ClientNgdotExtension
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotReset() *ClientNgdotResetArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotReset); ok {
			return x.ClientNgdotReset
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotNoop() *ClientNgdotNoopArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotNoop); ok {
			return x.ClientNgdotNoop
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetClientNgdotQuit() *ClientNgdotQuitArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ClientNgdotQuit); ok {
			return x.ClientNgdotQuit
		}
	}
	return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every wrapper
// type that may be stored in NgoloFuzzOne.Item.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// One wrapper struct per oneof variant; each holds the payload for its
// protobuf field number (see the protobuf struct tags).

type NgoloFuzzOne_PlainAuth struct {
	PlainAuth *PlainAuthArgs `protobuf:"bytes,1,opt,name=PlainAuth,proto3,oneof"`
}

type NgoloFuzzOne_CRAMMD5Auth struct {
	CRAMMD5Auth *CRAMMD5AuthArgs `protobuf:"bytes,2,opt,name=CRAMMD5Auth,proto3,oneof"`
}

type NgoloFuzzOne_Dial struct {
	Dial *DialArgs `protobuf:"bytes,3,opt,name=Dial,proto3,oneof"`
}

type NgoloFuzzOne_NewClient struct {
	NewClient *NewClientArgs `protobuf:"bytes,4,opt,name=NewClient,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotClose struct {
	ClientNgdotClose *ClientNgdotCloseArgs `protobuf:"bytes,5,opt,name=ClientNgdotClose,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotHello struct {
	ClientNgdotHello *ClientNgdotHelloArgs `protobuf:"bytes,6,opt,name=ClientNgdotHello,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotTLSConnectionState struct {
	ClientNgdotTLSConnectionState *ClientNgdotTLSConnectionStateArgs `protobuf:"bytes,7,opt,name=ClientNgdotTLSConnectionState,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotVerify struct {
	ClientNgdotVerify *ClientNgdotVerifyArgs `protobuf:"bytes,8,opt,name=ClientNgdotVerify,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotAuth struct {
	ClientNgdotAuth *ClientNgdotAuthArgs `protobuf:"bytes,9,opt,name=ClientNgdotAuth,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotMail struct {
	ClientNgdotMail *ClientNgdotMailArgs `protobuf:"bytes,10,opt,name=ClientNgdotMail,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotRcpt struct {
	ClientNgdotRcpt *ClientNgdotRcptArgs `protobuf:"bytes,11,opt,name=ClientNgdotRcpt,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotData struct {
	ClientNgdotData *ClientNgdotDataArgs `protobuf:"bytes,12,opt,name=ClientNgdotData,proto3,oneof"`
}

type NgoloFuzzOne_SendMail struct {
	SendMail *SendMailArgs `protobuf:"bytes,13,opt,name=SendMail,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotExtension struct {
	ClientNgdotExtension *ClientNgdotExtensionArgs `protobuf:"bytes,14,opt,name=ClientNgdotExtension,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotReset struct {
	ClientNgdotReset *ClientNgdotResetArgs `protobuf:"bytes,15,opt,name=ClientNgdotReset,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotNoop struct {
	ClientNgdotNoop *ClientNgdotNoopArgs `protobuf:"bytes,16,opt,name=ClientNgdotNoop,proto3,oneof"`
}

type NgoloFuzzOne_ClientNgdotQuit struct {
	ClientNgdotQuit *ClientNgdotQuitArgs `protobuf:"bytes,17,opt,name=ClientNgdotQuit,proto3,oneof"`
}

// Empty marker methods that admit each wrapper into the oneof interface.
func (*NgoloFuzzOne_PlainAuth) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CRAMMD5Auth) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Dial) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewClient) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotClose) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotHello) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotTLSConnectionState) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotVerify) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotAuth) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotMail) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotRcpt) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotData) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_SendMail) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotExtension) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotReset) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotNoop) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ClientNgdotQuit) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the generated protobuf message ngolofuzz.NgoloFuzzAny
// (msgTypes index 18): a oneof over the scalar argument kinds.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NgoloFuzzAny as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// GetItem returns the raw oneof wrapper currently stored in Item, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each GetXxxArgs below returns the variant's value when Item currently
// holds that variant, otherwise the type's zero value (also for nil x).

func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented by every wrapper
// type that may be stored in NgoloFuzzAny.Item.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// One wrapper struct per scalar oneof variant.

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

// Empty marker methods that admit each wrapper into the oneof interface.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated protobuf message ngolofuzz.NgoloFuzzList
// (msgTypes index 19): a repeated sequence of NgoloFuzzOne items.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and rebinds its generated message info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[19]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NgoloFuzzList as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// the generated message info; a nil receiver yields the type-only view.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[19]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}

// GetList returns the list field, or nil when x is nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the protoreflect descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto, emitted verbatim by protoc-gen-go. Do not edit: the bytes
// must match the generated type/index tables below.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"w\n" +
	"\rPlainAuthArgs\x12\x1a\n" +
	"\bidentity\x18\x01 \x01(\tR\bidentity\x12\x1a\n" +
	"\busername\x18\x02 \x01(\tR\busername\x12\x1a\n" +
	"\bpassword\x18\x03 \x01(\tR\bpassword\x12\x12\n" +
	"\x04host\x18\x04 \x01(\tR\x04host\"E\n" +
	"\x0fCRAMMD5AuthArgs\x12\x1a\n" +
	"\busername\x18\x01 \x01(\tR\busername\x12\x16\n" +
	"\x06secret\x18\x02 \x01(\tR\x06secret\"\x1e\n" +
	"\bDialArgs\x12\x12\n" +
	"\x04addr\x18\x01 \x01(\tR\x04addr\"7\n" +
	"\rNewClientArgs\x12\x12\n" +
	"\x04conn\x18\x01 \x01(\fR\x04conn\x12\x12\n" +
	"\x04host\x18\x02 \x01(\tR\x04host\"\x16\n" +
	"\x14ClientNgdotCloseArgs\"4\n" +
	"\x14ClientNgdotHelloArgs\x12\x1c\n" +
	"\tlocalName\x18\x01 \x01(\tR\tlocalName\"#\n" +
	"!ClientNgdotTLSConnectionStateArgs\"+\n" +
	"\x15ClientNgdotVerifyArgs\x12\x12\n" +
	"\x04addr\x18\x01 \x01(\tR\x04addr\"\x15\n" +
	"\x13ClientNgdotAuthArgs\")\n" +
	"\x13ClientNgdotMailArgs\x12\x12\n" +
	"\x04from\x18\x01 \x01(\tR\x04from\"%\n" +
	"\x13ClientNgdotRcptArgs\x12\x0e\n" +
	"\x02to\x18\x01 \x01(\tR\x02to\"\x15\n" +
	"\x13ClientNgdotDataArgs\"X\n" +
	"\fSendMailArgs\x12\x12\n" +
	"\x04addr\x18\x01 \x01(\tR\x04addr\x12\x12\n" +
	"\x04from\x18\x02 \x01(\tR\x04from\x12\x0e\n" +
	"\x02to\x18\x03 \x03(\tR\x02to\x12\x10\n" +
	"\x03msg\x18\x04 \x01(\fR\x03msg\",\n" +
	"\x18ClientNgdotExtensionArgs\x12\x10\n" +
	"\x03ext\x18\x01 \x01(\tR\x03ext\"\x16\n" +
	"\x14ClientNgdotResetArgs\"\x15\n" +
	"\x13ClientNgdotNoopArgs\"\x15\n" +
	"\x13ClientNgdotQuitArgs\"\x84\n" +
	"\n" +
	"\fNgoloFuzzOne\x128\n" +
	"\tPlainAuth\x18\x01 \x01(\v2\x18.ngolofuzz.PlainAuthArgsH\x00R\tPlainAuth\x12>\n" +
	"\vCRAMMD5Auth\x18\x02 \x01(\v2\x1a.ngolofuzz.CRAMMD5AuthArgsH\x00R\vCRAMMD5Auth\x12)\n" +
	"\x04Dial\x18\x03 \x01(\v2\x13.ngolofuzz.DialArgsH\x00R\x04Dial\x128\n" +
	"\tNewClient\x18\x04 \x01(\v2\x18.ngolofuzz.NewClientArgsH\x00R\tNewClient\x12M\n" +
	"\x10ClientNgdotClose\x18\x05 \x01(\v2\x1f.ngolofuzz.ClientNgdotCloseArgsH\x00R\x10ClientNgdotClose\x12M\n" +
	"\x10ClientNgdotHello\x18\x06 \x01(\v2\x1f.ngolofuzz.ClientNgdotHelloArgsH\x00R\x10ClientNgdotHello\x12t\n" +
	"\x1dClientNgdotTLSConnectionState\x18\a \x01(\v2,.ngolofuzz.ClientNgdotTLSConnectionStateArgsH\x00R\x1dClientNgdotTLSConnectionState\x12P\n" +
	"\x11ClientNgdotVerify\x18\b \x01(\v2 .ngolofuzz.ClientNgdotVerifyArgsH\x00R\x11ClientNgdotVerify\x12J\n" +
	"\x0fClientNgdotAuth\x18\t \x01(\v2\x1e.ngolofuzz.ClientNgdotAuthArgsH\x00R\x0fClientNgdotAuth\x12J\n" +
	"\x0fClientNgdotMail\x18\n" +
	" \x01(\v2\x1e.ngolofuzz.ClientNgdotMailArgsH\x00R\x0fClientNgdotMail\x12J\n" +
	"\x0fClientNgdotRcpt\x18\v \x01(\v2\x1e.ngolofuzz.ClientNgdotRcptArgsH\x00R\x0fClientNgdotRcpt\x12J\n" +
	"\x0fClientNgdotData\x18\f \x01(\v2\x1e.ngolofuzz.ClientNgdotDataArgsH\x00R\x0fClientNgdotData\x125\n" +
	"\bSendMail\x18\r \x01(\v2\x17.ngolofuzz.SendMailArgsH\x00R\bSendMail\x12Y\n" +
	"\x14ClientNgdotExtension\x18\x0e \x01(\v2#.ngolofuzz.ClientNgdotExtensionArgsH\x00R\x14ClientNgdotExtension\x12M\n" +
	"\x10ClientNgdotReset\x18\x0f \x01(\v2\x1f.ngolofuzz.ClientNgdotResetArgsH\x00R\x10ClientNgdotReset\x12J\n" +
	"\x0fClientNgdotNoop\x18\x10 \x01(\v2\x1e.ngolofuzz.ClientNgdotNoopArgsH\x00R\x0fClientNgdotNoop\x12J\n" +
	"\x0fClientNgdotQuit\x18\x11 \x01(\v2\x1e.ngolofuzz.ClientNgdotQuitArgsH\x00R\x0fClientNgdotQuitB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x15Z\x13./;fuzz_ng_net_smtpb\x06proto3"
// Once-guarded cache for the gzip-compressed raw descriptor bytes.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file descriptor
// exactly once and returns the cached result; used by the deprecated
// Descriptor methods above.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds one MessageInfo slot per message in
// ngolofuzz.proto; the Reset/ProtoReflect methods above index into it.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 20)

// file_ngolofuzz_proto_goTypes maps descriptor indices to the Go types
// generated for each message; order must match the raw descriptor.
var file_ngolofuzz_proto_goTypes = []any{
	(*PlainAuthArgs)(nil),                     // 0: ngolofuzz.PlainAuthArgs
	(*CRAMMD5AuthArgs)(nil),                   // 1: ngolofuzz.CRAMMD5AuthArgs
	(*DialArgs)(nil),                          // 2: ngolofuzz.DialArgs
	(*NewClientArgs)(nil),                     // 3: ngolofuzz.NewClientArgs
	(*ClientNgdotCloseArgs)(nil),              // 4: ngolofuzz.ClientNgdotCloseArgs
	(*ClientNgdotHelloArgs)(nil),              // 5: ngolofuzz.ClientNgdotHelloArgs
	(*ClientNgdotTLSConnectionStateArgs)(nil), // 6: ngolofuzz.ClientNgdotTLSConnectionStateArgs
	(*ClientNgdotVerifyArgs)(nil),             // 7: ngolofuzz.ClientNgdotVerifyArgs
	(*ClientNgdotAuthArgs)(nil),               // 8: ngolofuzz.ClientNgdotAuthArgs
	(*ClientNgdotMailArgs)(nil),               // 9: ngolofuzz.ClientNgdotMailArgs
	(*ClientNgdotRcptArgs)(nil),               // 10: ngolofuzz.ClientNgdotRcptArgs
	(*ClientNgdotDataArgs)(nil),               // 11: ngolofuzz.ClientNgdotDataArgs
	(*SendMailArgs)(nil),                      // 12: ngolofuzz.SendMailArgs
	(*ClientNgdotExtensionArgs)(nil),          // 13: ngolofuzz.ClientNgdotExtensionArgs
	(*ClientNgdotResetArgs)(nil),              // 14: ngolofuzz.ClientNgdotResetArgs
	(*ClientNgdotNoopArgs)(nil),               // 15: ngolofuzz.ClientNgdotNoopArgs
	(*ClientNgdotQuitArgs)(nil),               // 16: ngolofuzz.ClientNgdotQuitArgs
	(*NgoloFuzzOne)(nil),                      // 17: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                      // 18: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                     // 19: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.PlainAuth:type_name -> ngolofuzz.PlainAuthArgs
1, // 1: ngolofuzz.NgoloFuzzOne.CRAMMD5Auth:type_name -> ngolofuzz.CRAMMD5AuthArgs
2, // 2: ngolofuzz.NgoloFuzzOne.Dial:type_name -> ngolofuzz.DialArgs
3, // 3: ngolofuzz.NgoloFuzzOne.NewClient:type_name -> ngolofuzz.NewClientArgs
4, // 4: ngolofuzz.NgoloFuzzOne.ClientNgdotClose:type_name -> ngolofuzz.ClientNgdotCloseArgs
5, // 5: ngolofuzz.NgoloFuzzOne.ClientNgdotHello:type_name -> ngolofuzz.ClientNgdotHelloArgs
6, // 6: ngolofuzz.NgoloFuzzOne.ClientNgdotTLSConnectionState:type_name -> ngolofuzz.ClientNgdotTLSConnectionStateArgs
7, // 7: ngolofuzz.NgoloFuzzOne.ClientNgdotVerify:type_name -> ngolofuzz.ClientNgdotVerifyArgs
8, // 8: ngolofuzz.NgoloFuzzOne.ClientNgdotAuth:type_name -> ngolofuzz.ClientNgdotAuthArgs
9, // 9: ngolofuzz.NgoloFuzzOne.ClientNgdotMail:type_name -> ngolofuzz.ClientNgdotMailArgs
10, // 10: ngolofuzz.NgoloFuzzOne.ClientNgdotRcpt:type_name -> ngolofuzz.ClientNgdotRcptArgs
11, // 11: ngolofuzz.NgoloFuzzOne.ClientNgdotData:type_name -> ngolofuzz.ClientNgdotDataArgs
12, // 12: ngolofuzz.NgoloFuzzOne.SendMail:type_name -> ngolofuzz.SendMailArgs
13, // 13: ngolofuzz.NgoloFuzzOne.ClientNgdotExtension:type_name -> ngolofuzz.ClientNgdotExtensionArgs
14, // 14: ngolofuzz.NgoloFuzzOne.ClientNgdotReset:type_name -> ngolofuzz.ClientNgdotResetArgs
15, // 15: ngolofuzz.NgoloFuzzOne.ClientNgdotNoop:type_name -> ngolofuzz.ClientNgdotNoopArgs
16, // 16: ngolofuzz.NgoloFuzzOne.ClientNgdotQuit:type_name -> ngolofuzz.ClientNgdotQuitArgs
17, // 17: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
18, // [18:18] is the sub-list for method output_type
18, // [18:18] is the sub-list for method input_type
18, // [18:18] is the sub-list for extension type_name
18, // [18:18] is the sub-list for extension extendee
0, // [0:18] is the sub-list for field type_name
}
// init registers the generated file descriptor at program start.
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init builds and registers the proto file exactly
// once, wiring the oneof wrapper types for the two oneof-bearing
// messages (NgoloFuzzOne at slot 17, NgoloFuzzAny at slot 18).
func file_ngolofuzz_proto_init() {
	// Idempotent: a second call is a no-op once the file is registered.
	if File_ngolofuzz_proto != nil {
		return
	}
	file_ngolofuzz_proto_msgTypes[17].OneofWrappers = []any{
		(*NgoloFuzzOne_PlainAuth)(nil),
		(*NgoloFuzzOne_CRAMMD5Auth)(nil),
		(*NgoloFuzzOne_Dial)(nil),
		(*NgoloFuzzOne_NewClient)(nil),
		(*NgoloFuzzOne_ClientNgdotClose)(nil),
		(*NgoloFuzzOne_ClientNgdotHello)(nil),
		(*NgoloFuzzOne_ClientNgdotTLSConnectionState)(nil),
		(*NgoloFuzzOne_ClientNgdotVerify)(nil),
		(*NgoloFuzzOne_ClientNgdotAuth)(nil),
		(*NgoloFuzzOne_ClientNgdotMail)(nil),
		(*NgoloFuzzOne_ClientNgdotRcpt)(nil),
		(*NgoloFuzzOne_ClientNgdotData)(nil),
		(*NgoloFuzzOne_SendMail)(nil),
		(*NgoloFuzzOne_ClientNgdotExtension)(nil),
		(*NgoloFuzzOne_ClientNgdotReset)(nil),
		(*NgoloFuzzOne_ClientNgdotNoop)(nil),
		(*NgoloFuzzOne_ClientNgdotQuit)(nil),
	}
	file_ngolofuzz_proto_msgTypes[18].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   20,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release build-time tables; the runtime keeps its own references.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_net_textproto
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"net/textproto"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in that serves
// fuzzer-provided bytes on Read and silently discards all Writes.
type FuzzingConn struct {
	buf    []byte // data returned by successive Read calls
	offset int    // number of bytes of buf already consumed
}
// Read copies the next unread bytes of c.buf into b, simulating a
// network read, and returns io.EOF once all data has been consumed.
//
// Bug fix: the original branched on `len(b) < len(c.buf)+c.offset`
// (plus instead of minus the offset), so once offset > 0 it could
// report n = len(b) while copying only the few remaining bytes —
// over-reporting n (the caller then consumes stale bytes of b) and
// advancing offset past len(c.buf). Using copy's return value for the
// accounting makes n always equal the bytes actually transferred.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written, so the code under
// test never blocks or errors on output.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}
// Close marks the connection as drained; subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address (required by net.Conn).
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address (required by net.Conn).
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn
// that serves them from the beginning (offset starts at zero).
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit these helper functions when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the byte slice a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a slice of int64 values to platform ints.
// Out-of-range values are truncated by the conversion, which is
// acceptable for fuzzing input.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows a slice of int64 values to uint16,
// truncating each value modulo 2^16 (standard Go conversion rules).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the front of s yields utf8.RuneError, matching the
// behavior of Go's rune decoding.
func GetRune(s string) rune {
	rs := []rune(s)
	if len(rs) == 0 {
		return '\x00'
	}
	return rs[0]
}
// FuzzNG_valid is the entry point for inputs guaranteed (by the
// protobuf mutator) to be valid protobufs; it panics if unmarshalling
// fails, since that would indicate a harness bug rather than a finding.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the exercised API; any other
	// panic value is treated as a real bug and re-raised.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point for raw fuzzer input: we are unsure
// the input is a valid protobuf, so unmarshal failures simply reject
// the input instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics raised by the exercised API; any other
	// panic value is treated as a real bug and re-raised.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-dump setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets gen as a small program: each list entry names
// one net/textproto API call to perform. Produced values (Readers,
// Conns, Writers, MIMEHeaders) are pooled and reused round-robin by
// later entries. Returns 1 when the whole list executed, 0 when an
// operation returned an error or the operation cap was hit.
func FuzzNG_List(gen *NgoloFuzzList) int {
	// On the first call only, optionally write a Go reproducer of this
	// input to the file named by the FUZZ_NG_REPRODUCER env var.
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Result pools plus a rotating cursor per pooled type.
	var ReaderResults []*textproto.Reader
	ReaderResultsIndex := 0
	var ConnResults []*textproto.Conn
	ConnResultsIndex := 0
	var WriterResults []*textproto.Writer
	WriterResultsIndex := 0
	var MIMEHeaderResults []*textproto.MIMEHeader
	MIMEHeaderResultsIndex := 0
	for l := range gen.List {
		// Cap the number of executed operations per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_MIMEHeaderNgdotAdd:
			// Each method-on-result case skips when its pool is empty.
			if len(MIMEHeaderResults) == 0 {
				continue
			}
			arg0 := MIMEHeaderResults[MIMEHeaderResultsIndex]
			MIMEHeaderResultsIndex = (MIMEHeaderResultsIndex + 1) % len(MIMEHeaderResults)
			arg0.Add(a.MIMEHeaderNgdotAdd.Key, a.MIMEHeaderNgdotAdd.Value)
		case *NgoloFuzzOne_MIMEHeaderNgdotSet:
			if len(MIMEHeaderResults) == 0 {
				continue
			}
			arg0 := MIMEHeaderResults[MIMEHeaderResultsIndex]
			MIMEHeaderResultsIndex = (MIMEHeaderResultsIndex + 1) % len(MIMEHeaderResults)
			arg0.Set(a.MIMEHeaderNgdotSet.Key, a.MIMEHeaderNgdotSet.Value)
		case *NgoloFuzzOne_MIMEHeaderNgdotGet:
			if len(MIMEHeaderResults) == 0 {
				continue
			}
			arg0 := MIMEHeaderResults[MIMEHeaderResultsIndex]
			MIMEHeaderResultsIndex = (MIMEHeaderResultsIndex + 1) % len(MIMEHeaderResults)
			arg0.Get(a.MIMEHeaderNgdotGet.Key)
		case *NgoloFuzzOne_MIMEHeaderNgdotValues:
			if len(MIMEHeaderResults) == 0 {
				continue
			}
			arg0 := MIMEHeaderResults[MIMEHeaderResultsIndex]
			MIMEHeaderResultsIndex = (MIMEHeaderResultsIndex + 1) % len(MIMEHeaderResults)
			arg0.Values(a.MIMEHeaderNgdotValues.Key)
		case *NgoloFuzzOne_MIMEHeaderNgdotDel:
			if len(MIMEHeaderResults) == 0 {
				continue
			}
			arg0 := MIMEHeaderResults[MIMEHeaderResultsIndex]
			MIMEHeaderResultsIndex = (MIMEHeaderResultsIndex + 1) % len(MIMEHeaderResults)
			arg0.Del(a.MIMEHeaderNgdotDel.Key)
		case *NgoloFuzzOne_NewReader:
			arg0 := CreateBufioReader(a.NewReader.R)
			r0 := textproto.NewReader(arg0)
			if r0 != nil {
				ReaderResults = append(ReaderResults, r0)
			}
		case *NgoloFuzzOne_ReaderNgdotReadLine:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.ReadLine()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReadLineBytes:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.ReadLineBytes()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReadContinuedLine:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.ReadContinuedLine()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReadContinuedLineBytes:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.ReadContinuedLineBytes()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReadCodeLine:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg1 := int(a.ReaderNgdotReadCodeLine.ExpectCode)
			_, _, r2 := arg0.ReadCodeLine(arg1)
			if r2 != nil {
				r2.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReadResponse:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg1 := int(a.ReaderNgdotReadResponse.ExpectCode)
			_, _, r2 := arg0.ReadResponse(arg1)
			if r2 != nil {
				r2.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotDotReader:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg0.DotReader()
		case *NgoloFuzzOne_ReaderNgdotReadDotBytes:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.ReadDotBytes()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReadDotLines:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.ReadDotLines()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReadMIMEHeader:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			// Note: the header is pooled even when r1 != nil (partial
			// results are still interesting to exercise).
			r0, r1 := arg0.ReadMIMEHeader()
			MIMEHeaderResults = append(MIMEHeaderResults, &r0)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_CanonicalMIMEHeaderKey:
			textproto.CanonicalMIMEHeaderKey(a.CanonicalMIMEHeaderKey.S)
		case *NgoloFuzzOne_ConnNgdotClose:
			if len(ConnResults) == 0 {
				continue
			}
			arg0 := ConnResults[ConnResultsIndex]
			ConnResultsIndex = (ConnResultsIndex + 1) % len(ConnResults)
			r0 := arg0.Close()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_Dial:
			// Real network dial; expected to fail in sandboxed fuzzing.
			r0, r1 := textproto.Dial(a.Dial.Network, a.Dial.Addr)
			if r0 != nil {
				ConnResults = append(ConnResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_TrimString:
			textproto.TrimString(a.TrimString.S)
		case *NgoloFuzzOne_TrimBytes:
			textproto.TrimBytes(a.TrimBytes.B)
		case *NgoloFuzzOne_WriterNgdotDotWriter:
			if len(WriterResults) == 0 {
				continue
			}
			arg0 := WriterResults[WriterResultsIndex]
			WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
			arg0.DotWriter()
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go reproducer for gen to w,
// mirroring the dispatch performed by FuzzNG_List (same pooling and
// round-robin indices) without executing any textproto call.
// WriteString errors are intentionally ignored: this is best-effort
// debugging output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// Nb counters track how many results of each type would exist;
	// Index cursors mirror the round-robin selection in FuzzNG_List.
	ReaderNb := 0
	ReaderResultsIndex := 0
	ConnNb := 0
	ConnResultsIndex := 0
	WriterNb := 0
	WriterResultsIndex := 0
	MIMEHeaderNb := 0
	MIMEHeaderResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_MIMEHeaderNgdotAdd:
			if MIMEHeaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("MIMEHeader%d.Add(%#+v, %#+v)\n", MIMEHeaderResultsIndex, a.MIMEHeaderNgdotAdd.Key, a.MIMEHeaderNgdotAdd.Value))
			MIMEHeaderResultsIndex = (MIMEHeaderResultsIndex + 1) % MIMEHeaderNb
		case *NgoloFuzzOne_MIMEHeaderNgdotSet:
			if MIMEHeaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("MIMEHeader%d.Set(%#+v, %#+v)\n", MIMEHeaderResultsIndex, a.MIMEHeaderNgdotSet.Key, a.MIMEHeaderNgdotSet.Value))
			MIMEHeaderResultsIndex = (MIMEHeaderResultsIndex + 1) % MIMEHeaderNb
		case *NgoloFuzzOne_MIMEHeaderNgdotGet:
			if MIMEHeaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("MIMEHeader%d.Get(%#+v)\n", MIMEHeaderResultsIndex, a.MIMEHeaderNgdotGet.Key))
			MIMEHeaderResultsIndex = (MIMEHeaderResultsIndex + 1) % MIMEHeaderNb
		case *NgoloFuzzOne_MIMEHeaderNgdotValues:
			if MIMEHeaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("MIMEHeader%d.Values(%#+v)\n", MIMEHeaderResultsIndex, a.MIMEHeaderNgdotValues.Key))
			MIMEHeaderResultsIndex = (MIMEHeaderResultsIndex + 1) % MIMEHeaderNb
		case *NgoloFuzzOne_MIMEHeaderNgdotDel:
			if MIMEHeaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("MIMEHeader%d.Del(%#+v)\n", MIMEHeaderResultsIndex, a.MIMEHeaderNgdotDel.Key))
			MIMEHeaderResultsIndex = (MIMEHeaderResultsIndex + 1) % MIMEHeaderNb
		case *NgoloFuzzOne_NewReader:
			w.WriteString(fmt.Sprintf("Reader%d := textproto.NewReader(CreateBufioReader(%#+v))\n", ReaderNb, a.NewReader.R))
			ReaderNb = ReaderNb + 1
		case *NgoloFuzzOne_ReaderNgdotReadLine:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadLine()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadLineBytes:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadLineBytes()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadContinuedLine:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadContinuedLine()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadContinuedLineBytes:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadContinuedLineBytes()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadCodeLine:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadCodeLine(int(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotReadCodeLine.ExpectCode))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadResponse:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadResponse(int(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotReadResponse.ExpectCode))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotDotReader:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.DotReader()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadDotBytes:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadDotBytes()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadDotLines:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadDotLines()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadMIMEHeader:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("MIMEHeader%d, _ := Reader%d.ReadMIMEHeader()\n", MIMEHeaderNb, ReaderResultsIndex))
			MIMEHeaderNb = MIMEHeaderNb + 1
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_CanonicalMIMEHeaderKey:
			w.WriteString(fmt.Sprintf("textproto.CanonicalMIMEHeaderKey(%#+v)\n", a.CanonicalMIMEHeaderKey.S))
		case *NgoloFuzzOne_ConnNgdotClose:
			if ConnNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Conn%d.Close()\n", ConnResultsIndex))
			ConnResultsIndex = (ConnResultsIndex + 1) % ConnNb
		case *NgoloFuzzOne_Dial:
			w.WriteString(fmt.Sprintf("Conn%d, _ := textproto.Dial(%#+v, %#+v)\n", ConnNb, a.Dial.Network, a.Dial.Addr))
			ConnNb = ConnNb + 1
		case *NgoloFuzzOne_TrimString:
			w.WriteString(fmt.Sprintf("textproto.TrimString(%#+v)\n", a.TrimString.S))
		case *NgoloFuzzOne_TrimBytes:
			w.WriteString(fmt.Sprintf("textproto.TrimBytes(%#+v)\n", a.TrimBytes.B))
		case *NgoloFuzzOne_WriterNgdotDotWriter:
			if WriterNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Writer%d.DotWriter()\n", WriterResultsIndex))
			WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_net_textproto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the build fails if this generated code and the
// linked protoimpl runtime disagree on the supported version range.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// MIMEHeaderNgdotAddArgs carries the (key, value) arguments for a fuzzed
// MIMEHeader.Add call.
type MIMEHeaderNgdotAddArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Key           string                 `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value         string                 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *MIMEHeaderNgdotAddArgs) Reset() {
	*x = MIMEHeaderNgdotAddArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *MIMEHeaderNgdotAddArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*MIMEHeaderNgdotAddArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *MIMEHeaderNgdotAddArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MIMEHeaderNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*MIMEHeaderNgdotAddArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetKey returns the header key, or "" on a nil receiver.
func (x *MIMEHeaderNgdotAddArgs) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}

// GetValue returns the header value, or "" on a nil receiver.
func (x *MIMEHeaderNgdotAddArgs) GetValue() string {
	if x != nil {
		return x.Value
	}
	return ""
}
// MIMEHeaderNgdotSetArgs carries the (key, value) arguments for a fuzzed
// MIMEHeader.Set call.
type MIMEHeaderNgdotSetArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Key           string                 `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value         string                 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *MIMEHeaderNgdotSetArgs) Reset() {
	*x = MIMEHeaderNgdotSetArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *MIMEHeaderNgdotSetArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*MIMEHeaderNgdotSetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *MIMEHeaderNgdotSetArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MIMEHeaderNgdotSetArgs.ProtoReflect.Descriptor instead.
func (*MIMEHeaderNgdotSetArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetKey returns the header key, or "" on a nil receiver.
func (x *MIMEHeaderNgdotSetArgs) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}

// GetValue returns the header value, or "" on a nil receiver.
func (x *MIMEHeaderNgdotSetArgs) GetValue() string {
	if x != nil {
		return x.Value
	}
	return ""
}
// MIMEHeaderNgdotGetArgs carries the key argument for a fuzzed
// MIMEHeader.Get call.
type MIMEHeaderNgdotGetArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Key           string                 `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *MIMEHeaderNgdotGetArgs) Reset() {
	*x = MIMEHeaderNgdotGetArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *MIMEHeaderNgdotGetArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*MIMEHeaderNgdotGetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *MIMEHeaderNgdotGetArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MIMEHeaderNgdotGetArgs.ProtoReflect.Descriptor instead.
func (*MIMEHeaderNgdotGetArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetKey returns the header key, or "" on a nil receiver.
func (x *MIMEHeaderNgdotGetArgs) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}
// MIMEHeaderNgdotValuesArgs carries the key argument for a fuzzed
// MIMEHeader.Values call.
type MIMEHeaderNgdotValuesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Key           string                 `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *MIMEHeaderNgdotValuesArgs) Reset() {
	*x = MIMEHeaderNgdotValuesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *MIMEHeaderNgdotValuesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*MIMEHeaderNgdotValuesArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *MIMEHeaderNgdotValuesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MIMEHeaderNgdotValuesArgs.ProtoReflect.Descriptor instead.
func (*MIMEHeaderNgdotValuesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetKey returns the header key, or "" on a nil receiver.
func (x *MIMEHeaderNgdotValuesArgs) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}
// MIMEHeaderNgdotDelArgs carries the key argument for a fuzzed
// MIMEHeader.Del call.
type MIMEHeaderNgdotDelArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Key           string                 `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *MIMEHeaderNgdotDelArgs) Reset() {
	*x = MIMEHeaderNgdotDelArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *MIMEHeaderNgdotDelArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*MIMEHeaderNgdotDelArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *MIMEHeaderNgdotDelArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MIMEHeaderNgdotDelArgs.ProtoReflect.Descriptor instead.
func (*MIMEHeaderNgdotDelArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetKey returns the header key, or "" on a nil receiver.
func (x *MIMEHeaderNgdotDelArgs) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}
// NewReaderArgs carries the raw bytes a fuzzed textproto.NewReader
// will serve through a bufio.Reader.
type NewReaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *NewReaderArgs) Reset() {
	*x = NewReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *NewReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NewReaderArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetR returns the reader payload, or nil on a nil receiver.
func (x *NewReaderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// ReaderNgdotReadLineArgs is the (empty) argument message for a fuzzed
// Reader.ReadLine call.
type ReaderNgdotReadLineArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *ReaderNgdotReadLineArgs) Reset() {
	*x = ReaderNgdotReadLineArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *ReaderNgdotReadLineArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotReadLineArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *ReaderNgdotReadLineArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadLineArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadLineArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// ReaderNgdotReadLineBytesArgs is the (empty) argument message for a
// fuzzed Reader.ReadLineBytes call.
type ReaderNgdotReadLineBytesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *ReaderNgdotReadLineBytesArgs) Reset() {
	*x = ReaderNgdotReadLineBytesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *ReaderNgdotReadLineBytesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotReadLineBytesArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *ReaderNgdotReadLineBytesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadLineBytesArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadLineBytesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// ReaderNgdotReadContinuedLineArgs is the (empty) argument message for
// a fuzzed Reader.ReadContinuedLine call.
type ReaderNgdotReadContinuedLineArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *ReaderNgdotReadContinuedLineArgs) Reset() {
	*x = ReaderNgdotReadContinuedLineArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *ReaderNgdotReadContinuedLineArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotReadContinuedLineArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *ReaderNgdotReadContinuedLineArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadContinuedLineArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadContinuedLineArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// ReaderNgdotReadContinuedLineBytesArgs is the (empty) argument message
// for a fuzzed Reader.ReadContinuedLineBytes call.
type ReaderNgdotReadContinuedLineBytesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *ReaderNgdotReadContinuedLineBytesArgs) Reset() {
	*x = ReaderNgdotReadContinuedLineBytesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *ReaderNgdotReadContinuedLineBytesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotReadContinuedLineBytesArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *ReaderNgdotReadContinuedLineBytesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadContinuedLineBytesArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadContinuedLineBytesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// ReaderNgdotReadCodeLineArgs carries the expectCode argument for a
// fuzzed Reader.ReadCodeLine call (narrowed to int at the call site).
type ReaderNgdotReadCodeLineArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	ExpectCode    int64                  `protobuf:"varint,1,opt,name=expectCode,proto3" json:"expectCode,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *ReaderNgdotReadCodeLineArgs) Reset() {
	*x = ReaderNgdotReadCodeLineArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *ReaderNgdotReadCodeLineArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotReadCodeLineArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *ReaderNgdotReadCodeLineArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadCodeLineArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadCodeLineArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetExpectCode returns the expected status code, or 0 on a nil receiver.
func (x *ReaderNgdotReadCodeLineArgs) GetExpectCode() int64 {
	if x != nil {
		return x.ExpectCode
	}
	return 0
}
// ReaderNgdotReadResponseArgs carries the expectCode argument for a
// fuzzed Reader.ReadResponse call (narrowed to int at the call site).
type ReaderNgdotReadResponseArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	ExpectCode    int64                  `protobuf:"varint,1,opt,name=expectCode,proto3" json:"expectCode,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *ReaderNgdotReadResponseArgs) Reset() {
	*x = ReaderNgdotReadResponseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *ReaderNgdotReadResponseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotReadResponseArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *ReaderNgdotReadResponseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadResponseArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadResponseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetExpectCode returns the expected status code, or 0 on a nil receiver.
func (x *ReaderNgdotReadResponseArgs) GetExpectCode() int64 {
	if x != nil {
		return x.ExpectCode
	}
	return 0
}
// ReaderNgdotDotReaderArgs is the (empty) argument message for a fuzzed
// Reader.DotReader call.
type ReaderNgdotDotReaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *ReaderNgdotDotReaderArgs) Reset() {
	*x = ReaderNgdotDotReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *ReaderNgdotDotReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotDotReaderArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *ReaderNgdotDotReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotDotReaderArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotDotReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// ReaderNgdotReadDotBytesArgs is the (empty) argument message for a
// fuzzed Reader.ReadDotBytes call.
type ReaderNgdotReadDotBytesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state.
func (x *ReaderNgdotReadDotBytesArgs) Reset() {
	*x = ReaderNgdotReadDotBytesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message via the protoimpl stringer.
func (x *ReaderNgdotReadDotBytesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotReadDotBytesArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view used by the proto runtime.
func (x *ReaderNgdotReadDotBytesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadDotBytesArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadDotBytesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// ReaderNgdotReadDotLinesArgs is the generated argument message for the
// ReaderNgdotReadDotLines call; it carries no fields (message index 14).
type ReaderNgdotReadDotLinesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *ReaderNgdotReadDotLinesArgs) Reset() {
	*x = ReaderNgdotReadDotLinesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *ReaderNgdotReadDotLinesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotReadDotLinesArgs) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[14];
// a nil receiver falls back to mi.MessageOf(x).
func (x *ReaderNgdotReadDotLinesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadDotLinesArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadDotLinesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// ReaderNgdotReadMIMEHeaderArgs is the generated argument message for the
// ReaderNgdotReadMIMEHeader call; it carries no fields (message index 15).
type ReaderNgdotReadMIMEHeaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *ReaderNgdotReadMIMEHeaderArgs) Reset() {
	*x = ReaderNgdotReadMIMEHeaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *ReaderNgdotReadMIMEHeaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ReaderNgdotReadMIMEHeaderArgs) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[15];
// a nil receiver falls back to mi.MessageOf(x).
func (x *ReaderNgdotReadMIMEHeaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReaderNgdotReadMIMEHeaderArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadMIMEHeaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// CanonicalMIMEHeaderKeyArgs is the generated argument message for the
// CanonicalMIMEHeaderKey call; it carries one string field s
// (message index 16).
type CanonicalMIMEHeaderKeyArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// S is proto field 1 ("s").
	S             string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *CanonicalMIMEHeaderKeyArgs) Reset() {
	*x = CanonicalMIMEHeaderKeyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *CanonicalMIMEHeaderKeyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*CanonicalMIMEHeaderKeyArgs) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[16];
// a nil receiver falls back to mi.MessageOf(x).
func (x *CanonicalMIMEHeaderKeyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CanonicalMIMEHeaderKeyArgs.ProtoReflect.Descriptor instead.
func (*CanonicalMIMEHeaderKeyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// GetS returns the s field, or "" for a nil receiver.
func (x *CanonicalMIMEHeaderKeyArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// ConnNgdotCloseArgs is the generated argument message for the
// ConnNgdotClose call; it carries no fields (message index 17).
type ConnNgdotCloseArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *ConnNgdotCloseArgs) Reset() {
	*x = ConnNgdotCloseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *ConnNgdotCloseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ConnNgdotCloseArgs) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[17];
// a nil receiver falls back to mi.MessageOf(x).
func (x *ConnNgdotCloseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ConnNgdotCloseArgs.ProtoReflect.Descriptor instead.
func (*ConnNgdotCloseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// DialArgs is the generated argument message for the Dial call; it carries
// two string fields, network and addr (message index 18).
type DialArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Network is proto field 1 ("network").
	Network string `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"`
	// Addr is proto field 2 ("addr").
	Addr          string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *DialArgs) Reset() {
	*x = DialArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *DialArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*DialArgs) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[18];
// a nil receiver falls back to mi.MessageOf(x).
func (x *DialArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DialArgs.ProtoReflect.Descriptor instead.
func (*DialArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}

// GetNetwork returns the network field, or "" for a nil receiver.
func (x *DialArgs) GetNetwork() string {
	if x != nil {
		return x.Network
	}
	return ""
}

// GetAddr returns the addr field, or "" for a nil receiver.
func (x *DialArgs) GetAddr() string {
	if x != nil {
		return x.Addr
	}
	return ""
}
// TrimStringArgs is the generated argument message for the TrimString call;
// it carries one string field s (message index 19).
type TrimStringArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// S is proto field 1 ("s").
	S             string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *TrimStringArgs) Reset() {
	*x = TrimStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[19]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *TrimStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*TrimStringArgs) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[19];
// a nil receiver falls back to mi.MessageOf(x).
func (x *TrimStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[19]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TrimStringArgs.ProtoReflect.Descriptor instead.
func (*TrimStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}

// GetS returns the s field, or "" for a nil receiver.
func (x *TrimStringArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// TrimBytesArgs is the generated argument message for the TrimBytes call;
// it carries one bytes field b (message index 20).
type TrimBytesArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// B is proto field 1 ("b").
	B             []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *TrimBytesArgs) Reset() {
	*x = TrimBytesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[20]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *TrimBytesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*TrimBytesArgs) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[20];
// a nil receiver falls back to mi.MessageOf(x).
func (x *TrimBytesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[20]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TrimBytesArgs.ProtoReflect.Descriptor instead.
func (*TrimBytesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}

// GetB returns the b field, or nil for a nil receiver.
func (x *TrimBytesArgs) GetB() []byte {
	if x != nil {
		return x.B
	}
	return nil
}
// WriterNgdotDotWriterArgs is the generated argument message for the
// WriterNgdotDotWriter call; it carries no fields (message index 21).
type WriterNgdotDotWriterArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *WriterNgdotDotWriterArgs) Reset() {
	*x = WriterNgdotDotWriterArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[21]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *WriterNgdotDotWriterArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*WriterNgdotDotWriterArgs) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[21];
// a nil receiver falls back to mi.MessageOf(x).
func (x *WriterNgdotDotWriterArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[21]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WriterNgdotDotWriterArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotDotWriterArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// NgoloFuzzOne wraps exactly one fuzz operation in a protobuf oneof; each
// wrapper type below selects which generated *Args message is present
// (message index 22).
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_MIMEHeaderNgdotAdd
	//	*NgoloFuzzOne_MIMEHeaderNgdotSet
	//	*NgoloFuzzOne_MIMEHeaderNgdotGet
	//	*NgoloFuzzOne_MIMEHeaderNgdotValues
	//	*NgoloFuzzOne_MIMEHeaderNgdotDel
	//	*NgoloFuzzOne_NewReader
	//	*NgoloFuzzOne_ReaderNgdotReadLine
	//	*NgoloFuzzOne_ReaderNgdotReadLineBytes
	//	*NgoloFuzzOne_ReaderNgdotReadContinuedLine
	//	*NgoloFuzzOne_ReaderNgdotReadContinuedLineBytes
	//	*NgoloFuzzOne_ReaderNgdotReadCodeLine
	//	*NgoloFuzzOne_ReaderNgdotReadResponse
	//	*NgoloFuzzOne_ReaderNgdotDotReader
	//	*NgoloFuzzOne_ReaderNgdotReadDotBytes
	//	*NgoloFuzzOne_ReaderNgdotReadDotLines
	//	*NgoloFuzzOne_ReaderNgdotReadMIMEHeader
	//	*NgoloFuzzOne_CanonicalMIMEHeaderKey
	//	*NgoloFuzzOne_ConnNgdotClose
	//	*NgoloFuzzOne_Dial
	//	*NgoloFuzzOne_TrimString
	//	*NgoloFuzzOne_TrimBytes
	//	*NgoloFuzzOne_WriterNgdotDotWriter
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[22]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[22];
// a nil receiver falls back to mi.MessageOf(x).
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[22]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}

// GetItem returns the populated oneof wrapper, or nil for a nil receiver
// or when no variant is set.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below each return the corresponding *Args message when
// that oneof variant is set, and nil otherwise (including nil receiver).
func (x *NgoloFuzzOne) GetMIMEHeaderNgdotAdd() *MIMEHeaderNgdotAddArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_MIMEHeaderNgdotAdd); ok {
			return x.MIMEHeaderNgdotAdd
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetMIMEHeaderNgdotSet() *MIMEHeaderNgdotSetArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_MIMEHeaderNgdotSet); ok {
			return x.MIMEHeaderNgdotSet
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetMIMEHeaderNgdotGet() *MIMEHeaderNgdotGetArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_MIMEHeaderNgdotGet); ok {
			return x.MIMEHeaderNgdotGet
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetMIMEHeaderNgdotValues() *MIMEHeaderNgdotValuesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_MIMEHeaderNgdotValues); ok {
			return x.MIMEHeaderNgdotValues
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetMIMEHeaderNgdotDel() *MIMEHeaderNgdotDelArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_MIMEHeaderNgdotDel); ok {
			return x.MIMEHeaderNgdotDel
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
			return x.NewReader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotReadLine() *ReaderNgdotReadLineArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadLine); ok {
			return x.ReaderNgdotReadLine
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotReadLineBytes() *ReaderNgdotReadLineBytesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadLineBytes); ok {
			return x.ReaderNgdotReadLineBytes
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotReadContinuedLine() *ReaderNgdotReadContinuedLineArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadContinuedLine); ok {
			return x.ReaderNgdotReadContinuedLine
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotReadContinuedLineBytes() *ReaderNgdotReadContinuedLineBytesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadContinuedLineBytes); ok {
			return x.ReaderNgdotReadContinuedLineBytes
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotReadCodeLine() *ReaderNgdotReadCodeLineArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadCodeLine); ok {
			return x.ReaderNgdotReadCodeLine
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotReadResponse() *ReaderNgdotReadResponseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadResponse); ok {
			return x.ReaderNgdotReadResponse
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotDotReader() *ReaderNgdotDotReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotDotReader); ok {
			return x.ReaderNgdotDotReader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotReadDotBytes() *ReaderNgdotReadDotBytesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadDotBytes); ok {
			return x.ReaderNgdotReadDotBytes
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotReadDotLines() *ReaderNgdotReadDotLinesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadDotLines); ok {
			return x.ReaderNgdotReadDotLines
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetReaderNgdotReadMIMEHeader() *ReaderNgdotReadMIMEHeaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadMIMEHeader); ok {
			return x.ReaderNgdotReadMIMEHeader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCanonicalMIMEHeaderKey() *CanonicalMIMEHeaderKeyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CanonicalMIMEHeaderKey); ok {
			return x.CanonicalMIMEHeaderKey
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetConnNgdotClose() *ConnNgdotCloseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ConnNgdotClose); ok {
			return x.ConnNgdotClose
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetDial() *DialArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Dial); ok {
			return x.Dial
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetTrimString() *TrimStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_TrimString); ok {
			return x.TrimString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetTrimBytes() *TrimBytesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_TrimBytes); ok {
			return x.TrimBytes
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetWriterNgdotDotWriter() *WriterNgdotDotWriterArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotDotWriter); ok {
			return x.WriterNgdotDotWriter
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface satisfied only by the oneof
// wrapper types below.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// One wrapper struct per oneof variant; the field number in each protobuf
// tag matches the field in ngolofuzz.proto.
type NgoloFuzzOne_MIMEHeaderNgdotAdd struct {
	MIMEHeaderNgdotAdd *MIMEHeaderNgdotAddArgs `protobuf:"bytes,1,opt,name=MIMEHeaderNgdotAdd,proto3,oneof"`
}

type NgoloFuzzOne_MIMEHeaderNgdotSet struct {
	MIMEHeaderNgdotSet *MIMEHeaderNgdotSetArgs `protobuf:"bytes,2,opt,name=MIMEHeaderNgdotSet,proto3,oneof"`
}

type NgoloFuzzOne_MIMEHeaderNgdotGet struct {
	MIMEHeaderNgdotGet *MIMEHeaderNgdotGetArgs `protobuf:"bytes,3,opt,name=MIMEHeaderNgdotGet,proto3,oneof"`
}

type NgoloFuzzOne_MIMEHeaderNgdotValues struct {
	MIMEHeaderNgdotValues *MIMEHeaderNgdotValuesArgs `protobuf:"bytes,4,opt,name=MIMEHeaderNgdotValues,proto3,oneof"`
}

type NgoloFuzzOne_MIMEHeaderNgdotDel struct {
	MIMEHeaderNgdotDel *MIMEHeaderNgdotDelArgs `protobuf:"bytes,5,opt,name=MIMEHeaderNgdotDel,proto3,oneof"`
}

type NgoloFuzzOne_NewReader struct {
	NewReader *NewReaderArgs `protobuf:"bytes,6,opt,name=NewReader,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotReadLine struct {
	ReaderNgdotReadLine *ReaderNgdotReadLineArgs `protobuf:"bytes,7,opt,name=ReaderNgdotReadLine,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotReadLineBytes struct {
	ReaderNgdotReadLineBytes *ReaderNgdotReadLineBytesArgs `protobuf:"bytes,8,opt,name=ReaderNgdotReadLineBytes,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotReadContinuedLine struct {
	ReaderNgdotReadContinuedLine *ReaderNgdotReadContinuedLineArgs `protobuf:"bytes,9,opt,name=ReaderNgdotReadContinuedLine,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotReadContinuedLineBytes struct {
	ReaderNgdotReadContinuedLineBytes *ReaderNgdotReadContinuedLineBytesArgs `protobuf:"bytes,10,opt,name=ReaderNgdotReadContinuedLineBytes,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotReadCodeLine struct {
	ReaderNgdotReadCodeLine *ReaderNgdotReadCodeLineArgs `protobuf:"bytes,11,opt,name=ReaderNgdotReadCodeLine,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotReadResponse struct {
	ReaderNgdotReadResponse *ReaderNgdotReadResponseArgs `protobuf:"bytes,12,opt,name=ReaderNgdotReadResponse,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotDotReader struct {
	ReaderNgdotDotReader *ReaderNgdotDotReaderArgs `protobuf:"bytes,13,opt,name=ReaderNgdotDotReader,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotReadDotBytes struct {
	ReaderNgdotReadDotBytes *ReaderNgdotReadDotBytesArgs `protobuf:"bytes,14,opt,name=ReaderNgdotReadDotBytes,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotReadDotLines struct {
	ReaderNgdotReadDotLines *ReaderNgdotReadDotLinesArgs `protobuf:"bytes,15,opt,name=ReaderNgdotReadDotLines,proto3,oneof"`
}

type NgoloFuzzOne_ReaderNgdotReadMIMEHeader struct {
	ReaderNgdotReadMIMEHeader *ReaderNgdotReadMIMEHeaderArgs `protobuf:"bytes,16,opt,name=ReaderNgdotReadMIMEHeader,proto3,oneof"`
}

type NgoloFuzzOne_CanonicalMIMEHeaderKey struct {
	CanonicalMIMEHeaderKey *CanonicalMIMEHeaderKeyArgs `protobuf:"bytes,17,opt,name=CanonicalMIMEHeaderKey,proto3,oneof"`
}

type NgoloFuzzOne_ConnNgdotClose struct {
	ConnNgdotClose *ConnNgdotCloseArgs `protobuf:"bytes,18,opt,name=ConnNgdotClose,proto3,oneof"`
}

type NgoloFuzzOne_Dial struct {
	Dial *DialArgs `protobuf:"bytes,19,opt,name=Dial,proto3,oneof"`
}

type NgoloFuzzOne_TrimString struct {
	TrimString *TrimStringArgs `protobuf:"bytes,20,opt,name=TrimString,proto3,oneof"`
}

type NgoloFuzzOne_TrimBytes struct {
	TrimBytes *TrimBytesArgs `protobuf:"bytes,21,opt,name=TrimBytes,proto3,oneof"`
}

type NgoloFuzzOne_WriterNgdotDotWriter struct {
	WriterNgdotDotWriter *WriterNgdotDotWriterArgs `protobuf:"bytes,22,opt,name=WriterNgdotDotWriter,proto3,oneof"`
}

// Marker methods sealing each wrapper type into the isNgoloFuzzOne_Item
// interface.
func (*NgoloFuzzOne_MIMEHeaderNgdotAdd) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MIMEHeaderNgdotSet) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MIMEHeaderNgdotGet) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MIMEHeaderNgdotValues) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_MIMEHeaderNgdotDel) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotReadLine) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotReadLineBytes) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotReadContinuedLine) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotReadContinuedLineBytes) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotReadCodeLine) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotReadResponse) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotDotReader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotReadDotBytes) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotReadDotLines) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ReaderNgdotReadMIMEHeader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CanonicalMIMEHeaderKey) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ConnNgdotClose) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Dial) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_TrimString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_TrimBytes) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_WriterNgdotDotWriter) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny holds a single scalar value in a protobuf oneof — one of
// double, int64, bool, string, or bytes (message index 23).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[23]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[23];
// a nil receiver falls back to mi.MessageOf(x).
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[23]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}

// GetItem returns the populated oneof wrapper, or nil for a nil receiver
// or when no variant is set.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below each return the variant's value when that oneof
// case is set, and the zero value otherwise (including nil receiver).
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface satisfied only by the oneof
// wrapper types below.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

// One wrapper struct per scalar variant; tags match fields 1-5 of the
// NgoloFuzzAny message in ngolofuzz.proto.
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

// Marker methods sealing each wrapper type into the isNgoloFuzzAny_Item
// interface.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: a repeated sequence of
// NgoloFuzzOne operations (message index 24).
type NgoloFuzzList struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// List is proto field 1 ("list"), the ordered operations to replay.
	List          []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state and re-binds its MessageInfo.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[24]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message using the protobuf text formatter.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the protoreflect view, lazily binding msgTypes[24];
// a nil receiver falls back to mi.MessageOf(x).
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[24]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}

// GetList returns the list field, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled FileDescriptor for ngolofuzz.proto,
// populated by the generated init machinery.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-encoded FileDescriptorProto for
// ngolofuzz.proto. It is generated data — do not edit by hand; any byte
// change corrupts the descriptor.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"@\n" +
	"\x16MIMEHeaderNgdotAddArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value\"@\n" +
	"\x16MIMEHeaderNgdotSetArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
	"\x05value\x18\x02 \x01(\tR\x05value\"*\n" +
	"\x16MIMEHeaderNgdotGetArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\"-\n" +
	"\x19MIMEHeaderNgdotValuesArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\"*\n" +
	"\x16MIMEHeaderNgdotDelArgs\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\"\x1d\n" +
	"\rNewReaderArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\fR\x01r\"\x19\n" +
	"\x17ReaderNgdotReadLineArgs\"\x1e\n" +
	"\x1cReaderNgdotReadLineBytesArgs\"\"\n" +
	" ReaderNgdotReadContinuedLineArgs\"'\n" +
	"%ReaderNgdotReadContinuedLineBytesArgs\"=\n" +
	"\x1bReaderNgdotReadCodeLineArgs\x12\x1e\n" +
	"\n" +
	"expectCode\x18\x01 \x01(\x03R\n" +
	"expectCode\"=\n" +
	"\x1bReaderNgdotReadResponseArgs\x12\x1e\n" +
	"\n" +
	"expectCode\x18\x01 \x01(\x03R\n" +
	"expectCode\"\x1a\n" +
	"\x18ReaderNgdotDotReaderArgs\"\x1d\n" +
	"\x1bReaderNgdotReadDotBytesArgs\"\x1d\n" +
	"\x1bReaderNgdotReadDotLinesArgs\"\x1f\n" +
	"\x1dReaderNgdotReadMIMEHeaderArgs\"*\n" +
	"\x1aCanonicalMIMEHeaderKeyArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\x14\n" +
	"\x12ConnNgdotCloseArgs\"8\n" +
	"\bDialArgs\x12\x18\n" +
	"\anetwork\x18\x01 \x01(\tR\anetwork\x12\x12\n" +
	"\x04addr\x18\x02 \x01(\tR\x04addr\"\x1e\n" +
	"\x0eTrimStringArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\x1d\n" +
	"\rTrimBytesArgs\x12\f\n" +
	"\x01b\x18\x01 \x01(\fR\x01b\"\x1a\n" +
	"\x18WriterNgdotDotWriterArgs\"\xb3\x0f\n" +
	"\fNgoloFuzzOne\x12S\n" +
	"\x12MIMEHeaderNgdotAdd\x18\x01 \x01(\v2!.ngolofuzz.MIMEHeaderNgdotAddArgsH\x00R\x12MIMEHeaderNgdotAdd\x12S\n" +
	"\x12MIMEHeaderNgdotSet\x18\x02 \x01(\v2!.ngolofuzz.MIMEHeaderNgdotSetArgsH\x00R\x12MIMEHeaderNgdotSet\x12S\n" +
	"\x12MIMEHeaderNgdotGet\x18\x03 \x01(\v2!.ngolofuzz.MIMEHeaderNgdotGetArgsH\x00R\x12MIMEHeaderNgdotGet\x12\\\n" +
	"\x15MIMEHeaderNgdotValues\x18\x04 \x01(\v2$.ngolofuzz.MIMEHeaderNgdotValuesArgsH\x00R\x15MIMEHeaderNgdotValues\x12S\n" +
	"\x12MIMEHeaderNgdotDel\x18\x05 \x01(\v2!.ngolofuzz.MIMEHeaderNgdotDelArgsH\x00R\x12MIMEHeaderNgdotDel\x128\n" +
	"\tNewReader\x18\x06 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12V\n" +
	"\x13ReaderNgdotReadLine\x18\a \x01(\v2\".ngolofuzz.ReaderNgdotReadLineArgsH\x00R\x13ReaderNgdotReadLine\x12e\n" +
	"\x18ReaderNgdotReadLineBytes\x18\b \x01(\v2'.ngolofuzz.ReaderNgdotReadLineBytesArgsH\x00R\x18ReaderNgdotReadLineBytes\x12q\n" +
	"\x1cReaderNgdotReadContinuedLine\x18\t \x01(\v2+.ngolofuzz.ReaderNgdotReadContinuedLineArgsH\x00R\x1cReaderNgdotReadContinuedLine\x12\x80\x01\n" +
	"!ReaderNgdotReadContinuedLineBytes\x18\n" +
	" \x01(\v20.ngolofuzz.ReaderNgdotReadContinuedLineBytesArgsH\x00R!ReaderNgdotReadContinuedLineBytes\x12b\n" +
	"\x17ReaderNgdotReadCodeLine\x18\v \x01(\v2&.ngolofuzz.ReaderNgdotReadCodeLineArgsH\x00R\x17ReaderNgdotReadCodeLine\x12b\n" +
	"\x17ReaderNgdotReadResponse\x18\f \x01(\v2&.ngolofuzz.ReaderNgdotReadResponseArgsH\x00R\x17ReaderNgdotReadResponse\x12Y\n" +
	"\x14ReaderNgdotDotReader\x18\r \x01(\v2#.ngolofuzz.ReaderNgdotDotReaderArgsH\x00R\x14ReaderNgdotDotReader\x12b\n" +
	"\x17ReaderNgdotReadDotBytes\x18\x0e \x01(\v2&.ngolofuzz.ReaderNgdotReadDotBytesArgsH\x00R\x17ReaderNgdotReadDotBytes\x12b\n" +
	"\x17ReaderNgdotReadDotLines\x18\x0f \x01(\v2&.ngolofuzz.ReaderNgdotReadDotLinesArgsH\x00R\x17ReaderNgdotReadDotLines\x12h\n" +
	"\x19ReaderNgdotReadMIMEHeader\x18\x10 \x01(\v2(.ngolofuzz.ReaderNgdotReadMIMEHeaderArgsH\x00R\x19ReaderNgdotReadMIMEHeader\x12_\n" +
	"\x16CanonicalMIMEHeaderKey\x18\x11 \x01(\v2%.ngolofuzz.CanonicalMIMEHeaderKeyArgsH\x00R\x16CanonicalMIMEHeaderKey\x12G\n" +
	"\x0eConnNgdotClose\x18\x12 \x01(\v2\x1d.ngolofuzz.ConnNgdotCloseArgsH\x00R\x0eConnNgdotClose\x12)\n" +
	"\x04Dial\x18\x13 \x01(\v2\x13.ngolofuzz.DialArgsH\x00R\x04Dial\x12;\n" +
	"\n" +
	"TrimString\x18\x14 \x01(\v2\x19.ngolofuzz.TrimStringArgsH\x00R\n" +
	"TrimString\x128\n" +
	"\tTrimBytes\x18\x15 \x01(\v2\x18.ngolofuzz.TrimBytesArgsH\x00R\tTrimBytes\x12Y\n" +
	"\x14WriterNgdotDotWriter\x18\x16 \x01(\v2#.ngolofuzz.WriterNgdotDotWriterArgsH\x00R\x14WriterNgdotDotWriterB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_net_textprotob\x06proto3"
// Once-guarded cache for the gzip-compressed raw descriptor returned by
// file_ngolofuzz_proto_rawDescGZIP.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP returns the gzip-compressed raw
// descriptor, compressing it exactly once on first call. The unsafe.Slice
// over StringData is a zero-copy view of the const descriptor string.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// file_ngolofuzz_proto_msgTypes holds one MessageInfo slot per message in
// ngolofuzz.proto (25 total), indexed as referenced throughout this file.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 25)
// file_ngolofuzz_proto_goTypes maps descriptor message indices (0-24) to
// their generated Go types; order must match the descriptor.
var file_ngolofuzz_proto_goTypes = []any{
	(*MIMEHeaderNgdotAddArgs)(nil),                // 0: ngolofuzz.MIMEHeaderNgdotAddArgs
	(*MIMEHeaderNgdotSetArgs)(nil),                // 1: ngolofuzz.MIMEHeaderNgdotSetArgs
	(*MIMEHeaderNgdotGetArgs)(nil),                // 2: ngolofuzz.MIMEHeaderNgdotGetArgs
	(*MIMEHeaderNgdotValuesArgs)(nil),             // 3: ngolofuzz.MIMEHeaderNgdotValuesArgs
	(*MIMEHeaderNgdotDelArgs)(nil),                // 4: ngolofuzz.MIMEHeaderNgdotDelArgs
	(*NewReaderArgs)(nil),                         // 5: ngolofuzz.NewReaderArgs
	(*ReaderNgdotReadLineArgs)(nil),               // 6: ngolofuzz.ReaderNgdotReadLineArgs
	(*ReaderNgdotReadLineBytesArgs)(nil),          // 7: ngolofuzz.ReaderNgdotReadLineBytesArgs
	(*ReaderNgdotReadContinuedLineArgs)(nil),      // 8: ngolofuzz.ReaderNgdotReadContinuedLineArgs
	(*ReaderNgdotReadContinuedLineBytesArgs)(nil), // 9: ngolofuzz.ReaderNgdotReadContinuedLineBytesArgs
	(*ReaderNgdotReadCodeLineArgs)(nil),           // 10: ngolofuzz.ReaderNgdotReadCodeLineArgs
	(*ReaderNgdotReadResponseArgs)(nil),           // 11: ngolofuzz.ReaderNgdotReadResponseArgs
	(*ReaderNgdotDotReaderArgs)(nil),              // 12: ngolofuzz.ReaderNgdotDotReaderArgs
	(*ReaderNgdotReadDotBytesArgs)(nil),           // 13: ngolofuzz.ReaderNgdotReadDotBytesArgs
	(*ReaderNgdotReadDotLinesArgs)(nil),           // 14: ngolofuzz.ReaderNgdotReadDotLinesArgs
	(*ReaderNgdotReadMIMEHeaderArgs)(nil),         // 15: ngolofuzz.ReaderNgdotReadMIMEHeaderArgs
	(*CanonicalMIMEHeaderKeyArgs)(nil),            // 16: ngolofuzz.CanonicalMIMEHeaderKeyArgs
	(*ConnNgdotCloseArgs)(nil),                    // 17: ngolofuzz.ConnNgdotCloseArgs
	(*DialArgs)(nil),                              // 18: ngolofuzz.DialArgs
	(*TrimStringArgs)(nil),                        // 19: ngolofuzz.TrimStringArgs
	(*TrimBytesArgs)(nil),                         // 20: ngolofuzz.TrimBytesArgs
	(*WriterNgdotDotWriterArgs)(nil),              // 21: ngolofuzz.WriterNgdotDotWriterArgs
	(*NgoloFuzzOne)(nil),                          // 22: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                          // 23: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                         // 24: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.MIMEHeaderNgdotAdd:type_name -> ngolofuzz.MIMEHeaderNgdotAddArgs
1, // 1: ngolofuzz.NgoloFuzzOne.MIMEHeaderNgdotSet:type_name -> ngolofuzz.MIMEHeaderNgdotSetArgs
2, // 2: ngolofuzz.NgoloFuzzOne.MIMEHeaderNgdotGet:type_name -> ngolofuzz.MIMEHeaderNgdotGetArgs
3, // 3: ngolofuzz.NgoloFuzzOne.MIMEHeaderNgdotValues:type_name -> ngolofuzz.MIMEHeaderNgdotValuesArgs
4, // 4: ngolofuzz.NgoloFuzzOne.MIMEHeaderNgdotDel:type_name -> ngolofuzz.MIMEHeaderNgdotDelArgs
5, // 5: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
6, // 6: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadLine:type_name -> ngolofuzz.ReaderNgdotReadLineArgs
7, // 7: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadLineBytes:type_name -> ngolofuzz.ReaderNgdotReadLineBytesArgs
8, // 8: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadContinuedLine:type_name -> ngolofuzz.ReaderNgdotReadContinuedLineArgs
9, // 9: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadContinuedLineBytes:type_name -> ngolofuzz.ReaderNgdotReadContinuedLineBytesArgs
10, // 10: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadCodeLine:type_name -> ngolofuzz.ReaderNgdotReadCodeLineArgs
11, // 11: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadResponse:type_name -> ngolofuzz.ReaderNgdotReadResponseArgs
12, // 12: ngolofuzz.NgoloFuzzOne.ReaderNgdotDotReader:type_name -> ngolofuzz.ReaderNgdotDotReaderArgs
13, // 13: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadDotBytes:type_name -> ngolofuzz.ReaderNgdotReadDotBytesArgs
14, // 14: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadDotLines:type_name -> ngolofuzz.ReaderNgdotReadDotLinesArgs
15, // 15: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadMIMEHeader:type_name -> ngolofuzz.ReaderNgdotReadMIMEHeaderArgs
16, // 16: ngolofuzz.NgoloFuzzOne.CanonicalMIMEHeaderKey:type_name -> ngolofuzz.CanonicalMIMEHeaderKeyArgs
17, // 17: ngolofuzz.NgoloFuzzOne.ConnNgdotClose:type_name -> ngolofuzz.ConnNgdotCloseArgs
18, // 18: ngolofuzz.NgoloFuzzOne.Dial:type_name -> ngolofuzz.DialArgs
19, // 19: ngolofuzz.NgoloFuzzOne.TrimString:type_name -> ngolofuzz.TrimStringArgs
20, // 20: ngolofuzz.NgoloFuzzOne.TrimBytes:type_name -> ngolofuzz.TrimBytesArgs
21, // 21: ngolofuzz.NgoloFuzzOne.WriterNgdotDotWriter:type_name -> ngolofuzz.WriterNgdotDotWriterArgs
22, // 22: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
23, // [23:23] is the sub-list for method output_type
23, // [23:23] is the sub-list for method input_type
23, // [23:23] is the sub-list for extension type_name
23, // [23:23] is the sub-list for extension extendee
0, // [0:23] is the sub-list for field type_name
}
// init registers the ngolofuzz.proto descriptor with the protobuf runtime at
// package load time.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init builds the file descriptor and message type
// information exactly once; subsequent calls are no-ops.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		// Already initialized.
		return
	}
	// Register the wrapper types of the oneof field of NgoloFuzzOne (message index 22).
	file_ngolofuzz_proto_msgTypes[22].OneofWrappers = []any{
		(*NgoloFuzzOne_MIMEHeaderNgdotAdd)(nil),
		(*NgoloFuzzOne_MIMEHeaderNgdotSet)(nil),
		(*NgoloFuzzOne_MIMEHeaderNgdotGet)(nil),
		(*NgoloFuzzOne_MIMEHeaderNgdotValues)(nil),
		(*NgoloFuzzOne_MIMEHeaderNgdotDel)(nil),
		(*NgoloFuzzOne_NewReader)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadLine)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadLineBytes)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadContinuedLine)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadContinuedLineBytes)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadCodeLine)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadResponse)(nil),
		(*NgoloFuzzOne_ReaderNgdotDotReader)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadDotBytes)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadDotLines)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadMIMEHeader)(nil),
		(*NgoloFuzzOne_CanonicalMIMEHeaderKey)(nil),
		(*NgoloFuzzOne_ConnNgdotClose)(nil),
		(*NgoloFuzzOne_Dial)(nil),
		(*NgoloFuzzOne_TrimString)(nil),
		(*NgoloFuzzOne_TrimBytes)(nil),
		(*NgoloFuzzOne_WriterNgdotDotWriter)(nil),
	}
	// Register the wrapper types of the oneof field of NgoloFuzzAny (message index 23).
	file_ngolofuzz_proto_msgTypes[23].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	// Local marker type used only to derive this package's import path.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   25,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Drop the construction-time tables so they can be garbage-collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_net_url
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"net/url"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn for the fuzzing harness: reads are
// served from a fixed byte buffer and writes are silently discarded.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of the buffer into b, implementing
// io.Reader. It returns io.EOF once the whole buffer has been consumed.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy returns min(len(b), remaining). This fixes the original branch
	// condition `len(b) < len(c.buf)+c.offset` (sign flipped on c.offset),
	// which could report more bytes read than were actually copied and push
	// offset past len(buf).
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write discards b and reports that the whole slice was written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: the in-memory connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a as the read buffer of a fresh FuzzingConn.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit these helper constructors when the generated harness needs them.

// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over the bytes in a.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray converts each int64 in a to the platform int type,
// truncating where int is narrower than 64 bits.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array converts each int64 in a to uint16, truncating to the
// low 16 bits.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty.
// Invalid UTF-8 at the start decodes to utf8.RuneError, as with a range loop.
func GetRune(s string) rune {
	if s == "" {
		return '\x00'
	}
	return []rune(s)[0]
}
// FuzzNG_valid replays one protobuf-encoded list of API calls. The input is
// assumed to be a valid encoding of NgoloFuzzList; a failed unmarshal panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// String panics come from the fuzzed targets and are swallowed;
		// anything else is re-raised as a real failure.
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is like FuzzNG_valid but tolerates arbitrary input: data that
// does not decode as a NgoloFuzzList is skipped instead of panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// Swallow string panics raised by the fuzzed targets; re-raise the rest.
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets the generated list of operations against net/url,
// keeping the results of constructor-like calls in per-type pools so later
// method calls can reuse them round-robin.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// If FUZZ_NG_REPRODUCER is set, dump a Go-source reproducer there once.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// NOTE(review): URLResults is never appended to anywhere below — the Parse
	// and ParseRequestURI cases discard their *url.URL — so every URLNgdot*
	// case always bails out on its len(URLResults) == 0 check. Likely a
	// generator defect; left as-is to preserve corpus reproducibility.
	var URLResults []*url.URL
	URLResultsIndex := 0
	var UserinfoResults []*url.Userinfo
	UserinfoResultsIndex := 0
	var ValuesResults []*url.Values
	ValuesResultsIndex := 0
	for l := range gen.List {
		// Cap the number of interpreted operations per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_QueryUnescape:
			_, r1 := url.QueryUnescape(a.QueryUnescape.S)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_PathUnescape:
			_, r1 := url.PathUnescape(a.PathUnescape.S)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_QueryEscape:
			url.QueryEscape(a.QueryEscape.S)
		case *NgoloFuzzOne_PathEscape:
			url.PathEscape(a.PathEscape.S)
		case *NgoloFuzzOne_User:
			r0 := url.User(a.User.Username)
			if r0 != nil {
				UserinfoResults = append(UserinfoResults, r0)
			}
		case *NgoloFuzzOne_UserPassword:
			r0 := url.UserPassword(a.UserPassword.Username, a.UserPassword.Password)
			if r0 != nil {
				UserinfoResults = append(UserinfoResults, r0)
			}
		case *NgoloFuzzOne_UserinfoNgdotUsername:
			if len(UserinfoResults) == 0 {
				continue
			}
			arg0 := UserinfoResults[UserinfoResultsIndex]
			UserinfoResultsIndex = (UserinfoResultsIndex + 1) % len(UserinfoResults)
			arg0.Username()
		case *NgoloFuzzOne_UserinfoNgdotPassword:
			if len(UserinfoResults) == 0 {
				continue
			}
			arg0 := UserinfoResults[UserinfoResultsIndex]
			UserinfoResultsIndex = (UserinfoResultsIndex + 1) % len(UserinfoResults)
			arg0.Password()
		case *NgoloFuzzOne_UserinfoNgdotString:
			if len(UserinfoResults) == 0 {
				continue
			}
			arg0 := UserinfoResults[UserinfoResultsIndex]
			UserinfoResultsIndex = (UserinfoResultsIndex + 1) % len(UserinfoResults)
			arg0.String()
		case *NgoloFuzzOne_Parse:
			_, r1 := url.Parse(a.Parse.RawURL)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ParseRequestURI:
			_, r1 := url.ParseRequestURI(a.ParseRequestURI.RawURL)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_URLNgdotEscapedPath:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			arg0.EscapedPath()
		case *NgoloFuzzOne_URLNgdotEscapedFragment:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			arg0.EscapedFragment()
		case *NgoloFuzzOne_URLNgdotString:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			arg0.String()
		case *NgoloFuzzOne_URLNgdotRedacted:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			arg0.Redacted()
		case *NgoloFuzzOne_ValuesNgdotGet:
			if len(ValuesResults) == 0 {
				continue
			}
			arg0 := ValuesResults[ValuesResultsIndex]
			ValuesResultsIndex = (ValuesResultsIndex + 1) % len(ValuesResults)
			arg0.Get(a.ValuesNgdotGet.Key)
		case *NgoloFuzzOne_ValuesNgdotSet:
			if len(ValuesResults) == 0 {
				continue
			}
			arg0 := ValuesResults[ValuesResultsIndex]
			ValuesResultsIndex = (ValuesResultsIndex + 1) % len(ValuesResults)
			arg0.Set(a.ValuesNgdotSet.Key, a.ValuesNgdotSet.Value)
		case *NgoloFuzzOne_ValuesNgdotAdd:
			if len(ValuesResults) == 0 {
				continue
			}
			arg0 := ValuesResults[ValuesResultsIndex]
			ValuesResultsIndex = (ValuesResultsIndex + 1) % len(ValuesResults)
			arg0.Add(a.ValuesNgdotAdd.Key, a.ValuesNgdotAdd.Value)
		case *NgoloFuzzOne_ValuesNgdotDel:
			if len(ValuesResults) == 0 {
				continue
			}
			arg0 := ValuesResults[ValuesResultsIndex]
			ValuesResultsIndex = (ValuesResultsIndex + 1) % len(ValuesResults)
			arg0.Del(a.ValuesNgdotDel.Key)
		case *NgoloFuzzOne_ValuesNgdotHas:
			if len(ValuesResults) == 0 {
				continue
			}
			arg0 := ValuesResults[ValuesResultsIndex]
			ValuesResultsIndex = (ValuesResultsIndex + 1) % len(ValuesResults)
			arg0.Has(a.ValuesNgdotHas.Key)
		case *NgoloFuzzOne_ValuesNgdotClone:
			if len(ValuesResults) == 0 {
				continue
			}
			arg0 := ValuesResults[ValuesResultsIndex]
			ValuesResultsIndex = (ValuesResultsIndex + 1) % len(ValuesResults)
			// The clone joins the pool and can be targeted by later calls.
			r0 := arg0.Clone()
			ValuesResults = append(ValuesResults, &r0)
		case *NgoloFuzzOne_ParseQuery:
			r0, r1 := url.ParseQuery(a.ParseQuery.Query)
			ValuesResults = append(ValuesResults, &r0)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ValuesNgdotEncode:
			if len(ValuesResults) == 0 {
				continue
			}
			arg0 := ValuesResults[ValuesResultsIndex]
			ValuesResultsIndex = (ValuesResultsIndex + 1) % len(ValuesResults)
			arg0.Encode()
		case *NgoloFuzzOne_URLNgdotIsAbs:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			arg0.IsAbs()
		case *NgoloFuzzOne_URLNgdotParse:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			_, r1 := arg0.Parse(a.URLNgdotParse.Ref)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_URLNgdotResolveReference:
			// Takes two URLs from the pool: receiver and reference.
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			if len(URLResults) == 0 {
				continue
			}
			arg1 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			arg0.ResolveReference(arg1)
		case *NgoloFuzzOne_URLNgdotQuery:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			r0 := arg0.Query()
			ValuesResults = append(ValuesResults, &r0)
		case *NgoloFuzzOne_URLNgdotRequestURI:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			arg0.RequestURI()
		case *NgoloFuzzOne_URLNgdotHostname:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			arg0.Hostname()
		case *NgoloFuzzOne_URLNgdotPort:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			arg0.Port()
		case *NgoloFuzzOne_URLNgdotMarshalBinary:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			_, r1 := arg0.MarshalBinary()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_URLNgdotAppendBinary:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			_, r1 := arg0.AppendBinary(a.URLNgdotAppendBinary.B)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_URLNgdotUnmarshalBinary:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			r0 := arg0.UnmarshalBinary(a.URLNgdotUnmarshalBinary.Text)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_URLNgdotClone:
			if len(URLResults) == 0 {
				continue
			}
			arg0 := URLResults[URLResultsIndex]
			URLResultsIndex = (URLResultsIndex + 1) % len(URLResults)
			arg0.Clone()
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the operation list to w,
// mirroring the round-robin pooling that FuzzNG_List performs at run time
// (XxxNb counts created values, XxxResultsIndex tracks the next one reused).
// WriteString errors are intentionally ignored: this is best-effort debugging
// output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	URLNb := 0
	URLResultsIndex := 0
	UserinfoNb := 0
	UserinfoResultsIndex := 0
	ValuesNb := 0
	ValuesResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_QueryUnescape:
			w.WriteString(fmt.Sprintf("url.QueryUnescape(%#+v)\n", a.QueryUnescape.S))
		case *NgoloFuzzOne_PathUnescape:
			w.WriteString(fmt.Sprintf("url.PathUnescape(%#+v)\n", a.PathUnescape.S))
		case *NgoloFuzzOne_QueryEscape:
			w.WriteString(fmt.Sprintf("url.QueryEscape(%#+v)\n", a.QueryEscape.S))
		case *NgoloFuzzOne_PathEscape:
			w.WriteString(fmt.Sprintf("url.PathEscape(%#+v)\n", a.PathEscape.S))
		case *NgoloFuzzOne_User:
			w.WriteString(fmt.Sprintf("Userinfo%d := url.User(%#+v)\n", UserinfoNb, a.User.Username))
			UserinfoNb = UserinfoNb + 1
		case *NgoloFuzzOne_UserPassword:
			w.WriteString(fmt.Sprintf("Userinfo%d := url.UserPassword(%#+v, %#+v)\n", UserinfoNb, a.UserPassword.Username, a.UserPassword.Password))
			UserinfoNb = UserinfoNb + 1
		case *NgoloFuzzOne_UserinfoNgdotUsername:
			if UserinfoNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Userinfo%d.Username()\n", UserinfoResultsIndex))
			UserinfoResultsIndex = (UserinfoResultsIndex + 1) % UserinfoNb
		case *NgoloFuzzOne_UserinfoNgdotPassword:
			if UserinfoNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Userinfo%d.Password()\n", UserinfoResultsIndex))
			UserinfoResultsIndex = (UserinfoResultsIndex + 1) % UserinfoNb
		case *NgoloFuzzOne_UserinfoNgdotString:
			if UserinfoNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Userinfo%d.String()\n", UserinfoResultsIndex))
			UserinfoResultsIndex = (UserinfoResultsIndex + 1) % UserinfoNb
		case *NgoloFuzzOne_Parse:
			w.WriteString(fmt.Sprintf("url.Parse(%#+v)\n", a.Parse.RawURL))
		case *NgoloFuzzOne_ParseRequestURI:
			w.WriteString(fmt.Sprintf("url.ParseRequestURI(%#+v)\n", a.ParseRequestURI.RawURL))
		case *NgoloFuzzOne_URLNgdotEscapedPath:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.EscapedPath()\n", URLResultsIndex))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotEscapedFragment:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.EscapedFragment()\n", URLResultsIndex))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotString:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.String()\n", URLResultsIndex))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotRedacted:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.Redacted()\n", URLResultsIndex))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_ValuesNgdotGet:
			if ValuesNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Values%d.Get(%#+v)\n", ValuesResultsIndex, a.ValuesNgdotGet.Key))
			ValuesResultsIndex = (ValuesResultsIndex + 1) % ValuesNb
		case *NgoloFuzzOne_ValuesNgdotSet:
			if ValuesNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Values%d.Set(%#+v, %#+v)\n", ValuesResultsIndex, a.ValuesNgdotSet.Key, a.ValuesNgdotSet.Value))
			ValuesResultsIndex = (ValuesResultsIndex + 1) % ValuesNb
		case *NgoloFuzzOne_ValuesNgdotAdd:
			if ValuesNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Values%d.Add(%#+v, %#+v)\n", ValuesResultsIndex, a.ValuesNgdotAdd.Key, a.ValuesNgdotAdd.Value))
			ValuesResultsIndex = (ValuesResultsIndex + 1) % ValuesNb
		case *NgoloFuzzOne_ValuesNgdotDel:
			if ValuesNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Values%d.Del(%#+v)\n", ValuesResultsIndex, a.ValuesNgdotDel.Key))
			ValuesResultsIndex = (ValuesResultsIndex + 1) % ValuesNb
		case *NgoloFuzzOne_ValuesNgdotHas:
			if ValuesNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Values%d.Has(%#+v)\n", ValuesResultsIndex, a.ValuesNgdotHas.Key))
			ValuesResultsIndex = (ValuesResultsIndex + 1) % ValuesNb
		case *NgoloFuzzOne_ValuesNgdotClone:
			if ValuesNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Values%d := Values%d.Clone()\n", ValuesNb, ValuesResultsIndex))
			ValuesNb = ValuesNb + 1
			// NOTE(review): the index is advanced modulo the already-incremented
			// ValuesNb here, unlike the replay in FuzzNG_List which appends
			// after indexing — the printed reproducer may rotate differently.
			ValuesResultsIndex = (ValuesResultsIndex + 1) % ValuesNb
		case *NgoloFuzzOne_ParseQuery:
			w.WriteString(fmt.Sprintf("Values%d, _ := url.ParseQuery(%#+v)\n", ValuesNb, a.ParseQuery.Query))
			ValuesNb = ValuesNb + 1
		case *NgoloFuzzOne_ValuesNgdotEncode:
			if ValuesNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Values%d.Encode()\n", ValuesResultsIndex))
			ValuesResultsIndex = (ValuesResultsIndex + 1) % ValuesNb
		case *NgoloFuzzOne_URLNgdotIsAbs:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.IsAbs()\n", URLResultsIndex))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotParse:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.Parse(%#+v)\n", URLResultsIndex, a.URLNgdotParse.Ref))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotResolveReference:
			if URLNb == 0 {
				continue
			}
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.ResolveReference(URL%d)\n", URLResultsIndex, (URLResultsIndex+1)%URLNb))
			// Advance twice: receiver and reference were both consumed.
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotQuery:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Values%d := URL%d.Query()\n", ValuesNb, URLResultsIndex))
			ValuesNb = ValuesNb + 1
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotRequestURI:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.RequestURI()\n", URLResultsIndex))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotHostname:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.Hostname()\n", URLResultsIndex))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotPort:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.Port()\n", URLResultsIndex))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotMarshalBinary:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.MarshalBinary()\n", URLResultsIndex))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotAppendBinary:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.AppendBinary(%#+v)\n", URLResultsIndex, a.URLNgdotAppendBinary.B))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotUnmarshalBinary:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.UnmarshalBinary(%#+v)\n", URLResultsIndex, a.URLNgdotUnmarshalBinary.Text))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		case *NgoloFuzzOne_URLNgdotClone:
			if URLNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("URL%d.Clone()\n", URLResultsIndex))
			URLResultsIndex = (URLResultsIndex + 1) % URLNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_net_url
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guard against version skew between this generated code and the
// protoimpl runtime; either expression fails to compile on mismatch.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// QueryUnescapeArgs is the generated message (ngolofuzz.proto, index 0)
// carrying the s argument for a QueryUnescape call.
type QueryUnescapeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *QueryUnescapeArgs) Reset() {
	*x = QueryUnescapeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *QueryUnescapeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*QueryUnescapeArgs) ProtoMessage() {}

func (x *QueryUnescapeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use QueryUnescapeArgs.ProtoReflect.Descriptor instead.
func (*QueryUnescapeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

func (x *QueryUnescapeArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// PathUnescapeArgs is the generated message (index 1) carrying the s argument
// for a PathUnescape call.
type PathUnescapeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PathUnescapeArgs) Reset() {
	*x = PathUnescapeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PathUnescapeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PathUnescapeArgs) ProtoMessage() {}

func (x *PathUnescapeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PathUnescapeArgs.ProtoReflect.Descriptor instead.
func (*PathUnescapeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

func (x *PathUnescapeArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// QueryEscapeArgs is the generated message (index 2) carrying the s argument
// for a QueryEscape call.
type QueryEscapeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *QueryEscapeArgs) Reset() {
	*x = QueryEscapeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *QueryEscapeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*QueryEscapeArgs) ProtoMessage() {}

func (x *QueryEscapeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use QueryEscapeArgs.ProtoReflect.Descriptor instead.
func (*QueryEscapeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

func (x *QueryEscapeArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// PathEscapeArgs is the generated message (index 3) carrying the s argument
// for a PathEscape call.
type PathEscapeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *PathEscapeArgs) Reset() {
	*x = PathEscapeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PathEscapeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PathEscapeArgs) ProtoMessage() {}

func (x *PathEscapeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PathEscapeArgs.ProtoReflect.Descriptor instead.
func (*PathEscapeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

func (x *PathEscapeArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// UserArgs is the generated message (index 4) carrying the username argument
// for a User call.
type UserArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Username      string                 `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *UserArgs) Reset() {
	*x = UserArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *UserArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UserArgs) ProtoMessage() {}

func (x *UserArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UserArgs.ProtoReflect.Descriptor instead.
func (*UserArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

func (x *UserArgs) GetUsername() string {
	if x != nil {
		return x.Username
	}
	return ""
}

// UserPasswordArgs is the generated message (index 5) carrying the username
// and password arguments for a UserPassword call.
type UserPasswordArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Username      string                 `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
	Password      string                 `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *UserPasswordArgs) Reset() {
	*x = UserPasswordArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *UserPasswordArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UserPasswordArgs) ProtoMessage() {}

func (x *UserPasswordArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UserPasswordArgs.ProtoReflect.Descriptor instead.
func (*UserPasswordArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

func (x *UserPasswordArgs) GetUsername() string {
	if x != nil {
		return x.Username
	}
	return ""
}

func (x *UserPasswordArgs) GetPassword() string {
	if x != nil {
		return x.Password
	}
	return ""
}

// UserinfoNgdotUsernameArgs is the generated message (index 6); it has no
// fields because the corresponding method call takes no arguments.
type UserinfoNgdotUsernameArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *UserinfoNgdotUsernameArgs) Reset() {
	*x = UserinfoNgdotUsernameArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *UserinfoNgdotUsernameArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UserinfoNgdotUsernameArgs) ProtoMessage() {}

func (x *UserinfoNgdotUsernameArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UserinfoNgdotUsernameArgs.ProtoReflect.Descriptor instead.
func (*UserinfoNgdotUsernameArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// UserinfoNgdotPasswordArgs is the generated message (index 7); no fields.
type UserinfoNgdotPasswordArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *UserinfoNgdotPasswordArgs) Reset() {
	*x = UserinfoNgdotPasswordArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *UserinfoNgdotPasswordArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UserinfoNgdotPasswordArgs) ProtoMessage() {}

func (x *UserinfoNgdotPasswordArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UserinfoNgdotPasswordArgs.ProtoReflect.Descriptor instead.
func (*UserinfoNgdotPasswordArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// UserinfoNgdotStringArgs is the generated message (index 8); no fields.
type UserinfoNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *UserinfoNgdotStringArgs) Reset() {
	*x = UserinfoNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *UserinfoNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UserinfoNgdotStringArgs) ProtoMessage() {}

func (x *UserinfoNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UserinfoNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*UserinfoNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// ParseArgs is the generated argument message for the Parse fuzz call
// (message index 9 in ngolofuzz.proto); it carries a single rawURL string.
type ParseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
RawURL string `protobuf:"bytes,1,opt,name=rawURL,proto3" json:"rawURL,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *ParseArgs) Reset() {
*x = ParseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *ParseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParseArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *ParseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseArgs.ProtoReflect.Descriptor instead.
func (*ParseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetRawURL returns the rawURL field, or "" for a nil receiver.
func (x *ParseArgs) GetRawURL() string {
if x != nil {
return x.RawURL
}
return ""
}
// ParseRequestURIArgs is the generated argument message for the
// ParseRequestURI fuzz call (message index 10); it carries a rawURL string.
type ParseRequestURIArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
RawURL string `protobuf:"bytes,1,opt,name=rawURL,proto3" json:"rawURL,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *ParseRequestURIArgs) Reset() {
*x = ParseRequestURIArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *ParseRequestURIArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParseRequestURIArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *ParseRequestURIArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseRequestURIArgs.ProtoReflect.Descriptor instead.
func (*ParseRequestURIArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetRawURL returns the rawURL field, or "" for a nil receiver.
func (x *ParseRequestURIArgs) GetRawURL() string {
if x != nil {
return x.RawURL
}
return ""
}
// URLNgdotEscapedPathArgs is the generated, field-less argument message for
// the URL EscapedPath fuzz call (message index 11); used as a NgoloFuzzOne
// oneof variant.
type URLNgdotEscapedPathArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotEscapedPathArgs) Reset() {
*x = URLNgdotEscapedPathArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotEscapedPathArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotEscapedPathArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotEscapedPathArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotEscapedPathArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotEscapedPathArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// URLNgdotEscapedFragmentArgs is the generated, field-less argument message
// for the URL EscapedFragment fuzz call (message index 12).
type URLNgdotEscapedFragmentArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotEscapedFragmentArgs) Reset() {
*x = URLNgdotEscapedFragmentArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotEscapedFragmentArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotEscapedFragmentArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotEscapedFragmentArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotEscapedFragmentArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotEscapedFragmentArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// URLNgdotStringArgs is the generated, field-less argument message for the
// URL String fuzz call (message index 13).
type URLNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotStringArgs) Reset() {
*x = URLNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// URLNgdotRedactedArgs is the generated, field-less argument message for the
// URL Redacted fuzz call (message index 14).
type URLNgdotRedactedArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotRedactedArgs) Reset() {
*x = URLNgdotRedactedArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotRedactedArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotRedactedArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotRedactedArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotRedactedArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotRedactedArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// ValuesNgdotGetArgs is the generated argument message for the Values Get
// fuzz call (message index 15); it carries the lookup key.
type ValuesNgdotGetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *ValuesNgdotGetArgs) Reset() {
*x = ValuesNgdotGetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *ValuesNgdotGetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValuesNgdotGetArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *ValuesNgdotGetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValuesNgdotGetArgs.ProtoReflect.Descriptor instead.
func (*ValuesNgdotGetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetKey returns the key field, or "" for a nil receiver.
func (x *ValuesNgdotGetArgs) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
// ValuesNgdotSetArgs is the generated argument message for the Values Set
// fuzz call (message index 16); it carries a key/value pair.
type ValuesNgdotSetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *ValuesNgdotSetArgs) Reset() {
*x = ValuesNgdotSetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *ValuesNgdotSetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValuesNgdotSetArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *ValuesNgdotSetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValuesNgdotSetArgs.ProtoReflect.Descriptor instead.
func (*ValuesNgdotSetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetKey returns the key field, or "" for a nil receiver.
func (x *ValuesNgdotSetArgs) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
// GetValue returns the value field, or "" for a nil receiver.
func (x *ValuesNgdotSetArgs) GetValue() string {
if x != nil {
return x.Value
}
return ""
}
// ValuesNgdotAddArgs is the generated argument message for the Values Add
// fuzz call (message index 17); it carries a key/value pair.
type ValuesNgdotAddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *ValuesNgdotAddArgs) Reset() {
*x = ValuesNgdotAddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *ValuesNgdotAddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValuesNgdotAddArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *ValuesNgdotAddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValuesNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*ValuesNgdotAddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// GetKey returns the key field, or "" for a nil receiver.
func (x *ValuesNgdotAddArgs) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
// GetValue returns the value field, or "" for a nil receiver.
func (x *ValuesNgdotAddArgs) GetValue() string {
if x != nil {
return x.Value
}
return ""
}
// ValuesNgdotDelArgs is the generated argument message for the Values Del
// fuzz call (message index 18); it carries the key to delete.
type ValuesNgdotDelArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *ValuesNgdotDelArgs) Reset() {
*x = ValuesNgdotDelArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *ValuesNgdotDelArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValuesNgdotDelArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *ValuesNgdotDelArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValuesNgdotDelArgs.ProtoReflect.Descriptor instead.
func (*ValuesNgdotDelArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// GetKey returns the key field, or "" for a nil receiver.
func (x *ValuesNgdotDelArgs) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
// ValuesNgdotHasArgs is the generated argument message for the Values Has
// fuzz call (message index 19); it carries the key to test.
type ValuesNgdotHasArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *ValuesNgdotHasArgs) Reset() {
*x = ValuesNgdotHasArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *ValuesNgdotHasArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValuesNgdotHasArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *ValuesNgdotHasArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValuesNgdotHasArgs.ProtoReflect.Descriptor instead.
func (*ValuesNgdotHasArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// GetKey returns the key field, or "" for a nil receiver.
func (x *ValuesNgdotHasArgs) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
// ValuesNgdotCloneArgs is the generated, field-less argument message for the
// Values Clone fuzz call (message index 20).
type ValuesNgdotCloneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *ValuesNgdotCloneArgs) Reset() {
*x = ValuesNgdotCloneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *ValuesNgdotCloneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValuesNgdotCloneArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *ValuesNgdotCloneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValuesNgdotCloneArgs.ProtoReflect.Descriptor instead.
func (*ValuesNgdotCloneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// ParseQueryArgs is the generated argument message for the ParseQuery fuzz
// call (message index 21); it carries the raw query string.
type ParseQueryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *ParseQueryArgs) Reset() {
*x = ParseQueryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *ParseQueryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParseQueryArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *ParseQueryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseQueryArgs.ProtoReflect.Descriptor instead.
func (*ParseQueryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// GetQuery returns the query field, or "" for a nil receiver.
func (x *ParseQueryArgs) GetQuery() string {
if x != nil {
return x.Query
}
return ""
}
// ValuesNgdotEncodeArgs is the generated, field-less argument message for the
// Values Encode fuzz call (message index 22).
type ValuesNgdotEncodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *ValuesNgdotEncodeArgs) Reset() {
*x = ValuesNgdotEncodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *ValuesNgdotEncodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValuesNgdotEncodeArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *ValuesNgdotEncodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValuesNgdotEncodeArgs.ProtoReflect.Descriptor instead.
func (*ValuesNgdotEncodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// URLNgdotIsAbsArgs is the generated, field-less argument message for the
// URL IsAbs fuzz call (message index 23).
type URLNgdotIsAbsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotIsAbsArgs) Reset() {
*x = URLNgdotIsAbsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotIsAbsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotIsAbsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotIsAbsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotIsAbsArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotIsAbsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// URLNgdotParseArgs is the generated argument message for the URL Parse fuzz
// call (message index 24); it carries the reference string to resolve.
type URLNgdotParseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotParseArgs) Reset() {
*x = URLNgdotParseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotParseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotParseArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotParseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotParseArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotParseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// GetRef returns the ref field, or "" for a nil receiver.
func (x *URLNgdotParseArgs) GetRef() string {
if x != nil {
return x.Ref
}
return ""
}
// URLNgdotResolveReferenceArgs is the generated, field-less argument message
// for the URL ResolveReference fuzz call (message index 25).
type URLNgdotResolveReferenceArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotResolveReferenceArgs) Reset() {
*x = URLNgdotResolveReferenceArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotResolveReferenceArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotResolveReferenceArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotResolveReferenceArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotResolveReferenceArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotResolveReferenceArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// URLNgdotQueryArgs is the generated, field-less argument message for the
// URL Query fuzz call (message index 26).
type URLNgdotQueryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotQueryArgs) Reset() {
*x = URLNgdotQueryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotQueryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotQueryArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotQueryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotQueryArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotQueryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// URLNgdotRequestURIArgs is the generated, field-less argument message for
// the URL RequestURI fuzz call (message index 27).
type URLNgdotRequestURIArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotRequestURIArgs) Reset() {
*x = URLNgdotRequestURIArgs{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotRequestURIArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotRequestURIArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotRequestURIArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotRequestURIArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotRequestURIArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// URLNgdotHostnameArgs is the generated, field-less argument message for the
// URL Hostname fuzz call (message index 28).
type URLNgdotHostnameArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotHostnameArgs) Reset() {
*x = URLNgdotHostnameArgs{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotHostnameArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotHostnameArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotHostnameArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotHostnameArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotHostnameArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
// URLNgdotPortArgs is the generated, field-less argument message for the
// URL Port fuzz call (message index 29).
type URLNgdotPortArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotPortArgs) Reset() {
*x = URLNgdotPortArgs{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotPortArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotPortArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotPortArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotPortArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotPortArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
// URLNgdotMarshalBinaryArgs is the generated, field-less argument message for
// the URL MarshalBinary fuzz call (message index 30).
type URLNgdotMarshalBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotMarshalBinaryArgs) Reset() {
*x = URLNgdotMarshalBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotMarshalBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotMarshalBinaryArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotMarshalBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotMarshalBinaryArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotMarshalBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}
// URLNgdotAppendBinaryArgs is the generated argument message for the URL
// AppendBinary fuzz call (message index 31); it carries the byte slice b.
type URLNgdotAppendBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotAppendBinaryArgs) Reset() {
*x = URLNgdotAppendBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotAppendBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotAppendBinaryArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotAppendBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotAppendBinaryArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotAppendBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
// GetB returns the b field, or nil for a nil receiver.
func (x *URLNgdotAppendBinaryArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// URLNgdotUnmarshalBinaryArgs is the generated argument message for the URL
// UnmarshalBinary fuzz call (message index 32); it carries the input bytes.
type URLNgdotUnmarshalBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Text []byte `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotUnmarshalBinaryArgs) Reset() {
*x = URLNgdotUnmarshalBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotUnmarshalBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotUnmarshalBinaryArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotUnmarshalBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotUnmarshalBinaryArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotUnmarshalBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}
// GetText returns the text field, or nil for a nil receiver.
func (x *URLNgdotUnmarshalBinaryArgs) GetText() []byte {
if x != nil {
return x.Text
}
return nil
}
// URLNgdotCloneArgs is the generated, field-less argument message for the
// URL Clone fuzz call (message index 33).
type URLNgdotCloneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *URLNgdotCloneArgs) Reset() {
*x = URLNgdotCloneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *URLNgdotCloneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*URLNgdotCloneArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *URLNgdotCloneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use URLNgdotCloneArgs.ProtoReflect.Descriptor instead.
func (*URLNgdotCloneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{33}
}
// NgoloFuzzOne is the generated message representing one fuzzed operation
// (message index 34): exactly one of the *Args variants below may be set in
// the Item oneof.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_QueryUnescape
// *NgoloFuzzOne_PathUnescape
// *NgoloFuzzOne_QueryEscape
// *NgoloFuzzOne_PathEscape
// *NgoloFuzzOne_User
// *NgoloFuzzOne_UserPassword
// *NgoloFuzzOne_UserinfoNgdotUsername
// *NgoloFuzzOne_UserinfoNgdotPassword
// *NgoloFuzzOne_UserinfoNgdotString
// *NgoloFuzzOne_Parse
// *NgoloFuzzOne_ParseRequestURI
// *NgoloFuzzOne_URLNgdotEscapedPath
// *NgoloFuzzOne_URLNgdotEscapedFragment
// *NgoloFuzzOne_URLNgdotString
// *NgoloFuzzOne_URLNgdotRedacted
// *NgoloFuzzOne_ValuesNgdotGet
// *NgoloFuzzOne_ValuesNgdotSet
// *NgoloFuzzOne_ValuesNgdotAdd
// *NgoloFuzzOne_ValuesNgdotDel
// *NgoloFuzzOne_ValuesNgdotHas
// *NgoloFuzzOne_ValuesNgdotClone
// *NgoloFuzzOne_ParseQuery
// *NgoloFuzzOne_ValuesNgdotEncode
// *NgoloFuzzOne_URLNgdotIsAbs
// *NgoloFuzzOne_URLNgdotParse
// *NgoloFuzzOne_URLNgdotResolveReference
// *NgoloFuzzOne_URLNgdotQuery
// *NgoloFuzzOne_URLNgdotRequestURI
// *NgoloFuzzOne_URLNgdotHostname
// *NgoloFuzzOne_URLNgdotPort
// *NgoloFuzzOne_URLNgdotMarshalBinary
// *NgoloFuzzOne_URLNgdotAppendBinary
// *NgoloFuzzOne_URLNgdotUnmarshalBinary
// *NgoloFuzzOne_URLNgdotClone
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes x and re-binds its generated message info.
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String implements fmt.Stringer via the protoimpl runtime.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns the reflective view of x, lazily caching the
// generated message info on first use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[34]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{34}
}
// GetItem returns the oneof wrapper currently stored in Item, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Each Get* accessor below returns the corresponding oneof field when that
// variant is set, and nil otherwise (including on a nil receiver).
func (x *NgoloFuzzOne) GetQueryUnescape() *QueryUnescapeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_QueryUnescape); ok {
return x.QueryUnescape
}
}
return nil
}
func (x *NgoloFuzzOne) GetPathUnescape() *PathUnescapeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PathUnescape); ok {
return x.PathUnescape
}
}
return nil
}
func (x *NgoloFuzzOne) GetQueryEscape() *QueryEscapeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_QueryEscape); ok {
return x.QueryEscape
}
}
return nil
}
func (x *NgoloFuzzOne) GetPathEscape() *PathEscapeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PathEscape); ok {
return x.PathEscape
}
}
return nil
}
func (x *NgoloFuzzOne) GetUser() *UserArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_User); ok {
return x.User
}
}
return nil
}
func (x *NgoloFuzzOne) GetUserPassword() *UserPasswordArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UserPassword); ok {
return x.UserPassword
}
}
return nil
}
func (x *NgoloFuzzOne) GetUserinfoNgdotUsername() *UserinfoNgdotUsernameArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UserinfoNgdotUsername); ok {
return x.UserinfoNgdotUsername
}
}
return nil
}
func (x *NgoloFuzzOne) GetUserinfoNgdotPassword() *UserinfoNgdotPasswordArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UserinfoNgdotPassword); ok {
return x.UserinfoNgdotPassword
}
}
return nil
}
func (x *NgoloFuzzOne) GetUserinfoNgdotString() *UserinfoNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UserinfoNgdotString); ok {
return x.UserinfoNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetParse() *ParseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Parse); ok {
return x.Parse
}
}
return nil
}
func (x *NgoloFuzzOne) GetParseRequestURI() *ParseRequestURIArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ParseRequestURI); ok {
return x.ParseRequestURI
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotEscapedPath() *URLNgdotEscapedPathArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotEscapedPath); ok {
return x.URLNgdotEscapedPath
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotEscapedFragment() *URLNgdotEscapedFragmentArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotEscapedFragment); ok {
return x.URLNgdotEscapedFragment
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotString() *URLNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotString); ok {
return x.URLNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotRedacted() *URLNgdotRedactedArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotRedacted); ok {
return x.URLNgdotRedacted
}
}
return nil
}
func (x *NgoloFuzzOne) GetValuesNgdotGet() *ValuesNgdotGetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValuesNgdotGet); ok {
return x.ValuesNgdotGet
}
}
return nil
}
func (x *NgoloFuzzOne) GetValuesNgdotSet() *ValuesNgdotSetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValuesNgdotSet); ok {
return x.ValuesNgdotSet
}
}
return nil
}
func (x *NgoloFuzzOne) GetValuesNgdotAdd() *ValuesNgdotAddArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValuesNgdotAdd); ok {
return x.ValuesNgdotAdd
}
}
return nil
}
func (x *NgoloFuzzOne) GetValuesNgdotDel() *ValuesNgdotDelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValuesNgdotDel); ok {
return x.ValuesNgdotDel
}
}
return nil
}
func (x *NgoloFuzzOne) GetValuesNgdotHas() *ValuesNgdotHasArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValuesNgdotHas); ok {
return x.ValuesNgdotHas
}
}
return nil
}
func (x *NgoloFuzzOne) GetValuesNgdotClone() *ValuesNgdotCloneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValuesNgdotClone); ok {
return x.ValuesNgdotClone
}
}
return nil
}
func (x *NgoloFuzzOne) GetParseQuery() *ParseQueryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ParseQuery); ok {
return x.ParseQuery
}
}
return nil
}
func (x *NgoloFuzzOne) GetValuesNgdotEncode() *ValuesNgdotEncodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValuesNgdotEncode); ok {
return x.ValuesNgdotEncode
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotIsAbs() *URLNgdotIsAbsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotIsAbs); ok {
return x.URLNgdotIsAbs
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotParse() *URLNgdotParseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotParse); ok {
return x.URLNgdotParse
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotResolveReference() *URLNgdotResolveReferenceArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotResolveReference); ok {
return x.URLNgdotResolveReference
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotQuery() *URLNgdotQueryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotQuery); ok {
return x.URLNgdotQuery
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotRequestURI() *URLNgdotRequestURIArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotRequestURI); ok {
return x.URLNgdotRequestURI
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotHostname() *URLNgdotHostnameArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotHostname); ok {
return x.URLNgdotHostname
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotPort() *URLNgdotPortArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotPort); ok {
return x.URLNgdotPort
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotMarshalBinary() *URLNgdotMarshalBinaryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotMarshalBinary); ok {
return x.URLNgdotMarshalBinary
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotAppendBinary() *URLNgdotAppendBinaryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotAppendBinary); ok {
return x.URLNgdotAppendBinary
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotUnmarshalBinary() *URLNgdotUnmarshalBinaryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotUnmarshalBinary); ok {
return x.URLNgdotUnmarshalBinary
}
}
return nil
}
func (x *NgoloFuzzOne) GetURLNgdotClone() *URLNgdotCloneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_URLNgdotClone); ok {
return x.URLNgdotClone
}
}
return nil
}
// isNgoloFuzzOne_Item is the closed interface satisfied by every wrapper
// type that may be stored in NgoloFuzzOne.Item (the protobuf oneof).
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// Oneof wrapper types: each struct below carries exactly one candidate
// value for NgoloFuzzOne.Item; the protobuf field number is recorded in
// the struct tag.
type NgoloFuzzOne_QueryUnescape struct {
QueryUnescape *QueryUnescapeArgs `protobuf:"bytes,1,opt,name=QueryUnescape,proto3,oneof"`
}
type NgoloFuzzOne_PathUnescape struct {
PathUnescape *PathUnescapeArgs `protobuf:"bytes,2,opt,name=PathUnescape,proto3,oneof"`
}
type NgoloFuzzOne_QueryEscape struct {
QueryEscape *QueryEscapeArgs `protobuf:"bytes,3,opt,name=QueryEscape,proto3,oneof"`
}
type NgoloFuzzOne_PathEscape struct {
PathEscape *PathEscapeArgs `protobuf:"bytes,4,opt,name=PathEscape,proto3,oneof"`
}
type NgoloFuzzOne_User struct {
User *UserArgs `protobuf:"bytes,5,opt,name=User,proto3,oneof"`
}
type NgoloFuzzOne_UserPassword struct {
UserPassword *UserPasswordArgs `protobuf:"bytes,6,opt,name=UserPassword,proto3,oneof"`
}
type NgoloFuzzOne_UserinfoNgdotUsername struct {
UserinfoNgdotUsername *UserinfoNgdotUsernameArgs `protobuf:"bytes,7,opt,name=UserinfoNgdotUsername,proto3,oneof"`
}
type NgoloFuzzOne_UserinfoNgdotPassword struct {
UserinfoNgdotPassword *UserinfoNgdotPasswordArgs `protobuf:"bytes,8,opt,name=UserinfoNgdotPassword,proto3,oneof"`
}
type NgoloFuzzOne_UserinfoNgdotString struct {
UserinfoNgdotString *UserinfoNgdotStringArgs `protobuf:"bytes,9,opt,name=UserinfoNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_Parse struct {
Parse *ParseArgs `protobuf:"bytes,10,opt,name=Parse,proto3,oneof"`
}
type NgoloFuzzOne_ParseRequestURI struct {
ParseRequestURI *ParseRequestURIArgs `protobuf:"bytes,11,opt,name=ParseRequestURI,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotEscapedPath struct {
URLNgdotEscapedPath *URLNgdotEscapedPathArgs `protobuf:"bytes,12,opt,name=URLNgdotEscapedPath,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotEscapedFragment struct {
URLNgdotEscapedFragment *URLNgdotEscapedFragmentArgs `protobuf:"bytes,13,opt,name=URLNgdotEscapedFragment,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotString struct {
URLNgdotString *URLNgdotStringArgs `protobuf:"bytes,14,opt,name=URLNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotRedacted struct {
URLNgdotRedacted *URLNgdotRedactedArgs `protobuf:"bytes,15,opt,name=URLNgdotRedacted,proto3,oneof"`
}
type NgoloFuzzOne_ValuesNgdotGet struct {
ValuesNgdotGet *ValuesNgdotGetArgs `protobuf:"bytes,16,opt,name=ValuesNgdotGet,proto3,oneof"`
}
type NgoloFuzzOne_ValuesNgdotSet struct {
ValuesNgdotSet *ValuesNgdotSetArgs `protobuf:"bytes,17,opt,name=ValuesNgdotSet,proto3,oneof"`
}
type NgoloFuzzOne_ValuesNgdotAdd struct {
ValuesNgdotAdd *ValuesNgdotAddArgs `protobuf:"bytes,18,opt,name=ValuesNgdotAdd,proto3,oneof"`
}
type NgoloFuzzOne_ValuesNgdotDel struct {
ValuesNgdotDel *ValuesNgdotDelArgs `protobuf:"bytes,19,opt,name=ValuesNgdotDel,proto3,oneof"`
}
type NgoloFuzzOne_ValuesNgdotHas struct {
ValuesNgdotHas *ValuesNgdotHasArgs `protobuf:"bytes,20,opt,name=ValuesNgdotHas,proto3,oneof"`
}
type NgoloFuzzOne_ValuesNgdotClone struct {
ValuesNgdotClone *ValuesNgdotCloneArgs `protobuf:"bytes,21,opt,name=ValuesNgdotClone,proto3,oneof"`
}
type NgoloFuzzOne_ParseQuery struct {
ParseQuery *ParseQueryArgs `protobuf:"bytes,22,opt,name=ParseQuery,proto3,oneof"`
}
type NgoloFuzzOne_ValuesNgdotEncode struct {
ValuesNgdotEncode *ValuesNgdotEncodeArgs `protobuf:"bytes,23,opt,name=ValuesNgdotEncode,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotIsAbs struct {
URLNgdotIsAbs *URLNgdotIsAbsArgs `protobuf:"bytes,24,opt,name=URLNgdotIsAbs,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotParse struct {
URLNgdotParse *URLNgdotParseArgs `protobuf:"bytes,25,opt,name=URLNgdotParse,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotResolveReference struct {
URLNgdotResolveReference *URLNgdotResolveReferenceArgs `protobuf:"bytes,26,opt,name=URLNgdotResolveReference,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotQuery struct {
URLNgdotQuery *URLNgdotQueryArgs `protobuf:"bytes,27,opt,name=URLNgdotQuery,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotRequestURI struct {
URLNgdotRequestURI *URLNgdotRequestURIArgs `protobuf:"bytes,28,opt,name=URLNgdotRequestURI,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotHostname struct {
URLNgdotHostname *URLNgdotHostnameArgs `protobuf:"bytes,29,opt,name=URLNgdotHostname,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotPort struct {
URLNgdotPort *URLNgdotPortArgs `protobuf:"bytes,30,opt,name=URLNgdotPort,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotMarshalBinary struct {
URLNgdotMarshalBinary *URLNgdotMarshalBinaryArgs `protobuf:"bytes,31,opt,name=URLNgdotMarshalBinary,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotAppendBinary struct {
URLNgdotAppendBinary *URLNgdotAppendBinaryArgs `protobuf:"bytes,32,opt,name=URLNgdotAppendBinary,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotUnmarshalBinary struct {
URLNgdotUnmarshalBinary *URLNgdotUnmarshalBinaryArgs `protobuf:"bytes,33,opt,name=URLNgdotUnmarshalBinary,proto3,oneof"`
}
type NgoloFuzzOne_URLNgdotClone struct {
URLNgdotClone *URLNgdotCloneArgs `protobuf:"bytes,34,opt,name=URLNgdotClone,proto3,oneof"`
}
// Marker methods: tag each wrapper type above as a valid member of the
// NgoloFuzzOne.Item oneof (they carry no behavior of their own).
func (*NgoloFuzzOne_QueryUnescape) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PathUnescape) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_QueryEscape) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PathEscape) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_User) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UserPassword) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UserinfoNgdotUsername) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UserinfoNgdotPassword) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UserinfoNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Parse) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ParseRequestURI) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotEscapedPath) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotEscapedFragment) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotRedacted) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValuesNgdotGet) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValuesNgdotSet) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValuesNgdotAdd) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValuesNgdotDel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValuesNgdotHas) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValuesNgdotClone) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ParseQuery) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValuesNgdotEncode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotIsAbs) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotParse) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotResolveReference) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotQuery) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotRequestURI) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotHostname) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotPort) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotMarshalBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotAppendBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotUnmarshalBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_URLNgdotClone) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny carries a single primitive fuzz value (double, int64, bool,
// string, or bytes) as a protobuf oneof.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to the zero message and re-registers its message info.
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message with the protobuf text formatter.
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect exposes the message through the protobuf reflection API,
// lazily caching the message info on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[35]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{35}
}
// GetItem returns whichever oneof wrapper is currently stored, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// GetDoubleArgs returns the double variant, or 0 if another variant is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
// GetInt64Args returns the int64 variant, or 0 if another variant is set.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
// GetBoolArgs returns the bool variant, or false if another variant is set.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
// GetStringArgs returns the string variant, or "" if another variant is set.
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
// GetBytesArgs returns the bytes variant, or nil if another variant is set.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the closed interface satisfied by every wrapper
// type that may be stored in NgoloFuzzAny.Item.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
// Oneof wrapper types for NgoloFuzzAny.Item, one per primitive kind.
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
// Marker methods tagging the wrappers as valid NgoloFuzzAny.Item members.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne operations to be replayed against the target API.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset restores x to the zero message and re-registers its message info.
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String renders the message with the protobuf text formatter.
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect exposes the message through the protobuf reflection API.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[36]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{36}
}
// GetList returns the operation sequence, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"!\n" +
"\x11QueryUnescapeArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\" \n" +
"\x10PathUnescapeArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x1f\n" +
"\x0fQueryEscapeArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x1e\n" +
"\x0ePathEscapeArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"&\n" +
"\bUserArgs\x12\x1a\n" +
"\busername\x18\x01 \x01(\tR\busername\"J\n" +
"\x10UserPasswordArgs\x12\x1a\n" +
"\busername\x18\x01 \x01(\tR\busername\x12\x1a\n" +
"\bpassword\x18\x02 \x01(\tR\bpassword\"\x1b\n" +
"\x19UserinfoNgdotUsernameArgs\"\x1b\n" +
"\x19UserinfoNgdotPasswordArgs\"\x19\n" +
"\x17UserinfoNgdotStringArgs\"#\n" +
"\tParseArgs\x12\x16\n" +
"\x06rawURL\x18\x01 \x01(\tR\x06rawURL\"-\n" +
"\x13ParseRequestURIArgs\x12\x16\n" +
"\x06rawURL\x18\x01 \x01(\tR\x06rawURL\"\x19\n" +
"\x17URLNgdotEscapedPathArgs\"\x1d\n" +
"\x1bURLNgdotEscapedFragmentArgs\"\x14\n" +
"\x12URLNgdotStringArgs\"\x16\n" +
"\x14URLNgdotRedactedArgs\"&\n" +
"\x12ValuesNgdotGetArgs\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\"<\n" +
"\x12ValuesNgdotSetArgs\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value\"<\n" +
"\x12ValuesNgdotAddArgs\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value\"&\n" +
"\x12ValuesNgdotDelArgs\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\"&\n" +
"\x12ValuesNgdotHasArgs\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\"\x16\n" +
"\x14ValuesNgdotCloneArgs\"&\n" +
"\x0eParseQueryArgs\x12\x14\n" +
"\x05query\x18\x01 \x01(\tR\x05query\"\x17\n" +
"\x15ValuesNgdotEncodeArgs\"\x13\n" +
"\x11URLNgdotIsAbsArgs\"%\n" +
"\x11URLNgdotParseArgs\x12\x10\n" +
"\x03ref\x18\x01 \x01(\tR\x03ref\"\x1e\n" +
"\x1cURLNgdotResolveReferenceArgs\"\x13\n" +
"\x11URLNgdotQueryArgs\"\x18\n" +
"\x16URLNgdotRequestURIArgs\"\x16\n" +
"\x14URLNgdotHostnameArgs\"\x12\n" +
"\x10URLNgdotPortArgs\"\x1b\n" +
"\x19URLNgdotMarshalBinaryArgs\"(\n" +
"\x18URLNgdotAppendBinaryArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"1\n" +
"\x1bURLNgdotUnmarshalBinaryArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\fR\x04text\"\x13\n" +
"\x11URLNgdotCloneArgs\"\xba\x14\n" +
"\fNgoloFuzzOne\x12D\n" +
"\rQueryUnescape\x18\x01 \x01(\v2\x1c.ngolofuzz.QueryUnescapeArgsH\x00R\rQueryUnescape\x12A\n" +
"\fPathUnescape\x18\x02 \x01(\v2\x1b.ngolofuzz.PathUnescapeArgsH\x00R\fPathUnescape\x12>\n" +
"\vQueryEscape\x18\x03 \x01(\v2\x1a.ngolofuzz.QueryEscapeArgsH\x00R\vQueryEscape\x12;\n" +
"\n" +
"PathEscape\x18\x04 \x01(\v2\x19.ngolofuzz.PathEscapeArgsH\x00R\n" +
"PathEscape\x12)\n" +
"\x04User\x18\x05 \x01(\v2\x13.ngolofuzz.UserArgsH\x00R\x04User\x12A\n" +
"\fUserPassword\x18\x06 \x01(\v2\x1b.ngolofuzz.UserPasswordArgsH\x00R\fUserPassword\x12\\\n" +
"\x15UserinfoNgdotUsername\x18\a \x01(\v2$.ngolofuzz.UserinfoNgdotUsernameArgsH\x00R\x15UserinfoNgdotUsername\x12\\\n" +
"\x15UserinfoNgdotPassword\x18\b \x01(\v2$.ngolofuzz.UserinfoNgdotPasswordArgsH\x00R\x15UserinfoNgdotPassword\x12V\n" +
"\x13UserinfoNgdotString\x18\t \x01(\v2\".ngolofuzz.UserinfoNgdotStringArgsH\x00R\x13UserinfoNgdotString\x12,\n" +
"\x05Parse\x18\n" +
" \x01(\v2\x14.ngolofuzz.ParseArgsH\x00R\x05Parse\x12J\n" +
"\x0fParseRequestURI\x18\v \x01(\v2\x1e.ngolofuzz.ParseRequestURIArgsH\x00R\x0fParseRequestURI\x12V\n" +
"\x13URLNgdotEscapedPath\x18\f \x01(\v2\".ngolofuzz.URLNgdotEscapedPathArgsH\x00R\x13URLNgdotEscapedPath\x12b\n" +
"\x17URLNgdotEscapedFragment\x18\r \x01(\v2&.ngolofuzz.URLNgdotEscapedFragmentArgsH\x00R\x17URLNgdotEscapedFragment\x12G\n" +
"\x0eURLNgdotString\x18\x0e \x01(\v2\x1d.ngolofuzz.URLNgdotStringArgsH\x00R\x0eURLNgdotString\x12M\n" +
"\x10URLNgdotRedacted\x18\x0f \x01(\v2\x1f.ngolofuzz.URLNgdotRedactedArgsH\x00R\x10URLNgdotRedacted\x12G\n" +
"\x0eValuesNgdotGet\x18\x10 \x01(\v2\x1d.ngolofuzz.ValuesNgdotGetArgsH\x00R\x0eValuesNgdotGet\x12G\n" +
"\x0eValuesNgdotSet\x18\x11 \x01(\v2\x1d.ngolofuzz.ValuesNgdotSetArgsH\x00R\x0eValuesNgdotSet\x12G\n" +
"\x0eValuesNgdotAdd\x18\x12 \x01(\v2\x1d.ngolofuzz.ValuesNgdotAddArgsH\x00R\x0eValuesNgdotAdd\x12G\n" +
"\x0eValuesNgdotDel\x18\x13 \x01(\v2\x1d.ngolofuzz.ValuesNgdotDelArgsH\x00R\x0eValuesNgdotDel\x12G\n" +
"\x0eValuesNgdotHas\x18\x14 \x01(\v2\x1d.ngolofuzz.ValuesNgdotHasArgsH\x00R\x0eValuesNgdotHas\x12M\n" +
"\x10ValuesNgdotClone\x18\x15 \x01(\v2\x1f.ngolofuzz.ValuesNgdotCloneArgsH\x00R\x10ValuesNgdotClone\x12;\n" +
"\n" +
"ParseQuery\x18\x16 \x01(\v2\x19.ngolofuzz.ParseQueryArgsH\x00R\n" +
"ParseQuery\x12P\n" +
"\x11ValuesNgdotEncode\x18\x17 \x01(\v2 .ngolofuzz.ValuesNgdotEncodeArgsH\x00R\x11ValuesNgdotEncode\x12D\n" +
"\rURLNgdotIsAbs\x18\x18 \x01(\v2\x1c.ngolofuzz.URLNgdotIsAbsArgsH\x00R\rURLNgdotIsAbs\x12D\n" +
"\rURLNgdotParse\x18\x19 \x01(\v2\x1c.ngolofuzz.URLNgdotParseArgsH\x00R\rURLNgdotParse\x12e\n" +
"\x18URLNgdotResolveReference\x18\x1a \x01(\v2'.ngolofuzz.URLNgdotResolveReferenceArgsH\x00R\x18URLNgdotResolveReference\x12D\n" +
"\rURLNgdotQuery\x18\x1b \x01(\v2\x1c.ngolofuzz.URLNgdotQueryArgsH\x00R\rURLNgdotQuery\x12S\n" +
"\x12URLNgdotRequestURI\x18\x1c \x01(\v2!.ngolofuzz.URLNgdotRequestURIArgsH\x00R\x12URLNgdotRequestURI\x12M\n" +
"\x10URLNgdotHostname\x18\x1d \x01(\v2\x1f.ngolofuzz.URLNgdotHostnameArgsH\x00R\x10URLNgdotHostname\x12A\n" +
"\fURLNgdotPort\x18\x1e \x01(\v2\x1b.ngolofuzz.URLNgdotPortArgsH\x00R\fURLNgdotPort\x12\\\n" +
"\x15URLNgdotMarshalBinary\x18\x1f \x01(\v2$.ngolofuzz.URLNgdotMarshalBinaryArgsH\x00R\x15URLNgdotMarshalBinary\x12Y\n" +
"\x14URLNgdotAppendBinary\x18 \x01(\v2#.ngolofuzz.URLNgdotAppendBinaryArgsH\x00R\x14URLNgdotAppendBinary\x12b\n" +
"\x17URLNgdotUnmarshalBinary\x18! \x01(\v2&.ngolofuzz.URLNgdotUnmarshalBinaryArgsH\x00R\x17URLNgdotUnmarshalBinary\x12D\n" +
"\rURLNgdotClone\x18\" \x01(\v2\x1c.ngolofuzz.URLNgdotCloneArgsH\x00R\rURLNgdotCloneB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x14Z\x12./;fuzz_ng_net_urlb\x06proto3"
// Lazily-computed gzip form of the raw file descriptor, guarded by a
// sync.Once so compression happens at most once.
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP compresses the raw descriptor on first
// call and returns the cached result thereafter (used by Descriptor()).
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Runtime message-info slots, one per message declared in ngolofuzz.proto.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 37)
var file_ngolofuzz_proto_goTypes = []any{
(*QueryUnescapeArgs)(nil), // 0: ngolofuzz.QueryUnescapeArgs
(*PathUnescapeArgs)(nil), // 1: ngolofuzz.PathUnescapeArgs
(*QueryEscapeArgs)(nil), // 2: ngolofuzz.QueryEscapeArgs
(*PathEscapeArgs)(nil), // 3: ngolofuzz.PathEscapeArgs
(*UserArgs)(nil), // 4: ngolofuzz.UserArgs
(*UserPasswordArgs)(nil), // 5: ngolofuzz.UserPasswordArgs
(*UserinfoNgdotUsernameArgs)(nil), // 6: ngolofuzz.UserinfoNgdotUsernameArgs
(*UserinfoNgdotPasswordArgs)(nil), // 7: ngolofuzz.UserinfoNgdotPasswordArgs
(*UserinfoNgdotStringArgs)(nil), // 8: ngolofuzz.UserinfoNgdotStringArgs
(*ParseArgs)(nil), // 9: ngolofuzz.ParseArgs
(*ParseRequestURIArgs)(nil), // 10: ngolofuzz.ParseRequestURIArgs
(*URLNgdotEscapedPathArgs)(nil), // 11: ngolofuzz.URLNgdotEscapedPathArgs
(*URLNgdotEscapedFragmentArgs)(nil), // 12: ngolofuzz.URLNgdotEscapedFragmentArgs
(*URLNgdotStringArgs)(nil), // 13: ngolofuzz.URLNgdotStringArgs
(*URLNgdotRedactedArgs)(nil), // 14: ngolofuzz.URLNgdotRedactedArgs
(*ValuesNgdotGetArgs)(nil), // 15: ngolofuzz.ValuesNgdotGetArgs
(*ValuesNgdotSetArgs)(nil), // 16: ngolofuzz.ValuesNgdotSetArgs
(*ValuesNgdotAddArgs)(nil), // 17: ngolofuzz.ValuesNgdotAddArgs
(*ValuesNgdotDelArgs)(nil), // 18: ngolofuzz.ValuesNgdotDelArgs
(*ValuesNgdotHasArgs)(nil), // 19: ngolofuzz.ValuesNgdotHasArgs
(*ValuesNgdotCloneArgs)(nil), // 20: ngolofuzz.ValuesNgdotCloneArgs
(*ParseQueryArgs)(nil), // 21: ngolofuzz.ParseQueryArgs
(*ValuesNgdotEncodeArgs)(nil), // 22: ngolofuzz.ValuesNgdotEncodeArgs
(*URLNgdotIsAbsArgs)(nil), // 23: ngolofuzz.URLNgdotIsAbsArgs
(*URLNgdotParseArgs)(nil), // 24: ngolofuzz.URLNgdotParseArgs
(*URLNgdotResolveReferenceArgs)(nil), // 25: ngolofuzz.URLNgdotResolveReferenceArgs
(*URLNgdotQueryArgs)(nil), // 26: ngolofuzz.URLNgdotQueryArgs
(*URLNgdotRequestURIArgs)(nil), // 27: ngolofuzz.URLNgdotRequestURIArgs
(*URLNgdotHostnameArgs)(nil), // 28: ngolofuzz.URLNgdotHostnameArgs
(*URLNgdotPortArgs)(nil), // 29: ngolofuzz.URLNgdotPortArgs
(*URLNgdotMarshalBinaryArgs)(nil), // 30: ngolofuzz.URLNgdotMarshalBinaryArgs
(*URLNgdotAppendBinaryArgs)(nil), // 31: ngolofuzz.URLNgdotAppendBinaryArgs
(*URLNgdotUnmarshalBinaryArgs)(nil), // 32: ngolofuzz.URLNgdotUnmarshalBinaryArgs
(*URLNgdotCloneArgs)(nil), // 33: ngolofuzz.URLNgdotCloneArgs
(*NgoloFuzzOne)(nil), // 34: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 35: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 36: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.QueryUnescape:type_name -> ngolofuzz.QueryUnescapeArgs
1, // 1: ngolofuzz.NgoloFuzzOne.PathUnescape:type_name -> ngolofuzz.PathUnescapeArgs
2, // 2: ngolofuzz.NgoloFuzzOne.QueryEscape:type_name -> ngolofuzz.QueryEscapeArgs
3, // 3: ngolofuzz.NgoloFuzzOne.PathEscape:type_name -> ngolofuzz.PathEscapeArgs
4, // 4: ngolofuzz.NgoloFuzzOne.User:type_name -> ngolofuzz.UserArgs
5, // 5: ngolofuzz.NgoloFuzzOne.UserPassword:type_name -> ngolofuzz.UserPasswordArgs
6, // 6: ngolofuzz.NgoloFuzzOne.UserinfoNgdotUsername:type_name -> ngolofuzz.UserinfoNgdotUsernameArgs
7, // 7: ngolofuzz.NgoloFuzzOne.UserinfoNgdotPassword:type_name -> ngolofuzz.UserinfoNgdotPasswordArgs
8, // 8: ngolofuzz.NgoloFuzzOne.UserinfoNgdotString:type_name -> ngolofuzz.UserinfoNgdotStringArgs
9, // 9: ngolofuzz.NgoloFuzzOne.Parse:type_name -> ngolofuzz.ParseArgs
10, // 10: ngolofuzz.NgoloFuzzOne.ParseRequestURI:type_name -> ngolofuzz.ParseRequestURIArgs
11, // 11: ngolofuzz.NgoloFuzzOne.URLNgdotEscapedPath:type_name -> ngolofuzz.URLNgdotEscapedPathArgs
12, // 12: ngolofuzz.NgoloFuzzOne.URLNgdotEscapedFragment:type_name -> ngolofuzz.URLNgdotEscapedFragmentArgs
13, // 13: ngolofuzz.NgoloFuzzOne.URLNgdotString:type_name -> ngolofuzz.URLNgdotStringArgs
14, // 14: ngolofuzz.NgoloFuzzOne.URLNgdotRedacted:type_name -> ngolofuzz.URLNgdotRedactedArgs
15, // 15: ngolofuzz.NgoloFuzzOne.ValuesNgdotGet:type_name -> ngolofuzz.ValuesNgdotGetArgs
16, // 16: ngolofuzz.NgoloFuzzOne.ValuesNgdotSet:type_name -> ngolofuzz.ValuesNgdotSetArgs
17, // 17: ngolofuzz.NgoloFuzzOne.ValuesNgdotAdd:type_name -> ngolofuzz.ValuesNgdotAddArgs
18, // 18: ngolofuzz.NgoloFuzzOne.ValuesNgdotDel:type_name -> ngolofuzz.ValuesNgdotDelArgs
19, // 19: ngolofuzz.NgoloFuzzOne.ValuesNgdotHas:type_name -> ngolofuzz.ValuesNgdotHasArgs
20, // 20: ngolofuzz.NgoloFuzzOne.ValuesNgdotClone:type_name -> ngolofuzz.ValuesNgdotCloneArgs
21, // 21: ngolofuzz.NgoloFuzzOne.ParseQuery:type_name -> ngolofuzz.ParseQueryArgs
22, // 22: ngolofuzz.NgoloFuzzOne.ValuesNgdotEncode:type_name -> ngolofuzz.ValuesNgdotEncodeArgs
23, // 23: ngolofuzz.NgoloFuzzOne.URLNgdotIsAbs:type_name -> ngolofuzz.URLNgdotIsAbsArgs
24, // 24: ngolofuzz.NgoloFuzzOne.URLNgdotParse:type_name -> ngolofuzz.URLNgdotParseArgs
25, // 25: ngolofuzz.NgoloFuzzOne.URLNgdotResolveReference:type_name -> ngolofuzz.URLNgdotResolveReferenceArgs
26, // 26: ngolofuzz.NgoloFuzzOne.URLNgdotQuery:type_name -> ngolofuzz.URLNgdotQueryArgs
27, // 27: ngolofuzz.NgoloFuzzOne.URLNgdotRequestURI:type_name -> ngolofuzz.URLNgdotRequestURIArgs
28, // 28: ngolofuzz.NgoloFuzzOne.URLNgdotHostname:type_name -> ngolofuzz.URLNgdotHostnameArgs
29, // 29: ngolofuzz.NgoloFuzzOne.URLNgdotPort:type_name -> ngolofuzz.URLNgdotPortArgs
30, // 30: ngolofuzz.NgoloFuzzOne.URLNgdotMarshalBinary:type_name -> ngolofuzz.URLNgdotMarshalBinaryArgs
31, // 31: ngolofuzz.NgoloFuzzOne.URLNgdotAppendBinary:type_name -> ngolofuzz.URLNgdotAppendBinaryArgs
32, // 32: ngolofuzz.NgoloFuzzOne.URLNgdotUnmarshalBinary:type_name -> ngolofuzz.URLNgdotUnmarshalBinaryArgs
33, // 33: ngolofuzz.NgoloFuzzOne.URLNgdotClone:type_name -> ngolofuzz.URLNgdotCloneArgs
34, // 34: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
35, // [35:35] is the sub-list for method output_type
35, // [35:35] is the sub-list for method input_type
35, // [35:35] is the sub-list for extension type_name
35, // [35:35] is the sub-list for extension extendee
0, // [0:35] is the sub-list for field type_name
}
// Register the file descriptor with the protobuf runtime at program start.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init wires the generated Go types to the protobuf
// runtime; it is idempotent (a second call returns immediately).
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// OneofWrappers must list every wrapper type so the runtime can decode
// the NgoloFuzzOne.Item oneof (msgTypes[34] is NgoloFuzzOne).
file_ngolofuzz_proto_msgTypes[34].OneofWrappers = []any{
(*NgoloFuzzOne_QueryUnescape)(nil),
(*NgoloFuzzOne_PathUnescape)(nil),
(*NgoloFuzzOne_QueryEscape)(nil),
(*NgoloFuzzOne_PathEscape)(nil),
(*NgoloFuzzOne_User)(nil),
(*NgoloFuzzOne_UserPassword)(nil),
(*NgoloFuzzOne_UserinfoNgdotUsername)(nil),
(*NgoloFuzzOne_UserinfoNgdotPassword)(nil),
(*NgoloFuzzOne_UserinfoNgdotString)(nil),
(*NgoloFuzzOne_Parse)(nil),
(*NgoloFuzzOne_ParseRequestURI)(nil),
(*NgoloFuzzOne_URLNgdotEscapedPath)(nil),
(*NgoloFuzzOne_URLNgdotEscapedFragment)(nil),
(*NgoloFuzzOne_URLNgdotString)(nil),
(*NgoloFuzzOne_URLNgdotRedacted)(nil),
(*NgoloFuzzOne_ValuesNgdotGet)(nil),
(*NgoloFuzzOne_ValuesNgdotSet)(nil),
(*NgoloFuzzOne_ValuesNgdotAdd)(nil),
(*NgoloFuzzOne_ValuesNgdotDel)(nil),
(*NgoloFuzzOne_ValuesNgdotHas)(nil),
(*NgoloFuzzOne_ValuesNgdotClone)(nil),
(*NgoloFuzzOne_ParseQuery)(nil),
(*NgoloFuzzOne_ValuesNgdotEncode)(nil),
(*NgoloFuzzOne_URLNgdotIsAbs)(nil),
(*NgoloFuzzOne_URLNgdotParse)(nil),
(*NgoloFuzzOne_URLNgdotResolveReference)(nil),
(*NgoloFuzzOne_URLNgdotQuery)(nil),
(*NgoloFuzzOne_URLNgdotRequestURI)(nil),
(*NgoloFuzzOne_URLNgdotHostname)(nil),
(*NgoloFuzzOne_URLNgdotPort)(nil),
(*NgoloFuzzOne_URLNgdotMarshalBinary)(nil),
(*NgoloFuzzOne_URLNgdotAppendBinary)(nil),
(*NgoloFuzzOne_URLNgdotUnmarshalBinary)(nil),
(*NgoloFuzzOne_URLNgdotClone)(nil),
}
// msgTypes[35] is NgoloFuzzAny with its five primitive variants.
file_ngolofuzz_proto_msgTypes[35].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
// x only exists so reflect can report this package's import path below.
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 37,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release the construction-only tables; the runtime holds what it needs.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_os_exec
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"os/exec"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub: Read serves a fixed byte
// slice supplied at construction time and Write discards its input, so
// fuzzed code can exercise connection handling without real network I/O.
type FuzzingConn struct {
	buf    []byte // bytes that Read will serve
	offset int    // number of bytes of buf already consumed
}

// Read copies up to len(b) unread bytes from the backing buffer into b
// and returns the number of bytes copied; once the buffer is exhausted
// it returns (0, io.EOF).
//
// Bug fix: the original partial-read branch compared len(b) against
// len(c.buf)+c.offset instead of the remaining count len(c.buf)-c.offset,
// so a short read near the end of the buffer reported len(b) bytes while
// copying fewer, and pushed offset past len(buf). Using copy's
// min(len(b), remaining) semantics handles both branches correctly.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write pretends the whole of b was sent successfully and discards it.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr used for both endpoints of a
// FuzzingConn; it reports fixed, recognizable strings.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed fake address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr reports a placeholder local endpoint.
func (fc *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr reports a placeholder remote endpoint.
func (fc *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; fuzzing connections never time out.
func (fc *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op; fuzzing connections never time out.
func (fc *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op; fuzzing connections never time out.
func (fc *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start of
// the buffer.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer and returns
// it as a *big.Int (an empty slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the bytes in a as a buffered reader, for fuzzed
// APIs that take a *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray converts each element of a to the platform int type,
// preserving order (values outside the int range are truncated by the
// usual Go integer conversion rules).
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array converts each element of a to uint16, preserving
// order (values are reduced modulo 2^16 by the Go integer conversion).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL for an empty string.
// Decoding via range means an invalid leading byte yields utf8.RuneError,
// exactly as a range loop over the string would.
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return 0 // '\x00' for the empty string
}
// FuzzNG_valid decodes data as an NgoloFuzzList and replays it via
// FuzzNG_List. It is meant for corpora known to be valid protobuf: a
// decode failure panics instead of being skipped.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
// This panic fires BEFORE the deferred recover below is installed,
// so it always crashes the process — intentional for invalid input.
panic("Failed to unmarshal LPM generated variables")
}
// Swallow string panics raised by fuzzed code paths; re-raise anything
// else so genuine bugs still crash the fuzzer.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the lenient counterpart of FuzzNG_valid: inputs that
// fail to decode are silently rejected (return 0) rather than panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics raised by fuzzed code paths; re-raise anything
// else so genuine bugs still crash the fuzzer.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized records whether the one-time reproducer setup in FuzzNG_List
// has already run.
var initialized bool
// FuzzNG_List replays a decoded NgoloFuzzList as a sequence of os/exec API
// calls, dispatching on each element's oneof variant. It returns 1 when
// the whole list was executed and 0 on an early exit (list too long, or a
// call returned an error).
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// First call only: if FUZZ_NG_REPRODUCER names a file, write a
// human-readable transcript of the call sequence into it.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Pool of produced *exec.Cmd values, consumed round-robin by the Cmd*
// cases below.
// NOTE(review): nothing in this function ever appends to CmdResults, so
// every Cmd* case hits its len==0 guard and only LookPath can actually
// run — confirm against the full generated file.
var CmdResults []*exec.Cmd
CmdResultsIndex := 0
for l := range gen.List {
// Cap the number of operations executed per input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_CmdNgdotString:
if len(CmdResults) == 0 {
continue
}
arg0 := CmdResults[CmdResultsIndex]
CmdResultsIndex = (CmdResultsIndex + 1) % len(CmdResults)
arg0.String()
case *NgoloFuzzOne_CmdNgdotRun:
if len(CmdResults) == 0 {
continue
}
arg0 := CmdResults[CmdResultsIndex]
CmdResultsIndex = (CmdResultsIndex + 1) % len(CmdResults)
r0 := arg0.Run()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_CmdNgdotStart:
if len(CmdResults) == 0 {
continue
}
arg0 := CmdResults[CmdResultsIndex]
CmdResultsIndex = (CmdResultsIndex + 1) % len(CmdResults)
r0 := arg0.Start()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_CmdNgdotWait:
if len(CmdResults) == 0 {
continue
}
arg0 := CmdResults[CmdResultsIndex]
CmdResultsIndex = (CmdResultsIndex + 1) % len(CmdResults)
r0 := arg0.Wait()
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_CmdNgdotOutput:
if len(CmdResults) == 0 {
continue
}
arg0 := CmdResults[CmdResultsIndex]
CmdResultsIndex = (CmdResultsIndex + 1) % len(CmdResults)
_, r1 := arg0.Output()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_CmdNgdotCombinedOutput:
if len(CmdResults) == 0 {
continue
}
arg0 := CmdResults[CmdResultsIndex]
CmdResultsIndex = (CmdResultsIndex + 1) % len(CmdResults)
_, r1 := arg0.CombinedOutput()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_CmdNgdotStdinPipe:
if len(CmdResults) == 0 {
continue
}
arg0 := CmdResults[CmdResultsIndex]
CmdResultsIndex = (CmdResultsIndex + 1) % len(CmdResults)
_, r1 := arg0.StdinPipe()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_CmdNgdotStdoutPipe:
if len(CmdResults) == 0 {
continue
}
arg0 := CmdResults[CmdResultsIndex]
CmdResultsIndex = (CmdResultsIndex + 1) % len(CmdResults)
_, r1 := arg0.StdoutPipe()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_CmdNgdotStderrPipe:
if len(CmdResults) == 0 {
continue
}
arg0 := CmdResults[CmdResultsIndex]
CmdResultsIndex = (CmdResultsIndex + 1) % len(CmdResults)
_, r1 := arg0.StderrPipe()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_CmdNgdotEnviron:
if len(CmdResults) == 0 {
continue
}
arg0 := CmdResults[CmdResultsIndex]
CmdResultsIndex = (CmdResultsIndex + 1) % len(CmdResults)
arg0.Environ()
case *NgoloFuzzOne_LookPath:
_, r1 := exec.LookPath(a.LookPath.File)
if r1 != nil{
r1.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a human-readable reproducer of the call sequence
// encoded in gen to w, one pseudo-Go statement per list entry.
//
// CmdNb counts Cmd values produced so far; no case in this switch creates
// one, so it remains zero here and every Cmd method line is skipped,
// mirroring how FuzzNG_List only replays methods on previously-created
// results.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	CmdNb := 0
	CmdResultsIndex := 0
	// emit prints one method call on the current Cmd result (using the
	// caller-supplied format line) and advances the round-robin index.
	// It is a no-op while no Cmd value exists yet.
	emit := func(format string) {
		if CmdNb == 0 {
			return
		}
		w.WriteString(fmt.Sprintf(format, CmdResultsIndex))
		CmdResultsIndex = (CmdResultsIndex + 1) % CmdNb
	}
	for _, one := range gen.List {
		switch a := one.Item.(type) {
		case *NgoloFuzzOne_CmdNgdotString:
			emit("Cmd%d.String()\n")
		case *NgoloFuzzOne_CmdNgdotRun:
			emit("Cmd%d.Run()\n")
		case *NgoloFuzzOne_CmdNgdotStart:
			emit("Cmd%d.Start()\n")
		case *NgoloFuzzOne_CmdNgdotWait:
			emit("Cmd%d.Wait()\n")
		case *NgoloFuzzOne_CmdNgdotOutput:
			emit("Cmd%d.Output()\n")
		case *NgoloFuzzOne_CmdNgdotCombinedOutput:
			emit("Cmd%d.CombinedOutput()\n")
		case *NgoloFuzzOne_CmdNgdotStdinPipe:
			emit("Cmd%d.StdinPipe()\n")
		case *NgoloFuzzOne_CmdNgdotStdoutPipe:
			emit("Cmd%d.StdoutPipe()\n")
		case *NgoloFuzzOne_CmdNgdotStderrPipe:
			emit("Cmd%d.StderrPipe()\n")
		case *NgoloFuzzOne_CmdNgdotEnviron:
			emit("Cmd%d.Environ()\n")
		case *NgoloFuzzOne_LookPath:
			w.WriteString(fmt.Sprintf("exec.LookPath(%#+v)\n", a.LookPath.File))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_os_exec
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// CmdNgdotStringArgs is the (field-less) argument message for Cmd.String.
// NOTE(review): this type and the nine below are machine-generated by
// protoc-gen-go from ngolofuzz.proto; do not hand-edit.
type CmdNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CmdNgdotStringArgs) Reset() {
	*x = CmdNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CmdNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CmdNgdotStringArgs) ProtoMessage() {}

func (x *CmdNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CmdNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*CmdNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// CmdNgdotRunArgs is the (field-less) argument message for Cmd.Run.
type CmdNgdotRunArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CmdNgdotRunArgs) Reset() {
	*x = CmdNgdotRunArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CmdNgdotRunArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CmdNgdotRunArgs) ProtoMessage() {}

func (x *CmdNgdotRunArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CmdNgdotRunArgs.ProtoReflect.Descriptor instead.
func (*CmdNgdotRunArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// CmdNgdotStartArgs is the (field-less) argument message for Cmd.Start.
type CmdNgdotStartArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CmdNgdotStartArgs) Reset() {
	*x = CmdNgdotStartArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CmdNgdotStartArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CmdNgdotStartArgs) ProtoMessage() {}

func (x *CmdNgdotStartArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CmdNgdotStartArgs.ProtoReflect.Descriptor instead.
func (*CmdNgdotStartArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// CmdNgdotWaitArgs is the (field-less) argument message for Cmd.Wait.
type CmdNgdotWaitArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CmdNgdotWaitArgs) Reset() {
	*x = CmdNgdotWaitArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CmdNgdotWaitArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CmdNgdotWaitArgs) ProtoMessage() {}

func (x *CmdNgdotWaitArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CmdNgdotWaitArgs.ProtoReflect.Descriptor instead.
func (*CmdNgdotWaitArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// CmdNgdotOutputArgs is the (field-less) argument message for Cmd.Output.
type CmdNgdotOutputArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CmdNgdotOutputArgs) Reset() {
	*x = CmdNgdotOutputArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CmdNgdotOutputArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CmdNgdotOutputArgs) ProtoMessage() {}

func (x *CmdNgdotOutputArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CmdNgdotOutputArgs.ProtoReflect.Descriptor instead.
func (*CmdNgdotOutputArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// CmdNgdotCombinedOutputArgs is the (field-less) argument message for
// Cmd.CombinedOutput.
type CmdNgdotCombinedOutputArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CmdNgdotCombinedOutputArgs) Reset() {
	*x = CmdNgdotCombinedOutputArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CmdNgdotCombinedOutputArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CmdNgdotCombinedOutputArgs) ProtoMessage() {}

func (x *CmdNgdotCombinedOutputArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CmdNgdotCombinedOutputArgs.ProtoReflect.Descriptor instead.
func (*CmdNgdotCombinedOutputArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// CmdNgdotStdinPipeArgs is the (field-less) argument message for Cmd.StdinPipe.
type CmdNgdotStdinPipeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CmdNgdotStdinPipeArgs) Reset() {
	*x = CmdNgdotStdinPipeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CmdNgdotStdinPipeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CmdNgdotStdinPipeArgs) ProtoMessage() {}

func (x *CmdNgdotStdinPipeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CmdNgdotStdinPipeArgs.ProtoReflect.Descriptor instead.
func (*CmdNgdotStdinPipeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// CmdNgdotStdoutPipeArgs is the (field-less) argument message for Cmd.StdoutPipe.
type CmdNgdotStdoutPipeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CmdNgdotStdoutPipeArgs) Reset() {
	*x = CmdNgdotStdoutPipeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CmdNgdotStdoutPipeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CmdNgdotStdoutPipeArgs) ProtoMessage() {}

func (x *CmdNgdotStdoutPipeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CmdNgdotStdoutPipeArgs.ProtoReflect.Descriptor instead.
func (*CmdNgdotStdoutPipeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// CmdNgdotStderrPipeArgs is the (field-less) argument message for Cmd.StderrPipe.
type CmdNgdotStderrPipeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CmdNgdotStderrPipeArgs) Reset() {
	*x = CmdNgdotStderrPipeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CmdNgdotStderrPipeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CmdNgdotStderrPipeArgs) ProtoMessage() {}

func (x *CmdNgdotStderrPipeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CmdNgdotStderrPipeArgs.ProtoReflect.Descriptor instead.
func (*CmdNgdotStderrPipeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// CmdNgdotEnvironArgs is the (field-less) argument message for Cmd.Environ.
type CmdNgdotEnvironArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CmdNgdotEnvironArgs) Reset() {
	*x = CmdNgdotEnvironArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CmdNgdotEnvironArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CmdNgdotEnvironArgs) ProtoMessage() {}

func (x *CmdNgdotEnvironArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CmdNgdotEnvironArgs.ProtoReflect.Descriptor instead.
func (*CmdNgdotEnvironArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// LookPathArgs carries the single string argument for exec.LookPath.
// Machine-generated by protoc-gen-go; do not hand-edit.
type LookPathArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	File          string                 `protobuf:"bytes,1,opt,name=file,proto3" json:"file,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *LookPathArgs) Reset() {
	*x = LookPathArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *LookPathArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LookPathArgs) ProtoMessage() {}

func (x *LookPathArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LookPathArgs.ProtoReflect.Descriptor instead.
func (*LookPathArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetFile returns the file field, tolerating a nil receiver.
func (x *LookPathArgs) GetFile() string {
	if x != nil {
		return x.File
	}
	return ""
}
// NgoloFuzzOne wraps exactly one API call of the fuzzed sequence as a
// protobuf oneof. Machine-generated by protoc-gen-go; do not hand-edit.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzOne_CmdNgdotString
	// *NgoloFuzzOne_CmdNgdotRun
	// *NgoloFuzzOne_CmdNgdotStart
	// *NgoloFuzzOne_CmdNgdotWait
	// *NgoloFuzzOne_CmdNgdotOutput
	// *NgoloFuzzOne_CmdNgdotCombinedOutput
	// *NgoloFuzzOne_CmdNgdotStdinPipe
	// *NgoloFuzzOne_CmdNgdotStdoutPipe
	// *NgoloFuzzOne_CmdNgdotStderrPipe
	// *NgoloFuzzOne_CmdNgdotEnviron
	// *NgoloFuzzOne_LookPath
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetItem returns the raw oneof value, tolerating a nil receiver.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below return the corresponding oneof variant's payload,
// or nil when the receiver is nil or holds a different variant.

func (x *NgoloFuzzOne) GetCmdNgdotString() *CmdNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CmdNgdotString); ok {
			return x.CmdNgdotString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCmdNgdotRun() *CmdNgdotRunArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CmdNgdotRun); ok {
			return x.CmdNgdotRun
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCmdNgdotStart() *CmdNgdotStartArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CmdNgdotStart); ok {
			return x.CmdNgdotStart
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCmdNgdotWait() *CmdNgdotWaitArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CmdNgdotWait); ok {
			return x.CmdNgdotWait
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCmdNgdotOutput() *CmdNgdotOutputArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CmdNgdotOutput); ok {
			return x.CmdNgdotOutput
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCmdNgdotCombinedOutput() *CmdNgdotCombinedOutputArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CmdNgdotCombinedOutput); ok {
			return x.CmdNgdotCombinedOutput
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCmdNgdotStdinPipe() *CmdNgdotStdinPipeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CmdNgdotStdinPipe); ok {
			return x.CmdNgdotStdinPipe
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCmdNgdotStdoutPipe() *CmdNgdotStdoutPipeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CmdNgdotStdoutPipe); ok {
			return x.CmdNgdotStdoutPipe
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCmdNgdotStderrPipe() *CmdNgdotStderrPipeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CmdNgdotStderrPipe); ok {
			return x.CmdNgdotStderrPipe
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCmdNgdotEnviron() *CmdNgdotEnvironArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CmdNgdotEnviron); ok {
			return x.CmdNgdotEnviron
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetLookPath() *LookPathArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_LookPath); ok {
			return x.LookPath
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_CmdNgdotString struct {
	CmdNgdotString *CmdNgdotStringArgs `protobuf:"bytes,1,opt,name=CmdNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_CmdNgdotRun struct {
	CmdNgdotRun *CmdNgdotRunArgs `protobuf:"bytes,2,opt,name=CmdNgdotRun,proto3,oneof"`
}

type NgoloFuzzOne_CmdNgdotStart struct {
	CmdNgdotStart *CmdNgdotStartArgs `protobuf:"bytes,3,opt,name=CmdNgdotStart,proto3,oneof"`
}

type NgoloFuzzOne_CmdNgdotWait struct {
	CmdNgdotWait *CmdNgdotWaitArgs `protobuf:"bytes,4,opt,name=CmdNgdotWait,proto3,oneof"`
}

type NgoloFuzzOne_CmdNgdotOutput struct {
	CmdNgdotOutput *CmdNgdotOutputArgs `protobuf:"bytes,5,opt,name=CmdNgdotOutput,proto3,oneof"`
}

type NgoloFuzzOne_CmdNgdotCombinedOutput struct {
	CmdNgdotCombinedOutput *CmdNgdotCombinedOutputArgs `protobuf:"bytes,6,opt,name=CmdNgdotCombinedOutput,proto3,oneof"`
}

type NgoloFuzzOne_CmdNgdotStdinPipe struct {
	CmdNgdotStdinPipe *CmdNgdotStdinPipeArgs `protobuf:"bytes,7,opt,name=CmdNgdotStdinPipe,proto3,oneof"`
}

type NgoloFuzzOne_CmdNgdotStdoutPipe struct {
	CmdNgdotStdoutPipe *CmdNgdotStdoutPipeArgs `protobuf:"bytes,8,opt,name=CmdNgdotStdoutPipe,proto3,oneof"`
}

type NgoloFuzzOne_CmdNgdotStderrPipe struct {
	CmdNgdotStderrPipe *CmdNgdotStderrPipeArgs `protobuf:"bytes,9,opt,name=CmdNgdotStderrPipe,proto3,oneof"`
}

type NgoloFuzzOne_CmdNgdotEnviron struct {
	CmdNgdotEnviron *CmdNgdotEnvironArgs `protobuf:"bytes,10,opt,name=CmdNgdotEnviron,proto3,oneof"`
}

type NgoloFuzzOne_LookPath struct {
	LookPath *LookPathArgs `protobuf:"bytes,11,opt,name=LookPath,proto3,oneof"`
}

func (*NgoloFuzzOne_CmdNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CmdNgdotRun) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CmdNgdotStart) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CmdNgdotWait) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CmdNgdotOutput) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CmdNgdotCombinedOutput) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CmdNgdotStdinPipe) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CmdNgdotStdoutPipe) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CmdNgdotStderrPipe) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_CmdNgdotEnviron) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_LookPath) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a oneof over the primitive scalar kinds the fuzzer can
// generate. Machine-generated by protoc-gen-go; do not hand-edit.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzAny_DoubleArgs
	// *NgoloFuzzAny_Int64Args
	// *NgoloFuzzAny_BoolArgs
	// *NgoloFuzzAny_StringArgs
	// *NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// GetItem returns the raw oneof value, tolerating a nil receiver.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below return the variant's payload, or the scalar zero
// value when the receiver is nil or holds a different variant.

func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every oneof
// wrapper type below.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzzer input: an ordered sequence of
// NgoloFuzzOne calls. Machine-generated by protoc-gen-go; do not hand-edit.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// GetList returns the call sequence, tolerating a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled file descriptor for ngolofuzz.proto,
// populated once by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto. Do not edit: the bytes must match the generator's output.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x14\n" +
	"\x12CmdNgdotStringArgs\"\x11\n" +
	"\x0fCmdNgdotRunArgs\"\x13\n" +
	"\x11CmdNgdotStartArgs\"\x12\n" +
	"\x10CmdNgdotWaitArgs\"\x14\n" +
	"\x12CmdNgdotOutputArgs\"\x1c\n" +
	"\x1aCmdNgdotCombinedOutputArgs\"\x17\n" +
	"\x15CmdNgdotStdinPipeArgs\"\x18\n" +
	"\x16CmdNgdotStdoutPipeArgs\"\x18\n" +
	"\x16CmdNgdotStderrPipeArgs\"\x15\n" +
	"\x13CmdNgdotEnvironArgs\"\"\n" +
	"\fLookPathArgs\x12\x12\n" +
	"\x04file\x18\x01 \x01(\tR\x04file\"\xd1\x06\n" +
	"\fNgoloFuzzOne\x12G\n" +
	"\x0eCmdNgdotString\x18\x01 \x01(\v2\x1d.ngolofuzz.CmdNgdotStringArgsH\x00R\x0eCmdNgdotString\x12>\n" +
	"\vCmdNgdotRun\x18\x02 \x01(\v2\x1a.ngolofuzz.CmdNgdotRunArgsH\x00R\vCmdNgdotRun\x12D\n" +
	"\rCmdNgdotStart\x18\x03 \x01(\v2\x1c.ngolofuzz.CmdNgdotStartArgsH\x00R\rCmdNgdotStart\x12A\n" +
	"\fCmdNgdotWait\x18\x04 \x01(\v2\x1b.ngolofuzz.CmdNgdotWaitArgsH\x00R\fCmdNgdotWait\x12G\n" +
	"\x0eCmdNgdotOutput\x18\x05 \x01(\v2\x1d.ngolofuzz.CmdNgdotOutputArgsH\x00R\x0eCmdNgdotOutput\x12_\n" +
	"\x16CmdNgdotCombinedOutput\x18\x06 \x01(\v2%.ngolofuzz.CmdNgdotCombinedOutputArgsH\x00R\x16CmdNgdotCombinedOutput\x12P\n" +
	"\x11CmdNgdotStdinPipe\x18\a \x01(\v2 .ngolofuzz.CmdNgdotStdinPipeArgsH\x00R\x11CmdNgdotStdinPipe\x12S\n" +
	"\x12CmdNgdotStdoutPipe\x18\b \x01(\v2!.ngolofuzz.CmdNgdotStdoutPipeArgsH\x00R\x12CmdNgdotStdoutPipe\x12S\n" +
	"\x12CmdNgdotStderrPipe\x18\t \x01(\v2!.ngolofuzz.CmdNgdotStderrPipeArgsH\x00R\x12CmdNgdotStderrPipe\x12J\n" +
	"\x0fCmdNgdotEnviron\x18\n" +
	" \x01(\v2\x1e.ngolofuzz.CmdNgdotEnvironArgsH\x00R\x0fCmdNgdotEnviron\x125\n" +
	"\bLookPath\x18\v \x01(\v2\x17.ngolofuzz.LookPathArgsH\x00R\bLookPathB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x14Z\x12./;fuzz_ng_os_execb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw descriptor exactly
// once and returns the cached bytes; used by the Descriptor methods above.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}

// file_ngolofuzz_proto_msgTypes holds one MessageInfo slot per message
// declared in ngolofuzz.proto (14 in total).
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 14)

// file_ngolofuzz_proto_goTypes maps descriptor indexes to Go types.
var file_ngolofuzz_proto_goTypes = []any{
	(*CmdNgdotStringArgs)(nil),         // 0: ngolofuzz.CmdNgdotStringArgs
	(*CmdNgdotRunArgs)(nil),            // 1: ngolofuzz.CmdNgdotRunArgs
	(*CmdNgdotStartArgs)(nil),          // 2: ngolofuzz.CmdNgdotStartArgs
	(*CmdNgdotWaitArgs)(nil),           // 3: ngolofuzz.CmdNgdotWaitArgs
	(*CmdNgdotOutputArgs)(nil),         // 4: ngolofuzz.CmdNgdotOutputArgs
	(*CmdNgdotCombinedOutputArgs)(nil), // 5: ngolofuzz.CmdNgdotCombinedOutputArgs
	(*CmdNgdotStdinPipeArgs)(nil),      // 6: ngolofuzz.CmdNgdotStdinPipeArgs
	(*CmdNgdotStdoutPipeArgs)(nil),     // 7: ngolofuzz.CmdNgdotStdoutPipeArgs
	(*CmdNgdotStderrPipeArgs)(nil),     // 8: ngolofuzz.CmdNgdotStderrPipeArgs
	(*CmdNgdotEnvironArgs)(nil),        // 9: ngolofuzz.CmdNgdotEnvironArgs
	(*LookPathArgs)(nil),               // 10: ngolofuzz.LookPathArgs
	(*NgoloFuzzOne)(nil),               // 11: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),               // 12: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),              // 13: ngolofuzz.NgoloFuzzList
}

// file_ngolofuzz_proto_depIdxs records, for each message field whose type is
// another message, the goTypes index of that type.
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.CmdNgdotString:type_name -> ngolofuzz.CmdNgdotStringArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.CmdNgdotRun:type_name -> ngolofuzz.CmdNgdotRunArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.CmdNgdotStart:type_name -> ngolofuzz.CmdNgdotStartArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.CmdNgdotWait:type_name -> ngolofuzz.CmdNgdotWaitArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.CmdNgdotOutput:type_name -> ngolofuzz.CmdNgdotOutputArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.CmdNgdotCombinedOutput:type_name -> ngolofuzz.CmdNgdotCombinedOutputArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.CmdNgdotStdinPipe:type_name -> ngolofuzz.CmdNgdotStdinPipeArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.CmdNgdotStdoutPipe:type_name -> ngolofuzz.CmdNgdotStdoutPipeArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.CmdNgdotStderrPipe:type_name -> ngolofuzz.CmdNgdotStderrPipeArgs
	9,  // 9: ngolofuzz.NgoloFuzzOne.CmdNgdotEnviron:type_name -> ngolofuzz.CmdNgdotEnvironArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.LookPath:type_name -> ngolofuzz.LookPathArgs
	11, // 11: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	12, // [12:12] is the sub-list for method output_type
	12, // [12:12] is the sub-list for method input_type
	12, // [12:12] is the sub-list for extension type_name
	12, // [12:12] is the sub-list for extension extendee
	0,  // [0:12] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the file descriptor and message types
// with the protobuf runtime. It is idempotent: a second call returns early.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Declare the wrapper types that may populate each oneof field.
	file_ngolofuzz_proto_msgTypes[11].OneofWrappers = []any{
		(*NgoloFuzzOne_CmdNgdotString)(nil),
		(*NgoloFuzzOne_CmdNgdotRun)(nil),
		(*NgoloFuzzOne_CmdNgdotStart)(nil),
		(*NgoloFuzzOne_CmdNgdotWait)(nil),
		(*NgoloFuzzOne_CmdNgdotOutput)(nil),
		(*NgoloFuzzOne_CmdNgdotCombinedOutput)(nil),
		(*NgoloFuzzOne_CmdNgdotStdinPipe)(nil),
		(*NgoloFuzzOne_CmdNgdotStdoutPipe)(nil),
		(*NgoloFuzzOne_CmdNgdotStderrPipe)(nil),
		(*NgoloFuzzOne_CmdNgdotEnviron)(nil),
		(*NgoloFuzzOne_LookPath)(nil),
	}
	file_ngolofuzz_proto_msgTypes[12].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   14,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the registration tables; the runtime now owns the data.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_os_user
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"os/user"
"runtime"
"time"
)
// FuzzingConn is an in-memory stub connection: Read serves the
// fuzzer-supplied bytes in buf, and writes are discarded.
type FuzzingConn struct {
	buf    []byte // bytes served to Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of c.buf into b and advances the read
// offset, returning the number of bytes copied. It returns io.EOF once the
// buffer is exhausted.
//
// Fix over the generated version: the original compared
// len(b) < len(c.buf)+c.offset instead of the remaining byte count, and then
// unconditionally advanced offset by len(b) — so a Read near the end of the
// data could report more bytes than were actually copied and corrupt the
// offset. Using copy's return value yields the exact count in all cases.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports that all of it was consumed, so writers
// driving the fuzzed code never block or fail.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}
// Close marks the buffer as fully consumed so any later Read reports EOF.
// It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder address for FuzzingConn's net.Addr methods.
type FuzzingAddr struct{}

// Network names the fake network this address belongs to.
func (c *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String renders the fixed fake address.
func (c *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns the fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr { return &FuzzingAddr{} }

// RemoteAddr returns the fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr { return &FuzzingAddr{} }
// SetDeadline is a no-op: the in-memory connection cannot time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn whose
// read side serves exactly those bytes.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit the helper constructors below when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewBuffer(a))
}
// ConvertIntArray narrows each int64 in a to int (which may truncate on
// 32-bit platforms) and returns the converted slice.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows each int64 in a to uint16 (keeping only the low
// 16 bits) and returns the converted slice.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s (decoding UTF-8), or NUL when s is
// empty. Invalid leading bytes decode to U+FFFD, as range-over-string does.
func GetRune(s string) rune {
	for _, first := range s {
		return first
	}
	return '\x00'
}
// FuzzNG_valid replays the call sequence encoded in data, which must be a
// valid protobuf-encoded NgoloFuzzList; it panics if the bytes do not
// unmarshal (the generator guarantees validity for this entry point).
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised by the replayed calls; any other panic
	// value (runtime errors, custom types) is re-raised for the fuzzer to
	// report. Note the deliberate ordering: the unmarshal panic above fires
	// before this recover is registered.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is like FuzzNG_valid but tolerates arbitrary input: bytes
// that do not decode as an NgoloFuzzList are rejected with 0 instead of
// panicking.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics raised by the replayed calls; re-raise anything
	// else so real crashes still surface to the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-dump setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets gen as a sequence of os/user API calls and executes
// them in order. It returns 1 when the whole sequence ran and 0 as soon as a
// call yields an error.
func FuzzNG_List(gen *NgoloFuzzList) int {
	// On the first invocation, optionally write a Go-source reproducer of
	// this input to the file named by FUZZ_NG_REPRODUCER.
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// UserResults would hold *user.User values produced by earlier calls for
	// method replay. NOTE(review): nothing in this switch appends to it, so
	// the UserNgdotGroupIds case below always hits the len==0 guard.
	var UserResults []*user.User
	UserResultsIndex := 0
	for l := range gen.List {
		// Cap the interpreted sequence length.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Current:
			_, r1 := user.Current()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Lookup:
			_, r1 := user.Lookup(a.Lookup.Username)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_LookupId:
			_, r1 := user.LookupId(a.LookupId.Uid)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_LookupGroup:
			_, r1 := user.LookupGroup(a.LookupGroup.Name)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_LookupGroupId:
			_, r1 := user.LookupGroupId(a.LookupGroupId.Gid)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_UserNgdotGroupIds:
			// Replay a method on a previously-created User, round-robin.
			if len(UserResults) == 0 {
				continue
			}
			arg0 := UserResults[UserResultsIndex]
			UserResultsIndex = (UserResultsIndex + 1) % len(UserResults)
			_, r1 := arg0.GroupIds()
			if r1 != nil {
				r1.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-like transcript of the generated call
// sequence to w, mirroring the dispatch performed by FuzzNG_List.
// NOTE(review): UserNb stays 0 for the whole loop, so the GroupIds line
// is never printed — this mirrors UserResults never being populated in
// FuzzNG_List; verify against the ngolo-fuzzing generator.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	UserNb := 0
	UserResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Current:
			w.WriteString(fmt.Sprintf("user.Current()\n"))
		case *NgoloFuzzOne_Lookup:
			w.WriteString(fmt.Sprintf("user.Lookup(%#+v)\n", a.Lookup.Username))
		case *NgoloFuzzOne_LookupId:
			w.WriteString(fmt.Sprintf("user.LookupId(%#+v)\n", a.LookupId.Uid))
		case *NgoloFuzzOne_LookupGroup:
			w.WriteString(fmt.Sprintf("user.LookupGroup(%#+v)\n", a.LookupGroup.Name))
		case *NgoloFuzzOne_LookupGroupId:
			w.WriteString(fmt.Sprintf("user.LookupGroupId(%#+v)\n", a.LookupGroupId.Gid))
		case *NgoloFuzzOne_UserNgdotGroupIds:
			if UserNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("User%d.GroupIds()\n", UserResultsIndex))
			UserResultsIndex = (UserResultsIndex + 1) % UserNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_os_user
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the generated code and the linked protoimpl
// runtime must agree on a supported API version range.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// CurrentArgs is the (empty) argument message for a user.Current call.
type CurrentArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated type info.
func (x *CurrentArgs) Reset() {
	*x = CurrentArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *CurrentArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CurrentArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// binding the generated type info on first use.
func (x *CurrentArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CurrentArgs.ProtoReflect.Descriptor instead.
func (*CurrentArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// LookupArgs carries the username argument for a user.Lookup call.
type LookupArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Username      string                 `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *LookupArgs) Reset() {
	*x = LookupArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *LookupArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LookupArgs) ProtoMessage() {}

func (x *LookupArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LookupArgs.ProtoReflect.Descriptor instead.
func (*LookupArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetUsername returns the username field, tolerating a nil receiver.
func (x *LookupArgs) GetUsername() string {
	if x != nil {
		return x.Username
	}
	return ""
}

// LookupIdArgs carries the uid argument for a user.LookupId call.
type LookupIdArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Uid           string                 `protobuf:"bytes,1,opt,name=uid,proto3" json:"uid,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *LookupIdArgs) Reset() {
	*x = LookupIdArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *LookupIdArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LookupIdArgs) ProtoMessage() {}

func (x *LookupIdArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LookupIdArgs.ProtoReflect.Descriptor instead.
func (*LookupIdArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetUid returns the uid field, tolerating a nil receiver.
func (x *LookupIdArgs) GetUid() string {
	if x != nil {
		return x.Uid
	}
	return ""
}

// LookupGroupArgs carries the name argument for a user.LookupGroup call.
type LookupGroupArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *LookupGroupArgs) Reset() {
	*x = LookupGroupArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *LookupGroupArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LookupGroupArgs) ProtoMessage() {}

func (x *LookupGroupArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LookupGroupArgs.ProtoReflect.Descriptor instead.
func (*LookupGroupArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetName returns the name field, tolerating a nil receiver.
func (x *LookupGroupArgs) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// LookupGroupIdArgs carries the gid argument for a user.LookupGroupId call.
type LookupGroupIdArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Gid           string                 `protobuf:"bytes,1,opt,name=gid,proto3" json:"gid,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *LookupGroupIdArgs) Reset() {
	*x = LookupGroupIdArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *LookupGroupIdArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LookupGroupIdArgs) ProtoMessage() {}

func (x *LookupGroupIdArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LookupGroupIdArgs.ProtoReflect.Descriptor instead.
func (*LookupGroupIdArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetGid returns the gid field, tolerating a nil receiver.
func (x *LookupGroupIdArgs) GetGid() string {
	if x != nil {
		return x.Gid
	}
	return ""
}

// UserNgdotGroupIdsArgs is the (empty) argument message for a
// (*user.User).GroupIds method call.
type UserNgdotGroupIdsArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *UserNgdotGroupIdsArgs) Reset() {
	*x = UserNgdotGroupIdsArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *UserNgdotGroupIdsArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UserNgdotGroupIdsArgs) ProtoMessage() {}

func (x *UserNgdotGroupIdsArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UserNgdotGroupIdsArgs.ProtoReflect.Descriptor instead.
func (*UserNgdotGroupIdsArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// NgoloFuzzOne is a oneof wrapper selecting exactly one os/user API
// call (with its arguments) in a fuzzed sequence.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_Current
	//	*NgoloFuzzOne_Lookup
	//	*NgoloFuzzOne_LookupId
	//	*NgoloFuzzOne_LookupGroup
	//	*NgoloFuzzOne_LookupGroupId
	//	*NgoloFuzzOne_UserNgdotGroupIds
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetItem returns the populated oneof wrapper, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The GetXxx accessors below return the corresponding oneof member, or
// nil when a different member (or none) is set.
func (x *NgoloFuzzOne) GetCurrent() *CurrentArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Current); ok {
			return x.Current
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetLookup() *LookupArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Lookup); ok {
			return x.Lookup
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetLookupId() *LookupIdArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_LookupId); ok {
			return x.LookupId
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetLookupGroup() *LookupGroupArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_LookupGroup); ok {
			return x.LookupGroup
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetLookupGroupId() *LookupGroupIdArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_LookupGroupId); ok {
			return x.LookupGroupId
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetUserNgdotGroupIds() *UserNgdotGroupIdsArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_UserNgdotGroupIds); ok {
			return x.UserNgdotGroupIds
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every
// oneof wrapper type below.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_Current struct {
	Current *CurrentArgs `protobuf:"bytes,1,opt,name=Current,proto3,oneof"`
}

type NgoloFuzzOne_Lookup struct {
	Lookup *LookupArgs `protobuf:"bytes,2,opt,name=Lookup,proto3,oneof"`
}

type NgoloFuzzOne_LookupId struct {
	LookupId *LookupIdArgs `protobuf:"bytes,3,opt,name=LookupId,proto3,oneof"`
}

type NgoloFuzzOne_LookupGroup struct {
	LookupGroup *LookupGroupArgs `protobuf:"bytes,4,opt,name=LookupGroup,proto3,oneof"`
}

type NgoloFuzzOne_LookupGroupId struct {
	LookupGroupId *LookupGroupIdArgs `protobuf:"bytes,5,opt,name=LookupGroupId,proto3,oneof"`
}

type NgoloFuzzOne_UserNgdotGroupIds struct {
	UserNgdotGroupIds *UserNgdotGroupIdsArgs `protobuf:"bytes,6,opt,name=UserNgdotGroupIds,proto3,oneof"`
}

func (*NgoloFuzzOne_Current) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Lookup) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_LookupId) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_LookupGroup) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_LookupGroupId) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_UserNgdotGroupIds) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a oneof wrapper holding one scalar value of an
// arbitrary primitive type (double, int64, bool, string, or bytes).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// GetItem returns the populated oneof wrapper, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The GetXxx accessors below return the corresponding oneof member's
// value, or the type's zero value when a different member is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every
// oneof wrapper type below.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzzing message: an ordered sequence of
// API calls to replay.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetList returns the call sequence, tolerating a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto
// for ngolofuzz.proto (wire-format bytes embedded as a string).
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\r\n" +
	"\vCurrentArgs\"(\n" +
	"\n" +
	"LookupArgs\x12\x1a\n" +
	"\busername\x18\x01 \x01(\tR\busername\" \n" +
	"\fLookupIdArgs\x12\x10\n" +
	"\x03uid\x18\x01 \x01(\tR\x03uid\"%\n" +
	"\x0fLookupGroupArgs\x12\x12\n" +
	"\x04name\x18\x01 \x01(\tR\x04name\"%\n" +
	"\x11LookupGroupIdArgs\x12\x10\n" +
	"\x03gid\x18\x01 \x01(\tR\x03gid\"\x17\n" +
	"\x15UserNgdotGroupIdsArgs\"\x8a\x03\n" +
	"\fNgoloFuzzOne\x122\n" +
	"\aCurrent\x18\x01 \x01(\v2\x16.ngolofuzz.CurrentArgsH\x00R\aCurrent\x12/\n" +
	"\x06Lookup\x18\x02 \x01(\v2\x15.ngolofuzz.LookupArgsH\x00R\x06Lookup\x125\n" +
	"\bLookupId\x18\x03 \x01(\v2\x17.ngolofuzz.LookupIdArgsH\x00R\bLookupId\x12>\n" +
	"\vLookupGroup\x18\x04 \x01(\v2\x1a.ngolofuzz.LookupGroupArgsH\x00R\vLookupGroup\x12D\n" +
	"\rLookupGroupId\x18\x05 \x01(\v2\x1c.ngolofuzz.LookupGroupIdArgsH\x00R\rLookupGroupId\x12P\n" +
	"\x11UserNgdotGroupIds\x18\x06 \x01(\v2 .ngolofuzz.UserNgdotGroupIdsArgsH\x00R\x11UserNgdotGroupIdsB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x14Z\x12./;fuzz_ng_os_userb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw
// descriptor exactly once; used by the deprecated Descriptor methods.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Per-message runtime type info, indexed in declaration order.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 9)

// Go types corresponding to each message index.
var file_ngolofuzz_proto_goTypes = []any{
	(*CurrentArgs)(nil),           // 0: ngolofuzz.CurrentArgs
	(*LookupArgs)(nil),            // 1: ngolofuzz.LookupArgs
	(*LookupIdArgs)(nil),          // 2: ngolofuzz.LookupIdArgs
	(*LookupGroupArgs)(nil),       // 3: ngolofuzz.LookupGroupArgs
	(*LookupGroupIdArgs)(nil),     // 4: ngolofuzz.LookupGroupIdArgs
	(*UserNgdotGroupIdsArgs)(nil), // 5: ngolofuzz.UserNgdotGroupIdsArgs
	(*NgoloFuzzOne)(nil),          // 6: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),          // 7: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),         // 8: ngolofuzz.NgoloFuzzList
}

// Dependency index table mapping message fields to their type indices.
var file_ngolofuzz_proto_depIdxs = []int32{
	0, // 0: ngolofuzz.NgoloFuzzOne.Current:type_name -> ngolofuzz.CurrentArgs
	1, // 1: ngolofuzz.NgoloFuzzOne.Lookup:type_name -> ngolofuzz.LookupArgs
	2, // 2: ngolofuzz.NgoloFuzzOne.LookupId:type_name -> ngolofuzz.LookupIdArgs
	3, // 3: ngolofuzz.NgoloFuzzOne.LookupGroup:type_name -> ngolofuzz.LookupGroupArgs
	4, // 4: ngolofuzz.NgoloFuzzOne.LookupGroupId:type_name -> ngolofuzz.LookupGroupIdArgs
	5, // 5: ngolofuzz.NgoloFuzzOne.UserNgdotGroupIds:type_name -> ngolofuzz.UserNgdotGroupIdsArgs
	6, // 6: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	7, // [7:7] is the sub-list for method output_type
	7, // [7:7] is the sub-list for method input_type
	7, // [7:7] is the sub-list for extension type_name
	7, // [7:7] is the sub-list for extension extendee
	0, // [0:7] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the oneof wrapper types and builds
// the file descriptor with the protoimpl type builder. Idempotent.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		// Already initialized.
		return
	}
	file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
		(*NgoloFuzzOne_Current)(nil),
		(*NgoloFuzzOne_Lookup)(nil),
		(*NgoloFuzzOne_LookupId)(nil),
		(*NgoloFuzzOne_LookupGroup)(nil),
		(*NgoloFuzzOne_LookupGroupId)(nil),
		(*NgoloFuzzOne_UserNgdotGroupIds)(nil),
	}
	file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   9,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release init-time tables so they can be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_path
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"path"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn-style stub that serves a fixed
// fuzzer-supplied byte slice to readers and discards writes.
type FuzzingConn struct {
	buf    []byte // fuzzer-supplied data served by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the remaining bytes of the buffer into b and reports the
// number of bytes actually copied. It returns io.EOF once the buffer has
// been fully consumed.
//
// Bug fixed: the previous implementation compared len(b) against
// len(c.buf)+c.offset instead of the remaining length, so when
// remaining <= len(b) < remaining+2*offset it returned n == len(b) even
// though fewer bytes were copied, and advanced offset past the data
// actually delivered — violating the io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy already returns min(len(b), remaining), which is exactly n.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends to consume all of b; data written to the fuzzing conn
// is discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return
}

// Close marks the whole buffer as consumed so subsequent Reads hit EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr implementation for FuzzingConn.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	const network = "fuzz_addr_net"
	return network
}

// String reports a fixed fake address string.
func (c *FuzzingAddr) String() string {
	const addr = "fuzz_addr_string"
	return addr
}
// LocalAddr returns a placeholder address; the in-memory fuzzing conn
// has no real local endpoint.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return new(FuzzingAddr)
}

// RemoteAddr returns a placeholder address; the in-memory fuzzing conn
// has no real peer.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return new(FuzzingAddr)
}

// SetDeadline is a no-op: the in-memory fuzzing conn cannot time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op for the in-memory fuzzing conn.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op for the in-memory fuzzing conn.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn builds a FuzzingConn that serves the bytes in a
// back to readers.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helper constructors if the target package needs them.

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over the bytes in a.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows a slice of int64 values into a slice of int.
func ConvertIntArray(a []int64) []int {
	r := make([]int, 0, len(a))
	for _, v := range a {
		r = append(r, int(v))
	}
	return r
}
// ConvertUint16Array narrows a slice of int64 values into a slice of
// uint16 (values are truncated modulo 65536, matching Go conversion).
func ConvertUint16Array(a []int64) []uint16 {
	r := make([]uint16, 0, len(a))
	for _, v := range a {
		r = append(r, uint16(v))
	}
	return r
}
// GetRune returns the first rune of s (decoded as UTF-8), or NUL when
// s is empty.
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return 0
}
// FuzzNG_valid decodes data as a NgoloFuzzList protobuf and replays it.
// The input is expected to come from the protobuf mutator, so a decode
// failure is a harness bug and panics. String panics raised by the
// replayed calls are swallowed; any other panic is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
		// string panics are expected from the target and ignored
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure decodes data as a NgoloFuzzList protobuf and replays it.
// Unlike FuzzNG_valid, the input may be arbitrary bytes, so inputs that
// do not parse are silently rejected. String panics raised by the
// replayed calls are swallowed; any other panic is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
		// string panics are expected from the target and ignored
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump performed on the
// first fuzzing iteration.
var initialized bool

// FuzzNG_List replays a generated sequence of path package calls.
// If the FUZZ_NG_REPRODUCER environment variable names a file, the first
// call also writes a human-readable transcript of the sequence there.
// It returns 1 when the whole list executed, and 0 when path.Match
// returns an error or the list exceeds 4096 entries.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		if l > 4096 {
			// Cap the amount of work done per fuzz input.
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Match:
			// path.Match is the only dispatched call that returns an error.
			_, r1 := path.Match(a.Match.Pattern, a.Match.Name)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Clean:
			path.Clean(a.Clean.Path)
		case *NgoloFuzzOne_Split:
			path.Split(a.Split.Path)
		case *NgoloFuzzOne_Ext:
			path.Ext(a.Ext.Path)
		case *NgoloFuzzOne_Base:
			path.Base(a.Base.Path)
		case *NgoloFuzzOne_IsAbs:
			path.IsAbs(a.IsAbs.Path)
		case *NgoloFuzzOne_Dir:
			path.Dir(a.Dir.Path)
		}
	}
	return 1
}
// PrintNG_List writes a Go-like transcript of the generated call
// sequence to w, mirroring the dispatch performed by FuzzNG_List.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Match:
			w.WriteString(fmt.Sprintf("path.Match(%#+v, %#+v)\n", a.Match.Pattern, a.Match.Name))
		case *NgoloFuzzOne_Clean:
			w.WriteString(fmt.Sprintf("path.Clean(%#+v)\n", a.Clean.Path))
		case *NgoloFuzzOne_Split:
			w.WriteString(fmt.Sprintf("path.Split(%#+v)\n", a.Split.Path))
		case *NgoloFuzzOne_Ext:
			w.WriteString(fmt.Sprintf("path.Ext(%#+v)\n", a.Ext.Path))
		case *NgoloFuzzOne_Base:
			w.WriteString(fmt.Sprintf("path.Base(%#+v)\n", a.Base.Path))
		case *NgoloFuzzOne_IsAbs:
			w.WriteString(fmt.Sprintf("path.IsAbs(%#+v)\n", a.IsAbs.Path))
		case *NgoloFuzzOne_Dir:
			w.WriteString(fmt.Sprintf("path.Dir(%#+v)\n", a.Dir.Path))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_path
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the generated code and the linked protoimpl
// runtime must agree on a supported API version range.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// MatchArgs carries the pattern and name arguments for a path.Match call.
type MatchArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Pattern       string                 `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"`
	Name          string                 `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-attaches its generated type info.
func (x *MatchArgs) Reset() {
	*x = MatchArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *MatchArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*MatchArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// binding the generated type info on first use.
func (x *MatchArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MatchArgs.ProtoReflect.Descriptor instead.
func (*MatchArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetPattern returns the pattern field, tolerating a nil receiver.
func (x *MatchArgs) GetPattern() string {
	if x != nil {
		return x.Pattern
	}
	return ""
}

// GetName returns the name field, tolerating a nil receiver.
func (x *MatchArgs) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// CleanArgs carries the path argument for a path.Clean call.
type CleanArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CleanArgs) Reset() {
	*x = CleanArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CleanArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CleanArgs) ProtoMessage() {}

func (x *CleanArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CleanArgs.ProtoReflect.Descriptor instead.
func (*CleanArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

func (x *CleanArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}

// SplitArgs carries the path argument for a path.Split call.
type SplitArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *SplitArgs) Reset() {
	*x = SplitArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *SplitArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SplitArgs) ProtoMessage() {}

func (x *SplitArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SplitArgs.ProtoReflect.Descriptor instead.
func (*SplitArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

func (x *SplitArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}

// ExtArgs carries the path argument for a path.Ext call.
type ExtArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *ExtArgs) Reset() {
	*x = ExtArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ExtArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ExtArgs) ProtoMessage() {}

func (x *ExtArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ExtArgs.ProtoReflect.Descriptor instead.
func (*ExtArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

func (x *ExtArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}

// BaseArgs carries the path argument for a path.Base call.
type BaseArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *BaseArgs) Reset() {
	*x = BaseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *BaseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BaseArgs) ProtoMessage() {}

func (x *BaseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BaseArgs.ProtoReflect.Descriptor instead.
func (*BaseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

func (x *BaseArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}

// IsAbsArgs carries the path argument for a path.IsAbs call.
type IsAbsArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *IsAbsArgs) Reset() {
	*x = IsAbsArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *IsAbsArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*IsAbsArgs) ProtoMessage() {}

func (x *IsAbsArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsAbsArgs.ProtoReflect.Descriptor instead.
func (*IsAbsArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

func (x *IsAbsArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}

// DirArgs carries the path argument for a path.Dir call.
type DirArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DirArgs) Reset() {
	*x = DirArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DirArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DirArgs) ProtoMessage() {}

func (x *DirArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DirArgs.ProtoReflect.Descriptor instead.
func (*DirArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

func (x *DirArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// NgoloFuzzOne encodes one fuzzed API call as a oneof over the per-function
// argument messages. protoc-gen-go generated boilerplate; do not hand-edit.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_Match
	//	*NgoloFuzzOne_Clean
	//	*NgoloFuzzOne_Split
	//	*NgoloFuzzOne_Ext
	//	*NgoloFuzzOne_Base
	//	*NgoloFuzzOne_IsAbs
	//	*NgoloFuzzOne_Dir
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// GetItem returns whichever oneof wrapper is set, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Per-variant getters: each returns its payload only when that variant is the
// one currently stored in Item, nil otherwise (and on a nil receiver).
func (x *NgoloFuzzOne) GetMatch() *MatchArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Match); ok {
			return x.Match
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetClean() *CleanArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Clean); ok {
			return x.Clean
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetSplit() *SplitArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Split); ok {
			return x.Split
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetExt() *ExtArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Ext); ok {
			return x.Ext
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetBase() *BaseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Base); ok {
			return x.Base
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetIsAbs() *IsAbsArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IsAbs); ok {
			return x.IsAbs
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetDir() *DirArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Dir); ok {
			return x.Dir
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper below; the marker methods keep the set closed.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_Match struct {
	Match *MatchArgs `protobuf:"bytes,1,opt,name=Match,proto3,oneof"`
}
type NgoloFuzzOne_Clean struct {
	Clean *CleanArgs `protobuf:"bytes,2,opt,name=Clean,proto3,oneof"`
}
type NgoloFuzzOne_Split struct {
	Split *SplitArgs `protobuf:"bytes,3,opt,name=Split,proto3,oneof"`
}
type NgoloFuzzOne_Ext struct {
	Ext *ExtArgs `protobuf:"bytes,4,opt,name=Ext,proto3,oneof"`
}
type NgoloFuzzOne_Base struct {
	Base *BaseArgs `protobuf:"bytes,5,opt,name=Base,proto3,oneof"`
}
type NgoloFuzzOne_IsAbs struct {
	IsAbs *IsAbsArgs `protobuf:"bytes,6,opt,name=IsAbs,proto3,oneof"`
}
type NgoloFuzzOne_Dir struct {
	Dir *DirArgs `protobuf:"bytes,7,opt,name=Dir,proto3,oneof"`
}

func (*NgoloFuzzOne_Match) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Clean) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Split) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Ext) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_Base) isNgoloFuzzOne_Item()  {}
func (*NgoloFuzzOne_IsAbs) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Dir) isNgoloFuzzOne_Item()   {}
// NgoloFuzzAny is a oneof over primitive scalar values (double, int64, bool,
// string, bytes) used by the generator for untyped arguments.
// protoc-gen-go generated boilerplate; do not hand-edit.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetItem returns whichever oneof wrapper is set, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Per-variant getters: each returns its value only when that variant is the
// one currently stored in Item, the zero value otherwise.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed oneof interface for the wrappers below.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item()  {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item()   {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item()  {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of API calls
// to replay. protoc-gen-go generated boilerplate; do not hand-edit.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetList returns the call sequence; safe on a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the runtime descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto, embedded as escaped wire bytes. Generated — do not edit.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"9\n" +
	"\tMatchArgs\x12\x18\n" +
	"\apattern\x18\x01 \x01(\tR\apattern\x12\x12\n" +
	"\x04name\x18\x02 \x01(\tR\x04name\"\x1f\n" +
	"\tCleanArgs\x12\x12\n" +
	"\x04path\x18\x01 \x01(\tR\x04path\"\x1f\n" +
	"\tSplitArgs\x12\x12\n" +
	"\x04path\x18\x01 \x01(\tR\x04path\"\x1d\n" +
	"\aExtArgs\x12\x12\n" +
	"\x04path\x18\x01 \x01(\tR\x04path\"\x1e\n" +
	"\bBaseArgs\x12\x12\n" +
	"\x04path\x18\x01 \x01(\tR\x04path\"\x1f\n" +
	"\tIsAbsArgs\x12\x12\n" +
	"\x04path\x18\x01 \x01(\tR\x04path\"\x1d\n" +
	"\aDirArgs\x12\x12\n" +
	"\x04path\x18\x01 \x01(\tR\x04path\"\xc9\x02\n" +
	"\fNgoloFuzzOne\x12,\n" +
	"\x05Match\x18\x01 \x01(\v2\x14.ngolofuzz.MatchArgsH\x00R\x05Match\x12,\n" +
	"\x05Clean\x18\x02 \x01(\v2\x14.ngolofuzz.CleanArgsH\x00R\x05Clean\x12,\n" +
	"\x05Split\x18\x03 \x01(\v2\x14.ngolofuzz.SplitArgsH\x00R\x05Split\x12&\n" +
	"\x03Ext\x18\x04 \x01(\v2\x12.ngolofuzz.ExtArgsH\x00R\x03Ext\x12)\n" +
	"\x04Base\x18\x05 \x01(\v2\x13.ngolofuzz.BaseArgsH\x00R\x04Base\x12,\n" +
	"\x05IsAbs\x18\x06 \x01(\v2\x14.ngolofuzz.IsAbsArgsH\x00R\x05IsAbs\x12&\n" +
	"\x03Dir\x18\a \x01(\v2\x12.ngolofuzz.DirArgsH\x00R\x03DirB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x11Z\x0f./;fuzz_ng_pathb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw descriptor
// exactly once; the result backs the deprecated Descriptor() methods.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Generated type-registration tables: message info slots, Go types by
// descriptor index, and the flattened dependency index list consumed by the
// protobuf TypeBuilder. Do not edit by hand.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
var file_ngolofuzz_proto_goTypes = []any{
	(*MatchArgs)(nil),     // 0: ngolofuzz.MatchArgs
	(*CleanArgs)(nil),     // 1: ngolofuzz.CleanArgs
	(*SplitArgs)(nil),     // 2: ngolofuzz.SplitArgs
	(*ExtArgs)(nil),       // 3: ngolofuzz.ExtArgs
	(*BaseArgs)(nil),      // 4: ngolofuzz.BaseArgs
	(*IsAbsArgs)(nil),     // 5: ngolofuzz.IsAbsArgs
	(*DirArgs)(nil),       // 6: ngolofuzz.DirArgs
	(*NgoloFuzzOne)(nil),  // 7: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),  // 8: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil), // 9: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
	0, // 0: ngolofuzz.NgoloFuzzOne.Match:type_name -> ngolofuzz.MatchArgs
	1, // 1: ngolofuzz.NgoloFuzzOne.Clean:type_name -> ngolofuzz.CleanArgs
	2, // 2: ngolofuzz.NgoloFuzzOne.Split:type_name -> ngolofuzz.SplitArgs
	3, // 3: ngolofuzz.NgoloFuzzOne.Ext:type_name -> ngolofuzz.ExtArgs
	4, // 4: ngolofuzz.NgoloFuzzOne.Base:type_name -> ngolofuzz.BaseArgs
	5, // 5: ngolofuzz.NgoloFuzzOne.IsAbs:type_name -> ngolofuzz.IsAbsArgs
	6, // 6: ngolofuzz.NgoloFuzzOne.Dir:type_name -> ngolofuzz.DirArgs
	7, // 7: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	8, // [8:8] is the sub-list for method output_type
	8, // [8:8] is the sub-list for method input_type
	8, // [8:8] is the sub-list for extension type_name
	8, // [8:8] is the sub-list for extension extendee
	0, // [0:8] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the descriptor, oneof wrappers and Go
// types with the protobuf runtime. Idempotent: a second call returns early
// once File_ngolofuzz_proto is set. Generated — do not edit by hand.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Declare the oneof wrapper types for NgoloFuzzOne (index 7) and
	// NgoloFuzzAny (index 8) so reflection can resolve the variants.
	file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
		(*NgoloFuzzOne_Match)(nil),
		(*NgoloFuzzOne_Clean)(nil),
		(*NgoloFuzzOne_Split)(nil),
		(*NgoloFuzzOne_Ext)(nil),
		(*NgoloFuzzOne_Base)(nil),
		(*NgoloFuzzOne_IsAbs)(nil),
		(*NgoloFuzzOne_Dir)(nil),
	}
	file_ngolofuzz_proto_msgTypes[8].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   10,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the construction-only tables so they can be collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_path_filepath
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"path/filepath"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub: Read serves the fuzzer-supplied
// byte slice, writes are discarded (see Write below in this file).
type FuzzingConn struct {
	buf    []byte // fuzzer-supplied data served to readers
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of c.buf into b and reports how many
// bytes were actually copied, returning io.EOF once the buffer is exhausted —
// the io.Reader contract.
//
// Fix: the original compared len(b) against len(c.buf)+c.offset instead of
// the remaining byte count len(c.buf)-c.offset. A read into a buffer larger
// than the remainder (but smaller than len+offset) therefore reported len(b)
// bytes read while copying fewer, handing the caller uninitialized bytes and
// advancing offset past the end of buf.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy returns min(len(b), remaining), which is exactly n.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written: the harness never
// inspects outbound traffic.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so subsequent Reads hit EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a fixed fake net.Addr returned for both conn endpoints.
type FuzzingAddr struct{}

func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// Deadlines are accepted and ignored: the in-memory conn never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn
// positioned at the start of the data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO: emit these helper constructors only when the generated harness actually needs them.
// CreateBigInt builds a big.Int from the big-endian bytes in a.
// An empty or nil slice yields zero.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray converts each int64 in a to the platform int type,
// preserving order and length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to a uint16,
// preserving order and length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s (U+FFFD for invalid UTF-8, matching
// Go's string decoding), or NUL for an empty string.
func GetRune(s string) rune {
	decoded := []rune(s)
	if len(decoded) == 0 {
		return '\x00'
	}
	return decoded[0]
}
// FuzzNG_valid is the fuzz entry point for corpora known to be valid
// protobuf: a failed unmarshal is a harness bug and panics. The panic fires
// before the recover below is installed, so it is never swallowed — that
// ordering is deliberate; keep the defer after the Unmarshal check.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised while replaying the calls; anything else
	// is re-raised so real crashes still surface.
	// NOTE(review): presumably string panics are the ones the target raises
	// intentionally — confirm against the ngolofuzz convention.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzz entry point for raw, possibly-invalid input:
// we are unsure the input is a valid protobuf, so an unmarshal failure is
// simply rejected (return 0) instead of panicking as FuzzNG_valid does.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics raised while replaying the calls; anything else
	// is re-raised so real crashes still surface.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump below.
var initialized bool

// FuzzNG_List replays the decoded call sequence against path/filepath.
// On the first call it optionally writes a Go-source reproducer to the file
// named by $FUZZ_NG_REPRODUCER. Returns 1 when the whole list ran, 0 when a
// call returned an error or the list exceeds 4096 entries.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap the work done per input.
		if l > 4096 {
			return 0
		}
		// NOTE(review): several cases below (IsLocal, Localize, ToSlash,
		// FromSlash, SplitList, EvalSymlinks, Abs, Rel, VolumeName,
		// HasPrefix) reference oneof wrappers that the first generated
		// section of this file does not declare — it looks like two
		// generations of ngolofuzz.pb.go are concatenated; confirm which
		// descriptor is current.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Match:
			_, r1 := filepath.Match(a.Match.Pattern, a.Match.Name)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Clean:
			filepath.Clean(a.Clean.Path)
		case *NgoloFuzzOne_IsLocal:
			filepath.IsLocal(a.IsLocal.Path)
		case *NgoloFuzzOne_Localize:
			_, r1 := filepath.Localize(a.Localize.Path)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ToSlash:
			filepath.ToSlash(a.ToSlash.Path)
		case *NgoloFuzzOne_FromSlash:
			filepath.FromSlash(a.FromSlash.Path)
		case *NgoloFuzzOne_SplitList:
			filepath.SplitList(a.SplitList.Path)
		case *NgoloFuzzOne_Split:
			filepath.Split(a.Split.Path)
		case *NgoloFuzzOne_Ext:
			filepath.Ext(a.Ext.Path)
		case *NgoloFuzzOne_EvalSymlinks:
			// NOTE(review): EvalSymlinks touches the real filesystem — the
			// fuzzer's path argument reaches os.Lstat; confirm sandboxing.
			_, r1 := filepath.EvalSymlinks(a.EvalSymlinks.Path)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_IsAbs:
			filepath.IsAbs(a.IsAbs.Path)
		case *NgoloFuzzOne_Abs:
			_, r1 := filepath.Abs(a.Abs.Path)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Rel:
			_, r1 := filepath.Rel(a.Rel.BasePath, a.Rel.TargPath)
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Base:
			filepath.Base(a.Base.Path)
		case *NgoloFuzzOne_Dir:
			filepath.Dir(a.Dir.Path)
		case *NgoloFuzzOne_VolumeName:
			filepath.VolumeName(a.VolumeName.Path)
		case *NgoloFuzzOne_HasPrefix:
			// NOTE(review): filepath.HasPrefix is deprecated in the stdlib.
			filepath.HasPrefix(a.HasPrefix.P, a.HasPrefix.Prefix)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source transcript of the call sequence in gen to
// w, mirroring the dispatch in FuzzNG_List, so a crashing input can be turned
// into a standalone reproducer. WriteString errors are deliberately ignored
// (best-effort diagnostic output).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Match:
			w.WriteString(fmt.Sprintf("filepath.Match(%#+v, %#+v)\n", a.Match.Pattern, a.Match.Name))
		case *NgoloFuzzOne_Clean:
			w.WriteString(fmt.Sprintf("filepath.Clean(%#+v)\n", a.Clean.Path))
		case *NgoloFuzzOne_IsLocal:
			w.WriteString(fmt.Sprintf("filepath.IsLocal(%#+v)\n", a.IsLocal.Path))
		case *NgoloFuzzOne_Localize:
			w.WriteString(fmt.Sprintf("filepath.Localize(%#+v)\n", a.Localize.Path))
		case *NgoloFuzzOne_ToSlash:
			w.WriteString(fmt.Sprintf("filepath.ToSlash(%#+v)\n", a.ToSlash.Path))
		case *NgoloFuzzOne_FromSlash:
			w.WriteString(fmt.Sprintf("filepath.FromSlash(%#+v)\n", a.FromSlash.Path))
		case *NgoloFuzzOne_SplitList:
			w.WriteString(fmt.Sprintf("filepath.SplitList(%#+v)\n", a.SplitList.Path))
		case *NgoloFuzzOne_Split:
			w.WriteString(fmt.Sprintf("filepath.Split(%#+v)\n", a.Split.Path))
		case *NgoloFuzzOne_Ext:
			w.WriteString(fmt.Sprintf("filepath.Ext(%#+v)\n", a.Ext.Path))
		case *NgoloFuzzOne_EvalSymlinks:
			w.WriteString(fmt.Sprintf("filepath.EvalSymlinks(%#+v)\n", a.EvalSymlinks.Path))
		case *NgoloFuzzOne_IsAbs:
			w.WriteString(fmt.Sprintf("filepath.IsAbs(%#+v)\n", a.IsAbs.Path))
		case *NgoloFuzzOne_Abs:
			w.WriteString(fmt.Sprintf("filepath.Abs(%#+v)\n", a.Abs.Path))
		case *NgoloFuzzOne_Rel:
			w.WriteString(fmt.Sprintf("filepath.Rel(%#+v, %#+v)\n", a.Rel.BasePath, a.Rel.TargPath))
		case *NgoloFuzzOne_Base:
			w.WriteString(fmt.Sprintf("filepath.Base(%#+v)\n", a.Base.Path))
		case *NgoloFuzzOne_Dir:
			w.WriteString(fmt.Sprintf("filepath.Dir(%#+v)\n", a.Dir.Path))
		case *NgoloFuzzOne_VolumeName:
			w.WriteString(fmt.Sprintf("filepath.VolumeName(%#+v)\n", a.VolumeName.Path))
		case *NgoloFuzzOne_HasPrefix:
			w.WriteString(fmt.Sprintf("filepath.HasPrefix(%#+v, %#+v)\n", a.HasPrefix.P, a.HasPrefix.Prefix))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_path_filepath
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: fail the build if the generated code and the linked
// protoimpl runtime drift out of their supported version window.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// MatchArgs carries the pattern and name arguments for a fuzzed
// filepath.Match call. protoc-gen-go generated boilerplate; do not hand-edit.
type MatchArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Pattern       string                 `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"`
	Name          string                 `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *MatchArgs) Reset() {
	*x = MatchArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *MatchArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*MatchArgs) ProtoMessage() {}
func (x *MatchArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MatchArgs.ProtoReflect.Descriptor instead.
func (*MatchArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// Nil-safe field getters.
func (x *MatchArgs) GetPattern() string {
	if x != nil {
		return x.Pattern
	}
	return ""
}
func (x *MatchArgs) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// CleanArgs carries the path argument for a fuzzed filepath.Clean call.
// protoc-gen-go generated boilerplate; do not hand-edit.
type CleanArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *CleanArgs) Reset() {
	*x = CleanArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *CleanArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*CleanArgs) ProtoMessage() {}
func (x *CleanArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CleanArgs.ProtoReflect.Descriptor instead.
func (*CleanArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *CleanArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// IsLocalArgs carries the path argument for a fuzzed filepath.IsLocal call.
// protoc-gen-go generated boilerplate; do not hand-edit.
type IsLocalArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *IsLocalArgs) Reset() {
	*x = IsLocalArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsLocalArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsLocalArgs) ProtoMessage() {}
func (x *IsLocalArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsLocalArgs.ProtoReflect.Descriptor instead.
func (*IsLocalArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *IsLocalArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// LocalizeArgs carries the path argument for a fuzzed filepath.Localize call.
// protoc-gen-go generated boilerplate; do not hand-edit.
type LocalizeArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *LocalizeArgs) Reset() {
	*x = LocalizeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *LocalizeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*LocalizeArgs) ProtoMessage() {}
func (x *LocalizeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LocalizeArgs.ProtoReflect.Descriptor instead.
func (*LocalizeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *LocalizeArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// ToSlashArgs carries the path argument for a fuzzed filepath.ToSlash call.
// protoc-gen-go generated boilerplate; do not hand-edit.
type ToSlashArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *ToSlashArgs) Reset() {
	*x = ToSlashArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *ToSlashArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*ToSlashArgs) ProtoMessage() {}
func (x *ToSlashArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ToSlashArgs.ProtoReflect.Descriptor instead.
func (*ToSlashArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *ToSlashArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// FromSlashArgs carries the path argument for a fuzzed filepath.FromSlash
// call. protoc-gen-go generated boilerplate; do not hand-edit.
type FromSlashArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *FromSlashArgs) Reset() {
	*x = FromSlashArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *FromSlashArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*FromSlashArgs) ProtoMessage() {}
func (x *FromSlashArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FromSlashArgs.ProtoReflect.Descriptor instead.
func (*FromSlashArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *FromSlashArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// SplitListArgs carries the path argument for a fuzzed filepath.SplitList
// call. protoc-gen-go generated boilerplate; do not hand-edit.
type SplitListArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *SplitListArgs) Reset() {
	*x = SplitListArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *SplitListArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*SplitListArgs) ProtoMessage() {}
func (x *SplitListArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SplitListArgs.ProtoReflect.Descriptor instead.
func (*SplitListArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *SplitListArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// SplitArgs carries the path argument for a fuzzed filepath.Split call.
// protoc-gen-go generated boilerplate; do not hand-edit.
type SplitArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *SplitArgs) Reset() {
	*x = SplitArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *SplitArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*SplitArgs) ProtoMessage() {}
func (x *SplitArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SplitArgs.ProtoReflect.Descriptor instead.
func (*SplitArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *SplitArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// ExtArgs carries the path argument for a fuzzed filepath.Ext call.
// protoc-gen-go generated boilerplate; do not hand-edit.
type ExtArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *ExtArgs) Reset() {
	*x = ExtArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *ExtArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*ExtArgs) ProtoMessage() {}
func (x *ExtArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ExtArgs.ProtoReflect.Descriptor instead.
func (*ExtArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *ExtArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// EvalSymlinksArgs carries the path argument for a fuzzed
// filepath.EvalSymlinks call. protoc-gen-go generated boilerplate; do not
// hand-edit.
type EvalSymlinksArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *EvalSymlinksArgs) Reset() {
	*x = EvalSymlinksArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *EvalSymlinksArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*EvalSymlinksArgs) ProtoMessage() {}
func (x *EvalSymlinksArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EvalSymlinksArgs.ProtoReflect.Descriptor instead.
func (*EvalSymlinksArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *EvalSymlinksArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// IsAbsArgs carries the path argument for a fuzzed filepath.IsAbs call.
// protoc-gen-go generated boilerplate; do not hand-edit.
// NOTE(review): this type is also declared earlier in this file with message
// index 5 — two generations appear to be concatenated; confirm which is kept.
type IsAbsArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *IsAbsArgs) Reset() {
	*x = IsAbsArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsAbsArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsAbsArgs) ProtoMessage() {}
func (x *IsAbsArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsAbsArgs.ProtoReflect.Descriptor instead.
func (*IsAbsArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *IsAbsArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// AbsArgs carries the path argument for a fuzzed filepath.Abs call.
// protoc-gen-go generated boilerplate; do not hand-edit.
type AbsArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Path          string                 `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *AbsArgs) Reset() {
	*x = AbsArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *AbsArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*AbsArgs) ProtoMessage() {}
func (x *AbsArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AbsArgs.ProtoReflect.Descriptor instead.
func (*AbsArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetPath returns the path field; safe on a nil receiver.
func (x *AbsArgs) GetPath() string {
	if x != nil {
		return x.Path
	}
	return ""
}
// RelArgs carries the basepath and targpath arguments for a fuzzed
// filepath.Rel call. protoc-gen-go generated boilerplate; do not hand-edit.
type RelArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	BasePath      string                 `protobuf:"bytes,1,opt,name=basePath,proto3" json:"basePath,omitempty"`
	TargPath      string                 `protobuf:"bytes,2,opt,name=targPath,proto3" json:"targPath,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Standard generated message plumbing (Reset/String/ProtoMessage/ProtoReflect).
func (x *RelArgs) Reset() {
	*x = RelArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *RelArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*RelArgs) ProtoMessage() {}
func (x *RelArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RelArgs.ProtoReflect.Descriptor instead.
func (*RelArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// Nil-safe field getters.
func (x *RelArgs) GetBasePath() string {
	if x != nil {
		return x.BasePath
	}
	return ""
}
func (x *RelArgs) GetTargPath() string {
	if x != nil {
		return x.TargPath
	}
	return ""
}
type BaseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *BaseArgs) Reset() {
*x = BaseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *BaseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BaseArgs) ProtoMessage() {}
func (x *BaseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BaseArgs.ProtoReflect.Descriptor instead.
func (*BaseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
func (x *BaseArgs) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
type DirArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DirArgs) Reset() {
*x = DirArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DirArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DirArgs) ProtoMessage() {}
func (x *DirArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DirArgs.ProtoReflect.Descriptor instead.
func (*DirArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
func (x *DirArgs) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
type VolumeNameArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *VolumeNameArgs) Reset() {
*x = VolumeNameArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *VolumeNameArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*VolumeNameArgs) ProtoMessage() {}
func (x *VolumeNameArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use VolumeNameArgs.ProtoReflect.Descriptor instead.
func (*VolumeNameArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
func (x *VolumeNameArgs) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
type HasPrefixArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P string `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HasPrefixArgs) Reset() {
*x = HasPrefixArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HasPrefixArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HasPrefixArgs) ProtoMessage() {}
func (x *HasPrefixArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HasPrefixArgs.ProtoReflect.Descriptor instead.
func (*HasPrefixArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
func (x *HasPrefixArgs) GetP() string {
if x != nil {
return x.P
}
return ""
}
func (x *HasPrefixArgs) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Match
// *NgoloFuzzOne_Clean
// *NgoloFuzzOne_IsLocal
// *NgoloFuzzOne_Localize
// *NgoloFuzzOne_ToSlash
// *NgoloFuzzOne_FromSlash
// *NgoloFuzzOne_SplitList
// *NgoloFuzzOne_Split
// *NgoloFuzzOne_Ext
// *NgoloFuzzOne_EvalSymlinks
// *NgoloFuzzOne_IsAbs
// *NgoloFuzzOne_Abs
// *NgoloFuzzOne_Rel
// *NgoloFuzzOne_Base
// *NgoloFuzzOne_Dir
// *NgoloFuzzOne_VolumeName
// *NgoloFuzzOne_HasPrefix
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetMatch() *MatchArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Match); ok {
return x.Match
}
}
return nil
}
func (x *NgoloFuzzOne) GetClean() *CleanArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Clean); ok {
return x.Clean
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsLocal() *IsLocalArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsLocal); ok {
return x.IsLocal
}
}
return nil
}
func (x *NgoloFuzzOne) GetLocalize() *LocalizeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Localize); ok {
return x.Localize
}
}
return nil
}
func (x *NgoloFuzzOne) GetToSlash() *ToSlashArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToSlash); ok {
return x.ToSlash
}
}
return nil
}
func (x *NgoloFuzzOne) GetFromSlash() *FromSlashArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FromSlash); ok {
return x.FromSlash
}
}
return nil
}
func (x *NgoloFuzzOne) GetSplitList() *SplitListArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SplitList); ok {
return x.SplitList
}
}
return nil
}
func (x *NgoloFuzzOne) GetSplit() *SplitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Split); ok {
return x.Split
}
}
return nil
}
func (x *NgoloFuzzOne) GetExt() *ExtArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Ext); ok {
return x.Ext
}
}
return nil
}
func (x *NgoloFuzzOne) GetEvalSymlinks() *EvalSymlinksArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EvalSymlinks); ok {
return x.EvalSymlinks
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsAbs() *IsAbsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsAbs); ok {
return x.IsAbs
}
}
return nil
}
func (x *NgoloFuzzOne) GetAbs() *AbsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Abs); ok {
return x.Abs
}
}
return nil
}
func (x *NgoloFuzzOne) GetRel() *RelArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Rel); ok {
return x.Rel
}
}
return nil
}
func (x *NgoloFuzzOne) GetBase() *BaseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Base); ok {
return x.Base
}
}
return nil
}
func (x *NgoloFuzzOne) GetDir() *DirArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Dir); ok {
return x.Dir
}
}
return nil
}
func (x *NgoloFuzzOne) GetVolumeName() *VolumeNameArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_VolumeName); ok {
return x.VolumeName
}
}
return nil
}
func (x *NgoloFuzzOne) GetHasPrefix() *HasPrefixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_HasPrefix); ok {
return x.HasPrefix
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Match struct {
Match *MatchArgs `protobuf:"bytes,1,opt,name=Match,proto3,oneof"`
}
type NgoloFuzzOne_Clean struct {
Clean *CleanArgs `protobuf:"bytes,2,opt,name=Clean,proto3,oneof"`
}
type NgoloFuzzOne_IsLocal struct {
IsLocal *IsLocalArgs `protobuf:"bytes,3,opt,name=IsLocal,proto3,oneof"`
}
type NgoloFuzzOne_Localize struct {
Localize *LocalizeArgs `protobuf:"bytes,4,opt,name=Localize,proto3,oneof"`
}
type NgoloFuzzOne_ToSlash struct {
ToSlash *ToSlashArgs `protobuf:"bytes,5,opt,name=ToSlash,proto3,oneof"`
}
type NgoloFuzzOne_FromSlash struct {
FromSlash *FromSlashArgs `protobuf:"bytes,6,opt,name=FromSlash,proto3,oneof"`
}
type NgoloFuzzOne_SplitList struct {
SplitList *SplitListArgs `protobuf:"bytes,7,opt,name=SplitList,proto3,oneof"`
}
type NgoloFuzzOne_Split struct {
Split *SplitArgs `protobuf:"bytes,8,opt,name=Split,proto3,oneof"`
}
type NgoloFuzzOne_Ext struct {
Ext *ExtArgs `protobuf:"bytes,9,opt,name=Ext,proto3,oneof"`
}
type NgoloFuzzOne_EvalSymlinks struct {
EvalSymlinks *EvalSymlinksArgs `protobuf:"bytes,10,opt,name=EvalSymlinks,proto3,oneof"`
}
type NgoloFuzzOne_IsAbs struct {
IsAbs *IsAbsArgs `protobuf:"bytes,11,opt,name=IsAbs,proto3,oneof"`
}
type NgoloFuzzOne_Abs struct {
Abs *AbsArgs `protobuf:"bytes,12,opt,name=Abs,proto3,oneof"`
}
type NgoloFuzzOne_Rel struct {
Rel *RelArgs `protobuf:"bytes,13,opt,name=Rel,proto3,oneof"`
}
type NgoloFuzzOne_Base struct {
Base *BaseArgs `protobuf:"bytes,14,opt,name=Base,proto3,oneof"`
}
type NgoloFuzzOne_Dir struct {
Dir *DirArgs `protobuf:"bytes,15,opt,name=Dir,proto3,oneof"`
}
type NgoloFuzzOne_VolumeName struct {
VolumeName *VolumeNameArgs `protobuf:"bytes,16,opt,name=VolumeName,proto3,oneof"`
}
type NgoloFuzzOne_HasPrefix struct {
HasPrefix *HasPrefixArgs `protobuf:"bytes,17,opt,name=HasPrefix,proto3,oneof"`
}
func (*NgoloFuzzOne_Match) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Clean) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsLocal) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Localize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToSlash) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FromSlash) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SplitList) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Split) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Ext) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EvalSymlinks) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsAbs) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Abs) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Rel) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Base) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Dir) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_VolumeName) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_HasPrefix) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"9\n" +
"\tMatchArgs\x12\x18\n" +
"\apattern\x18\x01 \x01(\tR\apattern\x12\x12\n" +
"\x04name\x18\x02 \x01(\tR\x04name\"\x1f\n" +
"\tCleanArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"!\n" +
"\vIsLocalArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"\"\n" +
"\fLocalizeArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"!\n" +
"\vToSlashArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"#\n" +
"\rFromSlashArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"#\n" +
"\rSplitListArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"\x1f\n" +
"\tSplitArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"\x1d\n" +
"\aExtArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"&\n" +
"\x10EvalSymlinksArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"\x1f\n" +
"\tIsAbsArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"\x1d\n" +
"\aAbsArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"A\n" +
"\aRelArgs\x12\x1a\n" +
"\bbasePath\x18\x01 \x01(\tR\bbasePath\x12\x1a\n" +
"\btargPath\x18\x02 \x01(\tR\btargPath\"\x1e\n" +
"\bBaseArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"\x1d\n" +
"\aDirArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"$\n" +
"\x0eVolumeNameArgs\x12\x12\n" +
"\x04path\x18\x01 \x01(\tR\x04path\"5\n" +
"\rHasPrefixArgs\x12\f\n" +
"\x01p\x18\x01 \x01(\tR\x01p\x12\x16\n" +
"\x06prefix\x18\x02 \x01(\tR\x06prefix\"\xe6\x06\n" +
"\fNgoloFuzzOne\x12,\n" +
"\x05Match\x18\x01 \x01(\v2\x14.ngolofuzz.MatchArgsH\x00R\x05Match\x12,\n" +
"\x05Clean\x18\x02 \x01(\v2\x14.ngolofuzz.CleanArgsH\x00R\x05Clean\x122\n" +
"\aIsLocal\x18\x03 \x01(\v2\x16.ngolofuzz.IsLocalArgsH\x00R\aIsLocal\x125\n" +
"\bLocalize\x18\x04 \x01(\v2\x17.ngolofuzz.LocalizeArgsH\x00R\bLocalize\x122\n" +
"\aToSlash\x18\x05 \x01(\v2\x16.ngolofuzz.ToSlashArgsH\x00R\aToSlash\x128\n" +
"\tFromSlash\x18\x06 \x01(\v2\x18.ngolofuzz.FromSlashArgsH\x00R\tFromSlash\x128\n" +
"\tSplitList\x18\a \x01(\v2\x18.ngolofuzz.SplitListArgsH\x00R\tSplitList\x12,\n" +
"\x05Split\x18\b \x01(\v2\x14.ngolofuzz.SplitArgsH\x00R\x05Split\x12&\n" +
"\x03Ext\x18\t \x01(\v2\x12.ngolofuzz.ExtArgsH\x00R\x03Ext\x12A\n" +
"\fEvalSymlinks\x18\n" +
" \x01(\v2\x1b.ngolofuzz.EvalSymlinksArgsH\x00R\fEvalSymlinks\x12,\n" +
"\x05IsAbs\x18\v \x01(\v2\x14.ngolofuzz.IsAbsArgsH\x00R\x05IsAbs\x12&\n" +
"\x03Abs\x18\f \x01(\v2\x12.ngolofuzz.AbsArgsH\x00R\x03Abs\x12&\n" +
"\x03Rel\x18\r \x01(\v2\x12.ngolofuzz.RelArgsH\x00R\x03Rel\x12)\n" +
"\x04Base\x18\x0e \x01(\v2\x13.ngolofuzz.BaseArgsH\x00R\x04Base\x12&\n" +
"\x03Dir\x18\x0f \x01(\v2\x12.ngolofuzz.DirArgsH\x00R\x03Dir\x12;\n" +
"\n" +
"VolumeName\x18\x10 \x01(\v2\x19.ngolofuzz.VolumeNameArgsH\x00R\n" +
"VolumeName\x128\n" +
"\tHasPrefix\x18\x11 \x01(\v2\x18.ngolofuzz.HasPrefixArgsH\x00R\tHasPrefixB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_path_filepathb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
var file_ngolofuzz_proto_goTypes = []any{
(*MatchArgs)(nil), // 0: ngolofuzz.MatchArgs
(*CleanArgs)(nil), // 1: ngolofuzz.CleanArgs
(*IsLocalArgs)(nil), // 2: ngolofuzz.IsLocalArgs
(*LocalizeArgs)(nil), // 3: ngolofuzz.LocalizeArgs
(*ToSlashArgs)(nil), // 4: ngolofuzz.ToSlashArgs
(*FromSlashArgs)(nil), // 5: ngolofuzz.FromSlashArgs
(*SplitListArgs)(nil), // 6: ngolofuzz.SplitListArgs
(*SplitArgs)(nil), // 7: ngolofuzz.SplitArgs
(*ExtArgs)(nil), // 8: ngolofuzz.ExtArgs
(*EvalSymlinksArgs)(nil), // 9: ngolofuzz.EvalSymlinksArgs
(*IsAbsArgs)(nil), // 10: ngolofuzz.IsAbsArgs
(*AbsArgs)(nil), // 11: ngolofuzz.AbsArgs
(*RelArgs)(nil), // 12: ngolofuzz.RelArgs
(*BaseArgs)(nil), // 13: ngolofuzz.BaseArgs
(*DirArgs)(nil), // 14: ngolofuzz.DirArgs
(*VolumeNameArgs)(nil), // 15: ngolofuzz.VolumeNameArgs
(*HasPrefixArgs)(nil), // 16: ngolofuzz.HasPrefixArgs
(*NgoloFuzzOne)(nil), // 17: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 18: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 19: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.Match:type_name -> ngolofuzz.MatchArgs
1, // 1: ngolofuzz.NgoloFuzzOne.Clean:type_name -> ngolofuzz.CleanArgs
2, // 2: ngolofuzz.NgoloFuzzOne.IsLocal:type_name -> ngolofuzz.IsLocalArgs
3, // 3: ngolofuzz.NgoloFuzzOne.Localize:type_name -> ngolofuzz.LocalizeArgs
4, // 4: ngolofuzz.NgoloFuzzOne.ToSlash:type_name -> ngolofuzz.ToSlashArgs
5, // 5: ngolofuzz.NgoloFuzzOne.FromSlash:type_name -> ngolofuzz.FromSlashArgs
6, // 6: ngolofuzz.NgoloFuzzOne.SplitList:type_name -> ngolofuzz.SplitListArgs
7, // 7: ngolofuzz.NgoloFuzzOne.Split:type_name -> ngolofuzz.SplitArgs
8, // 8: ngolofuzz.NgoloFuzzOne.Ext:type_name -> ngolofuzz.ExtArgs
9, // 9: ngolofuzz.NgoloFuzzOne.EvalSymlinks:type_name -> ngolofuzz.EvalSymlinksArgs
10, // 10: ngolofuzz.NgoloFuzzOne.IsAbs:type_name -> ngolofuzz.IsAbsArgs
11, // 11: ngolofuzz.NgoloFuzzOne.Abs:type_name -> ngolofuzz.AbsArgs
12, // 12: ngolofuzz.NgoloFuzzOne.Rel:type_name -> ngolofuzz.RelArgs
13, // 13: ngolofuzz.NgoloFuzzOne.Base:type_name -> ngolofuzz.BaseArgs
14, // 14: ngolofuzz.NgoloFuzzOne.Dir:type_name -> ngolofuzz.DirArgs
15, // 15: ngolofuzz.NgoloFuzzOne.VolumeName:type_name -> ngolofuzz.VolumeNameArgs
16, // 16: ngolofuzz.NgoloFuzzOne.HasPrefix:type_name -> ngolofuzz.HasPrefixArgs
17, // 17: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
18, // [18:18] is the sub-list for method output_type
18, // [18:18] is the sub-list for method input_type
18, // [18:18] is the sub-list for extension type_name
18, // [18:18] is the sub-list for extension extendee
0, // [0:18] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[17].OneofWrappers = []any{
(*NgoloFuzzOne_Match)(nil),
(*NgoloFuzzOne_Clean)(nil),
(*NgoloFuzzOne_IsLocal)(nil),
(*NgoloFuzzOne_Localize)(nil),
(*NgoloFuzzOne_ToSlash)(nil),
(*NgoloFuzzOne_FromSlash)(nil),
(*NgoloFuzzOne_SplitList)(nil),
(*NgoloFuzzOne_Split)(nil),
(*NgoloFuzzOne_Ext)(nil),
(*NgoloFuzzOne_EvalSymlinks)(nil),
(*NgoloFuzzOne_IsAbs)(nil),
(*NgoloFuzzOne_Abs)(nil),
(*NgoloFuzzOne_Rel)(nil),
(*NgoloFuzzOne_Base)(nil),
(*NgoloFuzzOne_Dir)(nil),
(*NgoloFuzzOne_VolumeName)(nil),
(*NgoloFuzzOne_HasPrefix)(nil),
}
file_ngolofuzz_proto_msgTypes[18].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 20,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_plugin
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"plugin"
"runtime"
"time"
)
// FuzzingConn is a fake network connection whose read side is fed from
// a fixed, fuzzer-supplied byte slice and whose write side discards
// everything. It lets fuzzed code exercise net.Conn-shaped APIs
// deterministically, with no real I/O.
type FuzzingConn struct {
	buf    []byte // fuzzer-provided input served by Read
	offset int    // current read position within buf
}
// Read copies pending fuzz input into b and advances the read offset,
// returning io.EOF once the buffer is exhausted.
//
// Bug fixed: the original partial-read guard compared len(b) against
// len(c.buf)+c.offset instead of the bytes actually remaining
// (len(c.buf)-c.offset). When b was larger than the remainder but
// smaller than that inflated bound, it returned n = len(b) even though
// fewer bytes had been copied, and advanced offset past len(c.buf) —
// violating the io.Reader contract (n must not exceed the bytes
// written into b). copy already reports the number of bytes
// transferred, so use it as the single source of truth.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write reports full success while discarding b: the fuzzing
// connection is a data sink, so written bytes are never inspected.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}
// Close marks the connection as fully drained so that any subsequent
// Read immediately returns io.EOF. It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
type FuzzingAddr struct{}
// Network returns a fixed fake network name for the fuzzing address.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}
// String returns a fixed fake address string for the fuzzing address.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address; the connection has no real
// endpoint.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}
// RemoteAddr returns a placeholder address; the connection has no real
// endpoint.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}
// SetDeadline is a no-op: the in-memory connection cannot block, so
// deadlines are meaningless.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}
// SetReadDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}
// SetWriteDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzzer-supplied bytes a in a FuzzingConn
// positioned at the start of the data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helper constructors when the target actually needs them.

// CreateBigInt interprets a as a big-endian unsigned integer and
// returns it as a *big.Int (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes the byte slice a through a buffered reader,
// for targets that consume a *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to the platform int type,
// preserving order. Values outside the int range wrap per Go's integer
// conversion rules.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array narrows each int64 in a to uint16, preserving
// order. Values outside the uint16 range wrap per Go's integer
// conversion rules (e.g. 65536 becomes 0).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune decoded from s, or NUL ('\x00') when
// s is empty. An invalid leading UTF-8 sequence yields U+FFFD, exactly
// as a range loop over the string would produce.
func GetRune(s string) rune {
	var first rune // zero value is '\x00', the empty-string result
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid executes the protobuf-encoded call list in data,
// panicking when data is not a valid NgoloFuzzList — it is meant for
// corpora known to be LPM-generated, where an unmarshal failure is a
// harness bug rather than an uninteresting input.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised while replaying the list (treated as
	// expected rejections by the target); any other panic value is
	// re-raised so real bugs still crash the fuzzer. Registered after
	// the unmarshal check so the panic above is NOT suppressed.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	// Force a collection between inputs to surface GC-dependent bugs
	// and keep memory usage stable across long fuzzing runs.
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure executes the protobuf-encoded call list in data, but —
// unlike FuzzNG_valid — silently skips inputs that do not decode as a
// NgoloFuzzList, since the input is not guaranteed to be a valid protobuf.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics from the target (expected rejections);
	// re-raise anything else so genuine bugs still crash the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	// Collect between inputs to keep memory stable over long runs.
	runtime.GC()
	return FuzzNG_List(gen)
}
var initialized bool
// FuzzNG_List replays gen as a sequence of plugin-package calls:
// successful plugin.Open results are accumulated and then reused
// round-robin as receivers for Lookup. It returns 1 when the whole
// list executed and 0 on an early stop (an error from the target or an
// oversized list). On the first call only, if FUZZ_NG_REPRODUCER names
// a file, a Go-source reproducer of the list is written there.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var PluginResults []*plugin.Plugin
	PluginResultsIndex := 0
	for l := range gen.List {
		// Cap the number of operations replayed per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Open:
			r0, r1 := plugin.Open(a.Open.Path)
			if r0 != nil{
				PluginResults = append(PluginResults, r0)
			}
			if r1 != nil{
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_PluginNgdotLookup:
			// Lookup needs a previously opened plugin; skip if none yet.
			if len(PluginResults) == 0 {
				continue
			}
			arg0 := PluginResults[PluginResultsIndex]
			PluginResultsIndex = (PluginResultsIndex + 1) % len(PluginResults)
			_, r1 := arg0.Lookup(a.PluginNgdotLookup.SymName)
			if r1 != nil{
				r1.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer of the call list to w,
// mirroring the execution order and round-robin receiver selection
// used by FuzzNG_List so the emitted program replays the same calls.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	PluginNb := 0
	PluginResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Open:
			w.WriteString(fmt.Sprintf("Plugin%d, _ := plugin.Open(%#+v)\n", PluginNb, a.Open.Path))
			PluginNb = PluginNb + 1
		case *NgoloFuzzOne_PluginNgdotLookup:
			// No plugin opened yet: FuzzNG_List skips this call, so the
			// reproducer must too.
			if PluginNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Plugin%d.Lookup(%#+v)\n", PluginResultsIndex, a.PluginNgdotLookup.SymName))
			PluginResultsIndex = (PluginResultsIndex + 1) % PluginNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_plugin
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type OpenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *OpenArgs) Reset() {
*x = OpenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *OpenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OpenArgs) ProtoMessage() {}
func (x *OpenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OpenArgs.ProtoReflect.Descriptor instead.
func (*OpenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *OpenArgs) GetPath() string {
if x != nil {
return x.Path
}
return ""
}
type PluginNgdotLookupArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
SymName string `protobuf:"bytes,1,opt,name=symName,proto3" json:"symName,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PluginNgdotLookupArgs) Reset() {
*x = PluginNgdotLookupArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PluginNgdotLookupArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PluginNgdotLookupArgs) ProtoMessage() {}
func (x *PluginNgdotLookupArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PluginNgdotLookupArgs.ProtoReflect.Descriptor instead.
func (*PluginNgdotLookupArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *PluginNgdotLookupArgs) GetSymName() string {
if x != nil {
return x.SymName
}
return ""
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_Open
// *NgoloFuzzOne_PluginNgdotLookup
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// --- protoc-gen-go generated plumbing for NgoloFuzzOne (oneof "item") ---
// NOTE(review): machine-generated code; regenerate from ngolofuzz.proto
// rather than hand-editing.

// Reset clears x to its zero value and re-binds the generated message info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect lazily attaches the message info and returns the
// protoreflect view of x.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetItem returns whichever oneof wrapper is currently set, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetOpen returns the Open field if that oneof member is set, else nil.
func (x *NgoloFuzzOne) GetOpen() *OpenArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Open); ok {
			return x.Open
		}
	}
	return nil
}

// GetPluginNgdotLookup returns that oneof member if set, else nil.
func (x *NgoloFuzzOne) GetPluginNgdotLookup() *PluginNgdotLookupArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PluginNgdotLookup); ok {
			return x.PluginNgdotLookup
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the closed interface implemented only by the
// oneof wrapper types below.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_Open struct {
	Open *OpenArgs `protobuf:"bytes,1,opt,name=Open,proto3,oneof"`
}

type NgoloFuzzOne_PluginNgdotLookup struct {
	PluginNgdotLookup *PluginNgdotLookupArgs `protobuf:"bytes,2,opt,name=PluginNgdotLookup,proto3,oneof"`
}

func (*NgoloFuzzOne_Open) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_PluginNgdotLookup) isNgoloFuzzOne_Item() {}

// NgoloFuzzAny carries exactly one scalar value through the oneof "item".
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzAny_DoubleArgs
	// *NgoloFuzzAny_Int64Args
	// *NgoloFuzzAny_BoolArgs
	// *NgoloFuzzAny_StringArgs
	// *NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and re-binds the generated message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below each return the zero value of their type
// when x is nil or a different oneof member is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}

// NgoloFuzzList is the top-level message decoded by the fuzz entry
// points: a sequence of NgoloFuzzOne operations.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}

var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto
// for ngolofuzz.proto, embedded as a string constant.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1e\n" +
	"\bOpenArgs\x12\x12\n" +
	"\x04path\x18\x01 \x01(\tR\x04path\"1\n" +
	"\x15PluginNgdotLookupArgs\x12\x18\n" +
	"\asymName\x18\x01 \x01(\tR\asymName\"\x93\x01\n" +
	"\fNgoloFuzzOne\x12)\n" +
	"\x04Open\x18\x01 \x01(\v2\x13.ngolofuzz.OpenArgsH\x00R\x04Open\x12P\n" +
	"\x11PluginNgdotLookup\x18\x02 \x01(\v2 .ngolofuzz.PluginNgdotLookupArgsH\x00R\x11PluginNgdotLookupB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x13Z\x11./;fuzz_ng_pluginb\x06proto3"

var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw
// descriptor exactly once and caches the result.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}

var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_ngolofuzz_proto_goTypes = []any{
	(*OpenArgs)(nil),              // 0: ngolofuzz.OpenArgs
	(*PluginNgdotLookupArgs)(nil), // 1: ngolofuzz.PluginNgdotLookupArgs
	(*NgoloFuzzOne)(nil),          // 2: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),          // 3: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),         // 4: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
	0, // 0: ngolofuzz.NgoloFuzzOne.Open:type_name -> ngolofuzz.OpenArgs
	1, // 1: ngolofuzz.NgoloFuzzOne.PluginNgdotLookup:type_name -> ngolofuzz.PluginNgdotLookupArgs
	2, // 2: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	3, // [3:3] is the sub-list for method output_type
	3, // [3:3] is the sub-list for method input_type
	3, // [3:3] is the sub-list for extension type_name
	3, // [3:3] is the sub-list for extension extendee
	0, // [0:3] is the sub-list for field type_name
}

func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the file's types with the
// protobuf runtime; it is idempotent via the File_ngolofuzz_proto guard.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	file_ngolofuzz_proto_msgTypes[2].OneofWrappers = []any{
		(*NgoloFuzzOne_Open)(nil),
		(*NgoloFuzzOne_PluginNgdotLookup)(nil),
	}
	file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   5,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_regexp
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"regexp"
"runtime"
"strings"
"time"
)
// FuzzingConn is an in-memory stub connection that serves a fixed byte
// buffer to Read calls and discards everything else.
type FuzzingConn struct {
	buf    []byte // data handed out by Read
	offset int    // current read position within buf
}

// Read copies the next chunk of c.buf into b. It returns io.EOF once the
// buffer is exhausted, and otherwise reports exactly the number of bytes
// copied, per the io.Reader contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// BUG FIX: the original condition was `len(b) < len(c.buf)+c.offset`,
	// which overstates the remaining data once offset > 0; it could take
	// this branch when b had room for the rest of the buffer, returning
	// n == len(b) even though only len(buf)-offset bytes were copied.
	if len(b) < len(c.buf)-c.offset {
		// b is smaller than the remaining data: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b can hold everything that is left.
	copy(b, c.buf[c.offset:])
	r := len(c.buf) - c.offset
	c.offset = len(c.buf)
	return r, nil
}
// Write pretends to consume all of b; the data is discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}

// Close marks the entire buffer as consumed, so later Reads report EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}

// Network reports the fixed placeholder network name.
func (c *FuzzingAddr) Network() string {
	const network = "fuzz_addr_net"
	return network
}

// String reports the fixed placeholder address string.
func (c *FuzzingAddr) String() string {
	const addr = "fuzz_addr_string"
	return addr
}
// LocalAddr returns a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: the in-memory connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start
// of the buffer.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit the helper constructors below when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a through a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty.
func GetRune(s string) rune {
	for _, first := range s {
		return first
	}
	return '\x00'
}
// FuzzNG_valid runs one fuzzing iteration on data that is expected to be
// a valid protobuf-encoded NgoloFuzzList; it panics if decoding fails.
func FuzzNG_valid(data []byte) int {
	list := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, list); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// string panics are swallowed on purpose; anything else is rethrown
		if _, ok := r.(string); !ok {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// FuzzNG_unsure runs one fuzzing iteration when we are unsure the input
// is a valid protobuf: undecodable inputs are simply rejected.
func FuzzNG_unsure(data []byte) int {
	list := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, list); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// string panics are swallowed on purpose; anything else is rethrown
		if _, ok := r.(string); !ok {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// initialized guards the one-time FUZZ_NG_REPRODUCER setup in FuzzNG_List.
var initialized bool
// FuzzNG_List interprets gen.List as a program: each NgoloFuzzOne item
// selects one regexp API call. Compiled regexps accumulate in
// RegexpResults; method calls are dispatched round-robin over them via
// RegexpResultsIndex (each arm guards against an empty result set before
// indexing). Returns 1 when the whole list was processed, 0 when an API
// returned an error or the list exceeds the iteration cap.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// When FUZZ_NG_REPRODUCER names a file, write a readable
		// reproducer of this input once per process.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var RegexpResults []*regexp.Regexp
	RegexpResultsIndex := 0
	for l := range gen.List {
		// cap the number of interpreted operations per input
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_RegexpNgdotString:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.String()
		case *NgoloFuzzOne_RegexpNgdotCopy:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			r0 := arg0.Copy()
			if r0 != nil {
				RegexpResults = append(RegexpResults, r0)
			}
		case *NgoloFuzzOne_Compile:
			r0, r1 := regexp.Compile(a.Compile.Expr)
			if r0 != nil {
				RegexpResults = append(RegexpResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_CompilePOSIX:
			r0, r1 := regexp.CompilePOSIX(a.CompilePOSIX.Expr)
			if r0 != nil {
				RegexpResults = append(RegexpResults, r0)
			}
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_RegexpNgdotLongest:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.Longest()
		case *NgoloFuzzOne_RegexpNgdotNumSubexp:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.NumSubexp()
		case *NgoloFuzzOne_RegexpNgdotSubexpNames:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.SubexpNames()
		case *NgoloFuzzOne_RegexpNgdotSubexpIndex:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.SubexpIndex(a.RegexpNgdotSubexpIndex.Name)
		case *NgoloFuzzOne_RegexpNgdotLiteralPrefix:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.LiteralPrefix()
		case *NgoloFuzzOne_RegexpNgdotMatchReader:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg1 := strings.NewReader(a.RegexpNgdotMatchReader.R)
			arg0.MatchReader(arg1)
		case *NgoloFuzzOne_RegexpNgdotMatchString:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.MatchString(a.RegexpNgdotMatchString.S)
		case *NgoloFuzzOne_RegexpNgdotMatch:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.Match(a.RegexpNgdotMatch.B)
		case *NgoloFuzzOne_MatchReader:
			arg1 := strings.NewReader(a.MatchReader.R)
			_, r1 := regexp.MatchReader(a.MatchReader.Pattern, arg1)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_MatchString:
			_, r1 := regexp.MatchString(a.MatchString.Pattern, a.MatchString.S)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Match:
			_, r1 := regexp.Match(a.Match.Pattern, a.Match.B)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_QuoteMeta:
			regexp.QuoteMeta(a.QuoteMeta.S)
		case *NgoloFuzzOne_RegexpNgdotFind:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.Find(a.RegexpNgdotFind.B)
		case *NgoloFuzzOne_RegexpNgdotFindIndex:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.FindIndex(a.RegexpNgdotFindIndex.B)
		case *NgoloFuzzOne_RegexpNgdotFindString:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.FindString(a.RegexpNgdotFindString.S)
		case *NgoloFuzzOne_RegexpNgdotFindStringIndex:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.FindStringIndex(a.RegexpNgdotFindStringIndex.S)
		case *NgoloFuzzOne_RegexpNgdotFindReaderIndex:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg1 := strings.NewReader(a.RegexpNgdotFindReaderIndex.R)
			arg0.FindReaderIndex(arg1)
		case *NgoloFuzzOne_RegexpNgdotFindSubmatch:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.FindSubmatch(a.RegexpNgdotFindSubmatch.B)
		case *NgoloFuzzOne_RegexpNgdotFindSubmatchIndex:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.FindSubmatchIndex(a.RegexpNgdotFindSubmatchIndex.B)
		case *NgoloFuzzOne_RegexpNgdotFindStringSubmatch:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.FindStringSubmatch(a.RegexpNgdotFindStringSubmatch.S)
		case *NgoloFuzzOne_RegexpNgdotFindStringSubmatchIndex:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg0.FindStringSubmatchIndex(a.RegexpNgdotFindStringSubmatchIndex.S)
		case *NgoloFuzzOne_RegexpNgdotFindReaderSubmatchIndex:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg1 := strings.NewReader(a.RegexpNgdotFindReaderSubmatchIndex.R)
			arg0.FindReaderSubmatchIndex(arg1)
		case *NgoloFuzzOne_RegexpNgdotFindAll:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg2 := int(a.RegexpNgdotFindAll.N)
			arg0.FindAll(a.RegexpNgdotFindAll.B, arg2)
		case *NgoloFuzzOne_RegexpNgdotFindAllIndex:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg2 := int(a.RegexpNgdotFindAllIndex.N)
			arg0.FindAllIndex(a.RegexpNgdotFindAllIndex.B, arg2)
		case *NgoloFuzzOne_RegexpNgdotFindAllSubmatch:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg2 := int(a.RegexpNgdotFindAllSubmatch.N)
			arg0.FindAllSubmatch(a.RegexpNgdotFindAllSubmatch.B, arg2)
		case *NgoloFuzzOne_RegexpNgdotFindAllSubmatchIndex:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg2 := int(a.RegexpNgdotFindAllSubmatchIndex.N)
			arg0.FindAllSubmatchIndex(a.RegexpNgdotFindAllSubmatchIndex.B, arg2)
		case *NgoloFuzzOne_RegexpNgdotSplit:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			arg2 := int(a.RegexpNgdotSplit.N)
			// bound the split count to keep memory use sane
			arg0.Split(a.RegexpNgdotSplit.S, arg2%0x10001)
		case *NgoloFuzzOne_RegexpNgdotAppendText:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			_, r1 := arg0.AppendText(a.RegexpNgdotAppendText.B)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_RegexpNgdotMarshalText:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			_, r1 := arg0.MarshalText()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_RegexpNgdotUnmarshalText:
			if len(RegexpResults) == 0 {
				continue
			}
			arg0 := RegexpResults[RegexpResultsIndex]
			RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
			r0 := arg0.UnmarshalText(a.RegexpNgdotUnmarshalText.Text)
			if r0 != nil {
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes a Go-like reproducer for gen to w, mirroring the
// dispatch performed by FuzzNG_List: RegexpNb counts regexps created so
// far and RegexpResultsIndex tracks the round-robin cursor, so the
// printed variable names line up with the values FuzzNG_List would use.
// WriteString errors are intentionally ignored (best-effort output).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	RegexpNb := 0
	RegexpResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_RegexpNgdotString:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.String()\n", RegexpResultsIndex))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotCopy:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d := Regexp%d.Copy()\n", RegexpNb, RegexpResultsIndex))
			RegexpNb = RegexpNb + 1
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_Compile:
			w.WriteString(fmt.Sprintf("Regexp%d, _ := regexp.Compile(%#+v)\n", RegexpNb, a.Compile.Expr))
			RegexpNb = RegexpNb + 1
		case *NgoloFuzzOne_CompilePOSIX:
			w.WriteString(fmt.Sprintf("Regexp%d, _ := regexp.CompilePOSIX(%#+v)\n", RegexpNb, a.CompilePOSIX.Expr))
			RegexpNb = RegexpNb + 1
		case *NgoloFuzzOne_RegexpNgdotLongest:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.Longest()\n", RegexpResultsIndex))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotNumSubexp:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.NumSubexp()\n", RegexpResultsIndex))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotSubexpNames:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.SubexpNames()\n", RegexpResultsIndex))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotSubexpIndex:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.SubexpIndex(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotSubexpIndex.Name))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotLiteralPrefix:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.LiteralPrefix()\n", RegexpResultsIndex))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotMatchReader:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.MatchReader(strings.NewReader(%#+v))\n", RegexpResultsIndex, a.RegexpNgdotMatchReader.R))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotMatchString:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.MatchString(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotMatchString.S))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotMatch:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.Match(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotMatch.B))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_MatchReader:
			w.WriteString(fmt.Sprintf("regexp.MatchReader(%#+v, strings.NewReader(%#+v))\n", a.MatchReader.Pattern, a.MatchReader.R))
		case *NgoloFuzzOne_MatchString:
			w.WriteString(fmt.Sprintf("regexp.MatchString(%#+v, %#+v)\n", a.MatchString.Pattern, a.MatchString.S))
		case *NgoloFuzzOne_Match:
			w.WriteString(fmt.Sprintf("regexp.Match(%#+v, %#+v)\n", a.Match.Pattern, a.Match.B))
		case *NgoloFuzzOne_QuoteMeta:
			w.WriteString(fmt.Sprintf("regexp.QuoteMeta(%#+v)\n", a.QuoteMeta.S))
		case *NgoloFuzzOne_RegexpNgdotFind:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.Find(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotFind.B))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindIndex:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindIndex(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotFindIndex.B))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindString:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindString(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotFindString.S))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindStringIndex:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindStringIndex(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotFindStringIndex.S))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindReaderIndex:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindReaderIndex(strings.NewReader(%#+v))\n", RegexpResultsIndex, a.RegexpNgdotFindReaderIndex.R))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindSubmatch:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindSubmatch(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotFindSubmatch.B))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindSubmatchIndex:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindSubmatchIndex(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotFindSubmatchIndex.B))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindStringSubmatch:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindStringSubmatch(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotFindStringSubmatch.S))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindStringSubmatchIndex:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindStringSubmatchIndex(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotFindStringSubmatchIndex.S))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindReaderSubmatchIndex:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindReaderSubmatchIndex(strings.NewReader(%#+v))\n", RegexpResultsIndex, a.RegexpNgdotFindReaderSubmatchIndex.R))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindAll:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindAll(%#+v, int(%#+v))\n", RegexpResultsIndex, a.RegexpNgdotFindAll.B, a.RegexpNgdotFindAll.N))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindAllIndex:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindAllIndex(%#+v, int(%#+v))\n", RegexpResultsIndex, a.RegexpNgdotFindAllIndex.B, a.RegexpNgdotFindAllIndex.N))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindAllSubmatch:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindAllSubmatch(%#+v, int(%#+v))\n", RegexpResultsIndex, a.RegexpNgdotFindAllSubmatch.B, a.RegexpNgdotFindAllSubmatch.N))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotFindAllSubmatchIndex:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.FindAllSubmatchIndex(%#+v, int(%#+v))\n", RegexpResultsIndex, a.RegexpNgdotFindAllSubmatchIndex.B, a.RegexpNgdotFindAllSubmatchIndex.N))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotSplit:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.Split(%#+v, int(%#+v) %% 0x10001)\n", RegexpResultsIndex, a.RegexpNgdotSplit.S, a.RegexpNgdotSplit.N))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotAppendText:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.AppendText(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotAppendText.B))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotMarshalText:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.MarshalText()\n", RegexpResultsIndex))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		case *NgoloFuzzOne_RegexpNgdotUnmarshalText:
			if RegexpNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Regexp%d.UnmarshalText(%#+v)\n", RegexpResultsIndex, a.RegexpNgdotUnmarshalText.Text))
			RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_regexp
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type RegexpNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RegexpNgdotStringArgs) Reset() {
*x = RegexpNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RegexpNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegexpNgdotStringArgs) ProtoMessage() {}
func (x *RegexpNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type RegexpNgdotCopyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RegexpNgdotCopyArgs) Reset() {
*x = RegexpNgdotCopyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RegexpNgdotCopyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegexpNgdotCopyArgs) ProtoMessage() {}
func (x *RegexpNgdotCopyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotCopyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type CompileArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Expr string `protobuf:"bytes,1,opt,name=expr,proto3" json:"expr,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CompileArgs) Reset() {
*x = CompileArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CompileArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CompileArgs) ProtoMessage() {}
func (x *CompileArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CompileArgs.ProtoReflect.Descriptor instead.
func (*CompileArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *CompileArgs) GetExpr() string {
if x != nil {
return x.Expr
}
return ""
}
type CompilePOSIXArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Expr string `protobuf:"bytes,1,opt,name=expr,proto3" json:"expr,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CompilePOSIXArgs) Reset() {
*x = CompilePOSIXArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CompilePOSIXArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CompilePOSIXArgs) ProtoMessage() {}
func (x *CompilePOSIXArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CompilePOSIXArgs.ProtoReflect.Descriptor instead.
func (*CompilePOSIXArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *CompilePOSIXArgs) GetExpr() string {
if x != nil {
return x.Expr
}
return ""
}
type RegexpNgdotLongestArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RegexpNgdotLongestArgs) Reset() {
*x = RegexpNgdotLongestArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RegexpNgdotLongestArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegexpNgdotLongestArgs) ProtoMessage() {}
func (x *RegexpNgdotLongestArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotLongestArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotLongestArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type RegexpNgdotNumSubexpArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RegexpNgdotNumSubexpArgs) Reset() {
*x = RegexpNgdotNumSubexpArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RegexpNgdotNumSubexpArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegexpNgdotNumSubexpArgs) ProtoMessage() {}
func (x *RegexpNgdotNumSubexpArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotNumSubexpArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotNumSubexpArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// RegexpNgdotSubexpNamesArgs is a generated protobuf message with no payload
// fields (message index 6 in ngolofuzz.proto).
type RegexpNgdotSubexpNamesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotSubexpNamesArgs) Reset() {
*x = RegexpNgdotSubexpNamesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotSubexpNamesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotSubexpNamesArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotSubexpNamesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotSubexpNamesArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotSubexpNamesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// RegexpNgdotSubexpIndexArgs is a generated protobuf message carrying a
// single string field Name (message index 7 in ngolofuzz.proto).
type RegexpNgdotSubexpIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Name is proto3 field 1 ("name").
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotSubexpIndexArgs) Reset() {
*x = RegexpNgdotSubexpIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotSubexpIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotSubexpIndexArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotSubexpIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotSubexpIndexArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotSubexpIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// GetName returns the Name field, or "" when the receiver is nil.
func (x *RegexpNgdotSubexpIndexArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
// RegexpNgdotLiteralPrefixArgs is a generated protobuf message with no
// payload fields (message index 8 in ngolofuzz.proto).
type RegexpNgdotLiteralPrefixArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotLiteralPrefixArgs) Reset() {
*x = RegexpNgdotLiteralPrefixArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotLiteralPrefixArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotLiteralPrefixArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotLiteralPrefixArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotLiteralPrefixArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotLiteralPrefixArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// RegexpNgdotMatchReaderArgs is a generated protobuf message carrying a
// single string field R (message index 9 in ngolofuzz.proto).
type RegexpNgdotMatchReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// R is proto3 field 1 ("r").
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotMatchReaderArgs) Reset() {
*x = RegexpNgdotMatchReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotMatchReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotMatchReaderArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotMatchReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotMatchReaderArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotMatchReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetR returns the R field, or "" when the receiver is nil.
func (x *RegexpNgdotMatchReaderArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
// RegexpNgdotMatchStringArgs is a generated protobuf message carrying a
// single string field S (message index 10 in ngolofuzz.proto).
type RegexpNgdotMatchStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// S is proto3 field 1 ("s").
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotMatchStringArgs) Reset() {
*x = RegexpNgdotMatchStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotMatchStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotMatchStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotMatchStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotMatchStringArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotMatchStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetS returns the S field, or "" when the receiver is nil.
func (x *RegexpNgdotMatchStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// RegexpNgdotMatchArgs is a generated protobuf message carrying a single
// bytes field B (message index 11 in ngolofuzz.proto).
type RegexpNgdotMatchArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto3 field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotMatchArgs) Reset() {
*x = RegexpNgdotMatchArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotMatchArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotMatchArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotMatchArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotMatchArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotMatchArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *RegexpNgdotMatchArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// MatchReaderArgs is a generated protobuf message carrying string fields
// Pattern and R (message index 12 in ngolofuzz.proto).
type MatchReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Pattern is proto3 field 1 ("pattern").
Pattern string `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"`
// R is proto3 field 2 ("r").
R string `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *MatchReaderArgs) Reset() {
*x = MatchReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *MatchReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*MatchReaderArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *MatchReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MatchReaderArgs.ProtoReflect.Descriptor instead.
func (*MatchReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// GetPattern returns the Pattern field, or "" when the receiver is nil.
func (x *MatchReaderArgs) GetPattern() string {
if x != nil {
return x.Pattern
}
return ""
}
// GetR returns the R field, or "" when the receiver is nil.
func (x *MatchReaderArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
// MatchStringArgs is a generated protobuf message carrying string fields
// Pattern and S (message index 13 in ngolofuzz.proto).
type MatchStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Pattern is proto3 field 1 ("pattern").
Pattern string `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"`
// S is proto3 field 2 ("s").
S string `protobuf:"bytes,2,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *MatchStringArgs) Reset() {
*x = MatchStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *MatchStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*MatchStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *MatchStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MatchStringArgs.ProtoReflect.Descriptor instead.
func (*MatchStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// GetPattern returns the Pattern field, or "" when the receiver is nil.
func (x *MatchStringArgs) GetPattern() string {
if x != nil {
return x.Pattern
}
return ""
}
// GetS returns the S field, or "" when the receiver is nil.
func (x *MatchStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// MatchArgs is a generated protobuf message carrying a string field Pattern
// and a bytes field B (message index 14 in ngolofuzz.proto).
type MatchArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Pattern is proto3 field 1 ("pattern").
Pattern string `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"`
// B is proto3 field 2 ("b").
B []byte `protobuf:"bytes,2,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *MatchArgs) Reset() {
*x = MatchArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *MatchArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*MatchArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *MatchArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MatchArgs.ProtoReflect.Descriptor instead.
func (*MatchArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// GetPattern returns the Pattern field, or "" when the receiver is nil.
func (x *MatchArgs) GetPattern() string {
if x != nil {
return x.Pattern
}
return ""
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *MatchArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// QuoteMetaArgs is a generated protobuf message carrying a single string
// field S (message index 15 in ngolofuzz.proto).
type QuoteMetaArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// S is proto3 field 1 ("s").
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *QuoteMetaArgs) Reset() {
*x = QuoteMetaArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *QuoteMetaArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*QuoteMetaArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *QuoteMetaArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use QuoteMetaArgs.ProtoReflect.Descriptor instead.
func (*QuoteMetaArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetS returns the S field, or "" when the receiver is nil.
func (x *QuoteMetaArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// RegexpNgdotFindArgs is a generated protobuf message carrying a single
// bytes field B (message index 16 in ngolofuzz.proto).
type RegexpNgdotFindArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto3 field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindArgs) Reset() {
*x = RegexpNgdotFindArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *RegexpNgdotFindArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// RegexpNgdotFindIndexArgs is a generated protobuf message carrying a single
// bytes field B (message index 17 in ngolofuzz.proto).
type RegexpNgdotFindIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto3 field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindIndexArgs) Reset() {
*x = RegexpNgdotFindIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindIndexArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindIndexArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *RegexpNgdotFindIndexArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// RegexpNgdotFindStringArgs is a generated protobuf message carrying a
// single string field S (message index 18 in ngolofuzz.proto).
type RegexpNgdotFindStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// S is proto3 field 1 ("s").
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindStringArgs) Reset() {
*x = RegexpNgdotFindStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindStringArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// GetS returns the S field, or "" when the receiver is nil.
func (x *RegexpNgdotFindStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// RegexpNgdotFindStringIndexArgs is a generated protobuf message carrying a
// single string field S (message index 19 in ngolofuzz.proto).
type RegexpNgdotFindStringIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// S is proto3 field 1 ("s").
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindStringIndexArgs) Reset() {
*x = RegexpNgdotFindStringIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindStringIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindStringIndexArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindStringIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindStringIndexArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindStringIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// GetS returns the S field, or "" when the receiver is nil.
func (x *RegexpNgdotFindStringIndexArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// RegexpNgdotFindReaderIndexArgs is a generated protobuf message carrying a
// single string field R (message index 20 in ngolofuzz.proto).
type RegexpNgdotFindReaderIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// R is proto3 field 1 ("r").
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindReaderIndexArgs) Reset() {
*x = RegexpNgdotFindReaderIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindReaderIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindReaderIndexArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindReaderIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindReaderIndexArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindReaderIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// GetR returns the R field, or "" when the receiver is nil.
func (x *RegexpNgdotFindReaderIndexArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
// RegexpNgdotFindSubmatchArgs is a generated protobuf message carrying a
// single bytes field B (message index 21 in ngolofuzz.proto).
type RegexpNgdotFindSubmatchArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto3 field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindSubmatchArgs) Reset() {
*x = RegexpNgdotFindSubmatchArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindSubmatchArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindSubmatchArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindSubmatchArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindSubmatchArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindSubmatchArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *RegexpNgdotFindSubmatchArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// RegexpNgdotFindSubmatchIndexArgs is a generated protobuf message carrying
// a single bytes field B (message index 22 in ngolofuzz.proto).
type RegexpNgdotFindSubmatchIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto3 field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindSubmatchIndexArgs) Reset() {
*x = RegexpNgdotFindSubmatchIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindSubmatchIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindSubmatchIndexArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindSubmatchIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindSubmatchIndexArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindSubmatchIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *RegexpNgdotFindSubmatchIndexArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// RegexpNgdotFindStringSubmatchArgs is a generated protobuf message carrying
// a single string field S (message index 23 in ngolofuzz.proto).
type RegexpNgdotFindStringSubmatchArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// S is proto3 field 1 ("s").
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindStringSubmatchArgs) Reset() {
*x = RegexpNgdotFindStringSubmatchArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindStringSubmatchArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindStringSubmatchArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindStringSubmatchArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindStringSubmatchArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindStringSubmatchArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// GetS returns the S field, or "" when the receiver is nil.
func (x *RegexpNgdotFindStringSubmatchArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// RegexpNgdotFindStringSubmatchIndexArgs is a generated protobuf message
// carrying a single string field S (message index 24 in ngolofuzz.proto).
type RegexpNgdotFindStringSubmatchIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// S is proto3 field 1 ("s").
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindStringSubmatchIndexArgs) Reset() {
*x = RegexpNgdotFindStringSubmatchIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindStringSubmatchIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindStringSubmatchIndexArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindStringSubmatchIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindStringSubmatchIndexArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindStringSubmatchIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// GetS returns the S field, or "" when the receiver is nil.
func (x *RegexpNgdotFindStringSubmatchIndexArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// RegexpNgdotFindReaderSubmatchIndexArgs is a generated protobuf message
// carrying a single string field R (message index 25 in ngolofuzz.proto).
type RegexpNgdotFindReaderSubmatchIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// R is proto3 field 1 ("r").
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindReaderSubmatchIndexArgs) Reset() {
*x = RegexpNgdotFindReaderSubmatchIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindReaderSubmatchIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindReaderSubmatchIndexArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindReaderSubmatchIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindReaderSubmatchIndexArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindReaderSubmatchIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// GetR returns the R field, or "" when the receiver is nil.
func (x *RegexpNgdotFindReaderSubmatchIndexArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
// RegexpNgdotFindAllArgs is a generated protobuf message carrying a bytes
// field B and an int64 field N (message index 26 in ngolofuzz.proto).
type RegexpNgdotFindAllArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto3 field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
// N is proto3 field 2 ("n").
N int64 `protobuf:"varint,2,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindAllArgs) Reset() {
*x = RegexpNgdotFindAllArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindAllArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindAllArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindAllArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindAllArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindAllArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *RegexpNgdotFindAllArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// GetN returns the N field, or 0 when the receiver is nil.
func (x *RegexpNgdotFindAllArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// RegexpNgdotFindAllIndexArgs is a generated protobuf message carrying a
// bytes field B and an int64 field N (message index 27 in ngolofuzz.proto).
type RegexpNgdotFindAllIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto3 field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
// N is proto3 field 2 ("n").
N int64 `protobuf:"varint,2,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindAllIndexArgs) Reset() {
*x = RegexpNgdotFindAllIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindAllIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindAllIndexArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindAllIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindAllIndexArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindAllIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *RegexpNgdotFindAllIndexArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// GetN returns the N field, or 0 when the receiver is nil.
func (x *RegexpNgdotFindAllIndexArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// RegexpNgdotFindAllSubmatchArgs is a generated protobuf message carrying a
// bytes field B and an int64 field N (message index 28 in ngolofuzz.proto).
type RegexpNgdotFindAllSubmatchArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto3 field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
// N is proto3 field 2 ("n").
N int64 `protobuf:"varint,2,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindAllSubmatchArgs) Reset() {
*x = RegexpNgdotFindAllSubmatchArgs{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindAllSubmatchArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindAllSubmatchArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindAllSubmatchArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindAllSubmatchArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindAllSubmatchArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *RegexpNgdotFindAllSubmatchArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// GetN returns the N field, or 0 when the receiver is nil.
func (x *RegexpNgdotFindAllSubmatchArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// RegexpNgdotFindAllSubmatchIndexArgs is a generated protobuf message
// carrying a bytes field B and an int64 field N (message index 29 in
// ngolofuzz.proto).
type RegexpNgdotFindAllSubmatchIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto3 field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
// N is proto3 field 2 ("n").
N int64 `protobuf:"varint,2,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotFindAllSubmatchIndexArgs) Reset() {
*x = RegexpNgdotFindAllSubmatchIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotFindAllSubmatchIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotFindAllSubmatchIndexArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotFindAllSubmatchIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotFindAllSubmatchIndexArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotFindAllSubmatchIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *RegexpNgdotFindAllSubmatchIndexArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// GetN returns the N field, or 0 when the receiver is nil.
func (x *RegexpNgdotFindAllSubmatchIndexArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// RegexpNgdotSplitArgs is a generated protobuf message carrying a string
// field S and an int64 field N (message index 30 in ngolofuzz.proto).
type RegexpNgdotSplitArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// S is proto3 field 1 ("s").
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
// N is proto3 field 2 ("n").
N int64 `protobuf:"varint,2,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotSplitArgs) Reset() {
*x = RegexpNgdotSplitArgs{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotSplitArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotSplitArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotSplitArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotSplitArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotSplitArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}
// GetS returns the S field, or "" when the receiver is nil.
func (x *RegexpNgdotSplitArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetN returns the N field, or 0 when the receiver is nil.
func (x *RegexpNgdotSplitArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// RegexpNgdotAppendTextArgs is a generated protobuf message carrying a
// single bytes field B (message index 31 in ngolofuzz.proto).
type RegexpNgdotAppendTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// B is proto3 field 1 ("b").
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotAppendTextArgs) Reset() {
*x = RegexpNgdotAppendTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotAppendTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotAppendTextArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotAppendTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotAppendTextArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotAppendTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
// GetB returns the B field, or nil when the receiver is nil.
func (x *RegexpNgdotAppendTextArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// RegexpNgdotMarshalTextArgs is a generated protobuf message with no payload
// fields (message index 32 in ngolofuzz.proto).
type RegexpNgdotMarshalTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotMarshalTextArgs) Reset() {
*x = RegexpNgdotMarshalTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotMarshalTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotMarshalTextArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotMarshalTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotMarshalTextArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotMarshalTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}
// RegexpNgdotUnmarshalTextArgs is a generated protobuf message carrying a
// single bytes field Text (message index 33 in ngolofuzz.proto).
type RegexpNgdotUnmarshalTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Text is proto3 field 1 ("text").
Text []byte `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset clears x to the zero value and re-stores its message info.
func (x *RegexpNgdotUnmarshalTextArgs) Reset() {
*x = RegexpNgdotUnmarshalTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message via the protobuf runtime.
func (x *RegexpNgdotUnmarshalTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage tags the type as a protobuf message.
func (*RegexpNgdotUnmarshalTextArgs) ProtoMessage() {}
// ProtoReflect returns the reflection view of x, lazily storing the
// message info on first use; a nil x falls back to the type-only view.
func (x *RegexpNgdotUnmarshalTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegexpNgdotUnmarshalTextArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotUnmarshalTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{33}
}
// GetText returns the Text field, or nil when the receiver is nil.
func (x *RegexpNgdotUnmarshalTextArgs) GetText() []byte {
if x != nil {
return x.Text
}
return nil
}
// NgoloFuzzOne is a oneof envelope: a single fuzzed call, selecting exactly
// one of the per-API argument messages below via the Item field.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_RegexpNgdotString
// *NgoloFuzzOne_RegexpNgdotCopy
// *NgoloFuzzOne_Compile
// *NgoloFuzzOne_CompilePOSIX
// *NgoloFuzzOne_RegexpNgdotLongest
// *NgoloFuzzOne_RegexpNgdotNumSubexp
// *NgoloFuzzOne_RegexpNgdotSubexpNames
// *NgoloFuzzOne_RegexpNgdotSubexpIndex
// *NgoloFuzzOne_RegexpNgdotLiteralPrefix
// *NgoloFuzzOne_RegexpNgdotMatchReader
// *NgoloFuzzOne_RegexpNgdotMatchString
// *NgoloFuzzOne_RegexpNgdotMatch
// *NgoloFuzzOne_MatchReader
// *NgoloFuzzOne_MatchString
// *NgoloFuzzOne_Match
// *NgoloFuzzOne_QuoteMeta
// *NgoloFuzzOne_RegexpNgdotFind
// *NgoloFuzzOne_RegexpNgdotFindIndex
// *NgoloFuzzOne_RegexpNgdotFindString
// *NgoloFuzzOne_RegexpNgdotFindStringIndex
// *NgoloFuzzOne_RegexpNgdotFindReaderIndex
// *NgoloFuzzOne_RegexpNgdotFindSubmatch
// *NgoloFuzzOne_RegexpNgdotFindSubmatchIndex
// *NgoloFuzzOne_RegexpNgdotFindStringSubmatch
// *NgoloFuzzOne_RegexpNgdotFindStringSubmatchIndex
// *NgoloFuzzOne_RegexpNgdotFindReaderSubmatchIndex
// *NgoloFuzzOne_RegexpNgdotFindAll
// *NgoloFuzzOne_RegexpNgdotFindAllIndex
// *NgoloFuzzOne_RegexpNgdotFindAllSubmatch
// *NgoloFuzzOne_RegexpNgdotFindAllSubmatchIndex
// *NgoloFuzzOne_RegexpNgdotSplit
// *NgoloFuzzOne_RegexpNgdotAppendText
// *NgoloFuzzOne_RegexpNgdotMarshalText
// *NgoloFuzzOne_RegexpNgdotUnmarshalText
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its message info (slot 34).
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message.
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding message info.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[34]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{34}
}
// GetItem returns the populated oneof wrapper, or nil when unset or when the
// receiver is nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Oneof getters for NgoloFuzzOne. Each returns the wrapped argument message
// when Item currently holds the corresponding wrapper type, and nil otherwise
// (including on a nil receiver).
func (x *NgoloFuzzOne) GetRegexpNgdotString() *RegexpNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotString); ok {
return x.RegexpNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotCopy() *RegexpNgdotCopyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotCopy); ok {
return x.RegexpNgdotCopy
}
}
return nil
}
func (x *NgoloFuzzOne) GetCompile() *CompileArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Compile); ok {
return x.Compile
}
}
return nil
}
func (x *NgoloFuzzOne) GetCompilePOSIX() *CompilePOSIXArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CompilePOSIX); ok {
return x.CompilePOSIX
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotLongest() *RegexpNgdotLongestArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotLongest); ok {
return x.RegexpNgdotLongest
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotNumSubexp() *RegexpNgdotNumSubexpArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotNumSubexp); ok {
return x.RegexpNgdotNumSubexp
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotSubexpNames() *RegexpNgdotSubexpNamesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotSubexpNames); ok {
return x.RegexpNgdotSubexpNames
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotSubexpIndex() *RegexpNgdotSubexpIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotSubexpIndex); ok {
return x.RegexpNgdotSubexpIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotLiteralPrefix() *RegexpNgdotLiteralPrefixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotLiteralPrefix); ok {
return x.RegexpNgdotLiteralPrefix
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotMatchReader() *RegexpNgdotMatchReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotMatchReader); ok {
return x.RegexpNgdotMatchReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotMatchString() *RegexpNgdotMatchStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotMatchString); ok {
return x.RegexpNgdotMatchString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotMatch() *RegexpNgdotMatchArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotMatch); ok {
return x.RegexpNgdotMatch
}
}
return nil
}
func (x *NgoloFuzzOne) GetMatchReader() *MatchReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MatchReader); ok {
return x.MatchReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetMatchString() *MatchStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_MatchString); ok {
return x.MatchString
}
}
return nil
}
func (x *NgoloFuzzOne) GetMatch() *MatchArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Match); ok {
return x.Match
}
}
return nil
}
func (x *NgoloFuzzOne) GetQuoteMeta() *QuoteMetaArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_QuoteMeta); ok {
return x.QuoteMeta
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFind() *RegexpNgdotFindArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFind); ok {
return x.RegexpNgdotFind
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindIndex() *RegexpNgdotFindIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindIndex); ok {
return x.RegexpNgdotFindIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindString() *RegexpNgdotFindStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindString); ok {
return x.RegexpNgdotFindString
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindStringIndex() *RegexpNgdotFindStringIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindStringIndex); ok {
return x.RegexpNgdotFindStringIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindReaderIndex() *RegexpNgdotFindReaderIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindReaderIndex); ok {
return x.RegexpNgdotFindReaderIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindSubmatch() *RegexpNgdotFindSubmatchArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindSubmatch); ok {
return x.RegexpNgdotFindSubmatch
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindSubmatchIndex() *RegexpNgdotFindSubmatchIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindSubmatchIndex); ok {
return x.RegexpNgdotFindSubmatchIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindStringSubmatch() *RegexpNgdotFindStringSubmatchArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindStringSubmatch); ok {
return x.RegexpNgdotFindStringSubmatch
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindStringSubmatchIndex() *RegexpNgdotFindStringSubmatchIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindStringSubmatchIndex); ok {
return x.RegexpNgdotFindStringSubmatchIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindReaderSubmatchIndex() *RegexpNgdotFindReaderSubmatchIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindReaderSubmatchIndex); ok {
return x.RegexpNgdotFindReaderSubmatchIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindAll() *RegexpNgdotFindAllArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindAll); ok {
return x.RegexpNgdotFindAll
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindAllIndex() *RegexpNgdotFindAllIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindAllIndex); ok {
return x.RegexpNgdotFindAllIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindAllSubmatch() *RegexpNgdotFindAllSubmatchArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindAllSubmatch); ok {
return x.RegexpNgdotFindAllSubmatch
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotFindAllSubmatchIndex() *RegexpNgdotFindAllSubmatchIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotFindAllSubmatchIndex); ok {
return x.RegexpNgdotFindAllSubmatchIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotSplit() *RegexpNgdotSplitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotSplit); ok {
return x.RegexpNgdotSplit
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotAppendText() *RegexpNgdotAppendTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotAppendText); ok {
return x.RegexpNgdotAppendText
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotMarshalText() *RegexpNgdotMarshalTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotMarshalText); ok {
return x.RegexpNgdotMarshalText
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegexpNgdotUnmarshalText() *RegexpNgdotUnmarshalTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotUnmarshalText); ok {
return x.RegexpNgdotUnmarshalText
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented only by the oneof
// wrapper structs below; the unexported marker method prevents outside types
// from satisfying it.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// One wrapper struct per oneof case; the protobuf field number is encoded in
// each struct tag (1 through 34).
type NgoloFuzzOne_RegexpNgdotString struct {
RegexpNgdotString *RegexpNgdotStringArgs `protobuf:"bytes,1,opt,name=RegexpNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotCopy struct {
RegexpNgdotCopy *RegexpNgdotCopyArgs `protobuf:"bytes,2,opt,name=RegexpNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_Compile struct {
Compile *CompileArgs `protobuf:"bytes,3,opt,name=Compile,proto3,oneof"`
}
type NgoloFuzzOne_CompilePOSIX struct {
CompilePOSIX *CompilePOSIXArgs `protobuf:"bytes,4,opt,name=CompilePOSIX,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotLongest struct {
RegexpNgdotLongest *RegexpNgdotLongestArgs `protobuf:"bytes,5,opt,name=RegexpNgdotLongest,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotNumSubexp struct {
RegexpNgdotNumSubexp *RegexpNgdotNumSubexpArgs `protobuf:"bytes,6,opt,name=RegexpNgdotNumSubexp,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotSubexpNames struct {
RegexpNgdotSubexpNames *RegexpNgdotSubexpNamesArgs `protobuf:"bytes,7,opt,name=RegexpNgdotSubexpNames,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotSubexpIndex struct {
RegexpNgdotSubexpIndex *RegexpNgdotSubexpIndexArgs `protobuf:"bytes,8,opt,name=RegexpNgdotSubexpIndex,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotLiteralPrefix struct {
RegexpNgdotLiteralPrefix *RegexpNgdotLiteralPrefixArgs `protobuf:"bytes,9,opt,name=RegexpNgdotLiteralPrefix,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotMatchReader struct {
RegexpNgdotMatchReader *RegexpNgdotMatchReaderArgs `protobuf:"bytes,10,opt,name=RegexpNgdotMatchReader,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotMatchString struct {
RegexpNgdotMatchString *RegexpNgdotMatchStringArgs `protobuf:"bytes,11,opt,name=RegexpNgdotMatchString,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotMatch struct {
RegexpNgdotMatch *RegexpNgdotMatchArgs `protobuf:"bytes,12,opt,name=RegexpNgdotMatch,proto3,oneof"`
}
type NgoloFuzzOne_MatchReader struct {
MatchReader *MatchReaderArgs `protobuf:"bytes,13,opt,name=MatchReader,proto3,oneof"`
}
type NgoloFuzzOne_MatchString struct {
MatchString *MatchStringArgs `protobuf:"bytes,14,opt,name=MatchString,proto3,oneof"`
}
type NgoloFuzzOne_Match struct {
Match *MatchArgs `protobuf:"bytes,15,opt,name=Match,proto3,oneof"`
}
type NgoloFuzzOne_QuoteMeta struct {
QuoteMeta *QuoteMetaArgs `protobuf:"bytes,16,opt,name=QuoteMeta,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFind struct {
RegexpNgdotFind *RegexpNgdotFindArgs `protobuf:"bytes,17,opt,name=RegexpNgdotFind,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindIndex struct {
RegexpNgdotFindIndex *RegexpNgdotFindIndexArgs `protobuf:"bytes,18,opt,name=RegexpNgdotFindIndex,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindString struct {
RegexpNgdotFindString *RegexpNgdotFindStringArgs `protobuf:"bytes,19,opt,name=RegexpNgdotFindString,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindStringIndex struct {
RegexpNgdotFindStringIndex *RegexpNgdotFindStringIndexArgs `protobuf:"bytes,20,opt,name=RegexpNgdotFindStringIndex,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindReaderIndex struct {
RegexpNgdotFindReaderIndex *RegexpNgdotFindReaderIndexArgs `protobuf:"bytes,21,opt,name=RegexpNgdotFindReaderIndex,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindSubmatch struct {
RegexpNgdotFindSubmatch *RegexpNgdotFindSubmatchArgs `protobuf:"bytes,22,opt,name=RegexpNgdotFindSubmatch,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindSubmatchIndex struct {
RegexpNgdotFindSubmatchIndex *RegexpNgdotFindSubmatchIndexArgs `protobuf:"bytes,23,opt,name=RegexpNgdotFindSubmatchIndex,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindStringSubmatch struct {
RegexpNgdotFindStringSubmatch *RegexpNgdotFindStringSubmatchArgs `protobuf:"bytes,24,opt,name=RegexpNgdotFindStringSubmatch,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindStringSubmatchIndex struct {
RegexpNgdotFindStringSubmatchIndex *RegexpNgdotFindStringSubmatchIndexArgs `protobuf:"bytes,25,opt,name=RegexpNgdotFindStringSubmatchIndex,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindReaderSubmatchIndex struct {
RegexpNgdotFindReaderSubmatchIndex *RegexpNgdotFindReaderSubmatchIndexArgs `protobuf:"bytes,26,opt,name=RegexpNgdotFindReaderSubmatchIndex,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindAll struct {
RegexpNgdotFindAll *RegexpNgdotFindAllArgs `protobuf:"bytes,27,opt,name=RegexpNgdotFindAll,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindAllIndex struct {
RegexpNgdotFindAllIndex *RegexpNgdotFindAllIndexArgs `protobuf:"bytes,28,opt,name=RegexpNgdotFindAllIndex,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindAllSubmatch struct {
RegexpNgdotFindAllSubmatch *RegexpNgdotFindAllSubmatchArgs `protobuf:"bytes,29,opt,name=RegexpNgdotFindAllSubmatch,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotFindAllSubmatchIndex struct {
RegexpNgdotFindAllSubmatchIndex *RegexpNgdotFindAllSubmatchIndexArgs `protobuf:"bytes,30,opt,name=RegexpNgdotFindAllSubmatchIndex,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotSplit struct {
RegexpNgdotSplit *RegexpNgdotSplitArgs `protobuf:"bytes,31,opt,name=RegexpNgdotSplit,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotAppendText struct {
RegexpNgdotAppendText *RegexpNgdotAppendTextArgs `protobuf:"bytes,32,opt,name=RegexpNgdotAppendText,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotMarshalText struct {
RegexpNgdotMarshalText *RegexpNgdotMarshalTextArgs `protobuf:"bytes,33,opt,name=RegexpNgdotMarshalText,proto3,oneof"`
}
type NgoloFuzzOne_RegexpNgdotUnmarshalText struct {
RegexpNgdotUnmarshalText *RegexpNgdotUnmarshalTextArgs `protobuf:"bytes,34,opt,name=RegexpNgdotUnmarshalText,proto3,oneof"`
}
// Marker-method implementations sealing the wrappers into the oneof.
func (*NgoloFuzzOne_RegexpNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Compile) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CompilePOSIX) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotLongest) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotNumSubexp) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotSubexpNames) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotSubexpIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotLiteralPrefix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotMatchReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotMatchString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotMatch) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MatchReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MatchString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Match) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_QuoteMeta) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFind) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindStringIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindReaderIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindSubmatch) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindSubmatchIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindStringSubmatch) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindStringSubmatchIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindReaderSubmatchIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindAll) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindAllIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindAllSubmatch) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotFindAllSubmatchIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotSplit) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotAppendText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotMarshalText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegexpNgdotUnmarshalText) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a oneof over the primitive scalar kinds (double, int64,
// bool, string, bytes) used as generic fuzz inputs.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its message info (slot 35).
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message.
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding message info.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[35]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{35}
}
// GetItem returns the populated oneof wrapper, or nil when unset.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Scalar getters: each returns the wrapped value when the corresponding oneof
// case is set, and the zero value otherwise (including on a nil receiver).
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// Sealed oneof interface and its wrapper structs (field numbers 1-5).
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne calls to replay.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its message info (slot 36).
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message.
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily binding message info.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[36]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{36}
}
// GetList returns the call sequence, or nil when the receiver is nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the compiled FileDescriptor for ngolofuzz.proto,
// populated at init time from the raw descriptor below.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto. Do not edit by hand: every byte (message names, field
// numbers, lengths) must stay consistent with the Go types above.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x17\n" +
"\x15RegexpNgdotStringArgs\"\x15\n" +
"\x13RegexpNgdotCopyArgs\"!\n" +
"\vCompileArgs\x12\x12\n" +
"\x04expr\x18\x01 \x01(\tR\x04expr\"&\n" +
"\x10CompilePOSIXArgs\x12\x12\n" +
"\x04expr\x18\x01 \x01(\tR\x04expr\"\x18\n" +
"\x16RegexpNgdotLongestArgs\"\x1a\n" +
"\x18RegexpNgdotNumSubexpArgs\"\x1c\n" +
"\x1aRegexpNgdotSubexpNamesArgs\"0\n" +
"\x1aRegexpNgdotSubexpIndexArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x1e\n" +
"\x1cRegexpNgdotLiteralPrefixArgs\"*\n" +
"\x1aRegexpNgdotMatchReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"*\n" +
"\x1aRegexpNgdotMatchStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"$\n" +
"\x14RegexpNgdotMatchArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"9\n" +
"\x0fMatchReaderArgs\x12\x18\n" +
"\apattern\x18\x01 \x01(\tR\apattern\x12\f\n" +
"\x01r\x18\x02 \x01(\tR\x01r\"9\n" +
"\x0fMatchStringArgs\x12\x18\n" +
"\apattern\x18\x01 \x01(\tR\apattern\x12\f\n" +
"\x01s\x18\x02 \x01(\tR\x01s\"3\n" +
"\tMatchArgs\x12\x18\n" +
"\apattern\x18\x01 \x01(\tR\apattern\x12\f\n" +
"\x01b\x18\x02 \x01(\fR\x01b\"\x1d\n" +
"\rQuoteMetaArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"#\n" +
"\x13RegexpNgdotFindArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"(\n" +
"\x18RegexpNgdotFindIndexArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\")\n" +
"\x19RegexpNgdotFindStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\".\n" +
"\x1eRegexpNgdotFindStringIndexArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\".\n" +
"\x1eRegexpNgdotFindReaderIndexArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"+\n" +
"\x1bRegexpNgdotFindSubmatchArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"0\n" +
" RegexpNgdotFindSubmatchIndexArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"1\n" +
"!RegexpNgdotFindStringSubmatchArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"6\n" +
"&RegexpNgdotFindStringSubmatchIndexArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"6\n" +
"&RegexpNgdotFindReaderSubmatchIndexArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"4\n" +
"\x16RegexpNgdotFindAllArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\x12\f\n" +
"\x01n\x18\x02 \x01(\x03R\x01n\"9\n" +
"\x1bRegexpNgdotFindAllIndexArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\x12\f\n" +
"\x01n\x18\x02 \x01(\x03R\x01n\"<\n" +
"\x1eRegexpNgdotFindAllSubmatchArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\x12\f\n" +
"\x01n\x18\x02 \x01(\x03R\x01n\"A\n" +
"#RegexpNgdotFindAllSubmatchIndexArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\x12\f\n" +
"\x01n\x18\x02 \x01(\x03R\x01n\"2\n" +
"\x14RegexpNgdotSplitArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\f\n" +
"\x01n\x18\x02 \x01(\x03R\x01n\")\n" +
"\x19RegexpNgdotAppendTextArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"\x1c\n" +
"\x1aRegexpNgdotMarshalTextArgs\"2\n" +
"\x1cRegexpNgdotUnmarshalTextArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\fR\x04text\"\xcc\x18\n" +
"\fNgoloFuzzOne\x12P\n" +
"\x11RegexpNgdotString\x18\x01 \x01(\v2 .ngolofuzz.RegexpNgdotStringArgsH\x00R\x11RegexpNgdotString\x12J\n" +
"\x0fRegexpNgdotCopy\x18\x02 \x01(\v2\x1e.ngolofuzz.RegexpNgdotCopyArgsH\x00R\x0fRegexpNgdotCopy\x122\n" +
"\aCompile\x18\x03 \x01(\v2\x16.ngolofuzz.CompileArgsH\x00R\aCompile\x12A\n" +
"\fCompilePOSIX\x18\x04 \x01(\v2\x1b.ngolofuzz.CompilePOSIXArgsH\x00R\fCompilePOSIX\x12S\n" +
"\x12RegexpNgdotLongest\x18\x05 \x01(\v2!.ngolofuzz.RegexpNgdotLongestArgsH\x00R\x12RegexpNgdotLongest\x12Y\n" +
"\x14RegexpNgdotNumSubexp\x18\x06 \x01(\v2#.ngolofuzz.RegexpNgdotNumSubexpArgsH\x00R\x14RegexpNgdotNumSubexp\x12_\n" +
"\x16RegexpNgdotSubexpNames\x18\a \x01(\v2%.ngolofuzz.RegexpNgdotSubexpNamesArgsH\x00R\x16RegexpNgdotSubexpNames\x12_\n" +
"\x16RegexpNgdotSubexpIndex\x18\b \x01(\v2%.ngolofuzz.RegexpNgdotSubexpIndexArgsH\x00R\x16RegexpNgdotSubexpIndex\x12e\n" +
"\x18RegexpNgdotLiteralPrefix\x18\t \x01(\v2'.ngolofuzz.RegexpNgdotLiteralPrefixArgsH\x00R\x18RegexpNgdotLiteralPrefix\x12_\n" +
"\x16RegexpNgdotMatchReader\x18\n" +
" \x01(\v2%.ngolofuzz.RegexpNgdotMatchReaderArgsH\x00R\x16RegexpNgdotMatchReader\x12_\n" +
"\x16RegexpNgdotMatchString\x18\v \x01(\v2%.ngolofuzz.RegexpNgdotMatchStringArgsH\x00R\x16RegexpNgdotMatchString\x12M\n" +
"\x10RegexpNgdotMatch\x18\f \x01(\v2\x1f.ngolofuzz.RegexpNgdotMatchArgsH\x00R\x10RegexpNgdotMatch\x12>\n" +
"\vMatchReader\x18\r \x01(\v2\x1a.ngolofuzz.MatchReaderArgsH\x00R\vMatchReader\x12>\n" +
"\vMatchString\x18\x0e \x01(\v2\x1a.ngolofuzz.MatchStringArgsH\x00R\vMatchString\x12,\n" +
"\x05Match\x18\x0f \x01(\v2\x14.ngolofuzz.MatchArgsH\x00R\x05Match\x128\n" +
"\tQuoteMeta\x18\x10 \x01(\v2\x18.ngolofuzz.QuoteMetaArgsH\x00R\tQuoteMeta\x12J\n" +
"\x0fRegexpNgdotFind\x18\x11 \x01(\v2\x1e.ngolofuzz.RegexpNgdotFindArgsH\x00R\x0fRegexpNgdotFind\x12Y\n" +
"\x14RegexpNgdotFindIndex\x18\x12 \x01(\v2#.ngolofuzz.RegexpNgdotFindIndexArgsH\x00R\x14RegexpNgdotFindIndex\x12\\\n" +
"\x15RegexpNgdotFindString\x18\x13 \x01(\v2$.ngolofuzz.RegexpNgdotFindStringArgsH\x00R\x15RegexpNgdotFindString\x12k\n" +
"\x1aRegexpNgdotFindStringIndex\x18\x14 \x01(\v2).ngolofuzz.RegexpNgdotFindStringIndexArgsH\x00R\x1aRegexpNgdotFindStringIndex\x12k\n" +
"\x1aRegexpNgdotFindReaderIndex\x18\x15 \x01(\v2).ngolofuzz.RegexpNgdotFindReaderIndexArgsH\x00R\x1aRegexpNgdotFindReaderIndex\x12b\n" +
"\x17RegexpNgdotFindSubmatch\x18\x16 \x01(\v2&.ngolofuzz.RegexpNgdotFindSubmatchArgsH\x00R\x17RegexpNgdotFindSubmatch\x12q\n" +
"\x1cRegexpNgdotFindSubmatchIndex\x18\x17 \x01(\v2+.ngolofuzz.RegexpNgdotFindSubmatchIndexArgsH\x00R\x1cRegexpNgdotFindSubmatchIndex\x12t\n" +
"\x1dRegexpNgdotFindStringSubmatch\x18\x18 \x01(\v2,.ngolofuzz.RegexpNgdotFindStringSubmatchArgsH\x00R\x1dRegexpNgdotFindStringSubmatch\x12\x83\x01\n" +
"\"RegexpNgdotFindStringSubmatchIndex\x18\x19 \x01(\v21.ngolofuzz.RegexpNgdotFindStringSubmatchIndexArgsH\x00R\"RegexpNgdotFindStringSubmatchIndex\x12\x83\x01\n" +
"\"RegexpNgdotFindReaderSubmatchIndex\x18\x1a \x01(\v21.ngolofuzz.RegexpNgdotFindReaderSubmatchIndexArgsH\x00R\"RegexpNgdotFindReaderSubmatchIndex\x12S\n" +
"\x12RegexpNgdotFindAll\x18\x1b \x01(\v2!.ngolofuzz.RegexpNgdotFindAllArgsH\x00R\x12RegexpNgdotFindAll\x12b\n" +
"\x17RegexpNgdotFindAllIndex\x18\x1c \x01(\v2&.ngolofuzz.RegexpNgdotFindAllIndexArgsH\x00R\x17RegexpNgdotFindAllIndex\x12k\n" +
"\x1aRegexpNgdotFindAllSubmatch\x18\x1d \x01(\v2).ngolofuzz.RegexpNgdotFindAllSubmatchArgsH\x00R\x1aRegexpNgdotFindAllSubmatch\x12z\n" +
"\x1fRegexpNgdotFindAllSubmatchIndex\x18\x1e \x01(\v2..ngolofuzz.RegexpNgdotFindAllSubmatchIndexArgsH\x00R\x1fRegexpNgdotFindAllSubmatchIndex\x12M\n" +
"\x10RegexpNgdotSplit\x18\x1f \x01(\v2\x1f.ngolofuzz.RegexpNgdotSplitArgsH\x00R\x10RegexpNgdotSplit\x12\\\n" +
"\x15RegexpNgdotAppendText\x18 \x01(\v2$.ngolofuzz.RegexpNgdotAppendTextArgsH\x00R\x15RegexpNgdotAppendText\x12_\n" +
"\x16RegexpNgdotMarshalText\x18! \x01(\v2%.ngolofuzz.RegexpNgdotMarshalTextArgsH\x00R\x16RegexpNgdotMarshalText\x12e\n" +
"\x18RegexpNgdotUnmarshalText\x18\" \x01(\v2'.ngolofuzz.RegexpNgdotUnmarshalTextArgsH\x00R\x18RegexpNgdotUnmarshalTextB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x13Z\x11./;fuzz_ng_regexpb\x06proto3"
// Lazily-computed gzipped form of the raw descriptor, shared by the
// deprecated Descriptor() methods above.
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP compresses the raw descriptor exactly once
// (guarded by sync.Once) and returns the cached bytes thereafter.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
// unsafe.Slice over StringData avoids copying the const string before compression.
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Per-message runtime info; index positions (0-36) are referenced by the
// ProtoReflect/Reset methods above and must match goTypes below.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 37)
// Go type for each message, in descriptor order; the trailing index comments
// are part of the generated format.
var file_ngolofuzz_proto_goTypes = []any{
(*RegexpNgdotStringArgs)(nil), // 0: ngolofuzz.RegexpNgdotStringArgs
(*RegexpNgdotCopyArgs)(nil), // 1: ngolofuzz.RegexpNgdotCopyArgs
(*CompileArgs)(nil), // 2: ngolofuzz.CompileArgs
(*CompilePOSIXArgs)(nil), // 3: ngolofuzz.CompilePOSIXArgs
(*RegexpNgdotLongestArgs)(nil), // 4: ngolofuzz.RegexpNgdotLongestArgs
(*RegexpNgdotNumSubexpArgs)(nil), // 5: ngolofuzz.RegexpNgdotNumSubexpArgs
(*RegexpNgdotSubexpNamesArgs)(nil), // 6: ngolofuzz.RegexpNgdotSubexpNamesArgs
(*RegexpNgdotSubexpIndexArgs)(nil), // 7: ngolofuzz.RegexpNgdotSubexpIndexArgs
(*RegexpNgdotLiteralPrefixArgs)(nil), // 8: ngolofuzz.RegexpNgdotLiteralPrefixArgs
(*RegexpNgdotMatchReaderArgs)(nil), // 9: ngolofuzz.RegexpNgdotMatchReaderArgs
(*RegexpNgdotMatchStringArgs)(nil), // 10: ngolofuzz.RegexpNgdotMatchStringArgs
(*RegexpNgdotMatchArgs)(nil), // 11: ngolofuzz.RegexpNgdotMatchArgs
(*MatchReaderArgs)(nil), // 12: ngolofuzz.MatchReaderArgs
(*MatchStringArgs)(nil), // 13: ngolofuzz.MatchStringArgs
(*MatchArgs)(nil), // 14: ngolofuzz.MatchArgs
(*QuoteMetaArgs)(nil), // 15: ngolofuzz.QuoteMetaArgs
(*RegexpNgdotFindArgs)(nil), // 16: ngolofuzz.RegexpNgdotFindArgs
(*RegexpNgdotFindIndexArgs)(nil), // 17: ngolofuzz.RegexpNgdotFindIndexArgs
(*RegexpNgdotFindStringArgs)(nil), // 18: ngolofuzz.RegexpNgdotFindStringArgs
(*RegexpNgdotFindStringIndexArgs)(nil), // 19: ngolofuzz.RegexpNgdotFindStringIndexArgs
(*RegexpNgdotFindReaderIndexArgs)(nil), // 20: ngolofuzz.RegexpNgdotFindReaderIndexArgs
(*RegexpNgdotFindSubmatchArgs)(nil), // 21: ngolofuzz.RegexpNgdotFindSubmatchArgs
(*RegexpNgdotFindSubmatchIndexArgs)(nil), // 22: ngolofuzz.RegexpNgdotFindSubmatchIndexArgs
(*RegexpNgdotFindStringSubmatchArgs)(nil), // 23: ngolofuzz.RegexpNgdotFindStringSubmatchArgs
(*RegexpNgdotFindStringSubmatchIndexArgs)(nil), // 24: ngolofuzz.RegexpNgdotFindStringSubmatchIndexArgs
(*RegexpNgdotFindReaderSubmatchIndexArgs)(nil), // 25: ngolofuzz.RegexpNgdotFindReaderSubmatchIndexArgs
(*RegexpNgdotFindAllArgs)(nil), // 26: ngolofuzz.RegexpNgdotFindAllArgs
(*RegexpNgdotFindAllIndexArgs)(nil), // 27: ngolofuzz.RegexpNgdotFindAllIndexArgs
(*RegexpNgdotFindAllSubmatchArgs)(nil), // 28: ngolofuzz.RegexpNgdotFindAllSubmatchArgs
(*RegexpNgdotFindAllSubmatchIndexArgs)(nil), // 29: ngolofuzz.RegexpNgdotFindAllSubmatchIndexArgs
(*RegexpNgdotSplitArgs)(nil), // 30: ngolofuzz.RegexpNgdotSplitArgs
(*RegexpNgdotAppendTextArgs)(nil), // 31: ngolofuzz.RegexpNgdotAppendTextArgs
(*RegexpNgdotMarshalTextArgs)(nil), // 32: ngolofuzz.RegexpNgdotMarshalTextArgs
(*RegexpNgdotUnmarshalTextArgs)(nil), // 33: ngolofuzz.RegexpNgdotUnmarshalTextArgs
(*NgoloFuzzOne)(nil), // 34: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 35: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 36: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs encodes type dependencies between messages:
// entries [0:35] map each field's type_name to an index in
// file_ngolofuzz_proto_goTypes; the trailing sub-list markers are unused
// here (no services/extensions). Generated by protoc-gen-go; do not edit.
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.RegexpNgdotString:type_name -> ngolofuzz.RegexpNgdotStringArgs
1, // 1: ngolofuzz.NgoloFuzzOne.RegexpNgdotCopy:type_name -> ngolofuzz.RegexpNgdotCopyArgs
2, // 2: ngolofuzz.NgoloFuzzOne.Compile:type_name -> ngolofuzz.CompileArgs
3, // 3: ngolofuzz.NgoloFuzzOne.CompilePOSIX:type_name -> ngolofuzz.CompilePOSIXArgs
4, // 4: ngolofuzz.NgoloFuzzOne.RegexpNgdotLongest:type_name -> ngolofuzz.RegexpNgdotLongestArgs
5, // 5: ngolofuzz.NgoloFuzzOne.RegexpNgdotNumSubexp:type_name -> ngolofuzz.RegexpNgdotNumSubexpArgs
6, // 6: ngolofuzz.NgoloFuzzOne.RegexpNgdotSubexpNames:type_name -> ngolofuzz.RegexpNgdotSubexpNamesArgs
7, // 7: ngolofuzz.NgoloFuzzOne.RegexpNgdotSubexpIndex:type_name -> ngolofuzz.RegexpNgdotSubexpIndexArgs
8, // 8: ngolofuzz.NgoloFuzzOne.RegexpNgdotLiteralPrefix:type_name -> ngolofuzz.RegexpNgdotLiteralPrefixArgs
9, // 9: ngolofuzz.NgoloFuzzOne.RegexpNgdotMatchReader:type_name -> ngolofuzz.RegexpNgdotMatchReaderArgs
10, // 10: ngolofuzz.NgoloFuzzOne.RegexpNgdotMatchString:type_name -> ngolofuzz.RegexpNgdotMatchStringArgs
11, // 11: ngolofuzz.NgoloFuzzOne.RegexpNgdotMatch:type_name -> ngolofuzz.RegexpNgdotMatchArgs
12, // 12: ngolofuzz.NgoloFuzzOne.MatchReader:type_name -> ngolofuzz.MatchReaderArgs
13, // 13: ngolofuzz.NgoloFuzzOne.MatchString:type_name -> ngolofuzz.MatchStringArgs
14, // 14: ngolofuzz.NgoloFuzzOne.Match:type_name -> ngolofuzz.MatchArgs
15, // 15: ngolofuzz.NgoloFuzzOne.QuoteMeta:type_name -> ngolofuzz.QuoteMetaArgs
16, // 16: ngolofuzz.NgoloFuzzOne.RegexpNgdotFind:type_name -> ngolofuzz.RegexpNgdotFindArgs
17, // 17: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindIndex:type_name -> ngolofuzz.RegexpNgdotFindIndexArgs
18, // 18: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindString:type_name -> ngolofuzz.RegexpNgdotFindStringArgs
19, // 19: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindStringIndex:type_name -> ngolofuzz.RegexpNgdotFindStringIndexArgs
20, // 20: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindReaderIndex:type_name -> ngolofuzz.RegexpNgdotFindReaderIndexArgs
21, // 21: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindSubmatch:type_name -> ngolofuzz.RegexpNgdotFindSubmatchArgs
22, // 22: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindSubmatchIndex:type_name -> ngolofuzz.RegexpNgdotFindSubmatchIndexArgs
23, // 23: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindStringSubmatch:type_name -> ngolofuzz.RegexpNgdotFindStringSubmatchArgs
24, // 24: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindStringSubmatchIndex:type_name -> ngolofuzz.RegexpNgdotFindStringSubmatchIndexArgs
25, // 25: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindReaderSubmatchIndex:type_name -> ngolofuzz.RegexpNgdotFindReaderSubmatchIndexArgs
26, // 26: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindAll:type_name -> ngolofuzz.RegexpNgdotFindAllArgs
27, // 27: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindAllIndex:type_name -> ngolofuzz.RegexpNgdotFindAllIndexArgs
28, // 28: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindAllSubmatch:type_name -> ngolofuzz.RegexpNgdotFindAllSubmatchArgs
29, // 29: ngolofuzz.NgoloFuzzOne.RegexpNgdotFindAllSubmatchIndex:type_name -> ngolofuzz.RegexpNgdotFindAllSubmatchIndexArgs
30, // 30: ngolofuzz.NgoloFuzzOne.RegexpNgdotSplit:type_name -> ngolofuzz.RegexpNgdotSplitArgs
31, // 31: ngolofuzz.NgoloFuzzOne.RegexpNgdotAppendText:type_name -> ngolofuzz.RegexpNgdotAppendTextArgs
32, // 32: ngolofuzz.NgoloFuzzOne.RegexpNgdotMarshalText:type_name -> ngolofuzz.RegexpNgdotMarshalTextArgs
33, // 33: ngolofuzz.NgoloFuzzOne.RegexpNgdotUnmarshalText:type_name -> ngolofuzz.RegexpNgdotUnmarshalTextArgs
34, // 34: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
35, // [35:35] is the sub-list for method output_type
35, // [35:35] is the sub-list for method input_type
35, // [35:35] is the sub-list for extension type_name
35, // [35:35] is the sub-list for extension extendee
0, // [0:35] is the sub-list for field type_name
}
// init eagerly registers the ngolofuzz file descriptor at program start.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init builds and registers the file descriptor,
// message types, and oneof wrappers for ngolofuzz.proto. Idempotent:
// it returns immediately once File_ngolofuzz_proto is set.
// Generated by protoc-gen-go; do not edit.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Register the oneof wrapper types for NgoloFuzzOne (msgTypes[34]).
file_ngolofuzz_proto_msgTypes[34].OneofWrappers = []any{
(*NgoloFuzzOne_RegexpNgdotString)(nil),
(*NgoloFuzzOne_RegexpNgdotCopy)(nil),
(*NgoloFuzzOne_Compile)(nil),
(*NgoloFuzzOne_CompilePOSIX)(nil),
(*NgoloFuzzOne_RegexpNgdotLongest)(nil),
(*NgoloFuzzOne_RegexpNgdotNumSubexp)(nil),
(*NgoloFuzzOne_RegexpNgdotSubexpNames)(nil),
(*NgoloFuzzOne_RegexpNgdotSubexpIndex)(nil),
(*NgoloFuzzOne_RegexpNgdotLiteralPrefix)(nil),
(*NgoloFuzzOne_RegexpNgdotMatchReader)(nil),
(*NgoloFuzzOne_RegexpNgdotMatchString)(nil),
(*NgoloFuzzOne_RegexpNgdotMatch)(nil),
(*NgoloFuzzOne_MatchReader)(nil),
(*NgoloFuzzOne_MatchString)(nil),
(*NgoloFuzzOne_Match)(nil),
(*NgoloFuzzOne_QuoteMeta)(nil),
(*NgoloFuzzOne_RegexpNgdotFind)(nil),
(*NgoloFuzzOne_RegexpNgdotFindIndex)(nil),
(*NgoloFuzzOne_RegexpNgdotFindString)(nil),
(*NgoloFuzzOne_RegexpNgdotFindStringIndex)(nil),
(*NgoloFuzzOne_RegexpNgdotFindReaderIndex)(nil),
(*NgoloFuzzOne_RegexpNgdotFindSubmatch)(nil),
(*NgoloFuzzOne_RegexpNgdotFindSubmatchIndex)(nil),
(*NgoloFuzzOne_RegexpNgdotFindStringSubmatch)(nil),
(*NgoloFuzzOne_RegexpNgdotFindStringSubmatchIndex)(nil),
(*NgoloFuzzOne_RegexpNgdotFindReaderSubmatchIndex)(nil),
(*NgoloFuzzOne_RegexpNgdotFindAll)(nil),
(*NgoloFuzzOne_RegexpNgdotFindAllIndex)(nil),
(*NgoloFuzzOne_RegexpNgdotFindAllSubmatch)(nil),
(*NgoloFuzzOne_RegexpNgdotFindAllSubmatchIndex)(nil),
(*NgoloFuzzOne_RegexpNgdotSplit)(nil),
(*NgoloFuzzOne_RegexpNgdotAppendText)(nil),
(*NgoloFuzzOne_RegexpNgdotMarshalText)(nil),
(*NgoloFuzzOne_RegexpNgdotUnmarshalText)(nil),
}
// Register the oneof wrapper types for NgoloFuzzAny (msgTypes[35]).
file_ngolofuzz_proto_msgTypes[35].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
// Build the protoreflect file descriptor from the raw descriptor bytes
// and wire up the Go types declared in this package.
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 37,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Free the construction-time tables; they are no longer needed.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_regexp_syntax
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"regexp/syntax"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stub backed by a fixed byte
// slice: Reads consume buf starting at offset, Writes are discarded.
type FuzzingConn struct {
	buf    []byte // data served to readers
	offset int    // number of bytes already consumed
}

// Read copies up to len(b) bytes of the remaining buffer into b and
// advances the read offset. It returns io.EOF once the buffer is
// exhausted.
//
// Bug fixed: the original branched on `len(b) < len(c.buf)+c.offset`
// (should have been minus), so a partially-consumed conn could report
// n = len(b) while copying fewer bytes and push offset past the end.
// Using copy's return value handles both the full and partial cases.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports a fully successful write; the fuzz
// harness never inspects written data.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the buffer as fully consumed so subsequent Reads hit EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}
// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address (satisfies net.Conn).
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns a placeholder address (satisfies net.Conn).
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op; deadlines are meaningless for an in-memory conn.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn returns a FuzzingConn whose reads are served from a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit these helper constructors only when the generated harness actually needs them.
// CreateBigInt builds a big.Int from the big-endian bytes in a
// (an empty or nil slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the bytes of a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray converts each int64 in a to int, preserving order.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (values
// outside [0, 65535] wrap, matching a plain Go conversion).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 decodes to U+FFFD, exactly as a range loop would.
func GetRune(s string) rune {
	rs := []rune(s)
	if len(rs) == 0 {
		return '\x00'
	}
	return rs[0]
}
// InstOpNewFromFuzz maps a fuzzer-chosen enum value onto a
// syntax.InstOp. Values outside 1..10 fall back to syntax.InstAlt.
func InstOpNewFromFuzz(p InstOpEnum) syntax.InstOp {
	ops := [...]syntax.InstOp{
		syntax.InstAltMatch,
		syntax.InstCapture,
		syntax.InstEmptyWidth,
		syntax.InstMatch,
		syntax.InstFail,
		syntax.InstNop,
		syntax.InstRune,
		syntax.InstRune1,
		syntax.InstRuneAny,
		syntax.InstRuneAnyNotNL,
	}
	if p >= 1 && p <= InstOpEnum(len(ops)) {
		return ops[p-1]
	}
	return syntax.InstAlt
}
// ConvertInstOpNewFromFuzz maps a slice of fuzz enum values onto
// syntax.InstOp values, preserving order.
func ConvertInstOpNewFromFuzz(a []InstOpEnum) []syntax.InstOp {
	out := make([]syntax.InstOp, 0, len(a))
	for _, v := range a {
		out = append(out, InstOpNewFromFuzz(v))
	}
	return out
}
// OpNewFromFuzz maps a fuzzer-chosen enum value onto a syntax.Op.
// Values outside 1..18 fall back to syntax.OpNoMatch.
func OpNewFromFuzz(p OpEnum) syntax.Op {
	ops := [...]syntax.Op{
		syntax.OpEmptyMatch,
		syntax.OpLiteral,
		syntax.OpCharClass,
		syntax.OpAnyCharNotNL,
		syntax.OpAnyChar,
		syntax.OpBeginLine,
		syntax.OpEndLine,
		syntax.OpBeginText,
		syntax.OpEndText,
		syntax.OpWordBoundary,
		syntax.OpNoWordBoundary,
		syntax.OpCapture,
		syntax.OpStar,
		syntax.OpPlus,
		syntax.OpQuest,
		syntax.OpRepeat,
		syntax.OpConcat,
		syntax.OpAlternate,
	}
	if p >= 1 && p <= OpEnum(len(ops)) {
		return ops[p-1]
	}
	return syntax.OpNoMatch
}
// ConvertOpNewFromFuzz maps a slice of fuzz enum values onto
// syntax.Op values, preserving order.
func ConvertOpNewFromFuzz(a []OpEnum) []syntax.Op {
	out := make([]syntax.Op, 0, len(a))
	for _, v := range a {
		out = append(out, OpNewFromFuzz(v))
	}
	return out
}
// ErrorCodeNewFromFuzz maps a fuzzer-chosen enum value onto a
// syntax.ErrorCode. Values outside 1..15 fall back to
// syntax.ErrInternalError.
func ErrorCodeNewFromFuzz(p ErrorCodeEnum) syntax.ErrorCode {
	codes := [...]syntax.ErrorCode{
		syntax.ErrInvalidCharClass,
		syntax.ErrInvalidCharRange,
		syntax.ErrInvalidEscape,
		syntax.ErrInvalidNamedCapture,
		syntax.ErrInvalidPerlOp,
		syntax.ErrInvalidRepeatOp,
		syntax.ErrInvalidRepeatSize,
		syntax.ErrInvalidUTF8,
		syntax.ErrMissingBracket,
		syntax.ErrMissingParen,
		syntax.ErrMissingRepeatArgument,
		syntax.ErrTrailingBackslash,
		syntax.ErrUnexpectedParen,
		syntax.ErrNestingDepth,
		syntax.ErrLarge,
	}
	if p >= 1 && p <= ErrorCodeEnum(len(codes)) {
		return codes[p-1]
	}
	return syntax.ErrInternalError
}
// ConvertErrorCodeNewFromFuzz maps a slice of fuzz enum values onto
// syntax.ErrorCode values, preserving order.
func ConvertErrorCodeNewFromFuzz(a []ErrorCodeEnum) []syntax.ErrorCode {
	out := make([]syntax.ErrorCode, 0, len(a))
	for _, v := range a {
		out = append(out, ErrorCodeNewFromFuzz(v))
	}
	return out
}
// FlagsNewFromFuzz maps a fuzzer-chosen enum value onto a single
// syntax.Flags value. Values outside 1..12 fall back to
// syntax.FoldCase. Note this selects one flag constant; it does not
// combine bitmasks.
func FlagsNewFromFuzz(p FlagsEnum) syntax.Flags {
	flags := [...]syntax.Flags{
		syntax.Literal,
		syntax.ClassNL,
		syntax.DotNL,
		syntax.OneLine,
		syntax.NonGreedy,
		syntax.PerlX,
		syntax.UnicodeGroups,
		syntax.WasDollar,
		syntax.Simple,
		syntax.MatchNL,
		syntax.Perl,
		syntax.POSIX,
	}
	if p >= 1 && p <= FlagsEnum(len(flags)) {
		return flags[p-1]
	}
	return syntax.FoldCase
}
// ConvertFlagsNewFromFuzz maps a slice of fuzz enum values onto
// syntax.Flags values, preserving order.
func ConvertFlagsNewFromFuzz(a []FlagsEnum) []syntax.Flags {
	out := make([]syntax.Flags, 0, len(a))
	for _, v := range a {
		out = append(out, FlagsNewFromFuzz(v))
	}
	return out
}
// FuzzNG_valid is the fuzz entry point for inputs that are known to be
// valid protobuf (e.g. produced by an LPM mutator); a failed unmarshal
// here indicates a harness bug, hence the panic rather than a skip.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Recover panics whose value is a string and discard them; re-panic
// any other value so genuine crashes still surface to the fuzzer.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// FuzzNG_unsure is the fuzz entry point for raw inputs that may not be
// valid protobuf: undecodable inputs are rejected (return 0) instead of
// panicking as FuzzNG_valid does.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Recover panics whose value is a string and discard them; re-panic
// any other value so genuine crashes still surface to the fuzzer.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-dump setup in FuzzNG_List.
var initialized bool
// FuzzNG_List interprets the decoded call list: each NgoloFuzzOne item
// dispatches to one regexp/syntax API call. Results that later calls
// could consume are cycled through the *Results slices round-robin.
// Returns 1 when the whole list was executed, 0 on early exit.
func FuzzNG_List(gen *NgoloFuzzList) int {
// On the first call only, optionally dump a Go-source reproducer of
// this call list to the file named by FUZZ_NG_REPRODUCER.
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// NOTE(review): nothing in this visible function ever appends to
// ProgResults or RegexpResults, so the result-consuming cases below
// always hit their empty-check and continue — confirm against the
// generator whether producer cases (Parse/Compile storing results)
// were meant to populate these.
var ProgResults []*syntax.Prog
ProgResultsIndex := 0
var RegexpResults []*syntax.Regexp
RegexpResultsIndex := 0
for l := range gen.List {
// Cap the number of executed calls to bound per-input run time.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Compile:
if len(RegexpResults) == 0 {
continue
}
arg0 := RegexpResults[RegexpResultsIndex]
RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
_, r1 := syntax.Compile(arg0)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_OpNgdotString:
arg0 := OpNewFromFuzz(a.OpNgdotString.I)
arg0.String()
case *NgoloFuzzOne_ErrorCodeNgdotString:
arg0 := ErrorCodeNewFromFuzz(a.ErrorCodeNgdotString.E)
arg0.String()
case *NgoloFuzzOne_Parse:
arg1 := FlagsNewFromFuzz(a.Parse.Flags)
_, r1 := syntax.Parse(a.Parse.S, arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_InstOpNgdotString:
arg0 := InstOpNewFromFuzz(a.InstOpNgdotString.I)
arg0.String()
case *NgoloFuzzOne_EmptyOpContext:
arg0 := GetRune(a.EmptyOpContext.R1)
arg1 := GetRune(a.EmptyOpContext.R2)
syntax.EmptyOpContext(arg0, arg1)
case *NgoloFuzzOne_IsWordChar:
arg0 := GetRune(a.IsWordChar.R)
syntax.IsWordChar(arg0)
case *NgoloFuzzOne_ProgNgdotString:
if len(ProgResults) == 0 {
continue
}
arg0 := ProgResults[ProgResultsIndex]
ProgResultsIndex = (ProgResultsIndex + 1) % len(ProgResults)
arg0.String()
case *NgoloFuzzOne_ProgNgdotPrefix:
if len(ProgResults) == 0 {
continue
}
arg0 := ProgResults[ProgResultsIndex]
ProgResultsIndex = (ProgResultsIndex + 1) % len(ProgResults)
arg0.Prefix()
case *NgoloFuzzOne_ProgNgdotStartCond:
if len(ProgResults) == 0 {
continue
}
arg0 := ProgResults[ProgResultsIndex]
ProgResultsIndex = (ProgResultsIndex + 1) % len(ProgResults)
arg0.StartCond()
case *NgoloFuzzOne_RegexpNgdotEqual:
if len(RegexpResults) == 0 {
continue
}
arg0 := RegexpResults[RegexpResultsIndex]
RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
if len(RegexpResults) == 0 {
continue
}
arg1 := RegexpResults[RegexpResultsIndex]
RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
arg0.Equal(arg1)
case *NgoloFuzzOne_RegexpNgdotString:
if len(RegexpResults) == 0 {
continue
}
arg0 := RegexpResults[RegexpResultsIndex]
RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
arg0.String()
case *NgoloFuzzOne_RegexpNgdotMaxCap:
if len(RegexpResults) == 0 {
continue
}
arg0 := RegexpResults[RegexpResultsIndex]
RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
arg0.MaxCap()
case *NgoloFuzzOne_RegexpNgdotCapNames:
if len(RegexpResults) == 0 {
continue
}
arg0 := RegexpResults[RegexpResultsIndex]
RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
arg0.CapNames()
case *NgoloFuzzOne_RegexpNgdotSimplify:
if len(RegexpResults) == 0 {
continue
}
arg0 := RegexpResults[RegexpResultsIndex]
RegexpResultsIndex = (RegexpResultsIndex + 1) % len(RegexpResults)
arg0.Simplify()
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer of the call list to w,
// mirroring the dispatch in FuzzNG_List (same round-robin indexing so
// the printed Regexp%d/Prog%d references line up with execution).
// WriteString errors are deliberately ignored: this is best-effort
// debug output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
ProgNb := 0
ProgResultsIndex := 0
RegexpNb := 0
RegexpResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_Compile:
if RegexpNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("syntax.Compile(Regexp%d)\n", (RegexpResultsIndex + 0) % RegexpNb))
RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
case *NgoloFuzzOne_OpNgdotString:
w.WriteString(fmt.Sprintf("OpNewFromFuzz(%#+v).String()\n", a.OpNgdotString.I))
case *NgoloFuzzOne_ErrorCodeNgdotString:
w.WriteString(fmt.Sprintf("ErrorCodeNewFromFuzz(%#+v).String()\n", a.ErrorCodeNgdotString.E))
case *NgoloFuzzOne_Parse:
w.WriteString(fmt.Sprintf("syntax.Parse(%#+v, FlagsNewFromFuzz(%#+v))\n", a.Parse.S, a.Parse.Flags))
case *NgoloFuzzOne_InstOpNgdotString:
w.WriteString(fmt.Sprintf("InstOpNewFromFuzz(%#+v).String()\n", a.InstOpNgdotString.I))
case *NgoloFuzzOne_EmptyOpContext:
w.WriteString(fmt.Sprintf("syntax.EmptyOpContext(GetRune(%#+v), GetRune(%#+v))\n", a.EmptyOpContext.R1, a.EmptyOpContext.R2))
case *NgoloFuzzOne_IsWordChar:
w.WriteString(fmt.Sprintf("syntax.IsWordChar(GetRune(%#+v))\n", a.IsWordChar.R))
case *NgoloFuzzOne_ProgNgdotString:
if ProgNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Prog%d.String()\n", ProgResultsIndex))
ProgResultsIndex = (ProgResultsIndex + 1) % ProgNb
case *NgoloFuzzOne_ProgNgdotPrefix:
if ProgNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Prog%d.Prefix()\n", ProgResultsIndex))
ProgResultsIndex = (ProgResultsIndex + 1) % ProgNb
case *NgoloFuzzOne_ProgNgdotStartCond:
if ProgNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Prog%d.StartCond()\n", ProgResultsIndex))
ProgResultsIndex = (ProgResultsIndex + 1) % ProgNb
case *NgoloFuzzOne_RegexpNgdotEqual:
// Two guards because the generator emits one per consumed arg;
// both consume from the same (possibly empty) pool.
if RegexpNb == 0 {
continue
}
if RegexpNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Regexp%d.Equal(Regexp%d)\n", RegexpResultsIndex, (RegexpResultsIndex + 1) % RegexpNb))
RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
case *NgoloFuzzOne_RegexpNgdotString:
if RegexpNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Regexp%d.String()\n", RegexpResultsIndex))
RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
case *NgoloFuzzOne_RegexpNgdotMaxCap:
if RegexpNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Regexp%d.MaxCap()\n", RegexpResultsIndex))
RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
case *NgoloFuzzOne_RegexpNgdotCapNames:
if RegexpNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Regexp%d.CapNames()\n", RegexpResultsIndex))
RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
case *NgoloFuzzOne_RegexpNgdotSimplify:
if RegexpNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Regexp%d.Simplify()\n", RegexpResultsIndex))
RegexpResultsIndex = (RegexpResultsIndex + 1) % RegexpNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_regexp_syntax
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time version guards emitted by protoc-gen-go.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// InstOpEnum mirrors the regexp/syntax InstOp constants for protobuf
// transport; InstOpNewFromFuzz converts it back. Generated by
// protoc-gen-go; do not edit.
type InstOpEnum int32
const (
InstOpEnum_InstAlt InstOpEnum = 0
InstOpEnum_InstAltMatch InstOpEnum = 1
InstOpEnum_InstCapture InstOpEnum = 2
InstOpEnum_InstEmptyWidth InstOpEnum = 3
InstOpEnum_InstMatch InstOpEnum = 4
InstOpEnum_InstFail InstOpEnum = 5
InstOpEnum_InstNop InstOpEnum = 6
InstOpEnum_InstRune InstOpEnum = 7
InstOpEnum_InstRune1 InstOpEnum = 8
InstOpEnum_InstRuneAny InstOpEnum = 9
InstOpEnum_InstRuneAnyNotNL InstOpEnum = 10
)
// Enum value maps for InstOpEnum.
var (
InstOpEnum_name = map[int32]string{
0: "InstAlt",
1: "InstAltMatch",
2: "InstCapture",
3: "InstEmptyWidth",
4: "InstMatch",
5: "InstFail",
6: "InstNop",
7: "InstRune",
8: "InstRune1",
9: "InstRuneAny",
10: "InstRuneAnyNotNL",
}
InstOpEnum_value = map[string]int32{
"InstAlt": 0,
"InstAltMatch": 1,
"InstCapture": 2,
"InstEmptyWidth": 3,
"InstMatch": 4,
"InstFail": 5,
"InstNop": 6,
"InstRune": 7,
"InstRune1": 8,
"InstRuneAny": 9,
"InstRuneAnyNotNL": 10,
}
)
// Enum returns a pointer to a copy of x (protobuf optional-field helper).
func (x InstOpEnum) Enum() *InstOpEnum {
p := new(InstOpEnum)
*p = x
return p
}
func (x InstOpEnum) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (InstOpEnum) Descriptor() protoreflect.EnumDescriptor {
return file_ngolofuzz_proto_enumTypes[0].Descriptor()
}
func (InstOpEnum) Type() protoreflect.EnumType {
return &file_ngolofuzz_proto_enumTypes[0]
}
func (x InstOpEnum) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use InstOpEnum.Descriptor instead.
func (InstOpEnum) EnumDescriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// OpEnum mirrors the regexp/syntax Op constants for protobuf transport;
// OpNewFromFuzz converts it back. Generated by protoc-gen-go; do not edit.
type OpEnum int32
const (
OpEnum_OpNoMatch OpEnum = 0
OpEnum_OpEmptyMatch OpEnum = 1
OpEnum_OpLiteral OpEnum = 2
OpEnum_OpCharClass OpEnum = 3
OpEnum_OpAnyCharNotNL OpEnum = 4
OpEnum_OpAnyChar OpEnum = 5
OpEnum_OpBeginLine OpEnum = 6
OpEnum_OpEndLine OpEnum = 7
OpEnum_OpBeginText OpEnum = 8
OpEnum_OpEndText OpEnum = 9
OpEnum_OpWordBoundary OpEnum = 10
OpEnum_OpNoWordBoundary OpEnum = 11
OpEnum_OpCapture OpEnum = 12
OpEnum_OpStar OpEnum = 13
OpEnum_OpPlus OpEnum = 14
OpEnum_OpQuest OpEnum = 15
OpEnum_OpRepeat OpEnum = 16
OpEnum_OpConcat OpEnum = 17
OpEnum_OpAlternate OpEnum = 18
)
// Enum value maps for OpEnum.
var (
OpEnum_name = map[int32]string{
0: "OpNoMatch",
1: "OpEmptyMatch",
2: "OpLiteral",
3: "OpCharClass",
4: "OpAnyCharNotNL",
5: "OpAnyChar",
6: "OpBeginLine",
7: "OpEndLine",
8: "OpBeginText",
9: "OpEndText",
10: "OpWordBoundary",
11: "OpNoWordBoundary",
12: "OpCapture",
13: "OpStar",
14: "OpPlus",
15: "OpQuest",
16: "OpRepeat",
17: "OpConcat",
18: "OpAlternate",
}
OpEnum_value = map[string]int32{
"OpNoMatch": 0,
"OpEmptyMatch": 1,
"OpLiteral": 2,
"OpCharClass": 3,
"OpAnyCharNotNL": 4,
"OpAnyChar": 5,
"OpBeginLine": 6,
"OpEndLine": 7,
"OpBeginText": 8,
"OpEndText": 9,
"OpWordBoundary": 10,
"OpNoWordBoundary": 11,
"OpCapture": 12,
"OpStar": 13,
"OpPlus": 14,
"OpQuest": 15,
"OpRepeat": 16,
"OpConcat": 17,
"OpAlternate": 18,
}
)
// Enum returns a pointer to a copy of x (protobuf optional-field helper).
func (x OpEnum) Enum() *OpEnum {
p := new(OpEnum)
*p = x
return p
}
func (x OpEnum) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (OpEnum) Descriptor() protoreflect.EnumDescriptor {
return file_ngolofuzz_proto_enumTypes[1].Descriptor()
}
func (OpEnum) Type() protoreflect.EnumType {
return &file_ngolofuzz_proto_enumTypes[1]
}
func (x OpEnum) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use OpEnum.Descriptor instead.
func (OpEnum) EnumDescriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// ErrorCodeEnum mirrors the regexp/syntax ErrorCode constants for
// protobuf transport; ErrorCodeNewFromFuzz converts it back.
// Generated by protoc-gen-go; do not edit.
type ErrorCodeEnum int32
const (
ErrorCodeEnum_ErrInternalError ErrorCodeEnum = 0
ErrorCodeEnum_ErrInvalidCharClass ErrorCodeEnum = 1
ErrorCodeEnum_ErrInvalidCharRange ErrorCodeEnum = 2
ErrorCodeEnum_ErrInvalidEscape ErrorCodeEnum = 3
ErrorCodeEnum_ErrInvalidNamedCapture ErrorCodeEnum = 4
ErrorCodeEnum_ErrInvalidPerlOp ErrorCodeEnum = 5
ErrorCodeEnum_ErrInvalidRepeatOp ErrorCodeEnum = 6
ErrorCodeEnum_ErrInvalidRepeatSize ErrorCodeEnum = 7
ErrorCodeEnum_ErrInvalidUTF8 ErrorCodeEnum = 8
ErrorCodeEnum_ErrMissingBracket ErrorCodeEnum = 9
ErrorCodeEnum_ErrMissingParen ErrorCodeEnum = 10
ErrorCodeEnum_ErrMissingRepeatArgument ErrorCodeEnum = 11
ErrorCodeEnum_ErrTrailingBackslash ErrorCodeEnum = 12
ErrorCodeEnum_ErrUnexpectedParen ErrorCodeEnum = 13
ErrorCodeEnum_ErrNestingDepth ErrorCodeEnum = 14
ErrorCodeEnum_ErrLarge ErrorCodeEnum = 15
)
// Enum value maps for ErrorCodeEnum.
var (
ErrorCodeEnum_name = map[int32]string{
0: "ErrInternalError",
1: "ErrInvalidCharClass",
2: "ErrInvalidCharRange",
3: "ErrInvalidEscape",
4: "ErrInvalidNamedCapture",
5: "ErrInvalidPerlOp",
6: "ErrInvalidRepeatOp",
7: "ErrInvalidRepeatSize",
8: "ErrInvalidUTF8",
9: "ErrMissingBracket",
10: "ErrMissingParen",
11: "ErrMissingRepeatArgument",
12: "ErrTrailingBackslash",
13: "ErrUnexpectedParen",
14: "ErrNestingDepth",
15: "ErrLarge",
}
ErrorCodeEnum_value = map[string]int32{
"ErrInternalError": 0,
"ErrInvalidCharClass": 1,
"ErrInvalidCharRange": 2,
"ErrInvalidEscape": 3,
"ErrInvalidNamedCapture": 4,
"ErrInvalidPerlOp": 5,
"ErrInvalidRepeatOp": 6,
"ErrInvalidRepeatSize": 7,
"ErrInvalidUTF8": 8,
"ErrMissingBracket": 9,
"ErrMissingParen": 10,
"ErrMissingRepeatArgument": 11,
"ErrTrailingBackslash": 12,
"ErrUnexpectedParen": 13,
"ErrNestingDepth": 14,
"ErrLarge": 15,
}
)
// Enum returns a pointer to a copy of x (protobuf optional-field helper).
func (x ErrorCodeEnum) Enum() *ErrorCodeEnum {
p := new(ErrorCodeEnum)
*p = x
return p
}
func (x ErrorCodeEnum) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (ErrorCodeEnum) Descriptor() protoreflect.EnumDescriptor {
return file_ngolofuzz_proto_enumTypes[2].Descriptor()
}
func (ErrorCodeEnum) Type() protoreflect.EnumType {
return &file_ngolofuzz_proto_enumTypes[2]
}
func (x ErrorCodeEnum) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use ErrorCodeEnum.Descriptor instead.
func (ErrorCodeEnum) EnumDescriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// FlagsEnum enumerates regexp/syntax Flags values for protobuf
// transport; FlagsNewFromFuzz converts an entry back to a single
// syntax.Flags constant. Generated by protoc-gen-go; do not edit.
type FlagsEnum int32
const (
FlagsEnum_FoldCase FlagsEnum = 0
FlagsEnum_Literal FlagsEnum = 1
FlagsEnum_ClassNL FlagsEnum = 2
FlagsEnum_DotNL FlagsEnum = 3
FlagsEnum_OneLine FlagsEnum = 4
FlagsEnum_NonGreedy FlagsEnum = 5
FlagsEnum_PerlX FlagsEnum = 6
FlagsEnum_UnicodeGroups FlagsEnum = 7
FlagsEnum_WasDollar FlagsEnum = 8
FlagsEnum_Simple FlagsEnum = 9
FlagsEnum_MatchNL FlagsEnum = 10
FlagsEnum_Perl FlagsEnum = 11
FlagsEnum_POSIX FlagsEnum = 12
)
// Enum value maps for FlagsEnum.
var (
FlagsEnum_name = map[int32]string{
0: "FoldCase",
1: "Literal",
2: "ClassNL",
3: "DotNL",
4: "OneLine",
5: "NonGreedy",
6: "PerlX",
7: "UnicodeGroups",
8: "WasDollar",
9: "Simple",
10: "MatchNL",
11: "Perl",
12: "POSIX",
}
FlagsEnum_value = map[string]int32{
"FoldCase": 0,
"Literal": 1,
"ClassNL": 2,
"DotNL": 3,
"OneLine": 4,
"NonGreedy": 5,
"PerlX": 6,
"UnicodeGroups": 7,
"WasDollar": 8,
"Simple": 9,
"MatchNL": 10,
"Perl": 11,
"POSIX": 12,
}
)
// Enum returns a pointer to a copy of x (protobuf optional-field helper).
func (x FlagsEnum) Enum() *FlagsEnum {
p := new(FlagsEnum)
*p = x
return p
}
func (x FlagsEnum) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (FlagsEnum) Descriptor() protoreflect.EnumDescriptor {
return file_ngolofuzz_proto_enumTypes[3].Descriptor()
}
func (FlagsEnum) Type() protoreflect.EnumType {
return &file_ngolofuzz_proto_enumTypes[3]
}
func (x FlagsEnum) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use FlagsEnum.Descriptor instead.
func (FlagsEnum) EnumDescriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// CompileArgs is the (field-less) argument message for syntax.Compile;
// the Regexp argument itself is drawn from earlier results at runtime.
// Generated by protoc-gen-go; do not edit.
type CompileArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *CompileArgs) Reset() {
*x = CompileArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *CompileArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CompileArgs) ProtoMessage() {}
func (x *CompileArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CompileArgs.ProtoReflect.Descriptor instead.
func (*CompileArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// OpNgdotStringArgs carries the OpEnum argument for Op.String calls.
// Generated by protoc-gen-go; do not edit.
type OpNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I OpEnum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.OpEnum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *OpNgdotStringArgs) Reset() {
*x = OpNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *OpNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*OpNgdotStringArgs) ProtoMessage() {}
func (x *OpNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use OpNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*OpNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetI returns the enum field, or OpEnum_OpNoMatch on a nil receiver.
func (x *OpNgdotStringArgs) GetI() OpEnum {
if x != nil {
return x.I
}
return OpEnum_OpNoMatch
}
// ErrorCodeNgdotStringArgs carries the ErrorCodeEnum argument for
// ErrorCode.String calls. Generated by protoc-gen-go; do not edit.
type ErrorCodeNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
E ErrorCodeEnum `protobuf:"varint,1,opt,name=e,proto3,enum=ngolofuzz.ErrorCodeEnum" json:"e,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ErrorCodeNgdotStringArgs) Reset() {
*x = ErrorCodeNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ErrorCodeNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ErrorCodeNgdotStringArgs) ProtoMessage() {}
func (x *ErrorCodeNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ErrorCodeNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*ErrorCodeNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// GetE returns the enum field, or ErrInternalError on a nil receiver.
func (x *ErrorCodeNgdotStringArgs) GetE() ErrorCodeEnum {
if x != nil {
return x.E
}
return ErrorCodeEnum_ErrInternalError
}
// ParseArgs carries the pattern string and flags for syntax.Parse calls.
// Generated by protoc-gen-go; do not edit.
type ParseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Flags FlagsEnum `protobuf:"varint,2,opt,name=flags,proto3,enum=ngolofuzz.FlagsEnum" json:"flags,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ParseArgs) Reset() {
*x = ParseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ParseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParseArgs) ProtoMessage() {}
func (x *ParseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseArgs.ProtoReflect.Descriptor instead.
func (*ParseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetS returns the pattern string, or "" on a nil receiver.
func (x *ParseArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetFlags returns the flags field, or FoldCase on a nil receiver.
func (x *ParseArgs) GetFlags() FlagsEnum {
if x != nil {
return x.Flags
}
return FlagsEnum_FoldCase
}
// InstOpNgdotStringArgs carries the InstOpEnum argument for
// InstOp.String calls. Generated by protoc-gen-go; do not edit.
type InstOpNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
I InstOpEnum `protobuf:"varint,1,opt,name=i,proto3,enum=ngolofuzz.InstOpEnum" json:"i,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *InstOpNgdotStringArgs) Reset() {
*x = InstOpNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *InstOpNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*InstOpNgdotStringArgs) ProtoMessage() {}
func (x *InstOpNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use InstOpNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*InstOpNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// GetI returns the enum field, or InstAlt on a nil receiver.
func (x *InstOpNgdotStringArgs) GetI() InstOpEnum {
if x != nil {
return x.I
}
return InstOpEnum_InstAlt
}
// EmptyOpContextArgs carries two strings whose first runes become the
// arguments of syntax.EmptyOpContext (via GetRune).
// Generated by protoc-gen-go; do not edit.
type EmptyOpContextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R1 string `protobuf:"bytes,1,opt,name=r1,proto3" json:"r1,omitempty"`
R2 string `protobuf:"bytes,2,opt,name=r2,proto3" json:"r2,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EmptyOpContextArgs) Reset() {
*x = EmptyOpContextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EmptyOpContextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EmptyOpContextArgs) ProtoMessage() {}
func (x *EmptyOpContextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EmptyOpContextArgs.ProtoReflect.Descriptor instead.
func (*EmptyOpContextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetR1 returns the first rune source, or "" on a nil receiver.
func (x *EmptyOpContextArgs) GetR1() string {
if x != nil {
return x.R1
}
return ""
}
// GetR2 returns the second rune source, or "" on a nil receiver.
func (x *EmptyOpContextArgs) GetR2() string {
if x != nil {
return x.R2
}
return ""
}
// IsWordCharArgs is the generated protobuf message
// ngolofuzz.IsWordCharArgs; it carries a single string field r.
type IsWordCharArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             string                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *IsWordCharArgs) Reset() {
	*x = IsWordCharArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *IsWordCharArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*IsWordCharArgs) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *IsWordCharArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsWordCharArgs.ProtoReflect.Descriptor instead.
func (*IsWordCharArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetR returns the r field, or "" when the receiver is nil.
func (x *IsWordCharArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// ProgNgdotStringArgs is the generated protobuf message
// ngolofuzz.ProgNgdotStringArgs; it declares no fields of its own.
type ProgNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *ProgNgdotStringArgs) Reset() {
	*x = ProgNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ProgNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ProgNgdotStringArgs) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *ProgNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ProgNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*ProgNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// ProgNgdotPrefixArgs is the generated protobuf message
// ngolofuzz.ProgNgdotPrefixArgs; it declares no fields of its own.
type ProgNgdotPrefixArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *ProgNgdotPrefixArgs) Reset() {
	*x = ProgNgdotPrefixArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ProgNgdotPrefixArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ProgNgdotPrefixArgs) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *ProgNgdotPrefixArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ProgNgdotPrefixArgs.ProtoReflect.Descriptor instead.
func (*ProgNgdotPrefixArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// ProgNgdotStartCondArgs is the generated protobuf message
// ngolofuzz.ProgNgdotStartCondArgs; it declares no fields of its own.
type ProgNgdotStartCondArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *ProgNgdotStartCondArgs) Reset() {
	*x = ProgNgdotStartCondArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *ProgNgdotStartCondArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ProgNgdotStartCondArgs) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *ProgNgdotStartCondArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ProgNgdotStartCondArgs.ProtoReflect.Descriptor instead.
func (*ProgNgdotStartCondArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// RegexpNgdotEqualArgs is the generated protobuf message
// ngolofuzz.RegexpNgdotEqualArgs; it declares no fields of its own.
type RegexpNgdotEqualArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *RegexpNgdotEqualArgs) Reset() {
	*x = RegexpNgdotEqualArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *RegexpNgdotEqualArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RegexpNgdotEqualArgs) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *RegexpNgdotEqualArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegexpNgdotEqualArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotEqualArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// RegexpNgdotStringArgs is the generated protobuf message
// ngolofuzz.RegexpNgdotStringArgs; it declares no fields of its own.
type RegexpNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *RegexpNgdotStringArgs) Reset() {
	*x = RegexpNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *RegexpNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RegexpNgdotStringArgs) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *RegexpNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegexpNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// RegexpNgdotMaxCapArgs is the generated protobuf message
// ngolofuzz.RegexpNgdotMaxCapArgs; it declares no fields of its own.
type RegexpNgdotMaxCapArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *RegexpNgdotMaxCapArgs) Reset() {
	*x = RegexpNgdotMaxCapArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *RegexpNgdotMaxCapArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RegexpNgdotMaxCapArgs) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *RegexpNgdotMaxCapArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegexpNgdotMaxCapArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotMaxCapArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// RegexpNgdotCapNamesArgs is the generated protobuf message
// ngolofuzz.RegexpNgdotCapNamesArgs; it declares no fields of its own.
type RegexpNgdotCapNamesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *RegexpNgdotCapNamesArgs) Reset() {
	*x = RegexpNgdotCapNamesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *RegexpNgdotCapNamesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RegexpNgdotCapNamesArgs) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *RegexpNgdotCapNamesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegexpNgdotCapNamesArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotCapNamesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// RegexpNgdotSimplifyArgs is the generated protobuf message
// ngolofuzz.RegexpNgdotSimplifyArgs; it declares no fields of its own.
type RegexpNgdotSimplifyArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *RegexpNgdotSimplifyArgs) Reset() {
	*x = RegexpNgdotSimplifyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *RegexpNgdotSimplifyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RegexpNgdotSimplifyArgs) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *RegexpNgdotSimplifyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegexpNgdotSimplifyArgs.ProtoReflect.Descriptor instead.
func (*RegexpNgdotSimplifyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// NgoloFuzzOne is the generated protobuf message ngolofuzz.NgoloFuzzOne.
// It holds exactly one of the *Args messages below in its Item oneof.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_Compile
	//	*NgoloFuzzOne_OpNgdotString
	//	*NgoloFuzzOne_ErrorCodeNgdotString
	//	*NgoloFuzzOne_Parse
	//	*NgoloFuzzOne_InstOpNgdotString
	//	*NgoloFuzzOne_EmptyOpContext
	//	*NgoloFuzzOne_IsWordChar
	//	*NgoloFuzzOne_ProgNgdotString
	//	*NgoloFuzzOne_ProgNgdotPrefix
	//	*NgoloFuzzOne_ProgNgdotStartCond
	//	*NgoloFuzzOne_RegexpNgdotEqual
	//	*NgoloFuzzOne_RegexpNgdotString
	//	*NgoloFuzzOne_RegexpNgdotMaxCap
	//	*NgoloFuzzOne_RegexpNgdotCapNames
	//	*NgoloFuzzOne_RegexpNgdotSimplify
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// GetItem returns the populated oneof wrapper, or nil when the receiver
// is nil or no field is set.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each GetXxx below returns the oneof's payload when that case is the
// one currently set, and nil otherwise (including on a nil receiver).
func (x *NgoloFuzzOne) GetCompile() *CompileArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Compile); ok {
			return x.Compile
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetOpNgdotString() *OpNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_OpNgdotString); ok {
			return x.OpNgdotString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetErrorCodeNgdotString() *ErrorCodeNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ErrorCodeNgdotString); ok {
			return x.ErrorCodeNgdotString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetParse() *ParseArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Parse); ok {
			return x.Parse
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetInstOpNgdotString() *InstOpNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_InstOpNgdotString); ok {
			return x.InstOpNgdotString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetEmptyOpContext() *EmptyOpContextArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EmptyOpContext); ok {
			return x.EmptyOpContext
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetIsWordChar() *IsWordCharArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IsWordChar); ok {
			return x.IsWordChar
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetProgNgdotString() *ProgNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ProgNgdotString); ok {
			return x.ProgNgdotString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetProgNgdotPrefix() *ProgNgdotPrefixArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ProgNgdotPrefix); ok {
			return x.ProgNgdotPrefix
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetProgNgdotStartCond() *ProgNgdotStartCondArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ProgNgdotStartCond); ok {
			return x.ProgNgdotStartCond
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetRegexpNgdotEqual() *RegexpNgdotEqualArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotEqual); ok {
			return x.RegexpNgdotEqual
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetRegexpNgdotString() *RegexpNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotString); ok {
			return x.RegexpNgdotString
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetRegexpNgdotMaxCap() *RegexpNgdotMaxCapArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotMaxCap); ok {
			return x.RegexpNgdotMaxCap
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetRegexpNgdotCapNames() *RegexpNgdotCapNamesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotCapNames); ok {
			return x.RegexpNgdotCapNames
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetRegexpNgdotSimplify() *RegexpNgdotSimplifyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_RegexpNgdotSimplify); ok {
			return x.RegexpNgdotSimplify
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every
// oneof wrapper type below.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_Compile struct {
	Compile *CompileArgs `protobuf:"bytes,1,opt,name=Compile,proto3,oneof"`
}

type NgoloFuzzOne_OpNgdotString struct {
	OpNgdotString *OpNgdotStringArgs `protobuf:"bytes,2,opt,name=OpNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_ErrorCodeNgdotString struct {
	ErrorCodeNgdotString *ErrorCodeNgdotStringArgs `protobuf:"bytes,3,opt,name=ErrorCodeNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_Parse struct {
	Parse *ParseArgs `protobuf:"bytes,4,opt,name=Parse,proto3,oneof"`
}

type NgoloFuzzOne_InstOpNgdotString struct {
	InstOpNgdotString *InstOpNgdotStringArgs `protobuf:"bytes,5,opt,name=InstOpNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_EmptyOpContext struct {
	EmptyOpContext *EmptyOpContextArgs `protobuf:"bytes,6,opt,name=EmptyOpContext,proto3,oneof"`
}

type NgoloFuzzOne_IsWordChar struct {
	IsWordChar *IsWordCharArgs `protobuf:"bytes,7,opt,name=IsWordChar,proto3,oneof"`
}

type NgoloFuzzOne_ProgNgdotString struct {
	ProgNgdotString *ProgNgdotStringArgs `protobuf:"bytes,8,opt,name=ProgNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_ProgNgdotPrefix struct {
	ProgNgdotPrefix *ProgNgdotPrefixArgs `protobuf:"bytes,9,opt,name=ProgNgdotPrefix,proto3,oneof"`
}

type NgoloFuzzOne_ProgNgdotStartCond struct {
	ProgNgdotStartCond *ProgNgdotStartCondArgs `protobuf:"bytes,10,opt,name=ProgNgdotStartCond,proto3,oneof"`
}

type NgoloFuzzOne_RegexpNgdotEqual struct {
	RegexpNgdotEqual *RegexpNgdotEqualArgs `protobuf:"bytes,11,opt,name=RegexpNgdotEqual,proto3,oneof"`
}

type NgoloFuzzOne_RegexpNgdotString struct {
	RegexpNgdotString *RegexpNgdotStringArgs `protobuf:"bytes,12,opt,name=RegexpNgdotString,proto3,oneof"`
}

type NgoloFuzzOne_RegexpNgdotMaxCap struct {
	RegexpNgdotMaxCap *RegexpNgdotMaxCapArgs `protobuf:"bytes,13,opt,name=RegexpNgdotMaxCap,proto3,oneof"`
}

type NgoloFuzzOne_RegexpNgdotCapNames struct {
	RegexpNgdotCapNames *RegexpNgdotCapNamesArgs `protobuf:"bytes,14,opt,name=RegexpNgdotCapNames,proto3,oneof"`
}

type NgoloFuzzOne_RegexpNgdotSimplify struct {
	RegexpNgdotSimplify *RegexpNgdotSimplifyArgs `protobuf:"bytes,15,opt,name=RegexpNgdotSimplify,proto3,oneof"`
}

func (*NgoloFuzzOne_Compile) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_OpNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ErrorCodeNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Parse) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_InstOpNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_EmptyOpContext) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_IsWordChar) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ProgNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ProgNgdotPrefix) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ProgNgdotStartCond) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_RegexpNgdotEqual) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_RegexpNgdotString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_RegexpNgdotMaxCap) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_RegexpNgdotCapNames) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_RegexpNgdotSimplify) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is the generated protobuf message ngolofuzz.NgoloFuzzAny;
// its Item oneof holds one scalar value (double, int64, bool, string, or
// bytes).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// GetItem returns the populated oneof wrapper, or nil when the receiver
// is nil or no field is set.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each GetXxx below returns the oneof's payload when that case is set,
// and the type's zero value otherwise (including on a nil receiver).
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every
// oneof wrapper type below.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated protobuf message ngolofuzz.NgoloFuzzList:
// a repeated sequence of NgoloFuzzOne entries.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores x to an empty message.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect exposes the message via the protobuf reflection API.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}

// GetList returns the list field, or nil when the receiver is nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\r\n" +
"\vCompileArgs\"4\n" +
"\x11OpNgdotStringArgs\x12\x1f\n" +
"\x01i\x18\x01 \x01(\x0e2\x11.ngolofuzz.OpEnumR\x01i\"B\n" +
"\x18ErrorCodeNgdotStringArgs\x12&\n" +
"\x01e\x18\x01 \x01(\x0e2\x18.ngolofuzz.ErrorCodeEnumR\x01e\"E\n" +
"\tParseArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12*\n" +
"\x05flags\x18\x02 \x01(\x0e2\x14.ngolofuzz.FlagsEnumR\x05flags\"<\n" +
"\x15InstOpNgdotStringArgs\x12#\n" +
"\x01i\x18\x01 \x01(\x0e2\x15.ngolofuzz.InstOpEnumR\x01i\"4\n" +
"\x12EmptyOpContextArgs\x12\x0e\n" +
"\x02r1\x18\x01 \x01(\tR\x02r1\x12\x0e\n" +
"\x02r2\x18\x02 \x01(\tR\x02r2\"\x1e\n" +
"\x0eIsWordCharArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x15\n" +
"\x13ProgNgdotStringArgs\"\x15\n" +
"\x13ProgNgdotPrefixArgs\"\x18\n" +
"\x16ProgNgdotStartCondArgs\"\x16\n" +
"\x14RegexpNgdotEqualArgs\"\x17\n" +
"\x15RegexpNgdotStringArgs\"\x17\n" +
"\x15RegexpNgdotMaxCapArgs\"\x19\n" +
"\x17RegexpNgdotCapNamesArgs\"\x19\n" +
"\x17RegexpNgdotSimplifyArgs\"\x81\t\n" +
"\fNgoloFuzzOne\x122\n" +
"\aCompile\x18\x01 \x01(\v2\x16.ngolofuzz.CompileArgsH\x00R\aCompile\x12D\n" +
"\rOpNgdotString\x18\x02 \x01(\v2\x1c.ngolofuzz.OpNgdotStringArgsH\x00R\rOpNgdotString\x12Y\n" +
"\x14ErrorCodeNgdotString\x18\x03 \x01(\v2#.ngolofuzz.ErrorCodeNgdotStringArgsH\x00R\x14ErrorCodeNgdotString\x12,\n" +
"\x05Parse\x18\x04 \x01(\v2\x14.ngolofuzz.ParseArgsH\x00R\x05Parse\x12P\n" +
"\x11InstOpNgdotString\x18\x05 \x01(\v2 .ngolofuzz.InstOpNgdotStringArgsH\x00R\x11InstOpNgdotString\x12G\n" +
"\x0eEmptyOpContext\x18\x06 \x01(\v2\x1d.ngolofuzz.EmptyOpContextArgsH\x00R\x0eEmptyOpContext\x12;\n" +
"\n" +
"IsWordChar\x18\a \x01(\v2\x19.ngolofuzz.IsWordCharArgsH\x00R\n" +
"IsWordChar\x12J\n" +
"\x0fProgNgdotString\x18\b \x01(\v2\x1e.ngolofuzz.ProgNgdotStringArgsH\x00R\x0fProgNgdotString\x12J\n" +
"\x0fProgNgdotPrefix\x18\t \x01(\v2\x1e.ngolofuzz.ProgNgdotPrefixArgsH\x00R\x0fProgNgdotPrefix\x12S\n" +
"\x12ProgNgdotStartCond\x18\n" +
" \x01(\v2!.ngolofuzz.ProgNgdotStartCondArgsH\x00R\x12ProgNgdotStartCond\x12M\n" +
"\x10RegexpNgdotEqual\x18\v \x01(\v2\x1f.ngolofuzz.RegexpNgdotEqualArgsH\x00R\x10RegexpNgdotEqual\x12P\n" +
"\x11RegexpNgdotString\x18\f \x01(\v2 .ngolofuzz.RegexpNgdotStringArgsH\x00R\x11RegexpNgdotString\x12P\n" +
"\x11RegexpNgdotMaxCap\x18\r \x01(\v2 .ngolofuzz.RegexpNgdotMaxCapArgsH\x00R\x11RegexpNgdotMaxCap\x12V\n" +
"\x13RegexpNgdotCapNames\x18\x0e \x01(\v2\".ngolofuzz.RegexpNgdotCapNamesArgsH\x00R\x13RegexpNgdotCapNames\x12V\n" +
"\x13RegexpNgdotSimplify\x18\x0f \x01(\v2\".ngolofuzz.RegexpNgdotSimplifyArgsH\x00R\x13RegexpNgdotSimplifyB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04list*\xbe\x01\n" +
"\n" +
"InstOpEnum\x12\v\n" +
"\aInstAlt\x10\x00\x12\x10\n" +
"\fInstAltMatch\x10\x01\x12\x0f\n" +
"\vInstCapture\x10\x02\x12\x12\n" +
"\x0eInstEmptyWidth\x10\x03\x12\r\n" +
"\tInstMatch\x10\x04\x12\f\n" +
"\bInstFail\x10\x05\x12\v\n" +
"\aInstNop\x10\x06\x12\f\n" +
"\bInstRune\x10\a\x12\r\n" +
"\tInstRune1\x10\b\x12\x0f\n" +
"\vInstRuneAny\x10\t\x12\x14\n" +
"\x10InstRuneAnyNotNL\x10\n" +
"*\xb7\x02\n" +
"\x06OpEnum\x12\r\n" +
"\tOpNoMatch\x10\x00\x12\x10\n" +
"\fOpEmptyMatch\x10\x01\x12\r\n" +
"\tOpLiteral\x10\x02\x12\x0f\n" +
"\vOpCharClass\x10\x03\x12\x12\n" +
"\x0eOpAnyCharNotNL\x10\x04\x12\r\n" +
"\tOpAnyChar\x10\x05\x12\x0f\n" +
"\vOpBeginLine\x10\x06\x12\r\n" +
"\tOpEndLine\x10\a\x12\x0f\n" +
"\vOpBeginText\x10\b\x12\r\n" +
"\tOpEndText\x10\t\x12\x12\n" +
"\x0eOpWordBoundary\x10\n" +
"\x12\x14\n" +
"\x10OpNoWordBoundary\x10\v\x12\r\n" +
"\tOpCapture\x10\f\x12\n" +
"\n" +
"\x06OpStar\x10\r\x12\n" +
"\n" +
"\x06OpPlus\x10\x0e\x12\v\n" +
"\aOpQuest\x10\x0f\x12\f\n" +
"\bOpRepeat\x10\x10\x12\f\n" +
"\bOpConcat\x10\x11\x12\x0f\n" +
"\vOpAlternate\x10\x12*\x84\x03\n" +
"\rErrorCodeEnum\x12\x14\n" +
"\x10ErrInternalError\x10\x00\x12\x17\n" +
"\x13ErrInvalidCharClass\x10\x01\x12\x17\n" +
"\x13ErrInvalidCharRange\x10\x02\x12\x14\n" +
"\x10ErrInvalidEscape\x10\x03\x12\x1a\n" +
"\x16ErrInvalidNamedCapture\x10\x04\x12\x14\n" +
"\x10ErrInvalidPerlOp\x10\x05\x12\x16\n" +
"\x12ErrInvalidRepeatOp\x10\x06\x12\x18\n" +
"\x14ErrInvalidRepeatSize\x10\a\x12\x12\n" +
"\x0eErrInvalidUTF8\x10\b\x12\x15\n" +
"\x11ErrMissingBracket\x10\t\x12\x13\n" +
"\x0fErrMissingParen\x10\n" +
"\x12\x1c\n" +
"\x18ErrMissingRepeatArgument\x10\v\x12\x18\n" +
"\x14ErrTrailingBackslash\x10\f\x12\x16\n" +
"\x12ErrUnexpectedParen\x10\r\x12\x13\n" +
"\x0fErrNestingDepth\x10\x0e\x12\f\n" +
"\bErrLarge\x10\x0f*\xb5\x01\n" +
"\tFlagsEnum\x12\f\n" +
"\bFoldCase\x10\x00\x12\v\n" +
"\aLiteral\x10\x01\x12\v\n" +
"\aClassNL\x10\x02\x12\t\n" +
"\x05DotNL\x10\x03\x12\v\n" +
"\aOneLine\x10\x04\x12\r\n" +
"\tNonGreedy\x10\x05\x12\t\n" +
"\x05PerlX\x10\x06\x12\x11\n" +
"\rUnicodeGroups\x10\a\x12\r\n" +
"\tWasDollar\x10\b\x12\n" +
"\n" +
"\x06Simple\x10\t\x12\v\n" +
"\aMatchNL\x10\n" +
"\x12\b\n" +
"\x04Perl\x10\v\x12\t\n" +
"\x05POSIX\x10\fB\x1aZ\x18./;fuzz_ng_regexp_syntaxb\x06proto3"
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file
// descriptor exactly once and returns the cached result thereafter.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		// unsafe.Slice over the string data avoids copying the constant
		// before compression.
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 18)
var file_ngolofuzz_proto_goTypes = []any{
(InstOpEnum)(0), // 0: ngolofuzz.InstOpEnum
(OpEnum)(0), // 1: ngolofuzz.OpEnum
(ErrorCodeEnum)(0), // 2: ngolofuzz.ErrorCodeEnum
(FlagsEnum)(0), // 3: ngolofuzz.FlagsEnum
(*CompileArgs)(nil), // 4: ngolofuzz.CompileArgs
(*OpNgdotStringArgs)(nil), // 5: ngolofuzz.OpNgdotStringArgs
(*ErrorCodeNgdotStringArgs)(nil), // 6: ngolofuzz.ErrorCodeNgdotStringArgs
(*ParseArgs)(nil), // 7: ngolofuzz.ParseArgs
(*InstOpNgdotStringArgs)(nil), // 8: ngolofuzz.InstOpNgdotStringArgs
(*EmptyOpContextArgs)(nil), // 9: ngolofuzz.EmptyOpContextArgs
(*IsWordCharArgs)(nil), // 10: ngolofuzz.IsWordCharArgs
(*ProgNgdotStringArgs)(nil), // 11: ngolofuzz.ProgNgdotStringArgs
(*ProgNgdotPrefixArgs)(nil), // 12: ngolofuzz.ProgNgdotPrefixArgs
(*ProgNgdotStartCondArgs)(nil), // 13: ngolofuzz.ProgNgdotStartCondArgs
(*RegexpNgdotEqualArgs)(nil), // 14: ngolofuzz.RegexpNgdotEqualArgs
(*RegexpNgdotStringArgs)(nil), // 15: ngolofuzz.RegexpNgdotStringArgs
(*RegexpNgdotMaxCapArgs)(nil), // 16: ngolofuzz.RegexpNgdotMaxCapArgs
(*RegexpNgdotCapNamesArgs)(nil), // 17: ngolofuzz.RegexpNgdotCapNamesArgs
(*RegexpNgdotSimplifyArgs)(nil), // 18: ngolofuzz.RegexpNgdotSimplifyArgs
(*NgoloFuzzOne)(nil), // 19: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 20: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 21: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
1, // 0: ngolofuzz.OpNgdotStringArgs.i:type_name -> ngolofuzz.OpEnum
2, // 1: ngolofuzz.ErrorCodeNgdotStringArgs.e:type_name -> ngolofuzz.ErrorCodeEnum
3, // 2: ngolofuzz.ParseArgs.flags:type_name -> ngolofuzz.FlagsEnum
0, // 3: ngolofuzz.InstOpNgdotStringArgs.i:type_name -> ngolofuzz.InstOpEnum
4, // 4: ngolofuzz.NgoloFuzzOne.Compile:type_name -> ngolofuzz.CompileArgs
5, // 5: ngolofuzz.NgoloFuzzOne.OpNgdotString:type_name -> ngolofuzz.OpNgdotStringArgs
6, // 6: ngolofuzz.NgoloFuzzOne.ErrorCodeNgdotString:type_name -> ngolofuzz.ErrorCodeNgdotStringArgs
7, // 7: ngolofuzz.NgoloFuzzOne.Parse:type_name -> ngolofuzz.ParseArgs
8, // 8: ngolofuzz.NgoloFuzzOne.InstOpNgdotString:type_name -> ngolofuzz.InstOpNgdotStringArgs
9, // 9: ngolofuzz.NgoloFuzzOne.EmptyOpContext:type_name -> ngolofuzz.EmptyOpContextArgs
10, // 10: ngolofuzz.NgoloFuzzOne.IsWordChar:type_name -> ngolofuzz.IsWordCharArgs
11, // 11: ngolofuzz.NgoloFuzzOne.ProgNgdotString:type_name -> ngolofuzz.ProgNgdotStringArgs
12, // 12: ngolofuzz.NgoloFuzzOne.ProgNgdotPrefix:type_name -> ngolofuzz.ProgNgdotPrefixArgs
13, // 13: ngolofuzz.NgoloFuzzOne.ProgNgdotStartCond:type_name -> ngolofuzz.ProgNgdotStartCondArgs
14, // 14: ngolofuzz.NgoloFuzzOne.RegexpNgdotEqual:type_name -> ngolofuzz.RegexpNgdotEqualArgs
15, // 15: ngolofuzz.NgoloFuzzOne.RegexpNgdotString:type_name -> ngolofuzz.RegexpNgdotStringArgs
16, // 16: ngolofuzz.NgoloFuzzOne.RegexpNgdotMaxCap:type_name -> ngolofuzz.RegexpNgdotMaxCapArgs
17, // 17: ngolofuzz.NgoloFuzzOne.RegexpNgdotCapNames:type_name -> ngolofuzz.RegexpNgdotCapNamesArgs
18, // 18: ngolofuzz.NgoloFuzzOne.RegexpNgdotSimplify:type_name -> ngolofuzz.RegexpNgdotSimplifyArgs
19, // 19: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
20, // [20:20] is the sub-list for method output_type
20, // [20:20] is the sub-list for method input_type
20, // [20:20] is the sub-list for extension type_name
20, // [20:20] is the sub-list for extension extendee
0, // [0:20] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers this file's enums and messages
// with the protobuf runtime. It is idempotent: a second call returns
// immediately once File_ngolofuzz_proto is populated.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Message 15 (NgoloFuzzOne) and message 16 (NgoloFuzzAny) carry
	// oneofs; the runtime needs their wrapper types listed explicitly.
	file_ngolofuzz_proto_msgTypes[15].OneofWrappers = []any{
		(*NgoloFuzzOne_Compile)(nil),
		(*NgoloFuzzOne_OpNgdotString)(nil),
		(*NgoloFuzzOne_ErrorCodeNgdotString)(nil),
		(*NgoloFuzzOne_Parse)(nil),
		(*NgoloFuzzOne_InstOpNgdotString)(nil),
		(*NgoloFuzzOne_EmptyOpContext)(nil),
		(*NgoloFuzzOne_IsWordChar)(nil),
		(*NgoloFuzzOne_ProgNgdotString)(nil),
		(*NgoloFuzzOne_ProgNgdotPrefix)(nil),
		(*NgoloFuzzOne_ProgNgdotStartCond)(nil),
		(*NgoloFuzzOne_RegexpNgdotEqual)(nil),
		(*NgoloFuzzOne_RegexpNgdotString)(nil),
		(*NgoloFuzzOne_RegexpNgdotMaxCap)(nil),
		(*NgoloFuzzOne_RegexpNgdotCapNames)(nil),
		(*NgoloFuzzOne_RegexpNgdotSimplify)(nil),
	}
	file_ngolofuzz_proto_msgTypes[16].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      4,
			NumMessages:   18,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		EnumInfos:         file_ngolofuzz_proto_enumTypes,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the construction-time tables; the runtime owns them now.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_runtime
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory stub connection that serves a fixed byte
// buffer to Read calls and discards all writes, so fuzzed code can
// exercise connection handling without real network I/O.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next chunk of c.buf into b and reports how many bytes
// were actually copied. Once the buffer is exhausted it returns io.EOF.
//
// Bug fix: the original compared len(b) < len(c.buf)+c.offset, so for a
// destination larger than the remaining data (but smaller than
// len(buf)+offset) it copied only the remaining bytes yet returned
// n = len(b) and advanced offset by len(b), over-reporting the read.
// The comparison must be against the remaining byte count.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything that is left: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b is large enough for the rest of the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write pretends the whole of b was sent and silently discards the data.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	n = len(b)
	return n, nil
}
// Close marks the connection as fully consumed, so any further Read
// immediately reports end-of-stream.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf) // jump the read cursor to the end
	return nil
}
// FuzzingAddr is a placeholder net.Addr implementation used by FuzzingConn.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr reports a placeholder address; fuzzing connections have no
// real endpoint.
func (c *FuzzingConn) LocalAddr() net.Addr {
	var a FuzzingAddr
	return &a
}
// RemoteAddr reports a placeholder address; fuzzing connections have no
// real peer.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	var a FuzzingAddr
	return &a
}
// SetDeadline is a no-op: fuzzing connections never time out.
func (c *FuzzingConn) SetDeadline(time.Time) error {
	return nil
}
// SetReadDeadline is a no-op: fuzzing connections never time out.
func (c *FuzzingConn) SetReadDeadline(time.Time) error {
	return nil
}
// SetWriteDeadline is a no-op: fuzzing connections never time out.
func (c *FuzzingConn) SetWriteDeadline(time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn whose Read cursor starts at
// the beginning of the data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only emit these helper constructors when the harness actually needs them.

// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader positioned at the start of a.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows every element of a to the platform int type,
// preserving order.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates every element of a to uint16 (modulo 2^16),
// preserving order.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
func GetRune(s string) rune {
	for _, r := range s {
		return r // range decodes UTF-8; first iteration is the first rune
	}
	return '\x00'
}
// BlockProfileRecordNewFromFuzz converts the fuzzer-generated proto message
// p into a runtime.BlockProfileRecord. A nil input yields a nil result.
func BlockProfileRecordNewFromFuzz(p *BlockProfileRecordStruct) *runtime.BlockProfileRecord {
	if p == nil {
		return nil
	}
	rec := new(runtime.BlockProfileRecord)
	rec.Count = p.Count
	rec.Cycles = p.Cycles
	return rec
}
// FuzzNG_valid decodes data, which must be a valid protobuf-encoded
// NgoloFuzzList, and executes it; a decode failure is a harness bug and
// panics. String panics raised by the exercised APIs are swallowed.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// Expected string panics are absorbed; anything else is re-raised.
		if _, ok := r.(string); !ok {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is like FuzzNG_valid but tolerates inputs that are not a
// valid protobuf: undecodable data is simply ignored.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		// Expected string panics are absorbed; anything else is re-raised.
		if _, ok := r.(string); !ok {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump performed on the first
// FuzzNG_List call.
var initialized bool

// FuzzNG_List interprets gen as a sequence of runtime-package API calls and
// executes them in order. It returns 1 when the whole list was processed and
// 0 when it bailed out early (list longer than 4096 entries, or StartTrace
// failed). On the very first call, if FUZZ_NG_REPRODUCER names a file, the
// equivalent Go statements are written there via PrintNG_List.
//
// NOTE(review): CleanupResults / FramesResults / FuncResults are never
// appended to in the visible cases, so the result-consuming cases
// (CleanupNgdotStop, FramesNgdotNext, FuncNgdot*) always hit the empty-slice
// guard here; presumably other generated variants populate them. Confirm
// against the generator.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// Pools of results produced by earlier calls, consumed round-robin by
	// the method-call cases below.
	var FramesResults []*runtime.Frames
	FramesResultsIndex := 0
	var FuncResults []*runtime.Func
	FuncResultsIndex := 0
	var CleanupResults []*runtime.Cleanup
	CleanupResultsIndex := 0
	for l := range gen.List {
		// Cap the amount of work per input.
		if l > 4096 {
			return 0
		}
		// Dispatch on the oneof arm: each case maps to one runtime API call.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_SetCPUProfileRate:
			arg0 := int(a.SetCPUProfileRate.Hz)
			runtime.SetCPUProfileRate(arg0)
		case *NgoloFuzzOne_CPUProfile:
			runtime.CPUProfile()
		case *NgoloFuzzOne_NumCPU:
			runtime.NumCPU()
		case *NgoloFuzzOne_NumCgoCall:
			runtime.NumCgoCall()
		case *NgoloFuzzOne_NumGoroutine:
			runtime.NumGoroutine()
		case *NgoloFuzzOne_Caller:
			arg0 := int(a.Caller.Skip)
			runtime.Caller(arg0)
		case *NgoloFuzzOne_GOROOT:
			runtime.GOROOT()
		case *NgoloFuzzOne_Version:
			runtime.Version()
		case *NgoloFuzzOne_CleanupNgdotStop:
			if len(CleanupResults) == 0 {
				continue
			}
			arg0 := CleanupResults[CleanupResultsIndex]
			CleanupResultsIndex = (CleanupResultsIndex + 1) % len(CleanupResults)
			arg0.Stop()
		case *NgoloFuzzOne_KeepAlive:
			runtime.KeepAlive(a.KeepAlive.X)
		case *NgoloFuzzOne_GC:
			runtime.GC()
		case *NgoloFuzzOne_SetBlockProfileRate:
			arg0 := int(a.SetBlockProfileRate.Rate)
			runtime.SetBlockProfileRate(arg0)
		case *NgoloFuzzOne_SetMutexProfileFraction:
			arg0 := int(a.SetMutexProfileFraction.Rate)
			runtime.SetMutexProfileFraction(arg0)
		case *NgoloFuzzOne_Stack:
			runtime.Stack(a.Stack.Buf, a.Stack.All)
		case *NgoloFuzzOne_Gosched:
			runtime.Gosched()
		case *NgoloFuzzOne_Breakpoint:
			runtime.Breakpoint()
		case *NgoloFuzzOne_LockOSThread:
			runtime.LockOSThread()
		case *NgoloFuzzOne_UnlockOSThread:
			runtime.UnlockOSThread()
		case *NgoloFuzzOne_FramesNgdotNext:
			if len(FramesResults) == 0 {
				continue
			}
			arg0 := FramesResults[FramesResultsIndex]
			FramesResultsIndex = (FramesResultsIndex + 1) % len(FramesResults)
			arg0.Next()
		case *NgoloFuzzOne_FuncNgdotName:
			if len(FuncResults) == 0 {
				continue
			}
			arg0 := FuncResults[FuncResultsIndex]
			FuncResultsIndex = (FuncResultsIndex + 1) % len(FuncResults)
			arg0.Name()
		case *NgoloFuzzOne_FuncNgdotEntry:
			if len(FuncResults) == 0 {
				continue
			}
			arg0 := FuncResults[FuncResultsIndex]
			FuncResultsIndex = (FuncResultsIndex + 1) % len(FuncResults)
			arg0.Entry()
		case *NgoloFuzzOne_StartTrace:
			// StartTrace returns an error; abort the run if tracing could
			// not be started.
			r0 := runtime.StartTrace()
			if r0 != nil {
				r0.Error()
				return 0
			}
		}
	}
	return 1
}
// PrintNG_List writes to w one line of Go source per item in gen, mirroring
// the calls FuzzNG_List would execute — a human-readable reproducer for a
// fuzz input. WriteString errors are deliberately ignored (best-effort
// output). The *Nb counters mirror the result-pool lengths in FuzzNG_List;
// they are never incremented in the visible cases, so the pool-consuming
// cases are skipped here exactly as they are there.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	FramesNb := 0
	FramesResultsIndex := 0
	FuncNb := 0
	FuncResultsIndex := 0
	CleanupNb := 0
	CleanupResultsIndex := 0
	for l := range gen.List {
		// One printed statement per oneof arm, matching FuzzNG_List's dispatch.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_SetCPUProfileRate:
			w.WriteString(fmt.Sprintf("runtime.SetCPUProfileRate(int(%#+v))\n", a.SetCPUProfileRate.Hz))
		case *NgoloFuzzOne_CPUProfile:
			w.WriteString(fmt.Sprintf("runtime.CPUProfile()\n"))
		case *NgoloFuzzOne_NumCPU:
			w.WriteString(fmt.Sprintf("runtime.NumCPU()\n"))
		case *NgoloFuzzOne_NumCgoCall:
			w.WriteString(fmt.Sprintf("runtime.NumCgoCall()\n"))
		case *NgoloFuzzOne_NumGoroutine:
			w.WriteString(fmt.Sprintf("runtime.NumGoroutine()\n"))
		case *NgoloFuzzOne_Caller:
			w.WriteString(fmt.Sprintf("runtime.Caller(int(%#+v))\n", a.Caller.Skip))
		case *NgoloFuzzOne_GOROOT:
			w.WriteString(fmt.Sprintf("runtime.GOROOT()\n"))
		case *NgoloFuzzOne_Version:
			w.WriteString(fmt.Sprintf("runtime.Version()\n"))
		case *NgoloFuzzOne_CleanupNgdotStop:
			if CleanupNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Cleanup%d.Stop()\n", CleanupResultsIndex))
			CleanupResultsIndex = (CleanupResultsIndex + 1) % CleanupNb
		case *NgoloFuzzOne_KeepAlive:
			w.WriteString(fmt.Sprintf("runtime.KeepAlive(%#+v)\n", a.KeepAlive.X))
		case *NgoloFuzzOne_GC:
			w.WriteString(fmt.Sprintf("runtime.GC()\n"))
		case *NgoloFuzzOne_SetBlockProfileRate:
			w.WriteString(fmt.Sprintf("runtime.SetBlockProfileRate(int(%#+v))\n", a.SetBlockProfileRate.Rate))
		case *NgoloFuzzOne_SetMutexProfileFraction:
			w.WriteString(fmt.Sprintf("runtime.SetMutexProfileFraction(int(%#+v))\n", a.SetMutexProfileFraction.Rate))
		case *NgoloFuzzOne_Stack:
			w.WriteString(fmt.Sprintf("runtime.Stack(%#+v, %#+v)\n", a.Stack.Buf, a.Stack.All))
		case *NgoloFuzzOne_Gosched:
			w.WriteString(fmt.Sprintf("runtime.Gosched()\n"))
		case *NgoloFuzzOne_Breakpoint:
			w.WriteString(fmt.Sprintf("runtime.Breakpoint()\n"))
		case *NgoloFuzzOne_LockOSThread:
			w.WriteString(fmt.Sprintf("runtime.LockOSThread()\n"))
		case *NgoloFuzzOne_UnlockOSThread:
			w.WriteString(fmt.Sprintf("runtime.UnlockOSThread()\n"))
		case *NgoloFuzzOne_FramesNgdotNext:
			if FramesNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Frames%d.Next()\n", FramesResultsIndex))
			FramesResultsIndex = (FramesResultsIndex + 1) % FramesNb
		case *NgoloFuzzOne_FuncNgdotName:
			if FuncNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Func%d.Name()\n", FuncResultsIndex))
			FuncResultsIndex = (FuncResultsIndex + 1) % FuncNb
		case *NgoloFuzzOne_FuncNgdotEntry:
			if FuncNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Func%d.Entry()\n", FuncResultsIndex))
			FuncResultsIndex = (FuncResultsIndex + 1) % FuncNb
		case *NgoloFuzzOne_StartTrace:
			w.WriteString(fmt.Sprintf("runtime.StartTrace()\n"))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_runtime
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: both expressions must be non-negative, which pins this
// generated code to protoimpl generation 20 (fails to build against an
// incompatible protobuf runtime).
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// BlockProfileRecordStruct is the generated protobuf message carrying the
// Count and Cycles fields used to build a runtime.BlockProfileRecord
// (message index 0 in this file's descriptor table).
type BlockProfileRecordStruct struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Count         int64                  `protobuf:"varint,1,opt,name=Count,proto3" json:"Count,omitempty"`
	Cycles        int64                  `protobuf:"varint,2,opt,name=Cycles,proto3" json:"Cycles,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state (standard generated boilerplate).
func (x *BlockProfileRecordStruct) Reset() {
	*x = BlockProfileRecordStruct{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in proto text format.
func (x *BlockProfileRecordStruct) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BlockProfileRecordStruct) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *BlockProfileRecordStruct) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BlockProfileRecordStruct.ProtoReflect.Descriptor instead.
func (*BlockProfileRecordStruct) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetCount returns Count, tolerating a nil receiver.
func (x *BlockProfileRecordStruct) GetCount() int64 {
	if x != nil {
		return x.Count
	}
	return 0
}

// GetCycles returns Cycles, tolerating a nil receiver.
func (x *BlockProfileRecordStruct) GetCycles() int64 {
	if x != nil {
		return x.Cycles
	}
	return 0
}
// SetCPUProfileRateArgs is the generated message carrying the hz argument
// for the SetCPUProfileRate oneof arm (message index 1).
type SetCPUProfileRateArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Hz            int64                  `protobuf:"varint,1,opt,name=hz,proto3" json:"hz,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *SetCPUProfileRateArgs) Reset() {
	*x = SetCPUProfileRateArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *SetCPUProfileRateArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SetCPUProfileRateArgs) ProtoMessage() {}

func (x *SetCPUProfileRateArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SetCPUProfileRateArgs.ProtoReflect.Descriptor instead.
func (*SetCPUProfileRateArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetHz returns Hz, tolerating a nil receiver.
func (x *SetCPUProfileRateArgs) GetHz() int64 {
	if x != nil {
		return x.Hz
	}
	return 0
}
// CPUProfileArgs is a field-less generated message: the CPUProfile oneof arm
// carries no arguments (message index 2).
type CPUProfileArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CPUProfileArgs) Reset() {
	*x = CPUProfileArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CPUProfileArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CPUProfileArgs) ProtoMessage() {}

func (x *CPUProfileArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CPUProfileArgs.ProtoReflect.Descriptor instead.
func (*CPUProfileArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// NumCPUArgs is a field-less generated message for the NumCPU oneof arm
// (message index 3).
type NumCPUArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NumCPUArgs) Reset() {
	*x = NumCPUArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NumCPUArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NumCPUArgs) ProtoMessage() {}

func (x *NumCPUArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NumCPUArgs.ProtoReflect.Descriptor instead.
func (*NumCPUArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// NumCgoCallArgs is a field-less generated message for the NumCgoCall oneof
// arm (message index 4).
type NumCgoCallArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NumCgoCallArgs) Reset() {
	*x = NumCgoCallArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NumCgoCallArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NumCgoCallArgs) ProtoMessage() {}

func (x *NumCgoCallArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NumCgoCallArgs.ProtoReflect.Descriptor instead.
func (*NumCgoCallArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// NumGoroutineArgs is a field-less generated message for the NumGoroutine
// oneof arm (message index 5).
type NumGoroutineArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NumGoroutineArgs) Reset() {
	*x = NumGoroutineArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NumGoroutineArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NumGoroutineArgs) ProtoMessage() {}

func (x *NumGoroutineArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NumGoroutineArgs.ProtoReflect.Descriptor instead.
func (*NumGoroutineArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// CallerArgs is the generated message carrying the skip argument for the
// Caller oneof arm (message index 6).
type CallerArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Skip          int64                  `protobuf:"varint,1,opt,name=skip,proto3" json:"skip,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CallerArgs) Reset() {
	*x = CallerArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CallerArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CallerArgs) ProtoMessage() {}

func (x *CallerArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CallerArgs.ProtoReflect.Descriptor instead.
func (*CallerArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetSkip returns Skip, tolerating a nil receiver.
func (x *CallerArgs) GetSkip() int64 {
	if x != nil {
		return x.Skip
	}
	return 0
}
// GOROOTArgs is a field-less generated message for the GOROOT oneof arm
// (message index 7).
type GOROOTArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *GOROOTArgs) Reset() {
	*x = GOROOTArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *GOROOTArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GOROOTArgs) ProtoMessage() {}

func (x *GOROOTArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GOROOTArgs.ProtoReflect.Descriptor instead.
func (*GOROOTArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// VersionArgs is a field-less generated message for the Version oneof arm
// (message index 8).
type VersionArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *VersionArgs) Reset() {
	*x = VersionArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *VersionArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*VersionArgs) ProtoMessage() {}

func (x *VersionArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use VersionArgs.ProtoReflect.Descriptor instead.
func (*VersionArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// CleanupNgdotStopArgs is a field-less generated message for the
// Cleanup.Stop oneof arm (message index 9).
type CleanupNgdotStopArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CleanupNgdotStopArgs) Reset() {
	*x = CleanupNgdotStopArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CleanupNgdotStopArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CleanupNgdotStopArgs) ProtoMessage() {}

func (x *CleanupNgdotStopArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CleanupNgdotStopArgs.ProtoReflect.Descriptor instead.
func (*CleanupNgdotStopArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// KeepAliveArgs is the generated message carrying the x argument (an
// arbitrary fuzz value) for the KeepAlive oneof arm (message index 10).
type KeepAliveArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	X             *NgoloFuzzAny          `protobuf:"bytes,1,opt,name=x,proto3" json:"x,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *KeepAliveArgs) Reset() {
	*x = KeepAliveArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *KeepAliveArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*KeepAliveArgs) ProtoMessage() {}

func (x *KeepAliveArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use KeepAliveArgs.ProtoReflect.Descriptor instead.
func (*KeepAliveArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetX returns X, tolerating a nil receiver.
func (x *KeepAliveArgs) GetX() *NgoloFuzzAny {
	if x != nil {
		return x.X
	}
	return nil
}
// GCArgs is a field-less generated message for the GC oneof arm
// (message index 11).
type GCArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *GCArgs) Reset() {
	*x = GCArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *GCArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GCArgs) ProtoMessage() {}

func (x *GCArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GCArgs.ProtoReflect.Descriptor instead.
func (*GCArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// SetBlockProfileRateArgs is the generated message carrying the rate
// argument for the SetBlockProfileRate oneof arm (message index 12).
type SetBlockProfileRateArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Rate          int64                  `protobuf:"varint,1,opt,name=rate,proto3" json:"rate,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *SetBlockProfileRateArgs) Reset() {
	*x = SetBlockProfileRateArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *SetBlockProfileRateArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SetBlockProfileRateArgs) ProtoMessage() {}

func (x *SetBlockProfileRateArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SetBlockProfileRateArgs.ProtoReflect.Descriptor instead.
func (*SetBlockProfileRateArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// GetRate returns Rate, tolerating a nil receiver.
func (x *SetBlockProfileRateArgs) GetRate() int64 {
	if x != nil {
		return x.Rate
	}
	return 0
}

// SetMutexProfileFractionArgs is the generated message carrying the rate
// argument for the SetMutexProfileFraction oneof arm (message index 13).
type SetMutexProfileFractionArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Rate          int64                  `protobuf:"varint,1,opt,name=rate,proto3" json:"rate,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *SetMutexProfileFractionArgs) Reset() {
	*x = SetMutexProfileFractionArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *SetMutexProfileFractionArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SetMutexProfileFractionArgs) ProtoMessage() {}

func (x *SetMutexProfileFractionArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SetMutexProfileFractionArgs.ProtoReflect.Descriptor instead.
func (*SetMutexProfileFractionArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// GetRate returns Rate, tolerating a nil receiver.
func (x *SetMutexProfileFractionArgs) GetRate() int64 {
	if x != nil {
		return x.Rate
	}
	return 0
}
// StackArgs is the generated message carrying the buf and all arguments for
// the Stack oneof arm (message index 14).
type StackArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Buf           []byte                 `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
	All           bool                   `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *StackArgs) Reset() {
	*x = StackArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *StackArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*StackArgs) ProtoMessage() {}

func (x *StackArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use StackArgs.ProtoReflect.Descriptor instead.
func (*StackArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetBuf returns Buf, tolerating a nil receiver.
func (x *StackArgs) GetBuf() []byte {
	if x != nil {
		return x.Buf
	}
	return nil
}

// GetAll returns All, tolerating a nil receiver.
func (x *StackArgs) GetAll() bool {
	if x != nil {
		return x.All
	}
	return false
}
// GoschedArgs is a field-less generated message for the Gosched oneof arm
// (message index 15).
type GoschedArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *GoschedArgs) Reset() {
	*x = GoschedArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *GoschedArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GoschedArgs) ProtoMessage() {}

func (x *GoschedArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GoschedArgs.ProtoReflect.Descriptor instead.
func (*GoschedArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// BreakpointArgs is a field-less generated message for the Breakpoint oneof
// arm (message index 16).
type BreakpointArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *BreakpointArgs) Reset() {
	*x = BreakpointArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *BreakpointArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BreakpointArgs) ProtoMessage() {}

func (x *BreakpointArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BreakpointArgs.ProtoReflect.Descriptor instead.
func (*BreakpointArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// LockOSThreadArgs is a field-less generated message for the LockOSThread
// oneof arm (message index 17).
type LockOSThreadArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *LockOSThreadArgs) Reset() {
	*x = LockOSThreadArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *LockOSThreadArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LockOSThreadArgs) ProtoMessage() {}

func (x *LockOSThreadArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LockOSThreadArgs.ProtoReflect.Descriptor instead.
func (*LockOSThreadArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}

// UnlockOSThreadArgs is a field-less generated message for the
// UnlockOSThread oneof arm (message index 18).
type UnlockOSThreadArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *UnlockOSThreadArgs) Reset() {
	*x = UnlockOSThreadArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *UnlockOSThreadArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UnlockOSThreadArgs) ProtoMessage() {}

func (x *UnlockOSThreadArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UnlockOSThreadArgs.ProtoReflect.Descriptor instead.
func (*UnlockOSThreadArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// FramesNgdotNextArgs is a field-less generated message for the Frames.Next
// oneof arm (message index 19).
type FramesNgdotNextArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *FramesNgdotNextArgs) Reset() {
	*x = FramesNgdotNextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[19]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *FramesNgdotNextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FramesNgdotNextArgs) ProtoMessage() {}

func (x *FramesNgdotNextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[19]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FramesNgdotNextArgs.ProtoReflect.Descriptor instead.
func (*FramesNgdotNextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}

// FuncNgdotNameArgs is a field-less generated message for the Func.Name
// oneof arm (message index 20).
type FuncNgdotNameArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *FuncNgdotNameArgs) Reset() {
	*x = FuncNgdotNameArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[20]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *FuncNgdotNameArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FuncNgdotNameArgs) ProtoMessage() {}

func (x *FuncNgdotNameArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[20]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FuncNgdotNameArgs.ProtoReflect.Descriptor instead.
func (*FuncNgdotNameArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}

// FuncNgdotEntryArgs is a field-less generated message for the Func.Entry
// oneof arm (message index 21).
type FuncNgdotEntryArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *FuncNgdotEntryArgs) Reset() {
	*x = FuncNgdotEntryArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[21]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *FuncNgdotEntryArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FuncNgdotEntryArgs) ProtoMessage() {}

func (x *FuncNgdotEntryArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[21]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FuncNgdotEntryArgs.ProtoReflect.Descriptor instead.
func (*FuncNgdotEntryArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}

// StartTraceArgs is a field-less generated message for the StartTrace oneof
// arm (message index 22).
type StartTraceArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *StartTraceArgs) Reset() {
	*x = StartTraceArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[22]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *StartTraceArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*StartTraceArgs) ProtoMessage() {}

func (x *StartTraceArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[22]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use StartTraceArgs.ProtoReflect.Descriptor instead.
func (*StartTraceArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// NgoloFuzzOne is the generated union message (message index 23): exactly
// one of the wrapper types below populates Item, selecting which runtime API
// call a list entry represents.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_SetCPUProfileRate
	//	*NgoloFuzzOne_CPUProfile
	//	*NgoloFuzzOne_NumCPU
	//	*NgoloFuzzOne_NumCgoCall
	//	*NgoloFuzzOne_NumGoroutine
	//	*NgoloFuzzOne_Caller
	//	*NgoloFuzzOne_GOROOT
	//	*NgoloFuzzOne_Version
	//	*NgoloFuzzOne_CleanupNgdotStop
	//	*NgoloFuzzOne_KeepAlive
	//	*NgoloFuzzOne_GC
	//	*NgoloFuzzOne_SetBlockProfileRate
	//	*NgoloFuzzOne_SetMutexProfileFraction
	//	*NgoloFuzzOne_Stack
	//	*NgoloFuzzOne_Gosched
	//	*NgoloFuzzOne_Breakpoint
	//	*NgoloFuzzOne_LockOSThread
	//	*NgoloFuzzOne_UnlockOSThread
	//	*NgoloFuzzOne_FramesNgdotNext
	//	*NgoloFuzzOne_FuncNgdotName
	//	*NgoloFuzzOne_FuncNgdotEntry
	//	*NgoloFuzzOne_StartTrace
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[23]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[23]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}

// GetItem returns the populated oneof wrapper, tolerating a nil receiver.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below each return their arm's payload when that arm is
// the one populating Item, and nil otherwise (standard generated oneof
// boilerplate; the inner x deliberately shadows the receiver).

func (x *NgoloFuzzOne) GetSetCPUProfileRate() *SetCPUProfileRateArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_SetCPUProfileRate); ok {
			return x.SetCPUProfileRate
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCPUProfile() *CPUProfileArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CPUProfile); ok {
			return x.CPUProfile
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNumCPU() *NumCPUArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NumCPU); ok {
			return x.NumCPU
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNumCgoCall() *NumCgoCallArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NumCgoCall); ok {
			return x.NumCgoCall
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNumGoroutine() *NumGoroutineArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NumGoroutine); ok {
			return x.NumGoroutine
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCaller() *CallerArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Caller); ok {
			return x.Caller
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetGOROOT() *GOROOTArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_GOROOT); ok {
			return x.GOROOT
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetVersion() *VersionArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Version); ok {
			return x.Version
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetCleanupNgdotStop() *CleanupNgdotStopArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CleanupNgdotStop); ok {
			return x.CleanupNgdotStop
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetKeepAlive() *KeepAliveArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_KeepAlive); ok {
			return x.KeepAlive
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetGC() *GCArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_GC); ok {
			return x.GC
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetSetBlockProfileRate() *SetBlockProfileRateArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_SetBlockProfileRate); ok {
			return x.SetBlockProfileRate
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetSetMutexProfileFraction() *SetMutexProfileFractionArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SetMutexProfileFraction); ok {
return x.SetMutexProfileFraction
}
}
return nil
}
func (x *NgoloFuzzOne) GetStack() *StackArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Stack); ok {
return x.Stack
}
}
return nil
}
func (x *NgoloFuzzOne) GetGosched() *GoschedArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Gosched); ok {
return x.Gosched
}
}
return nil
}
func (x *NgoloFuzzOne) GetBreakpoint() *BreakpointArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Breakpoint); ok {
return x.Breakpoint
}
}
return nil
}
func (x *NgoloFuzzOne) GetLockOSThread() *LockOSThreadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LockOSThread); ok {
return x.LockOSThread
}
}
return nil
}
func (x *NgoloFuzzOne) GetUnlockOSThread() *UnlockOSThreadArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_UnlockOSThread); ok {
return x.UnlockOSThread
}
}
return nil
}
func (x *NgoloFuzzOne) GetFramesNgdotNext() *FramesNgdotNextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FramesNgdotNext); ok {
return x.FramesNgdotNext
}
}
return nil
}
func (x *NgoloFuzzOne) GetFuncNgdotName() *FuncNgdotNameArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FuncNgdotName); ok {
return x.FuncNgdotName
}
}
return nil
}
func (x *NgoloFuzzOne) GetFuncNgdotEntry() *FuncNgdotEntryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FuncNgdotEntry); ok {
return x.FuncNgdotEntry
}
}
return nil
}
func (x *NgoloFuzzOne) GetStartTrace() *StartTraceArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_StartTrace); ok {
return x.StartTrace
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_SetCPUProfileRate struct {
SetCPUProfileRate *SetCPUProfileRateArgs `protobuf:"bytes,1,opt,name=SetCPUProfileRate,proto3,oneof"`
}
type NgoloFuzzOne_CPUProfile struct {
CPUProfile *CPUProfileArgs `protobuf:"bytes,2,opt,name=CPUProfile,proto3,oneof"`
}
type NgoloFuzzOne_NumCPU struct {
NumCPU *NumCPUArgs `protobuf:"bytes,3,opt,name=NumCPU,proto3,oneof"`
}
type NgoloFuzzOne_NumCgoCall struct {
NumCgoCall *NumCgoCallArgs `protobuf:"bytes,4,opt,name=NumCgoCall,proto3,oneof"`
}
type NgoloFuzzOne_NumGoroutine struct {
NumGoroutine *NumGoroutineArgs `protobuf:"bytes,5,opt,name=NumGoroutine,proto3,oneof"`
}
type NgoloFuzzOne_Caller struct {
Caller *CallerArgs `protobuf:"bytes,6,opt,name=Caller,proto3,oneof"`
}
type NgoloFuzzOne_GOROOT struct {
GOROOT *GOROOTArgs `protobuf:"bytes,7,opt,name=GOROOT,proto3,oneof"`
}
type NgoloFuzzOne_Version struct {
Version *VersionArgs `protobuf:"bytes,8,opt,name=Version,proto3,oneof"`
}
type NgoloFuzzOne_CleanupNgdotStop struct {
CleanupNgdotStop *CleanupNgdotStopArgs `protobuf:"bytes,9,opt,name=CleanupNgdotStop,proto3,oneof"`
}
type NgoloFuzzOne_KeepAlive struct {
KeepAlive *KeepAliveArgs `protobuf:"bytes,10,opt,name=KeepAlive,proto3,oneof"`
}
type NgoloFuzzOne_GC struct {
GC *GCArgs `protobuf:"bytes,11,opt,name=GC,proto3,oneof"`
}
type NgoloFuzzOne_SetBlockProfileRate struct {
SetBlockProfileRate *SetBlockProfileRateArgs `protobuf:"bytes,12,opt,name=SetBlockProfileRate,proto3,oneof"`
}
type NgoloFuzzOne_SetMutexProfileFraction struct {
SetMutexProfileFraction *SetMutexProfileFractionArgs `protobuf:"bytes,13,opt,name=SetMutexProfileFraction,proto3,oneof"`
}
type NgoloFuzzOne_Stack struct {
Stack *StackArgs `protobuf:"bytes,14,opt,name=Stack,proto3,oneof"`
}
type NgoloFuzzOne_Gosched struct {
Gosched *GoschedArgs `protobuf:"bytes,15,opt,name=Gosched,proto3,oneof"`
}
type NgoloFuzzOne_Breakpoint struct {
Breakpoint *BreakpointArgs `protobuf:"bytes,16,opt,name=Breakpoint,proto3,oneof"`
}
type NgoloFuzzOne_LockOSThread struct {
LockOSThread *LockOSThreadArgs `protobuf:"bytes,17,opt,name=LockOSThread,proto3,oneof"`
}
type NgoloFuzzOne_UnlockOSThread struct {
UnlockOSThread *UnlockOSThreadArgs `protobuf:"bytes,18,opt,name=UnlockOSThread,proto3,oneof"`
}
type NgoloFuzzOne_FramesNgdotNext struct {
FramesNgdotNext *FramesNgdotNextArgs `protobuf:"bytes,19,opt,name=FramesNgdotNext,proto3,oneof"`
}
type NgoloFuzzOne_FuncNgdotName struct {
FuncNgdotName *FuncNgdotNameArgs `protobuf:"bytes,20,opt,name=FuncNgdotName,proto3,oneof"`
}
type NgoloFuzzOne_FuncNgdotEntry struct {
FuncNgdotEntry *FuncNgdotEntryArgs `protobuf:"bytes,21,opt,name=FuncNgdotEntry,proto3,oneof"`
}
type NgoloFuzzOne_StartTrace struct {
StartTrace *StartTraceArgs `protobuf:"bytes,22,opt,name=StartTrace,proto3,oneof"`
}
func (*NgoloFuzzOne_SetCPUProfileRate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CPUProfile) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NumCPU) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NumCgoCall) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NumGoroutine) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Caller) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GOROOT) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Version) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CleanupNgdotStop) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_KeepAlive) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_GC) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SetBlockProfileRate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SetMutexProfileFraction) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Stack) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Gosched) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Breakpoint) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LockOSThread) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UnlockOSThread) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FramesNgdotNext) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FuncNgdotName) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FuncNgdotEntry) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_StartTrace) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"H\n" +
"\x18BlockProfileRecordStruct\x12\x14\n" +
"\x05Count\x18\x01 \x01(\x03R\x05Count\x12\x16\n" +
"\x06Cycles\x18\x02 \x01(\x03R\x06Cycles\"'\n" +
"\x15SetCPUProfileRateArgs\x12\x0e\n" +
"\x02hz\x18\x01 \x01(\x03R\x02hz\"\x10\n" +
"\x0eCPUProfileArgs\"\f\n" +
"\n" +
"NumCPUArgs\"\x10\n" +
"\x0eNumCgoCallArgs\"\x12\n" +
"\x10NumGoroutineArgs\" \n" +
"\n" +
"CallerArgs\x12\x12\n" +
"\x04skip\x18\x01 \x01(\x03R\x04skip\"\f\n" +
"\n" +
"GOROOTArgs\"\r\n" +
"\vVersionArgs\"\x16\n" +
"\x14CleanupNgdotStopArgs\"6\n" +
"\rKeepAliveArgs\x12%\n" +
"\x01x\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01x\"\b\n" +
"\x06GCArgs\"-\n" +
"\x17SetBlockProfileRateArgs\x12\x12\n" +
"\x04rate\x18\x01 \x01(\x03R\x04rate\"1\n" +
"\x1bSetMutexProfileFractionArgs\x12\x12\n" +
"\x04rate\x18\x01 \x01(\x03R\x04rate\"/\n" +
"\tStackArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\x12\x10\n" +
"\x03all\x18\x02 \x01(\bR\x03all\"\r\n" +
"\vGoschedArgs\"\x10\n" +
"\x0eBreakpointArgs\"\x12\n" +
"\x10LockOSThreadArgs\"\x14\n" +
"\x12UnlockOSThreadArgs\"\x15\n" +
"\x13FramesNgdotNextArgs\"\x13\n" +
"\x11FuncNgdotNameArgs\"\x14\n" +
"\x12FuncNgdotEntryArgs\"\x10\n" +
"\x0eStartTraceArgs\"\x99\v\n" +
"\fNgoloFuzzOne\x12P\n" +
"\x11SetCPUProfileRate\x18\x01 \x01(\v2 .ngolofuzz.SetCPUProfileRateArgsH\x00R\x11SetCPUProfileRate\x12;\n" +
"\n" +
"CPUProfile\x18\x02 \x01(\v2\x19.ngolofuzz.CPUProfileArgsH\x00R\n" +
"CPUProfile\x12/\n" +
"\x06NumCPU\x18\x03 \x01(\v2\x15.ngolofuzz.NumCPUArgsH\x00R\x06NumCPU\x12;\n" +
"\n" +
"NumCgoCall\x18\x04 \x01(\v2\x19.ngolofuzz.NumCgoCallArgsH\x00R\n" +
"NumCgoCall\x12A\n" +
"\fNumGoroutine\x18\x05 \x01(\v2\x1b.ngolofuzz.NumGoroutineArgsH\x00R\fNumGoroutine\x12/\n" +
"\x06Caller\x18\x06 \x01(\v2\x15.ngolofuzz.CallerArgsH\x00R\x06Caller\x12/\n" +
"\x06GOROOT\x18\a \x01(\v2\x15.ngolofuzz.GOROOTArgsH\x00R\x06GOROOT\x122\n" +
"\aVersion\x18\b \x01(\v2\x16.ngolofuzz.VersionArgsH\x00R\aVersion\x12M\n" +
"\x10CleanupNgdotStop\x18\t \x01(\v2\x1f.ngolofuzz.CleanupNgdotStopArgsH\x00R\x10CleanupNgdotStop\x128\n" +
"\tKeepAlive\x18\n" +
" \x01(\v2\x18.ngolofuzz.KeepAliveArgsH\x00R\tKeepAlive\x12#\n" +
"\x02GC\x18\v \x01(\v2\x11.ngolofuzz.GCArgsH\x00R\x02GC\x12V\n" +
"\x13SetBlockProfileRate\x18\f \x01(\v2\".ngolofuzz.SetBlockProfileRateArgsH\x00R\x13SetBlockProfileRate\x12b\n" +
"\x17SetMutexProfileFraction\x18\r \x01(\v2&.ngolofuzz.SetMutexProfileFractionArgsH\x00R\x17SetMutexProfileFraction\x12,\n" +
"\x05Stack\x18\x0e \x01(\v2\x14.ngolofuzz.StackArgsH\x00R\x05Stack\x122\n" +
"\aGosched\x18\x0f \x01(\v2\x16.ngolofuzz.GoschedArgsH\x00R\aGosched\x12;\n" +
"\n" +
"Breakpoint\x18\x10 \x01(\v2\x19.ngolofuzz.BreakpointArgsH\x00R\n" +
"Breakpoint\x12A\n" +
"\fLockOSThread\x18\x11 \x01(\v2\x1b.ngolofuzz.LockOSThreadArgsH\x00R\fLockOSThread\x12G\n" +
"\x0eUnlockOSThread\x18\x12 \x01(\v2\x1d.ngolofuzz.UnlockOSThreadArgsH\x00R\x0eUnlockOSThread\x12J\n" +
"\x0fFramesNgdotNext\x18\x13 \x01(\v2\x1e.ngolofuzz.FramesNgdotNextArgsH\x00R\x0fFramesNgdotNext\x12D\n" +
"\rFuncNgdotName\x18\x14 \x01(\v2\x1c.ngolofuzz.FuncNgdotNameArgsH\x00R\rFuncNgdotName\x12G\n" +
"\x0eFuncNgdotEntry\x18\x15 \x01(\v2\x1d.ngolofuzz.FuncNgdotEntryArgsH\x00R\x0eFuncNgdotEntry\x12;\n" +
"\n" +
"StartTrace\x18\x16 \x01(\v2\x19.ngolofuzz.StartTraceArgsH\x00R\n" +
"StartTraceB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x14Z\x12./;fuzz_ng_runtimeb\x06proto3"
// Lazy gzip cache for the raw file descriptor; Descriptor() methods hand
// out the compressed form. (protoc-gen-go generated; do not hand-edit.)
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP compresses the raw descriptor exactly
// once and returns the cached bytes on every subsequent call.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
// unsafe.Slice/StringData views the descriptor string as bytes without copying.
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 26)
var file_ngolofuzz_proto_goTypes = []any{
(*BlockProfileRecordStruct)(nil), // 0: ngolofuzz.BlockProfileRecordStruct
(*SetCPUProfileRateArgs)(nil), // 1: ngolofuzz.SetCPUProfileRateArgs
(*CPUProfileArgs)(nil), // 2: ngolofuzz.CPUProfileArgs
(*NumCPUArgs)(nil), // 3: ngolofuzz.NumCPUArgs
(*NumCgoCallArgs)(nil), // 4: ngolofuzz.NumCgoCallArgs
(*NumGoroutineArgs)(nil), // 5: ngolofuzz.NumGoroutineArgs
(*CallerArgs)(nil), // 6: ngolofuzz.CallerArgs
(*GOROOTArgs)(nil), // 7: ngolofuzz.GOROOTArgs
(*VersionArgs)(nil), // 8: ngolofuzz.VersionArgs
(*CleanupNgdotStopArgs)(nil), // 9: ngolofuzz.CleanupNgdotStopArgs
(*KeepAliveArgs)(nil), // 10: ngolofuzz.KeepAliveArgs
(*GCArgs)(nil), // 11: ngolofuzz.GCArgs
(*SetBlockProfileRateArgs)(nil), // 12: ngolofuzz.SetBlockProfileRateArgs
(*SetMutexProfileFractionArgs)(nil), // 13: ngolofuzz.SetMutexProfileFractionArgs
(*StackArgs)(nil), // 14: ngolofuzz.StackArgs
(*GoschedArgs)(nil), // 15: ngolofuzz.GoschedArgs
(*BreakpointArgs)(nil), // 16: ngolofuzz.BreakpointArgs
(*LockOSThreadArgs)(nil), // 17: ngolofuzz.LockOSThreadArgs
(*UnlockOSThreadArgs)(nil), // 18: ngolofuzz.UnlockOSThreadArgs
(*FramesNgdotNextArgs)(nil), // 19: ngolofuzz.FramesNgdotNextArgs
(*FuncNgdotNameArgs)(nil), // 20: ngolofuzz.FuncNgdotNameArgs
(*FuncNgdotEntryArgs)(nil), // 21: ngolofuzz.FuncNgdotEntryArgs
(*StartTraceArgs)(nil), // 22: ngolofuzz.StartTraceArgs
(*NgoloFuzzOne)(nil), // 23: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 24: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 25: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
24, // 0: ngolofuzz.KeepAliveArgs.x:type_name -> ngolofuzz.NgoloFuzzAny
1, // 1: ngolofuzz.NgoloFuzzOne.SetCPUProfileRate:type_name -> ngolofuzz.SetCPUProfileRateArgs
2, // 2: ngolofuzz.NgoloFuzzOne.CPUProfile:type_name -> ngolofuzz.CPUProfileArgs
3, // 3: ngolofuzz.NgoloFuzzOne.NumCPU:type_name -> ngolofuzz.NumCPUArgs
4, // 4: ngolofuzz.NgoloFuzzOne.NumCgoCall:type_name -> ngolofuzz.NumCgoCallArgs
5, // 5: ngolofuzz.NgoloFuzzOne.NumGoroutine:type_name -> ngolofuzz.NumGoroutineArgs
6, // 6: ngolofuzz.NgoloFuzzOne.Caller:type_name -> ngolofuzz.CallerArgs
7, // 7: ngolofuzz.NgoloFuzzOne.GOROOT:type_name -> ngolofuzz.GOROOTArgs
8, // 8: ngolofuzz.NgoloFuzzOne.Version:type_name -> ngolofuzz.VersionArgs
9, // 9: ngolofuzz.NgoloFuzzOne.CleanupNgdotStop:type_name -> ngolofuzz.CleanupNgdotStopArgs
10, // 10: ngolofuzz.NgoloFuzzOne.KeepAlive:type_name -> ngolofuzz.KeepAliveArgs
11, // 11: ngolofuzz.NgoloFuzzOne.GC:type_name -> ngolofuzz.GCArgs
12, // 12: ngolofuzz.NgoloFuzzOne.SetBlockProfileRate:type_name -> ngolofuzz.SetBlockProfileRateArgs
13, // 13: ngolofuzz.NgoloFuzzOne.SetMutexProfileFraction:type_name -> ngolofuzz.SetMutexProfileFractionArgs
14, // 14: ngolofuzz.NgoloFuzzOne.Stack:type_name -> ngolofuzz.StackArgs
15, // 15: ngolofuzz.NgoloFuzzOne.Gosched:type_name -> ngolofuzz.GoschedArgs
16, // 16: ngolofuzz.NgoloFuzzOne.Breakpoint:type_name -> ngolofuzz.BreakpointArgs
17, // 17: ngolofuzz.NgoloFuzzOne.LockOSThread:type_name -> ngolofuzz.LockOSThreadArgs
18, // 18: ngolofuzz.NgoloFuzzOne.UnlockOSThread:type_name -> ngolofuzz.UnlockOSThreadArgs
19, // 19: ngolofuzz.NgoloFuzzOne.FramesNgdotNext:type_name -> ngolofuzz.FramesNgdotNextArgs
20, // 20: ngolofuzz.NgoloFuzzOne.FuncNgdotName:type_name -> ngolofuzz.FuncNgdotNameArgs
21, // 21: ngolofuzz.NgoloFuzzOne.FuncNgdotEntry:type_name -> ngolofuzz.FuncNgdotEntryArgs
22, // 22: ngolofuzz.NgoloFuzzOne.StartTrace:type_name -> ngolofuzz.StartTraceArgs
23, // 23: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
24, // [24:24] is the sub-list for method output_type
24, // [24:24] is the sub-list for method input_type
24, // [24:24] is the sub-list for extension type_name
24, // [24:24] is the sub-list for extension extendee
0, // [0:24] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init registers this file's messages with the
// protobuf runtime. It is idempotent: a second call returns immediately.
// (protoc-gen-go generated; do not hand-edit.)
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Tell the runtime which wrapper types implement the NgoloFuzzOne "item" oneof.
file_ngolofuzz_proto_msgTypes[23].OneofWrappers = []any{
(*NgoloFuzzOne_SetCPUProfileRate)(nil),
(*NgoloFuzzOne_CPUProfile)(nil),
(*NgoloFuzzOne_NumCPU)(nil),
(*NgoloFuzzOne_NumCgoCall)(nil),
(*NgoloFuzzOne_NumGoroutine)(nil),
(*NgoloFuzzOne_Caller)(nil),
(*NgoloFuzzOne_GOROOT)(nil),
(*NgoloFuzzOne_Version)(nil),
(*NgoloFuzzOne_CleanupNgdotStop)(nil),
(*NgoloFuzzOne_KeepAlive)(nil),
(*NgoloFuzzOne_GC)(nil),
(*NgoloFuzzOne_SetBlockProfileRate)(nil),
(*NgoloFuzzOne_SetMutexProfileFraction)(nil),
(*NgoloFuzzOne_Stack)(nil),
(*NgoloFuzzOne_Gosched)(nil),
(*NgoloFuzzOne_Breakpoint)(nil),
(*NgoloFuzzOne_LockOSThread)(nil),
(*NgoloFuzzOne_UnlockOSThread)(nil),
(*NgoloFuzzOne_FramesNgdotNext)(nil),
(*NgoloFuzzOne_FuncNgdotName)(nil),
(*NgoloFuzzOne_FuncNgdotEntry)(nil),
(*NgoloFuzzOne_StartTrace)(nil),
}
// Same registration for the NgoloFuzzAny "item" oneof.
file_ngolofuzz_proto_msgTypes[24].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 26,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Drop the temporary tables so they can be garbage-collected.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_runtime_cgo
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"runtime/cgo"
"time"
)
// FuzzingConn is an in-memory stand-in for a net.Conn: Read serves a
// fixed fuzz-supplied buffer and Write discards its input.
type FuzzingConn struct {
buf []byte // fuzz-provided payload served by Read
offset int // current read position within buf
}
// Read copies up to len(b) bytes of the remaining fuzz payload into b and
// reports how many bytes were actually copied. Once the payload is
// exhausted it returns 0, io.EOF, matching the net.Conn Read contract.
//
// Bug fix: the old partial-read branch tested
// len(b) < len(c.buf)+c.offset instead of len(c.buf)-c.offset, so a read
// near the end of the buffer could report len(b) bytes "read" when fewer
// were copied, and advance offset past len(c.buf). Using copy's return
// value handles the partial and final read uniformly and keeps offset
// within bounds.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports that every byte was accepted, so code
// under fuzz can "send" without a real peer.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the payload as fully consumed so subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a fixed placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}
// Network returns a constant fake network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
// String returns a constant fake address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr returns the placeholder address; the fuzzing conn has no real endpoints.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns the placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op: in-memory reads and writes never block.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn builds a FuzzingConn whose readable payload is a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit these helper constructors only when the fuzzed API actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer and returns
// it as a fresh *big.Int (zero for an empty or nil slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader for APIs that consume
// *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to the platform int width,
// returning a freshly allocated slice of the same length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to its low 16 bits,
// returning a freshly allocated slice of the same length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid leading UTF-8 yields utf8.RuneError, as with any range over a string.
func GetRune(s string) rune {
	r := rune('\x00')
	for _, first := range s {
		r = first
		break
	}
	return r
}
// FuzzNG_valid runs one fuzz iteration on data that is expected to be a
// valid protobuf encoding of NgoloFuzzList. A decode failure panics,
// because with an LPM-style generator it indicates a broken harness, not
// an interesting input. Note the recover below is installed only after
// Unmarshal, so that harness panic is deliberately not swallowed.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
// String panics are treated as expected failures from the
// exercised API and suppressed. TODO confirm this matches the
// intended triage policy.
default:
// Re-raise runtime errors and other panic values so genuine
// bugs still crash the fuzzer.
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// FuzzNG_unsure accepts input that may not be a valid protobuf encoding;
// inputs that fail to decode are rejected (return 0) instead of panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
// Unlike FuzzNG_valid, undecodable input is simply uninteresting here.
return 0
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
// String panics are treated as expected failures from the
// exercised API and suppressed.
default:
// Re-raise other panic values so genuine bugs still crash the fuzzer.
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump in FuzzNG_List.
var initialized bool
// FuzzNG_List interprets gen as a sequence of runtime/cgo API calls and
// executes them. Created handles are collected and reused round-robin as
// receivers for Value/Delete. Returns 1 when the list was executed, 0 if
// it was truncated for being too long.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// On first call, optionally write a Go-source reproducer of this
// input to the file named by FUZZ_NG_REPRODUCER.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var HandleResults []*cgo.Handle
HandleResultsIndex := 0
for l := range gen.List {
// Cap work per input to keep iterations fast.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewHandle:
r0 := cgo.NewHandle(a.NewHandle.V)
// Store a pointer so Value/Delete below act on this exact handle.
HandleResults = append(HandleResults, &r0)
case *NgoloFuzzOne_HandleNgdotValue:
if len(HandleResults) == 0 {
continue
}
arg0 := HandleResults[HandleResultsIndex]
HandleResultsIndex = (HandleResultsIndex + 1) % len(HandleResults)
arg0.Value()
case *NgoloFuzzOne_HandleNgdotDelete:
if len(HandleResults) == 0 {
continue
}
// NOTE(review): a handle can be selected for Delete twice; cgo
// panics with a string on misuse, which callers' recover absorbs.
arg0 := HandleResults[HandleResultsIndex]
HandleResultsIndex = (HandleResultsIndex + 1) % len(HandleResults)
arg0.Delete()
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w, mirroring the
// round-robin handle selection performed by FuzzNG_List so the printed
// program matches the executed call sequence. WriteString errors are
// deliberately ignored: the dump is best-effort debugging output.
// NOTE(review): FuzzNG_List stops after 4096 items but this prints the
// whole list — confirm that divergence is intended.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
HandleNb := 0
HandleResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_NewHandle:
w.WriteString(fmt.Sprintf("Handle%d := cgo.NewHandle(%#+v)\n", HandleNb, a.NewHandle.V))
HandleNb = HandleNb + 1
case *NgoloFuzzOne_HandleNgdotValue:
if HandleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Handle%d.Value()\n", HandleResultsIndex))
HandleResultsIndex = (HandleResultsIndex + 1) % HandleNb
case *NgoloFuzzOne_HandleNgdotDelete:
if HandleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Handle%d.Delete()\n", HandleResultsIndex))
HandleResultsIndex = (HandleResultsIndex + 1) % HandleNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_runtime_cgo
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NewHandleArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
V *NgoloFuzzAny `protobuf:"bytes,1,opt,name=v,proto3" json:"v,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewHandleArgs) Reset() {
*x = NewHandleArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewHandleArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewHandleArgs) ProtoMessage() {}
func (x *NewHandleArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewHandleArgs.ProtoReflect.Descriptor instead.
func (*NewHandleArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *NewHandleArgs) GetV() *NgoloFuzzAny {
if x != nil {
return x.V
}
return nil
}
type HandleNgdotValueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HandleNgdotValueArgs) Reset() {
*x = HandleNgdotValueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HandleNgdotValueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HandleNgdotValueArgs) ProtoMessage() {}
func (x *HandleNgdotValueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HandleNgdotValueArgs.ProtoReflect.Descriptor instead.
func (*HandleNgdotValueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type HandleNgdotDeleteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HandleNgdotDeleteArgs) Reset() {
*x = HandleNgdotDeleteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HandleNgdotDeleteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HandleNgdotDeleteArgs) ProtoMessage() {}
func (x *HandleNgdotDeleteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HandleNgdotDeleteArgs.ProtoReflect.Descriptor instead.
func (*HandleNgdotDeleteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_NewHandle
// *NgoloFuzzOne_HandleNgdotValue
// *NgoloFuzzOne_HandleNgdotDelete
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetNewHandle() *NewHandleArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewHandle); ok {
return x.NewHandle
}
}
return nil
}
func (x *NgoloFuzzOne) GetHandleNgdotValue() *HandleNgdotValueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_HandleNgdotValue); ok {
return x.HandleNgdotValue
}
}
return nil
}
func (x *NgoloFuzzOne) GetHandleNgdotDelete() *HandleNgdotDeleteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_HandleNgdotDelete); ok {
return x.HandleNgdotDelete
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_NewHandle struct {
NewHandle *NewHandleArgs `protobuf:"bytes,1,opt,name=NewHandle,proto3,oneof"`
}
type NgoloFuzzOne_HandleNgdotValue struct {
HandleNgdotValue *HandleNgdotValueArgs `protobuf:"bytes,2,opt,name=HandleNgdotValue,proto3,oneof"`
}
type NgoloFuzzOne_HandleNgdotDelete struct {
HandleNgdotDelete *HandleNgdotDeleteArgs `protobuf:"bytes,3,opt,name=HandleNgdotDelete,proto3,oneof"`
}
func (*NgoloFuzzOne_NewHandle) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_HandleNgdotValue) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_HandleNgdotDelete) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"6\n" +
"\rNewHandleArgs\x12%\n" +
"\x01v\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x01v\"\x16\n" +
"\x14HandleNgdotValueArgs\"\x17\n" +
"\x15HandleNgdotDeleteArgs\"\xf1\x01\n" +
"\fNgoloFuzzOne\x128\n" +
"\tNewHandle\x18\x01 \x01(\v2\x18.ngolofuzz.NewHandleArgsH\x00R\tNewHandle\x12M\n" +
"\x10HandleNgdotValue\x18\x02 \x01(\v2\x1f.ngolofuzz.HandleNgdotValueArgsH\x00R\x10HandleNgdotValue\x12P\n" +
"\x11HandleNgdotDelete\x18\x03 \x01(\v2 .ngolofuzz.HandleNgdotDeleteArgsH\x00R\x11HandleNgdotDeleteB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x18Z\x16./;fuzz_ng_runtime_cgob\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_ngolofuzz_proto_goTypes = []any{
(*NewHandleArgs)(nil), // 0: ngolofuzz.NewHandleArgs
(*HandleNgdotValueArgs)(nil), // 1: ngolofuzz.HandleNgdotValueArgs
(*HandleNgdotDeleteArgs)(nil), // 2: ngolofuzz.HandleNgdotDeleteArgs
(*NgoloFuzzOne)(nil), // 3: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 4: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 5: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
4, // 0: ngolofuzz.NewHandleArgs.v:type_name -> ngolofuzz.NgoloFuzzAny
0, // 1: ngolofuzz.NgoloFuzzOne.NewHandle:type_name -> ngolofuzz.NewHandleArgs
1, // 2: ngolofuzz.NgoloFuzzOne.HandleNgdotValue:type_name -> ngolofuzz.HandleNgdotValueArgs
2, // 3: ngolofuzz.NgoloFuzzOne.HandleNgdotDelete:type_name -> ngolofuzz.HandleNgdotDeleteArgs
3, // 4: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
5, // [5:5] is the sub-list for method output_type
5, // [5:5] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[3].OneofWrappers = []any{
(*NgoloFuzzOne_NewHandle)(nil),
(*NgoloFuzzOne_HandleNgdotValue)(nil),
(*NgoloFuzzOne_HandleNgdotDelete)(nil),
}
file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_runtime_coverage
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"runtime/coverage"
"time"
)
// FuzzingConn is an in-memory net.Conn stub: reads serve the
// fuzzer-supplied byte slice, writes are discarded.
type FuzzingConn struct {
// buf holds the complete fuzz input; it is never mutated.
buf []byte
// offset is the read cursor into buf.
offset int
}
// Read implements io.Reader (for net.Conn) by serving the remaining
// fuzz bytes into b. It returns io.EOF once the buffer is exhausted.
//
// Fix: the original branched on `len(b) < len(c.buf)+c.offset` instead
// of the remaining length `len(c.buf)-c.offset`. Whenever the caller's
// buffer was at least as large as the remaining data but smaller than
// len(buf)+offset, the old code advanced the cursor by len(b) and
// reported n = len(b) even though fewer bytes were copied — violating
// the io.Reader contract that n is the count actually transferred.
// Using copy's return value (min of both lengths) handles both the
// partial-fill and final-drain cases correctly.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards the data and reports full success, so the code under
// test believes its output was sent.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the connection drained; subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a fixed dummy net.Addr for FuzzingConn.
type FuzzingAddr struct{}
// Network returns a constant placeholder network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
// String returns a constant placeholder address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr returns the dummy address.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns the dummy address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op; deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn whose read cursor starts
// at the beginning of the data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt returns a big.Int whose absolute value is the
// big-endian interpretation of a (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over the bytes in a.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray converts each int64 in a to the platform int type,
// returning a new slice of the same length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array converts each int64 in a to uint16 (truncating
// to the low 16 bits), returning a new slice of the same length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or '\x00' when s is empty.
// Invalid UTF-8 at the front yields utf8.RuneError, as with any
// range-over-string decode.
func GetRune(s string) rune {
	var first rune // zero value == '\x00' for the empty string
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid runs one fuzz iteration on data that is required to be a
// valid protobuf encoding of NgoloFuzzList; any unmarshal failure
// panics so a broken corpus entry is surfaced as a crash.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Installed after the unmarshal check on purpose: the panic above must
// propagate, while string panics raised during replay are swallowed
// (presumably the harness treats string panics as expected target
// behavior — NOTE(review): confirm against the generator's intent).
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the lenient entry point: undecodable input is simply
// rejected (return 0) instead of panicking as FuzzNG_valid does.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics raised during replay; anything else is a real
// bug and is re-panicked.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards one-time reproducer emission for this process.
var initialized bool
// FuzzNG_List replays the call sequence described by gen against the
// runtime/coverage API. It returns 0 when a call errored or the list
// was cut short, 1 otherwise (a libFuzzer-style corpus priority hint).
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// On the first iteration only, optionally dump a Go-source
// reproducer of this input to the file named by FUZZ_NG_REPRODUCER.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
for l := range gen.List {
// Cap the amount of work per input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_WriteMetaDir:
r0 := coverage.WriteMetaDir(a.WriteMetaDir.Dir)
if r0 != nil{
// Exercise error stringification, then stop replaying.
r0.Error()
return 0
}
case *NgoloFuzzOne_WriteMeta:
arg0 := bytes.NewBuffer(a.WriteMeta.W)
r0 := coverage.WriteMeta(arg0)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_WriteCountersDir:
r0 := coverage.WriteCountersDir(a.WriteCountersDir.Dir)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_WriteCounters:
arg0 := bytes.NewBuffer(a.WriteCounters.W)
r0 := coverage.WriteCounters(arg0)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_ClearCounters:
r0 := coverage.ClearCounters()
if r0 != nil{
r0.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer for the call sequence in
// gen to w, one statement per list element. WriteString errors are
// deliberately ignored: the reproducer is best-effort debugging output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_WriteMetaDir:
			w.WriteString(fmt.Sprintf("coverage.WriteMetaDir(%#+v)\n", a.WriteMetaDir.Dir))
		case *NgoloFuzzOne_WriteMeta:
			w.WriteString(fmt.Sprintf("coverage.WriteMeta(bytes.NewBuffer(%#+v))\n", a.WriteMeta.W))
		case *NgoloFuzzOne_WriteCountersDir:
			w.WriteString(fmt.Sprintf("coverage.WriteCountersDir(%#+v)\n", a.WriteCountersDir.Dir))
		case *NgoloFuzzOne_WriteCounters:
			w.WriteString(fmt.Sprintf("coverage.WriteCounters(bytes.NewBuffer(%#+v))\n", a.WriteCounters.W))
		case *NgoloFuzzOne_ClearCounters:
			// Constant string: Sprintf with no verbs was redundant
			// (staticcheck S1039).
			w.WriteString("coverage.ClearCounters()\n")
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_runtime_coverage
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type WriteMetaDirArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriteMetaDirArgs) Reset() {
*x = WriteMetaDirArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriteMetaDirArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriteMetaDirArgs) ProtoMessage() {}
func (x *WriteMetaDirArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriteMetaDirArgs.ProtoReflect.Descriptor instead.
func (*WriteMetaDirArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *WriteMetaDirArgs) GetDir() string {
if x != nil {
return x.Dir
}
return ""
}
type WriteMetaArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriteMetaArgs) Reset() {
*x = WriteMetaArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriteMetaArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriteMetaArgs) ProtoMessage() {}
func (x *WriteMetaArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriteMetaArgs.ProtoReflect.Descriptor instead.
func (*WriteMetaArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *WriteMetaArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type WriteCountersDirArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriteCountersDirArgs) Reset() {
*x = WriteCountersDirArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriteCountersDirArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriteCountersDirArgs) ProtoMessage() {}
func (x *WriteCountersDirArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriteCountersDirArgs.ProtoReflect.Descriptor instead.
func (*WriteCountersDirArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *WriteCountersDirArgs) GetDir() string {
if x != nil {
return x.Dir
}
return ""
}
type WriteCountersArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriteCountersArgs) Reset() {
*x = WriteCountersArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriteCountersArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriteCountersArgs) ProtoMessage() {}
func (x *WriteCountersArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriteCountersArgs.ProtoReflect.Descriptor instead.
func (*WriteCountersArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *WriteCountersArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
type ClearCountersArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ClearCountersArgs) Reset() {
*x = ClearCountersArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ClearCountersArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ClearCountersArgs) ProtoMessage() {}
func (x *ClearCountersArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ClearCountersArgs.ProtoReflect.Descriptor instead.
func (*ClearCountersArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_WriteMetaDir
// *NgoloFuzzOne_WriteMeta
// *NgoloFuzzOne_WriteCountersDir
// *NgoloFuzzOne_WriteCounters
// *NgoloFuzzOne_ClearCounters
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetWriteMetaDir() *WriteMetaDirArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriteMetaDir); ok {
return x.WriteMetaDir
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriteMeta() *WriteMetaArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriteMeta); ok {
return x.WriteMeta
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriteCountersDir() *WriteCountersDirArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriteCountersDir); ok {
return x.WriteCountersDir
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriteCounters() *WriteCountersArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriteCounters); ok {
return x.WriteCounters
}
}
return nil
}
func (x *NgoloFuzzOne) GetClearCounters() *ClearCountersArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ClearCounters); ok {
return x.ClearCounters
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_WriteMetaDir struct {
WriteMetaDir *WriteMetaDirArgs `protobuf:"bytes,1,opt,name=WriteMetaDir,proto3,oneof"`
}
type NgoloFuzzOne_WriteMeta struct {
WriteMeta *WriteMetaArgs `protobuf:"bytes,2,opt,name=WriteMeta,proto3,oneof"`
}
type NgoloFuzzOne_WriteCountersDir struct {
WriteCountersDir *WriteCountersDirArgs `protobuf:"bytes,3,opt,name=WriteCountersDir,proto3,oneof"`
}
type NgoloFuzzOne_WriteCounters struct {
WriteCounters *WriteCountersArgs `protobuf:"bytes,4,opt,name=WriteCounters,proto3,oneof"`
}
type NgoloFuzzOne_ClearCounters struct {
ClearCounters *ClearCountersArgs `protobuf:"bytes,5,opt,name=ClearCounters,proto3,oneof"`
}
func (*NgoloFuzzOne_WriteMetaDir) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriteMeta) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriteCountersDir) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriteCounters) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ClearCounters) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"$\n" +
"\x10WriteMetaDirArgs\x12\x10\n" +
"\x03dir\x18\x01 \x01(\tR\x03dir\"\x1d\n" +
"\rWriteMetaArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"(\n" +
"\x14WriteCountersDirArgs\x12\x10\n" +
"\x03dir\x18\x01 \x01(\tR\x03dir\"!\n" +
"\x11WriteCountersArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"\x13\n" +
"\x11ClearCountersArgs\"\xee\x02\n" +
"\fNgoloFuzzOne\x12A\n" +
"\fWriteMetaDir\x18\x01 \x01(\v2\x1b.ngolofuzz.WriteMetaDirArgsH\x00R\fWriteMetaDir\x128\n" +
"\tWriteMeta\x18\x02 \x01(\v2\x18.ngolofuzz.WriteMetaArgsH\x00R\tWriteMeta\x12M\n" +
"\x10WriteCountersDir\x18\x03 \x01(\v2\x1f.ngolofuzz.WriteCountersDirArgsH\x00R\x10WriteCountersDir\x12D\n" +
"\rWriteCounters\x18\x04 \x01(\v2\x1c.ngolofuzz.WriteCountersArgsH\x00R\rWriteCounters\x12D\n" +
"\rClearCounters\x18\x05 \x01(\v2\x1c.ngolofuzz.ClearCountersArgsH\x00R\rClearCountersB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1dZ\x1b./;fuzz_ng_runtime_coverageb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_ngolofuzz_proto_goTypes = []any{
(*WriteMetaDirArgs)(nil), // 0: ngolofuzz.WriteMetaDirArgs
(*WriteMetaArgs)(nil), // 1: ngolofuzz.WriteMetaArgs
(*WriteCountersDirArgs)(nil), // 2: ngolofuzz.WriteCountersDirArgs
(*WriteCountersArgs)(nil), // 3: ngolofuzz.WriteCountersArgs
(*ClearCountersArgs)(nil), // 4: ngolofuzz.ClearCountersArgs
(*NgoloFuzzOne)(nil), // 5: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 6: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 7: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.WriteMetaDir:type_name -> ngolofuzz.WriteMetaDirArgs
1, // 1: ngolofuzz.NgoloFuzzOne.WriteMeta:type_name -> ngolofuzz.WriteMetaArgs
2, // 2: ngolofuzz.NgoloFuzzOne.WriteCountersDir:type_name -> ngolofuzz.WriteCountersDirArgs
3, // 3: ngolofuzz.NgoloFuzzOne.WriteCounters:type_name -> ngolofuzz.WriteCountersArgs
4, // 4: ngolofuzz.NgoloFuzzOne.ClearCounters:type_name -> ngolofuzz.ClearCountersArgs
5, // 5: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
6, // [6:6] is the sub-list for method output_type
6, // [6:6] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
(*NgoloFuzzOne_WriteMetaDir)(nil),
(*NgoloFuzzOne_WriteMeta)(nil),
(*NgoloFuzzOne_WriteCountersDir)(nil),
(*NgoloFuzzOne_WriteCounters)(nil),
(*NgoloFuzzOne_ClearCounters)(nil),
}
file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 8,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_runtime_debug
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"runtime/debug"
"time"
)
// FuzzingConn is an in-memory net.Conn stub: reads serve the
// fuzzer-supplied byte slice, writes are discarded.
type FuzzingConn struct {
// buf holds the complete fuzz input; it is never mutated.
buf []byte
// offset is the read cursor into buf.
offset int
}
// Read implements io.Reader (for net.Conn) by serving the remaining
// fuzz bytes into b. It returns io.EOF once the buffer is exhausted.
//
// Fix: the original branched on `len(b) < len(c.buf)+c.offset` instead
// of the remaining length `len(c.buf)-c.offset`. Whenever the caller's
// buffer was at least as large as the remaining data but smaller than
// len(buf)+offset, the old code advanced the cursor by len(b) and
// reported n = len(b) even though fewer bytes were copied — violating
// the io.Reader contract that n is the count actually transferred.
// Using copy's return value (min of both lengths) handles both the
// partial-fill and final-drain cases correctly.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards the data and reports full success, so the code under
// test believes its output was sent.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the connection drained; subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a fixed dummy net.Addr for FuzzingConn.
type FuzzingAddr struct{}
// Network returns a constant placeholder network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
// String returns a constant placeholder address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr returns the dummy address.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns the dummy address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op; deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn whose read cursor starts
// at the beginning of the data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt returns a big.Int whose absolute value is the
// big-endian interpretation of a (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over the bytes in a.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray converts each int64 in a to the platform int type,
// returning a new slice of the same length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array converts each int64 in a to uint16 (truncating
// to the low 16 bits), returning a new slice of the same length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or '\x00' when s is empty.
// Invalid UTF-8 at the front yields utf8.RuneError, as with any
// range-over-string decode.
func GetRune(s string) rune {
	var first rune // zero value == '\x00' for the empty string
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid runs one fuzz iteration on data that is required to be a
// valid protobuf encoding of NgoloFuzzList; any unmarshal failure
// panics so a broken corpus entry is surfaced as a crash.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Installed after the unmarshal check on purpose: the panic above must
// propagate, while string panics raised during replay are swallowed
// (presumably the harness treats string panics as expected target
// behavior — NOTE(review): confirm against the generator's intent).
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is the lenient entry point: undecodable input is simply
// rejected (return 0) instead of panicking as FuzzNG_valid does.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics raised during replay; anything else is a real
// bug and is re-panicked.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards one-time reproducer emission for this process.
var initialized bool

// FuzzNG_List replays the call sequence described by gen against the
// runtime/debug API. It returns 0 when a call errored or the list was
// cut short, 1 otherwise (a libFuzzer-style corpus priority hint).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// On the first iteration only, optionally dump a Go-source
		// reproducer of this input to the file named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var BuildInfoResults []*debug.BuildInfo
	BuildInfoResultsIndex := 0
	for l := range gen.List {
		// Cap the amount of work per input.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_SetGCPercent:
			arg0 := int(a.SetGCPercent.Percent)
			debug.SetGCPercent(arg0)
		case *NgoloFuzzOne_FreeOSMemory:
			debug.FreeOSMemory()
		case *NgoloFuzzOne_SetPanicOnFault:
			debug.SetPanicOnFault(a.SetPanicOnFault.Enabled)
		case *NgoloFuzzOne_SetTraceback:
			debug.SetTraceback(a.SetTraceback.Level)
		case *NgoloFuzzOne_SetMemoryLimit:
			// NOTE(review): a fuzzer-chosen soft memory limit can make the
			// process OOM-prone; confirm this is acceptable for the harness.
			debug.SetMemoryLimit(a.SetMemoryLimit.Limit)
		case *NgoloFuzzOne_ReadBuildInfo:
			// BUG FIX: the result was previously discarded, leaving
			// BuildInfoResults permanently empty and making the
			// BuildInfoNgdotString case below unreachable. Capture the
			// value (only when ok, so we never store a nil *BuildInfo).
			if bi, ok := debug.ReadBuildInfo(); ok {
				BuildInfoResults = append(BuildInfoResults, bi)
			}
		case *NgoloFuzzOne_BuildInfoNgdotString:
			if len(BuildInfoResults) == 0 {
				continue
			}
			// Cycle through collected results so repeated calls exercise
			// each stored BuildInfo.
			arg0 := BuildInfoResults[BuildInfoResultsIndex]
			BuildInfoResultsIndex = (BuildInfoResultsIndex + 1) % len(BuildInfoResults)
			arg0.String()
		case *NgoloFuzzOne_ParseBuildInfo:
			_, r1 := debug.ParseBuildInfo(a.ParseBuildInfo.Data)
			if r1 != nil {
				// Exercise error stringification, then stop replaying.
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_PrintStack:
			debug.PrintStack()
		case *NgoloFuzzOne_Stack:
			debug.Stack()
		}
	}
	return 1
}
// PrintNG_List writes to w a Go-source reproduction of the call list in
// gen, mirroring the execution order of FuzzNG_List. Write errors are
// deliberately ignored: the reproducer is best-effort diagnostics output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// NOTE(review): BuildInfoNb is never incremented, so the
	// BuildInfoNgdotString case never emits anything — this mirrors the dead
	// branch in FuzzNG_List; confirm against the generator.
	BuildInfoNb := 0
	BuildInfoResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_SetGCPercent:
			w.WriteString(fmt.Sprintf("debug.SetGCPercent(int(%#+v))\n", a.SetGCPercent.Percent))
		case *NgoloFuzzOne_FreeOSMemory:
			// Constant strings need no fmt.Sprintf (staticcheck S1039).
			w.WriteString("debug.FreeOSMemory()\n")
		case *NgoloFuzzOne_SetPanicOnFault:
			w.WriteString(fmt.Sprintf("debug.SetPanicOnFault(%#+v)\n", a.SetPanicOnFault.Enabled))
		case *NgoloFuzzOne_SetTraceback:
			w.WriteString(fmt.Sprintf("debug.SetTraceback(%#+v)\n", a.SetTraceback.Level))
		case *NgoloFuzzOne_SetMemoryLimit:
			w.WriteString(fmt.Sprintf("debug.SetMemoryLimit(%#+v)\n", a.SetMemoryLimit.Limit))
		case *NgoloFuzzOne_ReadBuildInfo:
			w.WriteString("debug.ReadBuildInfo()\n")
		case *NgoloFuzzOne_BuildInfoNgdotString:
			if BuildInfoNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("BuildInfo%d.String()\n", BuildInfoResultsIndex))
			BuildInfoResultsIndex = (BuildInfoResultsIndex + 1) % BuildInfoNb
		case *NgoloFuzzOne_ParseBuildInfo:
			w.WriteString(fmt.Sprintf("debug.ParseBuildInfo(%#+v)\n", a.ParseBuildInfo.Data))
		case *NgoloFuzzOne_PrintStack:
			w.WriteString("debug.PrintStack()\n")
		case *NgoloFuzzOne_Stack:
			w.WriteString("debug.Stack()\n")
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_runtime_debug
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type SetGCPercentArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Percent int64 `protobuf:"varint,1,opt,name=percent,proto3" json:"percent,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetGCPercentArgs) Reset() {
*x = SetGCPercentArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetGCPercentArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetGCPercentArgs) ProtoMessage() {}
func (x *SetGCPercentArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetGCPercentArgs.ProtoReflect.Descriptor instead.
func (*SetGCPercentArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *SetGCPercentArgs) GetPercent() int64 {
if x != nil {
return x.Percent
}
return 0
}
type FreeOSMemoryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FreeOSMemoryArgs) Reset() {
*x = FreeOSMemoryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FreeOSMemoryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FreeOSMemoryArgs) ProtoMessage() {}
func (x *FreeOSMemoryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FreeOSMemoryArgs.ProtoReflect.Descriptor instead.
func (*FreeOSMemoryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type SetPanicOnFaultArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetPanicOnFaultArgs) Reset() {
*x = SetPanicOnFaultArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetPanicOnFaultArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetPanicOnFaultArgs) ProtoMessage() {}
func (x *SetPanicOnFaultArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetPanicOnFaultArgs.ProtoReflect.Descriptor instead.
func (*SetPanicOnFaultArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *SetPanicOnFaultArgs) GetEnabled() bool {
if x != nil {
return x.Enabled
}
return false
}
type SetTracebackArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Level string `protobuf:"bytes,1,opt,name=level,proto3" json:"level,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetTracebackArgs) Reset() {
*x = SetTracebackArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetTracebackArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetTracebackArgs) ProtoMessage() {}
func (x *SetTracebackArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetTracebackArgs.ProtoReflect.Descriptor instead.
func (*SetTracebackArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *SetTracebackArgs) GetLevel() string {
if x != nil {
return x.Level
}
return ""
}
type SetMemoryLimitArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Limit int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SetMemoryLimitArgs) Reset() {
*x = SetMemoryLimitArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SetMemoryLimitArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SetMemoryLimitArgs) ProtoMessage() {}
func (x *SetMemoryLimitArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SetMemoryLimitArgs.ProtoReflect.Descriptor instead.
func (*SetMemoryLimitArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *SetMemoryLimitArgs) GetLimit() int64 {
if x != nil {
return x.Limit
}
return 0
}
type ReadBuildInfoArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ReadBuildInfoArgs) Reset() {
*x = ReadBuildInfoArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ReadBuildInfoArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReadBuildInfoArgs) ProtoMessage() {}
func (x *ReadBuildInfoArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReadBuildInfoArgs.ProtoReflect.Descriptor instead.
func (*ReadBuildInfoArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
type BuildInfoNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *BuildInfoNgdotStringArgs) Reset() {
*x = BuildInfoNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *BuildInfoNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BuildInfoNgdotStringArgs) ProtoMessage() {}
func (x *BuildInfoNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BuildInfoNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*BuildInfoNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
type ParseBuildInfoArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ParseBuildInfoArgs) Reset() {
*x = ParseBuildInfoArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ParseBuildInfoArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ParseBuildInfoArgs) ProtoMessage() {}
func (x *ParseBuildInfoArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ParseBuildInfoArgs.ProtoReflect.Descriptor instead.
func (*ParseBuildInfoArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *ParseBuildInfoArgs) GetData() string {
if x != nil {
return x.Data
}
return ""
}
type PrintStackArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PrintStackArgs) Reset() {
*x = PrintStackArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PrintStackArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PrintStackArgs) ProtoMessage() {}
func (x *PrintStackArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PrintStackArgs.ProtoReflect.Descriptor instead.
func (*PrintStackArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
type StackArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *StackArgs) Reset() {
*x = StackArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *StackArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StackArgs) ProtoMessage() {}
func (x *StackArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StackArgs.ProtoReflect.Descriptor instead.
func (*StackArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_SetGCPercent
// *NgoloFuzzOne_FreeOSMemory
// *NgoloFuzzOne_SetPanicOnFault
// *NgoloFuzzOne_SetTraceback
// *NgoloFuzzOne_SetMemoryLimit
// *NgoloFuzzOne_ReadBuildInfo
// *NgoloFuzzOne_BuildInfoNgdotString
// *NgoloFuzzOne_ParseBuildInfo
// *NgoloFuzzOne_PrintStack
// *NgoloFuzzOne_Stack
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetSetGCPercent() *SetGCPercentArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SetGCPercent); ok {
return x.SetGCPercent
}
}
return nil
}
func (x *NgoloFuzzOne) GetFreeOSMemory() *FreeOSMemoryArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FreeOSMemory); ok {
return x.FreeOSMemory
}
}
return nil
}
func (x *NgoloFuzzOne) GetSetPanicOnFault() *SetPanicOnFaultArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SetPanicOnFault); ok {
return x.SetPanicOnFault
}
}
return nil
}
func (x *NgoloFuzzOne) GetSetTraceback() *SetTracebackArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SetTraceback); ok {
return x.SetTraceback
}
}
return nil
}
func (x *NgoloFuzzOne) GetSetMemoryLimit() *SetMemoryLimitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SetMemoryLimit); ok {
return x.SetMemoryLimit
}
}
return nil
}
func (x *NgoloFuzzOne) GetReadBuildInfo() *ReadBuildInfoArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReadBuildInfo); ok {
return x.ReadBuildInfo
}
}
return nil
}
func (x *NgoloFuzzOne) GetBuildInfoNgdotString() *BuildInfoNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_BuildInfoNgdotString); ok {
return x.BuildInfoNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetParseBuildInfo() *ParseBuildInfoArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ParseBuildInfo); ok {
return x.ParseBuildInfo
}
}
return nil
}
func (x *NgoloFuzzOne) GetPrintStack() *PrintStackArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PrintStack); ok {
return x.PrintStack
}
}
return nil
}
func (x *NgoloFuzzOne) GetStack() *StackArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Stack); ok {
return x.Stack
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_SetGCPercent struct {
SetGCPercent *SetGCPercentArgs `protobuf:"bytes,1,opt,name=SetGCPercent,proto3,oneof"`
}
type NgoloFuzzOne_FreeOSMemory struct {
FreeOSMemory *FreeOSMemoryArgs `protobuf:"bytes,2,opt,name=FreeOSMemory,proto3,oneof"`
}
type NgoloFuzzOne_SetPanicOnFault struct {
SetPanicOnFault *SetPanicOnFaultArgs `protobuf:"bytes,3,opt,name=SetPanicOnFault,proto3,oneof"`
}
type NgoloFuzzOne_SetTraceback struct {
SetTraceback *SetTracebackArgs `protobuf:"bytes,4,opt,name=SetTraceback,proto3,oneof"`
}
type NgoloFuzzOne_SetMemoryLimit struct {
SetMemoryLimit *SetMemoryLimitArgs `protobuf:"bytes,5,opt,name=SetMemoryLimit,proto3,oneof"`
}
type NgoloFuzzOne_ReadBuildInfo struct {
ReadBuildInfo *ReadBuildInfoArgs `protobuf:"bytes,6,opt,name=ReadBuildInfo,proto3,oneof"`
}
type NgoloFuzzOne_BuildInfoNgdotString struct {
BuildInfoNgdotString *BuildInfoNgdotStringArgs `protobuf:"bytes,7,opt,name=BuildInfoNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_ParseBuildInfo struct {
ParseBuildInfo *ParseBuildInfoArgs `protobuf:"bytes,8,opt,name=ParseBuildInfo,proto3,oneof"`
}
type NgoloFuzzOne_PrintStack struct {
PrintStack *PrintStackArgs `protobuf:"bytes,9,opt,name=PrintStack,proto3,oneof"`
}
type NgoloFuzzOne_Stack struct {
Stack *StackArgs `protobuf:"bytes,10,opt,name=Stack,proto3,oneof"`
}
func (*NgoloFuzzOne_SetGCPercent) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FreeOSMemory) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SetPanicOnFault) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SetTraceback) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SetMemoryLimit) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReadBuildInfo) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_BuildInfoNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ParseBuildInfo) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PrintStack) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Stack) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\",\n" +
"\x10SetGCPercentArgs\x12\x18\n" +
"\apercent\x18\x01 \x01(\x03R\apercent\"\x12\n" +
"\x10FreeOSMemoryArgs\"/\n" +
"\x13SetPanicOnFaultArgs\x12\x18\n" +
"\aenabled\x18\x01 \x01(\bR\aenabled\"(\n" +
"\x10SetTracebackArgs\x12\x14\n" +
"\x05level\x18\x01 \x01(\tR\x05level\"*\n" +
"\x12SetMemoryLimitArgs\x12\x14\n" +
"\x05limit\x18\x01 \x01(\x03R\x05limit\"\x13\n" +
"\x11ReadBuildInfoArgs\"\x1a\n" +
"\x18BuildInfoNgdotStringArgs\"(\n" +
"\x12ParseBuildInfoArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\tR\x04data\"\x10\n" +
"\x0ePrintStackArgs\"\v\n" +
"\tStackArgs\"\xc9\x05\n" +
"\fNgoloFuzzOne\x12A\n" +
"\fSetGCPercent\x18\x01 \x01(\v2\x1b.ngolofuzz.SetGCPercentArgsH\x00R\fSetGCPercent\x12A\n" +
"\fFreeOSMemory\x18\x02 \x01(\v2\x1b.ngolofuzz.FreeOSMemoryArgsH\x00R\fFreeOSMemory\x12J\n" +
"\x0fSetPanicOnFault\x18\x03 \x01(\v2\x1e.ngolofuzz.SetPanicOnFaultArgsH\x00R\x0fSetPanicOnFault\x12A\n" +
"\fSetTraceback\x18\x04 \x01(\v2\x1b.ngolofuzz.SetTracebackArgsH\x00R\fSetTraceback\x12G\n" +
"\x0eSetMemoryLimit\x18\x05 \x01(\v2\x1d.ngolofuzz.SetMemoryLimitArgsH\x00R\x0eSetMemoryLimit\x12D\n" +
"\rReadBuildInfo\x18\x06 \x01(\v2\x1c.ngolofuzz.ReadBuildInfoArgsH\x00R\rReadBuildInfo\x12Y\n" +
"\x14BuildInfoNgdotString\x18\a \x01(\v2#.ngolofuzz.BuildInfoNgdotStringArgsH\x00R\x14BuildInfoNgdotString\x12G\n" +
"\x0eParseBuildInfo\x18\b \x01(\v2\x1d.ngolofuzz.ParseBuildInfoArgsH\x00R\x0eParseBuildInfo\x12;\n" +
"\n" +
"PrintStack\x18\t \x01(\v2\x19.ngolofuzz.PrintStackArgsH\x00R\n" +
"PrintStack\x12,\n" +
"\x05Stack\x18\n" +
" \x01(\v2\x14.ngolofuzz.StackArgsH\x00R\x05StackB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_runtime_debugb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
var file_ngolofuzz_proto_goTypes = []any{
(*SetGCPercentArgs)(nil), // 0: ngolofuzz.SetGCPercentArgs
(*FreeOSMemoryArgs)(nil), // 1: ngolofuzz.FreeOSMemoryArgs
(*SetPanicOnFaultArgs)(nil), // 2: ngolofuzz.SetPanicOnFaultArgs
(*SetTracebackArgs)(nil), // 3: ngolofuzz.SetTracebackArgs
(*SetMemoryLimitArgs)(nil), // 4: ngolofuzz.SetMemoryLimitArgs
(*ReadBuildInfoArgs)(nil), // 5: ngolofuzz.ReadBuildInfoArgs
(*BuildInfoNgdotStringArgs)(nil), // 6: ngolofuzz.BuildInfoNgdotStringArgs
(*ParseBuildInfoArgs)(nil), // 7: ngolofuzz.ParseBuildInfoArgs
(*PrintStackArgs)(nil), // 8: ngolofuzz.PrintStackArgs
(*StackArgs)(nil), // 9: ngolofuzz.StackArgs
(*NgoloFuzzOne)(nil), // 10: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 11: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 12: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.SetGCPercent:type_name -> ngolofuzz.SetGCPercentArgs
1, // 1: ngolofuzz.NgoloFuzzOne.FreeOSMemory:type_name -> ngolofuzz.FreeOSMemoryArgs
2, // 2: ngolofuzz.NgoloFuzzOne.SetPanicOnFault:type_name -> ngolofuzz.SetPanicOnFaultArgs
3, // 3: ngolofuzz.NgoloFuzzOne.SetTraceback:type_name -> ngolofuzz.SetTracebackArgs
4, // 4: ngolofuzz.NgoloFuzzOne.SetMemoryLimit:type_name -> ngolofuzz.SetMemoryLimitArgs
5, // 5: ngolofuzz.NgoloFuzzOne.ReadBuildInfo:type_name -> ngolofuzz.ReadBuildInfoArgs
6, // 6: ngolofuzz.NgoloFuzzOne.BuildInfoNgdotString:type_name -> ngolofuzz.BuildInfoNgdotStringArgs
7, // 7: ngolofuzz.NgoloFuzzOne.ParseBuildInfo:type_name -> ngolofuzz.ParseBuildInfoArgs
8, // 8: ngolofuzz.NgoloFuzzOne.PrintStack:type_name -> ngolofuzz.PrintStackArgs
9, // 9: ngolofuzz.NgoloFuzzOne.Stack:type_name -> ngolofuzz.StackArgs
10, // 10: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
11, // [11:11] is the sub-list for method output_type
11, // [11:11] is the sub-list for method input_type
11, // [11:11] is the sub-list for extension type_name
11, // [11:11] is the sub-list for extension extendee
0, // [0:11] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
(*NgoloFuzzOne_SetGCPercent)(nil),
(*NgoloFuzzOne_FreeOSMemory)(nil),
(*NgoloFuzzOne_SetPanicOnFault)(nil),
(*NgoloFuzzOne_SetTraceback)(nil),
(*NgoloFuzzOne_SetMemoryLimit)(nil),
(*NgoloFuzzOne_ReadBuildInfo)(nil),
(*NgoloFuzzOne_BuildInfoNgdotString)(nil),
(*NgoloFuzzOne_ParseBuildInfo)(nil),
(*NgoloFuzzOne_PrintStack)(nil),
(*NgoloFuzzOne_Stack)(nil),
}
file_ngolofuzz_proto_msgTypes[11].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 13,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_runtime_trace
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"runtime/trace"
"time"
)
// FuzzingConn is an in-memory connection fed by fuzzer-provided bytes:
// Read serves from buf, Write discards its input.
type FuzzingConn struct {
	buf    []byte // bytes served by Read
	offset int    // current read position within buf
}
// Read copies the remaining fuzz bytes into b and advances the read
// offset, returning io.EOF once the buffer is exhausted.
//
// Bug fixed: the original guard compared len(b) against
// len(c.buf)+c.offset (plus instead of minus remaining bytes), so when b
// was larger than the remaining data but smaller than that sum, Read
// reported n = len(b) while actually copying fewer bytes and pushed
// offset past the end of buf — violating the io.Reader contract. Using
// copy's return value gives the correct count in every case.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}
// Close marks the buffer fully consumed so subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
type FuzzingAddr struct{}
// Network returns a fixed placeholder network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}
// String returns a fixed placeholder address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder FuzzingAddr; part of the net.Conn surface.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}
// RemoteAddr returns a placeholder FuzzingAddr; part of the net.Conn surface.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}
// SetDeadline is a no-op: deadlines are irrelevant for an in-memory fuzzing conn.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}
// SetReadDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}
// SetWriteDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn
// whose Read will serve them from the start (offset zero).
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helper functions when they are actually needed
// CreateBigInt builds a big.Int from the big-endian bytes in a
// (an empty or nil slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over the bytes in a.
func CreateBufioReader(a []byte) *bufio.Reader {
	backing := bytes.NewBuffer(a)
	return bufio.NewReader(backing)
}
// ConvertIntArray converts a slice of int64 values into a []int of the
// same length, truncating each element to the platform int width.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array converts a slice of int64 values into a []uint16 of
// the same length, truncating each element modulo 2^16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first UTF-8 rune of s, or NUL for an empty string.
// Invalid leading bytes decode to U+FFFD, matching a range over the string.
func GetRune(s string) rune {
	runes := []rune(s)
	if len(runes) == 0 {
		return '\x00'
	}
	return runes[0]
}
// FuzzNG_valid executes the protobuf-encoded call list in data. The input
// is expected to already be a valid protobuf; an unmarshalling failure
// panics. Panics whose value is a string are treated as expected behavior
// of the fuzzed target and swallowed; any other panic is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
		// string panics are expected from the target and swallowed
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure executes the call list in data when it parses as a valid
// protobuf, and silently rejects it otherwise (we are unsure the input is
// a valid protobuf). String panics from the target are swallowed; any
// other panic is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
var initialized bool
// FuzzNG_List interprets gen as a sequence of runtime/trace API calls and
// executes them in order. On the first invocation, when FUZZ_NG_REPRODUCER
// names a file, a Go-source reproducer of the call list is written there.
// It returns 1 when the whole list executed and 0 when it bailed out early.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// NOTE(review): none of these result slices is ever appended to — no
	// proto case produces a Task, Region or FlightRecorder — so every
	// *Ngdot* branch below is currently dead; confirm against the generator.
	var TaskResults []*trace.Task
	TaskResultsIndex := 0
	var RegionResults []*trace.Region
	RegionResultsIndex := 0
	var FlightRecorderResults []*trace.FlightRecorder
	FlightRecorderResultsIndex := 0
	for l := range gen.List {
		// Cap the number of executed operations to keep each run bounded.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_TaskNgdotEnd:
			if len(TaskResults) == 0 {
				continue
			}
			arg0 := TaskResults[TaskResultsIndex]
			TaskResultsIndex = (TaskResultsIndex + 1) % len(TaskResults)
			arg0.End()
		case *NgoloFuzzOne_RegionNgdotEnd:
			if len(RegionResults) == 0 {
				continue
			}
			arg0 := RegionResults[RegionResultsIndex]
			RegionResultsIndex = (RegionResultsIndex + 1) % len(RegionResults)
			arg0.End()
		case *NgoloFuzzOne_IsEnabled:
			trace.IsEnabled()
		case *NgoloFuzzOne_FlightRecorderNgdotStart:
			if len(FlightRecorderResults) == 0 {
				continue
			}
			arg0 := FlightRecorderResults[FlightRecorderResultsIndex]
			FlightRecorderResultsIndex = (FlightRecorderResultsIndex + 1) % len(FlightRecorderResults)
			r0 := arg0.Start()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_FlightRecorderNgdotStop:
			if len(FlightRecorderResults) == 0 {
				continue
			}
			arg0 := FlightRecorderResults[FlightRecorderResultsIndex]
			FlightRecorderResultsIndex = (FlightRecorderResultsIndex + 1) % len(FlightRecorderResults)
			arg0.Stop()
		case *NgoloFuzzOne_FlightRecorderNgdotEnabled:
			if len(FlightRecorderResults) == 0 {
				continue
			}
			arg0 := FlightRecorderResults[FlightRecorderResultsIndex]
			FlightRecorderResultsIndex = (FlightRecorderResultsIndex + 1) % len(FlightRecorderResults)
			arg0.Enabled()
		case *NgoloFuzzOne_FlightRecorderNgdotWriteTo:
			if len(FlightRecorderResults) == 0 {
				continue
			}
			arg0 := FlightRecorderResults[FlightRecorderResultsIndex]
			FlightRecorderResultsIndex = (FlightRecorderResultsIndex + 1) % len(FlightRecorderResults)
			arg1 := bytes.NewBuffer(a.FlightRecorderNgdotWriteTo.W)
			_, r1 := arg0.WriteTo(arg1)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Start:
			arg0 := bytes.NewBuffer(a.Start.W)
			r0 := trace.Start(arg0)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_Stop:
			trace.Stop()
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for the call sequence
// encoded in gen to w, one statement per line.
//
// The *Nb counters track how many receiver values of each type the
// printed program has declared so far; a method-call case is skipped
// while its counter is zero so the reproducer never references an
// undeclared variable. (No case in this switch creates Task, Region or
// FlightRecorder values, so those branches currently never print —
// mirroring FuzzNG_List, where the matching result slices stay empty.)
// WriteString errors are deliberately ignored: this is best-effort
// diagnostic output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	TaskNb := 0
	TaskResultsIndex := 0
	RegionNb := 0
	RegionResultsIndex := 0
	FlightRecorderNb := 0
	FlightRecorderResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_TaskNgdotEnd:
			if TaskNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Task%d.End()\n", TaskResultsIndex))
			TaskResultsIndex = (TaskResultsIndex + 1) % TaskNb
		case *NgoloFuzzOne_RegionNgdotEnd:
			if RegionNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Region%d.End()\n", RegionResultsIndex))
			RegionResultsIndex = (RegionResultsIndex + 1) % RegionNb
		case *NgoloFuzzOne_IsEnabled:
			// Constant text: no formatting verbs, so avoid a no-arg Sprintf (S1039).
			w.WriteString("trace.IsEnabled()\n")
		case *NgoloFuzzOne_FlightRecorderNgdotStart:
			if FlightRecorderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("FlightRecorder%d.Start()\n", FlightRecorderResultsIndex))
			FlightRecorderResultsIndex = (FlightRecorderResultsIndex + 1) % FlightRecorderNb
		case *NgoloFuzzOne_FlightRecorderNgdotStop:
			if FlightRecorderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("FlightRecorder%d.Stop()\n", FlightRecorderResultsIndex))
			FlightRecorderResultsIndex = (FlightRecorderResultsIndex + 1) % FlightRecorderNb
		case *NgoloFuzzOne_FlightRecorderNgdotEnabled:
			if FlightRecorderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("FlightRecorder%d.Enabled()\n", FlightRecorderResultsIndex))
			FlightRecorderResultsIndex = (FlightRecorderResultsIndex + 1) % FlightRecorderNb
		case *NgoloFuzzOne_FlightRecorderNgdotWriteTo:
			if FlightRecorderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("FlightRecorder%d.WriteTo(bytes.NewBuffer(%#+v))\n", FlightRecorderResultsIndex, a.FlightRecorderNgdotWriteTo.W))
			FlightRecorderResultsIndex = (FlightRecorderResultsIndex + 1) % FlightRecorderNb
		case *NgoloFuzzOne_Start:
			w.WriteString(fmt.Sprintf("trace.Start(bytes.NewBuffer(%#+v))\n", a.Start.W))
		case *NgoloFuzzOne_Stop:
			// Constant text: no formatting verbs, so avoid a no-arg Sprintf (S1039).
			w.WriteString("trace.Stop()\n")
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_runtime_trace
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the build fails if this protoc-gen-go output and the
// linked protobuf runtime drift out of their mutually supported version range.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// TaskNgdotEndArgs is the field-less argument message for the fuzzer's
// Task.End action (the call takes no parameters, so nothing is encoded).
// The methods below are standard protoc-gen-go boilerplate.
type TaskNgdotEndArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TaskNgdotEndArgs) Reset() {
*x = TaskNgdotEndArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TaskNgdotEndArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TaskNgdotEndArgs) ProtoMessage() {}
func (x *TaskNgdotEndArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TaskNgdotEndArgs.ProtoReflect.Descriptor instead.
func (*TaskNgdotEndArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// RegionNgdotEndArgs is the field-less argument message for the fuzzer's
// Region.End action. Methods are standard protoc-gen-go boilerplate.
type RegionNgdotEndArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RegionNgdotEndArgs) Reset() {
*x = RegionNgdotEndArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RegionNgdotEndArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RegionNgdotEndArgs) ProtoMessage() {}
func (x *RegionNgdotEndArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RegionNgdotEndArgs.ProtoReflect.Descriptor instead.
func (*RegionNgdotEndArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// IsEnabledArgs is the field-less argument message for the fuzzer's
// trace.IsEnabled action. Methods are standard protoc-gen-go boilerplate.
type IsEnabledArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IsEnabledArgs) Reset() {
*x = IsEnabledArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IsEnabledArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IsEnabledArgs) ProtoMessage() {}
func (x *IsEnabledArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IsEnabledArgs.ProtoReflect.Descriptor instead.
func (*IsEnabledArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// FlightRecorderNgdotStartArgs is the field-less argument message for the
// fuzzer's FlightRecorder.Start action. Standard protoc-gen-go boilerplate.
type FlightRecorderNgdotStartArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FlightRecorderNgdotStartArgs) Reset() {
*x = FlightRecorderNgdotStartArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FlightRecorderNgdotStartArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FlightRecorderNgdotStartArgs) ProtoMessage() {}
func (x *FlightRecorderNgdotStartArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FlightRecorderNgdotStartArgs.ProtoReflect.Descriptor instead.
func (*FlightRecorderNgdotStartArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// FlightRecorderNgdotStopArgs is the field-less argument message for the
// fuzzer's FlightRecorder.Stop action. Standard protoc-gen-go boilerplate.
type FlightRecorderNgdotStopArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FlightRecorderNgdotStopArgs) Reset() {
*x = FlightRecorderNgdotStopArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FlightRecorderNgdotStopArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FlightRecorderNgdotStopArgs) ProtoMessage() {}
func (x *FlightRecorderNgdotStopArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FlightRecorderNgdotStopArgs.ProtoReflect.Descriptor instead.
func (*FlightRecorderNgdotStopArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// FlightRecorderNgdotEnabledArgs is the field-less argument message for the
// fuzzer's FlightRecorder.Enabled action. Standard protoc-gen-go boilerplate.
type FlightRecorderNgdotEnabledArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FlightRecorderNgdotEnabledArgs) Reset() {
*x = FlightRecorderNgdotEnabledArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FlightRecorderNgdotEnabledArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FlightRecorderNgdotEnabledArgs) ProtoMessage() {}
func (x *FlightRecorderNgdotEnabledArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FlightRecorderNgdotEnabledArgs.ProtoReflect.Descriptor instead.
func (*FlightRecorderNgdotEnabledArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// FlightRecorderNgdotWriteToArgs carries the argument for the fuzzer's
// FlightRecorder.WriteTo action: W seeds the bytes.Buffer passed as the
// destination writer. Methods are standard protoc-gen-go boilerplate.
type FlightRecorderNgdotWriteToArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FlightRecorderNgdotWriteToArgs) Reset() {
*x = FlightRecorderNgdotWriteToArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FlightRecorderNgdotWriteToArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FlightRecorderNgdotWriteToArgs) ProtoMessage() {}
func (x *FlightRecorderNgdotWriteToArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FlightRecorderNgdotWriteToArgs.ProtoReflect.Descriptor instead.
func (*FlightRecorderNgdotWriteToArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// GetW returns the W field, tolerating a nil receiver.
func (x *FlightRecorderNgdotWriteToArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
// StartArgs carries the argument for the fuzzer's trace.Start action:
// W seeds the bytes.Buffer passed as the trace output writer.
// Methods are standard protoc-gen-go boilerplate.
type StartArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *StartArgs) Reset() {
*x = StartArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *StartArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StartArgs) ProtoMessage() {}
func (x *StartArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StartArgs.ProtoReflect.Descriptor instead.
func (*StartArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// GetW returns the W field, tolerating a nil receiver.
func (x *StartArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
// StopArgs is the field-less argument message for the fuzzer's trace.Stop
// action. Methods are standard protoc-gen-go boilerplate.
type StopArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *StopArgs) Reset() {
*x = StopArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *StopArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StopArgs) ProtoMessage() {}
func (x *StopArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StopArgs.ProtoReflect.Descriptor instead.
func (*StopArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// NgoloFuzzOne encodes a single fuzzer action: exactly one of the oneof
// wrapper types below is stored in Item, naming the API call to perform
// together with its (possibly empty) argument message.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_TaskNgdotEnd
// *NgoloFuzzOne_RegionNgdotEnd
// *NgoloFuzzOne_IsEnabled
// *NgoloFuzzOne_FlightRecorderNgdotStart
// *NgoloFuzzOne_FlightRecorderNgdotStop
// *NgoloFuzzOne_FlightRecorderNgdotEnabled
// *NgoloFuzzOne_FlightRecorderNgdotWriteTo
// *NgoloFuzzOne_Start
// *NgoloFuzzOne_Stop
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetItem returns the raw oneof value, tolerating a nil receiver.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// The Get* accessors below each return their field when Item holds the
// matching wrapper type, and nil otherwise (including on a nil receiver).
func (x *NgoloFuzzOne) GetTaskNgdotEnd() *TaskNgdotEndArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TaskNgdotEnd); ok {
return x.TaskNgdotEnd
}
}
return nil
}
func (x *NgoloFuzzOne) GetRegionNgdotEnd() *RegionNgdotEndArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RegionNgdotEnd); ok {
return x.RegionNgdotEnd
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsEnabled() *IsEnabledArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsEnabled); ok {
return x.IsEnabled
}
}
return nil
}
func (x *NgoloFuzzOne) GetFlightRecorderNgdotStart() *FlightRecorderNgdotStartArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FlightRecorderNgdotStart); ok {
return x.FlightRecorderNgdotStart
}
}
return nil
}
func (x *NgoloFuzzOne) GetFlightRecorderNgdotStop() *FlightRecorderNgdotStopArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FlightRecorderNgdotStop); ok {
return x.FlightRecorderNgdotStop
}
}
return nil
}
func (x *NgoloFuzzOne) GetFlightRecorderNgdotEnabled() *FlightRecorderNgdotEnabledArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FlightRecorderNgdotEnabled); ok {
return x.FlightRecorderNgdotEnabled
}
}
return nil
}
func (x *NgoloFuzzOne) GetFlightRecorderNgdotWriteTo() *FlightRecorderNgdotWriteToArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_FlightRecorderNgdotWriteTo); ok {
return x.FlightRecorderNgdotWriteTo
}
}
return nil
}
func (x *NgoloFuzzOne) GetStart() *StartArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Start); ok {
return x.Start
}
}
return nil
}
func (x *NgoloFuzzOne) GetStop() *StopArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Stop); ok {
return x.Stop
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type that may be assigned to NgoloFuzzOne.Item.
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
// Oneof wrapper types: each holds one possible value of NgoloFuzzOne.Item.
type NgoloFuzzOne_TaskNgdotEnd struct {
TaskNgdotEnd *TaskNgdotEndArgs `protobuf:"bytes,1,opt,name=TaskNgdotEnd,proto3,oneof"`
}
type NgoloFuzzOne_RegionNgdotEnd struct {
RegionNgdotEnd *RegionNgdotEndArgs `protobuf:"bytes,2,opt,name=RegionNgdotEnd,proto3,oneof"`
}
type NgoloFuzzOne_IsEnabled struct {
IsEnabled *IsEnabledArgs `protobuf:"bytes,3,opt,name=IsEnabled,proto3,oneof"`
}
type NgoloFuzzOne_FlightRecorderNgdotStart struct {
FlightRecorderNgdotStart *FlightRecorderNgdotStartArgs `protobuf:"bytes,4,opt,name=FlightRecorderNgdotStart,proto3,oneof"`
}
type NgoloFuzzOne_FlightRecorderNgdotStop struct {
FlightRecorderNgdotStop *FlightRecorderNgdotStopArgs `protobuf:"bytes,5,opt,name=FlightRecorderNgdotStop,proto3,oneof"`
}
type NgoloFuzzOne_FlightRecorderNgdotEnabled struct {
FlightRecorderNgdotEnabled *FlightRecorderNgdotEnabledArgs `protobuf:"bytes,6,opt,name=FlightRecorderNgdotEnabled,proto3,oneof"`
}
type NgoloFuzzOne_FlightRecorderNgdotWriteTo struct {
FlightRecorderNgdotWriteTo *FlightRecorderNgdotWriteToArgs `protobuf:"bytes,7,opt,name=FlightRecorderNgdotWriteTo,proto3,oneof"`
}
type NgoloFuzzOne_Start struct {
Start *StartArgs `protobuf:"bytes,8,opt,name=Start,proto3,oneof"`
}
type NgoloFuzzOne_Stop struct {
Stop *StopArgs `protobuf:"bytes,9,opt,name=Stop,proto3,oneof"`
}
// Marker methods satisfying the sealed oneof interface.
func (*NgoloFuzzOne_TaskNgdotEnd) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RegionNgdotEnd) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsEnabled) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FlightRecorderNgdotStart) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FlightRecorderNgdotStop) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FlightRecorderNgdotEnabled) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FlightRecorderNgdotWriteTo) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Start) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Stop) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generic scalar value message (one of double, int64,
// bool, string or bytes) provided by the ngolo-fuzzing schema.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetItem returns the raw oneof value, tolerating a nil receiver.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// The Get* accessors below each return their field when Item holds the
// matching wrapper type, and the zero value otherwise.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
// isNgoloFuzzAny_Item is the sealed interface for NgoloFuzzAny.Item values.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
// Oneof wrapper types: each holds one possible value of NgoloFuzzAny.Item.
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
// Marker methods satisfying the sealed oneof interface.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzzer input: an ordered sequence of
// NgoloFuzzOne actions that FuzzNG_List replays.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// GetList returns the action sequence, tolerating a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the runtime descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto, emitted verbatim by protoc-gen-go. Do not edit by hand.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x12\n" +
"\x10TaskNgdotEndArgs\"\x14\n" +
"\x12RegionNgdotEndArgs\"\x0f\n" +
"\rIsEnabledArgs\"\x1e\n" +
"\x1cFlightRecorderNgdotStartArgs\"\x1d\n" +
"\x1bFlightRecorderNgdotStopArgs\" \n" +
"\x1eFlightRecorderNgdotEnabledArgs\".\n" +
"\x1eFlightRecorderNgdotWriteToArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"\x19\n" +
"\tStartArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"\n" +
"\n" +
"\bStopArgs\"\xda\x05\n" +
"\fNgoloFuzzOne\x12A\n" +
"\fTaskNgdotEnd\x18\x01 \x01(\v2\x1b.ngolofuzz.TaskNgdotEndArgsH\x00R\fTaskNgdotEnd\x12G\n" +
"\x0eRegionNgdotEnd\x18\x02 \x01(\v2\x1d.ngolofuzz.RegionNgdotEndArgsH\x00R\x0eRegionNgdotEnd\x128\n" +
"\tIsEnabled\x18\x03 \x01(\v2\x18.ngolofuzz.IsEnabledArgsH\x00R\tIsEnabled\x12e\n" +
"\x18FlightRecorderNgdotStart\x18\x04 \x01(\v2'.ngolofuzz.FlightRecorderNgdotStartArgsH\x00R\x18FlightRecorderNgdotStart\x12b\n" +
"\x17FlightRecorderNgdotStop\x18\x05 \x01(\v2&.ngolofuzz.FlightRecorderNgdotStopArgsH\x00R\x17FlightRecorderNgdotStop\x12k\n" +
"\x1aFlightRecorderNgdotEnabled\x18\x06 \x01(\v2).ngolofuzz.FlightRecorderNgdotEnabledArgsH\x00R\x1aFlightRecorderNgdotEnabled\x12k\n" +
"\x1aFlightRecorderNgdotWriteTo\x18\a \x01(\v2).ngolofuzz.FlightRecorderNgdotWriteToArgsH\x00R\x1aFlightRecorderNgdotWriteTo\x12,\n" +
"\x05Start\x18\b \x01(\v2\x14.ngolofuzz.StartArgsH\x00R\x05Start\x12)\n" +
"\x04Stop\x18\t \x01(\v2\x13.ngolofuzz.StopArgsH\x00R\x04StopB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_runtime_traceb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw descriptor
// once and caches it; Descriptor() methods return this compressed form.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Per-message runtime metadata slots, indexed to match goTypes below.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
// Go types for each message declared in ngolofuzz.proto, in declaration order.
var file_ngolofuzz_proto_goTypes = []any{
(*TaskNgdotEndArgs)(nil), // 0: ngolofuzz.TaskNgdotEndArgs
(*RegionNgdotEndArgs)(nil), // 1: ngolofuzz.RegionNgdotEndArgs
(*IsEnabledArgs)(nil), // 2: ngolofuzz.IsEnabledArgs
(*FlightRecorderNgdotStartArgs)(nil), // 3: ngolofuzz.FlightRecorderNgdotStartArgs
(*FlightRecorderNgdotStopArgs)(nil), // 4: ngolofuzz.FlightRecorderNgdotStopArgs
(*FlightRecorderNgdotEnabledArgs)(nil), // 5: ngolofuzz.FlightRecorderNgdotEnabledArgs
(*FlightRecorderNgdotWriteToArgs)(nil), // 6: ngolofuzz.FlightRecorderNgdotWriteToArgs
(*StartArgs)(nil), // 7: ngolofuzz.StartArgs
(*StopArgs)(nil), // 8: ngolofuzz.StopArgs
(*NgoloFuzzOne)(nil), // 9: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 10: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 11: ngolofuzz.NgoloFuzzList
}
// Field-dependency index table: maps each message-typed field to the
// goTypes index of its type (generated; layout explained by the trailing
// sub-list comments).
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.TaskNgdotEnd:type_name -> ngolofuzz.TaskNgdotEndArgs
1, // 1: ngolofuzz.NgoloFuzzOne.RegionNgdotEnd:type_name -> ngolofuzz.RegionNgdotEndArgs
2, // 2: ngolofuzz.NgoloFuzzOne.IsEnabled:type_name -> ngolofuzz.IsEnabledArgs
3, // 3: ngolofuzz.NgoloFuzzOne.FlightRecorderNgdotStart:type_name -> ngolofuzz.FlightRecorderNgdotStartArgs
4, // 4: ngolofuzz.NgoloFuzzOne.FlightRecorderNgdotStop:type_name -> ngolofuzz.FlightRecorderNgdotStopArgs
5, // 5: ngolofuzz.NgoloFuzzOne.FlightRecorderNgdotEnabled:type_name -> ngolofuzz.FlightRecorderNgdotEnabledArgs
6, // 6: ngolofuzz.NgoloFuzzOne.FlightRecorderNgdotWriteTo:type_name -> ngolofuzz.FlightRecorderNgdotWriteToArgs
7, // 7: ngolofuzz.NgoloFuzzOne.Start:type_name -> ngolofuzz.StartArgs
8, // 8: ngolofuzz.NgoloFuzzOne.Stop:type_name -> ngolofuzz.StopArgs
9, // 9: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
10, // [10:10] is the sub-list for method output_type
10, // [10:10] is the sub-list for method input_type
10, // [10:10] is the sub-list for extension type_name
10, // [10:10] is the sub-list for extension extendee
0, // [0:10] is the sub-list for field type_name
}
// Register the file descriptor with the protobuf runtime at program start.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init wires up oneof wrapper types and builds the
// runtime representation of ngolofuzz.proto. It is idempotent: a second
// call returns immediately once File_ngolofuzz_proto is set.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[9].OneofWrappers = []any{
(*NgoloFuzzOne_TaskNgdotEnd)(nil),
(*NgoloFuzzOne_RegionNgdotEnd)(nil),
(*NgoloFuzzOne_IsEnabled)(nil),
(*NgoloFuzzOne_FlightRecorderNgdotStart)(nil),
(*NgoloFuzzOne_FlightRecorderNgdotStop)(nil),
(*NgoloFuzzOne_FlightRecorderNgdotEnabled)(nil),
(*NgoloFuzzOne_FlightRecorderNgdotWriteTo)(nil),
(*NgoloFuzzOne_Start)(nil),
(*NgoloFuzzOne_Stop)(nil),
}
file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
// x exists only to derive this package's import path via reflection.
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 12,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release construction-time tables so they can be garbage collected.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_simd_archsimd__gen_unify
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"simd/archsimd/_gen/unify"
"time"
)
// FuzzingConn is a net.Conn-like stub whose reads are served from a fixed
// in-memory byte slice and whose writes are discarded. It lets fuzzed code
// that expects a connection run deterministically with no real I/O.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // index of the next unread byte in buf
}

// Read copies the next unread bytes of c.buf into b and reports how many
// bytes were actually copied, returning io.EOF once the buffer is
// exhausted. Bug fix: the previous version compared len(b) against
// len(c.buf)+c.offset instead of the remaining byte count, so a partially
// drained buffer could report n = len(b) while copying fewer bytes and
// push offset past the end of buf; copy() yields the correct minimum.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the buffer as drained so subsequent Reads return EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a placeholder net.Addr for FuzzingConn endpoints.
type FuzzingAddr struct{}
// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op: deadlines are meaningless for an in-memory conn.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn returns a FuzzingConn that serves a as its read data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
r := &FuzzingConn{}
r.buf = a
return r
}
// TODO: only emit the generic helper constructors below when actually needed.
// CreateBigInt builds a big.Int from the big-endian bytes in a
// (an empty or nil slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the bytes of a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewReader(a)
	return bufio.NewReader(src)
}
// ConvertIntArray converts each int64 in a to the platform int type,
// returning a slice of the same length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for idx, v := range a {
		out[idx] = int(v)
	}
	return out
}
// ConvertUint16Array converts each int64 in a to uint16 (truncating to the
// low 16 bits), returning a slice of the same length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for idx, v := range a {
		out[idx] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 at the front decodes as utf8.RuneError, exactly as a
// range loop over the string would.
func GetRune(s string) rune {
	first := rune('\x00')
	for _, r := range s {
		first = r
		break
	}
	return first
}
// FuzzNG_valid replays the call sequence encoded in data, which must be a
// valid protobuf-encoded NgoloFuzzList; it panics if decoding fails.
// NOTE(review): the recover handler is installed only after Unmarshal, so
// that decode panic is deliberately NOT swallowed — a corrupt corpus entry
// fails loudly.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
// String panics are swallowed — presumably the fuzzed API signals
// expected misuse this way; verify against the target package.
default:
// Anything else (runtime errors, error values) is a real finding.
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point used when the input is not guaranteed to
// be a valid protobuf: undecodable inputs are silently discarded (return 0)
// instead of panicking as FuzzNG_valid does.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
// String panics are swallowed — presumably the fuzzed API signals
// expected misuse this way; verify against the target package.
default:
// Anything else (runtime errors, error values) is a real finding.
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump in FuzzNG_List.
var initialized bool
// FuzzNG_List interprets gen as a sequence of unify API calls and executes
// them. Values produced by constructor cases (here only NewStringExact) are
// appended to the per-type *Results slice; method-call cases consume them
// round-robin through the matching *ResultsIndex counter and are skipped
// while the slice is empty. Returns 0 to drop oversized inputs or inputs
// whose calls returned an error, 1 otherwise. On first use, the sequence is
// also written as a Go reproducer to the file named by $FUZZ_NG_REPRODUCER.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var ClosureResults []*unify.Closure
ClosureResultsIndex := 0
var PosResults []*unify.Pos
PosResultsIndex := 0
var DefResults []*unify.Def
DefResultsIndex := 0
var TupleResults []*unify.Tuple
TupleResultsIndex := 0
var StringResults []*unify.String
StringResultsIndex := 0
var ValueResults []*unify.Value
ValueResultsIndex := 0
for l := range gen.List {
// Bound the amount of work done for a single fuzz input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_ClosureNgdotIsBottom:
if len(ClosureResults) == 0 {
continue
}
arg0 := ClosureResults[ClosureResultsIndex]
ClosureResultsIndex = (ClosureResultsIndex + 1) % len(ClosureResults)
arg0.IsBottom()
case *NgoloFuzzOne_ClosureNgdotSummands:
if len(ClosureResults) == 0 {
continue
}
arg0 := ClosureResults[ClosureResultsIndex]
ClosureResultsIndex = (ClosureResultsIndex + 1) % len(ClosureResults)
arg0.Summands()
case *NgoloFuzzOne_ClosureNgdotAll:
if len(ClosureResults) == 0 {
continue
}
arg0 := ClosureResults[ClosureResultsIndex]
ClosureResultsIndex = (ClosureResultsIndex + 1) % len(ClosureResults)
arg0.All()
case *NgoloFuzzOne_DefNgdotExact:
if len(DefResults) == 0 {
continue
}
arg0 := DefResults[DefResultsIndex]
DefResultsIndex = (DefResultsIndex + 1) % len(DefResults)
arg0.Exact()
case *NgoloFuzzOne_DefNgdotWhyNotExact:
if len(DefResults) == 0 {
continue
}
arg0 := DefResults[DefResultsIndex]
DefResultsIndex = (DefResultsIndex + 1) % len(DefResults)
arg0.WhyNotExact()
case *NgoloFuzzOne_DefNgdotAll:
if len(DefResults) == 0 {
continue
}
arg0 := DefResults[DefResultsIndex]
DefResultsIndex = (DefResultsIndex + 1) % len(DefResults)
arg0.All()
case *NgoloFuzzOne_TupleNgdotExact:
if len(TupleResults) == 0 {
continue
}
arg0 := TupleResults[TupleResultsIndex]
TupleResultsIndex = (TupleResultsIndex + 1) % len(TupleResults)
arg0.Exact()
case *NgoloFuzzOne_TupleNgdotWhyNotExact:
if len(TupleResults) == 0 {
continue
}
arg0 := TupleResults[TupleResultsIndex]
TupleResultsIndex = (TupleResultsIndex + 1) % len(TupleResults)
arg0.WhyNotExact()
// The only constructor case: produces a value for later String method calls.
case *NgoloFuzzOne_NewStringExact:
r0 := unify.NewStringExact(a.NewStringExact.S)
StringResults = append(StringResults, &r0)
case *NgoloFuzzOne_StringNgdotExact:
if len(StringResults) == 0 {
continue
}
arg0 := StringResults[StringResultsIndex]
StringResultsIndex = (StringResultsIndex + 1) % len(StringResults)
arg0.Exact()
case *NgoloFuzzOne_StringNgdotWhyNotExact:
if len(StringResults) == 0 {
continue
}
arg0 := StringResults[StringResultsIndex]
StringResultsIndex = (StringResultsIndex + 1) % len(StringResults)
arg0.WhyNotExact()
case *NgoloFuzzOne_PosNgdotString:
if len(PosResults) == 0 {
continue
}
arg0 := PosResults[PosResultsIndex]
PosResultsIndex = (PosResultsIndex + 1) % len(PosResults)
arg0.String()
case *NgoloFuzzOne_PosNgdotAppendText:
if len(PosResults) == 0 {
continue
}
arg0 := PosResults[PosResultsIndex]
PosResultsIndex = (PosResultsIndex + 1) % len(PosResults)
_, r1 := arg0.AppendText(a.PosNgdotAppendText.B)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ValueNgdotPos:
if len(ValueResults) == 0 {
continue
}
arg0 := ValueResults[ValueResultsIndex]
ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
arg0.Pos()
case *NgoloFuzzOne_ValueNgdotPosString:
if len(ValueResults) == 0 {
continue
}
arg0 := ValueResults[ValueResultsIndex]
ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
arg0.PosString()
case *NgoloFuzzOne_ValueNgdotWhyNotExact:
if len(ValueResults) == 0 {
continue
}
arg0 := ValueResults[ValueResultsIndex]
ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
arg0.WhyNotExact()
case *NgoloFuzzOne_ValueNgdotExact:
if len(ValueResults) == 0 {
continue
}
arg0 := ValueResults[ValueResultsIndex]
ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
arg0.Exact()
case *NgoloFuzzOne_ValueNgdotDecode:
if len(ValueResults) == 0 {
continue
}
arg0 := ValueResults[ValueResultsIndex]
ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
r0 := arg0.Decode(a.ValueNgdotDecode.Into)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_ValueNgdotProvenance:
if len(ValueResults) == 0 {
continue
}
arg0 := ValueResults[ValueResultsIndex]
ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
arg0.Provenance()
case *NgoloFuzzOne_ClosureNgdotMarshalYAML:
if len(ClosureResults) == 0 {
continue
}
arg0 := ClosureResults[ClosureResultsIndex]
ClosureResultsIndex = (ClosureResultsIndex + 1) % len(ClosureResults)
_, r1 := arg0.MarshalYAML()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ClosureNgdotString:
if len(ClosureResults) == 0 {
continue
}
arg0 := ClosureResults[ClosureResultsIndex]
ClosureResultsIndex = (ClosureResultsIndex + 1) % len(ClosureResults)
arg0.String()
case *NgoloFuzzOne_ValueNgdotMarshalYAML:
if len(ValueResults) == 0 {
continue
}
arg0 := ValueResults[ValueResultsIndex]
ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
_, r1 := arg0.MarshalYAML()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ValueNgdotString:
if len(ValueResults) == 0 {
continue
}
arg0 := ValueResults[ValueResultsIndex]
ValueResultsIndex = (ValueResultsIndex + 1) % len(ValueResults)
arg0.String()
}
}
return 1
}
// PrintNG_List writes to w a human-readable Go reproducer for the call
// sequence encoded in gen, mirroring the replay logic of the generated
// fuzz driver: a constructor item (NewStringExact) prints an assignment
// and bumps the per-type counter, while a method item prints a call on
// the round-robin-selected receiver and is skipped while no receiver of
// that type has been created yet — exactly as the driver skips it.
//
// NOTE(review): within this chunk only StringNb is ever incremented, so
// the Closure/Def/Tuple/Pos/Value method cases can never print here;
// presumably constructors elsewhere in the generated file populate those
// pools — verify against the full file.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
// <T>Nb = number of <T> values created so far; <T>ResultsIndex = cursor
// of the receiver the next <T> method call is attributed to.
ClosureNb := 0
ClosureResultsIndex := 0
PosNb := 0
PosResultsIndex := 0
DefNb := 0
DefResultsIndex := 0
TupleNb := 0
TupleResultsIndex := 0
StringNb := 0
StringResultsIndex := 0
ValueNb := 0
ValueResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_ClosureNgdotIsBottom:
// continue here advances the for loop (not the switch), matching the
// driver's behavior of skipping calls with no receiver available.
if ClosureNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Closure%d.IsBottom()\n", ClosureResultsIndex))
ClosureResultsIndex = (ClosureResultsIndex + 1) % ClosureNb
case *NgoloFuzzOne_ClosureNgdotSummands:
if ClosureNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Closure%d.Summands()\n", ClosureResultsIndex))
ClosureResultsIndex = (ClosureResultsIndex + 1) % ClosureNb
case *NgoloFuzzOne_ClosureNgdotAll:
if ClosureNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Closure%d.All()\n", ClosureResultsIndex))
ClosureResultsIndex = (ClosureResultsIndex + 1) % ClosureNb
case *NgoloFuzzOne_DefNgdotExact:
if DefNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Def%d.Exact()\n", DefResultsIndex))
DefResultsIndex = (DefResultsIndex + 1) % DefNb
case *NgoloFuzzOne_DefNgdotWhyNotExact:
if DefNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Def%d.WhyNotExact()\n", DefResultsIndex))
DefResultsIndex = (DefResultsIndex + 1) % DefNb
case *NgoloFuzzOne_DefNgdotAll:
if DefNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Def%d.All()\n", DefResultsIndex))
DefResultsIndex = (DefResultsIndex + 1) % DefNb
case *NgoloFuzzOne_TupleNgdotExact:
if TupleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Tuple%d.Exact()\n", TupleResultsIndex))
TupleResultsIndex = (TupleResultsIndex + 1) % TupleNb
case *NgoloFuzzOne_TupleNgdotWhyNotExact:
if TupleNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Tuple%d.WhyNotExact()\n", TupleResultsIndex))
TupleResultsIndex = (TupleResultsIndex + 1) % TupleNb
case *NgoloFuzzOne_NewStringExact:
// Constructor: prints an assignment and grows the String pool.
w.WriteString(fmt.Sprintf("String%d := unify.NewStringExact(%#+v)\n", StringNb, a.NewStringExact.S))
StringNb = StringNb + 1
case *NgoloFuzzOne_StringNgdotExact:
if StringNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("String%d.Exact()\n", StringResultsIndex))
StringResultsIndex = (StringResultsIndex + 1) % StringNb
case *NgoloFuzzOne_StringNgdotWhyNotExact:
if StringNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("String%d.WhyNotExact()\n", StringResultsIndex))
StringResultsIndex = (StringResultsIndex + 1) % StringNb
case *NgoloFuzzOne_PosNgdotString:
if PosNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Pos%d.String()\n", PosResultsIndex))
PosResultsIndex = (PosResultsIndex + 1) % PosNb
case *NgoloFuzzOne_PosNgdotAppendText:
if PosNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Pos%d.AppendText(%#+v)\n", PosResultsIndex, a.PosNgdotAppendText.B))
PosResultsIndex = (PosResultsIndex + 1) % PosNb
case *NgoloFuzzOne_ValueNgdotPos:
if ValueNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Value%d.Pos()\n", ValueResultsIndex))
ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
case *NgoloFuzzOne_ValueNgdotPosString:
if ValueNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Value%d.PosString()\n", ValueResultsIndex))
ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
case *NgoloFuzzOne_ValueNgdotWhyNotExact:
if ValueNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Value%d.WhyNotExact()\n", ValueResultsIndex))
ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
case *NgoloFuzzOne_ValueNgdotExact:
if ValueNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Value%d.Exact()\n", ValueResultsIndex))
ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
case *NgoloFuzzOne_ValueNgdotDecode:
if ValueNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Value%d.Decode(%#+v)\n", ValueResultsIndex, a.ValueNgdotDecode.Into))
ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
case *NgoloFuzzOne_ValueNgdotProvenance:
if ValueNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Value%d.Provenance()\n", ValueResultsIndex))
ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
case *NgoloFuzzOne_ClosureNgdotMarshalYAML:
if ClosureNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Closure%d.MarshalYAML()\n", ClosureResultsIndex))
ClosureResultsIndex = (ClosureResultsIndex + 1) % ClosureNb
case *NgoloFuzzOne_ClosureNgdotString:
if ClosureNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Closure%d.String()\n", ClosureResultsIndex))
ClosureResultsIndex = (ClosureResultsIndex + 1) % ClosureNb
case *NgoloFuzzOne_ValueNgdotMarshalYAML:
if ValueNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Value%d.MarshalYAML()\n", ValueResultsIndex))
ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
case *NgoloFuzzOne_ValueNgdotString:
if ValueNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Value%d.String()\n", ValueResultsIndex))
ValueResultsIndex = (ValueResultsIndex + 1) % ValueNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_simd_archsimd__gen_unify
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards emitted by protoc-gen-go: the build fails if the
// linked protoimpl runtime is outside the version window this generated
// code was produced for.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// The following message types are protoc-gen-go output for the fuzzer's
// per-call argument messages. Each is an empty (field-less) message whose
// only content is protoimpl bookkeeping; protoimpl requires the state
// field to be the first struct field, so the layout must not be changed.
// Each type's msgTypes index matches its Descriptor path index.

// ClosureNgdotIsBottomArgs is the (empty) argument message for a fuzzed
// Closure.IsBottom call.
type ClosureNgdotIsBottomArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ClosureNgdotIsBottomArgs) Reset() {
*x = ClosureNgdotIsBottomArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ClosureNgdotIsBottomArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ClosureNgdotIsBottomArgs) ProtoMessage() {}
func (x *ClosureNgdotIsBottomArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ClosureNgdotIsBottomArgs.ProtoReflect.Descriptor instead.
func (*ClosureNgdotIsBottomArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// ClosureNgdotSummandsArgs is the (empty) argument message for a fuzzed
// Closure.Summands call.
type ClosureNgdotSummandsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ClosureNgdotSummandsArgs) Reset() {
*x = ClosureNgdotSummandsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ClosureNgdotSummandsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ClosureNgdotSummandsArgs) ProtoMessage() {}
func (x *ClosureNgdotSummandsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ClosureNgdotSummandsArgs.ProtoReflect.Descriptor instead.
func (*ClosureNgdotSummandsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// ClosureNgdotAllArgs is the (empty) argument message for a fuzzed
// Closure.All call.
type ClosureNgdotAllArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ClosureNgdotAllArgs) Reset() {
*x = ClosureNgdotAllArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ClosureNgdotAllArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ClosureNgdotAllArgs) ProtoMessage() {}
func (x *ClosureNgdotAllArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ClosureNgdotAllArgs.ProtoReflect.Descriptor instead.
func (*ClosureNgdotAllArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// DefNgdotExactArgs is the (empty) argument message for a fuzzed
// Def.Exact call.
type DefNgdotExactArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DefNgdotExactArgs) Reset() {
*x = DefNgdotExactArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DefNgdotExactArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DefNgdotExactArgs) ProtoMessage() {}
func (x *DefNgdotExactArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DefNgdotExactArgs.ProtoReflect.Descriptor instead.
func (*DefNgdotExactArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// DefNgdotWhyNotExactArgs is the (empty) argument message for a fuzzed
// Def.WhyNotExact call.
type DefNgdotWhyNotExactArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DefNgdotWhyNotExactArgs) Reset() {
*x = DefNgdotWhyNotExactArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DefNgdotWhyNotExactArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DefNgdotWhyNotExactArgs) ProtoMessage() {}
func (x *DefNgdotWhyNotExactArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DefNgdotWhyNotExactArgs.ProtoReflect.Descriptor instead.
func (*DefNgdotWhyNotExactArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// DefNgdotAllArgs is the (empty) argument message for a fuzzed
// Def.All call.
type DefNgdotAllArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DefNgdotAllArgs) Reset() {
*x = DefNgdotAllArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DefNgdotAllArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DefNgdotAllArgs) ProtoMessage() {}
func (x *DefNgdotAllArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DefNgdotAllArgs.ProtoReflect.Descriptor instead.
func (*DefNgdotAllArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// TupleNgdotExactArgs is the (empty) argument message for a fuzzed
// Tuple.Exact call.
type TupleNgdotExactArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TupleNgdotExactArgs) Reset() {
*x = TupleNgdotExactArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TupleNgdotExactArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TupleNgdotExactArgs) ProtoMessage() {}
func (x *TupleNgdotExactArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TupleNgdotExactArgs.ProtoReflect.Descriptor instead.
func (*TupleNgdotExactArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// TupleNgdotWhyNotExactArgs is the (empty) argument message for a fuzzed
// Tuple.WhyNotExact call.
type TupleNgdotWhyNotExactArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TupleNgdotWhyNotExactArgs) Reset() {
*x = TupleNgdotWhyNotExactArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TupleNgdotWhyNotExactArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TupleNgdotWhyNotExactArgs) ProtoMessage() {}
func (x *TupleNgdotWhyNotExactArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TupleNgdotWhyNotExactArgs.ProtoReflect.Descriptor instead.
func (*TupleNgdotWhyNotExactArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// NewStringExactArgs is the argument message for a fuzzed
// unify.NewStringExact call; S carries the string passed to the
// constructor.
type NewStringExactArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewStringExactArgs) Reset() {
*x = NewStringExactArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewStringExactArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewStringExactArgs) ProtoMessage() {}
func (x *NewStringExactArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewStringExactArgs.ProtoReflect.Descriptor instead.
func (*NewStringExactArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// GetS returns the S field, or "" for a nil receiver (standard
// generated nil-safe accessor).
func (x *NewStringExactArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// StringNgdotExactArgs is the (empty) argument message for a fuzzed
// String.Exact call.
type StringNgdotExactArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *StringNgdotExactArgs) Reset() {
*x = StringNgdotExactArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *StringNgdotExactArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StringNgdotExactArgs) ProtoMessage() {}
func (x *StringNgdotExactArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StringNgdotExactArgs.ProtoReflect.Descriptor instead.
func (*StringNgdotExactArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// StringNgdotWhyNotExactArgs is the (empty) argument message for a fuzzed
// String.WhyNotExact call.
type StringNgdotWhyNotExactArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *StringNgdotWhyNotExactArgs) Reset() {
*x = StringNgdotWhyNotExactArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *StringNgdotWhyNotExactArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StringNgdotWhyNotExactArgs) ProtoMessage() {}
func (x *StringNgdotWhyNotExactArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StringNgdotWhyNotExactArgs.ProtoReflect.Descriptor instead.
func (*StringNgdotWhyNotExactArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// PosNgdotStringArgs is the (empty) argument message for a fuzzed
// Pos.String call.
type PosNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PosNgdotStringArgs) Reset() {
*x = PosNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PosNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PosNgdotStringArgs) ProtoMessage() {}
func (x *PosNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PosNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*PosNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// PosNgdotAppendTextArgs is the argument message for a fuzzed
// Pos.AppendText call; B carries the byte slice passed to AppendText.
type PosNgdotAppendTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PosNgdotAppendTextArgs) Reset() {
*x = PosNgdotAppendTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PosNgdotAppendTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PosNgdotAppendTextArgs) ProtoMessage() {}
func (x *PosNgdotAppendTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PosNgdotAppendTextArgs.ProtoReflect.Descriptor instead.
func (*PosNgdotAppendTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// GetB returns the B field, or nil for a nil receiver (standard
// generated nil-safe accessor).
func (x *PosNgdotAppendTextArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// ValueNgdotPosArgs is the (empty) argument message for a fuzzed
// Value.Pos call.
type ValueNgdotPosArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValueNgdotPosArgs) Reset() {
*x = ValueNgdotPosArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ValueNgdotPosArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValueNgdotPosArgs) ProtoMessage() {}
func (x *ValueNgdotPosArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValueNgdotPosArgs.ProtoReflect.Descriptor instead.
func (*ValueNgdotPosArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// ValueNgdotPosStringArgs is the (empty) argument message for a fuzzed
// Value.PosString call.
type ValueNgdotPosStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValueNgdotPosStringArgs) Reset() {
*x = ValueNgdotPosStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ValueNgdotPosStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValueNgdotPosStringArgs) ProtoMessage() {}
func (x *ValueNgdotPosStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValueNgdotPosStringArgs.ProtoReflect.Descriptor instead.
func (*ValueNgdotPosStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// ValueNgdotWhyNotExactArgs is the (empty) argument message for a fuzzed
// Value.WhyNotExact call.
type ValueNgdotWhyNotExactArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValueNgdotWhyNotExactArgs) Reset() {
*x = ValueNgdotWhyNotExactArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ValueNgdotWhyNotExactArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValueNgdotWhyNotExactArgs) ProtoMessage() {}
func (x *ValueNgdotWhyNotExactArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValueNgdotWhyNotExactArgs.ProtoReflect.Descriptor instead.
func (*ValueNgdotWhyNotExactArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// ValueNgdotExactArgs is the (empty) argument message for a fuzzed
// Value.Exact call.
type ValueNgdotExactArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValueNgdotExactArgs) Reset() {
*x = ValueNgdotExactArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ValueNgdotExactArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValueNgdotExactArgs) ProtoMessage() {}
func (x *ValueNgdotExactArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValueNgdotExactArgs.ProtoReflect.Descriptor instead.
func (*ValueNgdotExactArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// ValueNgdotDecodeArgs is the argument message for a fuzzed Value.Decode
// call; Into carries the fuzz-generated decode target.
type ValueNgdotDecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Into *NgoloFuzzAny `protobuf:"bytes,1,opt,name=into,proto3" json:"into,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValueNgdotDecodeArgs) Reset() {
*x = ValueNgdotDecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ValueNgdotDecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValueNgdotDecodeArgs) ProtoMessage() {}
func (x *ValueNgdotDecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValueNgdotDecodeArgs.ProtoReflect.Descriptor instead.
func (*ValueNgdotDecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// GetInto returns the Into field, or nil for a nil receiver (standard
// generated nil-safe accessor).
func (x *ValueNgdotDecodeArgs) GetInto() *NgoloFuzzAny {
if x != nil {
return x.Into
}
return nil
}
// ValueNgdotProvenanceArgs is the (empty) argument message for a fuzzed
// Value.Provenance call.
type ValueNgdotProvenanceArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValueNgdotProvenanceArgs) Reset() {
*x = ValueNgdotProvenanceArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ValueNgdotProvenanceArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValueNgdotProvenanceArgs) ProtoMessage() {}
func (x *ValueNgdotProvenanceArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValueNgdotProvenanceArgs.ProtoReflect.Descriptor instead.
func (*ValueNgdotProvenanceArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}

// ClosureNgdotMarshalYAMLArgs is the (empty) argument message for a fuzzed
// Closure.MarshalYAML call.
type ClosureNgdotMarshalYAMLArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ClosureNgdotMarshalYAMLArgs) Reset() {
*x = ClosureNgdotMarshalYAMLArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ClosureNgdotMarshalYAMLArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ClosureNgdotMarshalYAMLArgs) ProtoMessage() {}
func (x *ClosureNgdotMarshalYAMLArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ClosureNgdotMarshalYAMLArgs.ProtoReflect.Descriptor instead.
func (*ClosureNgdotMarshalYAMLArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}

// ClosureNgdotStringArgs is the (empty) argument message for a fuzzed
// Closure.String call.
type ClosureNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ClosureNgdotStringArgs) Reset() {
*x = ClosureNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ClosureNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ClosureNgdotStringArgs) ProtoMessage() {}
func (x *ClosureNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ClosureNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*ClosureNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}

// ValueNgdotMarshalYAMLArgs is the (empty) argument message for a fuzzed
// Value.MarshalYAML call.
type ValueNgdotMarshalYAMLArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValueNgdotMarshalYAMLArgs) Reset() {
*x = ValueNgdotMarshalYAMLArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ValueNgdotMarshalYAMLArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValueNgdotMarshalYAMLArgs) ProtoMessage() {}
func (x *ValueNgdotMarshalYAMLArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValueNgdotMarshalYAMLArgs.ProtoReflect.Descriptor instead.
func (*ValueNgdotMarshalYAMLArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}

// ValueNgdotStringArgs is the (empty) argument message for a fuzzed
// Value.String call.
type ValueNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ValueNgdotStringArgs) Reset() {
*x = ValueNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ValueNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ValueNgdotStringArgs) ProtoMessage() {}
func (x *ValueNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ValueNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*ValueNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// NgoloFuzzOne wraps a single fuzzed operation as a protobuf oneof: Item
// holds exactly one of the per-call argument messages listed below. Each
// generated getter is nil-safe and returns nil unless Item currently
// holds that variant.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_ClosureNgdotIsBottom
// *NgoloFuzzOne_ClosureNgdotSummands
// *NgoloFuzzOne_ClosureNgdotAll
// *NgoloFuzzOne_DefNgdotExact
// *NgoloFuzzOne_DefNgdotWhyNotExact
// *NgoloFuzzOne_DefNgdotAll
// *NgoloFuzzOne_TupleNgdotExact
// *NgoloFuzzOne_TupleNgdotWhyNotExact
// *NgoloFuzzOne_NewStringExact
// *NgoloFuzzOne_StringNgdotExact
// *NgoloFuzzOne_StringNgdotWhyNotExact
// *NgoloFuzzOne_PosNgdotString
// *NgoloFuzzOne_PosNgdotAppendText
// *NgoloFuzzOne_ValueNgdotPos
// *NgoloFuzzOne_ValueNgdotPosString
// *NgoloFuzzOne_ValueNgdotWhyNotExact
// *NgoloFuzzOne_ValueNgdotExact
// *NgoloFuzzOne_ValueNgdotDecode
// *NgoloFuzzOne_ValueNgdotProvenance
// *NgoloFuzzOne_ClosureNgdotMarshalYAML
// *NgoloFuzzOne_ClosureNgdotString
// *NgoloFuzzOne_ValueNgdotMarshalYAML
// *NgoloFuzzOne_ValueNgdotString
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// GetItem returns the oneof wrapper currently stored in Item (nil-safe).
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetClosureNgdotIsBottom() *ClosureNgdotIsBottomArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ClosureNgdotIsBottom); ok {
return x.ClosureNgdotIsBottom
}
}
return nil
}
func (x *NgoloFuzzOne) GetClosureNgdotSummands() *ClosureNgdotSummandsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ClosureNgdotSummands); ok {
return x.ClosureNgdotSummands
}
}
return nil
}
func (x *NgoloFuzzOne) GetClosureNgdotAll() *ClosureNgdotAllArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ClosureNgdotAll); ok {
return x.ClosureNgdotAll
}
}
return nil
}
func (x *NgoloFuzzOne) GetDefNgdotExact() *DefNgdotExactArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DefNgdotExact); ok {
return x.DefNgdotExact
}
}
return nil
}
func (x *NgoloFuzzOne) GetDefNgdotWhyNotExact() *DefNgdotWhyNotExactArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DefNgdotWhyNotExact); ok {
return x.DefNgdotWhyNotExact
}
}
return nil
}
func (x *NgoloFuzzOne) GetDefNgdotAll() *DefNgdotAllArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DefNgdotAll); ok {
return x.DefNgdotAll
}
}
return nil
}
func (x *NgoloFuzzOne) GetTupleNgdotExact() *TupleNgdotExactArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TupleNgdotExact); ok {
return x.TupleNgdotExact
}
}
return nil
}
func (x *NgoloFuzzOne) GetTupleNgdotWhyNotExact() *TupleNgdotWhyNotExactArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TupleNgdotWhyNotExact); ok {
return x.TupleNgdotWhyNotExact
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewStringExact() *NewStringExactArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewStringExact); ok {
return x.NewStringExact
}
}
return nil
}
func (x *NgoloFuzzOne) GetStringNgdotExact() *StringNgdotExactArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_StringNgdotExact); ok {
return x.StringNgdotExact
}
}
return nil
}
func (x *NgoloFuzzOne) GetStringNgdotWhyNotExact() *StringNgdotWhyNotExactArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_StringNgdotWhyNotExact); ok {
return x.StringNgdotWhyNotExact
}
}
return nil
}
func (x *NgoloFuzzOne) GetPosNgdotString() *PosNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PosNgdotString); ok {
return x.PosNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetPosNgdotAppendText() *PosNgdotAppendTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PosNgdotAppendText); ok {
return x.PosNgdotAppendText
}
}
return nil
}
func (x *NgoloFuzzOne) GetValueNgdotPos() *ValueNgdotPosArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValueNgdotPos); ok {
return x.ValueNgdotPos
}
}
return nil
}
func (x *NgoloFuzzOne) GetValueNgdotPosString() *ValueNgdotPosStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValueNgdotPosString); ok {
return x.ValueNgdotPosString
}
}
return nil
}
func (x *NgoloFuzzOne) GetValueNgdotWhyNotExact() *ValueNgdotWhyNotExactArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValueNgdotWhyNotExact); ok {
return x.ValueNgdotWhyNotExact
}
}
return nil
}
func (x *NgoloFuzzOne) GetValueNgdotExact() *ValueNgdotExactArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValueNgdotExact); ok {
return x.ValueNgdotExact
}
}
return nil
}
func (x *NgoloFuzzOne) GetValueNgdotDecode() *ValueNgdotDecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValueNgdotDecode); ok {
return x.ValueNgdotDecode
}
}
return nil
}
func (x *NgoloFuzzOne) GetValueNgdotProvenance() *ValueNgdotProvenanceArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValueNgdotProvenance); ok {
return x.ValueNgdotProvenance
}
}
return nil
}
func (x *NgoloFuzzOne) GetClosureNgdotMarshalYAML() *ClosureNgdotMarshalYAMLArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ClosureNgdotMarshalYAML); ok {
return x.ClosureNgdotMarshalYAML
}
}
return nil
}
func (x *NgoloFuzzOne) GetClosureNgdotString() *ClosureNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ClosureNgdotString); ok {
return x.ClosureNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetValueNgdotMarshalYAML() *ValueNgdotMarshalYAMLArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValueNgdotMarshalYAML); ok {
return x.ValueNgdotMarshalYAML
}
}
return nil
}
func (x *NgoloFuzzOne) GetValueNgdotString() *ValueNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ValueNgdotString); ok {
return x.ValueNgdotString
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_ClosureNgdotIsBottom struct {
ClosureNgdotIsBottom *ClosureNgdotIsBottomArgs `protobuf:"bytes,1,opt,name=ClosureNgdotIsBottom,proto3,oneof"`
}
type NgoloFuzzOne_ClosureNgdotSummands struct {
ClosureNgdotSummands *ClosureNgdotSummandsArgs `protobuf:"bytes,2,opt,name=ClosureNgdotSummands,proto3,oneof"`
}
type NgoloFuzzOne_ClosureNgdotAll struct {
ClosureNgdotAll *ClosureNgdotAllArgs `protobuf:"bytes,3,opt,name=ClosureNgdotAll,proto3,oneof"`
}
type NgoloFuzzOne_DefNgdotExact struct {
DefNgdotExact *DefNgdotExactArgs `protobuf:"bytes,4,opt,name=DefNgdotExact,proto3,oneof"`
}
type NgoloFuzzOne_DefNgdotWhyNotExact struct {
DefNgdotWhyNotExact *DefNgdotWhyNotExactArgs `protobuf:"bytes,5,opt,name=DefNgdotWhyNotExact,proto3,oneof"`
}
type NgoloFuzzOne_DefNgdotAll struct {
DefNgdotAll *DefNgdotAllArgs `protobuf:"bytes,6,opt,name=DefNgdotAll,proto3,oneof"`
}
type NgoloFuzzOne_TupleNgdotExact struct {
TupleNgdotExact *TupleNgdotExactArgs `protobuf:"bytes,7,opt,name=TupleNgdotExact,proto3,oneof"`
}
type NgoloFuzzOne_TupleNgdotWhyNotExact struct {
TupleNgdotWhyNotExact *TupleNgdotWhyNotExactArgs `protobuf:"bytes,8,opt,name=TupleNgdotWhyNotExact,proto3,oneof"`
}
type NgoloFuzzOne_NewStringExact struct {
NewStringExact *NewStringExactArgs `protobuf:"bytes,9,opt,name=NewStringExact,proto3,oneof"`
}
type NgoloFuzzOne_StringNgdotExact struct {
StringNgdotExact *StringNgdotExactArgs `protobuf:"bytes,10,opt,name=StringNgdotExact,proto3,oneof"`
}
type NgoloFuzzOne_StringNgdotWhyNotExact struct {
StringNgdotWhyNotExact *StringNgdotWhyNotExactArgs `protobuf:"bytes,11,opt,name=StringNgdotWhyNotExact,proto3,oneof"`
}
type NgoloFuzzOne_PosNgdotString struct {
PosNgdotString *PosNgdotStringArgs `protobuf:"bytes,12,opt,name=PosNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_PosNgdotAppendText struct {
PosNgdotAppendText *PosNgdotAppendTextArgs `protobuf:"bytes,13,opt,name=PosNgdotAppendText,proto3,oneof"`
}
type NgoloFuzzOne_ValueNgdotPos struct {
ValueNgdotPos *ValueNgdotPosArgs `protobuf:"bytes,14,opt,name=ValueNgdotPos,proto3,oneof"`
}
type NgoloFuzzOne_ValueNgdotPosString struct {
ValueNgdotPosString *ValueNgdotPosStringArgs `protobuf:"bytes,15,opt,name=ValueNgdotPosString,proto3,oneof"`
}
type NgoloFuzzOne_ValueNgdotWhyNotExact struct {
ValueNgdotWhyNotExact *ValueNgdotWhyNotExactArgs `protobuf:"bytes,16,opt,name=ValueNgdotWhyNotExact,proto3,oneof"`
}
type NgoloFuzzOne_ValueNgdotExact struct {
ValueNgdotExact *ValueNgdotExactArgs `protobuf:"bytes,17,opt,name=ValueNgdotExact,proto3,oneof"`
}
type NgoloFuzzOne_ValueNgdotDecode struct {
ValueNgdotDecode *ValueNgdotDecodeArgs `protobuf:"bytes,18,opt,name=ValueNgdotDecode,proto3,oneof"`
}
type NgoloFuzzOne_ValueNgdotProvenance struct {
ValueNgdotProvenance *ValueNgdotProvenanceArgs `protobuf:"bytes,19,opt,name=ValueNgdotProvenance,proto3,oneof"`
}
type NgoloFuzzOne_ClosureNgdotMarshalYAML struct {
ClosureNgdotMarshalYAML *ClosureNgdotMarshalYAMLArgs `protobuf:"bytes,20,opt,name=ClosureNgdotMarshalYAML,proto3,oneof"`
}
type NgoloFuzzOne_ClosureNgdotString struct {
ClosureNgdotString *ClosureNgdotStringArgs `protobuf:"bytes,21,opt,name=ClosureNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_ValueNgdotMarshalYAML struct {
ValueNgdotMarshalYAML *ValueNgdotMarshalYAMLArgs `protobuf:"bytes,22,opt,name=ValueNgdotMarshalYAML,proto3,oneof"`
}
type NgoloFuzzOne_ValueNgdotString struct {
ValueNgdotString *ValueNgdotStringArgs `protobuf:"bytes,23,opt,name=ValueNgdotString,proto3,oneof"`
}
func (*NgoloFuzzOne_ClosureNgdotIsBottom) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ClosureNgdotSummands) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ClosureNgdotAll) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DefNgdotExact) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DefNgdotWhyNotExact) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DefNgdotAll) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TupleNgdotExact) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TupleNgdotWhyNotExact) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewStringExact) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_StringNgdotExact) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_StringNgdotWhyNotExact) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PosNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PosNgdotAppendText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValueNgdotPos) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValueNgdotPosString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValueNgdotWhyNotExact) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValueNgdotExact) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValueNgdotDecode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValueNgdotProvenance) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ClosureNgdotMarshalYAML) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ClosureNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValueNgdotMarshalYAML) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ValueNgdotString) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated protobuf message holding one scalar value
// (double, int64, bool, string, or bytes) in a oneof; the fuzzer uses it as a
// generic "any primitive" argument.
// NOTE(review): protoc-gen-go output — do not hand-edit.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
// Scalar getters: return the oneof payload when set, otherwise the zero
// value; nil-receiver safe.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the generated top-level message the fuzz entry points
// unmarshal: a repeated sequence of NgoloFuzzOne calls to replay in order.
// NOTE(review): protoc-gen-go output — do not hand-edit.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
// File_ngolofuzz_proto is the compiled FileDescriptor, populated by
// file_ngolofuzz_proto_init below.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// Wire-format encoding of ngolofuzz.proto's FileDescriptorProto.
// NOTE(review): generated data — never edit these escape sequences by hand.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1a\n" +
"\x18ClosureNgdotIsBottomArgs\"\x1a\n" +
"\x18ClosureNgdotSummandsArgs\"\x15\n" +
"\x13ClosureNgdotAllArgs\"\x13\n" +
"\x11DefNgdotExactArgs\"\x19\n" +
"\x17DefNgdotWhyNotExactArgs\"\x11\n" +
"\x0fDefNgdotAllArgs\"\x15\n" +
"\x13TupleNgdotExactArgs\"\x1b\n" +
"\x19TupleNgdotWhyNotExactArgs\"\"\n" +
"\x12NewStringExactArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x16\n" +
"\x14StringNgdotExactArgs\"\x1c\n" +
"\x1aStringNgdotWhyNotExactArgs\"\x14\n" +
"\x12PosNgdotStringArgs\"&\n" +
"\x16PosNgdotAppendTextArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"\x13\n" +
"\x11ValueNgdotPosArgs\"\x19\n" +
"\x17ValueNgdotPosStringArgs\"\x1b\n" +
"\x19ValueNgdotWhyNotExactArgs\"\x15\n" +
"\x13ValueNgdotExactArgs\"C\n" +
"\x14ValueNgdotDecodeArgs\x12+\n" +
"\x04into\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x04into\"\x1a\n" +
"\x18ValueNgdotProvenanceArgs\"\x1d\n" +
"\x1bClosureNgdotMarshalYAMLArgs\"\x18\n" +
"\x16ClosureNgdotStringArgs\"\x1b\n" +
"\x19ValueNgdotMarshalYAMLArgs\"\x16\n" +
"\x14ValueNgdotStringArgs\"\x8f\x0f\n" +
"\fNgoloFuzzOne\x12Y\n" +
"\x14ClosureNgdotIsBottom\x18\x01 \x01(\v2#.ngolofuzz.ClosureNgdotIsBottomArgsH\x00R\x14ClosureNgdotIsBottom\x12Y\n" +
"\x14ClosureNgdotSummands\x18\x02 \x01(\v2#.ngolofuzz.ClosureNgdotSummandsArgsH\x00R\x14ClosureNgdotSummands\x12J\n" +
"\x0fClosureNgdotAll\x18\x03 \x01(\v2\x1e.ngolofuzz.ClosureNgdotAllArgsH\x00R\x0fClosureNgdotAll\x12D\n" +
"\rDefNgdotExact\x18\x04 \x01(\v2\x1c.ngolofuzz.DefNgdotExactArgsH\x00R\rDefNgdotExact\x12V\n" +
"\x13DefNgdotWhyNotExact\x18\x05 \x01(\v2\".ngolofuzz.DefNgdotWhyNotExactArgsH\x00R\x13DefNgdotWhyNotExact\x12>\n" +
"\vDefNgdotAll\x18\x06 \x01(\v2\x1a.ngolofuzz.DefNgdotAllArgsH\x00R\vDefNgdotAll\x12J\n" +
"\x0fTupleNgdotExact\x18\a \x01(\v2\x1e.ngolofuzz.TupleNgdotExactArgsH\x00R\x0fTupleNgdotExact\x12\\\n" +
"\x15TupleNgdotWhyNotExact\x18\b \x01(\v2$.ngolofuzz.TupleNgdotWhyNotExactArgsH\x00R\x15TupleNgdotWhyNotExact\x12G\n" +
"\x0eNewStringExact\x18\t \x01(\v2\x1d.ngolofuzz.NewStringExactArgsH\x00R\x0eNewStringExact\x12M\n" +
"\x10StringNgdotExact\x18\n" +
" \x01(\v2\x1f.ngolofuzz.StringNgdotExactArgsH\x00R\x10StringNgdotExact\x12_\n" +
"\x16StringNgdotWhyNotExact\x18\v \x01(\v2%.ngolofuzz.StringNgdotWhyNotExactArgsH\x00R\x16StringNgdotWhyNotExact\x12G\n" +
"\x0ePosNgdotString\x18\f \x01(\v2\x1d.ngolofuzz.PosNgdotStringArgsH\x00R\x0ePosNgdotString\x12S\n" +
"\x12PosNgdotAppendText\x18\r \x01(\v2!.ngolofuzz.PosNgdotAppendTextArgsH\x00R\x12PosNgdotAppendText\x12D\n" +
"\rValueNgdotPos\x18\x0e \x01(\v2\x1c.ngolofuzz.ValueNgdotPosArgsH\x00R\rValueNgdotPos\x12V\n" +
"\x13ValueNgdotPosString\x18\x0f \x01(\v2\".ngolofuzz.ValueNgdotPosStringArgsH\x00R\x13ValueNgdotPosString\x12\\\n" +
"\x15ValueNgdotWhyNotExact\x18\x10 \x01(\v2$.ngolofuzz.ValueNgdotWhyNotExactArgsH\x00R\x15ValueNgdotWhyNotExact\x12J\n" +
"\x0fValueNgdotExact\x18\x11 \x01(\v2\x1e.ngolofuzz.ValueNgdotExactArgsH\x00R\x0fValueNgdotExact\x12M\n" +
"\x10ValueNgdotDecode\x18\x12 \x01(\v2\x1f.ngolofuzz.ValueNgdotDecodeArgsH\x00R\x10ValueNgdotDecode\x12Y\n" +
"\x14ValueNgdotProvenance\x18\x13 \x01(\v2#.ngolofuzz.ValueNgdotProvenanceArgsH\x00R\x14ValueNgdotProvenance\x12b\n" +
"\x17ClosureNgdotMarshalYAML\x18\x14 \x01(\v2&.ngolofuzz.ClosureNgdotMarshalYAMLArgsH\x00R\x17ClosureNgdotMarshalYAML\x12S\n" +
"\x12ClosureNgdotString\x18\x15 \x01(\v2!.ngolofuzz.ClosureNgdotStringArgsH\x00R\x12ClosureNgdotString\x12\\\n" +
"\x15ValueNgdotMarshalYAML\x18\x16 \x01(\v2$.ngolofuzz.ValueNgdotMarshalYAMLArgsH\x00R\x15ValueNgdotMarshalYAML\x12M\n" +
"\x10ValueNgdotString\x18\x17 \x01(\v2\x1f.ngolofuzz.ValueNgdotStringArgsH\x00R\x10ValueNgdotStringB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB%Z#./;fuzz_ng_simd_archsimd__gen_unifyb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses rawDesc once (via
// sync.Once) and returns the cached bytes for Descriptor() calls.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Generated lookup tables consumed by the protoimpl.TypeBuilder in init:
// message infos, Go type list, and dependency index sub-lists.
// NOTE(review): protoc-gen-go output — do not hand-edit.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 26)
var file_ngolofuzz_proto_goTypes = []any{
(*ClosureNgdotIsBottomArgs)(nil), // 0: ngolofuzz.ClosureNgdotIsBottomArgs
(*ClosureNgdotSummandsArgs)(nil), // 1: ngolofuzz.ClosureNgdotSummandsArgs
(*ClosureNgdotAllArgs)(nil), // 2: ngolofuzz.ClosureNgdotAllArgs
(*DefNgdotExactArgs)(nil), // 3: ngolofuzz.DefNgdotExactArgs
(*DefNgdotWhyNotExactArgs)(nil), // 4: ngolofuzz.DefNgdotWhyNotExactArgs
(*DefNgdotAllArgs)(nil), // 5: ngolofuzz.DefNgdotAllArgs
(*TupleNgdotExactArgs)(nil), // 6: ngolofuzz.TupleNgdotExactArgs
(*TupleNgdotWhyNotExactArgs)(nil), // 7: ngolofuzz.TupleNgdotWhyNotExactArgs
(*NewStringExactArgs)(nil), // 8: ngolofuzz.NewStringExactArgs
(*StringNgdotExactArgs)(nil), // 9: ngolofuzz.StringNgdotExactArgs
(*StringNgdotWhyNotExactArgs)(nil), // 10: ngolofuzz.StringNgdotWhyNotExactArgs
(*PosNgdotStringArgs)(nil), // 11: ngolofuzz.PosNgdotStringArgs
(*PosNgdotAppendTextArgs)(nil), // 12: ngolofuzz.PosNgdotAppendTextArgs
(*ValueNgdotPosArgs)(nil), // 13: ngolofuzz.ValueNgdotPosArgs
(*ValueNgdotPosStringArgs)(nil), // 14: ngolofuzz.ValueNgdotPosStringArgs
(*ValueNgdotWhyNotExactArgs)(nil), // 15: ngolofuzz.ValueNgdotWhyNotExactArgs
(*ValueNgdotExactArgs)(nil), // 16: ngolofuzz.ValueNgdotExactArgs
(*ValueNgdotDecodeArgs)(nil), // 17: ngolofuzz.ValueNgdotDecodeArgs
(*ValueNgdotProvenanceArgs)(nil), // 18: ngolofuzz.ValueNgdotProvenanceArgs
(*ClosureNgdotMarshalYAMLArgs)(nil), // 19: ngolofuzz.ClosureNgdotMarshalYAMLArgs
(*ClosureNgdotStringArgs)(nil), // 20: ngolofuzz.ClosureNgdotStringArgs
(*ValueNgdotMarshalYAMLArgs)(nil), // 21: ngolofuzz.ValueNgdotMarshalYAMLArgs
(*ValueNgdotStringArgs)(nil), // 22: ngolofuzz.ValueNgdotStringArgs
(*NgoloFuzzOne)(nil), // 23: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 24: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 25: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
24, // 0: ngolofuzz.ValueNgdotDecodeArgs.into:type_name -> ngolofuzz.NgoloFuzzAny
0, // 1: ngolofuzz.NgoloFuzzOne.ClosureNgdotIsBottom:type_name -> ngolofuzz.ClosureNgdotIsBottomArgs
1, // 2: ngolofuzz.NgoloFuzzOne.ClosureNgdotSummands:type_name -> ngolofuzz.ClosureNgdotSummandsArgs
2, // 3: ngolofuzz.NgoloFuzzOne.ClosureNgdotAll:type_name -> ngolofuzz.ClosureNgdotAllArgs
3, // 4: ngolofuzz.NgoloFuzzOne.DefNgdotExact:type_name -> ngolofuzz.DefNgdotExactArgs
4, // 5: ngolofuzz.NgoloFuzzOne.DefNgdotWhyNotExact:type_name -> ngolofuzz.DefNgdotWhyNotExactArgs
5, // 6: ngolofuzz.NgoloFuzzOne.DefNgdotAll:type_name -> ngolofuzz.DefNgdotAllArgs
6, // 7: ngolofuzz.NgoloFuzzOne.TupleNgdotExact:type_name -> ngolofuzz.TupleNgdotExactArgs
7, // 8: ngolofuzz.NgoloFuzzOne.TupleNgdotWhyNotExact:type_name -> ngolofuzz.TupleNgdotWhyNotExactArgs
8, // 9: ngolofuzz.NgoloFuzzOne.NewStringExact:type_name -> ngolofuzz.NewStringExactArgs
9, // 10: ngolofuzz.NgoloFuzzOne.StringNgdotExact:type_name -> ngolofuzz.StringNgdotExactArgs
10, // 11: ngolofuzz.NgoloFuzzOne.StringNgdotWhyNotExact:type_name -> ngolofuzz.StringNgdotWhyNotExactArgs
11, // 12: ngolofuzz.NgoloFuzzOne.PosNgdotString:type_name -> ngolofuzz.PosNgdotStringArgs
12, // 13: ngolofuzz.NgoloFuzzOne.PosNgdotAppendText:type_name -> ngolofuzz.PosNgdotAppendTextArgs
13, // 14: ngolofuzz.NgoloFuzzOne.ValueNgdotPos:type_name -> ngolofuzz.ValueNgdotPosArgs
14, // 15: ngolofuzz.NgoloFuzzOne.ValueNgdotPosString:type_name -> ngolofuzz.ValueNgdotPosStringArgs
15, // 16: ngolofuzz.NgoloFuzzOne.ValueNgdotWhyNotExact:type_name -> ngolofuzz.ValueNgdotWhyNotExactArgs
16, // 17: ngolofuzz.NgoloFuzzOne.ValueNgdotExact:type_name -> ngolofuzz.ValueNgdotExactArgs
17, // 18: ngolofuzz.NgoloFuzzOne.ValueNgdotDecode:type_name -> ngolofuzz.ValueNgdotDecodeArgs
18, // 19: ngolofuzz.NgoloFuzzOne.ValueNgdotProvenance:type_name -> ngolofuzz.ValueNgdotProvenanceArgs
19, // 20: ngolofuzz.NgoloFuzzOne.ClosureNgdotMarshalYAML:type_name -> ngolofuzz.ClosureNgdotMarshalYAMLArgs
20, // 21: ngolofuzz.NgoloFuzzOne.ClosureNgdotString:type_name -> ngolofuzz.ClosureNgdotStringArgs
21, // 22: ngolofuzz.NgoloFuzzOne.ValueNgdotMarshalYAML:type_name -> ngolofuzz.ValueNgdotMarshalYAMLArgs
22, // 23: ngolofuzz.NgoloFuzzOne.ValueNgdotString:type_name -> ngolofuzz.ValueNgdotStringArgs
23, // 24: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
25, // [25:25] is the sub-list for method output_type
25, // [25:25] is the sub-list for method input_type
25, // [25:25] is the sub-list for extension type_name
25, // [25:25] is the sub-list for extension extendee
0, // [0:25] is the sub-list for field type_name
}
// init registers the generated file descriptor and message types with the
// protobuf runtime exactly once (guarded by the File_ngolofuzz_proto check).
// NOTE(review): protoc-gen-go output — do not hand-edit.
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Register the oneof wrapper types for NgoloFuzzOne (msgTypes[23]).
file_ngolofuzz_proto_msgTypes[23].OneofWrappers = []any{
(*NgoloFuzzOne_ClosureNgdotIsBottom)(nil),
(*NgoloFuzzOne_ClosureNgdotSummands)(nil),
(*NgoloFuzzOne_ClosureNgdotAll)(nil),
(*NgoloFuzzOne_DefNgdotExact)(nil),
(*NgoloFuzzOne_DefNgdotWhyNotExact)(nil),
(*NgoloFuzzOne_DefNgdotAll)(nil),
(*NgoloFuzzOne_TupleNgdotExact)(nil),
(*NgoloFuzzOne_TupleNgdotWhyNotExact)(nil),
(*NgoloFuzzOne_NewStringExact)(nil),
(*NgoloFuzzOne_StringNgdotExact)(nil),
(*NgoloFuzzOne_StringNgdotWhyNotExact)(nil),
(*NgoloFuzzOne_PosNgdotString)(nil),
(*NgoloFuzzOne_PosNgdotAppendText)(nil),
(*NgoloFuzzOne_ValueNgdotPos)(nil),
(*NgoloFuzzOne_ValueNgdotPosString)(nil),
(*NgoloFuzzOne_ValueNgdotWhyNotExact)(nil),
(*NgoloFuzzOne_ValueNgdotExact)(nil),
(*NgoloFuzzOne_ValueNgdotDecode)(nil),
(*NgoloFuzzOne_ValueNgdotProvenance)(nil),
(*NgoloFuzzOne_ClosureNgdotMarshalYAML)(nil),
(*NgoloFuzzOne_ClosureNgdotString)(nil),
(*NgoloFuzzOne_ValueNgdotMarshalYAML)(nil),
(*NgoloFuzzOne_ValueNgdotString)(nil),
}
// Register the oneof wrapper types for NgoloFuzzAny (msgTypes[24]).
file_ngolofuzz_proto_msgTypes[24].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 26,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release the construction-only tables so they can be collected.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_strconv
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"strconv"
"time"
)
// FuzzingConn is a fake net.Conn whose Read side replays a fixed byte slice
// (the fuzzer-supplied input) and whose Write side discards everything.
type FuzzingConn struct {
	buf    []byte // full input to replay
	offset int    // number of bytes already consumed from buf
}

// Read copies the next unread bytes of c.buf into b, following the io.Reader
// contract: it returns the number of bytes actually copied and io.EOF once
// the buffer is exhausted.
//
// BUG FIX: the original partial-read branch tested
// len(b) < len(c.buf)+c.offset instead of len(c.buf)-c.offset, so after a
// prior read it could return n == len(b) while copying fewer bytes (leaving
// stale data in b) and advance offset past the end of buf. Using the
// built-in copy handles both the partial and final read uniformly.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends the whole of b was sent: the fake peer never reads, so the
// data is simply discarded and the full length reported as written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the connection drained by jumping offset to the end of buf, so
// any subsequent Read reports EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a stub net.Addr with fixed network and address strings,
// used for FuzzingConn's LocalAddr/RemoteAddr.
type FuzzingAddr struct{}
// Network returns a fixed placeholder network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
// String returns a fixed placeholder address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr returns the stub address; required to satisfy net.Conn.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns the stub address; required to satisfy net.Conn.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op: the in-memory conn can never block.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op (see SetDeadline).
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op (see SetDeadline).
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn whose
// Read side will replay them from the start.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer and returns it
// as a *big.Int (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over the bytes in a.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to the platform int, returning a
// new slice of the same length (values may truncate on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (modulo 2^16),
// returning a new slice of the same length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL for an empty string. An
// invalid leading UTF-8 byte decodes to utf8.RuneError, exactly as a
// for-range over the string would yield.
func GetRune(s string) rune {
	rs := []rune(s)
	if len(rs) == 0 {
		return '\x00'
	}
	return rs[0]
}
// FuzzNG_valid is the entry point for corpora known to be valid protobuf:
// a decode failure is treated as harness corruption and panics. String
// panics raised while replaying the calls are swallowed by the deferred
// recover (they are expected library rejections); any other panic value is
// re-raised as a real crash.
// NOTE(review): the recover is registered only after Unmarshal on purpose —
// an unmarshal panic here must crash the fuzzer.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point for raw fuzzer input that may not be a
// valid protobuf: undecodable input is silently skipped (return 0) instead
// of panicking. Otherwise identical to FuzzNG_valid: string panics from the
// replayed calls are swallowed, anything else is re-raised.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool
// FuzzNG_List replays each generated call in gen.List against the strconv
// API. On the first invocation it optionally writes a human-readable
// reproducer (via PrintNG_List) to the file named by FUZZ_NG_REPRODUCER.
// Returns 1 when the whole list was replayed, 0 when a call errored out or
// the list index limit was hit.
// NOTE(review): `l > 4096` caps the number of replayed calls per input;
// error returns from strconv are touched via r1.Error() so the error path
// is exercised before bailing out.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
for l := range gen.List {
if l > 4096 {
return 0
}
// Dispatch on the oneof case; each branch mirrors one strconv call.
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_ParseBool:
_, r1 := strconv.ParseBool(a.ParseBool.Str)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FormatBool:
strconv.FormatBool(a.FormatBool.B)
case *NgoloFuzzOne_AppendBool:
strconv.AppendBool(a.AppendBool.Dst, a.AppendBool.B)
case *NgoloFuzzOne_ParseComplex:
arg1 := int(a.ParseComplex.BitSize)
_, r1 := strconv.ParseComplex(a.ParseComplex.S, arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ParseFloat:
arg1 := int(a.ParseFloat.BitSize)
_, r1 := strconv.ParseFloat(a.ParseFloat.S, arg1)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ParseUint:
arg1 := int(a.ParseUint.Base)
arg2 := int(a.ParseUint.BitSize)
_, r1 := strconv.ParseUint(a.ParseUint.S, arg1, arg2)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ParseInt:
arg1 := int(a.ParseInt.Base)
arg2 := int(a.ParseInt.BitSize)
_, r1 := strconv.ParseInt(a.ParseInt.S, arg1, arg2)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_Atoi:
_, r1 := strconv.Atoi(a.Atoi.S)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_FormatFloat:
arg1 := byte(a.FormatFloat.Fmt)
arg2 := int(a.FormatFloat.Prec)
arg3 := int(a.FormatFloat.BitSize)
// Prec is reduced modulo 0x10001 to bound memory/time per call.
strconv.FormatFloat(a.FormatFloat.F, arg1, arg2 % 0x10001, arg3)
case *NgoloFuzzOne_AppendFloat:
arg2 := byte(a.AppendFloat.Fmt)
arg3 := int(a.AppendFloat.Prec)
arg4 := int(a.AppendFloat.BitSize)
strconv.AppendFloat(a.AppendFloat.Dst, a.AppendFloat.F, arg2, arg3 % 0x10001, arg4)
case *NgoloFuzzOne_FormatUint:
arg1 := int(a.FormatUint.Base)
strconv.FormatUint(a.FormatUint.I, arg1)
case *NgoloFuzzOne_FormatInt:
arg1 := int(a.FormatInt.Base)
strconv.FormatInt(a.FormatInt.I, arg1)
case *NgoloFuzzOne_Itoa:
arg0 := int(a.Itoa.I)
strconv.Itoa(arg0)
case *NgoloFuzzOne_AppendInt:
arg2 := int(a.AppendInt.Base)
strconv.AppendInt(a.AppendInt.Dst, a.AppendInt.I, arg2)
case *NgoloFuzzOne_AppendUint:
arg2 := int(a.AppendUint.Base)
strconv.AppendUint(a.AppendUint.Dst, a.AppendUint.I, arg2)
case *NgoloFuzzOne_Quote:
strconv.Quote(a.Quote.S)
case *NgoloFuzzOne_AppendQuote:
strconv.AppendQuote(a.AppendQuote.Dst, a.AppendQuote.S)
case *NgoloFuzzOne_QuoteToASCII:
strconv.QuoteToASCII(a.QuoteToASCII.S)
case *NgoloFuzzOne_AppendQuoteToASCII:
strconv.AppendQuoteToASCII(a.AppendQuoteToASCII.Dst, a.AppendQuoteToASCII.S)
case *NgoloFuzzOne_QuoteToGraphic:
strconv.QuoteToGraphic(a.QuoteToGraphic.S)
case *NgoloFuzzOne_AppendQuoteToGraphic:
strconv.AppendQuoteToGraphic(a.AppendQuoteToGraphic.Dst, a.AppendQuoteToGraphic.S)
case *NgoloFuzzOne_QuoteRune:
arg0 := GetRune(a.QuoteRune.R)
strconv.QuoteRune(arg0)
case *NgoloFuzzOne_AppendQuoteRune:
arg1 := GetRune(a.AppendQuoteRune.R)
strconv.AppendQuoteRune(a.AppendQuoteRune.Dst, arg1)
case *NgoloFuzzOne_QuoteRuneToASCII:
arg0 := GetRune(a.QuoteRuneToASCII.R)
strconv.QuoteRuneToASCII(arg0)
case *NgoloFuzzOne_AppendQuoteRuneToASCII:
arg1 := GetRune(a.AppendQuoteRuneToASCII.R)
strconv.AppendQuoteRuneToASCII(a.AppendQuoteRuneToASCII.Dst, arg1)
case *NgoloFuzzOne_QuoteRuneToGraphic:
arg0 := GetRune(a.QuoteRuneToGraphic.R)
strconv.QuoteRuneToGraphic(arg0)
case *NgoloFuzzOne_AppendQuoteRuneToGraphic:
arg1 := GetRune(a.AppendQuoteRuneToGraphic.R)
strconv.AppendQuoteRuneToGraphic(a.AppendQuoteRuneToGraphic.Dst, arg1)
case *NgoloFuzzOne_CanBackquote:
strconv.CanBackquote(a.CanBackquote.S)
case *NgoloFuzzOne_UnquoteChar:
arg1 := byte(a.UnquoteChar.Quote)
_, _, _, r3 := strconv.UnquoteChar(a.UnquoteChar.S, arg1)
if r3 != nil{
r3.Error()
return 0
}
case *NgoloFuzzOne_QuotedPrefix:
_, r1 := strconv.QuotedPrefix(a.QuotedPrefix.S)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_Unquote:
_, r1 := strconv.Unquote(a.Unquote.S)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_IsPrint:
arg0 := GetRune(a.IsPrint.R)
strconv.IsPrint(arg0)
case *NgoloFuzzOne_IsGraphic:
arg0 := GetRune(a.IsGraphic.R)
strconv.IsGraphic(arg0)
}
}
return 1
}
// PrintNG_List writes a Go-source-like transcript of the calls encoded in
// gen to w, mirroring the dispatch in FuzzNG_List one case per line. Used to
// emit a human-readable reproducer when FUZZ_NG_REPRODUCER is set.
// NOTE(review): WriteString errors are deliberately ignored — this is
// best-effort diagnostics output only.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_ParseBool:
w.WriteString(fmt.Sprintf("strconv.ParseBool(%#+v)\n", a.ParseBool.Str))
case *NgoloFuzzOne_FormatBool:
w.WriteString(fmt.Sprintf("strconv.FormatBool(%#+v)\n", a.FormatBool.B))
case *NgoloFuzzOne_AppendBool:
w.WriteString(fmt.Sprintf("strconv.AppendBool(%#+v, %#+v)\n", a.AppendBool.Dst, a.AppendBool.B))
case *NgoloFuzzOne_ParseComplex:
w.WriteString(fmt.Sprintf("strconv.ParseComplex(%#+v, int(%#+v))\n", a.ParseComplex.S, a.ParseComplex.BitSize))
case *NgoloFuzzOne_ParseFloat:
w.WriteString(fmt.Sprintf("strconv.ParseFloat(%#+v, int(%#+v))\n", a.ParseFloat.S, a.ParseFloat.BitSize))
case *NgoloFuzzOne_ParseUint:
w.WriteString(fmt.Sprintf("strconv.ParseUint(%#+v, int(%#+v), int(%#+v))\n", a.ParseUint.S, a.ParseUint.Base, a.ParseUint.BitSize))
case *NgoloFuzzOne_ParseInt:
w.WriteString(fmt.Sprintf("strconv.ParseInt(%#+v, int(%#+v), int(%#+v))\n", a.ParseInt.S, a.ParseInt.Base, a.ParseInt.BitSize))
case *NgoloFuzzOne_Atoi:
w.WriteString(fmt.Sprintf("strconv.Atoi(%#+v)\n", a.Atoi.S))
case *NgoloFuzzOne_FormatFloat:
w.WriteString(fmt.Sprintf("strconv.FormatFloat(%#+v, byte(%#+v), int(%#+v) %% 0x10001, int(%#+v))\n", a.FormatFloat.F, a.FormatFloat.Fmt, a.FormatFloat.Prec, a.FormatFloat.BitSize))
case *NgoloFuzzOne_AppendFloat:
w.WriteString(fmt.Sprintf("strconv.AppendFloat(%#+v, %#+v, byte(%#+v), int(%#+v) %% 0x10001, int(%#+v))\n", a.AppendFloat.Dst, a.AppendFloat.F, a.AppendFloat.Fmt, a.AppendFloat.Prec, a.AppendFloat.BitSize))
case *NgoloFuzzOne_FormatUint:
w.WriteString(fmt.Sprintf("strconv.FormatUint(%#+v, int(%#+v))\n", a.FormatUint.I, a.FormatUint.Base))
case *NgoloFuzzOne_FormatInt:
w.WriteString(fmt.Sprintf("strconv.FormatInt(%#+v, int(%#+v))\n", a.FormatInt.I, a.FormatInt.Base))
case *NgoloFuzzOne_Itoa:
w.WriteString(fmt.Sprintf("strconv.Itoa(int(%#+v))\n", a.Itoa.I))
case *NgoloFuzzOne_AppendInt:
w.WriteString(fmt.Sprintf("strconv.AppendInt(%#+v, %#+v, int(%#+v))\n", a.AppendInt.Dst, a.AppendInt.I, a.AppendInt.Base))
case *NgoloFuzzOne_AppendUint:
w.WriteString(fmt.Sprintf("strconv.AppendUint(%#+v, %#+v, int(%#+v))\n", a.AppendUint.Dst, a.AppendUint.I, a.AppendUint.Base))
case *NgoloFuzzOne_Quote:
w.WriteString(fmt.Sprintf("strconv.Quote(%#+v)\n", a.Quote.S))
case *NgoloFuzzOne_AppendQuote:
w.WriteString(fmt.Sprintf("strconv.AppendQuote(%#+v, %#+v)\n", a.AppendQuote.Dst, a.AppendQuote.S))
case *NgoloFuzzOne_QuoteToASCII:
w.WriteString(fmt.Sprintf("strconv.QuoteToASCII(%#+v)\n", a.QuoteToASCII.S))
case *NgoloFuzzOne_AppendQuoteToASCII:
w.WriteString(fmt.Sprintf("strconv.AppendQuoteToASCII(%#+v, %#+v)\n", a.AppendQuoteToASCII.Dst, a.AppendQuoteToASCII.S))
case *NgoloFuzzOne_QuoteToGraphic:
w.WriteString(fmt.Sprintf("strconv.QuoteToGraphic(%#+v)\n", a.QuoteToGraphic.S))
case *NgoloFuzzOne_AppendQuoteToGraphic:
w.WriteString(fmt.Sprintf("strconv.AppendQuoteToGraphic(%#+v, %#+v)\n", a.AppendQuoteToGraphic.Dst, a.AppendQuoteToGraphic.S))
case *NgoloFuzzOne_QuoteRune:
w.WriteString(fmt.Sprintf("strconv.QuoteRune(GetRune(%#+v))\n", a.QuoteRune.R))
case *NgoloFuzzOne_AppendQuoteRune:
w.WriteString(fmt.Sprintf("strconv.AppendQuoteRune(%#+v, GetRune(%#+v))\n", a.AppendQuoteRune.Dst, a.AppendQuoteRune.R))
case *NgoloFuzzOne_QuoteRuneToASCII:
w.WriteString(fmt.Sprintf("strconv.QuoteRuneToASCII(GetRune(%#+v))\n", a.QuoteRuneToASCII.R))
case *NgoloFuzzOne_AppendQuoteRuneToASCII:
w.WriteString(fmt.Sprintf("strconv.AppendQuoteRuneToASCII(%#+v, GetRune(%#+v))\n", a.AppendQuoteRuneToASCII.Dst, a.AppendQuoteRuneToASCII.R))
case *NgoloFuzzOne_QuoteRuneToGraphic:
w.WriteString(fmt.Sprintf("strconv.QuoteRuneToGraphic(GetRune(%#+v))\n", a.QuoteRuneToGraphic.R))
case *NgoloFuzzOne_AppendQuoteRuneToGraphic:
w.WriteString(fmt.Sprintf("strconv.AppendQuoteRuneToGraphic(%#+v, GetRune(%#+v))\n", a.AppendQuoteRuneToGraphic.Dst, a.AppendQuoteRuneToGraphic.R))
case *NgoloFuzzOne_CanBackquote:
w.WriteString(fmt.Sprintf("strconv.CanBackquote(%#+v)\n", a.CanBackquote.S))
case *NgoloFuzzOne_UnquoteChar:
w.WriteString(fmt.Sprintf("strconv.UnquoteChar(%#+v, byte(%#+v))\n", a.UnquoteChar.S, a.UnquoteChar.Quote))
case *NgoloFuzzOne_QuotedPrefix:
w.WriteString(fmt.Sprintf("strconv.QuotedPrefix(%#+v)\n", a.QuotedPrefix.S))
case *NgoloFuzzOne_Unquote:
w.WriteString(fmt.Sprintf("strconv.Unquote(%#+v)\n", a.Unquote.S))
case *NgoloFuzzOne_IsPrint:
w.WriteString(fmt.Sprintf("strconv.IsPrint(GetRune(%#+v))\n", a.IsPrint.R))
case *NgoloFuzzOne_IsGraphic:
w.WriteString(fmt.Sprintf("strconv.IsGraphic(GetRune(%#+v))\n", a.IsGraphic.R))
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_strconv
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Generated-code version guards: fail the build if the protobuf runtime is
// older or newer than what this file was generated against.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// ParseBoolArgs is the generated protobuf message carrying the argument for a
// fuzzed strconv.ParseBool call (see ngolofuzz.proto).
type ParseBoolArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Str is the string handed to strconv.ParseBool.
	Str           string `protobuf:"bytes,1,opt,name=str,proto3" json:"str,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *ParseBoolArgs) Reset() {
	*x = ParseBoolArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *ParseBoolArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ParseBoolArgs as a protobuf message.
func (*ParseBoolArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *ParseBoolArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ParseBoolArgs.ProtoReflect.Descriptor instead.
func (*ParseBoolArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetStr returns the str field, or "" if x is nil.
func (x *ParseBoolArgs) GetStr() string {
	if x != nil {
		return x.Str
	}
	return ""
}
// FormatBoolArgs is the generated protobuf message carrying the argument for a
// fuzzed strconv.FormatBool call (see ngolofuzz.proto).
type FormatBoolArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// B is the boolean handed to strconv.FormatBool.
	B             bool `protobuf:"varint,1,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *FormatBoolArgs) Reset() {
	*x = FormatBoolArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FormatBoolArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks FormatBoolArgs as a protobuf message.
func (*FormatBoolArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *FormatBoolArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FormatBoolArgs.ProtoReflect.Descriptor instead.
func (*FormatBoolArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetB returns the b field, or false if x is nil.
func (x *FormatBoolArgs) GetB() bool {
	if x != nil {
		return x.B
	}
	return false
}
// AppendBoolArgs is the generated protobuf message carrying the arguments for
// a fuzzed strconv.AppendBool call (see ngolofuzz.proto).
type AppendBoolArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Dst is the destination byte slice appended to.
	Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	// B is the boolean value to append.
	B             bool `protobuf:"varint,2,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *AppendBoolArgs) Reset() {
	*x = AppendBoolArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *AppendBoolArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks AppendBoolArgs as a protobuf message.
func (*AppendBoolArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *AppendBoolArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendBoolArgs.ProtoReflect.Descriptor instead.
func (*AppendBoolArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetDst returns the dst field, or nil if x is nil.
func (x *AppendBoolArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetB returns the b field, or false if x is nil.
func (x *AppendBoolArgs) GetB() bool {
	if x != nil {
		return x.B
	}
	return false
}
// ParseComplexArgs is the generated protobuf message carrying the arguments
// for a fuzzed strconv.ParseComplex call (see ngolofuzz.proto).
type ParseComplexArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// S is the string to parse.
	S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	// BitSize is the bit-size argument passed through to the parser.
	BitSize       int64 `protobuf:"varint,2,opt,name=bitSize,proto3" json:"bitSize,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *ParseComplexArgs) Reset() {
	*x = ParseComplexArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *ParseComplexArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ParseComplexArgs as a protobuf message.
func (*ParseComplexArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *ParseComplexArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ParseComplexArgs.ProtoReflect.Descriptor instead.
func (*ParseComplexArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetS returns the s field, or "" if x is nil.
func (x *ParseComplexArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetBitSize returns the bitSize field, or 0 if x is nil.
func (x *ParseComplexArgs) GetBitSize() int64 {
	if x != nil {
		return x.BitSize
	}
	return 0
}
// ParseFloatArgs is the generated protobuf message carrying the arguments for
// a fuzzed strconv.ParseFloat call (see ngolofuzz.proto).
type ParseFloatArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// S is the string to parse.
	S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	// BitSize is the bit-size argument passed through to the parser.
	BitSize       int64 `protobuf:"varint,2,opt,name=bitSize,proto3" json:"bitSize,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *ParseFloatArgs) Reset() {
	*x = ParseFloatArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *ParseFloatArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ParseFloatArgs as a protobuf message.
func (*ParseFloatArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *ParseFloatArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ParseFloatArgs.ProtoReflect.Descriptor instead.
func (*ParseFloatArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetS returns the s field, or "" if x is nil.
func (x *ParseFloatArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetBitSize returns the bitSize field, or 0 if x is nil.
func (x *ParseFloatArgs) GetBitSize() int64 {
	if x != nil {
		return x.BitSize
	}
	return 0
}
// ParseUintArgs is the generated protobuf message carrying the arguments for a
// fuzzed strconv.ParseUint call (see ngolofuzz.proto).
type ParseUintArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// S is the string to parse.
	S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	// Base is the numeric base passed through to the parser.
	Base int64 `protobuf:"varint,2,opt,name=base,proto3" json:"base,omitempty"`
	// BitSize is the bit-size argument passed through to the parser.
	BitSize       int64 `protobuf:"varint,3,opt,name=bitSize,proto3" json:"bitSize,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *ParseUintArgs) Reset() {
	*x = ParseUintArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *ParseUintArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ParseUintArgs as a protobuf message.
func (*ParseUintArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *ParseUintArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ParseUintArgs.ProtoReflect.Descriptor instead.
func (*ParseUintArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetS returns the s field, or "" if x is nil.
func (x *ParseUintArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetBase returns the base field, or 0 if x is nil.
func (x *ParseUintArgs) GetBase() int64 {
	if x != nil {
		return x.Base
	}
	return 0
}

// GetBitSize returns the bitSize field, or 0 if x is nil.
func (x *ParseUintArgs) GetBitSize() int64 {
	if x != nil {
		return x.BitSize
	}
	return 0
}
// ParseIntArgs is the generated protobuf message carrying the arguments for a
// fuzzed strconv.ParseInt call (see ngolofuzz.proto).
type ParseIntArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// S is the string to parse.
	S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	// Base is the numeric base passed through to the parser.
	Base int64 `protobuf:"varint,2,opt,name=base,proto3" json:"base,omitempty"`
	// BitSize is the bit-size argument passed through to the parser.
	BitSize       int64 `protobuf:"varint,3,opt,name=bitSize,proto3" json:"bitSize,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *ParseIntArgs) Reset() {
	*x = ParseIntArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *ParseIntArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ParseIntArgs as a protobuf message.
func (*ParseIntArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *ParseIntArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ParseIntArgs.ProtoReflect.Descriptor instead.
func (*ParseIntArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetS returns the s field, or "" if x is nil.
func (x *ParseIntArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetBase returns the base field, or 0 if x is nil.
func (x *ParseIntArgs) GetBase() int64 {
	if x != nil {
		return x.Base
	}
	return 0
}

// GetBitSize returns the bitSize field, or 0 if x is nil.
func (x *ParseIntArgs) GetBitSize() int64 {
	if x != nil {
		return x.BitSize
	}
	return 0
}
// AtoiArgs is the generated protobuf message carrying the argument for a
// fuzzed strconv.Atoi call (see ngolofuzz.proto).
type AtoiArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// S is the string handed to strconv.Atoi.
	S             string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *AtoiArgs) Reset() {
	*x = AtoiArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *AtoiArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks AtoiArgs as a protobuf message.
func (*AtoiArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *AtoiArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AtoiArgs.ProtoReflect.Descriptor instead.
func (*AtoiArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// GetS returns the s field, or "" if x is nil.
func (x *AtoiArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// FormatFloatArgs is the generated protobuf message carrying the arguments for
// a fuzzed strconv.FormatFloat call (see ngolofuzz.proto).
type FormatFloatArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// F is the floating-point value to format.
	F float64 `protobuf:"fixed64,1,opt,name=f,proto3" json:"f,omitempty"`
	// Fmt is the format byte ('b', 'e', 'f', 'g', ...) as a uint32.
	Fmt uint32 `protobuf:"varint,2,opt,name=fmt,proto3" json:"fmt,omitempty"`
	// Prec is the precision argument.
	Prec int64 `protobuf:"varint,3,opt,name=prec,proto3" json:"prec,omitempty"`
	// BitSize is the bit-size argument.
	BitSize       int64 `protobuf:"varint,4,opt,name=bitSize,proto3" json:"bitSize,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *FormatFloatArgs) Reset() {
	*x = FormatFloatArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FormatFloatArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks FormatFloatArgs as a protobuf message.
func (*FormatFloatArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *FormatFloatArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FormatFloatArgs.ProtoReflect.Descriptor instead.
func (*FormatFloatArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetF returns the f field, or 0 if x is nil.
func (x *FormatFloatArgs) GetF() float64 {
	if x != nil {
		return x.F
	}
	return 0
}

// GetFmt returns the fmt field, or 0 if x is nil.
func (x *FormatFloatArgs) GetFmt() uint32 {
	if x != nil {
		return x.Fmt
	}
	return 0
}

// GetPrec returns the prec field, or 0 if x is nil.
func (x *FormatFloatArgs) GetPrec() int64 {
	if x != nil {
		return x.Prec
	}
	return 0
}

// GetBitSize returns the bitSize field, or 0 if x is nil.
func (x *FormatFloatArgs) GetBitSize() int64 {
	if x != nil {
		return x.BitSize
	}
	return 0
}
// AppendFloatArgs is the generated protobuf message carrying the arguments for
// a fuzzed strconv.AppendFloat call (see ngolofuzz.proto).
type AppendFloatArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Dst is the destination byte slice appended to.
	Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	// F is the floating-point value to format.
	F float64 `protobuf:"fixed64,2,opt,name=f,proto3" json:"f,omitempty"`
	// Fmt is the format byte ('b', 'e', 'f', 'g', ...) as a uint32.
	Fmt uint32 `protobuf:"varint,3,opt,name=fmt,proto3" json:"fmt,omitempty"`
	// Prec is the precision argument.
	Prec int64 `protobuf:"varint,4,opt,name=prec,proto3" json:"prec,omitempty"`
	// BitSize is the bit-size argument.
	BitSize       int64 `protobuf:"varint,5,opt,name=bitSize,proto3" json:"bitSize,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *AppendFloatArgs) Reset() {
	*x = AppendFloatArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *AppendFloatArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks AppendFloatArgs as a protobuf message.
func (*AppendFloatArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *AppendFloatArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendFloatArgs.ProtoReflect.Descriptor instead.
func (*AppendFloatArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetDst returns the dst field, or nil if x is nil.
func (x *AppendFloatArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetF returns the f field, or 0 if x is nil.
func (x *AppendFloatArgs) GetF() float64 {
	if x != nil {
		return x.F
	}
	return 0
}

// GetFmt returns the fmt field, or 0 if x is nil.
func (x *AppendFloatArgs) GetFmt() uint32 {
	if x != nil {
		return x.Fmt
	}
	return 0
}

// GetPrec returns the prec field, or 0 if x is nil.
func (x *AppendFloatArgs) GetPrec() int64 {
	if x != nil {
		return x.Prec
	}
	return 0
}

// GetBitSize returns the bitSize field, or 0 if x is nil.
func (x *AppendFloatArgs) GetBitSize() int64 {
	if x != nil {
		return x.BitSize
	}
	return 0
}
// FormatUintArgs is the generated protobuf message carrying the arguments for
// a fuzzed strconv.FormatUint call (see ngolofuzz.proto).
type FormatUintArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// I is the unsigned integer to format.
	I uint64 `protobuf:"varint,1,opt,name=i,proto3" json:"i,omitempty"`
	// Base is the numeric base used when formatting.
	Base          int64 `protobuf:"varint,2,opt,name=base,proto3" json:"base,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *FormatUintArgs) Reset() {
	*x = FormatUintArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FormatUintArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks FormatUintArgs as a protobuf message.
func (*FormatUintArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *FormatUintArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FormatUintArgs.ProtoReflect.Descriptor instead.
func (*FormatUintArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetI returns the i field, or 0 if x is nil.
func (x *FormatUintArgs) GetI() uint64 {
	if x != nil {
		return x.I
	}
	return 0
}

// GetBase returns the base field, or 0 if x is nil.
func (x *FormatUintArgs) GetBase() int64 {
	if x != nil {
		return x.Base
	}
	return 0
}
// FormatIntArgs is the generated protobuf message carrying the arguments for a
// fuzzed strconv.FormatInt call (see ngolofuzz.proto).
type FormatIntArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// I is the signed integer to format.
	I int64 `protobuf:"varint,1,opt,name=i,proto3" json:"i,omitempty"`
	// Base is the numeric base used when formatting.
	Base          int64 `protobuf:"varint,2,opt,name=base,proto3" json:"base,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *FormatIntArgs) Reset() {
	*x = FormatIntArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *FormatIntArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks FormatIntArgs as a protobuf message.
func (*FormatIntArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *FormatIntArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FormatIntArgs.ProtoReflect.Descriptor instead.
func (*FormatIntArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetI returns the i field, or 0 if x is nil.
func (x *FormatIntArgs) GetI() int64 {
	if x != nil {
		return x.I
	}
	return 0
}

// GetBase returns the base field, or 0 if x is nil.
func (x *FormatIntArgs) GetBase() int64 {
	if x != nil {
		return x.Base
	}
	return 0
}
// ItoaArgs is the generated protobuf message carrying the argument for a
// fuzzed strconv.Itoa call (see ngolofuzz.proto).
type ItoaArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// I is the integer handed to strconv.Itoa.
	I             int64 `protobuf:"varint,1,opt,name=i,proto3" json:"i,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *ItoaArgs) Reset() {
	*x = ItoaArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *ItoaArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ItoaArgs as a protobuf message.
func (*ItoaArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *ItoaArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ItoaArgs.ProtoReflect.Descriptor instead.
func (*ItoaArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// GetI returns the i field, or 0 if x is nil.
func (x *ItoaArgs) GetI() int64 {
	if x != nil {
		return x.I
	}
	return 0
}
// AppendIntArgs is the generated protobuf message carrying the arguments for a
// fuzzed strconv.AppendInt call (see ngolofuzz.proto).
type AppendIntArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Dst is the destination byte slice appended to.
	Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	// I is the signed integer to append.
	I int64 `protobuf:"varint,2,opt,name=i,proto3" json:"i,omitempty"`
	// Base is the numeric base used when formatting.
	Base          int64 `protobuf:"varint,3,opt,name=base,proto3" json:"base,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *AppendIntArgs) Reset() {
	*x = AppendIntArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *AppendIntArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks AppendIntArgs as a protobuf message.
func (*AppendIntArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *AppendIntArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendIntArgs.ProtoReflect.Descriptor instead.
func (*AppendIntArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// GetDst returns the dst field, or nil if x is nil.
func (x *AppendIntArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetI returns the i field, or 0 if x is nil.
func (x *AppendIntArgs) GetI() int64 {
	if x != nil {
		return x.I
	}
	return 0
}

// GetBase returns the base field, or 0 if x is nil.
func (x *AppendIntArgs) GetBase() int64 {
	if x != nil {
		return x.Base
	}
	return 0
}
// AppendUintArgs is the generated protobuf message carrying the arguments for
// a fuzzed strconv.AppendUint call (see ngolofuzz.proto).
type AppendUintArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Dst is the destination byte slice appended to.
	Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	// I is the unsigned integer to append.
	I uint64 `protobuf:"varint,2,opt,name=i,proto3" json:"i,omitempty"`
	// Base is the numeric base used when formatting.
	Base          int64 `protobuf:"varint,3,opt,name=base,proto3" json:"base,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *AppendUintArgs) Reset() {
	*x = AppendUintArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *AppendUintArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks AppendUintArgs as a protobuf message.
func (*AppendUintArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *AppendUintArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendUintArgs.ProtoReflect.Descriptor instead.
func (*AppendUintArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetDst returns the dst field, or nil if x is nil.
func (x *AppendUintArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetI returns the i field, or 0 if x is nil.
func (x *AppendUintArgs) GetI() uint64 {
	if x != nil {
		return x.I
	}
	return 0
}

// GetBase returns the base field, or 0 if x is nil.
func (x *AppendUintArgs) GetBase() int64 {
	if x != nil {
		return x.Base
	}
	return 0
}
// QuoteArgs is the generated protobuf message carrying the argument for a
// fuzzed strconv.Quote call (see ngolofuzz.proto).
type QuoteArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// S is the string handed to strconv.Quote.
	S             string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *QuoteArgs) Reset() {
	*x = QuoteArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *QuoteArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks QuoteArgs as a protobuf message.
func (*QuoteArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *QuoteArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use QuoteArgs.ProtoReflect.Descriptor instead.
func (*QuoteArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// GetS returns the s field, or "" if x is nil.
func (x *QuoteArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// AppendQuoteArgs is the generated protobuf message carrying the arguments for
// a fuzzed strconv.AppendQuote call (see ngolofuzz.proto).
type AppendQuoteArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Dst is the destination byte slice appended to.
	Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	// S is the string to quote.
	S             string `protobuf:"bytes,2,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *AppendQuoteArgs) Reset() {
	*x = AppendQuoteArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *AppendQuoteArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks AppendQuoteArgs as a protobuf message.
func (*AppendQuoteArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *AppendQuoteArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendQuoteArgs.ProtoReflect.Descriptor instead.
func (*AppendQuoteArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// GetDst returns the dst field, or nil if x is nil.
func (x *AppendQuoteArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetS returns the s field, or "" if x is nil.
func (x *AppendQuoteArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// QuoteToASCIIArgs is the generated protobuf message carrying the argument for
// a fuzzed strconv.QuoteToASCII call (see ngolofuzz.proto).
type QuoteToASCIIArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// S is the string handed to strconv.QuoteToASCII.
	S             string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *QuoteToASCIIArgs) Reset() {
	*x = QuoteToASCIIArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *QuoteToASCIIArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks QuoteToASCIIArgs as a protobuf message.
func (*QuoteToASCIIArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *QuoteToASCIIArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use QuoteToASCIIArgs.ProtoReflect.Descriptor instead.
func (*QuoteToASCIIArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}

// GetS returns the s field, or "" if x is nil.
func (x *QuoteToASCIIArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// AppendQuoteToASCIIArgs is the generated protobuf message carrying the
// arguments for a fuzzed strconv.AppendQuoteToASCII call (see ngolofuzz.proto).
type AppendQuoteToASCIIArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Dst is the destination byte slice appended to.
	Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	// S is the string to quote.
	S             string `protobuf:"bytes,2,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *AppendQuoteToASCIIArgs) Reset() {
	*x = AppendQuoteToASCIIArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *AppendQuoteToASCIIArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks AppendQuoteToASCIIArgs as a protobuf message.
func (*AppendQuoteToASCIIArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *AppendQuoteToASCIIArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendQuoteToASCIIArgs.ProtoReflect.Descriptor instead.
func (*AppendQuoteToASCIIArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}

// GetDst returns the dst field, or nil if x is nil.
func (x *AppendQuoteToASCIIArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetS returns the s field, or "" if x is nil.
func (x *AppendQuoteToASCIIArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// QuoteToGraphicArgs is the generated protobuf message carrying the argument
// for a fuzzed strconv.QuoteToGraphic call (see ngolofuzz.proto).
type QuoteToGraphicArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// S is the string handed to strconv.QuoteToGraphic.
	S             string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *QuoteToGraphicArgs) Reset() {
	*x = QuoteToGraphicArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[19]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *QuoteToGraphicArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks QuoteToGraphicArgs as a protobuf message.
func (*QuoteToGraphicArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *QuoteToGraphicArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[19]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use QuoteToGraphicArgs.ProtoReflect.Descriptor instead.
func (*QuoteToGraphicArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}

// GetS returns the s field, or "" if x is nil.
func (x *QuoteToGraphicArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// AppendQuoteToGraphicArgs is the generated protobuf message carrying the
// arguments for a fuzzed strconv.AppendQuoteToGraphic call (see
// ngolofuzz.proto).
type AppendQuoteToGraphicArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Dst is the destination byte slice appended to.
	Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	// S is the string to quote.
	S             string `protobuf:"bytes,2,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *AppendQuoteToGraphicArgs) Reset() {
	*x = AppendQuoteToGraphicArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[20]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *AppendQuoteToGraphicArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks AppendQuoteToGraphicArgs as a protobuf message.
func (*AppendQuoteToGraphicArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *AppendQuoteToGraphicArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[20]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendQuoteToGraphicArgs.ProtoReflect.Descriptor instead.
func (*AppendQuoteToGraphicArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}

// GetDst returns the dst field, or nil if x is nil.
func (x *AppendQuoteToGraphicArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetS returns the s field, or "" if x is nil.
func (x *AppendQuoteToGraphicArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// QuoteRuneArgs is the generated protobuf message carrying the argument for a
// fuzzed strconv.QuoteRune call (see ngolofuzz.proto).
type QuoteRuneArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// R is the rune argument, transported as a string (the harness converts
	// it with GetRune before calling strconv).
	R             string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *QuoteRuneArgs) Reset() {
	*x = QuoteRuneArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[21]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *QuoteRuneArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks QuoteRuneArgs as a protobuf message.
func (*QuoteRuneArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *QuoteRuneArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[21]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use QuoteRuneArgs.ProtoReflect.Descriptor instead.
func (*QuoteRuneArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}

// GetR returns the r field, or "" if x is nil.
func (x *QuoteRuneArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// AppendQuoteRuneArgs is the generated protobuf message carrying the arguments
// for a fuzzed strconv.AppendQuoteRune call (see ngolofuzz.proto).
type AppendQuoteRuneArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Dst is the destination byte slice appended to.
	Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	// R is the rune argument, transported as a string (the harness converts
	// it with GetRune before calling strconv).
	R             string `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *AppendQuoteRuneArgs) Reset() {
	*x = AppendQuoteRuneArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[22]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *AppendQuoteRuneArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks AppendQuoteRuneArgs as a protobuf message.
func (*AppendQuoteRuneArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *AppendQuoteRuneArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[22]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendQuoteRuneArgs.ProtoReflect.Descriptor instead.
func (*AppendQuoteRuneArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}

// GetDst returns the dst field, or nil if x is nil.
func (x *AppendQuoteRuneArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetR returns the r field, or "" if x is nil.
func (x *AppendQuoteRuneArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// QuoteRuneToASCIIArgs is the generated protobuf message carrying the argument
// for a fuzzed strconv.QuoteRuneToASCII call (see ngolofuzz.proto).
type QuoteRuneToASCIIArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// R is the rune argument, transported as a string (the harness converts
	// it with GetRune before calling strconv).
	R             string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *QuoteRuneToASCIIArgs) Reset() {
	*x = QuoteRuneToASCIIArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[23]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *QuoteRuneToASCIIArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks QuoteRuneToASCIIArgs as a protobuf message.
func (*QuoteRuneToASCIIArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *QuoteRuneToASCIIArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[23]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use QuoteRuneToASCIIArgs.ProtoReflect.Descriptor instead.
func (*QuoteRuneToASCIIArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}

// GetR returns the r field, or "" if x is nil.
func (x *QuoteRuneToASCIIArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// AppendQuoteRuneToASCIIArgs is the generated protobuf message carrying the
// arguments for a fuzzed strconv.AppendQuoteRuneToASCII call (see
// ngolofuzz.proto).
type AppendQuoteRuneToASCIIArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Dst is the destination byte slice appended to.
	Dst []byte `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	// R is the rune argument, transported as a string (the harness converts
	// it with GetRune before calling strconv).
	R             string `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *AppendQuoteRuneToASCIIArgs) Reset() {
	*x = AppendQuoteRuneToASCIIArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[24]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *AppendQuoteRuneToASCIIArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks AppendQuoteRuneToASCIIArgs as a protobuf message.
func (*AppendQuoteRuneToASCIIArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *AppendQuoteRuneToASCIIArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[24]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendQuoteRuneToASCIIArgs.ProtoReflect.Descriptor instead.
func (*AppendQuoteRuneToASCIIArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}

// GetDst returns the dst field, or nil if x is nil.
func (x *AppendQuoteRuneToASCIIArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetR returns the r field, or "" if x is nil.
func (x *AppendQuoteRuneToASCIIArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// QuoteRuneToGraphicArgs is the generated protobuf message carrying the
// argument for a fuzzed strconv.QuoteRuneToGraphic call (see ngolofuzz.proto).
type QuoteRuneToGraphicArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// R is the rune argument, transported as a string (the harness converts
	// it with GetRune before calling strconv).
	R             string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears x to its zero value and reattaches the generated message info.
func (x *QuoteRuneToGraphicArgs) Reset() {
	*x = QuoteRuneToGraphicArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[25]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message with the protobuf text formatter.
func (x *QuoteRuneToGraphicArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks QuoteRuneToGraphicArgs as a protobuf message.
func (*QuoteRuneToGraphicArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching the
// generated message info on first use; a nil receiver falls back to MessageOf.
func (x *QuoteRuneToGraphicArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[25]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use QuoteRuneToGraphicArgs.ProtoReflect.Descriptor instead.
func (*QuoteRuneToGraphicArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}

// GetR returns the r field, or "" if x is nil.
func (x *QuoteRuneToGraphicArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// AppendQuoteRuneToGraphicArgs is the generated message carrying the
// arguments for the AppendQuoteRuneToGraphic fuzz target.
type AppendQuoteRuneToGraphicArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Dst           []byte                 `protobuf:"bytes,1,opt,name=dst,proto3" json:"dst,omitempty"`
	R             string                 `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and rebinds its type info.
func (x *AppendQuoteRuneToGraphicArgs) Reset() {
	*x = AppendQuoteRuneToGraphicArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[26]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *AppendQuoteRuneToGraphicArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*AppendQuoteRuneToGraphicArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (type slot 26).
func (x *AppendQuoteRuneToGraphicArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[26]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendQuoteRuneToGraphicArgs.ProtoReflect.Descriptor instead.
func (*AppendQuoteRuneToGraphicArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}

// GetDst returns the Dst field; nil-receiver safe (returns nil).
func (x *AppendQuoteRuneToGraphicArgs) GetDst() []byte {
	if x != nil {
		return x.Dst
	}
	return nil
}

// GetR returns the R field; nil-receiver safe (returns "").
func (x *AppendQuoteRuneToGraphicArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// CanBackquoteArgs is the generated message carrying the argument for the
// CanBackquote fuzz target.
type CanBackquoteArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and rebinds its type info.
func (x *CanBackquoteArgs) Reset() {
	*x = CanBackquoteArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[27]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *CanBackquoteArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*CanBackquoteArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (type slot 27).
func (x *CanBackquoteArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[27]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CanBackquoteArgs.ProtoReflect.Descriptor instead.
func (*CanBackquoteArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}

// GetS returns the S field; nil-receiver safe (returns "").
func (x *CanBackquoteArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// UnquoteCharArgs is the generated message carrying the arguments for the
// UnquoteChar fuzz target. Quote is transported as uint32 (proto3 has no
// byte/rune scalar); the harness presumably narrows it — confirm at the caller.
type UnquoteCharArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Quote         uint32                 `protobuf:"varint,2,opt,name=quote,proto3" json:"quote,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and rebinds its type info.
func (x *UnquoteCharArgs) Reset() {
	*x = UnquoteCharArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[28]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *UnquoteCharArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*UnquoteCharArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (type slot 28).
func (x *UnquoteCharArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[28]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UnquoteCharArgs.ProtoReflect.Descriptor instead.
func (*UnquoteCharArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}

// GetS returns the S field; nil-receiver safe (returns "").
func (x *UnquoteCharArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetQuote returns the Quote field; nil-receiver safe (returns 0).
func (x *UnquoteCharArgs) GetQuote() uint32 {
	if x != nil {
		return x.Quote
	}
	return 0
}
// QuotedPrefixArgs is the generated message carrying the argument for the
// QuotedPrefix fuzz target.
type QuotedPrefixArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and rebinds its type info.
func (x *QuotedPrefixArgs) Reset() {
	*x = QuotedPrefixArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[29]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *QuotedPrefixArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*QuotedPrefixArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (type slot 29).
func (x *QuotedPrefixArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[29]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use QuotedPrefixArgs.ProtoReflect.Descriptor instead.
func (*QuotedPrefixArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}

// GetS returns the S field; nil-receiver safe (returns "").
func (x *QuotedPrefixArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// UnquoteArgs is the generated message carrying the argument for the
// Unquote fuzz target.
type UnquoteArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and rebinds its type info.
func (x *UnquoteArgs) Reset() {
	*x = UnquoteArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[30]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *UnquoteArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*UnquoteArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (type slot 30).
func (x *UnquoteArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[30]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UnquoteArgs.ProtoReflect.Descriptor instead.
func (*UnquoteArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}

// GetS returns the S field; nil-receiver safe (returns "").
func (x *UnquoteArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// IsPrintArgs is the generated message carrying the argument for the
// IsPrint fuzz target. R is a string on the wire (proto3 has no rune type).
type IsPrintArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             string                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and rebinds its type info.
func (x *IsPrintArgs) Reset() {
	*x = IsPrintArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[31]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *IsPrintArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*IsPrintArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (type slot 31).
func (x *IsPrintArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[31]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsPrintArgs.ProtoReflect.Descriptor instead.
func (*IsPrintArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}

// GetR returns the R field; nil-receiver safe (returns "").
func (x *IsPrintArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsGraphicArgs is the generated message carrying the argument for the
// IsGraphic fuzz target.
type IsGraphicArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             string                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and rebinds its type info.
func (x *IsGraphicArgs) Reset() {
	*x = IsGraphicArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[32]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *IsGraphicArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*IsGraphicArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (type slot 32).
func (x *IsGraphicArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[32]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IsGraphicArgs.ProtoReflect.Descriptor instead.
func (*IsGraphicArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}

// GetR returns the R field; nil-receiver safe (returns "").
func (x *IsGraphicArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// NgoloFuzzOne wraps exactly one fuzz-target argument message in a proto3
// oneof; each wrapper type below selects one of the 33 targets. Decoding a
// NgoloFuzzOne therefore yields a single call to replay.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_ParseBool
	//	*NgoloFuzzOne_FormatBool
	//	*NgoloFuzzOne_AppendBool
	//	*NgoloFuzzOne_ParseComplex
	//	*NgoloFuzzOne_ParseFloat
	//	*NgoloFuzzOne_ParseUint
	//	*NgoloFuzzOne_ParseInt
	//	*NgoloFuzzOne_Atoi
	//	*NgoloFuzzOne_FormatFloat
	//	*NgoloFuzzOne_AppendFloat
	//	*NgoloFuzzOne_FormatUint
	//	*NgoloFuzzOne_FormatInt
	//	*NgoloFuzzOne_Itoa
	//	*NgoloFuzzOne_AppendInt
	//	*NgoloFuzzOne_AppendUint
	//	*NgoloFuzzOne_Quote
	//	*NgoloFuzzOne_AppendQuote
	//	*NgoloFuzzOne_QuoteToASCII
	//	*NgoloFuzzOne_AppendQuoteToASCII
	//	*NgoloFuzzOne_QuoteToGraphic
	//	*NgoloFuzzOne_AppendQuoteToGraphic
	//	*NgoloFuzzOne_QuoteRune
	//	*NgoloFuzzOne_AppendQuoteRune
	//	*NgoloFuzzOne_QuoteRuneToASCII
	//	*NgoloFuzzOne_AppendQuoteRuneToASCII
	//	*NgoloFuzzOne_QuoteRuneToGraphic
	//	*NgoloFuzzOne_AppendQuoteRuneToGraphic
	//	*NgoloFuzzOne_CanBackquote
	//	*NgoloFuzzOne_UnquoteChar
	//	*NgoloFuzzOne_QuotedPrefix
	//	*NgoloFuzzOne_Unquote
	//	*NgoloFuzzOne_IsPrint
	//	*NgoloFuzzOne_IsGraphic
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and rebinds its type info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[33]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (type slot 33).
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[33]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{33}
}

// GetItem returns the populated oneof wrapper, or nil if none is set.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below each return the corresponding oneof case's
// message, or nil when the receiver is nil or a different case is set.
func (x *NgoloFuzzOne) GetParseBool() *ParseBoolArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ParseBool); ok {
			return x.ParseBool
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetFormatBool() *FormatBoolArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FormatBool); ok {
			return x.FormatBool
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAppendBool() *AppendBoolArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendBool); ok {
			return x.AppendBool
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetParseComplex() *ParseComplexArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ParseComplex); ok {
			return x.ParseComplex
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetParseFloat() *ParseFloatArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ParseFloat); ok {
			return x.ParseFloat
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetParseUint() *ParseUintArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ParseUint); ok {
			return x.ParseUint
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetParseInt() *ParseIntArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ParseInt); ok {
			return x.ParseInt
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAtoi() *AtoiArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Atoi); ok {
			return x.Atoi
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetFormatFloat() *FormatFloatArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FormatFloat); ok {
			return x.FormatFloat
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAppendFloat() *AppendFloatArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendFloat); ok {
			return x.AppendFloat
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetFormatUint() *FormatUintArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FormatUint); ok {
			return x.FormatUint
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetFormatInt() *FormatIntArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FormatInt); ok {
			return x.FormatInt
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetItoa() *ItoaArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Itoa); ok {
			return x.Itoa
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAppendInt() *AppendIntArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendInt); ok {
			return x.AppendInt
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAppendUint() *AppendUintArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendUint); ok {
			return x.AppendUint
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetQuote() *QuoteArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Quote); ok {
			return x.Quote
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAppendQuote() *AppendQuoteArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendQuote); ok {
			return x.AppendQuote
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetQuoteToASCII() *QuoteToASCIIArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_QuoteToASCII); ok {
			return x.QuoteToASCII
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAppendQuoteToASCII() *AppendQuoteToASCIIArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendQuoteToASCII); ok {
			return x.AppendQuoteToASCII
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetQuoteToGraphic() *QuoteToGraphicArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_QuoteToGraphic); ok {
			return x.QuoteToGraphic
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAppendQuoteToGraphic() *AppendQuoteToGraphicArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendQuoteToGraphic); ok {
			return x.AppendQuoteToGraphic
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetQuoteRune() *QuoteRuneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_QuoteRune); ok {
			return x.QuoteRune
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAppendQuoteRune() *AppendQuoteRuneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendQuoteRune); ok {
			return x.AppendQuoteRune
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetQuoteRuneToASCII() *QuoteRuneToASCIIArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_QuoteRuneToASCII); ok {
			return x.QuoteRuneToASCII
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAppendQuoteRuneToASCII() *AppendQuoteRuneToASCIIArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendQuoteRuneToASCII); ok {
			return x.AppendQuoteRuneToASCII
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetQuoteRuneToGraphic() *QuoteRuneToGraphicArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_QuoteRuneToGraphic); ok {
			return x.QuoteRuneToGraphic
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetAppendQuoteRuneToGraphic() *AppendQuoteRuneToGraphicArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendQuoteRuneToGraphic); ok {
			return x.AppendQuoteRuneToGraphic
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetCanBackquote() *CanBackquoteArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_CanBackquote); ok {
			return x.CanBackquote
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetUnquoteChar() *UnquoteCharArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_UnquoteChar); ok {
			return x.UnquoteChar
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetQuotedPrefix() *QuotedPrefixArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_QuotedPrefix); ok {
			return x.QuotedPrefix
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetUnquote() *UnquoteArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Unquote); ok {
			return x.Unquote
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetIsPrint() *IsPrintArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IsPrint); ok {
			return x.IsPrint
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetIsGraphic() *IsGraphicArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IsGraphic); ok {
			return x.IsGraphic
		}
	}
	return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented only by the
// generated oneof wrapper types below; it restricts what may be assigned
// to NgoloFuzzOne.Item.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// One wrapper struct per oneof case; the protobuf field number is encoded
// in each struct tag (1-33, matching the order declared in the .proto).
type NgoloFuzzOne_ParseBool struct {
	ParseBool *ParseBoolArgs `protobuf:"bytes,1,opt,name=ParseBool,proto3,oneof"`
}
type NgoloFuzzOne_FormatBool struct {
	FormatBool *FormatBoolArgs `protobuf:"bytes,2,opt,name=FormatBool,proto3,oneof"`
}
type NgoloFuzzOne_AppendBool struct {
	AppendBool *AppendBoolArgs `protobuf:"bytes,3,opt,name=AppendBool,proto3,oneof"`
}
type NgoloFuzzOne_ParseComplex struct {
	ParseComplex *ParseComplexArgs `protobuf:"bytes,4,opt,name=ParseComplex,proto3,oneof"`
}
type NgoloFuzzOne_ParseFloat struct {
	ParseFloat *ParseFloatArgs `protobuf:"bytes,5,opt,name=ParseFloat,proto3,oneof"`
}
type NgoloFuzzOne_ParseUint struct {
	ParseUint *ParseUintArgs `protobuf:"bytes,6,opt,name=ParseUint,proto3,oneof"`
}
type NgoloFuzzOne_ParseInt struct {
	ParseInt *ParseIntArgs `protobuf:"bytes,7,opt,name=ParseInt,proto3,oneof"`
}
type NgoloFuzzOne_Atoi struct {
	Atoi *AtoiArgs `protobuf:"bytes,8,opt,name=Atoi,proto3,oneof"`
}
type NgoloFuzzOne_FormatFloat struct {
	FormatFloat *FormatFloatArgs `protobuf:"bytes,9,opt,name=FormatFloat,proto3,oneof"`
}
type NgoloFuzzOne_AppendFloat struct {
	AppendFloat *AppendFloatArgs `protobuf:"bytes,10,opt,name=AppendFloat,proto3,oneof"`
}
type NgoloFuzzOne_FormatUint struct {
	FormatUint *FormatUintArgs `protobuf:"bytes,11,opt,name=FormatUint,proto3,oneof"`
}
type NgoloFuzzOne_FormatInt struct {
	FormatInt *FormatIntArgs `protobuf:"bytes,12,opt,name=FormatInt,proto3,oneof"`
}
type NgoloFuzzOne_Itoa struct {
	Itoa *ItoaArgs `protobuf:"bytes,13,opt,name=Itoa,proto3,oneof"`
}
type NgoloFuzzOne_AppendInt struct {
	AppendInt *AppendIntArgs `protobuf:"bytes,14,opt,name=AppendInt,proto3,oneof"`
}
type NgoloFuzzOne_AppendUint struct {
	AppendUint *AppendUintArgs `protobuf:"bytes,15,opt,name=AppendUint,proto3,oneof"`
}
type NgoloFuzzOne_Quote struct {
	Quote *QuoteArgs `protobuf:"bytes,16,opt,name=Quote,proto3,oneof"`
}
type NgoloFuzzOne_AppendQuote struct {
	AppendQuote *AppendQuoteArgs `protobuf:"bytes,17,opt,name=AppendQuote,proto3,oneof"`
}
type NgoloFuzzOne_QuoteToASCII struct {
	QuoteToASCII *QuoteToASCIIArgs `protobuf:"bytes,18,opt,name=QuoteToASCII,proto3,oneof"`
}
type NgoloFuzzOne_AppendQuoteToASCII struct {
	AppendQuoteToASCII *AppendQuoteToASCIIArgs `protobuf:"bytes,19,opt,name=AppendQuoteToASCII,proto3,oneof"`
}
type NgoloFuzzOne_QuoteToGraphic struct {
	QuoteToGraphic *QuoteToGraphicArgs `protobuf:"bytes,20,opt,name=QuoteToGraphic,proto3,oneof"`
}
type NgoloFuzzOne_AppendQuoteToGraphic struct {
	AppendQuoteToGraphic *AppendQuoteToGraphicArgs `protobuf:"bytes,21,opt,name=AppendQuoteToGraphic,proto3,oneof"`
}
type NgoloFuzzOne_QuoteRune struct {
	QuoteRune *QuoteRuneArgs `protobuf:"bytes,22,opt,name=QuoteRune,proto3,oneof"`
}
type NgoloFuzzOne_AppendQuoteRune struct {
	AppendQuoteRune *AppendQuoteRuneArgs `protobuf:"bytes,23,opt,name=AppendQuoteRune,proto3,oneof"`
}
type NgoloFuzzOne_QuoteRuneToASCII struct {
	QuoteRuneToASCII *QuoteRuneToASCIIArgs `protobuf:"bytes,24,opt,name=QuoteRuneToASCII,proto3,oneof"`
}
type NgoloFuzzOne_AppendQuoteRuneToASCII struct {
	AppendQuoteRuneToASCII *AppendQuoteRuneToASCIIArgs `protobuf:"bytes,25,opt,name=AppendQuoteRuneToASCII,proto3,oneof"`
}
type NgoloFuzzOne_QuoteRuneToGraphic struct {
	QuoteRuneToGraphic *QuoteRuneToGraphicArgs `protobuf:"bytes,26,opt,name=QuoteRuneToGraphic,proto3,oneof"`
}
type NgoloFuzzOne_AppendQuoteRuneToGraphic struct {
	AppendQuoteRuneToGraphic *AppendQuoteRuneToGraphicArgs `protobuf:"bytes,27,opt,name=AppendQuoteRuneToGraphic,proto3,oneof"`
}
type NgoloFuzzOne_CanBackquote struct {
	CanBackquote *CanBackquoteArgs `protobuf:"bytes,28,opt,name=CanBackquote,proto3,oneof"`
}
type NgoloFuzzOne_UnquoteChar struct {
	UnquoteChar *UnquoteCharArgs `protobuf:"bytes,29,opt,name=UnquoteChar,proto3,oneof"`
}
type NgoloFuzzOne_QuotedPrefix struct {
	QuotedPrefix *QuotedPrefixArgs `protobuf:"bytes,30,opt,name=QuotedPrefix,proto3,oneof"`
}
type NgoloFuzzOne_Unquote struct {
	Unquote *UnquoteArgs `protobuf:"bytes,31,opt,name=Unquote,proto3,oneof"`
}
type NgoloFuzzOne_IsPrint struct {
	IsPrint *IsPrintArgs `protobuf:"bytes,32,opt,name=IsPrint,proto3,oneof"`
}
type NgoloFuzzOne_IsGraphic struct {
	IsGraphic *IsGraphicArgs `protobuf:"bytes,33,opt,name=IsGraphic,proto3,oneof"`
}
// Empty marker methods: each wrapper satisfies isNgoloFuzzOne_Item, sealing
// the oneof so no outside type can be assigned to NgoloFuzzOne.Item.
func (*NgoloFuzzOne_ParseBool) isNgoloFuzzOne_Item()                {}
func (*NgoloFuzzOne_FormatBool) isNgoloFuzzOne_Item()               {}
func (*NgoloFuzzOne_AppendBool) isNgoloFuzzOne_Item()               {}
func (*NgoloFuzzOne_ParseComplex) isNgoloFuzzOne_Item()             {}
func (*NgoloFuzzOne_ParseFloat) isNgoloFuzzOne_Item()               {}
func (*NgoloFuzzOne_ParseUint) isNgoloFuzzOne_Item()                {}
func (*NgoloFuzzOne_ParseInt) isNgoloFuzzOne_Item()                 {}
func (*NgoloFuzzOne_Atoi) isNgoloFuzzOne_Item()                     {}
func (*NgoloFuzzOne_FormatFloat) isNgoloFuzzOne_Item()              {}
func (*NgoloFuzzOne_AppendFloat) isNgoloFuzzOne_Item()              {}
func (*NgoloFuzzOne_FormatUint) isNgoloFuzzOne_Item()               {}
func (*NgoloFuzzOne_FormatInt) isNgoloFuzzOne_Item()                {}
func (*NgoloFuzzOne_Itoa) isNgoloFuzzOne_Item()                     {}
func (*NgoloFuzzOne_AppendInt) isNgoloFuzzOne_Item()                {}
func (*NgoloFuzzOne_AppendUint) isNgoloFuzzOne_Item()               {}
func (*NgoloFuzzOne_Quote) isNgoloFuzzOne_Item()                    {}
func (*NgoloFuzzOne_AppendQuote) isNgoloFuzzOne_Item()              {}
func (*NgoloFuzzOne_QuoteToASCII) isNgoloFuzzOne_Item()             {}
func (*NgoloFuzzOne_AppendQuoteToASCII) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_QuoteToGraphic) isNgoloFuzzOne_Item()           {}
func (*NgoloFuzzOne_AppendQuoteToGraphic) isNgoloFuzzOne_Item()     {}
func (*NgoloFuzzOne_QuoteRune) isNgoloFuzzOne_Item()                {}
func (*NgoloFuzzOne_AppendQuoteRune) isNgoloFuzzOne_Item()          {}
func (*NgoloFuzzOne_QuoteRuneToASCII) isNgoloFuzzOne_Item()         {}
func (*NgoloFuzzOne_AppendQuoteRuneToASCII) isNgoloFuzzOne_Item()   {}
func (*NgoloFuzzOne_QuoteRuneToGraphic) isNgoloFuzzOne_Item()       {}
func (*NgoloFuzzOne_AppendQuoteRuneToGraphic) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CanBackquote) isNgoloFuzzOne_Item()             {}
func (*NgoloFuzzOne_UnquoteChar) isNgoloFuzzOne_Item()              {}
func (*NgoloFuzzOne_QuotedPrefix) isNgoloFuzzOne_Item()             {}
func (*NgoloFuzzOne_Unquote) isNgoloFuzzOne_Item()                  {}
func (*NgoloFuzzOne_IsPrint) isNgoloFuzzOne_Item()                  {}
func (*NgoloFuzzOne_IsGraphic) isNgoloFuzzOne_Item()                {}
// NgoloFuzzAny holds one scalar value of any of five wire types in a
// proto3 oneof; presumably used by the harness to feed arbitrary primitive
// values — confirm against the fuzzer driver.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and rebinds its type info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[34]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (type slot 34).
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[34]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{34}
}

// GetItem returns the populated oneof wrapper, or nil if none is set.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// Each Get* accessor returns its oneof case's value, or the scalar zero
// value when the receiver is nil or a different case is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item seals the oneof; only the wrappers below implement it.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item()  {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item()   {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item()  {}
// NgoloFuzzList is the top-level fuzz corpus message: an ordered sequence
// of NgoloFuzzOne calls to replay.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero state and rebinds its type info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[35]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering via the protobuf runtime.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message (type slot 35).
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[35]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{35}
}

// GetList returns the List field; nil-receiver safe (returns nil).
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled descriptor for ngolofuzz.proto,
// populated at init time by the protobuf runtime.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the serialized FileDescriptorProto for
// ngolofuzz.proto, embedded as a string constant. It is opaque wire data:
// any byte change corrupts the descriptor, so it must never be hand-edited.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"!\n" +
	"\rParseBoolArgs\x12\x10\n" +
	"\x03str\x18\x01 \x01(\tR\x03str\"\x1e\n" +
	"\x0eFormatBoolArgs\x12\f\n" +
	"\x01b\x18\x01 \x01(\bR\x01b\"0\n" +
	"\x0eAppendBoolArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
	"\x01b\x18\x02 \x01(\bR\x01b\":\n" +
	"\x10ParseComplexArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\x12\x18\n" +
	"\abitSize\x18\x02 \x01(\x03R\abitSize\"8\n" +
	"\x0eParseFloatArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\x12\x18\n" +
	"\abitSize\x18\x02 \x01(\x03R\abitSize\"K\n" +
	"\rParseUintArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\x12\x12\n" +
	"\x04base\x18\x02 \x01(\x03R\x04base\x12\x18\n" +
	"\abitSize\x18\x03 \x01(\x03R\abitSize\"J\n" +
	"\fParseIntArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\x12\x12\n" +
	"\x04base\x18\x02 \x01(\x03R\x04base\x12\x18\n" +
	"\abitSize\x18\x03 \x01(\x03R\abitSize\"\x18\n" +
	"\bAtoiArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"_\n" +
	"\x0fFormatFloatArgs\x12\f\n" +
	"\x01f\x18\x01 \x01(\x01R\x01f\x12\x10\n" +
	"\x03fmt\x18\x02 \x01(\rR\x03fmt\x12\x12\n" +
	"\x04prec\x18\x03 \x01(\x03R\x04prec\x12\x18\n" +
	"\abitSize\x18\x04 \x01(\x03R\abitSize\"q\n" +
	"\x0fAppendFloatArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
	"\x01f\x18\x02 \x01(\x01R\x01f\x12\x10\n" +
	"\x03fmt\x18\x03 \x01(\rR\x03fmt\x12\x12\n" +
	"\x04prec\x18\x04 \x01(\x03R\x04prec\x12\x18\n" +
	"\abitSize\x18\x05 \x01(\x03R\abitSize\"2\n" +
	"\x0eFormatUintArgs\x12\f\n" +
	"\x01i\x18\x01 \x01(\x04R\x01i\x12\x12\n" +
	"\x04base\x18\x02 \x01(\x03R\x04base\"1\n" +
	"\rFormatIntArgs\x12\f\n" +
	"\x01i\x18\x01 \x01(\x03R\x01i\x12\x12\n" +
	"\x04base\x18\x02 \x01(\x03R\x04base\"\x18\n" +
	"\bItoaArgs\x12\f\n" +
	"\x01i\x18\x01 \x01(\x03R\x01i\"C\n" +
	"\rAppendIntArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
	"\x01i\x18\x02 \x01(\x03R\x01i\x12\x12\n" +
	"\x04base\x18\x03 \x01(\x03R\x04base\"D\n" +
	"\x0eAppendUintArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
	"\x01i\x18\x02 \x01(\x04R\x01i\x12\x12\n" +
	"\x04base\x18\x03 \x01(\x03R\x04base\"\x19\n" +
	"\tQuoteArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"1\n" +
	"\x0fAppendQuoteArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
	"\x01s\x18\x02 \x01(\tR\x01s\" \n" +
	"\x10QuoteToASCIIArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"8\n" +
	"\x16AppendQuoteToASCIIArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
	"\x01s\x18\x02 \x01(\tR\x01s\"\"\n" +
	"\x12QuoteToGraphicArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\":\n" +
	"\x18AppendQuoteToGraphicArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
	"\x01s\x18\x02 \x01(\tR\x01s\"\x1d\n" +
	"\rQuoteRuneArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\tR\x01r\"5\n" +
	"\x13AppendQuoteRuneArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
	"\x01r\x18\x02 \x01(\tR\x01r\"$\n" +
	"\x14QuoteRuneToASCIIArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\tR\x01r\"<\n" +
	"\x1aAppendQuoteRuneToASCIIArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
	"\x01r\x18\x02 \x01(\tR\x01r\"&\n" +
	"\x16QuoteRuneToGraphicArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\tR\x01r\">\n" +
	"\x1cAppendQuoteRuneToGraphicArgs\x12\x10\n" +
	"\x03dst\x18\x01 \x01(\fR\x03dst\x12\f\n" +
	"\x01r\x18\x02 \x01(\tR\x01r\" \n" +
	"\x10CanBackquoteArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"5\n" +
	"\x0fUnquoteCharArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\x12\x14\n" +
	"\x05quote\x18\x02 \x01(\rR\x05quote\" \n" +
	"\x10QuotedPrefixArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\x1b\n" +
	"\vUnquoteArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\x1b\n" +
	"\vIsPrintArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\tR\x01r\"\x1d\n" +
	"\rIsGraphicArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\tR\x01r\"\x83\x11\n" +
	"\fNgoloFuzzOne\x128\n" +
	"\tParseBool\x18\x01 \x01(\v2\x18.ngolofuzz.ParseBoolArgsH\x00R\tParseBool\x12;\n" +
	"\n" +
	"FormatBool\x18\x02 \x01(\v2\x19.ngolofuzz.FormatBoolArgsH\x00R\n" +
	"FormatBool\x12;\n" +
	"\n" +
	"AppendBool\x18\x03 \x01(\v2\x19.ngolofuzz.AppendBoolArgsH\x00R\n" +
	"AppendBool\x12A\n" +
	"\fParseComplex\x18\x04 \x01(\v2\x1b.ngolofuzz.ParseComplexArgsH\x00R\fParseComplex\x12;\n" +
	"\n" +
	"ParseFloat\x18\x05 \x01(\v2\x19.ngolofuzz.ParseFloatArgsH\x00R\n" +
	"ParseFloat\x128\n" +
	"\tParseUint\x18\x06 \x01(\v2\x18.ngolofuzz.ParseUintArgsH\x00R\tParseUint\x125\n" +
	"\bParseInt\x18\a \x01(\v2\x17.ngolofuzz.ParseIntArgsH\x00R\bParseInt\x12)\n" +
	"\x04Atoi\x18\b \x01(\v2\x13.ngolofuzz.AtoiArgsH\x00R\x04Atoi\x12>\n" +
	"\vFormatFloat\x18\t \x01(\v2\x1a.ngolofuzz.FormatFloatArgsH\x00R\vFormatFloat\x12>\n" +
	"\vAppendFloat\x18\n" +
	" \x01(\v2\x1a.ngolofuzz.AppendFloatArgsH\x00R\vAppendFloat\x12;\n" +
	"\n" +
	"FormatUint\x18\v \x01(\v2\x19.ngolofuzz.FormatUintArgsH\x00R\n" +
	"FormatUint\x128\n" +
	"\tFormatInt\x18\f \x01(\v2\x18.ngolofuzz.FormatIntArgsH\x00R\tFormatInt\x12)\n" +
	"\x04Itoa\x18\r \x01(\v2\x13.ngolofuzz.ItoaArgsH\x00R\x04Itoa\x128\n" +
	"\tAppendInt\x18\x0e \x01(\v2\x18.ngolofuzz.AppendIntArgsH\x00R\tAppendInt\x12;\n" +
	"\n" +
	"AppendUint\x18\x0f \x01(\v2\x19.ngolofuzz.AppendUintArgsH\x00R\n" +
	"AppendUint\x12,\n" +
	"\x05Quote\x18\x10 \x01(\v2\x14.ngolofuzz.QuoteArgsH\x00R\x05Quote\x12>\n" +
	"\vAppendQuote\x18\x11 \x01(\v2\x1a.ngolofuzz.AppendQuoteArgsH\x00R\vAppendQuote\x12A\n" +
	"\fQuoteToASCII\x18\x12 \x01(\v2\x1b.ngolofuzz.QuoteToASCIIArgsH\x00R\fQuoteToASCII\x12S\n" +
	"\x12AppendQuoteToASCII\x18\x13 \x01(\v2!.ngolofuzz.AppendQuoteToASCIIArgsH\x00R\x12AppendQuoteToASCII\x12G\n" +
	"\x0eQuoteToGraphic\x18\x14 \x01(\v2\x1d.ngolofuzz.QuoteToGraphicArgsH\x00R\x0eQuoteToGraphic\x12Y\n" +
	"\x14AppendQuoteToGraphic\x18\x15 \x01(\v2#.ngolofuzz.AppendQuoteToGraphicArgsH\x00R\x14AppendQuoteToGraphic\x128\n" +
	"\tQuoteRune\x18\x16 \x01(\v2\x18.ngolofuzz.QuoteRuneArgsH\x00R\tQuoteRune\x12J\n" +
	"\x0fAppendQuoteRune\x18\x17 \x01(\v2\x1e.ngolofuzz.AppendQuoteRuneArgsH\x00R\x0fAppendQuoteRune\x12M\n" +
	"\x10QuoteRuneToASCII\x18\x18 \x01(\v2\x1f.ngolofuzz.QuoteRuneToASCIIArgsH\x00R\x10QuoteRuneToASCII\x12_\n" +
	"\x16AppendQuoteRuneToASCII\x18\x19 \x01(\v2%.ngolofuzz.AppendQuoteRuneToASCIIArgsH\x00R\x16AppendQuoteRuneToASCII\x12S\n" +
	"\x12QuoteRuneToGraphic\x18\x1a \x01(\v2!.ngolofuzz.QuoteRuneToGraphicArgsH\x00R\x12QuoteRuneToGraphic\x12e\n" +
	"\x18AppendQuoteRuneToGraphic\x18\x1b \x01(\v2'.ngolofuzz.AppendQuoteRuneToGraphicArgsH\x00R\x18AppendQuoteRuneToGraphic\x12A\n" +
	"\fCanBackquote\x18\x1c \x01(\v2\x1b.ngolofuzz.CanBackquoteArgsH\x00R\fCanBackquote\x12>\n" +
	"\vUnquoteChar\x18\x1d \x01(\v2\x1a.ngolofuzz.UnquoteCharArgsH\x00R\vUnquoteChar\x12A\n" +
	"\fQuotedPrefix\x18\x1e \x01(\v2\x1b.ngolofuzz.QuotedPrefixArgsH\x00R\fQuotedPrefix\x122\n" +
	"\aUnquote\x18\x1f \x01(\v2\x16.ngolofuzz.UnquoteArgsH\x00R\aUnquote\x122\n" +
	"\aIsPrint\x18 \x01(\v2\x16.ngolofuzz.IsPrintArgsH\x00R\aIsPrint\x128\n" +
	"\tIsGraphic\x18! \x01(\v2\x18.ngolofuzz.IsGraphicArgsH\x00R\tIsGraphicB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x14Z\x12./;fuzz_ng_strconvb\x06proto3"
// Lazy gzip-compression state for the raw file descriptor.
// Generated by protoc-gen-go; do not edit.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP returns the gzip-compressed raw
// descriptor bytes, compressing them exactly once per process.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime message-info slots for the 36 messages in ngolofuzz.proto.
// Generated by protoc-gen-go; do not edit.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 36)

// file_ngolofuzz_proto_goTypes maps descriptor indexes to Go message types.
var file_ngolofuzz_proto_goTypes = []any{
	(*ParseBoolArgs)(nil),                // 0: ngolofuzz.ParseBoolArgs
	(*FormatBoolArgs)(nil),               // 1: ngolofuzz.FormatBoolArgs
	(*AppendBoolArgs)(nil),               // 2: ngolofuzz.AppendBoolArgs
	(*ParseComplexArgs)(nil),             // 3: ngolofuzz.ParseComplexArgs
	(*ParseFloatArgs)(nil),               // 4: ngolofuzz.ParseFloatArgs
	(*ParseUintArgs)(nil),                // 5: ngolofuzz.ParseUintArgs
	(*ParseIntArgs)(nil),                 // 6: ngolofuzz.ParseIntArgs
	(*AtoiArgs)(nil),                     // 7: ngolofuzz.AtoiArgs
	(*FormatFloatArgs)(nil),              // 8: ngolofuzz.FormatFloatArgs
	(*AppendFloatArgs)(nil),              // 9: ngolofuzz.AppendFloatArgs
	(*FormatUintArgs)(nil),               // 10: ngolofuzz.FormatUintArgs
	(*FormatIntArgs)(nil),                // 11: ngolofuzz.FormatIntArgs
	(*ItoaArgs)(nil),                     // 12: ngolofuzz.ItoaArgs
	(*AppendIntArgs)(nil),                // 13: ngolofuzz.AppendIntArgs
	(*AppendUintArgs)(nil),               // 14: ngolofuzz.AppendUintArgs
	(*QuoteArgs)(nil),                    // 15: ngolofuzz.QuoteArgs
	(*AppendQuoteArgs)(nil),              // 16: ngolofuzz.AppendQuoteArgs
	(*QuoteToASCIIArgs)(nil),             // 17: ngolofuzz.QuoteToASCIIArgs
	(*AppendQuoteToASCIIArgs)(nil),       // 18: ngolofuzz.AppendQuoteToASCIIArgs
	(*QuoteToGraphicArgs)(nil),           // 19: ngolofuzz.QuoteToGraphicArgs
	(*AppendQuoteToGraphicArgs)(nil),     // 20: ngolofuzz.AppendQuoteToGraphicArgs
	(*QuoteRuneArgs)(nil),                // 21: ngolofuzz.QuoteRuneArgs
	(*AppendQuoteRuneArgs)(nil),          // 22: ngolofuzz.AppendQuoteRuneArgs
	(*QuoteRuneToASCIIArgs)(nil),         // 23: ngolofuzz.QuoteRuneToASCIIArgs
	(*AppendQuoteRuneToASCIIArgs)(nil),   // 24: ngolofuzz.AppendQuoteRuneToASCIIArgs
	(*QuoteRuneToGraphicArgs)(nil),       // 25: ngolofuzz.QuoteRuneToGraphicArgs
	(*AppendQuoteRuneToGraphicArgs)(nil), // 26: ngolofuzz.AppendQuoteRuneToGraphicArgs
	(*CanBackquoteArgs)(nil),             // 27: ngolofuzz.CanBackquoteArgs
	(*UnquoteCharArgs)(nil),              // 28: ngolofuzz.UnquoteCharArgs
	(*QuotedPrefixArgs)(nil),             // 29: ngolofuzz.QuotedPrefixArgs
	(*UnquoteArgs)(nil),                  // 30: ngolofuzz.UnquoteArgs
	(*IsPrintArgs)(nil),                  // 31: ngolofuzz.IsPrintArgs
	(*IsGraphicArgs)(nil),                // 32: ngolofuzz.IsGraphicArgs
	(*NgoloFuzzOne)(nil),                 // 33: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                 // 34: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                // 35: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs records field-type dependency edges
// (each NgoloFuzzOne oneof field -> its Args message).
// Generated by protoc-gen-go; do not edit.
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.ParseBool:type_name -> ngolofuzz.ParseBoolArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.FormatBool:type_name -> ngolofuzz.FormatBoolArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.AppendBool:type_name -> ngolofuzz.AppendBoolArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.ParseComplex:type_name -> ngolofuzz.ParseComplexArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.ParseFloat:type_name -> ngolofuzz.ParseFloatArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.ParseUint:type_name -> ngolofuzz.ParseUintArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.ParseInt:type_name -> ngolofuzz.ParseIntArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.Atoi:type_name -> ngolofuzz.AtoiArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.FormatFloat:type_name -> ngolofuzz.FormatFloatArgs
	9,  // 9: ngolofuzz.NgoloFuzzOne.AppendFloat:type_name -> ngolofuzz.AppendFloatArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.FormatUint:type_name -> ngolofuzz.FormatUintArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.FormatInt:type_name -> ngolofuzz.FormatIntArgs
	12, // 12: ngolofuzz.NgoloFuzzOne.Itoa:type_name -> ngolofuzz.ItoaArgs
	13, // 13: ngolofuzz.NgoloFuzzOne.AppendInt:type_name -> ngolofuzz.AppendIntArgs
	14, // 14: ngolofuzz.NgoloFuzzOne.AppendUint:type_name -> ngolofuzz.AppendUintArgs
	15, // 15: ngolofuzz.NgoloFuzzOne.Quote:type_name -> ngolofuzz.QuoteArgs
	16, // 16: ngolofuzz.NgoloFuzzOne.AppendQuote:type_name -> ngolofuzz.AppendQuoteArgs
	17, // 17: ngolofuzz.NgoloFuzzOne.QuoteToASCII:type_name -> ngolofuzz.QuoteToASCIIArgs
	18, // 18: ngolofuzz.NgoloFuzzOne.AppendQuoteToASCII:type_name -> ngolofuzz.AppendQuoteToASCIIArgs
	19, // 19: ngolofuzz.NgoloFuzzOne.QuoteToGraphic:type_name -> ngolofuzz.QuoteToGraphicArgs
	20, // 20: ngolofuzz.NgoloFuzzOne.AppendQuoteToGraphic:type_name -> ngolofuzz.AppendQuoteToGraphicArgs
	21, // 21: ngolofuzz.NgoloFuzzOne.QuoteRune:type_name -> ngolofuzz.QuoteRuneArgs
	22, // 22: ngolofuzz.NgoloFuzzOne.AppendQuoteRune:type_name -> ngolofuzz.AppendQuoteRuneArgs
	23, // 23: ngolofuzz.NgoloFuzzOne.QuoteRuneToASCII:type_name -> ngolofuzz.QuoteRuneToASCIIArgs
	24, // 24: ngolofuzz.NgoloFuzzOne.AppendQuoteRuneToASCII:type_name -> ngolofuzz.AppendQuoteRuneToASCIIArgs
	25, // 25: ngolofuzz.NgoloFuzzOne.QuoteRuneToGraphic:type_name -> ngolofuzz.QuoteRuneToGraphicArgs
	26, // 26: ngolofuzz.NgoloFuzzOne.AppendQuoteRuneToGraphic:type_name -> ngolofuzz.AppendQuoteRuneToGraphicArgs
	27, // 27: ngolofuzz.NgoloFuzzOne.CanBackquote:type_name -> ngolofuzz.CanBackquoteArgs
	28, // 28: ngolofuzz.NgoloFuzzOne.UnquoteChar:type_name -> ngolofuzz.UnquoteCharArgs
	29, // 29: ngolofuzz.NgoloFuzzOne.QuotedPrefix:type_name -> ngolofuzz.QuotedPrefixArgs
	30, // 30: ngolofuzz.NgoloFuzzOne.Unquote:type_name -> ngolofuzz.UnquoteArgs
	31, // 31: ngolofuzz.NgoloFuzzOne.IsPrint:type_name -> ngolofuzz.IsPrintArgs
	32, // 32: ngolofuzz.NgoloFuzzOne.IsGraphic:type_name -> ngolofuzz.IsGraphicArgs
	33, // 33: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	34, // [34:34] is the sub-list for method output_type
	34, // [34:34] is the sub-list for method input_type
	34, // [34:34] is the sub-list for extension type_name
	34, // [34:34] is the sub-list for extension extendee
	0,  // [0:34] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the file descriptor and message
// types with the protobuf runtime; it is idempotent.
// Generated by protoc-gen-go; do not edit.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Oneof wrapper types for NgoloFuzzOne (message index 33).
	file_ngolofuzz_proto_msgTypes[33].OneofWrappers = []any{
		(*NgoloFuzzOne_ParseBool)(nil),
		(*NgoloFuzzOne_FormatBool)(nil),
		(*NgoloFuzzOne_AppendBool)(nil),
		(*NgoloFuzzOne_ParseComplex)(nil),
		(*NgoloFuzzOne_ParseFloat)(nil),
		(*NgoloFuzzOne_ParseUint)(nil),
		(*NgoloFuzzOne_ParseInt)(nil),
		(*NgoloFuzzOne_Atoi)(nil),
		(*NgoloFuzzOne_FormatFloat)(nil),
		(*NgoloFuzzOne_AppendFloat)(nil),
		(*NgoloFuzzOne_FormatUint)(nil),
		(*NgoloFuzzOne_FormatInt)(nil),
		(*NgoloFuzzOne_Itoa)(nil),
		(*NgoloFuzzOne_AppendInt)(nil),
		(*NgoloFuzzOne_AppendUint)(nil),
		(*NgoloFuzzOne_Quote)(nil),
		(*NgoloFuzzOne_AppendQuote)(nil),
		(*NgoloFuzzOne_QuoteToASCII)(nil),
		(*NgoloFuzzOne_AppendQuoteToASCII)(nil),
		(*NgoloFuzzOne_QuoteToGraphic)(nil),
		(*NgoloFuzzOne_AppendQuoteToGraphic)(nil),
		(*NgoloFuzzOne_QuoteRune)(nil),
		(*NgoloFuzzOne_AppendQuoteRune)(nil),
		(*NgoloFuzzOne_QuoteRuneToASCII)(nil),
		(*NgoloFuzzOne_AppendQuoteRuneToASCII)(nil),
		(*NgoloFuzzOne_QuoteRuneToGraphic)(nil),
		(*NgoloFuzzOne_AppendQuoteRuneToGraphic)(nil),
		(*NgoloFuzzOne_CanBackquote)(nil),
		(*NgoloFuzzOne_UnquoteChar)(nil),
		(*NgoloFuzzOne_QuotedPrefix)(nil),
		(*NgoloFuzzOne_Unquote)(nil),
		(*NgoloFuzzOne_IsPrint)(nil),
		(*NgoloFuzzOne_IsGraphic)(nil),
	}
	// Oneof wrapper types for NgoloFuzzAny (message index 34).
	file_ngolofuzz_proto_msgTypes[34].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   36,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release build-time tables; the runtime has taken ownership.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_strings
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"strings"
"time"
)
// FuzzingConn is a net.Conn stub backed by an in-memory byte slice:
// reads consume buf, writes are accepted and discarded, and deadlines
// are no-ops. It lets fuzz targets exercise connection-consuming APIs
// deterministically.
type FuzzingConn struct {
	buf    []byte
	offset int
}

// Read copies bytes from the unread portion of buf into b and advances
// the read offset. It returns io.EOF once buf is exhausted. The returned
// n is always the number of bytes actually copied, per the io.Reader
// contract.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything that is left: fill it completely.
		// (Bug fix: the original compared len(b) against
		// len(c.buf)+c.offset instead of the remaining byte count, so it
		// could report more bytes read than were copied and push offset
		// past the end of buf.)
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b is large enough for the rest of the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}

// Write pretends the whole payload was written; the data is discarded.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is the fixed placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a constant network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a constant address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns the placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns the placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn returns a FuzzingConn that replays a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
//TODO only add these functions if needed

// CreateBigInt interprets a as a big-endian unsigned integer and returns
// it as a *big.Int (zero for an empty slice).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewReader(a)
	return bufio.NewReader(src)
}
// ConvertIntArray converts each int64 in a to the platform int type,
// preserving order. Values outside the int range are truncated by the
// conversion, as with any int64-to-int cast.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array converts each int64 in a to uint16, preserving
// order. Values are truncated modulo 2^16, as with any int64-to-uint16
// cast.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or the NUL rune when s is empty.
// An invalid leading UTF-8 sequence decodes to U+FFFD, exactly as a
// range loop over the string would produce.
func GetRune(s string) rune {
	if len(s) == 0 {
		return '\x00'
	}
	return []rune(s)[0]
}
// FuzzNG_valid runs one fuzzing iteration on data that is expected to be
// a valid protobuf encoding of NgoloFuzzList; a decode failure is fatal.
// String panics raised while interpreting the list (e.g. stdlib guard
// panics, which carry string values) are swallowed; any other panic is
// re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			// Non-string panics are unexpected: propagate them.
			panic(r)
		}
		// String panics are anticipated by the harness; ignore.
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf

// FuzzNG_unsure behaves like FuzzNG_valid but tolerates undecodable
// input, returning 0 instead of panicking when data is not a valid
// NgoloFuzzList encoding.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		// Not a valid protobuf: discard this input.
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			// Non-string panics are unexpected: propagate them.
			panic(r)
		}
		// String panics are anticipated by the harness; ignore.
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List interprets gen as a sequence of strings-package API calls
// and executes them in order. Reader and Replacer values produced by
// earlier calls are collected in slices and reused round-robin by later
// method calls; method calls are skipped while no instance exists yet.
// Returns 1 when the whole list was processed, 0 on an early bail-out
// (list too long, or a method returned a non-nil error).
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// Optionally dump a Go-source reproducer of this input, once per
		// process, to the file named by FUZZ_NG_REPRODUCER.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	var ReaderResults []*strings.Reader
	ReaderResultsIndex := 0
	var ReplacerResults []*strings.Replacer
	ReplacerResultsIndex := 0
	for l := range gen.List {
		if l > 4096 {
			// Cap the number of interpreted calls per input.
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Clone:
			strings.Clone(a.Clone.S)
		case *NgoloFuzzOne_Compare:
			strings.Compare(a.Compare.A, a.Compare.B)
		case *NgoloFuzzOne_Lines:
			strings.Lines(a.Lines.S)
		case *NgoloFuzzOne_SplitSeq:
			strings.SplitSeq(a.SplitSeq.S, a.SplitSeq.Sep)
		case *NgoloFuzzOne_SplitAfterSeq:
			strings.SplitAfterSeq(a.SplitAfterSeq.S, a.SplitAfterSeq.Sep)
		case *NgoloFuzzOne_FieldsSeq:
			strings.FieldsSeq(a.FieldsSeq.S)
		case *NgoloFuzzOne_ReaderNgdotLen:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg0.Len()
		case *NgoloFuzzOne_ReaderNgdotSize:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg0.Size()
		case *NgoloFuzzOne_ReaderNgdotRead:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.Read(a.ReaderNgdotRead.B)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReadAt:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.ReadAt(a.ReaderNgdotReadAt.B, a.ReaderNgdotReadAt.Off)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReadByte:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, r1 := arg0.ReadByte()
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotUnreadByte:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			r0 := arg0.UnreadByte()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReadRune:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			_, _, r2 := arg0.ReadRune()
			if r2 != nil {
				r2.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotUnreadRune:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			r0 := arg0.UnreadRune()
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotSeek:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg2 := int(a.ReaderNgdotSeek.Whence)
			_, r1 := arg0.Seek(a.ReaderNgdotSeek.Offset, arg2)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotWriteTo:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg1 := bytes.NewBuffer(a.ReaderNgdotWriteTo.W)
			_, r1 := arg0.WriteTo(arg1)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_ReaderNgdotReset:
			if len(ReaderResults) == 0 {
				continue
			}
			arg0 := ReaderResults[ReaderResultsIndex]
			ReaderResultsIndex = (ReaderResultsIndex + 1) % len(ReaderResults)
			arg0.Reset(a.ReaderNgdotReset.S)
		case *NgoloFuzzOne_NewReader:
			// Constructor: stash the new Reader for later method calls.
			r0 := strings.NewReader(a.NewReader.S)
			if r0 != nil {
				ReaderResults = append(ReaderResults, r0)
			}
		case *NgoloFuzzOne_ReplacerNgdotReplace:
			if len(ReplacerResults) == 0 {
				continue
			}
			arg0 := ReplacerResults[ReplacerResultsIndex]
			ReplacerResultsIndex = (ReplacerResultsIndex + 1) % len(ReplacerResults)
			arg0.Replace(a.ReplacerNgdotReplace.S)
		case *NgoloFuzzOne_ReplacerNgdotWriteString:
			if len(ReplacerResults) == 0 {
				continue
			}
			arg0 := ReplacerResults[ReplacerResultsIndex]
			ReplacerResultsIndex = (ReplacerResultsIndex + 1) % len(ReplacerResults)
			arg1 := bytes.NewBuffer(a.ReplacerNgdotWriteString.W)
			_, r1 := arg0.WriteString(arg1, a.ReplacerNgdotWriteString.S)
			if r1 != nil {
				r1.Error()
				return 0
			}
		case *NgoloFuzzOne_Count:
			strings.Count(a.Count.S, a.Count.Substr)
		case *NgoloFuzzOne_Contains:
			strings.Contains(a.Contains.S, a.Contains.Substr)
		case *NgoloFuzzOne_ContainsAny:
			strings.ContainsAny(a.ContainsAny.S, a.ContainsAny.Chars)
		case *NgoloFuzzOne_ContainsRune:
			arg1 := GetRune(a.ContainsRune.R)
			strings.ContainsRune(a.ContainsRune.S, arg1)
		case *NgoloFuzzOne_LastIndex:
			strings.LastIndex(a.LastIndex.S, a.LastIndex.Substr)
		case *NgoloFuzzOne_IndexByte:
			arg1 := byte(a.IndexByte.C)
			strings.IndexByte(a.IndexByte.S, arg1)
		case *NgoloFuzzOne_IndexRune:
			arg1 := GetRune(a.IndexRune.R)
			strings.IndexRune(a.IndexRune.S, arg1)
		case *NgoloFuzzOne_IndexAny:
			strings.IndexAny(a.IndexAny.S, a.IndexAny.Chars)
		case *NgoloFuzzOne_LastIndexAny:
			strings.LastIndexAny(a.LastIndexAny.S, a.LastIndexAny.Chars)
		case *NgoloFuzzOne_LastIndexByte:
			arg1 := byte(a.LastIndexByte.C)
			strings.LastIndexByte(a.LastIndexByte.S, arg1)
		case *NgoloFuzzOne_SplitN:
			arg2 := int(a.SplitN.N)
			strings.SplitN(a.SplitN.S, a.SplitN.Sep, arg2)
		case *NgoloFuzzOne_SplitAfterN:
			arg2 := int(a.SplitAfterN.N)
			strings.SplitAfterN(a.SplitAfterN.S, a.SplitAfterN.Sep, arg2)
		case *NgoloFuzzOne_Split:
			strings.Split(a.Split.S, a.Split.Sep)
		case *NgoloFuzzOne_SplitAfter:
			strings.SplitAfter(a.SplitAfter.S, a.SplitAfter.Sep)
		case *NgoloFuzzOne_Fields:
			strings.Fields(a.Fields.S)
		case *NgoloFuzzOne_Join:
			strings.Join(a.Join.Elems, a.Join.Sep)
		case *NgoloFuzzOne_HasPrefix:
			strings.HasPrefix(a.HasPrefix.S, a.HasPrefix.Prefix)
		case *NgoloFuzzOne_HasSuffix:
			strings.HasSuffix(a.HasSuffix.S, a.HasSuffix.Suffix)
		case *NgoloFuzzOne_Repeat:
			arg1 := int(a.Repeat.Count)
			// Bound the repeat count to keep memory use sane.
			strings.Repeat(a.Repeat.S, arg1 % 0x10001)
		case *NgoloFuzzOne_ToUpper:
			strings.ToUpper(a.ToUpper.S)
		case *NgoloFuzzOne_ToLower:
			strings.ToLower(a.ToLower.S)
		case *NgoloFuzzOne_ToTitle:
			strings.ToTitle(a.ToTitle.S)
		case *NgoloFuzzOne_ToValidUTF8:
			strings.ToValidUTF8(a.ToValidUTF8.S, a.ToValidUTF8.Replacement)
		case *NgoloFuzzOne_Title:
			strings.Title(a.Title.S)
		case *NgoloFuzzOne_Trim:
			strings.Trim(a.Trim.S, a.Trim.Cutset)
		case *NgoloFuzzOne_TrimLeft:
			strings.TrimLeft(a.TrimLeft.S, a.TrimLeft.Cutset)
		case *NgoloFuzzOne_TrimRight:
			strings.TrimRight(a.TrimRight.S, a.TrimRight.Cutset)
		case *NgoloFuzzOne_TrimSpace:
			strings.TrimSpace(a.TrimSpace.S)
		case *NgoloFuzzOne_TrimPrefix:
			strings.TrimPrefix(a.TrimPrefix.S, a.TrimPrefix.Prefix)
		case *NgoloFuzzOne_TrimSuffix:
			strings.TrimSuffix(a.TrimSuffix.S, a.TrimSuffix.Suffix)
		case *NgoloFuzzOne_Replace:
			arg3 := int(a.Replace.N)
			strings.Replace(a.Replace.S, a.Replace.Old, a.Replace.New, arg3)
		case *NgoloFuzzOne_ReplaceAll:
			strings.ReplaceAll(a.ReplaceAll.S, a.ReplaceAll.Old, a.ReplaceAll.New)
		case *NgoloFuzzOne_EqualFold:
			strings.EqualFold(a.EqualFold.S, a.EqualFold.T)
		case *NgoloFuzzOne_Index:
			strings.Index(a.Index.S, a.Index.Substr)
		case *NgoloFuzzOne_Cut:
			strings.Cut(a.Cut.S, a.Cut.Sep)
		case *NgoloFuzzOne_CutPrefix:
			strings.CutPrefix(a.CutPrefix.S, a.CutPrefix.Prefix)
		case *NgoloFuzzOne_CutSuffix:
			strings.CutSuffix(a.CutSuffix.S, a.CutSuffix.Suffix)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w: one line of
// code per list item, mirroring the calls FuzzNG_List would execute.
// Reader/Replacer variable numbering follows the same round-robin
// scheme as the interpreter so the emitted program targets the same
// instances.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	ReaderNb := 0
	ReaderResultsIndex := 0
	ReplacerNb := 0
	ReplacerResultsIndex := 0
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_Clone:
			w.WriteString(fmt.Sprintf("strings.Clone(%#+v)\n", a.Clone.S))
		case *NgoloFuzzOne_Compare:
			w.WriteString(fmt.Sprintf("strings.Compare(%#+v, %#+v)\n", a.Compare.A, a.Compare.B))
		case *NgoloFuzzOne_Lines:
			w.WriteString(fmt.Sprintf("strings.Lines(%#+v)\n", a.Lines.S))
		case *NgoloFuzzOne_SplitSeq:
			w.WriteString(fmt.Sprintf("strings.SplitSeq(%#+v, %#+v)\n", a.SplitSeq.S, a.SplitSeq.Sep))
		case *NgoloFuzzOne_SplitAfterSeq:
			w.WriteString(fmt.Sprintf("strings.SplitAfterSeq(%#+v, %#+v)\n", a.SplitAfterSeq.S, a.SplitAfterSeq.Sep))
		case *NgoloFuzzOne_FieldsSeq:
			w.WriteString(fmt.Sprintf("strings.FieldsSeq(%#+v)\n", a.FieldsSeq.S))
		case *NgoloFuzzOne_ReaderNgdotLen:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Len()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotSize:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Size()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotRead:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Read(%#+v)\n", ReaderResultsIndex, a.ReaderNgdotRead.B))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadAt:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadAt(%#+v, %#+v)\n", ReaderResultsIndex, a.ReaderNgdotReadAt.B, a.ReaderNgdotReadAt.Off))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadByte:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadByte()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotUnreadByte:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.UnreadByte()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReadRune:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.ReadRune()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotUnreadRune:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.UnreadRune()\n", ReaderResultsIndex))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotSeek:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Seek(%#+v, int(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotSeek.Offset, a.ReaderNgdotSeek.Whence))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotWriteTo:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.WriteTo(bytes.NewBuffer(%#+v))\n", ReaderResultsIndex, a.ReaderNgdotWriteTo.W))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_ReaderNgdotReset:
			if ReaderNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Reader%d.Reset(%#+v)\n", ReaderResultsIndex, a.ReaderNgdotReset.S))
			ReaderResultsIndex = (ReaderResultsIndex + 1) % ReaderNb
		case *NgoloFuzzOne_NewReader:
			// Constructor: introduce a new numbered Reader variable.
			w.WriteString(fmt.Sprintf("Reader%d := strings.NewReader(%#+v)\n", ReaderNb, a.NewReader.S))
			ReaderNb = ReaderNb + 1
		case *NgoloFuzzOne_ReplacerNgdotReplace:
			if ReplacerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Replacer%d.Replace(%#+v)\n", ReplacerResultsIndex, a.ReplacerNgdotReplace.S))
			ReplacerResultsIndex = (ReplacerResultsIndex + 1) % ReplacerNb
		case *NgoloFuzzOne_ReplacerNgdotWriteString:
			if ReplacerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Replacer%d.WriteString(bytes.NewBuffer(%#+v), %#+v)\n", ReplacerResultsIndex, a.ReplacerNgdotWriteString.W, a.ReplacerNgdotWriteString.S))
			ReplacerResultsIndex = (ReplacerResultsIndex + 1) % ReplacerNb
		case *NgoloFuzzOne_Count:
			w.WriteString(fmt.Sprintf("strings.Count(%#+v, %#+v)\n", a.Count.S, a.Count.Substr))
		case *NgoloFuzzOne_Contains:
			w.WriteString(fmt.Sprintf("strings.Contains(%#+v, %#+v)\n", a.Contains.S, a.Contains.Substr))
		case *NgoloFuzzOne_ContainsAny:
			w.WriteString(fmt.Sprintf("strings.ContainsAny(%#+v, %#+v)\n", a.ContainsAny.S, a.ContainsAny.Chars))
		case *NgoloFuzzOne_ContainsRune:
			w.WriteString(fmt.Sprintf("strings.ContainsRune(%#+v, GetRune(%#+v))\n", a.ContainsRune.S, a.ContainsRune.R))
		case *NgoloFuzzOne_LastIndex:
			w.WriteString(fmt.Sprintf("strings.LastIndex(%#+v, %#+v)\n", a.LastIndex.S, a.LastIndex.Substr))
		case *NgoloFuzzOne_IndexByte:
			w.WriteString(fmt.Sprintf("strings.IndexByte(%#+v, byte(%#+v))\n", a.IndexByte.S, a.IndexByte.C))
		case *NgoloFuzzOne_IndexRune:
			w.WriteString(fmt.Sprintf("strings.IndexRune(%#+v, GetRune(%#+v))\n", a.IndexRune.S, a.IndexRune.R))
		case *NgoloFuzzOne_IndexAny:
			w.WriteString(fmt.Sprintf("strings.IndexAny(%#+v, %#+v)\n", a.IndexAny.S, a.IndexAny.Chars))
		case *NgoloFuzzOne_LastIndexAny:
			w.WriteString(fmt.Sprintf("strings.LastIndexAny(%#+v, %#+v)\n", a.LastIndexAny.S, a.LastIndexAny.Chars))
		case *NgoloFuzzOne_LastIndexByte:
			w.WriteString(fmt.Sprintf("strings.LastIndexByte(%#+v, byte(%#+v))\n", a.LastIndexByte.S, a.LastIndexByte.C))
		case *NgoloFuzzOne_SplitN:
			w.WriteString(fmt.Sprintf("strings.SplitN(%#+v, %#+v, int(%#+v))\n", a.SplitN.S, a.SplitN.Sep, a.SplitN.N))
		case *NgoloFuzzOne_SplitAfterN:
			w.WriteString(fmt.Sprintf("strings.SplitAfterN(%#+v, %#+v, int(%#+v))\n", a.SplitAfterN.S, a.SplitAfterN.Sep, a.SplitAfterN.N))
		case *NgoloFuzzOne_Split:
			w.WriteString(fmt.Sprintf("strings.Split(%#+v, %#+v)\n", a.Split.S, a.Split.Sep))
		case *NgoloFuzzOne_SplitAfter:
			w.WriteString(fmt.Sprintf("strings.SplitAfter(%#+v, %#+v)\n", a.SplitAfter.S, a.SplitAfter.Sep))
		case *NgoloFuzzOne_Fields:
			w.WriteString(fmt.Sprintf("strings.Fields(%#+v)\n", a.Fields.S))
		case *NgoloFuzzOne_Join:
			w.WriteString(fmt.Sprintf("strings.Join(%#+v, %#+v)\n", a.Join.Elems, a.Join.Sep))
		case *NgoloFuzzOne_HasPrefix:
			w.WriteString(fmt.Sprintf("strings.HasPrefix(%#+v, %#+v)\n", a.HasPrefix.S, a.HasPrefix.Prefix))
		case *NgoloFuzzOne_HasSuffix:
			w.WriteString(fmt.Sprintf("strings.HasSuffix(%#+v, %#+v)\n", a.HasSuffix.S, a.HasSuffix.Suffix))
		case *NgoloFuzzOne_Repeat:
			// %% emits a literal % so the reproducer also bounds the count.
			w.WriteString(fmt.Sprintf("strings.Repeat(%#+v, int(%#+v) %% 0x10001)\n", a.Repeat.S, a.Repeat.Count))
		case *NgoloFuzzOne_ToUpper:
			w.WriteString(fmt.Sprintf("strings.ToUpper(%#+v)\n", a.ToUpper.S))
		case *NgoloFuzzOne_ToLower:
			w.WriteString(fmt.Sprintf("strings.ToLower(%#+v)\n", a.ToLower.S))
		case *NgoloFuzzOne_ToTitle:
			w.WriteString(fmt.Sprintf("strings.ToTitle(%#+v)\n", a.ToTitle.S))
		case *NgoloFuzzOne_ToValidUTF8:
			w.WriteString(fmt.Sprintf("strings.ToValidUTF8(%#+v, %#+v)\n", a.ToValidUTF8.S, a.ToValidUTF8.Replacement))
		case *NgoloFuzzOne_Title:
			w.WriteString(fmt.Sprintf("strings.Title(%#+v)\n", a.Title.S))
		case *NgoloFuzzOne_Trim:
			w.WriteString(fmt.Sprintf("strings.Trim(%#+v, %#+v)\n", a.Trim.S, a.Trim.Cutset))
		case *NgoloFuzzOne_TrimLeft:
			w.WriteString(fmt.Sprintf("strings.TrimLeft(%#+v, %#+v)\n", a.TrimLeft.S, a.TrimLeft.Cutset))
		case *NgoloFuzzOne_TrimRight:
			w.WriteString(fmt.Sprintf("strings.TrimRight(%#+v, %#+v)\n", a.TrimRight.S, a.TrimRight.Cutset))
		case *NgoloFuzzOne_TrimSpace:
			w.WriteString(fmt.Sprintf("strings.TrimSpace(%#+v)\n", a.TrimSpace.S))
		case *NgoloFuzzOne_TrimPrefix:
			w.WriteString(fmt.Sprintf("strings.TrimPrefix(%#+v, %#+v)\n", a.TrimPrefix.S, a.TrimPrefix.Prefix))
		case *NgoloFuzzOne_TrimSuffix:
			w.WriteString(fmt.Sprintf("strings.TrimSuffix(%#+v, %#+v)\n", a.TrimSuffix.S, a.TrimSuffix.Suffix))
		case *NgoloFuzzOne_Replace:
			w.WriteString(fmt.Sprintf("strings.Replace(%#+v, %#+v, %#+v, int(%#+v))\n", a.Replace.S, a.Replace.Old, a.Replace.New, a.Replace.N))
		case *NgoloFuzzOne_ReplaceAll:
			w.WriteString(fmt.Sprintf("strings.ReplaceAll(%#+v, %#+v, %#+v)\n", a.ReplaceAll.S, a.ReplaceAll.Old, a.ReplaceAll.New))
		case *NgoloFuzzOne_EqualFold:
			w.WriteString(fmt.Sprintf("strings.EqualFold(%#+v, %#+v)\n", a.EqualFold.S, a.EqualFold.T))
		case *NgoloFuzzOne_Index:
			w.WriteString(fmt.Sprintf("strings.Index(%#+v, %#+v)\n", a.Index.S, a.Index.Substr))
		case *NgoloFuzzOne_Cut:
			w.WriteString(fmt.Sprintf("strings.Cut(%#+v, %#+v)\n", a.Cut.S, a.Cut.Sep))
		case *NgoloFuzzOne_CutPrefix:
			w.WriteString(fmt.Sprintf("strings.CutPrefix(%#+v, %#+v)\n", a.CutPrefix.S, a.CutPrefix.Prefix))
		case *NgoloFuzzOne_CutSuffix:
			w.WriteString(fmt.Sprintf("strings.CutSuffix(%#+v, %#+v)\n", a.CutSuffix.S, a.CutSuffix.Suffix))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_strings
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards pinning the protobuf runtime version range.
// Generated by protoc-gen-go; do not edit.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// CloneArgs carries the string argument for a strings.Clone call.
// Generated by protoc-gen-go from ngolofuzz.proto; do not edit.
type CloneArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CloneArgs) Reset() {
	*x = CloneArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CloneArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CloneArgs) ProtoMessage() {}

func (x *CloneArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CloneArgs.ProtoReflect.Descriptor instead.
func (*CloneArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *CloneArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// CompareArgs carries the two string arguments for a strings.Compare call.
// Generated by protoc-gen-go from ngolofuzz.proto; do not edit.
type CompareArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	A             string                 `protobuf:"bytes,1,opt,name=a,proto3" json:"a,omitempty"`
	B             string                 `protobuf:"bytes,2,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *CompareArgs) Reset() {
	*x = CompareArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *CompareArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CompareArgs) ProtoMessage() {}

func (x *CompareArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CompareArgs.ProtoReflect.Descriptor instead.
func (*CompareArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetA returns the a field; safe to call on a nil receiver.
func (x *CompareArgs) GetA() string {
	if x != nil {
		return x.A
	}
	return ""
}

// GetB returns the b field; safe to call on a nil receiver.
func (x *CompareArgs) GetB() string {
	if x != nil {
		return x.B
	}
	return ""
}
// LinesArgs carries the string argument for a strings.Lines call.
// Generated by protoc-gen-go from ngolofuzz.proto; do not edit.
type LinesArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *LinesArgs) Reset() {
	*x = LinesArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *LinesArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LinesArgs) ProtoMessage() {}

func (x *LinesArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use LinesArgs.ProtoReflect.Descriptor instead.
func (*LinesArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *LinesArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// SplitSeqArgs carries the arguments for a strings.SplitSeq call.
// Generated by protoc-gen-go from ngolofuzz.proto; do not edit.
type SplitSeqArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Sep           string                 `protobuf:"bytes,2,opt,name=sep,proto3" json:"sep,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *SplitSeqArgs) Reset() {
	*x = SplitSeqArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *SplitSeqArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SplitSeqArgs) ProtoMessage() {}

func (x *SplitSeqArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SplitSeqArgs.ProtoReflect.Descriptor instead.
func (*SplitSeqArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *SplitSeqArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetSep returns the sep field; safe to call on a nil receiver.
func (x *SplitSeqArgs) GetSep() string {
	if x != nil {
		return x.Sep
	}
	return ""
}
// SplitAfterSeqArgs carries the arguments for a strings.SplitAfterSeq call.
// Generated by protoc-gen-go from ngolofuzz.proto; do not edit.
type SplitAfterSeqArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Sep           string                 `protobuf:"bytes,2,opt,name=sep,proto3" json:"sep,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *SplitAfterSeqArgs) Reset() {
	*x = SplitAfterSeqArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *SplitAfterSeqArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SplitAfterSeqArgs) ProtoMessage() {}

func (x *SplitAfterSeqArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SplitAfterSeqArgs.ProtoReflect.Descriptor instead.
func (*SplitAfterSeqArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *SplitAfterSeqArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetSep returns the sep field; safe to call on a nil receiver.
func (x *SplitAfterSeqArgs) GetSep() string {
	if x != nil {
		return x.Sep
	}
	return ""
}
// FieldsSeqArgs is the protoc-generated message carrying the single string
// argument s for the FieldsSeq fuzz entry (from ngolofuzz.proto).
// Generated code — regenerate rather than editing by hand.
type FieldsSeqArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *FieldsSeqArgs) Reset() {
*x = FieldsSeqArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *FieldsSeqArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*FieldsSeqArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *FieldsSeqArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FieldsSeqArgs.ProtoReflect.Descriptor instead.
func (*FieldsSeqArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *FieldsSeqArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// ReaderNgdotLenArgs is the protoc-generated, field-less message for the
// Reader.Len fuzz entry ("Ngdot" presumably encodes the "." in the Go name —
// TODO confirm against the generator). Generated code — do not edit by hand.
type ReaderNgdotLenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotLenArgs) Reset() {
*x = ReaderNgdotLenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotLenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotLenArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotLenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotLenArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotLenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// ReaderNgdotSizeArgs is the protoc-generated, field-less message for the
// Reader.Size fuzz entry (from ngolofuzz.proto). Generated code — do not edit.
type ReaderNgdotSizeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotSizeArgs) Reset() {
*x = ReaderNgdotSizeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotSizeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotSizeArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotSizeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotSizeArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotSizeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// ReaderNgdotReadArgs is the protoc-generated message carrying the byte-slice
// argument b for the Reader.Read fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type ReaderNgdotReadArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotReadArgs) Reset() {
*x = ReaderNgdotReadArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotReadArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotReadArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotReadArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// GetB returns the b field, or nil on a nil receiver.
func (x *ReaderNgdotReadArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// ReaderNgdotReadAtArgs is the protoc-generated message carrying the (b, off)
// arguments for the Reader.ReadAt fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type ReaderNgdotReadAtArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
Off int64 `protobuf:"varint,2,opt,name=off,proto3" json:"off,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotReadAtArgs) Reset() {
*x = ReaderNgdotReadAtArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotReadAtArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotReadAtArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotReadAtArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadAtArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadAtArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetB returns the b field, or nil on a nil receiver.
func (x *ReaderNgdotReadAtArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// GetOff returns the off field, or 0 on a nil receiver.
func (x *ReaderNgdotReadAtArgs) GetOff() int64 {
if x != nil {
return x.Off
}
return 0
}
// ReaderNgdotReadByteArgs is the protoc-generated, field-less message for the
// Reader.ReadByte fuzz entry (from ngolofuzz.proto). Generated code — do not edit.
type ReaderNgdotReadByteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotReadByteArgs) Reset() {
*x = ReaderNgdotReadByteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotReadByteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotReadByteArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotReadByteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadByteArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadByteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// ReaderNgdotUnreadByteArgs is the protoc-generated, field-less message for the
// Reader.UnreadByte fuzz entry (from ngolofuzz.proto). Generated code — do not edit.
type ReaderNgdotUnreadByteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotUnreadByteArgs) Reset() {
*x = ReaderNgdotUnreadByteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotUnreadByteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotUnreadByteArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotUnreadByteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotUnreadByteArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotUnreadByteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// ReaderNgdotReadRuneArgs is the protoc-generated, field-less message for the
// Reader.ReadRune fuzz entry (from ngolofuzz.proto). Generated code — do not edit.
type ReaderNgdotReadRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotReadRuneArgs) Reset() {
*x = ReaderNgdotReadRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotReadRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotReadRuneArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotReadRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotReadRuneArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotReadRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// ReaderNgdotUnreadRuneArgs is the protoc-generated, field-less message for the
// Reader.UnreadRune fuzz entry (from ngolofuzz.proto). Generated code — do not edit.
type ReaderNgdotUnreadRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotUnreadRuneArgs) Reset() {
*x = ReaderNgdotUnreadRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotUnreadRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotUnreadRuneArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotUnreadRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotUnreadRuneArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotUnreadRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// ReaderNgdotSeekArgs is the protoc-generated message carrying the
// (offset, whence) arguments for the Reader.Seek fuzz entry (from
// ngolofuzz.proto). Generated code — do not edit by hand.
type ReaderNgdotSeekArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
Whence int64 `protobuf:"varint,2,opt,name=whence,proto3" json:"whence,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotSeekArgs) Reset() {
*x = ReaderNgdotSeekArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotSeekArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotSeekArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotSeekArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotSeekArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotSeekArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// GetOffset returns the offset field, or 0 on a nil receiver.
func (x *ReaderNgdotSeekArgs) GetOffset() int64 {
if x != nil {
return x.Offset
}
return 0
}
// GetWhence returns the whence field, or 0 on a nil receiver.
func (x *ReaderNgdotSeekArgs) GetWhence() int64 {
if x != nil {
return x.Whence
}
return 0
}
// ReaderNgdotWriteToArgs is the protoc-generated message carrying the w byte
// payload for the Reader.WriteTo fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type ReaderNgdotWriteToArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotWriteToArgs) Reset() {
*x = ReaderNgdotWriteToArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotWriteToArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotWriteToArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotWriteToArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotWriteToArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotWriteToArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetW returns the w field, or nil on a nil receiver.
func (x *ReaderNgdotWriteToArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
// ReaderNgdotResetArgs is the protoc-generated message carrying the string
// argument s for the Reader.Reset fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type ReaderNgdotResetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReaderNgdotResetArgs) Reset() {
*x = ReaderNgdotResetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReaderNgdotResetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReaderNgdotResetArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReaderNgdotResetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReaderNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*ReaderNgdotResetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *ReaderNgdotResetArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// NewReaderArgs is the protoc-generated message carrying the string argument s
// for the NewReader fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type NewReaderArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *NewReaderArgs) Reset() {
*x = NewReaderArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *NewReaderArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*NewReaderArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *NewReaderArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewReaderArgs.ProtoReflect.Descriptor instead.
func (*NewReaderArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *NewReaderArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// ReplacerNgdotReplaceArgs is the protoc-generated message carrying the string
// argument s for the Replacer.Replace fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type ReplacerNgdotReplaceArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReplacerNgdotReplaceArgs) Reset() {
*x = ReplacerNgdotReplaceArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReplacerNgdotReplaceArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReplacerNgdotReplaceArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReplacerNgdotReplaceArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReplacerNgdotReplaceArgs.ProtoReflect.Descriptor instead.
func (*ReplacerNgdotReplaceArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *ReplacerNgdotReplaceArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// ReplacerNgdotWriteStringArgs is the protoc-generated message carrying the
// (w, s) arguments for the Replacer.WriteString fuzz entry (from
// ngolofuzz.proto). Generated code — do not edit by hand.
type ReplacerNgdotWriteStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
S string `protobuf:"bytes,2,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ReplacerNgdotWriteStringArgs) Reset() {
*x = ReplacerNgdotWriteStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ReplacerNgdotWriteStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ReplacerNgdotWriteStringArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ReplacerNgdotWriteStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReplacerNgdotWriteStringArgs.ProtoReflect.Descriptor instead.
func (*ReplacerNgdotWriteStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// GetW returns the w field, or nil on a nil receiver.
func (x *ReplacerNgdotWriteStringArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
// GetS returns the s field, or "" on a nil receiver.
func (x *ReplacerNgdotWriteStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// CountArgs is the protoc-generated message carrying the (s, substr) arguments
// for the Count fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type CountArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Substr string `protobuf:"bytes,2,opt,name=substr,proto3" json:"substr,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *CountArgs) Reset() {
*x = CountArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *CountArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*CountArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *CountArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CountArgs.ProtoReflect.Descriptor instead.
func (*CountArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *CountArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetSubstr returns the substr field, or "" on a nil receiver.
func (x *CountArgs) GetSubstr() string {
if x != nil {
return x.Substr
}
return ""
}
// ContainsArgs is the protoc-generated message carrying the (s, substr)
// arguments for the Contains fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type ContainsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Substr string `protobuf:"bytes,2,opt,name=substr,proto3" json:"substr,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ContainsArgs) Reset() {
*x = ContainsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ContainsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ContainsArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ContainsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ContainsArgs.ProtoReflect.Descriptor instead.
func (*ContainsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *ContainsArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetSubstr returns the substr field, or "" on a nil receiver.
func (x *ContainsArgs) GetSubstr() string {
if x != nil {
return x.Substr
}
return ""
}
// ContainsAnyArgs is the protoc-generated message carrying the (s, chars)
// arguments for the ContainsAny fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type ContainsAnyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Chars string `protobuf:"bytes,2,opt,name=chars,proto3" json:"chars,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ContainsAnyArgs) Reset() {
*x = ContainsAnyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ContainsAnyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ContainsAnyArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ContainsAnyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ContainsAnyArgs.ProtoReflect.Descriptor instead.
func (*ContainsAnyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *ContainsAnyArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetChars returns the chars field, or "" on a nil receiver.
func (x *ContainsAnyArgs) GetChars() string {
if x != nil {
return x.Chars
}
return ""
}
// ContainsRuneArgs is the protoc-generated message carrying the (s, r)
// arguments for the ContainsRune fuzz entry; r is modeled as a string field
// here (presumably decoded to a rune by the harness — TODO confirm).
// Generated code — do not edit by hand.
type ContainsRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
R string `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *ContainsRuneArgs) Reset() {
*x = ContainsRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *ContainsRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*ContainsRuneArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *ContainsRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ContainsRuneArgs.ProtoReflect.Descriptor instead.
func (*ContainsRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *ContainsRuneArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetR returns the r field, or "" on a nil receiver.
func (x *ContainsRuneArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
// LastIndexArgs is the protoc-generated message carrying the (s, substr)
// arguments for the LastIndex fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type LastIndexArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Substr string `protobuf:"bytes,2,opt,name=substr,proto3" json:"substr,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *LastIndexArgs) Reset() {
*x = LastIndexArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *LastIndexArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*LastIndexArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *LastIndexArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LastIndexArgs.ProtoReflect.Descriptor instead.
func (*LastIndexArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *LastIndexArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetSubstr returns the substr field, or "" on a nil receiver.
func (x *LastIndexArgs) GetSubstr() string {
if x != nil {
return x.Substr
}
return ""
}
// IndexByteArgs is the protoc-generated message carrying the (s, c) arguments
// for the IndexByte fuzz entry; c is a uint32 on the wire (presumably narrowed
// to a byte by the harness — TODO confirm). Generated code — do not edit.
type IndexByteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
C uint32 `protobuf:"varint,2,opt,name=c,proto3" json:"c,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *IndexByteArgs) Reset() {
*x = IndexByteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *IndexByteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*IndexByteArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *IndexByteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IndexByteArgs.ProtoReflect.Descriptor instead.
func (*IndexByteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *IndexByteArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetC returns the c field, or 0 on a nil receiver.
func (x *IndexByteArgs) GetC() uint32 {
if x != nil {
return x.C
}
return 0
}
// IndexRuneArgs is the protoc-generated message carrying the (s, r) arguments
// for the IndexRune fuzz entry; r is modeled as a string on the wire.
// Generated code — do not edit by hand.
type IndexRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
R string `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *IndexRuneArgs) Reset() {
*x = IndexRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *IndexRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*IndexRuneArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *IndexRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IndexRuneArgs.ProtoReflect.Descriptor instead.
func (*IndexRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *IndexRuneArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetR returns the r field, or "" on a nil receiver.
func (x *IndexRuneArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
// IndexAnyArgs is the protoc-generated message carrying the (s, chars)
// arguments for the IndexAny fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type IndexAnyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Chars string `protobuf:"bytes,2,opt,name=chars,proto3" json:"chars,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *IndexAnyArgs) Reset() {
*x = IndexAnyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *IndexAnyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*IndexAnyArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *IndexAnyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IndexAnyArgs.ProtoReflect.Descriptor instead.
func (*IndexAnyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *IndexAnyArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetChars returns the chars field, or "" on a nil receiver.
func (x *IndexAnyArgs) GetChars() string {
if x != nil {
return x.Chars
}
return ""
}
// LastIndexAnyArgs is the protoc-generated message carrying the (s, chars)
// arguments for the LastIndexAny fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type LastIndexAnyArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Chars string `protobuf:"bytes,2,opt,name=chars,proto3" json:"chars,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *LastIndexAnyArgs) Reset() {
*x = LastIndexAnyArgs{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *LastIndexAnyArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*LastIndexAnyArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *LastIndexAnyArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LastIndexAnyArgs.ProtoReflect.Descriptor instead.
func (*LastIndexAnyArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *LastIndexAnyArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetChars returns the chars field, or "" on a nil receiver.
func (x *LastIndexAnyArgs) GetChars() string {
if x != nil {
return x.Chars
}
return ""
}
// LastIndexByteArgs is the protoc-generated message carrying the (s, c)
// arguments for the LastIndexByte fuzz entry; c is a uint32 on the wire.
// Generated code — do not edit by hand.
type LastIndexByteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
C uint32 `protobuf:"varint,2,opt,name=c,proto3" json:"c,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *LastIndexByteArgs) Reset() {
*x = LastIndexByteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *LastIndexByteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*LastIndexByteArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *LastIndexByteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LastIndexByteArgs.ProtoReflect.Descriptor instead.
func (*LastIndexByteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *LastIndexByteArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetC returns the c field, or 0 on a nil receiver.
func (x *LastIndexByteArgs) GetC() uint32 {
if x != nil {
return x.C
}
return 0
}
// SplitNArgs is the protoc-generated message carrying the (s, sep, n)
// arguments for the SplitN fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type SplitNArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Sep string `protobuf:"bytes,2,opt,name=sep,proto3" json:"sep,omitempty"`
N int64 `protobuf:"varint,3,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *SplitNArgs) Reset() {
*x = SplitNArgs{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *SplitNArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*SplitNArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *SplitNArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SplitNArgs.ProtoReflect.Descriptor instead.
func (*SplitNArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *SplitNArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetSep returns the sep field, or "" on a nil receiver.
func (x *SplitNArgs) GetSep() string {
if x != nil {
return x.Sep
}
return ""
}
// GetN returns the n field, or 0 on a nil receiver.
func (x *SplitNArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// SplitAfterNArgs is the protoc-generated message carrying the (s, sep, n)
// arguments for the SplitAfterN fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type SplitAfterNArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Sep string `protobuf:"bytes,2,opt,name=sep,proto3" json:"sep,omitempty"`
N int64 `protobuf:"varint,3,opt,name=n,proto3" json:"n,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *SplitAfterNArgs) Reset() {
*x = SplitAfterNArgs{}
mi := &file_ngolofuzz_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *SplitAfterNArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*SplitAfterNArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *SplitAfterNArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SplitAfterNArgs.ProtoReflect.Descriptor instead.
func (*SplitAfterNArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *SplitAfterNArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetSep returns the sep field, or "" on a nil receiver.
func (x *SplitAfterNArgs) GetSep() string {
if x != nil {
return x.Sep
}
return ""
}
// GetN returns the n field, or 0 on a nil receiver.
func (x *SplitAfterNArgs) GetN() int64 {
if x != nil {
return x.N
}
return 0
}
// SplitArgs is the protoc-generated message carrying the (s, sep) arguments
// for the Split fuzz entry (from ngolofuzz.proto).
// Generated code — do not edit by hand.
type SplitArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
Sep string `protobuf:"bytes,2,opt,name=sep,proto3" json:"sep,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-binds its generated type info.
func (x *SplitArgs) Reset() {
*x = SplitArgs{}
mi := &file_ngolofuzz_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message with the protobuf text formatter.
func (x *SplitArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage is a marker method identifying this type as a protobuf message.
func (*SplitArgs) ProtoMessage() {}
// ProtoReflect returns the reflective view, lazily caching type info on first use.
func (x *SplitArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SplitArgs.ProtoReflect.Descriptor instead.
func (*SplitArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}
// GetS returns the s field, or "" on a nil receiver.
func (x *SplitArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// GetSep returns the sep field, or "" on a nil receiver.
func (x *SplitArgs) GetSep() string {
if x != nil {
return x.Sep
}
return ""
}
// SplitAfterArgs is the generated protobuf message bundling the s/sep
// arguments for a SplitAfter operation in the fuzz harness.
type SplitAfterArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Sep   string                 `protobuf:"bytes,2,opt,name=sep,proto3" json:"sep,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[33]).
func (x *SplitAfterArgs) Reset() {
	*x = SplitAfterArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[33]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *SplitAfterArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *SplitAfterArgs as a protobuf message.
func (*SplitAfterArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *SplitAfterArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[33]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SplitAfterArgs.ProtoReflect.Descriptor instead.
func (*SplitAfterArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{33}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *SplitAfterArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetSep returns the sep field; safe to call on a nil receiver.
func (x *SplitAfterArgs) GetSep() string {
	if x != nil {
		return x.Sep
	}
	return ""
}
// FieldsArgs is the generated protobuf message carrying the single string
// argument for a Fields operation in the fuzz harness.
type FieldsArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[34]).
func (x *FieldsArgs) Reset() {
	*x = FieldsArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[34]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *FieldsArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *FieldsArgs as a protobuf message.
func (*FieldsArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *FieldsArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[34]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FieldsArgs.ProtoReflect.Descriptor instead.
func (*FieldsArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{34}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *FieldsArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// JoinArgs is the generated protobuf message bundling the elems/sep
// arguments for a Join operation in the fuzz harness.
type JoinArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Elems []string               `protobuf:"bytes,1,rep,name=elems,proto3" json:"elems,omitempty"`
	Sep   string                 `protobuf:"bytes,2,opt,name=sep,proto3" json:"sep,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[35]).
func (x *JoinArgs) Reset() {
	*x = JoinArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[35]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *JoinArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *JoinArgs as a protobuf message.
func (*JoinArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *JoinArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[35]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use JoinArgs.ProtoReflect.Descriptor instead.
func (*JoinArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{35}
}

// GetElems returns the elems field (nil when unset); safe on a nil receiver.
func (x *JoinArgs) GetElems() []string {
	if x != nil {
		return x.Elems
	}
	return nil
}

// GetSep returns the sep field; safe to call on a nil receiver.
func (x *JoinArgs) GetSep() string {
	if x != nil {
		return x.Sep
	}
	return ""
}
// HasPrefixArgs is the generated protobuf message bundling the s/prefix
// arguments for a HasPrefix operation in the fuzz harness.
type HasPrefixArgs struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	S      string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Prefix string                 `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[36]).
func (x *HasPrefixArgs) Reset() {
	*x = HasPrefixArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[36]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *HasPrefixArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *HasPrefixArgs as a protobuf message.
func (*HasPrefixArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *HasPrefixArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[36]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HasPrefixArgs.ProtoReflect.Descriptor instead.
func (*HasPrefixArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{36}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *HasPrefixArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetPrefix returns the prefix field; safe to call on a nil receiver.
func (x *HasPrefixArgs) GetPrefix() string {
	if x != nil {
		return x.Prefix
	}
	return ""
}
// HasSuffixArgs is the generated protobuf message bundling the s/suffix
// arguments for a HasSuffix operation in the fuzz harness.
type HasSuffixArgs struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	S      string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Suffix string                 `protobuf:"bytes,2,opt,name=suffix,proto3" json:"suffix,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[37]).
func (x *HasSuffixArgs) Reset() {
	*x = HasSuffixArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[37]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *HasSuffixArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *HasSuffixArgs as a protobuf message.
func (*HasSuffixArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *HasSuffixArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[37]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HasSuffixArgs.ProtoReflect.Descriptor instead.
func (*HasSuffixArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{37}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *HasSuffixArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetSuffix returns the suffix field; safe to call on a nil receiver.
func (x *HasSuffixArgs) GetSuffix() string {
	if x != nil {
		return x.Suffix
	}
	return ""
}
// RepeatArgs is the generated protobuf message bundling the s/count
// arguments for a Repeat operation in the fuzz harness.
type RepeatArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Count int64                  `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[38]).
func (x *RepeatArgs) Reset() {
	*x = RepeatArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[38]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *RepeatArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *RepeatArgs as a protobuf message.
func (*RepeatArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *RepeatArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[38]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RepeatArgs.ProtoReflect.Descriptor instead.
func (*RepeatArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{38}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *RepeatArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetCount returns the count field; safe to call on a nil receiver.
func (x *RepeatArgs) GetCount() int64 {
	if x != nil {
		return x.Count
	}
	return 0
}
// ToUpperArgs is the generated protobuf message carrying the single string
// argument for a ToUpper operation in the fuzz harness.
type ToUpperArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[39]).
func (x *ToUpperArgs) Reset() {
	*x = ToUpperArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[39]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *ToUpperArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *ToUpperArgs as a protobuf message.
func (*ToUpperArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *ToUpperArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[39]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ToUpperArgs.ProtoReflect.Descriptor instead.
func (*ToUpperArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{39}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *ToUpperArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// ToLowerArgs is the generated protobuf message carrying the single string
// argument for a ToLower operation in the fuzz harness.
type ToLowerArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[40]).
func (x *ToLowerArgs) Reset() {
	*x = ToLowerArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[40]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *ToLowerArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *ToLowerArgs as a protobuf message.
func (*ToLowerArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *ToLowerArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[40]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ToLowerArgs.ProtoReflect.Descriptor instead.
func (*ToLowerArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{40}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *ToLowerArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// ToTitleArgs is the generated protobuf message carrying the single string
// argument for a ToTitle operation in the fuzz harness.
type ToTitleArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[41]).
func (x *ToTitleArgs) Reset() {
	*x = ToTitleArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[41]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *ToTitleArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *ToTitleArgs as a protobuf message.
func (*ToTitleArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *ToTitleArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[41]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ToTitleArgs.ProtoReflect.Descriptor instead.
func (*ToTitleArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{41}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *ToTitleArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// ToValidUTF8Args is the generated protobuf message bundling the
// s/replacement arguments for a ToValidUTF8 operation in the fuzz harness.
type ToValidUTF8Args struct {
	state       protoimpl.MessageState `protogen:"open.v1"`
	S           string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Replacement string                 `protobuf:"bytes,2,opt,name=replacement,proto3" json:"replacement,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[42]).
func (x *ToValidUTF8Args) Reset() {
	*x = ToValidUTF8Args{}
	mi := &file_ngolofuzz_proto_msgTypes[42]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *ToValidUTF8Args) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *ToValidUTF8Args as a protobuf message.
func (*ToValidUTF8Args) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *ToValidUTF8Args) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[42]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ToValidUTF8Args.ProtoReflect.Descriptor instead.
func (*ToValidUTF8Args) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{42}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *ToValidUTF8Args) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetReplacement returns the replacement field; safe to call on a nil receiver.
func (x *ToValidUTF8Args) GetReplacement() string {
	if x != nil {
		return x.Replacement
	}
	return ""
}
// TitleArgs is the generated protobuf message carrying the single string
// argument for a Title operation in the fuzz harness.
type TitleArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[43]).
func (x *TitleArgs) Reset() {
	*x = TitleArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[43]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *TitleArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *TitleArgs as a protobuf message.
func (*TitleArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *TitleArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[43]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TitleArgs.ProtoReflect.Descriptor instead.
func (*TitleArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{43}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *TitleArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// TrimArgs is the generated protobuf message bundling the s/cutset
// arguments for a Trim operation in the fuzz harness.
type TrimArgs struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	S      string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Cutset string                 `protobuf:"bytes,2,opt,name=cutset,proto3" json:"cutset,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[44]).
func (x *TrimArgs) Reset() {
	*x = TrimArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[44]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *TrimArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *TrimArgs as a protobuf message.
func (*TrimArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *TrimArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[44]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TrimArgs.ProtoReflect.Descriptor instead.
func (*TrimArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{44}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *TrimArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetCutset returns the cutset field; safe to call on a nil receiver.
func (x *TrimArgs) GetCutset() string {
	if x != nil {
		return x.Cutset
	}
	return ""
}
// TrimLeftArgs is the generated protobuf message bundling the s/cutset
// arguments for a TrimLeft operation in the fuzz harness.
type TrimLeftArgs struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	S      string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Cutset string                 `protobuf:"bytes,2,opt,name=cutset,proto3" json:"cutset,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[45]).
func (x *TrimLeftArgs) Reset() {
	*x = TrimLeftArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[45]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *TrimLeftArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *TrimLeftArgs as a protobuf message.
func (*TrimLeftArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *TrimLeftArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[45]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TrimLeftArgs.ProtoReflect.Descriptor instead.
func (*TrimLeftArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{45}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *TrimLeftArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetCutset returns the cutset field; safe to call on a nil receiver.
func (x *TrimLeftArgs) GetCutset() string {
	if x != nil {
		return x.Cutset
	}
	return ""
}
// TrimRightArgs is the generated protobuf message bundling the s/cutset
// arguments for a TrimRight operation in the fuzz harness.
type TrimRightArgs struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	S      string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Cutset string                 `protobuf:"bytes,2,opt,name=cutset,proto3" json:"cutset,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[46]).
func (x *TrimRightArgs) Reset() {
	*x = TrimRightArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[46]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *TrimRightArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *TrimRightArgs as a protobuf message.
func (*TrimRightArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *TrimRightArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[46]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TrimRightArgs.ProtoReflect.Descriptor instead.
func (*TrimRightArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{46}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *TrimRightArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetCutset returns the cutset field; safe to call on a nil receiver.
func (x *TrimRightArgs) GetCutset() string {
	if x != nil {
		return x.Cutset
	}
	return ""
}
// TrimSpaceArgs is the generated protobuf message carrying the single string
// argument for a TrimSpace operation in the fuzz harness.
type TrimSpaceArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[47]).
func (x *TrimSpaceArgs) Reset() {
	*x = TrimSpaceArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[47]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *TrimSpaceArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *TrimSpaceArgs as a protobuf message.
func (*TrimSpaceArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *TrimSpaceArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[47]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TrimSpaceArgs.ProtoReflect.Descriptor instead.
func (*TrimSpaceArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{47}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *TrimSpaceArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// TrimPrefixArgs is the generated protobuf message bundling the s/prefix
// arguments for a TrimPrefix operation in the fuzz harness.
type TrimPrefixArgs struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	S      string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Prefix string                 `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[48]).
func (x *TrimPrefixArgs) Reset() {
	*x = TrimPrefixArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[48]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *TrimPrefixArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *TrimPrefixArgs as a protobuf message.
func (*TrimPrefixArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *TrimPrefixArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[48]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TrimPrefixArgs.ProtoReflect.Descriptor instead.
func (*TrimPrefixArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{48}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *TrimPrefixArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetPrefix returns the prefix field; safe to call on a nil receiver.
func (x *TrimPrefixArgs) GetPrefix() string {
	if x != nil {
		return x.Prefix
	}
	return ""
}
// TrimSuffixArgs is the generated protobuf message bundling the s/suffix
// arguments for a TrimSuffix operation in the fuzz harness.
type TrimSuffixArgs struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	S      string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Suffix string                 `protobuf:"bytes,2,opt,name=suffix,proto3" json:"suffix,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[49]).
func (x *TrimSuffixArgs) Reset() {
	*x = TrimSuffixArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[49]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *TrimSuffixArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *TrimSuffixArgs as a protobuf message.
func (*TrimSuffixArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *TrimSuffixArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[49]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TrimSuffixArgs.ProtoReflect.Descriptor instead.
func (*TrimSuffixArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{49}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *TrimSuffixArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetSuffix returns the suffix field; safe to call on a nil receiver.
func (x *TrimSuffixArgs) GetSuffix() string {
	if x != nil {
		return x.Suffix
	}
	return ""
}
// ReplaceArgs is the generated protobuf message bundling the s/old/new/n
// arguments for a Replace operation in the fuzz harness.
type ReplaceArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Old   string                 `protobuf:"bytes,2,opt,name=old,proto3" json:"old,omitempty"`
	New   string                 `protobuf:"bytes,3,opt,name=new,proto3" json:"new,omitempty"`
	N     int64                  `protobuf:"varint,4,opt,name=n,proto3" json:"n,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[50]).
func (x *ReplaceArgs) Reset() {
	*x = ReplaceArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[50]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *ReplaceArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *ReplaceArgs as a protobuf message.
func (*ReplaceArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *ReplaceArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[50]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReplaceArgs.ProtoReflect.Descriptor instead.
func (*ReplaceArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{50}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *ReplaceArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetOld returns the old field; safe to call on a nil receiver.
func (x *ReplaceArgs) GetOld() string {
	if x != nil {
		return x.Old
	}
	return ""
}

// GetNew returns the new field; safe to call on a nil receiver.
func (x *ReplaceArgs) GetNew() string {
	if x != nil {
		return x.New
	}
	return ""
}

// GetN returns the n field; safe to call on a nil receiver.
func (x *ReplaceArgs) GetN() int64 {
	if x != nil {
		return x.N
	}
	return 0
}
// ReplaceAllArgs is the generated protobuf message bundling the s/old/new
// arguments for a ReplaceAll operation in the fuzz harness.
type ReplaceAllArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Old   string                 `protobuf:"bytes,2,opt,name=old,proto3" json:"old,omitempty"`
	New   string                 `protobuf:"bytes,3,opt,name=new,proto3" json:"new,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[51]).
func (x *ReplaceAllArgs) Reset() {
	*x = ReplaceAllArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[51]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *ReplaceAllArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *ReplaceAllArgs as a protobuf message.
func (*ReplaceAllArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *ReplaceAllArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[51]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReplaceAllArgs.ProtoReflect.Descriptor instead.
func (*ReplaceAllArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{51}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *ReplaceAllArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetOld returns the old field; safe to call on a nil receiver.
func (x *ReplaceAllArgs) GetOld() string {
	if x != nil {
		return x.Old
	}
	return ""
}

// GetNew returns the new field; safe to call on a nil receiver.
func (x *ReplaceAllArgs) GetNew() string {
	if x != nil {
		return x.New
	}
	return ""
}
// EqualFoldArgs is the generated protobuf message bundling the s/t
// arguments for an EqualFold operation in the fuzz harness.
type EqualFoldArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	T     string                 `protobuf:"bytes,2,opt,name=t,proto3" json:"t,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[52]).
func (x *EqualFoldArgs) Reset() {
	*x = EqualFoldArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[52]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *EqualFoldArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *EqualFoldArgs as a protobuf message.
func (*EqualFoldArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *EqualFoldArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[52]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use EqualFoldArgs.ProtoReflect.Descriptor instead.
func (*EqualFoldArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{52}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *EqualFoldArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetT returns the t field; safe to call on a nil receiver.
func (x *EqualFoldArgs) GetT() string {
	if x != nil {
		return x.T
	}
	return ""
}
// IndexArgs is the generated protobuf message bundling the s/substr
// arguments for an Index operation in the fuzz harness.
type IndexArgs struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	S      string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Substr string                 `protobuf:"bytes,2,opt,name=substr,proto3" json:"substr,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[53]).
func (x *IndexArgs) Reset() {
	*x = IndexArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[53]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *IndexArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *IndexArgs as a protobuf message.
func (*IndexArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *IndexArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[53]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IndexArgs.ProtoReflect.Descriptor instead.
func (*IndexArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{53}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *IndexArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetSubstr returns the substr field; safe to call on a nil receiver.
func (x *IndexArgs) GetSubstr() string {
	if x != nil {
		return x.Substr
	}
	return ""
}
// CutArgs is the generated protobuf message bundling the s/sep
// arguments for a Cut operation in the fuzz harness.
type CutArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S     string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Sep   string                 `protobuf:"bytes,2,opt,name=sep,proto3" json:"sep,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[54]).
func (x *CutArgs) Reset() {
	*x = CutArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[54]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *CutArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *CutArgs as a protobuf message.
func (*CutArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *CutArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[54]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CutArgs.ProtoReflect.Descriptor instead.
func (*CutArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{54}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *CutArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetSep returns the sep field; safe to call on a nil receiver.
func (x *CutArgs) GetSep() string {
	if x != nil {
		return x.Sep
	}
	return ""
}
// CutPrefixArgs is the generated protobuf message bundling the s/prefix
// arguments for a CutPrefix operation in the fuzz harness.
type CutPrefixArgs struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	S      string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Prefix string                 `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[55]).
func (x *CutPrefixArgs) Reset() {
	*x = CutPrefixArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[55]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *CutPrefixArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *CutPrefixArgs as a protobuf message.
func (*CutPrefixArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *CutPrefixArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[55]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CutPrefixArgs.ProtoReflect.Descriptor instead.
func (*CutPrefixArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{55}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *CutPrefixArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetPrefix returns the prefix field; safe to call on a nil receiver.
func (x *CutPrefixArgs) GetPrefix() string {
	if x != nil {
		return x.Prefix
	}
	return ""
}
// CutSuffixArgs is the generated protobuf message bundling the s/suffix
// arguments for a CutSuffix operation in the fuzz harness.
type CutSuffixArgs struct {
	state  protoimpl.MessageState `protogen:"open.v1"`
	S      string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	Suffix string                 `protobuf:"bytes,2,opt,name=suffix,proto3" json:"suffix,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[56]).
func (x *CutSuffixArgs) Reset() {
	*x = CutSuffixArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[56]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *CutSuffixArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *CutSuffixArgs as a protobuf message.
func (*CutSuffixArgs) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *CutSuffixArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[56]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CutSuffixArgs.ProtoReflect.Descriptor instead.
func (*CutSuffixArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{56}
}

// GetS returns the s field; safe to call on a nil receiver.
func (x *CutSuffixArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}

// GetSuffix returns the suffix field; safe to call on a nil receiver.
func (x *CutSuffixArgs) GetSuffix() string {
	if x != nil {
		return x.Suffix
	}
	return ""
}
// NgoloFuzzOne is the generated protobuf message selecting exactly one fuzz
// operation to perform; the chosen variant is stored in the Item oneof.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzOne_Clone
	// *NgoloFuzzOne_Compare
	// *NgoloFuzzOne_Lines
	// *NgoloFuzzOne_SplitSeq
	// *NgoloFuzzOne_SplitAfterSeq
	// *NgoloFuzzOne_FieldsSeq
	// *NgoloFuzzOne_ReaderNgdotLen
	// *NgoloFuzzOne_ReaderNgdotSize
	// *NgoloFuzzOne_ReaderNgdotRead
	// *NgoloFuzzOne_ReaderNgdotReadAt
	// *NgoloFuzzOne_ReaderNgdotReadByte
	// *NgoloFuzzOne_ReaderNgdotUnreadByte
	// *NgoloFuzzOne_ReaderNgdotReadRune
	// *NgoloFuzzOne_ReaderNgdotUnreadRune
	// *NgoloFuzzOne_ReaderNgdotSeek
	// *NgoloFuzzOne_ReaderNgdotWriteTo
	// *NgoloFuzzOne_ReaderNgdotReset
	// *NgoloFuzzOne_NewReader
	// *NgoloFuzzOne_ReplacerNgdotReplace
	// *NgoloFuzzOne_ReplacerNgdotWriteString
	// *NgoloFuzzOne_Count
	// *NgoloFuzzOne_Contains
	// *NgoloFuzzOne_ContainsAny
	// *NgoloFuzzOne_ContainsRune
	// *NgoloFuzzOne_LastIndex
	// *NgoloFuzzOne_IndexByte
	// *NgoloFuzzOne_IndexRune
	// *NgoloFuzzOne_IndexAny
	// *NgoloFuzzOne_LastIndexAny
	// *NgoloFuzzOne_LastIndexByte
	// *NgoloFuzzOne_SplitN
	// *NgoloFuzzOne_SplitAfterN
	// *NgoloFuzzOne_Split
	// *NgoloFuzzOne_SplitAfter
	// *NgoloFuzzOne_Fields
	// *NgoloFuzzOne_Join
	// *NgoloFuzzOne_HasPrefix
	// *NgoloFuzzOne_HasSuffix
	// *NgoloFuzzOne_Repeat
	// *NgoloFuzzOne_ToUpper
	// *NgoloFuzzOne_ToLower
	// *NgoloFuzzOne_ToTitle
	// *NgoloFuzzOne_ToValidUTF8
	// *NgoloFuzzOne_Title
	// *NgoloFuzzOne_Trim
	// *NgoloFuzzOne_TrimLeft
	// *NgoloFuzzOne_TrimRight
	// *NgoloFuzzOne_TrimSpace
	// *NgoloFuzzOne_TrimPrefix
	// *NgoloFuzzOne_TrimSuffix
	// *NgoloFuzzOne_Replace
	// *NgoloFuzzOne_ReplaceAll
	// *NgoloFuzzOne_EqualFold
	// *NgoloFuzzOne_Index
	// *NgoloFuzzOne_Cut
	// *NgoloFuzzOne_CutPrefix
	// *NgoloFuzzOne_CutSuffix
	Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and re-associates it with its
// generated type info (msgTypes[57]).
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[57]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a string form of the message via the protoimpl runtime.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage tags *NgoloFuzzOne as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflection view of the message, lazily storing the
// generated message info on first use for a non-nil receiver.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[57]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{57}
}

// GetItem returns the populated oneof wrapper, or nil if none is set.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The GetXxx methods below each return the corresponding oneof variant's
// payload when Item currently holds that variant, and nil otherwise.
// All are safe to call on a nil receiver.

// GetClone returns the Clone variant payload, or nil.
func (x *NgoloFuzzOne) GetClone() *CloneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Clone); ok {
			return x.Clone
		}
	}
	return nil
}

// GetCompare returns the Compare variant payload, or nil.
func (x *NgoloFuzzOne) GetCompare() *CompareArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Compare); ok {
			return x.Compare
		}
	}
	return nil
}

// GetLines returns the Lines variant payload, or nil.
func (x *NgoloFuzzOne) GetLines() *LinesArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Lines); ok {
			return x.Lines
		}
	}
	return nil
}

// GetSplitSeq returns the SplitSeq variant payload, or nil.
func (x *NgoloFuzzOne) GetSplitSeq() *SplitSeqArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_SplitSeq); ok {
			return x.SplitSeq
		}
	}
	return nil
}

// GetSplitAfterSeq returns the SplitAfterSeq variant payload, or nil.
func (x *NgoloFuzzOne) GetSplitAfterSeq() *SplitAfterSeqArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_SplitAfterSeq); ok {
			return x.SplitAfterSeq
		}
	}
	return nil
}

// GetFieldsSeq returns the FieldsSeq variant payload, or nil.
func (x *NgoloFuzzOne) GetFieldsSeq() *FieldsSeqArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FieldsSeq); ok {
			return x.FieldsSeq
		}
	}
	return nil
}

// GetReaderNgdotLen returns the ReaderNgdotLen variant payload, or nil.
func (x *NgoloFuzzOne) GetReaderNgdotLen() *ReaderNgdotLenArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotLen); ok {
			return x.ReaderNgdotLen
		}
	}
	return nil
}

// GetReaderNgdotSize returns the ReaderNgdotSize variant payload, or nil.
func (x *NgoloFuzzOne) GetReaderNgdotSize() *ReaderNgdotSizeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotSize); ok {
			return x.ReaderNgdotSize
		}
	}
	return nil
}

// GetReaderNgdotRead returns the ReaderNgdotRead variant payload, or nil.
func (x *NgoloFuzzOne) GetReaderNgdotRead() *ReaderNgdotReadArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotRead); ok {
			return x.ReaderNgdotRead
		}
	}
	return nil
}

// GetReaderNgdotReadAt returns the ReaderNgdotReadAt variant payload, or nil.
func (x *NgoloFuzzOne) GetReaderNgdotReadAt() *ReaderNgdotReadAtArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadAt); ok {
			return x.ReaderNgdotReadAt
		}
	}
	return nil
}

// GetReaderNgdotReadByte returns the ReaderNgdotReadByte variant payload, or nil.
func (x *NgoloFuzzOne) GetReaderNgdotReadByte() *ReaderNgdotReadByteArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadByte); ok {
			return x.ReaderNgdotReadByte
		}
	}
	return nil
}

// GetReaderNgdotUnreadByte returns the ReaderNgdotUnreadByte variant payload, or nil.
func (x *NgoloFuzzOne) GetReaderNgdotUnreadByte() *ReaderNgdotUnreadByteArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotUnreadByte); ok {
			return x.ReaderNgdotUnreadByte
		}
	}
	return nil
}

// GetReaderNgdotReadRune returns the ReaderNgdotReadRune variant payload, or nil.
func (x *NgoloFuzzOne) GetReaderNgdotReadRune() *ReaderNgdotReadRuneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReadRune); ok {
			return x.ReaderNgdotReadRune
		}
	}
	return nil
}

// GetReaderNgdotUnreadRune returns the ReaderNgdotUnreadRune variant payload, or nil.
func (x *NgoloFuzzOne) GetReaderNgdotUnreadRune() *ReaderNgdotUnreadRuneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotUnreadRune); ok {
			return x.ReaderNgdotUnreadRune
		}
	}
	return nil
}

// GetReaderNgdotSeek returns the ReaderNgdotSeek variant payload, or nil.
func (x *NgoloFuzzOne) GetReaderNgdotSeek() *ReaderNgdotSeekArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotSeek); ok {
			return x.ReaderNgdotSeek
		}
	}
	return nil
}

// GetReaderNgdotWriteTo returns the ReaderNgdotWriteTo variant payload, or nil.
func (x *NgoloFuzzOne) GetReaderNgdotWriteTo() *ReaderNgdotWriteToArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotWriteTo); ok {
			return x.ReaderNgdotWriteTo
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetReaderNgdotReset() *ReaderNgdotResetArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReaderNgdotReset); ok {
return x.ReaderNgdotReset
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewReader() *NewReaderArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewReader); ok {
return x.NewReader
}
}
return nil
}
func (x *NgoloFuzzOne) GetReplacerNgdotReplace() *ReplacerNgdotReplaceArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReplacerNgdotReplace); ok {
return x.ReplacerNgdotReplace
}
}
return nil
}
func (x *NgoloFuzzOne) GetReplacerNgdotWriteString() *ReplacerNgdotWriteStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReplacerNgdotWriteString); ok {
return x.ReplacerNgdotWriteString
}
}
return nil
}
func (x *NgoloFuzzOne) GetCount() *CountArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Count); ok {
return x.Count
}
}
return nil
}
func (x *NgoloFuzzOne) GetContains() *ContainsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Contains); ok {
return x.Contains
}
}
return nil
}
func (x *NgoloFuzzOne) GetContainsAny() *ContainsAnyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ContainsAny); ok {
return x.ContainsAny
}
}
return nil
}
func (x *NgoloFuzzOne) GetContainsRune() *ContainsRuneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ContainsRune); ok {
return x.ContainsRune
}
}
return nil
}
func (x *NgoloFuzzOne) GetLastIndex() *LastIndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LastIndex); ok {
return x.LastIndex
}
}
return nil
}
func (x *NgoloFuzzOne) GetIndexByte() *IndexByteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IndexByte); ok {
return x.IndexByte
}
}
return nil
}
func (x *NgoloFuzzOne) GetIndexRune() *IndexRuneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IndexRune); ok {
return x.IndexRune
}
}
return nil
}
func (x *NgoloFuzzOne) GetIndexAny() *IndexAnyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IndexAny); ok {
return x.IndexAny
}
}
return nil
}
func (x *NgoloFuzzOne) GetLastIndexAny() *LastIndexAnyArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LastIndexAny); ok {
return x.LastIndexAny
}
}
return nil
}
func (x *NgoloFuzzOne) GetLastIndexByte() *LastIndexByteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_LastIndexByte); ok {
return x.LastIndexByte
}
}
return nil
}
func (x *NgoloFuzzOne) GetSplitN() *SplitNArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SplitN); ok {
return x.SplitN
}
}
return nil
}
func (x *NgoloFuzzOne) GetSplitAfterN() *SplitAfterNArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SplitAfterN); ok {
return x.SplitAfterN
}
}
return nil
}
func (x *NgoloFuzzOne) GetSplit() *SplitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Split); ok {
return x.Split
}
}
return nil
}
func (x *NgoloFuzzOne) GetSplitAfter() *SplitAfterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SplitAfter); ok {
return x.SplitAfter
}
}
return nil
}
func (x *NgoloFuzzOne) GetFields() *FieldsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Fields); ok {
return x.Fields
}
}
return nil
}
func (x *NgoloFuzzOne) GetJoin() *JoinArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Join); ok {
return x.Join
}
}
return nil
}
func (x *NgoloFuzzOne) GetHasPrefix() *HasPrefixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_HasPrefix); ok {
return x.HasPrefix
}
}
return nil
}
func (x *NgoloFuzzOne) GetHasSuffix() *HasSuffixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_HasSuffix); ok {
return x.HasSuffix
}
}
return nil
}
func (x *NgoloFuzzOne) GetRepeat() *RepeatArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Repeat); ok {
return x.Repeat
}
}
return nil
}
func (x *NgoloFuzzOne) GetToUpper() *ToUpperArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToUpper); ok {
return x.ToUpper
}
}
return nil
}
func (x *NgoloFuzzOne) GetToLower() *ToLowerArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToLower); ok {
return x.ToLower
}
}
return nil
}
func (x *NgoloFuzzOne) GetToTitle() *ToTitleArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToTitle); ok {
return x.ToTitle
}
}
return nil
}
func (x *NgoloFuzzOne) GetToValidUTF8() *ToValidUTF8Args {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToValidUTF8); ok {
return x.ToValidUTF8
}
}
return nil
}
func (x *NgoloFuzzOne) GetTitle() *TitleArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Title); ok {
return x.Title
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrim() *TrimArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Trim); ok {
return x.Trim
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrimLeft() *TrimLeftArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TrimLeft); ok {
return x.TrimLeft
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrimRight() *TrimRightArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TrimRight); ok {
return x.TrimRight
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrimSpace() *TrimSpaceArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TrimSpace); ok {
return x.TrimSpace
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrimPrefix() *TrimPrefixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TrimPrefix); ok {
return x.TrimPrefix
}
}
return nil
}
func (x *NgoloFuzzOne) GetTrimSuffix() *TrimSuffixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TrimSuffix); ok {
return x.TrimSuffix
}
}
return nil
}
func (x *NgoloFuzzOne) GetReplace() *ReplaceArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Replace); ok {
return x.Replace
}
}
return nil
}
func (x *NgoloFuzzOne) GetReplaceAll() *ReplaceAllArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ReplaceAll); ok {
return x.ReplaceAll
}
}
return nil
}
func (x *NgoloFuzzOne) GetEqualFold() *EqualFoldArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EqualFold); ok {
return x.EqualFold
}
}
return nil
}
func (x *NgoloFuzzOne) GetIndex() *IndexArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Index); ok {
return x.Index
}
}
return nil
}
func (x *NgoloFuzzOne) GetCut() *CutArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Cut); ok {
return x.Cut
}
}
return nil
}
func (x *NgoloFuzzOne) GetCutPrefix() *CutPrefixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CutPrefix); ok {
return x.CutPrefix
}
}
return nil
}
func (x *NgoloFuzzOne) GetCutSuffix() *CutSuffixArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_CutSuffix); ok {
return x.CutSuffix
}
}
return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every wrapper
// type of the NgoloFuzzOne "item" oneof; the unexported marker method keeps
// outside packages from adding new variants.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_Clone struct {
Clone *CloneArgs `protobuf:"bytes,1,opt,name=Clone,proto3,oneof"`
}
type NgoloFuzzOne_Compare struct {
Compare *CompareArgs `protobuf:"bytes,2,opt,name=Compare,proto3,oneof"`
}
type NgoloFuzzOne_Lines struct {
Lines *LinesArgs `protobuf:"bytes,3,opt,name=Lines,proto3,oneof"`
}
type NgoloFuzzOne_SplitSeq struct {
SplitSeq *SplitSeqArgs `protobuf:"bytes,4,opt,name=SplitSeq,proto3,oneof"`
}
type NgoloFuzzOne_SplitAfterSeq struct {
SplitAfterSeq *SplitAfterSeqArgs `protobuf:"bytes,5,opt,name=SplitAfterSeq,proto3,oneof"`
}
type NgoloFuzzOne_FieldsSeq struct {
FieldsSeq *FieldsSeqArgs `protobuf:"bytes,6,opt,name=FieldsSeq,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotLen struct {
ReaderNgdotLen *ReaderNgdotLenArgs `protobuf:"bytes,7,opt,name=ReaderNgdotLen,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotSize struct {
ReaderNgdotSize *ReaderNgdotSizeArgs `protobuf:"bytes,8,opt,name=ReaderNgdotSize,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotRead struct {
ReaderNgdotRead *ReaderNgdotReadArgs `protobuf:"bytes,9,opt,name=ReaderNgdotRead,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReadAt struct {
ReaderNgdotReadAt *ReaderNgdotReadAtArgs `protobuf:"bytes,10,opt,name=ReaderNgdotReadAt,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReadByte struct {
ReaderNgdotReadByte *ReaderNgdotReadByteArgs `protobuf:"bytes,11,opt,name=ReaderNgdotReadByte,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotUnreadByte struct {
ReaderNgdotUnreadByte *ReaderNgdotUnreadByteArgs `protobuf:"bytes,12,opt,name=ReaderNgdotUnreadByte,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReadRune struct {
ReaderNgdotReadRune *ReaderNgdotReadRuneArgs `protobuf:"bytes,13,opt,name=ReaderNgdotReadRune,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotUnreadRune struct {
ReaderNgdotUnreadRune *ReaderNgdotUnreadRuneArgs `protobuf:"bytes,14,opt,name=ReaderNgdotUnreadRune,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotSeek struct {
ReaderNgdotSeek *ReaderNgdotSeekArgs `protobuf:"bytes,15,opt,name=ReaderNgdotSeek,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotWriteTo struct {
ReaderNgdotWriteTo *ReaderNgdotWriteToArgs `protobuf:"bytes,16,opt,name=ReaderNgdotWriteTo,proto3,oneof"`
}
type NgoloFuzzOne_ReaderNgdotReset struct {
ReaderNgdotReset *ReaderNgdotResetArgs `protobuf:"bytes,17,opt,name=ReaderNgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_NewReader struct {
NewReader *NewReaderArgs `protobuf:"bytes,18,opt,name=NewReader,proto3,oneof"`
}
type NgoloFuzzOne_ReplacerNgdotReplace struct {
ReplacerNgdotReplace *ReplacerNgdotReplaceArgs `protobuf:"bytes,19,opt,name=ReplacerNgdotReplace,proto3,oneof"`
}
type NgoloFuzzOne_ReplacerNgdotWriteString struct {
ReplacerNgdotWriteString *ReplacerNgdotWriteStringArgs `protobuf:"bytes,20,opt,name=ReplacerNgdotWriteString,proto3,oneof"`
}
type NgoloFuzzOne_Count struct {
Count *CountArgs `protobuf:"bytes,21,opt,name=Count,proto3,oneof"`
}
type NgoloFuzzOne_Contains struct {
Contains *ContainsArgs `protobuf:"bytes,22,opt,name=Contains,proto3,oneof"`
}
type NgoloFuzzOne_ContainsAny struct {
ContainsAny *ContainsAnyArgs `protobuf:"bytes,23,opt,name=ContainsAny,proto3,oneof"`
}
type NgoloFuzzOne_ContainsRune struct {
ContainsRune *ContainsRuneArgs `protobuf:"bytes,24,opt,name=ContainsRune,proto3,oneof"`
}
type NgoloFuzzOne_LastIndex struct {
LastIndex *LastIndexArgs `protobuf:"bytes,25,opt,name=LastIndex,proto3,oneof"`
}
type NgoloFuzzOne_IndexByte struct {
IndexByte *IndexByteArgs `protobuf:"bytes,26,opt,name=IndexByte,proto3,oneof"`
}
type NgoloFuzzOne_IndexRune struct {
IndexRune *IndexRuneArgs `protobuf:"bytes,27,opt,name=IndexRune,proto3,oneof"`
}
type NgoloFuzzOne_IndexAny struct {
IndexAny *IndexAnyArgs `protobuf:"bytes,28,opt,name=IndexAny,proto3,oneof"`
}
type NgoloFuzzOne_LastIndexAny struct {
LastIndexAny *LastIndexAnyArgs `protobuf:"bytes,29,opt,name=LastIndexAny,proto3,oneof"`
}
type NgoloFuzzOne_LastIndexByte struct {
LastIndexByte *LastIndexByteArgs `protobuf:"bytes,30,opt,name=LastIndexByte,proto3,oneof"`
}
type NgoloFuzzOne_SplitN struct {
SplitN *SplitNArgs `protobuf:"bytes,31,opt,name=SplitN,proto3,oneof"`
}
type NgoloFuzzOne_SplitAfterN struct {
SplitAfterN *SplitAfterNArgs `protobuf:"bytes,32,opt,name=SplitAfterN,proto3,oneof"`
}
type NgoloFuzzOne_Split struct {
Split *SplitArgs `protobuf:"bytes,33,opt,name=Split,proto3,oneof"`
}
type NgoloFuzzOne_SplitAfter struct {
SplitAfter *SplitAfterArgs `protobuf:"bytes,34,opt,name=SplitAfter,proto3,oneof"`
}
type NgoloFuzzOne_Fields struct {
Fields *FieldsArgs `protobuf:"bytes,35,opt,name=Fields,proto3,oneof"`
}
type NgoloFuzzOne_Join struct {
Join *JoinArgs `protobuf:"bytes,36,opt,name=Join,proto3,oneof"`
}
type NgoloFuzzOne_HasPrefix struct {
HasPrefix *HasPrefixArgs `protobuf:"bytes,37,opt,name=HasPrefix,proto3,oneof"`
}
type NgoloFuzzOne_HasSuffix struct {
HasSuffix *HasSuffixArgs `protobuf:"bytes,38,opt,name=HasSuffix,proto3,oneof"`
}
type NgoloFuzzOne_Repeat struct {
Repeat *RepeatArgs `protobuf:"bytes,39,opt,name=Repeat,proto3,oneof"`
}
type NgoloFuzzOne_ToUpper struct {
ToUpper *ToUpperArgs `protobuf:"bytes,40,opt,name=ToUpper,proto3,oneof"`
}
type NgoloFuzzOne_ToLower struct {
ToLower *ToLowerArgs `protobuf:"bytes,41,opt,name=ToLower,proto3,oneof"`
}
type NgoloFuzzOne_ToTitle struct {
ToTitle *ToTitleArgs `protobuf:"bytes,42,opt,name=ToTitle,proto3,oneof"`
}
type NgoloFuzzOne_ToValidUTF8 struct {
ToValidUTF8 *ToValidUTF8Args `protobuf:"bytes,43,opt,name=ToValidUTF8,proto3,oneof"`
}
type NgoloFuzzOne_Title struct {
Title *TitleArgs `protobuf:"bytes,44,opt,name=Title,proto3,oneof"`
}
type NgoloFuzzOne_Trim struct {
Trim *TrimArgs `protobuf:"bytes,45,opt,name=Trim,proto3,oneof"`
}
type NgoloFuzzOne_TrimLeft struct {
TrimLeft *TrimLeftArgs `protobuf:"bytes,46,opt,name=TrimLeft,proto3,oneof"`
}
type NgoloFuzzOne_TrimRight struct {
TrimRight *TrimRightArgs `protobuf:"bytes,47,opt,name=TrimRight,proto3,oneof"`
}
type NgoloFuzzOne_TrimSpace struct {
TrimSpace *TrimSpaceArgs `protobuf:"bytes,48,opt,name=TrimSpace,proto3,oneof"`
}
type NgoloFuzzOne_TrimPrefix struct {
TrimPrefix *TrimPrefixArgs `protobuf:"bytes,49,opt,name=TrimPrefix,proto3,oneof"`
}
type NgoloFuzzOne_TrimSuffix struct {
TrimSuffix *TrimSuffixArgs `protobuf:"bytes,50,opt,name=TrimSuffix,proto3,oneof"`
}
type NgoloFuzzOne_Replace struct {
Replace *ReplaceArgs `protobuf:"bytes,51,opt,name=Replace,proto3,oneof"`
}
type NgoloFuzzOne_ReplaceAll struct {
ReplaceAll *ReplaceAllArgs `protobuf:"bytes,52,opt,name=ReplaceAll,proto3,oneof"`
}
type NgoloFuzzOne_EqualFold struct {
EqualFold *EqualFoldArgs `protobuf:"bytes,53,opt,name=EqualFold,proto3,oneof"`
}
type NgoloFuzzOne_Index struct {
Index *IndexArgs `protobuf:"bytes,54,opt,name=Index,proto3,oneof"`
}
type NgoloFuzzOne_Cut struct {
Cut *CutArgs `protobuf:"bytes,55,opt,name=Cut,proto3,oneof"`
}
type NgoloFuzzOne_CutPrefix struct {
CutPrefix *CutPrefixArgs `protobuf:"bytes,56,opt,name=CutPrefix,proto3,oneof"`
}
type NgoloFuzzOne_CutSuffix struct {
CutSuffix *CutSuffixArgs `protobuf:"bytes,57,opt,name=CutSuffix,proto3,oneof"`
}
func (*NgoloFuzzOne_Clone) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Compare) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Lines) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SplitSeq) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SplitAfterSeq) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FieldsSeq) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotLen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotSize) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotRead) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReadAt) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReadByte) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotUnreadByte) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReadRune) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotUnreadRune) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotSeek) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotWriteTo) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReaderNgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewReader) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReplacerNgdotReplace) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReplacerNgdotWriteString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Count) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Contains) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ContainsAny) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ContainsRune) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LastIndex) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IndexByte) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IndexRune) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IndexAny) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LastIndexAny) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LastIndexByte) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SplitN) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SplitAfterN) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Split) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SplitAfter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Fields) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Join) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_HasPrefix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_HasSuffix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Repeat) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToUpper) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToLower) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToTitle) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToValidUTF8) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Title) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Trim) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TrimLeft) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TrimRight) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TrimSpace) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TrimPrefix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TrimSuffix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Replace) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ReplaceAll) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EqualFold) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Index) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Cut) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CutPrefix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_CutSuffix) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset wipes the message back to its zero value and re-attaches the
// generated message info (index 58 in this file's type table).
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[58]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily storing
// the message info (type-table index 58) on first use; a nil receiver
// falls back to mi.MessageOf(x).
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[58]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{58}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[59]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{59}
}
// GetList returns the repeated list of fuzzing operations, or nil for a
// nil receiver (safe to range over either way).
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x19\n" +
"\tCloneArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\")\n" +
"\vCompareArgs\x12\f\n" +
"\x01a\x18\x01 \x01(\tR\x01a\x12\f\n" +
"\x01b\x18\x02 \x01(\tR\x01b\"\x19\n" +
"\tLinesArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\".\n" +
"\fSplitSeqArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x10\n" +
"\x03sep\x18\x02 \x01(\tR\x03sep\"3\n" +
"\x11SplitAfterSeqArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x10\n" +
"\x03sep\x18\x02 \x01(\tR\x03sep\"\x1d\n" +
"\rFieldsSeqArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x14\n" +
"\x12ReaderNgdotLenArgs\"\x15\n" +
"\x13ReaderNgdotSizeArgs\"#\n" +
"\x13ReaderNgdotReadArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"7\n" +
"\x15ReaderNgdotReadAtArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\x12\x10\n" +
"\x03off\x18\x02 \x01(\x03R\x03off\"\x19\n" +
"\x17ReaderNgdotReadByteArgs\"\x1b\n" +
"\x19ReaderNgdotUnreadByteArgs\"\x19\n" +
"\x17ReaderNgdotReadRuneArgs\"\x1b\n" +
"\x19ReaderNgdotUnreadRuneArgs\"E\n" +
"\x13ReaderNgdotSeekArgs\x12\x16\n" +
"\x06offset\x18\x01 \x01(\x03R\x06offset\x12\x16\n" +
"\x06whence\x18\x02 \x01(\x03R\x06whence\"&\n" +
"\x16ReaderNgdotWriteToArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\"$\n" +
"\x14ReaderNgdotResetArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x1d\n" +
"\rNewReaderArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"(\n" +
"\x18ReplacerNgdotReplaceArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\":\n" +
"\x1cReplacerNgdotWriteStringArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\f\n" +
"\x01s\x18\x02 \x01(\tR\x01s\"1\n" +
"\tCountArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06substr\x18\x02 \x01(\tR\x06substr\"4\n" +
"\fContainsArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06substr\x18\x02 \x01(\tR\x06substr\"5\n" +
"\x0fContainsAnyArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x14\n" +
"\x05chars\x18\x02 \x01(\tR\x05chars\".\n" +
"\x10ContainsRuneArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\f\n" +
"\x01r\x18\x02 \x01(\tR\x01r\"5\n" +
"\rLastIndexArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06substr\x18\x02 \x01(\tR\x06substr\"+\n" +
"\rIndexByteArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\f\n" +
"\x01c\x18\x02 \x01(\rR\x01c\"+\n" +
"\rIndexRuneArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\f\n" +
"\x01r\x18\x02 \x01(\tR\x01r\"2\n" +
"\fIndexAnyArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x14\n" +
"\x05chars\x18\x02 \x01(\tR\x05chars\"6\n" +
"\x10LastIndexAnyArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x14\n" +
"\x05chars\x18\x02 \x01(\tR\x05chars\"/\n" +
"\x11LastIndexByteArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\f\n" +
"\x01c\x18\x02 \x01(\rR\x01c\":\n" +
"\n" +
"SplitNArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x10\n" +
"\x03sep\x18\x02 \x01(\tR\x03sep\x12\f\n" +
"\x01n\x18\x03 \x01(\x03R\x01n\"?\n" +
"\x0fSplitAfterNArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x10\n" +
"\x03sep\x18\x02 \x01(\tR\x03sep\x12\f\n" +
"\x01n\x18\x03 \x01(\x03R\x01n\"+\n" +
"\tSplitArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x10\n" +
"\x03sep\x18\x02 \x01(\tR\x03sep\"0\n" +
"\x0eSplitAfterArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x10\n" +
"\x03sep\x18\x02 \x01(\tR\x03sep\"\x1a\n" +
"\n" +
"FieldsArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"2\n" +
"\bJoinArgs\x12\x14\n" +
"\x05elems\x18\x01 \x03(\tR\x05elems\x12\x10\n" +
"\x03sep\x18\x02 \x01(\tR\x03sep\"5\n" +
"\rHasPrefixArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06prefix\x18\x02 \x01(\tR\x06prefix\"5\n" +
"\rHasSuffixArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06suffix\x18\x02 \x01(\tR\x06suffix\"0\n" +
"\n" +
"RepeatArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x14\n" +
"\x05count\x18\x02 \x01(\x03R\x05count\"\x1b\n" +
"\vToUpperArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x1b\n" +
"\vToLowerArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x1b\n" +
"\vToTitleArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"A\n" +
"\x0fToValidUTF8Args\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12 \n" +
"\vreplacement\x18\x02 \x01(\tR\vreplacement\"\x19\n" +
"\tTitleArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"0\n" +
"\bTrimArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06cutset\x18\x02 \x01(\tR\x06cutset\"4\n" +
"\fTrimLeftArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06cutset\x18\x02 \x01(\tR\x06cutset\"5\n" +
"\rTrimRightArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06cutset\x18\x02 \x01(\tR\x06cutset\"\x1d\n" +
"\rTrimSpaceArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"6\n" +
"\x0eTrimPrefixArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06prefix\x18\x02 \x01(\tR\x06prefix\"6\n" +
"\x0eTrimSuffixArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06suffix\x18\x02 \x01(\tR\x06suffix\"M\n" +
"\vReplaceArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x10\n" +
"\x03old\x18\x02 \x01(\tR\x03old\x12\x10\n" +
"\x03new\x18\x03 \x01(\tR\x03new\x12\f\n" +
"\x01n\x18\x04 \x01(\x03R\x01n\"B\n" +
"\x0eReplaceAllArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x10\n" +
"\x03old\x18\x02 \x01(\tR\x03old\x12\x10\n" +
"\x03new\x18\x03 \x01(\tR\x03new\"+\n" +
"\rEqualFoldArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\f\n" +
"\x01t\x18\x02 \x01(\tR\x01t\"1\n" +
"\tIndexArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06substr\x18\x02 \x01(\tR\x06substr\")\n" +
"\aCutArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x10\n" +
"\x03sep\x18\x02 \x01(\tR\x03sep\"5\n" +
"\rCutPrefixArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06prefix\x18\x02 \x01(\tR\x06prefix\"5\n" +
"\rCutSuffixArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\x12\x16\n" +
"\x06suffix\x18\x02 \x01(\tR\x06suffix\"\xea\x1b\n" +
"\fNgoloFuzzOne\x12,\n" +
"\x05Clone\x18\x01 \x01(\v2\x14.ngolofuzz.CloneArgsH\x00R\x05Clone\x122\n" +
"\aCompare\x18\x02 \x01(\v2\x16.ngolofuzz.CompareArgsH\x00R\aCompare\x12,\n" +
"\x05Lines\x18\x03 \x01(\v2\x14.ngolofuzz.LinesArgsH\x00R\x05Lines\x125\n" +
"\bSplitSeq\x18\x04 \x01(\v2\x17.ngolofuzz.SplitSeqArgsH\x00R\bSplitSeq\x12D\n" +
"\rSplitAfterSeq\x18\x05 \x01(\v2\x1c.ngolofuzz.SplitAfterSeqArgsH\x00R\rSplitAfterSeq\x128\n" +
"\tFieldsSeq\x18\x06 \x01(\v2\x18.ngolofuzz.FieldsSeqArgsH\x00R\tFieldsSeq\x12G\n" +
"\x0eReaderNgdotLen\x18\a \x01(\v2\x1d.ngolofuzz.ReaderNgdotLenArgsH\x00R\x0eReaderNgdotLen\x12J\n" +
"\x0fReaderNgdotSize\x18\b \x01(\v2\x1e.ngolofuzz.ReaderNgdotSizeArgsH\x00R\x0fReaderNgdotSize\x12J\n" +
"\x0fReaderNgdotRead\x18\t \x01(\v2\x1e.ngolofuzz.ReaderNgdotReadArgsH\x00R\x0fReaderNgdotRead\x12P\n" +
"\x11ReaderNgdotReadAt\x18\n" +
" \x01(\v2 .ngolofuzz.ReaderNgdotReadAtArgsH\x00R\x11ReaderNgdotReadAt\x12V\n" +
"\x13ReaderNgdotReadByte\x18\v \x01(\v2\".ngolofuzz.ReaderNgdotReadByteArgsH\x00R\x13ReaderNgdotReadByte\x12\\\n" +
"\x15ReaderNgdotUnreadByte\x18\f \x01(\v2$.ngolofuzz.ReaderNgdotUnreadByteArgsH\x00R\x15ReaderNgdotUnreadByte\x12V\n" +
"\x13ReaderNgdotReadRune\x18\r \x01(\v2\".ngolofuzz.ReaderNgdotReadRuneArgsH\x00R\x13ReaderNgdotReadRune\x12\\\n" +
"\x15ReaderNgdotUnreadRune\x18\x0e \x01(\v2$.ngolofuzz.ReaderNgdotUnreadRuneArgsH\x00R\x15ReaderNgdotUnreadRune\x12J\n" +
"\x0fReaderNgdotSeek\x18\x0f \x01(\v2\x1e.ngolofuzz.ReaderNgdotSeekArgsH\x00R\x0fReaderNgdotSeek\x12S\n" +
"\x12ReaderNgdotWriteTo\x18\x10 \x01(\v2!.ngolofuzz.ReaderNgdotWriteToArgsH\x00R\x12ReaderNgdotWriteTo\x12M\n" +
"\x10ReaderNgdotReset\x18\x11 \x01(\v2\x1f.ngolofuzz.ReaderNgdotResetArgsH\x00R\x10ReaderNgdotReset\x128\n" +
"\tNewReader\x18\x12 \x01(\v2\x18.ngolofuzz.NewReaderArgsH\x00R\tNewReader\x12Y\n" +
"\x14ReplacerNgdotReplace\x18\x13 \x01(\v2#.ngolofuzz.ReplacerNgdotReplaceArgsH\x00R\x14ReplacerNgdotReplace\x12e\n" +
"\x18ReplacerNgdotWriteString\x18\x14 \x01(\v2'.ngolofuzz.ReplacerNgdotWriteStringArgsH\x00R\x18ReplacerNgdotWriteString\x12,\n" +
"\x05Count\x18\x15 \x01(\v2\x14.ngolofuzz.CountArgsH\x00R\x05Count\x125\n" +
"\bContains\x18\x16 \x01(\v2\x17.ngolofuzz.ContainsArgsH\x00R\bContains\x12>\n" +
"\vContainsAny\x18\x17 \x01(\v2\x1a.ngolofuzz.ContainsAnyArgsH\x00R\vContainsAny\x12A\n" +
"\fContainsRune\x18\x18 \x01(\v2\x1b.ngolofuzz.ContainsRuneArgsH\x00R\fContainsRune\x128\n" +
"\tLastIndex\x18\x19 \x01(\v2\x18.ngolofuzz.LastIndexArgsH\x00R\tLastIndex\x128\n" +
"\tIndexByte\x18\x1a \x01(\v2\x18.ngolofuzz.IndexByteArgsH\x00R\tIndexByte\x128\n" +
"\tIndexRune\x18\x1b \x01(\v2\x18.ngolofuzz.IndexRuneArgsH\x00R\tIndexRune\x125\n" +
"\bIndexAny\x18\x1c \x01(\v2\x17.ngolofuzz.IndexAnyArgsH\x00R\bIndexAny\x12A\n" +
"\fLastIndexAny\x18\x1d \x01(\v2\x1b.ngolofuzz.LastIndexAnyArgsH\x00R\fLastIndexAny\x12D\n" +
"\rLastIndexByte\x18\x1e \x01(\v2\x1c.ngolofuzz.LastIndexByteArgsH\x00R\rLastIndexByte\x12/\n" +
"\x06SplitN\x18\x1f \x01(\v2\x15.ngolofuzz.SplitNArgsH\x00R\x06SplitN\x12>\n" +
"\vSplitAfterN\x18 \x01(\v2\x1a.ngolofuzz.SplitAfterNArgsH\x00R\vSplitAfterN\x12,\n" +
"\x05Split\x18! \x01(\v2\x14.ngolofuzz.SplitArgsH\x00R\x05Split\x12;\n" +
"\n" +
"SplitAfter\x18\" \x01(\v2\x19.ngolofuzz.SplitAfterArgsH\x00R\n" +
"SplitAfter\x12/\n" +
"\x06Fields\x18# \x01(\v2\x15.ngolofuzz.FieldsArgsH\x00R\x06Fields\x12)\n" +
"\x04Join\x18$ \x01(\v2\x13.ngolofuzz.JoinArgsH\x00R\x04Join\x128\n" +
"\tHasPrefix\x18% \x01(\v2\x18.ngolofuzz.HasPrefixArgsH\x00R\tHasPrefix\x128\n" +
"\tHasSuffix\x18& \x01(\v2\x18.ngolofuzz.HasSuffixArgsH\x00R\tHasSuffix\x12/\n" +
"\x06Repeat\x18' \x01(\v2\x15.ngolofuzz.RepeatArgsH\x00R\x06Repeat\x122\n" +
"\aToUpper\x18( \x01(\v2\x16.ngolofuzz.ToUpperArgsH\x00R\aToUpper\x122\n" +
"\aToLower\x18) \x01(\v2\x16.ngolofuzz.ToLowerArgsH\x00R\aToLower\x122\n" +
"\aToTitle\x18* \x01(\v2\x16.ngolofuzz.ToTitleArgsH\x00R\aToTitle\x12>\n" +
"\vToValidUTF8\x18+ \x01(\v2\x1a.ngolofuzz.ToValidUTF8ArgsH\x00R\vToValidUTF8\x12,\n" +
"\x05Title\x18, \x01(\v2\x14.ngolofuzz.TitleArgsH\x00R\x05Title\x12)\n" +
"\x04Trim\x18- \x01(\v2\x13.ngolofuzz.TrimArgsH\x00R\x04Trim\x125\n" +
"\bTrimLeft\x18. \x01(\v2\x17.ngolofuzz.TrimLeftArgsH\x00R\bTrimLeft\x128\n" +
"\tTrimRight\x18/ \x01(\v2\x18.ngolofuzz.TrimRightArgsH\x00R\tTrimRight\x128\n" +
"\tTrimSpace\x180 \x01(\v2\x18.ngolofuzz.TrimSpaceArgsH\x00R\tTrimSpace\x12;\n" +
"\n" +
"TrimPrefix\x181 \x01(\v2\x19.ngolofuzz.TrimPrefixArgsH\x00R\n" +
"TrimPrefix\x12;\n" +
"\n" +
"TrimSuffix\x182 \x01(\v2\x19.ngolofuzz.TrimSuffixArgsH\x00R\n" +
"TrimSuffix\x122\n" +
"\aReplace\x183 \x01(\v2\x16.ngolofuzz.ReplaceArgsH\x00R\aReplace\x12;\n" +
"\n" +
"ReplaceAll\x184 \x01(\v2\x19.ngolofuzz.ReplaceAllArgsH\x00R\n" +
"ReplaceAll\x128\n" +
"\tEqualFold\x185 \x01(\v2\x18.ngolofuzz.EqualFoldArgsH\x00R\tEqualFold\x12,\n" +
"\x05Index\x186 \x01(\v2\x14.ngolofuzz.IndexArgsH\x00R\x05Index\x12&\n" +
"\x03Cut\x187 \x01(\v2\x12.ngolofuzz.CutArgsH\x00R\x03Cut\x128\n" +
"\tCutPrefix\x188 \x01(\v2\x18.ngolofuzz.CutPrefixArgsH\x00R\tCutPrefix\x128\n" +
"\tCutSuffix\x189 \x01(\v2\x18.ngolofuzz.CutSuffixArgsH\x00R\tCutSuffixB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x14Z\x12./;fuzz_ng_stringsb\x06proto3"
// Lazily-built gzip form of the raw file descriptor, computed at most once.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP returns the gzip-compressed wire form of
// the file descriptor, compressing the raw descriptor string on first use
// and caching the result for subsequent calls.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Per-message runtime metadata (one slot per message declared in the proto).
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 60)

// Go types referenced by the descriptor, indexed as noted in the trailing
// comments; file_ngolofuzz_proto_depIdxs points into this table.
var file_ngolofuzz_proto_goTypes = []any{
	(*CloneArgs)(nil),                    // 0: ngolofuzz.CloneArgs
	(*CompareArgs)(nil),                  // 1: ngolofuzz.CompareArgs
	(*LinesArgs)(nil),                    // 2: ngolofuzz.LinesArgs
	(*SplitSeqArgs)(nil),                 // 3: ngolofuzz.SplitSeqArgs
	(*SplitAfterSeqArgs)(nil),            // 4: ngolofuzz.SplitAfterSeqArgs
	(*FieldsSeqArgs)(nil),                // 5: ngolofuzz.FieldsSeqArgs
	(*ReaderNgdotLenArgs)(nil),           // 6: ngolofuzz.ReaderNgdotLenArgs
	(*ReaderNgdotSizeArgs)(nil),          // 7: ngolofuzz.ReaderNgdotSizeArgs
	(*ReaderNgdotReadArgs)(nil),          // 8: ngolofuzz.ReaderNgdotReadArgs
	(*ReaderNgdotReadAtArgs)(nil),        // 9: ngolofuzz.ReaderNgdotReadAtArgs
	(*ReaderNgdotReadByteArgs)(nil),      // 10: ngolofuzz.ReaderNgdotReadByteArgs
	(*ReaderNgdotUnreadByteArgs)(nil),    // 11: ngolofuzz.ReaderNgdotUnreadByteArgs
	(*ReaderNgdotReadRuneArgs)(nil),      // 12: ngolofuzz.ReaderNgdotReadRuneArgs
	(*ReaderNgdotUnreadRuneArgs)(nil),    // 13: ngolofuzz.ReaderNgdotUnreadRuneArgs
	(*ReaderNgdotSeekArgs)(nil),          // 14: ngolofuzz.ReaderNgdotSeekArgs
	(*ReaderNgdotWriteToArgs)(nil),       // 15: ngolofuzz.ReaderNgdotWriteToArgs
	(*ReaderNgdotResetArgs)(nil),         // 16: ngolofuzz.ReaderNgdotResetArgs
	(*NewReaderArgs)(nil),                // 17: ngolofuzz.NewReaderArgs
	(*ReplacerNgdotReplaceArgs)(nil),     // 18: ngolofuzz.ReplacerNgdotReplaceArgs
	(*ReplacerNgdotWriteStringArgs)(nil), // 19: ngolofuzz.ReplacerNgdotWriteStringArgs
	(*CountArgs)(nil),                    // 20: ngolofuzz.CountArgs
	(*ContainsArgs)(nil),                 // 21: ngolofuzz.ContainsArgs
	(*ContainsAnyArgs)(nil),              // 22: ngolofuzz.ContainsAnyArgs
	(*ContainsRuneArgs)(nil),             // 23: ngolofuzz.ContainsRuneArgs
	(*LastIndexArgs)(nil),                // 24: ngolofuzz.LastIndexArgs
	(*IndexByteArgs)(nil),                // 25: ngolofuzz.IndexByteArgs
	(*IndexRuneArgs)(nil),                // 26: ngolofuzz.IndexRuneArgs
	(*IndexAnyArgs)(nil),                 // 27: ngolofuzz.IndexAnyArgs
	(*LastIndexAnyArgs)(nil),             // 28: ngolofuzz.LastIndexAnyArgs
	(*LastIndexByteArgs)(nil),            // 29: ngolofuzz.LastIndexByteArgs
	(*SplitNArgs)(nil),                   // 30: ngolofuzz.SplitNArgs
	(*SplitAfterNArgs)(nil),              // 31: ngolofuzz.SplitAfterNArgs
	(*SplitArgs)(nil),                    // 32: ngolofuzz.SplitArgs
	(*SplitAfterArgs)(nil),               // 33: ngolofuzz.SplitAfterArgs
	(*FieldsArgs)(nil),                   // 34: ngolofuzz.FieldsArgs
	(*JoinArgs)(nil),                     // 35: ngolofuzz.JoinArgs
	(*HasPrefixArgs)(nil),                // 36: ngolofuzz.HasPrefixArgs
	(*HasSuffixArgs)(nil),                // 37: ngolofuzz.HasSuffixArgs
	(*RepeatArgs)(nil),                   // 38: ngolofuzz.RepeatArgs
	(*ToUpperArgs)(nil),                  // 39: ngolofuzz.ToUpperArgs
	(*ToLowerArgs)(nil),                  // 40: ngolofuzz.ToLowerArgs
	(*ToTitleArgs)(nil),                  // 41: ngolofuzz.ToTitleArgs
	(*ToValidUTF8Args)(nil),              // 42: ngolofuzz.ToValidUTF8Args
	(*TitleArgs)(nil),                    // 43: ngolofuzz.TitleArgs
	(*TrimArgs)(nil),                     // 44: ngolofuzz.TrimArgs
	(*TrimLeftArgs)(nil),                 // 45: ngolofuzz.TrimLeftArgs
	(*TrimRightArgs)(nil),                // 46: ngolofuzz.TrimRightArgs
	(*TrimSpaceArgs)(nil),                // 47: ngolofuzz.TrimSpaceArgs
	(*TrimPrefixArgs)(nil),               // 48: ngolofuzz.TrimPrefixArgs
	(*TrimSuffixArgs)(nil),               // 49: ngolofuzz.TrimSuffixArgs
	(*ReplaceArgs)(nil),                  // 50: ngolofuzz.ReplaceArgs
	(*ReplaceAllArgs)(nil),               // 51: ngolofuzz.ReplaceAllArgs
	(*EqualFoldArgs)(nil),                // 52: ngolofuzz.EqualFoldArgs
	(*IndexArgs)(nil),                    // 53: ngolofuzz.IndexArgs
	(*CutArgs)(nil),                      // 54: ngolofuzz.CutArgs
	(*CutPrefixArgs)(nil),                // 55: ngolofuzz.CutPrefixArgs
	(*CutSuffixArgs)(nil),                // 56: ngolofuzz.CutSuffixArgs
	(*NgoloFuzzOne)(nil),                 // 57: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),                 // 58: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),                // 59: ngolofuzz.NgoloFuzzList
}
// Dependency indexes into file_ngolofuzz_proto_goTypes; the trailing entries
// delimit the sub-lists consumed by the protobuf runtime's TypeBuilder.
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.Clone:type_name -> ngolofuzz.CloneArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.Compare:type_name -> ngolofuzz.CompareArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.Lines:type_name -> ngolofuzz.LinesArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.SplitSeq:type_name -> ngolofuzz.SplitSeqArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.SplitAfterSeq:type_name -> ngolofuzz.SplitAfterSeqArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.FieldsSeq:type_name -> ngolofuzz.FieldsSeqArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.ReaderNgdotLen:type_name -> ngolofuzz.ReaderNgdotLenArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.ReaderNgdotSize:type_name -> ngolofuzz.ReaderNgdotSizeArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.ReaderNgdotRead:type_name -> ngolofuzz.ReaderNgdotReadArgs
	9,  // 9: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadAt:type_name -> ngolofuzz.ReaderNgdotReadAtArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadByte:type_name -> ngolofuzz.ReaderNgdotReadByteArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.ReaderNgdotUnreadByte:type_name -> ngolofuzz.ReaderNgdotUnreadByteArgs
	12, // 12: ngolofuzz.NgoloFuzzOne.ReaderNgdotReadRune:type_name -> ngolofuzz.ReaderNgdotReadRuneArgs
	13, // 13: ngolofuzz.NgoloFuzzOne.ReaderNgdotUnreadRune:type_name -> ngolofuzz.ReaderNgdotUnreadRuneArgs
	14, // 14: ngolofuzz.NgoloFuzzOne.ReaderNgdotSeek:type_name -> ngolofuzz.ReaderNgdotSeekArgs
	15, // 15: ngolofuzz.NgoloFuzzOne.ReaderNgdotWriteTo:type_name -> ngolofuzz.ReaderNgdotWriteToArgs
	16, // 16: ngolofuzz.NgoloFuzzOne.ReaderNgdotReset:type_name -> ngolofuzz.ReaderNgdotResetArgs
	17, // 17: ngolofuzz.NgoloFuzzOne.NewReader:type_name -> ngolofuzz.NewReaderArgs
	18, // 18: ngolofuzz.NgoloFuzzOne.ReplacerNgdotReplace:type_name -> ngolofuzz.ReplacerNgdotReplaceArgs
	19, // 19: ngolofuzz.NgoloFuzzOne.ReplacerNgdotWriteString:type_name -> ngolofuzz.ReplacerNgdotWriteStringArgs
	20, // 20: ngolofuzz.NgoloFuzzOne.Count:type_name -> ngolofuzz.CountArgs
	21, // 21: ngolofuzz.NgoloFuzzOne.Contains:type_name -> ngolofuzz.ContainsArgs
	22, // 22: ngolofuzz.NgoloFuzzOne.ContainsAny:type_name -> ngolofuzz.ContainsAnyArgs
	23, // 23: ngolofuzz.NgoloFuzzOne.ContainsRune:type_name -> ngolofuzz.ContainsRuneArgs
	24, // 24: ngolofuzz.NgoloFuzzOne.LastIndex:type_name -> ngolofuzz.LastIndexArgs
	25, // 25: ngolofuzz.NgoloFuzzOne.IndexByte:type_name -> ngolofuzz.IndexByteArgs
	26, // 26: ngolofuzz.NgoloFuzzOne.IndexRune:type_name -> ngolofuzz.IndexRuneArgs
	27, // 27: ngolofuzz.NgoloFuzzOne.IndexAny:type_name -> ngolofuzz.IndexAnyArgs
	28, // 28: ngolofuzz.NgoloFuzzOne.LastIndexAny:type_name -> ngolofuzz.LastIndexAnyArgs
	29, // 29: ngolofuzz.NgoloFuzzOne.LastIndexByte:type_name -> ngolofuzz.LastIndexByteArgs
	30, // 30: ngolofuzz.NgoloFuzzOne.SplitN:type_name -> ngolofuzz.SplitNArgs
	31, // 31: ngolofuzz.NgoloFuzzOne.SplitAfterN:type_name -> ngolofuzz.SplitAfterNArgs
	32, // 32: ngolofuzz.NgoloFuzzOne.Split:type_name -> ngolofuzz.SplitArgs
	33, // 33: ngolofuzz.NgoloFuzzOne.SplitAfter:type_name -> ngolofuzz.SplitAfterArgs
	34, // 34: ngolofuzz.NgoloFuzzOne.Fields:type_name -> ngolofuzz.FieldsArgs
	35, // 35: ngolofuzz.NgoloFuzzOne.Join:type_name -> ngolofuzz.JoinArgs
	36, // 36: ngolofuzz.NgoloFuzzOne.HasPrefix:type_name -> ngolofuzz.HasPrefixArgs
	37, // 37: ngolofuzz.NgoloFuzzOne.HasSuffix:type_name -> ngolofuzz.HasSuffixArgs
	38, // 38: ngolofuzz.NgoloFuzzOne.Repeat:type_name -> ngolofuzz.RepeatArgs
	39, // 39: ngolofuzz.NgoloFuzzOne.ToUpper:type_name -> ngolofuzz.ToUpperArgs
	40, // 40: ngolofuzz.NgoloFuzzOne.ToLower:type_name -> ngolofuzz.ToLowerArgs
	41, // 41: ngolofuzz.NgoloFuzzOne.ToTitle:type_name -> ngolofuzz.ToTitleArgs
	42, // 42: ngolofuzz.NgoloFuzzOne.ToValidUTF8:type_name -> ngolofuzz.ToValidUTF8Args
	43, // 43: ngolofuzz.NgoloFuzzOne.Title:type_name -> ngolofuzz.TitleArgs
	44, // 44: ngolofuzz.NgoloFuzzOne.Trim:type_name -> ngolofuzz.TrimArgs
	45, // 45: ngolofuzz.NgoloFuzzOne.TrimLeft:type_name -> ngolofuzz.TrimLeftArgs
	46, // 46: ngolofuzz.NgoloFuzzOne.TrimRight:type_name -> ngolofuzz.TrimRightArgs
	47, // 47: ngolofuzz.NgoloFuzzOne.TrimSpace:type_name -> ngolofuzz.TrimSpaceArgs
	48, // 48: ngolofuzz.NgoloFuzzOne.TrimPrefix:type_name -> ngolofuzz.TrimPrefixArgs
	49, // 49: ngolofuzz.NgoloFuzzOne.TrimSuffix:type_name -> ngolofuzz.TrimSuffixArgs
	50, // 50: ngolofuzz.NgoloFuzzOne.Replace:type_name -> ngolofuzz.ReplaceArgs
	51, // 51: ngolofuzz.NgoloFuzzOne.ReplaceAll:type_name -> ngolofuzz.ReplaceAllArgs
	52, // 52: ngolofuzz.NgoloFuzzOne.EqualFold:type_name -> ngolofuzz.EqualFoldArgs
	53, // 53: ngolofuzz.NgoloFuzzOne.Index:type_name -> ngolofuzz.IndexArgs
	54, // 54: ngolofuzz.NgoloFuzzOne.Cut:type_name -> ngolofuzz.CutArgs
	55, // 55: ngolofuzz.NgoloFuzzOne.CutPrefix:type_name -> ngolofuzz.CutPrefixArgs
	56, // 56: ngolofuzz.NgoloFuzzOne.CutSuffix:type_name -> ngolofuzz.CutSuffixArgs
	57, // 57: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	58, // [58:58] is the sub-list for method output_type
	58, // [58:58] is the sub-list for method input_type
	58, // [58:58] is the sub-list for extension type_name
	58, // [58:58] is the sub-list for extension extendee
	0,  // [0:58] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the oneof wrapper types and builds the
// protobuf file/type metadata. It is idempotent: a second call returns early
// once File_ngolofuzz_proto has been populated.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Wrapper types for the NgoloFuzzOne "item" oneof (message index 57).
	file_ngolofuzz_proto_msgTypes[57].OneofWrappers = []any{
		(*NgoloFuzzOne_Clone)(nil),
		(*NgoloFuzzOne_Compare)(nil),
		(*NgoloFuzzOne_Lines)(nil),
		(*NgoloFuzzOne_SplitSeq)(nil),
		(*NgoloFuzzOne_SplitAfterSeq)(nil),
		(*NgoloFuzzOne_FieldsSeq)(nil),
		(*NgoloFuzzOne_ReaderNgdotLen)(nil),
		(*NgoloFuzzOne_ReaderNgdotSize)(nil),
		(*NgoloFuzzOne_ReaderNgdotRead)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadAt)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadByte)(nil),
		(*NgoloFuzzOne_ReaderNgdotUnreadByte)(nil),
		(*NgoloFuzzOne_ReaderNgdotReadRune)(nil),
		(*NgoloFuzzOne_ReaderNgdotUnreadRune)(nil),
		(*NgoloFuzzOne_ReaderNgdotSeek)(nil),
		(*NgoloFuzzOne_ReaderNgdotWriteTo)(nil),
		(*NgoloFuzzOne_ReaderNgdotReset)(nil),
		(*NgoloFuzzOne_NewReader)(nil),
		(*NgoloFuzzOne_ReplacerNgdotReplace)(nil),
		(*NgoloFuzzOne_ReplacerNgdotWriteString)(nil),
		(*NgoloFuzzOne_Count)(nil),
		(*NgoloFuzzOne_Contains)(nil),
		(*NgoloFuzzOne_ContainsAny)(nil),
		(*NgoloFuzzOne_ContainsRune)(nil),
		(*NgoloFuzzOne_LastIndex)(nil),
		(*NgoloFuzzOne_IndexByte)(nil),
		(*NgoloFuzzOne_IndexRune)(nil),
		(*NgoloFuzzOne_IndexAny)(nil),
		(*NgoloFuzzOne_LastIndexAny)(nil),
		(*NgoloFuzzOne_LastIndexByte)(nil),
		(*NgoloFuzzOne_SplitN)(nil),
		(*NgoloFuzzOne_SplitAfterN)(nil),
		(*NgoloFuzzOne_Split)(nil),
		(*NgoloFuzzOne_SplitAfter)(nil),
		(*NgoloFuzzOne_Fields)(nil),
		(*NgoloFuzzOne_Join)(nil),
		(*NgoloFuzzOne_HasPrefix)(nil),
		(*NgoloFuzzOne_HasSuffix)(nil),
		(*NgoloFuzzOne_Repeat)(nil),
		(*NgoloFuzzOne_ToUpper)(nil),
		(*NgoloFuzzOne_ToLower)(nil),
		(*NgoloFuzzOne_ToTitle)(nil),
		(*NgoloFuzzOne_ToValidUTF8)(nil),
		(*NgoloFuzzOne_Title)(nil),
		(*NgoloFuzzOne_Trim)(nil),
		(*NgoloFuzzOne_TrimLeft)(nil),
		(*NgoloFuzzOne_TrimRight)(nil),
		(*NgoloFuzzOne_TrimSpace)(nil),
		(*NgoloFuzzOne_TrimPrefix)(nil),
		(*NgoloFuzzOne_TrimSuffix)(nil),
		(*NgoloFuzzOne_Replace)(nil),
		(*NgoloFuzzOne_ReplaceAll)(nil),
		(*NgoloFuzzOne_EqualFold)(nil),
		(*NgoloFuzzOne_Index)(nil),
		(*NgoloFuzzOne_Cut)(nil),
		(*NgoloFuzzOne_CutPrefix)(nil),
		(*NgoloFuzzOne_CutSuffix)(nil),
	}
	// Wrapper types for the NgoloFuzzAny "item" oneof (message index 58).
	file_ngolofuzz_proto_msgTypes[58].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   60,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the construction-time tables; the runtime now owns the data.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_testing_iotest
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"testing/iotest"
"time"
)
// FuzzingConn is a minimal in-memory net.Conn used to feed fuzzer-generated
// bytes to code that expects a network connection. Reads consume buf; writes
// are discarded.
type FuzzingConn struct {
	buf    []byte // bytes served to Read, supplied by the fuzzer
	offset int    // current read position within buf
}

// Read copies up to len(b) unread bytes from the internal buffer into b and
// advances the read position. It returns io.EOF once the buffer is consumed.
//
// Fix: the original guarded the copy with `len(b) < len(c.buf)+c.offset`
// instead of comparing against the remaining byte count (len(c.buf)-c.offset).
// Near the end of the buffer that branch advanced offset by len(b) and
// reported len(b) bytes read even though fewer were copied, handing callers
// uninitialized data. copy() already truncates to the shorter slice, so the
// branch is unnecessary: n = copy(...) is both correct and simpler.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports it as fully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the buffer as fully consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for an in-memory conn.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn
// positioned at the start of the buffer.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: only add these helper constructors if needed.

// CreateBigInt interprets a as a big-endian unsigned integer and returns it
// as a *big.Int (zero for empty input).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over the fuzzer bytes a.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows a slice of int64 values to a slice of int,
// element by element (values are truncated on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array narrows a slice of int64 values to a slice of uint16,
// truncating each element to its low 16 bits.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, decoded as range-over-string does
// (utf8.RuneError for an invalid leading sequence), or NUL when s is empty.
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return '\x00'
}
// FuzzNG_valid is the fuzzer entry point used when data is expected to be a
// valid protobuf encoding of NgoloFuzzList (e.g. produced by a structure-aware
// mutator): a decode failure indicates a harness bug and panics.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised while replaying the generated calls
	// (treated as expected failures); re-raise anything else so the fuzzer
	// reports it. Note the deferred recover only covers FuzzNG_List below,
	// not the Unmarshal above.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
//
// FuzzNG_unsure is the entry point for raw-byte fuzzing: undecodable input is
// simply rejected (return 0) instead of panicking as FuzzNG_valid does.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Same recovery policy as FuzzNG_valid: string panics from the exercised
	// API are swallowed, anything else propagates to the fuzzer.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time FUZZ_NG_REPRODUCER setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays the generated call sequence against testing/iotest.
// On the first invocation, if FUZZ_NG_REPRODUCER names a file, a Go-source
// rendition of the sequence is written there for reproduction. Returns 1 if
// the whole list was executed, 0 otherwise.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Cap runaway inputs; NOTE(review): the first 4097 items have already
		// been executed by the time this triggers.
		if l > 4096 {
			return 0
		}
		// Dispatch on the oneof variant and invoke the matching iotest API.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewWriteLogger:
			arg1 := bytes.NewBuffer(a.NewWriteLogger.W)
			iotest.NewWriteLogger(a.NewWriteLogger.Prefix, arg1)
		case *NgoloFuzzOne_NewReadLogger:
			arg1 := bytes.NewReader(a.NewReadLogger.R)
			iotest.NewReadLogger(a.NewReadLogger.Prefix, arg1)
		case *NgoloFuzzOne_OneByteReader:
			arg0 := bytes.NewReader(a.OneByteReader.R)
			iotest.OneByteReader(arg0)
		case *NgoloFuzzOne_HalfReader:
			arg0 := bytes.NewReader(a.HalfReader.R)
			iotest.HalfReader(arg0)
		case *NgoloFuzzOne_DataErrReader:
			arg0 := bytes.NewReader(a.DataErrReader.R)
			iotest.DataErrReader(arg0)
		case *NgoloFuzzOne_TimeoutReader:
			arg0 := bytes.NewReader(a.TimeoutReader.R)
			iotest.TimeoutReader(arg0)
		case *NgoloFuzzOne_TestReader:
			arg0 := bytes.NewReader(a.TestReader.R)
			r0 := iotest.TestReader(arg0, a.TestReader.Content)
			if r0 != nil {
				r0.Error()
				return 0
			}
		case *NgoloFuzzOne_TruncateWriter:
			arg0 := bytes.NewBuffer(a.TruncateWriter.W)
			iotest.TruncateWriter(arg0, a.TruncateWriter.N)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source rendition of the generated call sequence to
// w, one iotest call per line, mirroring the dispatch in FuzzNG_List. Used to
// emit human-readable reproducers (see FUZZ_NG_REPRODUCER).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_NewWriteLogger:
			w.WriteString(fmt.Sprintf("iotest.NewWriteLogger(%#+v, bytes.NewBuffer(%#+v))\n", a.NewWriteLogger.Prefix, a.NewWriteLogger.W))
		case *NgoloFuzzOne_NewReadLogger:
			w.WriteString(fmt.Sprintf("iotest.NewReadLogger(%#+v, bytes.NewReader(%#+v))\n", a.NewReadLogger.Prefix, a.NewReadLogger.R))
		case *NgoloFuzzOne_OneByteReader:
			w.WriteString(fmt.Sprintf("iotest.OneByteReader(bytes.NewReader(%#+v))\n", a.OneByteReader.R))
		case *NgoloFuzzOne_HalfReader:
			w.WriteString(fmt.Sprintf("iotest.HalfReader(bytes.NewReader(%#+v))\n", a.HalfReader.R))
		case *NgoloFuzzOne_DataErrReader:
			w.WriteString(fmt.Sprintf("iotest.DataErrReader(bytes.NewReader(%#+v))\n", a.DataErrReader.R))
		case *NgoloFuzzOne_TimeoutReader:
			w.WriteString(fmt.Sprintf("iotest.TimeoutReader(bytes.NewReader(%#+v))\n", a.TimeoutReader.R))
		case *NgoloFuzzOne_TestReader:
			w.WriteString(fmt.Sprintf("iotest.TestReader(bytes.NewReader(%#+v), %#+v)\n", a.TestReader.R, a.TestReader.Content))
		case *NgoloFuzzOne_TruncateWriter:
			w.WriteString(fmt.Sprintf("iotest.TruncateWriter(bytes.NewBuffer(%#+v), %#+v)\n", a.TruncateWriter.W, a.TruncateWriter.N))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_testing_iotest
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: fail the build if the generated code and the protobuf
// runtime have drifted out of their supported version window.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// NewWriteLoggerArgs carries the arguments for iotest.NewWriteLogger:
// a log prefix and the initial contents of the destination buffer.
type NewWriteLoggerArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Prefix        string                 `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
	W             []byte                 `protobuf:"bytes,2,opt,name=w,proto3" json:"w,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state while keeping type metadata.
func (x *NewWriteLoggerArgs) Reset() {
	*x = NewWriteLoggerArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NewWriteLoggerArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewWriteLoggerArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *NewWriteLoggerArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewWriteLoggerArgs.ProtoReflect.Descriptor instead.
func (*NewWriteLoggerArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}

// GetPrefix returns the log prefix, or "" for a nil receiver.
func (x *NewWriteLoggerArgs) GetPrefix() string {
	if x != nil {
		return x.Prefix
	}
	return ""
}

// GetW returns the destination-buffer bytes, or nil for a nil receiver.
func (x *NewWriteLoggerArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}
// NewReadLoggerArgs carries the arguments for iotest.NewReadLogger:
// a log prefix and the bytes served by the underlying reader.
type NewReadLoggerArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Prefix        string                 `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
	R             []byte                 `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state while keeping type metadata.
func (x *NewReadLoggerArgs) Reset() {
	*x = NewReadLoggerArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NewReadLoggerArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NewReadLoggerArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *NewReadLoggerArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewReadLoggerArgs.ProtoReflect.Descriptor instead.
func (*NewReadLoggerArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}

// GetPrefix returns the log prefix, or "" for a nil receiver.
func (x *NewReadLoggerArgs) GetPrefix() string {
	if x != nil {
		return x.Prefix
	}
	return ""
}

// GetR returns the source bytes, or nil for a nil receiver.
func (x *NewReadLoggerArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// OneByteReaderArgs carries the source bytes for iotest.OneByteReader.
type OneByteReaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state while keeping type metadata.
func (x *OneByteReaderArgs) Reset() {
	*x = OneByteReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *OneByteReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*OneByteReaderArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *OneByteReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use OneByteReaderArgs.ProtoReflect.Descriptor instead.
func (*OneByteReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetR returns the source bytes, or nil for a nil receiver.
func (x *OneByteReaderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// HalfReaderArgs carries the source bytes for iotest.HalfReader.
type HalfReaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state while keeping type metadata.
func (x *HalfReaderArgs) Reset() {
	*x = HalfReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *HalfReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*HalfReaderArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *HalfReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use HalfReaderArgs.ProtoReflect.Descriptor instead.
func (*HalfReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetR returns the source bytes, or nil for a nil receiver.
func (x *HalfReaderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// DataErrReaderArgs carries the source bytes for iotest.DataErrReader.
type DataErrReaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state while keeping type metadata.
func (x *DataErrReaderArgs) Reset() {
	*x = DataErrReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *DataErrReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DataErrReaderArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *DataErrReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DataErrReaderArgs.ProtoReflect.Descriptor instead.
func (*DataErrReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetR returns the source bytes, or nil for a nil receiver.
func (x *DataErrReaderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// TimeoutReaderArgs carries the source bytes for iotest.TimeoutReader.
type TimeoutReaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state while keeping type metadata.
func (x *TimeoutReaderArgs) Reset() {
	*x = TimeoutReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *TimeoutReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TimeoutReaderArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *TimeoutReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TimeoutReaderArgs.ProtoReflect.Descriptor instead.
func (*TimeoutReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetR returns the source bytes, or nil for a nil receiver.
func (x *TimeoutReaderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}
// TestReaderArgs carries the arguments for iotest.TestReader: the bytes
// served by the reader under test and the content it is expected to yield.
type TestReaderArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             []byte                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	Content       []byte                 `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state while keeping type metadata.
func (x *TestReaderArgs) Reset() {
	*x = TestReaderArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *TestReaderArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TestReaderArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *TestReaderArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TestReaderArgs.ProtoReflect.Descriptor instead.
func (*TestReaderArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetR returns the reader's source bytes, or nil for a nil receiver.
func (x *TestReaderArgs) GetR() []byte {
	if x != nil {
		return x.R
	}
	return nil
}

// GetContent returns the expected content, or nil for a nil receiver.
func (x *TestReaderArgs) GetContent() []byte {
	if x != nil {
		return x.Content
	}
	return nil
}
// TruncateWriterArgs carries the arguments for iotest.TruncateWriter: the
// destination buffer's initial contents and the byte count N to keep.
type TruncateWriterArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	W             []byte                 `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
	N             int64                  `protobuf:"varint,2,opt,name=n,proto3" json:"n,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state while keeping type metadata.
func (x *TruncateWriterArgs) Reset() {
	*x = TruncateWriterArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *TruncateWriterArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TruncateWriterArgs) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *TruncateWriterArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TruncateWriterArgs.ProtoReflect.Descriptor instead.
func (*TruncateWriterArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}

// GetW returns the destination-buffer bytes, or nil for a nil receiver.
func (x *TruncateWriterArgs) GetW() []byte {
	if x != nil {
		return x.W
	}
	return nil
}

// GetN returns the truncation count, or 0 for a nil receiver.
func (x *TruncateWriterArgs) GetN() int64 {
	if x != nil {
		return x.N
	}
	return 0
}
// NgoloFuzzOne represents a single generated API call: exactly one of the
// Item oneof variants is set, naming the iotest function and its arguments.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_NewWriteLogger
	//	*NgoloFuzzOne_NewReadLogger
	//	*NgoloFuzzOne_OneByteReader
	//	*NgoloFuzzOne_HalfReader
	//	*NgoloFuzzOne_DataErrReader
	//	*NgoloFuzzOne_TimeoutReader
	//	*NgoloFuzzOne_TestReader
	//	*NgoloFuzzOne_TruncateWriter
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state while keeping type metadata.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetItem returns the set oneof wrapper, or nil if none is set.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below return the corresponding oneof payload when that
// variant is set, and nil otherwise.

func (x *NgoloFuzzOne) GetNewWriteLogger() *NewWriteLoggerArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewWriteLogger); ok {
			return x.NewWriteLogger
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewReadLogger() *NewReadLoggerArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewReadLogger); ok {
			return x.NewReadLogger
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetOneByteReader() *OneByteReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_OneByteReader); ok {
			return x.OneByteReader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetHalfReader() *HalfReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_HalfReader); ok {
			return x.HalfReader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetDataErrReader() *DataErrReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DataErrReader); ok {
			return x.DataErrReader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeoutReader() *TimeoutReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_TimeoutReader); ok {
			return x.TimeoutReader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetTestReader() *TestReaderArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_TestReader); ok {
			return x.TestReader
		}
	}
	return nil
}

func (x *NgoloFuzzOne) GetTruncateWriter() *TruncateWriterArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_TruncateWriter); ok {
			return x.TruncateWriter
		}
	}
	return nil
}

// isNgoloFuzzOne_Item is the sealed interface implemented by every wrapper
// type of the "item" oneof.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

type NgoloFuzzOne_NewWriteLogger struct {
	NewWriteLogger *NewWriteLoggerArgs `protobuf:"bytes,1,opt,name=NewWriteLogger,proto3,oneof"`
}

type NgoloFuzzOne_NewReadLogger struct {
	NewReadLogger *NewReadLoggerArgs `protobuf:"bytes,2,opt,name=NewReadLogger,proto3,oneof"`
}

type NgoloFuzzOne_OneByteReader struct {
	OneByteReader *OneByteReaderArgs `protobuf:"bytes,3,opt,name=OneByteReader,proto3,oneof"`
}

type NgoloFuzzOne_HalfReader struct {
	HalfReader *HalfReaderArgs `protobuf:"bytes,4,opt,name=HalfReader,proto3,oneof"`
}

type NgoloFuzzOne_DataErrReader struct {
	DataErrReader *DataErrReaderArgs `protobuf:"bytes,5,opt,name=DataErrReader,proto3,oneof"`
}

type NgoloFuzzOne_TimeoutReader struct {
	TimeoutReader *TimeoutReaderArgs `protobuf:"bytes,6,opt,name=TimeoutReader,proto3,oneof"`
}

type NgoloFuzzOne_TestReader struct {
	TestReader *TestReaderArgs `protobuf:"bytes,7,opt,name=TestReader,proto3,oneof"`
}

type NgoloFuzzOne_TruncateWriter struct {
	TruncateWriter *TruncateWriterArgs `protobuf:"bytes,8,opt,name=TruncateWriter,proto3,oneof"`
}

func (*NgoloFuzzOne_NewWriteLogger) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_NewReadLogger) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_OneByteReader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_HalfReader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_DataErrReader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_TimeoutReader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_TestReader) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_TruncateWriter) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny holds one scalar value of an arbitrary primitive kind;
// exactly one of the Item oneof variants is set.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero state while keeping type metadata.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String renders the message in the protobuf text format.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetItem returns the set oneof wrapper, or nil if none is set.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// The Get* accessors below return the corresponding oneof payload when that
// variant is set, and the zero value otherwise.

func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every wrapper
// type of the "item" oneof.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\":\n" +
"\x12NewWriteLoggerArgs\x12\x16\n" +
"\x06prefix\x18\x01 \x01(\tR\x06prefix\x12\f\n" +
"\x01w\x18\x02 \x01(\fR\x01w\"9\n" +
"\x11NewReadLoggerArgs\x12\x16\n" +
"\x06prefix\x18\x01 \x01(\tR\x06prefix\x12\f\n" +
"\x01r\x18\x02 \x01(\fR\x01r\"!\n" +
"\x11OneByteReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"\x1e\n" +
"\x0eHalfReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"!\n" +
"\x11DataErrReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"!\n" +
"\x11TimeoutReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\"8\n" +
"\x0eTestReaderArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\fR\x01r\x12\x18\n" +
"\acontent\x18\x02 \x01(\fR\acontent\"0\n" +
"\x12TruncateWriterArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\f\n" +
"\x01n\x18\x02 \x01(\x03R\x01n\"\xba\x04\n" +
"\fNgoloFuzzOne\x12G\n" +
"\x0eNewWriteLogger\x18\x01 \x01(\v2\x1d.ngolofuzz.NewWriteLoggerArgsH\x00R\x0eNewWriteLogger\x12D\n" +
"\rNewReadLogger\x18\x02 \x01(\v2\x1c.ngolofuzz.NewReadLoggerArgsH\x00R\rNewReadLogger\x12D\n" +
"\rOneByteReader\x18\x03 \x01(\v2\x1c.ngolofuzz.OneByteReaderArgsH\x00R\rOneByteReader\x12;\n" +
"\n" +
"HalfReader\x18\x04 \x01(\v2\x19.ngolofuzz.HalfReaderArgsH\x00R\n" +
"HalfReader\x12D\n" +
"\rDataErrReader\x18\x05 \x01(\v2\x1c.ngolofuzz.DataErrReaderArgsH\x00R\rDataErrReader\x12D\n" +
"\rTimeoutReader\x18\x06 \x01(\v2\x1c.ngolofuzz.TimeoutReaderArgsH\x00R\rTimeoutReader\x12;\n" +
"\n" +
"TestReader\x18\a \x01(\v2\x19.ngolofuzz.TestReaderArgsH\x00R\n" +
"TestReader\x12G\n" +
"\x0eTruncateWriter\x18\b \x01(\v2\x1d.ngolofuzz.TruncateWriterArgsH\x00R\x0eTruncateWriterB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_testing_iotestb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw file
// descriptor exactly once and returns the cached bytes; it backs the
// Deprecated Descriptor() methods of this file's message types.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
// unsafe.Slice over StringData yields a zero-copy []byte view of the
// constant descriptor string to feed the compressor.
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_ngolofuzz_proto_goTypes = []any{
(*NewWriteLoggerArgs)(nil), // 0: ngolofuzz.NewWriteLoggerArgs
(*NewReadLoggerArgs)(nil), // 1: ngolofuzz.NewReadLoggerArgs
(*OneByteReaderArgs)(nil), // 2: ngolofuzz.OneByteReaderArgs
(*HalfReaderArgs)(nil), // 3: ngolofuzz.HalfReaderArgs
(*DataErrReaderArgs)(nil), // 4: ngolofuzz.DataErrReaderArgs
(*TimeoutReaderArgs)(nil), // 5: ngolofuzz.TimeoutReaderArgs
(*TestReaderArgs)(nil), // 6: ngolofuzz.TestReaderArgs
(*TruncateWriterArgs)(nil), // 7: ngolofuzz.TruncateWriterArgs
(*NgoloFuzzOne)(nil), // 8: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 9: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 10: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.NewWriteLogger:type_name -> ngolofuzz.NewWriteLoggerArgs
1, // 1: ngolofuzz.NgoloFuzzOne.NewReadLogger:type_name -> ngolofuzz.NewReadLoggerArgs
2, // 2: ngolofuzz.NgoloFuzzOne.OneByteReader:type_name -> ngolofuzz.OneByteReaderArgs
3, // 3: ngolofuzz.NgoloFuzzOne.HalfReader:type_name -> ngolofuzz.HalfReaderArgs
4, // 4: ngolofuzz.NgoloFuzzOne.DataErrReader:type_name -> ngolofuzz.DataErrReaderArgs
5, // 5: ngolofuzz.NgoloFuzzOne.TimeoutReader:type_name -> ngolofuzz.TimeoutReaderArgs
6, // 6: ngolofuzz.NgoloFuzzOne.TestReader:type_name -> ngolofuzz.TestReaderArgs
7, // 7: ngolofuzz.NgoloFuzzOne.TruncateWriter:type_name -> ngolofuzz.TruncateWriterArgs
8, // 8: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
9, // [9:9] is the sub-list for method output_type
9, // [9:9] is the sub-list for method input_type
9, // [9:9] is the sub-list for extension type_name
9, // [9:9] is the sub-list for extension extendee
0, // [0:9] is the sub-list for field type_name
}
// init registers ngolofuzz.proto and its 11 message types with the protobuf
// runtime (standard protoc-gen-go boilerplate).
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
// Already built; init can be re-entered indirectly.
return
}
// Oneof wrapper types for NgoloFuzzOne (msgTypes[8]).
file_ngolofuzz_proto_msgTypes[8].OneofWrappers = []any{
(*NgoloFuzzOne_NewWriteLogger)(nil),
(*NgoloFuzzOne_NewReadLogger)(nil),
(*NgoloFuzzOne_OneByteReader)(nil),
(*NgoloFuzzOne_HalfReader)(nil),
(*NgoloFuzzOne_DataErrReader)(nil),
(*NgoloFuzzOne_TimeoutReader)(nil),
(*NgoloFuzzOne_TestReader)(nil),
(*NgoloFuzzOne_TruncateWriter)(nil),
}
// Oneof wrapper types for NgoloFuzzAny (msgTypes[9]).
file_ngolofuzz_proto_msgTypes[9].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 11,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Drop the construction tables so they can be garbage-collected.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_text_scanner
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"text/scanner"
"time"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a fixed placeholder net.Addr returned by FuzzingConn.
type FuzzingAddr struct{}
// Network returns a constant fake network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
// String returns a constant fake address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr implements net.Conn by returning a fixed placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr implements net.Conn by returning a fixed placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op; fuzzing connections never time out.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start of
// the buffer.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
//TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over the bytes of a.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray converts each element of a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array converts each element of a to uint16, truncating to
// the low 16 bits (Go integer conversion semantics).
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Invalid UTF-8 yields utf8.RuneError, matching range-over-string decoding.
func GetRune(s string) rune {
	runes := []rune(s)
	if len(runes) == 0 {
		return '\x00'
	}
	return runes[0]
}
// FuzzNG_valid replays a protobuf-encoded NgoloFuzzList that is expected to
// decode cleanly: a decode failure is treated as a harness bug and panics.
// String panics raised while replaying are swallowed (the harness uses them
// as a soft abort); any other panic value is re-raised. Returns 0 when a
// string panic was recovered, otherwise FuzzNG_List's result.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
		// string panics are deliberately ignored
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is like FuzzNG_valid but tolerates inputs that are not valid
// protobuf: a decode failure simply rejects the input (returns 0) instead of
// panicking. String panics raised while replaying are swallowed; any other
// panic value is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
		// string panics are deliberately ignored
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool
// FuzzNG_List replays the sequence of text/scanner API calls encoded in gen.
// Results of calls that return pooled types are appended to *Results slices
// and reused round-robin as receivers for later calls. Returns 1 when the
// whole list was replayed, 0 when it was truncated by the iteration cap.
//
// NOTE(review): PositionResults is never appended to, and ScannerResults is
// only appended to inside the ScannerNgdotInit case, which itself requires a
// non-empty ScannerResults — so as written every case except TokenString is
// skipped by its emptiness guard. Presumably the ngolo-fuzzing generator
// normally seeds these pools; confirm against the generator.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// On first call, optionally write a Go-source reproducer of this call
// list to the file named by FUZZ_NG_REPRODUCER.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var PositionResults []*scanner.Position
PositionResultsIndex := 0
var ScannerResults []*scanner.Scanner
ScannerResultsIndex := 0
for l := range gen.List {
// Cap the amount of work done for one fuzz input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_PositionNgdotIsValid:
if len(PositionResults) == 0 {
continue
}
// Pick the next pooled receiver round-robin.
arg0 := PositionResults[PositionResultsIndex]
PositionResultsIndex = (PositionResultsIndex + 1) % len(PositionResults)
arg0.IsValid()
case *NgoloFuzzOne_PositionNgdotString:
if len(PositionResults) == 0 {
continue
}
arg0 := PositionResults[PositionResultsIndex]
PositionResultsIndex = (PositionResultsIndex + 1) % len(PositionResults)
arg0.String()
case *NgoloFuzzOne_TokenString:
// Only the first rune of the fuzz-supplied string is used.
arg0 := GetRune(a.TokenString.Tok)
scanner.TokenString(arg0)
case *NgoloFuzzOne_ScannerNgdotInit:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
arg1 := bytes.NewReader(a.ScannerNgdotInit.Src)
// scanner.Init returns its receiver, so pooling r0 grows the pool
// with a duplicate pointer.
r0 := arg0.Init(arg1)
if r0 != nil{
ScannerResults = append(ScannerResults, r0)
}
case *NgoloFuzzOne_ScannerNgdotNext:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
arg0.Next()
case *NgoloFuzzOne_ScannerNgdotPeek:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
arg0.Peek()
case *NgoloFuzzOne_ScannerNgdotScan:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
arg0.Scan()
case *NgoloFuzzOne_ScannerNgdotPos:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
arg0.Pos()
case *NgoloFuzzOne_ScannerNgdotTokenText:
if len(ScannerResults) == 0 {
continue
}
arg0 := ScannerResults[ScannerResultsIndex]
ScannerResultsIndex = (ScannerResultsIndex + 1) % len(ScannerResults)
arg0.TokenText()
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer of the call sequence in gen to
// w, mirroring FuzzNG_List's round-robin pooling with plain counters
// (PositionNb/ScannerNb are pool sizes, *ResultsIndex the next receiver).
// WriteString errors are deliberately ignored (best-effort debug output).
//
// NOTE(review): PositionNb and ScannerNb start at 0, and ScannerNb is only
// incremented inside the ScannerNgdotInit case, which is itself skipped
// while ScannerNb == 0 — so every Position*/Scanner* branch below appears
// unreachable as written, matching the never-seeded pools in FuzzNG_List.
// Confirm against the ngolo-fuzzing generator.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
PositionNb := 0
PositionResultsIndex := 0
ScannerNb := 0
ScannerResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_PositionNgdotIsValid:
if PositionNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Position%d.IsValid()\n", PositionResultsIndex))
PositionResultsIndex = (PositionResultsIndex + 1) % PositionNb
case *NgoloFuzzOne_PositionNgdotString:
if PositionNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Position%d.String()\n", PositionResultsIndex))
PositionResultsIndex = (PositionResultsIndex + 1) % PositionNb
case *NgoloFuzzOne_TokenString:
w.WriteString(fmt.Sprintf("scanner.TokenString(GetRune(%#+v))\n", a.TokenString.Tok))
case *NgoloFuzzOne_ScannerNgdotInit:
if ScannerNb == 0 {
continue
}
// Emits the new scanner's line before growing the pool, then advances
// the round-robin index modulo the grown pool size.
w.WriteString(fmt.Sprintf("Scanner%d := Scanner%d.Init(bytes.NewReader(%#+v))\n", ScannerNb, ScannerResultsIndex, a.ScannerNgdotInit.Src))
ScannerNb = ScannerNb + 1
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
case *NgoloFuzzOne_ScannerNgdotNext:
if ScannerNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Scanner%d.Next()\n", ScannerResultsIndex))
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
case *NgoloFuzzOne_ScannerNgdotPeek:
if ScannerNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Scanner%d.Peek()\n", ScannerResultsIndex))
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
case *NgoloFuzzOne_ScannerNgdotScan:
if ScannerNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Scanner%d.Scan()\n", ScannerResultsIndex))
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
case *NgoloFuzzOne_ScannerNgdotPos:
if ScannerNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Scanner%d.Pos()\n", ScannerResultsIndex))
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
case *NgoloFuzzOne_ScannerNgdotTokenText:
if ScannerNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Scanner%d.TokenText()\n", ScannerResultsIndex))
ScannerResultsIndex = (ScannerResultsIndex + 1) % ScannerNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_text_scanner
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type PositionNgdotIsValidArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PositionNgdotIsValidArgs) Reset() {
*x = PositionNgdotIsValidArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PositionNgdotIsValidArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PositionNgdotIsValidArgs) ProtoMessage() {}
func (x *PositionNgdotIsValidArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PositionNgdotIsValidArgs.ProtoReflect.Descriptor instead.
func (*PositionNgdotIsValidArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
type PositionNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *PositionNgdotStringArgs) Reset() {
*x = PositionNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *PositionNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PositionNgdotStringArgs) ProtoMessage() {}
func (x *PositionNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PositionNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*PositionNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type TokenStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Tok string `protobuf:"bytes,1,opt,name=tok,proto3" json:"tok,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TokenStringArgs) Reset() {
*x = TokenStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TokenStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TokenStringArgs) ProtoMessage() {}
func (x *TokenStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TokenStringArgs.ProtoReflect.Descriptor instead.
func (*TokenStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *TokenStringArgs) GetTok() string {
if x != nil {
return x.Tok
}
return ""
}
type ScannerNgdotInitArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Src []byte `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ScannerNgdotInitArgs) Reset() {
*x = ScannerNgdotInitArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotInitArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotInitArgs) ProtoMessage() {}
func (x *ScannerNgdotInitArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotInitArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotInitArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *ScannerNgdotInitArgs) GetSrc() []byte {
if x != nil {
return x.Src
}
return nil
}
type ScannerNgdotNextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ScannerNgdotNextArgs) Reset() {
*x = ScannerNgdotNextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotNextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotNextArgs) ProtoMessage() {}
func (x *ScannerNgdotNextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotNextArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotNextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
type ScannerNgdotPeekArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ScannerNgdotPeekArgs) Reset() {
*x = ScannerNgdotPeekArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotPeekArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotPeekArgs) ProtoMessage() {}
func (x *ScannerNgdotPeekArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotPeekArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotPeekArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
type ScannerNgdotScanArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ScannerNgdotScanArgs) Reset() {
*x = ScannerNgdotScanArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotScanArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotScanArgs) ProtoMessage() {}
func (x *ScannerNgdotScanArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotScanArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotScanArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
type ScannerNgdotPosArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ScannerNgdotPosArgs) Reset() {
*x = ScannerNgdotPosArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotPosArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotPosArgs) ProtoMessage() {}
func (x *ScannerNgdotPosArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotPosArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotPosArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
type ScannerNgdotTokenTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ScannerNgdotTokenTextArgs) Reset() {
*x = ScannerNgdotTokenTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ScannerNgdotTokenTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ScannerNgdotTokenTextArgs) ProtoMessage() {}
func (x *ScannerNgdotTokenTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ScannerNgdotTokenTextArgs.ProtoReflect.Descriptor instead.
func (*ScannerNgdotTokenTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_PositionNgdotIsValid
// *NgoloFuzzOne_PositionNgdotString
// *NgoloFuzzOne_TokenString
// *NgoloFuzzOne_ScannerNgdotInit
// *NgoloFuzzOne_ScannerNgdotNext
// *NgoloFuzzOne_ScannerNgdotPeek
// *NgoloFuzzOne_ScannerNgdotScan
// *NgoloFuzzOne_ScannerNgdotPos
// *NgoloFuzzOne_ScannerNgdotTokenText
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetPositionNgdotIsValid() *PositionNgdotIsValidArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PositionNgdotIsValid); ok {
return x.PositionNgdotIsValid
}
}
return nil
}
func (x *NgoloFuzzOne) GetPositionNgdotString() *PositionNgdotStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_PositionNgdotString); ok {
return x.PositionNgdotString
}
}
return nil
}
func (x *NgoloFuzzOne) GetTokenString() *TokenStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TokenString); ok {
return x.TokenString
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotInit() *ScannerNgdotInitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotInit); ok {
return x.ScannerNgdotInit
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotNext() *ScannerNgdotNextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotNext); ok {
return x.ScannerNgdotNext
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotPeek() *ScannerNgdotPeekArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotPeek); ok {
return x.ScannerNgdotPeek
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotScan() *ScannerNgdotScanArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotScan); ok {
return x.ScannerNgdotScan
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotPos() *ScannerNgdotPosArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotPos); ok {
return x.ScannerNgdotPos
}
}
return nil
}
func (x *NgoloFuzzOne) GetScannerNgdotTokenText() *ScannerNgdotTokenTextArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ScannerNgdotTokenText); ok {
return x.ScannerNgdotTokenText
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_PositionNgdotIsValid struct {
PositionNgdotIsValid *PositionNgdotIsValidArgs `protobuf:"bytes,1,opt,name=PositionNgdotIsValid,proto3,oneof"`
}
type NgoloFuzzOne_PositionNgdotString struct {
PositionNgdotString *PositionNgdotStringArgs `protobuf:"bytes,2,opt,name=PositionNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_TokenString struct {
TokenString *TokenStringArgs `protobuf:"bytes,3,opt,name=TokenString,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotInit struct {
ScannerNgdotInit *ScannerNgdotInitArgs `protobuf:"bytes,4,opt,name=ScannerNgdotInit,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotNext struct {
ScannerNgdotNext *ScannerNgdotNextArgs `protobuf:"bytes,5,opt,name=ScannerNgdotNext,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotPeek struct {
ScannerNgdotPeek *ScannerNgdotPeekArgs `protobuf:"bytes,6,opt,name=ScannerNgdotPeek,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotScan struct {
ScannerNgdotScan *ScannerNgdotScanArgs `protobuf:"bytes,7,opt,name=ScannerNgdotScan,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotPos struct {
ScannerNgdotPos *ScannerNgdotPosArgs `protobuf:"bytes,8,opt,name=ScannerNgdotPos,proto3,oneof"`
}
type NgoloFuzzOne_ScannerNgdotTokenText struct {
ScannerNgdotTokenText *ScannerNgdotTokenTextArgs `protobuf:"bytes,9,opt,name=ScannerNgdotTokenText,proto3,oneof"`
}
func (*NgoloFuzzOne_PositionNgdotIsValid) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PositionNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TokenString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotInit) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotNext) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotPeek) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotScan) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotPos) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ScannerNgdotTokenText) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1a\n" +
"\x18PositionNgdotIsValidArgs\"\x19\n" +
"\x17PositionNgdotStringArgs\"#\n" +
"\x0fTokenStringArgs\x12\x10\n" +
"\x03tok\x18\x01 \x01(\tR\x03tok\"(\n" +
"\x14ScannerNgdotInitArgs\x12\x10\n" +
"\x03src\x18\x01 \x01(\fR\x03src\"\x16\n" +
"\x14ScannerNgdotNextArgs\"\x16\n" +
"\x14ScannerNgdotPeekArgs\"\x16\n" +
"\x14ScannerNgdotScanArgs\"\x15\n" +
"\x13ScannerNgdotPosArgs\"\x1b\n" +
"\x19ScannerNgdotTokenTextArgs\"\xef\x05\n" +
"\fNgoloFuzzOne\x12Y\n" +
"\x14PositionNgdotIsValid\x18\x01 \x01(\v2#.ngolofuzz.PositionNgdotIsValidArgsH\x00R\x14PositionNgdotIsValid\x12V\n" +
"\x13PositionNgdotString\x18\x02 \x01(\v2\".ngolofuzz.PositionNgdotStringArgsH\x00R\x13PositionNgdotString\x12>\n" +
"\vTokenString\x18\x03 \x01(\v2\x1a.ngolofuzz.TokenStringArgsH\x00R\vTokenString\x12M\n" +
"\x10ScannerNgdotInit\x18\x04 \x01(\v2\x1f.ngolofuzz.ScannerNgdotInitArgsH\x00R\x10ScannerNgdotInit\x12M\n" +
"\x10ScannerNgdotNext\x18\x05 \x01(\v2\x1f.ngolofuzz.ScannerNgdotNextArgsH\x00R\x10ScannerNgdotNext\x12M\n" +
"\x10ScannerNgdotPeek\x18\x06 \x01(\v2\x1f.ngolofuzz.ScannerNgdotPeekArgsH\x00R\x10ScannerNgdotPeek\x12M\n" +
"\x10ScannerNgdotScan\x18\a \x01(\v2\x1f.ngolofuzz.ScannerNgdotScanArgsH\x00R\x10ScannerNgdotScan\x12J\n" +
"\x0fScannerNgdotPos\x18\b \x01(\v2\x1e.ngolofuzz.ScannerNgdotPosArgsH\x00R\x0fScannerNgdotPos\x12\\\n" +
"\x15ScannerNgdotTokenText\x18\t \x01(\v2$.ngolofuzz.ScannerNgdotTokenTextArgsH\x00R\x15ScannerNgdotTokenTextB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x19Z\x17./;fuzz_ng_text_scannerb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
var file_ngolofuzz_proto_goTypes = []any{
(*PositionNgdotIsValidArgs)(nil), // 0: ngolofuzz.PositionNgdotIsValidArgs
(*PositionNgdotStringArgs)(nil), // 1: ngolofuzz.PositionNgdotStringArgs
(*TokenStringArgs)(nil), // 2: ngolofuzz.TokenStringArgs
(*ScannerNgdotInitArgs)(nil), // 3: ngolofuzz.ScannerNgdotInitArgs
(*ScannerNgdotNextArgs)(nil), // 4: ngolofuzz.ScannerNgdotNextArgs
(*ScannerNgdotPeekArgs)(nil), // 5: ngolofuzz.ScannerNgdotPeekArgs
(*ScannerNgdotScanArgs)(nil), // 6: ngolofuzz.ScannerNgdotScanArgs
(*ScannerNgdotPosArgs)(nil), // 7: ngolofuzz.ScannerNgdotPosArgs
(*ScannerNgdotTokenTextArgs)(nil), // 8: ngolofuzz.ScannerNgdotTokenTextArgs
(*NgoloFuzzOne)(nil), // 9: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 10: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 11: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.PositionNgdotIsValid:type_name -> ngolofuzz.PositionNgdotIsValidArgs
1, // 1: ngolofuzz.NgoloFuzzOne.PositionNgdotString:type_name -> ngolofuzz.PositionNgdotStringArgs
2, // 2: ngolofuzz.NgoloFuzzOne.TokenString:type_name -> ngolofuzz.TokenStringArgs
3, // 3: ngolofuzz.NgoloFuzzOne.ScannerNgdotInit:type_name -> ngolofuzz.ScannerNgdotInitArgs
4, // 4: ngolofuzz.NgoloFuzzOne.ScannerNgdotNext:type_name -> ngolofuzz.ScannerNgdotNextArgs
5, // 5: ngolofuzz.NgoloFuzzOne.ScannerNgdotPeek:type_name -> ngolofuzz.ScannerNgdotPeekArgs
6, // 6: ngolofuzz.NgoloFuzzOne.ScannerNgdotScan:type_name -> ngolofuzz.ScannerNgdotScanArgs
7, // 7: ngolofuzz.NgoloFuzzOne.ScannerNgdotPos:type_name -> ngolofuzz.ScannerNgdotPosArgs
8, // 8: ngolofuzz.NgoloFuzzOne.ScannerNgdotTokenText:type_name -> ngolofuzz.ScannerNgdotTokenTextArgs
9, // 9: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
10, // [10:10] is the sub-list for method output_type
10, // [10:10] is the sub-list for method input_type
10, // [10:10] is the sub-list for extension type_name
10, // [10:10] is the sub-list for extension extendee
0, // [0:10] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[9].OneofWrappers = []any{
(*NgoloFuzzOne_PositionNgdotIsValid)(nil),
(*NgoloFuzzOne_PositionNgdotString)(nil),
(*NgoloFuzzOne_TokenString)(nil),
(*NgoloFuzzOne_ScannerNgdotInit)(nil),
(*NgoloFuzzOne_ScannerNgdotNext)(nil),
(*NgoloFuzzOne_ScannerNgdotPeek)(nil),
(*NgoloFuzzOne_ScannerNgdotScan)(nil),
(*NgoloFuzzOne_ScannerNgdotPos)(nil),
(*NgoloFuzzOne_ScannerNgdotTokenText)(nil),
}
file_ngolofuzz_proto_msgTypes[10].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 12,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_text_tabwriter
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"text/tabwriter"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in: reads are served from a
// fixed byte buffer and writes are discarded.
type FuzzingConn struct {
	buf    []byte // data served to Read
	offset int    // number of bytes of buf already consumed
}

// Read implements io.Reader: it copies up to len(b) of the remaining bytes
// into b, advances the offset by the number of bytes actually copied, and
// returns io.EOF once the buffer is exhausted.
//
// Bug fixed: the original computed its short-read branch with an inverted
// sign (len(b) < len(c.buf)+c.offset instead of len(c.buf)-c.offset) and
// then advanced the offset by len(b) even when fewer bytes remained, so it
// could report more bytes read than were written into b.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy returns min(len(b), remaining), which is exactly the io.Reader
	// contract for a partial or full read.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write implements io.Writer by discarding b and reporting it as fully
// written, so code under test can "send" without a real sink.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the connection as drained by moving the read offset to the
// end of the buffer; subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a stub net.Addr returning fixed network/address strings.
type FuzzingAddr struct{}
// Network returns a fixed placeholder network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
// String returns a fixed placeholder address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr returns the fixed stub address.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns the fixed stub address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op: deadlines are meaningless for an in-memory buffer.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn whose reads serve those bytes.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO only add these functions if needed
// CreateBigInt builds a big.Int whose absolute value is the big-endian
// interpretation of a (an empty or nil slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader for APIs that want *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray converts each int64 in a to the platform int type
// (values may truncate on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to a uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL ('\x00') when s is empty.
// Ranging over the string performs UTF-8 decoding, so multi-byte runes
// are returned whole (invalid bytes decode to utf8.RuneError).
func GetRune(s string) rune {
	for _, r := range s {
		return r
	}
	return '\x00'
}
// FuzzNG_valid is the entry point for inputs that are expected to already be
// a valid protobuf encoding of NgoloFuzzList (e.g. from a protobuf-aware
// mutator); a decode failure is a harness error and panics.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// The recover is installed only after the unmarshal check, so it cannot
// mask the harness panic above; it swallows string panics raised while
// replaying the call list and re-raises anything else.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point for raw fuzzer inputs: we are unsure the
// input is a valid protobuf, so decode failures are simply uninteresting
// (return 0) rather than fatal.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics raised while replaying the call list; re-raise
// any other panic value so genuine crashes still surface.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer setup in FuzzNG_List.
var initialized bool
// FuzzNG_List replays the generated call list against text/tabwriter.
// Each NgoloFuzzOne selects one API call; *tabwriter.Writer values produced
// along the way are appended to WriterResults and reused round-robin as
// receivers for subsequent method calls.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// FUZZ_NG_REPRODUCER optionally names a file that receives a Go-source
// reproducer of the first replayed input (see PrintNG_List).
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var WriterResults []*tabwriter.Writer
WriterResultsIndex := 0
for l := range gen.List {
// Bound the number of replayed calls to keep individual runs fast.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_WriterNgdotInit:
// Init needs an existing receiver; skip until one has been created.
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
arg1 := bytes.NewBuffer(a.WriterNgdotInit.Output)
arg2 := int(a.WriterNgdotInit.Minwidth)
arg3 := int(a.WriterNgdotInit.Tabwidth)
arg4 := int(a.WriterNgdotInit.Padding)
arg5 := byte(a.WriterNgdotInit.Padchar)
arg6 := uint(a.WriterNgdotInit.Flags)
// NOTE(review): unlike the NewWriter case below, padding is NOT reduced
// modulo 0x10001 here — presumably relying on the string-panic recover
// in the callers; confirm this asymmetry is intended.
r0 := arg0.Init(arg1, arg2, arg3, arg4, arg5, arg6)
if r0 != nil{
WriterResults = append(WriterResults, r0)
}
case *NgoloFuzzOne_WriterNgdotFlush:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
r0 := arg0.Flush()
if r0 != nil{
// Touch the error's Error method, then stop replaying this input.
r0.Error()
return 0
}
case *NgoloFuzzOne_WriterNgdotWrite:
if len(WriterResults) == 0 {
continue
}
arg0 := WriterResults[WriterResultsIndex]
WriterResultsIndex = (WriterResultsIndex + 1) % len(WriterResults)
_, r1 := arg0.Write(a.WriterNgdotWrite.Buf)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_NewWriter:
arg0 := bytes.NewBuffer(a.NewWriter.Output)
arg1 := int(a.NewWriter.Minwidth)
arg2 := int(a.NewWriter.Tabwidth)
arg3 := int(a.NewWriter.Padding)
arg4 := byte(a.NewWriter.Padchar)
arg5 := uint(a.NewWriter.Flags)
// padding is bounded by a modulus here, presumably to limit
// padding-driven work in the writer — TODO confirm rationale.
r0 := tabwriter.NewWriter(arg0, arg1, arg2, arg3 % 0x10001, arg4, arg5)
if r0 != nil{
WriterResults = append(WriterResults, r0)
}
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w, mirroring the
// replay logic of FuzzNG_List so that the emitted statements correspond to
// the tabwriter calls that would be executed for the same input.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
WriterNb := 0
WriterResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_WriterNgdotInit:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d := Writer%d.Init(bytes.NewBuffer(%#+v), int(%#+v), int(%#+v), int(%#+v), byte(%#+v), uint(%#+v))\n", WriterNb, WriterResultsIndex, a.WriterNgdotInit.Output, a.WriterNgdotInit.Minwidth, a.WriterNgdotInit.Tabwidth, a.WriterNgdotInit.Padding, a.WriterNgdotInit.Padchar, a.WriterNgdotInit.Flags))
WriterNb = WriterNb + 1
// NOTE(review): the index advances modulo the already-incremented
// WriterNb, while FuzzNG_List advances modulo the pre-append length;
// printed receiver indices may drift from the executed ones — confirm.
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotFlush:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Flush()\n", WriterResultsIndex))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_WriterNgdotWrite:
if WriterNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Writer%d.Write(%#+v)\n", WriterResultsIndex, a.WriterNgdotWrite.Buf))
WriterResultsIndex = (WriterResultsIndex + 1) % WriterNb
case *NgoloFuzzOne_NewWriter:
// The %% 0x10001 in the emitted source mirrors the bound applied by
// FuzzNG_List's NewWriter case.
w.WriteString(fmt.Sprintf("Writer%d := tabwriter.NewWriter(bytes.NewBuffer(%#+v), int(%#+v), int(%#+v), int(%#+v) %% 0x10001, byte(%#+v), uint(%#+v))\n", WriterNb, a.NewWriter.Output, a.NewWriter.Minwidth, a.NewWriter.Tabwidth, a.NewWriter.Padding, a.NewWriter.Padchar, a.NewWriter.Flags))
WriterNb = WriterNb + 1
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_text_tabwriter
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type WriterNgdotInitArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Output []byte `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"`
Minwidth int64 `protobuf:"varint,2,opt,name=minwidth,proto3" json:"minwidth,omitempty"`
Tabwidth int64 `protobuf:"varint,3,opt,name=tabwidth,proto3" json:"tabwidth,omitempty"`
Padding int64 `protobuf:"varint,4,opt,name=padding,proto3" json:"padding,omitempty"`
Padchar uint32 `protobuf:"varint,5,opt,name=padchar,proto3" json:"padchar,omitempty"`
Flags uint32 `protobuf:"varint,6,opt,name=flags,proto3" json:"flags,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotInitArgs) Reset() {
*x = WriterNgdotInitArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotInitArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotInitArgs) ProtoMessage() {}
func (x *WriterNgdotInitArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotInitArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotInitArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *WriterNgdotInitArgs) GetOutput() []byte {
if x != nil {
return x.Output
}
return nil
}
func (x *WriterNgdotInitArgs) GetMinwidth() int64 {
if x != nil {
return x.Minwidth
}
return 0
}
func (x *WriterNgdotInitArgs) GetTabwidth() int64 {
if x != nil {
return x.Tabwidth
}
return 0
}
func (x *WriterNgdotInitArgs) GetPadding() int64 {
if x != nil {
return x.Padding
}
return 0
}
func (x *WriterNgdotInitArgs) GetPadchar() uint32 {
if x != nil {
return x.Padchar
}
return 0
}
func (x *WriterNgdotInitArgs) GetFlags() uint32 {
if x != nil {
return x.Flags
}
return 0
}
type WriterNgdotFlushArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotFlushArgs) Reset() {
*x = WriterNgdotFlushArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotFlushArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotFlushArgs) ProtoMessage() {}
func (x *WriterNgdotFlushArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotFlushArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotFlushArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
type WriterNgdotWriteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Buf []byte `protobuf:"bytes,1,opt,name=buf,proto3" json:"buf,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WriterNgdotWriteArgs) Reset() {
*x = WriterNgdotWriteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WriterNgdotWriteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WriterNgdotWriteArgs) ProtoMessage() {}
func (x *WriterNgdotWriteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WriterNgdotWriteArgs.ProtoReflect.Descriptor instead.
func (*WriterNgdotWriteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *WriterNgdotWriteArgs) GetBuf() []byte {
if x != nil {
return x.Buf
}
return nil
}
type NewWriterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Output []byte `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"`
Minwidth int64 `protobuf:"varint,2,opt,name=minwidth,proto3" json:"minwidth,omitempty"`
Tabwidth int64 `protobuf:"varint,3,opt,name=tabwidth,proto3" json:"tabwidth,omitempty"`
Padding int64 `protobuf:"varint,4,opt,name=padding,proto3" json:"padding,omitempty"`
Padchar uint32 `protobuf:"varint,5,opt,name=padchar,proto3" json:"padchar,omitempty"`
Flags uint32 `protobuf:"varint,6,opt,name=flags,proto3" json:"flags,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewWriterArgs) Reset() {
*x = NewWriterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewWriterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewWriterArgs) ProtoMessage() {}
func (x *NewWriterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewWriterArgs.ProtoReflect.Descriptor instead.
func (*NewWriterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *NewWriterArgs) GetOutput() []byte {
if x != nil {
return x.Output
}
return nil
}
func (x *NewWriterArgs) GetMinwidth() int64 {
if x != nil {
return x.Minwidth
}
return 0
}
func (x *NewWriterArgs) GetTabwidth() int64 {
if x != nil {
return x.Tabwidth
}
return 0
}
func (x *NewWriterArgs) GetPadding() int64 {
if x != nil {
return x.Padding
}
return 0
}
func (x *NewWriterArgs) GetPadchar() uint32 {
if x != nil {
return x.Padchar
}
return 0
}
func (x *NewWriterArgs) GetFlags() uint32 {
if x != nil {
return x.Flags
}
return 0
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_WriterNgdotInit
// *NgoloFuzzOne_WriterNgdotFlush
// *NgoloFuzzOne_WriterNgdotWrite
// *NgoloFuzzOne_NewWriter
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotInit() *WriterNgdotInitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotInit); ok {
return x.WriterNgdotInit
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotFlush() *WriterNgdotFlushArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotFlush); ok {
return x.WriterNgdotFlush
}
}
return nil
}
func (x *NgoloFuzzOne) GetWriterNgdotWrite() *WriterNgdotWriteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_WriterNgdotWrite); ok {
return x.WriterNgdotWrite
}
}
return nil
}
func (x *NgoloFuzzOne) GetNewWriter() *NewWriterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_NewWriter); ok {
return x.NewWriter
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_WriterNgdotInit struct {
WriterNgdotInit *WriterNgdotInitArgs `protobuf:"bytes,1,opt,name=WriterNgdotInit,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotFlush struct {
WriterNgdotFlush *WriterNgdotFlushArgs `protobuf:"bytes,2,opt,name=WriterNgdotFlush,proto3,oneof"`
}
type NgoloFuzzOne_WriterNgdotWrite struct {
WriterNgdotWrite *WriterNgdotWriteArgs `protobuf:"bytes,3,opt,name=WriterNgdotWrite,proto3,oneof"`
}
type NgoloFuzzOne_NewWriter struct {
NewWriter *NewWriterArgs `protobuf:"bytes,4,opt,name=NewWriter,proto3,oneof"`
}
func (*NgoloFuzzOne_WriterNgdotInit) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotFlush) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WriterNgdotWrite) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewWriter) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\xaf\x01\n" +
"\x13WriterNgdotInitArgs\x12\x16\n" +
"\x06output\x18\x01 \x01(\fR\x06output\x12\x1a\n" +
"\bminwidth\x18\x02 \x01(\x03R\bminwidth\x12\x1a\n" +
"\btabwidth\x18\x03 \x01(\x03R\btabwidth\x12\x18\n" +
"\apadding\x18\x04 \x01(\x03R\apadding\x12\x18\n" +
"\apadchar\x18\x05 \x01(\rR\apadchar\x12\x14\n" +
"\x05flags\x18\x06 \x01(\rR\x05flags\"\x16\n" +
"\x14WriterNgdotFlushArgs\"(\n" +
"\x14WriterNgdotWriteArgs\x12\x10\n" +
"\x03buf\x18\x01 \x01(\fR\x03buf\"\xa9\x01\n" +
"\rNewWriterArgs\x12\x16\n" +
"\x06output\x18\x01 \x01(\fR\x06output\x12\x1a\n" +
"\bminwidth\x18\x02 \x01(\x03R\bminwidth\x12\x1a\n" +
"\btabwidth\x18\x03 \x01(\x03R\btabwidth\x12\x18\n" +
"\apadding\x18\x04 \x01(\x03R\apadding\x12\x18\n" +
"\apadchar\x18\x05 \x01(\rR\apadchar\x12\x14\n" +
"\x05flags\x18\x06 \x01(\rR\x05flags\"\xba\x02\n" +
"\fNgoloFuzzOne\x12J\n" +
"\x0fWriterNgdotInit\x18\x01 \x01(\v2\x1e.ngolofuzz.WriterNgdotInitArgsH\x00R\x0fWriterNgdotInit\x12M\n" +
"\x10WriterNgdotFlush\x18\x02 \x01(\v2\x1f.ngolofuzz.WriterNgdotFlushArgsH\x00R\x10WriterNgdotFlush\x12M\n" +
"\x10WriterNgdotWrite\x18\x03 \x01(\v2\x1f.ngolofuzz.WriterNgdotWriteArgsH\x00R\x10WriterNgdotWrite\x128\n" +
"\tNewWriter\x18\x04 \x01(\v2\x18.ngolofuzz.NewWriterArgsH\x00R\tNewWriterB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1bZ\x19./;fuzz_ng_text_tabwriterb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_ngolofuzz_proto_goTypes = []any{
(*WriterNgdotInitArgs)(nil), // 0: ngolofuzz.WriterNgdotInitArgs
(*WriterNgdotFlushArgs)(nil), // 1: ngolofuzz.WriterNgdotFlushArgs
(*WriterNgdotWriteArgs)(nil), // 2: ngolofuzz.WriterNgdotWriteArgs
(*NewWriterArgs)(nil), // 3: ngolofuzz.NewWriterArgs
(*NgoloFuzzOne)(nil), // 4: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 5: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 6: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.WriterNgdotInit:type_name -> ngolofuzz.WriterNgdotInitArgs
1, // 1: ngolofuzz.NgoloFuzzOne.WriterNgdotFlush:type_name -> ngolofuzz.WriterNgdotFlushArgs
2, // 2: ngolofuzz.NgoloFuzzOne.WriterNgdotWrite:type_name -> ngolofuzz.WriterNgdotWriteArgs
3, // 3: ngolofuzz.NgoloFuzzOne.NewWriter:type_name -> ngolofuzz.NewWriterArgs
4, // 4: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
5, // [5:5] is the sub-list for method output_type
5, // [5:5] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[4].OneofWrappers = []any{
(*NgoloFuzzOne_WriterNgdotInit)(nil),
(*NgoloFuzzOne_WriterNgdotFlush)(nil),
(*NgoloFuzzOne_WriterNgdotWrite)(nil),
(*NgoloFuzzOne_NewWriter)(nil),
}
file_ngolofuzz_proto_msgTypes[5].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_text_template
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"text/template"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in: reads are served from a
// fixed byte buffer and writes are discarded.
type FuzzingConn struct {
	buf    []byte // data served to Read
	offset int    // number of bytes of buf already consumed
}

// Read implements io.Reader: it copies up to len(b) of the remaining bytes
// into b, advances the offset by the number of bytes actually copied, and
// returns io.EOF once the buffer is exhausted.
//
// Bug fixed: the original computed its short-read branch with an inverted
// sign (len(b) < len(c.buf)+c.offset instead of len(c.buf)-c.offset) and
// then advanced the offset by len(b) even when fewer bytes remained, so it
// could report more bytes read than were written into b.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy returns min(len(b), remaining), which is exactly the io.Reader
	// contract for a partial or full read.
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the buffer as fully consumed so subsequent Reads hit EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr with fixed values.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (a *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String reports a fixed fake address string.
func (a *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr { return &FuzzingAddr{} }
// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr { return &FuzzingAddr{} }
// SetDeadline is a no-op; fuzzing connections ignore deadlines.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn wraps a into a FuzzingConn positioned at the start
// of the data.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: generate these helper constructors only when the harness actually needs them.
// CreateBigInt builds a big.Int from the big-endian bytes in a.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the bytes in a with a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray converts each int64 in a to the platform int type
// (truncating on 32-bit platforms).
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to a uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or '\x00' if s is empty.
func GetRune(s string) rune {
	if s == "" {
		return '\x00'
	}
	return []rune(s)[0]
}
// FuzzNG_valid decodes data as a protobuf-encoded call list and replays
// it. The input is expected to be valid protobuf; a decode failure
// panics. String panics raised while replaying are swallowed, any other
// panic value is re-raised.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
			// string panics are expected from template execution: ignore.
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure decodes data as a protobuf-encoded call list and replays
// it. Unlike FuzzNG_valid, the input may not be valid protobuf: decode
// failures are silently ignored. String panics raised while replaying
// are swallowed, any other panic value is re-raised.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, gen); err != nil {
		return 0
	}
	defer func() {
		if r := recover(); r != nil {
			if _, isString := r.(string); !isString {
				panic(r)
			}
			// string panics are expected from template execution: ignore.
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool
// FuzzNG_List replays a generated list of text/template API calls.
// Templates produced along the way are collected in TemplateResults, and
// method-call cases pick their receiver round-robin via
// TemplateResultsIndex. Returns 1 when the whole list executed, 0 on an
// early exit (length cap hit or a template call returned an error).
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// First call only: if FUZZ_NG_REPRODUCER names a file, dump a
// human-readable reproducer of this input there.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
var TemplateResults []*template.Template
TemplateResultsIndex := 0
for l := range gen.List {
// Cap the number of executed calls per input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_TemplateNgdotExecuteTemplate:
// Method cases skip silently when no template exists yet.
if len(TemplateResults) == 0 {
continue
}
arg0 := TemplateResults[TemplateResultsIndex]
TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
arg1 := bytes.NewBuffer(a.TemplateNgdotExecuteTemplate.Wr)
r0 := arg0.ExecuteTemplate(arg1, a.TemplateNgdotExecuteTemplate.Name, a.TemplateNgdotExecuteTemplate.Data)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_TemplateNgdotExecute:
if len(TemplateResults) == 0 {
continue
}
arg0 := TemplateResults[TemplateResultsIndex]
TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
arg1 := bytes.NewBuffer(a.TemplateNgdotExecute.Wr)
r0 := arg0.Execute(arg1, a.TemplateNgdotExecute.Data)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_TemplateNgdotDefinedTemplates:
if len(TemplateResults) == 0 {
continue
}
arg0 := TemplateResults[TemplateResultsIndex]
TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
arg0.DefinedTemplates()
case *NgoloFuzzOne_IsTrue:
template.IsTrue(a.IsTrue.Val)
case *NgoloFuzzOne_HTMLEscape:
arg0 := bytes.NewBuffer(a.HTMLEscape.W)
template.HTMLEscape(arg0, a.HTMLEscape.B)
case *NgoloFuzzOne_HTMLEscapeString:
template.HTMLEscapeString(a.HTMLEscapeString.S)
case *NgoloFuzzOne_JSEscape:
arg0 := bytes.NewBuffer(a.JSEscape.W)
template.JSEscape(arg0, a.JSEscape.B)
case *NgoloFuzzOne_JSEscapeString:
template.JSEscapeString(a.JSEscapeString.S)
case *NgoloFuzzOne_New:
// Constructor case: grows the pool of templates.
r0 := template.New(a.New.Name)
if r0 != nil{
TemplateResults = append(TemplateResults, r0)
}
case *NgoloFuzzOne_TemplateNgdotName:
if len(TemplateResults) == 0 {
continue
}
arg0 := TemplateResults[TemplateResultsIndex]
TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
arg0.Name()
case *NgoloFuzzOne_TemplateNgdotNew:
if len(TemplateResults) == 0 {
continue
}
arg0 := TemplateResults[TemplateResultsIndex]
TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
r0 := arg0.New(a.TemplateNgdotNew.Name)
if r0 != nil{
TemplateResults = append(TemplateResults, r0)
}
case *NgoloFuzzOne_TemplateNgdotClone:
if len(TemplateResults) == 0 {
continue
}
arg0 := TemplateResults[TemplateResultsIndex]
TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
r0, r1 := arg0.Clone()
if r0 != nil{
TemplateResults = append(TemplateResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TemplateNgdotTemplates:
if len(TemplateResults) == 0 {
continue
}
arg0 := TemplateResults[TemplateResultsIndex]
TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
r0 := arg0.Templates()
TemplateResults = append(TemplateResults, r0...)
case *NgoloFuzzOne_TemplateNgdotDelims:
if len(TemplateResults) == 0 {
continue
}
arg0 := TemplateResults[TemplateResultsIndex]
TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
r0 := arg0.Delims(a.TemplateNgdotDelims.Left, a.TemplateNgdotDelims.Right)
if r0 != nil{
TemplateResults = append(TemplateResults, r0)
}
case *NgoloFuzzOne_TemplateNgdotLookup:
if len(TemplateResults) == 0 {
continue
}
arg0 := TemplateResults[TemplateResultsIndex]
TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
r0 := arg0.Lookup(a.TemplateNgdotLookup.Name)
if r0 != nil{
TemplateResults = append(TemplateResults, r0)
}
case *NgoloFuzzOne_TemplateNgdotParse:
if len(TemplateResults) == 0 {
continue
}
arg0 := TemplateResults[TemplateResultsIndex]
TemplateResultsIndex = (TemplateResultsIndex + 1) % len(TemplateResults)
r0, r1 := arg0.Parse(a.TemplateNgdotParse.Text)
if r0 != nil{
TemplateResults = append(TemplateResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
}
}
return 1
}
// PrintNG_List writes a Go-like reproducer of gen to w, mirroring the
// calls FuzzNG_List would execute. TemplateNb counts template variables
// declared so far; TemplateResultsIndex is the round-robin receiver
// index.
// NOTE(review): in the template-producing cases (New-on-template, Clone,
// Templates, Delims, Lookup, Parse) TemplateNb is incremented BEFORE the
// modulo update of TemplateResultsIndex, whereas FuzzNG_List advances
// its index against the pre-append pool length — printed receiver
// indices can therefore diverge from the executed ones. Confirm against
// the generator before trusting reproducers with more than one template.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
TemplateNb := 0
TemplateResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_TemplateNgdotExecuteTemplate:
// Method cases are skipped until a template variable exists.
if TemplateNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Template%d.ExecuteTemplate(bytes.NewBuffer(%#+v), %#+v, %#+v)\n", TemplateResultsIndex, a.TemplateNgdotExecuteTemplate.Wr, a.TemplateNgdotExecuteTemplate.Name, a.TemplateNgdotExecuteTemplate.Data))
TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
case *NgoloFuzzOne_TemplateNgdotExecute:
if TemplateNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Template%d.Execute(bytes.NewBuffer(%#+v), %#+v)\n", TemplateResultsIndex, a.TemplateNgdotExecute.Wr, a.TemplateNgdotExecute.Data))
TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
case *NgoloFuzzOne_TemplateNgdotDefinedTemplates:
if TemplateNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Template%d.DefinedTemplates()\n", TemplateResultsIndex))
TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
case *NgoloFuzzOne_IsTrue:
w.WriteString(fmt.Sprintf("template.IsTrue(%#+v)\n", a.IsTrue.Val))
case *NgoloFuzzOne_HTMLEscape:
w.WriteString(fmt.Sprintf("template.HTMLEscape(bytes.NewBuffer(%#+v), %#+v)\n", a.HTMLEscape.W, a.HTMLEscape.B))
case *NgoloFuzzOne_HTMLEscapeString:
w.WriteString(fmt.Sprintf("template.HTMLEscapeString(%#+v)\n", a.HTMLEscapeString.S))
case *NgoloFuzzOne_JSEscape:
w.WriteString(fmt.Sprintf("template.JSEscape(bytes.NewBuffer(%#+v), %#+v)\n", a.JSEscape.W, a.JSEscape.B))
case *NgoloFuzzOne_JSEscapeString:
w.WriteString(fmt.Sprintf("template.JSEscapeString(%#+v)\n", a.JSEscapeString.S))
case *NgoloFuzzOne_New:
w.WriteString(fmt.Sprintf("Template%d := template.New(%#+v)\n", TemplateNb, a.New.Name))
TemplateNb = TemplateNb + 1
case *NgoloFuzzOne_TemplateNgdotName:
if TemplateNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Template%d.Name()\n", TemplateResultsIndex))
TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
case *NgoloFuzzOne_TemplateNgdotNew:
if TemplateNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Template%d := Template%d.New(%#+v)\n", TemplateNb, TemplateResultsIndex, a.TemplateNgdotNew.Name))
TemplateNb = TemplateNb + 1
TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
case *NgoloFuzzOne_TemplateNgdotClone:
if TemplateNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Template%d, _ := Template%d.Clone()\n", TemplateNb, TemplateResultsIndex))
TemplateNb = TemplateNb + 1
TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
case *NgoloFuzzOne_TemplateNgdotTemplates:
if TemplateNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Template%d := Template%d.Templates()\n", TemplateNb, TemplateResultsIndex))
TemplateNb = TemplateNb + 1
TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
case *NgoloFuzzOne_TemplateNgdotDelims:
if TemplateNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Template%d := Template%d.Delims(%#+v, %#+v)\n", TemplateNb, TemplateResultsIndex, a.TemplateNgdotDelims.Left, a.TemplateNgdotDelims.Right))
TemplateNb = TemplateNb + 1
TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
case *NgoloFuzzOne_TemplateNgdotLookup:
if TemplateNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Template%d := Template%d.Lookup(%#+v)\n", TemplateNb, TemplateResultsIndex, a.TemplateNgdotLookup.Name))
TemplateNb = TemplateNb + 1
TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
case *NgoloFuzzOne_TemplateNgdotParse:
if TemplateNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Template%d, _ := Template%d.Parse(%#+v)\n", TemplateNb, TemplateResultsIndex, a.TemplateNgdotParse.Text))
TemplateNb = TemplateNb + 1
TemplateResultsIndex = (TemplateResultsIndex + 1) % TemplateNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_text_template
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: fail the build if this generated code and the
// protoimpl runtime disagree on supported versions.
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Generated protobuf message types (protoc-gen-go) describing arguments
// for the fuzzed text/template calls. Do not edit by hand.

// TemplateNgdotExecuteTemplateArgs carries the arguments for
// Template.ExecuteTemplate: the writer's seed bytes, the template name,
// and the data value.
type TemplateNgdotExecuteTemplateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Wr []byte `protobuf:"bytes,1,opt,name=wr,proto3" json:"wr,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Data *NgoloFuzzAny `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotExecuteTemplateArgs) Reset() {
*x = TemplateNgdotExecuteTemplateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotExecuteTemplateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotExecuteTemplateArgs) ProtoMessage() {}
func (x *TemplateNgdotExecuteTemplateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotExecuteTemplateArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotExecuteTemplateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *TemplateNgdotExecuteTemplateArgs) GetWr() []byte {
if x != nil {
return x.Wr
}
return nil
}
func (x *TemplateNgdotExecuteTemplateArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *TemplateNgdotExecuteTemplateArgs) GetData() *NgoloFuzzAny {
if x != nil {
return x.Data
}
return nil
}

// TemplateNgdotExecuteArgs carries the arguments for Template.Execute.
type TemplateNgdotExecuteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Wr []byte `protobuf:"bytes,1,opt,name=wr,proto3" json:"wr,omitempty"`
Data *NgoloFuzzAny `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotExecuteArgs) Reset() {
*x = TemplateNgdotExecuteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotExecuteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotExecuteArgs) ProtoMessage() {}
func (x *TemplateNgdotExecuteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotExecuteArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotExecuteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *TemplateNgdotExecuteArgs) GetWr() []byte {
if x != nil {
return x.Wr
}
return nil
}
func (x *TemplateNgdotExecuteArgs) GetData() *NgoloFuzzAny {
if x != nil {
return x.Data
}
return nil
}

// TemplateNgdotDefinedTemplatesArgs is an empty marker message for the
// no-argument Template.DefinedTemplates call.
type TemplateNgdotDefinedTemplatesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotDefinedTemplatesArgs) Reset() {
*x = TemplateNgdotDefinedTemplatesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotDefinedTemplatesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotDefinedTemplatesArgs) ProtoMessage() {}
func (x *TemplateNgdotDefinedTemplatesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotDefinedTemplatesArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotDefinedTemplatesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// IsTrueArgs carries the argument for template.IsTrue.
type IsTrueArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Val *NgoloFuzzAny `protobuf:"bytes,1,opt,name=val,proto3" json:"val,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IsTrueArgs) Reset() {
*x = IsTrueArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IsTrueArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IsTrueArgs) ProtoMessage() {}
func (x *IsTrueArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IsTrueArgs.ProtoReflect.Descriptor instead.
func (*IsTrueArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *IsTrueArgs) GetVal() *NgoloFuzzAny {
if x != nil {
return x.Val
}
return nil
}

// HTMLEscapeArgs carries the arguments for template.HTMLEscape.
type HTMLEscapeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
B []byte `protobuf:"bytes,2,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HTMLEscapeArgs) Reset() {
*x = HTMLEscapeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HTMLEscapeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HTMLEscapeArgs) ProtoMessage() {}
func (x *HTMLEscapeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HTMLEscapeArgs.ProtoReflect.Descriptor instead.
func (*HTMLEscapeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *HTMLEscapeArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
func (x *HTMLEscapeArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}

// HTMLEscapeStringArgs carries the argument for template.HTMLEscapeString.
type HTMLEscapeStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *HTMLEscapeStringArgs) Reset() {
*x = HTMLEscapeStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *HTMLEscapeStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*HTMLEscapeStringArgs) ProtoMessage() {}
func (x *HTMLEscapeStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use HTMLEscapeStringArgs.ProtoReflect.Descriptor instead.
func (*HTMLEscapeStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *HTMLEscapeStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}

// JSEscapeArgs carries the arguments for template.JSEscape.
type JSEscapeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
W []byte `protobuf:"bytes,1,opt,name=w,proto3" json:"w,omitempty"`
B []byte `protobuf:"bytes,2,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *JSEscapeArgs) Reset() {
*x = JSEscapeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *JSEscapeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*JSEscapeArgs) ProtoMessage() {}
func (x *JSEscapeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use JSEscapeArgs.ProtoReflect.Descriptor instead.
func (*JSEscapeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *JSEscapeArgs) GetW() []byte {
if x != nil {
return x.W
}
return nil
}
func (x *JSEscapeArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}

// JSEscapeStringArgs carries the argument for template.JSEscapeString.
type JSEscapeStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *JSEscapeStringArgs) Reset() {
*x = JSEscapeStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *JSEscapeStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*JSEscapeStringArgs) ProtoMessage() {}
func (x *JSEscapeStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use JSEscapeStringArgs.ProtoReflect.Descriptor instead.
func (*JSEscapeStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *JSEscapeStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
// NewArgs carries the argument for template.New.
type NewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NewArgs) Reset() {
*x = NewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NewArgs) ProtoMessage() {}
func (x *NewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NewArgs.ProtoReflect.Descriptor instead.
func (*NewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NewArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}

// TemplateNgdotNameArgs is an empty marker message for Template.Name.
type TemplateNgdotNameArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotNameArgs) Reset() {
*x = TemplateNgdotNameArgs{}
mi := &file_ngolofuzz_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotNameArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotNameArgs) ProtoMessage() {}
func (x *TemplateNgdotNameArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotNameArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotNameArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// TemplateNgdotNewArgs carries the argument for Template.New.
type TemplateNgdotNewArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotNewArgs) Reset() {
*x = TemplateNgdotNewArgs{}
mi := &file_ngolofuzz_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotNewArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotNewArgs) ProtoMessage() {}
func (x *TemplateNgdotNewArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotNewArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotNewArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
func (x *TemplateNgdotNewArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}

// TemplateNgdotCloneArgs is an empty marker message for Template.Clone.
type TemplateNgdotCloneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotCloneArgs) Reset() {
*x = TemplateNgdotCloneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotCloneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotCloneArgs) ProtoMessage() {}
func (x *TemplateNgdotCloneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[11]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotCloneArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotCloneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// TemplateNgdotTemplatesArgs is an empty marker message for
// Template.Templates.
type TemplateNgdotTemplatesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotTemplatesArgs) Reset() {
*x = TemplateNgdotTemplatesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotTemplatesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotTemplatesArgs) ProtoMessage() {}
func (x *TemplateNgdotTemplatesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotTemplatesArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotTemplatesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// TemplateNgdotDelimsArgs carries the arguments for Template.Delims.
type TemplateNgdotDelimsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Left string `protobuf:"bytes,1,opt,name=left,proto3" json:"left,omitempty"`
Right string `protobuf:"bytes,2,opt,name=right,proto3" json:"right,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotDelimsArgs) Reset() {
*x = TemplateNgdotDelimsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotDelimsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotDelimsArgs) ProtoMessage() {}
func (x *TemplateNgdotDelimsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotDelimsArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotDelimsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
func (x *TemplateNgdotDelimsArgs) GetLeft() string {
if x != nil {
return x.Left
}
return ""
}
func (x *TemplateNgdotDelimsArgs) GetRight() string {
if x != nil {
return x.Right
}
return ""
}

// TemplateNgdotLookupArgs carries the argument for Template.Lookup.
type TemplateNgdotLookupArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotLookupArgs) Reset() {
*x = TemplateNgdotLookupArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotLookupArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotLookupArgs) ProtoMessage() {}
func (x *TemplateNgdotLookupArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotLookupArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotLookupArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
func (x *TemplateNgdotLookupArgs) GetName() string {
if x != nil {
return x.Name
}
return ""
}

// TemplateNgdotParseArgs carries the argument for Template.Parse.
type TemplateNgdotParseArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TemplateNgdotParseArgs) Reset() {
*x = TemplateNgdotParseArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TemplateNgdotParseArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TemplateNgdotParseArgs) ProtoMessage() {}
func (x *TemplateNgdotParseArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TemplateNgdotParseArgs.ProtoReflect.Descriptor instead.
func (*TemplateNgdotParseArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
func (x *TemplateNgdotParseArgs) GetText() string {
if x != nil {
return x.Text
}
return ""
}
// NgoloFuzzOne is one step of a fuzzed call sequence: a oneof over every
// text/template API call the harness can replay.
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_TemplateNgdotExecuteTemplate
// *NgoloFuzzOne_TemplateNgdotExecute
// *NgoloFuzzOne_TemplateNgdotDefinedTemplates
// *NgoloFuzzOne_IsTrue
// *NgoloFuzzOne_HTMLEscape
// *NgoloFuzzOne_HTMLEscapeString
// *NgoloFuzzOne_JSEscape
// *NgoloFuzzOne_JSEscapeString
// *NgoloFuzzOne_New
// *NgoloFuzzOne_TemplateNgdotName
// *NgoloFuzzOne_TemplateNgdotNew
// *NgoloFuzzOne_TemplateNgdotClone
// *NgoloFuzzOne_TemplateNgdotTemplates
// *NgoloFuzzOne_TemplateNgdotDelims
// *NgoloFuzzOne_TemplateNgdotLookup
// *NgoloFuzzOne_TemplateNgdotParse
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
// Generated oneof accessors: each returns the case payload when that
// case is set, nil otherwise.
func (x *NgoloFuzzOne) GetTemplateNgdotExecuteTemplate() *TemplateNgdotExecuteTemplateArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotExecuteTemplate); ok {
return x.TemplateNgdotExecuteTemplate
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotExecute() *TemplateNgdotExecuteArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotExecute); ok {
return x.TemplateNgdotExecute
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotDefinedTemplates() *TemplateNgdotDefinedTemplatesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotDefinedTemplates); ok {
return x.TemplateNgdotDefinedTemplates
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsTrue() *IsTrueArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsTrue); ok {
return x.IsTrue
}
}
return nil
}
func (x *NgoloFuzzOne) GetHTMLEscape() *HTMLEscapeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_HTMLEscape); ok {
return x.HTMLEscape
}
}
return nil
}
func (x *NgoloFuzzOne) GetHTMLEscapeString() *HTMLEscapeStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_HTMLEscapeString); ok {
return x.HTMLEscapeString
}
}
return nil
}
func (x *NgoloFuzzOne) GetJSEscape() *JSEscapeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_JSEscape); ok {
return x.JSEscape
}
}
return nil
}
func (x *NgoloFuzzOne) GetJSEscapeString() *JSEscapeStringArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_JSEscapeString); ok {
return x.JSEscapeString
}
}
return nil
}
func (x *NgoloFuzzOne) GetNew() *NewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_New); ok {
return x.New
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotName() *TemplateNgdotNameArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotName); ok {
return x.TemplateNgdotName
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotNew() *TemplateNgdotNewArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotNew); ok {
return x.TemplateNgdotNew
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotClone() *TemplateNgdotCloneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotClone); ok {
return x.TemplateNgdotClone
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotTemplates() *TemplateNgdotTemplatesArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotTemplates); ok {
return x.TemplateNgdotTemplates
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotDelims() *TemplateNgdotDelimsArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotDelims); ok {
return x.TemplateNgdotDelims
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotLookup() *TemplateNgdotLookupArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotLookup); ok {
return x.TemplateNgdotLookup
}
}
return nil
}
func (x *NgoloFuzzOne) GetTemplateNgdotParse() *TemplateNgdotParseArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_TemplateNgdotParse); ok {
return x.TemplateNgdotParse
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_TemplateNgdotExecuteTemplate struct {
TemplateNgdotExecuteTemplate *TemplateNgdotExecuteTemplateArgs `protobuf:"bytes,1,opt,name=TemplateNgdotExecuteTemplate,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotExecute struct {
TemplateNgdotExecute *TemplateNgdotExecuteArgs `protobuf:"bytes,2,opt,name=TemplateNgdotExecute,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotDefinedTemplates struct {
TemplateNgdotDefinedTemplates *TemplateNgdotDefinedTemplatesArgs `protobuf:"bytes,3,opt,name=TemplateNgdotDefinedTemplates,proto3,oneof"`
}
type NgoloFuzzOne_IsTrue struct {
IsTrue *IsTrueArgs `protobuf:"bytes,4,opt,name=IsTrue,proto3,oneof"`
}
type NgoloFuzzOne_HTMLEscape struct {
HTMLEscape *HTMLEscapeArgs `protobuf:"bytes,5,opt,name=HTMLEscape,proto3,oneof"`
}
type NgoloFuzzOne_HTMLEscapeString struct {
HTMLEscapeString *HTMLEscapeStringArgs `protobuf:"bytes,6,opt,name=HTMLEscapeString,proto3,oneof"`
}
type NgoloFuzzOne_JSEscape struct {
JSEscape *JSEscapeArgs `protobuf:"bytes,7,opt,name=JSEscape,proto3,oneof"`
}
type NgoloFuzzOne_JSEscapeString struct {
JSEscapeString *JSEscapeStringArgs `protobuf:"bytes,8,opt,name=JSEscapeString,proto3,oneof"`
}
type NgoloFuzzOne_New struct {
New *NewArgs `protobuf:"bytes,9,opt,name=New,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotName struct {
TemplateNgdotName *TemplateNgdotNameArgs `protobuf:"bytes,10,opt,name=TemplateNgdotName,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotNew struct {
TemplateNgdotNew *TemplateNgdotNewArgs `protobuf:"bytes,11,opt,name=TemplateNgdotNew,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotClone struct {
TemplateNgdotClone *TemplateNgdotCloneArgs `protobuf:"bytes,12,opt,name=TemplateNgdotClone,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotTemplates struct {
TemplateNgdotTemplates *TemplateNgdotTemplatesArgs `protobuf:"bytes,13,opt,name=TemplateNgdotTemplates,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotDelims struct {
TemplateNgdotDelims *TemplateNgdotDelimsArgs `protobuf:"bytes,14,opt,name=TemplateNgdotDelims,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotLookup struct {
TemplateNgdotLookup *TemplateNgdotLookupArgs `protobuf:"bytes,15,opt,name=TemplateNgdotLookup,proto3,oneof"`
}
type NgoloFuzzOne_TemplateNgdotParse struct {
TemplateNgdotParse *TemplateNgdotParseArgs `protobuf:"bytes,16,opt,name=TemplateNgdotParse,proto3,oneof"`
}
func (*NgoloFuzzOne_TemplateNgdotExecuteTemplate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotExecute) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotDefinedTemplates) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsTrue) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_HTMLEscape) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_HTMLEscapeString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_JSEscape) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_JSEscapeString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_New) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotName) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotNew) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotClone) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotTemplates) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotDelims) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotLookup) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TemplateNgdotParse) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// GetList returns the message's list field, or nil for a nil receiver
// (generated nil-safe accessor).
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"s\n" +
" TemplateNgdotExecuteTemplateArgs\x12\x0e\n" +
"\x02wr\x18\x01 \x01(\fR\x02wr\x12\x12\n" +
"\x04name\x18\x02 \x01(\tR\x04name\x12+\n" +
"\x04data\x18\x03 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x04data\"W\n" +
"\x18TemplateNgdotExecuteArgs\x12\x0e\n" +
"\x02wr\x18\x01 \x01(\fR\x02wr\x12+\n" +
"\x04data\x18\x02 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x04data\"#\n" +
"!TemplateNgdotDefinedTemplatesArgs\"7\n" +
"\n" +
"IsTrueArgs\x12)\n" +
"\x03val\x18\x01 \x01(\v2\x17.ngolofuzz.NgoloFuzzAnyR\x03val\",\n" +
"\x0eHTMLEscapeArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\f\n" +
"\x01b\x18\x02 \x01(\fR\x01b\"$\n" +
"\x14HTMLEscapeStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"*\n" +
"\fJSEscapeArgs\x12\f\n" +
"\x01w\x18\x01 \x01(\fR\x01w\x12\f\n" +
"\x01b\x18\x02 \x01(\fR\x01b\"\"\n" +
"\x12JSEscapeStringArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x1d\n" +
"\aNewArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x17\n" +
"\x15TemplateNgdotNameArgs\"*\n" +
"\x14TemplateNgdotNewArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"\x18\n" +
"\x16TemplateNgdotCloneArgs\"\x1c\n" +
"\x1aTemplateNgdotTemplatesArgs\"C\n" +
"\x17TemplateNgdotDelimsArgs\x12\x12\n" +
"\x04left\x18\x01 \x01(\tR\x04left\x12\x14\n" +
"\x05right\x18\x02 \x01(\tR\x05right\"-\n" +
"\x17TemplateNgdotLookupArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\",\n" +
"\x16TemplateNgdotParseArgs\x12\x12\n" +
"\x04text\x18\x01 \x01(\tR\x04text\"\x9b\n" +
"\n" +
"\fNgoloFuzzOne\x12q\n" +
"\x1cTemplateNgdotExecuteTemplate\x18\x01 \x01(\v2+.ngolofuzz.TemplateNgdotExecuteTemplateArgsH\x00R\x1cTemplateNgdotExecuteTemplate\x12Y\n" +
"\x14TemplateNgdotExecute\x18\x02 \x01(\v2#.ngolofuzz.TemplateNgdotExecuteArgsH\x00R\x14TemplateNgdotExecute\x12t\n" +
"\x1dTemplateNgdotDefinedTemplates\x18\x03 \x01(\v2,.ngolofuzz.TemplateNgdotDefinedTemplatesArgsH\x00R\x1dTemplateNgdotDefinedTemplates\x12/\n" +
"\x06IsTrue\x18\x04 \x01(\v2\x15.ngolofuzz.IsTrueArgsH\x00R\x06IsTrue\x12;\n" +
"\n" +
"HTMLEscape\x18\x05 \x01(\v2\x19.ngolofuzz.HTMLEscapeArgsH\x00R\n" +
"HTMLEscape\x12M\n" +
"\x10HTMLEscapeString\x18\x06 \x01(\v2\x1f.ngolofuzz.HTMLEscapeStringArgsH\x00R\x10HTMLEscapeString\x125\n" +
"\bJSEscape\x18\a \x01(\v2\x17.ngolofuzz.JSEscapeArgsH\x00R\bJSEscape\x12G\n" +
"\x0eJSEscapeString\x18\b \x01(\v2\x1d.ngolofuzz.JSEscapeStringArgsH\x00R\x0eJSEscapeString\x12&\n" +
"\x03New\x18\t \x01(\v2\x12.ngolofuzz.NewArgsH\x00R\x03New\x12P\n" +
"\x11TemplateNgdotName\x18\n" +
" \x01(\v2 .ngolofuzz.TemplateNgdotNameArgsH\x00R\x11TemplateNgdotName\x12M\n" +
"\x10TemplateNgdotNew\x18\v \x01(\v2\x1f.ngolofuzz.TemplateNgdotNewArgsH\x00R\x10TemplateNgdotNew\x12S\n" +
"\x12TemplateNgdotClone\x18\f \x01(\v2!.ngolofuzz.TemplateNgdotCloneArgsH\x00R\x12TemplateNgdotClone\x12_\n" +
"\x16TemplateNgdotTemplates\x18\r \x01(\v2%.ngolofuzz.TemplateNgdotTemplatesArgsH\x00R\x16TemplateNgdotTemplates\x12V\n" +
"\x13TemplateNgdotDelims\x18\x0e \x01(\v2\".ngolofuzz.TemplateNgdotDelimsArgsH\x00R\x13TemplateNgdotDelims\x12V\n" +
"\x13TemplateNgdotLookup\x18\x0f \x01(\v2\".ngolofuzz.TemplateNgdotLookupArgsH\x00R\x13TemplateNgdotLookup\x12S\n" +
"\x12TemplateNgdotParse\x18\x10 \x01(\v2!.ngolofuzz.TemplateNgdotParseArgsH\x00R\x12TemplateNgdotParseB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_text_templateb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file descriptor
// exactly once (guarded by file_ngolofuzz_proto_rawDescOnce) and returns the
// cached bytes. Used by the deprecated Descriptor() methods.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
var file_ngolofuzz_proto_goTypes = []any{
(*TemplateNgdotExecuteTemplateArgs)(nil), // 0: ngolofuzz.TemplateNgdotExecuteTemplateArgs
(*TemplateNgdotExecuteArgs)(nil), // 1: ngolofuzz.TemplateNgdotExecuteArgs
(*TemplateNgdotDefinedTemplatesArgs)(nil), // 2: ngolofuzz.TemplateNgdotDefinedTemplatesArgs
(*IsTrueArgs)(nil), // 3: ngolofuzz.IsTrueArgs
(*HTMLEscapeArgs)(nil), // 4: ngolofuzz.HTMLEscapeArgs
(*HTMLEscapeStringArgs)(nil), // 5: ngolofuzz.HTMLEscapeStringArgs
(*JSEscapeArgs)(nil), // 6: ngolofuzz.JSEscapeArgs
(*JSEscapeStringArgs)(nil), // 7: ngolofuzz.JSEscapeStringArgs
(*NewArgs)(nil), // 8: ngolofuzz.NewArgs
(*TemplateNgdotNameArgs)(nil), // 9: ngolofuzz.TemplateNgdotNameArgs
(*TemplateNgdotNewArgs)(nil), // 10: ngolofuzz.TemplateNgdotNewArgs
(*TemplateNgdotCloneArgs)(nil), // 11: ngolofuzz.TemplateNgdotCloneArgs
(*TemplateNgdotTemplatesArgs)(nil), // 12: ngolofuzz.TemplateNgdotTemplatesArgs
(*TemplateNgdotDelimsArgs)(nil), // 13: ngolofuzz.TemplateNgdotDelimsArgs
(*TemplateNgdotLookupArgs)(nil), // 14: ngolofuzz.TemplateNgdotLookupArgs
(*TemplateNgdotParseArgs)(nil), // 15: ngolofuzz.TemplateNgdotParseArgs
(*NgoloFuzzOne)(nil), // 16: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 17: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 18: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
17, // 0: ngolofuzz.TemplateNgdotExecuteTemplateArgs.data:type_name -> ngolofuzz.NgoloFuzzAny
17, // 1: ngolofuzz.TemplateNgdotExecuteArgs.data:type_name -> ngolofuzz.NgoloFuzzAny
17, // 2: ngolofuzz.IsTrueArgs.val:type_name -> ngolofuzz.NgoloFuzzAny
0, // 3: ngolofuzz.NgoloFuzzOne.TemplateNgdotExecuteTemplate:type_name -> ngolofuzz.TemplateNgdotExecuteTemplateArgs
1, // 4: ngolofuzz.NgoloFuzzOne.TemplateNgdotExecute:type_name -> ngolofuzz.TemplateNgdotExecuteArgs
2, // 5: ngolofuzz.NgoloFuzzOne.TemplateNgdotDefinedTemplates:type_name -> ngolofuzz.TemplateNgdotDefinedTemplatesArgs
3, // 6: ngolofuzz.NgoloFuzzOne.IsTrue:type_name -> ngolofuzz.IsTrueArgs
4, // 7: ngolofuzz.NgoloFuzzOne.HTMLEscape:type_name -> ngolofuzz.HTMLEscapeArgs
5, // 8: ngolofuzz.NgoloFuzzOne.HTMLEscapeString:type_name -> ngolofuzz.HTMLEscapeStringArgs
6, // 9: ngolofuzz.NgoloFuzzOne.JSEscape:type_name -> ngolofuzz.JSEscapeArgs
7, // 10: ngolofuzz.NgoloFuzzOne.JSEscapeString:type_name -> ngolofuzz.JSEscapeStringArgs
8, // 11: ngolofuzz.NgoloFuzzOne.New:type_name -> ngolofuzz.NewArgs
9, // 12: ngolofuzz.NgoloFuzzOne.TemplateNgdotName:type_name -> ngolofuzz.TemplateNgdotNameArgs
10, // 13: ngolofuzz.NgoloFuzzOne.TemplateNgdotNew:type_name -> ngolofuzz.TemplateNgdotNewArgs
11, // 14: ngolofuzz.NgoloFuzzOne.TemplateNgdotClone:type_name -> ngolofuzz.TemplateNgdotCloneArgs
12, // 15: ngolofuzz.NgoloFuzzOne.TemplateNgdotTemplates:type_name -> ngolofuzz.TemplateNgdotTemplatesArgs
13, // 16: ngolofuzz.NgoloFuzzOne.TemplateNgdotDelims:type_name -> ngolofuzz.TemplateNgdotDelimsArgs
14, // 17: ngolofuzz.NgoloFuzzOne.TemplateNgdotLookup:type_name -> ngolofuzz.TemplateNgdotLookupArgs
15, // 18: ngolofuzz.NgoloFuzzOne.TemplateNgdotParse:type_name -> ngolofuzz.TemplateNgdotParseArgs
16, // 19: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
20, // [20:20] is the sub-list for method output_type
20, // [20:20] is the sub-list for method input_type
20, // [20:20] is the sub-list for extension type_name
20, // [20:20] is the sub-list for extension extendee
0, // [0:20] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[16].OneofWrappers = []any{
(*NgoloFuzzOne_TemplateNgdotExecuteTemplate)(nil),
(*NgoloFuzzOne_TemplateNgdotExecute)(nil),
(*NgoloFuzzOne_TemplateNgdotDefinedTemplates)(nil),
(*NgoloFuzzOne_IsTrue)(nil),
(*NgoloFuzzOne_HTMLEscape)(nil),
(*NgoloFuzzOne_HTMLEscapeString)(nil),
(*NgoloFuzzOne_JSEscape)(nil),
(*NgoloFuzzOne_JSEscapeString)(nil),
(*NgoloFuzzOne_New)(nil),
(*NgoloFuzzOne_TemplateNgdotName)(nil),
(*NgoloFuzzOne_TemplateNgdotNew)(nil),
(*NgoloFuzzOne_TemplateNgdotClone)(nil),
(*NgoloFuzzOne_TemplateNgdotTemplates)(nil),
(*NgoloFuzzOne_TemplateNgdotDelims)(nil),
(*NgoloFuzzOne_TemplateNgdotLookup)(nil),
(*NgoloFuzzOne_TemplateNgdotParse)(nil),
}
file_ngolofuzz_proto_msgTypes[17].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 19,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_text_template_parse
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"text/template/parse"
"time"
)
// FuzzingConn is an in-memory net.Conn-like stub that serves a fixed byte
// buffer to readers and discards writes.
type FuzzingConn struct {
	buf    []byte // data served by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next chunk of c.buf into b and returns the number of
// bytes actually copied. Once the buffer is exhausted it returns io.EOF.
//
// BUG FIX: the original partial-read branch compared
// len(b) < len(c.buf)+c.offset (wrong sign; should have been
// len(c.buf)-c.offset), so a Read near the end of the buffer reported
// len(b) bytes read while copying fewer, and advanced offset past
// len(c.buf). Using copy's return value makes the count and the offset
// correct in both the partial and final cases.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write discards b and reports that every byte was written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection's buffer as fully consumed; subsequent reads
// will report end of data.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a stub address type returned by FuzzingConn's
// LocalAddr/RemoteAddr methods.
type FuzzingAddr struct{}

// Network reports a fixed placeholder network name.
func (c *FuzzingAddr) Network() string { return "fuzz_addr_net" }

// String reports a fixed placeholder address string.
func (c *FuzzingAddr) String() string { return "fuzz_addr_string" }
// LocalAddr returns a stub address; the value carries no information.
func (c *FuzzingConn) LocalAddr() net.Addr { return new(FuzzingAddr) }

// RemoteAddr returns a stub address; the value carries no information.
func (c *FuzzingConn) RemoteAddr() net.Addr { return new(FuzzingAddr) }

// SetDeadline is a no-op; deadlines are meaningless for an in-memory conn.
func (c *FuzzingConn) SetDeadline(t time.Time) error { return nil }

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error { return nil }

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error { return nil }
// CreateFuzzingConn builds a FuzzingConn whose reads are served from a.
// The slice is aliased, not copied.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: have the generator emit these helper functions only when the target API actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
// An empty slice yields zero.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps a in a buffered reader for APIs that consume
// a *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each element of a to int (platform-dependent
// truncation on 32-bit targets) and returns the converted slice.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each element of a to its low 16 bits and
// returns the resulting []uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s, or NUL for an empty string.
// Invalid UTF-8 decodes to U+FFFD, same as a range loop would yield.
func GetRune(s string) rune {
	runes := []rune(s)
	if len(runes) == 0 {
		return '\x00'
	}
	return runes[0]
}
// StringNodeNewFromFuzz converts a fuzzer-supplied StringNodeStruct into a
// parse.StringNode. A nil input maps to a nil node so callers can skip it.
func StringNodeNewFromFuzz(p *StringNodeStruct) *parse.StringNode {
	if p == nil {
		return nil
	}
	node := new(parse.StringNode)
	node.Quoted = p.Quoted
	node.Text = p.Text
	return node
}
// FuzzNG_valid decodes data as an NgoloFuzzList and replays it through
// FuzzNG_List. The input is expected to be valid protobuf (it comes from
// the LPM generator), so a decode failure panics rather than being skipped.
// String panics raised while replaying are swallowed; anything else is a
// real crash and is re-raised.
func FuzzNG_valid(data []byte) int {
	list := &NgoloFuzzList{}
	if err := proto.Unmarshal(data, list); err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// Registered after the unmarshal check on purpose: a decode panic
	// above must not be recovered.
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// FuzzNG_unsure is like FuzzNG_valid but for raw fuzzer input that may not
// be a valid protobuf: undecodable inputs are silently ignored instead of
// panicking. String panics raised while replaying are swallowed; any other
// panic value is re-raised.
func FuzzNG_unsure(data []byte) int {
	list := &NgoloFuzzList{}
	if proto.Unmarshal(data, list) != nil {
		return 0
	}
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if _, isString := r.(string); !isString {
			panic(r)
		}
	}()
	runtime.GC()
	return FuzzNG_List(list)
}
// initialized guards the one-time reproducer-file setup below.
var initialized bool

// FuzzNG_List interprets gen.List as a sequence of calls into
// text/template/parse. Each produced value is appended to a per-type result
// slice, and later operations pick their receivers/arguments from those
// slices round-robin via the matching *ResultsIndex counter. Returns 1 when
// the whole list was replayed, 0 when it was truncated (empty prerequisite
// pool or length cap).
//
// NOTE(review): only NodeResults and IdentifierNodeResults are ever seeded
// here (via NewIdentifier and the StringNode Copy cases); the Pos/NodeType/
// Tree/ListNode/PipeNode pools are only appended to inside branches that
// require them to be non-empty already, so those branches look unreachable —
// confirm against the ngolo-fuzzing generator.
func FuzzNG_List(gen *NgoloFuzzList) int {
if !initialized {
// On first use, optionally dump a Go-source reproducer of this input
// to the file named by FUZZ_NG_REPRODUCER.
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Result pools: values produced by earlier operations, reused round-robin.
var NodeResults []*parse.Node
NodeResultsIndex := 0
var IdentifierNodeResults []*parse.IdentifierNode
IdentifierNodeResultsIndex := 0
var TreeResults []*parse.Tree
TreeResultsIndex := 0
var PosResults []*parse.Pos
PosResultsIndex := 0
var NodeTypeResults []*parse.NodeType
NodeTypeResultsIndex := 0
var ListNodeResults []*parse.ListNode
ListNodeResultsIndex := 0
var PipeNodeResults []*parse.PipeNode
PipeNodeResultsIndex := 0
for l := range gen.List {
// Cap replay length to keep individual inputs cheap.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_PosNgdotPosition:
if len(PosResults) == 0 {
continue
}
arg0 := PosResults[PosResultsIndex]
PosResultsIndex = (PosResultsIndex + 1) % len(PosResults)
r0 := arg0.Position()
PosResults = append(PosResults, &r0)
case *NgoloFuzzOne_NodeTypeNgdotType:
if len(NodeTypeResults) == 0 {
continue
}
arg0 := NodeTypeResults[NodeTypeResultsIndex]
NodeTypeResultsIndex = (NodeTypeResultsIndex + 1) % len(NodeTypeResults)
r0 := arg0.Type()
NodeTypeResults = append(NodeTypeResults, &r0)
case *NgoloFuzzOne_ListNodeNgdotString:
if len(ListNodeResults) == 0 {
continue
}
arg0 := ListNodeResults[ListNodeResultsIndex]
ListNodeResultsIndex = (ListNodeResultsIndex + 1) % len(ListNodeResults)
arg0.String()
case *NgoloFuzzOne_ListNodeNgdotCopyList:
if len(ListNodeResults) == 0 {
continue
}
arg0 := ListNodeResults[ListNodeResultsIndex]
ListNodeResultsIndex = (ListNodeResultsIndex + 1) % len(ListNodeResults)
r0 := arg0.CopyList()
if r0 != nil{
ListNodeResults = append(ListNodeResults, r0)
}
case *NgoloFuzzOne_ListNodeNgdotCopy:
if len(ListNodeResults) == 0 {
continue
}
arg0 := ListNodeResults[ListNodeResultsIndex]
ListNodeResultsIndex = (ListNodeResultsIndex + 1) % len(ListNodeResults)
r0 := arg0.Copy()
NodeResults = append(NodeResults, &r0)
case *NgoloFuzzOne_PipeNodeNgdotString:
if len(PipeNodeResults) == 0 {
continue
}
arg0 := PipeNodeResults[PipeNodeResultsIndex]
PipeNodeResultsIndex = (PipeNodeResultsIndex + 1) % len(PipeNodeResults)
arg0.String()
case *NgoloFuzzOne_PipeNodeNgdotCopyPipe:
if len(PipeNodeResults) == 0 {
continue
}
arg0 := PipeNodeResults[PipeNodeResultsIndex]
PipeNodeResultsIndex = (PipeNodeResultsIndex + 1) % len(PipeNodeResults)
arg0.CopyPipe()
case *NgoloFuzzOne_PipeNodeNgdotCopy:
if len(PipeNodeResults) == 0 {
continue
}
arg0 := PipeNodeResults[PipeNodeResultsIndex]
PipeNodeResultsIndex = (PipeNodeResultsIndex + 1) % len(PipeNodeResults)
r0 := arg0.Copy()
NodeResults = append(NodeResults, &r0)
case *NgoloFuzzOne_NewIdentifier:
// Seeds the IdentifierNode pool from fuzzer-chosen text.
r0 := parse.NewIdentifier(a.NewIdentifier.Ident)
if r0 != nil{
IdentifierNodeResults = append(IdentifierNodeResults, r0)
}
case *NgoloFuzzOne_IdentifierNodeNgdotSetPos:
if len(IdentifierNodeResults) == 0 {
continue
}
arg0 := IdentifierNodeResults[IdentifierNodeResultsIndex]
IdentifierNodeResultsIndex = (IdentifierNodeResultsIndex + 1) % len(IdentifierNodeResults)
if len(PosResults) == 0 {
continue
}
arg1 := *PosResults[PosResultsIndex]
PosResultsIndex = (PosResultsIndex + 1) % len(PosResults)
r0 := arg0.SetPos(arg1)
if r0 != nil{
IdentifierNodeResults = append(IdentifierNodeResults, r0)
}
case *NgoloFuzzOne_IdentifierNodeNgdotSetTree:
if len(IdentifierNodeResults) == 0 {
continue
}
arg0 := IdentifierNodeResults[IdentifierNodeResultsIndex]
IdentifierNodeResultsIndex = (IdentifierNodeResultsIndex + 1) % len(IdentifierNodeResults)
if len(TreeResults) == 0 {
continue
}
arg1 := TreeResults[TreeResultsIndex]
TreeResultsIndex = (TreeResultsIndex + 1) % len(TreeResults)
r0 := arg0.SetTree(arg1)
if r0 != nil{
IdentifierNodeResults = append(IdentifierNodeResults, r0)
}
case *NgoloFuzzOne_IdentifierNodeNgdotString:
if len(IdentifierNodeResults) == 0 {
continue
}
arg0 := IdentifierNodeResults[IdentifierNodeResultsIndex]
IdentifierNodeResultsIndex = (IdentifierNodeResultsIndex + 1) % len(IdentifierNodeResults)
arg0.String()
case *NgoloFuzzOne_IdentifierNodeNgdotCopy:
if len(IdentifierNodeResults) == 0 {
continue
}
arg0 := IdentifierNodeResults[IdentifierNodeResultsIndex]
IdentifierNodeResultsIndex = (IdentifierNodeResultsIndex + 1) % len(IdentifierNodeResults)
r0 := arg0.Copy()
NodeResults = append(NodeResults, &r0)
case *NgoloFuzzOne_StringNodeNgdotString:
// StringNode is built directly from fuzzer data, no pool needed.
arg0 := StringNodeNewFromFuzz(a.StringNodeNgdotString.S)
if arg0 == nil {
continue
}
arg0.String()
case *NgoloFuzzOne_StringNodeNgdotCopy:
arg0 := StringNodeNewFromFuzz(a.StringNodeNgdotCopy.S)
if arg0 == nil {
continue
}
r0 := arg0.Copy()
NodeResults = append(NodeResults, &r0)
case *NgoloFuzzOne_TreeNgdotCopy:
if len(TreeResults) == 0 {
continue
}
arg0 := TreeResults[TreeResultsIndex]
TreeResultsIndex = (TreeResultsIndex + 1) % len(TreeResults)
r0 := arg0.Copy()
if r0 != nil{
TreeResults = append(TreeResults, r0)
}
case *NgoloFuzzOne_TreeNgdotErrorContext:
if len(TreeResults) == 0 {
continue
}
arg0 := TreeResults[TreeResultsIndex]
TreeResultsIndex = (TreeResultsIndex + 1) % len(TreeResults)
if len(NodeResults) == 0 {
continue
}
arg1 := *NodeResults[NodeResultsIndex]
NodeResultsIndex = (NodeResultsIndex + 1) % len(NodeResults)
arg0.ErrorContext(arg1)
case *NgoloFuzzOne_IsEmptyTree:
if len(NodeResults) == 0 {
continue
}
arg0 := *NodeResults[NodeResultsIndex]
NodeResultsIndex = (NodeResultsIndex + 1) % len(NodeResults)
parse.IsEmptyTree(arg0)
}
}
return 1
}
// PrintNG_List writes a Go-source reproducer for gen to w, mirroring the
// call sequence FuzzNG_List executes. The *Nb counters track how many
// variables of each type the emitted program has declared so far; the
// *ResultsIndex counters mirror FuzzNG_List's round-robin reuse so the
// printed variable numbers line up with the replay.
// WriteString errors are deliberately ignored: output is best-effort.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
NodeNb := 0
NodeResultsIndex := 0
IdentifierNodeNb := 0
IdentifierNodeResultsIndex := 0
TreeNb := 0
TreeResultsIndex := 0
PosNb := 0
PosResultsIndex := 0
NodeTypeNb := 0
NodeTypeResultsIndex := 0
ListNodeNb := 0
ListNodeResultsIndex := 0
PipeNodeNb := 0
PipeNodeResultsIndex := 0
for l := range gen.List {
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_PosNgdotPosition:
if PosNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Pos%d := Pos%d.Position()\n", PosNb, PosResultsIndex))
PosNb = PosNb + 1
PosResultsIndex = (PosResultsIndex + 1) % PosNb
case *NgoloFuzzOne_NodeTypeNgdotType:
if NodeTypeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("NodeType%d := NodeType%d.Type()\n", NodeTypeNb, NodeTypeResultsIndex))
NodeTypeNb = NodeTypeNb + 1
NodeTypeResultsIndex = (NodeTypeResultsIndex + 1) % NodeTypeNb
case *NgoloFuzzOne_ListNodeNgdotString:
if ListNodeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("ListNode%d.String()\n", ListNodeResultsIndex))
ListNodeResultsIndex = (ListNodeResultsIndex + 1) % ListNodeNb
case *NgoloFuzzOne_ListNodeNgdotCopyList:
if ListNodeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("ListNode%d := ListNode%d.CopyList()\n", ListNodeNb, ListNodeResultsIndex))
ListNodeNb = ListNodeNb + 1
ListNodeResultsIndex = (ListNodeResultsIndex + 1) % ListNodeNb
case *NgoloFuzzOne_ListNodeNgdotCopy:
if ListNodeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Node%d := ListNode%d.Copy()\n", NodeNb, ListNodeResultsIndex))
NodeNb = NodeNb + 1
ListNodeResultsIndex = (ListNodeResultsIndex + 1) % ListNodeNb
case *NgoloFuzzOne_PipeNodeNgdotString:
if PipeNodeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("PipeNode%d.String()\n", PipeNodeResultsIndex))
PipeNodeResultsIndex = (PipeNodeResultsIndex + 1) % PipeNodeNb
case *NgoloFuzzOne_PipeNodeNgdotCopyPipe:
if PipeNodeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("PipeNode%d.CopyPipe()\n", PipeNodeResultsIndex))
PipeNodeResultsIndex = (PipeNodeResultsIndex + 1) % PipeNodeNb
case *NgoloFuzzOne_PipeNodeNgdotCopy:
if PipeNodeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Node%d := PipeNode%d.Copy()\n", NodeNb, PipeNodeResultsIndex))
NodeNb = NodeNb + 1
PipeNodeResultsIndex = (PipeNodeResultsIndex + 1) % PipeNodeNb
case *NgoloFuzzOne_NewIdentifier:
// Identifier literals come straight from the fuzzer input.
w.WriteString(fmt.Sprintf("IdentifierNode%d := parse.NewIdentifier(%#+v)\n", IdentifierNodeNb, a.NewIdentifier.Ident))
IdentifierNodeNb = IdentifierNodeNb + 1
case *NgoloFuzzOne_IdentifierNodeNgdotSetPos:
if IdentifierNodeNb == 0 {
continue
}
if PosNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("IdentifierNode%d := IdentifierNode%d.SetPos(Pos%d)\n", IdentifierNodeNb, IdentifierNodeResultsIndex, (PosResultsIndex + 0) % PosNb))
IdentifierNodeNb = IdentifierNodeNb + 1
IdentifierNodeResultsIndex = (IdentifierNodeResultsIndex + 1) % IdentifierNodeNb
PosResultsIndex = (PosResultsIndex + 1) % PosNb
case *NgoloFuzzOne_IdentifierNodeNgdotSetTree:
if IdentifierNodeNb == 0 {
continue
}
if TreeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("IdentifierNode%d := IdentifierNode%d.SetTree(Tree%d)\n", IdentifierNodeNb, IdentifierNodeResultsIndex, (TreeResultsIndex + 0) % TreeNb))
IdentifierNodeNb = IdentifierNodeNb + 1
IdentifierNodeResultsIndex = (IdentifierNodeResultsIndex + 1) % IdentifierNodeNb
TreeResultsIndex = (TreeResultsIndex + 1) % TreeNb
case *NgoloFuzzOne_IdentifierNodeNgdotString:
if IdentifierNodeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("IdentifierNode%d.String()\n", IdentifierNodeResultsIndex))
IdentifierNodeResultsIndex = (IdentifierNodeResultsIndex + 1) % IdentifierNodeNb
case *NgoloFuzzOne_IdentifierNodeNgdotCopy:
if IdentifierNodeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Node%d := IdentifierNode%d.Copy()\n", NodeNb, IdentifierNodeResultsIndex))
NodeNb = NodeNb + 1
IdentifierNodeResultsIndex = (IdentifierNodeResultsIndex + 1) % IdentifierNodeNb
case *NgoloFuzzOne_StringNodeNgdotString:
w.WriteString(fmt.Sprintf("StringNodeNewFromFuzz(%#+v).String()\n", a.StringNodeNgdotString.S))
case *NgoloFuzzOne_StringNodeNgdotCopy:
w.WriteString(fmt.Sprintf("Node%d := StringNodeNewFromFuzz(%#+v).Copy()\n", NodeNb, a.StringNodeNgdotCopy.S))
NodeNb = NodeNb + 1
case *NgoloFuzzOne_TreeNgdotCopy:
if TreeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Tree%d := Tree%d.Copy()\n", TreeNb, TreeResultsIndex))
TreeNb = TreeNb + 1
TreeResultsIndex = (TreeResultsIndex + 1) % TreeNb
case *NgoloFuzzOne_TreeNgdotErrorContext:
if TreeNb == 0 {
continue
}
if NodeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("Tree%d.ErrorContext(Node%d)\n", TreeResultsIndex, (NodeResultsIndex + 0) % NodeNb))
TreeResultsIndex = (TreeResultsIndex + 1) % TreeNb
NodeResultsIndex = (NodeResultsIndex + 1) % NodeNb
case *NgoloFuzzOne_IsEmptyTree:
if NodeNb == 0 {
continue
}
w.WriteString(fmt.Sprintf("parse.IsEmptyTree(Node%d)\n", (NodeResultsIndex + 0) % NodeNb))
NodeResultsIndex = (NodeResultsIndex + 1) % NodeNb
}
}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_text_template_parse
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// StringNodeStruct is the generated message for ngolofuzz.StringNodeStruct:
// the fuzzer-supplied pair of a quoted string form and its text value
// (message index 0 in file_ngolofuzz_proto_msgTypes).
type StringNodeStruct struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Quoted string `protobuf:"bytes,1,opt,name=Quoted,proto3" json:"Quoted,omitempty"`
	Text string `protobuf:"bytes,2,opt,name=Text,proto3" json:"Text,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
// Reset zeroes the message and re-attaches its cached message info.
func (x *StringNodeStruct) Reset() {
	*x = StringNodeStruct{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
// String returns the protobuf text representation of the message.
func (x *StringNodeStruct) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*StringNodeStruct) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, lazily
// storing the message info on first use.
func (x *StringNodeStruct) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use StringNodeStruct.ProtoReflect.Descriptor instead.
func (*StringNodeStruct) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetQuoted returns the Quoted field, or "" if the receiver is nil.
func (x *StringNodeStruct) GetQuoted() string {
	if x != nil {
		return x.Quoted
	}
	return ""
}
// GetText returns the Text field, or "" if the receiver is nil.
func (x *StringNodeStruct) GetText() string {
	if x != nil {
		return x.Text
	}
	return ""
}
// PosNgdotPositionArgs is the empty argument message for the
// Pos.Position fuzz call (message index 1).
type PosNgdotPositionArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *PosNgdotPositionArgs) Reset() {
	*x = PosNgdotPositionArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *PosNgdotPositionArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*PosNgdotPositionArgs) ProtoMessage() {}
func (x *PosNgdotPositionArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use PosNgdotPositionArgs.ProtoReflect.Descriptor instead.
func (*PosNgdotPositionArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// NodeTypeNgdotTypeArgs is the empty argument message for the
// NodeType.Type fuzz call (message index 2).
type NodeTypeNgdotTypeArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *NodeTypeNgdotTypeArgs) Reset() {
	*x = NodeTypeNgdotTypeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *NodeTypeNgdotTypeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NodeTypeNgdotTypeArgs) ProtoMessage() {}
func (x *NodeTypeNgdotTypeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NodeTypeNgdotTypeArgs.ProtoReflect.Descriptor instead.
func (*NodeTypeNgdotTypeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// ListNodeNgdotStringArgs is the empty argument message for the
// ListNode.String fuzz call (message index 3).
type ListNodeNgdotStringArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *ListNodeNgdotStringArgs) Reset() {
	*x = ListNodeNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *ListNodeNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*ListNodeNgdotStringArgs) ProtoMessage() {}
func (x *ListNodeNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListNodeNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*ListNodeNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// ListNodeNgdotCopyListArgs is the empty argument message for the
// ListNode.CopyList fuzz call (message index 4).
type ListNodeNgdotCopyListArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *ListNodeNgdotCopyListArgs) Reset() {
	*x = ListNodeNgdotCopyListArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *ListNodeNgdotCopyListArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*ListNodeNgdotCopyListArgs) ProtoMessage() {}
func (x *ListNodeNgdotCopyListArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListNodeNgdotCopyListArgs.ProtoReflect.Descriptor instead.
func (*ListNodeNgdotCopyListArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// ListNodeNgdotCopyArgs is the empty argument message for the
// ListNode.Copy fuzz call (message index 5).
type ListNodeNgdotCopyArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *ListNodeNgdotCopyArgs) Reset() {
	*x = ListNodeNgdotCopyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *ListNodeNgdotCopyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*ListNodeNgdotCopyArgs) ProtoMessage() {}
func (x *ListNodeNgdotCopyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListNodeNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*ListNodeNgdotCopyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// PipeNodeNgdotStringArgs is the empty argument message for the
// PipeNode.String fuzz call (message index 6).
type PipeNodeNgdotStringArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *PipeNodeNgdotStringArgs) Reset() {
	*x = PipeNodeNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *PipeNodeNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*PipeNodeNgdotStringArgs) ProtoMessage() {}
func (x *PipeNodeNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use PipeNodeNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*PipeNodeNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// PipeNodeNgdotCopyPipeArgs is the empty argument message for the
// PipeNode.CopyPipe fuzz call (message index 7).
type PipeNodeNgdotCopyPipeArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *PipeNodeNgdotCopyPipeArgs) Reset() {
	*x = PipeNodeNgdotCopyPipeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *PipeNodeNgdotCopyPipeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*PipeNodeNgdotCopyPipeArgs) ProtoMessage() {}
func (x *PipeNodeNgdotCopyPipeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use PipeNodeNgdotCopyPipeArgs.ProtoReflect.Descriptor instead.
func (*PipeNodeNgdotCopyPipeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// PipeNodeNgdotCopyArgs is the empty argument message for the
// PipeNode.Copy fuzz call (message index 8).
type PipeNodeNgdotCopyArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *PipeNodeNgdotCopyArgs) Reset() {
	*x = PipeNodeNgdotCopyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *PipeNodeNgdotCopyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*PipeNodeNgdotCopyArgs) ProtoMessage() {}
func (x *PipeNodeNgdotCopyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use PipeNodeNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*PipeNodeNgdotCopyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// NewIdentifierArgs carries the identifier string for the NewIdentifier
// fuzz call (message index 9).
type NewIdentifierArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Ident string `protobuf:"bytes,1,opt,name=ident,proto3" json:"ident,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *NewIdentifierArgs) Reset() {
	*x = NewIdentifierArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *NewIdentifierArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NewIdentifierArgs) ProtoMessage() {}
func (x *NewIdentifierArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NewIdentifierArgs.ProtoReflect.Descriptor instead.
func (*NewIdentifierArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetIdent returns the Ident field, or "" if the receiver is nil.
func (x *NewIdentifierArgs) GetIdent() string {
	if x != nil {
		return x.Ident
	}
	return ""
}
// IdentifierNodeNgdotSetPosArgs is the empty argument message for the
// IdentifierNode.SetPos fuzz call (message index 10).
type IdentifierNodeNgdotSetPosArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IdentifierNodeNgdotSetPosArgs) Reset() {
	*x = IdentifierNodeNgdotSetPosArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IdentifierNodeNgdotSetPosArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IdentifierNodeNgdotSetPosArgs) ProtoMessage() {}
func (x *IdentifierNodeNgdotSetPosArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IdentifierNodeNgdotSetPosArgs.ProtoReflect.Descriptor instead.
func (*IdentifierNodeNgdotSetPosArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// IdentifierNodeNgdotSetTreeArgs is the empty argument message for the
// IdentifierNode.SetTree fuzz call (message index 11).
type IdentifierNodeNgdotSetTreeArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IdentifierNodeNgdotSetTreeArgs) Reset() {
	*x = IdentifierNodeNgdotSetTreeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IdentifierNodeNgdotSetTreeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IdentifierNodeNgdotSetTreeArgs) ProtoMessage() {}
func (x *IdentifierNodeNgdotSetTreeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IdentifierNodeNgdotSetTreeArgs.ProtoReflect.Descriptor instead.
func (*IdentifierNodeNgdotSetTreeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// IdentifierNodeNgdotStringArgs is the empty argument message for the
// IdentifierNode.String fuzz call (message index 12).
type IdentifierNodeNgdotStringArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IdentifierNodeNgdotStringArgs) Reset() {
	*x = IdentifierNodeNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IdentifierNodeNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IdentifierNodeNgdotStringArgs) ProtoMessage() {}
func (x *IdentifierNodeNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IdentifierNodeNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*IdentifierNodeNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// IdentifierNodeNgdotCopyArgs is the empty argument message for the
// IdentifierNode.Copy fuzz call (message index 13).
type IdentifierNodeNgdotCopyArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IdentifierNodeNgdotCopyArgs) Reset() {
	*x = IdentifierNodeNgdotCopyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IdentifierNodeNgdotCopyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IdentifierNodeNgdotCopyArgs) ProtoMessage() {}
func (x *IdentifierNodeNgdotCopyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IdentifierNodeNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*IdentifierNodeNgdotCopyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
// StringNodeNgdotStringArgs carries the StringNodeStruct argument for
// the StringNode.String fuzz call (message index 14).
type StringNodeNgdotStringArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S *StringNodeStruct `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *StringNodeNgdotStringArgs) Reset() {
	*x = StringNodeNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *StringNodeNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*StringNodeNgdotStringArgs) ProtoMessage() {}
func (x *StringNodeNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use StringNodeNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*StringNodeNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
// GetS returns the S field, or nil if the receiver is nil.
func (x *StringNodeNgdotStringArgs) GetS() *StringNodeStruct {
	if x != nil {
		return x.S
	}
	return nil
}
// StringNodeNgdotCopyArgs carries the StringNodeStruct argument for the
// StringNode.Copy fuzz call (message index 15).
type StringNodeNgdotCopyArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	S *StringNodeStruct `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *StringNodeNgdotCopyArgs) Reset() {
	*x = StringNodeNgdotCopyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *StringNodeNgdotCopyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*StringNodeNgdotCopyArgs) ProtoMessage() {}
func (x *StringNodeNgdotCopyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use StringNodeNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*StringNodeNgdotCopyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
// GetS returns the S field, or nil if the receiver is nil.
func (x *StringNodeNgdotCopyArgs) GetS() *StringNodeStruct {
	if x != nil {
		return x.S
	}
	return nil
}
// TreeNgdotCopyArgs is the empty argument message for the Tree.Copy
// fuzz call (message index 16).
type TreeNgdotCopyArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *TreeNgdotCopyArgs) Reset() {
	*x = TreeNgdotCopyArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *TreeNgdotCopyArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*TreeNgdotCopyArgs) ProtoMessage() {}
func (x *TreeNgdotCopyArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use TreeNgdotCopyArgs.ProtoReflect.Descriptor instead.
func (*TreeNgdotCopyArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
// TreeNgdotErrorContextArgs is the empty argument message for the
// Tree.ErrorContext fuzz call (message index 17).
type TreeNgdotErrorContextArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *TreeNgdotErrorContextArgs) Reset() {
	*x = TreeNgdotErrorContextArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *TreeNgdotErrorContextArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*TreeNgdotErrorContextArgs) ProtoMessage() {}
func (x *TreeNgdotErrorContextArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use TreeNgdotErrorContextArgs.ProtoReflect.Descriptor instead.
func (*TreeNgdotErrorContextArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
// IsEmptyTreeArgs is the empty argument message for the IsEmptyTree
// fuzz call (message index 18).
type IsEmptyTreeArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsEmptyTreeArgs) Reset() {
	*x = IsEmptyTreeArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[18]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsEmptyTreeArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsEmptyTreeArgs) ProtoMessage() {}
func (x *IsEmptyTreeArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[18]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsEmptyTreeArgs.ProtoReflect.Descriptor instead.
func (*IsEmptyTreeArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
// NgoloFuzzOne wraps a single fuzz operation as a oneof over every
// supported call's argument message (message index 19). Exactly one of
// the wrapper types listed below may be assigned to Item.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_PosNgdotPosition
	//	*NgoloFuzzOne_NodeTypeNgdotType
	//	*NgoloFuzzOne_ListNodeNgdotString
	//	*NgoloFuzzOne_ListNodeNgdotCopyList
	//	*NgoloFuzzOne_ListNodeNgdotCopy
	//	*NgoloFuzzOne_PipeNodeNgdotString
	//	*NgoloFuzzOne_PipeNodeNgdotCopyPipe
	//	*NgoloFuzzOne_PipeNodeNgdotCopy
	//	*NgoloFuzzOne_NewIdentifier
	//	*NgoloFuzzOne_IdentifierNodeNgdotSetPos
	//	*NgoloFuzzOne_IdentifierNodeNgdotSetTree
	//	*NgoloFuzzOne_IdentifierNodeNgdotString
	//	*NgoloFuzzOne_IdentifierNodeNgdotCopy
	//	*NgoloFuzzOne_StringNodeNgdotString
	//	*NgoloFuzzOne_StringNodeNgdotCopy
	//	*NgoloFuzzOne_TreeNgdotCopy
	//	*NgoloFuzzOne_TreeNgdotErrorContext
	//	*NgoloFuzzOne_IsEmptyTree
	Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[19]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[19]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
// GetItem returns the currently-set oneof wrapper, or nil.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}
// Each GetXxx below returns the corresponding oneof variant's message,
// or nil when the receiver is nil or a different variant is set.
func (x *NgoloFuzzOne) GetPosNgdotPosition() *PosNgdotPositionArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PosNgdotPosition); ok {
			return x.PosNgdotPosition
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetNodeTypeNgdotType() *NodeTypeNgdotTypeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NodeTypeNgdotType); ok {
			return x.NodeTypeNgdotType
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetListNodeNgdotString() *ListNodeNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNodeNgdotString); ok {
			return x.ListNodeNgdotString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetListNodeNgdotCopyList() *ListNodeNgdotCopyListArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNodeNgdotCopyList); ok {
			return x.ListNodeNgdotCopyList
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetListNodeNgdotCopy() *ListNodeNgdotCopyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ListNodeNgdotCopy); ok {
			return x.ListNodeNgdotCopy
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetPipeNodeNgdotString() *PipeNodeNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PipeNodeNgdotString); ok {
			return x.PipeNodeNgdotString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetPipeNodeNgdotCopyPipe() *PipeNodeNgdotCopyPipeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PipeNodeNgdotCopyPipe); ok {
			return x.PipeNodeNgdotCopyPipe
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetPipeNodeNgdotCopy() *PipeNodeNgdotCopyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_PipeNodeNgdotCopy); ok {
			return x.PipeNodeNgdotCopy
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetNewIdentifier() *NewIdentifierArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_NewIdentifier); ok {
			return x.NewIdentifier
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetIdentifierNodeNgdotSetPos() *IdentifierNodeNgdotSetPosArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IdentifierNodeNgdotSetPos); ok {
			return x.IdentifierNodeNgdotSetPos
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetIdentifierNodeNgdotSetTree() *IdentifierNodeNgdotSetTreeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IdentifierNodeNgdotSetTree); ok {
			return x.IdentifierNodeNgdotSetTree
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetIdentifierNodeNgdotString() *IdentifierNodeNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IdentifierNodeNgdotString); ok {
			return x.IdentifierNodeNgdotString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetIdentifierNodeNgdotCopy() *IdentifierNodeNgdotCopyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IdentifierNodeNgdotCopy); ok {
			return x.IdentifierNodeNgdotCopy
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetStringNodeNgdotString() *StringNodeNgdotStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_StringNodeNgdotString); ok {
			return x.StringNodeNgdotString
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetStringNodeNgdotCopy() *StringNodeNgdotCopyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_StringNodeNgdotCopy); ok {
			return x.StringNodeNgdotCopy
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetTreeNgdotCopy() *TreeNgdotCopyArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_TreeNgdotCopy); ok {
			return x.TreeNgdotCopy
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetTreeNgdotErrorContext() *TreeNgdotErrorContextArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_TreeNgdotErrorContext); ok {
			return x.TreeNgdotErrorContext
		}
	}
	return nil
}
func (x *NgoloFuzzOne) GetIsEmptyTree() *IsEmptyTreeArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_IsEmptyTree); ok {
			return x.IsEmptyTree
		}
	}
	return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by the
// single-field wrapper types below; each wrapper represents one variant
// of the NgoloFuzzOne "item" oneof (field numbers 1-18).
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_PosNgdotPosition struct {
	PosNgdotPosition *PosNgdotPositionArgs `protobuf:"bytes,1,opt,name=PosNgdotPosition,proto3,oneof"`
}
type NgoloFuzzOne_NodeTypeNgdotType struct {
	NodeTypeNgdotType *NodeTypeNgdotTypeArgs `protobuf:"bytes,2,opt,name=NodeTypeNgdotType,proto3,oneof"`
}
type NgoloFuzzOne_ListNodeNgdotString struct {
	ListNodeNgdotString *ListNodeNgdotStringArgs `protobuf:"bytes,3,opt,name=ListNodeNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_ListNodeNgdotCopyList struct {
	ListNodeNgdotCopyList *ListNodeNgdotCopyListArgs `protobuf:"bytes,4,opt,name=ListNodeNgdotCopyList,proto3,oneof"`
}
type NgoloFuzzOne_ListNodeNgdotCopy struct {
	ListNodeNgdotCopy *ListNodeNgdotCopyArgs `protobuf:"bytes,5,opt,name=ListNodeNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_PipeNodeNgdotString struct {
	PipeNodeNgdotString *PipeNodeNgdotStringArgs `protobuf:"bytes,6,opt,name=PipeNodeNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_PipeNodeNgdotCopyPipe struct {
	PipeNodeNgdotCopyPipe *PipeNodeNgdotCopyPipeArgs `protobuf:"bytes,7,opt,name=PipeNodeNgdotCopyPipe,proto3,oneof"`
}
type NgoloFuzzOne_PipeNodeNgdotCopy struct {
	PipeNodeNgdotCopy *PipeNodeNgdotCopyArgs `protobuf:"bytes,8,opt,name=PipeNodeNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_NewIdentifier struct {
	NewIdentifier *NewIdentifierArgs `protobuf:"bytes,9,opt,name=NewIdentifier,proto3,oneof"`
}
type NgoloFuzzOne_IdentifierNodeNgdotSetPos struct {
	IdentifierNodeNgdotSetPos *IdentifierNodeNgdotSetPosArgs `protobuf:"bytes,10,opt,name=IdentifierNodeNgdotSetPos,proto3,oneof"`
}
type NgoloFuzzOne_IdentifierNodeNgdotSetTree struct {
	IdentifierNodeNgdotSetTree *IdentifierNodeNgdotSetTreeArgs `protobuf:"bytes,11,opt,name=IdentifierNodeNgdotSetTree,proto3,oneof"`
}
type NgoloFuzzOne_IdentifierNodeNgdotString struct {
	IdentifierNodeNgdotString *IdentifierNodeNgdotStringArgs `protobuf:"bytes,12,opt,name=IdentifierNodeNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_IdentifierNodeNgdotCopy struct {
	IdentifierNodeNgdotCopy *IdentifierNodeNgdotCopyArgs `protobuf:"bytes,13,opt,name=IdentifierNodeNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_StringNodeNgdotString struct {
	StringNodeNgdotString *StringNodeNgdotStringArgs `protobuf:"bytes,14,opt,name=StringNodeNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_StringNodeNgdotCopy struct {
	StringNodeNgdotCopy *StringNodeNgdotCopyArgs `protobuf:"bytes,15,opt,name=StringNodeNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_TreeNgdotCopy struct {
	TreeNgdotCopy *TreeNgdotCopyArgs `protobuf:"bytes,16,opt,name=TreeNgdotCopy,proto3,oneof"`
}
type NgoloFuzzOne_TreeNgdotErrorContext struct {
	TreeNgdotErrorContext *TreeNgdotErrorContextArgs `protobuf:"bytes,17,opt,name=TreeNgdotErrorContext,proto3,oneof"`
}
type NgoloFuzzOne_IsEmptyTree struct {
	IsEmptyTree *IsEmptyTreeArgs `protobuf:"bytes,18,opt,name=IsEmptyTree,proto3,oneof"`
}
// Marker methods sealing the oneof: only the wrappers above satisfy
// isNgoloFuzzOne_Item.
func (*NgoloFuzzOne_PosNgdotPosition) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NodeTypeNgdotType) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ListNodeNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ListNodeNgdotCopyList) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ListNodeNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PipeNodeNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PipeNodeNgdotCopyPipe) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_PipeNodeNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewIdentifier) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IdentifierNodeNgdotSetPos) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IdentifierNodeNgdotSetTree) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IdentifierNodeNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IdentifierNodeNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_StringNodeNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_StringNodeNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TreeNgdotCopy) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TreeNgdotErrorContext) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsEmptyTree) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a oneof over the primitive scalar kinds the fuzzer
// can supply (double, int64, bool, string, bytes); message index 20.
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[20]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[20]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
// GetItem returns the currently-set oneof wrapper, or nil.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}
// Each GetXxxArgs below returns the variant's value, or the scalar zero
// value when the receiver is nil or a different variant is set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}
// isNgoloFuzzAny_Item is the sealed interface for NgoloFuzzAny's "item"
// oneof; the five wrappers below are its only implementations.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne operations to replay (message index 21).
type NgoloFuzzList struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[21]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[21]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
// GetList returns the List field, or nil if the receiver is nil.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the compiled file descriptor for ngolofuzz.proto,
// populated by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto. Generated bytes — do not edit by hand.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\">\n" +
"\x10StringNodeStruct\x12\x16\n" +
"\x06Quoted\x18\x01 \x01(\tR\x06Quoted\x12\x12\n" +
"\x04Text\x18\x02 \x01(\tR\x04Text\"\x16\n" +
"\x14PosNgdotPositionArgs\"\x17\n" +
"\x15NodeTypeNgdotTypeArgs\"\x19\n" +
"\x17ListNodeNgdotStringArgs\"\x1b\n" +
"\x19ListNodeNgdotCopyListArgs\"\x17\n" +
"\x15ListNodeNgdotCopyArgs\"\x19\n" +
"\x17PipeNodeNgdotStringArgs\"\x1b\n" +
"\x19PipeNodeNgdotCopyPipeArgs\"\x17\n" +
"\x15PipeNodeNgdotCopyArgs\")\n" +
"\x11NewIdentifierArgs\x12\x14\n" +
"\x05ident\x18\x01 \x01(\tR\x05ident\"\x1f\n" +
"\x1dIdentifierNodeNgdotSetPosArgs\" \n" +
"\x1eIdentifierNodeNgdotSetTreeArgs\"\x1f\n" +
"\x1dIdentifierNodeNgdotStringArgs\"\x1d\n" +
"\x1bIdentifierNodeNgdotCopyArgs\"F\n" +
"\x19StringNodeNgdotStringArgs\x12)\n" +
"\x01s\x18\x01 \x01(\v2\x1b.ngolofuzz.StringNodeStructR\x01s\"D\n" +
"\x17StringNodeNgdotCopyArgs\x12)\n" +
"\x01s\x18\x01 \x01(\v2\x1b.ngolofuzz.StringNodeStructR\x01s\"\x13\n" +
"\x11TreeNgdotCopyArgs\"\x1b\n" +
"\x19TreeNgdotErrorContextArgs\"\x11\n" +
"\x0fIsEmptyTreeArgs\"\xcc\f\n" +
"\fNgoloFuzzOne\x12M\n" +
"\x10PosNgdotPosition\x18\x01 \x01(\v2\x1f.ngolofuzz.PosNgdotPositionArgsH\x00R\x10PosNgdotPosition\x12P\n" +
"\x11NodeTypeNgdotType\x18\x02 \x01(\v2 .ngolofuzz.NodeTypeNgdotTypeArgsH\x00R\x11NodeTypeNgdotType\x12V\n" +
"\x13ListNodeNgdotString\x18\x03 \x01(\v2\".ngolofuzz.ListNodeNgdotStringArgsH\x00R\x13ListNodeNgdotString\x12\\\n" +
"\x15ListNodeNgdotCopyList\x18\x04 \x01(\v2$.ngolofuzz.ListNodeNgdotCopyListArgsH\x00R\x15ListNodeNgdotCopyList\x12P\n" +
"\x11ListNodeNgdotCopy\x18\x05 \x01(\v2 .ngolofuzz.ListNodeNgdotCopyArgsH\x00R\x11ListNodeNgdotCopy\x12V\n" +
"\x13PipeNodeNgdotString\x18\x06 \x01(\v2\".ngolofuzz.PipeNodeNgdotStringArgsH\x00R\x13PipeNodeNgdotString\x12\\\n" +
"\x15PipeNodeNgdotCopyPipe\x18\a \x01(\v2$.ngolofuzz.PipeNodeNgdotCopyPipeArgsH\x00R\x15PipeNodeNgdotCopyPipe\x12P\n" +
"\x11PipeNodeNgdotCopy\x18\b \x01(\v2 .ngolofuzz.PipeNodeNgdotCopyArgsH\x00R\x11PipeNodeNgdotCopy\x12D\n" +
"\rNewIdentifier\x18\t \x01(\v2\x1c.ngolofuzz.NewIdentifierArgsH\x00R\rNewIdentifier\x12h\n" +
"\x19IdentifierNodeNgdotSetPos\x18\n" +
" \x01(\v2(.ngolofuzz.IdentifierNodeNgdotSetPosArgsH\x00R\x19IdentifierNodeNgdotSetPos\x12k\n" +
"\x1aIdentifierNodeNgdotSetTree\x18\v \x01(\v2).ngolofuzz.IdentifierNodeNgdotSetTreeArgsH\x00R\x1aIdentifierNodeNgdotSetTree\x12h\n" +
"\x19IdentifierNodeNgdotString\x18\f \x01(\v2(.ngolofuzz.IdentifierNodeNgdotStringArgsH\x00R\x19IdentifierNodeNgdotString\x12b\n" +
"\x17IdentifierNodeNgdotCopy\x18\r \x01(\v2&.ngolofuzz.IdentifierNodeNgdotCopyArgsH\x00R\x17IdentifierNodeNgdotCopy\x12\\\n" +
"\x15StringNodeNgdotString\x18\x0e \x01(\v2$.ngolofuzz.StringNodeNgdotStringArgsH\x00R\x15StringNodeNgdotString\x12V\n" +
"\x13StringNodeNgdotCopy\x18\x0f \x01(\v2\".ngolofuzz.StringNodeNgdotCopyArgsH\x00R\x13StringNodeNgdotCopy\x12D\n" +
"\rTreeNgdotCopy\x18\x10 \x01(\v2\x1c.ngolofuzz.TreeNgdotCopyArgsH\x00R\rTreeNgdotCopy\x12\\\n" +
"\x15TreeNgdotErrorContext\x18\x11 \x01(\v2$.ngolofuzz.TreeNgdotErrorContextArgsH\x00R\x15TreeNgdotErrorContext\x12>\n" +
"\vIsEmptyTree\x18\x12 \x01(\v2\x1a.ngolofuzz.IsEmptyTreeArgsH\x00R\vIsEmptyTreeB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB Z\x1e./;fuzz_ng_text_template_parseb\x06proto3"
// Cache for the GZIP-compressed descriptor, filled on first request by
// file_ngolofuzz_proto_rawDescGZIP.
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP compresses the raw descriptor exactly
// once (guarded by sync.Once) and returns the cached compressed bytes; it
// backs the deprecated Descriptor() accessors.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
// Runtime metadata for the 22 messages declared in ngolofuzz.proto.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 22)
// Go types corresponding to each message, indexed as in the comments below;
// the indices must match the descriptor's declaration order.
var file_ngolofuzz_proto_goTypes = []any{
(*StringNodeStruct)(nil), // 0: ngolofuzz.StringNodeStruct
(*PosNgdotPositionArgs)(nil), // 1: ngolofuzz.PosNgdotPositionArgs
(*NodeTypeNgdotTypeArgs)(nil), // 2: ngolofuzz.NodeTypeNgdotTypeArgs
(*ListNodeNgdotStringArgs)(nil), // 3: ngolofuzz.ListNodeNgdotStringArgs
(*ListNodeNgdotCopyListArgs)(nil), // 4: ngolofuzz.ListNodeNgdotCopyListArgs
(*ListNodeNgdotCopyArgs)(nil), // 5: ngolofuzz.ListNodeNgdotCopyArgs
(*PipeNodeNgdotStringArgs)(nil), // 6: ngolofuzz.PipeNodeNgdotStringArgs
(*PipeNodeNgdotCopyPipeArgs)(nil), // 7: ngolofuzz.PipeNodeNgdotCopyPipeArgs
(*PipeNodeNgdotCopyArgs)(nil), // 8: ngolofuzz.PipeNodeNgdotCopyArgs
(*NewIdentifierArgs)(nil), // 9: ngolofuzz.NewIdentifierArgs
(*IdentifierNodeNgdotSetPosArgs)(nil), // 10: ngolofuzz.IdentifierNodeNgdotSetPosArgs
(*IdentifierNodeNgdotSetTreeArgs)(nil), // 11: ngolofuzz.IdentifierNodeNgdotSetTreeArgs
(*IdentifierNodeNgdotStringArgs)(nil), // 12: ngolofuzz.IdentifierNodeNgdotStringArgs
(*IdentifierNodeNgdotCopyArgs)(nil), // 13: ngolofuzz.IdentifierNodeNgdotCopyArgs
(*StringNodeNgdotStringArgs)(nil), // 14: ngolofuzz.StringNodeNgdotStringArgs
(*StringNodeNgdotCopyArgs)(nil), // 15: ngolofuzz.StringNodeNgdotCopyArgs
(*TreeNgdotCopyArgs)(nil), // 16: ngolofuzz.TreeNgdotCopyArgs
(*TreeNgdotErrorContextArgs)(nil), // 17: ngolofuzz.TreeNgdotErrorContextArgs
(*IsEmptyTreeArgs)(nil), // 18: ngolofuzz.IsEmptyTreeArgs
(*NgoloFuzzOne)(nil), // 19: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 20: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 21: ngolofuzz.NgoloFuzzList
}
// Dependency index table: for every message field whose type is another
// message, the index (into goTypes) of that field's type, followed by the
// sub-list boundary markers the protobuf runtime expects.
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.StringNodeNgdotStringArgs.s:type_name -> ngolofuzz.StringNodeStruct
0, // 1: ngolofuzz.StringNodeNgdotCopyArgs.s:type_name -> ngolofuzz.StringNodeStruct
1, // 2: ngolofuzz.NgoloFuzzOne.PosNgdotPosition:type_name -> ngolofuzz.PosNgdotPositionArgs
2, // 3: ngolofuzz.NgoloFuzzOne.NodeTypeNgdotType:type_name -> ngolofuzz.NodeTypeNgdotTypeArgs
3, // 4: ngolofuzz.NgoloFuzzOne.ListNodeNgdotString:type_name -> ngolofuzz.ListNodeNgdotStringArgs
4, // 5: ngolofuzz.NgoloFuzzOne.ListNodeNgdotCopyList:type_name -> ngolofuzz.ListNodeNgdotCopyListArgs
5, // 6: ngolofuzz.NgoloFuzzOne.ListNodeNgdotCopy:type_name -> ngolofuzz.ListNodeNgdotCopyArgs
6, // 7: ngolofuzz.NgoloFuzzOne.PipeNodeNgdotString:type_name -> ngolofuzz.PipeNodeNgdotStringArgs
7, // 8: ngolofuzz.NgoloFuzzOne.PipeNodeNgdotCopyPipe:type_name -> ngolofuzz.PipeNodeNgdotCopyPipeArgs
8, // 9: ngolofuzz.NgoloFuzzOne.PipeNodeNgdotCopy:type_name -> ngolofuzz.PipeNodeNgdotCopyArgs
9, // 10: ngolofuzz.NgoloFuzzOne.NewIdentifier:type_name -> ngolofuzz.NewIdentifierArgs
10, // 11: ngolofuzz.NgoloFuzzOne.IdentifierNodeNgdotSetPos:type_name -> ngolofuzz.IdentifierNodeNgdotSetPosArgs
11, // 12: ngolofuzz.NgoloFuzzOne.IdentifierNodeNgdotSetTree:type_name -> ngolofuzz.IdentifierNodeNgdotSetTreeArgs
12, // 13: ngolofuzz.NgoloFuzzOne.IdentifierNodeNgdotString:type_name -> ngolofuzz.IdentifierNodeNgdotStringArgs
13, // 14: ngolofuzz.NgoloFuzzOne.IdentifierNodeNgdotCopy:type_name -> ngolofuzz.IdentifierNodeNgdotCopyArgs
14, // 15: ngolofuzz.NgoloFuzzOne.StringNodeNgdotString:type_name -> ngolofuzz.StringNodeNgdotStringArgs
15, // 16: ngolofuzz.NgoloFuzzOne.StringNodeNgdotCopy:type_name -> ngolofuzz.StringNodeNgdotCopyArgs
16, // 17: ngolofuzz.NgoloFuzzOne.TreeNgdotCopy:type_name -> ngolofuzz.TreeNgdotCopyArgs
17, // 18: ngolofuzz.NgoloFuzzOne.TreeNgdotErrorContext:type_name -> ngolofuzz.TreeNgdotErrorContextArgs
18, // 19: ngolofuzz.NgoloFuzzOne.IsEmptyTree:type_name -> ngolofuzz.IsEmptyTreeArgs
19, // 20: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
21, // [21:21] is the sub-list for method output_type
21, // [21:21] is the sub-list for method input_type
21, // [21:21] is the sub-list for extension type_name
21, // [21:21] is the sub-list for extension extendee
0, // [0:21] is the sub-list for field type_name
}
// Register the file with the protobuf runtime at package load time.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init builds File_ngolofuzz_proto from the raw
// descriptor and the generated Go type tables. It is idempotent: a second
// call returns immediately once the file descriptor has been built.
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
// Declare the wrapper types backing the NgoloFuzzOne.item oneof
// (msgTypes slot 19).
file_ngolofuzz_proto_msgTypes[19].OneofWrappers = []any{
(*NgoloFuzzOne_PosNgdotPosition)(nil),
(*NgoloFuzzOne_NodeTypeNgdotType)(nil),
(*NgoloFuzzOne_ListNodeNgdotString)(nil),
(*NgoloFuzzOne_ListNodeNgdotCopyList)(nil),
(*NgoloFuzzOne_ListNodeNgdotCopy)(nil),
(*NgoloFuzzOne_PipeNodeNgdotString)(nil),
(*NgoloFuzzOne_PipeNodeNgdotCopyPipe)(nil),
(*NgoloFuzzOne_PipeNodeNgdotCopy)(nil),
(*NgoloFuzzOne_NewIdentifier)(nil),
(*NgoloFuzzOne_IdentifierNodeNgdotSetPos)(nil),
(*NgoloFuzzOne_IdentifierNodeNgdotSetTree)(nil),
(*NgoloFuzzOne_IdentifierNodeNgdotString)(nil),
(*NgoloFuzzOne_IdentifierNodeNgdotCopy)(nil),
(*NgoloFuzzOne_StringNodeNgdotString)(nil),
(*NgoloFuzzOne_StringNodeNgdotCopy)(nil),
(*NgoloFuzzOne_TreeNgdotCopy)(nil),
(*NgoloFuzzOne_TreeNgdotErrorContext)(nil),
(*NgoloFuzzOne_IsEmptyTree)(nil),
}
// Declare the wrapper types backing the NgoloFuzzAny.item oneof
// (msgTypes slot 20).
file_ngolofuzz_proto_msgTypes[20].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
// Anchor type used only to resolve this package's import path.
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 22,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
// Release the construction-only tables so they can be collected.
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_time
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
)
// FuzzingConn is an in-memory net.Conn stand-in that serves reads from a
// fixed byte buffer and discards writes, letting fuzzed code exercise
// connection handling without real network I/O.
type FuzzingConn struct {
	buf    []byte // data handed out by Read
	offset int    // current read position within buf
}

// Read copies the next chunk of the buffer into b and returns the number
// of bytes copied; once the buffer is exhausted it returns (0, io.EOF).
//
// Bug fix: the original guard compared len(b) against len(c.buf)+c.offset
// instead of the REMAINING byte count len(c.buf)-c.offset. With a partially
// consumed buffer that could report len(b) bytes read while fewer were
// actually copied, and advance offset past len(buf).
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	remaining := len(c.buf) - c.offset
	if len(b) < remaining {
		// b cannot hold everything that is left: fill it completely.
		copy(b, c.buf[c.offset:])
		c.offset += len(b)
		return len(b), nil
	}
	// b can hold all remaining data: drain the buffer.
	copy(b, c.buf[c.offset:])
	c.offset = len(c.buf)
	return remaining, nil
}
// Write reports full success while discarding b; the fake connection has
// no peer to deliver data to.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
return len(b), nil
}
// Close marks the connection as exhausted so subsequent Reads return EOF.
func (c *FuzzingConn) Close() error {
c.offset = len(c.buf)
return nil
}
// FuzzingAddr is a placeholder net.Addr with fixed network/address strings.
type FuzzingAddr struct{}
// Network returns a constant fake network name.
func (c *FuzzingAddr) Network() string {
return "fuzz_addr_net"
}
// String returns a constant fake address string.
func (c *FuzzingAddr) String() string {
return "fuzz_addr_string"
}
// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
return &FuzzingAddr{}
}
// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
return &FuzzingAddr{}
}
// SetDeadline is a no-op: the in-memory connection never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
return nil
}
// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
return nil
}
// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn positioned at the start of
// the buffer.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO only add these functions if needed
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader exposes a as a buffered reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows every element of a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates every element of a to uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, 0, len(a))
	for _, v := range a {
		out = append(out, uint16(v))
	}
	return out
}
// GetRune returns the first rune of s, or NUL when s is empty. Invalid
// UTF-8 yields utf8.RuneError, exactly as a range loop over s would.
func GetRune(s string) rune {
	if runes := []rune(s); len(runes) > 0 {
		return runes[0]
	}
	return 0
}
// FuzzNG_valid replays a protobuf-encoded operation list that the caller
// guarantees is valid: a failed unmarshal is treated as a harness bug and
// panics. Returns FuzzNG_List's result.
func FuzzNG_valid(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
panic("Failed to unmarshal LPM generated variables")
}
// Registered after unmarshal on purpose: swallow string panics raised
// while replaying the API calls, re-raise any other panic value.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
// we are unsure the input is a valid protobuf
// FuzzNG_unsure is like FuzzNG_valid but tolerates arbitrary input:
// undecodable data is simply rejected with 0 instead of panicking.
func FuzzNG_unsure(data []byte) int {
gen := &NgoloFuzzList{}
err := proto.Unmarshal(data, gen)
if err != nil {
return 0
}
// Swallow string panics raised while replaying the API calls,
// re-raise any other panic value.
defer func() {
if r := recover(); r != nil {
switch r.(type) {
case string:
//do nothing
default:
panic(r)
}
}
}()
runtime.GC()
return FuzzNG_List(gen)
}
var initialized bool
// FuzzNG_List interprets gen.List as a sequence of time-package API calls.
// Intermediate results (Time, Duration, Location, Timer, Ticker, Month,
// Weekday) are kept in per-type pools; each operation consumes its
// arguments from the matching pool round-robin and appends any values it
// produces. Returns 1 when the whole list was replayed, 0 on an early stop
// (an error-returning call failed, or the list exceeds 4096 operations).
func FuzzNG_List(gen *NgoloFuzzList) int {
// One-time setup: if FUZZ_NG_REPRODUCER names a file, write a Go
// source reproducer of this input there.
if !initialized {
repro := os.Getenv("FUZZ_NG_REPRODUCER")
if len(repro) > 0 {
f, err := os.Create(repro)
if err != nil {
log.Fatalf("Failed to open %s : %s", repro, err)
} else {
PrintNG_List(gen, f)
}
}
initialized = true
}
// Result pools and their round-robin cursors.
var WeekdayResults []*time.Weekday
WeekdayResultsIndex := 0
var LocationResults []*time.Location
LocationResultsIndex := 0
var TimerResults []*time.Timer
TimerResultsIndex := 0
var TickerResults []*time.Ticker
TickerResultsIndex := 0
var TimeResults []*time.Time
TimeResultsIndex := 0
var MonthResults []*time.Month
MonthResultsIndex := 0
var DurationResults []*time.Duration
DurationResultsIndex := 0
for l := range gen.List {
// Bound the number of replayed operations per input.
if l > 4096 {
return 0
}
switch a := gen.List[l].Item.(type) {
case *NgoloFuzzOne_TimeNgdotString:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.String()
case *NgoloFuzzOne_TimeNgdotGoString:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.GoString()
case *NgoloFuzzOne_TimeNgdotFormat:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Format(a.TimeNgdotFormat.Layout)
case *NgoloFuzzOne_TimeNgdotAppendFormat:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.AppendFormat(a.TimeNgdotAppendFormat.B, a.TimeNgdotAppendFormat.Layout)
case *NgoloFuzzOne_Parse:
r0, r1 := time.Parse(a.Parse.Layout, a.Parse.Value)
TimeResults = append(TimeResults, &r0)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ParseInLocation:
if len(LocationResults) == 0 {
continue
}
arg2 := LocationResults[LocationResultsIndex]
LocationResultsIndex = (LocationResultsIndex + 1) % len(LocationResults)
r0, r1 := time.ParseInLocation(a.ParseInLocation.Layout, a.ParseInLocation.Value, arg2)
TimeResults = append(TimeResults, &r0)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_ParseDuration:
r0, r1 := time.ParseDuration(a.ParseDuration.S)
DurationResults = append(DurationResults, &r0)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TimerNgdotStop:
if len(TimerResults) == 0 {
continue
}
arg0 := TimerResults[TimerResultsIndex]
TimerResultsIndex = (TimerResultsIndex + 1) % len(TimerResults)
arg0.Stop()
case *NgoloFuzzOne_NewTimer:
if len(DurationResults) == 0 {
continue
}
arg0 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
r0 := time.NewTimer(arg0)
if r0 != nil{
TimerResults = append(TimerResults, r0)
}
case *NgoloFuzzOne_TimerNgdotReset:
if len(TimerResults) == 0 {
continue
}
arg0 := TimerResults[TimerResultsIndex]
TimerResultsIndex = (TimerResultsIndex + 1) % len(TimerResults)
if len(DurationResults) == 0 {
continue
}
arg1 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
arg0.Reset(arg1)
case *NgoloFuzzOne_After:
if len(DurationResults) == 0 {
continue
}
arg0 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
time.After(arg0)
case *NgoloFuzzOne_NewTicker:
if len(DurationResults) == 0 {
continue
}
arg0 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
r0 := time.NewTicker(arg0)
if r0 != nil{
TickerResults = append(TickerResults, r0)
}
case *NgoloFuzzOne_TickerNgdotStop:
if len(TickerResults) == 0 {
continue
}
arg0 := TickerResults[TickerResultsIndex]
TickerResultsIndex = (TickerResultsIndex + 1) % len(TickerResults)
arg0.Stop()
case *NgoloFuzzOne_TickerNgdotReset:
if len(TickerResults) == 0 {
continue
}
arg0 := TickerResults[TickerResultsIndex]
TickerResultsIndex = (TickerResultsIndex + 1) % len(TickerResults)
if len(DurationResults) == 0 {
continue
}
arg1 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
arg0.Reset(arg1)
case *NgoloFuzzOne_Tick:
if len(DurationResults) == 0 {
continue
}
arg0 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
time.Tick(arg0)
case *NgoloFuzzOne_TimeNgdotIsZero:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.IsZero()
case *NgoloFuzzOne_TimeNgdotAfter:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
if len(TimeResults) == 0 {
continue
}
arg1 := *TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.After(arg1)
case *NgoloFuzzOne_TimeNgdotBefore:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
if len(TimeResults) == 0 {
continue
}
arg1 := *TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Before(arg1)
case *NgoloFuzzOne_TimeNgdotCompare:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
if len(TimeResults) == 0 {
continue
}
arg1 := *TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Compare(arg1)
case *NgoloFuzzOne_TimeNgdotEqual:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
if len(TimeResults) == 0 {
continue
}
arg1 := *TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Equal(arg1)
case *NgoloFuzzOne_MonthNgdotString:
if len(MonthResults) == 0 {
continue
}
arg0 := MonthResults[MonthResultsIndex]
MonthResultsIndex = (MonthResultsIndex + 1) % len(MonthResults)
arg0.String()
case *NgoloFuzzOne_WeekdayNgdotString:
if len(WeekdayResults) == 0 {
continue
}
arg0 := WeekdayResults[WeekdayResultsIndex]
WeekdayResultsIndex = (WeekdayResultsIndex + 1) % len(WeekdayResults)
arg0.String()
case *NgoloFuzzOne_TimeNgdotDate:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
_, r1, _ := arg0.Date()
MonthResults = append(MonthResults, &r1)
case *NgoloFuzzOne_TimeNgdotYear:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Year()
case *NgoloFuzzOne_TimeNgdotMonth:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := arg0.Month()
MonthResults = append(MonthResults, &r0)
case *NgoloFuzzOne_TimeNgdotDay:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Day()
case *NgoloFuzzOne_TimeNgdotWeekday:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := arg0.Weekday()
WeekdayResults = append(WeekdayResults, &r0)
case *NgoloFuzzOne_TimeNgdotISOWeek:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.ISOWeek()
case *NgoloFuzzOne_TimeNgdotClock:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Clock()
case *NgoloFuzzOne_TimeNgdotHour:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Hour()
case *NgoloFuzzOne_TimeNgdotMinute:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Minute()
case *NgoloFuzzOne_TimeNgdotSecond:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Second()
case *NgoloFuzzOne_TimeNgdotNanosecond:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Nanosecond()
case *NgoloFuzzOne_TimeNgdotYearDay:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.YearDay()
case *NgoloFuzzOne_DurationNgdotString:
if len(DurationResults) == 0 {
continue
}
arg0 := DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
arg0.String()
case *NgoloFuzzOne_DurationNgdotNanoseconds:
if len(DurationResults) == 0 {
continue
}
arg0 := DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
arg0.Nanoseconds()
case *NgoloFuzzOne_DurationNgdotMicroseconds:
if len(DurationResults) == 0 {
continue
}
arg0 := DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
arg0.Microseconds()
case *NgoloFuzzOne_DurationNgdotMilliseconds:
if len(DurationResults) == 0 {
continue
}
arg0 := DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
arg0.Milliseconds()
case *NgoloFuzzOne_DurationNgdotSeconds:
if len(DurationResults) == 0 {
continue
}
arg0 := DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
arg0.Seconds()
case *NgoloFuzzOne_DurationNgdotMinutes:
if len(DurationResults) == 0 {
continue
}
arg0 := DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
arg0.Minutes()
case *NgoloFuzzOne_DurationNgdotHours:
if len(DurationResults) == 0 {
continue
}
arg0 := DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
arg0.Hours()
case *NgoloFuzzOne_DurationNgdotTruncate:
if len(DurationResults) == 0 {
continue
}
arg0 := DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
if len(DurationResults) == 0 {
continue
}
arg1 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
r0 := arg0.Truncate(arg1)
DurationResults = append(DurationResults, &r0)
case *NgoloFuzzOne_DurationNgdotRound:
if len(DurationResults) == 0 {
continue
}
arg0 := DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
if len(DurationResults) == 0 {
continue
}
arg1 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
r0 := arg0.Round(arg1)
DurationResults = append(DurationResults, &r0)
case *NgoloFuzzOne_DurationNgdotAbs:
if len(DurationResults) == 0 {
continue
}
arg0 := DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
r0 := arg0.Abs()
DurationResults = append(DurationResults, &r0)
case *NgoloFuzzOne_TimeNgdotAdd:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
if len(DurationResults) == 0 {
continue
}
arg1 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
r0 := arg0.Add(arg1)
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_TimeNgdotSub:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
if len(TimeResults) == 0 {
continue
}
arg1 := *TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := arg0.Sub(arg1)
DurationResults = append(DurationResults, &r0)
case *NgoloFuzzOne_Since:
if len(TimeResults) == 0 {
continue
}
arg0 := *TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := time.Since(arg0)
DurationResults = append(DurationResults, &r0)
case *NgoloFuzzOne_Until:
if len(TimeResults) == 0 {
continue
}
arg0 := *TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := time.Until(arg0)
DurationResults = append(DurationResults, &r0)
case *NgoloFuzzOne_TimeNgdotAddDate:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg1 := int(a.TimeNgdotAddDate.Years)
arg2 := int(a.TimeNgdotAddDate.Months)
arg3 := int(a.TimeNgdotAddDate.Days)
r0 := arg0.AddDate(arg1, arg2, arg3)
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_Now:
r0 := time.Now()
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_TimeNgdotUTC:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := arg0.UTC()
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_TimeNgdotLocal:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := arg0.Local()
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_TimeNgdotIn:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
if len(LocationResults) == 0 {
continue
}
arg1 := LocationResults[LocationResultsIndex]
LocationResultsIndex = (LocationResultsIndex + 1) % len(LocationResults)
r0 := arg0.In(arg1)
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_TimeNgdotLocation:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := arg0.Location()
if r0 != nil{
LocationResults = append(LocationResults, r0)
}
case *NgoloFuzzOne_TimeNgdotZone:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Zone()
case *NgoloFuzzOne_TimeNgdotZoneBounds:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0, r1 := arg0.ZoneBounds()
TimeResults = append(TimeResults, &r0)
TimeResults = append(TimeResults, &r1)
case *NgoloFuzzOne_TimeNgdotUnix:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.Unix()
case *NgoloFuzzOne_TimeNgdotUnixMilli:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.UnixMilli()
case *NgoloFuzzOne_TimeNgdotUnixMicro:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.UnixMicro()
case *NgoloFuzzOne_TimeNgdotUnixNano:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.UnixNano()
case *NgoloFuzzOne_TimeNgdotAppendBinary:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
_, r1 := arg0.AppendBinary(a.TimeNgdotAppendBinary.B)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TimeNgdotMarshalBinary:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
_, r1 := arg0.MarshalBinary()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TimeNgdotUnmarshalBinary:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := arg0.UnmarshalBinary(a.TimeNgdotUnmarshalBinary.Data)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_TimeNgdotGobEncode:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
_, r1 := arg0.GobEncode()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TimeNgdotGobDecode:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := arg0.GobDecode(a.TimeNgdotGobDecode.Data)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_TimeNgdotMarshalJSON:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
_, r1 := arg0.MarshalJSON()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TimeNgdotUnmarshalJSON:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := arg0.UnmarshalJSON(a.TimeNgdotUnmarshalJSON.Data)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_TimeNgdotAppendText:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
_, r1 := arg0.AppendText(a.TimeNgdotAppendText.B)
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TimeNgdotMarshalText:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
_, r1 := arg0.MarshalText()
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_TimeNgdotUnmarshalText:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
r0 := arg0.UnmarshalText(a.TimeNgdotUnmarshalText.Data)
if r0 != nil{
r0.Error()
return 0
}
case *NgoloFuzzOne_Unix:
r0 := time.Unix(a.Unix.Sec, a.Unix.Nsec)
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_UnixMilli:
r0 := time.UnixMilli(a.UnixMilli.Msec)
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_UnixMicro:
r0 := time.UnixMicro(a.UnixMicro.Usec)
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_TimeNgdotIsDST:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
arg0.IsDST()
case *NgoloFuzzOne_Date:
arg0 := int(a.Date.Year)
if len(MonthResults) == 0 {
continue
}
arg1 := *MonthResults[MonthResultsIndex]
MonthResultsIndex = (MonthResultsIndex + 1) % len(MonthResults)
arg2 := int(a.Date.Day)
arg3 := int(a.Date.Hour)
arg4 := int(a.Date.Min)
arg5 := int(a.Date.Sec)
arg6 := int(a.Date.Nsec)
if len(LocationResults) == 0 {
continue
}
arg7 := LocationResults[LocationResultsIndex]
LocationResultsIndex = (LocationResultsIndex + 1) % len(LocationResults)
r0 := time.Date(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_TimeNgdotTruncate:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
if len(DurationResults) == 0 {
continue
}
arg1 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
r0 := arg0.Truncate(arg1)
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_TimeNgdotRound:
if len(TimeResults) == 0 {
continue
}
arg0 := TimeResults[TimeResultsIndex]
TimeResultsIndex = (TimeResultsIndex + 1) % len(TimeResults)
if len(DurationResults) == 0 {
continue
}
arg1 := *DurationResults[DurationResultsIndex]
DurationResultsIndex = (DurationResultsIndex + 1) % len(DurationResults)
r0 := arg0.Round(arg1)
TimeResults = append(TimeResults, &r0)
case *NgoloFuzzOne_LocationNgdotString:
if len(LocationResults) == 0 {
continue
}
arg0 := LocationResults[LocationResultsIndex]
LocationResultsIndex = (LocationResultsIndex + 1) % len(LocationResults)
arg0.String()
case *NgoloFuzzOne_FixedZone:
arg1 := int(a.FixedZone.Offset)
r0 := time.FixedZone(a.FixedZone.Name, arg1)
if r0 != nil{
LocationResults = append(LocationResults, r0)
}
case *NgoloFuzzOne_LoadLocation:
r0, r1 := time.LoadLocation(a.LoadLocation.Name)
if r0 != nil{
LocationResults = append(LocationResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
case *NgoloFuzzOne_LoadLocationFromTZData:
r0, r1 := time.LoadLocationFromTZData(a.LoadLocationFromTZData.Name, a.LoadLocationFromTZData.Data)
if r0 != nil{
LocationResults = append(LocationResults, r0)
}
if r1 != nil{
r1.Error()
return 0
}
}
}
return 1
}
// PrintNG_List renders the fuzzed call sequence in gen as a reproducible Go
// program fragment, writing one source line per interpreted API call to w.
//
// It mirrors the interpreter (FuzzNG_List): every result-producing call is
// printed as a numbered variable declaration (Time0, Duration3, Location1, …)
// and the same round-robin cursors are advanced, so the variable numbers in
// the printed program match the values the fuzzer actually used. Calls whose
// receiver or argument pool is still empty are skipped, exactly as the
// interpreter skips them. Errors from w.WriteString are intentionally
// ignored: this is best-effort reproduction output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	// XxxNb counts how many Xxx variables have been declared so far;
	// XxxResultsIndex is the round-robin cursor selecting the next
	// receiver/argument among them. Invariant: index < Nb whenever Nb > 0.
	WeekdayNb := 0
	WeekdayResultsIndex := 0
	LocationNb := 0
	LocationResultsIndex := 0
	TimerNb := 0
	TimerResultsIndex := 0
	TickerNb := 0
	TickerResultsIndex := 0
	TimeNb := 0
	TimeResultsIndex := 0
	MonthNb := 0
	MonthResultsIndex := 0
	DurationNb := 0
	DurationResultsIndex := 0
	for l := range gen.List {
		// NOTE: `continue` inside the switch continues the surrounding
		// for loop, i.e. it skips the current fuzzed operation.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_TimeNgdotString:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.String()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotGoString:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.GoString()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotFormat:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Format(%#+v)\n", TimeResultsIndex, a.TimeNgdotFormat.Layout))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotAppendFormat:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.AppendFormat(%#+v, %#+v)\n", TimeResultsIndex, a.TimeNgdotAppendFormat.B, a.TimeNgdotAppendFormat.Layout))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_Parse:
			w.WriteString(fmt.Sprintf("Time%d, _ := time.Parse(%#+v, %#+v)\n", TimeNb, a.Parse.Layout, a.Parse.Value))
			TimeNb++
		case *NgoloFuzzOne_ParseInLocation:
			if LocationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d, _ := time.ParseInLocation(%#+v, %#+v, Location%d)\n", TimeNb, a.ParseInLocation.Layout, a.ParseInLocation.Value, (LocationResultsIndex+0)%LocationNb))
			TimeNb++
			LocationResultsIndex = (LocationResultsIndex + 1) % LocationNb
		case *NgoloFuzzOne_ParseDuration:
			w.WriteString(fmt.Sprintf("Duration%d, _ := time.ParseDuration(%#+v)\n", DurationNb, a.ParseDuration.S))
			DurationNb++
		case *NgoloFuzzOne_TimerNgdotStop:
			if TimerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Timer%d.Stop()\n", TimerResultsIndex))
			TimerResultsIndex = (TimerResultsIndex + 1) % TimerNb
		case *NgoloFuzzOne_NewTimer:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Timer%d := time.NewTimer(Duration%d)\n", TimerNb, (DurationResultsIndex+0)%DurationNb))
			TimerNb++
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_TimerNgdotReset:
			if TimerNb == 0 || DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Timer%d.Reset(Duration%d)\n", TimerResultsIndex, (DurationResultsIndex+0)%DurationNb))
			TimerResultsIndex = (TimerResultsIndex + 1) % TimerNb
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_After:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("time.After(Duration%d)\n", (DurationResultsIndex+0)%DurationNb))
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_NewTicker:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Ticker%d := time.NewTicker(Duration%d)\n", TickerNb, (DurationResultsIndex+0)%DurationNb))
			TickerNb++
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_TickerNgdotStop:
			if TickerNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Ticker%d.Stop()\n", TickerResultsIndex))
			TickerResultsIndex = (TickerResultsIndex + 1) % TickerNb
		case *NgoloFuzzOne_TickerNgdotReset:
			if TickerNb == 0 || DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Ticker%d.Reset(Duration%d)\n", TickerResultsIndex, (DurationResultsIndex+0)%DurationNb))
			TickerResultsIndex = (TickerResultsIndex + 1) % TickerNb
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_Tick:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("time.Tick(Duration%d)\n", (DurationResultsIndex+0)%DurationNb))
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_TimeNgdotIsZero:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.IsZero()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotAfter:
			if TimeNb == 0 {
				continue
			}
			// Two Time operands: receiver at the cursor, argument at the
			// next slot; the cursor then advances past both.
			w.WriteString(fmt.Sprintf("Time%d.After(Time%d)\n", TimeResultsIndex, (TimeResultsIndex+1)%TimeNb))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotBefore:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Before(Time%d)\n", TimeResultsIndex, (TimeResultsIndex+1)%TimeNb))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotCompare:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Compare(Time%d)\n", TimeResultsIndex, (TimeResultsIndex+1)%TimeNb))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotEqual:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Equal(Time%d)\n", TimeResultsIndex, (TimeResultsIndex+1)%TimeNb))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_MonthNgdotString:
			if MonthNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Month%d.String()\n", MonthResultsIndex))
			MonthResultsIndex = (MonthResultsIndex + 1) % MonthNb
		case *NgoloFuzzOne_WeekdayNgdotString:
			if WeekdayNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Weekday%d.String()\n", WeekdayResultsIndex))
			WeekdayResultsIndex = (WeekdayResultsIndex + 1) % WeekdayNb
		case *NgoloFuzzOne_TimeNgdotDate:
			if TimeNb == 0 {
				continue
			}
			// Only the Month result is captured; year and day are discarded.
			w.WriteString(fmt.Sprintf("_, Month%d, _ := Time%d.Date()\n", MonthNb, TimeResultsIndex))
			MonthNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotYear:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Year()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotMonth:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Month%d := Time%d.Month()\n", MonthNb, TimeResultsIndex))
			MonthNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotDay:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Day()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotWeekday:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Weekday%d := Time%d.Weekday()\n", WeekdayNb, TimeResultsIndex))
			WeekdayNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotISOWeek:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.ISOWeek()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotClock:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Clock()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotHour:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Hour()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotMinute:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Minute()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotSecond:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Second()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotNanosecond:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Nanosecond()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotYearDay:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.YearDay()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_DurationNgdotString:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d.String()\n", DurationResultsIndex))
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_DurationNgdotNanoseconds:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d.Nanoseconds()\n", DurationResultsIndex))
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_DurationNgdotMicroseconds:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d.Microseconds()\n", DurationResultsIndex))
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_DurationNgdotMilliseconds:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d.Milliseconds()\n", DurationResultsIndex))
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_DurationNgdotSeconds:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d.Seconds()\n", DurationResultsIndex))
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_DurationNgdotMinutes:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d.Minutes()\n", DurationResultsIndex))
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_DurationNgdotHours:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d.Hours()\n", DurationResultsIndex))
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_DurationNgdotTruncate:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d := Duration%d.Truncate(Duration%d)\n", DurationNb, DurationResultsIndex, (DurationResultsIndex+1)%DurationNb))
			DurationNb++
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_DurationNgdotRound:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d := Duration%d.Round(Duration%d)\n", DurationNb, DurationResultsIndex, (DurationResultsIndex+1)%DurationNb))
			DurationNb++
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_DurationNgdotAbs:
			if DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d := Duration%d.Abs()\n", DurationNb, DurationResultsIndex))
			DurationNb++
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_TimeNgdotAdd:
			if TimeNb == 0 || DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d := Time%d.Add(Duration%d)\n", TimeNb, TimeResultsIndex, (DurationResultsIndex+0)%DurationNb))
			TimeNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_TimeNgdotSub:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d := Time%d.Sub(Time%d)\n", DurationNb, TimeResultsIndex, (TimeResultsIndex+1)%TimeNb))
			DurationNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_Since:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d := time.Since(Time%d)\n", DurationNb, (TimeResultsIndex+0)%TimeNb))
			DurationNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_Until:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Duration%d := time.Until(Time%d)\n", DurationNb, (TimeResultsIndex+0)%TimeNb))
			DurationNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotAddDate:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d := Time%d.AddDate(int(%#+v), int(%#+v), int(%#+v))\n", TimeNb, TimeResultsIndex, a.TimeNgdotAddDate.Years, a.TimeNgdotAddDate.Months, a.TimeNgdotAddDate.Days))
			TimeNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_Now:
			w.WriteString(fmt.Sprintf("Time%d := time.Now()\n", TimeNb))
			TimeNb++
		case *NgoloFuzzOne_TimeNgdotUTC:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d := Time%d.UTC()\n", TimeNb, TimeResultsIndex))
			TimeNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotLocal:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d := Time%d.Local()\n", TimeNb, TimeResultsIndex))
			TimeNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotIn:
			if TimeNb == 0 || LocationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d := Time%d.In(Location%d)\n", TimeNb, TimeResultsIndex, (LocationResultsIndex+0)%LocationNb))
			TimeNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
			LocationResultsIndex = (LocationResultsIndex + 1) % LocationNb
		case *NgoloFuzzOne_TimeNgdotLocation:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Location%d := Time%d.Location()\n", LocationNb, TimeResultsIndex))
			LocationNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotZone:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Zone()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotZoneBounds:
			if TimeNb == 0 {
				continue
			}
			// ZoneBounds yields two Times, so each result gets its own
			// number. (The original passed TimeNb for both verbs, which
			// emitted `TimeN, TimeN := ...` — an invalid repeated name on
			// the left side of := in the replay program.)
			w.WriteString(fmt.Sprintf("Time%d, Time%d := Time%d.ZoneBounds()\n", TimeNb, TimeNb+1, TimeResultsIndex))
			TimeNb += 2
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotUnix:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.Unix()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotUnixMilli:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.UnixMilli()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotUnixMicro:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.UnixMicro()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotUnixNano:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.UnixNano()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotAppendBinary:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.AppendBinary(%#+v)\n", TimeResultsIndex, a.TimeNgdotAppendBinary.B))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotMarshalBinary:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.MarshalBinary()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotUnmarshalBinary:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.UnmarshalBinary(%#+v)\n", TimeResultsIndex, a.TimeNgdotUnmarshalBinary.Data))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotGobEncode:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.GobEncode()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotGobDecode:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.GobDecode(%#+v)\n", TimeResultsIndex, a.TimeNgdotGobDecode.Data))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotMarshalJSON:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.MarshalJSON()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotUnmarshalJSON:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.UnmarshalJSON(%#+v)\n", TimeResultsIndex, a.TimeNgdotUnmarshalJSON.Data))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotAppendText:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.AppendText(%#+v)\n", TimeResultsIndex, a.TimeNgdotAppendText.B))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotMarshalText:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.MarshalText()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_TimeNgdotUnmarshalText:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.UnmarshalText(%#+v)\n", TimeResultsIndex, a.TimeNgdotUnmarshalText.Data))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_Unix:
			w.WriteString(fmt.Sprintf("Time%d := time.Unix(%#+v, %#+v)\n", TimeNb, a.Unix.Sec, a.Unix.Nsec))
			TimeNb++
		case *NgoloFuzzOne_UnixMilli:
			w.WriteString(fmt.Sprintf("Time%d := time.UnixMilli(%#+v)\n", TimeNb, a.UnixMilli.Msec))
			TimeNb++
		case *NgoloFuzzOne_UnixMicro:
			w.WriteString(fmt.Sprintf("Time%d := time.UnixMicro(%#+v)\n", TimeNb, a.UnixMicro.Usec))
			TimeNb++
		case *NgoloFuzzOne_TimeNgdotIsDST:
			if TimeNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d.IsDST()\n", TimeResultsIndex))
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
		case *NgoloFuzzOne_Date:
			if MonthNb == 0 || LocationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d := time.Date(int(%#+v), Month%d, int(%#+v), int(%#+v), int(%#+v), int(%#+v), int(%#+v), Location%d)\n", TimeNb, a.Date.Year, (MonthResultsIndex+0)%MonthNb, a.Date.Day, a.Date.Hour, a.Date.Min, a.Date.Sec, a.Date.Nsec, (LocationResultsIndex+0)%LocationNb))
			TimeNb++
			MonthResultsIndex = (MonthResultsIndex + 1) % MonthNb
			LocationResultsIndex = (LocationResultsIndex + 1) % LocationNb
		case *NgoloFuzzOne_TimeNgdotTruncate:
			if TimeNb == 0 || DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d := Time%d.Truncate(Duration%d)\n", TimeNb, TimeResultsIndex, (DurationResultsIndex+0)%DurationNb))
			TimeNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_TimeNgdotRound:
			if TimeNb == 0 || DurationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Time%d := Time%d.Round(Duration%d)\n", TimeNb, TimeResultsIndex, (DurationResultsIndex+0)%DurationNb))
			TimeNb++
			TimeResultsIndex = (TimeResultsIndex + 1) % TimeNb
			DurationResultsIndex = (DurationResultsIndex + 1) % DurationNb
		case *NgoloFuzzOne_LocationNgdotString:
			if LocationNb == 0 {
				continue
			}
			w.WriteString(fmt.Sprintf("Location%d.String()\n", LocationResultsIndex))
			LocationResultsIndex = (LocationResultsIndex + 1) % LocationNb
		case *NgoloFuzzOne_FixedZone:
			w.WriteString(fmt.Sprintf("Location%d := time.FixedZone(%#+v, int(%#+v))\n", LocationNb, a.FixedZone.Name, a.FixedZone.Offset))
			LocationNb++
		case *NgoloFuzzOne_LoadLocation:
			w.WriteString(fmt.Sprintf("Location%d, _ := time.LoadLocation(%#+v)\n", LocationNb, a.LoadLocation.Name))
			LocationNb++
		case *NgoloFuzzOne_LoadLocationFromTZData:
			w.WriteString(fmt.Sprintf("Location%d, _ := time.LoadLocationFromTZData(%#+v, %#+v)\n", LocationNb, a.LoadLocationFromTZData.Name, a.LoadLocationFromTZData.Data))
			LocationNb++
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_time
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards ensuring the generated code and the linked
// runtime/protoimpl package agree on a compatible API version range.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// TimeNgdotStringArgs is the protoc-generated, field-less message describing
// a fuzzed time.Time.String call (the method takes no arguments).
// Standard protoc-gen-go boilerplate; do not edit by hand.
type TimeNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *TimeNgdotStringArgs) Reset() {
	*x = TimeNgdotStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *TimeNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*TimeNgdotStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *TimeNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TimeNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// TimeNgdotGoStringArgs is the protoc-generated, field-less message describing
// a fuzzed time.Time.GoString call. Standard protoc-gen-go boilerplate.
type TimeNgdotGoStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *TimeNgdotGoStringArgs) Reset() {
	*x = TimeNgdotGoStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *TimeNgdotGoStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*TimeNgdotGoStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *TimeNgdotGoStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TimeNgdotGoStringArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotGoStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// TimeNgdotFormatArgs carries the layout-string argument for a fuzzed
// time.Time.Format call. Standard protoc-gen-go boilerplate.
type TimeNgdotFormatArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Layout        string                 `protobuf:"bytes,1,opt,name=layout,proto3" json:"layout,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *TimeNgdotFormatArgs) Reset() {
	*x = TimeNgdotFormatArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *TimeNgdotFormatArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*TimeNgdotFormatArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *TimeNgdotFormatArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TimeNgdotFormatArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotFormatArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}

// GetLayout returns the Layout field; safe to call on a nil receiver.
func (x *TimeNgdotFormatArgs) GetLayout() string {
	if x != nil {
		return x.Layout
	}
	return ""
}
// TimeNgdotAppendFormatArgs carries the destination buffer and layout string
// for a fuzzed time.Time.AppendFormat call. Standard protoc-gen-go boilerplate.
type TimeNgdotAppendFormatArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	B             []byte                 `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
	Layout        string                 `protobuf:"bytes,2,opt,name=layout,proto3" json:"layout,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *TimeNgdotAppendFormatArgs) Reset() {
	*x = TimeNgdotAppendFormatArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *TimeNgdotAppendFormatArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*TimeNgdotAppendFormatArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *TimeNgdotAppendFormatArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TimeNgdotAppendFormatArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotAppendFormatArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}

// GetB returns the B field; safe to call on a nil receiver.
func (x *TimeNgdotAppendFormatArgs) GetB() []byte {
	if x != nil {
		return x.B
	}
	return nil
}

// GetLayout returns the Layout field; safe to call on a nil receiver.
func (x *TimeNgdotAppendFormatArgs) GetLayout() string {
	if x != nil {
		return x.Layout
	}
	return ""
}
// ParseArgs carries the layout and value strings for a fuzzed time.Parse
// call. Standard protoc-gen-go boilerplate.
type ParseArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Layout        string                 `protobuf:"bytes,1,opt,name=layout,proto3" json:"layout,omitempty"`
	Value         string                 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *ParseArgs) Reset() {
	*x = ParseArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *ParseArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ParseArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *ParseArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ParseArgs.ProtoReflect.Descriptor instead.
func (*ParseArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}

// GetLayout returns the Layout field; safe to call on a nil receiver.
func (x *ParseArgs) GetLayout() string {
	if x != nil {
		return x.Layout
	}
	return ""
}

// GetValue returns the Value field; safe to call on a nil receiver.
func (x *ParseArgs) GetValue() string {
	if x != nil {
		return x.Value
	}
	return ""
}
// ParseInLocationArgs carries the layout and value strings for a fuzzed
// time.ParseInLocation call (the Location argument is taken from the fuzzer's
// result pool, not from this message). Standard protoc-gen-go boilerplate.
type ParseInLocationArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Layout        string                 `protobuf:"bytes,1,opt,name=layout,proto3" json:"layout,omitempty"`
	Value         string                 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *ParseInLocationArgs) Reset() {
	*x = ParseInLocationArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *ParseInLocationArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ParseInLocationArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *ParseInLocationArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ParseInLocationArgs.ProtoReflect.Descriptor instead.
func (*ParseInLocationArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}

// GetLayout returns the Layout field; safe to call on a nil receiver.
func (x *ParseInLocationArgs) GetLayout() string {
	if x != nil {
		return x.Layout
	}
	return ""
}

// GetValue returns the Value field; safe to call on a nil receiver.
func (x *ParseInLocationArgs) GetValue() string {
	if x != nil {
		return x.Value
	}
	return ""
}
// ParseDurationArgs carries the duration string for a fuzzed
// time.ParseDuration call. Standard protoc-gen-go boilerplate.
type ParseDurationArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *ParseDurationArgs) Reset() {
	*x = ParseDurationArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *ParseDurationArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*ParseDurationArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *ParseDurationArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ParseDurationArgs.ProtoReflect.Descriptor instead.
func (*ParseDurationArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}

// GetS returns the S field; safe to call on a nil receiver.
func (x *ParseDurationArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// TimerNgdotStopArgs is the protoc-generated, field-less message describing
// a fuzzed time.Timer.Stop call. Standard protoc-gen-go boilerplate.
type TimerNgdotStopArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *TimerNgdotStopArgs) Reset() {
	*x = TimerNgdotStopArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *TimerNgdotStopArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*TimerNgdotStopArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *TimerNgdotStopArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TimerNgdotStopArgs.ProtoReflect.Descriptor instead.
func (*TimerNgdotStopArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// NewTimerArgs is the protoc-generated, field-less message describing a
// fuzzed time.NewTimer call (the Duration argument is drawn from the
// fuzzer's result pool). Standard protoc-gen-go boilerplate.
type NewTimerArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *NewTimerArgs) Reset() {
	*x = NewTimerArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *NewTimerArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NewTimerArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *NewTimerArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewTimerArgs.ProtoReflect.Descriptor instead.
func (*NewTimerArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// TimerNgdotResetArgs is the protoc-generated, field-less message describing
// a fuzzed time.Timer.Reset call. Standard protoc-gen-go boilerplate.
type TimerNgdotResetArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *TimerNgdotResetArgs) Reset() {
	*x = TimerNgdotResetArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *TimerNgdotResetArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*TimerNgdotResetArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *TimerNgdotResetArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TimerNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*TimerNgdotResetArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// AfterArgs is the protoc-generated, field-less message describing a fuzzed
// time.After call. Standard protoc-gen-go boilerplate.
type AfterArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *AfterArgs) Reset() {
	*x = AfterArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *AfterArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*AfterArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *AfterArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AfterArgs.ProtoReflect.Descriptor instead.
func (*AfterArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// NewTickerArgs is the protoc-generated, field-less message describing a
// fuzzed time.NewTicker call. Standard protoc-gen-go boilerplate.
type NewTickerArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *NewTickerArgs) Reset() {
	*x = NewTickerArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns the canonical protobuf text form of the message.
func (x *NewTickerArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks the type as a protobuf message.
func (*NewTickerArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view, lazily caching message info.
func (x *NewTickerArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NewTickerArgs.ProtoReflect.Descriptor instead.
func (*NewTickerArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
type TickerNgdotStopArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TickerNgdotStopArgs) Reset() {
*x = TickerNgdotStopArgs{}
mi := &file_ngolofuzz_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TickerNgdotStopArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TickerNgdotStopArgs) ProtoMessage() {}
func (x *TickerNgdotStopArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[12]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TickerNgdotStopArgs.ProtoReflect.Descriptor instead.
func (*TickerNgdotStopArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
type TickerNgdotResetArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TickerNgdotResetArgs) Reset() {
*x = TickerNgdotResetArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TickerNgdotResetArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TickerNgdotResetArgs) ProtoMessage() {}
func (x *TickerNgdotResetArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TickerNgdotResetArgs.ProtoReflect.Descriptor instead.
func (*TickerNgdotResetArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
type TickArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TickArgs) Reset() {
*x = TickArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TickArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TickArgs) ProtoMessage() {}
func (x *TickArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TickArgs.ProtoReflect.Descriptor instead.
func (*TickArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
type TimeNgdotIsZeroArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotIsZeroArgs) Reset() {
*x = TimeNgdotIsZeroArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotIsZeroArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotIsZeroArgs) ProtoMessage() {}
func (x *TimeNgdotIsZeroArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotIsZeroArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotIsZeroArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
type TimeNgdotAfterArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotAfterArgs) Reset() {
*x = TimeNgdotAfterArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotAfterArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotAfterArgs) ProtoMessage() {}
func (x *TimeNgdotAfterArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotAfterArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotAfterArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
type TimeNgdotBeforeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotBeforeArgs) Reset() {
*x = TimeNgdotBeforeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotBeforeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotBeforeArgs) ProtoMessage() {}
func (x *TimeNgdotBeforeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotBeforeArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotBeforeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
type TimeNgdotCompareArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotCompareArgs) Reset() {
*x = TimeNgdotCompareArgs{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotCompareArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotCompareArgs) ProtoMessage() {}
func (x *TimeNgdotCompareArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotCompareArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotCompareArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
type TimeNgdotEqualArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotEqualArgs) Reset() {
*x = TimeNgdotEqualArgs{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotEqualArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotEqualArgs) ProtoMessage() {}
func (x *TimeNgdotEqualArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotEqualArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotEqualArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
type MonthNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *MonthNgdotStringArgs) Reset() {
*x = MonthNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *MonthNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MonthNgdotStringArgs) ProtoMessage() {}
func (x *MonthNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MonthNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*MonthNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
type WeekdayNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *WeekdayNgdotStringArgs) Reset() {
*x = WeekdayNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *WeekdayNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WeekdayNgdotStringArgs) ProtoMessage() {}
func (x *WeekdayNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WeekdayNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*WeekdayNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{21}
}
type TimeNgdotDateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotDateArgs) Reset() {
*x = TimeNgdotDateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotDateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotDateArgs) ProtoMessage() {}
func (x *TimeNgdotDateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotDateArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotDateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{22}
}
type TimeNgdotYearArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotYearArgs) Reset() {
*x = TimeNgdotYearArgs{}
mi := &file_ngolofuzz_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotYearArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotYearArgs) ProtoMessage() {}
func (x *TimeNgdotYearArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotYearArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotYearArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{23}
}
type TimeNgdotMonthArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotMonthArgs) Reset() {
*x = TimeNgdotMonthArgs{}
mi := &file_ngolofuzz_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotMonthArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotMonthArgs) ProtoMessage() {}
func (x *TimeNgdotMonthArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotMonthArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotMonthArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{24}
}
type TimeNgdotDayArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotDayArgs) Reset() {
*x = TimeNgdotDayArgs{}
mi := &file_ngolofuzz_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotDayArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotDayArgs) ProtoMessage() {}
func (x *TimeNgdotDayArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotDayArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotDayArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{25}
}
type TimeNgdotWeekdayArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotWeekdayArgs) Reset() {
*x = TimeNgdotWeekdayArgs{}
mi := &file_ngolofuzz_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotWeekdayArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotWeekdayArgs) ProtoMessage() {}
func (x *TimeNgdotWeekdayArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotWeekdayArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotWeekdayArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{26}
}
type TimeNgdotISOWeekArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotISOWeekArgs) Reset() {
*x = TimeNgdotISOWeekArgs{}
mi := &file_ngolofuzz_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotISOWeekArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotISOWeekArgs) ProtoMessage() {}
func (x *TimeNgdotISOWeekArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotISOWeekArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotISOWeekArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{27}
}
type TimeNgdotClockArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotClockArgs) Reset() {
*x = TimeNgdotClockArgs{}
mi := &file_ngolofuzz_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotClockArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotClockArgs) ProtoMessage() {}
func (x *TimeNgdotClockArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotClockArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotClockArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{28}
}
type TimeNgdotHourArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotHourArgs) Reset() {
*x = TimeNgdotHourArgs{}
mi := &file_ngolofuzz_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotHourArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotHourArgs) ProtoMessage() {}
func (x *TimeNgdotHourArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotHourArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotHourArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{29}
}
type TimeNgdotMinuteArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotMinuteArgs) Reset() {
*x = TimeNgdotMinuteArgs{}
mi := &file_ngolofuzz_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotMinuteArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotMinuteArgs) ProtoMessage() {}
func (x *TimeNgdotMinuteArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotMinuteArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotMinuteArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{30}
}
type TimeNgdotSecondArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotSecondArgs) Reset() {
*x = TimeNgdotSecondArgs{}
mi := &file_ngolofuzz_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotSecondArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotSecondArgs) ProtoMessage() {}
func (x *TimeNgdotSecondArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotSecondArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotSecondArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{31}
}
type TimeNgdotNanosecondArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotNanosecondArgs) Reset() {
*x = TimeNgdotNanosecondArgs{}
mi := &file_ngolofuzz_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotNanosecondArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotNanosecondArgs) ProtoMessage() {}
func (x *TimeNgdotNanosecondArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotNanosecondArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotNanosecondArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{32}
}
type TimeNgdotYearDayArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotYearDayArgs) Reset() {
*x = TimeNgdotYearDayArgs{}
mi := &file_ngolofuzz_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotYearDayArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotYearDayArgs) ProtoMessage() {}
func (x *TimeNgdotYearDayArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotYearDayArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotYearDayArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{33}
}
type DurationNgdotStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DurationNgdotStringArgs) Reset() {
*x = DurationNgdotStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DurationNgdotStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurationNgdotStringArgs) ProtoMessage() {}
func (x *DurationNgdotStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[34]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurationNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*DurationNgdotStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{34}
}
type DurationNgdotNanosecondsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DurationNgdotNanosecondsArgs) Reset() {
*x = DurationNgdotNanosecondsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DurationNgdotNanosecondsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurationNgdotNanosecondsArgs) ProtoMessage() {}
func (x *DurationNgdotNanosecondsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[35]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurationNgdotNanosecondsArgs.ProtoReflect.Descriptor instead.
func (*DurationNgdotNanosecondsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{35}
}
type DurationNgdotMicrosecondsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DurationNgdotMicrosecondsArgs) Reset() {
*x = DurationNgdotMicrosecondsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DurationNgdotMicrosecondsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurationNgdotMicrosecondsArgs) ProtoMessage() {}
func (x *DurationNgdotMicrosecondsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[36]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurationNgdotMicrosecondsArgs.ProtoReflect.Descriptor instead.
func (*DurationNgdotMicrosecondsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{36}
}
type DurationNgdotMillisecondsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DurationNgdotMillisecondsArgs) Reset() {
*x = DurationNgdotMillisecondsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DurationNgdotMillisecondsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurationNgdotMillisecondsArgs) ProtoMessage() {}
func (x *DurationNgdotMillisecondsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[37]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurationNgdotMillisecondsArgs.ProtoReflect.Descriptor instead.
func (*DurationNgdotMillisecondsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{37}
}
type DurationNgdotSecondsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DurationNgdotSecondsArgs) Reset() {
*x = DurationNgdotSecondsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DurationNgdotSecondsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurationNgdotSecondsArgs) ProtoMessage() {}
func (x *DurationNgdotSecondsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[38]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurationNgdotSecondsArgs.ProtoReflect.Descriptor instead.
func (*DurationNgdotSecondsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{38}
}
type DurationNgdotMinutesArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DurationNgdotMinutesArgs) Reset() {
*x = DurationNgdotMinutesArgs{}
mi := &file_ngolofuzz_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DurationNgdotMinutesArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurationNgdotMinutesArgs) ProtoMessage() {}
func (x *DurationNgdotMinutesArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[39]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurationNgdotMinutesArgs.ProtoReflect.Descriptor instead.
func (*DurationNgdotMinutesArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{39}
}
type DurationNgdotHoursArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DurationNgdotHoursArgs) Reset() {
*x = DurationNgdotHoursArgs{}
mi := &file_ngolofuzz_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DurationNgdotHoursArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurationNgdotHoursArgs) ProtoMessage() {}
func (x *DurationNgdotHoursArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[40]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurationNgdotHoursArgs.ProtoReflect.Descriptor instead.
func (*DurationNgdotHoursArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{40}
}
type DurationNgdotTruncateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DurationNgdotTruncateArgs) Reset() {
*x = DurationNgdotTruncateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DurationNgdotTruncateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurationNgdotTruncateArgs) ProtoMessage() {}
func (x *DurationNgdotTruncateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[41]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurationNgdotTruncateArgs.ProtoReflect.Descriptor instead.
func (*DurationNgdotTruncateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{41}
}
type DurationNgdotRoundArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DurationNgdotRoundArgs) Reset() {
*x = DurationNgdotRoundArgs{}
mi := &file_ngolofuzz_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DurationNgdotRoundArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurationNgdotRoundArgs) ProtoMessage() {}
func (x *DurationNgdotRoundArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[42]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurationNgdotRoundArgs.ProtoReflect.Descriptor instead.
func (*DurationNgdotRoundArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{42}
}
type DurationNgdotAbsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DurationNgdotAbsArgs) Reset() {
*x = DurationNgdotAbsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DurationNgdotAbsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurationNgdotAbsArgs) ProtoMessage() {}
func (x *DurationNgdotAbsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[43]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurationNgdotAbsArgs.ProtoReflect.Descriptor instead.
func (*DurationNgdotAbsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{43}
}
type TimeNgdotAddArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TimeNgdotAddArgs) Reset() {
*x = TimeNgdotAddArgs{}
mi := &file_ngolofuzz_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TimeNgdotAddArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotAddArgs) ProtoMessage() {}
func (x *TimeNgdotAddArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[44]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotAddArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotAddArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{44}
}
// TimeNgdotSubArgs is a generated protobuf message (message index 45 in
// this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotSubArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotSubArgs) Reset() {
*x = TimeNgdotSubArgs{}
mi := &file_ngolofuzz_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotSubArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotSubArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotSubArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[45]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotSubArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotSubArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{45}
}
// SinceArgs is a generated protobuf message (message index 46 in this
// file); it declares no user fields — only protoimpl bookkeeping.
type SinceArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *SinceArgs) Reset() {
*x = SinceArgs{}
mi := &file_ngolofuzz_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *SinceArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SinceArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *SinceArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[46]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SinceArgs.ProtoReflect.Descriptor instead.
func (*SinceArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{46}
}
// UntilArgs is a generated protobuf message (message index 47 in this
// file); it declares no user fields — only protoimpl bookkeeping.
type UntilArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *UntilArgs) Reset() {
*x = UntilArgs{}
mi := &file_ngolofuzz_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *UntilArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UntilArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *UntilArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[47]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UntilArgs.ProtoReflect.Descriptor instead.
func (*UntilArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{47}
}
// TimeNgdotAddDateArgs is a generated protobuf message (message index 48
// in this file) carrying three int64 fields: Years, Months and Days.
type TimeNgdotAddDateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Years int64 `protobuf:"varint,1,opt,name=years,proto3" json:"years,omitempty"`
Months int64 `protobuf:"varint,2,opt,name=months,proto3" json:"months,omitempty"`
Days int64 `protobuf:"varint,3,opt,name=days,proto3" json:"days,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotAddDateArgs) Reset() {
*x = TimeNgdotAddDateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotAddDateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotAddDateArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotAddDateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[48]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotAddDateArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotAddDateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{48}
}
// GetYears returns Years; safe on a nil receiver (returns 0).
func (x *TimeNgdotAddDateArgs) GetYears() int64 {
if x != nil {
return x.Years
}
return 0
}
// GetMonths returns Months; safe on a nil receiver (returns 0).
func (x *TimeNgdotAddDateArgs) GetMonths() int64 {
if x != nil {
return x.Months
}
return 0
}
// GetDays returns Days; safe on a nil receiver (returns 0).
func (x *TimeNgdotAddDateArgs) GetDays() int64 {
if x != nil {
return x.Days
}
return 0
}
// NowArgs is a generated protobuf message (message index 49 in this
// file); it declares no user fields — only protoimpl bookkeeping.
type NowArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *NowArgs) Reset() {
*x = NowArgs{}
mi := &file_ngolofuzz_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *NowArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NowArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *NowArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[49]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NowArgs.ProtoReflect.Descriptor instead.
func (*NowArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{49}
}
// TimeNgdotUTCArgs is a generated protobuf message (message index 50 in
// this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotUTCArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotUTCArgs) Reset() {
*x = TimeNgdotUTCArgs{}
mi := &file_ngolofuzz_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotUTCArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotUTCArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotUTCArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[50]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotUTCArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotUTCArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{50}
}
// TimeNgdotLocalArgs is a generated protobuf message (message index 51 in
// this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotLocalArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotLocalArgs) Reset() {
*x = TimeNgdotLocalArgs{}
mi := &file_ngolofuzz_proto_msgTypes[51]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotLocalArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotLocalArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotLocalArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[51]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotLocalArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotLocalArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{51}
}
// TimeNgdotInArgs is a generated protobuf message (message index 52 in
// this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotInArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotInArgs) Reset() {
*x = TimeNgdotInArgs{}
mi := &file_ngolofuzz_proto_msgTypes[52]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotInArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotInArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotInArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[52]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotInArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotInArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{52}
}
// TimeNgdotLocationArgs is a generated protobuf message (message index 53
// in this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotLocationArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotLocationArgs) Reset() {
*x = TimeNgdotLocationArgs{}
mi := &file_ngolofuzz_proto_msgTypes[53]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotLocationArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotLocationArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotLocationArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[53]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotLocationArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotLocationArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{53}
}
// TimeNgdotZoneArgs is a generated protobuf message (message index 54 in
// this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotZoneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotZoneArgs) Reset() {
*x = TimeNgdotZoneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[54]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotZoneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotZoneArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotZoneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[54]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotZoneArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotZoneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{54}
}
// TimeNgdotZoneBoundsArgs is a generated protobuf message (message index
// 55 in this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotZoneBoundsArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotZoneBoundsArgs) Reset() {
*x = TimeNgdotZoneBoundsArgs{}
mi := &file_ngolofuzz_proto_msgTypes[55]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotZoneBoundsArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotZoneBoundsArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotZoneBoundsArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[55]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotZoneBoundsArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotZoneBoundsArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{55}
}
// TimeNgdotUnixArgs is a generated protobuf message (message index 56 in
// this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotUnixArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotUnixArgs) Reset() {
*x = TimeNgdotUnixArgs{}
mi := &file_ngolofuzz_proto_msgTypes[56]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotUnixArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotUnixArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotUnixArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[56]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotUnixArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotUnixArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{56}
}
// TimeNgdotUnixMilliArgs is a generated protobuf message (message index 57
// in this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotUnixMilliArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotUnixMilliArgs) Reset() {
*x = TimeNgdotUnixMilliArgs{}
mi := &file_ngolofuzz_proto_msgTypes[57]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotUnixMilliArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotUnixMilliArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotUnixMilliArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[57]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotUnixMilliArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotUnixMilliArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{57}
}
// TimeNgdotUnixMicroArgs is a generated protobuf message (message index 58
// in this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotUnixMicroArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotUnixMicroArgs) Reset() {
*x = TimeNgdotUnixMicroArgs{}
mi := &file_ngolofuzz_proto_msgTypes[58]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotUnixMicroArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotUnixMicroArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotUnixMicroArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[58]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotUnixMicroArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotUnixMicroArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{58}
}
// TimeNgdotUnixNanoArgs is a generated protobuf message (message index 59
// in this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotUnixNanoArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotUnixNanoArgs) Reset() {
*x = TimeNgdotUnixNanoArgs{}
mi := &file_ngolofuzz_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotUnixNanoArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotUnixNanoArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotUnixNanoArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[59]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotUnixNanoArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotUnixNanoArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{59}
}
// TimeNgdotAppendBinaryArgs is a generated protobuf message (message index
// 60 in this file) carrying a single bytes field B.
type TimeNgdotAppendBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotAppendBinaryArgs) Reset() {
*x = TimeNgdotAppendBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[60]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotAppendBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotAppendBinaryArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotAppendBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[60]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotAppendBinaryArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotAppendBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{60}
}
// GetB returns B; safe on a nil receiver (returns nil).
func (x *TimeNgdotAppendBinaryArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// TimeNgdotMarshalBinaryArgs is a generated protobuf message (message index
// 61 in this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotMarshalBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotMarshalBinaryArgs) Reset() {
*x = TimeNgdotMarshalBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[61]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotMarshalBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotMarshalBinaryArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotMarshalBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[61]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotMarshalBinaryArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotMarshalBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{61}
}
// TimeNgdotUnmarshalBinaryArgs is a generated protobuf message (message
// index 62 in this file) carrying a single bytes field Data.
type TimeNgdotUnmarshalBinaryArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotUnmarshalBinaryArgs) Reset() {
*x = TimeNgdotUnmarshalBinaryArgs{}
mi := &file_ngolofuzz_proto_msgTypes[62]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotUnmarshalBinaryArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotUnmarshalBinaryArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotUnmarshalBinaryArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[62]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotUnmarshalBinaryArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotUnmarshalBinaryArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{62}
}
// GetData returns Data; safe on a nil receiver (returns nil).
func (x *TimeNgdotUnmarshalBinaryArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
// TimeNgdotGobEncodeArgs is a generated protobuf message (message index 63
// in this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotGobEncodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotGobEncodeArgs) Reset() {
*x = TimeNgdotGobEncodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[63]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotGobEncodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotGobEncodeArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotGobEncodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[63]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotGobEncodeArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotGobEncodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{63}
}
// TimeNgdotGobDecodeArgs is a generated protobuf message (message index 64
// in this file) carrying a single bytes field Data.
type TimeNgdotGobDecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotGobDecodeArgs) Reset() {
*x = TimeNgdotGobDecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[64]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotGobDecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotGobDecodeArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotGobDecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[64]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotGobDecodeArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotGobDecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{64}
}
// GetData returns Data; safe on a nil receiver (returns nil).
func (x *TimeNgdotGobDecodeArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
// TimeNgdotMarshalJSONArgs is a generated protobuf message (message index
// 65 in this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotMarshalJSONArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotMarshalJSONArgs) Reset() {
*x = TimeNgdotMarshalJSONArgs{}
mi := &file_ngolofuzz_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotMarshalJSONArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotMarshalJSONArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotMarshalJSONArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[65]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotMarshalJSONArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotMarshalJSONArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{65}
}
// TimeNgdotUnmarshalJSONArgs is a generated protobuf message (message
// index 66 in this file) carrying a single bytes field Data.
type TimeNgdotUnmarshalJSONArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotUnmarshalJSONArgs) Reset() {
*x = TimeNgdotUnmarshalJSONArgs{}
mi := &file_ngolofuzz_proto_msgTypes[66]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotUnmarshalJSONArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotUnmarshalJSONArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotUnmarshalJSONArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[66]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotUnmarshalJSONArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotUnmarshalJSONArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{66}
}
// GetData returns Data; safe on a nil receiver (returns nil).
func (x *TimeNgdotUnmarshalJSONArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
// TimeNgdotAppendTextArgs is a generated protobuf message (message index
// 67 in this file) carrying a single bytes field B.
type TimeNgdotAppendTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
B []byte `protobuf:"bytes,1,opt,name=b,proto3" json:"b,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotAppendTextArgs) Reset() {
*x = TimeNgdotAppendTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[67]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotAppendTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotAppendTextArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotAppendTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[67]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotAppendTextArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotAppendTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{67}
}
// GetB returns B; safe on a nil receiver (returns nil).
func (x *TimeNgdotAppendTextArgs) GetB() []byte {
if x != nil {
return x.B
}
return nil
}
// TimeNgdotMarshalTextArgs is a generated protobuf message (message index
// 68 in this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotMarshalTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotMarshalTextArgs) Reset() {
*x = TimeNgdotMarshalTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotMarshalTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotMarshalTextArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotMarshalTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[68]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotMarshalTextArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotMarshalTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{68}
}
// TimeNgdotUnmarshalTextArgs is a generated protobuf message (message
// index 69 in this file) carrying a single bytes field Data.
type TimeNgdotUnmarshalTextArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotUnmarshalTextArgs) Reset() {
*x = TimeNgdotUnmarshalTextArgs{}
mi := &file_ngolofuzz_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotUnmarshalTextArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotUnmarshalTextArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotUnmarshalTextArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[69]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotUnmarshalTextArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotUnmarshalTextArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{69}
}
// GetData returns Data; safe on a nil receiver (returns nil).
func (x *TimeNgdotUnmarshalTextArgs) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}
// UnixArgs is a generated protobuf message (message index 70 in this file)
// carrying two int64 fields: Sec and Nsec.
type UnixArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Sec int64 `protobuf:"varint,1,opt,name=sec,proto3" json:"sec,omitempty"`
Nsec int64 `protobuf:"varint,2,opt,name=nsec,proto3" json:"nsec,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *UnixArgs) Reset() {
*x = UnixArgs{}
mi := &file_ngolofuzz_proto_msgTypes[70]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *UnixArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnixArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *UnixArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[70]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnixArgs.ProtoReflect.Descriptor instead.
func (*UnixArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{70}
}
// GetSec returns Sec; safe on a nil receiver (returns 0).
func (x *UnixArgs) GetSec() int64 {
if x != nil {
return x.Sec
}
return 0
}
// GetNsec returns Nsec; safe on a nil receiver (returns 0).
func (x *UnixArgs) GetNsec() int64 {
if x != nil {
return x.Nsec
}
return 0
}
// UnixMilliArgs is a generated protobuf message (message index 71 in this
// file) carrying a single int64 field Msec.
type UnixMilliArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Msec int64 `protobuf:"varint,1,opt,name=msec,proto3" json:"msec,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *UnixMilliArgs) Reset() {
*x = UnixMilliArgs{}
mi := &file_ngolofuzz_proto_msgTypes[71]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *UnixMilliArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnixMilliArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *UnixMilliArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[71]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnixMilliArgs.ProtoReflect.Descriptor instead.
func (*UnixMilliArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{71}
}
// GetMsec returns Msec; safe on a nil receiver (returns 0).
func (x *UnixMilliArgs) GetMsec() int64 {
if x != nil {
return x.Msec
}
return 0
}
// UnixMicroArgs is a generated protobuf message (message index 72 in this
// file) carrying a single int64 field Usec.
type UnixMicroArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Usec int64 `protobuf:"varint,1,opt,name=usec,proto3" json:"usec,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *UnixMicroArgs) Reset() {
*x = UnixMicroArgs{}
mi := &file_ngolofuzz_proto_msgTypes[72]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *UnixMicroArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UnixMicroArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *UnixMicroArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[72]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UnixMicroArgs.ProtoReflect.Descriptor instead.
func (*UnixMicroArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{72}
}
// GetUsec returns Usec; safe on a nil receiver (returns 0).
func (x *UnixMicroArgs) GetUsec() int64 {
if x != nil {
return x.Usec
}
return 0
}
// TimeNgdotIsDSTArgs is a generated protobuf message (message index 73 in
// this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotIsDSTArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotIsDSTArgs) Reset() {
*x = TimeNgdotIsDSTArgs{}
mi := &file_ngolofuzz_proto_msgTypes[73]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotIsDSTArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotIsDSTArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotIsDSTArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[73]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotIsDSTArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotIsDSTArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{73}
}
// DateArgs is a generated protobuf message (message index 74 in this file)
// carrying six int64 fields: Year, Day, Hour, Min, Sec and Nsec.
//
// NOTE(review): there is no Month field even though time.Date takes a month
// argument — presumably the harness supplies it another way, or the .proto
// omitted it; confirm against the .proto definition before relying on this.
type DateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
Year int64 `protobuf:"varint,1,opt,name=year,proto3" json:"year,omitempty"`
Day int64 `protobuf:"varint,2,opt,name=day,proto3" json:"day,omitempty"`
Hour int64 `protobuf:"varint,3,opt,name=hour,proto3" json:"hour,omitempty"`
Min int64 `protobuf:"varint,4,opt,name=min,proto3" json:"min,omitempty"`
Sec int64 `protobuf:"varint,5,opt,name=sec,proto3" json:"sec,omitempty"`
Nsec int64 `protobuf:"varint,6,opt,name=nsec,proto3" json:"nsec,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *DateArgs) Reset() {
*x = DateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[74]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *DateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DateArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *DateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[74]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DateArgs.ProtoReflect.Descriptor instead.
func (*DateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{74}
}
// GetYear returns Year; safe on a nil receiver (returns 0).
func (x *DateArgs) GetYear() int64 {
if x != nil {
return x.Year
}
return 0
}
// GetDay returns Day; safe on a nil receiver (returns 0).
func (x *DateArgs) GetDay() int64 {
if x != nil {
return x.Day
}
return 0
}
// GetHour returns Hour; safe on a nil receiver (returns 0).
func (x *DateArgs) GetHour() int64 {
if x != nil {
return x.Hour
}
return 0
}
// GetMin returns Min; safe on a nil receiver (returns 0).
func (x *DateArgs) GetMin() int64 {
if x != nil {
return x.Min
}
return 0
}
// GetSec returns Sec; safe on a nil receiver (returns 0).
func (x *DateArgs) GetSec() int64 {
if x != nil {
return x.Sec
}
return 0
}
// GetNsec returns Nsec; safe on a nil receiver (returns 0).
func (x *DateArgs) GetNsec() int64 {
if x != nil {
return x.Nsec
}
return 0
}
// TimeNgdotTruncateArgs is a generated protobuf message (message index 75
// in this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotTruncateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotTruncateArgs) Reset() {
*x = TimeNgdotTruncateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[75]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotTruncateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotTruncateArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotTruncateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[75]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotTruncateArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotTruncateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{75}
}
// TimeNgdotRoundArgs is a generated protobuf message (message index 76 in
// this file); it declares no user fields — only protoimpl bookkeeping.
type TimeNgdotRoundArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset zeroes the message, then re-attaches its cached message info.
func (x *TimeNgdotRoundArgs) Reset() {
*x = TimeNgdotRoundArgs{}
mi := &file_ngolofuzz_proto_msgTypes[76]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String formats the message using the protoimpl stringer.
func (x *TimeNgdotRoundArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TimeNgdotRoundArgs) ProtoMessage() {}
// ProtoReflect returns the message's reflective view, lazily populating
// the message-info cache; a nil receiver is handled via mi.MessageOf.
func (x *TimeNgdotRoundArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[76]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TimeNgdotRoundArgs.ProtoReflect.Descriptor instead.
func (*TimeNgdotRoundArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{76}
}
// LocationNgdotStringArgs is the argument message for the
// LocationNgdotString oneof variant of NgoloFuzzOne. It carries no user
// fields.
type LocationNgdotStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-registers its message info with the
// protoimpl runtime.
func (x *LocationNgdotStringArgs) Reset() {
	*x = LocationNgdotStringArgs{}
	st := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	st.StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[77])
}

// String renders the message via the protoimpl stringer.
func (x *LocationNgdotStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LocationNgdotStringArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// attaching the message info on first use.
func (x *LocationNgdotStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[77]
	if x == nil {
		return mi.MessageOf(x)
	}
	st := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if st.LoadMessageInfo() == nil {
		st.StoreMessageInfo(mi)
	}
	return st
}

// Deprecated: Use LocationNgdotStringArgs.ProtoReflect.Descriptor instead.
func (*LocationNgdotStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{77}
}
// FixedZoneArgs is the argument message for the FixedZone oneof variant of
// NgoloFuzzOne: a zone name plus an offset.
type FixedZoneArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Offset        int64                  `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-registers its message info with the
// protoimpl runtime.
func (x *FixedZoneArgs) Reset() {
	*x = FixedZoneArgs{}
	st := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	st.StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[78])
}

// String renders the message via the protoimpl stringer.
func (x *FixedZoneArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FixedZoneArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// attaching the message info on first use.
func (x *FixedZoneArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[78]
	if x == nil {
		return mi.MessageOf(x)
	}
	st := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if st.LoadMessageInfo() == nil {
		st.StoreMessageInfo(mi)
	}
	return st
}

// Deprecated: Use FixedZoneArgs.ProtoReflect.Descriptor instead.
func (*FixedZoneArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{78}
}

// GetName returns the Name field, or "" on a nil receiver.
func (x *FixedZoneArgs) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}

// GetOffset returns the Offset field, or 0 on a nil receiver.
func (x *FixedZoneArgs) GetOffset() int64 {
	if x == nil {
		return 0
	}
	return x.Offset
}
// LoadLocationArgs is the argument message for the LoadLocation oneof
// variant of NgoloFuzzOne: a single location name.
type LoadLocationArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-registers its message info with the
// protoimpl runtime.
func (x *LoadLocationArgs) Reset() {
	*x = LoadLocationArgs{}
	st := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	st.StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[79])
}

// String renders the message via the protoimpl stringer.
func (x *LoadLocationArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LoadLocationArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// attaching the message info on first use.
func (x *LoadLocationArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[79]
	if x == nil {
		return mi.MessageOf(x)
	}
	st := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if st.LoadMessageInfo() == nil {
		st.StoreMessageInfo(mi)
	}
	return st
}

// Deprecated: Use LoadLocationArgs.ProtoReflect.Descriptor instead.
func (*LoadLocationArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{79}
}

// GetName returns the Name field, or "" on a nil receiver.
func (x *LoadLocationArgs) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}
// LoadLocationFromTZDataArgs is the argument message for the
// LoadLocationFromTZData oneof variant of NgoloFuzzOne: a location name
// plus raw tzdata bytes.
type LoadLocationFromTZDataArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Name          string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Data          []byte                 `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset zeroes the message and re-registers its message info with the
// protoimpl runtime.
func (x *LoadLocationFromTZDataArgs) Reset() {
	*x = LoadLocationFromTZDataArgs{}
	st := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	st.StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[80])
}

// String renders the message via the protoimpl stringer.
func (x *LoadLocationFromTZDataArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*LoadLocationFromTZDataArgs) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// attaching the message info on first use.
func (x *LoadLocationFromTZDataArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[80]
	if x == nil {
		return mi.MessageOf(x)
	}
	st := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if st.LoadMessageInfo() == nil {
		st.StoreMessageInfo(mi)
	}
	return st
}

// Deprecated: Use LoadLocationFromTZDataArgs.ProtoReflect.Descriptor instead.
func (*LoadLocationFromTZDataArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{80}
}

// GetName returns the Name field, or "" on a nil receiver.
func (x *LoadLocationFromTZDataArgs) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}

// GetData returns the Data field, or nil on a nil receiver.
func (x *LoadLocationFromTZDataArgs) GetData() []byte {
	if x == nil {
		return nil
	}
	return x.Data
}
// NgoloFuzzOne holds exactly one operation to perform, selected through the
// "item" oneof below. Each wrapper type pairs a field number with its Args
// message. NOTE(review): by the message names this appears to drive
// fuzzing of the time package — confirm against the .proto source.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	// *NgoloFuzzOne_TimeNgdotString
	// *NgoloFuzzOne_TimeNgdotGoString
	// *NgoloFuzzOne_TimeNgdotFormat
	// *NgoloFuzzOne_TimeNgdotAppendFormat
	// *NgoloFuzzOne_Parse
	// *NgoloFuzzOne_ParseInLocation
	// *NgoloFuzzOne_ParseDuration
	// *NgoloFuzzOne_TimerNgdotStop
	// *NgoloFuzzOne_NewTimer
	// *NgoloFuzzOne_TimerNgdotReset
	// *NgoloFuzzOne_After
	// *NgoloFuzzOne_NewTicker
	// *NgoloFuzzOne_TickerNgdotStop
	// *NgoloFuzzOne_TickerNgdotReset
	// *NgoloFuzzOne_Tick
	// *NgoloFuzzOne_TimeNgdotIsZero
	// *NgoloFuzzOne_TimeNgdotAfter
	// *NgoloFuzzOne_TimeNgdotBefore
	// *NgoloFuzzOne_TimeNgdotCompare
	// *NgoloFuzzOne_TimeNgdotEqual
	// *NgoloFuzzOne_MonthNgdotString
	// *NgoloFuzzOne_WeekdayNgdotString
	// *NgoloFuzzOne_TimeNgdotDate
	// *NgoloFuzzOne_TimeNgdotYear
	// *NgoloFuzzOne_TimeNgdotMonth
	// *NgoloFuzzOne_TimeNgdotDay
	// *NgoloFuzzOne_TimeNgdotWeekday
	// *NgoloFuzzOne_TimeNgdotISOWeek
	// *NgoloFuzzOne_TimeNgdotClock
	// *NgoloFuzzOne_TimeNgdotHour
	// *NgoloFuzzOne_TimeNgdotMinute
	// *NgoloFuzzOne_TimeNgdotSecond
	// *NgoloFuzzOne_TimeNgdotNanosecond
	// *NgoloFuzzOne_TimeNgdotYearDay
	// *NgoloFuzzOne_DurationNgdotString
	// *NgoloFuzzOne_DurationNgdotNanoseconds
	// *NgoloFuzzOne_DurationNgdotMicroseconds
	// *NgoloFuzzOne_DurationNgdotMilliseconds
	// *NgoloFuzzOne_DurationNgdotSeconds
	// *NgoloFuzzOne_DurationNgdotMinutes
	// *NgoloFuzzOne_DurationNgdotHours
	// *NgoloFuzzOne_DurationNgdotTruncate
	// *NgoloFuzzOne_DurationNgdotRound
	// *NgoloFuzzOne_DurationNgdotAbs
	// *NgoloFuzzOne_TimeNgdotAdd
	// *NgoloFuzzOne_TimeNgdotSub
	// *NgoloFuzzOne_Since
	// *NgoloFuzzOne_Until
	// *NgoloFuzzOne_TimeNgdotAddDate
	// *NgoloFuzzOne_Now
	// *NgoloFuzzOne_TimeNgdotUTC
	// *NgoloFuzzOne_TimeNgdotLocal
	// *NgoloFuzzOne_TimeNgdotIn
	// *NgoloFuzzOne_TimeNgdotLocation
	// *NgoloFuzzOne_TimeNgdotZone
	// *NgoloFuzzOne_TimeNgdotZoneBounds
	// *NgoloFuzzOne_TimeNgdotUnix
	// *NgoloFuzzOne_TimeNgdotUnixMilli
	// *NgoloFuzzOne_TimeNgdotUnixMicro
	// *NgoloFuzzOne_TimeNgdotUnixNano
	// *NgoloFuzzOne_TimeNgdotAppendBinary
	// *NgoloFuzzOne_TimeNgdotMarshalBinary
	// *NgoloFuzzOne_TimeNgdotUnmarshalBinary
	// *NgoloFuzzOne_TimeNgdotGobEncode
	// *NgoloFuzzOne_TimeNgdotGobDecode
	// *NgoloFuzzOne_TimeNgdotMarshalJSON
	// *NgoloFuzzOne_TimeNgdotUnmarshalJSON
	// *NgoloFuzzOne_TimeNgdotAppendText
	// *NgoloFuzzOne_TimeNgdotMarshalText
	// *NgoloFuzzOne_TimeNgdotUnmarshalText
	// *NgoloFuzzOne_Unix
	// *NgoloFuzzOne_UnixMilli
	// *NgoloFuzzOne_UnixMicro
	// *NgoloFuzzOne_TimeNgdotIsDST
	// *NgoloFuzzOne_Date
	// *NgoloFuzzOne_TimeNgdotTruncate
	// *NgoloFuzzOne_TimeNgdotRound
	// *NgoloFuzzOne_LocationNgdotString
	// *NgoloFuzzOne_FixedZone
	// *NgoloFuzzOne_LoadLocation
	// *NgoloFuzzOne_LoadLocationFromTZData
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// Reset zeroes the message and re-registers its message info with the
// protoimpl runtime.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	st := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	st.StoreMessageInfo(&file_ngolofuzz_proto_msgTypes[81])
}

// String renders the message via the protoimpl stringer.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// attaching the message info on first use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[81]
	if x == nil {
		return mi.MessageOf(x)
	}
	st := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if st.LoadMessageInfo() == nil {
		st.StoreMessageInfo(mi)
	}
	return st
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{81}
}

// GetItem returns the populated oneof wrapper, or nil on a nil receiver.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x == nil {
		return nil
	}
	return x.Item
}
// Typed accessors for the "item" oneof. Each returns its Args message when
// that variant is currently set, and nil otherwise — including on a nil
// receiver (GetItem is nil-safe, and a type assertion on a nil interface
// simply fails).

func (x *NgoloFuzzOne) GetTimeNgdotString() *TimeNgdotStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotString); ok {
		return v.TimeNgdotString
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotGoString() *TimeNgdotGoStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotGoString); ok {
		return v.TimeNgdotGoString
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotFormat() *TimeNgdotFormatArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotFormat); ok {
		return v.TimeNgdotFormat
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotAppendFormat() *TimeNgdotAppendFormatArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotAppendFormat); ok {
		return v.TimeNgdotAppendFormat
	}
	return nil
}

func (x *NgoloFuzzOne) GetParse() *ParseArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_Parse); ok {
		return v.Parse
	}
	return nil
}

func (x *NgoloFuzzOne) GetParseInLocation() *ParseInLocationArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_ParseInLocation); ok {
		return v.ParseInLocation
	}
	return nil
}

func (x *NgoloFuzzOne) GetParseDuration() *ParseDurationArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_ParseDuration); ok {
		return v.ParseDuration
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimerNgdotStop() *TimerNgdotStopArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimerNgdotStop); ok {
		return v.TimerNgdotStop
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewTimer() *NewTimerArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_NewTimer); ok {
		return v.NewTimer
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimerNgdotReset() *TimerNgdotResetArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimerNgdotReset); ok {
		return v.TimerNgdotReset
	}
	return nil
}

func (x *NgoloFuzzOne) GetAfter() *AfterArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_After); ok {
		return v.After
	}
	return nil
}

func (x *NgoloFuzzOne) GetNewTicker() *NewTickerArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_NewTicker); ok {
		return v.NewTicker
	}
	return nil
}

func (x *NgoloFuzzOne) GetTickerNgdotStop() *TickerNgdotStopArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TickerNgdotStop); ok {
		return v.TickerNgdotStop
	}
	return nil
}

func (x *NgoloFuzzOne) GetTickerNgdotReset() *TickerNgdotResetArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TickerNgdotReset); ok {
		return v.TickerNgdotReset
	}
	return nil
}

func (x *NgoloFuzzOne) GetTick() *TickArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_Tick); ok {
		return v.Tick
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotIsZero() *TimeNgdotIsZeroArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotIsZero); ok {
		return v.TimeNgdotIsZero
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotAfter() *TimeNgdotAfterArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotAfter); ok {
		return v.TimeNgdotAfter
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotBefore() *TimeNgdotBeforeArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotBefore); ok {
		return v.TimeNgdotBefore
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotCompare() *TimeNgdotCompareArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotCompare); ok {
		return v.TimeNgdotCompare
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotEqual() *TimeNgdotEqualArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotEqual); ok {
		return v.TimeNgdotEqual
	}
	return nil
}

func (x *NgoloFuzzOne) GetMonthNgdotString() *MonthNgdotStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_MonthNgdotString); ok {
		return v.MonthNgdotString
	}
	return nil
}

func (x *NgoloFuzzOne) GetWeekdayNgdotString() *WeekdayNgdotStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_WeekdayNgdotString); ok {
		return v.WeekdayNgdotString
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotDate() *TimeNgdotDateArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotDate); ok {
		return v.TimeNgdotDate
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotYear() *TimeNgdotYearArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotYear); ok {
		return v.TimeNgdotYear
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotMonth() *TimeNgdotMonthArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotMonth); ok {
		return v.TimeNgdotMonth
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotDay() *TimeNgdotDayArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotDay); ok {
		return v.TimeNgdotDay
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotWeekday() *TimeNgdotWeekdayArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotWeekday); ok {
		return v.TimeNgdotWeekday
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotISOWeek() *TimeNgdotISOWeekArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotISOWeek); ok {
		return v.TimeNgdotISOWeek
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotClock() *TimeNgdotClockArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotClock); ok {
		return v.TimeNgdotClock
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotHour() *TimeNgdotHourArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotHour); ok {
		return v.TimeNgdotHour
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotMinute() *TimeNgdotMinuteArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotMinute); ok {
		return v.TimeNgdotMinute
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotSecond() *TimeNgdotSecondArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotSecond); ok {
		return v.TimeNgdotSecond
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotNanosecond() *TimeNgdotNanosecondArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotNanosecond); ok {
		return v.TimeNgdotNanosecond
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotYearDay() *TimeNgdotYearDayArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotYearDay); ok {
		return v.TimeNgdotYearDay
	}
	return nil
}

func (x *NgoloFuzzOne) GetDurationNgdotString() *DurationNgdotStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_DurationNgdotString); ok {
		return v.DurationNgdotString
	}
	return nil
}

func (x *NgoloFuzzOne) GetDurationNgdotNanoseconds() *DurationNgdotNanosecondsArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_DurationNgdotNanoseconds); ok {
		return v.DurationNgdotNanoseconds
	}
	return nil
}

func (x *NgoloFuzzOne) GetDurationNgdotMicroseconds() *DurationNgdotMicrosecondsArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_DurationNgdotMicroseconds); ok {
		return v.DurationNgdotMicroseconds
	}
	return nil
}

func (x *NgoloFuzzOne) GetDurationNgdotMilliseconds() *DurationNgdotMillisecondsArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_DurationNgdotMilliseconds); ok {
		return v.DurationNgdotMilliseconds
	}
	return nil
}

func (x *NgoloFuzzOne) GetDurationNgdotSeconds() *DurationNgdotSecondsArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_DurationNgdotSeconds); ok {
		return v.DurationNgdotSeconds
	}
	return nil
}

func (x *NgoloFuzzOne) GetDurationNgdotMinutes() *DurationNgdotMinutesArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_DurationNgdotMinutes); ok {
		return v.DurationNgdotMinutes
	}
	return nil
}

func (x *NgoloFuzzOne) GetDurationNgdotHours() *DurationNgdotHoursArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_DurationNgdotHours); ok {
		return v.DurationNgdotHours
	}
	return nil
}

func (x *NgoloFuzzOne) GetDurationNgdotTruncate() *DurationNgdotTruncateArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_DurationNgdotTruncate); ok {
		return v.DurationNgdotTruncate
	}
	return nil
}

func (x *NgoloFuzzOne) GetDurationNgdotRound() *DurationNgdotRoundArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_DurationNgdotRound); ok {
		return v.DurationNgdotRound
	}
	return nil
}

func (x *NgoloFuzzOne) GetDurationNgdotAbs() *DurationNgdotAbsArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_DurationNgdotAbs); ok {
		return v.DurationNgdotAbs
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotAdd() *TimeNgdotAddArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotAdd); ok {
		return v.TimeNgdotAdd
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotSub() *TimeNgdotSubArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotSub); ok {
		return v.TimeNgdotSub
	}
	return nil
}

func (x *NgoloFuzzOne) GetSince() *SinceArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_Since); ok {
		return v.Since
	}
	return nil
}

func (x *NgoloFuzzOne) GetUntil() *UntilArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_Until); ok {
		return v.Until
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotAddDate() *TimeNgdotAddDateArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotAddDate); ok {
		return v.TimeNgdotAddDate
	}
	return nil
}

func (x *NgoloFuzzOne) GetNow() *NowArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_Now); ok {
		return v.Now
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotUTC() *TimeNgdotUTCArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotUTC); ok {
		return v.TimeNgdotUTC
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotLocal() *TimeNgdotLocalArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotLocal); ok {
		return v.TimeNgdotLocal
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotIn() *TimeNgdotInArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotIn); ok {
		return v.TimeNgdotIn
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotLocation() *TimeNgdotLocationArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotLocation); ok {
		return v.TimeNgdotLocation
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotZone() *TimeNgdotZoneArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotZone); ok {
		return v.TimeNgdotZone
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotZoneBounds() *TimeNgdotZoneBoundsArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotZoneBounds); ok {
		return v.TimeNgdotZoneBounds
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotUnix() *TimeNgdotUnixArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotUnix); ok {
		return v.TimeNgdotUnix
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotUnixMilli() *TimeNgdotUnixMilliArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotUnixMilli); ok {
		return v.TimeNgdotUnixMilli
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotUnixMicro() *TimeNgdotUnixMicroArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotUnixMicro); ok {
		return v.TimeNgdotUnixMicro
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotUnixNano() *TimeNgdotUnixNanoArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotUnixNano); ok {
		return v.TimeNgdotUnixNano
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotAppendBinary() *TimeNgdotAppendBinaryArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotAppendBinary); ok {
		return v.TimeNgdotAppendBinary
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotMarshalBinary() *TimeNgdotMarshalBinaryArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotMarshalBinary); ok {
		return v.TimeNgdotMarshalBinary
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotUnmarshalBinary() *TimeNgdotUnmarshalBinaryArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotUnmarshalBinary); ok {
		return v.TimeNgdotUnmarshalBinary
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotGobEncode() *TimeNgdotGobEncodeArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotGobEncode); ok {
		return v.TimeNgdotGobEncode
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotGobDecode() *TimeNgdotGobDecodeArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotGobDecode); ok {
		return v.TimeNgdotGobDecode
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotMarshalJSON() *TimeNgdotMarshalJSONArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotMarshalJSON); ok {
		return v.TimeNgdotMarshalJSON
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotUnmarshalJSON() *TimeNgdotUnmarshalJSONArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotUnmarshalJSON); ok {
		return v.TimeNgdotUnmarshalJSON
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotAppendText() *TimeNgdotAppendTextArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotAppendText); ok {
		return v.TimeNgdotAppendText
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotMarshalText() *TimeNgdotMarshalTextArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotMarshalText); ok {
		return v.TimeNgdotMarshalText
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotUnmarshalText() *TimeNgdotUnmarshalTextArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotUnmarshalText); ok {
		return v.TimeNgdotUnmarshalText
	}
	return nil
}

func (x *NgoloFuzzOne) GetUnix() *UnixArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_Unix); ok {
		return v.Unix
	}
	return nil
}

func (x *NgoloFuzzOne) GetUnixMilli() *UnixMilliArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_UnixMilli); ok {
		return v.UnixMilli
	}
	return nil
}

func (x *NgoloFuzzOne) GetUnixMicro() *UnixMicroArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_UnixMicro); ok {
		return v.UnixMicro
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotIsDST() *TimeNgdotIsDSTArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotIsDST); ok {
		return v.TimeNgdotIsDST
	}
	return nil
}

func (x *NgoloFuzzOne) GetDate() *DateArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_Date); ok {
		return v.Date
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotTruncate() *TimeNgdotTruncateArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotTruncate); ok {
		return v.TimeNgdotTruncate
	}
	return nil
}

func (x *NgoloFuzzOne) GetTimeNgdotRound() *TimeNgdotRoundArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_TimeNgdotRound); ok {
		return v.TimeNgdotRound
	}
	return nil
}

func (x *NgoloFuzzOne) GetLocationNgdotString() *LocationNgdotStringArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_LocationNgdotString); ok {
		return v.LocationNgdotString
	}
	return nil
}

func (x *NgoloFuzzOne) GetFixedZone() *FixedZoneArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_FixedZone); ok {
		return v.FixedZone
	}
	return nil
}

func (x *NgoloFuzzOne) GetLoadLocation() *LoadLocationArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_LoadLocation); ok {
		return v.LoadLocation
	}
	return nil
}

func (x *NgoloFuzzOne) GetLoadLocationFromTZData() *LoadLocationFromTZDataArgs {
	if v, ok := x.GetItem().(*NgoloFuzzOne_LoadLocationFromTZData); ok {
		return v.LoadLocationFromTZData
	}
	return nil
}
// isNgoloFuzzOne_Item is the marker interface satisfied by every wrapper
// type that may be assigned to the NgoloFuzzOne.Item oneof field.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}
// Wrapper types for the NgoloFuzzOne "item" oneof. Each holds exactly one
// Args message under its proto field number (see the `protobuf:` tags);
// assigning one of these to NgoloFuzzOne.Item selects that variant.
type NgoloFuzzOne_TimeNgdotString struct {
	TimeNgdotString *TimeNgdotStringArgs `protobuf:"bytes,1,opt,name=TimeNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotGoString struct {
	TimeNgdotGoString *TimeNgdotGoStringArgs `protobuf:"bytes,2,opt,name=TimeNgdotGoString,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotFormat struct {
	TimeNgdotFormat *TimeNgdotFormatArgs `protobuf:"bytes,3,opt,name=TimeNgdotFormat,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotAppendFormat struct {
	TimeNgdotAppendFormat *TimeNgdotAppendFormatArgs `protobuf:"bytes,4,opt,name=TimeNgdotAppendFormat,proto3,oneof"`
}
type NgoloFuzzOne_Parse struct {
	Parse *ParseArgs `protobuf:"bytes,5,opt,name=Parse,proto3,oneof"`
}
type NgoloFuzzOne_ParseInLocation struct {
	ParseInLocation *ParseInLocationArgs `protobuf:"bytes,6,opt,name=ParseInLocation,proto3,oneof"`
}
type NgoloFuzzOne_ParseDuration struct {
	ParseDuration *ParseDurationArgs `protobuf:"bytes,7,opt,name=ParseDuration,proto3,oneof"`
}
type NgoloFuzzOne_TimerNgdotStop struct {
	TimerNgdotStop *TimerNgdotStopArgs `protobuf:"bytes,8,opt,name=TimerNgdotStop,proto3,oneof"`
}
type NgoloFuzzOne_NewTimer struct {
	NewTimer *NewTimerArgs `protobuf:"bytes,9,opt,name=NewTimer,proto3,oneof"`
}
type NgoloFuzzOne_TimerNgdotReset struct {
	TimerNgdotReset *TimerNgdotResetArgs `protobuf:"bytes,10,opt,name=TimerNgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_After struct {
	After *AfterArgs `protobuf:"bytes,11,opt,name=After,proto3,oneof"`
}
type NgoloFuzzOne_NewTicker struct {
	NewTicker *NewTickerArgs `protobuf:"bytes,12,opt,name=NewTicker,proto3,oneof"`
}
type NgoloFuzzOne_TickerNgdotStop struct {
	TickerNgdotStop *TickerNgdotStopArgs `protobuf:"bytes,13,opt,name=TickerNgdotStop,proto3,oneof"`
}
type NgoloFuzzOne_TickerNgdotReset struct {
	TickerNgdotReset *TickerNgdotResetArgs `protobuf:"bytes,14,opt,name=TickerNgdotReset,proto3,oneof"`
}
type NgoloFuzzOne_Tick struct {
	Tick *TickArgs `protobuf:"bytes,15,opt,name=Tick,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotIsZero struct {
	TimeNgdotIsZero *TimeNgdotIsZeroArgs `protobuf:"bytes,16,opt,name=TimeNgdotIsZero,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotAfter struct {
	TimeNgdotAfter *TimeNgdotAfterArgs `protobuf:"bytes,17,opt,name=TimeNgdotAfter,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotBefore struct {
	TimeNgdotBefore *TimeNgdotBeforeArgs `protobuf:"bytes,18,opt,name=TimeNgdotBefore,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotCompare struct {
	TimeNgdotCompare *TimeNgdotCompareArgs `protobuf:"bytes,19,opt,name=TimeNgdotCompare,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotEqual struct {
	TimeNgdotEqual *TimeNgdotEqualArgs `protobuf:"bytes,20,opt,name=TimeNgdotEqual,proto3,oneof"`
}
type NgoloFuzzOne_MonthNgdotString struct {
	MonthNgdotString *MonthNgdotStringArgs `protobuf:"bytes,21,opt,name=MonthNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_WeekdayNgdotString struct {
	WeekdayNgdotString *WeekdayNgdotStringArgs `protobuf:"bytes,22,opt,name=WeekdayNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotDate struct {
	TimeNgdotDate *TimeNgdotDateArgs `protobuf:"bytes,23,opt,name=TimeNgdotDate,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotYear struct {
	TimeNgdotYear *TimeNgdotYearArgs `protobuf:"bytes,24,opt,name=TimeNgdotYear,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotMonth struct {
	TimeNgdotMonth *TimeNgdotMonthArgs `protobuf:"bytes,25,opt,name=TimeNgdotMonth,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotDay struct {
	TimeNgdotDay *TimeNgdotDayArgs `protobuf:"bytes,26,opt,name=TimeNgdotDay,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotWeekday struct {
	TimeNgdotWeekday *TimeNgdotWeekdayArgs `protobuf:"bytes,27,opt,name=TimeNgdotWeekday,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotISOWeek struct {
	TimeNgdotISOWeek *TimeNgdotISOWeekArgs `protobuf:"bytes,28,opt,name=TimeNgdotISOWeek,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotClock struct {
	TimeNgdotClock *TimeNgdotClockArgs `protobuf:"bytes,29,opt,name=TimeNgdotClock,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotHour struct {
	TimeNgdotHour *TimeNgdotHourArgs `protobuf:"bytes,30,opt,name=TimeNgdotHour,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotMinute struct {
	TimeNgdotMinute *TimeNgdotMinuteArgs `protobuf:"bytes,31,opt,name=TimeNgdotMinute,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotSecond struct {
	TimeNgdotSecond *TimeNgdotSecondArgs `protobuf:"bytes,32,opt,name=TimeNgdotSecond,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotNanosecond struct {
	TimeNgdotNanosecond *TimeNgdotNanosecondArgs `protobuf:"bytes,33,opt,name=TimeNgdotNanosecond,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotYearDay struct {
	TimeNgdotYearDay *TimeNgdotYearDayArgs `protobuf:"bytes,34,opt,name=TimeNgdotYearDay,proto3,oneof"`
}
type NgoloFuzzOne_DurationNgdotString struct {
	DurationNgdotString *DurationNgdotStringArgs `protobuf:"bytes,35,opt,name=DurationNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_DurationNgdotNanoseconds struct {
	DurationNgdotNanoseconds *DurationNgdotNanosecondsArgs `protobuf:"bytes,36,opt,name=DurationNgdotNanoseconds,proto3,oneof"`
}
type NgoloFuzzOne_DurationNgdotMicroseconds struct {
	DurationNgdotMicroseconds *DurationNgdotMicrosecondsArgs `protobuf:"bytes,37,opt,name=DurationNgdotMicroseconds,proto3,oneof"`
}
type NgoloFuzzOne_DurationNgdotMilliseconds struct {
	DurationNgdotMilliseconds *DurationNgdotMillisecondsArgs `protobuf:"bytes,38,opt,name=DurationNgdotMilliseconds,proto3,oneof"`
}
type NgoloFuzzOne_DurationNgdotSeconds struct {
	DurationNgdotSeconds *DurationNgdotSecondsArgs `protobuf:"bytes,39,opt,name=DurationNgdotSeconds,proto3,oneof"`
}
type NgoloFuzzOne_DurationNgdotMinutes struct {
	DurationNgdotMinutes *DurationNgdotMinutesArgs `protobuf:"bytes,40,opt,name=DurationNgdotMinutes,proto3,oneof"`
}
type NgoloFuzzOne_DurationNgdotHours struct {
	DurationNgdotHours *DurationNgdotHoursArgs `protobuf:"bytes,41,opt,name=DurationNgdotHours,proto3,oneof"`
}
type NgoloFuzzOne_DurationNgdotTruncate struct {
	DurationNgdotTruncate *DurationNgdotTruncateArgs `protobuf:"bytes,42,opt,name=DurationNgdotTruncate,proto3,oneof"`
}
type NgoloFuzzOne_DurationNgdotRound struct {
	DurationNgdotRound *DurationNgdotRoundArgs `protobuf:"bytes,43,opt,name=DurationNgdotRound,proto3,oneof"`
}
type NgoloFuzzOne_DurationNgdotAbs struct {
	DurationNgdotAbs *DurationNgdotAbsArgs `protobuf:"bytes,44,opt,name=DurationNgdotAbs,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotAdd struct {
	TimeNgdotAdd *TimeNgdotAddArgs `protobuf:"bytes,45,opt,name=TimeNgdotAdd,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotSub struct {
	TimeNgdotSub *TimeNgdotSubArgs `protobuf:"bytes,46,opt,name=TimeNgdotSub,proto3,oneof"`
}
type NgoloFuzzOne_Since struct {
	Since *SinceArgs `protobuf:"bytes,47,opt,name=Since,proto3,oneof"`
}
type NgoloFuzzOne_Until struct {
	Until *UntilArgs `protobuf:"bytes,48,opt,name=Until,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotAddDate struct {
	TimeNgdotAddDate *TimeNgdotAddDateArgs `protobuf:"bytes,49,opt,name=TimeNgdotAddDate,proto3,oneof"`
}
type NgoloFuzzOne_Now struct {
	Now *NowArgs `protobuf:"bytes,50,opt,name=Now,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotUTC struct {
	TimeNgdotUTC *TimeNgdotUTCArgs `protobuf:"bytes,51,opt,name=TimeNgdotUTC,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotLocal struct {
	TimeNgdotLocal *TimeNgdotLocalArgs `protobuf:"bytes,52,opt,name=TimeNgdotLocal,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotIn struct {
	TimeNgdotIn *TimeNgdotInArgs `protobuf:"bytes,53,opt,name=TimeNgdotIn,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotLocation struct {
	TimeNgdotLocation *TimeNgdotLocationArgs `protobuf:"bytes,54,opt,name=TimeNgdotLocation,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotZone struct {
	TimeNgdotZone *TimeNgdotZoneArgs `protobuf:"bytes,55,opt,name=TimeNgdotZone,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotZoneBounds struct {
	TimeNgdotZoneBounds *TimeNgdotZoneBoundsArgs `protobuf:"bytes,56,opt,name=TimeNgdotZoneBounds,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotUnix struct {
	TimeNgdotUnix *TimeNgdotUnixArgs `protobuf:"bytes,57,opt,name=TimeNgdotUnix,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotUnixMilli struct {
	TimeNgdotUnixMilli *TimeNgdotUnixMilliArgs `protobuf:"bytes,58,opt,name=TimeNgdotUnixMilli,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotUnixMicro struct {
	TimeNgdotUnixMicro *TimeNgdotUnixMicroArgs `protobuf:"bytes,59,opt,name=TimeNgdotUnixMicro,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotUnixNano struct {
	TimeNgdotUnixNano *TimeNgdotUnixNanoArgs `protobuf:"bytes,60,opt,name=TimeNgdotUnixNano,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotAppendBinary struct {
	TimeNgdotAppendBinary *TimeNgdotAppendBinaryArgs `protobuf:"bytes,61,opt,name=TimeNgdotAppendBinary,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotMarshalBinary struct {
	TimeNgdotMarshalBinary *TimeNgdotMarshalBinaryArgs `protobuf:"bytes,62,opt,name=TimeNgdotMarshalBinary,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotUnmarshalBinary struct {
	TimeNgdotUnmarshalBinary *TimeNgdotUnmarshalBinaryArgs `protobuf:"bytes,63,opt,name=TimeNgdotUnmarshalBinary,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotGobEncode struct {
	TimeNgdotGobEncode *TimeNgdotGobEncodeArgs `protobuf:"bytes,64,opt,name=TimeNgdotGobEncode,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotGobDecode struct {
	TimeNgdotGobDecode *TimeNgdotGobDecodeArgs `protobuf:"bytes,65,opt,name=TimeNgdotGobDecode,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotMarshalJSON struct {
	TimeNgdotMarshalJSON *TimeNgdotMarshalJSONArgs `protobuf:"bytes,66,opt,name=TimeNgdotMarshalJSON,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotUnmarshalJSON struct {
	TimeNgdotUnmarshalJSON *TimeNgdotUnmarshalJSONArgs `protobuf:"bytes,67,opt,name=TimeNgdotUnmarshalJSON,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotAppendText struct {
	TimeNgdotAppendText *TimeNgdotAppendTextArgs `protobuf:"bytes,68,opt,name=TimeNgdotAppendText,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotMarshalText struct {
	TimeNgdotMarshalText *TimeNgdotMarshalTextArgs `protobuf:"bytes,69,opt,name=TimeNgdotMarshalText,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotUnmarshalText struct {
	TimeNgdotUnmarshalText *TimeNgdotUnmarshalTextArgs `protobuf:"bytes,70,opt,name=TimeNgdotUnmarshalText,proto3,oneof"`
}
type NgoloFuzzOne_Unix struct {
	Unix *UnixArgs `protobuf:"bytes,71,opt,name=Unix,proto3,oneof"`
}
type NgoloFuzzOne_UnixMilli struct {
	UnixMilli *UnixMilliArgs `protobuf:"bytes,72,opt,name=UnixMilli,proto3,oneof"`
}
type NgoloFuzzOne_UnixMicro struct {
	UnixMicro *UnixMicroArgs `protobuf:"bytes,73,opt,name=UnixMicro,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotIsDST struct {
	TimeNgdotIsDST *TimeNgdotIsDSTArgs `protobuf:"bytes,74,opt,name=TimeNgdotIsDST,proto3,oneof"`
}
type NgoloFuzzOne_Date struct {
Date *DateArgs `protobuf:"bytes,75,opt,name=Date,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotTruncate struct {
TimeNgdotTruncate *TimeNgdotTruncateArgs `protobuf:"bytes,76,opt,name=TimeNgdotTruncate,proto3,oneof"`
}
type NgoloFuzzOne_TimeNgdotRound struct {
TimeNgdotRound *TimeNgdotRoundArgs `protobuf:"bytes,77,opt,name=TimeNgdotRound,proto3,oneof"`
}
type NgoloFuzzOne_LocationNgdotString struct {
LocationNgdotString *LocationNgdotStringArgs `protobuf:"bytes,78,opt,name=LocationNgdotString,proto3,oneof"`
}
type NgoloFuzzOne_FixedZone struct {
FixedZone *FixedZoneArgs `protobuf:"bytes,79,opt,name=FixedZone,proto3,oneof"`
}
type NgoloFuzzOne_LoadLocation struct {
LoadLocation *LoadLocationArgs `protobuf:"bytes,80,opt,name=LoadLocation,proto3,oneof"`
}
type NgoloFuzzOne_LoadLocationFromTZData struct {
LoadLocationFromTZData *LoadLocationFromTZDataArgs `protobuf:"bytes,81,opt,name=LoadLocationFromTZData,proto3,oneof"`
}
// No-op marker methods: implementing isNgoloFuzzOne_Item makes each wrapper
// type assignable to the NgoloFuzzOne.Item oneof field at compile time.
// One marker per oneof case; the set must stay in sync with the wrapper
// type declarations above (generated — do not edit by hand).
func (*NgoloFuzzOne_TimeNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotGoString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotFormat) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotAppendFormat) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Parse) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ParseInLocation) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ParseDuration) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimerNgdotStop) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewTimer) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimerNgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_After) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_NewTicker) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TickerNgdotStop) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TickerNgdotReset) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Tick) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotIsZero) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotAfter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotBefore) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotCompare) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotEqual) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_MonthNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_WeekdayNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotDate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotYear) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotMonth) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotDay) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotWeekday) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotISOWeek) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotClock) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotHour) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotMinute) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotSecond) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotNanosecond) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotYearDay) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DurationNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DurationNgdotNanoseconds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DurationNgdotMicroseconds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DurationNgdotMilliseconds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DurationNgdotSeconds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DurationNgdotMinutes) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DurationNgdotHours) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DurationNgdotTruncate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DurationNgdotRound) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DurationNgdotAbs) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotAdd) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotSub) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Since) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Until) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotAddDate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Now) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotUTC) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotLocal) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotIn) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotLocation) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotZone) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotZoneBounds) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotUnix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotUnixMilli) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotUnixMicro) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotUnixNano) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotAppendBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotMarshalBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotUnmarshalBinary) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotGobEncode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotGobDecode) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotMarshalJSON) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotUnmarshalJSON) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotAppendText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotMarshalText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotUnmarshalText) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Unix) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UnixMilli) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_UnixMicro) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotIsDST) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Date) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotTruncate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_TimeNgdotRound) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LocationNgdotString) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_FixedZone) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoadLocation) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_LoadLocationFromTZData) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny is a generated message holding exactly one primitive fuzz
// input (double, int64, bool, string, or bytes) via the "item" oneof.
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset overwrites x with the zero value, then re-attaches the cached
// message info so protobuf reflection keeps working after the wipe.
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[82]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the message rendered in the protobuf text format.
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
// ProtoReflect exposes the reflective view of the message, lazily binding
// the message info on first use (standard protoc-gen-go pattern).
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[82]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
// nil receiver: fall back to a typed-nil reflective message.
return mi.MessageOf(x)
}
// Descriptor returns the gzipped raw file descriptor and this message's
// index path (message 82) within it.
//
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{82}
}
// GetItem returns the currently populated oneof wrapper, or nil when the
// receiver is nil or no variant has been set.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x == nil {
		return nil
	}
	return x.Item
}
// GetDoubleArgs returns the oneof's float64 payload when the DoubleArgs
// variant is set; otherwise (nil receiver or other variant) it returns 0.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x == nil {
		return 0
	}
	if v, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
		return v.DoubleArgs
	}
	return 0
}
// GetInt64Args returns the oneof's int64 payload when the Int64Args variant
// is set; otherwise (nil receiver or other variant) it returns 0.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x == nil {
		return 0
	}
	if v, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
		return v.Int64Args
	}
	return 0
}
// GetBoolArgs returns the oneof's bool payload when the BoolArgs variant is
// set; otherwise (nil receiver or other variant) it returns false.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x == nil {
		return false
	}
	if v, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
		return v.BoolArgs
	}
	return false
}
// GetStringArgs returns the oneof's string payload when the StringArgs
// variant is set; otherwise (nil receiver or other variant) it returns "".
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x == nil {
		return ""
	}
	if v, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
		return v.StringArgs
	}
	return ""
}
// GetBytesArgs returns the oneof's []byte payload when the BytesArgs variant
// is set; otherwise (nil receiver or other variant) it returns nil.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x == nil {
		return nil
	}
	if v, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
		return v.BytesArgs
	}
	return nil
}
// isNgoloFuzzAny_Item is the sealed interface implemented only by the
// NgoloFuzzAny oneof wrapper types below; field numbers and tags are
// generated and must match the descriptor exactly.
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
// Marker methods making each wrapper a valid NgoloFuzzAny oneof case.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz corpus message: an ordered sequence of
// NgoloFuzzOne calls to replay against the time package.
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
// Reset overwrites x with the zero value, then re-attaches the cached
// message info so protobuf reflection keeps working after the wipe.
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[83]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
// String returns the message rendered in the protobuf text format.
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
// ProtoReflect exposes the reflective view of the message, lazily binding
// the message info on first use (standard protoc-gen-go pattern).
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[83]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
// nil receiver: fall back to a typed-nil reflective message.
return mi.MessageOf(x)
}
// Descriptor returns the gzipped raw file descriptor and this message's
// index path (message 83) within it.
//
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{83}
}
// GetList returns the sequence of fuzz calls, or nil for a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x == nil {
		return nil
	}
	return x.List
}
// File_ngolofuzz_proto is the runtime handle for the ngolofuzz.proto file
// descriptor; it is populated during package initialization.
var File_ngolofuzz_proto protoreflect.FileDescriptor
// file_ngolofuzz_proto_rawDesc is the wire-format FileDescriptorProto for
// ngolofuzz.proto, embedded as an escaped Go string. Every byte is
// significant — do not edit; regenerate with protoc instead.
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x15\n" +
"\x13TimeNgdotStringArgs\"\x17\n" +
"\x15TimeNgdotGoStringArgs\"-\n" +
"\x13TimeNgdotFormatArgs\x12\x16\n" +
"\x06layout\x18\x01 \x01(\tR\x06layout\"A\n" +
"\x19TimeNgdotAppendFormatArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\x12\x16\n" +
"\x06layout\x18\x02 \x01(\tR\x06layout\"9\n" +
"\tParseArgs\x12\x16\n" +
"\x06layout\x18\x01 \x01(\tR\x06layout\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value\"C\n" +
"\x13ParseInLocationArgs\x12\x16\n" +
"\x06layout\x18\x01 \x01(\tR\x06layout\x12\x14\n" +
"\x05value\x18\x02 \x01(\tR\x05value\"!\n" +
"\x11ParseDurationArgs\x12\f\n" +
"\x01s\x18\x01 \x01(\tR\x01s\"\x14\n" +
"\x12TimerNgdotStopArgs\"\x0e\n" +
"\fNewTimerArgs\"\x15\n" +
"\x13TimerNgdotResetArgs\"\v\n" +
"\tAfterArgs\"\x0f\n" +
"\rNewTickerArgs\"\x15\n" +
"\x13TickerNgdotStopArgs\"\x16\n" +
"\x14TickerNgdotResetArgs\"\n" +
"\n" +
"\bTickArgs\"\x15\n" +
"\x13TimeNgdotIsZeroArgs\"\x14\n" +
"\x12TimeNgdotAfterArgs\"\x15\n" +
"\x13TimeNgdotBeforeArgs\"\x16\n" +
"\x14TimeNgdotCompareArgs\"\x14\n" +
"\x12TimeNgdotEqualArgs\"\x16\n" +
"\x14MonthNgdotStringArgs\"\x18\n" +
"\x16WeekdayNgdotStringArgs\"\x13\n" +
"\x11TimeNgdotDateArgs\"\x13\n" +
"\x11TimeNgdotYearArgs\"\x14\n" +
"\x12TimeNgdotMonthArgs\"\x12\n" +
"\x10TimeNgdotDayArgs\"\x16\n" +
"\x14TimeNgdotWeekdayArgs\"\x16\n" +
"\x14TimeNgdotISOWeekArgs\"\x14\n" +
"\x12TimeNgdotClockArgs\"\x13\n" +
"\x11TimeNgdotHourArgs\"\x15\n" +
"\x13TimeNgdotMinuteArgs\"\x15\n" +
"\x13TimeNgdotSecondArgs\"\x19\n" +
"\x17TimeNgdotNanosecondArgs\"\x16\n" +
"\x14TimeNgdotYearDayArgs\"\x19\n" +
"\x17DurationNgdotStringArgs\"\x1e\n" +
"\x1cDurationNgdotNanosecondsArgs\"\x1f\n" +
"\x1dDurationNgdotMicrosecondsArgs\"\x1f\n" +
"\x1dDurationNgdotMillisecondsArgs\"\x1a\n" +
"\x18DurationNgdotSecondsArgs\"\x1a\n" +
"\x18DurationNgdotMinutesArgs\"\x18\n" +
"\x16DurationNgdotHoursArgs\"\x1b\n" +
"\x19DurationNgdotTruncateArgs\"\x18\n" +
"\x16DurationNgdotRoundArgs\"\x16\n" +
"\x14DurationNgdotAbsArgs\"\x12\n" +
"\x10TimeNgdotAddArgs\"\x12\n" +
"\x10TimeNgdotSubArgs\"\v\n" +
"\tSinceArgs\"\v\n" +
"\tUntilArgs\"X\n" +
"\x14TimeNgdotAddDateArgs\x12\x14\n" +
"\x05years\x18\x01 \x01(\x03R\x05years\x12\x16\n" +
"\x06months\x18\x02 \x01(\x03R\x06months\x12\x12\n" +
"\x04days\x18\x03 \x01(\x03R\x04days\"\t\n" +
"\aNowArgs\"\x12\n" +
"\x10TimeNgdotUTCArgs\"\x14\n" +
"\x12TimeNgdotLocalArgs\"\x11\n" +
"\x0fTimeNgdotInArgs\"\x17\n" +
"\x15TimeNgdotLocationArgs\"\x13\n" +
"\x11TimeNgdotZoneArgs\"\x19\n" +
"\x17TimeNgdotZoneBoundsArgs\"\x13\n" +
"\x11TimeNgdotUnixArgs\"\x18\n" +
"\x16TimeNgdotUnixMilliArgs\"\x18\n" +
"\x16TimeNgdotUnixMicroArgs\"\x17\n" +
"\x15TimeNgdotUnixNanoArgs\")\n" +
"\x19TimeNgdotAppendBinaryArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"\x1c\n" +
"\x1aTimeNgdotMarshalBinaryArgs\"2\n" +
"\x1cTimeNgdotUnmarshalBinaryArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"\x18\n" +
"\x16TimeNgdotGobEncodeArgs\",\n" +
"\x16TimeNgdotGobDecodeArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"\x1a\n" +
"\x18TimeNgdotMarshalJSONArgs\"0\n" +
"\x1aTimeNgdotUnmarshalJSONArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"'\n" +
"\x17TimeNgdotAppendTextArgs\x12\f\n" +
"\x01b\x18\x01 \x01(\fR\x01b\"\x1a\n" +
"\x18TimeNgdotMarshalTextArgs\"0\n" +
"\x1aTimeNgdotUnmarshalTextArgs\x12\x12\n" +
"\x04data\x18\x01 \x01(\fR\x04data\"0\n" +
"\bUnixArgs\x12\x10\n" +
"\x03sec\x18\x01 \x01(\x03R\x03sec\x12\x12\n" +
"\x04nsec\x18\x02 \x01(\x03R\x04nsec\"#\n" +
"\rUnixMilliArgs\x12\x12\n" +
"\x04msec\x18\x01 \x01(\x03R\x04msec\"#\n" +
"\rUnixMicroArgs\x12\x12\n" +
"\x04usec\x18\x01 \x01(\x03R\x04usec\"\x14\n" +
"\x12TimeNgdotIsDSTArgs\"|\n" +
"\bDateArgs\x12\x12\n" +
"\x04year\x18\x01 \x01(\x03R\x04year\x12\x10\n" +
"\x03day\x18\x02 \x01(\x03R\x03day\x12\x12\n" +
"\x04hour\x18\x03 \x01(\x03R\x04hour\x12\x10\n" +
"\x03min\x18\x04 \x01(\x03R\x03min\x12\x10\n" +
"\x03sec\x18\x05 \x01(\x03R\x03sec\x12\x12\n" +
"\x04nsec\x18\x06 \x01(\x03R\x04nsec\"\x17\n" +
"\x15TimeNgdotTruncateArgs\"\x14\n" +
"\x12TimeNgdotRoundArgs\"\x19\n" +
"\x17LocationNgdotStringArgs\";\n" +
"\rFixedZoneArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
"\x06offset\x18\x02 \x01(\x03R\x06offset\"&\n" +
"\x10LoadLocationArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\"D\n" +
"\x1aLoadLocationFromTZDataArgs\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" +
"\x04data\x18\x02 \x01(\fR\x04data\"\xb40\n" +
"\fNgoloFuzzOne\x12J\n" +
"\x0fTimeNgdotString\x18\x01 \x01(\v2\x1e.ngolofuzz.TimeNgdotStringArgsH\x00R\x0fTimeNgdotString\x12P\n" +
"\x11TimeNgdotGoString\x18\x02 \x01(\v2 .ngolofuzz.TimeNgdotGoStringArgsH\x00R\x11TimeNgdotGoString\x12J\n" +
"\x0fTimeNgdotFormat\x18\x03 \x01(\v2\x1e.ngolofuzz.TimeNgdotFormatArgsH\x00R\x0fTimeNgdotFormat\x12\\\n" +
"\x15TimeNgdotAppendFormat\x18\x04 \x01(\v2$.ngolofuzz.TimeNgdotAppendFormatArgsH\x00R\x15TimeNgdotAppendFormat\x12,\n" +
"\x05Parse\x18\x05 \x01(\v2\x14.ngolofuzz.ParseArgsH\x00R\x05Parse\x12J\n" +
"\x0fParseInLocation\x18\x06 \x01(\v2\x1e.ngolofuzz.ParseInLocationArgsH\x00R\x0fParseInLocation\x12D\n" +
"\rParseDuration\x18\a \x01(\v2\x1c.ngolofuzz.ParseDurationArgsH\x00R\rParseDuration\x12G\n" +
"\x0eTimerNgdotStop\x18\b \x01(\v2\x1d.ngolofuzz.TimerNgdotStopArgsH\x00R\x0eTimerNgdotStop\x125\n" +
"\bNewTimer\x18\t \x01(\v2\x17.ngolofuzz.NewTimerArgsH\x00R\bNewTimer\x12J\n" +
"\x0fTimerNgdotReset\x18\n" +
" \x01(\v2\x1e.ngolofuzz.TimerNgdotResetArgsH\x00R\x0fTimerNgdotReset\x12,\n" +
"\x05After\x18\v \x01(\v2\x14.ngolofuzz.AfterArgsH\x00R\x05After\x128\n" +
"\tNewTicker\x18\f \x01(\v2\x18.ngolofuzz.NewTickerArgsH\x00R\tNewTicker\x12J\n" +
"\x0fTickerNgdotStop\x18\r \x01(\v2\x1e.ngolofuzz.TickerNgdotStopArgsH\x00R\x0fTickerNgdotStop\x12M\n" +
"\x10TickerNgdotReset\x18\x0e \x01(\v2\x1f.ngolofuzz.TickerNgdotResetArgsH\x00R\x10TickerNgdotReset\x12)\n" +
"\x04Tick\x18\x0f \x01(\v2\x13.ngolofuzz.TickArgsH\x00R\x04Tick\x12J\n" +
"\x0fTimeNgdotIsZero\x18\x10 \x01(\v2\x1e.ngolofuzz.TimeNgdotIsZeroArgsH\x00R\x0fTimeNgdotIsZero\x12G\n" +
"\x0eTimeNgdotAfter\x18\x11 \x01(\v2\x1d.ngolofuzz.TimeNgdotAfterArgsH\x00R\x0eTimeNgdotAfter\x12J\n" +
"\x0fTimeNgdotBefore\x18\x12 \x01(\v2\x1e.ngolofuzz.TimeNgdotBeforeArgsH\x00R\x0fTimeNgdotBefore\x12M\n" +
"\x10TimeNgdotCompare\x18\x13 \x01(\v2\x1f.ngolofuzz.TimeNgdotCompareArgsH\x00R\x10TimeNgdotCompare\x12G\n" +
"\x0eTimeNgdotEqual\x18\x14 \x01(\v2\x1d.ngolofuzz.TimeNgdotEqualArgsH\x00R\x0eTimeNgdotEqual\x12M\n" +
"\x10MonthNgdotString\x18\x15 \x01(\v2\x1f.ngolofuzz.MonthNgdotStringArgsH\x00R\x10MonthNgdotString\x12S\n" +
"\x12WeekdayNgdotString\x18\x16 \x01(\v2!.ngolofuzz.WeekdayNgdotStringArgsH\x00R\x12WeekdayNgdotString\x12D\n" +
"\rTimeNgdotDate\x18\x17 \x01(\v2\x1c.ngolofuzz.TimeNgdotDateArgsH\x00R\rTimeNgdotDate\x12D\n" +
"\rTimeNgdotYear\x18\x18 \x01(\v2\x1c.ngolofuzz.TimeNgdotYearArgsH\x00R\rTimeNgdotYear\x12G\n" +
"\x0eTimeNgdotMonth\x18\x19 \x01(\v2\x1d.ngolofuzz.TimeNgdotMonthArgsH\x00R\x0eTimeNgdotMonth\x12A\n" +
"\fTimeNgdotDay\x18\x1a \x01(\v2\x1b.ngolofuzz.TimeNgdotDayArgsH\x00R\fTimeNgdotDay\x12M\n" +
"\x10TimeNgdotWeekday\x18\x1b \x01(\v2\x1f.ngolofuzz.TimeNgdotWeekdayArgsH\x00R\x10TimeNgdotWeekday\x12M\n" +
"\x10TimeNgdotISOWeek\x18\x1c \x01(\v2\x1f.ngolofuzz.TimeNgdotISOWeekArgsH\x00R\x10TimeNgdotISOWeek\x12G\n" +
"\x0eTimeNgdotClock\x18\x1d \x01(\v2\x1d.ngolofuzz.TimeNgdotClockArgsH\x00R\x0eTimeNgdotClock\x12D\n" +
"\rTimeNgdotHour\x18\x1e \x01(\v2\x1c.ngolofuzz.TimeNgdotHourArgsH\x00R\rTimeNgdotHour\x12J\n" +
"\x0fTimeNgdotMinute\x18\x1f \x01(\v2\x1e.ngolofuzz.TimeNgdotMinuteArgsH\x00R\x0fTimeNgdotMinute\x12J\n" +
"\x0fTimeNgdotSecond\x18 \x01(\v2\x1e.ngolofuzz.TimeNgdotSecondArgsH\x00R\x0fTimeNgdotSecond\x12V\n" +
"\x13TimeNgdotNanosecond\x18! \x01(\v2\".ngolofuzz.TimeNgdotNanosecondArgsH\x00R\x13TimeNgdotNanosecond\x12M\n" +
"\x10TimeNgdotYearDay\x18\" \x01(\v2\x1f.ngolofuzz.TimeNgdotYearDayArgsH\x00R\x10TimeNgdotYearDay\x12V\n" +
"\x13DurationNgdotString\x18# \x01(\v2\".ngolofuzz.DurationNgdotStringArgsH\x00R\x13DurationNgdotString\x12e\n" +
"\x18DurationNgdotNanoseconds\x18$ \x01(\v2'.ngolofuzz.DurationNgdotNanosecondsArgsH\x00R\x18DurationNgdotNanoseconds\x12h\n" +
"\x19DurationNgdotMicroseconds\x18% \x01(\v2(.ngolofuzz.DurationNgdotMicrosecondsArgsH\x00R\x19DurationNgdotMicroseconds\x12h\n" +
"\x19DurationNgdotMilliseconds\x18& \x01(\v2(.ngolofuzz.DurationNgdotMillisecondsArgsH\x00R\x19DurationNgdotMilliseconds\x12Y\n" +
"\x14DurationNgdotSeconds\x18' \x01(\v2#.ngolofuzz.DurationNgdotSecondsArgsH\x00R\x14DurationNgdotSeconds\x12Y\n" +
"\x14DurationNgdotMinutes\x18( \x01(\v2#.ngolofuzz.DurationNgdotMinutesArgsH\x00R\x14DurationNgdotMinutes\x12S\n" +
"\x12DurationNgdotHours\x18) \x01(\v2!.ngolofuzz.DurationNgdotHoursArgsH\x00R\x12DurationNgdotHours\x12\\\n" +
"\x15DurationNgdotTruncate\x18* \x01(\v2$.ngolofuzz.DurationNgdotTruncateArgsH\x00R\x15DurationNgdotTruncate\x12S\n" +
"\x12DurationNgdotRound\x18+ \x01(\v2!.ngolofuzz.DurationNgdotRoundArgsH\x00R\x12DurationNgdotRound\x12M\n" +
"\x10DurationNgdotAbs\x18, \x01(\v2\x1f.ngolofuzz.DurationNgdotAbsArgsH\x00R\x10DurationNgdotAbs\x12A\n" +
"\fTimeNgdotAdd\x18- \x01(\v2\x1b.ngolofuzz.TimeNgdotAddArgsH\x00R\fTimeNgdotAdd\x12A\n" +
"\fTimeNgdotSub\x18. \x01(\v2\x1b.ngolofuzz.TimeNgdotSubArgsH\x00R\fTimeNgdotSub\x12,\n" +
"\x05Since\x18/ \x01(\v2\x14.ngolofuzz.SinceArgsH\x00R\x05Since\x12,\n" +
"\x05Until\x180 \x01(\v2\x14.ngolofuzz.UntilArgsH\x00R\x05Until\x12M\n" +
"\x10TimeNgdotAddDate\x181 \x01(\v2\x1f.ngolofuzz.TimeNgdotAddDateArgsH\x00R\x10TimeNgdotAddDate\x12&\n" +
"\x03Now\x182 \x01(\v2\x12.ngolofuzz.NowArgsH\x00R\x03Now\x12A\n" +
"\fTimeNgdotUTC\x183 \x01(\v2\x1b.ngolofuzz.TimeNgdotUTCArgsH\x00R\fTimeNgdotUTC\x12G\n" +
"\x0eTimeNgdotLocal\x184 \x01(\v2\x1d.ngolofuzz.TimeNgdotLocalArgsH\x00R\x0eTimeNgdotLocal\x12>\n" +
"\vTimeNgdotIn\x185 \x01(\v2\x1a.ngolofuzz.TimeNgdotInArgsH\x00R\vTimeNgdotIn\x12P\n" +
"\x11TimeNgdotLocation\x186 \x01(\v2 .ngolofuzz.TimeNgdotLocationArgsH\x00R\x11TimeNgdotLocation\x12D\n" +
"\rTimeNgdotZone\x187 \x01(\v2\x1c.ngolofuzz.TimeNgdotZoneArgsH\x00R\rTimeNgdotZone\x12V\n" +
"\x13TimeNgdotZoneBounds\x188 \x01(\v2\".ngolofuzz.TimeNgdotZoneBoundsArgsH\x00R\x13TimeNgdotZoneBounds\x12D\n" +
"\rTimeNgdotUnix\x189 \x01(\v2\x1c.ngolofuzz.TimeNgdotUnixArgsH\x00R\rTimeNgdotUnix\x12S\n" +
"\x12TimeNgdotUnixMilli\x18: \x01(\v2!.ngolofuzz.TimeNgdotUnixMilliArgsH\x00R\x12TimeNgdotUnixMilli\x12S\n" +
"\x12TimeNgdotUnixMicro\x18; \x01(\v2!.ngolofuzz.TimeNgdotUnixMicroArgsH\x00R\x12TimeNgdotUnixMicro\x12P\n" +
"\x11TimeNgdotUnixNano\x18< \x01(\v2 .ngolofuzz.TimeNgdotUnixNanoArgsH\x00R\x11TimeNgdotUnixNano\x12\\\n" +
"\x15TimeNgdotAppendBinary\x18= \x01(\v2$.ngolofuzz.TimeNgdotAppendBinaryArgsH\x00R\x15TimeNgdotAppendBinary\x12_\n" +
"\x16TimeNgdotMarshalBinary\x18> \x01(\v2%.ngolofuzz.TimeNgdotMarshalBinaryArgsH\x00R\x16TimeNgdotMarshalBinary\x12e\n" +
"\x18TimeNgdotUnmarshalBinary\x18? \x01(\v2'.ngolofuzz.TimeNgdotUnmarshalBinaryArgsH\x00R\x18TimeNgdotUnmarshalBinary\x12S\n" +
"\x12TimeNgdotGobEncode\x18@ \x01(\v2!.ngolofuzz.TimeNgdotGobEncodeArgsH\x00R\x12TimeNgdotGobEncode\x12S\n" +
"\x12TimeNgdotGobDecode\x18A \x01(\v2!.ngolofuzz.TimeNgdotGobDecodeArgsH\x00R\x12TimeNgdotGobDecode\x12Y\n" +
"\x14TimeNgdotMarshalJSON\x18B \x01(\v2#.ngolofuzz.TimeNgdotMarshalJSONArgsH\x00R\x14TimeNgdotMarshalJSON\x12_\n" +
"\x16TimeNgdotUnmarshalJSON\x18C \x01(\v2%.ngolofuzz.TimeNgdotUnmarshalJSONArgsH\x00R\x16TimeNgdotUnmarshalJSON\x12V\n" +
"\x13TimeNgdotAppendText\x18D \x01(\v2\".ngolofuzz.TimeNgdotAppendTextArgsH\x00R\x13TimeNgdotAppendText\x12Y\n" +
"\x14TimeNgdotMarshalText\x18E \x01(\v2#.ngolofuzz.TimeNgdotMarshalTextArgsH\x00R\x14TimeNgdotMarshalText\x12_\n" +
"\x16TimeNgdotUnmarshalText\x18F \x01(\v2%.ngolofuzz.TimeNgdotUnmarshalTextArgsH\x00R\x16TimeNgdotUnmarshalText\x12)\n" +
"\x04Unix\x18G \x01(\v2\x13.ngolofuzz.UnixArgsH\x00R\x04Unix\x128\n" +
"\tUnixMilli\x18H \x01(\v2\x18.ngolofuzz.UnixMilliArgsH\x00R\tUnixMilli\x128\n" +
"\tUnixMicro\x18I \x01(\v2\x18.ngolofuzz.UnixMicroArgsH\x00R\tUnixMicro\x12G\n" +
"\x0eTimeNgdotIsDST\x18J \x01(\v2\x1d.ngolofuzz.TimeNgdotIsDSTArgsH\x00R\x0eTimeNgdotIsDST\x12)\n" +
"\x04Date\x18K \x01(\v2\x13.ngolofuzz.DateArgsH\x00R\x04Date\x12P\n" +
"\x11TimeNgdotTruncate\x18L \x01(\v2 .ngolofuzz.TimeNgdotTruncateArgsH\x00R\x11TimeNgdotTruncate\x12G\n" +
"\x0eTimeNgdotRound\x18M \x01(\v2\x1d.ngolofuzz.TimeNgdotRoundArgsH\x00R\x0eTimeNgdotRound\x12V\n" +
"\x13LocationNgdotString\x18N \x01(\v2\".ngolofuzz.LocationNgdotStringArgsH\x00R\x13LocationNgdotString\x128\n" +
"\tFixedZone\x18O \x01(\v2\x18.ngolofuzz.FixedZoneArgsH\x00R\tFixedZone\x12A\n" +
"\fLoadLocation\x18P \x01(\v2\x1b.ngolofuzz.LoadLocationArgsH\x00R\fLoadLocation\x12_\n" +
"\x16LoadLocationFromTZData\x18Q \x01(\v2%.ngolofuzz.LoadLocationFromTZDataArgsH\x00R\x16LoadLocationFromTZDataB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x11Z\x0f./;fuzz_ng_timeb\x06proto3"
// Lazily-computed gzipped form of the raw descriptor, built at most once.
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP compresses the raw descriptor on first
// call (guarded by sync.Once) and returns the cached gzipped bytes; used by
// the deprecated Descriptor() accessors.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
file_ngolofuzz_proto_rawDescOnce.Do(func() {
// Zero-copy view of the const string's bytes, then gzip once.
file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
})
return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 84)
// Go types for every message, in descriptor index order; the positions must
// match the indices used by file_ngolofuzz_proto_depIdxs (generated — do not
// reorder by hand).
var file_ngolofuzz_proto_goTypes = []any{
(*TimeNgdotStringArgs)(nil), // 0: ngolofuzz.TimeNgdotStringArgs
(*TimeNgdotGoStringArgs)(nil), // 1: ngolofuzz.TimeNgdotGoStringArgs
(*TimeNgdotFormatArgs)(nil), // 2: ngolofuzz.TimeNgdotFormatArgs
(*TimeNgdotAppendFormatArgs)(nil), // 3: ngolofuzz.TimeNgdotAppendFormatArgs
(*ParseArgs)(nil), // 4: ngolofuzz.ParseArgs
(*ParseInLocationArgs)(nil), // 5: ngolofuzz.ParseInLocationArgs
(*ParseDurationArgs)(nil), // 6: ngolofuzz.ParseDurationArgs
(*TimerNgdotStopArgs)(nil), // 7: ngolofuzz.TimerNgdotStopArgs
(*NewTimerArgs)(nil), // 8: ngolofuzz.NewTimerArgs
(*TimerNgdotResetArgs)(nil), // 9: ngolofuzz.TimerNgdotResetArgs
(*AfterArgs)(nil), // 10: ngolofuzz.AfterArgs
(*NewTickerArgs)(nil), // 11: ngolofuzz.NewTickerArgs
(*TickerNgdotStopArgs)(nil), // 12: ngolofuzz.TickerNgdotStopArgs
(*TickerNgdotResetArgs)(nil), // 13: ngolofuzz.TickerNgdotResetArgs
(*TickArgs)(nil), // 14: ngolofuzz.TickArgs
(*TimeNgdotIsZeroArgs)(nil), // 15: ngolofuzz.TimeNgdotIsZeroArgs
(*TimeNgdotAfterArgs)(nil), // 16: ngolofuzz.TimeNgdotAfterArgs
(*TimeNgdotBeforeArgs)(nil), // 17: ngolofuzz.TimeNgdotBeforeArgs
(*TimeNgdotCompareArgs)(nil), // 18: ngolofuzz.TimeNgdotCompareArgs
(*TimeNgdotEqualArgs)(nil), // 19: ngolofuzz.TimeNgdotEqualArgs
(*MonthNgdotStringArgs)(nil), // 20: ngolofuzz.MonthNgdotStringArgs
(*WeekdayNgdotStringArgs)(nil), // 21: ngolofuzz.WeekdayNgdotStringArgs
(*TimeNgdotDateArgs)(nil), // 22: ngolofuzz.TimeNgdotDateArgs
(*TimeNgdotYearArgs)(nil), // 23: ngolofuzz.TimeNgdotYearArgs
(*TimeNgdotMonthArgs)(nil), // 24: ngolofuzz.TimeNgdotMonthArgs
(*TimeNgdotDayArgs)(nil), // 25: ngolofuzz.TimeNgdotDayArgs
(*TimeNgdotWeekdayArgs)(nil), // 26: ngolofuzz.TimeNgdotWeekdayArgs
(*TimeNgdotISOWeekArgs)(nil), // 27: ngolofuzz.TimeNgdotISOWeekArgs
(*TimeNgdotClockArgs)(nil), // 28: ngolofuzz.TimeNgdotClockArgs
(*TimeNgdotHourArgs)(nil), // 29: ngolofuzz.TimeNgdotHourArgs
(*TimeNgdotMinuteArgs)(nil), // 30: ngolofuzz.TimeNgdotMinuteArgs
(*TimeNgdotSecondArgs)(nil), // 31: ngolofuzz.TimeNgdotSecondArgs
(*TimeNgdotNanosecondArgs)(nil), // 32: ngolofuzz.TimeNgdotNanosecondArgs
(*TimeNgdotYearDayArgs)(nil), // 33: ngolofuzz.TimeNgdotYearDayArgs
(*DurationNgdotStringArgs)(nil), // 34: ngolofuzz.DurationNgdotStringArgs
(*DurationNgdotNanosecondsArgs)(nil), // 35: ngolofuzz.DurationNgdotNanosecondsArgs
(*DurationNgdotMicrosecondsArgs)(nil), // 36: ngolofuzz.DurationNgdotMicrosecondsArgs
(*DurationNgdotMillisecondsArgs)(nil), // 37: ngolofuzz.DurationNgdotMillisecondsArgs
(*DurationNgdotSecondsArgs)(nil), // 38: ngolofuzz.DurationNgdotSecondsArgs
(*DurationNgdotMinutesArgs)(nil), // 39: ngolofuzz.DurationNgdotMinutesArgs
(*DurationNgdotHoursArgs)(nil), // 40: ngolofuzz.DurationNgdotHoursArgs
(*DurationNgdotTruncateArgs)(nil), // 41: ngolofuzz.DurationNgdotTruncateArgs
(*DurationNgdotRoundArgs)(nil), // 42: ngolofuzz.DurationNgdotRoundArgs
(*DurationNgdotAbsArgs)(nil), // 43: ngolofuzz.DurationNgdotAbsArgs
(*TimeNgdotAddArgs)(nil), // 44: ngolofuzz.TimeNgdotAddArgs
(*TimeNgdotSubArgs)(nil), // 45: ngolofuzz.TimeNgdotSubArgs
(*SinceArgs)(nil), // 46: ngolofuzz.SinceArgs
(*UntilArgs)(nil), // 47: ngolofuzz.UntilArgs
(*TimeNgdotAddDateArgs)(nil), // 48: ngolofuzz.TimeNgdotAddDateArgs
(*NowArgs)(nil), // 49: ngolofuzz.NowArgs
(*TimeNgdotUTCArgs)(nil), // 50: ngolofuzz.TimeNgdotUTCArgs
(*TimeNgdotLocalArgs)(nil), // 51: ngolofuzz.TimeNgdotLocalArgs
(*TimeNgdotInArgs)(nil), // 52: ngolofuzz.TimeNgdotInArgs
(*TimeNgdotLocationArgs)(nil), // 53: ngolofuzz.TimeNgdotLocationArgs
(*TimeNgdotZoneArgs)(nil), // 54: ngolofuzz.TimeNgdotZoneArgs
(*TimeNgdotZoneBoundsArgs)(nil), // 55: ngolofuzz.TimeNgdotZoneBoundsArgs
(*TimeNgdotUnixArgs)(nil), // 56: ngolofuzz.TimeNgdotUnixArgs
(*TimeNgdotUnixMilliArgs)(nil), // 57: ngolofuzz.TimeNgdotUnixMilliArgs
(*TimeNgdotUnixMicroArgs)(nil), // 58: ngolofuzz.TimeNgdotUnixMicroArgs
(*TimeNgdotUnixNanoArgs)(nil), // 59: ngolofuzz.TimeNgdotUnixNanoArgs
(*TimeNgdotAppendBinaryArgs)(nil), // 60: ngolofuzz.TimeNgdotAppendBinaryArgs
(*TimeNgdotMarshalBinaryArgs)(nil), // 61: ngolofuzz.TimeNgdotMarshalBinaryArgs
(*TimeNgdotUnmarshalBinaryArgs)(nil), // 62: ngolofuzz.TimeNgdotUnmarshalBinaryArgs
(*TimeNgdotGobEncodeArgs)(nil), // 63: ngolofuzz.TimeNgdotGobEncodeArgs
(*TimeNgdotGobDecodeArgs)(nil), // 64: ngolofuzz.TimeNgdotGobDecodeArgs
(*TimeNgdotMarshalJSONArgs)(nil), // 65: ngolofuzz.TimeNgdotMarshalJSONArgs
(*TimeNgdotUnmarshalJSONArgs)(nil), // 66: ngolofuzz.TimeNgdotUnmarshalJSONArgs
(*TimeNgdotAppendTextArgs)(nil), // 67: ngolofuzz.TimeNgdotAppendTextArgs
(*TimeNgdotMarshalTextArgs)(nil), // 68: ngolofuzz.TimeNgdotMarshalTextArgs
(*TimeNgdotUnmarshalTextArgs)(nil), // 69: ngolofuzz.TimeNgdotUnmarshalTextArgs
(*UnixArgs)(nil), // 70: ngolofuzz.UnixArgs
(*UnixMilliArgs)(nil), // 71: ngolofuzz.UnixMilliArgs
(*UnixMicroArgs)(nil), // 72: ngolofuzz.UnixMicroArgs
(*TimeNgdotIsDSTArgs)(nil), // 73: ngolofuzz.TimeNgdotIsDSTArgs
(*DateArgs)(nil), // 74: ngolofuzz.DateArgs
(*TimeNgdotTruncateArgs)(nil), // 75: ngolofuzz.TimeNgdotTruncateArgs
(*TimeNgdotRoundArgs)(nil), // 76: ngolofuzz.TimeNgdotRoundArgs
(*LocationNgdotStringArgs)(nil), // 77: ngolofuzz.LocationNgdotStringArgs
(*FixedZoneArgs)(nil), // 78: ngolofuzz.FixedZoneArgs
(*LoadLocationArgs)(nil), // 79: ngolofuzz.LoadLocationArgs
(*LoadLocationFromTZDataArgs)(nil), // 80: ngolofuzz.LoadLocationFromTZDataArgs
(*NgoloFuzzOne)(nil), // 81: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 82: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 83: ngolofuzz.NgoloFuzzList
}
// file_ngolofuzz_proto_depIdxs maps every cross-message field reference in
// this file (each NgoloFuzzOne oneof wrapper -> its Args message) to an
// index into file_ngolofuzz_proto_goTypes. Generated by protoc-gen-go;
// do not edit by hand.
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.TimeNgdotString:type_name -> ngolofuzz.TimeNgdotStringArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.TimeNgdotGoString:type_name -> ngolofuzz.TimeNgdotGoStringArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.TimeNgdotFormat:type_name -> ngolofuzz.TimeNgdotFormatArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.TimeNgdotAppendFormat:type_name -> ngolofuzz.TimeNgdotAppendFormatArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.Parse:type_name -> ngolofuzz.ParseArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.ParseInLocation:type_name -> ngolofuzz.ParseInLocationArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.ParseDuration:type_name -> ngolofuzz.ParseDurationArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.TimerNgdotStop:type_name -> ngolofuzz.TimerNgdotStopArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.NewTimer:type_name -> ngolofuzz.NewTimerArgs
	9,  // 9: ngolofuzz.NgoloFuzzOne.TimerNgdotReset:type_name -> ngolofuzz.TimerNgdotResetArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.After:type_name -> ngolofuzz.AfterArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.NewTicker:type_name -> ngolofuzz.NewTickerArgs
	12, // 12: ngolofuzz.NgoloFuzzOne.TickerNgdotStop:type_name -> ngolofuzz.TickerNgdotStopArgs
	13, // 13: ngolofuzz.NgoloFuzzOne.TickerNgdotReset:type_name -> ngolofuzz.TickerNgdotResetArgs
	14, // 14: ngolofuzz.NgoloFuzzOne.Tick:type_name -> ngolofuzz.TickArgs
	15, // 15: ngolofuzz.NgoloFuzzOne.TimeNgdotIsZero:type_name -> ngolofuzz.TimeNgdotIsZeroArgs
	16, // 16: ngolofuzz.NgoloFuzzOne.TimeNgdotAfter:type_name -> ngolofuzz.TimeNgdotAfterArgs
	17, // 17: ngolofuzz.NgoloFuzzOne.TimeNgdotBefore:type_name -> ngolofuzz.TimeNgdotBeforeArgs
	18, // 18: ngolofuzz.NgoloFuzzOne.TimeNgdotCompare:type_name -> ngolofuzz.TimeNgdotCompareArgs
	19, // 19: ngolofuzz.NgoloFuzzOne.TimeNgdotEqual:type_name -> ngolofuzz.TimeNgdotEqualArgs
	20, // 20: ngolofuzz.NgoloFuzzOne.MonthNgdotString:type_name -> ngolofuzz.MonthNgdotStringArgs
	21, // 21: ngolofuzz.NgoloFuzzOne.WeekdayNgdotString:type_name -> ngolofuzz.WeekdayNgdotStringArgs
	22, // 22: ngolofuzz.NgoloFuzzOne.TimeNgdotDate:type_name -> ngolofuzz.TimeNgdotDateArgs
	23, // 23: ngolofuzz.NgoloFuzzOne.TimeNgdotYear:type_name -> ngolofuzz.TimeNgdotYearArgs
	24, // 24: ngolofuzz.NgoloFuzzOne.TimeNgdotMonth:type_name -> ngolofuzz.TimeNgdotMonthArgs
	25, // 25: ngolofuzz.NgoloFuzzOne.TimeNgdotDay:type_name -> ngolofuzz.TimeNgdotDayArgs
	26, // 26: ngolofuzz.NgoloFuzzOne.TimeNgdotWeekday:type_name -> ngolofuzz.TimeNgdotWeekdayArgs
	27, // 27: ngolofuzz.NgoloFuzzOne.TimeNgdotISOWeek:type_name -> ngolofuzz.TimeNgdotISOWeekArgs
	28, // 28: ngolofuzz.NgoloFuzzOne.TimeNgdotClock:type_name -> ngolofuzz.TimeNgdotClockArgs
	29, // 29: ngolofuzz.NgoloFuzzOne.TimeNgdotHour:type_name -> ngolofuzz.TimeNgdotHourArgs
	30, // 30: ngolofuzz.NgoloFuzzOne.TimeNgdotMinute:type_name -> ngolofuzz.TimeNgdotMinuteArgs
	31, // 31: ngolofuzz.NgoloFuzzOne.TimeNgdotSecond:type_name -> ngolofuzz.TimeNgdotSecondArgs
	32, // 32: ngolofuzz.NgoloFuzzOne.TimeNgdotNanosecond:type_name -> ngolofuzz.TimeNgdotNanosecondArgs
	33, // 33: ngolofuzz.NgoloFuzzOne.TimeNgdotYearDay:type_name -> ngolofuzz.TimeNgdotYearDayArgs
	34, // 34: ngolofuzz.NgoloFuzzOne.DurationNgdotString:type_name -> ngolofuzz.DurationNgdotStringArgs
	35, // 35: ngolofuzz.NgoloFuzzOne.DurationNgdotNanoseconds:type_name -> ngolofuzz.DurationNgdotNanosecondsArgs
	36, // 36: ngolofuzz.NgoloFuzzOne.DurationNgdotMicroseconds:type_name -> ngolofuzz.DurationNgdotMicrosecondsArgs
	37, // 37: ngolofuzz.NgoloFuzzOne.DurationNgdotMilliseconds:type_name -> ngolofuzz.DurationNgdotMillisecondsArgs
	38, // 38: ngolofuzz.NgoloFuzzOne.DurationNgdotSeconds:type_name -> ngolofuzz.DurationNgdotSecondsArgs
	39, // 39: ngolofuzz.NgoloFuzzOne.DurationNgdotMinutes:type_name -> ngolofuzz.DurationNgdotMinutesArgs
	40, // 40: ngolofuzz.NgoloFuzzOne.DurationNgdotHours:type_name -> ngolofuzz.DurationNgdotHoursArgs
	41, // 41: ngolofuzz.NgoloFuzzOne.DurationNgdotTruncate:type_name -> ngolofuzz.DurationNgdotTruncateArgs
	42, // 42: ngolofuzz.NgoloFuzzOne.DurationNgdotRound:type_name -> ngolofuzz.DurationNgdotRoundArgs
	43, // 43: ngolofuzz.NgoloFuzzOne.DurationNgdotAbs:type_name -> ngolofuzz.DurationNgdotAbsArgs
	44, // 44: ngolofuzz.NgoloFuzzOne.TimeNgdotAdd:type_name -> ngolofuzz.TimeNgdotAddArgs
	45, // 45: ngolofuzz.NgoloFuzzOne.TimeNgdotSub:type_name -> ngolofuzz.TimeNgdotSubArgs
	46, // 46: ngolofuzz.NgoloFuzzOne.Since:type_name -> ngolofuzz.SinceArgs
	47, // 47: ngolofuzz.NgoloFuzzOne.Until:type_name -> ngolofuzz.UntilArgs
	48, // 48: ngolofuzz.NgoloFuzzOne.TimeNgdotAddDate:type_name -> ngolofuzz.TimeNgdotAddDateArgs
	49, // 49: ngolofuzz.NgoloFuzzOne.Now:type_name -> ngolofuzz.NowArgs
	50, // 50: ngolofuzz.NgoloFuzzOne.TimeNgdotUTC:type_name -> ngolofuzz.TimeNgdotUTCArgs
	51, // 51: ngolofuzz.NgoloFuzzOne.TimeNgdotLocal:type_name -> ngolofuzz.TimeNgdotLocalArgs
	52, // 52: ngolofuzz.NgoloFuzzOne.TimeNgdotIn:type_name -> ngolofuzz.TimeNgdotInArgs
	53, // 53: ngolofuzz.NgoloFuzzOne.TimeNgdotLocation:type_name -> ngolofuzz.TimeNgdotLocationArgs
	54, // 54: ngolofuzz.NgoloFuzzOne.TimeNgdotZone:type_name -> ngolofuzz.TimeNgdotZoneArgs
	55, // 55: ngolofuzz.NgoloFuzzOne.TimeNgdotZoneBounds:type_name -> ngolofuzz.TimeNgdotZoneBoundsArgs
	56, // 56: ngolofuzz.NgoloFuzzOne.TimeNgdotUnix:type_name -> ngolofuzz.TimeNgdotUnixArgs
	57, // 57: ngolofuzz.NgoloFuzzOne.TimeNgdotUnixMilli:type_name -> ngolofuzz.TimeNgdotUnixMilliArgs
	58, // 58: ngolofuzz.NgoloFuzzOne.TimeNgdotUnixMicro:type_name -> ngolofuzz.TimeNgdotUnixMicroArgs
	59, // 59: ngolofuzz.NgoloFuzzOne.TimeNgdotUnixNano:type_name -> ngolofuzz.TimeNgdotUnixNanoArgs
	60, // 60: ngolofuzz.NgoloFuzzOne.TimeNgdotAppendBinary:type_name -> ngolofuzz.TimeNgdotAppendBinaryArgs
	61, // 61: ngolofuzz.NgoloFuzzOne.TimeNgdotMarshalBinary:type_name -> ngolofuzz.TimeNgdotMarshalBinaryArgs
	62, // 62: ngolofuzz.NgoloFuzzOne.TimeNgdotUnmarshalBinary:type_name -> ngolofuzz.TimeNgdotUnmarshalBinaryArgs
	63, // 63: ngolofuzz.NgoloFuzzOne.TimeNgdotGobEncode:type_name -> ngolofuzz.TimeNgdotGobEncodeArgs
	64, // 64: ngolofuzz.NgoloFuzzOne.TimeNgdotGobDecode:type_name -> ngolofuzz.TimeNgdotGobDecodeArgs
	65, // 65: ngolofuzz.NgoloFuzzOne.TimeNgdotMarshalJSON:type_name -> ngolofuzz.TimeNgdotMarshalJSONArgs
	66, // 66: ngolofuzz.NgoloFuzzOne.TimeNgdotUnmarshalJSON:type_name -> ngolofuzz.TimeNgdotUnmarshalJSONArgs
	67, // 67: ngolofuzz.NgoloFuzzOne.TimeNgdotAppendText:type_name -> ngolofuzz.TimeNgdotAppendTextArgs
	68, // 68: ngolofuzz.NgoloFuzzOne.TimeNgdotMarshalText:type_name -> ngolofuzz.TimeNgdotMarshalTextArgs
	69, // 69: ngolofuzz.NgoloFuzzOne.TimeNgdotUnmarshalText:type_name -> ngolofuzz.TimeNgdotUnmarshalTextArgs
	70, // 70: ngolofuzz.NgoloFuzzOne.Unix:type_name -> ngolofuzz.UnixArgs
	71, // 71: ngolofuzz.NgoloFuzzOne.UnixMilli:type_name -> ngolofuzz.UnixMilliArgs
	72, // 72: ngolofuzz.NgoloFuzzOne.UnixMicro:type_name -> ngolofuzz.UnixMicroArgs
	73, // 73: ngolofuzz.NgoloFuzzOne.TimeNgdotIsDST:type_name -> ngolofuzz.TimeNgdotIsDSTArgs
	74, // 74: ngolofuzz.NgoloFuzzOne.Date:type_name -> ngolofuzz.DateArgs
	75, // 75: ngolofuzz.NgoloFuzzOne.TimeNgdotTruncate:type_name -> ngolofuzz.TimeNgdotTruncateArgs
	76, // 76: ngolofuzz.NgoloFuzzOne.TimeNgdotRound:type_name -> ngolofuzz.TimeNgdotRoundArgs
	77, // 77: ngolofuzz.NgoloFuzzOne.LocationNgdotString:type_name -> ngolofuzz.LocationNgdotStringArgs
	78, // 78: ngolofuzz.NgoloFuzzOne.FixedZone:type_name -> ngolofuzz.FixedZoneArgs
	79, // 79: ngolofuzz.NgoloFuzzOne.LoadLocation:type_name -> ngolofuzz.LoadLocationArgs
	80, // 80: ngolofuzz.NgoloFuzzOne.LoadLocationFromTZData:type_name -> ngolofuzz.LoadLocationFromTZDataArgs
	81, // 81: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	82, // [82:82] is the sub-list for method output_type
	82, // [82:82] is the sub-list for method input_type
	82, // [82:82] is the sub-list for extension type_name
	82, // [82:82] is the sub-list for extension extendee
	0,  // [0:82] is the sub-list for field type_name
}
// init registers the generated protobuf file descriptor at package load time.
func init() { file_ngolofuzz_proto_init() }
// file_ngolofuzz_proto_init builds the runtime type descriptors for this
// generated file. It is idempotent: a second call returns immediately once
// File_ngolofuzz_proto is set. Generated by protoc-gen-go; do not edit.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Message index 81 (NgoloFuzzOne) is a oneof; register its wrapper types.
	file_ngolofuzz_proto_msgTypes[81].OneofWrappers = []any{
		(*NgoloFuzzOne_TimeNgdotString)(nil),
		(*NgoloFuzzOne_TimeNgdotGoString)(nil),
		(*NgoloFuzzOne_TimeNgdotFormat)(nil),
		(*NgoloFuzzOne_TimeNgdotAppendFormat)(nil),
		(*NgoloFuzzOne_Parse)(nil),
		(*NgoloFuzzOne_ParseInLocation)(nil),
		(*NgoloFuzzOne_ParseDuration)(nil),
		(*NgoloFuzzOne_TimerNgdotStop)(nil),
		(*NgoloFuzzOne_NewTimer)(nil),
		(*NgoloFuzzOne_TimerNgdotReset)(nil),
		(*NgoloFuzzOne_After)(nil),
		(*NgoloFuzzOne_NewTicker)(nil),
		(*NgoloFuzzOne_TickerNgdotStop)(nil),
		(*NgoloFuzzOne_TickerNgdotReset)(nil),
		(*NgoloFuzzOne_Tick)(nil),
		(*NgoloFuzzOne_TimeNgdotIsZero)(nil),
		(*NgoloFuzzOne_TimeNgdotAfter)(nil),
		(*NgoloFuzzOne_TimeNgdotBefore)(nil),
		(*NgoloFuzzOne_TimeNgdotCompare)(nil),
		(*NgoloFuzzOne_TimeNgdotEqual)(nil),
		(*NgoloFuzzOne_MonthNgdotString)(nil),
		(*NgoloFuzzOne_WeekdayNgdotString)(nil),
		(*NgoloFuzzOne_TimeNgdotDate)(nil),
		(*NgoloFuzzOne_TimeNgdotYear)(nil),
		(*NgoloFuzzOne_TimeNgdotMonth)(nil),
		(*NgoloFuzzOne_TimeNgdotDay)(nil),
		(*NgoloFuzzOne_TimeNgdotWeekday)(nil),
		(*NgoloFuzzOne_TimeNgdotISOWeek)(nil),
		(*NgoloFuzzOne_TimeNgdotClock)(nil),
		(*NgoloFuzzOne_TimeNgdotHour)(nil),
		(*NgoloFuzzOne_TimeNgdotMinute)(nil),
		(*NgoloFuzzOne_TimeNgdotSecond)(nil),
		(*NgoloFuzzOne_TimeNgdotNanosecond)(nil),
		(*NgoloFuzzOne_TimeNgdotYearDay)(nil),
		(*NgoloFuzzOne_DurationNgdotString)(nil),
		(*NgoloFuzzOne_DurationNgdotNanoseconds)(nil),
		(*NgoloFuzzOne_DurationNgdotMicroseconds)(nil),
		(*NgoloFuzzOne_DurationNgdotMilliseconds)(nil),
		(*NgoloFuzzOne_DurationNgdotSeconds)(nil),
		(*NgoloFuzzOne_DurationNgdotMinutes)(nil),
		(*NgoloFuzzOne_DurationNgdotHours)(nil),
		(*NgoloFuzzOne_DurationNgdotTruncate)(nil),
		(*NgoloFuzzOne_DurationNgdotRound)(nil),
		(*NgoloFuzzOne_DurationNgdotAbs)(nil),
		(*NgoloFuzzOne_TimeNgdotAdd)(nil),
		(*NgoloFuzzOne_TimeNgdotSub)(nil),
		(*NgoloFuzzOne_Since)(nil),
		(*NgoloFuzzOne_Until)(nil),
		(*NgoloFuzzOne_TimeNgdotAddDate)(nil),
		(*NgoloFuzzOne_Now)(nil),
		(*NgoloFuzzOne_TimeNgdotUTC)(nil),
		(*NgoloFuzzOne_TimeNgdotLocal)(nil),
		(*NgoloFuzzOne_TimeNgdotIn)(nil),
		(*NgoloFuzzOne_TimeNgdotLocation)(nil),
		(*NgoloFuzzOne_TimeNgdotZone)(nil),
		(*NgoloFuzzOne_TimeNgdotZoneBounds)(nil),
		(*NgoloFuzzOne_TimeNgdotUnix)(nil),
		(*NgoloFuzzOne_TimeNgdotUnixMilli)(nil),
		(*NgoloFuzzOne_TimeNgdotUnixMicro)(nil),
		(*NgoloFuzzOne_TimeNgdotUnixNano)(nil),
		(*NgoloFuzzOne_TimeNgdotAppendBinary)(nil),
		(*NgoloFuzzOne_TimeNgdotMarshalBinary)(nil),
		(*NgoloFuzzOne_TimeNgdotUnmarshalBinary)(nil),
		(*NgoloFuzzOne_TimeNgdotGobEncode)(nil),
		(*NgoloFuzzOne_TimeNgdotGobDecode)(nil),
		(*NgoloFuzzOne_TimeNgdotMarshalJSON)(nil),
		(*NgoloFuzzOne_TimeNgdotUnmarshalJSON)(nil),
		(*NgoloFuzzOne_TimeNgdotAppendText)(nil),
		(*NgoloFuzzOne_TimeNgdotMarshalText)(nil),
		(*NgoloFuzzOne_TimeNgdotUnmarshalText)(nil),
		(*NgoloFuzzOne_Unix)(nil),
		(*NgoloFuzzOne_UnixMilli)(nil),
		(*NgoloFuzzOne_UnixMicro)(nil),
		(*NgoloFuzzOne_TimeNgdotIsDST)(nil),
		(*NgoloFuzzOne_Date)(nil),
		(*NgoloFuzzOne_TimeNgdotTruncate)(nil),
		(*NgoloFuzzOne_TimeNgdotRound)(nil),
		(*NgoloFuzzOne_LocationNgdotString)(nil),
		(*NgoloFuzzOne_FixedZone)(nil),
		(*NgoloFuzzOne_LoadLocation)(nil),
		(*NgoloFuzzOne_LoadLocationFromTZData)(nil),
	}
	// Message index 82 (NgoloFuzzAny) wraps the primitive scalar alternatives.
	file_ngolofuzz_proto_msgTypes[82].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums: 0,
			NumMessages: 84,
			NumExtensions: 0,
			NumServices: 0,
		},
		GoTypes: file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos: file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the bootstrap tables so they can be garbage collected.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_unicode
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
"unicode"
)
// FuzzingConn is an in-memory stand-in for a net.Conn: reads are served
// from a fixed byte slice and writes are discarded. It lets fuzz targets
// drive connection-handling code without any real network I/O.
type FuzzingConn struct {
	buf    []byte // canned input returned by Read
	offset int    // number of bytes of buf already consumed
}

// Read copies the next unread bytes of the canned buffer into b and
// returns the number of bytes copied; it returns io.EOF once the buffer
// is exhausted.
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	// copy transfers min(len(b), remaining) bytes, so n always equals the
	// number of bytes actually delivered. (The previous condition compared
	// len(b) against len(c.buf)+c.offset instead of the remaining count,
	// which could report more bytes than were copied and skip input.)
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}

// Write discards b and reports it as fully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}

// Close marks the connection as drained; subsequent Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}

// FuzzingAddr is a placeholder net.Addr used for both endpoints of a
// FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}

// LocalAddr returns a placeholder local address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder remote address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for canned input.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}

// CreateFuzzingConn returns a FuzzingConn whose Reads yield the bytes of a.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	r := &FuzzingConn{}
	r.buf = a
	return r
}
// TODO: emit the following helper functions only when the generated harness actually needs them.
// CreateBigInt interprets a as the big-endian bytes of an unsigned
// integer and returns the corresponding *big.Int.
func CreateBigInt(a []byte) *big.Int {
	// SetBytes returns its receiver, so the construction can be chained.
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader wraps the byte slice a in a buffered reader so that
// generated fuzz targets can feed it to APIs expecting *bufio.Reader.
func CreateBufioReader(a []byte) *bufio.Reader {
	src := bytes.NewBuffer(a)
	return bufio.NewReader(src)
}
// ConvertIntArray narrows each int64 in a to the platform int type and
// returns the converted slice (same length, element-wise int(v)).
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to uint16 (modulo 2^16)
// and returns the converted slice.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune returns the first rune of s decoded as UTF-8 (invalid bytes
// yield utf8.RuneError, as with any range over a string), or NUL when
// s is empty.
func GetRune(s string) rune {
	if len(s) == 0 {
		return '\x00'
	}
	var first rune
	for _, first = range s {
		break
	}
	return first
}
// FuzzNG_valid decodes data as a protobuf-encoded NgoloFuzzList and replays
// it through FuzzNG_List. The input is expected to already be valid protobuf
// (e.g. produced by a protobuf-aware mutator), so a decode failure is a
// harness bug and aborts via panic.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Note: this panic fires before the recover below is registered,
		// so it intentionally crashes the process.
		panic("Failed to unmarshal LPM generated variables")
	}
	// Swallow string panics raised while replaying the call list (treated
	// as expected failure modes of the exercised package); re-raise
	// anything else so the fuzzer reports it.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the entry point for raw fuzzer input: the bytes may not
// be a valid protobuf encoding, so a decode failure simply rejects the
// input (returns 0) instead of panicking like FuzzNG_valid.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Swallow string panics raised while replaying the call list; re-raise
	// anything else so the fuzzer reports it.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer dump performed by FuzzNG_List.
var initialized bool
// FuzzNG_List replays each generated call in gen against the unicode
// package. On the first invocation it optionally writes a Go reproducer of
// the call list to the file named by the FUZZ_NG_REPRODUCER environment
// variable. Returns 1 when the list was replayed, 0 when it was rejected.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	// l is the call index; lists longer than 4096 entries are rejected to
	// bound per-input runtime.
	for l := range gen.List {
		if l > 4096 {
			return 0
		}
		// Dispatch on the oneof wrapper; each Args message carries a string
		// whose first rune (via GetRune) is passed to the unicode function.
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_IsDigit:
			arg0 := GetRune(a.IsDigit.R)
			unicode.IsDigit(arg0)
		case *NgoloFuzzOne_IsGraphic:
			arg0 := GetRune(a.IsGraphic.R)
			unicode.IsGraphic(arg0)
		case *NgoloFuzzOne_IsPrint:
			arg0 := GetRune(a.IsPrint.R)
			unicode.IsPrint(arg0)
		case *NgoloFuzzOne_IsControl:
			arg0 := GetRune(a.IsControl.R)
			unicode.IsControl(arg0)
		case *NgoloFuzzOne_IsLetter:
			arg0 := GetRune(a.IsLetter.R)
			unicode.IsLetter(arg0)
		case *NgoloFuzzOne_IsMark:
			arg0 := GetRune(a.IsMark.R)
			unicode.IsMark(arg0)
		case *NgoloFuzzOne_IsNumber:
			arg0 := GetRune(a.IsNumber.R)
			unicode.IsNumber(arg0)
		case *NgoloFuzzOne_IsPunct:
			arg0 := GetRune(a.IsPunct.R)
			unicode.IsPunct(arg0)
		case *NgoloFuzzOne_IsSpace:
			arg0 := GetRune(a.IsSpace.R)
			unicode.IsSpace(arg0)
		case *NgoloFuzzOne_IsSymbol:
			arg0 := GetRune(a.IsSymbol.R)
			unicode.IsSymbol(arg0)
		case *NgoloFuzzOne_IsUpper:
			arg0 := GetRune(a.IsUpper.R)
			unicode.IsUpper(arg0)
		case *NgoloFuzzOne_IsLower:
			arg0 := GetRune(a.IsLower.R)
			unicode.IsLower(arg0)
		case *NgoloFuzzOne_IsTitle:
			arg0 := GetRune(a.IsTitle.R)
			unicode.IsTitle(arg0)
		case *NgoloFuzzOne_To:
			// To additionally takes a case selector (UpperCase/LowerCase/TitleCase).
			arg0 := int(a.To.XCase)
			arg1 := GetRune(a.To.R)
			unicode.To(arg0, arg1)
		case *NgoloFuzzOne_ToUpper:
			arg0 := GetRune(a.ToUpper.R)
			unicode.ToUpper(arg0)
		case *NgoloFuzzOne_ToLower:
			arg0 := GetRune(a.ToLower.R)
			unicode.ToLower(arg0)
		case *NgoloFuzzOne_ToTitle:
			arg0 := GetRune(a.ToTitle.R)
			unicode.ToTitle(arg0)
		case *NgoloFuzzOne_SimpleFold:
			arg0 := GetRune(a.SimpleFold.R)
			unicode.SimpleFold(arg0)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source reproducer of the call list in gen to w:
// one line of Go per generated call, mirroring the dispatch performed by
// FuzzNG_List. WriteString errors are deliberately ignored (best-effort
// debugging aid).
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_IsDigit:
			w.WriteString(fmt.Sprintf("unicode.IsDigit(GetRune(%#+v))\n", a.IsDigit.R))
		case *NgoloFuzzOne_IsGraphic:
			w.WriteString(fmt.Sprintf("unicode.IsGraphic(GetRune(%#+v))\n", a.IsGraphic.R))
		case *NgoloFuzzOne_IsPrint:
			w.WriteString(fmt.Sprintf("unicode.IsPrint(GetRune(%#+v))\n", a.IsPrint.R))
		case *NgoloFuzzOne_IsControl:
			w.WriteString(fmt.Sprintf("unicode.IsControl(GetRune(%#+v))\n", a.IsControl.R))
		case *NgoloFuzzOne_IsLetter:
			w.WriteString(fmt.Sprintf("unicode.IsLetter(GetRune(%#+v))\n", a.IsLetter.R))
		case *NgoloFuzzOne_IsMark:
			w.WriteString(fmt.Sprintf("unicode.IsMark(GetRune(%#+v))\n", a.IsMark.R))
		case *NgoloFuzzOne_IsNumber:
			w.WriteString(fmt.Sprintf("unicode.IsNumber(GetRune(%#+v))\n", a.IsNumber.R))
		case *NgoloFuzzOne_IsPunct:
			w.WriteString(fmt.Sprintf("unicode.IsPunct(GetRune(%#+v))\n", a.IsPunct.R))
		case *NgoloFuzzOne_IsSpace:
			w.WriteString(fmt.Sprintf("unicode.IsSpace(GetRune(%#+v))\n", a.IsSpace.R))
		case *NgoloFuzzOne_IsSymbol:
			w.WriteString(fmt.Sprintf("unicode.IsSymbol(GetRune(%#+v))\n", a.IsSymbol.R))
		case *NgoloFuzzOne_IsUpper:
			w.WriteString(fmt.Sprintf("unicode.IsUpper(GetRune(%#+v))\n", a.IsUpper.R))
		case *NgoloFuzzOne_IsLower:
			w.WriteString(fmt.Sprintf("unicode.IsLower(GetRune(%#+v))\n", a.IsLower.R))
		case *NgoloFuzzOne_IsTitle:
			w.WriteString(fmt.Sprintf("unicode.IsTitle(GetRune(%#+v))\n", a.IsTitle.R))
		case *NgoloFuzzOne_To:
			w.WriteString(fmt.Sprintf("unicode.To(int(%#+v), GetRune(%#+v))\n", a.To.XCase, a.To.R))
		case *NgoloFuzzOne_ToUpper:
			w.WriteString(fmt.Sprintf("unicode.ToUpper(GetRune(%#+v))\n", a.ToUpper.R))
		case *NgoloFuzzOne_ToLower:
			w.WriteString(fmt.Sprintf("unicode.ToLower(GetRune(%#+v))\n", a.ToLower.R))
		case *NgoloFuzzOne_ToTitle:
			w.WriteString(fmt.Sprintf("unicode.ToTitle(GetRune(%#+v))\n", a.ToTitle.R))
		case *NgoloFuzzOne_SimpleFold:
			w.WriteString(fmt.Sprintf("unicode.SimpleFold(GetRune(%#+v))\n", a.SimpleFold.R))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_unicode
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards: the build fails if the linked protobuf runtime is
// outside the version window this generated code supports.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// IsDigitArgs is the generated request message for unicode.IsDigit; R
// carries the string whose first rune the harness passes to the function.
type IsDigitArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
// Reset restores x to an empty message bound to its type descriptor.
func (x *IsDigitArgs) Reset() {
	*x = IsDigitArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsDigitArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsDigitArgs) ProtoMessage() {}
func (x *IsDigitArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsDigitArgs.ProtoReflect.Descriptor instead.
func (*IsDigitArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsDigitArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsGraphicArgs is the generated request message for unicode.IsGraphic.
type IsGraphicArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsGraphicArgs) Reset() {
	*x = IsGraphicArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsGraphicArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsGraphicArgs) ProtoMessage() {}
func (x *IsGraphicArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsGraphicArgs.ProtoReflect.Descriptor instead.
func (*IsGraphicArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsGraphicArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsPrintArgs is the generated request message for unicode.IsPrint.
type IsPrintArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsPrintArgs) Reset() {
	*x = IsPrintArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsPrintArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsPrintArgs) ProtoMessage() {}
func (x *IsPrintArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsPrintArgs.ProtoReflect.Descriptor instead.
func (*IsPrintArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsPrintArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsControlArgs is the generated request message for unicode.IsControl.
type IsControlArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsControlArgs) Reset() {
	*x = IsControlArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsControlArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsControlArgs) ProtoMessage() {}
func (x *IsControlArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsControlArgs.ProtoReflect.Descriptor instead.
func (*IsControlArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsControlArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsLetterArgs is the generated request message for unicode.IsLetter.
type IsLetterArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsLetterArgs) Reset() {
	*x = IsLetterArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsLetterArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsLetterArgs) ProtoMessage() {}
func (x *IsLetterArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsLetterArgs.ProtoReflect.Descriptor instead.
func (*IsLetterArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsLetterArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsMarkArgs is the generated request message for unicode.IsMark.
type IsMarkArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsMarkArgs) Reset() {
	*x = IsMarkArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsMarkArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsMarkArgs) ProtoMessage() {}
func (x *IsMarkArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsMarkArgs.ProtoReflect.Descriptor instead.
func (*IsMarkArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsMarkArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsNumberArgs is the generated request message for unicode.IsNumber.
type IsNumberArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsNumberArgs) Reset() {
	*x = IsNumberArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsNumberArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsNumberArgs) ProtoMessage() {}
func (x *IsNumberArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsNumberArgs.ProtoReflect.Descriptor instead.
func (*IsNumberArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsNumberArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsPunctArgs is the generated request message for unicode.IsPunct.
type IsPunctArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsPunctArgs) Reset() {
	*x = IsPunctArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsPunctArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsPunctArgs) ProtoMessage() {}
func (x *IsPunctArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsPunctArgs.ProtoReflect.Descriptor instead.
func (*IsPunctArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsPunctArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsSpaceArgs is the generated request message for unicode.IsSpace.
type IsSpaceArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsSpaceArgs) Reset() {
	*x = IsSpaceArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsSpaceArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsSpaceArgs) ProtoMessage() {}
func (x *IsSpaceArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsSpaceArgs.ProtoReflect.Descriptor instead.
func (*IsSpaceArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsSpaceArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsSymbolArgs is the generated request message for unicode.IsSymbol.
type IsSymbolArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsSymbolArgs) Reset() {
	*x = IsSymbolArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsSymbolArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsSymbolArgs) ProtoMessage() {}
func (x *IsSymbolArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsSymbolArgs.ProtoReflect.Descriptor instead.
func (*IsSymbolArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsSymbolArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsUpperArgs is the generated request message for unicode.IsUpper.
type IsUpperArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsUpperArgs) Reset() {
	*x = IsUpperArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsUpperArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsUpperArgs) ProtoMessage() {}
func (x *IsUpperArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsUpperArgs.ProtoReflect.Descriptor instead.
func (*IsUpperArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsUpperArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsLowerArgs is the generated request message for unicode.IsLower.
type IsLowerArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsLowerArgs) Reset() {
	*x = IsLowerArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsLowerArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsLowerArgs) ProtoMessage() {}
func (x *IsLowerArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsLowerArgs.ProtoReflect.Descriptor instead.
func (*IsLowerArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsLowerArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// IsTitleArgs is the generated request message for unicode.IsTitle.
type IsTitleArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}
func (x *IsTitleArgs) Reset() {
	*x = IsTitleArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}
func (x *IsTitleArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*IsTitleArgs) ProtoMessage() {}
func (x *IsTitleArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use IsTitleArgs.ProtoReflect.Descriptor instead.
func (*IsTitleArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}
// GetR returns the argument string, or "" for a nil receiver.
func (x *IsTitleArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
type ToArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
XCase int64 `protobuf:"varint,1,opt,name=_case,json=Case,proto3" json:"_case,omitempty"`
R string `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ToArgs) Reset() {
*x = ToArgs{}
mi := &file_ngolofuzz_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ToArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ToArgs) ProtoMessage() {}
func (x *ToArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[13]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ToArgs.ProtoReflect.Descriptor instead.
func (*ToArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}
func (x *ToArgs) GetXCase() int64 {
if x != nil {
return x.XCase
}
return 0
}
func (x *ToArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
type ToUpperArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ToUpperArgs) Reset() {
*x = ToUpperArgs{}
mi := &file_ngolofuzz_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ToUpperArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ToUpperArgs) ProtoMessage() {}
func (x *ToUpperArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[14]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ToUpperArgs.ProtoReflect.Descriptor instead.
func (*ToUpperArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}
func (x *ToUpperArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
type ToLowerArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ToLowerArgs) Reset() {
*x = ToLowerArgs{}
mi := &file_ngolofuzz_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ToLowerArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ToLowerArgs) ProtoMessage() {}
func (x *ToLowerArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[15]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ToLowerArgs.ProtoReflect.Descriptor instead.
func (*ToLowerArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}
func (x *ToLowerArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
type ToTitleArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ToTitleArgs) Reset() {
*x = ToTitleArgs{}
mi := &file_ngolofuzz_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ToTitleArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ToTitleArgs) ProtoMessage() {}
func (x *ToTitleArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[16]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ToTitleArgs.ProtoReflect.Descriptor instead.
func (*ToTitleArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}
func (x *ToTitleArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
type SimpleFoldArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SimpleFoldArgs) Reset() {
*x = SimpleFoldArgs{}
mi := &file_ngolofuzz_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SimpleFoldArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SimpleFoldArgs) ProtoMessage() {}
func (x *SimpleFoldArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SimpleFoldArgs.ProtoReflect.Descriptor instead.
func (*SimpleFoldArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}
func (x *SimpleFoldArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_IsDigit
// *NgoloFuzzOne_IsGraphic
// *NgoloFuzzOne_IsPrint
// *NgoloFuzzOne_IsControl
// *NgoloFuzzOne_IsLetter
// *NgoloFuzzOne_IsMark
// *NgoloFuzzOne_IsNumber
// *NgoloFuzzOne_IsPunct
// *NgoloFuzzOne_IsSpace
// *NgoloFuzzOne_IsSymbol
// *NgoloFuzzOne_IsUpper
// *NgoloFuzzOne_IsLower
// *NgoloFuzzOne_IsTitle
// *NgoloFuzzOne_To
// *NgoloFuzzOne_ToUpper
// *NgoloFuzzOne_ToLower
// *NgoloFuzzOne_ToTitle
// *NgoloFuzzOne_SimpleFold
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{18}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetIsDigit() *IsDigitArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsDigit); ok {
return x.IsDigit
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsGraphic() *IsGraphicArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsGraphic); ok {
return x.IsGraphic
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsPrint() *IsPrintArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsPrint); ok {
return x.IsPrint
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsControl() *IsControlArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsControl); ok {
return x.IsControl
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsLetter() *IsLetterArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsLetter); ok {
return x.IsLetter
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsMark() *IsMarkArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsMark); ok {
return x.IsMark
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsNumber() *IsNumberArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsNumber); ok {
return x.IsNumber
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsPunct() *IsPunctArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsPunct); ok {
return x.IsPunct
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsSpace() *IsSpaceArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsSpace); ok {
return x.IsSpace
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsSymbol() *IsSymbolArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsSymbol); ok {
return x.IsSymbol
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsUpper() *IsUpperArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsUpper); ok {
return x.IsUpper
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsLower() *IsLowerArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsLower); ok {
return x.IsLower
}
}
return nil
}
func (x *NgoloFuzzOne) GetIsTitle() *IsTitleArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsTitle); ok {
return x.IsTitle
}
}
return nil
}
func (x *NgoloFuzzOne) GetTo() *ToArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_To); ok {
return x.To
}
}
return nil
}
func (x *NgoloFuzzOne) GetToUpper() *ToUpperArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToUpper); ok {
return x.ToUpper
}
}
return nil
}
func (x *NgoloFuzzOne) GetToLower() *ToLowerArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToLower); ok {
return x.ToLower
}
}
return nil
}
func (x *NgoloFuzzOne) GetToTitle() *ToTitleArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_ToTitle); ok {
return x.ToTitle
}
}
return nil
}
func (x *NgoloFuzzOne) GetSimpleFold() *SimpleFoldArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_SimpleFold); ok {
return x.SimpleFold
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_IsDigit struct {
IsDigit *IsDigitArgs `protobuf:"bytes,1,opt,name=IsDigit,proto3,oneof"`
}
type NgoloFuzzOne_IsGraphic struct {
IsGraphic *IsGraphicArgs `protobuf:"bytes,2,opt,name=IsGraphic,proto3,oneof"`
}
type NgoloFuzzOne_IsPrint struct {
IsPrint *IsPrintArgs `protobuf:"bytes,3,opt,name=IsPrint,proto3,oneof"`
}
type NgoloFuzzOne_IsControl struct {
IsControl *IsControlArgs `protobuf:"bytes,4,opt,name=IsControl,proto3,oneof"`
}
type NgoloFuzzOne_IsLetter struct {
IsLetter *IsLetterArgs `protobuf:"bytes,5,opt,name=IsLetter,proto3,oneof"`
}
type NgoloFuzzOne_IsMark struct {
IsMark *IsMarkArgs `protobuf:"bytes,6,opt,name=IsMark,proto3,oneof"`
}
type NgoloFuzzOne_IsNumber struct {
IsNumber *IsNumberArgs `protobuf:"bytes,7,opt,name=IsNumber,proto3,oneof"`
}
type NgoloFuzzOne_IsPunct struct {
IsPunct *IsPunctArgs `protobuf:"bytes,8,opt,name=IsPunct,proto3,oneof"`
}
type NgoloFuzzOne_IsSpace struct {
IsSpace *IsSpaceArgs `protobuf:"bytes,9,opt,name=IsSpace,proto3,oneof"`
}
type NgoloFuzzOne_IsSymbol struct {
IsSymbol *IsSymbolArgs `protobuf:"bytes,10,opt,name=IsSymbol,proto3,oneof"`
}
type NgoloFuzzOne_IsUpper struct {
IsUpper *IsUpperArgs `protobuf:"bytes,11,opt,name=IsUpper,proto3,oneof"`
}
type NgoloFuzzOne_IsLower struct {
IsLower *IsLowerArgs `protobuf:"bytes,12,opt,name=IsLower,proto3,oneof"`
}
type NgoloFuzzOne_IsTitle struct {
IsTitle *IsTitleArgs `protobuf:"bytes,13,opt,name=IsTitle,proto3,oneof"`
}
type NgoloFuzzOne_To struct {
To *ToArgs `protobuf:"bytes,14,opt,name=To,proto3,oneof"`
}
type NgoloFuzzOne_ToUpper struct {
ToUpper *ToUpperArgs `protobuf:"bytes,15,opt,name=ToUpper,proto3,oneof"`
}
type NgoloFuzzOne_ToLower struct {
ToLower *ToLowerArgs `protobuf:"bytes,16,opt,name=ToLower,proto3,oneof"`
}
type NgoloFuzzOne_ToTitle struct {
ToTitle *ToTitleArgs `protobuf:"bytes,17,opt,name=ToTitle,proto3,oneof"`
}
type NgoloFuzzOne_SimpleFold struct {
SimpleFold *SimpleFoldArgs `protobuf:"bytes,18,opt,name=SimpleFold,proto3,oneof"`
}
func (*NgoloFuzzOne_IsDigit) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsGraphic) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsPrint) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsControl) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsLetter) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsMark) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsNumber) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsPunct) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsSpace) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsSymbol) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsUpper) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsLower) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_IsTitle) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_To) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToUpper) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToLower) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_ToTitle) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_SimpleFold) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{19}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{20}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1b\n" +
"\vIsDigitArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1d\n" +
"\rIsGraphicArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1b\n" +
"\vIsPrintArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1d\n" +
"\rIsControlArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1c\n" +
"\fIsLetterArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1a\n" +
"\n" +
"IsMarkArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1c\n" +
"\fIsNumberArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1b\n" +
"\vIsPunctArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1b\n" +
"\vIsSpaceArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1c\n" +
"\fIsSymbolArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1b\n" +
"\vIsUpperArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1b\n" +
"\vIsLowerArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1b\n" +
"\vIsTitleArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"+\n" +
"\x06ToArgs\x12\x13\n" +
"\x05_case\x18\x01 \x01(\x03R\x04Case\x12\f\n" +
"\x01r\x18\x02 \x01(\tR\x01r\"\x1b\n" +
"\vToUpperArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1b\n" +
"\vToLowerArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1b\n" +
"\vToTitleArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1e\n" +
"\x0eSimpleFoldArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\xca\a\n" +
"\fNgoloFuzzOne\x122\n" +
"\aIsDigit\x18\x01 \x01(\v2\x16.ngolofuzz.IsDigitArgsH\x00R\aIsDigit\x128\n" +
"\tIsGraphic\x18\x02 \x01(\v2\x18.ngolofuzz.IsGraphicArgsH\x00R\tIsGraphic\x122\n" +
"\aIsPrint\x18\x03 \x01(\v2\x16.ngolofuzz.IsPrintArgsH\x00R\aIsPrint\x128\n" +
"\tIsControl\x18\x04 \x01(\v2\x18.ngolofuzz.IsControlArgsH\x00R\tIsControl\x125\n" +
"\bIsLetter\x18\x05 \x01(\v2\x17.ngolofuzz.IsLetterArgsH\x00R\bIsLetter\x12/\n" +
"\x06IsMark\x18\x06 \x01(\v2\x15.ngolofuzz.IsMarkArgsH\x00R\x06IsMark\x125\n" +
"\bIsNumber\x18\a \x01(\v2\x17.ngolofuzz.IsNumberArgsH\x00R\bIsNumber\x122\n" +
"\aIsPunct\x18\b \x01(\v2\x16.ngolofuzz.IsPunctArgsH\x00R\aIsPunct\x122\n" +
"\aIsSpace\x18\t \x01(\v2\x16.ngolofuzz.IsSpaceArgsH\x00R\aIsSpace\x125\n" +
"\bIsSymbol\x18\n" +
" \x01(\v2\x17.ngolofuzz.IsSymbolArgsH\x00R\bIsSymbol\x122\n" +
"\aIsUpper\x18\v \x01(\v2\x16.ngolofuzz.IsUpperArgsH\x00R\aIsUpper\x122\n" +
"\aIsLower\x18\f \x01(\v2\x16.ngolofuzz.IsLowerArgsH\x00R\aIsLower\x122\n" +
"\aIsTitle\x18\r \x01(\v2\x16.ngolofuzz.IsTitleArgsH\x00R\aIsTitle\x12#\n" +
"\x02To\x18\x0e \x01(\v2\x11.ngolofuzz.ToArgsH\x00R\x02To\x122\n" +
"\aToUpper\x18\x0f \x01(\v2\x16.ngolofuzz.ToUpperArgsH\x00R\aToUpper\x122\n" +
"\aToLower\x18\x10 \x01(\v2\x16.ngolofuzz.ToLowerArgsH\x00R\aToLower\x122\n" +
"\aToTitle\x18\x11 \x01(\v2\x16.ngolofuzz.ToTitleArgsH\x00R\aToTitle\x12;\n" +
"\n" +
"SimpleFold\x18\x12 \x01(\v2\x19.ngolofuzz.SimpleFoldArgsH\x00R\n" +
"SimpleFoldB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x14Z\x12./;fuzz_ng_unicodeb\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP lazily gzip-compresses the raw file
// descriptor exactly once (guarded by a sync.Once) and returns the cached
// result. It backs the deprecated Descriptor() methods generated above.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		// unsafe.Slice over StringData avoids copying the large descriptor
		// string before compression.
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
var file_ngolofuzz_proto_goTypes = []any{
(*IsDigitArgs)(nil), // 0: ngolofuzz.IsDigitArgs
(*IsGraphicArgs)(nil), // 1: ngolofuzz.IsGraphicArgs
(*IsPrintArgs)(nil), // 2: ngolofuzz.IsPrintArgs
(*IsControlArgs)(nil), // 3: ngolofuzz.IsControlArgs
(*IsLetterArgs)(nil), // 4: ngolofuzz.IsLetterArgs
(*IsMarkArgs)(nil), // 5: ngolofuzz.IsMarkArgs
(*IsNumberArgs)(nil), // 6: ngolofuzz.IsNumberArgs
(*IsPunctArgs)(nil), // 7: ngolofuzz.IsPunctArgs
(*IsSpaceArgs)(nil), // 8: ngolofuzz.IsSpaceArgs
(*IsSymbolArgs)(nil), // 9: ngolofuzz.IsSymbolArgs
(*IsUpperArgs)(nil), // 10: ngolofuzz.IsUpperArgs
(*IsLowerArgs)(nil), // 11: ngolofuzz.IsLowerArgs
(*IsTitleArgs)(nil), // 12: ngolofuzz.IsTitleArgs
(*ToArgs)(nil), // 13: ngolofuzz.ToArgs
(*ToUpperArgs)(nil), // 14: ngolofuzz.ToUpperArgs
(*ToLowerArgs)(nil), // 15: ngolofuzz.ToLowerArgs
(*ToTitleArgs)(nil), // 16: ngolofuzz.ToTitleArgs
(*SimpleFoldArgs)(nil), // 17: ngolofuzz.SimpleFoldArgs
(*NgoloFuzzOne)(nil), // 18: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 19: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 20: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.IsDigit:type_name -> ngolofuzz.IsDigitArgs
1, // 1: ngolofuzz.NgoloFuzzOne.IsGraphic:type_name -> ngolofuzz.IsGraphicArgs
2, // 2: ngolofuzz.NgoloFuzzOne.IsPrint:type_name -> ngolofuzz.IsPrintArgs
3, // 3: ngolofuzz.NgoloFuzzOne.IsControl:type_name -> ngolofuzz.IsControlArgs
4, // 4: ngolofuzz.NgoloFuzzOne.IsLetter:type_name -> ngolofuzz.IsLetterArgs
5, // 5: ngolofuzz.NgoloFuzzOne.IsMark:type_name -> ngolofuzz.IsMarkArgs
6, // 6: ngolofuzz.NgoloFuzzOne.IsNumber:type_name -> ngolofuzz.IsNumberArgs
7, // 7: ngolofuzz.NgoloFuzzOne.IsPunct:type_name -> ngolofuzz.IsPunctArgs
8, // 8: ngolofuzz.NgoloFuzzOne.IsSpace:type_name -> ngolofuzz.IsSpaceArgs
9, // 9: ngolofuzz.NgoloFuzzOne.IsSymbol:type_name -> ngolofuzz.IsSymbolArgs
10, // 10: ngolofuzz.NgoloFuzzOne.IsUpper:type_name -> ngolofuzz.IsUpperArgs
11, // 11: ngolofuzz.NgoloFuzzOne.IsLower:type_name -> ngolofuzz.IsLowerArgs
12, // 12: ngolofuzz.NgoloFuzzOne.IsTitle:type_name -> ngolofuzz.IsTitleArgs
13, // 13: ngolofuzz.NgoloFuzzOne.To:type_name -> ngolofuzz.ToArgs
14, // 14: ngolofuzz.NgoloFuzzOne.ToUpper:type_name -> ngolofuzz.ToUpperArgs
15, // 15: ngolofuzz.NgoloFuzzOne.ToLower:type_name -> ngolofuzz.ToLowerArgs
16, // 16: ngolofuzz.NgoloFuzzOne.ToTitle:type_name -> ngolofuzz.ToTitleArgs
17, // 17: ngolofuzz.NgoloFuzzOne.SimpleFold:type_name -> ngolofuzz.SimpleFoldArgs
18, // 18: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
19, // [19:19] is the sub-list for method output_type
19, // [19:19] is the sub-list for method input_type
19, // [19:19] is the sub-list for extension type_name
19, // [19:19] is the sub-list for extension extendee
0, // [0:19] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }
func file_ngolofuzz_proto_init() {
if File_ngolofuzz_proto != nil {
return
}
file_ngolofuzz_proto_msgTypes[18].OneofWrappers = []any{
(*NgoloFuzzOne_IsDigit)(nil),
(*NgoloFuzzOne_IsGraphic)(nil),
(*NgoloFuzzOne_IsPrint)(nil),
(*NgoloFuzzOne_IsControl)(nil),
(*NgoloFuzzOne_IsLetter)(nil),
(*NgoloFuzzOne_IsMark)(nil),
(*NgoloFuzzOne_IsNumber)(nil),
(*NgoloFuzzOne_IsPunct)(nil),
(*NgoloFuzzOne_IsSpace)(nil),
(*NgoloFuzzOne_IsSymbol)(nil),
(*NgoloFuzzOne_IsUpper)(nil),
(*NgoloFuzzOne_IsLower)(nil),
(*NgoloFuzzOne_IsTitle)(nil),
(*NgoloFuzzOne_To)(nil),
(*NgoloFuzzOne_ToUpper)(nil),
(*NgoloFuzzOne_ToLower)(nil),
(*NgoloFuzzOne_ToTitle)(nil),
(*NgoloFuzzOne_SimpleFold)(nil),
}
file_ngolofuzz_proto_msgTypes[19].OneofWrappers = []any{
(*NgoloFuzzAny_DoubleArgs)(nil),
(*NgoloFuzzAny_Int64Args)(nil),
(*NgoloFuzzAny_BoolArgs)(nil),
(*NgoloFuzzAny_StringArgs)(nil),
(*NgoloFuzzAny_BytesArgs)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
NumEnums: 0,
NumMessages: 21,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_ngolofuzz_proto_goTypes,
DependencyIndexes: file_ngolofuzz_proto_depIdxs,
MessageInfos: file_ngolofuzz_proto_msgTypes,
}.Build()
File_ngolofuzz_proto = out.File
file_ngolofuzz_proto_goTypes = nil
file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_unicode_utf16
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
"unicode/utf16"
)
// FuzzingConn is an in-memory net.Conn-like stub for fuzzing: Read serves
// bytes from a fuzzer-supplied buffer and Write discards its input.
type FuzzingConn struct {
	buf    []byte // fuzzer-provided data served to readers
	offset int    // number of bytes of buf already consumed
}

// Read copies the next chunk of buffered data into b and reports how many
// bytes were copied; it returns io.EOF once the buffer is exhausted.
//
// Bug fixed: the original compared len(b) against len(c.buf)+c.offset
// (instead of the remaining count len(c.buf)-c.offset) and advanced offset
// by len(b) even when copy moved fewer bytes, so Read could report more
// bytes than it actually wrote into b and silently skip data. Using copy's
// return value handles both the partial- and full-read cases correctly and
// satisfies the io.Reader contract (n <= len(b)).
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
	if c.offset >= len(c.buf) {
		return 0, io.EOF
	}
	n = copy(b, c.buf[c.offset:])
	c.offset += n
	return n, nil
}
// Write pretends the whole of p was sent; the fuzzing connection
// deliberately discards everything written to it.
func (c *FuzzingConn) Write(p []byte) (int, error) {
	return len(p), nil
}
// Close marks the connection as exhausted by advancing the read offset past
// the end of the buffer, so any subsequent Read yields io.EOF. It never fails.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr used for both endpoints of a
// FuzzingConn; it carries no real network information.
type FuzzingAddr struct{}

// Network reports a fixed fake network name.
func (a *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String reports a fixed fake address string.
func (a *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a fixed placeholder address for the fake connection.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a fixed placeholder address for the fake connection.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op; deadlines are meaningless for in-memory data.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op; see SetDeadline.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps the fuzzer-provided bytes a in a FuzzingConn whose
// reads consume a and whose writes are discarded.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit these helper functions only when the generated harness actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer and returns it
// as a *big.Int (an empty or nil slice yields zero).
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
// CreateBufioReader returns a buffered reader over the fuzzer-provided bytes.
func CreateBufioReader(a []byte) *bufio.Reader {
	return bufio.NewReader(bytes.NewReader(a))
}
// ConvertIntArray narrows each int64 in a to the platform int width,
// returning a freshly allocated slice of the same length.
func ConvertIntArray(a []int64) []int {
	out := make([]int, 0, len(a))
	for _, v := range a {
		out = append(out, int(v))
	}
	return out
}
// ConvertUint16Array truncates each int64 in a to its low 16 bits,
// returning a freshly allocated []uint16 of the same length.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
// GetRune decodes and returns the first rune of s (U+FFFD for a leading
// invalid UTF-8 sequence, per range-over-string semantics), or NUL when s
// is empty.
func GetRune(s string) rune {
	r := rune('\x00')
	for _, first := range s {
		r = first
		break
	}
	return r
}
// FuzzNG_valid is the harness entry point for inputs that are guaranteed by
// the generator to be valid protobuf; a decode failure is therefore itself a
// bug and panics rather than being skipped.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		// Input came from the structure-aware generator, so it must decode.
		panic("Failed to unmarshal LPM generated variables")
	}
	// Registered after the Unmarshal check on purpose: the panic above must
	// escape, while panics raised inside FuzzNG_List are filtered below.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// String panics are treated as expected by this harness and
				// swallowed so fuzzing continues — NOTE(review): presumably the
				// convention is that the fuzzed package panics with strings;
				// confirm against the generator.
			default:
				// Anything else is a real crash; re-raise it.
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the harness entry point for arbitrary bytes that may not
// be valid protobuf; undecodable input is simply rejected (returns 0) rather
// than treated as a bug, unlike FuzzNG_valid.
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// Filter panics raised while replaying the decoded call list.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				// Expected-panic convention: string panics are swallowed so
				// fuzzing continues.
			default:
				// Any non-string panic is a genuine crash; re-raise it.
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays, in order, every unicode/utf16 call encoded in gen.
// It returns 1 to mark the input as interesting and 0 when the input is
// rejected (too many calls). Return values of the fuzzed functions are
// deliberately discarded; only crashes matter here.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		// First invocation only: if FUZZ_NG_REPRODUCER names a file, dump a
		// Go-source transcript of this input's calls for manual replay.
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// Bound per-input work: give up once more than 4096 calls were replayed.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_IsSurrogate:
			arg0 := GetRune(a.IsSurrogate.R)
			utf16.IsSurrogate(arg0)
		case *NgoloFuzzOne_DecodeRune:
			arg0 := GetRune(a.DecodeRune.R1)
			arg1 := GetRune(a.DecodeRune.R2)
			utf16.DecodeRune(arg0, arg1)
		case *NgoloFuzzOne_EncodeRune:
			arg0 := GetRune(a.EncodeRune.R)
			utf16.EncodeRune(arg0)
		case *NgoloFuzzOne_RuneLen:
			arg0 := GetRune(a.RuneLen.R)
			utf16.RuneLen(arg0)
		case *NgoloFuzzOne_AppendRune:
			arg0 := ConvertUint16Array(a.AppendRune.A)
			arg1 := GetRune(a.AppendRune.R)
			utf16.AppendRune(arg0, arg1)
		case *NgoloFuzzOne_Decode:
			arg0 := ConvertUint16Array(a.Decode.S)
			utf16.Decode(arg0)
		}
	}
	return 1
}
// PrintNG_List writes a Go-source transcript of the calls encoded in gen —
// one statement per line, mirroring the dispatch in FuzzNG_List — so a
// crashing input can be replayed by hand. WriteString errors are
// deliberately ignored: this is best-effort debug output.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_IsSurrogate:
			w.WriteString(fmt.Sprintf("utf16.IsSurrogate(GetRune(%#+v))\n", a.IsSurrogate.R))
		case *NgoloFuzzOne_DecodeRune:
			w.WriteString(fmt.Sprintf("utf16.DecodeRune(GetRune(%#+v), GetRune(%#+v))\n", a.DecodeRune.R1, a.DecodeRune.R2))
		case *NgoloFuzzOne_EncodeRune:
			w.WriteString(fmt.Sprintf("utf16.EncodeRune(GetRune(%#+v))\n", a.EncodeRune.R))
		case *NgoloFuzzOne_RuneLen:
			w.WriteString(fmt.Sprintf("utf16.RuneLen(GetRune(%#+v))\n", a.RuneLen.R))
		case *NgoloFuzzOne_AppendRune:
			w.WriteString(fmt.Sprintf("utf16.AppendRune(ConvertUint16Array(%#+v), GetRune(%#+v))\n", a.AppendRune.A, a.AppendRune.R))
		case *NgoloFuzzOne_Decode:
			w.WriteString(fmt.Sprintf("utf16.Decode(ConvertUint16Array(%#+v))\n", a.Decode.S))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_unicode_utf16
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type IsSurrogateArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *IsSurrogateArgs) Reset() {
*x = IsSurrogateArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *IsSurrogateArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IsSurrogateArgs) ProtoMessage() {}
func (x *IsSurrogateArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IsSurrogateArgs.ProtoReflect.Descriptor instead.
func (*IsSurrogateArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *IsSurrogateArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
type DecodeRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R1 string `protobuf:"bytes,1,opt,name=r1,proto3" json:"r1,omitempty"`
R2 string `protobuf:"bytes,2,opt,name=r2,proto3" json:"r2,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeRuneArgs) Reset() {
*x = DecodeRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeRuneArgs) ProtoMessage() {}
func (x *DecodeRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeRuneArgs.ProtoReflect.Descriptor instead.
func (*DecodeRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *DecodeRuneArgs) GetR1() string {
if x != nil {
return x.R1
}
return ""
}
func (x *DecodeRuneArgs) GetR2() string {
if x != nil {
return x.R2
}
return ""
}
type EncodeRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodeRuneArgs) Reset() {
*x = EncodeRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodeRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodeRuneArgs) ProtoMessage() {}
func (x *EncodeRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodeRuneArgs.ProtoReflect.Descriptor instead.
func (*EncodeRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *EncodeRuneArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
type RuneLenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RuneLenArgs) Reset() {
*x = RuneLenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RuneLenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RuneLenArgs) ProtoMessage() {}
func (x *RuneLenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RuneLenArgs.ProtoReflect.Descriptor instead.
func (*RuneLenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *RuneLenArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
type AppendRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
A []int64 `protobuf:"varint,1,rep,packed,name=a,proto3" json:"a,omitempty"`
R string `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *AppendRuneArgs) Reset() {
*x = AppendRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *AppendRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AppendRuneArgs) ProtoMessage() {}
func (x *AppendRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AppendRuneArgs.ProtoReflect.Descriptor instead.
func (*AppendRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *AppendRuneArgs) GetA() []int64 {
if x != nil {
return x.A
}
return nil
}
func (x *AppendRuneArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
type DecodeArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S []int64 `protobuf:"varint,1,rep,packed,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeArgs) Reset() {
*x = DecodeArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeArgs) ProtoMessage() {}
func (x *DecodeArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeArgs.ProtoReflect.Descriptor instead.
func (*DecodeArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *DecodeArgs) GetS() []int64 {
if x != nil {
return x.S
}
return nil
}
type NgoloFuzzOne struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzOne_IsSurrogate
// *NgoloFuzzOne_DecodeRune
// *NgoloFuzzOne_EncodeRune
// *NgoloFuzzOne_RuneLen
// *NgoloFuzzOne_AppendRune
// *NgoloFuzzOne_Decode
Item isNgoloFuzzOne_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzOne) Reset() {
*x = NgoloFuzzOne{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzOne) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzOne) ProtoMessage() {}
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzOne) GetIsSurrogate() *IsSurrogateArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_IsSurrogate); ok {
return x.IsSurrogate
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecodeRune() *DecodeRuneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_DecodeRune); ok {
return x.DecodeRune
}
}
return nil
}
func (x *NgoloFuzzOne) GetEncodeRune() *EncodeRuneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_EncodeRune); ok {
return x.EncodeRune
}
}
return nil
}
func (x *NgoloFuzzOne) GetRuneLen() *RuneLenArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_RuneLen); ok {
return x.RuneLen
}
}
return nil
}
func (x *NgoloFuzzOne) GetAppendRune() *AppendRuneArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_AppendRune); ok {
return x.AppendRune
}
}
return nil
}
func (x *NgoloFuzzOne) GetDecode() *DecodeArgs {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzOne_Decode); ok {
return x.Decode
}
}
return nil
}
type isNgoloFuzzOne_Item interface {
isNgoloFuzzOne_Item()
}
type NgoloFuzzOne_IsSurrogate struct {
IsSurrogate *IsSurrogateArgs `protobuf:"bytes,1,opt,name=IsSurrogate,proto3,oneof"`
}
type NgoloFuzzOne_DecodeRune struct {
DecodeRune *DecodeRuneArgs `protobuf:"bytes,2,opt,name=DecodeRune,proto3,oneof"`
}
type NgoloFuzzOne_EncodeRune struct {
EncodeRune *EncodeRuneArgs `protobuf:"bytes,3,opt,name=EncodeRune,proto3,oneof"`
}
type NgoloFuzzOne_RuneLen struct {
RuneLen *RuneLenArgs `protobuf:"bytes,4,opt,name=RuneLen,proto3,oneof"`
}
type NgoloFuzzOne_AppendRune struct {
AppendRune *AppendRuneArgs `protobuf:"bytes,5,opt,name=AppendRune,proto3,oneof"`
}
type NgoloFuzzOne_Decode struct {
Decode *DecodeArgs `protobuf:"bytes,6,opt,name=Decode,proto3,oneof"`
}
func (*NgoloFuzzOne_IsSurrogate) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_DecodeRune) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_EncodeRune) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_RuneLen) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_AppendRune) isNgoloFuzzOne_Item() {}
func (*NgoloFuzzOne_Decode) isNgoloFuzzOne_Item() {}
type NgoloFuzzAny struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Item:
//
// *NgoloFuzzAny_DoubleArgs
// *NgoloFuzzAny_Int64Args
// *NgoloFuzzAny_BoolArgs
// *NgoloFuzzAny_StringArgs
// *NgoloFuzzAny_BytesArgs
Item isNgoloFuzzAny_Item `protobuf_oneof:"item"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzAny) Reset() {
*x = NgoloFuzzAny{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzAny) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzAny) ProtoMessage() {}
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
if x != nil {
return x.Item
}
return nil
}
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
return x.DoubleArgs
}
}
return 0
}
func (x *NgoloFuzzAny) GetInt64Args() int64 {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
return x.Int64Args
}
}
return 0
}
func (x *NgoloFuzzAny) GetBoolArgs() bool {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
return x.BoolArgs
}
}
return false
}
func (x *NgoloFuzzAny) GetStringArgs() string {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
return x.StringArgs
}
}
return ""
}
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
if x != nil {
if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
return x.BytesArgs
}
}
return nil
}
type isNgoloFuzzAny_Item interface {
isNgoloFuzzAny_Item()
}
type NgoloFuzzAny_DoubleArgs struct {
DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}
type NgoloFuzzAny_Int64Args struct {
Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}
type NgoloFuzzAny_BoolArgs struct {
BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}
type NgoloFuzzAny_StringArgs struct {
StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}
type NgoloFuzzAny_BytesArgs struct {
BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}
func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
type NgoloFuzzList struct {
state protoimpl.MessageState `protogen:"open.v1"`
List []*NgoloFuzzOne `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NgoloFuzzList) Reset() {
*x = NgoloFuzzList{}
mi := &file_ngolofuzz_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NgoloFuzzList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NgoloFuzzList) ProtoMessage() {}
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
if x != nil {
return x.List
}
return nil
}
var File_ngolofuzz_proto protoreflect.FileDescriptor
const file_ngolofuzz_proto_rawDesc = "" +
"\n" +
"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1f\n" +
"\x0fIsSurrogateArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"0\n" +
"\x0eDecodeRuneArgs\x12\x0e\n" +
"\x02r1\x18\x01 \x01(\tR\x02r1\x12\x0e\n" +
"\x02r2\x18\x02 \x01(\tR\x02r2\"\x1e\n" +
"\x0eEncodeRuneArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\"\x1b\n" +
"\vRuneLenArgs\x12\f\n" +
"\x01r\x18\x01 \x01(\tR\x01r\",\n" +
"\x0eAppendRuneArgs\x12\f\n" +
"\x01a\x18\x01 \x03(\x03R\x01a\x12\f\n" +
"\x01r\x18\x02 \x01(\tR\x01r\"\x1a\n" +
"\n" +
"DecodeArgs\x12\f\n" +
"\x01s\x18\x01 \x03(\x03R\x01s\"\xf2\x02\n" +
"\fNgoloFuzzOne\x12>\n" +
"\vIsSurrogate\x18\x01 \x01(\v2\x1a.ngolofuzz.IsSurrogateArgsH\x00R\vIsSurrogate\x12;\n" +
"\n" +
"DecodeRune\x18\x02 \x01(\v2\x19.ngolofuzz.DecodeRuneArgsH\x00R\n" +
"DecodeRune\x12;\n" +
"\n" +
"EncodeRune\x18\x03 \x01(\v2\x19.ngolofuzz.EncodeRuneArgsH\x00R\n" +
"EncodeRune\x122\n" +
"\aRuneLen\x18\x04 \x01(\v2\x16.ngolofuzz.RuneLenArgsH\x00R\aRuneLen\x12;\n" +
"\n" +
"AppendRune\x18\x05 \x01(\v2\x19.ngolofuzz.AppendRuneArgsH\x00R\n" +
"AppendRune\x12/\n" +
"\x06Decode\x18\x06 \x01(\v2\x15.ngolofuzz.DecodeArgsH\x00R\x06DecodeB\x06\n" +
"\x04item\"\xb8\x01\n" +
"\fNgoloFuzzAny\x12 \n" +
"\n" +
"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
"DoubleArgs\x12\x1e\n" +
"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
"\n" +
"StringArgs\x18\x04 \x01(\tH\x00R\n" +
"StringArgs\x12\x1e\n" +
"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
"\x04item\"<\n" +
"\rNgoloFuzzList\x12+\n" +
"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x1aZ\x18./;fuzz_ng_unicode_utf16b\x06proto3"
var (
file_ngolofuzz_proto_rawDescOnce sync.Once
file_ngolofuzz_proto_rawDescData []byte
)
// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file
// descriptor exactly once (guarded by file_ngolofuzz_proto_rawDescOnce)
// and returns the cached compressed bytes on every later call.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		// unsafe.Slice/StringData give a zero-copy []byte view of the
		// constant descriptor string for compression.
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
var file_ngolofuzz_proto_goTypes = []any{
(*IsSurrogateArgs)(nil), // 0: ngolofuzz.IsSurrogateArgs
(*DecodeRuneArgs)(nil), // 1: ngolofuzz.DecodeRuneArgs
(*EncodeRuneArgs)(nil), // 2: ngolofuzz.EncodeRuneArgs
(*RuneLenArgs)(nil), // 3: ngolofuzz.RuneLenArgs
(*AppendRuneArgs)(nil), // 4: ngolofuzz.AppendRuneArgs
(*DecodeArgs)(nil), // 5: ngolofuzz.DecodeArgs
(*NgoloFuzzOne)(nil), // 6: ngolofuzz.NgoloFuzzOne
(*NgoloFuzzAny)(nil), // 7: ngolofuzz.NgoloFuzzAny
(*NgoloFuzzList)(nil), // 8: ngolofuzz.NgoloFuzzList
}
var file_ngolofuzz_proto_depIdxs = []int32{
0, // 0: ngolofuzz.NgoloFuzzOne.IsSurrogate:type_name -> ngolofuzz.IsSurrogateArgs
1, // 1: ngolofuzz.NgoloFuzzOne.DecodeRune:type_name -> ngolofuzz.DecodeRuneArgs
2, // 2: ngolofuzz.NgoloFuzzOne.EncodeRune:type_name -> ngolofuzz.EncodeRuneArgs
3, // 3: ngolofuzz.NgoloFuzzOne.RuneLen:type_name -> ngolofuzz.RuneLenArgs
4, // 4: ngolofuzz.NgoloFuzzOne.AppendRune:type_name -> ngolofuzz.AppendRuneArgs
5, // 5: ngolofuzz.NgoloFuzzOne.Decode:type_name -> ngolofuzz.DecodeArgs
6, // 6: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
7, // [7:7] is the sub-list for method output_type
7, // [7:7] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers this file's message types, oneof
// wrapper types, and dependency tables with the protobuf runtime. It is
// idempotent: a second call returns immediately.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return // already built
	}
	// Oneof wrapper types for NgoloFuzzOne.Item (message index 6).
	file_ngolofuzz_proto_msgTypes[6].OneofWrappers = []any{
		(*NgoloFuzzOne_IsSurrogate)(nil),
		(*NgoloFuzzOne_DecodeRune)(nil),
		(*NgoloFuzzOne_EncodeRune)(nil),
		(*NgoloFuzzOne_RuneLen)(nil),
		(*NgoloFuzzOne_AppendRune)(nil),
		(*NgoloFuzzOne_Decode)(nil),
	}
	// Oneof wrapper types for NgoloFuzzAny.Item (message index 7).
	file_ngolofuzz_proto_msgTypes[7].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   9,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release build-time tables; the runtime now owns the data.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}
//go:build gofuzz
package fuzz_ng_unicode_utf8
import (
"google.golang.org/protobuf/proto"
"bufio"
"bytes"
"fmt"
"io"
"log"
"math/big"
"net"
"os"
"runtime"
"time"
"unicode/utf8"
)
type FuzzingConn struct {
buf []byte
offset int
}
func (c *FuzzingConn) Read(b []byte) (n int, err error) {
if c.offset >= len(c.buf) {
return 0, io.EOF
}
if len(b) < len(c.buf)+c.offset {
copy(b, c.buf[c.offset:])
c.offset += len(b)
return len(b), nil
}
copy(b, c.buf[c.offset:])
r := len(c.buf) - c.offset
c.offset = len(c.buf)
return r, nil
}
// Write discards b and reports it as fully written.
func (c *FuzzingConn) Write(b []byte) (n int, err error) {
	return len(b), nil
}
// Close marks the whole buffer as consumed so later Reads return io.EOF.
func (c *FuzzingConn) Close() error {
	c.offset = len(c.buf)
	return nil
}
// FuzzingAddr is a placeholder net.Addr used by FuzzingConn.
type FuzzingAddr struct{}

// Network returns a fixed fake network name.
func (c *FuzzingAddr) Network() string {
	return "fuzz_addr_net"
}

// String returns a fixed fake address string.
func (c *FuzzingAddr) String() string {
	return "fuzz_addr_string"
}
// LocalAddr returns a placeholder address.
func (c *FuzzingConn) LocalAddr() net.Addr {
	return &FuzzingAddr{}
}

// RemoteAddr returns a placeholder address.
func (c *FuzzingConn) RemoteAddr() net.Addr {
	return &FuzzingAddr{}
}

// SetDeadline is a no-op: the in-memory conn never blocks.
func (c *FuzzingConn) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op.
func (c *FuzzingConn) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op.
func (c *FuzzingConn) SetWriteDeadline(t time.Time) error {
	return nil
}
// CreateFuzzingConn wraps a in a FuzzingConn ready for reading.
func CreateFuzzingConn(a []byte) *FuzzingConn {
	return &FuzzingConn{buf: a}
}
// TODO: emit these helper constructors only when the fuzzed API actually needs them.
// CreateBigInt interprets a as a big-endian unsigned integer.
func CreateBigInt(a []byte) *big.Int {
	return new(big.Int).SetBytes(a)
}
func CreateBufioReader(a []byte) *bufio.Reader {
return bufio.NewReader(bytes.NewBuffer(a))
}
// ConvertIntArray converts each element of a to the platform int type.
func ConvertIntArray(a []int64) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = int(v)
	}
	return out
}
// ConvertUint16Array truncates each element of a to a uint16.
func ConvertUint16Array(a []int64) []uint16 {
	out := make([]uint16, len(a))
	for i, v := range a {
		out[i] = uint16(v)
	}
	return out
}
func GetRune(s string) rune {
for _, c := range s {
return c
}
return '\x00'
}
// FuzzNG_valid replays a protobuf-encoded call list that is assumed to
// be structurally valid (e.g. produced by a protobuf-aware mutator):
// a decode failure is treated as a harness bug and panics. Note the
// panic fires before the recover below is installed, so it propagates.
func FuzzNG_valid(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		panic("Failed to unmarshal LPM generated variables")
	}
	// String panics raised by the replayed API calls are swallowed;
	// any other panic value is re-raised as a real crash.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// FuzzNG_unsure is the raw-bytes entry point: the input may not be a
// valid protobuf, so a decode failure simply rejects the input instead
// of panicking (contrast FuzzNG_valid).
func FuzzNG_unsure(data []byte) int {
	gen := &NgoloFuzzList{}
	err := proto.Unmarshal(data, gen)
	if err != nil {
		return 0
	}
	// String panics raised by the replayed API calls are swallowed;
	// any other panic value is re-raised as a real crash.
	defer func() {
		if r := recover(); r != nil {
			switch r.(type) {
			case string:
				//do nothing
			default:
				panic(r)
			}
		}
	}()
	runtime.GC()
	return FuzzNG_List(gen)
}
// initialized guards the one-time reproducer-file setup in FuzzNG_List.
var initialized bool

// FuzzNG_List replays a decoded call list against the unicode/utf8 API,
// one call per list entry. On the first invocation, if the
// FUZZ_NG_REPRODUCER environment variable names a file, a Go-source
// transcript of the calls is written there via PrintNG_List.
// Returns 1 when the list was replayed, 0 when it was oversized.
func FuzzNG_List(gen *NgoloFuzzList) int {
	if !initialized {
		repro := os.Getenv("FUZZ_NG_REPRODUCER")
		if len(repro) > 0 {
			f, err := os.Create(repro)
			if err != nil {
				log.Fatalf("Failed to open %s : %s", repro, err)
			} else {
				PrintNG_List(gen, f)
			}
		}
		initialized = true
	}
	for l := range gen.List {
		// l is the element index: cap the work done for one input so a
		// single giant list cannot dominate the fuzzing run.
		if l > 4096 {
			return 0
		}
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_FullRune:
			utf8.FullRune(a.FullRune.P)
		case *NgoloFuzzOne_FullRuneInString:
			utf8.FullRuneInString(a.FullRuneInString.S)
		case *NgoloFuzzOne_DecodeRune:
			utf8.DecodeRune(a.DecodeRune.P)
		case *NgoloFuzzOne_DecodeRuneInString:
			utf8.DecodeRuneInString(a.DecodeRuneInString.S)
		case *NgoloFuzzOne_DecodeLastRune:
			utf8.DecodeLastRune(a.DecodeLastRune.P)
		case *NgoloFuzzOne_DecodeLastRuneInString:
			utf8.DecodeLastRuneInString(a.DecodeLastRuneInString.S)
		case *NgoloFuzzOne_RuneLen:
			arg0 := GetRune(a.RuneLen.R)
			utf8.RuneLen(arg0)
		case *NgoloFuzzOne_EncodeRune:
			arg1 := GetRune(a.EncodeRune.R)
			utf8.EncodeRune(a.EncodeRune.P, arg1)
		case *NgoloFuzzOne_AppendRune:
			arg1 := GetRune(a.AppendRune.R)
			utf8.AppendRune(a.AppendRune.P, arg1)
		case *NgoloFuzzOne_RuneCount:
			utf8.RuneCount(a.RuneCount.P)
		case *NgoloFuzzOne_RuneCountInString:
			utf8.RuneCountInString(a.RuneCountInString.S)
		case *NgoloFuzzOne_RuneStart:
			arg0 := byte(a.RuneStart.B)
			utf8.RuneStart(arg0)
		case *NgoloFuzzOne_Valid:
			utf8.Valid(a.Valid.P)
		case *NgoloFuzzOne_ValidString:
			utf8.ValidString(a.ValidString.S)
		case *NgoloFuzzOne_ValidRune:
			arg0 := GetRune(a.ValidRune.R)
			utf8.ValidRune(arg0)
		}
	}
	return 1
}
// PrintNG_List writes a human-readable Go transcript of the call list
// to w, mirroring FuzzNG_List case-for-case, so a crashing input can be
// turned into a standalone reproducer. WriteString errors are ignored:
// the transcript is best-effort.
func PrintNG_List(gen *NgoloFuzzList, w io.StringWriter) {
	for l := range gen.List {
		switch a := gen.List[l].Item.(type) {
		case *NgoloFuzzOne_FullRune:
			w.WriteString(fmt.Sprintf("utf8.FullRune(%#+v)\n", a.FullRune.P))
		case *NgoloFuzzOne_FullRuneInString:
			w.WriteString(fmt.Sprintf("utf8.FullRuneInString(%#+v)\n", a.FullRuneInString.S))
		case *NgoloFuzzOne_DecodeRune:
			w.WriteString(fmt.Sprintf("utf8.DecodeRune(%#+v)\n", a.DecodeRune.P))
		case *NgoloFuzzOne_DecodeRuneInString:
			w.WriteString(fmt.Sprintf("utf8.DecodeRuneInString(%#+v)\n", a.DecodeRuneInString.S))
		case *NgoloFuzzOne_DecodeLastRune:
			w.WriteString(fmt.Sprintf("utf8.DecodeLastRune(%#+v)\n", a.DecodeLastRune.P))
		case *NgoloFuzzOne_DecodeLastRuneInString:
			w.WriteString(fmt.Sprintf("utf8.DecodeLastRuneInString(%#+v)\n", a.DecodeLastRuneInString.S))
		case *NgoloFuzzOne_RuneLen:
			w.WriteString(fmt.Sprintf("utf8.RuneLen(GetRune(%#+v))\n", a.RuneLen.R))
		case *NgoloFuzzOne_EncodeRune:
			w.WriteString(fmt.Sprintf("utf8.EncodeRune(%#+v, GetRune(%#+v))\n", a.EncodeRune.P, a.EncodeRune.R))
		case *NgoloFuzzOne_AppendRune:
			w.WriteString(fmt.Sprintf("utf8.AppendRune(%#+v, GetRune(%#+v))\n", a.AppendRune.P, a.AppendRune.R))
		case *NgoloFuzzOne_RuneCount:
			w.WriteString(fmt.Sprintf("utf8.RuneCount(%#+v)\n", a.RuneCount.P))
		case *NgoloFuzzOne_RuneCountInString:
			w.WriteString(fmt.Sprintf("utf8.RuneCountInString(%#+v)\n", a.RuneCountInString.S))
		case *NgoloFuzzOne_RuneStart:
			w.WriteString(fmt.Sprintf("utf8.RuneStart(byte(%#+v))\n", a.RuneStart.B))
		case *NgoloFuzzOne_Valid:
			w.WriteString(fmt.Sprintf("utf8.Valid(%#+v)\n", a.Valid.P))
		case *NgoloFuzzOne_ValidString:
			w.WriteString(fmt.Sprintf("utf8.ValidString(%#+v)\n", a.ValidString.S))
		case *NgoloFuzzOne_ValidRune:
			w.WriteString(fmt.Sprintf("utf8.ValidRune(GetRune(%#+v))\n", a.ValidRune.R))
		}
	}
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v5.29.3
// source: ngolofuzz.proto
package fuzz_ng_unicode_utf8
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type FullRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FullRuneArgs) Reset() {
*x = FullRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FullRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FullRuneArgs) ProtoMessage() {}
func (x *FullRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FullRuneArgs.ProtoReflect.Descriptor instead.
func (*FullRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{0}
}
func (x *FullRuneArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
type FullRuneInStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *FullRuneInStringArgs) Reset() {
*x = FullRuneInStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *FullRuneInStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FullRuneInStringArgs) ProtoMessage() {}
func (x *FullRuneInStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FullRuneInStringArgs.ProtoReflect.Descriptor instead.
func (*FullRuneInStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{1}
}
func (x *FullRuneInStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
type DecodeRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeRuneArgs) Reset() {
*x = DecodeRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeRuneArgs) ProtoMessage() {}
func (x *DecodeRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeRuneArgs.ProtoReflect.Descriptor instead.
func (*DecodeRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{2}
}
func (x *DecodeRuneArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
type DecodeRuneInStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeRuneInStringArgs) Reset() {
*x = DecodeRuneInStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeRuneInStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeRuneInStringArgs) ProtoMessage() {}
func (x *DecodeRuneInStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeRuneInStringArgs.ProtoReflect.Descriptor instead.
func (*DecodeRuneInStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{3}
}
func (x *DecodeRuneInStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
type DecodeLastRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeLastRuneArgs) Reset() {
*x = DecodeLastRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeLastRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeLastRuneArgs) ProtoMessage() {}
func (x *DecodeLastRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeLastRuneArgs.ProtoReflect.Descriptor instead.
func (*DecodeLastRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{4}
}
func (x *DecodeLastRuneArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
type DecodeLastRuneInStringArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
S string `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DecodeLastRuneInStringArgs) Reset() {
*x = DecodeLastRuneInStringArgs{}
mi := &file_ngolofuzz_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DecodeLastRuneInStringArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DecodeLastRuneInStringArgs) ProtoMessage() {}
func (x *DecodeLastRuneInStringArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DecodeLastRuneInStringArgs.ProtoReflect.Descriptor instead.
func (*DecodeLastRuneInStringArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{5}
}
func (x *DecodeLastRuneInStringArgs) GetS() string {
if x != nil {
return x.S
}
return ""
}
type RuneLenArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
R string `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *RuneLenArgs) Reset() {
*x = RuneLenArgs{}
mi := &file_ngolofuzz_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *RuneLenArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RuneLenArgs) ProtoMessage() {}
func (x *RuneLenArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RuneLenArgs.ProtoReflect.Descriptor instead.
func (*RuneLenArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{6}
}
func (x *RuneLenArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
type EncodeRuneArgs struct {
state protoimpl.MessageState `protogen:"open.v1"`
P []byte `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
R string `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *EncodeRuneArgs) Reset() {
*x = EncodeRuneArgs{}
mi := &file_ngolofuzz_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EncodeRuneArgs) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EncodeRuneArgs) ProtoMessage() {}
func (x *EncodeRuneArgs) ProtoReflect() protoreflect.Message {
mi := &file_ngolofuzz_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EncodeRuneArgs.ProtoReflect.Descriptor instead.
func (*EncodeRuneArgs) Descriptor() ([]byte, []int) {
return file_ngolofuzz_proto_rawDescGZIP(), []int{7}
}
func (x *EncodeRuneArgs) GetP() []byte {
if x != nil {
return x.P
}
return nil
}
func (x *EncodeRuneArgs) GetR() string {
if x != nil {
return x.R
}
return ""
}
// AppendRuneArgs is the generated message carrying the arguments for one
// fuzzed AppendRune call: a byte slice p and a string r (presumably the rune
// argument encoded as a string — confirm against the harness driver).
type AppendRuneArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	P             []byte                 `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
	R             string                 `protobuf:"bytes,2,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *AppendRuneArgs) Reset() {
	*x = AppendRuneArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering of the message.
func (x *AppendRuneArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *AppendRuneArgs as a protobuf message.
func (*AppendRuneArgs) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use.
func (x *AppendRuneArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use AppendRuneArgs.ProtoReflect.Descriptor instead.
func (*AppendRuneArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{8}
}

// GetP returns field p; safe to call on a nil receiver (returns nil).
func (x *AppendRuneArgs) GetP() []byte {
	if x != nil {
		return x.P
	}
	return nil
}

// GetR returns field r; safe to call on a nil receiver (returns "").
func (x *AppendRuneArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// RuneCountArgs is the generated message carrying the byte-slice argument p
// for one fuzzed RuneCount call.
type RuneCountArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	P             []byte                 `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *RuneCountArgs) Reset() {
	*x = RuneCountArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering of the message.
func (x *RuneCountArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RuneCountArgs as a protobuf message.
func (*RuneCountArgs) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use.
func (x *RuneCountArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RuneCountArgs.ProtoReflect.Descriptor instead.
func (*RuneCountArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{9}
}

// GetP returns field p; safe to call on a nil receiver (returns nil).
func (x *RuneCountArgs) GetP() []byte {
	if x != nil {
		return x.P
	}
	return nil
}
// RuneCountInStringArgs is the generated message carrying the string argument
// s for one fuzzed RuneCountInString call.
type RuneCountInStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *RuneCountInStringArgs) Reset() {
	*x = RuneCountInStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering of the message.
func (x *RuneCountInStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RuneCountInStringArgs as a protobuf message.
func (*RuneCountInStringArgs) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use.
func (x *RuneCountInStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RuneCountInStringArgs.ProtoReflect.Descriptor instead.
func (*RuneCountInStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{10}
}

// GetS returns field s; safe to call on a nil receiver (returns "").
func (x *RuneCountInStringArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// RuneStartArgs is the generated message carrying the argument b for one
// fuzzed RuneStart call. It is declared uint32 on the wire (proto3 has no
// byte scalar); presumably the harness narrows it to a byte — confirm there.
type RuneStartArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	B             uint32                 `protobuf:"varint,1,opt,name=b,proto3" json:"b,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *RuneStartArgs) Reset() {
	*x = RuneStartArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering of the message.
func (x *RuneStartArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *RuneStartArgs as a protobuf message.
func (*RuneStartArgs) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use.
func (x *RuneStartArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RuneStartArgs.ProtoReflect.Descriptor instead.
func (*RuneStartArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{11}
}

// GetB returns field b; safe to call on a nil receiver (returns 0).
func (x *RuneStartArgs) GetB() uint32 {
	if x != nil {
		return x.B
	}
	return 0
}
// ValidArgs is the generated message carrying the byte-slice argument p for
// one fuzzed Valid call.
type ValidArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	P             []byte                 `protobuf:"bytes,1,opt,name=p,proto3" json:"p,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *ValidArgs) Reset() {
	*x = ValidArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering of the message.
func (x *ValidArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ValidArgs as a protobuf message.
func (*ValidArgs) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use.
func (x *ValidArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ValidArgs.ProtoReflect.Descriptor instead.
func (*ValidArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{12}
}

// GetP returns field p; safe to call on a nil receiver (returns nil).
func (x *ValidArgs) GetP() []byte {
	if x != nil {
		return x.P
	}
	return nil
}
// ValidStringArgs is the generated message carrying the string argument s for
// one fuzzed ValidString call.
type ValidStringArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	S             string                 `protobuf:"bytes,1,opt,name=s,proto3" json:"s,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *ValidStringArgs) Reset() {
	*x = ValidStringArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering of the message.
func (x *ValidStringArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ValidStringArgs as a protobuf message.
func (*ValidStringArgs) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use.
func (x *ValidStringArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ValidStringArgs.ProtoReflect.Descriptor instead.
func (*ValidStringArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{13}
}

// GetS returns field s; safe to call on a nil receiver (returns "").
func (x *ValidStringArgs) GetS() string {
	if x != nil {
		return x.S
	}
	return ""
}
// ValidRuneArgs is the generated message carrying the argument r for one
// fuzzed ValidRune call (a string on the wire; presumably decoded to a rune
// by the harness — confirm there).
type ValidRuneArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	R             string                 `protobuf:"bytes,1,opt,name=r,proto3" json:"r,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *ValidRuneArgs) Reset() {
	*x = ValidRuneArgs{}
	mi := &file_ngolofuzz_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering of the message.
func (x *ValidRuneArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ValidRuneArgs as a protobuf message.
func (*ValidRuneArgs) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use.
func (x *ValidRuneArgs) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ValidRuneArgs.ProtoReflect.Descriptor instead.
func (*ValidRuneArgs) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{14}
}

// GetR returns field r; safe to call on a nil receiver (returns "").
func (x *ValidRuneArgs) GetR() string {
	if x != nil {
		return x.R
	}
	return ""
}
// NgoloFuzzOne selects exactly one fuzzed call to perform: its oneof Item
// holds the argument message for a single unicode/utf8-style API invocation.
type NgoloFuzzOne struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzOne_FullRune
	//	*NgoloFuzzOne_FullRuneInString
	//	*NgoloFuzzOne_DecodeRune
	//	*NgoloFuzzOne_DecodeRuneInString
	//	*NgoloFuzzOne_DecodeLastRune
	//	*NgoloFuzzOne_DecodeLastRuneInString
	//	*NgoloFuzzOne_RuneLen
	//	*NgoloFuzzOne_EncodeRune
	//	*NgoloFuzzOne_AppendRune
	//	*NgoloFuzzOne_RuneCount
	//	*NgoloFuzzOne_RuneCountInString
	//	*NgoloFuzzOne_RuneStart
	//	*NgoloFuzzOne_Valid
	//	*NgoloFuzzOne_ValidString
	//	*NgoloFuzzOne_ValidRune
	Item          isNgoloFuzzOne_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *NgoloFuzzOne) Reset() {
	*x = NgoloFuzzOne{}
	mi := &file_ngolofuzz_proto_msgTypes[15]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering of the message.
func (x *NgoloFuzzOne) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NgoloFuzzOne as a protobuf message.
func (*NgoloFuzzOne) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use.
func (x *NgoloFuzzOne) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[15]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzOne.ProtoReflect.Descriptor instead.
func (*NgoloFuzzOne) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{15}
}

// GetItem returns whichever oneof wrapper is set, or nil if none is.
func (x *NgoloFuzzOne) GetItem() isNgoloFuzzOne_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetFullRune returns the FullRune variant, or nil if another (or no) variant is set.
func (x *NgoloFuzzOne) GetFullRune() *FullRuneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FullRune); ok {
			return x.FullRune
		}
	}
	return nil
}

// GetFullRuneInString returns the FullRuneInString variant, or nil if not set.
func (x *NgoloFuzzOne) GetFullRuneInString() *FullRuneInStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_FullRuneInString); ok {
			return x.FullRuneInString
		}
	}
	return nil
}

// GetDecodeRune returns the DecodeRune variant, or nil if not set.
func (x *NgoloFuzzOne) GetDecodeRune() *DecodeRuneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecodeRune); ok {
			return x.DecodeRune
		}
	}
	return nil
}

// GetDecodeRuneInString returns the DecodeRuneInString variant, or nil if not set.
func (x *NgoloFuzzOne) GetDecodeRuneInString() *DecodeRuneInStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecodeRuneInString); ok {
			return x.DecodeRuneInString
		}
	}
	return nil
}

// GetDecodeLastRune returns the DecodeLastRune variant, or nil if not set.
func (x *NgoloFuzzOne) GetDecodeLastRune() *DecodeLastRuneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecodeLastRune); ok {
			return x.DecodeLastRune
		}
	}
	return nil
}

// GetDecodeLastRuneInString returns the DecodeLastRuneInString variant, or nil if not set.
func (x *NgoloFuzzOne) GetDecodeLastRuneInString() *DecodeLastRuneInStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_DecodeLastRuneInString); ok {
			return x.DecodeLastRuneInString
		}
	}
	return nil
}

// GetRuneLen returns the RuneLen variant, or nil if not set.
func (x *NgoloFuzzOne) GetRuneLen() *RuneLenArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_RuneLen); ok {
			return x.RuneLen
		}
	}
	return nil
}

// GetEncodeRune returns the EncodeRune variant, or nil if not set.
func (x *NgoloFuzzOne) GetEncodeRune() *EncodeRuneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_EncodeRune); ok {
			return x.EncodeRune
		}
	}
	return nil
}

// GetAppendRune returns the AppendRune variant, or nil if not set.
func (x *NgoloFuzzOne) GetAppendRune() *AppendRuneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_AppendRune); ok {
			return x.AppendRune
		}
	}
	return nil
}

// GetRuneCount returns the RuneCount variant, or nil if not set.
func (x *NgoloFuzzOne) GetRuneCount() *RuneCountArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_RuneCount); ok {
			return x.RuneCount
		}
	}
	return nil
}

// GetRuneCountInString returns the RuneCountInString variant, or nil if not set.
func (x *NgoloFuzzOne) GetRuneCountInString() *RuneCountInStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_RuneCountInString); ok {
			return x.RuneCountInString
		}
	}
	return nil
}

// GetRuneStart returns the RuneStart variant, or nil if not set.
func (x *NgoloFuzzOne) GetRuneStart() *RuneStartArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_RuneStart); ok {
			return x.RuneStart
		}
	}
	return nil
}

// GetValid returns the Valid variant, or nil if not set.
func (x *NgoloFuzzOne) GetValid() *ValidArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_Valid); ok {
			return x.Valid
		}
	}
	return nil
}

// GetValidString returns the ValidString variant, or nil if not set.
func (x *NgoloFuzzOne) GetValidString() *ValidStringArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ValidString); ok {
			return x.ValidString
		}
	}
	return nil
}

// GetValidRune returns the ValidRune variant, or nil if not set.
func (x *NgoloFuzzOne) GetValidRune() *ValidRuneArgs {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzOne_ValidRune); ok {
			return x.ValidRune
		}
	}
	return nil
}
// isNgoloFuzzOne_Item is the sealed interface implemented by every oneof
// wrapper type for NgoloFuzzOne.Item.
type isNgoloFuzzOne_Item interface {
	isNgoloFuzzOne_Item()
}

// The wrapper structs below each hold one oneof variant of NgoloFuzzOne.Item;
// the protobuf field numbers (1-15) are fixed by ngolofuzz.proto.
type NgoloFuzzOne_FullRune struct {
	FullRune *FullRuneArgs `protobuf:"bytes,1,opt,name=FullRune,proto3,oneof"`
}

type NgoloFuzzOne_FullRuneInString struct {
	FullRuneInString *FullRuneInStringArgs `protobuf:"bytes,2,opt,name=FullRuneInString,proto3,oneof"`
}

type NgoloFuzzOne_DecodeRune struct {
	DecodeRune *DecodeRuneArgs `protobuf:"bytes,3,opt,name=DecodeRune,proto3,oneof"`
}

type NgoloFuzzOne_DecodeRuneInString struct {
	DecodeRuneInString *DecodeRuneInStringArgs `protobuf:"bytes,4,opt,name=DecodeRuneInString,proto3,oneof"`
}

type NgoloFuzzOne_DecodeLastRune struct {
	DecodeLastRune *DecodeLastRuneArgs `protobuf:"bytes,5,opt,name=DecodeLastRune,proto3,oneof"`
}

type NgoloFuzzOne_DecodeLastRuneInString struct {
	DecodeLastRuneInString *DecodeLastRuneInStringArgs `protobuf:"bytes,6,opt,name=DecodeLastRuneInString,proto3,oneof"`
}

type NgoloFuzzOne_RuneLen struct {
	RuneLen *RuneLenArgs `protobuf:"bytes,7,opt,name=RuneLen,proto3,oneof"`
}

type NgoloFuzzOne_EncodeRune struct {
	EncodeRune *EncodeRuneArgs `protobuf:"bytes,8,opt,name=EncodeRune,proto3,oneof"`
}

type NgoloFuzzOne_AppendRune struct {
	AppendRune *AppendRuneArgs `protobuf:"bytes,9,opt,name=AppendRune,proto3,oneof"`
}

type NgoloFuzzOne_RuneCount struct {
	RuneCount *RuneCountArgs `protobuf:"bytes,10,opt,name=RuneCount,proto3,oneof"`
}

type NgoloFuzzOne_RuneCountInString struct {
	RuneCountInString *RuneCountInStringArgs `protobuf:"bytes,11,opt,name=RuneCountInString,proto3,oneof"`
}

type NgoloFuzzOne_RuneStart struct {
	RuneStart *RuneStartArgs `protobuf:"bytes,12,opt,name=RuneStart,proto3,oneof"`
}

type NgoloFuzzOne_Valid struct {
	Valid *ValidArgs `protobuf:"bytes,13,opt,name=Valid,proto3,oneof"`
}

type NgoloFuzzOne_ValidString struct {
	ValidString *ValidStringArgs `protobuf:"bytes,14,opt,name=ValidString,proto3,oneof"`
}

type NgoloFuzzOne_ValidRune struct {
	ValidRune *ValidRuneArgs `protobuf:"bytes,15,opt,name=ValidRune,proto3,oneof"`
}

// Marker methods sealing each wrapper into isNgoloFuzzOne_Item.
func (*NgoloFuzzOne_FullRune) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_FullRuneInString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_DecodeRune) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_DecodeRuneInString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_DecodeLastRune) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_DecodeLastRuneInString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_RuneLen) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_EncodeRune) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_AppendRune) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_RuneCount) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_RuneCountInString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_RuneStart) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_Valid) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ValidString) isNgoloFuzzOne_Item() {}

func (*NgoloFuzzOne_ValidRune) isNgoloFuzzOne_Item() {}
// NgoloFuzzAny holds a single scalar value of one of five wire types; its
// oneof Item supplies generic fuzz inputs (double, int64, bool, string,
// bytes).
type NgoloFuzzAny struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Types that are valid to be assigned to Item:
	//
	//	*NgoloFuzzAny_DoubleArgs
	//	*NgoloFuzzAny_Int64Args
	//	*NgoloFuzzAny_BoolArgs
	//	*NgoloFuzzAny_StringArgs
	//	*NgoloFuzzAny_BytesArgs
	Item          isNgoloFuzzAny_Item `protobuf_oneof:"item"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *NgoloFuzzAny) Reset() {
	*x = NgoloFuzzAny{}
	mi := &file_ngolofuzz_proto_msgTypes[16]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering of the message.
func (x *NgoloFuzzAny) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NgoloFuzzAny as a protobuf message.
func (*NgoloFuzzAny) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use.
func (x *NgoloFuzzAny) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[16]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzAny.ProtoReflect.Descriptor instead.
func (*NgoloFuzzAny) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{16}
}

// GetItem returns whichever oneof wrapper is set, or nil if none is.
func (x *NgoloFuzzAny) GetItem() isNgoloFuzzAny_Item {
	if x != nil {
		return x.Item
	}
	return nil
}

// GetDoubleArgs returns the DoubleArgs variant, or 0 if not set.
func (x *NgoloFuzzAny) GetDoubleArgs() float64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_DoubleArgs); ok {
			return x.DoubleArgs
		}
	}
	return 0
}

// GetInt64Args returns the Int64Args variant, or 0 if not set.
func (x *NgoloFuzzAny) GetInt64Args() int64 {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_Int64Args); ok {
			return x.Int64Args
		}
	}
	return 0
}

// GetBoolArgs returns the BoolArgs variant, or false if not set.
func (x *NgoloFuzzAny) GetBoolArgs() bool {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BoolArgs); ok {
			return x.BoolArgs
		}
	}
	return false
}

// GetStringArgs returns the StringArgs variant, or "" if not set.
func (x *NgoloFuzzAny) GetStringArgs() string {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_StringArgs); ok {
			return x.StringArgs
		}
	}
	return ""
}

// GetBytesArgs returns the BytesArgs variant, or nil if not set.
func (x *NgoloFuzzAny) GetBytesArgs() []byte {
	if x != nil {
		if x, ok := x.Item.(*NgoloFuzzAny_BytesArgs); ok {
			return x.BytesArgs
		}
	}
	return nil
}

// isNgoloFuzzAny_Item is the sealed interface implemented by every oneof
// wrapper type for NgoloFuzzAny.Item.
type isNgoloFuzzAny_Item interface {
	isNgoloFuzzAny_Item()
}

type NgoloFuzzAny_DoubleArgs struct {
	DoubleArgs float64 `protobuf:"fixed64,1,opt,name=DoubleArgs,proto3,oneof"`
}

type NgoloFuzzAny_Int64Args struct {
	Int64Args int64 `protobuf:"varint,2,opt,name=Int64Args,proto3,oneof"`
}

type NgoloFuzzAny_BoolArgs struct {
	BoolArgs bool `protobuf:"varint,3,opt,name=BoolArgs,proto3,oneof"`
}

type NgoloFuzzAny_StringArgs struct {
	StringArgs string `protobuf:"bytes,4,opt,name=StringArgs,proto3,oneof"`
}

type NgoloFuzzAny_BytesArgs struct {
	BytesArgs []byte `protobuf:"bytes,5,opt,name=BytesArgs,proto3,oneof"`
}

// Marker methods sealing each wrapper into isNgoloFuzzAny_Item.
func (*NgoloFuzzAny_DoubleArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_Int64Args) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BoolArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_StringArgs) isNgoloFuzzAny_Item() {}

func (*NgoloFuzzAny_BytesArgs) isNgoloFuzzAny_Item() {}
// NgoloFuzzList is the top-level fuzz input: an ordered sequence of
// NgoloFuzzOne calls to replay.
type NgoloFuzzList struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	List          []*NgoloFuzzOne        `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset restores the message to its zero value and re-binds its message info.
func (x *NgoloFuzzList) Reset() {
	*x = NgoloFuzzList{}
	mi := &file_ngolofuzz_proto_msgTypes[17]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

// String returns a human-readable rendering of the message.
func (x *NgoloFuzzList) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *NgoloFuzzList as a protobuf message.
func (*NgoloFuzzList) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use.
func (x *NgoloFuzzList) ProtoReflect() protoreflect.Message {
	mi := &file_ngolofuzz_proto_msgTypes[17]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use NgoloFuzzList.ProtoReflect.Descriptor instead.
func (*NgoloFuzzList) Descriptor() ([]byte, []int) {
	return file_ngolofuzz_proto_rawDescGZIP(), []int{17}
}

// GetList returns the repeated list field; safe to call on a nil receiver.
func (x *NgoloFuzzList) GetList() []*NgoloFuzzOne {
	if x != nil {
		return x.List
	}
	return nil
}
// File_ngolofuzz_proto is the parsed file descriptor for ngolofuzz.proto,
// populated once by file_ngolofuzz_proto_init.
var File_ngolofuzz_proto protoreflect.FileDescriptor

// file_ngolofuzz_proto_rawDesc is the wire-format (serialized
// FileDescriptorProto) encoding of ngolofuzz.proto. The byte content is
// generated and must not be edited by hand: the descriptor tables and message
// indices elsewhere in this file depend on it exactly.
const file_ngolofuzz_proto_rawDesc = "" +
	"\n" +
	"\x0fngolofuzz.proto\x12\tngolofuzz\"\x1c\n" +
	"\fFullRuneArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\"$\n" +
	"\x14FullRuneInStringArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\x1e\n" +
	"\x0eDecodeRuneArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\"&\n" +
	"\x16DecodeRuneInStringArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\"\n" +
	"\x12DecodeLastRuneArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\"*\n" +
	"\x1aDecodeLastRuneInStringArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\x1b\n" +
	"\vRuneLenArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\tR\x01r\",\n" +
	"\x0eEncodeRuneArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\x12\f\n" +
	"\x01r\x18\x02 \x01(\tR\x01r\",\n" +
	"\x0eAppendRuneArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\x12\f\n" +
	"\x01r\x18\x02 \x01(\tR\x01r\"\x1d\n" +
	"\rRuneCountArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\"%\n" +
	"\x15RuneCountInStringArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\x1d\n" +
	"\rRuneStartArgs\x12\f\n" +
	"\x01b\x18\x01 \x01(\rR\x01b\"\x19\n" +
	"\tValidArgs\x12\f\n" +
	"\x01p\x18\x01 \x01(\fR\x01p\"\x1f\n" +
	"\x0fValidStringArgs\x12\f\n" +
	"\x01s\x18\x01 \x01(\tR\x01s\"\x1d\n" +
	"\rValidRuneArgs\x12\f\n" +
	"\x01r\x18\x01 \x01(\tR\x01r\"\xf4\a\n" +
	"\fNgoloFuzzOne\x125\n" +
	"\bFullRune\x18\x01 \x01(\v2\x17.ngolofuzz.FullRuneArgsH\x00R\bFullRune\x12M\n" +
	"\x10FullRuneInString\x18\x02 \x01(\v2\x1f.ngolofuzz.FullRuneInStringArgsH\x00R\x10FullRuneInString\x12;\n" +
	"\n" +
	"DecodeRune\x18\x03 \x01(\v2\x19.ngolofuzz.DecodeRuneArgsH\x00R\n" +
	"DecodeRune\x12S\n" +
	"\x12DecodeRuneInString\x18\x04 \x01(\v2!.ngolofuzz.DecodeRuneInStringArgsH\x00R\x12DecodeRuneInString\x12G\n" +
	"\x0eDecodeLastRune\x18\x05 \x01(\v2\x1d.ngolofuzz.DecodeLastRuneArgsH\x00R\x0eDecodeLastRune\x12_\n" +
	"\x16DecodeLastRuneInString\x18\x06 \x01(\v2%.ngolofuzz.DecodeLastRuneInStringArgsH\x00R\x16DecodeLastRuneInString\x122\n" +
	"\aRuneLen\x18\a \x01(\v2\x16.ngolofuzz.RuneLenArgsH\x00R\aRuneLen\x12;\n" +
	"\n" +
	"EncodeRune\x18\b \x01(\v2\x19.ngolofuzz.EncodeRuneArgsH\x00R\n" +
	"EncodeRune\x12;\n" +
	"\n" +
	"AppendRune\x18\t \x01(\v2\x19.ngolofuzz.AppendRuneArgsH\x00R\n" +
	"AppendRune\x128\n" +
	"\tRuneCount\x18\n" +
	" \x01(\v2\x18.ngolofuzz.RuneCountArgsH\x00R\tRuneCount\x12P\n" +
	"\x11RuneCountInString\x18\v \x01(\v2 .ngolofuzz.RuneCountInStringArgsH\x00R\x11RuneCountInString\x128\n" +
	"\tRuneStart\x18\f \x01(\v2\x18.ngolofuzz.RuneStartArgsH\x00R\tRuneStart\x12,\n" +
	"\x05Valid\x18\r \x01(\v2\x14.ngolofuzz.ValidArgsH\x00R\x05Valid\x12>\n" +
	"\vValidString\x18\x0e \x01(\v2\x1a.ngolofuzz.ValidStringArgsH\x00R\vValidString\x128\n" +
	"\tValidRune\x18\x0f \x01(\v2\x18.ngolofuzz.ValidRuneArgsH\x00R\tValidRuneB\x06\n" +
	"\x04item\"\xb8\x01\n" +
	"\fNgoloFuzzAny\x12 \n" +
	"\n" +
	"DoubleArgs\x18\x01 \x01(\x01H\x00R\n" +
	"DoubleArgs\x12\x1e\n" +
	"\tInt64Args\x18\x02 \x01(\x03H\x00R\tInt64Args\x12\x1c\n" +
	"\bBoolArgs\x18\x03 \x01(\bH\x00R\bBoolArgs\x12 \n" +
	"\n" +
	"StringArgs\x18\x04 \x01(\tH\x00R\n" +
	"StringArgs\x12\x1e\n" +
	"\tBytesArgs\x18\x05 \x01(\fH\x00R\tBytesArgsB\x06\n" +
	"\x04item\"<\n" +
	"\rNgoloFuzzList\x12+\n" +
	"\x04list\x18\x01 \x03(\v2\x17.ngolofuzz.NgoloFuzzOneR\x04listB\x19Z\x17./;fuzz_ng_unicode_utf8b\x06proto3"
// Lazily-built gzip copy of the raw descriptor, shared by the deprecated
// Descriptor methods above.
var (
	file_ngolofuzz_proto_rawDescOnce sync.Once
	file_ngolofuzz_proto_rawDescData []byte
)

// file_ngolofuzz_proto_rawDescGZIP gzip-compresses the raw file descriptor on
// first call (guarded by sync.Once) and returns the cached result thereafter.
// The unsafe.Slice/StringData conversion views the const string's bytes
// without copying.
func file_ngolofuzz_proto_rawDescGZIP() []byte {
	file_ngolofuzz_proto_rawDescOnce.Do(func() {
		file_ngolofuzz_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)))
	})
	return file_ngolofuzz_proto_rawDescData
}
// Runtime type tables for ngolofuzz.proto. The slice indices here are fixed
// by generation order (18 messages, indices 0-17) and are referenced by the
// msgTypes index literals in the methods above — do not reorder.
var file_ngolofuzz_proto_msgTypes = make([]protoimpl.MessageInfo, 18)

// file_ngolofuzz_proto_goTypes maps descriptor message indices to their Go
// types.
var file_ngolofuzz_proto_goTypes = []any{
	(*FullRuneArgs)(nil),               // 0: ngolofuzz.FullRuneArgs
	(*FullRuneInStringArgs)(nil),       // 1: ngolofuzz.FullRuneInStringArgs
	(*DecodeRuneArgs)(nil),             // 2: ngolofuzz.DecodeRuneArgs
	(*DecodeRuneInStringArgs)(nil),     // 3: ngolofuzz.DecodeRuneInStringArgs
	(*DecodeLastRuneArgs)(nil),         // 4: ngolofuzz.DecodeLastRuneArgs
	(*DecodeLastRuneInStringArgs)(nil), // 5: ngolofuzz.DecodeLastRuneInStringArgs
	(*RuneLenArgs)(nil),                // 6: ngolofuzz.RuneLenArgs
	(*EncodeRuneArgs)(nil),             // 7: ngolofuzz.EncodeRuneArgs
	(*AppendRuneArgs)(nil),             // 8: ngolofuzz.AppendRuneArgs
	(*RuneCountArgs)(nil),              // 9: ngolofuzz.RuneCountArgs
	(*RuneCountInStringArgs)(nil),      // 10: ngolofuzz.RuneCountInStringArgs
	(*RuneStartArgs)(nil),              // 11: ngolofuzz.RuneStartArgs
	(*ValidArgs)(nil),                  // 12: ngolofuzz.ValidArgs
	(*ValidStringArgs)(nil),            // 13: ngolofuzz.ValidStringArgs
	(*ValidRuneArgs)(nil),              // 14: ngolofuzz.ValidRuneArgs
	(*NgoloFuzzOne)(nil),               // 15: ngolofuzz.NgoloFuzzOne
	(*NgoloFuzzAny)(nil),               // 16: ngolofuzz.NgoloFuzzAny
	(*NgoloFuzzList)(nil),              // 17: ngolofuzz.NgoloFuzzList
}

// file_ngolofuzz_proto_depIdxs encodes field-type dependencies as indices
// into goTypes; the trailing sub-list boundaries partition it by use.
var file_ngolofuzz_proto_depIdxs = []int32{
	0,  // 0: ngolofuzz.NgoloFuzzOne.FullRune:type_name -> ngolofuzz.FullRuneArgs
	1,  // 1: ngolofuzz.NgoloFuzzOne.FullRuneInString:type_name -> ngolofuzz.FullRuneInStringArgs
	2,  // 2: ngolofuzz.NgoloFuzzOne.DecodeRune:type_name -> ngolofuzz.DecodeRuneArgs
	3,  // 3: ngolofuzz.NgoloFuzzOne.DecodeRuneInString:type_name -> ngolofuzz.DecodeRuneInStringArgs
	4,  // 4: ngolofuzz.NgoloFuzzOne.DecodeLastRune:type_name -> ngolofuzz.DecodeLastRuneArgs
	5,  // 5: ngolofuzz.NgoloFuzzOne.DecodeLastRuneInString:type_name -> ngolofuzz.DecodeLastRuneInStringArgs
	6,  // 6: ngolofuzz.NgoloFuzzOne.RuneLen:type_name -> ngolofuzz.RuneLenArgs
	7,  // 7: ngolofuzz.NgoloFuzzOne.EncodeRune:type_name -> ngolofuzz.EncodeRuneArgs
	8,  // 8: ngolofuzz.NgoloFuzzOne.AppendRune:type_name -> ngolofuzz.AppendRuneArgs
	9,  // 9: ngolofuzz.NgoloFuzzOne.RuneCount:type_name -> ngolofuzz.RuneCountArgs
	10, // 10: ngolofuzz.NgoloFuzzOne.RuneCountInString:type_name -> ngolofuzz.RuneCountInStringArgs
	11, // 11: ngolofuzz.NgoloFuzzOne.RuneStart:type_name -> ngolofuzz.RuneStartArgs
	12, // 12: ngolofuzz.NgoloFuzzOne.Valid:type_name -> ngolofuzz.ValidArgs
	13, // 13: ngolofuzz.NgoloFuzzOne.ValidString:type_name -> ngolofuzz.ValidStringArgs
	14, // 14: ngolofuzz.NgoloFuzzOne.ValidRune:type_name -> ngolofuzz.ValidRuneArgs
	15, // 15: ngolofuzz.NgoloFuzzList.list:type_name -> ngolofuzz.NgoloFuzzOne
	16, // [16:16] is the sub-list for method output_type
	16, // [16:16] is the sub-list for method input_type
	16, // [16:16] is the sub-list for extension type_name
	16, // [16:16] is the sub-list for extension extendee
	0,  // [0:16] is the sub-list for field type_name
}
func init() { file_ngolofuzz_proto_init() }

// file_ngolofuzz_proto_init registers the file's types with the protobuf
// runtime. It is idempotent: the File_ngolofuzz_proto guard makes repeat
// calls no-ops. It runs from init, so registration happens before any user
// code touches these types.
func file_ngolofuzz_proto_init() {
	if File_ngolofuzz_proto != nil {
		return
	}
	// Register the oneof wrapper types for NgoloFuzzOne.Item (msgTypes[15]).
	file_ngolofuzz_proto_msgTypes[15].OneofWrappers = []any{
		(*NgoloFuzzOne_FullRune)(nil),
		(*NgoloFuzzOne_FullRuneInString)(nil),
		(*NgoloFuzzOne_DecodeRune)(nil),
		(*NgoloFuzzOne_DecodeRuneInString)(nil),
		(*NgoloFuzzOne_DecodeLastRune)(nil),
		(*NgoloFuzzOne_DecodeLastRuneInString)(nil),
		(*NgoloFuzzOne_RuneLen)(nil),
		(*NgoloFuzzOne_EncodeRune)(nil),
		(*NgoloFuzzOne_AppendRune)(nil),
		(*NgoloFuzzOne_RuneCount)(nil),
		(*NgoloFuzzOne_RuneCountInString)(nil),
		(*NgoloFuzzOne_RuneStart)(nil),
		(*NgoloFuzzOne_Valid)(nil),
		(*NgoloFuzzOne_ValidString)(nil),
		(*NgoloFuzzOne_ValidRune)(nil),
	}
	// Register the oneof wrapper types for NgoloFuzzAny.Item (msgTypes[16]).
	file_ngolofuzz_proto_msgTypes[16].OneofWrappers = []any{
		(*NgoloFuzzAny_DoubleArgs)(nil),
		(*NgoloFuzzAny_Int64Args)(nil),
		(*NgoloFuzzAny_BoolArgs)(nil),
		(*NgoloFuzzAny_StringArgs)(nil),
		(*NgoloFuzzAny_BytesArgs)(nil),
	}
	// Anchor type used only to derive this package's import path via reflection.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_ngolofuzz_proto_rawDesc), len(file_ngolofuzz_proto_rawDesc)),
			NumEnums:      0,
			NumMessages:   18,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_ngolofuzz_proto_goTypes,
		DependencyIndexes: file_ngolofuzz_proto_depIdxs,
		MessageInfos:      file_ngolofuzz_proto_msgTypes,
	}.Build()
	File_ngolofuzz_proto = out.File
	// Release the build-time tables; the runtime keeps what it needs.
	file_ngolofuzz_proto_goTypes = nil
	file_ngolofuzz_proto_depIdxs = nil
}